--Table and index statistics in PostgreSQL are only available as of the current point in time, so past values cannot be compared with current ones; this tool was written to collect periodic snapshots of those statistics
--Create the schema that holds the collected data
create schema db_stat;
--Create the base tables for the collected statistics
create table db_stat.snapshot_pg_stat_all_indexes
(relid int,indexrelid int,schemaname varchar(200),relname varchar(550),indexrelname varchar(550),idx_scan bigint,idx_tup_read bigint,idx_tup_fetch bigint,
snapid int,snap_create_time timestamp,host_ip cidr,host_port int,host_type varchar(20),comment varchar(500));
create index idx_stat_indexe_relname_indexrelname_createtime on db_stat.snapshot_pg_stat_all_indexes(relname varchar_pattern_ops,indexrelname varchar_pattern_ops,snap_create_time);
create index idx_stat_indexe_snapid on db_stat.snapshot_pg_stat_all_indexes(snapid);
create table db_stat.snapshot_pg_stat_all_tables
(relid int,schemaname varchar(200),relname varchar(550),seq_scan bigint,seq_tup_read bigint,idx_scan bigint,idx_tup_fetch bigint,n_tup_ins bigint,n_tup_upd bigint,
n_tup_del bigint,n_tup_hot_upd bigint,n_live_tup bigint,n_dead_tup bigint,last_vacuum timestamp,last_autovacuum timestamp,last_analyze timestamp,last_autoanalyze timestamp,vacuum_count bigint,autovacuum_count bigint,analyze_count bigint,autoanalyze_count bigint,
snapid int,snap_create_time timestamp,host_ip cidr,host_port int,host_type varchar(20),comment varchar(500));
create index idx_stat_table_relname_createtime on db_stat.snapshot_pg_stat_all_tables(relname varchar_pattern_ops,snap_create_time);
create index idx_stat_table_snapid on db_stat.snapshot_pg_stat_all_tables(snapid);
create table db_stat.snapshot_pg_statio_all_indexes
(relid int,indexrelid int,schemaname varchar(200),relname varchar(550),indexrelname varchar(550),idx_blks_read bigint,idx_blks_hit bigint,
snapid int,snap_create_time timestamp,host_ip cidr,host_port int,host_type varchar(20),comment varchar(500));
create index idx_statio_indexe_relname_indexrelname_createtime on db_stat.snapshot_pg_statio_all_indexes(relname varchar_pattern_ops,indexrelname varchar_pattern_ops,snap_create_time);
create index idx_statio_indexe_snapid on db_stat.snapshot_pg_statio_all_indexes(snapid);
create table db_stat.snapshot_pg_statio_all_tables
(relid int,schemaname varchar(200),relname varchar(550),heap_blks_read bigint,heap_blks_hit bigint,idx_blks_read bigint,idx_blks_hit bigint,toast_blks_read bigint,toast_blks_hit bigint,
tidx_blks_read bigint,tidx_blks_hit bigint,
snapid int,snap_create_time timestamp,host_ip cidr,host_port int,host_type varchar(20),comment varchar(500));
create index idx_statio_table_relname_createtime on db_stat.snapshot_pg_statio_all_tables(relname varchar_pattern_ops,snap_create_time);
create index idx_statio_table_snapid on db_stat.snapshot_pg_statio_all_tables(snapid);
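--Optional housekeeping (a sketch, not part of the original tool): the snapshot tables grow with every run,
--so a periodic purge by snap_create_time keeps them bounded; the 90-day retention window here is an assumption
delete from db_stat.snapshot_pg_stat_all_indexes where snap_create_time < now() - interval '90 days';
delete from db_stat.snapshot_pg_stat_all_tables where snap_create_time < now() - interval '90 days';
delete from db_stat.snapshot_pg_statio_all_indexes where snap_create_time < now() - interval '90 days';
delete from db_stat.snapshot_pg_statio_all_tables where snap_create_time < now() - interval '90 days';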
--Create the sequence used for snapshot ids
create sequence db_stat.seq_snapshot minvalue 1 maxvalue 99999999999999;
--After each collection run, fill in the snapshot id, snapshot time, host columns, etc.
create or replace function db_stat.process_snapshot_table(in i_host_ip cidr,in i_host_port int,in i_host_type varchar,in i_comment varchar default '') returns int as $$
declare
v_snapid int;
_detail text;
_hint text;
_message text;
begin
select nextval('db_stat.seq_snapshot') into v_snapid;
update db_stat.snapshot_pg_stat_all_indexes set snapid=v_snapid,snap_create_time=now(),host_ip=i_host_ip,host_port=i_host_port,host_type=i_host_type,comment=i_comment where snapid is null;
update db_stat.snapshot_pg_stat_all_tables set snapid=v_snapid,snap_create_time=now(),host_ip=i_host_ip,host_port=i_host_port,host_type=i_host_type,comment=i_comment where snapid is null;
update db_stat.snapshot_pg_statio_all_indexes set snapid=v_snapid,snap_create_time=now(),host_ip=i_host_ip,host_port=i_host_port,host_type=i_host_type,comment=i_comment where snapid is null;
update db_stat.snapshot_pg_statio_all_tables set snapid=v_snapid,snap_create_time=now(),host_ip=i_host_ip,host_port=i_host_port,host_type=i_host_type,comment=i_comment where snapid is null;
-- return 1 on success, 0 on failure
return 1;
EXCEPTION WHEN others then
GET STACKED DIAGNOSTICS
_message = message_text,
_detail = pg_exception_detail,
_hint = pg_exception_hint;
raise notice 'message: %, detail: %, hint: %', _message, _detail, _hint;
return 0;
end;
$$ language plpgsql;
--Collect the statistics manually (for testing)
INSERT INTO db_stat.snapshot_pg_stat_all_indexes(relid ,indexrelid ,schemaname ,relname ,indexrelname,idx_scan ,idx_tup_read,idx_tup_fetch)
SELECT relid ,
indexrelid ,
schemaname ,
relname ,
indexrelname,
idx_scan ,
idx_tup_read,
idx_tup_fetch
FROM pg_stat_all_indexes;
INSERT INTO db_stat.snapshot_pg_stat_all_tables(relid ,schemaname ,relname ,seq_scan ,seq_tup_read ,idx_scan ,idx_tup_fetch ,n_tup_ins ,n_tup_upd ,n_tup_del ,n_tup_hot_upd ,n_live_tup ,n_dead_tup ,last_vacuum ,last_autovacuum ,last_analyze ,last_autoanalyze ,vacuum_count ,autovacuum_count ,analyze_count ,autoanalyze_count)
SELECT relid ,
schemaname ,
relname ,
seq_scan ,
seq_tup_read ,
idx_scan ,
idx_tup_fetch ,
n_tup_ins ,
n_tup_upd ,
n_tup_del ,
n_tup_hot_upd ,
n_live_tup ,
n_dead_tup ,
last_vacuum ,
last_autovacuum ,
last_analyze ,
last_autoanalyze ,
vacuum_count ,
autovacuum_count ,
analyze_count ,
autoanalyze_count
FROM pg_stat_all_tables;
INSERT INTO db_stat.snapshot_pg_statio_all_indexes(relid ,indexrelid ,schemaname ,relname ,indexrelname ,idx_blks_read ,idx_blks_hit)
SELECT relid ,
indexrelid ,
schemaname ,
relname ,
indexrelname ,
idx_blks_read ,
idx_blks_hit
FROM pg_statio_all_indexes;
INSERT INTO db_stat.snapshot_pg_statio_all_tables(relid ,schemaname ,relname ,heap_blks_read ,heap_blks_hit ,idx_blks_read ,idx_blks_hit ,toast_blks_read ,toast_blks_hit ,tidx_blks_read ,tidx_blks_hit)
SELECT relid ,
schemaname ,
relname ,
heap_blks_read ,
heap_blks_hit ,
idx_blks_read ,
idx_blks_hit ,
toast_blks_read ,
toast_blks_hit ,
tidx_blks_read ,
tidx_blks_hit
FROM pg_statio_all_tables;
--Tag the rows just collected with a snapshot id and host information
select db_stat.process_snapshot_table('192.168.174.10',5432,'MASTER','');
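--Optional sanity check (not in the original post): verify that the rows just collected were tagged with the new snapshot id
select snapid, count(*) as rows_collected, min(snap_create_time) as snap_time
from db_stat.snapshot_pg_stat_all_tables
group by snapid
order by snapid desc
limit 5;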
--Create a shell script and run it daily from cron to collect the statistics
cat snap_stat.sh
#!/bin/sh
source ~/.bash_profile
source /etc/profile
PSQL="psql"
help_msg (){
echo ""
echo "Usage:"
echo " -f 要輸出結果的檔案,如果為null,則預設為/tmp/snapshot_pg_stat.log"
echo " -u 資料庫連線使用者名稱,如果為null,則為postgresql預設"
echo " -d 連線的資料庫名,如果為null,則為postgresql預設"
echo " -H 資料庫的主機ip,如果為null,則為postgresql預設"
echo " -p 資料庫的埠,如果為null,則為postgresql預設"
echo " -m 資料庫的型別,MASTER為主,SLAVE為從"
echo ""
exit 0
}
# end functions
while getopts "f:u:d:H:p:m:" flag
do
case $flag in
f) FILENAME=$OPTARG
;;
u) USERNAME=$OPTARG
;;
d) DATABASE=$OPTARG
;;
H) HOST=$OPTARG
;;
p) PORT=$OPTARG
;;
m) DATABASE_TYPE=$OPTARG
;;
\?|h) help_msg
;;
esac
done
if [ -z "$USERNAME" ]
then
USERNAME=postgres
fi
if [ -z "$DATABASE" ]
then
DATABASE=postgres
fi
if [ -z "$HOST" ]
then
help_msg
fi
if [ -z "$PORT" ]
then
PORT=5432
fi
if [ -z "$DATABASE_TYPE" ]
then
DATABASE_TYPE=MASTER
fi
if [ -z "$FILENAME" ]
then
FILENAME=/tmp/snapshot_pg_stat.log
fi
OUTPUT_FILENAME=/tmp/snapshot_pg_stat.csv
echo "" > $FILENAME
if [ ! -f $FILENAME ]
then
touch $FILENAME
else
printf "" | tee -a $FILENAME
fi
echo "指令碼於時間 `date "+%Y-%m-%d %H:%M:%S"` 開始執行" >> $FILENAME
echo "指令碼開始於`date "+%Y-%m-%d %H:%M:%S"` 處理pg_stat_all_indexes表" >> $FILENAME
$PSQL -p $PORT -U $USERNAME -d $DATABASE -c "copy (select relid ,indexrelid ,schemaname ,relname ,indexrelname,idx_scan ,idx_tup_read,idx_tup_fetch from pg_stat_all_indexes) to '$OUTPUT_FILENAME' with csv"
$PSQL -p 5432 -U postgres -d postgres -h 192.168.174.11 -c "\copy db_stat.snapshot_pg_stat_all_indexes(relid ,indexrelid ,schemaname ,relname ,indexrelname,idx_scan ,idx_tup_read,idx_tup_fetch) from '$OUTPUT_FILENAME' with csv"
echo "指令碼開始於`date "+%Y-%m-%d %H:%M:%S"` 處理pg_stat_all_tables表" >> $FILENAME
$PSQL -p $PORT -U $USERNAME -d $DATABASE -c "copy (select relid ,schemaname ,relname ,seq_scan ,seq_tup_read ,idx_scan ,idx_tup_fetch ,n_tup_ins ,n_tup_upd ,n_tup_del ,n_tup_hot_upd ,n_live_tup ,n_dead_tup ,last_vacuum ,last_autovacuum ,last_analyze ,last_autoanalyze ,vacuum_count ,autovacuum_count ,analyze_count ,autoanalyze_count from pg_stat_all_tables) to '$OUTPUT_FILENAME' with csv"
$PSQL -p 5432 -U postgres -d postgres -h 192.168.174.11 -c "\copy db_stat.snapshot_pg_stat_all_tables(relid ,schemaname ,relname ,seq_scan ,seq_tup_read ,idx_scan ,idx_tup_fetch ,n_tup_ins ,n_tup_upd ,n_tup_del ,n_tup_hot_upd ,n_live_tup ,n_dead_tup ,last_vacuum ,last_autovacuum ,last_analyze ,last_autoanalyze ,vacuum_count ,autovacuum_count ,analyze_count ,autoanalyze_count) from '$OUTPUT_FILENAME' with csv"
echo "指令碼開始於`date "+%Y-%m-%d %H:%M:%S"` 處理pg_statio_all_indexes表" >> $FILENAME
$PSQL -p $PORT -U $USERNAME -d $DATABASE -c "copy (select relid ,indexrelid ,schemaname ,relname ,indexrelname ,idx_blks_read ,idx_blks_hit from pg_statio_all_indexes) to '$OUTPUT_FILENAME' with csv"
$PSQL -p 5432 -U postgres -d postgres -h 192.168.174.11 -c "\copy db_stat.snapshot_pg_statio_all_indexes(relid ,indexrelid ,schemaname ,relname ,indexrelname ,idx_blks_read ,idx_blks_hit) from '$OUTPUT_FILENAME' with csv"
echo "指令碼開始於`date "+%Y-%m-%d %H:%M:%S"` 處理pg_statio_all_tables表" >> $FILENAME
$PSQL -p $PORT -U $USERNAME -d $DATABASE -c "copy (select relid ,schemaname ,relname ,heap_blks_read ,heap_blks_hit ,idx_blks_read ,idx_blks_hit ,toast_blks_read ,toast_blks_hit ,tidx_blks_read ,tidx_blks_hit from pg_statio_all_tables) to '$OUTPUT_FILENAME' with csv"
$PSQL -p 5432 -U postgres -d postgres -h 192.168.174.11 -c "\copy db_stat.snapshot_pg_statio_all_tables(relid ,schemaname ,relname ,heap_blks_read ,heap_blks_hit ,idx_blks_read ,idx_blks_hit ,toast_blks_read ,toast_blks_hit ,tidx_blks_read ,tidx_blks_hit) from '$OUTPUT_FILENAME' with csv"
$PSQL -p 5432 -U postgres -d postgres -h 192.168.174.11 -c "select db_stat.process_snapshot_table('$HOST',$PORT,'$DATABASE_TYPE','database stat snapshot');"
echo "############################################################################################" >> $FILENAME
echo "指令碼於時間 `date "+%Y-%m-%d %H:%M:%S"` 結束執行" >> $FILENAME
--Clear the snapshot tables (for example, to discard the rows from the manual test above)
truncate table db_stat.snapshot_pg_stat_all_indexes ;
truncate table db_stat.snapshot_pg_stat_all_tables ;
truncate table db_stat.snapshot_pg_statio_all_indexes ;
truncate table db_stat.snapshot_pg_statio_all_tables ;
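--Alternative sketch (not in the original post): if a collection run failed before process_snapshot_table ran,
--only the untagged rows need to be removed instead of truncating the whole history
delete from db_stat.snapshot_pg_stat_all_indexes where snapid is null;
delete from db_stat.snapshot_pg_stat_all_tables where snapid is null;
delete from db_stat.snapshot_pg_statio_all_indexes where snapid is null;
delete from db_stat.snapshot_pg_statio_all_tables where snapid is null;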
--Run the shell script manually
./snap_stat.sh -d mydb -p 5432 -m SLAVE -u postgres -H 192.168.174.10
--Cron entry: run every day at 08:08
8 8 * * * /db/pgsql/snap_stat.sh -d mydb -p 5435 -m SLAVE -u postgres -H 192.168.174.10
--Find rarely used indexes
select *
from (
SELECT t.relname,
t.indexrelname ,
max(idx_scan)-min(idx_scan) AS diff_idx_scan,
max(idx_tup_read)-min(idx_tup_read) AS diff_idx_tup_read
FROM db_stat.snapshot_pg_stat_all_indexes t
--WHERE snap_create_time BETWEEN '2015-12-11' AND '2016-03-11'
GROUP BY t.relname, t.indexrelname) t1
order by diff_idx_scan,relname,indexrelname ;
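--Follow-up sketch (not in the original post; run on the monitored database rather than the repository):
--pair the rarely used indexes with their on-disk size to prioritize which ones to consider dropping
select schemaname, relname, indexrelname,
pg_size_pretty(pg_relation_size(indexrelid)) as index_size,
idx_scan
from pg_stat_all_indexes
where schemaname not in ('pg_catalog','pg_toast')
order by pg_relation_size(indexrelid) desc;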
--View the index usage trend over time
select relname,
indexrelname,
snap_day,
diff_idx_scan,
case when sum(diff_idx_scan) over w1 >0 then diff_idx_scan*100/sum(diff_idx_scan) over w1 else 0 end as diff_idx_scan_percent,
diff_idx_tup_read,
case when sum(diff_idx_tup_read) over w1 >0 then diff_idx_tup_read*100/sum(diff_idx_tup_read) over w1 else 0 end as diff_idx_tup_read_percent
from (
SELECT t.relname,
t.indexrelname,
date_trunc('hour', snap_create_time) snap_day,
t.idx_scan-lag(t.idx_scan,1) over w AS diff_idx_scan,
t.idx_tup_read - lag(t.idx_tup_read,1) over w AS diff_idx_tup_read
from db_stat.snapshot_pg_stat_all_indexes t
--where indexrelname in ('','')
WINDOW w AS (PARTITION BY t.relname,t.indexrelname ORDER BY date_trunc('hour', t.snap_create_time))
) t1
where diff_idx_scan is not null
WINDOW w1 as (PARTITION BY t1.relname,t1.indexrelname)
order by relname,indexrelname,snap_day;
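--The same snapshots also support a per-day write-activity trend for tables (a sketch, not in the original post);
--max-min within each day approximates that day's churn, matching the diff approach used above
select relname,
date_trunc('day', snap_create_time) as snap_day,
max(n_tup_ins)-min(n_tup_ins) as diff_tup_ins,
max(n_tup_upd)-min(n_tup_upd) as diff_tup_upd,
max(n_tup_del)-min(n_tup_del) as diff_tup_del,
max(n_dead_tup) as max_dead_tup
from db_stat.snapshot_pg_stat_all_tables
group by relname, date_trunc('day', snap_create_time)
order by relname, snap_day;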
PostgreSQL: scheduled collection of table and index statistics. Reposted from: https://blog.csdn.net/weixin_33711641/article/details/89752462