介紹
本文講述一個傳輸組的同步過程。從txg_sync_thread
函式直到dbuf_sync_indirect
和dbuf_sync_leaf
函式層層呼叫( dbuf_sync_indirect
和dbuf_sync_leaf
負責下盤作為資料集快取的dbuf)。dbuf_sync_indirect
中,間接塊I/O依賴於其子間接塊I/O的排程更新,但是在同一等級(level
)的間接塊之間又是相互獨立的。葉子節點資料塊的寫相對其他葉子節點資料塊也是相互獨立的。分析dbuf_sync_{leaf, indirect}
,我們可以知道,最後快取刷盤的本質是處理一個髒資料記錄的連結串列。那麼髒資料記錄又是怎麼跟ZFS物件,也就是dnode,對應起來呢?在下一篇文章裡我們會介紹VFS的寫操作是怎麼最後在ZFS中生成髒資料記錄的。
正常的寫ZIO在ZIO流水線中被非同步分發,流水線等待其所有的獨立子IO結束。0級的資料塊會被併發處理。
正文
接下來介紹對於任意儲存池IO,從txg_sync_start
到 zio_wait
(或zio_nowait
) 的函式呼叫的流程。對於程式碼,我們只摘取其中核心的部分,使用(…)來省略其他程式碼,增加本文中程式碼的可讀性。
一個儲存池在建立和匯入的時候,txg_sync_start
函式會被呼叫,建立txg_sync_thread
執行緒。
void
txg_sync_start(dsl_pool_t *dp)
{
...
tx->tx_sync_thread = thread_create(NULL, 32 << 10, txg_sync_thread, dp, 0, &p0, TS_RUN, minclsyspri);
...
}
儲存池執行期間會不停地在txg
狀態之間切換。在進入syncing
狀態的時候,就會呼叫spa_sync
。而spa_sync
呼叫完畢後,就會喚醒所有等待在tx_sync_done_cv
上的執行緒。
static void
txg_sync_thread(void *arg)
{
dsl_pool_t *dp = arg;
spa_t *spa = dp->dp_spa;
...
for (;;) {
...
txg = tx->tx_quiesced_txg;
...
spa_sync(spa, txg);
...
cv_broadcast(&tx->tx_sync_done_cv);
...
}
}
spa_sync
會呼叫dsl_pool_sync
,直到沒有新的髒資料需要被更新。
void
spa_sync(spa_t *spa, uint64_t txg)
{
dsl_pool_t *dp = spa->spa_dsl_pool;
objset_t *mos = spa->spa_meta_objset;
...
do {
...
dsl_pool_sync(dp, txg);
} while (dmu_objset_is_dirty(mos, txg));
}
dsl_pool_sync
遍歷儲存池內所有髒資料集,呼叫dsl_dataset_sync
兩次。第一次將所有髒資料塊下盤。第二次則為所有的使用者空間改變下盤。這兩個遍歷操作都會以一個同步ZIO
的形式建立到本儲存池的根ZIO下。(同步的體現形式為呼叫了zio_wait
)。
void
dsl_pool_sync(dsl_pool_t *dp, uint64_t txg)
{
...
dsl_dataset_t *ds;
objset_t *mos = dp->dp_meta_objset;
...
tx = dmu_tx_create_assigned(dp, txg);
/*
* Write out all dirty blocks of dirty datasets.
*/
zio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
while ((ds = txg_list_remove(&dp->dp_dirty_datasets, txg)) != NULL) {
/*
* We must not sync any non-MOS datasets twice,
* because we may have taken a snapshot of them.
* However, we may sync newly-created datasets on
* pass 2.
*/
ASSERT(!list_link_active(&ds->ds_synced_link));
list_insert_tail(&synced_datasets, ds);
dsl_dataset_sync(ds, zio, tx);
}
VERIFY0(zio_wait(zio));
...
/*
* After the data blocks have been written (ensured by the zio_wait()
* above), update the user/group space accounting.
*/
for (ds = list_head(&synced_datasets); ds != NULL;
ds = list_next(&synced_datasets, ds)) {
dmu_objset_do_userquota_updates(ds->ds_objset, tx);
}
/*
* Sync the datasets again to push out the changes due to
* userspace updates. This must be done before we process the
* sync tasks, so that any snapshots will have the correct
* user accounting information (and we won`t get confused
* about which blocks are part of the snapshot).
*/
zio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
while ((ds = txg_list_remove(&dp->dp_dirty_datasets, txg)) != NULL) {
ASSERT(list_link_active(&ds->ds_synced_link));
dmu_buf_rele(ds->ds_dbuf, ds);
dsl_dataset_sync(ds, zio, tx);
}
VERIFY0(zio_wait(zio));
...
}
dsl_dataset_sync
傳遞資料集(dataset)的物件集合(objset)給dmu_objset_sync
函式進行資料集同步。
void
dsl_dataset_sync(dsl_dataset_t *ds, zio_t *zio, dmu_tx_t *tx)
{
...
dmu_objset_sync(ds->ds_objset, zio, tx);
}
dmu_objset_sync
呼叫dmu_objset_sync_dnodes
將物件集合(objset)下的髒dnode連結串列和被釋放dnode連結串列中的dnode下盤。需要注意的是,對於特殊的後設資料物件(special metadata dnodes),需要先行同步,呼叫dnode_sync
即可。
/* called from dsl */
void
dmu_objset_sync(objset_t *os, zio_t *pio, dmu_tx_t *tx)
{
int txgoff;
...
list_t *newlist = NULL;
dbuf_dirty_record_t *dr;
...
/*
* Create the root block IO
*/
...
zio = arc_write(pio, os->os_spa, tx->tx_txg,
os->os_rootbp, os->os_phys_buf, DMU_OS_IS_L2CACHEABLE(os),
DMU_OS_IS_L2COMPRESSIBLE(os), &zp, dmu_objset_write_ready,
NULL, dmu_objset_write_done, os, ZIO_PRIORITY_ASYNC_WRITE,
ZIO_FLAG_MUSTSUCCEED, &zb);
/*
* Sync special dnodes - the parent IO for the sync is the root block
*/
dnode_sync(DMU_META_DNODE(os), tx);
...
if (DMU_USERUSED_DNODE(os) &&
DMU_USERUSED_DNODE(os)->dn_type != DMU_OT_NONE) {
DMU_USERUSED_DNODE(os)->dn_zio = zio;
dnode_sync(DMU_USERUSED_DNODE(os), tx);
DMU_GROUPUSED_DNODE(os)->dn_zio = zio;
dnode_sync(DMU_GROUPUSED_DNODE(os), tx);
}
...
txgoff = tx->tx_txg & TXG_MASK;
...
if (dmu_objset_userused_enabled(os)) {
newlist = &os->os_synced_dnodes;
/*
* We must create the list here because it uses the
* dn_dirty_link[] of this txg.
*/
list_create(newlist, sizeof (dnode_t),
offsetof(dnode_t, dn_dirty_link[txgoff]));
}
dmu_objset_sync_dnodes(&os->os_free_dnodes[txgoff], newlist, tx);
dmu_objset_sync_dnodes(&os->os_dirty_dnodes[txgoff], newlist, tx);
list = &DMU_META_DNODE(os)->dn_dirty_records[txgoff];
while (dr = list_head(list)) {
ASSERT0(dr->dr_dbuf->db_level);
list_remove(list, dr);
if (dr->dr_zio)
zio_nowait(dr->dr_zio);
}
/*
* Free intent log blocks up to this tx.
*/
zil_sync(os->os_zil, tx);
os->os_phys->os_zil_header = os->os_zil_header;
zio_nowait(zio);
}
dmu_objset_sync_dnodes
對於連結串列內的置髒物件,會呼叫dnode_sync
,將dnode下盤,並把它們加入到newlist(如果非空)中。(根據呼叫處傳入的引數可以判斷,它們實際被加入到os->os_synced_dnodes。)
static void
dmu_objset_sync_dnodes(list_t *list, list_t *newlist, dmu_tx_t *tx)
{
dnode_t *dn;
while (dn = list_head(list)) {
...
/*
* Initialize dn_zio outside dnode_sync() because the
* meta-dnode needs to set it ouside dnode_sync().
*/
dn->dn_zio = dn->dn_dbuf->db_data_pending->dr_zio;
list_remove(list, dn);
if (newlist) {
(void) dnode_add_ref(dn, newlist);
list_insert_tail(newlist, dn);
}
dnode_sync(dn, tx);
}
}
dnode_sync
將置髒的緩衝記錄傳遞給dbuf_sync_list
。
void
dnode_sync(dnode_t *dn, dmu_tx_t *tx)
{
...
list_t *list = &dn->dn_dirty_records[txgoff];
...
dbuf_sync_list(list, tx);
}
dbuf_sync_list
函式遍歷訪問髒緩衝記錄連結串列中的每個元素,根據緩衝資料的型別,呼叫 dbuf_sync_leaf
和dbuf_sync_indirect
。
void
dbuf_sync_list(list_t *list, dmu_tx_t *tx)
{
dbuf_dirty_record_t *dr;
while (dr = list_head(list)) {
<...>
list_remove(list, dr);
if (dr->dr_dbuf->db_level > 0)
dbuf_sync_indirect(dr, tx);
else
dbuf_sync_leaf(dr, tx);
}
}
ZFS是COW(寫時複製)的檔案系統,對於每個塊都不例外。因此每次資料塊更新後,指向該資料塊的間接塊也會被更新。所以在修改一個檔案內的資料塊時,如果間接塊尚未在記憶體中,必須先從盤中讀取。一個間接塊被置髒,意味著它所指向的某些資料塊有髒資料。在給間接塊的所有孩子節點下發ZIO之後,本間接塊的ZIO才被下發。
static void
dbuf_sync_indirect(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
{
dmu_buf_impl_t *db = dr->dr_dbuf;
...
/* Read the block if it hasn`t been read yet. */
if (db->db_buf == NULL) {
mutex_exit(&db->db_mtx);
(void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED);
mutex_enter(&db->db_mtx);
}
..
/* Provide the pending dirty record to child dbufs */
db->db_data_pending = dr;
mutex_exit(&db->db_mtx);
/* doesn`t actually execute a write - it just creates
* dr->dr_zio which is executed by zio_nowait before
* returning
*/
dbuf_write(dr, db->db_buf, tx);
zio = dr->dr_zio;
mutex_enter(&dr->dt.di.dr_mtx);
dbuf_sync_list(&dr->dt.di.dr_children, tx);
ASSERT(list_head(&dr->dt.di.dr_children) == NULL);
mutex_exit(&dr->dt.di.dr_mtx);
zio_nowait(zio);
}
dbuf_sync_leaf
為髒緩衝資料記錄建立ZIO,非同步分發之。
static void
dbuf_sync_leaf(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
{
arc_buf_t **datap = &dr->dt.dl.dr_data;
dmu_buf_impl_t *db = dr->dr_dbuf;
...
/* doesn`t actually execute a write - it just creates
* dr->dr_zio which is executed by zio_nowait before
* returning
*/
dbuf_write(dr, *datap, tx);
ASSERT(!list_link_active(&dr->dr_dirty_node));
if (dn->dn_object == DMU_META_DNODE_OBJECT) {
list_insert_tail(&dn->dn_dirty_records[txg&TXG_MASK], dr);
DB_DNODE_EXIT(db);
} else {
/*
* Although zio_nowait() does not "wait for an IO", it does
* initiate the IO. If this is an empty write it seems plausible
* that the IO could actually be completed before the nowait
* returns. We need to DB_DNODE_EXIT() first in case
* zio_nowait() invalidates the dbuf.
*/
DB_DNODE_EXIT(db);
zio_nowait(dr->dr_zio);
}
}