Basic Principles of the Linux Kernel Preemption Patch (repost)
Author: jkl
While the CPU is executing in the kernel it is not non-preemptible everywhere: the kernel contains gaps in which preemption is safe. The basic idea of the kernel preemption patch is to treat the code regions that may run in parallel under SMP as regions in which kernel preemption is also allowed.
The 2.4 kernel had just refined its multi-CPU synchronization primitives, marking every non-parallelizable block of code explicitly with spinlocks and rwlocks, so the patch follows naturally from that work.
Concretely, a preempt_count variable is added to the process's task structure to act as a kernel preemption lock; it is incremented and decremented together with spinlock and rwlock acquire and release. When preempt_count is 0, kernel rescheduling (preemption) is permitted.
The entry point of the preemption scheduler is preempt_schedule(), which marks the current process with the TASK_PREEMPTED state and then calls schedule(); while a process is in the TASK_PREEMPTED state, schedule() will not remove it from the run queue.
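Before looking at the patch itself, the mechanism can be illustrated with a minimal, self-contained C sketch. This is illustrative only and not code from the patch; my_task, my_preempt_disable and the other names below are hypothetical:

/* Conceptual sketch of the preemption lock; simplified, not the patch's code. */
#include <stdio.h>

struct my_task {
	int preempt_count;   /* > 0 means kernel preemption is forbidden        */
	int need_resched;    /* set asynchronously, e.g. by the timer interrupt */
};

static void my_preempt_schedule(struct my_task *t)
{
	/* the real preempt_schedule() marks the task TASK_PREEMPTED and calls schedule() */
	printf("rescheduling, need_resched=%d\n", t->need_resched);
}

static void my_preempt_disable(struct my_task *t)
{
	t->preempt_count++;             /* done by every spin_lock()/read_lock()/write_lock() */
}

static void my_preempt_enable(struct my_task *t)
{
	if (--t->preempt_count == 0 && t->need_resched)
		my_preempt_schedule(t); /* safe point: no locks held, so reschedule right away */
}

int main(void)
{
	struct my_task t = { 0, 0 };

	my_preempt_disable(&t);         /* enter a critical section */
	t.need_resched = 1;             /* pretend a timer tick asked for a reschedule */
	my_preempt_enable(&t);          /* leaving the critical section triggers the reschedule */
	return 0;
}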
Below is an outline of the main code of the kernel preemption patch:
arch/i386/kernel/entry.S:
preempt_count = 4		# the flags slot in task_struct is reused as preempt_count (flags itself was moved elsewhere)
ret_from_exception:		# return from an exception
#ifdef CONFIG_SMP
GET_CURRENT(%ebx)
movl processor(%ebx),%eax
shll $CONFIG_X86_L1_CACHE_SHIFT,%eax
movl SYMBOL_NAME(irq_stat)(,%eax),%ecx # softirq_active
testl SYMBOL_NAME(irq_stat)+4(,%eax),%ecx # softirq_mask
#else
movl SYMBOL_NAME(irq_stat),%ecx # softirq_active
testl SYMBOL_NAME(irq_stat)+4,%ecx # softirq_mask
#endif
jne handle_softirq
#ifdef CONFIG_PREEMPT
cli
incl preempt_count(%ebx)	# the exception entry has no instruction disabling preemption, so bump the count here to pair with the decrement in ret_from_intr
#endif
ENTRY(ret_from_intr)		# return from a hardware interrupt
GET_CURRENT(%ebx)
#ifdef CONFIG_PREEMPT
cli
decl preempt_count(%ebx)	# restore the kernel preemption count
#endif
movl EFLAGS(%esp),%eax # mix EFLAGS and CS
movb CS(%esp),%al
testl $(VM_MASK | 3),%eax # return to VM86 mode or non-supervisor?
jne ret_with_reschedule
#ifdef CONFIG_PREEMPT
cmpl $0,preempt_count(%ebx)
jnz restore_all			# a non-zero preempt_count means kernel preemption is disabled
cmpl $0,need_resched(%ebx)
jz restore_all			# no reschedule requested
movl SYMBOL_NAME(irq_stat)+irq_stat_local_bh_count CPU_INDX,%ecx
addl SYMBOL_NAME(irq_stat)+irq_stat_local_irq_count CPU_INDX,%ecx
jnz restore_all			# never preempt while bottom halves or hard irqs are being handled on this CPU
incl preempt_count(%ebx)
sti
call SYMBOL_NAME(preempt_schedule)
jmp ret_from_intr		# when this task is resumed, go back through ret_from_intr so the count is dropped before returning
#else
jmp restore_all
#endif
ALIGN
handle_softirq:
#ifdef CONFIG_PREEMPT
cli
GET_CURRENT(%ebx)
incl preempt_count(%ebx)
sti
#endif
call SYMBOL_NAME(do_softirq)
jmp ret_from_intr
ALIGN
reschedule:
call SYMBOL_NAME(schedule) # test
jmp ret_from_sys_call
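For readability, the CONFIG_PREEMPT decision made in ret_from_intr above can also be rendered as a stand-alone C sketch. Everything below (current_task, returning_to_kernel_mode(), bh_or_irq_active(), preempt_schedule_stub()) is a hypothetical stub standing in for the assembly's checks, not kernel code:

/* Stand-alone sketch of the ret_from_intr preemption decision (CONFIG_PREEMPT path). */
#include <stdio.h>

struct task {
	int preempt_count;
	int need_resched;
};

static struct task current_task = { 1, 1 };	/* interrupt entry already bumped the count */

static int returning_to_kernel_mode(void) { return 1; }  /* the EFLAGS/CS test in the assembly     */
static int bh_or_irq_active(void)         { return 0; }  /* local_bh_count + local_irq_count check */
static void preempt_schedule_stub(void)   { puts("preempt_schedule()"); }

static void ret_from_intr_sketch(void)
{
	current_task.preempt_count--;		/* undo the bump made at interrupt entry */

	if (!returning_to_kernel_mode())	/* returning to user or VM86 mode: take the usual ret_with_reschedule path */
		return;
	if (current_task.preempt_count != 0)	/* preemption locked: a spinlock is held or irqs are nested */
		return;
	if (!current_task.need_resched)		/* nobody is asking for the CPU */
		return;
	if (bh_or_irq_active())			/* a bottom half or hard irq is still running on this CPU */
		return;

	current_task.preempt_count++;		/* block recursive preemption while rescheduling */
	preempt_schedule_stub();		/* the real code re-enables interrupts and calls preempt_schedule() */
	/* afterwards it jumps back to ret_from_intr, which drops the count once more */
}

int main(void)
{
	ret_from_intr_sketch();
	return 0;
}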
include/asm/hw_irq.h:
...
#ifdef CONFIG_PREEMPT
#define BUMP_CONTEX_SWITCH_LOCK \
	GET_CURRENT \
	"incl 4(%ebx)\n\t"
#else
#define BUMP_CONTEX_SWITCH_LOCK
#endif
/* SAVE_ALL saves the register context at the hardware interrupt entry */
#define SAVE_ALL \
	"cld\n\t" \
	"pushl %es\n\t" \
	"pushl %ds\n\t" \
	"pushl %eax\n\t" \
	"pushl %ebp\n\t" \
	"pushl %edi\n\t" \
	"pushl %esi\n\t" \
	"pushl %edx\n\t" \
	"pushl %ecx\n\t" \
	"pushl %ebx\n\t" \
	"movl $" STR(__KERNEL_DS) ",%edx\n\t" \
	"movl %edx,%ds\n\t" \
	"movl %edx,%es\n\t" \
	BUMP_CONTEX_SWITCH_LOCK		/* the hardware interrupt entry disables kernel preemption */
include/linux/spinlock.h:
#ifdef CONFIG_PREEMPT
#define switch_lock_count()		current->preempt_count
/* non-zero when the current process has kernel preemption disabled */
#define in_ctx_sw_off()			(switch_lock_count().counter)
#define atomic_ptr_in_ctx_sw_off()	(&switch_lock_count())
/* disable kernel preemption: increment the current process's preemption count */
#define ctx_sw_off() \
	do { \
		atomic_inc(atomic_ptr_in_ctx_sw_off()); \
	} while (0)
/* re-enable kernel preemption without rescheduling: decrement the count */
#define ctx_sw_on_no_preempt() \
	do { \
		atomic_dec(atomic_ptr_in_ctx_sw_off()); \
	} while (0)
/* re-enable kernel preemption and, if a reschedule is pending, perform it */
#define ctx_sw_on() \
	do { \
		if (atomic_dec_and_test(atomic_ptr_in_ctx_sw_off()) && \
		    current->need_resched) \
			preempt_schedule(); \
	} while (0)
/* taking a spinlock disables kernel preemption */
#define spin_lock(lock) \
	do { \
		ctx_sw_off(); \
		_raw_spin_lock(lock); \
	} while (0)
/* try to take the lock; on failure re-enable preemption and return 0 */
#define spin_trylock(lock)	({ctx_sw_off(); _raw_spin_trylock(lock) ? \
					1 : ({ctx_sw_on(); 0;});})
/* releasing a spinlock re-enables kernel preemption and may complete it */
#define spin_unlock(lock) \
	do { \
		_raw_spin_unlock(lock); \
		ctx_sw_on(); \
	} while (0)
#define read_lock(lock)		({ctx_sw_off(); _raw_read_lock(lock);})
#define read_unlock(lock)	({_raw_read_unlock(lock); ctx_sw_on();})
#define write_lock(lock)	({ctx_sw_off(); _raw_write_lock(lock);})
#define write_unlock(lock)	({_raw_write_unlock(lock); ctx_sw_on();})
#define write_trylock(lock)	({ctx_sw_off(); _raw_write_trylock(lock) ? \
					1 : ({ctx_sw_on(); 0;});})
...
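The effect on ordinary kernel code is that every existing critical section automatically becomes a no-preemption region, with no change to the callers. A hypothetical driver fragment (my_dev_lock, my_dev_pending and my_dev_push are made-up names for illustration, not part of the patch):

#include <linux/spinlock.h>

/* Illustrative only: preemption is held off for exactly the lifetime of the
 * critical section, because the wrappers above bump and drop preempt_count. */
static spinlock_t my_dev_lock = SPIN_LOCK_UNLOCKED;
static int my_dev_pending;

void my_dev_push(int value)
{
	spin_lock(&my_dev_lock);	/* ctx_sw_off(): preempt_count++, then _raw_spin_lock()      */
	my_dev_pending = value;		/* no kernel preemption can happen in here                   */
	spin_unlock(&my_dev_lock);	/* _raw_spin_unlock(), then ctx_sw_on(): preempt_count--,
					 * plus preempt_schedule() if need_resched was set meanwhile */
}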
include/asm/softirq.h:
#define cpu_bh_disable(cpu)	do { ctx_sw_off(); local_bh_count(cpu)++; barrier(); } while (0)
#define cpu_bh_enable(cpu)	do { barrier(); local_bh_count(cpu)--; ctx_sw_on(); } while (0)
kernel/sched.c:
#ifdef CONFIG_PREEMPT
asmlinkage void preempt_schedule(void)
{
	while (current->need_resched) {
		ctx_sw_off();
		current->state |= TASK_PREEMPTED;
		schedule();
		current->state &= ~TASK_PREEMPTED;
		ctx_sw_on_no_preempt();
	}
}
#endif
asmlinkage void schedule(void)
{
	struct schedule_data * sched_data;
	struct task_struct *prev, *next, *p;
	struct list_head *tmp;
	int this_cpu, c;

#ifdef CONFIG_PREEMPT
	ctx_sw_off();
#endif
	if (!current->active_mm) BUG();
need_resched_back:
	prev = current;
	this_cpu = prev->processor;

	if (in_interrupt())
		goto scheduling_in_interrupt;

	release_kernel_lock(prev, this_cpu);

	/* Do "administrative" work here while we don't hold any locks */
	if (softirq_active(this_cpu) & softirq_mask(this_cpu))
		goto handle_softirq;
handle_softirq_back:

	/*
	 * 'sched_data' is protected by the fact that we can run
	 * only one process per CPU.
	 */
	sched_data = & aligned_data[this_cpu].schedule_data;

	spin_lock_irq(&runqueue_lock);

	/* move an exhausted RR process to be last.. */
	if (prev->policy == SCHED_RR)
		goto move_rr_last;
move_rr_back:

	switch (prev->state) {
		case TASK_INTERRUPTIBLE:
			if (signal_pending(prev)) {
				prev->state = TASK_RUNNING;
				break;
			}
		default:
#ifdef CONFIG_PREEMPT
			if (prev->state & TASK_PREEMPTED)
				break;	/* a preempted task stays on the run queue */
#endif
			del_from_runqueue(prev);
#ifdef CONFIG_PREEMPT
		case TASK_PREEMPTED:
#endif
		case TASK_RUNNING:;
	}
	prev->need_resched = 0;

	/*
	 * this is the scheduler proper:
	 */

repeat_schedule:
	/*
	 * Default process to select..
	 */
	next = idle_task(this_cpu);
	c = -1000;
	if (task_on_runqueue(prev))
		goto still_running;

still_running_back:
	list_for_each(tmp, &runqueue_head) {
		p = list_entry(tmp, struct task_struct, run_list);
		if (can_schedule(p, this_cpu)) {
			int weight = goodness(p, this_cpu, prev->active_mm);
			if (weight > c)
				c = weight, next = p;
		}
	}

	/* Do we need to re-calculate counters? */
	if (!c)
		goto recalculate;
	/*
	 * from this point on nothing can prevent us from
	 * switching to the next task, save this fact in
	 * sched_data.
	 */
	sched_data->curr = next;
#ifdef CONFIG_SMP
	next->has_cpu = 1;
	next->processor = this_cpu;
#endif
	spin_unlock_irq(&runqueue_lock);

	if (prev == next)
		goto same_process;

#ifdef CONFIG_SMP
	/*
	 * maintain the per-process 'last schedule' value.
	 * (this has to be recalculated even if we reschedule to
	 * the same process) Currently this is only used on SMP,
	 * and it's approximate, so we do not have to maintain
	 * it while holding the runqueue spinlock.
	 */
	sched_data->last_schedule = get_cycles();

	/*
	 * We drop the scheduler lock early (it's a global spinlock),
	 * thus we have to lock the previous process from getting
	 * rescheduled during switch_to().
	 */
#endif /* CONFIG_SMP */

	kstat.context_swtch++;
	/*
	 * there are 3 processes which are affected by a context switch:
	 *
	 * prev == .... ==> (last => next)
	 *
	 * It's the 'much more previous' 'prev' that is on next's stack,
	 * but prev is set to (the just run) 'last' process by switch_to().
	 * This might sound slightly confusing but makes tons of sense.
	 */

	prepare_to_switch();
	{
		struct mm_struct *mm = next->mm;
		struct mm_struct *oldmm = prev->active_mm;
		if (!mm) {
			if (next->active_mm) BUG();
			next->active_mm = oldmm;
			atomic_inc(&oldmm->mm_count);
			enter_lazy_tlb(oldmm, next, this_cpu);
		} else {
			if (next->active_mm != mm) BUG();
			switch_mm(oldmm, mm, next, this_cpu);
		}

		if (!prev->mm) {
			prev->active_mm = NULL;
			mmdrop(oldmm);
		}
	}

	/*
	 * This just switches the register state and the
	 * stack.
	 */
	switch_to(prev, next, prev);
	__schedule_tail(prev);

same_process:
	reacquire_kernel_lock(current);
	if (current->need_resched)
		goto need_resched_back;

#ifdef CONFIG_PREEMPT
	ctx_sw_on_no_preempt();
#endif
	return;
recalculate:
	{
		struct task_struct *p;
		spin_unlock_irq(&runqueue_lock);
		read_lock(&tasklist_lock);
		for_each_task(p)
			p->counter = (p->counter >> 1) + NICE_TO_TICKS(p->nice);
		read_unlock(&tasklist_lock);
		spin_lock_irq(&runqueue_lock);
	}
	goto repeat_schedule;

still_running:
	c = goodness(prev, this_cpu, prev->active_mm);
	next = prev;
	goto still_running_back;

handle_softirq:
	do_softirq();
	goto handle_softirq_back;

move_rr_last:
	if (!prev->counter) {
		prev->counter = NICE_TO_TICKS(prev->nice);
		move_last_runqueue(prev);
	}
	goto move_rr_back;

scheduling_in_interrupt:
	printk("Scheduling in interrupt\n");
	BUG();
	return;
}

void schedule_tail(struct task_struct *prev)
{
	__schedule_tail(prev);
#ifdef CONFIG_PREEMPT
	ctx_sw_on();
#endif
}
Source: ITPUB blog, http://blog.itpub.net/10617542/viewspace-958908/