nginx、uvicorn、gunicorn 這些 HTTP server 都是 master-slave 架構
你好奇他們是怎麼實現的嗎?
如果你也是一個好奇寶寶,就接著往下看吧
多程式
使用 multiprocessing.Pool 的實現
import os
import socket
import sys
import time
import threading
from loguru import logger
from concurrent.futures import ThreadPoolExecutor
from concurrent.futures._base import Future
import multiprocessing
# Default text encoding for request/response payloads.
default_encoding: str = 'utf-8'
# NOTE(review): this thread pool appears unused — the `__main__` block below
# rebinds `pool` to a multiprocessing.Pool, so these 20 worker threads are
# created as a module-level side effect and then sit idle. Consider removing.
pool = ThreadPoolExecutor(
    max_workers=20,
    thread_name_prefix='simple-work-thread-pool'
)
def init_serversocket() -> socket.socket:
    """Create, bind and start listening on the server socket.

    Returns:
        A TCP socket bound to all interfaces on port 6001 with a listen
        backlog of 2048.
    """
    serversocket = socket.socket(
        family=socket.AF_INET,
        type=socket.SOCK_STREAM
    )
    # Allow fast restarts: without SO_REUSEADDR a restarted server can fail
    # with "Address already in use" while old connections sit in TIME_WAIT.
    serversocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    # Log the local hostname (diagnostic only; we still bind to 0.0.0.0).
    host = socket.gethostname()
    logger.debug(f'host {host}')
    port = 6001
    # Bind to every interface on the chosen port.
    serversocket.bind(('0.0.0.0', port))
    # Maximum number of queued (not yet accepted) connections.
    serversocket.listen(2048)
    return serversocket
def send_response(clientsocket: socket.socket, addr: tuple, response_body: bytes) -> int:
    """Send the whole response to the client, then close the connection.

    Uses sendall() instead of a single send(): send() may transmit only part
    of the buffer, silently truncating the response.

    Args:
        clientsocket: connected client socket; always closed before returning.
        addr: client address tuple (unused; kept for interface compatibility).
        response_body: payload to transmit.

    Returns:
        The number of bytes sent — always len(response_body) on success.
    """
    try:
        # sendall() loops internally until every byte is on the wire.
        clientsocket.sendall(response_body)
        return len(response_body)
    finally:
        # Close even if sendall raises, so the fd is never leaked.
        clientsocket.close()
def start_request(clientsocket: socket.socket, addr: tuple) -> int:
    """Handle one client connection: read a request and echo it back.

    Reads up to 2048 bytes, replies with 'server get message: <text>' and
    returns the number of bytes sent. On failure the error is logged (not
    propagated) and None is returned, matching the original contract.

    Fix: the client socket is now always closed — the original leaked the
    fd whenever recv()/decode() raised before send_response ran.
    """
    try:
        pid = os.getpid()
        logger.debug(f'pid: {pid}, get message from {addr}')
        # A single recv() is enough for this toy protocol; a real server
        # would loop until the full request has been read.
        request_body: bytes = clientsocket.recv(2048)
        request_text: str = request_body.decode(encoding=default_encoding)
        response_text: str = f'server get message: {request_text}'
        response_body: bytes = response_text.encode(default_encoding)
        send_len = send_response(
            clientsocket=clientsocket, addr=addr, response_body=response_body)
        logger.debug(f'傳送了響應')
        return send_len
    except Exception as error:
        # Swallow per-request errors so one bad client cannot kill the worker.
        logger.exception(error)
    finally:
        # close() is idempotent: harmless if send_response already closed it.
        clientsocket.close()
def start_request_callback(future: Future) -> None:
    """Log how many bytes the completed request handler reported sending."""
    payload_len: int = future.result()
    worker = threading.current_thread().name
    logger.debug(f'{worker}, send payload len is {payload_len}')
if __name__ == "__main__":
serversocket = init_serversocket()
pool = multiprocessing.Pool(processes=16)
while True:
clientsocket, addr = serversocket.accept()
clientsocket: socket.socket
addr: tuple
# future: Future = pool.submit(start_request, clientsocket, addr)
# future.add_done_callback(start_request_callback)
pool.apply_async(start_request, (clientsocket, addr))
pool.close()
pool.join()
使用 ProcessPoolExecutor 的實現
import os
import socket
import sys
import time
import threading
from loguru import logger
from concurrent.futures._base import Future
import multiprocessing
from concurrent.futures import ProcessPoolExecutor
# Default text encoding for request/response payloads.
default_encoding: str = 'utf-8'
def init_serversocket() -> socket.socket:
    """Create, bind and start listening on the server socket.

    Returns:
        A TCP socket bound to all interfaces on port 6001 with a listen
        backlog of 2048.
    """
    serversocket = socket.socket(
        family=socket.AF_INET,
        type=socket.SOCK_STREAM
    )
    # Allow fast restarts: without SO_REUSEADDR a restarted server can fail
    # with "Address already in use" while old connections sit in TIME_WAIT.
    serversocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    # Log the local hostname (diagnostic only; we still bind to 0.0.0.0).
    host = socket.gethostname()
    logger.debug(f'host {host}')
    port = 6001
    # Bind to every interface on the chosen port.
    serversocket.bind(('0.0.0.0', port))
    # Maximum number of queued (not yet accepted) connections.
    serversocket.listen(2048)
    return serversocket
def send_response(clientsocket: socket.socket, addr: tuple, response_body: bytes) -> int:
    """Send the whole response to the client, then close the connection.

    Uses sendall() instead of a single send(): send() may transmit only part
    of the buffer, silently truncating the response.

    Args:
        clientsocket: connected client socket; always closed before returning.
        addr: client address tuple (unused; kept for interface compatibility).
        response_body: payload to transmit.

    Returns:
        The number of bytes sent — always len(response_body) on success.
    """
    try:
        # sendall() loops internally until every byte is on the wire.
        clientsocket.sendall(response_body)
        return len(response_body)
    finally:
        # Close even if sendall raises, so the fd is never leaked.
        clientsocket.close()
def start_request(clientsocket: socket.socket, addr: tuple) -> int:
    """Handle one client connection: read a request and echo it back.

    Reads up to 2048 bytes, replies with 'server get message: <text>' and
    returns the number of bytes sent. On failure the error is logged (not
    propagated) and None is returned, matching the original contract.

    Fix: the client socket is now always closed — the original leaked the
    fd whenever recv()/decode() raised before send_response ran.
    """
    try:
        pid = os.getpid()
        logger.debug(f'pid: {pid}, get message from {addr}')
        # A single recv() is enough for this toy protocol; a real server
        # would loop until the full request has been read.
        request_body: bytes = clientsocket.recv(2048)
        request_text: str = request_body.decode(encoding=default_encoding)
        response_text: str = f'server get message: {request_text}'
        response_body: bytes = response_text.encode(default_encoding)
        send_len = send_response(
            clientsocket=clientsocket, addr=addr, response_body=response_body)
        logger.debug(f'傳送了響應')
        return send_len
    except Exception as error:
        # Swallow per-request errors so one bad client cannot kill the worker.
        logger.exception(error)
    finally:
        # close() is idempotent: harmless if send_response already closed it.
        clientsocket.close()
def start_request_callback(future: Future) -> None:
    """Log how many bytes the completed request handler reported sending."""
    payload_len: int = future.result()
    worker = threading.current_thread().name
    logger.debug(f'{worker}, send payload len is {payload_len}')
if __name__ == "__main__":
serversocket = init_serversocket()
# pool = multiprocessing.Pool(
# processes=16,
# mp_context=multiprocessing.get_context('spawn')
# )
pool = ProcessPoolExecutor(
max_workers=multiprocessing.cpu_count(),
mp_context=multiprocessing.get_context('spawn')
)
while True:
clientsocket, addr = serversocket.accept()
clientsocket: socket.socket
addr: tuple
# future: Future = pool.submit(start_request, clientsocket, addr)
# future.add_done_callback(start_request_callback)
pool.submit(start_request,clientsocket, addr)
# pool.apply_async(start_request, (clientsocket, addr))
pool.close()
pool.join()
上述兩種實現的潛在問題
問題一:無法完美執行在 mac 平臺
上面兩種方式在 Linux 上都可以工作得良好,但是在 mac 上卻不行
服務端會有很大機率報錯(客戶端請求的時候,隨機出現報錯):
- ConnectionRefusedError: [Errno 61] Connection refused
- concurrent.futures.process.BrokenProcessPool: A child process terminated abruptly, the process pool is not usable anymore
問題二:master 負載太高,容易成為瓶頸,無法實現橫向擴充套件
master 程式都乾冒煙了,slave 都閒著
為什麼呢?因為 slave 乾的事情太少了,而 master 程式負責了 serversocket.accept 等等操作,壓力山大