A C++20 coroutine example: a coroutine-ized IOCP server/client

Posted by babypapa on 2021-12-03

VC has supported coroutines for quite a while, but I could never figure out what coroutines were actually good for, until a flash of insight hit me on the toilet a few days ago:

Here is the pseudocode:

task server() {
    for (;;) {
        sock_context s = co_await io.accept();
        for (;;) {
            auto buf = co_await io.recv(s);
            if (!buf.length())
                break;

            std::cout << buf.data() << std::endl;
            int n = co_await io.send(s, "收到!", strlen("收到!") + 1);
        }
        co_await io.close(s);
    }
}

 

If an IO library's public interface looked like this, the code would have the same structure as the simplest blocking model, yet be asynchronous underneath: the same single-threaded code could serve a pile of concurrent connections.

That's what prompted the research below (done out of sheer idleness), and fortunately it produced a working result.

It also finally made the point of coroutines click for me:

  The more libraries get coroutine-ized, the lower the barrier to entry for C++ programmers: application-level developers don't need to know coroutine internals, only how to use the library correctly.

There are already plenty of articles explaining coroutine internals, so I won't write another one. Here's the code; if you're interested, use my implementation together with those articles to build your own:

#pragma once
#include <WinSock2.h>
#include <MSWSock.h>
#include <ws2tcpip.h>
#pragma comment(lib, "ws2_32.lib")
#include <coroutine>
#include <string>
#include <functional>
#include <mutex>      // std::mutex (sock, netio, logger)
#include <thread>     // std::thread (netio worker)
#include <list>       // std::list (netio socket lists)
#include <algorithm>  // std::find_if (sockaddr_from_string)
#include <chrono>     // declares MSVC's _Query_perf_frequency/_Query_perf_counter used by aqx::now
#include "logger.hpp"

/**
* I recently spent some time learning C++20 coroutines and made a first pass at coroutine-izing this IOCP network IO library.
* The previous version dispatched completions through callbacks, and the per-connection context needed by upper-layer protocol parsing forced the whole library to be templated.
* With coroutines, that protocol context can live inside the coroutine function itself, which removes the templates and makes the code a tiny bit easier to maintain.
* How many pitfalls remain in coroutines is unknown; whether this is a win overall remains to be seen.
* Using coroutines means this library gives up nearly all of its multithreading ability:
* maintaining a library that is multithreaded inside but coroutine-shaped outside is beyond my brainpower, I admit.
* My current stance is to avoid overengineering and chase only elegant, concise upper-layer code; 100k+ concurrent connections is plenty for me.
* If that ever stops being enough, it's time to drop coroutines. They are not free: in my tests, coroutines cost about 15% in performance compared with callback dispatch.
*/
#pragma warning(push)
#pragma warning(disable:4996)
namespace aqx{

    static int init_winsock() {
        WSADATA wd;
        return WSAStartup(MAKEWORD(2, 2), &wd);
    }
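
    // Call init_winsock() once per process before any other socket call (main.cpp
    // below does); the matching WSACleanup() on shutdown is left to the caller.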

    static aqx::log nlog;

#ifndef _nf
#define _nf ((size_t)-1)
#endif
#ifndef __AQX_TIME_HPP
    using clock64_t = long long;
    template<typename period = std::milli>
    clock64_t now() {
        const clock64_t _Freq = _Query_perf_frequency();
        const clock64_t _Ctr = _Query_perf_counter();
        const clock64_t _Whole = (_Ctr / _Freq) * period::den;
        const clock64_t _Part = (_Ctr % _Freq) * period::den / _Freq;
        return _Whole + _Part;
    }
#endif
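
    /* The now() above leans on MSVC STL internals (_Query_perf_frequency /
    * _Query_perf_counter, declared via <chrono>). A portable equivalent built on
    * std::chrono -- a hypothetical alternative, not used by the library itself: */
    template<typename period = std::milli>
    clock64_t now_portable() {
        using namespace std::chrono;
        return duration_cast<duration<clock64_t, period>>(
            steady_clock::now().time_since_epoch()).count();
    }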

    /** 
    * Opcode and status code definitions
    */
    struct net_status {
        static constexpr unsigned int s_accept = 0x01;
        static constexpr unsigned int s_connect = 0x02;
        static constexpr unsigned int s_read = 0x04;
        static constexpr unsigned int s_write = 0x08;
        static constexpr unsigned int s_close = 0x10;

        static constexpr unsigned int t_activated = 0x40;

        static constexpr unsigned int t_acceptor = 0x0100;
        static constexpr unsigned int t_connector = 0x0200;
        static constexpr unsigned int t_await_undo = 0x0400;

        static constexpr unsigned int t_await_accept = 0x010000;
        static constexpr unsigned int t_await_connect = 0x020000;
        static constexpr unsigned int t_await_read = 0x040000;
        static constexpr unsigned int t_await_write = 0x080000;
        static constexpr unsigned int t_await_close = 0x100000;
        static constexpr unsigned int t_await = 0xFF0000;
    };
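
    // The low byte of a socket's status word holds pending IO operations (s_*);
    // the upper bits hold role and await flags (t_*). For example, while a server
    // socket is parked inside co_await io.accept(), its status is
    // t_acceptor | s_accept | t_await_accept.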

    /** net_base mainly glues the library to the operating system.
    * No overengineering here; it's crude, but it works.
    */
    class net_base {
    public:
        net_base() {
            fd = INVALID_SOCKET;
            hIocp = NULL;
            AcceptEx = NULL;
            ConnectEx = NULL;
            DisconnectEx = NULL;
            StreamCapacity = 1440;
            Timeout = 0;
            DataBacklog = 0;
            WorkerThreadId = 0;
        }

        static bool sockaddr_from_string(sockaddr_in& _Addr, const std::string& _Dest) {
            _Addr.sin_addr.S_un.S_addr = INADDR_NONE;

            size_t pos = _Dest.find(":");
            if(pos == _nf) {
                nlog("%s->錯誤的目標地址:(%s)\n", __FUNCTION__, _Dest.data());
                return false;
            }

            auto strip = _Dest.substr(0, pos);
            auto strport = _Dest.substr(pos + 1);
            strport.erase(strport.find_last_not_of("\r\n\t ") + 1);
            strport.erase(0, strport.find_first_not_of("\r\n\t "));
            unsigned short port = (unsigned short)atoi(strport.c_str());
            if (!port) {
                nlog("%s->目標埠號錯誤:(%s)\n", __FUNCTION__, _Dest.data());
                return false;
            }
            
            strip.erase(strip.find_last_not_of("\r\n\t ") + 1);
            strip.erase(0, strip.find_first_not_of("\r\n\t "));
            auto it = std::find_if(strip.begin(), strip.end(), [](char c)->bool {
                return ((c < '0' || c > '9') && (c != '.'));
                });
            _Addr.sin_family = AF_INET;
            _Addr.sin_port = htons(port);
            if (it != strip.end()) {
                hostent* host = gethostbyname(strip.c_str());
                if (!host) {
                    nlog("%s->錯誤的目標域名:(%s)\n", __FUNCTION__, _Dest.data());
                    return false;
                }
                _Addr.sin_addr = *(in_addr*)(host->h_addr_list[0]);
            }
            else {
                _Addr.sin_addr.S_un.S_addr = inet_addr(strip.c_str());
            }

            if (_Addr.sin_addr.S_un.S_addr == INADDR_NONE) {
                nlog("%s->錯誤的目標地址:(%s)\n", __FUNCTION__, _Dest.data());
                return false;
            }
            return true;
        }

        static void sockaddr_any(sockaddr_in& _Addr, unsigned short _Port) {
            _Addr.sin_family = AF_INET;
            _Addr.sin_port = htons(_Port);
            _Addr.sin_addr.S_un.S_addr = INADDR_ANY;
        }

        static void sockaddr_local(sockaddr_in& _Addr, unsigned short _Port) {
            _Addr.sin_family = AF_INET;
            _Addr.sin_port = htons(_Port);
            _Addr.sin_addr.S_un.S_addr = htonl(INADDR_LOOPBACK); // INADDR_LOOPBACK is host order; htonl was missing
        }

        static void* getmswsfunc(SOCKET s, GUID guid) {
            DWORD dwBytes;
            void* lpResult = nullptr;
            WSAIoctl(s, SIO_GET_EXTENSION_FUNCTION_POINTER, &guid,
                sizeof(guid), &lpResult, sizeof(lpResult), &dwBytes, NULL, NULL);
            return lpResult;
        }
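
        // (getmswsfunc asks the winsock provider for a Microsoft extension function
        // pointer; e.g. getmswsfunc(fd, WSAID_ACCEPTEX) yields this socket's AcceptEx.)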

        static std::string sockaddr_to_string(const sockaddr_in &_Addr) {
            char buf[256];
            sprintf(buf, "%d.%d.%d.%d:%d", _Addr.sin_addr.S_un.S_un_b.s_b1,
                _Addr.sin_addr.S_un.S_un_b.s_b2,
                _Addr.sin_addr.S_un.S_un_b.s_b3,
                _Addr.sin_addr.S_un.S_un_b.s_b4,
                ntohs(_Addr.sin_port));
            std::string _Result = buf;
            return _Result;
        }

    private:
        int init(int _StreamCapacity, int _DataBacklog, int _Timeout) {
            if (fd != INVALID_SOCKET) {
                return 0;
            }
            auto reterr = [this](int n) {
                if (fd != INVALID_SOCKET) {
                    closesocket(fd);
                    fd = INVALID_SOCKET;
                }
                return n;
            };
            StreamCapacity = _StreamCapacity;
            Timeout = _Timeout;
            DataBacklog = _DataBacklog;
            fd = WSASocket(AF_INET, SOCK_STREAM, IPPROTO_TCP, NULL, 0, WSA_FLAG_OVERLAPPED);
            if (fd == INVALID_SOCKET) {
                nlog("%s->建立套接字失敗:%d", __FUNCTION__, WSAGetLastError());
                return reterr(-1);
            }
            ConnectEx = (LPFN_CONNECTEX)getmswsfunc(fd, WSAID_CONNECTEX);
            if (!ConnectEx) {
                nlog("%s->獲取 ConnectEx 地址失敗,錯誤號:%d", __FUNCTION__, WSAGetLastError());
                return reterr(-2);
            }
            AcceptEx = (LPFN_ACCEPTEX)getmswsfunc(fd, WSAID_ACCEPTEX);
            if (!AcceptEx) {
                nlog("%s->獲取 AcceptEx 函式失敗,錯誤號:%d", __FUNCTION__, WSAGetLastError());
                return reterr(-3);
            }
            
            // I've tested DisconnectEx more than once; the conclusion is always that it does not raise the number of concurrent connections.
            // DisconnectEx looks faster in theory because it trades the global system lock for the IOCP queue lock.
            // Another approach is a dedicated thread with a table that calls DisconnectEx synchronously and then AcceptEx, which shifts the global kernel lock entirely onto your own lock.
            // DisconnectEx also behaves differently across operating systems; the only safe point to reuse a socket is when the peer closes the connection.
            // For IOCP that means when WSARecv or WSASend comes back from GetQueuedCompletionStatus with transferred == 0.
            // It is also affected by the TCP TIME_WAIT state:
            // with many TIME_WAIT sockets in the system, the net effect is spending more memory to get fewer concurrent connections.

            /*DisconnectEx = (LPFN_DISCONNECTEX)getmswsfunc(fd, WSAID_DISCONNECTEX);
            if (!DisconnectEx) {
                nlog("%s->failed to get the DisconnectEx address, error: %d", __FUNCTION__, WSAGetLastError());
                return reterr(-4);
            }*/

            hIocp = CreateIoCompletionPort(INVALID_HANDLE_VALUE, NULL, 0, 0);
            if (!hIocp) {
                nlog("%s->建立完成埠失敗,錯誤號:%d", __FUNCTION__, GetLastError());
                return reterr(-5);
            }
            CreateIoCompletionPort((HANDLE)fd, hIocp, 0, 0);
            return 0;
        }

        void close() {
            if (fd != INVALID_SOCKET) {
                closesocket(fd);
                fd = INVALID_SOCKET;
            }

            if (hIocp) {
                CloseHandle(hIocp);
                hIocp = NULL;
            }
        }

        BOOL Accept(SOCKET s, char* _Data, LPOVERLAPPED _Overlapped) {
            DWORD _Received = 0;
            return AcceptEx(fd, s, _Data, 0, sizeof(SOCKADDR_IN) << 1, sizeof(SOCKADDR_IN) << 1, &_Received, _Overlapped);
        }

        BOOL Connect(SOCKET s, sockaddr* _Addr, int _AddrLen, LPOVERLAPPED _Overlapped) {
            DWORD _Sent = 0;
            return ConnectEx(s, _Addr, _AddrLen, nullptr, 0, &_Sent, _Overlapped);
        }

        BOOL Disconnect(SOCKET s, LPOVERLAPPED _Overlapped) {
            return DisconnectEx(s, _Overlapped, TF_REUSE_SOCKET, 0);
        }

    private:
        friend class sock;
        friend class netio;
        friend class coio;
        SOCKET fd;
        HANDLE hIocp;
        LPFN_ACCEPTEX AcceptEx;
        LPFN_CONNECTEX ConnectEx;
        LPFN_DISCONNECTEX DisconnectEx;
        int StreamCapacity;
        int Timeout;
        int DataBacklog;
        DWORD WorkerThreadId;
    };

    /* Derive directly from std::string and use it for the socket's various buffers */
    class sock_buffer : public std::string {
    public:
        using _Basetype = std::string;
        using _Basetype::_Basetype;
        void preset_length(size_t _Length) {
            // Patch VC's std::string internals at the binary level to change what std::string::length() returns.
            // The benefit is avoiding the fill/copy that std::string::resize() performs.
            // Note this only works with VC; G++'s std::string layout is different.
            struct __stlstr {
                const char str[0x10];
                size_t len;
            };
            if (this->capacity() < _Length)
                this->reserve(_Length);
            ((__stlstr*)this)->len = _Length;
        }
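        // A portable (but copying) fallback for non-VC builds -- a hypothetical
        // alternative, not used by this library:
        void preset_length_portable(size_t _Length) { this->resize(_Length); }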
    };

    /**
    * Coroutine task
    */
    template<typename _Ty>
    struct net_task_t {
        struct promise_type;
        using _Hty = std::coroutine_handle<promise_type>;
        struct promise_type {
            net_task_t get_return_object() { return { _Hty::from_promise(*this) }; }
            // Returning std::suspend_always from initial_suspend means the coroutine suspends right after it is created.
            // Suspending here leaves time for set_sock to run; otherwise an empty coroutine function would be destroyed immediately after creation.
            auto initial_suspend() { return std::suspend_always{}; }

            auto final_suspend() noexcept { 
                s->on_destroy_coroutine(); 
                return std::suspend_never{}; 
            }
            void unhandled_exception() { std::terminate(); }
            void return_void() { }
            _Ty* s = nullptr;
        };
        _Hty _Handle;
        void resume() { _Handle.resume(); }
        void destroy() { _Handle.destroy(); }
        void set_sock(_Ty* _s) { _Handle.promise().s = _s; }
    };
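
    // Lifecycle: the coroutine starts suspended (initial_suspend), the library calls
    // set_sock() and then resume(). On normal completion final_suspend returns
    // suspend_never, so the frame frees itself; destroy() is only needed for
    // coroutines still parked on an await (see netio::release()).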

    /** Socket context */
    class sock {
        // Extended OVERLAPPED structure
        struct binding {
            OVERLAPPED ol;
            int opt;
            sock* s;
        };

        /**
        * The object type returned to a coroutine's recv
        */
        class sock_data {
            sock_data(sock* _s) : s(_s) {}
        public:
            char* data() { return s->ibuf.data(); }
            void erase(size_t _Count) { s->ibuf.erase(0, _Count); }
            size_t length() { return s->ibuf.length(); }
            void clear() { s->ibuf.clear(); }

        private:
            friend class sock;
            sock* s;
        };

        /** The object type returned to coroutines by connect and accept.
        * Used for asynchronous send and close.
        * Other threads can use this object to communicate as well; thread safety is handled, see safe_send
        */
        class asyncsock {
        public:
            /**
            * send transmits data without taking the lock.
            * When no other threads are involved, send is safe.
            */
            int send(void* data, int len) {
                return s->send(data, len);
            }

            int send(const void* data, int len) {
                return s->send(data, len);
            }

            /**
            * When any other thread writes asynchronously, all writes should go through safe_send.
            */
            int safe_send(void* data, int len) {
                return s->safe_send(data, len);
            }

            int safe_send(const void* data, int len) {
                return s->safe_send(data, len);
            }

            void close() {
                s->close();
            }

            bool isactivated() { return s->isactivated(); }

            operator bool() {
                return (s != nullptr);
            }

            sockaddr_in& getsockaddr() {
                return s->getsockaddr();
            }

        private:
            friend class netio;
            friend class coio;
            friend class sock;
            sock* s = nullptr;
        };

        struct recv_awaitable {
            recv_awaitable(sock* s) : data(s) { }

            __declspec(noinline) bool await_ready() {
                // My VS version at the time of writing: VS 2022 17.0.1.
                // I found a compiler bug here: whenever await_ready and await_suspend both get inlined,
                // the compiler emits assembly like this:
                /*
                * This disassembly is from inside the coroutine function; after removing __declspec(noinline) it looks roughly like this.
00007FF69D8A60F8  call        aqx::coio::recv (07FF69D8A2C70h)  

// rbx holds the recv_awaitable object here; it has been optimized into a register
00007FF69D8A60FD  mov         rbx,rax  
// But after resume() jumps back from the scheduler side into the coroutine, rbx is not preserved,
// which leaves rbx = __coro_frame_ptr.__resume_address

00007FF69D8A6100  mov         rax,qword ptr [rax]  
00007FF69D8A6103  test        dword ptr [rax+20h],400h  
00007FF69D8A610A  je          `main'::`2'::<lambda_1>$_ResumeCoro$1::operator()+448h (07FF69D8A6318h)  
00007FF69D8A6110  lea         rcx,[rax+0B8h]  
00007FF69D8A6117  cmp         qword ptr [rax+0D0h],10h  
00007FF69D8A611F  jb          `main'::`2'::<lambda_1>$_ResumeCoro$1::operator()+258h (07FF69D8A6128h)  
00007FF69D8A6121  mov         rcx,qword ptr [rax+0B8h]  
00007FF69D8A6128  mov         qword ptr [rax+0C8h],r14  
00007FF69D8A612F  mov         byte ptr [rcx],0  
00007FF69D8A6132  mov         rax,qword ptr [rbx]  
00007FF69D8A6135  and         dword ptr [rax+20h],0FFFFFBFFh  
00007FF69D8A613C  mov         r8,qword ptr [rbx]  
00007FF69D8A613F  mov         rdx,rbx  
                */
                // I haven't fully worked out the mechanism behind this (not quite bored enough yet to debug MSVC for Microsoft),
                // so I haven't reported it.
                // If you're interested, trace through it yourself.
                // My current mitigation is to force await_ready to be noinline; still better than calling resume() inside await_suspend.

                if (data.s->st & net_status::t_await_undo) {
                    data.s->ibuf.clear();
                    data.s->st &= (~net_status::t_await_undo);
                    return true;
                }
                return false;
            }

            
            void await_suspend(std::coroutine_handle<> handle) { }
            sock_data await_resume() const { 
                return data; 
            }
            sock_data data;
        };

        struct sock_awaitable {
            sock_awaitable(sock* _s) { s.s = _s; }
            __declspec(noinline) bool await_ready() {
                if (s.s->st & net_status::t_await_undo) {
                    s.s->st &= (~net_status::t_await_undo);
                    return true;
                }
                return false;
            }
            void await_suspend(std::coroutine_handle<> handle) { }
            sock::asyncsock await_resume() { return s; }
            sock::asyncsock s;
        };

        struct close_awaitable {
            close_awaitable(bool _IsSuspend) : IsSuspend(_IsSuspend) { }
            __declspec(noinline) bool await_ready() { return (IsSuspend == false); }
            void await_suspend(std::coroutine_handle<> handle) { }
            void await_resume() { }
            bool IsSuspend;
        };

        struct send_awaitable {
            send_awaitable(sock* _s) : s(_s) {}
            __declspec(noinline) bool await_ready() {
                if (s->st & net_status::t_await_undo) {
                    s->st &= (~net_status::t_await_undo);
                    return true;
                }
                return false;
            }
            void await_suspend(std::coroutine_handle<> handle) { }
            int await_resume() { return s->syncsendlen; }
            sock* s;
        };

        struct safe_buffer {
            sock* s = nullptr;
            sock_buffer buf;
        };

    public:
        using opcode = net_status;
        sock(net_base* _v) {
            fd = INVALID_SOCKET;
            v = _v;
            st = 0;
            hTimer = NULL;   // was left uninitialized; set_timer() tests it
            rtime = 0;
            syncsendlen = 0;
            memset(&input.ol, 0, sizeof(input.ol));
            memset(&output.ol, 0, sizeof(output.ol));
            
            if (v->Timeout)
                output.ol.hEvent = input.ol.hEvent = CreateEvent(NULL, FALSE, FALSE, NULL);
            else
                output.ol.hEvent = input.ol.hEvent = NULL;
            output.s = input.s = this;
            output.opt = opcode::s_write;
            ibuf.reserve(v->StreamCapacity);
            obuf.reserve(v->StreamCapacity);
        }

        ~sock() {
            close();
            if (!output.ol.hEvent)
                return;
            CloseHandle(output.ol.hEvent);
            output.ol.hEvent = input.ol.hEvent = NULL;
            if (st & opcode::t_await) 
                co.destroy();
        }

        void on_destroy_coroutine() {
            st &= (~opcode::t_connector);
        }

        bool isactivated() {
            return ((st & opcode::t_activated) != 0);
        }

        int send(void* data, int len) {
            if (!len)
                return len;
            int n = (int)(obuf.capacity() - obuf.length());
            if (n >= len) {
                obuf.append((char*)data, len);
            }
            else {
                if (v->DataBacklog != 0 && obacklog.length() + len > v->DataBacklog) {
                    // backlog limit exceeded
                    close();
                    return -1;
                }
                obacklog.append((char*)data, len);
            }
            return (write() == 0) ? len : -1;
        }

        int send(const void* data, int len) {
            return send((void*)data, len);
        }

        int safe_send(void* data, int len) {
            std::lock_guard<std::mutex> lg(mtx);
            return send(data, len);
        }

        int safe_send(const void* data, int len) {
            std::lock_guard<std::mutex> lg(mtx);
            return send(data, len);
        }

        void close() {
            if (INVALID_SOCKET == fd)
                return;
            closesocket(fd);
            fd = INVALID_SOCKET;
            st &= ~opcode::t_activated;
            st |= opcode::s_close;
            set_timer(false);
            ibuf.clear();
            if (obacklog.capacity() <= 0x0F)
                return;
            sock_buffer tmp;
            obacklog.swap(tmp);
        }

        sockaddr_in& getsockaddr() { return sa; }

    private:
        int initfd() {
            if (INVALID_SOCKET != fd)
                return 0;
            fd = WSASocket(AF_INET, SOCK_STREAM, IPPROTO_TCP, NULL, 0, WSA_FLAG_OVERLAPPED);
            if (INVALID_SOCKET == fd) {
                nlog("%s->建立套接字失敗,錯誤號:%d", __FUNCTION__, WSAGetLastError());
                return -1;
            }
            LINGER linger = { 1, 0 };
            setsockopt(fd, SOL_SOCKET, SO_LINGER, (char*)&linger, sizeof(linger));
            int b = 1;
            setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, (char*)&b, sizeof(b));
            CreateIoCompletionPort((HANDLE)fd, v->hIocp, 0, 0);
            return 0;
        }

        int bindlocal() {
            sockaddr_in local;
            local.sin_family = AF_INET;
            local.sin_addr.S_un.S_addr = INADDR_ANY;
            local.sin_port = 0;
            if (SOCKET_ERROR == bind(fd, (LPSOCKADDR)&local, sizeof(local))) {
                nlog("%s->繫結本地埠失敗,錯誤號:%d", __FUNCTION__, WSAGetLastError());
                return -1;
            }
            return 0;
        }

        bool set_dest(const std::string& _Dest) {
            return net_base::sockaddr_from_string(sa, _Dest);
        }

        void set_timer(bool _Enable) {
            if (_Enable) {
                if (hTimer)
                    return;
                RegisterWaitForSingleObject(&hTimer, output.ol.hEvent, [](void* Param, BOOLEAN TimerOrWaitFired) {
                    if (!TimerOrWaitFired)
                        return;
                    sock* p = (sock*)Param;
                    PostQueuedCompletionStatus(p->v->hIocp, 0, (ULONG_PTR)p, nullptr);
                }, this, (ULONG)v->Timeout, WT_EXECUTEDEFAULT);
            }
            else {
                if (!hTimer)
                    return;
                std::ignore = UnregisterWaitEx(hTimer, NULL);
                hTimer = NULL;
            }
        }

        int nat() {
            sockaddr_in _Addr;
            int _AddrLen = sizeof(_Addr);
            if (-1 == getsockname(fd, (sockaddr*)&_Addr, &_AddrLen))
                return -1;
            SOCKET fdNat = WSASocket(AF_INET, SOCK_STREAM, IPPROTO_TCP, NULL, 0, WSA_FLAG_OVERLAPPED);
            LINGER linger = { 1, 0 };
            setsockopt(fdNat, SOL_SOCKET, SO_LINGER, (char*)&linger, sizeof(linger));
            CreateIoCompletionPort((HANDLE)fdNat, v->hIocp, 0, 0);
            if (-1 == bind(fdNat, (sockaddr*)&_Addr, sizeof(_Addr))) {
                closesocket(fdNat);
                return -1;
            }
            close();
            fd = fdNat;
            return connect();
        }

        int accept() {
            if (((st & 0xFF) | opcode::s_close) != opcode::s_close) {
                nlog("%s->當前套接字未斷開連線!", __FUNCTION__);
                return -1;
            }

            if (initfd())
                return -1;
            DWORD _Received = 0;
            input.opt = opcode::s_accept;
            st &= (~opcode::s_close);
            st |= opcode::s_accept;
            if (!v->Accept(fd, ibuf.data(), &input.ol)) {
                int _Error = WSAGetLastError();
                if (_Error != ERROR_IO_PENDING) {
                    st &= (~opcode::s_accept);
                    nlog("%s->AcceptEx失敗, 錯誤號:", __FUNCTION__, WSAGetLastError());
                    return -1;
                }
            }
            return 0;
        }

        int connect() {
            if (((st & 0xFF) | opcode::s_close) != opcode::s_close) {
                nlog("%s->當前套接字未斷開連線!", __FUNCTION__);
                return -1;
            }
            if (INVALID_SOCKET == fd) {
                if (initfd())
                    return -1;
                if (bindlocal())
                    return -1;
            }
            input.opt = opcode::s_connect;
            st &= (~opcode::s_close);
            st |= opcode::s_connect;

            if (!v->Connect(fd, (sockaddr*)&sa, sizeof(sa), &input.ol)) {
                int _Error = WSAGetLastError();
                if (_Error != ERROR_IO_PENDING) {
                    nlog("%s->ConnectEx失敗, 錯誤號:", __FUNCTION__, WSAGetLastError());
                    return -1;
                }
            }
            return 0;
        }

        int write() {
            if (!(st & opcode::t_activated)) {
                return -1;
            }
            if (st & (opcode::s_write | opcode::s_close | opcode::s_accept | opcode::s_connect))
                return 0;
            if (obacklog.size()) {
                size_t rl = obuf.capacity() - obuf.length();
                if (rl > obacklog.length())
                    rl = obacklog.length();
                if (rl) {
                    obuf.append(obacklog.data(), rl);
                    obacklog.erase(0, rl);
                }
            }
            WSABUF buf = { (ULONG)(obuf.length()), obuf.data() };
            if (!buf.len)
                return 0;
            st |= opcode::s_write;
            DWORD _Sent = 0;
            if (SOCKET_ERROR == WSASend(fd, &buf, 1, &_Sent, 0, &(output.ol), NULL)) {
                int _Error = WSAGetLastError();
                if (WSA_IO_PENDING != _Error) {
                    st &= (~opcode::s_write);
                    return -1;
                }
            }
            return 0;
        }

        int read() {
            if (!(st & opcode::t_activated)) {
                return -1;
            }
            if (st & (opcode::s_read | opcode::s_close | opcode::s_accept | opcode::s_connect))
                return 0;
            WSABUF buf = {
                (ULONG)(ibuf.capacity() - ibuf.length()),
                ibuf.data() + ibuf.length()
            };
            if ((int)buf.len <= 0) {
                return -1;
            }
            DWORD _Received = 0;
            DWORD _Flags = 0;
            st |= opcode::s_read;
            input.opt = opcode::s_read;
            if (SOCKET_ERROR == WSARecv(fd, &buf, 1, &_Received, &_Flags, &(input.ol), NULL)) {
                int _Error = WSAGetLastError();
                if (WSA_IO_PENDING != _Error) {
                    st &= ~(opcode::s_read);
                    return -1;
                }
            }
            return 0;
        }

    private:
        friend class coio;
        friend class netio;
        SOCKET fd;
        sockaddr_in sa;
        net_base* v;
        int st;
        binding input, output, reuse_sock;
        sock_buffer ibuf, obuf, obacklog;
        HANDLE hTimer;
        aqx::clock64_t rtime;
        net_task_t<sock> co;
        int syncsendlen;
        std::mutex mtx;
    };


    // coio is the handle object passed to coroutine functions
    class coio {
        coio(sock* _s) : s(_s) {}

    public:
        using asyncsock = sock::asyncsock;
        using sock_awaitable = sock::sock_awaitable;
        using close_awaitable = sock::close_awaitable;
        using send_awaitable = sock::send_awaitable;
        using recv_awaitable = sock::recv_awaitable;

        struct nat_awaitable {
            nat_awaitable(bool _ret) : ret(_ret) {  }
            __declspec(noinline) bool await_ready() { return (ret == false); }
            void await_suspend(std::coroutine_handle<> handle) { }
            bool await_resume() { return ret; }
            bool ret;
        };

        coio() : s(nullptr) {}

        sock_awaitable connect(const std::string& _Dest) {
            if (!s->set_dest(_Dest)) {
                // Destination address failed to parse; undo the await.
                s->st |= net_status::t_await_undo;
                return sock_awaitable(s);
            }

            // A socket's first connect is almost always triggered from another thread,
            // and the IOCP queue may well complete before await_suspend even runs.
            if (GetCurrentThreadId() == s->v->WorkerThreadId) {
                if (s->connect()) {
                    // Connect failed; undo the await.
                    s->st |= net_status::t_await_undo;
                    return sock_awaitable(s);
                }
            }
            else {
                // That's why a connect not initiated from the IOCP worker thread is posted to the IOCP queue to be handled there.
                PostQueuedCompletionStatus(s->v->hIocp, net_status::s_connect, (ULONG_PTR)s, 0);
            }

            s->st |= net_status::t_await_connect;
            return sock_awaitable(s);
        }

        sock_awaitable accept() {
            // The first accept is also called from another thread (usually main),
            // but at that point the IOCP worker thread hasn't started yet, so connect's problem doesn't apply here.
            s->st |= ((!s->accept()) ? net_status::t_await_accept : net_status::t_await_undo);
            return sock_awaitable(s);
        }

        /**
        * In the member functions below, the asyncsock _s parameter should be identical to the private member s, unless a coio object is forcibly used from outside.
        * A parameter is used instead of the private member to prevent IO calls from being issued before a connection exists.
        * The private member s is reserved for accept and connect.
        */
        close_awaitable close(asyncsock _s) {
            _s.s->close();
            if ((_s.s->st & 0xFF) == net_status::s_close) {
                // If the socket has no outstanding IO events, let the awaitable resume the coroutine immediately.
                // That's the normal case; but if another thread is sending asynchronously, there may be pending IO left.
                return close_awaitable(false);
            }
            _s.s->st |= net_status::t_await_close;
            return close_awaitable(true);
        }

        send_awaitable send(asyncsock _s, void *buf, int len) {
            _s.s->syncsendlen = _s.send(buf, len);
            _s.s->st |= ((_s.s->syncsendlen >= 0) ? net_status::t_await_write : net_status::t_await_undo);
            return sock::send_awaitable(_s.s);
        }

        send_awaitable send(asyncsock _s, const void* buf, int len) {
            _s.s->syncsendlen = _s.send(buf, len);
            _s.s->st |= ((_s.s->syncsendlen >= 0) ? net_status::t_await_write : net_status::t_await_undo);
            return sock::send_awaitable(_s.s);
        }
        
        send_awaitable safe_send(asyncsock _s, void* buf, int len) {
            _s.s->syncsendlen = _s.safe_send(buf, len);
            _s.s->st |= ((_s.s->syncsendlen >= 0) ? net_status::t_await_write : net_status::t_await_undo);
            return sock::send_awaitable(_s.s);
        }

        send_awaitable safe_send(asyncsock _s, const void* buf, int len) {
            _s.s->syncsendlen = _s.safe_send(buf, len);
            _s.s->st |= ((_s.s->syncsendlen >= 0) ? net_status::t_await_write : net_status::t_await_undo);
            return sock::send_awaitable(_s.s);
        }

        recv_awaitable recv(asyncsock _s) {
            int n = _s.s->read();
            if (n < 0) {
                _s.s->st |= net_status::t_await_undo;
            }
            else {
                _s.s->st |= net_status::t_await_read;
            }
            return recv_awaitable(_s.s);
        }

        nat_awaitable nat(asyncsock _s, const std::string& _Dest) {
            if ((_s.s->st & 0xFF) != net_status::t_activated) {
                // Before nat, every pending IO must have completed and the connection to the rendezvous (hole-punching) server must still be healthy; otherwise this fails.
                // Failing here leaves the connection to the rendezvous server intact.
                return nat_awaitable(false);
            }

            sockaddr_in sa = _s.s->sa;
            if (!_s.s->set_dest(_Dest)) {
                // Failed to parse the destination address.
                // Failing here also leaves the connection to the rendezvous server intact.
                _s.s->sa = sa;
                return nat_awaitable(false);
            }

            if (_s.s->nat()) {
                // Failing at this step may break the connection to the rendezvous server.
                // When nat fails you should close() anyway;
                // I can't think of a reason to keep courting the rendezvous server after a failure.
                // If every state is right and it still fails, the two peers probably sit behind NAT types that cannot be traversed.
                // I haven't researched this deeply; few in the industry truly have, and material is scarce. What I know is that TCP NAT traversal, in code, looks like:
                //     1. The socket connected to the rendezvous server sets SO_REUSEADDR so its local port can be reused.
                //          This library sets it everywhere, but mainly to mitigate TIME_WAIT, not for traversal.
                //     2. The two peers exchange their remote addresses through the rendezvous server.
                //     3. Each peer creates a new socket and binds it to the local address of its rendezvous connection (obtainable via getsockname).
                //          After step 3 the rendezvous socket is useless for further communication and should be closed.
                //     4. Finally, both peers connect to each other's address.
                _s.s->sa = sa;
                return nat_awaitable(false);
            }

            s->st |= net_status::t_await_connect;
            return nat_awaitable(true);
        }

        bool valid() {
            return (s != nullptr);
        }

        operator bool () {
            return valid();
        }

    private:
        friend class netio;
        sock* s;
    };

    /**
    * netio can be seen as a simple container:
    * it wires up net_base, creates the worker thread, and dispatches IO events.
    */
    class netio {
        struct IOCP_STATUS {
            DWORD transferred;
            SIZE_T key;
            typename sock::binding* pb;
            BOOL ok;
        };

    public:
        /** listener is just simple parameter packaging to make construction convenient.
        * Constructor parameters:
        * _Dest: the address and port to listen on, in the form "a.b.c.d:port"
        * _ListenBacklog: the 2nd parameter of the listen() system call
        * _MaxClients: the maximum number of clients accepted simultaneously
        */
        class listener {
        public:
            listener() {
                max_clients = 0;
                listen_backlog = 0;
                addr.sin_addr.S_un.S_addr = INADDR_NONE;
            }

            listener(const std::string& _Dest, int _ListenBacklog, size_t _MaxClients) {
                max_clients = _MaxClients;
                listen_backlog = _ListenBacklog;
                net_base::sockaddr_from_string(addr, _Dest);
            }

        private:
            friend class netio;
            sockaddr_in addr;
            int listen_backlog;
            size_t max_clients;
        };

        using asyncsock = sock::asyncsock;
        using opcode = net_status;
        using task = net_task_t<sock>;

        int init(int _StreamCapacity = 1440, int _DataBacklog = 0, int _Timeout = 0) {
            std::lock_guard<std::mutex> lg(mtx);
            return nwb.init(_StreamCapacity, _DataBacklog, _Timeout);
        }

        int server(const std::function<task(coio)> &_func, const listener &param) {
            std::lock_guard<std::mutex> lg(mtx);
            if (thd.joinable()) {
                nlog("%s->netio已啟動, 請勿重複呼叫!", __FUNCTION__);
                return 0;
            }

            if (nwb.fd == INVALID_SOCKET)
                return -1;

            cofunc = _func;
            if (param.addr.sin_addr.S_un.S_addr != INADDR_NONE) {
                if (SOCKET_ERROR == bind(nwb.fd, (SOCKADDR*)&param.addr, sizeof(SOCKADDR))) {
                    nlog("%s->繫結埠失敗,錯誤號:%d", __FUNCTION__, WSAGetLastError());
                    nwb.close();
                    return -1;
                }

                if (SOCKET_ERROR == ::listen(nwb.fd, param.listen_backlog)) {
                    nlog("%s->監聽失敗,錯誤號:%d", __FUNCTION__, WSAGetLastError());
                    nwb.close();
                    return -1;
                }

                for (size_t i = 0; i < param.max_clients; i++) {
                    sock* psock = new sock(&nwb);
                    a_list.push_back(psock);
                    psock->st |= opcode::t_acceptor;
                    psock->co = cofunc(coio(psock));
                    psock->co.set_sock(psock);
                    psock->co.resume();
                }
            }
            __start();
            return 0;
        }

        // client is one-shot and meant for client connections.
        // It returns an asyncsock to leave room for scripting languages:
        // e.g. lua could mimic node.js's pattern of getting an object right after connect, connected or not, and binding events to it.
        asyncsock client(const std::function<task(coio)>& _func) {
            std::lock_guard<std::mutex> lg(mtx);
            coio io;
            asyncsock ret;
            if (!thd.joinable()) {
                // If the worker thread isn't running, try to start it; stop() and release() are then needed to reclaim resources.
                if (nwb.fd == INVALID_SOCKET)
                    return ret;
                __start();
            }
            io.s = get_connector();
            ret.s = io.s;
            io.s->co = _func(io);
            io.s->co.set_sock(io.s);
            io.s->co.resume();
            return ret;
        }

        void stop() {
            std::lock_guard<std::mutex> lg(mtx);
            if (thd.joinable()) {
                PostQueuedCompletionStatus(nwb.hIocp, -1, 0, 0);
                thd.join();
            }
        }

        void release() {
            std::lock_guard<std::mutex> lg(mtx);
            if (thd.joinable()) {
                nlog("%s->nio正在執行,請先stop", __FUNCTION__);
                return;
            }
            for (auto p : a_list) {
                if (p->st & opcode::t_await)
                    p->co.destroy();
                delete p;
            }
            a_list.clear();

            for (auto p : c_list) {
                if (p->st & opcode::t_await)
                    p->co.destroy();
                delete p;
            }
            c_list.clear();
            nwb.close();
        }

    private:
        sock* get_connector() {
            sock* psock = nullptr;
            
            for (auto v : c_list) {
                if ((v->st & opcode::t_connector) == 0 && ((v->st & 0xFF)| opcode::s_close) == opcode::s_close) {
                    psock = v;
                    break;
                }
            }

            if (!psock) {
                psock = new sock(&nwb);
                c_list.push_back(psock);
            }

            psock->st |= opcode::t_connector;
            return psock;
        }

        void on_connect(sock& s) {
            s.ibuf.clear();
            s.obuf.clear();
            s.obacklog.clear();
            s.rtime = aqx::now() + nwb.Timeout;
            if (nwb.Timeout != 0)
                s.set_timer(true);
            s.st |= opcode::t_activated;
        }
        
        void on_accept(sock &s) {
            // Too lazy to call GetAcceptExSockAddrs; hardcoded offsets do the job.
#ifndef _WIN64
            s.sa = *(sockaddr_in*)(s.ibuf.data() + 0x26);
#else
            s.sa = *(sockaddr_in*)(s.ibuf.data() + 0x20);
#endif
            on_connect(s);
        }
        
        bool on_resume(sock& s) {
            if (s.st & opcode::t_await) {
                // Clear all coroutine await flags
                s.st &= (~opcode::t_await);

                // Wake the coroutine
                s.co.resume();
                return true;
            }
            return false;
        }

        void on_close(sock& s) {
            if ((s.st & 0xFF) == opcode::s_close) {
                s.st &= ~opcode::s_close;
                on_resume(s);
            }
        }

        bool error_resume(sock &s) {
            int st = s.st & opcode::t_await;
            switch (st) {
            case opcode::t_await_accept:
            case opcode::t_await_connect:
            case opcode::t_await_close:
                s.st &= (~opcode::t_await);
                s.co.resume();
                return true;
            case opcode::t_await_read:
                s.ibuf.clear();
                s.st &= (~opcode::t_await);
                s.co.resume();
                return true;
            case opcode::t_await_write:
                s.syncsendlen = -1;
                s.st &= (~opcode::t_await);
                s.co.resume();
                return true;
            default:
                break;
            }
            return false;
        }

        void on_reset(sock &s) {
            if ((s.st & 0xFF) == opcode::s_close) {
                s.st &= ~opcode::s_close;
                if (s.st & opcode::t_acceptor) {
                    // This happens when a server coroutine isn't written inside a loop: the coroutine returns and destroys itself.
                    // The remedy is to create a fresh coroutine in its place.
                    s.co = cofunc(coio(&s));
                }
            }
        }

        void on_completion(IOCP_STATUS& st) {
            sock& s = *(st.pb->s);
            int op = st.pb->opt;
            s.st &= (~op);
            if (s.st & opcode::s_close) 
                op = 0;
            //nlog("on_completion:%I64X, %d", &s, op);
            switch (op) {
            case 0:
                break;
            case opcode::s_accept:
                on_accept(s);
                break;
            case opcode::s_connect:
                if (!st.ok && WSAGetLastError() == 1225) {
                    // Error 1225 (ERROR_CONNECTION_REFUSED) usually means nothing is listening on the target port and the OS rejected the connection outright.
                    op = 0;
                    break;
                }
                on_connect(s);
                break;
            case opcode::s_read:
                if (!st.transferred) {
                    op = 0;
                    break;
                }
                s.ibuf.preset_length(s.ibuf.length() + st.transferred);
                break;
            case opcode::s_write:
                if (!st.transferred) {
                    op = 0;
                    break;
                }

                s.obuf.erase(0, st.transferred);
                if (s.obuf.length() || s.obacklog.length()) {
                    if (s.write()) {
                        op = 0;
                        break;
                    }
                }
                // A write may have been initiated outside the coroutine while it is suspended in recv, so check before resuming.
                if (!(s.st & opcode::t_await_write))
                    return;
                break;
            }
            
            //nlog("on_completion2:%I64X, %d", &s, op);
            if (!op) {
                if (error_resume(s))
                    return;
                // 只有當協程被銷燬時,error_resume才會返回false
                s.close();
                on_reset(s);
                return;
            }
            
            on_resume(s);
            if (s.st & opcode::s_close)
                return on_close(s);
        }

        void __start() {
            thd = std::thread([this]() {
                nwb.WorkerThreadId = GetCurrentThreadId();
                srand((unsigned int)aqx::now() + nwb.WorkerThreadId);
                IOCP_STATUS st = { 0,0,0,0 };
                //nlog("netio::worker->I/O worker thread %d started!", nwb.WorkerThreadId);
                for (;;) {
                    st.ok = GetQueuedCompletionStatus(nwb.hIocp,
                        &(st.transferred),
                        &(st.key),
                        (OVERLAPPED**)&(st.pb),
                        INFINITE);

                    if (!st.pb) {
                        if (!st.transferred) {
                            sock* psock = (sock*)st.key;
                            if (aqx::now() > psock->rtime && (psock->st & opcode::t_activated)) {
                                psock->close();
                                if (error_resume(*psock))
                                    continue;   // was `return`, which would have killed the worker thread
                                on_reset(*psock);
                            }
                        }
                        else if (st.transferred == opcode::s_connect) {
                            sock* psock = (sock*)st.key;
                            if (psock->connect()) {
                                psock->close();
                                if (error_resume(*psock))
                                    continue;   // was `return`, which would have killed the worker thread
                                on_reset(*psock);
                            }
                        }
                        else if (st.transferred == -1)
                            break;
                        continue;
                    }
                    on_completion(st);
                }
                
                //nlog("netio::worker->I/O worker thread %d stopped!", nwb.WorkerThreadId);
            });
        }

    private:
        net_base nwb;
        std::list<sock*> a_list;
        std::list<sock*> c_list;
        std::function<task(coio)> cofunc;
        std::thread thd;
        std::mutex mtx;
    };
}

#pragma warning(pop)

 

I've removed every coupling from this library; the only dependency left is the logging library, aqx::log, a simple formatted logger I wrote myself:

logger.hpp
#pragma once
#include <iostream>
#include <string>
#include <time.h>
#include <stdarg.h>
#include <mutex>
#include <vector>

//aqx::log does not couple with the other aqx libraries
#if defined(_WIN32) || defined(_WIN64)
#ifndef _WINDOWS_
#include <WinSock2.h>
#endif
#define __aqxlog_getpid GetCurrentProcessId
#define __aqxlog_gettid GetCurrentThreadId
#include <io.h>
#else
#if defined(__linux__)
#include <unistd.h>
#include <sys/syscall.h>
#define __aqxlog_getpid getpid
#define __aqxlog_gettid() syscall(__NR_gettid)
#endif
#endif

#pragma warning(push)
#pragma warning(disable:4996)

namespace aqx {

    class log {
    private:
        struct _format_texts {
            std::string time;
            std::string type;
            std::string pid;
            std::string tid;
        };

    public:
        static constexpr int hs_time = 1;
        static constexpr int hs_type = 2;
        static constexpr int hs_pid = 4;
        static constexpr int hs_tid = 8;
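
        // The head flags combine bitwise, e.g. seths(log::hs_time | log::hs_type)
        // prefixes every line with a timestamp and the message type.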

        log() {
            _stdout_fp = stdout;
            fp = stdout;
            _fmtts = { "%Y/%m/%d %H:%M:%S ", "{%s} ",  "[%d] ",  "(%d) " };
            head_style = log::hs_time;
            head_presize = _gethps();
            PutStdout = false;   // was never initialized before setvfs()
            _EnableInfo = true;
            _EnableError = false;
            _EnableDebug = false;
            _EnableWarn = false;
            _DefType = "info";
            s.reserve(0x1000);
        }

        ~log() {
            if (fp != _stdout_fp)
                fclose(fp);
        }

        void enable(const std::string_view& _Type, bool _Enable) {
            std::lock_guard<std::mutex> lg(_Mtx);
            if (_Type == "info")
                _EnableInfo = _Enable;
            else if (_Type == "error")
                _EnableError = _Enable;
            else if (_Type == "debug")
                _EnableDebug = _Enable;
            else if (_Type == "warn")
                _EnableWarn = _Enable;
        }

        void seths(int hs) {
            std::lock_guard<std::mutex> lg(_Mtx);
            head_style = hs;
            head_presize = _gethps();
        }

        void sethfmt(int _Style, const char* _Fmt) {
            std::lock_guard<std::mutex> lg(_Mtx);
            switch (_Style) {
            case hs_time:
                _fmtts.time = _Fmt;
                break;
            case hs_type:
                _fmtts.type = _Fmt;
                break;
            case hs_pid:
                _fmtts.pid = _Fmt;
                break;
            case hs_tid:
                _fmtts.tid = _Fmt;
                break;
            }
            head_presize = _gethps();
        }

        bool setvfs(const char* _FileName, bool _PutStdout = false) {
            std::lock_guard<std::mutex> lg(_Mtx);
            FILE* _tmp = fopen(_FileName, "ab");
            if (!_tmp)
                return false;
            if (fp != _stdout_fp)
                fclose(fp);
            fp = _tmp;
            PutStdout = _PutStdout;
            return true;
        }

        log& info(const char* _Fmt, ...) {
            std::lock_guard<std::mutex> lg(_Mtx);
            if (!_EnableInfo)
                return *this;
            va_list vl;
            va_start(vl, _Fmt);
            _build("info", _Fmt, vl);
            va_end(vl);
            _putlog();
            return *this;
        }

        log& debug(const char* _Fmt, ...) {
            std::lock_guard<std::mutex> lg(_Mtx);
            if (!_EnableDebug)
                return *this;
            va_list vl;
            va_start(vl, _Fmt);
            _build("info", _Fmt, vl);
            va_end(vl);
            _putlog();
            return *this;
        }

        log& error(const char* _Fmt, ...) {
            std::lock_guard<std::mutex> lg(_Mtx);
            if (!_EnableError)
                return *this;
            va_list vl;
            va_start(vl, _Fmt);
            _build("info", _Fmt, vl);
            va_end(vl);
            _putlog();
            return *this;
        }

        log& warn(const char* _Fmt, ...) {
            std::lock_guard<std::mutex> lg(_Mtx);
            if (!_EnableWarn)
                return *this;
            va_list vl;
            va_start(vl, _Fmt);
            _build("info", _Fmt, vl);
            va_end(vl);
            _putlog();
            return *this;
        }

        log& operator()(const char* _Fmt, ...) {
            std::lock_guard<std::mutex> lg(_Mtx);
            if (!_EnableInfo)
                return *this;
            va_list vl;
            va_start(vl, _Fmt);
            _build(_DefType.c_str(), _Fmt, vl);
            va_end(vl);
            _putlog();
            return *this;
        }

    private:
        void _putlog() {
            fputs(s.data(), fp);
            if (fp != _stdout_fp) {
                //fflush(fp);
                if (PutStdout)
                    fputs(s.data(), _stdout_fp);
            }
        }

        size_t _build(const char* _Type, const char* _Fmt, va_list vl) {
            s.clear();
            va_list vl2;
            va_copy(vl2, vl);   // measure with a copy; reusing vl for both vsnprintf passes is undefined
            size_t n = vsnprintf(nullptr, 0, _Fmt, vl2);
            va_end(vl2);
            if (n <= 0)
                return _build_head(_Type);
            if (n >= s.capacity()) {
                s.clear();
                s.reserve(n + head_presize);
            }
            size_t _Pos = _build_head(_Type);
            char* p = (char*)s.data();
            _Pos += vsnprintf(p + _Pos, s.capacity() - _Pos, _Fmt, vl);
            char c = p[_Pos - 1];
#ifdef _WINDOWS_
            if (c != '\r' && c != '\n') {
                p[_Pos++] = '\r';
                p[_Pos++] = '\n';
                p[_Pos] = '\0';
            }

#else
            if (c != '\r' && c != '\n') {
                p[_Pos++] = '\n';
                p[_Pos] = '\0';
            }
#endif

            return _Pos;
        }

        size_t _build_time(size_t _Pos) {
            if (!(head_style & log::hs_time))
                return _Pos;
            time_t t = time(NULL);
            auto _Tm = localtime(&t);
            _Pos += strftime((char*)s.data() + _Pos, head_presize, _fmtts.time.c_str(), _Tm);
            return _Pos;
        }

        size_t _build_type(size_t _Pos, const char* _Type) {
            if (!(head_style & log::hs_type))
                return _Pos;
            _Pos += sprintf((char*)s.data() + _Pos, _fmtts.type.c_str(), _Type);
            return _Pos;
        }

        size_t _build_pid(size_t _Pos) {
            if (!(head_style & log::hs_pid))
                return _Pos;
            auto _Pid = __aqxlog_getpid();
            _Pos += sprintf((char*)s.data() + _Pos, _fmtts.pid.c_str(), _Pid);
            return _Pos;
        }

        size_t _build_tid(size_t _Pos) {
            if (!(head_style & log::hs_tid))
                return _Pos;
            auto _Tid = __aqxlog_gettid();
            _Pos += sprintf((char*)s.data() + _Pos, _fmtts.tid.c_str(), _Tid);
            return _Pos;
        }

        size_t _build_head(const char* _Type) {
            return _build_tid(_build_pid(_build_type(_build_time(0), _Type)));
        }

        size_t _gethps() {
            size_t _Result = 3;
            if (head_style & log::hs_time)
                _Result += ((_fmtts.time.length() << 1) + 30);
            if (head_style & log::hs_type)
                _Result += ((_fmtts.type.length() << 1) + 12);
            if (head_style & log::hs_pid)
                _Result += ((_fmtts.pid.length() << 1) + 20);
            if (head_style & log::hs_tid)
                _Result += ((_fmtts.tid.length() << 1) + 20);
            return _Result;
        }

    private:
        std::vector<char> s;
        FILE* fp;
        _format_texts _fmtts;
        int head_style;
        size_t head_presize;
        bool PutStdout;
        FILE* _stdout_fp;
        std::mutex _Mtx;
        std::string _DefType;
        bool _EnableInfo;
        bool _EnableDebug;
        bool _EnableError;
        bool _EnableWarn;
    };
}

static aqx::log logger;
#pragma warning(pop)

 

Finally, the test code. Client and server are bundled together; to separate them, split at the few call sites after nio.init.

// main.cpp
#include <iostream>
#include <aqx/netio.hpp>

int main()
{
    aqx::init_winsock();

    aqx::netio nio;
    nio.init(1440, 0x10000);

    // A simple echo server example:

    nio.server([](aqx::coio io)->aqx::netio::task {
        // The server should always sit in an infinite loop; otherwise the fallback logic keeps creating new coroutines.
        for (;;) {
            // io.accept returns an object usable for asynchronous send and close
            auto s = co_await io.accept();
            logger("客戶端連入:%s", aqx::net_base::sockaddr_to_string(s.getsockaddr()));
            for (;;) {
                auto buf = co_await io.recv(s);
                if (!buf.length()) {
                    logger("斷開連線!");
                    break;
                }

                puts(buf.data());
                buf.clear();
                // Asynchronous send; the coroutine does not suspend here.
                // sizeof("...") includes the trailing NUL and is correct for any source encoding.
                s.send("收到!", (int)sizeof("收到!"));
                
            }
            co_await io.close(s);
            logger("已關閉!");
        }
    }, aqx::netio::listener("0.0.0.0:55554", 100, 100));



    // I was lazy enough to leave client and server in one file; split them yourself if you want
    auto sock1 = nio.client([](aqx::coio io)->aqx::netio::task {
        // A client only needs this loop if it wants automatic reconnection
        for (;;) {
            auto s = co_await io.connect("127.0.0.1:55554");
            if (!s) {
                co_await io.close(s);
                continue;
            }

            for (;;) {
                auto buf = co_await io.recv(s);
                if (!buf.length()) {
                    break;
                }
                puts(buf.data());
                buf.clear();
            }
            
            co_await io.close(s);
        }
       
    });

    // I was lazy enough to leave client and server in one file; split them yourself if you want
    auto sock2 = nio.client([](aqx::coio io)->aqx::netio::task {
        // A client only needs this loop if it wants automatic reconnection
        for (;;) {
            auto s = co_await io.connect("127.0.0.1:55554");
            if (!s) {
                co_await io.close(s);
                continue;
            }

            for (;;) {
                auto buf = co_await io.recv(s);
                if (!buf.length()) {
                    break;
                }
                puts(buf.data());
                buf.clear();
            }

            co_await io.close(s);
        }

    });
    
    std::string str;
    for (;;) {
        std::cin >> str;
        if (str == "exit")
            break;

        std::string sd = "sock1:";
        sd += str;
        sock1.safe_send(sd.data(), (int)sd.length() + 1);

        sd = "sock2:";
        sd += str;
        sock2.safe_send(sd.data(), (int)sd.length() + 1);
    }

    nio.stop();
    nio.release();
}
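
The code is MSVC-specific by design (IOCP, the VC std::string layout hack, __declspec(noinline)), so build it with a C++20-capable Visual Studio. Assuming both headers are on your include path, something like cl /std:c++20 /EHsc main.cpp should do; ws2_32.lib is linked automatically by the #pragma comment in netio.hpp.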

 
