
windows - Slow transfer speed on a remote computer


Hey, StackOverflow friends!

I'm writing an IOCP server, and so far I've ironed out most of the problems, but one remains and I don't know where to start looking. When I run the client/server on my own machine, everything is fine. It matches the speed of the Windows SDK sample, is probably a bit faster, and certainly uses fewer CPU cycles. However, when I run the client from a second computer, the transfer speed caps at 37 KB/s and the round-trip latency is 200 ms (instead of ~0). If I connect that same client to the SDK sample server, the problem doesn't occur, so something must be wrong in my code. As far as I can tell, the sockets are initialized in exactly the same way, with the same options. I've also run my server under a profiler to check for bottlenecks and couldn't find any. Furthermore, the computers I've tried are all connected to the same gigabit switch (with gigabit adapters). I know this is all a bit vague, but that's because so far I haven't been able to pin the problem down; if any of you could point me in the right direction, I'd be eternally grateful.
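One quick way to verify that the options really do match is to read them back with getsockopt on both servers and compare the output. This is only a minimal sketch for that comparison; DumpSocketOptions is my own helper name, not part of the server code below:

#pragma comment(lib, "Ws2_32.lib")

#include <WinSock2.h>
#include <stdio.h>

// Sketch: read back the options that most often differ, so the values can be
// diffed between my server and the SDK sample server.
void DumpSocketOptions(SOCKET s)
{
    int sndBuf = 0, rcvBuf = 0;
    BOOL noDelay = FALSE;
    int len = sizeof(sndBuf);

    getsockopt(s, SOL_SOCKET, SO_SNDBUF, (char *) &sndBuf, &len);
    len = sizeof(rcvBuf);
    getsockopt(s, SOL_SOCKET, SO_RCVBUF, (char *) &rcvBuf, &len);
    len = sizeof(noDelay);
    getsockopt(s, IPPROTO_TCP, TCP_NODELAY, (char *) &noDelay, &len);

    printf("SO_SNDBUF=%i SO_RCVBUF=%i TCP_NODELAY=%i\n", sndBuf, rcvBuf, (int) noDelay);
}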

Cheers,

-乐声

Edit 2: After following Mike's advice I did some digging into the code, and found that when a remote client connects, the code spends most of its time waiting in GetQueuedCompletionStatus. This indicates that the IO requests simply take a long time to complete, but I still don't understand why. It only happens when the client is on a remote computer. I figure it has something to do with how the sockets are set up or with how I post the requests, but I can't see any difference from the sample code.
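For reference, the wait can be measured with a wrapper like the one below (a minimal sketch using QueryPerformanceCounter; TimedGQCS is my own name, not part of the server code that follows):

#include <WinSock2.h>
#include <stdio.h>

// Sketch: a drop-in wrapper that reports how long each
// GetQueuedCompletionStatus call blocks before a completion arrives.
BOOL TimedGQCS(HANDLE hIOCP, LPDWORD ioSize, PULONG_PTR key, LPOVERLAPPED *overlapped)
{
    static LARGE_INTEGER freq = {0};
    if (!freq.QuadPart)
        QueryPerformanceFrequency(&freq);

    LARGE_INTEGER t0, t1;
    QueryPerformanceCounter(&t0);
    BOOL status = GetQueuedCompletionStatus(hIOCP, ioSize, key, overlapped, INFINITE);
    QueryPerformanceCounter(&t1);

    printf("GetQueuedCompletionStatus blocked for %.2f ms\n",
           (t1.QuadPart - t0.QuadPart) * 1000.0 / freq.QuadPart);
    return status;
}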

Any ideas?

Edit (sample code added):

Ok, here it is! It isn't pretty!

If you have the Windows SDK installed, you can connect to it using the iocpclient sample (Program Files\Microsoft SDKs\Windows\v7.1\Samples\netds\winsock\iocp\client) after changing its default port 73 to 5000.

A strange thing I just noticed while trying it myself is that the sample iocpclient doesn't seem to hit the same 37 KB/s cap... however, the sample code appears to be limited to about 800 KB/s. I'll post a client if that's any help (a minimal stand-in is sketched after the listing below).

#pragma comment(lib, "Ws2_32.lib")

#include <WinSock2.h>
#include <stdio.h>
#include <string.h>

// Simple transfer statistics, updated from several threads without synchronization.
unsigned int connection = 0;
unsigned int upload = 0;
unsigned int download = 0;

#define IO_CONTEXT_COUNT 5

class NetClientHost
{
    friend class gNetProtocolHost;

public:
    enum Operation
    {
        kOperationUnknown,
        kOperationRead,
        kOperationWrite,
    };

    struct ClientData
    {
        SOCKET socket;
    };

    struct IOContext
    {
        WSAOVERLAPPED overlapped;
        WSABUF wsaReceiveBuf;
        WSABUF wsaSendBuf;
        char *buf;
        char *TESTbuf;
        unsigned long bytesReceived;
        unsigned long bytesSent;
        unsigned long flags;
        unsigned int bytesToSendTotal;
        unsigned int remainingBytesToSend;
        unsigned int chunk;
        Operation operation;
    };

    NetClientHost()
    {
        memset((void *) &m_clientData, 0, sizeof(m_clientData));
    }

    // Hand out a free IO context; busy-waits while all IO_CONTEXT_COUNT are in use.
    IOContext *AcquireContext()
    {
        while (true)
        {
            for (int i = 0; i < IO_CONTEXT_COUNT; ++i)
            {
                if (!(m_ioContexts + i)->inUse)
                {
                    InterlockedIncrement(&(m_ioContexts + i)->inUse);
                    //ResetEvent(*(m_hContextEvents + i));

                    if ((m_ioContexts + i)->ioContext.TESTbuf == 0)
                        Sleep(1);

                    return &(m_ioContexts + i)->ioContext;
                }
            }
            //++g_blockOnPool;
            //WaitForMultipleObjects(IO_CONTEXT_COUNT, m_hContextEvents, FALSE, INFINITE);
        }
    }

    const ClientData *GetClientData() const
    {
        return &m_clientData;
    }

    void Init(unsigned int bufferSize)
    {
        _InitializeIOContexts(bufferSize ? bufferSize : 1024);
    }

    void ReleaseContext(IOContext *ioContext)
    {
        // Recover the enclosing _IOContextData from its trailing ioContext member.
        int i = sizeof(_IOContextData), j = sizeof(IOContext);
        _IOContextData *contextData = (_IOContextData *) (((char *) ioContext) - (i - j));
        InterlockedDecrement(&contextData->inUse);
        //SetEvent(*(m_hContextEvents + contextData->index));
    }

    struct _IOContextData
    {
        unsigned int index;
        volatile long inUse;
        IOContext ioContext;
    };

    ClientData m_clientData;
    _IOContextData *m_ioContexts;
    HANDLE *m_hContextEvents;

    void _InitializeIOContexts(unsigned int bufferSize)
    {
        m_ioContexts = new _IOContextData[IO_CONTEXT_COUNT];
        m_hContextEvents = new HANDLE[IO_CONTEXT_COUNT];

        memset((void *) m_ioContexts, 0, sizeof(_IOContextData) * IO_CONTEXT_COUNT);

        for (int i = 0; i < IO_CONTEXT_COUNT; ++i)
        {
            (m_ioContexts + i)->index = i;

            (m_ioContexts + i)->ioContext.buf = new char[bufferSize];
            (m_ioContexts + i)->ioContext.wsaReceiveBuf.len = bufferSize;
            (m_ioContexts + i)->ioContext.wsaReceiveBuf.buf = (m_ioContexts + i)->ioContext.buf;
            (m_ioContexts + i)->ioContext.TESTbuf = new char[10000];
            (m_ioContexts + i)->ioContext.wsaSendBuf.buf = (m_ioContexts + i)->ioContext.TESTbuf;

            *(m_hContextEvents + i) = CreateEvent(0, TRUE, FALSE, 0);
        }
    }

    void _SetSocket(SOCKET socket)
    {
        m_clientData.socket = socket;
    }
};



// Post (or continue) an overlapped send of the current chunk.
bool WriteChunk(const NetClientHost *clientHost, NetClientHost::IOContext *ioContext)
{
    int status;

    status = WSASend(clientHost->GetClientData()->socket, &ioContext->wsaSendBuf, 1, &ioContext->bytesSent, ioContext->flags, &ioContext->overlapped, 0);
    if (status == SOCKET_ERROR && WSAGetLastError() != WSA_IO_PENDING)
    {
        // ...
        return false;
    }

    return true;
}

bool Write(NetClientHost *clientHost, void *buffer, unsigned int size, unsigned int chunk)
{
    //__ASSERT(m_clientHost);
    //__ASSERT(m_clientHost->GetClientData()->remainingBytesToSend == 0);

    NetClientHost::IOContext *ioContext = clientHost->AcquireContext();

    if (!chunk)
        chunk = size;

    ioContext->operation = NetClientHost::kOperationWrite;
    ioContext->flags = 0;
    // Copy the payload into this context's preallocated send buffer rather than
    // leaking a fresh allocation on every write (TESTbuf is 10000 bytes, size <= 1024 here).
    ioContext->wsaSendBuf.buf = ioContext->TESTbuf;
    memcpy((void *) ioContext->wsaSendBuf.buf, buffer, chunk);
    ioContext->wsaSendBuf.len = chunk;
    ioContext->chunk = chunk;
    ioContext->bytesToSendTotal = size;
    ioContext->remainingBytesToSend = size;

    return WriteChunk(clientHost, ioContext);
}



void Read(NetClientHost *clientHost)
{
    NetClientHost::IOContext *ioContext = clientHost->AcquireContext();
    int status;

    // Reset the overlapped structure and reuse the receive buffer allocated in
    // _InitializeIOContexts instead of reallocating on every read.
    memset((void *) &ioContext->overlapped, 0, sizeof(ioContext->overlapped));
    ioContext->wsaReceiveBuf.len = 1024;
    ioContext->wsaReceiveBuf.buf = ioContext->buf;

    ioContext->flags = 0;
    ioContext->operation = NetClientHost::kOperationRead;

    status = WSARecv(clientHost->GetClientData()->socket, &ioContext->wsaReceiveBuf, 1, &ioContext->bytesReceived, &ioContext->flags, &ioContext->overlapped, 0);
    if (status == SOCKET_ERROR && WSAGetLastError() != WSA_IO_PENDING)
    {
        // ...
    }
}

bool AddSocket(HANDLE hIOCP, SOCKET socket)
{
    ++connection;

    // Zero-byte send/receive buffers (as in the SDK sample) and a hard close on linger.
    int bufSize = 0;
    LINGER lingerStruct;
    lingerStruct.l_onoff = 1;
    lingerStruct.l_linger = 0;
    setsockopt(socket, SOL_SOCKET, SO_SNDBUF, (char *) &bufSize, sizeof(int));
    setsockopt(socket, SOL_SOCKET, SO_RCVBUF, (char *) &bufSize, sizeof(int));
    setsockopt(socket, SOL_SOCKET, SO_LINGER, (char *) &lingerStruct, sizeof(lingerStruct));

    NetClientHost *clientHost = new NetClientHost;

    clientHost->Init(1024); // allocates this client's IO contexts and buffers
    clientHost->_SetSocket(socket);

    // Associate this socket with the IO completion port.
    CreateIoCompletionPort((HANDLE) socket, hIOCP, (DWORD_PTR) clientHost, 0);

    Read(clientHost);
    return true;
}

int read = 0, write = 0;

DWORD WINAPI WorkerThread(LPVOID param)
{
    LPOVERLAPPED overlapped;
    NetClientHost *clientHost;
    HANDLE hIOCP = (HANDLE) param;
    DWORD ioSize;
    BOOL status;

    while (true)
    {
        status = GetQueuedCompletionStatus(hIOCP, &ioSize, (PULONG_PTR) &clientHost, (LPOVERLAPPED *) &overlapped, INFINITE);

        // A failed dequeue or a zero-byte completion means the peer went away.
        if (!(status || ioSize))
        {
            --connection;
            //_CloseConnection(clientHost);
            continue;
        }

        // The OVERLAPPED structure is the first member of IOContext, so cast it back.
        NetClientHost::IOContext *ioContext = (NetClientHost::IOContext *) overlapped;

        switch (ioContext->operation)
        {
        case NetClientHost::kOperationRead:
            download += ioSize;
            Write(clientHost, ioContext->wsaReceiveBuf.buf, ioSize, 0); // echo back what was received
            write++;
            clientHost->ReleaseContext(ioContext);
            break;

        case NetClientHost::kOperationWrite:
            upload += ioSize;
            if (ioContext->remainingBytesToSend)
            {
                ioContext->remainingBytesToSend -= ioSize;
                // Advance past the bytes just sent, then size the next chunk.
                ioContext->wsaSendBuf.buf += ioSize;
                ioContext->wsaSendBuf.len = ioContext->chunk <= ioContext->remainingBytesToSend ? ioContext->chunk : ioContext->remainingBytesToSend; // min(chunk, remainingBytesToSend)
            }

            if (ioContext->remainingBytesToSend)
            {
                WriteChunk(clientHost, ioContext);
            }
            else
            {
                clientHost->ReleaseContext(ioContext);
                Read(clientHost);
                read++;
            }
            break;
        }
    }

    return 0;
}

DWORD WINAPI ListenThread(LPVOID param)
{
    SOCKET sdListen = (SOCKET) param;

    HANDLE hIOCP = CreateIoCompletionPort(INVALID_HANDLE_VALUE, 0, 0, 0);
    CreateThread(0, 0, WorkerThread, hIOCP, 0, 0);
    CreateThread(0, 0, WorkerThread, hIOCP, 0, 0);
    CreateThread(0, 0, WorkerThread, hIOCP, 0, 0);
    CreateThread(0, 0, WorkerThread, hIOCP, 0, 0);

    while (true)
    {
        SOCKET as = WSAAccept(sdListen, 0, 0, 0, 0);
        if (as != INVALID_SOCKET)
            AddSocket(hIOCP, as);
    }
}

int main()
{
    SOCKET sdListen;
    SOCKADDR_IN si_addrlocal;
    int nRet;
    int nZero = 0;
    LINGER lingerStruct;

    WSADATA wsaData;
    WSAStartup(0x202, &wsaData);

    sdListen = WSASocket(AF_INET, SOCK_STREAM, IPPROTO_IP, NULL, 0, WSA_FLAG_OVERLAPPED);
    si_addrlocal.sin_family = AF_INET;
    si_addrlocal.sin_port = htons(5000);
    si_addrlocal.sin_addr.s_addr = htonl(INADDR_ANY);
    nRet = bind(sdListen, (struct sockaddr *) &si_addrlocal, sizeof(si_addrlocal));
    nRet = listen(sdListen, 5);

    // Same zero-byte buffers and hard-close linger on the listening socket.
    nZero = 0;
    nRet = setsockopt(sdListen, SOL_SOCKET, SO_SNDBUF, (char *) &nZero, sizeof(nZero));
    nZero = 0;
    nRet = setsockopt(sdListen, SOL_SOCKET, SO_RCVBUF, (char *) &nZero, sizeof(nZero));
    lingerStruct.l_onoff = 1;
    lingerStruct.l_linger = 0;
    nRet = setsockopt(sdListen, SOL_SOCKET, SO_LINGER, (char *) &lingerStruct, sizeof(lingerStruct));

    CreateThread(0, 0, ListenThread, (LPVOID) sdListen, 0, 0);

    // Print throughput twice a second (hence the *2 to scale to per-second rates).
    HANDLE console = GetStdHandle(STD_OUTPUT_HANDLE);
    while (true)
    {
        COORD c = {0};
        SetConsoleCursorPosition(console, c);
        printf("Connections: %i \nUpload: %iKB/s \nDownload: %iKB/s ", connection, upload * 2 / 1024, download * 2 / 1024);
        upload = 0;
        download = 0;
        Sleep(500);
    }

    return 0;
}
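As for the client mentioned above, a minimal blocking stand-in like the following exercises the same echo path (a sketch, not the client used for the 37 KB/s measurements; adjust the 127.0.0.1 address for a remote machine):

#pragma comment(lib, "Ws2_32.lib")

#include <WinSock2.h>
#include <stdio.h>

// Sketch of a test client: send 1 KB blocks and read the echoes back, so the
// throughput counters printed by the server have traffic to measure.
int main()
{
    WSADATA wsaData;
    WSAStartup(0x202, &wsaData);

    SOCKET s = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
    SOCKADDR_IN addr = {0};
    addr.sin_family = AF_INET;
    addr.sin_port = htons(5000);
    addr.sin_addr.s_addr = inet_addr("127.0.0.1"); // change for a remote server

    if (connect(s, (struct sockaddr *) &addr, sizeof(addr)) == SOCKET_ERROR)
    {
        printf("connect failed: %i\n", WSAGetLastError());
        return 1;
    }

    char buf[1024] = {0};
    while (true)
    {
        if (send(s, buf, (int) sizeof(buf), 0) == SOCKET_ERROR)
            break;

        // The server echoes everything it receives, so wait for the full block.
        int received = 0;
        while (received < (int) sizeof(buf))
        {
            int r = recv(s, buf + received, (int) sizeof(buf) - received, 0);
            if (r <= 0)
                return 1;
            received += r;
        }
    }
    return 0;
}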

Best Answer

An asynchronous system like this should be able to run at full data-link speed. The problems I've found to be at fault are:

  • Timeout settings that cause unnecessary retransmissions.
  • On the receiving side, incoming message A may trigger a database update that makes incoming message B wait, delaying B's response back to the sender unnecessarily, when the database update could just as well be done in idle time (see the sketch after this list).
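To illustrate the second point: hand the slow update to a background queue so the reply to the sender goes out immediately. This is only a sketch of the idea; UpdateDatabase and SendResponse are placeholders, not real APIs.

#include <condition_variable>
#include <deque>
#include <mutex>
#include <string>

// Placeholders for this sketch: the slow operation and the network reply.
void UpdateDatabase(const std::string &) { /* slow work stands in here */ }
void SendResponse(const std::string &) { /* reply to the sender */ }

std::mutex g_mutex;
std::condition_variable g_cv;
std::deque<std::string> g_work;

// Background thread: drains the queue and does the slow updates in idle time.
void BackgroundWorker()
{
    for (;;)
    {
        std::unique_lock<std::mutex> lock(g_mutex);
        g_cv.wait(lock, [] { return !g_work.empty(); });
        std::string item = g_work.front();
        g_work.pop_front();
        lock.unlock();

        UpdateDatabase(item); // message B is no longer stuck behind this
    }
}

// Network thread: queue the slow part, reply right away.
void OnMessageReceived(const std::string &msg)
{
    {
        std::lock_guard<std::mutex> lock(g_mutex);
        g_work.push_back(msg);
    }
    g_cv.notify_one();
    SendResponse(msg);
}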

Wireshark will let you see the message traffic. I used to do this with timestamped message logs.
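Something as simple as this is enough for the timestamped log (a sketch; LogTimestamped is illustrative):

#include <stdarg.h>
#include <stdio.h>
#include <windows.h>

// Sketch: prefix every log line with a millisecond timestamp so delays
// between message events stand out when reading the log next to a capture.
void LogTimestamped(const char *fmt, ...)
{
    SYSTEMTIME st;
    GetLocalTime(&st);
    printf("%02u:%02u:%02u.%03u ", st.wHour, st.wMinute, st.wSecond, st.wMilliseconds);

    va_list args;
    va_start(args, fmt);
    vprintf(fmt, args);
    va_end(args);
    printf("\n");
}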

BTW: I would first use this method on the individual processes to clear out any bottlenecks, before doing the asynchronous analysis. If you haven't done that, you can bet there are some in there. Not just any old profiler is reliable, though; there are good ones, including Zoom.

Regarding "windows - Slow transfer speed on a remote computer", a similar question can be found on Stack Overflow: https://stackoverflow.com/questions/5009753/
