I have a question about how the sizes of the buffers passed to recv() and send() affect TCP performance. Consider the following fully working C++ example, which transfers 1 GB of (arbitrary) data from a client to a server over TCP.
#include <unistd.h>
#include <netdb.h>
#include <errno.h>
#include <netinet/tcp.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/time.h>
#include <sys/ioctl.h>
#include <iostream>
#include <memory>
#include <cstring>
#include <cstdlib>
#include <stdexcept>
#include <algorithm>
#include <string>
#include <sstream>
// Time points and durations are measured in microseconds.
typedef unsigned long long TimePoint;
typedef unsigned long long Duration;

// Returns the current wall-clock time in microseconds.
inline TimePoint getTimePoint() {
    struct ::timeval tv;
    ::gettimeofday(&tv, nullptr);
    return tv.tv_sec * 1000000ULL + tv.tv_usec;
}

const size_t totalSize = 1024 * 1024 * 1024; // 1 GiB to transfer in total
const int one = 1; // used to enable boolean socket options
void server(const size_t blockSize, const std::string& serviceName) {
    std::unique_ptr<char[]> block(new char[blockSize]);
    const size_t atLeastReads = totalSize / blockSize;
    std::cout << "Starting server. Receiving block size is " << blockSize
              << ", which requires at least " << atLeastReads << " reads." << std::endl;

    // Resolve a passive (wildcard) address to listen on.
    addrinfo hints;
    memset(&hints, 0, sizeof(addrinfo));
    hints.ai_family = AF_INET;
    hints.ai_socktype = SOCK_STREAM;
    hints.ai_flags = AI_PASSIVE;
    hints.ai_protocol = 0;
    addrinfo* firstAddress;
    int result = getaddrinfo(nullptr, serviceName.c_str(), &hints, &firstAddress);
    if (result != 0) return;

    int listener = socket(firstAddress->ai_family, firstAddress->ai_socktype, firstAddress->ai_protocol);
    if (listener == -1) return;
    if (setsockopt(listener, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one)) != 0) return;
    if (bind(listener, firstAddress->ai_addr, firstAddress->ai_addrlen) != 0) return;
    freeaddrinfo(firstAddress);
    if (listen(listener, 1) != 0) return;

    while (true) {
        int server = accept(listener, nullptr, nullptr);
        if (server == -1) return;

        // Switch the accepted socket to non-blocking mode.
        u_long mode = 1;
        if (::ioctl(server, FIONBIO, &mode) != 0) return;
        // if (setsockopt(server, IPPROTO_TCP, TCP_NODELAY, &one, sizeof(one)) != 0) return;
        // int size = 64000;
        // if (setsockopt(server, SOL_SOCKET, SO_RCVBUF, &size, sizeof(size)) != 0) return;
        // if (setsockopt(server, SOL_SOCKET, SO_SNDBUF, &size, sizeof(size)) != 0) return;
        std::cout << "Server accepted connection." << std::endl;

        size_t leftToRead = totalSize;
        size_t numberOfReads = 0;
        size_t numberOfIncompleteReads = 0;
        const TimePoint totalStart = ::getTimePoint();
        Duration selectDuration = 0;
        Duration readDuration = 0;
        while (leftToRead > 0) {
            // Wait until the socket becomes readable.
            fd_set readSet;
            FD_ZERO(&readSet);
            FD_SET(server, &readSet);
            TimePoint selectStart = ::getTimePoint();
            if (select(server + 1, &readSet, nullptr, nullptr, nullptr) == -1) return;
            selectDuration += ::getTimePoint() - selectStart;
            if (FD_ISSET(server, &readSet) != 0) {
                const size_t toRead = std::min(leftToRead, blockSize);
                TimePoint readStart = ::getTimePoint();
                const ssize_t actuallyRead = recv(server, block.get(), toRead, 0);
                readDuration += ::getTimePoint() - readStart;
                if (actuallyRead == -1)
                    return;
                else if (actuallyRead == 0) {
                    std::cout << "Got 0 bytes, which signals that the client closed the socket." << std::endl;
                    break;
                }
                else if (toRead != static_cast<size_t>(actuallyRead))
                    ++numberOfIncompleteReads;
                ++numberOfReads;
                leftToRead -= static_cast<size_t>(actuallyRead);
            }
        }

        const Duration totalDuration = ::getTimePoint() - totalStart;
        std::cout << "Receiving took " << totalDuration << " us, transfer rate was "
                  << totalSize / (totalDuration / 1000000.0) << " bytes/s." << std::endl;
        std::cout << "Selects took " << selectDuration << " us, while reads took " << readDuration << " us." << std::endl;
        std::cout << "There were " << numberOfReads << " reads (factor "
                  << numberOfReads / ((double)atLeastReads) << "), of which " << numberOfIncompleteReads
                  << " (" << (numberOfIncompleteReads / ((double)numberOfReads)) * 100.0
                  << "%) were incomplete." << std::endl << std::endl;
        close(server);
    }
}
bool client(const size_t blockSize, const std::string& hostName, const std::string& serviceName) {
    std::unique_ptr<char[]> block(new char[blockSize]);
    const size_t atLeastWrites = totalSize / blockSize;
    std::cout << "Starting client... " << std::endl;

    // Resolve the server's address.
    addrinfo hints;
    memset(&hints, 0, sizeof(addrinfo));
    hints.ai_family = AF_INET;
    hints.ai_socktype = SOCK_STREAM;
    hints.ai_flags = 0;
    hints.ai_protocol = 0;
    addrinfo* firstAddress;
    if (getaddrinfo(hostName.c_str(), serviceName.c_str(), &hints, &firstAddress) != 0) return false;

    int client = socket(firstAddress->ai_family, firstAddress->ai_socktype, firstAddress->ai_protocol);
    if (client == -1) return false;
    if (connect(client, firstAddress->ai_addr, firstAddress->ai_addrlen) != 0) return false;
    freeaddrinfo(firstAddress);

    // Switch the connected socket to non-blocking mode.
    u_long mode = 1;
    if (::ioctl(client, FIONBIO, &mode) != 0) return false;
    // if (setsockopt(client, IPPROTO_TCP, TCP_NODELAY, &one, sizeof(one)) != 0) return false;
    // int size = 64000;
    // if (setsockopt(client, SOL_SOCKET, SO_RCVBUF, &size, sizeof(size)) != 0) return false;
    // if (setsockopt(client, SOL_SOCKET, SO_SNDBUF, &size, sizeof(size)) != 0) return false;
    std::cout << "Client connected. Sending block size is " << blockSize
              << ", which requires at least " << atLeastWrites << " writes." << std::endl;

    size_t leftToWrite = totalSize;
    size_t numberOfWrites = 0;
    size_t numberOfIncompleteWrites = 0;
    const TimePoint totalStart = ::getTimePoint();
    Duration selectDuration = 0;
    Duration writeDuration = 0;
    while (leftToWrite > 0) {
        // Wait until the socket becomes writable.
        fd_set writeSet;
        FD_ZERO(&writeSet);
        FD_SET(client, &writeSet);
        TimePoint selectStart = ::getTimePoint();
        if (select(client + 1, nullptr, &writeSet, nullptr, nullptr) == -1) return false;
        selectDuration += ::getTimePoint() - selectStart;
        if (FD_ISSET(client, &writeSet) != 0) {
            const size_t toWrite = std::min(leftToWrite, blockSize);
            TimePoint writeStart = ::getTimePoint();
            const ssize_t actuallyWritten = send(client, block.get(), toWrite, 0);
            writeDuration += ::getTimePoint() - writeStart;
            if (actuallyWritten == -1)
                return false;
            else if (actuallyWritten == 0) {
                std::cout << "Got 0 bytes, which shouldn't happen!" << std::endl;
                break;
            }
            else if (toWrite != static_cast<size_t>(actuallyWritten))
                ++numberOfIncompleteWrites;
            ++numberOfWrites;
            leftToWrite -= static_cast<size_t>(actuallyWritten);
        }
    }

    const Duration totalDuration = ::getTimePoint() - totalStart;
    std::cout << "Writing took " << totalDuration << " us, transfer rate was "
              << totalSize / (totalDuration / 1000000.0) << " bytes/s." << std::endl;
    std::cout << "Selects took " << selectDuration << " us, while writes took " << writeDuration << " us." << std::endl;
    std::cout << "There were " << numberOfWrites << " writes (factor "
              << numberOfWrites / ((double)atLeastWrites) << "), of which " << numberOfIncompleteWrites
              << " (" << (numberOfIncompleteWrites / ((double)numberOfWrites)) * 100.0
              << "%) were incomplete." << std::endl << std::endl;

    if (shutdown(client, SHUT_WR) != 0) return false;
    if (close(client) != 0) return false;
    return true;
}
int main(int argc, char* argv[]) {
    if (argc < 2)
        std::cout << "Block size is missing." << std::endl;
    else {
        // The block size is always the last command-line argument.
        const size_t blockSize = static_cast<size_t>(std::atoll(argv[argc - 1]));
        if (blockSize == 0 || blockSize > 1024 * 1024) // guards against atoll() failure (0) and huge sizes
            std::cout << "Block size " << blockSize << " is suspicious." << std::endl;
        else {
            if (argc >= 3) {
                // A host name was given: run as the client and send to it.
                if (!client(blockSize, argv[1], "12000"))
                    std::cout << "The client encountered an error." << std::endl;
            }
            else {
                // No host name: run as the server. server() only returns on error.
                server(blockSize, "12000");
                std::cout << "The server encountered an error." << std::endl;
            }
        }
    }
    return 0;
}
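For reference, this is how I build and run it (the binary and host names here are arbitrary placeholders):

g++ -O2 -o tcp_blocks main.cpp
./tcp_blocks 40              # server: listens on port 12000, reads in 40-byte blocks
./tcp_blocks serverhost 40   # client: connects to serverhost:12000, writes in 40-byte blocks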
I run this example on two Linux machines (kernel version 4.1.10-200.fc22.x86_64) connected by a 1 Gbit/s LAN, and I observe the following behavior: if the recv() and send() calls use buffers of 40 bytes or more, I use all the available bandwidth; however, if I use smaller buffers on either the server or the client, throughput drops. This behavior seems unaffected by the commented-out socket options (Nagle's algorithm and/or the send/receive buffer sizes).
I can understand that sending data in small blocks may be inefficient: if Nagle's algorithm is off and the blocks are small, the TCP and IP header sizes can dominate the useful payload. However, I did not expect the receive buffer size to affect the transfer rate: I assumed a recv() system call is cheap compared to the cost of actually sending the data over the LAN. So if I send data in 5000-byte blocks, I expected the transfer rate to be largely independent of the receive buffer size, since the rate at which I call recv() should still exceed the LAN transfer rate. Alas, that is not the case!
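To make the overhead concern concrete: a 10-byte payload sent in its own segment is accompanied by at least a 20-byte IP header, a 20-byte TCP header, and 18 bytes of Ethernet framing, so at most 10/68, roughly 15%, of each frame is useful payload (even less once the preamble and inter-frame gap are counted).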
I would really appreciate it if someone could explain what causes the slowdown: is it just the cost of the system calls, or is it something happening at the protocol level?
I ran into this problem while writing a message-based cloud application, and I would be grateful if someone could tell me how this issue should, in their opinion, affect the system architecture. For various reasons I am not using a messaging library such as ZeroMQ but writing the messaging interface myself. The computation in the cloud is such that the message flow between servers is not symmetric (i.e., depending on the workload, server A may send much more data to server B than vice versa), messages are asynchronous (i.e., the time between messages is unpredictable, but many messages can arrive in bursts), and message sizes vary but are typically small (10 to 20 bytes). Furthermore, messages could in principle be delivered out of order, but it is important that no message is dropped, and some flow/congestion control is also needed; hence I use TCP rather than UDP. Because messages vary in size, each message starts with an integer specifying the message size, followed by the message payload. To read a message from a socket, I first read the message size and then the payload; thus reading a single message requires at least two recv() calls (possibly more, since recv() can return less data than requested), as sketched below. Now, because both the message size and the message payload are small, I end up with many small recv() requests, which, as my example shows, do not let me exploit the available bandwidth. Does anyone have any suggestions for the "right" way to structure messaging in this situation?
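For concreteness, here is a minimal sketch of the framing I described; the helper names are mine, and it assumes a blocking socket and a 4-byte host-order length prefix:

#include <sys/types.h>
#include <sys/socket.h>
#include <cstdint>
#include <string>

// Loops until 'size' bytes have been received, since recv() may return
// less than requested. Assumes a blocking socket; false on error or EOF.
static bool recvAll(int fd, void* buffer, size_t size) {
    char* p = static_cast<char*>(buffer);
    while (size > 0) {
        const ssize_t n = recv(fd, p, size, 0);
        if (n <= 0) return false;
        p += n;
        size -= static_cast<size_t>(n);
    }
    return true;
}

// Reading one message therefore costs at least two recv() calls:
// one (tiny!) read for the size prefix and one for the payload.
static bool readMessage(int fd, std::string& payload) {
    std::uint32_t size = 0;
    if (!recvAll(fd, &size, sizeof(size))) return false;
    payload.resize(size);
    return recvAll(fd, &payload[0], size);
}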
Thank you very much for all your help!
Answer 0 (score: 1)
You don't need two recv() calls to read the data you describe. Smarter code, or recvmsg(), will solve that. You just have to be able to handle the fact that some data of the next message may already have been read.
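To illustrate the "smarter code" idea, here is a minimal sketch, assuming a blocking socket and the question's 4-byte size prefix (the class and member names are invented for illustration): read in large chunks into an application-side buffer and carve complete messages out of it, keeping any bytes of the next message that arrive early.

#include <sys/types.h>
#include <sys/socket.h>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <string>
#include <vector>

class MessageReader {
public:
    explicit MessageReader(int fd) : fd_(fd) {}

    // Fills 'payload' with the next complete message.
    // Returns false on error or EOF.
    bool next(std::string& payload) {
        for (;;) {
            // Try to carve a complete message out of what is already buffered.
            if (buffer_.size() >= sizeof(std::uint32_t)) {
                std::uint32_t size;
                std::memcpy(&size, buffer_.data(), sizeof(size));
                if (buffer_.size() >= sizeof(size) + size) {
                    payload.assign(buffer_.data() + sizeof(size), size);
                    buffer_.erase(buffer_.begin(),
                                  buffer_.begin() + static_cast<std::ptrdiff_t>(sizeof(size) + size));
                    return true;
                }
            }
            // Not enough data yet: refill with one large recv(), which may
            // pull in many small messages (and a partial one) at once.
            char chunk[64 * 1024];
            const ssize_t n = recv(fd_, chunk, sizeof(chunk), 0);
            if (n <= 0) return false;
            buffer_.insert(buffer_.end(), chunk, chunk + n);
        }
    }

private:
    int fd_;
    std::vector<char> buffer_; // holds leftover bytes between next() calls
};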
The socket receive buffer should be at least as large as the bandwidth-delay product of the link. That will normally be many kilobytes.

The socket send buffer should be at least as large as the peer's socket receive buffer.

Otherwise you cannot use all the available bandwidth.
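As a worked example (the RTT figure here is an assumption; measure your own): a 1 Gbit/s link carries 10^9 / 8 = 125,000,000 bytes per second, so with a round-trip time of 0.2 ms the bandwidth-delay product is 125,000,000 × 0.0002 = 25,000 bytes. Socket buffers much smaller than that cannot keep such a link full.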
EDIT Regarding your comments below:
"I don't understand why the size of the recv()/send() buffers in user space would affect throughput."
It affects throughput because it affects the amount of data that can be in flight, the maximum of which is given by the bandwidth-delay product of the link.
"As people said above, recv()/send() requests do not affect the protocol."
That is rubbish. A send() request causes data to be sent, which affects the protocol by engaging it in sending, and a recv() request causes data to be removed from the receive buffer, which affects the protocol by changing the receive window advertised in the next ACK.
"Therefore I would expect that as long as the kernel has enough room in its buffers, and as long as I read the data fast enough, there shouldn't be any problem. However, that is not what I observe: (i) changing the size of the kernel buffers has no effect, and (ii) I already use all the available bandwidth with a 40-byte buffer."
No, you don't. A study published in the early 1980s showed a tripling of throughput over the early, slow Ethernet of the day merely by raising the socket buffers from 1024 to 4096 bytes. If you think you observed otherwise, you didn't. Any socket buffer size smaller than the bandwidth-delay product inhibits performance, by definition.
Answer 1 (score: -1)