c++ - Boost Beast memory usage for bulk requests

I run this boost-beast-client-async-ssl example and it works fine. However, if I create 10000 sessions at the same time, my program's memory usage grows to 400 MB and never comes back down. When I test without ssl (plain http), memory does not grow.

Question: is there something wrong with openssl?

Here is my main function:

//up boost-beast-client-async-ssl session code.
struct io_context_runner
{
    boost::asio::io_context* ioc;
    void operator()() const
    {
        try {
            boost::asio::io_context::work w(*ioc);
            ioc->run();
        } catch (std::exception& e) {
            fprintf(stderr, "e: %s\n", e.what());
        }
    }
};

int main(int argc, char* argv[]) {

    try
    {
        int total_run = 1;
        if (argc > 1) total_run = atoi(argv[1]);

        const char* const host   = "104.236.162.70"; // IP of isocpp.org
        const char* const port   = "443";
        const char* const target = "/";

        std::string const body = "";
        int version = 11;

        // The io_context is required for all I/O
        boost::asio::io_context ioc;

        // The SSL context is required, and holds certificates
        ssl::context ctx{ssl::context::sslv23_client};

        // This holds the root certificate used for verification
        load_root_certificates(ctx);

        typedef std::shared_ptr<async_http_ssl::session> pointer;

        for (int i = 0; i < total_run; ++i) {
            pointer s = std::make_shared<async_http_ssl::session>(ioc, ctx);
            usleep(1000000 / total_run);
            s->run(host, port, target, version);
        }
        // Launch the asynchronous operation
        //std::make_shared<session>(ioc, ctx)->run(host, port, target, version);

        // Run the I/O service. The call will return when
        // the get operation is complete.
        std::thread t{ io_context_runner{ &ioc } };

        t.join();

        // If we get here then the connection is closed gracefully
    }
    catch (std::exception const& e)
    {
        std::cerr << "Error: " << e.what() << std::endl;
        return EXIT_FAILURE;
    }

    return EXIT_SUCCESS;
}

EDIT: ubuntu 14.04, boost 1.66, g++ 4.9.4, OpenSSL 1.0.1f 6 Jan 2014.

EDIT2: According to this question, malloc_trim frees (returns to the operating system) a large amount of unused memory. It would be best if boost asio itself supported malloc_trim for ssl connections on unix systems!
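
For reference, here is a minimal sketch of what calling glibc's malloc_trim periodically from the io_context could look like. It is not part of the question or the answer; the 30-second interval, the schedule_trim helper, and the timer wiring are illustrative assumptions, not something Boost.Asio provides.

// Sketch only: periodically ask glibc to return unused heap pages to the OS.
#include <malloc.h>                     // malloc_trim (glibc-specific)
#include <boost/asio.hpp>
#include <chrono>

void schedule_trim(boost::asio::steady_timer& timer)
{
    timer.expires_after(std::chrono::seconds(30));
    timer.async_wait([&timer](boost::system::error_code ec) {
        if (ec) return;                 // timer cancelled or io_context stopping
        malloc_trim(0);                 // release free arena memory back to the OS
        schedule_trim(timer);           // re-arm for the next interval
    });
}

To use it, construct a boost::asio::steady_timer on the same io_context as the sessions and call schedule_trim(timer) once before running the context.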

Best Answer

There are a couple of problems with the way you adapted the example:

  1. The worker thread keeps its own work object locking the io_service instance, so run() never completes
  2. You usleep for some time before spawning each async task, but you never run any of the tasks until the loop completes... meaning that all of the delays happen before any work is even begun

Here are my suggestions (a compact sketch follows the list):

  • Run the service before starting the async tasks
  • Keep 1 work instance locking the service, in case it runs out of work before the next http request is posted
  • Don't lock work inside the worker thread
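
As a compact illustration of those three points (a sketch only, with illustrative names; the full listing below and the Tester struct further down do the same thing):

#include <boost/asio.hpp>
#include <boost/optional.hpp>
#include <thread>

int main() {
    boost::asio::io_context ioc;

    // exactly one work instance, owned by the code that posts the requests
    boost::optional<boost::asio::io_context::work> work{ioc};

    // the service is already running before any request is posted,
    // and the worker thread itself holds no work object
    std::thread worker([&ioc] { ioc.run(); });

    // ... create sessions against `ioc` and call run() on them here ...

    work.reset();   // nothing more to post: let ioc.run() return once idle
    worker.join();  // wait for the outstanding handlers to drain
}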

Live On Coliru

#include "example/common/root_certificates.hpp"

#include <boost/beast.hpp>
#include <boost/asio.hpp>

using tcp = boost::asio::ip::tcp;       // from <boost/asio/ip/tcp.hpp>
namespace ssl = boost::asio::ssl;       // from <boost/asio/ssl.hpp>
namespace http = boost::beast::http;    // from <boost/beast/http.hpp>

//------------------------------------------------------------------------------

// Report a failure
void
fail(boost::system::error_code ec, char const* what)
{
    std::cerr << what << ": " << ec.message() << "\n";
}

// Performs an HTTP GET and prints the response
class session : public std::enable_shared_from_this<session>
{
    tcp::resolver resolver_;
    ssl::stream<tcp::socket> stream_;
    boost::beast::flat_buffer buffer_; // (Must persist between reads)
    http::request<http::empty_body> req_;
    http::response<http::string_body> res_;

public:
    // Resolver and stream require an io_context
    explicit
    session(boost::asio::io_context& ioc, ssl::context& ctx)
        : resolver_(ioc)
        , stream_(ioc, ctx)
    {
    }

    // Start the asynchronous operation
    void
    run(
        char const* host,
        char const* port,
        char const* target,
        int version)
    {
        // Set SNI Hostname (many hosts need this to handshake successfully)
        if(! SSL_set_tlsext_host_name(stream_.native_handle(), host))
        {
            boost::system::error_code ec{static_cast<int>(::ERR_get_error()), boost::asio::error::get_ssl_category()};
            std::cerr << ec.message() << "\n";
            return;
        }

        // Set up an HTTP GET request message
        req_.version(version);
        req_.method(http::verb::get);
        req_.target(target);
        req_.set(http::field::host, host);
        req_.set(http::field::user_agent, BOOST_BEAST_VERSION_STRING);

        // Look up the domain name
        resolver_.async_resolve(
            host,
            port,
            std::bind(
                &session::on_resolve,
                shared_from_this(),
                std::placeholders::_1,
                std::placeholders::_2));
    }

    void
    on_resolve(
        boost::system::error_code ec,
        tcp::resolver::results_type results)
    {
        if(ec)
            return fail(ec, "resolve");

        // Make the connection on the IP address we get from a lookup
        boost::asio::async_connect(
            stream_.next_layer(),
            results.begin(),
            results.end(),
            std::bind(
                &session::on_connect,
                shared_from_this(),
                std::placeholders::_1));
    }

    void
    on_connect(boost::system::error_code ec)
    {
        if(ec)
            return fail(ec, "connect");

        // Perform the SSL handshake
        stream_.async_handshake(
            ssl::stream_base::client,
            std::bind(
                &session::on_handshake,
                shared_from_this(),
                std::placeholders::_1));
    }

    void
    on_handshake(boost::system::error_code ec)
    {
        if(ec)
            return fail(ec, "handshake");

        // Send the HTTP request to the remote host
        http::async_write(stream_, req_,
            std::bind(
                &session::on_write,
                shared_from_this(),
                std::placeholders::_1,
                std::placeholders::_2));
    }

    void
    on_write(
        boost::system::error_code ec,
        std::size_t bytes_transferred)
    {
        boost::ignore_unused(bytes_transferred);

        if(ec)
            return fail(ec, "write");

        // Receive the HTTP response
        http::async_read(stream_, buffer_, res_,
            std::bind(
                &session::on_read,
                shared_from_this(),
                std::placeholders::_1,
                std::placeholders::_2));
    }

    void
    on_read(
        boost::system::error_code ec,
        std::size_t bytes_transferred)
    {
        boost::ignore_unused(bytes_transferred);

        if(ec)
            return fail(ec, "read");

        // Write the message to standard out
        //std::cout << res_ << std::endl;

        // Gracefully close the stream
        stream_.async_shutdown(
            std::bind(
                &session::on_shutdown,
                shared_from_this(),
                std::placeholders::_1));
    }

    void
    on_shutdown(boost::system::error_code ec)
    {
        if(ec == boost::asio::error::eof)
        {
            // Rationale:
            // http://stackoverflow.com/questions/25587403/boost-asio-ssl-async-shutdown-always-finishes-with-an-error
            ec.assign(0, ec.category());
        }
        if(ec)
            return fail(ec, "shutdown");

        // If we get here then the connection is closed gracefully
    }
};

//up boost-beast-client-async-ssl session code.
struct io_context_runner
{
    boost::asio::io_context& ioc;
    void operator()() const
    {
        try {
            ioc.run();
        } catch (std::exception& e) {
            fprintf(stderr, "e: %s\n", e.what());
        }
    }
};

namespace async_http_ssl {
    using ::session;
}

#include <thread>

int main(int argc, char *argv[]) {
    // The io_context is required for all I/O
    boost::asio::io_context ioc;
    std::thread t;

    try {
        // Run the I/O service. The call will return when all work is complete
        boost::asio::io_context::work w(ioc);
        t = std::thread { io_context_runner{ioc} };

        int total_run = 1;
        if (argc > 1)
            total_run = atoi(argv[1]);

#if 0
        auto host = "104.236.162.70"; // IP of isocpp.org
        auto port = "443";
        auto target = "/";
#else
        auto host = "127.0.0.1";
        auto port = "443";
        auto target = "/BBB/http_client_async_ssl.cpp";
#endif

        std::string const body = "";
        int version = 11;

        // The SSL context is required, and holds certificates
        ssl::context ctx{ssl::context::sslv23_client};

        // This holds the root certificate used for verification
        load_root_certificates(ctx);

        typedef std::shared_ptr<async_http_ssl::session> pointer;

        for (int i = 0; i < total_run; ++i) {
            pointer s = std::make_shared<async_http_ssl::session>(ioc, ctx);
            usleep(1000000 / total_run);
            s->run(host, port, target, version);
        }
    } catch (std::exception const& e) {
        std::cerr << "Error: " << e.what() << std::endl;
        return EXIT_FAILURE;
    }

    if (t.joinable())
        t.join();

    // If we get here then the connections have been closed gracefully
}

Memory profile on my system, with 1 connection:

[memory profile graph]

With 100 connections:

[memory profile graph]

With 1000 connections:

[memory profile graph]

Analysis

What does this mean? It looks like Beast is still gradually using more memory as more requests are sent, right?

Well, no. The problem is that you start requests faster than they can complete, so the memory load grows mainly because many session instances exist at the same time. Once they complete, they release their resources automatically (thanks to the use of shared_ptr<session>).
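
One hypothetical way to see this for yourself (not part of the original answer) is to count live session instances with a small RAII member:

#include <atomic>
#include <cstdio>

// Sketch: adding `live_session_counter counter_;` as a data member of `session`
// (a hypothetical change) reports how many sessions are alive at once; the peak
// grows with the rate at which requests are started.
struct live_session_counter {
    static std::atomic<int>& count() { static std::atomic<int> n{0}; return n; }
    live_session_counter()  { ++count(); }
    ~live_session_counter() { std::fprintf(stderr, "live sessions: %d\n", --count()); }
};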

Sequential Requests

To illustrate the point, here is a modified version of session that takes an on_complete_ handler:

    std::function<void()> on_complete_;

    // Resolver and stream require an io_context
    template <typename Handler>
    explicit
    session(boost::asio::io_context& ioc, ssl::context& ctx, Handler&& handler)
        : resolver_(ioc)
        , stream_(ioc, ctx)
        , on_complete_(std::forward<Handler>(handler))
    {
    }

    ~session() {
        if (on_complete_) on_complete_();
    }

Now you can rewrite the main program logic as a chain of asynchronous operations (the destructor fires the callback only when the last pending handler drops its shared_ptr to the session, so each request starts only after the previous one has fully finished):

struct Tester {
    boost::asio::io_context ioc;
    boost::optional<boost::asio::io_context::work> work{ioc};
    std::thread t { io_context_runner{ioc} };

    ssl::context ctx{ssl::context::sslv23_client};

    Tester() {
        load_root_certificates(ctx);
    }

    void run(int remaining = 1) {
        if (remaining <= 0)
            return;

        auto s = std::make_shared<session>(ioc, ctx, [=] { run(remaining - 1); });
        s->run("127.0.0.1", "443", "/BBB/http_client_async_ssl.cpp", 11);
    }

    ~Tester() {
        work.reset();
        if (t.joinable()) t.join();
    }
};

int main(int argc, char *argv[]) {
    Tester tester;
    tester.run(argc > 1 ? atoi(argv[1]) : 1);
}

Using this program (Full Code On Coliru), we get much more stable results:

  • 1 request:

    [memory profile graph]

  • 100 requests:

    [memory profile graph]

  • 1000 requests:

    [memory profile graph]

Restoring Throughput

This is a bit too conservative, though: sending many requests this way can become very slow. How about some concurrency? Simple:

int main(int argc, char *argv[]) {
    int const total      = argc > 1 ? atoi(argv[1]) : 1;
    int const concurrent = argc > 2 ? atoi(argv[2]) : 1;

    {
        std::vector<Tester> chains(concurrent);

        for (auto& chain : chains)
            chain.run(total / concurrent);
    }

    std::cout << "All done\n";
}

That's it! Now we can run as many separate chains of execution as the second argument specifies, together serving roughly the total number of requests. When the vector of Tester objects goes out of scope, each one drops its work guard and joins its thread, so "All done" prints only after every chain has finished. Look at the difference in run times:

$ time ./sotest 1000
All done

real 0m53.295s
user 0m13.124s
sys 0m0.232s
$ time ./sotest 1000 10
All done

real 0m8.808s
user 0m8.884s
sys 0m1.096s

Meanwhile, memory usage stays just as healthy:

[memory profile graph]

Regarding c++ - Boost Beast memory usage for bulk requests, a similar question can be found on Stack Overflow: https://stackoverflow.com/questions/49322387/
