--- a/Release/src/http/listener/http_server_asio.cpp
+++ b/Release/src/http/listener/http_server_asio.cpp
@@ -520,17 +520,14 @@
 
     auto& service = crossplat::threadpool::shared_instance().service();
     tcp::resolver resolver(service);
     // #446: boost resolver does not recognize "+" as a host wildchar
-    tcp::resolver::query query =
-        ("+" == m_host) ? tcp::resolver::query(m_port, boost::asio::ip::resolver_query_base::flags())
-                        : tcp::resolver::query(m_host, m_port, boost::asio::ip::resolver_query_base::flags());
-    tcp::endpoint endpoint = *resolver.resolve(query);
+    tcp::endpoint endpoint = (("+" == m_host) ? *(resolver.resolve("", m_port).begin()) : *(resolver.resolve(m_host, m_port).begin()));
 
     m_acceptor.reset(new tcp::acceptor(service));
     m_acceptor->open(endpoint.protocol());
     m_acceptor->set_option(socket_base::reuse_address(true));
     m_acceptor->bind(endpoint);
-    m_acceptor->listen(0 != m_backlog ? m_backlog : socket_base::max_connections);
+    m_acceptor->listen(0 != m_backlog ? m_backlog : socket_base::max_listen_connections);
 
     auto socket = new ip::tcp::socket(service);
     std::unique_ptr<ip::tcp::socket> usocket(socket);
@@ -881,7 +878,7 @@
     else
     {
         auto writebuf = requestImpl->outstream().streambuf();
-        writebuf.putn_nocopy(buffer_cast<const uint8_t*>(m_request_buf.data()), toWrite)
+        writebuf.putn_nocopy(static_cast<const uint8_t*>(m_request_buf.data().data()), toWrite)
             .then([=](pplx::task<size_t> writeChunkTask) -> will_deref_t {
                 try
                 {
@@ -913,7 +910,7 @@
     {
         auto writebuf = requestImpl->outstream().streambuf();
         writebuf
-            .putn_nocopy(boost::asio::buffer_cast<const uint8_t*>(m_request_buf.data()),
+            .putn_nocopy(static_cast<const uint8_t*>(m_request_buf.data().data()),
                          (std::min)(m_request_buf.size(), m_read_size - m_read))
            .then([this](pplx::task<size_t> writtenSizeTask) -> will_deref_t {
                size_t writtenSize = 0;
@@ -1134,7 +1131,7 @@
     }
 
     auto membuf = m_response_buf.prepare(ChunkSize + chunked_encoding::additional_encoding_space);
-    readbuf.getn(buffer_cast<uint8_t*>(membuf) + chunked_encoding::data_offset, ChunkSize)
+    readbuf.getn(static_cast<uint8_t*>(membuf.data()) + chunked_encoding::data_offset, ChunkSize)
         .then([=](pplx::task<size_t> actualSizeTask) -> will_deref_and_erase_t {
             size_t actualSize = 0;
             try
@@ -1146,7 +1143,7 @@
                 return cancel_sending_response_with_error(response, std::current_exception());
             }
             size_t offset = chunked_encoding::add_chunked_delimiters(
-                buffer_cast<uint8_t*>(membuf), ChunkSize + chunked_encoding::additional_encoding_space, actualSize);
+                static_cast<uint8_t*>(membuf.data()), ChunkSize + chunked_encoding::additional_encoding_space, actualSize);
             m_response_buf.commit(actualSize + chunked_encoding::additional_encoding_space);
             m_response_buf.consume(offset);
             if (actualSize == 0)
@@ -1167,7 +1164,7 @@
             return cancel_sending_response_with_error(
                 response, std::make_exception_ptr(http_exception("Response stream close early!")));
         size_t readBytes = (std::min)(ChunkSize, m_write_size - m_write);
-        readbuf.getn(buffer_cast<uint8_t*>(m_response_buf.prepare(readBytes)), readBytes)
+        readbuf.getn(static_cast<uint8_t*>(m_response_buf.prepare(readBytes).data()), readBytes)
             .then([=](pplx::task<size_t> actualSizeTask) -> will_deref_and_erase_t {
                 size_t actualSize = 0;
                 try
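Aside (not part of the patch): a minimal standalone sketch, assuming Boost 1.66 or later, of the resolve()/acceptor sequence the listener hunk above switches to. The host, port, and the std::cout reporting are placeholders chosen for illustration.

// Sketch only: modern Asio resolution feeding an acceptor, no tcp::resolver::query.
#include <boost/asio.hpp>
#include <iostream>

int main()
{
    using boost::asio::ip::tcp;

    boost::asio::io_context ctx;
    tcp::resolver resolver(ctx);

    // resolve(host, service) returns a results_type range directly.
    tcp::endpoint endpoint = *resolver.resolve("localhost", "34567").begin();

    tcp::acceptor acceptor(ctx);
    acceptor.open(endpoint.protocol());
    acceptor.set_option(boost::asio::socket_base::reuse_address(true));
    acceptor.bind(endpoint);
    // max_listen_connections replaces the deprecated max_connections constant.
    acceptor.listen(boost::asio::socket_base::max_listen_connections);

    std::cout << "listening on " << acceptor.local_endpoint() << std::endl;
    return 0;
}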
--- a/Release/src/http/client/http_client_asio.cpp
+++ b/Release/src/http/client/http_client_asio.cpp
@@ -146,7 +146,7 @@
     friend class asio_client;
 
 public:
-    asio_connection(boost::asio::io_service& io_service)
+    asio_connection(boost::asio::io_context& io_service)
        : m_socket_lock()
        , m_socket(io_service)
        , m_ssl_stream()
@@ -581,10 +581,8 @@
 
         m_context->m_timer.start();
 
-        tcp::resolver::query query(utility::conversions::to_utf8string(proxy_host), to_string(proxy_port));
-
         auto client = std::static_pointer_cast<asio_client>(m_context->m_http_client);
-        m_context->m_resolver.async_resolve(query,
+        m_context->m_resolver.async_resolve(utility::conversions::to_utf8string(proxy_host), to_string(proxy_port),
                                             boost::bind(&ssl_proxy_tunnel::handle_resolve,
                                                         shared_from_this(),
                                                         boost::asio::placeholders::error,
@@ -592,8 +590,9 @@
     }
 
 private:
-    void handle_resolve(const boost::system::error_code& ec, tcp::resolver::iterator endpoints)
+    void handle_resolve(const boost::system::error_code& ec, tcp::resolver::results_type results)
     {
+        auto iterator = results.begin();
         if (ec)
         {
             m_context->report_error("Error resolving proxy address", ec, httpclient_errorcode_context::connect);
@@ -601,16 +600,16 @@
         else
         {
             m_context->m_timer.reset();
-            auto endpoint = *endpoints;
+            auto endpoint = *iterator;
             m_context->m_connection->async_connect(endpoint,
                                                    boost::bind(&ssl_proxy_tunnel::handle_tcp_connect,
                                                                shared_from_this(),
                                                                boost::asio::placeholders::error,
-                                                               ++endpoints));
+                                                               ++iterator, results.end()));
         }
     }
 
-    void handle_tcp_connect(const boost::system::error_code& ec, tcp::resolver::iterator endpoints)
+    void handle_tcp_connect(const boost::system::error_code& ec, tcp::resolver::results_type::iterator endpoints, tcp::resolver::results_type::iterator endpoints_end)
     {
         if (!ec)
         {
@@ -621,7 +620,7 @@
                                                  shared_from_this(),
                                                  boost::asio::placeholders::error));
         }
-        else if (endpoints == tcp::resolver::iterator())
+        else if (endpoints == endpoints_end)
         {
             m_context->report_error(
                 "Failed to connect to any resolved proxy endpoint", ec, httpclient_errorcode_context::connect);
@@ -646,7 +645,7 @@
                 boost::bind(&ssl_proxy_tunnel::handle_tcp_connect,
                             shared_from_this(),
                             boost::asio::placeholders::error,
-                            ++endpoints));
+                            ++endpoints, endpoints_end));
         }
     }
 
@@ -885,8 +884,7 @@
         auto tcp_host = proxy_type == http_proxy_type::http ? proxy_host : host;
         auto tcp_port = proxy_type == http_proxy_type::http ? proxy_port : port;
 
-        tcp::resolver::query query(tcp_host, to_string(tcp_port));
-        ctx->m_resolver.async_resolve(query,
+        ctx->m_resolver.async_resolve(tcp_host, to_string(tcp_port),
                                       boost::bind(&asio_context::handle_resolve,
                                                   ctx,
                                                   boost::asio::placeholders::error,
@@ -1006,7 +1004,7 @@
         request_context::report_error(errorcodeValue, message);
     }
 
-    void handle_connect(const boost::system::error_code& ec, tcp::resolver::iterator endpoints)
+    void handle_connect(const boost::system::error_code& ec, tcp::resolver::results_type::iterator endpoints, tcp::resolver::results_type::iterator endpoints_end)
     {
         m_timer.reset();
         if (!ec)
@@ -1019,7 +1017,7 @@
         {
             report_error("Request canceled by user.", ec, httpclient_errorcode_context::connect);
         }
-        else if (endpoints == tcp::resolver::iterator())
+        else if (endpoints == endpoints_end)
         {
             report_error("Failed to connect to any resolved endpoint", ec, httpclient_errorcode_context::connect);
         }
@@ -1041,28 +1039,29 @@
             m_connection->async_connect(
                 endpoint,
                 boost::bind(
-                    &asio_context::handle_connect, shared_from_this(), boost::asio::placeholders::error, ++endpoints));
+                    &asio_context::handle_connect, shared_from_this(), boost::asio::placeholders::error, ++endpoints, endpoints_end));
         }
     }
 
-    void handle_resolve(const boost::system::error_code& ec, tcp::resolver::iterator endpoints)
+    void handle_resolve(const boost::system::error_code& ec, tcp::resolver::results_type results)
     {
         if (ec)
         {
             report_error("Error resolving address", ec, httpclient_errorcode_context::connect);
         }
-        else if (endpoints == tcp::resolver::iterator())
+        else if (results.empty())
         {
             report_error("Failed to resolve address", ec, httpclient_errorcode_context::connect);
         }
         else
         {
             m_timer.reset();
-            auto endpoint = *endpoints;
+            auto iterator = results.begin();
+            auto endpoint = *iterator;
             m_connection->async_connect(
                 endpoint,
                 boost::bind(
-                    &asio_context::handle_connect, shared_from_this(), boost::asio::placeholders::error, ++endpoints));
+                    &asio_context::handle_connect, shared_from_this(), boost::asio::placeholders::error, ++iterator, results.end()));
         }
     }
 
@@ -1134,7 +1133,7 @@
         }
 #endif // CPPREST_PLATFORM_ASIO_CERT_VERIFICATION_AVAILABLE
 
-        boost::asio::ssl::rfc2818_verification rfc2818(m_connection->cn_hostname());
+        boost::asio::ssl::host_name_verification rfc2818(m_connection->cn_hostname());
        return rfc2818(preverified, verifyCtx);
     }
 
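Aside (not part of the patch): a short sketch, assuming Boost 1.73+ and OpenSSL, of host_name_verification used as a verify callback, the same swap the hunk just above (and the websocket change further down) makes for the deprecated rfc2818_verification. The host name is a placeholder.

// Sketch only: host_name_verification wired into an SSL stream's verify callback.
#include <boost/asio.hpp>
#include <boost/asio/ssl.hpp>

int main()
{
    boost::asio::io_context ctx;
    boost::asio::ssl::context tls(boost::asio::ssl::context::tls_client);
    tls.set_default_verify_paths();

    boost::asio::ssl::stream<boost::asio::ip::tcp::socket> stream(ctx, tls);
    stream.set_verify_mode(boost::asio::ssl::verify_peer);

    // Checks the peer certificate against the expected host name, the job
    // rfc2818_verification used to do; "example.com" is a placeholder.
    stream.set_verify_callback(boost::asio::ssl::host_name_verification("example.com"));
    return 0;
}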
@@ -1182,8 +1181,8 @@
 
         const auto& chunkSize = m_http_client->client_config().chunksize();
         auto readbuf = _get_readbuffer();
-        uint8_t* buf = boost::asio::buffer_cast<uint8_t*>(
-            m_body_buf.prepare(chunkSize + http::details::chunked_encoding::additional_encoding_space));
+        auto bodyBuf = m_body_buf.prepare(chunkSize + http::details::chunked_encoding::additional_encoding_space);
+        uint8_t* buf = static_cast<uint8_t*>(bodyBuf.data());
         const auto this_request = shared_from_this();
         readbuf.getn(buf + http::details::chunked_encoding::data_offset, chunkSize)
             .then([this_request, buf, chunkSize AND_CAPTURE_MEMBER_FUNCTION_POINTERS](pplx::task<size_t> op) {
@@ -1247,7 +1246,7 @@
         const auto readSize = static_cast<size_t>((std::min)(
             static_cast<uint64_t>(m_http_client->client_config().chunksize()), m_content_length - m_uploaded));
         auto readbuf = _get_readbuffer();
-        readbuf.getn(boost::asio::buffer_cast<uint8_t*>(m_body_buf.prepare(readSize)), readSize)
+        readbuf.getn(static_cast<uint8_t*>(m_body_buf.prepare(readSize).data()), readSize)
             .then([this_request AND_CAPTURE_MEMBER_FUNCTION_POINTERS](pplx::task<size_t> op) {
                 try
                 {
@@ -1639,7 +1638,7 @@
             std::vector<uint8_t> decompressed;
 
             bool boo =
-                decompress(boost::asio::buffer_cast<const uint8_t*>(m_body_buf.data()), to_read, decompressed);
+                decompress(static_cast<const uint8_t*>(m_body_buf.data().data()), to_read, decompressed);
             if (!boo)
             {
                 report_exception(std::runtime_error("Failed to decompress the response body"));
@@ -1687,7 +1686,7 @@
         }
         else
         {
-            writeBuffer.putn_nocopy(boost::asio::buffer_cast<const uint8_t*>(m_body_buf.data()), to_read)
+            writeBuffer.putn_nocopy(static_cast<const uint8_t*>(m_body_buf.data().data()), to_read)
                 .then([this_request, to_read AND_CAPTURE_MEMBER_FUNCTION_POINTERS](pplx::task<size_t> op) {
                     try
                     {
@@ -1759,7 +1758,7 @@
                 std::vector<uint8_t> decompressed;
 
                 bool boo =
-                    decompress(boost::asio::buffer_cast<const uint8_t*>(m_body_buf.data()), read_size, decompressed);
+                    decompress(static_cast<const uint8_t*>(m_body_buf.data().data()), read_size, decompressed);
                 if (!boo)
                 {
                     this_request->report_exception(std::runtime_error("Failed to decompress the response body"));
@@ -1821,7 +1820,7 @@
             }
             else
             {
-                writeBuffer.putn_nocopy(boost::asio::buffer_cast<const uint8_t*>(m_body_buf.data()), read_size)
+                writeBuffer.putn_nocopy(static_cast<const uint8_t*>(m_body_buf.data().data()), read_size)
                     .then([this_request AND_CAPTURE_MEMBER_FUNCTION_POINTERS](pplx::task<size_t> op) {
                         size_t writtenSize = 0;
                         try
@@ -1870,7 +1869,7 @@
         assert(!m_ctx.expired());
         m_state = started;
 
-        m_timer.expires_from_now(m_duration);
+        m_timer.expires_after(m_duration);
         auto ctx = m_ctx;
         m_timer.async_wait([ctx AND_CAPTURE_MEMBER_FUNCTION_POINTERS](const boost::system::error_code& ec) {
             handle_timeout(ec, ctx);
@@ -1881,7 +1880,7 @@
     {
         assert(m_state == started || m_state == timedout);
         assert(!m_ctx.expired());
-        if (m_timer.expires_from_now(m_duration) > 0)
+        if (m_timer.expires_after(m_duration) > 0)
         {
             // The existing handler was canceled so schedule a new one.
             assert(m_state == started);
--- a/Release/src/pplx/threadpool.cpp
+++ b/Release/src/pplx/threadpool.cpp
@@ -37,7 +37,7 @@
 
 struct threadpool_impl final : crossplat::threadpool
 {
-    threadpool_impl(size_t n) : crossplat::threadpool(n), m_work(m_service)
+    threadpool_impl(size_t n) : crossplat::threadpool(n), m_work(boost::asio::make_work_guard(m_service))
     {
         for (size_t i = 0; i < n; i++)
             add_thread();
@@ -84,7 +84,7 @@
     }
 
     std::vector<std::unique_ptr<boost::asio::detail::thread>> m_threads;
-    boost::asio::io_service::work m_work;
+    boost::asio::executor_work_guard<boost::asio::io_context::executor_type> m_work;
 };
 
 #if defined(_WIN32)
--- a/Release/src/websockets/client/ws_client_wspp.cpp
+++ b/Release/src/websockets/client/ws_client_wspp.cpp
@@ -225,7 +225,7 @@
                         verifyCtx, utility::conversions::to_utf8string(m_uri.host()));
                 }
 #endif
-                boost::asio::ssl::rfc2818_verification rfc2818(utility::conversions::to_utf8string(m_uri.host()));
+                boost::asio::ssl::host_name_verification rfc2818(utility::conversions::to_utf8string(m_uri.host()));
                 return rfc2818(preverified, verifyCtx);
             });
 
--- a/Release/src/pplx/pplxlinux.cpp
+++ b/Release/src/pplx/pplxlinux.cpp
@@ -35,7 +35,7 @@
 
 _PPLXIMP void linux_scheduler::schedule(TaskProc_t proc, void* param)
 {
-    crossplat::threadpool::shared_instance().service().post(boost::bind(proc, param));
+    boost::asio::post(crossplat::threadpool::shared_instance().service(), boost::bind(proc, param));
 }
 
 } // namespace details
--- a/Release/include/pplx/threadpool.h
+++ b/Release/include/pplx/threadpool.h
@@ -69,15 +69,15 @@
     CASABLANCA_DEPRECATED("Use `.service().post(task)` directly.")
     void schedule(T task)
     {
-        service().post(task);
+        boost::asio::post(service(), task);
     }
 
-    boost::asio::io_service& service() { return m_service; }
+    boost::asio::io_context& service() { return m_service; }
 
 protected:
     threadpool(size_t num_threads) : m_service(static_cast<int>(num_threads)) {}
 
-    boost::asio::io_service m_service;
+    boost::asio::io_context m_service;
 };
 
 } // namespace crossplat
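Aside (not part of the patch): a self-contained sketch of the io_context / make_work_guard / boost::asio::post combination the threadpool and scheduler hunks above migrate to. The two-thread pool size and the printed message are illustrative only.

// Sketch only: keep an io_context alive with a work guard and feed it via post().
#include <boost/asio.hpp>
#include <iostream>
#include <thread>
#include <vector>

int main()
{
    boost::asio::io_context ctx;

    // Plays the role of the removed io_service::work member: run() keeps going
    // even while the queue is momentarily empty.
    auto guard = boost::asio::make_work_guard(ctx);

    std::vector<std::thread> workers;
    for (int i = 0; i < 2; ++i)
    {
        workers.emplace_back([&ctx] { ctx.run(); });
    }

    // Free-function post() replaces the deprecated io_service::post() member.
    boost::asio::post(ctx, [] { std::cout << "task ran on the pool" << std::endl; });

    guard.reset(); // let run() return once the queued work has drained
    for (auto& t : workers)
    {
        t.join();
    }
    return 0;
}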
--- a/Release/tests/functional/pplx/pplx_test/pplx_op_test.cpp
+++ b/Release/tests/functional/pplx/pplx_test/pplx_op_test.cpp
@@ -57,7 +57,7 @@
     virtual void schedule(pplx::TaskProc_t proc, void* param)
     {
         pplx::details::atomic_increment(s_flag);
-        m_pool->service().post([=]() -> void { proc(param); });
+        boost::asio::post(m_pool->service(), [=]() -> void { proc(param); });
     }
 
 public:
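Aside (not part of the patch): a minimal sketch of the async_resolve overload the client hunks move to, where the completion handler receives a tcp::resolver::results_type range instead of a lone iterator. For brevity this sketch hands the whole range to boost::asio::async_connect; the patch instead walks the range by hand because it connects through its own asio_connection wrapper. The host and port are placeholders.

// Sketch only: async_resolve delivering a results_type range to its handler.
#include <boost/asio.hpp>
#include <iostream>

int main()
{
    using boost::asio::ip::tcp;

    boost::asio::io_context ctx;
    tcp::resolver resolver(ctx);
    tcp::socket socket(ctx);

    resolver.async_resolve(
        "example.com", "80",
        [&socket](const boost::system::error_code& ec, tcp::resolver::results_type results) {
            if (ec || results.empty())
            {
                std::cerr << "resolve failed: " << ec.message() << std::endl;
                return;
            }
            // async_connect over the whole range tries each endpoint in turn.
            boost::asio::async_connect(
                socket, results,
                [](const boost::system::error_code& connect_ec, const tcp::endpoint& ep) {
                    if (!connect_ec) std::cout << "connected to " << ep << std::endl;
                });
        });

    ctx.run();
    return 0;
}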