Unnamed: 0 | repo_id | file_path | content
---|---|---|---|
0 | repos/asio/asio/include/asio | repos/asio/asio/include/asio/detail/future.hpp | //
// detail/future.hpp
// ~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2024 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_FUTURE_HPP
#define ASIO_DETAIL_FUTURE_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#include <future>
// Even though the future header is available, libstdc++ may not implement the
// std::future class itself. However, we need to have already included the
// future header to reliably test for _GLIBCXX_HAS_GTHREADS.
#if defined(__GNUC__) && !defined(ASIO_HAS_CLANG_LIBCXX)
# if defined(_GLIBCXX_HAS_GTHREADS)
# define ASIO_HAS_STD_FUTURE_CLASS 1
# endif // defined(_GLIBCXX_HAS_GTHREADS)
#else // defined(__GNUC__) && !defined(ASIO_HAS_CLANG_LIBCXX)
# define ASIO_HAS_STD_FUTURE_CLASS 1
#endif // defined(__GNUC__) && !defined(ASIO_HAS_CLANG_LIBCXX)
#endif // ASIO_DETAIL_FUTURE_HPP
|
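The header above only defines the `ASIO_HAS_STD_FUTURE_CLASS` feature macro. As a minimal sketch (not part of Asio) of how dependent code can guard its use of `std::future` on that macro; the `make_ready_answer` function is hypothetical, for illustration only:

```cpp
#include "asio/detail/future.hpp"

#if defined(ASIO_HAS_STD_FUTURE_CLASS)
// Only compiled when the standard library actually provides std::future.
std::future<int> make_ready_answer()
{
  std::promise<int> p;
  p.set_value(42);
  return p.get_future();
}
#endif // defined(ASIO_HAS_STD_FUTURE_CLASS)
```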
0 | repos/asio/asio/include/asio | repos/asio/asio/include/asio/detail/operation.hpp | //
// detail/operation.hpp
// ~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2024 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_OPERATION_HPP
#define ASIO_DETAIL_OPERATION_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if defined(ASIO_HAS_IOCP)
# include "asio/detail/win_iocp_operation.hpp"
#else
# include "asio/detail/scheduler_operation.hpp"
#endif
namespace asio {
namespace detail {
#if defined(ASIO_HAS_IOCP)
typedef win_iocp_operation operation;
#else
typedef scheduler_operation operation;
#endif
} // namespace detail
} // namespace asio
#endif // ASIO_DETAIL_OPERATION_HPP
|
0 | repos/asio/asio/include/asio | repos/asio/asio/include/asio/detail/local_free_on_block_exit.hpp | //
// detail/local_free_on_block_exit.hpp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2024 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_LOCAL_FREE_ON_BLOCK_EXIT_HPP
#define ASIO_DETAIL_LOCAL_FREE_ON_BLOCK_EXIT_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)
#if !defined(ASIO_WINDOWS_APP)
#include "asio/detail/noncopyable.hpp"
#include "asio/detail/socket_types.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
class local_free_on_block_exit
: private noncopyable
{
public:
// Constructor takes ownership of the block of memory to be freed.
explicit local_free_on_block_exit(void* p)
: p_(p)
{
}
// Destructor frees the memory using LocalFree.
~local_free_on_block_exit()
{
::LocalFree(p_);
}
private:
void* p_;
};
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // !defined(ASIO_WINDOWS_APP)
#endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__)
#endif // ASIO_DETAIL_LOCAL_FREE_ON_BLOCK_EXIT_HPP
|
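`local_free_on_block_exit` is a small RAII guard around `::LocalFree`. A hedged, Windows-only usage sketch follows; the `message_for` helper is hypothetical and shows a typical use for such a guard: releasing a buffer that `FormatMessageW` allocates with `FORMAT_MESSAGE_ALLOCATE_BUFFER`.

```cpp
#if defined(_WIN32)
# include <windows.h>
# include <string>
# include "asio/detail/local_free_on_block_exit.hpp"

// Hypothetical helper: FormatMessageW with FORMAT_MESSAGE_ALLOCATE_BUFFER
// hands back a LocalAlloc'd buffer, which the guard frees on scope exit,
// even on an early return. LocalFree(NULL) is a harmless no-op.
std::wstring message_for(DWORD error)
{
  LPWSTR buf = 0;
  DWORD len = ::FormatMessageW(
      FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM,
      0, error, 0, reinterpret_cast<LPWSTR>(&buf), 0, 0);
  asio::detail::local_free_on_block_exit free_buf(buf);
  return len ? std::wstring(buf, len) : std::wstring();
}
#endif // defined(_WIN32)
```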
0 | repos/asio/asio/include/asio | repos/asio/asio/include/asio/detail/eventfd_select_interrupter.hpp | //
// detail/eventfd_select_interrupter.hpp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2024 Christopher M. Kohlhoff (chris at kohlhoff dot com)
// Copyright (c) 2008 Roelof Naude (roelof.naude at gmail dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_EVENTFD_SELECT_INTERRUPTER_HPP
#define ASIO_DETAIL_EVENTFD_SELECT_INTERRUPTER_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if defined(ASIO_HAS_EVENTFD)
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
class eventfd_select_interrupter
{
public:
// Constructor.
ASIO_DECL eventfd_select_interrupter();
// Destructor.
ASIO_DECL ~eventfd_select_interrupter();
// Recreate the interrupter's descriptors. Used after a fork.
ASIO_DECL void recreate();
// Interrupt the select call.
ASIO_DECL void interrupt();
// Reset the select interrupter. Returns true if the reset was successful.
ASIO_DECL bool reset();
// Get the read descriptor to be passed to select.
int read_descriptor() const
{
return read_descriptor_;
}
private:
// Open the descriptors. Throws on error.
ASIO_DECL void open_descriptors();
// Close the descriptors.
ASIO_DECL void close_descriptors();
// The read end of a connection used to interrupt the select call. This file
// descriptor is passed to select such that when it is time to stop, a single
// 64bit value will be written on the other end of the connection and this
// descriptor will become readable.
int read_descriptor_;
// The write end of a connection used to interrupt the select call. A single
// 64bit non-zero value may be written to this to wake up the select which is
// waiting for the other end to become readable. This descriptor will only
// differ from the read descriptor when a pipe is used.
int write_descriptor_;
};
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#if defined(ASIO_HEADER_ONLY)
# include "asio/detail/impl/eventfd_select_interrupter.ipp"
#endif // defined(ASIO_HEADER_ONLY)
#endif // defined(ASIO_HAS_EVENTFD)
#endif // ASIO_DETAIL_EVENTFD_SELECT_INTERRUPTER_HPP
|
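The interrupter's implementation lives in the corresponding .ipp file. Below is a minimal sketch of the underlying Linux idiom it relies on, assuming `<sys/eventfd.h>` is available: writing a non-zero 64-bit value makes the descriptor readable, which wakes any select()/poll() watching it, and reading the counter back drains it. The helper functions are hypothetical, for illustration only.

```cpp
#include <stdint.h>
#include <unistd.h>
#include <sys/eventfd.h>

int make_interrupter()    // one fd serves as both the read and write end
{
  return ::eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
}

void interrupt(int fd)    // make the descriptor readable, waking the waiter
{
  uint64_t counter = 1;
  (void)::write(fd, &counter, sizeof(counter));
}

bool reset(int fd)        // drain the counter so future waits block again
{
  uint64_t counter = 0;
  return ::read(fd, &counter, sizeof(counter)) > 0;
}
```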
0 | repos/asio/asio/include/asio | repos/asio/asio/include/asio/detail/posix_tss_ptr.hpp | //
// detail/posix_tss_ptr.hpp
// ~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2024 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_POSIX_TSS_PTR_HPP
#define ASIO_DETAIL_POSIX_TSS_PTR_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if defined(ASIO_HAS_PTHREADS)
#include <pthread.h>
#include "asio/detail/noncopyable.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
// Helper function to create thread-specific storage.
ASIO_DECL void posix_tss_ptr_create(pthread_key_t& key);
template <typename T>
class posix_tss_ptr
: private noncopyable
{
public:
// Constructor.
posix_tss_ptr()
{
posix_tss_ptr_create(tss_key_);
}
// Destructor.
~posix_tss_ptr()
{
::pthread_key_delete(tss_key_);
}
// Get the value.
operator T*() const
{
return static_cast<T*>(::pthread_getspecific(tss_key_));
}
// Set the value.
void operator=(T* value)
{
::pthread_setspecific(tss_key_, value);
}
private:
// Thread-specific storage to allow unlocked access to determine whether a
// thread is a member of the pool.
pthread_key_t tss_key_;
};
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#if defined(ASIO_HEADER_ONLY)
# include "asio/detail/impl/posix_tss_ptr.ipp"
#endif // defined(ASIO_HEADER_ONLY)
#endif // defined(ASIO_HAS_PTHREADS)
#endif // ASIO_DETAIL_POSIX_TSS_PTR_HPP
|
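`posix_tss_ptr<T>` wraps a `pthread_key_t` behind pointer-like syntax. A hedged usage sketch, assuming pthreads are available: `call_context` and the helpers are hypothetical, and the caller owns the pointed-to object since the wrapper stores only a raw pointer per thread.

```cpp
#include "asio/detail/posix_tss_ptr.hpp"

struct call_context { int depth; };

// One key per posix_tss_ptr instance; each thread sees its own stored pointer.
asio::detail::posix_tss_ptr<call_context> current_context;

void enter(call_context& ctx)
{
  current_context = &ctx;  // operator=(T*) -> pthread_setspecific
}

bool inside_call()
{
  return static_cast<call_context*>(current_context) != 0;  // operator T*()
}
```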
0 | repos/asio/asio/include/asio | repos/asio/asio/include/asio/detail/null_event.hpp | //
// detail/null_event.hpp
// ~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2024 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_NULL_EVENT_HPP
#define ASIO_DETAIL_NULL_EVENT_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#include "asio/detail/noncopyable.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
class null_event
: private noncopyable
{
public:
// Constructor.
null_event()
{
}
// Destructor.
~null_event()
{
}
// Signal the event. (Retained for backward compatibility.)
template <typename Lock>
void signal(Lock&)
{
}
// Signal all waiters.
template <typename Lock>
void signal_all(Lock&)
{
}
// Unlock the mutex and signal one waiter.
template <typename Lock>
void unlock_and_signal_one(Lock&)
{
}
// Unlock the mutex and signal one waiter who may destroy us.
template <typename Lock>
void unlock_and_signal_one_for_destruction(Lock&)
{
}
// If there's a waiter, unlock the mutex and signal it.
template <typename Lock>
bool maybe_unlock_and_signal_one(Lock&)
{
return false;
}
// Reset the event.
template <typename Lock>
void clear(Lock&)
{
}
// Wait for the event to become signalled.
template <typename Lock>
void wait(Lock&)
{
do_wait();
}
// Timed wait for the event to become signalled.
template <typename Lock>
bool wait_for_usec(Lock&, long usec)
{
do_wait_for_usec(usec);
return true;
}
private:
ASIO_DECL static void do_wait();
ASIO_DECL static void do_wait_for_usec(long usec);
};
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#if defined(ASIO_HEADER_ONLY)
# include "asio/detail/impl/null_event.ipp"
#endif // defined(ASIO_HEADER_ONLY)
#endif // ASIO_DETAIL_NULL_EVENT_HPP
|
0 | repos/asio/asio/include/asio | repos/asio/asio/include/asio/detail/scheduler.hpp | //
// detail/scheduler.hpp
// ~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2024 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_SCHEDULER_HPP
#define ASIO_DETAIL_SCHEDULER_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#include "asio/error_code.hpp"
#include "asio/execution_context.hpp"
#include "asio/detail/atomic_count.hpp"
#include "asio/detail/conditionally_enabled_event.hpp"
#include "asio/detail/conditionally_enabled_mutex.hpp"
#include "asio/detail/op_queue.hpp"
#include "asio/detail/scheduler_operation.hpp"
#include "asio/detail/scheduler_task.hpp"
#include "asio/detail/thread.hpp"
#include "asio/detail/thread_context.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
struct scheduler_thread_info;
class scheduler
: public execution_context_service_base<scheduler>,
public thread_context
{
public:
typedef scheduler_operation operation;
// The type of a function used to obtain a task instance.
typedef scheduler_task* (*get_task_func_type)(
asio::execution_context&);
// Constructor. Specifies the number of concurrent threads that are likely to
// run the scheduler. If set to 1, certain optimisations are performed.
ASIO_DECL scheduler(asio::execution_context& ctx,
int concurrency_hint = 0, bool own_thread = true,
get_task_func_type get_task = &scheduler::get_default_task);
// Destructor.
ASIO_DECL ~scheduler();
// Destroy all user-defined handler objects owned by the service.
ASIO_DECL void shutdown();
// Initialise the task, if required.
ASIO_DECL void init_task();
// Run the event loop until interrupted or no more work.
ASIO_DECL std::size_t run(asio::error_code& ec);
// Run until interrupted or one operation is performed.
ASIO_DECL std::size_t run_one(asio::error_code& ec);
// Run until timeout, interrupted, or one operation is performed.
ASIO_DECL std::size_t wait_one(
long usec, asio::error_code& ec);
// Poll for operations without blocking.
ASIO_DECL std::size_t poll(asio::error_code& ec);
// Poll for one operation without blocking.
ASIO_DECL std::size_t poll_one(asio::error_code& ec);
// Interrupt the event processing loop.
ASIO_DECL void stop();
// Determine whether the scheduler is stopped.
ASIO_DECL bool stopped() const;
// Restart in preparation for a subsequent run invocation.
ASIO_DECL void restart();
// Notify that some work has started.
void work_started()
{
++outstanding_work_;
}
// Used to compensate for a forthcoming work_finished call. Must be called
// from within a scheduler-owned thread.
ASIO_DECL void compensating_work_started();
// Notify that some work has finished.
void work_finished()
{
if (--outstanding_work_ == 0)
stop();
}
// Return whether a handler can be dispatched immediately.
ASIO_DECL bool can_dispatch();
// Capture the current exception so it can be rethrown from a run function.
ASIO_DECL void capture_current_exception();
// Request invocation of the given operation and return immediately. Assumes
// that work_started() has not yet been called for the operation.
ASIO_DECL void post_immediate_completion(
operation* op, bool is_continuation);
// Request invocation of the given operations and return immediately. Assumes
// that work_started() has not yet been called for the operations.
ASIO_DECL void post_immediate_completions(std::size_t n,
op_queue<operation>& ops, bool is_continuation);
// Request invocation of the given operation and return immediately. Assumes
// that work_started() was previously called for the operation.
ASIO_DECL void post_deferred_completion(operation* op);
// Request invocation of the given operations and return immediately. Assumes
// that work_started() was previously called for each operation.
ASIO_DECL void post_deferred_completions(op_queue<operation>& ops);
// Enqueue the given operation following a failed attempt to dispatch the
// operation for immediate invocation.
ASIO_DECL void do_dispatch(operation* op);
// Process unfinished operations as part of a shutdown operation. Assumes that
// work_started() was previously called for the operations.
ASIO_DECL void abandon_operations(op_queue<operation>& ops);
// Get the concurrency hint that was used to initialise the scheduler.
int concurrency_hint() const
{
return concurrency_hint_;
}
private:
// The mutex type used by this scheduler.
typedef conditionally_enabled_mutex mutex;
// The event type used by this scheduler.
typedef conditionally_enabled_event event;
// Structure containing thread-specific data.
typedef scheduler_thread_info thread_info;
// Run at most one operation. May block.
ASIO_DECL std::size_t do_run_one(mutex::scoped_lock& lock,
thread_info& this_thread, const asio::error_code& ec);
// Run at most one operation with a timeout. May block.
ASIO_DECL std::size_t do_wait_one(mutex::scoped_lock& lock,
thread_info& this_thread, long usec, const asio::error_code& ec);
// Poll for at most one operation.
ASIO_DECL std::size_t do_poll_one(mutex::scoped_lock& lock,
thread_info& this_thread, const asio::error_code& ec);
// Stop the task and all idle threads.
ASIO_DECL void stop_all_threads(mutex::scoped_lock& lock);
// Wake a single idle thread, or the task, and always unlock the mutex.
ASIO_DECL void wake_one_thread_and_unlock(
mutex::scoped_lock& lock);
// Get the default task.
ASIO_DECL static scheduler_task* get_default_task(
asio::execution_context& ctx);
// Helper class to run the scheduler in its own thread.
class thread_function;
friend class thread_function;
// Helper class to perform task-related operations on block exit.
struct task_cleanup;
friend struct task_cleanup;
// Helper class to call work-related operations on block exit.
struct work_cleanup;
friend struct work_cleanup;
// Whether to optimise for single-threaded use cases.
const bool one_thread_;
// Mutex to protect access to internal data.
mutable mutex mutex_;
// Event to wake up blocked threads.
event wakeup_event_;
// The task to be run by this service.
scheduler_task* task_;
// The function used to get the task.
get_task_func_type get_task_;
// Operation object to represent the position of the task in the queue.
struct task_operation : operation
{
task_operation() : operation(0) {}
} task_operation_;
// Whether the task has been interrupted.
bool task_interrupted_;
// The count of unfinished work.
atomic_count outstanding_work_;
// The queue of handlers that are ready to be delivered.
op_queue<operation> op_queue_;
// Flag to indicate that the dispatcher has been stopped.
bool stopped_;
// Flag to indicate that the dispatcher has been shut down.
bool shutdown_;
// The concurrency hint used to initialise the scheduler.
const int concurrency_hint_;
// The thread that is running the scheduler.
asio::detail::thread* thread_;
};
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#if defined(ASIO_HEADER_ONLY)
# include "asio/detail/impl/scheduler.ipp"
#endif // defined(ASIO_HEADER_ONLY)
#endif // ASIO_DETAIL_SCHEDULER_HPP
|
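The `work_started()`/`work_finished()` pair above maintains the outstanding-work count that keeps `run()` from returning while handlers are still pending; the last `work_finished()` call stops the loop. An illustrative stand-alone sketch of that idiom (not Asio's code) follows.

```cpp
#include <atomic>

class work_counter
{
public:
  void work_started() { ++outstanding_; }

  void work_finished()
  {
    // The thread that drops the count to zero requests a stop, mirroring
    // scheduler::work_finished() calling stop().
    if (--outstanding_ == 0)
      stopped_.store(true);
  }

  bool stopped() const { return stopped_.load(); }

private:
  std::atomic<long> outstanding_{0};
  std::atomic<bool> stopped_{false};
};
```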
0 | repos/asio/asio/include/asio | repos/asio/asio/include/asio/detail/reactive_socket_service.hpp | //
// detail/reactive_socket_service.hpp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2024 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_REACTIVE_SOCKET_SERVICE_HPP
#define ASIO_DETAIL_REACTIVE_SOCKET_SERVICE_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if !defined(ASIO_HAS_IOCP) \
&& !defined(ASIO_HAS_IO_URING_AS_DEFAULT)
#include "asio/buffer.hpp"
#include "asio/error.hpp"
#include "asio/execution_context.hpp"
#include "asio/socket_base.hpp"
#include "asio/detail/buffer_sequence_adapter.hpp"
#include "asio/detail/memory.hpp"
#include "asio/detail/noncopyable.hpp"
#include "asio/detail/reactive_null_buffers_op.hpp"
#include "asio/detail/reactive_socket_accept_op.hpp"
#include "asio/detail/reactive_socket_connect_op.hpp"
#include "asio/detail/reactive_socket_recvfrom_op.hpp"
#include "asio/detail/reactive_socket_sendto_op.hpp"
#include "asio/detail/reactive_socket_service_base.hpp"
#include "asio/detail/reactor.hpp"
#include "asio/detail/reactor_op.hpp"
#include "asio/detail/socket_holder.hpp"
#include "asio/detail/socket_ops.hpp"
#include "asio/detail/socket_types.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
template <typename Protocol>
class reactive_socket_service :
public execution_context_service_base<reactive_socket_service<Protocol>>,
public reactive_socket_service_base
{
public:
// The protocol type.
typedef Protocol protocol_type;
// The endpoint type.
typedef typename Protocol::endpoint endpoint_type;
// The native type of a socket.
typedef socket_type native_handle_type;
// The implementation type of the socket.
struct implementation_type :
reactive_socket_service_base::base_implementation_type
{
// Default constructor.
implementation_type()
: protocol_(endpoint_type().protocol())
{
}
// The protocol associated with the socket.
protocol_type protocol_;
};
// Constructor.
reactive_socket_service(execution_context& context)
: execution_context_service_base<
reactive_socket_service<Protocol>>(context),
reactive_socket_service_base(context)
{
}
// Destroy all user-defined handler objects owned by the service.
void shutdown()
{
this->base_shutdown();
}
// Move-construct a new socket implementation.
void move_construct(implementation_type& impl,
implementation_type& other_impl) noexcept
{
this->base_move_construct(impl, other_impl);
impl.protocol_ = other_impl.protocol_;
other_impl.protocol_ = endpoint_type().protocol();
}
// Move-assign from another socket implementation.
void move_assign(implementation_type& impl,
reactive_socket_service_base& other_service,
implementation_type& other_impl)
{
this->base_move_assign(impl, other_service, other_impl);
impl.protocol_ = other_impl.protocol_;
other_impl.protocol_ = endpoint_type().protocol();
}
// Move-construct a new socket implementation from another protocol type.
template <typename Protocol1>
void converting_move_construct(implementation_type& impl,
reactive_socket_service<Protocol1>&,
typename reactive_socket_service<
Protocol1>::implementation_type& other_impl)
{
this->base_move_construct(impl, other_impl);
impl.protocol_ = protocol_type(other_impl.protocol_);
other_impl.protocol_ = typename Protocol1::endpoint().protocol();
}
// Open a new socket implementation.
asio::error_code open(implementation_type& impl,
const protocol_type& protocol, asio::error_code& ec)
{
if (!do_open(impl, protocol.family(),
protocol.type(), protocol.protocol(), ec))
impl.protocol_ = protocol;
ASIO_ERROR_LOCATION(ec);
return ec;
}
// Assign a native socket to a socket implementation.
asio::error_code assign(implementation_type& impl,
const protocol_type& protocol, const native_handle_type& native_socket,
asio::error_code& ec)
{
if (!do_assign(impl, protocol.type(), native_socket, ec))
impl.protocol_ = protocol;
ASIO_ERROR_LOCATION(ec);
return ec;
}
// Get the native socket representation.
native_handle_type native_handle(implementation_type& impl)
{
return impl.socket_;
}
// Bind the socket to the specified local endpoint.
asio::error_code bind(implementation_type& impl,
const endpoint_type& endpoint, asio::error_code& ec)
{
socket_ops::bind(impl.socket_, endpoint.data(), endpoint.size(), ec);
ASIO_ERROR_LOCATION(ec);
return ec;
}
// Set a socket option.
template <typename Option>
asio::error_code set_option(implementation_type& impl,
const Option& option, asio::error_code& ec)
{
socket_ops::setsockopt(impl.socket_, impl.state_,
option.level(impl.protocol_), option.name(impl.protocol_),
option.data(impl.protocol_), option.size(impl.protocol_), ec);
ASIO_ERROR_LOCATION(ec);
return ec;
}
// Get a socket option.
template <typename Option>
asio::error_code get_option(const implementation_type& impl,
Option& option, asio::error_code& ec) const
{
std::size_t size = option.size(impl.protocol_);
socket_ops::getsockopt(impl.socket_, impl.state_,
option.level(impl.protocol_), option.name(impl.protocol_),
option.data(impl.protocol_), &size, ec);
if (!ec)
option.resize(impl.protocol_, size);
ASIO_ERROR_LOCATION(ec);
return ec;
}
// Get the local endpoint.
endpoint_type local_endpoint(const implementation_type& impl,
asio::error_code& ec) const
{
endpoint_type endpoint;
std::size_t addr_len = endpoint.capacity();
if (socket_ops::getsockname(impl.socket_, endpoint.data(), &addr_len, ec))
{
ASIO_ERROR_LOCATION(ec);
return endpoint_type();
}
endpoint.resize(addr_len);
return endpoint;
}
// Get the remote endpoint.
endpoint_type remote_endpoint(const implementation_type& impl,
asio::error_code& ec) const
{
endpoint_type endpoint;
std::size_t addr_len = endpoint.capacity();
if (socket_ops::getpeername(impl.socket_,
endpoint.data(), &addr_len, false, ec))
{
ASIO_ERROR_LOCATION(ec);
return endpoint_type();
}
endpoint.resize(addr_len);
return endpoint;
}
// Disable sends or receives on the socket.
asio::error_code shutdown(base_implementation_type& impl,
socket_base::shutdown_type what, asio::error_code& ec)
{
socket_ops::shutdown(impl.socket_, what, ec);
ASIO_ERROR_LOCATION(ec);
return ec;
}
// Send a datagram to the specified endpoint. Returns the number of bytes
// sent.
template <typename ConstBufferSequence>
size_t send_to(implementation_type& impl, const ConstBufferSequence& buffers,
const endpoint_type& destination, socket_base::message_flags flags,
asio::error_code& ec)
{
typedef buffer_sequence_adapter<asio::const_buffer,
ConstBufferSequence> bufs_type;
size_t n;
if (bufs_type::is_single_buffer)
{
n = socket_ops::sync_sendto1(impl.socket_, impl.state_,
bufs_type::first(buffers).data(),
bufs_type::first(buffers).size(), flags,
destination.data(), destination.size(), ec);
}
else
{
bufs_type bufs(buffers);
n = socket_ops::sync_sendto(impl.socket_, impl.state_,
bufs.buffers(), bufs.count(), flags,
destination.data(), destination.size(), ec);
}
ASIO_ERROR_LOCATION(ec);
return n;
}
// Wait until data can be sent without blocking.
size_t send_to(implementation_type& impl, const null_buffers&,
const endpoint_type&, socket_base::message_flags,
asio::error_code& ec)
{
// Wait for socket to become ready.
socket_ops::poll_write(impl.socket_, impl.state_, -1, ec);
ASIO_ERROR_LOCATION(ec);
return 0;
}
// Start an asynchronous send. The data being sent must be valid for the
// lifetime of the asynchronous operation.
template <typename ConstBufferSequence, typename Handler, typename IoExecutor>
void async_send_to(implementation_type& impl,
const ConstBufferSequence& buffers,
const endpoint_type& destination, socket_base::message_flags flags,
Handler& handler, const IoExecutor& io_ex)
{
bool is_continuation =
asio_handler_cont_helpers::is_continuation(handler);
associated_cancellation_slot_t<Handler> slot
= asio::get_associated_cancellation_slot(handler);
// Allocate and construct an operation to wrap the handler.
typedef reactive_socket_sendto_op<ConstBufferSequence,
endpoint_type, Handler, IoExecutor> op;
typename op::ptr p = { asio::detail::addressof(handler),
op::ptr::allocate(handler), 0 };
p.p = new (p.v) op(success_ec_, impl.socket_,
buffers, destination, flags, handler, io_ex);
// Optionally register for per-operation cancellation.
if (slot.is_connected())
{
p.p->cancellation_key_ =
&slot.template emplace<reactor_op_cancellation>(
&reactor_, &impl.reactor_data_, impl.socket_, reactor::write_op);
}
ASIO_HANDLER_CREATION((reactor_.context(), *p.p, "socket",
&impl, impl.socket_, "async_send_to"));
start_op(impl, reactor::write_op, p.p,
is_continuation, true, false, true, &io_ex, 0);
p.v = p.p = 0;
}
// Start an asynchronous wait until data can be sent without blocking.
template <typename Handler, typename IoExecutor>
void async_send_to(implementation_type& impl, const null_buffers&,
const endpoint_type&, socket_base::message_flags,
Handler& handler, const IoExecutor& io_ex)
{
bool is_continuation =
asio_handler_cont_helpers::is_continuation(handler);
associated_cancellation_slot_t<Handler> slot
= asio::get_associated_cancellation_slot(handler);
// Allocate and construct an operation to wrap the handler.
typedef reactive_null_buffers_op<Handler, IoExecutor> op;
typename op::ptr p = { asio::detail::addressof(handler),
op::ptr::allocate(handler), 0 };
p.p = new (p.v) op(success_ec_, handler, io_ex);
// Optionally register for per-operation cancellation.
if (slot.is_connected())
{
p.p->cancellation_key_ =
&slot.template emplace<reactor_op_cancellation>(
&reactor_, &impl.reactor_data_, impl.socket_, reactor::write_op);
}
ASIO_HANDLER_CREATION((reactor_.context(), *p.p, "socket",
&impl, impl.socket_, "async_send_to(null_buffers)"));
start_op(impl, reactor::write_op, p.p,
is_continuation, false, false, false, &io_ex, 0);
p.v = p.p = 0;
}
// Receive a datagram with the endpoint of the sender. Returns the number of
// bytes received.
template <typename MutableBufferSequence>
size_t receive_from(implementation_type& impl,
const MutableBufferSequence& buffers,
endpoint_type& sender_endpoint, socket_base::message_flags flags,
asio::error_code& ec)
{
typedef buffer_sequence_adapter<asio::mutable_buffer,
MutableBufferSequence> bufs_type;
std::size_t addr_len = sender_endpoint.capacity();
std::size_t n;
if (bufs_type::is_single_buffer)
{
n = socket_ops::sync_recvfrom1(impl.socket_, impl.state_,
bufs_type::first(buffers).data(), bufs_type::first(buffers).size(),
flags, sender_endpoint.data(), &addr_len, ec);
}
else
{
bufs_type bufs(buffers);
n = socket_ops::sync_recvfrom(impl.socket_, impl.state_, bufs.buffers(),
bufs.count(), flags, sender_endpoint.data(), &addr_len, ec);
}
if (!ec)
sender_endpoint.resize(addr_len);
ASIO_ERROR_LOCATION(ec);
return n;
}
// Wait until data can be received without blocking.
size_t receive_from(implementation_type& impl, const null_buffers&,
endpoint_type& sender_endpoint, socket_base::message_flags,
asio::error_code& ec)
{
// Wait for socket to become ready.
socket_ops::poll_read(impl.socket_, impl.state_, -1, ec);
// Reset endpoint since it can be given no sensible value at this time.
sender_endpoint = endpoint_type();
ASIO_ERROR_LOCATION(ec);
return 0;
}
// Start an asynchronous receive. The buffer for the data being received and
// the sender_endpoint object must both be valid for the lifetime of the
// asynchronous operation.
template <typename MutableBufferSequence,
typename Handler, typename IoExecutor>
void async_receive_from(implementation_type& impl,
const MutableBufferSequence& buffers, endpoint_type& sender_endpoint,
socket_base::message_flags flags, Handler& handler,
const IoExecutor& io_ex)
{
bool is_continuation =
asio_handler_cont_helpers::is_continuation(handler);
associated_cancellation_slot_t<Handler> slot
= asio::get_associated_cancellation_slot(handler);
// Allocate and construct an operation to wrap the handler.
typedef reactive_socket_recvfrom_op<MutableBufferSequence,
endpoint_type, Handler, IoExecutor> op;
typename op::ptr p = { asio::detail::addressof(handler),
op::ptr::allocate(handler), 0 };
int protocol = impl.protocol_.type();
p.p = new (p.v) op(success_ec_, impl.socket_, protocol,
buffers, sender_endpoint, flags, handler, io_ex);
// Optionally register for per-operation cancellation.
if (slot.is_connected())
{
p.p->cancellation_key_ =
&slot.template emplace<reactor_op_cancellation>(
&reactor_, &impl.reactor_data_, impl.socket_, reactor::read_op);
}
ASIO_HANDLER_CREATION((reactor_.context(), *p.p, "socket",
&impl, impl.socket_, "async_receive_from"));
start_op(impl,
(flags & socket_base::message_out_of_band)
? reactor::except_op : reactor::read_op,
p.p, is_continuation, true, false, true, &io_ex, 0);
p.v = p.p = 0;
}
// Wait until data can be received without blocking.
template <typename Handler, typename IoExecutor>
void async_receive_from(implementation_type& impl, const null_buffers&,
endpoint_type& sender_endpoint, socket_base::message_flags flags,
Handler& handler, const IoExecutor& io_ex)
{
bool is_continuation =
asio_handler_cont_helpers::is_continuation(handler);
associated_cancellation_slot_t<Handler> slot
= asio::get_associated_cancellation_slot(handler);
// Allocate and construct an operation to wrap the handler.
typedef reactive_null_buffers_op<Handler, IoExecutor> op;
typename op::ptr p = { asio::detail::addressof(handler),
op::ptr::allocate(handler), 0 };
p.p = new (p.v) op(success_ec_, handler, io_ex);
// Optionally register for per-operation cancellation.
if (slot.is_connected())
{
p.p->cancellation_key_ =
&slot.template emplace<reactor_op_cancellation>(
&reactor_, &impl.reactor_data_, impl.socket_, reactor::read_op);
}
ASIO_HANDLER_CREATION((reactor_.context(), *p.p, "socket",
&impl, impl.socket_, "async_receive_from(null_buffers)"));
// Reset endpoint since it can be given no sensible value at this time.
sender_endpoint = endpoint_type();
start_op(impl,
(flags & socket_base::message_out_of_band)
? reactor::except_op : reactor::read_op,
p.p, is_continuation, false, false, false, &io_ex, 0);
p.v = p.p = 0;
}
// Accept a new connection.
template <typename Socket>
asio::error_code accept(implementation_type& impl,
Socket& peer, endpoint_type* peer_endpoint, asio::error_code& ec)
{
// We cannot accept a socket that is already open.
if (peer.is_open())
{
ec = asio::error::already_open;
ASIO_ERROR_LOCATION(ec);
return ec;
}
std::size_t addr_len = peer_endpoint ? peer_endpoint->capacity() : 0;
socket_holder new_socket(socket_ops::sync_accept(impl.socket_,
impl.state_, peer_endpoint ? peer_endpoint->data() : 0,
peer_endpoint ? &addr_len : 0, ec));
// On success, assign new connection to peer socket object.
if (new_socket.get() != invalid_socket)
{
if (peer_endpoint)
peer_endpoint->resize(addr_len);
peer.assign(impl.protocol_, new_socket.get(), ec);
if (!ec)
new_socket.release();
}
ASIO_ERROR_LOCATION(ec);
return ec;
}
// Start an asynchronous accept. The peer and peer_endpoint objects must be
// valid until the accept's handler is invoked.
template <typename Socket, typename Handler, typename IoExecutor>
void async_accept(implementation_type& impl, Socket& peer,
endpoint_type* peer_endpoint, Handler& handler, const IoExecutor& io_ex)
{
bool is_continuation =
asio_handler_cont_helpers::is_continuation(handler);
associated_cancellation_slot_t<Handler> slot
= asio::get_associated_cancellation_slot(handler);
// Allocate and construct an operation to wrap the handler.
typedef reactive_socket_accept_op<Socket, Protocol, Handler, IoExecutor> op;
typename op::ptr p = { asio::detail::addressof(handler),
op::ptr::allocate(handler), 0 };
p.p = new (p.v) op(success_ec_, impl.socket_, impl.state_,
peer, impl.protocol_, peer_endpoint, handler, io_ex);
// Optionally register for per-operation cancellation.
if (slot.is_connected() && !peer.is_open())
{
p.p->cancellation_key_ =
&slot.template emplace<reactor_op_cancellation>(
&reactor_, &impl.reactor_data_, impl.socket_, reactor::read_op);
}
ASIO_HANDLER_CREATION((reactor_.context(), *p.p, "socket",
&impl, impl.socket_, "async_accept"));
start_accept_op(impl, p.p, is_continuation, peer.is_open(), &io_ex, 0);
p.v = p.p = 0;
}
// Start an asynchronous accept. The peer_endpoint object must be valid until
// the accept's handler is invoked.
template <typename PeerIoExecutor, typename Handler, typename IoExecutor>
void async_move_accept(implementation_type& impl,
const PeerIoExecutor& peer_io_ex, endpoint_type* peer_endpoint,
Handler& handler, const IoExecutor& io_ex)
{
bool is_continuation =
asio_handler_cont_helpers::is_continuation(handler);
associated_cancellation_slot_t<Handler> slot
= asio::get_associated_cancellation_slot(handler);
// Allocate and construct an operation to wrap the handler.
typedef reactive_socket_move_accept_op<Protocol,
PeerIoExecutor, Handler, IoExecutor> op;
typename op::ptr p = { asio::detail::addressof(handler),
op::ptr::allocate(handler), 0 };
p.p = new (p.v) op(success_ec_, peer_io_ex, impl.socket_,
impl.state_, impl.protocol_, peer_endpoint, handler, io_ex);
// Optionally register for per-operation cancellation.
if (slot.is_connected())
{
p.p->cancellation_key_ =
&slot.template emplace<reactor_op_cancellation>(
&reactor_, &impl.reactor_data_, impl.socket_, reactor::read_op);
}
ASIO_HANDLER_CREATION((reactor_.context(), *p.p, "socket",
&impl, impl.socket_, "async_accept"));
start_accept_op(impl, p.p, is_continuation, false, &io_ex, 0);
p.v = p.p = 0;
}
// Connect the socket to the specified endpoint.
asio::error_code connect(implementation_type& impl,
const endpoint_type& peer_endpoint, asio::error_code& ec)
{
socket_ops::sync_connect(impl.socket_,
peer_endpoint.data(), peer_endpoint.size(), ec);
ASIO_ERROR_LOCATION(ec);
return ec;
}
// Start an asynchronous connect.
template <typename Handler, typename IoExecutor>
void async_connect(implementation_type& impl,
const endpoint_type& peer_endpoint,
Handler& handler, const IoExecutor& io_ex)
{
bool is_continuation =
asio_handler_cont_helpers::is_continuation(handler);
associated_cancellation_slot_t<Handler> slot
= asio::get_associated_cancellation_slot(handler);
// Allocate and construct an operation to wrap the handler.
typedef reactive_socket_connect_op<Handler, IoExecutor> op;
typename op::ptr p = { asio::detail::addressof(handler),
op::ptr::allocate(handler), 0 };
p.p = new (p.v) op(success_ec_, impl.socket_, handler, io_ex);
// Optionally register for per-operation cancellation.
if (slot.is_connected())
{
p.p->cancellation_key_ =
&slot.template emplace<reactor_op_cancellation>(
&reactor_, &impl.reactor_data_, impl.socket_, reactor::connect_op);
}
ASIO_HANDLER_CREATION((reactor_.context(), *p.p, "socket",
&impl, impl.socket_, "async_connect"));
start_connect_op(impl, p.p, is_continuation,
peer_endpoint.data(), peer_endpoint.size(), &io_ex, 0);
p.v = p.p = 0;
}
};
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // !defined(ASIO_HAS_IOCP)
// && !defined(ASIO_HAS_IO_URING_AS_DEFAULT)
#endif // ASIO_DETAIL_REACTIVE_SOCKET_SERVICE_HPP
|
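`reactive_socket_service<Protocol>` is the detail-layer backend behind the public socket operations. A hedged user-level example of the `async_send_to` path through the public Asio API (the destination endpoint and port are chosen arbitrarily):

```cpp
#include <asio.hpp>
#include <iostream>

int main()
{
  asio::io_context ctx;
  asio::ip::udp::socket sock(ctx, asio::ip::udp::v4());
  asio::ip::udp::endpoint dest(asio::ip::make_address("127.0.0.1"), 9999);

  char data[] = "ping";
  sock.async_send_to(asio::buffer(data, 4), dest,
      [](std::error_code ec, std::size_t n)
      {
        if (ec)
          std::cout << "send failed: " << ec.message() << "\n";
        else
          std::cout << n << " bytes sent\n";
      });

  ctx.run();
}
```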
0 | repos/asio/asio/include/asio | repos/asio/asio/include/asio/detail/io_uring_socket_service.hpp | //
// detail/io_uring_socket_service.hpp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2024 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IO_URING_SOCKET_SERVICE_HPP
#define ASIO_DETAIL_IO_URING_SOCKET_SERVICE_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if defined(ASIO_HAS_IO_URING)
#include "asio/buffer.hpp"
#include "asio/error.hpp"
#include "asio/execution_context.hpp"
#include "asio/socket_base.hpp"
#include "asio/detail/buffer_sequence_adapter.hpp"
#include "asio/detail/memory.hpp"
#include "asio/detail/noncopyable.hpp"
#include "asio/detail/io_uring_null_buffers_op.hpp"
#include "asio/detail/io_uring_service.hpp"
#include "asio/detail/io_uring_socket_accept_op.hpp"
#include "asio/detail/io_uring_socket_connect_op.hpp"
#include "asio/detail/io_uring_socket_recvfrom_op.hpp"
#include "asio/detail/io_uring_socket_sendto_op.hpp"
#include "asio/detail/io_uring_socket_service_base.hpp"
#include "asio/detail/socket_holder.hpp"
#include "asio/detail/socket_ops.hpp"
#include "asio/detail/socket_types.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
template <typename Protocol>
class io_uring_socket_service :
public execution_context_service_base<io_uring_socket_service<Protocol>>,
public io_uring_socket_service_base
{
public:
// The protocol type.
typedef Protocol protocol_type;
// The endpoint type.
typedef typename Protocol::endpoint endpoint_type;
// The native type of a socket.
typedef socket_type native_handle_type;
// The implementation type of the socket.
struct implementation_type :
io_uring_socket_service_base::base_implementation_type
{
// Default constructor.
implementation_type()
: protocol_(endpoint_type().protocol())
{
}
// The protocol associated with the socket.
protocol_type protocol_;
};
// Constructor.
io_uring_socket_service(execution_context& context)
: execution_context_service_base<
io_uring_socket_service<Protocol>>(context),
io_uring_socket_service_base(context)
{
}
// Destroy all user-defined handler objects owned by the service.
void shutdown()
{
this->base_shutdown();
}
// Move-construct a new socket implementation.
void move_construct(implementation_type& impl,
implementation_type& other_impl) noexcept
{
this->base_move_construct(impl, other_impl);
impl.protocol_ = other_impl.protocol_;
other_impl.protocol_ = endpoint_type().protocol();
}
// Move-assign from another socket implementation.
void move_assign(implementation_type& impl,
io_uring_socket_service_base& other_service,
implementation_type& other_impl)
{
this->base_move_assign(impl, other_service, other_impl);
impl.protocol_ = other_impl.protocol_;
other_impl.protocol_ = endpoint_type().protocol();
}
// Move-construct a new socket implementation from another protocol type.
template <typename Protocol1>
void converting_move_construct(implementation_type& impl,
io_uring_socket_service<Protocol1>&,
typename io_uring_socket_service<
Protocol1>::implementation_type& other_impl)
{
this->base_move_construct(impl, other_impl);
impl.protocol_ = protocol_type(other_impl.protocol_);
other_impl.protocol_ = typename Protocol1::endpoint().protocol();
}
// Open a new socket implementation.
asio::error_code open(implementation_type& impl,
const protocol_type& protocol, asio::error_code& ec)
{
if (!do_open(impl, protocol.family(),
protocol.type(), protocol.protocol(), ec))
impl.protocol_ = protocol;
ASIO_ERROR_LOCATION(ec);
return ec;
}
// Assign a native socket to a socket implementation.
asio::error_code assign(implementation_type& impl,
const protocol_type& protocol, const native_handle_type& native_socket,
asio::error_code& ec)
{
if (!do_assign(impl, protocol.type(), native_socket, ec))
impl.protocol_ = protocol;
ASIO_ERROR_LOCATION(ec);
return ec;
}
// Get the native socket representation.
native_handle_type native_handle(implementation_type& impl)
{
return impl.socket_;
}
// Bind the socket to the specified local endpoint.
asio::error_code bind(implementation_type& impl,
const endpoint_type& endpoint, asio::error_code& ec)
{
socket_ops::bind(impl.socket_, endpoint.data(), endpoint.size(), ec);
ASIO_ERROR_LOCATION(ec);
return ec;
}
// Set a socket option.
template <typename Option>
asio::error_code set_option(implementation_type& impl,
const Option& option, asio::error_code& ec)
{
socket_ops::setsockopt(impl.socket_, impl.state_,
option.level(impl.protocol_), option.name(impl.protocol_),
option.data(impl.protocol_), option.size(impl.protocol_), ec);
ASIO_ERROR_LOCATION(ec);
return ec;
}
// Get a socket option.
template <typename Option>
asio::error_code get_option(const implementation_type& impl,
Option& option, asio::error_code& ec) const
{
std::size_t size = option.size(impl.protocol_);
socket_ops::getsockopt(impl.socket_, impl.state_,
option.level(impl.protocol_), option.name(impl.protocol_),
option.data(impl.protocol_), &size, ec);
if (!ec)
option.resize(impl.protocol_, size);
ASIO_ERROR_LOCATION(ec);
return ec;
}
// Get the local endpoint.
endpoint_type local_endpoint(const implementation_type& impl,
asio::error_code& ec) const
{
endpoint_type endpoint;
std::size_t addr_len = endpoint.capacity();
if (socket_ops::getsockname(impl.socket_, endpoint.data(), &addr_len, ec))
{
ASIO_ERROR_LOCATION(ec);
return endpoint_type();
}
endpoint.resize(addr_len);
return endpoint;
}
// Get the remote endpoint.
endpoint_type remote_endpoint(const implementation_type& impl,
asio::error_code& ec) const
{
endpoint_type endpoint;
std::size_t addr_len = endpoint.capacity();
if (socket_ops::getpeername(impl.socket_,
endpoint.data(), &addr_len, false, ec))
{
ASIO_ERROR_LOCATION(ec);
return endpoint_type();
}
endpoint.resize(addr_len);
return endpoint;
}
// Disable sends or receives on the socket.
asio::error_code shutdown(base_implementation_type& impl,
socket_base::shutdown_type what, asio::error_code& ec)
{
socket_ops::shutdown(impl.socket_, what, ec);
ASIO_ERROR_LOCATION(ec);
return ec;
}
// Send a datagram to the specified endpoint. Returns the number of bytes
// sent.
template <typename ConstBufferSequence>
size_t send_to(implementation_type& impl, const ConstBufferSequence& buffers,
const endpoint_type& destination, socket_base::message_flags flags,
asio::error_code& ec)
{
typedef buffer_sequence_adapter<asio::const_buffer,
ConstBufferSequence> bufs_type;
size_t n;
if (bufs_type::is_single_buffer)
{
n = socket_ops::sync_sendto1(impl.socket_, impl.state_,
bufs_type::first(buffers).data(),
bufs_type::first(buffers).size(), flags,
destination.data(), destination.size(), ec);
}
else
{
bufs_type bufs(buffers);
n = socket_ops::sync_sendto(impl.socket_, impl.state_,
bufs.buffers(), bufs.count(), flags,
destination.data(), destination.size(), ec);
}
ASIO_ERROR_LOCATION(ec);
return n;
}
// Wait until data can be sent without blocking.
size_t send_to(implementation_type& impl, const null_buffers&,
const endpoint_type&, socket_base::message_flags,
asio::error_code& ec)
{
// Wait for socket to become ready.
socket_ops::poll_write(impl.socket_, impl.state_, -1, ec);
ASIO_ERROR_LOCATION(ec);
return 0;
}
// Start an asynchronous send. The data being sent must be valid for the
// lifetime of the asynchronous operation.
template <typename ConstBufferSequence, typename Handler, typename IoExecutor>
void async_send_to(implementation_type& impl,
const ConstBufferSequence& buffers,
const endpoint_type& destination, socket_base::message_flags flags,
Handler& handler, const IoExecutor& io_ex)
{
bool is_continuation =
asio_handler_cont_helpers::is_continuation(handler);
associated_cancellation_slot_t<Handler> slot
= asio::get_associated_cancellation_slot(handler);
// Allocate and construct an operation to wrap the handler.
typedef io_uring_socket_sendto_op<ConstBufferSequence,
endpoint_type, Handler, IoExecutor> op;
typename op::ptr p = { asio::detail::addressof(handler),
op::ptr::allocate(handler), 0 };
p.p = new (p.v) op(success_ec_, impl.socket_, impl.state_,
buffers, destination, flags, handler, io_ex);
// Optionally register for per-operation cancellation.
if (slot.is_connected())
{
p.p->cancellation_key_ =
&slot.template emplace<io_uring_op_cancellation>(&io_uring_service_,
&impl.io_object_data_, io_uring_service::write_op);
}
ASIO_HANDLER_CREATION((io_uring_service_.context(), *p.p,
"socket", &impl, impl.socket_, "async_send_to"));
start_op(impl, io_uring_service::write_op, p.p, is_continuation, false);
p.v = p.p = 0;
}
// Start an asynchronous wait until data can be sent without blocking.
template <typename Handler, typename IoExecutor>
void async_send_to(implementation_type& impl, const null_buffers&,
const endpoint_type&, socket_base::message_flags,
Handler& handler, const IoExecutor& io_ex)
{
bool is_continuation =
asio_handler_cont_helpers::is_continuation(handler);
associated_cancellation_slot_t<Handler> slot
= asio::get_associated_cancellation_slot(handler);
// Allocate and construct an operation to wrap the handler.
typedef io_uring_null_buffers_op<Handler, IoExecutor> op;
typename op::ptr p = { asio::detail::addressof(handler),
op::ptr::allocate(handler), 0 };
p.p = new (p.v) op(success_ec_, impl.socket_, POLLOUT, handler, io_ex);
// Optionally register for per-operation cancellation.
if (slot.is_connected())
{
p.p->cancellation_key_ =
&slot.template emplace<io_uring_op_cancellation>(&io_uring_service_,
&impl.io_object_data_, io_uring_service::write_op);
}
ASIO_HANDLER_CREATION((io_uring_service_.context(), *p.p, "socket",
&impl, impl.socket_, "async_send_to(null_buffers)"));
start_op(impl, io_uring_service::write_op, p.p, is_continuation, false);
p.v = p.p = 0;
}
// Receive a datagram with the endpoint of the sender. Returns the number of
// bytes received.
template <typename MutableBufferSequence>
size_t receive_from(implementation_type& impl,
const MutableBufferSequence& buffers,
endpoint_type& sender_endpoint, socket_base::message_flags flags,
asio::error_code& ec)
{
typedef buffer_sequence_adapter<asio::mutable_buffer,
MutableBufferSequence> bufs_type;
std::size_t addr_len = sender_endpoint.capacity();
std::size_t n;
if (bufs_type::is_single_buffer)
{
n = socket_ops::sync_recvfrom1(impl.socket_, impl.state_,
bufs_type::first(buffers).data(), bufs_type::first(buffers).size(),
flags, sender_endpoint.data(), &addr_len, ec);
}
else
{
bufs_type bufs(buffers);
n = socket_ops::sync_recvfrom(impl.socket_, impl.state_, bufs.buffers(),
bufs.count(), flags, sender_endpoint.data(), &addr_len, ec);
}
if (!ec)
sender_endpoint.resize(addr_len);
ASIO_ERROR_LOCATION(ec);
return n;
}
// Wait until data can be received without blocking.
size_t receive_from(implementation_type& impl, const null_buffers&,
endpoint_type& sender_endpoint, socket_base::message_flags,
asio::error_code& ec)
{
// Wait for socket to become ready.
socket_ops::poll_read(impl.socket_, impl.state_, -1, ec);
// Reset endpoint since it can be given no sensible value at this time.
sender_endpoint = endpoint_type();
ASIO_ERROR_LOCATION(ec);
return 0;
}
// Start an asynchronous receive. The buffer for the data being received and
// the sender_endpoint object must both be valid for the lifetime of the
// asynchronous operation.
template <typename MutableBufferSequence,
typename Handler, typename IoExecutor>
void async_receive_from(implementation_type& impl,
const MutableBufferSequence& buffers, endpoint_type& sender_endpoint,
socket_base::message_flags flags, Handler& handler,
const IoExecutor& io_ex)
{
bool is_continuation =
asio_handler_cont_helpers::is_continuation(handler);
int op_type = (flags & socket_base::message_out_of_band)
? io_uring_service::except_op : io_uring_service::read_op;
associated_cancellation_slot_t<Handler> slot
= asio::get_associated_cancellation_slot(handler);
// Allocate and construct an operation to wrap the handler.
typedef io_uring_socket_recvfrom_op<MutableBufferSequence,
endpoint_type, Handler, IoExecutor> op;
typename op::ptr p = { asio::detail::addressof(handler),
op::ptr::allocate(handler), 0 };
p.p = new (p.v) op(success_ec_, impl.socket_, impl.state_,
buffers, sender_endpoint, flags, handler, io_ex);
// Optionally register for per-operation cancellation.
if (slot.is_connected())
{
p.p->cancellation_key_ =
&slot.template emplace<io_uring_op_cancellation>(
&io_uring_service_, &impl.io_object_data_, op_type);
}
ASIO_HANDLER_CREATION((io_uring_service_.context(), *p.p,
"socket", &impl, impl.socket_, "async_receive_from"));
start_op(impl, op_type, p.p, is_continuation, false);
p.v = p.p = 0;
}
// Wait until data can be received without blocking.
template <typename Handler, typename IoExecutor>
void async_receive_from(implementation_type& impl, const null_buffers&,
endpoint_type& sender_endpoint, socket_base::message_flags flags,
Handler& handler, const IoExecutor& io_ex)
{
bool is_continuation =
asio_handler_cont_helpers::is_continuation(handler);
int op_type;
int poll_flags;
if ((flags & socket_base::message_out_of_band) != 0)
{
op_type = io_uring_service::except_op;
poll_flags = POLLPRI;
}
else
{
op_type = io_uring_service::read_op;
poll_flags = POLLIN;
}
associated_cancellation_slot_t<Handler> slot
= asio::get_associated_cancellation_slot(handler);
// Allocate and construct an operation to wrap the handler.
typedef io_uring_null_buffers_op<Handler, IoExecutor> op;
typename op::ptr p = { asio::detail::addressof(handler),
op::ptr::allocate(handler), 0 };
p.p = new (p.v) op(success_ec_, impl.socket_, poll_flags, handler, io_ex);
// Optionally register for per-operation cancellation.
if (slot.is_connected())
{
p.p->cancellation_key_ =
&slot.template emplace<io_uring_op_cancellation>(
&io_uring_service_, &impl.io_object_data_, op_type);
}
ASIO_HANDLER_CREATION((io_uring_service_.context(), *p.p, "socket",
&impl, impl.socket_, "async_receive_from(null_buffers)"));
// Reset endpoint since it can be given no sensible value at this time.
sender_endpoint = endpoint_type();
start_op(impl, op_type, p.p, is_continuation, false);
p.v = p.p = 0;
}
// Accept a new connection.
template <typename Socket>
asio::error_code accept(implementation_type& impl,
Socket& peer, endpoint_type* peer_endpoint, asio::error_code& ec)
{
// We cannot accept a socket that is already open.
if (peer.is_open())
{
ec = asio::error::already_open;
ASIO_ERROR_LOCATION(ec);
return ec;
}
std::size_t addr_len = peer_endpoint ? peer_endpoint->capacity() : 0;
socket_holder new_socket(socket_ops::sync_accept(impl.socket_,
impl.state_, peer_endpoint ? peer_endpoint->data() : 0,
peer_endpoint ? &addr_len : 0, ec));
// On success, assign new connection to peer socket object.
if (new_socket.get() != invalid_socket)
{
if (peer_endpoint)
peer_endpoint->resize(addr_len);
peer.assign(impl.protocol_, new_socket.get(), ec);
if (!ec)
new_socket.release();
}
ASIO_ERROR_LOCATION(ec);
return ec;
}
// Start an asynchronous accept. The peer and peer_endpoint objects must be
// valid until the accept's handler is invoked.
template <typename Socket, typename Handler, typename IoExecutor>
void async_accept(implementation_type& impl, Socket& peer,
endpoint_type* peer_endpoint, Handler& handler, const IoExecutor& io_ex)
{
bool is_continuation =
asio_handler_cont_helpers::is_continuation(handler);
associated_cancellation_slot_t<Handler> slot
= asio::get_associated_cancellation_slot(handler);
// Allocate and construct an operation to wrap the handler.
typedef io_uring_socket_accept_op<Socket, Protocol, Handler, IoExecutor> op;
typename op::ptr p = { asio::detail::addressof(handler),
op::ptr::allocate(handler), 0 };
p.p = new (p.v) op(success_ec_, impl.socket_, impl.state_,
peer, impl.protocol_, peer_endpoint, handler, io_ex);
// Optionally register for per-operation cancellation.
if (slot.is_connected() && !peer.is_open())
{
p.p->cancellation_key_ =
&slot.template emplace<io_uring_op_cancellation>(&io_uring_service_,
&impl.io_object_data_, io_uring_service::read_op);
}
ASIO_HANDLER_CREATION((io_uring_service_.context(), *p.p,
"socket", &impl, impl.socket_, "async_accept"));
start_accept_op(impl, p.p, is_continuation, peer.is_open());
p.v = p.p = 0;
}
// Start an asynchronous accept. The peer_endpoint object must be valid until
// the accept's handler is invoked.
template <typename PeerIoExecutor, typename Handler, typename IoExecutor>
void async_move_accept(implementation_type& impl,
const PeerIoExecutor& peer_io_ex, endpoint_type* peer_endpoint,
Handler& handler, const IoExecutor& io_ex)
{
bool is_continuation =
asio_handler_cont_helpers::is_continuation(handler);
associated_cancellation_slot_t<Handler> slot
= asio::get_associated_cancellation_slot(handler);
// Allocate and construct an operation to wrap the handler.
typedef io_uring_socket_move_accept_op<Protocol,
PeerIoExecutor, Handler, IoExecutor> op;
typename op::ptr p = { asio::detail::addressof(handler),
op::ptr::allocate(handler), 0 };
p.p = new (p.v) op(success_ec_, peer_io_ex, impl.socket_,
impl.state_, impl.protocol_, peer_endpoint, handler, io_ex);
// Optionally register for per-operation cancellation.
if (slot.is_connected())
{
p.p->cancellation_key_ =
&slot.template emplace<io_uring_op_cancellation>(&io_uring_service_,
&impl.io_object_data_, io_uring_service::read_op);
}
ASIO_HANDLER_CREATION((io_uring_service_.context(), *p.p,
"socket", &impl, impl.socket_, "async_accept"));
start_accept_op(impl, p.p, is_continuation, false);
p.v = p.p = 0;
}
// Connect the socket to the specified endpoint.
asio::error_code connect(implementation_type& impl,
const endpoint_type& peer_endpoint, asio::error_code& ec)
{
socket_ops::sync_connect(impl.socket_,
peer_endpoint.data(), peer_endpoint.size(), ec);
return ec;
}
// Start an asynchronous connect.
template <typename Handler, typename IoExecutor>
void async_connect(implementation_type& impl,
const endpoint_type& peer_endpoint,
Handler& handler, const IoExecutor& io_ex)
{
bool is_continuation =
asio_handler_cont_helpers::is_continuation(handler);
associated_cancellation_slot_t<Handler> slot
= asio::get_associated_cancellation_slot(handler);
// Allocate and construct an operation to wrap the handler.
typedef io_uring_socket_connect_op<Protocol, Handler, IoExecutor> op;
typename op::ptr p = { asio::detail::addressof(handler),
op::ptr::allocate(handler), 0 };
p.p = new (p.v) op(success_ec_, impl.socket_,
peer_endpoint, handler, io_ex);
// Optionally register for per-operation cancellation.
if (slot.is_connected())
{
p.p->cancellation_key_ =
&slot.template emplace<io_uring_op_cancellation>(&io_uring_service_,
&impl.io_object_data_, io_uring_service::write_op);
}
ASIO_HANDLER_CREATION((io_uring_service_.context(), *p.p,
"socket", &impl, impl.socket_, "async_connect"));
start_op(impl, io_uring_service::write_op, p.p, is_continuation, false);
p.v = p.p = 0;
}
};
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // defined(ASIO_HAS_IO_URING)
#endif // ASIO_DETAIL_IO_URING_SOCKET_SERVICE_HPP
|
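This service mirrors the reactive implementation above but submits operations through io_uring. A hedged configuration sketch, assuming liburing is installed: the backend is enabled with Asio's documented `ASIO_HAS_IO_URING` and `ASIO_HAS_IO_URING_AS_DEFAULT` macros.

```cpp
// Build (illustrative):
//   g++ -DASIO_HAS_IO_URING -DASIO_HAS_IO_URING_AS_DEFAULT app.cpp -luring -pthread
#include <asio.hpp>

int main()
{
  // With ASIO_HAS_IO_URING_AS_DEFAULT defined, socket services such as the
  // reactive_socket_service shown earlier are compiled out and the io_uring
  // based services declared in this header are used instead.
  asio::io_context ctx;
  ctx.run();
}
```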
0 | repos/asio/asio/include/asio/detail | repos/asio/asio/include/asio/detail/impl/winrt_ssocket_service_base.ipp | //
// detail/impl/winrt_ssocket_service_base.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2024 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_WINRT_SSOCKET_SERVICE_BASE_IPP
#define ASIO_DETAIL_IMPL_WINRT_SSOCKET_SERVICE_BASE_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if defined(ASIO_WINDOWS_RUNTIME)
#include <cstring>
#include "asio/detail/winrt_ssocket_service_base.hpp"
#include "asio/detail/winrt_async_op.hpp"
#include "asio/detail/winrt_utils.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
winrt_ssocket_service_base::winrt_ssocket_service_base(
execution_context& context)
: scheduler_(use_service<scheduler_impl>(context)),
async_manager_(use_service<winrt_async_manager>(context)),
mutex_(),
impl_list_(0)
{
}
void winrt_ssocket_service_base::base_shutdown()
{
// Close all implementations, causing all operations to complete.
asio::detail::mutex::scoped_lock lock(mutex_);
base_implementation_type* impl = impl_list_;
while (impl)
{
asio::error_code ignored_ec;
close(*impl, ignored_ec);
impl = impl->next_;
}
}
void winrt_ssocket_service_base::construct(
winrt_ssocket_service_base::base_implementation_type& impl)
{
// Insert implementation into linked list of all implementations.
asio::detail::mutex::scoped_lock lock(mutex_);
impl.next_ = impl_list_;
impl.prev_ = 0;
if (impl_list_)
impl_list_->prev_ = &impl;
impl_list_ = &impl;
}
void winrt_ssocket_service_base::base_move_construct(
winrt_ssocket_service_base::base_implementation_type& impl,
winrt_ssocket_service_base::base_implementation_type& other_impl)
noexcept
{
impl.socket_ = other_impl.socket_;
other_impl.socket_ = nullptr;
// Insert implementation into linked list of all implementations.
asio::detail::mutex::scoped_lock lock(mutex_);
impl.next_ = impl_list_;
impl.prev_ = 0;
if (impl_list_)
impl_list_->prev_ = &impl;
impl_list_ = &impl;
}
void winrt_ssocket_service_base::base_move_assign(
winrt_ssocket_service_base::base_implementation_type& impl,
winrt_ssocket_service_base& other_service,
winrt_ssocket_service_base::base_implementation_type& other_impl)
{
asio::error_code ignored_ec;
close(impl, ignored_ec);
if (this != &other_service)
{
// Remove implementation from linked list of all implementations.
asio::detail::mutex::scoped_lock lock(mutex_);
if (impl_list_ == &impl)
impl_list_ = impl.next_;
if (impl.prev_)
impl.prev_->next_ = impl.next_;
if (impl.next_)
      impl.next_->prev_ = impl.prev_;
impl.next_ = 0;
impl.prev_ = 0;
}
impl.socket_ = other_impl.socket_;
other_impl.socket_ = nullptr;
if (this != &other_service)
{
// Insert implementation into linked list of all implementations.
asio::detail::mutex::scoped_lock lock(other_service.mutex_);
impl.next_ = other_service.impl_list_;
impl.prev_ = 0;
if (other_service.impl_list_)
other_service.impl_list_->prev_ = &impl;
other_service.impl_list_ = &impl;
}
}
void winrt_ssocket_service_base::destroy(
winrt_ssocket_service_base::base_implementation_type& impl)
{
asio::error_code ignored_ec;
close(impl, ignored_ec);
// Remove implementation from linked list of all implementations.
asio::detail::mutex::scoped_lock lock(mutex_);
if (impl_list_ == &impl)
impl_list_ = impl.next_;
if (impl.prev_)
impl.prev_->next_ = impl.next_;
if (impl.next_)
    impl.next_->prev_ = impl.prev_;
impl.next_ = 0;
impl.prev_ = 0;
}
asio::error_code winrt_ssocket_service_base::close(
winrt_ssocket_service_base::base_implementation_type& impl,
asio::error_code& ec)
{
delete impl.socket_;
impl.socket_ = nullptr;
ec = asio::error_code();
return ec;
}
winrt_ssocket_service_base::native_handle_type
winrt_ssocket_service_base::release(
winrt_ssocket_service_base::base_implementation_type& impl,
asio::error_code& ec)
{
if (!is_open(impl))
return nullptr;
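  // Cancel any outstanding asynchronous operations before handing ownership
  // of the native socket back to the caller.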
cancel(impl, ec);
if (ec)
return nullptr;
native_handle_type tmp = impl.socket_;
impl.socket_ = nullptr;
return tmp;
}
std::size_t winrt_ssocket_service_base::do_get_endpoint(
const base_implementation_type& impl, bool local,
void* addr, std::size_t addr_len, asio::error_code& ec) const
{
if (!is_open(impl))
{
ec = asio::error::bad_descriptor;
return addr_len;
}
try
{
std::string addr_string = winrt_utils::string(local
? impl.socket_->Information->LocalAddress->CanonicalName
: impl.socket_->Information->RemoteAddress->CanonicalName);
unsigned short port = winrt_utils::integer(local
? impl.socket_->Information->LocalPort
: impl.socket_->Information->RemotePort);
unsigned long scope = 0;
switch (static_cast<const socket_addr_type*>(addr)->sa_family)
{
case ASIO_OS_DEF(AF_INET):
if (addr_len < sizeof(sockaddr_in4_type))
{
ec = asio::error::invalid_argument;
return addr_len;
}
else
{
socket_ops::inet_pton(ASIO_OS_DEF(AF_INET), addr_string.c_str(),
&reinterpret_cast<sockaddr_in4_type*>(addr)->sin_addr, &scope, ec);
reinterpret_cast<sockaddr_in4_type*>(addr)->sin_port
= socket_ops::host_to_network_short(port);
ec = asio::error_code();
return sizeof(sockaddr_in4_type);
}
case ASIO_OS_DEF(AF_INET6):
if (addr_len < sizeof(sockaddr_in6_type))
{
ec = asio::error::invalid_argument;
return addr_len;
}
else
{
socket_ops::inet_pton(ASIO_OS_DEF(AF_INET6), addr_string.c_str(),
&reinterpret_cast<sockaddr_in6_type*>(addr)->sin6_addr, &scope, ec);
reinterpret_cast<sockaddr_in6_type*>(addr)->sin6_port
= socket_ops::host_to_network_short(port);
ec = asio::error_code();
return sizeof(sockaddr_in6_type);
}
default:
ec = asio::error::address_family_not_supported;
return addr_len;
}
}
catch (Platform::Exception^ e)
{
ec = asio::error_code(e->HResult,
asio::system_category());
return addr_len;
}
}
asio::error_code winrt_ssocket_service_base::do_set_option(
winrt_ssocket_service_base::base_implementation_type& impl,
int level, int optname, const void* optval,
std::size_t optlen, asio::error_code& ec)
{
if (!is_open(impl))
{
ec = asio::error::bad_descriptor;
return ec;
}
try
{
if (level == ASIO_OS_DEF(SOL_SOCKET)
&& optname == ASIO_OS_DEF(SO_KEEPALIVE))
{
if (optlen == sizeof(int))
{
int value = 0;
std::memcpy(&value, optval, optlen);
impl.socket_->Control->KeepAlive = !!value;
ec = asio::error_code();
}
else
{
ec = asio::error::invalid_argument;
}
}
else if (level == ASIO_OS_DEF(IPPROTO_TCP)
&& optname == ASIO_OS_DEF(TCP_NODELAY))
{
if (optlen == sizeof(int))
{
int value = 0;
std::memcpy(&value, optval, optlen);
impl.socket_->Control->NoDelay = !!value;
ec = asio::error_code();
}
else
{
ec = asio::error::invalid_argument;
}
}
else
{
ec = asio::error::invalid_argument;
}
}
catch (Platform::Exception^ e)
{
ec = asio::error_code(e->HResult,
asio::system_category());
}
return ec;
}
void winrt_ssocket_service_base::do_get_option(
const winrt_ssocket_service_base::base_implementation_type& impl,
int level, int optname, void* optval,
std::size_t* optlen, asio::error_code& ec) const
{
if (!is_open(impl))
{
ec = asio::error::bad_descriptor;
return;
}
try
{
if (level == ASIO_OS_DEF(SOL_SOCKET)
&& optname == ASIO_OS_DEF(SO_KEEPALIVE))
{
if (*optlen >= sizeof(int))
{
int value = impl.socket_->Control->KeepAlive ? 1 : 0;
std::memcpy(optval, &value, sizeof(int));
*optlen = sizeof(int);
ec = asio::error_code();
}
else
{
ec = asio::error::invalid_argument;
}
}
else if (level == ASIO_OS_DEF(IPPROTO_TCP)
&& optname == ASIO_OS_DEF(TCP_NODELAY))
{
if (*optlen >= sizeof(int))
{
int value = impl.socket_->Control->NoDelay ? 1 : 0;
std::memcpy(optval, &value, sizeof(int));
*optlen = sizeof(int);
ec = asio::error_code();
}
else
{
ec = asio::error::invalid_argument;
}
}
else
{
ec = asio::error::invalid_argument;
}
}
catch (Platform::Exception^ e)
{
ec = asio::error_code(e->HResult,
asio::system_category());
}
}
asio::error_code winrt_ssocket_service_base::do_connect(
winrt_ssocket_service_base::base_implementation_type& impl,
const void* addr, asio::error_code& ec)
{
if (!is_open(impl))
{
ec = asio::error::bad_descriptor;
return ec;
}
char addr_string[max_addr_v6_str_len];
unsigned short port;
switch (static_cast<const socket_addr_type*>(addr)->sa_family)
{
case ASIO_OS_DEF(AF_INET):
socket_ops::inet_ntop(ASIO_OS_DEF(AF_INET),
&reinterpret_cast<const sockaddr_in4_type*>(addr)->sin_addr,
addr_string, sizeof(addr_string), 0, ec);
port = socket_ops::network_to_host_short(
reinterpret_cast<const sockaddr_in4_type*>(addr)->sin_port);
break;
case ASIO_OS_DEF(AF_INET6):
socket_ops::inet_ntop(ASIO_OS_DEF(AF_INET6),
&reinterpret_cast<const sockaddr_in6_type*>(addr)->sin6_addr,
addr_string, sizeof(addr_string), 0, ec);
port = socket_ops::network_to_host_short(
reinterpret_cast<const sockaddr_in6_type*>(addr)->sin6_port);
break;
default:
ec = asio::error::address_family_not_supported;
return ec;
}
if (!ec) try
{
async_manager_.sync(impl.socket_->ConnectAsync(
ref new Windows::Networking::HostName(
winrt_utils::string(addr_string)),
winrt_utils::string(port)), ec);
}
catch (Platform::Exception^ e)
{
ec = asio::error_code(e->HResult,
asio::system_category());
}
return ec;
}
void winrt_ssocket_service_base::start_connect_op(
winrt_ssocket_service_base::base_implementation_type& impl,
const void* addr, winrt_async_op<void>* op, bool is_continuation)
{
if (!is_open(impl))
{
op->ec_ = asio::error::bad_descriptor;
scheduler_.post_immediate_completion(op, is_continuation);
return;
}
char addr_string[max_addr_v6_str_len];
unsigned short port = 0;
switch (static_cast<const socket_addr_type*>(addr)->sa_family)
{
case ASIO_OS_DEF(AF_INET):
socket_ops::inet_ntop(ASIO_OS_DEF(AF_INET),
&reinterpret_cast<const sockaddr_in4_type*>(addr)->sin_addr,
addr_string, sizeof(addr_string), 0, op->ec_);
port = socket_ops::network_to_host_short(
reinterpret_cast<const sockaddr_in4_type*>(addr)->sin_port);
break;
case ASIO_OS_DEF(AF_INET6):
socket_ops::inet_ntop(ASIO_OS_DEF(AF_INET6),
&reinterpret_cast<const sockaddr_in6_type*>(addr)->sin6_addr,
addr_string, sizeof(addr_string), 0, op->ec_);
port = socket_ops::network_to_host_short(
reinterpret_cast<const sockaddr_in6_type*>(addr)->sin6_port);
break;
default:
op->ec_ = asio::error::address_family_not_supported;
break;
}
if (op->ec_)
{
scheduler_.post_immediate_completion(op, is_continuation);
return;
}
try
{
async_manager_.async(impl.socket_->ConnectAsync(
ref new Windows::Networking::HostName(
winrt_utils::string(addr_string)),
winrt_utils::string(port)), op);
}
catch (Platform::Exception^ e)
{
op->ec_ = asio::error_code(
e->HResult, asio::system_category());
scheduler_.post_immediate_completion(op, is_continuation);
}
}
std::size_t winrt_ssocket_service_base::do_send(
winrt_ssocket_service_base::base_implementation_type& impl,
const asio::const_buffer& data,
socket_base::message_flags flags, asio::error_code& ec)
{
if (flags)
{
ec = asio::error::operation_not_supported;
return 0;
}
if (!is_open(impl))
{
ec = asio::error::bad_descriptor;
return 0;
}
try
{
buffer_sequence_adapter<asio::const_buffer,
asio::const_buffer> bufs(asio::buffer(data));
if (bufs.all_empty())
{
ec = asio::error_code();
return 0;
}
return async_manager_.sync(
impl.socket_->OutputStream->WriteAsync(bufs.buffers()[0]), ec);
}
catch (Platform::Exception^ e)
{
ec = asio::error_code(e->HResult,
asio::system_category());
return 0;
}
}
void winrt_ssocket_service_base::start_send_op(
winrt_ssocket_service_base::base_implementation_type& impl,
const asio::const_buffer& data, socket_base::message_flags flags,
winrt_async_op<unsigned int>* op, bool is_continuation)
{
if (flags)
{
op->ec_ = asio::error::operation_not_supported;
scheduler_.post_immediate_completion(op, is_continuation);
return;
}
if (!is_open(impl))
{
op->ec_ = asio::error::bad_descriptor;
scheduler_.post_immediate_completion(op, is_continuation);
return;
}
try
{
buffer_sequence_adapter<asio::const_buffer,
asio::const_buffer> bufs(asio::buffer(data));
if (bufs.all_empty())
{
scheduler_.post_immediate_completion(op, is_continuation);
return;
}
async_manager_.async(
impl.socket_->OutputStream->WriteAsync(bufs.buffers()[0]), op);
}
catch (Platform::Exception^ e)
{
op->ec_ = asio::error_code(e->HResult,
asio::system_category());
scheduler_.post_immediate_completion(op, is_continuation);
}
}
std::size_t winrt_ssocket_service_base::do_receive(
winrt_ssocket_service_base::base_implementation_type& impl,
const asio::mutable_buffer& data,
socket_base::message_flags flags, asio::error_code& ec)
{
if (flags)
{
ec = asio::error::operation_not_supported;
return 0;
}
if (!is_open(impl))
{
ec = asio::error::bad_descriptor;
return 0;
}
try
{
buffer_sequence_adapter<asio::mutable_buffer,
asio::mutable_buffer> bufs(asio::buffer(data));
if (bufs.all_empty())
{
ec = asio::error_code();
return 0;
}
async_manager_.sync(
impl.socket_->InputStream->ReadAsync(
bufs.buffers()[0], bufs.buffers()[0]->Capacity,
Windows::Storage::Streams::InputStreamOptions::Partial), ec);
std::size_t bytes_transferred = bufs.buffers()[0]->Length;
if (bytes_transferred == 0 && !ec)
{
ec = asio::error::eof;
}
return bytes_transferred;
}
catch (Platform::Exception^ e)
{
ec = asio::error_code(e->HResult,
asio::system_category());
return 0;
}
}
void winrt_ssocket_service_base::start_receive_op(
winrt_ssocket_service_base::base_implementation_type& impl,
const asio::mutable_buffer& data, socket_base::message_flags flags,
winrt_async_op<Windows::Storage::Streams::IBuffer^>* op,
bool is_continuation)
{
if (flags)
{
op->ec_ = asio::error::operation_not_supported;
scheduler_.post_immediate_completion(op, is_continuation);
return;
}
if (!is_open(impl))
{
op->ec_ = asio::error::bad_descriptor;
scheduler_.post_immediate_completion(op, is_continuation);
return;
}
try
{
buffer_sequence_adapter<asio::mutable_buffer,
asio::mutable_buffer> bufs(asio::buffer(data));
if (bufs.all_empty())
{
scheduler_.post_immediate_completion(op, is_continuation);
return;
}
async_manager_.async(
impl.socket_->InputStream->ReadAsync(
bufs.buffers()[0], bufs.buffers()[0]->Capacity,
Windows::Storage::Streams::InputStreamOptions::Partial), op);
}
catch (Platform::Exception^ e)
{
op->ec_ = asio::error_code(e->HResult,
asio::system_category());
scheduler_.post_immediate_completion(op, is_continuation);
}
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // defined(ASIO_WINDOWS_RUNTIME)
#endif // ASIO_DETAIL_IMPL_WINRT_SSOCKET_SERVICE_BASE_IPP
|
0 | repos/asio/asio/include/asio/detail | repos/asio/asio/include/asio/detail/impl/socket_ops.ipp | //
// detail/impl/socket_ops.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2024 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_SOCKET_OPS_IPP
#define ASIO_DETAIL_SOCKET_OPS_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#include <cctype>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <cerrno>
#include <new>
#include "asio/detail/assert.hpp"
#include "asio/detail/socket_ops.hpp"
#include "asio/error.hpp"
#if defined(ASIO_WINDOWS_RUNTIME)
# include <codecvt>
# include <locale>
# include <string>
#endif // defined(ASIO_WINDOWS_RUNTIME)
#if defined(ASIO_WINDOWS) || defined(__CYGWIN__) \
|| defined(__MACH__) && defined(__APPLE__)
# if defined(ASIO_HAS_PTHREADS)
# include <pthread.h>
# endif // defined(ASIO_HAS_PTHREADS)
#endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__)
// || defined(__MACH__) && defined(__APPLE__)
#if defined(_MSC_VER) && (_MSC_VER >= 1800)
# include <malloc.h>
#endif // defined(_MSC_VER) && (_MSC_VER >= 1800)
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
namespace socket_ops {
#if !defined(ASIO_WINDOWS_RUNTIME)
#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)
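// Windows does not provide a msghdr structure. Define a minimal substitute
// whose msg_namelen member lets the call_* helper templates below deduce the
// platform's socket length type.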
struct msghdr { int msg_namelen; };
#endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__)
#if defined(__hpux)
// HP-UX doesn't declare these functions extern "C", so they are declared again
// here to avoid linker errors about undefined symbols.
extern "C" char* if_indextoname(unsigned int, char*);
extern "C" unsigned int if_nametoindex(const char*);
#endif // defined(__hpux)
#endif // !defined(ASIO_WINDOWS_RUNTIME)
inline void clear_last_error()
{
#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)
WSASetLastError(0);
#else
errno = 0;
#endif
}
#if !defined(ASIO_WINDOWS_RUNTIME)
inline void get_last_error(
asio::error_code& ec, bool is_error_condition)
{
if (!is_error_condition)
{
asio::error::clear(ec);
}
else
{
#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)
ec = asio::error_code(WSAGetLastError(),
asio::error::get_system_category());
#else
ec = asio::error_code(errno,
asio::error::get_system_category());
#endif
}
}
template <typename SockLenType>
inline socket_type call_accept(SockLenType msghdr::*,
socket_type s, void* addr, std::size_t* addrlen)
{
SockLenType tmp_addrlen = addrlen ? (SockLenType)*addrlen : 0;
socket_type result = ::accept(s,
static_cast<socket_addr_type*>(addr),
addrlen ? &tmp_addrlen : 0);
if (addrlen)
*addrlen = (std::size_t)tmp_addrlen;
return result;
}
socket_type accept(socket_type s, void* addr,
std::size_t* addrlen, asio::error_code& ec)
{
if (s == invalid_socket)
{
ec = asio::error::bad_descriptor;
return invalid_socket;
}
socket_type new_s = call_accept(&msghdr::msg_namelen, s, addr, addrlen);
get_last_error(ec, new_s == invalid_socket);
if (new_s == invalid_socket)
return new_s;
#if defined(__MACH__) && defined(__APPLE__) || defined(__FreeBSD__)
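  // On BSD-derived systems, prevent SIGPIPE from being raised when writing to
  // the newly accepted socket after the peer has closed the connection.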
int optval = 1;
int result = ::setsockopt(new_s, SOL_SOCKET,
SO_NOSIGPIPE, &optval, sizeof(optval));
get_last_error(ec, result != 0);
if (result != 0)
{
::close(new_s);
return invalid_socket;
}
#endif
asio::error::clear(ec);
return new_s;
}
socket_type sync_accept(socket_type s, state_type state,
void* addr, std::size_t* addrlen, asio::error_code& ec)
{
// Accept a socket.
for (;;)
{
// Try to complete the operation without blocking.
socket_type new_socket = socket_ops::accept(s, addr, addrlen, ec);
// Check if operation succeeded.
if (new_socket != invalid_socket)
return new_socket;
// Operation failed.
if (ec == asio::error::would_block
|| ec == asio::error::try_again)
{
if (state & user_set_non_blocking)
return invalid_socket;
// Fall through to retry operation.
}
else if (ec == asio::error::connection_aborted)
{
if (state & enable_connection_aborted)
return invalid_socket;
// Fall through to retry operation.
}
#if defined(EPROTO)
else if (ec.value() == EPROTO)
{
if (state & enable_connection_aborted)
return invalid_socket;
// Fall through to retry operation.
}
#endif // defined(EPROTO)
else
return invalid_socket;
// Wait for socket to become ready.
if (socket_ops::poll_read(s, 0, -1, ec) < 0)
return invalid_socket;
}
}
#if defined(ASIO_HAS_IOCP)
void complete_iocp_accept(socket_type s, void* output_buffer,
DWORD address_length, void* addr, std::size_t* addrlen,
socket_type new_socket, asio::error_code& ec)
{
// Map non-portable errors to their portable counterparts.
if (ec.value() == ERROR_NETNAME_DELETED)
ec = asio::error::connection_aborted;
if (!ec)
{
// Get the address of the peer.
if (addr && addrlen)
{
LPSOCKADDR local_addr = 0;
int local_addr_length = 0;
LPSOCKADDR remote_addr = 0;
int remote_addr_length = 0;
GetAcceptExSockaddrs(output_buffer, 0, address_length,
address_length, &local_addr, &local_addr_length,
&remote_addr, &remote_addr_length);
if (static_cast<std::size_t>(remote_addr_length) > *addrlen)
{
ec = asio::error::invalid_argument;
}
else
{
using namespace std; // For memcpy.
memcpy(addr, remote_addr, remote_addr_length);
*addrlen = static_cast<std::size_t>(remote_addr_length);
}
}
// Need to set the SO_UPDATE_ACCEPT_CONTEXT option so that getsockname
// and getpeername will work on the accepted socket.
SOCKET update_ctx_param = s;
socket_ops::state_type state = 0;
socket_ops::setsockopt(new_socket, state,
SOL_SOCKET, SO_UPDATE_ACCEPT_CONTEXT,
&update_ctx_param, sizeof(SOCKET), ec);
}
}
#else // defined(ASIO_HAS_IOCP)
bool non_blocking_accept(socket_type s,
state_type state, void* addr, std::size_t* addrlen,
asio::error_code& ec, socket_type& new_socket)
{
for (;;)
{
// Accept the waiting connection.
new_socket = socket_ops::accept(s, addr, addrlen, ec);
// Check if operation succeeded.
if (new_socket != invalid_socket)
return true;
// Retry operation if interrupted by signal.
if (ec == asio::error::interrupted)
continue;
// Operation failed.
if (ec == asio::error::would_block
|| ec == asio::error::try_again)
{
// Fall through to retry operation.
}
else if (ec == asio::error::connection_aborted)
{
if (state & enable_connection_aborted)
return true;
// Fall through to retry operation.
}
#if defined(EPROTO)
else if (ec.value() == EPROTO)
{
if (state & enable_connection_aborted)
return true;
// Fall through to retry operation.
}
#endif // defined(EPROTO)
else
return true;
return false;
}
}
#endif // defined(ASIO_HAS_IOCP)
template <typename SockLenType>
inline int call_bind(SockLenType msghdr::*,
socket_type s, const void* addr, std::size_t addrlen)
{
return ::bind(s, static_cast<const socket_addr_type*>(addr),
(SockLenType)addrlen);
}
int bind(socket_type s, const void* addr,
std::size_t addrlen, asio::error_code& ec)
{
if (s == invalid_socket)
{
ec = asio::error::bad_descriptor;
return socket_error_retval;
}
int result = call_bind(&msghdr::msg_namelen, s, addr, addrlen);
get_last_error(ec, result != 0);
return result;
}
int close(socket_type s, state_type& state,
bool destruction, asio::error_code& ec)
{
int result = 0;
if (s != invalid_socket)
{
// We don't want the destructor to block, so set the socket to linger in
// the background. If the user doesn't like this behaviour then they need
// to explicitly close the socket.
if (destruction && (state & user_set_linger))
{
::linger opt;
opt.l_onoff = 0;
opt.l_linger = 0;
asio::error_code ignored_ec;
socket_ops::setsockopt(s, state, SOL_SOCKET,
SO_LINGER, &opt, sizeof(opt), ignored_ec);
}
#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)
result = ::closesocket(s);
#else // defined(ASIO_WINDOWS) || defined(__CYGWIN__)
result = ::close(s);
#endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__)
get_last_error(ec, result != 0);
if (result != 0
&& (ec == asio::error::would_block
|| ec == asio::error::try_again))
{
// According to UNIX Network Programming Vol. 1, it is possible for
// close() to fail with EWOULDBLOCK under certain circumstances. What
// isn't clear is the state of the descriptor after this error. The one
// current OS where this behaviour is seen, Windows, says that the socket
// remains open. Therefore we'll put the descriptor back into blocking
// mode and have another attempt at closing it.
#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)
ioctl_arg_type arg = 0;
::ioctlsocket(s, FIONBIO, &arg);
#else // defined(ASIO_WINDOWS) || defined(__CYGWIN__)
# if defined(__SYMBIAN32__) || defined(__EMSCRIPTEN__)
int flags = ::fcntl(s, F_GETFL, 0);
if (flags >= 0)
::fcntl(s, F_SETFL, flags & ~O_NONBLOCK);
# else // defined(__SYMBIAN32__) || defined(__EMSCRIPTEN__)
ioctl_arg_type arg = 0;
if ((state & possible_dup) == 0)
{
result = ::ioctl(s, FIONBIO, &arg);
get_last_error(ec, result < 0);
}
if ((state & possible_dup) != 0
# if defined(ENOTTY)
|| ec.value() == ENOTTY
# endif // defined(ENOTTY)
# if defined(ENOTCAPABLE)
|| ec.value() == ENOTCAPABLE
# endif // defined(ENOTCAPABLE)
)
{
int flags = ::fcntl(s, F_GETFL, 0);
if (flags >= 0)
::fcntl(s, F_SETFL, flags & ~O_NONBLOCK);
}
# endif // defined(__SYMBIAN32__) || defined(__EMSCRIPTEN__)
#endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__)
state &= ~non_blocking;
#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)
result = ::closesocket(s);
#else // defined(ASIO_WINDOWS) || defined(__CYGWIN__)
result = ::close(s);
#endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__)
get_last_error(ec, result != 0);
}
}
return result;
}
bool set_user_non_blocking(socket_type s,
state_type& state, bool value, asio::error_code& ec)
{
if (s == invalid_socket)
{
ec = asio::error::bad_descriptor;
return false;
}
#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)
ioctl_arg_type arg = (value ? 1 : 0);
int result = ::ioctlsocket(s, FIONBIO, &arg);
get_last_error(ec, result < 0);
#elif defined(__SYMBIAN32__) || defined(__EMSCRIPTEN__)
int result = ::fcntl(s, F_GETFL, 0);
get_last_error(ec, result < 0);
if (result >= 0)
{
int flag = (value ? (result | O_NONBLOCK) : (result & ~O_NONBLOCK));
result = (flag != result) ? ::fcntl(s, F_SETFL, flag) : 0;
get_last_error(ec, result < 0);
}
#else // defined(__SYMBIAN32__) || defined(__EMSCRIPTEN__)
ioctl_arg_type arg = (value ? 1 : 0);
int result = 0;
if ((state & possible_dup) == 0)
{
result = ::ioctl(s, FIONBIO, &arg);
get_last_error(ec, result < 0);
}
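  // If the descriptor might be a duplicate, or if FIONBIO is not supported on
  // it, fall back to toggling the O_NONBLOCK flag via fcntl() instead.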
if ((state & possible_dup) != 0
# if defined(ENOTTY)
|| ec.value() == ENOTTY
# endif // defined(ENOTTY)
# if defined(ENOTCAPABLE)
|| ec.value() == ENOTCAPABLE
# endif // defined(ENOTCAPABLE)
)
{
result = ::fcntl(s, F_GETFL, 0);
get_last_error(ec, result < 0);
if (result >= 0)
{
int flag = (value ? (result | O_NONBLOCK) : (result & ~O_NONBLOCK));
result = (flag != result) ? ::fcntl(s, F_SETFL, flag) : 0;
get_last_error(ec, result < 0);
}
}
#endif // defined(__SYMBIAN32__) || defined(__EMSCRIPTEN__)
if (result >= 0)
{
if (value)
state |= user_set_non_blocking;
else
{
// Clearing the user-set non-blocking mode always overrides any
// internally-set non-blocking flag. Any subsequent asynchronous
// operations will need to re-enable non-blocking I/O.
state &= ~(user_set_non_blocking | internal_non_blocking);
}
return true;
}
return false;
}
bool set_internal_non_blocking(socket_type s,
state_type& state, bool value, asio::error_code& ec)
{
if (s == invalid_socket)
{
ec = asio::error::bad_descriptor;
return false;
}
if (!value && (state & user_set_non_blocking))
{
// It does not make sense to clear the internal non-blocking flag if the
// user still wants non-blocking behaviour. Return an error and let the
// caller figure out whether to update the user-set non-blocking flag.
ec = asio::error::invalid_argument;
return false;
}
#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)
ioctl_arg_type arg = (value ? 1 : 0);
int result = ::ioctlsocket(s, FIONBIO, &arg);
get_last_error(ec, result < 0);
#elif defined(__SYMBIAN32__) || defined(__EMSCRIPTEN__)
int result = ::fcntl(s, F_GETFL, 0);
get_last_error(ec, result < 0);
if (result >= 0)
{
int flag = (value ? (result | O_NONBLOCK) : (result & ~O_NONBLOCK));
result = (flag != result) ? ::fcntl(s, F_SETFL, flag) : 0;
get_last_error(ec, result < 0);
}
#else // defined(__SYMBIAN32__) || defined(__EMSCRIPTEN__)
ioctl_arg_type arg = (value ? 1 : 0);
int result = 0;
if ((state & possible_dup) == 0)
{
result = ::ioctl(s, FIONBIO, &arg);
get_last_error(ec, result < 0);
}
if ((state & possible_dup) != 0
# if defined(ENOTTY)
|| ec.value() == ENOTTY
# endif // defined(ENOTTY)
# if defined(ENOTCAPABLE)
|| ec.value() == ENOTCAPABLE
# endif // defined(ENOTCAPABLE)
)
{
result = ::fcntl(s, F_GETFL, 0);
get_last_error(ec, result < 0);
if (result >= 0)
{
int flag = (value ? (result | O_NONBLOCK) : (result & ~O_NONBLOCK));
result = (flag != result) ? ::fcntl(s, F_SETFL, flag) : 0;
get_last_error(ec, result < 0);
}
}
#endif // defined(__SYMBIAN32__) || defined(__EMSCRIPTEN__)
if (result >= 0)
{
if (value)
state |= internal_non_blocking;
else
state &= ~internal_non_blocking;
return true;
}
return false;
}
int shutdown(socket_type s, int what, asio::error_code& ec)
{
if (s == invalid_socket)
{
ec = asio::error::bad_descriptor;
return socket_error_retval;
}
int result = ::shutdown(s, what);
get_last_error(ec, result != 0);
return result;
}
template <typename SockLenType>
inline int call_connect(SockLenType msghdr::*,
socket_type s, const void* addr, std::size_t addrlen)
{
return ::connect(s, static_cast<const socket_addr_type*>(addr),
(SockLenType)addrlen);
}
int connect(socket_type s, const void* addr,
std::size_t addrlen, asio::error_code& ec)
{
if (s == invalid_socket)
{
ec = asio::error::bad_descriptor;
return socket_error_retval;
}
int result = call_connect(&msghdr::msg_namelen, s, addr, addrlen);
get_last_error(ec, result != 0);
#if defined(__linux__)
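  // On Linux, connect() can fail with EAGAIN either because a UNIX domain
  // connection cannot complete immediately or because kernel resources are
  // temporarily exhausted; map each case to a distinct portable error.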
if (result != 0 && ec == asio::error::try_again)
{
if (static_cast<const socket_addr_type*>(addr)->sa_family == AF_UNIX)
ec = asio::error::in_progress;
else
ec = asio::error::no_buffer_space;
}
#endif // defined(__linux__)
return result;
}
void sync_connect(socket_type s, const void* addr,
std::size_t addrlen, asio::error_code& ec)
{
// Perform the connect operation.
socket_ops::connect(s, addr, addrlen, ec);
if (ec != asio::error::in_progress
&& ec != asio::error::would_block)
{
// The connect operation finished immediately.
return;
}
// Wait for socket to become ready.
if (socket_ops::poll_connect(s, -1, ec) < 0)
return;
// Get the error code from the connect operation.
int connect_error = 0;
size_t connect_error_len = sizeof(connect_error);
if (socket_ops::getsockopt(s, 0, SOL_SOCKET, SO_ERROR,
&connect_error, &connect_error_len, ec) == socket_error_retval)
return;
// Return the result of the connect operation.
ec = asio::error_code(connect_error,
asio::error::get_system_category());
}
#if defined(ASIO_HAS_IOCP)
void complete_iocp_connect(socket_type s, asio::error_code& ec)
{
// Map non-portable errors to their portable counterparts.
switch (ec.value())
{
case ERROR_CONNECTION_REFUSED:
ec = asio::error::connection_refused;
break;
case ERROR_NETWORK_UNREACHABLE:
ec = asio::error::network_unreachable;
break;
case ERROR_HOST_UNREACHABLE:
ec = asio::error::host_unreachable;
break;
case ERROR_SEM_TIMEOUT:
ec = asio::error::timed_out;
break;
default:
break;
}
if (!ec)
{
// Need to set the SO_UPDATE_CONNECT_CONTEXT option so that getsockname
// and getpeername will work on the connected socket.
socket_ops::state_type state = 0;
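    // 0x7010 is the numeric value of SO_UPDATE_CONNECT_CONTEXT.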
const int so_update_connect_context = 0x7010;
socket_ops::setsockopt(s, state, SOL_SOCKET,
so_update_connect_context, 0, 0, ec);
}
}
#endif // defined(ASIO_HAS_IOCP)
bool non_blocking_connect(socket_type s, asio::error_code& ec)
{
// Check if the connect operation has finished. This is required since we may
// get spurious readiness notifications from the reactor.
#if defined(ASIO_WINDOWS) \
|| defined(__CYGWIN__) \
|| defined(__SYMBIAN32__)
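  // On Windows-like platforms a failed non-blocking connect is signalled via
  // the exception set passed to select(), so both the write and exception
  // sets are checked here.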
fd_set write_fds;
FD_ZERO(&write_fds);
FD_SET(s, &write_fds);
fd_set except_fds;
FD_ZERO(&except_fds);
FD_SET(s, &except_fds);
timeval zero_timeout;
zero_timeout.tv_sec = 0;
zero_timeout.tv_usec = 0;
int ready = ::select(s + 1, 0, &write_fds, &except_fds, &zero_timeout);
#else // defined(ASIO_WINDOWS)
// || defined(__CYGWIN__)
// || defined(__SYMBIAN32__)
pollfd fds;
fds.fd = s;
fds.events = POLLOUT;
fds.revents = 0;
int ready = ::poll(&fds, 1, 0);
#endif // defined(ASIO_WINDOWS)
// || defined(__CYGWIN__)
// || defined(__SYMBIAN32__)
if (ready == 0)
{
// The asynchronous connect operation is still in progress.
return false;
}
// Get the error code from the connect operation.
int connect_error = 0;
size_t connect_error_len = sizeof(connect_error);
if (socket_ops::getsockopt(s, 0, SOL_SOCKET, SO_ERROR,
&connect_error, &connect_error_len, ec) == 0)
{
if (connect_error)
{
ec = asio::error_code(connect_error,
asio::error::get_system_category());
}
else
asio::error::clear(ec);
}
return true;
}
int socketpair(int af, int type, int protocol,
socket_type sv[2], asio::error_code& ec)
{
#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)
(void)(af);
(void)(type);
(void)(protocol);
(void)(sv);
ec = asio::error::operation_not_supported;
return socket_error_retval;
#else
int result = ::socketpair(af, type, protocol, sv);
get_last_error(ec, result != 0);
return result;
#endif
}
bool sockatmark(socket_type s, asio::error_code& ec)
{
if (s == invalid_socket)
{
ec = asio::error::bad_descriptor;
return false;
}
#if defined(SIOCATMARK)
ioctl_arg_type value = 0;
# if defined(ASIO_WINDOWS) || defined(__CYGWIN__)
int result = ::ioctlsocket(s, SIOCATMARK, &value);
# else // defined(ASIO_WINDOWS) || defined(__CYGWIN__)
int result = ::ioctl(s, SIOCATMARK, &value);
# endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__)
get_last_error(ec, result < 0);
# if defined(ENOTTY)
if (ec.value() == ENOTTY)
ec = asio::error::not_socket;
# endif // defined(ENOTTY)
#else // defined(SIOCATMARK)
int value = ::sockatmark(s);
get_last_error(ec, value < 0);
#endif // defined(SIOCATMARK)
return ec ? false : value != 0;
}
size_t available(socket_type s, asio::error_code& ec)
{
if (s == invalid_socket)
{
ec = asio::error::bad_descriptor;
return 0;
}
ioctl_arg_type value = 0;
#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)
int result = ::ioctlsocket(s, FIONREAD, &value);
#else // defined(ASIO_WINDOWS) || defined(__CYGWIN__)
int result = ::ioctl(s, FIONREAD, &value);
#endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__)
get_last_error(ec, result < 0);
#if defined(ENOTTY)
if (ec.value() == ENOTTY)
ec = asio::error::not_socket;
#endif // defined(ENOTTY)
return ec ? static_cast<size_t>(0) : static_cast<size_t>(value);
}
int listen(socket_type s, int backlog, asio::error_code& ec)
{
if (s == invalid_socket)
{
ec = asio::error::bad_descriptor;
return socket_error_retval;
}
int result = ::listen(s, backlog);
get_last_error(ec, result != 0);
return result;
}
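// The iov_base member of iovec is void* on most platforms but uses a
// different pointer type on some; these overloads assign it correctly in
// either case.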
inline void init_buf_iov_base(void*& base, void* addr)
{
base = addr;
}
template <typename T>
inline void init_buf_iov_base(T& base, void* addr)
{
base = static_cast<T>(addr);
}
#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)
typedef WSABUF buf;
#else // defined(ASIO_WINDOWS) || defined(__CYGWIN__)
typedef iovec buf;
#endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__)
void init_buf(buf& b, void* data, size_t size)
{
#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)
b.buf = static_cast<char*>(data);
b.len = static_cast<u_long>(size);
#else // defined(ASIO_WINDOWS) || defined(__CYGWIN__)
init_buf_iov_base(b.iov_base, data);
b.iov_len = size;
#endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__)
}
void init_buf(buf& b, const void* data, size_t size)
{
#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)
b.buf = static_cast<char*>(const_cast<void*>(data));
b.len = static_cast<u_long>(size);
#else // defined(ASIO_WINDOWS) || defined(__CYGWIN__)
init_buf_iov_base(b.iov_base, const_cast<void*>(data));
b.iov_len = size;
#endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__)
}
inline void init_msghdr_msg_name(void*& name, void* addr)
{
name = static_cast<socket_addr_type*>(addr);
}
inline void init_msghdr_msg_name(void*& name, const socket_addr_type* addr)
{
name = const_cast<socket_addr_type*>(addr);
}
template <typename T>
inline void init_msghdr_msg_name(T& name, void* addr)
{
name = static_cast<T>(addr);
}
template <typename T>
inline void init_msghdr_msg_name(T& name, const void* addr)
{
name = static_cast<T>(const_cast<void*>(addr));
}
signed_size_type recv(socket_type s, buf* bufs, size_t count,
int flags, asio::error_code& ec)
{
#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)
// Receive some data.
DWORD recv_buf_count = static_cast<DWORD>(count);
DWORD bytes_transferred = 0;
DWORD recv_flags = flags;
int result = ::WSARecv(s, bufs, recv_buf_count,
&bytes_transferred, &recv_flags, 0, 0);
get_last_error(ec, true);
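  // Map Windows-specific completion errors onto their portable counterparts.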
if (ec.value() == ERROR_NETNAME_DELETED)
ec = asio::error::connection_reset;
else if (ec.value() == ERROR_PORT_UNREACHABLE)
ec = asio::error::connection_refused;
else if (ec.value() == WSAEMSGSIZE || ec.value() == ERROR_MORE_DATA)
result = 0;
if (result != 0)
return socket_error_retval;
asio::error::clear(ec);
return bytes_transferred;
#else // defined(ASIO_WINDOWS) || defined(__CYGWIN__)
msghdr msg = msghdr();
msg.msg_iov = bufs;
msg.msg_iovlen = static_cast<int>(count);
signed_size_type result = ::recvmsg(s, &msg, flags);
get_last_error(ec, result < 0);
return result;
#endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__)
}
signed_size_type recv1(socket_type s, void* data, size_t size,
int flags, asio::error_code& ec)
{
#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)
// Receive some data.
WSABUF buf;
buf.buf = const_cast<char*>(static_cast<const char*>(data));
buf.len = static_cast<ULONG>(size);
DWORD bytes_transferred = 0;
DWORD recv_flags = flags;
int result = ::WSARecv(s, &buf, 1,
&bytes_transferred, &recv_flags, 0, 0);
get_last_error(ec, true);
if (ec.value() == ERROR_NETNAME_DELETED)
ec = asio::error::connection_reset;
else if (ec.value() == ERROR_PORT_UNREACHABLE)
ec = asio::error::connection_refused;
else if (ec.value() == WSAEMSGSIZE || ec.value() == ERROR_MORE_DATA)
result = 0;
if (result != 0)
return socket_error_retval;
asio::error::clear(ec);
return bytes_transferred;
#else // defined(ASIO_WINDOWS) || defined(__CYGWIN__)
signed_size_type result = ::recv(s, static_cast<char*>(data), size, flags);
get_last_error(ec, result < 0);
return result;
#endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__)
}
size_t sync_recv(socket_type s, state_type state, buf* bufs,
size_t count, int flags, bool all_empty, asio::error_code& ec)
{
if (s == invalid_socket)
{
ec = asio::error::bad_descriptor;
return 0;
}
// A request to read 0 bytes on a stream is a no-op.
if (all_empty && (state & stream_oriented))
{
asio::error::clear(ec);
return 0;
}
// Read some data.
for (;;)
{
// Try to complete the operation without blocking.
signed_size_type bytes = socket_ops::recv(s, bufs, count, flags, ec);
// Check for EOF.
if ((state & stream_oriented) && bytes == 0)
{
ec = asio::error::eof;
return 0;
}
// Check if operation succeeded.
if (bytes >= 0)
return bytes;
// Operation failed.
if ((state & user_set_non_blocking)
|| (ec != asio::error::would_block
&& ec != asio::error::try_again))
return 0;
// Wait for socket to become ready.
if (socket_ops::poll_read(s, 0, -1, ec) < 0)
return 0;
}
}
size_t sync_recv1(socket_type s, state_type state, void* data,
size_t size, int flags, asio::error_code& ec)
{
if (s == invalid_socket)
{
ec = asio::error::bad_descriptor;
return 0;
}
// A request to read 0 bytes on a stream is a no-op.
if (size == 0 && (state & stream_oriented))
{
asio::error::clear(ec);
return 0;
}
// Read some data.
for (;;)
{
// Try to complete the operation without blocking.
signed_size_type bytes = socket_ops::recv1(s, data, size, flags, ec);
// Check for EOF.
if ((state & stream_oriented) && bytes == 0)
{
ec = asio::error::eof;
return 0;
}
// Check if operation succeeded.
if (bytes >= 0)
return bytes;
// Operation failed.
if ((state & user_set_non_blocking)
|| (ec != asio::error::would_block
&& ec != asio::error::try_again))
return 0;
// Wait for socket to become ready.
if (socket_ops::poll_read(s, 0, -1, ec) < 0)
return 0;
}
}
#if defined(ASIO_HAS_IOCP)
void complete_iocp_recv(state_type state,
const weak_cancel_token_type& cancel_token, bool all_empty,
asio::error_code& ec, size_t bytes_transferred)
{
// Map non-portable errors to their portable counterparts.
if (ec.value() == ERROR_NETNAME_DELETED)
{
if (cancel_token.expired())
ec = asio::error::operation_aborted;
else
ec = asio::error::connection_reset;
}
else if (ec.value() == ERROR_PORT_UNREACHABLE)
{
ec = asio::error::connection_refused;
}
else if (ec.value() == WSAEMSGSIZE || ec.value() == ERROR_MORE_DATA)
{
asio::error::clear(ec);
}
// Check for connection closed.
else if (!ec && bytes_transferred == 0
&& (state & stream_oriented) != 0
&& !all_empty)
{
ec = asio::error::eof;
}
}
#else // defined(ASIO_HAS_IOCP)
bool non_blocking_recv(socket_type s,
buf* bufs, size_t count, int flags, bool is_stream,
asio::error_code& ec, size_t& bytes_transferred)
{
for (;;)
{
// Read some data.
signed_size_type bytes = socket_ops::recv(s, bufs, count, flags, ec);
// Check for end of stream.
if (is_stream && bytes == 0)
{
ec = asio::error::eof;
return true;
}
// Check if operation succeeded.
if (bytes >= 0)
{
bytes_transferred = bytes;
return true;
}
// Retry operation if interrupted by signal.
if (ec == asio::error::interrupted)
continue;
// Check if we need to run the operation again.
if (ec == asio::error::would_block
|| ec == asio::error::try_again)
return false;
// Operation failed.
bytes_transferred = 0;
return true;
}
}
bool non_blocking_recv1(socket_type s,
void* data, size_t size, int flags, bool is_stream,
asio::error_code& ec, size_t& bytes_transferred)
{
for (;;)
{
// Read some data.
signed_size_type bytes = socket_ops::recv1(s, data, size, flags, ec);
// Check for end of stream.
if (is_stream && bytes == 0)
{
ec = asio::error::eof;
return true;
}
// Check if operation succeeded.
if (bytes >= 0)
{
bytes_transferred = bytes;
return true;
}
// Retry operation if interrupted by signal.
if (ec == asio::error::interrupted)
continue;
// Check if we need to run the operation again.
if (ec == asio::error::would_block
|| ec == asio::error::try_again)
return false;
// Operation failed.
bytes_transferred = 0;
return true;
}
}
#endif // defined(ASIO_HAS_IOCP)
signed_size_type recvfrom(socket_type s, buf* bufs, size_t count,
int flags, void* addr, std::size_t* addrlen, asio::error_code& ec)
{
#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)
// Receive some data.
DWORD recv_buf_count = static_cast<DWORD>(count);
DWORD bytes_transferred = 0;
DWORD recv_flags = flags;
int tmp_addrlen = (int)*addrlen;
int result = ::WSARecvFrom(s, bufs, recv_buf_count, &bytes_transferred,
&recv_flags, static_cast<socket_addr_type*>(addr), &tmp_addrlen, 0, 0);
get_last_error(ec, true);
*addrlen = (std::size_t)tmp_addrlen;
if (ec.value() == ERROR_NETNAME_DELETED)
ec = asio::error::connection_reset;
else if (ec.value() == ERROR_PORT_UNREACHABLE)
ec = asio::error::connection_refused;
else if (ec.value() == WSAEMSGSIZE || ec.value() == ERROR_MORE_DATA)
result = 0;
if (result != 0)
return socket_error_retval;
asio::error::clear(ec);
return bytes_transferred;
#else // defined(ASIO_WINDOWS) || defined(__CYGWIN__)
msghdr msg = msghdr();
init_msghdr_msg_name(msg.msg_name, addr);
msg.msg_namelen = static_cast<int>(*addrlen);
msg.msg_iov = bufs;
msg.msg_iovlen = static_cast<int>(count);
signed_size_type result = ::recvmsg(s, &msg, flags);
get_last_error(ec, result < 0);
*addrlen = msg.msg_namelen;
return result;
#endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__)
}
template <typename SockLenType>
inline signed_size_type call_recvfrom(SockLenType msghdr::*, socket_type s,
void* data, size_t size, int flags, void* addr, std::size_t* addrlen)
{
SockLenType tmp_addrlen = addrlen ? (SockLenType)*addrlen : 0;
signed_size_type result = ::recvfrom(s, static_cast<char*>(data), size,
flags, static_cast<socket_addr_type*>(addr), addrlen ? &tmp_addrlen : 0);
if (addrlen)
*addrlen = (std::size_t)tmp_addrlen;
return result;
}
signed_size_type recvfrom1(socket_type s, void* data, size_t size,
int flags, void* addr, std::size_t* addrlen, asio::error_code& ec)
{
#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)
// Receive some data.
WSABUF buf;
buf.buf = static_cast<char*>(data);
buf.len = static_cast<ULONG>(size);
DWORD bytes_transferred = 0;
DWORD recv_flags = flags;
int tmp_addrlen = (int)*addrlen;
int result = ::WSARecvFrom(s, &buf, 1, &bytes_transferred, &recv_flags,
static_cast<socket_addr_type*>(addr), &tmp_addrlen, 0, 0);
get_last_error(ec, true);
*addrlen = (std::size_t)tmp_addrlen;
if (ec.value() == ERROR_NETNAME_DELETED)
ec = asio::error::connection_reset;
else if (ec.value() == ERROR_PORT_UNREACHABLE)
ec = asio::error::connection_refused;
else if (ec.value() == WSAEMSGSIZE || ec.value() == ERROR_MORE_DATA)
result = 0;
if (result != 0)
return socket_error_retval;
asio::error::clear(ec);
return bytes_transferred;
#else // defined(ASIO_WINDOWS) || defined(__CYGWIN__)
signed_size_type result = call_recvfrom(&msghdr::msg_namelen,
s, data, size, flags, addr, addrlen);
get_last_error(ec, result < 0);
return result;
#endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__)
}
size_t sync_recvfrom(socket_type s, state_type state, buf* bufs, size_t count,
int flags, void* addr, std::size_t* addrlen, asio::error_code& ec)
{
if (s == invalid_socket)
{
ec = asio::error::bad_descriptor;
return 0;
}
// Read some data.
for (;;)
{
// Try to complete the operation without blocking.
signed_size_type bytes = socket_ops::recvfrom(
s, bufs, count, flags, addr, addrlen, ec);
// Check if operation succeeded.
if (bytes >= 0)
return bytes;
// Operation failed.
if ((state & user_set_non_blocking)
|| (ec != asio::error::would_block
&& ec != asio::error::try_again))
return 0;
// Wait for socket to become ready.
if (socket_ops::poll_read(s, 0, -1, ec) < 0)
return 0;
}
}
size_t sync_recvfrom1(socket_type s, state_type state, void* data, size_t size,
int flags, void* addr, std::size_t* addrlen, asio::error_code& ec)
{
if (s == invalid_socket)
{
ec = asio::error::bad_descriptor;
return 0;
}
// Read some data.
for (;;)
{
// Try to complete the operation without blocking.
signed_size_type bytes = socket_ops::recvfrom1(
s, data, size, flags, addr, addrlen, ec);
// Check if operation succeeded.
if (bytes >= 0)
return bytes;
// Operation failed.
if ((state & user_set_non_blocking)
|| (ec != asio::error::would_block
&& ec != asio::error::try_again))
return 0;
// Wait for socket to become ready.
if (socket_ops::poll_read(s, 0, -1, ec) < 0)
return 0;
}
}
#if defined(ASIO_HAS_IOCP)
void complete_iocp_recvfrom(
const weak_cancel_token_type& cancel_token,
asio::error_code& ec)
{
// Map non-portable errors to their portable counterparts.
if (ec.value() == ERROR_NETNAME_DELETED)
{
if (cancel_token.expired())
ec = asio::error::operation_aborted;
else
ec = asio::error::connection_reset;
}
else if (ec.value() == ERROR_PORT_UNREACHABLE)
{
ec = asio::error::connection_refused;
}
else if (ec.value() == WSAEMSGSIZE || ec.value() == ERROR_MORE_DATA)
{
asio::error::clear(ec);
}
}
#else // defined(ASIO_HAS_IOCP)
bool non_blocking_recvfrom(socket_type s, buf* bufs,
size_t count, int flags, void* addr, std::size_t* addrlen,
asio::error_code& ec, size_t& bytes_transferred)
{
for (;;)
{
// Read some data.
signed_size_type bytes = socket_ops::recvfrom(
s, bufs, count, flags, addr, addrlen, ec);
// Check if operation succeeded.
if (bytes >= 0)
{
bytes_transferred = bytes;
return true;
}
// Retry operation if interrupted by signal.
if (ec == asio::error::interrupted)
continue;
// Check if we need to run the operation again.
if (ec == asio::error::would_block
|| ec == asio::error::try_again)
return false;
// Operation failed.
bytes_transferred = 0;
return true;
}
}
bool non_blocking_recvfrom1(socket_type s, void* data,
size_t size, int flags, void* addr, std::size_t* addrlen,
asio::error_code& ec, size_t& bytes_transferred)
{
for (;;)
{
// Read some data.
signed_size_type bytes = socket_ops::recvfrom1(
s, data, size, flags, addr, addrlen, ec);
// Check if operation succeeded.
if (bytes >= 0)
{
bytes_transferred = bytes;
return true;
}
// Retry operation if interrupted by signal.
if (ec == asio::error::interrupted)
continue;
// Check if we need to run the operation again.
if (ec == asio::error::would_block
|| ec == asio::error::try_again)
return false;
// Operation failed.
bytes_transferred = 0;
return true;
}
}
#endif // defined(ASIO_HAS_IOCP)
signed_size_type recvmsg(socket_type s, buf* bufs, size_t count,
int in_flags, int& out_flags, asio::error_code& ec)
{
#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)
out_flags = 0;
return socket_ops::recv(s, bufs, count, in_flags, ec);
#else // defined(ASIO_WINDOWS) || defined(__CYGWIN__)
msghdr msg = msghdr();
msg.msg_iov = bufs;
msg.msg_iovlen = static_cast<int>(count);
signed_size_type result = ::recvmsg(s, &msg, in_flags);
get_last_error(ec, result < 0);
if (result >= 0)
out_flags = msg.msg_flags;
else
out_flags = 0;
return result;
#endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__)
}
size_t sync_recvmsg(socket_type s, state_type state,
buf* bufs, size_t count, int in_flags, int& out_flags,
asio::error_code& ec)
{
if (s == invalid_socket)
{
ec = asio::error::bad_descriptor;
return 0;
}
// Read some data.
for (;;)
{
// Try to complete the operation without blocking.
signed_size_type bytes = socket_ops::recvmsg(
s, bufs, count, in_flags, out_flags, ec);
// Check if operation succeeded.
if (bytes >= 0)
return bytes;
// Operation failed.
if ((state & user_set_non_blocking)
|| (ec != asio::error::would_block
&& ec != asio::error::try_again))
return 0;
// Wait for socket to become ready.
if (socket_ops::poll_read(s, 0, -1, ec) < 0)
return 0;
}
}
#if defined(ASIO_HAS_IOCP)
void complete_iocp_recvmsg(
const weak_cancel_token_type& cancel_token,
asio::error_code& ec)
{
// Map non-portable errors to their portable counterparts.
if (ec.value() == ERROR_NETNAME_DELETED)
{
if (cancel_token.expired())
ec = asio::error::operation_aborted;
else
ec = asio::error::connection_reset;
}
else if (ec.value() == ERROR_PORT_UNREACHABLE)
{
ec = asio::error::connection_refused;
}
else if (ec.value() == WSAEMSGSIZE || ec.value() == ERROR_MORE_DATA)
{
asio::error::clear(ec);
}
}
#else // defined(ASIO_HAS_IOCP)
bool non_blocking_recvmsg(socket_type s,
buf* bufs, size_t count, int in_flags, int& out_flags,
asio::error_code& ec, size_t& bytes_transferred)
{
for (;;)
{
// Read some data.
signed_size_type bytes = socket_ops::recvmsg(
s, bufs, count, in_flags, out_flags, ec);
// Check if operation succeeded.
if (bytes >= 0)
{
bytes_transferred = bytes;
return true;
}
// Retry operation if interrupted by signal.
if (ec == asio::error::interrupted)
continue;
// Check if we need to run the operation again.
if (ec == asio::error::would_block
|| ec == asio::error::try_again)
return false;
// Operation failed.
bytes_transferred = 0;
return true;
}
}
#endif // defined(ASIO_HAS_IOCP)
signed_size_type send(socket_type s, const buf* bufs, size_t count,
int flags, asio::error_code& ec)
{
#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)
// Send the data.
DWORD send_buf_count = static_cast<DWORD>(count);
DWORD bytes_transferred = 0;
DWORD send_flags = flags;
int result = ::WSASend(s, const_cast<buf*>(bufs),
send_buf_count, &bytes_transferred, send_flags, 0, 0);
get_last_error(ec, true);
if (ec.value() == ERROR_NETNAME_DELETED)
ec = asio::error::connection_reset;
else if (ec.value() == ERROR_PORT_UNREACHABLE)
ec = asio::error::connection_refused;
if (result != 0)
return socket_error_retval;
asio::error::clear(ec);
return bytes_transferred;
#else // defined(ASIO_WINDOWS) || defined(__CYGWIN__)
msghdr msg = msghdr();
msg.msg_iov = const_cast<buf*>(bufs);
msg.msg_iovlen = static_cast<int>(count);
#if defined(ASIO_HAS_MSG_NOSIGNAL)
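  // Where supported, suppress SIGPIPE when the peer has closed the
  // connection; the failure is reported via the error code instead.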
flags |= MSG_NOSIGNAL;
#endif // defined(ASIO_HAS_MSG_NOSIGNAL)
signed_size_type result = ::sendmsg(s, &msg, flags);
get_last_error(ec, result < 0);
return result;
#endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__)
}
signed_size_type send1(socket_type s, const void* data, size_t size,
int flags, asio::error_code& ec)
{
#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)
// Send the data.
WSABUF buf;
buf.buf = const_cast<char*>(static_cast<const char*>(data));
buf.len = static_cast<ULONG>(size);
DWORD bytes_transferred = 0;
DWORD send_flags = flags;
int result = ::WSASend(s, &buf, 1,
&bytes_transferred, send_flags, 0, 0);
get_last_error(ec, true);
if (ec.value() == ERROR_NETNAME_DELETED)
ec = asio::error::connection_reset;
else if (ec.value() == ERROR_PORT_UNREACHABLE)
ec = asio::error::connection_refused;
if (result != 0)
return socket_error_retval;
asio::error::clear(ec);
return bytes_transferred;
#else // defined(ASIO_WINDOWS) || defined(__CYGWIN__)
#if defined(ASIO_HAS_MSG_NOSIGNAL)
flags |= MSG_NOSIGNAL;
#endif // defined(ASIO_HAS_MSG_NOSIGNAL)
signed_size_type result = ::send(s,
static_cast<const char*>(data), size, flags);
get_last_error(ec, result < 0);
return result;
#endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__)
}
size_t sync_send(socket_type s, state_type state, const buf* bufs,
size_t count, int flags, bool all_empty, asio::error_code& ec)
{
if (s == invalid_socket)
{
ec = asio::error::bad_descriptor;
return 0;
}
// A request to write 0 bytes to a stream is a no-op.
if (all_empty && (state & stream_oriented))
{
asio::error::clear(ec);
return 0;
}
  // Write some data.
for (;;)
{
// Try to complete the operation without blocking.
signed_size_type bytes = socket_ops::send(s, bufs, count, flags, ec);
// Check if operation succeeded.
if (bytes >= 0)
return bytes;
// Operation failed.
if ((state & user_set_non_blocking)
|| (ec != asio::error::would_block
&& ec != asio::error::try_again))
return 0;
// Wait for socket to become ready.
if (socket_ops::poll_write(s, 0, -1, ec) < 0)
return 0;
}
}
size_t sync_send1(socket_type s, state_type state, const void* data,
size_t size, int flags, asio::error_code& ec)
{
if (s == invalid_socket)
{
ec = asio::error::bad_descriptor;
return 0;
}
// A request to write 0 bytes to a stream is a no-op.
if (size == 0 && (state & stream_oriented))
{
asio::error::clear(ec);
return 0;
}
  // Write some data.
for (;;)
{
// Try to complete the operation without blocking.
signed_size_type bytes = socket_ops::send1(s, data, size, flags, ec);
// Check if operation succeeded.
if (bytes >= 0)
return bytes;
// Operation failed.
if ((state & user_set_non_blocking)
|| (ec != asio::error::would_block
&& ec != asio::error::try_again))
return 0;
// Wait for socket to become ready.
if (socket_ops::poll_write(s, 0, -1, ec) < 0)
return 0;
}
}
#if defined(ASIO_HAS_IOCP)
void complete_iocp_send(
const weak_cancel_token_type& cancel_token,
asio::error_code& ec)
{
// Map non-portable errors to their portable counterparts.
if (ec.value() == ERROR_NETNAME_DELETED)
{
if (cancel_token.expired())
ec = asio::error::operation_aborted;
else
ec = asio::error::connection_reset;
}
else if (ec.value() == ERROR_PORT_UNREACHABLE)
{
ec = asio::error::connection_refused;
}
}
#else // defined(ASIO_HAS_IOCP)
bool non_blocking_send(socket_type s,
const buf* bufs, size_t count, int flags,
asio::error_code& ec, size_t& bytes_transferred)
{
for (;;)
{
// Write some data.
signed_size_type bytes = socket_ops::send(s, bufs, count, flags, ec);
// Check if operation succeeded.
if (bytes >= 0)
{
bytes_transferred = bytes;
return true;
}
// Retry operation if interrupted by signal.
if (ec == asio::error::interrupted)
continue;
// Check if we need to run the operation again.
if (ec == asio::error::would_block
|| ec == asio::error::try_again)
return false;
// Operation failed.
bytes_transferred = 0;
return true;
}
}
bool non_blocking_send1(socket_type s,
const void* data, size_t size, int flags,
asio::error_code& ec, size_t& bytes_transferred)
{
for (;;)
{
// Write some data.
signed_size_type bytes = socket_ops::send1(s, data, size, flags, ec);
// Check if operation succeeded.
if (bytes >= 0)
{
bytes_transferred = bytes;
return true;
}
// Retry operation if interrupted by signal.
if (ec == asio::error::interrupted)
continue;
// Check if we need to run the operation again.
if (ec == asio::error::would_block
|| ec == asio::error::try_again)
return false;
// Operation failed.
bytes_transferred = 0;
return true;
}
}
#endif // defined(ASIO_HAS_IOCP)
signed_size_type sendto(socket_type s, const buf* bufs,
size_t count, int flags, const void* addr,
std::size_t addrlen, asio::error_code& ec)
{
#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)
// Send the data.
DWORD send_buf_count = static_cast<DWORD>(count);
DWORD bytes_transferred = 0;
int result = ::WSASendTo(s, const_cast<buf*>(bufs),
send_buf_count, &bytes_transferred, flags,
static_cast<const socket_addr_type*>(addr),
static_cast<int>(addrlen), 0, 0);
get_last_error(ec, true);
if (ec.value() == ERROR_NETNAME_DELETED)
ec = asio::error::connection_reset;
else if (ec.value() == ERROR_PORT_UNREACHABLE)
ec = asio::error::connection_refused;
if (result != 0)
return socket_error_retval;
asio::error::clear(ec);
return bytes_transferred;
#else // defined(ASIO_WINDOWS) || defined(__CYGWIN__)
msghdr msg = msghdr();
init_msghdr_msg_name(msg.msg_name, addr);
msg.msg_namelen = static_cast<int>(addrlen);
msg.msg_iov = const_cast<buf*>(bufs);
msg.msg_iovlen = static_cast<int>(count);
#if defined(ASIO_HAS_MSG_NOSIGNAL)
flags |= MSG_NOSIGNAL;
#endif // defined(ASIO_HAS_MSG_NOSIGNAL)
signed_size_type result = ::sendmsg(s, &msg, flags);
get_last_error(ec, result < 0);
return result;
#endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__)
}
template <typename SockLenType>
inline signed_size_type call_sendto(SockLenType msghdr::*,
socket_type s, const void* data, size_t size, int flags,
const void* addr, std::size_t addrlen)
{
return ::sendto(s, static_cast<char*>(const_cast<void*>(data)), size, flags,
static_cast<const socket_addr_type*>(addr), (SockLenType)addrlen);
}
signed_size_type sendto1(socket_type s, const void* data,
size_t size, int flags, const void* addr,
std::size_t addrlen, asio::error_code& ec)
{
#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)
// Send the data.
WSABUF buf;
buf.buf = const_cast<char*>(static_cast<const char*>(data));
buf.len = static_cast<ULONG>(size);
DWORD bytes_transferred = 0;
int result = ::WSASendTo(s, &buf, 1, &bytes_transferred, flags,
static_cast<const socket_addr_type*>(addr),
static_cast<int>(addrlen), 0, 0);
get_last_error(ec, true);
if (ec.value() == ERROR_NETNAME_DELETED)
ec = asio::error::connection_reset;
else if (ec.value() == ERROR_PORT_UNREACHABLE)
ec = asio::error::connection_refused;
if (result != 0)
return socket_error_retval;
asio::error::clear(ec);
return bytes_transferred;
#else // defined(ASIO_WINDOWS) || defined(__CYGWIN__)
#if defined(ASIO_HAS_MSG_NOSIGNAL)
flags |= MSG_NOSIGNAL;
#endif // defined(ASIO_HAS_MSG_NOSIGNAL)
signed_size_type result = call_sendto(&msghdr::msg_namelen,
s, data, size, flags, addr, addrlen);
get_last_error(ec, result < 0);
return result;
#endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__)
}
size_t sync_sendto(socket_type s, state_type state,
const buf* bufs, size_t count, int flags, const void* addr,
std::size_t addrlen, asio::error_code& ec)
{
if (s == invalid_socket)
{
ec = asio::error::bad_descriptor;
return 0;
}
// Write some data.
for (;;)
{
// Try to complete the operation without blocking.
signed_size_type bytes = socket_ops::sendto(
s, bufs, count, flags, addr, addrlen, ec);
// Check if operation succeeded.
if (bytes >= 0)
return bytes;
// Operation failed.
if ((state & user_set_non_blocking)
|| (ec != asio::error::would_block
&& ec != asio::error::try_again))
return 0;
// Wait for socket to become ready.
if (socket_ops::poll_write(s, 0, -1, ec) < 0)
return 0;
}
}
size_t sync_sendto1(socket_type s, state_type state,
const void* data, size_t size, int flags, const void* addr,
std::size_t addrlen, asio::error_code& ec)
{
if (s == invalid_socket)
{
ec = asio::error::bad_descriptor;
return 0;
}
// Write some data.
for (;;)
{
// Try to complete the operation without blocking.
signed_size_type bytes = socket_ops::sendto1(
s, data, size, flags, addr, addrlen, ec);
// Check if operation succeeded.
if (bytes >= 0)
return bytes;
// Operation failed.
if ((state & user_set_non_blocking)
|| (ec != asio::error::would_block
&& ec != asio::error::try_again))
return 0;
// Wait for socket to become ready.
if (socket_ops::poll_write(s, 0, -1, ec) < 0)
return 0;
}
}
#if !defined(ASIO_HAS_IOCP)
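// Attempt the send without blocking. Returns true if the operation has
// completed (successfully or with an error), or false if it should be retried
// when the socket next becomes writable.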
bool non_blocking_sendto(socket_type s,
const buf* bufs, size_t count, int flags,
const void* addr, std::size_t addrlen,
asio::error_code& ec, size_t& bytes_transferred)
{
for (;;)
{
// Write some data.
signed_size_type bytes = socket_ops::sendto(
s, bufs, count, flags, addr, addrlen, ec);
// Check if operation succeeded.
if (bytes >= 0)
{
bytes_transferred = bytes;
return true;
}
// Retry operation if interrupted by signal.
if (ec == asio::error::interrupted)
continue;
// Check if we need to run the operation again.
if (ec == asio::error::would_block
|| ec == asio::error::try_again)
return false;
// Operation failed.
bytes_transferred = 0;
return true;
}
}
bool non_blocking_sendto1(socket_type s,
const void* data, size_t size, int flags,
const void* addr, std::size_t addrlen,
asio::error_code& ec, size_t& bytes_transferred)
{
for (;;)
{
// Write some data.
signed_size_type bytes = socket_ops::sendto1(
s, data, size, flags, addr, addrlen, ec);
// Check if operation succeeded.
if (bytes >= 0)
{
bytes_transferred = bytes;
return true;
}
// Retry operation if interrupted by signal.
if (ec == asio::error::interrupted)
continue;
// Check if we need to run the operation again.
if (ec == asio::error::would_block
|| ec == asio::error::try_again)
return false;
// Operation failed.
bytes_transferred = 0;
return true;
}
}
#endif // !defined(ASIO_HAS_IOCP)
socket_type socket(int af, int type, int protocol,
asio::error_code& ec)
{
#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)
socket_type s = ::WSASocketW(af, type, protocol, 0, 0, WSA_FLAG_OVERLAPPED);
get_last_error(ec, s == invalid_socket);
if (s == invalid_socket)
return s;
if (af == ASIO_OS_DEF(AF_INET6))
{
// Try to enable the POSIX default behaviour of having IPV6_V6ONLY set to
// false. This will only succeed on Windows Vista and later versions of
// Windows, where a dual-stack IPv4/v6 implementation is available.
DWORD optval = 0;
::setsockopt(s, IPPROTO_IPV6, IPV6_V6ONLY,
reinterpret_cast<const char*>(&optval), sizeof(optval));
}
return s;
#elif defined(__MACH__) && defined(__APPLE__) || defined(__FreeBSD__)
socket_type s = ::socket(af, type, protocol);
get_last_error(ec, s == invalid_socket);
if (s == invalid_socket)
return s;
int optval = 1;
int result = ::setsockopt(s, SOL_SOCKET,
SO_NOSIGPIPE, &optval, sizeof(optval));
get_last_error(ec, result != 0);
if (result != 0)
{
::close(s);
return invalid_socket;
}
return s;
#else
int s = ::socket(af, type, protocol);
get_last_error(ec, s < 0);
return s;
#endif
}
template <typename SockLenType>
inline int call_setsockopt(SockLenType msghdr::*,
socket_type s, int level, int optname,
const void* optval, std::size_t optlen)
{
return ::setsockopt(s, level, optname,
(const char*)optval, (SockLenType)optlen);
}
int setsockopt(socket_type s, state_type& state, int level, int optname,
const void* optval, std::size_t optlen, asio::error_code& ec)
{
if (s == invalid_socket)
{
ec = asio::error::bad_descriptor;
return socket_error_retval;
}
if (level == custom_socket_option_level && optname == always_fail_option)
{
ec = asio::error::invalid_argument;
return socket_error_retval;
}
if (level == custom_socket_option_level
&& optname == enable_connection_aborted_option)
{
if (optlen != sizeof(int))
{
ec = asio::error::invalid_argument;
return socket_error_retval;
}
if (*static_cast<const int*>(optval))
state |= enable_connection_aborted;
else
state &= ~enable_connection_aborted;
asio::error::clear(ec);
return 0;
}
if (level == SOL_SOCKET && optname == SO_LINGER)
state |= user_set_linger;
#if defined(__BORLANDC__)
// Mysteriously, using the getsockopt and setsockopt functions directly with
// Borland C++ results in incorrect values being set and read. The bug can be
// worked around by using function addresses resolved with GetProcAddress.
if (HMODULE winsock_module = ::GetModuleHandleA("ws2_32"))
{
typedef int (WSAAPI *sso_t)(SOCKET, int, int, const char*, int);
if (sso_t sso = (sso_t)::GetProcAddress(winsock_module, "setsockopt"))
{
int result = sso(s, level, optname,
reinterpret_cast<const char*>(optval),
static_cast<int>(optlen));
get_last_error(ec, result != 0);
return result;
}
}
ec = asio::error::fault;
return socket_error_retval;
#else // defined(__BORLANDC__)
int result = call_setsockopt(&msghdr::msg_namelen,
s, level, optname, optval, optlen);
get_last_error(ec, result != 0);
if (result == 0)
{
#if defined(__MACH__) && defined(__APPLE__) \
|| defined(__NetBSD__) || defined(__FreeBSD__) \
|| defined(__OpenBSD__) || defined(__QNX__)
// To implement portable behaviour for SO_REUSEADDR with UDP sockets we
// need to also set SO_REUSEPORT on BSD-based platforms.
if ((state & datagram_oriented)
&& level == SOL_SOCKET && optname == SO_REUSEADDR)
{
call_setsockopt(&msghdr::msg_namelen, s,
SOL_SOCKET, SO_REUSEPORT, optval, optlen);
}
#endif
}
return result;
#endif // defined(__BORLANDC__)
}
template <typename SockLenType>
inline int call_getsockopt(SockLenType msghdr::*,
socket_type s, int level, int optname,
void* optval, std::size_t* optlen)
{
SockLenType tmp_optlen = (SockLenType)*optlen;
int result = ::getsockopt(s, level, optname, (char*)optval, &tmp_optlen);
*optlen = (std::size_t)tmp_optlen;
return result;
}
int getsockopt(socket_type s, state_type state, int level, int optname,
void* optval, size_t* optlen, asio::error_code& ec)
{
if (s == invalid_socket)
{
ec = asio::error::bad_descriptor;
return socket_error_retval;
}
if (level == custom_socket_option_level && optname == always_fail_option)
{
ec = asio::error::invalid_argument;
return socket_error_retval;
}
if (level == custom_socket_option_level
&& optname == enable_connection_aborted_option)
{
if (*optlen != sizeof(int))
{
ec = asio::error::invalid_argument;
return socket_error_retval;
}
*static_cast<int*>(optval) = (state & enable_connection_aborted) ? 1 : 0;
asio::error::clear(ec);
return 0;
}
#if defined(__BORLANDC__)
// Mysteriously, using the getsockopt and setsockopt functions directly with
// Borland C++ results in incorrect values being set and read. The bug can be
// worked around by using function addresses resolved with GetProcAddress.
if (HMODULE winsock_module = ::GetModuleHandleA("ws2_32"))
{
typedef int (WSAAPI *gso_t)(SOCKET, int, int, char*, int*);
if (gso_t gso = (gso_t)::GetProcAddress(winsock_module, "getsockopt"))
{
int tmp_optlen = static_cast<int>(*optlen);
int result = gso(s, level, optname,
reinterpret_cast<char*>(optval), &tmp_optlen);
get_last_error(ec, result != 0);
*optlen = static_cast<size_t>(tmp_optlen);
if (result != 0 && level == IPPROTO_IPV6 && optname == IPV6_V6ONLY
&& ec.value() == WSAENOPROTOOPT && *optlen == sizeof(DWORD))
{
// Dual-stack IPv4/v6 sockets, and the IPV6_V6ONLY socket option, are
// only supported on Windows Vista and later. To simplify program logic
// we will fake success of getting this option and specify that the
// value is non-zero (i.e. true). This corresponds to the behavior of
// IPv6 sockets on Windows platforms pre-Vista.
*static_cast<DWORD*>(optval) = 1;
asio::error::clear(ec);
}
return result;
}
}
ec = asio::error::fault;
return socket_error_retval;
#elif defined(ASIO_WINDOWS) || defined(__CYGWIN__)
int result = call_getsockopt(&msghdr::msg_namelen,
s, level, optname, optval, optlen);
get_last_error(ec, result != 0);
if (result != 0 && level == IPPROTO_IPV6 && optname == IPV6_V6ONLY
&& ec.value() == WSAENOPROTOOPT && *optlen == sizeof(DWORD))
{
// Dual-stack IPv4/v6 sockets, and the IPV6_V6ONLY socket option, are only
// supported on Windows Vista and later. To simplify program logic we will
// fake success of getting this option and specify that the value is
// non-zero (i.e. true). This corresponds to the behavior of IPv6 sockets
// on Windows platforms pre-Vista.
*static_cast<DWORD*>(optval) = 1;
asio::error::clear(ec);
}
return result;
#else // defined(ASIO_WINDOWS) || defined(__CYGWIN__)
int result = call_getsockopt(&msghdr::msg_namelen,
s, level, optname, optval, optlen);
get_last_error(ec, result != 0);
#if defined(__linux__)
if (result == 0 && level == SOL_SOCKET && *optlen == sizeof(int)
&& (optname == SO_SNDBUF || optname == SO_RCVBUF))
{
// On Linux, setting SO_SNDBUF or SO_RCVBUF to N actually causes the kernel
// to set the buffer size to N*2. Linux puts additional stuff into the
// buffers so that only about half is actually available to the application.
// The retrieved value is divided by 2 here to make it appear as though the
// correct value has been set.
*static_cast<int*>(optval) /= 2;
}
#endif // defined(__linux__)
return result;
#endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__)
}
template <typename SockLenType>
inline int call_getpeername(SockLenType msghdr::*,
socket_type s, void* addr, std::size_t* addrlen)
{
SockLenType tmp_addrlen = (SockLenType)*addrlen;
int result = ::getpeername(s,
static_cast<socket_addr_type*>(addr), &tmp_addrlen);
*addrlen = (std::size_t)tmp_addrlen;
return result;
}
int getpeername(socket_type s, void* addr, std::size_t* addrlen,
bool cached, asio::error_code& ec)
{
if (s == invalid_socket)
{
ec = asio::error::bad_descriptor;
return socket_error_retval;
}
#if defined(ASIO_WINDOWS) && !defined(ASIO_WINDOWS_APP) \
|| defined(__CYGWIN__)
if (cached)
{
// Check if socket is still connected.
DWORD connect_time = 0;
size_t connect_time_len = sizeof(connect_time);
if (socket_ops::getsockopt(s, 0, SOL_SOCKET, SO_CONNECT_TIME,
&connect_time, &connect_time_len, ec) == socket_error_retval)
{
return socket_error_retval;
}
if (connect_time == 0xFFFFFFFF)
{
ec = asio::error::not_connected;
return socket_error_retval;
}
// The cached value is still valid.
asio::error::clear(ec);
return 0;
}
#else // defined(ASIO_WINDOWS) && !defined(ASIO_WINDOWS_APP)
// || defined(__CYGWIN__)
(void)cached;
#endif // defined(ASIO_WINDOWS) && !defined(ASIO_WINDOWS_APP)
// || defined(__CYGWIN__)
int result = call_getpeername(&msghdr::msg_namelen, s, addr, addrlen);
get_last_error(ec, result != 0);
return result;
}
template <typename SockLenType>
inline int call_getsockname(SockLenType msghdr::*,
socket_type s, void* addr, std::size_t* addrlen)
{
SockLenType tmp_addrlen = (SockLenType)*addrlen;
int result = ::getsockname(s,
static_cast<socket_addr_type*>(addr), &tmp_addrlen);
*addrlen = (std::size_t)tmp_addrlen;
return result;
}
int getsockname(socket_type s, void* addr,
std::size_t* addrlen, asio::error_code& ec)
{
if (s == invalid_socket)
{
ec = asio::error::bad_descriptor;
return socket_error_retval;
}
int result = call_getsockname(&msghdr::msg_namelen, s, addr, addrlen);
get_last_error(ec, result != 0);
return result;
}
int ioctl(socket_type s, state_type& state, int cmd,
ioctl_arg_type* arg, asio::error_code& ec)
{
if (s == invalid_socket)
{
ec = asio::error::bad_descriptor;
return socket_error_retval;
}
#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)
int result = ::ioctlsocket(s, cmd, arg);
#elif defined(__MACH__) && defined(__APPLE__) \
|| defined(__NetBSD__) || defined(__FreeBSD__) || defined(__OpenBSD__)
int result = ::ioctl(s, static_cast<unsigned int>(cmd), arg);
#else
int result = ::ioctl(s, cmd, arg);
#endif
get_last_error(ec, result < 0);
if (result >= 0)
{
// When updating the non-blocking mode we always perform the ioctl syscall,
// even if the flags would otherwise indicate that the socket is already in
// the correct state. This ensures that the underlying socket is put into
// the state that has been requested by the user. If the ioctl syscall was
// successful then we need to update the flags to match.
if (cmd == static_cast<int>(FIONBIO))
{
if (*arg)
{
state |= user_set_non_blocking;
}
else
{
// Clearing the non-blocking mode always overrides any internally-set
// non-blocking flag. Any subsequent asynchronous operations will need
// to re-enable non-blocking I/O.
state &= ~(user_set_non_blocking | internal_non_blocking);
}
}
}
return result;
}
int select(int nfds, fd_set* readfds, fd_set* writefds,
fd_set* exceptfds, timeval* timeout, asio::error_code& ec)
{
#if defined(__EMSCRIPTEN__)
exceptfds = 0;
#endif // defined(__EMSCRIPTEN__)
#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)
if (!readfds && !writefds && !exceptfds && timeout)
{
DWORD milliseconds = timeout->tv_sec * 1000 + timeout->tv_usec / 1000;
if (milliseconds == 0)
milliseconds = 1; // Force context switch.
::Sleep(milliseconds);
asio::error::clear(ec);
return 0;
}
// The select() call allows timeout values measured in microseconds, but the
// system clock (as wrapped by boost::posix_time::microsec_clock) typically
// has a resolution of 10 milliseconds. This can lead to a spinning select
// reactor, meaning increased CPU usage, when waiting for the earliest
// scheduled timeout if it's less than 10 milliseconds away. To avoid a tight
// spin we'll use a minimum timeout of 1 millisecond.
if (timeout && timeout->tv_sec == 0
&& timeout->tv_usec > 0 && timeout->tv_usec < 1000)
timeout->tv_usec = 1000;
#endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__)
#if defined(__hpux) && defined(__SELECT)
timespec ts;
ts.tv_sec = timeout ? timeout->tv_sec : 0;
ts.tv_nsec = timeout ? timeout->tv_usec * 1000 : 0;
int result = ::pselect(nfds, readfds,
writefds, exceptfds, timeout ? &ts : 0, 0);
#else
int result = ::select(nfds, readfds, writefds, exceptfds, timeout);
#endif
get_last_error(ec, result < 0);
return result;
}
int poll_read(socket_type s, state_type state,
int msec, asio::error_code& ec)
{
if (s == invalid_socket)
{
ec = asio::error::bad_descriptor;
return socket_error_retval;
}
#if defined(ASIO_WINDOWS) \
|| defined(__CYGWIN__) \
|| defined(__SYMBIAN32__)
fd_set fds;
FD_ZERO(&fds);
FD_SET(s, &fds);
timeval timeout_obj;
timeval* timeout;
if (state & user_set_non_blocking)
{
timeout_obj.tv_sec = 0;
timeout_obj.tv_usec = 0;
timeout = &timeout_obj;
}
else if (msec >= 0)
{
timeout_obj.tv_sec = msec / 1000;
timeout_obj.tv_usec = (msec % 1000) * 1000;
timeout = &timeout_obj;
}
else
timeout = 0;
int result = ::select(s + 1, &fds, 0, 0, timeout);
get_last_error(ec, result < 0);
#else // defined(ASIO_WINDOWS)
// || defined(__CYGWIN__)
// || defined(__SYMBIAN32__)
pollfd fds;
fds.fd = s;
fds.events = POLLIN;
fds.revents = 0;
int timeout = (state & user_set_non_blocking) ? 0 : msec;
int result = ::poll(&fds, 1, timeout);
get_last_error(ec, result < 0);
#endif // defined(ASIO_WINDOWS)
// || defined(__CYGWIN__)
// || defined(__SYMBIAN32__)
if (result == 0)
if (state & user_set_non_blocking)
ec = asio::error::would_block;
return result;
}
int poll_write(socket_type s, state_type state,
int msec, asio::error_code& ec)
{
if (s == invalid_socket)
{
ec = asio::error::bad_descriptor;
return socket_error_retval;
}
#if defined(ASIO_WINDOWS) \
|| defined(__CYGWIN__) \
|| defined(__SYMBIAN32__)
fd_set fds;
FD_ZERO(&fds);
FD_SET(s, &fds);
timeval timeout_obj;
timeval* timeout;
if (state & user_set_non_blocking)
{
timeout_obj.tv_sec = 0;
timeout_obj.tv_usec = 0;
timeout = &timeout_obj;
}
else if (msec >= 0)
{
timeout_obj.tv_sec = msec / 1000;
timeout_obj.tv_usec = (msec % 1000) * 1000;
timeout = &timeout_obj;
}
else
timeout = 0;
int result = ::select(s + 1, 0, &fds, 0, timeout);
get_last_error(ec, result < 0);
#else // defined(ASIO_WINDOWS)
// || defined(__CYGWIN__)
// || defined(__SYMBIAN32__)
pollfd fds;
fds.fd = s;
fds.events = POLLOUT;
fds.revents = 0;
int timeout = (state & user_set_non_blocking) ? 0 : msec;
int result = ::poll(&fds, 1, timeout);
get_last_error(ec, result < 0);
#endif // defined(ASIO_WINDOWS)
// || defined(__CYGWIN__)
// || defined(__SYMBIAN32__)
if (result == 0)
if (state & user_set_non_blocking)
ec = asio::error::would_block;
return result;
}
int poll_error(socket_type s, state_type state,
int msec, asio::error_code& ec)
{
if (s == invalid_socket)
{
ec = asio::error::bad_descriptor;
return socket_error_retval;
}
#if defined(ASIO_WINDOWS) \
|| defined(__CYGWIN__) \
|| defined(__SYMBIAN32__)
fd_set fds;
FD_ZERO(&fds);
FD_SET(s, &fds);
timeval timeout_obj;
timeval* timeout;
if (state & user_set_non_blocking)
{
timeout_obj.tv_sec = 0;
timeout_obj.tv_usec = 0;
timeout = &timeout_obj;
}
else if (msec >= 0)
{
timeout_obj.tv_sec = msec / 1000;
timeout_obj.tv_usec = (msec % 1000) * 1000;
timeout = &timeout_obj;
}
else
timeout = 0;
int result = ::select(s + 1, 0, 0, &fds, timeout);
get_last_error(ec, result < 0);
#else // defined(ASIO_WINDOWS)
// || defined(__CYGWIN__)
// || defined(__SYMBIAN32__)
pollfd fds;
fds.fd = s;
fds.events = POLLPRI | POLLERR | POLLHUP;
fds.revents = 0;
int timeout = (state & user_set_non_blocking) ? 0 : msec;
int result = ::poll(&fds, 1, timeout);
get_last_error(ec, result < 0);
#endif // defined(ASIO_WINDOWS)
// || defined(__CYGWIN__)
// || defined(__SYMBIAN32__)
if (result == 0)
if (state & user_set_non_blocking)
ec = asio::error::would_block;
return result;
}
int poll_connect(socket_type s, int msec, asio::error_code& ec)
{
if (s == invalid_socket)
{
ec = asio::error::bad_descriptor;
return socket_error_retval;
}
#if defined(ASIO_WINDOWS) \
|| defined(__CYGWIN__) \
|| defined(__SYMBIAN32__)
fd_set write_fds;
FD_ZERO(&write_fds);
FD_SET(s, &write_fds);
fd_set except_fds;
FD_ZERO(&except_fds);
FD_SET(s, &except_fds);
timeval timeout_obj;
timeval* timeout;
if (msec >= 0)
{
timeout_obj.tv_sec = msec / 1000;
timeout_obj.tv_usec = (msec % 1000) * 1000;
timeout = &timeout_obj;
}
else
timeout = 0;
int result = ::select(s + 1, 0, &write_fds, &except_fds, timeout);
get_last_error(ec, result < 0);
return result;
#else // defined(ASIO_WINDOWS)
// || defined(__CYGWIN__)
// || defined(__SYMBIAN32__)
pollfd fds;
fds.fd = s;
fds.events = POLLOUT;
fds.revents = 0;
int result = ::poll(&fds, 1, msec);
get_last_error(ec, result < 0);
return result;
#endif // defined(ASIO_WINDOWS)
// || defined(__CYGWIN__)
// || defined(__SYMBIAN32__)
}
#endif // !defined(ASIO_WINDOWS_RUNTIME)
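// Convert a binary address to its string representation, appending the scope
// id for IPv6 addresses where applicable.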
const char* inet_ntop(int af, const void* src, char* dest, size_t length,
unsigned long scope_id, asio::error_code& ec)
{
clear_last_error();
#if defined(ASIO_WINDOWS_RUNTIME)
using namespace std; // For sprintf.
const unsigned char* bytes = static_cast<const unsigned char*>(src);
if (af == ASIO_OS_DEF(AF_INET))
{
sprintf_s(dest, length, "%u.%u.%u.%u",
bytes[0], bytes[1], bytes[2], bytes[3]);
return dest;
}
else if (af == ASIO_OS_DEF(AF_INET6))
{
size_t n = 0, b = 0, z = 0;
while (n < length && b < 16)
{
if (bytes[b] == 0 && bytes[b + 1] == 0 && z == 0)
{
do b += 2; while (b < 16 && bytes[b] == 0 && bytes[b + 1] == 0);
n += sprintf_s(dest + n, length - n, ":%s", b < 16 ? "" : ":"), ++z;
}
else
{
n += sprintf_s(dest + n, length - n, "%s%x", b ? ":" : "",
(static_cast<u_long_type>(bytes[b]) << 8) | bytes[b + 1]);
b += 2;
}
}
if (scope_id)
n += sprintf_s(dest + n, length - n, "%%%lu", scope_id);
return dest;
}
else
{
ec = asio::error::address_family_not_supported;
return 0;
}
#elif defined(ASIO_WINDOWS) || defined(__CYGWIN__)
using namespace std; // For memcpy.
if (af != ASIO_OS_DEF(AF_INET) && af != ASIO_OS_DEF(AF_INET6))
{
ec = asio::error::address_family_not_supported;
return 0;
}
union
{
socket_addr_type base;
sockaddr_storage_type storage;
sockaddr_in4_type v4;
sockaddr_in6_type v6;
} address;
DWORD address_length;
if (af == ASIO_OS_DEF(AF_INET))
{
address_length = sizeof(sockaddr_in4_type);
address.v4.sin_family = ASIO_OS_DEF(AF_INET);
address.v4.sin_port = 0;
memcpy(&address.v4.sin_addr, src, sizeof(in4_addr_type));
}
else // AF_INET6
{
address_length = sizeof(sockaddr_in6_type);
address.v6.sin6_family = ASIO_OS_DEF(AF_INET6);
address.v6.sin6_port = 0;
address.v6.sin6_flowinfo = 0;
address.v6.sin6_scope_id = scope_id;
memcpy(&address.v6.sin6_addr, src, sizeof(in6_addr_type));
}
DWORD string_length = static_cast<DWORD>(length);
#if defined(BOOST_NO_ANSI_APIS) || (defined(_MSC_VER) && (_MSC_VER >= 1800))
LPWSTR string_buffer = (LPWSTR)_alloca(length * sizeof(WCHAR));
int result = ::WSAAddressToStringW(&address.base,
address_length, 0, string_buffer, &string_length);
get_last_error(ec, true);
::WideCharToMultiByte(CP_ACP, 0, string_buffer, -1,
dest, static_cast<int>(length), 0, 0);
#else
int result = ::WSAAddressToStringA(&address.base,
address_length, 0, dest, &string_length);
get_last_error(ec, true);
#endif
// Windows may set error code on success.
if (result != socket_error_retval)
asio::error::clear(ec);
// Windows may not set an error code on failure.
else if (result == socket_error_retval && !ec)
ec = asio::error::invalid_argument;
return result == socket_error_retval ? 0 : dest;
#else // defined(ASIO_WINDOWS) || defined(__CYGWIN__)
const char* result = ::inet_ntop(af, src, dest, static_cast<int>(length));
get_last_error(ec, true);
if (result == 0 && !ec)
ec = asio::error::invalid_argument;
if (result != 0 && af == ASIO_OS_DEF(AF_INET6) && scope_id != 0)
{
using namespace std; // For strcat and sprintf.
char if_name[(IF_NAMESIZE > 21 ? IF_NAMESIZE : 21) + 1] = "%";
const in6_addr_type* ipv6_address = static_cast<const in6_addr_type*>(src);
bool is_link_local = ((ipv6_address->s6_addr[0] == 0xfe)
&& ((ipv6_address->s6_addr[1] & 0xc0) == 0x80));
bool is_multicast_link_local = ((ipv6_address->s6_addr[0] == 0xff)
&& ((ipv6_address->s6_addr[1] & 0x0f) == 0x02));
if ((!is_link_local && !is_multicast_link_local)
|| if_indextoname(static_cast<unsigned>(scope_id), if_name + 1) == 0)
#if defined(ASIO_HAS_SNPRINTF)
snprintf(if_name + 1, sizeof(if_name) - 1, "%lu", scope_id);
#elif defined(ASIO_HAS_SECURE_RTL)
sprintf_s(if_name + 1, sizeof(if_name) - 1, "%lu", scope_id);
#else // defined(ASIO_HAS_SECURE_RTL)
sprintf(if_name + 1, "%lu", scope_id);
#endif // defined(ASIO_HAS_SECURE_RTL)
strcat(dest, if_name);
}
return result;
#endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__)
}
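// Convert a string to a binary address. Any scope id present in an IPv6
// address string is extracted and returned via scope_id.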
int inet_pton(int af, const char* src, void* dest,
unsigned long* scope_id, asio::error_code& ec)
{
clear_last_error();
#if defined(ASIO_WINDOWS_RUNTIME)
using namespace std; // For sscanf.
unsigned char* bytes = static_cast<unsigned char*>(dest);
if (af == ASIO_OS_DEF(AF_INET))
{
unsigned int b0, b1, b2, b3;
if (sscanf_s(src, "%u.%u.%u.%u", &b0, &b1, &b2, &b3) != 4)
{
ec = asio::error::invalid_argument;
return -1;
}
if (b0 > 255 || b1 > 255 || b2 > 255 || b3 > 255)
{
ec = asio::error::invalid_argument;
return -1;
}
bytes[0] = static_cast<unsigned char>(b0);
bytes[1] = static_cast<unsigned char>(b1);
bytes[2] = static_cast<unsigned char>(b2);
bytes[3] = static_cast<unsigned char>(b3);
asio::error::clear(ec);
return 1;
}
else if (af == ASIO_OS_DEF(AF_INET6))
{
unsigned char* bytes = static_cast<unsigned char*>(dest);
std::memset(bytes, 0, 16);
unsigned char back_bytes[16] = { 0 };
int num_front_bytes = 0, num_back_bytes = 0;
const char* p = src;
enum { fword, fcolon, bword, scope, done } state = fword;
unsigned long current_word = 0;
while (state != done)
{
if (current_word > 0xFFFF)
{
ec = asio::error::invalid_argument;
return -1;
}
switch (state)
{
case fword:
if (*p >= '0' && *p <= '9')
current_word = current_word * 16 + *p++ - '0';
else if (*p >= 'a' && *p <= 'f')
current_word = current_word * 16 + *p++ - 'a' + 10;
else if (*p >= 'A' && *p <= 'F')
current_word = current_word * 16 + *p++ - 'A' + 10;
else
{
if (num_front_bytes == 16)
{
ec = asio::error::invalid_argument;
return -1;
}
bytes[num_front_bytes++] = (current_word >> 8) & 0xFF;
bytes[num_front_bytes++] = current_word & 0xFF;
current_word = 0;
if (*p == ':')
state = fcolon, ++p;
else if (*p == '%')
state = scope, ++p;
else if (*p == 0)
state = done;
else
{
ec = asio::error::invalid_argument;
return -1;
}
}
break;
case fcolon:
if (*p == ':')
state = bword, ++p;
else
state = fword;
break;
case bword:
if (*p >= '0' && *p <= '9')
current_word = current_word * 16 + *p++ - '0';
else if (*p >= 'a' && *p <= 'f')
current_word = current_word * 16 + *p++ - 'a' + 10;
else if (*p >= 'A' && *p <= 'F')
current_word = current_word * 16 + *p++ - 'A' + 10;
else
{
if (num_front_bytes + num_back_bytes == 16)
{
ec = asio::error::invalid_argument;
return -1;
}
back_bytes[num_back_bytes++] = (current_word >> 8) & 0xFF;
back_bytes[num_back_bytes++] = current_word & 0xFF;
current_word = 0;
if (*p == ':')
state = bword, ++p;
else if (*p == '%')
state = scope, ++p;
else if (*p == 0)
state = done;
else
{
ec = asio::error::invalid_argument;
return -1;
}
}
break;
case scope:
if (*p >= '0' && *p <= '9')
current_word = current_word * 10 + *p++ - '0';
else if (*p == 0)
*scope_id = current_word, state = done;
else
{
ec = asio::error::invalid_argument;
return -1;
}
break;
default:
break;
}
}
for (int i = 0; i < num_back_bytes; ++i)
bytes[16 - num_back_bytes + i] = back_bytes[i];
asio::error::clear(ec);
return 1;
}
else
{
ec = asio::error::address_family_not_supported;
return -1;
}
#elif defined(ASIO_WINDOWS) || defined(__CYGWIN__)
using namespace std; // For memcpy and strcmp.
if (af != ASIO_OS_DEF(AF_INET) && af != ASIO_OS_DEF(AF_INET6))
{
ec = asio::error::address_family_not_supported;
return -1;
}
union
{
socket_addr_type base;
sockaddr_storage_type storage;
sockaddr_in4_type v4;
sockaddr_in6_type v6;
} address;
int address_length = sizeof(sockaddr_storage_type);
#if defined(BOOST_NO_ANSI_APIS) || (defined(_MSC_VER) && (_MSC_VER >= 1800))
int num_wide_chars = static_cast<int>(strlen(src)) + 1;
LPWSTR wide_buffer = (LPWSTR)_alloca(num_wide_chars * sizeof(WCHAR));
::MultiByteToWideChar(CP_ACP, 0, src, -1, wide_buffer, num_wide_chars);
int result = ::WSAStringToAddressW(wide_buffer,
af, 0, &address.base, &address_length);
get_last_error(ec, true);
#else
int result = ::WSAStringToAddressA(const_cast<char*>(src),
af, 0, &address.base, &address_length);
get_last_error(ec, true);
#endif
if (af == ASIO_OS_DEF(AF_INET))
{
if (result != socket_error_retval)
{
memcpy(dest, &address.v4.sin_addr, sizeof(in4_addr_type));
asio::error::clear(ec);
}
else if (strcmp(src, "255.255.255.255") == 0)
{
static_cast<in4_addr_type*>(dest)->s_addr = INADDR_NONE;
asio::error::clear(ec);
}
}
else // AF_INET6
{
if (result != socket_error_retval)
{
memcpy(dest, &address.v6.sin6_addr, sizeof(in6_addr_type));
if (scope_id)
*scope_id = address.v6.sin6_scope_id;
asio::error::clear(ec);
}
}
// Windows may not set an error code on failure.
if (result == socket_error_retval && !ec)
ec = asio::error::invalid_argument;
if (result != socket_error_retval)
asio::error::clear(ec);
return result == socket_error_retval ? -1 : 1;
#else // defined(ASIO_WINDOWS) || defined(__CYGWIN__)
using namespace std; // For strchr, memcpy and atoi.
// On some platforms, inet_pton fails if an address string contains a scope
// id. Detect and remove the scope id before passing the string to inet_pton.
const bool is_v6 = (af == ASIO_OS_DEF(AF_INET6));
const char* if_name = is_v6 ? strchr(src, '%') : 0;
char src_buf[max_addr_v6_str_len + 1];
const char* src_ptr = src;
if (if_name != 0)
{
if (if_name - src > max_addr_v6_str_len)
{
ec = asio::error::invalid_argument;
return 0;
}
memcpy(src_buf, src, if_name - src);
src_buf[if_name - src] = 0;
src_ptr = src_buf;
}
int result = ::inet_pton(af, src_ptr, dest);
get_last_error(ec, true);
if (result <= 0 && !ec)
ec = asio::error::invalid_argument;
if (result > 0 && is_v6 && scope_id)
{
using namespace std; // For strchr and atoi.
*scope_id = 0;
if (if_name != 0)
{
in6_addr_type* ipv6_address = static_cast<in6_addr_type*>(dest);
bool is_link_local = ((ipv6_address->s6_addr[0] == 0xfe)
&& ((ipv6_address->s6_addr[1] & 0xc0) == 0x80));
bool is_multicast_link_local = ((ipv6_address->s6_addr[0] == 0xff)
&& ((ipv6_address->s6_addr[1] & 0x0f) == 0x02));
if (is_link_local || is_multicast_link_local)
*scope_id = if_nametoindex(if_name + 1);
if (*scope_id == 0)
*scope_id = atoi(if_name + 1);
}
}
return result;
#endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__)
}
int gethostname(char* name, int namelen, asio::error_code& ec)
{
#if defined(ASIO_WINDOWS_RUNTIME)
try
{
using namespace Windows::Foundation::Collections;
using namespace Windows::Networking;
using namespace Windows::Networking::Connectivity;
IVectorView<HostName^>^ hostnames = NetworkInformation::GetHostNames();
for (unsigned i = 0; i < hostnames->Size; ++i)
{
HostName^ hostname = hostnames->GetAt(i);
if (hostname->Type == HostNameType::DomainName)
{
std::wstring_convert<std::codecvt_utf8<wchar_t>> converter;
std::string raw_name = converter.to_bytes(hostname->RawName->Data());
if (namelen > 0 && raw_name.size() < static_cast<std::size_t>(namelen))
{
strcpy_s(name, namelen, raw_name.c_str());
return 0;
}
}
}
return -1;
}
catch (Platform::Exception^ e)
{
ec = asio::error_code(e->HResult,
asio::system_category());
return -1;
}
#else // defined(ASIO_WINDOWS_RUNTIME)
int result = ::gethostname(name, namelen);
get_last_error(ec, result != 0);
return result;
#endif // defined(ASIO_WINDOWS_RUNTIME)
}
#if !defined(ASIO_WINDOWS_RUNTIME)
#if !defined(ASIO_HAS_GETADDRINFO)
// The following functions are only needed for emulation of getaddrinfo and
// getnameinfo.
inline asio::error_code translate_netdb_error(int error)
{
switch (error)
{
case 0:
return asio::error_code();
case HOST_NOT_FOUND:
return asio::error::host_not_found;
case TRY_AGAIN:
return asio::error::host_not_found_try_again;
case NO_RECOVERY:
return asio::error::no_recovery;
case NO_DATA:
return asio::error::no_data;
default:
ASIO_ASSERT(false);
return asio::error::invalid_argument;
}
}
inline hostent* gethostbyaddr(const char* addr, int length, int af,
hostent* result, char* buffer, int buflength, asio::error_code& ec)
{
#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)
(void)(buffer);
(void)(buflength);
hostent* retval = ::gethostbyaddr(addr, length, af);
get_last_error(ec, !retval);
if (!retval)
return 0;
*result = *retval;
return retval;
#elif defined(__sun) || defined(__QNX__)
int error = 0;
hostent* retval = ::gethostbyaddr_r(addr, length,
af, result, buffer, buflength, &error);
get_last_error(ec, !retval);
if (error)
ec = translate_netdb_error(error);
return retval;
#elif defined(__MACH__) && defined(__APPLE__)
(void)(buffer);
(void)(buflength);
int error = 0;
hostent* retval = ::getipnodebyaddr(addr, length, af, &error);
get_last_error(ec, !retval);
if (error)
ec = translate_netdb_error(error);
if (!retval)
return 0;
*result = *retval;
return retval;
#else
hostent* retval = 0;
int error = 0;
clear_last_error();
::gethostbyaddr_r(addr, length, af, result,
buffer, buflength, &retval, &error);
get_last_error(ec, true);
if (error)
ec = translate_netdb_error(error);
return retval;
#endif
}
inline hostent* gethostbyname(const char* name, int af, struct hostent* result,
char* buffer, int buflength, int ai_flags, asio::error_code& ec)
{
#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)
(void)(buffer);
(void)(buflength);
(void)(ai_flags);
if (af != ASIO_OS_DEF(AF_INET))
{
ec = asio::error::address_family_not_supported;
return 0;
}
hostent* retval = ::gethostbyname(name);
get_last_error(ec, !retval);
if (!retval)
return 0;
*result = *retval;
return result;
#elif defined(__sun) || defined(__QNX__)
(void)(ai_flags);
if (af != ASIO_OS_DEF(AF_INET))
{
ec = asio::error::address_family_not_supported;
return 0;
}
int error = 0;
hostent* retval = ::gethostbyname_r(name, result, buffer, buflength, &error);
get_last_error(ec, !retval);
if (error)
ec = translate_netdb_error(error);
return retval;
#elif defined(__MACH__) && defined(__APPLE__)
(void)(buffer);
(void)(buflength);
int error = 0;
hostent* retval = ::getipnodebyname(name, af, ai_flags, &error);
get_last_error(ec, !retval);
if (error)
ec = translate_netdb_error(error);
if (!retval)
return 0;
*result = *retval;
return retval;
#else
(void)(ai_flags);
if (af != ASIO_OS_DEF(AF_INET))
{
ec = asio::error::address_family_not_supported;
return 0;
}
hostent* retval = 0;
int error = 0;
clear_last_error();
::gethostbyname_r(name, result, buffer, buflength, &retval, &error);
get_last_error(ec, true);
if (error)
ec = translate_netdb_error(error);
return retval;
#endif
}
inline void freehostent(hostent* h)
{
#if defined(__MACH__) && defined(__APPLE__)
if (h)
::freehostent(h);
#else
(void)(h);
#endif
}
// Emulation of getaddrinfo based on implementation in:
// Stevens, W. R., UNIX Network Programming Vol. 1, 2nd Ed., Prentice-Hall 1998.
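// A single host/family combination to be tried by the emulated getaddrinfo.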
struct gai_search
{
const char* host;
int family;
};
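// Build the list of searches (at most two entries, IPv6 before IPv4) implied
// by the supplied host name and hints.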
inline int gai_nsearch(const char* host,
const addrinfo_type* hints, gai_search (&search)[2])
{
int search_count = 0;
if (host == 0 || host[0] == '\0')
{
if (hints->ai_flags & AI_PASSIVE)
{
// No host and AI_PASSIVE implies wildcard bind.
switch (hints->ai_family)
{
case ASIO_OS_DEF(AF_INET):
search[search_count].host = "0.0.0.0";
search[search_count].family = ASIO_OS_DEF(AF_INET);
++search_count;
break;
case ASIO_OS_DEF(AF_INET6):
search[search_count].host = "0::0";
search[search_count].family = ASIO_OS_DEF(AF_INET6);
++search_count;
break;
case ASIO_OS_DEF(AF_UNSPEC):
search[search_count].host = "0::0";
search[search_count].family = ASIO_OS_DEF(AF_INET6);
++search_count;
search[search_count].host = "0.0.0.0";
search[search_count].family = ASIO_OS_DEF(AF_INET);
++search_count;
break;
default:
break;
}
}
else
{
// No host and not AI_PASSIVE means connect to local host.
switch (hints->ai_family)
{
case ASIO_OS_DEF(AF_INET):
search[search_count].host = "localhost";
search[search_count].family = ASIO_OS_DEF(AF_INET);
++search_count;
break;
case ASIO_OS_DEF(AF_INET6):
search[search_count].host = "localhost";
search[search_count].family = ASIO_OS_DEF(AF_INET6);
++search_count;
break;
case ASIO_OS_DEF(AF_UNSPEC):
search[search_count].host = "localhost";
search[search_count].family = ASIO_OS_DEF(AF_INET6);
++search_count;
search[search_count].host = "localhost";
search[search_count].family = ASIO_OS_DEF(AF_INET);
++search_count;
break;
default:
break;
}
}
}
else
{
// Host is specified.
switch (hints->ai_family)
{
case ASIO_OS_DEF(AF_INET):
search[search_count].host = host;
search[search_count].family = ASIO_OS_DEF(AF_INET);
++search_count;
break;
case ASIO_OS_DEF(AF_INET6):
search[search_count].host = host;
search[search_count].family = ASIO_OS_DEF(AF_INET6);
++search_count;
break;
case ASIO_OS_DEF(AF_UNSPEC):
search[search_count].host = host;
search[search_count].family = ASIO_OS_DEF(AF_INET6);
++search_count;
search[search_count].host = host;
search[search_count].family = ASIO_OS_DEF(AF_INET);
++search_count;
break;
default:
break;
}
}
return search_count;
}
template <typename T>
inline T* gai_alloc(std::size_t size = sizeof(T))
{
using namespace std;
T* p = static_cast<T*>(::operator new(size, std::nothrow));
if (p)
memset(p, 0, size);
return p;
}
inline void gai_free(void* p)
{
::operator delete(p);
}
inline void gai_strcpy(char* target, const char* source, std::size_t max_size)
{
using namespace std;
#if defined(ASIO_HAS_SECURE_RTL)
strcpy_s(target, max_size, source);
#else // defined(ASIO_HAS_SECURE_RTL)
*target = 0;
if (max_size > 0)
strncat(target, source, max_size - 1);
#endif // defined(ASIO_HAS_SECURE_RTL)
}
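// Flag used to mark entries that may need to be cloned because the caller did
// not specify a socket type in the hints.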
enum { gai_clone_flag = 1 << 30 };
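// Allocate a new addrinfo entry, link it into the list, and populate its
// address from the supplied binary address and family.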
inline int gai_aistruct(addrinfo_type*** next, const addrinfo_type* hints,
const void* addr, int family)
{
using namespace std;
addrinfo_type* ai = gai_alloc<addrinfo_type>();
if (ai == 0)
return EAI_MEMORY;
ai->ai_next = 0;
**next = ai;
*next = &ai->ai_next;
ai->ai_canonname = 0;
ai->ai_socktype = hints->ai_socktype;
if (ai->ai_socktype == 0)
ai->ai_flags |= gai_clone_flag;
ai->ai_protocol = hints->ai_protocol;
ai->ai_family = family;
switch (ai->ai_family)
{
case ASIO_OS_DEF(AF_INET):
{
sockaddr_in4_type* sinptr = gai_alloc<sockaddr_in4_type>();
if (sinptr == 0)
return EAI_MEMORY;
sinptr->sin_family = ASIO_OS_DEF(AF_INET);
memcpy(&sinptr->sin_addr, addr, sizeof(in4_addr_type));
ai->ai_addr = reinterpret_cast<sockaddr*>(sinptr);
ai->ai_addrlen = sizeof(sockaddr_in4_type);
break;
}
case ASIO_OS_DEF(AF_INET6):
{
sockaddr_in6_type* sin6ptr = gai_alloc<sockaddr_in6_type>();
if (sin6ptr == 0)
return EAI_MEMORY;
sin6ptr->sin6_family = ASIO_OS_DEF(AF_INET6);
memcpy(&sin6ptr->sin6_addr, addr, sizeof(in6_addr_type));
ai->ai_addr = reinterpret_cast<sockaddr*>(sin6ptr);
ai->ai_addrlen = sizeof(sockaddr_in6_type);
break;
}
default:
break;
}
return 0;
}
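// Duplicate an addrinfo entry so that separate SOCK_STREAM and SOCK_DGRAM
// entries can be returned for the same address.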
inline addrinfo_type* gai_clone(addrinfo_type* ai)
{
using namespace std;
addrinfo_type* new_ai = gai_alloc<addrinfo_type>();
if (new_ai == 0)
return new_ai;
new_ai->ai_next = ai->ai_next;
ai->ai_next = new_ai;
new_ai->ai_flags = 0;
new_ai->ai_family = ai->ai_family;
new_ai->ai_socktype = ai->ai_socktype;
new_ai->ai_protocol = ai->ai_protocol;
new_ai->ai_canonname = 0;
new_ai->ai_addrlen = ai->ai_addrlen;
new_ai->ai_addr = gai_alloc<sockaddr>(ai->ai_addrlen);
memcpy(new_ai->ai_addr, ai->ai_addr, ai->ai_addrlen);
return new_ai;
}
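// Store the given port (in network byte order) in each matching entry,
// cloning entries where required. Returns the number of entries updated, or
// -1 if an allocation fails.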
inline int gai_port(addrinfo_type* aihead, int port, int socktype)
{
int num_found = 0;
for (addrinfo_type* ai = aihead; ai; ai = ai->ai_next)
{
if (ai->ai_flags & gai_clone_flag)
{
if (ai->ai_socktype != 0)
{
ai = gai_clone(ai);
if (ai == 0)
return -1;
// ai now points to newly cloned entry.
}
}
else if (ai->ai_socktype != socktype)
{
// Ignore if mismatch on socket type.
continue;
}
ai->ai_socktype = socktype;
switch (ai->ai_family)
{
case ASIO_OS_DEF(AF_INET):
{
sockaddr_in4_type* sinptr =
reinterpret_cast<sockaddr_in4_type*>(ai->ai_addr);
sinptr->sin_port = port;
++num_found;
break;
}
case ASIO_OS_DEF(AF_INET6):
{
sockaddr_in6_type* sin6ptr =
reinterpret_cast<sockaddr_in6_type*>(ai->ai_addr);
sin6ptr->sin6_port = port;
++num_found;
break;
}
default:
break;
}
}
return num_found;
}
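// Resolve the service string, which may be numeric or a service name, and
// apply the resulting port to the address list.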
inline int gai_serv(addrinfo_type* aihead,
const addrinfo_type* hints, const char* serv)
{
using namespace std;
int num_found = 0;
if (
#if defined(AI_NUMERICSERV)
(hints->ai_flags & AI_NUMERICSERV) ||
#endif
isdigit(static_cast<unsigned char>(serv[0])))
{
int port = htons(atoi(serv));
if (hints->ai_socktype)
{
// Caller specifies socket type.
int rc = gai_port(aihead, port, hints->ai_socktype);
if (rc < 0)
return EAI_MEMORY;
num_found += rc;
}
else
{
// Caller does not specify socket type.
int rc = gai_port(aihead, port, SOCK_STREAM);
if (rc < 0)
return EAI_MEMORY;
num_found += rc;
rc = gai_port(aihead, port, SOCK_DGRAM);
if (rc < 0)
return EAI_MEMORY;
num_found += rc;
}
}
else
{
// Try service name with TCP first, then UDP.
if (hints->ai_socktype == 0 || hints->ai_socktype == SOCK_STREAM)
{
servent* sptr = getservbyname(serv, "tcp");
if (sptr != 0)
{
int rc = gai_port(aihead, sptr->s_port, SOCK_STREAM);
if (rc < 0)
return EAI_MEMORY;
num_found += rc;
}
}
if (hints->ai_socktype == 0 || hints->ai_socktype == SOCK_DGRAM)
{
servent* sptr = getservbyname(serv, "udp");
if (sptr != 0)
{
int rc = gai_port(aihead, sptr->s_port, SOCK_DGRAM);
if (rc < 0)
return EAI_MEMORY;
num_found += rc;
}
}
}
if (num_found == 0)
{
if (hints->ai_socktype == 0)
{
// All calls to getservbyname() failed.
return EAI_NONAME;
}
else
{
// Service not supported for socket type.
return EAI_SERVICE;
}
}
return 0;
}
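// Basic validation of the host, service and hints before resolution begins.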
inline int gai_echeck(const char* host, const char* service,
int flags, int family, int socktype, int protocol)
{
(void)(flags);
(void)(protocol);
// Host or service must be specified.
if (host == 0 || host[0] == '\0')
if (service == 0 || service[0] == '\0')
return EAI_NONAME;
// Check combination of family and socket type.
switch (family)
{
case ASIO_OS_DEF(AF_UNSPEC):
break;
case ASIO_OS_DEF(AF_INET):
case ASIO_OS_DEF(AF_INET6):
if (service != 0 && service[0] != '\0')
if (socktype != 0 && socktype != SOCK_STREAM && socktype != SOCK_DGRAM)
return EAI_SOCKTYPE;
break;
default:
return EAI_FAMILY;
}
return 0;
}
inline void freeaddrinfo_emulation(addrinfo_type* aihead)
{
addrinfo_type* ai = aihead;
while (ai)
{
gai_free(ai->ai_addr);
gai_free(ai->ai_canonname);
addrinfo_type* ainext = ai->ai_next;
gai_free(ai);
ai = ainext;
}
}
inline int getaddrinfo_emulation(const char* host, const char* service,
const addrinfo_type* hintsp, addrinfo_type** result)
{
// Set up linked list of addrinfo structures.
addrinfo_type* aihead = 0;
addrinfo_type** ainext = &aihead;
char* canon = 0;
// Supply default hints if not specified by caller.
addrinfo_type hints = addrinfo_type();
hints.ai_family = ASIO_OS_DEF(AF_UNSPEC);
if (hintsp)
hints = *hintsp;
// If the resolution is not specifically for AF_INET6, remove the AI_V4MAPPED
// and AI_ALL flags.
#if defined(AI_V4MAPPED)
if (hints.ai_family != ASIO_OS_DEF(AF_INET6))
hints.ai_flags &= ~AI_V4MAPPED;
#endif
#if defined(AI_ALL)
if (hints.ai_family != ASIO_OS_DEF(AF_INET6))
hints.ai_flags &= ~AI_ALL;
#endif
// Basic error checking.
int rc = gai_echeck(host, service, hints.ai_flags, hints.ai_family,
hints.ai_socktype, hints.ai_protocol);
if (rc != 0)
{
freeaddrinfo_emulation(aihead);
return rc;
}
gai_search search[2];
int search_count = gai_nsearch(host, &hints, search);
for (gai_search* sptr = search; sptr < search + search_count; ++sptr)
{
// Check for IPv4 dotted decimal string.
in4_addr_type inaddr;
asio::error_code ec;
if (socket_ops::inet_pton(ASIO_OS_DEF(AF_INET),
sptr->host, &inaddr, 0, ec) == 1)
{
if (hints.ai_family != ASIO_OS_DEF(AF_UNSPEC)
&& hints.ai_family != ASIO_OS_DEF(AF_INET))
{
freeaddrinfo_emulation(aihead);
gai_free(canon);
return EAI_FAMILY;
}
if (sptr->family == ASIO_OS_DEF(AF_INET))
{
rc = gai_aistruct(&ainext, &hints, &inaddr, ASIO_OS_DEF(AF_INET));
if (rc != 0)
{
freeaddrinfo_emulation(aihead);
gai_free(canon);
return rc;
}
}
continue;
}
// Check for IPv6 hex string.
in6_addr_type in6addr;
if (socket_ops::inet_pton(ASIO_OS_DEF(AF_INET6),
sptr->host, &in6addr, 0, ec) == 1)
{
if (hints.ai_family != ASIO_OS_DEF(AF_UNSPEC)
&& hints.ai_family != ASIO_OS_DEF(AF_INET6))
{
freeaddrinfo_emulation(aihead);
gai_free(canon);
return EAI_FAMILY;
}
if (sptr->family == ASIO_OS_DEF(AF_INET6))
{
rc = gai_aistruct(&ainext, &hints, &in6addr,
ASIO_OS_DEF(AF_INET6));
if (rc != 0)
{
freeaddrinfo_emulation(aihead);
gai_free(canon);
return rc;
}
}
continue;
}
// Look up hostname.
hostent hent;
char hbuf[8192] = "";
hostent* hptr = socket_ops::gethostbyname(sptr->host,
sptr->family, &hent, hbuf, sizeof(hbuf), hints.ai_flags, ec);
if (hptr == 0)
{
if (search_count == 2)
{
// Failure is OK if there are multiple searches.
continue;
}
freeaddrinfo_emulation(aihead);
gai_free(canon);
if (ec == asio::error::host_not_found)
return EAI_NONAME;
if (ec == asio::error::host_not_found_try_again)
return EAI_AGAIN;
if (ec == asio::error::no_recovery)
return EAI_FAIL;
if (ec == asio::error::no_data)
return EAI_NONAME;
return EAI_NONAME;
}
// Check for address family mismatch if one was specified.
if (hints.ai_family != ASIO_OS_DEF(AF_UNSPEC)
&& hints.ai_family != hptr->h_addrtype)
{
freeaddrinfo_emulation(aihead);
gai_free(canon);
socket_ops::freehostent(hptr);
return EAI_FAMILY;
}
// Save canonical name first time.
if (host != 0 && host[0] != '\0' && hptr->h_name && hptr->h_name[0]
&& (hints.ai_flags & AI_CANONNAME) && canon == 0)
{
std::size_t canon_len = strlen(hptr->h_name) + 1;
canon = gai_alloc<char>(canon_len);
if (canon == 0)
{
freeaddrinfo_emulation(aihead);
socket_ops::freehostent(hptr);
return EAI_MEMORY;
}
gai_strcpy(canon, hptr->h_name, canon_len);
}
// Create an addrinfo structure for each returned address.
for (char** ap = hptr->h_addr_list; *ap; ++ap)
{
rc = gai_aistruct(&ainext, &hints, *ap, hptr->h_addrtype);
if (rc != 0)
{
freeaddrinfo_emulation(aihead);
gai_free(canon);
socket_ops::freehostent(hptr);
return EAI_FAMILY;
}
}
socket_ops::freehostent(hptr);
}
// Check if we found anything.
if (aihead == 0)
{
gai_free(canon);
return EAI_NONAME;
}
// Return canonical name in first entry.
if (host != 0 && host[0] != '\0' && (hints.ai_flags & AI_CANONNAME))
{
if (canon)
{
aihead->ai_canonname = canon;
canon = 0;
}
else
{
std::size_t canonname_len = strlen(search[0].host) + 1;
aihead->ai_canonname = gai_alloc<char>(canonname_len);
if (aihead->ai_canonname == 0)
{
freeaddrinfo_emulation(aihead);
return EAI_MEMORY;
}
gai_strcpy(aihead->ai_canonname, search[0].host, canonname_len);
}
}
gai_free(canon);
// Process the service name.
if (service != 0 && service[0] != '\0')
{
rc = gai_serv(aihead, &hints, service);
if (rc != 0)
{
freeaddrinfo_emulation(aihead);
return rc;
}
}
// Return result to caller.
*result = aihead;
return 0;
}
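// Emulation of getnameinfo based on gethostbyaddr and getservbyport.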
inline asio::error_code getnameinfo_emulation(
const socket_addr_type* sa, std::size_t salen, char* host,
std::size_t hostlen, char* serv, std::size_t servlen, int flags,
asio::error_code& ec)
{
using namespace std;
const char* addr;
size_t addr_len;
unsigned short port;
switch (sa->sa_family)
{
case ASIO_OS_DEF(AF_INET):
if (salen != sizeof(sockaddr_in4_type))
{
return ec = asio::error::invalid_argument;
}
addr = reinterpret_cast<const char*>(
&reinterpret_cast<const sockaddr_in4_type*>(sa)->sin_addr);
addr_len = sizeof(in4_addr_type);
port = reinterpret_cast<const sockaddr_in4_type*>(sa)->sin_port;
break;
case ASIO_OS_DEF(AF_INET6):
if (salen != sizeof(sockaddr_in6_type))
{
return ec = asio::error::invalid_argument;
}
addr = reinterpret_cast<const char*>(
&reinterpret_cast<const sockaddr_in6_type*>(sa)->sin6_addr);
addr_len = sizeof(in6_addr_type);
port = reinterpret_cast<const sockaddr_in6_type*>(sa)->sin6_port;
break;
default:
return ec = asio::error::address_family_not_supported;
}
if (host && hostlen > 0)
{
if (flags & NI_NUMERICHOST)
{
if (socket_ops::inet_ntop(sa->sa_family, addr, host, hostlen, 0, ec) == 0)
{
return ec;
}
}
else
{
hostent hent;
char hbuf[8192] = "";
hostent* hptr = socket_ops::gethostbyaddr(addr,
static_cast<int>(addr_len), sa->sa_family,
&hent, hbuf, sizeof(hbuf), ec);
if (hptr && hptr->h_name && hptr->h_name[0] != '\0')
{
if (flags & NI_NOFQDN)
{
char* dot = strchr(hptr->h_name, '.');
if (dot)
{
*dot = 0;
}
}
gai_strcpy(host, hptr->h_name, hostlen);
socket_ops::freehostent(hptr);
}
else
{
socket_ops::freehostent(hptr);
if (flags & NI_NAMEREQD)
{
return ec = asio::error::host_not_found;
}
if (socket_ops::inet_ntop(sa->sa_family,
addr, host, hostlen, 0, ec) == 0)
{
return ec;
}
}
}
}
if (serv && servlen > 0)
{
if (flags & NI_NUMERICSERV)
{
if (servlen < 6)
{
return ec = asio::error::no_buffer_space;
}
#if defined(ASIO_HAS_SNPRINTF)
snprintf(serv, servlen, "%u", ntohs(port));
#elif defined(ASIO_HAS_SECURE_RTL)
sprintf_s(serv, servlen, "%u", ntohs(port));
#else // defined(ASIO_HAS_SECURE_RTL)
sprintf(serv, "%u", ntohs(port));
#endif // defined(ASIO_HAS_SECURE_RTL)
}
else
{
#if defined(ASIO_HAS_PTHREADS)
static ::pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
::pthread_mutex_lock(&mutex);
#endif // defined(ASIO_HAS_PTHREADS)
servent* sptr = ::getservbyport(port, (flags & NI_DGRAM) ? "udp" : 0);
if (sptr && sptr->s_name && sptr->s_name[0] != '\0')
{
gai_strcpy(serv, sptr->s_name, servlen);
}
else
{
if (servlen < 6)
{
return ec = asio::error::no_buffer_space;
}
#if defined(ASIO_HAS_SNPRINTF)
snprintf(serv, servlen, "%u", ntohs(port));
#elif defined(ASIO_HAS_SECURE_RTL)
sprintf_s(serv, servlen, "%u", ntohs(port));
#else // defined(ASIO_HAS_SECURE_RTL)
sprintf(serv, "%u", ntohs(port));
#endif // defined(ASIO_HAS_SECURE_RTL)
}
#if defined(ASIO_HAS_PTHREADS)
::pthread_mutex_unlock(&mutex);
#endif // defined(ASIO_HAS_PTHREADS)
}
}
asio::error::clear(ec);
return ec;
}
#endif // !defined(ASIO_HAS_GETADDRINFO)
inline asio::error_code translate_addrinfo_error(int error)
{
switch (error)
{
case 0:
return asio::error_code();
case EAI_AGAIN:
return asio::error::host_not_found_try_again;
case EAI_BADFLAGS:
return asio::error::invalid_argument;
case EAI_FAIL:
return asio::error::no_recovery;
case EAI_FAMILY:
return asio::error::address_family_not_supported;
case EAI_MEMORY:
return asio::error::no_memory;
case EAI_NONAME:
#if defined(EAI_ADDRFAMILY)
case EAI_ADDRFAMILY:
#endif
#if defined(EAI_NODATA) && (EAI_NODATA != EAI_NONAME)
case EAI_NODATA:
#endif
return asio::error::host_not_found;
case EAI_SERVICE:
return asio::error::service_not_found;
case EAI_SOCKTYPE:
return asio::error::socket_type_not_supported;
default: // Possibly the non-portable EAI_SYSTEM.
#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)
return asio::error_code(
WSAGetLastError(), asio::error::get_system_category());
#else
return asio::error_code(
errno, asio::error::get_system_category());
#endif
}
}
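// Wrapper for getaddrinfo. Falls back to the emulation above when the native
// function is not available and, on macOS, works around the native function
// ignoring numeric service strings by filling in the port explicitly.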
asio::error_code getaddrinfo(const char* host,
const char* service, const addrinfo_type& hints,
addrinfo_type** result, asio::error_code& ec)
{
host = (host && *host) ? host : 0;
service = (service && *service) ? service : 0;
clear_last_error();
#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)
# if defined(ASIO_HAS_GETADDRINFO)
// Building for Windows XP, Windows Server 2003, or later.
int error = ::getaddrinfo(host, service, &hints, result);
return ec = translate_addrinfo_error(error);
# else
// Building for Windows 2000 or earlier.
typedef int (WSAAPI *gai_t)(const char*,
const char*, const addrinfo_type*, addrinfo_type**);
if (HMODULE winsock_module = ::GetModuleHandleA("ws2_32"))
{
if (gai_t gai = (gai_t)::GetProcAddress(winsock_module, "getaddrinfo"))
{
int error = gai(host, service, &hints, result);
return ec = translate_addrinfo_error(error);
}
}
int error = getaddrinfo_emulation(host, service, &hints, result);
return ec = translate_addrinfo_error(error);
# endif
#elif !defined(ASIO_HAS_GETADDRINFO)
int error = getaddrinfo_emulation(host, service, &hints, result);
return ec = translate_addrinfo_error(error);
#else
int error = ::getaddrinfo(host, service, &hints, result);
#if defined(__MACH__) && defined(__APPLE__)
using namespace std; // For isdigit and atoi.
if (error == 0 && service && isdigit(static_cast<unsigned char>(service[0])))
{
u_short_type port = host_to_network_short(atoi(service));
for (addrinfo_type* ai = *result; ai; ai = ai->ai_next)
{
switch (ai->ai_family)
{
case ASIO_OS_DEF(AF_INET):
{
sockaddr_in4_type* sinptr =
reinterpret_cast<sockaddr_in4_type*>(ai->ai_addr);
if (sinptr->sin_port == 0)
sinptr->sin_port = port;
break;
}
case ASIO_OS_DEF(AF_INET6):
{
sockaddr_in6_type* sin6ptr =
reinterpret_cast<sockaddr_in6_type*>(ai->ai_addr);
if (sin6ptr->sin6_port == 0)
sin6ptr->sin6_port = port;
break;
}
default:
break;
}
}
}
#endif
return ec = translate_addrinfo_error(error);
#endif
}
asio::error_code background_getaddrinfo(
const weak_cancel_token_type& cancel_token, const char* host,
const char* service, const addrinfo_type& hints,
addrinfo_type** result, asio::error_code& ec)
{
if (cancel_token.expired())
ec = asio::error::operation_aborted;
else
socket_ops::getaddrinfo(host, service, hints, result, ec);
return ec;
}
void freeaddrinfo(addrinfo_type* ai)
{
#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)
# if defined(ASIO_HAS_GETADDRINFO)
// Building for Windows XP, Windows Server 2003, or later.
::freeaddrinfo(ai);
# else
// Building for Windows 2000 or earlier.
typedef int (WSAAPI *fai_t)(addrinfo_type*);
if (HMODULE winsock_module = ::GetModuleHandleA("ws2_32"))
{
if (fai_t fai = (fai_t)::GetProcAddress(winsock_module, "freeaddrinfo"))
{
fai(ai);
return;
}
}
freeaddrinfo_emulation(ai);
# endif
#elif !defined(ASIO_HAS_GETADDRINFO)
freeaddrinfo_emulation(ai);
#else
::freeaddrinfo(ai);
#endif
}
asio::error_code getnameinfo(const void* addr,
std::size_t addrlen, char* host, std::size_t hostlen,
char* serv, std::size_t servlen, int flags, asio::error_code& ec)
{
#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)
# if defined(ASIO_HAS_GETADDRINFO)
// Building for Windows XP, Windows Server 2003, or later.
clear_last_error();
int error = ::getnameinfo(static_cast<const socket_addr_type*>(addr),
static_cast<socklen_t>(addrlen), host, static_cast<DWORD>(hostlen),
serv, static_cast<DWORD>(servlen), flags);
return ec = translate_addrinfo_error(error);
# else
// Building for Windows 2000 or earlier.
typedef int (WSAAPI *gni_t)(const socket_addr_type*,
int, char*, DWORD, char*, DWORD, int);
if (HMODULE winsock_module = ::GetModuleHandleA("ws2_32"))
{
if (gni_t gni = (gni_t)::GetProcAddress(winsock_module, "getnameinfo"))
{
clear_last_error();
int error = gni(static_cast<const socket_addr_type*>(addr),
static_cast<int>(addrlen), host, static_cast<DWORD>(hostlen),
serv, static_cast<DWORD>(servlen), flags);
return ec = translate_addrinfo_error(error);
}
}
clear_last_error();
return getnameinfo_emulation(static_cast<const socket_addr_type*>(addr),
addrlen, host, hostlen, serv, servlen, flags, ec);
# endif
#elif !defined(ASIO_HAS_GETADDRINFO)
using namespace std; // For memcpy.
sockaddr_storage_type tmp_addr;
memcpy(&tmp_addr, addr, addrlen);
addr = &tmp_addr;
clear_last_error();
return getnameinfo_emulation(static_cast<const socket_addr_type*>(addr),
addrlen, host, hostlen, serv, servlen, flags, ec);
#else
clear_last_error();
int error = ::getnameinfo(static_cast<const socket_addr_type*>(addr),
addrlen, host, hostlen, serv, servlen, flags);
return ec = translate_addrinfo_error(error);
#endif
}
asio::error_code sync_getnameinfo(const void* addr,
std::size_t addrlen, char* host, std::size_t hostlen, char* serv,
std::size_t servlen, int sock_type, asio::error_code& ec)
{
// First try resolving with the service name. If that fails try resolving
// but allow the service to be returned as a number.
int flags = (sock_type == SOCK_DGRAM) ? NI_DGRAM : 0;
socket_ops::getnameinfo(addr, addrlen, host,
hostlen, serv, servlen, flags, ec);
if (ec)
{
socket_ops::getnameinfo(addr, addrlen, host, hostlen,
serv, servlen, flags | NI_NUMERICSERV, ec);
}
return ec;
}
asio::error_code background_getnameinfo(
const weak_cancel_token_type& cancel_token,
const void* addr, std::size_t addrlen,
char* host, std::size_t hostlen, char* serv,
std::size_t servlen, int sock_type, asio::error_code& ec)
{
if (cancel_token.expired())
{
ec = asio::error::operation_aborted;
}
else
{
// First try resolving with the service name. If that fails try resolving
// but allow the service to be returned as a number.
int flags = (sock_type == SOCK_DGRAM) ? NI_DGRAM : 0;
socket_ops::getnameinfo(addr, addrlen, host,
hostlen, serv, servlen, flags, ec);
if (ec)
{
socket_ops::getnameinfo(addr, addrlen, host, hostlen,
serv, servlen, flags | NI_NUMERICSERV, ec);
}
}
return ec;
}
#endif // !defined(ASIO_WINDOWS_RUNTIME)
u_long_type network_to_host_long(u_long_type value)
{
#if defined(ASIO_WINDOWS_RUNTIME)
unsigned char* value_p = reinterpret_cast<unsigned char*>(&value);
u_long_type result = (static_cast<u_long_type>(value_p[0]) << 24)
| (static_cast<u_long_type>(value_p[1]) << 16)
| (static_cast<u_long_type>(value_p[2]) << 8)
| static_cast<u_long_type>(value_p[3]);
return result;
#else // defined(ASIO_WINDOWS_RUNTIME)
return ntohl(value);
#endif // defined(ASIO_WINDOWS_RUNTIME)
}
u_long_type host_to_network_long(u_long_type value)
{
#if defined(ASIO_WINDOWS_RUNTIME)
u_long_type result;
unsigned char* result_p = reinterpret_cast<unsigned char*>(&result);
result_p[0] = static_cast<unsigned char>((value >> 24) & 0xFF);
result_p[1] = static_cast<unsigned char>((value >> 16) & 0xFF);
result_p[2] = static_cast<unsigned char>((value >> 8) & 0xFF);
result_p[3] = static_cast<unsigned char>(value & 0xFF);
return result;
#else // defined(ASIO_WINDOWS_RUNTIME)
return htonl(value);
#endif // defined(ASIO_WINDOWS_RUNTIME)
}
u_short_type network_to_host_short(u_short_type value)
{
#if defined(ASIO_WINDOWS_RUNTIME)
unsigned char* value_p = reinterpret_cast<unsigned char*>(&value);
u_short_type result = (static_cast<u_short_type>(value_p[0]) << 8)
| static_cast<u_short_type>(value_p[1]);
return result;
#else // defined(ASIO_WINDOWS_RUNTIME)
return ntohs(value);
#endif // defined(ASIO_WINDOWS_RUNTIME)
}
u_short_type host_to_network_short(u_short_type value)
{
#if defined(ASIO_WINDOWS_RUNTIME)
u_short_type result;
unsigned char* result_p = reinterpret_cast<unsigned char*>(&result);
result_p[0] = static_cast<unsigned char>((value >> 8) & 0xFF);
result_p[1] = static_cast<unsigned char>(value & 0xFF);
return result;
#else // defined(ASIO_WINDOWS_RUNTIME)
return htons(value);
#endif // defined(ASIO_WINDOWS_RUNTIME)
}
} // namespace socket_ops
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // ASIO_DETAIL_SOCKET_OPS_IPP
|
0 | repos/asio/asio/include/asio/detail | repos/asio/asio/include/asio/detail/impl/win_iocp_io_context.ipp | //
// detail/impl/win_iocp_io_context.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2024 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_WIN_IOCP_IO_CONTEXT_IPP
#define ASIO_DETAIL_IMPL_WIN_IOCP_IO_CONTEXT_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if defined(ASIO_HAS_IOCP)
#include "asio/error.hpp"
#include "asio/detail/cstdint.hpp"
#include "asio/detail/handler_alloc_helpers.hpp"
#include "asio/detail/limits.hpp"
#include "asio/detail/thread.hpp"
#include "asio/detail/throw_error.hpp"
#include "asio/detail/win_iocp_io_context.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
struct win_iocp_io_context::thread_function
{
explicit thread_function(win_iocp_io_context* s)
: this_(s)
{
}
void operator()()
{
asio::error_code ec;
this_->run(ec);
}
win_iocp_io_context* this_;
};
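// Helper whose destructor notifies the io_context that a unit of work has
// finished when the enclosing block is exited, even if the block is exited
// via an exception thrown by a completion handler.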
struct win_iocp_io_context::work_finished_on_block_exit
{
~work_finished_on_block_exit() noexcept(false)
{
io_context_->work_finished();
}
win_iocp_io_context* io_context_;
};
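// Function object run on a dedicated thread. It waits on the waitable timer
// and, each time the timer fires, posts a wake_for_dispatch completion so
// that a thread running the io_context dispatches the ready timers.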
struct win_iocp_io_context::timer_thread_function
{
void operator()()
{
while (::InterlockedExchangeAdd(&io_context_->shutdown_, 0) == 0)
{
if (::WaitForSingleObject(io_context_->waitable_timer_.handle,
INFINITE) == WAIT_OBJECT_0)
{
::InterlockedExchange(&io_context_->dispatch_required_, 1);
::PostQueuedCompletionStatus(io_context_->iocp_.handle,
0, wake_for_dispatch, 0);
}
}
}
win_iocp_io_context* io_context_;
};
win_iocp_io_context::win_iocp_io_context(
asio::execution_context& ctx, int concurrency_hint, bool own_thread)
: execution_context_service_base<win_iocp_io_context>(ctx),
iocp_(),
outstanding_work_(0),
stopped_(0),
stop_event_posted_(0),
shutdown_(0),
gqcs_timeout_(get_gqcs_timeout()),
dispatch_required_(0),
concurrency_hint_(concurrency_hint)
{
ASIO_HANDLER_TRACKING_INIT;
iocp_.handle = ::CreateIoCompletionPort(INVALID_HANDLE_VALUE, 0, 0,
static_cast<DWORD>(concurrency_hint >= 0 ? concurrency_hint : DWORD(~0)));
if (!iocp_.handle)
{
DWORD last_error = ::GetLastError();
asio::error_code ec(last_error,
asio::error::get_system_category());
asio::detail::throw_error(ec, "iocp");
}
if (own_thread)
{
::InterlockedIncrement(&outstanding_work_);
thread_.reset(new asio::detail::thread(thread_function(this)));
}
}
win_iocp_io_context::~win_iocp_io_context()
{
if (thread_.get())
{
stop();
thread_->join();
thread_.reset();
}
}
void win_iocp_io_context::shutdown()
{
::InterlockedExchange(&shutdown_, 1);
if (timer_thread_.get())
{
LARGE_INTEGER timeout;
timeout.QuadPart = 1;
::SetWaitableTimer(waitable_timer_.handle, &timeout, 1, 0, 0, FALSE);
}
if (thread_.get())
{
stop();
thread_->join();
thread_.reset();
::InterlockedDecrement(&outstanding_work_);
}
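  // Destroy any operations that are still outstanding: drain the timer
  // queues and the completed-operations list first, then pull abandoned
  // operations off the completion port until the work count reaches zero.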
while (::InterlockedExchangeAdd(&outstanding_work_, 0) > 0)
{
op_queue<win_iocp_operation> ops;
timer_queues_.get_all_timers(ops);
ops.push(completed_ops_);
if (!ops.empty())
{
while (win_iocp_operation* op = ops.front())
{
ops.pop();
::InterlockedDecrement(&outstanding_work_);
op->destroy();
}
}
else
{
DWORD bytes_transferred = 0;
dword_ptr_t completion_key = 0;
LPOVERLAPPED overlapped = 0;
::GetQueuedCompletionStatus(iocp_.handle, &bytes_transferred,
&completion_key, &overlapped, gqcs_timeout_);
if (overlapped)
{
::InterlockedDecrement(&outstanding_work_);
static_cast<win_iocp_operation*>(overlapped)->destroy();
}
}
}
if (timer_thread_.get())
{
timer_thread_->join();
timer_thread_.reset();
}
}
asio::error_code win_iocp_io_context::register_handle(
HANDLE handle, asio::error_code& ec)
{
if (::CreateIoCompletionPort(handle, iocp_.handle, 0, 0) == 0)
{
DWORD last_error = ::GetLastError();
ec = asio::error_code(last_error,
asio::error::get_system_category());
}
else
{
ec = asio::error_code();
}
return ec;
}
size_t win_iocp_io_context::run(asio::error_code& ec)
{
if (::InterlockedExchangeAdd(&outstanding_work_, 0) == 0)
{
stop();
ec = asio::error_code();
return 0;
}
win_iocp_thread_info this_thread;
thread_call_stack::context ctx(this, this_thread);
size_t n = 0;
while (do_one(INFINITE, this_thread, ec))
if (n != (std::numeric_limits<size_t>::max)())
++n;
return n;
}
size_t win_iocp_io_context::run_one(asio::error_code& ec)
{
if (::InterlockedExchangeAdd(&outstanding_work_, 0) == 0)
{
stop();
ec = asio::error_code();
return 0;
}
win_iocp_thread_info this_thread;
thread_call_stack::context ctx(this, this_thread);
return do_one(INFINITE, this_thread, ec);
}
size_t win_iocp_io_context::wait_one(long usec, asio::error_code& ec)
{
if (::InterlockedExchangeAdd(&outstanding_work_, 0) == 0)
{
stop();
ec = asio::error_code();
return 0;
}
win_iocp_thread_info this_thread;
thread_call_stack::context ctx(this, this_thread);
return do_one(usec < 0 ? INFINITE : ((usec - 1) / 1000 + 1), this_thread, ec);
}
size_t win_iocp_io_context::poll(asio::error_code& ec)
{
if (::InterlockedExchangeAdd(&outstanding_work_, 0) == 0)
{
stop();
ec = asio::error_code();
return 0;
}
win_iocp_thread_info this_thread;
thread_call_stack::context ctx(this, this_thread);
size_t n = 0;
while (do_one(0, this_thread, ec))
if (n != (std::numeric_limits<size_t>::max)())
++n;
return n;
}
size_t win_iocp_io_context::poll_one(asio::error_code& ec)
{
if (::InterlockedExchangeAdd(&outstanding_work_, 0) == 0)
{
stop();
ec = asio::error_code();
return 0;
}
win_iocp_thread_info this_thread;
thread_call_stack::context ctx(this, this_thread);
return do_one(0, this_thread, ec);
}
void win_iocp_io_context::stop()
{
if (::InterlockedExchange(&stopped_, 1) == 0)
{
if (::InterlockedExchange(&stop_event_posted_, 1) == 0)
{
if (!::PostQueuedCompletionStatus(iocp_.handle, 0, 0, 0))
{
DWORD last_error = ::GetLastError();
asio::error_code ec(last_error,
asio::error::get_system_category());
asio::detail::throw_error(ec, "pqcs");
}
}
}
}
bool win_iocp_io_context::can_dispatch()
{
return thread_call_stack::contains(this) != 0;
}
void win_iocp_io_context::capture_current_exception()
{
if (thread_info_base* this_thread = thread_call_stack::contains(this))
this_thread->capture_current_exception();
}
void win_iocp_io_context::post_deferred_completion(win_iocp_operation* op)
{
// Flag the operation as ready.
op->ready_ = 1;
// Enqueue the operation on the I/O completion port.
if (!::PostQueuedCompletionStatus(iocp_.handle, 0, 0, op))
{
// Out of resources. Put on completed queue instead.
mutex::scoped_lock lock(dispatch_mutex_);
completed_ops_.push(op);
::InterlockedExchange(&dispatch_required_, 1);
}
}
void win_iocp_io_context::post_deferred_completions(
op_queue<win_iocp_operation>& ops)
{
while (win_iocp_operation* op = ops.front())
{
ops.pop();
// Flag the operation as ready.
op->ready_ = 1;
// Enqueue the operation on the I/O completion port.
if (!::PostQueuedCompletionStatus(iocp_.handle, 0, 0, op))
{
// Out of resources. Put on completed queue instead.
mutex::scoped_lock lock(dispatch_mutex_);
completed_ops_.push(op);
completed_ops_.push(ops);
::InterlockedExchange(&dispatch_required_, 1);
}
}
}
void win_iocp_io_context::abandon_operations(
op_queue<win_iocp_operation>& ops)
{
while (win_iocp_operation* op = ops.front())
{
ops.pop();
::InterlockedDecrement(&outstanding_work_);
op->destroy();
}
}
void win_iocp_io_context::on_pending(win_iocp_operation* op)
{
if (::InterlockedCompareExchange(&op->ready_, 1, 0) == 1)
{
// Enqueue the operation on the I/O completion port.
if (!::PostQueuedCompletionStatus(iocp_.handle,
0, overlapped_contains_result, op))
{
// Out of resources. Put on completed queue instead.
mutex::scoped_lock lock(dispatch_mutex_);
completed_ops_.push(op);
::InterlockedExchange(&dispatch_required_, 1);
}
}
}
void win_iocp_io_context::on_completion(win_iocp_operation* op,
DWORD last_error, DWORD bytes_transferred)
{
// Flag that the operation is ready for invocation.
op->ready_ = 1;
// Store results in the OVERLAPPED structure.
op->Internal = reinterpret_cast<ulong_ptr_t>(
&asio::error::get_system_category());
op->Offset = last_error;
op->OffsetHigh = bytes_transferred;
// Enqueue the operation on the I/O completion port.
if (!::PostQueuedCompletionStatus(iocp_.handle,
0, overlapped_contains_result, op))
{
// Out of resources. Put on completed queue instead.
mutex::scoped_lock lock(dispatch_mutex_);
completed_ops_.push(op);
::InterlockedExchange(&dispatch_required_, 1);
}
}
void win_iocp_io_context::on_completion(win_iocp_operation* op,
const asio::error_code& ec, DWORD bytes_transferred)
{
// Flag that the operation is ready for invocation.
op->ready_ = 1;
// Store results in the OVERLAPPED structure.
op->Internal = reinterpret_cast<ulong_ptr_t>(&ec.category());
op->Offset = ec.value();
op->OffsetHigh = bytes_transferred;
// Enqueue the operation on the I/O completion port.
if (!::PostQueuedCompletionStatus(iocp_.handle,
0, overlapped_contains_result, op))
{
// Out of resources. Put on completed queue instead.
mutex::scoped_lock lock(dispatch_mutex_);
completed_ops_.push(op);
::InterlockedExchange(&dispatch_required_, 1);
}
}
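// Dequeue and run at most one operation. Returns 1 if a completion handler
// was executed, and 0 if the call timed out, the io_context was stopped, or
// an error occurred. The msec argument bounds how long the call may block on
// GetQueuedCompletionStatus.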
size_t win_iocp_io_context::do_one(DWORD msec,
win_iocp_thread_info& this_thread, asio::error_code& ec)
{
for (;;)
{
// Try to acquire responsibility for dispatching timers and completed ops.
if (::InterlockedCompareExchange(&dispatch_required_, 0, 1) == 1)
{
mutex::scoped_lock lock(dispatch_mutex_);
// Dispatch pending timers and operations.
op_queue<win_iocp_operation> ops;
ops.push(completed_ops_);
timer_queues_.get_ready_timers(ops);
post_deferred_completions(ops);
update_timeout();
}
// Get the next operation from the queue.
DWORD bytes_transferred = 0;
dword_ptr_t completion_key = 0;
LPOVERLAPPED overlapped = 0;
::SetLastError(0);
BOOL ok = ::GetQueuedCompletionStatus(iocp_.handle,
&bytes_transferred, &completion_key, &overlapped,
msec < gqcs_timeout_ ? msec : gqcs_timeout_);
DWORD last_error = ::GetLastError();
if (overlapped)
{
win_iocp_operation* op = static_cast<win_iocp_operation*>(overlapped);
asio::error_code result_ec(last_error,
asio::error::get_system_category());
// We may have been passed the last_error and bytes_transferred in the
// OVERLAPPED structure itself.
if (completion_key == overlapped_contains_result)
{
result_ec = asio::error_code(static_cast<int>(op->Offset),
*reinterpret_cast<asio::error_category*>(op->Internal));
bytes_transferred = op->OffsetHigh;
}
// Otherwise ensure any result has been saved into the OVERLAPPED
// structure.
else
{
op->Internal = reinterpret_cast<ulong_ptr_t>(&result_ec.category());
op->Offset = result_ec.value();
op->OffsetHigh = bytes_transferred;
}
// Dispatch the operation only if ready. The operation may not be ready
// if the initiating function (e.g. a call to WSARecv) has not yet
// returned. This is because the initiating function still wants access
// to the operation's OVERLAPPED structure.
if (::InterlockedCompareExchange(&op->ready_, 1, 0) == 1)
{
// Ensure the count of outstanding work is decremented on block exit.
work_finished_on_block_exit on_exit = { this };
(void)on_exit;
op->complete(this, result_ec, bytes_transferred);
this_thread.rethrow_pending_exception();
ec = asio::error_code();
return 1;
}
}
else if (!ok)
{
if (last_error != WAIT_TIMEOUT)
{
ec = asio::error_code(last_error,
asio::error::get_system_category());
return 0;
}
// If we're waiting indefinitely we need to keep going until we get a
// real handler.
if (msec == INFINITE)
continue;
ec = asio::error_code();
return 0;
}
else if (completion_key == wake_for_dispatch)
{
// We have been woken up to try to acquire responsibility for dispatching
// timers and completed operations.
}
else
{
// Indicate that there is no longer an in-flight stop event.
::InterlockedExchange(&stop_event_posted_, 0);
// The stopped_ flag is always checked to ensure that any leftover
// stop events from a previous run invocation are ignored.
if (::InterlockedExchangeAdd(&stopped_, 0) != 0)
{
// Wake up next thread that is blocked on GetQueuedCompletionStatus.
if (::InterlockedExchange(&stop_event_posted_, 1) == 0)
{
if (!::PostQueuedCompletionStatus(iocp_.handle, 0, 0, 0))
{
last_error = ::GetLastError();
ec = asio::error_code(last_error,
asio::error::get_system_category());
return 0;
}
}
ec = asio::error_code();
return 0;
}
}
}
}
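// Determine the timeout passed to GetQueuedCompletionStatus. On Windows
// Vista (major version 6) and later an infinite wait is used; older versions
// fall back to the default periodic timeout so that blocked threads wake up
// regularly.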
DWORD win_iocp_io_context::get_gqcs_timeout()
{
#if !defined(_WIN32_WINNT) || (_WIN32_WINNT < 0x0600)
OSVERSIONINFOEX osvi;
ZeroMemory(&osvi, sizeof(osvi));
osvi.dwOSVersionInfoSize = sizeof(osvi);
osvi.dwMajorVersion = 6ul;
const uint64_t condition_mask = ::VerSetConditionMask(
0, VER_MAJORVERSION, VER_GREATER_EQUAL);
if (!!::VerifyVersionInfo(&osvi, VER_MAJORVERSION, condition_mask))
return INFINITE;
return default_gqcs_timeout;
#else // !defined(_WIN32_WINNT) || (_WIN32_WINNT < 0x0600)
return INFINITE;
#endif // !defined(_WIN32_WINNT) || (_WIN32_WINNT < 0x0600)
}
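// Lazily creates the waitable timer and the dedicated timer thread the first
// time a timer queue is added.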
void win_iocp_io_context::do_add_timer_queue(timer_queue_base& queue)
{
mutex::scoped_lock lock(dispatch_mutex_);
timer_queues_.insert(&queue);
if (!waitable_timer_.handle)
{
waitable_timer_.handle = ::CreateWaitableTimer(0, FALSE, 0);
if (waitable_timer_.handle == 0)
{
DWORD last_error = ::GetLastError();
asio::error_code ec(last_error,
asio::error::get_system_category());
asio::detail::throw_error(ec, "timer");
}
LARGE_INTEGER timeout;
timeout.QuadPart = -max_timeout_usec;
timeout.QuadPart *= 10;
::SetWaitableTimer(waitable_timer_.handle,
&timeout, max_timeout_msec, 0, 0, FALSE);
}
if (!timer_thread_.get())
{
timer_thread_function thread_function = { this };
timer_thread_.reset(new thread(thread_function, 65536));
}
}
void win_iocp_io_context::do_remove_timer_queue(timer_queue_base& queue)
{
mutex::scoped_lock lock(dispatch_mutex_);
timer_queues_.erase(&queue);
}
void win_iocp_io_context::update_timeout()
{
if (timer_thread_.get())
{
// There's no point updating the waitable timer if the new timeout period
// exceeds the maximum timeout. In that case, we might as well wait for the
// existing period of the timer to expire.
long timeout_usec = timer_queues_.wait_duration_usec(max_timeout_usec);
if (timeout_usec < max_timeout_usec)
{
LARGE_INTEGER timeout;
timeout.QuadPart = -timeout_usec;
timeout.QuadPart *= 10;
::SetWaitableTimer(waitable_timer_.handle,
&timeout, max_timeout_msec, 0, 0, FALSE);
}
}
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // defined(ASIO_HAS_IOCP)
#endif // ASIO_DETAIL_IMPL_WIN_IOCP_IO_CONTEXT_IPP
|
0 | repos/asio/asio/include/asio/detail | repos/asio/asio/include/asio/detail/impl/io_uring_descriptor_service.ipp | //
// detail/impl/io_uring_descriptor_service.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2024 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_IO_URING_DESCRIPTOR_SERVICE_IPP
#define ASIO_DETAIL_IMPL_IO_URING_DESCRIPTOR_SERVICE_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if defined(ASIO_HAS_IO_URING)
#include "asio/error.hpp"
#include "asio/detail/io_uring_descriptor_service.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
io_uring_descriptor_service::io_uring_descriptor_service(
execution_context& context)
: execution_context_service_base<io_uring_descriptor_service>(context),
io_uring_service_(asio::use_service<io_uring_service>(context))
{
io_uring_service_.init_task();
}
void io_uring_descriptor_service::shutdown()
{
}
void io_uring_descriptor_service::construct(
io_uring_descriptor_service::implementation_type& impl)
{
impl.descriptor_ = -1;
impl.state_ = 0;
impl.io_object_data_ = 0;
}
void io_uring_descriptor_service::move_construct(
io_uring_descriptor_service::implementation_type& impl,
io_uring_descriptor_service::implementation_type& other_impl)
noexcept
{
impl.descriptor_ = other_impl.descriptor_;
other_impl.descriptor_ = -1;
impl.state_ = other_impl.state_;
other_impl.state_ = 0;
impl.io_object_data_ = other_impl.io_object_data_;
other_impl.io_object_data_ = 0;
}
void io_uring_descriptor_service::move_assign(
io_uring_descriptor_service::implementation_type& impl,
io_uring_descriptor_service& /*other_service*/,
io_uring_descriptor_service::implementation_type& other_impl)
{
destroy(impl);
impl.descriptor_ = other_impl.descriptor_;
other_impl.descriptor_ = -1;
impl.state_ = other_impl.state_;
other_impl.state_ = 0;
impl.io_object_data_ = other_impl.io_object_data_;
other_impl.io_object_data_ = 0;
}
void io_uring_descriptor_service::destroy(
io_uring_descriptor_service::implementation_type& impl)
{
if (is_open(impl))
{
ASIO_HANDLER_OPERATION((io_uring_service_.context(),
"descriptor", &impl, impl.descriptor_, "close"));
io_uring_service_.deregister_io_object(impl.io_object_data_);
asio::error_code ignored_ec;
descriptor_ops::close(impl.descriptor_, impl.state_, ignored_ec);
io_uring_service_.cleanup_io_object(impl.io_object_data_);
}
}
asio::error_code io_uring_descriptor_service::assign(
io_uring_descriptor_service::implementation_type& impl,
const native_handle_type& native_descriptor, asio::error_code& ec)
{
if (is_open(impl))
{
ec = asio::error::already_open;
ASIO_ERROR_LOCATION(ec);
return ec;
}
io_uring_service_.register_io_object(impl.io_object_data_);
impl.descriptor_ = native_descriptor;
impl.state_ = descriptor_ops::possible_dup;
ec = success_ec_;
return ec;
}
asio::error_code io_uring_descriptor_service::close(
io_uring_descriptor_service::implementation_type& impl,
asio::error_code& ec)
{
if (is_open(impl))
{
ASIO_HANDLER_OPERATION((io_uring_service_.context(),
"descriptor", &impl, impl.descriptor_, "close"));
io_uring_service_.deregister_io_object(impl.io_object_data_);
descriptor_ops::close(impl.descriptor_, impl.state_, ec);
io_uring_service_.cleanup_io_object(impl.io_object_data_);
}
else
{
ec = success_ec_;
}
// The descriptor is closed by the OS even if close() returns an error.
//
// (Actually, POSIX says the state of the descriptor is unspecified. On
// Linux the descriptor is apparently closed anyway; e.g. see
  //   http://lkml.org/lkml/2005/9/10/129
  // We'll just have to assume that other OSes follow the same behaviour.)
construct(impl);
ASIO_ERROR_LOCATION(ec);
return ec;
}
io_uring_descriptor_service::native_handle_type
io_uring_descriptor_service::release(
io_uring_descriptor_service::implementation_type& impl)
{
native_handle_type descriptor = impl.descriptor_;
if (is_open(impl))
{
ASIO_HANDLER_OPERATION((io_uring_service_.context(),
"descriptor", &impl, impl.descriptor_, "release"));
io_uring_service_.deregister_io_object(impl.io_object_data_);
io_uring_service_.cleanup_io_object(impl.io_object_data_);
construct(impl);
}
return descriptor;
}
asio::error_code io_uring_descriptor_service::cancel(
io_uring_descriptor_service::implementation_type& impl,
asio::error_code& ec)
{
if (!is_open(impl))
{
ec = asio::error::bad_descriptor;
ASIO_ERROR_LOCATION(ec);
return ec;
}
ASIO_HANDLER_OPERATION((io_uring_service_.context(),
"descriptor", &impl, impl.descriptor_, "cancel"));
io_uring_service_.cancel_ops(impl.io_object_data_);
ec = success_ec_;
return ec;
}
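// Start an asynchronous operation. When noop is true the operation is posted
// for immediate completion rather than being submitted to the io_uring.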
void io_uring_descriptor_service::start_op(
io_uring_descriptor_service::implementation_type& impl,
int op_type, io_uring_operation* op, bool is_continuation, bool noop)
{
if (!noop)
{
io_uring_service_.start_op(op_type,
impl.io_object_data_, op, is_continuation);
}
else
{
io_uring_service_.post_immediate_completion(op, is_continuation);
}
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // defined(ASIO_HAS_IO_URING)
#endif // ASIO_DETAIL_IMPL_IO_URING_DESCRIPTOR_SERVICE_IPP
|
0 | repos/asio/asio/include/asio/detail | repos/asio/asio/include/asio/detail/impl/eventfd_select_interrupter.ipp | //
// detail/impl/eventfd_select_interrupter.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2024 Christopher M. Kohlhoff (chris at kohlhoff dot com)
// Copyright (c) 2008 Roelof Naude (roelof.naude at gmail dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_EVENTFD_SELECT_INTERRUPTER_IPP
#define ASIO_DETAIL_IMPL_EVENTFD_SELECT_INTERRUPTER_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if defined(ASIO_HAS_EVENTFD)
#include <sys/stat.h>
#include <sys/types.h>
#include <fcntl.h>
#if __GLIBC__ == 2 && __GLIBC_MINOR__ < 8 && !defined(__UCLIBC__)
# include <asm/unistd.h>
#else // __GLIBC__ == 2 && __GLIBC_MINOR__ < 8 && !defined(__UCLIBC__)
# include <sys/eventfd.h>
#endif // __GLIBC__ == 2 && __GLIBC_MINOR__ < 8 && !defined(__UCLIBC__)
#include "asio/detail/cstdint.hpp"
#include "asio/detail/eventfd_select_interrupter.hpp"
#include "asio/detail/throw_error.hpp"
#include "asio/error.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
eventfd_select_interrupter::eventfd_select_interrupter()
{
open_descriptors();
}
void eventfd_select_interrupter::open_descriptors()
{
#if __GLIBC__ == 2 && __GLIBC_MINOR__ < 8 && !defined(__UCLIBC__)
write_descriptor_ = read_descriptor_ = syscall(__NR_eventfd, 0);
if (read_descriptor_ != -1)
{
::fcntl(read_descriptor_, F_SETFL, O_NONBLOCK);
::fcntl(read_descriptor_, F_SETFD, FD_CLOEXEC);
}
#else // __GLIBC__ == 2 && __GLIBC_MINOR__ < 8 && !defined(__UCLIBC__)
# if defined(EFD_CLOEXEC) && defined(EFD_NONBLOCK)
write_descriptor_ = read_descriptor_ =
::eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
# else // defined(EFD_CLOEXEC) && defined(EFD_NONBLOCK)
errno = EINVAL;
write_descriptor_ = read_descriptor_ = -1;
# endif // defined(EFD_CLOEXEC) && defined(EFD_NONBLOCK)
if (read_descriptor_ == -1 && errno == EINVAL)
{
write_descriptor_ = read_descriptor_ = ::eventfd(0, 0);
if (read_descriptor_ != -1)
{
::fcntl(read_descriptor_, F_SETFL, O_NONBLOCK);
::fcntl(read_descriptor_, F_SETFD, FD_CLOEXEC);
}
}
#endif // __GLIBC__ == 2 && __GLIBC_MINOR__ < 8 && !defined(__UCLIBC__)
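  // If an eventfd could not be created, fall back to a pipe with both ends
  // set to non-blocking, close-on-exec mode.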
if (read_descriptor_ == -1)
{
int pipe_fds[2];
if (pipe(pipe_fds) == 0)
{
read_descriptor_ = pipe_fds[0];
::fcntl(read_descriptor_, F_SETFL, O_NONBLOCK);
::fcntl(read_descriptor_, F_SETFD, FD_CLOEXEC);
write_descriptor_ = pipe_fds[1];
::fcntl(write_descriptor_, F_SETFL, O_NONBLOCK);
::fcntl(write_descriptor_, F_SETFD, FD_CLOEXEC);
}
else
{
asio::error_code ec(errno,
asio::error::get_system_category());
asio::detail::throw_error(ec, "eventfd_select_interrupter");
}
}
}
eventfd_select_interrupter::~eventfd_select_interrupter()
{
close_descriptors();
}
void eventfd_select_interrupter::close_descriptors()
{
if (write_descriptor_ != -1 && write_descriptor_ != read_descriptor_)
::close(write_descriptor_);
if (read_descriptor_ != -1)
::close(read_descriptor_);
}
void eventfd_select_interrupter::recreate()
{
close_descriptors();
write_descriptor_ = -1;
read_descriptor_ = -1;
open_descriptors();
}
void eventfd_select_interrupter::interrupt()
{
uint64_t counter(1UL);
int result = ::write(write_descriptor_, &counter, sizeof(uint64_t));
(void)result;
}
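// Reset the interrupter so that it may be signalled again. Returns true if
// the interrupter was signalled (or the read would have blocked), and false
// if the read end was closed or a fatal error occurred.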
bool eventfd_select_interrupter::reset()
{
if (write_descriptor_ == read_descriptor_)
{
for (;;)
{
// Only perform one read. The kernel maintains an atomic counter.
uint64_t counter(0);
errno = 0;
int bytes_read = ::read(read_descriptor_, &counter, sizeof(uint64_t));
if (bytes_read < 0 && errno == EINTR)
continue;
return true;
}
}
else
{
for (;;)
{
// Clear all data from the pipe.
char data[1024];
int bytes_read = ::read(read_descriptor_, data, sizeof(data));
if (bytes_read == sizeof(data))
continue;
if (bytes_read > 0)
return true;
if (bytes_read == 0)
return false;
if (errno == EINTR)
continue;
if (errno == EWOULDBLOCK)
return true;
if (errno == EAGAIN)
return true;
return false;
}
}
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // defined(ASIO_HAS_EVENTFD)
#endif // ASIO_DETAIL_IMPL_EVENTFD_SELECT_INTERRUPTER_IPP
|
0 | repos/asio/asio/include/asio/detail | repos/asio/asio/include/asio/detail/impl/win_mutex.ipp | //
// detail/impl/win_mutex.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2024 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_WIN_MUTEX_IPP
#define ASIO_DETAIL_IMPL_WIN_MUTEX_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if defined(ASIO_WINDOWS)
#include "asio/detail/throw_error.hpp"
#include "asio/detail/win_mutex.hpp"
#include "asio/error.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
win_mutex::win_mutex()
{
int error = do_init();
asio::error_code ec(error,
asio::error::get_system_category());
asio::detail::throw_error(ec, "mutex");
}
int win_mutex::do_init()
{
#if defined(__MINGW32__)
// Not sure if MinGW supports structured exception handling, so for now
// we'll just call the Windows API and hope.
# if defined(UNDER_CE)
::InitializeCriticalSection(&crit_section_);
# elif defined(ASIO_WINDOWS_APP)
if (!::InitializeCriticalSectionEx(&crit_section_, 0, 0))
return ::GetLastError();
# else
if (!::InitializeCriticalSectionAndSpinCount(&crit_section_, 0x80000000))
return ::GetLastError();
# endif
return 0;
#else
__try
{
# if defined(UNDER_CE)
::InitializeCriticalSection(&crit_section_);
# elif defined(ASIO_WINDOWS_APP)
if (!::InitializeCriticalSectionEx(&crit_section_, 0, 0))
return ::GetLastError();
# else
if (!::InitializeCriticalSectionAndSpinCount(&crit_section_, 0x80000000))
return ::GetLastError();
# endif
}
__except(GetExceptionCode() == STATUS_NO_MEMORY
? EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH)
{
return ERROR_OUTOFMEMORY;
}
return 0;
#endif
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // defined(ASIO_WINDOWS)
#endif // ASIO_DETAIL_IMPL_WIN_MUTEX_IPP
|
0 | repos/asio/asio/include/asio/detail | repos/asio/asio/include/asio/detail/impl/posix_event.ipp | //
// detail/impl/posix_event.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2024 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_POSIX_EVENT_IPP
#define ASIO_DETAIL_IMPL_POSIX_EVENT_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if defined(ASIO_HAS_PTHREADS)
#include "asio/detail/posix_event.hpp"
#include "asio/detail/throw_error.hpp"
#include "asio/error.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
posix_event::posix_event()
: state_(0)
{
#if (defined(__MACH__) && defined(__APPLE__)) \
|| (defined(__ANDROID__) && (__ANDROID_API__ < 21))
int error = ::pthread_cond_init(&cond_, 0);
#else // (defined(__MACH__) && defined(__APPLE__))
// || (defined(__ANDROID__) && (__ANDROID_API__ < 21))
::pthread_condattr_t attr;
int error = ::pthread_condattr_init(&attr);
if (error == 0)
{
error = ::pthread_condattr_setclock(&attr, CLOCK_MONOTONIC);
if (error == 0)
error = ::pthread_cond_init(&cond_, &attr);
::pthread_condattr_destroy(&attr);
}
#endif // (defined(__MACH__) && defined(__APPLE__))
// || (defined(__ANDROID__) && (__ANDROID_API__ < 21))
asio::error_code ec(error,
asio::error::get_system_category());
asio::detail::throw_error(ec, "event");
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // defined(ASIO_HAS_PTHREADS)
#endif // ASIO_DETAIL_IMPL_POSIX_EVENT_IPP
|
0 | repos/asio/asio/include/asio/detail | repos/asio/asio/include/asio/detail/impl/kqueue_reactor.ipp | //
// detail/impl/kqueue_reactor.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2024 Christopher M. Kohlhoff (chris at kohlhoff dot com)
// Copyright (c) 2005 Stefan Arentz (stefan at soze dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_KQUEUE_REACTOR_IPP
#define ASIO_DETAIL_IMPL_KQUEUE_REACTOR_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if defined(ASIO_HAS_KQUEUE)
#include "asio/detail/kqueue_reactor.hpp"
#include "asio/detail/scheduler.hpp"
#include "asio/detail/throw_error.hpp"
#include "asio/error.hpp"
#if defined(__NetBSD__)
# include <sys/param.h>
#endif
#include "asio/detail/push_options.hpp"
#if defined(__NetBSD__) && __NetBSD_Version__ < 999001500
# define ASIO_KQUEUE_EV_SET(ev, ident, filt, flags, fflags, data, udata) \
EV_SET(ev, ident, filt, flags, fflags, data, \
reinterpret_cast<intptr_t>(static_cast<void*>(udata)))
#else
# define ASIO_KQUEUE_EV_SET(ev, ident, filt, flags, fflags, data, udata) \
EV_SET(ev, ident, filt, flags, fflags, data, udata)
#endif
namespace asio {
namespace detail {
kqueue_reactor::kqueue_reactor(asio::execution_context& ctx)
: execution_context_service_base<kqueue_reactor>(ctx),
scheduler_(use_service<scheduler>(ctx)),
mutex_(ASIO_CONCURRENCY_HINT_IS_LOCKING(
REACTOR_REGISTRATION, scheduler_.concurrency_hint())),
kqueue_fd_(do_kqueue_create()),
interrupter_(),
shutdown_(false),
registered_descriptors_mutex_(mutex_.enabled())
{
struct kevent events[1];
ASIO_KQUEUE_EV_SET(&events[0], interrupter_.read_descriptor(),
EVFILT_READ, EV_ADD, 0, 0, &interrupter_);
if (::kevent(kqueue_fd_, events, 1, 0, 0, 0) == -1)
{
asio::error_code error(errno,
asio::error::get_system_category());
asio::detail::throw_error(error);
}
}
kqueue_reactor::~kqueue_reactor()
{
close(kqueue_fd_);
}
void kqueue_reactor::shutdown()
{
mutex::scoped_lock lock(mutex_);
shutdown_ = true;
lock.unlock();
op_queue<operation> ops;
while (descriptor_state* state = registered_descriptors_.first())
{
for (int i = 0; i < max_ops; ++i)
ops.push(state->op_queue_[i]);
state->shutdown_ = true;
registered_descriptors_.free(state);
}
timer_queues_.get_all_timers(ops);
scheduler_.abandon_operations(ops);
}
void kqueue_reactor::notify_fork(
asio::execution_context::fork_event fork_ev)
{
if (fork_ev == asio::execution_context::fork_child)
{
// The kqueue descriptor is automatically closed in the child.
kqueue_fd_ = -1;
kqueue_fd_ = do_kqueue_create();
interrupter_.recreate();
struct kevent events[2];
ASIO_KQUEUE_EV_SET(&events[0], interrupter_.read_descriptor(),
EVFILT_READ, EV_ADD, 0, 0, &interrupter_);
if (::kevent(kqueue_fd_, events, 1, 0, 0, 0) == -1)
{
asio::error_code ec(errno,
asio::error::get_system_category());
asio::detail::throw_error(ec, "kqueue interrupter registration");
}
// Re-register all descriptors with kqueue.
mutex::scoped_lock descriptors_lock(registered_descriptors_mutex_);
for (descriptor_state* state = registered_descriptors_.first();
state != 0; state = state->next_)
{
if (state->num_kevents_ > 0)
{
ASIO_KQUEUE_EV_SET(&events[0], state->descriptor_,
EVFILT_READ, EV_ADD | EV_CLEAR, 0, 0, state);
ASIO_KQUEUE_EV_SET(&events[1], state->descriptor_,
EVFILT_WRITE, EV_ADD | EV_CLEAR, 0, 0, state);
if (::kevent(kqueue_fd_, events, state->num_kevents_, 0, 0, 0) == -1)
{
asio::error_code ec(errno,
asio::error::get_system_category());
asio::detail::throw_error(ec, "kqueue re-registration");
}
}
}
}
}
void kqueue_reactor::init_task()
{
scheduler_.init_task();
}
int kqueue_reactor::register_descriptor(socket_type descriptor,
kqueue_reactor::per_descriptor_data& descriptor_data)
{
descriptor_data = allocate_descriptor_state();
ASIO_HANDLER_REACTOR_REGISTRATION((
context(), static_cast<uintmax_t>(descriptor),
reinterpret_cast<uintmax_t>(descriptor_data)));
mutex::scoped_lock lock(descriptor_data->mutex_);
descriptor_data->descriptor_ = descriptor;
descriptor_data->num_kevents_ = 0;
descriptor_data->shutdown_ = false;
return 0;
}
int kqueue_reactor::register_internal_descriptor(
int op_type, socket_type descriptor,
kqueue_reactor::per_descriptor_data& descriptor_data, reactor_op* op)
{
descriptor_data = allocate_descriptor_state();
ASIO_HANDLER_REACTOR_REGISTRATION((
context(), static_cast<uintmax_t>(descriptor),
reinterpret_cast<uintmax_t>(descriptor_data)));
mutex::scoped_lock lock(descriptor_data->mutex_);
descriptor_data->descriptor_ = descriptor;
descriptor_data->num_kevents_ = 1;
descriptor_data->shutdown_ = false;
descriptor_data->op_queue_[op_type].push(op);
struct kevent events[1];
ASIO_KQUEUE_EV_SET(&events[0], descriptor, EVFILT_READ,
EV_ADD | EV_CLEAR, 0, 0, descriptor_data);
if (::kevent(kqueue_fd_, events, 1, 0, 0, 0) == -1)
return errno;
return 0;
}
void kqueue_reactor::move_descriptor(socket_type,
kqueue_reactor::per_descriptor_data& target_descriptor_data,
kqueue_reactor::per_descriptor_data& source_descriptor_data)
{
target_descriptor_data = source_descriptor_data;
source_descriptor_data = 0;
}
void kqueue_reactor::call_post_immediate_completion(
operation* op, bool is_continuation, const void* self)
{
static_cast<const kqueue_reactor*>(self)->post_immediate_completion(
op, is_continuation);
}
void kqueue_reactor::start_op(int op_type, socket_type descriptor,
kqueue_reactor::per_descriptor_data& descriptor_data, reactor_op* op,
bool is_continuation, bool allow_speculative,
void (*on_immediate)(operation*, bool, const void*),
const void* immediate_arg)
{
if (!descriptor_data)
{
op->ec_ = asio::error::bad_descriptor;
on_immediate(op, is_continuation, immediate_arg);
return;
}
mutex::scoped_lock descriptor_lock(descriptor_data->mutex_);
if (descriptor_data->shutdown_)
{
on_immediate(op, is_continuation, immediate_arg);
return;
}
if (descriptor_data->op_queue_[op_type].empty())
{
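    // Number of kevent registrations required for each operation type: read
    // and except operations need only EVFILT_READ, whereas write operations
    // also need EVFILT_WRITE.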
static const int num_kevents[max_ops] = { 1, 2, 1 };
if (allow_speculative
&& (op_type != read_op
|| descriptor_data->op_queue_[except_op].empty()))
{
if (op->perform())
{
descriptor_lock.unlock();
on_immediate(op, is_continuation, immediate_arg);
return;
}
if (descriptor_data->num_kevents_ < num_kevents[op_type])
{
struct kevent events[2];
ASIO_KQUEUE_EV_SET(&events[0], descriptor, EVFILT_READ,
EV_ADD | EV_CLEAR, 0, 0, descriptor_data);
ASIO_KQUEUE_EV_SET(&events[1], descriptor, EVFILT_WRITE,
EV_ADD | EV_CLEAR, 0, 0, descriptor_data);
if (::kevent(kqueue_fd_, events, num_kevents[op_type], 0, 0, 0) != -1)
{
descriptor_data->num_kevents_ = num_kevents[op_type];
}
else
{
op->ec_ = asio::error_code(errno,
asio::error::get_system_category());
on_immediate(op, is_continuation, immediate_arg);
return;
}
}
}
else
{
if (descriptor_data->num_kevents_ < num_kevents[op_type])
descriptor_data->num_kevents_ = num_kevents[op_type];
struct kevent events[2];
ASIO_KQUEUE_EV_SET(&events[0], descriptor, EVFILT_READ,
EV_ADD | EV_CLEAR, 0, 0, descriptor_data);
ASIO_KQUEUE_EV_SET(&events[1], descriptor, EVFILT_WRITE,
EV_ADD | EV_CLEAR, 0, 0, descriptor_data);
::kevent(kqueue_fd_, events, descriptor_data->num_kevents_, 0, 0, 0);
}
}
descriptor_data->op_queue_[op_type].push(op);
scheduler_.work_started();
}
void kqueue_reactor::cancel_ops(socket_type,
kqueue_reactor::per_descriptor_data& descriptor_data)
{
if (!descriptor_data)
return;
mutex::scoped_lock descriptor_lock(descriptor_data->mutex_);
op_queue<operation> ops;
for (int i = 0; i < max_ops; ++i)
{
while (reactor_op* op = descriptor_data->op_queue_[i].front())
{
op->ec_ = asio::error::operation_aborted;
descriptor_data->op_queue_[i].pop();
ops.push(op);
}
}
descriptor_lock.unlock();
scheduler_.post_deferred_completions(ops);
}
void kqueue_reactor::cancel_ops_by_key(socket_type,
kqueue_reactor::per_descriptor_data& descriptor_data,
int op_type, void* cancellation_key)
{
if (!descriptor_data)
return;
mutex::scoped_lock descriptor_lock(descriptor_data->mutex_);
op_queue<operation> ops;
op_queue<reactor_op> other_ops;
while (reactor_op* op = descriptor_data->op_queue_[op_type].front())
{
descriptor_data->op_queue_[op_type].pop();
if (op->cancellation_key_ == cancellation_key)
{
op->ec_ = asio::error::operation_aborted;
ops.push(op);
}
else
other_ops.push(op);
}
descriptor_data->op_queue_[op_type].push(other_ops);
descriptor_lock.unlock();
scheduler_.post_deferred_completions(ops);
}
void kqueue_reactor::deregister_descriptor(socket_type descriptor,
kqueue_reactor::per_descriptor_data& descriptor_data, bool closing)
{
if (!descriptor_data)
return;
mutex::scoped_lock descriptor_lock(descriptor_data->mutex_);
if (!descriptor_data->shutdown_)
{
if (closing)
{
// The descriptor will be automatically removed from the kqueue when it
// is closed.
}
else
{
struct kevent events[2];
ASIO_KQUEUE_EV_SET(&events[0], descriptor,
EVFILT_READ, EV_DELETE, 0, 0, 0);
ASIO_KQUEUE_EV_SET(&events[1], descriptor,
EVFILT_WRITE, EV_DELETE, 0, 0, 0);
::kevent(kqueue_fd_, events, descriptor_data->num_kevents_, 0, 0, 0);
}
op_queue<operation> ops;
for (int i = 0; i < max_ops; ++i)
{
while (reactor_op* op = descriptor_data->op_queue_[i].front())
{
op->ec_ = asio::error::operation_aborted;
descriptor_data->op_queue_[i].pop();
ops.push(op);
}
}
descriptor_data->descriptor_ = -1;
descriptor_data->shutdown_ = true;
descriptor_lock.unlock();
ASIO_HANDLER_REACTOR_DEREGISTRATION((
context(), static_cast<uintmax_t>(descriptor),
reinterpret_cast<uintmax_t>(descriptor_data)));
scheduler_.post_deferred_completions(ops);
// Leave descriptor_data set so that it will be freed by the subsequent
// call to cleanup_descriptor_data.
}
else
{
// We are shutting down, so prevent cleanup_descriptor_data from freeing
// the descriptor_data object and let the destructor free it instead.
descriptor_data = 0;
}
}
void kqueue_reactor::deregister_internal_descriptor(socket_type descriptor,
kqueue_reactor::per_descriptor_data& descriptor_data)
{
if (!descriptor_data)
return;
mutex::scoped_lock descriptor_lock(descriptor_data->mutex_);
if (!descriptor_data->shutdown_)
{
struct kevent events[2];
ASIO_KQUEUE_EV_SET(&events[0], descriptor,
EVFILT_READ, EV_DELETE, 0, 0, 0);
ASIO_KQUEUE_EV_SET(&events[1], descriptor,
EVFILT_WRITE, EV_DELETE, 0, 0, 0);
::kevent(kqueue_fd_, events, descriptor_data->num_kevents_, 0, 0, 0);
op_queue<operation> ops;
for (int i = 0; i < max_ops; ++i)
ops.push(descriptor_data->op_queue_[i]);
descriptor_data->descriptor_ = -1;
descriptor_data->shutdown_ = true;
descriptor_lock.unlock();
ASIO_HANDLER_REACTOR_DEREGISTRATION((
context(), static_cast<uintmax_t>(descriptor),
reinterpret_cast<uintmax_t>(descriptor_data)));
// Leave descriptor_data set so that it will be freed by the subsequent
// call to cleanup_descriptor_data.
}
else
{
// We are shutting down, so prevent cleanup_descriptor_data from freeing
// the descriptor_data object and let the destructor free it instead.
descriptor_data = 0;
}
}
void kqueue_reactor::cleanup_descriptor_data(
per_descriptor_data& descriptor_data)
{
if (descriptor_data)
{
free_descriptor_state(descriptor_data);
descriptor_data = 0;
}
}
void kqueue_reactor::run(long usec, op_queue<operation>& ops)
{
mutex::scoped_lock lock(mutex_);
// Determine how long to block while waiting for events.
timespec timeout_buf = { 0, 0 };
timespec* timeout = usec ? get_timeout(usec, timeout_buf) : &timeout_buf;
lock.unlock();
// Block on the kqueue descriptor.
struct kevent events[128];
int num_events = kevent(kqueue_fd_, 0, 0, events, 128, timeout);
#if defined(ASIO_ENABLE_HANDLER_TRACKING)
// Trace the waiting events.
for (int i = 0; i < num_events; ++i)
{
void* ptr = reinterpret_cast<void*>(events[i].udata);
if (ptr != &interrupter_)
{
unsigned event_mask = 0;
switch (events[i].filter)
{
case EVFILT_READ:
event_mask |= ASIO_HANDLER_REACTOR_READ_EVENT;
break;
case EVFILT_WRITE:
event_mask |= ASIO_HANDLER_REACTOR_WRITE_EVENT;
break;
}
if ((events[i].flags & (EV_ERROR | EV_OOBAND)) != 0)
event_mask |= ASIO_HANDLER_REACTOR_ERROR_EVENT;
ASIO_HANDLER_REACTOR_EVENTS((context(),
reinterpret_cast<uintmax_t>(ptr), event_mask));
}
}
#endif // defined(ASIO_ENABLE_HANDLER_TRACKING)
// Dispatch the waiting events.
for (int i = 0; i < num_events; ++i)
{
void* ptr = reinterpret_cast<void*>(events[i].udata);
if (ptr == &interrupter_)
{
interrupter_.reset();
}
else
{
descriptor_state* descriptor_data = static_cast<descriptor_state*>(ptr);
mutex::scoped_lock descriptor_lock(descriptor_data->mutex_);
if (events[i].filter == EVFILT_WRITE
&& descriptor_data->num_kevents_ == 2
&& descriptor_data->op_queue_[write_op].empty())
{
        // Some descriptor types, like serial ports, don't seem to support
        // EV_CLEAR with EVFILT_WRITE. Since we have no pending write
        // operations, we'll remove the EVFILT_WRITE registration here so
        // that we don't end up in a tight spin.
struct kevent delete_events[1];
ASIO_KQUEUE_EV_SET(&delete_events[0],
descriptor_data->descriptor_, EVFILT_WRITE, EV_DELETE, 0, 0, 0);
::kevent(kqueue_fd_, delete_events, 1, 0, 0, 0);
descriptor_data->num_kevents_ = 1;
}
// Exception operations must be processed first to ensure that any
// out-of-band data is read before normal data.
#if defined(__NetBSD__)
static const unsigned int filter[max_ops] =
#else
static const int filter[max_ops] =
#endif
{ EVFILT_READ, EVFILT_WRITE, EVFILT_READ };
for (int j = max_ops - 1; j >= 0; --j)
{
if (events[i].filter == filter[j])
{
if (j != except_op || events[i].flags & EV_OOBAND)
{
while (reactor_op* op = descriptor_data->op_queue_[j].front())
{
if (events[i].flags & EV_ERROR)
{
op->ec_ = asio::error_code(
static_cast<int>(events[i].data),
asio::error::get_system_category());
descriptor_data->op_queue_[j].pop();
ops.push(op);
}
if (op->perform())
{
descriptor_data->op_queue_[j].pop();
ops.push(op);
}
else
break;
}
}
}
}
}
}
lock.lock();
timer_queues_.get_ready_timers(ops);
}
void kqueue_reactor::interrupt()
{
interrupter_.interrupt();
}
int kqueue_reactor::do_kqueue_create()
{
int fd = ::kqueue();
if (fd == -1)
{
asio::error_code ec(errno,
asio::error::get_system_category());
asio::detail::throw_error(ec, "kqueue");
}
return fd;
}
kqueue_reactor::descriptor_state* kqueue_reactor::allocate_descriptor_state()
{
mutex::scoped_lock descriptors_lock(registered_descriptors_mutex_);
return registered_descriptors_.alloc(ASIO_CONCURRENCY_HINT_IS_LOCKING(
REACTOR_IO, scheduler_.concurrency_hint()));
}
void kqueue_reactor::free_descriptor_state(kqueue_reactor::descriptor_state* s)
{
mutex::scoped_lock descriptors_lock(registered_descriptors_mutex_);
registered_descriptors_.free(s);
}
void kqueue_reactor::do_add_timer_queue(timer_queue_base& queue)
{
mutex::scoped_lock lock(mutex_);
timer_queues_.insert(&queue);
}
void kqueue_reactor::do_remove_timer_queue(timer_queue_base& queue)
{
mutex::scoped_lock lock(mutex_);
timer_queues_.erase(&queue);
}
timespec* kqueue_reactor::get_timeout(long usec, timespec& ts)
{
  // By default we will wait no longer than 5 minutes. This ensures that any
  // changes to the system clock are detected after no more than 5 minutes.
const long max_usec = 5 * 60 * 1000 * 1000;
usec = timer_queues_.wait_duration_usec(
(usec < 0 || max_usec < usec) ? max_usec : usec);
ts.tv_sec = usec / 1000000;
ts.tv_nsec = (usec % 1000000) * 1000;
return &ts;
}
} // namespace detail
} // namespace asio
#undef ASIO_KQUEUE_EV_SET
#include "asio/detail/pop_options.hpp"
#endif // defined(ASIO_HAS_KQUEUE)
#endif // ASIO_DETAIL_IMPL_KQUEUE_REACTOR_IPP
|
0 | repos/asio/asio/include/asio/detail | repos/asio/asio/include/asio/detail/impl/scheduler.ipp | //
// detail/impl/scheduler.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2024 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_SCHEDULER_IPP
#define ASIO_DETAIL_IMPL_SCHEDULER_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#include "asio/detail/concurrency_hint.hpp"
#include "asio/detail/event.hpp"
#include "asio/detail/limits.hpp"
#include "asio/detail/scheduler.hpp"
#include "asio/detail/scheduler_thread_info.hpp"
#include "asio/detail/signal_blocker.hpp"
#if defined(ASIO_HAS_IO_URING_AS_DEFAULT)
# include "asio/detail/io_uring_service.hpp"
#else // defined(ASIO_HAS_IO_URING_AS_DEFAULT)
# include "asio/detail/reactor.hpp"
#endif // defined(ASIO_HAS_IO_URING_AS_DEFAULT)
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
class scheduler::thread_function
{
public:
explicit thread_function(scheduler* s)
: this_(s)
{
}
void operator()()
{
asio::error_code ec;
this_->run(ec);
}
private:
scheduler* this_;
};
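// Helper run on scope exit after the scheduler's task (typically the
// reactor) has been run: it folds the thread's privately accumulated work
// count into the shared count, then re-queues the operations produced by the
// task together with the task_operation_ marker.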
struct scheduler::task_cleanup
{
~task_cleanup()
{
if (this_thread_->private_outstanding_work > 0)
{
asio::detail::increment(
scheduler_->outstanding_work_,
this_thread_->private_outstanding_work);
}
this_thread_->private_outstanding_work = 0;
// Enqueue the completed operations and reinsert the task at the end of
// the operation queue.
lock_->lock();
scheduler_->task_interrupted_ = true;
scheduler_->op_queue_.push(this_thread_->private_op_queue);
scheduler_->op_queue_.push(&scheduler_->task_operation_);
}
scheduler* scheduler_;
mutex::scoped_lock* lock_;
thread_info* this_thread_;
};
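// Helper run on scope exit after a completion handler has been executed: any
// net new work started by the handler is added to the shared count (or
// work_finished() is called if the handler started none), and any privately
// queued operations are returned to the shared queue.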
struct scheduler::work_cleanup
{
~work_cleanup()
{
if (this_thread_->private_outstanding_work > 1)
{
asio::detail::increment(
scheduler_->outstanding_work_,
this_thread_->private_outstanding_work - 1);
}
else if (this_thread_->private_outstanding_work < 1)
{
scheduler_->work_finished();
}
this_thread_->private_outstanding_work = 0;
#if defined(ASIO_HAS_THREADS)
if (!this_thread_->private_op_queue.empty())
{
lock_->lock();
scheduler_->op_queue_.push(this_thread_->private_op_queue);
}
#endif // defined(ASIO_HAS_THREADS)
}
scheduler* scheduler_;
mutex::scoped_lock* lock_;
thread_info* this_thread_;
};
scheduler::scheduler(asio::execution_context& ctx,
int concurrency_hint, bool own_thread, get_task_func_type get_task)
: asio::detail::execution_context_service_base<scheduler>(ctx),
one_thread_(concurrency_hint == 1
|| !ASIO_CONCURRENCY_HINT_IS_LOCKING(
SCHEDULER, concurrency_hint)
|| !ASIO_CONCURRENCY_HINT_IS_LOCKING(
REACTOR_IO, concurrency_hint)),
mutex_(ASIO_CONCURRENCY_HINT_IS_LOCKING(
SCHEDULER, concurrency_hint)),
task_(0),
get_task_(get_task),
task_interrupted_(true),
outstanding_work_(0),
stopped_(false),
shutdown_(false),
concurrency_hint_(concurrency_hint),
thread_(0)
{
ASIO_HANDLER_TRACKING_INIT;
if (own_thread)
{
++outstanding_work_;
asio::detail::signal_blocker sb;
thread_ = new asio::detail::thread(thread_function(this));
}
}
scheduler::~scheduler()
{
if (thread_)
{
mutex::scoped_lock lock(mutex_);
shutdown_ = true;
stop_all_threads(lock);
lock.unlock();
thread_->join();
delete thread_;
}
}
void scheduler::shutdown()
{
mutex::scoped_lock lock(mutex_);
shutdown_ = true;
if (thread_)
stop_all_threads(lock);
lock.unlock();
// Join thread to ensure task operation is returned to queue.
if (thread_)
{
thread_->join();
delete thread_;
thread_ = 0;
}
// Destroy handler objects.
while (!op_queue_.empty())
{
operation* o = op_queue_.front();
op_queue_.pop();
if (o != &task_operation_)
o->destroy();
}
// Reset to initial state.
task_ = 0;
}
void scheduler::init_task()
{
mutex::scoped_lock lock(mutex_);
if (!shutdown_ && !task_)
{
task_ = get_task_(this->context());
op_queue_.push(&task_operation_);
wake_one_thread_and_unlock(lock);
}
}
std::size_t scheduler::run(asio::error_code& ec)
{
ec = asio::error_code();
if (outstanding_work_ == 0)
{
stop();
return 0;
}
thread_info this_thread;
this_thread.private_outstanding_work = 0;
thread_call_stack::context ctx(this, this_thread);
mutex::scoped_lock lock(mutex_);
std::size_t n = 0;
for (; do_run_one(lock, this_thread, ec); lock.lock())
if (n != (std::numeric_limits<std::size_t>::max)())
++n;
return n;
}
std::size_t scheduler::run_one(asio::error_code& ec)
{
ec = asio::error_code();
if (outstanding_work_ == 0)
{
stop();
return 0;
}
thread_info this_thread;
this_thread.private_outstanding_work = 0;
thread_call_stack::context ctx(this, this_thread);
mutex::scoped_lock lock(mutex_);
return do_run_one(lock, this_thread, ec);
}
std::size_t scheduler::wait_one(long usec, asio::error_code& ec)
{
ec = asio::error_code();
if (outstanding_work_ == 0)
{
stop();
return 0;
}
thread_info this_thread;
this_thread.private_outstanding_work = 0;
thread_call_stack::context ctx(this, this_thread);
mutex::scoped_lock lock(mutex_);
return do_wait_one(lock, this_thread, usec, ec);
}
std::size_t scheduler::poll(asio::error_code& ec)
{
ec = asio::error_code();
if (outstanding_work_ == 0)
{
stop();
return 0;
}
thread_info this_thread;
this_thread.private_outstanding_work = 0;
thread_call_stack::context ctx(this, this_thread);
mutex::scoped_lock lock(mutex_);
#if defined(ASIO_HAS_THREADS)
// We want to support nested calls to poll() and poll_one(), so any handlers
// that are already on a thread-private queue need to be put on to the main
// queue now.
if (one_thread_)
if (thread_info* outer_info = static_cast<thread_info*>(ctx.next_by_key()))
op_queue_.push(outer_info->private_op_queue);
#endif // defined(ASIO_HAS_THREADS)
std::size_t n = 0;
for (; do_poll_one(lock, this_thread, ec); lock.lock())
if (n != (std::numeric_limits<std::size_t>::max)())
++n;
return n;
}
std::size_t scheduler::poll_one(asio::error_code& ec)
{
ec = asio::error_code();
if (outstanding_work_ == 0)
{
stop();
return 0;
}
thread_info this_thread;
this_thread.private_outstanding_work = 0;
thread_call_stack::context ctx(this, this_thread);
mutex::scoped_lock lock(mutex_);
#if defined(ASIO_HAS_THREADS)
// We want to support nested calls to poll() and poll_one(), so any handlers
// that are already on a thread-private queue need to be put on to the main
// queue now.
if (one_thread_)
if (thread_info* outer_info = static_cast<thread_info*>(ctx.next_by_key()))
op_queue_.push(outer_info->private_op_queue);
#endif // defined(ASIO_HAS_THREADS)
return do_poll_one(lock, this_thread, ec);
}
void scheduler::stop()
{
mutex::scoped_lock lock(mutex_);
stop_all_threads(lock);
}
bool scheduler::stopped() const
{
mutex::scoped_lock lock(mutex_);
return stopped_;
}
void scheduler::restart()
{
mutex::scoped_lock lock(mutex_);
stopped_ = false;
}
void scheduler::compensating_work_started()
{
thread_info_base* this_thread = thread_call_stack::contains(this);
ASIO_ASSUME(this_thread != 0); // Only called from inside scheduler.
++static_cast<thread_info*>(this_thread)->private_outstanding_work;
}
bool scheduler::can_dispatch()
{
return thread_call_stack::contains(this) != 0;
}
void scheduler::capture_current_exception()
{
if (thread_info_base* this_thread = thread_call_stack::contains(this))
this_thread->capture_current_exception();
}
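// Post an operation that counts as new work. When the scheduler is
// effectively single-threaded, or the operation is a continuation of the
// current handler, the operation is placed on the calling thread's private
// queue to avoid locking; otherwise it is pushed onto the shared queue and
// one thread is woken.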
void scheduler::post_immediate_completion(
scheduler::operation* op, bool is_continuation)
{
#if defined(ASIO_HAS_THREADS)
if (one_thread_ || is_continuation)
{
if (thread_info_base* this_thread = thread_call_stack::contains(this))
{
++static_cast<thread_info*>(this_thread)->private_outstanding_work;
static_cast<thread_info*>(this_thread)->private_op_queue.push(op);
return;
}
}
#else // defined(ASIO_HAS_THREADS)
(void)is_continuation;
#endif // defined(ASIO_HAS_THREADS)
work_started();
mutex::scoped_lock lock(mutex_);
op_queue_.push(op);
wake_one_thread_and_unlock(lock);
}
void scheduler::post_immediate_completions(std::size_t n,
op_queue<scheduler::operation>& ops, bool is_continuation)
{
#if defined(ASIO_HAS_THREADS)
if (one_thread_ || is_continuation)
{
if (thread_info_base* this_thread = thread_call_stack::contains(this))
{
static_cast<thread_info*>(this_thread)->private_outstanding_work
+= static_cast<long>(n);
static_cast<thread_info*>(this_thread)->private_op_queue.push(ops);
return;
}
}
#else // defined(ASIO_HAS_THREADS)
(void)is_continuation;
#endif // defined(ASIO_HAS_THREADS)
increment(outstanding_work_, static_cast<long>(n));
mutex::scoped_lock lock(mutex_);
op_queue_.push(ops);
wake_one_thread_and_unlock(lock);
}
void scheduler::post_deferred_completion(scheduler::operation* op)
{
#if defined(ASIO_HAS_THREADS)
if (one_thread_)
{
if (thread_info_base* this_thread = thread_call_stack::contains(this))
{
static_cast<thread_info*>(this_thread)->private_op_queue.push(op);
return;
}
}
#endif // defined(ASIO_HAS_THREADS)
mutex::scoped_lock lock(mutex_);
op_queue_.push(op);
wake_one_thread_and_unlock(lock);
}
void scheduler::post_deferred_completions(
op_queue<scheduler::operation>& ops)
{
if (!ops.empty())
{
#if defined(ASIO_HAS_THREADS)
if (one_thread_)
{
if (thread_info_base* this_thread = thread_call_stack::contains(this))
{
static_cast<thread_info*>(this_thread)->private_op_queue.push(ops);
return;
}
}
#endif // defined(ASIO_HAS_THREADS)
mutex::scoped_lock lock(mutex_);
op_queue_.push(ops);
wake_one_thread_and_unlock(lock);
}
}
void scheduler::do_dispatch(
scheduler::operation* op)
{
work_started();
mutex::scoped_lock lock(mutex_);
op_queue_.push(op);
wake_one_thread_and_unlock(lock);
}
void scheduler::abandon_operations(
op_queue<scheduler::operation>& ops)
{
op_queue<scheduler::operation> ops2;
ops2.push(ops);
}
std::size_t scheduler::do_run_one(mutex::scoped_lock& lock,
scheduler::thread_info& this_thread,
const asio::error_code& ec)
{
while (!stopped_)
{
if (!op_queue_.empty())
{
// Prepare to execute first handler from queue.
operation* o = op_queue_.front();
op_queue_.pop();
bool more_handlers = (!op_queue_.empty());
if (o == &task_operation_)
{
task_interrupted_ = more_handlers;
if (more_handlers && !one_thread_)
wakeup_event_.unlock_and_signal_one(lock);
else
lock.unlock();
task_cleanup on_exit = { this, &lock, &this_thread };
(void)on_exit;
// Run the task. May throw an exception. Only block if the operation
// queue is empty and we're not polling, otherwise we want to return
// as soon as possible.
task_->run(more_handlers ? 0 : -1, this_thread.private_op_queue);
}
else
{
std::size_t task_result = o->task_result_;
if (more_handlers && !one_thread_)
wake_one_thread_and_unlock(lock);
else
lock.unlock();
// Ensure the count of outstanding work is decremented on block exit.
work_cleanup on_exit = { this, &lock, &this_thread };
(void)on_exit;
// Complete the operation. May throw an exception. Deletes the object.
o->complete(this, ec, task_result);
this_thread.rethrow_pending_exception();
return 1;
}
}
else
{
wakeup_event_.clear(lock);
wakeup_event_.wait(lock);
}
}
return 0;
}
std::size_t scheduler::do_wait_one(mutex::scoped_lock& lock,
scheduler::thread_info& this_thread, long usec,
const asio::error_code& ec)
{
if (stopped_)
return 0;
operation* o = op_queue_.front();
if (o == 0)
{
wakeup_event_.clear(lock);
wakeup_event_.wait_for_usec(lock, usec);
usec = 0; // Wait at most once.
o = op_queue_.front();
}
if (o == &task_operation_)
{
op_queue_.pop();
bool more_handlers = (!op_queue_.empty());
task_interrupted_ = more_handlers;
if (more_handlers && !one_thread_)
wakeup_event_.unlock_and_signal_one(lock);
else
lock.unlock();
{
task_cleanup on_exit = { this, &lock, &this_thread };
(void)on_exit;
// Run the task. May throw an exception. Only block if the operation
// queue is empty and we're not polling, otherwise we want to return
// as soon as possible.
task_->run(more_handlers ? 0 : usec, this_thread.private_op_queue);
}
o = op_queue_.front();
if (o == &task_operation_)
{
if (!one_thread_)
wakeup_event_.maybe_unlock_and_signal_one(lock);
return 0;
}
}
if (o == 0)
return 0;
op_queue_.pop();
bool more_handlers = (!op_queue_.empty());
std::size_t task_result = o->task_result_;
if (more_handlers && !one_thread_)
wake_one_thread_and_unlock(lock);
else
lock.unlock();
// Ensure the count of outstanding work is decremented on block exit.
work_cleanup on_exit = { this, &lock, &this_thread };
(void)on_exit;
// Complete the operation. May throw an exception. Deletes the object.
o->complete(this, ec, task_result);
this_thread.rethrow_pending_exception();
return 1;
}
std::size_t scheduler::do_poll_one(mutex::scoped_lock& lock,
scheduler::thread_info& this_thread,
const asio::error_code& ec)
{
if (stopped_)
return 0;
operation* o = op_queue_.front();
if (o == &task_operation_)
{
op_queue_.pop();
lock.unlock();
{
task_cleanup c = { this, &lock, &this_thread };
(void)c;
// Run the task. May throw an exception. Only block if the operation
// queue is empty and we're not polling, otherwise we want to return
// as soon as possible.
task_->run(0, this_thread.private_op_queue);
}
o = op_queue_.front();
if (o == &task_operation_)
{
wakeup_event_.maybe_unlock_and_signal_one(lock);
return 0;
}
}
if (o == 0)
return 0;
op_queue_.pop();
bool more_handlers = (!op_queue_.empty());
std::size_t task_result = o->task_result_;
if (more_handlers && !one_thread_)
wake_one_thread_and_unlock(lock);
else
lock.unlock();
// Ensure the count of outstanding work is decremented on block exit.
work_cleanup on_exit = { this, &lock, &this_thread };
(void)on_exit;
// Complete the operation. May throw an exception. Deletes the object.
o->complete(this, ec, task_result);
this_thread.rethrow_pending_exception();
return 1;
}
void scheduler::stop_all_threads(
mutex::scoped_lock& lock)
{
stopped_ = true;
wakeup_event_.signal_all(lock);
if (!task_interrupted_ && task_)
{
task_interrupted_ = true;
task_->interrupt();
}
}
void scheduler::wake_one_thread_and_unlock(
mutex::scoped_lock& lock)
{
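// Try to hand the new work to a thread blocked on the wakeup event. If no
// thread is waiting, interrupt the task instead so that the thread running
// it returns and can pick up the handler.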
if (!wakeup_event_.maybe_unlock_and_signal_one(lock))
{
if (!task_interrupted_ && task_)
{
task_interrupted_ = true;
task_->interrupt();
}
lock.unlock();
}
}
scheduler_task* scheduler::get_default_task(asio::execution_context& ctx)
{
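// The task is the object the scheduler runs to wait for I/O readiness or
// completions: the io_uring service when it is configured as the default
// backend, otherwise the platform's reactor.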
#if defined(ASIO_HAS_IO_URING_AS_DEFAULT)
return &use_service<io_uring_service>(ctx);
#else // defined(ASIO_HAS_IO_URING_AS_DEFAULT)
return &use_service<reactor>(ctx);
#endif // defined(ASIO_HAS_IO_URING_AS_DEFAULT)
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // ASIO_DETAIL_IMPL_SCHEDULER_IPP
|
0 | repos/asio/asio/include/asio/detail | repos/asio/asio/include/asio/detail/impl/win_object_handle_service.ipp | //
// detail/impl/win_object_handle_service.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2024 Christopher M. Kohlhoff (chris at kohlhoff dot com)
// Copyright (c) 2011 Boris Schaeling ([email protected])
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_WIN_OBJECT_HANDLE_SERVICE_IPP
#define ASIO_DETAIL_IMPL_WIN_OBJECT_HANDLE_SERVICE_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if defined(ASIO_HAS_WINDOWS_OBJECT_HANDLE)
#include "asio/detail/win_object_handle_service.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
win_object_handle_service::win_object_handle_service(execution_context& context)
: execution_context_service_base<win_object_handle_service>(context),
scheduler_(asio::use_service<scheduler_impl>(context)),
mutex_(),
impl_list_(0),
shutdown_(false)
{
}
void win_object_handle_service::shutdown()
{
mutex::scoped_lock lock(mutex_);
// Setting this flag to true prevents new objects from being registered, and
// new asynchronous wait operations from being started. We only need to worry
// about cleaning up the operations that are currently in progress.
shutdown_ = true;
op_queue<operation> ops;
for (implementation_type* impl = impl_list_; impl; impl = impl->next_)
ops.push(impl->op_queue_);
lock.unlock();
scheduler_.abandon_operations(ops);
}
void win_object_handle_service::construct(
win_object_handle_service::implementation_type& impl)
{
impl.handle_ = INVALID_HANDLE_VALUE;
impl.wait_handle_ = INVALID_HANDLE_VALUE;
impl.owner_ = this;
// Insert implementation into linked list of all implementations.
mutex::scoped_lock lock(mutex_);
if (!shutdown_)
{
impl.next_ = impl_list_;
impl.prev_ = 0;
if (impl_list_)
impl_list_->prev_ = &impl;
impl_list_ = &impl;
}
}
void win_object_handle_service::move_construct(
win_object_handle_service::implementation_type& impl,
win_object_handle_service::implementation_type& other_impl)
{
mutex::scoped_lock lock(mutex_);
// Insert implementation into linked list of all implementations.
if (!shutdown_)
{
impl.next_ = impl_list_;
impl.prev_ = 0;
if (impl_list_)
impl_list_->prev_ = &impl;
impl_list_ = &impl;
}
impl.handle_ = other_impl.handle_;
other_impl.handle_ = INVALID_HANDLE_VALUE;
impl.wait_handle_ = other_impl.wait_handle_;
other_impl.wait_handle_ = INVALID_HANDLE_VALUE;
impl.op_queue_.push(other_impl.op_queue_);
impl.owner_ = this;
// We must not hold the lock while calling UnregisterWaitEx. This is because
// the registered callback function might be invoked while we are waiting for
// UnregisterWaitEx to complete.
lock.unlock();
if (impl.wait_handle_ != INVALID_HANDLE_VALUE)
::UnregisterWaitEx(impl.wait_handle_, INVALID_HANDLE_VALUE);
if (!impl.op_queue_.empty())
register_wait_callback(impl, lock);
}
void win_object_handle_service::move_assign(
win_object_handle_service::implementation_type& impl,
win_object_handle_service& other_service,
win_object_handle_service::implementation_type& other_impl)
{
asio::error_code ignored_ec;
close(impl, ignored_ec);
mutex::scoped_lock lock(mutex_);
if (this != &other_service)
{
// Remove implementation from linked list of all implementations.
if (impl_list_ == &impl)
impl_list_ = impl.next_;
if (impl.prev_)
impl.prev_->next_ = impl.next_;
if (impl.next_)
impl.next_->prev_ = impl.prev_;
impl.next_ = 0;
impl.prev_ = 0;
}
impl.handle_ = other_impl.handle_;
other_impl.handle_ = INVALID_HANDLE_VALUE;
impl.wait_handle_ = other_impl.wait_handle_;
other_impl.wait_handle_ = INVALID_HANDLE_VALUE;
impl.op_queue_.push(other_impl.op_queue_);
impl.owner_ = this;
if (this != &other_service)
{
// Insert implementation into linked list of all implementations.
impl.next_ = other_service.impl_list_;
impl.prev_ = 0;
if (other_service.impl_list_)
other_service.impl_list_->prev_ = &impl;
other_service.impl_list_ = &impl;
}
// We must not hold the lock while calling UnregisterWaitEx. This is because
// the registered callback function might be invoked while we are waiting for
// UnregisterWaitEx to complete.
lock.unlock();
if (impl.wait_handle_ != INVALID_HANDLE_VALUE)
::UnregisterWaitEx(impl.wait_handle_, INVALID_HANDLE_VALUE);
if (!impl.op_queue_.empty())
register_wait_callback(impl, lock);
}
void win_object_handle_service::destroy(
win_object_handle_service::implementation_type& impl)
{
mutex::scoped_lock lock(mutex_);
// Remove implementation from linked list of all implementations.
if (impl_list_ == &impl)
impl_list_ = impl.next_;
if (impl.prev_)
impl.prev_->next_ = impl.next_;
if (impl.next_)
impl.next_->prev_ = impl.prev_;
impl.next_ = 0;
impl.prev_ = 0;
if (is_open(impl))
{
ASIO_HANDLER_OPERATION((scheduler_.context(), "object_handle",
&impl, reinterpret_cast<uintmax_t>(impl.wait_handle_), "close"));
HANDLE wait_handle = impl.wait_handle_;
impl.wait_handle_ = INVALID_HANDLE_VALUE;
op_queue<operation> ops;
while (wait_op* op = impl.op_queue_.front())
{
op->ec_ = asio::error::operation_aborted;
impl.op_queue_.pop();
ops.push(op);
}
// We must not hold the lock while calling UnregisterWaitEx. This is
// because the registered callback function might be invoked while we are
// waiting for UnregisterWaitEx to complete.
lock.unlock();
if (wait_handle != INVALID_HANDLE_VALUE)
::UnregisterWaitEx(wait_handle, INVALID_HANDLE_VALUE);
::CloseHandle(impl.handle_);
impl.handle_ = INVALID_HANDLE_VALUE;
scheduler_.post_deferred_completions(ops);
}
}
asio::error_code win_object_handle_service::assign(
win_object_handle_service::implementation_type& impl,
const native_handle_type& handle, asio::error_code& ec)
{
if (is_open(impl))
{
ec = asio::error::already_open;
ASIO_ERROR_LOCATION(ec);
return ec;
}
impl.handle_ = handle;
ec = asio::error_code();
return ec;
}
asio::error_code win_object_handle_service::close(
win_object_handle_service::implementation_type& impl,
asio::error_code& ec)
{
if (is_open(impl))
{
ASIO_HANDLER_OPERATION((scheduler_.context(), "object_handle",
&impl, reinterpret_cast<uintmax_t>(impl.wait_handle_), "close"));
mutex::scoped_lock lock(mutex_);
HANDLE wait_handle = impl.wait_handle_;
impl.wait_handle_ = INVALID_HANDLE_VALUE;
op_queue<operation> completed_ops;
while (wait_op* op = impl.op_queue_.front())
{
impl.op_queue_.pop();
op->ec_ = asio::error::operation_aborted;
completed_ops.push(op);
}
// We must not hold the lock while calling UnregisterWaitEx. This is
// because the registered callback function might be invoked while we are
// waiting for UnregisterWaitEx to complete.
lock.unlock();
if (wait_handle != INVALID_HANDLE_VALUE)
::UnregisterWaitEx(wait_handle, INVALID_HANDLE_VALUE);
if (::CloseHandle(impl.handle_))
{
impl.handle_ = INVALID_HANDLE_VALUE;
ec = asio::error_code();
}
else
{
DWORD last_error = ::GetLastError();
ec = asio::error_code(last_error,
asio::error::get_system_category());
}
scheduler_.post_deferred_completions(completed_ops);
}
else
{
ec = asio::error_code();
}
ASIO_ERROR_LOCATION(ec);
return ec;
}
asio::error_code win_object_handle_service::cancel(
win_object_handle_service::implementation_type& impl,
asio::error_code& ec)
{
if (is_open(impl))
{
ASIO_HANDLER_OPERATION((scheduler_.context(), "object_handle",
&impl, reinterpret_cast<uintmax_t>(impl.wait_handle_), "cancel"));
mutex::scoped_lock lock(mutex_);
HANDLE wait_handle = impl.wait_handle_;
impl.wait_handle_ = INVALID_HANDLE_VALUE;
op_queue<operation> completed_ops;
while (wait_op* op = impl.op_queue_.front())
{
op->ec_ = asio::error::operation_aborted;
impl.op_queue_.pop();
completed_ops.push(op);
}
// We must not hold the lock while calling UnregisterWaitEx. This is
// because the registered callback function might be invoked while we are
// waiting for UnregisterWaitEx to complete.
lock.unlock();
if (wait_handle != INVALID_HANDLE_VALUE)
::UnregisterWaitEx(wait_handle, INVALID_HANDLE_VALUE);
ec = asio::error_code();
scheduler_.post_deferred_completions(completed_ops);
}
else
{
ec = asio::error::bad_descriptor;
}
ASIO_ERROR_LOCATION(ec);
return ec;
}
void win_object_handle_service::wait(
win_object_handle_service::implementation_type& impl,
asio::error_code& ec)
{
switch (::WaitForSingleObject(impl.handle_, INFINITE))
{
case WAIT_FAILED:
{
DWORD last_error = ::GetLastError();
ec = asio::error_code(last_error,
asio::error::get_system_category());
ASIO_ERROR_LOCATION(ec);
break;
}
case WAIT_OBJECT_0:
case WAIT_ABANDONED:
default:
ec = asio::error_code();
break;
}
}
void win_object_handle_service::start_wait_op(
win_object_handle_service::implementation_type& impl, wait_op* op)
{
scheduler_.work_started();
if (is_open(impl))
{
mutex::scoped_lock lock(mutex_);
if (!shutdown_)
{
impl.op_queue_.push(op);
// Only the first operation to be queued gets to register a wait callback.
// Subsequent operations have to wait for the first to finish.
if (impl.op_queue_.front() == op)
register_wait_callback(impl, lock);
}
else
{
lock.unlock();
scheduler_.post_deferred_completion(op);
}
}
else
{
op->ec_ = asio::error::bad_descriptor;
scheduler_.post_deferred_completion(op);
}
}
void win_object_handle_service::register_wait_callback(
win_object_handle_service::implementation_type& impl,
mutex::scoped_lock& lock)
{
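// Ensure the service mutex is held (some callers arrive with it released),
// then ask the Windows thread pool to wait on the handle. If registration
// fails, all queued operations are completed with the error.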
lock.lock();
if (!RegisterWaitForSingleObject(&impl.wait_handle_,
impl.handle_, &win_object_handle_service::wait_callback,
&impl, INFINITE, WT_EXECUTEONLYONCE))
{
DWORD last_error = ::GetLastError();
asio::error_code ec(last_error,
asio::error::get_system_category());
op_queue<operation> completed_ops;
while (wait_op* op = impl.op_queue_.front())
{
op->ec_ = ec;
impl.op_queue_.pop();
completed_ops.push(op);
}
lock.unlock();
scheduler_.post_deferred_completions(completed_ops);
}
}
void win_object_handle_service::wait_callback(PVOID param, BOOLEAN)
{
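// Invoked on a Windows thread pool thread when the waited-for handle is
// signalled. Completes the front wait operation and, if further operations
// are queued, registers a new wait for the next one.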
implementation_type* impl = static_cast<implementation_type*>(param);
mutex::scoped_lock lock(impl->owner_->mutex_);
if (impl->wait_handle_ != INVALID_HANDLE_VALUE)
{
::UnregisterWaitEx(impl->wait_handle_, NULL);
impl->wait_handle_ = INVALID_HANDLE_VALUE;
}
if (wait_op* op = impl->op_queue_.front())
{
op_queue<operation> completed_ops;
op->ec_ = asio::error_code();
impl->op_queue_.pop();
completed_ops.push(op);
if (!impl->op_queue_.empty())
{
if (!RegisterWaitForSingleObject(&impl->wait_handle_,
impl->handle_, &win_object_handle_service::wait_callback,
param, INFINITE, WT_EXECUTEONLYONCE))
{
DWORD last_error = ::GetLastError();
asio::error_code ec(last_error,
asio::error::get_system_category());
while ((op = impl->op_queue_.front()) != 0)
{
op->ec_ = ec;
impl->op_queue_.pop();
completed_ops.push(op);
}
}
}
scheduler_impl& sched = impl->owner_->scheduler_;
lock.unlock();
sched.post_deferred_completions(completed_ops);
}
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // defined(ASIO_HAS_WINDOWS_OBJECT_HANDLE)
#endif // ASIO_DETAIL_IMPL_WIN_OBJECT_HANDLE_SERVICE_IPP
|
0 | repos/asio/asio/include/asio/detail | repos/asio/asio/include/asio/detail/impl/null_event.ipp | //
// detail/impl/null_event.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2024 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_NULL_EVENT_IPP
#define ASIO_DETAIL_IMPL_NULL_EVENT_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if defined(ASIO_WINDOWS_RUNTIME)
# include <thread>
#elif defined(ASIO_WINDOWS) || defined(__CYGWIN__)
# include "asio/detail/socket_types.hpp"
#else
# include <unistd.h>
# if defined(__hpux)
# include <sys/time.h>
# endif
# if !defined(__hpux) || defined(__SELECT)
# include <sys/select.h>
# endif
#endif
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
void null_event::do_wait()
{
#if defined(ASIO_WINDOWS_RUNTIME)
std::this_thread::sleep_until((std::chrono::steady_clock::time_point::max)());
#elif defined(ASIO_WINDOWS) || defined(__CYGWIN__)
::Sleep(INFINITE);
#else
::pause();
#endif
}
void null_event::do_wait_for_usec(long usec)
{
#if defined(ASIO_WINDOWS_RUNTIME)
std::this_thread::sleep_for(std::chrono::microseconds(usec));
#elif defined(ASIO_WINDOWS) || defined(__CYGWIN__)
::Sleep(usec / 1000);
#elif defined(__hpux) && defined(__SELECT)
timespec ts;
ts.tv_sec = usec / 1000000;
ts.tv_nsec = (usec % 1000000) * 1000;
::pselect(0, 0, 0, 0, &ts, 0);
#else
timeval tv;
tv.tv_sec = usec / 1000000;
tv.tv_usec = usec % 1000000;
::select(0, 0, 0, 0, &tv);
#endif
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // ASIO_DETAIL_IMPL_NULL_EVENT_IPP
|
0 | repos/asio/asio/include/asio/detail | repos/asio/asio/include/asio/detail/impl/win_iocp_io_context.hpp | //
// detail/impl/win_iocp_io_context.hpp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2024 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_WIN_IOCP_IO_CONTEXT_HPP
#define ASIO_DETAIL_IMPL_WIN_IOCP_IO_CONTEXT_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if defined(ASIO_HAS_IOCP)
#include "asio/detail/completion_handler.hpp"
#include "asio/detail/fenced_block.hpp"
#include "asio/detail/handler_alloc_helpers.hpp"
#include "asio/detail/memory.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
template <typename Time_Traits>
void win_iocp_io_context::add_timer_queue(
timer_queue<Time_Traits>& queue)
{
do_add_timer_queue(queue);
}
template <typename Time_Traits>
void win_iocp_io_context::remove_timer_queue(
timer_queue<Time_Traits>& queue)
{
do_remove_timer_queue(queue);
}
template <typename Time_Traits>
void win_iocp_io_context::schedule_timer(timer_queue<Time_Traits>& queue,
const typename Time_Traits::time_type& time,
typename timer_queue<Time_Traits>::per_timer_data& timer, wait_op* op)
{
// If the service has been shut down we silently discard the timer.
if (::InterlockedExchangeAdd(&shutdown_, 0) != 0)
{
post_immediate_completion(op, false);
return;
}
mutex::scoped_lock lock(dispatch_mutex_);
bool earliest = queue.enqueue_timer(time, timer, op);
work_started();
if (earliest)
update_timeout();
}
template <typename Time_Traits>
std::size_t win_iocp_io_context::cancel_timer(timer_queue<Time_Traits>& queue,
typename timer_queue<Time_Traits>::per_timer_data& timer,
std::size_t max_cancelled)
{
// If the service has been shut down we silently ignore the cancellation.
if (::InterlockedExchangeAdd(&shutdown_, 0) != 0)
return 0;
mutex::scoped_lock lock(dispatch_mutex_);
op_queue<win_iocp_operation> ops;
std::size_t n = queue.cancel_timer(timer, ops, max_cancelled);
lock.unlock();
post_deferred_completions(ops);
return n;
}
template <typename Time_Traits>
void win_iocp_io_context::cancel_timer_by_key(timer_queue<Time_Traits>& queue,
typename timer_queue<Time_Traits>::per_timer_data* timer,
void* cancellation_key)
{
// If the service has been shut down we silently ignore the cancellation.
if (::InterlockedExchangeAdd(&shutdown_, 0) != 0)
return;
mutex::scoped_lock lock(dispatch_mutex_);
op_queue<win_iocp_operation> ops;
queue.cancel_timer_by_key(timer, ops, cancellation_key);
lock.unlock();
post_deferred_completions(ops);
}
template <typename Time_Traits>
void win_iocp_io_context::move_timer(timer_queue<Time_Traits>& queue,
typename timer_queue<Time_Traits>::per_timer_data& to,
typename timer_queue<Time_Traits>::per_timer_data& from)
{
asio::detail::mutex::scoped_lock lock(dispatch_mutex_);
op_queue<operation> ops;
queue.cancel_timer(to, ops);
queue.move_timer(to, from);
lock.unlock();
post_deferred_completions(ops);
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // defined(ASIO_HAS_IOCP)
#endif // ASIO_DETAIL_IMPL_WIN_IOCP_IO_CONTEXT_HPP
|
0 | repos/asio/asio/include/asio/detail | repos/asio/asio/include/asio/detail/impl/winrt_timer_scheduler.hpp | //
// detail/impl/winrt_timer_scheduler.hpp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2024 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_WINRT_TIMER_SCHEDULER_HPP
#define ASIO_DETAIL_IMPL_WINRT_TIMER_SCHEDULER_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if defined(ASIO_WINDOWS_RUNTIME)
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
template <typename Time_Traits>
void winrt_timer_scheduler::add_timer_queue(timer_queue<Time_Traits>& queue)
{
do_add_timer_queue(queue);
}
// Remove a timer queue from the reactor.
template <typename Time_Traits>
void winrt_timer_scheduler::remove_timer_queue(timer_queue<Time_Traits>& queue)
{
do_remove_timer_queue(queue);
}
template <typename Time_Traits>
void winrt_timer_scheduler::schedule_timer(timer_queue<Time_Traits>& queue,
const typename Time_Traits::time_type& time,
typename timer_queue<Time_Traits>::per_timer_data& timer, wait_op* op)
{
asio::detail::mutex::scoped_lock lock(mutex_);
if (shutdown_)
{
scheduler_.post_immediate_completion(op, false);
return;
}
bool earliest = queue.enqueue_timer(time, timer, op);
scheduler_.work_started();
if (earliest)
event_.signal(lock);
}
template <typename Time_Traits>
std::size_t winrt_timer_scheduler::cancel_timer(timer_queue<Time_Traits>& queue,
typename timer_queue<Time_Traits>::per_timer_data& timer,
std::size_t max_cancelled)
{
asio::detail::mutex::scoped_lock lock(mutex_);
op_queue<operation> ops;
std::size_t n = queue.cancel_timer(timer, ops, max_cancelled);
lock.unlock();
scheduler_.post_deferred_completions(ops);
return n;
}
template <typename Time_Traits>
void winrt_timer_scheduler::move_timer(timer_queue<Time_Traits>& queue,
typename timer_queue<Time_Traits>::per_timer_data& to,
typename timer_queue<Time_Traits>::per_timer_data& from)
{
asio::detail::mutex::scoped_lock lock(mutex_);
op_queue<operation> ops;
queue.cancel_timer(to, ops);
queue.move_timer(to, from);
lock.unlock();
scheduler_.post_deferred_completions(ops);
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // defined(ASIO_WINDOWS_RUNTIME)
#endif // ASIO_DETAIL_IMPL_WINRT_TIMER_SCHEDULER_HPP
|
0 | repos/asio/asio/include/asio/detail | repos/asio/asio/include/asio/detail/impl/io_uring_service.ipp | //
// detail/impl/io_uring_service.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2024 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_IO_URING_SERVICE_IPP
#define ASIO_DETAIL_IMPL_IO_URING_SERVICE_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if defined(ASIO_HAS_IO_URING)
#include <cstddef>
#include <sys/eventfd.h>
#include "asio/detail/io_uring_service.hpp"
#include "asio/detail/reactor_op.hpp"
#include "asio/detail/scheduler.hpp"
#include "asio/detail/throw_error.hpp"
#include "asio/error.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
io_uring_service::io_uring_service(asio::execution_context& ctx)
: execution_context_service_base<io_uring_service>(ctx),
scheduler_(use_service<scheduler>(ctx)),
mutex_(ASIO_CONCURRENCY_HINT_IS_LOCKING(
REACTOR_REGISTRATION, scheduler_.concurrency_hint())),
outstanding_work_(0),
submit_sqes_op_(this),
pending_sqes_(0),
pending_submit_sqes_op_(false),
shutdown_(false),
timeout_(),
registration_mutex_(mutex_.enabled()),
reactor_(use_service<reactor>(ctx)),
reactor_data_(),
event_fd_(-1)
{
reactor_.init_task();
init_ring();
register_with_reactor();
}
io_uring_service::~io_uring_service()
{
if (ring_.ring_fd != -1)
::io_uring_queue_exit(&ring_);
if (event_fd_ != -1)
::close(event_fd_);
}
void io_uring_service::shutdown()
{
mutex::scoped_lock lock(mutex_);
shutdown_ = true;
lock.unlock();
op_queue<operation> ops;
// Cancel all outstanding operations.
while (io_object* io_obj = registered_io_objects_.first())
{
for (int i = 0; i < max_ops; ++i)
{
if (!io_obj->queues_[i].op_queue_.empty())
{
ops.push(io_obj->queues_[i].op_queue_);
if (::io_uring_sqe* sqe = get_sqe())
::io_uring_prep_cancel(sqe, &io_obj->queues_[i], 0);
}
}
io_obj->shutdown_ = true;
registered_io_objects_.free(io_obj);
}
// Cancel the timeout operation.
if (::io_uring_sqe* sqe = get_sqe())
::io_uring_prep_cancel(sqe, &timeout_, IOSQE_IO_DRAIN);
submit_sqes();
// Wait for all completions to come back.
for (; outstanding_work_ > 0; --outstanding_work_)
{
::io_uring_cqe* cqe = 0;
if (::io_uring_wait_cqe(&ring_, &cqe) != 0)
break;
}
timer_queues_.get_all_timers(ops);
scheduler_.abandon_operations(ops);
}
void io_uring_service::notify_fork(
asio::execution_context::fork_event fork_ev)
{
switch (fork_ev)
{
case asio::execution_context::fork_prepare:
{
// Cancel all outstanding operations. They will be restarted
// after the fork completes.
mutex::scoped_lock registration_lock(registration_mutex_);
for (io_object* io_obj = registered_io_objects_.first();
io_obj != 0; io_obj = io_obj->next_)
{
mutex::scoped_lock io_object_lock(io_obj->mutex_);
for (int i = 0; i < max_ops; ++i)
{
if (!io_obj->queues_[i].op_queue_.empty()
&& !io_obj->queues_[i].cancel_requested_)
{
mutex::scoped_lock lock(mutex_);
if (::io_uring_sqe* sqe = get_sqe())
::io_uring_prep_cancel(sqe, &io_obj->queues_[i], 0);
}
}
}
// Cancel the timeout operation.
{
mutex::scoped_lock lock(mutex_);
if (::io_uring_sqe* sqe = get_sqe())
::io_uring_prep_cancel(sqe, &timeout_, IOSQE_IO_DRAIN);
submit_sqes();
}
// Wait for all completions to come back, and post all completed I/O
// queues to the scheduler. Note that some operations may have already
// completed, or were explicitly cancelled. All others will be
// automatically restarted.
op_queue<operation> ops;
for (; outstanding_work_ > 0; --outstanding_work_)
{
::io_uring_cqe* cqe = 0;
if (::io_uring_wait_cqe(&ring_, &cqe) != 0)
break;
if (void* ptr = ::io_uring_cqe_get_data(cqe))
{
if (ptr != this && ptr != &timer_queues_ && ptr != &timeout_)
{
io_queue* io_q = static_cast<io_queue*>(ptr);
io_q->set_result(cqe->res);
ops.push(io_q);
}
}
}
scheduler_.post_deferred_completions(ops);
// Restart the eventfd operation.
register_with_reactor();
}
break;
case asio::execution_context::fork_parent:
// Restart the timeout and eventfd operations.
update_timeout();
register_with_reactor();
break;
case asio::execution_context::fork_child:
{
// The child process gets a new io_uring instance.
::io_uring_queue_exit(&ring_);
init_ring();
register_with_reactor();
}
break;
default:
break;
}
}
void io_uring_service::init_task()
{
scheduler_.init_task();
}
void io_uring_service::register_io_object(
io_uring_service::per_io_object_data& io_obj)
{
io_obj = allocate_io_object();
mutex::scoped_lock io_object_lock(io_obj->mutex_);
io_obj->service_ = this;
io_obj->shutdown_ = false;
for (int i = 0; i < max_ops; ++i)
{
io_obj->queues_[i].io_object_ = io_obj;
io_obj->queues_[i].cancel_requested_ = false;
}
}
void io_uring_service::register_internal_io_object(
io_uring_service::per_io_object_data& io_obj,
int op_type, io_uring_operation* op)
{
io_obj = allocate_io_object();
mutex::scoped_lock io_object_lock(io_obj->mutex_);
io_obj->service_ = this;
io_obj->shutdown_ = false;
for (int i = 0; i < max_ops; ++i)
{
io_obj->queues_[i].io_object_ = io_obj;
io_obj->queues_[i].cancel_requested_ = false;
}
io_obj->queues_[op_type].op_queue_.push(op);
io_object_lock.unlock();
mutex::scoped_lock lock(mutex_);
if (::io_uring_sqe* sqe = get_sqe())
{
op->prepare(sqe);
::io_uring_sqe_set_data(sqe, &io_obj->queues_[op_type]);
post_submit_sqes_op(lock);
}
else
{
asio::error_code ec(ENOBUFS,
asio::error::get_system_category());
asio::detail::throw_error(ec, "io_uring_get_sqe");
}
}
void io_uring_service::register_buffers(const ::iovec* v, unsigned n)
{
int result = ::io_uring_register_buffers(&ring_, v, n);
if (result < 0)
{
asio::error_code ec(-result,
asio::error::get_system_category());
asio::detail::throw_error(ec, "io_uring_register_buffers");
}
}
void io_uring_service::unregister_buffers()
{
(void)::io_uring_unregister_buffers(&ring_);
}
void io_uring_service::start_op(int op_type,
io_uring_service::per_io_object_data& io_obj,
io_uring_operation* op, bool is_continuation)
{
if (!io_obj)
{
op->ec_ = asio::error::bad_descriptor;
post_immediate_completion(op, is_continuation);
return;
}
mutex::scoped_lock io_object_lock(io_obj->mutex_);
if (io_obj->shutdown_)
{
io_object_lock.unlock();
post_immediate_completion(op, is_continuation);
return;
}
if (io_obj->queues_[op_type].op_queue_.empty())
{
if (op->perform(false))
{
io_object_lock.unlock();
scheduler_.post_immediate_completion(op, is_continuation);
}
else
{
io_obj->queues_[op_type].op_queue_.push(op);
io_object_lock.unlock();
mutex::scoped_lock lock(mutex_);
if (::io_uring_sqe* sqe = get_sqe())
{
op->prepare(sqe);
::io_uring_sqe_set_data(sqe, &io_obj->queues_[op_type]);
scheduler_.work_started();
post_submit_sqes_op(lock);
}
else
{
lock.unlock();
io_obj->queues_[op_type].set_result(-ENOBUFS);
post_immediate_completion(&io_obj->queues_[op_type], is_continuation);
}
}
}
else
{
io_obj->queues_[op_type].op_queue_.push(op);
scheduler_.work_started();
}
}
void io_uring_service::cancel_ops(io_uring_service::per_io_object_data& io_obj)
{
if (!io_obj)
return;
mutex::scoped_lock io_object_lock(io_obj->mutex_);
op_queue<operation> ops;
do_cancel_ops(io_obj, ops);
io_object_lock.unlock();
scheduler_.post_deferred_completions(ops);
}
void io_uring_service::cancel_ops_by_key(
io_uring_service::per_io_object_data& io_obj,
int op_type, void* cancellation_key)
{
if (!io_obj)
return;
mutex::scoped_lock io_object_lock(io_obj->mutex_);
bool first = true;
op_queue<operation> ops;
op_queue<io_uring_operation> other_ops;
while (io_uring_operation* op = io_obj->queues_[op_type].op_queue_.front())
{
io_obj->queues_[op_type].op_queue_.pop();
if (op->cancellation_key_ == cancellation_key)
{
if (first)
{
other_ops.push(op);
if (!io_obj->queues_[op_type].cancel_requested_)
{
io_obj->queues_[op_type].cancel_requested_ = true;
mutex::scoped_lock lock(mutex_);
if (::io_uring_sqe* sqe = get_sqe())
{
::io_uring_prep_cancel(sqe, &io_obj->queues_[op_type], 0);
submit_sqes();
}
}
}
else
{
op->ec_ = asio::error::operation_aborted;
ops.push(op);
}
}
else
other_ops.push(op);
first = false;
}
io_obj->queues_[op_type].op_queue_.push(other_ops);
io_object_lock.unlock();
scheduler_.post_deferred_completions(ops);
}
void io_uring_service::deregister_io_object(
io_uring_service::per_io_object_data& io_obj)
{
if (!io_obj)
return;
mutex::scoped_lock io_object_lock(io_obj->mutex_);
if (!io_obj->shutdown_)
{
op_queue<operation> ops;
bool pending_cancelled_ops = do_cancel_ops(io_obj, ops);
io_obj->shutdown_ = true;
io_object_lock.unlock();
scheduler_.post_deferred_completions(ops);
if (pending_cancelled_ops)
{
// There are still pending operations. Prevent cleanup_io_object from
// freeing the I/O object and let the last operation to complete free it.
io_obj = 0;
}
else
{
// Leave io_obj set so that it will be freed by the subsequent call to
// cleanup_io_object.
}
}
else
{
// We are shutting down, so prevent cleanup_io_object from freeing
// the I/O object and let the destructor free it instead.
io_obj = 0;
}
}
void io_uring_service::cleanup_io_object(
io_uring_service::per_io_object_data& io_obj)
{
if (io_obj)
{
free_io_object(io_obj);
io_obj = 0;
}
}
void io_uring_service::run(long usec, op_queue<operation>& ops)
{
__kernel_timespec ts;
int local_ops = 0;
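// local_ops counts completions that are private to this invocation: the
// timeout entry submitted below (keyed on the local timespec) and, if
// needed, its removal. These must be reaped before returning.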
if (usec > 0)
{
ts.tv_sec = usec / 1000000;
ts.tv_nsec = (usec % 1000000) * 1000;
mutex::scoped_lock lock(mutex_);
if (::io_uring_sqe* sqe = get_sqe())
{
++local_ops;
::io_uring_prep_timeout(sqe, &ts, 0, 0);
::io_uring_sqe_set_data(sqe, &ts);
submit_sqes();
}
}
::io_uring_cqe* cqe = 0;
int result = (usec == 0)
? ::io_uring_peek_cqe(&ring_, &cqe)
: ::io_uring_wait_cqe(&ring_, &cqe);
if (local_ops > 0)
{
if (result != 0 || ::io_uring_cqe_get_data(cqe) != &ts)
{
mutex::scoped_lock lock(mutex_);
if (::io_uring_sqe* sqe = get_sqe())
{
++local_ops;
::io_uring_prep_timeout_remove(sqe, reinterpret_cast<__u64>(&ts), 0);
::io_uring_sqe_set_data(sqe, &ts);
submit_sqes();
}
}
}
bool check_timers = false;
int count = 0;
while (result == 0 || local_ops > 0)
{
if (result == 0)
{
if (void* ptr = ::io_uring_cqe_get_data(cqe))
{
if (ptr == this)
{
// The io_uring service was interrupted.
}
else if (ptr == &timer_queues_)
{
check_timers = true;
}
else if (ptr == &timeout_)
{
check_timers = true;
timeout_.tv_sec = 0;
timeout_.tv_nsec = 0;
}
else if (ptr == &ts)
{
--local_ops;
}
else
{
io_queue* io_q = static_cast<io_queue*>(ptr);
io_q->set_result(cqe->res);
ops.push(io_q);
}
}
::io_uring_cqe_seen(&ring_, cqe);
++count;
}
result = (count < complete_batch_size || local_ops > 0)
? ::io_uring_peek_cqe(&ring_, &cqe) : -EAGAIN;
}
decrement(outstanding_work_, count);
if (check_timers)
{
mutex::scoped_lock lock(mutex_);
timer_queues_.get_ready_timers(ops);
if (timeout_.tv_sec == 0 && timeout_.tv_nsec == 0)
{
timeout_ = get_timeout();
if (::io_uring_sqe* sqe = get_sqe())
{
::io_uring_prep_timeout(sqe, &timeout_, 0, 0);
::io_uring_sqe_set_data(sqe, &timeout_);
push_submit_sqes_op(ops);
}
}
}
}
void io_uring_service::interrupt()
{
mutex::scoped_lock lock(mutex_);
if (::io_uring_sqe* sqe = get_sqe())
{
::io_uring_prep_nop(sqe);
::io_uring_sqe_set_data(sqe, this);
}
submit_sqes();
}
void io_uring_service::init_ring()
{
int result = ::io_uring_queue_init(ring_size, &ring_, 0);
if (result < 0)
{
ring_.ring_fd = -1;
asio::error_code ec(-result,
asio::error::get_system_category());
asio::detail::throw_error(ec, "io_uring_queue_init");
}
#if !defined(ASIO_HAS_IO_URING_AS_DEFAULT)
event_fd_ = ::eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
if (event_fd_ < 0)
{
// eventfd reports failure via errno rather than a negative return value.
asio::error_code ec(errno,
asio::error::get_system_category());
::io_uring_queue_exit(&ring_);
asio::detail::throw_error(ec, "eventfd");
}
result = ::io_uring_register_eventfd(&ring_, event_fd_);
if (result < 0)
{
::close(event_fd_);
::io_uring_queue_exit(&ring_);
asio::error_code ec(-result,
asio::error::get_system_category());
asio::detail::throw_error(ec, "io_uring_register_eventfd");
}
#endif // !defined(ASIO_HAS_IO_URING_AS_DEFAULT)
}
#if !defined(ASIO_HAS_IO_URING_AS_DEFAULT)
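// When io_uring is not the default backend, completions are signalled via
// an eventfd registered with the reactor. This reactor operation drains the
// eventfd and reaps any ready completion queue entries.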
class io_uring_service::event_fd_read_op :
public reactor_op
{
public:
event_fd_read_op(io_uring_service* s)
: reactor_op(asio::error_code(),
&event_fd_read_op::do_perform, event_fd_read_op::do_complete),
service_(s)
{
}
static status do_perform(reactor_op* base)
{
event_fd_read_op* o(static_cast<event_fd_read_op*>(base));
for (;;)
{
// Only perform one read. The kernel maintains an atomic counter.
uint64_t counter(0);
errno = 0;
int bytes_read = ::read(o->service_->event_fd_,
&counter, sizeof(uint64_t));
if (bytes_read < 0 && errno == EINTR)
continue;
break;
}
op_queue<operation> ops;
o->service_->run(0, ops);
o->service_->scheduler_.post_deferred_completions(ops);
return not_done;
}
static void do_complete(void* /*owner*/, operation* base,
const asio::error_code& /*ec*/,
std::size_t /*bytes_transferred*/)
{
event_fd_read_op* o(static_cast<event_fd_read_op*>(base));
delete o;
}
private:
io_uring_service* service_;
};
#endif // !defined(ASIO_HAS_IO_URING_AS_DEFAULT)
void io_uring_service::register_with_reactor()
{
#if !defined(ASIO_HAS_IO_URING_AS_DEFAULT)
reactor_.register_internal_descriptor(reactor::read_op,
event_fd_, reactor_data_, new event_fd_read_op(this));
#endif // !defined(ASIO_HAS_IO_URING_AS_DEFAULT)
}
io_uring_service::io_object* io_uring_service::allocate_io_object()
{
mutex::scoped_lock registration_lock(registration_mutex_);
return registered_io_objects_.alloc(
ASIO_CONCURRENCY_HINT_IS_LOCKING(
REACTOR_IO, scheduler_.concurrency_hint()));
}
void io_uring_service::free_io_object(io_uring_service::io_object* io_obj)
{
mutex::scoped_lock registration_lock(registration_mutex_);
registered_io_objects_.free(io_obj);
}
bool io_uring_service::do_cancel_ops(
per_io_object_data& io_obj, op_queue<operation>& ops)
{
bool cancel_op = false;
for (int i = 0; i < max_ops; ++i)
{
if (io_uring_operation* first_op = io_obj->queues_[i].op_queue_.front())
{
cancel_op = true;
io_obj->queues_[i].op_queue_.pop();
while (io_uring_operation* op = io_obj->queues_[i].op_queue_.front())
{
op->ec_ = asio::error::operation_aborted;
io_obj->queues_[i].op_queue_.pop();
ops.push(op);
}
io_obj->queues_[i].op_queue_.push(first_op);
}
}
if (cancel_op)
{
mutex::scoped_lock lock(mutex_);
for (int i = 0; i < max_ops; ++i)
{
if (!io_obj->queues_[i].op_queue_.empty()
&& !io_obj->queues_[i].cancel_requested_)
{
io_obj->queues_[i].cancel_requested_ = true;
if (::io_uring_sqe* sqe = get_sqe())
::io_uring_prep_cancel(sqe, &io_obj->queues_[i], 0);
}
}
submit_sqes();
}
return cancel_op;
}
void io_uring_service::do_add_timer_queue(timer_queue_base& queue)
{
mutex::scoped_lock lock(mutex_);
timer_queues_.insert(&queue);
}
void io_uring_service::do_remove_timer_queue(timer_queue_base& queue)
{
mutex::scoped_lock lock(mutex_);
timer_queues_.erase(&queue);
}
void io_uring_service::update_timeout()
{
if (::io_uring_sqe* sqe = get_sqe())
{
::io_uring_prep_timeout_remove(sqe, reinterpret_cast<__u64>(&timeout_), 0);
::io_uring_sqe_set_data(sqe, &timer_queues_);
}
}
__kernel_timespec io_uring_service::get_timeout() const
{
__kernel_timespec ts;
long usec = timer_queues_.wait_duration_usec(5 * 60 * 1000 * 1000);
ts.tv_sec = usec / 1000000;
ts.tv_nsec = usec ? (usec % 1000000) * 1000 : 1;
return ts;
}
::io_uring_sqe* io_uring_service::get_sqe()
{
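// Obtain a free submission queue entry. If the submission queue is full,
// flush the pending entries to the kernel and retry once.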
::io_uring_sqe* sqe = ::io_uring_get_sqe(&ring_);
if (!sqe)
{
submit_sqes();
sqe = ::io_uring_get_sqe(&ring_);
}
if (sqe)
{
::io_uring_sqe_set_data(sqe, 0);
++pending_sqes_;
}
return sqe;
}
void io_uring_service::submit_sqes()
{
if (pending_sqes_ != 0)
{
int result = ::io_uring_submit(&ring_);
if (result > 0)
{
pending_sqes_ -= result;
increment(outstanding_work_, result);
}
}
}
void io_uring_service::post_submit_sqes_op(mutex::scoped_lock& lock)
{
if (pending_sqes_ >= submit_batch_size)
{
submit_sqes();
}
else if (pending_sqes_ != 0 && !pending_submit_sqes_op_)
{
pending_submit_sqes_op_ = true;
lock.unlock();
scheduler_.post_immediate_completion(&submit_sqes_op_, false);
}
}
void io_uring_service::push_submit_sqes_op(op_queue<operation>& ops)
{
if (pending_sqes_ != 0 && !pending_submit_sqes_op_)
{
pending_submit_sqes_op_ = true;
ops.push(&submit_sqes_op_);
scheduler_.compensating_work_started();
}
}
io_uring_service::submit_sqes_op::submit_sqes_op(io_uring_service* s)
: operation(&io_uring_service::submit_sqes_op::do_complete),
service_(s)
{
}
void io_uring_service::submit_sqes_op::do_complete(void* owner, operation* base,
const asio::error_code& /*ec*/, std::size_t /*bytes_transferred*/)
{
if (owner)
{
submit_sqes_op* o = static_cast<submit_sqes_op*>(base);
mutex::scoped_lock lock(o->service_->mutex_);
o->service_->submit_sqes();
if (o->service_->pending_sqes_ != 0)
o->service_->scheduler_.post_immediate_completion(o, true);
else
o->service_->pending_submit_sqes_op_ = false;
}
}
io_uring_service::io_queue::io_queue()
: operation(&io_uring_service::io_queue::do_complete)
{
}
struct io_uring_service::perform_io_cleanup_on_block_exit
{
explicit perform_io_cleanup_on_block_exit(io_uring_service* s)
: service_(s), io_object_to_free_(0), first_op_(0)
{
}
~perform_io_cleanup_on_block_exit()
{
if (io_object_to_free_)
{
mutex::scoped_lock lock(service_->mutex_);
service_->free_io_object(io_object_to_free_);
}
if (first_op_)
{
// Post the remaining completed operations for invocation.
if (!ops_.empty())
service_->scheduler_.post_deferred_completions(ops_);
// A user-initiated operation has completed, but there's no need to
// explicitly call work_finished() here. Instead, we'll take advantage of
// the fact that the scheduler will call work_finished() once we return.
}
else
{
// No user-initiated operations have completed, so we need to compensate
// for the work_finished() call that the scheduler will make once this
// operation returns.
service_->scheduler_.compensating_work_started();
}
}
io_uring_service* service_;
io_object* io_object_to_free_;
op_queue<operation> ops_;
operation* first_op_;
};
operation* io_uring_service::io_queue::perform_io(int result)
{
perform_io_cleanup_on_block_exit io_cleanup(io_object_->service_);
mutex::scoped_lock io_object_lock(io_object_->mutex_);
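// Deliver the result to the queued operations unless it is a cancellation
// completion (-ECANCELED) that was not requested by us; in that case the
// operations stay queued and are resubmitted below.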
if (result != -ECANCELED || cancel_requested_)
{
if (io_uring_operation* op = op_queue_.front())
{
if (result < 0)
{
op->ec_.assign(-result, asio::error::get_system_category());
op->bytes_transferred_ = 0;
}
else
{
op->ec_.assign(0, op->ec_.category());
op->bytes_transferred_ = static_cast<std::size_t>(result);
}
}
while (io_uring_operation* op = op_queue_.front())
{
if (op->perform(io_cleanup.ops_.empty()))
{
op_queue_.pop();
io_cleanup.ops_.push(op);
}
else
break;
}
}
cancel_requested_ = false;
if (!op_queue_.empty())
{
io_uring_service* service = io_object_->service_;
mutex::scoped_lock lock(service->mutex_);
if (::io_uring_sqe* sqe = service->get_sqe())
{
op_queue_.front()->prepare(sqe);
::io_uring_sqe_set_data(sqe, this);
service->post_submit_sqes_op(lock);
}
else
{
lock.unlock();
while (io_uring_operation* op = op_queue_.front())
{
op->ec_ = asio::error::no_buffer_space;
op_queue_.pop();
io_cleanup.ops_.push(op);
}
}
}
// The last operation to complete on a shut down object must free it.
if (io_object_->shutdown_)
{
io_cleanup.io_object_to_free_ = io_object_;
for (int i = 0; i < max_ops; ++i)
if (!io_object_->queues_[i].op_queue_.empty())
io_cleanup.io_object_to_free_ = 0;
}
// The first operation will be returned for completion now. The others will
// be posted for later by the io_cleanup object's destructor.
io_cleanup.first_op_ = io_cleanup.ops_.front();
io_cleanup.ops_.pop();
return io_cleanup.first_op_;
}
void io_uring_service::io_queue::do_complete(void* owner, operation* base,
const asio::error_code& ec, std::size_t bytes_transferred)
{
if (owner)
{
io_queue* io_q = static_cast<io_queue*>(base);
int result = static_cast<int>(bytes_transferred);
if (operation* op = io_q->perform_io(result))
{
op->complete(owner, ec, 0);
}
}
}
io_uring_service::io_object::io_object(bool locking)
: mutex_(locking)
{
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // defined(ASIO_HAS_IO_URING)
#endif // ASIO_DETAIL_IMPL_IO_URING_SERVICE_IPP
|
0 | repos/asio/asio/include/asio/detail | repos/asio/asio/include/asio/detail/impl/epoll_reactor.hpp | //
// detail/impl/epoll_reactor.hpp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2024 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_EPOLL_REACTOR_HPP
#define ASIO_DETAIL_IMPL_EPOLL_REACTOR_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#if defined(ASIO_HAS_EPOLL)
#include "asio/detail/scheduler.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
inline void epoll_reactor::post_immediate_completion(
operation* op, bool is_continuation) const
{
scheduler_.post_immediate_completion(op, is_continuation);
}
template <typename Time_Traits>
void epoll_reactor::add_timer_queue(timer_queue<Time_Traits>& queue)
{
do_add_timer_queue(queue);
}
template <typename Time_Traits>
void epoll_reactor::remove_timer_queue(timer_queue<Time_Traits>& queue)
{
do_remove_timer_queue(queue);
}
template <typename Time_Traits>
void epoll_reactor::schedule_timer(timer_queue<Time_Traits>& queue,
const typename Time_Traits::time_type& time,
typename timer_queue<Time_Traits>::per_timer_data& timer, wait_op* op)
{
mutex::scoped_lock lock(mutex_);
if (shutdown_)
{
scheduler_.post_immediate_completion(op, false);
return;
}
bool earliest = queue.enqueue_timer(time, timer, op);
scheduler_.work_started();
if (earliest)
update_timeout();
}
template <typename Time_Traits>
std::size_t epoll_reactor::cancel_timer(timer_queue<Time_Traits>& queue,
typename timer_queue<Time_Traits>::per_timer_data& timer,
std::size_t max_cancelled)
{
mutex::scoped_lock lock(mutex_);
op_queue<operation> ops;
std::size_t n = queue.cancel_timer(timer, ops, max_cancelled);
lock.unlock();
scheduler_.post_deferred_completions(ops);
return n;
}
template <typename Time_Traits>
void epoll_reactor::cancel_timer_by_key(timer_queue<Time_Traits>& queue,
typename timer_queue<Time_Traits>::per_timer_data* timer,
void* cancellation_key)
{
mutex::scoped_lock lock(mutex_);
op_queue<operation> ops;
queue.cancel_timer_by_key(timer, ops, cancellation_key);
lock.unlock();
scheduler_.post_deferred_completions(ops);
}
template <typename Time_Traits>
void epoll_reactor::move_timer(timer_queue<Time_Traits>& queue,
typename timer_queue<Time_Traits>::per_timer_data& target,
typename timer_queue<Time_Traits>::per_timer_data& source)
{
mutex::scoped_lock lock(mutex_);
op_queue<operation> ops;
queue.cancel_timer(target, ops);
queue.move_timer(target, source);
lock.unlock();
scheduler_.post_deferred_completions(ops);
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // defined(ASIO_HAS_EPOLL)
#endif // ASIO_DETAIL_IMPL_EPOLL_REACTOR_HPP
|
0 | repos/asio/asio/include/asio/detail | repos/asio/asio/include/asio/detail/impl/select_reactor.hpp | //
// detail/impl/select_reactor.hpp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2024 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_SELECT_REACTOR_HPP
#define ASIO_DETAIL_IMPL_SELECT_REACTOR_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if defined(ASIO_HAS_IOCP) \
|| (!defined(ASIO_HAS_DEV_POLL) \
&& !defined(ASIO_HAS_EPOLL) \
&& !defined(ASIO_HAS_KQUEUE) \
&& !defined(ASIO_WINDOWS_RUNTIME))
#if defined(ASIO_HAS_IOCP)
# include "asio/detail/win_iocp_io_context.hpp"
#else // defined(ASIO_HAS_IOCP)
# include "asio/detail/scheduler.hpp"
#endif // defined(ASIO_HAS_IOCP)
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
inline void select_reactor::post_immediate_completion(
operation* op, bool is_continuation) const
{
scheduler_.post_immediate_completion(op, is_continuation);
}
template <typename Time_Traits>
void select_reactor::add_timer_queue(timer_queue<Time_Traits>& queue)
{
do_add_timer_queue(queue);
}
// Remove a timer queue from the reactor.
template <typename Time_Traits>
void select_reactor::remove_timer_queue(timer_queue<Time_Traits>& queue)
{
do_remove_timer_queue(queue);
}
template <typename Time_Traits>
void select_reactor::schedule_timer(timer_queue<Time_Traits>& queue,
const typename Time_Traits::time_type& time,
typename timer_queue<Time_Traits>::per_timer_data& timer, wait_op* op)
{
asio::detail::mutex::scoped_lock lock(mutex_);
if (shutdown_)
{
scheduler_.post_immediate_completion(op, false);
return;
}
bool earliest = queue.enqueue_timer(time, timer, op);
scheduler_.work_started();
if (earliest)
interrupter_.interrupt();
}
template <typename Time_Traits>
std::size_t select_reactor::cancel_timer(timer_queue<Time_Traits>& queue,
typename timer_queue<Time_Traits>::per_timer_data& timer,
std::size_t max_cancelled)
{
asio::detail::mutex::scoped_lock lock(mutex_);
op_queue<operation> ops;
std::size_t n = queue.cancel_timer(timer, ops, max_cancelled);
lock.unlock();
scheduler_.post_deferred_completions(ops);
return n;
}
template <typename Time_Traits>
void select_reactor::cancel_timer_by_key(timer_queue<Time_Traits>& queue,
typename timer_queue<Time_Traits>::per_timer_data* timer,
void* cancellation_key)
{
mutex::scoped_lock lock(mutex_);
op_queue<operation> ops;
queue.cancel_timer_by_key(timer, ops, cancellation_key);
lock.unlock();
scheduler_.post_deferred_completions(ops);
}
template <typename Time_Traits>
void select_reactor::move_timer(timer_queue<Time_Traits>& queue,
typename timer_queue<Time_Traits>::per_timer_data& target,
typename timer_queue<Time_Traits>::per_timer_data& source)
{
asio::detail::mutex::scoped_lock lock(mutex_);
op_queue<operation> ops;
queue.cancel_timer(target, ops);
queue.move_timer(target, source);
lock.unlock();
scheduler_.post_deferred_completions(ops);
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // defined(ASIO_HAS_IOCP)
// || (!defined(ASIO_HAS_DEV_POLL)
// && !defined(ASIO_HAS_EPOLL)
// && !defined(ASIO_HAS_KQUEUE)
// && !defined(ASIO_WINDOWS_RUNTIME))
#endif // ASIO_DETAIL_IMPL_SELECT_REACTOR_HPP
|
0 | repos/asio/asio/include/asio/detail | repos/asio/asio/include/asio/detail/impl/win_iocp_serial_port_service.ipp | //
// detail/impl/win_iocp_serial_port_service.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2024 Christopher M. Kohlhoff (chris at kohlhoff dot com)
// Copyright (c) 2008 Rep Invariant Systems, Inc. ([email protected])
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_WIN_IOCP_SERIAL_PORT_SERVICE_IPP
#define ASIO_DETAIL_IMPL_WIN_IOCP_SERIAL_PORT_SERVICE_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if defined(ASIO_HAS_IOCP) && defined(ASIO_HAS_SERIAL_PORT)
#include <cstring>
#include "asio/detail/win_iocp_serial_port_service.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
win_iocp_serial_port_service::win_iocp_serial_port_service(
execution_context& context)
: execution_context_service_base<win_iocp_serial_port_service>(context),
handle_service_(context)
{
}
void win_iocp_serial_port_service::shutdown()
{
}
asio::error_code win_iocp_serial_port_service::open(
win_iocp_serial_port_service::implementation_type& impl,
const std::string& device, asio::error_code& ec)
{
if (is_open(impl))
{
ec = asio::error::already_open;
ASIO_ERROR_LOCATION(ec);
return ec;
}
// For convenience, add a leading \\.\ sequence if not already present.
std::string name = (device[0] == '\\') ? device : "\\\\.\\" + device;
// Open a handle to the serial port.
::HANDLE handle = ::CreateFileA(name.c_str(),
GENERIC_READ | GENERIC_WRITE, 0, 0,
OPEN_EXISTING, FILE_FLAG_OVERLAPPED, 0);
if (handle == INVALID_HANDLE_VALUE)
{
DWORD last_error = ::GetLastError();
ec = asio::error_code(last_error,
asio::error::get_system_category());
ASIO_ERROR_LOCATION(ec);
return ec;
}
// Determine the initial serial port parameters.
using namespace std; // For memset.
::DCB dcb;
memset(&dcb, 0, sizeof(DCB));
dcb.DCBlength = sizeof(DCB);
if (!::GetCommState(handle, &dcb))
{
DWORD last_error = ::GetLastError();
::CloseHandle(handle);
ec = asio::error_code(last_error,
asio::error::get_system_category());
ASIO_ERROR_LOCATION(ec);
return ec;
}
// Set some default serial port parameters. This implementation does not
// support changing all of these, so they might as well be in a known state.
dcb.fBinary = TRUE; // Win32 only supports binary mode.
dcb.fNull = FALSE; // Do not ignore NULL characters.
dcb.fAbortOnError = FALSE; // Ignore serial framing errors.
dcb.BaudRate = CBR_9600; // 9600 baud by default
dcb.ByteSize = 8; // 8 bit bytes
dcb.fOutxCtsFlow = FALSE; // No flow control
dcb.fOutxDsrFlow = FALSE;
dcb.fDtrControl = DTR_CONTROL_DISABLE;
dcb.fDsrSensitivity = FALSE;
dcb.fOutX = FALSE;
dcb.fInX = FALSE;
dcb.fRtsControl = RTS_CONTROL_DISABLE;
dcb.fParity = FALSE; // No parity
dcb.Parity = NOPARITY;
dcb.StopBits = ONESTOPBIT; // One stop bit
if (!::SetCommState(handle, &dcb))
{
DWORD last_error = ::GetLastError();
::CloseHandle(handle);
ec = asio::error_code(last_error,
asio::error::get_system_category());
ASIO_ERROR_LOCATION(ec);
return ec;
}
// Set up timeouts so that the serial port will behave similarly to a
// network socket. Reads wait for at least one byte, then return with
// whatever they have. Writes return once everything is out the door.
::COMMTIMEOUTS timeouts;
timeouts.ReadIntervalTimeout = 1;
timeouts.ReadTotalTimeoutMultiplier = 0;
timeouts.ReadTotalTimeoutConstant = 0;
timeouts.WriteTotalTimeoutMultiplier = 0;
timeouts.WriteTotalTimeoutConstant = 0;
if (!::SetCommTimeouts(handle, &timeouts))
{
DWORD last_error = ::GetLastError();
::CloseHandle(handle);
ec = asio::error_code(last_error,
asio::error::get_system_category());
ASIO_ERROR_LOCATION(ec);
return ec;
}
// We're done. Take ownership of the serial port handle.
if (handle_service_.assign(impl, handle, ec))
::CloseHandle(handle);
return ec;
}
asio::error_code win_iocp_serial_port_service::do_set_option(
win_iocp_serial_port_service::implementation_type& impl,
win_iocp_serial_port_service::store_function_type store,
const void* option, asio::error_code& ec)
{
using namespace std; // For memset.
::DCB dcb;
memset(&dcb, 0, sizeof(DCB));
dcb.DCBlength = sizeof(DCB);
if (!::GetCommState(handle_service_.native_handle(impl), &dcb))
{
DWORD last_error = ::GetLastError();
ec = asio::error_code(last_error,
asio::error::get_system_category());
ASIO_ERROR_LOCATION(ec);
return ec;
}
if (store(option, dcb, ec))
return ec;
if (!::SetCommState(handle_service_.native_handle(impl), &dcb))
{
DWORD last_error = ::GetLastError();
ec = asio::error_code(last_error,
asio::error::get_system_category());
ASIO_ERROR_LOCATION(ec);
return ec;
}
ec = asio::error_code();
return ec;
}
asio::error_code win_iocp_serial_port_service::do_get_option(
const win_iocp_serial_port_service::implementation_type& impl,
win_iocp_serial_port_service::load_function_type load,
void* option, asio::error_code& ec) const
{
using namespace std; // For memset.
::DCB dcb;
memset(&dcb, 0, sizeof(DCB));
dcb.DCBlength = sizeof(DCB);
if (!::GetCommState(handle_service_.native_handle(impl), &dcb))
{
DWORD last_error = ::GetLastError();
ec = asio::error_code(last_error,
asio::error::get_system_category());
ASIO_ERROR_LOCATION(ec);
return ec;
}
return load(option, dcb, ec);
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // defined(ASIO_HAS_IOCP) && defined(ASIO_HAS_SERIAL_PORT)
#endif // ASIO_DETAIL_IMPL_WIN_IOCP_SERIAL_PORT_SERVICE_IPP
|
0 | repos/asio/asio/include/asio/detail | repos/asio/asio/include/asio/detail/impl/strand_executor_service.ipp | //
// detail/impl/strand_executor_service.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2024 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_STRAND_EXECUTOR_SERVICE_IPP
#define ASIO_DETAIL_IMPL_STRAND_EXECUTOR_SERVICE_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#include "asio/detail/strand_executor_service.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
strand_executor_service::strand_executor_service(execution_context& ctx)
: execution_context_service_base<strand_executor_service>(ctx),
mutex_(),
salt_(0),
impl_list_(0)
{
}
void strand_executor_service::shutdown()
{
op_queue<scheduler_operation> ops;
asio::detail::mutex::scoped_lock lock(mutex_);
strand_impl* impl = impl_list_;
while (impl)
{
impl->mutex_->lock();
impl->shutdown_ = true;
ops.push(impl->waiting_queue_);
ops.push(impl->ready_queue_);
impl->mutex_->unlock();
impl = impl->next_;
}
}
strand_executor_service::implementation_type
strand_executor_service::create_implementation()
{
implementation_type new_impl(new strand_impl);
new_impl->locked_ = false;
new_impl->shutdown_ = false;
asio::detail::mutex::scoped_lock lock(mutex_);
// Select a mutex from the pool of shared mutexes.
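// The index is derived by hashing the implementation's address with a
// running salt, so that strands are spread across the fixed-size pool of
// shared mutexes.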
std::size_t salt = salt_++;
std::size_t mutex_index = reinterpret_cast<std::size_t>(new_impl.get());
mutex_index += (reinterpret_cast<std::size_t>(new_impl.get()) >> 3);
mutex_index ^= salt + 0x9e3779b9 + (mutex_index << 6) + (mutex_index >> 2);
mutex_index = mutex_index % num_mutexes;
if (!mutexes_[mutex_index].get())
mutexes_[mutex_index].reset(new mutex);
new_impl->mutex_ = mutexes_[mutex_index].get();
// Insert implementation into linked list of all implementations.
new_impl->next_ = impl_list_;
new_impl->prev_ = 0;
if (impl_list_)
impl_list_->prev_ = new_impl.get();
impl_list_ = new_impl.get();
new_impl->service_ = this;
return new_impl;
}
strand_executor_service::strand_impl::~strand_impl()
{
asio::detail::mutex::scoped_lock lock(service_->mutex_);
// Remove implementation from linked list of all implementations.
if (service_->impl_list_ == this)
service_->impl_list_ = next_;
if (prev_)
prev_->next_ = next_;
if (next_)
next_->prev_ = prev_;
}
bool strand_executor_service::enqueue(const implementation_type& impl,
scheduler_operation* op)
{
impl->mutex_->lock();
if (impl->shutdown_)
{
impl->mutex_->unlock();
op->destroy();
return false;
}
else if (impl->locked_)
{
// Some other function already holds the strand lock. Enqueue for later.
impl->waiting_queue_.push(op);
impl->mutex_->unlock();
return false;
}
else
{
// The function is acquiring the strand lock and so is responsible for
// scheduling the strand.
impl->locked_ = true;
impl->mutex_->unlock();
impl->ready_queue_.push(op);
return true;
}
}
bool strand_executor_service::running_in_this_thread(
const implementation_type& impl)
{
return !!call_stack<strand_impl>::contains(impl.get());
}
bool strand_executor_service::push_waiting_to_ready(implementation_type& impl)
{
impl->mutex_->lock();
impl->ready_queue_.push(impl->waiting_queue_);
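// Retain ownership of the strand only if there are handlers ready to run;
// otherwise release it so that the next enqueued handler schedules the
// strand again.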
bool more_handlers = impl->locked_ = !impl->ready_queue_.empty();
impl->mutex_->unlock();
return more_handlers;
}
void strand_executor_service::run_ready_handlers(implementation_type& impl)
{
// Indicate that this strand is executing on the current thread.
call_stack<strand_impl>::context ctx(impl.get());
// Run all ready handlers. No lock is required since the ready queue is
// accessed only within the strand.
asio::error_code ec;
while (scheduler_operation* o = impl->ready_queue_.front())
{
impl->ready_queue_.pop();
o->complete(impl.get(), ec, 0);
}
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // ASIO_DETAIL_IMPL_STRAND_EXECUTOR_SERVICE_IPP
|
0 | repos/asio/asio/include/asio/detail | repos/asio/asio/include/asio/detail/impl/buffer_sequence_adapter.ipp | //
// detail/impl/buffer_sequence_adapter.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2024 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_BUFFER_SEQUENCE_ADAPTER_IPP
#define ASIO_DETAIL_IMPL_BUFFER_SEQUENCE_ADAPTER_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if defined(ASIO_WINDOWS_RUNTIME)
#include <robuffer.h>
#include <windows.storage.streams.h>
#include <wrl/implements.h>
#include "asio/detail/buffer_sequence_adapter.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
class winrt_buffer_impl :
public Microsoft::WRL::RuntimeClass<
Microsoft::WRL::RuntimeClassFlags<
Microsoft::WRL::RuntimeClassType::WinRtClassicComMix>,
ABI::Windows::Storage::Streams::IBuffer,
Windows::Storage::Streams::IBufferByteAccess>
{
public:
explicit winrt_buffer_impl(const asio::const_buffer& b)
{
bytes_ = const_cast<byte*>(static_cast<const byte*>(b.data()));
length_ = b.size();
capacity_ = b.size();
}
explicit winrt_buffer_impl(const asio::mutable_buffer& b)
{
bytes_ = static_cast<byte*>(b.data());
length_ = 0;
capacity_ = b.size();
}
~winrt_buffer_impl()
{
}
STDMETHODIMP Buffer(byte** value)
{
*value = bytes_;
return S_OK;
}
STDMETHODIMP get_Capacity(UINT32* value)
{
*value = capacity_;
return S_OK;
}
STDMETHODIMP get_Length(UINT32 *value)
{
*value = length_;
return S_OK;
}
STDMETHODIMP put_Length(UINT32 value)
{
if (value > capacity_)
return E_INVALIDARG;
length_ = value;
return S_OK;
}
private:
byte* bytes_;
UINT32 length_;
UINT32 capacity_;
};
void buffer_sequence_adapter_base::init_native_buffer(
buffer_sequence_adapter_base::native_buffer_type& buf,
const asio::mutable_buffer& buffer)
{
std::memset(&buf, 0, sizeof(native_buffer_type));
Microsoft::WRL::ComPtr<IInspectable> insp
= Microsoft::WRL::Make<winrt_buffer_impl>(buffer);
buf = reinterpret_cast<Windows::Storage::Streams::IBuffer^>(insp.Get());
}
void buffer_sequence_adapter_base::init_native_buffer(
buffer_sequence_adapter_base::native_buffer_type& buf,
const asio::const_buffer& buffer)
{
std::memset(&buf, 0, sizeof(native_buffer_type));
Microsoft::WRL::ComPtr<IInspectable> insp
= Microsoft::WRL::Make<winrt_buffer_impl>(buffer);
Platform::Object^ buf_obj = reinterpret_cast<Platform::Object^>(insp.Get());
buf = reinterpret_cast<Windows::Storage::Streams::IBuffer^>(insp.Get());
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // defined(ASIO_WINDOWS_RUNTIME)
#endif // ASIO_DETAIL_IMPL_BUFFER_SEQUENCE_ADAPTER_IPP
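// Note on the two winrt_buffer_impl constructors above: a const_buffer
// (outgoing data) produces an IBuffer that is already "full" (Length ==
// Capacity == size), whereas a mutable_buffer (incoming data) produces an
// empty IBuffer (Length == 0, Capacity == size) that the WinRT stream may
// then fill. Illustrative sketch of the buffers being wrapped:
//
// @code
// const char out_data[] = { 'a', 'b', 'c' };
// asio::const_buffer out = asio::buffer(out_data);    // Length 3, Capacity 3
// char in_storage[16];
// asio::mutable_buffer in = asio::buffer(in_storage); // Length 0, Capacity 16
// @endcode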
|
0 | repos/asio/asio/include/asio/detail | repos/asio/asio/include/asio/detail/impl/reactive_socket_service_base.ipp | //
// detail/reactive_socket_service_base.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2024 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_REACTIVE_SOCKET_SERVICE_BASE_IPP
#define ASIO_DETAIL_IMPL_REACTIVE_SOCKET_SERVICE_BASE_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if !defined(ASIO_HAS_IOCP) \
&& !defined(ASIO_WINDOWS_RUNTIME) \
&& !defined(ASIO_HAS_IO_URING_AS_DEFAULT)
#include "asio/detail/reactive_socket_service_base.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
reactive_socket_service_base::reactive_socket_service_base(
execution_context& context)
: reactor_(use_service<reactor>(context))
{
reactor_.init_task();
}
void reactive_socket_service_base::base_shutdown()
{
}
void reactive_socket_service_base::construct(
reactive_socket_service_base::base_implementation_type& impl)
{
impl.socket_ = invalid_socket;
impl.state_ = 0;
impl.reactor_data_ = reactor::per_descriptor_data();
}
void reactive_socket_service_base::base_move_construct(
reactive_socket_service_base::base_implementation_type& impl,
reactive_socket_service_base::base_implementation_type& other_impl)
noexcept
{
impl.socket_ = other_impl.socket_;
other_impl.socket_ = invalid_socket;
impl.state_ = other_impl.state_;
other_impl.state_ = 0;
reactor_.move_descriptor(impl.socket_,
impl.reactor_data_, other_impl.reactor_data_);
}
void reactive_socket_service_base::base_move_assign(
reactive_socket_service_base::base_implementation_type& impl,
reactive_socket_service_base& other_service,
reactive_socket_service_base::base_implementation_type& other_impl)
{
destroy(impl);
impl.socket_ = other_impl.socket_;
other_impl.socket_ = invalid_socket;
impl.state_ = other_impl.state_;
other_impl.state_ = 0;
other_service.reactor_.move_descriptor(impl.socket_,
impl.reactor_data_, other_impl.reactor_data_);
}
void reactive_socket_service_base::destroy(
reactive_socket_service_base::base_implementation_type& impl)
{
if (impl.socket_ != invalid_socket)
{
ASIO_HANDLER_OPERATION((reactor_.context(),
"socket", &impl, impl.socket_, "close"));
reactor_.deregister_descriptor(impl.socket_, impl.reactor_data_,
(impl.state_ & socket_ops::possible_dup) == 0);
asio::error_code ignored_ec;
socket_ops::close(impl.socket_, impl.state_, true, ignored_ec);
reactor_.cleanup_descriptor_data(impl.reactor_data_);
}
}
asio::error_code reactive_socket_service_base::close(
reactive_socket_service_base::base_implementation_type& impl,
asio::error_code& ec)
{
if (is_open(impl))
{
ASIO_HANDLER_OPERATION((reactor_.context(),
"socket", &impl, impl.socket_, "close"));
reactor_.deregister_descriptor(impl.socket_, impl.reactor_data_,
(impl.state_ & socket_ops::possible_dup) == 0);
socket_ops::close(impl.socket_, impl.state_, false, ec);
reactor_.cleanup_descriptor_data(impl.reactor_data_);
}
else
{
ec = asio::error_code();
}
// The descriptor is closed by the OS even if close() returns an error.
//
// (Actually, POSIX says the state of the descriptor is unspecified. On
// Linux the descriptor is apparently closed anyway; e.g. see
// http://lkml.org/lkml/2005/9/10/129)
// We'll just have to assume that other OSes follow the same behaviour. The
// known exception is when Windows's closesocket() function fails with
// WSAEWOULDBLOCK, but this case is handled inside socket_ops::close().
construct(impl);
return ec;
}
socket_type reactive_socket_service_base::release(
reactive_socket_service_base::base_implementation_type& impl,
asio::error_code& ec)
{
if (!is_open(impl))
{
ec = asio::error::bad_descriptor;
return invalid_socket;
}
ASIO_HANDLER_OPERATION((reactor_.context(),
"socket", &impl, impl.socket_, "release"));
reactor_.deregister_descriptor(impl.socket_, impl.reactor_data_, false);
reactor_.cleanup_descriptor_data(impl.reactor_data_);
socket_type sock = impl.socket_;
construct(impl);
ec = asio::error_code();
return sock;
}
asio::error_code reactive_socket_service_base::cancel(
reactive_socket_service_base::base_implementation_type& impl,
asio::error_code& ec)
{
if (!is_open(impl))
{
ec = asio::error::bad_descriptor;
return ec;
}
ASIO_HANDLER_OPERATION((reactor_.context(),
"socket", &impl, impl.socket_, "cancel"));
reactor_.cancel_ops(impl.socket_, impl.reactor_data_);
ec = asio::error_code();
return ec;
}
asio::error_code reactive_socket_service_base::do_open(
reactive_socket_service_base::base_implementation_type& impl,
int af, int type, int protocol, asio::error_code& ec)
{
if (is_open(impl))
{
ec = asio::error::already_open;
return ec;
}
socket_holder sock(socket_ops::socket(af, type, protocol, ec));
if (sock.get() == invalid_socket)
return ec;
if (int err = reactor_.register_descriptor(sock.get(), impl.reactor_data_))
{
ec = asio::error_code(err,
asio::error::get_system_category());
return ec;
}
impl.socket_ = sock.release();
switch (type)
{
case SOCK_STREAM: impl.state_ = socket_ops::stream_oriented; break;
case SOCK_DGRAM: impl.state_ = socket_ops::datagram_oriented; break;
default: impl.state_ = 0; break;
}
ec = asio::error_code();
return ec;
}
asio::error_code reactive_socket_service_base::do_assign(
reactive_socket_service_base::base_implementation_type& impl, int type,
const reactive_socket_service_base::native_handle_type& native_socket,
asio::error_code& ec)
{
if (is_open(impl))
{
ec = asio::error::already_open;
return ec;
}
if (int err = reactor_.register_descriptor(
native_socket, impl.reactor_data_))
{
ec = asio::error_code(err,
asio::error::get_system_category());
return ec;
}
impl.socket_ = native_socket;
switch (type)
{
case SOCK_STREAM: impl.state_ = socket_ops::stream_oriented; break;
case SOCK_DGRAM: impl.state_ = socket_ops::datagram_oriented; break;
default: impl.state_ = 0; break;
}
impl.state_ |= socket_ops::possible_dup;
ec = asio::error_code();
return ec;
}
void reactive_socket_service_base::do_start_op(
reactive_socket_service_base::base_implementation_type& impl,
int op_type, reactor_op* op, bool is_continuation,
bool allow_speculative, bool noop, bool needs_non_blocking,
void (*on_immediate)(operation* op, bool, const void*),
const void* immediate_arg)
{
if (!noop)
{
if ((impl.state_ & socket_ops::non_blocking)
|| !needs_non_blocking
|| socket_ops::set_internal_non_blocking(
impl.socket_, impl.state_, true, op->ec_))
{
reactor_.start_op(op_type, impl.socket_, impl.reactor_data_, op,
is_continuation, allow_speculative, on_immediate, immediate_arg);
return;
}
}
on_immediate(op, is_continuation, immediate_arg);
}
void reactive_socket_service_base::do_start_accept_op(
reactive_socket_service_base::base_implementation_type& impl,
reactor_op* op, bool is_continuation, bool peer_is_open,
void (*on_immediate)(operation* op, bool, const void*),
const void* immediate_arg)
{
if (!peer_is_open)
{
do_start_op(impl, reactor::read_op, op, is_continuation,
true, false, true, on_immediate, immediate_arg);
}
else
{
op->ec_ = asio::error::already_open;
on_immediate(op, is_continuation, immediate_arg);
}
}
void reactive_socket_service_base::do_start_connect_op(
reactive_socket_service_base::base_implementation_type& impl,
reactor_op* op, bool is_continuation, const void* addr, size_t addrlen,
void (*on_immediate)(operation* op, bool, const void*),
const void* immediate_arg)
{
if ((impl.state_ & socket_ops::non_blocking)
|| socket_ops::set_internal_non_blocking(
impl.socket_, impl.state_, true, op->ec_))
{
if (socket_ops::connect(impl.socket_, addr, addrlen, op->ec_) != 0)
{
if (op->ec_ == asio::error::in_progress
|| op->ec_ == asio::error::would_block)
{
op->ec_ = asio::error_code();
reactor_.start_op(reactor::connect_op, impl.socket_, impl.reactor_data_,
op, is_continuation, false, on_immediate, immediate_arg);
return;
}
}
}
on_immediate(op, is_continuation, immediate_arg);
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // !defined(ASIO_HAS_IOCP)
// && !defined(ASIO_WINDOWS_RUNTIME)
// && !defined(ASIO_HAS_IO_URING_AS_DEFAULT)
#endif // ASIO_DETAIL_IMPL_REACTIVE_SOCKET_SERVICE_BASE_IPP
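// do_start_connect_op() above follows the classic non-blocking connect
// pattern: start the connect, and if it reports "in progress" wait for the
// socket to become writable before checking the final result. A standalone
// POSIX sketch of that pattern (illustrative only; error handling trimmed,
// and not part of asio's interface):
//
// @code
// #include <cerrno>
// #include <fcntl.h>
// #include <poll.h>
// #include <sys/socket.h>
//
// int connect_nonblocking(int fd, const sockaddr* addr, socklen_t len)
// {
//   ::fcntl(fd, F_SETFL, ::fcntl(fd, F_GETFL, 0) | O_NONBLOCK);
//   if (::connect(fd, addr, len) == 0)
//     return 0; // connected immediately
//   if (errno != EINPROGRESS && errno != EWOULDBLOCK)
//     return -1; // hard failure
//   pollfd p = { fd, POLLOUT, 0 };
//   ::poll(&p, 1, -1); // writability means the connect has finished
//   int err = 0;
//   socklen_t err_len = sizeof(err);
//   ::getsockopt(fd, SOL_SOCKET, SO_ERROR, &err, &err_len);
//   return err == 0 ? 0 : -1;
// }
// @endcode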
|
0 | repos/asio/asio/include/asio/detail | repos/asio/asio/include/asio/detail/impl/strand_executor_service.hpp | //
// detail/impl/strand_executor_service.hpp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2024 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_STRAND_EXECUTOR_SERVICE_HPP
#define ASIO_DETAIL_IMPL_STRAND_EXECUTOR_SERVICE_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/fenced_block.hpp"
#include "asio/detail/recycling_allocator.hpp"
#include "asio/executor_work_guard.hpp"
#include "asio/defer.hpp"
#include "asio/dispatch.hpp"
#include "asio/post.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
template <typename F, typename Allocator>
class strand_executor_service::allocator_binder
{
public:
typedef Allocator allocator_type;
allocator_binder(F&& f, const Allocator& a)
: f_(static_cast<F&&>(f)),
allocator_(a)
{
}
allocator_binder(const allocator_binder& other)
: f_(other.f_),
allocator_(other.allocator_)
{
}
allocator_binder(allocator_binder&& other)
: f_(static_cast<F&&>(other.f_)),
allocator_(static_cast<allocator_type&&>(other.allocator_))
{
}
allocator_type get_allocator() const noexcept
{
return allocator_;
}
void operator()()
{
f_();
}
private:
F f_;
allocator_type allocator_;
};
template <typename Executor>
class strand_executor_service::invoker<Executor,
enable_if_t<
execution::is_executor<Executor>::value
>>
{
public:
invoker(const implementation_type& impl, Executor& ex)
: impl_(impl),
executor_(asio::prefer(ex, execution::outstanding_work.tracked))
{
}
invoker(const invoker& other)
: impl_(other.impl_),
executor_(other.executor_)
{
}
invoker(invoker&& other)
: impl_(static_cast<implementation_type&&>(other.impl_)),
executor_(static_cast<executor_type&&>(other.executor_))
{
}
struct on_invoker_exit
{
invoker* this_;
~on_invoker_exit()
{
if (push_waiting_to_ready(this_->impl_))
{
recycling_allocator<void> allocator;
executor_type ex = this_->executor_;
asio::prefer(
asio::require(
static_cast<executor_type&&>(ex),
execution::blocking.never),
execution::allocator(allocator)
).execute(static_cast<invoker&&>(*this_));
}
}
};
void operator()()
{
// Ensure the next handler, if any, is scheduled on block exit.
on_invoker_exit on_exit = { this };
(void)on_exit;
run_ready_handlers(impl_);
}
private:
typedef decay_t<
prefer_result_t<
Executor,
execution::outstanding_work_t::tracked_t
>
> executor_type;
implementation_type impl_;
executor_type executor_;
};
#if !defined(ASIO_NO_TS_EXECUTORS)
template <typename Executor>
class strand_executor_service::invoker<Executor,
enable_if_t<
!execution::is_executor<Executor>::value
>>
{
public:
invoker(const implementation_type& impl, Executor& ex)
: impl_(impl),
work_(ex)
{
}
invoker(const invoker& other)
: impl_(other.impl_),
work_(other.work_)
{
}
invoker(invoker&& other)
: impl_(static_cast<implementation_type&&>(other.impl_)),
work_(static_cast<executor_work_guard<Executor>&&>(other.work_))
{
}
struct on_invoker_exit
{
invoker* this_;
~on_invoker_exit()
{
if (push_waiting_to_ready(this_->impl_))
{
Executor ex(this_->work_.get_executor());
recycling_allocator<void> allocator;
ex.post(static_cast<invoker&&>(*this_), allocator);
}
}
};
void operator()()
{
// Ensure the next handler, if any, is scheduled on block exit.
on_invoker_exit on_exit = { this };
(void)on_exit;
run_ready_handlers(impl_);
}
private:
implementation_type impl_;
executor_work_guard<Executor> work_;
};
#endif // !defined(ASIO_NO_TS_EXECUTORS)
template <typename Executor, typename Function>
inline void strand_executor_service::execute(const implementation_type& impl,
Executor& ex, Function&& function,
enable_if_t<
can_query<Executor, execution::allocator_t<void>>::value
>*)
{
return strand_executor_service::do_execute(impl, ex,
static_cast<Function&&>(function),
asio::query(ex, execution::allocator));
}
template <typename Executor, typename Function>
inline void strand_executor_service::execute(const implementation_type& impl,
Executor& ex, Function&& function,
enable_if_t<
!can_query<Executor, execution::allocator_t<void>>::value
>*)
{
return strand_executor_service::do_execute(impl, ex,
static_cast<Function&&>(function),
std::allocator<void>());
}
template <typename Executor, typename Function, typename Allocator>
void strand_executor_service::do_execute(const implementation_type& impl,
Executor& ex, Function&& function, const Allocator& a)
{
typedef decay_t<Function> function_type;
// If the executor is not never-blocking, and we are already in the strand,
// then the function can run immediately.
if (asio::query(ex, execution::blocking) != execution::blocking.never
&& running_in_this_thread(impl))
{
// Make a local, non-const copy of the function.
function_type tmp(static_cast<Function&&>(function));
fenced_block b(fenced_block::full);
static_cast<function_type&&>(tmp)();
return;
}
// Allocate and construct an operation to wrap the function.
typedef executor_op<function_type, Allocator> op;
typename op::ptr p = { detail::addressof(a), op::ptr::allocate(a), 0 };
p.p = new (p.v) op(static_cast<Function&&>(function), a);
ASIO_HANDLER_CREATION((impl->service_->context(), *p.p,
"strand_executor", impl.get(), 0, "execute"));
// Add the function to the strand and schedule the strand if required.
bool first = enqueue(impl, p.p);
p.v = p.p = 0;
if (first)
{
ex.execute(invoker<Executor>(impl, ex));
}
}
template <typename Executor, typename Function, typename Allocator>
void strand_executor_service::dispatch(const implementation_type& impl,
Executor& ex, Function&& function, const Allocator& a)
{
typedef decay_t<Function> function_type;
// If we are already in the strand then the function can run immediately.
if (running_in_this_thread(impl))
{
// Make a local, non-const copy of the function.
function_type tmp(static_cast<Function&&>(function));
fenced_block b(fenced_block::full);
static_cast<function_type&&>(tmp)();
return;
}
// Allocate and construct an operation to wrap the function.
typedef executor_op<function_type, Allocator> op;
typename op::ptr p = { detail::addressof(a), op::ptr::allocate(a), 0 };
p.p = new (p.v) op(static_cast<Function&&>(function), a);
ASIO_HANDLER_CREATION((impl->service_->context(), *p.p,
"strand_executor", impl.get(), 0, "dispatch"));
// Add the function to the strand and schedule the strand if required.
bool first = enqueue(impl, p.p);
p.v = p.p = 0;
if (first)
{
asio::dispatch(ex,
allocator_binder<invoker<Executor>, Allocator>(
invoker<Executor>(impl, ex), a));
}
}
// Request invocation of the given function and return immediately.
template <typename Executor, typename Function, typename Allocator>
void strand_executor_service::post(const implementation_type& impl,
Executor& ex, Function&& function, const Allocator& a)
{
typedef decay_t<Function> function_type;
// Allocate and construct an operation to wrap the function.
typedef executor_op<function_type, Allocator> op;
typename op::ptr p = { detail::addressof(a), op::ptr::allocate(a), 0 };
p.p = new (p.v) op(static_cast<Function&&>(function), a);
ASIO_HANDLER_CREATION((impl->service_->context(), *p.p,
"strand_executor", impl.get(), 0, "post"));
// Add the function to the strand and schedule the strand if required.
bool first = enqueue(impl, p.p);
p.v = p.p = 0;
if (first)
{
asio::post(ex,
allocator_binder<invoker<Executor>, Allocator>(
invoker<Executor>(impl, ex), a));
}
}
// Request invocation of the given function and return immediately.
template <typename Executor, typename Function, typename Allocator>
void strand_executor_service::defer(const implementation_type& impl,
Executor& ex, Function&& function, const Allocator& a)
{
typedef decay_t<Function> function_type;
// Allocate and construct an operation to wrap the function.
typedef executor_op<function_type, Allocator> op;
typename op::ptr p = { detail::addressof(a), op::ptr::allocate(a), 0 };
p.p = new (p.v) op(static_cast<Function&&>(function), a);
ASIO_HANDLER_CREATION((impl->service_->context(), *p.p,
"strand_executor", impl.get(), 0, "defer"));
// Add the function to the strand and schedule the strand if required.
bool first = enqueue(impl, p.p);
p.v = p.p = 0;
if (first)
{
asio::defer(ex,
allocator_binder<invoker<Executor>, Allocator>(
invoker<Executor>(impl, ex), a));
}
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // ASIO_DETAIL_IMPL_STRAND_EXECUTOR_SERVICE_HPP
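// A usage sketch of the difference between the dispatch() and post() paths
// above (illustrative only, assuming the public asio interface): dispatch()
// may run the function inline when the caller is already on the strand,
// while post() always queues it.
//
// @code
// asio::io_context ctx;
// auto strand = asio::make_strand(ctx);
// asio::post(strand, [strand]
// {
//   // We are already running in the strand here, so this runs before the
//   // enclosing handler returns...
//   asio::dispatch(strand, []{ /* runs inline */ });
//   // ...whereas this is queued and runs after it returns.
//   asio::post(strand, []{ /* runs later */ });
// });
// ctx.run();
// @endcode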
|
0 | repos/asio/asio/include/asio/detail | repos/asio/asio/include/asio/detail/impl/win_static_mutex.ipp | //
// detail/impl/win_static_mutex.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2024 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_WIN_STATIC_MUTEX_IPP
#define ASIO_DETAIL_IMPL_WIN_STATIC_MUTEX_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if defined(ASIO_WINDOWS)
#include <cstdio>
#include "asio/detail/throw_error.hpp"
#include "asio/detail/win_static_mutex.hpp"
#include "asio/error.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
void win_static_mutex::init()
{
int error = do_init();
asio::error_code ec(error,
asio::error::get_system_category());
asio::detail::throw_error(ec, "static_mutex");
}
int win_static_mutex::do_init()
{
using namespace std; // For sprintf.
wchar_t mutex_name[128];
#if defined(ASIO_HAS_SECURE_RTL)
swprintf_s(
#else // defined(ASIO_HAS_SECURE_RTL)
_snwprintf(
#endif // defined(ASIO_HAS_SECURE_RTL)
mutex_name, 128, L"asio-58CCDC44-6264-4842-90C2-F3C545CB8AA7-%u-%p",
static_cast<unsigned int>(::GetCurrentProcessId()), this);
#if defined(ASIO_WINDOWS_APP)
HANDLE mutex = ::CreateMutexExW(0, mutex_name, CREATE_MUTEX_INITIAL_OWNER, 0);
#else // defined(ASIO_WINDOWS_APP)
HANDLE mutex = ::CreateMutexW(0, TRUE, mutex_name);
#endif // defined(ASIO_WINDOWS_APP)
DWORD last_error = ::GetLastError();
if (mutex == 0)
return last_error;
if (last_error == ERROR_ALREADY_EXISTS)
{
#if defined(ASIO_WINDOWS_APP)
::WaitForSingleObjectEx(mutex, INFINITE, false);
#else // defined(ASIO_WINDOWS_APP)
::WaitForSingleObject(mutex, INFINITE);
#endif // defined(ASIO_WINDOWS_APP)
}
if (initialised_)
{
::ReleaseMutex(mutex);
::CloseHandle(mutex);
return 0;
}
#if defined(__MINGW32__)
// Not sure if MinGW supports structured exception handling, so for now
// we'll just call the Windows API and hope.
# if defined(UNDER_CE)
::InitializeCriticalSection(&crit_section_);
# else
if (!::InitializeCriticalSectionAndSpinCount(&crit_section_, 0x80000000))
{
last_error = ::GetLastError();
::ReleaseMutex(mutex);
::CloseHandle(mutex);
return last_error;
}
# endif
#else
__try
{
# if defined(UNDER_CE)
::InitializeCriticalSection(&crit_section_);
# elif defined(ASIO_WINDOWS_APP)
if (!::InitializeCriticalSectionEx(&crit_section_, 0, 0))
{
last_error = ::GetLastError();
::ReleaseMutex(mutex);
::CloseHandle(mutex);
return last_error;
}
# else
if (!::InitializeCriticalSectionAndSpinCount(&crit_section_, 0x80000000))
{
last_error = ::GetLastError();
::ReleaseMutex(mutex);
::CloseHandle(mutex);
return last_error;
}
# endif
}
__except(GetExceptionCode() == STATUS_NO_MEMORY
? EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH)
{
::ReleaseMutex(mutex);
::CloseHandle(mutex);
return ERROR_OUTOFMEMORY;
}
#endif
initialised_ = true;
::ReleaseMutex(mutex);
::CloseHandle(mutex);
return 0;
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // defined(ASIO_WINDOWS)
#endif // ASIO_DETAIL_IMPL_WIN_STATIC_MUTEX_IPP
|
0 | repos/asio/asio/include/asio/detail | repos/asio/asio/include/asio/detail/impl/timer_queue_set.ipp | //
// detail/impl/timer_queue_set.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2024 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_TIMER_QUEUE_SET_IPP
#define ASIO_DETAIL_IMPL_TIMER_QUEUE_SET_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#include "asio/detail/timer_queue_set.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
timer_queue_set::timer_queue_set()
: first_(0)
{
}
void timer_queue_set::insert(timer_queue_base* q)
{
q->next_ = first_;
first_ = q;
}
void timer_queue_set::erase(timer_queue_base* q)
{
if (first_)
{
if (q == first_)
{
first_ = q->next_;
q->next_ = 0;
return;
}
for (timer_queue_base* p = first_; p->next_; p = p->next_)
{
if (p->next_ == q)
{
p->next_ = q->next_;
q->next_ = 0;
return;
}
}
}
}
bool timer_queue_set::all_empty() const
{
for (timer_queue_base* p = first_; p; p = p->next_)
if (!p->empty())
return false;
return true;
}
long timer_queue_set::wait_duration_msec(long max_duration) const
{
long min_duration = max_duration;
for (timer_queue_base* p = first_; p; p = p->next_)
min_duration = p->wait_duration_msec(min_duration);
return min_duration;
}
long timer_queue_set::wait_duration_usec(long max_duration) const
{
long min_duration = max_duration;
for (timer_queue_base* p = first_; p; p = p->next_)
min_duration = p->wait_duration_usec(min_duration);
return min_duration;
}
void timer_queue_set::get_ready_timers(op_queue<operation>& ops)
{
for (timer_queue_base* p = first_; p; p = p->next_)
p->get_ready_timers(ops);
}
void timer_queue_set::get_all_timers(op_queue<operation>& ops)
{
for (timer_queue_base* p = first_; p; p = p->next_)
p->get_all_timers(ops);
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // ASIO_DETAIL_IMPL_TIMER_QUEUE_SET_IPP
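// The set above is an intrusive singly-linked list: each timer_queue_base
// carries its own next_ pointer, so insert() and erase() never allocate. A
// self-contained sketch of the same technique (illustrative only):
//
// @code
// struct node { node* next_ = nullptr; };
//
// struct intrusive_set
// {
//   node* first_ = nullptr;
//
//   void insert(node* n) { n->next_ = first_; first_ = n; }
//
//   void erase(node* n)
//   {
//     if (first_ == n) { first_ = n->next_; n->next_ = nullptr; return; }
//     for (node* p = first_; p && p->next_; p = p->next_)
//       if (p->next_ == n) { p->next_ = n->next_; n->next_ = nullptr; return; }
//   }
// };
// @endcode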
|
0 | repos/asio/asio/include/asio/detail | repos/asio/asio/include/asio/detail/impl/socket_select_interrupter.ipp | //
// detail/impl/socket_select_interrupter.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2024 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_SOCKET_SELECT_INTERRUPTER_IPP
#define ASIO_DETAIL_IMPL_SOCKET_SELECT_INTERRUPTER_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if !defined(ASIO_WINDOWS_RUNTIME)
#if defined(ASIO_WINDOWS) \
|| defined(__CYGWIN__) \
|| defined(__SYMBIAN32__)
#include <cstdlib>
#include "asio/detail/socket_holder.hpp"
#include "asio/detail/socket_ops.hpp"
#include "asio/detail/socket_select_interrupter.hpp"
#include "asio/detail/throw_error.hpp"
#include "asio/error.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
socket_select_interrupter::socket_select_interrupter()
{
open_descriptors();
}
void socket_select_interrupter::open_descriptors()
{
asio::error_code ec;
socket_holder acceptor(socket_ops::socket(
AF_INET, SOCK_STREAM, IPPROTO_TCP, ec));
if (acceptor.get() == invalid_socket)
asio::detail::throw_error(ec, "socket_select_interrupter");
int opt = 1;
socket_ops::state_type acceptor_state = 0;
socket_ops::setsockopt(acceptor.get(), acceptor_state,
SOL_SOCKET, SO_REUSEADDR, &opt, sizeof(opt), ec);
using namespace std; // For memset.
sockaddr_in4_type addr;
std::size_t addr_len = sizeof(addr);
memset(&addr, 0, sizeof(addr));
addr.sin_family = AF_INET;
addr.sin_addr.s_addr = socket_ops::host_to_network_long(INADDR_LOOPBACK);
addr.sin_port = 0;
if (socket_ops::bind(acceptor.get(), &addr,
addr_len, ec) == socket_error_retval)
asio::detail::throw_error(ec, "socket_select_interrupter");
if (socket_ops::getsockname(acceptor.get(), &addr,
&addr_len, ec) == socket_error_retval)
asio::detail::throw_error(ec, "socket_select_interrupter");
// Some broken firewalls on Windows will intermittently cause getsockname to
// return 0.0.0.0 when the socket is actually bound to 127.0.0.1. We
// explicitly specify the target address here to work around this problem.
if (addr.sin_addr.s_addr == socket_ops::host_to_network_long(INADDR_ANY))
addr.sin_addr.s_addr = socket_ops::host_to_network_long(INADDR_LOOPBACK);
if (socket_ops::listen(acceptor.get(),
SOMAXCONN, ec) == socket_error_retval)
asio::detail::throw_error(ec, "socket_select_interrupter");
socket_holder client(socket_ops::socket(
AF_INET, SOCK_STREAM, IPPROTO_TCP, ec));
if (client.get() == invalid_socket)
asio::detail::throw_error(ec, "socket_select_interrupter");
if (socket_ops::connect(client.get(), &addr,
addr_len, ec) == socket_error_retval)
asio::detail::throw_error(ec, "socket_select_interrupter");
socket_holder server(socket_ops::accept(acceptor.get(), 0, 0, ec));
if (server.get() == invalid_socket)
asio::detail::throw_error(ec, "socket_select_interrupter");
ioctl_arg_type non_blocking = 1;
socket_ops::state_type client_state = 0;
if (socket_ops::ioctl(client.get(), client_state,
FIONBIO, &non_blocking, ec))
asio::detail::throw_error(ec, "socket_select_interrupter");
opt = 1;
socket_ops::setsockopt(client.get(), client_state,
IPPROTO_TCP, TCP_NODELAY, &opt, sizeof(opt), ec);
non_blocking = 1;
socket_ops::state_type server_state = 0;
if (socket_ops::ioctl(server.get(), server_state,
FIONBIO, &non_blocking, ec))
asio::detail::throw_error(ec, "socket_select_interrupter");
opt = 1;
socket_ops::setsockopt(server.get(), server_state,
IPPROTO_TCP, TCP_NODELAY, &opt, sizeof(opt), ec);
read_descriptor_ = server.release();
write_descriptor_ = client.release();
}
socket_select_interrupter::~socket_select_interrupter()
{
close_descriptors();
}
void socket_select_interrupter::close_descriptors()
{
asio::error_code ec;
socket_ops::state_type state = socket_ops::internal_non_blocking;
if (read_descriptor_ != invalid_socket)
socket_ops::close(read_descriptor_, state, true, ec);
if (write_descriptor_ != invalid_socket)
socket_ops::close(write_descriptor_, state, true, ec);
}
void socket_select_interrupter::recreate()
{
close_descriptors();
write_descriptor_ = invalid_socket;
read_descriptor_ = invalid_socket;
open_descriptors();
}
void socket_select_interrupter::interrupt()
{
char byte = 0;
socket_ops::buf b;
socket_ops::init_buf(b, &byte, 1);
asio::error_code ec;
socket_ops::send(write_descriptor_, &b, 1, 0, ec);
}
bool socket_select_interrupter::reset()
{
char data[1024];
socket_ops::buf b;
socket_ops::init_buf(b, data, sizeof(data));
asio::error_code ec;
for (;;)
{
int bytes_read = socket_ops::recv(read_descriptor_, &b, 1, 0, ec);
if (bytes_read == sizeof(data))
continue;
if (bytes_read > 0)
return true;
if (bytes_read == 0)
return false;
if (ec == asio::error::would_block
|| ec == asio::error::try_again)
return true;
return false;
}
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // defined(ASIO_WINDOWS)
// || defined(__CYGWIN__)
// || defined(__SYMBIAN32__)
#endif // !defined(ASIO_WINDOWS_RUNTIME)
#endif // ASIO_DETAIL_IMPL_SOCKET_SELECT_INTERRUPTER_IPP
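// The interrupter above exists because a thread blocked in select() cannot
// be woken directly: one byte is written to a self-connected socket pair so
// that the read end becomes ready and select() returns. A POSIX sketch of
// the same idea using pipe() (illustrative only; the class above uses TCP
// sockets because select() on Windows only accepts sockets):
//
// @code
// #include <sys/select.h>
// #include <unistd.h>
//
// int fds[2]; // filled by ::pipe(fds) at startup
//
// void wait_until_interrupted()
// {
//   fd_set readable;
//   FD_ZERO(&readable);
//   FD_SET(fds[0], &readable);
//   ::select(fds[0] + 1, &readable, 0, 0, 0); // blocks until interrupt()
//   char byte;
//   ::read(fds[0], &byte, 1); // consume the wake-up byte
// }
//
// void interrupt()
// {
//   char byte = 0;
//   ::write(fds[1], &byte, 1);
// }
// @endcode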
|
0 | repos/asio/asio/include/asio/detail | repos/asio/asio/include/asio/detail/impl/win_iocp_file_service.ipp | //
// detail/impl/win_iocp_file_service.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2024 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_WIN_IOCP_FILE_SERVICE_IPP
#define ASIO_DETAIL_IMPL_WIN_IOCP_FILE_SERVICE_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if defined(ASIO_HAS_FILE) \
&& defined(ASIO_HAS_WINDOWS_RANDOM_ACCESS_HANDLE)
#include <cstring>
#include <sys/stat.h>
#include "asio/detail/win_iocp_file_service.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
win_iocp_file_service::win_iocp_file_service(
execution_context& context)
: execution_context_service_base<win_iocp_file_service>(context),
handle_service_(context),
nt_flush_buffers_file_ex_(0)
{
if (FARPROC nt_flush_buffers_file_ex_ptr = ::GetProcAddress(
::GetModuleHandleA("NTDLL"), "NtFlushBuffersFileEx"))
{
nt_flush_buffers_file_ex_ = reinterpret_cast<nt_flush_buffers_file_ex_fn>(
reinterpret_cast<void*>(nt_flush_buffers_file_ex_ptr));
}
}
void win_iocp_file_service::shutdown()
{
handle_service_.shutdown();
}
asio::error_code win_iocp_file_service::open(
win_iocp_file_service::implementation_type& impl,
const char* path, file_base::flags open_flags,
asio::error_code& ec)
{
if (is_open(impl))
{
ec = asio::error::already_open;
ASIO_ERROR_LOCATION(ec);
return ec;
}
DWORD access = 0;
if ((open_flags & file_base::read_only) != 0)
access = GENERIC_READ;
else if ((open_flags & file_base::write_only) != 0)
access = GENERIC_WRITE;
else if ((open_flags & file_base::read_write) != 0)
access = GENERIC_READ | GENERIC_WRITE;
DWORD share = FILE_SHARE_READ | FILE_SHARE_WRITE;
DWORD disposition = 0;
if ((open_flags & file_base::create) != 0)
{
if ((open_flags & file_base::exclusive) != 0)
disposition = CREATE_NEW;
else
disposition = OPEN_ALWAYS;
}
else
{
if ((open_flags & file_base::truncate) != 0)
disposition = TRUNCATE_EXISTING;
else
disposition = OPEN_EXISTING;
}
DWORD flags = FILE_ATTRIBUTE_NORMAL | FILE_FLAG_OVERLAPPED;
if (impl.is_stream_)
flags |= FILE_FLAG_SEQUENTIAL_SCAN;
else
flags |= FILE_FLAG_RANDOM_ACCESS;
if ((open_flags & file_base::sync_all_on_write) != 0)
flags |= FILE_FLAG_WRITE_THROUGH;
impl.offset_ = 0;
HANDLE handle = ::CreateFileA(path, access, share, 0, disposition, flags, 0);
if (handle != INVALID_HANDLE_VALUE)
{
if (disposition == OPEN_ALWAYS)
{
if ((open_flags & file_base::truncate) != 0)
{
if (!::SetEndOfFile(handle))
{
DWORD last_error = ::GetLastError();
::CloseHandle(handle);
ec.assign(last_error, asio::error::get_system_category());
ASIO_ERROR_LOCATION(ec);
return ec;
}
}
}
if (disposition == OPEN_ALWAYS || disposition == OPEN_EXISTING)
{
if ((open_flags & file_base::append) != 0)
{
LARGE_INTEGER distance, new_offset;
distance.QuadPart = 0;
if (::SetFilePointerEx(handle, distance, &new_offset, FILE_END))
{
impl.offset_ = static_cast<uint64_t>(new_offset.QuadPart);
}
else
{
DWORD last_error = ::GetLastError();
::CloseHandle(handle);
ec.assign(last_error, asio::error::get_system_category());
ASIO_ERROR_LOCATION(ec);
return ec;
}
}
}
handle_service_.assign(impl, handle, ec);
if (ec)
::CloseHandle(handle);
ASIO_ERROR_LOCATION(ec);
return ec;
}
else
{
DWORD last_error = ::GetLastError();
ec.assign(last_error, asio::error::get_system_category());
ASIO_ERROR_LOCATION(ec);
return ec;
}
}
uint64_t win_iocp_file_service::size(
const win_iocp_file_service::implementation_type& impl,
asio::error_code& ec) const
{
LARGE_INTEGER result;
if (::GetFileSizeEx(native_handle(impl), &result))
{
asio::error::clear(ec);
return static_cast<uint64_t>(result.QuadPart);
}
else
{
DWORD last_error = ::GetLastError();
ec.assign(last_error, asio::error::get_system_category());
ASIO_ERROR_LOCATION(ec);
return 0;
}
}
asio::error_code win_iocp_file_service::resize(
win_iocp_file_service::implementation_type& impl,
uint64_t n, asio::error_code& ec)
{
LARGE_INTEGER distance;
distance.QuadPart = n;
if (::SetFilePointerEx(native_handle(impl), distance, 0, FILE_BEGIN))
{
BOOL result = ::SetEndOfFile(native_handle(impl));
DWORD last_error = ::GetLastError();
distance.QuadPart = static_cast<LONGLONG>(impl.offset_);
if (!::SetFilePointerEx(native_handle(impl), distance, 0, FILE_BEGIN))
{
result = FALSE;
last_error = ::GetLastError();
}
if (result)
asio::error::clear(ec);
else
ec.assign(last_error, asio::error::get_system_category());
ASIO_ERROR_LOCATION(ec);
return ec;
}
else
{
DWORD last_error = ::GetLastError();
ec.assign(last_error, asio::error::get_system_category());
ASIO_ERROR_LOCATION(ec);
return ec;
}
}
asio::error_code win_iocp_file_service::sync_all(
win_iocp_file_service::implementation_type& impl,
asio::error_code& ec)
{
BOOL result = ::FlushFileBuffers(native_handle(impl));
if (result)
{
asio::error::clear(ec);
return ec;
}
else
{
DWORD last_error = ::GetLastError();
ec.assign(last_error, asio::error::get_system_category());
ASIO_ERROR_LOCATION(ec);
return ec;
}
}
asio::error_code win_iocp_file_service::sync_data(
win_iocp_file_service::implementation_type& impl,
asio::error_code& ec)
{
if (nt_flush_buffers_file_ex_)
{
io_status_block status = {};
if (!nt_flush_buffers_file_ex_(native_handle(impl),
flush_flags_file_data_sync_only, 0, 0, &status))
{
asio::error::clear(ec);
return ec;
}
}
return sync_all(impl, ec);
}
uint64_t win_iocp_file_service::seek(
win_iocp_file_service::implementation_type& impl, int64_t offset,
file_base::seek_basis whence, asio::error_code& ec)
{
DWORD method;
switch (whence)
{
case file_base::seek_set:
method = FILE_BEGIN;
break;
case file_base::seek_cur:
method = FILE_BEGIN;
offset = static_cast<int64_t>(impl.offset_) + offset;
break;
case file_base::seek_end:
method = FILE_END;
break;
default:
ec = asio::error::invalid_argument;
ASIO_ERROR_LOCATION(ec);
return 0;
}
LARGE_INTEGER distance, new_offset;
distance.QuadPart = offset;
if (::SetFilePointerEx(native_handle(impl), distance, &new_offset, method))
{
impl.offset_ = new_offset.QuadPart;
asio::error::clear(ec);
return impl.offset_;
}
else
{
DWORD last_error = ::GetLastError();
ec.assign(last_error, asio::error::get_system_category());
ASIO_ERROR_LOCATION(ec);
return 0;
}
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // defined(ASIO_HAS_FILE)
// && defined(ASIO_HAS_WINDOWS_RANDOM_ACCESS_HANDLE)
#endif // ASIO_DETAIL_IMPL_WIN_IOCP_FILE_SERVICE_IPP
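// The flag-to-CreateFile mapping above is what backs the public file
// classes. A usage sketch (illustrative only, assuming ASIO_HAS_FILE and
// the public asio::stream_file interface from <asio.hpp>):
//
// @code
// asio::io_context ctx;
// asio::stream_file file(ctx, "log.txt",
//     asio::stream_file::write_only
//       | asio::stream_file::create
//       | asio::stream_file::append);
// std::string line = "hello\n";
// asio::write(file, asio::buffer(line));
// file.sync_all(); // flush data and metadata to disk
// @endcode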
|
0 | repos/asio/asio/include/asio/detail | repos/asio/asio/include/asio/detail/impl/strand_service.ipp | //
// detail/impl/strand_service.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2024 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_STRAND_SERVICE_IPP
#define ASIO_DETAIL_IMPL_STRAND_SERVICE_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#include "asio/detail/call_stack.hpp"
#include "asio/detail/strand_service.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
struct strand_service::on_do_complete_exit
{
io_context_impl* owner_;
strand_impl* impl_;
~on_do_complete_exit()
{
impl_->mutex_.lock();
impl_->ready_queue_.push(impl_->waiting_queue_);
bool more_handlers = impl_->locked_ = !impl_->ready_queue_.empty();
impl_->mutex_.unlock();
if (more_handlers)
owner_->post_immediate_completion(impl_, true);
}
};
strand_service::strand_service(asio::io_context& io_context)
: asio::detail::service_base<strand_service>(io_context),
io_context_(io_context),
io_context_impl_(asio::use_service<io_context_impl>(io_context)),
mutex_(),
salt_(0)
{
}
void strand_service::shutdown()
{
op_queue<operation> ops;
asio::detail::mutex::scoped_lock lock(mutex_);
for (std::size_t i = 0; i < num_implementations; ++i)
{
if (strand_impl* impl = implementations_[i].get())
{
ops.push(impl->waiting_queue_);
ops.push(impl->ready_queue_);
}
}
}
void strand_service::construct(strand_service::implementation_type& impl)
{
asio::detail::mutex::scoped_lock lock(mutex_);
std::size_t salt = salt_++;
#if defined(ASIO_ENABLE_SEQUENTIAL_STRAND_ALLOCATION)
std::size_t index = salt;
#else // defined(ASIO_ENABLE_SEQUENTIAL_STRAND_ALLOCATION)
std::size_t index = reinterpret_cast<std::size_t>(&impl);
index += (reinterpret_cast<std::size_t>(&impl) >> 3);
index ^= salt + 0x9e3779b9 + (index << 6) + (index >> 2);
#endif // defined(ASIO_ENABLE_SEQUENTIAL_STRAND_ALLOCATION)
index = index % num_implementations;
if (!implementations_[index].get())
implementations_[index].reset(new strand_impl);
impl = implementations_[index].get();
}
bool strand_service::running_in_this_thread(
const implementation_type& impl) const
{
return call_stack<strand_impl>::contains(impl) != 0;
}
struct strand_service::on_dispatch_exit
{
io_context_impl* io_context_impl_;
strand_impl* impl_;
~on_dispatch_exit()
{
impl_->mutex_.lock();
impl_->ready_queue_.push(impl_->waiting_queue_);
bool more_handlers = impl_->locked_ = !impl_->ready_queue_.empty();
impl_->mutex_.unlock();
if (more_handlers)
io_context_impl_->post_immediate_completion(impl_, false);
}
};
void strand_service::do_dispatch(implementation_type& impl, operation* op)
{
// If we are running inside the io_context, and no other handler already
// holds the strand lock, then the handler can run immediately.
bool can_dispatch = io_context_impl_.can_dispatch();
impl->mutex_.lock();
if (can_dispatch && !impl->locked_)
{
// Immediate invocation is allowed.
impl->locked_ = true;
impl->mutex_.unlock();
// Indicate that this strand is executing on the current thread.
call_stack<strand_impl>::context ctx(impl);
// Ensure the next handler, if any, is scheduled on block exit.
on_dispatch_exit on_exit = { &io_context_impl_, impl };
(void)on_exit;
op->complete(&io_context_impl_, asio::error_code(), 0);
return;
}
if (impl->locked_)
{
// Some other handler already holds the strand lock. Enqueue for later.
impl->waiting_queue_.push(op);
impl->mutex_.unlock();
}
else
{
// The handler is acquiring the strand lock and so is responsible for
// scheduling the strand.
impl->locked_ = true;
impl->mutex_.unlock();
impl->ready_queue_.push(op);
io_context_impl_.post_immediate_completion(impl, false);
}
}
void strand_service::do_post(implementation_type& impl,
operation* op, bool is_continuation)
{
impl->mutex_.lock();
if (impl->locked_)
{
// Some other handler already holds the strand lock. Enqueue for later.
impl->waiting_queue_.push(op);
impl->mutex_.unlock();
}
else
{
// The handler is acquiring the strand lock and so is responsible for
// scheduling the strand.
impl->locked_ = true;
impl->mutex_.unlock();
impl->ready_queue_.push(op);
io_context_impl_.post_immediate_completion(impl, is_continuation);
}
}
void strand_service::do_complete(void* owner, operation* base,
const asio::error_code& ec, std::size_t /*bytes_transferred*/)
{
if (owner)
{
strand_impl* impl = static_cast<strand_impl*>(base);
// Indicate that this strand is executing on the current thread.
call_stack<strand_impl>::context ctx(impl);
// Ensure the next handler, if any, is scheduled on block exit.
on_do_complete_exit on_exit;
on_exit.owner_ = static_cast<io_context_impl*>(owner);
on_exit.impl_ = impl;
// Run all ready handlers. No lock is required since the ready queue is
// accessed only within the strand.
while (operation* o = impl->ready_queue_.front())
{
impl->ready_queue_.pop();
o->complete(owner, ec, 0);
}
}
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // ASIO_DETAIL_IMPL_STRAND_SERVICE_IPP
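// The waiting_queue_/ready_queue_ pair above implements a common
// lock-minimisation pattern: the thread that holds the strand drains
// ready_queue_ without a lock, and the mutex is only taken to move newly
// queued work across when it finishes. A standalone sketch of that handoff
// (illustrative only, using the standard library rather than asio types):
//
// @code
// #include <deque>
// #include <functional>
// #include <mutex>
//
// struct tiny_strand
// {
//   std::mutex m;
//   bool locked = false;
//   std::deque<std::function<void()>> waiting, ready;
//
//   // Returns true if the caller acquired the strand and must now run it.
//   bool enqueue(std::function<void()> f)
//   {
//     std::lock_guard<std::mutex> lk(m);
//     if (locked) { waiting.push_back(std::move(f)); return false; }
//     locked = true;
//     ready.push_back(std::move(f));
//     return true;
//   }
//
//   void run()
//   {
//     for (;;)
//     {
//       while (!ready.empty()) { ready.front()(); ready.pop_front(); } // no lock
//       std::lock_guard<std::mutex> lk(m);
//       ready.swap(waiting);
//       if (ready.empty()) { locked = false; return; }
//     }
//   }
// };
// @endcode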
|
0 | repos/asio/asio/include/asio/detail | repos/asio/asio/include/asio/detail/impl/io_uring_file_service.ipp | //
// detail/impl/io_uring_file_service.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2024 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_IO_URING_FILE_SERVICE_IPP
#define ASIO_DETAIL_IMPL_IO_URING_FILE_SERVICE_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if defined(ASIO_HAS_FILE) \
&& defined(ASIO_HAS_IO_URING)
#include <cstring>
#include <sys/stat.h>
#include "asio/detail/io_uring_file_service.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
io_uring_file_service::io_uring_file_service(
execution_context& context)
: execution_context_service_base<io_uring_file_service>(context),
descriptor_service_(context)
{
}
void io_uring_file_service::shutdown()
{
descriptor_service_.shutdown();
}
asio::error_code io_uring_file_service::open(
io_uring_file_service::implementation_type& impl,
const char* path, file_base::flags open_flags,
asio::error_code& ec)
{
if (is_open(impl))
{
ec = asio::error::already_open;
ASIO_ERROR_LOCATION(ec);
return ec;
}
descriptor_ops::state_type state = 0;
int fd = descriptor_ops::open(path, static_cast<int>(open_flags), 0777, ec);
if (fd < 0)
{
ASIO_ERROR_LOCATION(ec);
return ec;
}
// We're done. Take ownership of the file descriptor.
if (descriptor_service_.assign(impl, fd, ec))
{
asio::error_code ignored_ec;
descriptor_ops::close(fd, state, ignored_ec);
}
(void)::posix_fadvise(native_handle(impl), 0, 0,
impl.is_stream_ ? POSIX_FADV_SEQUENTIAL : POSIX_FADV_RANDOM);
ASIO_ERROR_LOCATION(ec);
return ec;
}
uint64_t io_uring_file_service::size(
const io_uring_file_service::implementation_type& impl,
asio::error_code& ec) const
{
struct stat s;
int result = ::fstat(native_handle(impl), &s);
descriptor_ops::get_last_error(ec, result != 0);
ASIO_ERROR_LOCATION(ec);
return !ec ? s.st_size : 0;
}
asio::error_code io_uring_file_service::resize(
io_uring_file_service::implementation_type& impl,
uint64_t n, asio::error_code& ec)
{
int result = ::ftruncate(native_handle(impl), n);
descriptor_ops::get_last_error(ec, result != 0);
ASIO_ERROR_LOCATION(ec);
return ec;
}
asio::error_code io_uring_file_service::sync_all(
io_uring_file_service::implementation_type& impl,
asio::error_code& ec)
{
int result = ::fsync(native_handle(impl));
descriptor_ops::get_last_error(ec, result != 0);
return ec;
}
asio::error_code io_uring_file_service::sync_data(
io_uring_file_service::implementation_type& impl,
asio::error_code& ec)
{
#if defined(_POSIX_SYNCHRONIZED_IO)
int result = ::fdatasync(native_handle(impl));
#else // defined(_POSIX_SYNCHRONIZED_IO)
int result = ::fsync(native_handle(impl));
#endif // defined(_POSIX_SYNCHRONIZED_IO)
descriptor_ops::get_last_error(ec, result != 0);
ASIO_ERROR_LOCATION(ec);
return ec;
}
uint64_t io_uring_file_service::seek(
io_uring_file_service::implementation_type& impl, int64_t offset,
file_base::seek_basis whence, asio::error_code& ec)
{
int64_t result = ::lseek(native_handle(impl), offset, whence);
descriptor_ops::get_last_error(ec, result < 0);
ASIO_ERROR_LOCATION(ec);
return !ec ? static_cast<uint64_t>(result) : 0;
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // defined(ASIO_HAS_FILE)
// && defined(ASIO_HAS_IO_URING)
#endif // ASIO_DETAIL_IMPL_IO_URING_FILE_SERVICE_IPP
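// sync_all() and sync_data() above map to fsync() and, where available,
// fdatasync(): the latter may skip flushing metadata (such as timestamps)
// that is not needed to read the data back. A minimal POSIX sketch of that
// choice (illustrative only):
//
// @code
// #include <unistd.h>
//
// void flush_to_stable_storage(int fd)
// {
// #if defined(_POSIX_SYNCHRONIZED_IO)
//   ::fdatasync(fd); // data, plus only the metadata needed to retrieve it
// #else
//   ::fsync(fd);     // fall back to flushing data and all metadata
// #endif
// }
// @endcode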
|
0 | repos/asio/asio/include/asio/detail | repos/asio/asio/include/asio/detail/impl/descriptor_ops.ipp | //
// detail/impl/descriptor_ops.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2024 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_DESCRIPTOR_OPS_IPP
#define ASIO_DETAIL_IMPL_DESCRIPTOR_OPS_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#include <cerrno>
#include "asio/detail/descriptor_ops.hpp"
#include "asio/error.hpp"
#if !defined(ASIO_WINDOWS) \
&& !defined(ASIO_WINDOWS_RUNTIME) \
&& !defined(__CYGWIN__)
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
namespace descriptor_ops {
int open(const char* path, int flags, asio::error_code& ec)
{
int result = ::open(path, flags);
get_last_error(ec, result < 0);
return result;
}
int open(const char* path, int flags,
unsigned mode, asio::error_code& ec)
{
int result = ::open(path, flags, mode);
get_last_error(ec, result < 0);
return result;
}
int close(int d, state_type& state, asio::error_code& ec)
{
int result = 0;
if (d != -1)
{
result = ::close(d);
get_last_error(ec, result < 0);
if (result != 0
&& (ec == asio::error::would_block
|| ec == asio::error::try_again))
{
// According to UNIX Network Programming Vol. 1, it is possible for
// close() to fail with EWOULDBLOCK under certain circumstances. What
// isn't clear is the state of the descriptor after this error. The one
// current OS where this behaviour is seen, Windows, says that the socket
// remains open. Therefore we'll put the descriptor back into blocking
// mode and have another attempt at closing it.
#if defined(__SYMBIAN32__) || defined(__EMSCRIPTEN__)
int flags = ::fcntl(d, F_GETFL, 0);
if (flags >= 0)
::fcntl(d, F_SETFL, flags & ~O_NONBLOCK);
#else // defined(__SYMBIAN32__) || defined(__EMSCRIPTEN__)
ioctl_arg_type arg = 0;
if ((state & possible_dup) == 0)
{
result = ::ioctl(d, FIONBIO, &arg);
get_last_error(ec, result < 0);
}
if ((state & possible_dup) != 0
# if defined(ENOTTY)
|| ec.value() == ENOTTY
# endif // defined(ENOTTY)
# if defined(ENOTCAPABLE)
|| ec.value() == ENOTCAPABLE
# endif // defined(ENOTCAPABLE)
)
{
int flags = ::fcntl(d, F_GETFL, 0);
if (flags >= 0)
::fcntl(d, F_SETFL, flags & ~O_NONBLOCK);
}
#endif // defined(__SYMBIAN32__) || defined(__EMSCRIPTEN__)
state &= ~non_blocking;
result = ::close(d);
get_last_error(ec, result < 0);
}
}
return result;
}
bool set_user_non_blocking(int d, state_type& state,
bool value, asio::error_code& ec)
{
if (d == -1)
{
ec = asio::error::bad_descriptor;
return false;
}
#if defined(__SYMBIAN32__) || defined(__EMSCRIPTEN__)
int result = ::fcntl(d, F_GETFL, 0);
get_last_error(ec, result < 0);
if (result >= 0)
{
int flag = (value ? (result | O_NONBLOCK) : (result & ~O_NONBLOCK));
result = (flag != result) ? ::fcntl(d, F_SETFL, flag) : 0;
get_last_error(ec, result < 0);
}
#else // defined(__SYMBIAN32__) || defined(__EMSCRIPTEN__)
ioctl_arg_type arg = (value ? 1 : 0);
int result = 0;
if ((state & possible_dup) == 0)
{
result = ::ioctl(d, FIONBIO, &arg);
get_last_error(ec, result < 0);
}
if ((state & possible_dup) != 0
# if defined(ENOTTY)
|| ec.value() == ENOTTY
# endif // defined(ENOTTY)
# if defined(ENOTCAPABLE)
|| ec.value() == ENOTCAPABLE
# endif // defined(ENOTCAPABLE)
)
{
result = ::fcntl(d, F_GETFL, 0);
get_last_error(ec, result < 0);
if (result >= 0)
{
int flag = (value ? (result | O_NONBLOCK) : (result & ~O_NONBLOCK));
result = (flag != result) ? ::fcntl(d, F_SETFL, flag) : 0;
get_last_error(ec, result < 0);
}
}
#endif // defined(__SYMBIAN32__) || defined(__EMSCRIPTEN__)
if (result >= 0)
{
if (value)
state |= user_set_non_blocking;
else
{
// Clearing the user-set non-blocking mode always overrides any
// internally-set non-blocking flag. Any subsequent asynchronous
// operations will need to re-enable non-blocking I/O.
state &= ~(user_set_non_blocking | internal_non_blocking);
}
return true;
}
return false;
}
bool set_internal_non_blocking(int d, state_type& state,
bool value, asio::error_code& ec)
{
if (d == -1)
{
ec = asio::error::bad_descriptor;
return false;
}
if (!value && (state & user_set_non_blocking))
{
// It does not make sense to clear the internal non-blocking flag if the
// user still wants non-blocking behaviour. Return an error and let the
// caller figure out whether to update the user-set non-blocking flag.
ec = asio::error::invalid_argument;
return false;
}
#if defined(__SYMBIAN32__) || defined(__EMSCRIPTEN__)
int result = ::fcntl(d, F_GETFL, 0);
get_last_error(ec, result < 0);
if (result >= 0)
{
int flag = (value ? (result | O_NONBLOCK) : (result & ~O_NONBLOCK));
result = (flag != result) ? ::fcntl(d, F_SETFL, flag) : 0;
get_last_error(ec, result < 0);
}
#else // defined(__SYMBIAN32__) || defined(__EMSCRIPTEN__)
ioctl_arg_type arg = (value ? 1 : 0);
int result = 0;
if ((state & possible_dup) == 0)
{
result = ::ioctl(d, FIONBIO, &arg);
get_last_error(ec, result < 0);
}
if ((state & possible_dup) != 0
# if defined(ENOTTY)
|| ec.value() == ENOTTY
# endif // defined(ENOTTY)
# if defined(ENOTCAPABLE)
|| ec.value() == ENOTCAPABLE
# endif // defined(ENOTCAPABLE)
)
{
result = ::fcntl(d, F_GETFL, 0);
get_last_error(ec, result < 0);
if (result >= 0)
{
int flag = (value ? (result | O_NONBLOCK) : (result & ~O_NONBLOCK));
result = (flag != result) ? ::fcntl(d, F_SETFL, flag) : 0;
get_last_error(ec, result < 0);
}
}
#endif // defined(__SYMBIAN32__) || defined(__EMSCRIPTEN__)
if (result >= 0)
{
if (value)
state |= internal_non_blocking;
else
state &= ~internal_non_blocking;
return true;
}
return false;
}
std::size_t sync_read(int d, state_type state, buf* bufs,
std::size_t count, bool all_empty, asio::error_code& ec)
{
if (d == -1)
{
ec = asio::error::bad_descriptor;
return 0;
}
// A request to read 0 bytes on a stream is a no-op.
if (all_empty)
{
asio::error::clear(ec);
return 0;
}
// Read some data.
for (;;)
{
// Try to complete the operation without blocking.
signed_size_type bytes = ::readv(d, bufs, static_cast<int>(count));
get_last_error(ec, bytes < 0);
// Check if operation succeeded.
if (bytes > 0)
return bytes;
// Check for EOF.
if (bytes == 0)
{
ec = asio::error::eof;
return 0;
}
// Operation failed.
if ((state & user_set_non_blocking)
|| (ec != asio::error::would_block
&& ec != asio::error::try_again))
return 0;
// Wait for descriptor to become ready.
if (descriptor_ops::poll_read(d, 0, ec) < 0)
return 0;
}
}
std::size_t sync_read1(int d, state_type state, void* data,
std::size_t size, asio::error_code& ec)
{
if (d == -1)
{
ec = asio::error::bad_descriptor;
return 0;
}
// A request to read 0 bytes on a stream is a no-op.
if (size == 0)
{
asio::error::clear(ec);
return 0;
}
// Read some data.
for (;;)
{
// Try to complete the operation without blocking.
signed_size_type bytes = ::read(d, data, size);
get_last_error(ec, bytes < 0);
// Check if operation succeeded.
if (bytes > 0)
return bytes;
// Check for EOF.
if (bytes == 0)
{
ec = asio::error::eof;
return 0;
}
// Operation failed.
if ((state & user_set_non_blocking)
|| (ec != asio::error::would_block
&& ec != asio::error::try_again))
return 0;
// Wait for descriptor to become ready.
if (descriptor_ops::poll_read(d, 0, ec) < 0)
return 0;
}
}
bool non_blocking_read(int d, buf* bufs, std::size_t count,
asio::error_code& ec, std::size_t& bytes_transferred)
{
for (;;)
{
// Read some data.
signed_size_type bytes = ::readv(d, bufs, static_cast<int>(count));
get_last_error(ec, bytes < 0);
// Check for end of stream.
if (bytes == 0)
{
ec = asio::error::eof;
return true;
}
// Check if operation succeeded.
if (bytes > 0)
{
bytes_transferred = bytes;
return true;
}
// Retry operation if interrupted by signal.
if (ec == asio::error::interrupted)
continue;
// Check if we need to run the operation again.
if (ec == asio::error::would_block
|| ec == asio::error::try_again)
return false;
// Operation failed.
bytes_transferred = 0;
return true;
}
}
bool non_blocking_read1(int d, void* data, std::size_t size,
asio::error_code& ec, std::size_t& bytes_transferred)
{
for (;;)
{
// Read some data.
signed_size_type bytes = ::read(d, data, size);
get_last_error(ec, bytes < 0);
// Check for end of stream.
if (bytes == 0)
{
ec = asio::error::eof;
return true;
}
// Check if operation succeeded.
if (bytes > 0)
{
bytes_transferred = bytes;
return true;
}
// Retry operation if interrupted by signal.
if (ec == asio::error::interrupted)
continue;
// Check if we need to run the operation again.
if (ec == asio::error::would_block
|| ec == asio::error::try_again)
return false;
// Operation failed.
bytes_transferred = 0;
return true;
}
}
std::size_t sync_write(int d, state_type state, const buf* bufs,
std::size_t count, bool all_empty, asio::error_code& ec)
{
if (d == -1)
{
ec = asio::error::bad_descriptor;
return 0;
}
// A request to write 0 bytes on a stream is a no-op.
if (all_empty)
{
asio::error::clear(ec);
return 0;
}
// Write some data.
for (;;)
{
// Try to complete the operation without blocking.
signed_size_type bytes = ::writev(d, bufs, static_cast<int>(count));
get_last_error(ec, bytes < 0);
// Check if operation succeeded.
if (bytes > 0)
return bytes;
// Operation failed.
if ((state & user_set_non_blocking)
|| (ec != asio::error::would_block
&& ec != asio::error::try_again))
return 0;
// Wait for descriptor to become ready.
if (descriptor_ops::poll_write(d, 0, ec) < 0)
return 0;
}
}
std::size_t sync_write1(int d, state_type state, const void* data,
std::size_t size, asio::error_code& ec)
{
if (d == -1)
{
ec = asio::error::bad_descriptor;
return 0;
}
// A request to write 0 bytes on a stream is a no-op.
if (size == 0)
{
asio::error::clear(ec);
return 0;
}
// Write some data.
for (;;)
{
// Try to complete the operation without blocking.
signed_size_type bytes = ::write(d, data, size);
get_last_error(ec, bytes < 0);
// Check if operation succeeded.
if (bytes > 0)
return bytes;
// Operation failed.
if ((state & user_set_non_blocking)
|| (ec != asio::error::would_block
&& ec != asio::error::try_again))
return 0;
// Wait for descriptor to become ready.
if (descriptor_ops::poll_write(d, 0, ec) < 0)
return 0;
}
}
bool non_blocking_write(int d, const buf* bufs, std::size_t count,
asio::error_code& ec, std::size_t& bytes_transferred)
{
for (;;)
{
// Write some data.
signed_size_type bytes = ::writev(d, bufs, static_cast<int>(count));
get_last_error(ec, bytes < 0);
// Check if operation succeeded.
if (bytes >= 0)
{
bytes_transferred = bytes;
return true;
}
// Retry operation if interrupted by signal.
if (ec == asio::error::interrupted)
continue;
// Check if we need to run the operation again.
if (ec == asio::error::would_block
|| ec == asio::error::try_again)
return false;
// Operation failed.
bytes_transferred = 0;
return true;
}
}
bool non_blocking_write1(int d, const void* data, std::size_t size,
asio::error_code& ec, std::size_t& bytes_transferred)
{
for (;;)
{
// Write some data.
signed_size_type bytes = ::write(d, data, size);
get_last_error(ec, bytes < 0);
// Check if operation succeeded.
if (bytes >= 0)
{
bytes_transferred = bytes;
return true;
}
// Retry operation if interrupted by signal.
if (ec == asio::error::interrupted)
continue;
// Check if we need to run the operation again.
if (ec == asio::error::would_block
|| ec == asio::error::try_again)
return false;
// Operation failed.
bytes_transferred = 0;
return true;
}
}
#if defined(ASIO_HAS_FILE)
std::size_t sync_read_at(int d, state_type state, uint64_t offset,
buf* bufs, std::size_t count, bool all_empty, asio::error_code& ec)
{
if (d == -1)
{
ec = asio::error::bad_descriptor;
return 0;
}
// A request to read 0 bytes on a stream is a no-op.
if (all_empty)
{
asio::error::clear(ec);
return 0;
}
// Read some data.
for (;;)
{
// Try to complete the operation without blocking.
signed_size_type bytes = ::preadv(d, bufs, static_cast<int>(count), offset);
get_last_error(ec, bytes < 0);
// Check if operation succeeded.
if (bytes > 0)
return bytes;
// Check for EOF.
if (bytes == 0)
{
ec = asio::error::eof;
return 0;
}
// Operation failed.
if ((state & user_set_non_blocking)
|| (ec != asio::error::would_block
&& ec != asio::error::try_again))
return 0;
// Wait for descriptor to become ready.
if (descriptor_ops::poll_read(d, 0, ec) < 0)
return 0;
}
}
std::size_t sync_read_at1(int d, state_type state, uint64_t offset,
void* data, std::size_t size, asio::error_code& ec)
{
if (d == -1)
{
ec = asio::error::bad_descriptor;
return 0;
}
// A request to read 0 bytes on a stream is a no-op.
if (size == 0)
{
asio::error::clear(ec);
return 0;
}
// Read some data.
for (;;)
{
// Try to complete the operation without blocking.
signed_size_type bytes = ::pread(d, data, size, offset);
get_last_error(ec, bytes < 0);
// Check if operation succeeded.
if (bytes > 0)
return bytes;
// Check for EOF.
if (bytes == 0)
{
ec = asio::error::eof;
return 0;
}
// Operation failed.
if ((state & user_set_non_blocking)
|| (ec != asio::error::would_block
&& ec != asio::error::try_again))
return 0;
// Wait for descriptor to become ready.
if (descriptor_ops::poll_read(d, 0, ec) < 0)
return 0;
}
}
bool non_blocking_read_at(int d, uint64_t offset, buf* bufs, std::size_t count,
asio::error_code& ec, std::size_t& bytes_transferred)
{
for (;;)
{
// Read some data.
signed_size_type bytes = ::preadv(d, bufs, static_cast<int>(count), offset);
get_last_error(ec, bytes < 0);
// Check for EOF.
if (bytes == 0)
{
ec = asio::error::eof;
return true;
}
// Check if operation succeeded.
if (bytes > 0)
{
bytes_transferred = bytes;
return true;
}
// Retry operation if interrupted by signal.
if (ec == asio::error::interrupted)
continue;
// Check if we need to run the operation again.
if (ec == asio::error::would_block
|| ec == asio::error::try_again)
return false;
// Operation failed.
bytes_transferred = 0;
return true;
}
}
bool non_blocking_read_at1(int d, uint64_t offset, void* data, std::size_t size,
asio::error_code& ec, std::size_t& bytes_transferred)
{
for (;;)
{
// Read some data.
signed_size_type bytes = ::pread(d, data, size, offset);
get_last_error(ec, bytes < 0);
// Check for EOF.
if (bytes == 0)
{
ec = asio::error::eof;
return true;
}
// Check if operation succeeded.
if (bytes > 0)
{
bytes_transferred = bytes;
return true;
}
// Retry operation if interrupted by signal.
if (ec == asio::error::interrupted)
continue;
// Check if we need to run the operation again.
if (ec == asio::error::would_block
|| ec == asio::error::try_again)
return false;
// Operation failed.
bytes_transferred = 0;
return true;
}
}
std::size_t sync_write_at(int d, state_type state, uint64_t offset,
const buf* bufs, std::size_t count, bool all_empty,
asio::error_code& ec)
{
if (d == -1)
{
ec = asio::error::bad_descriptor;
return 0;
}
// A request to write 0 bytes on a stream is a no-op.
if (all_empty)
{
asio::error::clear(ec);
return 0;
}
// Write some data.
for (;;)
{
// Try to complete the operation without blocking.
signed_size_type bytes = ::pwritev(d,
bufs, static_cast<int>(count), offset);
get_last_error(ec, bytes < 0);
// Check if operation succeeded.
if (bytes > 0)
return bytes;
// Operation failed.
if ((state & user_set_non_blocking)
|| (ec != asio::error::would_block
&& ec != asio::error::try_again))
return 0;
// Wait for descriptor to become ready.
if (descriptor_ops::poll_write(d, 0, ec) < 0)
return 0;
}
}
std::size_t sync_write_at1(int d, state_type state, uint64_t offset,
const void* data, std::size_t size, asio::error_code& ec)
{
if (d == -1)
{
ec = asio::error::bad_descriptor;
return 0;
}
// A request to write 0 bytes on a stream is a no-op.
if (size == 0)
{
asio::error::clear(ec);
return 0;
}
// Write some data.
for (;;)
{
// Try to complete the operation without blocking.
signed_size_type bytes = ::pwrite(d, data, size, offset);
get_last_error(ec, bytes < 0);
// Check if operation succeeded.
if (bytes > 0)
return bytes;
// Operation failed.
if ((state & user_set_non_blocking)
|| (ec != asio::error::would_block
&& ec != asio::error::try_again))
return 0;
// Wait for descriptor to become ready.
if (descriptor_ops::poll_write(d, 0, ec) < 0)
return 0;
}
}
bool non_blocking_write_at(int d, uint64_t offset,
const buf* bufs, std::size_t count,
asio::error_code& ec, std::size_t& bytes_transferred)
{
for (;;)
{
// Write some data.
signed_size_type bytes = ::pwritev(d,
bufs, static_cast<int>(count), offset);
get_last_error(ec, bytes < 0);
// Check if operation succeeded.
if (bytes >= 0)
{
bytes_transferred = bytes;
return true;
}
// Retry operation if interrupted by signal.
if (ec == asio::error::interrupted)
continue;
// Check if we need to run the operation again.
if (ec == asio::error::would_block
|| ec == asio::error::try_again)
return false;
// Operation failed.
bytes_transferred = 0;
return true;
}
}
bool non_blocking_write_at1(int d, uint64_t offset,
const void* data, std::size_t size,
asio::error_code& ec, std::size_t& bytes_transferred)
{
for (;;)
{
// Write some data.
signed_size_type bytes = ::pwrite(d, data, size, offset);
get_last_error(ec, bytes < 0);
// Check if operation succeeded.
if (bytes >= 0)
{
bytes_transferred = bytes;
return true;
}
// Retry operation if interrupted by signal.
if (ec == asio::error::interrupted)
continue;
// Check if we need to run the operation again.
if (ec == asio::error::would_block
|| ec == asio::error::try_again)
return false;
// Operation failed.
bytes_transferred = 0;
return true;
}
}
#endif // defined(ASIO_HAS_FILE)
int ioctl(int d, state_type& state, long cmd,
ioctl_arg_type* arg, asio::error_code& ec)
{
if (d == -1)
{
ec = asio::error::bad_descriptor;
return -1;
}
int result = ::ioctl(d, cmd, arg);
get_last_error(ec, result < 0);
if (result >= 0)
{
// When updating the non-blocking mode we always perform the ioctl syscall,
// even if the flags would otherwise indicate that the descriptor is
// already in the correct state. This ensures that the underlying
// descriptor is put into the state that has been requested by the user. If
// the ioctl syscall was successful then we need to update the flags to
// match.
if (cmd == static_cast<long>(FIONBIO))
{
if (*arg)
{
state |= user_set_non_blocking;
}
else
{
// Clearing the non-blocking mode always overrides any internally-set
// non-blocking flag. Any subsequent asynchronous operations will need
// to re-enable non-blocking I/O.
state &= ~(user_set_non_blocking | internal_non_blocking);
}
}
}
return result;
}
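// Note (illustrative): FIONBIO is the only request that feeds back into the
// cached state flags. Hypothetical usage that switches a descriptor into
// user-requested non-blocking mode through this wrapper:
//
//   descriptor_ops::ioctl_arg_type on = 1;
//   asio::error_code ec;
//   descriptor_ops::ioctl(fd, state, FIONBIO, &on, ec);
//   // On success, state has user_set_non_blocking set, so the sync_* calls
//   // above report would_block instead of polling and retrying.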
int fcntl(int d, int cmd, asio::error_code& ec)
{
if (d == -1)
{
ec = asio::error::bad_descriptor;
return -1;
}
int result = ::fcntl(d, cmd);
get_last_error(ec, result < 0);
return result;
}
int fcntl(int d, int cmd, long arg, asio::error_code& ec)
{
if (d == -1)
{
ec = asio::error::bad_descriptor;
return -1;
}
int result = ::fcntl(d, cmd, arg);
get_last_error(ec, result < 0);
return result;
}
int poll_read(int d, state_type state, asio::error_code& ec)
{
if (d == -1)
{
ec = asio::error::bad_descriptor;
return -1;
}
pollfd fds;
fds.fd = d;
fds.events = POLLIN;
fds.revents = 0;
int timeout = (state & user_set_non_blocking) ? 0 : -1;
int result = ::poll(&fds, 1, timeout);
get_last_error(ec, result < 0);
if (result == 0)
if (state & user_set_non_blocking)
ec = asio::error::would_block;
return result;
}
int poll_write(int d, state_type state, asio::error_code& ec)
{
if (d == -1)
{
ec = asio::error::bad_descriptor;
return -1;
}
pollfd fds;
fds.fd = d;
fds.events = POLLOUT;
fds.revents = 0;
int timeout = (state & user_set_non_blocking) ? 0 : -1;
int result = ::poll(&fds, 1, timeout);
get_last_error(ec, result < 0);
if (result == 0)
if (state & user_set_non_blocking)
ec = asio::error::would_block;
return result;
}
int poll_error(int d, state_type state, asio::error_code& ec)
{
if (d == -1)
{
ec = asio::error::bad_descriptor;
return -1;
}
pollfd fds;
fds.fd = d;
fds.events = POLLPRI | POLLERR | POLLHUP;
fds.revents = 0;
int timeout = (state & user_set_non_blocking) ? 0 : -1;
int result = ::poll(&fds, 1, timeout);
get_last_error(ec, result < 0);
if (result == 0)
if (state & user_set_non_blocking)
ec = asio::error::would_block;
return result;
}
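// Note (illustrative): the three poll_* helpers share the same timeout
// policy: wait indefinitely (timeout -1) for a blocking descriptor, but
// return immediately (timeout 0) when the user has set non-blocking mode,
// reporting would_block if nothing is ready. Hypothetical usage:
//
//   asio::error_code ec;
//   descriptor_ops::poll_read(fd, 0, ec); // blocks until fd is readable
//   descriptor_ops::poll_read(fd,
//       descriptor_ops::user_set_non_blocking, ec); // returns at once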
} // namespace descriptor_ops
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // !defined(ASIO_WINDOWS)
// && !defined(ASIO_WINDOWS_RUNTIME)
// && !defined(__CYGWIN__)
#endif // ASIO_DETAIL_IMPL_DESCRIPTOR_OPS_IPP
|
0 | repos/asio/asio/include/asio/detail | repos/asio/asio/include/asio/detail/impl/win_iocp_socket_service_base.ipp | //
// detail/impl/win_iocp_socket_service_base.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2024 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_WIN_IOCP_SOCKET_SERVICE_BASE_IPP
#define ASIO_DETAIL_IMPL_WIN_IOCP_SOCKET_SERVICE_BASE_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if defined(ASIO_HAS_IOCP)
#include "asio/detail/win_iocp_socket_service_base.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
win_iocp_socket_service_base::win_iocp_socket_service_base(
execution_context& context)
: context_(context),
iocp_service_(use_service<win_iocp_io_context>(context)),
reactor_(0),
connect_ex_(0),
nt_set_info_(0),
mutex_(),
impl_list_(0)
{
}
void win_iocp_socket_service_base::base_shutdown()
{
// Close all implementations, causing all operations to complete.
asio::detail::mutex::scoped_lock lock(mutex_);
base_implementation_type* impl = impl_list_;
while (impl)
{
close_for_destruction(*impl);
impl = impl->next_;
}
}
void win_iocp_socket_service_base::construct(
win_iocp_socket_service_base::base_implementation_type& impl)
{
impl.socket_ = invalid_socket;
impl.state_ = 0;
impl.cancel_token_.reset();
#if defined(ASIO_ENABLE_CANCELIO)
impl.safe_cancellation_thread_id_ = 0;
#endif // defined(ASIO_ENABLE_CANCELIO)
// Insert implementation into linked list of all implementations.
asio::detail::mutex::scoped_lock lock(mutex_);
impl.next_ = impl_list_;
impl.prev_ = 0;
if (impl_list_)
impl_list_->prev_ = &impl;
impl_list_ = &impl;
}
void win_iocp_socket_service_base::base_move_construct(
win_iocp_socket_service_base::base_implementation_type& impl,
win_iocp_socket_service_base::base_implementation_type& other_impl)
noexcept
{
impl.socket_ = other_impl.socket_;
other_impl.socket_ = invalid_socket;
impl.state_ = other_impl.state_;
other_impl.state_ = 0;
impl.cancel_token_ = other_impl.cancel_token_;
other_impl.cancel_token_.reset();
#if defined(ASIO_ENABLE_CANCELIO)
impl.safe_cancellation_thread_id_ = other_impl.safe_cancellation_thread_id_;
other_impl.safe_cancellation_thread_id_ = 0;
#endif // defined(ASIO_ENABLE_CANCELIO)
// Insert implementation into linked list of all implementations.
asio::detail::mutex::scoped_lock lock(mutex_);
impl.next_ = impl_list_;
impl.prev_ = 0;
if (impl_list_)
impl_list_->prev_ = &impl;
impl_list_ = &impl;
}
void win_iocp_socket_service_base::base_move_assign(
win_iocp_socket_service_base::base_implementation_type& impl,
win_iocp_socket_service_base& other_service,
win_iocp_socket_service_base::base_implementation_type& other_impl)
{
close_for_destruction(impl);
if (this != &other_service)
{
// Remove implementation from linked list of all implementations.
asio::detail::mutex::scoped_lock lock(mutex_);
if (impl_list_ == &impl)
impl_list_ = impl.next_;
if (impl.prev_)
impl.prev_->next_ = impl.next_;
if (impl.next_)
impl.next_->prev_ = impl.prev_;
impl.next_ = 0;
impl.prev_ = 0;
}
impl.socket_ = other_impl.socket_;
other_impl.socket_ = invalid_socket;
impl.state_ = other_impl.state_;
other_impl.state_ = 0;
impl.cancel_token_ = other_impl.cancel_token_;
other_impl.cancel_token_.reset();
#if defined(ASIO_ENABLE_CANCELIO)
impl.safe_cancellation_thread_id_ = other_impl.safe_cancellation_thread_id_;
other_impl.safe_cancellation_thread_id_ = 0;
#endif // defined(ASIO_ENABLE_CANCELIO)
if (this != &other_service)
{
// Insert implementation into linked list of all implementations.
asio::detail::mutex::scoped_lock lock(other_service.mutex_);
impl.next_ = other_service.impl_list_;
impl.prev_ = 0;
if (other_service.impl_list_)
other_service.impl_list_->prev_ = &impl;
other_service.impl_list_ = &impl;
}
}
void win_iocp_socket_service_base::destroy(
win_iocp_socket_service_base::base_implementation_type& impl)
{
close_for_destruction(impl);
// Remove implementation from linked list of all implementations.
asio::detail::mutex::scoped_lock lock(mutex_);
if (impl_list_ == &impl)
impl_list_ = impl.next_;
if (impl.prev_)
impl.prev_->next_ = impl.next_;
if (impl.next_)
impl.next_->prev_ = impl.prev_;
impl.next_ = 0;
impl.prev_ = 0;
}
asio::error_code win_iocp_socket_service_base::close(
win_iocp_socket_service_base::base_implementation_type& impl,
asio::error_code& ec)
{
if (is_open(impl))
{
ASIO_HANDLER_OPERATION((iocp_service_.context(),
"socket", &impl, impl.socket_, "close"));
// Check if the reactor was created, in which case we need to close the
// socket on the reactor as well to cancel any operations that might be
// running there.
select_reactor* r = static_cast<select_reactor*>(
interlocked_compare_exchange_pointer(
reinterpret_cast<void**>(&reactor_), 0, 0));
if (r)
r->deregister_descriptor(impl.socket_, impl.reactor_data_, true);
socket_ops::close(impl.socket_, impl.state_, false, ec);
if (r)
r->cleanup_descriptor_data(impl.reactor_data_);
}
else
{
ec = asio::error_code();
}
impl.socket_ = invalid_socket;
impl.state_ = 0;
impl.cancel_token_.reset();
#if defined(ASIO_ENABLE_CANCELIO)
impl.safe_cancellation_thread_id_ = 0;
#endif // defined(ASIO_ENABLE_CANCELIO)
return ec;
}
socket_type win_iocp_socket_service_base::release(
win_iocp_socket_service_base::base_implementation_type& impl,
asio::error_code& ec)
{
if (!is_open(impl))
return invalid_socket;
cancel(impl, ec);
if (ec)
return invalid_socket;
nt_set_info_fn fn = get_nt_set_info();
if (fn == 0)
{
ec = asio::error::operation_not_supported;
return invalid_socket;
}
HANDLE sock_as_handle = reinterpret_cast<HANDLE>(impl.socket_);
ULONG_PTR iosb[2] = { 0, 0 };
void* info[2] = { 0, 0 };
if (fn(sock_as_handle, iosb, &info, sizeof(info),
61 /* FileReplaceCompletionInformation */))
{
ec = asio::error::operation_not_supported;
return invalid_socket;
}
socket_type tmp = impl.socket_;
impl.socket_ = invalid_socket;
return tmp;
}
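// Note (illustrative): releasing the socket requires detaching it from the
// I/O completion port, otherwise completions could still be delivered to
// asio after ownership has passed back to the caller. This is done by
// calling NtSetInformationFile with information class 61
// (FileReplaceCompletionInformation) and a null completion port and key;
// the info array above stands in for the documented layout
//
//   struct FILE_COMPLETION_INFORMATION { HANDLE Port; PVOID Key; };
//
// When NtSetInformationFile cannot be resolved from NTDLL the operation is
// reported as operation_not_supported instead.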
asio::error_code win_iocp_socket_service_base::cancel(
win_iocp_socket_service_base::base_implementation_type& impl,
asio::error_code& ec)
{
if (!is_open(impl))
{
ec = asio::error::bad_descriptor;
return ec;
}
ASIO_HANDLER_OPERATION((iocp_service_.context(),
"socket", &impl, impl.socket_, "cancel"));
if (FARPROC cancel_io_ex_ptr = ::GetProcAddress(
::GetModuleHandleA("KERNEL32"), "CancelIoEx"))
{
// The running version of Windows supports cancellation from any thread.
typedef BOOL (WINAPI* cancel_io_ex_t)(HANDLE, LPOVERLAPPED);
cancel_io_ex_t cancel_io_ex = reinterpret_cast<cancel_io_ex_t>(
reinterpret_cast<void*>(cancel_io_ex_ptr));
socket_type sock = impl.socket_;
HANDLE sock_as_handle = reinterpret_cast<HANDLE>(sock);
if (!cancel_io_ex(sock_as_handle, 0))
{
DWORD last_error = ::GetLastError();
if (last_error == ERROR_NOT_FOUND)
{
// ERROR_NOT_FOUND means that there were no operations to be
// cancelled. We swallow this error to match the behaviour on other
// platforms.
ec = asio::error_code();
}
else
{
ec = asio::error_code(last_error,
asio::error::get_system_category());
}
}
else
{
ec = asio::error_code();
}
}
#if defined(ASIO_ENABLE_CANCELIO)
else if (impl.safe_cancellation_thread_id_ == 0)
{
// No operations have been started, so there's nothing to cancel.
ec = asio::error_code();
}
else if (impl.safe_cancellation_thread_id_ == ::GetCurrentThreadId())
{
// Asynchronous operations have been started from the current thread only,
// so it is safe to try to cancel them using CancelIo.
socket_type sock = impl.socket_;
HANDLE sock_as_handle = reinterpret_cast<HANDLE>(sock);
if (!::CancelIo(sock_as_handle))
{
DWORD last_error = ::GetLastError();
ec = asio::error_code(last_error,
asio::error::get_system_category());
}
else
{
ec = asio::error_code();
}
}
else
{
// Asynchronous operations have been started from more than one thread,
// so cancellation is not safe.
ec = asio::error::operation_not_supported;
}
#else // defined(ASIO_ENABLE_CANCELIO)
else
{
// Cancellation is not supported as CancelIo may not be used.
ec = asio::error::operation_not_supported;
}
#endif // defined(ASIO_ENABLE_CANCELIO)
// Cancel any operations started via the reactor.
if (!ec)
{
select_reactor* r = static_cast<select_reactor*>(
interlocked_compare_exchange_pointer(
reinterpret_cast<void**>(&reactor_), 0, 0));
if (r)
r->cancel_ops(impl.socket_, impl.reactor_data_);
}
return ec;
}
asio::error_code win_iocp_socket_service_base::do_open(
win_iocp_socket_service_base::base_implementation_type& impl,
int family, int type, int protocol, asio::error_code& ec)
{
if (is_open(impl))
{
ec = asio::error::already_open;
return ec;
}
socket_holder sock(socket_ops::socket(family, type, protocol, ec));
if (sock.get() == invalid_socket)
return ec;
HANDLE sock_as_handle = reinterpret_cast<HANDLE>(sock.get());
if (iocp_service_.register_handle(sock_as_handle, ec))
return ec;
impl.socket_ = sock.release();
switch (type)
{
case SOCK_STREAM: impl.state_ = socket_ops::stream_oriented; break;
case SOCK_DGRAM: impl.state_ = socket_ops::datagram_oriented; break;
default: impl.state_ = 0; break;
}
impl.cancel_token_.reset(static_cast<void*>(0), socket_ops::noop_deleter());
ec = asio::error_code();
return ec;
}
asio::error_code win_iocp_socket_service_base::do_assign(
win_iocp_socket_service_base::base_implementation_type& impl,
int type, socket_type native_socket, asio::error_code& ec)
{
if (is_open(impl))
{
ec = asio::error::already_open;
return ec;
}
HANDLE sock_as_handle = reinterpret_cast<HANDLE>(native_socket);
if (iocp_service_.register_handle(sock_as_handle, ec))
return ec;
impl.socket_ = native_socket;
switch (type)
{
case SOCK_STREAM: impl.state_ = socket_ops::stream_oriented; break;
case SOCK_DGRAM: impl.state_ = socket_ops::datagram_oriented; break;
default: impl.state_ = 0; break;
}
impl.cancel_token_.reset(static_cast<void*>(0), socket_ops::noop_deleter());
ec = asio::error_code();
return ec;
}
void win_iocp_socket_service_base::start_send_op(
win_iocp_socket_service_base::base_implementation_type& impl,
WSABUF* buffers, std::size_t buffer_count,
socket_base::message_flags flags, bool noop, operation* op)
{
update_cancellation_thread_id(impl);
iocp_service_.work_started();
if (noop)
iocp_service_.on_completion(op);
else if (!is_open(impl))
iocp_service_.on_completion(op, asio::error::bad_descriptor);
else
{
DWORD bytes_transferred = 0;
int result = ::WSASend(impl.socket_, buffers,
static_cast<DWORD>(buffer_count), &bytes_transferred, flags, op, 0);
DWORD last_error = ::WSAGetLastError();
if (last_error == ERROR_PORT_UNREACHABLE)
last_error = WSAECONNREFUSED;
if (result != 0 && last_error != WSA_IO_PENDING)
iocp_service_.on_completion(op, last_error, bytes_transferred);
else
iocp_service_.on_pending(op);
}
}
void win_iocp_socket_service_base::start_send_to_op(
win_iocp_socket_service_base::base_implementation_type& impl,
WSABUF* buffers, std::size_t buffer_count, const void* addr,
int addrlen, socket_base::message_flags flags, operation* op)
{
update_cancellation_thread_id(impl);
iocp_service_.work_started();
if (!is_open(impl))
iocp_service_.on_completion(op, asio::error::bad_descriptor);
else
{
DWORD bytes_transferred = 0;
int result = ::WSASendTo(impl.socket_, buffers,
static_cast<DWORD>(buffer_count), &bytes_transferred, flags,
static_cast<const socket_addr_type*>(addr), addrlen, op, 0);
DWORD last_error = ::WSAGetLastError();
if (last_error == ERROR_PORT_UNREACHABLE)
last_error = WSAECONNREFUSED;
if (result != 0 && last_error != WSA_IO_PENDING)
iocp_service_.on_completion(op, last_error, bytes_transferred);
else
iocp_service_.on_pending(op);
}
}
void win_iocp_socket_service_base::start_receive_op(
win_iocp_socket_service_base::base_implementation_type& impl,
WSABUF* buffers, std::size_t buffer_count,
socket_base::message_flags flags, bool noop, operation* op)
{
update_cancellation_thread_id(impl);
iocp_service_.work_started();
if (noop)
iocp_service_.on_completion(op);
else if (!is_open(impl))
iocp_service_.on_completion(op, asio::error::bad_descriptor);
else
{
DWORD bytes_transferred = 0;
DWORD recv_flags = flags;
int result = ::WSARecv(impl.socket_, buffers,
static_cast<DWORD>(buffer_count),
&bytes_transferred, &recv_flags, op, 0);
DWORD last_error = ::WSAGetLastError();
if (last_error == ERROR_NETNAME_DELETED)
last_error = WSAECONNRESET;
else if (last_error == ERROR_PORT_UNREACHABLE)
last_error = WSAECONNREFUSED;
if (result != 0 && last_error != WSA_IO_PENDING)
iocp_service_.on_completion(op, last_error, bytes_transferred);
else
iocp_service_.on_pending(op);
}
}
int win_iocp_socket_service_base::start_null_buffers_receive_op(
win_iocp_socket_service_base::base_implementation_type& impl,
socket_base::message_flags flags, reactor_op* op, operation* iocp_op)
{
if ((impl.state_ & socket_ops::stream_oriented) != 0)
{
// For stream sockets on Windows, we may issue a 0-byte overlapped
// WSARecv to wait until there is data available on the socket.
::WSABUF buf = { 0, 0 };
start_receive_op(impl, &buf, 1, flags, false, iocp_op);
return -1;
}
else
{
int op_type = (flags & socket_base::message_out_of_band)
? select_reactor::except_op : select_reactor::read_op;
start_reactor_op(impl, op_type, op);
return op_type;
}
}
void win_iocp_socket_service_base::start_receive_from_op(
win_iocp_socket_service_base::base_implementation_type& impl,
WSABUF* buffers, std::size_t buffer_count, void* addr,
socket_base::message_flags flags, int* addrlen, operation* op)
{
update_cancellation_thread_id(impl);
iocp_service_.work_started();
if (!is_open(impl))
iocp_service_.on_completion(op, asio::error::bad_descriptor);
else
{
DWORD bytes_transferred = 0;
DWORD recv_flags = flags;
int result = ::WSARecvFrom(impl.socket_, buffers,
static_cast<DWORD>(buffer_count), &bytes_transferred, &recv_flags,
static_cast<socket_addr_type*>(addr), addrlen, op, 0);
DWORD last_error = ::WSAGetLastError();
if (last_error == ERROR_PORT_UNREACHABLE)
last_error = WSAECONNREFUSED;
if (result != 0 && last_error != WSA_IO_PENDING)
iocp_service_.on_completion(op, last_error, bytes_transferred);
else
iocp_service_.on_pending(op);
}
}
void win_iocp_socket_service_base::start_accept_op(
win_iocp_socket_service_base::base_implementation_type& impl,
bool peer_is_open, socket_holder& new_socket, int family, int type,
int protocol, void* output_buffer, DWORD address_length, operation* op)
{
update_cancellation_thread_id(impl);
iocp_service_.work_started();
if (!is_open(impl))
iocp_service_.on_completion(op, asio::error::bad_descriptor);
else if (peer_is_open)
iocp_service_.on_completion(op, asio::error::already_open);
else
{
asio::error_code ec;
new_socket.reset(socket_ops::socket(family, type, protocol, ec));
if (new_socket.get() == invalid_socket)
iocp_service_.on_completion(op, ec);
else
{
DWORD bytes_read = 0;
BOOL result = ::AcceptEx(impl.socket_, new_socket.get(), output_buffer,
0, address_length, address_length, &bytes_read, op);
DWORD last_error = ::WSAGetLastError();
if (!result && last_error != WSA_IO_PENDING)
iocp_service_.on_completion(op, last_error);
else
iocp_service_.on_pending(op);
}
}
}
void win_iocp_socket_service_base::restart_accept_op(
socket_type s, socket_holder& new_socket, int family, int type,
int protocol, void* output_buffer, DWORD address_length,
long* cancel_requested, operation* op)
{
new_socket.reset();
iocp_service_.work_started();
// Check if we were cancelled after the first AcceptEx completed. If so,
// complete the operation now rather than issuing another AcceptEx that
// would reuse the already-completed operation object.
if (cancel_requested)
{
if (::InterlockedExchangeAdd(cancel_requested, 0) == 1)
{
iocp_service_.on_completion(op, asio::error::operation_aborted);
return;
}
}
asio::error_code ec;
new_socket.reset(socket_ops::socket(family, type, protocol, ec));
if (new_socket.get() == invalid_socket)
iocp_service_.on_completion(op, ec);
else
{
DWORD bytes_read = 0;
BOOL result = ::AcceptEx(s, new_socket.get(), output_buffer,
0, address_length, address_length, &bytes_read, op);
DWORD last_error = ::WSAGetLastError();
if (!result && last_error != WSA_IO_PENDING)
iocp_service_.on_completion(op, last_error);
else
{
#if defined(_WIN32_WINNT) && (_WIN32_WINNT >= 0x0600)
if (cancel_requested)
{
if (::InterlockedExchangeAdd(cancel_requested, 0) == 1)
{
HANDLE sock_as_handle = reinterpret_cast<HANDLE>(s);
::CancelIoEx(sock_as_handle, op);
}
}
#endif // defined(_WIN32_WINNT) && (_WIN32_WINNT >= 0x0600)
iocp_service_.on_pending(op);
}
}
}
void win_iocp_socket_service_base::start_reactor_op(
win_iocp_socket_service_base::base_implementation_type& impl,
int op_type, reactor_op* op)
{
select_reactor& r = get_reactor();
update_cancellation_thread_id(impl);
if (is_open(impl))
{
r.start_op(op_type, impl.socket_, impl.reactor_data_, op, false, false);
return;
}
else
op->ec_ = asio::error::bad_descriptor;
iocp_service_.post_immediate_completion(op, false);
}
int win_iocp_socket_service_base::start_connect_op(
win_iocp_socket_service_base::base_implementation_type& impl,
int family, int type, const void* addr, std::size_t addrlen,
win_iocp_socket_connect_op_base* op, operation* iocp_op)
{
// If ConnectEx is available, use that.
if (family == ASIO_OS_DEF(AF_INET)
|| family == ASIO_OS_DEF(AF_INET6))
{
if (connect_ex_fn connect_ex = get_connect_ex(impl, type))
{
union address_union
{
socket_addr_type base;
sockaddr_in4_type v4;
sockaddr_in6_type v6;
} a;
using namespace std; // For memset.
memset(&a, 0, sizeof(a));
a.base.sa_family = family;
socket_ops::bind(impl.socket_, &a.base,
family == ASIO_OS_DEF(AF_INET)
? sizeof(a.v4) : sizeof(a.v6), op->ec_);
if (op->ec_ && op->ec_ != asio::error::invalid_argument)
{
iocp_service_.post_immediate_completion(op, false);
return -1;
}
op->connect_ex_ = true;
update_cancellation_thread_id(impl);
iocp_service_.work_started();
BOOL result = connect_ex(impl.socket_,
static_cast<const socket_addr_type*>(addr),
static_cast<int>(addrlen), 0, 0, 0, iocp_op);
DWORD last_error = ::WSAGetLastError();
if (!result && last_error != WSA_IO_PENDING)
iocp_service_.on_completion(iocp_op, last_error);
else
iocp_service_.on_pending(iocp_op);
return -1;
}
}
// Otherwise, fall back to a reactor-based implementation.
select_reactor& r = get_reactor();
update_cancellation_thread_id(impl);
if ((impl.state_ & socket_ops::non_blocking) != 0
|| socket_ops::set_internal_non_blocking(
impl.socket_, impl.state_, true, op->ec_))
{
if (socket_ops::connect(impl.socket_, addr, addrlen, op->ec_) != 0)
{
if (op->ec_ == asio::error::in_progress
|| op->ec_ == asio::error::would_block)
{
op->ec_ = asio::error_code();
r.start_op(select_reactor::connect_op, impl.socket_,
impl.reactor_data_, op, false, false);
return select_reactor::connect_op;
}
}
}
r.post_immediate_completion(op, false);
return -1;
}
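// Note (illustrative): ConnectEx, unlike connect, requires the socket to be
// bound before it is called. That is why the code above first binds to the
// family's wildcard address (only sa_family set, everything else zero) and
// tolerates asio::error::invalid_argument, the error reported when the
// socket has already been bound by the caller.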
void win_iocp_socket_service_base::close_for_destruction(
win_iocp_socket_service_base::base_implementation_type& impl)
{
if (is_open(impl))
{
ASIO_HANDLER_OPERATION((iocp_service_.context(),
"socket", &impl, impl.socket_, "close"));
// Check if the reactor was created, in which case we need to close the
// socket on the reactor as well to cancel any operations that might be
// running there.
select_reactor* r = static_cast<select_reactor*>(
interlocked_compare_exchange_pointer(
reinterpret_cast<void**>(&reactor_), 0, 0));
if (r)
r->deregister_descriptor(impl.socket_, impl.reactor_data_, true);
asio::error_code ignored_ec;
socket_ops::close(impl.socket_, impl.state_, true, ignored_ec);
if (r)
r->cleanup_descriptor_data(impl.reactor_data_);
}
impl.socket_ = invalid_socket;
impl.state_ = 0;
impl.cancel_token_.reset();
#if defined(ASIO_ENABLE_CANCELIO)
impl.safe_cancellation_thread_id_ = 0;
#endif // defined(ASIO_ENABLE_CANCELIO)
}
void win_iocp_socket_service_base::update_cancellation_thread_id(
win_iocp_socket_service_base::base_implementation_type& impl)
{
#if defined(ASIO_ENABLE_CANCELIO)
if (impl.safe_cancellation_thread_id_ == 0)
impl.safe_cancellation_thread_id_ = ::GetCurrentThreadId();
else if (impl.safe_cancellation_thread_id_ != ::GetCurrentThreadId())
impl.safe_cancellation_thread_id_ = ~DWORD(0);
#else // defined(ASIO_ENABLE_CANCELIO)
(void)impl;
#endif // defined(ASIO_ENABLE_CANCELIO)
}
select_reactor& win_iocp_socket_service_base::get_reactor()
{
select_reactor* r = static_cast<select_reactor*>(
interlocked_compare_exchange_pointer(
reinterpret_cast<void**>(&reactor_), 0, 0));
if (!r)
{
r = &(use_service<select_reactor>(context_));
interlocked_exchange_pointer(reinterpret_cast<void**>(&reactor_), r);
}
return *r;
}
win_iocp_socket_service_base::connect_ex_fn
win_iocp_socket_service_base::get_connect_ex(
win_iocp_socket_service_base::base_implementation_type& impl, int type)
{
#if defined(ASIO_DISABLE_CONNECTEX)
(void)impl;
(void)type;
return 0;
#else // defined(ASIO_DISABLE_CONNECTEX)
if (type != ASIO_OS_DEF(SOCK_STREAM)
&& type != ASIO_OS_DEF(SOCK_SEQPACKET))
return 0;
void* ptr = interlocked_compare_exchange_pointer(&connect_ex_, 0, 0);
if (!ptr)
{
GUID guid = { 0x25a207b9, 0xddf3, 0x4660,
{ 0x8e, 0xe9, 0x76, 0xe5, 0x8c, 0x74, 0x06, 0x3e } };
DWORD bytes = 0;
if (::WSAIoctl(impl.socket_, SIO_GET_EXTENSION_FUNCTION_POINTER,
&guid, sizeof(guid), &ptr, sizeof(ptr), &bytes, 0, 0) != 0)
{
// Set connect_ex_ to a special value to indicate that ConnectEx is
// unavailable. That way we won't bother trying to look it up again.
ptr = this;
}
interlocked_exchange_pointer(&connect_ex_, ptr);
}
return reinterpret_cast<connect_ex_fn>(ptr == this ? 0 : ptr);
#endif // defined(ASIO_DISABLE_CONNECTEX)
}
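// Note (illustrative): the GUID above is WSAID_CONNECTEX. Winsock extension
// functions such as ConnectEx are not exported from a DLL directly; they
// are obtained per-socket via WSAIoctl with
// SIO_GET_EXTENSION_FUNCTION_POINTER, e.g. (hypothetical stand-alone
// sketch, requires <mswsock.h>):
//
//   GUID guid = WSAID_CONNECTEX;
//   LPFN_CONNECTEX connect_ex = 0;
//   DWORD bytes = 0;
//   ::WSAIoctl(s, SIO_GET_EXTENSION_FUNCTION_POINTER, &guid, sizeof(guid),
//       &connect_ex, sizeof(connect_ex), &bytes, 0, 0);
//
// The result is cached in connect_ex_, with `this` used as a sentinel for
// "lookup failed" so the WSAIoctl call is attempted at most once.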
win_iocp_socket_service_base::nt_set_info_fn
win_iocp_socket_service_base::get_nt_set_info()
{
void* ptr = interlocked_compare_exchange_pointer(&nt_set_info_, 0, 0);
if (!ptr)
{
if (HMODULE h = ::GetModuleHandleA("NTDLL.DLL"))
ptr = reinterpret_cast<void*>(GetProcAddress(h, "NtSetInformationFile"));
// On failure, set nt_set_info_ to a special value to indicate that the
// NtSetInformationFile function is unavailable. That way we won't bother
// trying to look it up again.
interlocked_exchange_pointer(&nt_set_info_, ptr ? ptr : this);
}
return reinterpret_cast<nt_set_info_fn>(ptr == this ? 0 : ptr);
}
void* win_iocp_socket_service_base::interlocked_compare_exchange_pointer(
void** dest, void* exch, void* cmp)
{
#if defined(_M_IX86)
return reinterpret_cast<void*>(InterlockedCompareExchange(
reinterpret_cast<PLONG>(dest), reinterpret_cast<LONG>(exch),
reinterpret_cast<LONG>(cmp)));
#else
return InterlockedCompareExchangePointer(dest, exch, cmp);
#endif
}
void* win_iocp_socket_service_base::interlocked_exchange_pointer(
void** dest, void* val)
{
#if defined(_M_IX86)
return reinterpret_cast<void*>(InterlockedExchange(
reinterpret_cast<PLONG>(dest), reinterpret_cast<LONG>(val)));
#else
return InterlockedExchangePointer(dest, val);
#endif
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // defined(ASIO_HAS_IOCP)
#endif // ASIO_DETAIL_IMPL_WIN_IOCP_SOCKET_SERVICE_BASE_IPP
|
0 | repos/asio/asio/include/asio/detail | repos/asio/asio/include/asio/detail/impl/resolver_service_base.ipp | //
// detail/impl/resolver_service_base.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2024 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_RESOLVER_SERVICE_BASE_IPP
#define ASIO_DETAIL_IMPL_RESOLVER_SERVICE_BASE_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#include "asio/detail/resolver_service_base.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
class resolver_service_base::work_scheduler_runner
{
public:
work_scheduler_runner(scheduler_impl& work_scheduler)
: work_scheduler_(work_scheduler)
{
}
void operator()()
{
asio::error_code ec;
work_scheduler_.run(ec);
}
private:
scheduler_impl& work_scheduler_;
};
resolver_service_base::resolver_service_base(execution_context& context)
: scheduler_(asio::use_service<scheduler_impl>(context)),
work_scheduler_(new scheduler_impl(context, -1, false)),
work_thread_(0)
{
work_scheduler_->work_started();
}
resolver_service_base::~resolver_service_base()
{
base_shutdown();
}
void resolver_service_base::base_shutdown()
{
if (work_scheduler_.get())
{
work_scheduler_->work_finished();
work_scheduler_->stop();
if (work_thread_.get())
{
work_thread_->join();
work_thread_.reset();
}
work_scheduler_.reset();
}
}
void resolver_service_base::base_notify_fork(
execution_context::fork_event fork_ev)
{
if (work_thread_.get())
{
if (fork_ev == execution_context::fork_prepare)
{
work_scheduler_->stop();
work_thread_->join();
work_thread_.reset();
}
}
else if (fork_ev != execution_context::fork_prepare)
{
work_scheduler_->restart();
}
}
void resolver_service_base::construct(
resolver_service_base::implementation_type& impl)
{
impl.reset(static_cast<void*>(0), socket_ops::noop_deleter());
}
void resolver_service_base::destroy(
resolver_service_base::implementation_type& impl)
{
ASIO_HANDLER_OPERATION((scheduler_.context(),
"resolver", &impl, 0, "cancel"));
impl.reset();
}
void resolver_service_base::move_construct(implementation_type& impl,
implementation_type& other_impl)
{
impl = static_cast<implementation_type&&>(other_impl);
}
void resolver_service_base::move_assign(implementation_type& impl,
resolver_service_base&, implementation_type& other_impl)
{
destroy(impl);
impl = static_cast<implementation_type&&>(other_impl);
}
void resolver_service_base::cancel(
resolver_service_base::implementation_type& impl)
{
ASIO_HANDLER_OPERATION((scheduler_.context(),
"resolver", &impl, 0, "cancel"));
impl.reset(static_cast<void*>(0), socket_ops::noop_deleter());
}
void resolver_service_base::start_resolve_op(resolve_op* op)
{
if (ASIO_CONCURRENCY_HINT_IS_LOCKING(SCHEDULER,
scheduler_.concurrency_hint()))
{
start_work_thread();
scheduler_.work_started();
work_scheduler_->post_immediate_completion(op, false);
}
else
{
op->ec_ = asio::error::operation_not_supported;
scheduler_.post_immediate_completion(op, false);
}
}
void resolver_service_base::start_work_thread()
{
asio::detail::mutex::scoped_lock lock(mutex_);
if (!work_thread_.get())
{
work_thread_.reset(new asio::detail::thread(
work_scheduler_runner(*work_scheduler_)));
}
}
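// Note (illustrative): resolver operations wrap the blocking getaddrinfo /
// getnameinfo calls, so start_resolve_op() posts them to the private
// work_scheduler_, which is drained by the single worker thread created
// lazily here. The extra work_started() call on the main scheduler keeps
// io_context::run() from returning while a resolve is still in flight on
// the worker thread.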
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // ASIO_DETAIL_IMPL_RESOLVER_SERVICE_BASE_IPP
|
0 | repos/asio/asio/include/asio/detail | repos/asio/asio/include/asio/detail/impl/io_uring_service.hpp | //
// detail/impl/io_uring_service.hpp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2024 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_IO_URING_SERVICE_HPP
#define ASIO_DETAIL_IMPL_IO_URING_SERVICE_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#if defined(ASIO_HAS_IO_URING)
#include "asio/detail/scheduler.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
inline void io_uring_service::post_immediate_completion(
operation* op, bool is_continuation)
{
scheduler_.post_immediate_completion(op, is_continuation);
}
template <typename Time_Traits>
void io_uring_service::add_timer_queue(timer_queue<Time_Traits>& queue)
{
do_add_timer_queue(queue);
}
template <typename Time_Traits>
void io_uring_service::remove_timer_queue(timer_queue<Time_Traits>& queue)
{
do_remove_timer_queue(queue);
}
template <typename Time_Traits>
void io_uring_service::schedule_timer(timer_queue<Time_Traits>& queue,
const typename Time_Traits::time_type& time,
typename timer_queue<Time_Traits>::per_timer_data& timer, wait_op* op)
{
mutex::scoped_lock lock(mutex_);
if (shutdown_)
{
scheduler_.post_immediate_completion(op, false);
return;
}
bool earliest = queue.enqueue_timer(time, timer, op);
scheduler_.work_started();
if (earliest)
{
update_timeout();
post_submit_sqes_op(lock);
}
}
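// Note (illustrative): the ring only needs to be woken when a newly
// scheduled timer becomes the earliest expiry; enqueue_timer() reports this
// through its return value, and only then are the timeout update and SQE
// submission posted.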
template <typename Time_Traits>
std::size_t io_uring_service::cancel_timer(timer_queue<Time_Traits>& queue,
typename timer_queue<Time_Traits>::per_timer_data& timer,
std::size_t max_cancelled)
{
mutex::scoped_lock lock(mutex_);
op_queue<operation> ops;
std::size_t n = queue.cancel_timer(timer, ops, max_cancelled);
lock.unlock();
scheduler_.post_deferred_completions(ops);
return n;
}
template <typename Time_Traits>
void io_uring_service::cancel_timer_by_key(timer_queue<Time_Traits>& queue,
typename timer_queue<Time_Traits>::per_timer_data* timer,
void* cancellation_key)
{
mutex::scoped_lock lock(mutex_);
op_queue<operation> ops;
queue.cancel_timer_by_key(timer, ops, cancellation_key);
lock.unlock();
scheduler_.post_deferred_completions(ops);
}
template <typename Time_Traits>
void io_uring_service::move_timer(timer_queue<Time_Traits>& queue,
typename timer_queue<Time_Traits>::per_timer_data& target,
typename timer_queue<Time_Traits>::per_timer_data& source)
{
mutex::scoped_lock lock(mutex_);
op_queue<operation> ops;
queue.cancel_timer(target, ops);
queue.move_timer(target, source);
lock.unlock();
scheduler_.post_deferred_completions(ops);
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // defined(ASIO_HAS_IO_URING)
#endif // ASIO_DETAIL_IMPL_IO_URING_SERVICE_HPP
|
0 | repos/asio/asio/include/asio/detail | repos/asio/asio/include/asio/detail/impl/posix_thread.ipp | //
// detail/impl/posix_thread.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2024 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_POSIX_THREAD_IPP
#define ASIO_DETAIL_IMPL_POSIX_THREAD_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if defined(ASIO_HAS_PTHREADS)
#include "asio/detail/posix_thread.hpp"
#include "asio/detail/throw_error.hpp"
#include "asio/error.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
posix_thread::~posix_thread()
{
if (!joined_)
::pthread_detach(thread_);
}
void posix_thread::join()
{
if (!joined_)
{
::pthread_join(thread_, 0);
joined_ = true;
}
}
std::size_t posix_thread::hardware_concurrency()
{
#if defined(_SC_NPROCESSORS_ONLN)
long result = sysconf(_SC_NPROCESSORS_ONLN);
if (result > 0)
return result;
#endif // defined(_SC_NPROCESSORS_ONLN)
return 0;
}
void posix_thread::start_thread(func_base* arg)
{
int error = ::pthread_create(&thread_, 0,
asio_detail_posix_thread_function, arg);
if (error != 0)
{
delete arg;
asio::error_code ec(error,
asio::error::get_system_category());
asio::detail::throw_error(ec, "thread");
}
}
void* asio_detail_posix_thread_function(void* arg)
{
posix_thread::auto_func_base_ptr func = {
static_cast<posix_thread::func_base*>(arg) };
func.ptr->run();
return 0;
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // defined(ASIO_HAS_PTHREADS)
#endif // ASIO_DETAIL_IMPL_POSIX_THREAD_IPP
|
0 | repos/asio/asio/include/asio/detail | repos/asio/asio/include/asio/detail/impl/throw_error.ipp | //
// detail/impl/throw_error.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2024 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_THROW_ERROR_IPP
#define ASIO_DETAIL_IMPL_THROW_ERROR_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#include "asio/detail/throw_error.hpp"
#include "asio/system_error.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
void do_throw_error(
const asio::error_code& err
ASIO_SOURCE_LOCATION_PARAM)
{
asio::system_error e(err);
asio::detail::throw_exception(e ASIO_SOURCE_LOCATION_ARG);
}
void do_throw_error(
const asio::error_code& err,
const char* location
ASIO_SOURCE_LOCATION_PARAM)
{
asio::system_error e(err, location);
asio::detail::throw_exception(e ASIO_SOURCE_LOCATION_ARG);
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // ASIO_DETAIL_IMPL_THROW_ERROR_IPP
|
0 | repos/asio/asio/include/asio/detail | repos/asio/asio/include/asio/detail/impl/dev_poll_reactor.hpp | //
// detail/impl/dev_poll_reactor.hpp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2024 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_DEV_POLL_REACTOR_HPP
#define ASIO_DETAIL_IMPL_DEV_POLL_REACTOR_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if defined(ASIO_HAS_DEV_POLL)
#include "asio/detail/scheduler.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
inline void dev_poll_reactor::post_immediate_completion(
operation* op, bool is_continuation) const
{
scheduler_.post_immediate_completion(op, is_continuation);
}
template <typename Time_Traits>
void dev_poll_reactor::add_timer_queue(timer_queue<Time_Traits>& queue)
{
do_add_timer_queue(queue);
}
template <typename Time_Traits>
void dev_poll_reactor::remove_timer_queue(timer_queue<Time_Traits>& queue)
{
do_remove_timer_queue(queue);
}
template <typename Time_Traits>
void dev_poll_reactor::schedule_timer(timer_queue<Time_Traits>& queue,
const typename Time_Traits::time_type& time,
typename timer_queue<Time_Traits>::per_timer_data& timer, wait_op* op)
{
asio::detail::mutex::scoped_lock lock(mutex_);
if (shutdown_)
{
scheduler_.post_immediate_completion(op, false);
return;
}
bool earliest = queue.enqueue_timer(time, timer, op);
scheduler_.work_started();
if (earliest)
interrupter_.interrupt();
}
template <typename Time_Traits>
std::size_t dev_poll_reactor::cancel_timer(timer_queue<Time_Traits>& queue,
typename timer_queue<Time_Traits>::per_timer_data& timer,
std::size_t max_cancelled)
{
asio::detail::mutex::scoped_lock lock(mutex_);
op_queue<operation> ops;
std::size_t n = queue.cancel_timer(timer, ops, max_cancelled);
lock.unlock();
scheduler_.post_deferred_completions(ops);
return n;
}
template <typename Time_Traits>
void dev_poll_reactor::cancel_timer_by_key(timer_queue<Time_Traits>& queue,
typename timer_queue<Time_Traits>::per_timer_data* timer,
void* cancellation_key)
{
asio::detail::mutex::scoped_lock lock(mutex_);
op_queue<operation> ops;
queue.cancel_timer_by_key(timer, ops, cancellation_key);
lock.unlock();
scheduler_.post_deferred_completions(ops);
}
template <typename Time_Traits>
void dev_poll_reactor::move_timer(timer_queue<Time_Traits>& queue,
typename timer_queue<Time_Traits>::per_timer_data& target,
typename timer_queue<Time_Traits>::per_timer_data& source)
{
asio::detail::mutex::scoped_lock lock(mutex_);
op_queue<operation> ops;
queue.cancel_timer(target, ops);
queue.move_timer(target, source);
lock.unlock();
scheduler_.post_deferred_completions(ops);
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // defined(ASIO_HAS_DEV_POLL)
#endif // ASIO_DETAIL_IMPL_DEV_POLL_REACTOR_HPP
|
0 | repos/asio/asio/include/asio/detail | repos/asio/asio/include/asio/detail/impl/winrt_timer_scheduler.ipp | //
// detail/impl/winrt_timer_scheduler.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2024 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_WINRT_TIMER_SCHEDULER_IPP
#define ASIO_DETAIL_IMPL_WINRT_TIMER_SCHEDULER_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if defined(ASIO_WINDOWS_RUNTIME)
#include "asio/detail/bind_handler.hpp"
#include "asio/detail/winrt_timer_scheduler.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
winrt_timer_scheduler::winrt_timer_scheduler(execution_context& context)
: execution_context_service_base<winrt_timer_scheduler>(context),
scheduler_(use_service<scheduler_impl>(context)),
mutex_(),
event_(),
timer_queues_(),
thread_(0),
stop_thread_(false),
shutdown_(false)
{
thread_ = new asio::detail::thread(
bind_handler(&winrt_timer_scheduler::call_run_thread, this));
}
winrt_timer_scheduler::~winrt_timer_scheduler()
{
shutdown();
}
void winrt_timer_scheduler::shutdown()
{
asio::detail::mutex::scoped_lock lock(mutex_);
shutdown_ = true;
stop_thread_ = true;
event_.signal(lock);
lock.unlock();
if (thread_)
{
thread_->join();
delete thread_;
thread_ = 0;
}
op_queue<operation> ops;
timer_queues_.get_all_timers(ops);
scheduler_.abandon_operations(ops);
}
void winrt_timer_scheduler::notify_fork(execution_context::fork_event)
{
}
void winrt_timer_scheduler::init_task()
{
}
void winrt_timer_scheduler::run_thread()
{
asio::detail::mutex::scoped_lock lock(mutex_);
while (!stop_thread_)
{
const long max_wait_duration = 5 * 60 * 1000000;
long wait_duration = timer_queues_.wait_duration_usec(max_wait_duration);
event_.wait_for_usec(lock, wait_duration);
event_.clear(lock);
op_queue<operation> ops;
timer_queues_.get_ready_timers(ops);
if (!ops.empty())
{
lock.unlock();
scheduler_.post_deferred_completions(ops);
lock.lock();
}
}
}
void winrt_timer_scheduler::call_run_thread(winrt_timer_scheduler* scheduler)
{
scheduler->run_thread();
}
void winrt_timer_scheduler::do_add_timer_queue(timer_queue_base& queue)
{
mutex::scoped_lock lock(mutex_);
timer_queues_.insert(&queue);
}
void winrt_timer_scheduler::do_remove_timer_queue(timer_queue_base& queue)
{
mutex::scoped_lock lock(mutex_);
timer_queues_.erase(&queue);
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // defined(ASIO_WINDOWS_RUNTIME)
#endif // ASIO_DETAIL_IMPL_WINRT_TIMER_SCHEDULER_IPP
|
0 | repos/asio/asio/include/asio/detail | repos/asio/asio/include/asio/detail/impl/timer_queue_ptime.ipp | //
// detail/impl/timer_queue_ptime.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2024 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_TIMER_QUEUE_PTIME_IPP
#define ASIO_DETAIL_IMPL_TIMER_QUEUE_PTIME_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if defined(ASIO_HAS_BOOST_DATE_TIME)
#include "asio/detail/timer_queue_ptime.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
timer_queue<time_traits<boost::posix_time::ptime>>::timer_queue()
{
}
timer_queue<time_traits<boost::posix_time::ptime>>::~timer_queue()
{
}
bool timer_queue<time_traits<boost::posix_time::ptime>>::enqueue_timer(
const time_type& time, per_timer_data& timer, wait_op* op)
{
return impl_.enqueue_timer(time, timer, op);
}
bool timer_queue<time_traits<boost::posix_time::ptime>>::empty() const
{
return impl_.empty();
}
long timer_queue<time_traits<boost::posix_time::ptime>>::wait_duration_msec(
long max_duration) const
{
return impl_.wait_duration_msec(max_duration);
}
long timer_queue<time_traits<boost::posix_time::ptime>>::wait_duration_usec(
long max_duration) const
{
return impl_.wait_duration_usec(max_duration);
}
void timer_queue<time_traits<boost::posix_time::ptime>>::get_ready_timers(
op_queue<operation>& ops)
{
impl_.get_ready_timers(ops);
}
void timer_queue<time_traits<boost::posix_time::ptime>>::get_all_timers(
op_queue<operation>& ops)
{
impl_.get_all_timers(ops);
}
std::size_t timer_queue<time_traits<boost::posix_time::ptime>>::cancel_timer(
per_timer_data& timer, op_queue<operation>& ops, std::size_t max_cancelled)
{
return impl_.cancel_timer(timer, ops, max_cancelled);
}
void timer_queue<time_traits<boost::posix_time::ptime>>::cancel_timer_by_key(
per_timer_data* timer, op_queue<operation>& ops, void* cancellation_key)
{
impl_.cancel_timer_by_key(timer, ops, cancellation_key);
}
void timer_queue<time_traits<boost::posix_time::ptime>>::move_timer(
per_timer_data& target, per_timer_data& source)
{
impl_.move_timer(target, source);
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // defined(ASIO_HAS_BOOST_DATE_TIME)
#endif // ASIO_DETAIL_IMPL_TIMER_QUEUE_PTIME_IPP
|
0 | repos/asio/asio/include/asio/detail | repos/asio/asio/include/asio/detail/impl/pipe_select_interrupter.ipp | //
// detail/impl/pipe_select_interrupter.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2024 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_PIPE_SELECT_INTERRUPTER_IPP
#define ASIO_DETAIL_IMPL_PIPE_SELECT_INTERRUPTER_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if !defined(ASIO_WINDOWS_RUNTIME)
#if !defined(ASIO_WINDOWS)
#if !defined(__CYGWIN__)
#if !defined(__SYMBIAN32__)
#if !defined(ASIO_HAS_EVENTFD)
#include <fcntl.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#include "asio/detail/pipe_select_interrupter.hpp"
#include "asio/detail/socket_types.hpp"
#include "asio/detail/throw_error.hpp"
#include "asio/error.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
pipe_select_interrupter::pipe_select_interrupter()
{
open_descriptors();
}
void pipe_select_interrupter::open_descriptors()
{
int pipe_fds[2];
if (pipe(pipe_fds) == 0)
{
read_descriptor_ = pipe_fds[0];
::fcntl(read_descriptor_, F_SETFL, O_NONBLOCK);
write_descriptor_ = pipe_fds[1];
::fcntl(write_descriptor_, F_SETFL, O_NONBLOCK);
#if defined(FD_CLOEXEC)
::fcntl(read_descriptor_, F_SETFD, FD_CLOEXEC);
::fcntl(write_descriptor_, F_SETFD, FD_CLOEXEC);
#endif // defined(FD_CLOEXEC)
}
else
{
asio::error_code ec(errno,
asio::error::get_system_category());
asio::detail::throw_error(ec, "pipe_select_interrupter");
}
}
pipe_select_interrupter::~pipe_select_interrupter()
{
close_descriptors();
}
void pipe_select_interrupter::close_descriptors()
{
if (read_descriptor_ != -1)
::close(read_descriptor_);
if (write_descriptor_ != -1)
::close(write_descriptor_);
}
void pipe_select_interrupter::recreate()
{
close_descriptors();
write_descriptor_ = -1;
read_descriptor_ = -1;
open_descriptors();
}
void pipe_select_interrupter::interrupt()
{
char byte = 0;
signed_size_type result = ::write(write_descriptor_, &byte, 1);
(void)result;
}
bool pipe_select_interrupter::reset()
{
for (;;)
{
char data[1024];
signed_size_type bytes_read = ::read(read_descriptor_, data, sizeof(data));
if (bytes_read == sizeof(data))
continue;
if (bytes_read > 0)
return true;
if (bytes_read == 0)
return false;
if (errno == EINTR)
continue;
if (errno == EWOULDBLOCK || errno == EAGAIN)
return true;
return false;
}
}
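// Note (illustrative): this is the classic "self-pipe trick". interrupt()
// writes a single byte to write_descriptor_, making read_descriptor_
// readable and so waking any select()/poll() call that includes it in its
// read set; reset() then drains the pipe so the next interrupt starts from
// a clean state. Hypothetical caller-side sketch:
//
//   fd_set fds;
//   FD_ZERO(&fds);
//   FD_SET(interrupter.read_descriptor(), &fds);
//   ::select(interrupter.read_descriptor() + 1, &fds, 0, 0, 0);
//   // another thread: interrupter.interrupt();  // unblocks the select()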
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // !defined(ASIO_HAS_EVENTFD)
#endif // !defined(__SYMBIAN32__)
#endif // !defined(__CYGWIN__)
#endif // !defined(ASIO_WINDOWS)
#endif // !defined(ASIO_WINDOWS_RUNTIME)
#endif // ASIO_DETAIL_IMPL_PIPE_SELECT_INTERRUPTER_IPP
|
0 | repos/asio/asio/include/asio/detail | repos/asio/asio/include/asio/detail/impl/thread_context.ipp | //
// detail/impl/thread_context.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2024 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_THREAD_CONTEXT_IPP
#define ASIO_DETAIL_IMPL_THREAD_CONTEXT_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
thread_info_base* thread_context::top_of_thread_call_stack()
{
return thread_call_stack::top();
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // ASIO_DETAIL_IMPL_THREAD_CONTEXT_IPP
|
0 | repos/asio/asio/include/asio/detail | repos/asio/asio/include/asio/detail/impl/win_thread.ipp | //
// detail/impl/win_thread.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2024 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_WIN_THREAD_IPP
#define ASIO_DETAIL_IMPL_WIN_THREAD_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if defined(ASIO_WINDOWS) \
&& !defined(ASIO_WINDOWS_APP) \
&& !defined(UNDER_CE)
#include <process.h>
#include "asio/detail/throw_error.hpp"
#include "asio/detail/win_thread.hpp"
#include "asio/error.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
win_thread::~win_thread()
{
::CloseHandle(thread_);
// The exit_event_ handle is deliberately allowed to leak here since it
// is an error for the owner of an internal thread not to join() it.
}
void win_thread::join()
{
HANDLE handles[2] = { exit_event_, thread_ };
::WaitForMultipleObjects(2, handles, FALSE, INFINITE);
::CloseHandle(exit_event_);
if (terminate_threads())
{
::TerminateThread(thread_, 0);
}
else
{
::QueueUserAPC(apc_function, thread_, 0);
::WaitForSingleObject(thread_, INFINITE);
}
}
std::size_t win_thread::hardware_concurrency()
{
SYSTEM_INFO system_info;
::GetSystemInfo(&system_info);
return system_info.dwNumberOfProcessors;
}
void win_thread::start_thread(func_base* arg, unsigned int stack_size)
{
::HANDLE entry_event = 0;
arg->entry_event_ = entry_event = ::CreateEventW(0, true, false, 0);
if (!entry_event)
{
DWORD last_error = ::GetLastError();
delete arg;
asio::error_code ec(last_error,
asio::error::get_system_category());
asio::detail::throw_error(ec, "thread.entry_event");
}
arg->exit_event_ = exit_event_ = ::CreateEventW(0, true, false, 0);
if (!exit_event_)
{
DWORD last_error = ::GetLastError();
delete arg;
asio::error_code ec(last_error,
asio::error::get_system_category());
asio::detail::throw_error(ec, "thread.exit_event");
}
unsigned int thread_id = 0;
thread_ = reinterpret_cast<HANDLE>(::_beginthreadex(0,
stack_size, win_thread_function, arg, 0, &thread_id));
if (!thread_)
{
DWORD last_error = ::GetLastError();
delete arg;
if (entry_event)
::CloseHandle(entry_event);
if (exit_event_)
::CloseHandle(exit_event_);
asio::error_code ec(last_error,
asio::error::get_system_category());
asio::detail::throw_error(ec, "thread");
}
if (entry_event)
{
::WaitForSingleObject(entry_event, INFINITE);
::CloseHandle(entry_event);
}
}
unsigned int __stdcall win_thread_function(void* arg)
{
win_thread::auto_func_base_ptr func = {
static_cast<win_thread::func_base*>(arg) };
::SetEvent(func.ptr->entry_event_);
func.ptr->run();
// Signal that the thread has finished its work, but rather than returning go
// to sleep to put the thread into a well known state. If the thread is being
// joined during global object destruction then it may be killed using
// TerminateThread (to avoid a deadlock in DllMain). Otherwise, the SleepEx
// call will be interrupted using QueueUserAPC and the thread will shut down
// cleanly.
HANDLE exit_event = func.ptr->exit_event_;
delete func.ptr;
func.ptr = 0;
::SetEvent(exit_event);
::SleepEx(INFINITE, TRUE);
return 0;
}
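// Taken together with join() above, the shutdown protocol is: the internal
// thread signals exit_event_ and then parks in SleepEx(); join() either
// terminates it outright (only when terminate_threads() is set, to avoid a
// deadlock in DllMain during global destruction) or wakes the SleepEx()
// call with a no-op APC so that win_thread_function can return normally.
// An illustrative (hypothetical) caller looks like:
//
//   win_thread t(my_task);   // my_task is any nullary function object
//   // ... my_task runs on the internal thread ...
//   t.join();                // must be called before t is destroyed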
#if defined(WINVER) && (WINVER < 0x0500)
void __stdcall apc_function(ULONG) {}
#else
void __stdcall apc_function(ULONG_PTR) {}
#endif
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // defined(ASIO_WINDOWS)
// && !defined(ASIO_WINDOWS_APP)
// && !defined(UNDER_CE)
#endif // ASIO_DETAIL_IMPL_WIN_THREAD_IPP
|
0 | repos/asio/asio/include/asio/detail | repos/asio/asio/include/asio/detail/impl/strand_service.hpp | //
// detail/impl/strand_service.hpp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2024 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_STRAND_SERVICE_HPP
#define ASIO_DETAIL_IMPL_STRAND_SERVICE_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/completion_handler.hpp"
#include "asio/detail/fenced_block.hpp"
#include "asio/detail/handler_alloc_helpers.hpp"
#include "asio/detail/memory.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
inline strand_service::strand_impl::strand_impl()
: operation(&strand_service::do_complete),
locked_(false)
{
}
template <typename Handler>
void strand_service::dispatch(strand_service::implementation_type& impl,
Handler& handler)
{
// If we are already in the strand then the handler can run immediately.
if (running_in_this_thread(impl))
{
fenced_block b(fenced_block::full);
static_cast<Handler&&>(handler)();
return;
}
// Allocate and construct an operation to wrap the handler.
typedef completion_handler<Handler, io_context::executor_type> op;
typename op::ptr p = { asio::detail::addressof(handler),
op::ptr::allocate(handler), 0 };
p.p = new (p.v) op(handler, io_context_.get_executor());
ASIO_HANDLER_CREATION((this->context(),
*p.p, "strand", impl, 0, "dispatch"));
operation* o = p.p;
p.v = p.p = 0;
do_dispatch(impl, o);
}
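// For context (illustrative usage only, not part of this header): the
// distinction between the two paths is the usual dispatch/post contract.
// dispatch() may run the handler inline when the caller is already inside
// the strand, whereas post() below always defers the handler.
//
//   asio::io_context ioc;
//   asio::io_context::strand s(ioc);
//   asio::dispatch(s, []{ /* may run before dispatch() returns */ });
//   asio::post(s, []{ /* never runs before post() returns */ });
//   ioc.run();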
// Request the io_context to invoke the given handler and return immediately.
template <typename Handler>
void strand_service::post(strand_service::implementation_type& impl,
Handler& handler)
{
bool is_continuation =
asio_handler_cont_helpers::is_continuation(handler);
// Allocate and construct an operation to wrap the handler.
typedef completion_handler<Handler, io_context::executor_type> op;
typename op::ptr p = { asio::detail::addressof(handler),
op::ptr::allocate(handler), 0 };
p.p = new (p.v) op(handler, io_context_.get_executor());
ASIO_HANDLER_CREATION((this->context(),
*p.p, "strand", impl, 0, "post"));
do_post(impl, p.p, is_continuation);
p.v = p.p = 0;
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // ASIO_DETAIL_IMPL_STRAND_SERVICE_HPP
|
0 | repos/asio/asio/include/asio/detail | repos/asio/asio/include/asio/detail/impl/win_event.ipp | //
// detail/impl/win_event.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2024 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_WIN_EVENT_IPP
#define ASIO_DETAIL_IMPL_WIN_EVENT_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if defined(ASIO_WINDOWS)
#include "asio/detail/throw_error.hpp"
#include "asio/detail/win_event.hpp"
#include "asio/error.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
win_event::win_event()
: state_(0)
{
#if defined(ASIO_WINDOWS_APP)
events_[0] = ::CreateEventExW(0, 0,
CREATE_EVENT_MANUAL_RESET, EVENT_ALL_ACCESS);
#else // defined(ASIO_WINDOWS_APP)
events_[0] = ::CreateEventW(0, true, false, 0);
#endif // defined(ASIO_WINDOWS_APP)
if (!events_[0])
{
DWORD last_error = ::GetLastError();
asio::error_code ec(last_error,
asio::error::get_system_category());
asio::detail::throw_error(ec, "event");
}
#if defined(ASIO_WINDOWS_APP)
events_[1] = ::CreateEventExW(0, 0, 0, EVENT_ALL_ACCESS);
#else // defined(ASIO_WINDOWS_APP)
events_[1] = ::CreateEventW(0, false, false, 0);
#endif // defined(ASIO_WINDOWS_APP)
if (!events_[1])
{
DWORD last_error = ::GetLastError();
::CloseHandle(events_[0]);
asio::error_code ec(last_error,
asio::error::get_system_category());
asio::detail::throw_error(ec, "event");
}
}
win_event::~win_event()
{
::CloseHandle(events_[0]);
::CloseHandle(events_[1]);
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // defined(ASIO_WINDOWS)
#endif // ASIO_DETAIL_IMPL_WIN_EVENT_IPP
|
0 | repos/asio/asio/include/asio/detail | repos/asio/asio/include/asio/detail/impl/win_iocp_handle_service.ipp | //
// detail/impl/win_iocp_handle_service.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2024 Christopher M. Kohlhoff (chris at kohlhoff dot com)
// Copyright (c) 2008 Rep Invariant Systems, Inc. ([email protected])
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_WIN_IOCP_HANDLE_SERVICE_IPP
#define ASIO_DETAIL_IMPL_WIN_IOCP_HANDLE_SERVICE_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if defined(ASIO_HAS_IOCP)
#include "asio/detail/win_iocp_handle_service.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
class win_iocp_handle_service::overlapped_wrapper
: public OVERLAPPED
{
public:
explicit overlapped_wrapper(asio::error_code& ec)
{
Internal = 0;
InternalHigh = 0;
Offset = 0;
OffsetHigh = 0;
// Create a non-signalled manual-reset event, for GetOverlappedResult.
hEvent = ::CreateEventW(0, TRUE, FALSE, 0);
if (hEvent)
{
// As documented in GetQueuedCompletionStatus, setting the low order
// bit of this event prevents our synchronous writes from being treated
// as completion port events.
DWORD_PTR tmp = reinterpret_cast<DWORD_PTR>(hEvent);
hEvent = reinterpret_cast<HANDLE>(tmp | 1);
}
else
{
DWORD last_error = ::GetLastError();
ec = asio::error_code(last_error,
asio::error::get_system_category());
}
}
~overlapped_wrapper()
{
if (hEvent)
{
::CloseHandle(hEvent);
}
}
};
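// The low-order-bit trick above, shown in isolation (illustrative sketch
// only): GetQueuedCompletionStatus does not queue a completion packet for
// an OVERLAPPED whose hEvent has its least significant bit set, so tagging
// the event lets the synchronous reads and writes below use
// GetOverlappedResult without also surfacing as completion port events.
//
//   HANDLE ev = ::CreateEventW(0, TRUE, FALSE, 0);  // manual-reset event
//   OVERLAPPED ov = {};
//   ov.hEvent = reinterpret_cast<HANDLE>(
//       reinterpret_cast<DWORD_PTR>(ev) | 1);       // suppress IOCP packet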
win_iocp_handle_service::win_iocp_handle_service(execution_context& context)
: execution_context_service_base<win_iocp_handle_service>(context),
iocp_service_(asio::use_service<win_iocp_io_context>(context)),
nt_set_info_(0),
mutex_(),
impl_list_(0)
{
}
void win_iocp_handle_service::shutdown()
{
// Close all implementations, causing all operations to complete.
asio::detail::mutex::scoped_lock lock(mutex_);
implementation_type* impl = impl_list_;
while (impl)
{
close_for_destruction(*impl);
impl = impl->next_;
}
}
void win_iocp_handle_service::construct(
win_iocp_handle_service::implementation_type& impl)
{
impl.handle_ = INVALID_HANDLE_VALUE;
impl.safe_cancellation_thread_id_ = 0;
// Insert implementation into linked list of all implementations.
asio::detail::mutex::scoped_lock lock(mutex_);
impl.next_ = impl_list_;
impl.prev_ = 0;
if (impl_list_)
impl_list_->prev_ = &impl;
impl_list_ = &impl;
}
void win_iocp_handle_service::move_construct(
win_iocp_handle_service::implementation_type& impl,
win_iocp_handle_service::implementation_type& other_impl)
{
impl.handle_ = other_impl.handle_;
other_impl.handle_ = INVALID_HANDLE_VALUE;
impl.safe_cancellation_thread_id_ = other_impl.safe_cancellation_thread_id_;
other_impl.safe_cancellation_thread_id_ = 0;
// Insert implementation into linked list of all implementations.
asio::detail::mutex::scoped_lock lock(mutex_);
impl.next_ = impl_list_;
impl.prev_ = 0;
if (impl_list_)
impl_list_->prev_ = &impl;
impl_list_ = &impl;
}
void win_iocp_handle_service::move_assign(
win_iocp_handle_service::implementation_type& impl,
win_iocp_handle_service& other_service,
win_iocp_handle_service::implementation_type& other_impl)
{
close_for_destruction(impl);
if (this != &other_service)
{
// Remove implementation from linked list of all implementations.
asio::detail::mutex::scoped_lock lock(mutex_);
if (impl_list_ == &impl)
impl_list_ = impl.next_;
if (impl.prev_)
impl.prev_->next_ = impl.next_;
if (impl.next_)
impl.next_->prev_= impl.prev_;
impl.next_ = 0;
impl.prev_ = 0;
}
impl.handle_ = other_impl.handle_;
other_impl.handle_ = INVALID_HANDLE_VALUE;
impl.safe_cancellation_thread_id_ = other_impl.safe_cancellation_thread_id_;
other_impl.safe_cancellation_thread_id_ = 0;
if (this != &other_service)
{
// Insert implementation into linked list of all implementations.
asio::detail::mutex::scoped_lock lock(other_service.mutex_);
impl.next_ = other_service.impl_list_;
impl.prev_ = 0;
if (other_service.impl_list_)
other_service.impl_list_->prev_ = &impl;
other_service.impl_list_ = &impl;
}
}
void win_iocp_handle_service::destroy(
win_iocp_handle_service::implementation_type& impl)
{
close_for_destruction(impl);
// Remove implementation from linked list of all implementations.
asio::detail::mutex::scoped_lock lock(mutex_);
if (impl_list_ == &impl)
impl_list_ = impl.next_;
if (impl.prev_)
impl.prev_->next_ = impl.next_;
if (impl.next_)
impl.next_->prev_= impl.prev_;
impl.next_ = 0;
impl.prev_ = 0;
}
asio::error_code win_iocp_handle_service::assign(
win_iocp_handle_service::implementation_type& impl,
const native_handle_type& handle, asio::error_code& ec)
{
if (is_open(impl))
{
ec = asio::error::already_open;
ASIO_ERROR_LOCATION(ec);
return ec;
}
if (iocp_service_.register_handle(handle, ec))
{
ASIO_ERROR_LOCATION(ec);
return ec;
}
impl.handle_ = handle;
ec = asio::error_code();
return ec;
}
asio::error_code win_iocp_handle_service::close(
win_iocp_handle_service::implementation_type& impl,
asio::error_code& ec)
{
if (is_open(impl))
{
ASIO_HANDLER_OPERATION((iocp_service_.context(), "handle",
&impl, reinterpret_cast<uintmax_t>(impl.handle_), "close"));
if (!::CloseHandle(impl.handle_))
{
DWORD last_error = ::GetLastError();
ec = asio::error_code(last_error,
asio::error::get_system_category());
}
else
{
ec = asio::error_code();
}
impl.handle_ = INVALID_HANDLE_VALUE;
impl.safe_cancellation_thread_id_ = 0;
}
else
{
ec = asio::error_code();
}
ASIO_ERROR_LOCATION(ec);
return ec;
}
win_iocp_handle_service::native_handle_type win_iocp_handle_service::release(
win_iocp_handle_service::implementation_type& impl,
asio::error_code& ec)
{
if (!is_open(impl))
return INVALID_HANDLE_VALUE;
cancel(impl, ec);
if (ec)
{
ASIO_ERROR_LOCATION(ec);
return INVALID_HANDLE_VALUE;
}
nt_set_info_fn fn = get_nt_set_info();
if (fn == 0)
{
ec = asio::error::operation_not_supported;
ASIO_ERROR_LOCATION(ec);
return INVALID_HANDLE_VALUE;
}
ULONG_PTR iosb[2] = { 0, 0 };
void* info[2] = { 0, 0 };
if (fn(impl.handle_, iosb, &info, sizeof(info),
61 /* FileReplaceCompletionInformation */))
{
ec = asio::error::operation_not_supported;
ASIO_ERROR_LOCATION(ec);
return INVALID_HANDLE_VALUE;
}
native_handle_type tmp = impl.handle_;
impl.handle_ = INVALID_HANDLE_VALUE;
return tmp;
}
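// Note on the call above: there is no documented way to detach a handle
// from an I/O completion port, so release() uses the undocumented
// NtSetInformationFile with information class 61
// (FileReplaceCompletionInformation) and a null port to stop further
// completion packets before handing the handle back. The iosb[2] array
// stands in for an IO_STATUS_BLOCK and info[2] for a structure that is
// roughly (assumed layout):
//
//   struct file_completion_information { HANDLE port; void* key; };
//   file_completion_information info = { 0, 0 };  // null port detaches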
asio::error_code win_iocp_handle_service::cancel(
win_iocp_handle_service::implementation_type& impl,
asio::error_code& ec)
{
if (!is_open(impl))
{
ec = asio::error::bad_descriptor;
ASIO_ERROR_LOCATION(ec);
return ec;
}
ASIO_HANDLER_OPERATION((iocp_service_.context(), "handle",
&impl, reinterpret_cast<uintmax_t>(impl.handle_), "cancel"));
if (FARPROC cancel_io_ex_ptr = ::GetProcAddress(
::GetModuleHandleA("KERNEL32"), "CancelIoEx"))
{
    // The version of Windows in use supports cancellation from any thread.
typedef BOOL (WINAPI* cancel_io_ex_t)(HANDLE, LPOVERLAPPED);
cancel_io_ex_t cancel_io_ex = reinterpret_cast<cancel_io_ex_t>(
reinterpret_cast<void*>(cancel_io_ex_ptr));
if (!cancel_io_ex(impl.handle_, 0))
{
DWORD last_error = ::GetLastError();
if (last_error == ERROR_NOT_FOUND)
{
// ERROR_NOT_FOUND means that there were no operations to be
// cancelled. We swallow this error to match the behaviour on other
// platforms.
ec = asio::error_code();
}
else
{
ec = asio::error_code(last_error,
asio::error::get_system_category());
}
}
else
{
ec = asio::error_code();
}
}
else if (impl.safe_cancellation_thread_id_ == 0)
{
// No operations have been started, so there's nothing to cancel.
ec = asio::error_code();
}
else if (impl.safe_cancellation_thread_id_ == ::GetCurrentThreadId())
{
// Asynchronous operations have been started from the current thread only,
// so it is safe to try to cancel them using CancelIo.
if (!::CancelIo(impl.handle_))
{
DWORD last_error = ::GetLastError();
ec = asio::error_code(last_error,
asio::error::get_system_category());
}
else
{
ec = asio::error_code();
}
}
else
{
// Asynchronous operations have been started from more than one thread,
// so cancellation is not safe.
ec = asio::error::operation_not_supported;
}
ASIO_ERROR_LOCATION(ec);
return ec;
}
size_t win_iocp_handle_service::do_write(
win_iocp_handle_service::implementation_type& impl, uint64_t offset,
const asio::const_buffer& buffer, asio::error_code& ec)
{
if (!is_open(impl))
{
ec = asio::error::bad_descriptor;
ASIO_ERROR_LOCATION(ec);
return 0;
}
// A request to write 0 bytes on a handle is a no-op.
if (buffer.size() == 0)
{
ec = asio::error_code();
return 0;
}
overlapped_wrapper overlapped(ec);
if (ec)
{
ASIO_ERROR_LOCATION(ec);
return 0;
}
// Write the data.
overlapped.Offset = offset & 0xFFFFFFFF;
overlapped.OffsetHigh = (offset >> 32) & 0xFFFFFFFF;
BOOL ok = ::WriteFile(impl.handle_, buffer.data(),
static_cast<DWORD>(buffer.size()), 0, &overlapped);
if (!ok)
{
DWORD last_error = ::GetLastError();
if (last_error != ERROR_IO_PENDING)
{
ec = asio::error_code(last_error,
asio::error::get_system_category());
ASIO_ERROR_LOCATION(ec);
return 0;
}
}
// Wait for the operation to complete.
DWORD bytes_transferred = 0;
ok = ::GetOverlappedResult(impl.handle_,
&overlapped, &bytes_transferred, TRUE);
if (!ok)
{
DWORD last_error = ::GetLastError();
ec = asio::error_code(last_error,
asio::error::get_system_category());
ASIO_ERROR_LOCATION(ec);
return 0;
}
ec = asio::error_code();
return bytes_transferred;
}
void win_iocp_handle_service::start_write_op(
win_iocp_handle_service::implementation_type& impl, uint64_t offset,
const asio::const_buffer& buffer, operation* op)
{
update_cancellation_thread_id(impl);
iocp_service_.work_started();
if (!is_open(impl))
{
iocp_service_.on_completion(op, asio::error::bad_descriptor);
}
else if (buffer.size() == 0)
{
// A request to write 0 bytes on a handle is a no-op.
iocp_service_.on_completion(op);
}
else
{
DWORD bytes_transferred = 0;
op->Offset = offset & 0xFFFFFFFF;
op->OffsetHigh = (offset >> 32) & 0xFFFFFFFF;
BOOL ok = ::WriteFile(impl.handle_, buffer.data(),
static_cast<DWORD>(buffer.size()),
&bytes_transferred, op);
DWORD last_error = ::GetLastError();
if (!ok && last_error != ERROR_IO_PENDING
&& last_error != ERROR_MORE_DATA)
{
iocp_service_.on_completion(op, last_error, bytes_transferred);
}
else
{
iocp_service_.on_pending(op);
}
}
}
size_t win_iocp_handle_service::do_read(
win_iocp_handle_service::implementation_type& impl, uint64_t offset,
const asio::mutable_buffer& buffer, asio::error_code& ec)
{
if (!is_open(impl))
{
ec = asio::error::bad_descriptor;
ASIO_ERROR_LOCATION(ec);
return 0;
}
// A request to read 0 bytes on a stream handle is a no-op.
if (buffer.size() == 0)
{
ec = asio::error_code();
return 0;
}
overlapped_wrapper overlapped(ec);
if (ec)
{
ASIO_ERROR_LOCATION(ec);
return 0;
}
// Read some data.
overlapped.Offset = offset & 0xFFFFFFFF;
overlapped.OffsetHigh = (offset >> 32) & 0xFFFFFFFF;
BOOL ok = ::ReadFile(impl.handle_, buffer.data(),
static_cast<DWORD>(buffer.size()), 0, &overlapped);
if (!ok)
{
DWORD last_error = ::GetLastError();
if (last_error != ERROR_IO_PENDING && last_error != ERROR_MORE_DATA)
{
if (last_error == ERROR_HANDLE_EOF)
{
ec = asio::error::eof;
}
else
{
ec = asio::error_code(last_error,
asio::error::get_system_category());
}
ASIO_ERROR_LOCATION(ec);
return 0;
}
}
// Wait for the operation to complete.
DWORD bytes_transferred = 0;
ok = ::GetOverlappedResult(impl.handle_,
&overlapped, &bytes_transferred, TRUE);
if (!ok)
{
DWORD last_error = ::GetLastError();
if (last_error == ERROR_HANDLE_EOF)
{
ec = asio::error::eof;
}
else
{
ec = asio::error_code(last_error,
asio::error::get_system_category());
}
ASIO_ERROR_LOCATION(ec);
return (last_error == ERROR_MORE_DATA) ? bytes_transferred : 0;
}
ec = asio::error_code();
return bytes_transferred;
}
void win_iocp_handle_service::start_read_op(
win_iocp_handle_service::implementation_type& impl, uint64_t offset,
const asio::mutable_buffer& buffer, operation* op)
{
update_cancellation_thread_id(impl);
iocp_service_.work_started();
if (!is_open(impl))
{
iocp_service_.on_completion(op, asio::error::bad_descriptor);
}
else if (buffer.size() == 0)
{
// A request to read 0 bytes on a handle is a no-op.
iocp_service_.on_completion(op);
}
else
{
DWORD bytes_transferred = 0;
op->Offset = offset & 0xFFFFFFFF;
op->OffsetHigh = (offset >> 32) & 0xFFFFFFFF;
BOOL ok = ::ReadFile(impl.handle_, buffer.data(),
static_cast<DWORD>(buffer.size()),
&bytes_transferred, op);
DWORD last_error = ::GetLastError();
if (!ok && last_error != ERROR_IO_PENDING
&& last_error != ERROR_MORE_DATA)
{
iocp_service_.on_completion(op, last_error, bytes_transferred);
}
else
{
iocp_service_.on_pending(op);
}
}
}
void win_iocp_handle_service::update_cancellation_thread_id(
win_iocp_handle_service::implementation_type& impl)
{
if (impl.safe_cancellation_thread_id_ == 0)
impl.safe_cancellation_thread_id_ = ::GetCurrentThreadId();
else if (impl.safe_cancellation_thread_id_ != ::GetCurrentThreadId())
impl.safe_cancellation_thread_id_ = ~DWORD(0);
}
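// The bookkeeping above classifies the implementation for cancel(): 0 means
// no asynchronous operation has started yet, a specific id means all
// operations were started by that one thread, and ~DWORD(0) means they came
// from multiple threads. Condensed, the fallback decision in cancel() when
// CancelIoEx is unavailable is:
//
//   // id == 0                      -> nothing to cancel
//   // id == ::GetCurrentThreadId() -> ::CancelIo(impl.handle_) is safe,
//   //                                 since CancelIo only affects I/O
//   //                                 issued by the calling thread
//   // otherwise                    -> asio::error::operation_not_supported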
void win_iocp_handle_service::close_for_destruction(implementation_type& impl)
{
if (is_open(impl))
{
ASIO_HANDLER_OPERATION((iocp_service_.context(), "handle",
&impl, reinterpret_cast<uintmax_t>(impl.handle_), "close"));
::CloseHandle(impl.handle_);
impl.handle_ = INVALID_HANDLE_VALUE;
impl.safe_cancellation_thread_id_ = 0;
}
}
win_iocp_handle_service::nt_set_info_fn
win_iocp_handle_service::get_nt_set_info()
{
void* ptr = interlocked_compare_exchange_pointer(&nt_set_info_, 0, 0);
if (!ptr)
{
if (HMODULE h = ::GetModuleHandleA("NTDLL.DLL"))
ptr = reinterpret_cast<void*>(GetProcAddress(h, "NtSetInformationFile"));
// On failure, set nt_set_info_ to a special value to indicate that the
// NtSetInformationFile function is unavailable. That way we won't bother
// trying to look it up again.
interlocked_exchange_pointer(&nt_set_info_, ptr ? ptr : this);
}
return reinterpret_cast<nt_set_info_fn>(ptr == this ? 0 : ptr);
}
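// The lookup above is a one-time, thread-safe cache of a function pointer:
// the first caller resolves NtSetInformationFile and publishes either the
// result or a sentinel (the service pointer itself) meaning "known to be
// unavailable", so later calls never repeat the GetProcAddress. The same
// pattern reduced to a sketch (assumed names):
//
//   void* p = interlocked_compare_exchange_pointer(&cache_, 0, 0); // read
//   if (!p)
//   {
//     p = resolve_symbol();                     // may legitimately be null
//     interlocked_exchange_pointer(&cache_, p ? p : sentinel);
//   }
//   return p == sentinel ? 0 : p;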
void* win_iocp_handle_service::interlocked_compare_exchange_pointer(
void** dest, void* exch, void* cmp)
{
#if defined(_M_IX86)
return reinterpret_cast<void*>(InterlockedCompareExchange(
reinterpret_cast<PLONG>(dest), reinterpret_cast<LONG>(exch),
reinterpret_cast<LONG>(cmp)));
#else
return InterlockedCompareExchangePointer(dest, exch, cmp);
#endif
}
void* win_iocp_handle_service::interlocked_exchange_pointer(
void** dest, void* val)
{
#if defined(_M_IX86)
return reinterpret_cast<void*>(InterlockedExchange(
reinterpret_cast<PLONG>(dest), reinterpret_cast<LONG>(val)));
#else
return InterlockedExchangePointer(dest, val);
#endif
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // defined(ASIO_HAS_IOCP)
#endif // ASIO_DETAIL_IMPL_WIN_IOCP_HANDLE_SERVICE_IPP
|
0 | repos/asio/asio/include/asio/detail | repos/asio/asio/include/asio/detail/impl/posix_serial_port_service.ipp | //
// detail/impl/posix_serial_port_service.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2024 Christopher M. Kohlhoff (chris at kohlhoff dot com)
// Copyright (c) 2008 Rep Invariant Systems, Inc. ([email protected])
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_POSIX_SERIAL_PORT_SERVICE_IPP
#define ASIO_DETAIL_IMPL_POSIX_SERIAL_PORT_SERVICE_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if defined(ASIO_HAS_SERIAL_PORT)
#if !defined(ASIO_WINDOWS) && !defined(__CYGWIN__)
#include <cstring>
#include "asio/detail/posix_serial_port_service.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
posix_serial_port_service::posix_serial_port_service(
execution_context& context)
: execution_context_service_base<posix_serial_port_service>(context),
descriptor_service_(context)
{
}
void posix_serial_port_service::shutdown()
{
descriptor_service_.shutdown();
}
asio::error_code posix_serial_port_service::open(
posix_serial_port_service::implementation_type& impl,
const std::string& device, asio::error_code& ec)
{
if (is_open(impl))
{
ec = asio::error::already_open;
ASIO_ERROR_LOCATION(ec);
return ec;
}
descriptor_ops::state_type state = 0;
int fd = descriptor_ops::open(device.c_str(),
O_RDWR | O_NONBLOCK | O_NOCTTY, ec);
if (fd < 0)
{
ASIO_ERROR_LOCATION(ec);
return ec;
}
int s = descriptor_ops::fcntl(fd, F_GETFL, ec);
if (s >= 0)
s = descriptor_ops::fcntl(fd, F_SETFL, s | O_NONBLOCK, ec);
if (s < 0)
{
asio::error_code ignored_ec;
descriptor_ops::close(fd, state, ignored_ec);
ASIO_ERROR_LOCATION(ec);
return ec;
}
// Set up default serial port options.
termios ios;
s = ::tcgetattr(fd, &ios);
descriptor_ops::get_last_error(ec, s < 0);
if (s >= 0)
{
#if defined(_BSD_SOURCE) || defined(_DEFAULT_SOURCE)
::cfmakeraw(&ios);
#else
ios.c_iflag &= ~(IGNBRK | BRKINT | PARMRK
| ISTRIP | INLCR | IGNCR | ICRNL | IXON);
ios.c_oflag &= ~OPOST;
ios.c_lflag &= ~(ECHO | ECHONL | ICANON | ISIG | IEXTEN);
ios.c_cflag &= ~(CSIZE | PARENB);
ios.c_cflag |= CS8;
#endif
ios.c_iflag |= IGNPAR;
ios.c_cflag |= CREAD | CLOCAL;
s = ::tcsetattr(fd, TCSANOW, &ios);
descriptor_ops::get_last_error(ec, s < 0);
}
if (s < 0)
{
asio::error_code ignored_ec;
descriptor_ops::close(fd, state, ignored_ec);
ASIO_ERROR_LOCATION(ec);
return ec;
}
// We're done. Take ownership of the serial port descriptor.
if (descriptor_service_.assign(impl, fd, ec))
{
asio::error_code ignored_ec;
descriptor_ops::close(fd, state, ignored_ec);
}
ASIO_ERROR_LOCATION(ec);
return ec;
}
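// The setup above amounts to putting the descriptor into raw, non-canonical
// operation: no input/output translation or echo, 8 data bits, parity
// errors ignored, receiver enabled and modem-control lines ignored. A
// hedged standalone equivalent (hypothetical fd, error handling omitted):
//
//   termios ios;
//   ::tcgetattr(fd, &ios);
//   ::cfmakeraw(&ios);             // or clear the flags by hand, as above
//   ios.c_iflag |= IGNPAR;
//   ios.c_cflag |= CREAD | CLOCAL;
//   ::tcsetattr(fd, TCSANOW, &ios);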
asio::error_code posix_serial_port_service::do_set_option(
posix_serial_port_service::implementation_type& impl,
posix_serial_port_service::store_function_type store,
const void* option, asio::error_code& ec)
{
termios ios;
int s = ::tcgetattr(descriptor_service_.native_handle(impl), &ios);
descriptor_ops::get_last_error(ec, s < 0);
if (s < 0)
{
ASIO_ERROR_LOCATION(ec);
return ec;
}
if (store(option, ios, ec))
{
ASIO_ERROR_LOCATION(ec);
return ec;
}
s = ::tcsetattr(descriptor_service_.native_handle(impl), TCSANOW, &ios);
descriptor_ops::get_last_error(ec, s < 0);
ASIO_ERROR_LOCATION(ec);
return ec;
}
asio::error_code posix_serial_port_service::do_get_option(
const posix_serial_port_service::implementation_type& impl,
posix_serial_port_service::load_function_type load,
void* option, asio::error_code& ec) const
{
termios ios;
int s = ::tcgetattr(descriptor_service_.native_handle(impl), &ios);
descriptor_ops::get_last_error(ec, s < 0);
if (s < 0)
{
ASIO_ERROR_LOCATION(ec);
return ec;
}
load(option, ios, ec);
ASIO_ERROR_LOCATION(ec);
return ec;
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // !defined(ASIO_WINDOWS) && !defined(__CYGWIN__)
#endif // defined(ASIO_HAS_SERIAL_PORT)
#endif // ASIO_DETAIL_IMPL_POSIX_SERIAL_PORT_SERVICE_IPP
|
0 | repos/asio/asio/include/asio/detail | repos/asio/asio/include/asio/detail/impl/win_tss_ptr.ipp | //
// detail/impl/win_tss_ptr.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2024 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_WIN_TSS_PTR_IPP
#define ASIO_DETAIL_IMPL_WIN_TSS_PTR_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if defined(ASIO_WINDOWS)
#include "asio/detail/throw_error.hpp"
#include "asio/detail/win_tss_ptr.hpp"
#include "asio/error.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
DWORD win_tss_ptr_create()
{
#if defined(UNDER_CE)
const DWORD out_of_indexes = 0xFFFFFFFF;
#else
const DWORD out_of_indexes = TLS_OUT_OF_INDEXES;
#endif
DWORD tss_key = ::TlsAlloc();
if (tss_key == out_of_indexes)
{
DWORD last_error = ::GetLastError();
asio::error_code ec(last_error,
asio::error::get_system_category());
asio::detail::throw_error(ec, "tss");
}
return tss_key;
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // defined(ASIO_WINDOWS)
#endif // ASIO_DETAIL_IMPL_WIN_TSS_PTR_IPP
|
0 | repos/asio/asio/include/asio/detail | repos/asio/asio/include/asio/detail/impl/signal_set_service.ipp | //
// detail/impl/signal_set_service.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2024 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_SIGNAL_SET_SERVICE_IPP
#define ASIO_DETAIL_IMPL_SIGNAL_SET_SERVICE_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#include <cstring>
#include <stdexcept>
#include "asio/detail/signal_blocker.hpp"
#include "asio/detail/signal_set_service.hpp"
#include "asio/detail/static_mutex.hpp"
#include "asio/detail/throw_exception.hpp"
#if defined(ASIO_HAS_IO_URING_AS_DEFAULT)
# include "asio/detail/io_uring_service.hpp"
#else // defined(ASIO_HAS_IO_URING_AS_DEFAULT)
# include "asio/detail/reactor.hpp"
#endif // defined(ASIO_HAS_IO_URING_AS_DEFAULT)
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
struct signal_state
{
// Mutex used for protecting global state.
static_mutex mutex_;
// The read end of the pipe used for signal notifications.
int read_descriptor_;
// The write end of the pipe used for signal notifications.
int write_descriptor_;
// Whether the signal state has been prepared for a fork.
bool fork_prepared_;
// The head of a linked list of all signal_set_service instances.
class signal_set_service* service_list_;
// A count of the number of objects that are registered for each signal.
std::size_t registration_count_[max_signal_number];
// The flags used for each registered signal.
signal_set_base::flags_t flags_[max_signal_number];
};
signal_state* get_signal_state()
{
static signal_state state = {
ASIO_STATIC_MUTEX_INIT, -1, -1, false, 0,
{ 0 }, { signal_set_base::flags_t() } };
return &state;
}
void asio_signal_handler(int signal_number)
{
#if defined(ASIO_WINDOWS) \
|| defined(ASIO_WINDOWS_RUNTIME) \
|| defined(__CYGWIN__)
signal_set_service::deliver_signal(signal_number);
#else // defined(ASIO_WINDOWS)
// || defined(ASIO_WINDOWS_RUNTIME)
// || defined(__CYGWIN__)
int saved_errno = errno;
signal_state* state = get_signal_state();
signed_size_type result = ::write(state->write_descriptor_,
&signal_number, sizeof(signal_number));
(void)result;
errno = saved_errno;
#endif // defined(ASIO_WINDOWS)
// || defined(ASIO_WINDOWS_RUNTIME)
// || defined(__CYGWIN__)
#if defined(ASIO_HAS_SIGNAL) && !defined(ASIO_HAS_SIGACTION)
::signal(signal_number, asio_signal_handler);
#endif // defined(ASIO_HAS_SIGNAL) && !defined(ASIO_HAS_SIGACTION)
}
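// This is the classic self-pipe technique: the handler restricts itself to
// an async-signal-safe write() of the signal number, and the pipe's read
// end is drained later by pipe_read_op (below) on a reactor or io_uring
// thread, where it is safe to dispatch completion handlers. errno is saved
// and restored because the handler may have interrupted code that is about
// to inspect it. Reduced to a sketch (hypothetical names):
//
//   extern "C" void on_signal(int n)
//   {
//     int saved_errno = errno;
//     (void)::write(pipe_write_fd, &n, sizeof(n));
//     errno = saved_errno;
//   }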
#if !defined(ASIO_WINDOWS) \
&& !defined(ASIO_WINDOWS_RUNTIME) \
&& !defined(__CYGWIN__)
class signal_set_service::pipe_read_op :
# if defined(ASIO_HAS_IO_URING_AS_DEFAULT)
public io_uring_operation
# else // defined(ASIO_HAS_IO_URING_AS_DEFAULT)
public reactor_op
# endif // defined(ASIO_HAS_IO_URING_AS_DEFAULT)
{
public:
# if defined(ASIO_HAS_IO_URING_AS_DEFAULT)
pipe_read_op()
: io_uring_operation(asio::error_code(), &pipe_read_op::do_prepare,
&pipe_read_op::do_perform, pipe_read_op::do_complete)
{
}
static void do_prepare(io_uring_operation*, ::io_uring_sqe* sqe)
{
signal_state* state = get_signal_state();
int fd = state->read_descriptor_;
::io_uring_prep_poll_add(sqe, fd, POLLIN);
}
static bool do_perform(io_uring_operation*, bool)
{
signal_state* state = get_signal_state();
int fd = state->read_descriptor_;
int signal_number = 0;
while (::read(fd, &signal_number, sizeof(int)) == sizeof(int))
if (signal_number >= 0 && signal_number < max_signal_number)
signal_set_service::deliver_signal(signal_number);
return false;
}
# else // defined(ASIO_HAS_IO_URING_AS_DEFAULT)
pipe_read_op()
: reactor_op(asio::error_code(),
&pipe_read_op::do_perform, pipe_read_op::do_complete)
{
}
static status do_perform(reactor_op*)
{
signal_state* state = get_signal_state();
int fd = state->read_descriptor_;
int signal_number = 0;
while (::read(fd, &signal_number, sizeof(int)) == sizeof(int))
if (signal_number >= 0 && signal_number < max_signal_number)
signal_set_service::deliver_signal(signal_number);
return not_done;
}
# endif // defined(ASIO_HAS_IO_URING_AS_DEFAULT)
static void do_complete(void* /*owner*/, operation* base,
const asio::error_code& /*ec*/,
std::size_t /*bytes_transferred*/)
{
pipe_read_op* o(static_cast<pipe_read_op*>(base));
delete o;
}
};
#endif // !defined(ASIO_WINDOWS)
// && !defined(ASIO_WINDOWS_RUNTIME)
// && !defined(__CYGWIN__)
signal_set_service::signal_set_service(execution_context& context)
: execution_context_service_base<signal_set_service>(context),
scheduler_(asio::use_service<scheduler_impl>(context)),
#if !defined(ASIO_WINDOWS) \
&& !defined(ASIO_WINDOWS_RUNTIME) \
&& !defined(__CYGWIN__)
# if defined(ASIO_HAS_IO_URING_AS_DEFAULT)
io_uring_service_(asio::use_service<io_uring_service>(context)),
# else // defined(ASIO_HAS_IO_URING_AS_DEFAULT)
reactor_(asio::use_service<reactor>(context)),
# endif // defined(ASIO_HAS_IO_URING_AS_DEFAULT)
#endif // !defined(ASIO_WINDOWS)
// && !defined(ASIO_WINDOWS_RUNTIME)
// && !defined(__CYGWIN__)
next_(0),
prev_(0)
{
get_signal_state()->mutex_.init();
#if !defined(ASIO_WINDOWS) \
&& !defined(ASIO_WINDOWS_RUNTIME) \
&& !defined(__CYGWIN__)
# if defined(ASIO_HAS_IO_URING_AS_DEFAULT)
io_uring_service_.init_task();
# else // defined(ASIO_HAS_IO_URING_AS_DEFAULT)
reactor_.init_task();
# endif // defined(ASIO_HAS_IO_URING_AS_DEFAULT)
#endif // !defined(ASIO_WINDOWS)
// && !defined(ASIO_WINDOWS_RUNTIME)
// && !defined(__CYGWIN__)
for (int i = 0; i < max_signal_number; ++i)
registrations_[i] = 0;
add_service(this);
}
signal_set_service::~signal_set_service()
{
remove_service(this);
}
void signal_set_service::shutdown()
{
remove_service(this);
op_queue<operation> ops;
for (int i = 0; i < max_signal_number; ++i)
{
registration* reg = registrations_[i];
while (reg)
{
ops.push(*reg->queue_);
reg = reg->next_in_table_;
}
}
scheduler_.abandon_operations(ops);
}
void signal_set_service::notify_fork(execution_context::fork_event fork_ev)
{
#if !defined(ASIO_WINDOWS) \
&& !defined(ASIO_WINDOWS_RUNTIME) \
&& !defined(__CYGWIN__)
signal_state* state = get_signal_state();
static_mutex::scoped_lock lock(state->mutex_);
switch (fork_ev)
{
case execution_context::fork_prepare:
{
int read_descriptor = state->read_descriptor_;
state->fork_prepared_ = true;
lock.unlock();
# if defined(ASIO_HAS_IO_URING_AS_DEFAULT)
(void)read_descriptor;
io_uring_service_.deregister_io_object(io_object_data_);
io_uring_service_.cleanup_io_object(io_object_data_);
# else // defined(ASIO_HAS_IO_URING_AS_DEFAULT)
reactor_.deregister_internal_descriptor(read_descriptor, reactor_data_);
reactor_.cleanup_descriptor_data(reactor_data_);
# endif // defined(ASIO_HAS_IO_URING_AS_DEFAULT)
}
break;
case execution_context::fork_parent:
if (state->fork_prepared_)
{
int read_descriptor = state->read_descriptor_;
state->fork_prepared_ = false;
lock.unlock();
# if defined(ASIO_HAS_IO_URING_AS_DEFAULT)
(void)read_descriptor;
io_uring_service_.register_internal_io_object(io_object_data_,
io_uring_service::read_op, new pipe_read_op);
# else // defined(ASIO_HAS_IO_URING_AS_DEFAULT)
reactor_.register_internal_descriptor(reactor::read_op,
read_descriptor, reactor_data_, new pipe_read_op);
# endif // defined(ASIO_HAS_IO_URING_AS_DEFAULT)
}
break;
case execution_context::fork_child:
if (state->fork_prepared_)
{
asio::detail::signal_blocker blocker;
close_descriptors();
open_descriptors();
int read_descriptor = state->read_descriptor_;
state->fork_prepared_ = false;
lock.unlock();
# if defined(ASIO_HAS_IO_URING_AS_DEFAULT)
(void)read_descriptor;
io_uring_service_.register_internal_io_object(io_object_data_,
io_uring_service::read_op, new pipe_read_op);
# else // defined(ASIO_HAS_IO_URING_AS_DEFAULT)
reactor_.register_internal_descriptor(reactor::read_op,
read_descriptor, reactor_data_, new pipe_read_op);
# endif // defined(ASIO_HAS_IO_URING_AS_DEFAULT)
}
break;
default:
break;
}
#else // !defined(ASIO_WINDOWS)
// && !defined(ASIO_WINDOWS_RUNTIME)
// && !defined(__CYGWIN__)
(void)fork_ev;
#endif // !defined(ASIO_WINDOWS)
// && !defined(ASIO_WINDOWS_RUNTIME)
// && !defined(__CYGWIN__)
}
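// The fork support above follows the usual pattern for self-pipe reactors:
// before fork() the read end is deregistered; in the parent it is simply
// re-registered; in the child the pipe is recreated (with signals blocked)
// so the child's notifications use its own pipe rather than the one shared
// with the parent. Illustrative application-level call sequence (assumed
// io_context "ioc"):
//
//   ioc.notify_fork(asio::execution_context::fork_prepare);
//   if (fork() == 0)
//     ioc.notify_fork(asio::execution_context::fork_child);
//   else
//     ioc.notify_fork(asio::execution_context::fork_parent);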
void signal_set_service::construct(
signal_set_service::implementation_type& impl)
{
impl.signals_ = 0;
}
void signal_set_service::destroy(
signal_set_service::implementation_type& impl)
{
asio::error_code ignored_ec;
clear(impl, ignored_ec);
cancel(impl, ignored_ec);
}
asio::error_code signal_set_service::add(
signal_set_service::implementation_type& impl, int signal_number,
signal_set_base::flags_t f, asio::error_code& ec)
{
// Check that the signal number is valid.
if (signal_number < 0 || signal_number >= max_signal_number)
{
ec = asio::error::invalid_argument;
return ec;
}
// Check that the specified flags are supported.
#if !defined(ASIO_HAS_SIGACTION)
if (f != signal_set_base::flags::dont_care)
{
ec = asio::error::operation_not_supported;
return ec;
}
#endif // !defined(ASIO_HAS_SIGACTION)
signal_state* state = get_signal_state();
static_mutex::scoped_lock lock(state->mutex_);
// Find the appropriate place to insert the registration.
registration** insertion_point = &impl.signals_;
registration* next = impl.signals_;
while (next && next->signal_number_ < signal_number)
{
insertion_point = &next->next_in_set_;
next = next->next_in_set_;
}
// Only do something if the signal is not already registered.
if (next == 0 || next->signal_number_ != signal_number)
{
registration* new_registration = new registration;
#if defined(ASIO_HAS_SIGNAL) || defined(ASIO_HAS_SIGACTION)
// Register for the signal if we're the first.
if (state->registration_count_[signal_number] == 0)
{
# if defined(ASIO_HAS_SIGACTION)
using namespace std; // For memset.
struct sigaction sa;
memset(&sa, 0, sizeof(sa));
sa.sa_handler = asio_signal_handler;
sigfillset(&sa.sa_mask);
if (f != signal_set_base::flags::dont_care)
sa.sa_flags = static_cast<int>(f);
if (::sigaction(signal_number, &sa, 0) == -1)
# else // defined(ASIO_HAS_SIGACTION)
if (::signal(signal_number, asio_signal_handler) == SIG_ERR)
# endif // defined(ASIO_HAS_SIGACTION)
{
# if defined(ASIO_WINDOWS) || defined(__CYGWIN__)
ec = asio::error::invalid_argument;
# else // defined(ASIO_WINDOWS) || defined(__CYGWIN__)
ec = asio::error_code(errno,
asio::error::get_system_category());
# endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__)
delete new_registration;
return ec;
}
# if defined(ASIO_HAS_SIGACTION)
state->flags_[signal_number] = f;
# endif // defined(ASIO_HAS_SIGACTION)
}
# if defined(ASIO_HAS_SIGACTION)
// Otherwise check to see if the flags have changed.
else if (f != signal_set_base::flags::dont_care)
{
if (f != state->flags_[signal_number])
{
using namespace std; // For memset.
if (state->flags_[signal_number] != signal_set_base::flags::dont_care)
{
ec = asio::error::invalid_argument;
delete new_registration;
return ec;
}
struct sigaction sa;
memset(&sa, 0, sizeof(sa));
sa.sa_handler = asio_signal_handler;
sigfillset(&sa.sa_mask);
sa.sa_flags = static_cast<int>(f);
if (::sigaction(signal_number, &sa, 0) == -1)
{
ec = asio::error_code(errno,
asio::error::get_system_category());
delete new_registration;
return ec;
}
state->flags_[signal_number] = f;
}
}
# endif // defined(ASIO_HAS_SIGACTION)
#endif // defined(ASIO_HAS_SIGNAL) || defined(ASIO_HAS_SIGACTION)
// Record the new registration in the set.
new_registration->signal_number_ = signal_number;
new_registration->queue_ = &impl.queue_;
new_registration->next_in_set_ = next;
*insertion_point = new_registration;
// Insert registration into the registration table.
new_registration->next_in_table_ = registrations_[signal_number];
if (registrations_[signal_number])
registrations_[signal_number]->prev_in_table_ = new_registration;
registrations_[signal_number] = new_registration;
++state->registration_count_[signal_number];
}
ec = asio::error_code();
return ec;
}
asio::error_code signal_set_service::remove(
signal_set_service::implementation_type& impl,
int signal_number, asio::error_code& ec)
{
// Check that the signal number is valid.
if (signal_number < 0 || signal_number >= max_signal_number)
{
ec = asio::error::invalid_argument;
return ec;
}
signal_state* state = get_signal_state();
static_mutex::scoped_lock lock(state->mutex_);
// Find the signal number in the list of registrations.
registration** deletion_point = &impl.signals_;
registration* reg = impl.signals_;
while (reg && reg->signal_number_ < signal_number)
{
deletion_point = ®->next_in_set_;
reg = reg->next_in_set_;
}
if (reg != 0 && reg->signal_number_ == signal_number)
{
#if defined(ASIO_HAS_SIGNAL) || defined(ASIO_HAS_SIGACTION)
// Set signal handler back to the default if we're the last.
if (state->registration_count_[signal_number] == 1)
{
# if defined(ASIO_HAS_SIGACTION)
using namespace std; // For memset.
struct sigaction sa;
memset(&sa, 0, sizeof(sa));
sa.sa_handler = SIG_DFL;
if (::sigaction(signal_number, &sa, 0) == -1)
# else // defined(ASIO_HAS_SIGACTION)
if (::signal(signal_number, SIG_DFL) == SIG_ERR)
# endif // defined(ASIO_HAS_SIGACTION)
{
# if defined(ASIO_WINDOWS) || defined(__CYGWIN__)
ec = asio::error::invalid_argument;
# else // defined(ASIO_WINDOWS) || defined(__CYGWIN__)
ec = asio::error_code(errno,
asio::error::get_system_category());
# endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__)
return ec;
}
# if defined(ASIO_HAS_SIGACTION)
state->flags_[signal_number] = signal_set_base::flags_t();
# endif // defined(ASIO_HAS_SIGACTION)
}
#endif // defined(ASIO_HAS_SIGNAL) || defined(ASIO_HAS_SIGACTION)
// Remove the registration from the set.
*deletion_point = reg->next_in_set_;
// Remove the registration from the registration table.
if (registrations_[signal_number] == reg)
registrations_[signal_number] = reg->next_in_table_;
if (reg->prev_in_table_)
reg->prev_in_table_->next_in_table_ = reg->next_in_table_;
if (reg->next_in_table_)
reg->next_in_table_->prev_in_table_ = reg->prev_in_table_;
--state->registration_count_[signal_number];
delete reg;
}
ec = asio::error_code();
return ec;
}
asio::error_code signal_set_service::clear(
signal_set_service::implementation_type& impl,
asio::error_code& ec)
{
signal_state* state = get_signal_state();
static_mutex::scoped_lock lock(state->mutex_);
while (registration* reg = impl.signals_)
{
#if defined(ASIO_HAS_SIGNAL) || defined(ASIO_HAS_SIGACTION)
// Set signal handler back to the default if we're the last.
if (state->registration_count_[reg->signal_number_] == 1)
{
# if defined(ASIO_HAS_SIGACTION)
using namespace std; // For memset.
struct sigaction sa;
memset(&sa, 0, sizeof(sa));
sa.sa_handler = SIG_DFL;
if (::sigaction(reg->signal_number_, &sa, 0) == -1)
# else // defined(ASIO_HAS_SIGACTION)
if (::signal(reg->signal_number_, SIG_DFL) == SIG_ERR)
# endif // defined(ASIO_HAS_SIGACTION)
{
# if defined(ASIO_WINDOWS) || defined(__CYGWIN__)
ec = asio::error::invalid_argument;
# else // defined(ASIO_WINDOWS) || defined(__CYGWIN__)
ec = asio::error_code(errno,
asio::error::get_system_category());
# endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__)
return ec;
}
# if defined(ASIO_HAS_SIGACTION)
state->flags_[reg->signal_number_] = signal_set_base::flags_t();
# endif // defined(ASIO_HAS_SIGACTION)
}
#endif // defined(ASIO_HAS_SIGNAL) || defined(ASIO_HAS_SIGACTION)
// Remove the registration from the registration table.
if (registrations_[reg->signal_number_] == reg)
registrations_[reg->signal_number_] = reg->next_in_table_;
if (reg->prev_in_table_)
reg->prev_in_table_->next_in_table_ = reg->next_in_table_;
if (reg->next_in_table_)
reg->next_in_table_->prev_in_table_ = reg->prev_in_table_;
--state->registration_count_[reg->signal_number_];
impl.signals_ = reg->next_in_set_;
delete reg;
}
ec = asio::error_code();
return ec;
}
asio::error_code signal_set_service::cancel(
signal_set_service::implementation_type& impl,
asio::error_code& ec)
{
ASIO_HANDLER_OPERATION((scheduler_.context(),
"signal_set", &impl, 0, "cancel"));
op_queue<operation> ops;
{
signal_state* state = get_signal_state();
static_mutex::scoped_lock lock(state->mutex_);
while (signal_op* op = impl.queue_.front())
{
op->ec_ = asio::error::operation_aborted;
impl.queue_.pop();
ops.push(op);
}
}
scheduler_.post_deferred_completions(ops);
ec = asio::error_code();
return ec;
}
void signal_set_service::cancel_ops_by_key(
signal_set_service::implementation_type& impl, void* cancellation_key)
{
op_queue<operation> ops;
{
op_queue<signal_op> other_ops;
signal_state* state = get_signal_state();
static_mutex::scoped_lock lock(state->mutex_);
while (signal_op* op = impl.queue_.front())
{
impl.queue_.pop();
if (op->cancellation_key_ == cancellation_key)
{
op->ec_ = asio::error::operation_aborted;
ops.push(op);
}
else
other_ops.push(op);
}
impl.queue_.push(other_ops);
}
scheduler_.post_deferred_completions(ops);
}
void signal_set_service::deliver_signal(int signal_number)
{
signal_state* state = get_signal_state();
static_mutex::scoped_lock lock(state->mutex_);
signal_set_service* service = state->service_list_;
while (service)
{
op_queue<operation> ops;
registration* reg = service->registrations_[signal_number];
while (reg)
{
if (reg->queue_->empty())
{
++reg->undelivered_;
}
else
{
while (signal_op* op = reg->queue_->front())
{
op->signal_number_ = signal_number;
reg->queue_->pop();
ops.push(op);
}
}
reg = reg->next_in_table_;
}
service->scheduler_.post_deferred_completions(ops);
service = service->next_;
}
}
void signal_set_service::add_service(signal_set_service* service)
{
signal_state* state = get_signal_state();
static_mutex::scoped_lock lock(state->mutex_);
#if !defined(ASIO_WINDOWS) && !defined(__CYGWIN__)
// If this is the first service to be created, open a new pipe.
if (state->service_list_ == 0)
open_descriptors();
#endif // !defined(ASIO_WINDOWS) && !defined(__CYGWIN__)
// If a scheduler_ object is thread-unsafe then it must be the only
// scheduler used to create signal_set objects.
if (state->service_list_ != 0)
{
if (!ASIO_CONCURRENCY_HINT_IS_LOCKING(SCHEDULER,
service->scheduler_.concurrency_hint())
|| !ASIO_CONCURRENCY_HINT_IS_LOCKING(SCHEDULER,
state->service_list_->scheduler_.concurrency_hint()))
{
std::logic_error ex(
"Thread-unsafe execution context objects require "
"exclusive access to signal handling.");
asio::detail::throw_exception(ex);
}
}
// Insert service into linked list of all services.
service->next_ = state->service_list_;
service->prev_ = 0;
if (state->service_list_)
state->service_list_->prev_ = service;
state->service_list_ = service;
#if !defined(ASIO_WINDOWS) \
&& !defined(ASIO_WINDOWS_RUNTIME) \
&& !defined(__CYGWIN__)
// Register for pipe readiness notifications.
int read_descriptor = state->read_descriptor_;
lock.unlock();
# if defined(ASIO_HAS_IO_URING_AS_DEFAULT)
(void)read_descriptor;
service->io_uring_service_.register_internal_io_object(
service->io_object_data_, io_uring_service::read_op, new pipe_read_op);
# else // defined(ASIO_HAS_IO_URING_AS_DEFAULT)
service->reactor_.register_internal_descriptor(reactor::read_op,
read_descriptor, service->reactor_data_, new pipe_read_op);
# endif // defined(ASIO_HAS_IO_URING_AS_DEFAULT)
#endif // !defined(ASIO_WINDOWS)
// && !defined(ASIO_WINDOWS_RUNTIME)
// && !defined(__CYGWIN__)
}
void signal_set_service::remove_service(signal_set_service* service)
{
signal_state* state = get_signal_state();
static_mutex::scoped_lock lock(state->mutex_);
if (service->next_ || service->prev_ || state->service_list_ == service)
{
#if !defined(ASIO_WINDOWS) \
&& !defined(ASIO_WINDOWS_RUNTIME) \
&& !defined(__CYGWIN__)
// Disable the pipe readiness notifications.
int read_descriptor = state->read_descriptor_;
lock.unlock();
# if defined(ASIO_HAS_IO_URING_AS_DEFAULT)
(void)read_descriptor;
service->io_uring_service_.deregister_io_object(service->io_object_data_);
service->io_uring_service_.cleanup_io_object(service->io_object_data_);
lock.lock();
# else // defined(ASIO_HAS_IO_URING_AS_DEFAULT)
service->reactor_.deregister_internal_descriptor(
read_descriptor, service->reactor_data_);
service->reactor_.cleanup_descriptor_data(service->reactor_data_);
lock.lock();
# endif // defined(ASIO_HAS_IO_URING_AS_DEFAULT)
#endif // !defined(ASIO_WINDOWS)
// && !defined(ASIO_WINDOWS_RUNTIME)
// && !defined(__CYGWIN__)
// Remove service from linked list of all services.
if (state->service_list_ == service)
state->service_list_ = service->next_;
if (service->prev_)
service->prev_->next_ = service->next_;
if (service->next_)
service->next_->prev_= service->prev_;
service->next_ = 0;
service->prev_ = 0;
#if !defined(ASIO_WINDOWS) && !defined(__CYGWIN__)
// If this is the last service to be removed, close the pipe.
if (state->service_list_ == 0)
close_descriptors();
#endif // !defined(ASIO_WINDOWS) && !defined(__CYGWIN__)
}
}
void signal_set_service::open_descriptors()
{
#if !defined(ASIO_WINDOWS) \
&& !defined(ASIO_WINDOWS_RUNTIME) \
&& !defined(__CYGWIN__)
signal_state* state = get_signal_state();
int pipe_fds[2];
if (::pipe(pipe_fds) == 0)
{
state->read_descriptor_ = pipe_fds[0];
::fcntl(state->read_descriptor_, F_SETFL, O_NONBLOCK);
state->write_descriptor_ = pipe_fds[1];
::fcntl(state->write_descriptor_, F_SETFL, O_NONBLOCK);
#if defined(FD_CLOEXEC)
::fcntl(state->read_descriptor_, F_SETFD, FD_CLOEXEC);
::fcntl(state->write_descriptor_, F_SETFD, FD_CLOEXEC);
#endif // defined(FD_CLOEXEC)
}
else
{
asio::error_code ec(errno,
asio::error::get_system_category());
asio::detail::throw_error(ec, "signal_set_service pipe");
}
#endif // !defined(ASIO_WINDOWS)
// && !defined(ASIO_WINDOWS_RUNTIME)
// && !defined(__CYGWIN__)
}
void signal_set_service::close_descriptors()
{
#if !defined(ASIO_WINDOWS) \
&& !defined(ASIO_WINDOWS_RUNTIME) \
&& !defined(__CYGWIN__)
signal_state* state = get_signal_state();
if (state->read_descriptor_ != -1)
::close(state->read_descriptor_);
state->read_descriptor_ = -1;
if (state->write_descriptor_ != -1)
::close(state->write_descriptor_);
state->write_descriptor_ = -1;
#endif // !defined(ASIO_WINDOWS)
// && !defined(ASIO_WINDOWS_RUNTIME)
// && !defined(__CYGWIN__)
}
void signal_set_service::start_wait_op(
signal_set_service::implementation_type& impl, signal_op* op)
{
scheduler_.work_started();
signal_state* state = get_signal_state();
static_mutex::scoped_lock lock(state->mutex_);
registration* reg = impl.signals_;
while (reg)
{
if (reg->undelivered_ > 0)
{
--reg->undelivered_;
op->signal_number_ = reg->signal_number_;
scheduler_.post_deferred_completion(op);
return;
}
reg = reg->next_in_set_;
}
impl.queue_.push(op);
}
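// start_wait_op() pairs with deliver_signal() above: a signal that fires
// while no wait is outstanding bumps the registration's undelivered_ count,
// and the next wait consumes one of those counts immediately instead of
// queueing, so signals delivered "between" waits are not lost. Illustrative
// (hypothetical) public-API usage that drives this path:
//
//   asio::signal_set signals(ioc, SIGINT, SIGTERM);
//   signals.async_wait([](const asio::error_code& ec, int signo)
//     { /* handle ec / signo */ });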
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // ASIO_DETAIL_IMPL_SIGNAL_SET_SERVICE_IPP
|
0 | repos/asio/asio/include/asio/detail | repos/asio/asio/include/asio/detail/impl/io_uring_socket_service_base.ipp | //
// detail/impl/io_uring_socket_service_base.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2024 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_IO_URING_SOCKET_SERVICE_BASE_IPP
#define ASIO_DETAIL_IMPL_IO_URING_SOCKET_SERVICE_BASE_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if defined(ASIO_HAS_IO_URING)
#include "asio/detail/io_uring_socket_service_base.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
io_uring_socket_service_base::io_uring_socket_service_base(
execution_context& context)
: io_uring_service_(asio::use_service<io_uring_service>(context))
{
io_uring_service_.init_task();
}
void io_uring_socket_service_base::base_shutdown()
{
}
void io_uring_socket_service_base::construct(
io_uring_socket_service_base::base_implementation_type& impl)
{
impl.socket_ = invalid_socket;
impl.state_ = 0;
impl.io_object_data_ = 0;
}
void io_uring_socket_service_base::base_move_construct(
io_uring_socket_service_base::base_implementation_type& impl,
io_uring_socket_service_base::base_implementation_type& other_impl)
noexcept
{
impl.socket_ = other_impl.socket_;
other_impl.socket_ = invalid_socket;
impl.state_ = other_impl.state_;
other_impl.state_ = 0;
impl.io_object_data_ = other_impl.io_object_data_;
other_impl.io_object_data_ = 0;
}
void io_uring_socket_service_base::base_move_assign(
io_uring_socket_service_base::base_implementation_type& impl,
io_uring_socket_service_base& /*other_service*/,
io_uring_socket_service_base::base_implementation_type& other_impl)
{
destroy(impl);
impl.socket_ = other_impl.socket_;
other_impl.socket_ = invalid_socket;
impl.state_ = other_impl.state_;
other_impl.state_ = 0;
impl.io_object_data_ = other_impl.io_object_data_;
other_impl.io_object_data_ = 0;
}
void io_uring_socket_service_base::destroy(
io_uring_socket_service_base::base_implementation_type& impl)
{
if (impl.socket_ != invalid_socket)
{
ASIO_HANDLER_OPERATION((io_uring_service_.context(),
"socket", &impl, impl.socket_, "close"));
io_uring_service_.deregister_io_object(impl.io_object_data_);
asio::error_code ignored_ec;
socket_ops::close(impl.socket_, impl.state_, true, ignored_ec);
io_uring_service_.cleanup_io_object(impl.io_object_data_);
}
}
asio::error_code io_uring_socket_service_base::close(
io_uring_socket_service_base::base_implementation_type& impl,
asio::error_code& ec)
{
if (is_open(impl))
{
ASIO_HANDLER_OPERATION((io_uring_service_.context(),
"socket", &impl, impl.socket_, "close"));
io_uring_service_.deregister_io_object(impl.io_object_data_);
socket_ops::close(impl.socket_, impl.state_, false, ec);
io_uring_service_.cleanup_io_object(impl.io_object_data_);
}
else
{
ec = success_ec_;
}
// The descriptor is closed by the OS even if close() returns an error.
//
// (Actually, POSIX says the state of the descriptor is unspecified. On
// Linux the descriptor is apparently closed anyway; e.g. see
  // http://lkml.org/lkml/2005/9/10/129)
construct(impl);
return ec;
}
socket_type io_uring_socket_service_base::release(
io_uring_socket_service_base::base_implementation_type& impl,
asio::error_code& ec)
{
if (!is_open(impl))
{
ec = asio::error::bad_descriptor;
return invalid_socket;
}
ASIO_HANDLER_OPERATION((io_uring_service_.context(),
"socket", &impl, impl.socket_, "release"));
io_uring_service_.deregister_io_object(impl.io_object_data_);
io_uring_service_.cleanup_io_object(impl.io_object_data_);
socket_type sock = impl.socket_;
construct(impl);
ec = success_ec_;
return sock;
}
asio::error_code io_uring_socket_service_base::cancel(
io_uring_socket_service_base::base_implementation_type& impl,
asio::error_code& ec)
{
if (!is_open(impl))
{
ec = asio::error::bad_descriptor;
return ec;
}
ASIO_HANDLER_OPERATION((io_uring_service_.context(),
"socket", &impl, impl.socket_, "cancel"));
io_uring_service_.cancel_ops(impl.io_object_data_);
ec = success_ec_;
return ec;
}
asio::error_code io_uring_socket_service_base::do_open(
io_uring_socket_service_base::base_implementation_type& impl,
int af, int type, int protocol, asio::error_code& ec)
{
if (is_open(impl))
{
ec = asio::error::already_open;
return ec;
}
socket_holder sock(socket_ops::socket(af, type, protocol, ec));
if (sock.get() == invalid_socket)
return ec;
io_uring_service_.register_io_object(impl.io_object_data_);
impl.socket_ = sock.release();
switch (type)
{
case SOCK_STREAM: impl.state_ = socket_ops::stream_oriented; break;
case SOCK_DGRAM: impl.state_ = socket_ops::datagram_oriented; break;
default: impl.state_ = 0; break;
}
ec = success_ec_;
return ec;
}
asio::error_code io_uring_socket_service_base::do_assign(
io_uring_socket_service_base::base_implementation_type& impl, int type,
const io_uring_socket_service_base::native_handle_type& native_socket,
asio::error_code& ec)
{
if (is_open(impl))
{
ec = asio::error::already_open;
return ec;
}
io_uring_service_.register_io_object(impl.io_object_data_);
impl.socket_ = native_socket;
switch (type)
{
case SOCK_STREAM: impl.state_ = socket_ops::stream_oriented; break;
case SOCK_DGRAM: impl.state_ = socket_ops::datagram_oriented; break;
default: impl.state_ = 0; break;
}
impl.state_ |= socket_ops::possible_dup;
ec = success_ec_;
return ec;
}
void io_uring_socket_service_base::start_op(
io_uring_socket_service_base::base_implementation_type& impl,
int op_type, io_uring_operation* op, bool is_continuation, bool noop)
{
if (!noop)
{
io_uring_service_.start_op(op_type,
impl.io_object_data_, op, is_continuation);
}
else
{
io_uring_service_.post_immediate_completion(op, is_continuation);
}
}
void io_uring_socket_service_base::start_accept_op(
io_uring_socket_service_base::base_implementation_type& impl,
io_uring_operation* op, bool is_continuation, bool peer_is_open)
{
if (!peer_is_open)
start_op(impl, io_uring_service::read_op, op, is_continuation, false);
else
{
op->ec_ = asio::error::already_open;
io_uring_service_.post_immediate_completion(op, is_continuation);
}
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // defined(ASIO_HAS_IO_URING)
#endif // ASIO_DETAIL_IMPL_IO_URING_SOCKET_SERVICE_BASE_IPP
|
0 | repos/asio/asio/include/asio/detail | repos/asio/asio/include/asio/detail/impl/handler_tracking.ipp | //
// detail/impl/handler_tracking.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2024 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_HANDLER_TRACKING_IPP
#define ASIO_DETAIL_IMPL_HANDLER_TRACKING_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if defined(ASIO_CUSTOM_HANDLER_TRACKING)
// The handler tracking implementation is provided by the user-specified header.
#elif defined(ASIO_ENABLE_HANDLER_TRACKING)
#include <cstdarg>
#include <cstdio>
#include "asio/detail/handler_tracking.hpp"
#if defined(ASIO_HAS_BOOST_DATE_TIME)
# include "asio/time_traits.hpp"
#else // defined(ASIO_HAS_BOOST_DATE_TIME)
# include "asio/detail/chrono.hpp"
# include "asio/detail/chrono_time_traits.hpp"
# include "asio/wait_traits.hpp"
#endif // defined(ASIO_HAS_BOOST_DATE_TIME)
#if defined(ASIO_WINDOWS_RUNTIME)
# include "asio/detail/socket_types.hpp"
#elif !defined(ASIO_WINDOWS)
# include <unistd.h>
#endif // !defined(ASIO_WINDOWS)
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
struct handler_tracking_timestamp
{
uint64_t seconds;
uint64_t microseconds;
handler_tracking_timestamp()
{
#if defined(ASIO_HAS_BOOST_DATE_TIME)
boost::posix_time::ptime epoch(boost::gregorian::date(1970, 1, 1));
boost::posix_time::time_duration now =
boost::posix_time::microsec_clock::universal_time() - epoch;
#else // defined(ASIO_HAS_BOOST_DATE_TIME)
typedef chrono_time_traits<chrono::system_clock,
asio::wait_traits<chrono::system_clock>> traits_helper;
traits_helper::posix_time_duration now(
chrono::system_clock::now().time_since_epoch());
#endif // defined(ASIO_HAS_BOOST_DATE_TIME)
seconds = static_cast<uint64_t>(now.total_seconds());
microseconds = static_cast<uint64_t>(now.total_microseconds() % 1000000);
}
};
struct handler_tracking::tracking_state
{
static_mutex mutex_;
uint64_t next_id_;
tss_ptr<completion>* current_completion_;
tss_ptr<location>* current_location_;
};
handler_tracking::tracking_state* handler_tracking::get_state()
{
static tracking_state state = { ASIO_STATIC_MUTEX_INIT, 1, 0, 0 };
return &state;
}
void handler_tracking::init()
{
static tracking_state* state = get_state();
state->mutex_.init();
static_mutex::scoped_lock lock(state->mutex_);
if (state->current_completion_ == 0)
state->current_completion_ = new tss_ptr<completion>;
if (state->current_location_ == 0)
state->current_location_ = new tss_ptr<location>;
}
handler_tracking::location::location(
const char* file, int line, const char* func)
: file_(file),
line_(line),
func_(func),
next_(*get_state()->current_location_)
{
if (file_)
*get_state()->current_location_ = this;
}
handler_tracking::location::~location()
{
if (file_)
*get_state()->current_location_ = next_;
}
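// Each tracking line written below has the form
// "@asio|<timestamp>|<action>|<description>", where <action> is one of:
//   n*m  handler n created handler m (see creation() below)
//   n^m  source location from which handler m was created
//   >n   entering handler n
//   <n   leaving handler n
//   !n   handler n destroyed after its invocation began (e.g. an exception)
//   ~n   handler n destroyed without ever being invoked
//   .n   reactor operation performed on behalf of handler n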
void handler_tracking::creation(execution_context&,
handler_tracking::tracked_handler& h,
const char* object_type, void* object,
uintmax_t /*native_handle*/, const char* op_name)
{
static tracking_state* state = get_state();
static_mutex::scoped_lock lock(state->mutex_);
h.id_ = state->next_id_++;
lock.unlock();
handler_tracking_timestamp timestamp;
uint64_t current_id = 0;
if (completion* current_completion = *state->current_completion_)
current_id = current_completion->id_;
for (location* current_location = *state->current_location_;
current_location; current_location = current_location->next_)
{
write_line(
#if defined(ASIO_WINDOWS)
"@asio|%I64u.%06I64u|%I64u^%I64u|%s%s%.80s%s(%.80s:%d)\n",
#else // defined(ASIO_WINDOWS)
"@asio|%llu.%06llu|%llu^%llu|%s%s%.80s%s(%.80s:%d)\n",
#endif // defined(ASIO_WINDOWS)
timestamp.seconds, timestamp.microseconds,
current_id, h.id_,
current_location == *state->current_location_ ? "in " : "called from ",
current_location->func_ ? "'" : "",
current_location->func_ ? current_location->func_ : "",
current_location->func_ ? "' " : "",
current_location->file_, current_location->line_);
}
write_line(
#if defined(ASIO_WINDOWS)
"@asio|%I64u.%06I64u|%I64u*%I64u|%.20s@%p.%.50s\n",
#else // defined(ASIO_WINDOWS)
"@asio|%llu.%06llu|%llu*%llu|%.20s@%p.%.50s\n",
#endif // defined(ASIO_WINDOWS)
timestamp.seconds, timestamp.microseconds,
current_id, h.id_, object_type, object, op_name);
}
handler_tracking::completion::completion(
const handler_tracking::tracked_handler& h)
: id_(h.id_),
invoked_(false),
next_(*get_state()->current_completion_)
{
*get_state()->current_completion_ = this;
}
handler_tracking::completion::~completion()
{
if (id_)
{
handler_tracking_timestamp timestamp;
write_line(
#if defined(ASIO_WINDOWS)
"@asio|%I64u.%06I64u|%c%I64u|\n",
#else // defined(ASIO_WINDOWS)
"@asio|%llu.%06llu|%c%llu|\n",
#endif // defined(ASIO_WINDOWS)
timestamp.seconds, timestamp.microseconds,
invoked_ ? '!' : '~', id_);
}
*get_state()->current_completion_ = next_;
}
void handler_tracking::completion::invocation_begin()
{
handler_tracking_timestamp timestamp;
write_line(
#if defined(ASIO_WINDOWS)
"@asio|%I64u.%06I64u|>%I64u|\n",
#else // defined(ASIO_WINDOWS)
"@asio|%llu.%06llu|>%llu|\n",
#endif // defined(ASIO_WINDOWS)
timestamp.seconds, timestamp.microseconds, id_);
invoked_ = true;
}
void handler_tracking::completion::invocation_begin(
const asio::error_code& ec)
{
handler_tracking_timestamp timestamp;
write_line(
#if defined(ASIO_WINDOWS)
"@asio|%I64u.%06I64u|>%I64u|ec=%.20s:%d\n",
#else // defined(ASIO_WINDOWS)
"@asio|%llu.%06llu|>%llu|ec=%.20s:%d\n",
#endif // defined(ASIO_WINDOWS)
timestamp.seconds, timestamp.microseconds,
id_, ec.category().name(), ec.value());
invoked_ = true;
}
void handler_tracking::completion::invocation_begin(
const asio::error_code& ec, std::size_t bytes_transferred)
{
handler_tracking_timestamp timestamp;
write_line(
#if defined(ASIO_WINDOWS)
"@asio|%I64u.%06I64u|>%I64u|ec=%.20s:%d,bytes_transferred=%I64u\n",
#else // defined(ASIO_WINDOWS)
"@asio|%llu.%06llu|>%llu|ec=%.20s:%d,bytes_transferred=%llu\n",
#endif // defined(ASIO_WINDOWS)
timestamp.seconds, timestamp.microseconds,
id_, ec.category().name(), ec.value(),
static_cast<uint64_t>(bytes_transferred));
invoked_ = true;
}
void handler_tracking::completion::invocation_begin(
const asio::error_code& ec, int signal_number)
{
handler_tracking_timestamp timestamp;
write_line(
#if defined(ASIO_WINDOWS)
"@asio|%I64u.%06I64u|>%I64u|ec=%.20s:%d,signal_number=%d\n",
#else // defined(ASIO_WINDOWS)
"@asio|%llu.%06llu|>%llu|ec=%.20s:%d,signal_number=%d\n",
#endif // defined(ASIO_WINDOWS)
timestamp.seconds, timestamp.microseconds,
id_, ec.category().name(), ec.value(), signal_number);
invoked_ = true;
}
void handler_tracking::completion::invocation_begin(
const asio::error_code& ec, const char* arg)
{
handler_tracking_timestamp timestamp;
write_line(
#if defined(ASIO_WINDOWS)
"@asio|%I64u.%06I64u|>%I64u|ec=%.20s:%d,%.50s\n",
#else // defined(ASIO_WINDOWS)
"@asio|%llu.%06llu|>%llu|ec=%.20s:%d,%.50s\n",
#endif // defined(ASIO_WINDOWS)
timestamp.seconds, timestamp.microseconds,
id_, ec.category().name(), ec.value(), arg);
invoked_ = true;
}
void handler_tracking::completion::invocation_end()
{
if (id_)
{
handler_tracking_timestamp timestamp;
write_line(
#if defined(ASIO_WINDOWS)
"@asio|%I64u.%06I64u|<%I64u|\n",
#else // defined(ASIO_WINDOWS)
"@asio|%llu.%06llu|<%llu|\n",
#endif // defined(ASIO_WINDOWS)
timestamp.seconds, timestamp.microseconds, id_);
id_ = 0;
}
}
void handler_tracking::operation(execution_context&,
const char* object_type, void* object,
uintmax_t /*native_handle*/, const char* op_name)
{
static tracking_state* state = get_state();
handler_tracking_timestamp timestamp;
unsigned long long current_id = 0;
if (completion* current_completion = *state->current_completion_)
current_id = current_completion->id_;
write_line(
#if defined(ASIO_WINDOWS)
"@asio|%I64u.%06I64u|%I64u|%.20s@%p.%.50s\n",
#else // defined(ASIO_WINDOWS)
"@asio|%llu.%06llu|%llu|%.20s@%p.%.50s\n",
#endif // defined(ASIO_WINDOWS)
timestamp.seconds, timestamp.microseconds,
current_id, object_type, object, op_name);
}
void handler_tracking::reactor_registration(execution_context& /*context*/,
uintmax_t /*native_handle*/, uintmax_t /*registration*/)
{
}
void handler_tracking::reactor_deregistration(execution_context& /*context*/,
uintmax_t /*native_handle*/, uintmax_t /*registration*/)
{
}
void handler_tracking::reactor_events(execution_context& /*context*/,
uintmax_t /*native_handle*/, unsigned /*events*/)
{
}
void handler_tracking::reactor_operation(
const tracked_handler& h, const char* op_name,
const asio::error_code& ec)
{
handler_tracking_timestamp timestamp;
write_line(
#if defined(ASIO_WINDOWS)
"@asio|%I64u.%06I64u|.%I64u|%s,ec=%.20s:%d\n",
#else // defined(ASIO_WINDOWS)
"@asio|%llu.%06llu|.%llu|%s,ec=%.20s:%d\n",
#endif // defined(ASIO_WINDOWS)
timestamp.seconds, timestamp.microseconds,
h.id_, op_name, ec.category().name(), ec.value());
}
void handler_tracking::reactor_operation(
const tracked_handler& h, const char* op_name,
const asio::error_code& ec, std::size_t bytes_transferred)
{
handler_tracking_timestamp timestamp;
write_line(
#if defined(ASIO_WINDOWS)
"@asio|%I64u.%06I64u|.%I64u|%s,ec=%.20s:%d,bytes_transferred=%I64u\n",
#else // defined(ASIO_WINDOWS)
"@asio|%llu.%06llu|.%llu|%s,ec=%.20s:%d,bytes_transferred=%llu\n",
#endif // defined(ASIO_WINDOWS)
timestamp.seconds, timestamp.microseconds,
h.id_, op_name, ec.category().name(), ec.value(),
static_cast<uint64_t>(bytes_transferred));
}
void handler_tracking::write_line(const char* format, ...)
{
using namespace std; // For vsnprintf (or equivalent).
va_list args;
va_start(args, format);
char line[256] = "";
#if defined(ASIO_HAS_SNPRINTF)
int length = vsnprintf(line, sizeof(line), format, args);
#elif defined(ASIO_HAS_SECURE_RTL)
int length = vsprintf_s(line, sizeof(line), format, args);
#else // defined(ASIO_HAS_SECURE_RTL)
int length = vsprintf(line, format, args);
#endif // defined(ASIO_HAS_SECURE_RTL)
va_end(args);
#if defined(ASIO_WINDOWS_RUNTIME)
wchar_t wline[256] = L"";
mbstowcs_s(0, wline, sizeof(wline) / sizeof(wchar_t), line, length);
::OutputDebugStringW(wline);
#elif defined(ASIO_WINDOWS)
HANDLE stderr_handle = ::GetStdHandle(STD_ERROR_HANDLE);
DWORD bytes_written = 0;
::WriteFile(stderr_handle, line, length, &bytes_written, 0);
#else // defined(ASIO_WINDOWS)
::write(STDERR_FILENO, line, length);
#endif // defined(ASIO_WINDOWS)
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // defined(ASIO_ENABLE_HANDLER_TRACKING)
#endif // ASIO_DETAIL_IMPL_HANDLER_TRACKING_IPP
|
0 | repos/asio/asio/include/asio/detail | repos/asio/asio/include/asio/detail/impl/winsock_init.ipp | //
// detail/impl/winsock_init.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2024 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_WINSOCK_INIT_IPP
#define ASIO_DETAIL_IMPL_WINSOCK_INIT_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)
#include "asio/detail/socket_types.hpp"
#include "asio/detail/winsock_init.hpp"
#include "asio/detail/throw_error.hpp"
#include "asio/error.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
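// winsock_init_base keeps an interlocked reference count of winsock users.
// The first startup() call performs WSAStartup() and records its result; the
// matching final cleanup() call performs WSACleanup(). The manual_* variants
// adjust the count without calling into winsock, for applications that take
// care of initialising winsock themselves.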
void winsock_init_base::startup(data& d,
unsigned char major, unsigned char minor)
{
if (::InterlockedIncrement(&d.init_count_) == 1)
{
WSADATA wsa_data;
long result = ::WSAStartup(MAKEWORD(major, minor), &wsa_data);
::InterlockedExchange(&d.result_, result);
}
}
void winsock_init_base::manual_startup(data& d)
{
if (::InterlockedIncrement(&d.init_count_) == 1)
{
::InterlockedExchange(&d.result_, 0);
}
}
void winsock_init_base::cleanup(data& d)
{
if (::InterlockedDecrement(&d.init_count_) == 0)
{
::WSACleanup();
}
}
void winsock_init_base::manual_cleanup(data& d)
{
::InterlockedDecrement(&d.init_count_);
}
void winsock_init_base::throw_on_error(data& d)
{
long result = ::InterlockedExchangeAdd(&d.result_, 0);
if (result != 0)
{
asio::error_code ec(result,
asio::error::get_system_category());
asio::detail::throw_error(ec, "winsock");
}
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__)
#endif // ASIO_DETAIL_IMPL_WINSOCK_INIT_IPP
|
0 | repos/asio/asio/include/asio/detail | repos/asio/asio/include/asio/detail/impl/dev_poll_reactor.ipp | //
// detail/impl/dev_poll_reactor.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2024 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_DEV_POLL_REACTOR_IPP
#define ASIO_DETAIL_IMPL_DEV_POLL_REACTOR_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if defined(ASIO_HAS_DEV_POLL)
#include "asio/detail/dev_poll_reactor.hpp"
#include "asio/detail/assert.hpp"
#include "asio/detail/scheduler.hpp"
#include "asio/detail/throw_error.hpp"
#include "asio/error.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
dev_poll_reactor::dev_poll_reactor(asio::execution_context& ctx)
: asio::detail::execution_context_service_base<dev_poll_reactor>(ctx),
scheduler_(use_service<scheduler>(ctx)),
mutex_(),
dev_poll_fd_(do_dev_poll_create()),
interrupter_(),
shutdown_(false)
{
// Add the interrupter's descriptor to /dev/poll.
::pollfd ev = { 0, 0, 0 };
ev.fd = interrupter_.read_descriptor();
ev.events = POLLIN | POLLERR;
ev.revents = 0;
::write(dev_poll_fd_, &ev, sizeof(ev));
}
dev_poll_reactor::~dev_poll_reactor()
{
shutdown();
::close(dev_poll_fd_);
}
void dev_poll_reactor::shutdown()
{
asio::detail::mutex::scoped_lock lock(mutex_);
shutdown_ = true;
lock.unlock();
op_queue<operation> ops;
for (int i = 0; i < max_ops; ++i)
op_queue_[i].get_all_operations(ops);
timer_queues_.get_all_timers(ops);
scheduler_.abandon_operations(ops);
}
void dev_poll_reactor::notify_fork(
asio::execution_context::fork_event fork_ev)
{
if (fork_ev == asio::execution_context::fork_child)
{
detail::mutex::scoped_lock lock(mutex_);
if (dev_poll_fd_ != -1)
::close(dev_poll_fd_);
dev_poll_fd_ = -1;
dev_poll_fd_ = do_dev_poll_create();
interrupter_.recreate();
// Add the interrupter's descriptor to /dev/poll.
::pollfd ev = { 0, 0, 0 };
ev.fd = interrupter_.read_descriptor();
ev.events = POLLIN | POLLERR;
ev.revents = 0;
::write(dev_poll_fd_, &ev, sizeof(ev));
// Re-register all descriptors with /dev/poll. The changes will be written
// to the /dev/poll descriptor the next time the reactor is run.
for (int i = 0; i < max_ops; ++i)
{
reactor_op_queue<socket_type>::iterator iter = op_queue_[i].begin();
reactor_op_queue<socket_type>::iterator end = op_queue_[i].end();
for (; iter != end; ++iter)
{
::pollfd& pending_ev = add_pending_event_change(iter->first);
pending_ev.events |= POLLERR | POLLHUP;
switch (i)
{
case read_op: pending_ev.events |= POLLIN; break;
case write_op: pending_ev.events |= POLLOUT; break;
case except_op: pending_ev.events |= POLLPRI; break;
default: break;
}
}
}
interrupter_.interrupt();
}
}
void dev_poll_reactor::init_task()
{
scheduler_.init_task();
}
int dev_poll_reactor::register_descriptor(socket_type, per_descriptor_data&)
{
return 0;
}
int dev_poll_reactor::register_internal_descriptor(int op_type,
socket_type descriptor, per_descriptor_data&, reactor_op* op)
{
asio::detail::mutex::scoped_lock lock(mutex_);
op_queue_[op_type].enqueue_operation(descriptor, op);
::pollfd& ev = add_pending_event_change(descriptor);
ev.events = POLLERR | POLLHUP;
switch (op_type)
{
case read_op: ev.events |= POLLIN; break;
case write_op: ev.events |= POLLOUT; break;
case except_op: ev.events |= POLLPRI; break;
default: break;
}
interrupter_.interrupt();
return 0;
}
void dev_poll_reactor::move_descriptor(socket_type,
dev_poll_reactor::per_descriptor_data&,
dev_poll_reactor::per_descriptor_data&)
{
}
void dev_poll_reactor::call_post_immediate_completion(
operation* op, bool is_continuation, const void* self)
{
static_cast<const dev_poll_reactor*>(self)->post_immediate_completion(
op, is_continuation);
}
void dev_poll_reactor::start_op(int op_type, socket_type descriptor,
dev_poll_reactor::per_descriptor_data&, reactor_op* op,
bool is_continuation, bool allow_speculative,
void (*on_immediate)(operation*, bool, const void*),
const void* immediate_arg)
{
asio::detail::mutex::scoped_lock lock(mutex_);
if (shutdown_)
{
on_immediate(op, is_continuation, immediate_arg);
return;
}
if (allow_speculative)
{
if (op_type != read_op || !op_queue_[except_op].has_operation(descriptor))
{
if (!op_queue_[op_type].has_operation(descriptor))
{
if (op->perform())
{
lock.unlock();
on_immediate(op, is_continuation, immediate_arg);
return;
}
}
}
}
bool first = op_queue_[op_type].enqueue_operation(descriptor, op);
scheduler_.work_started();
if (first)
{
::pollfd& ev = add_pending_event_change(descriptor);
ev.events = POLLERR | POLLHUP;
if (op_type == read_op
|| op_queue_[read_op].has_operation(descriptor))
ev.events |= POLLIN;
if (op_type == write_op
|| op_queue_[write_op].has_operation(descriptor))
ev.events |= POLLOUT;
if (op_type == except_op
|| op_queue_[except_op].has_operation(descriptor))
ev.events |= POLLPRI;
interrupter_.interrupt();
}
}
void dev_poll_reactor::cancel_ops(socket_type descriptor,
dev_poll_reactor::per_descriptor_data&)
{
asio::detail::mutex::scoped_lock lock(mutex_);
cancel_ops_unlocked(descriptor, asio::error::operation_aborted);
}
void dev_poll_reactor::cancel_ops_by_key(socket_type descriptor,
dev_poll_reactor::per_descriptor_data&,
int op_type, void* cancellation_key)
{
asio::detail::mutex::scoped_lock lock(mutex_);
op_queue<operation> ops;
bool need_interrupt = op_queue_[op_type].cancel_operations_by_key(
descriptor, ops, cancellation_key, asio::error::operation_aborted);
scheduler_.post_deferred_completions(ops);
if (need_interrupt)
interrupter_.interrupt();
}
void dev_poll_reactor::deregister_descriptor(socket_type descriptor,
dev_poll_reactor::per_descriptor_data&, bool)
{
asio::detail::mutex::scoped_lock lock(mutex_);
// Remove the descriptor from /dev/poll.
::pollfd& ev = add_pending_event_change(descriptor);
ev.events = POLLREMOVE;
interrupter_.interrupt();
// Cancel any outstanding operations associated with the descriptor.
cancel_ops_unlocked(descriptor, asio::error::operation_aborted);
}
void dev_poll_reactor::deregister_internal_descriptor(
socket_type descriptor, dev_poll_reactor::per_descriptor_data&)
{
asio::detail::mutex::scoped_lock lock(mutex_);
// Remove the descriptor from /dev/poll. Since this function is only called
// during a fork, we can apply the change immediately.
::pollfd ev = { 0, 0, 0 };
ev.fd = descriptor;
ev.events = POLLREMOVE;
ev.revents = 0;
::write(dev_poll_fd_, &ev, sizeof(ev));
// Destroy all operations associated with the descriptor.
op_queue<operation> ops;
asio::error_code ec;
for (int i = 0; i < max_ops; ++i)
op_queue_[i].cancel_operations(descriptor, ops, ec);
}
void dev_poll_reactor::cleanup_descriptor_data(
dev_poll_reactor::per_descriptor_data&)
{
}
void dev_poll_reactor::run(long usec, op_queue<operation>& ops)
{
asio::detail::mutex::scoped_lock lock(mutex_);
// We can return immediately if there's no work to do and the reactor is
// not supposed to block.
if (usec == 0 && op_queue_[read_op].empty() && op_queue_[write_op].empty()
&& op_queue_[except_op].empty() && timer_queues_.all_empty())
return;
// Write the pending event registration changes to the /dev/poll descriptor.
std::size_t events_size = sizeof(::pollfd) * pending_event_changes_.size();
if (events_size > 0)
{
errno = 0;
int result = ::write(dev_poll_fd_,
&pending_event_changes_[0], events_size);
if (result != static_cast<int>(events_size))
{
asio::error_code ec = asio::error_code(
errno, asio::error::get_system_category());
for (std::size_t i = 0; i < pending_event_changes_.size(); ++i)
{
int descriptor = pending_event_changes_[i].fd;
for (int j = 0; j < max_ops; ++j)
op_queue_[j].cancel_operations(descriptor, ops, ec);
}
}
pending_event_changes_.clear();
pending_event_change_index_.clear();
}
// Calculate timeout.
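// The requested wait is given in microseconds. Convert it to whole
// milliseconds, rounding up so that a non-zero wait never becomes a busy
// poll, then clamp it to the time until the nearest timer expiry.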
int timeout;
if (usec == 0)
timeout = 0;
else
{
timeout = (usec < 0) ? -1 : ((usec - 1) / 1000 + 1);
timeout = get_timeout(timeout);
}
lock.unlock();
// Block on the /dev/poll descriptor.
::pollfd events[128] = { { 0, 0, 0 } };
::dvpoll dp = { 0, 0, 0 };
dp.dp_fds = events;
dp.dp_nfds = 128;
dp.dp_timeout = timeout;
int num_events = ::ioctl(dev_poll_fd_, DP_POLL, &dp);
lock.lock();
// Dispatch the waiting events.
for (int i = 0; i < num_events; ++i)
{
int descriptor = events[i].fd;
if (descriptor == interrupter_.read_descriptor())
{
interrupter_.reset();
}
else
{
bool more_reads = false;
bool more_writes = false;
bool more_except = false;
// Exception operations must be processed first to ensure that any
// out-of-band data is read before normal data.
if (events[i].events & (POLLPRI | POLLERR | POLLHUP))
more_except =
op_queue_[except_op].perform_operations(descriptor, ops);
else
more_except = op_queue_[except_op].has_operation(descriptor);
if (events[i].events & (POLLIN | POLLERR | POLLHUP))
more_reads = op_queue_[read_op].perform_operations(descriptor, ops);
else
more_reads = op_queue_[read_op].has_operation(descriptor);
if (events[i].events & (POLLOUT | POLLERR | POLLHUP))
more_writes = op_queue_[write_op].perform_operations(descriptor, ops);
else
more_writes = op_queue_[write_op].has_operation(descriptor);
if ((events[i].events & (POLLERR | POLLHUP)) != 0
&& !more_except && !more_reads && !more_writes)
{
// If we have an event and no operations associated with the
// descriptor then we need to delete the descriptor from /dev/poll.
// The poll operation can produce POLLHUP or POLLERR events when there
// is no operation pending, so if we do not remove the descriptor we
// can end up in a tight polling loop.
::pollfd ev = { 0, 0, 0 };
ev.fd = descriptor;
ev.events = POLLREMOVE;
ev.revents = 0;
::write(dev_poll_fd_, &ev, sizeof(ev));
}
else
{
::pollfd ev = { 0, 0, 0 };
ev.fd = descriptor;
ev.events = POLLERR | POLLHUP;
if (more_reads)
ev.events |= POLLIN;
if (more_writes)
ev.events |= POLLOUT;
if (more_except)
ev.events |= POLLPRI;
ev.revents = 0;
int result = ::write(dev_poll_fd_, &ev, sizeof(ev));
if (result != sizeof(ev))
{
asio::error_code ec(errno,
asio::error::get_system_category());
for (int j = 0; j < max_ops; ++j)
op_queue_[j].cancel_operations(descriptor, ops, ec);
}
}
}
}
timer_queues_.get_ready_timers(ops);
}
void dev_poll_reactor::interrupt()
{
interrupter_.interrupt();
}
int dev_poll_reactor::do_dev_poll_create()
{
int fd = ::open("/dev/poll", O_RDWR);
if (fd == -1)
{
asio::error_code ec(errno,
asio::error::get_system_category());
asio::detail::throw_error(ec, "/dev/poll");
}
return fd;
}
void dev_poll_reactor::do_add_timer_queue(timer_queue_base& queue)
{
mutex::scoped_lock lock(mutex_);
timer_queues_.insert(&queue);
}
void dev_poll_reactor::do_remove_timer_queue(timer_queue_base& queue)
{
mutex::scoped_lock lock(mutex_);
timer_queues_.erase(&queue);
}
int dev_poll_reactor::get_timeout(int msec)
{
// By default we will wait no longer than 5 minutes. This will ensure that
// any changes to the system clock are detected after no longer than this.
const int max_msec = 5 * 60 * 1000;
return timer_queues_.wait_duration_msec(
(msec < 0 || max_msec < msec) ? max_msec : msec);
}
void dev_poll_reactor::cancel_ops_unlocked(socket_type descriptor,
const asio::error_code& ec)
{
bool need_interrupt = false;
op_queue<operation> ops;
for (int i = 0; i < max_ops; ++i)
need_interrupt = op_queue_[i].cancel_operations(
descriptor, ops, ec) || need_interrupt;
scheduler_.post_deferred_completions(ops);
if (need_interrupt)
interrupter_.interrupt();
}
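// Return a reference to the pending /dev/poll event change for the given
// descriptor, creating a new entry if necessary. Changes are coalesced per
// descriptor and written to the /dev/poll descriptor in a single batch the
// next time run() executes.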
::pollfd& dev_poll_reactor::add_pending_event_change(int descriptor)
{
hash_map<int, std::size_t>::iterator iter
= pending_event_change_index_.find(descriptor);
if (iter == pending_event_change_index_.end())
{
std::size_t index = pending_event_changes_.size();
pending_event_changes_.reserve(pending_event_changes_.size() + 1);
pending_event_change_index_.insert(std::make_pair(descriptor, index));
pending_event_changes_.push_back(::pollfd());
pending_event_changes_[index].fd = descriptor;
pending_event_changes_[index].revents = 0;
return pending_event_changes_[index];
}
else
{
return pending_event_changes_[iter->second];
}
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // defined(ASIO_HAS_DEV_POLL)
#endif // ASIO_DETAIL_IMPL_DEV_POLL_REACTOR_IPP
|
0 | repos/asio/asio/include/asio/detail | repos/asio/asio/include/asio/detail/impl/reactive_descriptor_service.ipp | //
// detail/impl/reactive_descriptor_service.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2024 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_REACTIVE_DESCRIPTOR_SERVICE_IPP
#define ASIO_DETAIL_IMPL_REACTIVE_DESCRIPTOR_SERVICE_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if !defined(ASIO_WINDOWS) \
&& !defined(ASIO_WINDOWS_RUNTIME) \
&& !defined(__CYGWIN__) \
&& !defined(ASIO_HAS_IO_URING_AS_DEFAULT)
#include "asio/error.hpp"
#include "asio/detail/reactive_descriptor_service.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
reactive_descriptor_service::reactive_descriptor_service(
execution_context& context)
: execution_context_service_base<reactive_descriptor_service>(context),
reactor_(asio::use_service<reactor>(context))
{
reactor_.init_task();
}
void reactive_descriptor_service::shutdown()
{
}
void reactive_descriptor_service::construct(
reactive_descriptor_service::implementation_type& impl)
{
impl.descriptor_ = -1;
impl.state_ = 0;
impl.reactor_data_ = reactor::per_descriptor_data();
}
void reactive_descriptor_service::move_construct(
reactive_descriptor_service::implementation_type& impl,
reactive_descriptor_service::implementation_type& other_impl)
noexcept
{
impl.descriptor_ = other_impl.descriptor_;
other_impl.descriptor_ = -1;
impl.state_ = other_impl.state_;
other_impl.state_ = 0;
reactor_.move_descriptor(impl.descriptor_,
impl.reactor_data_, other_impl.reactor_data_);
}
void reactive_descriptor_service::move_assign(
reactive_descriptor_service::implementation_type& impl,
reactive_descriptor_service& other_service,
reactive_descriptor_service::implementation_type& other_impl)
{
destroy(impl);
impl.descriptor_ = other_impl.descriptor_;
other_impl.descriptor_ = -1;
impl.state_ = other_impl.state_;
other_impl.state_ = 0;
other_service.reactor_.move_descriptor(impl.descriptor_,
impl.reactor_data_, other_impl.reactor_data_);
}
void reactive_descriptor_service::destroy(
reactive_descriptor_service::implementation_type& impl)
{
if (is_open(impl))
{
ASIO_HANDLER_OPERATION((reactor_.context(),
"descriptor", &impl, impl.descriptor_, "close"));
reactor_.deregister_descriptor(impl.descriptor_, impl.reactor_data_,
(impl.state_ & descriptor_ops::possible_dup) == 0);
asio::error_code ignored_ec;
descriptor_ops::close(impl.descriptor_, impl.state_, ignored_ec);
reactor_.cleanup_descriptor_data(impl.reactor_data_);
}
}
asio::error_code reactive_descriptor_service::assign(
reactive_descriptor_service::implementation_type& impl,
const native_handle_type& native_descriptor, asio::error_code& ec)
{
if (is_open(impl))
{
ec = asio::error::already_open;
ASIO_ERROR_LOCATION(ec);
return ec;
}
if (int err = reactor_.register_descriptor(
native_descriptor, impl.reactor_data_))
{
ec = asio::error_code(err,
asio::error::get_system_category());
ASIO_ERROR_LOCATION(ec);
return ec;
}
impl.descriptor_ = native_descriptor;
impl.state_ = descriptor_ops::possible_dup;
ec = asio::error_code();
return ec;
}
asio::error_code reactive_descriptor_service::close(
reactive_descriptor_service::implementation_type& impl,
asio::error_code& ec)
{
if (is_open(impl))
{
ASIO_HANDLER_OPERATION((reactor_.context(),
"descriptor", &impl, impl.descriptor_, "close"));
reactor_.deregister_descriptor(impl.descriptor_, impl.reactor_data_,
(impl.state_ & descriptor_ops::possible_dup) == 0);
descriptor_ops::close(impl.descriptor_, impl.state_, ec);
reactor_.cleanup_descriptor_data(impl.reactor_data_);
}
else
{
ec = asio::error_code();
}
// The descriptor is closed by the OS even if close() returns an error.
//
// (Actually, POSIX says the state of the descriptor is unspecified. On
// Linux the descriptor is apparently closed anyway; e.g. see
// http://lkml.org/lkml/2005/9/10/129
// We'll just have to assume that other OSes follow the same behaviour.)
construct(impl);
ASIO_ERROR_LOCATION(ec);
return ec;
}
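// Release ownership of the native descriptor. The descriptor is deregistered
// from the reactor but deliberately left open, and the implementation is
// reset so that responsibility for closing it passes to the caller.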
reactive_descriptor_service::native_handle_type
reactive_descriptor_service::release(
reactive_descriptor_service::implementation_type& impl)
{
native_handle_type descriptor = impl.descriptor_;
if (is_open(impl))
{
ASIO_HANDLER_OPERATION((reactor_.context(),
"descriptor", &impl, impl.descriptor_, "release"));
reactor_.deregister_descriptor(impl.descriptor_, impl.reactor_data_, false);
reactor_.cleanup_descriptor_data(impl.reactor_data_);
construct(impl);
}
return descriptor;
}
asio::error_code reactive_descriptor_service::cancel(
reactive_descriptor_service::implementation_type& impl,
asio::error_code& ec)
{
if (!is_open(impl))
{
ec = asio::error::bad_descriptor;
ASIO_ERROR_LOCATION(ec);
return ec;
}
ASIO_HANDLER_OPERATION((reactor_.context(),
"descriptor", &impl, impl.descriptor_, "cancel"));
reactor_.cancel_ops(impl.descriptor_, impl.reactor_data_);
ec = asio::error_code();
return ec;
}
void reactive_descriptor_service::do_start_op(implementation_type& impl,
int op_type, reactor_op* op, bool is_continuation,
bool allow_speculative, bool noop, bool needs_non_blocking,
void (*on_immediate)(operation* op, bool, const void*),
const void* immediate_arg)
{
if (!noop)
{
if ((impl.state_ & descriptor_ops::non_blocking)
|| !needs_non_blocking
|| descriptor_ops::set_internal_non_blocking(
impl.descriptor_, impl.state_, true, op->ec_))
{
reactor_.start_op(op_type, impl.descriptor_, impl.reactor_data_, op,
is_continuation, allow_speculative, on_immediate, immediate_arg);
return;
}
}
on_immediate(op, is_continuation, immediate_arg);
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // !defined(ASIO_WINDOWS)
// && !defined(ASIO_WINDOWS_RUNTIME)
// && !defined(__CYGWIN__)
// && !defined(ASIO_HAS_IO_URING_AS_DEFAULT)
#endif // ASIO_DETAIL_IMPL_REACTIVE_DESCRIPTOR_SERVICE_IPP
|
0 | repos/asio/asio/include/asio/detail | repos/asio/asio/include/asio/detail/impl/kqueue_reactor.hpp | //
// detail/impl/kqueue_reactor.hpp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2024 Christopher M. Kohlhoff (chris at kohlhoff dot com)
// Copyright (c) 2005 Stefan Arentz (stefan at soze dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_KQUEUE_REACTOR_HPP
#define ASIO_DETAIL_IMPL_KQUEUE_REACTOR_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if defined(ASIO_HAS_KQUEUE)
#include "asio/detail/scheduler.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
inline void kqueue_reactor::post_immediate_completion(
operation* op, bool is_continuation) const
{
scheduler_.post_immediate_completion(op, is_continuation);
}
template <typename Time_Traits>
void kqueue_reactor::add_timer_queue(timer_queue<Time_Traits>& queue)
{
do_add_timer_queue(queue);
}
// Remove a timer queue from the reactor.
template <typename Time_Traits>
void kqueue_reactor::remove_timer_queue(timer_queue<Time_Traits>& queue)
{
do_remove_timer_queue(queue);
}
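// Schedule a new timer. If it becomes the earliest outstanding expiry, the
// reactor is interrupted so that the shortened timeout takes effect.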
template <typename Time_Traits>
void kqueue_reactor::schedule_timer(timer_queue<Time_Traits>& queue,
const typename Time_Traits::time_type& time,
typename timer_queue<Time_Traits>::per_timer_data& timer, wait_op* op)
{
mutex::scoped_lock lock(mutex_);
if (shutdown_)
{
scheduler_.post_immediate_completion(op, false);
return;
}
bool earliest = queue.enqueue_timer(time, timer, op);
scheduler_.work_started();
if (earliest)
interrupt();
}
template <typename Time_Traits>
std::size_t kqueue_reactor::cancel_timer(timer_queue<Time_Traits>& queue,
typename timer_queue<Time_Traits>::per_timer_data& timer,
std::size_t max_cancelled)
{
mutex::scoped_lock lock(mutex_);
op_queue<operation> ops;
std::size_t n = queue.cancel_timer(timer, ops, max_cancelled);
lock.unlock();
scheduler_.post_deferred_completions(ops);
return n;
}
template <typename Time_Traits>
void kqueue_reactor::cancel_timer_by_key(timer_queue<Time_Traits>& queue,
typename timer_queue<Time_Traits>::per_timer_data* timer,
void* cancellation_key)
{
mutex::scoped_lock lock(mutex_);
op_queue<operation> ops;
queue.cancel_timer_by_key(timer, ops, cancellation_key);
lock.unlock();
scheduler_.post_deferred_completions(ops);
}
template <typename Time_Traits>
void kqueue_reactor::move_timer(timer_queue<Time_Traits>& queue,
typename timer_queue<Time_Traits>::per_timer_data& target,
typename timer_queue<Time_Traits>::per_timer_data& source)
{
mutex::scoped_lock lock(mutex_);
op_queue<operation> ops;
queue.cancel_timer(target, ops);
queue.move_timer(target, source);
lock.unlock();
scheduler_.post_deferred_completions(ops);
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // defined(ASIO_HAS_KQUEUE)
#endif // ASIO_DETAIL_IMPL_KQUEUE_REACTOR_HPP
|
0 | repos/asio/asio/include/asio/detail | repos/asio/asio/include/asio/detail/impl/posix_mutex.ipp | //
// detail/impl/posix_mutex.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2024 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_POSIX_MUTEX_IPP
#define ASIO_DETAIL_IMPL_POSIX_MUTEX_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if defined(ASIO_HAS_PTHREADS)
#include "asio/detail/posix_mutex.hpp"
#include "asio/detail/throw_error.hpp"
#include "asio/error.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
posix_mutex::posix_mutex()
{
int error = ::pthread_mutex_init(&mutex_, 0);
asio::error_code ec(error,
asio::error::get_system_category());
asio::detail::throw_error(ec, "mutex");
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // defined(ASIO_HAS_PTHREADS)
#endif // ASIO_DETAIL_IMPL_POSIX_MUTEX_IPP
|
0 | repos/asio/asio/include/asio/detail | repos/asio/asio/include/asio/detail/impl/posix_tss_ptr.ipp | //
// detail/impl/posix_tss_ptr.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2024 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_POSIX_TSS_PTR_IPP
#define ASIO_DETAIL_IMPL_POSIX_TSS_PTR_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if defined(ASIO_HAS_PTHREADS)
#include "asio/detail/posix_tss_ptr.hpp"
#include "asio/detail/throw_error.hpp"
#include "asio/error.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
void posix_tss_ptr_create(pthread_key_t& key)
{
int error = ::pthread_key_create(&key, 0);
asio::error_code ec(error,
asio::error::get_system_category());
asio::detail::throw_error(ec, "tss");
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // defined(ASIO_HAS_PTHREADS)
#endif // ASIO_DETAIL_IMPL_POSIX_TSS_PTR_IPP
|
0 | repos/asio/asio/include/asio/detail | repos/asio/asio/include/asio/detail/impl/service_registry.hpp | //
// detail/impl/service_registry.hpp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2024 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_SERVICE_REGISTRY_HPP
#define ASIO_DETAIL_IMPL_SERVICE_REGISTRY_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
template <typename Service>
Service& service_registry::use_service()
{
execution_context::service::key key;
init_key<Service>(key, 0);
factory_type factory = &service_registry::create<Service, execution_context>;
return *static_cast<Service*>(do_use_service(key, factory, &owner_));
}
template <typename Service>
Service& service_registry::use_service(io_context& owner)
{
execution_context::service::key key;
init_key<Service>(key, 0);
factory_type factory = &service_registry::create<Service, io_context>;
return *static_cast<Service*>(do_use_service(key, factory, &owner));
}
template <typename Service>
void service_registry::add_service(Service* new_service)
{
execution_context::service::key key;
init_key<Service>(key, 0);
return do_add_service(key, new_service);
}
template <typename Service>
bool service_registry::has_service() const
{
execution_context::service::key key;
init_key<Service>(key, 0);
return do_has_service(key);
}
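// Services are identified by a key. With RTTI enabled the key is based on
// the typeid of a wrapper around the service type; without RTTI it falls
// back to the address of the service's static id object (see
// init_key_from_id in the corresponding .ipp file).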
template <typename Service>
inline void service_registry::init_key(
execution_context::service::key& key, ...)
{
init_key_from_id(key, Service::id);
}
#if !defined(ASIO_NO_TYPEID)
template <typename Service>
void service_registry::init_key(execution_context::service::key& key,
enable_if_t<is_base_of<typename Service::key_type, Service>::value>*)
{
key.type_info_ = &typeid(typeid_wrapper<Service>);
key.id_ = 0;
}
template <typename Service>
void service_registry::init_key_from_id(execution_context::service::key& key,
const service_id<Service>& /*id*/)
{
key.type_info_ = &typeid(typeid_wrapper<Service>);
key.id_ = 0;
}
#endif // !defined(ASIO_NO_TYPEID)
template <typename Service, typename Owner>
execution_context::service* service_registry::create(void* owner)
{
return new Service(*static_cast<Owner*>(owner));
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // ASIO_DETAIL_IMPL_SERVICE_REGISTRY_HPP
|
0 | repos/asio/asio/include/asio/detail | repos/asio/asio/include/asio/detail/impl/epoll_reactor.ipp | //
// detail/impl/epoll_reactor.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2024 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_EPOLL_REACTOR_IPP
#define ASIO_DETAIL_IMPL_EPOLL_REACTOR_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if defined(ASIO_HAS_EPOLL)
#include <cstddef>
#include <sys/epoll.h>
#include "asio/detail/epoll_reactor.hpp"
#include "asio/detail/scheduler.hpp"
#include "asio/detail/throw_error.hpp"
#include "asio/error.hpp"
#if defined(ASIO_HAS_TIMERFD)
# include <sys/timerfd.h>
#endif // defined(ASIO_HAS_TIMERFD)
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
epoll_reactor::epoll_reactor(asio::execution_context& ctx)
: execution_context_service_base<epoll_reactor>(ctx),
scheduler_(use_service<scheduler>(ctx)),
mutex_(ASIO_CONCURRENCY_HINT_IS_LOCKING(
REACTOR_REGISTRATION, scheduler_.concurrency_hint())),
interrupter_(),
epoll_fd_(do_epoll_create()),
timer_fd_(do_timerfd_create()),
shutdown_(false),
registered_descriptors_mutex_(mutex_.enabled())
{
// Add the interrupter's descriptor to epoll.
epoll_event ev = { 0, { 0 } };
ev.events = EPOLLIN | EPOLLERR | EPOLLET;
ev.data.ptr = &interrupter_;
epoll_ctl(epoll_fd_, EPOLL_CTL_ADD, interrupter_.read_descriptor(), &ev);
interrupter_.interrupt();
// Add the timer descriptor to epoll.
if (timer_fd_ != -1)
{
ev.events = EPOLLIN | EPOLLERR;
ev.data.ptr = &timer_fd_;
epoll_ctl(epoll_fd_, EPOLL_CTL_ADD, timer_fd_, &ev);
}
}
epoll_reactor::~epoll_reactor()
{
if (epoll_fd_ != -1)
close(epoll_fd_);
if (timer_fd_ != -1)
close(timer_fd_);
}
void epoll_reactor::shutdown()
{
mutex::scoped_lock lock(mutex_);
shutdown_ = true;
lock.unlock();
op_queue<operation> ops;
while (descriptor_state* state = registered_descriptors_.first())
{
for (int i = 0; i < max_ops; ++i)
ops.push(state->op_queue_[i]);
state->shutdown_ = true;
registered_descriptors_.free(state);
}
timer_queues_.get_all_timers(ops);
scheduler_.abandon_operations(ops);
}
void epoll_reactor::notify_fork(
asio::execution_context::fork_event fork_ev)
{
if (fork_ev == asio::execution_context::fork_child)
{
if (epoll_fd_ != -1)
::close(epoll_fd_);
epoll_fd_ = -1;
epoll_fd_ = do_epoll_create();
if (timer_fd_ != -1)
::close(timer_fd_);
timer_fd_ = -1;
timer_fd_ = do_timerfd_create();
interrupter_.recreate();
// Add the interrupter's descriptor to epoll.
epoll_event ev = { 0, { 0 } };
ev.events = EPOLLIN | EPOLLERR | EPOLLET;
ev.data.ptr = &interrupter_;
epoll_ctl(epoll_fd_, EPOLL_CTL_ADD, interrupter_.read_descriptor(), &ev);
interrupter_.interrupt();
// Add the timer descriptor to epoll.
if (timer_fd_ != -1)
{
ev.events = EPOLLIN | EPOLLERR;
ev.data.ptr = &timer_fd_;
epoll_ctl(epoll_fd_, EPOLL_CTL_ADD, timer_fd_, &ev);
}
update_timeout();
// Re-register all descriptors with epoll.
mutex::scoped_lock descriptors_lock(registered_descriptors_mutex_);
for (descriptor_state* state = registered_descriptors_.first();
state != 0; state = state->next_)
{
ev.events = state->registered_events_;
ev.data.ptr = state;
int result = epoll_ctl(epoll_fd_, EPOLL_CTL_ADD, state->descriptor_, &ev);
if (result != 0)
{
asio::error_code ec(errno,
asio::error::get_system_category());
asio::detail::throw_error(ec, "epoll re-registration");
}
}
}
}
void epoll_reactor::init_task()
{
scheduler_.init_task();
}
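// Register a descriptor with epoll. Descriptors are added with an
// edge-triggered (EPOLLET) registration covering read, exception and error
// events; EPOLLOUT is only added later, on demand, by start_op().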
int epoll_reactor::register_descriptor(socket_type descriptor,
epoll_reactor::per_descriptor_data& descriptor_data)
{
descriptor_data = allocate_descriptor_state();
ASIO_HANDLER_REACTOR_REGISTRATION((
context(), static_cast<uintmax_t>(descriptor),
reinterpret_cast<uintmax_t>(descriptor_data)));
{
mutex::scoped_lock descriptor_lock(descriptor_data->mutex_);
descriptor_data->reactor_ = this;
descriptor_data->descriptor_ = descriptor;
descriptor_data->shutdown_ = false;
for (int i = 0; i < max_ops; ++i)
descriptor_data->try_speculative_[i] = true;
}
epoll_event ev = { 0, { 0 } };
ev.events = EPOLLIN | EPOLLERR | EPOLLHUP | EPOLLPRI | EPOLLET;
descriptor_data->registered_events_ = ev.events;
ev.data.ptr = descriptor_data;
int result = epoll_ctl(epoll_fd_, EPOLL_CTL_ADD, descriptor, &ev);
if (result != 0)
{
if (errno == EPERM)
{
// This file descriptor type is not supported by epoll. However, if it is
// a regular file then operations on it will not block. We will allow
// this descriptor to be used and fail later if an operation on it would
// otherwise require a trip through the reactor.
descriptor_data->registered_events_ = 0;
return 0;
}
return errno;
}
return 0;
}
int epoll_reactor::register_internal_descriptor(
int op_type, socket_type descriptor,
epoll_reactor::per_descriptor_data& descriptor_data, reactor_op* op)
{
descriptor_data = allocate_descriptor_state();
ASIO_HANDLER_REACTOR_REGISTRATION((
context(), static_cast<uintmax_t>(descriptor),
reinterpret_cast<uintmax_t>(descriptor_data)));
{
mutex::scoped_lock descriptor_lock(descriptor_data->mutex_);
descriptor_data->reactor_ = this;
descriptor_data->descriptor_ = descriptor;
descriptor_data->shutdown_ = false;
descriptor_data->op_queue_[op_type].push(op);
for (int i = 0; i < max_ops; ++i)
descriptor_data->try_speculative_[i] = true;
}
epoll_event ev = { 0, { 0 } };
ev.events = EPOLLIN | EPOLLERR | EPOLLHUP | EPOLLPRI | EPOLLET;
descriptor_data->registered_events_ = ev.events;
ev.data.ptr = descriptor_data;
int result = epoll_ctl(epoll_fd_, EPOLL_CTL_ADD, descriptor, &ev);
if (result != 0)
return errno;
return 0;
}
void epoll_reactor::move_descriptor(socket_type,
epoll_reactor::per_descriptor_data& target_descriptor_data,
epoll_reactor::per_descriptor_data& source_descriptor_data)
{
target_descriptor_data = source_descriptor_data;
source_descriptor_data = 0;
}
void epoll_reactor::call_post_immediate_completion(
operation* op, bool is_continuation, const void* self)
{
static_cast<const epoll_reactor*>(self)->post_immediate_completion(
op, is_continuation);
}
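// Start an asynchronous operation on a registered descriptor. If no
// operation of the same type is already queued and speculative execution is
// permitted, the operation is first attempted immediately; only if it would
// block is it queued and, for write operations, EPOLLOUT added to the
// descriptor's epoll registration.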
void epoll_reactor::start_op(int op_type, socket_type descriptor,
epoll_reactor::per_descriptor_data& descriptor_data, reactor_op* op,
bool is_continuation, bool allow_speculative,
void (*on_immediate)(operation*, bool, const void*),
const void* immediate_arg)
{
if (!descriptor_data)
{
op->ec_ = asio::error::bad_descriptor;
on_immediate(op, is_continuation, immediate_arg);
return;
}
mutex::scoped_lock descriptor_lock(descriptor_data->mutex_);
if (descriptor_data->shutdown_)
{
on_immediate(op, is_continuation, immediate_arg);
return;
}
if (descriptor_data->op_queue_[op_type].empty())
{
if (allow_speculative
&& (op_type != read_op
|| descriptor_data->op_queue_[except_op].empty()))
{
if (descriptor_data->try_speculative_[op_type])
{
if (reactor_op::status status = op->perform())
{
if (status == reactor_op::done_and_exhausted)
if (descriptor_data->registered_events_ != 0)
descriptor_data->try_speculative_[op_type] = false;
descriptor_lock.unlock();
on_immediate(op, is_continuation, immediate_arg);
return;
}
}
if (descriptor_data->registered_events_ == 0)
{
op->ec_ = asio::error::operation_not_supported;
on_immediate(op, is_continuation, immediate_arg);
return;
}
if (op_type == write_op)
{
if ((descriptor_data->registered_events_ & EPOLLOUT) == 0)
{
epoll_event ev = { 0, { 0 } };
ev.events = descriptor_data->registered_events_ | EPOLLOUT;
ev.data.ptr = descriptor_data;
if (epoll_ctl(epoll_fd_, EPOLL_CTL_MOD, descriptor, &ev) == 0)
{
descriptor_data->registered_events_ |= ev.events;
}
else
{
op->ec_ = asio::error_code(errno,
asio::error::get_system_category());
on_immediate(op, is_continuation, immediate_arg);
return;
}
}
}
}
else if (descriptor_data->registered_events_ == 0)
{
op->ec_ = asio::error::operation_not_supported;
on_immediate(op, is_continuation, immediate_arg);
return;
}
else
{
if (op_type == write_op)
{
descriptor_data->registered_events_ |= EPOLLOUT;
}
epoll_event ev = { 0, { 0 } };
ev.events = descriptor_data->registered_events_;
ev.data.ptr = descriptor_data;
epoll_ctl(epoll_fd_, EPOLL_CTL_MOD, descriptor, &ev);
}
}
descriptor_data->op_queue_[op_type].push(op);
scheduler_.work_started();
}
void epoll_reactor::cancel_ops(socket_type,
epoll_reactor::per_descriptor_data& descriptor_data)
{
if (!descriptor_data)
return;
mutex::scoped_lock descriptor_lock(descriptor_data->mutex_);
op_queue<operation> ops;
for (int i = 0; i < max_ops; ++i)
{
while (reactor_op* op = descriptor_data->op_queue_[i].front())
{
op->ec_ = asio::error::operation_aborted;
descriptor_data->op_queue_[i].pop();
ops.push(op);
}
}
descriptor_lock.unlock();
scheduler_.post_deferred_completions(ops);
}
void epoll_reactor::cancel_ops_by_key(socket_type,
epoll_reactor::per_descriptor_data& descriptor_data,
int op_type, void* cancellation_key)
{
if (!descriptor_data)
return;
mutex::scoped_lock descriptor_lock(descriptor_data->mutex_);
op_queue<operation> ops;
op_queue<reactor_op> other_ops;
while (reactor_op* op = descriptor_data->op_queue_[op_type].front())
{
descriptor_data->op_queue_[op_type].pop();
if (op->cancellation_key_ == cancellation_key)
{
op->ec_ = asio::error::operation_aborted;
ops.push(op);
}
else
other_ops.push(op);
}
descriptor_data->op_queue_[op_type].push(other_ops);
descriptor_lock.unlock();
scheduler_.post_deferred_completions(ops);
}
void epoll_reactor::deregister_descriptor(socket_type descriptor,
epoll_reactor::per_descriptor_data& descriptor_data, bool closing)
{
if (!descriptor_data)
return;
mutex::scoped_lock descriptor_lock(descriptor_data->mutex_);
if (!descriptor_data->shutdown_)
{
if (closing)
{
// The descriptor will be automatically removed from the epoll set when
// it is closed.
}
else if (descriptor_data->registered_events_ != 0)
{
epoll_event ev = { 0, { 0 } };
epoll_ctl(epoll_fd_, EPOLL_CTL_DEL, descriptor, &ev);
}
op_queue<operation> ops;
for (int i = 0; i < max_ops; ++i)
{
while (reactor_op* op = descriptor_data->op_queue_[i].front())
{
op->ec_ = asio::error::operation_aborted;
descriptor_data->op_queue_[i].pop();
ops.push(op);
}
}
descriptor_data->descriptor_ = -1;
descriptor_data->shutdown_ = true;
descriptor_lock.unlock();
ASIO_HANDLER_REACTOR_DEREGISTRATION((
context(), static_cast<uintmax_t>(descriptor),
reinterpret_cast<uintmax_t>(descriptor_data)));
scheduler_.post_deferred_completions(ops);
// Leave descriptor_data set so that it will be freed by the subsequent
// call to cleanup_descriptor_data.
}
else
{
// We are shutting down, so prevent cleanup_descriptor_data from freeing
// the descriptor_data object and let the destructor free it instead.
descriptor_data = 0;
}
}
void epoll_reactor::deregister_internal_descriptor(socket_type descriptor,
epoll_reactor::per_descriptor_data& descriptor_data)
{
if (!descriptor_data)
return;
mutex::scoped_lock descriptor_lock(descriptor_data->mutex_);
if (!descriptor_data->shutdown_)
{
epoll_event ev = { 0, { 0 } };
epoll_ctl(epoll_fd_, EPOLL_CTL_DEL, descriptor, &ev);
op_queue<operation> ops;
for (int i = 0; i < max_ops; ++i)
ops.push(descriptor_data->op_queue_[i]);
descriptor_data->descriptor_ = -1;
descriptor_data->shutdown_ = true;
descriptor_lock.unlock();
ASIO_HANDLER_REACTOR_DEREGISTRATION((
context(), static_cast<uintmax_t>(descriptor),
reinterpret_cast<uintmax_t>(descriptor_data)));
// Leave descriptor_data set so that it will be freed by the subsequent
// call to cleanup_descriptor_data.
}
else
{
// We are shutting down, so prevent cleanup_descriptor_data from freeing
// the descriptor_data object and let the destructor free it instead.
descriptor_data = 0;
}
}
void epoll_reactor::cleanup_descriptor_data(
per_descriptor_data& descriptor_data)
{
if (descriptor_data)
{
free_descriptor_state(descriptor_data);
descriptor_data = 0;
}
}
void epoll_reactor::run(long usec, op_queue<operation>& ops)
{
// This code relies on the fact that the scheduler queues the reactor task
// behind all descriptor operations generated by this function. This means,
// that by the time we reach this point, any previously returned descriptor
// operations have already been dequeued. Therefore it is now safe for us to
// reuse and return them for the scheduler to queue again.
// Calculate timeout. Check the timer queues only if timerfd is not in use.
int timeout;
if (usec == 0)
timeout = 0;
else
{
timeout = (usec < 0) ? -1 : ((usec - 1) / 1000 + 1);
if (timer_fd_ == -1)
{
mutex::scoped_lock lock(mutex_);
timeout = get_timeout(timeout);
}
}
// Block on the epoll descriptor.
epoll_event events[128];
int num_events = epoll_wait(epoll_fd_, events, 128, timeout);
#if defined(ASIO_ENABLE_HANDLER_TRACKING)
// Trace the waiting events.
for (int i = 0; i < num_events; ++i)
{
void* ptr = events[i].data.ptr;
if (ptr == &interrupter_)
{
// Ignore.
}
# if defined(ASIO_HAS_TIMERFD)
else if (ptr == &timer_fd_)
{
// Ignore.
}
# endif // defined(ASIO_HAS_TIMERFD)
else
{
unsigned event_mask = 0;
if ((events[i].events & EPOLLIN) != 0)
event_mask |= ASIO_HANDLER_REACTOR_READ_EVENT;
if ((events[i].events & EPOLLOUT) != 0)
event_mask |= ASIO_HANDLER_REACTOR_WRITE_EVENT;
if ((events[i].events & (EPOLLERR | EPOLLHUP)) != 0)
event_mask |= ASIO_HANDLER_REACTOR_ERROR_EVENT;
ASIO_HANDLER_REACTOR_EVENTS((context(),
reinterpret_cast<uintmax_t>(ptr), event_mask));
}
}
#endif // defined(ASIO_ENABLE_HANDLER_TRACKING)
#if defined(ASIO_HAS_TIMERFD)
bool check_timers = (timer_fd_ == -1);
#else // defined(ASIO_HAS_TIMERFD)
bool check_timers = true;
#endif // defined(ASIO_HAS_TIMERFD)
// Dispatch the waiting events.
for (int i = 0; i < num_events; ++i)
{
void* ptr = events[i].data.ptr;
if (ptr == &interrupter_)
{
// No need to reset the interrupter since we're leaving the descriptor
// in a ready-to-read state and relying on edge-triggered notifications
// to make it so that we only get woken up when the descriptor's epoll
// registration is updated.
#if defined(ASIO_HAS_TIMERFD)
if (timer_fd_ == -1)
check_timers = true;
#else // defined(ASIO_HAS_TIMERFD)
check_timers = true;
#endif // defined(ASIO_HAS_TIMERFD)
}
#if defined(ASIO_HAS_TIMERFD)
else if (ptr == &timer_fd_)
{
check_timers = true;
}
#endif // defined(ASIO_HAS_TIMERFD)
else
{
// The descriptor operation doesn't count as work in and of itself, so we
// don't call work_started() here. This still allows the scheduler to
// stop if the only remaining operations are descriptor operations.
descriptor_state* descriptor_data = static_cast<descriptor_state*>(ptr);
if (!ops.is_enqueued(descriptor_data))
{
descriptor_data->set_ready_events(events[i].events);
ops.push(descriptor_data);
}
else
{
descriptor_data->add_ready_events(events[i].events);
}
}
}
if (check_timers)
{
mutex::scoped_lock common_lock(mutex_);
timer_queues_.get_ready_timers(ops);
#if defined(ASIO_HAS_TIMERFD)
if (timer_fd_ != -1)
{
itimerspec new_timeout;
itimerspec old_timeout;
int flags = get_timeout(new_timeout);
timerfd_settime(timer_fd_, flags, &new_timeout, &old_timeout);
}
#endif // defined(ASIO_HAS_TIMERFD)
}
}
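// Wake up a thread that is blocked in epoll_wait(). The interrupter's
// descriptor is kept permanently ready-to-read, so re-arming its
// edge-triggered registration with EPOLL_CTL_MOD causes the ready event to
// be delivered again.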
void epoll_reactor::interrupt()
{
epoll_event ev = { 0, { 0 } };
ev.events = EPOLLIN | EPOLLERR | EPOLLET;
ev.data.ptr = &interrupter_;
epoll_ctl(epoll_fd_, EPOLL_CTL_MOD, interrupter_.read_descriptor(), &ev);
}
int epoll_reactor::do_epoll_create()
{
#if defined(EPOLL_CLOEXEC)
int fd = epoll_create1(EPOLL_CLOEXEC);
#else // defined(EPOLL_CLOEXEC)
int fd = -1;
errno = EINVAL;
#endif // defined(EPOLL_CLOEXEC)
if (fd == -1 && (errno == EINVAL || errno == ENOSYS))
{
fd = epoll_create(epoll_size);
if (fd != -1)
::fcntl(fd, F_SETFD, FD_CLOEXEC);
}
if (fd == -1)
{
asio::error_code ec(errno,
asio::error::get_system_category());
asio::detail::throw_error(ec, "epoll");
}
return fd;
}
int epoll_reactor::do_timerfd_create()
{
#if defined(ASIO_HAS_TIMERFD)
# if defined(TFD_CLOEXEC)
int fd = timerfd_create(CLOCK_MONOTONIC, TFD_CLOEXEC);
# else // defined(TFD_CLOEXEC)
int fd = -1;
errno = EINVAL;
# endif // defined(TFD_CLOEXEC)
if (fd == -1 && errno == EINVAL)
{
fd = timerfd_create(CLOCK_MONOTONIC, 0);
if (fd != -1)
::fcntl(fd, F_SETFD, FD_CLOEXEC);
}
return fd;
#else // defined(ASIO_HAS_TIMERFD)
return -1;
#endif // defined(ASIO_HAS_TIMERFD)
}
epoll_reactor::descriptor_state* epoll_reactor::allocate_descriptor_state()
{
mutex::scoped_lock descriptors_lock(registered_descriptors_mutex_);
return registered_descriptors_.alloc(ASIO_CONCURRENCY_HINT_IS_LOCKING(
REACTOR_IO, scheduler_.concurrency_hint()));
}
void epoll_reactor::free_descriptor_state(epoll_reactor::descriptor_state* s)
{
mutex::scoped_lock descriptors_lock(registered_descriptors_mutex_);
registered_descriptors_.free(s);
}
void epoll_reactor::do_add_timer_queue(timer_queue_base& queue)
{
mutex::scoped_lock lock(mutex_);
timer_queues_.insert(&queue);
}
void epoll_reactor::do_remove_timer_queue(timer_queue_base& queue)
{
mutex::scoped_lock lock(mutex_);
timer_queues_.erase(&queue);
}
void epoll_reactor::update_timeout()
{
#if defined(ASIO_HAS_TIMERFD)
if (timer_fd_ != -1)
{
itimerspec new_timeout;
itimerspec old_timeout;
int flags = get_timeout(new_timeout);
timerfd_settime(timer_fd_, flags, &new_timeout, &old_timeout);
return;
}
#endif // defined(ASIO_HAS_TIMERFD)
interrupt();
}
int epoll_reactor::get_timeout(int msec)
{
// By default we will wait no longer than 5 minutes. This will ensure that
// any changes to the system clock are detected after no longer than this.
const int max_msec = 5 * 60 * 1000;
return timer_queues_.wait_duration_msec(
(msec < 0 || max_msec < msec) ? max_msec : msec);
}
#if defined(ASIO_HAS_TIMERFD)
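// Compute the timerfd setting for the next expiry. A relative timeout is
// used while the nearest timer lies in the future; when a timer is already
// due, an absolute expiry of one nanosecond is used so that the timerfd
// fires immediately.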
int epoll_reactor::get_timeout(itimerspec& ts)
{
ts.it_interval.tv_sec = 0;
ts.it_interval.tv_nsec = 0;
long usec = timer_queues_.wait_duration_usec(5 * 60 * 1000 * 1000);
ts.it_value.tv_sec = usec / 1000000;
ts.it_value.tv_nsec = usec ? (usec % 1000000) * 1000 : 1;
return usec ? 0 : TFD_TIMER_ABSTIME;
}
#endif // defined(ASIO_HAS_TIMERFD)
struct epoll_reactor::perform_io_cleanup_on_block_exit
{
explicit perform_io_cleanup_on_block_exit(epoll_reactor* r)
: reactor_(r), first_op_(0)
{
}
~perform_io_cleanup_on_block_exit()
{
if (first_op_)
{
// Post the remaining completed operations for invocation.
if (!ops_.empty())
reactor_->scheduler_.post_deferred_completions(ops_);
// A user-initiated operation has completed, but there's no need to
// explicitly call work_finished() here. Instead, we'll take advantage of
// the fact that the scheduler will call work_finished() once we return.
}
else
{
// No user-initiated operations have completed, so we need to compensate
// for the work_finished() call that the scheduler will make once this
// operation returns.
reactor_->scheduler_.compensating_work_started();
}
}
epoll_reactor* reactor_;
op_queue<operation> ops_;
operation* first_op_;
};
epoll_reactor::descriptor_state::descriptor_state(bool locking)
: operation(&epoll_reactor::descriptor_state::do_complete),
mutex_(locking)
{
}
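// Perform the operations made ready by the given epoll events. Exception
// (out-of-band) operations are processed before writes and reads, and a
// done_and_exhausted result disables speculative attempts for that operation
// type until the next edge-triggered notification arrives.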
operation* epoll_reactor::descriptor_state::perform_io(uint32_t events)
{
mutex_.lock();
perform_io_cleanup_on_block_exit io_cleanup(reactor_);
mutex::scoped_lock descriptor_lock(mutex_, mutex::scoped_lock::adopt_lock);
// Exception operations must be processed first to ensure that any
// out-of-band data is read before normal data.
static const int flag[max_ops] = { EPOLLIN, EPOLLOUT, EPOLLPRI };
for (int j = max_ops - 1; j >= 0; --j)
{
if (events & (flag[j] | EPOLLERR | EPOLLHUP))
{
try_speculative_[j] = true;
while (reactor_op* op = op_queue_[j].front())
{
if (reactor_op::status status = op->perform())
{
op_queue_[j].pop();
io_cleanup.ops_.push(op);
if (status == reactor_op::done_and_exhausted)
{
try_speculative_[j] = false;
break;
}
}
else
break;
}
}
}
// The first operation will be returned for completion now. The others will
// be posted for later by the io_cleanup object's destructor.
io_cleanup.first_op_ = io_cleanup.ops_.front();
io_cleanup.ops_.pop();
return io_cleanup.first_op_;
}
void epoll_reactor::descriptor_state::do_complete(
void* owner, operation* base,
const asio::error_code& ec, std::size_t bytes_transferred)
{
if (owner)
{
descriptor_state* descriptor_data = static_cast<descriptor_state*>(base);
uint32_t events = static_cast<uint32_t>(bytes_transferred);
if (operation* op = descriptor_data->perform_io(events))
{
op->complete(owner, ec, 0);
}
}
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // defined(ASIO_HAS_EPOLL)
#endif // ASIO_DETAIL_IMPL_EPOLL_REACTOR_IPP
|
0 | repos/asio/asio/include/asio/detail | repos/asio/asio/include/asio/detail/impl/service_registry.ipp | //
// detail/impl/service_registry.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2024 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_SERVICE_REGISTRY_IPP
#define ASIO_DETAIL_IMPL_SERVICE_REGISTRY_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#include <vector>
#include "asio/detail/service_registry.hpp"
#include "asio/detail/throw_exception.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
service_registry::service_registry(execution_context& owner)
: owner_(owner),
first_service_(0)
{
}
service_registry::~service_registry()
{
}
void service_registry::shutdown_services()
{
execution_context::service* service = first_service_;
while (service)
{
service->shutdown();
service = service->next_;
}
}
void service_registry::destroy_services()
{
while (first_service_)
{
execution_context::service* next_service = first_service_->next_;
destroy(first_service_);
first_service_ = next_service;
}
}
void service_registry::notify_fork(execution_context::fork_event fork_ev)
{
// Make a copy of all of the services while holding the lock. We don't want
// to hold the lock while calling into each service, as it may try to call
// back into this class.
std::vector<execution_context::service*> services;
{
asio::detail::mutex::scoped_lock lock(mutex_);
execution_context::service* service = first_service_;
while (service)
{
services.push_back(service);
service = service->next_;
}
}
// If processing the fork_prepare event, we want to go in reverse order of
// service registration, which happens to be the existing order of the
// services in the vector. For the other events we want to go in the other
// direction.
std::size_t num_services = services.size();
if (fork_ev == execution_context::fork_prepare)
for (std::size_t i = 0; i < num_services; ++i)
services[i]->notify_fork(fork_ev);
else
for (std::size_t i = num_services; i > 0; --i)
services[i - 1]->notify_fork(fork_ev);
}
void service_registry::init_key_from_id(execution_context::service::key& key,
const execution_context::id& id)
{
key.type_info_ = 0;
key.id_ = &id;
}
bool service_registry::keys_match(
const execution_context::service::key& key1,
const execution_context::service::key& key2)
{
if (key1.id_ && key2.id_)
if (key1.id_ == key2.id_)
return true;
if (key1.type_info_ && key2.type_info_)
if (*key1.type_info_ == *key2.type_info_)
return true;
return false;
}
void service_registry::destroy(execution_context::service* service)
{
delete service;
}
execution_context::service* service_registry::do_use_service(
const execution_context::service::key& key,
factory_type factory, void* owner)
{
asio::detail::mutex::scoped_lock lock(mutex_);
// First see if there is an existing service object with the given key.
execution_context::service* service = first_service_;
while (service)
{
if (keys_match(service->key_, key))
return service;
service = service->next_;
}
// Create a new service object. The service registry's mutex is not locked
// at this time to allow for nested calls into this function from the new
// service's constructor.
lock.unlock();
auto_service_ptr new_service = { factory(owner) };
new_service.ptr_->key_ = key;
lock.lock();
// Check that nobody else created another service object of the same type
// while the lock was released.
service = first_service_;
while (service)
{
if (keys_match(service->key_, key))
return service;
service = service->next_;
}
// Service was successfully initialised, pass ownership to registry.
new_service.ptr_->next_ = first_service_;
first_service_ = new_service.ptr_;
new_service.ptr_ = 0;
return first_service_;
}
void service_registry::do_add_service(
const execution_context::service::key& key,
execution_context::service* new_service)
{
if (&owner_ != &new_service->context())
asio::detail::throw_exception(invalid_service_owner());
asio::detail::mutex::scoped_lock lock(mutex_);
// Check if there is an existing service object with the given key.
execution_context::service* service = first_service_;
while (service)
{
if (keys_match(service->key_, key))
asio::detail::throw_exception(service_already_exists());
service = service->next_;
}
// Take ownership of the service object.
new_service->key_ = key;
new_service->next_ = first_service_;
first_service_ = new_service;
}
bool service_registry::do_has_service(
const execution_context::service::key& key) const
{
asio::detail::mutex::scoped_lock lock(mutex_);
execution_context::service* service = first_service_;
while (service)
{
if (keys_match(service->key_, key))
return true;
service = service->next_;
}
return false;
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // ASIO_DETAIL_IMPL_SERVICE_REGISTRY_IPP
|
0 | repos/asio/asio/include/asio/detail | repos/asio/asio/include/asio/detail/impl/select_reactor.ipp | //
// detail/impl/select_reactor.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2024 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_SELECT_REACTOR_IPP
#define ASIO_DETAIL_IMPL_SELECT_REACTOR_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if defined(ASIO_HAS_IOCP) \
|| (!defined(ASIO_HAS_DEV_POLL) \
&& !defined(ASIO_HAS_EPOLL) \
&& !defined(ASIO_HAS_KQUEUE) \
&& !defined(ASIO_WINDOWS_RUNTIME))
#include "asio/detail/fd_set_adapter.hpp"
#include "asio/detail/select_reactor.hpp"
#include "asio/detail/signal_blocker.hpp"
#include "asio/detail/socket_ops.hpp"
#if defined(ASIO_HAS_IOCP)
# include "asio/detail/win_iocp_io_context.hpp"
#else // defined(ASIO_HAS_IOCP)
# include "asio/detail/scheduler.hpp"
#endif // defined(ASIO_HAS_IOCP)
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
#if defined(ASIO_HAS_IOCP)
class select_reactor::thread_function
{
public:
explicit thread_function(select_reactor* r)
: this_(r)
{
}
void operator()()
{
this_->run_thread();
}
private:
select_reactor* this_;
};
#endif // defined(ASIO_HAS_IOCP)
select_reactor::select_reactor(asio::execution_context& ctx)
: execution_context_service_base<select_reactor>(ctx),
scheduler_(use_service<scheduler_type>(ctx)),
mutex_(),
interrupter_(),
#if defined(ASIO_HAS_IOCP)
stop_thread_(false),
thread_(0),
restart_reactor_(this),
#endif // defined(ASIO_HAS_IOCP)
shutdown_(false)
{
#if defined(ASIO_HAS_IOCP)
asio::detail::signal_blocker sb;
thread_ = new asio::detail::thread(thread_function(this));
#endif // defined(ASIO_HAS_IOCP)
}
select_reactor::~select_reactor()
{
shutdown();
}
void select_reactor::shutdown()
{
asio::detail::mutex::scoped_lock lock(mutex_);
shutdown_ = true;
#if defined(ASIO_HAS_IOCP)
stop_thread_ = true;
if (thread_)
interrupter_.interrupt();
#endif // defined(ASIO_HAS_IOCP)
lock.unlock();
#if defined(ASIO_HAS_IOCP)
if (thread_)
{
thread_->join();
delete thread_;
thread_ = 0;
}
#endif // defined(ASIO_HAS_IOCP)
op_queue<operation> ops;
for (int i = 0; i < max_ops; ++i)
op_queue_[i].get_all_operations(ops);
timer_queues_.get_all_timers(ops);
scheduler_.abandon_operations(ops);
}
void select_reactor::notify_fork(
asio::execution_context::fork_event fork_ev)
{
#if defined(ASIO_HAS_IOCP)
(void)fork_ev;
#else // defined(ASIO_HAS_IOCP)
if (fork_ev == asio::execution_context::fork_child)
interrupter_.recreate();
#endif // defined(ASIO_HAS_IOCP)
}
void select_reactor::init_task()
{
scheduler_.init_task();
}
int select_reactor::register_descriptor(socket_type,
select_reactor::per_descriptor_data&)
{
return 0;
}
int select_reactor::register_internal_descriptor(
int op_type, socket_type descriptor,
select_reactor::per_descriptor_data&, reactor_op* op)
{
asio::detail::mutex::scoped_lock lock(mutex_);
op_queue_[op_type].enqueue_operation(descriptor, op);
interrupter_.interrupt();
return 0;
}
void select_reactor::move_descriptor(socket_type,
select_reactor::per_descriptor_data&,
select_reactor::per_descriptor_data&)
{
}
void select_reactor::call_post_immediate_completion(
operation* op, bool is_continuation, const void* self)
{
static_cast<const select_reactor*>(self)->post_immediate_completion(
op, is_continuation);
}
void select_reactor::start_op(int op_type, socket_type descriptor,
select_reactor::per_descriptor_data&, reactor_op* op, bool is_continuation,
bool, void (*on_immediate)(operation*, bool, const void*),
const void* immediate_arg)
{
asio::detail::mutex::scoped_lock lock(mutex_);
if (shutdown_)
{
on_immediate(op, is_continuation, immediate_arg);
return;
}
bool first = op_queue_[op_type].enqueue_operation(descriptor, op);
scheduler_.work_started();
if (first)
interrupter_.interrupt();
}
void select_reactor::cancel_ops(socket_type descriptor,
select_reactor::per_descriptor_data&)
{
asio::detail::mutex::scoped_lock lock(mutex_);
cancel_ops_unlocked(descriptor, asio::error::operation_aborted);
}
void select_reactor::cancel_ops_by_key(socket_type descriptor,
select_reactor::per_descriptor_data&,
int op_type, void* cancellation_key)
{
asio::detail::mutex::scoped_lock lock(mutex_);
op_queue<operation> ops;
bool need_interrupt = op_queue_[op_type].cancel_operations_by_key(
descriptor, ops, cancellation_key, asio::error::operation_aborted);
scheduler_.post_deferred_completions(ops);
if (need_interrupt)
interrupter_.interrupt();
}
void select_reactor::deregister_descriptor(socket_type descriptor,
select_reactor::per_descriptor_data&, bool)
{
asio::detail::mutex::scoped_lock lock(mutex_);
cancel_ops_unlocked(descriptor, asio::error::operation_aborted);
}
void select_reactor::deregister_internal_descriptor(
socket_type descriptor, select_reactor::per_descriptor_data&)
{
asio::detail::mutex::scoped_lock lock(mutex_);
op_queue<operation> ops;
for (int i = 0; i < max_ops; ++i)
op_queue_[i].cancel_operations(descriptor, ops);
}
void select_reactor::cleanup_descriptor_data(
select_reactor::per_descriptor_data&)
{
}
void select_reactor::run(long usec, op_queue<operation>& ops)
{
asio::detail::mutex::scoped_lock lock(mutex_);
#if defined(ASIO_HAS_IOCP)
// Check if the thread is supposed to stop.
if (stop_thread_)
return;
#endif // defined(ASIO_HAS_IOCP)
// Set up the descriptor sets.
for (int i = 0; i < max_select_ops; ++i)
fd_sets_[i].reset();
fd_sets_[read_op].set(interrupter_.read_descriptor());
socket_type max_fd = 0;
bool have_work_to_do = !timer_queues_.all_empty();
for (int i = 0; i < max_select_ops; ++i)
{
have_work_to_do = have_work_to_do || !op_queue_[i].empty();
fd_sets_[i].set(op_queue_[i], ops);
if (fd_sets_[i].max_descriptor() > max_fd)
max_fd = fd_sets_[i].max_descriptor();
}
#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)
// Connection operations on Windows use both except and write fd_sets.
have_work_to_do = have_work_to_do || !op_queue_[connect_op].empty();
fd_sets_[write_op].set(op_queue_[connect_op], ops);
if (fd_sets_[write_op].max_descriptor() > max_fd)
max_fd = fd_sets_[write_op].max_descriptor();
fd_sets_[except_op].set(op_queue_[connect_op], ops);
if (fd_sets_[except_op].max_descriptor() > max_fd)
max_fd = fd_sets_[except_op].max_descriptor();
#endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__)
// We can return immediately if there's no work to do and the reactor is
// not supposed to block.
if (!usec && !have_work_to_do)
return;
// Determine how long to block while waiting for events.
timeval tv_buf = { 0, 0 };
timeval* tv = usec ? get_timeout(usec, tv_buf) : &tv_buf;
lock.unlock();
// Block on the select call until descriptors become ready.
asio::error_code ec;
int retval = socket_ops::select(static_cast<int>(max_fd + 1),
fd_sets_[read_op], fd_sets_[write_op], fd_sets_[except_op], tv, ec);
// Reset the interrupter.
if (retval > 0 && fd_sets_[read_op].is_set(interrupter_.read_descriptor()))
{
if (!interrupter_.reset())
{
lock.lock();
#if defined(ASIO_HAS_IOCP)
stop_thread_ = true;
scheduler_.post_immediate_completion(&restart_reactor_, false);
#else // defined(ASIO_HAS_IOCP)
interrupter_.recreate();
#endif // defined(ASIO_HAS_IOCP)
}
--retval;
}
lock.lock();
// Dispatch all ready operations.
if (retval > 0)
{
#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)
// Connection operations on Windows use both except and write fd_sets.
fd_sets_[except_op].perform(op_queue_[connect_op], ops);
fd_sets_[write_op].perform(op_queue_[connect_op], ops);
#endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__)
// Exception operations must be processed first to ensure that any
// out-of-band data is read before normal data.
for (int i = max_select_ops - 1; i >= 0; --i)
fd_sets_[i].perform(op_queue_[i], ops);
}
timer_queues_.get_ready_timers(ops);
}
void select_reactor::interrupt()
{
interrupter_.interrupt();
}
#if defined(ASIO_HAS_IOCP)
void select_reactor::run_thread()
{
asio::detail::mutex::scoped_lock lock(mutex_);
while (!stop_thread_)
{
lock.unlock();
op_queue<operation> ops;
run(-1, ops);
scheduler_.post_deferred_completions(ops);
lock.lock();
}
}
void select_reactor::restart_reactor::do_complete(void* owner, operation* base,
const asio::error_code& /*ec*/, std::size_t /*bytes_transferred*/)
{
if (owner)
{
select_reactor* reactor = static_cast<restart_reactor*>(base)->reactor_;
if (reactor->thread_)
{
reactor->thread_->join();
delete reactor->thread_;
reactor->thread_ = 0;
}
asio::detail::mutex::scoped_lock lock(reactor->mutex_);
reactor->interrupter_.recreate();
reactor->stop_thread_ = false;
lock.unlock();
asio::detail::signal_blocker sb;
reactor->thread_ =
new asio::detail::thread(thread_function(reactor));
}
}
#endif // defined(ASIO_HAS_IOCP)
void select_reactor::do_add_timer_queue(timer_queue_base& queue)
{
mutex::scoped_lock lock(mutex_);
timer_queues_.insert(&queue);
}
void select_reactor::do_remove_timer_queue(timer_queue_base& queue)
{
mutex::scoped_lock lock(mutex_);
timer_queues_.erase(&queue);
}
timeval* select_reactor::get_timeout(long usec, timeval& tv)
{
  // By default we will wait no longer than 5 minutes. This ensures that any
  // change to the system clock is detected within at most that interval.
const long max_usec = 5 * 60 * 1000 * 1000;
usec = timer_queues_.wait_duration_usec(
(usec < 0 || max_usec < usec) ? max_usec : usec);
tv.tv_sec = usec / 1000000;
tv.tv_usec = usec % 1000000;
return &tv;
}
void select_reactor::cancel_ops_unlocked(socket_type descriptor,
const asio::error_code& ec)
{
bool need_interrupt = false;
op_queue<operation> ops;
for (int i = 0; i < max_ops; ++i)
need_interrupt = op_queue_[i].cancel_operations(
descriptor, ops, ec) || need_interrupt;
scheduler_.post_deferred_completions(ops);
if (need_interrupt)
interrupter_.interrupt();
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // defined(ASIO_HAS_IOCP)
// || (!defined(ASIO_HAS_DEV_POLL)
// && !defined(ASIO_HAS_EPOLL)
       // && !defined(ASIO_HAS_KQUEUE)
// && !defined(ASIO_WINDOWS_RUNTIME))
#endif // ASIO_DETAIL_IMPL_SELECT_REACTOR_IPP
|
0 | repos/asio/asio/include/asio | repos/asio/asio/include/asio/generic/datagram_protocol.hpp | //
// generic/datagram_protocol.hpp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2024 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_GENERIC_DATAGRAM_PROTOCOL_HPP
#define ASIO_GENERIC_DATAGRAM_PROTOCOL_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#include <typeinfo>
#include "asio/basic_datagram_socket.hpp"
#include "asio/detail/socket_types.hpp"
#include "asio/detail/throw_exception.hpp"
#include "asio/generic/basic_endpoint.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace generic {
/// Encapsulates the flags needed for a generic datagram-oriented socket.
/**
* The asio::generic::datagram_protocol class contains flags necessary
* for datagram-oriented sockets of any address family and protocol.
*
* @par Examples
* Constructing using a native address family and socket protocol:
* @code datagram_protocol p(AF_INET, IPPROTO_UDP); @endcode
* Constructing from a specific protocol type:
* @code datagram_protocol p(asio::ip::udp::v4()); @endcode
*
* @par Thread Safety
* @e Distinct @e objects: Safe.@n
* @e Shared @e objects: Safe.
*
* @par Concepts:
* Protocol.
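 *
 * @par Example
 * A minimal illustrative sketch (the names used are arbitrary, and an
 * @c asio::io_context is assumed to be available) of opening a UDP socket
 * through this protocol class:
 * @code
 * asio::io_context ioc;
 * asio::generic::datagram_protocol proto(AF_INET, IPPROTO_UDP);
 * asio::generic::datagram_protocol::socket sock(ioc, proto);
 * @endcode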
*/
class datagram_protocol
{
public:
/// Construct a protocol object for a specific address family and protocol.
datagram_protocol(int address_family, int socket_protocol)
: family_(address_family),
protocol_(socket_protocol)
{
}
/// Construct a generic protocol object from a specific protocol.
/**
* @throws @c bad_cast Thrown if the source protocol is not datagram-oriented.
*/
template <typename Protocol>
datagram_protocol(const Protocol& source_protocol)
: family_(source_protocol.family()),
protocol_(source_protocol.protocol())
{
if (source_protocol.type() != type())
{
std::bad_cast ex;
asio::detail::throw_exception(ex);
}
}
/// Obtain an identifier for the type of the protocol.
int type() const noexcept
{
return ASIO_OS_DEF(SOCK_DGRAM);
}
/// Obtain an identifier for the protocol.
int protocol() const noexcept
{
return protocol_;
}
/// Obtain an identifier for the protocol family.
int family() const noexcept
{
return family_;
}
/// Compare two protocols for equality.
friend bool operator==(const datagram_protocol& p1,
const datagram_protocol& p2)
{
return p1.family_ == p2.family_ && p1.protocol_ == p2.protocol_;
}
/// Compare two protocols for inequality.
friend bool operator!=(const datagram_protocol& p1,
const datagram_protocol& p2)
{
return !(p1 == p2);
}
/// The type of an endpoint.
typedef basic_endpoint<datagram_protocol> endpoint;
/// The generic socket type.
typedef basic_datagram_socket<datagram_protocol> socket;
private:
int family_;
int protocol_;
};
} // namespace generic
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // ASIO_GENERIC_DATAGRAM_PROTOCOL_HPP
|
0 | repos/asio/asio/include/asio | repos/asio/asio/include/asio/generic/stream_protocol.hpp | //
// generic/stream_protocol.hpp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2024 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_GENERIC_STREAM_PROTOCOL_HPP
#define ASIO_GENERIC_STREAM_PROTOCOL_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#include <typeinfo>
#include "asio/basic_socket_iostream.hpp"
#include "asio/basic_stream_socket.hpp"
#include "asio/detail/socket_types.hpp"
#include "asio/detail/throw_exception.hpp"
#include "asio/generic/basic_endpoint.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace generic {
/// Encapsulates the flags needed for a generic stream-oriented socket.
/**
* The asio::generic::stream_protocol class contains flags necessary for
* stream-oriented sockets of any address family and protocol.
*
* @par Examples
* Constructing using a native address family and socket protocol:
* @code stream_protocol p(AF_INET, IPPROTO_TCP); @endcode
* Constructing from a specific protocol type:
* @code stream_protocol p(asio::ip::tcp::v4()); @endcode
*
* @par Thread Safety
* @e Distinct @e objects: Safe.@n
* @e Shared @e objects: Safe.
*
* @par Concepts:
* Protocol.
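 *
 * @par Example
 * A minimal illustrative sketch (the names used are arbitrary, and an
 * @c asio::io_context is assumed to be available) of opening a TCP socket
 * through this protocol class:
 * @code
 * asio::io_context ioc;
 * asio::generic::stream_protocol proto(AF_INET, IPPROTO_TCP);
 * asio::generic::stream_protocol::socket sock(ioc, proto);
 * @endcode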
*/
class stream_protocol
{
public:
/// Construct a protocol object for a specific address family and protocol.
stream_protocol(int address_family, int socket_protocol)
: family_(address_family),
protocol_(socket_protocol)
{
}
/// Construct a generic protocol object from a specific protocol.
/**
* @throws @c bad_cast Thrown if the source protocol is not stream-oriented.
*/
template <typename Protocol>
stream_protocol(const Protocol& source_protocol)
: family_(source_protocol.family()),
protocol_(source_protocol.protocol())
{
if (source_protocol.type() != type())
{
std::bad_cast ex;
asio::detail::throw_exception(ex);
}
}
/// Obtain an identifier for the type of the protocol.
int type() const noexcept
{
return ASIO_OS_DEF(SOCK_STREAM);
}
/// Obtain an identifier for the protocol.
int protocol() const noexcept
{
return protocol_;
}
/// Obtain an identifier for the protocol family.
int family() const noexcept
{
return family_;
}
/// Compare two protocols for equality.
friend bool operator==(const stream_protocol& p1, const stream_protocol& p2)
{
return p1.family_ == p2.family_ && p1.protocol_ == p2.protocol_;
}
/// Compare two protocols for inequality.
friend bool operator!=(const stream_protocol& p1, const stream_protocol& p2)
{
return !(p1 == p2);
}
/// The type of an endpoint.
typedef basic_endpoint<stream_protocol> endpoint;
/// The generic socket type.
typedef basic_stream_socket<stream_protocol> socket;
#if !defined(ASIO_NO_IOSTREAM)
/// The generic socket iostream type.
typedef basic_socket_iostream<stream_protocol> iostream;
#endif // !defined(ASIO_NO_IOSTREAM)
private:
int family_;
int protocol_;
};
} // namespace generic
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // ASIO_GENERIC_STREAM_PROTOCOL_HPP
|
0 | repos/asio/asio/include/asio | repos/asio/asio/include/asio/generic/seq_packet_protocol.hpp | //
// generic/seq_packet_protocol.hpp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2024 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_GENERIC_SEQ_PACKET_PROTOCOL_HPP
#define ASIO_GENERIC_SEQ_PACKET_PROTOCOL_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#include <typeinfo>
#include "asio/basic_seq_packet_socket.hpp"
#include "asio/detail/socket_types.hpp"
#include "asio/detail/throw_exception.hpp"
#include "asio/generic/basic_endpoint.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace generic {
/// Encapsulates the flags needed for a generic sequenced packet socket.
/**
* The asio::generic::seq_packet_protocol class contains flags necessary
* for seq_packet-oriented sockets of any address family and protocol.
*
* @par Examples
* Constructing using a native address family and socket protocol:
* @code seq_packet_protocol p(AF_INET, IPPROTO_SCTP); @endcode
*
* @par Thread Safety
* @e Distinct @e objects: Safe.@n
* @e Shared @e objects: Safe.
*
* @par Concepts:
* Protocol.
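 *
 * @par Example
 * A minimal illustrative sketch (the names used are arbitrary, and SCTP
 * support on the target platform is assumed) of opening a sequenced packet
 * socket through this protocol class:
 * @code
 * asio::io_context ioc;
 * asio::generic::seq_packet_protocol proto(AF_INET, IPPROTO_SCTP);
 * asio::generic::seq_packet_protocol::socket sock(ioc, proto);
 * @endcode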
*/
class seq_packet_protocol
{
public:
/// Construct a protocol object for a specific address family and protocol.
seq_packet_protocol(int address_family, int socket_protocol)
: family_(address_family),
protocol_(socket_protocol)
{
}
/// Construct a generic protocol object from a specific protocol.
/**
* @throws @c bad_cast Thrown if the source protocol is not based around
* sequenced packets.
*/
template <typename Protocol>
seq_packet_protocol(const Protocol& source_protocol)
: family_(source_protocol.family()),
protocol_(source_protocol.protocol())
{
if (source_protocol.type() != type())
{
std::bad_cast ex;
asio::detail::throw_exception(ex);
}
}
/// Obtain an identifier for the type of the protocol.
int type() const noexcept
{
return ASIO_OS_DEF(SOCK_SEQPACKET);
}
/// Obtain an identifier for the protocol.
int protocol() const noexcept
{
return protocol_;
}
/// Obtain an identifier for the protocol family.
int family() const noexcept
{
return family_;
}
/// Compare two protocols for equality.
friend bool operator==(const seq_packet_protocol& p1,
const seq_packet_protocol& p2)
{
return p1.family_ == p2.family_ && p1.protocol_ == p2.protocol_;
}
/// Compare two protocols for inequality.
friend bool operator!=(const seq_packet_protocol& p1,
const seq_packet_protocol& p2)
{
return !(p1 == p2);
}
/// The type of an endpoint.
typedef basic_endpoint<seq_packet_protocol> endpoint;
/// The generic socket type.
typedef basic_seq_packet_socket<seq_packet_protocol> socket;
private:
int family_;
int protocol_;
};
} // namespace generic
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // ASIO_GENERIC_SEQ_PACKET_PROTOCOL_HPP
|
0 | repos/asio/asio/include/asio | repos/asio/asio/include/asio/generic/basic_endpoint.hpp | //
// generic/basic_endpoint.hpp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2024 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_GENERIC_BASIC_ENDPOINT_HPP
#define ASIO_GENERIC_BASIC_ENDPOINT_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#include "asio/generic/detail/endpoint.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace generic {
/// Describes an endpoint for any socket type.
/**
* The asio::generic::basic_endpoint class template describes an endpoint
* that may be associated with any socket type.
*
 * @note The socket type's sockaddr type must be able to fit into a
* @c sockaddr_storage structure.
*
* @par Thread Safety
* @e Distinct @e objects: Safe.@n
* @e Shared @e objects: Unsafe.
*
* @par Concepts:
* Endpoint.
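 *
 * @par Example
 * A minimal illustrative sketch of converting a protocol-specific endpoint
 * into a generic endpoint (the address and port are arbitrary, and the
 * @c asio::ip::tcp headers are assumed to be included):
 * @code
 * asio::ip::tcp::endpoint tcp_ep(asio::ip::make_address("127.0.0.1"), 80);
 * asio::generic::stream_protocol::endpoint generic_ep(tcp_ep);
 * @endcode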
*/
template <typename Protocol>
class basic_endpoint
{
public:
/// The protocol type associated with the endpoint.
typedef Protocol protocol_type;
/// The type of the endpoint structure. This type is dependent on the
/// underlying implementation of the socket layer.
#if defined(GENERATING_DOCUMENTATION)
typedef implementation_defined data_type;
#else
typedef asio::detail::socket_addr_type data_type;
#endif
/// Default constructor.
basic_endpoint() noexcept
{
}
/// Construct an endpoint from the specified socket address.
basic_endpoint(const void* socket_address,
std::size_t socket_address_size, int socket_protocol = 0)
: impl_(socket_address, socket_address_size, socket_protocol)
{
}
/// Construct an endpoint from the specific endpoint type.
template <typename Endpoint>
basic_endpoint(const Endpoint& endpoint)
: impl_(endpoint.data(), endpoint.size(), endpoint.protocol().protocol())
{
}
/// Copy constructor.
basic_endpoint(const basic_endpoint& other)
: impl_(other.impl_)
{
}
/// Move constructor.
basic_endpoint(basic_endpoint&& other)
: impl_(other.impl_)
{
}
/// Assign from another endpoint.
basic_endpoint& operator=(const basic_endpoint& other)
{
impl_ = other.impl_;
return *this;
}
/// Move-assign from another endpoint.
basic_endpoint& operator=(basic_endpoint&& other)
{
impl_ = other.impl_;
return *this;
}
/// The protocol associated with the endpoint.
protocol_type protocol() const
{
return protocol_type(impl_.family(), impl_.protocol());
}
/// Get the underlying endpoint in the native type.
data_type* data()
{
return impl_.data();
}
/// Get the underlying endpoint in the native type.
const data_type* data() const
{
return impl_.data();
}
/// Get the underlying size of the endpoint in the native type.
std::size_t size() const
{
return impl_.size();
}
/// Set the underlying size of the endpoint in the native type.
void resize(std::size_t new_size)
{
impl_.resize(new_size);
}
/// Get the capacity of the endpoint in the native type.
std::size_t capacity() const
{
return impl_.capacity();
}
/// Compare two endpoints for equality.
friend bool operator==(const basic_endpoint<Protocol>& e1,
const basic_endpoint<Protocol>& e2)
{
return e1.impl_ == e2.impl_;
}
/// Compare two endpoints for inequality.
friend bool operator!=(const basic_endpoint<Protocol>& e1,
const basic_endpoint<Protocol>& e2)
{
return !(e1.impl_ == e2.impl_);
}
/// Compare endpoints for ordering.
friend bool operator<(const basic_endpoint<Protocol>& e1,
const basic_endpoint<Protocol>& e2)
{
return e1.impl_ < e2.impl_;
}
/// Compare endpoints for ordering.
friend bool operator>(const basic_endpoint<Protocol>& e1,
const basic_endpoint<Protocol>& e2)
{
return e2.impl_ < e1.impl_;
}
/// Compare endpoints for ordering.
friend bool operator<=(const basic_endpoint<Protocol>& e1,
const basic_endpoint<Protocol>& e2)
{
return !(e2 < e1);
}
/// Compare endpoints for ordering.
friend bool operator>=(const basic_endpoint<Protocol>& e1,
const basic_endpoint<Protocol>& e2)
{
return !(e1 < e2);
}
private:
// The underlying generic endpoint.
asio::generic::detail::endpoint impl_;
};
} // namespace generic
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // ASIO_GENERIC_BASIC_ENDPOINT_HPP
|
0 | repos/asio/asio/include/asio | repos/asio/asio/include/asio/generic/raw_protocol.hpp | //
// generic/raw_protocol.hpp
// ~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2024 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_GENERIC_RAW_PROTOCOL_HPP
#define ASIO_GENERIC_RAW_PROTOCOL_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#include <typeinfo>
#include "asio/basic_raw_socket.hpp"
#include "asio/detail/socket_types.hpp"
#include "asio/detail/throw_exception.hpp"
#include "asio/generic/basic_endpoint.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace generic {
/// Encapsulates the flags needed for a generic raw socket.
/**
* The asio::generic::raw_protocol class contains flags necessary for
* raw sockets of any address family and protocol.
*
* @par Examples
* Constructing using a native address family and socket protocol:
* @code raw_protocol p(AF_INET, IPPROTO_ICMP); @endcode
* Constructing from a specific protocol type:
* @code raw_protocol p(asio::ip::icmp::v4()); @endcode
*
* @par Thread Safety
* @e Distinct @e objects: Safe.@n
* @e Shared @e objects: Safe.
*
* @par Concepts:
* Protocol.
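 *
 * @par Example
 * A minimal illustrative sketch (the names used are arbitrary; note that raw
 * sockets typically require elevated privileges) of opening an ICMP socket
 * through this protocol class:
 * @code
 * asio::io_context ioc;
 * asio::generic::raw_protocol proto(AF_INET, IPPROTO_ICMP);
 * asio::generic::raw_protocol::socket sock(ioc, proto);
 * @endcode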
*/
class raw_protocol
{
public:
/// Construct a protocol object for a specific address family and protocol.
raw_protocol(int address_family, int socket_protocol)
: family_(address_family),
protocol_(socket_protocol)
{
}
/// Construct a generic protocol object from a specific protocol.
/**
* @throws @c bad_cast Thrown if the source protocol is not raw-oriented.
*/
template <typename Protocol>
raw_protocol(const Protocol& source_protocol)
: family_(source_protocol.family()),
protocol_(source_protocol.protocol())
{
if (source_protocol.type() != type())
{
std::bad_cast ex;
asio::detail::throw_exception(ex);
}
}
/// Obtain an identifier for the type of the protocol.
int type() const noexcept
{
return ASIO_OS_DEF(SOCK_RAW);
}
/// Obtain an identifier for the protocol.
int protocol() const noexcept
{
return protocol_;
}
/// Obtain an identifier for the protocol family.
int family() const noexcept
{
return family_;
}
/// Compare two protocols for equality.
friend bool operator==(const raw_protocol& p1, const raw_protocol& p2)
{
return p1.family_ == p2.family_ && p1.protocol_ == p2.protocol_;
}
/// Compare two protocols for inequality.
friend bool operator!=(const raw_protocol& p1, const raw_protocol& p2)
{
return !(p1 == p2);
}
/// The type of an endpoint.
typedef basic_endpoint<raw_protocol> endpoint;
/// The generic socket type.
typedef basic_raw_socket<raw_protocol> socket;
private:
int family_;
int protocol_;
};
} // namespace generic
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // ASIO_GENERIC_RAW_PROTOCOL_HPP
|
0 | repos/asio/asio/include/asio/generic | repos/asio/asio/include/asio/generic/detail/endpoint.hpp | //
// generic/detail/endpoint.hpp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2024 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_GENERIC_DETAIL_ENDPOINT_HPP
#define ASIO_GENERIC_DETAIL_ENDPOINT_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#include <cstddef>
#include "asio/detail/socket_types.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace generic {
namespace detail {
// Helper class for implementing a generic socket endpoint.
class endpoint
{
public:
// Default constructor.
ASIO_DECL endpoint();
// Construct an endpoint from the specified raw bytes.
ASIO_DECL endpoint(const void* sock_addr,
std::size_t sock_addr_size, int sock_protocol);
// Copy constructor.
endpoint(const endpoint& other)
: data_(other.data_),
size_(other.size_),
protocol_(other.protocol_)
{
}
// Assign from another endpoint.
endpoint& operator=(const endpoint& other)
{
data_ = other.data_;
size_ = other.size_;
protocol_ = other.protocol_;
return *this;
}
// Get the address family associated with the endpoint.
int family() const
{
return data_.base.sa_family;
}
// Get the socket protocol associated with the endpoint.
int protocol() const
{
return protocol_;
}
// Get the underlying endpoint in the native type.
asio::detail::socket_addr_type* data()
{
return &data_.base;
}
// Get the underlying endpoint in the native type.
const asio::detail::socket_addr_type* data() const
{
return &data_.base;
}
// Get the underlying size of the endpoint in the native type.
std::size_t size() const
{
return size_;
}
// Set the underlying size of the endpoint in the native type.
ASIO_DECL void resize(std::size_t size);
// Get the capacity of the endpoint in the native type.
std::size_t capacity() const
{
return sizeof(asio::detail::sockaddr_storage_type);
}
// Compare two endpoints for equality.
ASIO_DECL friend bool operator==(
const endpoint& e1, const endpoint& e2);
// Compare endpoints for ordering.
ASIO_DECL friend bool operator<(
const endpoint& e1, const endpoint& e2);
private:
// The underlying socket address.
union data_union
{
asio::detail::socket_addr_type base;
asio::detail::sockaddr_storage_type generic;
} data_;
// The length of the socket address stored in the endpoint.
std::size_t size_;
// The socket protocol associated with the endpoint.
int protocol_;
// Initialise with a specified memory.
ASIO_DECL void init(const void* sock_addr,
std::size_t sock_addr_size, int sock_protocol);
};
} // namespace detail
} // namespace generic
} // namespace asio
#include "asio/detail/pop_options.hpp"
#if defined(ASIO_HEADER_ONLY)
# include "asio/generic/detail/impl/endpoint.ipp"
#endif // defined(ASIO_HEADER_ONLY)
#endif // ASIO_GENERIC_DETAIL_ENDPOINT_HPP
|
0 | repos/asio/asio/include/asio/generic/detail | repos/asio/asio/include/asio/generic/detail/impl/endpoint.ipp | //
// generic/detail/impl/endpoint.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2024 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_GENERIC_DETAIL_IMPL_ENDPOINT_IPP
#define ASIO_GENERIC_DETAIL_IMPL_ENDPOINT_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#include <cstring>
#include <typeinfo>
#include "asio/detail/socket_ops.hpp"
#include "asio/detail/throw_error.hpp"
#include "asio/detail/throw_exception.hpp"
#include "asio/error.hpp"
#include "asio/generic/detail/endpoint.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace generic {
namespace detail {
endpoint::endpoint()
{
init(0, 0, 0);
}
endpoint::endpoint(const void* sock_addr,
std::size_t sock_addr_size, int sock_protocol)
{
init(sock_addr, sock_addr_size, sock_protocol);
}
void endpoint::resize(std::size_t new_size)
{
if (new_size > sizeof(asio::detail::sockaddr_storage_type))
{
asio::error_code ec(asio::error::invalid_argument);
asio::detail::throw_error(ec);
}
else
{
size_ = new_size;
protocol_ = 0;
}
}
bool operator==(const endpoint& e1, const endpoint& e2)
{
using namespace std; // For memcmp.
return e1.size() == e2.size() && memcmp(e1.data(), e2.data(), e1.size()) == 0;
}
bool operator<(const endpoint& e1, const endpoint& e2)
{
if (e1.protocol() < e2.protocol())
return true;
if (e1.protocol() > e2.protocol())
return false;
using namespace std; // For memcmp.
std::size_t compare_size = e1.size() < e2.size() ? e1.size() : e2.size();
int compare_result = memcmp(e1.data(), e2.data(), compare_size);
if (compare_result < 0)
return true;
if (compare_result > 0)
return false;
return e1.size() < e2.size();
}
void endpoint::init(const void* sock_addr,
std::size_t sock_addr_size, int sock_protocol)
{
if (sock_addr_size > sizeof(asio::detail::sockaddr_storage_type))
{
asio::error_code ec(asio::error::invalid_argument);
asio::detail::throw_error(ec);
}
using namespace std; // For memset and memcpy.
memset(&data_.generic, 0, sizeof(asio::detail::sockaddr_storage_type));
if (sock_addr_size > 0)
memcpy(&data_.generic, sock_addr, sock_addr_size);
size_ = sock_addr_size;
protocol_ = sock_protocol;
}
} // namespace detail
} // namespace generic
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // ASIO_GENERIC_DETAIL_IMPL_ENDPOINT_IPP
|
0 | repos/asio/asio/include/asio | repos/asio/asio/include/asio/ip/address.hpp | //
// ip/address.hpp
// ~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2024 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_IP_ADDRESS_HPP
#define ASIO_IP_ADDRESS_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#include <functional>
#include <string>
#include "asio/detail/throw_exception.hpp"
#include "asio/detail/string_view.hpp"
#include "asio/detail/type_traits.hpp"
#include "asio/error_code.hpp"
#include "asio/ip/address_v4.hpp"
#include "asio/ip/address_v6.hpp"
#include "asio/ip/bad_address_cast.hpp"
#if !defined(ASIO_NO_IOSTREAM)
# include <iosfwd>
#endif // !defined(ASIO_NO_IOSTREAM)
#include "asio/detail/push_options.hpp"
namespace asio {
namespace ip {
/// Implements version-independent IP addresses.
/**
* The asio::ip::address class provides the ability to use either IP
* version 4 or version 6 addresses.
*
* @par Thread Safety
* @e Distinct @e objects: Safe.@n
* @e Shared @e objects: Unsafe.
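 *
 * @par Example
 * A minimal illustrative sketch of version-independent use of the class (the
 * address string is arbitrary):
 * @code
 * asio::ip::address addr = asio::ip::make_address("2001:db8::1");
 * bool v6 = addr.is_v6();               // true for this input
 * std::string text = addr.to_string();  // "2001:db8::1"
 * @endcode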
*/
class address
{
public:
/// Default constructor.
ASIO_DECL address() noexcept;
/// Construct an address from an IPv4 address.
ASIO_DECL address(
const asio::ip::address_v4& ipv4_address) noexcept;
/// Construct an address from an IPv6 address.
ASIO_DECL address(
const asio::ip::address_v6& ipv6_address) noexcept;
/// Copy constructor.
ASIO_DECL address(const address& other) noexcept;
/// Move constructor.
ASIO_DECL address(address&& other) noexcept;
/// Assign from another address.
ASIO_DECL address& operator=(const address& other) noexcept;
/// Move-assign from another address.
ASIO_DECL address& operator=(address&& other) noexcept;
/// Assign from an IPv4 address.
ASIO_DECL address& operator=(
const asio::ip::address_v4& ipv4_address) noexcept;
/// Assign from an IPv6 address.
ASIO_DECL address& operator=(
const asio::ip::address_v6& ipv6_address) noexcept;
/// Get whether the address is an IP version 4 address.
bool is_v4() const noexcept
{
return type_ == ipv4;
}
/// Get whether the address is an IP version 6 address.
bool is_v6() const noexcept
{
return type_ == ipv6;
}
/// Get the address as an IP version 4 address.
ASIO_DECL asio::ip::address_v4 to_v4() const;
/// Get the address as an IP version 6 address.
ASIO_DECL asio::ip::address_v6 to_v6() const;
/// Get the address as a string.
ASIO_DECL std::string to_string() const;
#if !defined(ASIO_NO_DEPRECATED)
/// (Deprecated: Use other overload.) Get the address as a string.
ASIO_DECL std::string to_string(asio::error_code& ec) const;
/// (Deprecated: Use make_address().) Create an address from an IPv4 address
/// string in dotted decimal form, or from an IPv6 address in hexadecimal
/// notation.
static address from_string(const char* str);
/// (Deprecated: Use make_address().) Create an address from an IPv4 address
/// string in dotted decimal form, or from an IPv6 address in hexadecimal
/// notation.
static address from_string(const char* str, asio::error_code& ec);
/// (Deprecated: Use make_address().) Create an address from an IPv4 address
/// string in dotted decimal form, or from an IPv6 address in hexadecimal
/// notation.
static address from_string(const std::string& str);
/// (Deprecated: Use make_address().) Create an address from an IPv4 address
/// string in dotted decimal form, or from an IPv6 address in hexadecimal
/// notation.
static address from_string(
const std::string& str, asio::error_code& ec);
#endif // !defined(ASIO_NO_DEPRECATED)
/// Determine whether the address is a loopback address.
ASIO_DECL bool is_loopback() const noexcept;
/// Determine whether the address is unspecified.
ASIO_DECL bool is_unspecified() const noexcept;
/// Determine whether the address is a multicast address.
ASIO_DECL bool is_multicast() const noexcept;
/// Compare two addresses for equality.
ASIO_DECL friend bool operator==(const address& a1,
const address& a2) noexcept;
/// Compare two addresses for inequality.
friend bool operator!=(const address& a1,
const address& a2) noexcept
{
return !(a1 == a2);
}
/// Compare addresses for ordering.
ASIO_DECL friend bool operator<(const address& a1,
const address& a2) noexcept;
/// Compare addresses for ordering.
friend bool operator>(const address& a1,
const address& a2) noexcept
{
return a2 < a1;
}
/// Compare addresses for ordering.
friend bool operator<=(const address& a1,
const address& a2) noexcept
{
return !(a2 < a1);
}
/// Compare addresses for ordering.
friend bool operator>=(const address& a1,
const address& a2) noexcept
{
return !(a1 < a2);
}
private:
// The type of the address.
enum { ipv4, ipv6 } type_;
// The underlying IPv4 address.
asio::ip::address_v4 ipv4_address_;
// The underlying IPv6 address.
asio::ip::address_v6 ipv6_address_;
};
/// Create an address from an IPv4 address string in dotted decimal form,
/// or from an IPv6 address in hexadecimal notation.
/**
* @relates address
*/
ASIO_DECL address make_address(const char* str);
/// Create an address from an IPv4 address string in dotted decimal form,
/// or from an IPv6 address in hexadecimal notation.
/**
* @relates address
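 *
 * @par Example
 * A minimal illustrative sketch of non-throwing use (the input string is
 * arbitrary):
 * @code
 * asio::error_code ec;
 * asio::ip::address addr = asio::ip::make_address("::1", ec);
 * if (ec)
 * {
 *   // The string could not be parsed as an IPv4 or IPv6 address.
 * }
 * @endcode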
*/
ASIO_DECL address make_address(const char* str,
asio::error_code& ec) noexcept;
/// Create an address from an IPv4 address string in dotted decimal form,
/// or from an IPv6 address in hexadecimal notation.
/**
* @relates address
*/
ASIO_DECL address make_address(const std::string& str);
/// Create an address from an IPv4 address string in dotted decimal form,
/// or from an IPv6 address in hexadecimal notation.
/**
* @relates address
*/
ASIO_DECL address make_address(const std::string& str,
asio::error_code& ec) noexcept;
#if defined(ASIO_HAS_STRING_VIEW) \
|| defined(GENERATING_DOCUMENTATION)
/// Create an address from an IPv4 address string in dotted decimal form,
/// or from an IPv6 address in hexadecimal notation.
/**
* @relates address
*/
ASIO_DECL address make_address(string_view str);
/// Create an address from an IPv4 address string in dotted decimal form,
/// or from an IPv6 address in hexadecimal notation.
/**
* @relates address
*/
ASIO_DECL address make_address(string_view str,
asio::error_code& ec) noexcept;
#endif // defined(ASIO_HAS_STRING_VIEW)
// || defined(GENERATING_DOCUMENTATION)
#if !defined(ASIO_NO_IOSTREAM)
/// Output an address as a string.
/**
* Used to output a human-readable string for a specified address.
*
* @param os The output stream to which the string will be written.
*
* @param addr The address to be written.
*
* @return The output stream.
*
* @relates asio::ip::address
*/
template <typename Elem, typename Traits>
std::basic_ostream<Elem, Traits>& operator<<(
std::basic_ostream<Elem, Traits>& os, const address& addr);
#endif // !defined(ASIO_NO_IOSTREAM)
} // namespace ip
} // namespace asio
namespace std {
template <>
struct hash<asio::ip::address>
{
std::size_t operator()(const asio::ip::address& addr)
const noexcept
{
return addr.is_v4()
? std::hash<asio::ip::address_v4>()(addr.to_v4())
: std::hash<asio::ip::address_v6>()(addr.to_v6());
}
};
} // namespace std
#include "asio/detail/pop_options.hpp"
#include "asio/ip/impl/address.hpp"
#if defined(ASIO_HEADER_ONLY)
# include "asio/ip/impl/address.ipp"
#endif // defined(ASIO_HEADER_ONLY)
#endif // ASIO_IP_ADDRESS_HPP
|
0 | repos/asio/asio/include/asio | repos/asio/asio/include/asio/ip/address_v4_iterator.hpp | //
// ip/address_v4_iterator.hpp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2024 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_IP_ADDRESS_V4_ITERATOR_HPP
#define ASIO_IP_ADDRESS_V4_ITERATOR_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#include "asio/ip/address_v4.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace ip {
template <typename> class basic_address_iterator;
/// An input iterator that can be used for traversing IPv4 addresses.
/**
* In addition to satisfying the input iterator requirements, this iterator
* also supports decrement.
*
* @par Thread Safety
* @e Distinct @e objects: Safe.@n
* @e Shared @e objects: Unsafe.
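 *
 * @par Example
 * A minimal illustrative sketch of traversing a small range of IPv4
 * addresses (the addresses used are arbitrary):
 * @code
 * using asio::ip::address_v4_iterator;
 * using asio::ip::make_address_v4;
 * address_v4_iterator first(make_address_v4("192.168.0.1"));
 * address_v4_iterator last(make_address_v4("192.168.0.5"));
 * for (address_v4_iterator it = first; it != last; ++it)
 * {
 *   std::string text = it->to_string();
 * }
 * @endcode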
*/
template <> class basic_address_iterator<address_v4>
{
public:
/// The type of the elements pointed to by the iterator.
typedef address_v4 value_type;
/// Distance between two iterators.
typedef std::ptrdiff_t difference_type;
/// The type of a pointer to an element pointed to by the iterator.
typedef const address_v4* pointer;
/// The type of a reference to an element pointed to by the iterator.
typedef const address_v4& reference;
/// Denotes that the iterator satisfies the input iterator requirements.
typedef std::input_iterator_tag iterator_category;
/// Construct an iterator that points to the specified address.
basic_address_iterator(const address_v4& addr) noexcept
: address_(addr)
{
}
/// Copy constructor.
basic_address_iterator(const basic_address_iterator& other) noexcept
: address_(other.address_)
{
}
/// Move constructor.
basic_address_iterator(basic_address_iterator&& other) noexcept
: address_(static_cast<address_v4&&>(other.address_))
{
}
/// Assignment operator.
basic_address_iterator& operator=(
const basic_address_iterator& other) noexcept
{
address_ = other.address_;
return *this;
}
/// Move assignment operator.
basic_address_iterator& operator=(basic_address_iterator&& other) noexcept
{
address_ = static_cast<address_v4&&>(other.address_);
return *this;
}
/// Dereference the iterator.
const address_v4& operator*() const noexcept
{
return address_;
}
/// Dereference the iterator.
const address_v4* operator->() const noexcept
{
return &address_;
}
/// Pre-increment operator.
basic_address_iterator& operator++() noexcept
{
address_ = address_v4((address_.to_uint() + 1) & 0xFFFFFFFF);
return *this;
}
/// Post-increment operator.
basic_address_iterator operator++(int) noexcept
{
basic_address_iterator tmp(*this);
++*this;
return tmp;
}
/// Pre-decrement operator.
basic_address_iterator& operator--() noexcept
{
address_ = address_v4((address_.to_uint() - 1) & 0xFFFFFFFF);
return *this;
}
/// Post-decrement operator.
basic_address_iterator operator--(int)
{
basic_address_iterator tmp(*this);
--*this;
return tmp;
}
/// Compare two addresses for equality.
friend bool operator==(const basic_address_iterator& a,
const basic_address_iterator& b)
{
return a.address_ == b.address_;
}
/// Compare two addresses for inequality.
friend bool operator!=(const basic_address_iterator& a,
const basic_address_iterator& b)
{
return a.address_ != b.address_;
}
private:
address_v4 address_;
};
/// An input iterator that can be used for traversing IPv4 addresses.
typedef basic_address_iterator<address_v4> address_v4_iterator;
} // namespace ip
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // ASIO_IP_ADDRESS_V4_ITERATOR_HPP
|
0 | repos/asio/asio/include/asio | repos/asio/asio/include/asio/ip/resolver_query_base.hpp | //
// ip/resolver_query_base.hpp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2024 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_IP_RESOLVER_QUERY_BASE_HPP
#define ASIO_IP_RESOLVER_QUERY_BASE_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#include "asio/ip/resolver_base.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace ip {
/// The resolver_query_base class is used as a base for the
/// basic_resolver_query class templates to provide a common place to define
/// the flag constants.
class resolver_query_base : public resolver_base
{
protected:
/// Protected destructor to prevent deletion through this type.
~resolver_query_base()
{
}
};
} // namespace ip
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // ASIO_IP_RESOLVER_QUERY_BASE_HPP
|
0 | repos/asio/asio/include/asio | repos/asio/asio/include/asio/ip/address_v4.hpp | //
// ip/address_v4.hpp
// ~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2024 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_IP_ADDRESS_V4_HPP
#define ASIO_IP_ADDRESS_V4_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#include <functional>
#include <string>
#include "asio/detail/array.hpp"
#include "asio/detail/cstdint.hpp"
#include "asio/detail/socket_types.hpp"
#include "asio/detail/string_view.hpp"
#include "asio/detail/winsock_init.hpp"
#include "asio/error_code.hpp"
#if !defined(ASIO_NO_IOSTREAM)
# include <iosfwd>
#endif // !defined(ASIO_NO_IOSTREAM)
#include "asio/detail/push_options.hpp"
namespace asio {
namespace ip {
/// Implements IP version 4 style addresses.
/**
* The asio::ip::address_v4 class provides the ability to use and
* manipulate IP version 4 addresses.
*
* @par Thread Safety
* @e Distinct @e objects: Safe.@n
* @e Shared @e objects: Unsafe.
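 *
 * @par Example
 * A minimal illustrative sketch (the address string is arbitrary):
 * @code
 * asio::ip::address_v4 addr = asio::ip::make_address_v4("127.0.0.1");
 * bool loopback = addr.is_loopback();                  // true
 * asio::ip::address_v4::uint_type n = addr.to_uint();  // 0x7F000001
 * @endcode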
*/
class address_v4
{
public:
/// The type used to represent an address as an unsigned integer.
typedef uint_least32_t uint_type;
/// The type used to represent an address as an array of bytes.
/**
   * @note This type is defined in terms of the C++11 template @c std::array
   * when it is available. Otherwise, it uses @c boost::array.
*/
#if defined(GENERATING_DOCUMENTATION)
typedef array<unsigned char, 4> bytes_type;
#else
typedef asio::detail::array<unsigned char, 4> bytes_type;
#endif
/// Default constructor.
/**
* Initialises the @c address_v4 object such that:
* @li <tt>to_bytes()</tt> yields <tt>{0, 0, 0, 0}</tt>; and
* @li <tt>to_uint() == 0</tt>.
*/
address_v4() noexcept
{
addr_.s_addr = 0;
}
/// Construct an address from raw bytes.
/**
* Initialises the @c address_v4 object such that <tt>to_bytes() ==
* bytes</tt>.
*
* @throws out_of_range Thrown if any element in @c bytes is not in the range
* <tt>0 - 0xFF</tt>. Note that no range checking is required for platforms
* where <tt>std::numeric_limits<unsigned char>::max()</tt> is <tt>0xFF</tt>.
*/
ASIO_DECL explicit address_v4(const bytes_type& bytes);
/// Construct an address from an unsigned integer in host byte order.
/**
* Initialises the @c address_v4 object such that <tt>to_uint() == addr</tt>.
*/
ASIO_DECL explicit address_v4(uint_type addr);
/// Copy constructor.
address_v4(const address_v4& other) noexcept
: addr_(other.addr_)
{
}
/// Move constructor.
address_v4(address_v4&& other) noexcept
: addr_(other.addr_)
{
}
/// Assign from another address.
address_v4& operator=(const address_v4& other) noexcept
{
addr_ = other.addr_;
return *this;
}
/// Move-assign from another address.
address_v4& operator=(address_v4&& other) noexcept
{
addr_ = other.addr_;
return *this;
}
/// Get the address in bytes, in network byte order.
ASIO_DECL bytes_type to_bytes() const noexcept;
/// Get the address as an unsigned integer in host byte order.
ASIO_DECL uint_type to_uint() const noexcept;
#if !defined(ASIO_NO_DEPRECATED)
/// (Deprecated: Use to_uint().) Get the address as an unsigned long in host
/// byte order.
ASIO_DECL unsigned long to_ulong() const;
#endif // !defined(ASIO_NO_DEPRECATED)
/// Get the address as a string in dotted decimal format.
ASIO_DECL std::string to_string() const;
#if !defined(ASIO_NO_DEPRECATED)
/// (Deprecated: Use other overload.) Get the address as a string in dotted
/// decimal format.
ASIO_DECL std::string to_string(asio::error_code& ec) const;
/// (Deprecated: Use make_address_v4().) Create an address from an IP address
/// string in dotted decimal form.
static address_v4 from_string(const char* str);
/// (Deprecated: Use make_address_v4().) Create an address from an IP address
/// string in dotted decimal form.
static address_v4 from_string(
const char* str, asio::error_code& ec);
/// (Deprecated: Use make_address_v4().) Create an address from an IP address
/// string in dotted decimal form.
static address_v4 from_string(const std::string& str);
/// (Deprecated: Use make_address_v4().) Create an address from an IP address
/// string in dotted decimal form.
static address_v4 from_string(
const std::string& str, asio::error_code& ec);
#endif // !defined(ASIO_NO_DEPRECATED)
/// Determine whether the address is a loopback address.
/**
* This function tests whether the address is in the address block
* <tt>127.0.0.0/8</tt>, which corresponds to the address range
* <tt>127.0.0.0 - 127.255.255.255</tt>.
*
* @returns <tt>(to_uint() & 0xFF000000) == 0x7F000000</tt>.
*/
ASIO_DECL bool is_loopback() const noexcept;
/// Determine whether the address is unspecified.
/**
* This function tests whether the address is the unspecified address
* <tt>0.0.0.0</tt>.
*
* @returns <tt>to_uint() == 0</tt>.
*/
ASIO_DECL bool is_unspecified() const noexcept;
#if !defined(ASIO_NO_DEPRECATED)
/// (Deprecated: Use network_v4 class.) Determine whether the address is a
/// class A address.
ASIO_DECL bool is_class_a() const;
/// (Deprecated: Use network_v4 class.) Determine whether the address is a
/// class B address.
ASIO_DECL bool is_class_b() const;
/// (Deprecated: Use network_v4 class.) Determine whether the address is a
/// class C address.
ASIO_DECL bool is_class_c() const;
#endif // !defined(ASIO_NO_DEPRECATED)
/// Determine whether the address is a multicast address.
/**
* This function tests whether the address is in the multicast address block
* <tt>224.0.0.0/4</tt>, which corresponds to the address range
* <tt>224.0.0.0 - 239.255.255.255</tt>.
*
* @returns <tt>(to_uint() & 0xF0000000) == 0xE0000000</tt>.
*/
ASIO_DECL bool is_multicast() const noexcept;
/// Compare two addresses for equality.
friend bool operator==(const address_v4& a1,
const address_v4& a2) noexcept
{
return a1.addr_.s_addr == a2.addr_.s_addr;
}
/// Compare two addresses for inequality.
friend bool operator!=(const address_v4& a1,
const address_v4& a2) noexcept
{
return a1.addr_.s_addr != a2.addr_.s_addr;
}
/// Compare addresses for ordering.
/**
* Compares two addresses in host byte order.
*
* @returns <tt>a1.to_uint() < a2.to_uint()</tt>.
*/
friend bool operator<(const address_v4& a1,
const address_v4& a2) noexcept
{
return a1.to_uint() < a2.to_uint();
}
/// Compare addresses for ordering.
/**
* Compares two addresses in host byte order.
*
* @returns <tt>a1.to_uint() > a2.to_uint()</tt>.
*/
friend bool operator>(const address_v4& a1,
const address_v4& a2) noexcept
{
return a1.to_uint() > a2.to_uint();
}
/// Compare addresses for ordering.
/**
* Compares two addresses in host byte order.
*
* @returns <tt>a1.to_uint() <= a2.to_uint()</tt>.
*/
friend bool operator<=(const address_v4& a1,
const address_v4& a2) noexcept
{
return a1.to_uint() <= a2.to_uint();
}
/// Compare addresses for ordering.
/**
* Compares two addresses in host byte order.
*
* @returns <tt>a1.to_uint() >= a2.to_uint()</tt>.
*/
friend bool operator>=(const address_v4& a1,
const address_v4& a2) noexcept
{
return a1.to_uint() >= a2.to_uint();
}
/// Obtain an address object that represents any address.
/**
* This function returns an address that represents the "any" address
* <tt>0.0.0.0</tt>.
*
* @returns A default-constructed @c address_v4 object.
*/
static address_v4 any() noexcept
{
return address_v4();
}
/// Obtain an address object that represents the loopback address.
/**
* This function returns an address that represents the well-known loopback
* address <tt>127.0.0.1</tt>.
*
* @returns <tt>address_v4(0x7F000001)</tt>.
*/
static address_v4 loopback() noexcept
{
return address_v4(0x7F000001);
}
/// Obtain an address object that represents the broadcast address.
/**
* This function returns an address that represents the broadcast address
* <tt>255.255.255.255</tt>.
*
* @returns <tt>address_v4(0xFFFFFFFF)</tt>.
*/
static address_v4 broadcast() noexcept
{
return address_v4(0xFFFFFFFF);
}
#if !defined(ASIO_NO_DEPRECATED)
/// (Deprecated: Use network_v4 class.) Obtain an address object that
/// represents the broadcast address that corresponds to the specified
/// address and netmask.
ASIO_DECL static address_v4 broadcast(
const address_v4& addr, const address_v4& mask);
/// (Deprecated: Use network_v4 class.) Obtain the netmask that corresponds
/// to the address, based on its address class.
ASIO_DECL static address_v4 netmask(const address_v4& addr);
#endif // !defined(ASIO_NO_DEPRECATED)
private:
// The underlying IPv4 address.
asio::detail::in4_addr_type addr_;
};
/// Create an IPv4 address from raw bytes in network order.
/**
* @relates address_v4
*/
inline address_v4 make_address_v4(const address_v4::bytes_type& bytes)
{
return address_v4(bytes);
}
/// Create an IPv4 address from an unsigned integer in host byte order.
/**
* @relates address_v4
*/
inline address_v4 make_address_v4(address_v4::uint_type addr)
{
return address_v4(addr);
}
/// Create an IPv4 address from an IP address string in dotted decimal form.
/**
* @relates address_v4
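*
* @par Example
* A minimal usage sketch (the address literal is illustrative only):
* @code
* asio::ip::address_v4 addr = asio::ip::make_address_v4("127.0.0.1");
* asio::ip::address_v4::uint_type value = addr.to_uint(); // 0x7F000001
* @endcode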
*/
ASIO_DECL address_v4 make_address_v4(const char* str);
/// Create an IPv4 address from an IP address string in dotted decimal form.
/**
* @relates address_v4
*/
ASIO_DECL address_v4 make_address_v4(const char* str,
asio::error_code& ec) noexcept;
/// Create an IPv4 address from an IP address string in dotted decimal form.
/**
* @relates address_v4
*/
ASIO_DECL address_v4 make_address_v4(const std::string& str);
/// Create an IPv4 address from an IP address string in dotted decimal form.
/**
* @relates address_v4
*/
ASIO_DECL address_v4 make_address_v4(const std::string& str,
asio::error_code& ec) noexcept;
#if defined(ASIO_HAS_STRING_VIEW) \
|| defined(GENERATING_DOCUMENTATION)
/// Create an IPv4 address from an IP address string in dotted decimal form.
/**
* @relates address_v4
*/
ASIO_DECL address_v4 make_address_v4(string_view str);
/// Create an IPv4 address from an IP address string in dotted decimal form.
/**
* @relates address_v4
*/
ASIO_DECL address_v4 make_address_v4(string_view str,
asio::error_code& ec) noexcept;
#endif // defined(ASIO_HAS_STRING_VIEW)
// || defined(GENERATING_DOCUMENTATION)
#if !defined(ASIO_NO_IOSTREAM)
/// Output an address as a string.
/**
* Used to output a human-readable string for a specified address.
*
* @param os The output stream to which the string will be written.
*
* @param addr The address to be written.
*
* @return The output stream.
*
* @relates asio::ip::address_v4
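*
* @par Example
* For instance, printing the loopback address (a brief sketch):
* @code
* asio::ip::address_v4 addr = asio::ip::address_v4::loopback();
* std::cout << addr << std::endl; // prints "127.0.0.1"
* @endcode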
*/
template <typename Elem, typename Traits>
std::basic_ostream<Elem, Traits>& operator<<(
std::basic_ostream<Elem, Traits>& os, const address_v4& addr);
#endif // !defined(ASIO_NO_IOSTREAM)
} // namespace ip
} // namespace asio
namespace std {
template <>
struct hash<asio::ip::address_v4>
{
std::size_t operator()(const asio::ip::address_v4& addr)
const noexcept
{
return std::hash<unsigned int>()(addr.to_uint());
}
};
} // namespace std
#include "asio/detail/pop_options.hpp"
#include "asio/ip/impl/address_v4.hpp"
#if defined(ASIO_HEADER_ONLY)
# include "asio/ip/impl/address_v4.ipp"
#endif // defined(ASIO_HEADER_ONLY)
#endif // ASIO_IP_ADDRESS_V4_HPP
|
0 | repos/asio/asio/include/asio | repos/asio/asio/include/asio/ip/address_v6.hpp | //
// ip/address_v6.hpp
// ~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2024 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_IP_ADDRESS_V6_HPP
#define ASIO_IP_ADDRESS_V6_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#include <functional>
#include <string>
#include "asio/detail/array.hpp"
#include "asio/detail/cstdint.hpp"
#include "asio/detail/socket_types.hpp"
#include "asio/detail/string_view.hpp"
#include "asio/detail/winsock_init.hpp"
#include "asio/error_code.hpp"
#include "asio/ip/address_v4.hpp"
#if !defined(ASIO_NO_IOSTREAM)
# include <iosfwd>
#endif // !defined(ASIO_NO_IOSTREAM)
#include "asio/detail/push_options.hpp"
namespace asio {
namespace ip {
template <typename> class basic_address_iterator;
/// Type used for storing IPv6 scope IDs.
typedef uint_least32_t scope_id_type;
/// Implements IP version 6 style addresses.
/**
* The asio::ip::address_v6 class provides the ability to use and
* manipulate IP version 6 addresses.
*
* @par Thread Safety
* @e Distinct @e objects: Safe.@n
* @e Shared @e objects: Unsafe.
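*
* @par Example
* A brief usage sketch:
* @code
* asio::ip::address_v6 addr = asio::ip::make_address_v6("::1");
* bool loopback = addr.is_loopback(); // true
* std::string s = addr.to_string(); // "::1"
* @endcode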
*/
class address_v6
{
public:
/// The type used to represent an address as an array of bytes.
/**
* @note This type is defined in terms of the C++11 template @c std::array
* when it is available. Otherwise, it uses @c boost::array.
*/
#if defined(GENERATING_DOCUMENTATION)
typedef array<unsigned char, 16> bytes_type;
#else
typedef asio::detail::array<unsigned char, 16> bytes_type;
#endif
/// Default constructor.
/**
* Initialises the @c address_v6 object such that:
* @li <tt>to_bytes()</tt> yields <tt>{0, 0, ..., 0}</tt>; and
* @li <tt>scope_id() == 0</tt>.
*/
ASIO_DECL address_v6() noexcept;
/// Construct an address from raw bytes and scope ID.
/**
* Initialises the @c address_v6 object such that:
* @li <tt>to_bytes() == bytes</tt>; and
* @li <tt>this->scope_id() == scope_id</tt>.
*
* @throws out_of_range Thrown if any element in @c bytes is not in the range
* <tt>0 - 0xFF</tt>. Note that no range checking is required for platforms
* where <tt>std::numeric_limits<unsigned char>::max()</tt> is <tt>0xFF</tt>.
*/
ASIO_DECL explicit address_v6(const bytes_type& bytes,
scope_id_type scope_id = 0);
/// Copy constructor.
ASIO_DECL address_v6(const address_v6& other) noexcept;
/// Move constructor.
ASIO_DECL address_v6(address_v6&& other) noexcept;
/// Assign from another address.
ASIO_DECL address_v6& operator=(
const address_v6& other) noexcept;
/// Move-assign from another address.
ASIO_DECL address_v6& operator=(address_v6&& other) noexcept;
/// The scope ID of the address.
/**
* Returns the scope ID associated with the IPv6 address.
*/
scope_id_type scope_id() const noexcept
{
return scope_id_;
}
/// The scope ID of the address.
/**
* Modifies the scope ID associated with the IPv6 address.
*
* @param id The new scope ID.
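*
* For example, to associate a link-local address with the interface that has
* index 2 (a usage sketch; the address and interface index are illustrative):
* @code
* asio::ip::address_v6 addr = asio::ip::make_address_v6("fe80::1");
* addr.scope_id(2);
* @endcode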
*/
void scope_id(scope_id_type id) noexcept
{
scope_id_ = id;
}
/// Get the address in bytes, in network byte order.
ASIO_DECL bytes_type to_bytes() const noexcept;
/// Get the address as a string.
ASIO_DECL std::string to_string() const;
#if !defined(ASIO_NO_DEPRECATED)
/// (Deprecated: Use other overload.) Get the address as a string.
ASIO_DECL std::string to_string(asio::error_code& ec) const;
/// (Deprecated: Use make_address_v6().) Create an IPv6 address from an IP
/// address string.
static address_v6 from_string(const char* str);
/// (Deprecated: Use make_address_v6().) Create an IPv6 address from an IP
/// address string.
static address_v6 from_string(
const char* str, asio::error_code& ec);
/// (Deprecated: Use make_address_v6().) Create an IPv6 address from an IP
/// address string.
static address_v6 from_string(const std::string& str);
/// (Deprecated: Use make_address_v6().) Create an IPv6 address from an IP
/// address string.
static address_v6 from_string(
const std::string& str, asio::error_code& ec);
/// (Deprecated: Use make_address_v4().) Converts an IPv4-mapped or
/// IPv4-compatible address to an IPv4 address.
ASIO_DECL address_v4 to_v4() const;
#endif // !defined(ASIO_NO_DEPRECATED)
/// Determine whether the address is a loopback address.
/**
* This function tests whether the address is the loopback address
* <tt>::1</tt>.
*/
ASIO_DECL bool is_loopback() const noexcept;
/// Determine whether the address is unspecified.
/**
* This function tests whether the address is the unspecified address
* <tt>::</tt>.
*/
ASIO_DECL bool is_unspecified() const noexcept;
/// Determine whether the address is link local.
ASIO_DECL bool is_link_local() const noexcept;
/// Determine whether the address is site local.
ASIO_DECL bool is_site_local() const noexcept;
/// Determine whether the address is a mapped IPv4 address.
ASIO_DECL bool is_v4_mapped() const noexcept;
#if !defined(ASIO_NO_DEPRECATED)
/// (Deprecated: No replacement.) Determine whether the address is an
/// IPv4-compatible address.
ASIO_DECL bool is_v4_compatible() const;
#endif // !defined(ASIO_NO_DEPRECATED)
/// Determine whether the address is a multicast address.
ASIO_DECL bool is_multicast() const noexcept;
/// Determine whether the address is a global multicast address.
ASIO_DECL bool is_multicast_global() const noexcept;
/// Determine whether the address is a link-local multicast address.
ASIO_DECL bool is_multicast_link_local() const noexcept;
/// Determine whether the address is a node-local multicast address.
ASIO_DECL bool is_multicast_node_local() const noexcept;
/// Determine whether the address is an org-local multicast address.
ASIO_DECL bool is_multicast_org_local() const noexcept;
/// Determine whether the address is a site-local multicast address.
ASIO_DECL bool is_multicast_site_local() const noexcept;
/// Compare two addresses for equality.
ASIO_DECL friend bool operator==(const address_v6& a1,
const address_v6& a2) noexcept;
/// Compare two addresses for inequality.
friend bool operator!=(const address_v6& a1,
const address_v6& a2) noexcept
{
return !(a1 == a2);
}
/// Compare addresses for ordering.
ASIO_DECL friend bool operator<(const address_v6& a1,
const address_v6& a2) noexcept;
/// Compare addresses for ordering.
friend bool operator>(const address_v6& a1,
const address_v6& a2) noexcept
{
return a2 < a1;
}
/// Compare addresses for ordering.
friend bool operator<=(const address_v6& a1,
const address_v6& a2) noexcept
{
return !(a2 < a1);
}
/// Compare addresses for ordering.
friend bool operator>=(const address_v6& a1,
const address_v6& a2) noexcept
{
return !(a1 < a2);
}
/// Obtain an address object that represents any address.
/**
* This function returns an address that represents the "any" address
* <tt>::</tt>.
*
* @returns A default-constructed @c address_v6 object.
*/
static address_v6 any() noexcept
{
return address_v6();
}
/// Obtain an address object that represents the loopback address.
/**
* This function returns an address that represents the well-known loopback
* address <tt>::1</tt>.
*/
ASIO_DECL static address_v6 loopback() noexcept;
#if !defined(ASIO_NO_DEPRECATED)
/// (Deprecated: Use make_address_v6().) Create an IPv4-mapped IPv6 address.
ASIO_DECL static address_v6 v4_mapped(const address_v4& addr);
/// (Deprecated: No replacement.) Create an IPv4-compatible IPv6 address.
ASIO_DECL static address_v6 v4_compatible(const address_v4& addr);
#endif // !defined(ASIO_NO_DEPRECATED)
private:
friend class basic_address_iterator<address_v6>;
// The underlying IPv6 address.
asio::detail::in6_addr_type addr_;
// The scope ID associated with the address.
scope_id_type scope_id_;
};
/// Create an IPv6 address from raw bytes and scope ID.
/**
* @relates address_v6
*/
inline address_v6 make_address_v6(const address_v6::bytes_type& bytes,
scope_id_type scope_id = 0)
{
return address_v6(bytes, scope_id);
}
/// Create an IPv6 address from an IP address string.
/**
* @relates address_v6
*/
ASIO_DECL address_v6 make_address_v6(const char* str);
/// Create an IPv6 address from an IP address string.
/**
* @relates address_v6
*/
ASIO_DECL address_v6 make_address_v6(const char* str,
asio::error_code& ec) noexcept;
/// Create an IPv6 address from an IP address string.
/**
* @relates address_v6
*/
ASIO_DECL address_v6 make_address_v6(const std::string& str);
/// Create an IPv6 address from an IP address string.
/**
* @relates address_v6
*/
ASIO_DECL address_v6 make_address_v6(const std::string& str,
asio::error_code& ec) noexcept;
#if defined(ASIO_HAS_STRING_VIEW) \
|| defined(GENERATING_DOCUMENTATION)
/// Create an IPv6 address from an IP address string.
/**
* @relates address_v6
*/
ASIO_DECL address_v6 make_address_v6(string_view str);
/// Create an IPv6 address from an IP address string.
/**
* @relates address_v6
*/
ASIO_DECL address_v6 make_address_v6(string_view str,
asio::error_code& ec) noexcept;
#endif // defined(ASIO_HAS_STRING_VIEW)
// || defined(GENERATING_DOCUMENTATION)
/// Tag type used for distinguishing overloads that deal in IPv4-mapped IPv6
/// addresses.
enum v4_mapped_t { v4_mapped };
/// Create an IPv4 address from an IPv4-mapped IPv6 address.
/**
* @relates address_v4
*/
ASIO_DECL address_v4 make_address_v4(
v4_mapped_t, const address_v6& v6_addr);
/// Create an IPv4-mapped IPv6 address from an IPv4 address.
/**
* @relates address_v6
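*
* @par Example
* For example, mapping an IPv4 address into IPv6 (a usage sketch; the address
* shown is illustrative):
* @code
* asio::ip::address_v4 v4_addr = asio::ip::make_address_v4("192.0.2.1");
* asio::ip::address_v6 v6_addr =
* asio::ip::make_address_v6(asio::ip::v4_mapped, v4_addr);
* // v6_addr.to_string() == "::ffff:192.0.2.1"
* @endcode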
*/
ASIO_DECL address_v6 make_address_v6(
v4_mapped_t, const address_v4& v4_addr);
#if !defined(ASIO_NO_IOSTREAM)
/// Output an address as a string.
/**
* Used to output a human-readable string for a specified address.
*
* @param os The output stream to which the string will be written.
*
* @param addr The address to be written.
*
* @return The output stream.
*
* @relates asio::ip::address_v6
*/
template <typename Elem, typename Traits>
std::basic_ostream<Elem, Traits>& operator<<(
std::basic_ostream<Elem, Traits>& os, const address_v6& addr);
#endif // !defined(ASIO_NO_IOSTREAM)
} // namespace ip
} // namespace asio
namespace std {
template <>
struct hash<asio::ip::address_v6>
{
std::size_t operator()(const asio::ip::address_v6& addr)
const noexcept
{
const asio::ip::address_v6::bytes_type bytes = addr.to_bytes();
std::size_t result = static_cast<std::size_t>(addr.scope_id());
combine_4_bytes(result, &bytes[0]);
combine_4_bytes(result, &bytes[4]);
combine_4_bytes(result, &bytes[8]);
combine_4_bytes(result, &bytes[12]);
return result;
}
private:
static void combine_4_bytes(std::size_t& seed, const unsigned char* bytes)
{
const std::size_t bytes_hash =
(static_cast<std::size_t>(bytes[0]) << 24) |
(static_cast<std::size_t>(bytes[1]) << 16) |
(static_cast<std::size_t>(bytes[2]) << 8) |
(static_cast<std::size_t>(bytes[3]));
seed ^= bytes_hash + 0x9e3779b9 + (seed << 6) + (seed >> 2);
}
};
} // namespace std
#include "asio/detail/pop_options.hpp"
#include "asio/ip/impl/address_v6.hpp"
#if defined(ASIO_HEADER_ONLY)
# include "asio/ip/impl/address_v6.ipp"
#endif // defined(ASIO_HEADER_ONLY)
#endif // ASIO_IP_ADDRESS_V6_HPP
|
0 | repos/asio/asio/include/asio | repos/asio/asio/include/asio/ip/v6_only.hpp | //
// ip/v6_only.hpp
// ~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2024 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_IP_V6_ONLY_HPP
#define ASIO_IP_V6_ONLY_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#include "asio/detail/socket_option.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace ip {
/// Socket option for determining whether an IPv6 socket supports IPv6
/// communication only.
/**
* Implements the IPPROTO_IPV6/IPV6_V6ONLY socket option.
*
* @par Examples
* Setting the option:
* @code
* asio::ip::tcp::socket socket(my_context);
* ...
* asio::ip::v6_only option(true);
* socket.set_option(option);
* @endcode
*
* @par
* Getting the current option value:
* @code
* asio::ip::tcp::socket socket(my_context);
* ...
* asio::ip::v6_only option;
* socket.get_option(option);
* bool v6_only = option.value();
* @endcode
*
* @par Concepts:
* GettableSocketOption, SettableSocketOption.
*/
#if defined(GENERATING_DOCUMENTATION)
typedef implementation_defined v6_only;
#elif defined(IPV6_V6ONLY)
typedef asio::detail::socket_option::boolean<
IPPROTO_IPV6, IPV6_V6ONLY> v6_only;
#else
typedef asio::detail::socket_option::boolean<
asio::detail::custom_socket_option_level,
asio::detail::always_fail_option> v6_only;
#endif
} // namespace ip
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // ASIO_IP_V6_ONLY_HPP
|
0 | repos/asio/asio/include/asio | repos/asio/asio/include/asio/ip/address_v6_range.hpp | //
// ip/address_v6_range.hpp
// ~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2024 Christopher M. Kohlhoff (chris at kohlhoff dot com)
// Oliver Kowalke (oliver dot kowalke at gmail dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_IP_ADDRESS_V6_RANGE_HPP
#define ASIO_IP_ADDRESS_V6_RANGE_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#include "asio/ip/address_v6_iterator.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace ip {
template <typename> class basic_address_range;
/// Represents a range of IPv6 addresses.
/**
* @par Thread Safety
* @e Distinct @e objects: Safe.@n
* @e Shared @e objects: Unsafe.
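*
* @par Example
* A usage sketch that iterates a small range of addresses (the addresses
* shown are illustrative):
* @code
* asio::ip::basic_address_iterator<asio::ip::address_v6> first(
* asio::ip::make_address_v6("2001:db8::1"));
* asio::ip::basic_address_iterator<asio::ip::address_v6> last(
* asio::ip::make_address_v6("2001:db8::4"));
* asio::ip::basic_address_range<asio::ip::address_v6> range(first, last);
* for (asio::ip::address_v6 addr : range)
* std::cout << addr << "\n";
* @endcode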
*/
template <> class basic_address_range<address_v6>
{
public:
/// The type of an iterator that points into the range.
typedef basic_address_iterator<address_v6> iterator;
/// Construct an empty range.
basic_address_range() noexcept
: begin_(address_v6()),
end_(address_v6())
{
}
/// Construct a range that represents the given range of addresses.
explicit basic_address_range(const iterator& first,
const iterator& last) noexcept
: begin_(first),
end_(last)
{
}
/// Copy constructor.
basic_address_range(const basic_address_range& other) noexcept
: begin_(other.begin_),
end_(other.end_)
{
}
/// Move constructor.
basic_address_range(basic_address_range&& other) noexcept
: begin_(static_cast<iterator&&>(other.begin_)),
end_(static_cast<iterator&&>(other.end_))
{
}
/// Assignment operator.
basic_address_range& operator=(
const basic_address_range& other) noexcept
{
begin_ = other.begin_;
end_ = other.end_;
return *this;
}
/// Move assignment operator.
basic_address_range& operator=(basic_address_range&& other) noexcept
{
begin_ = static_cast<iterator&&>(other.begin_);
end_ = static_cast<iterator&&>(other.end_);
return *this;
}
/// Obtain an iterator that points to the start of the range.
iterator begin() const noexcept
{
return begin_;
}
/// Obtain an iterator that points to the end of the range.
iterator end() const noexcept
{
return end_;
}
/// Determine whether the range is empty.
bool empty() const noexcept
{
return begin_ == end_;
}
/// Find an address in the range.
iterator find(const address_v6& addr) const noexcept
{
return addr >= *begin_ && addr < *end_ ? iterator(addr) : end_;
}
private:
iterator begin_;
iterator end_;
};
/// Represents a range of IPv6 addresses.
typedef basic_address_range<address_v6> address_v6_range;
} // namespace ip
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // ASIO_IP_ADDRESS_V6_RANGE_HPP
|
0 | repos/asio/asio/include/asio | repos/asio/asio/include/asio/ip/resolver_base.hpp | //
// ip/resolver_base.hpp
// ~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2024 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_IP_RESOLVER_BASE_HPP
#define ASIO_IP_RESOLVER_BASE_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#include "asio/detail/socket_types.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace ip {
/// The resolver_base class is used as a base for the basic_resolver class
/// templates to provide a common place to define the flag constants.
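/**
* @par Example
* Resolution flags may be combined using the bitwise operators defined by
* this class, for example (a usage sketch):
* @code
* asio::ip::resolver_base::flags f =
* asio::ip::resolver_base::numeric_host
* | asio::ip::resolver_base::numeric_service;
* @endcode
*/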
class resolver_base
{
public:
#if defined(GENERATING_DOCUMENTATION)
/// A bitmask type (C++ Std [lib.bitmask.types]).
typedef unspecified flags;
/// Determine the canonical name of the host specified in the query.
static const flags canonical_name = implementation_defined;
/// Indicate that returned endpoint is intended for use as a locally bound
/// socket endpoint.
static const flags passive = implementation_defined;
/// Host name should be treated as a numeric string defining an IPv4 or IPv6
/// address and no name resolution should be attempted.
static const flags numeric_host = implementation_defined;
/// Service name should be treated as a numeric string defining a port number
/// and no name resolution should be attempted.
static const flags numeric_service = implementation_defined;
/// If the query protocol family is specified as IPv6, return IPv4-mapped
/// IPv6 addresses on finding no IPv6 addresses.
static const flags v4_mapped = implementation_defined;
/// If used with v4_mapped, return all matching IPv6 and IPv4 addresses.
static const flags all_matching = implementation_defined;
/// Only return IPv4 addresses if a non-loopback IPv4 address is configured
/// for the system. Only return IPv6 addresses if a non-loopback IPv6 address
/// is configured for the system.
static const flags address_configured = implementation_defined;
#else
enum flags
{
canonical_name = ASIO_OS_DEF(AI_CANONNAME),
passive = ASIO_OS_DEF(AI_PASSIVE),
numeric_host = ASIO_OS_DEF(AI_NUMERICHOST),
numeric_service = ASIO_OS_DEF(AI_NUMERICSERV),
v4_mapped = ASIO_OS_DEF(AI_V4MAPPED),
all_matching = ASIO_OS_DEF(AI_ALL),
address_configured = ASIO_OS_DEF(AI_ADDRCONFIG)
};
// Implement bitmask operations as shown in C++ Std [lib.bitmask.types].
friend flags operator&(flags x, flags y)
{
return static_cast<flags>(
static_cast<unsigned int>(x) & static_cast<unsigned int>(y));
}
friend flags operator|(flags x, flags y)
{
return static_cast<flags>(
static_cast<unsigned int>(x) | static_cast<unsigned int>(y));
}
friend flags operator^(flags x, flags y)
{
return static_cast<flags>(
static_cast<unsigned int>(x) ^ static_cast<unsigned int>(y));
}
friend flags operator~(flags x)
{
return static_cast<flags>(~static_cast<unsigned int>(x));
}
friend flags& operator&=(flags& x, flags y)
{
x = x & y;
return x;
}
friend flags& operator|=(flags& x, flags y)
{
x = x | y;
return x;
}
friend flags& operator^=(flags& x, flags y)
{
x = x ^ y;
return x;
}
#endif
protected:
/// Protected destructor to prevent deletion through this type.
~resolver_base()
{
}
};
} // namespace ip
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // ASIO_IP_RESOLVER_BASE_HPP
|
0 | repos/asio/asio/include/asio | repos/asio/asio/include/asio/ip/address_v4_range.hpp | //
// ip/address_v4_range.hpp
// ~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2024 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_IP_ADDRESS_V4_RANGE_HPP
#define ASIO_IP_ADDRESS_V4_RANGE_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#include "asio/ip/address_v4_iterator.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace ip {
template <typename> class basic_address_range;
/// Represents a range of IPv4 addresses.
/**
* @par Thread Safety
* @e Distinct @e objects: Safe.@n
* @e Shared @e objects: Unsafe.
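*
* @par Example
* A usage sketch that measures the extent of a range (the addresses shown are
* illustrative):
* @code
* asio::ip::basic_address_iterator<asio::ip::address_v4> first(
* asio::ip::make_address_v4("10.0.0.0"));
* asio::ip::basic_address_iterator<asio::ip::address_v4> last(
* asio::ip::make_address_v4("10.0.0.8"));
* asio::ip::basic_address_range<asio::ip::address_v4> range(first, last);
* std::size_t n = range.size(); // 8
* @endcode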
*/
template <> class basic_address_range<address_v4>
{
public:
/// The type of an iterator that points into the range.
typedef basic_address_iterator<address_v4> iterator;
/// Construct an empty range.
basic_address_range() noexcept
: begin_(address_v4()),
end_(address_v4())
{
}
/// Construct a range that represents the given range of addresses.
explicit basic_address_range(const iterator& first,
const iterator& last) noexcept
: begin_(first),
end_(last)
{
}
/// Copy constructor.
basic_address_range(const basic_address_range& other) noexcept
: begin_(other.begin_),
end_(other.end_)
{
}
/// Move constructor.
basic_address_range(basic_address_range&& other) noexcept
: begin_(static_cast<iterator&&>(other.begin_)),
end_(static_cast<iterator&&>(other.end_))
{
}
/// Assignment operator.
basic_address_range& operator=(const basic_address_range& other) noexcept
{
begin_ = other.begin_;
end_ = other.end_;
return *this;
}
/// Move assignment operator.
basic_address_range& operator=(basic_address_range&& other) noexcept
{
begin_ = static_cast<iterator&&>(other.begin_);
end_ = static_cast<iterator&&>(other.end_);
return *this;
}
/// Obtain an iterator that points to the start of the range.
iterator begin() const noexcept
{
return begin_;
}
/// Obtain an iterator that points to the end of the range.
iterator end() const noexcept
{
return end_;
}
/// Determine whether the range is empty.
bool empty() const noexcept
{
return size() == 0;
}
/// Return the size of the range.
std::size_t size() const noexcept
{
return end_->to_uint() - begin_->to_uint();
}
/// Find an address in the range.
iterator find(const address_v4& addr) const noexcept
{
return addr >= *begin_ && addr < *end_ ? iterator(addr) : end_;
}
private:
iterator begin_;
iterator end_;
};
/// Represents a range of IPv4 addresses.
typedef basic_address_range<address_v4> address_v4_range;
} // namespace ip
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // ASIO_IP_ADDRESS_V4_RANGE_HPP
|
0 | repos/asio/asio/include/asio | repos/asio/asio/include/asio/ip/network_v4.hpp | //
// ip/network_v4.hpp
// ~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2024 Christopher M. Kohlhoff (chris at kohlhoff dot com)
// Copyright (c) 2014 Oliver Kowalke (oliver dot kowalke at gmail dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_IP_NETWORK_V4_HPP
#define ASIO_IP_NETWORK_V4_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#include <string>
#include "asio/detail/string_view.hpp"
#include "asio/error_code.hpp"
#include "asio/ip/address_v4_range.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace ip {
/// Represents an IPv4 network.
/**
* The asio::ip::network_v4 class provides the ability to use and
* manipulate IP version 4 networks.
*
* @par Thread Safety
* @e Distinct @e objects: Safe.@n
* @e Shared @e objects: Unsafe.
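*
* @par Example
* A short sketch that inspects a network's derived properties (the network
* shown is illustrative):
* @code
* asio::ip::network_v4 net = asio::ip::make_network_v4("192.168.1.0/24");
* asio::ip::address_v4 mask = net.netmask(); // 255.255.255.0
* asio::ip::address_v4 bcast = net.broadcast(); // 192.168.1.255
* @endcode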
*/
class network_v4
{
public:
/// Default constructor.
network_v4() noexcept
: address_(),
prefix_length_(0)
{
}
/// Construct a network based on the specified address and prefix length.
ASIO_DECL network_v4(const address_v4& addr,
unsigned short prefix_len);
/// Construct a network based on the specified address and netmask.
ASIO_DECL network_v4(const address_v4& addr,
const address_v4& mask);
/// Copy constructor.
network_v4(const network_v4& other) noexcept
: address_(other.address_),
prefix_length_(other.prefix_length_)
{
}
/// Move constructor.
network_v4(network_v4&& other) noexcept
: address_(static_cast<address_v4&&>(other.address_)),
prefix_length_(other.prefix_length_)
{
}
/// Assign from another network.
network_v4& operator=(const network_v4& other) noexcept
{
address_ = other.address_;
prefix_length_ = other.prefix_length_;
return *this;
}
/// Move-assign from another network.
network_v4& operator=(network_v4&& other) noexcept
{
address_ = static_cast<address_v4&&>(other.address_);
prefix_length_ = other.prefix_length_;
return *this;
}
/// Obtain the address object specified when the network object was created.
address_v4 address() const noexcept
{
return address_;
}
/// Obtain the prefix length that was specified when the network object was
/// created.
unsigned short prefix_length() const noexcept
{
return prefix_length_;
}
/// Obtain the netmask that was specified when the network object was created.
ASIO_DECL address_v4 netmask() const noexcept;
/// Obtain an address object that represents the network address.
address_v4 network() const noexcept
{
return address_v4(address_.to_uint() & netmask().to_uint());
}
/// Obtain an address object that represents the network's broadcast address.
address_v4 broadcast() const noexcept
{
return address_v4(network().to_uint() | (netmask().to_uint() ^ 0xFFFFFFFF));
}
/// Obtain an address range corresponding to the hosts in the network.
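/**
* For example, iterating every host address in a /29 network (a sketch; the
* network shown is illustrative):
* @code
* asio::ip::network_v4 net = asio::ip::make_network_v4("192.168.1.0/29");
* for (asio::ip::address_v4 host : net.hosts())
* std::cout << host << "\n";
* @endcode
*/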
ASIO_DECL address_v4_range hosts() const noexcept;
/// Obtain the true network address, omitting any host bits.
network_v4 canonical() const noexcept
{
return network_v4(network(), prefix_length());
}
/// Test if the network is a valid host address.
bool is_host() const noexcept
{
return prefix_length_ == 32;
}
/// Test if a network is a real subnet of another network.
ASIO_DECL bool is_subnet_of(const network_v4& other) const;
/// Get the network as an address in dotted decimal format.
ASIO_DECL std::string to_string() const;
/// Get the network as an address in dotted decimal format.
ASIO_DECL std::string to_string(asio::error_code& ec) const;
/// Compare two networks for equality.
friend bool operator==(const network_v4& a, const network_v4& b)
{
return a.address_ == b.address_ && a.prefix_length_ == b.prefix_length_;
}
/// Compare two networks for inequality.
friend bool operator!=(const network_v4& a, const network_v4& b)
{
return !(a == b);
}
private:
address_v4 address_;
unsigned short prefix_length_;
};
/// Create an IPv4 network from an address and prefix length.
/**
* @relates address_v4
*/
inline network_v4 make_network_v4(
const address_v4& addr, unsigned short prefix_len)
{
return network_v4(addr, prefix_len);
}
/// Create an IPv4 network from an address and netmask.
/**
* @relates address_v4
*/
inline network_v4 make_network_v4(
const address_v4& addr, const address_v4& mask)
{
return network_v4(addr, mask);
}
/// Create an IPv4 network from a string containing IP address and prefix
/// length.
/**
* @relates network_v4
*/
ASIO_DECL network_v4 make_network_v4(const char* str);
/// Create an IPv4 network from a string containing IP address and prefix
/// length.
/**
* @relates network_v4
*/
ASIO_DECL network_v4 make_network_v4(
const char* str, asio::error_code& ec);
/// Create an IPv4 network from a string containing IP address and prefix
/// length.
/**
* @relates network_v4
*/
ASIO_DECL network_v4 make_network_v4(const std::string& str);
/// Create an IPv4 network from a string containing IP address and prefix
/// length.
/**
* @relates network_v4
*/
ASIO_DECL network_v4 make_network_v4(
const std::string& str, asio::error_code& ec);
#if defined(ASIO_HAS_STRING_VIEW) \
|| defined(GENERATING_DOCUMENTATION)
/// Create an IPv4 network from a string containing IP address and prefix
/// length.
/**
* @relates network_v4
*/
ASIO_DECL network_v4 make_network_v4(string_view str);
/// Create an IPv4 network from a string containing IP address and prefix
/// length.
/**
* @relates network_v4
*/
ASIO_DECL network_v4 make_network_v4(
string_view str, asio::error_code& ec);
#endif // defined(ASIO_HAS_STRING_VIEW)
// || defined(GENERATING_DOCUMENTATION)
#if !defined(ASIO_NO_IOSTREAM)
/// Output a network as a string.
/**
* Used to output a human-readable string for a specified network.
*
* @param os The output stream to which the string will be written.
*
* @param net The network to be written.
*
* @return The output stream.
*
* @relates asio::ip::address_v4
*/
template <typename Elem, typename Traits>
std::basic_ostream<Elem, Traits>& operator<<(
std::basic_ostream<Elem, Traits>& os, const network_v4& net);
#endif // !defined(ASIO_NO_IOSTREAM)
} // namespace ip
} // namespace asio
#include "asio/detail/pop_options.hpp"
#include "asio/ip/impl/network_v4.hpp"
#if defined(ASIO_HEADER_ONLY)
# include "asio/ip/impl/network_v4.ipp"
#endif // defined(ASIO_HEADER_ONLY)
#endif // ASIO_IP_NETWORK_V4_HPP
|
0 | repos/asio/asio/include/asio | repos/asio/asio/include/asio/ip/icmp.hpp | //
// ip/icmp.hpp
// ~~~~~~~~~~~
//
// Copyright (c) 2003-2024 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_IP_ICMP_HPP
#define ASIO_IP_ICMP_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#include "asio/detail/socket_types.hpp"
#include "asio/basic_raw_socket.hpp"
#include "asio/ip/basic_endpoint.hpp"
#include "asio/ip/basic_resolver.hpp"
#include "asio/ip/basic_resolver_iterator.hpp"
#include "asio/ip/basic_resolver_query.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace ip {
/// Encapsulates the flags needed for ICMP.
/**
* The asio::ip::icmp class contains flags necessary for ICMP sockets.
*
* @par Thread Safety
* @e Distinct @e objects: Safe.@n
* @e Shared @e objects: Safe.
*
* @par Concepts:
* Protocol, InternetProtocol.
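*
* @par Example
* Opening a raw ICMP socket for IPv4 (a brief sketch; raw sockets typically
* require elevated privileges):
* @code
* asio::io_context my_context;
* asio::ip::icmp::socket socket(my_context, asio::ip::icmp::v4());
* @endcode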
*/
class icmp
{
public:
/// The type of an ICMP endpoint.
typedef basic_endpoint<icmp> endpoint;
/// Construct to represent the IPv4 ICMP protocol.
static icmp v4() noexcept
{
return icmp(ASIO_OS_DEF(IPPROTO_ICMP),
ASIO_OS_DEF(AF_INET));
}
/// Construct to represent the IPv6 ICMP protocol.
static icmp v6() noexcept
{
return icmp(ASIO_OS_DEF(IPPROTO_ICMPV6),
ASIO_OS_DEF(AF_INET6));
}
/// Obtain an identifier for the type of the protocol.
int type() const noexcept
{
return ASIO_OS_DEF(SOCK_RAW);
}
/// Obtain an identifier for the protocol.
int protocol() const noexcept
{
return protocol_;
}
/// Obtain an identifier for the protocol family.
int family() const noexcept
{
return family_;
}
/// The ICMP socket type.
typedef basic_raw_socket<icmp> socket;
/// The ICMP resolver type.
typedef basic_resolver<icmp> resolver;
/// Compare two protocols for equality.
friend bool operator==(const icmp& p1, const icmp& p2)
{
return p1.protocol_ == p2.protocol_ && p1.family_ == p2.family_;
}
/// Compare two protocols for inequality.
friend bool operator!=(const icmp& p1, const icmp& p2)
{
return p1.protocol_ != p2.protocol_ || p1.family_ != p2.family_;
}
private:
// Construct with a specific family.
explicit icmp(int protocol_id, int protocol_family) noexcept
: protocol_(protocol_id),
family_(protocol_family)
{
}
int protocol_;
int family_;
};
} // namespace ip
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // ASIO_IP_ICMP_HPP
|
0 | repos/asio/asio/include/asio | repos/asio/asio/include/asio/ip/basic_resolver_query.hpp | //
// ip/basic_resolver_query.hpp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2024 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_IP_BASIC_RESOLVER_QUERY_HPP
#define ASIO_IP_BASIC_RESOLVER_QUERY_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#include <string>
#include "asio/detail/socket_ops.hpp"
#include "asio/ip/resolver_query_base.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace ip {
/// A query to be passed to a resolver.
/**
* The asio::ip::basic_resolver_query class template describes a query
* that can be passed to a resolver.
*
* @par Thread Safety
* @e Distinct @e objects: Safe.@n
* @e Shared @e objects: Unsafe.
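*
* @par Example
* Constructing a query for a host and service name (a usage sketch; the host
* and service shown are illustrative):
* @code
* asio::ip::basic_resolver_query<asio::ip::tcp> query(
* "www.example.com", "http");
* @endcode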
*/
template <typename InternetProtocol>
class basic_resolver_query
: public resolver_query_base
{
public:
/// The protocol type associated with the endpoint query.
typedef InternetProtocol protocol_type;
/// Construct with specified service name for any protocol.
/**
* This constructor is typically used to perform name resolution for local
* service binding.
*
* @param service A string identifying the requested service. This may be a
* descriptive name or a numeric string corresponding to a port number.
*
* @param resolve_flags A set of flags that determine how name resolution
* should be performed. The default flags are suitable for local service
* binding.
*
* @note On POSIX systems, service names are typically defined in the file
* <tt>/etc/services</tt>. On Windows, service names may be found in the file
* <tt>c:\\windows\\system32\\drivers\\etc\\services</tt>. Operating systems
* may use additional locations when resolving service names.
*/
basic_resolver_query(const std::string& service,
resolver_query_base::flags resolve_flags = passive | address_configured)
: hints_(),
host_name_(),
service_name_(service)
{
typename InternetProtocol::endpoint endpoint;
hints_.ai_flags = static_cast<int>(resolve_flags);
hints_.ai_family = PF_UNSPEC;
hints_.ai_socktype = endpoint.protocol().type();
hints_.ai_protocol = endpoint.protocol().protocol();
hints_.ai_addrlen = 0;
hints_.ai_canonname = 0;
hints_.ai_addr = 0;
hints_.ai_next = 0;
}
/// Construct with specified service name for a given protocol.
/**
* This constructor is typically used to perform name resolution for local
* service binding with a specific protocol version.
*
* @param protocol A protocol object, normally representing either the IPv4 or
* IPv6 version of an internet protocol.
*
* @param service A string identifying the requested service. This may be a
* descriptive name or a numeric string corresponding to a port number.
*
* @param resolve_flags A set of flags that determine how name resolution
* should be performed. The default flags are suitable for local service
* binding.
*
* @note On POSIX systems, service names are typically defined in the file
* <tt>/etc/services</tt>. On Windows, service names may be found in the file
* <tt>c:\\windows\\system32\\drivers\\etc\\services</tt>. Operating systems
* may use additional locations when resolving service names.
*/
basic_resolver_query(const protocol_type& protocol,
const std::string& service,
resolver_query_base::flags resolve_flags = passive | address_configured)
: hints_(),
host_name_(),
service_name_(service)
{
hints_.ai_flags = static_cast<int>(resolve_flags);
hints_.ai_family = protocol.family();
hints_.ai_socktype = protocol.type();
hints_.ai_protocol = protocol.protocol();
hints_.ai_addrlen = 0;
hints_.ai_canonname = 0;
hints_.ai_addr = 0;
hints_.ai_next = 0;
}
/// Construct with specified host name and service name for any protocol.
/**
* This constructor is typically used to perform name resolution for
* communication with remote hosts.
*
* @param host A string identifying a location. May be a descriptive name or
* a numeric address string. If an empty string and the passive flag has been
* specified, the resolved endpoints are suitable for local service binding.
* If an empty string and passive is not specified, the resolved endpoints
* will use the loopback address.
*
* @param service A string identifying the requested service. This may be a
* descriptive name or a numeric string corresponding to a port number. May
* be an empty string, in which case all resolved endpoints will have a port
* number of 0.
*
* @param resolve_flags A set of flags that determine how name resolution
* should be performed. The default flags are suitable for communication with
* remote hosts.
*
* @note On POSIX systems, host names may be locally defined in the file
* <tt>/etc/hosts</tt>. On Windows, host names may be defined in the file
* <tt>c:\\windows\\system32\\drivers\\etc\\hosts</tt>. Remote host name
* resolution is performed using DNS. Operating systems may use additional
* locations when resolving host names (such as NETBIOS names on Windows).
*
* On POSIX systems, service names are typically defined in the file
* <tt>/etc/services</tt>. On Windows, service names may be found in the file
* <tt>c:\\windows\\system32\\drivers\\etc\\services</tt>. Operating systems
* may use additional locations when resolving service names.
*/
basic_resolver_query(const std::string& host, const std::string& service,
resolver_query_base::flags resolve_flags = address_configured)
: hints_(),
host_name_(host),
service_name_(service)
{
typename InternetProtocol::endpoint endpoint;
hints_.ai_flags = static_cast<int>(resolve_flags);
hints_.ai_family = ASIO_OS_DEF(AF_UNSPEC);
hints_.ai_socktype = endpoint.protocol().type();
hints_.ai_protocol = endpoint.protocol().protocol();
hints_.ai_addrlen = 0;
hints_.ai_canonname = 0;
hints_.ai_addr = 0;
hints_.ai_next = 0;
}
/// Construct with specified host name and service name for a given protocol.
/**
* This constructor is typically used to perform name resolution for
* communication with remote hosts.
*
* @param protocol A protocol object, normally representing either the IPv4 or
* IPv6 version of an internet protocol.
*
* @param host A string identifying a location. May be a descriptive name or
* a numeric address string. If an empty string and the passive flag has been
* specified, the resolved endpoints are suitable for local service binding.
* If an empty string and passive is not specified, the resolved endpoints
* will use the loopback address.
*
* @param service A string identifying the requested service. This may be a
* descriptive name or a numeric string corresponding to a port number. May
* be an empty string, in which case all resolved endpoints will have a port
* number of 0.
*
* @param resolve_flags A set of flags that determine how name resolution
* should be performed. The default flags are suitable for communication with
* remote hosts.
*
* @note On POSIX systems, host names may be locally defined in the file
* <tt>/etc/hosts</tt>. On Windows, host names may be defined in the file
* <tt>c:\\windows\\system32\\drivers\\etc\\hosts</tt>. Remote host name
* resolution is performed using DNS. Operating systems may use additional
* locations when resolving host names (such as NETBIOS names on Windows).
*
* On POSIX systems, service names are typically defined in the file
* <tt>/etc/services</tt>. On Windows, service names may be found in the file
* <tt>c:\\windows\\system32\\drivers\\etc\\services</tt>. Operating systems
* may use additional locations when resolving service names.
*/
basic_resolver_query(const protocol_type& protocol,
const std::string& host, const std::string& service,
resolver_query_base::flags resolve_flags = address_configured)
: hints_(),
host_name_(host),
service_name_(service)
{
hints_.ai_flags = static_cast<int>(resolve_flags);
hints_.ai_family = protocol.family();
hints_.ai_socktype = protocol.type();
hints_.ai_protocol = protocol.protocol();
hints_.ai_addrlen = 0;
hints_.ai_canonname = 0;
hints_.ai_addr = 0;
hints_.ai_next = 0;
}
/// Copy construct a @c basic_resolver_query from another.
basic_resolver_query(const basic_resolver_query& other)
: hints_(other.hints_),
host_name_(other.host_name_),
service_name_(other.service_name_)
{
}
/// Move construct a @c basic_resolver_query from another.
basic_resolver_query(basic_resolver_query&& other)
: hints_(other.hints_),
host_name_(static_cast<std::string&&>(other.host_name_)),
service_name_(static_cast<std::string&&>(other.service_name_))
{
}
/// Get the hints associated with the query.
const asio::detail::addrinfo_type& hints() const
{
return hints_;
}
/// Get the host name associated with the query.
std::string host_name() const
{
return host_name_;
}
/// Get the service name associated with the query.
std::string service_name() const
{
return service_name_;
}
private:
asio::detail::addrinfo_type hints_;
std::string host_name_;
std::string service_name_;
};
} // namespace ip
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // ASIO_IP_BASIC_RESOLVER_QUERY_HPP
|
0 | repos/asio/asio/include/asio | repos/asio/asio/include/asio/ip/multicast.hpp | //
// ip/multicast.hpp
// ~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2024 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_IP_MULTICAST_HPP
#define ASIO_IP_MULTICAST_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#include <cstddef>
#include "asio/ip/detail/socket_option.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace ip {
namespace multicast {
/// Socket option to join a multicast group on a specified interface.
/**
* Implements the IPPROTO_IP/IP_ADD_MEMBERSHIP socket option.
*
* @par Examples
* Setting the option to join a multicast group:
* @code
* asio::ip::udp::socket socket(my_context);
* ...
* asio::ip::address multicast_address =
* asio::ip::make_address("225.0.0.1");
* asio::ip::multicast::join_group option(multicast_address);
* socket.set_option(option);
* @endcode
*
* @par Concepts:
* SettableSocketOption.
*/
#if defined(GENERATING_DOCUMENTATION)
typedef implementation_defined join_group;
#else
typedef asio::ip::detail::socket_option::multicast_request<
ASIO_OS_DEF(IPPROTO_IP),
ASIO_OS_DEF(IP_ADD_MEMBERSHIP),
ASIO_OS_DEF(IPPROTO_IPV6),
ASIO_OS_DEF(IPV6_JOIN_GROUP)> join_group;
#endif
/// Socket option to leave a multicast group on a specified interface.
/**
* Implements the IPPROTO_IP/IP_DROP_MEMBERSHIP socket option.
*
* @par Examples
* Setting the option to leave a multicast group:
* @code
* asio::ip::udp::socket socket(my_context);
* ...
* asio::ip::address multicast_address =
* asio::ip::make_address("225.0.0.1");
* asio::ip::multicast::leave_group option(multicast_address);
* socket.set_option(option);
* @endcode
*
* @par Concepts:
* SettableSocketOption.
*/
#if defined(GENERATING_DOCUMENTATION)
typedef implementation_defined leave_group;
#else
typedef asio::ip::detail::socket_option::multicast_request<
ASIO_OS_DEF(IPPROTO_IP),
ASIO_OS_DEF(IP_DROP_MEMBERSHIP),
ASIO_OS_DEF(IPPROTO_IPV6),
ASIO_OS_DEF(IPV6_LEAVE_GROUP)> leave_group;
#endif
/// Socket option for local interface to use for outgoing multicast packets.
/**
* Implements the IPPROTO_IP/IP_MULTICAST_IF socket option.
*
* @par Examples
* Setting the option:
* @code
* asio::ip::udp::socket socket(my_context);
* ...
* asio::ip::address_v4 local_interface =
* asio::ip::make_address_v4("1.2.3.4");
* asio::ip::multicast::outbound_interface option(local_interface);
* socket.set_option(option);
* @endcode
*
* @par Concepts:
* SettableSocketOption.
*/
#if defined(GENERATING_DOCUMENTATION)
typedef implementation_defined outbound_interface;
#else
typedef asio::ip::detail::socket_option::network_interface<
ASIO_OS_DEF(IPPROTO_IP),
ASIO_OS_DEF(IP_MULTICAST_IF),
ASIO_OS_DEF(IPPROTO_IPV6),
ASIO_OS_DEF(IPV6_MULTICAST_IF)> outbound_interface;
#endif
/// Socket option for time-to-live associated with outgoing multicast packets.
/**
* Implements the IPPROTO_IP/IP_MULTICAST_TTL socket option.
*
* @par Examples
* Setting the option:
* @code
* asio::ip::udp::socket socket(my_context);
* ...
* asio::ip::multicast::hops option(4);
* socket.set_option(option);
* @endcode
*
* @par
* Getting the current option value:
* @code
* asio::ip::udp::socket socket(my_context);
* ...
* asio::ip::multicast::hops option;
* socket.get_option(option);
* int ttl = option.value();
* @endcode
*
* @par Concepts:
* GettableSocketOption, SettableSocketOption.
*/
#if defined(GENERATING_DOCUMENTATION)
typedef implementation_defined hops;
#else
typedef asio::ip::detail::socket_option::multicast_hops<
ASIO_OS_DEF(IPPROTO_IP),
ASIO_OS_DEF(IP_MULTICAST_TTL),
ASIO_OS_DEF(IPPROTO_IPV6),
ASIO_OS_DEF(IPV6_MULTICAST_HOPS)> hops;
#endif
/// Socket option determining whether outgoing multicast packets will be
/// received on the same socket if it is a member of the multicast group.
/**
* Implements the IPPROTO_IP/IP_MULTICAST_LOOP socket option.
*
* @par Examples
* Setting the option:
* @code
* asio::ip::udp::socket socket(my_context);
* ...
* asio::ip::multicast::enable_loopback option(true);
* socket.set_option(option);
* @endcode
*
* @par
* Getting the current option value:
* @code
* asio::ip::udp::socket socket(my_context);
* ...
* asio::ip::multicast::enable_loopback option;
* socket.get_option(option);
* bool is_set = option.value();
* @endcode
*
* @par Concepts:
* GettableSocketOption, SettableSocketOption.
*/
#if defined(GENERATING_DOCUMENTATION)
typedef implementation_defined enable_loopback;
#else
typedef asio::ip::detail::socket_option::multicast_enable_loopback<
ASIO_OS_DEF(IPPROTO_IP),
ASIO_OS_DEF(IP_MULTICAST_LOOP),
ASIO_OS_DEF(IPPROTO_IPV6),
ASIO_OS_DEF(IPV6_MULTICAST_LOOP)> enable_loopback;
#endif
} // namespace multicast
} // namespace ip
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // ASIO_IP_MULTICAST_HPP
|
0 | repos/asio/asio/include/asio | repos/asio/asio/include/asio/ip/basic_resolver_iterator.hpp | //
// ip/basic_resolver_iterator.hpp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2024 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_IP_BASIC_RESOLVER_ITERATOR_HPP
#define ASIO_IP_BASIC_RESOLVER_ITERATOR_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#include <cstddef>
#include <cstring>
#include <iterator>
#include <string>
#include <vector>
#include "asio/detail/memory.hpp"
#include "asio/detail/socket_ops.hpp"
#include "asio/detail/socket_types.hpp"
#include "asio/ip/basic_resolver_entry.hpp"
#if defined(ASIO_WINDOWS_RUNTIME)
# include "asio/detail/winrt_utils.hpp"
#endif // defined(ASIO_WINDOWS_RUNTIME)
#include "asio/detail/push_options.hpp"
namespace asio {
namespace ip {
/// An iterator over the entries produced by a resolver.
/**
* The asio::ip::basic_resolver_iterator class template is used to define
* iterators over the results returned by a resolver.
*
* The iterator's value_type, obtained when the iterator is dereferenced, is:
* @code const basic_resolver_entry<InternetProtocol> @endcode
*
* @par Thread Safety
* @e Distinct @e objects: Safe.@n
* @e Shared @e objects: Unsafe.
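*
* @par Example
* Iterating over resolver results (a sketch; @c it is assumed to be an
* iterator obtained from a resolver, and a default-constructed iterator acts
* as the end iterator):
* @code
* asio::ip::basic_resolver_iterator<asio::ip::tcp> end;
* for (; it != end; ++it)
* std::cout << it->endpoint() << "\n";
* @endcode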
*/
template <typename InternetProtocol>
class basic_resolver_iterator
{
public:
/// The type used for the distance between two iterators.
typedef std::ptrdiff_t difference_type;
/// The type of the value pointed to by the iterator.
typedef basic_resolver_entry<InternetProtocol> value_type;
/// The type of the result of applying operator->() to the iterator.
typedef const basic_resolver_entry<InternetProtocol>* pointer;
/// The type of the result of applying operator*() to the iterator.
typedef const basic_resolver_entry<InternetProtocol>& reference;
/// The iterator category.
typedef std::forward_iterator_tag iterator_category;
/// Default constructor creates an end iterator.
basic_resolver_iterator()
: index_(0)
{
}
/// Copy constructor.
basic_resolver_iterator(const basic_resolver_iterator& other)
: values_(other.values_),
index_(other.index_)
{
}
/// Move constructor.
basic_resolver_iterator(basic_resolver_iterator&& other)
: values_(static_cast<values_ptr_type&&>(other.values_)),
index_(other.index_)
{
other.index_ = 0;
}
/// Assignment operator.
basic_resolver_iterator& operator=(const basic_resolver_iterator& other)
{
values_ = other.values_;
index_ = other.index_;
return *this;
}
/// Move-assignment operator.
basic_resolver_iterator& operator=(basic_resolver_iterator&& other)
{
if (this != &other)
{
values_ = static_cast<values_ptr_type&&>(other.values_);
index_ = other.index_;
other.index_ = 0;
}
return *this;
}
/// Dereference an iterator.
const basic_resolver_entry<InternetProtocol>& operator*() const
{
return dereference();
}
/// Dereference an iterator.
const basic_resolver_entry<InternetProtocol>* operator->() const
{
return &dereference();
}
/// Increment operator (prefix).
basic_resolver_iterator& operator++()
{
increment();
return *this;
}
/// Increment operator (postfix).
basic_resolver_iterator operator++(int)
{
basic_resolver_iterator tmp(*this);
++*this;
return tmp;
}
/// Test two iterators for equality.
friend bool operator==(const basic_resolver_iterator& a,
const basic_resolver_iterator& b)
{
return a.equal(b);
}
/// Test two iterators for inequality.
friend bool operator!=(const basic_resolver_iterator& a,
const basic_resolver_iterator& b)
{
return !a.equal(b);
}
protected:
void increment()
{
if (++index_ == values_->size())
{
// Reset state to match a default constructed end iterator.
values_.reset();
index_ = 0;
}
}
bool equal(const basic_resolver_iterator& other) const
{
if (!values_ && !other.values_)
return true;
if (values_ != other.values_)
return false;
return index_ == other.index_;
}
const basic_resolver_entry<InternetProtocol>& dereference() const
{
return (*values_)[index_];
}
typedef std::vector<basic_resolver_entry<InternetProtocol>> values_type;
typedef asio::detail::shared_ptr<values_type> values_ptr_type;
values_ptr_type values_;
std::size_t index_;
};
} // namespace ip
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // ASIO_IP_BASIC_RESOLVER_ITERATOR_HPP
|
0 | repos/asio/asio/include/asio | repos/asio/asio/include/asio/ip/host_name.hpp | //
// ip/host_name.hpp
// ~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2024 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_IP_HOST_NAME_HPP
#define ASIO_IP_HOST_NAME_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#include <string>
#include "asio/error_code.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace ip {
/// Get the current host name.
ASIO_DECL std::string host_name();
/// Get the current host name.
ASIO_DECL std::string host_name(asio::error_code& ec);
} // namespace ip
} // namespace asio
#include "asio/detail/pop_options.hpp"
#if defined(ASIO_HEADER_ONLY)
# include "asio/ip/impl/host_name.ipp"
#endif // defined(ASIO_HEADER_ONLY)
#endif // ASIO_IP_HOST_NAME_HPP
|
0 | repos/asio/asio/include/asio | repos/asio/asio/include/asio/ip/basic_endpoint.hpp | //
// ip/basic_endpoint.hpp
// ~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2024 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_IP_BASIC_ENDPOINT_HPP
#define ASIO_IP_BASIC_ENDPOINT_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#include <functional>
#include "asio/detail/cstdint.hpp"
#include "asio/ip/address.hpp"
#include "asio/ip/detail/endpoint.hpp"
#if !defined(ASIO_NO_IOSTREAM)
# include <iosfwd>
#endif // !defined(ASIO_NO_IOSTREAM)
#include "asio/detail/push_options.hpp"
namespace asio {
namespace ip {
/// Type used for storing port numbers.
typedef uint_least16_t port_type;
/// Describes an endpoint for a version-independent IP socket.
/**
* The asio::ip::basic_endpoint class template describes an endpoint that
* may be associated with a particular socket.
*
* @par Thread Safety
* @e Distinct @e objects: Safe.@n
* @e Shared @e objects: Unsafe.
*
* @par Concepts:
* Endpoint.
*/
template <typename InternetProtocol>
class basic_endpoint
{
public:
/// The protocol type associated with the endpoint.
typedef InternetProtocol protocol_type;
/// The type of the endpoint structure. This type is dependent on the
/// underlying implementation of the socket layer.
#if defined(GENERATING_DOCUMENTATION)
typedef implementation_defined data_type;
#else
typedef asio::detail::socket_addr_type data_type;
#endif
/// Default constructor.
basic_endpoint() noexcept
: impl_()
{
}
/// Construct an endpoint using a port number, specified in the host's byte
/// order. The IP address will be the any address (i.e. INADDR_ANY or
/// in6addr_any). This constructor would typically be used for accepting new
/// connections.
/**
* @par Examples
* To initialise an IPv4 TCP endpoint for port 1234, use:
* @code
* asio::ip::tcp::endpoint ep(asio::ip::tcp::v4(), 1234);
* @endcode
*
* To specify an IPv6 UDP endpoint for port 9876, use:
* @code
* asio::ip::udp::endpoint ep(asio::ip::udp::v6(), 9876);
* @endcode
*/
basic_endpoint(const InternetProtocol& internet_protocol,
port_type port_num) noexcept
: impl_(internet_protocol.family(), port_num)
{
}
/// Construct an endpoint using a port number and an IP address. This
/// constructor may be used for accepting connections on a specific interface
/// or for making a connection to a remote endpoint.
basic_endpoint(const asio::ip::address& addr,
port_type port_num) noexcept
: impl_(addr, port_num)
{
}
/// Copy constructor.
basic_endpoint(const basic_endpoint& other) noexcept
: impl_(other.impl_)
{
}
/// Move constructor.
basic_endpoint(basic_endpoint&& other) noexcept
: impl_(other.impl_)
{
}
/// Assign from another endpoint.
basic_endpoint& operator=(const basic_endpoint& other) noexcept
{
impl_ = other.impl_;
return *this;
}
/// Move-assign from another endpoint.
basic_endpoint& operator=(basic_endpoint&& other) noexcept
{
impl_ = other.impl_;
return *this;
}
/// The protocol associated with the endpoint.
protocol_type protocol() const noexcept
{
if (impl_.is_v4())
return InternetProtocol::v4();
return InternetProtocol::v6();
}
/// Get the underlying endpoint in the native type.
data_type* data() noexcept
{
return impl_.data();
}
/// Get the underlying endpoint in the native type.
const data_type* data() const noexcept
{
return impl_.data();
}
/// Get the underlying size of the endpoint in the native type.
std::size_t size() const noexcept
{
return impl_.size();
}
/// Set the underlying size of the endpoint in the native type.
void resize(std::size_t new_size)
{
impl_.resize(new_size);
}
/// Get the capacity of the endpoint in the native type.
std::size_t capacity() const noexcept
{
return impl_.capacity();
}
/// Get the port associated with the endpoint. The port number is always in
/// the host's byte order.
port_type port() const noexcept
{
return impl_.port();
}
/// Set the port associated with the endpoint. The port number is always in
/// the host's byte order.
void port(port_type port_num) noexcept
{
impl_.port(port_num);
}
/// Get the IP address associated with the endpoint.
asio::ip::address address() const noexcept
{
return impl_.address();
}
/// Set the IP address associated with the endpoint.
void address(const asio::ip::address& addr) noexcept
{
impl_.address(addr);
}
/// Compare two endpoints for equality.
friend bool operator==(const basic_endpoint<InternetProtocol>& e1,
const basic_endpoint<InternetProtocol>& e2) noexcept
{
return e1.impl_ == e2.impl_;
}
/// Compare two endpoints for inequality.
friend bool operator!=(const basic_endpoint<InternetProtocol>& e1,
const basic_endpoint<InternetProtocol>& e2) noexcept
{
return !(e1 == e2);
}
/// Compare endpoints for ordering.
friend bool operator<(const basic_endpoint<InternetProtocol>& e1,
const basic_endpoint<InternetProtocol>& e2) noexcept
{
return e1.impl_ < e2.impl_;
}
/// Compare endpoints for ordering.
friend bool operator>(const basic_endpoint<InternetProtocol>& e1,
const basic_endpoint<InternetProtocol>& e2) noexcept
{
return e2.impl_ < e1.impl_;
}
/// Compare endpoints for ordering.
friend bool operator<=(const basic_endpoint<InternetProtocol>& e1,
const basic_endpoint<InternetProtocol>& e2) noexcept
{
return !(e2 < e1);
}
/// Compare endpoints for ordering.
friend bool operator>=(const basic_endpoint<InternetProtocol>& e1,
const basic_endpoint<InternetProtocol>& e2) noexcept
{
return !(e1 < e2);
}
private:
// The underlying IP endpoint.
asio::ip::detail::endpoint impl_;
};
#if !defined(ASIO_NO_IOSTREAM)
/// Output an endpoint as a string.
/**
* Used to output a human-readable string for a specified endpoint.
*
* @param os The output stream to which the string will be written.
*
* @param endpoint The endpoint to be written.
*
* @return The output stream.
*
* @relates asio::ip::basic_endpoint
*/
template <typename Elem, typename Traits, typename InternetProtocol>
std::basic_ostream<Elem, Traits>& operator<<(
std::basic_ostream<Elem, Traits>& os,
const basic_endpoint<InternetProtocol>& endpoint);
#endif // !defined(ASIO_NO_IOSTREAM)
} // namespace ip
} // namespace asio
namespace std {
template <typename InternetProtocol>
struct hash<asio::ip::basic_endpoint<InternetProtocol>>
{
std::size_t operator()(
const asio::ip::basic_endpoint<InternetProtocol>& ep)
const noexcept
{
std::size_t hash1 = std::hash<asio::ip::address>()(ep.address());
std::size_t hash2 = std::hash<unsigned short>()(ep.port());
return hash1 ^ (hash2 + 0x9e3779b9 + (hash1 << 6) + (hash1 >> 2));
}
};
} // namespace std
#include "asio/detail/pop_options.hpp"
#include "asio/ip/impl/basic_endpoint.hpp"
#endif // ASIO_IP_BASIC_ENDPOINT_HPP
|
0 | repos/asio/asio/include/asio | repos/asio/asio/include/asio/ip/udp.hpp | //
// ip/udp.hpp
// ~~~~~~~~~~
//
// Copyright (c) 2003-2024 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_IP_UDP_HPP
#define ASIO_IP_UDP_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#include "asio/basic_datagram_socket.hpp"
#include "asio/detail/socket_types.hpp"
#include "asio/ip/basic_endpoint.hpp"
#include "asio/ip/basic_resolver.hpp"
#include "asio/ip/basic_resolver_iterator.hpp"
#include "asio/ip/basic_resolver_query.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace ip {
/// Encapsulates the flags needed for UDP.
/**
* The asio::ip::udp class contains flags necessary for UDP sockets.
*
* @par Thread Safety
* @e Distinct @e objects: Safe.@n
* @e Shared @e objects: Safe.
*
* @par Concepts:
* Protocol, InternetProtocol.
*/
class udp
{
public:
/// The type of a UDP endpoint.
typedef basic_endpoint<udp> endpoint;
/// Construct to represent the IPv4 UDP protocol.
static udp v4() noexcept
{
return udp(ASIO_OS_DEF(AF_INET));
}
/// Construct to represent the IPv6 UDP protocol.
static udp v6() noexcept
{
return udp(ASIO_OS_DEF(AF_INET6));
}
/// Obtain an identifier for the type of the protocol.
int type() const noexcept
{
return ASIO_OS_DEF(SOCK_DGRAM);
}
/// Obtain an identifier for the protocol.
int protocol() const noexcept
{
return ASIO_OS_DEF(IPPROTO_UDP);
}
/// Obtain an identifier for the protocol family.
int family() const noexcept
{
return family_;
}
/// The UDP socket type.
typedef basic_datagram_socket<udp> socket;
/// The UDP resolver type.
typedef basic_resolver<udp> resolver;
/// Compare two protocols for equality.
friend bool operator==(const udp& p1, const udp& p2)
{
return p1.family_ == p2.family_;
}
/// Compare two protocols for inequality.
friend bool operator!=(const udp& p1, const udp& p2)
{
return p1.family_ != p2.family_;
}
private:
// Construct with a specific family.
explicit udp(int protocol_family) noexcept
: family_(protocol_family)
{
}
int family_;
};
} // namespace ip
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // ASIO_IP_UDP_HPP
|
0 | repos/asio/asio/include/asio | repos/asio/asio/include/asio/ip/basic_resolver.hpp | //
// ip/basic_resolver.hpp
// ~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2024 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_IP_BASIC_RESOLVER_HPP
#define ASIO_IP_BASIC_RESOLVER_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#include <string>
#include <utility>
#include "asio/any_io_executor.hpp"
#include "asio/async_result.hpp"
#include "asio/detail/handler_type_requirements.hpp"
#include "asio/detail/io_object_impl.hpp"
#include "asio/detail/non_const_lvalue.hpp"
#include "asio/detail/string_view.hpp"
#include "asio/detail/throw_error.hpp"
#include "asio/error.hpp"
#include "asio/execution_context.hpp"
#include "asio/ip/basic_resolver_iterator.hpp"
#include "asio/ip/basic_resolver_query.hpp"
#include "asio/ip/basic_resolver_results.hpp"
#include "asio/ip/resolver_base.hpp"
#if defined(ASIO_WINDOWS_RUNTIME)
# include "asio/detail/winrt_resolver_service.hpp"
#else
# include "asio/detail/resolver_service.hpp"
#endif
#include "asio/detail/push_options.hpp"
namespace asio {
namespace ip {
#if !defined(ASIO_IP_BASIC_RESOLVER_FWD_DECL)
#define ASIO_IP_BASIC_RESOLVER_FWD_DECL
// Forward declaration with defaulted arguments.
template <typename InternetProtocol, typename Executor = any_io_executor>
class basic_resolver;
#endif // !defined(ASIO_IP_BASIC_RESOLVER_FWD_DECL)
/// Provides endpoint resolution functionality.
/**
* The basic_resolver class template provides the ability to resolve a query
* to a list of endpoints.
*
* @par Thread Safety
* @e Distinct @e objects: Safe.@n
* @e Shared @e objects: Unsafe.
*/
template <typename InternetProtocol, typename Executor>
class basic_resolver
: public resolver_base
{
private:
class initiate_async_resolve;
public:
/// The type of the executor associated with the object.
typedef Executor executor_type;
/// Rebinds the resolver type to another executor.
template <typename Executor1>
struct rebind_executor
{
/// The resolver type when rebound to the specified executor.
typedef basic_resolver<InternetProtocol, Executor1> other;
};
/// The protocol type.
typedef InternetProtocol protocol_type;
/// The endpoint type.
typedef typename InternetProtocol::endpoint endpoint_type;
#if !defined(ASIO_NO_DEPRECATED)
/// (Deprecated.) The query type.
typedef basic_resolver_query<InternetProtocol> query;
/// (Deprecated.) The iterator type.
typedef basic_resolver_iterator<InternetProtocol> iterator;
#endif // !defined(ASIO_NO_DEPRECATED)
/// The results type.
typedef basic_resolver_results<InternetProtocol> results_type;
/// Construct with executor.
/**
* This constructor creates a basic_resolver.
*
* @param ex The I/O executor that the resolver will use, by default, to
* dispatch handlers for any asynchronous operations performed on the
* resolver.
*/
explicit basic_resolver(const executor_type& ex)
: impl_(0, ex)
{
}
/// Construct with execution context.
/**
* This constructor creates a basic_resolver.
*
* @param context An execution context which provides the I/O executor that
* the resolver will use, by default, to dispatch handlers for any
* asynchronous operations performed on the resolver.
*/
template <typename ExecutionContext>
explicit basic_resolver(ExecutionContext& context,
constraint_t<
is_convertible<ExecutionContext&, execution_context&>::value
> = 0)
: impl_(0, 0, context)
{
}
/// Move-construct a basic_resolver from another.
/**
* This constructor moves a resolver from one object to another.
*
* @param other The other basic_resolver object from which the move will
* occur.
*
* @note Following the move, the moved-from object is in the same state as if
* constructed using the @c basic_resolver(const executor_type&) constructor.
*/
basic_resolver(basic_resolver&& other)
: impl_(std::move(other.impl_))
{
}
// All resolvers have access to each other's implementations.
template <typename InternetProtocol1, typename Executor1>
friend class basic_resolver;
/// Move-construct a basic_resolver from another.
/**
* This constructor moves a resolver from one object to another.
*
* @param other The other basic_resolver object from which the move will
* occur.
*
* @note Following the move, the moved-from object is in the same state as if
* constructed using the @c basic_resolver(const executor_type&) constructor.
*/
template <typename Executor1>
basic_resolver(basic_resolver<InternetProtocol, Executor1>&& other,
constraint_t<
is_convertible<Executor1, Executor>::value
> = 0)
: impl_(std::move(other.impl_))
{
}
/// Move-assign a basic_resolver from another.
/**
* This assignment operator moves a resolver from one object to another.
* Cancels any outstanding asynchronous operations associated with the target
* object.
*
* @param other The other basic_resolver object from which the move will
* occur.
*
* @note Following the move, the moved-from object is in the same state as if
* constructed using the @c basic_resolver(const executor_type&) constructor.
*/
basic_resolver& operator=(basic_resolver&& other)
{
impl_ = std::move(other.impl_);
return *this;
}
/// Move-assign a basic_resolver from another.
/**
* This assignment operator moves a resolver from one object to another.
* Cancels any outstanding asynchronous operations associated with the target
* object.
*
* @param other The other basic_resolver object from which the move will
* occur.
*
* @note Following the move, the moved-from object is in the same state as if
* constructed using the @c basic_resolver(const executor_type&) constructor.
*/
template <typename Executor1>
constraint_t<
is_convertible<Executor1, Executor>::value,
basic_resolver&
> operator=(basic_resolver<InternetProtocol, Executor1>&& other)
{
basic_resolver tmp(std::move(other));
impl_ = std::move(tmp.impl_);
return *this;
}
/// Destroys the resolver.
/**
* This function destroys the resolver, cancelling any outstanding
* asynchronous wait operations associated with the resolver as if by calling
* @c cancel.
*/
~basic_resolver()
{
}
/// Get the executor associated with the object.
executor_type get_executor() noexcept
{
return impl_.get_executor();
}
/// Cancel any asynchronous operations that are waiting on the resolver.
/**
* This function forces the completion of any pending asynchronous
* operations on the host resolver. The handler for each cancelled operation
* will be invoked with the asio::error::operation_aborted error code.
*/
void cancel()
{
return impl_.get_service().cancel(impl_.get_implementation());
}
#if !defined(ASIO_NO_DEPRECATED)
/// (Deprecated: Use overload with separate host and service parameters.)
/// Perform forward resolution of a query to a list of entries.
/**
* This function is used to resolve a query into a list of endpoint entries.
*
* @param q A query object that determines what endpoints will be returned.
*
* @returns A range object representing the list of endpoint entries. A
* successful call to this function is guaranteed to return a non-empty
* range.
*
* @throws asio::system_error Thrown on failure.
*/
results_type resolve(const query& q)
{
asio::error_code ec;
results_type r = impl_.get_service().resolve(
impl_.get_implementation(), q, ec);
asio::detail::throw_error(ec, "resolve");
return r;
}
/// (Deprecated: Use overload with separate host and service parameters.)
/// Perform forward resolution of a query to a list of entries.
/**
* This function is used to resolve a query into a list of endpoint entries.
*
* @param q A query object that determines what endpoints will be returned.
*
* @param ec Set to indicate what error occurred, if any.
*
* @returns A range object representing the list of endpoint entries. An
* empty range is returned if an error occurs. A successful call to this
* function is guaranteed to return a non-empty range.
*/
results_type resolve(const query& q, asio::error_code& ec)
{
return impl_.get_service().resolve(impl_.get_implementation(), q, ec);
}
#endif // !defined(ASIO_NO_DEPRECATED)
/// Perform forward resolution of a query to a list of entries.
/**
* This function is used to resolve host and service names into a list of
* endpoint entries.
*
* @param host A string identifying a location. May be a descriptive name or
* a numeric address string. If an empty string and the passive flag has been
* specified, the resolved endpoints are suitable for local service binding.
* If an empty string and passive is not specified, the resolved endpoints
* will use the loopback address.
*
* @param service A string identifying the requested service. This may be a
* descriptive name or a numeric string corresponding to a port number. May
* be an empty string, in which case all resolved endpoints will have a port
* number of 0.
*
* @returns A range object representing the list of endpoint entries. A
* successful call to this function is guaranteed to return a non-empty
* range.
*
* @throws asio::system_error Thrown on failure.
*
* @note On POSIX systems, host names may be locally defined in the file
* <tt>/etc/hosts</tt>. On Windows, host names may be defined in the file
* <tt>c:\\windows\\system32\\drivers\\etc\\hosts</tt>. Remote host name
* resolution is performed using DNS. Operating systems may use additional
* locations when resolving host names (such as NETBIOS names on Windows).
*
* On POSIX systems, service names are typically defined in the file
* <tt>/etc/services</tt>. On Windows, service names may be found in the file
* <tt>c:\\windows\\system32\\drivers\\etc\\services</tt>. Operating systems
* may use additional locations when resolving service names.
*/
results_type resolve(ASIO_STRING_VIEW_PARAM host,
ASIO_STRING_VIEW_PARAM service)
{
return resolve(host, service, resolver_base::flags());
}
/// Perform forward resolution of a query to a list of entries.
/**
* This function is used to resolve host and service names into a list of
* endpoint entries.
*
* @param host A string identifying a location. May be a descriptive name or
* a numeric address string. If an empty string and the passive flag has been
* specified, the resolved endpoints are suitable for local service binding.
* If an empty string and passive is not specified, the resolved endpoints
* will use the loopback address.
*
* @param service A string identifying the requested service. This may be a
* descriptive name or a numeric string corresponding to a port number. May
* be an empty string, in which case all resolved endpoints will have a port
* number of 0.
*
* @param ec Set to indicate what error occurred, if any.
*
* @returns A range object representing the list of endpoint entries. An
* empty range is returned if an error occurs. A successful call to this
* function is guaranteed to return a non-empty range.
*
* @note On POSIX systems, host names may be locally defined in the file
* <tt>/etc/hosts</tt>. On Windows, host names may be defined in the file
* <tt>c:\\windows\\system32\\drivers\\etc\\hosts</tt>. Remote host name
* resolution is performed using DNS. Operating systems may use additional
* locations when resolving host names (such as NETBIOS names on Windows).
*
* On POSIX systems, service names are typically defined in the file
* <tt>/etc/services</tt>. On Windows, service names may be found in the file
* <tt>c:\\windows\\system32\\drivers\\etc\\services</tt>. Operating systems
* may use additional locations when resolving service names.
*/
results_type resolve(ASIO_STRING_VIEW_PARAM host,
ASIO_STRING_VIEW_PARAM service, asio::error_code& ec)
{
return resolve(host, service, resolver_base::flags(), ec);
}
/// Perform forward resolution of a query to a list of entries.
/**
* This function is used to resolve host and service names into a list of
* endpoint entries.
*
* @param host A string identifying a location. May be a descriptive name or
* a numeric address string. If an empty string and the passive flag has been
* specified, the resolved endpoints are suitable for local service binding.
* If an empty string and passive is not specified, the resolved endpoints
* will use the loopback address.
*
* @param service A string identifying the requested service. This may be a
* descriptive name or a numeric string corresponding to a port number. May
* be an empty string, in which case all resolved endpoints will have a port
* number of 0.
*
* @param resolve_flags A set of flags that determine how name resolution
* should be performed. The default flags are suitable for communication with
* remote hosts. See the @ref resolver_base documentation for the set of
* available flags.
*
* @returns A range object representing the list of endpoint entries. A
* successful call to this function is guaranteed to return a non-empty
* range.
*
* @throws asio::system_error Thrown on failure.
*
* @note On POSIX systems, host names may be locally defined in the file
* <tt>/etc/hosts</tt>. On Windows, host names may be defined in the file
* <tt>c:\\windows\\system32\\drivers\\etc\\hosts</tt>. Remote host name
* resolution is performed using DNS. Operating systems may use additional
* locations when resolving host names (such as NETBIOS names on Windows).
*
* On POSIX systems, service names are typically defined in the file
* <tt>/etc/services</tt>. On Windows, service names may be found in the file
* <tt>c:\\windows\\system32\\drivers\\etc\\services</tt>. Operating systems
* may use additional locations when resolving service names.
*/
results_type resolve(ASIO_STRING_VIEW_PARAM host,
ASIO_STRING_VIEW_PARAM service, resolver_base::flags resolve_flags)
{
asio::error_code ec;
basic_resolver_query<protocol_type> q(static_cast<std::string>(host),
static_cast<std::string>(service), resolve_flags);
results_type r = impl_.get_service().resolve(
impl_.get_implementation(), q, ec);
asio::detail::throw_error(ec, "resolve");
return r;
}
/// Perform forward resolution of a query to a list of entries.
/**
* This function is used to resolve host and service names into a list of
* endpoint entries.
*
* @param host A string identifying a location. May be a descriptive name or
* a numeric address string. If an empty string and the passive flag has been
* specified, the resolved endpoints are suitable for local service binding.
* If an empty string and passive is not specified, the resolved endpoints
* will use the loopback address.
*
* @param service A string identifying the requested service. This may be a
* descriptive name or a numeric string corresponding to a port number. May
* be an empty string, in which case all resolved endpoints will have a port
* number of 0.
*
* @param resolve_flags A set of flags that determine how name resolution
* should be performed. The default flags are suitable for communication with
* remote hosts. See the @ref resolver_base documentation for the set of
* available flags.
*
* @param ec Set to indicate what error occurred, if any.
*
* @returns A range object representing the list of endpoint entries. An
* empty range is returned if an error occurs. A successful call to this
* function is guaranteed to return a non-empty range.
*
* @note On POSIX systems, host names may be locally defined in the file
* <tt>/etc/hosts</tt>. On Windows, host names may be defined in the file
* <tt>c:\\windows\\system32\\drivers\\etc\\hosts</tt>. Remote host name
* resolution is performed using DNS. Operating systems may use additional
* locations when resolving host names (such as NETBIOS names on Windows).
*
* On POSIX systems, service names are typically defined in the file
* <tt>/etc/services</tt>. On Windows, service names may be found in the file
* <tt>c:\\windows\\system32\\drivers\\etc\\services</tt>. Operating systems
* may use additional locations when resolving service names.
*/
results_type resolve(ASIO_STRING_VIEW_PARAM host,
ASIO_STRING_VIEW_PARAM service, resolver_base::flags resolve_flags,
asio::error_code& ec)
{
basic_resolver_query<protocol_type> q(static_cast<std::string>(host),
static_cast<std::string>(service), resolve_flags);
return impl_.get_service().resolve(impl_.get_implementation(), q, ec);
}
/// Perform forward resolution of a query to a list of entries.
/**
* This function is used to resolve host and service names into a list of
* endpoint entries.
*
* @param protocol A protocol object, normally representing either the IPv4 or
* IPv6 version of an internet protocol.
*
* @param host A string identifying a location. May be a descriptive name or
* a numeric address string. If an empty string and the passive flag has been
* specified, the resolved endpoints are suitable for local service binding.
* If an empty string and passive is not specified, the resolved endpoints
* will use the loopback address.
*
* @param service A string identifying the requested service. This may be a
* descriptive name or a numeric string corresponding to a port number. May
* be an empty string, in which case all resolved endpoints will have a port
* number of 0.
*
* @returns A range object representing the list of endpoint entries. A
* successful call to this function is guaranteed to return a non-empty
* range.
*
* @throws asio::system_error Thrown on failure.
*
* @note On POSIX systems, host names may be locally defined in the file
* <tt>/etc/hosts</tt>. On Windows, host names may be defined in the file
* <tt>c:\\windows\\system32\\drivers\\etc\\hosts</tt>. Remote host name
* resolution is performed using DNS. Operating systems may use additional
* locations when resolving host names (such as NETBIOS names on Windows).
*
* On POSIX systems, service names are typically defined in the file
* <tt>/etc/services</tt>. On Windows, service names may be found in the file
* <tt>c:\\windows\\system32\\drivers\\etc\\services</tt>. Operating systems
* may use additional locations when resolving service names.
*/
results_type resolve(const protocol_type& protocol,
ASIO_STRING_VIEW_PARAM host, ASIO_STRING_VIEW_PARAM service)
{
return resolve(protocol, host, service, resolver_base::flags());
}
/// Perform forward resolution of a query to a list of entries.
/**
* This function is used to resolve host and service names into a list of
* endpoint entries.
*
* @param protocol A protocol object, normally representing either the IPv4 or
* IPv6 version of an internet protocol.
*
* @param host A string identifying a location. May be a descriptive name or
* a numeric address string. If an empty string and the passive flag has been
* specified, the resolved endpoints are suitable for local service binding.
* If an empty string and passive is not specified, the resolved endpoints
* will use the loopback address.
*
* @param service A string identifying the requested service. This may be a
* descriptive name or a numeric string corresponding to a port number. May
* be an empty string, in which case all resolved endpoints will have a port
* number of 0.
*
* @param ec Set to indicate what error occurred, if any.
*
* @returns A range object representing the list of endpoint entries. An
* empty range is returned if an error occurs. A successful call to this
* function is guaranteed to return a non-empty range.
*
* @note On POSIX systems, host names may be locally defined in the file
* <tt>/etc/hosts</tt>. On Windows, host names may be defined in the file
* <tt>c:\\windows\\system32\\drivers\\etc\\hosts</tt>. Remote host name
* resolution is performed using DNS. Operating systems may use additional
* locations when resolving host names (such as NETBIOS names on Windows).
*
* On POSIX systems, service names are typically defined in the file
* <tt>/etc/services</tt>. On Windows, service names may be found in the file
* <tt>c:\\windows\\system32\\drivers\\etc\\services</tt>. Operating systems
* may use additional locations when resolving service names.
*/
results_type resolve(const protocol_type& protocol,
ASIO_STRING_VIEW_PARAM host, ASIO_STRING_VIEW_PARAM service,
asio::error_code& ec)
{
return resolve(protocol, host, service, resolver_base::flags(), ec);
}
/// Perform forward resolution of a query to a list of entries.
/**
* This function is used to resolve host and service names into a list of
* endpoint entries.
*
* @param protocol A protocol object, normally representing either the IPv4 or
* IPv6 version of an internet protocol.
*
* @param host A string identifying a location. May be a descriptive name or
* a numeric address string. If an empty string and the passive flag has been
* specified, the resolved endpoints are suitable for local service binding.
* If an empty string and passive is not specified, the resolved endpoints
* will use the loopback address.
*
* @param service A string identifying the requested service. This may be a
* descriptive name or a numeric string corresponding to a port number. May
* be an empty string, in which case all resolved endpoints will have a port
* number of 0.
*
* @param resolve_flags A set of flags that determine how name resolution
* should be performed. The default flags are suitable for communication with
* remote hosts. See the @ref resolver_base documentation for the set of
* available flags.
*
* @returns A range object representing the list of endpoint entries. A
* successful call to this function is guaranteed to return a non-empty
* range.
*
* @throws asio::system_error Thrown on failure.
*
* @note On POSIX systems, host names may be locally defined in the file
* <tt>/etc/hosts</tt>. On Windows, host names may be defined in the file
* <tt>c:\\windows\\system32\\drivers\\etc\\hosts</tt>. Remote host name
* resolution is performed using DNS. Operating systems may use additional
* locations when resolving host names (such as NETBIOS names on Windows).
*
* On POSIX systems, service names are typically defined in the file
* <tt>/etc/services</tt>. On Windows, service names may be found in the file
* <tt>c:\\windows\\system32\\drivers\\etc\\services</tt>. Operating systems
* may use additional locations when resolving service names.
*/
results_type resolve(const protocol_type& protocol,
ASIO_STRING_VIEW_PARAM host, ASIO_STRING_VIEW_PARAM service,
resolver_base::flags resolve_flags)
{
asio::error_code ec;
basic_resolver_query<protocol_type> q(
protocol, static_cast<std::string>(host),
static_cast<std::string>(service), resolve_flags);
results_type r = impl_.get_service().resolve(
impl_.get_implementation(), q, ec);
asio::detail::throw_error(ec, "resolve");
return r;
}
/// Perform forward resolution of a query to a list of entries.
/**
* This function is used to resolve host and service names into a list of
* endpoint entries.
*
* @param protocol A protocol object, normally representing either the IPv4 or
* IPv6 version of an internet protocol.
*
* @param host A string identifying a location. May be a descriptive name or
* a numeric address string. If an empty string and the passive flag has been
* specified, the resolved endpoints are suitable for local service binding.
* If an empty string and passive is not specified, the resolved endpoints
* will use the loopback address.
*
* @param service A string identifying the requested service. This may be a
* descriptive name or a numeric string corresponding to a port number. May
* be an empty string, in which case all resolved endpoints will have a port
* number of 0.
*
* @param resolve_flags A set of flags that determine how name resolution
* should be performed. The default flags are suitable for communication with
* remote hosts. See the @ref resolver_base documentation for the set of
* available flags.
*
* @param ec Set to indicate what error occurred, if any.
*
* @returns A range object representing the list of endpoint entries. An
* empty range is returned if an error occurs. A successful call to this
* function is guaranteed to return a non-empty range.
*
* @note On POSIX systems, host names may be locally defined in the file
* <tt>/etc/hosts</tt>. On Windows, host names may be defined in the file
* <tt>c:\\windows\\system32\\drivers\\etc\\hosts</tt>. Remote host name
* resolution is performed using DNS. Operating systems may use additional
* locations when resolving host names (such as NETBIOS names on Windows).
*
* On POSIX systems, service names are typically defined in the file
* <tt>/etc/services</tt>. On Windows, service names may be found in the file
* <tt>c:\\windows\\system32\\drivers\\etc\\services</tt>. Operating systems
* may use additional locations when resolving service names.
*/
results_type resolve(const protocol_type& protocol,
ASIO_STRING_VIEW_PARAM host, ASIO_STRING_VIEW_PARAM service,
resolver_base::flags resolve_flags, asio::error_code& ec)
{
basic_resolver_query<protocol_type> q(
protocol, static_cast<std::string>(host),
static_cast<std::string>(service), resolve_flags);
return impl_.get_service().resolve(impl_.get_implementation(), q, ec);
}
#if !defined(ASIO_NO_DEPRECATED)
/// (Deprecated: Use overload with separate host and service parameters.)
/// Asynchronously perform forward resolution of a query to a list of entries.
/**
* This function is used to asynchronously resolve a query into a list of
* endpoint entries. It is an initiating function for an @ref
* asynchronous_operation, and always returns immediately.
*
* @param q A query object that determines what endpoints will be returned.
*
* @param token The @ref completion_token that will be used to produce a
* completion handler, which will be called when the resolve completes.
* Potential completion tokens include @ref use_future, @ref use_awaitable,
* @ref yield_context, or a function object with the correct completion
* signature. The function signature of the completion handler must be:
* @code void handler(
* const asio::error_code& error, // Result of operation.
* resolver::results_type results // Resolved endpoints as a range.
* ); @endcode
* Regardless of whether the asynchronous operation completes immediately or
* not, the completion handler will not be invoked from within this function.
* On immediate completion, invocation of the handler will be performed in a
* manner equivalent to using asio::async_immediate().
*
* A successful resolve operation is guaranteed to pass a non-empty range to
* the handler.
*
* @par Completion Signature
* @code void(asio::error_code, results_type) @endcode
*/
template <
ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,
results_type)) ResolveToken = default_completion_token_t<executor_type>>
auto async_resolve(const query& q,
ResolveToken&& token = default_completion_token_t<executor_type>())
-> decltype(
asio::async_initiate<ResolveToken,
void (asio::error_code, results_type)>(
declval<initiate_async_resolve>(), token, q))
{
return asio::async_initiate<ResolveToken,
void (asio::error_code, results_type)>(
initiate_async_resolve(this), token, q);
}
#endif // !defined(ASIO_NO_DEPRECATED)
/// Asynchronously perform forward resolution of a query to a list of entries.
/**
* This function is used to resolve host and service names into a list of
* endpoint entries. It is an initiating function for an @ref
* asynchronous_operation, and always returns immediately.
*
* @param host A string identifying a location. May be a descriptive name or
* a numeric address string. If an empty string and the passive flag has been
* specified, the resolved endpoints are suitable for local service binding.
* If an empty string and passive is not specified, the resolved endpoints
* will use the loopback address.
*
* @param service A string identifying the requested service. This may be a
* descriptive name or a numeric string corresponding to a port number. May
* be an empty string, in which case all resolved endpoints will have a port
* number of 0.
*
* @param token The @ref completion_token that will be used to produce a
* completion handler, which will be called when the resolve completes.
* Potential completion tokens include @ref use_future, @ref use_awaitable,
* @ref yield_context, or a function object with the correct completion
* signature. The function signature of the completion handler must be:
* @code void handler(
* const asio::error_code& error, // Result of operation.
* resolver::results_type results // Resolved endpoints as a range.
* ); @endcode
* Regardless of whether the asynchronous operation completes immediately or
* not, the completion handler will not be invoked from within this function.
* On immediate completion, invocation of the handler will be performed in a
* manner equivalent to using asio::async_immediate().
*
* A successful resolve operation is guaranteed to pass a non-empty range to
* the handler.
*
* @par Completion Signature
* @code void(asio::error_code, results_type) @endcode
*
* @note On POSIX systems, host names may be locally defined in the file
* <tt>/etc/hosts</tt>. On Windows, host names may be defined in the file
* <tt>c:\\windows\\system32\\drivers\\etc\\hosts</tt>. Remote host name
* resolution is performed using DNS. Operating systems may use additional
* locations when resolving host names (such as NETBIOS names on Windows).
*
* On POSIX systems, service names are typically defined in the file
* <tt>/etc/services</tt>. On Windows, service names may be found in the file
* <tt>c:\\windows\\system32\\drivers\\etc\\services</tt>. Operating systems
* may use additional locations when resolving service names.
*/
template <
ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,
results_type)) ResolveToken = default_completion_token_t<executor_type>>
auto async_resolve(ASIO_STRING_VIEW_PARAM host,
ASIO_STRING_VIEW_PARAM service,
ResolveToken&& token = default_completion_token_t<executor_type>())
-> decltype(
asio::async_initiate<ResolveToken,
void (asio::error_code, results_type)>(
declval<initiate_async_resolve>(), token,
declval<basic_resolver_query<protocol_type>&>()))
{
return async_resolve(host, service, resolver_base::flags(),
static_cast<ResolveToken&&>(token));
}
/// Asynchronously perform forward resolution of a query to a list of entries.
/**
* This function is used to resolve host and service names into a list of
* endpoint entries. It is an initiating function for an @ref
* asynchronous_operation, and always returns immediately.
*
* @param host A string identifying a location. May be a descriptive name or
* a numeric address string. If an empty string and the passive flag has been
* specified, the resolved endpoints are suitable for local service binding.
* If an empty string and passive is not specified, the resolved endpoints
* will use the loopback address.
*
* @param service A string identifying the requested service. This may be a
* descriptive name or a numeric string corresponding to a port number. May
* be an empty string, in which case all resolved endpoints will have a port
* number of 0.
*
* @param resolve_flags A set of flags that determine how name resolution
* should be performed. The default flags are suitable for communication with
* remote hosts. See the @ref resolver_base documentation for the set of
* available flags.
*
* @param token The @ref completion_token that will be used to produce a
* completion handler, which will be called when the resolve completes.
* Potential completion tokens include @ref use_future, @ref use_awaitable,
* @ref yield_context, or a function object with the correct completion
* signature. The function signature of the completion handler must be:
* @code void handler(
* const asio::error_code& error, // Result of operation.
* resolver::results_type results // Resolved endpoints as a range.
* ); @endcode
* Regardless of whether the asynchronous operation completes immediately or
* not, the completion handler will not be invoked from within this function.
* On immediate completion, invocation of the handler will be performed in a
* manner equivalent to using asio::async_immediate().
*
* A successful resolve operation is guaranteed to pass a non-empty range to
* the handler.
*
* @par Completion Signature
* @code void(asio::error_code, results_type) @endcode
*
* @note On POSIX systems, host names may be locally defined in the file
* <tt>/etc/hosts</tt>. On Windows, host names may be defined in the file
* <tt>c:\\windows\\system32\\drivers\\etc\\hosts</tt>. Remote host name
* resolution is performed using DNS. Operating systems may use additional
* locations when resolving host names (such as NETBIOS names on Windows).
*
* On POSIX systems, service names are typically defined in the file
* <tt>/etc/services</tt>. On Windows, service names may be found in the file
* <tt>c:\\windows\\system32\\drivers\\etc\\services</tt>. Operating systems
* may use additional locations when resolving service names.
*/
template <
ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,
results_type)) ResolveToken = default_completion_token_t<executor_type>>
auto async_resolve(ASIO_STRING_VIEW_PARAM host,
ASIO_STRING_VIEW_PARAM service, resolver_base::flags resolve_flags,
ResolveToken&& token = default_completion_token_t<executor_type>())
-> decltype(
asio::async_initiate<ResolveToken,
void (asio::error_code, results_type)>(
declval<initiate_async_resolve>(), token,
declval<basic_resolver_query<protocol_type>&>()))
{
basic_resolver_query<protocol_type> q(static_cast<std::string>(host),
static_cast<std::string>(service), resolve_flags);
return asio::async_initiate<ResolveToken,
void (asio::error_code, results_type)>(
initiate_async_resolve(this), token, q);
}
/// Asynchronously perform forward resolution of a query to a list of entries.
/**
* This function is used to resolve host and service names into a list of
* endpoint entries. It is an initiating function for an @ref
* asynchronous_operation, and always returns immediately.
*
* @param protocol A protocol object, normally representing either the IPv4 or
* IPv6 version of an internet protocol.
*
* @param host A string identifying a location. May be a descriptive name or
* a numeric address string. If an empty string and the passive flag has been
* specified, the resolved endpoints are suitable for local service binding.
* If an empty string and passive is not specified, the resolved endpoints
* will use the loopback address.
*
* @param service A string identifying the requested service. This may be a
* descriptive name or a numeric string corresponding to a port number. May
* be an empty string, in which case all resolved endpoints will have a port
* number of 0.
*
* @param token The @ref completion_token that will be used to produce a
* completion handler, which will be called when the resolve completes.
* Potential completion tokens include @ref use_future, @ref use_awaitable,
* @ref yield_context, or a function object with the correct completion
* signature. The function signature of the completion handler must be:
* @code void handler(
* const asio::error_code& error, // Result of operation.
* resolver::results_type results // Resolved endpoints as a range.
* ); @endcode
* Regardless of whether the asynchronous operation completes immediately or
* not, the completion handler will not be invoked from within this function.
* On immediate completion, invocation of the handler will be performed in a
* manner equivalent to using asio::async_immediate().
*
* A successful resolve operation is guaranteed to pass a non-empty range to
* the handler.
*
* @par Completion Signature
* @code void(asio::error_code, results_type) @endcode
*
* @note On POSIX systems, host names may be locally defined in the file
* <tt>/etc/hosts</tt>. On Windows, host names may be defined in the file
* <tt>c:\\windows\\system32\\drivers\\etc\\hosts</tt>. Remote host name
* resolution is performed using DNS. Operating systems may use additional
* locations when resolving host names (such as NETBIOS names on Windows).
*
* On POSIX systems, service names are typically defined in the file
* <tt>/etc/services</tt>. On Windows, service names may be found in the file
* <tt>c:\\windows\\system32\\drivers\\etc\\services</tt>. Operating systems
* may use additional locations when resolving service names.
*/
template <
ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,
results_type)) ResolveToken = default_completion_token_t<executor_type>>
auto async_resolve(const protocol_type& protocol,
ASIO_STRING_VIEW_PARAM host, ASIO_STRING_VIEW_PARAM service,
ResolveToken&& token = default_completion_token_t<executor_type>())
-> decltype(
asio::async_initiate<ResolveToken,
void (asio::error_code, results_type)>(
declval<initiate_async_resolve>(), token,
declval<basic_resolver_query<protocol_type>&>()))
{
return async_resolve(protocol, host, service, resolver_base::flags(),
static_cast<ResolveToken&&>(token));
}
/// Asynchronously perform forward resolution of a query to a list of entries.
/**
* This function is used to resolve host and service names into a list of
* endpoint entries. It is an initiating function for an @ref
* asynchronous_operation, and always returns immediately.
*
* @param protocol A protocol object, normally representing either the IPv4 or
* IPv6 version of an internet protocol.
*
* @param host A string identifying a location. May be a descriptive name or
* a numeric address string. If an empty string and the passive flag has been
* specified, the resolved endpoints are suitable for local service binding.
* If an empty string and passive is not specified, the resolved endpoints
* will use the loopback address.
*
* @param service A string identifying the requested service. This may be a
* descriptive name or a numeric string corresponding to a port number. May
* be an empty string, in which case all resolved endpoints will have a port
* number of 0.
*
* @param resolve_flags A set of flags that determine how name resolution
* should be performed. The default flags are suitable for communication with
* remote hosts. See the @ref resolver_base documentation for the set of
* available flags.
*
* @param token The @ref completion_token that will be used to produce a
* completion handler, which will be called when the resolve completes.
* Potential completion tokens include @ref use_future, @ref use_awaitable,
* @ref yield_context, or a function object with the correct completion
* signature. The function signature of the completion handler must be:
* @code void handler(
* const asio::error_code& error, // Result of operation.
* resolver::results_type results // Resolved endpoints as a range.
* ); @endcode
* Regardless of whether the asynchronous operation completes immediately or
* not, the completion handler will not be invoked from within this function.
* On immediate completion, invocation of the handler will be performed in a
* manner equivalent to using asio::async_immediate().
*
* A successful resolve operation is guaranteed to pass a non-empty range to
* the handler.
*
* @par Completion Signature
* @code void(asio::error_code, results_type) @endcode
*
* @note On POSIX systems, host names may be locally defined in the file
* <tt>/etc/hosts</tt>. On Windows, host names may be defined in the file
* <tt>c:\\windows\\system32\\drivers\\etc\\hosts</tt>. Remote host name
* resolution is performed using DNS. Operating systems may use additional
* locations when resolving host names (such as NETBIOS names on Windows).
*
* On POSIX systems, service names are typically defined in the file
* <tt>/etc/services</tt>. On Windows, service names may be found in the file
* <tt>c:\\windows\\system32\\drivers\\etc\\services</tt>. Operating systems
* may use additional locations when resolving service names.
*/
template <
ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,
results_type)) ResolveToken = default_completion_token_t<executor_type>>
auto async_resolve(const protocol_type& protocol,
ASIO_STRING_VIEW_PARAM host, ASIO_STRING_VIEW_PARAM service,
resolver_base::flags resolve_flags,
ResolveToken&& token = default_completion_token_t<executor_type>())
-> decltype(
asio::async_initiate<ResolveToken,
void (asio::error_code, results_type)>(
declval<initiate_async_resolve>(), token,
declval<basic_resolver_query<protocol_type>&>()))
{
basic_resolver_query<protocol_type> q(
protocol, static_cast<std::string>(host),
static_cast<std::string>(service), resolve_flags);
return asio::async_initiate<ResolveToken,
void (asio::error_code, results_type)>(
initiate_async_resolve(this), token, q);
}
/// Perform reverse resolution of an endpoint to a list of entries.
/**
* This function is used to resolve an endpoint into a list of endpoint
* entries.
*
* @param e An endpoint object that determines what endpoints will be
* returned.
*
* @returns A range object representing the list of endpoint entries. A
* successful call to this function is guaranteed to return a non-empty
* range.
*
* @throws asio::system_error Thrown on failure.
*/
results_type resolve(const endpoint_type& e)
{
asio::error_code ec;
results_type i = impl_.get_service().resolve(
impl_.get_implementation(), e, ec);
asio::detail::throw_error(ec, "resolve");
return i;
}
/// Perform reverse resolution of an endpoint to a list of entries.
/**
* This function is used to resolve an endpoint into a list of endpoint
* entries.
*
* @param e An endpoint object that determines what endpoints will be
* returned.
*
* @param ec Set to indicate what error occurred, if any.
*
* @returns A range object representing the list of endpoint entries. An
* empty range is returned if an error occurs. A successful call to this
* function is guaranteed to return a non-empty range.
*/
results_type resolve(const endpoint_type& e, asio::error_code& ec)
{
return impl_.get_service().resolve(impl_.get_implementation(), e, ec);
}
/// Asynchronously perform reverse resolution of an endpoint to a list of
/// entries.
/**
* This function is used to asynchronously resolve an endpoint into a list of
* endpoint entries. It is an initiating function for an @ref
* asynchronous_operation, and always returns immediately.
*
* @param e An endpoint object that determines what endpoints will be
* returned.
*
* @param token The @ref completion_token that will be used to produce a
* completion handler, which will be called when the resolve completes.
* Potential completion tokens include @ref use_future, @ref use_awaitable,
* @ref yield_context, or a function object with the correct completion
* signature. The function signature of the completion handler must be:
* @code void handler(
* const asio::error_code& error, // Result of operation.
* resolver::results_type results // Resolved endpoints as a range.
* ); @endcode
* Regardless of whether the asynchronous operation completes immediately or
* not, the completion handler will not be invoked from within this function.
* On immediate completion, invocation of the handler will be performed in a
* manner equivalent to using asio::async_immediate().
*
* A successful resolve operation is guaranteed to pass a non-empty range to
* the handler.
*
* @par Completion Signature
* @code void(asio::error_code, results_type) @endcode
*/
template <
ASIO_COMPLETION_TOKEN_FOR(void (asio::error_code,
results_type)) ResolveToken = default_completion_token_t<executor_type>>
auto async_resolve(const endpoint_type& e,
ResolveToken&& token = default_completion_token_t<executor_type>())
-> decltype(
asio::async_initiate<ResolveToken,
void (asio::error_code, results_type)>(
declval<initiate_async_resolve>(), token, e))
{
return asio::async_initiate<ResolveToken,
void (asio::error_code, results_type)>(
initiate_async_resolve(this), token, e);
}
private:
// Disallow copying and assignment.
basic_resolver(const basic_resolver&) = delete;
basic_resolver& operator=(const basic_resolver&) = delete;
class initiate_async_resolve
{
public:
typedef Executor executor_type;
explicit initiate_async_resolve(basic_resolver* self)
: self_(self)
{
}
executor_type get_executor() const noexcept
{
return self_->get_executor();
}
template <typename ResolveHandler, typename Query>
void operator()(ResolveHandler&& handler,
const Query& q) const
{
// If you get an error on the following line it means that your handler
// does not meet the documented type requirements for a ResolveHandler.
ASIO_RESOLVE_HANDLER_CHECK(
ResolveHandler, handler, results_type) type_check;
asio::detail::non_const_lvalue<ResolveHandler> handler2(handler);
self_->impl_.get_service().async_resolve(
self_->impl_.get_implementation(), q,
handler2.value, self_->impl_.get_executor());
}
private:
basic_resolver* self_;
};
# if defined(ASIO_WINDOWS_RUNTIME)
asio::detail::io_object_impl<
asio::detail::winrt_resolver_service<InternetProtocol>,
Executor> impl_;
# else
asio::detail::io_object_impl<
asio::detail::resolver_service<InternetProtocol>,
Executor> impl_;
# endif
};
} // namespace ip
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // ASIO_IP_BASIC_RESOLVER_HPP
|
0 | repos/asio/asio/include/asio | repos/asio/asio/include/asio/ip/unicast.hpp | //
// ip/unicast.hpp
// ~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2024 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_IP_UNICAST_HPP
#define ASIO_IP_UNICAST_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#include <cstddef>
#include "asio/ip/detail/socket_option.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace ip {
namespace unicast {
/// Socket option for time-to-live associated with outgoing unicast packets.
/**
* Implements the IPPROTO_IP/IP_UNICAST_TTL socket option.
*
* @par Examples
* Setting the option:
* @code
* asio::ip::udp::socket socket(my_context);
* ...
* asio::ip::unicast::hops option(4);
* socket.set_option(option);
* @endcode
*
* @par
* Getting the current option value:
* @code
* asio::ip::udp::socket socket(my_context);
* ...
* asio::ip::unicast::hops option;
* socket.get_option(option);
* int ttl = option.value();
* @endcode
*
* @par Concepts:
* GettableSocketOption, SettableSocketOption.
*/
#if defined(GENERATING_DOCUMENTATION)
typedef implementation_defined hops;
#else
typedef asio::ip::detail::socket_option::unicast_hops<
ASIO_OS_DEF(IPPROTO_IP),
ASIO_OS_DEF(IP_TTL),
ASIO_OS_DEF(IPPROTO_IPV6),
ASIO_OS_DEF(IPV6_UNICAST_HOPS)> hops;
#endif
} // namespace unicast
} // namespace ip
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // ASIO_IP_UNICAST_HPP
|
0 | repos/asio/asio/include/asio | repos/asio/asio/include/asio/ip/basic_resolver_results.hpp | //
// ip/basic_resolver_results.hpp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2024 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_IP_BASIC_RESOLVER_RESULTS_HPP
#define ASIO_IP_BASIC_RESOLVER_RESULTS_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#include <cstddef>
#include <cstring>
#include "asio/detail/socket_ops.hpp"
#include "asio/detail/socket_types.hpp"
#include "asio/ip/basic_resolver_iterator.hpp"
#if defined(ASIO_WINDOWS_RUNTIME)
# include "asio/detail/winrt_utils.hpp"
#endif // defined(ASIO_WINDOWS_RUNTIME)
#include "asio/detail/push_options.hpp"
namespace asio {
namespace ip {
/// A range of entries produced by a resolver.
/**
* The asio::ip::basic_resolver_results class template is used to define
* a range over the results returned by a resolver.
*
* The iterator's value_type, obtained when a results iterator is dereferenced,
* is: @code const basic_resolver_entry<InternetProtocol> @endcode
*
* @note For backward compatibility, basic_resolver_results is derived from
* basic_resolver_iterator. This derivation is deprecated.
*
* @par Thread Safety
* @e Distinct @e objects: Safe.@n
* @e Shared @e objects: Unsafe.
*/
template <typename InternetProtocol>
class basic_resolver_results
#if !defined(ASIO_NO_DEPRECATED)
: public basic_resolver_iterator<InternetProtocol>
#else // !defined(ASIO_NO_DEPRECATED)
: private basic_resolver_iterator<InternetProtocol>
#endif // !defined(ASIO_NO_DEPRECATED)
{
public:
/// The protocol type associated with the results.
typedef InternetProtocol protocol_type;
/// The endpoint type associated with the results.
typedef typename protocol_type::endpoint endpoint_type;
/// The type of a value in the results range.
typedef basic_resolver_entry<protocol_type> value_type;
/// The type of a const reference to a value in the range.
typedef const value_type& const_reference;
/// The type of a non-const reference to a value in the range.
typedef value_type& reference;
/// The type of an iterator into the range.
typedef basic_resolver_iterator<protocol_type> const_iterator;
/// The type of an iterator into the range.
typedef const_iterator iterator;
/// Type used to represent the distance between two iterators in the range.
typedef std::ptrdiff_t difference_type;
/// Type used to represent a count of the elements in the range.
typedef std::size_t size_type;
/// Default constructor creates an empty range.
basic_resolver_results()
{
}
/// Copy constructor.
basic_resolver_results(const basic_resolver_results& other)
: basic_resolver_iterator<InternetProtocol>(other)
{
}
/// Move constructor.
basic_resolver_results(basic_resolver_results&& other)
: basic_resolver_iterator<InternetProtocol>(
static_cast<basic_resolver_results&&>(other))
{
}
/// Assignment operator.
basic_resolver_results& operator=(const basic_resolver_results& other)
{
basic_resolver_iterator<InternetProtocol>::operator=(other);
return *this;
}
/// Move-assignment operator.
basic_resolver_results& operator=(basic_resolver_results&& other)
{
basic_resolver_iterator<InternetProtocol>::operator=(
static_cast<basic_resolver_results&&>(other));
return *this;
}
#if !defined(GENERATING_DOCUMENTATION)
// Create results from an addrinfo list returned by getaddrinfo.
static basic_resolver_results create(
asio::detail::addrinfo_type* address_info,
const std::string& host_name, const std::string& service_name)
{
basic_resolver_results results;
if (!address_info)
return results;
std::string actual_host_name = host_name;
if (address_info->ai_canonname)
actual_host_name = address_info->ai_canonname;
results.values_.reset(new values_type);
while (address_info)
{
if (address_info->ai_family == ASIO_OS_DEF(AF_INET)
|| address_info->ai_family == ASIO_OS_DEF(AF_INET6))
{
using namespace std; // For memcpy.
typename InternetProtocol::endpoint endpoint;
endpoint.resize(static_cast<std::size_t>(address_info->ai_addrlen));
memcpy(endpoint.data(), address_info->ai_addr,
address_info->ai_addrlen);
results.values_->push_back(
basic_resolver_entry<InternetProtocol>(endpoint,
actual_host_name, service_name));
}
address_info = address_info->ai_next;
}
return results;
}
// Create results from an endpoint, host name and service name.
static basic_resolver_results create(const endpoint_type& endpoint,
const std::string& host_name, const std::string& service_name)
{
basic_resolver_results results;
results.values_.reset(new values_type);
results.values_->push_back(
basic_resolver_entry<InternetProtocol>(
endpoint, host_name, service_name));
return results;
}
// Create results from a sequence of endpoints, host and service name.
template <typename EndpointIterator>
static basic_resolver_results create(
EndpointIterator begin, EndpointIterator end,
const std::string& host_name, const std::string& service_name)
{
basic_resolver_results results;
if (begin != end)
{
results.values_.reset(new values_type);
for (EndpointIterator ep_iter = begin; ep_iter != end; ++ep_iter)
{
results.values_->push_back(
basic_resolver_entry<InternetProtocol>(
*ep_iter, host_name, service_name));
}
}
return results;
}
# if defined(ASIO_WINDOWS_RUNTIME)
// Create results from a Windows Runtime list of EndpointPair objects.
static basic_resolver_results create(
Windows::Foundation::Collections::IVectorView<
Windows::Networking::EndpointPair^>^ endpoints,
const asio::detail::addrinfo_type& hints,
const std::string& host_name, const std::string& service_name)
{
basic_resolver_results results;
if (endpoints->Size)
{
results.values_.reset(new values_type);
for (unsigned int i = 0; i < endpoints->Size; ++i)
{
auto pair = endpoints->GetAt(i);
if (hints.ai_family == ASIO_OS_DEF(AF_INET)
&& pair->RemoteHostName->Type
!= Windows::Networking::HostNameType::Ipv4)
continue;
if (hints.ai_family == ASIO_OS_DEF(AF_INET6)
&& pair->RemoteHostName->Type
!= Windows::Networking::HostNameType::Ipv6)
continue;
results.values_->push_back(
basic_resolver_entry<InternetProtocol>(
typename InternetProtocol::endpoint(
ip::make_address(
asio::detail::winrt_utils::string(
pair->RemoteHostName->CanonicalName)),
asio::detail::winrt_utils::integer(
pair->RemoteServiceName)),
host_name, service_name));
}
}
return results;
}
# endif // defined(ASIO_WINDOWS_RUNTIME)
#endif // !defined(GENERATING_DOCUMENTATION)
/// Get the number of entries in the results range.
size_type size() const noexcept
{
return this->values_ ? this->values_->size() : 0;
}
/// Get the maximum number of entries permitted in a results range.
size_type max_size() const noexcept
{
return this->values_ ? this->values_->max_size() : values_type().max_size();
}
/// Determine whether the results range is empty.
bool empty() const noexcept
{
return this->values_ ? this->values_->empty() : true;
}
/// Obtain a begin iterator for the results range.
const_iterator begin() const
{
basic_resolver_results tmp(*this);
tmp.index_ = 0;
return static_cast<basic_resolver_results&&>(tmp);
}
/// Obtain an end iterator for the results range.
const_iterator end() const
{
return const_iterator();
}
/// Obtain a begin iterator for the results range.
const_iterator cbegin() const
{
return begin();
}
/// Obtain an end iterator for the results range.
const_iterator cend() const
{
return end();
}
/// Swap the results range with another.
void swap(basic_resolver_results& that) noexcept
{
if (this != &that)
{
this->values_.swap(that.values_);
std::size_t index = this->index_;
this->index_ = that.index_;
that.index_ = index;
}
}
/// Test two iterators for equality.
friend bool operator==(const basic_resolver_results& a,
const basic_resolver_results& b)
{
return a.equal(b);
}
/// Test two iterators for inequality.
friend bool operator!=(const basic_resolver_results& a,
const basic_resolver_results& b)
{
return !a.equal(b);
}
private:
typedef std::vector<basic_resolver_entry<InternetProtocol>> values_type;
};
} // namespace ip
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // ASIO_IP_BASIC_RESOLVER_RESULTS_HPP
|
0 | repos/asio/asio/include/asio | repos/asio/asio/include/asio/ip/network_v6.hpp | //
// ip/network_v6.hpp
// ~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2024 Christopher M. Kohlhoff (chris at kohlhoff dot com)
// Copyright (c) 2014 Oliver Kowalke (oliver dot kowalke at gmail dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_IP_NETWORK_V6_HPP
#define ASIO_IP_NETWORK_V6_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#include <string>
#include "asio/detail/string_view.hpp"
#include "asio/error_code.hpp"
#include "asio/ip/address_v6_range.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace ip {
/// Represents an IPv6 network.
/**
* The asio::ip::network_v6 class provides the ability to use and
* manipulate IP version 6 networks.
*
* @par Thread Safety
* @e Distinct @e objects: Safe.@n
* @e Shared @e objects: Unsafe.
*/
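// Example: a minimal sketch of constructing and querying a network_v6, using
// only the members and free functions declared in this header; the prefix
// literal below is illustrative.
//
//   asio::ip::network_v6 net = asio::ip::make_network_v6("2001:db8::/48");
//   unsigned short prefix = net.prefix_length();   // 48
//   asio::ip::address_v6 net_addr = net.network();  // host bits cleared
//   bool is_single_host = net.is_host();            // true only for /128
//   std::string s = net.canonical().to_string();    // e.g. "2001:db8::/48"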
class network_v6
{
public:
/// Default constructor.
network_v6() noexcept
: address_(),
prefix_length_(0)
{
}
/// Construct a network based on the specified address and prefix length.
ASIO_DECL network_v6(const address_v6& addr,
unsigned short prefix_len);
/// Copy constructor.
network_v6(const network_v6& other) noexcept
: address_(other.address_),
prefix_length_(other.prefix_length_)
{
}
/// Move constructor.
network_v6(network_v6&& other) noexcept
: address_(static_cast<address_v6&&>(other.address_)),
prefix_length_(other.prefix_length_)
{
}
/// Assign from another network.
network_v6& operator=(const network_v6& other) noexcept
{
address_ = other.address_;
prefix_length_ = other.prefix_length_;
return *this;
}
/// Move-assign from another network.
network_v6& operator=(network_v6&& other) noexcept
{
address_ = static_cast<address_v6&&>(other.address_);
prefix_length_ = other.prefix_length_;
return *this;
}
/// Obtain the address object specified when the network object was created.
address_v6 address() const noexcept
{
return address_;
}
/// Obtain the prefix length that was specified when the network object was
/// created.
unsigned short prefix_length() const noexcept
{
return prefix_length_;
}
/// Obtain an address object that represents the network address.
ASIO_DECL address_v6 network() const noexcept;
/// Obtain an address range corresponding to the hosts in the network.
ASIO_DECL address_v6_range hosts() const noexcept;
/// Obtain the true network address, omitting any host bits.
network_v6 canonical() const noexcept
{
return network_v6(network(), prefix_length());
}
/// Test if the network is a valid host address.
bool is_host() const noexcept
{
return prefix_length_ == 128;
}
/// Test if a network is a real subnet of another network.
ASIO_DECL bool is_subnet_of(const network_v6& other) const;
/// Get a string representation of the network, in address/prefix length form.
ASIO_DECL std::string to_string() const;
/// Get a string representation of the network, in address/prefix length form.
ASIO_DECL std::string to_string(asio::error_code& ec) const;
/// Compare two networks for equality.
friend bool operator==(const network_v6& a, const network_v6& b)
{
return a.address_ == b.address_ && a.prefix_length_ == b.prefix_length_;
}
/// Compare two networks for inequality.
friend bool operator!=(const network_v6& a, const network_v6& b)
{
return !(a == b);
}
private:
address_v6 address_;
unsigned short prefix_length_;
};
/// Create an IPv6 network from an address and prefix length.
/**
 * @relates network_v6
*/
inline network_v6 make_network_v6(
const address_v6& addr, unsigned short prefix_len)
{
return network_v6(addr, prefix_len);
}
/// Create an IPv6 network from a string containing IP address and prefix
/// length.
/**
* @relates network_v6
*/
ASIO_DECL network_v6 make_network_v6(const char* str);
/// Create an IPv6 network from a string containing IP address and prefix
/// length.
/**
* @relates network_v6
*/
ASIO_DECL network_v6 make_network_v6(
const char* str, asio::error_code& ec);
/// Create an IPv6 network from a string containing IP address and prefix
/// length.
/**
* @relates network_v6
*/
ASIO_DECL network_v6 make_network_v6(const std::string& str);
/// Create an IPv6 network from a string containing IP address and prefix
/// length.
/**
* @relates network_v6
*/
ASIO_DECL network_v6 make_network_v6(
const std::string& str, asio::error_code& ec);
#if defined(ASIO_HAS_STRING_VIEW) \
|| defined(GENERATING_DOCUMENTATION)
/// Create an IPv6 network from a string containing IP address and prefix
/// length.
/**
* @relates network_v6
*/
ASIO_DECL network_v6 make_network_v6(string_view str);
/// Create an IPv6 network from a string containing IP address and prefix
/// length.
/**
* @relates network_v6
*/
ASIO_DECL network_v6 make_network_v6(
string_view str, asio::error_code& ec);
#endif // defined(ASIO_HAS_STRING_VIEW)
// || defined(GENERATING_DOCUMENTATION)
#if !defined(ASIO_NO_IOSTREAM)
/// Output a network as a string.
/**
* Used to output a human-readable string for a specified network.
*
* @param os The output stream to which the string will be written.
*
* @param net The network to be written.
*
* @return The output stream.
*
 * @relates asio::ip::network_v6
*/
template <typename Elem, typename Traits>
std::basic_ostream<Elem, Traits>& operator<<(
std::basic_ostream<Elem, Traits>& os, const network_v6& net);
#endif // !defined(ASIO_NO_IOSTREAM)
} // namespace ip
} // namespace asio
#include "asio/detail/pop_options.hpp"
#include "asio/ip/impl/network_v6.hpp"
#if defined(ASIO_HEADER_ONLY)
# include "asio/ip/impl/network_v6.ipp"
#endif // defined(ASIO_HEADER_ONLY)
#endif // ASIO_IP_NETWORK_V6_HPP
|
0 | repos/asio/asio/include/asio | repos/asio/asio/include/asio/ip/basic_resolver_entry.hpp | //
// ip/basic_resolver_entry.hpp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2024 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_IP_BASIC_RESOLVER_ENTRY_HPP
#define ASIO_IP_BASIC_RESOLVER_ENTRY_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#include <string>
#include "asio/detail/string_view.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace ip {
/// An entry produced by a resolver.
/**
* The asio::ip::basic_resolver_entry class template describes an entry
* as returned by a resolver.
*
* @par Thread Safety
* @e Distinct @e objects: Safe.@n
* @e Shared @e objects: Unsafe.
*/
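// Example: a minimal sketch of reading an entry obtained from a resolver
// results range, assuming the usual tcp resolver API; the context and the
// host/service names are illustrative only.
//
//   asio::ip::tcp::resolver resolver(my_context);
//   for (const asio::ip::basic_resolver_entry<asio::ip::tcp>& entry
//       : resolver.resolve("example.com", "http"))
//   {
//     asio::ip::tcp::endpoint ep = entry.endpoint();
//     std::string host = entry.host_name();
//     std::string service = entry.service_name();
//   }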
template <typename InternetProtocol>
class basic_resolver_entry
{
public:
/// The protocol type associated with the endpoint entry.
typedef InternetProtocol protocol_type;
/// The endpoint type associated with the endpoint entry.
typedef typename InternetProtocol::endpoint endpoint_type;
/// Default constructor.
basic_resolver_entry()
{
}
/// Construct with specified endpoint, host name and service name.
basic_resolver_entry(const endpoint_type& ep,
ASIO_STRING_VIEW_PARAM host, ASIO_STRING_VIEW_PARAM service)
: endpoint_(ep),
host_name_(static_cast<std::string>(host)),
service_name_(static_cast<std::string>(service))
{
}
/// Get the endpoint associated with the entry.
endpoint_type endpoint() const
{
return endpoint_;
}
/// Convert to the endpoint associated with the entry.
operator endpoint_type() const
{
return endpoint_;
}
/// Get the host name associated with the entry.
std::string host_name() const
{
return host_name_;
}
/// Get the host name associated with the entry.
template <class Allocator>
std::basic_string<char, std::char_traits<char>, Allocator> host_name(
const Allocator& alloc = Allocator()) const
{
return std::basic_string<char, std::char_traits<char>, Allocator>(
host_name_.c_str(), alloc);
}
/// Get the service name associated with the entry.
std::string service_name() const
{
return service_name_;
}
/// Get the service name associated with the entry.
template <class Allocator>
std::basic_string<char, std::char_traits<char>, Allocator> service_name(
const Allocator& alloc = Allocator()) const
{
return std::basic_string<char, std::char_traits<char>, Allocator>(
service_name_.c_str(), alloc);
}
private:
endpoint_type endpoint_;
std::string host_name_;
std::string service_name_;
};
} // namespace ip
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // ASIO_IP_BASIC_RESOLVER_ENTRY_HPP
|
0 | repos/asio/asio/include/asio | repos/asio/asio/include/asio/ip/bad_address_cast.hpp | //
// ip/bad_address_cast.hpp
// ~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2024 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_IP_BAD_ADDRESS_CAST_HPP
#define ASIO_IP_BAD_ADDRESS_CAST_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#include <typeinfo>
#include "asio/detail/push_options.hpp"
namespace asio {
namespace ip {
/// Thrown to indicate a failed address conversion.
class bad_address_cast :
#if defined(ASIO_MSVC) && defined(_HAS_EXCEPTIONS) && !_HAS_EXCEPTIONS
public std::exception
#else
public std::bad_cast
#endif
{
public:
/// Default constructor.
bad_address_cast() {}
/// Copy constructor.
bad_address_cast(const bad_address_cast& other) noexcept
#if defined(ASIO_MSVC) && defined(_HAS_EXCEPTIONS) && !_HAS_EXCEPTIONS
: std::exception(static_cast<const std::exception&>(other))
#else
: std::bad_cast(static_cast<const std::bad_cast&>(other))
#endif
{
}
/// Destructor.
virtual ~bad_address_cast() noexcept {}
/// Get the message associated with the exception.
virtual const char* what() const noexcept
{
return "bad address cast";
}
};
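// Example: a minimal sketch of catching a failed conversion; it assumes the
// documented behaviour that address::to_v4() throws bad_address_cast when the
// contained address is not IPv4, and that <iostream> is available.
//
//   asio::ip::address addr = asio::ip::make_address("::1");
//   try
//   {
//     asio::ip::address_v4 v4 = addr.to_v4(); // not IPv4, expected to throw
//   }
//   catch (const asio::ip::bad_address_cast& e)
//   {
//     std::cerr << e.what() << "\n"; // "bad address cast"
//   }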
} // namespace ip
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // ASIO_IP_BAD_ADDRESS_CAST_HPP
|
0 | repos/asio/asio/include/asio | repos/asio/asio/include/asio/ip/tcp.hpp | //
// ip/tcp.hpp
// ~~~~~~~~~~
//
// Copyright (c) 2003-2024 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_IP_TCP_HPP
#define ASIO_IP_TCP_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#include "asio/basic_socket_acceptor.hpp"
#include "asio/basic_socket_iostream.hpp"
#include "asio/basic_stream_socket.hpp"
#include "asio/detail/socket_option.hpp"
#include "asio/detail/socket_types.hpp"
#include "asio/ip/basic_endpoint.hpp"
#include "asio/ip/basic_resolver.hpp"
#include "asio/ip/basic_resolver_iterator.hpp"
#include "asio/ip/basic_resolver_query.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace ip {
/// Encapsulates the flags needed for TCP.
/**
* The asio::ip::tcp class contains flags necessary for TCP sockets.
*
* @par Thread Safety
* @e Distinct @e objects: Safe.@n
* @e Shared @e objects: Safe.
*
* @par Concepts:
* Protocol, InternetProtocol.
*/
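// Example: a minimal sketch of using the protocol class together with the
// socket and acceptor typedefs defined below; the port number is illustrative.
//
//   asio::io_context my_context;
//   asio::ip::tcp::endpoint ep(asio::ip::tcp::v4(), 8080);
//   asio::ip::tcp::acceptor acceptor(my_context, ep);
//   asio::ip::tcp::socket socket(my_context);
//   acceptor.accept(socket);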
class tcp
{
public:
/// The type of a TCP endpoint.
typedef basic_endpoint<tcp> endpoint;
/// Construct to represent the IPv4 TCP protocol.
static tcp v4() noexcept
{
return tcp(ASIO_OS_DEF(AF_INET));
}
/// Construct to represent the IPv6 TCP protocol.
static tcp v6() noexcept
{
return tcp(ASIO_OS_DEF(AF_INET6));
}
/// Obtain an identifier for the type of the protocol.
int type() const noexcept
{
return ASIO_OS_DEF(SOCK_STREAM);
}
/// Obtain an identifier for the protocol.
int protocol() const noexcept
{
return ASIO_OS_DEF(IPPROTO_TCP);
}
/// Obtain an identifier for the protocol family.
int family() const noexcept
{
return family_;
}
/// The TCP socket type.
typedef basic_stream_socket<tcp> socket;
/// The TCP acceptor type.
typedef basic_socket_acceptor<tcp> acceptor;
/// The TCP resolver type.
typedef basic_resolver<tcp> resolver;
#if !defined(ASIO_NO_IOSTREAM)
/// The TCP iostream type.
typedef basic_socket_iostream<tcp> iostream;
#endif // !defined(ASIO_NO_IOSTREAM)
/// Socket option for disabling the Nagle algorithm.
/**
* Implements the IPPROTO_TCP/TCP_NODELAY socket option.
*
* @par Examples
* Setting the option:
* @code
* asio::ip::tcp::socket socket(my_context);
* ...
* asio::ip::tcp::no_delay option(true);
* socket.set_option(option);
* @endcode
*
* @par
* Getting the current option value:
* @code
* asio::ip::tcp::socket socket(my_context);
* ...
* asio::ip::tcp::no_delay option;
* socket.get_option(option);
* bool is_set = option.value();
* @endcode
*
* @par Concepts:
* Socket_Option, Boolean_Socket_Option.
*/
#if defined(GENERATING_DOCUMENTATION)
typedef implementation_defined no_delay;
#else
typedef asio::detail::socket_option::boolean<
ASIO_OS_DEF(IPPROTO_TCP), ASIO_OS_DEF(TCP_NODELAY)> no_delay;
#endif
/// Compare two protocols for equality.
friend bool operator==(const tcp& p1, const tcp& p2)
{
return p1.family_ == p2.family_;
}
/// Compare two protocols for inequality.
friend bool operator!=(const tcp& p1, const tcp& p2)
{
return p1.family_ != p2.family_;
}
private:
// Construct with a specific family.
explicit tcp(int protocol_family) noexcept
: family_(protocol_family)
{
}
int family_;
};
} // namespace ip
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // ASIO_IP_TCP_HPP
|
0 | repos/asio/asio/include/asio | repos/asio/asio/include/asio/ip/address_v6_iterator.hpp | //
// ip/address_v6_iterator.hpp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2024 Christopher M. Kohlhoff (chris at kohlhoff dot com)
// Oliver Kowalke (oliver dot kowalke at gmail dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_IP_ADDRESS_V6_ITERATOR_HPP
#define ASIO_IP_ADDRESS_V6_ITERATOR_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#include "asio/ip/address_v6.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace ip {
template <typename> class basic_address_iterator;
/// An input iterator that can be used for traversing IPv6 addresses.
/**
* In addition to satisfying the input iterator requirements, this iterator
* also supports decrement.
*
* @par Thread Safety
* @e Distinct @e objects: Safe.@n
* @e Shared @e objects: Unsafe.
*/
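// Example: a minimal sketch of walking a half-open range of IPv6 addresses
// with this iterator (via the address_v6_iterator typedef declared at the end
// of this header); the address literals are illustrative.
//
//   asio::ip::address_v6_iterator it(
//       asio::ip::make_address_v6("2001:db8::1"));
//   asio::ip::address_v6_iterator end(
//       asio::ip::make_address_v6("2001:db8::4"));
//   for (; it != end; ++it)
//     std::cout << it->to_string() << "\n"; // 2001:db8::1 .. 2001:db8::3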
template <> class basic_address_iterator<address_v6>
{
public:
/// The type of the elements pointed to by the iterator.
typedef address_v6 value_type;
/// Distance between two iterators.
typedef std::ptrdiff_t difference_type;
/// The type of a pointer to an element pointed to by the iterator.
typedef const address_v6* pointer;
/// The type of a reference to an element pointed to by the iterator.
typedef const address_v6& reference;
/// Denotes that the iterator satisfies the input iterator requirements.
typedef std::input_iterator_tag iterator_category;
/// Construct an iterator that points to the specified address.
basic_address_iterator(const address_v6& addr) noexcept
: address_(addr)
{
}
/// Copy constructor.
basic_address_iterator(
const basic_address_iterator& other) noexcept
: address_(other.address_)
{
}
/// Move constructor.
basic_address_iterator(basic_address_iterator&& other) noexcept
: address_(static_cast<address_v6&&>(other.address_))
{
}
/// Assignment operator.
basic_address_iterator& operator=(
const basic_address_iterator& other) noexcept
{
address_ = other.address_;
return *this;
}
/// Move assignment operator.
basic_address_iterator& operator=(basic_address_iterator&& other) noexcept
{
address_ = static_cast<address_v6&&>(other.address_);
return *this;
}
/// Dereference the iterator.
const address_v6& operator*() const noexcept
{
return address_;
}
/// Dereference the iterator.
const address_v6* operator->() const noexcept
{
return &address_;
}
/// Pre-increment operator.
basic_address_iterator& operator++() noexcept
{
for (int i = 15; i >= 0; --i)
{
if (address_.addr_.s6_addr[i] < 0xFF)
{
++address_.addr_.s6_addr[i];
break;
}
address_.addr_.s6_addr[i] = 0;
}
return *this;
}
/// Post-increment operator.
basic_address_iterator operator++(int) noexcept
{
basic_address_iterator tmp(*this);
++*this;
return tmp;
}
/// Pre-decrement operator.
basic_address_iterator& operator--() noexcept
{
for (int i = 15; i >= 0; --i)
{
if (address_.addr_.s6_addr[i] > 0)
{
--address_.addr_.s6_addr[i];
break;
}
address_.addr_.s6_addr[i] = 0xFF;
}
return *this;
}
/// Post-decrement operator.
basic_address_iterator operator--(int)
{
basic_address_iterator tmp(*this);
--*this;
return tmp;
}
/// Compare two addresses for equality.
friend bool operator==(const basic_address_iterator& a,
const basic_address_iterator& b)
{
return a.address_ == b.address_;
}
/// Compare two addresses for inequality.
friend bool operator!=(const basic_address_iterator& a,
const basic_address_iterator& b)
{
return a.address_ != b.address_;
}
private:
address_v6 address_;
};
/// An input iterator that can be used for traversing IPv6 addresses.
typedef basic_address_iterator<address_v6> address_v6_iterator;
} // namespace ip
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // ASIO_IP_ADDRESS_V6_ITERATOR_HPP
|
0 | repos/asio/asio/include/asio/ip | repos/asio/asio/include/asio/ip/detail/socket_option.hpp | //
// detail/socket_option.hpp
// ~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2024 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_IP_DETAIL_SOCKET_OPTION_HPP
#define ASIO_IP_DETAIL_SOCKET_OPTION_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#include <cstddef>
#include <cstring>
#include <stdexcept>
#include "asio/detail/socket_ops.hpp"
#include "asio/detail/socket_types.hpp"
#include "asio/detail/throw_exception.hpp"
#include "asio/ip/address.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace ip {
namespace detail {
namespace socket_option {
// Helper template for implementing multicast enable loopback options.
template <int IPv4_Level, int IPv4_Name, int IPv6_Level, int IPv6_Name>
class multicast_enable_loopback
{
public:
#if defined(__sun) || defined(__osf__)
typedef unsigned char ipv4_value_type;
typedef unsigned char ipv6_value_type;
#elif defined(_AIX) || defined(__hpux) || defined(__QNXNTO__)
typedef unsigned char ipv4_value_type;
typedef unsigned int ipv6_value_type;
#else
typedef int ipv4_value_type;
typedef int ipv6_value_type;
#endif
// Default constructor.
multicast_enable_loopback()
: ipv4_value_(0),
ipv6_value_(0)
{
}
// Construct with a specific option value.
explicit multicast_enable_loopback(bool v)
: ipv4_value_(v ? 1 : 0),
ipv6_value_(v ? 1 : 0)
{
}
// Set the value of the boolean.
multicast_enable_loopback& operator=(bool v)
{
ipv4_value_ = v ? 1 : 0;
ipv6_value_ = v ? 1 : 0;
return *this;
}
// Get the current value of the boolean.
bool value() const
{
return !!ipv4_value_;
}
// Convert to bool.
operator bool() const
{
return !!ipv4_value_;
}
// Test for false.
bool operator!() const
{
return !ipv4_value_;
}
// Get the level of the socket option.
template <typename Protocol>
int level(const Protocol& protocol) const
{
if (protocol.family() == PF_INET6)
return IPv6_Level;
return IPv4_Level;
}
// Get the name of the socket option.
template <typename Protocol>
int name(const Protocol& protocol) const
{
if (protocol.family() == PF_INET6)
return IPv6_Name;
return IPv4_Name;
}
// Get the address of the boolean data.
template <typename Protocol>
void* data(const Protocol& protocol)
{
if (protocol.family() == PF_INET6)
return &ipv6_value_;
return &ipv4_value_;
}
// Get the address of the boolean data.
template <typename Protocol>
const void* data(const Protocol& protocol) const
{
if (protocol.family() == PF_INET6)
return &ipv6_value_;
return &ipv4_value_;
}
// Get the size of the boolean data.
template <typename Protocol>
std::size_t size(const Protocol& protocol) const
{
if (protocol.family() == PF_INET6)
return sizeof(ipv6_value_);
return sizeof(ipv4_value_);
}
// Set the size of the boolean data.
template <typename Protocol>
void resize(const Protocol& protocol, std::size_t s)
{
if (protocol.family() == PF_INET6)
{
if (s != sizeof(ipv6_value_))
{
std::length_error ex("multicast_enable_loopback socket option resize");
asio::detail::throw_exception(ex);
}
ipv4_value_ = ipv6_value_ ? 1 : 0;
}
else
{
if (s != sizeof(ipv4_value_))
{
std::length_error ex("multicast_enable_loopback socket option resize");
asio::detail::throw_exception(ex);
}
ipv6_value_ = ipv4_value_ ? 1 : 0;
}
}
private:
ipv4_value_type ipv4_value_;
ipv6_value_type ipv6_value_;
};
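// Example: a minimal sketch of this helper as used through its public wrapper
// (asio::ip::multicast::enable_loopback in asio/ip/multicast.hpp); the socket
// setup is illustrative.
//
//   asio::ip::udp::socket socket(my_context);
//   ...
//   asio::ip::multicast::enable_loopback option(true);
//   socket.set_option(option);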
// Helper template for implementing unicast hops options.
template <int IPv4_Level, int IPv4_Name, int IPv6_Level, int IPv6_Name>
class unicast_hops
{
public:
// Default constructor.
unicast_hops()
: value_(0)
{
}
// Construct with a specific option value.
explicit unicast_hops(int v)
: value_(v)
{
}
// Set the value of the option.
unicast_hops& operator=(int v)
{
value_ = v;
return *this;
}
// Get the current value of the option.
int value() const
{
return value_;
}
// Get the level of the socket option.
template <typename Protocol>
int level(const Protocol& protocol) const
{
if (protocol.family() == PF_INET6)
return IPv6_Level;
return IPv4_Level;
}
// Get the name of the socket option.
template <typename Protocol>
int name(const Protocol& protocol) const
{
if (protocol.family() == PF_INET6)
return IPv6_Name;
return IPv4_Name;
}
// Get the address of the data.
template <typename Protocol>
int* data(const Protocol&)
{
return &value_;
}
// Get the address of the data.
template <typename Protocol>
const int* data(const Protocol&) const
{
return &value_;
}
// Get the size of the data.
template <typename Protocol>
std::size_t size(const Protocol&) const
{
return sizeof(value_);
}
// Set the size of the data.
template <typename Protocol>
void resize(const Protocol&, std::size_t s)
{
if (s != sizeof(value_))
{
std::length_error ex("unicast hops socket option resize");
asio::detail::throw_exception(ex);
}
#if defined(__hpux)
if (value_ < 0)
value_ = value_ & 0xFF;
#endif
}
private:
int value_;
};
// Helper template for implementing multicast hops options.
template <int IPv4_Level, int IPv4_Name, int IPv6_Level, int IPv6_Name>
class multicast_hops
{
public:
#if defined(ASIO_WINDOWS) && defined(UNDER_CE)
typedef int ipv4_value_type;
#else
typedef unsigned char ipv4_value_type;
#endif
typedef int ipv6_value_type;
// Default constructor.
multicast_hops()
: ipv4_value_(0),
ipv6_value_(0)
{
}
// Construct with a specific option value.
explicit multicast_hops(int v)
{
if (v < 0 || v > 255)
{
std::out_of_range ex("multicast hops value out of range");
asio::detail::throw_exception(ex);
}
ipv4_value_ = (ipv4_value_type)v;
ipv6_value_ = v;
}
// Set the value of the option.
multicast_hops& operator=(int v)
{
if (v < 0 || v > 255)
{
std::out_of_range ex("multicast hops value out of range");
asio::detail::throw_exception(ex);
}
ipv4_value_ = (ipv4_value_type)v;
ipv6_value_ = v;
return *this;
}
// Get the current value of the option.
int value() const
{
return ipv6_value_;
}
// Get the level of the socket option.
template <typename Protocol>
int level(const Protocol& protocol) const
{
if (protocol.family() == PF_INET6)
return IPv6_Level;
return IPv4_Level;
}
// Get the name of the socket option.
template <typename Protocol>
int name(const Protocol& protocol) const
{
if (protocol.family() == PF_INET6)
return IPv6_Name;
return IPv4_Name;
}
// Get the address of the data.
template <typename Protocol>
void* data(const Protocol& protocol)
{
if (protocol.family() == PF_INET6)
return &ipv6_value_;
return &ipv4_value_;
}
// Get the address of the data.
template <typename Protocol>
const void* data(const Protocol& protocol) const
{
if (protocol.family() == PF_INET6)
return &ipv6_value_;
return &ipv4_value_;
}
// Get the size of the data.
template <typename Protocol>
std::size_t size(const Protocol& protocol) const
{
if (protocol.family() == PF_INET6)
return sizeof(ipv6_value_);
return sizeof(ipv4_value_);
}
// Set the size of the data.
template <typename Protocol>
void resize(const Protocol& protocol, std::size_t s)
{
if (protocol.family() == PF_INET6)
{
if (s != sizeof(ipv6_value_))
{
std::length_error ex("multicast hops socket option resize");
asio::detail::throw_exception(ex);
}
if (ipv6_value_ < 0)
ipv4_value_ = 0;
else if (ipv6_value_ > 255)
ipv4_value_ = 255;
else
ipv4_value_ = (ipv4_value_type)ipv6_value_;
}
else
{
if (s != sizeof(ipv4_value_))
{
std::length_error ex("multicast hops socket option resize");
asio::detail::throw_exception(ex);
}
ipv6_value_ = ipv4_value_;
}
}
private:
ipv4_value_type ipv4_value_;
ipv6_value_type ipv6_value_;
};
// Helper template for implementing ip_mreq-based options.
template <int IPv4_Level, int IPv4_Name, int IPv6_Level, int IPv6_Name>
class multicast_request
{
public:
// Default constructor.
multicast_request()
: ipv4_value_(), // Zero-initialisation gives the "any" address.
ipv6_value_() // Zero-initialisation gives the "any" address.
{
}
// Construct with multicast address only.
explicit multicast_request(const address& multicast_address)
: ipv4_value_(), // Zero-initialisation gives the "any" address.
ipv6_value_() // Zero-initialisation gives the "any" address.
{
if (multicast_address.is_v6())
{
using namespace std; // For memcpy.
address_v6 ipv6_address = multicast_address.to_v6();
address_v6::bytes_type bytes = ipv6_address.to_bytes();
memcpy(ipv6_value_.ipv6mr_multiaddr.s6_addr, bytes.data(), 16);
ipv6_value_.ipv6mr_interface = ipv6_address.scope_id();
}
else
{
ipv4_value_.imr_multiaddr.s_addr =
asio::detail::socket_ops::host_to_network_long(
multicast_address.to_v4().to_uint());
ipv4_value_.imr_interface.s_addr =
asio::detail::socket_ops::host_to_network_long(
address_v4::any().to_uint());
}
}
// Construct with multicast address and IPv4 address specifying an interface.
explicit multicast_request(const address_v4& multicast_address,
const address_v4& network_interface = address_v4::any())
: ipv6_value_() // Zero-initialisation gives the "any" address.
{
ipv4_value_.imr_multiaddr.s_addr =
asio::detail::socket_ops::host_to_network_long(
multicast_address.to_uint());
ipv4_value_.imr_interface.s_addr =
asio::detail::socket_ops::host_to_network_long(
network_interface.to_uint());
}
// Construct with multicast address and IPv6 network interface index.
explicit multicast_request(
const address_v6& multicast_address,
unsigned long network_interface = 0)
: ipv4_value_() // Zero-initialisation gives the "any" address.
{
using namespace std; // For memcpy.
address_v6::bytes_type bytes = multicast_address.to_bytes();
memcpy(ipv6_value_.ipv6mr_multiaddr.s6_addr, bytes.data(), 16);
if (network_interface)
ipv6_value_.ipv6mr_interface = network_interface;
else
ipv6_value_.ipv6mr_interface = multicast_address.scope_id();
}
// Get the level of the socket option.
template <typename Protocol>
int level(const Protocol& protocol) const
{
if (protocol.family() == PF_INET6)
return IPv6_Level;
return IPv4_Level;
}
// Get the name of the socket option.
template <typename Protocol>
int name(const Protocol& protocol) const
{
if (protocol.family() == PF_INET6)
return IPv6_Name;
return IPv4_Name;
}
// Get the address of the option data.
template <typename Protocol>
const void* data(const Protocol& protocol) const
{
if (protocol.family() == PF_INET6)
return &ipv6_value_;
return &ipv4_value_;
}
// Get the size of the option data.
template <typename Protocol>
std::size_t size(const Protocol& protocol) const
{
if (protocol.family() == PF_INET6)
return sizeof(ipv6_value_);
return sizeof(ipv4_value_);
}
private:
asio::detail::in4_mreq_type ipv4_value_;
asio::detail::in6_mreq_type ipv6_value_;
};
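// Example: a minimal sketch of this helper as used through its public wrapper
// (asio::ip::multicast::join_group in asio/ip/multicast.hpp); the group
// address is illustrative.
//
//   asio::ip::udp::socket socket(my_context);
//   ...
//   asio::ip::multicast::join_group option(
//       asio::ip::make_address("239.255.0.1"));
//   socket.set_option(option);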
// Helper template for implementing options that specify a network interface.
template <int IPv4_Level, int IPv4_Name, int IPv6_Level, int IPv6_Name>
class network_interface
{
public:
// Default constructor.
network_interface()
{
ipv4_value_.s_addr =
asio::detail::socket_ops::host_to_network_long(
address_v4::any().to_uint());
ipv6_value_ = 0;
}
// Construct with IPv4 interface.
explicit network_interface(const address_v4& ipv4_interface)
{
ipv4_value_.s_addr =
asio::detail::socket_ops::host_to_network_long(
ipv4_interface.to_uint());
ipv6_value_ = 0;
}
// Construct with IPv6 interface.
explicit network_interface(unsigned int ipv6_interface)
{
ipv4_value_.s_addr =
asio::detail::socket_ops::host_to_network_long(
address_v4::any().to_uint());
ipv6_value_ = ipv6_interface;
}
// Get the level of the socket option.
template <typename Protocol>
int level(const Protocol& protocol) const
{
if (protocol.family() == PF_INET6)
return IPv6_Level;
return IPv4_Level;
}
// Get the name of the socket option.
template <typename Protocol>
int name(const Protocol& protocol) const
{
if (protocol.family() == PF_INET6)
return IPv6_Name;
return IPv4_Name;
}
// Get the address of the option data.
template <typename Protocol>
const void* data(const Protocol& protocol) const
{
if (protocol.family() == PF_INET6)
return &ipv6_value_;
return &ipv4_value_;
}
// Get the size of the option data.
template <typename Protocol>
std::size_t size(const Protocol& protocol) const
{
if (protocol.family() == PF_INET6)
return sizeof(ipv6_value_);
return sizeof(ipv4_value_);
}
private:
asio::detail::in4_addr_type ipv4_value_;
unsigned int ipv6_value_;
};
} // namespace socket_option
} // namespace detail
} // namespace ip
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // ASIO_IP_DETAIL_SOCKET_OPTION_HPP
|
0 | repos/asio/asio/include/asio/ip | repos/asio/asio/include/asio/ip/detail/endpoint.hpp | //
// ip/detail/endpoint.hpp
// ~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2024 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_IP_DETAIL_ENDPOINT_HPP
#define ASIO_IP_DETAIL_ENDPOINT_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#include <string>
#include "asio/detail/socket_types.hpp"
#include "asio/detail/winsock_init.hpp"
#include "asio/error_code.hpp"
#include "asio/ip/address.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace ip {
namespace detail {
// Helper class for implementing an IP endpoint.
class endpoint
{
public:
// Default constructor.
ASIO_DECL endpoint() noexcept;
// Construct an endpoint using a family and port number.
ASIO_DECL endpoint(int family,
unsigned short port_num) noexcept;
// Construct an endpoint using an address and port number.
ASIO_DECL endpoint(const asio::ip::address& addr,
unsigned short port_num) noexcept;
// Copy constructor.
endpoint(const endpoint& other) noexcept
: data_(other.data_)
{
}
// Assign from another endpoint.
endpoint& operator=(const endpoint& other) noexcept
{
data_ = other.data_;
return *this;
}
// Get the underlying endpoint in the native type.
asio::detail::socket_addr_type* data() noexcept
{
return &data_.base;
}
// Get the underlying endpoint in the native type.
const asio::detail::socket_addr_type* data() const noexcept
{
return &data_.base;
}
// Get the underlying size of the endpoint in the native type.
std::size_t size() const noexcept
{
if (is_v4())
return sizeof(asio::detail::sockaddr_in4_type);
else
return sizeof(asio::detail::sockaddr_in6_type);
}
// Set the underlying size of the endpoint in the native type.
ASIO_DECL void resize(std::size_t new_size);
// Get the capacity of the endpoint in the native type.
std::size_t capacity() const noexcept
{
return sizeof(data_);
}
// Get the port associated with the endpoint.
ASIO_DECL unsigned short port() const noexcept;
// Set the port associated with the endpoint.
ASIO_DECL void port(unsigned short port_num) noexcept;
// Get the IP address associated with the endpoint.
ASIO_DECL asio::ip::address address() const noexcept;
// Set the IP address associated with the endpoint.
ASIO_DECL void address(
const asio::ip::address& addr) noexcept;
// Compare two endpoints for equality.
ASIO_DECL friend bool operator==(const endpoint& e1,
const endpoint& e2) noexcept;
// Compare endpoints for ordering.
ASIO_DECL friend bool operator<(const endpoint& e1,
const endpoint& e2) noexcept;
// Determine whether the endpoint is IPv4.
bool is_v4() const noexcept
{
return data_.base.sa_family == ASIO_OS_DEF(AF_INET);
}
#if !defined(ASIO_NO_IOSTREAM)
// Convert to a string.
ASIO_DECL std::string to_string() const;
#endif // !defined(ASIO_NO_IOSTREAM)
private:
// The underlying IP socket address.
union data_union
{
asio::detail::socket_addr_type base;
asio::detail::sockaddr_in4_type v4;
asio::detail::sockaddr_in6_type v6;
} data_;
};
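// Example: a minimal sketch of this helper as used through the public
// basic_endpoint wrapper; the address and port values are illustrative.
//
//   asio::ip::tcp::endpoint ep(asio::ip::make_address("192.168.0.1"), 8080);
//   unsigned short port = ep.port();       // 8080
//   asio::ip::address addr = ep.address(); // 192.168.0.1
//   bool v4 = addr.is_v4();                // true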
} // namespace detail
} // namespace ip
} // namespace asio
#include "asio/detail/pop_options.hpp"
#if defined(ASIO_HEADER_ONLY)
# include "asio/ip/detail/impl/endpoint.ipp"
#endif // defined(ASIO_HEADER_ONLY)
#endif // ASIO_IP_DETAIL_ENDPOINT_HPP
|