-rw-r--r--  AUTHORS | 8
-rw-r--r--  Makefile.am | 4
-rw-r--r--  acinclude.m4 | 17
-rw-r--r--  builds/msvc/libzmq/libzmq.vcproj | 114
-rw-r--r--  builds/msvc/platform.hpp | 2
-rw-r--r--  configure.in | 45
-rw-r--r--  doc/zmq_bind.txt | 2
-rw-r--r--  doc/zmq_connect.txt | 2
-rw-r--r--  doc/zmq_cpp.txt | 2
-rw-r--r--  doc/zmq_getsockopt.txt | 85
-rw-r--r--  doc/zmq_poll.txt | 10
-rw-r--r--  doc/zmq_recv.txt | 3
-rw-r--r--  doc/zmq_send.txt | 3
-rw-r--r--  doc/zmq_setsockopt.txt | 44
-rw-r--r--  foreign/openpgm/libpgm-2-1-28~dfsg.tar.gz | bin 340501 -> 0 bytes
-rw-r--r--  foreign/openpgm/libpgm-5.0.78.tar.gz | bin 0 -> 511599 bytes
-rw-r--r--  include/zmq.h | 14
-rw-r--r--  include/zmq.hpp | 6
-rw-r--r--  perf/remote_thr.cpp | 8
-rw-r--r--  src/Makefile.am | 142
-rw-r--r--  src/app_thread.cpp | 197
-rw-r--r--  src/app_thread.hpp | 88
-rw-r--r--  src/array.hpp (renamed from src/yarray.hpp) | 66
-rw-r--r--  src/clock.cpp | 108
-rw-r--r--  src/clock.hpp (renamed from src/zmq_decoder.hpp) | 42
-rw-r--r--  src/command.hpp | 21
-rw-r--r--  src/config.hpp | 30
-rw-r--r--  src/connect_session.cpp | 117
-rw-r--r--  src/connect_session.hpp | 64
-rw-r--r--  src/ctx.cpp | 300
-rw-r--r--  src/ctx.hpp | 113
-rw-r--r--  src/decoder.cpp (renamed from src/zmq_decoder.cpp) | 54
-rw-r--r--  src/decoder.hpp | 70
-rw-r--r--  src/devpoll.cpp | 48
-rw-r--r--  src/devpoll.hpp | 20
-rw-r--r--  src/encoder.cpp (renamed from src/zmq_encoder.cpp) | 24
-rw-r--r--  src/encoder.hpp | 41
-rw-r--r--  src/epoll.cpp | 55
-rw-r--r--  src/epoll.hpp | 15
-rw-r--r--  src/err.cpp | 41
-rw-r--r--  src/err.hpp | 9
-rw-r--r--  src/fq.cpp | 66
-rw-r--r--  src/fq.hpp | 27
-rw-r--r--  src/i_engine.hpp | 17
-rw-r--r--  src/i_inout.hpp | 21
-rw-r--r--  src/i_poll_events.hpp | 2
-rw-r--r--  src/io_object.cpp | 32
-rw-r--r--  src/io_object.hpp | 16
-rw-r--r--  src/io_thread.cpp | 15
-rw-r--r--  src/io_thread.hpp | 4
-rw-r--r--  src/kqueue.cpp | 55
-rw-r--r--  src/kqueue.hpp | 15
-rw-r--r--  src/lb.cpp | 40
-rw-r--r--  src/lb.hpp | 24
-rw-r--r--  src/named_session.cpp | 84
-rw-r--r--  src/named_session.hpp | 56
-rw-r--r--  src/object.cpp | 127
-rw-r--r--  src/object.hpp | 52
-rw-r--r--  src/options.cpp | 70
-rw-r--r--  src/options.hpp | 12
-rw-r--r--  src/own.cpp | 213
-rw-r--r--  src/own.hpp | 139
-rw-r--r--  src/owned.cpp | 71
-rw-r--r--  src/owned.hpp | 89
-rw-r--r--  src/pair.cpp | 98
-rw-r--r--  src/pair.hpp | 30
-rw-r--r--  src/pgm_receiver.cpp | 45
-rw-r--r--  src/pgm_receiver.hpp | 32
-rw-r--r--  src/pgm_sender.cpp | 66
-rw-r--r--  src/pgm_sender.hpp | 19
-rw-r--r--  src/pgm_socket.cpp | 643
-rw-r--r--  src/pgm_socket.hpp | 8
-rw-r--r--  src/pipe.cpp | 414
-rw-r--r--  src/pipe.hpp | 152
-rw-r--r--  src/poll.cpp | 45
-rw-r--r--  src/poll.hpp | 15
-rw-r--r--  src/poller_base.cpp | 98
-rw-r--r--  src/poller_base.hpp | 83
-rw-r--r--  src/pub.cpp | 78
-rw-r--r--  src/pub.hpp | 29
-rw-r--r--  src/pull.cpp | 52
-rw-r--r--  src/pull.hpp | 15
-rw-r--r--  src/push.cpp | 54
-rw-r--r--  src/push.hpp | 15
-rw-r--r--  src/rep.cpp | 239
-rw-r--r--  src/rep.hpp | 43
-rw-r--r--  src/req.cpp | 234
-rw-r--r--  src/req.hpp | 48
-rw-r--r--  src/select.cpp | 52
-rw-r--r--  src/select.hpp | 15
-rw-r--r--  src/semaphore.hpp | 186
-rw-r--r--  src/session.cpp | 345
-rw-r--r--  src/session.hpp | 115
-rw-r--r--  src/signaler.cpp | 86
-rw-r--r--  src/signaler.hpp | 2
-rw-r--r--  src/socket_base.cpp | 748
-rw-r--r--  src/socket_base.hpp | 172
-rw-r--r--  src/sub.cpp | 44
-rw-r--r--  src/sub.hpp | 16
-rw-r--r--  src/swap.cpp (renamed from src/msg_store.cpp) | 33
-rw-r--r--  src/swap.hpp (renamed from src/msg_store.hpp) | 28
-rw-r--r--  src/tcp_listener.cpp | 12
-rw-r--r--  src/tcp_listener.hpp | 3
-rw-r--r--  src/thread.cpp | 20
-rw-r--r--  src/thread.hpp | 9
-rw-r--r--  src/transient_session.cpp (renamed from src/i_endpoint.hpp) | 33
-rw-r--r--  src/transient_session.hpp (renamed from src/zmq_encoder.hpp) | 36
-rw-r--r--  src/trie.cpp (renamed from src/prefix_tree.cpp) | 36
-rw-r--r--  src/trie.hpp (renamed from src/prefix_tree.hpp) | 17
-rw-r--r--  src/xrep.cpp | 139
-rw-r--r--  src/xrep.hpp | 31
-rw-r--r--  src/xreq.cpp | 42
-rw-r--r--  src/xreq.hpp | 13
-rw-r--r--  src/yarray_item.hpp | 64
-rw-r--r--  src/zmq.cpp | 586
-rw-r--r--  src/zmq_connecter.cpp | 80
-rw-r--r--  src/zmq_connecter.hpp | 37
-rw-r--r--  src/zmq_engine.cpp | 87
-rw-r--r--  src/zmq_engine.hpp | 23
-rw-r--r--  src/zmq_init.cpp | 150
-rw-r--r--  src/zmq_init.hpp | 35
-rw-r--r--  src/zmq_listener.cpp | 30
-rw-r--r--  src/zmq_listener.hpp | 15
-rw-r--r--  tests/Makefile.am | 22
-rw-r--r--  tests/test_pair_inproc.cpp | 30
-rw-r--r--  tests/test_pair_ipc.cpp | 30
-rw-r--r--  tests/test_pair_tcp.cpp | 30
-rw-r--r--  tests/test_reqrep_inproc.cpp | 30
-rw-r--r--  tests/test_reqrep_ipc.cpp | 30
-rw-r--r--  tests/test_reqrep_tcp.cpp | 30
-rw-r--r--  tests/test_shutdown_stress.cpp | 83
-rw-r--r--  tests/testutil.hpp | 129
132 files changed, 5463 insertions, 4127 deletions
diff --git a/AUTHORS b/AUTHORS
index de72c85..80be1ef 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -8,7 +8,9 @@ Bernd Prager <bernd@prager.ws>
Bernd Melchers <melchers@ZEDAT.FU-Berlin.DE>
Brian Buchanan <bwb@holo.org>
Chris Wong <chris@chriswongstudio.com>
+Christian Gudrian <christian.gudrian@fluidon.com>
Conrad D. Steenberg <conrad.steenberg@caltech.edu>
+Dhammika Pathirana <dhammika@gmail.com>
Dhruva Krishnamurthy <dhruva@ymail.com>
Dirk O. Kaar <dok@dok-net.net>
Erich Heine <sophacles@gmail.com>
@@ -16,17 +18,22 @@ Erik Rigtorp <erik@rigtorp.com>
Frank Denis <zeromq@pureftpd.org>
George Neill <georgen@neillnet.com>
Gonzalo Diethelm <gdiethelm@dcv.cl>
+Guido Goldstein <zmq@a-nugget.de>
+Ilja Golshtein <ilejncs@narod.ru>
Ivo Danihelka <ivo@danihelka.net>
Joe Thornber <joe.thornber@gmail.com>
Jon Dyte <jon@totient.co.uk>
Kamil Shakirov <kamils80@gmail.com>
Martin Hurton <hurtonm@gmail.com>
Martin Lucina <mato@kotelna.sk>
+Martin Pales <m.pales@gmail.com>
Martin Sustrik <sustrik@250bpm.com>
Matus Hamorsky <mhamorsky@gmail.com>
Max Wolf <YIDIEPXGXGPN@spammotel.com>
McClain Looney <m@loonsoft.com>
Mikael Helbo Kjaer <mhk@designtech.dk>
+Mikko Koppanen <mkoppanen@php.net>
+Nir Soffer <nirsof@gmail.com>
Pavel Gushcha <pavimus@gmail.com>
Pavol Malosek <malosek@fastmq.com>
Pieter Hintjens <ph@imatix.com>
@@ -37,6 +44,7 @@ Tamara Kustarova <kustarova.tamara@gmail.com>
Taras Shpot <taras.shpot@eleks.com>
Tero Marttila <terom@fixme.fi>
Terry Wilson <terry@logivox.net>
+Toralf Wittner <toralf.wittner@gmail.com>
Vitaly Mayatskikh <v.mayatskih@gmail.com>
Credits
diff --git a/Makefile.am b/Makefile.am
index dbad6e1..e7d221f 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -1,7 +1,7 @@
ACLOCAL_AMFLAGS = -I config
-SUBDIRS = src doc perf devices
-DIST_SUBDIRS = src doc perf devices builds/msvc
+SUBDIRS = src doc perf devices tests
+DIST_SUBDIRS = src doc perf devices tests builds/msvc
EXTRA_DIST = \
autogen.sh \
diff --git a/acinclude.m4 b/acinclude.m4
new file mode 100644
index 0000000..be5d149
--- /dev/null
+++ b/acinclude.m4
@@ -0,0 +1,17 @@
+dnl ##############################################################################
+dnl # AC_CHECK_LANG_ICC #
+dnl # Check if the current language is compiled using ICC #
+dnl # Adapted from http://software.intel.com/en-us/forums/showthread.php?t=67984 #
+dnl ##############################################################################
+AC_DEFUN([AC_CHECK_LANG_ICC],
+ [AC_CACHE_CHECK([whether we are using Intel _AC_LANG compiler],
+ [ac_cv_[]_AC_LANG_ABBREV[]_intel_compiler],
+ [_AC_COMPILE_IFELSE([AC_LANG_PROGRAM([],
+[[#ifndef __INTEL_COMPILER
+ error if not ICC
+#endif
+]])],
+ [is_icc=yes],
+ [is_icc=no])
+ac_cv_[]_AC_LANG_ABBREV[]_intel_compiler=$is_icc
+])])
diff --git a/builds/msvc/libzmq/libzmq.vcproj b/builds/msvc/libzmq/libzmq.vcproj
index 95db1df..ffe8a17 100644
--- a/builds/msvc/libzmq/libzmq.vcproj
+++ b/builds/msvc/libzmq/libzmq.vcproj
@@ -170,7 +170,7 @@
UniqueIdentifier="{4FC737F1-C7A5-4376-A066-2A32D752A2FF}"
>
<File
- RelativePath="..\..\..\src\app_thread.cpp"
+ RelativePath="..\..\..\src\clock.cpp"
>
</File>
<File
@@ -178,15 +178,23 @@
>
</File>
<File
+ RelativePath="..\..\..\src\connect_session.cpp"
+ >
+ </File>
+ <File
RelativePath="..\..\..\src\ctx.cpp"
>
</File>
<File
+ RelativePath="..\..\..\src\decoder.cpp"
+ >
+ </File>
+ <File
RelativePath="..\..\..\src\devpoll.cpp"
>
</File>
<File
- RelativePath="..\..\..\src\push.cpp"
+ RelativePath="..\..\..\src\encoder.cpp"
>
</File>
<File
@@ -226,7 +234,7 @@
>
</File>
<File
- RelativePath="..\..\..\src\msg_store.cpp"
+ RelativePath="..\..\..\src\named_session.cpp"
>
</File>
<File
@@ -238,7 +246,7 @@
>
</File>
<File
- RelativePath="..\..\..\src\owned.cpp"
+ RelativePath="..\..\..\src\own.cpp"
>
</File>
<File
@@ -266,7 +274,7 @@
>
</File>
<File
- RelativePath="..\..\..\src\prefix_tree.cpp"
+ RelativePath="..\..\..\src\poller_base.cpp"
>
</File>
<File
@@ -274,6 +282,14 @@
>
</File>
<File
+ RelativePath="..\..\..\src\pull.cpp"
+ >
+ </File>
+ <File
+ RelativePath="..\..\..\src\push.cpp"
+ >
+ </File>
+ <File
RelativePath="..\..\..\src\queue.cpp"
>
</File>
@@ -310,6 +326,10 @@
>
</File>
<File
+ RelativePath="..\..\..\src\swap.cpp"
+ >
+ </File>
+ <File
RelativePath="..\..\..\src\tcp_connecter.cpp"
>
</File>
@@ -326,7 +346,11 @@
>
</File>
<File
- RelativePath="..\..\..\src\pull.cpp"
+ RelativePath="..\..\..\src\transient_session.cpp"
+ >
+ </File>
+ <File
+ RelativePath="..\..\..\src\trie.cpp"
>
</File>
<File
@@ -350,14 +374,6 @@
>
</File>
<File
- RelativePath="..\..\..\src\zmq_decoder.cpp"
- >
- </File>
- <File
- RelativePath="..\..\..\src\zmq_encoder.cpp"
- >
- </File>
- <File
RelativePath="..\..\..\src\zmq_engine.cpp"
>
</File>
@@ -376,7 +392,7 @@
UniqueIdentifier="{93995380-89BD-4b04-88EB-625FBE52EBFB}"
>
<File
- RelativePath="..\..\..\src\app_thread.hpp"
+ RelativePath="..\..\..\src\array.hpp"
>
</File>
<File
@@ -388,6 +404,14 @@
>
</File>
<File
+ RelativePath="..\..\..\src\blob.hpp"
+ >
+ </File>
+ <File
+ RelativePath="..\..\..\src\clock.hpp"
+ >
+ </File>
+ <File
RelativePath="..\..\..\src\command.hpp"
>
</File>
@@ -396,19 +420,19 @@
>
</File>
<File
- RelativePath="..\..\..\src\ctx.hpp"
+ RelativePath="..\..\..\src\connect_session.hpp"
>
</File>
<File
- RelativePath="..\..\..\src\decoder.hpp"
+ RelativePath="..\..\..\src\ctx.hpp"
>
</File>
<File
- RelativePath="..\..\..\src\devpoll.hpp"
+ RelativePath="..\..\..\src\decoder.hpp"
>
</File>
<File
- RelativePath="..\..\..\src\push.hpp"
+ RelativePath="..\..\..\src\devpoll.hpp"
>
</File>
<File
@@ -436,10 +460,6 @@
>
</File>
<File
- RelativePath="..\..\..\src\i_endpoint.hpp"
- >
- </File>
- <File
RelativePath="..\..\..\src\i_engine.hpp"
>
</File>
@@ -472,11 +492,11 @@
>
</File>
<File
- RelativePath="..\..\..\src\msg_content.hpp"
+ RelativePath="..\..\..\src\likely.hpp"
>
</File>
<File
- RelativePath="..\..\..\src\msg_store.hpp"
+ RelativePath="..\..\..\src\msg_content.hpp"
>
</File>
<File
@@ -484,6 +504,10 @@
>
</File>
<File
+ RelativePath="..\..\..\src\named_session.hpp"
+ >
+ </File>
+ <File
RelativePath="..\..\..\src\object.hpp"
>
</File>
@@ -492,7 +516,7 @@
>
</File>
<File
- RelativePath="..\..\..\src\owned.hpp"
+ RelativePath="..\..\..\src\own.hpp"
>
</File>
<File
@@ -516,7 +540,7 @@
>
</File>
<File
- RelativePath="..\platform.hpp"
+ RelativePath="..\..\..\src\platform.hpp"
>
</File>
<File
@@ -524,7 +548,11 @@
>
</File>
<File
- RelativePath="..\..\..\src\prefix_tree.hpp"
+ RelativePath="..\..\..\src\poller.hpp"
+ >
+ </File>
+ <File
+ RelativePath="..\..\..\src\poller_base.hpp"
>
</File>
<File
@@ -532,6 +560,14 @@
>
</File>
<File
+ RelativePath="..\..\..\src\pull.hpp"
+ >
+ </File>
+ <File
+ RelativePath="..\..\..\src\push.hpp"
+ >
+ </File>
+ <File
RelativePath="..\..\..\src\queue.hpp"
>
</File>
@@ -548,6 +584,10 @@
>
</File>
<File
+ RelativePath="..\..\..\src\semaphore.hpp"
+ >
+ </File>
+ <File
RelativePath="..\..\..\src\session.hpp"
>
</File>
@@ -572,6 +612,10 @@
>
</File>
<File
+ RelativePath="..\..\..\src\swap.hpp"
+ >
+ </File>
+ <File
RelativePath="..\..\..\src\tcp_connecter.hpp"
>
</File>
@@ -588,7 +632,11 @@
>
</File>
<File
- RelativePath="..\..\..\src\pull.hpp"
+ RelativePath="..\..\..\src\transient_session.hpp"
+ >
+ </File>
+ <File
+ RelativePath="..\..\..\src\trie.hpp"
>
</File>
<File
@@ -624,14 +672,6 @@
>
</File>
<File
- RelativePath="..\..\..\src\zmq_decoder.hpp"
- >
- </File>
- <File
- RelativePath="..\..\..\src\zmq_encoder.hpp"
- >
- </File>
- <File
RelativePath="..\..\..\src\zmq_engine.hpp"
>
</File>
diff --git a/builds/msvc/platform.hpp b/builds/msvc/platform.hpp
index 9c0c8ae..b20c851 100644
--- a/builds/msvc/platform.hpp
+++ b/builds/msvc/platform.hpp
@@ -20,7 +20,7 @@
#ifndef __ZMQ_PLATFORM_HPP_INCLUDED__
#define __ZMQ_PLATFORM_HPP_INCLUDED__
-// This is the platform definition for the Windows platform.
+// This is the platform definition for the MSVC platform.
// As a first step of the build process it is copied to
// zmq directory to take place of platform.hpp generated from
// platform.hpp.in on platforms supported by GNU autotools.
diff --git a/configure.in b/configure.in
index 8e8df73..e52180d 100644
--- a/configure.in
+++ b/configure.in
@@ -53,8 +53,13 @@ LIBZMQ_EXTRA_CXXFLAGS=""
# Extra LDFLAGS are appended at the end of LDFLAGS for libzmq.
LIBZMQ_EXTRA_LDFLAGS=""
-# By default compiling with -pedantic except QNX and OSX.
-pedantic="yes"
+# Enable -pedantic unless using ICC; this may be overridden later.
+AC_CHECK_LANG_ICC
+if test "xyes" = "x$ac_cv_c_intel_compiler" -o "xyes" = "x$ac_cv_cpp_intel_compiler"; then
+ pedantic="no"
+else
+ pedantic="yes"
+fi
#By default compiling with -Werror except OSX.
werror="yes"
@@ -95,6 +100,7 @@ case "${host_os}" in
CPPFLAGS="-D_GNU_SOURCE $CPPFLAGS"
fi
AC_DEFINE(ZMQ_HAVE_LINUX, 1, [Have Linux OS])
+ AC_CHECK_LIB(rt, main)
AC_CHECK_LIB(uuid, main, ,
[AC_MSG_ERROR([cannot link with -luuid, install uuid-dev.])])
;;
@@ -228,10 +234,9 @@ AC_CHECK_HEADERS(ifaddrs.h, [AC_DEFINE(ZMQ_HAVE_IFADDRS, 1, [Have ifaddrs.h head
# Use c++ in subsequent tests
AC_LANG(C++)
-# pkg-config is used if found, and is required for builds with OpenPGM.
-# However, we need to provide a way to disable it entirely when the user
-# knows what she's doing and it's use is undesirable, such as when
-# cross-compiling.
+# pkg-config is used if found; however, we need to provide a way to disable it
+# entirely when the user knows what she's doing and its use is undesirable,
+# such as when cross-compiling.
AC_ARG_WITH([pkg-config], [AS_HELP_STRING([--without-pkg-config],
[do not use pkg-config [default=no]])])
if test "x$with_pkg_config" != "xno"; then
@@ -262,7 +267,7 @@ fi
# PGM extension
pgm_ext="no"
-pgm_basename="libpgm-2-1-28~dfsg"
+pgm_basename="libpgm-5.0.78"
AC_SUBST(pgm_basename)
@@ -273,9 +278,9 @@ AC_ARG_WITH([pgm], [AS_HELP_STRING([--with-pgm],
if test "x$with_pgm_ext" != "xno"; then
AC_MSG_CHECKING([if the PGM extension is supported on this platform])
- # OpenPGM is only supported by the vendor on x86 and AMD64 platforms...
+ # OpenPGM is only supported by the vendor on x86, AMD64, and SPARC platforms...
case "${host_cpu}" in
- i*86|x86_64)
+ i*86|x86_64|amd64|*sparc*)
# Supported
;;
*)
@@ -283,9 +288,9 @@ if test "x$with_pgm_ext" != "xno"; then
;;
esac
- # ... and on Linux/Windows/Solaris systems.
+ # ... and on Linux/Windows/Solaris/FreeBSD/OSX systems.
case "${host_os}" in
- *linux*|*mingw32*|*solaris*)
+ *linux*|*mingw32*|*solaris*|*freebsd*|*darwin*)
LIBZMQ_EXTRA_CXXFLAGS="${LIBZMQ_EXTRA_CXXFLAGS} -Wno-variadic-macros -Wno-long-long "
;;
*)
@@ -294,18 +299,6 @@ if test "x$with_pgm_ext" != "xno"; then
esac
AC_MSG_RESULT([yes])
- # Test if we have pkg-config
- if test "x$with_pkg_config" != "xno"; then
- if test "x$have_pkg_config" != "xyes"; then
- AC_MSG_ERROR([the --with-pgm option requires that pkg-config be installed.]);
- fi
-
- # Check for OpenPGM dependencies
- PKG_CHECK_MODULES([GLIB], [glib-2.0 gthread-2.0])
- LIBZMQ_EXTRA_CXXFLAGS="${LIBZMQ_EXTRA_CXXFLAGS} ${GLIB_CFLAGS} "
- LIBZMQ_EXTRA_LDFLAGS="${LIBZMQ_EXTRA_LDFLAGS} ${GLIB_LIBS} "
- fi
-
# Gzip, Perl and Python are required duing PGM build
AC_CHECK_PROG(have_gzip, gzip, yes, no)
if test "x$have_gzip" != "xyes"; then
@@ -337,11 +330,11 @@ if test "x$gnu_compilers" = "xyes" -a "x$pgm_ext" = "xno"; then
CPPFLAGS="-Wall $CPPFLAGS"
if test "x$pedantic" = "xyes"; then
- CPPFLAGS="-pedantic $CPPFLAGS"
+ CPPFLAGS="-pedantic $CPPFLAGS"
fi
if test "x$werror" = "xyes"; then
- CPPFLAGS="-Werror $CPPFLAGS"
+ CPPFLAGS="-Werror $CPPFLAGS"
fi
fi
@@ -367,5 +360,5 @@ AC_OUTPUT(Makefile src/Makefile doc/Makefile
perf/Makefile src/libzmq.pc \
devices/Makefile devices/zmq_forwarder/Makefile \
devices/zmq_streamer/Makefile devices/zmq_queue/Makefile \
- builds/msvc/Makefile)
+ builds/msvc/Makefile tests/Makefile)
diff --git a/doc/zmq_bind.txt b/doc/zmq_bind.txt
index 7aa5a0b..23c3134 100644
--- a/doc/zmq_bind.txt
+++ b/doc/zmq_bind.txt
@@ -58,6 +58,8 @@ The requested 'address' specifies a nonexistent interface.
The 0MQ 'context' associated with the specified 'socket' was terminated.
*EFAULT*::
The provided 'socket' was not valid (NULL).
+*EMTHREAD*::
+No I/O thread is available to accomplish the task.
EXAMPLE
diff --git a/doc/zmq_connect.txt b/doc/zmq_connect.txt
index ffcf3b4..a95f716 100644
--- a/doc/zmq_connect.txt
+++ b/doc/zmq_connect.txt
@@ -56,6 +56,8 @@ The requested 'transport' protocol is not compatible with the socket type.
The 0MQ 'context' associated with the specified 'socket' was terminated.
*EFAULT*::
The provided 'socket' was not valid (NULL).
+*EMTHREAD*::
+No I/O thread is available to accomplish the task.
EXAMPLE
diff --git a/doc/zmq_cpp.txt b/doc/zmq_cpp.txt
index d43ff62..2ecbb55 100644
--- a/doc/zmq_cpp.txt
+++ b/doc/zmq_cpp.txt
@@ -102,11 +102,13 @@ Maps to the _zmq_connect()_ function, as described in linkzmq:zmq_connect[3].
*bool socket_t::send(message_t '&msg', int 'flags' = 0)*
Maps to the _zmq_send()_ function, as described in linkzmq:zmq_send[3].
+Returns true if message is successfully sent, false if it is not.
[verse]
*bool socket_t::recv(message_t '*msg', int 'flags' = 0)*
Maps to the _zmq_recv()_ function, as described in linkzmq:zmq_recv[3].
+Returns true if message is successfully received, false if it is not.
Message
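The boolean return values documented above map the would-block case onto false. A minimal illustrative sketch (not part of this patch; the drain() helper is made up) of reading whatever is immediately available with ZMQ_NOBLOCK:

    #include <zmq.hpp>

    //  Receive every message that is immediately available on 's'.
    //  A false return from recv() with ZMQ_NOBLOCK means "no message yet";
    //  hard errors are reported via zmq::error_t instead.
    void drain (zmq::socket_t &s)
    {
        zmq::message_t msg;
        while (s.recv (&msg, ZMQ_NOBLOCK)) {
            //  ... process msg here ...
            msg.rebuild ();
        }
    }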
diff --git a/doc/zmq_getsockopt.txt b/doc/zmq_getsockopt.txt
index 1e36a2a..34e6084 100644
--- a/doc/zmq_getsockopt.txt
+++ b/doc/zmq_getsockopt.txt
@@ -26,6 +26,19 @@ value stored in the buffer.
The following options can be retrieved with the _zmq_getsockopt()_ function:
+ZMQ_TYPE: Retrieve socket type.
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+The 'ZMQ_TYPE' option shall retrieve the socket type for the specified
+'socket'. The socket type is specified at socket creation time and
+cannot be modified afterwards.
+
+[horizontal]
+Option value type:: int
+Option value unit:: N/A
+Default value:: N/A
+Applicable socket types:: all
+
+
ZMQ_RCVMORE: More message parts to follow
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The 'ZMQ_RCVMORE' option shall return a boolean value indicating if the
@@ -199,6 +212,76 @@ Default value:: 0
Applicable socket types:: all
+ZMQ_LINGER: Retrieve linger period for socket shutdown
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+The 'ZMQ_LINGER' option shall retrieve the period for which pending outbound
+messages linger in memory after the socket is closed. A value of -1 means
+infinite: pending messages are kept until they are fully transferred to
+the peer. A value of 0 means that all pending messages are dropped immediately
+when the socket is closed. A positive value specifies the number of milliseconds
+to keep trying to send the pending messages before discarding them.
+
+[horizontal]
+Option value type:: int
+Option value unit:: milliseconds
+Default value:: -1
+Applicable socket types:: all
+
+ZMQ_RECONNECT_IVL: Retrieve reconnect period for connection-based transports
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+The 'ZMQ_RECONNECT_IVL' option shall retrieve the period, in milliseconds, that
+0MQ waits before attempting to reconnect a disconnected underlying connection.
+
+[horizontal]
+Option value type:: int
+Option value unit:: milliseconds
+Default value:: 100
+Applicable socket types:: all
+
+
+ZMQ_BACKLOG: Retrieve maximum length of the queue of pending connections
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+The 'ZMQ_BACKLOG' option shall retrieve the maximum size of the
+pending connection backlog for connection-based transports. For details
+refer to your operating system documentation for the 'listen' function.
+
+[horizontal]
+Option value type:: int
+Option value unit:: connections
+Default value:: 100
+Applicable socket types:: all
+
+
+ZMQ_FD: Retrieve file descriptor associated with the socket
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+The 'ZMQ_FD' option shall retrieve the file descriptor associated with the 0MQ
+socket. The descriptor can be used to integrate the 0MQ socket into an existing
+event loop. It should be used for polling only, never for actual reading or
+writing. The descriptor signals an edge-triggered IN event when something has
+happened within the 0MQ socket; this does not necessarily mean that messages
+can be read or written. Check the ZMQ_EVENTS option to find out whether the
+0MQ socket is readable or writeable.
+
+[horizontal]
+Option value type:: int on POSIX systems, SOCKET on Windows
+Option value unit:: N/A
+Default value:: N/A
+Applicable socket types:: all
+
+
+ZMQ_EVENTS: Check whether socket is readable and/or writeable
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+The 'ZMQ_EVENTS' option shall retrieve the event flags for the specified socket.
+If a message can be read from the socket, the ZMQ_POLLIN flag is set. If a
+message can be written to the socket, the ZMQ_POLLOUT flag is set.
+
+[horizontal]
+Option value type:: uint32_t
+Option value unit:: N/A (flags)
+Default value:: N/A
+Applicable socket types:: all
+
+
RETURN VALUE
------------
The _zmq_getsockopt()_ function shall return zero if successful. Otherwise it
@@ -216,6 +299,8 @@ option value.
The 0MQ 'context' associated with the specified 'socket' was terminated.
*EFAULT*::
The provided 'socket' was not valid (NULL).
+*EINTR*::
+The operation was interrupted by delivery of a signal.
EXAMPLE
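The ZMQ_FD and ZMQ_EVENTS options added above are intended to be used together when plugging a 0MQ socket into an external event loop. A hedged sketch (illustrative only, not part of the patch; the poll()-based loop and the wait_readable name are assumptions):

    #include <zmq.h>
    #include <poll.h>
    #include <stdint.h>
    #include <assert.h>

    //  Block until a message can be read from the 0MQ socket 'zs'.
    //  ZMQ_FD yields an edge-triggered notification descriptor, so actual
    //  readability must be (re)checked via ZMQ_EVENTS after every wakeup.
    void wait_readable (void *zs)
    {
        int fd;
        size_t fd_size = sizeof (fd);
        int rc = zmq_getsockopt (zs, ZMQ_FD, &fd, &fd_size);
        assert (rc == 0);

        while (true) {
            uint32_t events;
            size_t events_size = sizeof (events);
            rc = zmq_getsockopt (zs, ZMQ_EVENTS, &events, &events_size);
            assert (rc == 0);
            if (events & ZMQ_POLLIN)
                return;

            //  Wait for the next edge on the notification descriptor.
            pollfd pfd = {fd, POLLIN, 0};
            rc = poll (&pfd, 1, -1);
            assert (rc >= 0);
        }
    }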
diff --git a/doc/zmq_poll.txt b/doc/zmq_poll.txt
index fe2a209..f709c07 100644
--- a/doc/zmq_poll.txt
+++ b/doc/zmq_poll.txt
@@ -44,7 +44,7 @@ member, and then indicate any requested events that have occured by setting the
bit corresponding to the event condition in the 'revents' member.
If none of the requested events have occured on any *zmq_pollitem_t* item,
-_zmq_poll()_ shall wait up to 'timeout' microseconds for an event to occur on
+_zmq_poll()_ shall wait 'timeout' microseconds for an event to occur on
any of the requested items. If the value of 'timeout' is `0`, _zmq_poll()_
shall return immediately. If the value of 'timeout' is `-1`, _zmq_poll()_ shall
block indefinitely until a requested event has occured on at least one
@@ -84,20 +84,16 @@ of *zmq_pollitem_t* structures with events signaled in 'revents' or `0` if no
events have been signaled. Upon failure, _zmq_poll()_ shall return `-1` and set
'errno' to one of the values defined below.
-IMPORTANT: The _zmq_poll()_ function may return *before* the 'timeout' period
-has expired even if no events have been signaled.
-
ERRORS
------
-*EFAULT*::
-At least one of the members of the 'items' array refers to a 'socket' belonging
-to a different application thread.
*ETERM*::
At least one of the members of the 'items' array refers to a 'socket' whose
associated 0MQ 'context' was terminated.
*EFAULT*::
The provided 'items' was not valid (NULL).
+*EINTR*::
+The poll was interrupted by delivery of a signal before any event was available.
EXAMPLE
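With the EINTR case added above, a robust caller simply restarts the poll when a signal arrives before any event. A small sketch (illustrative, not part of the patch); note that 'timeout' is given in microseconds in this API version:

    #include <zmq.h>
    #include <errno.h>

    //  Poll 'zs' for input for up to one second, restarting after signals.
    //  Returns the result of the last, uninterrupted zmq_poll() call.
    int poll_once (void *zs)
    {
        zmq_pollitem_t items [1];
        items [0].socket = zs;
        items [0].fd = 0;
        items [0].events = ZMQ_POLLIN;
        items [0].revents = 0;

        int rc;
        do
            rc = zmq_poll (items, 1, 1000000);    //  1,000,000 us = 1 second
        while (rc == -1 && errno == EINTR);
        return rc;
    }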
diff --git a/doc/zmq_recv.txt b/doc/zmq_recv.txt
index dc60af6..dbbbd75 100644
--- a/doc/zmq_recv.txt
+++ b/doc/zmq_recv.txt
@@ -65,6 +65,9 @@ _messaging patterns_ section of linkzmq:zmq_socket[3] for more information.
The 0MQ 'context' associated with the specified 'socket' was terminated.
*EFAULT*::
The provided 'socket' was not valid (NULL).
+*EINTR*::
+The operation was interrupted by delivery of a signal before a message was
+available.
EXAMPLE
diff --git a/doc/zmq_send.txt b/doc/zmq_send.txt
index 793d1a8..231dfcc 100644
--- a/doc/zmq_send.txt
+++ b/doc/zmq_send.txt
@@ -71,6 +71,9 @@ _messaging patterns_ section of linkzmq:zmq_socket[3] for more information.
The 0MQ 'context' associated with the specified 'socket' was terminated.
*EFAULT*::
The provided 'context' was not valid (NULL).
+*EINTR*::
+The operation was interrupted by delivery of a signal before the message was
+sent.
EXAMPLE
diff --git a/doc/zmq_setsockopt.txt b/doc/zmq_setsockopt.txt
index 1b551c6..248a6ac 100644
--- a/doc/zmq_setsockopt.txt
+++ b/doc/zmq_setsockopt.txt
@@ -216,6 +216,48 @@ Default value:: 0
Applicable socket types:: all
+ZMQ_LINGER: Set linger period for socket shutdown
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+The 'ZMQ_LINGER' option shall be set to specify the period for which pending
+outbound messages linger in memory after the socket is closed. A value of -1
+means infinite: pending messages are kept until they are fully transferred to
+the peer. A value of 0 means that all pending messages are dropped immediately
+when the socket is closed. A positive value specifies the number of milliseconds
+to keep trying to send the pending messages before discarding them.
+
+[horizontal]
+Option value type:: int
+Option value unit:: milliseconds
+Default value:: -1
+Applicable socket types:: all
+
+
+ZMQ_RECONNECT_IVL: Set reconnect period for connection-based transports
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+The 'ZMQ_RECONNECT_IVL' option shall be set to specify how long 0MQ waits
+before attempting to reconnect a disconnected underlying connection. The interval
+can be randomised to some extent by 0MQ to prevent reconnection storms.
+
+[horizontal]
+Option value type:: int
+Option value unit:: milliseconds
+Default value:: 100
+Applicable socket types:: all
+
+
+ZMQ_BACKLOG: Set maximum length of the queue of pending connections
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+The 'ZMQ_BACKLOG' option shall be set to specify the maximum size of the
+pending connection backlog for connection-based transports. For details
+refer to your operating system documentation for the 'listen' function.
+
+[horizontal]
+Option value type:: int
+Option value unit:: connections
+Default value:: 100
+Applicable socket types:: all
+
+
RETURN VALUE
------------
The _zmq_setsockopt()_ function shall return zero if successful. Otherwise it
@@ -231,6 +273,8 @@ _option_value_ is invalid.
The 0MQ 'context' associated with the specified 'socket' was terminated.
*EFAULT*::
The provided 'socket' was not valid (NULL).
+*EINTR*::
+The operation was interrupted by delivery of a signal.
EXAMPLE
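The new ZMQ_LINGER option is typically set just before closing a socket whose pending messages should not hold the process up. An illustrative sketch (not part of the patch; the close_immediately helper is made up):

    #include <zmq.h>
    #include <assert.h>

    //  Drop any not-yet-sent messages immediately on close instead of
    //  lingering indefinitely (the default of -1).
    void close_immediately (void *zs)
    {
        int linger = 0;
        int rc = zmq_setsockopt (zs, ZMQ_LINGER, &linger, sizeof (linger));
        assert (rc == 0);
        rc = zmq_close (zs);
        assert (rc == 0);
    }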
diff --git a/foreign/openpgm/libpgm-2-1-28~dfsg.tar.gz b/foreign/openpgm/libpgm-2-1-28~dfsg.tar.gz
deleted file mode 100644
index 5e2bfc1..0000000
--- a/foreign/openpgm/libpgm-2-1-28~dfsg.tar.gz
+++ /dev/null
Binary files differ
diff --git a/foreign/openpgm/libpgm-5.0.78.tar.gz b/foreign/openpgm/libpgm-5.0.78.tar.gz
new file mode 100644
index 0000000..37db156
--- /dev/null
+++ b/foreign/openpgm/libpgm-5.0.78.tar.gz
Binary files differ
diff --git a/include/zmq.h b/include/zmq.h
index 9afd8e1..857fec7 100644
--- a/include/zmq.h
+++ b/include/zmq.h
@@ -47,8 +47,8 @@ extern "C" {
/* Version macros for compile-time API version detection */
#define ZMQ_VERSION_MAJOR 2
-#define ZMQ_VERSION_MINOR 0
-#define ZMQ_VERSION_PATCH 11
+#define ZMQ_VERSION_MINOR 1
+#define ZMQ_VERSION_PATCH 0
#define ZMQ_MAKE_VERSION(major, minor, patch) \
((major) * 10000 + (minor) * 100 + (patch))
@@ -93,10 +93,10 @@ ZMQ_EXPORT void zmq_version (int *major, int *minor, int *patch);
#endif
/* Native 0MQ error codes. */
-#define EMTHREAD (ZMQ_HAUSNUMERO + 50)
#define EFSM (ZMQ_HAUSNUMERO + 51)
#define ENOCOMPATPROTO (ZMQ_HAUSNUMERO + 52)
#define ETERM (ZMQ_HAUSNUMERO + 53)
+#define EMTHREAD (ZMQ_HAUSNUMERO + 54)
/* This function retrieves the errno as it is known to 0MQ library. The goal */
/* of this function is to make the code 100% portable, including where 0MQ */
@@ -188,6 +188,12 @@ ZMQ_EXPORT int zmq_term (void *context);
#define ZMQ_SNDBUF 11
#define ZMQ_RCVBUF 12
#define ZMQ_RCVMORE 13
+#define ZMQ_FD 14
+#define ZMQ_EVENTS 15
+#define ZMQ_TYPE 16
+#define ZMQ_LINGER 17
+#define ZMQ_RECONNECT_IVL 18
+#define ZMQ_BACKLOG 19
/* Send/recv options. */
#define ZMQ_NOBLOCK 1
@@ -236,8 +242,6 @@ ZMQ_EXPORT int zmq_poll (zmq_pollitem_t *items, int nitems, long timeout);
ZMQ_EXPORT int zmq_device (int device, void * insocket, void* outsocket);
-#undef ZMQ_EXPORT
-
#ifdef __cplusplus
}
#endif
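The bumped version macros allow the same source to compile against both 2.0 and 2.1 headers. A sketch, assuming the header also defines ZMQ_VERSION from the three components (as released zmq.h headers do; not shown in this hunk):

    #include <zmq.h>

    #if defined ZMQ_VERSION && ZMQ_VERSION >= ZMQ_MAKE_VERSION (2, 1, 0)
    //  ZMQ_LINGER, ZMQ_RECONNECT_IVL, ZMQ_BACKLOG, ZMQ_FD and ZMQ_EVENTS
    //  are available here.
    #endif

    //  Run-time check via zmq_version ().
    int is_at_least_2_1 ()
    {
        int major, minor, patch;
        zmq_version (&major, &minor, &patch);
        return ZMQ_MAKE_VERSION (major, minor, patch)
            >= ZMQ_MAKE_VERSION (2, 1, 0);
    }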
diff --git a/include/zmq.hpp b/include/zmq.hpp
index 86ffff6..3fb5a79 100644
--- a/include/zmq.hpp
+++ b/include/zmq.hpp
@@ -94,8 +94,7 @@ namespace zmq
inline ~message_t ()
{
int rc = zmq_msg_close (this);
- if (rc != 0)
- throw error_t ();
+ assert (rc == 0);
}
inline void rebuild ()
@@ -202,8 +201,7 @@ namespace zmq
inline ~socket_t ()
{
int rc = zmq_close (ptr);
- if (rc != 0)
- throw error_t ();
+ assert (rc == 0);
}
inline operator void* ()
diff --git a/perf/remote_thr.cpp b/perf/remote_thr.cpp
index 1e69601..3295f2e 100644
--- a/perf/remote_thr.cpp
+++ b/perf/remote_thr.cpp
@@ -21,6 +21,7 @@
#include "../include/zmq_utils.h"
#include <stdio.h>
#include <stdlib.h>
+#include <string.h>
int main (int argc, char *argv [])
{
@@ -64,11 +65,16 @@ int main (int argc, char *argv [])
}
for (i = 0; i != message_count; i++) {
+
rc = zmq_msg_init_size (&msg, message_size);
if (rc != 0) {
printf ("error in zmq_msg_init_size: %s\n", zmq_strerror (errno));
return -1;
}
+#if defined ZMQ_MAKE_VALGRIND_HAPPY
+ memset (zmq_msg_data (&msg), 0, message_size);
+#endif
+
rc = zmq_send (s, &msg, 0);
if (rc != 0) {
printf ("error in zmq_send: %s\n", zmq_strerror (errno));
@@ -81,8 +87,6 @@ int main (int argc, char *argv [])
}
}
- zmq_sleep (10);
-
rc = zmq_close (s);
if (rc != 0) {
printf ("error in zmq_close: %s\n", zmq_strerror (errno));
diff --git a/src/Makefile.am b/src/Makefile.am
index 19a80d0..5cd4f73 100644
--- a/src/Makefile.am
+++ b/src/Makefile.am
@@ -6,7 +6,21 @@ pkgconfig_DATA = libzmq.pc
include_HEADERS = ../include/zmq.h ../include/zmq.hpp ../include/zmq_utils.h
if BUILD_PGM
-pgm_sources = ../foreign/openpgm/@pgm_basename@/openpgm/pgm/packet.c \
+noinst_LTLIBRARIES = libpgm.la
+
+nodist_libpgm_la_SOURCES = ../foreign/openpgm/@pgm_basename@/openpgm/pgm/thread.c \
+ ../foreign/openpgm/@pgm_basename@/openpgm/pgm/mem.c \
+ ../foreign/openpgm/@pgm_basename@/openpgm/pgm/string.c \
+ ../foreign/openpgm/@pgm_basename@/openpgm/pgm/list.c \
+ ../foreign/openpgm/@pgm_basename@/openpgm/pgm/slist.c \
+ ../foreign/openpgm/@pgm_basename@/openpgm/pgm/queue.c \
+ ../foreign/openpgm/@pgm_basename@/openpgm/pgm/hashtable.c \
+ ../foreign/openpgm/@pgm_basename@/openpgm/pgm/messages.c \
+ ../foreign/openpgm/@pgm_basename@/openpgm/pgm/error.c \
+ ../foreign/openpgm/@pgm_basename@/openpgm/pgm/math.c \
+ ../foreign/openpgm/@pgm_basename@/openpgm/pgm/packet_parse.c \
+ ../foreign/openpgm/@pgm_basename@/openpgm/pgm/packet_test.c \
+ ../foreign/openpgm/@pgm_basename@/openpgm/pgm/sockaddr.c \
../foreign/openpgm/@pgm_basename@/openpgm/pgm/time.c \
../foreign/openpgm/@pgm_basename@/openpgm/pgm/if.c \
../foreign/openpgm/@pgm_basename@/openpgm/pgm/getifaddrs.c \
@@ -16,28 +30,25 @@ pgm_sources = ../foreign/openpgm/@pgm_basename@/openpgm/pgm/packet.c \
../foreign/openpgm/@pgm_basename@/openpgm/pgm/nametoindex.c \
../foreign/openpgm/@pgm_basename@/openpgm/pgm/inet_network.c \
../foreign/openpgm/@pgm_basename@/openpgm/pgm/md5.c \
+ ../foreign/openpgm/@pgm_basename@/openpgm/pgm/rand.c \
../foreign/openpgm/@pgm_basename@/openpgm/pgm/gsi.c \
../foreign/openpgm/@pgm_basename@/openpgm/pgm/tsi.c \
- ../foreign/openpgm/@pgm_basename@/openpgm/pgm/signal.c \
- ../foreign/openpgm/@pgm_basename@/openpgm/pgm/txwi.c \
- ../foreign/openpgm/@pgm_basename@/openpgm/pgm/rxwi.c \
- ../foreign/openpgm/@pgm_basename@/openpgm/pgm/transport.c \
+ ../foreign/openpgm/@pgm_basename@/openpgm/pgm/txw.c \
+ ../foreign/openpgm/@pgm_basename@/openpgm/pgm/rxw.c \
+ ../foreign/openpgm/@pgm_basename@/openpgm/pgm/skbuff.c \
+ ../foreign/openpgm/@pgm_basename@/openpgm/pgm/socket.c \
../foreign/openpgm/@pgm_basename@/openpgm/pgm/source.c \
../foreign/openpgm/@pgm_basename@/openpgm/pgm/receiver.c \
../foreign/openpgm/@pgm_basename@/openpgm/pgm/recv.c \
- ../foreign/openpgm/@pgm_basename@/openpgm/pgm/pgm.c \
+ ../foreign/openpgm/@pgm_basename@/openpgm/pgm/engine.c \
../foreign/openpgm/@pgm_basename@/openpgm/pgm/timer.c \
../foreign/openpgm/@pgm_basename@/openpgm/pgm/net.c \
../foreign/openpgm/@pgm_basename@/openpgm/pgm/rate_control.c \
- ../foreign/openpgm/@pgm_basename@/openpgm/pgm/async.c \
../foreign/openpgm/@pgm_basename@/openpgm/pgm/checksum.c \
../foreign/openpgm/@pgm_basename@/openpgm/pgm/reed_solomon.c \
../foreign/openpgm/@pgm_basename@/openpgm/pgm/galois_tables.c \
../foreign/openpgm/@pgm_basename@/openpgm/pgm/wsastrerror.c \
- ../foreign/openpgm/@pgm_basename@/openpgm/pgm/glib-compat.c \
- ../foreign/openpgm/@pgm_basename@/openpgm/pgm/backtrace.c \
- ../foreign/openpgm/@pgm_basename@/openpgm/pgm/log.c \
- ../foreign/openpgm/@pgm_basename@/openpgm/pgm/sockaddr.c \
+ ../foreign/openpgm/@pgm_basename@/openpgm/pgm/histogram.c \
../foreign/openpgm/@pgm_basename@/openpgm/pgm/version.c
../foreign/openpgm/@pgm_basename@/openpgm/pgm/version.c: ../foreign/openpgm/@pgm_basename@/openpgm/pgm/version_generator.py
@@ -45,20 +56,22 @@ pgm_sources = ../foreign/openpgm/@pgm_basename@/openpgm/pgm/packet.c \
../foreign/openpgm/@pgm_basename@/openpgm/pgm/galois_tables.c: ../foreign/openpgm/@pgm_basename@/openpgm/pgm/galois_generator.pl
perl ../foreign/openpgm/@pgm_basename@/openpgm/pgm/galois_generator.pl > $@
-endif
-nodist_libzmq_la_SOURCES = $(pgm_sources)
+libpgm_la_LIBADD = @LTLIBOBJS@
+endif
-libzmq_la_SOURCES = app_thread.hpp \
+libzmq_la_SOURCES = \
+ array.hpp \
atomic_counter.hpp \
atomic_ptr.hpp \
blob.hpp \
+ clock.hpp \
command.hpp \
config.hpp \
+ connect_session.hpp \
ctx.hpp \
decoder.hpp \
devpoll.hpp \
- push.hpp \
encoder.hpp \
epoll.hpp \
err.hpp \
@@ -69,18 +82,17 @@ libzmq_la_SOURCES = app_thread.hpp \
io_object.hpp \
io_thread.hpp \
ip.hpp \
- i_endpoint.hpp \
i_engine.hpp \
i_poll_events.hpp \
kqueue.hpp \
lb.hpp \
likely.hpp \
msg_content.hpp \
- msg_store.hpp \
mutex.hpp \
+ named_session.hpp \
object.hpp \
options.hpp \
- owned.hpp \
+ own.hpp \
pgm_receiver.hpp \
pgm_sender.hpp \
pgm_socket.hpp \
@@ -88,44 +100,47 @@ libzmq_la_SOURCES = app_thread.hpp \
platform.hpp \
poll.hpp \
poller.hpp \
+ poller_base.hpp \
pair.hpp \
- prefix_tree.hpp \
pub.hpp \
+ pull.hpp \
+ push.hpp \
queue.hpp \
rep.hpp \
req.hpp \
select.hpp \
+ semaphore.hpp \
session.hpp \
signaler.hpp \
socket_base.hpp \
stdint.hpp \
streamer.hpp \
sub.hpp \
+ swap.hpp \
tcp_connecter.hpp \
tcp_listener.hpp \
tcp_socket.hpp \
thread.hpp \
- pull.hpp \
+ transient_session.hpp \
+ trie.hpp \
uuid.hpp \
windows.hpp \
wire.hpp \
xrep.hpp \
xreq.hpp \
- yarray.hpp \
- yarray_item.hpp \
ypipe.hpp \
yqueue.hpp \
zmq_connecter.hpp \
- zmq_decoder.hpp \
- zmq_encoder.hpp \
zmq_engine.hpp \
zmq_init.hpp \
zmq_listener.hpp \
- app_thread.cpp \
+ clock.cpp \
command.cpp \
ctx.cpp \
+ connect_session.cpp \
+ decoder.cpp \
devpoll.cpp \
- push.cpp \
+ encoder.cpp \
epoll.cpp \
err.cpp \
forwarder.cpp \
@@ -135,17 +150,19 @@ libzmq_la_SOURCES = app_thread.hpp \
ip.cpp \
kqueue.cpp \
lb.cpp \
- msg_store.cpp \
+ named_session.cpp \
object.cpp \
options.cpp \
- owned.cpp \
+ own.cpp \
+ pair.cpp \
pgm_receiver.cpp \
pgm_sender.cpp \
pgm_socket.cpp \
- pair.cpp \
- prefix_tree.cpp \
pipe.cpp \
poll.cpp \
+ poller_base.cpp \
+ pull.cpp \
+ push.cpp \
pub.cpp \
queue.cpp \
rep.cpp \
@@ -156,18 +173,18 @@ libzmq_la_SOURCES = app_thread.hpp \
socket_base.cpp \
streamer.cpp \
sub.cpp \
+ swap.cpp \
tcp_connecter.cpp \
tcp_listener.cpp \
tcp_socket.cpp \
thread.cpp \
- pull.cpp \
+ transient_session.cpp \
+ trie.cpp \
uuid.cpp \
xrep.cpp \
xreq.cpp \
zmq.cpp \
zmq_connecter.cpp \
- zmq_decoder.cpp \
- zmq_encoder.cpp \
zmq_engine.cpp \
zmq_init.cpp \
zmq_listener.cpp
@@ -183,53 +200,44 @@ if BUILD_PGM
if ON_MINGW
libpgm_diff_flags = \
-D_WIN32_WINNT=0x0501 \
- -DCONFIG_16BIT_CHECKSUM \
- -DCONFIG_HAVE_IFR_NETMASK \
- -DCONFIG_BIND_INADDR_ANY \
- -DCONFIG_GALOIS_MUL_LUT \
- -DIF_NAMESIZE=256 \
- -DPGM_GNUC_INTERNAL=G_GNUC_INTERNAL \
+ -DCONFIG_HAVE_ISO_VARARGS \
+ -DCONFIG_HAVE_TSC \
-DCONFIG_HAVE_WSACMSGHDR \
- -DGETTEXT_PACKAGE='"pgm"' \
- -DG_LOG_DOMAIN='"Pgm"'
+ -DCONFIG_HAVE_DSO_VISIBILITY \
+ -DCONFIG_BIND_INADDR_ANY
else
libpgm_diff_flags = \
- -D__need_IOV_MAX \
- -DCONFIG_16BIT_CHECKSUM \
+ -DCONFIG_HAVE_GETPROTOBYNAME_R2 \
+ -DCONFIG_HAVE_ISO_VARARGS \
+ -DCONFIG_HAVE_ALLOCA_H \
+ -DCONFIG_HAVE_PROC \
+ -DCONFIG_HAVE_BACKTRACE \
-DCONFIG_HAVE_PSELECT \
- -DCONFIG_HAVE_POLL \
- -DCONFIG_HAVE_PPOLL \
- -DCONFIG_HAVE_EPOLL \
- -DCONFIG_HAVE_CLOCK_GETTIME \
- -DCONFIG_HAVE_CLOCK_NANOSLEEP \
- -DCONFIG_HAVE_NANOSLEEP \
- -DCONFIG_HAVE_USLEEP \
-DCONFIG_HAVE_RTC \
-DCONFIG_HAVE_TSC \
- -DCONFIG_HAVE_IFR_NETMASK \
+ -DCONFIG_HAVE_HPET \
+ -DCONFIG_HAVE_POLL \
+ -DCONFIG_HAVE_EPOLL \
-DCONFIG_HAVE_GETIFADDRS \
- -DCONFIG_HAVE_GETHOSTBYNAME2 \
- -DCONFIG_HAVE_GETPROTOBYNAME_R \
- -DCONFIG_BIND_INADDR_ANY \
- -DCONFIG_GALOIS_MUL_LUT \
+ -DCONFIG_HAVE_IFR_NETMASK \
-DCONFIG_HAVE_MCAST_JOIN \
-DCONFIG_HAVE_IP_MREQN \
-DCONFIG_HAVE_SPRINTF_GROUPING \
- -DCONFIG_HAVE_HPET \
- -DPGM_GNUC_INTERNAL=G_GNUC_INTERNAL \
- -DGETTEXT_PACKAGE='"pgm"' \
- -DG_LOG_DOMAIN='"Pgm"'
+ -DCONFIG_HAVE_VASPRINTF \
+ -DCONFIG_HAVE_DSO_VISIBILITY \
+ -DCONFIG_BIND_INADDR_ANY \
+ -DCONFIG_HAVE_GETOPT
endif
-libzmq_la_CFLAGS = -I$(top_srcdir)/foreign/openpgm/@pgm_basename@/openpgm/pgm/include/ @LIBZMQ_EXTRA_CXXFLAGS@ \
- -Wall \
- -pedantic \
+libpgm_la_CFLAGS = -I$(top_srcdir)/foreign/openpgm/@pgm_basename@/openpgm/pgm/include/ @LIBZMQ_EXTRA_CXXFLAGS@ \
-std=gnu99 \
- -fno-strict-aliasing \
- --param max-inline-insns-single=600 \
+ -D_XOPEN_SOURCE=600 \
+ -D_BSD_SOURCE \
-D_REENTRANT \
- -D_GNU_SOURCE \
+ -DCONFIG_16BIT_CHECKSUM \
+ -DCONFIG_GALOIS_MUL_LUT \
+ -DGETTEXT_PACKAGE='"pgm"' \
${libpgm_diff_flags}
libzmq_la_CXXFLAGS = -I$(top_srcdir)/foreign/openpgm/@pgm_basename@/openpgm/pgm/include/ \
@@ -240,6 +248,10 @@ if BUILD_NO_PGM
libzmq_la_CXXFLAGS = @LIBZMQ_EXTRA_CXXFLAGS@
endif
+if BUILD_PGM
+libzmq_la_LIBADD = libpgm.la
+endif
+
dist-hook:
-rm $(distdir)/platform.hpp
diff --git a/src/app_thread.cpp b/src/app_thread.cpp
deleted file mode 100644
index fc9bc1f..0000000
--- a/src/app_thread.cpp
+++ /dev/null
@@ -1,197 +0,0 @@
-/*
- Copyright (c) 2007-2010 iMatix Corporation
-
- This file is part of 0MQ.
-
- 0MQ is free software; you can redistribute it and/or modify it under
- the terms of the Lesser GNU General Public License as published by
- the Free Software Foundation; either version 3 of the License, or
- (at your option) any later version.
-
- 0MQ is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- Lesser GNU General Public License for more details.
-
- You should have received a copy of the Lesser GNU General Public License
- along with this program. If not, see <http://www.gnu.org/licenses/>.
-*/
-
-#include <new>
-#include <algorithm>
-
-#include "../include/zmq.h"
-
-#include "platform.hpp"
-
-#if defined ZMQ_HAVE_WINDOWS
-#include "windows.hpp"
-#if defined _MSC_VER
-#include <intrin.h>
-#endif
-#else
-#include <unistd.h>
-#endif
-
-#include "app_thread.hpp"
-#include "ctx.hpp"
-#include "err.hpp"
-#include "pipe.hpp"
-#include "config.hpp"
-#include "socket_base.hpp"
-#include "pair.hpp"
-#include "pub.hpp"
-#include "sub.hpp"
-#include "req.hpp"
-#include "rep.hpp"
-#include "xreq.hpp"
-#include "xrep.hpp"
-#include "pull.hpp"
-#include "push.hpp"
-
-// If the RDTSC is available we use it to prevent excessive
-// polling for commands. The nice thing here is that it will work on any
-// system with x86 architecture and gcc or MSVC compiler.
-#if (defined __GNUC__ && (defined __i386__ || defined __x86_64__)) ||\
- (defined _MSC_VER && (defined _M_IX86 || defined _M_X64))
-#define ZMQ_DELAY_COMMANDS
-#endif
-
-zmq::app_thread_t::app_thread_t (ctx_t *ctx_,
- uint32_t thread_slot_) :
- object_t (ctx_, thread_slot_),
- last_processing_time (0),
- terminated (false)
-{
-}
-
-zmq::app_thread_t::~app_thread_t ()
-{
- zmq_assert (sockets.empty ());
-}
-
-void zmq::app_thread_t::stop ()
-{
- send_stop ();
-}
-
-zmq::signaler_t *zmq::app_thread_t::get_signaler ()
-{
- return &signaler;
-}
-
-bool zmq::app_thread_t::process_commands (bool block_, bool throttle_)
-{
- bool received;
- command_t cmd;
- if (block_) {
- received = signaler.recv (&cmd, true);
- zmq_assert (received);
- }
- else {
-
-#if defined ZMQ_DELAY_COMMANDS
- // Optimised version of command processing - it doesn't have to check
- // for incoming commands each time. It does so only if certain time
- // elapsed since last command processing. Command delay varies
- // depending on CPU speed: It's ~1ms on 3GHz CPU, ~2ms on 1.5GHz CPU
- // etc. The optimisation makes sense only on platforms where getting
- // a timestamp is a very cheap operation (tens of nanoseconds).
- if (throttle_) {
-
- // Get timestamp counter.
-#if defined __GNUC__
- uint32_t low;
- uint32_t high;
- __asm__ volatile ("rdtsc" : "=a" (low), "=d" (high));
- uint64_t current_time = (uint64_t) high << 32 | low;
-#elif defined _MSC_VER
- uint64_t current_time = __rdtsc ();
-#else
-#error
-#endif
-
- // Check whether TSC haven't jumped backwards (in case of migration
- // between CPU cores) and whether certain time have elapsed since
- // last command processing. If it didn't do nothing.
- if (current_time >= last_processing_time &&
- current_time - last_processing_time <= max_command_delay)
- return !terminated;
- last_processing_time = current_time;
- }
-#endif
-
- // Check whether there are any commands pending for this thread.
- received = signaler.recv (&cmd, false);
- }
-
- // Process all the commands available at the moment.
- while (received) {
- cmd.destination->process_command (cmd);
- received = signaler.recv (&cmd, false);
- }
-
- return !terminated;
-}
-
-zmq::socket_base_t *zmq::app_thread_t::create_socket (int type_)
-{
- socket_base_t *s = NULL;
- switch (type_) {
- case ZMQ_PAIR:
- s = new (std::nothrow) pair_t (this);
- break;
- case ZMQ_PUB:
- s = new (std::nothrow) pub_t (this);
- break;
- case ZMQ_SUB:
- s = new (std::nothrow) sub_t (this);
- break;
- case ZMQ_REQ:
- s = new (std::nothrow) req_t (this);
- break;
- case ZMQ_REP:
- s = new (std::nothrow) rep_t (this);
- break;
- case ZMQ_XREQ:
- s = new (std::nothrow) xreq_t (this);
- break;
- case ZMQ_XREP:
- s = new (std::nothrow) xrep_t (this);
- break;
- case ZMQ_PULL:
- s = new (std::nothrow) pull_t (this);
- break;
- case ZMQ_PUSH:
- s = new (std::nothrow) push_t (this);
- break;
- default:
- if (sockets.empty ())
- get_ctx ()->no_sockets (this);
- errno = EINVAL;
- return NULL;
- }
- zmq_assert (s);
-
- sockets.push_back (s);
-
- return s;
-}
-
-void zmq::app_thread_t::remove_socket (socket_base_t *socket_)
-{
- sockets.erase (socket_);
- if (sockets.empty ())
- get_ctx ()->no_sockets (this);
-}
-
-void zmq::app_thread_t::process_stop ()
-{
- terminated = true;
-}
-
-bool zmq::app_thread_t::is_terminated ()
-{
- return terminated;
-}
-
diff --git a/src/app_thread.hpp b/src/app_thread.hpp
deleted file mode 100644
index f0deaab..0000000
--- a/src/app_thread.hpp
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
- Copyright (c) 2007-2010 iMatix Corporation
-
- This file is part of 0MQ.
-
- 0MQ is free software; you can redistribute it and/or modify it under
- the terms of the Lesser GNU General Public License as published by
- the Free Software Foundation; either version 3 of the License, or
- (at your option) any later version.
-
- 0MQ is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- Lesser GNU General Public License for more details.
-
- You should have received a copy of the Lesser GNU General Public License
- along with this program. If not, see <http://www.gnu.org/licenses/>.
-*/
-
-#ifndef __ZMQ_APP_THREAD_HPP_INCLUDED__
-#define __ZMQ_APP_THREAD_HPP_INCLUDED__
-
-#include <vector>
-
-#include "stdint.hpp"
-#include "object.hpp"
-#include "yarray.hpp"
-#include "signaler.hpp"
-
-namespace zmq
-{
-
- class app_thread_t : public object_t
- {
- public:
-
- app_thread_t (class ctx_t *ctx_, uint32_t thread_slot_);
-
- ~app_thread_t ();
-
- // Interrupt blocking call if the app thread is stuck in one.
- // This function is is called from a different thread!
- void stop ();
-
- // Returns signaler associated with this application thread.
- signaler_t *get_signaler ();
-
- // Processes commands sent to this thread (if any). If 'block' is
- // set to true, returns only after at least one command was processed.
- // If throttle argument is true, commands are processed at most once
- // in a predefined time period. The function returns false is the
- // associated context was terminated, true otherwise.
- bool process_commands (bool block_, bool throttle_);
-
- // Create a socket of a specified type.
- class socket_base_t *create_socket (int type_);
-
- // Unregister the socket from the app_thread (called by socket itself).
- void remove_socket (class socket_base_t *socket_);
-
- // Returns true is the associated context was already terminated.
- bool is_terminated ();
-
- private:
-
- // Command handlers.
- void process_stop ();
-
- // All the sockets created from this application thread.
- typedef yarray_t <socket_base_t> sockets_t;
- sockets_t sockets;
-
- // App thread's signaler object.
- signaler_t signaler;
-
- // Timestamp of when commands were processed the last time.
- uint64_t last_processing_time;
-
- // If true, 'stop' command was already received.
- bool terminated;
-
- app_thread_t (const app_thread_t&);
- void operator = (const app_thread_t&);
- };
-
-}
-
-#endif
diff --git a/src/yarray.hpp b/src/array.hpp
index 8c79b99..a144049 100644
--- a/src/yarray.hpp
+++ b/src/array.hpp
@@ -17,8 +17,8 @@
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#ifndef __ZMQ_YARRAY_INCLUDED__
-#define __ZMQ_YARRAY_INCLUDED__
+#ifndef __ZMQ_ARRAY_INCLUDED__
+#define __ZMQ_ARRAY_INCLUDED__
#include <vector>
#include <algorithm>
@@ -26,21 +26,57 @@
namespace zmq
{
+ // Base class for objects stored in the array. Note that each object can
+ // be stored in at most one array.
+
+ class array_item_t
+ {
+ public:
+
+ inline array_item_t () :
+ array_index (-1)
+ {
+ }
+
+ // The destructor doesn't have to be virtual. It is made virtual
+ // just to keep ICC and code checking tools from complaining.
+ inline virtual ~array_item_t ()
+ {
+ }
+
+ inline void set_array_index (int index_)
+ {
+ array_index = index_;
+ }
+
+ inline int get_array_index ()
+ {
+ return array_index;
+ }
+
+ private:
+
+ int array_index;
+
+ array_item_t (const array_item_t&);
+ void operator = (const array_item_t&);
+ };
+
// Fast array implementation with O(1) access to item, insertion and
- // removal. Yarray stores pointers rather than objects. The objects have
- // to be derived from yarray_item_t class.
+ // removal. Array stores pointers rather than objects. The objects have
+ // to be derived from array_item_t class.
- template <typename T> class yarray_t
+ template <typename T> class array_t
{
public:
typedef typename std::vector <T*>::size_type size_type;
- inline yarray_t ()
+ inline array_t ()
{
}
- inline ~yarray_t ()
+ inline ~array_t ()
{
}
@@ -62,17 +98,17 @@ namespace zmq
inline void push_back (T *item_)
{
if (item_)
- item_->set_yarray_index (items.size ());
+ item_->set_array_index (items.size ());
items.push_back (item_);
}
inline void erase (T *item_) {
- erase (item_->get_yarray_index ());
+ erase (item_->get_array_index ());
}
inline void erase (size_type index_) {
if (items.back ())
- items.back ()->set_yarray_index (index_);
+ items.back ()->set_array_index (index_);
items [index_] = items.back ();
items.pop_back ();
}
@@ -80,9 +116,9 @@ namespace zmq
inline void swap (size_type index1_, size_type index2_)
{
if (items [index1_])
- items [index1_]->set_yarray_index (index2_);
+ items [index1_]->set_array_index (index2_);
if (items [index2_])
- items [index2_]->set_yarray_index (index1_);
+ items [index2_]->set_array_index (index1_);
std::swap (items [index1_], items [index2_]);
}
@@ -93,7 +129,7 @@ namespace zmq
inline size_type index (T *item_)
{
- return (size_type) item_->get_yarray_index ();
+ return (size_type) item_->get_array_index ();
}
private:
@@ -101,8 +137,8 @@ namespace zmq
typedef std::vector <T*> items_t;
items_t items;
- yarray_t (const yarray_t&);
- void operator = (const yarray_t&);
+ array_t (const array_t&);
+ void operator = (const array_t&);
};
}
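The renamed array_t/array_item_t pair works the same way yarray_t did: each item stores its own index so it can be erased in constant time. A hypothetical usage sketch (widget_t and example() are made up for illustration):

    #include "array.hpp"

    //  Anything stored in an array_t has to derive from array_item_t,
    //  which holds the item's current position in the array.
    class widget_t : public zmq::array_item_t
    {
    };

    void example ()
    {
        zmq::array_t <widget_t> widgets;

        widget_t *w = new widget_t;
        widgets.push_back (w);

        //  O(1) removal: the last element is moved into the freed slot.
        widgets.erase (w);
        delete w;
    }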
diff --git a/src/clock.cpp b/src/clock.cpp
new file mode 100644
index 0000000..432b48e
--- /dev/null
+++ b/src/clock.cpp
@@ -0,0 +1,108 @@
+/*
+ Copyright (c) 2007-2010 iMatix Corporation
+
+ This file is part of 0MQ.
+
+ 0MQ is free software; you can redistribute it and/or modify it under
+ the terms of the Lesser GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ 0MQ is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ Lesser GNU General Public License for more details.
+
+ You should have received a copy of the Lesser GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "clock.hpp"
+#include "platform.hpp"
+#include "likely.hpp"
+#include "config.hpp"
+#include "err.hpp"
+
+#include <stddef.h>
+
+#if !defined ZMQ_HAVE_WINDOWS
+#include <sys/time.h>
+#endif
+
+zmq::clock_t::clock_t () :
+ last_tsc (rdtsc ()),
+ last_time (now_us () / 1000)
+{
+}
+
+zmq::clock_t::~clock_t ()
+{
+}
+
+uint64_t zmq::clock_t::now_us ()
+{
+#if defined ZMQ_HAVE_WINDOWS
+
+ // Get the high resolution counter's accuracy.
+ LARGE_INTEGER ticksPerSecond;
+ QueryPerformanceFrequency (&ticksPerSecond);
+
+ // What time is it?
+ LARGE_INTEGER tick;
+ QueryPerformanceCounter (&tick);
+
+ // Convert the tick number into the number of seconds
+ // since the system was started.
+ double ticks_div = (double) (ticksPerSecond.QuadPart / 1000000);
+ return (uint64_t) (tick.QuadPart / ticks_div);
+
+#else
+
+ // Use POSIX gettimeofday function to get precise time.
+ struct timeval tv;
+ int rc = gettimeofday (&tv, NULL);
+ errno_assert (rc == 0);
+ return (tv.tv_sec * (uint64_t) 1000000 + tv.tv_usec);
+
+#endif
+}
+
+uint64_t zmq::clock_t::now_ms ()
+{
+ uint64_t tsc = rdtsc ();
+
+ // If TSC is not supported, get precise time and chop off the microseconds.
+ if (!tsc)
+ return now_us () / 1000;
+
+ // If the TSC hasn't jumped back (in case of migration to a different
+ // CPU core) and if not too much time has elapsed since the last
+ // measurement, we can return the cached time value.
+ if (likely (tsc - last_tsc <= (clock_precision / 2) && tsc >= last_tsc))
+ return last_time;
+
+ last_tsc = tsc;
+ last_time = now_us () / 1000;
+ return last_time;
+}
+
+uint64_t zmq::clock_t::rdtsc ()
+{
+#if (defined _MSC_VER && (defined _M_IX86 || defined _M_X64))
+ return __rdtsc ();
+#elif (defined __GNUC__ && (defined __i386__ || defined __x86_64__))
+ uint32_t low, high;
+ __asm__ volatile ("rdtsc" : "=a" (low), "=d" (high));
+ return (uint64_t) high << 32 | low;
+#elif (defined __SUNPRO_CC && (__SUNPRO_CC >= 0x5100) && (defined __i386 || \
+ defined __amd64 || defined __x86_64))
+ union {
+ uint64_t u64val;
+ uint32_t u32val [2];
+ } tsc;
+ asm("rdtsc" : "=a" (tsc.u32val [0]), "=d" (tsc.u32val [1]));
+ return tsc.u64val;
+#else
+ return 0;
+#endif
+}
diff --git a/src/zmq_decoder.hpp b/src/clock.hpp
index c1e3e3e..6dc811f 100644
--- a/src/zmq_decoder.hpp
+++ b/src/clock.hpp
@@ -17,43 +17,43 @@
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#ifndef __ZMQ_ZMQ_DECODER_HPP_INCLUDED__
-#define __ZMQ_ZMQ_DECODER_HPP_INCLUDED__
+#ifndef __ZMQ_CLOCK_HPP_INCLUDED__
+#define __ZMQ_CLOCK_HPP_INCLUDED__
-#include "../include/zmq.h"
-
-#include "decoder.hpp"
-#include "blob.hpp"
+#include "stdint.hpp"
namespace zmq
{
- // Decoder for 0MQ backend protocol. Converts data batches into messages.
- class zmq_decoder_t : public decoder_t <zmq_decoder_t>
+ class clock_t
{
public:
- zmq_decoder_t (size_t bufsize_);
- ~zmq_decoder_t ();
+ clock_t ();
+ ~clock_t ();
+
+ // CPU's timestamp counter. Returns 0 if it's not available.
+ static uint64_t rdtsc ();
- void set_inout (struct i_inout *destination_);
+ // High precision timestamp.
+ static uint64_t now_us ();
+
+ // Low precision timestamp. In tight loops generating it can be
+ // 10 to 100 times faster than the high precision timestamp.
+ uint64_t now_ms ();
private:
- bool one_byte_size_ready ();
- bool eight_byte_size_ready ();
- bool flags_ready ();
- bool message_ready ();
+ // TSC timestamp of when last time measurement was made.
+ uint64_t last_tsc;
- struct i_inout *destination;
- unsigned char tmpbuf [8];
- ::zmq_msg_t in_progress;
+ // Physical time corresponding to the TSC above (in milliseconds).
+ uint64_t last_time;
- zmq_decoder_t (const zmq_decoder_t&);
- void operator = (const zmq_decoder_t&);
+ clock_t (const clock_t&);
+ void operator = (const clock_t&);
};
}
#endif
-
diff --git a/src/command.hpp b/src/command.hpp
index 3d00cd7..0c094b9 100644
--- a/src/command.hpp
+++ b/src/command.hpp
@@ -39,8 +39,8 @@ namespace zmq
own,
attach,
bind,
- revive,
- reader_info,
+ activate_reader,
+ activate_writer,
pipe_term,
pipe_term_ack,
term_req,
@@ -61,10 +61,11 @@ namespace zmq
// Sent to socket to let it know about the newly created object.
struct {
- class owned_t *object;
+ class own_t *object;
} own;
- // Attach the engine to the session.
+ // Attach the engine to the session. If engine is NULL, it informs the
+ // session that the connection has failed.
struct {
struct i_engine *engine;
unsigned char peer_identity_size;
@@ -83,14 +84,13 @@ namespace zmq
// Sent by pipe writer to inform dormant pipe reader that there
// are messages in the pipe.
struct {
- } revive;
+ } activate_reader;
- // Sent by pipe reader to inform pipe writer
- // about how many messages it has read so far.
- // Used to implement the flow control.
+ // Sent by pipe reader to inform pipe writer about how many
+ // messages it has read so far.
struct {
uint64_t msgs_read;
- } reader_info;
+ } activate_writer;
// Sent by pipe reader to pipe writer to ask it to terminate
// its end of the pipe.
@@ -104,11 +104,12 @@ namespace zmq
// Sent by I/O object ot the socket to request the shutdown of
// the I/O object.
struct {
- class owned_t *object;
+ class own_t *object;
} term_req;
// Sent by socket to I/O object to start its shutdown.
struct {
+ int linger;
} term;
// Sent by I/O object to the socket to acknowledge it has
diff --git a/src/config.hpp b/src/config.hpp
index 2c0ac2d..4a0ad08 100644
--- a/src/config.hpp
+++ b/src/config.hpp
@@ -27,18 +27,17 @@ namespace zmq
enum
{
- // Maximal number of OS threads that can own 0MQ sockets
- // at the same time.
- max_app_threads = 512,
+ // Maximum number of sockets that can be opened at the same time.
+ max_sockets = 512,
// Number of new messages in message pipe needed to trigger new memory
// allocation. Setting this parameter to 256 decreases the impact of
// memory allocation by approximately 99.6%
message_pipe_granularity = 256,
- // Number of signals that can be read by the signaler
- // using a single system call.
- signal_buffer_size = 8,
+ // Socketpair send buffer size used by signaler. The default value of
+ // zero means leave it at the system default.
+ signaler_sndbuf_size = 0,
// Determines how often does socket poll for new commands when it
// still has unprocessed messages to handle. Thus, if it is set to 100,
@@ -63,19 +62,28 @@ namespace zmq
// Maximal delta between high and low watermark.
max_wm_delta = 1024,
+ // Swap intelligently batches data for writing to disk. The size of
+ // the batch in bytes is specified by this option.
+ swap_block_size = 8192,
+
// Maximum number of events the I/O thread can process in one go.
max_io_events = 256,
- // Maximal wait time for a timer (milliseconds).
- max_timer_period = 100,
+ // Should initial connection attempts be delayed?
+ wait_before_connect = false,
// Maximal delay to process command in API thread (in CPU ticks).
// 3,000,000 ticks equals to 1 - 2 milliseconds on current CPUs.
+ // Note that the delay is only applied when there is a continuous stream
+ // of messages to process. Otherwise, commands are processed immediately.
max_command_delay = 3000000,
- // Maximal number of non-accepted connections that can be held by
- // TCP listener object.
- tcp_connection_backlog = 10,
+ // Precision of the low-precision clock, in CPU ticks (about 1ms). A value
+ // of 1000000 should be OK for CPU frequencies above 1GHz. It should work
+ // reasonably well for CPU frequencies above 500MHz. For lower CPU
+ // frequencies you may consider lowering this value to get the best
+ // possible latencies.
+ clock_precision = 1000000,
// Maximum transport data unit size for PGM (TPDU).
pgm_max_tpdu = 1500
diff --git a/src/connect_session.cpp b/src/connect_session.cpp
new file mode 100644
index 0000000..4d8e322
--- /dev/null
+++ b/src/connect_session.cpp
@@ -0,0 +1,117 @@
+/*
+ Copyright (c) 2007-2010 iMatix Corporation
+
+ This file is part of 0MQ.
+
+ 0MQ is free software; you can redistribute it and/or modify it under
+ the terms of the Lesser GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ 0MQ is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ Lesser GNU General Public License for more details.
+
+ You should have received a copy of the Lesser GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "connect_session.hpp"
+#include "zmq_connecter.hpp"
+#include "pgm_sender.hpp"
+#include "pgm_receiver.hpp"
+
+zmq::connect_session_t::connect_session_t (class io_thread_t *io_thread_,
+ class socket_base_t *socket_, const options_t &options_,
+ const char *protocol_, const char *address_) :
+ session_t (io_thread_, socket_, options_),
+ protocol (protocol_),
+ address (address_)
+{
+}
+
+zmq::connect_session_t::~connect_session_t ()
+{
+}
+
+void zmq::connect_session_t::process_plug ()
+{
+ // Start connection process immediately.
+ start_connecting ();
+}
+
+void zmq::connect_session_t::start_connecting ()
+{
+ // Choose I/O thread to run connecter in. Given that we are already
+ // running in an I/O thread, there must be at least one available.
+ io_thread_t *io_thread = choose_io_thread (options.affinity);
+ zmq_assert (io_thread);
+
+ // Create the connecter object.
+
+ // Both TCP and IPC transports are using the same infrastructure.
+ if (protocol == "tcp" || protocol == "ipc") {
+
+ zmq_connecter_t *connecter = new (std::nothrow) zmq_connecter_t (
+ io_thread, this, options, protocol.c_str (), address.c_str ());
+ zmq_assert (connecter);
+ launch_child (connecter);
+ return;
+ }
+
+#if defined ZMQ_HAVE_OPENPGM
+
+ // Both PGM and EPGM transports are using the same infrastructure.
+ if (protocol == "pgm" || protocol == "epgm") {
+
+ // For the EPGM transport, UDP encapsulation of PGM is used.
+ bool udp_encapsulation = (protocol == "epgm");
+
+ // At this point we'll create message pipes to the session straight
+ // away. There's no point in delaying it as no concept of 'connect'
+ // exists with PGM anyway.
+ if (options.requires_out) {
+
+ // PGM sender.
+ pgm_sender_t *pgm_sender = new (std::nothrow) pgm_sender_t (
+ io_thread, options);
+ zmq_assert (pgm_sender);
+
+ int rc = pgm_sender->init (udp_encapsulation, address.c_str ());
+ zmq_assert (rc == 0);
+
+ send_attach (this, pgm_sender, blob_t ());
+ }
+ else if (options.requires_in) {
+
+ // PGM receiver.
+ pgm_receiver_t *pgm_receiver = new (std::nothrow) pgm_receiver_t (
+ io_thread, options);
+ zmq_assert (pgm_receiver);
+
+ int rc = pgm_receiver->init (udp_encapsulation, address.c_str ());
+ zmq_assert (rc == 0);
+
+ send_attach (this, pgm_receiver, blob_t ());
+ }
+ else
+ zmq_assert (false);
+
+ return;
+ }
+#endif
+
+ zmq_assert (false);
+}
+
+void zmq::connect_session_t::attached (const blob_t &peer_identity_)
+{
+}
+
+void zmq::connect_session_t::detached ()
+{
+ // Reconnect.
+ start_connecting ();
+}
+
diff --git a/src/connect_session.hpp b/src/connect_session.hpp
new file mode 100644
index 0000000..11aa253
--- /dev/null
+++ b/src/connect_session.hpp
@@ -0,0 +1,64 @@
+/*
+ Copyright (c) 2007-2010 iMatix Corporation
+
+ This file is part of 0MQ.
+
+ 0MQ is free software; you can redistribute it and/or modify it under
+ the terms of the Lesser GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ 0MQ is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ Lesser GNU General Public License for more details.
+
+ You should have received a copy of the Lesser GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#ifndef __ZMQ_CONNECT_SESSION_HPP_INCLUDED__
+#define __ZMQ_CONNECT_SESSION_HPP_INCLUDED__
+
+#include <string>
+
+#include "session.hpp"
+
+namespace zmq
+{
+
+ // Connect session contains an address to connect to. On disconnect it
+ // attempts to reconnect.
+
+ class connect_session_t : public session_t
+ {
+ public:
+
+ connect_session_t (class io_thread_t *io_thread_,
+ class socket_base_t *socket_, const options_t &options_,
+ const char *protocol_, const char *address_);
+ ~connect_session_t ();
+
+ private:
+
+ // Handlers for events from session base class.
+ void attached (const blob_t &peer_identity_);
+ void detached ();
+
+ // Start the connection process.
+ void start_connecting ();
+
+ // Command handlers.
+ void process_plug ();
+
+ // Address to connect to.
+ std::string protocol;
+ std::string address;
+
+ connect_session_t (const connect_session_t&);
+ void operator = (const connect_session_t&);
+ };
+
+}
+
+#endif
diff --git a/src/ctx.cpp b/src/ctx.cpp
index 397f692..f6e62a7 100644
--- a/src/ctx.cpp
+++ b/src/ctx.cpp
@@ -20,11 +20,8 @@
#include <new>
#include <string.h>
-#include "../include/zmq.h"
-
#include "ctx.hpp"
#include "socket_base.hpp"
-#include "app_thread.hpp"
#include "io_thread.hpp"
#include "platform.hpp"
#include "err.hpp"
@@ -32,62 +29,60 @@
#if defined ZMQ_HAVE_WINDOWS
#include "windows.h"
+#else
+#include "unistd.h"
#endif
zmq::ctx_t::ctx_t (uint32_t io_threads_) :
- sockets (0),
- terminated (false)
+ no_sockets_notify (false)
{
+ int rc;
+
#ifdef ZMQ_HAVE_WINDOWS
// Initialise Windows sockets. Note that WSAStartup can be called multiple
// times given that WSACleanup will be called for each WSAStartup.
WORD version_requested = MAKEWORD (2, 2);
WSADATA wsa_data;
- int rc = WSAStartup (version_requested, &wsa_data);
+ rc = WSAStartup (version_requested, &wsa_data);
zmq_assert (rc == 0);
zmq_assert (LOBYTE (wsa_data.wVersion) == 2 &&
HIBYTE (wsa_data.wVersion) == 2);
#endif
// Initialise the array of signalers.
- signalers_count = max_app_threads + io_threads_;
- signalers = (signaler_t**) malloc (sizeof (signaler_t*) * signalers_count);
- zmq_assert (signalers);
- memset (signalers, 0, sizeof (signaler_t*) * signalers_count);
+ slot_count = max_sockets + io_threads_;
+ slots = (signaler_t**) malloc (sizeof (signaler_t*) * slot_count);
+ zmq_assert (slots);
// Create I/O thread objects and launch them.
for (uint32_t i = 0; i != io_threads_; i++) {
io_thread_t *io_thread = new (std::nothrow) io_thread_t (this, i);
zmq_assert (io_thread);
io_threads.push_back (io_thread);
- signalers [i] = io_thread->get_signaler ();
+ slots [i] = io_thread->get_signaler ();
io_thread->start ();
}
-}
-int zmq::ctx_t::term ()
-{
- // First send stop command to application threads so that any
- // blocking calls are interrupted.
- for (app_threads_t::size_type i = 0; i != app_threads.size (); i++)
- app_threads [i].app_thread->stop ();
-
- // Then mark context as terminated.
- term_sync.lock ();
- zmq_assert (!terminated);
- terminated = true;
- bool destroy = (sockets == 0);
- term_sync.unlock ();
-
- // If there are no sockets open, destroy the context immediately.
- if (destroy)
- delete this;
+ // In the unused part of the slot array, create a list of empty slots.
+ for (int32_t i = (int32_t) slot_count - 1;
+ i >= (int32_t) io_threads_; i--) {
+ empty_slots.push_back (i);
+ slots [i] = NULL;
+ }
- return 0;
+ // Create the logging infrastructure.
+ log_socket = create_socket (ZMQ_PUB);
+ zmq_assert (log_socket);
+ rc = log_socket->bind ("sys://log");
+ zmq_assert (rc == 0);
}
zmq::ctx_t::~ctx_t ()
{
+ // Check that there are no remaining open or zombie sockets.
+ zmq_assert (sockets.empty ());
+ zmq_assert (zombies.empty ());
+
// Ask I/O threads to terminate. If the stop signal wasn't sent to the I/O
// thread, a subsequent invocation of the destructor would hang.
for (io_threads_t::size_type i = 0; i != io_threads.size (); i++)
@@ -97,18 +92,10 @@ zmq::ctx_t::~ctx_t ()
for (io_threads_t::size_type i = 0; i != io_threads.size (); i++)
delete io_threads [i];
- // Close all application theads, sockets, io_objects etc.
- for (app_threads_t::size_type i = 0; i != app_threads.size (); i++)
- delete app_threads [i].app_thread;
-
- // Deallocate all the orphaned pipes.
- while (!pipes.empty ())
- delete *pipes.begin ();
-
- // Deallocate the array of pointers to signalers. No special work is
+ // Deallocate the array of slots. No special work is
// needed as signalers themselves were deallocated with their
- // corresponding (app_/io_) thread objects.
- free (signalers);
+ // corresponding io_thread/socket objects.
+ free (slots);
#ifdef ZMQ_HAVE_WINDOWS
// On Windows, uninitialise socket layer.
@@ -117,116 +104,129 @@ zmq::ctx_t::~ctx_t ()
#endif
}
-zmq::socket_base_t *zmq::ctx_t::create_socket (int type_)
+int zmq::ctx_t::terminate ()
{
- app_threads_sync.lock ();
-
- // Find whether the calling thread has app_thread_t object associated
- // already. At the same time find an unused app_thread_t so that it can
- // be used if there's no associated object for the calling thread.
- // Check whether thread ID is already assigned. If so, return it.
- app_threads_t::size_type unused = app_threads.size ();
- app_threads_t::size_type current;
- for (current = 0; current != app_threads.size (); current++) {
- if (app_threads [current].associated &&
- thread_t::equal (thread_t::id (), app_threads [current].tid))
- break;
- if (!app_threads [current].associated)
- unused = current;
- }
+ // Close the logging infrastructure.
+ log_sync.lock ();
+ int rc = log_socket->close ();
+ zmq_assert (rc == 0);
+ log_socket = NULL;
+ log_sync.unlock ();
- // If no app_thread_t is associated with the calling thread,
- // associate it with one of the unused app_thread_t objects.
- if (current == app_threads.size ()) {
+ // First send stop command to sockets so that any
+ // blocking calls are interrupted.
+ slot_sync.lock ();
+ for (sockets_t::size_type i = 0; i != sockets.size (); i++)
+ sockets [i]->stop ();
+ if (!sockets.empty ())
+ no_sockets_notify = true;
+ slot_sync.unlock ();
+
+ // Find out whether there are any open sockets to care about.
+ // If there are open sockets, sleep till they are closed. Note that we can
+ // use no_sockets_notify safely out of the critical section as once set
+ // its value is never changed again.
+ if (no_sockets_notify)
+ no_sockets_sync.wait ();
+
+ // Note that the lock won't block anyone here. There's no one else holding
+ // open sockets anyway. The only purpose of the lock is to double-check all
+ // the CPU caches have been synchronised.
+ slot_sync.lock ();
+
+ // At this point there should be no active sockets. What we have is a set
+ // of zombies waiting to be dezombified.
+ zmq_assert (sockets.empty ());
+
+ // Get rid of remaining zombie sockets.
+ while (!zombies.empty ()) {
+ dezombify ();
+
+ // Sleep for 1ms so as not to end up busy-looping in case the I/O threads
+ // are still busy sending data. We could possibly add a grand poll here
+ // (polling for fds associated with all the zombie sockets), but it's
+ // probably not worth implementing.
+#if defined ZMQ_HAVE_WINDOWS
+ Sleep (1);
+#else
+ usleep (1000);
+#endif
+ }
+ slot_sync.unlock ();
- // If all the existing app_threads are already used, create one more.
- if (unused == app_threads.size ()) {
+ // Deallocate the resources.
+ delete this;
- // If max_app_threads limit was reached, return error.
- if (app_threads.size () == max_app_threads) {
- app_threads_sync.unlock ();
- errno = EMTHREAD;
- return NULL;
- }
+ return 0;
+}
- // Create the new application thread proxy object.
- app_thread_info_t info;
- memset (&info, 0, sizeof (info));
- info.associated = false;
- info.app_thread = new (std::nothrow) app_thread_t (this,
- io_threads.size () + app_threads.size ());
- zmq_assert (info.app_thread);
- signalers [io_threads.size () + app_threads.size ()] =
- info.app_thread->get_signaler ();
- app_threads.push_back (info);
- }
+zmq::socket_base_t *zmq::ctx_t::create_socket (int type_)
+{
+ slot_sync.lock ();
- // Incidentally, this works both when there is an unused app_thread
- // and when a new one is created.
- current = unused;
+ // Free the slots, if possible.
+ dezombify ();
- // Associate the selected app_thread with the OS thread.
- app_threads [current].associated = true;
- app_threads [current].tid = thread_t::id ();
+ // If max_sockets limit was reached, return error.
+ if (empty_slots.empty ()) {
+ slot_sync.unlock ();
+ errno = EMFILE;
+ return NULL;
}
- app_thread_t *thread = app_threads [current].app_thread;
- app_threads_sync.unlock ();
+ // Choose a slot for the socket.
+ uint32_t slot = empty_slots.back ();
+ empty_slots.pop_back ();
- socket_base_t *s = thread->create_socket (type_);
- if (!s)
+ // Create the socket and register its signaler.
+ socket_base_t *s = socket_base_t::create (type_, this, slot);
+ if (!s) {
+ empty_slots.push_back (slot);
+ slot_sync.unlock ();
return NULL;
+ }
+ sockets.push_back (s);
+ slots [slot] = s->get_signaler ();
- term_sync.lock ();
- sockets++;
- term_sync.unlock ();
+ slot_sync.unlock ();
return s;
}
-void zmq::ctx_t::destroy_socket ()
+void zmq::ctx_t::zombify_socket (socket_base_t *socket_)
{
- // If zmq_term was already called and there are no more sockets,
- // terminate the whole 0MQ infrastructure.
- term_sync.lock ();
- zmq_assert (sockets > 0);
- sockets--;
- bool destroy = (sockets == 0 && terminated);
- term_sync.unlock ();
-
- if (destroy)
- delete this;
-}
+ // Zombification of a socket basically means that its ownership is transferred
+ // from the application that created it to the context.
-void zmq::ctx_t::no_sockets (app_thread_t *thread_)
-{
- app_threads_sync.lock ();
- app_threads_t::size_type i;
- for (i = 0; i != app_threads.size (); i++)
- if (app_threads [i].app_thread == thread_) {
- app_threads [i].associated = false;
- break;
- }
- zmq_assert (i != app_threads.size ());
- app_threads_sync.unlock ();
-}
+ // Note that the lock provides the memory barrier needed to migrate
+ // the zombie-to-be socket from its native thread to the shared data area
+ // synchronised by slot_sync.
+ slot_sync.lock ();
+ sockets.erase (socket_);
+ zombies.push_back (socket_);
-void zmq::ctx_t::send_command (uint32_t destination_,
- const command_t &command_)
-{
- signalers [destination_]->send (command_);
+ // Try to get rid of at least some zombie sockets at this point.
+ dezombify ();
+
+ // If shutdown thread is interested in notification about no more
+ // open sockets, notify it now.
+ if (sockets.empty () && no_sockets_notify)
+ no_sockets_sync.post ();
+
+ slot_sync.unlock ();
}
-bool zmq::ctx_t::recv_command (uint32_t thread_slot_,
- command_t *command_, bool block_)
+void zmq::ctx_t::send_command (uint32_t slot_, const command_t &command_)
{
- return signalers [thread_slot_]->recv (command_, block_);
+ slots [slot_]->send (command_);
}
zmq::io_thread_t *zmq::ctx_t::choose_io_thread (uint64_t affinity_)
{
+ if (io_threads.empty ())
+ return NULL;
+
// Find the I/O thread with minimum load.
- zmq_assert (io_threads.size () > 0);
int min_load = -1;
io_threads_t::size_type result = 0;
for (io_threads_t::size_type i = 0; i != io_threads.size (); i++) {
@@ -242,29 +242,13 @@ zmq::io_thread_t *zmq::ctx_t::choose_io_thread (uint64_t affinity_)
return io_threads [result];
}
-void zmq::ctx_t::register_pipe (class pipe_t *pipe_)
-{
- pipes_sync.lock ();
- bool inserted = pipes.insert (pipe_).second;
- zmq_assert (inserted);
- pipes_sync.unlock ();
-}
-
-void zmq::ctx_t::unregister_pipe (class pipe_t *pipe_)
-{
- pipes_sync.lock ();
- pipes_t::size_type erased = pipes.erase (pipe_);
- zmq_assert (erased == 1);
- pipes_sync.unlock ();
-}
-
int zmq::ctx_t::register_endpoint (const char *addr_,
socket_base_t *socket_)
{
endpoints_sync.lock ();
- bool inserted = endpoints.insert (std::make_pair (std::string (addr_),
- socket_)).second;
+ bool inserted = endpoints.insert (endpoints_t::value_type (
+ std::string (addr_), socket_)).second;
if (!inserted) {
errno = EADDRINUSE;
endpoints_sync.unlock ();
@@ -315,3 +299,35 @@ zmq::socket_base_t *zmq::ctx_t::find_endpoint (const char *addr_)
return endpoint;
}
+void zmq::ctx_t::log (zmq_msg_t *msg_)
+{
+ // At this point we migrate the log socket to the current thread.
+ // We rely on the mutex to provide the required memory barrier.
+ log_sync.lock ();
+ if (log_socket)
+ log_socket->send (msg_, 0);
+ log_sync.unlock ();
+}
+
+void zmq::ctx_t::dezombify ()
+{
+ // Try to dezombify each zombie in the list. Note that caller is
+ // responsible for calling this method in the slot_sync critical section.
+ for (zombies_t::iterator it = zombies.begin (); it != zombies.end ();) {
+ uint32_t slot = (*it)->get_slot ();
+ if ((*it)->dezombify ()) {
+#if defined _MSC_VER
+
+ // HP implementation of STL requires doing it this way...
+ it = zombies.erase (it);
+#else
+ zombies.erase (it);
+#endif
+ empty_slots.push_back (slot);
+ slots [slot] = NULL;
+ }
+ else
+ it++;
+ }
+}
+
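Taken together, the new ctx_t code makes socket shutdown a two-stage affair: closing a socket only turns it into a zombie owned by the context, and zombies are reaped either opportunistically (create_socket, zombify_socket) or during terminate (). A rough lifecycle sketch against the internal API added above, assuming close () hands the socket to the context via zombify_socket (); error handling is omitted and real applications go through the public zmq_* calls instead.

    #include <new>
    #include "../include/zmq.h"     // for ZMQ_PUB
    #include "ctx.hpp"
    #include "socket_base.hpp"

    // Sketch only; uses the internal ctx_t/socket_base_t API shown above.
    void lifecycle_sketch ()
    {
        zmq::ctx_t *ctx = new (std::nothrow) zmq::ctx_t (1);   // one I/O thread

        zmq::socket_base_t *s = ctx->create_socket (ZMQ_PUB);  // takes a slot
        // ... use the socket ...
        s->close ();        // the socket becomes a zombie owned by the context

        ctx->terminate ();  // waits for open sockets, reaps the zombies,
                            // then deallocates the context itself
    }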
diff --git a/src/ctx.hpp b/src/ctx.hpp
index c96a923..98b4f81 100644
--- a/src/ctx.hpp
+++ b/src/ctx.hpp
@@ -20,13 +20,16 @@
#ifndef __ZMQ_CTX_HPP_INCLUDED__
#define __ZMQ_CTX_HPP_INCLUDED__
-#include <vector>
-#include <set>
#include <map>
+#include <vector>
#include <string>
+#include "../include/zmq.h"
+
#include "signaler.hpp"
+#include "semaphore.hpp"
#include "ypipe.hpp"
+#include "array.hpp"
#include "config.hpp"
#include "mutex.hpp"
#include "stdint.hpp"
@@ -50,94 +53,73 @@ namespace zmq
// no more sockets open it'll cause all the infrastructure to be shut
// down. If there are open sockets still, the deallocation happens
// after the last one is closed.
- int term ();
+ int terminate ();
// Create a socket.
class socket_base_t *create_socket (int type_);
- // Destroy a socket.
- void destroy_socket ();
-
- // Called by app_thread_t when it has no more sockets. The function
- // should disassociate the object from the current OS thread.
- void no_sockets (class app_thread_t *thread_);
+ // Make socket a zombie.
+ void zombify_socket (socket_base_t *socket_);
- // Send command to the destination thread.
- void send_command (uint32_t destination_, const command_t &command_);
-
- // Receive command from another thread.
- bool recv_command (uint32_t thread_slot_, command_t *command_,
- bool block_);
+ // Send command to the destination slot.
+ void send_command (uint32_t slot_, const command_t &command_);
// Returns the I/O thread that is the least busy at the moment.
- // Taskset specifies which I/O threads are eligible (0 = all).
- class io_thread_t *choose_io_thread (uint64_t taskset_);
-
- // All pipes are registered with the context so that even the
- // orphaned pipes can be deallocated on the terminal shutdown.
- void register_pipe (class pipe_t *pipe_);
- void unregister_pipe (class pipe_t *pipe_);
+ // Affinity specifies which I/O threads are eligible (0 = all).
+ // Returns NULL if no I/O thread is available.
+ class io_thread_t *choose_io_thread (uint64_t affinity_);
// Management of inproc endpoints.
int register_endpoint (const char *addr_, class socket_base_t *socket_);
void unregister_endpoints (class socket_base_t *socket_);
class socket_base_t *find_endpoint (const char *addr_);
+ // Logging.
+ void log (zmq_msg_t *msg_);
+
private:
~ctx_t ();
- struct app_thread_info_t
- {
- // If false, 0MQ application thread is free, there's no associated
- // OS thread.
- bool associated;
+ // Sockets belonging to this context.
+ typedef array_t <socket_base_t> sockets_t;
+ sockets_t sockets;
+
+ // List of sockets that were already closed but not yet deallocated.
+ // These sockets still have some pipes and I/O objects attached.
+ typedef std::vector <socket_base_t*> zombies_t;
+ zombies_t zombies;
- // ID of the associated OS thread. If 'associated' is false,
- // this field contains bogus data.
- thread_t::id_t tid;
+ // List of unused slots.
+ typedef std::vector <uint32_t> emtpy_slots_t;
+ emtpy_slots_t empty_slots;
- // Pointer to the 0MQ application thread object.
- class app_thread_t *app_thread;
- };
+ // If true, shutdown thread wants to be informed when there are no
+ // more open sockets. Do so by posting no_sockets_sync semaphore.
+ // Note that this variable is synchronised by slot_sync mutex.
+ bool no_sockets_notify;
- // Application threads.
- typedef std::vector <app_thread_info_t> app_threads_t;
- app_threads_t app_threads;
+ // Object used by zmq_term to wait while all the sockets are closed
+ // by different application threads.
+ semaphore_t no_sockets_sync;
- // Synchronisation of accesses to shared application thread data.
- mutex_t app_threads_sync;
+ // Synchronisation of accesses to global slot-related data:
+ // sockets, zombies, empty_slots, terminated. It also synchronises
+ // access to zombie sockets as such (as opposed to slots) and provides
+ // a memory barrier to ensure that all CPU cores see the same data.
+ mutex_t slot_sync;
+
+ // This function attempts to deallocate as many zombie sockets as
+ // possible. It must be called within a slot_sync critical section.
+ void dezombify ();
// I/O threads.
typedef std::vector <class io_thread_t*> io_threads_t;
io_threads_t io_threads;
// Array of pointers to signalers for both application and I/O threads.
- int signalers_count;
- signaler_t **signalers;
-
- // As pipes may reside in orphaned state in particular moments
- // of the pipe shutdown process, i.e. neither pipe reader nor
- // pipe writer hold reference to the pipe, we have to hold references
- // to all pipes in context so that we can deallocate them
- // during terminal shutdown even though it conincides with the
- // pipe being in the orphaned state.
- typedef std::set <class pipe_t*> pipes_t;
- pipes_t pipes;
-
- // Synchronisation of access to the pipes repository.
- mutex_t pipes_sync;
-
- // Number of sockets alive.
- int sockets;
-
- // If true, zmq_term was already called. When last socket is closed
- // the whole 0MQ infrastructure should be deallocated.
- bool terminated;
-
- // Synchronisation of access to the termination data (socket count
- // and 'terminated' flag).
- mutex_t term_sync;
+ uint32_t slot_count;
+ signaler_t **slots;
// List of inproc endpoints within this context.
typedef std::map <std::string, class socket_base_t*> endpoints_t;
@@ -146,6 +128,11 @@ namespace zmq
// Synchronisation of access to the list of inproc endpoints.
mutex_t endpoints_sync;
+ // PUB socket for logging. The socket is shared among all the threads,
+ // thus it is synchronised by a mutex.
+ class socket_base_t *log_socket;
+ mutex_t log_sync;
+
ctx_t (const ctx_t&);
void operator = (const ctx_t&);
};
diff --git a/src/zmq_decoder.cpp b/src/decoder.cpp
index dcf8e76..1217193 100644
--- a/src/zmq_decoder.cpp
+++ b/src/decoder.cpp
@@ -20,95 +20,105 @@
#include <stdlib.h>
#include <string.h>
-#include "zmq_decoder.hpp"
+#include "decoder.hpp"
#include "i_inout.hpp"
#include "wire.hpp"
#include "err.hpp"
-zmq::zmq_decoder_t::zmq_decoder_t (size_t bufsize_) :
- decoder_t <zmq_decoder_t> (bufsize_),
+zmq::decoder_t::decoder_t (size_t bufsize_) :
+ decoder_base_t <decoder_t> (bufsize_),
destination (NULL)
{
zmq_msg_init (&in_progress);
// At the beginning, read one byte and go to one_byte_size_ready state.
- next_step (tmpbuf, 1, &zmq_decoder_t::one_byte_size_ready);
+ next_step (tmpbuf, 1, &decoder_t::one_byte_size_ready);
}
-zmq::zmq_decoder_t::~zmq_decoder_t ()
+zmq::decoder_t::~decoder_t ()
{
zmq_msg_close (&in_progress);
}
-void zmq::zmq_decoder_t::set_inout (i_inout *destination_)
+void zmq::decoder_t::set_inout (i_inout *destination_)
{
destination = destination_;
}
-bool zmq::zmq_decoder_t::one_byte_size_ready ()
+bool zmq::decoder_t::one_byte_size_ready ()
{
// First byte of size is read. If it is 0xff read 8-byte size.
// Otherwise allocate the buffer for message data and read the
// message data into it.
if (*tmpbuf == 0xff)
- next_step (tmpbuf, 8, &zmq_decoder_t::eight_byte_size_ready);
+ next_step (tmpbuf, 8, &decoder_t::eight_byte_size_ready);
else {
- // TODO: Handle over-sized message decently.
-
// There has to be at least one byte (the flags) in the message.
- zmq_assert (*tmpbuf > 0);
+ if (!*tmpbuf) {
+ decoding_error ();
+ return false;
+ }
// in_progress is initialised at this point so in theory we should
// close it before calling zmq_msg_init_size, however, it's a 0-byte
// message and thus we can treat it as uninitialised...
int rc = zmq_msg_init_size (&in_progress, *tmpbuf - 1);
+ if (rc != 0 && errno == ENOMEM) {
+ decoding_error ();
+ return false;
+ }
errno_assert (rc == 0);
- next_step (tmpbuf, 1, &zmq_decoder_t::flags_ready);
+
+ next_step (tmpbuf, 1, &decoder_t::flags_ready);
}
return true;
}
-bool zmq::zmq_decoder_t::eight_byte_size_ready ()
+bool zmq::decoder_t::eight_byte_size_ready ()
{
// 8-byte size is read. Allocate the buffer for message body and
// read the message data into it.
size_t size = (size_t) get_uint64 (tmpbuf);
- // TODO: Handle over-sized message decently.
-
// There has to be at least one byte (the flags) in the message.
- zmq_assert (size > 0);
-
+ if (!size) {
+ decoding_error ();
+ return false;
+ }
// in_progress is initialised at this point so in theory we should
// close it before calling zmq_msg_init_size, however, it's a 0-byte
// message and thus we can treat it as uninitialised...
int rc = zmq_msg_init_size (&in_progress, size - 1);
+ if (rc != 0 && errno == ENOMEM) {
+ decoding_error ();
+ return false;
+ }
errno_assert (rc == 0);
- next_step (tmpbuf, 1, &zmq_decoder_t::flags_ready);
+ next_step (tmpbuf, 1, &decoder_t::flags_ready);
return true;
}
-bool zmq::zmq_decoder_t::flags_ready ()
+bool zmq::decoder_t::flags_ready ()
{
// Store the flags from the wire into the message structure.
in_progress.flags = tmpbuf [0];
next_step (zmq_msg_data (&in_progress), zmq_msg_size (&in_progress),
- &zmq_decoder_t::message_ready);
+ &decoder_t::message_ready);
return true;
}
-bool zmq::zmq_decoder_t::message_ready ()
+bool zmq::decoder_t::message_ready ()
{
// Message is completely read. Push it further and start reading
// new message. (in_progress is a 0-byte message after this point.)
if (!destination || !destination->write (&in_progress))
return false;
- next_step (tmpbuf, 1, &zmq_decoder_t::one_byte_size_ready);
+ next_step (tmpbuf, 1, &decoder_t::one_byte_size_ready);
return true;
}
diff --git a/src/decoder.hpp b/src/decoder.hpp
index f05f651..ab7d454 100644
--- a/src/decoder.hpp
+++ b/src/decoder.hpp
@@ -27,25 +27,27 @@
#include "err.hpp"
+#include "../include/zmq.h"
+
namespace zmq
{
// Helper base class for decoders that know the amount of data to read
// in advance at any moment. Knowing the amount in advance is a property
- // of the protocol used. Both AMQP and backend protocol are based on
- // size-prefixed paradigm, therefore they are using decoder_t to parse
- // the messages. On the other hand, XML-based transports (like XMPP or
- // SOAP) don't allow for knowing the size of data to read in advance and
- // should use different decoding algorithms.
+ // of the protocol used. The 0MQ framing protocol is based on the
+ // size-prefixed paradigm, which qualifies it to be parsed by this class.
+ // On the other hand, XML-based transports (like XMPP or SOAP) don't allow
+ // for knowing the size of data to read in advance and should use different
+ // decoding algorithms.
//
- // Decoder implements the state machine that parses the incoming buffer.
+ // This class implements the state machine that parses the incoming buffer.
// Derived class should implement individual state machine actions.
- template <typename T> class decoder_t
+ template <typename T> class decoder_base_t
{
public:
- inline decoder_t (size_t bufsize_) :
+ inline decoder_base_t (size_t bufsize_) :
read_pos (NULL),
to_read (0),
next (NULL),
@@ -57,7 +59,7 @@ namespace zmq
// The destructor doesn't have to be virtual. It is made virtual
// just to keep ICC and code checking tools from complaining.
- inline virtual ~decoder_t ()
+ inline virtual ~decoder_base_t ()
{
free (buf);
}
@@ -96,9 +98,13 @@ namespace zmq
read_pos += size_;
to_read -= size_;
- while (!to_read)
- if (!(static_cast <T*> (this)->*next) ())
+ while (!to_read) {
+ if (!(static_cast <T*> (this)->*next) ()) {
+ if (unlikely (!(static_cast <T*> (this)->next)))
+ return (size_t) -1;
return size_;
+ }
+ }
return size_;
}
@@ -107,9 +113,13 @@ namespace zmq
// Try to get more space in the message to fill in.
// If none is available, return.
- while (!to_read)
- if (!(static_cast <T*> (this)->*next) ())
+ while (!to_read) {
+ if (!(static_cast <T*> (this)->*next) ()) {
+ if (unlikely (!(static_cast <T*> (this)->next)))
+ return (size_t) -1;
return pos;
+ }
+ }
// If there are no more data in the buffer, return.
if (pos == size_)
@@ -140,6 +150,13 @@ namespace zmq
next = next_;
}
+ // This function should be called from the derived class to
+ // abort the decoder state machine.
+ inline void decoding_error ()
+ {
+ next = NULL;
+ }
+
private:
unsigned char *read_pos;
@@ -149,6 +166,32 @@ namespace zmq
size_t bufsize;
unsigned char *buf;
+ decoder_base_t (const decoder_base_t&);
+ void operator = (const decoder_base_t&);
+ };
+
+ // Decoder for 0MQ framing protocol. Converts data batches into messages.
+
+ class decoder_t : public decoder_base_t <decoder_t>
+ {
+ public:
+
+ decoder_t (size_t bufsize_);
+ ~decoder_t ();
+
+ void set_inout (struct i_inout *destination_);
+
+ private:
+
+ bool one_byte_size_ready ();
+ bool eight_byte_size_ready ();
+ bool flags_ready ();
+ bool message_ready ();
+
+ struct i_inout *destination;
+ unsigned char tmpbuf [8];
+ ::zmq_msg_t in_progress;
+
decoder_t (const decoder_t&);
void operator = (const decoder_t&);
};
@@ -156,3 +199,4 @@ namespace zmq
}
#endif
+
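The frame format that decoder_t parses (and encoder_t, further below, emits) is a length that counts the flags byte plus the body, then the flags byte, then the body; lengths of 255 and above are escaped as 0xff followed by a 64-bit network-order size. A small sketch that builds such a header, mirroring the logic in one_byte_size_ready/eight_byte_size_ready and encoder_t::message_ready; it illustrates the wire layout and is not code from the library.

    #include <stddef.h>
    #include <stdint.h>

    // Builds the 0MQ 2.x frame header for a message body of body_size_ bytes.
    // Returns the header length: 2 bytes for short messages, 10 otherwise.
    static size_t build_frame_header (unsigned char *hdr_, size_t body_size_,
        unsigned char flags_)
    {
        uint64_t size = (uint64_t) body_size_ + 1;   // flags byte is counted
        if (size < 255) {
            hdr_ [0] = (unsigned char) size;         // one-byte length
            hdr_ [1] = flags_;
            return 2;
        }
        hdr_ [0] = 0xff;                             // escape to 64-bit length
        for (int i = 0; i != 8; i++)                 // network byte order,
            hdr_ [1 + i] =                           // as put_uint64 does
                (unsigned char) (size >> (56 - i * 8));
        hdr_ [9] = flags_;
        return 10;
    }

For example, a 5-byte body with no flags set is framed as 0x06 0x00 followed by the five payload bytes.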
diff --git a/src/devpoll.cpp b/src/devpoll.cpp
index 003f465..fa6f484 100644
--- a/src/devpoll.cpp
+++ b/src/devpoll.cpp
@@ -56,10 +56,6 @@ zmq::devpoll_t::devpoll_t () :
zmq::devpoll_t::~devpoll_t ()
{
worker.stop ();
-
- // Make sure there are no fds registered on shutdown.
- zmq_assert (load.get () == 0);
-
close (devpoll_fd);
}
@@ -84,7 +80,7 @@ zmq::devpoll_t::handle_t zmq::devpoll_t::add_fd (fd_t fd_,
pending_list.push_back (fd_);
// Increase the load metric of the thread.
- load.add (1);
+ adjust_load (1);
return fd_;
}
@@ -97,7 +93,7 @@ void zmq::devpoll_t::rm_fd (handle_t handle_)
fd_table [handle_].valid = false;
// Decrease the load metric of the thread.
- load.sub (1);
+ adjust_load (-1);
}
void zmq::devpoll_t::set_pollin (handle_t handle_)
@@ -128,23 +124,6 @@ void zmq::devpoll_t::reset_pollout (handle_t handle_)
devpoll_ctl (handle_, fd_table [handle_].events);
}
-void zmq::devpoll_t::add_timer (i_poll_events *events_)
-{
- timers.push_back (events_);
-}
-
-void zmq::devpoll_t::cancel_timer (i_poll_events *events_)
-{
- timers_t::iterator it = std::find (timers.begin (), timers.end (), events_);
- if (it != timers.end ())
- timers.erase (it);
-}
-
-int zmq::devpoll_t::get_load ()
-{
- return load.get ();
-}
-
void zmq::devpoll_t::start ()
{
worker.start (worker_routine, this);
@@ -170,31 +149,18 @@ void zmq::devpoll_t::loop ()
fd_table [pending_list [i]].accepted = true;
pending_list.clear ();
- poll_req.dp_fds = &ev_buf [0];
- poll_req.dp_nfds = nfds;
- poll_req.dp_timeout = timers.empty () ? -1 : max_timer_period;
+ // Execute any due timers.
+ int timeout = (int) execute_timers ();
// Wait for events.
+ poll_req.dp_fds = &ev_buf [0];
+ poll_req.dp_nfds = nfds;
+ poll_req.dp_timeout = timeout ? timeout : -1;
int n = ioctl (devpoll_fd, DP_POLL, &poll_req);
if (n == -1 && errno == EINTR)
continue;
errno_assert (n != -1);
- // Handle timer.
- if (!n) {
-
- // Use local list of timers as timer handlers may fill new timers
- // into the original array.
- timers_t t;
- std::swap (timers, t);
-
- // Trigger all the timers.
- for (timers_t::iterator it = t.begin (); it != t.end (); it ++)
- (*it)->timer_event ();
-
- continue;
- }
-
for (int i = 0; i < n; i ++) {
fd_entry_t *fd_ptr = &fd_table [ev_buf [i].fd];
diff --git a/src/devpoll.hpp b/src/devpoll.hpp
index 019d268..c920b81 100644
--- a/src/devpoll.hpp
+++ b/src/devpoll.hpp
@@ -22,21 +22,20 @@
#include "platform.hpp"
-#if defined ZMQ_HAVE_SOLARIS || ZMQ_HAVE_HPUX
+#if defined ZMQ_HAVE_SOLARIS || defined ZMQ_HAVE_HPUX
#include <vector>
#include "fd.hpp"
#include "thread.hpp"
-#include "atomic_counter.hpp"
+#include "poller_base.hpp"
namespace zmq
{
- // Implements socket polling mechanism using the Solaris-specific
- // "/dev/poll" interface.
+ // Implements socket polling mechanism using the "/dev/poll" interface.
- class devpoll_t
+ class devpoll_t : public poller_base_t
{
public:
@@ -52,9 +51,6 @@ namespace zmq
void reset_pollin (handle_t handle_);
void set_pollout (handle_t handle_);
void reset_pollout (handle_t handle_);
- void add_timer (struct i_poll_events *events_);
- void cancel_timer (struct i_poll_events *events_);
- int get_load ();
void start ();
void stop ();
@@ -85,20 +81,12 @@ namespace zmq
// Pollset manipulation function.
void devpoll_ctl (fd_t fd_, short events_);
- // List of all the engines waiting for the timer event.
- typedef std::vector <struct i_poll_events*> timers_t;
- timers_t timers;
-
// If true, thread is in the process of shutting down.
bool stopping;
// Handle of the physical thread doing the I/O work.
thread_t worker;
- // Load of the poller. Currently number of file descriptors
- // registered with the poller.
- atomic_counter_t load;
-
devpoll_t (const devpoll_t&);
void operator = (const devpoll_t&);
};
diff --git a/src/zmq_encoder.cpp b/src/encoder.cpp
index 077286f..be9a2c2 100644
--- a/src/zmq_encoder.cpp
+++ b/src/encoder.cpp
@@ -17,42 +17,42 @@
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#include "zmq_encoder.hpp"
+#include "encoder.hpp"
#include "i_inout.hpp"
#include "wire.hpp"
-zmq::zmq_encoder_t::zmq_encoder_t (size_t bufsize_) :
- encoder_t <zmq_encoder_t> (bufsize_),
+zmq::encoder_t::encoder_t (size_t bufsize_) :
+ encoder_base_t <encoder_t> (bufsize_),
source (NULL)
{
zmq_msg_init (&in_progress);
// Write 0 bytes to the batch and go to message_ready state.
- next_step (NULL, 0, &zmq_encoder_t::message_ready, true);
+ next_step (NULL, 0, &encoder_t::message_ready, true);
}
-zmq::zmq_encoder_t::~zmq_encoder_t ()
+zmq::encoder_t::~encoder_t ()
{
zmq_msg_close (&in_progress);
}
-void zmq::zmq_encoder_t::set_inout (i_inout *source_)
+void zmq::encoder_t::set_inout (i_inout *source_)
{
source = source_;
}
-bool zmq::zmq_encoder_t::size_ready ()
+bool zmq::encoder_t::size_ready ()
{
// Write message body into the buffer.
next_step (zmq_msg_data (&in_progress), zmq_msg_size (&in_progress),
- &zmq_encoder_t::message_ready, false);
+ &encoder_t::message_ready, false);
return true;
}
-bool zmq::zmq_encoder_t::message_ready ()
+bool zmq::encoder_t::message_ready ()
{
// Destroy content of the old message.
- zmq_msg_close(&in_progress);
+ zmq_msg_close (&in_progress);
// Read new message. If there is none, return false.
// Note that new state is set only if write is successful. That way
@@ -75,14 +75,14 @@ bool zmq::zmq_encoder_t::message_ready ()
if (size < 255) {
tmpbuf [0] = (unsigned char) size;
tmpbuf [1] = (in_progress.flags & ~ZMQ_MSG_SHARED);
- next_step (tmpbuf, 2, &zmq_encoder_t::size_ready,
+ next_step (tmpbuf, 2, &encoder_t::size_ready,
!(in_progress.flags & ZMQ_MSG_MORE));
}
else {
tmpbuf [0] = 0xff;
put_uint64 (tmpbuf + 1, size);
tmpbuf [9] = (in_progress.flags & ~ZMQ_MSG_SHARED);
- next_step (tmpbuf, 10, &zmq_encoder_t::size_ready,
+ next_step (tmpbuf, 10, &encoder_t::size_ready,
!(in_progress.flags & ZMQ_MSG_MORE));
}
return true;
diff --git a/src/encoder.hpp b/src/encoder.hpp
index 0d5b6ba..d5997db 100644
--- a/src/encoder.hpp
+++ b/src/encoder.hpp
@@ -20,11 +20,6 @@
#ifndef __ZMQ_ENCODER_HPP_INCLUDED__
#define __ZMQ_ENCODER_HPP_INCLUDED__
-#include "platform.hpp"
-#if defined ZMQ_HAVE_WINDOWS
-#include "windows.hpp"
-#endif
-
#include <stddef.h>
#include <string.h>
#include <stdlib.h>
@@ -32,6 +27,8 @@
#include "err.hpp"
+#include "../include/zmq.h"
+
namespace zmq
{
@@ -39,20 +36,20 @@ namespace zmq
// fills the outgoing buffer. Derived classes should implement individual
// state machine actions.
- template <typename T> class encoder_t
+ template <typename T> class encoder_base_t
{
public:
- inline encoder_t (size_t bufsize_) :
+ inline encoder_base_t (size_t bufsize_) :
bufsize (bufsize_)
{
buf = (unsigned char*) malloc (bufsize_);
zmq_assert (buf);
}
- // The destructor doesn't have to be virtual. It is mad virtual
+ // The destructor doesn't have to be virtual. It is made virtual
// just to keep ICC and code checking tools from complaining.
- inline virtual ~encoder_t ()
+ inline virtual ~encoder_base_t ()
{
free (buf);
}
@@ -153,10 +150,34 @@ namespace zmq
size_t bufsize;
unsigned char *buf;
+ encoder_base_t (const encoder_base_t&);
+ void operator = (const encoder_base_t&);
+ };
+
+ // Encoder for 0MQ framing protocol. Converts messages into data batches.
+
+ class encoder_t : public encoder_base_t <encoder_t>
+ {
+ public:
+
+ encoder_t (size_t bufsize_);
+ ~encoder_t ();
+
+ void set_inout (struct i_inout *source_);
+
+ private:
+
+ bool size_ready ();
+ bool message_ready ();
+
+ struct i_inout *source;
+ ::zmq_msg_t in_progress;
+ unsigned char tmpbuf [10];
+
encoder_t (const encoder_t&);
void operator = (const encoder_t&);
};
-
}
#endif
+
diff --git a/src/epoll.cpp b/src/epoll.cpp
index e22eb8c..fdaa74e 100644
--- a/src/epoll.cpp
+++ b/src/epoll.cpp
@@ -45,9 +45,6 @@ zmq::epoll_t::~epoll_t ()
// Wait till the worker thread exits.
worker.stop ();
- // Make sure there are no fds registered on shutdown.
- zmq_assert (load.get () == 0);
-
close (epoll_fd);
for (retired_t::iterator it = retired.begin (); it != retired.end (); it ++)
delete *it;
@@ -71,7 +68,7 @@ zmq::epoll_t::handle_t zmq::epoll_t::add_fd (fd_t fd_, i_poll_events *events_)
errno_assert (rc != -1);
// Increase the load metric of the thread.
- load.add (1);
+ adjust_load (1);
return pe;
}
@@ -85,7 +82,7 @@ void zmq::epoll_t::rm_fd (handle_t handle_)
retired.push_back (pe);
// Decrease the load metric of the thread.
- load.sub (1);
+ adjust_load (-1);
}
void zmq::epoll_t::set_pollin (handle_t handle_)
@@ -120,24 +117,6 @@ void zmq::epoll_t::reset_pollout (handle_t handle_)
errno_assert (rc != -1);
}
-void zmq::epoll_t::add_timer (i_poll_events *events_)
-{
- timers.push_back (events_);
-}
-
-void zmq::epoll_t::cancel_timer (i_poll_events *events_)
-{
- timers_t::iterator it = std::find (timers.begin (), timers.end (), events_);
- if (it == timers.end ())
- return;
- timers.erase (it);
-}
-
-int zmq::epoll_t::get_load ()
-{
- return load.get ();
-}
-
void zmq::epoll_t::start ()
{
worker.start (worker_routine, this);
@@ -154,31 +133,15 @@ void zmq::epoll_t::loop ()
while (!stopping) {
- // Wait for events.
- int n;
- while (true) {
- n = epoll_wait (epoll_fd, &ev_buf [0], max_io_events,
- timers.empty () ? -1 : max_timer_period);
- if (!(n == -1 && errno == EINTR)) {
- errno_assert (n != -1);
- break;
- }
- }
-
- // Handle timer.
- if (!n) {
-
- // Use local list of timers as timer handlers may fill new timers
- // into the original array.
- timers_t t;
- std::swap (timers, t);
-
- // Trigger all the timers.
- for (timers_t::iterator it = t.begin (); it != t.end (); it ++)
- (*it)->timer_event ();
+ // Execute any due timers.
+ int timeout = (int) execute_timers ();
+ // Wait for events.
+ int n = epoll_wait (epoll_fd, &ev_buf [0], max_io_events,
+ timeout ? timeout : -1);
+ if (n == -1 && errno == EINTR)
continue;
- }
+ errno_assert (n != -1);
for (int i = 0; i < n; i ++) {
poll_entry_t *pe = ((poll_entry_t*) ev_buf [i].data.ptr);
diff --git a/src/epoll.hpp b/src/epoll.hpp
index 38175cb..015e3d8 100644
--- a/src/epoll.hpp
+++ b/src/epoll.hpp
@@ -29,7 +29,7 @@
#include "fd.hpp"
#include "thread.hpp"
-#include "atomic_counter.hpp"
+#include "poller_base.hpp"
namespace zmq
{
@@ -37,7 +37,7 @@ namespace zmq
// This class implements socket polling mechanism using the Linux-specific
// epoll mechanism.
- class epoll_t
+ class epoll_t : public poller_base_t
{
public:
@@ -53,9 +53,6 @@ namespace zmq
void reset_pollin (handle_t handle_);
void set_pollout (handle_t handle_);
void reset_pollout (handle_t handle_);
- void add_timer (struct i_poll_events *events_);
- void cancel_timer (struct i_poll_events *events_);
- int get_load ();
void start ();
void stop ();
@@ -81,20 +78,12 @@ namespace zmq
typedef std::vector <poll_entry_t*> retired_t;
retired_t retired;
- // List of all the engines waiting for the timer event.
- typedef std::vector <struct i_poll_events*> timers_t;
- timers_t timers;
-
// If true, thread is in the process of shutting down.
bool stopping;
// Handle of the physical thread doing the I/O work.
thread_t worker;
- // Load of the poller. Currently number of file descriptors
- // registered with the poller.
- atomic_counter_t load;
-
epoll_t (const epoll_t&);
void operator = (const epoll_t&);
};
diff --git a/src/err.cpp b/src/err.cpp
index 17a9689..a44b7e0 100644
--- a/src/err.cpp
+++ b/src/err.cpp
@@ -22,6 +22,47 @@
#include "err.hpp"
#include "platform.hpp"
+const char *zmq::errno_to_string (int errno_)
+{
+ switch (errno_) {
+#if defined ZMQ_HAVE_WINDOWS
+ case ENOTSUP:
+ return "Not supported";
+ case EPROTONOSUPPORT:
+ return "Protocol not supported";
+ case ENOBUFS:
+ return "No buffer space available";
+ case ENETDOWN:
+ return "Network is down";
+ case EADDRINUSE:
+ return "Address in use";
+ case EADDRNOTAVAIL:
+ return "Address not available";
+ case ECONNREFUSED:
+ return "Connection refused";
+ case EINPROGRESS:
+ return "Operation in progress";
+#endif
+ case EFSM:
+ return "Operation cannot be accomplished in current state";
+ case ENOCOMPATPROTO:
+ return "The protocol is not compatible with the socket type";
+ case ETERM:
+ return "Context was terminated";
+ case EMTHREAD:
+ return "No thread available";
+ default:
+#if defined _MSC_VER
+#pragma warning (push)
+#pragma warning (disable:4996)
+#endif
+ return strerror (errno_);
+#if defined _MSC_VER
+#pragma warning (pop)
+#endif
+ }
+}
+
#ifdef ZMQ_HAVE_WINDOWS
const char *zmq::wsa_error()
diff --git a/src/err.hpp b/src/err.hpp
index 2b76569..a31eb9b 100644
--- a/src/err.hpp
+++ b/src/err.hpp
@@ -35,15 +35,18 @@
#include <netdb.h>
#endif
+namespace zmq
+{
+ const char *errno_to_string (int errno_);
+}
+
#ifdef ZMQ_HAVE_WINDOWS
namespace zmq
{
-
const char *wsa_error ();
void win_error (char *buffer_, size_t buffer_size_);
- void wsa_error_to_errno ();
-
+ void wsa_error_to_errno ();
}
// Provides convenient way to check WSA-style errors on Windows.
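errno_to_string centralises the mapping of both system and 0MQ-specific error codes (EFSM, ETERM, ...) to readable strings; presumably the public zmq_strerror ends up as a thin wrapper over it, though that file is not part of this hunk. A minimal usage sketch based only on the declaration above:

    #include <stdio.h>
    #include "err.hpp"

    void report_error (int err_)
    {
        // Prints e.g. "Operation cannot be accomplished in current state"
        // when err_ is EFSM.
        printf ("error %d: %s\n", err_, zmq::errno_to_string (err_));
    }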
diff --git a/src/fq.cpp b/src/fq.cpp
index 9028853..67c9bb0 100644
--- a/src/fq.cpp
+++ b/src/fq.cpp
@@ -22,29 +22,42 @@
#include "fq.hpp"
#include "pipe.hpp"
#include "err.hpp"
+#include "own.hpp"
-zmq::fq_t::fq_t () :
+zmq::fq_t::fq_t (own_t *sink_) :
active (0),
current (0),
- more (false)
+ more (false),
+ sink (sink_),
+ terminating (false)
{
}
zmq::fq_t::~fq_t ()
{
- for (pipes_t::size_type i = 0; i != pipes.size (); i++)
- pipes [i]->term ();
+ zmq_assert (pipes.empty ());
}
void zmq::fq_t::attach (reader_t *pipe_)
{
+ pipe_->set_event_sink (this);
+
pipes.push_back (pipe_);
pipes.swap (active, pipes.size () - 1);
active++;
+
+ // If we are already terminating, ask the pipe to terminate straight away.
+ if (terminating) {
+ sink->register_term_acks (1);
+ pipe_->terminate ();
+ }
}
-void zmq::fq_t::detach (reader_t *pipe_)
+void zmq::fq_t::terminated (reader_t *pipe_)
{
+ // TODO: This is a problem with session-initiated termination. It breaks
+ // message atomicity. However, for socket-initiated termination it's
+ // just fine.
zmq_assert (!more || pipes [current] != pipe_);
// Remove the pipe from the list; adjust number of active pipes
@@ -55,18 +68,26 @@ void zmq::fq_t::detach (reader_t *pipe_)
current = 0;
}
pipes.erase (pipe_);
+
+ if (terminating)
+ sink->unregister_term_ack ();
}
-void zmq::fq_t::kill (reader_t *pipe_)
+void zmq::fq_t::delimited (reader_t *pipe_)
{
- // Move the pipe to the list of inactive pipes.
- active--;
- if (current == active)
- current = 0;
- pipes.swap (pipes.index (pipe_), active);
}
-void zmq::fq_t::revive (reader_t *pipe_)
+void zmq::fq_t::terminate ()
+{
+ zmq_assert (!terminating);
+ terminating = true;
+
+ sink->register_term_acks (pipes.size ());
+ for (pipes_t::size_type i = 0; i != pipes.size (); i++)
+ pipes [i]->terminate ();
+}
+
+void zmq::fq_t::activated (reader_t *pipe_)
{
// Move the pipe to the list of active pipes.
pipes.swap (pipes.index (pipe_), active);
@@ -84,10 +105,14 @@ int zmq::fq_t::recv (zmq_msg_t *msg_, int flags_)
// Try to fetch new message. If we've already read part of the message
// subsequent part should be immediately available.
bool fetched = pipes [current]->read (msg_);
+
+ // Check the atomicity of the message. If we've already received the
+ // first part of the message we should get the remaining parts
+ // without blocking.
zmq_assert (!(more && !fetched));
- // Note that when message is not fetched, current pipe is killed and
- // replaced by another active pipe. Thus we don't have to increase
+ // Note that when message is not fetched, current pipe is deactivated
+ // and replaced by another active pipe. Thus we don't have to increase
// the 'current' pointer.
if (fetched) {
more = msg_->flags & ZMQ_MSG_MORE;
@@ -98,6 +123,12 @@ int zmq::fq_t::recv (zmq_msg_t *msg_, int flags_)
}
return 0;
}
+ else {
+ active--;
+ pipes.swap (current, active);
+ if (current == active)
+ current = 0;
+ }
}
// No message is available. Initialise the output parameter
@@ -120,8 +151,11 @@ bool zmq::fq_t::has_in ()
for (int count = active; count != 0; count--) {
if (pipes [current]->check_read ())
return true;
- current++;
- if (current >= active)
+
+ // Deactivate the pipe.
+ active--;
+ pipes.swap (current, active);
+ if (current == active)
current = 0;
}
diff --git a/src/fq.hpp b/src/fq.hpp
index 5c699ee..8825d92 100644
--- a/src/fq.hpp
+++ b/src/fq.hpp
@@ -20,7 +20,8 @@
#ifndef __ZMQ_FQ_HPP_INCLUDED__
#define __ZMQ_FQ_HPP_INCLUDED__
-#include "yarray.hpp"
+#include "array.hpp"
+#include "pipe.hpp"
namespace zmq
{
@@ -28,24 +29,28 @@ namespace zmq
// Class manages a set of inbound pipes. On receive it performs fair
// queueing (RFC970) so that senders gone berserk won't cause denial of
// service for decent senders.
- class fq_t
+ class fq_t : public i_reader_events
{
public:
- fq_t ();
+ fq_t (class own_t *sink_);
~fq_t ();
- void attach (class reader_t *pipe_);
- void detach (class reader_t *pipe_);
- void kill (class reader_t *pipe_);
- void revive (class reader_t *pipe_);
+ void attach (reader_t *pipe_);
+ void terminate ();
+
int recv (zmq_msg_t *msg_, int flags_);
bool has_in ();
+ // i_reader_events implementation.
+ void activated (reader_t *pipe_);
+ void terminated (reader_t *pipe_);
+ void delimited (reader_t *pipe_);
+
private:
// Inbound pipes.
- typedef yarray_t <class reader_t> pipes_t;
+ typedef array_t <reader_t> pipes_t;
pipes_t pipes;
// Number of active pipes. All the active pipes are located at the
@@ -59,6 +64,12 @@ namespace zmq
// there are following parts still waiting in the current pipe.
bool more;
+ // Object to send events to.
+ class own_t *sink;
+
+ // If true, termination process is already underway.
+ bool terminating;
+
fq_t (const fq_t&);
void operator = (const fq_t&);
};
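fq_t no longer receives detach/kill/revive calls from the socket; it registers itself as each pipe's event sink and drives shutdown through the owner's term-ack counter, pre-registering one ack per pipe in terminate () and releasing one in every terminated () callback. The same pattern applies to any pipe consumer; a hypothetical single-pipe consumer is sketched below, reusing only the calls visible in fq.cpp/fq.hpp (the class itself is made up).

    #include "pipe.hpp"
    #include "own.hpp"

    // Hypothetical single-pipe consumer following the fq_t pattern.
    class single_reader_t : public zmq::i_reader_events
    {
    public:

        single_reader_t (zmq::own_t *sink_) : pipe (NULL), sink (sink_) {}

        void attach (zmq::reader_t *pipe_)
        {
            pipe = pipe_;
            pipe->set_event_sink (this);    // route pipe events here
        }

        void terminate ()
        {
            sink->register_term_acks (1);   // expect one ack back
            pipe->terminate ();
        }

        // i_reader_events implementation.
        void activated (zmq::reader_t *pipe_) {}
        void delimited (zmq::reader_t *pipe_) {}
        void terminated (zmq::reader_t *pipe_)
        {
            pipe = NULL;
            sink->unregister_term_ack ();   // release the pending ack
        }

    private:

        zmq::reader_t *pipe;
        zmq::own_t *sink;
    };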
diff --git a/src/i_engine.hpp b/src/i_engine.hpp
index ea6b850..e104a9c 100644
--- a/src/i_engine.hpp
+++ b/src/i_engine.hpp
@@ -20,8 +20,6 @@
#ifndef __ZMQ_I_ENGINE_HPP_INCLUDED__
#define __ZMQ_I_ENGINE_HPP_INCLUDED__
-#include <stddef.h>
-
namespace zmq
{
@@ -30,18 +28,23 @@ namespace zmq
virtual ~i_engine () {}
// Plug the engine to the session.
- virtual void plug (struct i_inout *inout_) = 0;
+ virtual void plug (class io_thread_t *io_thread_,
+ struct i_inout *inout_) = 0;
// Unplug the engine from the session.
virtual void unplug () = 0;
- // This method is called by the session to signalise that there
- // are messages to send available.
- virtual void revive () = 0;
+ // Terminate and deallocate the engine. Note that 'detached'
+ // event is not fired on termination.
+ virtual void terminate () = 0;
// This method is called by the session to signalise that more
// messages can be written to the pipe.
- virtual void resume_input () = 0;
+ virtual void activate_in () = 0;
+
+ // This method is called by the session to signalise that there
+ // are messages to send available.
+ virtual void activate_out () = 0;
};
}
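The engine interface now receives its I/O thread at plug time, gains an explicit terminate (), and splits the old revive/resume_input pair into activate_out/activate_in. For illustration, a do-nothing skeleton that satisfies the revised interface; the class name and method bodies are made up, and a real engine would register file descriptors with the I/O thread's poller and shuttle messages through the i_inout sink.

    #include "i_engine.hpp"
    #include "i_inout.hpp"

    // Illustrative skeleton of the revised i_engine interface.
    class null_engine_t : public zmq::i_engine
    {
    public:

        null_engine_t () : sink (NULL) {}

        void plug (zmq::io_thread_t *io_thread_, zmq::i_inout *inout_)
        {
            sink = inout_;       // where to read/write messages from now on
        }

        void unplug ()
        {
            sink = NULL;
        }

        void terminate ()
        {
            // A real engine unplugs itself and deallocates here.
        }

        void activate_in () {}   // session can accept more inbound messages
        void activate_out () {}  // session has outbound messages ready

    private:

        zmq::i_inout *sink;
    };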
diff --git a/src/i_inout.hpp b/src/i_inout.hpp
index 21d1838..60bc518 100644
--- a/src/i_inout.hpp
+++ b/src/i_inout.hpp
@@ -31,28 +31,17 @@ namespace zmq
{
virtual ~i_inout () {}
- // Engine asks to get a message to send to the network.
+ // Engine asks for a message to send to the network.
virtual bool read (::zmq_msg_t *msg_) = 0;
- // Engine sends the incoming message further on downstream.
+ // Engine received a message from the network and sends it further on.
virtual bool write (::zmq_msg_t *msg_) = 0;
- // Flush all the previously written messages downstream.
+ // Flush all the previously written messages.
virtual void flush () = 0;
-
- // Drop all the references to the engine. The parameter is the object
- // to use to reconnect. If reconnection is not required, the argument
- // is set to NULL.
- virtual void detach (class owned_t *reconnecter_) = 0;
- // Returns least loaded I/O thread.
- virtual class io_thread_t *get_io_thread () = 0;
-
- // Return pointer to the owning socket.
- virtual class socket_base_t *get_owner () = 0;
-
- // Return ordinal number of the session.
- virtual uint64_t get_ordinal () = 0;
+ // Engine is dead. Drop all the references to it.
+ virtual void detach () = 0;
};
}
diff --git a/src/i_poll_events.hpp b/src/i_poll_events.hpp
index 6d474b2..8e70921 100644
--- a/src/i_poll_events.hpp
+++ b/src/i_poll_events.hpp
@@ -37,7 +37,7 @@ namespace zmq
virtual void out_event () = 0;
// Called when timer expires.
- virtual void timer_event () = 0;
+ virtual void timer_event (int id_) = 0;
};
}
diff --git a/src/io_object.cpp b/src/io_object.cpp
index 086f173..d2620a6 100644
--- a/src/io_object.cpp
+++ b/src/io_object.cpp
@@ -21,21 +21,35 @@
#include "io_thread.hpp"
#include "err.hpp"
-zmq::io_object_t::io_object_t (io_thread_t *io_thread_)
+zmq::io_object_t::io_object_t (io_thread_t *io_thread_) :
+ poller (NULL)
{
- // Retrieve the poller from the thread we are running in.
- poller = io_thread_->get_poller ();
+ if (io_thread_)
+ plug (io_thread_);
}
zmq::io_object_t::~io_object_t ()
{
}
-void zmq::io_object_t::set_io_thread (io_thread_t *io_thread_)
+void zmq::io_object_t::plug (io_thread_t *io_thread_)
{
+ zmq_assert (io_thread_);
+ zmq_assert (!poller);
+
+ // Retrieve the poller from the thread we are running in.
poller = io_thread_->get_poller ();
}
+void zmq::io_object_t::unplug ()
+{
+ zmq_assert (poller);
+
+ // Forget about the old poller in preparation for being migrated
+ // to a different I/O thread.
+ poller = NULL;
+}
+
zmq::io_object_t::handle_t zmq::io_object_t::add_fd (fd_t fd_)
{
return poller->add_fd (fd_, this);
@@ -66,14 +80,14 @@ void zmq::io_object_t::reset_pollout (handle_t handle_)
poller->reset_pollout (handle_);
}
-void zmq::io_object_t::add_timer ()
+void zmq::io_object_t::add_timer (int timeout_, int id_)
{
- poller->add_timer (this);
+ poller->add_timer (timeout_, this, id_);
}
-void zmq::io_object_t::cancel_timer ()
+void zmq::io_object_t::cancel_timer (int id_)
{
- poller->cancel_timer (this);
+ poller->cancel_timer (this, id_);
}
void zmq::io_object_t::in_event ()
@@ -86,7 +100,7 @@ void zmq::io_object_t::out_event ()
zmq_assert (false);
}
-void zmq::io_object_t::timer_event ()
+void zmq::io_object_t::timer_event (int id_)
{
zmq_assert (false);
}
diff --git a/src/io_object.hpp b/src/io_object.hpp
index 655e7f5..ba69acc 100644
--- a/src/io_object.hpp
+++ b/src/io_object.hpp
@@ -40,15 +40,15 @@ namespace zmq
io_object_t (class io_thread_t *io_thread_ = NULL);
~io_object_t ();
+ // When migrating an object from one I/O thread to another, first
+ // unplug it, then migrate it, then plug it to the new thread.
+ void plug (class io_thread_t *io_thread_);
+ void unplug ();
+
protected:
typedef poller_t::handle_t handle_t;
- // Derived class can init/swap the underlying I/O thread.
- // Caution: Remove all the file descriptors from the old I/O thread
- // before swapping to the new one!
- void set_io_thread (class io_thread_t *io_thread_);
-
// Methods to access underlying poller object.
handle_t add_fd (fd_t fd_);
void rm_fd (handle_t handle_);
@@ -56,13 +56,13 @@ namespace zmq
void reset_pollin (handle_t handle_);
void set_pollout (handle_t handle_);
void reset_pollout (handle_t handle_);
- void add_timer ();
- void cancel_timer ();
+ void add_timer (int timout_, int id_);
+ void cancel_timer (int id_);
// i_poll_events interface implementation.
void in_event ();
void out_event ();
- void timer_event ();
+ void timer_event (int id_);
private:
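Timers are now owned by the poller and identified per object by an integer id: add_timer (timeout, id) schedules a timer in milliseconds and the expiry is delivered back through timer_event (id), with the deadlines feeding the poll loop via execute_timers (). A hypothetical subclass using the new calls; the reconnect theme, the id value and the 100 ms delay are invented for the example.

    #include "io_object.hpp"
    #include "err.hpp"

    // Hypothetical I/O object scheduling a timer with the new API.
    class reconnect_sketch_t : public zmq::io_object_t
    {
        enum { reconnect_timer_id = 1 };   // arbitrary id chosen by the owner

    public:

        reconnect_sketch_t (zmq::io_thread_t *io_thread_) :
            io_object_t (io_thread_)
        {
            add_timer (100, reconnect_timer_id);   // ask for an event in ~100 ms
        }

        void timer_event (int id_)
        {
            zmq_assert (id_ == reconnect_timer_id);
            // ... attempt to reconnect here ...
        }
    };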
diff --git a/src/io_thread.cpp b/src/io_thread.cpp
index fac6961..05a5eb2 100644
--- a/src/io_thread.cpp
+++ b/src/io_thread.cpp
@@ -26,9 +26,8 @@
#include "err.hpp"
#include "ctx.hpp"
-zmq::io_thread_t::io_thread_t (ctx_t *ctx_,
- uint32_t thread_slot_) :
- object_t (ctx_, thread_slot_)
+zmq::io_thread_t::io_thread_t (ctx_t *ctx_, uint32_t slot_) :
+ object_t (ctx_, slot_)
{
poller = new (std::nothrow) poller_t;
zmq_assert (poller);
@@ -72,8 +71,12 @@ void zmq::io_thread_t::in_event ()
// Get the next command. If there is none, exit.
command_t cmd;
- if (!signaler.recv (&cmd, false))
- break;
+ int rc = signaler.recv (&cmd, false);
+ if (rc != 0 && errno == EINTR)
+ continue;
+ if (rc != 0 && errno == EAGAIN)
+ break;
+ errno_assert (rc == 0);
// Process the command.
cmd.destination->process_command (cmd);
@@ -86,7 +89,7 @@ void zmq::io_thread_t::out_event ()
zmq_assert (false);
}
-void zmq::io_thread_t::timer_event ()
+void zmq::io_thread_t::timer_event (int id_)
{
// No timers here. This function is never called.
zmq_assert (false);
diff --git a/src/io_thread.hpp b/src/io_thread.hpp
index 3d832c0..20d4ae3 100644
--- a/src/io_thread.hpp
+++ b/src/io_thread.hpp
@@ -38,7 +38,7 @@ namespace zmq
{
public:
- io_thread_t (class ctx_t *ctx_, uint32_t thread_slot_);
+ io_thread_t (class ctx_t *ctx_, uint32_t slot_);
// Clean-up. If the thread was started, it's necessary to call 'stop'
// before invoking destructor. Otherwise the destructor would hang up.
@@ -56,7 +56,7 @@ namespace zmq
// i_poll_events implementation.
void in_event ();
void out_event ();
- void timer_event ();
+ void timer_event (int id_);
// Used by io_objects to retrieve the associated poller object.
poller_t *get_poller ();
diff --git a/src/kqueue.cpp b/src/kqueue.cpp
index e1fe2fa..15e1b48 100644
--- a/src/kqueue.cpp
+++ b/src/kqueue.cpp
@@ -54,10 +54,6 @@ zmq::kqueue_t::kqueue_t () :
zmq::kqueue_t::~kqueue_t ()
{
worker.stop ();
-
- // Make sure there are no fds registered on shutdown.
- zmq_assert (load.get () == 0);
-
close (kqueue_fd);
}
@@ -74,7 +70,7 @@ void zmq::kqueue_t::kevent_delete (fd_t fd_, short filter_)
{
struct kevent ev;
- EV_SET (&ev, fd_, filter_, EV_DELETE, 0, 0, (kevent_udata_t)NULL);
+ EV_SET (&ev, fd_, filter_, EV_DELETE, 0, 0, (kevent_udata_t) NULL);
int rc = kevent (kqueue_fd, &ev, 1, NULL, 0, NULL);
errno_assert (rc != -1);
}
@@ -90,6 +86,8 @@ zmq::kqueue_t::handle_t zmq::kqueue_t::add_fd (fd_t fd_,
pe->flag_pollout = 0;
pe->reactor = reactor_;
+ adjust_load (1);
+
return pe;
}
@@ -102,6 +100,8 @@ void zmq::kqueue_t::rm_fd (handle_t handle_)
kevent_delete (pe->fd, EVFILT_WRITE);
pe->fd = retired_fd;
retired.push_back (pe);
+
+ adjust_load (-1);
}
void zmq::kqueue_t::set_pollin (handle_t handle_)
@@ -132,23 +132,6 @@ void zmq::kqueue_t::reset_pollout (handle_t handle_)
kevent_delete (pe->fd, EVFILT_WRITE);
}
-void zmq::kqueue_t::add_timer (i_poll_events *events_)
-{
- timers.push_back (events_);
-}
-
-void zmq::kqueue_t::cancel_timer (i_poll_events *events_)
-{
- timers_t::iterator it = std::find (timers.begin (), timers.end (), events_);
- if (it != timers.end ())
- timers.erase (it);
-}
-
-int zmq::kqueue_t::get_load ()
-{
- return load.get ();
-}
-
void zmq::kqueue_t::start ()
{
worker.start (worker_routine, this);
@@ -163,34 +146,18 @@ void zmq::kqueue_t::loop ()
{
while (!stopping) {
- struct kevent ev_buf [max_io_events];
-
- // Compute time interval to wait.
- timespec timeout = {max_timer_period / 1000,
- (max_timer_period % 1000) * 1000000};
+ // Execute any due timers.
+ int timeout = (int) execute_timers ();
// Wait for events.
- int n = kevent (kqueue_fd, NULL, 0,
- &ev_buf [0], max_io_events, timers.empty () ? NULL : &timeout);
+ struct kevent ev_buf [max_io_events];
+ timespec ts = {timeout / 1000, (timeout % 1000) * 1000000};
+ int n = kevent (kqueue_fd, NULL, 0, &ev_buf [0], max_io_events,
+ timeout ? &ts: NULL);
if (n == -1 && errno == EINTR)
continue;
errno_assert (n != -1);
- // Handle timer.
- if (!n) {
-
- // Use local list of timers as timer handlers may fill new timers
- // into the original array.
- timers_t t;
- std::swap (timers, t);
-
- // Trigger all the timers.
- for (timers_t::iterator it = t.begin (); it != t.end (); it ++)
- (*it)->timer_event ();
-
- continue;
- }
-
for (int i = 0; i < n; i ++) {
poll_entry_t *pe = (poll_entry_t*) ev_buf [i].udata;
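
The rewritten loop above delegates timer bookkeeping to poller_base_t: execute_timers () fires everything that is due and returns the number of milliseconds until the next deadline, with zero meaning "no timer pending", which maps to an infinite kevent wait. A rough sketch of that contract, using a std::multimap keyed by absolute deadline; this is an illustration of the pattern, not the actual poller_base_t code:

    #include <stdint.h>
    #include <map>
    #include <utility>

    struct i_timer_events { virtual void timer_event (int id_) = 0; };

    //  Timers ordered by absolute expiry time in milliseconds.
    typedef std::multimap <uint64_t, std::pair <i_timer_events*, int> > timers_t;

    //  Fire all expired timers; return ms until the next one, 0 if none.
    static uint64_t run_due_timers (timers_t &timers_, uint64_t now_ms_)
    {
        while (!timers_.empty ()) {
            timers_t::iterator it = timers_.begin ();
            if (it->first > now_ms_)
                return it->first - now_ms_;
            i_timer_events *sink = it->second.first;
            int id = it->second.second;
            timers_.erase (it);
            sink->timer_event (id);      //  May re-register a new timer.
        }
        return 0;                        //  Caller interprets 0 as "wait forever".
    }
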
diff --git a/src/kqueue.hpp b/src/kqueue.hpp
index ac28a7d..47d6b74 100644
--- a/src/kqueue.hpp
+++ b/src/kqueue.hpp
@@ -29,7 +29,7 @@
#include "fd.hpp"
#include "thread.hpp"
-#include "atomic_counter.hpp"
+#include "poller_base.hpp"
namespace zmq
{
@@ -37,7 +37,7 @@ namespace zmq
// Implements socket polling mechanism using the BSD-specific
// kqueue interface.
- class kqueue_t
+ class kqueue_t : public poller_base_t
{
public:
@@ -53,9 +53,6 @@ namespace zmq
void reset_pollin (handle_t handle_);
void set_pollout (handle_t handle_);
void reset_pollout (handle_t handle_);
- void add_timer (struct i_poll_events *events_);
- void cancel_timer (struct i_poll_events *events_);
- int get_load ();
void start ();
void stop ();
@@ -88,20 +85,12 @@ namespace zmq
typedef std::vector <poll_entry_t*> retired_t;
retired_t retired;
- // List of all the engines waiting for the timer event.
- typedef std::vector <struct i_poll_events*> timers_t;
- timers_t timers;
-
// If true, thread is in the process of shutting down.
bool stopping;
// Handle of the physical thread doing the I/O work.
thread_t worker;
- // Load of the poller. Currently number of file descriptors
- // registered with the poller.
- atomic_counter_t load;
-
kqueue_t (const kqueue_t&);
void operator = (const kqueue_t&);
};
diff --git a/src/lb.cpp b/src/lb.cpp
index ca93ba2..62d0680 100644
--- a/src/lb.cpp
+++ b/src/lb.cpp
@@ -22,31 +22,48 @@
#include "lb.hpp"
#include "pipe.hpp"
#include "err.hpp"
+#include "own.hpp"
-zmq::lb_t::lb_t () :
+zmq::lb_t::lb_t (own_t *sink_) :
active (0),
current (0),
- more (false)
+ more (false),
+ sink (sink_),
+ terminating (false)
{
}
zmq::lb_t::~lb_t ()
{
- for (pipes_t::size_type i = 0; i != pipes.size (); i++)
- pipes [i]->term ();
+ zmq_assert (pipes.empty ());
}
void zmq::lb_t::attach (writer_t *pipe_)
{
+ pipe_->set_event_sink (this);
+
pipes.push_back (pipe_);
pipes.swap (active, pipes.size () - 1);
active++;
+
+ if (terminating) {
+ sink->register_term_acks (1);
+ pipe_->terminate ();
+ }
}
-void zmq::lb_t::detach (writer_t *pipe_)
+void zmq::lb_t::terminate ()
{
- zmq_assert (!more || pipes [current] != pipe_);
+ zmq_assert (!terminating);
+ terminating = true;
+
+ sink->register_term_acks (pipes.size ());
+ for (pipes_t::size_type i = 0; i != pipes.size (); i++)
+ pipes [i]->terminate ();
+}
+void zmq::lb_t::terminated (writer_t *pipe_)
+{
// Remove the pipe from the list; adjust number of active pipes
// accordingly.
if (pipes.index (pipe_) < active) {
@@ -55,9 +72,12 @@ void zmq::lb_t::detach (writer_t *pipe_)
current = 0;
}
pipes.erase (pipe_);
+
+ if (terminating)
+ sink->unregister_term_ack ();
}
-void zmq::lb_t::revive (writer_t *pipe_)
+void zmq::lb_t::activated (writer_t *pipe_)
{
// Move the pipe to the list of active pipes.
pipes.swap (pipes.index (pipe_), active);
@@ -111,10 +131,10 @@ bool zmq::lb_t::has_out ()
if (pipes [current]->check_write ())
return true;
+ // Deactivate the pipe.
active--;
- if (current < active)
- pipes.swap (current, active);
- else
+ pipes.swap (current, active);
+ if (current == active)
current = 0;
}
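
The reworked has_out () above relies on the array_t convention that the first 'active' entries are the writable pipes: to deactivate the current pipe it is swapped with the last active slot and the active count shrinks by one. A stripped-down sketch of that move using std::vector (the real array_t also patches the index stored inside each element, which is why pipes.swap () is used instead):

    #include <vector>
    #include <algorithm>
    #include <cstddef>

    //  Deactivate the element at 'current' by swapping it past the active region.
    template <typename T>
    static void deactivate (std::vector <T> &items_, std::size_t &active_,
        std::size_t &current_)
    {
        --active_;
        std::swap (items_ [current_], items_ [active_]);
        if (current_ == active_)
            current_ = 0;                //  'current' fell off the active region.
    }
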
diff --git a/src/lb.hpp b/src/lb.hpp
index 526a727..29ea343 100644
--- a/src/lb.hpp
+++ b/src/lb.hpp
@@ -20,30 +20,34 @@
#ifndef __ZMQ_LB_HPP_INCLUDED__
#define __ZMQ_LB_HPP_INCLUDED__
-#include "yarray.hpp"
+#include "array.hpp"
+#include "pipe.hpp"
namespace zmq
{
// Class manages a set of outbound pipes. On send it load balances
// messages fairly among the pipes.
- class lb_t
+ class lb_t : public i_writer_events
{
public:
- lb_t ();
+ lb_t (class own_t *sink_);
~lb_t ();
- void attach (class writer_t *pipe_);
- void detach (class writer_t *pipe_);
- void revive (class writer_t *pipe_);
+ void attach (writer_t *pipe_);
+ void terminate ();
int send (zmq_msg_t *msg_, int flags_);
bool has_out ();
+ // i_writer_events interface implementation.
+ void activated (writer_t *pipe_);
+ void terminated (writer_t *pipe_);
+
private:
// List of outbound pipes.
- typedef yarray_t <class writer_t> pipes_t;
+ typedef array_t <class writer_t> pipes_t;
pipes_t pipes;
// Number of active pipes. All the active pipes are located at the
@@ -56,6 +60,12 @@ namespace zmq
// True if we are in the middle of a multipart message.
bool more;
+ // Object to send events to.
+ class own_t *sink;
+
+ // If true, termination process is already underway.
+ bool terminating;
+
lb_t (const lb_t&);
void operator = (const lb_t&);
};
diff --git a/src/named_session.cpp b/src/named_session.cpp
new file mode 100644
index 0000000..b6a3acf
--- /dev/null
+++ b/src/named_session.cpp
@@ -0,0 +1,84 @@
+/*
+ Copyright (c) 2007-2010 iMatix Corporation
+
+ This file is part of 0MQ.
+
+ 0MQ is free software; you can redistribute it and/or modify it under
+ the terms of the Lesser GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ 0MQ is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ Lesser GNU General Public License for more details.
+
+ You should have received a copy of the Lesser GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "named_session.hpp"
+#include "socket_base.hpp"
+
+zmq::named_session_t::named_session_t (class io_thread_t *io_thread_,
+ socket_base_t *socket_, const options_t &options_,
+ const blob_t &name_) :
+ session_t (io_thread_, socket_, options_),
+ name (name_)
+{
+ // Make doubly sure that the session has a valid name.
+ zmq_assert (!name.empty ());
+ zmq_assert (name [0] != 0);
+
+ if (!socket_->register_session (name, this)) {
+
+ // TODO: There's already a session with the specified
+ // identity. We should log the error and drop the
+ // session.
+ zmq_assert (false);
+ }
+}
+
+zmq::named_session_t::~named_session_t ()
+{
+ // Unregister the session from the global list of named sessions.
+ if (!name.empty ())
+ unregister_session (name);
+}
+
+void zmq::named_session_t::attached (const blob_t &peer_identity_)
+{
+ if (!name.empty ()) {
+
+ // If both IDs are temporary, no checking is needed.
+ // TODO: Old ID should be reused in this case...
+ if (name.empty () || name [0] != 0 ||
+ peer_identity_.empty () || peer_identity_ [0] != 0) {
+
+ // If we already know the peer name, do nothing; just check that
+ // it hasn't changed.
+ zmq_assert (name == peer_identity_);
+ }
+ }
+ else if (!peer_identity_.empty ()) {
+
+ // Store the peer identity.
+ name = peer_identity_;
+
+ // Register the session using the peer name.
+ if (!register_session (name, this)) {
+
+ // TODO: There's already a session with the specified
+ // identity. We should presumably syslog it and drop the
+ // session.
+ zmq_assert (false);
+ }
+ }
+}
+
+void zmq::named_session_t::detached ()
+{
+ // Do nothing. Named sessions are never destroyed because of disconnection,
+ // nor do they have to actively reconnect.
+}
+
diff --git a/src/named_session.hpp b/src/named_session.hpp
new file mode 100644
index 0000000..9c8f814
--- /dev/null
+++ b/src/named_session.hpp
@@ -0,0 +1,56 @@
+/*
+ Copyright (c) 2007-2010 iMatix Corporation
+
+ This file is part of 0MQ.
+
+ 0MQ is free software; you can redistribute it and/or modify it under
+ the terms of the Lesser GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ 0MQ is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ Lesser GNU General Public License for more details.
+
+ You should have received a copy of the Lesser GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#ifndef __ZMQ_NAMED_SESSION_HPP_INCLUDED__
+#define __ZMQ_NAMED_SESSION_HPP_INCLUDED__
+
+#include "session.hpp"
+#include "blob.hpp"
+
+namespace zmq
+{
+
+ // A named session is created by the listener object when the peer identifies
+ // itself by a strong name. A named session survives reconnections.
+
+ class named_session_t : public session_t
+ {
+ public:
+
+ named_session_t (class io_thread_t *io_thread_,
+ class socket_base_t *socket_, const options_t &options_,
+ const blob_t &name_);
+ ~named_session_t ();
+
+ // Handlers for events from session base class.
+ void attached (const blob_t &peer_identity_);
+ void detached ();
+
+ private:
+
+ // Name of the session. Corresponds to the peer's strong identity.
+ blob_t name;
+
+ named_session_t (const named_session_t&);
+ void operator = (const named_session_t&);
+ };
+
+}
+
+#endif
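
From the application's point of view, whether the remote end creates a named_session_t or a transient one is driven by the identity the peer announces. A hedged sketch of the client side, assuming <zmq.h> and <assert.h> are included and 's' is an already-created 0MQ socket of this series; a non-empty identity that does not start with a zero byte is the "strong name" the comment above refers to:

    //  'client-42' becomes the session name on the remote end, so the session
    //  (and any messages queued for it) survives a reconnect.
    const char identity [] = "client-42";
    int rc = zmq_setsockopt (s, ZMQ_IDENTITY, identity, sizeof (identity) - 1);
    assert (rc == 0);
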
diff --git a/src/object.cpp b/src/object.cpp
index c8e2355..9a4bd74 100644
--- a/src/object.cpp
+++ b/src/object.cpp
@@ -24,19 +24,18 @@
#include "err.hpp"
#include "pipe.hpp"
#include "io_thread.hpp"
-#include "owned.hpp"
#include "session.hpp"
#include "socket_base.hpp"
-zmq::object_t::object_t (ctx_t *ctx_, uint32_t thread_slot_) :
+zmq::object_t::object_t (ctx_t *ctx_, uint32_t slot_) :
ctx (ctx_),
- thread_slot (thread_slot_)
+ slot (slot_)
{
}
zmq::object_t::object_t (object_t *parent_) :
ctx (parent_->ctx),
- thread_slot (parent_->thread_slot)
+ slot (parent_->slot)
{
}
@@ -44,9 +43,9 @@ zmq::object_t::~object_t ()
{
}
-uint32_t zmq::object_t::get_thread_slot ()
+uint32_t zmq::object_t::get_slot ()
{
- return thread_slot;
+ return slot;
}
zmq::ctx_t *zmq::object_t::get_ctx ()
@@ -58,8 +57,12 @@ void zmq::object_t::process_command (command_t &cmd_)
{
switch (cmd_.type) {
- case command_t::revive:
- process_revive ();
+ case command_t::activate_reader:
+ process_activate_reader ();
+ break;
+
+ case command_t::activate_writer:
+ process_activate_writer (cmd_.args.activate_writer.msgs_read);
break;
case command_t::stop:
@@ -90,10 +93,6 @@ void zmq::object_t::process_command (command_t &cmd_)
process_seqnum ();
break;
- case command_t::reader_info:
- process_reader_info (cmd_.args.reader_info.msgs_read);
- break;
-
case command_t::pipe_term:
process_pipe_term ();
return;
@@ -107,7 +106,7 @@ void zmq::object_t::process_command (command_t &cmd_)
break;
case command_t::term:
- process_term ();
+ process_term (cmd_.args.term.linger);
break;
case command_t::term_ack:
@@ -123,16 +122,6 @@ void zmq::object_t::process_command (command_t &cmd_)
deallocate_command (&cmd_);
}
-void zmq::object_t::register_pipe (class pipe_t *pipe_)
-{
- ctx->register_pipe (pipe_);
-}
-
-void zmq::object_t::unregister_pipe (class pipe_t *pipe_)
-{
- ctx->unregister_pipe (pipe_);
-}
-
int zmq::object_t::register_endpoint (const char *addr_, socket_base_t *socket_)
{
return ctx->register_endpoint (addr_, socket_);
@@ -148,9 +137,19 @@ zmq::socket_base_t *zmq::object_t::find_endpoint (const char *addr_)
return ctx->find_endpoint (addr_);
}
-zmq::io_thread_t *zmq::object_t::choose_io_thread (uint64_t taskset_)
+void zmq::object_t::log (zmq_msg_t *msg_)
+{
+ ctx->log (msg_);
+}
+
+zmq::io_thread_t *zmq::object_t::choose_io_thread (uint64_t affinity_)
+{
+ return ctx->choose_io_thread (affinity_);
+}
+
+void zmq::object_t::zombify_socket (socket_base_t *socket_)
{
- return ctx->choose_io_thread (taskset_);
+ ctx->zombify_socket (socket_);
}
void zmq::object_t::send_stop ()
@@ -158,26 +157,35 @@ void zmq::object_t::send_stop ()
// 'stop' command goes always from administrative thread to
// the current object.
command_t cmd;
+#if defined ZMQ_MAKE_VALGRIND_HAPPY
+ memset (&cmd, 0, sizeof (cmd));
+#endif
cmd.destination = this;
cmd.type = command_t::stop;
- ctx->send_command (thread_slot, cmd);
+ ctx->send_command (slot, cmd);
}
-void zmq::object_t::send_plug (owned_t *destination_, bool inc_seqnum_)
+void zmq::object_t::send_plug (own_t *destination_, bool inc_seqnum_)
{
if (inc_seqnum_)
destination_->inc_seqnum ();
command_t cmd;
+#if defined ZMQ_MAKE_VALGRIND_HAPPY
+ memset (&cmd, 0, sizeof (cmd));
+#endif
cmd.destination = destination_;
cmd.type = command_t::plug;
send_command (cmd);
}
-void zmq::object_t::send_own (socket_base_t *destination_, owned_t *object_)
+void zmq::object_t::send_own (own_t *destination_, own_t *object_)
{
destination_->inc_seqnum ();
command_t cmd;
+#if defined ZMQ_MAKE_VALGRIND_HAPPY
+ memset (&cmd, 0, sizeof (cmd));
+#endif
cmd.destination = destination_;
cmd.type = command_t::own;
cmd.args.own.object = object_;
@@ -191,6 +199,9 @@ void zmq::object_t::send_attach (session_t *destination_, i_engine *engine_,
destination_->inc_seqnum ();
command_t cmd;
+#if defined ZMQ_MAKE_VALGRIND_HAPPY
+ memset (&cmd, 0, sizeof (cmd));
+#endif
cmd.destination = destination_;
cmd.type = command_t::attach;
cmd.args.attach.engine = engine_;
@@ -211,14 +222,16 @@ void zmq::object_t::send_attach (session_t *destination_, i_engine *engine_,
send_command (cmd);
}
-void zmq::object_t::send_bind (socket_base_t *destination_,
- reader_t *in_pipe_, writer_t *out_pipe_, const blob_t &peer_identity_,
- bool inc_seqnum_)
+void zmq::object_t::send_bind (own_t *destination_, reader_t *in_pipe_,
+ writer_t *out_pipe_, const blob_t &peer_identity_, bool inc_seqnum_)
{
if (inc_seqnum_)
destination_->inc_seqnum ();
command_t cmd;
+#if defined ZMQ_MAKE_VALGRIND_HAPPY
+ memset (&cmd, 0, sizeof (cmd));
+#endif
cmd.destination = destination_;
cmd.type = command_t::bind;
cmd.args.bind.in_pipe = in_pipe_;
@@ -240,27 +253,36 @@ void zmq::object_t::send_bind (socket_base_t *destination_,
send_command (cmd);
}
-void zmq::object_t::send_revive (object_t *destination_)
+void zmq::object_t::send_activate_reader (reader_t *destination_)
{
command_t cmd;
+#if defined ZMQ_MAKE_VALGRIND_HAPPY
+ memset (&cmd, 0, sizeof (cmd));
+#endif
cmd.destination = destination_;
- cmd.type = command_t::revive;
+ cmd.type = command_t::activate_reader;
send_command (cmd);
}
-void zmq::object_t::send_reader_info (writer_t *destination_,
+void zmq::object_t::send_activate_writer (writer_t *destination_,
uint64_t msgs_read_)
{
command_t cmd;
+#if defined ZMQ_MAKE_VALGRIND_HAPPY
+ memset (&cmd, 0, sizeof (cmd));
+#endif
cmd.destination = destination_;
- cmd.type = command_t::reader_info;
- cmd.args.reader_info.msgs_read = msgs_read_;
+ cmd.type = command_t::activate_writer;
+ cmd.args.activate_writer.msgs_read = msgs_read_;
send_command (cmd);
}
void zmq::object_t::send_pipe_term (writer_t *destination_)
{
command_t cmd;
+#if defined ZMQ_MAKE_VALGRIND_HAPPY
+ memset (&cmd, 0, sizeof (cmd));
+#endif
cmd.destination = destination_;
cmd.type = command_t::pipe_term;
send_command (cmd);
@@ -269,32 +291,45 @@ void zmq::object_t::send_pipe_term (writer_t *destination_)
void zmq::object_t::send_pipe_term_ack (reader_t *destination_)
{
command_t cmd;
+#if defined ZMQ_MAKE_VALGRIND_HAPPY
+ memset (&cmd, 0, sizeof (cmd));
+#endif
cmd.destination = destination_;
cmd.type = command_t::pipe_term_ack;
send_command (cmd);
}
-void zmq::object_t::send_term_req (socket_base_t *destination_,
- owned_t *object_)
+void zmq::object_t::send_term_req (own_t *destination_,
+ own_t *object_)
{
command_t cmd;
+#if defined ZMQ_MAKE_VALGRIND_HAPPY
+ memset (&cmd, 0, sizeof (cmd));
+#endif
cmd.destination = destination_;
cmd.type = command_t::term_req;
cmd.args.term_req.object = object_;
send_command (cmd);
}
-void zmq::object_t::send_term (owned_t *destination_)
+void zmq::object_t::send_term (own_t *destination_, int linger_)
{
command_t cmd;
+#if defined ZMQ_MAKE_VALGRIND_HAPPY
+ memset (&cmd, 0, sizeof (cmd));
+#endif
cmd.destination = destination_;
cmd.type = command_t::term;
+ cmd.args.term.linger = linger_;
send_command (cmd);
}
-void zmq::object_t::send_term_ack (socket_base_t *destination_)
+void zmq::object_t::send_term_ack (own_t *destination_)
{
command_t cmd;
+#if defined ZMQ_MAKE_VALGRIND_HAPPY
+ memset (&cmd, 0, sizeof (cmd));
+#endif
cmd.destination = destination_;
cmd.type = command_t::term_ack;
send_command (cmd);
@@ -310,7 +345,7 @@ void zmq::object_t::process_plug ()
zmq_assert (false);
}
-void zmq::object_t::process_own (owned_t *object_)
+void zmq::object_t::process_own (own_t *object_)
{
zmq_assert (false);
}
@@ -327,12 +362,12 @@ void zmq::object_t::process_bind (reader_t *in_pipe_, writer_t *out_pipe_,
zmq_assert (false);
}
-void zmq::object_t::process_revive ()
+void zmq::object_t::process_activate_reader ()
{
zmq_assert (false);
}
-void zmq::object_t::process_reader_info (uint64_t msgs_read_)
+void zmq::object_t::process_activate_writer (uint64_t msgs_read_)
{
zmq_assert (false);
}
@@ -347,12 +382,12 @@ void zmq::object_t::process_pipe_term_ack ()
zmq_assert (false);
}
-void zmq::object_t::process_term_req (owned_t *object_)
+void zmq::object_t::process_term_req (own_t *object_)
{
zmq_assert (false);
}
-void zmq::object_t::process_term ()
+void zmq::object_t::process_term (int linger_)
{
zmq_assert (false);
}
@@ -369,6 +404,6 @@ void zmq::object_t::process_seqnum ()
void zmq::object_t::send_command (command_t &cmd_)
{
- ctx->send_command (cmd_.destination->get_thread_slot (), cmd_);
+ ctx->send_command (cmd_.destination->get_slot (), cmd_);
}
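
Every send_* helper above now zeroes the command under ZMQ_MAKE_VALGRIND_HAPPY before filling in only the fields the command type needs; without that, valgrind reports the untouched argument bytes as uninitialised when the struct is copied through the signaler. The pattern in isolation, with an invented struct standing in for the real command_t:

    #include <string.h>

    struct cmd_sketch_t { void *destination; int type; long arg; };

    static void prepare (cmd_sketch_t &cmd_, void *dest_, int type_)
    {
    #if defined ZMQ_MAKE_VALGRIND_HAPPY
        //  Zero even the unused bytes so memory checkers stay quiet.
        memset (&cmd_, 0, sizeof (cmd_));
    #endif
        cmd_.destination = dest_;
        cmd_.type = type_;
        //  'arg' stays untouched for argument-less command types.
    }
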
diff --git a/src/object.hpp b/src/object.hpp
index a38b0a6..9580556 100644
--- a/src/object.hpp
+++ b/src/object.hpp
@@ -20,6 +20,8 @@
#ifndef __ZMQ_OBJECT_HPP_INCLUDED__
#define __ZMQ_OBJECT_HPP_INCLUDED__
+#include "../include/zmq.h"
+
#include "stdint.hpp"
#include "blob.hpp"
@@ -32,18 +34,14 @@ namespace zmq
{
public:
- object_t (class ctx_t *ctx_, uint32_t thread_slot_);
+ object_t (class ctx_t *ctx_, uint32_t slot_);
object_t (object_t *parent_);
virtual ~object_t ();
- uint32_t get_thread_slot ();
+ uint32_t get_slot ();
ctx_t *get_ctx ();
void process_command (struct command_t &cmd_);
- // Allow pipe to access corresponding context functions.
- void register_pipe (class pipe_t *pipe_);
- void unregister_pipe (class pipe_t *pipe_);
-
protected:
// Using following function, socket is able to access global
@@ -52,46 +50,54 @@ namespace zmq
void unregister_endpoints (class socket_base_t *socket_);
class socket_base_t *find_endpoint (const char *addr_);
+ // Logs a message.
+ void log (zmq_msg_t *msg_);
+
// Chooses least loaded I/O thread.
- class io_thread_t *choose_io_thread (uint64_t taskset_);
+ class io_thread_t *choose_io_thread (uint64_t affinity_);
+
+ // Zombify a particular socket; in other words, pass its ownership to
+ // the context.
+ void zombify_socket (class socket_base_t *socket_);
// Derived object can use these functions to send commands
// to other objects.
void send_stop ();
- void send_plug (class owned_t *destination_, bool inc_seqnum_ = true);
- void send_own (class socket_base_t *destination_,
- class owned_t *object_);
+ void send_plug (class own_t *destination_,
+ bool inc_seqnum_ = true);
+ void send_own (class own_t *destination_,
+ class own_t *object_);
void send_attach (class session_t *destination_,
struct i_engine *engine_, const blob_t &peer_identity_,
bool inc_seqnum_ = true);
- void send_bind (class socket_base_t *destination_,
+ void send_bind (class own_t *destination_,
class reader_t *in_pipe_, class writer_t *out_pipe_,
const blob_t &peer_identity_, bool inc_seqnum_ = true);
- void send_revive (class object_t *destination_);
- void send_reader_info (class writer_t *destination_,
+ void send_activate_reader (class reader_t *destination_);
+ void send_activate_writer (class writer_t *destination_,
uint64_t msgs_read_);
void send_pipe_term (class writer_t *destination_);
void send_pipe_term_ack (class reader_t *destination_);
- void send_term_req (class socket_base_t *destination_,
- class owned_t *object_);
- void send_term (class owned_t *destination_);
- void send_term_ack (class socket_base_t *destination_);
+ void send_term_req (class own_t *destination_,
+ class own_t *object_);
+ void send_term (class own_t *destination_, int linger_);
+ void send_term_ack (class own_t *destination_);
// These handlers can be overloaded by the derived objects. They are
// called when command arrives from another thread.
virtual void process_stop ();
virtual void process_plug ();
- virtual void process_own (class owned_t *object_);
+ virtual void process_own (class own_t *object_);
virtual void process_attach (struct i_engine *engine_,
const blob_t &peer_identity_);
virtual void process_bind (class reader_t *in_pipe_,
class writer_t *out_pipe_, const blob_t &peer_identity_);
- virtual void process_revive ();
- virtual void process_reader_info (uint64_t msgs_read_);
+ virtual void process_activate_reader ();
+ virtual void process_activate_writer (uint64_t msgs_read_);
virtual void process_pipe_term ();
virtual void process_pipe_term_ack ();
- virtual void process_term_req (class owned_t *object_);
- virtual void process_term ();
+ virtual void process_term_req (class own_t *object_);
+ virtual void process_term (int linger_);
virtual void process_term_ack ();
// Special handler called after a command that requires a seqnum
@@ -105,7 +111,7 @@ namespace zmq
class ctx_t *ctx;
// Slot ID of the thread the object belongs to.
- uint32_t thread_slot;
+ uint32_t slot;
void send_command (command_t &cmd_);
diff --git a/src/options.cpp b/src/options.cpp
index dcbb51d..ec85269 100644
--- a/src/options.cpp
+++ b/src/options.cpp
@@ -33,6 +33,10 @@ zmq::options_t::options_t () :
use_multicast_loop (true),
sndbuf (0),
rcvbuf (0),
+ type (-1),
+ linger (-1),
+ reconnect_ivl (100),
+ backlog (100),
requires_in (false),
requires_out (false),
immediate_connect (true)
@@ -127,6 +131,35 @@ int zmq::options_t::setsockopt (int option_, const void *optval_,
}
rcvbuf = *((uint64_t*) optval_);
return 0;
+
+ case ZMQ_LINGER:
+ if (optvallen_ != sizeof (int)) {
+ errno = EINVAL;
+ return -1;
+ }
+ linger = *((int*) optval_);
+ return 0;
+
+ case ZMQ_RECONNECT_IVL:
+ if (optvallen_ != sizeof (int)) {
+ errno = EINVAL;
+ return -1;
+ }
+ if (*((int*) optval_) < 0) {
+ errno = EINVAL;
+ return -1;
+ }
+ reconnect_ivl = *((int*) optval_);
+ return 0;
+
+ case ZMQ_BACKLOG:
+ if (optvallen_ != sizeof (int)) {
+ errno = EINVAL;
+ return -1;
+ }
+ backlog = *((int*) optval_);
+ return 0;
+
}
errno = EINVAL;
@@ -218,6 +251,43 @@ int zmq::options_t::getsockopt (int option_, void *optval_, size_t *optvallen_)
*((uint64_t*) optval_) = rcvbuf;
*optvallen_ = sizeof (uint64_t);
return 0;
+
+ case ZMQ_TYPE:
+ if (*optvallen_ < sizeof (int)) {
+ errno = EINVAL;
+ return -1;
+ }
+ *((int*) optval_) = type;
+ *optvallen_ = sizeof (int);
+ return 0;
+
+ case ZMQ_LINGER:
+ if (*optvallen_ < sizeof (int)) {
+ errno = EINVAL;
+ return -1;
+ }
+ *((int*) optval_) = linger;
+ *optvallen_ = sizeof (int);
+ return 0;
+
+ case ZMQ_RECONNECT_IVL:
+ if (*optvallen_ < sizeof (int)) {
+ errno = EINVAL;
+ return -1;
+ }
+ *((int*) optval_) = reconnect_ivl;
+ *optvallen_ = sizeof (int);
+ return 0;
+
+ case ZMQ_BACKLOG:
+ if (*optvallen_ < sizeof (int)) {
+ errno = EINVAL;
+ return -1;
+ }
+ *((int*) optval_) = backlog;
+ *optvallen_ = sizeof (int);
+ return 0;
+
}
errno = EINVAL;
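
All three new writable options follow the same contract as the existing int-valued ones: the caller must pass exactly sizeof (int) bytes, and ZMQ_RECONNECT_IVL additionally rejects negative values with EINVAL. A sketch of how an application might drive them through the public C API of this series; it assumes <zmq.h> and <assert.h> are included and the single-argument zmq_init of this branch, with error handling beyond the asserts elided:

    int main ()
    {
        void *ctx = zmq_init (1);
        void *s = zmq_socket (ctx, ZMQ_PAIR);

        int linger = 0;                  //  Drop unsent messages on close.
        int reconnect_ivl = 500;         //  Retry broken connects every 500 ms.
        int backlog = 25;                //  Pending-connection queue length.
        int rc = zmq_setsockopt (s, ZMQ_LINGER, &linger, sizeof (linger));
        assert (rc == 0);
        rc = zmq_setsockopt (s, ZMQ_RECONNECT_IVL, &reconnect_ivl,
            sizeof (reconnect_ivl));
        assert (rc == 0);
        rc = zmq_setsockopt (s, ZMQ_BACKLOG, &backlog, sizeof (backlog));
        assert (rc == 0);

        //  The new read-only ZMQ_TYPE option reports the type given to zmq_socket.
        int type;
        size_t type_size = sizeof (type);
        rc = zmq_getsockopt (s, ZMQ_TYPE, &type, &type_size);
        assert (rc == 0 && type == ZMQ_PAIR);

        zmq_close (s);
        zmq_term (ctx);
        return 0;
    }
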
diff --git a/src/options.hpp b/src/options.hpp
index 908b166..647e811 100644
--- a/src/options.hpp
+++ b/src/options.hpp
@@ -51,6 +51,18 @@ namespace zmq
uint64_t sndbuf;
uint64_t rcvbuf;
+ // Socket type.
+ int type;
+
+ // Linger time, in milliseconds.
+ int linger;
+
+ // Interval between attempts to reconnect, in milliseconds.
+ int reconnect_ivl;
+
+ // Maximum backlog for pending connections.
+ int backlog;
+
// These options are never set by the user directly. Instead they are
// provided by the specific socket type.
bool requires_in;
diff --git a/src/own.cpp b/src/own.cpp
new file mode 100644
index 0000000..c91650d
--- /dev/null
+++ b/src/own.cpp
@@ -0,0 +1,213 @@
+/*
+ Copyright (c) 2007-2010 iMatix Corporation
+
+ This file is part of 0MQ.
+
+ 0MQ is free software; you can redistribute it and/or modify it under
+ the terms of the Lesser GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ 0MQ is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ Lesser GNU General Public License for more details.
+
+ You should have received a copy of the Lesser GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "own.hpp"
+#include "err.hpp"
+#include "io_thread.hpp"
+
+zmq::own_t::own_t (class ctx_t *parent_, uint32_t slot_) :
+ object_t (parent_, slot_),
+ terminating (false),
+ sent_seqnum (0),
+ processed_seqnum (0),
+ owner (NULL),
+ term_acks (0)
+{
+}
+
+zmq::own_t::own_t (io_thread_t *io_thread_, const options_t &options_) :
+ object_t (io_thread_),
+ options (options_),
+ terminating (false),
+ sent_seqnum (0),
+ processed_seqnum (0),
+ owner (NULL),
+ term_acks (0)
+{
+}
+
+zmq::own_t::~own_t ()
+{
+}
+
+void zmq::own_t::set_owner (own_t *owner_)
+{
+ zmq_assert (!owner);
+ owner = owner_;
+}
+
+void zmq::own_t::inc_seqnum ()
+{
+ // This function may be called from a different thread!
+ sent_seqnum.add (1);
+}
+
+void zmq::own_t::process_seqnum ()
+{
+ // Catch up with counter of processed commands.
+ processed_seqnum++;
+
+ // We may have caught up and still have pending term acks.
+ check_term_acks ();
+}
+
+void zmq::own_t::launch_child (own_t *object_)
+{
+ // Specify the owner of the object.
+ object_->set_owner (this);
+
+ // Plug the object into the I/O thread.
+ send_plug (object_);
+
+ // Take ownership of the object.
+ send_own (this, object_);
+}
+
+void zmq::own_t::launch_sibling (own_t *object_)
+{
+ // At this point it is important that the object is plugged in before its
+ // owner has a chance to terminate it. Thus, the 'plug' command is sent
+ // before the 'own' command. Given that the signaler preserves the ordering
+ // of commands, a 'term' command from the owner cannot reach the object
+ // before the already written 'plug' command.
+
+ // Specify the owner of the object.
+ object_->set_owner (owner);
+
+ // Plug the object into its I/O thread.
+ send_plug (object_);
+
+ // Make parent own the object.
+ send_own (owner, object_);
+}
+
+void zmq::own_t::process_term_req (own_t *object_)
+{
+ // When shutting down we can ignore termination requests from owned
+ // objects. The termination request was already sent to the object.
+ if (terminating)
+ return;
+
+ // If the I/O object is alive and well, let's ask it to terminate.
+ owned_t::iterator it = std::find (owned.begin (), owned.end (), object_);
+
+ // If not found, we assume that termination request was already sent to
+ // the object so we can safely ignore the request.
+ if (it == owned.end ())
+ return;
+
+ owned.erase (it);
+ register_term_acks (1);
+
+ // Note that this object is the root of the (partial) shutdown; thus its
+ // value of linger is used, rather than the values stored by the children.
+ send_term (object_, options.linger);
+}
+
+void zmq::own_t::process_own (own_t *object_)
+{
+ // If the object is already being shut down, new owned objects are
+ // immediately asked to terminate. Note that linger is set to zero.
+ if (terminating) {
+ register_term_acks (1);
+ send_term (object_, 0);
+ return;
+ }
+
+ // Store the reference to the owned object.
+ owned.insert (object_);
+}
+
+void zmq::own_t::terminate ()
+{
+ // If termination is already underway, there's no point
+ // in starting it anew.
+ if (terminating)
+ return;
+
+ // As for the root of the ownership tree, there's no one to terminate it,
+ // so it has to terminate itself.
+ if (!owner) {
+ process_term (options.linger);
+ return;
+ }
+
+ // If I am an owned object, I'll ask my owner to terminate me.
+ send_term_req (owner, this);
+}
+
+void zmq::own_t::process_term (int linger_)
+{
+ // Double termination should never happen.
+ zmq_assert (!terminating);
+
+ // Send termination request to all owned objects.
+ for (owned_t::iterator it = owned.begin (); it != owned.end (); it++)
+ send_term (*it, linger_);
+ register_term_acks (owned.size ());
+ owned.clear ();
+
+ // Start the termination process and check whether, by chance, we can
+ // terminate immediately.
+ terminating = true;
+ check_term_acks ();
+}
+
+void zmq::own_t::register_term_acks (int count_)
+{
+ term_acks += count_;
+}
+
+void zmq::own_t::unregister_term_ack ()
+{
+ zmq_assert (term_acks > 0);
+ term_acks--;
+
+ // This may be the last ack we are waiting for before termination...
+ check_term_acks ();
+}
+
+void zmq::own_t::process_term_ack ()
+{
+ unregister_term_ack ();
+}
+
+void zmq::own_t::check_term_acks ()
+{
+ if (terminating && processed_seqnum == sent_seqnum.get () &&
+ term_acks == 0) {
+
+ // Sanity check. There should be no active children at this point.
+ zmq_assert (owned.empty ());
+
+ // The root object has nobody to confirm the termination to.
+ // Other nodes will confirm the termination to the owner.
+ if (owner)
+ send_term_ack (owner);
+
+ // Deallocate the resources.
+ process_destroy ();
+ }
+}
+
+void zmq::own_t::process_destroy ()
+{
+ delete this;
+}
+
diff --git a/src/own.hpp b/src/own.hpp
new file mode 100644
index 0000000..18f7251
--- /dev/null
+++ b/src/own.hpp
@@ -0,0 +1,139 @@
+/*
+ Copyright (c) 2007-2010 iMatix Corporation
+
+ This file is part of 0MQ.
+
+ 0MQ is free software; you can redistribute it and/or modify it under
+ the terms of the Lesser GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ 0MQ is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ Lesser GNU General Public License for more details.
+
+ You should have received a copy of the Lesser GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#ifndef __ZMQ_OWN_HPP_INCLUDED__
+#define __ZMQ_OWN_HPP_INCLUDED__
+
+#include <set>
+#include <algorithm>
+
+#include "object.hpp"
+#include "options.hpp"
+#include "atomic_counter.hpp"
+#include "stdint.hpp"
+
+namespace zmq
+{
+
+ // Base class for objects forming a part of ownership hierarchy.
+ // It handles initialisation and destruction of such objects.
+
+ class own_t : public object_t
+ {
+ public:
+
+ // Note that the owner is unspecified in the constructor.
+ // It'll be supplied later on when the object is plugged in.
+
+ // The object is not living within an I/O thread. It has its own
+ // thread outside of the 0MQ infrastructure.
+ own_t (class ctx_t *parent_, uint32_t slot_);
+
+ // The object is living within I/O thread.
+ own_t (class io_thread_t *io_thread_, const options_t &options_);
+
+ // When another owned object wants to send command to this object
+ // it calls this function to let it know it should not shut down
+ // before the command is delivered.
+ void inc_seqnum ();
+
+ // Use the following two functions to wait for arbitrary events before
+ // terminating. Just add the number of events to wait for using the
+ // register_term_acks function. When an event occurs, call
+ // unregister_term_ack. When the number of pending acks reaches zero,
+ // the object will be deallocated.
+ void register_term_acks (int count_);
+ void unregister_term_ack ();
+
+ protected:
+
+ // Launch the supplied object and become its owner.
+ void launch_child (own_t *object_);
+
+ // Launch the supplied object and make it your sibling (make your
+ // owner become its owner as well).
+ void launch_sibling (own_t *object_);
+
+ // Ask the owner object to terminate this object. It may take a while
+ // before the actual termination is started. This function should not be
+ // called more than once.
+ void terminate ();
+
+ // The derived object destroys own_t. There's no point in allowing
+ // others to invoke the destructor. At the same time, it has to be
+ // virtual so that the generic own_t deallocation mechanism destroys
+ // the specific type of the owned object correctly.
+ virtual ~own_t ();
+
+ // The term handler is protected rather than private so that it can
+ // be intercepted by the derived class. This is useful for adding custom
+ // steps to the beginning of the termination process.
+ void process_term (int linger_);
+
+ // A place to hook in when physical destruction of the object
+ // is to be delayed.
+ virtual void process_destroy ();
+
+ // Socket options associated with this object.
+ options_t options;
+
+ private:
+
+ // Set owner of the object
+ void set_owner (own_t *owner_);
+
+ // Handlers for incoming commands.
+ void process_own (own_t *object_);
+ void process_term_req (own_t *object_);
+ void process_term_ack ();
+ void process_seqnum ();
+
+ // Check whether all the pending term acks were delivered.
+ // If so, deallocate this object.
+ void check_term_acks ();
+
+ // True if termination was already initiated. If so, we can destroy
+ // the object if there are no more child objects or pending term acks.
+ bool terminating;
+
+ // Sequence number of the last command sent to this object.
+ atomic_counter_t sent_seqnum;
+
+ // Sequence number of the last command processed by this object.
+ uint64_t processed_seqnum;
+
+ // Socket owning this object. It's responsible for shutting down
+ // this object.
+ own_t *owner;
+
+ // List of all objects owned by this socket. We are responsible
+ // for deallocating them before we quit.
+ typedef std::set <own_t*> owned_t;
+ owned_t owned;
+
+ // Number of events we have to get before we can destroy the object.
+ int term_acks;
+
+ own_t (const own_t&);
+ void operator = (const own_t&);
+ };
+
+}
+
+#endif
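
The core of own_t's shutdown is the ack counter: termination is requested, every outstanding dependency (child object, pending command, pipe) contributes one expected ack, and only when the counter reaches zero is the object destroyed. A bare-bones sketch of that bookkeeping, detached from the command plumbing; term_waiter_t is invented for illustration and is not part of the source tree:

    #include <assert.h>

    class term_waiter_t
    {
    public:
        term_waiter_t () : terminating (false), term_acks (0), done (false) {}

        //  Shutdown was requested; completes immediately if nothing is pending.
        void start_terminating () { terminating = true; check (); }

        //  Each event to wait for (child, pipe, ...) adds one expected ack.
        void register_term_acks (int count_) { term_acks += count_; }

        //  Each delivered ack may be the last one we were waiting for.
        void unregister_term_ack ()
        {
            assert (term_acks > 0);
            term_acks--;
            check ();
        }

        bool finished () const { return done; }

    private:
        void check () { if (terminating && term_acks == 0) done = true; }

        bool terminating;
        int term_acks;
        bool done;
    };
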
diff --git a/src/owned.cpp b/src/owned.cpp
deleted file mode 100644
index d6be444..0000000
--- a/src/owned.cpp
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- Copyright (c) 2007-2010 iMatix Corporation
-
- This file is part of 0MQ.
-
- 0MQ is free software; you can redistribute it and/or modify it under
- the terms of the Lesser GNU General Public License as published by
- the Free Software Foundation; either version 3 of the License, or
- (at your option) any later version.
-
- 0MQ is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- Lesser GNU General Public License for more details.
-
- You should have received a copy of the Lesser GNU General Public License
- along with this program. If not, see <http://www.gnu.org/licenses/>.
-*/
-
-#include "owned.hpp"
-#include "err.hpp"
-
-zmq::owned_t::owned_t (object_t *parent_, socket_base_t *owner_) :
- object_t (parent_),
- owner (owner_),
- sent_seqnum (0),
- processed_seqnum (0),
- shutting_down (false)
-{
-}
-
-zmq::owned_t::~owned_t ()
-{
-}
-
-void zmq::owned_t::inc_seqnum ()
-{
- // NB: This function may be called from a different thread!
- sent_seqnum.add (1);
-}
-
-void zmq::owned_t::term ()
-{
- send_term_req (owner, this);
-}
-
-void zmq::owned_t::process_term ()
-{
- zmq_assert (!shutting_down);
- shutting_down = true;
- finalise ();
-}
-
-void zmq::owned_t::process_seqnum ()
-{
- // Catch up with counter of processed commands.
- processed_seqnum++;
- finalise ();
-}
-
-void zmq::owned_t::finalise ()
-{
- // If termination request was already received and there are no more
- // commands to wait for, terminate the object.
- if (shutting_down && processed_seqnum == sent_seqnum.get ()) {
- process_unplug ();
- send_term_ack (owner);
- delete this;
- }
-}
-
diff --git a/src/owned.hpp b/src/owned.hpp
deleted file mode 100644
index 91189a1..0000000
--- a/src/owned.hpp
+++ /dev/null
@@ -1,89 +0,0 @@
-/*
- Copyright (c) 2007-2010 iMatix Corporation
-
- This file is part of 0MQ.
-
- 0MQ is free software; you can redistribute it and/or modify it under
- the terms of the Lesser GNU General Public License as published by
- the Free Software Foundation; either version 3 of the License, or
- (at your option) any later version.
-
- 0MQ is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- Lesser GNU General Public License for more details.
-
- You should have received a copy of the Lesser GNU General Public License
- along with this program. If not, see <http://www.gnu.org/licenses/>.
-*/
-
-#ifndef __ZMQ_OWNED_HPP_INCLUDED__
-#define __ZMQ_OWNED_HPP_INCLUDED__
-
-#include "socket_base.hpp"
-#include "atomic_counter.hpp"
-#include "stdint.hpp"
-
-namespace zmq
-{
-
- // Base class for objects owned by individual sockets. Handles
- // initialisation and destruction of such objects.
-
- class owned_t : public object_t
- {
- public:
-
- // The object will live in parent's thread, however, its lifetime
- // will be managed by its owner socket.
- owned_t (object_t *parent_, socket_base_t *owner_);
-
- // When another owned object wants to send command to this object
- // it calls this function to let it know it should not shut down
- // before the command is delivered.
- void inc_seqnum ();
-
- protected:
-
- // Ask owner socket to terminate this object.
- void term ();
-
- // Derived object destroys owned_t. No point in allowing others to
- // invoke the destructor. At the same time, it has to be virtual so
- // that generic owned_t deallocation mechanism destroys specific type
- // of the owned object correctly.
- virtual ~owned_t ();
-
- // io_object_t defines a new handler used to disconnect the object
- // from the poller object. Implement the handlen in the derived
- // classes to ensure sane cleanup.
- virtual void process_unplug () = 0;
-
- // Socket owning this object. When the socket is being closed it's
- // responsible for shutting down this object.
- socket_base_t *owner;
-
- private:
-
- // Handlers for incoming commands.
- void process_term ();
- void process_seqnum ();
-
- void finalise ();
-
- // Sequence number of the last command sent to this object.
- atomic_counter_t sent_seqnum;
-
- // Sequence number of the last command processed by this object.
- uint64_t processed_seqnum;
-
- // If true, the object is already shutting down.
- bool shutting_down;
-
- owned_t (const owned_t&);
- void operator = (const owned_t&);
- };
-
-}
-
-#endif
diff --git a/src/pair.cpp b/src/pair.cpp
index 3872b28..8ee29cf 100644
--- a/src/pair.cpp
+++ b/src/pair.cpp
@@ -23,68 +23,96 @@
#include "err.hpp"
#include "pipe.hpp"
-zmq::pair_t::pair_t (class app_thread_t *parent_) :
- socket_base_t (parent_),
+zmq::pair_t::pair_t (class ctx_t *parent_, uint32_t slot_) :
+ socket_base_t (parent_, slot_),
inpipe (NULL),
outpipe (NULL),
- alive (true)
+ inpipe_alive (false),
+ outpipe_alive (false),
+ terminating (false)
{
+ options.type = ZMQ_PAIR;
options.requires_in = true;
options.requires_out = true;
}
zmq::pair_t::~pair_t ()
{
- if (inpipe)
- inpipe->term ();
- if (outpipe)
- outpipe->term ();
+ zmq_assert (!inpipe);
+ zmq_assert (!outpipe);
}
-void zmq::pair_t::xattach_pipes (class reader_t *inpipe_,
- class writer_t *outpipe_, const blob_t &peer_identity_)
+void zmq::pair_t::xattach_pipes (reader_t *inpipe_, writer_t *outpipe_,
+ const blob_t &peer_identity_)
{
zmq_assert (!inpipe && !outpipe);
+
inpipe = inpipe_;
+ inpipe_alive = true;
+ inpipe->set_event_sink (this);
+
outpipe = outpipe_;
outpipe_alive = true;
+ outpipe->set_event_sink (this);
+
+ if (terminating) {
+ register_term_acks (2);
+ inpipe_->terminate ();
+ outpipe_->terminate ();
+ }
}
-void zmq::pair_t::xdetach_inpipe (class reader_t *pipe_)
+void zmq::pair_t::terminated (reader_t *pipe_)
{
zmq_assert (pipe_ == inpipe);
inpipe = NULL;
+ inpipe_alive = false;
+
+ if (terminating)
+ unregister_term_ack ();
}
-void zmq::pair_t::xdetach_outpipe (class writer_t *pipe_)
+void zmq::pair_t::terminated (writer_t *pipe_)
{
zmq_assert (pipe_ == outpipe);
outpipe = NULL;
+ outpipe_alive = false;
+
+ if (terminating)
+ unregister_term_ack ();
}
-void zmq::pair_t::xkill (class reader_t *pipe_)
+void zmq::pair_t::delimited (reader_t *pipe_)
{
- zmq_assert (alive);
- alive = false;
}
-void zmq::pair_t::xrevive (class reader_t *pipe_)
+void zmq::pair_t::process_term (int linger_)
{
- zmq_assert (!alive);
- alive = true;
+ terminating = true;
+
+ if (inpipe) {
+ register_term_acks (1);
+ inpipe->terminate ();
+ }
+
+ if (outpipe) {
+ register_term_acks (1);
+ outpipe->terminate ();
+ }
+
+ socket_base_t::process_term (linger_);
}
-void zmq::pair_t::xrevive (class writer_t *pipe_)
+void zmq::pair_t::activated (class reader_t *pipe_)
{
- zmq_assert (!outpipe_alive);
- outpipe_alive = true;
+ zmq_assert (!inpipe_alive);
+ inpipe_alive = true;
}
-int zmq::pair_t::xsetsockopt (int option_, const void *optval_,
- size_t optvallen_)
+void zmq::pair_t::activated (class writer_t *pipe_)
{
- errno = EINVAL;
- return -1;
+ zmq_assert (!outpipe_alive);
+ outpipe_alive = true;
}
int zmq::pair_t::xsend (zmq_msg_t *msg_, int flags_)
@@ -100,7 +128,8 @@ int zmq::pair_t::xsend (zmq_msg_t *msg_, int flags_)
return -1;
}
- outpipe->flush ();
+ if (!(flags_ & ZMQ_SNDMORE))
+ outpipe->flush ();
// Detach the original message from the data buffer.
int rc = zmq_msg_init (msg_);
@@ -114,9 +143,12 @@ int zmq::pair_t::xrecv (zmq_msg_t *msg_, int flags_)
// Deallocate old content of the message.
zmq_msg_close (msg_);
- if (!alive || !inpipe || !inpipe->read (msg_)) {
- // No message is available. Initialise the output parameter
- // to be a 0-byte message.
+ if (!inpipe_alive || !inpipe || !inpipe->read (msg_)) {
+
+ // No message is available.
+ inpipe_alive = false;
+
+ // Initialise the output parameter to be a 0-byte message.
zmq_msg_init (msg_);
errno = EAGAIN;
return -1;
@@ -126,14 +158,16 @@ int zmq::pair_t::xrecv (zmq_msg_t *msg_, int flags_)
bool zmq::pair_t::xhas_in ()
{
- if (alive && inpipe && inpipe->check_read ())
- return true;
- return false;
+ if (!inpipe || !inpipe_alive)
+ return false;
+
+ inpipe_alive = inpipe->check_read ();
+ return inpipe_alive;
}
bool zmq::pair_t::xhas_out ()
{
- if (outpipe == NULL || !outpipe_alive)
+ if (!outpipe || !outpipe_alive)
return false;
outpipe_alive = outpipe->check_write ();
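
The xsend change above defers outpipe->flush () while ZMQ_SNDMORE is set, so a multipart message only becomes visible to the reader once its last part has been written. The matching application-side sequence through the 2.x C API, assuming 's' is a connected ZMQ_PAIR socket and <zmq.h>, <assert.h> and <string.h> are included:

    zmq_msg_t part;
    int rc = zmq_msg_init_size (&part, 5);
    assert (rc == 0);
    memcpy (zmq_msg_data (&part), "part1", 5);
    rc = zmq_send (s, &part, ZMQ_SNDMORE);    //  Queued, pipe not flushed yet.
    assert (rc == 0);

    rc = zmq_msg_init_size (&part, 5);
    assert (rc == 0);
    memcpy (zmq_msg_data (&part), "part2", 5);
    rc = zmq_send (s, &part, 0);              //  Last part; whole message flushed.
    assert (rc == 0);
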
diff --git a/src/pair.hpp b/src/pair.hpp
index aea249f..a14544f 100644
--- a/src/pair.hpp
+++ b/src/pair.hpp
@@ -21,39 +21,51 @@
#define __ZMQ_PAIR_HPP_INCLUDED__
#include "socket_base.hpp"
+#include "pipe.hpp"
namespace zmq
{
- class pair_t : public socket_base_t
+ class pair_t :
+ public socket_base_t,
+ public i_reader_events,
+ public i_writer_events
{
public:
- pair_t (class app_thread_t *parent_);
+ pair_t (class ctx_t *parent_, uint32_t slot_);
~pair_t ();
// Overloads of functions from socket_base_t.
void xattach_pipes (class reader_t *inpipe_, class writer_t *outpipe_,
const blob_t &peer_identity_);
- void xdetach_inpipe (class reader_t *pipe_);
- void xdetach_outpipe (class writer_t *pipe_);
- void xkill (class reader_t *pipe_);
- void xrevive (class reader_t *pipe_);
- void xrevive (class writer_t *pipe_);
- int xsetsockopt (int option_, const void *optval_, size_t optvallen_);
int xsend (zmq_msg_t *msg_, int flags_);
int xrecv (zmq_msg_t *msg_, int flags_);
bool xhas_in ();
bool xhas_out ();
+ // i_reader_events interface implementation.
+ void activated (class reader_t *pipe_);
+ void terminated (class reader_t *pipe_);
+ void delimited (class reader_t *pipe_);
+
+ // i_writer_events interface implementation.
+ void activated (class writer_t *pipe_);
+ void terminated (class writer_t *pipe_);
+
private:
+ // Hook into termination process.
+ void process_term (int linger_);
+
class reader_t *inpipe;
class writer_t *outpipe;
- bool alive;
+ bool inpipe_alive;
bool outpipe_alive;
+ bool terminating;
+
pair_t (const pair_t&);
void operator = (const pair_t&);
};
diff --git a/src/pgm_receiver.cpp b/src/pgm_receiver.cpp
index 048c529..c1b35f1 100644
--- a/src/pgm_receiver.cpp
+++ b/src/pgm_receiver.cpp
@@ -36,6 +36,7 @@
zmq::pgm_receiver_t::pgm_receiver_t (class io_thread_t *parent_,
const options_t &options_) :
io_object_t (parent_),
+ has_rx_timer (false),
pgm_socket (true, options_),
options (options_),
inout (NULL),
@@ -55,7 +56,7 @@ int zmq::pgm_receiver_t::init (bool udp_encapsulation_, const char *network_)
return pgm_socket.init (udp_encapsulation_, network_);
}
-void zmq::pgm_receiver_t::plug (i_inout *inout_)
+void zmq::pgm_receiver_t::plug (io_thread_t *io_thread_, i_inout *inout_)
{
// Retrieve PGM fds and start polling.
int socket_fd;
@@ -81,19 +82,29 @@ void zmq::pgm_receiver_t::unplug ()
mru_decoder = NULL;
pending_bytes = 0;
- // Stop polling.
+ if (has_rx_timer) {
+ cancel_timer (rx_timer_id);
+ has_rx_timer = false;
+ }
+
rm_fd (socket_handle);
rm_fd (pipe_handle);
inout = NULL;
}
-void zmq::pgm_receiver_t::revive ()
+void zmq::pgm_receiver_t::terminate ()
+{
+ unplug ();
+ delete this;
+}
+
+void zmq::pgm_receiver_t::activate_out ()
{
zmq_assert (false);
}
-void zmq::pgm_receiver_t::resume_input ()
+void zmq::pgm_receiver_t::activate_in ()
{
// It is possible that the most recently used decoder
// processed the whole buffer but failed to write
@@ -129,6 +140,11 @@ void zmq::pgm_receiver_t::in_event ()
zmq_assert (pending_bytes == 0);
+ if (has_rx_timer) {
+ cancel_timer (rx_timer_id);
+ has_rx_timer = false;
+ }
+
// TODO: This loop can effectively block other engines in the same I/O
// thread in the case of high load.
while (true) {
@@ -138,8 +154,14 @@ void zmq::pgm_receiver_t::in_event ()
// No data to process. This may happen if the packet received is
// neither ODATA nor RDATA.
- if (received == 0)
+ if (received == 0) {
+ if (errno == ENOMEM || errno == EBUSY) {
+ const long timeout = pgm_socket.get_rx_timeout ();
+ add_timer (timeout, rx_timer_id);
+ has_rx_timer = true;
+ }
break;
+ }
// Find the peer based on its TSI.
peers_t::iterator it = peers.find (*tsi);
@@ -161,7 +183,7 @@ void zmq::pgm_receiver_t::in_event ()
// New peer. Add it to the list of known but not-yet-joined peers.
if (it == peers.end ()) {
peer_info_t peer_info = {false, NULL};
- it = peers.insert (std::make_pair (*tsi, peer_info)).first;
+ it = peers.insert (peers_t::value_type (*tsi, peer_info)).first;
}
// Read the offset of the first message in the current packet.
@@ -189,7 +211,7 @@ void zmq::pgm_receiver_t::in_event ()
it->second.joined = true;
// Create and connect decoder for the peer.
- it->second.decoder = new (std::nothrow) zmq_decoder_t (0);
+ it->second.decoder = new (std::nothrow) decoder_t (0);
it->second.decoder->set_inout (inout);
}
@@ -213,5 +235,14 @@ void zmq::pgm_receiver_t::in_event ()
inout->flush ();
}
+void zmq::pgm_receiver_t::timer_event (int token)
+{
+ zmq_assert (token == rx_timer_id);
+
+ // The expired timer is cancelled by poller_base on return.
+ has_rx_timer = false;
+ in_event ();
+}
+
#endif
diff --git a/src/pgm_receiver.hpp b/src/pgm_receiver.hpp
index 1b367bf..627a658 100644
--- a/src/pgm_receiver.hpp
+++ b/src/pgm_receiver.hpp
@@ -34,7 +34,7 @@
#include "io_object.hpp"
#include "i_engine.hpp"
#include "options.hpp"
-#include "zmq_decoder.hpp"
+#include "decoder.hpp"
#include "pgm_socket.hpp"
namespace zmq
@@ -51,36 +51,42 @@ namespace zmq
int init (bool udp_encapsulation_, const char *network_);
// i_engine interface implementation.
- void plug (struct i_inout *inout_);
+ void plug (class io_thread_t *io_thread_, struct i_inout *inout_);
void unplug ();
- void revive ();
- void resume_input ();
+ void terminate ();
+ void activate_in ();
+ void activate_out ();
// i_poll_events interface implementation.
void in_event ();
+ void timer_event (int token);
private:
+ // RX timeout timer ID.
+ enum {rx_timer_id = 0xa1};
+
+ // RX timer is running.
+ bool has_rx_timer;
+
// If joined is true we are already getting messages from the peer.
// If it's false, we are getting data but still haven't seen the
// beginning of a message.
struct peer_info_t
{
bool joined;
- zmq_decoder_t *decoder;
+ decoder_t *decoder;
};
struct tsi_comp
{
- inline bool operator () (const pgm_tsi_t &ltsi,
+ bool operator () (const pgm_tsi_t &ltsi,
const pgm_tsi_t &rtsi) const
{
- if (ltsi.sport < rtsi.sport)
- return true;
-
- return (std::lexicographical_compare (ltsi.gsi.identifier,
- ltsi.gsi.identifier + 6,
- rtsi.gsi.identifier, rtsi.gsi.identifier + 6));
+ uint32_t ll[2], rl[2];
+ memcpy (ll, &ltsi, sizeof (ll));
+ memcpy (rl, &rtsi, sizeof (rl));
+ return (ll[0] < rl[0]) || (ll[0] == rl[0] && ll[1] < rl[1]);
}
};
@@ -97,7 +103,7 @@ namespace zmq
i_inout *inout;
// Most recently used decoder.
- zmq_decoder_t *mru_decoder;
+ decoder_t *mru_decoder;
// Number of bytes not consumed by the decoder due to pipe overflow.
size_t pending_bytes;
diff --git a/src/pgm_sender.cpp b/src/pgm_sender.cpp
index 9aeb7a9..957de6d 100644
--- a/src/pgm_sender.cpp
+++ b/src/pgm_sender.cpp
@@ -36,6 +36,8 @@
zmq::pgm_sender_t::pgm_sender_t (io_thread_t *parent_,
const options_t &options_) :
io_object_t (parent_),
+ has_tx_timer (false),
+ has_rx_timer (false),
encoder (0),
pgm_socket (false, options_),
options (options_),
@@ -58,7 +60,7 @@ int zmq::pgm_sender_t::init (bool udp_encapsulation_, const char *network_)
return rc;
}
-void zmq::pgm_sender_t::plug (i_inout *inout_)
+void zmq::pgm_sender_t::plug (io_thread_t *io_thread_, i_inout *inout_)
{
// Allocate 2 fds for the PGM socket.
int downlink_socket_fd = 0;
@@ -89,6 +91,16 @@ void zmq::pgm_sender_t::plug (i_inout *inout_)
void zmq::pgm_sender_t::unplug ()
{
+ if (has_rx_timer) {
+ cancel_timer (rx_timer_id);
+ has_rx_timer = false;
+ }
+
+ if (has_tx_timer) {
+ cancel_timer (tx_timer_id);
+ has_tx_timer = false;
+ }
+
rm_fd (handle);
rm_fd (uplink_handle);
rm_fd (rdata_notify_handle);
@@ -96,13 +108,19 @@ void zmq::pgm_sender_t::unplug ()
encoder.set_inout (NULL);
}
-void zmq::pgm_sender_t::revive ()
+void zmq::pgm_sender_t::terminate ()
+{
+ unplug ();
+ delete this;
+}
+
+void zmq::pgm_sender_t::activate_out ()
{
set_pollout (handle);
out_event ();
}
-void zmq::pgm_sender_t::resume_input ()
+void zmq::pgm_sender_t::activate_in ()
{
zmq_assert (false);
}
@@ -117,8 +135,18 @@ zmq::pgm_sender_t::~pgm_sender_t ()
void zmq::pgm_sender_t::in_event ()
{
- // In event on sender side means NAK or SPMR receiving from some peer.
+ if (has_rx_timer) {
+ cancel_timer (rx_timer_id);
+ has_rx_timer = false;
+ }
+
+ // An in-event on the sender side means a NAK or SPMR has arrived from some peer.
pgm_socket.process_upstream ();
+ if (errno == ENOMEM || errno == EBUSY) {
+ const long timeout = pgm_socket.get_rx_timeout ();
+ add_timer (timeout, rx_timer_id);
+ has_rx_timer = true;
+ }
}
void zmq::pgm_sender_t::out_event ()
@@ -146,14 +174,40 @@ void zmq::pgm_sender_t::out_event ()
put_uint16 (out_buffer, offset == -1 ? 0xffff : (uint16_t) offset);
}
+ if (has_tx_timer) {
+ cancel_timer (tx_timer_id);
+ has_tx_timer = false;
+ }
+
// Send the data.
size_t nbytes = pgm_socket.send (out_buffer, write_size);
// We can write either all data or 0 which means rate limit reached.
- if (nbytes == write_size)
+ if (nbytes == write_size) {
write_size = 0;
- else
+ } else {
zmq_assert (nbytes == 0);
+
+ if (errno == ENOMEM) {
+ const long timeout = pgm_socket.get_tx_timeout ();
+ add_timer (timeout, tx_timer_id);
+ has_tx_timer = true;
+ } else
+ zmq_assert (errno == EBUSY);
+ }
+}
+
+void zmq::pgm_sender_t::timer_event (int token)
+{
+ // The expired timer is cancelled by poller_base on return.
+ if (token == rx_timer_id) {
+ has_rx_timer = false;
+ in_event ();
+ } else if (token == tx_timer_id) {
+ has_tx_timer = false;
+ out_event ();
+ } else
+ zmq_assert (false);
}
#endif
diff --git a/src/pgm_sender.hpp b/src/pgm_sender.hpp
index 23a53bc..9270ba0 100644
--- a/src/pgm_sender.hpp
+++ b/src/pgm_sender.hpp
@@ -33,7 +33,7 @@
#include "i_engine.hpp"
#include "options.hpp"
#include "pgm_socket.hpp"
-#include "zmq_encoder.hpp"
+#include "encoder.hpp"
namespace zmq
{
@@ -49,19 +49,28 @@ namespace zmq
int init (bool udp_encapsulation_, const char *network_);
// i_engine interface implementation.
- void plug (struct i_inout *inout_);
+ void plug (class io_thread_t *io_thread_, struct i_inout *inout_);
void unplug ();
- void revive ();
- void resume_input ();
+ void terminate ();
+ void activate_in ();
+ void activate_out ();
// i_poll_events interface implementation.
void in_event ();
void out_event ();
+ void timer_event (int token);
private:
+ // TX and RX timeout timer IDs.
+ enum {tx_timer_id = 0xa0, rx_timer_id = 0xa1};
+
+ // Timers are running.
+ bool has_tx_timer;
+ bool has_rx_timer;
+
// Message encoder.
- zmq_encoder_t encoder;
+ encoder_t encoder;
// PGM socket.
pgm_socket_t pgm_socket;
diff --git a/src/pgm_socket.cpp b/src/pgm_socket.cpp
index 5a952a7..abfa804 100644
--- a/src/pgm_socket.cpp
+++ b/src/pgm_socket.cpp
@@ -40,8 +40,12 @@
#include "uuid.hpp"
#include "stdint.hpp"
+#ifndef MSG_ERRQUEUE
+#define MSG_ERRQUEUE 0x2000
+#endif
+
zmq::pgm_socket_t::pgm_socket_t (bool receiver_, const options_t &options_) :
- transport (NULL),
+ sock (NULL),
options (options_),
receiver (receiver_),
pgm_msgv (NULL),
@@ -52,13 +56,18 @@ zmq::pgm_socket_t::pgm_socket_t (bool receiver_, const options_t &options_) :
{
}
+// Create, bind and connect PGM socket.
+// network_ of the form <interface & multicast group decls>:<IP port>
+// e.g. eth0;239.192.0.1:7500
+// link-local;224.250.0.1,224.250.0.2;224.250.0.3:8000
+// ;[fe80::1%en0]:7500
int zmq::pgm_socket_t::init (bool udp_encapsulation_, const char *network_)
{
// Can not open transport before destroying old one.
- zmq_assert (transport == NULL);
+ zmq_assert (sock == NULL);
- // Parse port number.
- const char *port_delim = strchr (network_, ':');
+ // Parse the port number; start from the end to cope with IPv6 addresses.
+ const char *port_delim = strrchr (network_, ':');
if (!port_delim) {
errno = EINVAL;
return -1;
@@ -73,261 +82,234 @@ int zmq::pgm_socket_t::init (bool udp_encapsulation_, const char *network_)
}
memset (network, '\0', sizeof (network));
memcpy (network, network_, port_delim - network_);
-
- // Zero counter used in msgrecv.
- nbytes_rec = 0;
- nbytes_processed = 0;
- pgm_msgv_processed = 0;
-
- int rc;
- GError *pgm_error = NULL;
-
- // PGM transport GSI.
- pgm_gsi_t gsi;
-
- std::string gsi_base;
-
- if (options.identity.size () > 0) {
-
- // Create gsi from identity.
- // TODO: We assume that identity is standard C string here.
- // What if it contains binary zeroes?
- gsi_base.assign ((const char*) options.identity.data (),
- options.identity.size ());
- } else {
-
- // Generate random gsi.
- gsi_base = uuid_t ().to_string ();
+
+ // Validate socket options
+ // Data rate is in [B/s]. options.rate is in [kb/s].
+ if (options.rate <= 0) {
+ errno = EINVAL;
+ return -1;
}
-
- rc = pgm_gsi_create_from_string (&gsi, gsi_base.c_str (), -1);
- if (rc != TRUE) {
+ // Recovery interval [s].
+ if (options.recovery_ivl <= 0) {
errno = EINVAL;
return -1;
}
- struct pgm_transport_info_t *res = NULL;
- struct pgm_transport_info_t hint;
- memset (&hint, 0, sizeof (hint));
- hint.ti_family = AF_INET;
-
- if (!pgm_if_get_transport_info (network, &hint, &res, &pgm_error)) {
- if (pgm_error->domain == PGM_IF_ERROR && (
- pgm_error->code == PGM_IF_ERROR_INVAL ||
- pgm_error->code == PGM_IF_ERROR_XDEV ||
- pgm_error->code == PGM_IF_ERROR_NODEV ||
- pgm_error->code == PGM_IF_ERROR_NOTUNIQ ||
- pgm_error->code == PGM_IF_ERROR_ADDRFAMILY ||
- pgm_error->code == PGM_IF_ERROR_FAMILY ||
- pgm_error->code == PGM_IF_ERROR_NODATA ||
- pgm_error->code == PGM_IF_ERROR_NONAME ||
- pgm_error->code == PGM_IF_ERROR_SERVICE)) {
- g_error_free (pgm_error);
- errno = EINVAL;
- return -1;
- }
+ // Zero counter used in msgrecv.
+ nbytes_rec = 0;
+ nbytes_processed = 0;
+ pgm_msgv_processed = 0;
+ pgm_error_t *pgm_error = NULL;
+ struct pgm_addrinfo_t hints, *res = NULL;
+ sa_family_t sa_family;
+
+ memset (&hints, 0, sizeof (hints));
+ hints.ai_family = AF_UNSPEC;
+ if (!pgm_getaddrinfo (network, NULL, &res, &pgm_error)) {
+// Invalid parameters don't set pgm_error_t
+ zmq_assert (pgm_error != NULL);
+ if (pgm_error->domain == PGM_ERROR_DOMAIN_IF && (
+// NB: cannot catch EAI_BADFLAGS
+ pgm_error->code != PGM_ERROR_SERVICE &&
+ pgm_error->code != PGM_ERROR_SOCKTNOSUPPORT))
+// User, host, or network configuration or transient error
+ goto err_abort;
+
+// Fatal OpenPGM internal error
zmq_assert (false);
}
- res->ti_gsi = gsi;
- res->ti_dport = port_number;
+ zmq_assert (res != NULL);
- // If we are using UDP encapsulation update gsr or res.
- if (udp_encapsulation_) {
- res->ti_udp_encap_ucast_port = port_number;
- res->ti_udp_encap_mcast_port = port_number;
- }
+ // Pick up detected IP family
+ sa_family = res->ai_send_addrs[0].gsr_group.ss_family;
- if (!pgm_transport_create (&transport, res, &pgm_error)) {
- if (pgm_error->domain == PGM_TRANSPORT_ERROR && (
- pgm_error->code == PGM_TRANSPORT_ERROR_INVAL ||
- pgm_error->code == PGM_TRANSPORT_ERROR_PERM ||
- pgm_error->code == PGM_TRANSPORT_ERROR_NODEV)) {
- pgm_if_free_transport_info (res);
- g_error_free (pgm_error);
- errno = EINVAL;
- return -1;
+ // Create IP/PGM or UDP/PGM socket
+ if (udp_encapsulation_) {
+ if (!pgm_socket (&sock, sa_family, SOCK_SEQPACKET, IPPROTO_UDP, &pgm_error)) {
+// Invalid parameters don't set pgm_error_t
+ zmq_assert (pgm_error != NULL);
+ if (pgm_error->domain == PGM_ERROR_DOMAIN_SOCKET && (
+ pgm_error->code != PGM_ERROR_BADF &&
+ pgm_error->code != PGM_ERROR_FAULT &&
+ pgm_error->code != PGM_ERROR_NOPROTOOPT &&
+ pgm_error->code != PGM_ERROR_FAILED))
+// User, host, or network configuration or transient error
+ goto err_abort;
+
+// Fatal OpenPGM internal error
+ zmq_assert (false);
}
- zmq_assert (false);
+ // All options are of data type int
+ const int encapsulation_port = port_number;
+ if (!pgm_setsockopt (sock, IPPROTO_PGM, PGM_UDP_ENCAP_UCAST_PORT, &encapsulation_port, sizeof (encapsulation_port)) ||
+ !pgm_setsockopt (sock, IPPROTO_PGM, PGM_UDP_ENCAP_MCAST_PORT, &encapsulation_port, sizeof (encapsulation_port)))
+ goto err_abort;
+ } else {
+ if (!pgm_socket (&sock, sa_family, SOCK_SEQPACKET, IPPROTO_PGM, &pgm_error)) {
+// Invalid parameters don't set pgm_error_t
+ zmq_assert (pgm_error != NULL);
+ if (pgm_error->domain == PGM_ERROR_DOMAIN_SOCKET && (
+ pgm_error->code != PGM_ERROR_BADF &&
+ pgm_error->code != PGM_ERROR_FAULT &&
+ pgm_error->code != PGM_ERROR_NOPROTOOPT &&
+ pgm_error->code != PGM_ERROR_FAILED))
+// User, host, or network configuration or transient error
+ goto err_abort;
+
+// Fatal OpenPGM internal error
+ zmq_assert (false);
+ }
}
- pgm_if_free_transport_info (res);
-
- // Common parameters for receiver and sender.
+ {
+ const int rcvbuf = (int) options.rcvbuf,
+ sndbuf = (int) options.sndbuf,
+ max_tpdu = (int) pgm_max_tpdu;
+ if (rcvbuf) {
+ if (!pgm_setsockopt (sock, SOL_SOCKET, SO_RCVBUF, &rcvbuf, sizeof (rcvbuf)))
+ goto err_abort;
+ }
+ if (sndbuf) {
+ if (!pgm_setsockopt (sock, SOL_SOCKET, SO_SNDBUF, &sndbuf, sizeof (sndbuf)))
+ goto err_abort;
+ }
// Set maximum transport protocol data unit size (TPDU).
- rc = pgm_transport_set_max_tpdu (transport, pgm_max_tpdu);
- if (rc != TRUE) {
- errno = EINVAL;
- return -1;
- }
-
- // Set maximum number of network hops to cross.
- rc = pgm_transport_set_hops (transport, 16);
- if (rc != TRUE) {
- errno = EINVAL;
- return -1;
- }
-
- // Set nonblocking send/recv sockets.
- if (!pgm_transport_set_nonblocking (transport, true)) {
- errno = EINVAL;
- return -1;
+ if (!pgm_setsockopt (sock, IPPROTO_PGM, PGM_MTU, &max_tpdu, sizeof (max_tpdu)))
+ goto err_abort;
}
if (receiver) {
+ const int recv_only = 1,
+ rxw_max_rte = options.rate * 1000 / 8,
+ rxw_secs = options.recovery_ivl,
+ peer_expiry = pgm_secs (300),
+ spmr_expiry = pgm_msecs (25),
+ nak_bo_ivl = pgm_msecs (50),
+ nak_rpt_ivl = pgm_msecs (200),
+ nak_rdata_ivl = pgm_msecs (200),
+ nak_data_retries = 50,
+ nak_ncf_retries = 50;
+
+ if (!pgm_setsockopt (sock, IPPROTO_PGM, PGM_RECV_ONLY, &recv_only, sizeof (recv_only)) ||
+ !pgm_setsockopt (sock, IPPROTO_PGM, PGM_RXW_MAX_RTE, &rxw_max_rte, sizeof (rxw_max_rte)) ||
+ !pgm_setsockopt (sock, IPPROTO_PGM, PGM_RXW_SECS, &rxw_secs, sizeof (rxw_secs)) ||
+ !pgm_setsockopt (sock, IPPROTO_PGM, PGM_PEER_EXPIRY, &peer_expiry, sizeof (peer_expiry)) ||
+ !pgm_setsockopt (sock, IPPROTO_PGM, PGM_SPMR_EXPIRY, &spmr_expiry, sizeof (spmr_expiry)) ||
+ !pgm_setsockopt (sock, IPPROTO_PGM, PGM_NAK_BO_IVL, &nak_bo_ivl, sizeof (nak_bo_ivl)) ||
+ !pgm_setsockopt (sock, IPPROTO_PGM, PGM_NAK_RPT_IVL, &nak_rpt_ivl, sizeof (nak_rpt_ivl)) ||
+ !pgm_setsockopt (sock, IPPROTO_PGM, PGM_NAK_RDATA_IVL, &nak_rdata_ivl, sizeof (nak_rdata_ivl)) ||
+ !pgm_setsockopt (sock, IPPROTO_PGM, PGM_NAK_DATA_RETRIES, &nak_data_retries, sizeof (nak_data_retries)) ||
+ !pgm_setsockopt (sock, IPPROTO_PGM, PGM_NAK_NCF_RETRIES, &nak_ncf_retries, sizeof (nak_ncf_retries)))
+ goto err_abort;
+ } else {
+ const int send_only = 1,
+ txw_max_rte = options.rate * 1000 / 8,
+ txw_secs = options.recovery_ivl,
+ ambient_spm = pgm_secs (30),
+ heartbeat_spm[] = { pgm_msecs (100),
+ pgm_msecs (100),
+ pgm_msecs (100),
+ pgm_msecs (100),
+ pgm_msecs (1300),
+ pgm_secs (7),
+ pgm_secs (16),
+ pgm_secs (25),
+ pgm_secs (30) };
+
+ if (!pgm_setsockopt (sock, IPPROTO_PGM, PGM_SEND_ONLY, &send_only, sizeof (send_only)) ||
+ !pgm_setsockopt (sock, IPPROTO_PGM, PGM_TXW_MAX_RTE, &txw_max_rte, sizeof (txw_max_rte)) ||
+ !pgm_setsockopt (sock, IPPROTO_PGM, PGM_TXW_SECS, &txw_secs, sizeof (txw_secs)) ||
+ !pgm_setsockopt (sock, IPPROTO_PGM, PGM_AMBIENT_SPM, &ambient_spm, sizeof (ambient_spm)) ||
+ !pgm_setsockopt (sock, IPPROTO_PGM, PGM_HEARTBEAT_SPM, &heartbeat_spm, sizeof (heartbeat_spm)))
+ goto err_abort;
+ }
- // Receiver transport.
-
- // Note that NAKs are still generated by the transport.
- rc = pgm_transport_set_recv_only (transport, true, false);
- zmq_assert (rc == TRUE);
-
- if (options.rcvbuf) {
- rc = pgm_transport_set_rcvbuf (transport, (int) options.rcvbuf);
- if (rc != TRUE)
- return -1;
- }
-
- // Set NAK transmit back-off interval [us].
- rc = pgm_transport_set_nak_bo_ivl (transport, 50 * 1000);
- zmq_assert (rc == TRUE);
-
- // Set timeout before repeating NAK [us].
- rc = pgm_transport_set_nak_rpt_ivl (transport, 200 * 1000);
- zmq_assert (rc == TRUE);
-
- // Set timeout for receiving RDATA.
- rc = pgm_transport_set_nak_rdata_ivl (transport, 200 * 1000);
- zmq_assert (rc == TRUE);
-
- // Set retries for NAK without NCF/DATA (NAK_DATA_RETRIES).
- rc = pgm_transport_set_nak_data_retries (transport, 5);
- zmq_assert (rc == TRUE);
-
- // Set retries for NCF after NAK (NAK_NCF_RETRIES).
- rc = pgm_transport_set_nak_ncf_retries (transport, 2);
- zmq_assert (rc == TRUE);
-
- // Set timeout for removing a dead peer [us].
- rc = pgm_transport_set_peer_expiry (transport, 5 * 8192 * 1000);
- zmq_assert (rc == TRUE);
+ // PGM transport GSI.
+ struct pgm_sockaddr_t addr;
- // Set expiration time of SPM Requests [us].
- rc = pgm_transport_set_spmr_expiry (transport, 25 * 1000);
- zmq_assert (rc == TRUE);
+ memset (&addr, 0, sizeof(addr));
+ addr.sa_port = port_number;
+ addr.sa_addr.sport = DEFAULT_DATA_SOURCE_PORT;
- // Set the size of the receive window.
- // Data rate is in [B/s]. options.rate is in [kb/s].
- if (options.rate <= 0) {
- errno = EINVAL;
- return -1;
- }
- rc = pgm_transport_set_rxw_max_rte (transport,
- options.rate * 1000 / 8);
- if (rc != TRUE) {
- errno = EINVAL;
- return -1;
- }
-
- // Recovery interval [s].
- if (options.recovery_ivl <= 0) {
- errno = EINVAL;
- return -1;
- }
- rc = pgm_transport_set_rxw_secs (transport, options.recovery_ivl);
- if (rc != TRUE) {
- errno = EINVAL;
- return -1;
- }
+ if (options.identity.size () > 0) {
+ // Create gsi from identity.
+ if (!pgm_gsi_create_from_data (&addr.sa_addr.gsi, options.identity.data (), options.identity.size ()))
+ goto err_abort;
} else {
- // Sender transport.
-
- // Waiting pipe won't be read.
- rc = pgm_transport_set_send_only (transport, TRUE);
- zmq_assert (rc == TRUE);
+ // Generate random gsi.
+ std::string gsi_base = uuid_t ().to_string ();
+ if (!pgm_gsi_create_from_string (&addr.sa_addr.gsi, gsi_base.c_str (), -1))
+ goto err_abort;
+ }
- if (options.sndbuf) {
- rc = pgm_transport_set_sndbuf (transport, (int) options.sndbuf);
- if (rc != TRUE)
- return -1;
- }
+ // Bind a transport to the specified network devices.
+ struct pgm_interface_req_t if_req;
+ memset (&if_req, 0, sizeof(if_req));
+ if_req.ir_interface = res->ai_recv_addrs[0].gsr_interface;
+ if_req.ir_scope_id = 0;
+ if (AF_INET6 == sa_family) {
+ struct sockaddr_in6 sa6;
+ memcpy (&sa6, &res->ai_recv_addrs[0].gsr_group, sizeof (sa6));
+ if_req.ir_scope_id = sa6.sin6_scope_id;
+ }
+ if (!pgm_bind3 (sock, &addr, sizeof (addr), &if_req, sizeof (if_req), &if_req, sizeof (if_req), &pgm_error)) {
+// Invalid parameters don't set pgm_error_t
+ zmq_assert (pgm_error != NULL);
+ if ((pgm_error->domain == PGM_ERROR_DOMAIN_SOCKET ||
+ pgm_error->domain == PGM_ERROR_DOMAIN_IF) && (
+ pgm_error->code != PGM_ERROR_INVAL &&
+ pgm_error->code != PGM_ERROR_BADF &&
+ pgm_error->code != PGM_ERROR_FAULT))
+// User, host, or network configuration or transient error
+ goto err_abort;
+
+// Fatal OpenPGM internal error
+ zmq_assert (false);
+ }
- // Set the size of the send window.
- // Data rate is in [B/s] options.rate is in [kb/s].
- if (options.rate <= 0) {
- errno = EINVAL;
- return -1;
- }
- rc = pgm_transport_set_txw_max_rte (transport,
- options.rate * 1000 / 8);
- if (rc != TRUE) {
- errno = EINVAL;
- return -1;
- }
+ // Join IP multicast groups
+ for (unsigned i = 0; i < res->ai_recv_addrs_len; i++)
+ {
+ if (!pgm_setsockopt (sock, IPPROTO_PGM, PGM_JOIN_GROUP, &res->ai_recv_addrs[i], sizeof (struct group_req)))
+ goto err_abort;
+ }
+ if (!pgm_setsockopt (sock, IPPROTO_PGM, PGM_SEND_GROUP, &res->ai_send_addrs[0], sizeof (struct group_req)))
+ goto err_abort;
- // Recovery interval [s].
- if (options.recovery_ivl <= 0) {
- errno = EINVAL;
- return -1;
- }
- rc = pgm_transport_set_txw_secs (transport, options.recovery_ivl);
- if (rc != TRUE) {
- errno = EINVAL;
- return -1;
- }
+ pgm_freeaddrinfo (res);
+ res = NULL;
- // Set interval of background SPM packets [us].
- rc = pgm_transport_set_ambient_spm (transport, 8192 * 1000);
- zmq_assert (rc == TRUE);
-
- // Set intervals of data flushing SPM packets [us].
- guint spm_heartbeat[] = {4 * 1000, 4 * 1000, 8 * 1000, 16 * 1000,
- 32 * 1000, 64 * 1000, 128 * 1000, 256 * 1000, 512 * 1000,
- 1024 * 1000, 2048 * 1000, 4096 * 1000, 8192 * 1000};
- rc = pgm_transport_set_heartbeat_spm (transport, spm_heartbeat,
- G_N_ELEMENTS(spm_heartbeat));
- zmq_assert (rc == TRUE);
- }
-
- // Enable multicast loopback.
- if (options.use_multicast_loop) {
- rc = pgm_transport_set_multicast_loop (transport, true);
- zmq_assert (rc == TRUE);
+ // Set IP level parameters
+ {
+ const int nonblocking = 1,
+ multicast_loop = options.use_multicast_loop ? 1 : 0,
+ multicast_hops = 16,
+ dscp = 0x2e << 2; /* Expedited Forwarding PHB for network elements, no ECN. */
+
+ if (!pgm_setsockopt (sock, IPPROTO_PGM, PGM_MULTICAST_LOOP, &multicast_loop, sizeof (multicast_loop)) ||
+ !pgm_setsockopt (sock, IPPROTO_PGM, PGM_MULTICAST_HOPS, &multicast_hops, sizeof (multicast_hops)))
+ goto err_abort;
+ if (AF_INET6 != sa_family &&
+ !pgm_setsockopt (sock, IPPROTO_PGM, PGM_TOS, &dscp, sizeof (dscp)))
+ goto err_abort;
+ if (!pgm_setsockopt (sock, IPPROTO_PGM, PGM_NOBLOCK, &nonblocking, sizeof (nonblocking)))
+ goto err_abort;
}
- // Bind a transport to the specified network devices.
- if (!pgm_transport_bind (transport, &pgm_error)) {
- if (pgm_error->domain == PGM_IF_ERROR && (
- pgm_error->code == PGM_IF_ERROR_INVAL ||
- pgm_error->code == PGM_IF_ERROR_XDEV ||
- pgm_error->code == PGM_IF_ERROR_NODEV ||
- pgm_error->code == PGM_IF_ERROR_NOTUNIQ ||
- pgm_error->code == PGM_IF_ERROR_ADDRFAMILY ||
- pgm_error->code == PGM_IF_ERROR_FAMILY ||
- pgm_error->code == PGM_IF_ERROR_NODATA ||
- pgm_error->code == PGM_IF_ERROR_NONAME ||
- pgm_error->code == PGM_IF_ERROR_SERVICE)) {
- g_error_free (pgm_error);
- errno = EINVAL;
- return -1;
- }
- if (pgm_error->domain == PGM_TRANSPORT_ERROR && (
- pgm_error->code == PGM_TRANSPORT_ERROR_FAILED)) {
- g_error_free (pgm_error);
- errno = EINVAL;
- return -1;
- }
-
- zmq_assert (false);
+ // Connect PGM transport to start state machine.
+ if (!pgm_connect (sock, &pgm_error)) {
+// Invalid parameters don't set pgm_error_t
+ zmq_assert (pgm_error != NULL);
+ goto err_abort;
}
// For receiver transport preallocate pgm_msgv array.
- // TODO: ?
if (receiver) {
zmq_assert (in_batch_size > 0);
size_t max_tsdu_size = get_max_tsdu_size ();
@@ -340,91 +322,162 @@ int zmq::pgm_socket_t::init (bool udp_encapsulation_, const char *network_)
}
return 0;
+
+err_abort:
+ if (sock != NULL) {
+ pgm_close (sock, FALSE);
+ sock = NULL;
+ }
+ if (res != NULL) {
+ pgm_freeaddrinfo (res);
+ res = NULL;
+ }
+ if (pgm_error != NULL) {
+ pgm_error_free (pgm_error);
+ pgm_error = NULL;
+ }
+ errno = EINVAL;
+ return -1;
}
zmq::pgm_socket_t::~pgm_socket_t ()
{
if (pgm_msgv)
free (pgm_msgv);
- if (transport)
- pgm_transport_destroy (transport, TRUE);
+ if (sock)
+ pgm_close (sock, TRUE);
}
-// Get receiver fds. recv_fd is from transport->recv_sock
-// waiting_pipe_fd is from transport->waiting_pipe [0]
+// Get receiver fds. receive_fd_ is signaled for incoming
+// packets, waiting_pipe_fd_ is signaled for state driven
+// events and data.
void zmq::pgm_socket_t::get_receiver_fds (int *receive_fd_,
int *waiting_pipe_fd_)
{
+ socklen_t socklen;
+ bool rc;
+
zmq_assert (receive_fd_);
zmq_assert (waiting_pipe_fd_);
- // recv_sock2 should not be used - check it.
- zmq_assert (transport->recv_sock2 == -1);
-
- // Check if transport can receive data and can not send.
- zmq_assert (transport->can_recv_data);
- zmq_assert (!transport->can_send_data);
+ socklen = sizeof (*receive_fd_);
+ rc = pgm_getsockopt (sock, IPPROTO_PGM, PGM_RECV_SOCK, receive_fd_, &socklen);
+ zmq_assert (rc);
+ zmq_assert (socklen == sizeof (*receive_fd_));
- // Take FDs directly from transport.
- *receive_fd_ = pgm_transport_get_recv_fd (transport);
- *waiting_pipe_fd_ = pgm_transport_get_pending_fd (transport);
+ socklen = sizeof (*waiting_pipe_fd_);
+ rc = pgm_getsockopt (sock, IPPROTO_PGM, PGM_PENDING_SOCK, waiting_pipe_fd_, &socklen);
+ zmq_assert (rc);
+ zmq_assert (socklen == sizeof (*waiting_pipe_fd_));
}
// Get fds and store them into user allocated memory.
-// sender_fd is from pgm_transport->send_sock.
-// receive_fd_ is from transport->recv_sock.
-// rdata_notify_fd_ is from transport->rdata_notify.
-// pending_notify_fd_ is from transport->pending_notify.
+// send_fd is for non-blocking send wire notifications.
+// receive_fd_ is for incoming back-channel protocol packets.
+// rdata_notify_fd_ is raised for waiting repair transmissions.
+// pending_notify_fd_ is for state driven events.
void zmq::pgm_socket_t::get_sender_fds (int *send_fd_, int *receive_fd_,
int *rdata_notify_fd_, int *pending_notify_fd_)
{
+ socklen_t socklen;
+ bool rc;
+
zmq_assert (send_fd_);
zmq_assert (receive_fd_);
-
zmq_assert (rdata_notify_fd_);
zmq_assert (pending_notify_fd_);
- // recv_sock2 should not be used - check it.
- zmq_assert (transport->recv_sock2 == -1);
-
- // Check if transport can send data and can not receive.
- zmq_assert (transport->can_send_data);
- zmq_assert (!transport->can_recv_data);
-
- // Take FDs from transport.
- *send_fd_ = pgm_transport_get_send_fd (transport);
- *receive_fd_ = pgm_transport_get_recv_fd (transport);
-
- *rdata_notify_fd_ = pgm_transport_get_repair_fd (transport);
- *pending_notify_fd_ = pgm_transport_get_pending_fd (transport);
+ socklen = sizeof (*send_fd_);
+ rc = pgm_getsockopt (sock, IPPROTO_PGM, PGM_SEND_SOCK, send_fd_, &socklen);
+ zmq_assert (rc);
+ zmq_assert (socklen == sizeof (*send_fd_));
+
+ socklen = sizeof (*receive_fd_);
+ rc = pgm_getsockopt (sock, IPPROTO_PGM, PGM_RECV_SOCK, receive_fd_, &socklen);
+ zmq_assert (rc);
+ zmq_assert (socklen == sizeof (*receive_fd_));
+
+ socklen = sizeof (*rdata_notify_fd_);
+ rc = pgm_getsockopt (sock, IPPROTO_PGM, PGM_REPAIR_SOCK, rdata_notify_fd_, &socklen);
+ zmq_assert (rc);
+ zmq_assert (socklen == sizeof (*rdata_notify_fd_));
+
+ socklen = sizeof (*pending_notify_fd_);
+ rc = pgm_getsockopt (sock, IPPROTO_PGM, PGM_PENDING_SOCK, pending_notify_fd_, &socklen);
+ zmq_assert (rc);
+ zmq_assert (socklen == sizeof (*pending_notify_fd_));
}
// Send one APDU, transmit window owned memory.
+// data_len_ must be less than one TPDU.
size_t zmq::pgm_socket_t::send (unsigned char *data_, size_t data_len_)
{
size_t nbytes = 0;
- PGMIOStatus status = pgm_send (transport, data_, data_len_, &nbytes);
+ const int status = pgm_send (sock, data_, data_len_, &nbytes);
- if (nbytes != data_len_) {
- zmq_assert (status == PGM_IO_STATUS_RATE_LIMITED);
- zmq_assert (nbytes == 0);
- }
-
// We have to write all data as one packet.
- if (nbytes > 0)
+ if (nbytes > 0) {
+ zmq_assert (status == PGM_IO_STATUS_NORMAL);
zmq_assert ((ssize_t) nbytes == (ssize_t) data_len_);
+ } else {
+ zmq_assert (status == PGM_IO_STATUS_RATE_LIMITED || status == PGM_IO_STATUS_WOULD_BLOCK);
+
+ if (status == PGM_IO_STATUS_RATE_LIMITED)
+ errno = ENOMEM;
+ else
+ errno = EBUSY;
+ }
+
+ // Save return value.
+ last_tx_status = status;
return nbytes;
}
+long zmq::pgm_socket_t::get_rx_timeout ()
+{
+ if (last_rx_status != PGM_IO_STATUS_RATE_LIMITED && last_rx_status != PGM_IO_STATUS_TIMER_PENDING)
+ return -1;
+
+ struct timeval tv;
+ socklen_t optlen = sizeof (tv);
+ const bool rc = pgm_getsockopt (sock, IPPROTO_PGM, last_rx_status == PGM_IO_STATUS_RATE_LIMITED ? PGM_RATE_REMAIN : PGM_TIME_REMAIN, &tv, &optlen);
+ zmq_assert (rc);
+
+ const long timeout = (tv.tv_sec * 1000) + (tv.tv_usec / 1000);
+
+ return timeout;
+}
+
+long zmq::pgm_socket_t::get_tx_timeout ()
+{
+ if (last_tx_status != PGM_IO_STATUS_RATE_LIMITED)
+ return -1;
+
+ struct timeval tv;
+ socklen_t optlen = sizeof (tv);
+ const bool rc = pgm_getsockopt (sock, IPPROTO_PGM, PGM_RATE_REMAIN, &tv, &optlen);
+ zmq_assert (rc);
+
+ const long timeout = (tv.tv_sec * 1000) + (tv.tv_usec / 1000);
+
+ return timeout;
+}
+
// Return max TSDU size without fragmentation from current PGM transport.
size_t zmq::pgm_socket_t::get_max_tsdu_size ()
{
- return (size_t) pgm_transport_max_tsdu (transport, false);
+ int max_tsdu = 0;
+ socklen_t optlen = sizeof (max_tsdu);
+
+ bool rc = pgm_getsockopt (sock, IPPROTO_PGM, PGM_MSS, &max_tsdu, &optlen);
+ zmq_assert (rc);
+ zmq_assert (optlen == sizeof (max_tsdu));
+ return (size_t) max_tsdu;
}
-// pgm_transport_recvmsgv is called to fill the pgm_msgv array up to
+// pgm_recvmsgv is called to fill the pgm_msgv array up to
// pgm_msgv_len. In subsequent calls data from pgm_msgv structure are
// returned.
ssize_t zmq::pgm_socket_t::receive (void **raw_data_, const pgm_tsi_t **tsi_)
@@ -439,6 +492,7 @@ ssize_t zmq::pgm_socket_t::receive (void **raw_data_, const pgm_tsi_t **tsi_)
nbytes_rec = 0;
nbytes_processed = 0;
pgm_msgv_processed = 0;
+ errno = EAGAIN;
return 0;
}
@@ -453,15 +507,18 @@ ssize_t zmq::pgm_socket_t::receive (void **raw_data_, const pgm_tsi_t **tsi_)
// Receive a vector of Application Protocol Domain Unit's (APDUs)
// from the transport.
- GError *pgm_error = NULL;
+ pgm_error_t *pgm_error = NULL;
- const PGMIOStatus status = pgm_recvmsgv (transport, pgm_msgv,
- pgm_msgv_len, MSG_DONTWAIT, &nbytes_rec, &pgm_error);
+ const int status = pgm_recvmsgv (sock, pgm_msgv,
+ pgm_msgv_len, MSG_ERRQUEUE, &nbytes_rec, &pgm_error);
+ // Invalid parameters
zmq_assert (status != PGM_IO_STATUS_ERROR);
+ last_rx_status = status;
+
// In a case when no ODATA/RDATA fired POLLIN event (SPM...)
- // pgm_recvmsg returns ?.
+ // pgm_recvmsg returns PGM_IO_STATUS_TIMER_PENDING.
if (status == PGM_IO_STATUS_TIMER_PENDING) {
zmq_assert (nbytes_rec == 0);
@@ -469,21 +526,46 @@ ssize_t zmq::pgm_socket_t::receive (void **raw_data_, const pgm_tsi_t **tsi_)
// In case if no RDATA/ODATA caused POLLIN 0 is
// returned.
nbytes_rec = 0;
+ errno = EBUSY;
+ return 0;
+ }
+
+ // Send SPMR, NAK, ACK is rate limited.
+ if (status == PGM_IO_STATUS_RATE_LIMITED) {
+
+ zmq_assert (nbytes_rec == 0);
+
+ // In case no RDATA/ODATA caused POLLIN, 0 is
+ // returned.
+ nbytes_rec = 0;
+ errno = ENOMEM;
+ return 0;
+ }
+
+ // No peers and hence no incoming packets.
+ if (status == PGM_IO_STATUS_WOULD_BLOCK) {
+
+ zmq_assert (nbytes_rec == 0);
+
+ // In case no RDATA/ODATA caused POLLIN, 0 is
+ // returned.
+ nbytes_rec = 0;
+ errno = EAGAIN;
return 0;
}
// Data loss.
if (status == PGM_IO_STATUS_RESET) {
- pgm_peer_t* peer = (pgm_peer_t*) transport->peers_pending->data;
+ struct pgm_sk_buff_t* skb = pgm_msgv[0].msgv_skb[0];
// Save lost data TSI.
- *tsi_ = &peer->tsi;
+ *tsi_ = &skb->tsi;
nbytes_rec = 0;
// In case of data loss -1 is returned.
errno = EINVAL;
- g_error_free (pgm_error);
+ pgm_free_skb (skb);
return -1;
}
@@ -494,6 +576,7 @@ ssize_t zmq::pgm_socket_t::receive (void **raw_data_, const pgm_tsi_t **tsi_)
zmq_assert (pgm_msgv_processed <= pgm_msgv_len);
}
+ // Zero byte payloads are valid in PGM, but not in the 0MQ protocol.
zmq_assert (nbytes_rec > 0);
// Only one APDU per pgm_msgv_t structure is allowed.
@@ -522,16 +605,26 @@ void zmq::pgm_socket_t::process_upstream ()
pgm_msgv_t dummy_msg;
size_t dummy_bytes = 0;
- GError *pgm_error = NULL;
+ pgm_error_t *pgm_error = NULL;
- PGMIOStatus status = pgm_recvmsgv (transport, &dummy_msg,
- 1, MSG_DONTWAIT, &dummy_bytes, &pgm_error);
+ const int status = pgm_recvmsgv (sock, &dummy_msg,
+ 1, MSG_ERRQUEUE, &dummy_bytes, &pgm_error);
+ // Invalid parameters
zmq_assert (status != PGM_IO_STATUS_ERROR);
// No data should be returned.
zmq_assert (dummy_bytes == 0 && (status == PGM_IO_STATUS_TIMER_PENDING ||
- status == PGM_IO_STATUS_RATE_LIMITED));
+ status == PGM_IO_STATUS_RATE_LIMITED || status == PGM_IO_STATUS_WOULD_BLOCK));
+
+ last_rx_status = status;
+
+ if (status == PGM_IO_STATUS_TIMER_PENDING)
+ errno = EBUSY;
+ else if (status == PGM_IO_STATUS_RATE_LIMITED)
+ errno = ENOMEM;
+ else
+ errno = EAGAIN;
}
#endif
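For orientation, the network_ strings documented above are the transport-specific part of a pgm:// (raw IP) or epgm:// (UDP-encapsulated) endpoint passed through the public API. A minimal sketch, assuming the 2.0-era single-argument zmq_init and purely illustrative interface, group and port values:

#include <zmq.h>
#include <assert.h>

int main ()
{
    void *ctx = zmq_init (1);
    void *pub = zmq_socket (ctx, ZMQ_PUB);

    //  Everything after "epgm://" is what pgm_socket_t::init () receives
    //  as network_, i.e. "eth0;239.192.0.1:7500" here.
    int rc = zmq_connect (pub, "epgm://eth0;239.192.0.1:7500");
    assert (rc == 0);

    zmq_close (pub);
    zmq_term (ctx);
    return 0;
}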
diff --git a/src/pgm_socket.hpp b/src/pgm_socket.hpp
index b9f55d1..10af0c2 100644
--- a/src/pgm_socket.hpp
+++ b/src/pgm_socket.hpp
@@ -28,6 +28,7 @@
#include "windows.hpp"
#endif
+#define __PGM_WININT_H__
#include <pgm/pgm.h>
#include "options.hpp"
@@ -67,6 +68,9 @@ namespace zmq
// Receive data from pgm socket.
ssize_t receive (void **data_, const pgm_tsi_t **tsi_);
+ long get_rx_timeout ();
+ long get_tx_timeout ();
+
// POLLIN on sender side should mean NAK or SPMR receiving.
// process_upstream function is used to handle such a situation.
void process_upstream ();
@@ -74,7 +78,9 @@ namespace zmq
private:
// OpenPGM transport
- pgm_transport_t* transport;
+ pgm_sock_t* sock;
+
+ int last_rx_status, last_tx_status;
// Associated socket options.
options_t options;
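The new get_rx_timeout ()/get_tx_timeout () accessors pair with the errno convention used by receive () and send () above (EAGAIN: nothing pending, EBUSY: timer pending, ENOMEM: rate limited). A hedged sketch of how a caller might combine them with poller_base_t::add_timer (), which appears later in this patch; try_receive (), sink and timer_id are illustrative names, not APIs from the patch:

#include <errno.h>
#include <sys/types.h>

//  Returns true if data was consumed, false if the caller should wait for
//  either POLLIN or the scheduled timer.
static bool try_receive (zmq::pgm_socket_t &sock, zmq::poller_base_t &poller,
    zmq::i_poll_events *sink, int timer_id)
{
    void *data = NULL;
    const pgm_tsi_t *tsi = NULL;
    ssize_t n = sock.receive (&data, &tsi);
    if (n > 0)
        return true;                     //  Got (part of) an APDU.
    if (n == 0 && errno == EAGAIN)
        return false;                    //  Nothing pending; wait for POLLIN.
    //  n == -1 signals data loss (errno == EINVAL) and is handled elsewhere.

    //  EBUSY (timer pending) or ENOMEM (rate limited): re-check after the
    //  interval reported by the socket instead of busy-looping.
    const long ms = sock.get_rx_timeout ();
    if (ms >= 0)
        poller.add_timer ((int) ms, sink, timer_id);
    return false;
}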
diff --git a/src/pipe.cpp b/src/pipe.cpp
index 200beb0..65e9b0b 100644
--- a/src/pipe.cpp
+++ b/src/pipe.cpp
@@ -17,31 +17,55 @@
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <new>
+
#include "../include/zmq.h"
#include "pipe.hpp"
+#include "likely.hpp"
-zmq::reader_t::reader_t (object_t *parent_, uint64_t lwm_) :
+zmq::reader_t::reader_t (object_t *parent_, pipe_t *pipe_,
+ uint64_t lwm_) :
object_t (parent_),
- pipe (NULL),
- peer (NULL),
+ active (true),
+ pipe (pipe_),
+ writer (NULL),
lwm (lwm_),
msgs_read (0),
- endpoint (NULL)
-{}
+ sink (NULL),
+ terminating (false)
+{
+ // Note that writer is not set here. Writer will inform reader about its
+ // address once it is created (via set_writer method).
+}
+
+void zmq::reader_t::set_writer (writer_t *writer_)
+{
+ zmq_assert (!writer);
+ writer = writer_;
+}
zmq::reader_t::~reader_t ()
{
- if (pipe)
- unregister_pipe (pipe);
+ // The pipe as such is owned and deallocated by the reader object.
+ // The point is that the reader processes the last step of the termination
+ // handshaking (term_ack).
+ zmq_assert (pipe);
+
+ // First delete all the unread messages in the pipe. We have to do it by
+ // hand because zmq_msg_t is a POD, not a class, so there's no associated
+ // destructor.
+ zmq_msg_t msg;
+ while (pipe->read (&msg))
+ zmq_msg_close (&msg);
+
+ delete pipe;
}
-void zmq::reader_t::set_pipe (pipe_t *pipe_)
+void zmq::reader_t::set_event_sink (i_reader_events *sink_)
{
- zmq_assert (!pipe);
- pipe = pipe_;
- peer = &pipe->writer;
- register_pipe (pipe);
+ zmq_assert (!sink);
+ sink = sink_;
}
bool zmq::reader_t::is_delimiter (zmq_msg_t &msg_)
@@ -53,19 +77,24 @@ bool zmq::reader_t::is_delimiter (zmq_msg_t &msg_)
bool zmq::reader_t::check_read ()
{
+ if (!active)
+ return false;
+
// Check if there's an item in the pipe.
- // If not, deactivate the pipe.
if (!pipe->check_read ()) {
- endpoint->kill (this);
+ active = false;
return false;
}
// If the next item in the pipe is message delimiter,
// initiate its termination.
if (pipe->probe (is_delimiter)) {
- if (endpoint)
- endpoint->detach_inpipe (this);
- term ();
+ zmq_msg_t msg;
+ bool ok = pipe->read (&msg);
+ zmq_assert (ok);
+ if (sink)
+ sink->delimited (this);
+ terminate ();
return false;
}
@@ -74,17 +103,20 @@ bool zmq::reader_t::check_read ()
bool zmq::reader_t::read (zmq_msg_t *msg_)
{
+ if (!active)
+ return false;
+
if (!pipe->read (msg_)) {
- endpoint->kill (this);
+ active = false;
return false;
}
// If delimiter was read, start termination process of the pipe.
unsigned char *offset = 0;
if (msg_->content == (void*) (offset + ZMQ_DELIMITER)) {
- if (endpoint)
- endpoint->detach_inpipe (this);
- term ();
+ if (sink)
+ sink->delimited (this);
+ terminate ();
return false;
}
@@ -92,87 +124,104 @@ bool zmq::reader_t::read (zmq_msg_t *msg_)
msgs_read++;
if (lwm > 0 && msgs_read % lwm == 0)
- send_reader_info (peer, msgs_read);
+ send_activate_writer (writer, msgs_read);
return true;
}
-void zmq::reader_t::set_endpoint (i_endpoint *endpoint_)
+void zmq::reader_t::terminate ()
{
- endpoint = endpoint_;
-}
+ // If termination was already started by the peer, do nothing.
+ if (terminating)
+ return;
-void zmq::reader_t::term ()
-{
- endpoint = NULL;
- send_pipe_term (peer);
+ active = false;
+ terminating = true;
+ send_pipe_term (writer);
}
-void zmq::reader_t::process_revive ()
+void zmq::reader_t::process_activate_reader ()
{
- // Beacuse of command throttling mechanism, incoming termination request
- // may not have been processed before subsequent send.
- // In that case endpoint is NULL.
- if (endpoint)
- endpoint->revive (this);
+ // Forward the event to the sink (either socket or session).
+ active = true;
+ sink->activated (this);
}
void zmq::reader_t::process_pipe_term_ack ()
{
- peer = NULL;
- delete pipe;
+ // At this point writer may already be deallocated.
+ // For safety's sake drop the reference to it.
+ writer = NULL;
+
+ // Notify owner about the termination.
+ zmq_assert (sink);
+ sink->terminated (this);
+
+ // Deallocate resources.
+ delete this;
}
-zmq::writer_t::writer_t (object_t *parent_,
+zmq::writer_t::writer_t (object_t *parent_, pipe_t *pipe_, reader_t *reader_,
uint64_t hwm_, int64_t swap_size_) :
object_t (parent_),
- pipe (NULL),
- peer (NULL),
+ active (true),
+ pipe (pipe_),
+ reader (reader_),
hwm (hwm_),
msgs_read (0),
msgs_written (0),
- msg_store (NULL),
- extra_msg_flag (false),
- stalled (false),
- pending_close (false),
- endpoint (NULL)
+ swap (NULL),
+ sink (NULL),
+ swapping (false),
+ pending_delimiter (false),
+ terminating (false)
{
+ // Inform reader about the writer.
+ reader->set_writer (this);
+
+ // Open the swap file, if required.
if (swap_size_ > 0) {
- msg_store = new (std::nothrow) msg_store_t (swap_size_);
- if (msg_store != NULL) {
- if (msg_store->init () < 0) {
- delete msg_store;
- msg_store = NULL;
- }
- }
+ swap = new (std::nothrow) swap_t (swap_size_);
+ zmq_assert (swap);
+ int rc = swap->init ();
+ zmq_assert (rc == 0);
}
}
-void zmq::writer_t::set_endpoint (i_endpoint *endpoint_)
-{
- endpoint = endpoint_;
-}
-
zmq::writer_t::~writer_t ()
{
- if (extra_msg_flag)
- zmq_msg_close (&extra_msg);
-
- delete msg_store;
+ if (swap)
+ delete swap;
}
-void zmq::writer_t::set_pipe (pipe_t *pipe_)
+void zmq::writer_t::set_event_sink (i_writer_events *sink_)
{
- zmq_assert (!pipe);
- pipe = pipe_;
- peer = &pipe->reader;
+ zmq_assert (!sink);
+ sink = sink_;
}
bool zmq::writer_t::check_write ()
{
- if (pipe_full () && (msg_store == NULL || msg_store->full () || extra_msg_flag)) {
- stalled = true;
+ // We've already checked and there's no space free for the new message.
+ // There's no point in checking once again.
+ if (unlikely (!active))
return false;
+
+ if (unlikely (swapping)) {
+ if (unlikely (swap->full ())) {
+ active = false;
+ return false;
+ }
+ }
+ else {
+ if (unlikely (pipe_full ())) {
+ if (swap)
+ swapping = true;
+ else {
+ active = false;
+ return false;
+ }
+ }
}
return true;
@@ -180,73 +229,67 @@ bool zmq::writer_t::check_write ()
bool zmq::writer_t::write (zmq_msg_t *msg_)
{
- if (!check_write ())
+ if (unlikely (!check_write ()))
return false;
- if (pipe_full ()) {
- if (msg_store->store (msg_)) {
- if (!(msg_->flags & ZMQ_MSG_MORE))
- msg_store->commit ();
- } else {
- extra_msg = *msg_;
- extra_msg_flag = true;
- }
- }
- else {
- pipe->write (*msg_, msg_->flags & ZMQ_MSG_MORE);
+ if (unlikely (swapping)) {
+ bool stored = swap->store (msg_);
+ zmq_assert (stored);
if (!(msg_->flags & ZMQ_MSG_MORE))
- msgs_written++;
+ swap->commit ();
+ return true;
}
+ pipe->write (*msg_, msg_->flags & ZMQ_MSG_MORE);
+ if (!(msg_->flags & ZMQ_MSG_MORE))
+ msgs_written++;
+
return true;
}
void zmq::writer_t::rollback ()
{
- if (extra_msg_flag && extra_msg.flags & ZMQ_MSG_MORE) {
- zmq_msg_close (&extra_msg);
- extra_msg_flag = false;
+ // Remove incomplete message from the swap.
+ if (unlikely (swapping)) {
+ swap->rollback ();
+ return;
}
- if (msg_store != NULL)
- msg_store->rollback ();
-
+ // Remove incomplete message from the pipe.
zmq_msg_t msg;
- // Remove all incomplete messages from the pipe.
while (pipe->unwrite (&msg)) {
zmq_assert (msg.flags & ZMQ_MSG_MORE);
zmq_msg_close (&msg);
}
-
- if (stalled && endpoint != NULL && check_write ()) {
- stalled = false;
- endpoint->revive (this);
- }
}
void zmq::writer_t::flush ()
{
- if (!pipe->flush ())
- send_revive (peer);
+ // In the swapping mode, flushing is automatically handled by swap object.
+ if (!swapping && !pipe->flush ())
+ send_activate_reader (reader);
}
-void zmq::writer_t::term ()
+void zmq::writer_t::terminate ()
{
- endpoint = NULL;
+ // Prevent double termination.
+ if (terminating)
+ return;
+
+ // Mark the pipe as not available for writing.
+ active = false;
// Rollback any unfinished messages.
rollback ();
- if (msg_store == NULL || (msg_store->empty () && !extra_msg_flag))
- write_delimiter ();
- else
- pending_close = true;
-}
+ if (swapping) {
+ pending_delimiter = true;
+ return;
+ }
-void zmq::writer_t::write_delimiter ()
-{
- // Push delimiter into the pipe.
- // Trick the compiler to belive that the tag is a valid pointer.
+ // Push delimiter into the pipe. Trick the compiler into believing that
+ // the tag is a valid pointer. Note that watermarks are not checked,
+ // thus the delimiter can be written even though the pipe is full.
zmq_msg_t msg;
const unsigned char *offset = 0;
msg.content = (void*) (offset + ZMQ_DELIMITER);
@@ -255,109 +298,110 @@ void zmq::writer_t::write_delimiter ()
flush ();
}
-void zmq::writer_t::process_reader_info (uint64_t msgs_read_)
+void zmq::writer_t::process_activate_writer (uint64_t msgs_read_)
{
- zmq_msg_t msg;
-
+ // Store the reader's message sequence number.
msgs_read = msgs_read_;
- if (msg_store) {
- // Move messages from backing store into pipe.
- while (!pipe_full () && !msg_store->empty ()) {
- msg_store->fetch(&msg);
- // Write message into the pipe.
+ // If we are in the swapping mode, we have some messages in the swap.
+ // Given that pipe is now ready for writing we can move part of the
+ // swap into the pipe.
+ if (swapping) {
+ zmq_msg_t msg;
+ while (!pipe_full () && !swap->empty ()) {
+ swap->fetch(&msg);
pipe->write (msg, msg.flags & ZMQ_MSG_MORE);
if (!(msg.flags & ZMQ_MSG_MORE))
msgs_written++;
}
-
- if (extra_msg_flag) {
- if (!pipe_full ()) {
- pipe->write (extra_msg, extra_msg.flags & ZMQ_MSG_MORE);
- if (!(extra_msg.flags & ZMQ_MSG_MORE))
- msgs_written++;
- extra_msg_flag = false;
- }
- else if (msg_store->store (&extra_msg)) {
- if (!(extra_msg.flags & ZMQ_MSG_MORE))
- msg_store->commit ();
- extra_msg_flag = false;
+ if (!pipe->flush ())
+ send_activate_reader (reader);
+
+ // There are no more messages in the swap. We can switch into
+ // standard in-memory mode.
+ if (swap->empty ()) {
+ swapping = false;
+
+ // Push delimiter into the pipe. Trick the compiler into believing that
+ // the tag is a valid pointer. Note that watermarks are not checked,
+ // thus the delimiter can be written even though the pipe is full.
+ if (pending_delimiter) {
+ zmq_msg_t msg;
+ const unsigned char *offset = 0;
+ msg.content = (void*) (offset + ZMQ_DELIMITER);
+ msg.flags = 0;
+ pipe->write (msg, false);
+ flush ();
+ return;
}
}
-
- if (pending_close && msg_store->empty () && !extra_msg_flag) {
- write_delimiter ();
- pending_close = false;
- }
-
- flush ();
}
- if (stalled && endpoint != NULL) {
- stalled = false;
- endpoint->revive (this);
+ // If the writer was non-active before, let's make it active
+ // (available for writing messages to).
+ if (!active) {
+ active = true;
+ zmq_assert (sink);
+ sink->activated (this);
}
}
void zmq::writer_t::process_pipe_term ()
{
- if (endpoint)
- endpoint->detach_outpipe (this);
+ send_pipe_term_ack (reader);
- reader_t *p = peer;
- peer = NULL;
- send_pipe_term_ack (p);
-}
+ // The above command allows reader to deallocate itself and the pipe.
+ // For safety's sake we'll drop the pointers here.
+ reader = NULL;
+ pipe = NULL;
-bool zmq::writer_t::pipe_full ()
-{
- return hwm > 0 && msgs_written - msgs_read == hwm;
-}
+ // Notify owner about the termination.
+ zmq_assert (sink);
+ sink->terminated (this);
-zmq::pipe_t::pipe_t (object_t *reader_parent_, object_t *writer_parent_,
- uint64_t hwm_, int64_t swap_size_) :
- reader (reader_parent_, compute_lwm (hwm_)),
- writer (writer_parent_, hwm_, swap_size_)
-{
- reader.set_pipe (this);
- writer.set_pipe (this);
+ // Deallocate the resources.
+ delete this;
}
-zmq::pipe_t::~pipe_t ()
+bool zmq::writer_t::pipe_full ()
{
- // Deallocate all the unread messages in the pipe. We have to do it by
- // hand because zmq_msg_t is a POD, not a class, so there's no associated
- // destructor.
- zmq_msg_t msg;
- while (read (&msg))
- zmq_msg_close (&msg);
+ return hwm > 0 && msgs_written - msgs_read == hwm;
}
-uint64_t zmq::pipe_t::compute_lwm (uint64_t hwm_)
+void zmq::create_pipe (object_t *reader_parent_, object_t *writer_parent_,
+ uint64_t hwm_, int64_t swap_size_, reader_t **reader_, writer_t **writer_)
{
- // Following point should be taken into consideration when computing
- // low watermark:
- //
- // 1. LWM has to be less than HWM.
- // 2. LWM cannot be set to very low value (such as zero) as after filling
- // the queue it would start to refill only after all the messages are
- // read from it and thus unnecessarily hold the progress back.
- // 3. LWM cannot be set to very high value (such as HWM-1) as it would
- // result in lock-step filling of the queue - if a single message is read
- // from a full queue, writer thread is resumed to write exactly one
- // message to the queue and go back to sleep immediately. This would
- // result in low performance.
- //
- // Given the 3. it would be good to keep HWM and LWM as far apart as
- // possible to reduce the thread switching overhead to almost zero,
- // say HWM-LWM should be 500 (max_wm_delta).
- //
- // That done, we still we have to account for the cases where HWM<500 thus
- // driving LWM to negative numbers. Let's make LWM 1/2 of HWM in such cases.
-
- if (hwm_ > max_wm_delta * 2)
- return hwm_ - max_wm_delta;
- else
- return (hwm_ + 1) / 2;
+ // First compute the low water mark. The following points should be
+ // taken into consideration:
+ //
+ // 1. LWM has to be less than HWM.
+ // 2. LWM cannot be set to very low value (such as zero) as after filling
+ // the queue it would start to refill only after all the messages are
+ // read from it and thus unnecessarily hold the progress back.
+ // 3. LWM cannot be set to very high value (such as HWM-1) as it would
+ // result in lock-step filling of the queue - if a single message is
+ // read from a full queue, writer thread is resumed to write exactly one
+ // message to the queue and go back to sleep immediately. This would
+ // result in low performance.
+ //
+ // Given the 3. it would be good to keep HWM and LWM as far apart as
+ // possible to reduce the thread switching overhead to almost zero,
+ // say HWM-LWM should be max_wm_delta.
+ //
+ // That done, we still have to account for the cases where
+ // HWM < max_wm_delta thus driving LWM to negative numbers.
+ // Let's make LWM 1/2 of HWM in such cases.
+ uint64_t lwm = (hwm_ > max_wm_delta * 2) ?
+ hwm_ - max_wm_delta : (hwm_ + 1) / 2;
+
+ // Create all three objects pipe consists of: the pipe per se, reader and
+ // writer. The pipe will be handled by the reader and writer; it is never
+ // passed to the user. Reader and writer are returned to the user.
+ pipe_t *pipe = new (std::nothrow) pipe_t ();
+ zmq_assert (pipe);
+ *reader_ = new (std::nothrow) reader_t (reader_parent_, pipe, lwm);
+ zmq_assert (*reader_);
+ *writer_ = new (std::nothrow) writer_t (writer_parent_, pipe, *reader_,
+ hwm_, swap_size_);
+ zmq_assert (*writer_);
}
-
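A worked example of the LWM rule in create_pipe () above; max_wm_delta lives in config.hpp, and the value 500 quoted in the old comment is assumed here purely for illustration:

#include <stdint.h>
#include <stdio.h>

static uint64_t compute_lwm (uint64_t hwm, uint64_t max_wm_delta)
{
    return (hwm > max_wm_delta * 2) ? hwm - max_wm_delta : (hwm + 1) / 2;
}

int main ()
{
    //  Large HWM: keep the watermarks max_wm_delta apart.
    printf ("hwm=10000 -> lwm=%llu\n",
        (unsigned long long) compute_lwm (10000, 500));    //  9500
    //  Small HWM: fall back to (roughly) half of HWM.
    printf ("hwm=100   -> lwm=%llu\n",
        (unsigned long long) compute_lwm (100, 500));      //  50
    return 0;
}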
diff --git a/src/pipe.hpp b/src/pipe.hpp
index ece678a..b154b12 100644
--- a/src/pipe.hpp
+++ b/src/pipe.hpp
@@ -23,25 +23,48 @@
#include "../include/zmq.h"
#include "stdint.hpp"
-#include "i_endpoint.hpp"
-#include "yarray_item.hpp"
+#include "array.hpp"
#include "ypipe.hpp"
-#include "msg_store.hpp"
+#include "swap.hpp"
#include "config.hpp"
#include "object.hpp"
namespace zmq
{
- class reader_t : public object_t, public yarray_item_t
+ // Creates a pipe. Returns pointer to reader and writer objects.
+ void create_pipe (object_t *reader_parent_, object_t *writer_parent_,
+ uint64_t hwm_, int64_t swap_size_, class reader_t **reader_,
+ class writer_t **writer_);
+
+ // The shutdown mechanism for the pipe works as follows: Either endpoint
+ // (or even both of them) can ask the pipe to terminate by calling the
+ // 'terminate' method. The pipe then terminates in an asynchronous manner.
+ // When the part of the shutdown tied to the endpoint is done, it triggers
+ // the 'terminated' event. When the endpoint processes the event and
+ // returns, the associated reader/writer object is deallocated.
+
+ typedef ypipe_t <zmq_msg_t, message_pipe_granularity> pipe_t;
+
+ struct i_reader_events
{
- public:
+ virtual ~i_reader_events () {}
- reader_t (class object_t *parent_, uint64_t lwm_);
- ~reader_t ();
+ virtual void terminated (class reader_t *pipe_) = 0;
+ virtual void activated (class reader_t *pipe_) = 0;
+ virtual void delimited (class reader_t *pipe_) = 0;
+ };
+
+ class reader_t : public object_t, public array_item_t
+ {
+ friend void create_pipe (object_t*, object_t*, uint64_t,
+ int64_t, reader_t**, writer_t**);
+ friend class writer_t;
+
+ public:
- void set_pipe (class pipe_t *pipe_);
- void set_endpoint (i_endpoint *endpoint_);
+ // Specifies the object to get events from the reader.
+ void set_event_sink (i_reader_events *endpoint_);
// Returns true if there is at least one message to read in the pipe.
bool check_read ();
@@ -50,22 +73,31 @@ namespace zmq
bool read (zmq_msg_t *msg_);
// Ask pipe to terminate.
- void term ();
+ void terminate ();
private:
+ reader_t (class object_t *parent_, pipe_t *pipe_, uint64_t lwm_);
+ ~reader_t ();
+
+ // To be called only by writer itself!
+ void set_writer (class writer_t *writer_);
+
// Command handlers.
- void process_revive ();
+ void process_activate_reader ();
void process_pipe_term_ack ();
// Returns true if the message is delimiter; false otherwise.
static bool is_delimiter (zmq_msg_t &msg_);
+ // True, if pipe can be read from.
+ bool active;
+
// The underlying pipe.
- class pipe_t *pipe;
+ pipe_t *pipe;
// Pipe writer associated with the other side of the pipe.
- class writer_t *peer;
+ class writer_t *writer;
// Low watermark for in-memory storage (in messages).
uint64_t lwm;
@@ -73,26 +105,38 @@ namespace zmq
// Number of messages read so far.
uint64_t msgs_read;
- // Endpoint (either session or socket) the pipe is attached to.
- i_endpoint *endpoint;
+ // Sink for the events (either the socket or the session).
+ i_reader_events *sink;
+
+ // True if the 'terminate' method was called or a delimiter
+ // was read from the pipe.
+ bool terminating;
reader_t (const reader_t&);
void operator = (const reader_t&);
};
- class writer_t : public object_t, public yarray_item_t
+ struct i_writer_events
{
- public:
+ virtual ~i_writer_events () {}
- writer_t (class object_t *parent_, uint64_t hwm_, int64_t swap_size_);
- ~writer_t ();
+ virtual void terminated (class writer_t *pipe_) = 0;
+ virtual void activated (class writer_t *pipe_) = 0;
+ };
- void set_pipe (class pipe_t *pipe_);
- void set_endpoint (i_endpoint *endpoint_);
+ class writer_t : public object_t, public array_item_t
+ {
+ friend void create_pipe (object_t*, object_t*, uint64_t,
+ int64_t, reader_t**, writer_t**);
+
+ public:
+
+ // Specifies the object to get events from the writer.
+ void set_event_sink (i_writer_events *endpoint_);
// Checks whether a message can be written to the pipe.
- // If writing the message would cause high watermark to be
- // exceeded, the function returns false.
+ // If writing the message would cause the high watermark and (optionally)
+ // the swap to be exceeded, the function returns false.
bool check_write ();
// Writes a message to the underlying pipe. Returns false if the
@@ -106,27 +150,31 @@ namespace zmq
void flush ();
// Ask pipe to terminate.
- void term ();
+ void terminate ();
private:
- void process_reader_info (uint64_t msgs_read_);
+ writer_t (class object_t *parent_, pipe_t *pipe_, reader_t *reader_,
+ uint64_t hwm_, int64_t swap_size_);
+ ~writer_t ();
// Command handlers.
+ void process_activate_writer (uint64_t msgs_read_);
void process_pipe_term ();
- // Tests whether the pipe is already full.
+ // Tests whether underlying pipe is already full. The swap is not
+ // taken into account.
bool pipe_full ();
- // Write special message to the pipe so that the reader
- // can find out we are finished.
- void write_delimiter ();
+ // True, if this object can be written to. Underlying ypipe may be full
+ // but as long as there's swap space available, this flag is true.
+ bool active;
// The underlying pipe.
- class pipe_t *pipe;
+ pipe_t *pipe;
// Pipe reader associated with the other side of the pipe.
- class reader_t *peer;
+ reader_t *reader;
// High watermark for in-memory storage (in messages).
uint64_t hwm;
@@ -138,46 +186,28 @@ namespace zmq
// Number of messages we have written so far.
uint64_t msgs_written;
- // Pointer to backing store. If NULL, messages are always
+ // Pointer to the message swap. If NULL, messages are always
// kept in main memory.
- msg_store_t *msg_store;
-
- bool extra_msg_flag;
+ swap_t *swap;
- zmq_msg_t extra_msg;
+ // Sink for the events (either the socket or the session).
+ i_writer_events *sink;
- // True iff the last attempt to write a message has failed.
- bool stalled;
+ // If true, swap is active. New messages are to be written to the swap.
+ bool swapping;
- bool pending_close;
+ // If true, there's a delimiter to be written to the pipe after the
+ // swap is emptied.
+ bool pending_delimiter;
- // Endpoint (either session or socket) the pipe is attached to.
- i_endpoint *endpoint;
+ // True if the 'terminate' method was called or the 'pipe_term' command
+ // arrived from the reader.
+ bool terminating;
writer_t (const writer_t&);
void operator = (const writer_t&);
};
- // Message pipe.
- class pipe_t : public ypipe_t <zmq_msg_t, message_pipe_granularity>
- {
- public:
-
- pipe_t (object_t *reader_parent_, object_t *writer_parent_,
- uint64_t hwm_, int64_t swap_size_);
- ~pipe_t ();
-
- reader_t reader;
- writer_t writer;
-
- private:
-
- uint64_t compute_lwm (uint64_t hwm_);
-
- pipe_t (const pipe_t&);
- void operator = (const pipe_t&);
- };
-
}
#endif
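The reworked ownership model is easiest to see from the endpoint's side. A hedged sketch of wiring a pipe between two existing objects with the new API, assuming pipe.hpp is included; attach_pipe_sketch and its parameter names are illustrative, the real callers are socket_base_t and session_t elsewhere in this patch:

void attach_pipe_sketch (zmq::object_t *local, zmq::object_t *peer,
    zmq::i_reader_events *local_events, zmq::i_writer_events *peer_events)
{
    zmq::reader_t *reader = NULL;
    zmq::writer_t *writer = NULL;

    //  One call creates all three objects (pipe, reader, writer); the pipe
    //  itself is never handed out. HWM of 1000, no swap file.
    zmq::create_pipe (local, peer, 1000, 0, &reader, &writer);

    //  Each endpoint registers for activated/delimited/terminated events
    //  on its half of the pipe.
    reader->set_event_sink (local_events);
    writer->set_event_sink (peer_events);

    //  Later, either side may call terminate (); the matching 'terminated'
    //  event fires exactly once, after which the reader/writer deletes
    //  itself, so the endpoint must not touch the pointer again.
}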
diff --git a/src/poll.cpp b/src/poll.cpp
index 1b203db..af50793 100644
--- a/src/poll.cpp
+++ b/src/poll.cpp
@@ -54,9 +54,6 @@ zmq::poll_t::poll_t () :
zmq::poll_t::~poll_t ()
{
worker.stop ();
-
- // Make sure there are no fds registered on shutdown.
- zmq_assert (load.get () == 0);
}
zmq::poll_t::handle_t zmq::poll_t::add_fd (fd_t fd_, i_poll_events *events_)
@@ -69,7 +66,7 @@ zmq::poll_t::handle_t zmq::poll_t::add_fd (fd_t fd_, i_poll_events *events_)
fd_table [fd_].events = events_;
// Increase the load metric of the thread.
- load.add (1);
+ adjust_load (1);
return fd_;
}
@@ -85,7 +82,7 @@ void zmq::poll_t::rm_fd (handle_t handle_)
retired = true;
// Decrease the load metric of the thread.
- load.sub (1);
+ adjust_load (-1);
}
void zmq::poll_t::set_pollin (handle_t handle_)
@@ -112,23 +109,6 @@ void zmq::poll_t::reset_pollout (handle_t handle_)
pollset [index].events &= ~((short) POLLOUT);
}
-void zmq::poll_t::add_timer (i_poll_events *events_)
-{
- timers.push_back (events_);
-}
-
-void zmq::poll_t::cancel_timer (i_poll_events *events_)
-{
- timers_t::iterator it = std::find (timers.begin (), timers.end (), events_);
- if (it != timers.end ())
- timers.erase (it);
-}
-
-int zmq::poll_t::get_load ()
-{
- return load.get ();
-}
-
void zmq::poll_t::start ()
{
worker.start (worker_routine, this);
@@ -143,27 +123,20 @@ void zmq::poll_t::loop ()
{
while (!stopping) {
+ // Execute any due timers.
+ int timeout = (int) execute_timers ();
+
// Wait for events.
- int rc = poll (&pollset [0], pollset.size (),
- timers.empty () ? -1 : max_timer_period);
+ int rc = poll (&pollset [0], pollset.size (), timeout ? timeout : -1);
if (rc == -1 && errno == EINTR)
continue;
errno_assert (rc != -1);
- // Handle timer.
- if (!rc) {
-
- // Use local list of timers as timer handlers may fill new timers
- // into the original array.
- timers_t t;
- std::swap (timers, t);
-
- // Trigger all the timers.
- for (timers_t::iterator it = t.begin (); it != t.end (); it ++)
- (*it)->timer_event ();
+ // If there are no events (i.e. it's a timeout) there's no point
+ // in checking the pollset.
+ if (rc == 0)
continue;
- }
for (pollset_t::size_type i = 0; i != pollset.size (); i++) {
diff --git a/src/poll.hpp b/src/poll.hpp
index f4ae35a..07555b0 100644
--- a/src/poll.hpp
+++ b/src/poll.hpp
@@ -34,7 +34,7 @@
#include "fd.hpp"
#include "thread.hpp"
-#include "atomic_counter.hpp"
+#include "poller_base.hpp"
namespace zmq
{
@@ -42,7 +42,7 @@ namespace zmq
// Implements socket polling mechanism using the POSIX.1-2001
// poll() system call.
- class poll_t
+ class poll_t : public poller_base_t
{
public:
@@ -58,9 +58,6 @@ namespace zmq
void reset_pollin (handle_t handle_);
void set_pollout (handle_t handle_);
void reset_pollout (handle_t handle_);
- void add_timer (struct i_poll_events *events_);
- void cancel_timer (struct i_poll_events *events_);
- int get_load ();
void start ();
void stop ();
@@ -88,20 +85,12 @@ namespace zmq
// If true, there's at least one retired event source.
bool retired;
- // List of all the engines waiting for the timer event.
- typedef std::vector <struct i_poll_events*> timers_t;
- timers_t timers;
-
// If true, thread is in the process of shutting down.
bool stopping;
// Handle of the physical thread doing the I/O work.
thread_t worker;
- // Load of the poller. Currently number of file descriptors
- // registered with the poller.
- atomic_counter_t load;
-
poll_t (const poll_t&);
void operator = (const poll_t&);
};
diff --git a/src/poller_base.cpp b/src/poller_base.cpp
new file mode 100644
index 0000000..c5292d2
--- /dev/null
+++ b/src/poller_base.cpp
@@ -0,0 +1,98 @@
+/*
+ Copyright (c) 2007-2010 iMatix Corporation
+
+ This file is part of 0MQ.
+
+ 0MQ is free software; you can redistribute it and/or modify it under
+ the terms of the Lesser GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ 0MQ is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ Lesser GNU General Public License for more details.
+
+ You should have received a copy of the Lesser GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "poller_base.hpp"
+#include "i_poll_events.hpp"
+#include "err.hpp"
+
+zmq::poller_base_t::poller_base_t ()
+{
+}
+
+zmq::poller_base_t::~poller_base_t ()
+{
+ // Make sure there is no outstanding load at shutdown.
+ zmq_assert (get_load () == 0);
+}
+
+int zmq::poller_base_t::get_load ()
+{
+ return load.get ();
+}
+
+void zmq::poller_base_t::adjust_load (int amount_)
+{
+ if (amount_ > 0)
+ load.add (amount_);
+ else if (amount_ < 0)
+ load.sub (-amount_);
+}
+
+void zmq::poller_base_t::add_timer (int timeout_, i_poll_events *sink_, int id_)
+{
+ uint64_t expiration = clock.now_ms () + timeout_;
+ timer_info_t info = {sink_, id_};
+ timers.insert (timers_t::value_type (expiration, info));
+}
+
+void zmq::poller_base_t::cancel_timer (i_poll_events *sink_, int id_)
+{
+ // Complexity of this operation is O(n). We assume it is rarely used.
+ for (timers_t::iterator it = timers.begin (); it != timers.end (); it++)
+ if (it->second.sink == sink_ && it->second.id == id_) {
+ timers.erase (it);
+ return;
+ }
+
+ // Timer not found.
+ zmq_assert (false);
+}
+
+uint64_t zmq::poller_base_t::execute_timers ()
+{
+ // Fast track.
+ if (timers.empty ())
+ return 0;
+
+ // Get the current time.
+ uint64_t current = clock.now_ms ();
+
+ // Execute the timers that are already due.
+ timers_t::iterator it = timers.begin ();
+ while (it != timers.end ()) {
+
+ // If we have to wait to execute the item, the same will be true about
+ // all the following items (multimap is sorted). Thus we can stop
+ // checking the subsequent timers and return the time to wait for
+ // the next timer (at least 1ms).
+ if (it->first > current)
+ return it->first - current;
+
+ // Trigger the timer.
+ it->second.sink->timer_event (it->second.id);
+
+ // Remove it from the list of active timers.
+ timers_t::iterator o = it;
+ ++it;
+ timers.erase (o);
+ }
+
+ // There are no more timers.
+ return 0;
+}
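Concrete pollers are expected to drive the new timer machinery the way the poll_t::loop () change earlier in this patch does. A hedged sketch; sketch_poller_t and wait_for_events () are placeholders, not part of the patch:

class sketch_poller_t : public zmq::poller_base_t
{
    void loop ()
    {
        while (true) {
            //  Run all timers that are due; the return value says how long
            //  we may block before the next one fires (0 = no timers).
            int timeout = (int) execute_timers ();

            //  Block on the OS polling call for at most 'timeout' ms, or
            //  indefinitely when no timers are registered.
            int rc = wait_for_events (timeout ? timeout : -1);
            if (rc == 0)
                continue;       //  Timed out; nothing to dispatch.

            //  ... dispatch in/out events to the i_poll_events sinks ...
        }
    }

    //  Stand-in for poll ()/epoll_wait ()/kevent ().
    int wait_for_events (int timeout_ms);
};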
diff --git a/src/poller_base.hpp b/src/poller_base.hpp
new file mode 100644
index 0000000..80a4025
--- /dev/null
+++ b/src/poller_base.hpp
@@ -0,0 +1,83 @@
+/*
+ Copyright (c) 2007-2010 iMatix Corporation
+
+ This file is part of 0MQ.
+
+ 0MQ is free software; you can redistribute it and/or modify it under
+ the terms of the Lesser GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ 0MQ is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ Lesser GNU General Public License for more details.
+
+ You should have received a copy of the Lesser GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#ifndef __ZMQ_POLLER_BASE_HPP_INCLUDED__
+#define __ZMQ_POLLER_BASE_HPP_INCLUDED__
+
+#include <map>
+
+#include "clock.hpp"
+#include "atomic_counter.hpp"
+
+namespace zmq
+{
+
+ class poller_base_t
+ {
+ public:
+
+ poller_base_t ();
+ virtual ~poller_base_t ();
+
+ // Returns load of the poller. Note that this function can be
+ // invoked from a different thread!
+ int get_load ();
+
+ // Add a timer that expires in timeout_ milliseconds. After the
+ // expiration, timer_event on the sink_ object will be called with
+ // its argument set to id_.
+ void add_timer (int timeout_, struct i_poll_events *sink_, int id_);
+
+ // Cancel the timer created by sink_ object with ID equal to id_.
+ void cancel_timer (struct i_poll_events *sink_, int id_);
+
+ protected:
+
+ // Called by individual poller implementations to manage the load.
+ void adjust_load (int amount_);
+
+ // Executes any timers that are due. Returns the number of milliseconds
+ // to wait until the next timer is due, or 0 meaning there are no timers.
+ uint64_t execute_timers ();
+
+ private:
+
+ // Clock instance private to this I/O thread.
+ clock_t clock;
+
+ // List of active timers.
+ struct timer_info_t
+ {
+ struct i_poll_events *sink;
+ int id;
+ };
+ typedef std::multimap <uint64_t, timer_info_t> timers_t;
+ timers_t timers;
+
+ // Load of the poller. Currently the number of file descriptors
+ // registered.
+ atomic_counter_t load;
+
+ poller_base_t (const poller_base_t&);
+ void operator = (const poller_base_t&);
+ };
+
+}
+
+#endif
diff --git a/src/pub.cpp b/src/pub.cpp
index 4e73b19..6290e9a 100644
--- a/src/pub.cpp
+++ b/src/pub.cpp
@@ -24,66 +24,71 @@
#include "msg_content.hpp"
#include "pipe.hpp"
-zmq::pub_t::pub_t (class app_thread_t *parent_) :
- socket_base_t (parent_),
- active (0)
+zmq::pub_t::pub_t (class ctx_t *parent_, uint32_t slot_) :
+ socket_base_t (parent_, slot_),
+ active (0),
+ terminating (false)
{
+ options.type = ZMQ_PUB;
options.requires_in = false;
options.requires_out = true;
}
zmq::pub_t::~pub_t ()
{
- for (pipes_t::size_type i = 0; i != pipes.size (); i++)
- pipes [i]->term ();
- pipes.clear ();
+ zmq_assert (pipes.empty ());
}
void zmq::pub_t::xattach_pipes (class reader_t *inpipe_,
class writer_t *outpipe_, const blob_t &peer_identity_)
{
zmq_assert (!inpipe_);
+
+ outpipe_->set_event_sink (this);
+
pipes.push_back (outpipe_);
pipes.swap (active, pipes.size () - 1);
active++;
-}
-void zmq::pub_t::xdetach_inpipe (class reader_t *pipe_)
-{
- zmq_assert (false);
+ if (terminating) {
+ register_term_acks (1);
+ outpipe_->terminate ();
+ }
}
-void zmq::pub_t::xdetach_outpipe (class writer_t *pipe_)
+void zmq::pub_t::process_term (int linger_)
{
- // Remove the pipe from the list; adjust number of active pipes
- // accordingly.
- if (pipes.index (pipe_) < active)
- active--;
- pipes.erase (pipe_);
-}
+ terminating = true;
-void zmq::pub_t::xkill (class reader_t *pipe_)
-{
- zmq_assert (false);
-}
+ // Start shutdown process for all the pipes.
+ for (pipes_t::size_type i = 0; i != pipes.size (); i++)
+ pipes [i]->terminate ();
-void zmq::pub_t::xrevive (class reader_t *pipe_)
-{
- zmq_assert (false);
+ // Wait for the pipes to terminate before terminating the socket itself.
+ register_term_acks (pipes.size ());
+
+ // Continue with the termination immediately.
+ socket_base_t::process_term (linger_);
}
-void zmq::pub_t::xrevive (class writer_t *pipe_)
+void zmq::pub_t::activated (writer_t *pipe_)
{
// Move the pipe to the list of active pipes.
pipes.swap (pipes.index (pipe_), active);
active++;
}
-int zmq::pub_t::xsetsockopt (int option_, const void *optval_,
- size_t optvallen_)
+void zmq::pub_t::terminated (writer_t *pipe_)
{
- errno = EINVAL;
- return -1;
+ // Remove the pipe from the list; adjust number of active pipes
+ // accordingly.
+ if (pipes.index (pipe_) < active)
+ active--;
+ pipes.erase (pipe_);
+
+ // If we are already terminating, wait for one term ack less.
+ if (terminating)
+ unregister_term_ack ();
}
int zmq::pub_t::xsend (zmq_msg_t *msg_, int flags_)
@@ -101,7 +106,7 @@ int zmq::pub_t::xsend (zmq_msg_t *msg_, int flags_)
// For VSMs the copying is straightforward.
if (content == (msg_content_t*) ZMQ_VSM) {
- for (pipes_t::size_type i = 0; i != active;)
+ for (pipes_t::size_type i = 0; i < active;)
if (write (pipes [i], msg_))
i++;
int rc = zmq_msg_init (msg_);
@@ -133,7 +138,7 @@ int zmq::pub_t::xsend (zmq_msg_t *msg_, int flags_)
}
// Push the message to all destinations.
- for (pipes_t::size_type i = 0; i != active;) {
+ for (pipes_t::size_type i = 0; i < active;) {
if (!write (pipes [i], msg_))
content->refcnt.sub (1);
else
@@ -147,17 +152,6 @@ int zmq::pub_t::xsend (zmq_msg_t *msg_, int flags_)
return 0;
}
-int zmq::pub_t::xrecv (zmq_msg_t *msg_, int flags_)
-{
- errno = ENOTSUP;
- return -1;
-}
-
-bool zmq::pub_t::xhas_in ()
-{
- return false;
-}
-
bool zmq::pub_t::xhas_out ()
{
return true;
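The term-ack bookkeeping above is easiest to follow as a short trace; register_term_acks () and unregister_term_ack () are provided by the socket's base class elsewhere in this patch, and the pipe count is illustrative:

//  pub_t shutdown with three attached pipes:
//
//    process_term (linger)      -> terminating = true
//                                  pipes [0..2]->terminate ()
//                                  register_term_acks (3)
//    terminated (pipes [1])     -> unregister_term_ack ()   (2 acks left)
//    xattach_pipes (new pipe)   -> register_term_acks (1)   (3 acks left)
//                                  new pipe->terminate ()
//    terminated (...)           -> one ack less per event, until zero acks
//                                  remain and socket termination completes.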
diff --git a/src/pub.hpp b/src/pub.hpp
index ac3924a..6e02be7 100644
--- a/src/pub.hpp
+++ b/src/pub.hpp
@@ -21,46 +21,49 @@
#define __ZMQ_PUB_HPP_INCLUDED__
#include "socket_base.hpp"
-#include "yarray.hpp"
+#include "array.hpp"
+#include "pipe.hpp"
namespace zmq
{
- class pub_t : public socket_base_t
+ class pub_t : public socket_base_t, public i_writer_events
{
public:
- pub_t (class app_thread_t *parent_);
+ pub_t (class ctx_t *parent_, uint32_t slot_);
~pub_t ();
- // Overloads of functions from socket_base_t.
+ // Implementations of virtual functions from socket_base_t.
void xattach_pipes (class reader_t *inpipe_, class writer_t *outpipe_,
const blob_t &peer_identity_);
- void xdetach_inpipe (class reader_t *pipe_);
- void xdetach_outpipe (class writer_t *pipe_);
- void xkill (class reader_t *pipe_);
- void xrevive (class reader_t *pipe_);
- void xrevive (class writer_t *pipe_);
- int xsetsockopt (int option_, const void *optval_, size_t optvallen_);
int xsend (zmq_msg_t *msg_, int flags_);
- int xrecv (zmq_msg_t *msg_, int flags_);
- bool xhas_in ();
bool xhas_out ();
+ // i_writer_events interface implementation.
+ void activated (writer_t *pipe_);
+ void terminated (writer_t *pipe_);
+
private:
+ // Hook into the termination process.
+ void process_term (int linger_);
+
// Write the message to the pipe. Make the pipe inactive if writing
// fails. In such a case false is returned.
bool write (class writer_t *pipe_, zmq_msg_t *msg_);
// Outbound pipes, i.e. those the socket is sending messages to.
- typedef yarray_t <class writer_t> pipes_t;
+ typedef array_t <class writer_t> pipes_t;
pipes_t pipes;
// Number of active pipes. All the active pipes are located at the
// beginning of the pipes array.
pipes_t::size_type active;
+ // True if termination process is already underway.
+ bool terminating;
+
pub_t (const pub_t&);
void operator = (const pub_t&);
};
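
pub_t now receives pipe notifications through the new i_writer_events callbacks (activated/terminated) registered via set_event_sink, replacing the old xrevive/xdetach_outpipe virtuals. Below is a minimal standalone sketch of that event-sink pattern; the types are simplified stand-ins for the example, not the declarations from pipe.hpp.

#include <cstdio>

// Simplified stand-ins for the event-sink pattern: the writer keeps a pointer
// to an i_writer_events implementation and notifies it when it becomes
// writable again or when its termination handshake completes.
class writer_t;

struct i_writer_events
{
    virtual ~i_writer_events () {}
    virtual void activated (writer_t *pipe_) = 0;
    virtual void terminated (writer_t *pipe_) = 0;
};

class writer_t
{
public:
    writer_t () : sink (0) {}

    void set_event_sink (i_writer_events *sink_) { sink = sink_; }

    //  In the real code these are driven by commands from the reader side.
    void unblock () { if (sink) sink->activated (this); }
    void terminate () { if (sink) sink->terminated (this); }

private:
    i_writer_events *sink;
};

//  A socket-like object registering itself as the sink, the way pub_t does
//  in xattach_pipes with "outpipe_->set_event_sink (this)".
struct fake_socket_t : i_writer_events
{
    void activated (writer_t*) { std::printf ("pipe became writable\n"); }
    void terminated (writer_t*) { std::printf ("pipe terminated\n"); }
};

int main ()
{
    fake_socket_t s;
    writer_t w;
    w.set_event_sink (&s);
    w.unblock ();
    w.terminate ();
    return 0;
}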
diff --git a/src/pull.cpp b/src/pull.cpp
index b2413ee..5bccf06 100644
--- a/src/pull.cpp
+++ b/src/pull.cpp
@@ -22,9 +22,11 @@
#include "pull.hpp"
#include "err.hpp"
-zmq::pull_t::pull_t (class app_thread_t *parent_) :
- socket_base_t (parent_)
+zmq::pull_t::pull_t (class ctx_t *parent_, uint32_t slot_) :
+ socket_base_t (parent_, slot_),
+ fq (this)
{
+ options.type = ZMQ_PULL;
options.requires_in = true;
options.requires_out = false;
}
@@ -40,45 +42,10 @@ void zmq::pull_t::xattach_pipes (class reader_t *inpipe_,
fq.attach (inpipe_);
}
-void zmq::pull_t::xdetach_inpipe (class reader_t *pipe_)
+void zmq::pull_t::process_term (int linger_)
{
- zmq_assert (pipe_);
- fq.detach (pipe_);
-}
-
-void zmq::pull_t::xdetach_outpipe (class writer_t *pipe_)
-{
- // There are no outpipes, so this function shouldn't be called at all.
- zmq_assert (false);
-}
-
-void zmq::pull_t::xkill (class reader_t *pipe_)
-{
- fq.kill (pipe_);
-}
-
-void zmq::pull_t::xrevive (class reader_t *pipe_)
-{
- fq.revive (pipe_);
-}
-
-void zmq::pull_t::xrevive (class writer_t *pipe_)
-{
- zmq_assert (false);
-}
-
-int zmq::pull_t::xsetsockopt (int option_, const void *optval_,
- size_t optvallen_)
-{
- // No special options for this socket type.
- errno = EINVAL;
- return -1;
-}
-
-int zmq::pull_t::xsend (zmq_msg_t *msg_, int flags_)
-{
- errno = ENOTSUP;
- return -1;
+ fq.terminate ();
+ socket_base_t::process_term (linger_);
}
int zmq::pull_t::xrecv (zmq_msg_t *msg_, int flags_)
@@ -91,8 +58,3 @@ bool zmq::pull_t::xhas_in ()
return fq.has_in ();
}
-bool zmq::pull_t::xhas_out ()
-{
- return false;
-}
-
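
pull_t now delegates receiving entirely to fq_t, whose implementation is not part of this hunk. Purely for illustration, the following standalone sketch shows the round-robin fair-queueing idea (a 'current' index walked over the inbound pipes so no single producer is favoured), with std::deque standing in for a reader pipe.

#include <cstddef>
#include <cstdio>
#include <deque>
#include <string>
#include <vector>

// Round-robin fair queueing over several inbound queues: keep a 'current'
// index and advance it so that a busy producer cannot starve the others.
struct fair_queue_t
{
    std::vector<std::deque<std::string> > pipes;
    size_t current;

    fair_queue_t () : current (0) {}

    //  Try to fetch one message, starting at 'current' and wrapping around.
    bool recv (std::string &msg)
    {
        for (size_t count = pipes.size (); count != 0; count--) {
            if (!pipes [current].empty ()) {
                msg = pipes [current].front ();
                pipes [current].pop_front ();
                current = (current + 1) % pipes.size ();
                return true;
            }
            current = (current + 1) % pipes.size ();
        }
        return false;   //  Nothing readable; the real socket reports EAGAIN.
    }
};

int main ()
{
    fair_queue_t fq;
    fq.pipes.resize (3);
    fq.pipes [0].push_back ("a1");
    fq.pipes [0].push_back ("a2");
    fq.pipes [0].push_back ("a3");
    fq.pipes [2].push_back ("c1");

    std::string msg;
    while (fq.recv (msg))
        std::printf ("%s\n", msg.c_str ());   //  a1, c1, a2, a3
    return 0;
}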
diff --git a/src/pull.hpp b/src/pull.hpp
index 7f249e9..d80bf60 100644
--- a/src/pull.hpp
+++ b/src/pull.hpp
@@ -30,25 +30,22 @@ namespace zmq
{
public:
- pull_t (class app_thread_t *parent_);
+ pull_t (class ctx_t *parent_, uint32_t slot_);
~pull_t ();
+ protected:
+
// Overloads of functions from socket_base_t.
void xattach_pipes (class reader_t *inpipe_, class writer_t *outpipe_,
const blob_t &peer_identity_);
- void xdetach_inpipe (class reader_t *pipe_);
- void xdetach_outpipe (class writer_t *pipe_);
- void xkill (class reader_t *pipe_);
- void xrevive (class reader_t *pipe_);
- void xrevive (class writer_t *pipe_);
- int xsetsockopt (int option_, const void *optval_, size_t optvallen_);
- int xsend (zmq_msg_t *msg_, int flags_);
int xrecv (zmq_msg_t *msg_, int flags_);
bool xhas_in ();
- bool xhas_out ();
private:
+ // Hook into the termination process.
+ void process_term (int linger_);
+
// Fair queueing object for inbound pipes.
fq_t fq;
diff --git a/src/push.cpp b/src/push.cpp
index 522101f..4f3fa5b 100644
--- a/src/push.cpp
+++ b/src/push.cpp
@@ -23,9 +23,11 @@
#include "err.hpp"
#include "pipe.hpp"
-zmq::push_t::push_t (class app_thread_t *parent_) :
- socket_base_t (parent_)
+zmq::push_t::push_t (class ctx_t *parent_, uint32_t slot_) :
+ socket_base_t (parent_, slot_),
+ lb (this)
{
+ options.type = ZMQ_PUSH;
options.requires_in = false;
options.requires_out = true;
}
@@ -41,41 +43,10 @@ void zmq::push_t::xattach_pipes (class reader_t *inpipe_,
lb.attach (outpipe_);
}
-void zmq::push_t::xdetach_inpipe (class reader_t *pipe_)
+void zmq::push_t::process_term (int linger_)
{
- // There are no inpipes, so this function shouldn't be called at all.
- zmq_assert (false);
-}
-
-void zmq::push_t::xdetach_outpipe (class writer_t *pipe_)
-{
- zmq_assert (pipe_);
- lb.detach (pipe_);
-}
-
-void zmq::push_t::xkill (class reader_t *pipe_)
-{
- // There are no inpipes, so this function shouldn't be called at all.
- zmq_assert (false);
-}
-
-void zmq::push_t::xrevive (class reader_t *pipe_)
-{
- // There are no inpipes, so this function shouldn't be called at all.
- zmq_assert (false);
-}
-
-void zmq::push_t::xrevive (class writer_t *pipe_)
-{
- lb.revive (pipe_);
-}
-
-int zmq::push_t::xsetsockopt (int option_, const void *optval_,
- size_t optvallen_)
-{
- // No special option for this socket type.
- errno = EINVAL;
- return -1;
+ lb.terminate ();
+ socket_base_t::process_term (linger_);
}
int zmq::push_t::xsend (zmq_msg_t *msg_, int flags_)
@@ -83,17 +54,6 @@ int zmq::push_t::xsend (zmq_msg_t *msg_, int flags_)
return lb.send (msg_, flags_);
}
-int zmq::push_t::xrecv (zmq_msg_t *msg_, int flags_)
-{
- errno = ENOTSUP;
- return -1;
-}
-
-bool zmq::push_t::xhas_in ()
-{
- return false;
-}
-
bool zmq::push_t::xhas_out ()
{
return lb.has_out ();
diff --git a/src/push.hpp b/src/push.hpp
index b3c8d87..ccc98f9 100644
--- a/src/push.hpp
+++ b/src/push.hpp
@@ -30,25 +30,22 @@ namespace zmq
{
public:
- push_t (class app_thread_t *parent_);
+ push_t (class ctx_t *parent_, uint32_t slot_);
~push_t ();
+ protected:
+
// Overloads of functions from socket_base_t.
void xattach_pipes (class reader_t *inpipe_, class writer_t *outpipe_,
const blob_t &peer_identity_);
- void xdetach_inpipe (class reader_t *pipe_);
- void xdetach_outpipe (class writer_t *pipe_);
- void xkill (class reader_t *pipe_);
- void xrevive (class reader_t *pipe_);
- void xrevive (class writer_t *pipe_);
- int xsetsockopt (int option_, const void *optval_, size_t optvallen_);
int xsend (zmq_msg_t *msg_, int flags_);
- int xrecv (zmq_msg_t *msg_, int flags_);
- bool xhas_in ();
bool xhas_out ();
private:
+ // Hook into the termination process.
+ void process_term (int linger_);
+
// Load balancer managing the outbound pipes.
lb_t lb;
diff --git a/src/rep.cpp b/src/rep.cpp
index 34b77c4..2904f06 100644
--- a/src/rep.cpp
+++ b/src/rep.cpp
@@ -21,160 +21,37 @@
#include "rep.hpp"
#include "err.hpp"
-#include "pipe.hpp"
-zmq::rep_t::rep_t (class app_thread_t *parent_) :
- socket_base_t (parent_),
- active (0),
- current (0),
+zmq::rep_t::rep_t (class ctx_t *parent_, uint32_t slot_) :
+ xrep_t (parent_, slot_),
sending_reply (false),
- more (false),
- reply_pipe (NULL)
+ request_begins (true)
{
- options.requires_in = true;
- options.requires_out = true;
-
- // We don't need immediate connect. We'll be able to send messages
- // (replies) only when connection is established and thus requests
- // can arrive anyway.
- options.immediate_connect = false;
+ options.type = ZMQ_REP;
}
zmq::rep_t::~rep_t ()
{
}
-void zmq::rep_t::xattach_pipes (class reader_t *inpipe_,
- class writer_t *outpipe_, const blob_t &peer_identity_)
-{
- zmq_assert (inpipe_ && outpipe_);
- zmq_assert (in_pipes.size () == out_pipes.size ());
-
- in_pipes.push_back (inpipe_);
- in_pipes.swap (active, in_pipes.size () - 1);
- out_pipes.push_back (outpipe_);
- out_pipes.swap (active, out_pipes.size () - 1);
- active++;
-}
-
-void zmq::rep_t::xdetach_inpipe (class reader_t *pipe_)
-{
- zmq_assert (sending_reply || !more || in_pipes [current] != pipe_);
-
- zmq_assert (pipe_);
- zmq_assert (in_pipes.size () == out_pipes.size ());
-
- in_pipes_t::size_type index = in_pipes.index (pipe_);
-
- if (index < active) {
- active--;
- if (current == active)
- current = 0;
- }
-
- if (out_pipes [index])
- out_pipes [index]->term ();
- in_pipes.erase (index);
- out_pipes.erase (index);
-}
-
-void zmq::rep_t::xdetach_outpipe (class writer_t *pipe_)
-{
- zmq_assert (pipe_);
- zmq_assert (in_pipes.size () == out_pipes.size ());
-
- out_pipes_t::size_type index = out_pipes.index (pipe_);
-
- // If the connection we've got the request from disconnects,
- // there's nowhere to send the reply. Forget about the reply pipe.
- // Once the reply is sent it will be dropped.
- if (sending_reply && pipe_ == reply_pipe)
- reply_pipe = NULL;
-
- if (out_pipes.index (pipe_) < active) {
- active--;
- if (current == active)
- current = 0;
- }
-
- if (in_pipes [index])
- in_pipes [index]->term ();
- in_pipes.erase (index);
- out_pipes.erase (index);
-}
-
-void zmq::rep_t::xkill (class reader_t *pipe_)
-{
- // Move the pipe to the list of inactive pipes.
- in_pipes_t::size_type index = in_pipes.index (pipe_);
- active--;
- in_pipes.swap (index, active);
- out_pipes.swap (index, active);
-}
-
-void zmq::rep_t::xrevive (class reader_t *pipe_)
-{
- // Move the pipe to the list of active pipes.
- in_pipes_t::size_type index = in_pipes.index (pipe_);
- in_pipes.swap (index, active);
- out_pipes.swap (index, active);
- active++;
-}
-
-void zmq::rep_t::xrevive (class writer_t *pipe_)
-{
-}
-
-int zmq::rep_t::xsetsockopt (int option_, const void *optval_,
- size_t optvallen_)
-{
- errno = EINVAL;
- return -1;
-}
-
int zmq::rep_t::xsend (zmq_msg_t *msg_, int flags_)
{
+ // If we are in the middle of receiving a request, we cannot send a reply.
if (!sending_reply) {
errno = EFSM;
return -1;
}
- if (reply_pipe) {
+ bool more = (msg_->flags & ZMQ_MSG_MORE);
- // Push message to the reply pipe.
- bool written = reply_pipe->write (msg_);
- zmq_assert (!more || written);
+ // Push message to the reply pipe.
+ int rc = xrep_t::xsend (msg_, flags_);
+ if (rc != 0)
+ return rc;
- // The pipe is full...
- // When this happens, we simply return an error.
- // This makes REP sockets vulnerable to DoS attack when
- // misbehaving requesters stop collecting replies.
- // TODO: Tear down the underlying connection (?)
- if (!written) {
- errno = EAGAIN;
- return -1;
- }
-
- more = msg_->flags & ZMQ_MSG_MORE;
- }
- else {
-
- // If the requester have disconnected in the meantime, drop the reply.
- more = msg_->flags & ZMQ_MSG_MORE;
- zmq_msg_close (msg_);
- }
-
- // Flush the reply to the requester.
- if (!more) {
- if (reply_pipe)
- reply_pipe->flush ();
+ // If the reply is complete, flip the FSM back to the request-receiving state.
+ if (!more)
sending_reply = false;
- reply_pipe = NULL;
- }
-
- // Detach the message from the data buffer.
- int rc = zmq_msg_init (msg_);
- zmq_assert (rc == 0);
return 0;
}
@@ -187,63 +64,44 @@ int zmq::rep_t::xrecv (zmq_msg_t *msg_, int flags_)
return -1;
}
- // Deallocate old content of the message.
- zmq_msg_close (msg_);
-
- // We haven't started reading a request yet...
- if (!more) {
+ if (request_begins) {
- // Round-robin over the pipes to get next message.
- int count;
- for (count = active; count != 0; count--) {
- if (in_pipes [current]->read (msg_))
- break;
- current++;
- if (current >= active)
- current = 0;
- }
+ // Copy the backtrace stack to the reply pipe.
+ bool bottom = false;
+ while (!bottom) {
- // No message is available. Initialise the output parameter
- // to be a 0-byte message.
- if (count == 0) {
- zmq_msg_init (msg_);
- errno = EAGAIN;
- return -1;
- }
+ // TODO: What if request can be read but reply pipe is not
+ // ready for writing?
- // We are aware of a new message now. Setup the reply pipe.
- reply_pipe = out_pipes [current];
+ // Get next part of the backtrace stack.
+ int rc = xrep_t::xrecv (msg_, flags_);
+ if (rc != 0)
+ return rc;
+ zmq_assert (msg_->flags & ZMQ_MSG_MORE);
- // Copy the routing info to the reply pipe.
- while (true) {
+ // An empty message part delimits the backtrace stack.
+ bottom = (zmq_msg_size (msg_) == 0);
- // Push message to the reply pipe.
- // TODO: What if the pipe is full?
- // Tear down the underlying connection?
- bool written = reply_pipe->write (msg_);
- zmq_assert (written);
-
- // Message part of zero size delimits the traceback stack.
- if (zmq_msg_size (msg_) == 0)
- break;
-
- // Get next part of the message.
- bool fetched = in_pipes [current]->read (msg_);
- zmq_assert (fetched);
+ // Push it to the reply pipe.
+ rc = xrep_t::xsend (msg_, flags_);
+ zmq_assert (rc == 0);
}
+
+ request_begins = false;
}
- // Now the routing info is processed. Get the first part
+ // Now the routing info is safely stored. Get the first part
// of the message payload and exit.
- bool fetched = in_pipes [current]->read (msg_);
- zmq_assert (fetched);
- more = msg_->flags & ZMQ_MSG_MORE;
- if (!more) {
- current++;
- if (current >= active)
- current = 0;
+ int rc = xrep_t::xrecv (msg_, flags_);
+ if (rc != 0)
+ return rc;
+
+ // If whole request is read, flip the FSM to reply-sending state.
+ if (!(msg_->flags & ZMQ_MSG_MORE)) {
sending_reply = true;
+ request_begins = true;
}
+
return 0;
}
@@ -252,18 +110,7 @@ bool zmq::rep_t::xhas_in ()
if (sending_reply)
return false;
- if (more)
- return true;
-
- for (int count = active; count != 0; count--) {
- if (in_pipes [current]->check_read ())
- return !sending_reply;
- current++;
- if (current >= active)
- current = 0;
- }
-
- return false;
+ return xrep_t::xhas_in ();
}
bool zmq::rep_t::xhas_out ()
@@ -271,10 +118,6 @@ bool zmq::rep_t::xhas_out ()
if (!sending_reply)
return false;
- if (more)
- return true;
-
- // TODO: No check for write here...
- return sending_reply;
+ return xrep_t::xhas_out ();
}
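
rep_t has become a thin state machine on top of xrep_t: xrecv first relays the backtrace stack (every part up to and including the empty delimiter) into the reply path and only then hands the payload to the caller, and xsend routes the reply back using that saved stack. The sketch below shows the same envelope split on plain vectors of string frames; the frame layout (identity parts, empty delimiter, payload) follows the comments above, everything else is illustrative.

#include <cstddef>
#include <cstdio>
#include <string>
#include <vector>

typedef std::vector<std::string> frames_t;

//  Split an incoming request into the backtrace stack (all frames up to and
//  including the empty delimiter) and the payload the application sees.
void split_request (const frames_t &request, frames_t &envelope, frames_t &body)
{
    size_t i = 0;
    for (; i != request.size (); i++) {
        envelope.push_back (request [i]);
        if (request [i].empty ())      //  Empty frame delimits the stack.
            break;
    }
    for (i++; i < request.size (); i++)
        body.push_back (request [i]);
}

//  Build the outgoing reply: the saved envelope first, then the payload, so
//  the routing layer can send it back along the same path it arrived on.
frames_t make_reply (const frames_t &envelope, const std::string &payload)
{
    frames_t reply = envelope;
    reply.push_back (payload);
    return reply;
}

int main ()
{
    frames_t request;
    request.push_back ("peer-identity");
    request.push_back ("");
    request.push_back ("Hello");

    frames_t envelope, body;
    split_request (request, envelope, body);

    frames_t reply = make_reply (envelope, "World");
    for (size_t i = 0; i != reply.size (); i++)
        std::printf ("[%s]\n", reply [i].c_str ());   //  [peer-identity] [] [World]
    return 0;
}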
diff --git a/src/rep.hpp b/src/rep.hpp
index aef4318..09eda02 100644
--- a/src/rep.hpp
+++ b/src/rep.hpp
@@ -20,28 +20,19 @@
#ifndef __ZMQ_REP_HPP_INCLUDED__
#define __ZMQ_REP_HPP_INCLUDED__
-#include "socket_base.hpp"
-#include "yarray.hpp"
+#include "xrep.hpp"
namespace zmq
{
- class rep_t : public socket_base_t
+ class rep_t : public xrep_t
{
public:
- rep_t (class app_thread_t *parent_);
+ rep_t (class ctx_t *parent_, uint32_t slot_);
~rep_t ();
// Overloads of functions from socket_base_t.
- void xattach_pipes (class reader_t *inpipe_, class writer_t *outpipe_,
- const blob_t &peer_identity_);
- void xdetach_inpipe (class reader_t *pipe_);
- void xdetach_outpipe (class writer_t *pipe_);
- void xkill (class reader_t *pipe_);
- void xrevive (class reader_t *pipe_);
- void xrevive (class writer_t *pipe_);
- int xsetsockopt (int option_, const void *optval_, size_t optvallen_);
int xsend (zmq_msg_t *msg_, int flags_);
int xrecv (zmq_msg_t *msg_, int flags_);
bool xhas_in ();
@@ -49,31 +40,13 @@ namespace zmq
private:
- // List in outbound and inbound pipes. Note that the two lists are
- // always in sync. I.e. outpipe with index N communicates with the
- // same session as inpipe with index N.
- typedef yarray_t <class writer_t> out_pipes_t;
- out_pipes_t out_pipes;
- typedef yarray_t <class reader_t> in_pipes_t;
- in_pipes_t in_pipes;
-
- // Number of active inpipes. All the active inpipes are located at the
- // beginning of the in_pipes array.
- in_pipes_t::size_type active;
-
- // Index of the next inbound pipe to read a request from.
- in_pipes_t::size_type current;
-
- // If true, request was already received and reply wasn't completely
- // sent yet.
+ // If true, we are in the process of sending the reply. If false, we are
+ // in the process of receiving a request.
bool sending_reply;
- // True, if message processed at the moment (either sent or received)
- // is processed only partially.
- bool more;
-
- // Pipe we are going to send reply to.
- class writer_t *reply_pipe;
+ // If true, we are starting to receive a request. The beginning
+ // of the request is the backtrace stack.
+ bool request_begins;
rep_t (const rep_t&);
void operator = (const rep_t&);
diff --git a/src/req.cpp b/src/req.cpp
index a77c061..03203c5 100644
--- a/src/req.cpp
+++ b/src/req.cpp
@@ -21,119 +21,19 @@
#include "req.hpp"
#include "err.hpp"
-#include "pipe.hpp"
-zmq::req_t::req_t (class app_thread_t *parent_) :
- socket_base_t (parent_),
- active (0),
- current (0),
+zmq::req_t::req_t (class ctx_t *parent_, uint32_t slot_) :
+ xreq_t (parent_, slot_),
receiving_reply (false),
- reply_pipe_active (false),
- more (false),
- reply_pipe (NULL)
+ message_begins (true)
{
- options.requires_in = true;
- options.requires_out = true;
+ options.type = ZMQ_REQ;
}
zmq::req_t::~req_t ()
{
}
-void zmq::req_t::xattach_pipes (class reader_t *inpipe_,
- class writer_t *outpipe_, const blob_t &peer_identity_)
-{
- zmq_assert (inpipe_ && outpipe_);
- zmq_assert (in_pipes.size () == out_pipes.size ());
-
- in_pipes.push_back (inpipe_);
- in_pipes.swap (active, in_pipes.size () - 1);
-
- out_pipes.push_back (outpipe_);
- out_pipes.swap (active, out_pipes.size () - 1);
-
- active++;
-}
-
-void zmq::req_t::xdetach_inpipe (class reader_t *pipe_)
-{
- zmq_assert (!receiving_reply || !more || reply_pipe != pipe_);
-
- zmq_assert (pipe_);
- zmq_assert (in_pipes.size () == out_pipes.size ());
-
- // TODO: The pipe we are awaiting the reply from is detached. What now?
- // Return ECONNRESET from subsequent recv?
- if (receiving_reply && pipe_ == reply_pipe) {
- zmq_assert (false);
- }
-
- in_pipes_t::size_type index = in_pipes.index (pipe_);
-
- if (out_pipes [index])
- out_pipes [index]->term ();
- in_pipes.erase (index);
- out_pipes.erase (index);
- if (index < active) {
- active--;
- if (current == active)
- current = 0;
- }
-}
-
-void zmq::req_t::xdetach_outpipe (class writer_t *pipe_)
-{
- zmq_assert (receiving_reply || !more || out_pipes [current] != pipe_);
-
- zmq_assert (pipe_);
- zmq_assert (in_pipes.size () == out_pipes.size ());
-
- out_pipes_t::size_type index = out_pipes.index (pipe_);
-
- if (in_pipes [index])
- in_pipes [index]->term ();
- in_pipes.erase (index);
- out_pipes.erase (index);
- if (index < active) {
- active--;
- if (current == active)
- current = 0;
- }
-}
-
-void zmq::req_t::xkill (class reader_t *pipe_)
-{
- zmq_assert (receiving_reply);
- zmq_assert (pipe_ == reply_pipe);
-
- reply_pipe_active = false;
-}
-
-void zmq::req_t::xrevive (class reader_t *pipe_)
-{
- if (pipe_ == reply_pipe)
- reply_pipe_active = true;
-}
-
-void zmq::req_t::xrevive (class writer_t *pipe_)
-{
- out_pipes_t::size_type index = out_pipes.index (pipe_);
- zmq_assert (index >= active);
-
- if (in_pipes [index] != NULL) {
- in_pipes.swap (index, active);
- out_pipes.swap (index, active);
- active++;
- }
-}
-
-int zmq::req_t::xsetsockopt (int option_, const void *optval_,
- size_t optvallen_)
-{
- errno = EINVAL;
- return -1;
-}
-
int zmq::req_t::xsend (zmq_msg_t *msg_, int flags_)
{
// If we've sent a request and we still haven't got the reply,
@@ -143,98 +43,59 @@ int zmq::req_t::xsend (zmq_msg_t *msg_, int flags_)
return -1;
}
- while (active > 0) {
- if (out_pipes [current]->check_write ())
- break;
-
- zmq_assert (!more);
- active--;
- if (current < active) {
- in_pipes.swap (current, active);
- out_pipes.swap (current, active);
- }
- else
- current = 0;
- }
-
- if (active == 0) {
- errno = EAGAIN;
- return -1;
- }
-
- // If we are starting to send the request, generate a prefix.
- if (!more) {
+ // The first part of the request is an empty message part (stack bottom).
+ if (message_begins) {
zmq_msg_t prefix;
int rc = zmq_msg_init (&prefix);
zmq_assert (rc == 0);
- prefix.flags |= ZMQ_MSG_MORE;
- bool written = out_pipes [current]->write (&prefix);
- zmq_assert (written);
+ prefix.flags = ZMQ_MSG_MORE;
+ rc = xreq_t::xsend (&prefix, flags_);
+ if (rc != 0)
+ return rc;
+ message_begins = false;
}
- // Push the message to the selected pipe.
- bool written = out_pipes [current]->write (msg_);
- zmq_assert (written);
- more = msg_->flags & ZMQ_MSG_MORE;
- if (!more) {
- out_pipes [current]->flush ();
- receiving_reply = true;
- reply_pipe = in_pipes [current];
+ bool more = msg_->flags & ZMQ_MSG_MORE;
- // We can safely assume that the reply pipe is active as the last time
- // we've used it we've read the reply and haven't tried to read from it
- // anymore.
- reply_pipe_active = true;
+ int rc = xreq_t::xsend (msg_, flags_);
+ if (rc != 0)
+ return rc;
- // Move to the next pipe (load-balancing).
- current = (current + 1) % active;
+ // If the request was fully sent, flip the FSM into reply-receiving state.
+ if (!more) {
+ receiving_reply = true;
+ message_begins = true;
}
- // Detach the message from the data buffer.
- int rc = zmq_msg_init (msg_);
- zmq_assert (rc == 0);
-
return 0;
}
int zmq::req_t::xrecv (zmq_msg_t *msg_, int flags_)
{
- // Deallocate old content of the message.
- int rc = zmq_msg_close (msg_);
- zmq_assert (rc == 0);
-
// If the request wasn't sent, we can't wait for a reply.
if (!receiving_reply) {
- zmq_msg_init (msg_);
errno = EFSM;
return -1;
}
- // Get the reply from the reply pipe.
- if (!reply_pipe_active || !reply_pipe->read (msg_)) {
- zmq_msg_init (msg_);
- errno = EAGAIN;
- return -1;
- }
-
- // If we are starting to receive new reply, check whether prefix
- // is well-formed and drop it.
- if (!more) {
+ // The first part of the reply should be an empty message part (stack bottom).
+ if (message_begins) {
+ int rc = xreq_t::xrecv (msg_, flags_);
+ if (rc != 0)
+ return rc;
zmq_assert (msg_->flags & ZMQ_MSG_MORE);
zmq_assert (zmq_msg_size (msg_) == 0);
- rc = zmq_msg_close (msg_);
- zmq_assert (rc == 0);
-
- // Get the actual reply.
- bool recvd = reply_pipe->read (msg_);
- zmq_assert (recvd);
+ message_begins = false;
}
- // If this was last part of the reply, switch to request phase.
- more = msg_->flags & ZMQ_MSG_MORE;
- if (!more) {
+ int rc = xreq_t::xrecv (msg_, flags_);
+ if (rc != 0)
+ return rc;
+
+ // If the reply is fully received, flip the FSM into request-sending state.
+ if (!(msg_->flags & ZMQ_MSG_MORE)) {
receiving_reply = false;
- reply_pipe = NULL;
+ message_begins = true;
}
return 0;
@@ -242,43 +103,18 @@ int zmq::req_t::xrecv (zmq_msg_t *msg_, int flags_)
bool zmq::req_t::xhas_in ()
{
- if (receiving_reply && more)
- return true;
-
- if (!receiving_reply || !reply_pipe_active)
- return false;
-
- zmq_assert (reply_pipe);
- if (!reply_pipe->check_read ()) {
- reply_pipe_active = false;
+ if (!receiving_reply)
return false;
- }
- return true;
+ return xreq_t::xhas_in ();
}
bool zmq::req_t::xhas_out ()
{
- if (!receiving_reply && more)
- return true;
-
if (receiving_reply)
return false;
- while (active > 0) {
- if (out_pipes [current]->check_write ())
- return true;;
-
- active--;
- if (current < active) {
- in_pipes.swap (current, active);
- out_pipes.swap (current, active);
- }
- else
- current = 0;
- }
-
- return false;
+ return xreq_t::xhas_out ();
}
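
req_t is now a strict two-state machine over xreq_t: send and recv must alternate (the real socket returns EFSM otherwise), and each request is prefixed with an empty part marking the bottom of the backtrace stack, which is silently stripped from the reply. A standalone sketch of that state machine follows; the enum value used in place of 0MQ's EFSM errno code is purely illustrative.

#include <cstdio>
#include <string>
#include <vector>

//  Send and recv must strictly alternate (the real socket fails with EFSM
//  otherwise), and every request is prefixed with an empty part marking the
//  bottom of the backtrace stack; that part is stripped again on receive.
//  'efsm' below merely stands in for 0MQ's EFSM errno value.
enum rc_t { ok, efsm };

struct req_fsm_t
{
    bool receiving_reply;
    std::vector<std::string> wire;   //  Frames "on the wire".

    req_fsm_t () : receiving_reply (false) {}

    rc_t send (const std::string &payload)
    {
        if (receiving_reply)
            return efsm;             //  Must recv the reply first.
        wire.push_back ("");         //  Empty delimiter part.
        wire.push_back (payload);
        receiving_reply = true;
        return ok;
    }

    rc_t recv (std::string &payload)
    {
        if (!receiving_reply)
            return efsm;             //  Must send a request first.
        wire.erase (wire.begin ());  //  Drop the delimiter silently.
        payload = wire.front ();
        wire.erase (wire.begin ());
        receiving_reply = false;
        return ok;
    }
};

int main ()
{
    req_fsm_t req;
    std::string reply;

    if (req.recv (reply) == efsm)
        std::printf ("recv before send is rejected (EFSM)\n");

    req.send ("Hello");
    //  Pretend the peer echoed the request back as the reply.
    req.recv (reply);
    std::printf ("got reply: %s\n", reply.c_str ());
    return 0;
}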
diff --git a/src/req.hpp b/src/req.hpp
index 5ab7bca..0df59b9 100644
--- a/src/req.hpp
+++ b/src/req.hpp
@@ -20,28 +20,19 @@
#ifndef __ZMQ_REQ_HPP_INCLUDED__
#define __ZMQ_REQ_HPP_INCLUDED__
-#include "socket_base.hpp"
-#include "yarray.hpp"
+#include "xreq.hpp"
namespace zmq
{
- class req_t : public socket_base_t
+ class req_t : public xreq_t
{
public:
- req_t (class app_thread_t *parent_);
+ req_t (class ctx_t *parent_, uint32_t slot_);
~req_t ();
// Overloads of functions from socket_base_t.
- void xattach_pipes (class reader_t *inpipe_, class writer_t *outpipe_,
- const blob_t &peer_identity_);
- void xdetach_inpipe (class reader_t *pipe_);
- void xdetach_outpipe (class writer_t *pipe_);
- void xkill (class reader_t *pipe_);
- void xrevive (class reader_t *pipe_);
- void xrevive (class writer_t *pipe_);
- int xsetsockopt (int option_, const void *optval_, size_t optvallen_);
int xsend (zmq_msg_t *msg_, int flags_);
int xrecv (zmq_msg_t *msg_, int flags_);
bool xhas_in ();
@@ -49,40 +40,13 @@ namespace zmq
private:
- // List in outbound and inbound pipes. Note that the two lists are
- // always in sync. I.e. outpipe with index N communicates with the
- // same session as inpipe with index N.
- //
- // TODO: Once we have queue limits in place, list of active outpipes
- // is to be held (presumably by stacking active outpipes at
- // the beginning of the array). We don't have to do the same thing for
- // inpipes, because we know which pipe we want to read the
- // reply from.
- typedef yarray_t <class writer_t> out_pipes_t;
- out_pipes_t out_pipes;
- typedef yarray_t <class reader_t> in_pipes_t;
- in_pipes_t in_pipes;
-
- // Number of active pipes.
- size_t active;
-
- // Req_t load-balances the requests - 'current' points to the session
- // that's processing the request at the moment.
- out_pipes_t::size_type current;
-
// If true, the request was already sent and the reply wasn't received yet
// or was received only partially.
bool receiving_reply;
- // True, if read can be attempted from the reply pipe.
- bool reply_pipe_active;
-
- // True, if message processed at the moment (either sent or received)
- // is processed only partially.
- bool more;
-
- // Pipe we are awaiting the reply from.
- class reader_t *reply_pipe;
+ // If true, we are starting to send/recv a message. The first part
+ // of the message must be an empty message part (backtrace stack bottom).
+ bool message_begins;
req_t (const req_t&);
void operator = (const req_t&);
diff --git a/src/select.cpp b/src/select.cpp
index 59eb83e..c2497ca 100644
--- a/src/select.cpp
+++ b/src/select.cpp
@@ -54,9 +54,6 @@ zmq::select_t::select_t () :
zmq::select_t::~select_t ()
{
worker.stop ();
-
- // Make sure there are no fds registered on shutdown.
- zmq_assert (load.get () == 0);
}
zmq::select_t::handle_t zmq::select_t::add_fd (fd_t fd_, i_poll_events *events_)
@@ -77,7 +74,7 @@ zmq::select_t::handle_t zmq::select_t::add_fd (fd_t fd_, i_poll_events *events_)
maxfd = fd_;
// Increase the load metric of the thread.
- load.add (1);
+ adjust_load (1);
return fd_;
}
@@ -113,7 +110,7 @@ void zmq::select_t::rm_fd (handle_t handle_)
}
// Decrease the load metric of the thread.
- load.sub (1);
+ adjust_load (-1);
}
void zmq::select_t::set_pollin (handle_t handle_)
@@ -136,23 +133,6 @@ void zmq::select_t::reset_pollout (handle_t handle_)
FD_CLR (handle_, &source_set_out);
}
-void zmq::select_t::add_timer (i_poll_events *events_)
-{
- timers.push_back (events_);
-}
-
-void zmq::select_t::cancel_timer (i_poll_events *events_)
-{
- timers_t::iterator it = std::find (timers.begin (), timers.end (), events_);
- if (it != timers.end ())
- timers.erase (it);
-}
-
-int zmq::select_t::get_load ()
-{
- return load.get ();
-}
-
void zmq::select_t::start ()
{
worker.start (worker_routine, this);
@@ -167,19 +147,19 @@ void zmq::select_t::loop ()
{
while (!stopping) {
+ // Execute any due timers.
+ int timeout = (int) execute_timers ();
+
// Initialise the pollsets.
memcpy (&readfds, &source_set_in, sizeof source_set_in);
memcpy (&writefds, &source_set_out, sizeof source_set_out);
memcpy (&exceptfds, &source_set_err, sizeof source_set_err);
- // Compute the timout interval. Select is free to overwrite the
- // value so we have to compute it each time anew.
- timeval timeout = {max_timer_period / 1000,
- (max_timer_period % 1000) * 1000};
-
// Wait for events.
+ struct timeval tv = {(long) (timeout / 1000),
+ (long) (timeout % 1000 * 1000)};
int rc = select (maxfd + 1, &readfds, &writefds, &exceptfds,
- timers.empty () ? NULL : &timeout);
+ timeout ? &tv : NULL);
#ifdef ZMQ_HAVE_WINDOWS
wsa_assert (rc != SOCKET_ERROR);
@@ -189,20 +169,10 @@ void zmq::select_t::loop ()
errno_assert (rc != -1);
#endif
- // Handle timer.
- if (!rc) {
-
- // Use local list of timers as timer handlers may fill new timers
- // into the original array.
- timers_t t;
- std::swap (timers, t);
-
- // Trigger all the timers.
- for (timers_t::iterator it = t.begin (); it != t.end (); it ++)
- (*it)->timer_event ();
-
+ // If there are no events (i.e. it's a timeout) there's no point
+ // in checking the pollset.
+ if (rc == 0)
continue;
- }
for (fd_set_t::size_type i = 0; i < fds.size (); i ++) {
if (fds [i].fd == retired_fd)
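
The loop now obtains the next timer deadline in milliseconds from execute_timers() and converts it into a timeval for select(), passing NULL when no timer is pending. Below is a small POSIX-only sketch of the same conversion, here waiting on stdin with a 500 ms timeout; the helper name is made up for the example.

#include <cstdio>
#include <sys/select.h>
#include <unistd.h>

//  A millisecond timeout is split into seconds/microseconds for select();
//  a zero value means "no timer pending", i.e. block indefinitely (NULL).
int wait_for_stdin (int timeout_ms)
{
    fd_set readfds;
    FD_ZERO (&readfds);
    FD_SET (STDIN_FILENO, &readfds);

    struct timeval tv;
    tv.tv_sec = timeout_ms / 1000;
    tv.tv_usec = timeout_ms % 1000 * 1000;

    return select (STDIN_FILENO + 1, &readfds, NULL, NULL,
        timeout_ms ? &tv : NULL);
}

int main ()
{
    int rc = wait_for_stdin (500);
    if (rc == 0)
        std::printf ("timed out after 500 ms\n");
    else if (rc > 0)
        std::printf ("stdin is readable\n");
    else
        std::perror ("select");
    return 0;
}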
diff --git a/src/select.hpp b/src/select.hpp
index 01e9fa8..121857e 100644
--- a/src/select.hpp
+++ b/src/select.hpp
@@ -36,7 +36,7 @@
#include "fd.hpp"
#include "thread.hpp"
-#include "atomic_counter.hpp"
+#include "poller_base.hpp"
namespace zmq
{
@@ -44,7 +44,7 @@ namespace zmq
// Implements socket polling mechanism using POSIX.1-2001 select()
// function.
- class select_t
+ class select_t : public poller_base_t
{
public:
@@ -60,9 +60,6 @@ namespace zmq
void reset_pollin (handle_t handle_);
void set_pollout (handle_t handle_);
void reset_pollout (handle_t handle_);
- void add_timer (struct i_poll_events *events_);
- void cancel_timer (struct i_poll_events *events_);
- int get_load ();
void start ();
void stop ();
@@ -99,20 +96,12 @@ namespace zmq
// If true, at least one file descriptor has retired.
bool retired;
- // List of all the engines waiting for the timer event.
- typedef std::vector <struct i_poll_events*> timers_t;
- timers_t timers;
-
// If true, thread is shutting down.
bool stopping;
// Handle of the physical thread doing the I/O work.
thread_t worker;
- // Load of the poller. Currently number of file descriptors
- // registered with the poller.
- atomic_counter_t load;
-
select_t (const select_t&);
void operator = (const select_t&);
};
diff --git a/src/semaphore.hpp b/src/semaphore.hpp
new file mode 100644
index 0000000..453c1b0
--- /dev/null
+++ b/src/semaphore.hpp
@@ -0,0 +1,186 @@
+/*
+ Copyright (c) 2007-2010 iMatix Corporation
+
+ This file is part of 0MQ.
+
+ 0MQ is free software; you can redistribute it and/or modify it under
+ the terms of the Lesser GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ 0MQ is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ Lesser GNU General Public License for more details.
+
+ You should have received a copy of the Lesser GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#ifndef __ZMQ_SEMAPHORE_HPP_INCLUDED__
+#define __ZMQ_SEMAPHORE_HPP_INCLUDED__
+
+#include "platform.hpp"
+#include "err.hpp"
+
+#if defined ZMQ_HAVE_WINDOWS
+#include "windows.hpp"
+#else
+#include <semaphore.h>
+#endif
+
+namespace zmq
+{
+ // Simple semaphore. Only a single thread may be waiting at any given time.
+ // Also, the semaphore may not be posted before the previous post
+ // was matched by a corresponding wait and the waiting thread was
+ // released.
+
+#if defined ZMQ_HAVE_WINDOWS
+
+ // On the Windows platform the simple semaphore is implemented using an event object.
+
+ class semaphore_t
+ {
+ public:
+
+ // Initialise the semaphore.
+ inline semaphore_t ()
+ {
+ ev = CreateEvent (NULL, FALSE, FALSE, NULL);
+ win_assert (ev != NULL);
+ }
+
+ // Destroy the semaphore.
+ inline ~semaphore_t ()
+ {
+ int rc = CloseHandle (ev);
+ win_assert (rc != 0);
+ }
+
+ // Wait for the semaphore.
+ inline void wait ()
+ {
+ DWORD rc = WaitForSingleObject (ev, INFINITE);
+ win_assert (rc != WAIT_FAILED);
+ }
+
+ // Post the semaphore.
+ inline void post ()
+ {
+ int rc = SetEvent (ev);
+ win_assert (rc != 0);
+ }
+
+ private:
+
+ HANDLE ev;
+
+ semaphore_t (const semaphore_t&);
+ void operator = (const semaphore_t&);
+ };
+
+#elif defined ZMQ_HAVE_LINUX || defined ZMQ_HAVE_OSX || defined ZMQ_HAVE_OPENVMS
+
+ // On platforms that allow double locking of a mutex from the same
+ // thread, the simple semaphore is implemented using a mutex, as it is
+ // more efficient than a full-blown semaphore.
+
+ // Note that the OS-level semaphore is not implemented on OSX, so the code
+ // below is not only an optimisation, it's necessary to make 0MQ work on OSX.
+
+ class semaphore_t
+ {
+ public:
+
+ // Initialise the semaphore.
+ inline semaphore_t ()
+ {
+ int rc = pthread_mutex_init (&mutex, NULL);
+ posix_assert (rc);
+ rc = pthread_mutex_lock (&mutex);
+ posix_assert (rc);
+ }
+
+ // Destroy the semaphore.
+ inline ~semaphore_t ()
+ {
+ int rc = pthread_mutex_unlock (&mutex);
+ posix_assert (rc);
+ rc = pthread_mutex_destroy (&mutex);
+ posix_assert (rc);
+ }
+
+ // Wait for the semaphore.
+ inline void wait ()
+ {
+ int rc = pthread_mutex_lock (&mutex);
+ posix_assert (rc);
+ }
+
+ // Post the semaphore.
+ inline void post ()
+ {
+ int rc = pthread_mutex_unlock (&mutex);
+ posix_assert (rc);
+ }
+
+ private:
+
+ pthread_mutex_t mutex;
+
+ semaphore_t (const semaphore_t&);
+ void operator = (const semaphore_t&);
+ };
+
+#else
+
+ // Default implementation maps simple semaphore to POSIX semaphore.
+
+ class semaphore_t
+ {
+ public:
+
+ // Initialise the semaphore.
+ inline semaphore_t ()
+ {
+ int rc = sem_init (&sem, 0, 0);
+ errno_assert (rc != -1);
+ }
+
+ // Destroy the semaphore.
+ inline ~semaphore_t ()
+ {
+ int rc = sem_destroy (&sem);
+ errno_assert (rc != -1);
+ }
+
+ // Wait for the semaphore.
+ inline void wait ()
+ {
+ int rc = sem_wait (&sem);
+ errno_assert (rc != -1);
+ }
+
+ // Post the semaphore.
+ inline void post ()
+ {
+ int rc = sem_post (&sem);
+ errno_assert (rc != -1);
+ }
+
+ private:
+
+ // Underlying system semaphore object.
+ sem_t sem;
+
+ semaphore_t (const semaphore_t&);
+ void operator = (const semaphore_t&);
+ };
+
+#endif
+
+}
+
+#endif
+
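
The default branch of semaphore_t maps directly onto unnamed POSIX semaphores. Here is a small standalone usage sketch of the same sem_init/sem_wait/sem_post pattern, handing one wake-up from a worker thread to the waiting main thread; it works where unnamed POSIX semaphores exist (e.g. Linux), which is exactly why the header special-cases OSX.

#include <cstdio>
#include <pthread.h>
#include <semaphore.h>

//  One thread blocks in sem_wait() until another thread posts the semaphore.
//  Compile with -pthread on platforms providing unnamed POSIX semaphores.
static sem_t sem;

static void *worker (void *)
{
    std::printf ("worker: posting the semaphore\n");
    sem_post (&sem);
    return NULL;
}

int main ()
{
    int rc = sem_init (&sem, 0, 0);   //  Not shared, initial value 0.
    if (rc == -1) {
        std::perror ("sem_init");
        return 1;
    }

    pthread_t t;
    pthread_create (&t, NULL, worker, NULL);

    sem_wait (&sem);                  //  Blocks until the worker posts.
    std::printf ("main: released\n");

    pthread_join (t, NULL);
    sem_destroy (&sem);
    return 0;
}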
diff --git a/src/session.cpp b/src/session.cpp
index f798877..f853d5a 100644
--- a/src/session.cpp
+++ b/src/session.cpp
@@ -20,58 +20,70 @@
#include <new>
#include "session.hpp"
+#include "socket_base.hpp"
#include "i_engine.hpp"
#include "err.hpp"
#include "pipe.hpp"
+#include "likely.hpp"
-zmq::session_t::session_t (object_t *parent_, socket_base_t *owner_,
- const options_t &options_) :
- owned_t (parent_, owner_),
+zmq::session_t::session_t (class io_thread_t *io_thread_,
+ class socket_base_t *socket_, const options_t &options_) :
+ own_t (io_thread_, options_),
+ io_object_t (io_thread_),
in_pipe (NULL),
incomplete_in (false),
- active (true),
out_pipe (NULL),
engine (NULL),
- options (options_)
+ socket (socket_),
+ io_thread (io_thread_),
+ pipes_attached (false),
+ delimiter_processed (false),
+ force_terminate (false),
+ has_linger_timer (false),
+ state (active)
{
- // It's possible to register the session at this point as it will be
- // searched for only on reconnect, i.e. no race condition (session found
- // before it is plugged into it's I/O thread) is possible.
- ordinal = owner->register_session (this);
}
-zmq::session_t::session_t (object_t *parent_, socket_base_t *owner_,
- const options_t &options_, const blob_t &peer_identity_) :
- owned_t (parent_, owner_),
- in_pipe (NULL),
- incomplete_in (false),
- active (true),
- out_pipe (NULL),
- engine (NULL),
- ordinal (0),
- peer_identity (peer_identity_),
- options (options_)
+zmq::session_t::~session_t ()
{
- if (!peer_identity.empty () && peer_identity [0] != 0) {
- if (!owner->register_session (peer_identity, this)) {
+ zmq_assert (!in_pipe);
+ zmq_assert (!out_pipe);
- // TODO: There's already a session with the specified
- // identity. We should presumably syslog it and drop the
- // session.
- zmq_assert (false);
- }
- }
+ if (engine)
+ engine->terminate ();
}
-zmq::session_t::~session_t ()
+void zmq::session_t::proceed_with_term ()
{
- zmq_assert (!in_pipe);
- zmq_assert (!out_pipe);
+ if (state == terminating)
+ return;
+
+ zmq_assert (state == pending);
+ state = terminating;
+
+ // If there's still a pending linger timer, remove it.
+ if (has_linger_timer) {
+ cancel_timer (linger_timer_id);
+ has_linger_timer = false;
+ }
+
+ if (in_pipe) {
+ register_term_acks (1);
+ in_pipe->terminate ();
+ }
+ if (out_pipe) {
+ register_term_acks (1);
+ out_pipe->terminate ();
+ }
+
+ // The session has already waited for the linger period. We don't want
+ // the child objects to linger any more, thus linger is set to zero.
+ own_t::process_term (0);
}
bool zmq::session_t::read (::zmq_msg_t *msg_)
{
- if (!in_pipe || !active)
+ if (!in_pipe)
return false;
if (!in_pipe->read (msg_))
@@ -97,17 +109,8 @@ void zmq::session_t::flush ()
out_pipe->flush ();
}
-void zmq::session_t::detach (owned_t *reconnecter_)
+void zmq::session_t::clean_pipes ()
{
- // Plug in the reconnecter object if any.
- if (reconnecter_) {
- send_plug (reconnecter_);
- send_own (owner, reconnecter_);
- }
-
- // Engine is terminating itself. No need to deallocate it from here.
- engine = NULL;
-
// Get rid of half-processed messages in the out pipe. Flush any
// unflushed messages upstream.
if (out_pipe) {
@@ -127,165 +130,213 @@ void zmq::session_t::detach (owned_t *reconnecter_)
zmq_msg_close (&msg);
}
}
-
- // Terminate transient session.
- if (!ordinal && (peer_identity.empty () || peer_identity [0] == 0))
- term ();
-}
-
-zmq::io_thread_t *zmq::session_t::get_io_thread ()
-{
- return choose_io_thread (options.affinity);
-}
-
-class zmq::socket_base_t *zmq::session_t::get_owner ()
-{
- return owner;
-}
-
-uint64_t zmq::session_t::get_ordinal ()
-{
- zmq_assert (ordinal);
- return ordinal;
}
void zmq::session_t::attach_pipes (class reader_t *inpipe_,
class writer_t *outpipe_, const blob_t &peer_identity_)
{
+ zmq_assert (!pipes_attached);
+ pipes_attached = true;
+
if (inpipe_) {
zmq_assert (!in_pipe);
in_pipe = inpipe_;
- active = true;
- in_pipe->set_endpoint (this);
+ in_pipe->set_event_sink (this);
}
if (outpipe_) {
zmq_assert (!out_pipe);
out_pipe = outpipe_;
- out_pipe->set_endpoint (this);
+ out_pipe->set_event_sink (this);
+ }
+
+ // If we are already terminating, terminate the pipes straight away.
+ if (state == terminating) {
+ if (in_pipe) {
+ in_pipe->terminate ();
+ register_term_acks (1);
+ }
+ if (out_pipe) {
+ out_pipe->terminate ();
+ register_term_acks (1);
+ }
}
}
-void zmq::session_t::detach_inpipe (reader_t *pipe_)
+void zmq::session_t::delimited (reader_t *pipe_)
{
- active = false;
- in_pipe = NULL;
+ zmq_assert (in_pipe == pipe_);
+ zmq_assert (!delimiter_processed);
+ delimiter_processed = true;
+
+ // If we are in the process of being closed, but still waiting for all
+ // pending messages to be sent, we can terminate here.
+ if (state == pending)
+ proceed_with_term ();
}
-void zmq::session_t::detach_outpipe (writer_t *pipe_)
+void zmq::session_t::terminated (reader_t *pipe_)
{
- out_pipe = NULL;
+ zmq_assert (in_pipe == pipe_);
+ in_pipe = NULL;
+ if (state == terminating)
+ unregister_term_ack ();
}
-void zmq::session_t::kill (reader_t *pipe_)
+void zmq::session_t::terminated (writer_t *pipe_)
{
- active = false;
+ zmq_assert (out_pipe == pipe_);
+ out_pipe = NULL;
+ if (state == terminating)
+ unregister_term_ack ();
}
-void zmq::session_t::revive (reader_t *pipe_)
+void zmq::session_t::activated (reader_t *pipe_)
{
zmq_assert (in_pipe == pipe_);
- active = true;
- if (engine)
- engine->revive ();
+
+ if (likely (engine != NULL))
+ engine->activate_out ();
+ else
+ in_pipe->check_read ();
}
-void zmq::session_t::revive (writer_t *pipe_)
+void zmq::session_t::activated (writer_t *pipe_)
{
zmq_assert (out_pipe == pipe_);
if (engine)
- engine->resume_input ();
+ engine->activate_in ();
}
void zmq::session_t::process_plug ()
{
}
-void zmq::session_t::process_unplug ()
+void zmq::session_t::process_attach (i_engine *engine_,
+ const blob_t &peer_identity_)
{
- // Unregister the session from the socket.
- if (ordinal)
- owner->unregister_session (ordinal);
- else if (!peer_identity.empty () && peer_identity [0] != 0)
- owner->unregister_session (peer_identity);
-
- // Ask associated pipes to terminate.
- if (in_pipe) {
- in_pipe->term ();
- in_pipe = NULL;
- }
- if (out_pipe) {
- out_pipe->term ();
- out_pipe = NULL;
+ // If some other object (e.g. init) notifies us that the connection failed
+ // we need to start the reconnection process.
+ if (!engine_) {
+ zmq_assert (!engine);
+ detached ();
+ return;
}
- if (engine) {
- engine->unplug ();
+ // If we are already terminating, we destroy the engine straight away.
+ // Note that we don't have to unplug it before deleting as it's not
+ // yet plugged to the session.
+ if (state == terminating) {
delete engine;
- engine = NULL;
+ return;
+ }
+
+ // Check whether the required pipes already exist. If not, we'll
+ // create them and bind them to the socket object.
+ if (!pipes_attached) {
+ zmq_assert (!in_pipe && !out_pipe);
+ pipes_attached = true;
+ reader_t *socket_reader = NULL;
+ writer_t *socket_writer = NULL;
+
+ // Create the pipes, as required.
+ if (options.requires_in) {
+ create_pipe (socket, this, options.hwm, options.swap, &socket_reader,
+ &out_pipe);
+ out_pipe->set_event_sink (this);
+ }
+ if (options.requires_out) {
+ create_pipe (this, socket, options.hwm, options.swap, &in_pipe,
+ &socket_writer);
+ in_pipe->set_event_sink (this);
+ }
+
+ // Bind the pipes to the socket object.
+ if (socket_reader || socket_writer)
+ send_bind (socket, socket_reader, socket_writer, peer_identity_);
}
+
+ // Plug in the engine.
+ zmq_assert (!engine);
+ zmq_assert (engine_);
+ engine = engine_;
+ engine->plug (io_thread, this);
+
+ // Trigger the notification about the attachment.
+ attached (peer_identity_);
}
-void zmq::session_t::process_attach (i_engine *engine_,
- const blob_t &peer_identity_)
+void zmq::session_t::detach ()
{
- if (!peer_identity.empty ()) {
-
- // If both IDs are temporary, no checking is needed.
- // TODO: Old ID should be reused in this case...
- if (peer_identity.empty () || peer_identity [0] != 0 ||
- peer_identity_.empty () || peer_identity_ [0] != 0) {
+ // Engine is dead. Let's forget about it.
+ engine = NULL;
- // If we already know the peer name do nothing, just check whether
- // it haven't changed.
- zmq_assert (peer_identity == peer_identity_);
- }
- }
- else if (!peer_identity_.empty ()) {
+ // Remove any half-done messages from the pipes.
+ clean_pipes ();
- // Store the peer identity.
- peer_identity = peer_identity_;
+ // Send the event to the derived class.
+ detached ();
- // If the session is not registered with the ordinal, let's register
- // it using the peer name.
- if (!ordinal) {
- if (!owner->register_session (peer_identity, this)) {
+ // Just in case there's only a delimiter in the inbound pipe, check it for reading.
+ if (in_pipe)
+ in_pipe->check_read ();
+}
- // TODO: There's already a session with the specified
- // identity. We should presumably syslog it and drop the
- // session.
- zmq_assert (false);
- }
- }
+void zmq::session_t::process_term (int linger_)
+{
+ zmq_assert (state == active);
+ state = pending;
+
+ // If linger is set to zero, we can terminate the session straight away
+ // without waiting for the pending messages to be sent.
+ if (linger_ == 0) {
+ proceed_with_term ();
+ return;
}
- // Check whether the required pipes already exist. If not so, we'll
- // create them and bind them to the socket object.
- reader_t *socket_reader = NULL;
- writer_t *socket_writer = NULL;
-
- if (options.requires_in && !out_pipe) {
- pipe_t *pipe = new (std::nothrow) pipe_t (owner, this, options.hwm, options.swap);
- zmq_assert (pipe);
- out_pipe = &pipe->writer;
- out_pipe->set_endpoint (this);
- socket_reader = &pipe->reader;
+ // If there's a finite linger value, set up a timer.
+ if (linger_ > 0) {
+ zmq_assert (!has_linger_timer);
+ add_timer (linger_, linger_timer_id);
+ has_linger_timer = true;
}
- if (options.requires_out && !in_pipe) {
- pipe_t *pipe = new (std::nothrow) pipe_t (this, owner, options.hwm, options.swap);
- zmq_assert (pipe);
- in_pipe = &pipe->reader;
- in_pipe->set_endpoint (this);
- socket_writer = &pipe->writer;
- }
+ // If there's no engine and there's only a delimiter in the pipe, it would
+ // never be read. Thus we check for it explicitly.
+ if (in_pipe)
+ in_pipe->check_read ();
+
+ // If there's no in pipe there are no pending messages to send.
+ // We can proceed with the shutdown straight away. Also, if there is an
+ // inbound pipe, but the delimiter was already processed, we can
+ // terminate immediately. Alternatively, if the derived session type has
+ // called 'terminate', we'll finish straight away.
+ if (!options.requires_out || delimiter_processed || force_terminate ||
+ (!options.immediate_connect && !in_pipe))
+ proceed_with_term ();
+}
+
+void zmq::session_t::timer_event (int id_)
+{
+ // Linger period expired. We can proceed with termination even though
+ // there are still pending messages to be sent.
+ zmq_assert (id_ == linger_timer_id);
+ has_linger_timer = false;
+ proceed_with_term ();
+}
- if (socket_reader || socket_writer)
- send_bind (owner, socket_reader, socket_writer, peer_identity);
+bool zmq::session_t::register_session (const blob_t &name_, session_t *session_)
+{
+ return socket->register_session (name_, session_);
+}
- // Plug in the engine.
- zmq_assert (!engine);
- zmq_assert (engine_);
- engine = engine_;
- engine->plug (this);
+void zmq::session_t::unregister_session (const blob_t &name_)
+{
+ socket->unregister_session (name_);
+}
+
+void zmq::session_t::terminate ()
+{
+ force_terminate = true;
+ own_t::terminate ();
}
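
process_term above implements the linger semantics: zero terminates straight away, a positive value arms the linger timer, and otherwise the session waits for the delimiter to drain the outbound pipe. The sketch below condenses just that three-way decision; the function and enum names are invented for the example, only the interpretation of the linger value comes from the code above.

#include <cstdio>

//  Illustrative condensation of the linger decision in process_term.
enum term_action_t {
    terminate_now,        //  linger == 0, or nothing left to flush
    wait_with_timer,      //  finite linger > 0: arm the linger timer
    wait_for_delimiter    //  infinite linger (< 0): wait until the pipe drains
};

term_action_t decide (int linger, bool pending_output)
{
    if (linger == 0 || !pending_output)
        return terminate_now;
    if (linger > 0)
        return wait_with_timer;
    return wait_for_delimiter;
}

int main ()
{
    std::printf ("%d\n", decide (0, true));      //  terminate_now
    std::printf ("%d\n", decide (100, true));    //  wait_with_timer
    std::printf ("%d\n", decide (-1, true));     //  wait_for_delimiter
    std::printf ("%d\n", decide (-1, false));    //  terminate_now
    return 0;
}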
diff --git a/src/session.hpp b/src/session.hpp
index 9bda1ad..8adda5e 100644
--- a/src/session.hpp
+++ b/src/session.hpp
@@ -20,54 +20,83 @@
#ifndef __ZMQ_SESSION_HPP_INCLUDED__
#define __ZMQ_SESSION_HPP_INCLUDED__
+#include "own.hpp"
#include "i_inout.hpp"
-#include "i_endpoint.hpp"
-#include "owned.hpp"
-#include "options.hpp"
+#include "io_object.hpp"
#include "blob.hpp"
+#include "pipe.hpp"
namespace zmq
{
- class session_t : public owned_t, public i_inout, public i_endpoint
+ class session_t :
+ public own_t,
+ public io_object_t,
+ public i_inout,
+ public i_reader_events,
+ public i_writer_events
{
public:
- // Creates unnamed session.
- session_t (object_t *parent_, socket_base_t *owner_,
- const options_t &options_);
+ session_t (class io_thread_t *io_thread_,
+ class socket_base_t *socket_, const options_t &options_);
- // Creates named session.
- session_t (object_t *parent_, socket_base_t *owner_,
- const options_t &options_, const blob_t &peer_identity_);
-
- // i_inout interface implementation.
+ // i_inout interface implementation. Note that the detach method is not
+ // implemented by the generic session. Different session types may handle
+ // engine disconnection in different ways.
bool read (::zmq_msg_t *msg_);
bool write (::zmq_msg_t *msg_);
void flush ();
- void detach (owned_t *reconnecter_);
- class io_thread_t *get_io_thread ();
- class socket_base_t *get_owner ();
- uint64_t get_ordinal ();
+ void detach ();
- // i_endpoint interface implementation.
void attach_pipes (class reader_t *inpipe_, class writer_t *outpipe_,
const blob_t &peer_identity_);
- void detach_inpipe (class reader_t *pipe_);
- void detach_outpipe (class writer_t *pipe_);
- void kill (class reader_t *pipe_);
- void revive (class reader_t *pipe_);
- void revive (class writer_t *pipe_);
- private:
+ // i_reader_events interface implementation.
+ void activated (class reader_t *pipe_);
+ void terminated (class reader_t *pipe_);
+ void delimited (class reader_t *pipe_);
+
+ // i_writer_events interface implementation.
+ void activated (class writer_t *pipe_);
+ void terminated (class writer_t *pipe_);
+
+ protected:
+
+ // This function allows the session to be shut down even though
+ // there are pending messages in the inbound pipe.
+ void terminate ();
+
+ // Two events for the derived session type. Attached is triggered
+ // when the session is attached to a peer, detached is triggered at the
+ // beginning of the termination process when the session is about to
+ // be detached from the peer.
+ virtual void attached (const blob_t &peer_identity_) = 0;
+ virtual void detached () = 0;
+
+ // Allows derived session types to (un)register session names.
+ bool register_session (const blob_t &name_, class session_t *session_);
+ void unregister_session (const blob_t &name_);
~session_t ();
+ private:
+
// Handlers for incoming commands.
void process_plug ();
- void process_unplug ();
void process_attach (struct i_engine *engine_,
const blob_t &peer_identity_);
+ void process_term (int linger_);
+
+ // i_poll_events handlers.
+ void timer_event (int id_);
+
+ // Remove any half-processed messages. Flush unflushed messages.
+ // Call this function when the engine disconnects to get rid of leftovers.
+ void clean_pipes ();
+
+ // Call this function to move on with the delayed process_term.
+ void proceed_with_term ();
// Inbound pipe, i.e. one the session is getting messages from.
class reader_t *in_pipe;
@@ -76,24 +105,40 @@ namespace zmq
// is still in the in pipe.
bool incomplete_in;
- // If true, in_pipe is active. Otherwise there are no messages to get.
- bool active;
-
// Outbound pipe, i.e. one the socket is sending messages to.
class writer_t *out_pipe;
+ // The protocol I/O engine connected to the session.
struct i_engine *engine;
- // Session is identified by ordinal in the case when it was created
- // before connection to the peer was established and thus we are
- // unaware of peer's identity.
- uint64_t ordinal;
+ // The socket the session belongs to.
+ class socket_base_t *socket;
+
+ // I/O thread the session is living in. It will be used to plug in
+ // the engines into the same thread.
+ class io_thread_t *io_thread;
+
+ // If true, pipes were already attached to this session.
+ bool pipes_attached;
+
+ // If true, delimiter was already read from the inbound pipe.
+ bool delimiter_processed;
+
+ // If true, we should terminate the session even though there are
+ // pending messages in the inbound pipe.
+ bool force_terminate;
+
+ // ID of the linger timer
+ enum {linger_timer_id = 0x20};
- // Identity of the peer.
- blob_t peer_identity;
+ // True if the linger timer is running.
+ bool has_linger_timer;
- // Inherited socket options.
- options_t options;
+ enum {
+ active,
+ pending,
+ terminating
+ } state;
session_t (const session_t&);
void operator = (const session_t&);
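
The session class now drives the generic life cycle itself and leaves the meaning of attach/detach to derived session types via the pure-virtual attached()/detached() hooks. Below is a standalone mock of that template-method split; the base class is a stand-in for the example, not the real session_t.

#include <cstdio>
#include <string>

//  Mock of the new session contract: the generic base drives the engine life
//  cycle and calls two pure-virtual hooks; derived session types decide what
//  attach/detach actually mean (reconnect, drop, look up by name, ...).
class session_base_t
{
public:
    virtual ~session_base_t () {}

    void engine_ready (const std::string &peer_identity)
    {
        //  ... plug the engine in, create the pipes, then notify:
        attached (peer_identity);
    }

    void engine_gone ()
    {
        //  ... clean half-sent messages from the pipes, then notify:
        detached ();
    }

protected:
    virtual void attached (const std::string &peer_identity) = 0;
    virtual void detached () = 0;
};

class reconnecting_session_t : public session_base_t
{
protected:
    void attached (const std::string &id)
    {
        std::printf ("attached to %s\n", id.c_str ());
    }
    void detached ()
    {
        std::printf ("detached; would schedule a reconnect here\n");
    }
};

int main ()
{
    reconnecting_session_t s;
    s.engine_ready ("peer");
    s.engine_gone ();
    return 0;
}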
diff --git a/src/signaler.cpp b/src/signaler.cpp
index d4a9214..787ce4a 100644
--- a/src/signaler.cpp
+++ b/src/signaler.cpp
@@ -49,10 +49,10 @@ zmq::signaler_t::signaler_t ()
struct sockaddr_in addr;
SOCKET listener;
int addrlen = sizeof (addr);
-
+
w = INVALID_SOCKET;
r = INVALID_SOCKET;
-
+
fd_t rcs = (listener = socket (AF_INET, SOCK_STREAM, 0));
wsa_assert (rcs != INVALID_SOCKET);
@@ -60,25 +60,34 @@ zmq::signaler_t::signaler_t ()
addr.sin_family = AF_INET;
addr.sin_addr.s_addr = htonl (INADDR_LOOPBACK);
addr.sin_port = 0;
-
+
int rc = bind (listener, (const struct sockaddr*) &addr, sizeof (addr));
wsa_assert (rc != SOCKET_ERROR);
rc = getsockname (listener, (struct sockaddr*) &addr, &addrlen);
wsa_assert (rc != SOCKET_ERROR);
-
+
// Listen for incoming connections.
rc = listen (listener, 1);
wsa_assert (rc != SOCKET_ERROR);
-
+
// Create the socket.
w = WSASocket (AF_INET, SOCK_STREAM, 0, NULL, 0, 0);
wsa_assert (w != INVALID_SOCKET);
-
+
+ // Increase signaler SNDBUF if requested in config.hpp.
+ if (signaler_sndbuf_size) {
+ int sndbuf = signaler_sndbuf_size;
+ socklen_t sndbuf_size = sizeof sndbuf;
+ rc = setsockopt (w, SOL_SOCKET, SO_SNDBUF, (const char *)&sndbuf,
+ sndbuf_size);
+ errno_assert (rc == 0);
+ }
+
// Connect to the remote peer.
rc = connect (w, (sockaddr *) &addr, sizeof (addr));
wsa_assert (rc != SOCKET_ERROR);
-
+
// Accept connection from w.
r = accept (listener, NULL, NULL);
wsa_assert (r != INVALID_SOCKET);
@@ -112,7 +121,7 @@ void zmq::signaler_t::send (const command_t &cmd_)
zmq_assert (rc == sizeof (command_t));
}
-bool zmq::signaler_t::recv (command_t *cmd_, bool block_)
+int zmq::signaler_t::recv (command_t *cmd_, bool block_)
{
if (block_) {
@@ -122,10 +131,12 @@ bool zmq::signaler_t::recv (command_t *cmd_, bool block_)
wsa_assert (rc != SOCKET_ERROR);
}
- bool result;
+ int err;
+ int result;
int nbytes = ::recv (r, (char*) cmd_, sizeof (command_t), 0);
if (nbytes == -1 && WSAGetLastError () == WSAEWOULDBLOCK) {
- result = false;
+ err = EAGAIN;
+ result = -1;
}
else {
wsa_assert (nbytes != -1);
@@ -133,7 +144,7 @@ bool zmq::signaler_t::recv (command_t *cmd_, bool block_)
// Check whether we haven't got half of a signal.
zmq_assert (nbytes % sizeof (uint32_t) == 0);
- result = true;
+ result = 0;
}
if (block_) {
@@ -144,6 +155,8 @@ bool zmq::signaler_t::recv (command_t *cmd_, bool block_)
wsa_assert (rc != SOCKET_ERROR);
}
+ if (result == -1)
+ errno = err;
return result;
}
@@ -166,6 +179,14 @@ zmq::signaler_t::signaler_t ()
flags = 0;
rc = fcntl (r, F_SETFL, flags | O_NONBLOCK);
errno_assert (rc != -1);
+
+ // Increase signaler SNDBUF if requested in config.hpp.
+ if (signaler_sndbuf_size) {
+ int sndbuf = signaler_sndbuf_size;
+ socklen_t sndbuf_size = sizeof sndbuf;
+ rc = setsockopt (w, SOL_SOCKET, SO_SNDBUF, &sndbuf, sndbuf_size);
+ errno_assert (rc == 0);
+ }
}
zmq::signaler_t::~signaler_t ()
@@ -184,7 +205,7 @@ void zmq::signaler_t::send (const command_t &cmd_)
zmq_assert (nbytes == sizeof (command_t));
}
-bool zmq::signaler_t::recv (command_t *cmd_, bool block_)
+int zmq::signaler_t::recv (command_t *cmd_, bool block_)
{
if (block_) {
@@ -196,13 +217,12 @@ bool zmq::signaler_t::recv (command_t *cmd_, bool block_)
errno_assert (rc != -1);
}
- bool result;
- ssize_t nbytes;
- do {
- nbytes = ::recv (r, (char*) cmd_, sizeof (command_t), 0);
- } while (nbytes == -1 && errno == EINTR);
- if (nbytes == -1 && errno == EAGAIN) {
- result = false;
+ int err;
+ int result;
+ ssize_t nbytes = ::recv (r, (char*) cmd_, sizeof (command_t), 0);
+ if (nbytes == -1 && (errno == EAGAIN || errno == EINTR)) {
+ err = errno;
+ result = -1;
}
else {
zmq_assert (nbytes != -1);
@@ -210,7 +230,7 @@ bool zmq::signaler_t::recv (command_t *cmd_, bool block_)
// Check whether we haven't got half of command.
zmq_assert (nbytes == sizeof (command_t));
- result = true;
+ result = 0;
}
if (block_) {
@@ -223,6 +243,8 @@ bool zmq::signaler_t::recv (command_t *cmd_, bool block_)
errno_assert (rc != -1);
}
+ if (result == -1)
+ errno = err;
return result;
}
@@ -243,6 +265,14 @@ zmq::signaler_t::signaler_t ()
errno_assert (rc == 0);
w = sv [0];
r = sv [1];
+
+ // Increase signaler SNDBUF if requested in config.hpp.
+ if (signaler_sndbuf_size) {
+ int sndbuf = signaler_sndbuf_size;
+ socklen_t sndbuf_size = sizeof sndbuf;
+ rc = setsockopt (w, SOL_SOCKET, SO_SNDBUF, &sndbuf, sndbuf_size);
+ errno_assert (rc == 0);
+ }
}
zmq::signaler_t::~signaler_t ()
@@ -266,24 +296,18 @@ void zmq::signaler_t::send (const command_t &cmd_)
zmq_assert (nbytes == sizeof (command_t));
}
-bool zmq::signaler_t::recv (command_t *cmd_, bool block_)
+int zmq::signaler_t::recv (command_t *cmd_, bool block_)
{
ssize_t nbytes;
- do {
- nbytes = ::recv (r, cmd_, sizeof (command_t),
- block_ ? 0 : MSG_DONTWAIT);
- } while (nbytes == -1 && errno == EINTR);
-
- // If there's no signal available return false.
- if (nbytes == -1 && errno == EAGAIN)
- return false;
-
+ nbytes = ::recv (r, cmd_, sizeof (command_t), block_ ? 0 : MSG_DONTWAIT);
+ if (nbytes == -1 && (errno == EAGAIN || errno == EINTR))
+ return -1;
errno_assert (nbytes != -1);
// Check whether we haven't got half of command.
zmq_assert (nbytes == sizeof (command_t));
- return true;
+ return 0;
}
#endif
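
signaler_t::recv now follows the usual non-blocking I/O convention: 0 on success, -1 with errno set to EAGAIN when no command is queued. The POSIX sketch below shows the same pattern on a socketpair, the primitive used by the default branch of the signaler; the helper name is made up for the example.

#include <cerrno>
#include <cstdio>
#include <sys/socket.h>
#include <sys/types.h>
#include <unistd.h>

//  Non-blocking command read: 0 on success, -1 with errno left at EAGAIN
//  (or EINTR) when nothing is queued -- the convention recv() adopts above.
int recv_command (int fd, char *buf, size_t len)
{
    ssize_t nbytes = ::recv (fd, buf, len, MSG_DONTWAIT);
    if (nbytes == -1 && (errno == EAGAIN || errno == EINTR))
        return -1;
    return nbytes == (ssize_t) len ? 0 : -1;
}

int main ()
{
    int sv [2];
    if (socketpair (AF_UNIX, SOCK_STREAM, 0, sv) == -1) {
        std::perror ("socketpair");
        return 1;
    }

    char cmd [4];
    if (recv_command (sv [1], cmd, sizeof cmd) == -1 && errno == EAGAIN)
        std::printf ("no command yet (EAGAIN)\n");

    const char ping [4] = {'p', 'i', 'n', 'g'};
    send (sv [0], ping, sizeof ping, 0);

    if (recv_command (sv [1], cmd, sizeof cmd) == 0)
        std::printf ("got command: %.4s\n", cmd);

    close (sv [0]);
    close (sv [1]);
    return 0;
}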
diff --git a/src/signaler.hpp b/src/signaler.hpp
index 64a1899..217c3a6 100644
--- a/src/signaler.hpp
+++ b/src/signaler.hpp
@@ -40,7 +40,7 @@ namespace zmq
fd_t get_fd ();
void send (const command_t &cmd_);
- bool recv (command_t *cmd_, bool block_);
+ int recv (command_t *cmd_, bool block_);
private:
diff --git a/src/socket_base.cpp b/src/socket_base.cpp
index c933954..c1d210d 100644
--- a/src/socket_base.cpp
+++ b/src/socket_base.cpp
@@ -23,45 +23,173 @@
#include "../include/zmq.h"
-#include "socket_base.hpp"
-#include "app_thread.hpp"
+#include "platform.hpp"
+#if defined ZMQ_HAVE_WINDOWS
+#include "windows.hpp"
+#if defined _MSC_VER
+#include <intrin.h>
+#endif
+#else
+#include <unistd.h>
+#endif
+
+#include "socket_base.hpp"
#include "zmq_listener.hpp"
#include "zmq_connecter.hpp"
#include "io_thread.hpp"
-#include "session.hpp"
+#include "connect_session.hpp"
#include "config.hpp"
-#include "owned.hpp"
+#include "clock.hpp"
#include "pipe.hpp"
#include "err.hpp"
#include "ctx.hpp"
#include "platform.hpp"
-#include "pgm_sender.hpp"
-#include "pgm_receiver.hpp"
#include "likely.hpp"
+#include "pair.hpp"
+#include "pub.hpp"
+#include "sub.hpp"
+#include "req.hpp"
+#include "rep.hpp"
+#include "pull.hpp"
+#include "push.hpp"
+#include "xreq.hpp"
+#include "xrep.hpp"
#include "uuid.hpp"
-zmq::socket_base_t::socket_base_t (app_thread_t *parent_) :
- object_t (parent_),
- pending_term_acks (0),
+zmq::socket_base_t *zmq::socket_base_t::create (int type_, class ctx_t *parent_,
+ uint32_t slot_)
+{
+ socket_base_t *s = NULL;
+ switch (type_) {
+
+ case ZMQ_PAIR:
+ s = new (std::nothrow) pair_t (parent_, slot_);
+ break;
+ case ZMQ_PUB:
+ s = new (std::nothrow) pub_t (parent_, slot_);
+ break;
+ case ZMQ_SUB:
+ s = new (std::nothrow) sub_t (parent_, slot_);
+ break;
+ case ZMQ_REQ:
+ s = new (std::nothrow) req_t (parent_, slot_);
+ break;
+ case ZMQ_REP:
+ s = new (std::nothrow) rep_t (parent_, slot_);
+ break;
+ case ZMQ_XREQ:
+ s = new (std::nothrow) xreq_t (parent_, slot_);
+ break;
+ case ZMQ_XREP:
+ s = new (std::nothrow) xrep_t (parent_, slot_);
+ break;
+ case ZMQ_PULL:
+ s = new (std::nothrow) pull_t (parent_, slot_);
+ break;
+ case ZMQ_PUSH:
+ s = new (std::nothrow) push_t (parent_, slot_);
+ break;
+ default:
+ errno = EINVAL;
+ return NULL;
+ }
+ zmq_assert (s);
+ return s;
+}
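
The new static factory maps the public ZMQ_* type constants onto the concrete socket classes, and an unknown type is reported as EINVAL. A small sketch of how this surfaces through the C API (the single-argument zmq_init used here is an assumption; older 2.0 builds take (app_threads, io_threads, flags), and 12345 is just an arbitrary invalid type):

    #include <zmq.h>
    #include <assert.h>

    int main ()
    {
        void *ctx = zmq_init (1);
        assert (ctx);

        //  Each constant is routed through socket_base_t::create ().
        void *pub = zmq_socket (ctx, ZMQ_PUB);
        void *sub = zmq_socket (ctx, ZMQ_SUB);
        assert (pub && sub);

        //  An unknown type is rejected (errno set to EINVAL by create ()).
        void *bad = zmq_socket (ctx, 12345);
        assert (bad == NULL);

        zmq_close (sub);
        zmq_close (pub);
        zmq_term (ctx);
        return 0;
    }
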
+
+zmq::socket_base_t::socket_base_t (ctx_t *parent_, uint32_t slot_) :
+ own_t (parent_, slot_),
+ ctx_terminated (false),
+ destroyed (false),
+ last_tsc (0),
ticks (0),
- rcvmore (false),
- app_thread (parent_),
- shutting_down (false),
- sent_seqnum (0),
- processed_seqnum (0),
- next_ordinal (1)
+ rcvmore (false)
{
}
zmq::socket_base_t::~socket_base_t ()
{
+ zmq_assert (destroyed);
+
+ // Check whether there are no session leaks.
+ sessions_sync.lock ();
+ zmq_assert (sessions.empty ());
+ sessions_sync.unlock ();
+}
+
+zmq::signaler_t *zmq::socket_base_t::get_signaler ()
+{
+ return &signaler;
+}
+
+void zmq::socket_base_t::stop ()
+{
+ // Called by ctx when it is terminated (zmq_term).
+ // The 'stop' command is sent from the thread that called zmq_term to
+ // the thread owning the socket. This way, a blocking call in the
+ // owner thread can be interrupted.
+ send_stop ();
+}
+
+int zmq::socket_base_t::check_protocol (const std::string &protocol_)
+{
+ // First check whether the protocol is something we are aware of.
+ if (protocol_ != "inproc" && protocol_ != "ipc" && protocol_ != "tcp" &&
+ protocol_ != "pgm" && protocol_ != "epgm" && protocol_ != "sys") {
+ errno = EPROTONOSUPPORT;
+ return -1;
+ }
+
+ // If 0MQ is not compiled with OpenPGM, the pgm and epgm transports
+ // are not available.
+#if !defined ZMQ_HAVE_OPENPGM
+ if (protocol_ == "pgm" || protocol_ == "epgm") {
+ errno = EPROTONOSUPPORT;
+ return -1;
+ }
+#endif
+
+ // IPC transport is not available on Windows and OpenVMS.
+#if defined ZMQ_HAVE_WINDOWS || defined ZMQ_HAVE_OPENVMS
+ if (protocol_ == "ipc") {
+ // IPC is not supported on this platform.
+ errno = EPROTONOSUPPORT;
+ return -1;
+ }
+#endif
+
+ // Check whether socket type and transport protocol match.
+ // Specifically, multicast protocols can't be combined with
+ // bi-directional messaging patterns (socket types).
+ if ((protocol_ == "pgm" || protocol_ == "epgm") &&
+ options.requires_in && options.requires_out) {
+ errno = ENOCOMPATPROTO;
+ return -1;
+ }
+
+ // Protocol is available.
+ return 0;
+}
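
check_protocol () centralises the transport checks that used to be scattered through bind () and connect (): unknown schemes and transports missing from the build map to EPROTONOSUPPORT, and multicast combined with a bidirectional socket type maps to ENOCOMPATPROTO. An illustrative sketch (addresses are arbitrary; the exact errno for the epgm case depends on whether the build includes OpenPGM):

    #include <zmq.h>
    #include <assert.h>
    #include <errno.h>

    int main ()
    {
        void *ctx = zmq_init (1);            //  Single-argument form assumed.
        void *req = zmq_socket (ctx, ZMQ_REQ);

        //  Unknown scheme: rejected with EPROTONOSUPPORT.
        assert (zmq_connect (req, "foo://bar") == -1
            && errno == EPROTONOSUPPORT);

        //  REQ needs both directions, so a multicast transport is rejected
        //  (ENOCOMPATPROTO with OpenPGM, EPROTONOSUPPORT without it).
        assert (zmq_connect (req, "epgm://eth0;239.192.1.1:5555") == -1);

        zmq_close (req);
        zmq_term (ctx);
        return 0;
    }
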
+
+void zmq::socket_base_t::attach_pipes (class reader_t *inpipe_,
+ class writer_t *outpipe_, const blob_t &peer_identity_)
+{
+ // If the peer hasn't specified its identity, let's generate one.
+ if (peer_identity_.size ()) {
+ xattach_pipes (inpipe_, outpipe_, peer_identity_);
+ }
+ else {
+ blob_t identity (1, 0);
+ identity.append (uuid_t ().to_blob (), uuid_t::uuid_blob_len);
+ xattach_pipes (inpipe_, outpipe_, identity);
+ }
}
int zmq::socket_base_t::setsockopt (int option_, const void *optval_,
size_t optvallen_)
{
- if (unlikely (app_thread->is_terminated ())) {
+ if (unlikely (ctx_terminated)) {
errno = ETERM;
return -1;
}
@@ -79,7 +207,7 @@ int zmq::socket_base_t::setsockopt (int option_, const void *optval_,
int zmq::socket_base_t::getsockopt (int option_, void *optval_,
size_t *optvallen_)
{
- if (unlikely (app_thread->is_terminated ())) {
+ if (unlikely (ctx_terminated)) {
errno = ETERM;
return -1;
}
@@ -94,271 +222,225 @@ int zmq::socket_base_t::getsockopt (int option_, void *optval_,
return 0;
}
+ if (option_ == ZMQ_FD) {
+ if (*optvallen_ < sizeof (fd_t)) {
+ errno = EINVAL;
+ return -1;
+ }
+ *((fd_t*) optval_) = signaler.get_fd ();
+ *optvallen_ = sizeof (fd_t);
+ return 0;
+ }
+
+ if (option_ == ZMQ_EVENTS) {
+ if (*optvallen_ < sizeof (uint32_t)) {
+ errno = EINVAL;
+ return -1;
+ }
+ int rc = process_commands (false, false);
+ if (rc != 0 && errno == EINTR)
+ return -1;
+ errno_assert (rc == 0);
+ *((uint32_t*) optval_) = 0;
+ if (has_out ())
+ *((uint32_t*) optval_) |= ZMQ_POLLOUT;
+ if (has_in ())
+ *((uint32_t*) optval_) |= ZMQ_POLLIN;
+ *optvallen_ = sizeof (uint32_t);
+ return 0;
+ }
+
return options.getsockopt (option_, optval_, optvallen_);
}
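
ZMQ_FD exposes the signaler descriptor so the socket can be woven into an external event loop, and ZMQ_EVENTS reports the actual readiness after pending commands have been processed. A POSIX-only sketch of the intended usage (wait_readable is a hypothetical helper; on Windows the descriptor is a SOCKET rather than an int):

    #include <zmq.h>
    #include <poll.h>
    #include <errno.h>
    #include <stddef.h>
    #include <stdint.h>

    //  Block until the 0MQ socket is readable. Readability of the ZMQ_FD
    //  descriptor only means "commands may be pending", so the real state
    //  is re-checked through ZMQ_EVENTS on every iteration.
    static int wait_readable (void *socket)
    {
        int fd;
        size_t fd_size = sizeof fd;
        if (zmq_getsockopt (socket, ZMQ_FD, &fd, &fd_size) != 0)
            return -1;

        while (true) {
            uint32_t events;
            size_t events_size = sizeof events;
            if (zmq_getsockopt (socket, ZMQ_EVENTS, &events,
                  &events_size) != 0)
                return -1;
            if (events & ZMQ_POLLIN)
                return 0;

            struct pollfd pfd = {fd, POLLIN, 0};
            if (poll (&pfd, 1, -1) == -1 && errno != EINTR)
                return -1;
        }
    }
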
int zmq::socket_base_t::bind (const char *addr_)
{
- if (unlikely (app_thread->is_terminated ())) {
+ if (unlikely (ctx_terminated)) {
errno = ETERM;
return -1;
}
// Parse addr_ string.
- std::string addr_type;
- std::string addr_args;
-
- std::string addr (addr_);
- std::string::size_type pos = addr.find ("://");
-
- if (pos == std::string::npos) {
- errno = EINVAL;
- return -1;
+ std::string protocol;
+ std::string address;
+ {
+ std::string addr (addr_);
+ std::string::size_type pos = addr.find ("://");
+ if (pos == std::string::npos) {
+ errno = EINVAL;
+ return -1;
+ }
+ protocol = addr.substr (0, pos);
+ address = addr.substr (pos + 3);
}
- addr_type = addr.substr (0, pos);
- addr_args = addr.substr (pos + 3);
+ int rc = check_protocol (protocol);
+ if (rc != 0)
+ return -1;
- if (addr_type == "inproc")
- return register_endpoint (addr_args.c_str (), this);
+ if (protocol == "inproc" || protocol == "sys")
+ return register_endpoint (addr_, this);
- if (addr_type == "tcp" || addr_type == "ipc") {
+ if (protocol == "tcp" || protocol == "ipc") {
-#if defined ZMQ_HAVE_WINDOWS || defined ZMQ_HAVE_OPENVMS
- if (addr_type == "ipc") {
- errno = EPROTONOSUPPORT;
+ // Choose the I/O thread to run the listener in.
+ io_thread_t *io_thread = choose_io_thread (options.affinity);
+ if (!io_thread) {
+ errno = EMTHREAD;
return -1;
}
-#endif
+ // Create and run the listener.
zmq_listener_t *listener = new (std::nothrow) zmq_listener_t (
- choose_io_thread (options.affinity), this, options);
+ io_thread, this, options);
zmq_assert (listener);
- int rc = listener->set_address (addr_type.c_str(), addr_args.c_str ());
+ int rc = listener->set_address (protocol.c_str(), address.c_str ());
if (rc != 0) {
delete listener;
return -1;
}
+ launch_child (listener);
- send_plug (listener);
- send_own (this, listener);
return 0;
}
-#if defined ZMQ_HAVE_OPENPGM
- if (addr_type == "pgm" || addr_type == "epgm") {
- // In the case of PGM bind behaves the same like connect.
+ if (protocol == "pgm" || protocol == "epgm") {
+
+ // For convenience's sake, bind can be used interchangeably with
+ // connect for the PGM and EPGM transports.
return connect (addr_);
}
-#endif
- // Unknown protocol.
- errno = EPROTONOSUPPORT;
+ zmq_assert (false);
return -1;
}
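
bind () now follows one path for every transport: parse the scheme, validate it via check_protocol (), then either register an inproc endpoint, launch a listener child, or fall through to connect () for multicast. A usage sketch (the endpoint strings are arbitrary and the binds can of course fail if a port or IPC path is already taken):

    #include <zmq.h>
    #include <assert.h>

    int main ()
    {
        void *ctx = zmq_init (1);            //  Single-argument form assumed.
        void *pub = zmq_socket (ctx, ZMQ_PUB);

        assert (zmq_bind (pub, "inproc://weather") == 0);
        assert (zmq_bind (pub, "tcp://127.0.0.1:5556") == 0);
    #if !defined _WIN32
        assert (zmq_bind (pub, "ipc:///tmp/weather.ipc") == 0);
    #endif

        zmq_close (pub);
        zmq_term (ctx);
        return 0;
    }
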
int zmq::socket_base_t::connect (const char *addr_)
{
- if (unlikely (app_thread->is_terminated ())) {
+ if (unlikely (ctx_terminated)) {
errno = ETERM;
return -1;
}
// Parse addr_ string.
- std::string addr_type;
- std::string addr_args;
-
- std::string addr (addr_);
- std::string::size_type pos = addr.find ("://");
-
- if (pos == std::string::npos) {
- errno = EINVAL;
- return -1;
+ std::string protocol;
+ std::string address;
+ {
+ std::string addr (addr_);
+ std::string::size_type pos = addr.find ("://");
+ if (pos == std::string::npos) {
+ errno = EINVAL;
+ return -1;
+ }
+ protocol = addr.substr (0, pos);
+ address = addr.substr (pos + 3);
}
- addr_type = addr.substr (0, pos);
- addr_args = addr.substr (pos + 3);
+ int rc = check_protocol (protocol);
+ if (rc != 0)
+ return -1;
- if (addr_type == "inproc") {
+ if (protocol == "inproc" || protocol == "sys") {
// TODO: inproc connect is specific with respect to creating pipes
// as there's no 'reconnect' functionality implemented. Once that
// is in place we should follow generic pipe creation algorithm.
// Find the peer socket.
- socket_base_t *peer = find_endpoint (addr_args.c_str ());
+ socket_base_t *peer = find_endpoint (addr_);
if (!peer)
return -1;
- pipe_t *in_pipe = NULL;
- pipe_t *out_pipe = NULL;
-
+ reader_t *inpipe_reader = NULL;
+ writer_t *inpipe_writer = NULL;
+ reader_t *outpipe_reader = NULL;
+ writer_t *outpipe_writer = NULL;
+
// Create inbound pipe, if required.
- if (options.requires_in) {
- in_pipe = new (std::nothrow) pipe_t (this, peer, options.hwm, options.swap);
- zmq_assert (in_pipe);
- }
+ if (options.requires_in)
+ create_pipe (this, peer, options.hwm, options.swap,
+ &inpipe_reader, &inpipe_writer);
// Create outbound pipe, if required.
- if (options.requires_out) {
- out_pipe = new (std::nothrow) pipe_t (peer, this, options.hwm, options.swap);
- zmq_assert (out_pipe);
- }
+ if (options.requires_out)
+ create_pipe (peer, this, options.hwm, options.swap,
+ &outpipe_reader, &outpipe_writer);
// Attach the pipes to this socket object.
- attach_pipes (in_pipe ? &in_pipe->reader : NULL,
- out_pipe ? &out_pipe->writer : NULL, blob_t ());
+ attach_pipes (inpipe_reader, outpipe_writer, blob_t ());
// Attach the pipes to the peer socket. Note that peer's seqnum
- // was incremented in find_endpoint function. The callee is notified
- // about the fact via the last parameter.
- send_bind (peer, out_pipe ? &out_pipe->reader : NULL,
- in_pipe ? &in_pipe->writer : NULL, options.identity, false);
+ // was already incremented in the find_endpoint function, so we don't
+ // need to increase it here.
+ send_bind (peer, outpipe_reader, inpipe_writer,
+ options.identity, false);
return 0;
}
- // Create unnamed session.
+ // Choose the I/O thread to run the session in.
io_thread_t *io_thread = choose_io_thread (options.affinity);
- session_t *session = new (std::nothrow) session_t (io_thread,
- this, options);
+ if (!io_thread) {
+ errno = EMTHREAD;
+ return -1;
+ }
+
+ // Create session.
+ connect_session_t *session = new (std::nothrow) connect_session_t (
+ io_thread, this, options, protocol.c_str (), address.c_str ());
zmq_assert (session);
- // If 'immediate connect' feature is required, we'll created the pipes
+ // If 'immediate connect' feature is required, we'll create the pipes
// to the session straight away. Otherwise, they'll be created by the
// session once the connection is established.
if (options.immediate_connect) {
- pipe_t *in_pipe = NULL;
- pipe_t *out_pipe = NULL;
+ reader_t *inpipe_reader = NULL;
+ writer_t *inpipe_writer = NULL;
+ reader_t *outpipe_reader = NULL;
+ writer_t *outpipe_writer = NULL;
// Create inbound pipe, if required.
- if (options.requires_in) {
- in_pipe = new (std::nothrow) pipe_t (this, session, options.hwm, options.swap);
- zmq_assert (in_pipe);
-
- }
+ if (options.requires_in)
+ create_pipe (this, session, options.hwm, options.swap,
+ &inpipe_reader, &inpipe_writer);
// Create outbound pipe, if required.
- if (options.requires_out) {
- out_pipe = new (std::nothrow) pipe_t (session, this, options.hwm, options.swap);
- zmq_assert (out_pipe);
- }
+ if (options.requires_out)
+ create_pipe (session, this, options.hwm, options.swap,
+ &outpipe_reader, &outpipe_writer);
// Attach the pipes to the socket object.
- attach_pipes (in_pipe ? &in_pipe->reader : NULL,
- out_pipe ? &out_pipe->writer : NULL, blob_t ());
+ attach_pipes (inpipe_reader, outpipe_writer, blob_t ());
// Attach the pipes to the session object.
- session->attach_pipes (out_pipe ? &out_pipe->reader : NULL,
- in_pipe ? &in_pipe->writer : NULL, blob_t ());
- }
-
- // Activate the session.
- send_plug (session);
- send_own (this, session);
-
- if (addr_type == "tcp" || addr_type == "ipc") {
-
-#if defined ZMQ_HAVE_WINDOWS || defined ZMQ_HAVE_OPENVMS
- // Windows named pipes are not compatible with Winsock API.
- // There's no UNIX domain socket implementation on OpenVMS.
- if (addr_type == "ipc") {
- errno = EPROTONOSUPPORT;
- return -1;
- }
-#endif
-
- // Create the connecter object. Supply it with the session name
- // so that it can bind the new connection to the session once
- // it is established.
- zmq_connecter_t *connecter = new (std::nothrow) zmq_connecter_t (
- choose_io_thread (options.affinity), this, options,
- session->get_ordinal (), false);
- zmq_assert (connecter);
- int rc = connecter->set_address (addr_type.c_str(), addr_args.c_str ());
- if (rc != 0) {
- delete connecter;
- return -1;
- }
- send_plug (connecter);
- send_own (this, connecter);
-
- return 0;
+ session->attach_pipes (outpipe_reader, inpipe_writer, blob_t ());
}
-#if defined ZMQ_HAVE_OPENPGM
- if (addr_type == "pgm" || addr_type == "epgm") {
-
- // If the socket type requires bi-directional communication
- // multicast is not an option (it is uni-directional).
- if (options.requires_in && options.requires_out) {
- errno = ENOCOMPATPROTO;
- return -1;
- }
-
- // For epgm, pgm transport with UDP encapsulation is used.
- bool udp_encapsulation = (addr_type == "epgm");
-
- // At this point we'll create message pipes to the session straight
- // away. There's no point in delaying it as no concept of 'connect'
- // exists with PGM anyway.
- if (options.requires_out) {
+ // Activate the session. Make it a child of this socket.
+ launch_child (session);
- // PGM sender.
- pgm_sender_t *pgm_sender = new (std::nothrow) pgm_sender_t (
- choose_io_thread (options.affinity), options);
- zmq_assert (pgm_sender);
-
- int rc = pgm_sender->init (udp_encapsulation, addr_args.c_str ());
- if (rc != 0) {
- delete pgm_sender;
- return -1;
- }
-
- send_attach (session, pgm_sender, blob_t ());
- }
- else if (options.requires_in) {
-
- // PGM receiver.
- pgm_receiver_t *pgm_receiver = new (std::nothrow) pgm_receiver_t (
- choose_io_thread (options.affinity), options);
- zmq_assert (pgm_receiver);
-
- int rc = pgm_receiver->init (udp_encapsulation, addr_args.c_str ());
- if (rc != 0) {
- delete pgm_receiver;
- return -1;
- }
-
- send_attach (session, pgm_receiver, blob_t ());
- }
- else
- zmq_assert (false);
-
- return 0;
- }
-#endif
-
- // Unknown protoco.
- errno = EPROTONOSUPPORT;
- return -1;
+ return 0;
}
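
As the TODO above notes, inproc still has no reconnect machinery, so the connecting side only succeeds if the peer endpoint has already been registered by a bind. A minimal sketch of that ordering constraint (the endpoint name is arbitrary):

    #include <zmq.h>
    #include <assert.h>

    int main ()
    {
        void *ctx = zmq_init (1);            //  Single-argument form assumed.
        void *a = zmq_socket (ctx, ZMQ_PAIR);
        void *b = zmq_socket (ctx, ZMQ_PAIR);

        //  find_endpoint () has nothing to return yet, so this fails.
        assert (zmq_connect (b, "inproc://link") == -1);

        assert (zmq_bind (a, "inproc://link") == 0);
        assert (zmq_connect (b, "inproc://link") == 0);

        zmq_close (b);
        zmq_close (a);
        zmq_term (ctx);
        return 0;
    }
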
int zmq::socket_base_t::send (::zmq_msg_t *msg_, int flags_)
{
- // Process pending commands, if any.
- if (unlikely (!app_thread->process_commands (false, true))) {
+ if (unlikely (ctx_terminated)) {
errno = ETERM;
return -1;
}
+ // Process pending commands, if any.
+ int rc = process_commands (false, true);
+ if (unlikely (rc != 0))
+ return -1;
+
// At this point we impose the MORE flag on the message.
if (flags_ & ZMQ_SNDMORE)
msg_->flags |= ZMQ_MSG_MORE;
// Try to send the message.
- int rc = xsend (msg_, flags_);
+ rc = xsend (msg_, flags_);
if (rc == 0)
return 0;
@@ -372,10 +454,8 @@ int zmq::socket_base_t::send (::zmq_msg_t *msg_, int flags_)
while (rc != 0) {
if (errno != EAGAIN)
return -1;
- if (unlikely (!app_thread->process_commands (true, false))) {
- errno = ETERM;
+ if (unlikely (process_commands (true, false) != 0))
return -1;
- }
rc = xsend (msg_, flags_);
}
return 0;
@@ -383,6 +463,11 @@ int zmq::socket_base_t::send (::zmq_msg_t *msg_, int flags_)
int zmq::socket_base_t::recv (::zmq_msg_t *msg_, int flags_)
{
+ if (unlikely (ctx_terminated)) {
+ errno = ETERM;
+ return -1;
+ }
+
// Get the message.
int rc = xrecv (msg_, flags_);
int err = errno;
@@ -394,12 +479,10 @@ int zmq::socket_base_t::recv (::zmq_msg_t *msg_, int flags_)
//
// Note that 'recv' uses a different command throttling algorithm (the one
// described above) from the one used by 'send'. This is because counting
- // ticks is more efficient than doing rdtsc all the time.
+ // ticks is more efficient than doing RDTSC all the time.
if (++ticks == inbound_poll_rate) {
- if (unlikely (!app_thread->process_commands (false, false))) {
- errno = ETERM;
+ if (unlikely (process_commands (false, false) != 0))
return -1;
- }
ticks = 0;
}
@@ -415,15 +498,14 @@ int zmq::socket_base_t::recv (::zmq_msg_t *msg_, int flags_)
errno = err;
// If the message cannot be fetched immediately, there are two scenarios.
- // For non-blocking recv, commands are processed in case there's a revive
- // command already waiting int a command pipe. If it's not, return EAGAIN.
+ // For non-blocking recv, commands are processed in case there's an
+ // activate_reader command already waiting in a command pipe.
+ // If there isn't, return EAGAIN.
if (flags_ & ZMQ_NOBLOCK) {
if (errno != EAGAIN)
return -1;
- if (unlikely (!app_thread->process_commands (false, false))) {
- errno = ETERM;
+ if (unlikely (process_commands (false, false) != 0))
return -1;
- }
ticks = 0;
rc = xrecv (msg_, flags_);
@@ -440,10 +522,8 @@ int zmq::socket_base_t::recv (::zmq_msg_t *msg_, int flags_)
while (rc != 0) {
if (errno != EAGAIN)
return -1;
- if (unlikely (!app_thread->process_commands (true, false))) {
- errno = ETERM;
+ if (unlikely (process_commands (true, false) != 0))
return -1;
- }
rc = xrecv (msg_, flags_);
ticks = 0;
}
@@ -456,74 +536,17 @@ int zmq::socket_base_t::recv (::zmq_msg_t *msg_, int flags_)
int zmq::socket_base_t::close ()
{
- shutting_down = true;
-
- // Let the thread know that the socket is no longer available.
- app_thread->remove_socket (this);
+ // Start termination of associated I/O object hierarchy.
+ terminate ();
- // Pointer to the context must be retrieved before the socket is
- // deallocated. Afterwards it is not available.
- ctx_t *ctx = get_ctx ();
-
- // Unregister all inproc endpoints associated with this socket.
- // From this point we are sure that inc_seqnum won't be called again
- // on this object.
- ctx->unregister_endpoints (this);
-
- // Wait till all undelivered commands are delivered. This should happen
- // very quickly. There's no way to wait here for extensive period of time.
- while (processed_seqnum != sent_seqnum.get ())
- app_thread->process_commands (true, false);
-
- while (true) {
-
- // On third pass of the loop there should be no more I/O objects
- // because all connecters and listerners were destroyed during
- // the first pass and all engines delivered by delayed 'own' commands
- // are destroyed during the second pass.
- if (io_objects.empty () && !pending_term_acks)
- break;
-
- // Send termination request to all associated I/O objects.
- for (io_objects_t::iterator it = io_objects.begin ();
- it != io_objects.end (); it++)
- send_term (*it);
-
- // Move the objects to the list of pending term acks.
- pending_term_acks += io_objects.size ();
- io_objects.clear ();
-
- // Process commands till we get all the termination acknowledgements.
- while (pending_term_acks)
- app_thread->process_commands (true, false);
- }
-
- // Check whether there are no session leaks.
- sessions_sync.lock ();
- zmq_assert (named_sessions.empty ());
- zmq_assert (unnamed_sessions.empty ());
- sessions_sync.unlock ();
-
- delete this;
-
- // This function must be called after the socket is completely deallocated
- // as it may cause termination of the whole 0MQ infrastructure.
- ctx->destroy_socket ();
+ // Ask the context to zombify this socket. In other words, transfer
+ // ownership of the socket from this application thread to the context,
+ // which will take care of the rest of the shutdown process.
+ zombify_socket (this);
return 0;
}
-void zmq::socket_base_t::inc_seqnum ()
-{
- // NB: This function may be called from a different thread!
- sent_seqnum.add (1);
-}
-
-zmq::app_thread_t *zmq::socket_base_t::get_thread ()
-{
- return app_thread;
-}
-
bool zmq::socket_base_t::has_in ()
{
return xhas_in ();
@@ -534,30 +557,30 @@ bool zmq::socket_base_t::has_out ()
return xhas_out ();
}
-bool zmq::socket_base_t::register_session (const blob_t &peer_identity_,
+bool zmq::socket_base_t::register_session (const blob_t &name_,
session_t *session_)
{
sessions_sync.lock ();
- bool registered = named_sessions.insert (
- std::make_pair (peer_identity_, session_)).second;
+ bool registered = sessions.insert (
+ sessions_t::value_type (name_, session_)).second;
sessions_sync.unlock ();
return registered;
}
-void zmq::socket_base_t::unregister_session (const blob_t &peer_identity_)
+void zmq::socket_base_t::unregister_session (const blob_t &name_)
{
sessions_sync.lock ();
- named_sessions_t::iterator it = named_sessions.find (peer_identity_);
- zmq_assert (it != named_sessions.end ());
- named_sessions.erase (it);
+ sessions_t::iterator it = sessions.find (name_);
+ zmq_assert (it != sessions.end ());
+ sessions.erase (it);
sessions_sync.unlock ();
}
-zmq::session_t *zmq::socket_base_t::find_session (const blob_t &peer_identity_)
+zmq::session_t *zmq::socket_base_t::find_session (const blob_t &name_)
{
sessions_sync.lock ();
- named_sessions_t::iterator it = named_sessions.find (peer_identity_);
- if (it == named_sessions.end ()) {
+ sessions_t::iterator it = sessions.find (name_);
+ if (it == sessions.end ()) {
sessions_sync.unlock ();
return NULL;
}
@@ -570,129 +593,136 @@ zmq::session_t *zmq::socket_base_t::find_session (const blob_t &peer_identity_)
return session;
}
-uint64_t zmq::socket_base_t::register_session (session_t *session_)
+bool zmq::socket_base_t::dezombify ()
{
- sessions_sync.lock ();
- uint64_t ordinal = next_ordinal;
- next_ordinal++;
- unnamed_sessions.insert (std::make_pair (ordinal, session_));
- sessions_sync.unlock ();
- return ordinal;
-}
+ // Process any commands from other threads/sockets that may be available
+ // at the moment. Ultimately, the socket will be destroyed.
+ process_commands (false, false);
+
+ // If the object was already marked as destroyed, finish the deallocation.
+ if (destroyed) {
+ own_t::process_destroy ();
+ return true;
+ }
-void zmq::socket_base_t::unregister_session (uint64_t ordinal_)
-{
- sessions_sync.lock ();
- unnamed_sessions_t::iterator it = unnamed_sessions.find (ordinal_);
- zmq_assert (it != unnamed_sessions.end ());
- unnamed_sessions.erase (it);
- sessions_sync.unlock ();
+ return false;
}
-zmq::session_t *zmq::socket_base_t::find_session (uint64_t ordinal_)
+int zmq::socket_base_t::process_commands (bool block_, bool throttle_)
{
- sessions_sync.lock ();
+ int rc;
+ command_t cmd;
+ if (block_) {
+ rc = signaler.recv (&cmd, true);
+ if (rc == -1 && errno == EINTR)
+ return -1;
+ errno_assert (rc == 0);
+ }
+ else {
- unnamed_sessions_t::iterator it = unnamed_sessions.find (ordinal_);
- if (it == unnamed_sessions.end ()) {
- sessions_sync.unlock ();
- return NULL;
+ // Get the CPU's tick counter. If 0, the counter is not available.
+ uint64_t tsc = zmq::clock_t::rdtsc ();
+
+ // Optimised version of command processing - it doesn't have to check
+ // for incoming commands each time. It does so only if a certain time
+ // has elapsed since the last command processing. Command delay varies
+ // depending on CPU speed: It's ~1ms on 3GHz CPU, ~2ms on 1.5GHz CPU
+ // etc. The optimisation makes sense only on platforms where getting
+ // a timestamp is a very cheap operation (tens of nanoseconds).
+ if (tsc && throttle_) {
+
+ // Check that the TSC hasn't jumped backwards (in case of migration
+ // between CPU cores) and whether enough time has elapsed since the
+ // last command processing. If it hasn't, do nothing.
+ if (tsc >= last_tsc && tsc - last_tsc <= max_command_delay)
+ return 0;
+ last_tsc = tsc;
+ }
+
+ // Check whether there are any commands pending for this thread.
+ rc = signaler.recv (&cmd, false);
}
- session_t *session = it->second;
- // Prepare the session for subsequent attach command.
- session->inc_seqnum ();
+ // Process all the commands available at the moment.
+ while (true) {
+ if (rc == -1 && errno == EAGAIN)
+ break;
+ if (rc == -1 && errno == EINTR)
+ return -1;
+ errno_assert (rc == 0);
+ cmd.destination->process_command (cmd);
+ rc = signaler.recv (&cmd, false);
+ }
- sessions_sync.unlock ();
- return session;
-}
+ if (ctx_terminated) {
+ errno = ETERM;
+ return -1;
+ }
-void zmq::socket_base_t::kill (reader_t *pipe_)
-{
- xkill (pipe_);
+ return 0;
}
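
The throttling branch above skips the command check while the TSC is monotonic and still within max_command_delay of the previous check. An illustrative restatement of that predicate, separated from the surrounding I/O (should_check_commands is a hypothetical helper, not part of the patch):

    #include <stdint.h>

    static bool should_check_commands (uint64_t tsc, uint64_t &last_tsc,
        uint64_t max_delay)
    {
        //  No usable tick counter: always check for commands.
        if (!tsc)
            return true;

        //  TSC is monotonic and we are still inside the delay window:
        //  skip the check entirely.
        if (tsc >= last_tsc && tsc - last_tsc <= max_delay)
            return false;

        last_tsc = tsc;
        return true;
    }
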
-void zmq::socket_base_t::revive (reader_t *pipe_)
+void zmq::socket_base_t::process_stop ()
{
- xrevive (pipe_);
+ // Here, someone has called zmq_term while the socket was still alive.
+ // We'll remember the fact so that any blocking call is interrupted and any
+ // further attempt to use the socket will return ETERM. The user is still
+ // responsible for calling zmq_close on the socket though!
+ ctx_terminated = true;
}
-void zmq::socket_base_t::revive (writer_t *pipe_)
+void zmq::socket_base_t::process_bind (reader_t *in_pipe_, writer_t *out_pipe_,
+ const blob_t &peer_identity_)
{
- xrevive (pipe_);
+ attach_pipes (in_pipe_, out_pipe_, peer_identity_);
}
-void zmq::socket_base_t::attach_pipes (class reader_t *inpipe_,
- class writer_t *outpipe_, const blob_t &peer_identity_)
+void zmq::socket_base_t::process_unplug ()
{
- if (inpipe_)
- inpipe_->set_endpoint (this);
- if (outpipe_)
- outpipe_->set_endpoint (this);
-
- // If the peer haven't specified it's identity, let's generate one.
- if (peer_identity_.size ()) {
- xattach_pipes (inpipe_, outpipe_, peer_identity_);
- }
- else {
- blob_t identity (1, 0);
- identity.append (uuid_t ().to_blob (), uuid_t::uuid_blob_len);
- xattach_pipes (inpipe_, outpipe_, identity);
- }
}
-void zmq::socket_base_t::detach_inpipe (class reader_t *pipe_)
+void zmq::socket_base_t::process_term (int linger_)
{
- xdetach_inpipe (pipe_);
- pipe_->set_endpoint (NULL); // ?
+ // Unregister all inproc endpoints associated with this socket.
+ // By doing this we make sure that no new pipes from other (inproc)
+ // sockets will be initiated.
+ unregister_endpoints (this);
+
+ // Continue the termination process immediately.
+ own_t::process_term (linger_);
}
-void zmq::socket_base_t::detach_outpipe (class writer_t *pipe_)
+void zmq::socket_base_t::process_destroy ()
{
- xdetach_outpipe (pipe_);
- pipe_->set_endpoint (NULL); // ?
+ destroyed = true;
}
-void zmq::socket_base_t::process_own (owned_t *object_)
+int zmq::socket_base_t::xsetsockopt (int option_, const void *optval_,
+ size_t optvallen_)
{
- io_objects.insert (object_);
+ errno = EINVAL;
+ return -1;
}
-void zmq::socket_base_t::process_bind (reader_t *in_pipe_, writer_t *out_pipe_,
- const blob_t &peer_identity_)
+bool zmq::socket_base_t::xhas_out ()
{
- attach_pipes (in_pipe_, out_pipe_, peer_identity_);
+ return false;
}
-void zmq::socket_base_t::process_term_req (owned_t *object_)
+int zmq::socket_base_t::xsend (zmq_msg_t *msg_, int options_)
{
- // When shutting down we can ignore termination requests from owned
- // objects. They are going to be terminated anyway.
- if (shutting_down)
- return;
-
- // If I/O object is well and alive ask it to terminate.
- io_objects_t::iterator it = std::find (io_objects.begin (),
- io_objects.end (), object_);
-
- // If not found, we assume that termination request was already sent to
- // the object so we can sagely ignore the request.
- if (it == io_objects.end ())
- return;
-
- pending_term_acks++;
- io_objects.erase (it);
- send_term (object_);
+ errno = ENOTSUP;
+ return -1;
}
-void zmq::socket_base_t::process_term_ack ()
+bool zmq::socket_base_t::xhas_in ()
{
- zmq_assert (pending_term_acks);
- pending_term_acks--;
+ return false;
}
-void zmq::socket_base_t::process_seqnum ()
+int zmq::socket_base_t::xrecv (zmq_msg_t *msg_, int options_)
{
- processed_seqnum++;
+ errno = ENOTSUP;
+ return -1;
}
diff --git a/src/socket_base.hpp b/src/socket_base.hpp
index 3d95cec..5d083ca 100644
--- a/src/socket_base.hpp
+++ b/src/socket_base.hpp
@@ -20,31 +20,40 @@
#ifndef __ZMQ_SOCKET_BASE_HPP_INCLUDED__
#define __ZMQ_SOCKET_BASE_HPP_INCLUDED__
-#include <set>
#include <map>
#include <vector>
#include "../include/zmq.h"
-#include "i_endpoint.hpp"
-#include "object.hpp"
-#include "yarray_item.hpp"
+#include "own.hpp"
+#include "array.hpp"
#include "mutex.hpp"
-#include "options.hpp"
#include "stdint.hpp"
#include "atomic_counter.hpp"
+#include "signaler.hpp"
#include "stdint.hpp"
#include "blob.hpp"
+#include "own.hpp"
namespace zmq
{
class socket_base_t :
- public object_t, public i_endpoint, public yarray_item_t
+ public own_t,
+ public array_item_t
{
public:
- socket_base_t (class app_thread_t *parent_);
+ // Create a socket of a specified type.
+ static socket_base_t *create (int type_, class ctx_t *parent_,
+ uint32_t slot_);
+
+ // Returns the signaler associated with this socket.
+ signaler_t *get_signaler ();
+
+ // Interrupt blocking call if the socket is stuck in one.
+ // This function can be called from a different thread!
+ void stop ();
// Interface for communication with the API layer.
int setsockopt (int option_, const void *optval_, size_t optvallen_);
@@ -55,88 +64,95 @@ namespace zmq
int recv (zmq_msg_t *msg_, int flags_);
int close ();
- // When another owned object wants to send command to this object
- // it calls this function to let it know it should not shut down
- // before the command is delivered.
- void inc_seqnum ();
-
- // This function is used by the polling mechanism to determine
- // whether the socket belongs to the application thread the poll
- // is called from.
- class app_thread_t *get_thread ();
-
// These functions are used by the polling mechanism to determine
// which events are to be reported from this socket.
bool has_in ();
bool has_out ();
- // The list of sessions cannot be accessed via inter-thread
- // commands as it is unacceptable to wait for the completion of the
- // action till user application yields control of the application
- // thread to 0MQ. Locking is used instead.
- // There are two distinct types of sessions: those identified by name
- // and those identified by ordinal number. Thus two sets of session
- // management functions.
- bool register_session (const blob_t &peer_identity_,
- class session_t *session_);
- void unregister_session (const blob_t &peer_identity_);
- class session_t *find_session (const blob_t &peer_identity_);
- uint64_t register_session (class session_t *session_);
- void unregister_session (uint64_t ordinal_);
- class session_t *find_session (uint64_t ordinal_);
-
- // i_endpoint interface implementation.
- void attach_pipes (class reader_t *inpipe_, class writer_t *outpipe_,
- const blob_t &peer_identity_);
- void detach_inpipe (class reader_t *pipe_);
- void detach_outpipe (class writer_t *pipe_);
- void kill (class reader_t *pipe_);
- void revive (class reader_t *pipe_);
- void revive (class writer_t *pipe_);
+ // Registry of named sessions.
+ bool register_session (const blob_t &name_, class session_t *session_);
+ void unregister_session (const blob_t &name_);
+ class session_t *find_session (const blob_t &name_);
+
+ // i_reader_events interface implementation.
+ void activated (class reader_t *pipe_);
+ void terminated (class reader_t *pipe_);
+
+ // i_writer_events interface implementation.
+ void activated (class writer_t *pipe_);
+ void terminated (class writer_t *pipe_);
+
+ // This function should be called only on zombie sockets. It tries
+ // to deallocate the zombie. Returns true if the object is destroyed.
+ bool dezombify ();
protected:
- // Destructor is protected. Socket is closed using 'close' function.
+ socket_base_t (class ctx_t *parent_, uint32_t slot_);
virtual ~socket_base_t ();
- // Pipe management is done by individual socket types.
+ // Concrete algorithms for the x- methods are to be defined by
+ // individual socket types.
virtual void xattach_pipes (class reader_t *inpipe_,
class writer_t *outpipe_, const blob_t &peer_identity_) = 0;
- virtual void xdetach_inpipe (class reader_t *pipe_) = 0;
- virtual void xdetach_outpipe (class writer_t *pipe_) = 0;
- virtual void xkill (class reader_t *pipe_) = 0;
- virtual void xrevive (class reader_t *pipe_) = 0;
- virtual void xrevive (class writer_t *pipe_) = 0;
- // Actual algorithms are to be defined by individual socket types.
+ // The default implementation assumes there are no specific socket
+ // options for the particular socket type. If that is not the case,
+ // overload this method.
virtual int xsetsockopt (int option_, const void *optval_,
- size_t optvallen_) = 0;
- virtual int xsend (zmq_msg_t *msg_, int options_) = 0;
- virtual int xrecv (zmq_msg_t *msg_, int options_) = 0;
- virtual bool xhas_in () = 0;
- virtual bool xhas_out () = 0;
+ size_t optvallen_);
+
+ // The default implementation assumes that send is not supported.
+ virtual bool xhas_out ();
+ virtual int xsend (zmq_msg_t *msg_, int options_);
+
+ // The default implementation assumes that recv is not supported.
+ virtual bool xhas_in ();
+ virtual int xrecv (zmq_msg_t *msg_, int options_);
+
+ // We are declaring the termination handler as protected so that
+ // individual socket types can hook into the termination process
+ // by overloading it.
+ void process_term (int linger_);
- // Socket options.
- options_t options;
+ // Delay actual destruction of the socket.
+ void process_destroy ();
private:
+ // If true, associated context was already terminated.
+ bool ctx_terminated;
+
+ // If true, the object should already have been destroyed. However,
+ // destruction is delayed while we unwind the stack to the point
+ // where it doesn't intersect the object being destroyed.
+ bool destroyed;
+
+ // Check whether transport protocol, as specified in connect or
+ // bind, is available and compatible with the socket type.
+ int check_protocol (const std::string &protocol_);
+
+ // If no identity is set, generate one and call xattach_pipes ().
+ void attach_pipes (class reader_t *inpipe_, class writer_t *outpipe_,
+ const blob_t &peer_identity_);
+
+ // Processes commands sent to this socket (if any). If 'block' is
+ // set to true, returns only after at least one command was processed.
+ // If throttle argument is true, commands are processed at most once
+ // in a predefined time period.
+ int process_commands (bool block_, bool throttle_);
+
// Handlers for incoming commands.
- void process_own (class owned_t *object_);
+ void process_stop ();
void process_bind (class reader_t *in_pipe_, class writer_t *out_pipe_,
const blob_t &peer_identity_);
- void process_term_req (class owned_t *object_);
- void process_term_ack ();
- void process_seqnum ();
+ void process_unplug ();
- // List of all I/O objects owned by this socket. The socket is
- // responsible for deallocating them before it quits.
- typedef std::set <class owned_t*> io_objects_t;
- io_objects_t io_objects;
+ // App thread's signaler object.
+ signaler_t signaler;
- // Number of I/O objects that were already asked to terminate
- // but haven't acknowledged it yet.
- int pending_term_acks;
+ // Timestamp of when commands were processed the last time.
+ uint64_t last_tsc;
// Number of messages received since last command processing.
int ticks;
@@ -144,28 +160,12 @@ namespace zmq
// If true there's a half-read message in the socket.
bool rcvmore;
- // Application thread the socket lives in.
- class app_thread_t *app_thread;
-
- // If true, socket is already shutting down. No new work should be
- // started.
- bool shutting_down;
-
- // Sequence number of the last command sent to this object.
- atomic_counter_t sent_seqnum;
-
- // Sequence number of the last command processed by this object.
- uint64_t processed_seqnum;
-
- // Lists of existing sessions. This lists are never referenced from
- // within the socket, instead they are used by I/O objects owned by
+ // List of existing sessions. This list is never referenced from
+ // within the socket; instead it is used by objects owned by
// the socket. As those objects can live in different threads,
// the access is synchronised by mutex.
- typedef std::map <blob_t, session_t*> named_sessions_t;
- named_sessions_t named_sessions;
- typedef std::map <uint64_t, session_t*> unnamed_sessions_t;
- unnamed_sessions_t unnamed_sessions;
- uint64_t next_ordinal;
+ typedef std::map <blob_t, session_t*> sessions_t;
+ sessions_t sessions;
mutex_t sessions_sync;
socket_base_t (const socket_base_t&);
diff --git a/src/sub.cpp b/src/sub.cpp
index eeb50cd..20ffd91 100644
--- a/src/sub.cpp
+++ b/src/sub.cpp
@@ -24,11 +24,13 @@
#include "sub.hpp"
#include "err.hpp"
-zmq::sub_t::sub_t (class app_thread_t *parent_) :
- socket_base_t (parent_),
+zmq::sub_t::sub_t (class ctx_t *parent_, uint32_t slot_) :
+ socket_base_t (parent_, slot_),
+ fq (this),
has_message (false),
more (false)
{
+ options.type = ZMQ_SUB;
options.requires_in = true;
options.requires_out = false;
zmq_msg_init (&message);
@@ -46,31 +48,10 @@ void zmq::sub_t::xattach_pipes (class reader_t *inpipe_,
fq.attach (inpipe_);
}
-void zmq::sub_t::xdetach_inpipe (class reader_t *pipe_)
+void zmq::sub_t::process_term (int linger_)
{
- zmq_assert (pipe_);
- fq.detach (pipe_);
-}
-
-void zmq::sub_t::xdetach_outpipe (class writer_t *pipe_)
-{
- // SUB socket is read-only thus there should be no outpipes.
- zmq_assert (false);
-}
-
-void zmq::sub_t::xkill (class reader_t *pipe_)
-{
- fq.kill (pipe_);
-}
-
-void zmq::sub_t::xrevive (class reader_t *pipe_)
-{
- fq.revive (pipe_);
-}
-
-void zmq::sub_t::xrevive (class writer_t *pipe_)
-{
- zmq_assert (false);
+ fq.terminate ();
+ socket_base_t::process_term (linger_);
}
int zmq::sub_t::xsetsockopt (int option_, const void *optval_,
@@ -93,12 +74,6 @@ int zmq::sub_t::xsetsockopt (int option_, const void *optval_,
return -1;
}
-int zmq::sub_t::xsend (zmq_msg_t *msg_, int flags_)
-{
- errno = ENOTSUP;
- return -1;
-}
-
int zmq::sub_t::xrecv (zmq_msg_t *msg_, int flags_)
{
// If there's already a message prepared by a previous call to zmq_poll,
@@ -179,11 +154,6 @@ bool zmq::sub_t::xhas_in ()
}
}
-bool zmq::sub_t::xhas_out ()
-{
- return false;
-}
-
bool zmq::sub_t::match (zmq_msg_t *msg_)
{
return subscriptions.check ((unsigned char*) zmq_msg_data (msg_),
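
match () reduces SUB-side filtering to a byte-prefix lookup in the subscription store. For reference, a hedged sketch of how those prefixes are installed through the public API (subscribe_examples is a hypothetical helper):

    #include <zmq.h>
    #include <string.h>

    void subscribe_examples (void *sub)
    {
        //  An empty prefix matches every message.
        zmq_setsockopt (sub, ZMQ_SUBSCRIBE, "", 0);

        //  Only messages whose body starts with "weather." are delivered.
        const char *prefix = "weather.";
        zmq_setsockopt (sub, ZMQ_SUBSCRIBE, prefix, strlen (prefix));
    }
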
diff --git a/src/sub.hpp b/src/sub.hpp
index 7b997c9..45c9073 100644
--- a/src/sub.hpp
+++ b/src/sub.hpp
@@ -22,7 +22,7 @@
#include "../include/zmq.h"
-#include "prefix_tree.hpp"
+#include "trie.hpp"
#include "socket_base.hpp"
#include "fq.hpp"
@@ -33,7 +33,7 @@ namespace zmq
{
public:
- sub_t (class app_thread_t *parent_);
+ sub_t (class ctx_t *parent_, uint32_t slot_);
~sub_t ();
protected:
@@ -41,19 +41,15 @@ namespace zmq
// Overloads of functions from socket_base_t.
void xattach_pipes (class reader_t *inpipe_, class writer_t *outpipe_,
const blob_t &peer_identity_);
- void xdetach_inpipe (class reader_t *pipe_);
- void xdetach_outpipe (class writer_t *pipe_);
- void xkill (class reader_t *pipe_);
- void xrevive (class reader_t *pipe_);
- void xrevive (class writer_t *pipe_);
int xsetsockopt (int option_, const void *optval_, size_t optvallen_);
- int xsend (zmq_msg_t *msg_, int flags_);
int xrecv (zmq_msg_t *msg_, int flags_);
bool xhas_in ();
- bool xhas_out ();
private:
+ // Hook into the termination process.
+ void process_term (int linger_);
+
// Check whether the message matches at least one subscription.
bool match (zmq_msg_t *msg_);
@@ -61,7 +57,7 @@ namespace zmq
fq_t fq;
// The repository of subscriptions.
- prefix_tree_t subscriptions;
+ trie_t subscriptions;
// If true, 'message' contains a matching message to return on the
// next recv call.
diff --git a/src/msg_store.cpp b/src/swap.cpp
index aaf6dbe..7a2234d 100644
--- a/src/msg_store.cpp
+++ b/src/swap.cpp
@@ -35,17 +35,18 @@
#include <sstream>
#include <algorithm>
+#include "swap.hpp"
+#include "config.hpp"
#include "atomic_counter.hpp"
-#include "msg_store.hpp"
#include "err.hpp"
-zmq::msg_store_t::msg_store_t (int64_t filesize_, size_t block_size_) :
+zmq::swap_t::swap_t (int64_t filesize_) :
fd (-1),
filesize (filesize_),
file_pos (0),
write_pos (0),
read_pos (0),
- block_size (block_size_),
+ block_size (swap_block_size),
write_buf_start_addr (0)
{
zmq_assert (filesize > 0);
@@ -60,7 +61,7 @@ zmq::msg_store_t::msg_store_t (int64_t filesize_, size_t block_size_) :
read_buf = write_buf = buf1;
}
-zmq::msg_store_t::~msg_store_t ()
+zmq::swap_t::~swap_t ()
{
delete [] buf1;
delete [] buf2;
@@ -83,7 +84,7 @@ zmq::msg_store_t::~msg_store_t ()
errno_assert (rc == 0);
}
-int zmq::msg_store_t::init ()
+int zmq::swap_t::init ()
{
static zmq::atomic_counter_t seqnum (0);
@@ -116,7 +117,7 @@ int zmq::msg_store_t::init ()
return 0;
}
-bool zmq::msg_store_t::store (zmq_msg_t *msg_)
+bool zmq::swap_t::store (zmq_msg_t *msg_)
{
size_t msg_size = zmq_msg_size (msg_);
@@ -138,7 +139,7 @@ bool zmq::msg_store_t::store (zmq_msg_t *msg_)
return true;
}
-void zmq::msg_store_t::fetch (zmq_msg_t *msg_)
+void zmq::swap_t::fetch (zmq_msg_t *msg_)
{
// There must be at least one message available.
zmq_assert (read_pos != write_pos);
@@ -157,12 +158,12 @@ void zmq::msg_store_t::fetch (zmq_msg_t *msg_)
copy_from_file (zmq_msg_data (msg_), msg_size);
}
-void zmq::msg_store_t::commit ()
+void zmq::swap_t::commit ()
{
commit_pos = write_pos;
}
-void zmq::msg_store_t::rollback ()
+void zmq::swap_t::rollback ()
{
if (commit_pos == write_pos || read_pos == write_pos)
return;
@@ -183,17 +184,17 @@ void zmq::msg_store_t::rollback ()
write_pos = commit_pos;
}
-bool zmq::msg_store_t::empty ()
+bool zmq::swap_t::empty ()
{
return read_pos == write_pos;
}
-bool zmq::msg_store_t::full ()
+bool zmq::swap_t::full ()
{
return buffer_space () == 1;
}
-void zmq::msg_store_t::copy_from_file (void *buffer_, size_t count_)
+void zmq::swap_t::copy_from_file (void *buffer_, size_t count_)
{
char *dest_ptr = (char *) buffer_;
size_t chunk_size, remainder = count_;
@@ -217,7 +218,7 @@ void zmq::msg_store_t::copy_from_file (void *buffer_, size_t count_)
}
}
-void zmq::msg_store_t::copy_to_file (const void *buffer_, size_t count_)
+void zmq::swap_t::copy_to_file (const void *buffer_, size_t count_)
{
char *source_ptr = (char *) buffer_;
size_t chunk_size, remainder = count_;
@@ -246,7 +247,7 @@ void zmq::msg_store_t::copy_to_file (const void *buffer_, size_t count_)
}
}
-void zmq::msg_store_t::fill_buf (char *buf, int64_t pos)
+void zmq::swap_t::fill_buf (char *buf, int64_t pos)
{
if (file_pos != pos) {
#ifdef ZMQ_HAVE_WINDOWS
@@ -272,7 +273,7 @@ void zmq::msg_store_t::fill_buf (char *buf, int64_t pos)
file_pos += octets_total;
}
-void zmq::msg_store_t::save_write_buf ()
+void zmq::swap_t::save_write_buf ()
{
if (file_pos != write_buf_start_addr) {
#ifdef ZMQ_HAVE_WINDOWS
@@ -298,7 +299,7 @@ void zmq::msg_store_t::save_write_buf ()
file_pos += octets_total;
}
-int64_t zmq::msg_store_t::buffer_space ()
+int64_t zmq::swap_t::buffer_space ()
{
if (write_pos < read_pos)
return read_pos - write_pos;
diff --git a/src/msg_store.hpp b/src/swap.hpp
index 765fc60..76ad339 100644
--- a/src/msg_store.hpp
+++ b/src/swap.hpp
@@ -17,8 +17,8 @@
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#ifndef __ZMQ_MSG_STORE_HPP_INCLUDED__
-#define __ZMQ_MSG_STORE_HPP_INCLUDED__
+#ifndef __ZMQ_SWAP_HPP_INCLUDED__
+#define __ZMQ_SWAP_HPP_INCLUDED__
#include "../include/zmq.h"
@@ -28,38 +28,38 @@
namespace zmq
{
- // This class implements a message store. Messages are retrieved from
- // the store in the same order as they entered it.
+ // This class implements a message swap. Messages are retrieved from
+ // the swap in the same order as they entered it.
- class msg_store_t
+ class swap_t
{
public:
enum { default_block_size = 8192 };
- // Creates message store.
- msg_store_t (int64_t filesize_, size_t block_size_ = default_block_size);
+ // Creates the swap.
+ swap_t (int64_t filesize_);
- ~msg_store_t ();
+ ~swap_t ();
int init ();
- // Stores the message into the message store. The function
- // returns false if the message store is full; true otherwise.
+ // Stores the message into the swap. The function
+ // returns false if the swap is full; true otherwise.
bool store (zmq_msg_t *msg_);
- // Fetches the oldest message from the message store. It is an error
- // to call this function when the message store is empty.
+ // Fetches the oldest message from the swap. It is an error
+ // to call this function when the swap is empty.
void fetch (zmq_msg_t *msg_);
void commit ();
void rollback ();
- // Returns true if the message store is empty; false otherwise.
+ // Returns true if the swap is empty; false otherwise.
bool empty ();
- // Returns true if and only if the store is full.
+ // Returns true if and only if the swap is full.
bool full ();
private:
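
swap_t spills messages to a temporary file once the in-memory limit is hit, with the block size now taken from swap_block_size in config.hpp instead of a constructor argument. From the application's point of view this machinery sits behind the 2.x socket options, roughly as in the sketch below (configure_swap is a hypothetical helper; the sizes are arbitrary):

    #include <zmq.h>
    #include <stdint.h>

    void configure_swap (void *socket)
    {
        //  Cap the number of in-memory messages...
        uint64_t hwm = 1000;
        zmq_setsockopt (socket, ZMQ_HWM, &hwm, sizeof hwm);

        //  ...and allow up to 25 MB of on-disk swap once the cap is reached.
        int64_t swap_size = 25 * 1024 * 1024;
        zmq_setsockopt (socket, ZMQ_SWAP, &swap_size, sizeof swap_size);
    }
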
diff --git a/src/tcp_listener.cpp b/src/tcp_listener.cpp
index a62bc04..3766682 100644
--- a/src/tcp_listener.cpp
+++ b/src/tcp_listener.cpp
@@ -42,7 +42,8 @@ zmq::tcp_listener_t::~tcp_listener_t ()
close ();
}
-int zmq::tcp_listener_t::set_address (const char *protocol_, const char *addr_)
+int zmq::tcp_listener_t::set_address (const char *protocol_, const char *addr_,
+ int backlog_)
{
// IPC protocol is not supported on Windows platform.
if (strcmp (protocol_, "tcp") != 0 ) {
@@ -81,7 +82,7 @@ int zmq::tcp_listener_t::set_address (const char *protocol_, const char *addr_)
}
// Listen for incoming connections.
- rc = listen (s, 1);
+ rc = listen (s, backlog_);
if (rc == SOCKET_ERROR) {
wsa_error_to_errno ();
return -1;
@@ -161,7 +162,8 @@ zmq::tcp_listener_t::~tcp_listener_t ()
close ();
}
-int zmq::tcp_listener_t::set_address (const char *protocol_, const char *addr_)
+int zmq::tcp_listener_t::set_address (const char *protocol_, const char *addr_,
+ int backlog_)
{
if (strcmp (protocol_, "tcp") == 0 ) {
@@ -201,7 +203,7 @@ int zmq::tcp_listener_t::set_address (const char *protocol_, const char *addr_)
}
// Listen for incoming connections.
- rc = listen (s, tcp_connection_backlog);
+ rc = listen (s, backlog_);
if (rc != 0) {
close ();
return -1;
@@ -241,7 +243,7 @@ int zmq::tcp_listener_t::set_address (const char *protocol_, const char *addr_)
}
// Listen for incoming connections.
- rc = listen (s, tcp_connection_backlog);
+ rc = listen (s, backlog_);
if (rc != 0) {
close ();
return -1;
diff --git a/src/tcp_listener.hpp b/src/tcp_listener.hpp
index 3b60719..0ee90d8 100644
--- a/src/tcp_listener.hpp
+++ b/src/tcp_listener.hpp
@@ -36,7 +36,8 @@ namespace zmq
~tcp_listener_t ();
// Start listening on the interface.
- int set_address (const char *protocol_, const char *addr_);
+ int set_address (const char *protocol_, const char *addr_,
+ int backlog_);
// Close the listening socket.
int close ();
diff --git a/src/thread.cpp b/src/thread.cpp
index 602ca8b..4e86531 100644
--- a/src/thread.cpp
+++ b/src/thread.cpp
@@ -38,16 +38,6 @@ void zmq::thread_t::stop ()
win_assert (rc != WAIT_FAILED);
}
-zmq::thread_t::id_t zmq::thread_t::id ()
-{
- return GetCurrentThreadId ();
-}
-
-bool zmq::thread_t::equal (id_t id1_, id_t id2_)
-{
- return id1_ == id2_;
-}
-
unsigned int __stdcall zmq::thread_t::thread_routine (void *arg_)
{
thread_t *self = (thread_t*) arg_;
@@ -73,16 +63,6 @@ void zmq::thread_t::stop ()
errno_assert (rc == 0);
}
-zmq::thread_t::id_t zmq::thread_t::id ()
-{
- return pthread_self ();
-}
-
-bool zmq::thread_t::equal (id_t id1_, id_t id2_)
-{
- return pthread_equal (id1_, id2_) != 0;
-}
-
void *zmq::thread_t::thread_routine (void *arg_)
{
#if !defined ZMQ_HAVE_OPENVMS
diff --git a/src/thread.hpp b/src/thread.hpp
index 432770c..8af6ea5 100644
--- a/src/thread.hpp
+++ b/src/thread.hpp
@@ -54,15 +54,6 @@ namespace zmq
// Waits for thread termination.
void stop ();
-
-#ifdef ZMQ_HAVE_WINDOWS
- typedef DWORD id_t;
-#else
- typedef pthread_t id_t;
-#endif
-
- static id_t id ();
- static bool equal (id_t id1_, id_t id2_);
private:
diff --git a/src/i_endpoint.hpp b/src/transient_session.cpp
index 0d14224..ff4b978 100644
--- a/src/i_endpoint.hpp
+++ b/src/transient_session.cpp
@@ -17,27 +17,24 @@
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#ifndef __ZMQ_I_ENDPOINT_HPP_INCLUDED__
-#define __ZMQ_I_ENDPOINT_HPP_INCLUDED__
+#include "transient_session.hpp"
-#include "blob.hpp"
-
-namespace zmq
+zmq::transient_session_t::transient_session_t (class io_thread_t *io_thread_,
+ class socket_base_t *socket_, const options_t &options_) :
+ session_t (io_thread_, socket_, options_)
{
+}
- struct i_endpoint
- {
- virtual ~i_endpoint () {}
-
- virtual void attach_pipes (class reader_t *inpipe_,
- class writer_t *outpipe_, const blob_t &peer_identity_) = 0;
- virtual void detach_inpipe (class reader_t *pipe_) = 0;
- virtual void detach_outpipe (class writer_t *pipe_) = 0;
- virtual void kill (class reader_t *pipe_) = 0;
- virtual void revive (class reader_t *pipe_) = 0;
- virtual void revive (class writer_t *pipe_) = 0;
- };
+zmq::transient_session_t::~transient_session_t ()
+{
+}
+void zmq::transient_session_t::attached (const blob_t &peer_identity_)
+{
}
-#endif
+void zmq::transient_session_t::detached ()
+{
+ // There's no way to reestablish a transient session. Tear it down.
+ terminate ();
+}
diff --git a/src/zmq_encoder.hpp b/src/transient_session.hpp
index 61899f4..d244982 100644
--- a/src/zmq_encoder.hpp
+++ b/src/transient_session.hpp
@@ -17,39 +17,35 @@
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#ifndef __ZMQ_ZMQ_ENCODER_HPP_INCLUDED__
-#define __ZMQ_ZMQ_ENCODER_HPP_INCLUDED__
+#ifndef __ZMQ_TRANSIENT_SESSION_HPP_INCLUDED__
+#define __ZMQ_TRANSIENT_SESSION_HPP_INCLUDED__
-#include "../include/zmq.h"
-
-#include "encoder.hpp"
+#include "session.hpp"
namespace zmq
{
- // Encoder for 0MQ backend protocol. Converts messages into data batches.
- class zmq_encoder_t : public encoder_t <zmq_encoder_t>
+ // A transient session is created by the listener when the connected peer
+ // stays anonymous. It is destroyed on disconnect.
+
+ class transient_session_t : public session_t
{
public:
- zmq_encoder_t (size_t bufsize_);
- ~zmq_encoder_t ();
-
- void set_inout (struct i_inout *source_);
+ transient_session_t (class io_thread_t *io_thread_,
+ class socket_base_t *socket_, const options_t &options_);
+ ~transient_session_t ();
private:
- bool size_ready ();
- bool message_ready ();
+ // Handlers for events from session base class.
+ void attached (const blob_t &peer_identity_);
+ void detached ();
- struct i_inout *source;
- ::zmq_msg_t in_progress;
- unsigned char tmpbuf [10];
-
- zmq_encoder_t (const zmq_encoder_t&);
- void operator = (const zmq_encoder_t&);
+ transient_session_t (const transient_session_t&);
+ void operator = (const transient_session_t&);
};
+
}
#endif
-
diff --git a/src/prefix_tree.cpp b/src/trie.cpp
index 6d4f084..8bcfbbc 100644
--- a/src/prefix_tree.cpp
+++ b/src/trie.cpp
@@ -28,16 +28,16 @@
#endif
#include "err.hpp"
-#include "prefix_tree.hpp"
+#include "trie.hpp"
-zmq::prefix_tree_t::prefix_tree_t () :
+zmq::trie_t::trie_t () :
refcnt (0),
min (0),
count (0)
{
}
-zmq::prefix_tree_t::~prefix_tree_t ()
+zmq::trie_t::~trie_t ()
{
if (count == 1)
delete next.node;
@@ -49,7 +49,7 @@ zmq::prefix_tree_t::~prefix_tree_t ()
}
}
-void zmq::prefix_tree_t::add (unsigned char *prefix_, size_t size_)
+void zmq::trie_t::add (unsigned char *prefix_, size_t size_)
{
// We are at the node corresponding to the prefix. We are done.
if (!size_) {
@@ -69,10 +69,10 @@ void zmq::prefix_tree_t::add (unsigned char *prefix_, size_t size_)
}
else if (count == 1) {
unsigned char oldc = min;
- prefix_tree_t *oldp = next.node;
+ trie_t *oldp = next.node;
count = (min < c ? c - min : min - c) + 1;
- next.table = (prefix_tree_t**)
- malloc (sizeof (prefix_tree_t*) * count);
+ next.table = (trie_t**)
+ malloc (sizeof (trie_t*) * count);
zmq_assert (next.table);
for (unsigned short i = 0; i != count; ++i)
next.table [i] = 0;
@@ -84,8 +84,8 @@ void zmq::prefix_tree_t::add (unsigned char *prefix_, size_t size_)
// The new character is above the current character range.
unsigned short old_count = count;
count = c - min + 1;
- next.table = (prefix_tree_t**) realloc ((void*) next.table,
- sizeof (prefix_tree_t*) * count);
+ next.table = (trie_t**) realloc ((void*) next.table,
+ sizeof (trie_t*) * count);
zmq_assert (next.table);
for (unsigned short i = old_count; i != count; i++)
next.table [i] = NULL;
@@ -95,11 +95,11 @@ void zmq::prefix_tree_t::add (unsigned char *prefix_, size_t size_)
// The new character is below the current character range.
unsigned short old_count = count;
count = (min + old_count) - c;
- next.table = (prefix_tree_t**) realloc ((void*) next.table,
- sizeof (prefix_tree_t*) * count);
+ next.table = (trie_t**) realloc ((void*) next.table,
+ sizeof (trie_t*) * count);
zmq_assert (next.table);
memmove (next.table + min - c, next.table,
- old_count * sizeof (prefix_tree_t*));
+ old_count * sizeof (trie_t*));
for (unsigned short i = 0; i != min - c; i++)
next.table [i] = NULL;
min = c;
@@ -109,21 +109,21 @@ void zmq::prefix_tree_t::add (unsigned char *prefix_, size_t size_)
// If next node does not exist, create one.
if (count == 1) {
if (!next.node) {
- next.node = new (std::nothrow) prefix_tree_t;
+ next.node = new (std::nothrow) trie_t;
zmq_assert (next.node);
}
next.node->add (prefix_ + 1, size_ - 1);
}
else {
if (!next.table [c - min]) {
- next.table [c - min] = new (std::nothrow) prefix_tree_t;
+ next.table [c - min] = new (std::nothrow) trie_t;
zmq_assert (next.table [c - min]);
}
next.table [c - min]->add (prefix_ + 1, size_ - 1);
}
}
-bool zmq::prefix_tree_t::rm (unsigned char *prefix_, size_t size_)
+bool zmq::trie_t::rm (unsigned char *prefix_, size_t size_)
{
if (!size_) {
if (!refcnt)
@@ -136,7 +136,7 @@ bool zmq::prefix_tree_t::rm (unsigned char *prefix_, size_t size_)
if (!count || c < min || c >= min + count)
return false;
- prefix_tree_t *next_node =
+ trie_t *next_node =
count == 1 ? next.node : next.table [c - min];
if (!next_node)
@@ -145,11 +145,11 @@ bool zmq::prefix_tree_t::rm (unsigned char *prefix_, size_t size_)
return next_node->rm (prefix_ + 1, size_ - 1);
}
-bool zmq::prefix_tree_t::check (unsigned char *data_, size_t size_)
+bool zmq::trie_t::check (unsigned char *data_, size_t size_)
{
// This function is on the critical path. It deliberately doesn't use
// recursion to get a bit better performance.
- prefix_tree_t *current = this;
+ trie_t *current = this;
while (true) {
// We've found a corresponding subscription!
diff --git a/src/prefix_tree.hpp b/src/trie.hpp
index bf1c4b9..08b2eac 100644
--- a/src/prefix_tree.hpp
+++ b/src/trie.hpp
@@ -17,8 +17,8 @@
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#ifndef __ZMQ_PREFIX_TREE_HPP_INCLUDED__
-#define __ZMQ_PREFIX_TREE_HPP_INCLUDED__
+#ifndef __ZMQ_TRIE_HPP_INCLUDED__
+#define __ZMQ_TRIE_HPP_INCLUDED__
#include <stddef.h>
@@ -27,12 +27,12 @@
namespace zmq
{
- class prefix_tree_t
+ class trie_t
{
public:
- prefix_tree_t ();
- ~prefix_tree_t ();
+ trie_t ();
+ ~trie_t ();
void add (unsigned char *prefix_, size_t size_);
bool rm (unsigned char *prefix_, size_t size_);
@@ -44,9 +44,12 @@ namespace zmq
unsigned char min;
unsigned short count;
union {
- class prefix_tree_t *node;
- class prefix_tree_t **table;
+ class trie_t *node;
+ class trie_t **table;
} next;
+
+ trie_t (const trie_t&);
+ void operator = (const trie_t&);
};
}
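
The rename from prefix_tree_t to trie_t is mechanical; the data structure still stores subscription prefixes and answers prefix queries against message bodies. A sketch of the internal contract, compiled inside the source tree since trie.hpp is not an installed header (the strings are arbitrary):

    #include "trie.hpp"
    #include <cassert>

    void trie_example ()
    {
        zmq::trie_t subscriptions;

        //  Register the 8-byte prefix "weather.".
        unsigned char prefix [] = "weather.";
        subscriptions.add (prefix, 8);

        //  check () walks the data until some stored prefix is exhausted.
        unsigned char hit [] = "weather.london 12C";
        assert (subscriptions.check (hit, sizeof hit - 1));

        unsigned char miss [] = "sports.results";
        assert (!subscriptions.check (miss, sizeof miss - 1));

        //  Drop one reference to the prefix again.
        subscriptions.rm (prefix, 8);
    }
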
diff --git a/src/xrep.cpp b/src/xrep.cpp
index f50e32e..b3e8ebd 100644
--- a/src/xrep.cpp
+++ b/src/xrep.cpp
@@ -23,14 +23,16 @@
#include "err.hpp"
#include "pipe.hpp"
-zmq::xrep_t::xrep_t (class app_thread_t *parent_) :
- socket_base_t (parent_),
+zmq::xrep_t::xrep_t (class ctx_t *parent_, uint32_t slot_) :
+ socket_base_t (parent_, slot_),
current_in (0),
prefetched (false),
more_in (false),
current_out (NULL),
- more_out (false)
+ more_out (false),
+ terminating (false)
{
+ options.type = ZMQ_XREP;
options.requires_in = true;
options.requires_out = true;
@@ -41,42 +43,76 @@ zmq::xrep_t::xrep_t (class app_thread_t *parent_) :
zmq::xrep_t::~xrep_t ()
{
- for (inpipes_t::iterator it = inpipes.begin (); it != inpipes.end (); it++)
- it->reader->term ();
- for (outpipes_t::iterator it = outpipes.begin (); it != outpipes.end ();
- it++)
- it->second.writer->term ();
+ zmq_assert (inpipes.empty ());
+ zmq_assert (outpipes.empty ());
+}
+
+void zmq::xrep_t::xattach_pipes (reader_t *inpipe_, writer_t *outpipe_,
+ const blob_t &peer_identity_)
+{
+ if (outpipe_) {
+
+ outpipe_->set_event_sink (this);
+
+ // TODO: What if new connection has same peer identity as the old one?
+ outpipe_t outpipe = {outpipe_, true};
+ bool ok = outpipes.insert (outpipes_t::value_type (
+ peer_identity_, outpipe)).second;
+ zmq_assert (ok);
+
+ if (terminating) {
+ register_term_acks (1);
+ outpipe_->terminate ();
+ }
+ }
+
+ if (inpipe_) {
+
+ inpipe_->set_event_sink (this);
+
+ inpipe_t inpipe = {inpipe_, peer_identity_, true};
+ inpipes.push_back (inpipe);
+
+ if (terminating) {
+ register_term_acks (1);
+ inpipe_->terminate ();
+ }
+ }
}
-void zmq::xrep_t::xattach_pipes (class reader_t *inpipe_,
- class writer_t *outpipe_, const blob_t &peer_identity_)
+void zmq::xrep_t::process_term (int linger_)
{
- zmq_assert (inpipe_ && outpipe_);
+ terminating = true;
- // TODO: What if new connection has same peer identity as the old one?
- outpipe_t outpipe = {outpipe_, true};
- bool ok = outpipes.insert (std::make_pair (
- peer_identity_, outpipe)).second;
- zmq_assert (ok);
+ register_term_acks (inpipes.size () + outpipes.size ());
+
+ for (inpipes_t::iterator it = inpipes.begin (); it != inpipes.end ();
+ it++)
+ it->reader->terminate ();
+ for (outpipes_t::iterator it = outpipes.begin (); it != outpipes.end ();
+ it++)
+ it->second.writer->terminate ();
- inpipe_t inpipe = {inpipe_, peer_identity_, true};
- inpipes.push_back (inpipe);
+ socket_base_t::process_term (linger_);
}
-void zmq::xrep_t::xdetach_inpipe (class reader_t *pipe_)
+void zmq::xrep_t::terminated (reader_t *pipe_)
{
-// TODO:!
for (inpipes_t::iterator it = inpipes.begin (); it != inpipes.end ();
it++) {
if (it->reader == pipe_) {
inpipes.erase (it);
+ if (terminating)
+ unregister_term_ack ();
+ if (current_in >= inpipes.size ())
+ current_in = 0;
return;
}
}
zmq_assert (false);
}
-void zmq::xrep_t::xdetach_outpipe (class writer_t *pipe_)
+void zmq::xrep_t::terminated (writer_t *pipe_)
{
for (outpipes_t::iterator it = outpipes.begin ();
it != outpipes.end (); ++it) {
@@ -84,26 +120,19 @@ void zmq::xrep_t::xdetach_outpipe (class writer_t *pipe_)
outpipes.erase (it);
if (pipe_ == current_out)
current_out = NULL;
+ if (terminating)
+ unregister_term_ack ();
return;
}
}
zmq_assert (false);
}
-void zmq::xrep_t::xkill (class reader_t *pipe_)
+void zmq::xrep_t::delimited (reader_t *pipe_)
{
- for (inpipes_t::iterator it = inpipes.begin (); it != inpipes.end ();
- it++) {
- if (it->reader == pipe_) {
- zmq_assert (it->active);
- it->active = false;
- return;
- }
- }
- zmq_assert (false);
}
-void zmq::xrep_t::xrevive (class reader_t *pipe_)
+void zmq::xrep_t::activated (reader_t *pipe_)
{
for (inpipes_t::iterator it = inpipes.begin (); it != inpipes.end ();
it++) {
@@ -116,7 +145,7 @@ void zmq::xrep_t::xrevive (class reader_t *pipe_)
zmq_assert (false);
}
-void zmq::xrep_t::xrevive (class writer_t *pipe_)
+void zmq::xrep_t::activated (writer_t *pipe_)
{
for (outpipes_t::iterator it = outpipes.begin ();
it != outpipes.end (); ++it) {
@@ -129,13 +158,6 @@ void zmq::xrep_t::xrevive (class writer_t *pipe_)
zmq_assert (false);
}
-int zmq::xrep_t::xsetsockopt (int option_, const void *optval_,
- size_t optvallen_)
-{
- errno = EINVAL;
- return -1;
-}
-
int zmq::xrep_t::xsend (zmq_msg_t *msg_, int flags_)
{
// If this is the first part of the message it's the identity of the
@@ -144,23 +166,24 @@ int zmq::xrep_t::xsend (zmq_msg_t *msg_, int flags_)
zmq_assert (!current_out);
// If we have malformed message (prefix with no subsequent message)
- // then just silently drop the message.
- if ((msg_->flags & ZMQ_MSG_MORE) == 0)
- return 0;
+ // then just silently ignore it.
+ if (msg_->flags & ZMQ_MSG_MORE) {
- more_out = true;
+ more_out = true;
- // Find the pipe associated with the identity stored in the prefix.
- // If there's no such pipe just silently drop the message.
- blob_t identity ((unsigned char*) zmq_msg_data (msg_),
- zmq_msg_size (msg_));
- outpipes_t::iterator it = outpipes.find (identity);
- if (it == outpipes.end ())
- return 0;
-
- // Remember the outgoing pipe.
- current_out = it->second.writer;
+ // Find the pipe associated with the identity stored in the prefix.
+ // If there's no such pipe just silently ignore the message.
+ blob_t identity ((unsigned char*) zmq_msg_data (msg_),
+ zmq_msg_size (msg_));
+ outpipes_t::iterator it = outpipes.find (identity);
+ if (it != outpipes.end ())
+ current_out = it->second.writer;
+ }
+ int rc = zmq_msg_close (msg_);
+ zmq_assert (rc == 0);
+ rc = zmq_msg_init (msg_);
+ zmq_assert (rc == 0);
return 0;
}
@@ -233,7 +256,9 @@ int zmq::xrep_t::xrecv (zmq_msg_t *msg_, int flags_)
return 0;
}
- // If me don't have a message, move to next pipe.
+ // If we don't have a message, mark the pipe as passive and
+ // move to the next pipe.
+ inpipes [current_in].active = false;
current_in++;
if (current_in >= inpipes.size ())
current_in = 0;
@@ -260,6 +285,10 @@ bool zmq::xrep_t::xhas_in ()
if (inpipes [current_in].active &&
inpipes [current_in].reader->check_read ())
return true;
+
+ // If we don't have a message, mark the pipe as passive and
+ // move to the next pipe.
+ inpipes [current_in].active = false;
current_in++;
if (current_in >= inpipes.size ())
current_in = 0;
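
The reworked xsend () above makes the routing contract of XREP explicit: the first message part is taken as the peer identity, used to look up the outgoing pipe, and the message is silently dropped when no connected peer carries that identity. A hypothetical application-side sketch of that contract against the public C API (endpoint and identity values are made up, ZMQ_SNDMORE is assumed to be the multipart send flag of this API generation, and the peer that set this identity is omitted):

#include <assert.h>
#include <string.h>
#include <zmq.h>

int main ()
{
    void *ctx = zmq_init (1);
    void *xrep = zmq_socket (ctx, ZMQ_XREP);
    assert (zmq_bind (xrep, "tcp://127.0.0.1:5555") == 0);

    // First part: identity of the peer the message should be routed to.
    // If no connected peer has this identity, xsend () above ignores the
    // message silently instead of blocking or failing.
    zmq_msg_t identity;
    zmq_msg_init_size (&identity, 5);
    memcpy (zmq_msg_data (&identity), "peer1", 5);
    assert (zmq_send (xrep, &identity, ZMQ_SNDMORE) == 0);
    zmq_msg_close (&identity);

    // Second part: the payload delivered to that peer.
    zmq_msg_t body;
    zmq_msg_init_size (&body, 5);
    memcpy (zmq_msg_data (&body), "hello", 5);
    assert (zmq_send (xrep, &body, 0) == 0);
    zmq_msg_close (&body);

    zmq_close (xrep);
    zmq_term (ctx);
    return 0;
}
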
diff --git a/src/xrep.hpp b/src/xrep.hpp
index da1b3d8..575fc44 100644
--- a/src/xrep.hpp
+++ b/src/xrep.hpp
@@ -25,27 +25,25 @@
#include "socket_base.hpp"
#include "blob.hpp"
+#include "pipe.hpp"
namespace zmq
{
// TODO: This class uses O(n) scheduling. Rewrite it to use O(1) algorithm.
- class xrep_t : public socket_base_t
+ class xrep_t :
+ public socket_base_t,
+ public i_reader_events,
+ public i_writer_events
{
public:
- xrep_t (class app_thread_t *parent_);
+ xrep_t (class ctx_t *parent_, uint32_t slot_);
~xrep_t ();
// Overloads of functions from socket_base_t.
- void xattach_pipes (class reader_t *inpipe_, class writer_t *outpipe_,
+ void xattach_pipes (reader_t *inpipe_, writer_t *outpipe_,
const blob_t &peer_identity_);
- void xdetach_inpipe (class reader_t *pipe_);
- void xdetach_outpipe (class writer_t *pipe_);
- void xkill (class reader_t *pipe_);
- void xrevive (class reader_t *pipe_);
- void xrevive (class writer_t *pipe_);
- int xsetsockopt (int option_, const void *optval_, size_t optvallen_);
int xsend (zmq_msg_t *msg_, int flags_);
int xrecv (zmq_msg_t *msg_, int flags_);
bool xhas_in ();
@@ -53,6 +51,18 @@ namespace zmq
private:
+ // Hook into the termination process.
+ void process_term (int linger_);
+
+ // i_reader_events interface implementation.
+ void activated (reader_t *pipe_);
+ void terminated (reader_t *pipe_);
+ void delimited (reader_t *pipe_);
+
+ // i_writer_events interface implementation.
+ void activated (writer_t *pipe_);
+ void terminated (writer_t *pipe_);
+
struct inpipe_t
{
class reader_t *reader;
@@ -92,6 +102,9 @@ namespace zmq
// If true, more outgoing message parts are expected.
bool more_out;
+ // If true, termination process is already underway.
+ bool terminating;
+
xrep_t (const xrep_t&);
void operator = (const xrep_t&);
};
diff --git a/src/xreq.cpp b/src/xreq.cpp
index 66e5cc3..017f127 100644
--- a/src/xreq.cpp
+++ b/src/xreq.cpp
@@ -22,9 +22,12 @@
#include "xreq.hpp"
#include "err.hpp"
-zmq::xreq_t::xreq_t (class app_thread_t *parent_) :
- socket_base_t (parent_)
+zmq::xreq_t::xreq_t (class ctx_t *parent_, uint32_t slot_) :
+ socket_base_t (parent_, slot_),
+ fq (this),
+ lb (this)
{
+ options.type = ZMQ_XREQ;
options.requires_in = true;
options.requires_out = true;
}
@@ -41,38 +44,11 @@ void zmq::xreq_t::xattach_pipes (class reader_t *inpipe_,
lb.attach (outpipe_);
}
-void zmq::xreq_t::xdetach_inpipe (class reader_t *pipe_)
+void zmq::xreq_t::process_term (int linger_)
{
- zmq_assert (pipe_);
- fq.detach (pipe_);
-}
-
-void zmq::xreq_t::xdetach_outpipe (class writer_t *pipe_)
-{
- zmq_assert (pipe_);
- lb.detach (pipe_);
-}
-
-void zmq::xreq_t::xkill (class reader_t *pipe_)
-{
- fq.kill (pipe_);
-}
-
-void zmq::xreq_t::xrevive (class reader_t *pipe_)
-{
- fq.revive (pipe_);
-}
-
-void zmq::xreq_t::xrevive (class writer_t *pipe_)
-{
- lb.revive (pipe_);
-}
-
-int zmq::xreq_t::xsetsockopt (int option_, const void *optval_,
- size_t optvallen_)
-{
- errno = EINVAL;
- return -1;
+ fq.terminate ();
+ lb.terminate ();
+ socket_base_t::process_term (linger_);
}
int zmq::xreq_t::xsend (zmq_msg_t *msg_, int flags_)
diff --git a/src/xreq.hpp b/src/xreq.hpp
index 8ee0bb9..6dbc117 100644
--- a/src/xreq.hpp
+++ b/src/xreq.hpp
@@ -31,18 +31,14 @@ namespace zmq
{
public:
- xreq_t (class app_thread_t *parent_);
+ xreq_t (class ctx_t *parent_, uint32_t slot_);
~xreq_t ();
+ protected:
+
// Overloads of functions from socket_base_t.
void xattach_pipes (class reader_t *inpipe_, class writer_t *outpipe_,
const blob_t &peer_identity_);
- void xdetach_inpipe (class reader_t *pipe_);
- void xdetach_outpipe (class writer_t *pipe_);
- void xkill (class reader_t *pipe_);
- void xrevive (class reader_t *pipe_);
- void xrevive (class writer_t *pipe_);
- int xsetsockopt (int option_, const void *optval_, size_t optvallen_);
int xsend (zmq_msg_t *msg_, int flags_);
int xrecv (zmq_msg_t *msg_, int flags_);
bool xhas_in ();
@@ -50,6 +46,9 @@ namespace zmq
private:
+ // Hook into the termination process.
+ void process_term (int linger_);
+
// Messages are fair-queued from inbound pipes. And load-balanced to
// the outbound pipes.
fq_t fq;
diff --git a/src/yarray_item.hpp b/src/yarray_item.hpp
deleted file mode 100644
index db24dda..0000000
--- a/src/yarray_item.hpp
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- Copyright (c) 2007-2010 iMatix Corporation
-
- This file is part of 0MQ.
-
- 0MQ is free software; you can redistribute it and/or modify it under
- the terms of the Lesser GNU General Public License as published by
- the Free Software Foundation; either version 3 of the License, or
- (at your option) any later version.
-
- 0MQ is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- Lesser GNU General Public License for more details.
-
- You should have received a copy of the Lesser GNU General Public License
- along with this program. If not, see <http://www.gnu.org/licenses/>.
-*/
-
-#ifndef __ZMQ_YARRAY_ITEM_INCLUDED__
-#define __ZMQ_YARRAY_ITEM_INCLUDED__
-
-namespace zmq
-{
-
- // Base class for objects stored in yarray. Note that each object can
- // be stored in at most one yarray.
-
- class yarray_item_t
- {
- public:
-
- inline yarray_item_t () :
- yarray_index (-1)
- {
- }
-
- // The destructor doesn't have to be virtual. It is mad virtual
- // just to keep ICC and code checking tools from complaining.
- inline virtual ~yarray_item_t ()
- {
- }
-
- inline void set_yarray_index (int index_)
- {
- yarray_index = index_;
- }
-
- inline int get_yarray_index ()
- {
- return yarray_index;
- }
-
- private:
-
- int yarray_index;
-
- yarray_item_t (const yarray_item_t&);
- void operator = (const yarray_item_t&);
- };
-
-}
-
-#endif
diff --git a/src/zmq.cpp b/src/zmq.cpp
index 5770e04..a99aeb5 100644
--- a/src/zmq.cpp
+++ b/src/zmq.cpp
@@ -29,11 +29,11 @@
#include "queue.hpp"
#include "streamer.hpp"
#include "socket_base.hpp"
-#include "app_thread.hpp"
#include "msg_content.hpp"
#include "platform.hpp"
#include "stdint.hpp"
#include "config.hpp"
+#include "clock.hpp"
#include "ctx.hpp"
#include "err.hpp"
#include "fd.hpp"
@@ -48,10 +48,10 @@
#if !defined ZMQ_HAVE_WINDOWS
#include <unistd.h>
-#include <sys/time.h>
#endif
#if defined ZMQ_HAVE_OPENPGM
+#define __PGM_WININT_H__
#include <pgm/pgm.h>
#endif
@@ -64,43 +64,7 @@ void zmq_version (int *major_, int *minor_, int *patch_)
const char *zmq_strerror (int errnum_)
{
- switch (errnum_) {
-#if defined ZMQ_HAVE_WINDOWS
- case ENOTSUP:
- return "Not supported";
- case EPROTONOSUPPORT:
- return "Protocol not supported";
- case ENOBUFS:
- return "No buffer space available";
- case ENETDOWN:
- return "Network is down";
- case EADDRINUSE:
- return "Address in use";
- case EADDRNOTAVAIL:
- return "Address not available";
- case ECONNREFUSED:
- return "Connection refused";
- case EINPROGRESS:
- return "Operation in progress";
-#endif
- case EMTHREAD:
- return "Number of preallocated application threads exceeded";
- case EFSM:
- return "Operation cannot be accomplished in current state";
- case ENOCOMPATPROTO:
- return "The protocol is not compatible with the socket type";
- case ETERM:
- return "Context was terminated";
- default:
-#if defined _MSC_VER
-#pragma warning (push)
-#pragma warning (disable:4996)
-#endif
- return strerror (errnum_);
-#if defined _MSC_VER
-#pragma warning (pop)
-#endif
- }
+ return zmq::errno_to_string (errnum_);
}
int zmq_msg_init (zmq_msg_t *msg_)
@@ -155,24 +119,30 @@ int zmq_msg_init_data (zmq_msg_t *msg_, void *data_, size_t size_,
int zmq_msg_close (zmq_msg_t *msg_)
{
// For VSMs and delimiters there are no resources to free.
- if (msg_->content == (zmq::msg_content_t*) ZMQ_DELIMITER ||
- msg_->content == (zmq::msg_content_t*) ZMQ_VSM)
- return 0;
+ if (msg_->content != (zmq::msg_content_t*) ZMQ_DELIMITER &&
+ msg_->content != (zmq::msg_content_t*) ZMQ_VSM) {
- // If the content is not shared, or if it is shared and the reference.
- // count has dropped to zero, deallocate it.
- zmq::msg_content_t *content = (zmq::msg_content_t*) msg_->content;
- if (!(msg_->flags & ZMQ_MSG_SHARED) || !content->refcnt.sub (1)) {
+ // If the content is not shared, or if it is shared and the reference
+ // count has dropped to zero, deallocate it.
+ zmq::msg_content_t *content = (zmq::msg_content_t*) msg_->content;
+ if (!(msg_->flags & ZMQ_MSG_SHARED) || !content->refcnt.sub (1)) {
- // We used "placement new" operator to initialize the reference.
- // counter so we call its destructor now.
- content->refcnt.~atomic_counter_t ();
+ // We used the "placement new" operator to initialize the reference
+ // counter, so we call its destructor now.
+ content->refcnt.~atomic_counter_t ();
- if (content->ffn)
- content->ffn (content->data, content->hint);
- free (content);
+ if (content->ffn)
+ content->ffn (content->data, content->hint);
+ free (content);
+ }
}
+ // As a safety measure, let's make the deallocated message look like
+ // an empty message.
+ msg_->content = (zmq::msg_content_t*) ZMQ_VSM;
+ msg_->flags = 0;
+ msg_->vsm_size = 0;
+
return 0;
}
@@ -235,31 +205,27 @@ void *zmq_init (int io_threads_)
}
#if defined ZMQ_HAVE_OPENPGM
- // Unfortunately, OpenPGM doesn't support refcounted init/shutdown, thus,
- // let's fail if it was initialised beforehand.
- zmq_assert (!pgm_supported ());
// Init PGM transport. Ensure threading and timer are enabled. Find PGM
// protocol ID. Note that if you want to use gettimeofday and sleep for
// openPGM timing, set environment variables PGM_TIMER to "GTOD" and
// PGM_SLEEP to "USLEEP".
- GError *pgm_error = NULL;
- int rc = pgm_init (&pgm_error);
+ pgm_error_t *pgm_error = NULL;
+ const bool rc = pgm_init (&pgm_error);
if (rc != TRUE) {
- if (pgm_error->domain == PGM_IF_ERROR && (
- pgm_error->code == PGM_IF_ERROR_INVAL ||
- pgm_error->code == PGM_IF_ERROR_XDEV ||
- pgm_error->code == PGM_IF_ERROR_NODEV ||
- pgm_error->code == PGM_IF_ERROR_NOTUNIQ ||
- pgm_error->code == PGM_IF_ERROR_ADDRFAMILY ||
- pgm_error->code == PGM_IF_ERROR_FAMILY ||
- pgm_error->code == PGM_IF_ERROR_NODATA ||
- pgm_error->code == PGM_IF_ERROR_NONAME ||
- pgm_error->code == PGM_IF_ERROR_SERVICE)) {
- g_error_free (pgm_error);
+
+ // Invalid parameters don't set pgm_error_t
+ zmq_assert (pgm_error != NULL);
+ if (pgm_error->domain == PGM_ERROR_DOMAIN_TIME && (
+ pgm_error->code == PGM_ERROR_FAILED)) {
+
+ // Failed to access RTC or HPET device.
+ pgm_error_free (pgm_error);
errno = EINVAL;
return NULL;
}
+
+ // PGM_ERROR_DOMAIN_ENGINE: WSAStartup errors or missing WSARecvMsg.
zmq_assert (false);
}
#endif
@@ -277,7 +243,7 @@ int zmq_term (void *ctx_)
return -1;
}
- int rc = ((zmq::ctx_t*) ctx_)->term ();
+ int rc = ((zmq::ctx_t*) ctx_)->terminate ();
int en = errno;
#if defined ZMQ_HAVE_OPENPGM
@@ -366,158 +332,174 @@ int zmq_recv (void *s_, zmq_msg_t *msg_, int flags_)
return (((zmq::socket_base_t*) s_)->recv (msg_, flags_));
}
-int zmq_poll (zmq_pollitem_t *items_, int nitems_, long timeout_)
-{
-#if defined ZMQ_HAVE_LINUX || defined ZMQ_HAVE_FREEBSD ||\
+#if defined ZMQ_FORCE_SELECT
+#define ZMQ_POLL_BASED_ON_SELECT
+#elif defined ZMQ_FORCE_POLL
+#define ZMQ_POLL_BASED_ON_POLL
+#elif defined ZMQ_HAVE_LINUX || defined ZMQ_HAVE_FREEBSD ||\
defined ZMQ_HAVE_OPENBSD || defined ZMQ_HAVE_SOLARIS ||\
defined ZMQ_HAVE_OSX || defined ZMQ_HAVE_QNXNTO ||\
defined ZMQ_HAVE_HPUX || defined ZMQ_HAVE_AIX ||\
defined ZMQ_HAVE_NETBSD
+#define ZMQ_POLL_BASED_ON_POLL
+#elif defined ZMQ_HAVE_WINDOWS || defined ZMQ_HAVE_OPENVMS
+#define ZMQ_POLL_BASED_ON_SELECT
+#endif
+
+int zmq_poll (zmq_pollitem_t *items_, int nitems_, long timeout_)
+{
+#if defined ZMQ_POLL_BASED_ON_POLL
if (!items_) {
errno = EFAULT;
return -1;
}
+
+ zmq::clock_t clock;
+ uint64_t now = 0;
+ uint64_t end = 0;
+
pollfd *pollfds = (pollfd*) malloc (nitems_ * sizeof (pollfd));
zmq_assert (pollfds);
- int npollfds = 0;
- int nsockets = 0;
-
- zmq::app_thread_t *app_thread = NULL;
+ // Build pollset for poll () system call.
for (int i = 0; i != nitems_; i++) {
- // 0MQ sockets.
+ // If the poll item is a 0MQ socket, we poll on the file descriptor
+ // retrieved by the ZMQ_FD socket option.
if (items_ [i].socket) {
-
- // Get the app_thread the socket is living in. If there are two
- // sockets in the same pollset with different app threads, fail.
- zmq::socket_base_t *s = (zmq::socket_base_t*) items_ [i].socket;
- if (app_thread) {
- if (app_thread != s->get_thread ()) {
- free (pollfds);
- errno = EFAULT;
- return -1;
- }
+ size_t zmq_fd_size = sizeof (zmq::fd_t);
+ if (zmq_getsockopt (items_ [i].socket, ZMQ_FD, &pollfds [i].fd,
+ &zmq_fd_size) == -1) {
+ free (pollfds);
+ return -1;
}
- else
- app_thread = s->get_thread ();
-
- nsockets++;
- continue;
+ pollfds [i].events = items_ [i].events ? POLLIN : 0;
}
-
- // Raw file descriptors.
- pollfds [npollfds].fd = items_ [i].fd;
- pollfds [npollfds].events =
- (items_ [i].events & ZMQ_POLLIN ? POLLIN : 0) |
- (items_ [i].events & ZMQ_POLLOUT ? POLLOUT : 0);
- npollfds++;
- }
-
- // If there's at least one 0MQ socket in the pollset we have to poll
- // for 0MQ commands. If ZMQ_POLL was not set, fail.
- if (nsockets) {
- pollfds [npollfds].fd = app_thread->get_signaler ()->get_fd ();
- if (pollfds [npollfds].fd == zmq::retired_fd) {
- free (pollfds);
- errno = ENOTSUP;
- return -1;
+ // Else, the poll item is a raw file descriptor. Just convert the
+ // events to normal POLLIN/POLLOUT for poll ().
+ else {
+ pollfds [i].fd = items_ [i].fd;
+ pollfds [i].events =
+ (items_ [i].events & ZMQ_POLLIN ? POLLIN : 0) |
+ (items_ [i].events & ZMQ_POLLOUT ? POLLOUT : 0);
}
- pollfds [npollfds].events = POLLIN;
- npollfds++;
- }
-
- // First iteration just check for events, don't block. Waiting would
- // prevent exiting on any events that may already been signaled on
- // 0MQ sockets.
- int rc = poll (pollfds, npollfds, 0);
- if (rc == -1 && errno == EINTR && timeout_ >= 0) {
- free (pollfds);
- return 0;
}
- errno_assert (rc >= 0 || (rc == -1 && errno == EINTR));
- int timeout = timeout_ > 0 ? timeout_ / 1000 : -1;
+ bool first_pass = true;
int nevents = 0;
while (true) {
- // Process 0MQ commands if needed.
- if (nsockets && pollfds [npollfds -1].revents & POLLIN)
- if (!app_thread->process_commands (false, false)) {
+ // Compute the timeout for the subsequent poll.
+ int timeout;
+ if (first_pass)
+ timeout = 0;
+ else if (timeout_ < 0)
+ timeout = -1;
+ else
+ timeout = end - now;
+
+ // Wait for events.
+ while (true) {
+ int rc = poll (pollfds, nitems_, timeout);
+ if (rc == -1 && errno == EINTR) {
free (pollfds);
- errno = ETERM;
return -1;
}
+ errno_assert (rc >= 0);
+ break;
+ }
// Check for the events.
- int pollfd_pos = 0;
for (int i = 0; i != nitems_; i++) {
- // If the poll item is a raw file descriptor, simply convert
+ items_ [i].revents = 0;
+
+ // The poll item is a 0MQ socket. Retrieve pending events
+ // using the ZMQ_EVENTS socket option.
+ if (items_ [i].socket) {
+ size_t zmq_events_size = sizeof (uint32_t);
+ uint32_t zmq_events;
+ if (zmq_getsockopt (items_ [i].socket, ZMQ_EVENTS, &zmq_events,
+ &zmq_events_size) == -1) {
+ free (pollfds);
+ return -1;
+ }
+ if ((items_ [i].events & ZMQ_POLLOUT) &&
+ (zmq_events & ZMQ_POLLOUT))
+ items_ [i].revents |= ZMQ_POLLOUT;
+ if ((items_ [i].events & ZMQ_POLLIN) &&
+ (zmq_events & ZMQ_POLLIN))
+ items_ [i].revents |= ZMQ_POLLIN;
+ }
+ // Else, the poll item is a raw file descriptor, simply convert
// the events to zmq_pollitem_t-style format.
- if (!items_ [i].socket) {
- items_ [i].revents = 0;
- if (pollfds [pollfd_pos].revents & POLLIN)
+ else {
+ if (pollfds [i].revents & POLLIN)
items_ [i].revents |= ZMQ_POLLIN;
- if (pollfds [pollfd_pos].revents & POLLOUT)
+ if (pollfds [i].revents & POLLOUT)
items_ [i].revents |= ZMQ_POLLOUT;
- if (pollfds [pollfd_pos].revents & ~(POLLIN | POLLOUT))
+ if (pollfds [i].revents & ~(POLLIN | POLLOUT))
items_ [i].revents |= ZMQ_POLLERR;
-
- if (items_ [i].revents)
- nevents++;
- pollfd_pos++;
- continue;
}
- // The poll item is a 0MQ socket.
- zmq::socket_base_t *s = (zmq::socket_base_t*) items_ [i].socket;
- items_ [i].revents = 0;
- if ((items_ [i].events & ZMQ_POLLOUT) && s->has_out ())
- items_ [i].revents |= ZMQ_POLLOUT;
- if ((items_ [i].events & ZMQ_POLLIN) && s->has_in ())
- items_ [i].revents |= ZMQ_POLLIN;
if (items_ [i].revents)
nevents++;
}
- // If there's at least one event, or if we are asked not to block,
- // return immediately.
- if (nevents || !timeout_)
+ // If timeout is zero, exit immediately whether there are events or not.
+ if (timeout_ == 0)
break;
- // Wait for events. Ignore interrupts if there's infinite timeout.
- while (true) {
- rc = poll (pollfds, npollfds, timeout);
- if (rc == -1 && errno == EINTR) {
- if (timeout_ < 0)
- continue;
- else {
- rc = 0;
- break;
- }
- }
- errno_assert (rc >= 0);
+ // If there are events to return, we can exit immediately.
+ if (nevents)
break;
+
+ // At this point we are meant to wait for events but there are none.
+ // If timeout is infinite we can just loop until we get some events.
+ if (timeout_ < 0) {
+ if (first_pass)
+ first_pass = false;
+ continue;
}
-
- // If timeout was hit with no events signaled, return zero.
- if (rc == 0)
- break;
- // If timeout was already applied, we don't want to poll anymore.
- // Setting timeout to zero will cause termination of the function
- // once the events we've got are processed.
- if (timeout > 0)
- timeout = 0;
+ // The timeout is finite and there are no events. In the first pass
+ // we get a timestamp of when the polling has begun. (We assume that
+ // the first pass has taken negligible time.) We also compute the time
+ // when the polling should time out.
+ if (first_pass) {
+ now = clock.now_ms ();
+ end = now + (timeout_ / 1000);
+ if (now == end)
+ break;
+ first_pass = false;
+ continue;
+ }
+
+ // Find out whether the timeout has expired.
+ now = clock.now_ms ();
+ if (now >= end)
+ break;
}
free (pollfds);
return nevents;
-#elif defined ZMQ_HAVE_WINDOWS || defined ZMQ_HAVE_OPENVMS
+#elif defined ZMQ_POLL_BASED_ON_SELECT
+
+ if (!items_) {
+ errno = EFAULT;
+ return -1;
+ }
+
+ zmq::clock_t clock;
+ uint64_t now = 0;
+ uint64_t end = 0;
+
+ // Ensure we do not attempt to select () on more than FD_SETSIZE
+ // file descriptors.
+ zmq_assert (nitems_ <= FD_SETSIZE);
fd_set pollset_in;
FD_ZERO (&pollset_in);
@@ -526,166 +508,163 @@ int zmq_poll (zmq_pollitem_t *items_, int nitems_, long timeout_)
fd_set pollset_err;
FD_ZERO (&pollset_err);
- zmq::app_thread_t *app_thread = NULL;
- int nsockets = 0;
- zmq::fd_t maxfd = zmq::retired_fd;
- zmq::fd_t notify_fd = zmq::retired_fd;
-
- // Ensure we do not attempt to select () on more than FD_SETSIZE
- // file descriptors.
- zmq_assert (nitems_ <= FD_SETSIZE);
+ zmq::fd_t maxfd = 0;
+ // Build the fd_sets for passing to select ().
for (int i = 0; i != nitems_; i++) {
- // 0MQ sockets.
+ // If the poll item is a 0MQ socket we are interested in input on the
+ // notification file descriptor retrieved by the ZMQ_FD socket option.
if (items_ [i].socket) {
-
- // Get the app_thread the socket is living in. If there are two
- // sockets in the same pollset with different app threads, fail.
- zmq::socket_base_t *s = (zmq::socket_base_t*) items_ [i].socket;
- if (app_thread) {
- if (app_thread != s->get_thread ()) {
- errno = EFAULT;
- return -1;
- }
+ size_t zmq_fd_size = sizeof (zmq::fd_t);
+ zmq::fd_t notify_fd;
+ if (zmq_getsockopt (items_ [i].socket, ZMQ_FD, &notify_fd,
+ &zmq_fd_size) == -1)
+ return -1;
+ if (items_ [i].events) {
+ FD_SET (notify_fd, &pollset_in);
+ if (maxfd < notify_fd)
+ maxfd = notify_fd;
}
- else
- app_thread = s->get_thread ();
-
- nsockets++;
- continue;
}
-
- // Raw file descriptors.
- if (items_ [i].events & ZMQ_POLLIN)
- FD_SET (items_ [i].fd, &pollset_in);
- if (items_ [i].events & ZMQ_POLLOUT)
- FD_SET (items_ [i].fd, &pollset_out);
- if (items_ [i].events & ZMQ_POLLERR)
- FD_SET (items_ [i].fd, &pollset_err);
- if (maxfd == zmq::retired_fd || maxfd < items_ [i].fd)
- maxfd = items_ [i].fd;
- }
-
- // If there's at least one 0MQ socket in the pollset we have to poll
- // for 0MQ commands. If ZMQ_POLL was not set, fail.
- if (nsockets) {
- notify_fd = app_thread->get_signaler ()->get_fd ();
- if (notify_fd == zmq::retired_fd) {
- errno = ENOTSUP;
- return -1;
+ // Else, the poll item is a raw file descriptor. Convert the poll item
+ // events to the appropriate fd_sets.
+ else {
+ if (items_ [i].events & ZMQ_POLLIN)
+ FD_SET (items_ [i].fd, &pollset_in);
+ if (items_ [i].events & ZMQ_POLLOUT)
+ FD_SET (items_ [i].fd, &pollset_out);
+ if (items_ [i].events & ZMQ_POLLERR)
+ FD_SET (items_ [i].fd, &pollset_err);
+ if (maxfd < items_ [i].fd)
+ maxfd = items_ [i].fd;
}
- FD_SET (notify_fd, &pollset_in);
- if (maxfd == zmq::retired_fd || maxfd < notify_fd)
- maxfd = notify_fd;
}
- bool block = (timeout_ < 0);
- timeval timeout = {timeout_ / 1000000, timeout_ % 1000000};
- timeval zero_timeout = {0, 0};
+ bool first_pass = true;
int nevents = 0;
-
- // First iteration just check for events, don't block. Waiting would
- // prevent exiting on any events that may already been signaled on
- // 0MQ sockets.
fd_set inset, outset, errset;
- memcpy (&inset, &pollset_in, sizeof (fd_set));
- memcpy (&outset, &pollset_out, sizeof (fd_set));
- memcpy (&errset, &pollset_err, sizeof (fd_set));
- int rc = select (maxfd, &inset, &outset, &errset, &zero_timeout);
-#if defined ZMQ_HAVE_WINDOWS
- wsa_assert (rc != SOCKET_ERROR);
-#else
- if (rc == -1 && errno == EINTR && timeout_ >= 0)
- return 0;
- errno_assert (rc >= 0 || (rc == -1 && errno == EINTR));
-#endif
+
+ // Compute the timeout for the subsequent poll.
+ timeval timeout;
+ timeval *ptimeout;
+ if (first_pass) {
+ timeout.tv_sec = 0;
+ timeout.tv_usec = 0;
+ ptimeout = &timeout;
+ }
+ else if (timeout_ < 0)
+ ptimeout = NULL;
+ else {
+ timeout.tv_sec = (long) ((end - now) / 1000);
+ timeout.tv_usec = (long) ((end - now) % 1000 * 1000);
+ ptimeout = &timeout;
+ }
while (true) {
- // Process 0MQ commands if needed.
- if (nsockets && FD_ISSET (notify_fd, &inset))
- if (!app_thread->process_commands (false, false)) {
- errno = ETERM;
+ // Wait for events.
+ while (true) {
+ memcpy (&inset, &pollset_in, sizeof (fd_set));
+ memcpy (&outset, &pollset_out, sizeof (fd_set));
+ memcpy (&errset, &pollset_err, sizeof (fd_set));
+ int rc = select (maxfd + 1, &inset, &outset, &errset, ptimeout);
+#if defined ZMQ_HAVE_WINDOWS
+ wsa_assert (rc != SOCKET_ERROR);
+#else
+ if (rc == -1 && errno == EINTR)
return -1;
- }
+ errno_assert (rc >= 0);
+#endif
+ break;
+ }
// Check for the events.
for (int i = 0; i != nitems_; i++) {
- // If the poll item is a raw file descriptor, simply convert
+ items_ [i].revents = 0;
+
+ // The poll item is a 0MQ socket. Retrieve pending events
+ // using the ZMQ_EVENTS socket option.
+ if (items_ [i].socket) {
+ size_t zmq_events_size = sizeof (uint32_t);
+ uint32_t zmq_events;
+ if (zmq_getsockopt (items_ [i].socket, ZMQ_EVENTS, &zmq_events,
+ &zmq_events_size) == -1)
+ return -1;
+ if ((items_ [i].events & ZMQ_POLLOUT) &&
+ (zmq_events & ZMQ_POLLOUT))
+ items_ [i].revents |= ZMQ_POLLOUT;
+ if ((items_ [i].events & ZMQ_POLLIN) &&
+ (zmq_events & ZMQ_POLLIN))
+ items_ [i].revents |= ZMQ_POLLIN;
+ }
+ // Else, the poll item is a raw file descriptor, simply convert
// the events to zmq_pollitem_t-style format.
- if (!items_ [i].socket) {
- items_ [i].revents = 0;
+ else {
if (FD_ISSET (items_ [i].fd, &inset))
items_ [i].revents |= ZMQ_POLLIN;
if (FD_ISSET (items_ [i].fd, &outset))
items_ [i].revents |= ZMQ_POLLOUT;
if (FD_ISSET (items_ [i].fd, &errset))
items_ [i].revents |= ZMQ_POLLERR;
- if (items_ [i].revents)
- nevents++;
- continue;
}
- // The poll item is a 0MQ socket.
- zmq::socket_base_t *s = (zmq::socket_base_t*) items_ [i].socket;
- items_ [i].revents = 0;
- if ((items_ [i].events & ZMQ_POLLOUT) && s->has_out ())
- items_ [i].revents |= ZMQ_POLLOUT;
- if ((items_ [i].events & ZMQ_POLLIN) && s->has_in ())
- items_ [i].revents |= ZMQ_POLLIN;
if (items_ [i].revents)
nevents++;
}
- // If there's at least one event, or if we are asked not to block,
- // return immediately.
- if (nevents || (timeout.tv_sec == 0 && timeout.tv_usec == 0))
+ // If timeout is zero, exit immediately whether there are events or not.
+ if (timeout_ == 0)
break;
- // Wait for events. Ignore interrupts if there's infinite timeout.
- while (true) {
- memcpy (&inset, &pollset_in, sizeof (fd_set));
- memcpy (&outset, &pollset_out, sizeof (fd_set));
- memcpy (&errset, &pollset_err, sizeof (fd_set));
- rc = select (maxfd, &inset, &outset, &errset,
- block ? NULL : &timeout);
-#if defined ZMQ_HAVE_WINDOWS
- wsa_assert (rc != SOCKET_ERROR);
-#else
- if (rc == -1 && errno == EINTR) {
- if (timeout_ < 0)
- continue;
- else {
- rc = 0;
- break;
- }
- }
- errno_assert (rc >= 0);
-#endif
+ // If there are events to return, we can exit immediately.
+ if (nevents)
break;
+
+ // At this point we are meant to wait for events but there are none.
+ // If timeout is infinite we can just loop until we get some events.
+ if (timeout_ < 0) {
+ if (first_pass)
+ first_pass = false;
+ continue;
}
-
- // If timeout was hit with no events signaled, return zero.
- if (rc == 0)
- break;
- // If timeout was already applied, we don't want to poll anymore.
- // Setting timeout to zero will cause termination of the function
- // once the events we've got are processed.
- if (!block)
- timeout = zero_timeout;
+ // The timeout is finite and there are no events. In the first pass
+ // we get a timestamp of when the polling has begun. (We assume that
+ // the first pass has taken negligible time.) We also compute the time
+ // when the polling should time out.
+ if (first_pass) {
+ now = clock.now_ms ();
+ end = now + (timeout_ / 1000);
+ if (now == end)
+ break;
+ first_pass = false;
+ continue;
+ }
+
+ // Find out whether the timeout has expired.
+ now = clock.now_ms ();
+ if (now >= end)
+ break;
}
return nevents;
#else
+ // Exotic platforms that support neither poll() nor select().
errno = ENOTSUP;
return -1;
#endif
}
+#if defined ZMQ_POLL_BASED_ON_SELECT
+#undef ZMQ_POLL_BASED_ON_SELECT
+#endif
+#if defined ZMQ_POLL_BASED_ON_POLL
+#undef ZMQ_POLL_BASED_ON_POLL
+#endif
+
int zmq_errno ()
{
return errno;
@@ -716,59 +695,26 @@ int zmq_device (int device_, void *insocket_, void *outsocket_)
// 0MQ utils - to be used by perf tests
////////////////////////////////////////////////////////////////////////////////
-#if defined ZMQ_HAVE_WINDOWS
-
-static uint64_t now ()
-{
- // Get the high resolution counter's accuracy.
- LARGE_INTEGER ticksPerSecond;
- QueryPerformanceFrequency (&ticksPerSecond);
-
- // What time is it?
- LARGE_INTEGER tick;
- QueryPerformanceCounter (&tick);
-
- // Convert the tick number into the number of seconds
- // since the system was started.
- double ticks_div = (double) (ticksPerSecond.QuadPart / 1000000);
- return (uint64_t) (tick.QuadPart / ticks_div);
-}
-
void zmq_sleep (int seconds_)
{
+#if defined ZMQ_HAVE_WINDOWS
Sleep (seconds_ * 1000);
-}
-
#else
-
-static uint64_t now ()
-{
- struct timeval tv;
- int rc;
-
- rc = gettimeofday (&tv, NULL);
- assert (rc == 0);
- return (tv.tv_sec * (uint64_t) 1000000 + tv.tv_usec);
-}
-
-void zmq_sleep (int seconds_)
-{
sleep (seconds_);
-}
-
#endif
+}
void *zmq_stopwatch_start ()
{
uint64_t *watch = (uint64_t*) malloc (sizeof (uint64_t));
assert (watch);
- *watch = now ();
+ *watch = zmq::clock_t::now_us ();
return (void*) watch;
}
unsigned long zmq_stopwatch_stop (void *watch_)
{
- uint64_t end = now ();
+ uint64_t end = zmq::clock_t::now_us ();
uint64_t start = *(uint64_t*) watch_;
free (watch_);
return (unsigned long) (end - start);
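
The new zmq_poll () above no longer needs access to socket internals; both branches are built purely on the two socket options it queries through zmq_getsockopt (): ZMQ_FD, a descriptor that becomes readable whenever the socket state may have changed, and ZMQ_EVENTS, the ZMQ_POLLIN/ZMQ_POLLOUT bitmask that has to be re-read after each wake-up. Applications can use the same pattern to drive a 0MQ socket from their own event loop. A minimal sketch for the POSIX case (on POSIX ZMQ_FD yields a plain int, on Windows a SOCKET), with error handling trimmed:

#include <assert.h>
#include <poll.h>
#include <stdint.h>
#include <zmq.h>

// Block until 'socket' has a message ready to be received, using the
// same ZMQ_FD/ZMQ_EVENTS pattern as the rewritten zmq_poll ().
static void wait_for_input (void *socket)
{
    int fd;
    size_t fd_size = sizeof (fd);
    assert (zmq_getsockopt (socket, ZMQ_FD, &fd, &fd_size) == 0);

    while (true) {

        // Check ZMQ_EVENTS first: events may already be pending even if
        // the descriptor is not readable right now, which is why the
        // zmq_poll () above starts with a zero-timeout first pass.
        uint32_t events;
        size_t events_size = sizeof (events);
        assert (zmq_getsockopt (socket, ZMQ_EVENTS, &events,
            &events_size) == 0);
        if (events & ZMQ_POLLIN)
            return;

        // Nothing pending: wait until the notification descriptor
        // signals that the socket state may have changed.
        pollfd pfd = {fd, POLLIN, 0};
        poll (&pfd, 1, -1);
    }
}
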
diff --git a/src/zmq_connecter.cpp b/src/zmq_connecter.cpp
index ebd7572..2dd9576 100644
--- a/src/zmq_connecter.cpp
+++ b/src/zmq_connecter.cpp
@@ -19,55 +19,60 @@
#include <new>
+#include "platform.hpp"
+#if defined ZMQ_HAVE_WINDOWS
+#include "windows.hpp"
+#else
+#include <sys/types.h>
+#include <unistd.h>
+#endif
+
#include "zmq_connecter.hpp"
#include "zmq_engine.hpp"
#include "zmq_init.hpp"
#include "io_thread.hpp"
#include "err.hpp"
-zmq::zmq_connecter_t::zmq_connecter_t (io_thread_t *parent_,
- socket_base_t *owner_, const options_t &options_,
- uint64_t session_ordinal_, bool wait_) :
- owned_t (parent_, owner_),
- io_object_t (parent_),
+zmq::zmq_connecter_t::zmq_connecter_t (class io_thread_t *io_thread_,
+ class session_t *session_, const options_t &options_,
+ const char *protocol_, const char *address_) :
+ own_t (io_thread_, options_),
+ io_object_t (io_thread_),
handle_valid (false),
- wait (wait_),
- session_ordinal (session_ordinal_),
- options (options_)
+ wait (wait_before_connect),
+ session (session_)
{
+ int rc = tcp_connecter.set_address (protocol_, address_);
+ zmq_assert (rc == 0);
}
zmq::zmq_connecter_t::~zmq_connecter_t ()
{
+ if (wait)
+ cancel_timer (reconnect_timer_id);
+ if (handle_valid)
+ rm_fd (handle);
}
-int zmq::zmq_connecter_t::set_address (const char *protocol_,
- const char *address_)
+int zmq::zmq_connecter_t::get_reconnect_ivl ()
{
- int rc = tcp_connecter.set_address (protocol_, address_);
- if (rc != 0)
- return rc;
- protocol = protocol_;
- address = address_;
- return 0;
+#if defined ZMQ_HAVE_WINDOWS
+ return (options.reconnect_ivl + (((int) GetCurrentProcessId () * 13)
+ % options.reconnect_ivl));
+#else
+ return (options.reconnect_ivl + (((int) getpid () * 13)
+ % options.reconnect_ivl));
+#endif
}
void zmq::zmq_connecter_t::process_plug ()
{
if (wait)
- add_timer ();
+ add_timer (get_reconnect_ivl (), reconnect_timer_id);
else
start_connecting ();
}
-void zmq::zmq_connecter_t::process_unplug ()
-{
- if (wait)
- cancel_timer ();
- if (handle_valid)
- rm_fd (handle);
-}
-
void zmq::zmq_connecter_t::in_event ()
{
// We are not polling for incomming data, so we are actually called
@@ -86,25 +91,28 @@ void zmq::zmq_connecter_t::out_event ()
if (fd == retired_fd) {
tcp_connecter.close ();
wait = true;
- add_timer ();
+ add_timer (get_reconnect_ivl (), reconnect_timer_id);
return;
}
+ // Choose I/O thread to run the new init object in. Given that we are
+ // already running in an I/O thread, there must be at least one available.
+ io_thread_t *io_thread = choose_io_thread (options.affinity);
+ zmq_assert (io_thread);
+
// Create an init object.
- zmq_init_t *init = new (std::nothrow) zmq_init_t (
- choose_io_thread (options.affinity), owner,
- fd, options, true, protocol.c_str (), address.c_str (),
- session_ordinal);
+ zmq_init_t *init = new (std::nothrow) zmq_init_t (io_thread, NULL,
+ session, fd, options);
zmq_assert (init);
- send_plug (init);
- send_own (owner, init);
+ launch_sibling (init);
- // Ask owner socket to shut the connecter down.
- term ();
+ // Shut the connecter down.
+ terminate ();
}
-void zmq::zmq_connecter_t::timer_event ()
+void zmq::zmq_connecter_t::timer_event (int id_)
{
+ zmq_assert (id_ == reconnect_timer_id);
wait = false;
start_connecting ();
}
@@ -132,5 +140,5 @@ void zmq::zmq_connecter_t::start_connecting ()
// Handle any other error condition by eventual reconnect.
wait = true;
- add_timer ();
+ add_timer (get_reconnect_ivl (), reconnect_timer_id);
}
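
The new get_reconnect_ivl () above is what keeps a fleet of clients from reconnecting in lockstep after a peer goes away: each process adds a pid-derived offset in [0, reconnect_ivl) to the base interval, so the effective delay always falls in [reconnect_ivl, 2 * reconnect_ivl). A small standalone illustration of that arithmetic (the base value is made up; only the formula mirrors the code above):

#include <stdio.h>
#include <unistd.h>

int main ()
{
    // Base reconnection interval (illustrative value only).
    const int reconnect_ivl = 100;

    // Same computation as zmq_connecter_t::get_reconnect_ivl (): the
    // pid-derived term spreads retries of different processes apart.
    int delay = reconnect_ivl + (((int) getpid () * 13) % reconnect_ivl);

    printf ("reconnect delay: %d (always in [%d, %d))\n",
        delay, reconnect_ivl, 2 * reconnect_ivl);
    return 0;
}
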
diff --git a/src/zmq_connecter.hpp b/src/zmq_connecter.hpp
index 328dd6a..f9ff7ba 100644
--- a/src/zmq_connecter.hpp
+++ b/src/zmq_connecter.hpp
@@ -20,42 +20,44 @@
#ifndef __ZMQ_ZMQ_CONNECTER_HPP_INCLUDED__
#define __ZMQ_ZMQ_CONNECTER_HPP_INCLUDED__
-#include <string>
-
-#include "owned.hpp"
+#include "own.hpp"
#include "io_object.hpp"
#include "tcp_connecter.hpp"
-#include "options.hpp"
#include "stdint.hpp"
namespace zmq
{
- class zmq_connecter_t : public owned_t, public io_object_t
+ class zmq_connecter_t : public own_t, public io_object_t
{
public:
- zmq_connecter_t (class io_thread_t *parent_, socket_base_t *owner_,
- const options_t &options_, uint64_t session_ordinal_, bool wait_);
+ // If the 'wait' flag is set, the connecter waits for a while before
+ // starting the connection process.
+ zmq_connecter_t (class io_thread_t *io_thread_,
+ class session_t *session_, const options_t &options_,
+ const char *protocol_, const char *address_);
~zmq_connecter_t ();
- // Set address to connect to.
- int set_address (const char *protocol_, const char *address_);
-
private:
+ // ID of the timer used to delay the reconnection.
+ enum {reconnect_timer_id = 1};
+
// Handlers for incoming commands.
void process_plug ();
- void process_unplug ();
// Handlers for I/O events.
void in_event ();
void out_event ();
- void timer_event ();
+ void timer_event (int id_);
// Internal function to start the actual connection establishment.
void start_connecting ();
+ // Internal function to return the reconnect backoff delay.
+ int get_reconnect_ivl ();
+
// Actual connecting socket.
tcp_connecter_t tcp_connecter;
@@ -69,15 +71,8 @@ namespace zmq
// If true, connecter is waiting a while before trying to connect.
bool wait;
- // Ordinal of the session to attach to.
- uint64_t session_ordinal;
-
- // Associated socket options.
- options_t options;
-
- // Protocol and address to connect to.
- std::string protocol;
- std::string address;
+ // Reference to the session we belong to.
+ class session_t *session;
zmq_connecter_t (const zmq_connecter_t&);
void operator = (const zmq_connecter_t&);
diff --git a/src/zmq_engine.cpp b/src/zmq_engine.cpp
index 41b10c8..761f6fe 100644
--- a/src/zmq_engine.cpp
+++ b/src/zmq_engine.cpp
@@ -32,10 +32,7 @@
#include "config.hpp"
#include "err.hpp"
-zmq::zmq_engine_t::zmq_engine_t (io_thread_t *parent_, fd_t fd_,
- const options_t &options_, bool reconnect_,
- const char *protocol_, const char *address_) :
- io_object_t (parent_),
+zmq::zmq_engine_t::zmq_engine_t (fd_t fd_, const options_t &options_) :
inpos (NULL),
insize (0),
decoder (in_batch_size),
@@ -44,13 +41,8 @@ zmq::zmq_engine_t::zmq_engine_t (io_thread_t *parent_, fd_t fd_,
encoder (out_batch_size),
inout (NULL),
options (options_),
- reconnect (reconnect_)
+ plugged (false)
{
- if (reconnect) {
- protocol = protocol_;
- address = address_;
- }
-
// Initialise the underlying socket.
int rc = tcp_socket.open (fd_, options.sndbuf, options.rcvbuf);
zmq_assert (rc == 0);
@@ -58,33 +50,54 @@ zmq::zmq_engine_t::zmq_engine_t (io_thread_t *parent_, fd_t fd_,
zmq::zmq_engine_t::~zmq_engine_t ()
{
+ zmq_assert (!plugged);
}
-void zmq::zmq_engine_t::plug (i_inout *inout_)
+void zmq::zmq_engine_t::plug (io_thread_t *io_thread_, i_inout *inout_)
{
- zmq_assert (!inout);
+ zmq_assert (!plugged);
+ plugged = true;
+ // Connect to the session/init object.
+ zmq_assert (!inout);
+ zmq_assert (inout_);
encoder.set_inout (inout_);
decoder.set_inout (inout_);
+ inout = inout_;
+ // Connect to the I/O thread's poller object.
+ io_object_t::plug (io_thread_);
handle = add_fd (tcp_socket.get_fd ());
set_pollin (handle);
set_pollout (handle);
- inout = inout_;
-
// Flush all the data that may have been already received downstream.
in_event ();
}
void zmq::zmq_engine_t::unplug ()
{
+ zmq_assert (plugged);
+ plugged = false;
+
+ // Cancel all fd subscriptions.
rm_fd (handle);
+
+ // Disconnect from the I/O thread's poller object.
+ io_object_t::unplug ();
+
+ // Disconnect from init/session object.
encoder.set_inout (NULL);
decoder.set_inout (NULL);
inout = NULL;
}
+void zmq::zmq_engine_t::terminate ()
+{
+ unplug ();
+ delete this;
+}
+
void zmq::zmq_engine_t::in_event ()
{
bool disconnection = false;
@@ -106,18 +119,24 @@ void zmq::zmq_engine_t::in_event ()
// Push the data to the decoder.
size_t processed = decoder.process_buffer (inpos, insize);
- // Stop polling for input if we got stuck.
- if (processed < insize) {
-
- // This may happen if queue limits are in effect or when
- // init object reads all required information from the socket
- // and rejects to read more data.
- reset_pollin (handle);
+ if (unlikely (processed == (size_t) -1)) {
+ disconnection = true;
}
+ else {
+
+ // Stop polling for input if we got stuck.
+ if (processed < insize) {
+
+ // This may happen if queue limits are in effect or when
+ // the init object reads all required information from the socket
+ // and refuses to read more data.
+ reset_pollin (handle);
+ }
- // Adjust the buffer.
- inpos += processed;
- insize -= processed;
+ // Adjust the buffer.
+ inpos += processed;
+ insize -= processed;
+ }
// Flush all messages the decoder may have produced.
inout->flush ();
@@ -155,7 +174,7 @@ void zmq::zmq_engine_t::out_event ()
outsize -= nbytes;
}
-void zmq::zmq_engine_t::revive ()
+void zmq::zmq_engine_t::activate_out ()
{
set_pollout (handle);
@@ -166,30 +185,18 @@ void zmq::zmq_engine_t::revive ()
out_event ();
}
-void zmq::zmq_engine_t::resume_input ()
+void zmq::zmq_engine_t::activate_in ()
{
set_pollin (handle);
+ // Speculative read.
in_event ();
}
void zmq::zmq_engine_t::error ()
{
zmq_assert (inout);
-
- zmq_connecter_t *reconnecter = NULL;
- if (reconnect) {
-
- // Create a connecter object to attempt reconnect.
- // Ask it to wait for a while before reconnecting.
- reconnecter = new (std::nothrow) zmq_connecter_t (
- inout->get_io_thread (), inout->get_owner (),
- options, inout->get_ordinal (), true);
- zmq_assert (reconnecter);
- reconnecter->set_address (protocol.c_str(), address.c_str ());
- }
-
- inout->detach (reconnecter);
+ inout->detach ();
unplug ();
delete this;
}
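
The in_event () change above gives decoder_t::process_buffer () a third outcome next to "consumed everything" and "consumed only part of the buffer" (back-pressure): a return value of (size_t) -1 now means the byte stream is malformed and the engine treats it as a disconnection. A toy, self-contained consumer using the same return convention (the decoding rule here is invented purely for illustration):

#include <stddef.h>

// Invented stand-in for a streaming decoder: consumes at most four
// bytes per call (to model back-pressure) and reports a 0xff byte as a
// protocol error via the (size_t) -1 convention used above.
static size_t process_buffer (const unsigned char *data, size_t size)
{
    size_t limit = size < 4 ? size : 4;
    for (size_t i = 0; i != limit; i++)
        if (data [i] == 0xff)
            return (size_t) -1;
    return limit;
}

// Mirrors the control flow of zmq_engine_t::in_event (): returns false
// when the peer should be disconnected; a short count is the cue to
// stop polling for input and retry later.
static bool consume (const unsigned char *data, size_t size)
{
    size_t processed = process_buffer (data, size);
    if (processed == (size_t) -1)
        return false;
    if (processed < size) {
        // Got stuck; the engine calls reset_pollin () at this point.
    }
    return true;
}
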
diff --git a/src/zmq_engine.hpp b/src/zmq_engine.hpp
index d89dccc..806e710 100644
--- a/src/zmq_engine.hpp
+++ b/src/zmq_engine.hpp
@@ -27,8 +27,8 @@
#include "i_engine.hpp"
#include "io_object.hpp"
#include "tcp_socket.hpp"
-#include "zmq_encoder.hpp"
-#include "zmq_decoder.hpp"
+#include "encoder.hpp"
+#include "decoder.hpp"
#include "options.hpp"
namespace zmq
@@ -38,16 +38,15 @@ namespace zmq
{
public:
- zmq_engine_t (class io_thread_t *parent_, fd_t fd_,
- const options_t &options_, bool reconnect_,
- const char *protocol_, const char *address_);
+ zmq_engine_t (fd_t fd_, const options_t &options_);
~zmq_engine_t ();
// i_engine interface implementation.
- void plug (struct i_inout *inout_);
+ void plug (class io_thread_t *io_thread_, struct i_inout *inout_);
void unplug ();
- void revive ();
- void resume_input ();
+ void terminate ();
+ void activate_in ();
+ void activate_out ();
// i_poll_events interface implementation.
void in_event ();
@@ -63,19 +62,17 @@ namespace zmq
unsigned char *inpos;
size_t insize;
- zmq_decoder_t decoder;
+ decoder_t decoder;
unsigned char *outpos;
size_t outsize;
- zmq_encoder_t encoder;
+ encoder_t encoder;
i_inout *inout;
options_t options;
- bool reconnect;
- std::string protocol;
- std::string address;
+ bool plugged;
zmq_engine_t (const zmq_engine_t&);
void operator = (const zmq_engine_t&);
diff --git a/src/zmq_init.cpp b/src/zmq_init.cpp
index 5824f5c..b5c5c86 100644
--- a/src/zmq_init.cpp
+++ b/src/zmq_init.cpp
@@ -20,31 +20,35 @@
#include <string.h>
#include "zmq_init.hpp"
+#include "transient_session.hpp"
+#include "named_session.hpp"
+#include "socket_base.hpp"
#include "zmq_engine.hpp"
#include "io_thread.hpp"
#include "session.hpp"
#include "uuid.hpp"
+#include "blob.hpp"
#include "err.hpp"
-zmq::zmq_init_t::zmq_init_t (io_thread_t *parent_, socket_base_t *owner_,
- fd_t fd_, const options_t &options_, bool reconnect_,
- const char *protocol_, const char *address_, uint64_t session_ordinal_) :
- owned_t (parent_, owner_),
+zmq::zmq_init_t::zmq_init_t (io_thread_t *io_thread_,
+ socket_base_t *socket_, session_t *session_, fd_t fd_,
+ const options_t &options_) :
+ own_t (io_thread_, options_),
sent (false),
received (false),
- session_ordinal (session_ordinal_),
- options (options_)
+ socket (socket_),
+ session (session_),
+ io_thread (io_thread_)
{
// Create the engine object for this connection.
- engine = new (std::nothrow) zmq_engine_t (parent_, fd_, options,
- reconnect_, protocol_, address_);
+ engine = new (std::nothrow) zmq_engine_t (fd_, options);
zmq_assert (engine);
}
zmq::zmq_init_t::~zmq_init_t ()
{
if (engine)
- delete engine;
+ engine->terminate ();
}
bool zmq::zmq_init_t::read (::zmq_msg_t *msg_)
@@ -62,7 +66,7 @@ bool zmq::zmq_init_t::read (::zmq_msg_t *msg_)
// If initialisation is done, pass the engine to the session and
// destroy the init object.
- finalise ();
+ finalise_initialisation ();
return true;
}
@@ -99,44 +103,28 @@ void zmq::zmq_init_t::flush ()
// If initialisation is done, pass the engine to the session and
// destroy the init object.
- finalise ();
+ finalise_initialisation ();
}
-void zmq::zmq_init_t::detach (owned_t *reconnecter_)
+void zmq::zmq_init_t::detach ()
{
// This function is called by engine when disconnection occurs.
- // If required, launch the reconnecter.
- if (reconnecter_) {
- send_plug (reconnecter_);
- send_own (owner, reconnecter_);
- }
+ // If there is an associated session, send it a null engine to let it know
+ // that the connection process was unsuccessful.
+ if (session)
+ send_attach (session, NULL, blob_t (), true);
// The engine will destroy itself, so let's just drop the pointer here and
// start termination of the init object.
engine = NULL;
- term ();
-}
-
-zmq::io_thread_t *zmq::zmq_init_t::get_io_thread ()
-{
- return choose_io_thread (options.affinity);
-}
-
-class zmq::socket_base_t *zmq::zmq_init_t::get_owner ()
-{
- return owner;
-}
-
-uint64_t zmq::zmq_init_t::get_ordinal ()
-{
- return session_ordinal;
+ terminate ();
}
void zmq::zmq_init_t::process_plug ()
{
zmq_assert (engine);
- engine->plug (this);
+ engine->plug (io_thread, this);
}
void zmq::zmq_init_t::process_unplug ()
@@ -145,51 +133,71 @@ void zmq::zmq_init_t::process_unplug ()
engine->unplug ();
}
-void zmq::zmq_init_t::finalise ()
+void zmq::zmq_init_t::finalise_initialisation ()
{
if (sent && received) {
- // Disconnect the engine from the init object.
- engine->unplug ();
+ // If we know what session we belong to, it's easy: just send the
+ // engine to that session and destroy the init object. Note that we
+ // know about the session only if this object is owned by it. Thus,
+ // the lifetime of this object is contained in the lifetime of the
+ // session, so the pointer cannot become invalid without notice.
+ if (session) {
+ engine->unplug ();
+ send_attach (session, engine, peer_identity, true);
+ engine = NULL;
+ terminate ();
+ return;
+ }
- session_t *session = NULL;
-
- // If we have the session ordinal, let's use it to find the session.
- // If it is not found, it means socket is already being shut down
- // and the session have been deallocated.
- // TODO: We should check whether the name of the peer haven't changed
- // upon reconnection.
- if (session_ordinal) {
- session = owner->find_session (session_ordinal);
- if (!session) {
- term ();
- return;
- }
+ // All the cases below are listener-based. Therefore we need the socket
+ // reference so that new sessions can bind to that socket.
+ zmq_assert (socket);
+
+ // We have no associated session. If the peer has no identity we'll
+ // create a transient session for the connection. Note that
+ // seqnum is incremented to account for the attach command before the
+ // session is launched. That way we are sure it won't terminate before
+ // being attached.
+ if (peer_identity [0] == 0) {
+ session = new (std::nothrow) transient_session_t (io_thread,
+ socket, options);
+ zmq_assert (session);
+ session->inc_seqnum ();
+ launch_sibling (session);
+ engine->unplug ();
+ send_attach (session, engine, peer_identity, false);
+ engine = NULL;
+ terminate ();
+ return;
}
- else {
-
- // If the peer has a unique name, find the associated session.
- // If it does not exist, create it.
- zmq_assert (!peer_identity.empty ());
- session = owner->find_session (peer_identity);
- if (!session) {
- session = new (std::nothrow) session_t (
- choose_io_thread (options.affinity), owner, options,
- peer_identity);
- zmq_assert (session);
- send_plug (session);
- send_own (owner, session);
-
- // Reserve a sequence number for following 'attach' command.
- session->inc_seqnum ();
- }
+
+ // Try to find the session corresponding to the peer's identity.
+ // If found, send the engine to that session and destroy this object.
+ // Note that session's seqnum is incremented by find_session rather
+ // than by send_attach.
+ session = socket->find_session (peer_identity);
+ if (session) {
+ engine->unplug ();
+ send_attach (session, engine, peer_identity, false);
+ engine = NULL;
+ terminate ();
+ return;
}
- // No need to increment seqnum as it was already incremented above.
+ // There's no such named session. We have to create one. Note that
+ // seqnum is incremented to account for the attach command before the
+ // session is launched. That way we are sure it won't terminate before
+ // being attached.
+ session = new (std::nothrow) named_session_t (io_thread, socket,
+ options, peer_identity);
+ zmq_assert (session);
+ session->inc_seqnum ();
+ launch_sibling (session);
+ engine->unplug ();
send_attach (session, engine, peer_identity, false);
-
- // Destroy the init object.
engine = NULL;
- term ();
+ terminate ();
+ return;
}
}
diff --git a/src/zmq_init.hpp b/src/zmq_init.hpp
index 6f935c2..6087de9 100644
--- a/src/zmq_init.hpp
+++ b/src/zmq_init.hpp
@@ -22,10 +22,9 @@
#include "i_inout.hpp"
#include "i_engine.hpp"
-#include "owned.hpp"
+#include "own.hpp"
#include "fd.hpp"
#include "stdint.hpp"
-#include "options.hpp"
#include "stdint.hpp"
#include "blob.hpp"
@@ -34,28 +33,23 @@ namespace zmq
// The class handles initialisation phase of 0MQ wire-level protocol.
- class zmq_init_t : public owned_t, public i_inout
+ class zmq_init_t : public own_t, public i_inout
{
public:
- zmq_init_t (class io_thread_t *parent_, socket_base_t *owner_,
- fd_t fd_, const options_t &options_, bool reconnect_,
- const char *protocol_, const char *address_,
- uint64_t session_ordinal_);
+ zmq_init_t (class io_thread_t *io_thread_, class socket_base_t *socket_,
+ class session_t *session_, fd_t fd_, const options_t &options_);
~zmq_init_t ();
private:
- void finalise ();
+ void finalise_initialisation ();
// i_inout interface implementation.
bool read (::zmq_msg_t *msg_);
bool write (::zmq_msg_t *msg_);
void flush ();
- void detach (owned_t *reconnecter_);
- class io_thread_t *get_io_thread ();
- class socket_base_t *get_owner ();
- uint64_t get_ordinal ();
+ void detach ();
// Handlers for incoming commands.
void process_plug ();
@@ -70,15 +64,20 @@ namespace zmq
// True if peer's identity was already received.
bool received;
+ // Socket the object belongs to.
+ class socket_base_t *socket;
+
+ // Reference to the session the init object belongs to.
+ // If the associated session is unknown and should be found
+ // depending on peer identity this value is NULL.
+ class session_t *session;
+
// Identity of the peer socket.
blob_t peer_identity;
- // TCP connecter creates session before the name of the peer is known.
- // Thus we know only its ordinal number.
- uint64_t session_ordinal;
-
- // Associated socket options.
- options_t options;
+ // I/O thread the object is living in. It will be used to plug
+ // the engine into the same I/O thread.
+ class io_thread_t *io_thread;
zmq_init_t (const zmq_init_t&);
void operator = (const zmq_init_t&);
diff --git a/src/zmq_listener.cpp b/src/zmq_listener.cpp
index d7cf292..4f5dbb1 100644
--- a/src/zmq_listener.cpp
+++ b/src/zmq_listener.cpp
@@ -24,11 +24,11 @@
#include "io_thread.hpp"
#include "err.hpp"
-zmq::zmq_listener_t::zmq_listener_t (io_thread_t *parent_,
- socket_base_t *owner_, const options_t &options_) :
- owned_t (parent_, owner_),
- io_object_t (parent_),
- options (options_)
+zmq::zmq_listener_t::zmq_listener_t (io_thread_t *io_thread_,
+ socket_base_t *socket_, const options_t &options_) :
+ own_t (io_thread_, options_),
+ io_object_t (io_thread_),
+ socket (socket_)
{
}
@@ -38,7 +38,7 @@ zmq::zmq_listener_t::~zmq_listener_t ()
int zmq::zmq_listener_t::set_address (const char *protocol_, const char *addr_)
{
- return tcp_listener.set_address (protocol_, addr_);
+ return tcp_listener.set_address (protocol_, addr_, options.backlog);
}
void zmq::zmq_listener_t::process_plug ()
@@ -48,9 +48,10 @@ void zmq::zmq_listener_t::process_plug ()
set_pollin (handle);
}
-void zmq::zmq_listener_t::process_unplug ()
+void zmq::zmq_listener_t::process_term (int linger_)
{
rm_fd (handle);
+ own_t::process_term (linger_);
}
void zmq::zmq_listener_t::in_event ()
@@ -62,14 +63,15 @@ void zmq::zmq_listener_t::in_event ()
if (fd == retired_fd)
return;
- // Create an init object.
+ // Choose I/O thread to run the new init object in. Given that we are
+ // already running in an I/O thread, there must be at least one available.
io_thread_t *io_thread = choose_io_thread (options.affinity);
- zmq_init_t *init = new (std::nothrow) zmq_init_t (
- io_thread, owner, fd, options, false, NULL, NULL, 0);
+ zmq_assert (io_thread);
+
+ // Create and launch an init object.
+ zmq_init_t *init = new (std::nothrow) zmq_init_t (io_thread, socket,
+ NULL, fd, options);
zmq_assert (init);
- send_plug (init);
- send_own (owner, init);
+ launch_sibling (init);
}
-
-
diff --git a/src/zmq_listener.hpp b/src/zmq_listener.hpp
index c990b02..f157cf6 100644
--- a/src/zmq_listener.hpp
+++ b/src/zmq_listener.hpp
@@ -20,21 +20,20 @@
#ifndef __ZMQ_ZMQ_LISTENER_HPP_INCLUDED__
#define __ZMQ_ZMQ_LISTENER_HPP_INCLUDED__
-#include "owned.hpp"
+#include "own.hpp"
#include "io_object.hpp"
#include "tcp_listener.hpp"
-#include "options.hpp"
#include "stdint.hpp"
namespace zmq
{
- class zmq_listener_t : public owned_t, public io_object_t
+ class zmq_listener_t : public own_t, public io_object_t
{
public:
- zmq_listener_t (class io_thread_t *parent_, socket_base_t *owner_,
- const options_t &options_);
+ zmq_listener_t (class io_thread_t *io_thread_,
+ class socket_base_t *socket_, const options_t &options_);
~zmq_listener_t ();
// Set address to listen on.
@@ -44,7 +43,7 @@ namespace zmq
// Handlers for incoming commands.
void process_plug ();
- void process_unplug ();
+ void process_term (int linger_);
// Handlers for I/O events.
void in_event ();
@@ -55,8 +54,8 @@ namespace zmq
// Handle corresponding to the listening socket.
handle_t handle;
- // Associated socket options.
- options_t options;
+ // Socket the listener belongs to.
+ class socket_base_t *socket;
zmq_listener_t (const zmq_listener_t&);
void operator = (const zmq_listener_t&);
diff --git a/tests/Makefile.am b/tests/Makefile.am
new file mode 100644
index 0000000..e580802
--- /dev/null
+++ b/tests/Makefile.am
@@ -0,0 +1,22 @@
+INCLUDES = -I$(top_builddir)/include
+LDADD = $(top_builddir)/src/libzmq.la
+
+noinst_PROGRAMS = test_pair_inproc \
+ test_pair_ipc \
+ test_pair_tcp \
+ test_reqrep_inproc \
+ test_reqrep_ipc \
+ test_reqrep_tcp \
+ test_shutdown_stress
+
+test_pair_inproc_SOURCES = test_pair_inproc.cpp testutil.hpp
+test_pair_ipc_SOURCES = test_pair_ipc.cpp testutil.hpp
+test_pair_tcp_SOURCES = test_pair_tcp.cpp testutil.hpp
+
+test_reqrep_inproc_SOURCES = test_reqrep_inproc.cpp testutil.hpp
+test_reqrep_ipc_SOURCES = test_reqrep_ipc.cpp testutil.hpp
+test_reqrep_tcp_SOURCES = test_reqrep_tcp.cpp testutil.hpp
+
+test_shutdown_stress_SOURCES = test_shutdown_stress.cpp
+
+TESTS = $(noinst_PROGRAMS)
diff --git a/tests/test_pair_inproc.cpp b/tests/test_pair_inproc.cpp
new file mode 100644
index 0000000..d5da401
--- /dev/null
+++ b/tests/test_pair_inproc.cpp
@@ -0,0 +1,30 @@
+/*
+ Copyright (c) 2007-2010 iMatix Corporation
+
+ This file is part of 0MQ.
+
+ 0MQ is free software; you can redistribute it and/or modify it under
+ the terms of the Lesser GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ 0MQ is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ Lesser GNU General Public License for more details.
+
+ You should have received a copy of the Lesser GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "testutil.hpp"
+
+using namespace std;
+using namespace zmqtestutil;
+
+int main (int argc, char *argv [])
+{
+ const char *transport = "inproc://tester" ;
+ basic_tests (transport, ZMQ_PAIR, ZMQ_PAIR);
+ return 0 ;
+}
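
All the new tests delegate to zmqtestutil::basic_tests (), whose implementation (tests/testutil.hpp) is not part of the hunks shown here. Purely as a hypothetical illustration of what such a helper could do, given the (transport, connect-side type, bind-side type) call sites above: bind one socket, connect the other, and bounce a message between them. The real helper may well differ.

#include <assert.h>
#include <string.h>
#include <zmq.h>

namespace zmqtestutil
{
    // Hypothetical sketch only. The bound socket uses the second type
    // and the connecting socket the first, so the connecting side talks
    // first; that sequence is valid for both PAIR/PAIR and REQ/REP.
    inline void basic_tests (const char *transport, int connect_type,
        int bind_type)
    {
        void *ctx = zmq_init (1);
        void *server = zmq_socket (ctx, bind_type);
        void *client = zmq_socket (ctx, connect_type);
        assert (zmq_bind (server, transport) == 0);
        assert (zmq_connect (client, transport) == 0);

        // Ping from the connecting side...
        zmq_msg_t msg;
        zmq_msg_init_size (&msg, 4);
        memcpy (zmq_msg_data (&msg), "ping", 4);
        assert (zmq_send (client, &msg, 0) == 0);
        zmq_msg_close (&msg);

        // ...echoed back by the bound side...
        zmq_msg_init (&msg);
        assert (zmq_recv (server, &msg, 0) == 0);
        assert (zmq_msg_size (&msg) == 4);
        assert (zmq_send (server, &msg, 0) == 0);
        zmq_msg_close (&msg);

        // ...and received again by the connecting side.
        zmq_msg_init (&msg);
        assert (zmq_recv (client, &msg, 0) == 0);
        zmq_msg_close (&msg);

        zmq_close (client);
        zmq_close (server);
        zmq_term (ctx);
    }
}
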
diff --git a/tests/test_pair_ipc.cpp b/tests/test_pair_ipc.cpp
new file mode 100644
index 0000000..5bf2621
--- /dev/null
+++ b/tests/test_pair_ipc.cpp
@@ -0,0 +1,30 @@
+/*
+ Copyright (c) 2007-2010 iMatix Corporation
+
+ This file is part of 0MQ.
+
+ 0MQ is free software; you can redistribute it and/or modify it under
+ the terms of the Lesser GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ 0MQ is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ Lesser GNU General Public License for more details.
+
+ You should have received a copy of the Lesser GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "testutil.hpp"
+
+using namespace std;
+using namespace zmqtestutil;
+
+int main (int argc, char *argv [])
+{
+ const char *transport = "ipc:///tmp/tester" ;
+ basic_tests (transport, ZMQ_PAIR, ZMQ_PAIR);
+ return 0 ;
+}
diff --git a/tests/test_pair_tcp.cpp b/tests/test_pair_tcp.cpp
new file mode 100644
index 0000000..60b73ac
--- /dev/null
+++ b/tests/test_pair_tcp.cpp
@@ -0,0 +1,30 @@
+/*
+ Copyright (c) 2007-2010 iMatix Corporation
+
+ This file is part of 0MQ.
+
+ 0MQ is free software; you can redistribute it and/or modify it under
+ the terms of the Lesser GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ 0MQ is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ Lesser GNU General Public License for more details.
+
+ You should have received a copy of the Lesser GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "testutil.hpp"
+
+using namespace std;
+using namespace zmqtestutil;
+
+int main (int argc, char *argv [])
+{
+ const char *transport = "tcp://127.0.0.1:5555" ;
+ basic_tests (transport, ZMQ_PAIR, ZMQ_PAIR);
+ return 0 ;
+}
diff --git a/tests/test_reqrep_inproc.cpp b/tests/test_reqrep_inproc.cpp
new file mode 100644
index 0000000..847d925
--- /dev/null
+++ b/tests/test_reqrep_inproc.cpp
@@ -0,0 +1,30 @@
+/*
+ Copyright (c) 2007-2010 iMatix Corporation
+
+ This file is part of 0MQ.
+
+ 0MQ is free software; you can redistribute it and/or modify it under
+ the terms of the Lesser GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ 0MQ is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ Lesser GNU General Public License for more details.
+
+ You should have received a copy of the Lesser GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "testutil.hpp"
+
+using namespace std;
+using namespace zmqtestutil;
+
+int main (int argc, char *argv [])
+{
+ const char *transport = "inproc://tester" ;
+ basic_tests (transport, ZMQ_REQ, ZMQ_REP);
+ return 0 ;
+}
diff --git a/tests/test_reqrep_ipc.cpp b/tests/test_reqrep_ipc.cpp
new file mode 100644
index 0000000..990834e
--- /dev/null
+++ b/tests/test_reqrep_ipc.cpp
@@ -0,0 +1,30 @@
+/*
+ Copyright (c) 2007-2010 iMatix Corporation
+
+ This file is part of 0MQ.
+
+ 0MQ is free software; you can redistribute it and/or modify it under
+ the terms of the Lesser GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ 0MQ is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ Lesser GNU General Public License for more details.
+
+ You should have received a copy of the Lesser GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "testutil.hpp"
+
+using namespace std;
+using namespace zmqtestutil;
+
+int main (int argc, char *argv [])
+{
+ const char *transport = "ipc:///tmp/tester" ;
+ basic_tests (transport, ZMQ_REQ, ZMQ_REP);
+ return 0 ;
+}
diff --git a/tests/test_reqrep_tcp.cpp b/tests/test_reqrep_tcp.cpp
new file mode 100644
index 0000000..e841d0e
--- /dev/null
+++ b/tests/test_reqrep_tcp.cpp
@@ -0,0 +1,30 @@
+/*
+ Copyright (c) 2007-2010 iMatix Corporation
+
+ This file is part of 0MQ.
+
+ 0MQ is free software; you can redistribute it and/or modify it under
+ the terms of the Lesser GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ 0MQ is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ Lesser GNU General Public License for more details.
+
+ You should have received a copy of the Lesser GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "testutil.hpp"
+
+using namespace std;
+using namespace zmqtestutil;
+
+int main (int argc, char *argv [])
+{
+ const char *transport = "tcp://127.0.0.1:5555" ;
+ basic_tests (transport, ZMQ_REQ, ZMQ_REP);
+ return 0 ;
+}
diff --git a/tests/test_shutdown_stress.cpp b/tests/test_shutdown_stress.cpp
new file mode 100644
index 0000000..9f24138
--- /dev/null
+++ b/tests/test_shutdown_stress.cpp
@@ -0,0 +1,83 @@
+/*
+ Copyright (c) 2007-2010 iMatix Corporation
+
+ This file is part of 0MQ.
+
+ 0MQ is free software; you can redistribute it and/or modify it under
+ the terms of the Lesser GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ 0MQ is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ Lesser GNU General Public License for more details.
+
+ You should have received a copy of the Lesser GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "../include/zmq.h"
+#include <assert.h>
+#include <pthread.h>
+#include <stddef.h>
+
+#define THREAD_COUNT 100
+
+void *worker (void *s)
+{
+ int rc;
+
+ rc = zmq_connect (s, "tcp://127.0.0.1:5555");
+ assert (rc == 0);
+
+ // Start closing the socket while the connection attempt is still in progress.
+ rc = zmq_close (s);
+ assert (rc == 0);
+
+ return NULL;
+}
+
+int main (int argc, char *argv [])
+{
+ void *ctx;
+ void *s1;
+ void *s2;
+ int i;
+ int j;
+ int rc;
+ pthread_t threads [THREAD_COUNT];
+
+ for (j = 0; j != 10; j++) {
+
+ // Check the shutdown with many parallel I/O threads.
+ ctx = zmq_init (7);
+ assert (ctx);
+
+ s1 = zmq_socket (ctx, ZMQ_REP);
+ assert (s1);
+
+ rc = zmq_bind (s1, "tcp://127.0.0.1:5555");
+ assert (rc == 0);
+
+ for (i = 0; i != THREAD_COUNT; i++) {
+ s2 = zmq_socket (ctx, ZMQ_SUB);
+ assert (s2);
+ rc = pthread_create (&threads [i], NULL, worker, s2);
+ assert (rc == 0);
+ }
+
+ for (i = 0; i != THREAD_COUNT; i++) {
+ rc = pthread_join (threads [i], NULL);
+ assert (rc == 0);
+ }
+
+ rc = zmq_close (s1);
+ assert (rc == 0);
+
+ rc = zmq_term (ctx);
+ assert (rc == 0);
+ }
+
+ return 0;
+}
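
The bare assert () pattern used above aborts without reporting which call failed or why. One optional way to surface errno via zmq_strerror () while keeping the test structure unchanged is a small checked-call helper; the sketch below is hypothetical (the helper name is made up) and not part of this patch.

    // Hypothetical helper (not part of this patch): wrap a 0MQ call that
    // returns 0 on success and report zmq_strerror (errno) on failure.
    #include "../include/zmq.h"
    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    static void check_rc (int rc, const char *what)
    {
        if (rc != 0) {
            fprintf (stderr, "%s failed: %s\n", what, zmq_strerror (errno));
            abort ();
        }
    }

    //  Example usage inside the worker above:
    //      check_rc (zmq_connect (s, "tcp://127.0.0.1:5555"), "zmq_connect");
    //      check_rc (zmq_close (s), "zmq_close");
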
diff --git a/tests/testutil.hpp b/tests/testutil.hpp
new file mode 100644
index 0000000..313fad4
--- /dev/null
+++ b/tests/testutil.hpp
@@ -0,0 +1,130 @@
+/*
+ Copyright (c) 2007-2010 iMatix Corporation
+
+ This file is part of 0MQ.
+
+ 0MQ is free software; you can redistribute it and/or modify it under
+ the terms of the Lesser GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ 0MQ is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ Lesser GNU General Public License for more details.
+
+ You should have received a copy of the Lesser GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#ifndef __ZMQ_TEST_TESTUTIL_HPP_INCLUDED__
+#define __ZMQ_TEST_TESTUTIL_HPP_INCLUDED__
+
+#include <assert.h>
+#include <string.h>
+#include <iostream>
+#include <string>
+#include <utility>
+
+#include "../include/zmq.hpp"
+
+namespace zmqtestutil
+{
+
+ using namespace std;
+
+ typedef std::pair <zmq::socket_t*, zmq::socket_t*> socket_pair;
+
+ // Create a pair of sockets connected to each other.
+ socket_pair create_bound_pair (zmq::context_t *context_,
+ int t1_, int t2_, const char *transport_)
+ {
+ zmq::socket_t *s1 = new zmq::socket_t (*context_, t1_);
+ zmq::socket_t *s2 = new zmq::socket_t (*context_, t2_);
+ s1->bind (transport_);
+ s2->connect (transport_);
+ return socket_pair (s1, s2);
+ }
+
+ // Send a message from one socket in the pair to the other and back.
+ std::string ping_pong (const socket_pair &sp_, const std::string &orig_msg_)
+ {
+ zmq::socket_t &s1 = *sp_.first;
+ zmq::socket_t &s2 = *sp_.second;
+
+ // Construct message to send.
+ zmq::message_t ping (orig_msg_.size ());
+ memcpy (ping.data (), orig_msg_.c_str (), orig_msg_.size ());
+
+ // Send ping out.
+ s1.send (ping, 0);
+
+ // Get pong from connected socket.
+ zmq::message_t pong;
+ s2.recv (&pong, 0);
+
+ // Send the message back via s2 so the socket state stays clean for req/rep.
+ std::string ret ((char*) pong.data (), pong.size ());
+ s2.send (pong, 0);
+
+ // Return received data as std::string.
+ return ret;
+ }
+
+ /* Run the basic tests for the given transport.
+
+ The basic tests are:
+ * the ping-pong round trip defined above;
+ * a send/receive where readiness to receive is signalled by zmq::poll.
+ */
+ void basic_tests (const char *transport_, int t1_, int t2_)
+ {
+ zmq::context_t context (1);
+
+ zmq::pollitem_t items [2];
+ socket_pair p = create_bound_pair (&context, t1_, t2_, transport_);
+
+ // First test simple ping pong.
+ const string expect ("XXX");
+
+ {
+ const string returned = zmqtestutil::ping_pong (p, expect);
+ assert (expect == returned);
+
+ // Drain the echoed reply so that the poll below sees only one pending message.
+ zmq::message_t mx;
+ p.first->recv (&mx, 0);
+ }
+
+ {
+ // Now zmq::poll is used to signal that a message is ready to be read.
+ zmq::message_t m1 (expect.size ());
+ memcpy (m1.data (), expect.c_str (), expect.size ());
+ items [0].socket = *p.first;
+ items [0].fd = 0;
+ items [0].events = ZMQ_POLLIN;
+ items [0].revents = 0;
+ items [1].socket = *p.second;
+ items [1].fd = 0;
+ items [1].events = ZMQ_POLLIN;
+ items [1].revents = 0;
+
+ p.first->send (m1, 0);
+
+ int rc = zmq::poll (&items [0], 2, -1);
+ assert (rc == 1);
+ assert ((items [1].revents & ZMQ_POLLIN) != 0);
+
+ zmq::message_t m2;
+ p.second->recv (&m2, 0);
+ const string ret ((char*) m2.data (), m2.size ());
+ assert (expect == ret);
+ }
+
+ // Delete sockets.
+ delete (p.first);
+ delete (p.second);
+ }
+}
+
+#endif
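
Because basic_tests takes the transport endpoint and both socket types as parameters, covering a further endpoint only needs another thin main () plus the matching Makefile.am entries. A hypothetical example (the file name and port are illustrative and not part of this patch):

    // tests/test_reqrep_tcp_alt.cpp (hypothetical): reuse the helpers in
    // testutil.hpp to run the same basic tests over a second TCP endpoint.
    #include "testutil.hpp"

    int main (int argc, char *argv [])
    {
        zmqtestutil::basic_tests ("tcp://127.0.0.1:5556", ZMQ_REQ, ZMQ_REP);
        return 0;
    }
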