diff --git a/.gitignore b/.gitignore
|
|
index 362b9c890..db310602b 100644
|
|
--- a/.gitignore
|
|
+++ b/.gitignore
|
|
@@ -1,4 +1,3 @@
|
|
-pjlib/include/pj/config_site.h
|
|
lib/
|
|
bin/
|
|
output/
|
|
diff --git a/aconfigure b/aconfigure
|
|
index b5d344631..6707ea11b 100755
|
|
--- a/aconfigure
|
|
+++ b/aconfigure
|
|
@@ -6974,9 +6974,6 @@ case $target in
|
|
|
|
# UUID
|
|
case $target in
|
|
- *android*)
|
|
- ac_os_objs="$ac_os_objs guid_android.o"
|
|
- ;;
|
|
*darwin*)
|
|
ac_os_objs="$ac_os_objs guid_darwin.o"
|
|
;;
|
|
diff --git a/aconfigure.ac b/aconfigure.ac
|
|
index 279870e9d..43d69dc09 100644
|
|
--- a/aconfigure.ac
|
|
+++ b/aconfigure.ac
|
|
@@ -675,9 +675,6 @@ case $target in
|
|
|
|
# UUID
|
|
case $target in
|
|
- *android*)
|
|
- ac_os_objs="$ac_os_objs guid_android.o"
|
|
- ;;
|
|
*darwin*)
|
|
ac_os_objs="$ac_os_objs guid_darwin.o"
|
|
;;
|
|
diff --git a/build/vs/pjproject-vs14-common-config.props b/build/vs/pjproject-vs14-common-config.props
|
|
index 456e4f02e..400439a78 100644
|
|
--- a/build/vs/pjproject-vs14-common-config.props
|
|
+++ b/build/vs/pjproject-vs14-common-config.props
|
|
@@ -18,12 +18,12 @@
|
|
<PropertyGroup>
|
|
<API_Family Condition="'$(API_Family)'==''">WinDesktop</API_Family>
|
|
<PreprocessorDef></PreprocessorDef>
|
|
- <DefaultToolset>v140</DefaultToolset>
|
|
+ <DefaultToolset>v141</DefaultToolset>
|
|
</PropertyGroup>
|
|
<Choose>
|
|
<When Condition="'$(API_Family)'=='WinDesktop'">
|
|
<PropertyGroup>
|
|
- <BuildToolset>v140</BuildToolset>
|
|
+ <BuildToolset>v141</BuildToolset>
|
|
<PreprocessorDef Condition="'$(Platform)'=='Win32'">WIN32;PJ_WIN32=1;PJ_M_I386=1;</PreprocessorDef>
|
|
<PreprocessorDef Condition="'$(Platform)'=='x64'">WIN64;PJ_WIN64=1;PJ_M_X86_64=1;</PreprocessorDef>
|
|
<PreprocessorDef Condition="'$(Platform)'=='ARM64'">PJ_M_ARM64=1;</PreprocessorDef>
|
|
@@ -31,10 +31,10 @@
|
|
</When>
|
|
<When Condition="'$(API_Family)'=='UWP'">
|
|
<PropertyGroup>
|
|
- <BuildToolset>v140</BuildToolset>
|
|
+ <BuildToolset>v141</BuildToolset>
|
|
<PreprocessorDef>PJ_WIN32_UWP;UNICODE;_UNICODE;</PreprocessorDef>
|
|
<PreprocessorDef Condition="'$(Platform)'=='ARM'">$(PreprocessorDef);PJ_M_ARMV7=1;</PreprocessorDef>
|
|
- <PlatformVersion>10.0.10586.0</PlatformVersion>
|
|
+ <PlatformVersion>10.0.16299.0</PlatformVersion>
|
|
<MinPlatformVersion>10.0.10240.0</MinPlatformVersion>
|
|
<AppTypeRev>10.0</AppTypeRev>
|
|
|
|
diff --git a/build/vs/pjproject-vs14-common-defaults.props b/build/vs/pjproject-vs14-common-defaults.props
|
|
index 526f6c925..974447f43 100644
|
|
--- a/build/vs/pjproject-vs14-common-defaults.props
|
|
+++ b/build/vs/pjproject-vs14-common-defaults.props
|
|
@@ -3,7 +3,7 @@
|
|
<ImportGroup Label="PropertySheets">
|
|
</ImportGroup>
|
|
<PropertyGroup Label="UserMacros">
|
|
- <VSVer>14</VSVer>
|
|
+ <VSVer>15</VSVer>
|
|
</PropertyGroup>
|
|
<PropertyGroup>
|
|
<_ProjectFileVersion>14.0.22823.1</_ProjectFileVersion>
|
|
diff --git a/pjlib/include/pj/config_site.h b/pjlib/include/pj/config_site.h
|
|
new file mode 100644
|
|
index 000000000..ba81c7f4d
|
|
--- /dev/null
|
|
+++ b/pjlib/include/pj/config_site.h
|
|
@@ -0,0 +1,26 @@
|
|
+#include "config_site_sample.h"
|
|
+
|
|
+/*
|
|
+* PJLIB settings.
|
|
+*/
|
|
+#define PJ_HAS_IPV6 1
|
|
+#define PJ_GETHOSTIP_DISABLE_LOCAL_RESOLUTION 1
|
|
+
|
|
+/*
|
|
+* PJSIP settings.
|
|
+*/
|
|
+#define PJSIP_MAX_PKT_LEN 8000
|
|
+#define PJSIP_TRANSPORT_SERVER_IDLE_TIME 3
|
|
+
|
|
+/*
|
|
+* PJNAT settings.
|
|
+*/
|
|
+#define PJ_ICE_MAX_CAND 256
|
|
+#define PJ_ICE_ST_MAX_CAND 32
|
|
+#define PJ_ICE_MAX_STUN 6
|
|
+#define PJ_ICE_MAX_TURN 4
|
|
+#define PJ_ICE_COMP_BITS 5
|
|
+#define PJ_ICE_MAX_CHECKS 1024
|
|
+/* Set permanent permissions on the TURN
|
|
+ server for all peer candidates */
|
|
+#define PJ_ICE_ST_USE_TURN_PERMANENT_PERM PJ_TRUE
|
|
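
The overrides above are picked up through <pj/config.h>, which includes <pj/config_site.h>, so they apply to every translation unit in the tree. A minimal sketch (not part of the patch) that echoes the resulting pjnath limits, handy for double-checking a build:

#include <pj/config.h>
#include <pjnath/config.h>
#include <stdio.h>

int main(void)
{
    /* The macros are plain integer literals, so %d is sufficient. */
    printf("ICE limits: cand=%d strans-cand=%d checks=%d comp-bits=%d\n",
           PJ_ICE_MAX_CAND, PJ_ICE_ST_MAX_CAND, PJ_ICE_MAX_CHECKS,
           PJ_ICE_COMP_BITS);
    printf("IPv6 support: %d\n", PJ_HAS_IPV6);
    return 0;
}
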
diff --git a/pjlib/include/pj/sock.h b/pjlib/include/pj/sock.h
|
|
index 88d679c5e..9af42ba3a 100644
|
|
--- a/pjlib/include/pj/sock.h
|
|
+++ b/pjlib/include/pj/sock.h
|
|
@@ -320,6 +320,12 @@ extern const pj_uint16_t PJ_SO_REUSEADDR;
|
|
/** Do not generate SIGPIPE. @see pj_SO_NOSIGPIPE */
|
|
extern const pj_uint16_t PJ_SO_NOSIGPIPE;
|
|
|
|
+extern const pj_uint16_t PJ_SO_KEEPALIVE;
|
|
+extern const pj_uint16_t PJ_TCP_KEEPIDLE;
|
|
+extern const pj_uint16_t PJ_TCP_KEEPINTVL;
|
|
+extern const pj_uint16_t PJ_TCP_KEEPCNT;
|
|
+extern const pj_uint16_t PJ_TCP_USER_TIMEOUT;
|
|
+
|
|
/** Set the protocol-defined priority for all packets to be sent on socket.
|
|
*/
|
|
extern const pj_uint16_t PJ_SO_PRIORITY;
|
|
@@ -350,9 +356,24 @@ extern const pj_uint16_t PJ_IP_DROP_MEMBERSHIP;
|
|
/** Get #PJ_SO_SNDBUF constant */
|
|
PJ_DECL(pj_uint16_t) pj_SO_SNDBUF(void);
|
|
|
|
+ /** Get #PJ_SO_KEEPALIVE constant */
|
|
+    PJ_DECL(pj_uint16_t) pj_SO_KEEPALIVE(void);
+
 /** Get #PJ_TCP_NODELAY constant */
 PJ_DECL(pj_uint16_t) pj_TCP_NODELAY(void);

+    /** Get #PJ_TCP_KEEPIDLE constant */
+    PJ_DECL(pj_uint16_t) pj_TCP_KEEPIDLE(void);
+
+    /** Get #PJ_TCP_KEEPINTVL constant */
+    PJ_DECL(pj_uint16_t) pj_TCP_KEEPINTVL(void);
+
+    /** Get #PJ_TCP_USER_TIMEOUT constant */
+    PJ_DECL(pj_uint16_t) pj_TCP_USER_TIMEOUT(void);
+
+    /** Get #PJ_TCP_KEEPCNT constant */
+    PJ_DECL(pj_uint16_t) pj_TCP_KEEPCNT(void);
+
|
|
/** Get #PJ_SO_REUSEADDR constant */
|
|
PJ_DECL(pj_uint16_t) pj_SO_REUSEADDR(void);
|
|
|
|
@@ -386,9 +407,24 @@ extern const pj_uint16_t PJ_IP_DROP_MEMBERSHIP;
|
|
/** Get #PJ_SO_SNDBUF constant */
|
|
# define pj_SO_SNDBUF() PJ_SO_SNDBUF
|
|
|
|
+ /** Get #PJ_SO_KEEPALIVE constant */
|
|
+# define pj_SO_KEEPALIVE() PJ_SO_KEEPALIVE
|
|
+
|
|
/** Get #PJ_TCP_NODELAY constant */
|
|
# define pj_TCP_NODELAY() PJ_TCP_NODELAY
|
|
|
|
+ /** Get #PJ_TCP_KEEPIDLE constant */
|
|
+# define pj_TCP_KEEPIDLE() PJ_TCP_KEEPIDLE
|
|
+
|
|
+ /** Get #PJ_TCP_USER_TIMEOUT constant */
|
|
+# define pj_TCP_USER_TIMEOUT() PJ_TCP_USER_TIMEOUT
|
|
+
|
|
+ /** Get #PJ_TCP_KEEPINTVL constant */
|
|
+# define pj_TCP_KEEPINTVL() PJ_TCP_KEEPINTVL
|
|
+
|
|
+ /** Get #PJ_TCP_KEEPCNT constant */
|
|
+# define pj_TCP_KEEPCNT() PJ_TCP_KEEPCNT
|
|
+
|
|
/** Get #PJ_SO_REUSEADDR constant */
|
|
# define pj_SO_REUSEADDR() PJ_SO_REUSEADDR
|
|
|
|
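
These getters mirror the existing pj_SO_*/pj_TCP_* accessors and are backed by PJ_DEF functions added to sock_common.c later in this change. A minimal sketch (not part of the patch; the durations are illustrative) of enabling keep-alive on a TCP socket through them, close to what pj_sock_socket() now does by default in sock_bsd.c:

#include <pj/sock.h>

static pj_status_t enable_tcp_keepalive(pj_sock_t sock)
{
    int on = 1;
    int idle_sec = 30;    /* idle time before the first probe        */
    int intvl_sec = 30;   /* interval between unanswered probes      */
    int cnt = 3;          /* probes before the peer is declared dead */
    pj_status_t status;

    status = pj_sock_setsockopt(sock, pj_SOL_SOCKET(), pj_SO_KEEPALIVE(),
                                &on, sizeof(on));
    if (status != PJ_SUCCESS)
        return status;

    /* The TCP_* options only take effect where the platform defines the
     * corresponding constants; failures are deliberately not fatal here. */
    pj_sock_setsockopt(sock, pj_SOL_TCP(), pj_TCP_KEEPIDLE(),
                       &idle_sec, sizeof(idle_sec));
    pj_sock_setsockopt(sock, pj_SOL_TCP(), pj_TCP_KEEPINTVL(),
                       &intvl_sec, sizeof(intvl_sec));
    pj_sock_setsockopt(sock, pj_SOL_TCP(), pj_TCP_KEEPCNT(),
                       &cnt, sizeof(cnt));
    return PJ_SUCCESS;
}
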
diff --git a/pjlib/src/pj/ioqueue_common_abs.c b/pjlib/src/pj/ioqueue_common_abs.c
|
|
index a9a6a9cfd..a0d17e72e 100644
|
|
--- a/pjlib/src/pj/ioqueue_common_abs.c
|
|
+++ b/pjlib/src/pj/ioqueue_common_abs.c
|
|
@@ -1056,7 +1056,10 @@ retry_on_restart:
|
|
/*
|
|
* Check that address storage can hold the address parameter.
|
|
*/
|
|
- PJ_ASSERT_RETURN(addrlen <= (int)sizeof(pj_sockaddr), PJ_EBUG);
|
|
+ PJ_ASSERT_RETURN((((pj_sockaddr*)addr)->addr.sa_family == pj_AF_INET() &&
|
|
+ addrlen <= (int)sizeof(pj_sockaddr_in)) ||
|
|
+ (((pj_sockaddr*)addr)->addr.sa_family == pj_AF_INET6() &&
|
|
+ addrlen <= (int)sizeof(pj_sockaddr_in6)), PJ_EBUG);
|
|
|
|
/*
|
|
* Schedule asynchronous send.
|
|
diff --git a/pjlib/src/pj/os_core_unix.c b/pjlib/src/pj/os_core_unix.c
|
|
index c90c5ef69..233bbbb00 100644
|
|
--- a/pjlib/src/pj/os_core_unix.c
|
|
+++ b/pjlib/src/pj/os_core_unix.c
|
|
@@ -71,7 +71,7 @@ JavaVM *pj_jni_jvm = NULL;
|
|
JNIEXPORT jint JNI_OnLoad(JavaVM *vm, void *reserved)
|
|
{
|
|
pj_jni_jvm = vm;
|
|
-
|
|
+
|
|
return JNI_VERSION_1_4;
|
|
}
|
|
|
|
@@ -845,6 +845,18 @@ PJ_DEF(pj_status_t) pj_thread_resume(pj_thread_t *p)
|
|
return rc;
|
|
}
|
|
|
|
+#if PJ_DARWINOS
|
|
+static pthread_key_t key;
|
|
+static pthread_once_t key_once = PTHREAD_ONCE_INIT;
|
|
+
|
|
+static void
|
|
+make_key()
|
|
+{
|
|
+ (void) pthread_key_create(&key, free);
|
|
+}
|
|
+#endif
|
|
+
|
|
+
|
|
/*
|
|
* pj_thread_this()
|
|
*/
|
|
@@ -854,9 +866,26 @@ PJ_DEF(pj_thread_t*) pj_thread_this(void)
|
|
pj_thread_t *rec = (pj_thread_t*)pj_thread_local_get(thread_tls_id);
|
|
|
|
if (rec == NULL) {
|
|
- pj_assert(!"Calling pjlib from unknown/external thread. You must "
|
|
- "register external threads with pj_thread_register() "
|
|
- "before calling any pjlib functions.");
|
|
+
|
|
+ static pj_thread_t* dummy;
|
|
+
|
|
+#if PJ_DARWINOS
|
|
+ (void) pthread_once(&key_once, make_key);
|
|
+
|
|
+ pj_thread_t* desc;
|
|
+
|
|
+ if ((desc = pthread_getspecific(key)) == NULL) {
|
|
+ desc = malloc(sizeof(pj_thread_desc));
|
|
+ pj_bzero(desc, sizeof(pj_thread_desc));
|
|
+ (void) pthread_setspecific(key, desc);
|
|
+ }
|
|
+#else
|
|
+ static __thread pj_thread_desc desc;
|
|
+#endif
|
|
+
|
|
+ pj_thread_register(NULL, (long*)desc, &dummy);
|
|
+
|
|
+ rec = (pj_thread_t*)pj_thread_local_get(thread_tls_id);
|
|
}
|
|
|
|
/*
|
|
@@ -1049,7 +1078,7 @@ PJ_DEF(pj_status_t) pj_atomic_destroy( pj_atomic_t *atomic_var )
|
|
pj_status_t status;
|
|
|
|
PJ_ASSERT_RETURN(atomic_var, PJ_EINVAL);
|
|
-
|
|
+
|
|
#if PJ_HAS_THREADS
|
|
status = pj_mutex_destroy( atomic_var->mutex );
|
|
if (status == PJ_SUCCESS) {
|
|
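
Previously, calling pjlib from a thread it did not create hit the assertion that this hunk removes; such threads had to register themselves first. That manual pattern (sketched below; caller code, not part of the patch) is still valid. The change merely performs an equivalent registration on demand inside pj_thread_this(): a pthread key on Darwin, __thread storage elsewhere, and __declspec(thread) in os_core_win32.c below.

#include <pjlib.h>

static void external_thread_body(void)
{
    /* The descriptor must stay valid for the lifetime of the thread. */
    pj_thread_desc desc;
    pj_thread_t *this_thread;

    pj_bzero(desc, sizeof(desc));
    if (!pj_thread_is_registered())
        pj_thread_register("ext-thread", desc, &this_thread);

    /* Any pjlib/pjnath/pjsip call is now safe from this thread. */
}
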
diff --git a/pjlib/src/pj/os_core_win32.c b/pjlib/src/pj/os_core_win32.c
|
|
index 68a538dcb..d2ee5b180 100644
|
|
--- a/pjlib/src/pj/os_core_win32.c
|
|
+++ b/pjlib/src/pj/os_core_win32.c
|
|
@@ -655,9 +655,10 @@ PJ_DEF(pj_thread_t*) pj_thread_this(void)
|
|
pj_thread_t *rec = pj_thread_local_get(thread_tls_id);
|
|
|
|
if (rec == NULL) {
|
|
- pj_assert(!"Calling pjlib from unknown/external thread. You must "
|
|
- "register external threads with pj_thread_register() "
|
|
- "before calling any pjlib functions.");
|
|
+ static __declspec(thread) pj_thread_desc desc;
|
|
+ static __declspec(thread) pj_thread_t* this_thread;
|
|
+ pj_thread_register(NULL, desc, &this_thread);
|
|
+ rec = (pj_thread_t*)pj_thread_local_get(thread_tls_id);
|
|
}
|
|
|
|
/*
|
|
diff --git a/pjlib/src/pj/os_timestamp_posix.c b/pjlib/src/pj/os_timestamp_posix.c
|
|
index 07ef682a9..0371aad43 100644
|
|
--- a/pjlib/src/pj/os_timestamp_posix.c
|
|
+++ b/pjlib/src/pj/os_timestamp_posix.c
|
|
@@ -202,7 +202,7 @@ PJ_DEF(pj_status_t) pj_get_timestamp_freq(pj_timestamp *freq)
|
|
return PJ_SUCCESS;
|
|
}
|
|
|
|
-#elif defined(__ANDROID__)
|
|
+#elif defined(PJ_ANDROID) && PJ_ANDROID
|
|
|
|
#include <errno.h>
|
|
#include <time.h>
|
|
diff --git a/pjlib/src/pj/sock_bsd.c b/pjlib/src/pj/sock_bsd.c
|
|
index 5f594efa7..ddc8cd9cc 100644
|
|
--- a/pjlib/src/pj/sock_bsd.c
|
|
+++ b/pjlib/src/pj/sock_bsd.c
|
|
@@ -1,4 +1,4 @@
|
|
-/*
|
|
+/*
|
|
* Copyright (C) 2008-2011 Teluu Inc. (http://www.teluu.com)
|
|
* Copyright (C) 2003-2008 Benny Prijono <benny@prijono.org>
|
|
*
|
|
@@ -14,7 +14,7 @@
|
|
*
|
|
* You should have received a copy of the GNU General Public License
|
|
* along with this program; if not, write to the Free Software
|
|
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
|
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
|
*/
|
|
#include <pj/sock.h>
|
|
#include <pj/os.h>
|
|
@@ -35,6 +35,15 @@
|
|
|
|
#define THIS_FILE "sock_bsd.c"
|
|
|
|
+#if !defined(PJ_WIN32) && !defined(PJ_WIN64)
|
|
+# if !defined(SOL_TCP) && defined(IPPROTO_TCP)
|
|
+# define SOL_TCP IPPROTO_TCP
|
|
+# endif
|
|
+# if !defined(TCP_KEEPIDLE) && defined(TCP_KEEPALIVE)
|
|
+# define TCP_KEEPIDLE TCP_KEEPALIVE
|
|
+# endif
|
|
+#endif
|
|
+
|
|
/*
|
|
* Address families conversion.
|
|
* The values here are indexed based on pj_addr_family.
|
|
@@ -172,7 +181,24 @@ const pj_uint16_t PJ_IPV6_TCLASS = 0xFFFF;
|
|
const pj_uint16_t PJ_SO_TYPE = SO_TYPE;
|
|
const pj_uint16_t PJ_SO_RCVBUF = SO_RCVBUF;
|
|
const pj_uint16_t PJ_SO_SNDBUF = SO_SNDBUF;
|
|
+const pj_uint16_t PJ_SO_KEEPALIVE = SO_KEEPALIVE;
|
|
const pj_uint16_t PJ_TCP_NODELAY= TCP_NODELAY;
|
|
+#if !defined(PJ_WIN32) && !defined(PJ_WIN64)
|
|
+# ifdef TCP_KEEPIDLE
|
|
+const pj_uint16_t PJ_TCP_KEEPIDLE = TCP_KEEPIDLE;
|
|
+# endif
|
|
+# ifdef TCP_KEEPINTVL
|
|
+const pj_uint16_t PJ_TCP_KEEPINTVL = TCP_KEEPINTVL;
|
|
+# endif
|
|
+# ifdef TCP_USER_TIMEOUT
|
|
+const pj_uint16_t PJ_TCP_USER_TIMEOUT = TCP_USER_TIMEOUT;
|
|
+# else
|
|
+const pj_uint16_t PJ_TCP_USER_TIMEOUT = 18;
|
|
+# endif
|
|
+# ifdef TCP_KEEPCNT
|
|
+const pj_uint16_t PJ_TCP_KEEPCNT = TCP_KEEPCNT;
|
|
+# endif
|
|
+#endif
|
|
const pj_uint16_t PJ_SO_REUSEADDR= SO_REUSEADDR;
|
|
#ifdef SO_NOSIGPIPE
|
|
const pj_uint16_t PJ_SO_NOSIGPIPE = SO_NOSIGPIPE;
|
|
@@ -270,7 +296,7 @@ PJ_DEF(char*) pj_inet_ntoa(pj_in_addr inaddr)
|
|
/*
|
|
* This function converts the Internet host address cp from the standard
|
|
* numbers-and-dots notation into binary data and stores it in the structure
|
|
- * that inp points to.
|
|
+ * that inp points to.
|
|
*/
|
|
PJ_DEF(int) pj_inet_aton(const pj_str_t *cp, pj_in_addr *inp)
|
|
{
|
|
@@ -312,7 +338,7 @@ PJ_DEF(pj_status_t) pj_inet_pton(int af, const pj_str_t *src, void *dst)
|
|
PJ_ASSERT_RETURN(af==PJ_AF_INET || af==PJ_AF_INET6, PJ_EAFNOTSUP);
|
|
PJ_ASSERT_RETURN(src && src->slen && dst, PJ_EINVAL);
|
|
|
|
- /* Initialize output with PJ_IN_ADDR_NONE for IPv4 (to be
|
|
+ /* Initialize output with PJ_IN_ADDR_NONE for IPv4 (to be
|
|
* compatible with pj_inet_aton()
|
|
*/
|
|
if (af==PJ_AF_INET) {
|
|
@@ -357,7 +383,7 @@ PJ_DEF(pj_status_t) pj_inet_pton(int af, const pj_str_t *src, void *dst)
|
|
|
|
sock_addr.addr.sa_family = (pj_uint16_t)af;
|
|
rc = WSAStringToAddress(
|
|
- PJ_STRING_TO_NATIVE(tempaddr,wtempaddr,sizeof(wtempaddr)),
|
|
+ PJ_STRING_TO_NATIVE(tempaddr,wtempaddr,sizeof(wtempaddr)),
|
|
af, NULL, (LPSOCKADDR)&sock_addr, &addr_len);
|
|
if (rc != 0) {
|
|
/* If you get rc 130022 Invalid argument (WSAEINVAL) with IPv6,
|
|
@@ -505,8 +531,8 @@ PJ_DEF(const pj_str_t*) pj_gethostname(void)
|
|
/*
|
|
* Create new socket/endpoint for communication and returns a descriptor.
|
|
*/
|
|
-PJ_DEF(pj_status_t) pj_sock_socket(int af,
|
|
- int type,
|
|
+PJ_DEF(pj_status_t) pj_sock_socket(int af,
|
|
+ int type,
|
|
int proto,
|
|
pj_sock_t *sock)
|
|
{
|
|
@@ -514,14 +540,14 @@ PJ_DEF(pj_status_t) pj_sock_socket(int af,
|
|
|
|
/* Sanity checks. */
|
|
PJ_ASSERT_RETURN(sock!=NULL, PJ_EINVAL);
|
|
- PJ_ASSERT_RETURN((SOCKET)PJ_INVALID_SOCKET==INVALID_SOCKET,
|
|
+ PJ_ASSERT_RETURN((SOCKET)PJ_INVALID_SOCKET==INVALID_SOCKET,
|
|
(*sock=PJ_INVALID_SOCKET, PJ_EINVAL));
|
|
|
|
*sock = WSASocket(af, type, proto, NULL, 0, WSA_FLAG_OVERLAPPED);
|
|
|
|
- if (*sock == PJ_INVALID_SOCKET)
|
|
+ if (*sock == PJ_INVALID_SOCKET)
|
|
return PJ_RETURN_OS_ERROR(pj_get_native_netos_error());
|
|
-
|
|
+
|
|
#if PJ_SOCK_DISABLE_WSAECONNRESET && \
|
|
(!defined(PJ_WIN32_WINCE) || PJ_WIN32_WINCE==0)
|
|
|
|
@@ -555,9 +581,9 @@ PJ_DEF(pj_status_t) pj_sock_socket(int af,
|
|
/*
|
|
* Create new socket/endpoint for communication and returns a descriptor.
|
|
*/
|
|
-PJ_DEF(pj_status_t) pj_sock_socket(int af,
|
|
- int type,
|
|
- int proto,
|
|
+PJ_DEF(pj_status_t) pj_sock_socket(int af,
|
|
+ int type,
|
|
+ int proto,
|
|
pj_sock_t *sock)
|
|
{
|
|
int type0 = type;
|
|
@@ -566,7 +592,7 @@ PJ_DEF(pj_status_t) pj_sock_socket(int af,
|
|
|
|
/* Sanity checks. */
|
|
PJ_ASSERT_RETURN(sock!=NULL, PJ_EINVAL);
|
|
- PJ_ASSERT_RETURN(PJ_INVALID_SOCKET==-1,
|
|
+ PJ_ASSERT_RETURN(PJ_INVALID_SOCKET==-1,
|
|
(*sock=PJ_INVALID_SOCKET, PJ_EINVAL));
|
|
|
|
#if !defined(SOCK_CLOEXEC)
|
|
@@ -584,7 +610,22 @@ PJ_DEF(pj_status_t) pj_sock_socket(int af,
|
|
pj_int32_t val = 1;
|
|
if ((type & 0xF) == pj_SOCK_STREAM()) {
|
|
pj_sock_setsockopt(*sock, pj_SOL_SOCKET(), pj_SO_NOSIGPIPE(),
|
|
- &val, sizeof(val));
|
|
+ &val, sizeof(val));
|
|
+ pj_sock_setsockopt(*sock, pj_SOL_SOCKET(), pj_SO_KEEPALIVE(),
|
|
+ &val, sizeof(val));
|
|
+ pj_sock_setsockopt(*sock, pj_SOL_TCP(), pj_TCP_KEEPCNT(),
|
|
+ &val, sizeof(val));
|
|
+ val = 30;
|
|
+ pj_sock_setsockopt(*sock, pj_SOL_TCP(), pj_TCP_KEEPIDLE(),
|
|
+ &val, sizeof(val));
|
|
+ pj_sock_setsockopt(*sock, pj_SOL_TCP(), pj_TCP_KEEPINTVL(),
|
|
+ &val, sizeof(val));
|
|
+ val = 30000;
|
|
+ pj_sock_setsockopt(*sock, pj_SOL_TCP(), pj_TCP_USER_TIMEOUT(),
|
|
+ &val, sizeof(val));
|
|
+ val = 1;
|
|
+ pj_sock_setsockopt(*sock, pj_SOL_TCP(), pj_TCP_NODELAY(),
|
|
+ &val, sizeof(val));
|
|
}
|
|
#if defined(PJ_SOCK_HAS_IPV6_V6ONLY) && PJ_SOCK_HAS_IPV6_V6ONLY != 0
|
|
if (af == PJ_AF_INET6) {
|
|
@@ -595,7 +636,7 @@ PJ_DEF(pj_status_t) pj_sock_socket(int af,
|
|
#if defined(PJ_IPHONE_OS_HAS_MULTITASKING_SUPPORT) && \
|
|
PJ_IPHONE_OS_HAS_MULTITASKING_SUPPORT!=0
|
|
if ((type & 0xF) == pj_SOCK_DGRAM()) {
|
|
- pj_sock_setsockopt(*sock, pj_SOL_SOCKET(), SO_NOSIGPIPE,
|
|
+ pj_sock_setsockopt(*sock, pj_SOL_SOCKET(), SO_NOSIGPIPE,
|
|
&val, sizeof(val));
|
|
}
|
|
#endif
|
|
@@ -612,7 +653,7 @@ PJ_DEF(pj_status_t) pj_sock_socket(int af,
|
|
/*
|
|
* Bind socket.
|
|
*/
|
|
-PJ_DEF(pj_status_t) pj_sock_bind( pj_sock_t sock,
|
|
+PJ_DEF(pj_status_t) pj_sock_bind( pj_sock_t sock,
|
|
const pj_sockaddr_t *addr,
|
|
int len)
|
|
{
|
|
@@ -632,7 +673,7 @@ PJ_DEF(pj_status_t) pj_sock_bind( pj_sock_t sock,
|
|
/*
|
|
* Bind socket.
|
|
*/
|
|
-PJ_DEF(pj_status_t) pj_sock_bind_in( pj_sock_t sock,
|
|
+PJ_DEF(pj_status_t) pj_sock_bind_in( pj_sock_t sock,
|
|
pj_uint32_t addr32,
|
|
pj_uint16_t port)
|
|
{
|
|
@@ -741,7 +782,7 @@ PJ_DEF(pj_status_t) pj_sock_sendto(pj_sock_t sock,
|
|
{
|
|
PJ_CHECK_STACK();
|
|
PJ_ASSERT_RETURN(len, PJ_EINVAL);
|
|
-
|
|
+
|
|
CHECK_ADDR_LEN(to, tolen);
|
|
|
|
#ifdef MSG_NOSIGNAL
|
|
@@ -749,12 +790,12 @@ PJ_DEF(pj_status_t) pj_sock_sendto(pj_sock_t sock,
|
|
flags |= MSG_NOSIGNAL;
|
|
#endif
|
|
|
|
- *len = sendto(sock, (const char*)buf, (int)(*len), flags,
|
|
+ *len = sendto(sock, (const char*)buf, (int)(*len), flags,
|
|
(const struct sockaddr*)to, tolen);
|
|
|
|
- if (*len < 0)
|
|
+ if (*len < 0)
|
|
return PJ_RETURN_OS_ERROR(pj_get_native_netos_error());
|
|
- else
|
|
+ else
|
|
return PJ_SUCCESS;
|
|
}
|
|
|
|
@@ -771,7 +812,7 @@ PJ_DEF(pj_status_t) pj_sock_recv(pj_sock_t sock,
|
|
|
|
*len = recv(sock, (char*)buf, (int)(*len), flags);
|
|
|
|
- if (*len < 0)
|
|
+ if (*len < 0)
|
|
return PJ_RETURN_OS_ERROR(pj_get_native_netos_error());
|
|
else
|
|
return PJ_SUCCESS;
|
|
@@ -790,10 +831,10 @@ PJ_DEF(pj_status_t) pj_sock_recvfrom(pj_sock_t sock,
|
|
PJ_CHECK_STACK();
|
|
PJ_ASSERT_RETURN(buf && len, PJ_EINVAL);
|
|
|
|
- *len = recvfrom(sock, (char*)buf, (int)(*len), flags,
|
|
+ *len = recvfrom(sock, (char*)buf, (int)(*len), flags,
|
|
(struct sockaddr*)from, (socklen_t*)fromlen);
|
|
|
|
- if (*len < 0)
|
|
+ if (*len < 0)
|
|
return PJ_RETURN_OS_ERROR(pj_get_native_netos_error());
|
|
else {
|
|
if (from) {
|
|
@@ -832,12 +873,12 @@ PJ_DEF(pj_status_t) pj_sock_setsockopt( pj_sock_t sock,
|
|
{
|
|
int status;
|
|
PJ_CHECK_STACK();
|
|
-
|
|
+
|
|
#if (defined(PJ_WIN32) && PJ_WIN32) || (defined(PJ_SUNOS) && PJ_SUNOS)
|
|
/* Some opt may still need int value (e.g:SO_EXCLUSIVEADDRUSE in win32). */
|
|
- status = setsockopt(sock,
|
|
- level,
|
|
- ((optname&0xff00)==0xff00)?(int)optname|0xffff0000:optname,
|
|
+ status = setsockopt(sock,
|
|
+ level,
|
|
+ ((optname&0xff00)==0xff00)?(int)optname|0xffff0000:optname,
|
|
(const char*)optval, optlen);
|
|
#else
|
|
status = setsockopt(sock, level, optname, (const char*)optval, optlen);
|
|
@@ -861,12 +902,12 @@ PJ_DEF(pj_status_t) pj_sock_setsockopt_params( pj_sock_t sockfd,
|
|
pj_status_t retval = PJ_SUCCESS;
|
|
PJ_CHECK_STACK();
|
|
PJ_ASSERT_RETURN(params, PJ_EINVAL);
|
|
-
|
|
+
|
|
for (;i<params->cnt && i<PJ_MAX_SOCKOPT_PARAMS;++i) {
|
|
- pj_status_t status = pj_sock_setsockopt(sockfd,
|
|
+ pj_status_t status = pj_sock_setsockopt(sockfd,
|
|
(pj_uint16_t)params->options[i].level,
|
|
(pj_uint16_t)params->options[i].optname,
|
|
- params->options[i].optval,
|
|
+ params->options[i].optval,
|
|
params->options[i].optlen);
|
|
if (status != PJ_SUCCESS) {
|
|
retval = status;
|
|
@@ -937,18 +978,18 @@ PJ_DEF(pj_status_t) pj_sock_accept( pj_sock_t serverfd,
|
|
PJ_SOCKADDR_SET_LEN(addr, *addrlen);
|
|
}
|
|
#endif
|
|
-
|
|
+
|
|
*newsock = accept(serverfd, (struct sockaddr*)addr, (socklen_t*)addrlen);
|
|
if (*newsock==PJ_INVALID_SOCKET)
|
|
return PJ_RETURN_OS_ERROR(pj_get_native_netos_error());
|
|
else {
|
|
-
|
|
+
|
|
#if defined(PJ_SOCKADDR_HAS_LEN) && PJ_SOCKADDR_HAS_LEN!=0
|
|
if (addr) {
|
|
PJ_SOCKADDR_RESET_LEN(addr);
|
|
}
|
|
#endif
|
|
-
|
|
+
|
|
return PJ_SUCCESS;
|
|
}
|
|
}
|
|
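
With the defaults that pj_sock_socket() now applies to every TCP socket above (SO_KEEPALIVE on, TCP_KEEPCNT = 1, TCP_KEEPIDLE = TCP_KEEPINTVL = 30 s, TCP_USER_TIMEOUT = 30000 ms, TCP_NODELAY on), an idle connection to a dead peer is dropped after roughly

    TCP_KEEPIDLE + TCP_KEEPCNT * TCP_KEEPINTVL = 30 s + 1 * 30 s = 60 s

while unacknowledged outgoing data times the connection out after about TCP_USER_TIMEOUT = 30 s. This reading is inferred from the values above rather than stated by the patch; where a given option is unavailable the corresponding pj_sock_setsockopt() call simply fails and is ignored, since its return value is not checked.
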
diff --git a/pjlib/src/pj/sock_common.c b/pjlib/src/pj/sock_common.c
|
|
index 62b08bdea..e9f78ff4f 100644
|
|
--- a/pjlib/src/pj/sock_common.c
|
|
+++ b/pjlib/src/pj/sock_common.c
|
|
@@ -1649,11 +1649,36 @@ PJ_DEF(pj_uint16_t) pj_SO_SNDBUF(void)
|
|
return PJ_SO_SNDBUF;
|
|
}
|
|
|
|
+PJ_DEF(pj_uint16_t) pj_SO_KEEPALIVE(void)
|
|
+{
|
|
+ return PJ_SO_KEEPALIVE;
|
|
+}
|
|
+
|
|
+PJ_DEF(pj_uint16_t) pj_TCP_USER_TIMEOUT(void)
|
|
+{
|
|
+ return PJ_TCP_USER_TIMEOUT;
|
|
+}
|
|
+
|
|
PJ_DEF(pj_uint16_t) pj_TCP_NODELAY(void)
|
|
{
|
|
return PJ_TCP_NODELAY;
|
|
}
|
|
|
|
+PJ_DEF(pj_uint16_t) pj_TCP_KEEPIDLE(void)
+{
+    return PJ_TCP_KEEPIDLE;
+}
+
+PJ_DEF(pj_uint16_t) pj_TCP_KEEPINTVL(void)
+{
+    return PJ_TCP_KEEPINTVL;
+}
+
+PJ_DEF(pj_uint16_t) pj_TCP_KEEPCNT(void)
+{
+    return PJ_TCP_KEEPCNT;
+}
|
|
+
|
|
PJ_DEF(pj_uint16_t) pj_SO_REUSEADDR(void)
|
|
{
|
|
return PJ_SO_REUSEADDR;
|
|
diff --git a/pjlib/src/pj/sock_uwp.cpp b/pjlib/src/pj/sock_uwp.cpp
|
|
index 14ce05875..2230af9d1 100644
|
|
--- a/pjlib/src/pj/sock_uwp.cpp
|
|
+++ b/pjlib/src/pj/sock_uwp.cpp
|
|
@@ -69,6 +69,24 @@ const pj_uint16_t PJ_SOL_IP = IPPROTO_IP;
|
|
const pj_uint16_t PJ_SOL_IP = 0;
|
|
#endif /* SOL_IP */
|
|
|
|
+#if defined(TCP_KEEPIDLE)
|
|
+const pj_uint16_t PJ_TCP_KEEPIDLE = TCP_KEEPIDLE;
|
|
+#else
|
|
+const pj_uint16_t PJ_TCP_KEEPIDLE = 4;
|
|
+#endif
|
|
+
|
|
+#if defined(TCP_KEEPINTVL)
|
|
+const pj_uint16_t PJ_TCP_KEEPINTVL = TCP_KEEPINTVL;
|
|
+#else
|
|
+const pj_uint16_t PJ_TCP_KEEPINTVL = 5;
|
|
+#endif
|
|
+
|
|
+#if defined(TCP_KEEPCNT)
|
|
+const pj_uint16_t PJ_TCP_KEEPCNT = TCP_KEEPCNT;
|
|
+#else
|
|
+const pj_uint16_t PJ_TCP_KEEPCNT = 6;
|
|
+#endif
|
|
+
|
|
#if defined(SOL_TCP)
|
|
const pj_uint16_t PJ_SOL_TCP = SOL_TCP;
|
|
#elif defined(IPPROTO_TCP)
|
|
@@ -79,6 +97,18 @@ const pj_uint16_t PJ_SOL_TCP = IPPROTO_TCP;
|
|
const pj_uint16_t PJ_SOL_TCP = 6;
|
|
#endif /* SOL_TCP */
|
|
|
|
+#if defined(TCP_USER_TIMEOUT)
|
|
+const pj_uint16_t PJ_TCP_USER_TIMEOUT = TCP_USER_TIMEOUT;
|
|
+#else
|
|
+const pj_uint16_t PJ_TCP_USER_TIMEOUT = 18;
|
|
+#endif
|
|
+
|
|
+#if defined(SOL_KEEPALIVE)
|
|
+const pj_uint16_t PJ_SOL_KEEPALIVE = SOL_KEEPALIVE;
|
|
+#else
|
|
+const pj_uint16_t PJ_SOL_KEEPALIVE = 9;
|
|
+#endif
|
|
+
|
|
#ifdef SOL_UDP
|
|
const pj_uint16_t PJ_SOL_UDP = SOL_UDP;
|
|
#elif defined(IPPROTO_UDP)
|
|
diff --git a/pjlib/src/pj/symbols.c b/pjlib/src/pj/symbols.c
|
|
index ad56c4f98..f224300c9 100644
|
|
--- a/pjlib/src/pj/symbols.c
|
|
+++ b/pjlib/src/pj/symbols.c
|
|
@@ -258,6 +258,10 @@ PJ_EXPORT_SYMBOL(PJ_SOCK_RAW)
|
|
PJ_EXPORT_SYMBOL(PJ_SOCK_RDM)
|
|
PJ_EXPORT_SYMBOL(PJ_SOL_SOCKET)
|
|
PJ_EXPORT_SYMBOL(PJ_SOL_IP)
|
|
+PJ_EXPORT_SYMBOL(PJ_TCP_KEEPIDLE)
|
|
+PJ_EXPORT_SYMBOL(PJ_TCP_KEEPINTVL)
|
|
+PJ_EXPORT_SYMBOL(PJ_TCP_KEEPCNT)
|
|
+PJ_EXPORT_SYMBOL(PJ_TCP_USER_TIMEOUT)
|
|
PJ_EXPORT_SYMBOL(PJ_SOL_TCP)
|
|
PJ_EXPORT_SYMBOL(PJ_SOL_UDP)
|
|
PJ_EXPORT_SYMBOL(PJ_SOL_IPV6)
|
|
diff --git a/pjnath/include/pjnath/config.h b/pjnath/include/pjnath/config.h
|
|
index e904c3ac4..bd988d3d5 100644
|
|
--- a/pjnath/include/pjnath/config.h
|
|
+++ b/pjnath/include/pjnath/config.h
|
|
@@ -1,4 +1,4 @@
|
|
-/*
|
|
+/*
|
|
* Copyright (C) 2008-2011 Teluu Inc. (http://www.teluu.com)
|
|
* Copyright (C) 2003-2008 Benny Prijono <benny@prijono.org>
|
|
*
|
|
@@ -14,7 +14,7 @@
|
|
*
|
|
* You should have received a copy of the GNU General Public License
|
|
* along with this program; if not, write to the Free Software
|
|
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
|
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
|
*/
|
|
#ifndef __PJNATH_CONFIG_H__
|
|
#define __PJNATH_CONFIG_H__
|
|
@@ -65,9 +65,9 @@
|
|
|
|
/**
|
|
* The default initial STUN round-trip time estimation (the RTO value
|
|
- * in RFC 3489-bis), in miliseconds.
|
|
- * This value is used to control the STUN request
|
|
- * retransmit time. The initial value of retransmission interval
|
|
+ * in RFC 3489-bis), in miliseconds.
|
|
+ * This value is used to control the STUN request
|
|
+ * retransmit time. The initial value of retransmission interval
|
|
* would be set to this value, and will be doubled after each
|
|
* retransmission.
|
|
*/
|
|
@@ -78,7 +78,7 @@
|
|
|
|
/**
|
|
* The STUN transaction timeout value, in miliseconds.
|
|
- * After the last retransmission is sent and if no response is received
|
|
+ * After the last retransmission is sent and if no response is received
|
|
* after this time, the STUN transaction will be considered to have failed.
|
|
*
|
|
* The default value is 16x RTO (as per RFC 3489-bis).
|
|
@@ -201,8 +201,8 @@
|
|
|
|
|
|
/**
|
|
- * Number of seconds to refresh the permission/channel binding before the
|
|
- * permission/channel binding expires. This value should be greater than
|
|
+ * Number of seconds to refresh the permission/channel binding before the
|
|
+ * permission/channel binding expires. This value should be greater than
|
|
* PJ_TURN_PERM_TIMEOUT setting.
|
|
*/
|
|
#ifndef PJ_TURN_REFRESH_SEC_BEFORE
|
|
@@ -211,7 +211,7 @@
|
|
|
|
|
|
/**
|
|
- * The TURN session timer heart beat interval. When this timer occurs, the
|
|
+ * The TURN session timer heart beat interval. When this timer occurs, the
|
|
* TURN session will scan all the permissions/channel bindings to see which
|
|
* need to be refreshed.
|
|
*/
|
|
@@ -278,7 +278,7 @@
|
|
* the maximum number of components (PJ_ICE_MAX_COMP) value.
|
|
*/
|
|
#ifndef PJ_ICE_COMP_BITS
|
|
-# define PJ_ICE_COMP_BITS 1
|
|
+# define PJ_ICE_COMP_BITS 2
|
|
#endif
|
|
|
|
|
|
@@ -310,10 +310,10 @@
|
|
/**
|
|
* The number of bits to represent ICE candidate's local preference. The
|
|
* local preference is used to specify preference among candidates with
|
|
- * the same type, and ICE draft suggests 65535 as the default local
|
|
- * preference, which means we need 16 bits to represent the value. But
|
|
+ * the same type, and ICE draft suggests 65535 as the default local
|
|
+ * preference, which means we need 16 bits to represent the value. But
|
|
* since we don't have the facility to specify local preference, we'll
|
|
- * just disable this feature and let the preference sorted by the
|
|
+ * just disable this feature and let the preference sorted by the
|
|
* type only.
|
|
*
|
|
* Default: 0
|
|
@@ -339,20 +339,20 @@
|
|
* Default: 20
|
|
*/
|
|
#ifndef PJ_ICE_TA_VAL
|
|
-# define PJ_ICE_TA_VAL 20
|
|
+# define PJ_ICE_TA_VAL 50
|
|
#endif
|
|
|
|
|
|
/**
|
|
- * According to ICE Section 8.2. Updating States, if an In-Progress pair in
|
|
- * the check list is for the same component as a nominated pair, the agent
|
|
+ * According to ICE Section 8.2. Updating States, if an In-Progress pair in
|
|
+ * the check list is for the same component as a nominated pair, the agent
|
|
* SHOULD cease retransmissions for its check if its pair priority is lower
|
|
* than the lowest priority nominated pair for that component.
|
|
*
|
|
* If a higher priority check is In Progress, this rule would cause that
|
|
* check to be performed even when it most likely will fail.
|
|
*
|
|
- * The macro here controls if ICE session should cancel all In Progress
|
|
+ * The macro here controls if ICE session should cancel all In Progress
|
|
* checks for the same component regardless of its priority.
|
|
*
|
|
* Default: 1 (yes, cancel all)
|
|
@@ -382,6 +382,42 @@
|
|
# define ICE_CONTROLLED_AGENT_WAIT_NOMINATION_TIMEOUT 10000
|
|
#endif
|
|
|
|
+/**
|
|
+ * For TCP transport, this timer is time that a controlling agent must wait for
|
|
+ * incoming checks if the local candidate is of type "passive" or "s-o".
|
|
+ *
|
|
+ * Default: 10000 (milliseconds)
|
|
+ */
|
|
+#ifndef ICE_CONTROLLING_PASSIVE_TIMEOUT
|
|
+# define ICE_CONTROLLING_PASSIVE_TIMEOUT 10000
|
|
+#endif
|
|
+
|
|
+/**
|
|
+ * Allowed timeout for pending connections. TCP only.
|
|
+ *
|
|
+ * Default: 15000 (milliseconds)
|
|
+ */
|
|
+#ifndef PJ_ICE_TCP_CONNECTION_TIMEOUT
|
|
+# define PJ_ICE_TCP_CONNECTION_TIMEOUT 15000
|
|
+#endif
|
|
+
|
|
+/**
|
|
+ * Delay between two reconnection attempts. TCP only.
|
|
+ *
|
|
+ * Default: 500 (milliseconds)
|
|
+ */
|
|
+#ifndef PJ_ICE_TCP_RECONNECTION_DELAY
|
|
+# define PJ_ICE_TCP_RECONNECTION_DELAY 500
|
|
+#endif
|
|
+
|
|
+/**
|
|
+ * Maximum number of reconnection attempts. TCP only.
|
|
+ *
|
|
+ * Default: 24
|
|
+ */
|
|
+#ifndef PJ_ICE_TCP_MAX_RECONNECTION_COUNT
|
|
+# define PJ_ICE_TCP_MAX_RECONNECTION_COUNT 24
|
|
+#endif
|
|
|
|
/**
|
|
* For controlling agent if it uses regular nomination, specify the delay to
|
|
@@ -583,7 +619,7 @@
|
|
/** Default duration for searching UPnP Internet Gateway Devices (in seconds).
|
|
* Default: 5 seconds
|
|
*/
|
|
-#ifndef PJ_UPNP_DEFAULT_SEARCH_TIME
|
|
+#ifndef PJ_UPNP_DEFAULT_SEARCH_TIME
|
|
# define PJ_UPNP_DEFAULT_SEARCH_TIME 5
|
|
#endif
|
|
|
|
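
Simple arithmetic on the new TCP defaults above (an observation, not something the patch states): the reconnection budget is

    PJ_ICE_TCP_MAX_RECONNECTION_COUNT * PJ_ICE_TCP_RECONNECTION_DELAY = 24 * 500 ms = 12 s

which stays below PJ_ICE_TCP_CONNECTION_TIMEOUT (15 s), while ICE_CONTROLLING_PASSIVE_TIMEOUT (10 s) bounds how long a controlling agent waits for the remote side to connect towards a passive or simultaneous-open candidate.
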
diff --git a/pjnath/include/pjnath/ice_session.h b/pjnath/include/pjnath/ice_session.h
|
|
index e796b2539..60e0564db 100644
|
|
--- a/pjnath/include/pjnath/ice_session.h
|
|
+++ b/pjnath/include/pjnath/ice_session.h
|
|
@@ -1,4 +1,4 @@
|
|
-/*
|
|
+/*
|
|
* Copyright (C) 2008-2011 Teluu Inc. (http://www.teluu.com)
|
|
* Copyright (C) 2003-2008 Benny Prijono <benny@prijono.org>
|
|
*
|
|
@@ -14,7 +14,7 @@
|
|
*
|
|
* You should have received a copy of the GNU General Public License
|
|
* along with this program; if not, write to the Free Software
|
|
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
|
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
|
*/
|
|
#ifndef __PJNATH_ICE_SESSION_H__
|
|
#define __PJNATH_ICE_SESSION_H__
|
|
@@ -41,7 +41,7 @@ PJ_BEGIN_DECL
|
|
*
|
|
* \section pj_ice_sess_sec ICE Session
|
|
*
|
|
- * An ICE session, represented by #pj_ice_sess structure, is the lowest
|
|
+ * An ICE session, represented by #pj_ice_sess structure, is the lowest
|
|
* abstraction of ICE in PJNATH, and it is used to perform and manage
|
|
* connectivity checks of transport address candidates <b>within a
|
|
* single media stream</b> (note: this differs from what is described
|
|
@@ -50,12 +50,12 @@ PJ_BEGIN_DECL
|
|
*
|
|
* The ICE session described here is independent from any transports,
|
|
* meaning that the actual network I/O for this session would have to
|
|
- * be performed by the application, or higher layer abstraction.
|
|
+ * be performed by the application, or higher layer abstraction.
|
|
* Using this framework, application would give any incoming packets to
|
|
* the ICE session, and it would provide the ICE session with a callback
|
|
* to send outgoing message.
|
|
*
|
|
- * For higher abstraction of ICE where transport is included, please
|
|
+ * For higher abstraction of ICE where transport is included, please
|
|
* see \ref PJNATH_ICE_STREAM_TRANSPORT.
|
|
*
|
|
* \subsection pj_ice_sess_using_sec Using The ICE Session
|
|
@@ -162,6 +162,52 @@ typedef enum pj_ice_cand_type
|
|
|
|
} pj_ice_cand_type;
|
|
|
|
+/**
|
|
+ * ICE candidate transport types, as described by RFC 6544.
|
|
+ */
|
|
+typedef enum pj_ice_cand_transport {
|
|
+ /**
|
|
+ * Candidates UDP compatible
|
|
+ */
|
|
+ PJ_CAND_UDP,
|
|
+ /**
|
|
+ * Candidates sending outgoing TCP connections
|
|
+ */
|
|
+ PJ_CAND_TCP_ACTIVE,
|
|
+ /**
|
|
+ * Candidates accepting incoming TCP connections
|
|
+ */
|
|
+ PJ_CAND_TCP_PASSIVE,
|
|
+ /**
|
|
+ * Candidates capable of receiving incoming connections and sending
|
|
+ * connections
|
|
+ */
|
|
+ PJ_CAND_TCP_SO
|
|
+} pj_ice_cand_transport;
|
|
+
|
|
+/**
|
|
+ * ICE transport types, which will be used both to specify the connection
|
|
+ * type for reaching candidates and other client
|
|
+ */
|
|
+typedef enum pj_ice_tp_type {
|
|
+ /**
|
|
+ * UDP transport, which value corresponds to IANA protocol number.
|
|
+ */
|
|
+ PJ_ICE_TP_UDP = 17,
|
|
+
|
|
+ /**
|
|
+ * TCP transport, which value corresponds to IANA protocol number.
|
|
+ */
|
|
+ PJ_ICE_TP_TCP = 6,
|
|
+
|
|
+ /**
|
|
+ * TLS transport. The TLS transport will only be used as the connection
|
|
+ * type to reach the server and never as the allocation transport type.
|
|
+ */
|
|
+ PJ_ICE_TP_TLS = 255
|
|
+
|
|
+} pj_ice_tp_type;
|
|
+
|
|
|
|
/** Forward declaration for pj_ice_sess */
|
|
typedef struct pj_ice_sess pj_ice_sess;
|
|
@@ -169,12 +215,9 @@ typedef struct pj_ice_sess pj_ice_sess;
|
|
/** Forward declaration for pj_ice_sess_check */
|
|
typedef struct pj_ice_sess_check pj_ice_sess_check;
|
|
|
|
-/** Forward declaration for pj_ice_sess_cand */
|
|
-typedef struct pj_ice_sess_cand pj_ice_sess_cand;
|
|
-
|
|
/**
|
|
- * This structure describes ICE component.
|
|
- * A media stream may require multiple components, each of which has
|
|
+ * This structure describes ICE component.
|
|
+ * A media stream may require multiple components, each of which has
|
|
* to work for the media stream as a whole to work. For media streams
|
|
* based on RTP, there are two components per media stream - one for RTP,
|
|
* and one for RTCP.
|
|
@@ -204,32 +247,6 @@ typedef struct pj_ice_sess_comp
|
|
} pj_ice_sess_comp;
|
|
|
|
|
|
-/**
|
|
- * Data structure to be attached to internal message processing.
|
|
- */
|
|
-typedef struct pj_ice_msg_data
|
|
-{
|
|
- /** Transport ID for this message */
|
|
- unsigned transport_id;
|
|
-
|
|
- /** Flag to indicate whether data.req contains data */
|
|
- pj_bool_t has_req_data;
|
|
-
|
|
- /** The data */
|
|
- union data {
|
|
- /** Request data */
|
|
- struct request_data {
|
|
- pj_ice_sess *ice; /**< ICE session */
|
|
- pj_ice_sess_checklist *clist; /**< Checklist */
|
|
- unsigned ckid; /**< Check ID */
|
|
- pj_ice_sess_cand *lcand; /**< Local cand */
|
|
- pj_ice_sess_cand *rcand; /**< Remote cand */
|
|
- } req; /**< Request data */
|
|
- } data; /**< The data */
|
|
-
|
|
-} pj_ice_msg_data;
|
|
-
|
|
-
|
|
/**
|
|
* This structure describes an ICE candidate.
|
|
* ICE candidate is a transport address that is to be tested by ICE
|
|
@@ -238,7 +255,7 @@ typedef struct pj_ice_msg_data
|
|
* (server reflexive, relayed or host), priority, foundation, and
|
|
* base.
|
|
*/
|
|
-struct pj_ice_sess_cand
|
|
+typedef struct pj_ice_sess_cand
|
|
{
|
|
/**
|
|
* The candidate ID.
|
|
@@ -250,10 +267,10 @@ struct pj_ice_sess_cand
|
|
*/
|
|
pj_ice_cand_type type;
|
|
|
|
- /**
|
|
+ /**
|
|
* Status of this candidate. The value will be PJ_SUCCESS if candidate
|
|
* address has been resolved successfully, PJ_EPENDING when the address
|
|
- * resolution process is in progress, or other value when the address
|
|
+ * resolution process is in progress, or other value when the address
|
|
* resolution has completed with failure.
|
|
*/
|
|
pj_status_t status;
|
|
@@ -277,8 +294,8 @@ struct pj_ice_sess_cand
|
|
|
|
/**
|
|
* The foundation string, which is an identifier which value will be
|
|
- * equivalent for two candidates that are of the same type, share the
|
|
- * same base, and come from the same STUN server. The foundation is
|
|
+ * equivalent for two candidates that are of the same type, share the
|
|
+ * same base, and come from the same STUN server. The foundation is
|
|
* used to optimize ICE performance in the Frozen algorithm.
|
|
*/
|
|
pj_str_t foundation;
|
|
@@ -295,16 +312,16 @@ struct pj_ice_sess_cand
|
|
* the local address of the socket. For reflexive candidates, the value
|
|
* will be the public address allocated in NAT router for the host
|
|
* candidate and as reported in MAPPED-ADDRESS or XOR-MAPPED-ADDRESS
|
|
- * attribute of STUN Binding request. For relayed candidate, the value
|
|
+ * attribute of STUN Binding request. For relayed candidate, the value
|
|
* will be the address allocated in the TURN server by STUN Allocate
|
|
* request.
|
|
*/
|
|
pj_sockaddr addr;
|
|
|
|
/**
|
|
- * Base address of this candidate. "Base" refers to the address an agent
|
|
+ * Base address of this candidate. "Base" refers to the address an agent
|
|
* sends from for a particular candidate. For host candidates, the base
|
|
- * is the same as the host candidate itself. For reflexive candidates,
|
|
+ * is the same as the host candidate itself. For reflexive candidates,
|
|
* the base is the local IP address of the socket. For relayed candidates,
|
|
* the base address is the transport address allocated in the TURN server
|
|
* for this candidate.
|
|
@@ -317,7 +334,38 @@ struct pj_ice_sess_cand
|
|
*/
|
|
pj_sockaddr rel_addr;
|
|
|
|
-};
|
|
+ /**
|
|
+ * Transport used (TCP or UDP)
|
|
+ */
|
|
+ pj_ice_cand_transport transport;
|
|
+
|
|
+} pj_ice_sess_cand;
|
|
+
|
|
+
|
|
+/**
|
|
+ * Data structure to be attached to internal message processing.
|
|
+ */
|
|
+typedef struct pj_ice_msg_data
|
|
+{
|
|
+ /** Transport ID for this message */
|
|
+ unsigned transport_id;
|
|
+
|
|
+ /** Flag to indicate whether data.req contains data */
|
|
+ pj_bool_t has_req_data;
|
|
+
|
|
+ /** The data */
|
|
+ union data {
|
|
+ /** Request data */
|
|
+ struct request_data {
|
|
+ pj_ice_sess *ice; /**< ICE session */
|
|
+ pj_ice_sess_checklist *clist; /**< Checklist */
|
|
+ unsigned ckid; /**< Check ID */
|
|
+ pj_ice_sess_cand *lcand; /**< Local cand */
|
|
+ pj_ice_sess_cand *rcand; /**< Remote cand */
|
|
+ } req; /**< Request data */
|
|
+ } data; /**< The data */
|
|
+
|
|
+} pj_ice_msg_data;
|
|
|
|
|
|
/**
|
|
@@ -332,6 +380,22 @@ typedef enum pj_ice_sess_check_state
|
|
*/
|
|
PJ_ICE_SESS_CHECK_STATE_FROZEN,
|
|
|
|
+ /**
|
|
+ * The following status is used when a packet sent via TURN got a
|
|
+ * "Connection reset by peer". This mean that the peer didn't allow
|
|
+ * us to connect yet. The socket will be reconnected during the next
|
|
+ * loop.
|
|
+ */
|
|
+ PJ_ICE_SESS_CHECK_STATE_NEEDS_RETRY,
|
|
+
|
|
+ /**
|
|
+ * TODO (sblin): REMOVE THIS! - https://github.com/coturn/coturn/issues/408
|
|
+ * For now, this status is only used because sometimes, the first packet
|
|
+ * doesn't receive any response. So, we retry to send the packet every
|
|
+ * 50 loops.
|
|
+ */
|
|
+ PJ_ICE_SESS_CHECK_STATE_NEEDS_FIRST_PACKET,
|
|
+
|
|
/**
|
|
* A check has not been performed for this pair, and can be
|
|
* performed as soon as it is the highest priority Waiting pair on
|
|
@@ -339,6 +403,12 @@ typedef enum pj_ice_sess_check_state
|
|
*/
|
|
PJ_ICE_SESS_CHECK_STATE_WAITING,
|
|
|
|
+ /**
|
|
+ * A check has not been performed for this pair, but TCP socket
|
|
+ * is currently connecting to the pair. Wait to finish the connection.
|
|
+ */
|
|
+ PJ_ICE_SESS_CHECK_STATE_PENDING,
|
|
+
|
|
/**
|
|
* A check has not been performed for this pair, and can be
|
|
* performed as soon as it is the highest priority Waiting pair on
|
|
@@ -365,9 +435,9 @@ typedef enum pj_ice_sess_check_state
|
|
|
|
/**
|
|
* This structure describes an ICE connectivity check. An ICE check
|
|
- * contains a candidate pair, and will involve sending STUN Binding
|
|
- * Request transaction for the purposes of verifying connectivity.
|
|
- * A check is sent from the local candidate to the remote candidate
|
|
+ * contains a candidate pair, and will involve sending STUN Binding
|
|
+ * Request transaction for the purposes of verifying connectivity.
|
|
+ * A check is sent from the local candidate to the remote candidate
|
|
* of a candidate pair.
|
|
*/
|
|
struct pj_ice_sess_check
|
|
@@ -398,8 +468,8 @@ struct pj_ice_sess_check
|
|
pj_ice_sess_check_state state;
|
|
|
|
/**
|
|
- * STUN transmit data containing STUN Binding request that was sent
|
|
- * as part of this check. The value will only be set when this check
|
|
+ * STUN transmit data containing STUN Binding request that was sent
|
|
+ * as part of this check. The value will only be set when this check
|
|
* has a pending transaction, and is used to cancel the transaction
|
|
* when other check has succeeded.
|
|
*/
|
|
@@ -416,6 +486,13 @@ struct pj_ice_sess_check
|
|
* STUN transaction.
|
|
*/
|
|
pj_status_t err_code;
|
|
+
|
|
+#if PJ_HAS_TCP
|
|
+ /**
|
|
+ * TCP reconnection attempts counter.
|
|
+ */
|
|
+ int reconnect_count;
|
|
+#endif
|
|
};
|
|
|
|
|
|
@@ -445,7 +522,7 @@ typedef enum pj_ice_sess_checklist_state
|
|
|
|
|
|
/**
|
|
- * This structure represents ICE check list, that is an ordered set of
|
|
+ * This structure represents ICE check list, that is an ordered set of
|
|
* candidate pairs that an agent will use to generate checks.
|
|
*/
|
|
struct pj_ice_sess_checklist
|
|
@@ -509,7 +586,7 @@ typedef struct pj_ice_sess_cb
|
|
|
|
/**
|
|
* A mandatory callback which will be called by the ICE session when
|
|
- * it needs to send outgoing STUN packet.
|
|
+ * it needs to send outgoing STUN packet.
|
|
*
|
|
* @param ice The ICE session.
|
|
* @param comp_id ICE component ID.
|
|
@@ -519,7 +596,7 @@ typedef struct pj_ice_sess_cb
|
|
* @param dst_addr Packet destination address.
|
|
* @param dst_addr_len Length of destination address.
|
|
*/
|
|
- pj_status_t (*on_tx_pkt)(pj_ice_sess *ice, unsigned comp_id,
|
|
+ pj_status_t (*on_tx_pkt)(pj_ice_sess *ice, unsigned comp_id,
|
|
unsigned transport_id,
|
|
const void *pkt, pj_size_t size,
|
|
const pj_sockaddr_t *dst_addr,
|
|
@@ -534,15 +611,49 @@ typedef struct pj_ice_sess_cb
|
|
* @param transport_id Transport ID.
|
|
* @param pkt The whole packet.
|
|
* @param size Size of the packet.
|
|
- * @param src_addr Source address where this packet was received
|
|
+ * @param src_addr Source address where this packet was received
|
|
* from.
|
|
* @param src_addr_len The length of source address.
|
|
*/
|
|
- void (*on_rx_data)(pj_ice_sess *ice, unsigned comp_id,
|
|
- unsigned transport_id,
|
|
- void *pkt, pj_size_t size,
|
|
- const pj_sockaddr_t *src_addr,
|
|
- unsigned src_addr_len);
|
|
+ void (*on_rx_data)(pj_ice_sess *ice, unsigned comp_id,
|
|
+ unsigned transport_id,
|
|
+ void *pkt, pj_size_t size,
|
|
+ const pj_sockaddr_t *src_addr,
|
|
+ unsigned src_addr_len);
|
|
+
|
|
+ /**
|
|
+ * Wait for the TCP connection to be established, then send the connectivity check
|
|
+ *
|
|
+ * @param ice The ICE session.
|
|
+ * @param check_id The wanted check.
|
|
+ */
|
|
+ pj_status_t (*wait_tcp_connection)(pj_ice_sess *ice,
|
|
+ unsigned check_id);
|
|
+
|
|
+ /**
|
|
+ * Reconnect a reset TCP connection and send the connectivity check
|
|
+ * cf. PJ_ICE_SESS_CHECK_STATE_NEEDS_RETRY
|
|
+ *
|
|
+ * @param ice The ICE session.
|
|
+ * @param check_id The wanted check.
|
|
+ */
|
|
+ pj_status_t (*reconnect_tcp_connection)(pj_ice_sess *ice,
|
|
+ unsigned check_id);
|
|
+
|
|
+ /**
|
|
+ * Close TCP socket
|
|
+ *
|
|
+ * @param ice The ICE session.
|
|
+ * @param check_id The wanted check.
|
|
+ */
|
|
+ pj_status_t (*close_tcp_connection)(pj_ice_sess *ice,
|
|
+ unsigned check_id);
|
|
+
|
|
+ /**
|
|
+ * Notification that the ICE session is being destroyed (e.g. after an internal TCP keep-alive failure), so the error can be surfaced to the application
|
|
+ */
|
|
+ void (*on_ice_destroy)(pj_ice_sess *ice);
|
|
+
|
|
} pj_ice_sess_cb;
|
|
|
|
|
|
@@ -630,7 +741,7 @@ typedef enum pj_ice_sess_trickle
|
|
|
|
/**
|
|
* This structure describes various ICE session options. Application
|
|
- * configure the ICE session with these options by calling
|
|
+ * configure the ICE session with these options by calling
|
|
* #pj_ice_sess_set_options().
|
|
*/
|
|
typedef struct pj_ice_sess_options
|
|
@@ -643,7 +754,7 @@ typedef struct pj_ice_sess_options
|
|
|
|
/**
|
|
* For controlling agent if it uses regular nomination, specify the delay
|
|
- * to perform nominated check (connectivity check with USE-CANDIDATE
|
|
+ * to perform nominated check (connectivity check with USE-CANDIDATE
|
|
* attribute) after all components have a valid pair.
|
|
*
|
|
* Default value is PJ_ICE_NOMINATED_CHECK_DELAY.
|
|
@@ -651,14 +762,14 @@ typedef struct pj_ice_sess_options
|
|
unsigned nominated_check_delay;
|
|
|
|
/**
|
|
- * For a controlled agent, specify how long it wants to wait (in
|
|
- * milliseconds) for the controlling agent to complete sending
|
|
+ * For a controlled agent, specify how long it wants to wait (in
|
|
+ * milliseconds) for the controlling agent to complete sending
|
|
* connectivity check with nominated flag set to true for all components
|
|
* after the controlled agent has found that all connectivity checks in
|
|
* its checklist have been completed and there is at least one successful
|
|
* (but not nominated) check for every component.
|
|
*
|
|
- * Default value for this option is
|
|
+ * Default value for this option is
|
|
* ICE_CONTROLLED_AGENT_WAIT_NOMINATION_TIMEOUT. Specify -1 to disable
|
|
* this timer.
|
|
*/
|
|
@@ -672,6 +783,13 @@ typedef struct pj_ice_sess_options
|
|
*/
|
|
pj_ice_sess_trickle trickle;
|
|
|
|
+ /**
|
|
+ * For a controlling agent, specify how long it wants to wait
|
|
+ * in milliseconds for passive candidates and wait for connection
|
|
+ * attempts
|
|
+ */
|
|
+ int agent_passive_timeout;
|
|
+
|
|
} pj_ice_sess_options;
|
|
|
|
|
|
@@ -704,6 +822,7 @@ struct pj_ice_sess
|
|
pj_status_t ice_status; /**< Error status. */
|
|
pj_timer_entry timer; /**< ICE timer. */
|
|
pj_timer_entry timer_end_of_cand; /**< End-of-cand timer. */
|
|
+ pj_timer_entry timer_connect; /**< ICE timer tcp timeout*/
|
|
pj_ice_sess_cb cb; /**< Callback. */
|
|
|
|
pj_stun_config stun_cfg; /**< STUN settings. */
|
|
@@ -741,10 +860,10 @@ struct pj_ice_sess
|
|
|
|
/* Checklist */
|
|
pj_ice_sess_checklist clist; /**< Active checklist */
|
|
-
|
|
+
|
|
/* Valid list */
|
|
pj_ice_sess_checklist valid_list; /**< Valid list. */
|
|
-
|
|
+
|
|
/** Temporary buffer for misc stuffs to avoid using stack too much */
|
|
union {
|
|
char txt[128];
|
|
@@ -813,7 +932,7 @@ PJ_DECL(void) pj_ice_sess_options_default(pj_ice_sess_options *opt);
|
|
* @param cb ICE callback.
|
|
* @param local_ufrag Optional string to be used as local username to
|
|
* authenticate incoming STUN binding request. If
|
|
- * the value is NULL, a random string will be
|
|
+ * the value is NULL, a random string will be
|
|
* generated.
|
|
* @param local_passwd Optional string to be used as local password.
|
|
* @param grp_lock Optional group lock to be used by this session.
|
|
@@ -911,8 +1030,8 @@ PJ_DECL(pj_status_t) pj_ice_sess_change_role(pj_ice_sess *ice,
|
|
/**
|
|
* Assign a custom preference values for ICE candidate types. By assigning
|
|
* custom preference value, application can control the order of candidates
|
|
- * to be checked first. The default preference settings is to use 126 for
|
|
- * host candidates, 100 for server reflexive candidates, 110 for peer
|
|
+ * to be checked first. The default preference settings is to use 126 for
|
|
+ * host candidates, 100 for server reflexive candidates, 110 for peer
|
|
* reflexive candidates, an 0 for relayed candidates.
|
|
*
|
|
* Note that this function must be called before any candidates are added
|
|
@@ -932,7 +1051,7 @@ PJ_DECL(pj_status_t) pj_ice_sess_set_prefs(pj_ice_sess *ice,
|
|
|
|
/**
|
|
* Add a candidate to this ICE session. Application must add candidates for
|
|
- * each components ID before it can start pairing the candidates and
|
|
+ * each components ID before it can start pairing the candidates and
|
|
* performing connectivity checks.
|
|
*
|
|
* @param ice ICE session instance.
|
|
@@ -948,6 +1067,7 @@ PJ_DECL(pj_status_t) pj_ice_sess_set_prefs(pj_ice_sess *ice,
|
|
* @param rel_addr Optional related address.
|
|
* @param addr_len Length of addresses.
|
|
* @param p_cand_id Optional pointer to receive the candidate ID.
|
|
+ * @param transport Candidate's transport type (UDP or TCP).
|
|
*
|
|
* @return PJ_SUCCESS if candidate is successfully added.
|
|
*/
|
|
@@ -961,14 +1081,15 @@ PJ_DECL(pj_status_t) pj_ice_sess_add_cand(pj_ice_sess *ice,
|
|
const pj_sockaddr_t *base_addr,
|
|
const pj_sockaddr_t *rel_addr,
|
|
int addr_len,
|
|
- unsigned *p_cand_id);
|
|
+ unsigned *p_cand_id,
|
|
+ pj_ice_cand_transport transport);
|
|
|
|
/**
|
|
* Find default candidate for the specified component ID, using this
|
|
* rule:
|
|
* - if the component has a successful candidate pair, then the
|
|
* local candidate of this pair will be returned.
|
|
- * - otherwise a relay, reflexive, or host candidate will be selected
|
|
+ * - otherwise a relay, reflexive, or host candidate will be selected
|
|
* on that specified order.
|
|
*
|
|
* @param ice The ICE session instance.
|
|
@@ -991,18 +1112,18 @@ PJ_DECL(pj_status_t) pj_ice_sess_find_default_cand(pj_ice_sess *ice,
|
|
* #pj_ice_sess_start_check().
|
|
*
|
|
* @param ice ICE session instance.
|
|
- * @param rem_ufrag Remote ufrag, as seen in the SDP received from
|
|
+ * @param rem_ufrag Remote ufrag, as seen in the SDP received from
|
|
* the remote agent.
|
|
* @param rem_passwd Remote password, as seen in the SDP received from
|
|
* the remote agent.
|
|
* @param rem_cand_cnt Number of remote candidates.
|
|
* @param rem_cand Remote candidate array. Remote candidates are
|
|
- * gathered from the SDP received from the remote
|
|
+ * gathered from the SDP received from the remote
|
|
* agent.
|
|
*
|
|
* @return PJ_SUCCESS or the appropriate error code.
|
|
*/
|
|
-PJ_DECL(pj_status_t)
|
|
+PJ_DECL(pj_status_t)
|
|
pj_ice_sess_create_check_list(pj_ice_sess *ice,
|
|
const pj_str_t *rem_ufrag,
|
|
const pj_str_t *rem_passwd,
|
|
@@ -1020,13 +1141,13 @@ pj_ice_sess_create_check_list(pj_ice_sess *ice,
|
|
* This function is only applicable when trickle ICE is not disabled.
|
|
*
|
|
* @param ice ICE session instance.
|
|
- * @param rem_ufrag Remote ufrag, as seen in the SDP received from
|
|
+ * @param rem_ufrag Remote ufrag, as seen in the SDP received from
|
|
* the remote agent.
|
|
* @param rem_passwd Remote password, as seen in the SDP received from
|
|
* the remote agent.
|
|
* @param rem_cand_cnt Number of remote candidates.
|
|
* @param rem_cand Remote candidate array. Remote candidates are
|
|
- * gathered from the SDP received from the remote
|
|
+ * gathered from the SDP received from the remote
|
|
* agent.
|
|
* @param trickle_done Flag to indicate end of trickling, set to PJ_TRUE
|
|
* after all local candidates have been gathered AND
|
|
@@ -1035,7 +1156,7 @@ pj_ice_sess_create_check_list(pj_ice_sess *ice,
|
|
*
|
|
* @return PJ_SUCCESS or the appropriate error code.
|
|
*/
|
|
-PJ_DECL(pj_status_t)
|
|
+PJ_DECL(pj_status_t)
|
|
pj_ice_sess_update_check_list(pj_ice_sess *ice,
|
|
const pj_str_t *rem_ufrag,
|
|
const pj_str_t *rem_passwd,
|
|
@@ -1108,6 +1229,44 @@ PJ_DECL(pj_status_t) pj_ice_sess_on_rx_pkt(pj_ice_sess *ice,
|
|
const pj_sockaddr_t *src_addr,
|
|
int src_addr_len);
|
|
|
|
+/**
|
|
+ * Notification when the ICE session gets a new incoming connection
|
|
+ *
|
|
+ * @param ice The ICE session.
|
|
+ * @param transport_id Related transport
|
|
+ * @param status PJ_SUCCESS when connection is made, or any errors
|
|
+ * if the connection has failed (or if the peer has
|
|
+ * disconnected after an established connection).
|
|
+ * @param remote_addr Connected remote address
|
|
+ */
|
|
+PJ_DECL(void) ice_sess_on_peer_connection(pj_ice_sess *ice,
|
|
+ pj_uint8_t transport_id,
|
|
+ pj_status_t status,
|
|
+ pj_sockaddr_t* remote_addr);
|
|
+
|
|
+/**
|
|
+ * Notification when the ICE session gets a reset connection
|
|
+ * cf PJ_ICE_SESS_CHECK_STATE_NEEDS_RETRY
|
|
+ *
|
|
+ * @param ice The ICE session.
|
|
+ * @param transport_id Related transport
|
|
+ * @param remote_addr Connected remote address
|
|
+ */
|
|
+PJ_DECL(void) ice_sess_on_peer_reset_connection(pj_ice_sess *ice,
|
|
+ pj_uint8_t transport_id,
|
|
+ pj_sockaddr_t* remote_addr);
|
|
+
|
|
+/**
|
|
+ * Notification when the ICE session gets a new packet
|
|
+ * Used to remove the PJ_ICE_SESS_CHECK_STATE_NEEDS_FIRST_PACKET status
|
|
+ *
|
|
+ * @param ice The ICE session.
|
|
+ * @param transport_id Related transport
|
|
+ * @param remote_addr Connected remote address
|
|
+ */
|
|
+PJ_DECL(void) ice_sess_on_peer_packet(pj_ice_sess *ice,
|
|
+ pj_uint8_t transport_id,
|
|
+ pj_sockaddr_t* remote_addr);
|
|
|
|
|
|
/**
|
|
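
pj_ice_sess_add_cand() now takes the candidate's transport as a trailing argument. A minimal sketch (caller code, not part of the patch; the parameters ahead of the new argument follow the unmodified upstream signature and the helper name is made up) of registering a host candidate that accepts incoming TCP connections:

#include <pjlib.h>
#include <pjnath.h>

static pj_status_t add_tcp_passive_host_cand(pj_ice_sess *ice,
                                             unsigned comp_id,
                                             unsigned transport_id,
                                             const pj_str_t *foundation,
                                             const pj_sockaddr *local_addr)
{
    unsigned cand_id;

    return pj_ice_sess_add_cand(ice, comp_id, transport_id,
                                PJ_ICE_CAND_TYPE_HOST,
                                65535,               /* local preference     */
                                foundation,
                                local_addr,          /* candidate address    */
                                local_addr,          /* base == host address */
                                NULL,                /* no related address   */
                                pj_sockaddr_get_len(local_addr),
                                &cand_id,
                                PJ_CAND_TCP_PASSIVE  /* new argument         */);
}
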
diff --git a/pjnath/include/pjnath/ice_strans.h b/pjnath/include/pjnath/ice_strans.h
|
|
index 0f2510aa4..f8aa41eae 100644
|
|
--- a/pjnath/include/pjnath/ice_strans.h
|
|
+++ b/pjnath/include/pjnath/ice_strans.h
|
|
@@ -218,6 +218,13 @@ typedef struct pj_ice_strans_cb
|
|
const pj_ice_sess_cand *cand,
|
|
pj_bool_t end_of_cand);
|
|
|
|
+ /**
|
|
+ * This callback is called if an internal operation fails
|
|
+ *
|
|
+ * @param ice_st The ICE stream transport.
|
|
+ */
|
|
+ void (*on_destroy)(pj_ice_strans *ice_st);
|
|
+
|
|
} pj_ice_strans_cb;
|
|
|
|
|
|
@@ -300,6 +307,13 @@ typedef struct pj_ice_strans_stun_cfg
|
|
*/
|
|
pj_bool_t ignore_stun_error;
|
|
|
|
+ /**
|
|
+ * Type of connection to the STUN server.
|
|
+ *
|
|
+ * Default is PJ_STUN_TP_UDP.
|
|
+ */
|
|
+ pj_stun_tp_type conn_type;
|
|
+
|
|
} pj_ice_strans_stun_cfg;
|
|
|
|
|
|
@@ -315,6 +329,13 @@ typedef struct pj_ice_strans_turn_cfg
|
|
*/
|
|
int af;
|
|
|
|
+ /**
|
|
+ * If we want to use UDP or TCP as described by RFC 6544.
|
|
+ * This will discover candidates via TCP sockets. Then it will
|
|
+ * transfer messages on the transport via TCP.
|
|
+ */
|
|
+ pj_ice_tp_type protocol;
|
|
+
|
|
/**
|
|
* Optional TURN socket settings. The default values will be
|
|
* initialized by #pj_turn_sock_cfg_default(). This contains
|
|
@@ -394,6 +415,13 @@ typedef struct pj_ice_strans_cfg
|
|
*/
|
|
int af;
|
|
|
|
+ /**
|
|
+ * If we want to use UDP or TCP as described by RFC 6544.
|
|
+ * This will discover candidates via TCP sockets. Then it will
|
|
+ * transfer messages on the transport via TCP.
|
|
+ */
|
|
+ pj_ice_tp_type protocol;
|
|
+
|
|
/**
|
|
* STUN configuration which contains the timer heap and
|
|
* ioqueue instance to be used, and STUN retransmission
|
|
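
At the stream-transport level the new knobs live in pj_ice_strans_cfg. A minimal sketch (caller code, not part of the patch; the stun and turn member names are the upstream ones and the helper is hypothetical) of switching a transport to ICE-TCP:

#include <pjlib.h>
#include <pjnath.h>

static void init_ice_tcp(pj_ice_strans_cfg *cfg)
{
    pj_ice_strans_cfg_default(cfg);

    /* Gather candidates and run connectivity checks over TCP (RFC 6544). */
    cfg->protocol = PJ_ICE_TP_TCP;

    /* Reach the STUN server over TCP as well (the new conn_type field). */
    cfg->stun.conn_type = PJ_STUN_TP_TCP;

    /* Ask the TURN client to reach the relay over TCP too. */
    cfg->turn.protocol = PJ_ICE_TP_TCP;
}
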
diff --git a/pjnath/include/pjnath/stun_session.h b/pjnath/include/pjnath/stun_session.h
|
|
index 4a5076bb1..56cc7dc3a 100644
|
|
--- a/pjnath/include/pjnath/stun_session.h
|
|
+++ b/pjnath/include/pjnath/stun_session.h
|
|
@@ -1,4 +1,4 @@
|
|
-/*
|
|
+/*
|
|
* Copyright (C) 2008-2011 Teluu Inc. (http://www.teluu.com)
|
|
* Copyright (C) 2003-2008 Benny Prijono <benny@prijono.org>
|
|
*
|
|
@@ -14,7 +14,7 @@
|
|
*
|
|
* You should have received a copy of the GNU General Public License
|
|
* along with this program; if not, write to the Free Software
|
|
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
|
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
|
*/
|
|
#ifndef __PJNATH_STUN_SESSION_H__
|
|
#define __PJNATH_STUN_SESSION_H__
|
|
@@ -40,26 +40,26 @@ PJ_BEGIN_DECL
|
|
* @addtogroup PJNATH_STUN_SESSION
|
|
* @{
|
|
*
|
|
- * This is is a transport-independent object to manage a client or server
|
|
+ * This is is a transport-independent object to manage a client or server
|
|
* STUN session. It has the following features:
|
|
- *
|
|
+ *
|
|
* - <b>transport independent</b>:\n
|
|
* the object does not have it's own socket, but rather it provides
|
|
* functions and callbacks to send and receive packets. This way the
|
|
- * object can be used by different transport types (e.g. UDP, TCP,
|
|
+ * object can be used by different transport types (e.g. UDP, TCP,
|
|
* TLS, etc.) as well as better integration to application which
|
|
* already has its own means to send and receive packets.
|
|
- *
|
|
+ *
|
|
* - <b>authentication management</b>:\n
|
|
* the object manages STUN authentication throughout the lifetime of
|
|
* the session. For client sessions, once it's given a credential to
|
|
* authenticate itself with the server, the object will automatically
|
|
* add authentication info (the MESSAGE-INTEGRITY) to the request as
|
|
- * well as authenticate the response. It will also handle long-term
|
|
+ * well as authenticate the response. It will also handle long-term
|
|
* authentication challenges, including handling of nonce expiration,
|
|
- * and retry the request automatically. For server sessions, it can
|
|
+ * and retry the request automatically. For server sessions, it can
|
|
* be configured to authenticate incoming requests automatically.
|
|
- *
|
|
+ *
|
|
* - <b>static or dynamic credential</b>:\n
|
|
* application may specify static or dynamic credential to be used by
|
|
* the STUN session. Static credential means a static combination of
|
|
@@ -67,16 +67,16 @@ PJ_BEGIN_DECL
|
|
* duration), while dynamic credential provides callback to ask the
|
|
* application about which username/password to use everytime
|
|
* authentication is about to be performed.
|
|
- *
|
|
+ *
|
|
* - <b>client transaction management</b>:\n
|
|
* outgoing requests may be sent with a STUN transaction for reliability,
|
|
* and the object will manage the transaction internally (including
|
|
* performing retransmissions). Application will be notified about the
|
|
* result of the request when the response arrives (or the transaction
|
|
* times out). When the request is challenged with authentication, the
|
|
- * object will retry the request with new authentication info, and
|
|
+ * object will retry the request with new authentication info, and
|
|
* application will be notified about the final result of this request.
|
|
- *
|
|
+ *
|
|
* - <b>server transaction management</b>:\n
|
|
* application may ask response to incoming requests to be cached by
|
|
* the object, and in this case the object will check for cached
|
|
@@ -95,7 +95,7 @@ PJ_BEGIN_DECL
|
|
*
|
|
* - <b>create the STUN session</b>:\n
|
|
* by calling #pj_stun_session_create(). Among other things, this
|
|
- * function requires the instance of #pj_stun_config and also
|
|
+ * function requires the instance of #pj_stun_config and also
|
|
* #pj_stun_session_cb structure which stores callbacks to send
|
|
* outgoing packets as well as to notify application about incoming
|
|
* STUN requests, responses, and indicates and other events.
|
|
@@ -124,8 +124,8 @@ PJ_BEGIN_DECL
|
|
* use #pj_stun_session_send_msg() to send outgoing STUN messages (this
|
|
* includes STUN requests, indications, and responses). The function has
|
|
* options whether to retransmit the request (for non reliable transports)
|
|
- * or to cache the response if we're sending response. This function in
|
|
- * turn will call the \a on_send_msg() callback of #pj_stun_session_cb
|
|
+ * or to cache the response if we're sending response. This function in
|
|
+ * turn will call the \a on_send_msg() callback of #pj_stun_session_cb
|
|
* to request the application to send the packet.
|
|
*
|
|
* - <b>handling incoming packet:</b>\n
|
|
@@ -146,7 +146,7 @@ PJ_BEGIN_DECL
|
|
*
|
|
* - <b>creating and sending response:</b>\n
|
|
* create the STUN response with #pj_stun_session_create_res(). This will
|
|
- * create a transmit data buffer containing a blank STUN response. You
|
|
+ * create a transmit data buffer containing a blank STUN response. You
|
|
* will then typically need to add STUN attributes that are relevant to
|
|
* the response, but note that some default attributes will
|
|
* be added by the session later when the message is sent (such as
|
|
@@ -157,7 +157,7 @@ PJ_BEGIN_DECL
|
|
* - <b>convenient way to send response:</b>\n
|
|
* the #pj_stun_session_respond() is provided as a convenient way to
|
|
* create and send simple STUN responses, such as error responses.
|
|
- *
|
|
+ *
|
|
* - <b>destroying the session:</b>\n
|
|
* once the session is done, use #pj_stun_session_destroy() to destroy
|
|
* the session.
|
|
@@ -173,6 +173,29 @@ typedef struct pj_stun_rx_data pj_stun_rx_data;
|
|
/** Forward declaration for pj_stun_session */
|
|
typedef struct pj_stun_session pj_stun_session;
|
|
|
|
+/**
|
|
+ * STUN transport types, which will be used both to specify the connection
|
|
+ * type for reaching the STUN server and the type of allocation transport to be
|
|
+ * requested from the server (the REQUESTED-TRANSPORT attribute).
|
|
+ */
|
|
+typedef enum pj_stun_tp_type {
|
|
+ /**
|
|
+ * UDP transport; the value corresponds to the IANA protocol number.
|
|
+ */
|
|
+ PJ_STUN_TP_UDP = 17,
|
|
+
|
|
+ /**
|
|
+ * TCP transport; the value corresponds to the IANA protocol number.
|
|
+ */
|
|
+ PJ_STUN_TP_TCP = 6,
|
|
+
|
|
+ /**
|
|
+ * TLS transport. The TLS transport will only be used as the connection
|
|
+ * type to reach the server and never as the allocation transport type.
|
|
+ */
|
|
+ PJ_STUN_TP_TLS = 255
|
|
+
|
|
+} pj_stun_tp_type;
|
|
|
|
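As a quick illustration of the new enum (a sketch against the values introduced above, not part of the patch, assuming <pjnath.h> is included), the UDP/TCP members map back to readable names and their numeric values can be compared directly against IANA protocol numbers:

#include <pjnath.h>

/* Sketch only: map the new transport type to a printable name. */
static const char *stun_tp_name(pj_stun_tp_type tp)
{
    switch (tp) {
    case PJ_STUN_TP_UDP: return "UDP";  /* 17, IANA protocol number */
    case PJ_STUN_TP_TCP: return "TCP";  /*  6, IANA protocol number */
    case PJ_STUN_TP_TLS: return "TLS";  /* 255, never used for allocations */
    default:             return "unknown";
    }
}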
/**
|
|
* This is the callback to be registered to pj_stun_session, to send
|
|
@@ -186,7 +209,7 @@ typedef struct pj_stun_session_cb
|
|
*
|
|
* @param sess The STUN session.
|
|
* @param token The token associated with this outgoing message
|
|
- * and was set by the application. This token was
|
|
+ * and was set by the application. This token was
|
|
* set by application in pj_stun_session_send_msg()
|
|
* for outgoing messages that are initiated by the
|
|
* application, or in pj_stun_session_on_rx_pkt()
|
|
@@ -209,11 +232,11 @@ typedef struct pj_stun_session_cb
|
|
const pj_sockaddr_t *dst_addr,
|
|
unsigned addr_len);
|
|
|
|
- /**
|
|
+ /**
|
|
* Callback to be called on incoming STUN request message. This function
|
|
* is called when application calls pj_stun_session_on_rx_pkt() and when
|
|
* the STUN session has detected that the incoming STUN message is a
|
|
- * STUN request message. In the
|
|
+ * STUN request message. In the
|
|
* callback processing, application MUST create a response by calling
|
|
* pj_stun_session_create_response() function and send the response
|
|
* with pj_stun_session_send_msg() function, before returning from
|
|
@@ -241,7 +264,7 @@ typedef struct pj_stun_session_cb
|
|
unsigned src_addr_len);
|
|
|
|
/**
|
|
- * Callback to be called when response is received or the transaction
|
|
+ * Callback to be called when response is received or the transaction
|
|
* has timed out. This callback is called either when application calls
|
|
* pj_stun_session_on_rx_pkt() with the packet containing a STUN
|
|
* response for the client transaction, or when the internal timer of
|
|
@@ -254,7 +277,7 @@ typedef struct pj_stun_session_cb
|
|
* or other error has occurred, and the response
|
|
* argument may be NULL.
|
|
* Note that when the status is not success, the
|
|
- * response may contain non-NULL value if the
|
|
+ * response may contain non-NULL value if the
|
|
* response contains STUN ERROR-CODE attribute.
|
|
* @param token The token that was set by the application when
|
|
* calling pj_stun_session_send_msg() function.
|
|
@@ -264,9 +287,9 @@ typedef struct pj_stun_session_cb
|
|
* @param response The response message, on successful transaction,
|
|
* or otherwise MAY BE NULL if status is not success.
|
|
* Note that when the status is not success, this
|
|
- * argument may contain non-NULL value if the
|
|
+ * argument may contain non-NULL value if the
|
|
* response contains STUN ERROR-CODE attribute.
|
|
- * @param src_addr The source address where the response was
|
|
+ * @param src_addr The source address where the response was
|
|
* received, or NULL if the response is NULL.
|
|
* @param src_addr_len The length of the source address.
|
|
*/
|
|
@@ -306,6 +329,38 @@ typedef struct pj_stun_session_cb
|
|
const pj_sockaddr_t *src_addr,
|
|
unsigned src_addr_len);
|
|
|
|
+ /**
|
|
+ * Notification when the STUN session gets a ConnectionAttempt indication.
|
|
+ *
|
|
+ * @param stun_session The STUN session.
|
|
+ * @param status PJ_SUCCESS when connection is made, or any errors
|
|
+ * if the connection has failed (or if the peer has
|
|
+ * disconnected after an established connection).
|
|
+ * @param remote_addr The address of the remote peer that connected.
|
|
+ */
|
|
+ void (*on_peer_connection)(pj_stun_session *sess,
|
|
+ pj_status_t status,
|
|
+ pj_sockaddr_t* remote_addr);
|
|
+
|
|
+ /**
|
|
+ * Notification when the STUN connection is reset (TCP only).
|
|
+ *
|
|
+ * @param stun_session The STUN session.
|
|
+ * @param remote_addr The remote address whose connection was reset.
|
|
+ */
|
|
+ void (*on_peer_reset_connection)(pj_stun_session *sess,
|
|
+ pj_sockaddr_t*
|
|
+ remote_addr);
|
|
+
|
|
+ /**
|
|
+ * Notification when a packet is received from a peer (TCP only).
|
|
+ *
|
|
+ * @param stun_session The STUN session.
|
|
+ * @param remote_addr The remote address the packet was received from.
|
|
+ */
|
|
+ void (*on_peer_packet)(pj_stun_session *sess,
|
|
+ pj_sockaddr_t* remote_addr);
|
|
+
|
|
} pj_stun_session_cb;
|
|
|
|
|
|
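A minimal sketch (not part of the patch) of how an application could wire up the new TCP-related callbacks; only the fields added above are shown, and the existing mandatory callbacks (on_send_msg, etc.) are assumed to be registered as before:

static void my_on_peer_connection(pj_stun_session *sess, pj_status_t status,
                                  pj_sockaddr_t *remote_addr)
{
    PJ_UNUSED_ARG(sess);
    PJ_UNUSED_ARG(remote_addr);
    if (status != PJ_SUCCESS) {
        /* The connection failed, or the peer disconnected afterwards. */
    }
}

static void init_session_cb(pj_stun_session_cb *cb)
{
    /* cb is assumed to be zero-initialized and already holding the
     * mandatory callbacks. */
    cb->on_peer_connection = &my_on_peer_connection;
    /* on_peer_reset_connection and on_peer_packet can be set the same way. */
}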
@@ -320,8 +375,8 @@ struct pj_stun_rx_data
|
|
pj_stun_msg *msg;
|
|
|
|
/**
|
|
- * Credential information that is found and used to authenticate
|
|
- * incoming request. Application may use this information when
|
|
+ * Credential information that is found and used to authenticate
|
|
+ * incoming request. Application may use this information when
|
|
* generating authentication for the outgoing response.
|
|
*/
|
|
pj_stun_req_cred_info info;
|
|
@@ -348,7 +403,7 @@ struct pj_stun_tx_data
|
|
pj_bool_t retransmit; /**< Retransmit request? */
|
|
pj_uint32_t msg_magic; /**< Message magic. */
|
|
pj_uint8_t msg_key[12]; /**< Message/transaction key. */
|
|
-
|
|
+
|
|
pj_grp_lock_t *grp_lock; /**< Group lock (for resp cache). */
|
|
|
|
pj_stun_req_cred_info auth_info; /**< Credential info */
|
|
@@ -390,6 +445,7 @@ typedef enum pj_stun_sess_msg_log_flag
|
|
* @param grp_lock Optional group lock to be used by this session.
|
|
* If NULL, the session will create one itself.
|
|
* @param p_sess Pointer to receive STUN session instance.
|
|
+ * @param conn_type Whether the session uses UDP or TCP.
|
|
*
|
|
* @return PJ_SUCCESS on success, or the appropriate error code.
|
|
*/
|
|
@@ -398,7 +454,8 @@ PJ_DECL(pj_status_t) pj_stun_session_create(pj_stun_config *cfg,
|
|
const pj_stun_session_cb *cb,
|
|
pj_bool_t fingerprint,
|
|
pj_grp_lock_t *grp_lock,
|
|
- pj_stun_session **p_sess);
|
|
+ pj_stun_session **p_sess,
|
|
+ pj_stun_tp_type conn_type);
|
|
|
|
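For reference, the updated call simply gains a trailing transport type, mirroring the usage changes in pjnath-test/sess_auth.c later in this diff; a sketch:

/* Sketch: create a STUN session that will run on top of TCP. */
static pj_status_t create_tcp_stun_session(pj_stun_config *stun_cfg,
                                           const pj_stun_session_cb *cb,
                                           pj_stun_session **p_sess)
{
    return pj_stun_session_create(stun_cfg, "tcpsess", cb, PJ_FALSE,
                                  NULL /* grp_lock */, p_sess,
                                  PJ_STUN_TP_TCP);
}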
/**
|
|
* Destroy the STUN session and all objects created in the context of
|
|
@@ -499,7 +556,7 @@ PJ_DECL(pj_bool_t) pj_stun_session_use_fingerprint(pj_stun_session *sess,
|
|
|
|
/**
|
|
* Create a STUN request message. After the message has been successfully
|
|
- * created, application can send the message by calling
|
|
+ * created, application can send the message by calling
|
|
* pj_stun_session_send_msg().
|
|
*
|
|
* @param sess The STUN session instance.
|
|
@@ -520,7 +577,7 @@ PJ_DECL(pj_status_t) pj_stun_session_create_req(pj_stun_session *sess,
|
|
|
|
/**
|
|
* Create a STUN Indication message. After the message has been successfully
|
|
- * created, application can send the message by calling
|
|
+ * created, application can send the message by calling
|
|
* pj_stun_session_send_msg().
|
|
*
|
|
* @param sess The STUN session instance.
|
|
@@ -537,8 +594,8 @@ PJ_DECL(pj_status_t) pj_stun_session_create_ind(pj_stun_session *sess,
|
|
pj_stun_tx_data **p_tdata);
|
|
|
|
/**
|
|
- * Create a STUN response message. After the message has been
|
|
- * successfully created, application can send the message by calling
|
|
+ * Create a STUN response message. After the message has been
|
|
+ * successfully created, application can send the message by calling
|
|
* pj_stun_session_send_msg(). Alternatively application may use
|
|
* pj_stun_session_respond() to create and send response in one function
|
|
* call.
|
|
@@ -576,8 +633,8 @@ PJ_DECL(pj_status_t) pj_stun_session_create_res(pj_stun_session *sess,
|
|
* @param sess The STUN session instance.
|
|
* @param token Optional token which will be given back to application in
|
|
* \a on_send_msg() callback and \a on_request_complete()
|
|
- * callback, if the message is a STUN request message.
|
|
- * Internally this function will put the token in the
|
|
+ * callback, if the message is a STUN request message.
|
|
+ * Internally this function will put the token in the
|
|
* \a token field of pj_stun_tx_data, hence it will
|
|
* overwrite any value that the application puts there.
|
|
* @param cache_res If the message is a response message for an incoming
|
|
@@ -595,8 +652,8 @@ PJ_DECL(pj_status_t) pj_stun_session_create_res(pj_stun_session *sess,
|
|
* be sent.
|
|
*
|
|
* @return PJ_SUCCESS on success, or the appropriate error code.
|
|
- * This function will return PJNATH_ESTUNDESTROYED if
|
|
- * application has destroyed the session in
|
|
+ * This function will return PJNATH_ESTUNDESTROYED if
|
|
+ * application has destroyed the session in
|
|
* \a on_send_msg() callback.
|
|
*/
|
|
PJ_DECL(pj_status_t) pj_stun_session_send_msg(pj_stun_session *sess,
|
|
@@ -625,30 +682,30 @@ PJ_DECL(pj_status_t) pj_stun_session_send_msg(pj_stun_session *sess,
|
|
* be used.
|
|
* @param token Optional token which will be given back to application in
|
|
* \a on_send_msg() callback and \a on_request_complete()
|
|
- * callback, if the message is a STUN request message.
|
|
- * Internally this function will put the token in the
|
|
+ * callback, if the message is a STUN request message.
|
|
+ * Internally this function will put the token in the
|
|
* \a token field of pj_stun_tx_data, hence it will
|
|
* overwrite any value that the application puts there.
|
|
* @param cache Specify whether session should cache this response for
|
|
* future request retransmission. If TRUE, subsequent request
|
|
- * retransmission will be handled by the session and it
|
|
+ * retransmission will be handled by the session and it
|
|
* will not call request callback.
|
|
* @param dst_addr Destination address of the response (or equal to the
|
|
* source address of the original request).
|
|
* @param addr_len Address length.
|
|
*
|
|
* @return PJ_SUCCESS on success, or the appropriate error code.
|
|
- * This function will return PJNATH_ESTUNDESTROYED if
|
|
- * application has destroyed the session in
|
|
+ * This function will return PJNATH_ESTUNDESTROYED if
|
|
+ * application has destroyed the session in
|
|
* \a on_send_msg() callback.
|
|
*/
|
|
-PJ_DECL(pj_status_t) pj_stun_session_respond(pj_stun_session *sess,
|
|
+PJ_DECL(pj_status_t) pj_stun_session_respond(pj_stun_session *sess,
|
|
const pj_stun_rx_data *rdata,
|
|
- unsigned code,
|
|
+ unsigned code,
|
|
const char *err_msg,
|
|
void *token,
|
|
- pj_bool_t cache,
|
|
- const pj_sockaddr_t *dst_addr,
|
|
+ pj_bool_t cache,
|
|
+ const pj_sockaddr_t *dst_addr,
|
|
unsigned addr_len);
|
|
|
|
/**
|
|
@@ -665,8 +722,8 @@ PJ_DECL(pj_status_t) pj_stun_session_respond(pj_stun_session *sess,
|
|
* callback. This error status MUST NOT be PJ_SUCCESS.
|
|
*
|
|
* @return PJ_SUCCESS if transaction is successfully cancelled.
|
|
- * This function will return PJNATH_ESTUNDESTROYED if
|
|
- * application has destroyed the session in
|
|
+ * This function will return PJNATH_ESTUNDESTROYED if
|
|
+ * application has destroyed the session in
|
|
* \a on_request_complete() callback.
|
|
*/
|
|
PJ_DECL(pj_status_t) pj_stun_session_cancel_req(pj_stun_session *sess,
|
|
@@ -685,7 +742,7 @@ PJ_DECL(pj_status_t) pj_stun_session_cancel_req(pj_stun_session *sess,
|
|
* needs to be incremented.
|
|
*
|
|
* @return PJ_SUCCESS on success, or the appropriate error.
|
|
- * This function will return PJNATH_ESTUNDESTROYED if
|
|
+ * This function will return PJNATH_ESTUNDESTROYED if
|
|
* application has destroyed the session in \a on_send_msg()
|
|
* callback.
|
|
*/
|
|
@@ -716,8 +773,8 @@ PJ_DECL(pj_status_t) pj_stun_session_retransmit_req(pj_stun_session *sess,
|
|
* STUN message (useful if packet is received via a
|
|
* stream oriented protocol).
|
|
* @param token Optional token which will be given back to application
|
|
- * in the \a on_rx_request(), \a on_rx_indication() and
|
|
- * \a on_send_msg() callbacks. The token can be used to
|
|
+ * in the \a on_rx_request(), \a on_rx_indication() and
|
|
+ * \a on_send_msg() callbacks. The token can be used to
|
|
* associate processing or incoming request or indication
|
|
* with some context.
|
|
* @param src_addr The source address of the packet, which will also
|
|
@@ -726,7 +783,7 @@ PJ_DECL(pj_status_t) pj_stun_session_retransmit_req(pj_stun_session *sess,
|
|
* @param src_addr_len Length of the source address.
|
|
*
|
|
* @return PJ_SUCCESS on success, or the appropriate error code.
|
|
- * This function will return PJNATH_ESTUNDESTROYED if
|
|
+ * This function will return PJNATH_ESTUNDESTROYED if
|
|
* application has destroyed the session in one of the
|
|
* callback.
|
|
*/
|
|
@@ -751,6 +808,22 @@ PJ_DECL(pj_status_t) pj_stun_session_on_rx_pkt(pj_stun_session *sess,
|
|
PJ_DECL(void) pj_stun_msg_destroy_tdata(pj_stun_session *sess,
|
|
pj_stun_tx_data *tdata);
|
|
|
|
+/**
|
|
+ *
|
|
+ * @param sess The STUN session.
|
|
+ *
|
|
+ * @return The callback linked to the STUN session
|
|
+ */
|
|
+PJ_DECL(pj_stun_session_cb *) pj_stun_session_callback(pj_stun_session *sess);
|
|
+
|
|
+/**
|
|
+ *
|
|
+ * @param sess The STUN session.
|
|
+ *
|
|
+ * @return The connection type linked to the STUN session
|
|
+ */
|
|
+PJ_DECL(pj_stun_tp_type) pj_stun_session_tp_type(pj_stun_session *sess);
|
|
+
|
|
|
|
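One intended use of pj_stun_session_tp_type() appears further down in this diff (sess_auth.c): requests are only retransmitted by the session when the underlying transport is UDP. A sketch of the same pattern:

/* Sketch: let the STUN session retransmit only on unreliable transports. */
static pj_status_t send_request(pj_stun_session *sess,
                                const pj_sockaddr *srv_addr,
                                pj_stun_tx_data *tdata)
{
    pj_bool_t retransmit =
        (pj_stun_session_tp_type(sess) == PJ_STUN_TP_UDP);

    return pj_stun_session_send_msg(sess, NULL, PJ_FALSE, retransmit,
                                    srv_addr,
                                    pj_sockaddr_get_len(srv_addr), tdata);
}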
/**
|
|
* @}
|
|
diff --git a/pjnath/include/pjnath/stun_sock.h b/pjnath/include/pjnath/stun_sock.h
|
|
index a6610335e..b1601f65f 100644
|
|
--- a/pjnath/include/pjnath/stun_sock.h
|
|
+++ b/pjnath/include/pjnath/stun_sock.h
|
|
@@ -1,4 +1,4 @@
|
|
-/*
|
|
+/*
|
|
* Copyright (C) 2008-2011 Teluu Inc. (http://www.teluu.com)
|
|
* Copyright (C) 2003-2008 Benny Prijono <benny@prijono.org>
|
|
*
|
|
@@ -14,7 +14,7 @@
|
|
*
|
|
* You should have received a copy of the GNU General Public License
|
|
* along with this program; if not, write to the Free Software
|
|
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
|
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
|
*/
|
|
#ifndef __PJNATH_STUN_SOCK_H__
|
|
#define __PJNATH_STUN_SOCK_H__
|
|
@@ -23,10 +23,14 @@
|
|
* @file stun_sock.h
|
|
* @brief STUN aware socket transport
|
|
*/
|
|
+#include <pj/activesock.h>
|
|
#include <pjnath/stun_config.h>
|
|
+#include <pjnath/stun_session.h>
|
|
#include <pjlib-util/resolver.h>
|
|
+#include <pjlib-util/srv_resolver.h>
|
|
#include <pj/ioqueue.h>
|
|
#include <pj/lock.h>
|
|
+#include <pj/pool.h>
|
|
#include <pj/sock.h>
|
|
#include <pj/sock_qos.h>
|
|
|
|
@@ -86,7 +90,17 @@ typedef enum pj_stun_sock_op
|
|
/**
|
|
* IP address change notification from the keep-alive operation.
|
|
*/
|
|
- PJ_STUN_SOCK_MAPPED_ADDR_CHANGE
|
|
+ PJ_STUN_SOCK_MAPPED_ADDR_CHANGE,
|
|
+
|
|
+ /**
|
|
+ * STUN session was destroyed.
|
|
+ */
|
|
+ PJ_STUN_SESS_DESTROYED,
|
|
+
|
|
+ /**
|
|
+ * TCP fails to connect
|
|
+ */
|
|
+ PJ_STUN_TCP_CONNECT_ERROR
|
|
|
|
|
|
} pj_stun_sock_op;
|
|
@@ -143,7 +157,7 @@ typedef struct pj_stun_sock_cb
|
|
* callback may be called for the following conditions:
|
|
* - the first time the publicly mapped address has been resolved from
|
|
* the STUN server, this callback will be called with \a op argument
|
|
- * set to PJ_STUN_SOCK_BINDING_OP \a status argument set to
|
|
+ * set to PJ_STUN_SOCK_BINDING_OP \a status argument set to
|
|
* PJ_SUCCESS.
|
|
* - anytime when the transport has detected that the publicly mapped
|
|
* address has changed, this callback will be called with \a op
|
|
@@ -152,7 +166,7 @@ typedef struct pj_stun_sock_cb
|
|
* application will get the resolved public address in the
|
|
* #pj_stun_sock_info structure.
|
|
* - for any terminal error (such as STUN time-out, DNS resolution
|
|
- * failure, or keep-alive failure), this callback will be called
|
|
+ * failure, or keep-alive failure), this callback will be called
|
|
* with the \a status argument set to non-PJ_SUCCESS.
|
|
*
|
|
* @param stun_sock The STUN transport.
|
|
@@ -166,7 +180,7 @@ typedef struct pj_stun_sock_cb
|
|
* should return PJ_TRUE to let the STUN socket operation
|
|
* continues.
|
|
*/
|
|
- pj_bool_t (*on_status)(pj_stun_sock *stun_sock,
|
|
+ pj_bool_t (*on_status)(pj_stun_sock *stun_sock,
|
|
pj_stun_sock_op op,
|
|
pj_status_t status);
|
|
|
|
@@ -196,6 +210,11 @@ typedef struct pj_stun_sock_info
|
|
*/
|
|
pj_sockaddr mapped_addr;
|
|
|
|
+ /**
|
|
+ * If connected, the remote address will be stored here.
|
|
+ */
|
|
+ pj_sockaddr outgoing_addr;
|
|
+
|
|
/**
|
|
* Number of interface address aliases. The interface address aliases
|
|
* are list of all interface addresses in this host.
|
|
@@ -207,6 +226,11 @@ typedef struct pj_stun_sock_info
|
|
*/
|
|
pj_sockaddr aliases[PJ_ICE_ST_MAX_CAND];
|
|
|
|
+ /**
|
|
+ * The transport type of the socket.
|
|
+ */
|
|
+ pj_stun_tp_type conn_type;
|
|
+
|
|
} pj_stun_sock_info;
|
|
|
|
|
|
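A small sketch (not part of the patch) showing how the two new fields could be read back through pj_stun_sock_get_info():

/* Sketch: log the connected remote address of a TCP STUN transport. */
static void log_stun_sock_info(pj_stun_sock *stun_sock)
{
    pj_stun_sock_info info;
    char buf[PJ_INET6_ADDRSTRLEN + 10];

    if (pj_stun_sock_get_info(stun_sock, &info) != PJ_SUCCESS)
        return;

    if (info.conn_type == PJ_STUN_TP_TCP) {
        /* outgoing_addr is only filled for connected (TCP) sockets. */
        PJ_LOG(4, ("app", "STUN sock connected to %s",
                   pj_sockaddr_print(&info.outgoing_addr, buf,
                                     sizeof(buf), 3)));
    }
}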
@@ -251,6 +275,28 @@ typedef struct pj_stun_sock_cfg
|
|
*/
|
|
pj_sockaddr bound_addr;
|
|
|
|
+ /**
|
|
+ * This member holds a list of address mappings (internal/external) that
|
|
+ * the user (application) provides. These mappings are meant to be used
|
|
+ * to add server reflexive candidates that are not typically discovered
|
|
+ * by regular ICE operations. This is the case for mappings obtained
|
|
+ * through UPNP-IGD/NAT-PMP/PCP requests, or manually configured (port
|
|
+ * forward).
|
|
+ */
|
|
+ struct {
|
|
+ pj_sockaddr local_addr;
|
|
+ pj_sockaddr mapped_addr;
|
|
+ int tp_type;
|
|
+ } user_mapping[PJ_ICE_MAX_COMP];
|
|
+
|
|
+ /**
|
|
+ * Holds the number of entries in user_mapping above. If the feature is used,
|
|
+ * this value should match the number of components of the ICE session.
|
|
+ * The feature is disabled if this variable is set to 0.
|
|
+ * Default value is 0.
|
|
+ */
|
|
+ unsigned user_mapping_cnt;
|
|
+
|
|
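A hypothetical sketch of how an application could feed a manually forwarded port (e.g. obtained via UPnP) into the new fields; the addresses, the port number and the use of PJ_STUN_TP_UDP for tp_type are illustrative assumptions:

/* Sketch: advertise one user-provided mapping for component 1. */
static void add_port_forward(pj_stun_sock_cfg *cfg)
{
    pj_str_t local_ip  = pj_str("192.168.1.10");   /* illustrative */
    pj_str_t public_ip = pj_str("203.0.113.7");    /* illustrative */

    pj_sockaddr_init(pj_AF_INET(), &cfg->user_mapping[0].local_addr,
                     &local_ip, 50000);
    pj_sockaddr_init(pj_AF_INET(), &cfg->user_mapping[0].mapped_addr,
                     &public_ip, 50000);
    cfg->user_mapping[0].tp_type = PJ_STUN_TP_UDP;  /* assumption */
    cfg->user_mapping_cnt = 1;
}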
/**
|
|
* Specify the port range for STUN socket binding, relative to the start
|
|
* port number specified in \a bound_addr. Note that this setting is only
|
|
@@ -262,7 +308,7 @@ typedef struct pj_stun_sock_cfg
|
|
|
|
/**
|
|
* Specify the STUN keep-alive duration, in seconds. The STUN transport
|
|
- * does keep-alive by sending STUN Binding request to the STUN server.
|
|
+ * does keep-alive by sending STUN Binding request to the STUN server.
|
|
* If this value is zero, the PJ_STUN_KEEP_ALIVE_SEC value will be used.
|
|
* If the value is negative, it will disable STUN keep-alive.
|
|
*/
|
|
@@ -341,9 +387,11 @@ PJ_DECL(void) pj_stun_sock_cfg_default(pj_stun_sock_cfg *cfg);
|
|
* things the ioqueue and timer heap instance for
|
|
* the operation of this transport.
|
|
* @param af Address family of socket. Currently pj_AF_INET()
|
|
- * and pj_AF_INET6() are supported.
|
|
+ * and pj_AF_INET6() are supported.
|
|
* @param name Optional name to be given to this transport to
|
|
* assist debugging.
|
|
+ * @param conn_type Connection type to the STUN server. Both TCP and UDP are
|
|
+ * supported.
|
|
* @param cb Callback to receive events/data from the transport.
|
|
* @param cfg Optional transport settings.
|
|
* @param user_data Arbitrary application data to be associated with
|
|
@@ -356,6 +404,7 @@ PJ_DECL(void) pj_stun_sock_cfg_default(pj_stun_sock_cfg *cfg);
|
|
PJ_DECL(pj_status_t) pj_stun_sock_create(pj_stun_config *stun_cfg,
|
|
const char *name,
|
|
int af,
|
|
+ pj_stun_tp_type conn_type,
|
|
const pj_stun_sock_cb *cb,
|
|
const pj_stun_sock_cfg *cfg,
|
|
void *user_data,
|
|
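The updated creation call only gains the conn_type argument, mirroring the test changes later in this diff (stun_sock_test.c, concur_test.c); a sketch for a TCP transport:

/* Sketch: create a STUN transport that reaches the server over TCP. */
static pj_status_t create_tcp_stun_sock(pj_stun_config *stun_cfg,
                                        const pj_stun_sock_cb *cb,
                                        void *user_data,
                                        pj_stun_sock **p_sock)
{
    pj_stun_sock_cfg sock_cfg;
    pj_stun_sock_cfg_default(&sock_cfg);

    return pj_stun_sock_create(stun_cfg, "stuntcp", pj_AF_INET(),
                               PJ_STUN_TP_TCP, cb, &sock_cfg,
                               user_data, p_sock);
}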
@@ -475,7 +524,7 @@ PJ_DECL(pj_status_t) pj_stun_sock_get_info(pj_stun_sock *stun_sock,
|
|
* this case the \a on_data_sent() callback will be
|
|
* called when data is actually sent. Any other return
|
|
* value indicates error condition.
|
|
- */
|
|
+ */
|
|
PJ_DECL(pj_status_t) pj_stun_sock_sendto(pj_stun_sock *stun_sock,
|
|
pj_ioqueue_op_key_t *send_key,
|
|
const void *pkt,
|
|
@@ -484,6 +533,51 @@ PJ_DECL(pj_status_t) pj_stun_sock_sendto(pj_stun_sock *stun_sock,
|
|
const pj_sockaddr_t *dst_addr,
|
|
unsigned addr_len);
|
|
|
|
+
|
|
+#if PJ_HAS_TCP
|
|
+/**
|
|
+ * Connect the active socket to a remote address.
|
|
+ * @param stun_sock
|
|
+ * @param remote_addr the destination
|
|
+ * @param af address family
|
|
+ */
|
|
+PJ_DECL(pj_status_t) pj_stun_sock_connect_active(pj_stun_sock *stun_sock,
|
|
+ const pj_sockaddr_t *remote_addr,
|
|
+ int af);
|
|
+
|
|
+/**
|
|
+ * Reconnect the active socket to the remote address.
|
|
+ * @param stun_sock
|
|
+ * @param remote_addr the destination
|
|
+ * @param af address family
|
|
+ */
|
|
+PJ_DECL(pj_status_t) pj_stun_sock_reconnect_active(pj_stun_sock *stun_sock,
|
|
+ const pj_sockaddr_t *remote_addr,
|
|
+ int af);
|
|
+
|
|
+/**
|
|
+ * Close active socket
|
|
+ * @param stun_sock
|
|
+ * @param remote_addr The remote address of the connection to close.
|
|
+ */
|
|
+PJ_DECL(pj_status_t) pj_stun_sock_close(pj_stun_sock *stun_sock,
|
|
+ const pj_sockaddr_t *remote_addr);
|
|
+
|
|
+/**
|
|
+ * Close all active sockets except the one with remote_addr
|
|
+ * @param stun_sock
|
|
+ * @param remote_addr The remote address whose connection is kept open.
|
|
+ */
|
|
+PJ_DECL(pj_status_t) pj_stun_sock_close_all_except(pj_stun_sock *stun_sock,
|
|
+ const pj_sockaddr_t *remote_addr);
|
|
+
|
|
+#endif
|
|
+
|
|
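A rough usage sketch for the TCP helpers above (assuming an already created TCP-mode pj_stun_sock; error handling reduced to a minimum):

#if PJ_HAS_TCP
/* Sketch: open an active TCP connection towards a peer, later close it. */
static pj_status_t connect_then_close(pj_stun_sock *stun_sock,
                                      const pj_sockaddr *peer)
{
    pj_status_t status;

    status = pj_stun_sock_connect_active(stun_sock, peer,
                                         peer->addr.sa_family);
    if (status != PJ_SUCCESS && status != PJ_EPENDING)
        return status;

    /* ... exchange data; completion is reported via the callbacks ... */

    return pj_stun_sock_close(stun_sock, peer);
}
#endif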
+/**
|
|
+ * Retrieve the STUN session linked to this STUN transport.
|
|
+ * @param stun_sock
|
|
+ */
|
|
+PJ_DECL(pj_stun_session *) pj_stun_sock_get_session(pj_stun_sock *stun_sock);
|
|
/**
|
|
* @}
|
|
*/
|
|
diff --git a/pjnath/include/pjnath/turn_session.h b/pjnath/include/pjnath/turn_session.h
|
|
index 66acd9956..66a4afd1d 100644
|
|
--- a/pjnath/include/pjnath/turn_session.h
|
|
+++ b/pjnath/include/pjnath/turn_session.h
|
|
@@ -253,6 +253,35 @@ typedef struct pj_turn_session_cb
|
|
const pj_sockaddr_t *dst_addr,
|
|
unsigned addr_len);
|
|
|
|
+ /**
|
|
+ * This callback will be called by the TURN session whenever it
|
|
+ * needs to send an outgoing message. Since the TURN session doesn't
|
|
+ * have a socket on its own, this callback must be implemented.
|
|
+ *
|
|
+ * The difference with on_send_pkt is that this function returns
|
|
+ * the size of the packet actually sent, to predict when a busy
|
|
+ * condition will occur. Indeed, the active socket sends data
|
|
+ * asynchronously; when the data is actually sent, on_data_sent will be triggered.
|
|
+ *
|
|
+ * @param sess The TURN session.
|
|
+ * @param pkt The packet/data to be sent.
|
|
+ * @param pkt_len Length of the packet/data.
|
|
+ * @param dst_addr Destination address of the packet.
|
|
+ * @param addr_len Length of the destination address.
|
|
+ * @param sent_size Length actually sent.
|
|
+ * @param original_size The length of the packet without the header.
|
|
+ *
|
|
+ * @return The callback should return the status of the
|
|
+ * send operation.
|
|
+ */
|
|
+ pj_status_t (*on_send_pkt2)(pj_turn_session *sess,
|
|
+ const pj_uint8_t *pkt,
|
|
+ unsigned pkt_len,
|
|
+ const pj_sockaddr_t *dst_addr,
|
|
+ unsigned addr_len,
|
|
+ unsigned* sent_size,
|
|
+ unsigned original_size);
|
|
+
|
|
/**
|
|
* This callback will be called by the TURN session whenever it
|
|
* needs to send outgoing STUN requests/messages for TURN signalling
|
|
@@ -833,6 +862,42 @@ PJ_DECL(pj_status_t) pj_turn_session_sendto(pj_turn_session *sess,
|
|
const pj_sockaddr_t *peer_addr,
|
|
unsigned addr_len);
|
|
|
|
+/**
|
|
+ * Send data to the specified peer address via the TURN relay. This
|
|
+ * function will encapsulate the data as STUN Send Indication or TURN
|
|
+ * ChannelData packet and send the message to the TURN server. The TURN
|
|
+ * server then will send the data to the peer.
|
|
+ *
|
|
+ * The allocation (pj_turn_session_alloc()) must have been successfully
|
|
+ * created before application can relay any data.
|
|
+ *
|
|
+ * Since TURN session is transport independent, this function will
|
|
+ * ultimately call \a on_send_pkt() callback to request the application
|
|
+ * to actually send the packet containing the data to the TURN server.
|
|
+ *
|
|
+ * The difference with pj_turn_session_sendto is that this function returns
|
|
+ * the size of the packet actually sent, to predict when a busy
|
|
+ * condition will occur. Indeed, the active socket sends data
|
|
+ * asynchronously; when the data is actually sent, on_data_sent will be triggered.
|
|
+ *
|
|
+ * @param sess The TURN client session.
|
|
+ * @param pkt The data/packet to be sent to peer.
|
|
+ * @param pkt_len Length of the data.
|
|
+ * @param peer_addr The remote peer address (the ultimate destination
|
|
+ * of the data, and not the TURN server address).
|
|
+ * @param addr_len Length of the address.
|
|
+ * @param sent The size of the packet actually sent
|
|
+ *
|
|
+ * @return PJ_SUCCESS if the operation has been successful,
|
|
+ * or the appropriate error code on failure.
|
|
+ */
|
|
+PJ_DECL(pj_status_t) pj_turn_session_sendto2(pj_turn_session *sess,
|
|
+ const pj_uint8_t *pkt,
|
|
+ unsigned pkt_len,
|
|
+ const pj_sockaddr_t *peer_addr,
|
|
+ unsigned addr_len,
|
|
+ unsigned *sent);
|
|
+
|
|
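A sketch of the new variant in use (not part of the patch); the sent output can be compared with the requested length to anticipate a busy transport, as described above:

/* Sketch: relay a datagram and inspect how much was actually sent. */
static pj_status_t relay_data(pj_turn_session *sess,
                              const pj_uint8_t *data, unsigned len,
                              const pj_sockaddr *peer)
{
    unsigned sent = 0;
    pj_status_t status;

    status = pj_turn_session_sendto2(sess, data, len, peer,
                                     pj_sockaddr_get_len(peer), &sent);
    if (status == PJ_SUCCESS && sent < len) {
        /* Fewer bytes were accepted than requested: the transport is busy;
         * actual completion is reported via on_data_sent(). */
    }
    return status;
}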
/**
|
|
* Optionally establish channel binding for the specified a peer address.
|
|
* This function will assign a unique channel number for the peer address
|
|
diff --git a/pjnath/include/pjnath/turn_sock.h b/pjnath/include/pjnath/turn_sock.h
|
|
index 2de6b4267..681ac32ba 100644
|
|
--- a/pjnath/include/pjnath/turn_sock.h
|
|
+++ b/pjnath/include/pjnath/turn_sock.h
|
|
@@ -668,6 +668,44 @@ PJ_DECL(pj_status_t) pj_turn_sock_disconnect(pj_turn_sock *turn_sock,
|
|
const pj_sockaddr_t *peer,
|
|
unsigned addr_len);
|
|
|
|
+/**
|
|
+ * Send data to the specified peer address via the TURN relay. This
|
|
+ * function will encapsulate the data as STUN Send Indication or TURN
|
|
+ * ChannelData packet and send the message to the TURN server. The TURN
|
|
+ * server then will send the data to the peer.
|
|
+ *
|
|
+ * The allocation (pj_turn_sock_alloc()) must have been successfully
|
|
+ * created before application can relay any data.
|
|
+ *
|
|
+ * @param turn_sock The TURN transport instance.
|
|
+ * @param pkt The data/packet to be sent to peer.
|
|
+ * @param pkt_len Length of the data.
|
|
+ * @param peer_addr The remote peer address (the ultimate destination
|
|
+ * of the data, and not the TURN server address).
|
|
+ * @param addr_len Length of the address.
|
|
+ * @param sent Size actually sent.
|
|
+ *
|
|
+ * @return PJ_SUCCESS if the operation has been successful,
|
|
+ * or the appropriate error code on failure.
|
|
+ */
|
|
+PJ_DECL(pj_status_t) pj_turn_sock_sendto2(pj_turn_sock *turn_sock,
|
|
+ const pj_uint8_t *pkt,
|
|
+ unsigned pkt_len,
|
|
+ const pj_sockaddr_t *peer_addr,
|
|
+ unsigned addr_len,
|
|
+ unsigned* sent);
|
|
+
|
|
+/**
|
|
+ * Check whether the peer is reached through a TCP data connection.
|
|
+ *
|
|
+ * @param turn_sock The TURN transport instance.
|
|
+ * @param peer The peer address to check.
|
|
+ *
|
|
+ * @return PJ_TRUE if the peer has a data connection, PJ_FALSE otherwise.
|
|
+ */
|
|
+PJ_DECL(pj_bool_t) pj_turn_sock_has_dataconn(pj_turn_sock *turn_sock,
|
|
+ const pj_sockaddr_t *peer);
|
|
+
|
|
|
|
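A short sketch combining the two additions above (illustrative only):

/* Sketch: log whether the peer is reached through a TCP data connection,
 * then send using the size-reporting variant. */
static pj_status_t send_to_peer(pj_turn_sock *turn_sock,
                                const pj_uint8_t *pkt, unsigned len,
                                const pj_sockaddr *peer)
{
    unsigned sent = 0;

    if (pj_turn_sock_has_dataconn(turn_sock, peer)) {
        PJ_LOG(5, ("app", "peer has a TURN data connection"));
    }

    return pj_turn_sock_sendto2(turn_sock, pkt, len, peer,
                                pj_sockaddr_get_len(peer), &sent);
}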
/**
|
|
* @}
|
|
diff --git a/pjnath/src/pjnath-test/concur_test.c b/pjnath/src/pjnath-test/concur_test.c
|
|
index 54ddb7b6c..f684097a6 100644
|
|
--- a/pjnath/src/pjnath-test/concur_test.c
|
|
+++ b/pjnath/src/pjnath-test/concur_test.c
|
|
@@ -183,6 +183,7 @@ static int stun_destroy_test_session(struct stun_test_session *test_sess)
|
|
char name[10];
|
|
pj_ansi_snprintf(name, sizeof(name), "stun%02d", i);
|
|
status = pj_stun_sock_create(&test_sess->stun_cfg, name, pj_AF_INET(),
|
|
+ PJ_STUN_TP_UDP,
|
|
&stun_cb, NULL, test_sess,
|
|
&stun_sock[i]);
|
|
if (status != PJ_SUCCESS) {
|
|
diff --git a/pjnath/src/pjnath-test/sess_auth.c b/pjnath/src/pjnath-test/sess_auth.c
|
|
index f0e308bba..b67049a49 100644
|
|
--- a/pjnath/src/pjnath-test/sess_auth.c
|
|
+++ b/pjnath/src/pjnath-test/sess_auth.c
|
|
@@ -1,4 +1,4 @@
|
|
-/*
|
|
+/*
|
|
* Copyright (C) 2008-2011 Teluu Inc. (http://www.teluu.com)
|
|
* Copyright (C) 2003-2008 Benny Prijono <benny@prijono.org>
|
|
*
|
|
@@ -14,7 +14,7 @@
|
|
*
|
|
* You should have received a copy of the GNU General Public License
|
|
* along with this program; if not, write to the Free Software
|
|
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
|
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
|
*/
|
|
#include "test.h"
|
|
|
|
@@ -81,7 +81,7 @@ static pj_status_t server_on_rx_request(pj_stun_session *sess,
|
|
PJ_UNUSED_ARG(pkt_len);
|
|
PJ_UNUSED_ARG(token);
|
|
|
|
- return pj_stun_session_respond(sess, rdata, 0, NULL, NULL, PJ_TRUE,
|
|
+ return pj_stun_session_respond(sess, rdata, 0, NULL, NULL, PJ_TRUE,
|
|
src_addr, src_addr_len);
|
|
}
|
|
|
|
@@ -106,7 +106,7 @@ static pj_status_t server_get_auth(void *user_data,
|
|
|
|
|
|
static pj_status_t server_get_password( const pj_stun_msg *msg,
|
|
- void *user_data,
|
|
+ void *user_data,
|
|
const pj_str_t *realm,
|
|
const pj_str_t *username,
|
|
pj_pool_t *pool,
|
|
@@ -172,8 +172,8 @@ static int server_thread(void *unused)
|
|
PJ_FD_ZERO(&readset);
|
|
PJ_FD_SET(server->sock, &readset);
|
|
|
|
- if (pj_sock_select((int)server->sock+1, &readset, NULL, NULL, &delay)==1
|
|
- && PJ_FD_ISSET(server->sock, &readset))
|
|
+ if (pj_sock_select((int)server->sock+1, &readset, NULL, NULL, &delay)==1
|
|
+ && PJ_FD_ISSET(server->sock, &readset))
|
|
{
|
|
char pkt[1000];
|
|
pj_ssize_t len;
|
|
@@ -195,7 +195,7 @@ static int server_thread(void *unused)
|
|
if (!server->responding)
|
|
continue;
|
|
|
|
- pj_stun_session_on_rx_pkt(server->sess, pkt, len,
|
|
+ pj_stun_session_on_rx_pkt(server->sess, pkt, len,
|
|
PJ_STUN_CHECK_PACKET | PJ_STUN_IS_DATAGRAM,
|
|
NULL, NULL, &src_addr, src_addr_len);
|
|
}
|
|
@@ -235,7 +235,7 @@ static int create_std_server(pj_stun_auth_type auth_type,
|
|
pj_stun_session_cb sess_cb;
|
|
pj_stun_auth_cred cred;
|
|
pj_status_t status;
|
|
-
|
|
+
|
|
/* Create server */
|
|
pool = pj_pool_create(mem, "server", 1000, 1000, NULL);
|
|
server = PJ_POOL_ZALLOC_T(pool, struct server);
|
|
@@ -247,7 +247,8 @@ static int create_std_server(pj_stun_auth_type auth_type,
|
|
pj_bzero(&sess_cb, sizeof(sess_cb));
|
|
sess_cb.on_rx_request = &server_on_rx_request;
|
|
sess_cb.on_send_msg = &server_send_msg;
|
|
- status = pj_stun_session_create(&stun_cfg, "server", &sess_cb, PJ_FALSE, NULL, &server->sess);
|
|
+ status = pj_stun_session_create(&stun_cfg, "server", &sess_cb, PJ_FALSE,
|
|
+ NULL, &server->sess, PJ_STUN_TP_UDP);
|
|
if (status != PJ_SUCCESS) {
|
|
destroy_server();
|
|
return -10;
|
|
@@ -294,7 +295,7 @@ static int create_std_server(pj_stun_auth_type auth_type,
|
|
* 'no route to host' error, so let's just hardcode to [::1]
|
|
*/
|
|
pj_sockaddr_init(pj_AF_INET6(), &addr, NULL, 0);
|
|
- addr.ipv6.sin6_addr.s6_addr[15] = 1;
|
|
+ addr.ipv6.sin6_addr.s6_addr[15] = 1;
|
|
} else {
|
|
status = pj_gethostip(GET_AF(use_ipv6), &addr);
|
|
if (status != PJ_SUCCESS) {
|
|
@@ -394,8 +395,8 @@ static int client_thread(void *unused)
|
|
PJ_FD_ZERO(&readset);
|
|
PJ_FD_SET(client->sock, &readset);
|
|
|
|
- if (pj_sock_select((int)client->sock+1, &readset, NULL, NULL, &delay)==1
|
|
- && PJ_FD_ISSET(client->sock, &readset))
|
|
+ if (pj_sock_select((int)client->sock+1, &readset, NULL, NULL, &delay)==1
|
|
+ && PJ_FD_ISSET(client->sock, &readset))
|
|
{
|
|
char pkt[1000];
|
|
pj_ssize_t len;
|
|
@@ -417,11 +418,11 @@ static int client_thread(void *unused)
|
|
if (!client->responding)
|
|
continue;
|
|
|
|
- pj_stun_session_on_rx_pkt(client->sess, pkt, len,
|
|
+ pj_stun_session_on_rx_pkt(client->sess, pkt, len,
|
|
PJ_STUN_CHECK_PACKET | PJ_STUN_IS_DATAGRAM,
|
|
NULL, NULL, &src_addr, src_addr_len);
|
|
}
|
|
-
|
|
+
|
|
}
|
|
|
|
return 0;
|
|
@@ -465,7 +466,7 @@ static int run_client_test(const char *title,
|
|
pj_status_t expected_code,
|
|
const char *expected_realm,
|
|
const char *expected_nonce,
|
|
-
|
|
+
|
|
int (*more_check)(void))
|
|
{
|
|
pj_pool_t *pool;
|
|
@@ -475,7 +476,7 @@ static int run_client_test(const char *title,
|
|
pj_status_t status;
|
|
pj_sockaddr addr;
|
|
int rc = 0;
|
|
-
|
|
+
|
|
PJ_LOG(3,(THIS_FILE, " %s test (%s)", title, use_ipv6?"IPv6":"IPv4"));
|
|
|
|
/* Create client */
|
|
@@ -488,7 +489,8 @@ static int run_client_test(const char *title,
|
|
pj_bzero(&sess_cb, sizeof(sess_cb));
|
|
sess_cb.on_request_complete = &client_on_request_complete;
|
|
sess_cb.on_send_msg = &client_send_msg;
|
|
- status = pj_stun_session_create(&stun_cfg, "client", &sess_cb, PJ_FALSE, NULL, &client->sess);
|
|
+ status = pj_stun_session_create(&stun_cfg, "client", &sess_cb, PJ_FALSE,
|
|
+ NULL, &client->sess, PJ_STUN_TP_UDP);
|
|
if (status != PJ_SUCCESS) {
|
|
destroy_client_server();
|
|
return -200;
|
|
@@ -545,7 +547,7 @@ static int run_client_test(const char *title,
|
|
}
|
|
|
|
/* Create request */
|
|
- status = pj_stun_session_create_req(client->sess, PJ_STUN_BINDING_REQUEST,
|
|
+ status = pj_stun_session_create_req(client->sess, PJ_STUN_BINDING_REQUEST,
|
|
PJ_STUN_MAGIC, NULL, &tdata);
|
|
if (status != PJ_SUCCESS) {
|
|
destroy_client_server();
|
|
@@ -570,11 +572,11 @@ static int run_client_test(const char *title,
|
|
pj_stun_msgint_attr_create(tdata->pool, &mi);
|
|
pj_stun_msg_add_attr(tdata->msg, &mi->hdr);
|
|
}
|
|
-
|
|
+
|
|
}
|
|
|
|
/* Send the request */
|
|
- status = pj_stun_session_send_msg(client->sess, NULL, PJ_FALSE, PJ_TRUE, &server->addr,
|
|
+ status = pj_stun_session_send_msg(client->sess, NULL, PJ_FALSE, (pj_stun_session_tp_type(client->sess) == PJ_STUN_TP_UDP), &server->addr,
|
|
pj_sockaddr_get_len(&server->addr), tdata);
|
|
if (status != PJ_SUCCESS) {
|
|
destroy_client_server();
|
|
@@ -596,7 +598,7 @@ static int run_client_test(const char *title,
|
|
PJ_LOG(3,(THIS_FILE, " err: expecting %d (%s) but got %d (%s) response",
|
|
expected_code, e1, client->response_status, e2));
|
|
rc = -500;
|
|
- }
|
|
+ }
|
|
|
|
} else {
|
|
int res_code = 0;
|
|
@@ -604,17 +606,17 @@ static int run_client_test(const char *title,
|
|
pj_stun_nonce_attr *anonce;
|
|
|
|
if (client->response_status != 0) {
|
|
- PJ_LOG(3,(THIS_FILE, " err: expecting successful operation but got error %d",
|
|
+ PJ_LOG(3,(THIS_FILE, " err: expecting successful operation but got error %d",
|
|
client->response_status));
|
|
rc = -600;
|
|
goto done;
|
|
- }
|
|
+ }
|
|
|
|
if (PJ_STUN_IS_ERROR_RESPONSE(client->response->hdr.type)) {
|
|
pj_stun_errcode_attr *aerr = NULL;
|
|
|
|
aerr = (pj_stun_errcode_attr*)
|
|
- pj_stun_msg_find_attr(client->response,
|
|
+ pj_stun_msg_find_attr(client->response,
|
|
PJ_STUN_ATTR_ERROR_CODE, 0);
|
|
if (aerr == NULL) {
|
|
PJ_LOG(3,(THIS_FILE, " err: received error response without ERROR-CODE"));
|
|
@@ -747,8 +749,8 @@ static int long_term_check1(void)
|
|
|
|
static int long_term_check2(void)
|
|
{
|
|
- /* response SHOULD NOT include a USERNAME, NONCE, REALM or
|
|
- * MESSAGE-INTEGRITY attribute.
|
|
+ /* response SHOULD NOT include a USERNAME, NONCE, REALM or
|
|
+ * MESSAGE-INTEGRITY attribute.
|
|
*/
|
|
if (pj_stun_msg_find_attr(client->response, PJ_STUN_ATTR_USERNAME, 0))
|
|
return -900;
|
|
@@ -851,7 +853,7 @@ int sess_auth_test(void)
|
|
}
|
|
|
|
/* If the USERNAME does not contain a username value currently valid
|
|
- * within the server: If the message is a request, the server MUST
|
|
+ * within the server: If the message is a request, the server MUST
|
|
* reject the request with an error response. This response MUST use
|
|
* an error code of 401 (Unauthorized).
|
|
*/
|
|
@@ -1083,7 +1085,7 @@ int sess_auth_test(void)
|
|
* MUST include a NONCE and REALM attribute and SHOULD NOT incude the
|
|
* USERNAME or MESSAGE-INTEGRITY attribute. Servers can invalidate
|
|
* nonces in order to provide additional security. See Section 4.3
|
|
- * of [RFC2617] for guidelines.
|
|
+ * of [RFC2617] for guidelines.
|
|
*/
|
|
// how??
|
|
|
|
diff --git a/pjnath/src/pjnath-test/stun_sock_test.c b/pjnath/src/pjnath-test/stun_sock_test.c
|
|
index f44988aee..76bcb241e 100644
|
|
--- a/pjnath/src/pjnath-test/stun_sock_test.c
|
|
+++ b/pjnath/src/pjnath-test/stun_sock_test.c
|
|
@@ -1,4 +1,4 @@
|
|
-/*
|
|
+/*
|
|
* Copyright (C) 2008-2011 Teluu Inc. (http://www.teluu.com)
|
|
* Copyright (C) 2003-2008 Benny Prijono <benny@prijono.org>
|
|
*
|
|
@@ -14,7 +14,7 @@
|
|
*
|
|
* You should have received a copy of the GNU General Public License
|
|
* along with this program; if not, write to the Free Software
|
|
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
|
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
|
*/
|
|
#include "test.h"
|
|
|
|
@@ -65,9 +65,9 @@ static pj_bool_t srv_on_data_recvfrom(pj_activesock_t *asock,
|
|
pj_stun_msg *req_msg, *res_msg;
|
|
|
|
pool = pj_pool_create(mem, "stunsrv", 512, 512, NULL);
|
|
-
|
|
+
|
|
/* Parse request */
|
|
- status = pj_stun_msg_decode(pool, (pj_uint8_t*)data, size,
|
|
+ status = pj_stun_msg_decode(pool, (pj_uint8_t*)data, size,
|
|
PJ_STUN_IS_DATAGRAM | PJ_STUN_CHECK_PACKET,
|
|
&req_msg, NULL, NULL);
|
|
if (status != PJ_SUCCESS) {
|
|
@@ -89,8 +89,8 @@ static pj_bool_t srv_on_data_recvfrom(pj_activesock_t *asock,
|
|
if (srv->flag & WITH_MAPPED) {
|
|
pj_sockaddr addr;
|
|
pj_bool_t use_ipv6 = (srv->addr.addr.sa_family == pj_AF_INET6());
|
|
-
|
|
- pj_sockaddr_init(GET_AF(use_ipv6), &addr, &srv->ip_to_send,
|
|
+
|
|
+ pj_sockaddr_init(GET_AF(use_ipv6), &addr, &srv->ip_to_send,
|
|
srv->port_to_send);
|
|
|
|
pj_stun_msg_add_sockaddr_attr(pool, res_msg, PJ_STUN_ATTR_MAPPED_ADDR,
|
|
@@ -98,17 +98,17 @@ static pj_bool_t srv_on_data_recvfrom(pj_activesock_t *asock,
|
|
} else if (srv->flag & WITH_XOR_MAPPED) {
|
|
pj_sockaddr addr;
|
|
pj_bool_t use_ipv6 = (srv->addr.addr.sa_family == pj_AF_INET6());
|
|
-
|
|
- pj_sockaddr_init(GET_AF(use_ipv6), &addr, &srv->ip_to_send,
|
|
+
|
|
+ pj_sockaddr_init(GET_AF(use_ipv6), &addr, &srv->ip_to_send,
|
|
srv->port_to_send);
|
|
|
|
- pj_stun_msg_add_sockaddr_attr(pool, res_msg,
|
|
+ pj_stun_msg_add_sockaddr_attr(pool, res_msg,
|
|
PJ_STUN_ATTR_XOR_MAPPED_ADDR,
|
|
PJ_TRUE, &addr, sizeof(addr));
|
|
}
|
|
|
|
/* Encode */
|
|
- status = pj_stun_msg_encode(res_msg, (pj_uint8_t*)data, 100, 0,
|
|
+ status = pj_stun_msg_encode(res_msg, (pj_uint8_t*)data, 100, 0,
|
|
NULL, &size);
|
|
if (status != PJ_SUCCESS) {
|
|
app_perror(" pj_stun_msg_encode()", status);
|
|
@@ -118,7 +118,7 @@ static pj_bool_t srv_on_data_recvfrom(pj_activesock_t *asock,
|
|
|
|
/* Send back */
|
|
sent = size;
|
|
- pj_activesock_sendto(asock, &srv->send_key, data, &sent, 0,
|
|
+ pj_activesock_sendto(asock, &srv->send_key, data, &sent, 0,
|
|
src_addr, addr_len);
|
|
|
|
pj_pool_release(pool);
|
|
@@ -126,7 +126,7 @@ static pj_bool_t srv_on_data_recvfrom(pj_activesock_t *asock,
|
|
} else if (srv->flag & ECHO) {
|
|
/* Send back */
|
|
sent = size;
|
|
- pj_activesock_sendto(asock, &srv->send_key, data, &sent, 0,
|
|
+ pj_activesock_sendto(asock, &srv->send_key, data, &sent, 0,
|
|
src_addr, addr_len);
|
|
|
|
}
|
|
@@ -156,7 +156,7 @@ static pj_status_t create_server(pj_pool_t *pool,
|
|
pj_bzero(&activesock_cb, sizeof(activesock_cb));
|
|
activesock_cb.on_data_recvfrom = &srv_on_data_recvfrom;
|
|
status = pj_activesock_create_udp(pool, &srv->addr, NULL, ioqueue,
|
|
- &activesock_cb, srv, &srv->asock,
|
|
+ &activesock_cb, srv, &srv->asock,
|
|
&srv->addr);
|
|
if (status != PJ_SUCCESS)
|
|
return status;
|
|
@@ -194,7 +194,7 @@ struct stun_client
|
|
unsigned on_rx_data_cnt;
|
|
};
|
|
|
|
-static pj_bool_t stun_sock_on_status(pj_stun_sock *stun_sock,
|
|
+static pj_bool_t stun_sock_on_status(pj_stun_sock *stun_sock,
|
|
pj_stun_sock_op op,
|
|
pj_status_t status)
|
|
{
|
|
@@ -253,7 +253,7 @@ static pj_status_t create_client(pj_stun_config *cfg,
|
|
pj_bzero(&cb, sizeof(cb));
|
|
cb.on_status = &stun_sock_on_status;
|
|
cb.on_rx_data = &stun_sock_on_rx_data;
|
|
- status = pj_stun_sock_create(cfg, NULL, GET_AF(use_ipv6), &cb, &sock_cfg,
|
|
+ status = pj_stun_sock_create(cfg, NULL, GET_AF(use_ipv6), PJ_STUN_TP_UDP, &cb, &sock_cfg,
|
|
client, &client->sock);
|
|
if (status != PJ_SUCCESS) {
|
|
app_perror(" pj_stun_sock_create()", status);
|
|
@@ -298,7 +298,7 @@ static void handle_events(pj_stun_config *cfg, unsigned msec_delay)
|
|
/*
|
|
* Timeout test: scenario when no response is received from server
|
|
*/
|
|
-static int timeout_test(pj_stun_config *cfg, pj_bool_t destroy_on_err,
|
|
+static int timeout_test(pj_stun_config *cfg, pj_bool_t destroy_on_err,
|
|
pj_bool_t use_ipv6)
|
|
{
|
|
struct stun_srv *srv;
|
|
@@ -308,7 +308,7 @@ static int timeout_test(pj_stun_config *cfg, pj_bool_t destroy_on_err,
|
|
int i, ret = 0;
|
|
pj_status_t status;
|
|
|
|
- PJ_LOG(3,(THIS_FILE, " timeout test [%d] - (%s)", destroy_on_err,
|
|
+ PJ_LOG(3,(THIS_FILE, " timeout test [%d] - (%s)", destroy_on_err,
|
|
(use_ipv6)?"IPv6":"IPv4"));
|
|
|
|
status = create_client(cfg, &client, destroy_on_err, use_ipv6);
|
|
@@ -323,7 +323,7 @@ static int timeout_test(pj_stun_config *cfg, pj_bool_t destroy_on_err,
|
|
|
|
srv_addr = (use_ipv6)?pj_str("::1"):pj_str("127.0.0.1");
|
|
|
|
- status = pj_stun_sock_start(client->sock, &srv_addr,
|
|
+ status = pj_stun_sock_start(client->sock, &srv_addr,
|
|
pj_sockaddr_get_port(&srv->addr), NULL);
|
|
if (status != PJ_SUCCESS) {
|
|
destroy_server(srv);
|
|
@@ -382,7 +382,7 @@ on_return:
|
|
* Invalid response scenario: when server returns no MAPPED-ADDRESS or
|
|
* XOR-MAPPED-ADDRESS attribute.
|
|
*/
|
|
-static int missing_attr_test(pj_stun_config *cfg, pj_bool_t destroy_on_err,
|
|
+static int missing_attr_test(pj_stun_config *cfg, pj_bool_t destroy_on_err,
|
|
pj_bool_t use_ipv6)
|
|
{
|
|
struct stun_srv *srv;
|
|
@@ -392,14 +392,14 @@ static int missing_attr_test(pj_stun_config *cfg, pj_bool_t destroy_on_err,
|
|
int i, ret = 0;
|
|
pj_status_t status;
|
|
|
|
- PJ_LOG(3,(THIS_FILE, " missing attribute test [%d] - (%s)",
|
|
+ PJ_LOG(3,(THIS_FILE, " missing attribute test [%d] - (%s)",
|
|
destroy_on_err, (use_ipv6)?"IPv6":"IPv4"));
|
|
|
|
status = create_client(cfg, &client, destroy_on_err, use_ipv6);
|
|
if (status != PJ_SUCCESS)
|
|
return -110;
|
|
|
|
- status = create_server(client->pool, cfg->ioqueue, RESPOND_STUN, use_ipv6,
|
|
+ status = create_server(client->pool, cfg->ioqueue, RESPOND_STUN, use_ipv6,
|
|
&srv);
|
|
if (status != PJ_SUCCESS) {
|
|
destroy_client(client);
|
|
@@ -407,8 +407,8 @@ static int missing_attr_test(pj_stun_config *cfg, pj_bool_t destroy_on_err,
|
|
}
|
|
srv_addr = (use_ipv6)?pj_str("::1"):pj_str("127.0.0.1");
|
|
|
|
- status = pj_stun_sock_start(client->sock, &srv_addr,
|
|
- pj_sockaddr_get_port(&srv->addr), NULL);
|
|
+ status = pj_stun_sock_start(client->sock, &srv_addr,
|
|
+ pj_sockaddr_get_port(&srv->addr), NULL);
|
|
if (status != PJ_SUCCESS) {
|
|
destroy_server(srv);
|
|
destroy_client(client);
|
|
@@ -467,14 +467,14 @@ static int keep_alive_test(pj_stun_config *cfg, pj_bool_t use_ipv6)
|
|
int i, ret = 0;
|
|
pj_status_t status;
|
|
|
|
- PJ_LOG(3,(THIS_FILE, " normal operation - (%s)",
|
|
+ PJ_LOG(3,(THIS_FILE, " normal operation - (%s)",
|
|
(use_ipv6)?"IPv6":"IPv4"));
|
|
|
|
status = create_client(cfg, &client, PJ_TRUE, use_ipv6);
|
|
if (status != PJ_SUCCESS)
|
|
return -310;
|
|
|
|
- status = create_server(client->pool, cfg->ioqueue, RESPOND_STUN|WITH_XOR_MAPPED,
|
|
+ status = create_server(client->pool, cfg->ioqueue, RESPOND_STUN|WITH_XOR_MAPPED,
|
|
use_ipv6, &srv);
|
|
if (status != PJ_SUCCESS) {
|
|
destroy_client(client);
|
|
@@ -488,7 +488,7 @@ static int keep_alive_test(pj_stun_config *cfg, pj_bool_t use_ipv6)
|
|
|
|
srv_addr = (use_ipv6)?pj_str("::1"):pj_str("127.0.0.1");
|
|
|
|
- status = pj_stun_sock_start(client->sock, &srv_addr,
|
|
+ status = pj_stun_sock_start(client->sock, &srv_addr,
|
|
pj_sockaddr_get_port(&srv->addr), NULL);
|
|
if (status != PJ_SUCCESS) {
|
|
destroy_server(srv);
|
|
@@ -545,7 +545,7 @@ static int keep_alive_test(pj_stun_config *cfg, pj_bool_t use_ipv6)
|
|
goto on_return;
|
|
}
|
|
/* verify the mapped address */
|
|
- pj_sockaddr_init(GET_AF(use_ipv6), &mapped_addr,
|
|
+ pj_sockaddr_init(GET_AF(use_ipv6), &mapped_addr,
|
|
&srv->ip_to_send, srv->port_to_send);
|
|
if (pj_sockaddr_cmp(&info.mapped_addr, &mapped_addr) != 0) {
|
|
PJ_LOG(3,(THIS_FILE, " error: mapped address mismatched"));
|
|
@@ -583,7 +583,7 @@ static int keep_alive_test(pj_stun_config *cfg, pj_bool_t use_ipv6)
|
|
PJ_LOG(3,(THIS_FILE, " sending to %s", pj_sockaddr_print(&info.srv_addr, txt, sizeof(txt), 3)));
|
|
}
|
|
status = pj_stun_sock_sendto(client->sock, NULL, &ret, sizeof(ret),
|
|
- 0, &info.srv_addr,
|
|
+ 0, &info.srv_addr,
|
|
pj_sockaddr_get_len(&info.srv_addr));
|
|
if (status != PJ_SUCCESS && status != PJ_EPENDING) {
|
|
app_perror(" error: server sending data", status);
|
|
@@ -683,7 +683,7 @@ static int keep_alive_test(pj_stun_config *cfg, pj_bool_t use_ipv6)
|
|
srv->flag = RESPOND_STUN | WITH_XOR_MAPPED;
|
|
|
|
/* Change mapped address in the response */
|
|
- srv->ip_to_send = (use_ipv6)?pj_str("2002:202:202::"):pj_str("2.2.2.2");
|
|
+ srv->ip_to_send = (use_ipv6)?pj_str("2002:202:202::"):pj_str("2.2.2.2");
|
|
srv->port_to_send++;
|
|
|
|
/* Reset server */
|
|
@@ -754,7 +754,7 @@ static int keep_alive_test(pj_stun_config *cfg, pj_bool_t use_ipv6)
|
|
goto on_return;
|
|
}
|
|
/* verify the mapped address */
|
|
- pj_sockaddr_init(GET_AF(use_ipv6), &mapped_addr,
|
|
+ pj_sockaddr_init(GET_AF(use_ipv6), &mapped_addr,
|
|
&srv->ip_to_send, srv->port_to_send);
|
|
if (pj_sockaddr_cmp(&info.mapped_addr, &mapped_addr) != 0) {
|
|
PJ_LOG(3,(THIS_FILE, " error: mapped address mismatched"));
|
|
@@ -779,7 +779,7 @@ static int keep_alive_test(pj_stun_config *cfg, pj_bool_t use_ipv6)
|
|
* Part 5: Failed keep-alive
|
|
*/
|
|
PJ_LOG(3,(THIS_FILE, " failed keep-alive scenario"));
|
|
-
|
|
+
|
|
/* Change server operation mode to respond without attribute */
|
|
srv->flag = RESPOND_STUN;
|
|
|
|
@@ -864,7 +864,7 @@ int stun_sock_test(void)
|
|
ret = -8;
|
|
goto on_return;
|
|
}
|
|
-
|
|
+
|
|
pj_stun_config_init(&stun_cfg, mem, 0, ioqueue, timer_heap);
|
|
|
|
DO_TEST(timeout_test(&stun_cfg, PJ_FALSE, USE_IPV6));
|
|
diff --git a/pjnath/src/pjnath/ice_session.c b/pjnath/src/pjnath/ice_session.c
|
|
index 8afe4d181..d0cbb0ce5 100644
|
|
--- a/pjnath/src/pjnath/ice_session.c
|
|
+++ b/pjnath/src/pjnath/ice_session.c
|
|
@@ -1,4 +1,4 @@
|
|
-/*
|
|
+/*
|
|
* Copyright (C) 2008-2011 Teluu Inc. (http://www.teluu.com)
|
|
* Copyright (C) 2003-2008 Benny Prijono <benny@prijono.org>
|
|
*
|
|
@@ -14,9 +14,10 @@
|
|
*
|
|
* You should have received a copy of the GNU General Public License
|
|
* along with this program; if not, write to the Free Software
|
|
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
|
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
|
*/
|
|
#include <pjnath/ice_session.h>
|
|
+#include <pjnath/stun_session.h>
|
|
#include <pj/addr_resolv.h>
|
|
#include <pj/array.h>
|
|
#include <pj/assert.h>
|
|
@@ -28,6 +29,22 @@
|
|
#include <pj/rand.h>
|
|
#include <pj/string.h>
|
|
|
|
+#if defined(_WIN32) || defined(__APPLE__)
|
|
+/* TODO(sblin): find an alternative for these platforms */
|
|
+#else
|
|
+/* The following headers are used to get DEPRECATED addresses
|
|
+ * as specified in RFC 2462 Section 5.5.4
|
|
+ * https://tools.ietf.org/html/rfc2462#section-5.5.4
|
|
+ */
|
|
+#include <arpa/inet.h>
|
|
+#include <asm/types.h>
|
|
+#include <linux/netlink.h>
|
|
+#include <linux/rtnetlink.h>
|
|
+#include <sys/socket.h>
|
|
+#include <unistd.h>
|
|
+#include <strings.h>
|
|
+#endif
|
|
+
|
|
/* String names for candidate types */
|
|
static const char *cand_type_names[] =
|
|
{
|
|
@@ -40,10 +57,13 @@ static const char *cand_type_names[] =
|
|
|
|
/* String names for pj_ice_sess_check_state */
|
|
#if PJ_LOG_MAX_LEVEL >= 4
|
|
-static const char *check_state_name[] =
|
|
+static const char *check_state_name[] =
|
|
{
|
|
"Frozen",
|
|
+ "Needs Retry",
|
|
+ "Needs First Packet",
|
|
"Waiting",
|
|
+ "Pending",
|
|
"In Progress",
|
|
"Succeeded",
|
|
"Failed"
|
|
@@ -57,7 +77,7 @@ static const char *clist_state_name[] =
|
|
};
|
|
#endif /* PJ_LOG_MAX_LEVEL >= 4 */
|
|
|
|
-static const char *role_names[] =
|
|
+static const char *role_names[] =
|
|
{
|
|
"Unknown",
|
|
"Controlled",
|
|
@@ -68,13 +88,15 @@ enum timer_type
|
|
{
|
|
TIMER_NONE, /**< Timer not active */
|
|
TIMER_COMPLETION_CALLBACK, /**< Call on_ice_complete() callback */
|
|
- TIMER_CONTROLLED_WAIT_NOM, /**< Controlled agent is waiting for
|
|
+ TIMER_CONTROLLING_TCP_PASSIVE_TIMEOUT, /**< Controlling agent is waiting for passive TCP connection timeout. */
|
|
+ TIMER_CONTROLLED_WAIT_NOM, /**< Controlled agent is waiting for
|
|
controlling agent to send connectivity
|
|
check with nominated flag after it has
|
|
valid check for every components. */
|
|
TIMER_START_NOMINATED_CHECK,/**< Controlling agent start connectivity
|
|
checks with USE-CANDIDATE flag. */
|
|
- TIMER_KEEP_ALIVE /**< ICE keep-alive timer. */
|
|
+ TIMER_KEEP_ALIVE, /**< ICE keep-alive timer. */
|
|
+ TIMER_CONNECTION_TIMEOUT
|
|
|
|
};
|
|
|
|
@@ -122,6 +144,8 @@ typedef struct timer_data
|
|
{
|
|
pj_ice_sess *ice;
|
|
pj_ice_sess_checklist *clist;
|
|
+ /* TODO(remove): for now this is needed for the NEEDS_FIRST_PACKET state */
|
|
+ unsigned first_packet_counter;
|
|
} timer_data;
|
|
|
|
|
|
@@ -132,15 +156,16 @@ typedef struct timer_data
|
|
|
|
/* Forward declarations */
|
|
static void on_timer(pj_timer_heap_t *th, pj_timer_entry *te);
|
|
+static void on_tcp_connect_timeout(pj_ice_sess *ice);
|
|
static void on_ice_complete(pj_ice_sess *ice, pj_status_t status);
|
|
static void ice_keep_alive(pj_ice_sess *ice, pj_bool_t send_now);
|
|
static void ice_on_destroy(void *obj);
|
|
static void destroy_ice(pj_ice_sess *ice,
|
|
pj_status_t reason);
|
|
-static pj_status_t start_periodic_check(pj_timer_heap_t *th,
|
|
+static pj_status_t start_periodic_check(pj_timer_heap_t *th,
|
|
pj_timer_entry *te);
|
|
static void start_nominated_check(pj_ice_sess *ice);
|
|
-static void periodic_timer(pj_timer_heap_t *th,
|
|
+static void periodic_timer(pj_timer_heap_t *th,
|
|
pj_timer_entry *te);
|
|
static void handle_incoming_check(pj_ice_sess *ice,
|
|
const pj_ice_rx_check *rcheck);
|
|
@@ -190,7 +215,7 @@ static pj_status_t stun_auth_get_cred(const pj_stun_msg *msg,
|
|
pj_stun_passwd_type *data_type,
|
|
pj_str_t *data);
|
|
static pj_status_t stun_auth_get_password(const pj_stun_msg *msg,
|
|
- void *user_data,
|
|
+ void *user_data,
|
|
const pj_str_t *realm,
|
|
const pj_str_t *username,
|
|
pj_pool_t *pool,
|
|
@@ -289,10 +314,11 @@ static pj_status_t init_comp(pj_ice_sess *ice,
|
|
sess_cb.on_send_msg = &on_stun_send_msg;
|
|
|
|
/* Create STUN session for this candidate */
|
|
- status = pj_stun_session_create(&ice->stun_cfg, NULL,
|
|
+ status = pj_stun_session_create(&ice->stun_cfg, NULL,
|
|
&sess_cb, PJ_TRUE,
|
|
ice->grp_lock,
|
|
- &comp->stun_sess);
|
|
+ &comp->stun_sess,
|
|
+ PJ_STUN_TP_UDP);
|
|
if (status != PJ_SUCCESS)
|
|
return status;
|
|
|
|
@@ -322,9 +348,10 @@ PJ_DEF(void) pj_ice_sess_options_default(pj_ice_sess_options *opt)
|
|
{
|
|
opt->aggressive = PJ_TRUE;
|
|
opt->nominated_check_delay = PJ_ICE_NOMINATED_CHECK_DELAY;
|
|
- opt->controlled_agent_want_nom_timeout =
|
|
+ opt->controlled_agent_want_nom_timeout =
|
|
ICE_CONTROLLED_AGENT_WAIT_NOMINATION_TIMEOUT;
|
|
opt->trickle = PJ_ICE_SESS_TRICKLE_DISABLED;
|
|
+ opt->agent_passive_timeout = ICE_CONTROLLING_PASSIVE_TIMEOUT;
|
|
}
|
|
|
|
/*
|
|
@@ -350,7 +377,7 @@ PJ_DEF(pj_status_t) pj_ice_sess_create(pj_stun_config *stun_cfg,
|
|
if (name == NULL)
|
|
name = "icess%p";
|
|
|
|
- pool = pj_pool_create(stun_cfg->pf, name, PJNATH_POOL_LEN_ICE_SESS,
|
|
+ pool = pj_pool_create(stun_cfg->pf, name, PJNATH_POOL_LEN_ICE_SESS,
|
|
PJNATH_POOL_INC_ICE_SESS, NULL);
|
|
ice = PJ_POOL_ZALLOC_T(pool, pj_ice_sess);
|
|
ice->pool = pool;
|
|
@@ -361,6 +388,7 @@ PJ_DEF(pj_status_t) pj_ice_sess_create(pj_stun_config *stun_cfg,
|
|
pj_ice_sess_options_default(&ice->opt);
|
|
|
|
pj_timer_entry_init(&ice->timer, TIMER_NONE, (void*)ice, &on_timer);
|
|
+ pj_timer_entry_init(&ice->timer_connect, TIMER_NONE, (void*)ice, &on_timer);
|
|
|
|
pj_ansi_snprintf(ice->obj_name, sizeof(ice->obj_name),
|
|
name, ice);
|
|
@@ -425,7 +453,7 @@ PJ_DEF(pj_status_t) pj_ice_sess_create(pj_stun_config *stun_cfg,
|
|
/* Done */
|
|
*p_ice = ice;
|
|
|
|
- LOG4((ice->obj_name,
|
|
+ LOG4((ice->obj_name,
|
|
"ICE session created, comp_cnt=%d, role is %s agent",
|
|
comp_cnt, role_names[ice->role]));
|
|
|
|
@@ -507,6 +535,9 @@ static void destroy_ice(pj_ice_sess *ice,
|
|
pj_timer_heap_cancel_if_active(ice->stun_cfg.timer_heap,
|
|
&ice->timer, PJ_FALSE);
|
|
|
|
+ pj_timer_heap_cancel_if_active(ice->stun_cfg.timer_heap,
|
|
+ &ice->timer_connect, TIMER_NONE);
|
|
+
|
|
for (i=0; i<ice->comp_cnt; ++i) {
|
|
if (ice->comp[i].stun_sess) {
|
|
pj_stun_session_destroy(ice->comp[i].stun_sess);
|
|
@@ -551,7 +582,7 @@ PJ_DEF(pj_status_t) pj_ice_sess_detach_grp_lock(pj_ice_sess *ice,
|
|
|
|
|
|
/*
|
|
- * Change session role.
|
|
+ * Change session role.
|
|
*/
|
|
PJ_DEF(pj_status_t) pj_ice_sess_change_role(pj_ice_sess *ice,
|
|
pj_ice_sess_role new_role)
|
|
@@ -651,7 +682,7 @@ static pj_status_t stun_auth_get_cred(const pj_stun_msg *msg,
|
|
|
|
/* Get password to be used to authenticate incoming message */
|
|
static pj_status_t stun_auth_get_password(const pj_stun_msg *msg,
|
|
- void *user_data,
|
|
+ void *user_data,
|
|
const pj_str_t *realm,
|
|
const pj_str_t *username,
|
|
pj_pool_t *pool,
|
|
@@ -680,8 +711,8 @@ static pj_status_t stun_auth_get_password(const pj_stun_msg *msg,
|
|
/* The agent MUST accept a credential if the username consists
|
|
* of two values separated by a colon, where the first value is
|
|
* equal to the username fragment generated by the agent in an offer
|
|
- * or answer for a session in-progress, and the MESSAGE-INTEGRITY
|
|
- * is the output of a hash of the password and the STUN packet's
|
|
+ * or answer for a session in-progress, and the MESSAGE-INTEGRITY
|
|
+ * is the output of a hash of the password and the STUN packet's
|
|
* contents.
|
|
*/
|
|
const char *pos;
|
|
@@ -712,7 +743,7 @@ static pj_uint32_t CALC_CAND_PRIO(pj_ice_sess *ice,
|
|
pj_uint32_t comp_id)
|
|
{
|
|
#if PJNATH_ICE_PRIO_STD
|
|
- return ((ice->prefs[type] & 0xFF) << 24) +
|
|
+ return ((ice->prefs[type] & 0xFF) << 24) +
|
|
((local_pref & 0xFFFF) << 8) +
|
|
(((256 - comp_id) & 0xFF) << 0);
|
|
#else
|
|
@@ -728,12 +759,151 @@ static pj_uint32_t CALC_CAND_PRIO(pj_ice_sess *ice,
|
|
max_comp = (2<<PJ_ICE_COMP_BITS),
|
|
};
|
|
|
|
- return ((ice->prefs[type] & type_mask) << type_shift) +
|
|
+ return ((ice->prefs[type] & type_mask) << type_shift) +
|
|
((local_pref & local_mask) << local_shift) +
|
|
(((max_comp - comp_id) & comp_mask) << comp_shift);
|
|
#endif
|
|
}
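For reference, the PJNATH_ICE_PRIO_STD branch above is the candidate priority formula from RFC 5245 section 4.1.2.1. A minimal standalone sketch of the same computation (the helper name and sample values are illustrative only, not pjnath API):

#include <stdint.h>
#include <stdio.h>

/* RFC 5245 4.1.2.1: prio = 2^24*type_pref + 2^8*local_pref + (256 - comp_id) */
static uint32_t cand_prio(uint32_t type_pref, uint32_t local_pref, uint32_t comp_id)
{
    return ((type_pref & 0xFF) << 24) +
           ((local_pref & 0xFFFF) << 8) +
           (((256 - comp_id) & 0xFF) << 0);
}

int main(void)
{
    /* Host candidate (type pref 126), max local pref, RTP component 1:
     * 126*16777216 + 65535*256 + 255 = 2130706431 (0x7effffff). */
    printf("0x%x\n", (unsigned)cand_prio(126, 65535, 1));
    return 0;
}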
+/* retrieve invalid addresses and store it in a string */
+static PJ_DEF(void) get_invalid_addresses(char** addresses, size_t* size)
+{
+#if defined(_WIN32) || defined(__APPLE__)
+    // PJ_TODO("sblin: find alternative for WIN32 and APPLE");
+#else
+    struct {
+        struct nlmsghdr nlmsg_info;
+        struct ifaddrmsg ifaddrmsg_info;
+    } netlink_req;
+
+    int fd;
+
+    long pagesize = sysconf(_SC_PAGESIZE);
+
+    if (!pagesize)
+        pagesize = 4096; /* Assume pagesize is 4096 if sysconf() failed */
+
+    fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
+    if (fd < 0) {
+        perror("socket initialization error: abort");
+        return;
+    }
+
+    int rtn;
+
+    bzero(&netlink_req, sizeof(netlink_req));
+
+    netlink_req.nlmsg_info.nlmsg_len = NLMSG_LENGTH(sizeof(struct ifaddrmsg));
+    netlink_req.nlmsg_info.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
+    netlink_req.nlmsg_info.nlmsg_type = RTM_GETADDR;
+    netlink_req.nlmsg_info.nlmsg_pid = getpid();
+
+    netlink_req.ifaddrmsg_info.ifa_family = AF_INET6;
+
+    rtn = send(fd, &netlink_req, netlink_req.nlmsg_info.nlmsg_len, 0);
+    if (rtn < 0) {
+        perror("send error: abort");
+        return;
+    }
+
+    char read_buffer[pagesize];
+    struct nlmsghdr *nlmsg_ptr;
+    int nlmsg_len;
+
+    size_t idx = 0;
+    /* Will store all deprecated addresses into a string */
+    char* deprecatedAddrs = malloc(256*sizeof(char)*PJ_INET6_ADDRSTRLEN);
+    if (!deprecatedAddrs) {
+        perror("malloc error: abort");
+        return;
+    }
+
+    while (1) {
+        int rtn;
+
+        bzero(read_buffer, pagesize);
+        rtn = recv(fd, read_buffer, pagesize, 0);
+        if (rtn < 0) {
+            perror("recv(): ");
+            free(deprecatedAddrs);
+            return;
+        }
+
+        nlmsg_ptr = (struct nlmsghdr *) read_buffer;
+        nlmsg_len = rtn;
+
+        if (nlmsg_len < sizeof(struct nlmsghdr)) {
+            perror("Received an incomplete netlink packet");
+            free(deprecatedAddrs);
+            return;
+        }
+
+        for (; NLMSG_OK(nlmsg_ptr, nlmsg_len);
+             nlmsg_ptr = NLMSG_NEXT(nlmsg_ptr, nlmsg_len))
+        {
+            if (nlmsg_ptr->nlmsg_type == NLMSG_DONE)
+                goto nlmsg_done;
+
+            struct ifaddrmsg *ifaddrmsg_ptr;
+            struct rtattr *rtattr_ptr;
+            int ifaddrmsg_len;
+
+            ifaddrmsg_ptr = (struct ifaddrmsg *) NLMSG_DATA(nlmsg_ptr);
+
+            if (ifaddrmsg_ptr->ifa_flags & IFA_F_DEPRECATED ||
+                ifaddrmsg_ptr->ifa_flags & IFA_F_TENTATIVE)
+            {
+                rtattr_ptr = (struct rtattr *) IFA_RTA(ifaddrmsg_ptr);
+                ifaddrmsg_len = IFA_PAYLOAD(nlmsg_ptr);
+
+                for (; RTA_OK(rtattr_ptr, ifaddrmsg_len);
+                     rtattr_ptr = RTA_NEXT(rtattr_ptr, ifaddrmsg_len))
+                {
+                    switch (rtattr_ptr->rta_type) {
+                    case IFA_ADDRESS:
+                        /* Any 256 obsolete ips (should not happen), resize the array. */
+                        if (idx > 0 && idx % 256 == 0) {
+                            char* newDeprecated = realloc(deprecatedAddrs,
+                                (idx + 256)*sizeof(char)*PJ_INET6_ADDRSTRLEN);
+                            if (newDeprecated == NULL) {
+                                perror("realloc error: abort");
+                                free(deprecatedAddrs);
+                                return;
+                            }
+                            deprecatedAddrs = newDeprecated;
+                        }
+                        /* Store deprecated IP */
+                        inet_ntop(ifaddrmsg_ptr->ifa_family,
+                                  RTA_DATA(rtattr_ptr),
+                                  &deprecatedAddrs[idx*PJ_INET6_ADDRSTRLEN],
+                                  sizeof(char)*PJ_INET6_ADDRSTRLEN);
+                        ++idx;
+                        break;
+                    default:
+                        break;
+                    }
+                }
+            }
+        }
+    }
+
+nlmsg_done:
+    close(fd);
+    *size = idx;
+    if (idx > 0) {
+        char *final = realloc(deprecatedAddrs,
+                              idx*sizeof(char)*PJ_INET6_ADDRSTRLEN);
+        if (final) {
+            *addresses = final;
+        } else {
+            perror("realloc error: abort");
+            free(deprecatedAddrs);
+        }
+    } else {
+        free(deprecatedAddrs);
+    }
+#endif
+}
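A minimal sketch of how the buffer returned above is meant to be consumed: *size entries, each occupying a fixed PJ_INET6_ADDRSTRLEN-byte slot. This mirrors the check added to pj_ice_sess_add_cand below; the caller here is illustrative only and assumes it lives in this same file:

#include <stdio.h>
#include <stdlib.h>

static void log_invalid_addresses(void)
{
    char *bad = NULL;
    size_t n = 0;

    get_invalid_addresses(&bad, &n);
    if (bad == NULL)
        return;
    for (size_t i = 0; i < n; ++i) {
        /* Each slot is one NUL-terminated textual address */
        printf("deprecated/tentative: %s\n", &bad[i * PJ_INET6_ADDRSTRLEN]);
    }
    free(bad);
}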
/*
|
|
* Add ICE candidate
|
|
@@ -748,14 +918,38 @@ PJ_DEF(pj_status_t) pj_ice_sess_add_cand(pj_ice_sess *ice,
|
|
const pj_sockaddr_t *base_addr,
|
|
const pj_sockaddr_t *rel_addr,
|
|
int addr_len,
|
|
- unsigned *p_cand_id)
|
|
+ unsigned *p_cand_id,
|
|
+ pj_ice_cand_transport transport)
|
|
{
|
|
+    /**
+     * RFC 4862: an IP address can have the status DEPRECATED and SHOULD NOT
+     * be used by new applications unless they already use it.
+     * So, we should ignore these addresses.
+     * Also, IPs in the TENTATIVE state are not ready and SHOULD NOT be
+     * used for now. Ignore these addresses too.
+     */
+    char* deprecatedAddrs = NULL;
+    size_t size = 0;
+    get_invalid_addresses(&deprecatedAddrs, &size);
+    if (deprecatedAddrs != NULL) {
+        char tmpAddrStr[PJ_INET6_ADDRSTRLEN];
+        pj_sockaddr_print(addr, tmpAddrStr, sizeof(tmpAddrStr), 0);
+        for (int i = 0; i < size*PJ_INET6_ADDRSTRLEN; i += PJ_INET6_ADDRSTRLEN) {
+            if (!strncmp(tmpAddrStr, &deprecatedAddrs[i], PJ_INET6_ADDRSTRLEN)) {
+                free(deprecatedAddrs);
+                /* This address is considered deprecated, ignore it. */
+                return PJ_SUCCESS;
+            }
+        }
+        free(deprecatedAddrs);
+    }
+
|
|
pj_ice_sess_cand *lcand;
|
|
pj_status_t status = PJ_SUCCESS;
|
|
char address[PJ_INET6_ADDRSTRLEN];
|
|
unsigned i;
|
|
|
|
- PJ_ASSERT_RETURN(ice && comp_id &&
|
|
+ PJ_ASSERT_RETURN(ice && comp_id &&
|
|
foundation && addr && base_addr && addr_len,
|
|
PJ_EINVAL);
|
|
PJ_ASSERT_RETURN(comp_id <= ice->comp_cnt, PJ_EINVAL);
|
|
@@ -794,6 +988,7 @@ PJ_DEF(pj_status_t) pj_ice_sess_add_cand(pj_ice_sess *ice,
|
|
lcand->comp_id = (pj_uint8_t)comp_id;
|
|
lcand->transport_id = (pj_uint8_t)transport_id;
|
|
lcand->type = type;
|
|
+ lcand->transport = transport;
|
|
pj_strdup(ice->pool, &lcand->foundation, foundation);
|
|
lcand->local_pref = local_pref;
|
|
lcand->prio = CALC_CAND_PRIO(ice, type, local_pref, lcand->comp_id);
|
|
@@ -821,15 +1016,15 @@ PJ_DEF(pj_status_t) pj_ice_sess_add_cand(pj_ice_sess *ice,
|
|
pj_ansi_strxcpy(ice->tmp.txt, pj_sockaddr_print(&lcand->addr, address,
|
|
sizeof(address), 2),
|
|
sizeof(ice->tmp.txt));
|
|
- LOG4((ice->obj_name,
|
|
+ LOG4((ice->obj_name,
|
|
"Candidate %d added: comp_id=%d, type=%s, foundation=%.*s, "
|
|
"addr=%s:%d, base=%s:%d, prio=0x%x (%u)",
|
|
lcand->id,
|
|
- lcand->comp_id,
|
|
+ lcand->comp_id,
|
|
cand_type_names[lcand->type],
|
|
(int)lcand->foundation.slen,
|
|
lcand->foundation.ptr,
|
|
- ice->tmp.txt,
|
|
+ ice->tmp.txt,
|
|
pj_sockaddr_get_port(&lcand->addr),
|
|
pj_sockaddr_print(&lcand->base_addr, address, sizeof(address), 2),
|
|
pj_sockaddr_get_port(&lcand->base_addr),
|
|
@@ -863,7 +1058,7 @@ PJ_DEF(pj_status_t) pj_ice_sess_find_default_cand(pj_ice_sess *ice,
|
|
/* First find in valid list if we have nominated pair */
|
|
for (i=0; i<ice->valid_list.count; ++i) {
|
|
pj_ice_sess_check *check = &ice->valid_list.checks[i];
|
|
-
|
|
+
|
|
if (check->lcand->comp_id == comp_id) {
|
|
*cand_id = GET_LCAND_ID(check->lcand);
|
|
pj_grp_lock_release(ice->grp_lock);
|
|
@@ -875,7 +1070,7 @@ PJ_DEF(pj_status_t) pj_ice_sess_find_default_cand(pj_ice_sess *ice,
|
|
for (i=0; i<ice->lcand_cnt; ++i) {
|
|
pj_ice_sess_cand *lcand = &ice->lcand[i];
|
|
if (lcand->comp_id==comp_id &&
|
|
- lcand->type == PJ_ICE_CAND_TYPE_RELAYED)
|
|
+ lcand->type == PJ_ICE_CAND_TYPE_RELAYED)
|
|
{
|
|
*cand_id = GET_LCAND_ID(lcand);
|
|
pj_grp_lock_release(ice->grp_lock);
|
|
@@ -888,7 +1083,7 @@ PJ_DEF(pj_status_t) pj_ice_sess_find_default_cand(pj_ice_sess *ice,
|
|
pj_ice_sess_cand *lcand = &ice->lcand[i];
|
|
if (lcand->comp_id==comp_id &&
|
|
(lcand->type == PJ_ICE_CAND_TYPE_SRFLX ||
|
|
- lcand->type == PJ_ICE_CAND_TYPE_PRFLX))
|
|
+ lcand->type == PJ_ICE_CAND_TYPE_PRFLX))
|
|
{
|
|
*cand_id = GET_LCAND_ID(lcand);
|
|
pj_grp_lock_release(ice->grp_lock);
|
|
@@ -900,7 +1095,7 @@ PJ_DEF(pj_status_t) pj_ice_sess_find_default_cand(pj_ice_sess *ice,
|
|
for (i=0; i<ice->lcand_cnt; ++i) {
|
|
pj_ice_sess_cand *lcand = &ice->lcand[i];
|
|
if (lcand->comp_id==comp_id &&
|
|
- lcand->type == PJ_ICE_CAND_TYPE_HOST)
|
|
+ lcand->type == PJ_ICE_CAND_TYPE_HOST)
|
|
{
|
|
*cand_id = GET_LCAND_ID(lcand);
|
|
pj_grp_lock_release(ice->grp_lock);
|
|
@@ -924,7 +1119,7 @@ PJ_DEF(pj_status_t) pj_ice_sess_find_default_cand(pj_ice_sess *ice,
|
|
# define MAX(a,b) (a > b ? a : b)
|
|
#endif
|
|
|
|
-static pj_timestamp CALC_CHECK_PRIO(const pj_ice_sess *ice,
|
|
+static pj_timestamp CALC_CHECK_PRIO(const pj_ice_sess *ice,
|
|
const pj_ice_sess_cand *lcand,
|
|
const pj_ice_sess_cand *rcand)
|
|
{
|
|
@@ -936,7 +1131,7 @@ static pj_timestamp CALC_CHECK_PRIO(const pj_ice_sess *ice,
|
|
*/
|
|
|
|
if (ice->role == PJ_ICE_SESS_ROLE_CONTROLLING) {
|
|
- O = lcand->prio;
|
|
+ O = lcand->prio;
|
|
A = rcand->prio;
|
|
} else {
|
|
O = rcand->prio;
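For context, CALC_CHECK_PRIO combines the controlling (O) and controlled (A) candidate priorities into the pair priority of RFC 5245 section 5.7.2. A standalone restatement of that formula (sketch only, not the pjnath API):

#include <stdint.h>

/* RFC 5245 5.7.2: pair priority = 2^32*MIN(O,A) + 2*MAX(O,A) + (O>A ? 1 : 0)
 * e.g. O=0x7EFFFFFF (host), A=0x64FFFFFF (srflx) -> 0x64FFFFFFFDFFFFFF */
static uint64_t pair_prio(uint32_t O, uint32_t A)
{
    uint64_t mn = (O < A) ? O : A;
    uint64_t mx = (O > A) ? O : A;
    return (mn << 32) + 2 * mx + (O > A ? 1 : 0);
}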
@@ -1013,7 +1208,7 @@ static const char *dump_check(char *buffer, unsigned bufsize,
|
|
return buffer;
|
|
}
|
|
|
|
-static void dump_checklist(const char *title, pj_ice_sess *ice,
|
|
+static void dump_checklist(const char *title, pj_ice_sess *ice,
|
|
const pj_ice_sess_checklist *clist)
|
|
{
|
|
unsigned i;
|
|
@@ -1023,7 +1218,7 @@ static void dump_checklist(const char *title, pj_ice_sess *ice,
|
|
const pj_ice_sess_check *c = &clist->checks[i];
|
|
LOG4((ice->obj_name, " %s (%s, state=%s)",
|
|
dump_check(ice->tmp.txt, sizeof(ice->tmp.txt), clist, c),
|
|
- (c->nominated ? "nominated" : "not nominated"),
|
|
+ (c->nominated ? "nominated" : "not nominated"),
|
|
check_state_name[c->state]));
|
|
}
|
|
}
|
|
@@ -1033,9 +1228,12 @@ static void dump_checklist(const char *title, pj_ice_sess *ice,
|
|
#endif
|
|
|
|
static void check_set_state(pj_ice_sess *ice, pj_ice_sess_check *check,
|
|
- pj_ice_sess_check_state st,
|
|
+ pj_ice_sess_check_state st,
|
|
pj_status_t err_code)
|
|
{
|
|
+ if (check->state >= PJ_ICE_SESS_CHECK_STATE_SUCCEEDED)
|
|
+ return;
|
|
+
|
|
LOG5((ice->obj_name, "Check %s: state changed from %s to %s",
|
|
dump_check(ice->tmp.txt, sizeof(ice->tmp.txt), &ice->clist, check),
|
|
check_state_name[check->state],
|
|
@@ -1102,9 +1300,9 @@ static void sort_checklist(pj_ice_sess *ice, pj_ice_sess_checklist *clist)
|
|
unsigned k;
|
|
|
|
pj_memcpy(&tmp, &clist->checks[i], sizeof(pj_ice_sess_check));
|
|
- pj_memcpy(&clist->checks[i], &clist->checks[highest],
|
|
+ pj_memcpy(&clist->checks[i], &clist->checks[highest],
|
|
sizeof(pj_ice_sess_check));
|
|
- pj_memcpy(&clist->checks[highest], &tmp,
|
|
+ pj_memcpy(&clist->checks[highest], &tmp,
|
|
sizeof(pj_ice_sess_check));
|
|
|
|
/* Update valid and nominated check pointers, since we're moving
|
|
@@ -1138,7 +1336,7 @@ static void remove_check(pj_ice_sess *ice, pj_ice_sess_checklist *clist,
|
|
/* Prune checklist, this must have been done after the checklist
|
|
* is sorted.
|
|
*/
|
|
-static pj_status_t prune_checklist(pj_ice_sess *ice,
|
|
+static pj_status_t prune_checklist(pj_ice_sess *ice,
|
|
pj_ice_sess_checklist *clist)
|
|
{
|
|
unsigned i;
|
|
@@ -1151,7 +1349,7 @@ static pj_status_t prune_checklist(pj_ice_sess *ice,
|
|
* the list. This is done by removing a pair if its local and remote
|
|
* candidates are identical to the local and remote candidates of a pair
|
|
* higher up on the priority list. The result is a sequence of ordered
|
|
- * candidate pairs, called the check list for that media stream.
|
|
+ * candidate pairs, called the check list for that media stream.
|
|
*/
|
|
/* First replace SRFLX candidates with their base */
|
|
for (i=0; i<clist->count; ++i) {
|
|
@@ -1178,7 +1376,7 @@ static pj_status_t prune_checklist(pj_ice_sess *ice,
|
|
if (j==ice->lcand_cnt) {
|
|
char baddr[PJ_INET6_ADDRSTRLEN];
|
|
/* Host candidate not found this this srflx! */
|
|
- LOG4((ice->obj_name,
|
|
+ LOG4((ice->obj_name,
|
|
"Base candidate %s:%d not found for srflx candidate %d",
|
|
pj_sockaddr_print(&srflx->base_addr, baddr,
|
|
sizeof(baddr), 2),
|
|
@@ -1187,6 +1385,15 @@ static pj_status_t prune_checklist(pj_ice_sess *ice,
|
|
return PJNATH_EICENOHOSTCAND;
|
|
}
|
|
}
|
|
+
+        /* Section 6.2, RFC 6544 (https://tools.ietf.org/html/rfc6544)
+         * When the agent prunes the check list, it MUST also remove any pair
+         * for which the local candidate is a passive TCP candidate
+         */
+        if (clist->checks[i].lcand->transport == PJ_CAND_TCP_PASSIVE) {
+            remove_check(ice, clist, i, "local passive TCP");
+            i--;
+        }
|
|
}
|
|
|
|
/* Next remove a pair if its local and remote candidates are identical
|
|
@@ -1218,8 +1425,8 @@ static pj_status_t prune_checklist(pj_ice_sess *ice,
|
|
if ((licand == ljcand) && (ricand == rjcand)) {
|
|
reason = "duplicate found";
|
|
} else if ((rjcand == ricand) &&
|
|
- (pj_sockaddr_cmp(&ljcand->base_addr,
|
|
- &licand->base_addr)==0))
|
|
+ (pj_sockaddr_cmp(&ljcand->base_addr,
|
|
+ &licand->base_addr)==0))
|
|
{
|
|
reason = "equal base";
|
|
}
|
|
@@ -1256,8 +1463,13 @@ static void on_timer(pj_timer_heap_t *th, pj_timer_entry *te)
|
|
}
|
|
|
|
switch (type) {
|
|
+ case TIMER_CONTROLLING_TCP_PASSIVE_TIMEOUT:
|
|
+ LOG4((ice->obj_name,
|
|
+ "Controlling agent timed-out while waiting for incoming TCP checks. Set state to failed!"));
|
|
+ on_ice_complete(ice, PJNATH_EICEFAILED);
|
|
+ break;
|
|
case TIMER_CONTROLLED_WAIT_NOM:
|
|
- LOG4((ice->obj_name,
|
|
+ LOG4((ice->obj_name,
|
|
"Controlled agent timed-out in waiting for the controlling "
|
|
"agent to send nominated check. Setting state to fail now.."));
|
|
on_ice_complete(ice, PJNATH_EICENOMTIMEOUT);
|
|
@@ -1289,6 +1501,9 @@ static void on_timer(pj_timer_heap_t *th, pj_timer_entry *te)
|
|
case TIMER_KEEP_ALIVE:
|
|
ice_keep_alive(ice, PJ_TRUE);
|
|
break;
|
|
+ case TIMER_CONNECTION_TIMEOUT:
|
|
+ on_tcp_connect_timeout(ice);
|
|
+ break;
|
|
case TIMER_NONE:
|
|
/* Nothing to do, just to get rid of gcc warning */
|
|
break;
|
|
@@ -1315,7 +1530,7 @@ static void ice_keep_alive(pj_ice_sess *ice, pj_bool_t send_now)
|
|
the_check = comp->nominated_check;
|
|
|
|
/* Create the Binding Indication */
|
|
- status = pj_stun_session_create_ind(comp->stun_sess,
|
|
+ status = pj_stun_session_create_ind(comp->stun_sess,
|
|
PJ_STUN_BINDING_INDICATION,
|
|
&tdata);
|
|
if (status != PJ_SUCCESS)
|
|
@@ -1335,9 +1550,15 @@ static void ice_keep_alive(pj_ice_sess *ice, pj_bool_t send_now)
|
|
/* Send to session */
|
|
addr_len = pj_sockaddr_get_len(&the_check->rcand->addr);
|
|
status = pj_stun_session_send_msg(comp->stun_sess, msg_data,
|
|
- PJ_FALSE, PJ_FALSE,
|
|
- &the_check->rcand->addr,
|
|
+ PJ_FALSE, PJ_FALSE,
|
|
+ &the_check->rcand->addr,
|
|
addr_len, tdata);
|
|
+ if (status != PJ_SUCCESS && status != PJ_EPENDING && status != PJ_EBUSY) {
|
|
+ if (ice->cb.on_ice_destroy) {
|
|
+ ice->cb.on_ice_destroy(ice);
|
|
+ }
|
|
+ return;
|
|
+ }
|
|
|
|
/* Restore FINGERPRINT usage */
|
|
pj_stun_session_use_fingerprint(comp->stun_sess, saved);
|
|
@@ -1349,8 +1570,8 @@ done:
|
|
if (ice->timer.id == TIMER_NONE) {
|
|
pj_time_val delay = { 0, 0 };
|
|
|
|
- delay.msec = (PJ_ICE_SESS_KEEP_ALIVE_MIN +
|
|
- (pj_rand() % PJ_ICE_SESS_KEEP_ALIVE_MAX_RAND)) * 1000 /
|
|
+ delay.msec = (PJ_ICE_SESS_KEEP_ALIVE_MIN +
|
|
+ (pj_rand() % PJ_ICE_SESS_KEEP_ALIVE_MAX_RAND)) * 1000 /
|
|
ice->comp_cnt;
|
|
pj_time_val_normalize(&delay);
|
|
|
|
@@ -1370,13 +1591,13 @@ static void on_ice_complete(pj_ice_sess *ice, pj_status_t status)
|
|
if (!ice->is_complete) {
|
|
ice->is_complete = PJ_TRUE;
|
|
ice->ice_status = status;
|
|
-
|
|
+
|
|
pj_timer_heap_cancel_if_active(ice->stun_cfg.timer_heap, &ice->timer,
|
|
TIMER_NONE);
|
|
|
|
/* Log message */
|
|
- LOG4((ice->obj_name, "ICE process complete, status=%s",
|
|
- pj_strerror(status, ice->tmp.errmsg,
|
|
+ LOG4((ice->obj_name, "ICE process complete, status=%s",
|
|
+ pj_strerror(status, ice->tmp.errmsg,
|
|
sizeof(ice->tmp.errmsg)).ptr));
|
|
|
|
dump_checklist("Valid list", ice, &ice->valid_list);
|
|
@@ -1394,7 +1615,7 @@ static void on_ice_complete(pj_ice_sess *ice, pj_status_t status)
|
|
}
|
|
|
|
/* Update valid check and nominated check for the candidate */
|
|
-static void update_comp_check(pj_ice_sess *ice, unsigned comp_id,
|
|
+static void update_comp_check(pj_ice_sess *ice, unsigned comp_id,
|
|
pj_ice_sess_check *check)
|
|
{
|
|
pj_ice_sess_comp *comp;
|
|
@@ -1443,18 +1664,18 @@ static pj_bool_t check_ice_complete(pj_ice_sess *ice)
|
|
pj_bool_t no_pending_check = PJ_FALSE;
|
|
|
|
/* Still in 8.2. Updating States
|
|
- *
|
|
+ *
|
|
* o Once there is at least one nominated pair in the valid list for
|
|
* every component of at least one media stream and the state of the
|
|
* check list is Running:
|
|
- *
|
|
+ *
|
|
* * The agent MUST change the state of processing for its check
|
|
* list for that media stream to Completed.
|
|
- *
|
|
+ *
|
|
* * The agent MUST continue to respond to any checks it may still
|
|
* receive for that media stream, and MUST perform triggered
|
|
* checks if required by the processing of Section 7.2.
|
|
- *
|
|
+ *
|
|
* * The agent MAY begin transmitting media for this media stream as
|
|
* described in Section 11.1
|
|
*/
|
|
@@ -1474,28 +1695,28 @@ static pj_bool_t check_ice_complete(pj_ice_sess *ice)
|
|
|
|
/* Note: this is the stuffs that we don't do in 7.1.2.2.2, since our
|
|
* ICE session only supports one media stream for now:
|
|
- *
|
|
+ *
|
|
* 7.1.2.2.2. Updating Pair States
|
|
*
|
|
* 2. If there is a pair in the valid list for every component of this
|
|
* media stream (where this is the actual number of components being
|
|
* used, in cases where the number of components signaled in the SDP
|
|
* differs from offerer to answerer), the success of this check may
|
|
- * unfreeze checks for other media streams.
|
|
+ * unfreeze checks for other media streams.
|
|
*/
|
|
|
|
/* 7.1.2.3. Check List and Timer State Updates
|
|
* Regardless of whether the check was successful or failed, the
|
|
* completion of the transaction may require updating of check list and
|
|
* timer states.
|
|
- *
|
|
+ *
|
|
* If all of the pairs in the check list are now either in the Failed or
|
|
* Succeeded state, and there is not a pair in the valid list for each
|
|
* component of the media stream, the state of the check list is set to
|
|
- * Failed.
|
|
+ * Failed.
|
|
*/
|
|
|
|
- /*
|
|
+ /*
|
|
* See if all checks in the checklist have completed. If we do,
|
|
* then mark ICE processing as failed.
|
|
*/
|
|
@@ -1508,14 +1729,64 @@ static pj_bool_t check_ice_complete(pj_ice_sess *ice)
|
|
}
|
|
no_pending_check = (i == ice->clist.count);
|
|
}
|
|
+#if PJ_HAS_TCP
|
|
+ pj_bool_t hasTCP = PJ_FALSE;
|
|
+#endif
|
|
+ for (i=0; i<ice->clist.count; ++i) {
|
|
+ pj_ice_sess_check *c = &ice->clist.checks[i];
|
|
+
|
|
+#if PJ_HAS_TCP
|
|
+ if (c && c->lcand &&
|
|
+ (
|
|
+ c->lcand->transport == PJ_CAND_TCP_ACTIVE
|
|
+ )) {
|
|
+ hasTCP = PJ_TRUE;
|
|
+ }
|
|
+#endif
|
|
+ }
|
|
|
|
if (no_pending_check) {
|
|
+#if PJ_HAS_TCP
|
|
+ if (hasTCP) {
|
|
+ // STUN server procedure https://tools.ietf.org/html/rfc6544#section-7.2
|
|
+ // An ICE TCP agent, full or lite, MUST be prepared to receive incoming
|
|
+ // TCP connection requests on the base of any TCP candidate that is
|
|
+ // simultaneous-open or passive. When the connection request is
|
|
+ // received, the agent MUST accept it.
|
|
+ // https://tools.ietf.org/html/rfc5245#section-2.6
|
|
+ // In that case, allowing ICE to run a little longer might produce
|
|
+ // better results.
|
|
+ if (ice->timer.id == TIMER_NONE &&
|
|
+ ice->opt.agent_passive_timeout >= 0)
|
|
+ {
|
|
+ pj_time_val delay;
|
|
+
|
|
+ delay.sec = 0;
|
|
+ delay.msec = ice->opt.agent_passive_timeout;
|
|
+ pj_time_val_normalize(&delay);
|
|
+
|
|
+ pj_timer_heap_schedule_w_grp_lock(
|
|
+ ice->stun_cfg.timer_heap,
|
|
+ &ice->timer, &delay,
|
|
+ TIMER_CONTROLLING_TCP_PASSIVE_TIMEOUT,
|
|
+ ice->grp_lock);
|
|
+
|
|
+ LOG5((ice->obj_name,
|
|
+ "All checks have completed but failed. Just "
|
|
+ "wait for passive connections to timeout "
|
|
+ "(timeout=%d msec)",
|
|
+ ice->opt.agent_passive_timeout));
|
|
+ return PJ_FALSE;
|
|
+ }
|
|
+ }
|
|
+#endif
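The timeout used above comes from the new agent_passive_timeout option, whose default is taken from ICE_CONTROLLING_PASSIVE_TIMEOUT in pj_ice_sess_options_default(). A minimal sketch of how an application could tune it, assuming the usual pj_ice_sess_set_options() setter and an existing pj_ice_sess *ice; the 5000 ms value is arbitrary:

pj_ice_sess_options opt;

pj_ice_sess_options_default(&opt);
/* Wait up to 5 s for incoming TCP checks before declaring failure;
 * a negative value skips this grace period entirely. */
opt.agent_passive_timeout = 5000;
pj_ice_sess_set_options(ice, &opt);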
|
|
+
|
|
/* All checks have completed, but we don't have nominated pair.
|
|
- * If agent's role is controlled, check if all components have
|
|
- * valid pair. If it does, this means the controlled agent has
|
|
- * finished the check list and it's waiting for controlling
|
|
- * agent to send checks with USE-CANDIDATE flag set.
|
|
- */
|
|
+ * If agent's role is controlled, check if all components have
|
|
+ * valid pair. If it does, this means the controlled agent has
|
|
+ * finished the check list and it's waiting for controlling
|
|
+ * agent to send checks with USE-CANDIDATE flag set.
|
|
+ */
|
|
if (ice->role == PJ_ICE_SESS_ROLE_CONTROLLED) {
|
|
for (i=0; i < ice->comp_cnt; ++i) {
|
|
if (ice->comp[i].valid_check == NULL)
|
|
@@ -1524,16 +1795,16 @@ static pj_bool_t check_ice_complete(pj_ice_sess *ice)
|
|
|
|
if (i < ice->comp_cnt) {
|
|
/* This component ID doesn't have valid pair.
|
|
- * Mark ICE as failed.
|
|
- */
|
|
+ * Mark ICE as failed.
|
|
+ */
|
|
on_ice_complete(ice, PJNATH_EICEFAILED);
|
|
return PJ_TRUE;
|
|
} else {
|
|
/* All components have a valid pair.
|
|
- * We should wait until we receive nominated checks.
|
|
- */
|
|
+ * We should wait until we receive nominated checks.
|
|
+ */
|
|
if (ice->timer.id == TIMER_NONE &&
|
|
- ice->opt.controlled_agent_want_nom_timeout >= 0)
|
|
+ ice->opt.controlled_agent_want_nom_timeout >= 0)
|
|
{
|
|
pj_time_val delay;
|
|
|
|
@@ -1547,11 +1818,11 @@ static pj_bool_t check_ice_complete(pj_ice_sess *ice)
|
|
TIMER_CONTROLLED_WAIT_NOM,
|
|
ice->grp_lock);
|
|
|
|
- LOG5((ice->obj_name,
|
|
- "All checks have completed. Controlled agent now "
|
|
- "waits for nomination from controlling agent "
|
|
- "(timeout=%d msec)",
|
|
- ice->opt.controlled_agent_want_nom_timeout));
|
|
+ LOG5((ice->obj_name,
|
|
+ "All checks have completed. Controlled agent now "
|
|
+ "waits for nomination from controlling agent "
|
|
+ "(timeout=%d msec)",
|
|
+ ice->opt.controlled_agent_want_nom_timeout));
|
|
}
|
|
return PJ_FALSE;
|
|
}
|
|
@@ -1560,17 +1831,16 @@ static pj_bool_t check_ice_complete(pj_ice_sess *ice)
|
|
|
|
} else if (ice->is_nominating) {
|
|
/* We are controlling agent and all checks have completed but
|
|
- * there's at least one component without nominated pair (or
|
|
- * more likely we don't have any nominated pairs at all).
|
|
- */
|
|
+ * there's at least one component without nominated pair (or
|
|
+ * more likely we don't have any nominated pairs at all).
|
|
+ */
|
|
on_ice_complete(ice, PJNATH_EICEFAILED);
|
|
return PJ_TRUE;
|
|
-
|
|
} else {
|
|
/* We are controlling agent and all checks have completed. If
|
|
- * we have valid list for every component, then move on to
|
|
- * sending nominated check, otherwise we have failed.
|
|
- */
|
|
+ * we have valid list for every component, then move on to
|
|
+ * sending nominated check, otherwise we have failed.
|
|
+ */
|
|
for (i=0; i<ice->comp_cnt; ++i) {
|
|
if (ice->comp[i].valid_check == NULL)
|
|
break;
|
|
@@ -1578,17 +1848,17 @@ static pj_bool_t check_ice_complete(pj_ice_sess *ice)
|
|
|
|
if (i < ice->comp_cnt) {
|
|
/* At least one component doesn't have a valid check. Mark
|
|
- * ICE as failed.
|
|
- */
|
|
+ * ICE as failed.
|
|
+ */
|
|
on_ice_complete(ice, PJNATH_EICEFAILED);
|
|
return PJ_TRUE;
|
|
}
|
|
|
|
- /* Now it's time to send connectivity check with nomination
|
|
- * flag set.
|
|
- */
|
|
- LOG4((ice->obj_name,
|
|
- "All checks have completed, starting nominated checks now"));
|
|
+ /* Now it's time to send connectivity check with nomination
|
|
+ * flag set.
|
|
+ */
|
|
+ LOG4((ice->obj_name,
|
|
+ "All checks have completed, starting nominated checks now"));
|
|
start_nominated_check(ice);
|
|
return PJ_FALSE;
|
|
}
|
|
@@ -1602,7 +1872,7 @@ static pj_bool_t check_ice_complete(pj_ice_sess *ice)
|
|
if (/*check->err_code == PJ_SUCCESS && */
|
|
ice->role==PJ_ICE_SESS_ROLE_CONTROLLING &&
|
|
!ice->is_nominating &&
|
|
- ice->timer.id == TIMER_NONE)
|
|
+ ice->timer.id == TIMER_NONE)
|
|
{
|
|
pj_time_val delay;
|
|
|
|
@@ -1618,7 +1888,7 @@ static pj_bool_t check_ice_complete(pj_ice_sess *ice)
|
|
return PJ_FALSE;
|
|
}
|
|
|
|
- LOG4((ice->obj_name,
|
|
+ LOG4((ice->obj_name,
|
|
"Scheduling nominated check in %d ms",
|
|
ice->opt.nominated_check_delay));
|
|
|
|
@@ -1655,12 +1925,12 @@ static pj_bool_t on_check_complete(pj_ice_sess *ice,
|
|
comp = find_comp(ice, check->lcand->comp_id);
|
|
|
|
/* 7.1.2.2.2. Updating Pair States
|
|
- *
|
|
+ *
|
|
* The agent sets the state of the pair that generated the check to
|
|
* Succeeded. The success of this check might also cause the state of
|
|
* other checks to change as well. The agent MUST perform the following
|
|
* two steps:
|
|
- *
|
|
+ *
|
|
* 1. The agent changes the states for all other Frozen pairs for the
|
|
* same media stream and same foundation to Waiting. Typically
|
|
* these other pairs will have different component IDs but not
|
|
@@ -1692,7 +1962,7 @@ static pj_bool_t on_check_complete(pj_ice_sess *ice,
|
|
}
|
|
|
|
/* 8.2. Updating States
|
|
- *
|
|
+ *
|
|
* For both controlling and controlled agents, the state of ICE
|
|
* processing depends on the presence of nominated candidate pairs in
|
|
* the valid list and on the state of the check list:
|
|
@@ -1723,10 +1993,10 @@ static pj_bool_t on_check_complete(pj_ice_sess *ice,
|
|
if (c->state < PJ_ICE_SESS_CHECK_STATE_IN_PROGRESS) {
|
|
|
|
/* Just fail Frozen/Waiting check */
|
|
- LOG5((ice->obj_name,
|
|
+ LOG5((ice->obj_name,
|
|
"Check %s to be failed because state is %s",
|
|
- dump_check(ice->tmp.txt, sizeof(ice->tmp.txt),
|
|
- &ice->clist, c),
|
|
+ dump_check(ice->tmp.txt, sizeof(ice->tmp.txt),
|
|
+ &ice->clist, c),
|
|
check_state_name[c->state]));
|
|
check_set_state(ice, c, PJ_ICE_SESS_CHECK_STATE_FAILED,
|
|
PJ_ECANCELLED);
|
|
@@ -1737,11 +2007,11 @@ static pj_bool_t on_check_complete(pj_ice_sess *ice,
|
|
|
|
/* State is IN_PROGRESS, cancel transaction */
|
|
if (c->tdata) {
|
|
- LOG5((ice->obj_name,
|
|
+ LOG5((ice->obj_name,
|
|
"Cancelling check %s (In Progress)",
|
|
- dump_check(ice->tmp.txt, sizeof(ice->tmp.txt),
|
|
+ dump_check(ice->tmp.txt, sizeof(ice->tmp.txt),
|
|
&ice->clist, c)));
|
|
- pj_stun_session_cancel_req(comp->stun_sess,
|
|
+ pj_stun_session_cancel_req(comp->stun_sess,
|
|
c->tdata, PJ_FALSE, 0);
|
|
c->tdata = NULL;
|
|
check_set_state(ice, c, PJ_ICE_SESS_CHECK_STATE_FAILED,
|
|
@@ -1755,6 +2025,44 @@ static pj_bool_t on_check_complete(pj_ice_sess *ice,
|
|
return check_ice_complete(ice);
|
|
}
|
|
|
|
+static void on_tcp_connect_timeout(pj_ice_sess* ice)
+{
+    pj_timer_heap_cancel_if_active(ice->stun_cfg.timer_heap, &ice->timer_connect,
+                                   TIMER_NONE);
+
+    pj_bool_t first_found = PJ_FALSE, set_timer = PJ_FALSE;
+
+    for (int i = 0; i < ice->clist.count && !set_timer; ++i) {
+        pj_ice_sess_check *check = &ice->clist.checks[i];
+        if (check->state == PJ_ICE_SESS_CHECK_STATE_PENDING) {
+            if (first_found) {
+                set_timer = PJ_TRUE;
+            } else {
+                first_found = PJ_TRUE;
+                if (*ice->cb.close_tcp_connection)
+                    (*ice->cb.close_tcp_connection)(ice, i);
+
+                check_set_state(ice, check,
+                                PJ_ICE_SESS_CHECK_STATE_FAILED, PJ_ECANCELLED);
+                on_check_complete(ice, check);
+            }
+        }
+    }
+
+    if (set_timer && ice->timer_connect.id == TIMER_NONE) {
+        /* Reschedule */
+        pj_time_val delay = {
+            .sec = 15,
+            .msec = 0
+        };
+        pj_time_val_normalize(&delay);
+        pj_timer_heap_schedule_w_grp_lock(ice->stun_cfg.timer_heap,
+                                          &ice->timer_connect, &delay,
+                                          TIMER_CONNECTION_TIMEOUT,
+                                          ice->grp_lock);
+    }
+}
|
|
+
|
|
|
|
/* Get foundation index of a check pair. This function can also be used for
|
|
* adding a new foundation (combination of local & remote cands foundations)
|
|
@@ -1876,7 +2184,7 @@ static pj_status_t add_rcand_and_update_checklist(
|
|
if (j < ice->rcand_cnt)
|
|
continue;
|
|
}
|
|
-
|
|
+
|
|
/* Available cand slot? */
|
|
if (ice->rcand_cnt >= PJ_ICE_MAX_CAND) {
|
|
char tmp[PJ_INET6_ADDRSTRLEN + 10];
|
|
@@ -1912,10 +2220,10 @@ static pj_status_t add_rcand_and_update_checklist(
|
|
if (discard_check(ice, clist, &max_prio) == 0)
|
|
continue;
|
|
}
|
|
-
|
|
+
|
|
/* A local candidate is paired with a remote candidate if
|
|
- * and only if the two candidates have the same component ID
|
|
- * and have the same IP address version.
|
|
+ * and only if the two candidates have the same component ID
|
|
+ * and have the same IP address version.
|
|
*/
|
|
if ((lcand->comp_id != rcand->comp_id) ||
|
|
(lcand->addr.addr.sa_family != rcand->addr.addr.sa_family))
|
|
@@ -1923,6 +2231,29 @@ static pj_status_t add_rcand_and_update_checklist(
|
|
continue;
|
|
}
|
|
|
|
+        /* Section 6.2, RFC 6544 (https://tools.ietf.org/html/rfc6544)
+         * As with UDP, check lists are formed only by full ICE implementations.
+         * When forming candidate pairs, the following types of TCP candidates
+         * can be paired with each other:
+         *
+         *     Local            Remote
+         *     Candidate        Candidate
+         *     ---------------------------
+         *     tcp-so           tcp-so
+         *     tcp-active       tcp-passive
+         *     tcp-passive      tcp-active
+         */
+        if ((lcand->transport == PJ_CAND_UDP &&
+             rcand->transport != PJ_CAND_UDP) ||
+            (lcand->transport == PJ_CAND_TCP_PASSIVE &&
+             rcand->transport != PJ_CAND_TCP_ACTIVE) ||
+            (lcand->transport == PJ_CAND_TCP_ACTIVE &&
+             rcand->transport != PJ_CAND_TCP_PASSIVE) ||
+            (lcand->transport == PJ_CAND_TCP_SO &&
+             rcand->transport != PJ_CAND_TCP_SO))
+        {
+            continue;
+        }
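The pairing rule above can also be read as the following predicate, equivalent to the negation of the skip condition (a sketch for clarity only, not code from this patch; the helper name is hypothetical):

/* Sketch: returns PJ_TRUE when a local/remote transport combination may be
 * paired, i.e. exactly when the check above does NOT skip the pair. */
static pj_bool_t transports_can_pair(pj_ice_cand_transport loc,
                                     pj_ice_cand_transport rem)
{
    return (loc == PJ_CAND_UDP         && rem == PJ_CAND_UDP)         ||
           (loc == PJ_CAND_TCP_ACTIVE  && rem == PJ_CAND_TCP_PASSIVE) ||
           (loc == PJ_CAND_TCP_PASSIVE && rem == PJ_CAND_TCP_ACTIVE)  ||
           (loc == PJ_CAND_TCP_SO      && rem == PJ_CAND_TCP_SO);
}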
|
|
#if 0
|
|
/* Trickle ICE:
|
|
* Make sure that pair has not been added to checklist
|
|
@@ -1952,6 +2283,9 @@ static pj_status_t add_rcand_and_update_checklist(
|
|
chk->state = PJ_ICE_SESS_CHECK_STATE_FROZEN;
|
|
chk->foundation_idx = get_check_foundation_idx(ice, lcand, rcand,
|
|
PJ_TRUE);
|
|
+#if PJ_HAS_TCP
|
|
+ chk->reconnect_count = 0;
|
|
+#endif
|
|
|
|
/* Check if foundation cannot be added (e.g: list is full) */
|
|
if (chk->foundation_idx < 0)
|
|
@@ -2139,6 +2473,7 @@ PJ_DEF(pj_status_t) pj_ice_sess_create_check_list(
|
|
td = PJ_POOL_ZALLOC_T(ice->pool, timer_data);
|
|
td->ice = ice;
|
|
td->clist = clist;
|
|
+ td->first_packet_counter = 1;
|
|
clist->timer.user_data = (void*)td;
|
|
clist->timer.cb = &periodic_timer;
|
|
|
|
@@ -2196,7 +2531,7 @@ PJ_DEF(pj_status_t) pj_ice_sess_update_check_list(
|
|
pj_grp_lock_release(ice->grp_lock);
|
|
return PJ_SUCCESS;
|
|
}
|
|
-
|
|
+
|
|
/* Verify remote ufrag & passwd, if remote candidate specified */
|
|
if (rem_cand_cnt && (pj_strcmp(&ice->tx_ufrag, rem_ufrag) ||
|
|
pj_strcmp(&ice->tx_pass, rem_passwd)))
|
|
@@ -2227,8 +2562,38 @@ PJ_DEF(pj_status_t) pj_ice_sess_update_check_list(
|
|
return status;
|
|
}
|
|
|
|
+static pj_status_t send_connectivity_check(pj_ice_sess *ice,
|
|
+ pj_ice_sess_checklist *clist,
|
|
+ unsigned check_id,
|
|
+ pj_bool_t nominate,
|
|
+ pj_ice_msg_data *msg_data)
|
|
+{
|
|
+ pj_ice_sess_check *check;
|
|
+ const pj_ice_sess_cand *lcand;
|
|
+ const pj_ice_sess_cand *rcand;
|
|
+ pj_ice_sess_comp *comp;
|
|
+
|
|
+ check = &clist->checks[check_id];
|
|
+ lcand = check->lcand;
|
|
+ rcand = check->rcand;
|
|
+ comp = find_comp(ice, lcand->comp_id);
|
|
+
|
|
+ /* Note that USERNAME and MESSAGE-INTEGRITY will be added by the
|
|
+ * STUN session.
|
|
+ */
|
|
+
|
|
+ /* Initiate STUN transaction to send the request */
|
|
+
|
|
+ return pj_stun_session_send_msg(comp->stun_sess, msg_data, PJ_FALSE,
|
|
+ pj_stun_session_tp_type(comp->stun_sess)==
|
|
+ PJ_STUN_TP_UDP,
|
|
+ &rcand->addr,
|
|
+ pj_sockaddr_get_len(&rcand->addr),
|
|
+ check->tdata);
|
|
+}
|
|
+
|
|
/* Perform check on the specified candidate pair. */
|
|
-static pj_status_t perform_check(pj_ice_sess *ice,
|
|
+static pj_status_t perform_check(pj_ice_sess *ice,
|
|
pj_ice_sess_checklist *clist,
|
|
unsigned check_id,
|
|
pj_bool_t nominate)
|
|
@@ -2237,22 +2602,20 @@ static pj_status_t perform_check(pj_ice_sess *ice,
|
|
pj_ice_msg_data *msg_data;
|
|
pj_ice_sess_check *check;
|
|
const pj_ice_sess_cand *lcand;
|
|
- const pj_ice_sess_cand *rcand;
|
|
pj_uint32_t prio;
|
|
pj_status_t status;
|
|
|
|
check = &clist->checks[check_id];
|
|
lcand = check->lcand;
|
|
- rcand = check->rcand;
|
|
comp = find_comp(ice, lcand->comp_id);
|
|
|
|
- LOG5((ice->obj_name,
|
|
- "Sending connectivity check for check %s",
|
|
+ LOG5((ice->obj_name,
|
|
+ "Sending connectivity check for check %s",
|
|
dump_check(ice->tmp.txt, sizeof(ice->tmp.txt), clist, check)));
|
|
pj_log_push_indent();
|
|
|
|
/* Create request */
|
|
- status = pj_stun_session_create_req(comp->stun_sess,
|
|
+ status = pj_stun_session_create_req(comp->stun_sess,
|
|
PJ_STUN_BINDING_REQUEST, PJ_STUN_MAGIC,
|
|
NULL, &check->tdata);
|
|
if (status != PJ_SUCCESS) {
|
|
@@ -2282,7 +2645,7 @@ static pj_status_t perform_check(pj_ice_sess *ice,
|
|
((1 << PJ_ICE_LOCAL_PREF_BITS) - 1) - lcand->id,
|
|
lcand->comp_id);
|
|
#endif
|
|
- pj_stun_msg_add_uint_attr(check->tdata->pool, check->tdata->msg,
|
|
+ pj_stun_msg_add_uint_attr(check->tdata->pool, check->tdata->msg,
|
|
PJ_STUN_ATTR_PRIORITY, prio);
|
|
|
|
/* Add USE-CANDIDATE and set this check to nominated.
|
|
@@ -2295,44 +2658,84 @@ static pj_status_t perform_check(pj_ice_sess *ice,
|
|
check->nominated = PJ_TRUE;
|
|
}
|
|
|
|
- pj_stun_msg_add_uint64_attr(check->tdata->pool, check->tdata->msg,
|
|
+ pj_stun_msg_add_uint64_attr(check->tdata->pool, check->tdata->msg,
|
|
PJ_STUN_ATTR_ICE_CONTROLLING,
|
|
&ice->tie_breaker);
|
|
|
|
} else {
|
|
- pj_stun_msg_add_uint64_attr(check->tdata->pool, check->tdata->msg,
|
|
- PJ_STUN_ATTR_ICE_CONTROLLED,
|
|
- &ice->tie_breaker);
|
|
- }
|
|
-
|
|
|
|
- /* Note that USERNAME and MESSAGE-INTEGRITY will be added by the
|
|
- * STUN session.
|
|
- */
|
|
+ pj_stun_msg_add_uint64_attr(check->tdata->pool, check->tdata->msg,
|
|
+ PJ_STUN_ATTR_ICE_CONTROLLED,
|
|
+ &ice->tie_breaker);
|
|
+ }
|
|
|
|
- /* Initiate STUN transaction to send the request */
|
|
- status = pj_stun_session_send_msg(comp->stun_sess, msg_data, PJ_FALSE,
|
|
- PJ_TRUE, &rcand->addr,
|
|
- pj_sockaddr_get_len(&rcand->addr),
|
|
- check->tdata);
|
|
- if (status != PJ_SUCCESS) {
|
|
- check->tdata = NULL;
|
|
- pjnath_perror(ice->obj_name, "Error sending STUN request", status);
|
|
- pj_log_pop_indent();
|
|
- return status;
|
|
+#if PJ_HAS_TCP
|
|
+ switch (lcand->transport) {
|
|
+ case PJ_CAND_TCP_ACTIVE:
|
|
+ switch (check->state) {
|
|
+ case PJ_ICE_SESS_CHECK_STATE_NEEDS_RETRY:
|
|
+ status = (*ice->cb.reconnect_tcp_connection)(ice,check_id);
|
|
+ break;
|
|
+ case PJ_ICE_SESS_CHECK_STATE_NEEDS_FIRST_PACKET:
|
|
+ status = send_connectivity_check(ice, clist, check_id,
|
|
+ nominate, msg_data);
|
|
+ break;
|
|
+ default:
|
|
+ pj_timer_heap_cancel_if_active(ice->stun_cfg.timer_heap,
|
|
+ &ice->timer_connect, TIMER_NONE);
|
|
+ status = (*ice->cb.wait_tcp_connection)(ice, check_id);
|
|
+ if (ice->timer_connect.id != TIMER_NONE) {
|
|
+ pj_assert(!"Not expected any timer active");
|
|
+ } else {
|
|
+ LOG5((ice->obj_name,
|
|
+ "Scheduling connection time-out for check %s",
|
|
+ dump_check(ice->tmp.txt, sizeof(ice->tmp.txt), clist, check)));
|
|
+
|
|
+ pj_time_val delay = {
|
|
+ .sec = 0,
|
|
+ .msec = PJ_ICE_TCP_CONNECTION_TIMEOUT,
|
|
+ };
|
|
+ pj_time_val_normalize(&delay);
|
|
+ pj_timer_heap_schedule_w_grp_lock(ice->stun_cfg.timer_heap,
|
|
+ &ice->timer_connect, &delay,
|
|
+ TIMER_CONNECTION_TIMEOUT,
|
|
+ ice->grp_lock);
|
|
+ }
|
|
+ break;
|
|
+ }
|
|
+ break;
|
|
+ case PJ_CAND_TCP_PASSIVE:
|
|
+ case PJ_CAND_TCP_SO:
|
|
+ case PJ_CAND_UDP:
|
|
+ default:
|
|
+ status = send_connectivity_check(ice, clist, check_id, nominate, msg_data);
|
|
+ break;
|
|
}
|
|
+#else
|
|
+ status = send_connectivity_check(ice, clist, check_id, nominate, msg_data);
|
|
+#endif
|
|
|
|
- check_set_state(ice, check, PJ_ICE_SESS_CHECK_STATE_IN_PROGRESS,
|
|
- PJ_SUCCESS);
|
|
+ if (status == PJ_SUCCESS) {
|
|
+ check_set_state(ice, check, PJ_ICE_SESS_CHECK_STATE_IN_PROGRESS,
|
|
+ status);
|
|
+ } else if (status == PJ_EPENDING) {
|
|
+ check_set_state(ice, check, PJ_ICE_SESS_CHECK_STATE_PENDING, status);
|
|
+ } else if (check->rcand->type == PJ_ICE_CAND_TYPE_RELAYED) {
|
|
+ /* TODO (sblin) remove this - https://github.com/coturn/coturn/issues/408 */
|
|
+ check_set_state(ice, check, PJ_ICE_SESS_CHECK_STATE_NEEDS_FIRST_PACKET,
|
|
+ status);
|
|
+ } else {
|
|
+ check->tdata = NULL;
|
|
+ pjnath_perror(ice->obj_name, "Error sending STUN request (perform check)", status);
|
|
+ }
|
|
pj_log_pop_indent();
|
|
- return PJ_SUCCESS;
|
|
+ return status;
|
|
}
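To summarize the new error handling above, the result of sending the connectivity check now maps onto check states (including the new ones introduced by this patch) roughly as follows. This is an illustrative restatement only, not pjnath API; in the actual code the final failure case just clears tdata and logs, and the caller then marks the check failed:

/* Sketch of the mapping used in perform_check() above. */
static pj_ice_sess_check_state state_for_send_result(pj_status_t st,
                                                     pj_bool_t rcand_is_relayed)
{
    if (st == PJ_SUCCESS)
        return PJ_ICE_SESS_CHECK_STATE_IN_PROGRESS;
    if (st == PJ_EPENDING)      /* e.g. TCP connect still being established */
        return PJ_ICE_SESS_CHECK_STATE_PENDING;
    if (rcand_is_relayed)       /* coturn workaround, see the TODO above */
        return PJ_ICE_SESS_CHECK_STATE_NEEDS_FIRST_PACKET;
    return PJ_ICE_SESS_CHECK_STATE_FAILED;
}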
|
|
|
|
-
|
|
/* Start periodic check for the specified checklist.
|
|
- * This callback is called by timer on every Ta (20msec by default)
|
|
+ * This callback is called by timer on every Ta
|
|
*/
|
|
-static pj_status_t start_periodic_check(pj_timer_heap_t *th,
|
|
+static pj_status_t start_periodic_check(pj_timer_heap_t *th,
|
|
pj_timer_entry *te)
|
|
{
|
|
timer_data *td;
|
|
@@ -2345,6 +2748,8 @@ static pj_status_t start_periodic_check(pj_timer_heap_t *th,
|
|
td = (struct timer_data*) te->user_data;
|
|
ice = td->ice;
|
|
clist = td->clist;
|
|
+ pj_time_val timeout = {0, PJ_ICE_TA_VAL};
|
|
+ pj_bool_t check_pending = PJ_FALSE;
|
|
|
|
pj_grp_lock_acquire(ice->grp_lock);
|
|
|
|
@@ -2401,10 +2806,53 @@ static pj_status_t start_periodic_check(pj_timer_heap_t *th,
|
|
}
|
|
}
|
|
|
|
+#if PJ_HAS_TCP
|
|
/* If we don't have anything in Waiting state, find any pair with
|
|
- * highest priority in Frozen state.
|
|
+ * highest priority in Retry state.
|
|
*/
|
|
+
|
|
if (!check) {
|
|
+ for (i = 0; i < clist->count; ++i) {
|
|
+ pj_ice_sess_check *c = &clist->checks[i];
|
|
+ // Reconnect closed TURN sockets
|
|
+ if (c->state == PJ_ICE_SESS_CHECK_STATE_NEEDS_RETRY) {
|
|
+ LOG5((ice->obj_name, "re-Starting periodic check for check %i (needs retry)", i));
|
|
+ check = c;
|
|
+ check_idx = i;
|
|
+
|
|
+ timeout.msec = PJ_ICE_TCP_RECONNECTION_DELAY;
|
|
+ timeout.sec = 0;
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (!check) {
|
|
+ // TODO (sblin) remove - https://github.com/coturn/coturn/issues/408
|
|
+ pj_bool_t inc_counter = PJ_TRUE;
|
|
+ for (i = 0; i < clist->count; ++i) {
|
|
+ pj_ice_sess_check *c = &clist->checks[i];
|
|
+ if (c->state == PJ_ICE_SESS_CHECK_STATE_NEEDS_FIRST_PACKET) {
|
|
+ if (inc_counter) {
|
|
+ td->first_packet_counter += 1;
|
|
+ inc_counter = PJ_FALSE;
|
|
+ }
|
|
+ if (td->first_packet_counter % 50 == 0) {
|
|
+ LOG5((ice->obj_name, "re-Starting periodic check for check %i (needs 1st packet)", i));
|
|
+ check = c;
|
|
+ check_idx = i;
|
|
+ }
|
|
+ check_pending = PJ_TRUE;
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+#endif
|
|
+
|
|
+ /* If we don't have anything in Waiting or Retry state, find any pair with
|
|
+ * highest priority in Frozen state.
|
|
+ */
|
|
+ if (!check && !check_pending) {
|
|
for (i=0; i<clist->count; ++i) {
|
|
pj_ice_sess_check *c = &clist->checks[i];
|
|
if (c->state == PJ_ICE_SESS_CHECK_STATE_FROZEN) {
|
|
@@ -2414,6 +2862,19 @@ static pj_status_t start_periodic_check(pj_timer_heap_t *th,
|
|
}
|
|
}
|
|
}
|
|
+
|
|
+#if PJ_HAS_TCP
|
|
+ if (!check && !check_pending) {
|
|
+ // If all sockets are pending, do nothing
|
|
+ for (i = 0; i < clist->count; ++i) {
|
|
+ pj_ice_sess_check *c = &clist->checks[i];
|
|
+ if (c->state == PJ_ICE_SESS_CHECK_STATE_PENDING) {
|
|
+ check_pending = PJ_TRUE;
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+#endif
|
|
}
|
|
|
|
/* Perform check & schedule next check for next candidate pair,
|
|
@@ -2421,15 +2882,14 @@ static pj_status_t start_periodic_check(pj_timer_heap_t *th,
|
|
* or empty checklist).
|
|
*/
|
|
if (check) {
|
|
- pj_time_val timeout = {0, PJ_ICE_TA_VAL};
|
|
-
|
|
status = perform_check(ice, clist, check_idx, ice->is_nominating);
|
|
- if (status != PJ_SUCCESS) {
|
|
+ if (status != PJ_SUCCESS && status != PJ_EPENDING) {
|
|
check_set_state(ice, check,
|
|
PJ_ICE_SESS_CHECK_STATE_FAILED, status);
|
|
on_check_complete(ice, check);
|
|
}
|
|
-
|
|
+ }
|
|
+ if (check || check_pending) {
|
|
/* Schedule next check */
|
|
pj_time_val_normalize(&timeout);
|
|
pj_timer_heap_schedule_w_grp_lock(th, te, &timeout, PJ_TRUE,
|
|
@@ -2441,7 +2901,6 @@ static pj_status_t start_periodic_check(pj_timer_heap_t *th,
|
|
return PJ_SUCCESS;
|
|
}
|
|
|
|
-
|
|
/* Start sending connectivity check with USE-CANDIDATE */
|
|
static void start_nominated_check(pj_ice_sess *ice)
|
|
{
|
|
@@ -2483,7 +2942,7 @@ static void start_nominated_check(pj_ice_sess *ice)
|
|
{
|
|
pj_assert(c->err_code == PJ_SUCCESS);
|
|
c->state = PJ_ICE_SESS_CHECK_STATE_FROZEN;
|
|
- check_set_state(ice, c, PJ_ICE_SESS_CHECK_STATE_WAITING,
|
|
+ check_set_state(ice, c, PJ_ICE_SESS_CHECK_STATE_WAITING,
|
|
PJ_SUCCESS);
|
|
break;
|
|
}
|
|
@@ -2511,7 +2970,7 @@ static void start_nominated_check(pj_ice_sess *ice)
|
|
}
|
|
|
|
/* Timer callback to perform periodic check */
|
|
-static void periodic_timer(pj_timer_heap_t *th,
|
|
+static void periodic_timer(pj_timer_heap_t *th,
|
|
pj_timer_entry *te)
|
|
{
|
|
start_periodic_check(th, te);
|
|
@@ -2550,9 +3009,9 @@ PJ_DEF(pj_status_t) pj_ice_sess_start_check(pj_ice_sess *ice)
|
|
* media stream is the first media stream when it is described by
|
|
* the first m-line in the SDP offer and answer). For that media
|
|
* stream, it:
|
|
- *
|
|
+ *
|
|
* - Groups together all of the pairs with the same foundation,
|
|
- *
|
|
+ *
|
|
* - For each group, sets the state of the pair with the lowest
|
|
* component ID to Waiting. If there is more than one such pair,
|
|
* the one with the highest priority is used.
|
|
@@ -2600,7 +3059,7 @@ PJ_DEF(pj_status_t) pj_ice_sess_start_check(pj_ice_sess *ice)
|
|
/* First, perform all pending triggered checks, simultaneously. */
|
|
rcheck = ice->early_check.next;
|
|
while (rcheck != &ice->early_check) {
|
|
- LOG4((ice->obj_name,
|
|
+ LOG4((ice->obj_name,
|
|
"Performing delayed triggerred check for component %d",
|
|
rcheck->comp_id));
|
|
pj_log_push_indent();
|
|
@@ -2611,7 +3070,7 @@ PJ_DEF(pj_status_t) pj_ice_sess_start_check(pj_ice_sess *ice)
|
|
pj_list_init(&ice->early_check);
|
|
|
|
/* Start periodic check */
|
|
- /* We could start it immediately like below, but lets schedule timer
|
|
+ /* We could start it immediately like below, but lets schedule timer
|
|
* instead to reduce stack usage:
|
|
* return start_periodic_check(ice->stun_cfg.timer_heap, &clist->timer);
|
|
*/
|
|
@@ -2663,7 +3122,7 @@ static pj_status_t on_stun_send_msg(pj_stun_session *sess,
|
|
pj_ice_sess *ice = sd->ice;
|
|
pj_ice_msg_data *msg_data = (pj_ice_msg_data*) token;
|
|
pj_status_t status;
|
|
-
|
|
+
|
|
pj_grp_lock_acquire(ice->grp_lock);
|
|
|
|
if (ice->is_destroying) {
|
|
@@ -2680,6 +3139,252 @@ static pj_status_t on_stun_send_msg(pj_stun_session *sess,
|
|
return status;
|
|
}
|
|
|
|
+static pj_ice_sess_check* get_current_check_at_state(pj_ice_sess *ice,
|
|
+ pj_sockaddr_t *remote_addr,
|
|
+ pj_ice_sess_check_state state,
|
|
+ int *current_check)
|
|
+{
|
|
+ if (!ice || !remote_addr)
|
|
+ return NULL;
|
|
+ // NOTE: Multiple checks can have the same remote, we only take care of the first
|
|
+ // First, check if the TCP is really connected. If not, abort
|
|
+ pj_ice_sess_check *check = NULL;
|
|
+ for (int i = 0; i < ice->clist.count; ++i) {
|
|
+ // Find related check
|
|
+ pj_ice_sess_check *c = &ice->clist.checks[i];
|
|
+ /* Host candidate not found this this srflx! */
|
|
+ if (pj_sockaddr_cmp(remote_addr, &c->rcand->addr) == 0) {
|
|
+ if (c->tdata == NULL || c->state != state)
|
|
+ continue;
|
|
+ /* Match */
|
|
+ check = c;
|
|
+ if (current_check) *current_check = i;
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+ return check;
|
|
+}
|
|
+
|
|
+void ice_sess_on_peer_connection(pj_ice_sess *ice,
|
|
+ pj_uint8_t transport_id,
|
|
+ pj_status_t status,
|
|
+ pj_sockaddr_t* remote_addr)
|
|
+{
|
|
+ // The TCP link is now ready. We can now send the first STUN message (send
|
|
+ // connectivity check) This should trigger on_stun_request_complete when
|
|
+ // finished
|
|
+ if (!remote_addr)
|
|
+ return;
|
|
+
|
|
+ pj_grp_lock_acquire(ice->grp_lock);
|
|
+
|
|
+ int current_check = -1;
|
|
+ pj_ice_sess_check *check = get_current_check_at_state(ice,remote_addr,
|
|
+ PJ_ICE_SESS_CHECK_STATE_PENDING,
|
|
+ ¤t_check);
|
|
+ if (!check) {
|
|
+ // Handle peer reflexive candidates (incoming are still waiting here)
|
|
+ check = get_current_check_at_state(ice, remote_addr,
|
|
+ PJ_ICE_SESS_CHECK_STATE_WAITING,
|
|
+ ¤t_check);
|
|
+ if (!check) {
|
|
+ pj_grp_lock_release(ice->grp_lock);
|
|
+ return;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ const pj_ice_sess_cand *rcand = check->rcand;
|
|
+ if (rcand->type == PJ_ICE_CAND_TYPE_RELAYED && (
|
|
+ status == PJ_ERRNO_START_SYS + 104 || status == 130054 /* CONNECTION RESET BY PEER */ ||
|
|
+ status == PJ_ERRNO_START_SYS + 111 /* Connection refused */
|
|
+ )) {
|
|
+        /**
+         * This part of the code is triggered when using ICE over TCP via TURN.
+         * In fact, the other peer has to authorize this peer to connect to
+         * the relayed candidate. This is done via set_perm on the other side.
+         * But from this side, we can't know if the peer has authorized us. If
+         * it has not, the connection will get a CONNECTION RESET BY PEER
+         * status. In this case, we try to reconnect a few times, with a delay
+         * between two attempts.
+         */
|
|
+ if (check->reconnect_count < PJ_ICE_TCP_MAX_RECONNECTION_COUNT) {
|
|
+ check->state = PJ_ICE_SESS_CHECK_STATE_NEEDS_RETRY;
|
|
+ check_set_state(ice, check,PJ_ICE_SESS_CHECK_STATE_NEEDS_RETRY,
|
|
+ status);
|
|
+ check->reconnect_count++;
|
|
+ } else {
|
|
+ // Max attempts reached. Fail this check.
|
|
+ LOG4((ice->obj_name, "Check %s: connection failed after %d attempts",
|
|
+ dump_check(ice->tmp.txt, sizeof(ice->tmp.txt), &ice->clist, check),
|
|
+ PJ_ICE_TCP_MAX_RECONNECTION_COUNT));
|
|
+ check_set_state(ice, check, PJ_ICE_SESS_CHECK_STATE_FAILED, status);
|
|
+ on_check_complete(ice, check);
|
|
+ }
|
|
+ pj_grp_lock_release(ice->grp_lock);
|
|
+ return;
|
|
+ } else if (status != PJ_SUCCESS) {
|
|
+ if (rcand->type == PJ_ICE_CAND_TYPE_RELAYED) {
|
|
+ char raddr[PJ_INET6_ADDRSTRLEN + 10];
|
|
+ PJ_LOG(4, (ice->obj_name,
|
|
+ "Connection to TURN (%s) failed with status %u",
|
|
+ pj_sockaddr_print(&rcand->addr, raddr, sizeof(raddr), 3), status));
|
|
+ }
|
|
+ check_set_state(ice, check, PJ_ICE_SESS_CHECK_STATE_FAILED, status);
|
|
+ on_check_complete(ice, check);
|
|
+ pj_grp_lock_release(ice->grp_lock);
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ // TCP is correctly connected. Craft the message to send
|
|
+ const pj_ice_sess_cand *lcand = check->lcand;
|
|
+ if (check->tdata == NULL) {
|
|
+ LOG5((ice->obj_name, "Error sending STUN request, empty data"));
|
|
+ pj_grp_lock_release(ice->grp_lock);
|
|
+ return;
|
|
+ }
|
|
+ pj_ice_msg_data *msg_data =
|
|
+ PJ_POOL_ZALLOC_T(check->tdata->pool, pj_ice_msg_data);
|
|
+
|
|
+ msg_data->transport_id = transport_id;
|
|
+ msg_data->has_req_data = PJ_TRUE;
|
|
+ msg_data->data.req.ice = ice;
|
|
+ msg_data->data.req.clist = &ice->clist;
|
|
+ msg_data->data.req.ckid = current_check;
|
|
+ msg_data->data.req.lcand = check->lcand;
|
|
+ msg_data->data.req.rcand = check->rcand;
|
|
+
|
|
+ pj_ice_sess_comp *comp = find_comp(ice, lcand->comp_id);
|
|
+ // Note that USERNAME and MESSAGE-INTEGRITY will be added by the
|
|
+ // STUN session.
|
|
+
|
|
+ // Initiate STUN transaction to send the request
|
|
+ status = pj_stun_session_send_msg(comp->stun_sess, msg_data,
|
|
+ PJ_FALSE, PJ_FALSE, &rcand->addr,
|
|
+ pj_sockaddr_get_len(&rcand->addr),
|
|
+ check->tdata);
|
|
+
|
|
+ if (rcand->type == PJ_ICE_CAND_TYPE_RELAYED && (
|
|
+ status == PJ_ERRNO_START_SYS + 104 || status == 130054 || /* CONNECTION RESET BY PEER */
|
|
+ status == PJ_ERRNO_START_SYS + 32 /* EPIPE */ ||
|
|
+ status == PJ_ERRNO_START_SYS + 111 /* Connection refused */
|
|
+ )) {
|
|
+        /**
+         * This part of the code is triggered when using ICE over TCP via TURN.
+         * In fact, the other peer has to authorize this peer to connect to
+         * the relayed candidate. This is done via set_perm on the other side.
+         * But from this side, we can't know if the peer has authorized us. If
+         * it has not, the connection will get a CONNECTION RESET BY PEER
+         * status. In this case, we can retry a bit later, and keep retrying
+         * until the check reaches its timeout.
+         */
|
|
+
|
|
+ if (check->reconnect_count < PJ_ICE_TCP_MAX_RECONNECTION_COUNT) {
|
|
+ check_set_state(ice, check, PJ_ICE_SESS_CHECK_STATE_NEEDS_RETRY,
|
|
+ status);
|
|
+ check->reconnect_count++;
|
|
+ } else {
|
|
+ // Max attempts reached. Fail this check.
|
|
+ LOG4((ice->obj_name, "Check %s: connection failed after %d attempts",
|
|
+ dump_check(ice->tmp.txt, sizeof(ice->tmp.txt), &ice->clist, check),
|
|
+ PJ_ICE_TCP_MAX_RECONNECTION_COUNT));
|
|
+ check_set_state(ice, check, PJ_ICE_SESS_CHECK_STATE_FAILED, status);
|
|
+ on_check_complete(ice, check);
|
|
+ pj_grp_lock_release(ice->grp_lock);
|
|
+ return;
|
|
+ }
|
|
+ } else if (status == PJ_EBUSY /* EBUSY */) {
|
|
+ check_set_state(ice, check, PJ_ICE_SESS_CHECK_STATE_NEEDS_FIRST_PACKET,
|
|
+ status);
|
|
+ } else if (status != PJ_SUCCESS) {
|
|
+
|
|
+ if (rcand->type == PJ_ICE_CAND_TYPE_RELAYED) {
|
|
+ char raddr[PJ_INET6_ADDRSTRLEN + 10];
|
|
+ PJ_LOG(5, (ice->obj_name,
|
|
+ "STUN send message to TURN (%s) failed with status %u",
|
|
+ pj_sockaddr_print(&rcand->addr, raddr, sizeof(raddr), 3), status));
|
|
+ }
|
|
+ check->tdata = NULL;
|
|
+ pjnath_perror(ice->obj_name, "Error sending STUN request (on peer connection)", status);
|
|
+ pj_log_pop_indent();
|
|
+ check_set_state(ice, check, PJ_ICE_SESS_CHECK_STATE_FAILED, status);
|
|
+ on_check_complete(ice, check);
|
|
+ } else {
|
|
+ check_set_state(ice, check, PJ_ICE_SESS_CHECK_STATE_IN_PROGRESS, status);
|
|
+ }
|
|
+ pj_grp_lock_release(ice->grp_lock);
|
|
+}
|
|
+
|
|
+void ice_sess_on_peer_reset_connection(pj_ice_sess *ice,
|
|
+ pj_uint8_t transport_id,
|
|
+ pj_sockaddr_t* remote_addr)
|
|
+{
|
|
+ // The TCP link is reset
|
|
+ if (!remote_addr)
|
|
+ return;
|
|
+
|
|
+ pj_grp_lock_acquire(ice->grp_lock);
|
|
+ pj_ice_sess_check *check = get_current_check_at_state(ice, remote_addr,
|
|
+ PJ_ICE_SESS_CHECK_STATE_PENDING,
|
|
+ NULL);
|
|
+ if (!check) {
|
|
+ // Handle peer reflexive candidates (incoming are still waiting here)
|
|
+ check = get_current_check_at_state(ice, remote_addr,
|
|
+ PJ_ICE_SESS_CHECK_STATE_IN_PROGRESS,
|
|
+ NULL);
|
|
+
|
|
+ if (!check) {
|
|
+ // Just check if it's not the first packet failing
|
|
+ check = get_current_check_at_state(ice, remote_addr,
|
|
+ PJ_ICE_SESS_CHECK_STATE_NEEDS_FIRST_PACKET,
|
|
+ NULL);
|
|
+ if (!check) {
|
|
+ pj_grp_lock_release(ice->grp_lock);
|
|
+ return;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ const pj_ice_sess_cand *rcand = check->rcand;
|
|
+ if (rcand->type == PJ_ICE_CAND_TYPE_RELAYED) {
|
|
+ char raddr[PJ_INET6_ADDRSTRLEN + 10];
|
|
+ PJ_LOG(5, (ice->obj_name,
|
|
+ "Connection to TURN (%s) is reset",
|
|
+ pj_sockaddr_print(&rcand->addr, raddr, sizeof(raddr), 3)));
|
|
+
|
|
+ check->state = PJ_ICE_SESS_CHECK_STATE_NEEDS_RETRY;
|
|
+ check_set_state(ice, check,
|
|
+ PJ_ICE_SESS_CHECK_STATE_NEEDS_RETRY, 120104);
|
|
+ }
|
|
+
|
|
+ pj_grp_lock_release(ice->grp_lock);
|
|
+}
|
|
+
|
|
+void ice_sess_on_peer_packet(pj_ice_sess *ice,
|
|
+ pj_uint8_t transport_id,
|
|
+ pj_sockaddr_t* remote_addr)
|
|
+{
|
|
+ // The TCP link received its bind request response
|
|
+ if (!ice || !remote_addr) {
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ pj_grp_lock_acquire(ice->grp_lock);
|
|
+ pj_ice_sess_check *check =
|
|
+ get_current_check_at_state(ice, remote_addr,
|
|
+ PJ_ICE_SESS_CHECK_STATE_NEEDS_FIRST_PACKET,
|
|
+ NULL);
|
|
+ if (!check) {
|
|
+ pj_grp_lock_release(ice->grp_lock);
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ const pj_ice_sess_cand *rcand = check->rcand;
|
|
+ if (rcand->type == PJ_ICE_CAND_TYPE_RELAYED) {
|
|
+ check_set_state(ice, check,
|
|
+ PJ_ICE_SESS_CHECK_STATE_IN_PROGRESS, PJ_SUCCESS);
|
|
+ }
|
|
+ pj_grp_lock_release(ice->grp_lock);
|
|
+}
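Taken together, these are the three notification hooks a TCP-capable transport is expected to call into the ICE session (prototypes as defined above); when to call them is up to the transport implementation, typically on connect completion, on connection reset, and on the first inbound packet respectively:

void ice_sess_on_peer_connection(pj_ice_sess *ice, pj_uint8_t transport_id,
                                 pj_status_t status, pj_sockaddr_t *remote_addr);
void ice_sess_on_peer_reset_connection(pj_ice_sess *ice, pj_uint8_t transport_id,
                                       pj_sockaddr_t *remote_addr);
void ice_sess_on_peer_packet(pj_ice_sess *ice, pj_uint8_t transport_id,
                             pj_sockaddr_t *remote_addr);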
|
|
|
|
/* This callback is called when outgoing STUN request completed */
|
|
static void on_stun_request_complete(pj_stun_session *stun_sess,
|
|
@@ -2765,13 +3470,13 @@ static void on_stun_request_complete(pj_stun_session *stun_sess,
|
|
*
|
|
* 7.1.2.1. Failure Cases:
|
|
*
|
|
- * If the request had contained the ICE-CONTROLLED attribute,
|
|
+ * If the request had contained the ICE-CONTROLLED attribute,
|
|
* the agent MUST switch to the controlling role if it has not
|
|
- * already done so. If the request had contained the
|
|
- * ICE-CONTROLLING attribute, the agent MUST switch to the
|
|
+ * already done so. If the request had contained the
|
|
+ * ICE-CONTROLLING attribute, the agent MUST switch to the
|
|
* controlled role if it has not already done so. Once it has
|
|
* switched, the agent MUST immediately retry the request with
|
|
- * the ICE-CONTROLLING or ICE-CONTROLLED attribute reflecting
|
|
+ * the ICE-CONTROLLING or ICE-CONTROLLED attribute reflecting
|
|
* its new role.
|
|
*/
|
|
pj_ice_sess_role new_role = PJ_ICE_SESS_ROLE_UNKNOWN;
|
|
@@ -2779,7 +3484,7 @@ static void on_stun_request_complete(pj_stun_session *stun_sess,
|
|
|
|
if (pj_stun_msg_find_attr(req, PJ_STUN_ATTR_ICE_CONTROLLING, 0)) {
|
|
new_role = PJ_ICE_SESS_ROLE_CONTROLLED;
|
|
- } else if (pj_stun_msg_find_attr(req, PJ_STUN_ATTR_ICE_CONTROLLED,
|
|
+ } else if (pj_stun_msg_find_attr(req, PJ_STUN_ATTR_ICE_CONTROLLED,
|
|
0)) {
|
|
new_role = PJ_ICE_SESS_ROLE_CONTROLLING;
|
|
} else {
|
|
@@ -2788,7 +3493,7 @@ static void on_stun_request_complete(pj_stun_session *stun_sess,
|
|
}
|
|
|
|
if (new_role != ice->role) {
|
|
- LOG4((ice->obj_name,
|
|
+ LOG4((ice->obj_name,
|
|
"Changing role because of role conflict response"));
|
|
pj_ice_sess_change_role(ice, new_role);
|
|
}
|
|
@@ -2805,9 +3510,9 @@ static void on_stun_request_complete(pj_stun_session *stun_sess,
|
|
}
|
|
|
|
pj_strerror(status, errmsg, sizeof(errmsg));
|
|
- LOG4((ice->obj_name,
|
|
+ LOG4((ice->obj_name,
|
|
"Check %s%s: connectivity check FAILED: %s",
|
|
- dump_check(ice->tmp.txt, sizeof(ice->tmp.txt),
|
|
+ dump_check(ice->tmp.txt, sizeof(ice->tmp.txt),
|
|
&ice->clist, check),
|
|
(check->nominated ? " (nominated)" : " (not nominated)"),
|
|
errmsg));
|
|
@@ -2836,7 +3541,7 @@ static void on_stun_request_complete(pj_stun_session *stun_sess,
|
|
* is synthesized from IPv4).
|
|
*/
|
|
pj_sockaddr synth_addr;
|
|
-
|
|
+
|
|
status = pj_sockaddr_synthesize(pj_AF_INET6(), &synth_addr,
|
|
&check->rcand->addr);
|
|
if (status == PJ_SUCCESS &&
|
|
@@ -2848,9 +3553,9 @@ static void on_stun_request_complete(pj_stun_session *stun_sess,
|
|
|
|
if (pj_sockaddr_cmp(&check->rcand->addr, source_addr) != 0) {
|
|
status = PJNATH_EICEINSRCADDR;
|
|
- LOG4((ice->obj_name,
|
|
+ LOG4((ice->obj_name,
|
|
"Check %s%s: connectivity check FAILED: source address mismatch",
|
|
- dump_check(ice->tmp.txt, sizeof(ice->tmp.txt),
|
|
+ dump_check(ice->tmp.txt, sizeof(ice->tmp.txt),
|
|
&ice->clist, check),
|
|
(check->nominated ? " (nominated)" : " (not nominated)")));
|
|
pj_log_push_indent();
|
|
@@ -2862,24 +3567,24 @@ static void on_stun_request_complete(pj_stun_session *stun_sess,
|
|
}
|
|
|
|
/* 7.1.2.2. Success Cases
|
|
- *
|
|
+ *
|
|
* A check is considered to be a success if all of the following are
|
|
* true:
|
|
- *
|
|
+ *
|
|
* o the STUN transaction generated a success response
|
|
- *
|
|
+ *
|
|
* o the source IP address and port of the response equals the
|
|
* destination IP address and port that the Binding Request was sent
|
|
* to
|
|
- *
|
|
+ *
|
|
* o the destination IP address and port of the response match the
|
|
* source IP address and port that the Binding Request was sent from
|
|
*/
|
|
|
|
|
|
- LOG4((ice->obj_name,
|
|
+ LOG4((ice->obj_name,
|
|
"Check %s%s: connectivity check SUCCESS",
|
|
- dump_check(ice->tmp.txt, sizeof(ice->tmp.txt),
|
|
+ dump_check(ice->tmp.txt, sizeof(ice->tmp.txt),
|
|
&ice->clist, check),
|
|
(check->nominated ? " (nominated)" : " (not nominated)")));
|
|
|
|
@@ -2887,7 +3592,7 @@ static void on_stun_request_complete(pj_stun_session *stun_sess,
|
|
xaddr = (pj_stun_xor_mapped_addr_attr*)
|
|
pj_stun_msg_find_attr(response, PJ_STUN_ATTR_XOR_MAPPED_ADDR,0);
|
|
if (!xaddr) {
|
|
- check_set_state(ice, check, PJ_ICE_SESS_CHECK_STATE_FAILED,
|
|
+ check_set_state(ice, check, PJ_ICE_SESS_CHECK_STATE_FAILED,
|
|
PJNATH_ESTUNNOMAPPEDADDR);
|
|
on_check_complete(ice, check);
|
|
pj_grp_lock_release(ice->grp_lock);
|
|
@@ -2960,7 +3665,7 @@ static void on_stun_request_complete(pj_stun_session *stun_sess,
|
|
|
|
/* 7.1.2.2.1. Discovering Peer Reflexive Candidates
|
|
* If the transport address returned in XOR-MAPPED-ADDRESS does not match
|
|
- * any of the local candidates that the agent knows about, the mapped
|
|
+ * any of the local candidates that the agent knows about, the mapped
|
|
* address represents a new candidate - a peer reflexive candidate.
|
|
*/
|
|
if (lcand == NULL) {
|
|
@@ -2994,7 +3699,9 @@ static void on_stun_request_complete(pj_stun_session *stun_sess,
					 &check->lcand->base_addr,
					 &check->lcand->base_addr,
					 pj_sockaddr_get_len(&xaddr->sockaddr),
-					 &cand_id);
+					 &cand_id,
+					 check->rcand->transport == PJ_CAND_UDP ?
+					     PJ_CAND_UDP : PJ_CAND_TCP_PASSIVE);
	// Note: for IPv6, pj_ice_sess_add_cand can return SUCCESS
	// without adding any candidates if the candidate is
	// deprecated (because the ICE MUST NOT fail)
@@ -3018,7 +3725,7 @@ static void on_stun_request_complete(pj_stun_session *stun_sess,
|
|
/* 7.1.2.2.3. Constructing a Valid Pair
|
|
* Next, the agent constructs a candidate pair whose local candidate
|
|
* equals the mapped address of the response, and whose remote candidate
|
|
- * equals the destination address to which the request was sent.
|
|
+ * equals the destination address to which the request was sent.
|
|
*/
|
|
|
|
/* Add pair to valid list, if it's not there, otherwise just update
|
|
@@ -3039,6 +3746,9 @@ static void on_stun_request_complete(pj_stun_session *stun_sess,
	new_check->state = PJ_ICE_SESS_CHECK_STATE_SUCCEEDED;
	new_check->nominated = check->nominated;
	new_check->err_code = PJ_SUCCESS;
+#if PJ_HAS_TCP
+	new_check->reconnect_count = 0;
+#endif
     } else {
	new_check = &ice->valid_list.checks[i];
	ice->valid_list.checks[i].nominated = check->nominated;
@@ -3053,12 +3763,12 @@ static void on_stun_request_complete(pj_stun_session *stun_sess,
|
|
sort_checklist(ice, &ice->valid_list);
|
|
|
|
/* 7.1.2.2.2. Updating Pair States
|
|
- *
|
|
+ *
|
|
* The agent sets the state of the pair that generated the check to
|
|
* Succeeded. The success of this check might also cause the state of
|
|
* other checks to change as well.
|
|
*/
|
|
- check_set_state(ice, check, PJ_ICE_SESS_CHECK_STATE_SUCCEEDED,
|
|
+ check_set_state(ice, check, PJ_ICE_SESS_CHECK_STATE_SUCCEEDED,
|
|
PJ_SUCCESS);
|
|
|
|
/* Perform 7.1.2.2.2. Updating Pair States.
|
|
@@ -3100,11 +3810,11 @@ static pj_status_t on_stun_rx_request(pj_stun_session *sess,
|
|
|
|
PJ_UNUSED_ARG(pkt);
|
|
PJ_UNUSED_ARG(pkt_len);
|
|
-
|
|
+
|
|
/* Reject any requests except Binding request */
|
|
if (msg->hdr.type != PJ_STUN_BINDING_REQUEST) {
|
|
- pj_stun_session_respond(sess, rdata, PJ_STUN_SC_BAD_REQUEST,
|
|
- NULL, token, PJ_TRUE,
|
|
+ pj_stun_session_respond(sess, rdata, PJ_STUN_SC_BAD_REQUEST,
|
|
+ NULL, token, PJ_TRUE,
|
|
src_addr, src_addr_len);
|
|
return PJ_SUCCESS;
|
|
}
|
|
@@ -3170,13 +3880,13 @@ static pj_status_t on_stun_rx_request(pj_stun_session *sess,
|
|
{
|
|
if (pj_cmp_timestamp(&ice->tie_breaker, &role_attr->value) < 0) {
|
|
/* Switch role to controlled */
|
|
- LOG4((ice->obj_name,
|
|
+ LOG4((ice->obj_name,
|
|
"Changing role because of ICE-CONTROLLING attribute"));
|
|
pj_ice_sess_change_role(ice, PJ_ICE_SESS_ROLE_CONTROLLED);
|
|
} else {
|
|
/* Generate 487 response */
|
|
- pj_stun_session_respond(sess, rdata, PJ_STUN_SC_ROLE_CONFLICT,
|
|
- NULL, token, PJ_TRUE,
|
|
+ pj_stun_session_respond(sess, rdata, PJ_STUN_SC_ROLE_CONFLICT,
|
|
+ NULL, token, PJ_TRUE,
|
|
src_addr, src_addr_len);
|
|
pj_grp_lock_release(ice->grp_lock);
|
|
return PJ_SUCCESS;
|
|
@@ -3187,21 +3897,21 @@ static pj_status_t on_stun_rx_request(pj_stun_session *sess,
|
|
{
|
|
if (pj_cmp_timestamp(&ice->tie_breaker, &role_attr->value) < 0) {
|
|
/* Generate 487 response */
|
|
- pj_stun_session_respond(sess, rdata, PJ_STUN_SC_ROLE_CONFLICT,
|
|
- NULL, token, PJ_TRUE,
|
|
+ pj_stun_session_respond(sess, rdata, PJ_STUN_SC_ROLE_CONFLICT,
|
|
+ NULL, token, PJ_TRUE,
|
|
src_addr, src_addr_len);
|
|
pj_grp_lock_release(ice->grp_lock);
|
|
return PJ_SUCCESS;
|
|
} else {
|
|
/* Switch role to controlled */
|
|
- LOG4((ice->obj_name,
|
|
+ LOG4((ice->obj_name,
|
|
"Changing role because of ICE-CONTROLLED attribute"));
|
|
pj_ice_sess_change_role(ice, PJ_ICE_SESS_ROLE_CONTROLLING);
|
|
}
|
|
}
|
|
|
|
- /*
|
|
- * First send response to this request
|
|
+ /*
|
|
+ * First send response to this request
|
|
*/
|
|
status = pj_stun_session_create_res(sess, rdata, 0, NULL, &tdata);
|
|
if (status != PJ_SUCCESS) {
|
|
@@ -3217,7 +3927,7 @@ static pj_status_t on_stun_rx_request(pj_stun_session *sess,
|
|
for (i = 0; i < ice->clist.count; ++i) {
|
|
pj_ice_sess_check *c = &ice->clist.checks[i];
|
|
if (c->lcand->comp_id == sd->comp_id &&
|
|
- c->lcand->transport_id == transport_id)
|
|
+ c->lcand->transport_id == transport_id)
|
|
{
|
|
lcand = c->lcand;
|
|
break;
|
|
@@ -3232,7 +3942,7 @@ static pj_status_t on_stun_rx_request(pj_stun_session *sess,
|
|
*/
|
|
for (i = 0; i < ice->rcand_cnt; ++i) {
|
|
pj_sockaddr synth_addr;
|
|
-
|
|
+
|
|
if (ice->rcand[i].addr.addr.sa_family != pj_AF_INET())
|
|
continue;
|
|
|
|
@@ -3252,7 +3962,7 @@ static pj_status_t on_stun_rx_request(pj_stun_session *sess,
|
|
|
|
|
|
/* Add XOR-MAPPED-ADDRESS attribute */
|
|
- status = pj_stun_msg_add_sockaddr_attr(tdata->pool, tdata->msg,
|
|
+ status = pj_stun_msg_add_sockaddr_attr(tdata->pool, tdata->msg,
|
|
PJ_STUN_ATTR_XOR_MAPPED_ADDR,
|
|
PJ_TRUE, source_addr,
|
|
source_addr_len);
|
|
@@ -3263,11 +3973,14 @@ static pj_status_t on_stun_rx_request(pj_stun_session *sess,
|
|
msg_data->has_req_data = PJ_FALSE;
|
|
|
|
/* Send the response */
|
|
- status = pj_stun_session_send_msg(sess, msg_data, PJ_TRUE, PJ_TRUE,
|
|
+ status = pj_stun_session_send_msg(sess, msg_data, PJ_TRUE, pj_stun_session_tp_type(sess) == PJ_STUN_TP_UDP,
|
|
src_addr, src_addr_len, tdata);
|
|
|
|
+ if (status == PJ_EBUSY) {
|
|
+ PJ_LOG(5, (ice->obj_name, "on_stun_rx_request, PJ_EBUSY"));
|
|
+ }
|
|
|
|
- /*
|
|
+ /*
|
|
* Handling early check.
|
|
*
|
|
* It's possible that we receive this request before we receive SDP
|
|
@@ -3326,7 +4039,7 @@ static void handle_incoming_check(pj_ice_sess *ice,
|
|
|
|
comp = find_comp(ice, rcheck->comp_id);
|
|
|
|
- /* Find remote candidate based on the source transport address of
|
|
+ /* Find remote candidate based on the source transport address of
|
|
* the request.
|
|
*/
|
|
for (i=0; i<ice->rcand_cnt; ++i) {
|
|
@@ -3344,7 +4057,7 @@ static void handle_incoming_check(pj_ice_sess *ice,
|
|
void *p;
|
|
|
|
if (ice->rcand_cnt >= PJ_ICE_MAX_CAND) {
|
|
- LOG4((ice->obj_name,
|
|
+ LOG4((ice->obj_name,
|
|
"Unable to add new peer reflexive candidate: too many "
|
|
"candidates already (%d)", PJ_ICE_MAX_CAND));
|
|
return;
|
|
@@ -3361,7 +4074,7 @@ static void handle_incoming_check(pj_ice_sess *ice,
|
|
rcand->foundation.slen = pj_ansi_snprintf(rcand->foundation.ptr, 36,
|
|
"f%p", p);
|
|
|
|
- LOG4((ice->obj_name,
|
|
+ LOG4((ice->obj_name,
|
|
"Added new remote candidate from the request: %s:%d",
|
|
pj_sockaddr_print(&rcand->addr, raddr, sizeof(raddr), 2),
|
|
pj_sockaddr_get_port(&rcand->addr)));
|
|
@@ -3391,12 +4104,12 @@ static void handle_incoming_check(pj_ice_sess *ice,
     /* Just get candidate with the highest priority and same transport ID
      * for the specified component ID in the checklist.
      */
-    for (i=0; i<ice->clist.count; ++i) {
-	pj_ice_sess_check *c = &ice->clist.checks[i];
-	if (c->lcand->comp_id == rcheck->comp_id &&
-	    c->lcand->transport_id == rcheck->transport_id)
+    for (i=0; i<ice->lcand_cnt; ++i) {
+	pj_ice_sess_cand *lcand_tmp = &ice->lcand[i];
+	if (lcand_tmp->comp_id == rcheck->comp_id &&
+	    lcand_tmp->transport_id == rcheck->transport_id)
 	{
-	    lcand = c->lcand;
+	    lcand = lcand_tmp;
 	    break;
 	}
     }
@@ -3404,17 +4117,17 @@ static void handle_incoming_check(pj_ice_sess *ice,
|
|
/* Should not happen, but just in case remote is sending a
|
|
* Binding request for a component which it doesn't have.
|
|
*/
|
|
- LOG4((ice->obj_name,
|
|
+ LOG4((ice->obj_name,
|
|
"Received Binding request but no local candidate is found!"));
|
|
return;
|
|
}
|
|
#endif
|
|
|
|
- /*
|
|
- * Create candidate pair for this request.
|
|
+ /*
|
|
+ * Create candidate pair for this request.
|
|
*/
|
|
|
|
- /*
|
|
+ /*
|
|
* 7.2.1.4. Triggered Checks
|
|
*
|
|
* Now that we have local and remote candidate, check if we already
|
|
@@ -3435,14 +4148,14 @@ static void handle_incoming_check(pj_ice_sess *ice,
|
|
* generate an immediate retransmit of the Binding Request for the
|
|
* check in progress. This is to facilitate rapid completion of
|
|
* ICE when both agents are behind NAT.
|
|
- *
|
|
+ *
|
|
* - If the state of that pair is Failed or Succeeded, no triggered
|
|
* check is sent.
|
|
*/
|
|
if (i != ice->clist.count) {
|
|
pj_ice_sess_check *c = &ice->clist.checks[i];
|
|
|
|
- /* If USE-CANDIDATE is present, set nominated flag
|
|
+ /* If USE-CANDIDATE is present, set nominated flag
|
|
* Note: DO NOT overwrite nominated flag if one is already set.
|
|
*/
|
|
c->nominated = ((rcheck->use_candidate) || c->nominated);
|
|
@@ -3483,14 +4196,14 @@ static void handle_incoming_check(pj_ice_sess *ice,
|
|
unsigned j;
|
|
|
|
/* If this check is nominated, scan the valid_list for the
|
|
- * same check and update the nominated flag. A controlled
|
|
+ * same check and update the nominated flag. A controlled
|
|
* agent might have finished the check earlier.
|
|
*/
|
|
if (rcheck->use_candidate) {
|
|
for (j=0; j<ice->valid_list.count; ++j) {
|
|
pj_ice_sess_check *vc = &ice->valid_list.checks[j];
|
|
- if (vc->lcand->transport_id == c->lcand->transport_id &&
|
|
- vc->rcand == c->rcand)
|
|
+ if (vc->lcand->transport_id == c->lcand->transport_id &&
|
|
+ vc->rcand == c->rcand)
|
|
{
|
|
/* Set nominated flag */
|
|
vc->nominated = PJ_TRUE;
|
|
@@ -3498,8 +4211,8 @@ static void handle_incoming_check(pj_ice_sess *ice,
|
|
/* Update valid check and nominated check for the component */
|
|
update_comp_check(ice, vc->lcand->comp_id, vc);
|
|
|
|
- LOG5((ice->obj_name, "Valid check %s is nominated",
|
|
- dump_check(ice->tmp.txt, sizeof(ice->tmp.txt),
|
|
+ LOG5((ice->obj_name, "Valid check %s is nominated",
|
|
+ dump_check(ice->tmp.txt, sizeof(ice->tmp.txt),
|
|
&ice->valid_list, vc)));
|
|
}
|
|
}
|
|
@@ -3599,7 +4312,7 @@ static pj_status_t on_stun_rx_indication(pj_stun_session *sess,
|
|
"for component %d", sd->comp_id));
|
|
} else {
|
|
LOG4((sd->ice->obj_name, "Received unexpected %s indication "
|
|
- "for component %d", pj_stun_get_method_name(msg->hdr.type),
|
|
+ "for component %d", pj_stun_get_method_name(msg->hdr.type),
|
|
sd->comp_id));
|
|
}
|
|
|
|
@@ -3621,7 +4334,7 @@ PJ_DEF(pj_status_t) pj_ice_sess_send_data(pj_ice_sess *ice,
|
|
pj_sockaddr addr;
|
|
|
|
PJ_ASSERT_RETURN(ice && comp_id, PJ_EINVAL);
|
|
-
|
|
+
|
|
/* It is possible that comp_cnt is less than comp_id, when remote
|
|
* doesn't support all the components that we have.
|
|
*/
|
|
@@ -3658,9 +4371,9 @@ PJ_DEF(pj_status_t) pj_ice_sess_send_data(pj_ice_sess *ice,
|
|
|
|
PJ_RACE_ME(5);
|
|
|
|
- status = (*ice->cb.on_tx_pkt)(ice, comp_id, transport_id,
|
|
- data, data_len,
|
|
- &addr,
|
|
+ status = (*ice->cb.on_tx_pkt)(ice, comp_id, transport_id,
|
|
+ data, data_len,
|
|
+ &addr,
|
|
pj_sockaddr_get_len(&addr));
|
|
|
|
on_return:
|
|
@@ -3713,7 +4426,7 @@ PJ_DEF(pj_status_t) pj_ice_sess_on_rx_pkt(pj_ice_sess *ice,
|
|
* packets. We don't need to verify the STUN packet too rigorously, that
|
|
* will be done by the user.
|
|
*/
|
|
- status = pj_stun_msg_check((const pj_uint8_t*)pkt, pkt_size,
|
|
+ status = pj_stun_msg_check((const pj_uint8_t*)pkt, pkt_size,
|
|
PJ_STUN_IS_DATAGRAM |
|
|
PJ_STUN_NO_FINGERPRINT_CHECK);
|
|
if (status == PJ_SUCCESS) {
|
|
@@ -3734,12 +4447,10 @@ PJ_DEF(pj_status_t) pj_ice_sess_on_rx_pkt(pj_ice_sess *ice,
|
|
|
|
PJ_RACE_ME(5);
|
|
|
|
- (*ice->cb.on_rx_data)(ice, comp_id, transport_id, pkt, pkt_size,
|
|
+ (*ice->cb.on_rx_data)(ice, comp_id, transport_id, pkt, pkt_size,
|
|
src_addr, src_addr_len);
|
|
status = PJ_SUCCESS;
|
|
}
|
|
|
|
return status;
|
|
}
|
|
-
|
|
-
|
|
diff --git a/pjnath/src/pjnath/ice_strans.c b/pjnath/src/pjnath/ice_strans.c
index 370ca6f14..b666696ac 100644
--- a/pjnath/src/pjnath/ice_strans.c
+++ b/pjnath/src/pjnath/ice_strans.c
@@ -68,6 +68,7 @@ enum tp_type
 #  define RELAY_PREF ((1 << PJ_ICE_LOCAL_PREF_BITS) - 1)
 #endif
 
+#define MAX_RTP_SIZE 65536
 
 /* The candidate type preference when STUN candidate is used */
 static pj_uint8_t srflx_pref_table[PJ_ICE_CAND_TYPE_MAX] =
@@ -86,9 +87,18 @@ static pj_uint8_t srflx_pref_table[PJ_ICE_CAND_TYPE_MAX] =
 #endif
 };
 
+//////////////////////////////////////////////////////////////////////////////
+
+static pj_uint16_t GETVAL16H(const pj_uint8_t *buf1, const pj_uint8_t *buf2)
+{
+    return (pj_uint16_t) ((buf1[0] << 8) | (buf2[0] << 0));
+}
+
+//////////////////////////////////////////////////////////////////////////////
 
 /* ICE callbacks */
 static void on_valid_pair(pj_ice_sess *ice);
+static void on_ice_destroy(pj_ice_sess *ice);
 static void on_ice_complete(pj_ice_sess *ice, pj_status_t status);
 static pj_status_t ice_tx_pkt(pj_ice_sess *ice,
			      unsigned comp_id,
@@ -103,6 +113,18 @@ static void ice_rx_data(pj_ice_sess *ice,
			const pj_sockaddr_t *src_addr,
			unsigned src_addr_len);
 
+#if PJ_HAS_TCP
+static pj_status_t ice_wait_tcp_connection(pj_ice_sess *ice,
+                                           unsigned check_id);
+
+static pj_status_t ice_reconnect_tcp_connection(pj_ice_sess *ice,
+                                                unsigned check_id);
+
+static pj_status_t ice_close_tcp_connection(pj_ice_sess *ice,
+                                            unsigned check_id);
+static pj_status_t ice_close_remaining_tcp(pj_ice_sess *ice);
+#endif
+
|
|
|
|
/* STUN socket callbacks */
|
|
/* Notification when incoming packet has been received. */
|
|
@@ -182,6 +204,16 @@ typedef struct pj_ice_strans_comp
|
|
} pj_ice_strans_comp;
|
|
|
|
|
|
+static pj_bool_t add_local_candidate(pj_ice_sess_cand *cand,
|
|
+ unsigned idx,
|
|
+ unsigned i,
|
|
+ unsigned *cand_cnt,
|
|
+ unsigned *max_cand_cnt,
|
|
+ pj_stun_sock_info stun_sock_info,
|
|
+ pj_ice_strans *ice_st,
|
|
+ pj_ice_strans_comp *comp,
|
|
+ pj_ice_cand_transport transport);
|
|
+
|
|
/* Pending send buffer */
|
|
typedef struct pending_send
|
|
{
|
|
@@ -232,6 +264,12 @@ struct pj_ice_strans
|
|
signalled end of candidate? */
|
|
pj_bool_t loc_cand_end;/**< Trickle ICE: local has
|
|
signalled end of candidate? */
|
|
+ pj_uint8_t rtp_pkt[MAX_RTP_SIZE];
|
|
+ pj_uint8_t rx_buffer[MAX_RTP_SIZE];
|
|
+ pj_uint16_t rx_buffer_size;
|
|
+ pj_uint16_t rx_wanted_size;
|
|
+
|
|
+ pj_ssize_t last_data_len; /**< What the application is waiting. */
|
|
};
|
|
|
|
|
|
@@ -268,6 +306,7 @@ PJ_DEF(void) pj_ice_strans_cfg_default(pj_ice_strans_cfg *cfg)
|
|
pj_bzero(cfg, sizeof(*cfg));
|
|
|
|
cfg->af = pj_AF_INET();
|
|
+ cfg->protocol = PJ_ICE_TP_UDP;
|
|
pj_stun_config_init(&cfg->stun_cfg, NULL, 0, NULL, NULL);
|
|
pj_ice_strans_stun_cfg_default(&cfg->stun);
|
|
pj_ice_strans_turn_cfg_default(&cfg->turn);
|
|
@@ -285,6 +324,7 @@ PJ_DEF(void) pj_ice_strans_stun_cfg_default(pj_ice_strans_stun_cfg *cfg)
|
|
pj_bzero(cfg, sizeof(*cfg));
|
|
|
|
cfg->af = pj_AF_INET();
|
|
+ cfg->conn_type = PJ_STUN_TP_UDP;
|
|
cfg->port = PJ_STUN_PORT;
|
|
cfg->max_host_cands = 64;
|
|
cfg->ignore_stun_error = PJ_FALSE;
|
|
@@ -428,6 +468,9 @@ static pj_status_t add_update_turn(pj_ice_strans *ice_st,
|
|
cand->transport_id = tp_id;
|
|
cand->comp_id = (pj_uint8_t) comp->comp_id;
|
|
new_cand = PJ_TRUE;
|
|
+ cand->transport = turn_cfg->conn_type == PJ_TURN_TP_UDP ?
|
|
+ PJ_CAND_UDP :
|
|
+ PJ_CAND_TCP_PASSIVE;
|
|
}
|
|
|
|
/* Allocate and initialize TURN socket data */
|
|
@@ -435,6 +478,10 @@ static pj_status_t add_update_turn(pj_ice_strans *ice_st,
|
|
data->comp = comp;
|
|
data->transport_id = cand->transport_id;
|
|
|
|
+ if (turn_cfg->conn_type == PJ_TURN_TP_TCP) {
|
|
+ turn_cfg->alloc_param.peer_conn_type = PJ_TURN_TP_TCP;
|
|
+ }
|
|
+
|
|
/* Create the TURN transport */
|
|
status = pj_turn_sock_create(&ice_st->cfg.stun_cfg, turn_cfg->af,
|
|
turn_cfg->conn_type,
|
|
@@ -476,7 +523,7 @@ static pj_status_t add_update_turn(pj_ice_strans *ice_st,
|
|
return PJ_SUCCESS;
|
|
}
|
|
|
|
-static pj_bool_t ice_cand_equals(pj_ice_sess_cand *lcand,
|
|
+static pj_bool_t ice_cand_equals(pj_ice_sess_cand *lcand,
|
|
pj_ice_sess_cand *rcand)
|
|
{
|
|
if (lcand == NULL && rcand == NULL){
|
|
@@ -485,7 +532,7 @@ static pj_bool_t ice_cand_equals(pj_ice_sess_cand *lcand,
|
|
if (lcand == NULL || rcand == NULL){
|
|
return PJ_FALSE;
|
|
}
|
|
-
|
|
+
|
|
if (lcand->type != rcand->type
|
|
|| lcand->status != rcand->status
|
|
|| lcand->comp_id != rcand->comp_id
|
|
@@ -493,15 +540,145 @@ static pj_bool_t ice_cand_equals(pj_ice_sess_cand *lcand,
|
|
// local pref is no longer a constant, so it may be different
|
|
//|| lcand->local_pref != rcand->local_pref
|
|
|| lcand->prio != rcand->prio
|
|
+ || lcand->transport != rcand->transport
|
|
|| pj_sockaddr_cmp(&lcand->addr, &rcand->addr) != 0
|
|
|| pj_sockaddr_cmp(&lcand->base_addr, &rcand->base_addr) != 0)
|
|
{
|
|
return PJ_FALSE;
|
|
}
|
|
-
|
|
+
|
|
return PJ_TRUE;
|
|
}
|
|
|
|
+static pj_status_t add_nat_assisted_cand(pj_ice_strans *ice_st,
|
|
+ pj_ice_strans_comp *comp,
|
|
+ unsigned idx,
|
|
+ unsigned max_cand_cnt)
|
|
+{
|
|
+ /* PJNATH library handles host and srflx connections through STUN
|
|
+ * sockets, even if there is no actual STUN server configured (for host
|
|
+ * only candidates). Since NAT-assisted candidates are srflx candidates,
|
|
+ * they will be handled through STUN sockets as well.
|
|
+ * NAT-assisted candidates are provided as a STUN configuration (as an
|
|
+ * entry in the stun_tp list). The position (index) of the config in the
|
|
+ * list is used to calculate the "local preference" of the priority, thus
|
|
+ * it will determine the priority of the NAT-assisted candidates relative
|
|
+ * to other srflx candidates.
|
|
+ */
|
|
+
|
|
+ pj_ice_sess_cand *cand;
|
|
+ pj_ice_strans_stun_cfg *nat_cfg = &ice_st->cfg.stun_tp[idx];
|
|
+ pj_stun_sock_cfg *sock_cfg = &nat_cfg->cfg;
|
|
+ unsigned comp_idx = comp->comp_id - 1;
|
|
+ pj_stun_sock_cb sock_cb;
|
|
+ sock_user_data *data;
|
|
+ pj_status_t status;
|
|
+
|
|
+ PJ_ASSERT_RETURN(max_cand_cnt > 0, PJ_ETOOSMALL);
|
|
+ PJ_ASSERT_RETURN(nat_cfg->cfg.user_mapping_cnt > comp_idx, PJ_ETOOSMALL);
|
|
+
|
|
+ pj_sockaddr *laddr = &nat_cfg->cfg.user_mapping[comp_idx].local_addr;
|
|
+ pj_sockaddr *maddr = &nat_cfg->cfg.user_mapping[comp_idx].mapped_addr;
|
|
+
|
|
+ pj_bzero(&sock_cb, sizeof(sock_cb));
|
|
+ sock_cb.on_rx_data = &stun_on_rx_data;
|
|
+ sock_cb.on_status = &stun_on_status;
|
|
+ sock_cb.on_data_sent = &stun_on_data_sent;
|
|
+
|
|
+ /* Override component specific QoS settings, if any */
|
|
+ if (ice_st->cfg.comp[comp_idx].qos_type) {
|
|
+ sock_cfg->qos_type = ice_st->cfg.comp[comp_idx].qos_type;
|
|
+ }
|
|
+ if (ice_st->cfg.comp[comp_idx].qos_params.flags) {
|
|
+ pj_memcpy(&sock_cfg->qos_params,
|
|
+ &ice_st->cfg.comp[comp_idx].qos_params,
|
|
+ sizeof(sock_cfg->qos_params));
|
|
+ }
|
|
+
|
|
+ /* Override component specific socket buffer size settings, if any */
|
|
+ if (ice_st->cfg.comp[comp_idx].so_rcvbuf_size > 0) {
|
|
+ sock_cfg->so_rcvbuf_size = ice_st->cfg.comp[comp_idx].so_rcvbuf_size;
|
|
+ }
|
|
+ if (ice_st->cfg.comp[comp_idx].so_sndbuf_size > 0) {
|
|
+ sock_cfg->so_sndbuf_size = ice_st->cfg.comp[comp_idx].so_sndbuf_size;
|
|
+ }
|
|
+
|
|
+ /* Setup srflx candidate*/
|
|
+ cand = &comp->cand_list[comp->cand_cnt];
|
|
+ cand->type = PJ_ICE_CAND_TYPE_SRFLX;
|
|
+ /* User candidates are assumed ready */
|
|
+ cand->status = PJ_SUCCESS;
|
|
+ cand->local_pref = (pj_uint16_t)(SRFLX_PREF - idx);
|
|
+ cand->transport_id = CREATE_TP_ID(TP_STUN, idx);
|
|
+ cand->comp_id = (pj_uint8_t) comp->comp_id;
|
|
+ cand->transport = nat_cfg->cfg.user_mapping[comp_idx].tp_type;
|
|
+
|
|
+    /* Set the user mappings if available. */
|
|
+ pj_sockaddr_cp(&sock_cfg->bound_addr, laddr);
|
|
+
|
|
+ {
|
|
+ char localStr[PJ_INET6_ADDRSTRLEN+8];
|
|
+ char mappedStr[PJ_INET6_ADDRSTRLEN+8];
|
|
+ PJ_LOG(5,(ice_st->obj_name, "Setting user mapping %s -> %s [%s (%i)] for comp %u at config index %i",
|
|
+ pj_sockaddr_print(laddr, localStr, sizeof(localStr), 3),
|
|
+ pj_sockaddr_print(maddr, mappedStr, sizeof(mappedStr), 3),
|
|
+ nat_cfg->conn_type == PJ_STUN_TP_UDP?"UDP":"TCP",
|
|
+ nat_cfg->conn_type,
|
|
+ comp->comp_id, idx));
|
|
+ }
|
|
+
|
|
+ /* Allocate and initialize STUN socket data */
|
|
+ data = PJ_POOL_ZALLOC_T(ice_st->pool, sock_user_data);
|
|
+ data->comp = comp;
|
|
+ data->transport_id = cand->transport_id;
|
|
+
|
|
+ /* Create the STUN transport */
|
|
+ status = pj_stun_sock_create(&ice_st->cfg.stun_cfg, NULL,
|
|
+ nat_cfg->af, nat_cfg->conn_type,
|
|
+ &sock_cb, sock_cfg, data,
|
|
+ &comp->stun[idx].sock);
|
|
+ if (status != PJ_SUCCESS)
|
|
+ return status;
|
|
+
|
|
+ /* Update and commit NAT-assisted candidate. */
|
|
+ pj_sockaddr_cp(&cand->addr, maddr);
|
|
+ pj_sockaddr_cp(&cand->base_addr, laddr);
|
|
+ pj_sockaddr_cp(&cand->rel_addr, &cand->base_addr);
|
|
+ pj_ice_calc_foundation(ice_st->pool, &cand->foundation,
|
|
+ cand->type, &cand->base_addr);
|
|
+ comp->cand_cnt++;
|
|
+ max_cand_cnt--;
|
|
+
|
|
+ // Check if we already have a matching host candidate for
|
|
+ // this srflx candidate
|
|
+
|
|
+ /* Find the base for this candidate */
|
|
+ unsigned j=0;
|
|
+ for (; j<comp->cand_cnt; j++) {
|
|
+ pj_ice_sess_cand *host = &comp->cand_list[j];
|
|
+
|
|
+ if (host->type != PJ_ICE_CAND_TYPE_HOST)
|
|
+ continue;
|
|
+
|
|
+ if (pj_sockaddr_cmp(&cand->base_addr, &host->addr) == 0) {
|
|
+            /* Found a matching host candidate */
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ /* Add local address as a host candidate if not already present. */
|
|
+ if (j == comp->cand_cnt && nat_cfg->max_host_cands) {
|
|
+ pj_stun_sock_info stun_sock_info;
|
|
+ pj_memset(&stun_sock_info, 0, sizeof(stun_sock_info));
|
|
+ stun_sock_info.alias_cnt = 1;
|
|
+ pj_sockaddr_cp(&stun_sock_info.aliases[0], laddr);
|
|
+ unsigned cand_cnt = 0;
|
|
+ status = add_local_candidate(cand, idx, 0, &cand_cnt, &max_cand_cnt,
|
|
+ stun_sock_info, ice_st, comp, cand->transport);
|
|
+ }
|
|
+
|
|
+ return status;
|
|
+}
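/*
 * [Editor's note - illustration only, not part of the patch] A sketch of how
 * a caller might feed a known NAT binding to add_nat_assisted_cand() above,
 * assuming the user_mapping fields this patch adds to pj_stun_sock_cfg.
 * The addresses and the fill_user_mapping() helper are hypothetical; call it
 * after pj_ice_strans_cfg_default() and before creating the ICE stream
 * transport.
 */
static void fill_user_mapping(pj_ice_strans_cfg *cfg)
{
    pj_ice_strans_stun_cfg *stun = &cfg->stun_tp[0];
    pj_str_t local  = pj_str("192.168.1.10:4000");   /* hypothetical host address */
    pj_str_t mapped = pj_str("203.0.113.7:4000");    /* hypothetical public mapping */

    cfg->stun_tp_cnt = 1;
    stun->conn_type = PJ_STUN_TP_UDP;

    /* One mapping per component; only component 1 is shown here. */
    stun->cfg.user_mapping_cnt = 1;
    pj_sockaddr_parse(pj_AF_INET(), 0, &local,
                      &stun->cfg.user_mapping[0].local_addr);
    pj_sockaddr_parse(pj_AF_INET(), 0, &mapped,
                      &stun->cfg.user_mapping[0].mapped_addr);
    stun->cfg.user_mapping[0].tp_type = PJ_CAND_UDP;
}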
|
|
|
|
static pj_status_t add_stun_and_host(pj_ice_strans *ice_st,
|
|
pj_ice_strans_comp *comp,
|
|
@@ -553,6 +730,9 @@ static pj_status_t add_stun_and_host(pj_ice_strans *ice_st,
|
|
cand->local_pref = (pj_uint16_t)(SRFLX_PREF - idx);
|
|
cand->transport_id = CREATE_TP_ID(TP_STUN, idx);
|
|
cand->comp_id = (pj_uint8_t) comp->comp_id;
|
|
+ cand->transport = stun_cfg->conn_type == PJ_STUN_TP_UDP ?
|
|
+ PJ_CAND_UDP :
|
|
+ PJ_CAND_TCP_PASSIVE;
|
|
|
|
/* Allocate and initialize STUN socket data */
|
|
data = PJ_POOL_ZALLOC_T(ice_st->pool, sock_user_data);
|
|
@@ -561,7 +741,7 @@ static pj_status_t add_stun_and_host(pj_ice_strans *ice_st,
|
|
|
|
/* Create the STUN transport */
|
|
status = pj_stun_sock_create(&ice_st->cfg.stun_cfg, NULL,
|
|
- stun_cfg->af, &stun_sock_cb,
|
|
+ stun_cfg->af, stun_cfg->conn_type, &stun_sock_cb,
|
|
sock_cfg, data, &comp->stun[idx].sock);
|
|
if (status != PJ_SUCCESS)
|
|
return status;
|
|
@@ -646,106 +826,154 @@ static pj_status_t add_stun_and_host(pj_ice_strans *ice_st,
|
|
return status;
|
|
}
|
|
|
|
- for (i = 0; i < stun_sock_info.alias_cnt &&
|
|
- cand_cnt < stun_cfg->max_host_cands; ++i)
|
|
- {
|
|
- unsigned j;
|
|
- pj_bool_t cand_duplicate = PJ_FALSE;
|
|
- char addrinfo[PJ_INET6_ADDRSTRLEN+10];
|
|
- const pj_sockaddr *addr = &stun_sock_info.aliases[i];
|
|
-
|
|
- if (max_cand_cnt==0) {
|
|
- PJ_LOG(4,(ice_st->obj_name, "Too many host candidates"));
|
|
- break;
|
|
- }
|
|
-
|
|
- /* Ignore loopback addresses if cfg->stun.loop_addr is unset */
|
|
- if (stun_cfg->loop_addr==PJ_FALSE) {
|
|
- if (stun_cfg->af == pj_AF_INET() &&
|
|
- (pj_ntohl(addr->ipv4.sin_addr.s_addr)>>24)==127)
|
|
- {
|
|
- continue;
|
|
- }
|
|
- else if (stun_cfg->af == pj_AF_INET6()) {
|
|
- pj_in6_addr in6addr = {{{0}}};
|
|
- in6addr.s6_addr[15] = 1;
|
|
- if (pj_memcmp(&in6addr, &addr->ipv6.sin6_addr,
|
|
- sizeof(in6addr))==0)
|
|
- {
|
|
- continue;
|
|
- }
|
|
- }
|
|
+ for (i = 0; i < stun_sock_info.alias_cnt &&
|
|
+ cand_cnt < stun_cfg->max_host_cands &&
|
|
+ status == PJ_SUCCESS; ++i)
|
|
+ {
|
|
+ status = !PJ_SUCCESS;
|
|
+ if (stun_sock_info.conn_type == PJ_STUN_TP_UDP) {
|
|
+ status = add_local_candidate(cand, idx, i,
|
|
+ &cand_cnt, &max_cand_cnt,
|
|
+ stun_sock_info, ice_st, comp,
|
|
+ PJ_CAND_UDP);
|
|
+ } else {
|
|
+ status = add_local_candidate(cand, idx, i,
|
|
+ &cand_cnt, &max_cand_cnt,
|
|
+ stun_sock_info, ice_st, comp,
|
|
+ PJ_CAND_TCP_PASSIVE);
|
|
+ /** RFC 6544, Section 4.1:
|
|
+ * First, agents SHOULD obtain host candidates as described in
|
|
+ * Section 5.1. Then, each agent SHOULD "obtain" (allocate a
|
|
+ * placeholder for) an active host candidate for each component of
|
|
+ * each TCP-capable media stream on each interface that the host
|
|
+ * has. The agent does not yet have to actually allocate a port for
|
|
+ * these candidates, but they are used for the creation of the check
|
|
+ * lists.
|
|
+ */
|
|
+ status = add_local_candidate(cand, idx, i,
|
|
+ &cand_cnt, &max_cand_cnt,
|
|
+ stun_sock_info, ice_st, comp,
|
|
+ PJ_CAND_TCP_ACTIVE);
|
|
}
|
|
+ }
|
|
+ }
|
|
|
|
- /* Ignore IPv6 link-local address, unless it is the default
|
|
- * address (first alias).
|
|
- */
|
|
- if (stun_cfg->af == pj_AF_INET6() && i != 0) {
|
|
- const pj_in6_addr *a = &addr->ipv6.sin6_addr;
|
|
- if (a->s6_addr[0] == 0xFE && (a->s6_addr[1] & 0xC0) == 0x80)
|
|
- continue;
|
|
- }
|
|
+ return status;
|
|
+}
|
|
|
|
- cand = &comp->cand_list[comp->cand_cnt];
|
|
-
|
|
- cand->type = PJ_ICE_CAND_TYPE_HOST;
|
|
- cand->status = PJ_SUCCESS;
|
|
- cand->local_pref = (pj_uint16_t)(HOST_PREF - cand_cnt);
|
|
- cand->transport_id = CREATE_TP_ID(TP_STUN, idx);
|
|
- cand->comp_id = (pj_uint8_t) comp->comp_id;
|
|
- pj_sockaddr_cp(&cand->addr, addr);
|
|
- pj_sockaddr_cp(&cand->base_addr, addr);
|
|
- pj_bzero(&cand->rel_addr, sizeof(cand->rel_addr));
|
|
-
|
|
- /* Check if not already in list */
|
|
- for (j=0; j<comp->cand_cnt; j++) {
|
|
- if (ice_cand_equals(cand, &comp->cand_list[j])) {
|
|
- cand_duplicate = PJ_TRUE;
|
|
- break;
|
|
- }
|
|
- }
|
|
+static pj_bool_t add_local_candidate(pj_ice_sess_cand *cand,
|
|
+ unsigned idx,
|
|
+ unsigned i,
|
|
+ unsigned *cand_cnt,
|
|
+ unsigned *max_cand_cnt,
|
|
+ pj_stun_sock_info stun_sock_info,
|
|
+ pj_ice_strans *ice_st,
|
|
+ pj_ice_strans_comp *comp,
|
|
+ pj_ice_cand_transport transport)
|
|
+{
|
|
+ unsigned j;
|
|
+ pj_bool_t cand_duplicate = PJ_FALSE;
|
|
+ char addrinfo[PJ_INET6_ADDRSTRLEN+10];
|
|
+ const pj_sockaddr *addr = &stun_sock_info.aliases[i];
|
|
+ pj_ice_strans_stun_cfg *stun_cfg = &ice_st->cfg.stun_tp[idx];
|
|
|
|
- if (cand_duplicate) {
|
|
- PJ_LOG(4, (ice_st->obj_name,
|
|
- "Comp %d: host candidate %s (tpid=%d) is a duplicate",
|
|
- comp->comp_id, pj_sockaddr_print(&cand->addr, addrinfo,
|
|
- sizeof(addrinfo), 3), cand->transport_id));
|
|
|
|
- pj_bzero(&cand->addr, sizeof(cand->addr));
|
|
- pj_bzero(&cand->base_addr, sizeof(cand->base_addr));
|
|
- continue;
|
|
- } else {
|
|
- comp->cand_cnt+=1;
|
|
- cand_cnt++;
|
|
- max_cand_cnt--;
|
|
- }
|
|
-
|
|
- pj_ice_calc_foundation(ice_st->pool, &cand->foundation,
|
|
- cand->type, &cand->base_addr);
|
|
+ if (*max_cand_cnt==0) {
|
|
+ PJ_LOG(4,(ice_st->obj_name, "Too many host candidates"));
|
|
+ return !PJ_SUCCESS;
|
|
+ }
|
|
|
|
- /* Set default candidate with the preferred default
|
|
- * address family
|
|
- */
|
|
- if (comp->ice_st->cfg.af != pj_AF_UNSPEC() &&
|
|
- addr->addr.sa_family == comp->ice_st->cfg.af &&
|
|
- comp->cand_list[comp->default_cand].base_addr.addr.sa_family !=
|
|
- ice_st->cfg.af)
|
|
+ /* Ignore loopback addresses if cfg->stun.loop_addr is unset */
|
|
+ if (stun_cfg->loop_addr==PJ_FALSE) {
|
|
+ if (stun_cfg->af == pj_AF_INET() &&
|
|
+ (pj_ntohl(addr->ipv4.sin_addr.s_addr)>>24)==127)
|
|
+ {
|
|
+ return PJ_SUCCESS;
|
|
+ }
|
|
+ else if (stun_cfg->af == pj_AF_INET6()) {
|
|
+ pj_in6_addr in6addr = {0};
|
|
+ in6addr.s6_addr[15] = 1;
|
|
+ if (pj_memcmp(&in6addr, &addr->ipv6.sin6_addr,
|
|
+ sizeof(in6addr))==0)
|
|
{
|
|
- comp->default_cand = (unsigned)(cand - comp->cand_list);
|
|
+ return PJ_SUCCESS;
|
|
}
|
|
+ }
|
|
+ }
|
|
|
|
- PJ_LOG(4,(ice_st->obj_name,
|
|
- "Comp %d/%d: host candidate %s (tpid=%d) added",
|
|
- comp->comp_id, comp->cand_cnt-1,
|
|
- pj_sockaddr_print(&cand->addr, addrinfo,
|
|
- sizeof(addrinfo), 3),
|
|
- cand->transport_id));
|
|
+ /* Ignore IPv6 link-local address, unless it is the default
|
|
+ * address (first alias).
|
|
+ */
|
|
+ if (stun_cfg->af == pj_AF_INET6() && i != 0) {
|
|
+ const pj_in6_addr *a = &addr->ipv6.sin6_addr;
|
|
+ if (a->s6_addr[0] == 0xFE && (a->s6_addr[1] & 0xC0) == 0x80)
|
|
+ return PJ_SUCCESS;
|
|
+ }
|
|
+
|
|
+ cand = &comp->cand_list[comp->cand_cnt];
|
|
+
|
|
+ cand->type = PJ_ICE_CAND_TYPE_HOST;
|
|
+ cand->status = PJ_SUCCESS;
|
|
+ cand->local_pref = (pj_uint16_t)(HOST_PREF - *cand_cnt);
|
|
+ cand->transport_id = CREATE_TP_ID(TP_STUN, idx);
|
|
+ cand->comp_id = (pj_uint8_t) comp->comp_id;
|
|
+ cand->transport = transport;
|
|
+
|
|
+ pj_sockaddr_cp(&cand->addr, addr);
|
|
+ pj_sockaddr_cp(&cand->base_addr, addr);
|
|
+ pj_bzero(&cand->rel_addr, sizeof(cand->rel_addr));
|
|
+
|
|
+ /* Check if not already in list */
|
|
+ for (j=0; j<comp->cand_cnt; j++) {
|
|
+ if (ice_cand_equals(cand, &comp->cand_list[j])) {
|
|
+ cand_duplicate = PJ_TRUE;
|
|
+ return !PJ_SUCCESS;
|
|
}
|
|
}
|
|
|
|
- return status;
|
|
-}
|
|
+ if (cand_duplicate) {
|
|
+ PJ_LOG(4, (ice_st->obj_name,
|
|
+ "Comp %d: host candidate %s (tpid=%d) is a duplicate",
|
|
+ comp->comp_id,
|
|
+ pj_sockaddr_print(&cand->addr,
|
|
+ addrinfo, sizeof(addrinfo), 3),
|
|
+ cand->transport_id));
|
|
+
|
|
+ pj_bzero(&cand->addr, sizeof(cand->addr));
|
|
+ pj_bzero(&cand->base_addr, sizeof(cand->base_addr));
|
|
+ return PJ_SUCCESS;
|
|
+ } else {
|
|
+ comp->cand_cnt+=1;
|
|
+ (*cand_cnt)++;
|
|
+ (*max_cand_cnt)--;
|
|
+ }
|
|
+ pj_ice_calc_foundation(ice_st->pool, &cand->foundation,
|
|
+ cand->type, &cand->base_addr);
|
|
+
|
|
+ /* Set default candidate with the preferred default
|
|
+ * address family
|
|
+ */
|
|
+ if (comp->ice_st->cfg.af != pj_AF_UNSPEC() &&
|
|
+ addr->addr.sa_family == comp->ice_st->cfg.af &&
|
|
+ comp->cand_list[comp->default_cand].base_addr.addr.sa_family !=
|
|
+ ice_st->cfg.af)
|
|
+ {
|
|
+ comp->default_cand = (unsigned)(cand - comp->cand_list);
|
|
+ }
|
|
|
|
+ if (transport == PJ_CAND_TCP_ACTIVE) {
|
|
+ // Use the port 9 (DISCARD Protocol) for TCP active candidates.
|
|
+ pj_sockaddr_set_port(&cand->addr, 9);
|
|
+ }
|
|
+
|
|
+ PJ_LOG(4,(ice_st->obj_name,
|
|
+ "Comp %d/%d: host candidate %s (tpid=%d) added",
|
|
+ comp->comp_id, comp->cand_cnt-1,
|
|
+ pj_sockaddr_print(&cand->addr, addrinfo,
|
|
+ sizeof(addrinfo), 3),
|
|
+ cand->transport_id));
|
|
+ return PJ_SUCCESS;
|
|
+}
|
|
|
|
/*
|
|
* Create the component.
|
|
@@ -776,18 +1004,31 @@ static pj_status_t create_comp(pj_ice_strans *ice_st, unsigned comp_id)
|
|
/* Create STUN transport if configured */
|
|
for (i=0; i<ice_st->cfg.stun_tp_cnt; ++i) {
|
|
unsigned max_cand_cnt = PJ_ICE_ST_MAX_CAND - comp->cand_cnt -
|
|
- ice_st->cfg.turn_tp_cnt;
|
|
+ ice_st->cfg.turn_tp_cnt;
|
|
|
|
status = PJ_ETOOSMALL;
|
|
|
|
- if ((max_cand_cnt > 0) && (max_cand_cnt <= PJ_ICE_ST_MAX_CAND))
|
|
- status = add_stun_and_host(ice_st, comp, i, max_cand_cnt);
|
|
-
|
|
- if (status != PJ_SUCCESS) {
|
|
- PJ_PERROR(3,(ice_st->obj_name, status,
|
|
- "Failed creating STUN transport #%d for comp %d",
|
|
- i, comp->comp_id));
|
|
- //return status;
|
|
+ if ((max_cand_cnt > 0) && (max_cand_cnt <= PJ_ICE_ST_MAX_CAND)) {
|
|
+ // Set custom mapping (nat) if provided by the user.
|
|
+ if (ice_st->cfg.stun_tp[i].cfg.user_mapping_cnt > 0) {
|
|
+ status = add_nat_assisted_cand(ice_st, comp, i, max_cand_cnt);
|
|
+ if (status != PJ_SUCCESS)
|
|
+ PJ_PERROR(3,(ice_st->obj_name, status,
|
|
+ "Failed to add NAT-assisted candidate at config #%d for comp %d",
|
|
+ i, comp->comp_id));
|
|
+ } else {
|
|
+ status = add_stun_and_host(ice_st, comp, i, max_cand_cnt);
|
|
+ if (status != PJ_SUCCESS)
|
|
+ PJ_PERROR(3,(ice_st->obj_name, status,
|
|
+ "Failed creating STUN transport #%d for comp %d",
|
|
+ i, comp->comp_id));
|
|
+ }
|
|
+ } else {
|
|
+ // All STUN config slots have been used.
|
|
+ if (status != PJ_SUCCESS)
|
|
+ PJ_PERROR(3,(ice_st->obj_name, status,
|
|
+ "Max STUN config (%d) has been reached for comp %d",
|
|
+ PJ_ICE_ST_MAX_CAND, comp->comp_id));
|
|
}
|
|
}
|
|
|
|
@@ -828,10 +1069,10 @@ static pj_status_t alloc_send_buf(pj_ice_strans *ice_st, unsigned buf_size)
|
|
{
|
|
if (buf_size > ice_st->buf_size) {
|
|
unsigned i;
|
|
-
|
|
+
|
|
if (ice_st->is_pending) {
|
|
/* The current buffer is insufficient, but still currently used.*/
|
|
- return PJ_EBUSY;
|
|
+ return PJ_EPENDING;
|
|
}
|
|
|
|
pj_pool_safe_release(&ice_st->buf_pool);
|
|
@@ -851,7 +1092,7 @@ static pj_status_t alloc_send_buf(pj_ice_strans *ice_st, unsigned buf_size)
|
|
}
|
|
ice_st->buf_idx = ice_st->empty_idx = 0;
|
|
}
|
|
-
|
|
+
|
|
return PJ_SUCCESS;
|
|
}
|
|
|
|
@@ -919,7 +1160,7 @@ PJ_DEF(pj_status_t) pj_ice_strans_create( const char *name,
|
|
/* To maintain backward compatibility, check if old/deprecated setting is set
|
|
* and the new setting is not, copy the value to the new setting.
|
|
*/
|
|
- if (cfg->stun_tp_cnt == 0 &&
|
|
+ if (cfg->stun_tp_cnt == 0 &&
|
|
(cfg->stun.server.slen || cfg->stun.max_host_cands))
|
|
{
|
|
ice_st->cfg.stun_tp_cnt = 1;
|
|
@@ -1135,7 +1376,7 @@ static void sess_init_update(pj_ice_strans *ice_st)
|
|
pj_ice_get_cand_type_name(cand->type)));
|
|
return;
|
|
}
|
|
-
|
|
+
|
|
if (status == PJ_EUNKNOWN) {
|
|
status = cand->status;
|
|
} else {
|
|
@@ -1144,7 +1385,7 @@ static void sess_init_update(pj_ice_strans *ice_st)
|
|
status = PJ_SUCCESS;
|
|
}
|
|
}
|
|
-
|
|
+
|
|
if (status != PJ_SUCCESS)
|
|
break;
|
|
}
|
|
@@ -1288,6 +1529,12 @@ PJ_DEF(pj_status_t) pj_ice_strans_init_ice(pj_ice_strans *ice_st,
|
|
ice_cb.on_ice_complete = &on_ice_complete;
|
|
ice_cb.on_rx_data = &ice_rx_data;
|
|
ice_cb.on_tx_pkt = &ice_tx_pkt;
|
|
+#if PJ_HAS_TCP
|
|
+ ice_cb.wait_tcp_connection = &ice_wait_tcp_connection;
|
|
+ ice_cb.reconnect_tcp_connection = &ice_reconnect_tcp_connection;
|
|
+ ice_cb.close_tcp_connection = &ice_close_tcp_connection;
|
|
+ ice_cb.on_ice_destroy = &on_ice_destroy;
|
|
+#endif
|
|
|
|
/* Release the pool of previous ICE session to avoid memory bloat,
|
|
* as otherwise it will only be released after ICE strans is destroyed
|
|
@@ -1372,7 +1619,8 @@ PJ_DEF(pj_status_t) pj_ice_strans_init_ice(pj_ice_strans *ice_st,
|
|
&cand->foundation, &cand->addr,
|
|
&cand->base_addr, &cand->rel_addr,
|
|
pj_sockaddr_get_len(&cand->addr),
|
|
- (unsigned*)&ice_cand_id);
|
|
+ (unsigned*)&ice_cand_id,
|
|
+ cand->transport);
|
|
if (status != PJ_SUCCESS)
|
|
goto on_error;
|
|
}
|
|
@@ -1735,7 +1983,7 @@ pj_ice_strans_get_valid_pair(const pj_ice_strans *ice_st,
|
|
PJ_DEF(pj_status_t) pj_ice_strans_stop_ice(pj_ice_strans *ice_st)
|
|
{
|
|
PJ_ASSERT_RETURN(ice_st, PJ_EINVAL);
|
|
-
|
|
+
|
|
/* Protect with group lock, since this may cause race condition with
|
|
* pj_ice_strans_sendto2().
|
|
* See ticket #1877.
|
|
@@ -1771,7 +2019,7 @@ static pj_status_t use_buffer( pj_ice_strans *ice_st,
|
|
status = alloc_send_buf(ice_st, (unsigned)data_len);
|
|
if (status != PJ_SUCCESS)
|
|
return status;
|
|
-
|
|
+
|
|
if (ice_st->is_pending && ice_st->empty_idx == ice_st->buf_idx) {
|
|
/* We don't use buffer or there's no more empty buffer. */
|
|
return PJ_EBUSY;
|
|
@@ -1786,12 +2034,12 @@ static pj_status_t use_buffer( pj_ice_strans *ice_st,
|
|
pj_sockaddr_cp(&ice_st->send_buf[idx].dst_addr, dst_addr);
|
|
ice_st->send_buf[idx].dst_addr_len = dst_addr_len;
|
|
*buffer = ice_st->send_buf[idx].buffer;
|
|
-
|
|
+
|
|
if (ice_st->is_pending) {
|
|
/* We'll continue later since there's still a pending send. */
|
|
return PJ_EPENDING;
|
|
}
|
|
-
|
|
+
|
|
ice_st->is_pending = PJ_TRUE;
|
|
ice_st->buf_idx = idx;
|
|
|
|
@@ -1844,6 +2092,8 @@ static pj_status_t send_data(pj_ice_strans *ice_st,
|
|
}
|
|
}
|
|
|
|
+ def_cand = &comp->cand_list[comp->default_cand];
|
|
+ pj_bool_t add_header = def_cand->transport != PJ_CAND_UDP;
|
|
/* If ICE is available, send data with ICE. If ICE nego is not completed
|
|
* yet, ICE will try to send using any valid candidate pair. For any
|
|
* failure, it will fallback to sending with the default candidate
|
|
@@ -1854,16 +2104,35 @@ static pj_status_t send_data(pj_ice_strans *ice_st,
|
|
*/
|
|
if (ice_st->ice && ice_st->state <= PJ_ICE_STRANS_STATE_RUNNING) {
|
|
status = pj_ice_sess_send_data(ice_st->ice, comp_id, buf, data_len);
|
|
- if (status == PJ_SUCCESS || status == PJ_EPENDING) {
|
|
- pj_grp_lock_release(ice_st->grp_lock);
|
|
- goto on_return;
|
|
- }
|
|
- }
|
|
+ pj_grp_lock_release(ice_st->grp_lock);
|
|
+ goto on_return;
|
|
+ }
|
|
|
|
pj_grp_lock_release(ice_st->grp_lock);
|
|
|
|
- def_cand = &comp->cand_list[comp->default_cand];
|
|
-
|
|
+    /* TCP, add header */
+    if (add_header) {
+        /*
+         * RFC6544 ICE requires an agent to demultiplex STUN and
+         * application-layer traffic, since they appear on the same port. This
+         * demultiplexing is described in [RFC5245] and is done using the magic
+         * cookie and other fields of the message. Stream-oriented transports
+         * introduce another wrinkle, since they require a way to frame the
+         * connection so that the application and STUN packets can be extracted
+         * in order to differentiate STUN packets from application-layer
+         * traffic. For this reason, TCP media streams utilizing ICE use the
+         * basic framing provided in RFC 4571 [RFC4571], even if the application
+         * layer protocol is not RTP.
+         */
+        pj_uint8_t header_1 = data_len % 256;
+        pj_uint8_t header_0 = data_len >> 8;
+        pj_memcpy(&ice_st->rtp_pkt, &(header_0), sizeof(pj_uint8_t));
+        pj_memcpy(&ice_st->rtp_pkt[1], &(header_1), sizeof(pj_uint8_t));
+        pj_memcpy(&ice_st->rtp_pkt[2], (unsigned char *)data, data_len);
+        buf = &ice_st->rtp_pkt;
+        data_len += 2;
+    }
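/*
 * [Editor's note - illustration only, not part of the patch] The block above
 * applies RFC 4571 framing: each application packet sent over a TCP
 * candidate is prefixed with its length as a 16-bit big-endian integer.
 * Below is a minimal, self-contained sketch of that framing step; the helper
 * name frame_rfc4571() is hypothetical.
 */
#include <stdint.h>
#include <string.h>

/* Prepend the 16-bit big-endian length; returns the framed size, or 0 if it
 * does not fit in the output buffer. */
static size_t frame_rfc4571(uint8_t *out, size_t out_size,
                            const uint8_t *payload, uint16_t payload_len)
{
    if ((size_t)payload_len + 2 > out_size)
        return 0;
    out[0] = (uint8_t)(payload_len >> 8);    /* high byte first (network order) */
    out[1] = (uint8_t)(payload_len & 0xFF);  /* low byte */
    memcpy(out + 2, payload, payload_len);
    return (size_t)payload_len + 2;
}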
|
|
+
|
|
if (def_cand->status == PJ_SUCCESS) {
|
|
unsigned tp_idx = GET_TP_IDX(def_cand->transport_id);
|
|
|
|
@@ -1925,6 +2194,10 @@ static pj_status_t send_data(pj_ice_strans *ice_st,
|
|
status = pj_stun_sock_sendto(comp->stun[tp_idx].sock, NULL, buf,
|
|
(unsigned)data_len, 0, dest_addr,
|
|
dest_addr_len);
|
|
+ /* Do not count the header */
|
|
+ if (add_header) {
|
|
+ data_len -= sizeof(pj_uint16_t);
|
|
+ }
|
|
goto on_return;
|
|
}
|
|
|
|
@@ -1933,8 +2206,14 @@ static pj_status_t send_data(pj_ice_strans *ice_st,
|
|
|
|
on_return:
|
|
/* We continue later in on_data_sent() callback. */
|
|
- if (status == PJ_EPENDING)
|
|
- return status;
|
|
+ if (status == PJ_EPENDING) {
|
|
+ ice_st->last_data_len = data_len;
|
|
+ if (add_header) {
|
|
+ // Don't forget the header
|
|
+ ice_st->last_data_len += sizeof(pj_uint16_t);
|
|
+ }
|
|
+ return status;
|
|
+ }
|
|
|
|
if (call_cb) {
|
|
on_data_sent(ice_st, (status == PJ_SUCCESS? (pj_ssize_t)data_len: -status));
|
|
@@ -1966,7 +2245,7 @@ PJ_DEF(pj_status_t) pj_ice_strans_sendto( pj_ice_strans *ice_st,
|
|
dst_addr_len, PJ_TRUE, PJ_FALSE);
|
|
if (status == PJ_EPENDING)
|
|
status = PJ_SUCCESS;
|
|
-
|
|
+
|
|
return status;
|
|
}
|
|
#endif
|
|
@@ -2026,7 +2305,7 @@ static void on_valid_pair(pj_ice_sess *ice)
|
|
pj_sockaddr_print(&check->lcand->addr, lip, sizeof(lip), 3);
|
|
pj_sockaddr_print(&check->rcand->addr, rip, sizeof(rip), 3);
|
|
|
|
- if (tp_typ == TP_TURN) {
|
|
+ if (tp_typ == TP_TURN && check->lcand->transport == PJ_CAND_UDP) {
|
|
/* Activate channel binding for the remote address
|
|
* for more efficient data transfer using TURN.
|
|
*/
|
|
@@ -2068,6 +2347,15 @@ static void on_valid_pair(pj_ice_sess *ice)
|
|
pj_grp_lock_dec_ref(ice_st->grp_lock);
|
|
}
|
|
|
|
+static void on_ice_destroy(pj_ice_sess *ice)
|
|
+{
|
|
+ pj_ice_strans *ice_st = (pj_ice_strans*)ice->user_data;
|
|
+
|
|
+ if (ice_st->cb.on_destroy) {
|
|
+ (*ice_st->cb.on_destroy)(ice_st);
|
|
+ }
|
|
+}
|
|
+
|
|
/*
|
|
* Callback called by ICE session when ICE processing is complete, either
|
|
* successfully or with failure.
|
|
@@ -2107,25 +2395,43 @@ static void on_ice_complete(pj_ice_sess *ice, pj_status_t status)
|
|
pj_ice_strans_comp *comp = ice_st->comp[i];
|
|
|
|
check = pj_ice_strans_get_valid_pair(ice_st, i+1);
|
|
+
|
|
+ // We nominated a connection, we can close the other ones.
|
|
+ ice_close_remaining_tcp(ice_st->ice);
|
|
if (check) {
|
|
char lip[PJ_INET6_ADDRSTRLEN+10];
|
|
char rip[PJ_INET6_ADDRSTRLEN+10];
|
|
unsigned tp_idx = GET_TP_IDX(check->lcand->transport_id);
|
|
unsigned tp_typ = GET_TP_TYPE(check->lcand->transport_id);
|
|
|
|
- pj_sockaddr_print(&check->lcand->addr, lip,
|
|
- sizeof(lip), 3);
|
|
+ pj_sockaddr_print(&check->lcand->addr, lip,
|
|
+ sizeof(lip), 3);
|
|
pj_sockaddr_print(&check->rcand->addr, rip,
|
|
- sizeof(rip), 3);
|
|
-
|
|
- if (tp_typ == TP_TURN) {
|
|
+ sizeof(rip), 3);
|
|
+#if PJ_HAS_TCP
|
|
+ int idx = -1;
|
|
+ for (int i=0; i<ice_st->cfg.stun_tp_cnt; ++i) {
|
|
+ if (ice_st->cfg.stun_tp[i].af ==
|
|
+ check->rcand->addr.addr.sa_family)
|
|
+ {
|
|
+ idx = i;
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+ if (idx == -1) {
|
|
+ PJ_LOG(4, (ice_st->obj_name,
|
|
+ "Comp %d: No STUN sock found.",
|
|
+ comp->comp_id));
|
|
+ }
|
|
+#endif
|
|
+ if (tp_typ == TP_TURN && check->lcand->transport == PJ_CAND_UDP) {
|
|
/* Activate channel binding for the remote address
|
|
- * for more efficient data transfer using TURN.
|
|
- */
|
|
+ * for more efficient data transfer using TURN.
|
|
+ */
|
|
status = pj_turn_sock_bind_channel(
|
|
- comp->turn[tp_idx].sock,
|
|
- &check->rcand->addr,
|
|
- sizeof(check->rcand->addr));
|
|
+ comp->turn[tp_idx].sock,
|
|
+ &check->rcand->addr,
|
|
+ sizeof(check->rcand->addr));
|
|
|
|
/* Disable logging for Send/Data indications */
|
|
PJ_LOG(5,(ice_st->obj_name,
|
|
@@ -2211,6 +2517,29 @@ static pj_status_t ice_tx_pkt(pj_ice_sess *ice,
|
|
pj_sockaddr_get_port(dst_addr),
|
|
tp_typ));
|
|
|
|
+ /* TCP, add header */
|
|
+ if (comp->ice_st->cfg.stun_tp->conn_type == PJ_STUN_TP_TCP) {
|
|
+ /*
|
|
+ * RFC6544 ICE requires an agent to demultiplex STUN and
|
|
+ * application-layer traffic, since they appear on the same port. This
|
|
+ * demultiplexing is described in [RFC5245] and is done using the magic
|
|
+ * cookie and other fields of the message. Stream-oriented transports
|
|
+ * introduce another wrinkle, since they require a way to frame the
|
|
+ * connection so that the application and STUN packets can be extracted
|
|
+ * in order to differentiate STUN packets from application-layer
|
|
+ * traffic. For this reason, TCP media streams utilizing ICE use the
|
|
+ * basic framing provided in RFC 4571 [RFC4571], even if the application
|
|
+ * layer protocol is not RTP.
|
|
+ */
|
|
+ pj_uint8_t header_1 = size % 256;
|
|
+ pj_uint8_t header_0 = size >> 8;
|
|
+ pj_memcpy(&ice_st->rtp_pkt, &(header_0), sizeof(pj_uint8_t));
|
|
+ pj_memcpy(&ice_st->rtp_pkt[1], &(header_1), sizeof(pj_uint8_t));
|
|
+ pj_memcpy(&ice_st->rtp_pkt[2], (unsigned char *)pkt, size);
|
|
+ buf = &ice_st->rtp_pkt;
|
|
+ size += 2;
|
|
+ }
|
|
+
|
|
if (tp_typ == TP_TURN) {
|
|
if (comp->turn[tp_idx].sock) {
|
|
status = pj_turn_sock_sendto(comp->turn[tp_idx].sock,
|
|
@@ -2233,7 +2562,7 @@ static pj_status_t ice_tx_pkt(pj_ice_sess *ice,
|
|
if (status != PJ_SUCCESS) {
|
|
goto on_return;
|
|
}
|
|
-
|
|
+
|
|
pj_sockaddr_cp(&comp->dst_addr, dst_addr);
|
|
comp->synth_addr_len = pj_sockaddr_get_len(&comp->synth_addr);
|
|
}
|
|
@@ -2244,9 +2573,13 @@ static pj_status_t ice_tx_pkt(pj_ice_sess *ice,
|
|
dest_addr_len = dst_addr_len;
|
|
}
|
|
|
|
- status = pj_stun_sock_sendto(comp->stun[tp_idx].sock, NULL,
|
|
- buf, (unsigned)size, 0,
|
|
- dest_addr, dest_addr_len);
|
|
+ if (comp->stun[tp_idx].sock) {
|
|
+ status = pj_stun_sock_sendto(comp->stun[tp_idx].sock, NULL,
|
|
+ buf, (unsigned)size, 0,
|
|
+ dest_addr, dest_addr_len);
|
|
+ } else {
|
|
+ status = PJ_EINVALIDOP;
|
|
+ }
|
|
} else {
|
|
pj_assert(!"Invalid transport ID");
|
|
status = PJ_EINVALIDOP;
|
|
@@ -2292,7 +2625,7 @@ static void check_pending_send(pj_ice_strans *ice_st)
|
|
|
|
if (ice_st->num_buf > 0)
|
|
ice_st->buf_idx = (ice_st->buf_idx + 1) % ice_st->num_buf;
|
|
-
|
|
+
|
|
if (ice_st->num_buf > 0 && ice_st->buf_idx != ice_st->empty_idx) {
|
|
/* There's some pending send. Send it one by one. */
|
|
pending_send *ps = &ice_st->send_buf[ice_st->buf_idx];
|
|
@@ -2306,6 +2639,253 @@ static void check_pending_send(pj_ice_strans *ice_st)
|
|
}
|
|
}
|
|
|
|
+static void on_peer_connection(pj_stun_session* sess,
|
|
+ pj_status_t status,
|
|
+ pj_sockaddr_t* remote_addr)
|
|
+{
|
|
+
|
|
+ pj_stun_sock *stun_sock;
|
|
+ sock_user_data *data;
|
|
+ pj_ice_strans_comp *comp;
|
|
+ pj_ice_strans *ice_st;
|
|
+
|
|
+ stun_sock = (pj_stun_sock *)pj_stun_session_get_user_data(sess);
|
|
+ /* We have disassociated ourselves from the STUN session */
|
|
+ if (!stun_sock)
|
|
+ return;
|
|
+
|
|
+ data = (sock_user_data *)pj_stun_sock_get_user_data(stun_sock);
|
|
+ /* We have disassociated ourselves from the STUN socket */
|
|
+ if (!data)
|
|
+ return;
|
|
+
|
|
+ comp = data->comp;
|
|
+ ice_st = comp->ice_st;
|
|
+
|
|
+ /* Incorrect ICE */
|
|
+ if (!ice_st || !ice_st->ice)
|
|
+ return;
|
|
+
|
|
+ pj_grp_lock_add_ref(ice_st->grp_lock);
|
|
+ ice_sess_on_peer_connection(ice_st->ice,
|
|
+ data->transport_id, status, remote_addr);
|
|
+ pj_grp_lock_dec_ref(ice_st->grp_lock);
|
|
+}
|
|
+
|
|
+static void on_peer_reset_connection(pj_stun_session* sess,
|
|
+ pj_sockaddr_t* remote_addr)
|
|
+{
|
|
+ pj_stun_sock *stun_sock;
|
|
+ sock_user_data *data;
|
|
+ pj_ice_strans_comp *comp;
|
|
+ pj_ice_strans *ice_st;
|
|
+
|
|
+ stun_sock = (pj_stun_sock *)pj_stun_session_get_user_data(sess);
|
|
+ /* We have disassociated ourselves from the STUN session */
|
|
+ if (!stun_sock)
|
|
+ return;
|
|
+
|
|
+ data = (sock_user_data *)pj_stun_sock_get_user_data(stun_sock);
|
|
+ /* We have disassociated ourselves from the STUN socket */
|
|
+ if (!data)
|
|
+ return;
|
|
+
|
|
+ comp = data->comp;
|
|
+ ice_st = comp->ice_st;
|
|
+
|
|
+ /* Incorrect ICE */
|
|
+ if (!ice_st || !ice_st->ice)
|
|
+ return;
|
|
+
|
|
+ pj_grp_lock_add_ref(ice_st->grp_lock);
|
|
+
|
|
+ ice_sess_on_peer_reset_connection(ice_st->ice,
|
|
+ data->transport_id, remote_addr);
|
|
+ pj_grp_lock_dec_ref(ice_st->grp_lock);
|
|
+}
|
|
+
|
|
+static void on_peer_packet(pj_stun_session* sess, pj_sockaddr_t* remote_addr)
|
|
+{
|
|
+
|
|
+ if (!sess || !remote_addr)
|
|
+ return;
|
|
+
|
|
+ pj_stun_sock *stun_sock;
|
|
+ sock_user_data *data;
|
|
+ pj_ice_strans_comp *comp;
|
|
+ pj_ice_strans *ice_st;
|
|
+
|
|
+ stun_sock = (pj_stun_sock *)pj_stun_session_get_user_data(sess);
|
|
+ /* We have disassociated ourselves from the STUN session */
|
|
+ if (!stun_sock)
|
|
+ return;
|
|
+
|
|
+ data = (sock_user_data *)pj_stun_sock_get_user_data(stun_sock);
|
|
+ /* We have disassociated ourselves from the STUN socket */
|
|
+ if (!data)
|
|
+ return;
|
|
+
|
|
+ comp = data->comp;
|
|
+ if (!comp)
|
|
+ return;
|
|
+
|
|
+ ice_st = comp->ice_st;
|
|
+ /* Incorrect ICE */
|
|
+ if (!ice_st || !ice_st->ice)
|
|
+ return;
|
|
+
|
|
+ pj_grp_lock_add_ref(ice_st->grp_lock);
|
|
+ ice_sess_on_peer_packet(ice_st->ice, data->transport_id, remote_addr);
|
|
+ pj_grp_lock_dec_ref(ice_st->grp_lock);
|
|
+}
|
|
+
|
|
+#if PJ_HAS_TCP
|
|
+static pj_status_t ice_wait_tcp_connection(pj_ice_sess *ice,
|
|
+ unsigned check_id)
|
|
+{
|
|
+ pj_ice_sess_check *check = &ice->clist.checks[check_id];
|
|
+ const pj_ice_sess_cand *lcand = check->lcand;
|
|
+ const pj_ice_sess_cand *rcand = check->rcand;
|
|
+ pj_ice_strans *ice_st = (pj_ice_strans *)ice->user_data;
|
|
+ pj_ice_strans_comp *st_comp = ice_st->comp[lcand->comp_id - 1];
|
|
+
|
|
+ int idx = -1;
|
|
+ for (int i=0; i<ice_st->cfg.stun_tp_cnt; ++i)
|
|
+ if (ice_st->cfg.stun_tp[i].af == rcand->addr.addr.sa_family) {
|
|
+ idx = i;
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ if (idx == -1) {
|
|
+ PJ_LOG(4, (ice_st->obj_name, "Comp %d: No STUN sock found.",
|
|
+ st_comp->comp_id));
|
|
+ return PJ_EINVAL;
|
|
+ }
|
|
+ if (st_comp->stun[idx].sock) {
|
|
+ pj_stun_session *sess = pj_stun_sock_get_session(st_comp->stun[idx].sock);
|
|
+ if (!sess) {
|
|
+ PJ_LOG(4, (ice_st->obj_name, "Comp %d: No STUN session.",
|
|
+ st_comp->comp_id));
|
|
+ return PJ_EINVAL;
|
|
+ }
|
|
+ pj_stun_session_callback(sess)->on_peer_connection =
|
|
+ &on_peer_connection;
|
|
+ pj_stun_session_callback(sess)->on_peer_reset_connection =
|
|
+ &on_peer_reset_connection;
|
|
+ pj_stun_session_callback(sess)->on_peer_packet = &on_peer_packet;
|
|
+
|
|
+ return pj_stun_sock_connect_active(st_comp->stun[idx].sock,
|
|
+ &rcand->addr,
|
|
+ rcand->addr.addr.sa_family);
|
|
+ }
|
|
+
|
|
+ return PJ_EINVAL;
|
|
+}
|
|
+
|
|
+static pj_status_t ice_reconnect_tcp_connection(pj_ice_sess *ice,
|
|
+ unsigned check_id)
|
|
+{
|
|
+ pj_ice_sess_check *check = &ice->clist.checks[check_id];
|
|
+ const pj_ice_sess_cand *lcand = check->lcand;
|
|
+ const pj_ice_sess_cand *rcand = check->rcand;
|
|
+ pj_ice_strans *ice_st = (pj_ice_strans *)ice->user_data;
|
|
+ pj_ice_strans_comp *st_comp = ice_st->comp[lcand->comp_id - 1];
|
|
+
|
|
+ int idx = -1;
|
|
+ for (int i=0; i<ice_st->cfg.stun_tp_cnt; ++i)
|
|
+ if (ice_st->cfg.stun_tp[i].af == rcand->addr.addr.sa_family) {
|
|
+ idx = i;
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ if (idx == -1) {
|
|
+ PJ_LOG(4, (ice_st->obj_name, "Comp %d: No STUN sock found.",
|
|
+ st_comp->comp_id));
|
|
+ return PJ_EINVAL;
|
|
+ }
|
|
+
|
|
+ if (st_comp->stun[idx].sock) {
|
|
+ pj_stun_session *sess = pj_stun_sock_get_session(st_comp->stun[idx].sock);
|
|
+ if (!sess) {
|
|
+ PJ_LOG(4, (ice_st->obj_name, "Comp %d: No STUN session.",
|
|
+ st_comp->comp_id));
|
|
+ return PJ_EINVAL;
|
|
+ }
|
|
+ pj_stun_session_callback(sess)->on_peer_connection =
|
|
+ &on_peer_connection;
|
|
+ pj_stun_session_callback(sess)->on_peer_reset_connection =
|
|
+ &on_peer_reset_connection;
|
|
+ pj_stun_session_callback(sess)->on_peer_packet = &on_peer_packet;
|
|
+ return pj_stun_sock_reconnect_active(st_comp->stun[idx].sock,
|
|
+ &rcand->addr,
|
|
+ rcand->addr.addr.sa_family);
|
|
+ }
|
|
+
|
|
+ return PJ_EINVAL;
|
|
+}
|
|
+
|
|
+static pj_status_t ice_close_tcp_connection(pj_ice_sess *ice,
|
|
+ unsigned check_id)
|
|
+{
|
|
+ pj_ice_sess_check *check = &ice->clist.checks[check_id];
|
|
+ const pj_ice_sess_cand *lcand = check->lcand;
|
|
+ const pj_ice_sess_cand *rcand = check->rcand;
|
|
+ pj_ice_strans *ice_st = (pj_ice_strans *)ice->user_data;
|
|
+ pj_ice_strans_comp *st_comp = ice_st->comp[lcand->comp_id - 1];
|
|
+
|
|
+ int idx = -1;
|
|
+ for (int i=0; i<ice_st->cfg.stun_tp_cnt; ++i)
|
|
+ if (ice_st->cfg.stun_tp[i].af == rcand->addr.addr.sa_family) {
|
|
+ idx = i;
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ if (idx != -1 && st_comp->stun[idx].sock) {
|
|
+ const pj_ice_sess_cand *rcand = check->rcand;
|
|
+ return pj_stun_sock_close(st_comp->stun[idx].sock, &rcand->addr);
|
|
+ }
|
|
+
|
|
+ return PJ_EINVAL;
|
|
+}
|
|
+
|
|
+static pj_status_t ice_close_remaining_tcp(pj_ice_sess *ice)
|
|
+{
|
|
+ for (int i = 0; i < ice->comp_cnt; i++) {
|
|
+ pj_ice_strans *ice_st = (pj_ice_strans *)ice->user_data;
|
|
+ pj_ice_strans_comp *st_comp = ice_st->comp[i];
|
|
+
|
|
+ const pj_ice_sess_check *valid_check = pj_ice_strans_get_valid_pair(ice_st, i + 1);
|
|
+
|
|
+ if (!valid_check) {
|
|
+ continue;
|
|
+ }
|
|
+
|
|
+ if (valid_check->lcand->type != PJ_ICE_CAND_TYPE_RELAYED
|
|
+ && valid_check->rcand->type != PJ_ICE_CAND_TYPE_RELAYED) {
|
|
+ // If we're not a turn session we can close it.
|
|
+ for (int j = 0; j < ice_st->cfg.turn_tp_cnt; ++j) {
|
|
+ if (st_comp->turn[j].sock) {
|
|
+ pj_turn_sock_destroy(st_comp->turn[j].sock);
|
|
+ st_comp->turn[j].sock = NULL;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+ for (int j=0; j< ice_st->cfg.stun_tp_cnt; ++j) {
|
|
+ if (st_comp->stun[j].sock) {
|
|
+ pj_stun_sock_close_all_except(st_comp->stun[j].sock, &valid_check->rcand->addr);
|
|
+ }
|
|
+ if (ice_st->cfg.stun_tp[j].af != valid_check->rcand->addr.addr.sa_family) {
|
|
+ // If the valid candidate got the other address family we can close.
|
|
+ pj_stun_sock_destroy(st_comp->stun[j].sock);
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ return PJ_SUCCESS;
|
|
+}
|
|
+
|
|
+#endif
|
|
+
|
|
/* Notifification when asynchronous send operation via STUN/TURN
|
|
* has completed.
|
|
*/
|
|
@@ -2314,7 +2894,8 @@ static pj_bool_t on_data_sent(pj_ice_strans *ice_st, pj_ssize_t sent)
|
|
if (ice_st->destroy_req || !ice_st->is_pending)
|
|
return PJ_TRUE;
|
|
|
|
- if (ice_st->call_send_cb && ice_st->cb.on_data_sent) {
|
|
+ if (ice_st->call_send_cb && ice_st->cb.on_data_sent
|
|
+ && sent == ice_st->last_data_len /* Only app data should be announced */) {
|
|
(*ice_st->cb.on_data_sent)(ice_st, sent);
|
|
}
|
|
|
|
@@ -2472,7 +3053,7 @@ static pj_bool_t stun_on_status(pj_stun_sock *stun_sock,
|
|
{
|
|
/* We get an IPv4 mapped address for our IPv6
|
|
* host address.
|
|
- */
|
|
+ */
|
|
comp->ipv4_mapped = PJ_TRUE;
|
|
|
|
/* Find other host candidates with the same (IPv6)
|
|
@@ -2484,7 +3065,7 @@ static pj_bool_t stun_on_status(pj_stun_sock *stun_sock,
|
|
|
|
if (comp->cand_list[i].type != PJ_ICE_CAND_TYPE_HOST)
|
|
continue;
|
|
-
|
|
+
|
|
a1 = &comp->cand_list[i].addr;
|
|
a2 = &cand->base_addr;
|
|
if (pj_memcmp(pj_sockaddr_get_addr(a1),
|
|
@@ -2501,7 +3082,7 @@ static pj_bool_t stun_on_status(pj_stun_sock *stun_sock,
|
|
pj_sockaddr_cp(&cand->base_addr, &info.mapped_addr);
|
|
pj_sockaddr_cp(&cand->rel_addr, &info.mapped_addr);
|
|
}
|
|
-
|
|
+
|
|
/* Eliminate the srflx candidate if the address is
|
|
* equal to other (host) candidates.
|
|
*/
|
|
@@ -2551,7 +3132,8 @@ static pj_bool_t stun_on_status(pj_stun_sock *stun_sock,
|
|
&cand->base_addr,
|
|
&cand->rel_addr,
|
|
pj_sockaddr_get_len(&cand->addr),
|
|
- NULL);
|
|
+ NULL,
|
|
+ cand->transport);
|
|
}
|
|
}
|
|
|
|
@@ -2576,7 +3158,7 @@ static pj_bool_t stun_on_status(pj_stun_sock *stun_sock,
|
|
if (op == PJ_STUN_SOCK_MAPPED_ADDR_CHANGE &&
|
|
ice_st->cb.on_ice_complete)
|
|
{
|
|
- (*ice_st->cb.on_ice_complete)(ice_st,
|
|
+ (*ice_st->cb.on_ice_complete)(ice_st,
|
|
PJ_ICE_STRANS_OP_ADDR_CHANGE,
|
|
status);
|
|
}
|
|
@@ -2632,6 +3214,10 @@ static pj_bool_t stun_on_status(pj_stun_sock *stun_sock,
|
|
}
|
|
}
|
|
break;
|
|
+ case PJ_STUN_SESS_DESTROYED:
|
|
+ case PJ_STUN_TCP_CONNECT_ERROR:
|
|
+ default:
|
|
+ break;
|
|
}
|
|
|
|
return pj_grp_lock_dec_ref(ice_st->grp_lock)? PJ_FALSE : PJ_TRUE;
|
|
@@ -2671,16 +3257,105 @@ static void turn_on_rx_data(pj_turn_sock *turn_sock,
|
|
|
|
} else {
|
|
|
|
- /* Hand over the packet to ICE */
|
|
- status = pj_ice_sess_on_rx_pkt(comp->ice_st->ice, comp->comp_id,
|
|
- data->transport_id, pkt, pkt_len,
|
|
- peer_addr, addr_len);
|
|
-
|
|
- if (status != PJ_SUCCESS) {
|
|
- ice_st_perror(comp->ice_st,
|
|
- "Error processing packet from TURN relay",
|
|
- status);
|
|
- }
|
|
+ /* Hand over the packet to ICE */
|
|
+ if (comp->ice_st->cfg.turn_tp->conn_type == PJ_TURN_TP_TCP && pkt_len > 0) {
|
|
+ unsigned parsed = 0;
|
|
+ pj_status_t status;
|
|
+
|
|
+ do {
|
|
+ pj_uint16_t leftover = pkt_len - parsed;
|
|
+ pj_uint8_t *current_packet = ((pj_uint8_t *)(pkt)) + parsed;
|
|
+
|
|
+ /**
|
|
+ * RFC6544, the packet is wrapped into a packet following the
|
|
+ * RFC4571
|
|
+ */
|
|
+ pj_bool_t store_remaining = PJ_TRUE;
|
|
+ if (comp->ice_st->rx_buffer_size ||
|
|
+ comp->ice_st->rx_wanted_size)
|
|
+ {
|
|
+ /* only the first byte of the frame length header has been buffered */
|
|
+ if (comp->ice_st->rx_buffer_size == 1 && comp->ice_st->rx_wanted_size == 0) {
|
|
+ /* get the pending frame's length from its header */
|
|
+ leftover = GETVAL16H(comp->ice_st->rx_buffer,
|
|
+ current_packet);
|
|
+ /* adjust counters accordingly */
|
|
+ comp->ice_st->rx_buffer_size = 0;
|
|
+ current_packet++;
|
|
+ parsed++;
|
|
+
|
|
+ if (leftover + parsed <= pkt_len) {
|
|
+ /* the whole frame promised by the header is
+ * available in this packet, so consume it and
+ * continue parsing.
|
|
+ */
|
|
+ store_remaining = PJ_FALSE;
|
|
+ parsed += leftover;
|
|
+ } else {
|
|
+ comp->ice_st->rx_wanted_size = leftover;
|
|
+ }
|
|
+ } else if (leftover + comp->ice_st->rx_buffer_size >=
|
|
+ comp->ice_st->rx_wanted_size)
|
|
+ {
|
|
+ /* We have enough leftover bytes in buffer to build a new
|
|
+ * packet and parse it
|
|
+ */
|
|
+ store_remaining = PJ_FALSE;
|
|
+
|
|
+ pj_uint16_t eaten_bytes = comp->ice_st->rx_wanted_size -
|
|
+ comp->ice_st->rx_buffer_size;
|
|
+ pj_memcpy(comp->ice_st->rx_buffer +
|
|
+ comp->ice_st->rx_buffer_size,
|
|
+ current_packet, eaten_bytes);
|
|
+
|
|
+ leftover = comp->ice_st->rx_wanted_size;
|
|
+ current_packet = comp->ice_st->rx_buffer;
|
|
+ parsed += eaten_bytes;
|
|
+
|
|
+ comp->ice_st->rx_buffer_size = 0;
|
|
+ comp->ice_st->rx_wanted_size = 0;
|
|
+ }
|
|
+ } else if (leftover > 1) {
|
|
+ leftover = GETVAL16H(current_packet, current_packet+1);
|
|
+ current_packet += 2;
|
|
+ parsed += 2;
|
|
+ if (leftover + parsed <= pkt_len) {
|
|
+ store_remaining = PJ_FALSE;
|
|
+ parsed += leftover;
|
|
+ } else {
|
|
+ comp->ice_st->rx_wanted_size = leftover;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (store_remaining) {
|
|
+ leftover = pkt_len - parsed;
|
|
+ pj_memcpy(comp->ice_st->rx_buffer +
|
|
+ comp->ice_st->rx_buffer_size,
|
|
+ current_packet, leftover);
|
|
+ comp->ice_st->rx_buffer_size += leftover;
|
|
+ status = PJ_SUCCESS;
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ status = pj_ice_sess_on_rx_pkt(comp->ice_st->ice, comp->comp_id,
|
|
+ data->transport_id,
|
|
+ current_packet, leftover,
|
|
+ peer_addr, addr_len);
|
|
+ if (status != PJ_SUCCESS) {
|
|
+ ice_st_perror(comp->ice_st,
|
|
+ "Error processing packet from TURN relay",
|
|
+ status);
|
|
+ }
|
|
+ } while (parsed < pkt_len);
|
|
+ } else {
|
|
+ status = pj_ice_sess_on_rx_pkt(comp->ice_st->ice, comp->comp_id,
|
|
+ data->transport_id, pkt, pkt_len,
|
|
+ peer_addr, addr_len);
|
|
+ if (status != PJ_SUCCESS)
|
|
+ ice_st_perror(comp->ice_st,
|
|
+ "Error processing packet from TURN relay",
|
|
+ status);
|
|
+ }
|
|
}
|
|
|
|
pj_grp_lock_dec_ref(comp->ice_st->grp_lock);
|
|
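Editorial note: the TCP branch above de-frames data received from the TURN relay. Per RFC 4571 (as required by RFC 6544), every packet on a stream is preceded by a 16-bit big-endian length, and both the 2-byte header and the payload may arrive split across reads, which is what the rx_buffer/rx_wanted_size bookkeeping handles. A minimal, self-contained sketch of the framing itself, with illustrative names rather than pjnath API:

/* Editorial sketch, not part of the patch. */
#include <stdint.h>
#include <string.h>

/* Frame one payload for a stream socket: 16-bit big-endian length + payload.
 * Returns the number of bytes written to 'out', or 0 if it does not fit.
 */
static size_t rfc4571_frame(const uint8_t *payload, uint16_t len,
                            uint8_t *out, size_t out_size)
{
    if ((size_t)len + 2 > out_size)
        return 0;
    out[0] = (uint8_t)(len >> 8);     /* high byte first (network order) */
    out[1] = (uint8_t)(len & 0xFF);
    memcpy(out + 2, payload, len);
    return (size_t)len + 2;
}

/* Read the frame length from a contiguous 2-byte header; the patch's
 * GETVAL16H() does the same but takes the two bytes from separate buffers,
 * because a read may stop in the middle of the header.
 */
static uint16_t rfc4571_frame_len(const uint8_t *hdr)
{
    return (uint16_t)((hdr[0] << 8) | hdr[1]);
}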
@@ -2816,10 +3491,11 @@ static void turn_on_state(pj_turn_sock *turn_sock, pj_turn_state_t old_state,
|
|
cand->local_pref,
|
|
&cand->foundation,
|
|
&cand->addr,
|
|
- &cand->base_addr,
|
|
+ &cand->base_addr,
|
|
&cand->rel_addr,
|
|
pj_sockaddr_get_len(&cand->addr),
|
|
- NULL);
|
|
+ NULL,
|
|
+ cand->transport);
|
|
if (status != PJ_SUCCESS) {
|
|
PJ_PERROR(4,(comp->ice_st->obj_name, status,
|
|
"Comp %d/%d: failed to add TURN (tpid=%d) to ICE",
|
|
@@ -2953,4 +3629,3 @@ on_return:
|
|
|
|
pj_log_pop_indent();
|
|
}
|
|
-
|
|
diff --git a/pjnath/src/pjnath/nat_detect.c b/pjnath/src/pjnath/nat_detect.c
|
|
index cf94c4e44..cb35770cd 100644
|
|
--- a/pjnath/src/pjnath/nat_detect.c
|
|
+++ b/pjnath/src/pjnath/nat_detect.c
|
|
@@ -1,4 +1,4 @@
|
|
-/*
|
|
+/*
|
|
* Copyright (C) 2008-2011 Teluu Inc. (http://www.teluu.com)
|
|
* Copyright (C) 2003-2008 Benny Prijono <benny@prijono.org>
|
|
*
|
|
@@ -14,7 +14,7 @@
|
|
*
|
|
* You should have received a copy of the GNU General Public License
|
|
* along with this program; if not, write to the Free Software
|
|
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
|
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
|
*/
|
|
#include <pjnath/nat_detect.h>
|
|
#include <pjnath/errno.h>
|
|
@@ -109,8 +109,8 @@ typedef struct nat_detect_session
|
|
} nat_detect_session;
|
|
|
|
|
|
-static void on_read_complete(pj_ioqueue_key_t *key,
|
|
- pj_ioqueue_op_key_t *op_key,
|
|
+static void on_read_complete(pj_ioqueue_key_t *key,
|
|
+ pj_ioqueue_op_key_t *op_key,
|
|
pj_ssize_t bytes_read);
|
|
static void on_request_complete(pj_stun_session *sess,
|
|
pj_status_t status,
|
|
@@ -201,7 +201,7 @@ static pj_status_t get_local_interface(const pj_sockaddr *server,
|
|
}
|
|
|
|
pj_sockaddr_cp(local_addr, &tmp);
|
|
-
|
|
+
|
|
pj_sock_close(sock);
|
|
return PJ_SUCCESS;
|
|
}
|
|
@@ -241,7 +241,7 @@ PJ_DEF(pj_status_t) pj_stun_detect_nat_type2(const pj_sockaddr *server,
|
|
/*
|
|
* Init NAT detection session.
|
|
*/
|
|
- pool = pj_pool_create(stun_cfg->pf, "natck%p", PJNATH_POOL_LEN_NATCK,
|
|
+ pool = pj_pool_create(stun_cfg->pf, "natck%p", PJNATH_POOL_LEN_NATCK,
|
|
PJNATH_POOL_INC_NATCK, NULL);
|
|
if (!pool)
|
|
return PJ_ENOMEM;
|
|
@@ -317,7 +317,7 @@ PJ_DEF(pj_status_t) pj_stun_detect_nat_type2(const pj_sockaddr *server,
|
|
pj_bzero(&ioqueue_cb, sizeof(ioqueue_cb));
|
|
ioqueue_cb.on_read_complete = &on_read_complete;
|
|
|
|
- status = pj_ioqueue_register_sock2(sess->pool, stun_cfg->ioqueue,
|
|
+ status = pj_ioqueue_register_sock2(sess->pool, stun_cfg->ioqueue,
|
|
sess->sock, sess->grp_lock, sess,
|
|
&ioqueue_cb, &sess->key);
|
|
if (status != PJ_SUCCESS)
|
|
@@ -330,7 +330,7 @@ PJ_DEF(pj_status_t) pj_stun_detect_nat_type2(const pj_sockaddr *server,
|
|
sess_cb.on_request_complete = &on_request_complete;
|
|
sess_cb.on_send_msg = &on_send_msg;
|
|
status = pj_stun_session_create(stun_cfg, pool->obj_name, &sess_cb,
|
|
- PJ_FALSE, sess->grp_lock, &sess->stun_sess);
|
|
+ PJ_FALSE, sess->grp_lock, &sess->stun_sess, PJ_STUN_TP_UDP);
|
|
if (status != PJ_SUCCESS)
|
|
goto on_error;
|
|
|
|
@@ -359,7 +359,7 @@ on_error:
|
|
|
|
static void sess_destroy(nat_detect_session *sess)
|
|
{
|
|
- if (sess->stun_sess) {
|
|
+ if (sess->stun_sess) {
|
|
pj_stun_session_destroy(sess->stun_sess);
|
|
sess->stun_sess = NULL;
|
|
}
|
|
@@ -422,8 +422,8 @@ static void end_session(nat_detect_session *sess,
|
|
/*
|
|
* Callback upon receiving packet from network.
|
|
*/
|
|
-static void on_read_complete(pj_ioqueue_key_t *key,
|
|
- pj_ioqueue_op_key_t *op_key,
|
|
+static void on_read_complete(pj_ioqueue_key_t *key,
|
|
+ pj_ioqueue_op_key_t *op_key,
|
|
pj_ssize_t bytes_read)
|
|
{
|
|
nat_detect_session *sess;
|
|
@@ -440,19 +440,19 @@ static void on_read_complete(pj_ioqueue_key_t *key,
|
|
|
|
if (bytes_read < 0) {
|
|
if (-bytes_read != PJ_STATUS_FROM_OS(OSERR_EWOULDBLOCK) &&
|
|
- -bytes_read != PJ_STATUS_FROM_OS(OSERR_EINPROGRESS) &&
|
|
- -bytes_read != PJ_STATUS_FROM_OS(OSERR_ECONNRESET))
|
|
+ -bytes_read != PJ_STATUS_FROM_OS(OSERR_EINPROGRESS) &&
|
|
+ -bytes_read != PJ_STATUS_FROM_OS(OSERR_ECONNRESET))
|
|
{
|
|
/* Permanent error */
|
|
- end_session(sess, (pj_status_t)-bytes_read,
|
|
+ end_session(sess, (pj_status_t)-bytes_read,
|
|
PJ_STUN_NAT_TYPE_ERR_UNKNOWN);
|
|
goto on_return;
|
|
}
|
|
|
|
} else if (bytes_read > 0) {
|
|
pj_stun_session_on_rx_pkt(sess->stun_sess, sess->rx_pkt, bytes_read,
|
|
- PJ_STUN_IS_DATAGRAM|PJ_STUN_CHECK_PACKET,
|
|
- NULL, NULL,
|
|
+ PJ_STUN_IS_DATAGRAM|PJ_STUN_CHECK_PACKET,
|
|
+ NULL, NULL,
|
|
&sess->src_addr, sess->src_addr_len);
|
|
}
|
|
|
|
@@ -460,7 +460,7 @@ static void on_read_complete(pj_ioqueue_key_t *key,
|
|
sess->rx_pkt_len = sizeof(sess->rx_pkt);
|
|
sess->src_addr_len = sizeof(sess->src_addr);
|
|
status = pj_ioqueue_recvfrom(key, op_key, sess->rx_pkt, &sess->rx_pkt_len,
|
|
- PJ_IOQUEUE_ALWAYS_ASYNC,
|
|
+ PJ_IOQUEUE_ALWAYS_ASYNC,
|
|
&sess->src_addr, &sess->src_addr_len);
|
|
|
|
if (status != PJ_EPENDING) {
|
|
@@ -595,11 +595,11 @@ static void on_request_complete(pj_stun_session *stun_sess,
|
|
/* Send Test 1B only when Test 2 completes. Must not send Test 1B
|
|
* before Test 2 completes to avoid creating mapping on the NAT.
|
|
*/
|
|
- if (!sess->result[ST_TEST_1B].executed &&
|
|
+ if (!sess->result[ST_TEST_1B].executed &&
|
|
sess->result[ST_TEST_2].complete &&
|
|
sess->result[ST_TEST_2].status != PJ_SUCCESS &&
|
|
sess->result[ST_TEST_1].complete &&
|
|
- sess->result[ST_TEST_1].status == PJ_SUCCESS)
|
|
+ sess->result[ST_TEST_1].status == PJ_SUCCESS)
|
|
{
|
|
cmp = pj_sockaddr_cmp(&sess->local_addr, &sess->result[ST_TEST_1].ma);
|
|
if (cmp != 0)
|
|
@@ -661,7 +661,7 @@ static void on_request_complete(pj_stun_session *stun_sess,
|
|
switch (sess->result[ST_TEST_1].status) {
|
|
case PJNATH_ESTUNTIMEDOUT:
|
|
/*
|
|
- * Test 1 has timed-out. Conclude with NAT_TYPE_BLOCKED.
|
|
+ * Test 1 has timed-out. Conclude with NAT_TYPE_BLOCKED.
|
|
*/
|
|
end_session(sess, PJ_SUCCESS, PJ_STUN_NAT_TYPE_BLOCKED);
|
|
break;
|
|
@@ -694,7 +694,7 @@ static void on_request_complete(pj_stun_session *stun_sess,
|
|
/*
|
|
* We've got other error with Test 2.
|
|
*/
|
|
- end_session(sess, sess->result[ST_TEST_2].status,
|
|
+ end_session(sess, sess->result[ST_TEST_2].status,
|
|
PJ_STUN_NAT_TYPE_ERR_UNKNOWN);
|
|
break;
|
|
}
|
|
@@ -774,14 +774,14 @@ static void on_request_complete(pj_stun_session *stun_sess,
|
|
* It could be that port 3489 is blocked, while the
|
|
* NAT itself looks to be a Restricted one.
|
|
*/
|
|
- end_session(sess, PJ_SUCCESS,
|
|
+ end_session(sess, PJ_SUCCESS,
|
|
PJ_STUN_NAT_TYPE_RESTRICTED);
|
|
break;
|
|
default:
|
|
/* Can't distinguish between Symmetric and Port
|
|
* Restricted, so set the type to Unknown
|
|
*/
|
|
- end_session(sess, PJ_SUCCESS,
|
|
+ end_session(sess, PJ_SUCCESS,
|
|
PJ_STUN_NAT_TYPE_ERR_UNKNOWN);
|
|
break;
|
|
}
|
|
@@ -799,7 +799,7 @@ static void on_request_complete(pj_stun_session *stun_sess,
|
|
/*
|
|
* We've got other error with Test 2.
|
|
*/
|
|
- end_session(sess, sess->result[ST_TEST_2].status,
|
|
+ end_session(sess, sess->result[ST_TEST_2].status,
|
|
PJ_STUN_NAT_TYPE_ERR_UNKNOWN);
|
|
break;
|
|
}
|
|
@@ -809,7 +809,7 @@ static void on_request_complete(pj_stun_session *stun_sess,
|
|
/*
|
|
* We've got other error with Test 1.
|
|
*/
|
|
- end_session(sess, sess->result[ST_TEST_1].status,
|
|
+ end_session(sess, sess->result[ST_TEST_1].status,
|
|
PJ_STUN_NAT_TYPE_ERR_UNKNOWN);
|
|
break;
|
|
}
|
|
@@ -841,15 +841,15 @@ static pj_status_t send_test(nat_detect_session *sess,
|
|
tsx_id[2] = test_id;
|
|
|
|
/* Create BIND request */
|
|
- status = pj_stun_session_create_req(sess->stun_sess,
|
|
+ status = pj_stun_session_create_req(sess->stun_sess,
|
|
PJ_STUN_BINDING_REQUEST, magic,
|
|
- (pj_uint8_t*)tsx_id,
|
|
+ (pj_uint8_t*)tsx_id,
|
|
&sess->result[test_id].tdata);
|
|
if (status != PJ_SUCCESS)
|
|
goto on_error;
|
|
|
|
/* Add CHANGE-REQUEST attribute */
|
|
- status = pj_stun_msg_add_uint_attr(sess->pool,
|
|
+ status = pj_stun_msg_add_uint_attr(sess->pool,
|
|
sess->result[test_id].tdata->msg,
|
|
PJ_STUN_ATTR_CHANGE_REQUEST,
|
|
change_flag);
|
|
@@ -868,15 +868,16 @@ static pj_status_t send_test(nat_detect_session *sess,
|
|
sess->cur_server = &sess->server;
|
|
}
|
|
|
|
- PJ_LOG(5,(sess->pool->obj_name,
|
|
- "Performing %s to %s:%d",
|
|
+ PJ_LOG(5,(sess->pool->obj_name,
|
|
+ "Performing %s to %s:%d",
|
|
test_names[test_id],
|
|
pj_sockaddr_print(sess->cur_server, addr, sizeof(addr), 2),
|
|
pj_sockaddr_get_port(sess->cur_server)));
|
|
|
|
/* Send the request */
|
|
status = pj_stun_session_send_msg(sess->stun_sess, NULL, PJ_TRUE,
|
|
- PJ_TRUE, sess->cur_server,
|
|
+ (pj_stun_session_tp_type(sess->stun_sess) == PJ_STUN_TP_UDP),
|
|
+ sess->cur_server,
|
|
pj_sockaddr_get_len(sess->cur_server),
|
|
sess->result[test_id].tdata);
|
|
if (status != PJ_SUCCESS)
|
|
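Editorial note: with the transport type now carried by the session, nat_detect.c both passes PJ_STUN_TP_UDP when creating its session and derives the retransmit flag from the new accessor, since STUN-level retransmission is only appropriate on an unreliable transport. A caller-side sketch of the patched signature; 'stun_cfg', 'sess_cb' and 'grp_lock' are assumed to have been prepared as usual and error handling is elided:

/* Editorial sketch, not part of the patch. */
pj_stun_session *stun_sess;
pj_status_t status;

status = pj_stun_session_create(stun_cfg, NULL, &sess_cb,
                                PJ_FALSE,         /* no FINGERPRINT      */
                                grp_lock,
                                &stun_sess,
                                PJ_STUN_TP_UDP);  /* new transport param */

/* When sending a request, retransmit only over the datagram transport. */
pj_bool_t retransmit =
    (status == PJ_SUCCESS) &&
    (pj_stun_session_tp_type(stun_sess) == PJ_STUN_TP_UDP);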
diff --git a/pjnath/src/pjnath/stun_session.c b/pjnath/src/pjnath/stun_session.c
|
|
index 4a3e165f5..e117fef39 100644
|
|
--- a/pjnath/src/pjnath/stun_session.c
|
|
+++ b/pjnath/src/pjnath/stun_session.c
|
|
@@ -1,4 +1,4 @@
|
|
-/*
|
|
+/*
|
|
* Copyright (C) 2008-2011 Teluu Inc. (http://www.teluu.com)
|
|
* Copyright (C) 2003-2008 Benny Prijono <benny@prijono.org>
|
|
*
|
|
@@ -14,7 +14,7 @@
|
|
*
|
|
* You should have received a copy of the GNU General Public License
|
|
* along with this program; if not, write to the Free Software
|
|
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
|
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
|
*/
|
|
#include <pjnath/stun_session.h>
|
|
#include <pjnath/errno.h>
|
|
@@ -48,6 +48,8 @@ struct pj_stun_session
|
|
|
|
pj_stun_tx_data pending_request_list;
|
|
pj_stun_tx_data cached_response_list;
|
|
+
|
|
+ pj_stun_tp_type conn_type;
|
|
};
|
|
|
|
#define SNAME(s_) ((s_)->pool->obj_name)
|
|
@@ -66,7 +68,7 @@ struct pj_stun_session
|
|
|
|
|
|
static void stun_tsx_on_complete(pj_stun_client_tsx *tsx,
|
|
- pj_status_t status,
|
|
+ pj_status_t status,
|
|
const pj_stun_msg *response,
|
|
const pj_sockaddr_t *src_addr,
|
|
unsigned src_addr_len);
|
|
@@ -77,7 +79,7 @@ static void stun_tsx_on_destroy(pj_stun_client_tsx *tsx);
|
|
static void stun_sess_on_destroy(void *comp);
|
|
static void destroy_tdata(pj_stun_tx_data *tdata, pj_bool_t force);
|
|
|
|
-static pj_stun_tsx_cb tsx_cb =
|
|
+static pj_stun_tsx_cb tsx_cb =
|
|
{
|
|
&stun_tsx_on_complete,
|
|
&stun_tsx_on_send_msg,
|
|
@@ -109,7 +111,7 @@ static pj_stun_tx_data* tsx_lookup(pj_stun_session *sess,
|
|
while (tdata != &sess->pending_request_list) {
|
|
pj_assert(sizeof(tdata->msg_key)==sizeof(msg->hdr.tsx_id));
|
|
if (tdata->msg_magic == msg->hdr.magic &&
|
|
- pj_memcmp(tdata->msg_key, msg->hdr.tsx_id,
|
|
+ pj_memcmp(tdata->msg_key, msg->hdr.tsx_id,
|
|
sizeof(msg->hdr.tsx_id))==0)
|
|
{
|
|
return tdata;
|
|
@@ -127,7 +129,7 @@ static pj_status_t create_tdata(pj_stun_session *sess,
|
|
pj_stun_tx_data *tdata;
|
|
|
|
/* Create pool and initialize basic tdata attributes */
|
|
- pool = pj_pool_create(sess->cfg->pf, "tdata%p",
|
|
+ pool = pj_pool_create(sess->cfg->pf, "tdata%p",
|
|
TDATA_POOL_SIZE, TDATA_POOL_INC, NULL);
|
|
PJ_ASSERT_RETURN(pool, PJ_ENOMEM);
|
|
|
|
@@ -150,7 +152,7 @@ static void stun_tsx_on_destroy(pj_stun_client_tsx *tsx)
|
|
pj_stun_client_tsx_stop(tsx);
|
|
if (tdata) {
|
|
pj_stun_session *sess = tdata->sess;
|
|
-
|
|
+
|
|
pj_grp_lock_acquire(sess->grp_lock);
|
|
tsx_erase(sess, tdata);
|
|
destroy_tdata(tdata, PJ_TRUE);
|
|
@@ -268,16 +270,16 @@ static pj_status_t apply_msg_options(pj_stun_session *sess,
|
|
pj_str_t realm, username, nonce, auth_key;
|
|
|
|
/* If the agent is sending a request, it SHOULD add a SOFTWARE attribute
|
|
- * to the request. The server SHOULD include a SOFTWARE attribute in all
|
|
+ * to the request. The server SHOULD include a SOFTWARE attribute in all
|
|
* responses.
|
|
*
|
|
* If magic value is not PJ_STUN_MAGIC, only apply the attribute for
|
|
* responses.
|
|
*/
|
|
- if (sess->srv_name.slen &&
|
|
+ if (sess->srv_name.slen &&
|
|
pj_stun_msg_find_attr(msg, PJ_STUN_ATTR_SOFTWARE, 0)==NULL &&
|
|
(PJ_STUN_IS_RESPONSE(msg->hdr.type) ||
|
|
- (PJ_STUN_IS_REQUEST(msg->hdr.type) && msg->hdr.magic==PJ_STUN_MAGIC)))
|
|
+ (PJ_STUN_IS_REQUEST(msg->hdr.type) && msg->hdr.magic==PJ_STUN_MAGIC)))
|
|
{
|
|
pj_stun_msg_add_string_attr(pool, msg, PJ_STUN_ATTR_SOFTWARE,
|
|
&sess->srv_name);
|
|
@@ -309,9 +311,9 @@ static pj_status_t apply_msg_options(pj_stun_session *sess,
|
|
}
|
|
|
|
/* Add NONCE when desired */
|
|
- if (nonce.slen &&
|
|
+ if (nonce.slen &&
|
|
(PJ_STUN_IS_REQUEST(msg->hdr.type) ||
|
|
- PJ_STUN_IS_ERROR_RESPONSE(msg->hdr.type)))
|
|
+ PJ_STUN_IS_ERROR_RESPONSE(msg->hdr.type)))
|
|
{
|
|
status = pj_stun_msg_add_string_attr(pool, msg,
|
|
PJ_STUN_ATTR_NONCE,
|
|
@@ -328,7 +330,7 @@ static pj_status_t apply_msg_options(pj_stun_session *sess,
|
|
|
|
/* Add FINGERPRINT attribute if necessary */
|
|
if (sess->use_fingerprint) {
|
|
- status = pj_stun_msg_add_uint_attr(pool, msg,
|
|
+ status = pj_stun_msg_add_uint_attr(pool, msg,
|
|
PJ_STUN_ATTR_FINGERPRINT, 0);
|
|
PJ_ASSERT_RETURN(status==PJ_SUCCESS, status);
|
|
}
|
|
@@ -352,7 +354,7 @@ static pj_status_t handle_auth_challenge(pj_stun_session *sess,
|
|
|
|
if (sess->auth_type != PJ_STUN_AUTH_LONG_TERM)
|
|
return PJ_SUCCESS;
|
|
-
|
|
+
|
|
if (!PJ_STUN_IS_ERROR_RESPONSE(response->hdr.type)) {
|
|
sess->auth_retry = 0;
|
|
return PJ_SUCCESS;
|
|
@@ -367,7 +369,7 @@ static pj_status_t handle_auth_challenge(pj_stun_session *sess,
|
|
return PJNATH_EINSTUNMSG;
|
|
}
|
|
|
|
- if (ea->err_code == PJ_STUN_SC_UNAUTHORIZED ||
|
|
+ if (ea->err_code == PJ_STUN_SC_UNAUTHORIZED ||
|
|
ea->err_code == PJ_STUN_SC_STALE_NONCE)
|
|
{
|
|
const pj_stun_nonce_attr *anonce;
|
|
@@ -433,7 +435,7 @@ static pj_status_t handle_auth_challenge(pj_stun_session *sess,
|
|
continue;
|
|
}
|
|
|
|
- tdata->msg->attr[tdata->msg->attr_count++] =
|
|
+ tdata->msg->attr[tdata->msg->attr_count++] =
|
|
pj_stun_attr_clone(tdata->pool, asrc);
|
|
}
|
|
|
|
@@ -445,8 +447,8 @@ static pj_status_t handle_auth_challenge(pj_stun_session *sess,
|
|
PJ_LOG(4,(SNAME(sess), "Retrying request with new authentication"));
|
|
|
|
/* Retry the request */
|
|
- status = pj_stun_session_send_msg(sess, request->token, PJ_TRUE,
|
|
- request->retransmit, src_addr,
|
|
+ status = pj_stun_session_send_msg(sess, request->token, PJ_TRUE,
|
|
+ request->retransmit, src_addr,
|
|
src_addr_len, tdata);
|
|
|
|
} else {
|
|
@@ -457,7 +459,7 @@ static pj_status_t handle_auth_challenge(pj_stun_session *sess,
|
|
}
|
|
|
|
static void stun_tsx_on_complete(pj_stun_client_tsx *tsx,
|
|
- pj_status_t status,
|
|
+ pj_status_t status,
|
|
const pj_stun_msg *response,
|
|
const pj_sockaddr_t *src_addr,
|
|
unsigned src_addr_len)
|
|
@@ -482,12 +484,12 @@ static void stun_tsx_on_complete(pj_stun_client_tsx *tsx,
|
|
src_addr_len, ¬ify_user);
|
|
|
|
if (notify_user && sess->cb.on_request_complete) {
|
|
- (*sess->cb.on_request_complete)(sess, status, tdata->token, tdata,
|
|
+ (*sess->cb.on_request_complete)(sess, status, tdata->token, tdata,
|
|
response, src_addr, src_addr_len);
|
|
}
|
|
|
|
/* Destroy the transmit data. This will remove the transaction
|
|
- * from the pending list too.
|
|
+ * from the pending list too.
|
|
*/
|
|
if (status == PJNATH_ESTUNTIMEDOUT)
|
|
destroy_tdata(tdata, PJ_TRUE);
|
|
@@ -514,15 +516,15 @@ static pj_status_t stun_tsx_on_send_msg(pj_stun_client_tsx *tsx,
|
|
|
|
/* Lock the session and prevent user from destroying us in the callback */
|
|
pj_grp_lock_acquire(sess->grp_lock);
|
|
-
|
|
+
|
|
if (sess->is_destroying) {
|
|
/* Stray timer */
|
|
pj_grp_lock_release(sess->grp_lock);
|
|
return PJ_EINVALIDOP;
|
|
}
|
|
|
|
- status = sess->cb.on_send_msg(tdata->sess, tdata->token, stun_pkt,
|
|
- pkt_size, tdata->dst_addr,
|
|
+ status = sess->cb.on_send_msg(tdata->sess, tdata->token, stun_pkt,
|
|
+ pkt_size, tdata->dst_addr,
|
|
tdata->addr_len);
|
|
if (pj_grp_lock_release(sess->grp_lock))
|
|
return PJ_EGONE;
|
|
@@ -537,7 +539,8 @@ PJ_DEF(pj_status_t) pj_stun_session_create( pj_stun_config *cfg,
|
|
const pj_stun_session_cb *cb,
|
|
pj_bool_t fingerprint,
|
|
pj_grp_lock_t *grp_lock,
|
|
- pj_stun_session **p_sess)
|
|
+ pj_stun_session **p_sess,
|
|
+ pj_stun_tp_type conn_type)
|
|
{
|
|
pj_pool_t *pool;
|
|
pj_stun_session *sess;
|
|
@@ -548,7 +551,7 @@ PJ_DEF(pj_status_t) pj_stun_session_create( pj_stun_config *cfg,
|
|
if (name==NULL)
|
|
name = "stuse%p";
|
|
|
|
- pool = pj_pool_create(cfg->pf, name, PJNATH_POOL_LEN_STUN_SESS,
|
|
+ pool = pj_pool_create(cfg->pf, name, PJNATH_POOL_LEN_STUN_SESS,
|
|
PJNATH_POOL_INC_STUN_SESS, NULL);
|
|
PJ_ASSERT_RETURN(pool, PJ_ENOMEM);
|
|
|
|
@@ -558,6 +561,7 @@ PJ_DEF(pj_status_t) pj_stun_session_create( pj_stun_config *cfg,
|
|
pj_memcpy(&sess->cb, cb, sizeof(*cb));
|
|
sess->use_fingerprint = fingerprint;
|
|
sess->log_flag = 0xFFFF;
|
|
+ sess->conn_type = conn_type;
|
|
|
|
if (grp_lock) {
|
|
sess->grp_lock = grp_lock;
|
|
@@ -727,7 +731,7 @@ static pj_status_t get_auth(pj_stun_session *sess,
|
|
tdata->auth_info.username = sess->cred.data.static_cred.username;
|
|
tdata->auth_info.nonce = sess->cred.data.static_cred.nonce;
|
|
|
|
- pj_stun_create_key(tdata->pool, &tdata->auth_info.auth_key,
|
|
+ pj_stun_create_key(tdata->pool, &tdata->auth_info.auth_key,
|
|
&tdata->auth_info.realm,
|
|
&tdata->auth_info.username,
|
|
sess->cred.data.static_cred.data_type,
|
|
@@ -739,16 +743,16 @@ static pj_status_t get_auth(pj_stun_session *sess,
|
|
pj_stun_passwd_type data_type = PJ_STUN_PASSWD_PLAIN;
|
|
pj_status_t rc;
|
|
|
|
- rc = (*sess->cred.data.dyn_cred.get_cred)(tdata->msg, user_data,
|
|
+ rc = (*sess->cred.data.dyn_cred.get_cred)(tdata->msg, user_data,
|
|
tdata->pool,
|
|
- &tdata->auth_info.realm,
|
|
+ &tdata->auth_info.realm,
|
|
&tdata->auth_info.username,
|
|
- &tdata->auth_info.nonce,
|
|
+ &tdata->auth_info.nonce,
|
|
&data_type, &password);
|
|
if (rc != PJ_SUCCESS)
|
|
return rc;
|
|
|
|
- pj_stun_create_key(tdata->pool, &tdata->auth_info.auth_key,
|
|
+ pj_stun_create_key(tdata->pool, &tdata->auth_info.auth_key,
|
|
&tdata->auth_info.realm, &tdata->auth_info.username,
|
|
data_type, &password);
|
|
|
|
@@ -782,7 +786,7 @@ PJ_DEF(pj_status_t) pj_stun_session_create_req(pj_stun_session *sess,
|
|
goto on_error;
|
|
|
|
/* Create STUN message */
|
|
- status = pj_stun_msg_create(tdata->pool, method, magic,
|
|
+ status = pj_stun_msg_create(tdata->pool, method, magic,
|
|
tsx_id, &tdata->msg);
|
|
if (status != PJ_SUCCESS)
|
|
goto on_error;
|
|
@@ -793,7 +797,7 @@ PJ_DEF(pj_status_t) pj_stun_session_create_req(pj_stun_session *sess,
|
|
pj_memcpy(tdata->msg_key, tdata->msg->hdr.tsx_id,
|
|
sizeof(tdata->msg->hdr.tsx_id));
|
|
|
|
-
|
|
+
|
|
/* Get authentication information for the request */
|
|
if (sess->auth_type == PJ_STUN_AUTH_NONE) {
|
|
/* No authentication */
|
|
@@ -856,7 +860,7 @@ PJ_DEF(pj_status_t) pj_stun_session_create_ind(pj_stun_session *sess,
|
|
|
|
/* Create STUN message */
|
|
msg_type |= PJ_STUN_INDICATION_BIT;
|
|
- status = pj_stun_msg_create(tdata->pool, msg_type, PJ_STUN_MAGIC,
|
|
+ status = pj_stun_msg_create(tdata->pool, msg_type, PJ_STUN_MAGIC,
|
|
NULL, &tdata->msg);
|
|
if (status != PJ_SUCCESS) {
|
|
pj_pool_safe_release(&tdata->pool);
|
|
@@ -895,7 +899,7 @@ PJ_DEF(pj_status_t) pj_stun_session_create_res( pj_stun_session *sess,
|
|
}
|
|
|
|
/* Create STUN response message */
|
|
- status = pj_stun_msg_create_response(tdata->pool, rdata->msg,
|
|
+ status = pj_stun_msg_create_response(tdata->pool, rdata->msg,
|
|
err_code, err_msg, &tdata->msg);
|
|
if (status != PJ_SUCCESS) {
|
|
pj_pool_safe_release(&tdata->pool);
|
|
@@ -906,7 +910,7 @@ PJ_DEF(pj_status_t) pj_stun_session_create_res( pj_stun_session *sess,
|
|
/* copy the request's transaction ID as the transaction key. */
|
|
pj_assert(sizeof(tdata->msg_key)==sizeof(rdata->msg->hdr.tsx_id));
|
|
tdata->msg_magic = rdata->msg->hdr.magic;
|
|
- pj_memcpy(tdata->msg_key, rdata->msg->hdr.tsx_id,
|
|
+ pj_memcpy(tdata->msg_key, rdata->msg->hdr.tsx_id,
|
|
sizeof(rdata->msg->hdr.tsx_id));
|
|
|
|
/* copy the credential found in the request */
|
|
@@ -925,8 +929,8 @@ static void dump_tx_msg(pj_stun_session *sess, const pj_stun_msg *msg,
|
|
unsigned pkt_size, const pj_sockaddr_t *addr)
|
|
{
|
|
char dst_name[PJ_INET6_ADDRSTRLEN+10];
|
|
-
|
|
- if ((PJ_STUN_IS_REQUEST(msg->hdr.type) &&
|
|
+
|
|
+ if ((PJ_STUN_IS_REQUEST(msg->hdr.type) &&
|
|
(sess->log_flag & PJ_STUN_SESS_LOG_TX_REQ)==0) ||
|
|
(PJ_STUN_IS_RESPONSE(msg->hdr.type) &&
|
|
(sess->log_flag & PJ_STUN_SESS_LOG_TX_RES)==0) ||
|
|
@@ -938,13 +942,13 @@ static void dump_tx_msg(pj_stun_session *sess, const pj_stun_msg *msg,
|
|
|
|
pj_sockaddr_print(addr, dst_name, sizeof(dst_name), 3);
|
|
|
|
- PJ_LOG(5,(SNAME(sess),
|
|
+ PJ_LOG(5,(SNAME(sess),
|
|
"TX %d bytes STUN message to %s:\n"
|
|
"--- begin STUN message ---\n"
|
|
"%s"
|
|
"--- end of STUN message ---\n",
|
|
- pkt_size, dst_name,
|
|
- pj_stun_msg_dump(msg, sess->dump_buf, sizeof(sess->dump_buf),
|
|
+ pkt_size, dst_name,
|
|
+ pj_stun_msg_dump(msg, sess->dump_buf, sizeof(sess->dump_buf),
|
|
NULL)));
|
|
|
|
}
|
|
@@ -979,7 +983,7 @@ PJ_DEF(pj_status_t) pj_stun_session_send_msg( pj_stun_session *sess,
|
|
tdata->retransmit = retransmit;
|
|
|
|
/* Apply options */
|
|
- status = apply_msg_options(sess, tdata->pool, &tdata->auth_info,
|
|
+ status = apply_msg_options(sess, tdata->pool, &tdata->auth_info,
|
|
tdata->msg);
|
|
if (status != PJ_SUCCESS) {
|
|
pj_stun_msg_destroy_tdata(sess, tdata);
|
|
@@ -988,8 +992,8 @@ PJ_DEF(pj_status_t) pj_stun_session_send_msg( pj_stun_session *sess,
|
|
}
|
|
|
|
/* Encode message */
|
|
- status = pj_stun_msg_encode(tdata->msg, (pj_uint8_t*)tdata->pkt,
|
|
- tdata->max_len, 0,
|
|
+ status = pj_stun_msg_encode(tdata->msg, (pj_uint8_t*)tdata->pkt,
|
|
+ tdata->max_len, 0,
|
|
&tdata->auth_info.auth_key,
|
|
&tdata->pkt_size);
|
|
if (status != PJ_SUCCESS) {
|
|
@@ -1019,11 +1023,11 @@ PJ_DEF(pj_status_t) pj_stun_session_send_msg( pj_stun_session *sess,
|
|
|
|
/* Send the request! */
|
|
status = pj_stun_client_tsx_send_msg(tdata->client_tsx, retransmit,
|
|
- tdata->pkt,
|
|
+ tdata->pkt,
|
|
(unsigned)tdata->pkt_size);
|
|
if (status != PJ_SUCCESS && status != PJ_EPENDING) {
|
|
pj_stun_msg_destroy_tdata(sess, tdata);
|
|
- LOG_ERR_(sess, "Error sending STUN request", status);
|
|
+ LOG_ERR_(sess, "Error sending STUN request (pj_stun_client_tsx_send_msg)", status);
|
|
goto on_return;
|
|
}
|
|
|
|
@@ -1032,9 +1036,9 @@ PJ_DEF(pj_status_t) pj_stun_session_send_msg( pj_stun_session *sess,
|
|
|
|
} else {
|
|
/* Otherwise for non-request message, send directly to transport. */
|
|
- if (cache_res &&
|
|
+ if (cache_res &&
|
|
(PJ_STUN_IS_SUCCESS_RESPONSE(tdata->msg->hdr.type) ||
|
|
- PJ_STUN_IS_ERROR_RESPONSE(tdata->msg->hdr.type)))
|
|
+ PJ_STUN_IS_ERROR_RESPONSE(tdata->msg->hdr.type)))
|
|
{
|
|
/* Requested to keep the response in the cache */
|
|
pj_time_val timeout;
|
|
@@ -1053,7 +1057,7 @@ PJ_DEF(pj_status_t) pj_stun_session_send_msg( pj_stun_session *sess,
|
|
* is still valid when cache timeout callback is called.
|
|
*/
|
|
pj_grp_lock_add_ref(sess->grp_lock);
|
|
-
|
|
+
|
|
pj_memset(&tdata->res_timer, 0, sizeof(tdata->res_timer));
|
|
pj_timer_entry_init(&tdata->res_timer, PJ_FALSE, tdata,
|
|
&on_cache_timeout);
|
|
@@ -1075,12 +1079,12 @@ PJ_DEF(pj_status_t) pj_stun_session_send_msg( pj_stun_session *sess,
|
|
}
|
|
|
|
/* Send to transport directly. */
|
|
- status = sess->cb.on_send_msg(sess, token, tdata->pkt,
|
|
+ status = sess->cb.on_send_msg(sess, token, tdata->pkt,
|
|
tdata->pkt_size, server, addr_len);
|
|
|
|
if (status != PJ_SUCCESS && status != PJ_EPENDING) {
|
|
pj_stun_msg_destroy_tdata(sess, tdata);
|
|
- LOG_ERR_(sess, "Error sending STUN request", status);
|
|
+ LOG_ERR_(sess, "Error sending STUN request (pj_stun_session_send_msg)", status);
|
|
goto on_return;
|
|
}
|
|
|
|
@@ -1103,13 +1107,13 @@ on_return:
|
|
/*
|
|
* Create and send STUN response message.
|
|
*/
|
|
-PJ_DEF(pj_status_t) pj_stun_session_respond( pj_stun_session *sess,
|
|
+PJ_DEF(pj_status_t) pj_stun_session_respond( pj_stun_session *sess,
|
|
const pj_stun_rx_data *rdata,
|
|
- unsigned code,
|
|
+ unsigned code,
|
|
const char *errmsg,
|
|
void *token,
|
|
- pj_bool_t cache,
|
|
- const pj_sockaddr_t *dst_addr,
|
|
+ pj_bool_t cache,
|
|
+ const pj_sockaddr_t *dst_addr,
|
|
unsigned addr_len)
|
|
{
|
|
pj_status_t status;
|
|
@@ -1122,8 +1126,8 @@ PJ_DEF(pj_status_t) pj_stun_session_respond( pj_stun_session *sess,
|
|
return PJ_EINVALIDOP;
|
|
}
|
|
|
|
- status = pj_stun_session_create_res(sess, rdata, code,
|
|
- (errmsg?pj_cstr(&reason,errmsg):NULL),
|
|
+ status = pj_stun_session_create_res(sess, rdata, code,
|
|
+ (errmsg?pj_cstr(&reason,errmsg):NULL),
|
|
&tdata);
|
|
if (status != PJ_SUCCESS) {
|
|
pj_grp_lock_release(sess->grp_lock);
|
|
@@ -1139,7 +1143,7 @@ PJ_DEF(pj_status_t) pj_stun_session_respond( pj_stun_session *sess,
|
|
|
|
|
|
/*
|
|
- * Cancel outgoing STUN transaction.
|
|
+ * Cancel outgoing STUN transaction.
|
|
*/
|
|
PJ_DEF(pj_status_t) pj_stun_session_cancel_req( pj_stun_session *sess,
|
|
pj_stun_tx_data *tdata,
|
|
@@ -1158,7 +1162,7 @@ PJ_DEF(pj_status_t) pj_stun_session_cancel_req( pj_stun_session *sess,
|
|
}
|
|
|
|
if (notify) {
|
|
- (sess->cb.on_request_complete)(sess, notify_status, tdata->token,
|
|
+ (sess->cb.on_request_complete)(sess, notify_status, tdata->token,
|
|
tdata, NULL, NULL, 0);
|
|
}
|
|
|
|
@@ -1220,7 +1224,7 @@ static pj_status_t send_response(pj_stun_session *sess, void *token,
|
|
out_pkt = (pj_uint8_t*) pj_pool_alloc(pool, out_max_len);
|
|
|
|
/* Encode */
|
|
- status = pj_stun_msg_encode(response, out_pkt, out_max_len, 0,
|
|
+ status = pj_stun_msg_encode(response, out_pkt, out_max_len, 0,
|
|
&auth_info->auth_key, &out_len);
|
|
if (status != PJ_SUCCESS) {
|
|
LOG_ERR_(sess, "Error encoding message", status);
|
|
@@ -1231,7 +1235,7 @@ static pj_status_t send_response(pj_stun_session *sess, void *token,
|
|
dump_tx_msg(sess, response, (unsigned)out_len, addr);
|
|
|
|
/* Send packet */
|
|
- status = sess->cb.on_send_msg(sess, token, out_pkt, (unsigned)out_len,
|
|
+ status = sess->cb.on_send_msg(sess, token, out_pkt, (unsigned)out_len,
|
|
addr, addr_len);
|
|
|
|
return status;
|
|
@@ -1250,18 +1254,18 @@ static pj_status_t authenticate_req(pj_stun_session *sess,
|
|
pj_stun_msg *response;
|
|
pj_status_t status;
|
|
|
|
- if (PJ_STUN_IS_ERROR_RESPONSE(rdata->msg->hdr.type) ||
|
|
+ if (PJ_STUN_IS_ERROR_RESPONSE(rdata->msg->hdr.type) ||
|
|
sess->auth_type == PJ_STUN_AUTH_NONE)
|
|
{
|
|
return PJ_SUCCESS;
|
|
}
|
|
|
|
- status = pj_stun_authenticate_request(pkt, pkt_len, rdata->msg,
|
|
+ status = pj_stun_authenticate_request(pkt, pkt_len, rdata->msg,
|
|
&sess->cred, tmp_pool, &rdata->info,
|
|
&response);
|
|
if (status != PJ_SUCCESS && response != NULL) {
|
|
PJ_PERROR(5,(SNAME(sess), status, "Message authentication failed"));
|
|
- send_response(sess, token, tmp_pool, response, &rdata->info,
|
|
+ send_response(sess, token, tmp_pool, response, &rdata->info,
|
|
PJ_FALSE, src_addr, src_addr_len);
|
|
}
|
|
|
|
@@ -1284,7 +1288,7 @@ static pj_status_t on_incoming_response(pj_stun_session *sess,
|
|
/* Lookup pending client transaction */
|
|
tdata = tsx_lookup(sess, msg);
|
|
if (tdata == NULL) {
|
|
- PJ_LOG(5,(SNAME(sess),
|
|
+ PJ_LOG(5,(SNAME(sess),
|
|
"Transaction not found, response silently discarded"));
|
|
return PJ_SUCCESS;
|
|
}
|
|
@@ -1295,11 +1299,11 @@ static pj_status_t on_incoming_response(pj_stun_session *sess,
|
|
/* Authenticate the message, unless PJ_STUN_NO_AUTHENTICATE
|
|
* is specified in the option.
|
|
*/
|
|
- if ((options & PJ_STUN_NO_AUTHENTICATE) == 0 &&
|
|
- tdata->auth_info.auth_key.slen != 0 &&
|
|
+ if ((options & PJ_STUN_NO_AUTHENTICATE) == 0 &&
|
|
+ tdata->auth_info.auth_key.slen != 0 &&
|
|
pj_stun_auth_valid_for_msg(msg))
|
|
{
|
|
- status = pj_stun_authenticate_response(pkt, pkt_len, msg,
|
|
+ status = pj_stun_authenticate_response(pkt, pkt_len, msg,
|
|
&tdata->auth_info.auth_key);
|
|
if (status != PJ_SUCCESS) {
|
|
PJ_PERROR(5,(SNAME(sess), status,
|
|
@@ -1308,11 +1312,11 @@ static pj_status_t on_incoming_response(pj_stun_session *sess,
|
|
}
|
|
}
|
|
|
|
- /* Pass the response to the transaction.
|
|
+ /* Pass the response to the transaction.
|
|
* If the message is accepted, transaction callback will be called,
|
|
* and this will call the session callback too.
|
|
*/
|
|
- status = pj_stun_client_tsx_on_rx_msg(tdata->client_tsx, msg,
|
|
+ status = pj_stun_client_tsx_on_rx_msg(tdata->client_tsx, msg,
|
|
src_addr, src_addr_len);
|
|
if (status != PJ_SUCCESS) {
|
|
return status;
|
|
@@ -1336,7 +1340,7 @@ static pj_status_t check_cached_response(pj_stun_session *sess,
|
|
while (t != &sess->cached_response_list) {
|
|
if (t->msg_magic == msg->hdr.magic &&
|
|
t->msg->hdr.type == msg->hdr.type &&
|
|
- pj_memcmp(t->msg_key, msg->hdr.tsx_id,
|
|
+ pj_memcmp(t->msg_key, msg->hdr.tsx_id,
|
|
sizeof(msg->hdr.tsx_id))==0)
|
|
{
|
|
break;
|
|
@@ -1347,10 +1351,10 @@ static pj_status_t check_cached_response(pj_stun_session *sess,
|
|
if (t != &sess->cached_response_list) {
|
|
/* Found response in the cache */
|
|
|
|
- PJ_LOG(5,(SNAME(sess),
|
|
+ PJ_LOG(5,(SNAME(sess),
|
|
"Request retransmission, sending cached response"));
|
|
|
|
- send_response(sess, t->token, tmp_pool, t->msg, &t->auth_info,
|
|
+ send_response(sess, t->token, tmp_pool, t->msg, &t->auth_info,
|
|
PJ_TRUE, src_addr, src_addr_len);
|
|
return PJ_SUCCESS;
|
|
}
|
|
@@ -1383,8 +1387,8 @@ static pj_status_t on_incoming_request(pj_stun_session *sess,
|
|
* is specified in the option.
|
|
*/
|
|
if ((options & PJ_STUN_NO_AUTHENTICATE) == 0) {
|
|
- status = authenticate_req(sess, token, (const pj_uint8_t*) in_pkt,
|
|
- in_pkt_len,&rdata, tmp_pool, src_addr,
|
|
+ status = authenticate_req(sess, token, (const pj_uint8_t*) in_pkt,
|
|
+ in_pkt_len,&rdata, tmp_pool, src_addr,
|
|
src_addr_len);
|
|
if (status != PJ_SUCCESS) {
|
|
return status;
|
|
@@ -1400,11 +1404,11 @@ static pj_status_t on_incoming_request(pj_stun_session *sess,
|
|
pj_stun_msg *response;
|
|
|
|
err_text = pj_str("Callback is not set to handle request");
|
|
- status = pj_stun_msg_create_response(tmp_pool, msg,
|
|
- PJ_STUN_SC_BAD_REQUEST,
|
|
+ status = pj_stun_msg_create_response(tmp_pool, msg,
|
|
+ PJ_STUN_SC_BAD_REQUEST,
|
|
&err_text, &response);
|
|
if (status == PJ_SUCCESS && response) {
|
|
- status = send_response(sess, token, tmp_pool, response,
|
|
+ status = send_response(sess, token, tmp_pool, response,
|
|
NULL, PJ_FALSE, src_addr, src_addr_len);
|
|
}
|
|
}
|
|
@@ -1440,8 +1444,8 @@ static void dump_rx_msg(pj_stun_session *sess, const pj_stun_msg *msg,
|
|
unsigned pkt_size, const pj_sockaddr_t *addr)
|
|
{
|
|
char src_info[PJ_INET6_ADDRSTRLEN+10];
|
|
-
|
|
- if ((PJ_STUN_IS_REQUEST(msg->hdr.type) &&
|
|
+
|
|
+ if ((PJ_STUN_IS_REQUEST(msg->hdr.type) &&
|
|
(sess->log_flag & PJ_STUN_SESS_LOG_RX_REQ)==0) ||
|
|
(PJ_STUN_IS_RESPONSE(msg->hdr.type) &&
|
|
(sess->log_flag & PJ_STUN_SESS_LOG_RX_RES)==0) ||
|
|
@@ -1459,7 +1463,7 @@ static void dump_rx_msg(pj_stun_session *sess, const pj_stun_msg *msg,
|
|
"%s"
|
|
"--- end of STUN message ---\n",
|
|
pkt_size, src_info,
|
|
- pj_stun_msg_dump(msg, sess->dump_buf, sizeof(sess->dump_buf),
|
|
+ pj_stun_msg_dump(msg, sess->dump_buf, sizeof(sess->dump_buf),
|
|
NULL)));
|
|
|
|
}
|
|
@@ -1494,7 +1498,7 @@ PJ_DEF(pj_status_t) pj_stun_session_on_rx_pkt(pj_stun_session *sess,
|
|
|
|
/* Try to parse the message */
|
|
status = pj_stun_msg_decode(sess->rx_pool, (const pj_uint8_t*)packet,
|
|
- pkt_size, options,
|
|
+ pkt_size, options,
|
|
&msg, parsed_len, &response);
|
|
if (status != PJ_SUCCESS) {
|
|
LOG_ERR_(sess, "STUN msg_decode() error", status);
|
|
@@ -1508,7 +1512,7 @@ PJ_DEF(pj_status_t) pj_stun_session_on_rx_pkt(pj_stun_session *sess,
|
|
dump_rx_msg(sess, msg, (unsigned)pkt_size, src_addr);
|
|
|
|
/* For requests, check if we have cached response */
|
|
- status = check_cached_response(sess, sess->rx_pool, msg,
|
|
+ status = check_cached_response(sess, sess->rx_pool, msg,
|
|
src_addr, src_addr_len);
|
|
if (status == PJ_SUCCESS) {
|
|
goto on_return;
|
|
@@ -1518,23 +1522,23 @@ PJ_DEF(pj_status_t) pj_stun_session_on_rx_pkt(pj_stun_session *sess,
|
|
if (PJ_STUN_IS_SUCCESS_RESPONSE(msg->hdr.type) ||
|
|
PJ_STUN_IS_ERROR_RESPONSE(msg->hdr.type))
|
|
{
|
|
- status = on_incoming_response(sess, options,
|
|
- (const pj_uint8_t*) packet,
|
|
- (unsigned)pkt_size, msg,
|
|
+ status = on_incoming_response(sess, options,
|
|
+ (const pj_uint8_t*) packet,
|
|
+ (unsigned)pkt_size, msg,
|
|
src_addr, src_addr_len);
|
|
|
|
} else if (PJ_STUN_IS_REQUEST(msg->hdr.type)) {
|
|
|
|
- status = on_incoming_request(sess, options, token, sess->rx_pool,
|
|
- (const pj_uint8_t*) packet,
|
|
- (unsigned)pkt_size,
|
|
+ status = on_incoming_request(sess, options, token, sess->rx_pool,
|
|
+ (const pj_uint8_t*) packet,
|
|
+ (unsigned)pkt_size,
|
|
msg, src_addr, src_addr_len);
|
|
|
|
} else if (PJ_STUN_IS_INDICATION(msg->hdr.type)) {
|
|
|
|
- status = on_incoming_indication(sess, token, sess->rx_pool,
|
|
- (const pj_uint8_t*) packet,
|
|
- (unsigned)pkt_size, msg, src_addr,
|
|
+ status = on_incoming_indication(sess, token, sess->rx_pool,
|
|
+ (const pj_uint8_t*) packet,
|
|
+ (unsigned)pkt_size, msg, src_addr,
|
|
src_addr_len);
|
|
|
|
} else {
|
|
@@ -1551,3 +1555,12 @@ on_return:
|
|
return status;
|
|
}
|
|
|
|
+PJ_DECL(pj_stun_session_cb *) pj_stun_session_callback(pj_stun_session *sess)
|
|
+{
|
|
+ return sess ? &sess->cb : NULL;
|
|
+}
|
|
+
|
|
+PJ_DECL(pj_stun_tp_type) pj_stun_session_tp_type(pj_stun_session *sess)
|
|
+{
|
|
+ return sess ? sess->conn_type : PJ_STUN_TP_UDP;
|
|
+}
|
|
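Editorial note: the two accessors added at the end of stun_session.c let transport code inspect a session without reaching into the struct. A small caller-side sketch of how they can be used; the helper names are illustrative, not pjnath API:

/* Editorial sketch, not part of the patch. */
static pj_bool_t sess_needs_framing(pj_stun_session *sess)
{
    /* Stream transports carry RFC 4571 framed packets; UDP does not. */
    return pj_stun_session_tp_type(sess) != PJ_STUN_TP_UDP;
}

static pj_bool_t sess_reports_completion(pj_stun_session *sess)
{
    pj_stun_session_cb *cb = pj_stun_session_callback(sess);
    return cb != NULL && cb->on_request_complete != NULL;
}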
diff --git a/pjnath/src/pjnath/stun_sock.c b/pjnath/src/pjnath/stun_sock.c
|
|
index 28f760384..93b368777 100644
|
|
--- a/pjnath/src/pjnath/stun_sock.c
|
|
+++ b/pjnath/src/pjnath/stun_sock.c
|
|
@@ -1,4 +1,4 @@
|
|
-/*
|
|
+/*
|
|
* Copyright (C) 2008-2011 Teluu Inc. (http://www.teluu.com)
|
|
* Copyright (C) 2003-2008 Benny Prijono <benny@prijono.org>
|
|
*
|
|
@@ -14,7 +14,7 @@
|
|
*
|
|
* You should have received a copy of the GNU General Public License
|
|
* along with this program; if not, write to the Free Software
|
|
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
|
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
|
*/
|
|
#include <pjnath/stun_sock.h>
|
|
#include <pjnath/errno.h>
|
|
@@ -39,6 +39,35 @@
|
|
|
|
enum { MAX_BIND_RETRY = 100 };
|
|
|
|
+#if PJ_HAS_TCP
|
|
+// With RFC 4571 framing, the packet length is carried in a 16-bit header, so a
+// framed packet is at most 65535 bytes; keep a 64 KiB reassembly buffer.
|
|
+#define MAX_RTP_SIZE 65536
|
|
+#endif
|
|
+
|
|
+typedef struct outgoing_sock {
|
|
+ pj_sock_t fd;
|
|
+ pj_activesock_t *sock;
|
|
+ pj_sockaddr addr;
|
|
+ int addr_len;
|
|
+} outgoing_sock;
|
|
+
|
|
+typedef struct incoming_sock {
|
|
+ pj_sock_t fd;
|
|
+ pj_activesock_t *sock;
|
|
+ pj_sockaddr addr;
|
|
+ int addr_len;
|
|
+} incoming_sock;
|
|
+
|
|
+typedef struct rx_buf {
|
|
+ pj_activesock_t *asock;
|
|
+ pj_uint8_t rx_buffer[MAX_RTP_SIZE];
|
|
+ pj_uint16_t rx_buffer_size;
|
|
+ pj_uint16_t rx_wanted_size;
|
|
+ struct rx_buf *next;
|
|
+ struct rx_buf *prev;
|
|
+} rx_buf;
|
|
+
|
|
struct pj_stun_sock
|
|
{
|
|
char *obj_name; /* Log identification */
|
|
@@ -46,6 +75,8 @@ struct pj_stun_sock
|
|
void *user_data; /* Application user data */
|
|
pj_bool_t is_destroying; /* Destroy already called */
|
|
int af; /* Address family */
|
|
+ pj_stun_tp_type conn_type;
|
|
+ pj_stun_sock_cfg cfg;
|
|
pj_stun_config stun_cfg; /* STUN config (ioqueue etc)*/
|
|
pj_stun_sock_cb cb; /* Application callbacks */
|
|
|
|
@@ -58,6 +89,14 @@ struct pj_stun_sock
|
|
pj_dns_srv_async_query *q; /* Pending DNS query */
|
|
pj_sock_t sock_fd; /* Socket descriptor */
|
|
pj_activesock_t *active_sock; /* Active socket object */
|
|
+#if PJ_HAS_TCP
|
|
+ pj_bool_t no_new_socket;
|
|
+ int outgoing_nb;
|
|
+ outgoing_sock outgoing_socks[PJ_ICE_MAX_CHECKS];
|
|
+ int incoming_nb;
|
|
+ incoming_sock incoming_socks[PJ_ICE_MAX_CHECKS];
|
|
+ rx_buf *rx_buffers;
|
|
+#endif
|
|
pj_ioqueue_op_key_t send_key; /* Default send key for app */
|
|
pj_ioqueue_op_key_t int_send_key; /* Send key for internal */
|
|
pj_status_t last_err; /* Last error status */
|
|
@@ -67,8 +106,17 @@ struct pj_stun_sock
|
|
pj_grp_lock_t *grp_lock; /* Session group lock */
|
|
};
|
|
|
|
-/*
|
|
- * Prototypes for static functions
|
|
+//////////////////////////////////////////////////////////////////////////////
|
|
+
|
|
+static pj_uint16_t GETVAL16H(const pj_uint8_t *buf1, const pj_uint8_t *buf2)
|
|
+{
|
|
+ return (pj_uint16_t) ((buf1[0] << 8) | (buf2[0] << 0));
|
|
+}
|
|
+
|
|
+//////////////////////////////////////////////////////////////////////////////
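Editorial note: GETVAL16H() above rebuilds the big-endian 16-bit RFC 4571 length from two separately addressed bytes, because a TCP read may deliver only the first header byte (which is then parked in rx_buffer) while the second byte arrives with the next segment. A tiny illustrative call, not part of the patch:

/* Editorial sketch, not part of the patch. */
pj_uint8_t buffered = 0x01;   /* first header byte, from the previous read */
pj_uint8_t incoming = 0xF4;   /* second header byte, from the current read */
pj_uint16_t frame_len = GETVAL16H(&buffered, &incoming);  /* == 500 */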
|
|
+
|
|
+/*
|
|
+ * Prototypes for static functions
|
|
*/
|
|
|
|
/* Destructor for group lock */
|
|
@@ -82,7 +130,7 @@ static pj_status_t sess_on_send_msg(pj_stun_session *sess,
|
|
const pj_sockaddr_t *dst_addr,
|
|
unsigned addr_len);
|
|
|
|
-/* This callback is called by the STUN session when outgoing transaction
|
|
+/* This callback is called by the STUN session when outgoing transaction
|
|
* is complete
|
|
*/
|
|
static void sess_on_request_complete(pj_stun_session *sess,
|
|
@@ -119,6 +167,24 @@ static void start_ka_timer(pj_stun_sock *stun_sock);
|
|
/* Keep-alive timer callback */
|
|
static void ka_timer_cb(pj_timer_heap_t *th, pj_timer_entry *te);
|
|
|
|
+
|
|
+static pj_bool_t on_stun_sock_ready(pj_activesock_t *asock,
|
|
+ pj_status_t status);
|
|
+
|
|
+static pj_bool_t on_stun_sock_accept(pj_activesock_t *asock,
|
|
+ pj_sock_t newsock,
|
|
+ const pj_sockaddr_t *src_addr,
|
|
+ int src_addr_len);
|
|
+
|
|
+static pj_bool_t on_connect_complete(pj_activesock_t *asock,
|
|
+ pj_status_t status);
|
|
+
|
|
+/* Notify application that session has failed */
|
|
+static pj_bool_t sess_fail(pj_stun_sock *stun_sock,
|
|
+ pj_stun_sock_op op,
|
|
+ pj_status_t status);
|
|
+
|
|
+
|
|
#define INTERNAL_MSG_TOKEN (void*)(pj_ssize_t)1
|
|
|
|
|
|
@@ -150,6 +216,7 @@ PJ_DEF(void) pj_stun_sock_cfg_default(pj_stun_sock_cfg *cfg)
|
|
cfg->ka_interval = PJ_STUN_KEEP_ALIVE_SEC;
|
|
cfg->qos_type = PJ_QOS_TYPE_BEST_EFFORT;
|
|
cfg->qos_ignore_error = PJ_TRUE;
|
|
+ cfg->user_mapping_cnt = 0;
|
|
}
|
|
|
|
|
|
@@ -160,116 +227,67 @@ static pj_bool_t pj_stun_sock_cfg_is_valid(const pj_stun_sock_cfg *cfg)
|
|
}
|
|
|
|
/*
|
|
- * Create the STUN transport using the specified configuration.
|
|
+ * Initialize.
|
|
*/
|
|
-PJ_DEF(pj_status_t) pj_stun_sock_create( pj_stun_config *stun_cfg,
|
|
- const char *name,
|
|
- int af,
|
|
- const pj_stun_sock_cb *cb,
|
|
- const pj_stun_sock_cfg *cfg,
|
|
- void *user_data,
|
|
- pj_stun_sock **p_stun_sock)
|
|
+PJ_DEF(pj_status_t) pj_stun_sock_alloc(pj_stun_sock *stun_sock)
|
|
{
|
|
- pj_pool_t *pool;
|
|
- pj_stun_sock *stun_sock;
|
|
- pj_stun_sock_cfg default_cfg;
|
|
+ pj_status_t status;
|
|
pj_sockaddr bound_addr;
|
|
- unsigned i;
|
|
pj_uint16_t max_bind_retry;
|
|
- pj_status_t status;
|
|
-
|
|
- PJ_ASSERT_RETURN(stun_cfg && cb && p_stun_sock, PJ_EINVAL);
|
|
- PJ_ASSERT_RETURN(af==pj_AF_INET()||af==pj_AF_INET6(), PJ_EAFNOTSUP);
|
|
- PJ_ASSERT_RETURN(!cfg || pj_stun_sock_cfg_is_valid(cfg), PJ_EINVAL);
|
|
- PJ_ASSERT_RETURN(cb->on_status, PJ_EINVAL);
|
|
-
|
|
- status = pj_stun_config_check_valid(stun_cfg);
|
|
- if (status != PJ_SUCCESS)
|
|
- return status;
|
|
+ int sock_type;
|
|
|
|
- if (name == NULL)
|
|
- name = "stuntp%p";
|
|
-
|
|
- if (cfg == NULL) {
|
|
- pj_stun_sock_cfg_default(&default_cfg);
|
|
- cfg = &default_cfg;
|
|
- }
|
|
-
|
|
-
|
|
- /* Create structure */
|
|
- pool = pj_pool_create(stun_cfg->pf, name, 256, 512, NULL);
|
|
- stun_sock = PJ_POOL_ZALLOC_T(pool, pj_stun_sock);
|
|
- stun_sock->pool = pool;
|
|
- stun_sock->obj_name = pool->obj_name;
|
|
- stun_sock->user_data = user_data;
|
|
- stun_sock->af = af;
|
|
- stun_sock->sock_fd = PJ_INVALID_SOCKET;
|
|
- pj_memcpy(&stun_sock->stun_cfg, stun_cfg, sizeof(*stun_cfg));
|
|
- pj_memcpy(&stun_sock->cb, cb, sizeof(*cb));
|
|
-
|
|
- stun_sock->ka_interval = cfg->ka_interval;
|
|
- if (stun_sock->ka_interval == 0)
|
|
- stun_sock->ka_interval = PJ_STUN_KEEP_ALIVE_SEC;
|
|
-
|
|
- if (cfg->grp_lock) {
|
|
- stun_sock->grp_lock = cfg->grp_lock;
|
|
- } else {
|
|
- status = pj_grp_lock_create(pool, NULL, &stun_sock->grp_lock);
|
|
- if (status != PJ_SUCCESS) {
|
|
- pj_pool_release(pool);
|
|
- return status;
|
|
- }
|
|
- }
|
|
+ pj_grp_lock_acquire(stun_sock->grp_lock);
|
|
|
|
- pj_grp_lock_add_ref(stun_sock->grp_lock);
|
|
- pj_grp_lock_add_handler(stun_sock->grp_lock, pool, stun_sock,
|
|
- &stun_sock_destructor);
|
|
+ if (stun_sock->conn_type == PJ_STUN_TP_UDP)
|
|
+ sock_type = pj_SOCK_DGRAM();
|
|
+ else
|
|
+ sock_type = pj_SOCK_STREAM();
|
|
|
|
/* Create socket and bind socket */
|
|
- status = pj_sock_socket(af, pj_SOCK_DGRAM() | pj_SOCK_CLOEXEC(), 0, &stun_sock->sock_fd);
|
|
+ status = pj_sock_socket(stun_sock->af, sock_type, 0, &stun_sock->sock_fd);
|
|
if (status != PJ_SUCCESS)
|
|
goto on_error;
|
|
|
|
/* Apply QoS, if specified */
|
|
- status = pj_sock_apply_qos2(stun_sock->sock_fd, cfg->qos_type,
|
|
- &cfg->qos_params, 2, stun_sock->obj_name,
|
|
+ status = pj_sock_apply_qos2(stun_sock->sock_fd, stun_sock->cfg.qos_type,
|
|
+ &stun_sock->cfg.qos_params, 2, stun_sock->obj_name,
|
|
NULL);
|
|
- if (status != PJ_SUCCESS && !cfg->qos_ignore_error)
|
|
+ if (status != PJ_SUCCESS && !stun_sock->cfg.qos_ignore_error)
|
|
goto on_error;
|
|
|
|
/* Apply socket buffer size */
|
|
- if (cfg->so_rcvbuf_size > 0) {
|
|
- unsigned sobuf_size = cfg->so_rcvbuf_size;
|
|
+ if (stun_sock->cfg.so_rcvbuf_size > 0) {
|
|
+ unsigned sobuf_size = stun_sock->cfg.so_rcvbuf_size;
|
|
status = pj_sock_setsockopt_sobuf(stun_sock->sock_fd, pj_SO_RCVBUF(),
|
|
PJ_TRUE, &sobuf_size);
|
|
if (status != PJ_SUCCESS) {
|
|
PJ_PERROR(3, (stun_sock->obj_name, status,
|
|
"Failed setting SO_RCVBUF"));
|
|
} else {
|
|
- if (sobuf_size < cfg->so_rcvbuf_size) {
|
|
- PJ_LOG(4, (stun_sock->obj_name,
|
|
+ if (sobuf_size < stun_sock->cfg.so_rcvbuf_size) {
|
|
+ PJ_LOG(4, (stun_sock->obj_name,
|
|
"Warning! Cannot set SO_RCVBUF as configured, "
|
|
"now=%d, configured=%d",
|
|
- sobuf_size, cfg->so_rcvbuf_size));
|
|
+ sobuf_size, stun_sock->cfg.so_rcvbuf_size));
|
|
} else {
|
|
PJ_LOG(5, (stun_sock->obj_name, "SO_RCVBUF set to %d",
|
|
sobuf_size));
|
|
}
|
|
}
|
|
}
|
|
- if (cfg->so_sndbuf_size > 0) {
|
|
- unsigned sobuf_size = cfg->so_sndbuf_size;
|
|
+ if (stun_sock->cfg.so_sndbuf_size > 0) {
|
|
+ unsigned sobuf_size = stun_sock->cfg.so_sndbuf_size;
|
|
status = pj_sock_setsockopt_sobuf(stun_sock->sock_fd, pj_SO_SNDBUF(),
|
|
PJ_TRUE, &sobuf_size);
|
|
if (status != PJ_SUCCESS) {
|
|
PJ_PERROR(3, (stun_sock->obj_name, status,
|
|
"Failed setting SO_SNDBUF"));
|
|
} else {
|
|
- if (sobuf_size < cfg->so_sndbuf_size) {
|
|
- PJ_LOG(4, (stun_sock->obj_name,
|
|
+ if (sobuf_size < stun_sock->cfg.so_sndbuf_size) {
|
|
+ PJ_LOG(4, (stun_sock->obj_name,
|
|
"Warning! Cannot set SO_SNDBUF as configured, "
|
|
"now=%d, configured=%d",
|
|
- sobuf_size, cfg->so_sndbuf_size));
|
|
+ sobuf_size, stun_sock->cfg.so_sndbuf_size));
|
|
} else {
|
|
PJ_LOG(5, (stun_sock->obj_name, "SO_SNDBUF set to %d",
|
|
sobuf_size));
|
|
@@ -279,16 +297,16 @@ PJ_DEF(pj_status_t) pj_stun_sock_create( pj_stun_config *stun_cfg,
|
|
|
|
/* Bind socket */
|
|
max_bind_retry = MAX_BIND_RETRY;
|
|
- if (cfg->port_range && cfg->port_range < max_bind_retry)
|
|
- max_bind_retry = cfg->port_range;
|
|
- pj_sockaddr_init(af, &bound_addr, NULL, 0);
|
|
- if (cfg->bound_addr.addr.sa_family == pj_AF_INET() ||
|
|
- cfg->bound_addr.addr.sa_family == pj_AF_INET6())
|
|
+ if (stun_sock->cfg.port_range && stun_sock->cfg.port_range < max_bind_retry)
|
|
+ max_bind_retry = stun_sock->cfg.port_range;
|
|
+ pj_sockaddr_init(stun_sock->af, &bound_addr, NULL, 0);
|
|
+ if (stun_sock->cfg.bound_addr.addr.sa_family == pj_AF_INET() ||
|
|
+ stun_sock->cfg.bound_addr.addr.sa_family == pj_AF_INET6())
|
|
{
|
|
- pj_sockaddr_cp(&bound_addr, &cfg->bound_addr);
|
|
+ pj_sockaddr_cp(&bound_addr, &stun_sock->cfg.bound_addr);
|
|
}
|
|
status = pj_sock_bind_random(stun_sock->sock_fd, &bound_addr,
|
|
- cfg->port_range, max_bind_retry);
|
|
+ stun_sock->cfg.port_range, max_bind_retry);
|
|
if (status != PJ_SUCCESS)
|
|
goto on_error;
|
|
|
|
@@ -298,13 +316,13 @@ PJ_DEF(pj_status_t) pj_stun_sock_create( pj_stun_config *stun_cfg,
|
|
pj_sockaddr bound_addr;
|
|
int addr_len = sizeof(bound_addr);
|
|
|
|
- status = pj_sock_getsockname(stun_sock->sock_fd, &bound_addr,
|
|
+ status = pj_sock_getsockname(stun_sock->sock_fd, &bound_addr,
|
|
&addr_len);
|
|
if (status != PJ_SUCCESS)
|
|
goto on_error;
|
|
|
|
stun_sock->info = pj_pool_alloc(pool, PJ_INET6_ADDRSTRLEN+10);
|
|
- pj_sockaddr_print(&bound_addr, stun_sock->info,
|
|
+ pj_sockaddr_print(&bound_addr, stun_sock->info,
|
|
PJ_INET6_ADDRSTRLEN, 3);
|
|
}
|
|
#endif
|
|
@@ -315,35 +333,153 @@ PJ_DEF(pj_status_t) pj_stun_sock_create( pj_stun_config *stun_cfg,
|
|
pj_activesock_cb activesock_cb;
|
|
|
|
pj_activesock_cfg_default(&activesock_cfg);
|
|
- activesock_cfg.grp_lock = stun_sock->grp_lock;
|
|
- activesock_cfg.async_cnt = cfg->async_cnt;
|
|
+ activesock_cfg.grp_lock = stun_sock->grp_lock;
|
|
+ activesock_cfg.async_cnt = stun_sock->cfg.async_cnt;
|
|
activesock_cfg.concurrency = 0;
|
|
|
|
/* Create the active socket */
|
|
pj_bzero(&activesock_cb, sizeof(activesock_cb));
|
|
+ activesock_cb.on_data_sent = &on_data_sent;
|
|
activesock_cb.on_data_recvfrom = &on_data_recvfrom;
|
|
- activesock_cb.on_data_sent = &on_data_sent;
|
|
- status = pj_activesock_create(pool, stun_sock->sock_fd,
|
|
- pj_SOCK_DGRAM(),
|
|
- &activesock_cfg, stun_cfg->ioqueue,
|
|
- &activesock_cb, stun_sock,
|
|
- &stun_sock->active_sock);
|
|
- if (status != PJ_SUCCESS)
|
|
+
|
|
+#if PJ_HAS_TCP
|
|
+ if (stun_sock->conn_type != PJ_STUN_TP_UDP) {
|
|
+ activesock_cb.on_accept_complete = &on_stun_sock_accept;
|
|
+ // Will be ready to accept incoming connections from the external world
|
|
+ status = pj_sock_listen(stun_sock->sock_fd, PJ_SOMAXCONN);
|
|
+ if (status != PJ_SUCCESS) {
|
|
+ goto on_error;
|
|
+ }
|
|
+ } else {
|
|
+ activesock_cb.on_connect_complete = &on_stun_sock_ready;
|
|
+ }
|
|
+#else
|
|
+ activesock_cb.on_connect_complete = &on_stun_sock_ready;
|
|
+#endif
|
|
+
|
|
+ status = pj_activesock_create(stun_sock->pool, stun_sock->sock_fd,
|
|
+ sock_type, &activesock_cfg,
|
|
+ stun_sock->stun_cfg.ioqueue,
|
|
+ &activesock_cb, stun_sock,
|
|
+ &stun_sock->active_sock);
|
|
+ if (status != PJ_SUCCESS) {
|
|
goto on_error;
|
|
+ }
|
|
|
|
- /* Start asynchronous read operations */
|
|
- status = pj_activesock_start_recvfrom(stun_sock->active_sock, pool,
|
|
- cfg->max_pkt_size, 0);
|
|
- if (status != PJ_SUCCESS)
|
|
+#if PJ_HAS_TCP
|
|
+ if (stun_sock->conn_type != PJ_STUN_TP_UDP) {
|
|
+ status = pj_activesock_start_accept(stun_sock->active_sock,
|
|
+ stun_sock->pool);
|
|
+ } else {
|
|
+ status = PJ_SUCCESS;
|
|
+ }
|
|
+ if (status == PJ_SUCCESS) {
|
|
+ on_stun_sock_ready(stun_sock->active_sock, PJ_SUCCESS);
|
|
+ } else if (status != PJ_EPENDING) {
|
|
+ char addrinfo[PJ_INET6_ADDRSTRLEN + 10];
|
|
+ pj_perror(3, stun_sock->pool->obj_name, status,
|
|
+ "Failed to connect to %s",
|
|
+ pj_sockaddr_print(&bound_addr, addrinfo,
|
|
+ sizeof(addrinfo), 3));
|
|
goto on_error;
|
|
+ }
|
|
+#else
|
|
+ on_stun_sock_ready(stun_sock->active_sock, PJ_SUCCESS);
|
|
+#endif
|
|
+ }
|
|
|
|
- /* Init send keys */
|
|
- pj_ioqueue_op_key_init(&stun_sock->send_key,
|
|
- sizeof(stun_sock->send_key));
|
|
- pj_ioqueue_op_key_init(&stun_sock->int_send_key,
|
|
- sizeof(stun_sock->int_send_key));
|
|
+ pj_grp_lock_release(stun_sock->grp_lock);
|
|
+ return status;
|
|
+on_error:
|
|
+ pj_stun_sock_destroy(stun_sock);
|
|
+ pj_grp_lock_release(stun_sock->grp_lock);
|
|
+ return status;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Create the STUN transport using the specified configuration.
|
|
+ */
|
|
+PJ_DEF(pj_status_t) pj_stun_sock_create( pj_stun_config *stun_cfg,
|
|
+ const char *name,
|
|
+ int af,
|
|
+ pj_stun_tp_type conn_type,
|
|
+ const pj_stun_sock_cb *cb,
|
|
+ const pj_stun_sock_cfg *cfg,
|
|
+ void *user_data,
|
|
+ pj_stun_sock **p_stun_sock)
|
|
+{
|
|
+ pj_pool_t *pool;
|
|
+ pj_stun_sock *stun_sock;
|
|
+ pj_stun_sock_cfg default_cfg;
|
|
+ pj_status_t status;
|
|
+
|
|
+ PJ_ASSERT_RETURN(stun_cfg && cb && p_stun_sock, PJ_EINVAL);
|
|
+ PJ_ASSERT_RETURN(af==pj_AF_INET()||af==pj_AF_INET6(), PJ_EAFNOTSUP);
|
|
+ PJ_ASSERT_RETURN(!cfg || pj_stun_sock_cfg_is_valid(cfg), PJ_EINVAL);
|
|
+ PJ_ASSERT_RETURN(cb->on_status, PJ_EINVAL);
|
|
+ PJ_ASSERT_RETURN(conn_type != PJ_STUN_TP_TCP || PJ_HAS_TCP, PJ_EINVAL);
|
|
+
|
|
+ status = pj_stun_config_check_valid(stun_cfg);
|
|
+ if (status != PJ_SUCCESS)
|
|
+ return status;
|
|
+
|
|
+ if (name == NULL) {
|
|
+ switch (conn_type) {
|
|
+ case PJ_STUN_TP_UDP:
|
|
+ name = "udpstun%p";
|
|
+ break;
|
|
+ case PJ_STUN_TP_TCP:
|
|
+ name = "tcpstun%p";
|
|
+ break;
|
|
+ default:
|
|
+ PJ_ASSERT_RETURN(!"Invalid STUN conn_type", PJ_EINVAL);
|
|
+ name = "tcpstun%p";
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (cfg == NULL) {
|
|
+ pj_stun_sock_cfg_default(&default_cfg);
|
|
+ cfg = &default_cfg;
|
|
+ }
|
|
+
|
|
+ /* Create structure */
|
|
+ pool = pj_pool_create(stun_cfg->pf, name, 256, 512, NULL);
|
|
+ stun_sock = PJ_POOL_ZALLOC_T(pool, pj_stun_sock);
|
|
+ stun_sock->pool = pool;
|
|
+ stun_sock->obj_name = pool->obj_name;
|
|
+ stun_sock->user_data = user_data;
|
|
+ stun_sock->af = af;
|
|
+ stun_sock->conn_type = conn_type;
|
|
+ stun_sock->sock_fd = PJ_INVALID_SOCKET;
|
|
+#if PJ_HAS_TCP
|
|
+ stun_sock->no_new_socket = PJ_FALSE;
|
|
+ stun_sock->outgoing_nb = -1;
|
|
+ stun_sock->incoming_nb = -1;
|
|
+#endif
|
|
+ pj_memcpy(&stun_sock->stun_cfg, stun_cfg, sizeof(*stun_cfg));
|
|
+ pj_memcpy(&stun_sock->cb, cb, sizeof(*cb));
|
|
+ /* Copy socket settings; QoS parameters etc */
|
|
+ pj_memcpy(&stun_sock->cfg, cfg, sizeof(*cfg));
|
|
+
|
|
+ stun_sock->ka_interval = cfg->ka_interval;
|
|
+ if (stun_sock->ka_interval == 0)
|
|
+ stun_sock->ka_interval = PJ_STUN_KEEP_ALIVE_SEC;
|
|
+
|
|
+ if (cfg->grp_lock) {
|
|
+ stun_sock->grp_lock = cfg->grp_lock;
|
|
+ } else {
|
|
+ status = pj_grp_lock_create(pool, NULL, &stun_sock->grp_lock);
|
|
+ if (status != PJ_SUCCESS) {
|
|
+ pj_pool_release(pool);
|
|
+ return status;
|
|
+ }
|
|
}
|
|
|
|
+ pj_grp_lock_add_ref(stun_sock->grp_lock);
|
|
+ pj_grp_lock_add_handler(stun_sock->grp_lock, pool, stun_sock,
|
|
+ &stun_sock_destructor);
|
|
+
|
|
/* Create STUN session */
|
|
{
|
|
pj_stun_session_cb sess_cb;
|
|
@@ -351,13 +487,16 @@ PJ_DEF(pj_status_t) pj_stun_sock_create( pj_stun_config *stun_cfg,
|
|
pj_bzero(&sess_cb, sizeof(sess_cb));
|
|
sess_cb.on_request_complete = &sess_on_request_complete;
|
|
sess_cb.on_send_msg = &sess_on_send_msg;
|
|
- status = pj_stun_session_create(&stun_sock->stun_cfg,
|
|
+ status = pj_stun_session_create(&stun_sock->stun_cfg,
|
|
stun_sock->obj_name,
|
|
- &sess_cb, PJ_FALSE,
|
|
+ &sess_cb, PJ_FALSE,
|
|
stun_sock->grp_lock,
|
|
- &stun_sock->stun_sess);
|
|
- if (status != PJ_SUCCESS)
|
|
- goto on_error;
|
|
+ &stun_sock->stun_sess,
|
|
+ conn_type);
|
|
+ if (status != PJ_SUCCESS) {
|
|
+ pj_stun_sock_destroy(stun_sock);
|
|
+ return status;
|
|
+ }
|
|
}
|
|
|
|
/* Associate us with the STUN session */
|
|
@@ -368,25 +507,370 @@ PJ_DEF(pj_status_t) pj_stun_sock_create( pj_stun_config *stun_cfg,
|
|
* STUN messages we sent with STUN messages that the application sends.
|
|
* The last 16bit value in the array is a counter.
|
|
*/
|
|
+ unsigned i;
|
|
for (i=0; i<PJ_ARRAY_SIZE(stun_sock->tsx_id); ++i) {
|
|
stun_sock->tsx_id[i] = (pj_uint16_t) pj_rand();
|
|
}
|
|
stun_sock->tsx_id[5] = 0;
|
|
|
|
-
|
|
/* Init timer entry */
|
|
stun_sock->ka_timer.cb = &ka_timer_cb;
|
|
stun_sock->ka_timer.user_data = stun_sock;
|
|
|
|
+ pj_stun_sock_alloc(stun_sock);
|
|
+
|
|
/* Done */
|
|
*p_stun_sock = stun_sock;
|
|
return PJ_SUCCESS;
|
|
+}
|
|
|
|
-on_error:
|
|
- pj_stun_sock_destroy(stun_sock);
|
|
- return status;
|
|
+/*
|
|
+ * Notification when outgoing TCP socket has been connected.
|
|
+ */
|
|
+static pj_bool_t on_stun_sock_ready(pj_activesock_t *asock, pj_status_t status)
|
|
+{
|
|
+ pj_stun_sock *stun_sock;
|
|
+ stun_sock = (pj_stun_sock *)pj_activesock_get_user_data(asock);
|
|
+ if (!stun_sock)
|
|
+ return PJ_FALSE;
|
|
+
|
|
+ pj_grp_lock_acquire(stun_sock->grp_lock);
|
|
+
|
|
+ /* The STUN session may have already been destroyed here.
|
|
+ * See ticket #1557 (http://trac.pjsip.org/repos/ticket/1557).
|
|
+ */
|
|
+ if (!stun_sock->stun_sess) {
|
|
+ sess_fail(stun_sock, PJ_STUN_SESS_DESTROYED, status);
|
|
+ pj_grp_lock_release(stun_sock->grp_lock);
|
|
+ return PJ_FALSE;
|
|
+ }
|
|
+
|
|
+ if (status != PJ_SUCCESS) {
|
|
+ sess_fail(stun_sock, PJ_STUN_TCP_CONNECT_ERROR, status);
|
|
+ pj_grp_lock_release(stun_sock->grp_lock);
|
|
+ return PJ_FALSE;
|
|
+ }
|
|
+
|
|
+ if (stun_sock->conn_type != PJ_STUN_TP_UDP)
|
|
+ PJ_LOG(5,(stun_sock->obj_name, "TCP connected"));
|
|
+
|
|
+ /* Start asynchronous read operations */
|
|
+ pj_status_t result;
|
|
+ result = pj_activesock_start_recvfrom(asock, stun_sock->pool,
|
|
+ stun_sock->cfg.max_pkt_size, 0);
|
|
+ if (result != PJ_SUCCESS)
|
|
+ return PJ_FALSE;
|
|
+
|
|
+ /* Associate us with the STUN session */
|
|
+ pj_stun_session_set_user_data(stun_sock->stun_sess, stun_sock);
|
|
+
|
|
+ /* Initialize random numbers to be used as STUN transaction ID for
|
|
+ * outgoing Binding request. We use the 80bit number to distinguish
|
|
+ * STUN messages we sent with STUN messages that the application sends.
|
|
+ * The last 16bit value in the array is a counter.
|
|
+ */
|
|
+ unsigned i;
|
|
+ for (i=0; i<PJ_ARRAY_SIZE(stun_sock->tsx_id); ++i) {
|
|
+ stun_sock->tsx_id[i] = (pj_uint16_t) pj_rand();
|
|
+ }
|
|
+ stun_sock->tsx_id[5] = 0;
|
|
+
|
|
+ /* Init timer entry */
|
|
+ stun_sock->ka_timer.cb = &ka_timer_cb;
|
|
+ stun_sock->ka_timer.user_data = stun_sock;
|
|
+
|
|
+ if (status != PJ_SUCCESS) {
|
|
+ pj_stun_sock_destroy(stun_sock);
|
|
+ pj_grp_lock_release(stun_sock->grp_lock);
|
|
+ return status;
|
|
+ }
|
|
+
|
|
+ /* Init send keys */
|
|
+ pj_ioqueue_op_key_init(&stun_sock->send_key, sizeof(stun_sock->send_key));
|
|
+ pj_ioqueue_op_key_init(&stun_sock->int_send_key,
|
|
+ sizeof(stun_sock->int_send_key));
|
|
+
|
|
+ pj_grp_lock_release(stun_sock->grp_lock);
|
|
+ return PJ_TRUE;
|
|
}
|
|
|
|
+static pj_bool_t parse_rx_packet(pj_activesock_t *asock,
|
|
+ void *data,
|
|
+ pj_size_t size,
|
|
+ const pj_sockaddr_t *rx_addr,
|
|
+ unsigned sock_addr_len)
|
|
+{
|
|
+
|
|
+ pj_stun_sock *stun_sock = (pj_stun_sock*) pj_activesock_get_user_data(asock);
|
|
+ if (!stun_sock)
|
|
+ return PJ_FALSE;
|
|
+
|
|
+ pj_grp_lock_acquire(stun_sock->grp_lock);
|
|
+ pj_uint16_t parsed = 0;
|
|
+ pj_status_t result = PJ_TRUE;
|
|
+ pj_status_t status;
|
|
+
|
|
+#if PJ_HAS_TCP
|
|
+ // Search current rx_buf
|
|
+ rx_buf* buf = NULL;
|
|
+ rx_buf* stun_sock_buf = stun_sock->rx_buffers;
|
|
+ while (stun_sock_buf) {
|
|
+ if (stun_sock_buf->asock == asock) {
|
|
+ buf = stun_sock_buf;
|
|
+ break;
|
|
+ }
|
|
+ stun_sock_buf = stun_sock_buf->next;
|
|
+ }
|
|
+ if (!buf) {
|
|
+ // Create an rx_buf; it will be released when the pool is released
|
|
+ buf = (rx_buf*)pj_pool_calloc(stun_sock->pool, 1, sizeof(rx_buf));
|
|
+ if (!buf) {
|
|
+ PJ_LOG(5, (stun_sock->obj_name, "Cannot allocate memory for rx_buf"));
|
|
+ status = pj_grp_lock_release(stun_sock->grp_lock);
|
|
+ return PJ_FALSE;
|
|
+ }
|
|
+ buf->asock = asock;
|
|
+ buf->next = stun_sock->rx_buffers;
|
|
+ if (stun_sock->rx_buffers)
|
|
+ stun_sock->rx_buffers->prev = buf;
|
|
+ stun_sock->rx_buffers = buf;
|
|
+ }
|
|
+#endif
|
|
+
|
|
+ do {
|
|
+ pj_uint16_t leftover = size - parsed;
|
|
+ pj_uint8_t *current_packet = ((pj_uint8_t *)(data)) + parsed;
|
|
+
|
|
+#if PJ_HAS_TCP
|
|
+ if (stun_sock->conn_type != PJ_STUN_TP_UDP) {
|
|
+ /* RFC6544, the packet is wrapped into a packet following the RFC4571 */
|
|
+ pj_bool_t store_remaining = PJ_TRUE;
|
|
+ if (buf->rx_buffer_size != 0 || buf->rx_wanted_size != 0) {
|
|
+ if (buf->rx_buffer_size == 1 && buf->rx_wanted_size == 0) {
|
|
+ // In this case, we want to know the header size
|
|
+ leftover = GETVAL16H(buf->rx_buffer, current_packet);
|
|
+
|
|
+ buf->rx_buffer_size = 0;
|
|
+ current_packet++;
|
|
+ parsed++;
|
|
+
|
|
+ if (leftover + parsed <= size) {
|
|
+ store_remaining = PJ_FALSE;
|
|
+ parsed += leftover;
|
|
+ } else {
|
|
+ buf->rx_wanted_size = leftover;
|
|
+ }
|
|
+
|
|
+ } else if (leftover + buf->rx_buffer_size >= buf->rx_wanted_size) {
|
|
+ // We have enough data; build a new packet to parse
|
|
+ store_remaining = PJ_FALSE;
|
|
+ pj_uint16_t eaten_bytes = buf->rx_wanted_size - buf->rx_buffer_size;
|
|
+ pj_memcpy(buf->rx_buffer + buf->rx_buffer_size,
|
|
+ current_packet, eaten_bytes);
|
|
+
|
|
+ leftover = buf->rx_wanted_size;
|
|
+ current_packet = buf->rx_buffer;
|
|
+ parsed += eaten_bytes;
|
|
+
|
|
+ buf->rx_buffer_size = 0;
|
|
+ buf->rx_wanted_size = 0;
|
|
+ }
|
|
+ } else if (leftover > 1) {
|
|
+ leftover = GETVAL16H(current_packet, current_packet+1);
|
|
+ current_packet += 2;
|
|
+ parsed += 2;
|
|
+ if (leftover + parsed <= size) {
|
|
+ store_remaining = PJ_FALSE;
|
|
+ parsed += leftover;
|
|
+ } else {
|
|
+ buf->rx_wanted_size = leftover;
|
|
+ }
|
|
+ }
|
|
+ if (store_remaining) {
|
|
+ leftover = size - parsed;
|
|
+ pj_memcpy(buf->rx_buffer + buf->rx_buffer_size,
|
|
+ current_packet, leftover);
|
|
+ buf->rx_buffer_size += leftover;
|
|
+ break;
|
|
+ }
|
|
+ } else {
|
|
+#endif
|
|
+ parsed = size;
|
|
+#if PJ_HAS_TCP
|
|
+ }
|
|
+#endif
|
|
+ /* Check that this is STUN message */
|
|
+ status = pj_stun_msg_check((const pj_uint8_t *)current_packet, leftover,
|
|
+ PJ_STUN_IS_DATAGRAM | PJ_STUN_CHECK_PACKET);
|
|
+ if (status != PJ_SUCCESS) {
|
|
+ /* Not STUN -- give it to application */
|
|
+ goto process_app_data;
|
|
+ }
|
|
+
|
|
+ /* Treat packet as STUN header and copy the STUN message type.
|
|
+ * We don't want to access the type directly from the header
|
|
+ * since it may not be properly aligned.
|
|
+ */
|
|
+ pj_stun_msg_hdr *hdr = (pj_stun_msg_hdr *)current_packet;
|
|
+ pj_uint16_t type;
|
|
+ pj_memcpy(&type, &hdr->type, 2);
|
|
+ type = pj_ntohs(type);
|
|
+
|
|
+ /* If the packet is a STUN Binding response and part of the
|
|
+ * transaction ID matches our internal ID, then this is
|
|
+ * our internal STUN message (Binding request or keep alive).
|
|
+ * Give it to our STUN session.
|
|
+ */
|
|
+ if (!PJ_STUN_IS_RESPONSE(type) ||
|
|
+ PJ_STUN_GET_METHOD(type) != PJ_STUN_BINDING_METHOD ||
|
|
+ pj_memcmp(hdr->tsx_id, stun_sock->tsx_id, 10) != 0)
|
|
+ {
|
|
+ /* Not STUN Binding response, or STUN transaction ID mismatch.
|
|
+ * This is not our message too -- give it to application.
|
|
+ */
|
|
+ goto process_app_data;
|
|
+ }
|
|
+
|
|
+ /* This is our STUN Binding response. Give it to the STUN session */
|
|
+ status = pj_stun_session_on_rx_pkt(stun_sock->stun_sess, current_packet,
|
|
+ leftover, PJ_STUN_IS_DATAGRAM, NULL,
|
|
+ NULL, rx_addr, sock_addr_len);
|
|
+
|
|
+ result &= status != PJ_EGONE ? PJ_TRUE : PJ_FALSE;
|
|
+ continue;
|
|
+
|
|
+process_app_data:
|
|
+ if (stun_sock->cb.on_rx_data)
|
|
+ (*stun_sock->cb.on_rx_data)(stun_sock, current_packet,
|
|
+ (unsigned)leftover, rx_addr, sock_addr_len);
|
|
+
|
|
+ result &= status != PJ_EGONE ? PJ_TRUE : PJ_FALSE;
|
|
+ } while (parsed < size && result);
|
|
+
|
|
+ status = pj_grp_lock_release(stun_sock->grp_lock);
|
|
+ return result;
|
|
+}
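Editor's note: parse_rx_packet above de-frames TCP data per RFC 4571 (which RFC 6544 reuses): each STUN or application packet is preceded by a 16-bit big-endian length, and a partial read is parked in rx_buffer until the remainder arrives. A standalone sketch of the framing itself; the function names are illustrative and not part of the patch.

#include <stdint.h>
#include <stddef.h>
#include <string.h>

/* Prepend the 16-bit network-order length required by RFC 4571.
 * Returns the framed size, or 0 if the output buffer is too small. */
static size_t rfc4571_frame(const uint8_t *pkt, uint16_t pkt_len,
                            uint8_t *out, size_t out_cap)
{
    if (out_cap < (size_t)pkt_len + 2)
        return 0;
    out[0] = (uint8_t)(pkt_len >> 8);
    out[1] = (uint8_t)(pkt_len & 0xff);
    memcpy(out + 2, pkt, pkt_len);
    return (size_t)pkt_len + 2;
}

/* Extract one frame from a TCP byte stream. Returns the payload length and
 * sets *consumed; returns -1 while the frame is still incomplete. */
static int rfc4571_deframe(const uint8_t *buf, size_t len,
                           const uint8_t **payload, size_t *consumed)
{
    if (len < 2)
        return -1;
    uint16_t plen = (uint16_t)((buf[0] << 8) | buf[1]);
    if (len < (size_t)plen + 2)
        return -1;                      /* wait for more bytes */
    *payload  = buf + 2;
    *consumed = (size_t)plen + 2;
    return plen;
}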
|
|
+
|
|
+static pj_bool_t on_data_read(pj_activesock_t *asock,
|
|
+ void *data,
|
|
+ pj_size_t size,
|
|
+ pj_status_t status,
|
|
+ pj_size_t *remainder)
|
|
+{
|
|
+
|
|
+ pj_stun_sock *stun_sock;
|
|
+
|
|
+ if (!(stun_sock = (pj_stun_sock *)pj_activesock_get_user_data(asock)))
|
|
+ return PJ_FALSE;
|
|
+
|
|
+ pj_stun_session_cb *cb = pj_stun_session_callback(stun_sock->stun_sess);
|
|
+ /* Log socket error or disconnection */
|
|
+ if (status != PJ_SUCCESS) {
|
|
+ if (stun_sock->conn_type == PJ_STUN_TP_UDP
|
|
+ || (status != PJ_EEOF && status != 120104 && status != 130054))
|
|
+ {
|
|
+ PJ_PERROR(2, (stun_sock->obj_name, status, "read() error"));
|
|
+ } else if (status == 120104
|
|
+ || status == 130054 /* RESET BY PEER */)
|
|
+ {
|
|
+ for (int i = 0; i <= stun_sock->outgoing_nb; ++i)
|
|
+ if (stun_sock->outgoing_socks[i].sock == asock
|
|
+ && cb
|
|
+ && (cb->on_peer_reset_connection))
|
|
+ {
|
|
+ (cb->on_peer_reset_connection)(stun_sock->stun_sess,
|
|
+ &stun_sock->outgoing_socks[i].addr);
|
|
+ }
|
|
+ }
|
|
+ return PJ_FALSE;
|
|
+ }
|
|
+#if PJ_HAS_TCP
|
|
+ pj_sockaddr_t *rx_addr = NULL;
|
|
+ unsigned sock_addr_len = 0;
|
|
+ for (int i = 0; i <= stun_sock->outgoing_nb; ++i)
|
|
+ if (stun_sock->outgoing_socks[i].sock == asock) {
|
|
+ rx_addr = &stun_sock->outgoing_socks[i].addr;
|
|
+ sock_addr_len = pj_sockaddr_get_len(rx_addr);
|
|
+ if (cb && (cb->on_peer_packet))
|
|
+ (cb->on_peer_packet)(stun_sock->stun_sess,
|
|
+ &stun_sock->outgoing_socks[i].addr);
|
|
+ }
|
|
+
|
|
+ if (rx_addr == NULL && stun_sock->incoming_nb != -1) {
|
|
+ // It's an incoming message
|
|
+ for (int i = 0; i <= stun_sock->incoming_nb; ++i)
|
|
+ if (stun_sock->incoming_socks[i].sock == asock) {
|
|
+ rx_addr = &stun_sock->incoming_socks[i].addr;
|
|
+ sock_addr_len = stun_sock->incoming_socks[i].addr_len;
|
|
+ }
|
|
+ }
|
|
+ return parse_rx_packet(asock, data, size, rx_addr, sock_addr_len);
|
|
+#else
|
|
+ pj_grp_lock_release(stun_sock->grp_lock);
|
|
+ return PJ_FALSE;
|
|
+#endif
|
|
+}
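Editor's note: on_data_read has to recover which peer a TCP read came from, since a stream read carries no source address the way recvfrom does; the active-socket pointer is matched against the outgoing and incoming tables. A standalone sketch of that reverse lookup; the struct and function names are illustrative.

#include <stddef.h>
#include <sys/socket.h>

typedef struct peer_entry { void *asock; struct sockaddr_storage addr; } peer_entry;

/* Return the stored remote address for this active socket, or NULL if unknown. */
static const struct sockaddr_storage *
peer_addr_for_asock(const peer_entry *tbl, int count, const void *asock)
{
    for (int i = 0; i < count; ++i)
        if (tbl[i].asock == asock)
            return &tbl[i].addr;
    return NULL;
}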
|
|
+
|
|
+#if PJ_HAS_TCP
|
|
+/*
|
|
+ * Notification when incoming TCP socket has been connected.
|
|
+ * NOTE: see https://www.pjsip.org/docs/latest/pjlib/docs/html//structpj__activesock__cb.htm if the status is needed
|
|
+ */
|
|
+static pj_bool_t on_stun_sock_accept(pj_activesock_t *active_sock,
|
|
+ pj_sock_t sock,
|
|
+ const pj_sockaddr_t *src_addr,
|
|
+ int src_addr_len)
|
|
+{
|
|
+ pj_status_t status;
|
|
+ pj_stun_sock *stun_sock;
|
|
+ int sock_type = pj_SOCK_STREAM();
|
|
+ stun_sock = (pj_stun_sock *)pj_activesock_get_user_data(active_sock);
|
|
+
|
|
+ if (stun_sock->no_new_socket)
|
|
+ return PJ_FALSE;
|
|
+
|
|
+ stun_sock->incoming_nb += 1;
|
|
+ int nb_check = stun_sock->incoming_nb;
|
|
+ pj_sock_t *fd = &stun_sock->incoming_socks[nb_check].fd;
|
|
+ pj_activesock_t **asock = &stun_sock->incoming_socks[nb_check].sock;
|
|
+
|
|
+ pj_sockaddr_cp(&stun_sock->incoming_socks[nb_check].addr, src_addr);
|
|
+ stun_sock->incoming_socks[nb_check].addr_len = src_addr_len;
|
|
+ *fd = sock;
|
|
+
|
|
+ pj_activesock_cfg activesock_cfg;
|
|
+ pj_activesock_cb activesock_cb;
|
|
+
|
|
+ pj_activesock_cfg_default(&activesock_cfg);
|
|
+ activesock_cfg.grp_lock = stun_sock->grp_lock;
|
|
+ activesock_cfg.async_cnt = stun_sock->cfg.async_cnt;
|
|
+ activesock_cfg.concurrency = 0;
|
|
+
|
|
+ /* Create the active socket */
|
|
+ pj_bzero(&activesock_cb, sizeof(activesock_cb));
|
|
+ activesock_cb.on_data_read = &on_data_read;
|
|
+ activesock_cb.on_data_sent = &on_data_sent;
|
|
+
|
|
+ status = pj_activesock_create(stun_sock->pool, *fd, sock_type,
|
|
+ &activesock_cfg, stun_sock->stun_cfg.ioqueue,
|
|
+ &activesock_cb, stun_sock, asock);
|
|
+ if (status != PJ_SUCCESS) {
|
|
+ pj_stun_sock_destroy(stun_sock);
|
|
+ pj_grp_lock_release(stun_sock->grp_lock);
|
|
+ return status;
|
|
+ }
|
|
+
|
|
+ /* Start asynchronous read operations */
|
|
+ pj_status_t result;
|
|
+ result = pj_activesock_start_read(*asock, stun_sock->pool,
|
|
+ stun_sock->cfg.max_pkt_size, 0);
|
|
+ if (result != PJ_SUCCESS)
|
|
+ return PJ_FALSE;
|
|
+
|
|
+ return PJ_TRUE;
|
|
+}
|
|
+#endif
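Editor's note: on_stun_sock_accept stores each accepted connection in incoming_socks, wraps the fd in a new active socket, and refuses new sockets once no_new_socket is set. The sketch below shows the same bookkeeping with an explicit capacity check before the table index is advanced; MAX_INCOMING and the struct layout are assumptions for illustration, not part of pjnath.

#define MAX_INCOMING 8                      /* hypothetical table size */

typedef struct incoming { int fd; int in_use; } incoming;

typedef struct listener {
    incoming socks[MAX_INCOMING];
    int      incoming_nb;                   /* index of last used slot, -1 if none */
    int      no_new_socket;                 /* set once the selected pair is known */
} listener;

/* Accept-callback body: refuse new sockets once frozen or when the table is full. */
static int store_accepted_fd(listener *l, int fd)
{
    if (l->no_new_socket || l->incoming_nb + 1 >= MAX_INCOMING)
        return -1;                          /* caller should close fd */
    l->incoming_nb += 1;
    l->socks[l->incoming_nb].fd = fd;
    l->socks[l->incoming_nb].in_use = 1;
    return l->incoming_nb;
}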
|
|
+
|
|
/* Start socket. */
|
|
PJ_DEF(pj_status_t) pj_stun_sock_start( pj_stun_sock *stun_sock,
|
|
const pj_str_t *domain,
|
|
@@ -401,7 +885,7 @@ PJ_DEF(pj_status_t) pj_stun_sock_start( pj_stun_sock *stun_sock,
|
|
|
|
/* Check whether the domain contains IP address */
|
|
stun_sock->srv_addr.addr.sa_family = (pj_uint16_t)stun_sock->af;
|
|
- status = pj_inet_pton(stun_sock->af, domain,
|
|
+ status = pj_inet_pton(stun_sock->af, domain,
|
|
pj_sockaddr_get_addr(&stun_sock->srv_addr));
|
|
if (status != PJ_SUCCESS) {
|
|
stun_sock->srv_addr.addr.sa_family = (pj_uint16_t)0;
|
|
@@ -423,9 +907,9 @@ PJ_DEF(pj_status_t) pj_stun_sock_start( pj_stun_sock *stun_sock,
|
|
opt = PJ_DNS_SRV_FALLBACK_A;
|
|
|
|
stun_sock->last_err = PJ_SUCCESS;
|
|
- status = pj_dns_srv_resolve(domain, &res_name, default_port,
|
|
+ status = pj_dns_srv_resolve(domain, &res_name, default_port,
|
|
stun_sock->pool, resolver, opt,
|
|
- stun_sock, &dns_srv_resolver_cb,
|
|
+ stun_sock, &dns_srv_resolver_cb,
|
|
&stun_sock->q);
|
|
if (status != PJ_SUCCESS) {
|
|
PJ_PERROR(4,(stun_sock->obj_name, status,
|
|
@@ -525,6 +1009,26 @@ PJ_DEF(pj_status_t) pj_stun_sock_destroy(pj_stun_sock *stun_sock)
|
|
stun_sock->sock_fd = PJ_INVALID_SOCKET;
|
|
}
|
|
|
|
+ for (int i = 0; i <= stun_sock->incoming_nb ; ++i) {
|
|
+ if (stun_sock->incoming_socks[i].sock != NULL) {
|
|
+ stun_sock->incoming_socks[i].fd = PJ_INVALID_SOCKET;
|
|
+ pj_activesock_close(stun_sock->incoming_socks[i].sock);
|
|
+ } else if (stun_sock->incoming_socks[i].fd != PJ_INVALID_SOCKET) {
|
|
+ pj_sock_close(stun_sock->incoming_socks[i].fd);
|
|
+ stun_sock->incoming_socks[i].fd = PJ_INVALID_SOCKET;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ for (int i = 0; i <= stun_sock->outgoing_nb ; ++i) {
|
|
+ if (stun_sock->outgoing_socks[i].sock != NULL) {
|
|
+ stun_sock->outgoing_socks[i].fd = PJ_INVALID_SOCKET;
|
|
+ pj_activesock_close(stun_sock->outgoing_socks[i].sock);
|
|
+ } else if (stun_sock->outgoing_socks[i].fd != PJ_INVALID_SOCKET) {
|
|
+ pj_sock_close(stun_sock->outgoing_socks[i].fd);
|
|
+ stun_sock->outgoing_socks[i].fd = PJ_INVALID_SOCKET;
|
|
+ }
|
|
+ }
|
|
+
|
|
if (stun_sock->stun_sess) {
|
|
pj_stun_session_destroy(stun_sock->stun_sess);
|
|
}
|
|
@@ -558,13 +1062,13 @@ PJ_DEF(pj_grp_lock_t *) pj_stun_sock_get_grp_lock(pj_stun_sock *stun_sock)
|
|
}
|
|
|
|
/* Notify application that session has failed */
|
|
-static pj_bool_t sess_fail(pj_stun_sock *stun_sock,
|
|
+static pj_bool_t sess_fail(pj_stun_sock *stun_sock,
|
|
pj_stun_sock_op op,
|
|
pj_status_t status)
|
|
{
|
|
pj_bool_t ret;
|
|
|
|
- PJ_PERROR(4,(stun_sock->obj_name, status,
|
|
+ PJ_PERROR(4,(stun_sock->obj_name, status,
|
|
"Session failed because %s failed",
|
|
pj_stun_sock_op_name(op)));
|
|
|
|
@@ -601,10 +1105,10 @@ static void dns_srv_resolver_cb(void *user_data,
|
|
pj_sockaddr_init(stun_sock->af, &stun_sock->srv_addr, NULL,
|
|
rec->entry[0].port);
|
|
if (stun_sock->af == pj_AF_INET6()) {
|
|
- stun_sock->srv_addr.ipv6.sin6_addr =
|
|
+ stun_sock->srv_addr.ipv6.sin6_addr =
|
|
rec->entry[0].server.addr[0].ip.v6;
|
|
} else {
|
|
- stun_sock->srv_addr.ipv4.sin_addr =
|
|
+ stun_sock->srv_addr.ipv4.sin_addr =
|
|
rec->entry[0].server.addr[0].ip.v4;
|
|
}
|
|
|
|
@@ -625,18 +1129,18 @@ static pj_status_t get_mapped_addr(pj_stun_sock *stun_sock)
|
|
++stun_sock->tsx_id[5];
|
|
status = pj_stun_session_create_req(stun_sock->stun_sess,
|
|
PJ_STUN_BINDING_REQUEST,
|
|
- PJ_STUN_MAGIC,
|
|
- (const pj_uint8_t*)stun_sock->tsx_id,
|
|
+ PJ_STUN_MAGIC,
|
|
+ (const pj_uint8_t*)stun_sock->tsx_id,
|
|
&tdata);
|
|
if (status != PJ_SUCCESS)
|
|
goto on_error;
|
|
-
|
|
+
|
|
/* Send request */
|
|
status=pj_stun_session_send_msg(stun_sock->stun_sess, INTERNAL_MSG_TOKEN,
|
|
- PJ_FALSE, PJ_TRUE, &stun_sock->srv_addr,
|
|
+ PJ_FALSE, (stun_sock->conn_type == PJ_STUN_TP_UDP), &stun_sock->srv_addr,
|
|
pj_sockaddr_get_len(&stun_sock->srv_addr),
|
|
tdata);
|
|
- if (status != PJ_SUCCESS)
|
|
+ if (status != PJ_SUCCESS && status != PJ_EPENDING)
|
|
goto on_error;
|
|
|
|
return PJ_SUCCESS;
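Editor's note: get_mapped_addr now passes (conn_type == PJ_STUN_TP_UDP) as the retransmit flag and also accepts PJ_EPENDING, since a TCP send may complete asynchronously and the stream itself provides reliability. A small sketch of that decision, written against illustrative constants rather than the real pjnath enums and pj_status_t values:

/* Illustrative constants only; the real code uses pj_stun_tp_type and pj_status_t. */
enum { TP_UDP = 1, TP_TCP = 2 };
enum { ST_SUCCESS = 0, ST_EPENDING = 1, ST_ERROR = -1 };

/* STUN retransmission timers are only needed on unreliable transports. */
static int need_retransmit(int conn_type)
{
    return conn_type == TP_UDP;
}

/* On an asynchronous (TCP) send, "pending" is not a failure. */
static int send_failed(int status)
{
    return status != ST_SUCCESS && status != ST_EPENDING;
}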
|
|
@@ -657,10 +1161,12 @@ PJ_DEF(pj_status_t) pj_stun_sock_get_info( pj_stun_sock *stun_sock,
|
|
|
|
pj_grp_lock_acquire(stun_sock->grp_lock);
|
|
|
|
+ info->conn_type = stun_sock->conn_type;
|
|
+
|
|
/* Copy STUN server address and mapped address */
|
|
pj_memcpy(&info->srv_addr, &stun_sock->srv_addr,
|
|
sizeof(pj_sockaddr));
|
|
- pj_memcpy(&info->mapped_addr, &stun_sock->mapped_addr,
|
|
+ pj_memcpy(&info->mapped_addr, &stun_sock->mapped_addr,
|
|
sizeof(pj_sockaddr));
|
|
|
|
/* Retrieve bound address */
|
|
@@ -673,7 +1179,7 @@ PJ_DEF(pj_status_t) pj_stun_sock_get_info( pj_stun_sock *stun_sock,
|
|
}
|
|
|
|
/* If socket is bound to a specific interface, then only put that
|
|
- * interface in the alias list. Otherwise query all the interfaces
|
|
+ * interface in the alias list. Otherwise query all the interfaces
|
|
* in the host.
|
|
*/
|
|
if (pj_sockaddr_has_addr(&info->bound_addr)) {
|
|
@@ -693,20 +1199,20 @@ PJ_DEF(pj_status_t) pj_stun_sock_get_info( pj_stun_sock *stun_sock,
|
|
pj_grp_lock_release(stun_sock->grp_lock);
|
|
return status;
|
|
}
|
|
-
|
|
+
|
|
pj_sockaddr_set_port(&def_addr, port);
|
|
-
|
|
+
|
|
/* Enum all IP interfaces in the host */
|
|
pj_enum_ip_option_default(&enum_opt);
|
|
enum_opt.af = stun_sock->af;
|
|
enum_opt.omit_deprecated_ipv6 = PJ_TRUE;
|
|
info->alias_cnt = PJ_ARRAY_SIZE(info->aliases);
|
|
- status = pj_enum_ip_interface2(&enum_opt, &info->alias_cnt,
|
|
+ status = pj_enum_ip_interface2(&enum_opt, &info->alias_cnt,
|
|
info->aliases);
|
|
if (status == PJ_ENOTSUP) {
|
|
/* Try again without omitting deprecated IPv6 addresses */
|
|
enum_opt.omit_deprecated_ipv6 = PJ_FALSE;
|
|
- status = pj_enum_ip_interface2(&enum_opt, &info->alias_cnt,
|
|
+ status = pj_enum_ip_interface2(&enum_opt, &info->alias_cnt,
|
|
info->aliases);
|
|
}
|
|
|
|
@@ -754,7 +1260,7 @@ PJ_DEF(pj_status_t) pj_stun_sock_sendto( pj_stun_sock *stun_sock,
|
|
pj_status_t status;
|
|
|
|
PJ_ASSERT_RETURN(stun_sock && pkt && dst_addr && addr_len, PJ_EINVAL);
|
|
-
|
|
+
|
|
pj_grp_lock_acquire(stun_sock->grp_lock);
|
|
|
|
if (!stun_sock->active_sock) {
|
|
@@ -769,13 +1275,276 @@ PJ_DEF(pj_status_t) pj_stun_sock_sendto( pj_stun_sock *stun_sock,
|
|
send_key = &stun_sock->send_key;
|
|
|
|
size = pkt_len;
|
|
- status = pj_activesock_sendto(stun_sock->active_sock, send_key,
|
|
- pkt, &size, flag, dst_addr, addr_len);
|
|
+ if (stun_sock->conn_type == PJ_STUN_TP_UDP) {
|
|
+ status = pj_activesock_sendto(stun_sock->active_sock, send_key,
|
|
+ pkt, &size, flag, dst_addr, addr_len);
|
|
+ } else {
|
|
+#if PJ_HAS_TCP
|
|
+ pj_bool_t is_outgoing = PJ_FALSE;
|
|
+ pj_bool_t is_incoming = PJ_FALSE;
|
|
+ for (int i = 0; i <= stun_sock->outgoing_nb; ++i) {
|
|
+ if (stun_sock->outgoing_socks[i].sock != NULL
|
|
+ && pj_sockaddr_cmp(&stun_sock->outgoing_socks[i].addr, dst_addr) == 0) {
|
|
+ is_outgoing = PJ_TRUE;
|
|
+ status = pj_activesock_send(stun_sock->outgoing_socks[i].sock,
|
|
+ send_key, pkt, &size, flag);
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+ if (is_outgoing == PJ_FALSE) {
|
|
+ for (int i = 0 ; i <= stun_sock->incoming_nb; ++i) {
|
|
+ if (stun_sock->incoming_socks[i].sock != NULL
|
|
+ && pj_sockaddr_cmp(&stun_sock->incoming_socks[i].addr,
|
|
+ dst_addr) == 0) {
|
|
+ status = pj_activesock_send(stun_sock->incoming_socks[i].sock,
|
|
+ send_key, pkt, &size, flag);
|
|
+ is_incoming = PJ_TRUE;
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+ if (is_outgoing == PJ_FALSE && is_incoming == PJ_FALSE) {
|
|
+ status = pj_activesock_send(stun_sock->active_sock, send_key, pkt,
|
|
+ &size, flag);
|
|
+ }
|
|
+
|
|
+#endif
|
|
+ }
|
|
|
|
pj_grp_lock_release(stun_sock->grp_lock);
|
|
return status;
|
|
}
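Editor's note: with TCP, pj_stun_sock_sendto can no longer hand every packet to a single connectionless socket: the destination address is compared against the outgoing table first, then the incoming table, and only then does the code fall back to the primary active socket. A compact standalone restatement of that selection order; the types and the fd-based return are illustrative.

#include <string.h>
#include <sys/socket.h>

typedef struct conn { int fd; struct sockaddr_storage addr; socklen_t alen; } conn;

static int addr_equal(const struct sockaddr_storage *a, socklen_t alen,
                      const struct sockaddr_storage *b, socklen_t blen)
{
    return alen == blen && memcmp(a, b, alen) == 0;
}

/* Pick the connection whose remote address matches dst; outgoing entries are
 * preferred, then incoming, then the default socket (signalled by -1 here). */
static int pick_conn(const conn *outgoing, int n_out,
                     const conn *incoming, int n_in,
                     const struct sockaddr_storage *dst, socklen_t dlen)
{
    for (int i = 0; i < n_out; ++i)
        if (addr_equal(&outgoing[i].addr, outgoing[i].alen, dst, dlen))
            return outgoing[i].fd;
    for (int i = 0; i < n_in; ++i)
        if (addr_equal(&incoming[i].addr, incoming[i].alen, dst, dlen))
            return incoming[i].fd;
    return -1;   /* caller uses the primary active socket */
}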
|
|
|
|
+#if PJ_HAS_TCP
|
|
+
|
|
+PJ_DECL(pj_status_t) pj_stun_sock_connect(pj_stun_sock *stun_sock,
|
|
+ const pj_sockaddr_t *remote_addr,
|
|
+ int af,
|
|
+ int nb_check)
|
|
+{
|
|
+
|
|
+ pj_grp_lock_acquire(stun_sock->grp_lock);
|
|
+ int sock_type = pj_SOCK_STREAM();
|
|
+
|
|
+ outgoing_sock* os = &stun_sock->outgoing_socks[nb_check];
|
|
+ pj_sock_t *fd = &os->fd;
|
|
+ pj_activesock_t **asock = &os->sock;
|
|
+
|
|
+ pj_sockaddr_t *addr = &os->addr;
|
|
+ os->addr_len = pj_sockaddr_get_len(remote_addr);
|
|
+
|
|
+
|
|
+ pj_status_t status = pj_sock_socket(af, sock_type, 0, fd);
|
|
+ if (status != PJ_SUCCESS) {
|
|
+ pj_stun_sock_destroy(stun_sock);
|
|
+ pj_grp_lock_release(stun_sock->grp_lock);
|
|
+ return status;
|
|
+ }
|
|
+
|
|
+ /* Apply QoS, if specified */
|
|
+ status = pj_sock_apply_qos2(*fd, stun_sock->cfg.qos_type,
|
|
+ &stun_sock->cfg.qos_params, 2, stun_sock->obj_name, NULL);
|
|
+ if (status != PJ_SUCCESS && !stun_sock->cfg.qos_ignore_error) {
|
|
+ pj_stun_sock_destroy(stun_sock);
|
|
+ pj_grp_lock_release(stun_sock->grp_lock);
|
|
+ return status;
|
|
+ }
|
|
+
|
|
+ /* Apply socket buffer size */
|
|
+ if (stun_sock->cfg.so_rcvbuf_size > 0) {
|
|
+ unsigned sobuf_size = stun_sock->cfg.so_rcvbuf_size;
|
|
+ status = pj_sock_setsockopt_sobuf(*fd, pj_SO_RCVBUF(), PJ_TRUE, &sobuf_size);
|
|
+ if (status != PJ_SUCCESS) {
|
|
+ pj_perror(3, stun_sock->obj_name, status, "Failed setting SO_RCVBUF");
|
|
+ } else {
|
|
+ if (sobuf_size < stun_sock->cfg.so_rcvbuf_size) {
|
|
+ PJ_LOG(4, (stun_sock->obj_name,
|
|
+ "Warning! Cannot set SO_RCVBUF as configured, "
|
|
+ "now=%d, configured=%d",
|
|
+ sobuf_size, stun_sock->cfg.so_rcvbuf_size));
|
|
+ } else {
|
|
+ PJ_LOG(5, (stun_sock->obj_name, "SO_RCVBUF set to %d", sobuf_size));
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (stun_sock->cfg.so_sndbuf_size > 0) {
|
|
+ unsigned sobuf_size = stun_sock->cfg.so_sndbuf_size;
|
|
+ status = pj_sock_setsockopt_sobuf(*fd, pj_SO_SNDBUF(), PJ_TRUE, &sobuf_size);
|
|
+ if (status != PJ_SUCCESS) {
|
|
+ pj_perror(3, stun_sock->obj_name, status, "Failed setting SO_SNDBUF");
|
|
+ } else {
|
|
+ if (sobuf_size < stun_sock->cfg.so_sndbuf_size) {
|
|
+ PJ_LOG(4, (stun_sock->obj_name,
|
|
+ "Warning! Cannot set SO_SNDBUF as configured, "
|
|
+ "now=%d, configured=%d",
|
|
+ sobuf_size, stun_sock->cfg.so_sndbuf_size));
|
|
+ } else {
|
|
+ PJ_LOG(5, (stun_sock->obj_name, "SO_SNDBUF set to %d", sobuf_size));
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ /* Init active socket configuration */
|
|
+ {
|
|
+ pj_activesock_cfg activesock_cfg;
|
|
+ pj_activesock_cb activesock_cb;
|
|
+
|
|
+ pj_activesock_cfg_default(&activesock_cfg);
|
|
+ activesock_cfg.grp_lock = stun_sock->grp_lock;
|
|
+ activesock_cfg.async_cnt = stun_sock->cfg.async_cnt;
|
|
+ activesock_cfg.concurrency = 0;
|
|
+
|
|
+ /* Create the active socket */
|
|
+ pj_bzero(&activesock_cb, sizeof(activesock_cb));
|
|
+ activesock_cb.on_data_read = &on_data_read;
|
|
+ activesock_cb.on_data_sent = &on_data_sent;
|
|
+ activesock_cb.on_connect_complete = &on_connect_complete;
|
|
+
|
|
+ status = pj_activesock_create(stun_sock->pool, *fd,
|
|
+ sock_type, &activesock_cfg,
|
|
+ stun_sock->stun_cfg.ioqueue, &activesock_cb,
|
|
+ stun_sock, asock);
|
|
+ if (status != PJ_SUCCESS) {
|
|
+ pj_grp_lock_release(stun_sock->grp_lock);
|
|
+ return status;
|
|
+ }
|
|
+
|
|
+ pj_sockaddr_init(stun_sock->af, addr, NULL, 0);
|
|
+ pj_sockaddr_cp(addr, remote_addr);
|
|
+
|
|
+ status = pj_activesock_start_connect(
|
|
+ *asock, stun_sock->pool, addr,
|
|
+ os->addr_len);
|
|
+ if (status == PJ_SUCCESS) {
|
|
+ on_connect_complete(*asock, status);
|
|
+ } else if (status != PJ_EPENDING) {
|
|
+ char addrinfo[PJ_INET6_ADDRSTRLEN+8];
|
|
+ pj_perror(3, stun_sock->pool->obj_name, status, "Failed to connect to %s",
|
|
+ pj_sockaddr_print(addr, addrinfo, sizeof(addrinfo), 3));
|
|
+ pj_grp_lock_release(stun_sock->grp_lock);
|
|
+ return status;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ pj_grp_lock_release(stun_sock->grp_lock);
|
|
+ return status;
|
|
+}
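Editor's note: pj_stun_sock_connect treats the three possible results of pj_activesock_start_connect differently: immediate success invokes on_connect_complete by hand, PJ_EPENDING means the completion callback will fire later from the ioqueue, and anything else is a hard error. A minimal sketch of that pattern with the callback and transport stubbed out; names and status values are illustrative.

/* Illustrative status values; the real code uses pj_status_t. */
enum { OK = 0, PENDING = 1 };

typedef void (*connect_cb)(void *conn, int status);

/* Returns OK when the connect succeeded or was started asynchronously,
 * and the raw error code otherwise. */
static int start_connect_pattern(int (*start_connect)(void *conn),
                                 connect_cb on_complete, void *conn)
{
    int status = start_connect(conn);
    if (status == OK) {
        /* Connected synchronously: the ioqueue will not call us back,
         * so run the completion handler ourselves. */
        on_complete(conn, OK);
        return OK;
    }
    if (status == PENDING)
        return OK;          /* on_complete() runs later from the ioqueue */
    return status;          /* hard failure: report and clean up */
}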
|
|
+
|
|
+PJ_DECL(pj_status_t) pj_stun_sock_connect_active(pj_stun_sock *stun_sock,
|
|
+ const pj_sockaddr_t *remote_addr,
|
|
+ int af)
|
|
+{
|
|
+
|
|
+ if (stun_sock->incoming_nb != -1) {
|
|
+ // If the remote address matches an existing incoming connection, we are already connected (mainly for PRFLX candidates)
|
|
+ for (int i = 0 ; i <= stun_sock->incoming_nb; ++i) {
|
|
+ if (stun_sock->incoming_socks[i].sock != NULL
|
|
+ && pj_sockaddr_cmp(&stun_sock->incoming_socks[i].addr, remote_addr)==0) {
|
|
+ pj_stun_session_cb *cb =
|
|
+ pj_stun_session_callback(stun_sock->stun_sess);
|
|
+ (cb->on_peer_connection)(stun_sock->stun_sess, PJ_SUCCESS,
|
|
+ (pj_sockaddr_t *)remote_addr);
|
|
+ return PJ_SUCCESS;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ /* Create socket and bind socket */
|
|
+ stun_sock->outgoing_nb += 1;
|
|
+ int nb_check = stun_sock->outgoing_nb;
|
|
+ return pj_stun_sock_connect(stun_sock, remote_addr, af, nb_check);
|
|
+
|
|
+}
|
|
+
|
|
+PJ_DECL(pj_status_t) pj_stun_sock_reconnect_active(pj_stun_sock *stun_sock,
|
|
+ const pj_sockaddr_t *remote_addr,
|
|
+ int af)
|
|
+{
|
|
+ for (int i = 0; i <= stun_sock->outgoing_nb; ++i) {
|
|
+ if (stun_sock->outgoing_socks[i].sock != NULL
|
|
+ && pj_sockaddr_cmp(&stun_sock->outgoing_socks[i].addr, remote_addr) == 0) {
|
|
+ pj_activesock_close(stun_sock->outgoing_socks[i].sock);
|
|
+ return pj_stun_sock_connect(stun_sock, remote_addr, af, i);
|
|
+ }
|
|
+ }
|
|
+ return PJ_EINVAL;
|
|
+}
|
|
+
|
|
+
|
|
+PJ_DECL(pj_status_t) pj_stun_sock_close(pj_stun_sock *stun_sock,
|
|
+ const pj_sockaddr_t *remote_addr)
|
|
+{
|
|
+ for (int i = 0; i <= stun_sock->outgoing_nb; ++i) {
|
|
+ if (stun_sock->outgoing_socks[i].sock != NULL
|
|
+ && pj_sockaddr_cmp(&stun_sock->outgoing_socks[i].addr, remote_addr) == 0) {
|
|
+ return pj_activesock_close(stun_sock->outgoing_socks[i].sock);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ for (int i = 0; i <= stun_sock->incoming_nb; ++i) {
|
|
+ if (stun_sock->incoming_socks[i].sock != NULL
|
|
+ && pj_sockaddr_cmp(&stun_sock->incoming_socks[i].addr, remote_addr) == 0) {
|
|
+ return pj_activesock_close(stun_sock->incoming_socks[i].sock);
|
|
+ }
|
|
+ }
|
|
+ return PJ_EINVAL;
|
|
+}
|
|
+
|
|
+
|
|
+PJ_DECL(pj_status_t) pj_stun_sock_close_all_except(pj_stun_sock *stun_sock, const pj_sockaddr_t *remote_addr)
|
|
+{
|
|
+ stun_sock->no_new_socket = PJ_TRUE;
|
|
+ for (int i = 0; i <= stun_sock->outgoing_nb; ++i) {
|
|
+ if (stun_sock->outgoing_socks[i].sock != NULL
|
|
+ && pj_sockaddr_cmp(&stun_sock->outgoing_socks[i].addr, remote_addr) != 0) {
|
|
+ pj_activesock_close(stun_sock->outgoing_socks[i].sock);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ for (int i = 0; i <= stun_sock->incoming_nb; ++i) {
|
|
+ if (stun_sock->incoming_socks[i].sock != NULL
|
|
+ && pj_sockaddr_cmp(&stun_sock->incoming_socks[i].addr, remote_addr) != 0) {
|
|
+ pj_activesock_close(stun_sock->incoming_socks[i].sock);
|
|
+ }
|
|
+ }
|
|
+ return PJ_SUCCESS;
|
|
+}
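Editor's note: pj_stun_sock_close_all_except is used once a single remote has been selected: every other TCP leg is closed and no_new_socket blocks later accepts, so a late incoming connection cannot resurrect state that has already been discarded. A standalone sketch of that "keep only the chosen peer" sweep; the struct names are illustrative.

#include <string.h>
#include <unistd.h>
#include <sys/socket.h>

typedef struct leg { int fd; struct sockaddr_storage addr; socklen_t alen; } leg;

/* Close every leg whose remote address differs from `keep`, and freeze the
 * table so no new connections are accepted afterwards. */
static void close_all_except(leg *legs, int count,
                             const struct sockaddr_storage *keep, socklen_t klen,
                             int *no_new_socket)
{
    *no_new_socket = 1;
    for (int i = 0; i < count; ++i) {
        if (legs[i].fd < 0)
            continue;
        if (legs[i].alen == klen && memcmp(&legs[i].addr, keep, klen) == 0)
            continue;                       /* this is the chosen peer */
        close(legs[i].fd);
        legs[i].fd = -1;
    }
}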
|
|
+
|
|
+static pj_bool_t on_connect_complete(pj_activesock_t *asock, pj_status_t status)
|
|
+{
|
|
+ pj_stun_sock *stun_sock;
|
|
+ stun_sock = (pj_stun_sock *)pj_activesock_get_user_data(asock);
|
|
+
|
|
+ pj_sockaddr remote_addr;
|
|
+ pj_bool_t addr_found = PJ_FALSE;
|
|
+
|
|
+ // Look up the remote address of the connected socket
|
|
+ for (int i = 0 ; i <= stun_sock->outgoing_nb ; ++i) {
|
|
+ if (stun_sock->outgoing_socks[i].sock == asock) {
|
|
+ pj_sockaddr_cp(&remote_addr, &stun_sock->outgoing_socks[i].addr);
|
|
+ addr_found = PJ_TRUE;
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+ if (!addr_found)
|
|
+ return PJ_FALSE;
|
|
+
|
|
+ pj_stun_session_cb *cb = pj_stun_session_callback(stun_sock->stun_sess);
|
|
+ if (!cb->on_peer_connection)
|
|
+ return PJ_FALSE;
|
|
+
|
|
+
|
|
+ if (status == PJ_SUCCESS) {
|
|
+ status = pj_activesock_start_read(asock, stun_sock->pool,
|
|
+ stun_sock->cfg.max_pkt_size, 0);
|
|
+ }
|
|
+ (cb->on_peer_connection)(stun_sock->stun_sess, status, &remote_addr);
|
|
+ return status != PJ_SUCCESS;
|
|
+}
|
|
+
|
|
+#endif
|
|
+
|
|
/* This callback is called by the STUN session to send packet */
|
|
static pj_status_t sess_on_send_msg(pj_stun_session *sess,
|
|
void *token,
|
|
@@ -786,6 +1555,7 @@ static pj_status_t sess_on_send_msg(pj_stun_session *sess,
|
|
{
|
|
pj_stun_sock *stun_sock;
|
|
pj_ssize_t size;
|
|
+ pj_status_t status;
|
|
|
|
stun_sock = (pj_stun_sock *) pj_stun_session_get_user_data(sess);
|
|
if (!stun_sock || !stun_sock->active_sock) {
|
|
@@ -799,12 +1569,33 @@ static pj_status_t sess_on_send_msg(pj_stun_session *sess,
|
|
PJ_UNUSED_ARG(token);
|
|
|
|
size = pkt_size;
|
|
- return pj_activesock_sendto(stun_sock->active_sock,
|
|
- &stun_sock->int_send_key,
|
|
- pkt, &size, 0, dst_addr, addr_len);
|
|
+ if (stun_sock->conn_type == PJ_STUN_TP_UDP) {
|
|
+ status = pj_activesock_sendto(stun_sock->active_sock,
|
|
+ &stun_sock->int_send_key,pkt, &size, 0,
|
|
+ dst_addr, addr_len);
|
|
+ }
|
|
+#if PJ_HAS_TCP
|
|
+ else {
|
|
+ for (int i = 0 ; i <= stun_sock->incoming_nb; ++i) {
|
|
+ if (stun_sock->incoming_socks[i].sock != NULL
|
|
+ && !pj_sockaddr_cmp(&stun_sock->incoming_socks[i].addr, dst_addr)) {
|
|
+ status = pj_activesock_send(stun_sock->incoming_socks[i].sock,
|
|
+ &stun_sock->int_send_key,
|
|
+ pkt, &size, 0);
|
|
+ if (status != PJ_SUCCESS && status != PJ_EPENDING)
|
|
+ PJ_PERROR(4,(stun_sock->obj_name, status,
|
|
+ "Error sending answer on incoming_sock(s)"));
|
|
+ }
|
|
+ }
|
|
+ /* Last attempt: fall back to the primary active socket */
|
|
+ status = pj_activesock_send(stun_sock->active_sock,
|
|
+ &stun_sock->int_send_key, pkt, &size, 0);
|
|
+ }
|
|
+#endif
|
|
+ return status;
|
|
}
|
|
|
|
-/* This callback is called by the STUN session when outgoing transaction
|
|
+/* This callback is called by the STUN session when outgoing transaction
|
|
* is complete
|
|
*/
|
|
static void sess_on_request_complete(pj_stun_session *sess,
|
|
@@ -860,16 +1651,16 @@ static void sess_on_request_complete(pj_stun_session *sess,
|
|
}
|
|
|
|
/* Determine if mapped address has changed, and save the new mapped
|
|
- * address and call callback if so
|
|
+ * address and call callback if so
|
|
*/
|
|
mapped_changed = !pj_sockaddr_has_addr(&stun_sock->mapped_addr) ||
|
|
- pj_sockaddr_cmp(&stun_sock->mapped_addr,
|
|
+ pj_sockaddr_cmp(&stun_sock->mapped_addr,
|
|
&mapped_attr->sockaddr) != 0;
|
|
if (mapped_changed) {
|
|
/* Print mapped adress */
|
|
{
|
|
char addrinfo[PJ_INET6_ADDRSTRLEN+10];
|
|
- PJ_LOG(4,(stun_sock->obj_name,
|
|
+ PJ_LOG(4,(stun_sock->obj_name,
|
|
"STUN mapped address found/changed: %s",
|
|
pj_sockaddr_print(&mapped_attr->sockaddr,
|
|
addrinfo, sizeof(addrinfo), 3)));
|
|
@@ -941,8 +1732,6 @@ static pj_bool_t on_data_recvfrom(pj_activesock_t *asock,
|
|
pj_status_t status)
|
|
{
|
|
pj_stun_sock *stun_sock;
|
|
- pj_stun_msg_hdr *hdr;
|
|
- pj_uint16_t type;
|
|
|
|
stun_sock = (pj_stun_sock*) pj_activesock_get_user_data(asock);
|
|
if (!stun_sock)
|
|
@@ -954,58 +1743,7 @@ static pj_bool_t on_data_recvfrom(pj_activesock_t *asock,
|
|
return PJ_TRUE;
|
|
}
|
|
|
|
- pj_grp_lock_acquire(stun_sock->grp_lock);
|
|
-
|
|
- /* Check that this is STUN message */
|
|
- status = pj_stun_msg_check((const pj_uint8_t*)data, size,
|
|
- PJ_STUN_IS_DATAGRAM | PJ_STUN_CHECK_PACKET);
|
|
- if (status != PJ_SUCCESS) {
|
|
- /* Not STUN -- give it to application */
|
|
- goto process_app_data;
|
|
- }
|
|
-
|
|
- /* Treat packet as STUN header and copy the STUN message type.
|
|
- * We don't want to access the type directly from the header
|
|
- * since it may not be properly aligned.
|
|
- */
|
|
- hdr = (pj_stun_msg_hdr*) data;
|
|
- pj_memcpy(&type, &hdr->type, 2);
|
|
- type = pj_ntohs(type);
|
|
-
|
|
- /* If the packet is a STUN Binding response and part of the
|
|
- * transaction ID matches our internal ID, then this is
|
|
- * our internal STUN message (Binding request or keep alive).
|
|
- * Give it to our STUN session.
|
|
- */
|
|
- if (!PJ_STUN_IS_RESPONSE(type) ||
|
|
- PJ_STUN_GET_METHOD(type) != PJ_STUN_BINDING_METHOD ||
|
|
- pj_memcmp(hdr->tsx_id, stun_sock->tsx_id, 10) != 0)
|
|
- {
|
|
- /* Not STUN Binding response, or STUN transaction ID mismatch.
|
|
- * This is not our message too -- give it to application.
|
|
- */
|
|
- goto process_app_data;
|
|
- }
|
|
-
|
|
- /* This is our STUN Binding response. Give it to the STUN session */
|
|
- status = pj_stun_session_on_rx_pkt(stun_sock->stun_sess, data, size,
|
|
- PJ_STUN_IS_DATAGRAM, NULL, NULL,
|
|
- src_addr, addr_len);
|
|
-
|
|
- status = pj_grp_lock_release(stun_sock->grp_lock);
|
|
-
|
|
- return status!=PJ_EGONE ? PJ_TRUE : PJ_FALSE;
|
|
-
|
|
-process_app_data:
|
|
- if (stun_sock->cb.on_rx_data) {
|
|
- (*stun_sock->cb.on_rx_data)(stun_sock, data, (unsigned)size,
|
|
- src_addr, addr_len);
|
|
- status = pj_grp_lock_release(stun_sock->grp_lock);
|
|
- return status!=PJ_EGONE ? PJ_TRUE : PJ_FALSE;
|
|
- }
|
|
-
|
|
- status = pj_grp_lock_release(stun_sock->grp_lock);
|
|
- return status!=PJ_EGONE ? PJ_TRUE : PJ_FALSE;
|
|
+ return parse_rx_packet(asock, data, size, src_addr, addr_len);
|
|
}
|
|
|
|
/* Callback from active socket about send status */
|
|
@@ -1031,7 +1769,7 @@ static pj_bool_t on_data_sent(pj_activesock_t *asock,
|
|
pj_grp_lock_acquire(stun_sock->grp_lock);
|
|
|
|
/* If app gives NULL send_key in sendto() function, then give
|
|
- * NULL in the callback too
|
|
+ * NULL in the callback too
|
|
*/
|
|
if (send_key == &stun_sock->send_key)
|
|
send_key = NULL;
|
|
@@ -1046,3 +1784,7 @@ static pj_bool_t on_data_sent(pj_activesock_t *asock,
|
|
return PJ_TRUE;
|
|
}
|
|
|
|
+pj_stun_session* pj_stun_sock_get_session(pj_stun_sock *stun_sock)
|
|
+{
|
|
+ return stun_sock ? stun_sock->stun_sess : NULL;
|
|
+}
|
|
diff --git a/pjnath/src/pjnath/stun_transaction.c b/pjnath/src/pjnath/stun_transaction.c
|
|
index 71c407b06..a367e6704 100644
|
|
--- a/pjnath/src/pjnath/stun_transaction.c
|
|
+++ b/pjnath/src/pjnath/stun_transaction.c
|
|
@@ -1,4 +1,4 @@
|
|
-/*
|
|
+/*
|
|
* Copyright (C) 2008-2011 Teluu Inc. (http://www.teluu.com)
|
|
* Copyright (C) 2003-2008 Benny Prijono <benny@prijono.org>
|
|
*
|
|
@@ -14,7 +14,7 @@
|
|
*
|
|
* You should have received a copy of the GNU General Public License
|
|
* along with this program; if not, write to the Free Software
|
|
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
|
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
|
*/
|
|
#include <pjnath/stun_transaction.h>
|
|
#include <pjnath/errno.h>
|
|
@@ -61,9 +61,9 @@ struct pj_stun_client_tsx
|
|
#endif
|
|
|
|
|
|
-static void retransmit_timer_callback(pj_timer_heap_t *timer_heap,
|
|
+static void retransmit_timer_callback(pj_timer_heap_t *timer_heap,
|
|
pj_timer_entry *timer);
|
|
-static void destroy_timer_callback(pj_timer_heap_t *timer_heap,
|
|
+static void destroy_timer_callback(pj_timer_heap_t *timer_heap,
|
|
pj_timer_entry *timer);
|
|
|
|
/*
|
|
@@ -355,7 +355,7 @@ PJ_DEF(pj_status_t) pj_stun_client_tsx_send_msg(pj_stun_client_tsx *tsx,
|
|
|
|
|
|
/* Retransmit timer callback */
|
|
-static void retransmit_timer_callback(pj_timer_heap_t *timer_heap,
|
|
+static void retransmit_timer_callback(pj_timer_heap_t *timer_heap,
|
|
pj_timer_entry *timer)
|
|
{
|
|
pj_stun_client_tsx *tsx = (pj_stun_client_tsx *) timer->user_data;
|
|
@@ -411,6 +411,9 @@ static void retransmit_timer_callback(pj_timer_heap_t *timer_heap,
|
|
PJ_DEF(pj_status_t) pj_stun_client_tsx_retransmit(pj_stun_client_tsx *tsx,
|
|
pj_bool_t mod_count)
|
|
{
|
|
+ if (!tsx)
|
|
+ return PJ_EINVAL;
|
|
+
|
|
if (tsx->destroy_timer.id != 0 || tsx->is_destroying)
|
|
return PJ_SUCCESS;
|
|
|
|
@@ -423,7 +426,7 @@ PJ_DEF(pj_status_t) pj_stun_client_tsx_retransmit(pj_stun_client_tsx *tsx,
|
|
}
|
|
|
|
/* Timer callback to destroy transaction */
|
|
-static void destroy_timer_callback(pj_timer_heap_t *timer_heap,
|
|
+static void destroy_timer_callback(pj_timer_heap_t *timer_heap,
|
|
pj_timer_entry *timer)
|
|
{
|
|
pj_stun_client_tsx *tsx = (pj_stun_client_tsx *) timer->user_data;
|
|
@@ -449,23 +452,23 @@ PJ_DEF(pj_status_t) pj_stun_client_tsx_on_rx_msg(pj_stun_client_tsx *tsx,
|
|
pj_status_t status;
|
|
|
|
/* Must be STUN response message */
|
|
- if (!PJ_STUN_IS_SUCCESS_RESPONSE(msg->hdr.type) &&
|
|
+ if (!PJ_STUN_IS_SUCCESS_RESPONSE(msg->hdr.type) &&
|
|
!PJ_STUN_IS_ERROR_RESPONSE(msg->hdr.type))
|
|
{
|
|
- PJ_LOG(4,(tsx->obj_name,
|
|
+ PJ_LOG(4,(tsx->obj_name,
|
|
"STUN rx_msg() error: not response message"));
|
|
return PJNATH_EINSTUNMSGTYPE;
|
|
}
|
|
|
|
|
|
- /* We have a response with matching transaction ID.
|
|
+ /* We have a response with matching transaction ID.
|
|
* We can cancel retransmit timer now.
|
|
*/
|
|
pj_timer_heap_cancel_if_active(tsx->timer_heap, &tsx->retransmit_timer,
|
|
TIMER_INACTIVE);
|
|
|
|
/* Find STUN error code attribute */
|
|
- err_attr = (pj_stun_errcode_attr*)
|
|
+ err_attr = (pj_stun_errcode_attr*)
|
|
pj_stun_msg_find_attr(msg, PJ_STUN_ATTR_ERROR_CODE, 0);
|
|
|
|
if (err_attr && err_attr->err_code <= 200) {
|
|
@@ -473,7 +476,7 @@ PJ_DEF(pj_status_t) pj_stun_client_tsx_on_rx_msg(pj_stun_client_tsx *tsx,
|
|
* Any response between 100 and 299 MUST result in the cessation
|
|
* of request retransmissions, but otherwise is discarded.
|
|
*/
|
|
- PJ_LOG(4,(tsx->obj_name,
|
|
+ PJ_LOG(4,(tsx->obj_name,
|
|
"STUN rx_msg() error: received provisional %d code (%.*s)",
|
|
err_attr->err_code,
|
|
(int)err_attr->reason.slen,
|
|
diff --git a/pjnath/src/pjnath/turn_session.c b/pjnath/src/pjnath/turn_session.c
|
|
index e85e971f8..279e1bb82 100644
|
|
--- a/pjnath/src/pjnath/turn_session.c
|
|
+++ b/pjnath/src/pjnath/turn_session.c
|
|
@@ -1,4 +1,4 @@
|
|
-/*
|
|
+/*
|
|
* Copyright (C) 2008-2011 Teluu Inc. (http://www.teluu.com)
|
|
* Copyright (C) 2003-2008 Benny Prijono <benny@prijono.org>
|
|
*
|
|
@@ -14,7 +14,7 @@
|
|
*
|
|
* You should have received a copy of the GNU General Public License
|
|
* along with this program; if not, write to the Free Software
|
|
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
|
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
|
*/
|
|
#include <pjnath/turn_session.h>
|
|
#include <pjnath/errno.h>
|
|
@@ -35,7 +35,7 @@
|
|
#define PJ_TURN_CHANNEL_HTABLE_SIZE 8
|
|
#define PJ_TURN_PERM_HTABLE_SIZE 8
|
|
|
|
-static const char *state_names[] =
|
|
+static const char *state_names[] =
|
|
{
|
|
"Null",
|
|
"Resolving",
|
|
@@ -95,8 +95,8 @@ struct perm_t
|
|
/* The permission expiration */
|
|
pj_time_val expiry;
|
|
|
|
- /* Arbitrary/random pointer value (token) to map this perm with the
|
|
- * request to create it. It is used to invalidate this perm when the
|
|
+ /* Arbitrary/random pointer value (token) to map this perm with the
|
|
+ * request to create it. It is used to invalidate this perm when the
|
|
* request fails.
|
|
*/
|
|
void *req_token;
|
|
@@ -219,7 +219,7 @@ PJ_DEF(void) pj_turn_alloc_param_default(pj_turn_alloc_param *prm)
|
|
/*
|
|
* Duplicate pj_turn_alloc_param.
|
|
*/
|
|
-PJ_DEF(void) pj_turn_alloc_param_copy( pj_pool_t *pool,
|
|
+PJ_DEF(void) pj_turn_alloc_param_copy( pj_pool_t *pool,
|
|
pj_turn_alloc_param *dst,
|
|
const pj_turn_alloc_param *src)
|
|
{
|
|
@@ -311,7 +311,7 @@ PJ_DEF(pj_status_t) pj_turn_session_create( const pj_stun_config *cfg,
|
|
stun_cb.on_request_complete = &stun_on_request_complete;
|
|
stun_cb.on_rx_indication = &stun_on_rx_indication;
|
|
status = pj_stun_session_create(&sess->stun_cfg, sess->obj_name, &stun_cb,
|
|
- PJ_FALSE, sess->grp_lock, &sess->stun);
|
|
+ PJ_FALSE, sess->grp_lock, &sess->stun, conn_type);
|
|
if (status != PJ_SUCCESS) {
|
|
do_destroy(sess);
|
|
return status;
|
|
@@ -509,9 +509,9 @@ PJ_DEF(pj_status_t) pj_turn_session_get_info( pj_turn_session *sess,
|
|
else
|
|
pj_bzero(&info->server, sizeof(info->server));
|
|
|
|
- pj_memcpy(&info->mapped_addr, &sess->mapped_addr,
|
|
+ pj_memcpy(&info->mapped_addr, &sess->mapped_addr,
|
|
sizeof(sess->mapped_addr));
|
|
- pj_memcpy(&info->relay_addr, &sess->relay_addr,
|
|
+ pj_memcpy(&info->relay_addr, &sess->relay_addr,
|
|
sizeof(sess->relay_addr));
|
|
|
|
return PJ_SUCCESS;
|
|
@@ -594,7 +594,7 @@ PJ_DEF(pj_status_t) pj_turn_session_set_server( pj_turn_session *sess,
|
|
|
|
/* See if "domain" contains just IP address */
|
|
tmp_addr.addr.sa_family = sess->af;
|
|
- status = pj_inet_pton(sess->af, domain,
|
|
+ status = pj_inet_pton(sess->af, domain,
|
|
pj_sockaddr_get_addr(&tmp_addr));
|
|
is_ip_addr = (status == PJ_SUCCESS);
|
|
|
|
@@ -647,8 +647,8 @@ PJ_DEF(pj_status_t) pj_turn_session_set_server( pj_turn_session *sess,
|
|
/* Add reference before async DNS resolution */
|
|
pj_grp_lock_add_ref(sess->grp_lock);
|
|
|
|
- status = pj_dns_srv_resolve(domain, &res_name, default_port,
|
|
- sess->pool, resolver, opt, sess,
|
|
+ status = pj_dns_srv_resolve(domain, &res_name, default_port,
|
|
+ sess->pool, resolver, opt, sess,
|
|
&dns_srv_resolver_cb, NULL);
|
|
if (status != PJ_SUCCESS) {
|
|
set_state(sess, PJ_TURN_STATE_NULL);
|
|
@@ -664,9 +664,9 @@ PJ_DEF(pj_status_t) pj_turn_session_set_server( pj_turn_session *sess,
|
|
unsigned i, cnt;
|
|
|
|
/* Default port must be specified */
|
|
- PJ_ASSERT_ON_FAIL(default_port>0 && default_port<65536,
|
|
+ PJ_ASSERT_ON_FAIL(default_port>0 && default_port<65536,
|
|
{status=PJ_EINVAL; goto on_return;});
|
|
-
|
|
+
|
|
sess->default_port = (pj_uint16_t)default_port;
|
|
|
|
cnt = PJ_TURN_MAX_DNS_SRV_CNT;
|
|
@@ -689,7 +689,7 @@ PJ_DEF(pj_status_t) pj_turn_session_set_server( pj_turn_session *sess,
|
|
|
|
sess->srv_addr_cnt = (pj_uint16_t)cnt;
|
|
sess->srv_addr_list = (pj_sockaddr*)
|
|
- pj_pool_calloc(sess->pool, cnt,
|
|
+ pj_pool_calloc(sess->pool, cnt,
|
|
sizeof(pj_sockaddr));
|
|
for (i=0; i<cnt; ++i) {
|
|
pj_sockaddr *addr = &sess->srv_addr_list[i];
|
|
@@ -738,8 +738,8 @@ PJ_DEF(pj_status_t) pj_turn_session_alloc(pj_turn_session *sess,
|
|
pj_status_t status;
|
|
|
|
PJ_ASSERT_RETURN(sess, PJ_EINVAL);
|
|
- PJ_ASSERT_RETURN(sess->state>PJ_TURN_STATE_NULL &&
|
|
- sess->state<=PJ_TURN_STATE_RESOLVED,
|
|
+ PJ_ASSERT_RETURN(sess->state>PJ_TURN_STATE_NULL &&
|
|
+ sess->state<=PJ_TURN_STATE_RESOLVED,
|
|
PJ_EINVALIDOP);
|
|
PJ_ASSERT_RETURN(!param || param->peer_conn_type == PJ_TURN_TP_UDP ||
|
|
param->peer_conn_type == PJ_TURN_TP_TCP,
|
|
@@ -753,7 +753,7 @@ PJ_DEF(pj_status_t) pj_turn_session_alloc(pj_turn_session *sess,
|
|
|
|
pj_grp_lock_acquire(sess->grp_lock);
|
|
|
|
- if (param && param != &sess->alloc_param)
|
|
+ if (param && param != &sess->alloc_param)
|
|
pj_turn_alloc_param_copy(sess->pool, &sess->alloc_param, param);
|
|
|
|
if (sess->state < PJ_TURN_STATE_RESOLVED) {
|
|
@@ -769,7 +769,7 @@ PJ_DEF(pj_status_t) pj_turn_session_alloc(pj_turn_session *sess,
|
|
|
|
/* Ready to allocate */
|
|
pj_assert(sess->state == PJ_TURN_STATE_RESOLVED);
|
|
-
|
|
+
|
|
/* Create a bare request */
|
|
status = pj_stun_session_create_req(sess->stun, PJ_STUN_ALLOCATE_REQUEST,
|
|
PJ_STUN_MAGIC, NULL, &tdata);
|
|
@@ -820,9 +820,9 @@ PJ_DEF(pj_status_t) pj_turn_session_alloc(pj_turn_session *sess,
|
|
/* Send request */
|
|
set_state(sess, PJ_TURN_STATE_ALLOCATING);
|
|
retransmit = (sess->conn_type == PJ_TURN_TP_UDP);
|
|
- status = pj_stun_session_send_msg(sess->stun, NULL, PJ_FALSE,
|
|
+ status = pj_stun_session_send_msg(sess->stun, NULL, PJ_FALSE,
|
|
retransmit, sess->srv_addr,
|
|
- pj_sockaddr_get_len(sess->srv_addr),
|
|
+ pj_sockaddr_get_len(sess->srv_addr),
|
|
tdata);
|
|
if (status != PJ_SUCCESS) {
|
|
/* Set state back to RESOLVED. We don't want to destroy session now,
|
|
@@ -856,7 +856,7 @@ PJ_DEF(pj_status_t) pj_turn_session_set_perm( pj_turn_session *sess,
|
|
pj_grp_lock_acquire(sess->grp_lock);
|
|
|
|
/* Create a bare CreatePermission request */
|
|
- status = pj_stun_session_create_req(sess->stun,
|
|
+ status = pj_stun_session_create_req(sess->stun,
|
|
PJ_STUN_CREATE_PERM_REQUEST,
|
|
PJ_STUN_MAGIC, NULL, &tdata);
|
|
if (status != PJ_SUCCESS) {
|
|
@@ -905,10 +905,10 @@ PJ_DEF(pj_status_t) pj_turn_session_set_perm( pj_turn_session *sess,
|
|
}
|
|
|
|
/* Send the request */
|
|
- status = pj_stun_session_send_msg(sess->stun, req_token, PJ_FALSE,
|
|
+ status = pj_stun_session_send_msg(sess->stun, req_token, PJ_FALSE,
|
|
(sess->conn_type==PJ_TURN_TP_UDP),
|
|
sess->srv_addr,
|
|
- pj_sockaddr_get_len(sess->srv_addr),
|
|
+ pj_sockaddr_get_len(sess->srv_addr),
|
|
tdata);
|
|
if (status != PJ_SUCCESS) {
|
|
/* tdata is already destroyed */
|
|
@@ -964,10 +964,10 @@ static void send_refresh(pj_turn_session *sess, int lifetime)
|
|
set_state(sess, PJ_TURN_STATE_DEALLOCATING);
|
|
}
|
|
|
|
- status = pj_stun_session_send_msg(sess->stun, NULL, PJ_FALSE,
|
|
+ status = pj_stun_session_send_msg(sess->stun, NULL, PJ_FALSE,
|
|
(sess->conn_type==PJ_TURN_TP_UDP),
|
|
sess->srv_addr,
|
|
- pj_sockaddr_get_len(sess->srv_addr),
|
|
+ pj_sockaddr_get_len(sess->srv_addr),
|
|
tdata);
|
|
if (status != PJ_SUCCESS)
|
|
goto on_error;
|
|
@@ -985,17 +985,29 @@ on_error:
|
|
/**
|
|
* Relay data to the specified peer through the session.
|
|
*/
|
|
+
|
|
PJ_DEF(pj_status_t) pj_turn_session_sendto( pj_turn_session *sess,
|
|
const pj_uint8_t *pkt,
|
|
unsigned pkt_len,
|
|
const pj_sockaddr_t *addr,
|
|
unsigned addr_len)
|
|
+{
|
|
+ unsigned sent;
|
|
+ return pj_turn_session_sendto2(sess, pkt, pkt_len, addr, addr_len, &sent);
|
|
+}
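Editor's note: pj_turn_session_sendto is kept as a thin wrapper over the new pj_turn_session_sendto2, which adds a `sent` output so callers can learn how much application payload was handed to the transport once ChannelData or Send Indication framing is added. A sketch of that compatibility-wrapper idiom; the function names here are illustrative.

/* Extended entry point: reports how many payload bytes were accepted. */
static int send_pkt2(const unsigned char *pkt, unsigned pkt_len, unsigned *sent)
{
    /* ... framing and the actual send would happen here ... */
    *sent = pkt_len;
    return 0;
}

/* Original signature preserved for existing callers; the extra output is
 * simply discarded. */
static int send_pkt(const unsigned char *pkt, unsigned pkt_len)
{
    unsigned sent;
    return send_pkt2(pkt, pkt_len, &sent);
}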
|
|
+
|
|
+PJ_DEF(pj_status_t) pj_turn_session_sendto2(pj_turn_session *sess,
|
|
+ const pj_uint8_t *pkt,
|
|
+ unsigned pkt_len,
|
|
+ const pj_sockaddr_t *addr,
|
|
+ unsigned addr_len,
|
|
+ unsigned *sent)
|
|
{
|
|
struct ch_t *ch;
|
|
struct perm_t *perm;
|
|
pj_status_t status;
|
|
|
|
- PJ_ASSERT_RETURN(sess && pkt && pkt_len && addr && addr_len,
|
|
+ PJ_ASSERT_RETURN(sess && pkt && pkt_len && addr && addr_len,
|
|
PJ_EINVAL);
|
|
|
|
/* Return error if we're not ready */
|
|
@@ -1012,11 +1024,11 @@ PJ_DEF(pj_status_t) pj_turn_session_sendto( pj_turn_session *sess,
|
|
/* Permission doesn't exist, install it first */
|
|
char ipstr[PJ_INET6_ADDRSTRLEN+2];
|
|
|
|
- PJ_LOG(4,(sess->obj_name,
|
|
+ PJ_LOG(4,(sess->obj_name,
|
|
"sendto(): IP %s has no permission, requesting it first..",
|
|
pj_sockaddr_print(addr, ipstr, sizeof(ipstr), 2)));
|
|
|
|
- status = pj_turn_session_set_perm(sess, 1, (const pj_sockaddr*)addr,
|
|
+ status = pj_turn_session_set_perm(sess, 1, (const pj_sockaddr*)addr,
|
|
0);
|
|
if (status != PJ_SUCCESS) {
|
|
pj_grp_lock_release(sess->grp_lock);
|
|
@@ -1026,19 +1038,19 @@ PJ_DEF(pj_status_t) pj_turn_session_sendto( pj_turn_session *sess,
|
|
|
|
/* If peer connection is TCP (RFC 6062), send it directly */
|
|
if (sess->alloc_param.peer_conn_type == PJ_TURN_TP_TCP) {
|
|
- status = sess->cb.on_send_pkt(sess, pkt, pkt_len, addr, addr_len);
|
|
+ status = sess->cb.on_send_pkt2(sess, pkt, pkt_len, addr, addr_len, sent, pkt_len);
|
|
goto on_return;
|
|
}
|
|
|
|
/* See if the peer is bound to a channel number */
|
|
- ch = lookup_ch_by_addr(sess, addr, pj_sockaddr_get_len(addr),
|
|
+ ch = lookup_ch_by_addr(sess, addr, pj_sockaddr_get_len(addr),
|
|
PJ_FALSE, PJ_FALSE);
|
|
if (ch && ch->num != PJ_TURN_INVALID_CHANNEL && ch->bound) {
|
|
unsigned total_len;
|
|
|
|
/* Peer is assigned a channel number, we can use ChannelData */
|
|
pj_turn_channel_data *cd = (pj_turn_channel_data*)sess->tx_pkt;
|
|
-
|
|
+
|
|
pj_assert(sizeof(*cd)==4);
|
|
|
|
/* Calculate total length, including paddings */
|
|
@@ -1052,11 +1064,10 @@ PJ_DEF(pj_status_t) pj_turn_session_sendto( pj_turn_session *sess,
|
|
cd->length = pj_htons((pj_uint16_t)pkt_len);
|
|
pj_memcpy(cd+1, pkt, pkt_len);
|
|
|
|
- pj_assert(sess->srv_addr != NULL);
|
|
-
|
|
- status = sess->cb.on_send_pkt(sess, sess->tx_pkt, total_len,
|
|
- sess->srv_addr,
|
|
- pj_sockaddr_get_len(sess->srv_addr));
|
|
+ status = sess->cb.on_send_pkt2(sess, sess->tx_pkt, total_len,
|
|
+ sess->srv_addr,
|
|
+ pj_sockaddr_get_len(sess->srv_addr),
|
|
+ sent, pkt_len);
|
|
|
|
} else {
|
|
/* Use Send Indication. */
|
|
@@ -1070,7 +1081,7 @@ PJ_DEF(pj_status_t) pj_turn_session_sendto( pj_turn_session *sess,
|
|
|
|
/* Create blank SEND-INDICATION */
|
|
status = pj_stun_msg_init(&send_ind, PJ_STUN_SEND_INDICATION,
|
|
- PJ_STUN_MAGIC,
|
|
+ PJ_STUN_MAGIC,
|
|
(const pj_uint8_t*)sess->send_ind_tsx_id);
|
|
if (status != PJ_SUCCESS)
|
|
goto on_return;
|
|
@@ -1087,17 +1098,18 @@ PJ_DEF(pj_status_t) pj_turn_session_sendto( pj_turn_session *sess,
|
|
pj_stun_msg_add_attr(&send_ind, (pj_stun_attr_hdr*)&data_attr);
|
|
|
|
/* Encode the message */
|
|
- status = pj_stun_msg_encode(&send_ind, sess->tx_pkt,
|
|
+ status = pj_stun_msg_encode(&send_ind, sess->tx_pkt,
|
|
sizeof(sess->tx_pkt), 0,
|
|
NULL, &send_ind_len);
|
|
if (status != PJ_SUCCESS)
|
|
goto on_return;
|
|
|
|
/* Send the Send Indication */
|
|
- status = sess->cb.on_send_pkt(sess, sess->tx_pkt,
|
|
- (unsigned)send_ind_len,
|
|
- sess->srv_addr,
|
|
- pj_sockaddr_get_len(sess->srv_addr));
|
|
+ status = sess->cb.on_send_pkt2(sess, sess->tx_pkt,
|
|
+ (unsigned)send_ind_len,
|
|
+ sess->srv_addr,
|
|
+ pj_sockaddr_get_len(sess->srv_addr),
|
|
+ sent, pkt_len);
|
|
}
|
|
|
|
on_return:
|
|
@@ -1124,7 +1136,7 @@ PJ_DEF(pj_status_t) pj_turn_session_bind_channel(pj_turn_session *sess,
|
|
pj_grp_lock_acquire(sess->grp_lock);
|
|
|
|
/* Create blank ChannelBind request */
|
|
- status = pj_stun_session_create_req(sess->stun,
|
|
+ status = pj_stun_session_create_req(sess->stun,
|
|
PJ_STUN_CHANNEL_BIND_REQUEST,
|
|
PJ_STUN_MAGIC, NULL, &tdata);
|
|
if (status != PJ_SUCCESS)
|
|
@@ -1139,7 +1151,7 @@ PJ_DEF(pj_status_t) pj_turn_session_bind_channel(pj_turn_session *sess,
|
|
/* Channel is already bound. This is a refresh request. */
|
|
ch_num = ch->num;
|
|
} else {
|
|
- PJ_ASSERT_ON_FAIL(sess->next_ch <= PJ_TURN_CHANNEL_MAX,
|
|
+ PJ_ASSERT_ON_FAIL(sess->next_ch <= PJ_TURN_CHANNEL_MAX,
|
|
{status=PJ_ETOOMANY; goto on_return;});
|
|
ch->num = ch_num = sess->next_ch++;
|
|
}
|
|
@@ -1154,10 +1166,10 @@ PJ_DEF(pj_status_t) pj_turn_session_bind_channel(pj_turn_session *sess,
|
|
PJ_STUN_ATTR_XOR_PEER_ADDR, PJ_TRUE,
|
|
peer_adr, addr_len);
|
|
|
|
- /* Send the request, associate peer data structure with tdata
|
|
+ /* Send the request, associate peer data structure with tdata
|
|
* for future reference when we receive the ChannelBind response.
|
|
*/
|
|
- status = pj_stun_session_send_msg(sess->stun, ch, PJ_FALSE,
|
|
+ status = pj_stun_session_send_msg(sess->stun, ch, PJ_FALSE,
|
|
(sess->conn_type==PJ_TURN_TP_UDP),
|
|
sess->srv_addr,
|
|
pj_sockaddr_get_len(sess->srv_addr),
|
|
@@ -1190,7 +1202,7 @@ PJ_DEF(pj_status_t) pj_turn_session_connection_bind(
|
|
pj_grp_lock_acquire(sess->grp_lock);
|
|
|
|
/* Create blank ConnectionBind request */
|
|
- status = pj_stun_session_create_req(sess->stun,
|
|
+ status = pj_stun_session_create_req(sess->stun,
|
|
PJ_STUN_CONNECTION_BIND_REQUEST,
|
|
PJ_STUN_MAGIC, NULL, &tdata);
|
|
if (status != PJ_SUCCESS)
|
|
@@ -1206,7 +1218,7 @@ PJ_DEF(pj_status_t) pj_turn_session_connection_bind(
|
|
pj_sockaddr_cp(&conn_bind->peer_addr, peer_addr);
|
|
conn_bind->peer_addr_len = addr_len;
|
|
|
|
- /* Send the request, associate connection data structure with tdata
|
|
+ /* Send the request, associate connection data structure with tdata
|
|
* for future reference when we receive the ConnectionBind response.
|
|
*/
|
|
status = pj_stun_session_send_msg(sess->stun, conn_bind, PJ_FALSE,
|
|
@@ -1259,7 +1271,7 @@ PJ_DEF(pj_status_t) pj_turn_session_on_rx_pkt(pj_turn_session *sess,
|
|
{
|
|
pj_turn_session_on_rx_pkt_param prm;
|
|
pj_status_t status;
|
|
-
|
|
+
|
|
pj_bzero(&prm, sizeof(prm));
|
|
prm.pkt = pkt;
|
|
prm.pkt_len = pkt_len;
|
|
@@ -1349,7 +1361,7 @@ PJ_DEF(pj_status_t) pj_turn_session_on_rx_pkt2(
|
|
|
|
/* Notify application */
|
|
if (sess->cb.on_rx_data) {
|
|
- (*sess->cb.on_rx_data)(sess, ((pj_uint8_t*)prm->pkt)+sizeof(cd),
|
|
+ (*sess->cb.on_rx_data)(sess, ((pj_uint8_t*)prm->pkt)+sizeof(cd),
|
|
cd.length, &ch->addr,
|
|
pj_sockaddr_get_len(&ch->addr));
|
|
}
|
|
@@ -1394,7 +1406,7 @@ static pj_status_t stun_on_send_msg(pj_stun_session *stun,
|
|
* Handle failed ALLOCATE or REFRESH request. This may switch to alternate
|
|
* server if we have one.
|
|
*/
|
|
-static void on_session_fail( pj_turn_session *sess,
|
|
+static void on_session_fail( pj_turn_session *sess,
|
|
enum pj_stun_method_e method,
|
|
pj_status_t status,
|
|
const pj_str_t *reason)
|
|
@@ -1415,12 +1427,12 @@ static void on_session_fail( pj_turn_session *sess,
|
|
pj_stun_get_method_name(method),
|
|
(int)reason->slen, reason->ptr));
|
|
|
|
- /* If this is ALLOCATE response and we don't have more server
|
|
+ /* If this is ALLOCATE response and we don't have more server
|
|
* addresses to try, notify application and destroy the TURN
|
|
* session.
|
|
*/
|
|
if (method==PJ_STUN_ALLOCATE_METHOD &&
|
|
- sess->srv_addr == &sess->srv_addr_list[sess->srv_addr_cnt-1])
|
|
+ sess->srv_addr == &sess->srv_addr_list[sess->srv_addr_cnt-1])
|
|
{
|
|
|
|
set_state(sess, PJ_TURN_STATE_DEALLOCATED);
|
|
@@ -1451,7 +1463,7 @@ static void on_session_fail( pj_turn_session *sess,
|
|
/*
|
|
* Handle successful response to ALLOCATE or REFRESH request.
|
|
*/
|
|
-static void on_allocate_success(pj_turn_session *sess,
|
|
+static void on_allocate_success(pj_turn_session *sess,
|
|
enum pj_stun_method_e method,
|
|
const pj_stun_msg *msg)
|
|
{
|
|
@@ -1528,10 +1540,10 @@ static void on_allocate_success(pj_turn_session *sess,
|
|
"RELAY-ADDRESS attribute"));
|
|
return;
|
|
}
|
|
-
|
|
+
|
|
/* Save relayed address */
|
|
if (raddr_attr) {
|
|
- /* If we already have relay address, check if the relay address
|
|
+ /* If we already have relay address, check if the relay address
|
|
* in the response matches our relay address.
|
|
*/
|
|
if (pj_sockaddr_has_addr(&sess->relay_addr)) {
|
|
@@ -1543,7 +1555,7 @@ static void on_allocate_success(pj_turn_session *sess,
|
|
}
|
|
} else {
|
|
/* Otherwise save the relayed address */
|
|
- pj_memcpy(&sess->relay_addr, &raddr_attr->sockaddr,
|
|
+ pj_memcpy(&sess->relay_addr, &raddr_attr->sockaddr,
|
|
sizeof(pj_sockaddr));
|
|
}
|
|
}
|
|
@@ -1591,7 +1603,7 @@ static void stun_on_request_complete(pj_stun_session *stun,
|
|
unsigned src_addr_len)
|
|
{
|
|
pj_turn_session *sess;
|
|
- enum pj_stun_method_e method = (enum pj_stun_method_e)
|
|
+ enum pj_stun_method_e method = (enum pj_stun_method_e)
|
|
PJ_STUN_GET_METHOD(tdata->msg->hdr.type);
|
|
|
|
PJ_UNUSED_ARG(src_addr);
|
|
@@ -1612,8 +1624,8 @@ static void stun_on_request_complete(pj_stun_session *stun,
|
|
}
|
|
|
|
/* Handle ALLOCATE response */
|
|
- if (status==PJ_SUCCESS &&
|
|
- PJ_STUN_IS_SUCCESS_RESPONSE(response->hdr.type))
|
|
+ if (status==PJ_SUCCESS &&
|
|
+ PJ_STUN_IS_SUCCESS_RESPONSE(response->hdr.type))
|
|
{
|
|
|
|
/* Successful Allocate response */
|
|
@@ -1641,8 +1653,8 @@ static void stun_on_request_complete(pj_stun_session *stun,
|
|
|
|
} else if (method == PJ_STUN_REFRESH_METHOD) {
|
|
/* Handle Refresh response */
|
|
- if (status==PJ_SUCCESS &&
|
|
- PJ_STUN_IS_SUCCESS_RESPONSE(response->hdr.type))
|
|
+ if (status==PJ_SUCCESS &&
|
|
+ PJ_STUN_IS_SUCCESS_RESPONSE(response->hdr.type))
|
|
{
|
|
/* Success, schedule next refresh. */
|
|
on_allocate_success(sess, method, response);
|
|
@@ -1670,8 +1682,8 @@ static void stun_on_request_complete(pj_stun_session *stun,
|
|
|
|
} else if (method == PJ_STUN_CHANNEL_BIND_METHOD) {
|
|
/* Handle ChannelBind response */
|
|
- if (status==PJ_SUCCESS &&
|
|
- PJ_STUN_IS_SUCCESS_RESPONSE(response->hdr.type))
|
|
+ if (status==PJ_SUCCESS &&
|
|
+ PJ_STUN_IS_SUCCESS_RESPONSE(response->hdr.type))
|
|
{
|
|
/* Successful ChannelBind response */
|
|
struct ch_t *ch = (struct ch_t*)token;
|
|
@@ -1720,8 +1732,8 @@ static void stun_on_request_complete(pj_stun_session *stun,
|
|
|
|
} else if (method == PJ_STUN_CREATE_PERM_METHOD) {
|
|
/* Handle CreatePermission response */
|
|
- if (status==PJ_SUCCESS &&
|
|
- PJ_STUN_IS_SUCCESS_RESPONSE(response->hdr.type))
|
|
+ if (status==PJ_SUCCESS &&
|
|
+ PJ_STUN_IS_SUCCESS_RESPONSE(response->hdr.type))
|
|
{
|
|
/* No special handling when the request is successful. */
|
|
} else {
|
|
@@ -1740,7 +1752,7 @@ static void stun_on_request_complete(pj_stun_session *stun,
|
|
const pj_stun_errcode_attr *eattr;
|
|
|
|
eattr = (const pj_stun_errcode_attr*)
|
|
- pj_stun_msg_find_attr(response,
|
|
+ pj_stun_msg_find_attr(response,
|
|
PJ_STUN_ATTR_ERROR_CODE, 0);
|
|
if (eattr) {
|
|
err_code = eattr->err_code;
|
|
@@ -1761,9 +1773,9 @@ static void stun_on_request_complete(pj_stun_session *stun,
|
|
it = pj_hash_next(sess->perm_table, it);
|
|
|
|
if (perm->req_token == token) {
|
|
- PJ_LOG(1,(sess->obj_name,
|
|
+ PJ_LOG(1,(sess->obj_name,
|
|
"CreatePermission failed for IP %s: %d/%.*s",
|
|
- pj_sockaddr_print(&perm->addr, ipstr,
|
|
+ pj_sockaddr_print(&perm->addr, ipstr,
|
|
sizeof(ipstr), 2),
|
|
err_code, (int)reason.slen, reason.ptr));
|
|
|
|
@@ -1784,7 +1796,7 @@ static void stun_on_request_complete(pj_stun_session *stun,
|
|
struct conn_bind_t *conn_bind = (struct conn_bind_t*)token;
|
|
|
|
if (status != PJ_SUCCESS ||
|
|
- !PJ_STUN_IS_SUCCESS_RESPONSE(response->hdr.type))
|
|
+ !PJ_STUN_IS_SUCCESS_RESPONSE(response->hdr.type))
|
|
{
|
|
pj_str_t reason = {0};
|
|
if (status == PJ_SUCCESS) {
|
|
@@ -1898,7 +1910,7 @@ static pj_status_t stun_on_rx_indication(pj_stun_session *stun,
|
|
|
|
/* Must have both XOR-PEER-ADDRESS and CONNECTION-ID attributes */
|
|
if (!peer_attr || !connection_id_attr) {
|
|
- PJ_LOG(4,(sess->obj_name,
|
|
+ PJ_LOG(4,(sess->obj_name,
|
|
"Received ConnectionAttempt indication with missing "
|
|
"attributes"));
|
|
return PJ_EINVALIDOP;
|
|
@@ -1940,14 +1952,14 @@ static pj_status_t stun_on_rx_indication(pj_stun_session *stun,
|
|
|
|
/* Must have both XOR-PEER-ADDRESS and DATA attributes */
|
|
if (!peer_attr || !data_attr) {
|
|
- PJ_LOG(4,(sess->obj_name,
|
|
+ PJ_LOG(4,(sess->obj_name,
|
|
"Received Data indication with missing attributes"));
|
|
return PJ_EINVALIDOP;
|
|
}
|
|
|
|
/* Notify application */
|
|
if (sess->cb.on_rx_data) {
|
|
- (*sess->cb.on_rx_data)(sess, data_attr->data, data_attr->length,
|
|
+ (*sess->cb.on_rx_data)(sess, data_attr->data, data_attr->length,
|
|
&peer_attr->sockaddr,
|
|
pj_sockaddr_get_len(&peer_attr->sockaddr));
|
|
}
|
|
@@ -1985,15 +1997,15 @@ static void dns_srv_resolver_cb(void *user_data,
|
|
|
|
/* Allocate server entries */
|
|
sess->srv_addr_list = (pj_sockaddr*)
|
|
- pj_pool_calloc(sess->pool, tot_cnt,
|
|
+ pj_pool_calloc(sess->pool, tot_cnt,
|
|
sizeof(pj_sockaddr));
|
|
|
|
/* Copy results to server entries */
|
|
for (i=0, cnt=0; i<rec->count && cnt<PJ_TURN_MAX_DNS_SRV_CNT; ++i) {
|
|
unsigned j;
|
|
|
|
- for (j=0; j<rec->entry[i].server.addr_count &&
|
|
- cnt<PJ_TURN_MAX_DNS_SRV_CNT; ++j)
|
|
+ for (j=0; j<rec->entry[i].server.addr_count &&
|
|
+ cnt<PJ_TURN_MAX_DNS_SRV_CNT; ++j)
|
|
{
|
|
if (rec->entry[i].server.addr[j].af == sess->af) {
|
|
pj_sockaddr *addr = &sess->srv_addr_list[cnt];
@@ -2041,7 +2053,7 @@ static struct ch_t *lookup_ch_by_addr(pj_turn_session *sess,
pj_uint32_t hval = 0;
struct ch_t *ch;
- ch = (struct ch_t*)
+ ch = (struct ch_t*)
pj_hash_get(sess->ch_table, addr, addr_len, &hval);
if (ch == NULL && update) {
ch = PJ_POOL_ZALLOC_T(sess->pool, struct ch_t);
@@ -2062,7 +2074,7 @@ static struct ch_t *lookup_ch_by_addr(pj_turn_session *sess,
/* Register by channel number */
pj_assert(ch->num != PJ_TURN_INVALID_CHANNEL && ch->bound);
- if (pj_hash_get(sess->ch_table, &ch->num,
+ if (pj_hash_get(sess->ch_table, &ch->num,
sizeof(ch->num), &hval2)==0) {
pj_hash_set(sess->pool, sess->ch_table, &ch->num,
sizeof(ch->num), hval2, ch);
@@ -2089,7 +2101,7 @@ static struct ch_t *lookup_ch_by_addr(pj_turn_session *sess,
static struct ch_t *lookup_ch_by_chnum(pj_turn_session *sess,
pj_uint16_t chnum)
{
- return (struct ch_t*) pj_hash_get(sess->ch_table, &chnum,
+ return (struct ch_t*) pj_hash_get(sess->ch_table, &chnum,
sizeof(chnum), NULL);
}
@@ -2114,7 +2126,7 @@ static struct perm_t *lookup_perm(pj_turn_session *sess,
}
/* lookup and create if it doesn't exist and wanted */
- perm = (struct perm_t*)
+ perm = (struct perm_t*)
pj_hash_get(sess->perm_table, addr, addr_len, &hval);
if (perm == NULL && update) {
perm = PJ_POOL_ZALLOC_T(sess->pool, struct perm_t);
@@ -2147,7 +2159,7 @@ static void invalidate_perm(pj_turn_session *sess,
/*
* Scan permission's hash table to refresh the permission.
*/
-static unsigned refresh_permissions(pj_turn_session *sess,
+static unsigned refresh_permissions(pj_turn_session *sess,
const pj_time_val *now)
{
pj_stun_tx_data *tdata = NULL;
@@ -2169,7 +2181,7 @@ static unsigned refresh_permissions(pj_turn_session *sess,
if (tdata == NULL) {
/* Create a bare CreatePermission request */
status = pj_stun_session_create_req(
- sess->stun,
+ sess->stun,
PJ_STUN_CREATE_PERM_REQUEST,
PJ_STUN_MAGIC, NULL, &tdata);
if (status != PJ_SUCCESS) {
@@ -2185,7 +2197,7 @@ static unsigned refresh_permissions(pj_turn_session *sess,
}
status = pj_stun_msg_add_sockaddr_attr(
- tdata->pool,
+ tdata->pool,
tdata->msg,
PJ_STUN_ATTR_XOR_PEER_ADDR,
PJ_TRUE,
@@ -2211,10 +2223,10 @@ static unsigned refresh_permissions(pj_turn_session *sess,
}
if (tdata) {
- status = pj_stun_session_send_msg(sess->stun, req_token, PJ_FALSE,
+ status = pj_stun_session_send_msg(sess->stun, req_token, PJ_FALSE,
(sess->conn_type==PJ_TURN_TP_UDP),
sess->srv_addr,
- pj_sockaddr_get_len(sess->srv_addr),
+ pj_sockaddr_get_len(sess->srv_addr),
tdata);
if (status != PJ_SUCCESS) {
PJ_PERROR(1,(sess->obj_name, status,
@@ -2241,7 +2253,7 @@ static void on_timer_event(pj_timer_heap_t *th, pj_timer_entry *e)
eid = (enum timer_id_t) e->id;
e->id = TIMER_NONE;
-
+
if (eid == TIMER_KEEP_ALIVE) {
pj_time_val now;
pj_hash_iterator_t itbuf, *it;
@@ -2270,11 +2282,11 @@ static void on_timer_event(pj_timer_heap_t *th, pj_timer_entry *e)
/* Scan hash table to refresh bound channels */
it = pj_hash_first(sess->ch_table, &itbuf);
while (it) {
- struct ch_t *ch = (struct ch_t*)
+ struct ch_t *ch = (struct ch_t*)
pj_hash_this(sess->ch_table, it);
if (ch->bound && PJ_TIME_VAL_LTE(ch->expiry, now)) {
- /* Send ChannelBind to refresh channel binding and
+ /* Send ChannelBind to refresh channel binding and
* permission.
*/
pj_turn_session_bind_channel(sess, &ch->addr,
@@ -2297,7 +2309,7 @@ static void on_timer_event(pj_timer_heap_t *th, pj_timer_entry *e)
pj_status_t rc;
/* Create blank SEND-INDICATION */
- rc = pj_stun_session_create_ind(sess->stun,
+ rc = pj_stun_session_create_ind(sess->stun,
PJ_STUN_SEND_INDICATION, &tdata);
if (rc == PJ_SUCCESS) {
/* Add DATA attribute with zero length */
@@ -2305,7 +2317,7 @@ static void on_timer_event(pj_timer_heap_t *th, pj_timer_entry *e)
PJ_STUN_ATTR_DATA, NULL, 0);
/* Send the indication */
- pj_stun_session_send_msg(sess->stun, NULL, PJ_FALSE,
+ pj_stun_session_send_msg(sess->stun, NULL, PJ_FALSE,
PJ_FALSE, sess->srv_addr,
pj_sockaddr_get_len(sess->srv_addr),
tdata);
diff --git a/pjnath/src/pjnath/turn_sock.c b/pjnath/src/pjnath/turn_sock.c
|
|
index e273e6e28..ada864c85 100644
|
|
--- a/pjnath/src/pjnath/turn_sock.c
|
|
+++ b/pjnath/src/pjnath/turn_sock.c
|
|
@@ -1,4 +1,4 @@
|
|
-/*
|
|
+/*
|
|
* Copyright (C) 2008-2011 Teluu Inc. (http://www.teluu.com)
|
|
* Copyright (C) 2003-2008 Benny Prijono <benny@prijono.org>
|
|
*
|
|
@@ -14,7 +14,7 @@
|
|
*
|
|
* You should have received a copy of the GNU General Public License
|
|
* along with this program; if not, write to the Free Software
|
|
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
|
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
|
*/
|
|
#include <pjnath/turn_sock.h>
|
|
#include <pj/activesock.h>
|
|
@@ -99,6 +99,10 @@ struct pj_turn_sock
|
|
/* Data connection, when peer_conn_type==PJ_TURN_TP_TCP (RFC 6062) */
|
|
unsigned data_conn_cnt;
|
|
tcp_data_conn_t data_conn[PJ_TURN_MAX_TCP_CONN_CNT];
|
|
+
|
|
+ /* The following variables are used by the on_data_sent callback */
|
|
+ unsigned current_pkt_len;
|
|
+ unsigned current_body_len;
|
|
};
|
|
|
|
|
|
@@ -115,6 +119,13 @@ static pj_status_t turn_on_stun_send_pkt(pj_turn_session *sess,
|
|
unsigned pkt_len,
|
|
const pj_sockaddr_t *dst_addr,
|
|
unsigned dst_addr_len);
|
|
+static pj_status_t turn_on_send_pkt2(pj_turn_session *sess,
|
|
+ const pj_uint8_t *pkt,
|
|
+ unsigned pkt_len,
|
|
+ const pj_sockaddr_t *dst_addr,
|
|
+ unsigned dst_addr_len,
|
|
+ unsigned *sent,
|
|
+ unsigned body_len);
|
|
static void turn_on_channel_bound(pj_turn_session *sess,
|
|
const pj_sockaddr_t *peer_addr,
|
|
unsigned addr_len,
|
|
@@ -124,7 +135,7 @@ static void turn_on_rx_data(pj_turn_session *sess,
|
|
unsigned pkt_len,
|
|
const pj_sockaddr_t *peer_addr,
|
|
unsigned addr_len);
|
|
-static void turn_on_state(pj_turn_session *sess,
|
|
+static void turn_on_state(pj_turn_session *sess,
|
|
pj_turn_state_t old_state,
|
|
pj_turn_state_t new_state);
|
|
static void turn_on_connection_attempt(pj_turn_session *sess,
|
|
@@ -252,7 +263,7 @@ PJ_DEF(void) pj_turn_sock_tls_cfg_wipe_keys(pj_turn_sock_tls_cfg *tls_cfg)
|
|
wipe_buf(&tls_cfg->password);
|
|
wipe_buf(&tls_cfg->ca_buf);
|
|
wipe_buf(&tls_cfg->cert_buf);
|
|
- wipe_buf(&tls_cfg->privkey_buf);
|
|
+ wipe_buf(&tls_cfg->privkey_buf);
|
|
}
|
|
#endif
|
|
|
|
@@ -349,6 +360,7 @@ PJ_DEF(pj_status_t) pj_turn_sock_create(pj_stun_config *cfg,
|
|
pj_bzero(&sess_cb, sizeof(sess_cb));
|
|
sess_cb.on_send_pkt = &turn_on_send_pkt;
|
|
sess_cb.on_stun_send_pkt = &turn_on_stun_send_pkt;
|
|
+ sess_cb.on_send_pkt2 = &turn_on_send_pkt2;
|
|
sess_cb.on_channel_bound = &turn_on_channel_bound;
|
|
sess_cb.on_rx_data = &turn_on_rx_data;
|
|
sess_cb.on_state = &turn_on_state;
|
|
@@ -545,7 +557,7 @@ PJ_DEF(pj_status_t) pj_turn_sock_unlock(pj_turn_sock *turn_sock)
|
|
}
|
|
|
|
/*
|
|
- * Set STUN message logging for this TURN session.
|
|
+ * Set STUN message logging for this TURN session.
|
|
*/
|
|
PJ_DEF(void) pj_turn_sock_set_log( pj_turn_sock *turn_sock,
|
|
unsigned flags)
|
|
@@ -579,7 +591,7 @@ PJ_DEF(pj_status_t) pj_turn_sock_alloc(pj_turn_sock *turn_sock,
|
|
|
|
pj_grp_lock_acquire(turn_sock->grp_lock);
|
|
|
|
- /* Copy alloc param. We will call session_alloc() only after the
|
|
+ /* Copy alloc param. We will call session_alloc() only after the
|
|
* server address has been resolved.
|
|
*/
|
|
if (param) {
|
|
@@ -643,7 +655,7 @@ PJ_DEF(pj_status_t) pj_turn_sock_set_perm( pj_turn_sock *turn_sock,
|
|
|
|
/*
|
|
* Send packet.
|
|
- */
|
|
+ */
|
|
PJ_DEF(pj_status_t) pj_turn_sock_sendto( pj_turn_sock *turn_sock,
|
|
const pj_uint8_t *pkt,
|
|
unsigned pkt_len,
|
|
@@ -659,10 +671,26 @@ PJ_DEF(pj_status_t) pj_turn_sock_sendto( pj_turn_sock *turn_sock,
|
|
* to store our actual data length to be sent here.
|
|
*/
|
|
turn_sock->body_len = pkt_len;
|
|
- return pj_turn_session_sendto(turn_sock->sess, pkt, pkt_len,
|
|
+ return pj_turn_session_sendto(turn_sock->sess, pkt, pkt_len,
|
|
addr, addr_len);
|
|
}
|
|
|
|
+PJ_DEF(pj_status_t) pj_turn_sock_sendto2( pj_turn_sock *turn_sock,
|
|
+ const pj_uint8_t *pkt,
|
|
+ unsigned pkt_len,
|
|
+ const pj_sockaddr_t *addr,
|
|
+ unsigned addr_len,
|
|
+ unsigned *sent)
|
|
+{
|
|
+ PJ_ASSERT_RETURN(turn_sock && addr && addr_len, PJ_EINVAL);
|
|
+
|
|
+ if (turn_sock->sess == NULL)
|
|
+ return PJ_EINVALIDOP;
|
|
+
|
|
+ return pj_turn_session_sendto2(turn_sock->sess, pkt, pkt_len,
|
|
+ addr, addr_len, sent);
|
|
+}
|
|
+
|
|
/*
|
|
* Bind a peer address to a channel number.
|
|
*/
|
|
@@ -759,10 +787,10 @@ static pj_bool_t on_connect_complete(pj_turn_sock *turn_sock,
|
|
}
|
|
|
|
/* Kick start pending read operation */
|
|
- if (turn_sock->conn_type != PJ_TURN_TP_TLS)
|
|
- status = pj_activesock_start_read(turn_sock->active_sock,
|
|
+ if (turn_sock->conn_type != PJ_TURN_TP_TLS)
|
|
+ status = pj_activesock_start_read(turn_sock->active_sock,
|
|
turn_sock->pool,
|
|
- turn_sock->setting.max_pkt_size,
|
|
+ turn_sock->setting.max_pkt_size,
|
|
0);
|
|
#if PJ_HAS_SSL_SOCK
|
|
else
|
|
@@ -854,7 +882,7 @@ static unsigned has_packet(pj_turn_sock *turn_sock, const void *buf, pj_size_t b
|
|
pj_memcpy(&cd, buf, sizeof(pj_turn_channel_data));
|
|
cd.length = pj_ntohs(cd.length);
|
|
|
|
- if (bufsize >= cd.length+sizeof(cd))
|
|
+ if (bufsize >= cd.length+sizeof(cd))
|
|
return (cd.length+sizeof(cd)+3) & (~3);
|
|
else
|
|
return 0;
|
|
@@ -880,18 +908,18 @@ static pj_bool_t on_data_read(pj_turn_sock *turn_sock,
|
|
*/
|
|
unsigned pkt_len;
|
|
|
|
- //PJ_LOG(5,(turn_sock->pool->obj_name,
|
|
+ //PJ_LOG(5,(turn_sock->pool->obj_name,
|
|
// "Incoming data, %lu bytes total buffer", size));
|
|
|
|
while ((pkt_len=has_packet(turn_sock, data, size)) != 0) {
|
|
pj_size_t parsed_len;
|
|
//const pj_uint8_t *pkt = (const pj_uint8_t*)data;
|
|
|
|
- //PJ_LOG(5,(turn_sock->pool->obj_name,
|
|
- // "Packet start: %02X %02X %02X %02X",
|
|
+ //PJ_LOG(5,(turn_sock->pool->obj_name,
|
|
+ // "Packet start: %02X %02X %02X %02X",
|
|
// pkt[0], pkt[1], pkt[2], pkt[3]));
|
|
|
|
- //PJ_LOG(5,(turn_sock->pool->obj_name,
|
|
+ //PJ_LOG(5,(turn_sock->pool->obj_name,
|
|
// "Processing %lu bytes packet of %lu bytes total buffer",
|
|
// pkt_len, size));
|
|
|
|
@@ -912,7 +940,7 @@ static pj_bool_t on_data_read(pj_turn_sock *turn_sock,
|
|
}
|
|
size = *remainder;
|
|
|
|
- //PJ_LOG(5,(turn_sock->pool->obj_name,
|
|
+ //PJ_LOG(5,(turn_sock->pool->obj_name,
|
|
// "Buffer size now %lu bytes", size));
|
|
}
|
|
} else if (status != PJ_SUCCESS) {
|
|
@@ -956,12 +984,7 @@ static pj_bool_t on_data_sent(pj_turn_sock *turn_sock,
|
|
}
|
|
|
|
if (turn_sock->cb.on_data_sent) {
|
|
- pj_ssize_t header_len, sent_size;
|
|
-
|
|
- /* Remove the length of packet header from sent size. */
|
|
- header_len = turn_sock->pkt_len - turn_sock->body_len;
|
|
- sent_size = (sent > header_len)? (sent - header_len) : 0;
|
|
- (*turn_sock->cb.on_data_sent)(turn_sock, sent_size);
|
|
+ (*turn_sock->cb.on_data_sent)(turn_sock, sent);
|
|
}
|
|
|
|
return PJ_TRUE;
|
|
@@ -1028,7 +1051,7 @@ static pj_status_t send_pkt(pj_turn_session *sess,
|
|
const pj_sockaddr_t *dst_addr,
|
|
unsigned dst_addr_len)
|
|
{
|
|
- pj_turn_sock *turn_sock = (pj_turn_sock*)
|
|
+ pj_turn_sock *turn_sock = (pj_turn_sock*)
|
|
pj_turn_session_get_user_data(sess);
|
|
pj_ssize_t len = pkt_len;
|
|
pj_status_t status = PJ_SUCCESS;
|
|
@@ -1108,6 +1131,74 @@ static pj_status_t turn_on_send_pkt(pj_turn_session *sess,
|
|
dst_addr, dst_addr_len);
|
|
}
|
|
|
|
+static pj_status_t turn_on_send_pkt2(pj_turn_session *sess,
|
|
+ const pj_uint8_t *pkt,
|
|
+ unsigned pkt_len,
|
|
+ const pj_sockaddr_t *dst_addr,
|
|
+ unsigned dst_addr_len,
|
|
+ unsigned *sent,
|
|
+ unsigned body_len)
|
|
+{
|
|
+ *sent = pkt_len;
|
|
+ pj_turn_sock *turn_sock = (pj_turn_sock*)pj_turn_session_get_user_data(sess);
|
|
+ pj_status_t status = PJ_SUCCESS;
|
|
+
|
|
+ pj_ssize_t len = pkt_len;
|
|
+ turn_sock->current_body_len = body_len;
|
|
+ turn_sock->current_pkt_len = pkt_len;
|
|
+
|
|
+ if (turn_sock == NULL || turn_sock->is_destroying) {
|
|
+ /* We've been destroyed */
|
|
+ // https://trac.pjsip.org/repos/ticket/1316
|
|
+ //pj_assert(!"We should shutdown gracefully");
|
|
+ return PJ_EINVALIDOP;
|
|
+ }
|
|
+
|
|
+ if (turn_sock->conn_type == PJ_TURN_TP_UDP) {
|
|
+ status = pj_activesock_sendto(turn_sock->active_sock,
|
|
+ &turn_sock->send_key, pkt, &len, 0,
|
|
+ dst_addr, dst_addr_len);
|
|
+ } else if (turn_sock->alloc_param.peer_conn_type == PJ_TURN_TP_TCP) {
|
|
+ pj_turn_session_info info;
|
|
+ pj_turn_session_get_info(turn_sock->sess, &info);
|
|
+ if (pj_sockaddr_cmp(&info.server, dst_addr) == 0) {
|
|
+ /* Destination address is TURN server */
|
|
+ status = pj_activesock_send(turn_sock->active_sock,
|
|
+ &turn_sock->send_key, pkt, &len, 0);
|
|
+ } else {
|
|
+ /* Destination address is peer, lookup data connection */
|
|
+ unsigned i;
|
|
+
|
|
+ status = PJ_ENOTFOUND;
|
|
+ for (i=0; i < PJ_TURN_MAX_TCP_CONN_CNT; ++i) {
|
|
+ tcp_data_conn_t *conn = &turn_sock->data_conn[i];
|
|
+ if (conn->state < DATACONN_STATE_CONN_BINDING)
|
|
+ continue;
|
|
+ if (pj_sockaddr_cmp(&conn->peer_addr, dst_addr) == 0) {
|
|
+ status = pj_activesock_send(conn->asock,
|
|
+ &conn->send_key,
|
|
+ pkt, &len, 0);
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+ } else {
|
|
+ status = pj_activesock_send(turn_sock->active_sock,
|
|
+ &turn_sock->send_key, pkt, &len, 0);
|
|
+ }
|
|
+
|
|
+ if (status != PJ_SUCCESS && status != PJ_EPENDING) {
|
|
+ show_err(turn_sock, "socket send()", status);
|
|
+ }
|
|
+
|
|
+ // Remove header from sent size.
|
|
+ // The application only wants to know if the packet is actually sent.
|
|
+ unsigned header_len = pkt_len - body_len;
|
|
+ *sent = (len > header_len)? (len - header_len) : 0;
|
|
+
|
|
+ return status;
|
|
+ }
|
|
+
|
|
static pj_status_t turn_on_stun_send_pkt(pj_turn_session *sess,
|
|
const pj_uint8_t *pkt,
|
|
unsigned pkt_len,
|
|
@@ -1143,7 +1234,7 @@ static void turn_on_rx_data(pj_turn_session *sess,
|
|
const pj_sockaddr_t *peer_addr,
|
|
unsigned addr_len)
|
|
{
|
|
- pj_turn_sock *turn_sock = (pj_turn_sock*)
|
|
+ pj_turn_sock *turn_sock = (pj_turn_sock*)
|
|
pj_turn_session_get_user_data(sess);
|
|
if (turn_sock == NULL || turn_sock->is_destroying) {
|
|
/* We've been destroyed */
|
|
@@ -1156,7 +1247,7 @@ static void turn_on_rx_data(pj_turn_session *sess,
|
|
}
|
|
|
|
if (turn_sock->cb.on_rx_data) {
|
|
- (*turn_sock->cb.on_rx_data)(turn_sock, pkt, pkt_len,
|
|
+ (*turn_sock->cb.on_rx_data)(turn_sock, pkt, pkt_len,
|
|
peer_addr, addr_len);
|
|
}
|
|
}
|
|
@@ -1165,11 +1256,11 @@ static void turn_on_rx_data(pj_turn_session *sess,
|
|
/*
|
|
* Callback from TURN session when state has changed
|
|
*/
|
|
-static void turn_on_state(pj_turn_session *sess,
|
|
+static void turn_on_state(pj_turn_session *sess,
|
|
pj_turn_state_t old_state,
|
|
pj_turn_state_t new_state)
|
|
{
|
|
- pj_turn_sock *turn_sock = (pj_turn_sock*)
|
|
+ pj_turn_sock *turn_sock = (pj_turn_sock*)
|
|
pj_turn_session_get_user_data(sess);
|
|
pj_status_t status = PJ_SUCCESS;
|
|
|
|
@@ -1210,8 +1301,8 @@ static void turn_on_state(pj_turn_session *sess,
|
|
* we're switching to alternate TURN server when either TCP
|
|
* connection or ALLOCATE request failed.
|
|
*/
|
|
- if ((turn_sock->conn_type != PJ_TURN_TP_TLS) &&
|
|
- (turn_sock->active_sock))
|
|
+ if ((turn_sock->conn_type != PJ_TURN_TP_TLS) &&
|
|
+ (turn_sock->active_sock))
|
|
{
|
|
pj_activesock_close(turn_sock->active_sock);
|
|
turn_sock->active_sock = NULL;
|
|
@@ -1241,7 +1332,7 @@ static void turn_on_state(pj_turn_session *sess,
|
|
max_bind_retry = turn_sock->setting.port_range;
|
|
}
|
|
pj_sockaddr_init(turn_sock->af, &bound_addr, NULL, 0);
|
|
- if (cfg_bind_addr->addr.sa_family == pj_AF_INET() ||
|
|
+ if (cfg_bind_addr->addr.sa_family == pj_AF_INET() ||
|
|
cfg_bind_addr->addr.sa_family == pj_AF_INET6())
|
|
{
|
|
pj_sockaddr_cp(&bound_addr, cfg_bind_addr);
|
|
@@ -1271,7 +1362,7 @@ static void turn_on_state(pj_turn_session *sess,
|
|
&turn_sock->setting.qos_params,
|
|
(turn_sock->setting.qos_ignore_error?2:1),
|
|
turn_sock->pool->obj_name, NULL);
|
|
- if (status != PJ_SUCCESS && !turn_sock->setting.qos_ignore_error)
|
|
+ if (status != PJ_SUCCESS && !turn_sock->setting.qos_ignore_error)
|
|
{
|
|
pj_sock_close(sock);
|
|
turn_sock_destroy(turn_sock, status);
|
|
@@ -1331,7 +1422,7 @@ static void turn_on_state(pj_turn_session *sess,
|
|
sock_type, &asock_cfg,
|
|
turn_sock->cfg.ioqueue, &asock_cb,
|
|
turn_sock,
|
|
- &turn_sock->active_sock);
|
|
+ &turn_sock->active_sock);
|
|
if (status != PJ_SUCCESS)
|
|
pj_sock_close(sock);
|
|
}
|
|
@@ -1436,8 +1527,8 @@ static void turn_on_state(pj_turn_session *sess,
|
|
}
|
|
|
|
PJ_LOG(5,(turn_sock->pool->obj_name,
|
|
- "Connecting to %s",
|
|
- pj_sockaddr_print(&info.server, addrtxt,
|
|
+ "Connecting to %s",
|
|
+ pj_sockaddr_print(&info.server, addrtxt,
|
|
sizeof(addrtxt), 3)));
|
|
|
|
/* Initiate non-blocking connect */
|
|
@@ -1447,12 +1538,12 @@ static void turn_on_state(pj_turn_session *sess,
|
|
#if PJ_HAS_TCP
|
|
else if (turn_sock->conn_type == PJ_TURN_TP_TCP) {
|
|
status=pj_activesock_start_connect(
|
|
- turn_sock->active_sock,
|
|
+ turn_sock->active_sock,
|
|
turn_sock->pool,
|
|
- &info.server,
|
|
+ &info.server,
|
|
pj_sockaddr_get_len(&info.server));
|
|
- }
|
|
-#endif
|
|
+ }
|
|
+#endif
|
|
#if PJ_HAS_SSL_SOCK
|
|
else if (turn_sock->conn_type == PJ_TURN_TP_TLS) {
|
|
pj_ssl_start_connect_param connect_param;
|
|
@@ -1478,7 +1569,7 @@ static void turn_on_state(pj_turn_session *sess,
|
|
return;
|
|
}
|
|
|
|
- /* Done for now. Subsequent work will be done in
|
|
+ /* Done for now. Subsequent work will be done in
|
|
* on_connect_complete() callback.
|
|
*/
|
|
}
|
|
@@ -1486,9 +1577,6 @@ static void turn_on_state(pj_turn_session *sess,
|
|
if (new_state >= PJ_TURN_STATE_DESTROYING && turn_sock->sess) {
|
|
pj_time_val delay = {0, 0};
|
|
|
|
- turn_sock->sess = NULL;
|
|
- pj_turn_session_set_user_data(sess, NULL);
|
|
-
|
|
pj_timer_heap_cancel_if_active(turn_sock->cfg.timer_heap,
|
|
&turn_sock->timer, 0);
|
|
pj_timer_heap_schedule_w_grp_lock(turn_sock->cfg.timer_heap,
|
|
@@ -1525,8 +1613,12 @@ static pj_bool_t dataconn_on_data_read(pj_activesock_t *asock,
|
|
|
|
if (size == 0 && status != PJ_SUCCESS) {
|
|
/* Connection gone, release data connection */
|
|
- dataconn_cleanup(conn);
|
|
- --turn_sock->data_conn_cnt;
|
|
+ if (conn->state == DATACONN_STATE_CONN_BINDING) {
|
|
+ // TODO cancel request (and do not cleanup there)
|
|
+ } else if (conn->state == DATACONN_STATE_READY) {
|
|
+ dataconn_cleanup(conn);
|
|
+ --turn_sock->data_conn_cnt;
|
|
+ }
|
|
pj_grp_lock_release(turn_sock->grp_lock);
|
|
return PJ_FALSE;
|
|
}
|
|
@@ -1592,6 +1684,14 @@ static pj_bool_t dataconn_on_connect_complete(pj_activesock_t *asock,
|
|
|
|
pj_grp_lock_acquire(turn_sock->grp_lock);
|
|
|
|
+ if (pj_turn_sock_get_user_data(turn_sock) == NULL) {
|
|
+ // It's possible for a TURN socket to be destroyed by ice_close_remaining_tcp
|
|
+ // after the on_connect_complete event has been put into an ioqueue, but
|
|
+ // before the callback is actually called, so we need to check for this.
|
|
+ PJ_LOG(4,(turn_sock->obj_name, "Socket is being destroyed, can't be used to establish a data connection"));
|
|
+ status = PJ_ECANCELLED;
|
|
+ }
|
|
+
|
|
if (status == PJ_SUCCESS) {
|
|
status = pj_activesock_start_read(asock, turn_sock->pool,
|
|
turn_sock->setting.max_pkt_size, 0);
|
|
@@ -1621,7 +1721,7 @@ static void turn_on_connection_attempt(pj_turn_session *sess,
|
|
const pj_sockaddr_t *peer_addr,
|
|
unsigned addr_len)
|
|
{
|
|
- pj_turn_sock *turn_sock = (pj_turn_sock*)
|
|
+ pj_turn_sock *turn_sock = (pj_turn_sock*)
|
|
pj_turn_session_get_user_data(sess);
|
|
pj_pool_t *pool;
|
|
tcp_data_conn_t *new_conn;
|
|
@@ -1796,7 +1896,7 @@ on_return:
|
|
pj_sockaddr_print(peer_addr, addrtxt, sizeof(addrtxt), 3));
|
|
|
|
if (!new_conn->asock && sock != PJ_INVALID_SOCKET)
|
|
- pj_sock_close(sock);
|
|
+ pj_sock_close(sock);
|
|
|
|
dataconn_cleanup(new_conn);
|
|
--turn_sock->data_conn_cnt;
|
|
@@ -1816,7 +1916,7 @@ static void turn_on_connection_bind_status(pj_turn_session *sess,
|
|
const pj_sockaddr_t *peer_addr,
|
|
unsigned addr_len)
|
|
{
|
|
- pj_turn_sock *turn_sock = (pj_turn_sock*)
|
|
+ pj_turn_sock *turn_sock = (pj_turn_sock*)
|
|
pj_turn_session_get_user_data(sess);
|
|
tcp_data_conn_t *conn = NULL;
|
|
unsigned i;
|
|
@@ -1860,7 +1960,7 @@ static void turn_on_connect_complete(pj_turn_session *sess,
|
|
const pj_sockaddr_t *peer_addr,
|
|
unsigned addr_len)
|
|
{
|
|
- pj_turn_sock *turn_sock = (pj_turn_sock*)
|
|
+ pj_turn_sock *turn_sock = (pj_turn_sock*)
|
|
pj_turn_session_get_user_data(sess);
|
|
pj_pool_t *pool;
|
|
tcp_data_conn_t *new_conn;
|
|
@@ -2030,7 +2130,7 @@ on_return:
|
|
pj_sockaddr_print(peer_addr, addrtxt, sizeof(addrtxt), 3));
|
|
|
|
if (!new_conn->asock && sock != PJ_INVALID_SOCKET)
|
|
- pj_sock_close(sock);
|
|
+ pj_sock_close(sock);
|
|
|
|
dataconn_cleanup(new_conn);
|
|
--turn_sock->data_conn_cnt;
|
|
@@ -2043,3 +2143,20 @@ on_return:
|
|
}
|
|
pj_grp_lock_release(turn_sock->grp_lock);
|
|
}
|
|
+
|
|
+pj_bool_t pj_turn_sock_has_dataconn(pj_turn_sock *turn_sock,
|
|
+ const pj_sockaddr_t *peer)
|
|
+{
|
|
+ if (!turn_sock) return PJ_FALSE;
|
|
+
|
|
+ for (int i = 0; i < turn_sock->data_conn_cnt; ++i) {
|
|
+ tcp_data_conn_t* dataconn = &turn_sock->data_conn[i];
|
|
+ if (dataconn) {
|
|
+ pj_sockaddr_t* conn_peer = &dataconn->peer_addr;
|
|
+ if (pj_sockaddr_cmp(conn_peer, peer) == 0)
|
|
+ return PJ_TRUE;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ return PJ_FALSE;
|
|
+}
|
|
diff --git a/pjnath/src/pjturn-client/client_main.c b/pjnath/src/pjturn-client/client_main.c
|
|
index 686a81e70..b18a7be32 100644
|
|
--- a/pjnath/src/pjturn-client/client_main.c
|
|
+++ b/pjnath/src/pjturn-client/client_main.c
|
|
@@ -1,4 +1,4 @@
|
|
-/*
|
|
+/*
|
|
* Copyright (C) 2008-2011 Teluu Inc. (http://www.teluu.com)
|
|
* Copyright (C) 2003-2008 Benny Prijono <benny@prijono.org>
|
|
*
|
|
@@ -14,7 +14,7 @@
|
|
*
|
|
* You should have received a copy of the GNU General Public License
|
|
* along with this program; if not, write to the Free Software
|
|
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
|
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
|
*/
|
|
#include <pjnath.h>
|
|
#include <pjlib-util.h>
|
|
@@ -78,7 +78,7 @@ static void turn_on_rx_data(pj_turn_sock *relay,
|
|
unsigned addr_len);
|
|
static void turn_on_state(pj_turn_sock *relay, pj_turn_state_t old_state,
|
|
pj_turn_state_t new_state);
|
|
-static pj_bool_t stun_sock_on_status(pj_stun_sock *stun_sock,
|
|
+static pj_bool_t stun_sock_on_status(pj_stun_sock *stun_sock,
|
|
pj_stun_sock_op op,
|
|
pj_status_t status);
|
|
static pj_bool_t stun_sock_on_rx_data(pj_stun_sock *stun_sock,
|
|
@@ -130,7 +130,7 @@ static int init()
|
|
/* Create global ioqueue */
|
|
CHECK( pj_ioqueue_create(g.pool, 16, &g.stun_config.ioqueue) );
|
|
|
|
- /*
|
|
+ /*
|
|
* Create peers
|
|
*/
|
|
for (i=0; i<(int)PJ_ARRAY_SIZE(g.peer); ++i) {
|
|
@@ -153,8 +153,8 @@ static int init()
|
|
#endif
|
|
|
|
name[strlen(name)-1] = '0'+i;
|
|
- status = pj_stun_sock_create(&g.stun_config, name, pj_AF_INET(),
|
|
- &stun_sock_cb, &ss_cfg,
|
|
+ status = pj_stun_sock_create(&g.stun_config, name, pj_AF_INET(),
|
|
+ PJ_STUN_TP_UDP, &stun_sock_cb, &ss_cfg,
|
|
&g.peer[i], &g.peer[i].stun_sock);
|
|
if (status != PJ_SUCCESS) {
|
|
my_perror("pj_stun_sock_create()", status);
|
|
@@ -168,7 +168,7 @@ static int init()
|
|
server = pj_str(o.srv_addr);
|
|
port = (pj_uint16_t)(o.srv_port?atoi(o.srv_port):PJ_STUN_PORT);
|
|
}
|
|
- status = pj_stun_sock_start(g.peer[i].stun_sock, &server,
|
|
+ status = pj_stun_sock_start(g.peer[i].stun_sock, &server,
|
|
port, NULL);
|
|
if (status != PJ_SUCCESS) {
|
|
my_perror("pj_stun_sock_start()", status);
|
|
@@ -257,8 +257,8 @@ static pj_status_t create_relay(void)
|
|
if (o.nameserver) {
|
|
pj_str_t ns = pj_str(o.nameserver);
|
|
|
|
- status = pj_dns_resolver_create(&g.cp.factory, "resolver", 0,
|
|
- g.stun_config.timer_heap,
|
|
+ status = pj_dns_resolver_create(&g.cp.factory, "resolver", 0,
|
|
+ g.stun_config.timer_heap,
|
|
g.stun_config.ioqueue, &g.resolver);
|
|
if (status != PJ_SUCCESS) {
|
|
PJ_LOG(1,(THIS_FILE, "Error creating resolver (err=%d)", status));
|
|
@@ -275,7 +275,7 @@ static pj_status_t create_relay(void)
|
|
pj_bzero(&rel_cb, sizeof(rel_cb));
|
|
rel_cb.on_rx_data = &turn_on_rx_data;
|
|
rel_cb.on_state = &turn_on_state;
|
|
- CHECK( pj_turn_sock_create(&g.stun_config, pj_AF_INET(),
|
|
+ CHECK( pj_turn_sock_create(&g.stun_config, pj_AF_INET(),
|
|
(o.use_tcp? PJ_TURN_TP_TCP : PJ_TURN_TP_UDP),
|
|
&rel_cb, 0,
|
|
NULL, &g.relay) );
|
|
@@ -332,7 +332,7 @@ static void turn_on_rx_data(pj_turn_sock *relay,
|
|
static void turn_on_state(pj_turn_sock *relay, pj_turn_state_t old_state,
|
|
pj_turn_state_t new_state)
|
|
{
|
|
- PJ_LOG(3,(THIS_FILE, "State %s --> %s", pj_turn_state_name(old_state),
|
|
+ PJ_LOG(3,(THIS_FILE, "State %s --> %s", pj_turn_state_name(old_state),
|
|
pj_turn_state_name(new_state)));
|
|
|
|
if (new_state == PJ_TURN_STATE_READY) {
|
|
@@ -345,7 +345,7 @@ static void turn_on_state(pj_turn_sock *relay, pj_turn_state_t old_state,
|
|
}
|
|
}
|
|
|
|
-static pj_bool_t stun_sock_on_status(pj_stun_sock *stun_sock,
|
|
+static pj_bool_t stun_sock_on_status(pj_stun_sock *stun_sock,
|
|
pj_stun_sock_op op,
|
|
pj_status_t status)
|
|
{
|
|
@@ -458,7 +458,7 @@ static void console_main(void)
|
|
|
|
if (fgets(input, sizeof(input), stdin) == NULL)
|
|
break;
|
|
-
|
|
+
|
|
switch (input[0]) {
|
|
case 'a':
|
|
create_relay();
|
|
@@ -477,9 +477,9 @@ static void console_main(void)
|
|
peer = &g.peer[1];
|
|
|
|
pj_ansi_strxcpy(input, "Hello from client", sizeof(input));
|
|
- status = pj_turn_sock_sendto(g.relay, (const pj_uint8_t*)input,
|
|
- strlen(input)+1,
|
|
- &peer->mapped_addr,
|
|
+ status = pj_turn_sock_sendto(g.relay, (const pj_uint8_t*)input,
|
|
+ strlen(input)+1,
|
|
+ &peer->mapped_addr,
|
|
pj_sockaddr_get_len(&peer->mapped_addr));
|
|
if (status != PJ_SUCCESS && status != PJ_EPENDING)
|
|
my_perror("turn_udp_sendto() failed", status);
|
|
@@ -622,10 +622,10 @@ int main(int argc, char *argv[])
|
|
|
|
if ((status=init()) != 0)
|
|
goto on_return;
|
|
-
|
|
+
|
|
//if ((status=create_relay()) != 0)
|
|
// goto on_return;
|
|
-
|
|
+
|
|
console_main();
|
|
|
|
on_return:
|
|
diff --git a/pjnath/src/pjturn-srv/allocation.c b/pjnath/src/pjturn-srv/allocation.c
|
|
index 640c87a3d..dc6b3ee0c 100644
|
|
--- a/pjnath/src/pjturn-srv/allocation.c
|
|
+++ b/pjnath/src/pjturn-srv/allocation.c
|
|
@@ -338,7 +338,7 @@ PJ_DEF(pj_status_t) pj_turn_allocation_create(pj_turn_transport *transport,
|
|
sess_cb.on_rx_request = &stun_on_rx_request;
|
|
sess_cb.on_rx_indication = &stun_on_rx_indication;
|
|
status = pj_stun_session_create(&srv->core.stun_cfg, alloc->obj_name,
|
|
- &sess_cb, PJ_FALSE, NULL, &alloc->sess);
|
|
+ &sess_cb, PJ_FALSE, NULL, &alloc->sess, PJ_STUN_TP_UDP);
|
|
if (status != PJ_SUCCESS) {
|
|
goto on_error;
|
|
}
|
|
diff --git a/pjnath/src/pjturn-srv/server.c b/pjnath/src/pjturn-srv/server.c
|
|
index 48b62ce13..997c84a9f 100644
|
|
--- a/pjnath/src/pjturn-srv/server.c
|
|
+++ b/pjnath/src/pjturn-srv/server.c
|
|
@@ -155,7 +155,7 @@ PJ_DEF(pj_status_t) pj_turn_srv_create(pj_pool_factory *pf,
|
|
|
|
status = pj_stun_session_create(&srv->core.stun_cfg, srv->obj_name,
|
|
&sess_cb, PJ_FALSE, NULL,
|
|
- &srv->core.stun_sess);
|
|
+ &srv->core.stun_sess, PJ_STUN_TP_UDP);
|
|
if (status != PJ_SUCCESS) {
|
|
goto on_error;
|
|
}
|
|
diff --git a/pjsip-apps/src/samples/icedemo.c b/pjsip-apps/src/samples/icedemo.c
|
|
index ed8f010a6..c10f86f87 100644
|
|
--- a/pjsip-apps/src/samples/icedemo.c
|
|
+++ b/pjsip-apps/src/samples/icedemo.c
|
|
@@ -1,4 +1,4 @@
|
|
-/*
|
|
+/*
|
|
* Copyright (C) 2008-2011 Teluu Inc. (http://www.teluu.com)
|
|
*
|
|
* This program is free software; you can redistribute it and/or modify
|
|
@@ -13,7 +13,7 @@
|
|
*
|
|
* You should have received a copy of the GNU General Public License
|
|
* along with this program; if not, write to the Free Software
|
|
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
|
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
|
*/
|
|
#include <stdio.h>
|
|
#include <stdlib.h>
|
|
@@ -43,6 +43,7 @@ static struct app_t
|
|
pj_str_t stun_srv;
|
|
pj_str_t turn_srv;
|
|
pj_bool_t turn_tcp;
|
|
+ pj_bool_t ice_tcp;
|
|
pj_str_t turn_username;
|
|
pj_str_t turn_password;
|
|
pj_bool_t turn_fingerprint;
|
|
@@ -92,7 +93,7 @@ static void err_exit(const char *title, pj_status_t status)
|
|
|
|
if (icedemo.icest)
|
|
pj_ice_strans_destroy(icedemo.icest);
|
|
-
|
|
+
|
|
pj_thread_sleep(500);
|
|
|
|
icedemo.thread_quit_flag = PJ_TRUE;
|
|
@@ -150,13 +151,13 @@ static pj_status_t handle_events(unsigned max_msec, unsigned *p_count)
|
|
pj_assert(timeout.sec >= 0 && timeout.msec >= 0);
|
|
if (timeout.msec >= 1000) timeout.msec = 999;
|
|
|
|
- /* compare the value with the timeout to wait from timer, and use the
|
|
- * minimum value.
|
|
+ /* compare the value with the timeout to wait from timer, and use the
|
|
+ * minimum value.
|
|
*/
|
|
if (PJ_TIME_VAL_GT(timeout, max_timeout))
|
|
timeout = max_timeout;
|
|
|
|
- /* Poll ioqueue.
|
|
+ /* Poll ioqueue.
|
|
* Repeat polling the ioqueue while we have immediate events, because
|
|
* timer heap may process more than one events, so if we only process
|
|
* one network events at a time (such as when IOCP backend is used),
|
|
@@ -212,7 +213,7 @@ static int icedemo_worker_thread(void *unused)
|
|
* as STUN connectivity checks or TURN signaling).
|
|
*/
|
|
static void cb_on_rx_data(pj_ice_strans *ice_st,
|
|
- unsigned comp_id,
|
|
+ unsigned comp_id,
|
|
void *pkt, pj_size_t size,
|
|
const pj_sockaddr_t *src_addr,
|
|
unsigned src_addr_len)
|
|
@@ -237,11 +238,11 @@ static void cb_on_rx_data(pj_ice_strans *ice_st,
|
|
* This is the callback that is registered to the ICE stream transport to
|
|
* receive notification about ICE state progression.
|
|
*/
|
|
-static void cb_on_ice_complete(pj_ice_strans *ice_st,
|
|
+static void cb_on_ice_complete(pj_ice_strans *ice_st,
|
|
pj_ice_strans_op op,
|
|
pj_status_t status)
|
|
{
|
|
- const char *opname =
|
|
+ const char *opname =
|
|
(op==PJ_ICE_STRANS_OP_INIT? "initialization" :
|
|
(op==PJ_ICE_STRANS_OP_NEGOTIATION ? "negotiation" : "unknown_op"));
|
|
|
|
@@ -269,7 +270,7 @@ static void log_func(int level, const char *data, int len)
|
|
|
|
/*
|
|
* This is the main application initialization function. It is called
|
|
- * once (and only once) during application initialization sequence by
|
|
+ * once (and only once) during application initialization sequence by
|
|
* main().
|
|
*/
|
|
static pj_status_t icedemo_init(void)
|
|
@@ -295,18 +296,18 @@ static pj_status_t icedemo_init(void)
|
|
icedemo.ice_cfg.stun_cfg.pf = &icedemo.cp.factory;
|
|
|
|
/* Create application memory pool */
|
|
- icedemo.pool = pj_pool_create(&icedemo.cp.factory, "icedemo",
|
|
+ icedemo.pool = pj_pool_create(&icedemo.cp.factory, "icedemo",
|
|
512, 512, NULL);
|
|
|
|
/* Create timer heap for timer stuff */
|
|
- CHECK( pj_timer_heap_create(icedemo.pool, 100,
|
|
+ CHECK( pj_timer_heap_create(icedemo.pool, 100,
|
|
&icedemo.ice_cfg.stun_cfg.timer_heap) );
|
|
|
|
/* and create ioqueue for network I/O stuff */
|
|
- CHECK( pj_ioqueue_create(icedemo.pool, 16,
|
|
+ CHECK( pj_ioqueue_create(icedemo.pool, 16,
|
|
&icedemo.ice_cfg.stun_cfg.ioqueue) );
|
|
|
|
- /* something must poll the timer heap and ioqueue,
|
|
+ /* something must poll the timer heap and ioqueue,
|
|
* unless we're on Symbian where the timer heap and ioqueue run
|
|
* on themselves.
|
|
*/
|
|
@@ -317,14 +318,14 @@ static pj_status_t icedemo_init(void)
|
|
|
|
/* Create DNS resolver if nameserver is set */
|
|
if (icedemo.opt.ns.slen) {
|
|
- CHECK( pj_dns_resolver_create(&icedemo.cp.factory,
|
|
- "resolver",
|
|
- 0,
|
|
+ CHECK( pj_dns_resolver_create(&icedemo.cp.factory,
|
|
+ "resolver",
|
|
+ 0,
|
|
icedemo.ice_cfg.stun_cfg.timer_heap,
|
|
- icedemo.ice_cfg.stun_cfg.ioqueue,
|
|
+ icedemo.ice_cfg.stun_cfg.ioqueue,
|
|
&icedemo.ice_cfg.resolver) );
|
|
|
|
- CHECK( pj_dns_resolver_set_ns(icedemo.ice_cfg.resolver, 1,
|
|
+ CHECK( pj_dns_resolver_set_ns(icedemo.ice_cfg.resolver, 1,
|
|
&icedemo.opt.ns, NULL) );
|
|
}
|
|
|
|
@@ -340,6 +341,12 @@ static pj_status_t icedemo_init(void)
|
|
else
|
|
icedemo.ice_cfg.opt.aggressive = PJ_TRUE;
|
|
|
|
+ /* Connection type to STUN server */
|
|
+ if (icedemo.opt.ice_tcp)
|
|
+ icedemo.ice_cfg.stun.conn_type = PJ_STUN_TP_TCP;
|
|
+ else
|
|
+ icedemo.ice_cfg.stun.conn_type = PJ_STUN_TP_UDP;
|
|
+
|
|
/* Configure STUN/srflx candidate resolution */
|
|
if (icedemo.opt.stun_srv.slen) {
|
|
char *pos;
|
|
@@ -394,6 +401,10 @@ static pj_status_t icedemo_init(void)
|
|
icedemo.ice_cfg.turn.alloc_param.ka_interval = KA_INTERVAL;
|
|
}
|
|
|
|
+ if (icedemo.opt.ice_tcp) {
|
|
+ icedemo.ice_cfg.protocol = PJ_ICE_TP_TCP;
|
|
+ }
|
|
+
|
|
/* -= That's it for now, initialization is complete =- */
|
|
return PJ_SUCCESS;
|
|
}
|
|
@@ -462,8 +473,8 @@ static void icedemo_destroy_instance(void)
|
|
*/
|
|
static void icedemo_init_session(unsigned rolechar)
|
|
{
|
|
- pj_ice_sess_role role = (pj_tolower((pj_uint8_t)rolechar)=='o' ?
|
|
- PJ_ICE_SESS_ROLE_CONTROLLING :
|
|
+ pj_ice_sess_role role = (pj_tolower((pj_uint8_t)rolechar)=='o' ?
|
|
+ PJ_ICE_SESS_ROLE_CONTROLLING :
|
|
PJ_ICE_SESS_ROLE_CONTROLLED);
|
|
pj_status_t status;
|
|
|
|
@@ -529,18 +540,36 @@ static int print_cand(char buffer[], unsigned maxlen,
|
|
char *p = buffer;
|
|
int printed;
|
|
|
|
- PRINT("a=candidate:%.*s %u UDP %u %s %u typ ",
|
|
+ PRINT("a=candidate:%.*s %u %s %u %s %u typ ",
|
|
(int)cand->foundation.slen,
|
|
cand->foundation.ptr,
|
|
(unsigned)cand->comp_id,
|
|
+ cand->transport == PJ_CAND_UDP? "UDP" : "TCP",
|
|
cand->prio,
|
|
- pj_sockaddr_print(&cand->addr, ipaddr,
|
|
+ pj_sockaddr_print(&cand->addr, ipaddr,
|
|
sizeof(ipaddr), 0),
|
|
(unsigned)pj_sockaddr_get_port(&cand->addr));
|
|
|
|
PRINT("%s\n",
|
|
pj_ice_get_cand_type_name(cand->type));
|
|
|
|
+ if (cand->transport != PJ_CAND_UDP) {
|
|
+ PRINT(" tcptype");
|
|
+ switch (cand->transport) {
|
|
+ case PJ_CAND_TCP_ACTIVE:
|
|
+ PRINT(" active");
|
|
+ break;
|
|
+ case PJ_CAND_TCP_PASSIVE:
|
|
+ PRINT(" passive");
|
|
+ break;
|
|
+ case PJ_CAND_TCP_SO:
|
|
+ default:
|
|
+ PRINT(" so");
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+ PRINT("\n");
|
|
+
|
|
if (p == buffer+maxlen)
|
|
return -PJ_ETOOSMALL;
|
|
|
|
@@ -549,7 +578,7 @@ static int print_cand(char buffer[], unsigned maxlen,
|
|
return (int)(p-buffer);
|
|
}
|
|
|
|
-/*
|
|
+/*
|
|
* Encode ICE information in SDP.
|
|
*/
|
|
static int encode_session(char buffer[], unsigned maxlen)
|
|
@@ -607,6 +636,26 @@ static int encode_session(char buffer[], unsigned maxlen)
|
|
sizeof(ipaddr), 0));
|
|
}
|
|
|
|
+ if (cand[0].transport != PJ_CAND_UDP) {
|
|
+ /** RFC 6544, Section 4.5:
|
|
+ * If the default candidate is TCP-based, the agent MUST include the
|
|
+ * a=setup and a=connection attributes from RFC 4145 [RFC4145],
|
|
+ * following the procedures defined there as if ICE were not in use.
|
|
+ */
|
|
+ PRINT("a=setup:");
|
|
+ switch (cand[0].transport) {
|
|
+ case PJ_CAND_TCP_ACTIVE:
|
|
+ PRINT("active\n");
|
|
+ break;
|
|
+ case PJ_CAND_TCP_PASSIVE:
|
|
+ PRINT("passive\n");
|
|
+ break;
|
|
+ default:
|
|
+ return PJ_EINVALIDOP;
|
|
+ }
|
|
+ PRINT("a=connection:new\n");
|
|
+ }
|
|
+
|
|
/* Enumerate all candidates for this component */
|
|
cand_cnt = PJ_ARRAY_SIZE(cand);
|
|
status = pj_ice_strans_enum_cands(icedemo.icest, comp+1,
|
|
@@ -663,7 +712,7 @@ static void icedemo_show_ice(void)
|
|
return;
|
|
}
|
|
|
|
- printf("Negotiated comp_cnt: %d\n",
|
|
+ printf("Negotiated comp_cnt: %d\n",
|
|
pj_ice_strans_get_running_comp_cnt(icedemo.icest));
|
|
printf("Role : %s\n",
|
|
pj_ice_strans_get_role(icedemo.icest)==PJ_ICE_SESS_ROLE_CONTROLLED ?
|
|
@@ -703,12 +752,12 @@ static void icedemo_show_ice(void)
|
|
|
|
|
|
/*
|
|
- * Input and parse SDP from the remote (containing remote's ICE information)
|
|
+ * Input and parse SDP from the remote (containing remote's ICE information)
|
|
* and save it to global variables.
|
|
*/
|
|
static void icedemo_input_remote(void)
|
|
{
|
|
- char linebuf[80];
|
|
+ char linebuf[120];
|
|
unsigned media_cnt = 0;
|
|
unsigned comp0_port = 0;
|
|
char comp0_addr[80];
|
|
@@ -764,14 +813,14 @@ static void icedemo_input_remote(void)
|
|
}
|
|
|
|
comp0_port = atoi(portstr);
|
|
-
|
|
+
|
|
}
|
|
break;
|
|
case 'c':
|
|
{
|
|
int cnt;
|
|
char c[32], net[32], ip[80];
|
|
-
|
|
+
|
|
cnt = sscanf(line+2, "%s %s %s", c, net, ip);
|
|
if (cnt != 3) {
|
|
PJ_LOG(1,(THIS_FILE, "Error parsing connection line"));
|
|
@@ -822,28 +871,34 @@ static void icedemo_input_remote(void)
|
|
} else if (strcmp(attr, "candidate")==0) {
|
|
char *sdpcand = attr+strlen(attr)+1;
|
|
int af, cnt;
|
|
- char foundation[32], transport[12], ipaddr[80], type[32];
|
|
+ char foundation[32], transport[12], ipaddr[80], type[32], tcp_type[32];
|
|
pj_str_t tmpaddr;
|
|
int comp_id, prio, port;
|
|
pj_ice_sess_cand *cand;
|
|
pj_status_t status;
|
|
+ pj_bool_t is_tcp = PJ_FALSE;
|
|
|
|
- cnt = sscanf(sdpcand, "%s %d %s %d %s %d typ %s",
|
|
+ cnt = sscanf(sdpcand, "%s %d %s %d %s %d typ %s tcptype %s\n",
|
|
foundation,
|
|
&comp_id,
|
|
transport,
|
|
&prio,
|
|
ipaddr,
|
|
&port,
|
|
- type);
|
|
- if (cnt != 7) {
|
|
+ type,
|
|
+ tcp_type);
|
|
+ if (cnt != 7 && cnt != 8) {
|
|
PJ_LOG(1, (THIS_FILE, "error: Invalid ICE candidate line"));
|
|
goto on_error;
|
|
}
|
|
|
|
+ if (strcmp(transport, "TCP") == 0) {
|
|
+ is_tcp = PJ_TRUE;
|
|
+ }
|
|
+
|
|
cand = &icedemo.rem.cand[icedemo.rem.cand_cnt];
|
|
pj_bzero(cand, sizeof(*cand));
|
|
-
|
|
+
|
|
if (strcmp(type, "host")==0)
|
|
cand->type = PJ_ICE_CAND_TYPE_HOST;
|
|
else if (strcmp(type, "srflx")==0)
|
|
@@ -851,15 +906,32 @@ static void icedemo_input_remote(void)
|
|
else if (strcmp(type, "relay")==0)
|
|
cand->type = PJ_ICE_CAND_TYPE_RELAYED;
|
|
else {
|
|
- PJ_LOG(1, (THIS_FILE, "Error: invalid candidate type '%s'",
|
|
+ PJ_LOG(1, (THIS_FILE, "Error: invalid candidate type '%s'",
|
|
type));
|
|
goto on_error;
|
|
}
|
|
|
|
+ if (is_tcp) {
|
|
+ if (strcmp(tcp_type, "active") == 0)
|
|
+ cand->transport = PJ_CAND_TCP_ACTIVE;
|
|
+ else if (strcmp(tcp_type, "passive") == 0)
|
|
+ cand->transport = PJ_CAND_TCP_PASSIVE;
|
|
+ else if (strcmp(tcp_type, "so") == 0)
|
|
+ cand->transport = PJ_CAND_TCP_SO;
|
|
+ else {
|
|
+ PJ_LOG(1, (THIS_FILE,
|
|
+ "Error: invalid transport type '%s'",
|
|
+ tcp_type));
|
|
+ goto on_error;
|
|
+ }
|
|
+ } else {
|
|
+ cand->transport = PJ_CAND_UDP;
|
|
+ }
|
|
+
|
|
cand->comp_id = (pj_uint8_t)comp_id;
|
|
pj_strdup2(icedemo.pool, &cand->foundation, foundation);
|
|
cand->prio = prio;
|
|
-
|
|
+
|
|
if (strchr(ipaddr, ':'))
|
|
af = pj_AF_INET6();
|
|
else
|
|
@@ -919,7 +991,7 @@ static void icedemo_input_remote(void)
|
|
pj_sockaddr_set_port(&icedemo.rem.def_addr[0], (pj_uint16_t)comp0_port);
|
|
}
|
|
|
|
- PJ_LOG(3, (THIS_FILE, "Done, %d remote candidate(s) added",
|
|
+ PJ_LOG(3, (THIS_FILE, "Done, %d remote candidate(s) added",
|
|
icedemo.rem.cand_cnt));
|
|
return;
|
|
|
|
@@ -953,7 +1025,7 @@ static void icedemo_start_nego(void)
|
|
|
|
PJ_LOG(3,(THIS_FILE, "Starting ICE negotiation.."));
|
|
|
|
- status = pj_ice_strans_start_ice(icedemo.icest,
|
|
+ status = pj_ice_strans_start_ice(icedemo.icest,
|
|
pj_cstr(&rufrag, icedemo.rem.ufrag),
|
|
pj_cstr(&rpwd, icedemo.rem.pwd),
|
|
icedemo.rem.cand_cnt,
|
|
diff --git a/pjsip/src/pjsip/sip_transport.c b/pjsip/src/pjsip/sip_transport.c
|
|
index c5885f305..e1e7c6f2f 100644
|
|
--- a/pjsip/src/pjsip/sip_transport.c
|
|
+++ b/pjsip/src/pjsip/sip_transport.c
|
|
@@ -1,4 +1,4 @@
|
|
-/*
|
|
+/*
|
|
* Copyright (C) 2008-2011 Teluu Inc. (http://www.teluu.com)
|
|
* Copyright (C) 2003-2008 Benny Prijono <benny@prijono.org>
|
|
*
|
|
@@ -14,7 +14,7 @@
|
|
*
|
|
* You should have received a copy of the GNU General Public License
|
|
* along with this program; if not, write to the Free Software
|
|
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
|
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
|
*/
|
|
#include <pjsip/sip_transport.h>
|
|
#include <pjsip/sip_endpoint.h>
|
|
@@ -44,7 +44,7 @@
|
|
static const char *addr_string(const pj_sockaddr_t *addr)
|
|
{
|
|
static char str[PJ_INET6_ADDRSTRLEN];
|
|
- pj_inet_ntop(((const pj_sockaddr*)addr)->addr.sa_family,
|
|
+ pj_inet_ntop(((const pj_sockaddr*)addr)->addr.sa_family,
|
|
pj_sockaddr_get_addr(addr),
|
|
str, sizeof(str));
|
|
return str;
|
|
@@ -110,9 +110,9 @@ enum timer_id {
|
|
static pj_status_t mod_on_tx_msg(pjsip_tx_data *tdata);
|
|
|
|
/* This module has sole purpose to print transmit data to contigous buffer
|
|
- * before actually transmitted to the wire.
|
|
+ * before actually transmitted to the wire.
|
|
*/
|
|
-static pjsip_module mod_msg_print =
|
|
+static pjsip_module mod_msg_print =
|
|
{
|
|
NULL, NULL, /* prev and next */
|
|
{ "mod-msg-print", 13}, /* Name. */
|
|
@@ -140,7 +140,7 @@ typedef struct transport
|
|
/*
|
|
* Transport manager.
|
|
*/
|
|
-struct pjsip_tpmgr
|
|
+struct pjsip_tpmgr
|
|
{
|
|
pj_hash_table_t *table;
|
|
pj_lock_t *lock;
|
|
@@ -204,76 +204,76 @@ static struct transport_names_t
|
|
const char *description; /* Longer description */
|
|
unsigned flag; /* Flags */
|
|
char name_buf[16]; /* For user's transport */
|
|
-} transport_names[16] =
|
|
+} transport_names[16] =
|
|
{
|
|
- {
|
|
- PJSIP_TRANSPORT_UNSPECIFIED,
|
|
- 0,
|
|
- {"Unspecified", 11},
|
|
- "Unspecified",
|
|
+ {
|
|
+ PJSIP_TRANSPORT_UNSPECIFIED,
|
|
+ 0,
|
|
+ {"Unspecified", 11},
|
|
+ "Unspecified",
|
|
0
|
|
},
|
|
- {
|
|
- PJSIP_TRANSPORT_UDP,
|
|
- 5060,
|
|
- {"UDP", 3},
|
|
- "UDP transport",
|
|
+ {
|
|
+ PJSIP_TRANSPORT_UDP,
|
|
+ 5060,
|
|
+ {"UDP", 3},
|
|
+ "UDP transport",
|
|
PJSIP_TRANSPORT_DATAGRAM
|
|
},
|
|
- {
|
|
- PJSIP_TRANSPORT_TCP,
|
|
- 5060,
|
|
- {"TCP", 3},
|
|
- "TCP transport",
|
|
+ {
|
|
+ PJSIP_TRANSPORT_TCP,
|
|
+ 5060,
|
|
+ {"TCP", 3},
|
|
+ "TCP transport",
|
|
PJSIP_TRANSPORT_RELIABLE
|
|
},
|
|
- {
|
|
- PJSIP_TRANSPORT_TLS,
|
|
- 5061,
|
|
- {"TLS", 3},
|
|
- "TLS transport",
|
|
+ {
|
|
+ PJSIP_TRANSPORT_TLS,
|
|
+ 5061,
|
|
+ {"TLS", 3},
|
|
+ "TLS transport",
|
|
PJSIP_TRANSPORT_RELIABLE | PJSIP_TRANSPORT_SECURE
|
|
},
|
|
- {
|
|
+ {
|
|
PJSIP_TRANSPORT_DTLS,
|
|
- 5061,
|
|
- {"DTLS", 4},
|
|
- "DTLS transport",
|
|
+ 5061,
|
|
+ {"DTLS", 4},
|
|
+ "DTLS transport",
|
|
PJSIP_TRANSPORT_SECURE
|
|
},
|
|
- {
|
|
- PJSIP_TRANSPORT_SCTP,
|
|
- 5060,
|
|
- {"SCTP", 4},
|
|
- "SCTP transport",
|
|
+ {
|
|
+ PJSIP_TRANSPORT_SCTP,
|
|
+ 5060,
|
|
+ {"SCTP", 4},
|
|
+ "SCTP transport",
|
|
PJSIP_TRANSPORT_RELIABLE
|
|
},
|
|
- {
|
|
- PJSIP_TRANSPORT_LOOP,
|
|
- 15060,
|
|
- {"LOOP", 4},
|
|
- "Loopback transport",
|
|
+ {
|
|
+ PJSIP_TRANSPORT_LOOP,
|
|
+ 15060,
|
|
+ {"LOOP", 4},
|
|
+ "Loopback transport",
|
|
PJSIP_TRANSPORT_RELIABLE
|
|
- },
|
|
- {
|
|
- PJSIP_TRANSPORT_LOOP_DGRAM,
|
|
- 15060,
|
|
- {"LOOP-DGRAM", 10},
|
|
- "Loopback datagram transport",
|
|
+ },
|
|
+ {
|
|
+ PJSIP_TRANSPORT_LOOP_DGRAM,
|
|
+ 15060,
|
|
+ {"LOOP-DGRAM", 10},
|
|
+ "Loopback datagram transport",
|
|
PJSIP_TRANSPORT_DATAGRAM
|
|
},
|
|
- {
|
|
- PJSIP_TRANSPORT_UDP6,
|
|
- 5060,
|
|
- {"UDP", 3},
|
|
- "UDP IPv6 transport",
|
|
+ {
|
|
+ PJSIP_TRANSPORT_UDP6,
|
|
+ 5060,
|
|
+ {"UDP", 3},
|
|
+ "UDP IPv6 transport",
|
|
PJSIP_TRANSPORT_DATAGRAM
|
|
},
|
|
- {
|
|
- PJSIP_TRANSPORT_TCP6,
|
|
- 5060,
|
|
- {"TCP", 3},
|
|
- "TCP IPv6 transport",
|
|
+ {
|
|
+ PJSIP_TRANSPORT_TCP6,
|
|
+ 5060,
|
|
+ {"TCP", 3},
|
|
+ "TCP IPv6 transport",
|
|
PJSIP_TRANSPORT_RELIABLE
|
|
},
|
|
{
|
|
@@ -322,12 +322,12 @@ PJ_DEF(pj_status_t) pjsip_transport_register_type( unsigned tp_flag,
|
|
pjsip_transport_type_e parent = 0;
|
|
|
|
PJ_ASSERT_RETURN(tp_flag && tp_name && def_port, PJ_EINVAL);
|
|
- PJ_ASSERT_RETURN(pj_ansi_strlen(tp_name) <
|
|
- PJ_ARRAY_SIZE(transport_names[0].name_buf),
|
|
+ PJ_ASSERT_RETURN(pj_ansi_strlen(tp_name) <
|
|
+ PJ_ARRAY_SIZE(transport_names[0].name_buf),
|
|
PJ_ENAMETOOLONG);
|
|
|
|
for (i=1; i<PJ_ARRAY_SIZE(transport_names); ++i) {
|
|
- if (tp_flag & PJSIP_TRANSPORT_IPV6 &&
|
|
+ if (tp_flag & PJSIP_TRANSPORT_IPV6 &&
|
|
pj_stricmp2(&transport_names[i].name, tp_name) == 0)
|
|
{
|
|
parent = transport_names[i].type;
|
|
@@ -511,7 +511,7 @@ PJ_DEF(pj_status_t) pjsip_tx_data_create( pjsip_tpmgr *mgr,
|
|
pjsip_endpt_release_pool( mgr->endpt, tdata->pool );
|
|
return status;
|
|
}
|
|
-
|
|
+
|
|
//status = pj_lock_create_simple_mutex(pool, "tdta%p", &tdata->lock);
|
|
status = pj_lock_create_null_mutex(pool, "tdta%p", &tdata->lock);
|
|
if (status != PJ_SUCCESS) {
|
|
@@ -574,7 +574,7 @@ static void tx_data_destroy(pjsip_tx_data *tdata)
|
|
PJ_DEF(pj_status_t) pjsip_tx_data_dec_ref( pjsip_tx_data *tdata )
|
|
{
|
|
pj_atomic_value_t ref_cnt;
|
|
-
|
|
+
|
|
PJ_ASSERT_RETURN(tdata && tdata->ref_cnt, PJ_EINVAL);
|
|
|
|
ref_cnt = pj_atomic_dec_and_get(tdata->ref_cnt);
|
|
@@ -607,7 +607,7 @@ PJ_DEF(pj_status_t) pjsip_tx_data_encode(pjsip_tx_data *tdata)
|
|
PJ_USE_EXCEPTION;
|
|
|
|
PJ_TRY {
|
|
- tdata->buf.start = (char*)
|
|
+ tdata->buf.start = (char*)
|
|
pj_pool_alloc(tdata->pool, PJSIP_MAX_PKT_LEN);
|
|
}
|
|
PJ_CATCH_ANY {
|
|
@@ -623,7 +623,7 @@ PJ_DEF(pj_status_t) pjsip_tx_data_encode(pjsip_tx_data *tdata)
|
|
if (!pjsip_tx_data_is_valid(tdata)) {
|
|
pj_ssize_t size;
|
|
|
|
- size = pjsip_msg_print( tdata->msg, tdata->buf.start,
|
|
+ size = pjsip_msg_print( tdata->msg, tdata->buf.start,
|
|
tdata->buf.end - tdata->buf.start);
|
|
if (size < 0) {
|
|
return PJSIP_EMSGTOOLONG;
|
|
@@ -652,7 +652,7 @@ static char *get_msg_info(pj_pool_t *pool, const char *obj_name,
|
|
PJ_ASSERT_RETURN(cseq != NULL, "INVALID MSG");
|
|
|
|
if (msg->type == PJSIP_REQUEST_MSG) {
|
|
- len = pj_ansi_snprintf(info_buf, sizeof(info_buf),
|
|
+ len = pj_ansi_snprintf(info_buf, sizeof(info_buf),
|
|
"Request msg %.*s/cseq=%d (%s)",
|
|
(int)msg->line.req.method.name.slen,
|
|
msg->line.req.method.name.ptr,
|
|
@@ -918,7 +918,7 @@ static pj_status_t mod_on_tx_msg(pjsip_tx_data *tdata)
|
|
/*
|
|
* Send a SIP message using the specified transport.
|
|
*/
|
|
-PJ_DEF(pj_status_t) pjsip_transport_send( pjsip_transport *tr,
|
|
+PJ_DEF(pj_status_t) pjsip_transport_send( pjsip_transport *tr,
|
|
pjsip_tx_data *tdata,
|
|
const pj_sockaddr_t *addr,
|
|
int addr_len,
|
|
@@ -932,7 +932,7 @@ PJ_DEF(pj_status_t) pjsip_transport_send( pjsip_transport *tr,
|
|
/* Is it currently being sent? */
|
|
if (tdata->is_pending) {
|
|
pj_assert(!"Invalid operation step!");
|
|
- PJ_LOG(2,(THIS_FILE, "Unable to send %s: message is pending",
|
|
+ PJ_LOG(2,(THIS_FILE, "Unable to send %s: message is pending",
|
|
pjsip_tx_data_get_info(tdata)));
|
|
return PJSIP_EPENDINGTX;
|
|
}
|
|
@@ -953,7 +953,7 @@ PJ_DEF(pj_status_t) pjsip_transport_send( pjsip_transport *tr,
|
|
sizeof(tdata->tp_info.dst_name));
|
|
tdata->tp_info.dst_port = pj_sockaddr_get_port(addr);
|
|
|
|
- /* Distribute to modules.
|
|
+ /* Distribute to modules.
|
|
* When the message reach mod_msg_print, the contents of the message will
|
|
* be "printed" to contiguous buffer.
|
|
*/
|
|
@@ -976,7 +976,7 @@ PJ_DEF(pj_status_t) pjsip_transport_send( pjsip_transport *tr,
|
|
tdata->is_pending = 1;
|
|
|
|
/* Send to transport. */
|
|
- status = (*tr->send_msg)(tr, tdata, addr, addr_len, (void*)tdata,
|
|
+ status = (*tr->send_msg)(tr, tdata, addr, addr_len, (void*)tdata,
|
|
&transport_send_callback);
|
|
|
|
if (status != PJ_EPENDING) {
|
|
@@ -1035,7 +1035,7 @@ PJ_DEF(pj_status_t) pjsip_tpmgr_send_raw(pjsip_tpmgr *mgr,
|
|
{
|
|
pjsip_transport *tr;
|
|
pj_status_t status;
|
|
-
|
|
+
|
|
/* Acquire the transport */
|
|
status = pjsip_tpmgr_acquire_transport(mgr, tp_type, addr, addr_len,
|
|
sel, &tr);
|
|
@@ -1064,13 +1064,13 @@ PJ_DEF(pj_status_t) pjsip_tpmgr_send_raw(pjsip_tpmgr *mgr,
|
|
tdata->buf.start = (char*) pj_pool_alloc(tdata->pool, data_len+1);
|
|
tdata->buf.end = tdata->buf.start + data_len + 1;
|
|
}
|
|
-
|
|
+
|
|
/* Copy data, if any! (application may send zero len packet) */
|
|
if (data_len) {
|
|
pj_memcpy(tdata->buf.start, raw_data, data_len);
|
|
}
|
|
tdata->buf.cur = tdata->buf.start + data_len;
|
|
-
|
|
+
|
|
/* Save callback data. */
|
|
tdata->token = token;
|
|
tdata->cb = cb;
|
|
@@ -1081,7 +1081,7 @@ PJ_DEF(pj_status_t) pjsip_tpmgr_send_raw(pjsip_tpmgr *mgr,
|
|
/* Send to transport */
|
|
status = tr->send_msg(tr, tdata, addr, addr_len,
|
|
tdata, &send_raw_callback);
|
|
-
|
|
+
|
|
if (status != PJ_EPENDING) {
|
|
/* callback will not be called, so destroy tdata now. */
|
|
pjsip_tx_data_dec_ref(tdata);
|
|
@@ -1115,7 +1115,7 @@ static void transport_idle_callback(pj_timer_heap_t *timer_heap,
|
|
if (pj_atomic_get(tp->ref_cnt) == 0) {
|
|
tp->is_destroying = PJ_TRUE;
|
|
PJ_LOG(4, (THIS_FILE, "Transport %s is being destroyed "
|
|
- "due to timeout in %s timer", tp->obj_name,
|
|
+ "due to timeout in %s timer", tp->obj_name,
|
|
(entry_id == IDLE_TIMER_ID)?"idle":"initial"));
|
|
if (entry_id == INITIAL_IDLE_TIMER_ID) {
|
|
if (tp->last_recv_len > 0 && tp->tpmgr->tp_drop_data_cb) {
|
|
@@ -1225,7 +1225,7 @@ PJ_DEF(pj_status_t) pjsip_transport_dec_ref( pjsip_transport *tp )
|
|
!tp->is_destroying && pj_atomic_get(tp->ref_cnt) == 0)
|
|
{
|
|
pj_time_val delay;
|
|
-
|
|
+
|
|
int timer_id = IDLE_TIMER_ID;
|
|
|
|
/* If transport is in graceful shutdown, then this is the
|
|
@@ -1240,7 +1240,7 @@ PJ_DEF(pj_status_t) pjsip_transport_dec_ref( pjsip_transport *tp )
|
|
} else {
|
|
delay.sec = PJSIP_TRANSPORT_SERVER_IDLE_TIME;
|
|
if (tp->last_recv_ts.u64 == 0 && tp->initial_timeout) {
|
|
- PJ_LOG(4, (THIS_FILE,
|
|
+ PJ_LOG(4, (THIS_FILE,
|
|
"Starting transport %s initial timer",
|
|
tp->obj_name));
|
|
timer_id = INITIAL_IDLE_TIMER_ID;
|
|
@@ -1309,7 +1309,7 @@ PJ_DEF(pj_status_t) pjsip_transport_register( pjsip_tpmgr *mgr,
|
|
if (!tp_add){
|
|
pj_lock_release(mgr->lock);
|
|
return PJ_ENOMEM;
|
|
- }
|
|
+ }
|
|
pj_list_init(tp_add);
|
|
pj_list_push_back(&mgr->tp_entry_freelist, tp_add);
|
|
}
|
|
@@ -1319,7 +1319,7 @@ PJ_DEF(pj_status_t) pjsip_transport_register( pjsip_tpmgr *mgr,
|
|
pj_list_erase(tp_add);
|
|
|
|
if (tp_ref) {
|
|
- /* There'a already a transport list from the hash table. Add the
|
|
+ /* There'a already a transport list from the hash table. Add the
|
|
* new transport to the list.
|
|
*/
|
|
pj_list_push_back(tp_ref, tp_add);
|
|
@@ -1442,7 +1442,7 @@ static pj_status_t destroy_transport( pjsip_tpmgr *mgr,
|
|
|
|
|
|
/*
|
|
- * Start graceful shutdown procedure for this transport.
|
|
+ * Start graceful shutdown procedure for this transport.
|
|
*/
|
|
PJ_DEF(pj_status_t) pjsip_transport_shutdown(pjsip_transport *tp)
|
|
{
|
|
@@ -1451,7 +1451,7 @@ PJ_DEF(pj_status_t) pjsip_transport_shutdown(pjsip_transport *tp)
|
|
|
|
|
|
/*
|
|
- * Start shutdown procedure for this transport.
|
|
+ * Start shutdown procedure for this transport.
|
|
*/
|
|
PJ_DEF(pj_status_t) pjsip_transport_shutdown2(pjsip_transport *tp,
|
|
pj_bool_t force)
|
|
@@ -1551,6 +1551,12 @@ PJ_DEF(pj_status_t) pjsip_tpmgr_register_tpfactory( pjsip_tpmgr *mgr,
|
|
/* Check that no same factory has been registered. */
|
|
status = PJ_SUCCESS;
|
|
for (p=mgr->factory_list.next; p!=&mgr->factory_list; p=p->next) {
|
|
+ if (p->type == tpf->type &&
|
|
+ !pj_sockaddr_cmp(&tpf->local_addr, &p->local_addr))
|
|
+ {
|
|
+ status = PJSIP_ETYPEEXISTS;
|
|
+ break;
|
|
+ }
|
|
if (p == tpf) {
|
|
status = PJ_EEXISTS;
|
|
break;
|
|
@@ -1702,7 +1708,7 @@ static pj_status_t get_net_interface(pjsip_transport_type_e tp_type,
|
|
status = pj_getipinterface(af, dst, &itf_addr, PJ_TRUE, NULL);
|
|
}
|
|
|
|
- if (status != PJ_SUCCESS) {
|
|
+ if (status != PJ_SUCCESS) {
|
|
status = pj_getipinterface(af, dst, &itf_addr, PJ_FALSE, NULL);
|
|
if (status != PJ_SUCCESS) {
|
|
/* If it fails, e.g: on WM6(http://support.microsoft.com/kb/129065),
|
|
@@ -1727,7 +1733,7 @@ static pj_status_t get_net_interface(pjsip_transport_type_e tp_type,
|
|
|
|
/*
|
|
* Find out the appropriate local address info (IP address and port) to
|
|
- * advertise in Contact header based on the remote address to be
|
|
+ * advertise in Contact header based on the remote address to be
|
|
* contacted. The local address info would be the address name of the
|
|
* transport or listener which will be used to send the request.
|
|
*
|
|
@@ -2089,9 +2095,9 @@ PJ_DEF(pj_ssize_t) pjsip_tpmgr_receive_packet( pjsip_tpmgr *mgr,
|
|
|
|
tr->last_recv_len = rdata->pkt_info.len;
|
|
pj_get_timestamp(&tr->last_recv_ts);
|
|
-
|
|
- /* Must NULL terminate buffer. This is the requirement of the
|
|
- * parser etc.
|
|
+
|
|
+ /* Must NULL terminate buffer. This is the requirement of the
|
|
+ * parser etc.
|
|
*/
|
|
current_pkt[remaining_len] = '\0';
|
|
|
|
@@ -2134,7 +2140,7 @@ PJ_DEF(pj_ssize_t) pjsip_tpmgr_receive_packet( pjsip_tpmgr *mgr,
|
|
/* Initialize default fragment size. */
|
|
msg_fragment_size = remaining_len;
|
|
|
|
- /* Clear and init msg_info in rdata.
|
|
+ /* Clear and init msg_info in rdata.
|
|
* Endpoint might inspect the values there when we call the callback
|
|
* to report some errors.
|
|
*/
|
|
@@ -2164,7 +2170,7 @@ PJ_DEF(pj_ssize_t) pjsip_tpmgr_receive_packet( pjsip_tpmgr *mgr,
|
|
}
|
|
}
|
|
|
|
- msg_status = pjsip_find_msg(current_pkt, remaining_len, PJ_FALSE,
|
|
+ msg_status = pjsip_find_msg(current_pkt, remaining_len, PJ_FALSE,
|
|
&msg_fragment_size);
|
|
if (msg_status != PJ_SUCCESS) {
|
|
pj_status_t dd_status = msg_status;
|
|
@@ -2207,10 +2213,10 @@ PJ_DEF(pj_ssize_t) pjsip_tpmgr_receive_packet( pjsip_tpmgr *mgr,
|
|
}
|
|
|
|
if (msg_status == PJSIP_EPARTIALMSG) {
|
|
- if (rdata->tp_info.transport->idle_timer.id ==
|
|
+ if (rdata->tp_info.transport->idle_timer.id ==
|
|
INITIAL_IDLE_TIMER_ID)
|
|
{
|
|
- /* We are not getting the first valid SIP message
|
|
+ /* We are not getting the first valid SIP message
|
|
* as expected, close the transport.
|
|
*/
|
|
PJ_LOG(4, (THIS_FILE, "Unexpected data was received "\
|
|
@@ -2238,7 +2244,7 @@ PJ_DEF(pj_ssize_t) pjsip_tpmgr_receive_packet( pjsip_tpmgr *mgr,
|
|
current_pkt[msg_fragment_size] = '\0';
|
|
|
|
/* Parse the message. */
|
|
- rdata->msg_info.msg = msg =
|
|
+ rdata->msg_info.msg = msg =
|
|
pjsip_parse_rdata( current_pkt, msg_fragment_size, rdata);
|
|
|
|
/* Restore null termination */
|
|
@@ -2299,7 +2305,7 @@ PJ_DEF(pj_ssize_t) pjsip_tpmgr_receive_packet( pjsip_tpmgr *mgr,
|
|
dd.len = msg_fragment_size;
|
|
dd.status = PJSIP_EINVALIDMSG;
|
|
(*mgr->tp_drop_data_cb)(&dd);
|
|
-
|
|
+
|
|
if (dd.len > 0 && dd.len < msg_fragment_size)
|
|
msg_fragment_size = dd.len;
|
|
}
|
|
@@ -2309,11 +2315,11 @@ PJ_DEF(pj_ssize_t) pjsip_tpmgr_receive_packet( pjsip_tpmgr *mgr,
|
|
|
|
/* Perform basic header checking. */
|
|
if (rdata->msg_info.cid == NULL ||
|
|
- rdata->msg_info.cid->id.slen == 0 ||
|
|
- rdata->msg_info.from == NULL ||
|
|
- rdata->msg_info.to == NULL ||
|
|
- rdata->msg_info.via == NULL ||
|
|
- rdata->msg_info.cseq == NULL)
|
|
+ rdata->msg_info.cid->id.slen == 0 ||
|
|
+ rdata->msg_info.from == NULL ||
|
|
+ rdata->msg_info.to == NULL ||
|
|
+ rdata->msg_info.via == NULL ||
|
|
+ rdata->msg_info.cseq == NULL)
|
|
{
|
|
mgr->on_rx_msg(mgr->endpt, PJSIP_EMISSINGHDR, rdata);
|
|
|
|
@@ -2325,7 +2331,7 @@ PJ_DEF(pj_ssize_t) pjsip_tpmgr_receive_packet( pjsip_tpmgr *mgr,
		    dd.data = current_pkt;
		    dd.len = msg_fragment_size;
		    dd.status = PJSIP_EMISSINGHDR;
-		    (*mgr->tp_drop_data_cb)(&dd);
+		    (*mgr->tp_drop_data_cb)(&dd);
		}
		goto finish_process_fragment;
	    }
@@ -2333,8 +2339,8 @@ PJ_DEF(pj_ssize_t) pjsip_tpmgr_receive_packet( pjsip_tpmgr *mgr,
	    /* For request: */
	    if (rdata->msg_info.msg->type == PJSIP_REQUEST_MSG) {
		/* always add received parameter to the via. */
-		pj_strdup2(rdata->tp_info.pool,
-			   &rdata->msg_info.via->recvd_param,
+		pj_strdup2(rdata->tp_info.pool,
+			   &rdata->msg_info.via->recvd_param,
			   rdata->pkt_info.src_name);

		/* RFC 3581:
@@ -2358,7 +2364,7 @@ PJ_DEF(pj_ssize_t) pjsip_tpmgr_receive_packet( pjsip_tpmgr *mgr,
		    dd.data = current_pkt;
		    dd.len = msg_fragment_size;
		    dd.status = PJSIP_EINVALIDSTATUS;
-		    (*mgr->tp_drop_data_cb)(&dd);
+		    (*mgr->tp_drop_data_cb)(&dd);
		}
		goto finish_process_fragment;
	    }
@@ -2463,10 +2469,13 @@ PJ_DEF(pj_status_t) pjsip_tpmgr_acquire_transport2(pjsip_tpmgr *mgr,
     * for the destination.
     */
    if (sel && sel->type == PJSIP_TPSELECTOR_TRANSPORT &&
-	sel->u.transport)
+	sel->u.transport)
    {
	pjsip_transport *seltp = sel->u.transport;

+ pjsip_transport_type_e type_no_ipv6 = type % PJSIP_TRANSPORT_IPV6;
+	pjsip_transport_type_e key_type_no_ipv6 = seltp->key.type %
+				                  PJSIP_TRANSPORT_IPV6;
	/* See if the transport is (not) suitable */
	if (seltp->key.type != type) {
	    pj_lock_release(mgr->lock);
@@ -2765,7 +2774,7 @@ PJ_DEF(void) pjsip_tpmgr_dump_transports(pjsip_tpmgr *mgr)
    PJ_LOG(3, (THIS_FILE, "  Dumping listeners:"));
    factory = mgr->factory_list.next;
    while (factory != &mgr->factory_list) {
-	PJ_LOG(3, (THIS_FILE, "  %s %s:%.*s:%d",
+	PJ_LOG(3, (THIS_FILE, "  %s %s:%.*s:%d",
		   factory->obj_name,
		   factory->type_name,
		   (int)factory->addr_name.host.slen,
@@ -2933,7 +2942,7 @@ PJ_DEF(pj_status_t) pjsip_transport_add_state_listener (
 }

 /**
- * Remove a listener from the specified transport for transport state
+ * Remove a listener from the specified transport for transport state
  * notification.
  */
 PJ_DEF(pj_status_t) pjsip_transport_remove_state_listener (
diff --git a/pjsip/src/pjsua-lib/pjsua_core.c b/pjsip/src/pjsua-lib/pjsua_core.c
index a29b5cdd6..74f6f527c 100644
--- a/pjsip/src/pjsua-lib/pjsua_core.c
+++ b/pjsip/src/pjsua-lib/pjsua_core.c
@@ -1,4 +1,4 @@
-/*
+/*
 * Copyright (C) 2008-2011 Teluu Inc. (http://www.teluu.com)
 * Copyright (C) 2003-2008 Benny Prijono <benny@prijono.org>
 *
@@ -14,7 +14,7 @@
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
 #include <pjsua-lib/pjsua.h>
 #include <pjsua-lib/pjsua_internal.h>
@@ -40,7 +40,7 @@ PJ_DEF(struct pjsua_data*) pjsua_get_var(void)


 /* Display error */
-PJ_DEF(void) pjsua_perror( const char *sender, const char *title,
+PJ_DEF(void) pjsua_perror( const char *sender, const char *title,
			   pj_status_t status)
 {
     char errmsg[PJ_ERR_MSG_SIZE];
@@ -58,7 +58,7 @@ static void init_data()

for (i=0; i<PJ_ARRAY_SIZE(pjsua_var.acc); ++i)
	pjsua_var.acc[i].index = i;
-
+
     for (i=0; i<PJ_ARRAY_SIZE(pjsua_var.tpdata); ++i)
	pjsua_var.tpdata[i].index = i;

@@ -82,7 +82,7 @@ PJ_DEF(void) pjsua_logging_config_default(pjsua_logging_config *cfg)
     cfg->msg_logging = PJ_TRUE;
     cfg->level = 5;
     cfg->console_level = 4;
-    cfg->decor = PJ_LOG_HAS_SENDER | PJ_LOG_HAS_TIME |
+    cfg->decor = PJ_LOG_HAS_SENDER | PJ_LOG_HAS_TIME |
		 PJ_LOG_HAS_MICRO_SEC | PJ_LOG_HAS_NEWLINE |
		 PJ_LOG_HAS_SPACE | PJ_LOG_HAS_THREAD_SWC |
		 PJ_LOG_HAS_INDENT;
@@ -285,13 +285,13 @@ PJ_DEF(void) pjsua_srtp_opt_dup( pj_pool_t *pool, pjsua_srtp_opt *dst,
				 pj_bool_t check_str)
 {
     pjsua_srtp_opt backup_dst;
-
+
     if (check_str) pj_memcpy(&backup_dst, dst, sizeof(*dst));
     pj_memcpy(dst, src, sizeof(*src));

     if (pool) {
	unsigned i;
-
+
	for (i = 0; i < src->crypto_count; i++) {
	    if (!check_str ||
		pj_stricmp(&backup_dst.crypto[i].key, &src->crypto[i].key))
@@ -396,7 +396,7 @@ PJ_DEF(void) pjsua_media_config_default(pjsua_media_config *cfg)
 {
     const pj_sys_info *si = pj_get_sys_info();
     pj_str_t dev_model = {"iPhone5", 7};
-
+
     pj_bzero(cfg, sizeof(*cfg));

     cfg->clock_rate = PJSUA_DEFAULT_CLOCK_RATE;
@@ -462,15 +462,15 @@ static pj_bool_t logging_on_rx_msg(pjsip_rx_data *rdata)
	       "--end msg--",
	       rdata->msg_info.len,
	       pjsip_rx_data_get_info(rdata),
-	       rdata->tp_info.transport->type_name,
-	       pj_addr_str_print(&input_str,
-				 rdata->pkt_info.src_port,
+	       rdata->tp_info.transport->type_name,
+	       pj_addr_str_print(&input_str,
+				 rdata->pkt_info.src_port,
				 addr,
-				 sizeof(addr),
+				 sizeof(addr),
				 1),
	       (int)rdata->msg_info.len,
	       rdata->msg_info.msg_buf));
-
+
     /* Always return false, otherwise messages will not get processed! */
     return PJ_FALSE;
 }
@@ -480,7 +480,7 @@ static pj_status_t logging_on_tx_msg(pjsip_tx_data *tdata)
 {
     char addr[PJ_INET6_ADDRSTRLEN+10];
     pj_str_t input_str = pj_str(tdata->tp_info.dst_name);
-
+
     /* Important note:
      *	tp_info field is only valid after outgoing messages has passed
      *	transport layer. So don't try to access tp_info when the module
@@ -492,10 +492,10 @@ static pj_status_t logging_on_tx_msg(pjsip_tx_data *tdata)
	       (int)(tdata->buf.cur - tdata->buf.start),
	       pjsip_tx_data_get_info(tdata),
	       tdata->tp_info.transport->type_name,
-	       pj_addr_str_print(&input_str,
-				 tdata->tp_info.dst_port,
+	       pj_addr_str_print(&input_str,
+				 tdata->tp_info.dst_port,
				 addr,
-				 sizeof(addr),
+				 sizeof(addr),
				 1),
	       (int)(tdata->buf.cur - tdata->buf.start),
	       tdata->buf.start));
@@ -506,7 +506,7 @@ static pj_status_t logging_on_tx_msg(pjsip_tx_data *tdata)
 }

 /* The module instance. */
-static pjsip_module pjsua_msg_logger =
+static pjsip_module pjsua_msg_logger =
 {
     NULL, NULL,				/* prev, next.		*/
     { "mod-pjsua-log", 13 },		/* Name.		*/
@@ -546,14 +546,14 @@ static pj_bool_t options_on_rx_request(pjsip_rx_data *rdata)

     /* Don't want to handle if shutdown is in progress */
     if (pjsua_var.thread_quit_flag) {
-	pjsip_endpt_respond_stateless(pjsua_var.endpt, rdata,
+	pjsip_endpt_respond_stateless(pjsua_var.endpt, rdata,
				      PJSIP_SC_TEMPORARILY_UNAVAILABLE, NULL,
				      NULL, NULL);
	return PJ_TRUE;
     }

     /* Create basic response. */
-    status = pjsip_endpt_create_response(pjsua_var.endpt, rdata, 200, NULL,
+    status = pjsip_endpt_create_response(pjsua_var.endpt, rdata, 200, NULL,
					 &tdata);
     if (status != PJ_SUCCESS) {
	pjsua_perror(THIS_FILE, "Unable to create OPTIONS response", status);
@@ -563,28 +563,28 @@ static pj_bool_t options_on_rx_request(pjsip_rx_data *rdata)
     /* Add Allow header */
     cap_hdr = pjsip_endpt_get_capability(pjsua_var.endpt, PJSIP_H_ALLOW, NULL);
     if (cap_hdr) {
-	pjsip_msg_add_hdr(tdata->msg,
+	pjsip_msg_add_hdr(tdata->msg,
			  (pjsip_hdr*) pjsip_hdr_clone(tdata->pool, cap_hdr));
     }

     /* Add Accept header */
     cap_hdr = pjsip_endpt_get_capability(pjsua_var.endpt, PJSIP_H_ACCEPT, NULL);
     if (cap_hdr) {
-	pjsip_msg_add_hdr(tdata->msg,
+	pjsip_msg_add_hdr(tdata->msg,
			  (pjsip_hdr*) pjsip_hdr_clone(tdata->pool, cap_hdr));
     }

     /* Add Supported header */
     cap_hdr = pjsip_endpt_get_capability(pjsua_var.endpt, PJSIP_H_SUPPORTED, NULL);
     if (cap_hdr) {
-	pjsip_msg_add_hdr(tdata->msg,
+	pjsip_msg_add_hdr(tdata->msg,
			  (pjsip_hdr*) pjsip_hdr_clone(tdata->pool, cap_hdr));
     }

     /* Add Allow-Events header from the evsub module */
     cap_hdr = pjsip_evsub_get_allow_events_hdr(NULL);
     if (cap_hdr) {
-	pjsip_msg_add_hdr(tdata->msg,
+	pjsip_msg_add_hdr(tdata->msg,
			  (pjsip_hdr*) pjsip_hdr_clone(tdata->pool, cap_hdr));
     }

@@ -628,7 +628,7 @@ static pj_bool_t options_on_rx_request(pjsip_rx_data *rdata)


 /* The module instance. */
-static pjsip_module pjsua_options_handler =
+static pjsip_module pjsua_options_handler =
 {
     NULL, NULL,				/* prev, next.		*/
     { "mod-pjsua-options", 17 },	/* Name.		*/
@@ -768,9 +768,9 @@ PJ_DEF(pj_status_t) pjsua_reconfigure_logging(const pjsua_logging_config *cfg)
     if (pjsua_var.log_cfg.log_filename.slen) {
	unsigned flags = PJ_O_WRONLY | PJ_O_CLOEXEC;
	flags |= pjsua_var.log_cfg.log_file_flags;
-	status = pj_file_open(pjsua_var.pool,
+	status = pj_file_open(pjsua_var.pool,
			      pjsua_var.log_cfg.log_filename.ptr,
-			      flags,
+			      flags,
			      &pjsua_var.log_file);

	if (status != PJ_SUCCESS) {
@@ -974,9 +974,9 @@ PJ_DEF(pj_status_t) pjsua_create(void)
	pj_shutdown();
	return status;
     }
-
+
     /* Create mutex */
-    status = pj_mutex_create_recursive(pjsua_var.pool, "pjsua",
+    status = pj_mutex_create_recursive(pjsua_var.pool, "pjsua",
				       &pjsua_var.mutex);
     if (status != PJ_SUCCESS) {
	pj_log_pop_indent();
@@ -988,8 +988,8 @@ PJ_DEF(pj_status_t) pjsua_create(void)
     /* Must create SIP endpoint to initialize SIP parser. The parser
      * is needed for example when application needs to call pjsua_verify_url().
      */
-    status = pjsip_endpt_create(&pjsua_var.cp.factory,
-				pj_gethostname()->ptr,
+    status = pjsip_endpt_create(&pjsua_var.cp.factory,
+				pj_gethostname()->ptr,
				&pjsua_var.endpt);
     if (status != PJ_SUCCESS) {
	pj_log_pop_indent();
@@ -1004,7 +1004,7 @@ PJ_DEF(pj_status_t) pjsua_create(void)
     pj_list_init(&pjsua_var.event_list);

     /* Create timer mutex */
-    status = pj_mutex_create_recursive(pjsua_var.pool, "pjsua_timer",
+    status = pj_mutex_create_recursive(pjsua_var.pool, "pjsua_timer",
				       &pjsua_var.timer_mutex);
     if (status != PJ_SUCCESS) {
	pj_log_pop_indent();
@@ -1028,7 +1028,7 @@ static void upnp_cb(pj_status_t status)
 #endif

 /*
- * Initialize pjsua with the specified settings. All the settings are
+ * Initialize pjsua with the specified settings. All the settings are
  * optional, and the default values will be used when the config is not
  * specified.
  */
@@ -1081,7 +1081,7 @@ PJ_DEF(pj_status_t) pjsua_init( const pjsua_config *ua_cfg,
	    unsigned ii;

/* Create DNS resolver */
-	    status = pjsip_endpt_create_resolver(pjsua_var.endpt,
+	    status = pjsip_endpt_create_resolver(pjsua_var.endpt,
						 &pjsua_var.resolver);
	    if (status != PJ_SUCCESS) {
		pjsua_perror(THIS_FILE, "Error creating resolver", status);
@@ -1089,7 +1089,7 @@ PJ_DEF(pj_status_t) pjsua_init( const pjsua_config *ua_cfg,
	    }

	    /* Configure nameserver for the DNS resolver */
-	    status = pj_dns_resolver_set_ns(pjsua_var.resolver,
+	    status = pj_dns_resolver_set_ns(pjsua_var.resolver,
					    ua_cfg->nameserver_count,
					    ua_cfg->nameserver, NULL);
	    if (status != PJ_SUCCESS) {
@@ -1111,7 +1111,7 @@ PJ_DEF(pj_status_t) pjsua_init( const pjsua_config *ua_cfg,
			  ua_cfg->nameserver[ii].ptr));
	    }
 #else
-	PJ_LOG(2,(THIS_FILE,
+	PJ_LOG(2,(THIS_FILE,
		  "DNS resolver is disabled (PJSIP_HAS_RESOLVER==0)"));
 #endif
     }
@@ -1146,7 +1146,7 @@ PJ_DEF(pj_status_t) pjsua_init( const pjsua_config *ua_cfg,
|
|
|
|
/* Initialize and register PJSUA application module. */
|
|
{
|
|
- const pjsip_module mod_initializer =
|
|
+ const pjsip_module mod_initializer =
|
|
{
|
|
NULL, NULL, /* prev, next. */
|
|
{ "mod-pjsua", 9 }, /* Name. */
|
|
@@ -1201,7 +1201,7 @@ PJ_DEF(pj_status_t) pjsua_init( const pjsua_config *ua_cfg,
|
|
|
|
pj_list_push_back(&pjsua_var.outbound_proxy, r);
|
|
}
|
|
-
|
|
+
|
|
|
|
/* Initialize PJSUA call subsystem: */
|
|
status = pjsua_call_subsys_init(ua_cfg);
|
|
@@ -1211,11 +1211,11 @@ PJ_DEF(pj_status_t) pjsua_init( const pjsua_config *ua_cfg,
|
|
/* Convert deprecated STUN settings */
|
|
if (pjsua_var.ua_cfg.stun_srv_cnt==0) {
|
|
if (pjsua_var.ua_cfg.stun_domain.slen) {
|
|
- pjsua_var.ua_cfg.stun_srv[pjsua_var.ua_cfg.stun_srv_cnt++] =
|
|
+ pjsua_var.ua_cfg.stun_srv[pjsua_var.ua_cfg.stun_srv_cnt++] =
|
|
pjsua_var.ua_cfg.stun_domain;
|
|
}
|
|
if (pjsua_var.ua_cfg.stun_host.slen) {
|
|
- pjsua_var.ua_cfg.stun_srv[pjsua_var.ua_cfg.stun_srv_cnt++] =
|
|
+ pjsua_var.ua_cfg.stun_srv[pjsua_var.ua_cfg.stun_srv_cnt++] =
|
|
pjsua_var.ua_cfg.stun_host;
|
|
}
|
|
}
|
|
@@ -1311,7 +1311,7 @@ PJ_DEF(pj_status_t) pjsua_init( const pjsua_config *ua_cfg,
|
|
|
|
for (ii=0; ii<pjsua_var.ua_cfg.thread_cnt; ++ii) {
|
|
char tname[16];
|
|
-
|
|
+
|
|
pj_ansi_snprintf(tname, sizeof(tname), "pjsua_%d", ii);
|
|
|
|
#if PJSUA_SEPARATE_WORKER_FOR_TIMER
|
|
@@ -1331,7 +1331,7 @@ PJ_DEF(pj_status_t) pjsua_init( const pjsua_config *ua_cfg,
|
|
if (status != PJ_SUCCESS)
|
|
goto on_error;
|
|
}
|
|
- PJ_LOG(4,(THIS_FILE, "%d SIP worker threads created",
|
|
+ PJ_LOG(4,(THIS_FILE, "%d SIP worker threads created",
|
|
pjsua_var.ua_cfg.thread_cnt));
|
|
} else {
|
|
PJ_LOG(4,(THIS_FILE, "No SIP worker threads created"));
|
|
@@ -1339,7 +1339,7 @@ PJ_DEF(pj_status_t) pjsua_init( const pjsua_config *ua_cfg,
|
|
|
|
/* Done! */
|
|
|
|
- PJ_LOG(3,(THIS_FILE, "pjsua version %s for %s initialized",
|
|
+ PJ_LOG(3,(THIS_FILE, "pjsua version %s for %s initialized",
|
|
pj_get_version(), pj_get_sys_info()->info.ptr));
|
|
|
|
pjsua_set_state(PJSUA_STATE_INIT);
|
|
@@ -1435,7 +1435,7 @@ static void stun_resolve_dec_ref(pjsua_stun_resolve *sess)
|
|
* is allowed to destroy the session, otherwise it may cause deadlock.
|
|
*/
|
|
if ((ref_cnt > 0) ||
|
|
- (sess->blocking && (sess->waiter != pj_thread_this())))
|
|
+ (sess->blocking && (sess->waiter != pj_thread_this())))
|
|
{
|
|
return;
|
|
}
|
|
@@ -1465,7 +1465,7 @@ static void stun_resolve_complete(pjsua_stun_resolve *sess)
|
|
if (result.status == PJ_SUCCESS) {
|
|
char addr[PJ_INET6_ADDRSTRLEN+10];
|
|
pj_sockaddr_print(&result.addr, addr, sizeof(addr), 3);
|
|
- PJ_LOG(4,(THIS_FILE,
|
|
+ PJ_LOG(4,(THIS_FILE,
|
|
"STUN resolution success, using %.*s, address is %s",
|
|
(int)sess->srv[sess->idx].slen,
|
|
sess->srv[sess->idx].ptr,
|
|
@@ -1488,7 +1488,7 @@ on_return:
|
|
* to report it's state. We use this as part of testing the
|
|
* STUN server.
|
|
*/
|
|
-static pj_bool_t test_stun_on_status(pj_stun_sock *stun_sock,
|
|
+static pj_bool_t test_stun_on_status(pj_stun_sock *stun_sock,
|
|
pj_stun_sock_op op,
|
|
pj_status_t status)
|
|
{
|
|
@@ -1553,7 +1553,7 @@ static pj_bool_t test_stun_on_status(pj_stun_sock *stun_sock,
|
|
|
|
} else
|
|
return PJ_TRUE;
|
|
-
|
|
+
|
|
}
|
|
|
|
/* This is an internal function to resolve and test current
|
|
@@ -1574,7 +1574,7 @@ static void resolve_stun_entry(pjsua_stun_resolve *sess)
|
|
pj_str_t hostpart;
|
|
pj_uint16_t port;
|
|
pj_stun_sock_cb stun_sock_cb;
|
|
-
|
|
+
|
|
pj_assert(sess->idx < sess->count);
|
|
|
|
if (pjsua_var.ua_cfg.stun_try_ipv6 &&
|
|
@@ -1584,7 +1584,7 @@ static void resolve_stun_entry(pjsua_stun_resolve *sess)
|
|
/* Skip IPv4 STUN resolution if NAT64 is not disabled. */
|
|
PJ_LOG(4,(THIS_FILE, "Skipping IPv4 resolution of STUN server "
|
|
"%s (%d of %d)", target,
|
|
- sess->idx+1, sess->count));
|
|
+ sess->idx+1, sess->count));
|
|
continue;
|
|
}
|
|
|
|
@@ -1599,7 +1599,7 @@ static void resolve_stun_entry(pjsua_stun_resolve *sess)
|
|
PJ_LOG(2,(THIS_FILE, "Invalid STUN server entry %s", target));
|
|
continue;
|
|
}
|
|
-
|
|
+
|
|
/* Use default port if not specified */
|
|
if (port == 0)
|
|
port = PJ_STUN_PORT;
|
|
@@ -1615,12 +1615,12 @@ static void resolve_stun_entry(pjsua_stun_resolve *sess)
|
|
stun_sock_cb.on_status = &test_stun_on_status;
|
|
sess->async_wait = PJ_FALSE;
|
|
status = pj_stun_sock_create(&pjsua_var.stun_cfg, "stunresolve",
|
|
- sess->af, &stun_sock_cb,
|
|
+ sess->af, PJ_STUN_TP_UDP, &stun_sock_cb,
|
|
NULL, sess, &sess->stun_sock);
|
|
if (status != PJ_SUCCESS) {
|
|
char errmsg[PJ_ERR_MSG_SIZE];
|
|
pj_strerror(status, errmsg, sizeof(errmsg));
|
|
- PJ_LOG(4,(THIS_FILE,
|
|
+ PJ_LOG(4,(THIS_FILE,
|
|
"Error creating STUN socket for %s: %s",
|
|
target, errmsg));
|
|
|
|
@@ -1632,7 +1632,7 @@ static void resolve_stun_entry(pjsua_stun_resolve *sess)
|
|
if (status != PJ_SUCCESS) {
|
|
char errmsg[PJ_ERR_MSG_SIZE];
|
|
pj_strerror(status, errmsg, sizeof(errmsg));
|
|
- PJ_LOG(4,(THIS_FILE,
|
|
+ PJ_LOG(4,(THIS_FILE,
|
|
"Error starting STUN socket for %s: %s",
|
|
target, errmsg));
|
|
|
|
@@ -1672,7 +1672,7 @@ PJ_DEF(pj_status_t) pjsua_update_stun_servers(unsigned count, pj_str_t srv[],
|
|
pj_status_t status;
|
|
|
|
PJ_ASSERT_RETURN(count && srv, PJ_EINVAL);
|
|
-
|
|
+
|
|
PJSUA_LOCK();
|
|
|
|
pjsua_var.ua_cfg.stun_srv_cnt = count;
|
|
@@ -1683,7 +1683,7 @@ PJ_DEF(pj_status_t) pjsua_update_stun_servers(unsigned count, pj_str_t srv[],
|
|
pjsua_var.stun_status = PJ_EUNKNOWN;
|
|
|
|
PJSUA_UNLOCK();
|
|
-
|
|
+
|
|
status = resolve_stun_server(wait, PJ_FALSE, 0);
|
|
if (wait == PJ_FALSE && status == PJ_EPENDING)
|
|
status = PJ_SUCCESS;
|
|
@@ -1743,7 +1743,7 @@ PJ_DEF(pj_status_t) pjsua_resolve_stun_servers( unsigned count,
|
|
*/
|
|
max_wait_ms = count * pjsua_var.stun_cfg.rto_msec * (1 << 7);
|
|
pj_get_timestamp(&start);
|
|
-
|
|
+
|
|
while ((sess->status == PJ_EPENDING) && (!sess->destroy_flag)) {
|
|
/* If there is no worker thread or
|
|
* the function is called from the only worker thread,
|
|
@@ -1794,7 +1794,7 @@ PJ_DEF(pj_status_t) pjsua_cancel_stun_resolution( void *token,
|
|
result.status = PJ_ECANCELLED;
|
|
|
|
sess->cb(&result);
|
|
- }
|
|
+ }
|
|
++cancelled_count;
|
|
}
|
|
|
|
@@ -1820,7 +1820,7 @@ static void internal_stun_resolve_cb(const pj_stun_resolve_result *result)
|
|
pjsua_detect_nat_type();
|
|
}
|
|
}
|
|
-
|
|
+
|
|
if (pjsua_var.ua_cfg.cb.on_stun_resolution_complete)
|
|
(*pjsua_var.ua_cfg.cb.on_stun_resolution_complete)(result);
|
|
}
|
|
@@ -1879,7 +1879,7 @@ pj_status_t resolve_stun_server(pj_bool_t wait, pj_bool_t retry_if_cur_error,
|
|
pjsua_var.stun_cfg.rto_msec * (1 << 7);
|
|
pj_get_timestamp(&start);
|
|
|
|
- while (pjsua_var.stun_status == PJ_EPENDING) {
|
|
+ while (pjsua_var.stun_status == PJ_EPENDING) {
|
|
/* If there is no worker thread or
|
|
* the function is called from the only worker thread,
|
|
* we have to handle the events here.
|
|
@@ -1904,7 +1904,7 @@ pj_status_t resolve_stun_server(pj_bool_t wait, pj_bool_t retry_if_cur_error,
|
|
pjsua_var.stun_status != PJ_SUCCESS &&
|
|
pjsua_var.ua_cfg.stun_ignore_failure)
|
|
{
|
|
- PJ_LOG(2,(THIS_FILE,
|
|
+ PJ_LOG(2,(THIS_FILE,
|
|
"Ignoring STUN resolution failure (by setting)"));
|
|
//pjsua_var.stun_status = PJ_SUCCESS;
|
|
return PJ_SUCCESS;
|
|
@@ -1932,7 +1932,7 @@ PJ_DEF(pj_status_t) pjsua_destroy2(unsigned flags)
|
|
|
|
/* Signal threads to quit: */
|
|
pjsua_stop_worker_threads();
|
|
-
|
|
+
|
|
if (pjsua_var.endpt) {
|
|
unsigned max_wait;
|
|
|
|
@@ -1973,7 +1973,7 @@ PJ_DEF(pj_status_t) pjsua_destroy2(unsigned flags)
|
|
if (pjsua_var.acc[i].cfg.unpublish_max_wait_time_msec > max_wait)
|
|
max_wait = pjsua_var.acc[i].cfg.unpublish_max_wait_time_msec;
|
|
}
|
|
-
|
|
+
|
|
/* No waiting if RX is disabled */
|
|
if (flags & PJSUA_DESTROY_NO_RX_MSG) {
|
|
max_wait = 0;
|
|
@@ -2027,7 +2027,7 @@ PJ_DEF(pj_status_t) pjsua_destroy2(unsigned flags)
|
|
if (pjsua_var.acc[i].cfg.unreg_timeout > max_wait)
|
|
max_wait = pjsua_var.acc[i].cfg.unreg_timeout;
|
|
}
|
|
-
|
|
+
|
|
/* No waiting if RX is disabled */
|
|
if (flags & PJSUA_DESTROY_NO_RX_MSG) {
|
|
max_wait = 0;
|
|
@@ -2051,14 +2051,14 @@ PJ_DEF(pj_status_t) pjsua_destroy2(unsigned flags)
|
|
/* Note variable 'i' is used below */
|
|
|
|
/* Wait for some time to allow unregistration and ICE/TURN
|
|
- * transports shutdown to complete:
|
|
+ * transports shutdown to complete:
|
|
*/
|
|
if (i < 20 && (flags & PJSUA_DESTROY_NO_RX_MSG) == 0) {
|
|
busy_sleep(1000 - i*50);
|
|
}
|
|
|
|
PJ_LOG(4,(THIS_FILE, "Destroying..."));
|
|
-
|
|
+
|
|
/* Terminate any pending STUN resolution */
|
|
if (!pj_list_empty(&pjsua_var.stun_res)) {
|
|
pjsua_stun_resolve *sess = pjsua_var.stun_res.next;
|
|
@@ -2073,7 +2073,7 @@ PJ_DEF(pj_status_t) pjsua_destroy2(unsigned flags)
|
|
for (i = 0; i < (int)PJ_ARRAY_SIZE(pjsua_var.tpdata); i++) {
|
|
if (pjsua_var.tpdata[i].data.ptr) {
|
|
pjsip_transport_type_e tp_type;
|
|
-
|
|
+
|
|
tp_type = pjsua_var.tpdata[i].type & ~PJSIP_TRANSPORT_IPV6;
|
|
if ((flags & PJSUA_DESTROY_NO_TX_MSG) &&
|
|
tp_type == PJSIP_TRANSPORT_UDP &&
|
|
@@ -2131,7 +2131,7 @@ PJ_DEF(pj_status_t) pjsua_destroy2(unsigned flags)
|
|
pj_mutex_destroy(pjsua_var.mutex);
|
|
pjsua_var.mutex = NULL;
|
|
}
|
|
-
|
|
+
|
|
if (pjsua_var.timer_mutex) {
|
|
pj_mutex_destroy(pjsua_var.timer_mutex);
|
|
pjsua_var.timer_mutex = NULL;
|
|
@@ -2202,7 +2202,7 @@ PJ_DEF(pj_status_t) pjsua_destroy(void)
|
|
/**
|
|
* Application is recommended to call this function after all initialization
|
|
* is done, so that the library can do additional checking set up
|
|
- * additional
|
|
+ * additional
|
|
*
|
|
* @return PJ_SUCCESS on success, or the appropriate error code.
|
|
*/
|
|
@@ -2259,7 +2259,7 @@ PJ_DEF(int) pjsua_handle_events(unsigned msec_timeout)
|
|
return -status;
|
|
|
|
return count;
|
|
-
|
|
+
|
|
#endif
|
|
}
|
|
|
|
@@ -2271,7 +2271,7 @@ PJ_DEF(pj_pool_t*) pjsua_pool_create( const char *name, pj_size_t init_size,
|
|
pj_size_t increment)
|
|
{
|
|
/* Pool factory is thread safe, no need to lock */
|
|
- return pj_pool_create(&pjsua_var.cp.factory, name, init_size, increment,
|
|
+ return pj_pool_create(&pjsua_var.cp.factory, name, init_size, increment,
|
|
NULL);
|
|
}
|
|
|
|
@@ -2314,7 +2314,7 @@ static const char *addr_string(const pj_sockaddr_t *addr)
|
|
{
|
|
static char str[128];
|
|
str[0] = '\0';
|
|
- pj_inet_ntop(((const pj_sockaddr*)addr)->addr.sa_family,
|
|
+ pj_inet_ntop(((const pj_sockaddr*)addr)->addr.sa_family,
|
|
pj_sockaddr_get_addr(addr),
|
|
str, sizeof(str));
|
|
return str;
|
|
@@ -2378,11 +2378,11 @@ static pj_status_t create_sip_udp_sock(int af,
|
|
|
|
/* Initialize bound address */
|
|
if (cfg->bound_addr.slen) {
|
|
- status = pj_sockaddr_init(af, &bind_addr, &cfg->bound_addr,
|
|
+ status = pj_sockaddr_init(af, &bind_addr, &cfg->bound_addr,
|
|
(pj_uint16_t)port);
|
|
if (status != PJ_SUCCESS) {
|
|
- pjsua_perror(THIS_FILE,
|
|
- "Unable to resolve transport bound address",
|
|
+ pjsua_perror(THIS_FILE,
|
|
+ "Unable to resolve transport bound address",
|
|
status);
|
|
return status;
|
|
}
|
|
@@ -2398,8 +2398,8 @@ static pj_status_t create_sip_udp_sock(int af,
|
|
}
|
|
|
|
/* Apply QoS, if specified */
|
|
- status = pj_sock_apply_qos2(sock, cfg->qos_type,
|
|
- &cfg->qos_params,
|
|
+ status = pj_sock_apply_qos2(sock, cfg->qos_type,
|
|
+ &cfg->qos_params,
|
|
2, THIS_FILE, "SIP UDP socket");
|
|
|
|
/* Apply sockopt, if specified */
|
|
@@ -2446,7 +2446,7 @@ static pj_status_t create_sip_udp_sock(int af,
|
|
status = PJ_SUCCESS;
|
|
if (pj_sockaddr_has_addr(p_pub_addr)) {
|
|
/*
|
|
- * Public address is already specified, no need to resolve the
|
|
+ * Public address is already specified, no need to resolve the
|
|
* address, only set the port.
|
|
*/
|
|
if (pj_sockaddr_get_port(p_pub_addr) == 0)
|
|
@@ -2466,14 +2466,14 @@ static pj_status_t create_sip_udp_sock(int af,
|
|
stun_opt.use_stun2 = pjsua_var.ua_cfg.stun_map_use_stun2;
|
|
stun_opt.af = pjsua_var.stun_srv.addr.sa_family;
|
|
stun_opt.srv1 = stun_opt.srv2 = stun_srv;
|
|
- stun_opt.port1 = stun_opt.port2 =
|
|
+ stun_opt.port1 = stun_opt.port2 =
|
|
pj_sockaddr_get_port(&pjsua_var.stun_srv);
|
|
status = pjstun_get_mapped_addr2(&pjsua_var.cp.factory, &stun_opt,
|
|
1, &sock, &p_pub_addr->ipv4);
|
|
if (status != PJ_SUCCESS) {
|
|
/* Failed getting mapped address via STUN */
|
|
pjsua_perror(THIS_FILE, "Error contacting STUN server", status);
|
|
-
|
|
+
|
|
/* Return error if configured to not ignore STUN failure */
|
|
if (!pjsua_var.ua_cfg.stun_ignore_failure) {
|
|
pj_sock_close(sock);
|
|
@@ -2573,20 +2573,20 @@ PJ_DEF(pj_status_t) pjsua_transport_create( pjsip_transport_type_e type,
|
|
}
|
|
|
|
/* Initialize the public address from the config, if any */
|
|
- pj_sockaddr_init(pjsip_transport_type_get_af(type), &pub_addr,
|
|
+ pj_sockaddr_init(pjsip_transport_type_get_af(type), &pub_addr,
|
|
NULL, (pj_uint16_t)cfg->port);
|
|
if (cfg->public_addr.slen) {
|
|
status = pj_sockaddr_set_str_addr(pjsip_transport_type_get_af(type),
|
|
&pub_addr, &cfg->public_addr);
|
|
if (status != PJ_SUCCESS) {
|
|
- pjsua_perror(THIS_FILE,
|
|
- "Unable to resolve transport public address",
|
|
+ pjsua_perror(THIS_FILE,
|
|
+ "Unable to resolve transport public address",
|
|
status);
|
|
goto on_return;
|
|
}
|
|
}
|
|
|
|
- /* Create the socket and possibly resolve the address with STUN
|
|
+ /* Create the socket and possibly resolve the address with STUN
|
|
* (only when public address is not specified).
|
|
*/
|
|
status = create_sip_udp_sock(pjsip_transport_type_get_af(type),
|
|
@@ -2602,7 +2602,7 @@ PJ_DEF(pj_status_t) pjsua_transport_create( pjsip_transport_type_e type,
|
|
status = pjsip_udp_transport_attach2(pjsua_var.endpt, type, sock,
|
|
&addr_name, 1, &tp);
|
|
if (status != PJ_SUCCESS) {
|
|
- pjsua_perror(THIS_FILE, "Error creating SIP UDP transport",
|
|
+ pjsua_perror(THIS_FILE, "Error creating SIP UDP transport",
|
|
status);
|
|
pj_sock_close(sock);
|
|
goto on_return;
|
|
@@ -2642,12 +2642,12 @@ PJ_DEF(pj_status_t) pjsua_transport_create( pjsip_transport_type_e type,
|
|
pj_sockaddr_set_port(&tcp_cfg.bind_addr, (pj_uint16_t)cfg->port);
|
|
|
|
if (cfg->bound_addr.slen) {
|
|
- status = pj_sockaddr_set_str_addr(tcp_cfg.af,
|
|
+ status = pj_sockaddr_set_str_addr(tcp_cfg.af,
|
|
&tcp_cfg.bind_addr,
|
|
&cfg->bound_addr);
|
|
if (status != PJ_SUCCESS) {
|
|
- pjsua_perror(THIS_FILE,
|
|
- "Unable to resolve transport bound address",
|
|
+ pjsua_perror(THIS_FILE,
|
|
+ "Unable to resolve transport bound address",
|
|
status);
|
|
goto on_return;
|
|
}
|
|
@@ -2659,7 +2659,7 @@ PJ_DEF(pj_status_t) pjsua_transport_create( pjsip_transport_type_e type,
|
|
|
|
/* Copy the QoS settings */
|
|
tcp_cfg.qos_type = cfg->qos_type;
|
|
- pj_memcpy(&tcp_cfg.qos_params, &cfg->qos_params,
|
|
+ pj_memcpy(&tcp_cfg.qos_params, &cfg->qos_params,
|
|
sizeof(cfg->qos_params));
|
|
|
|
/* Copy the sockopt */
|
|
@@ -2670,7 +2670,7 @@ PJ_DEF(pj_status_t) pjsua_transport_create( pjsip_transport_type_e type,
|
|
status = pjsip_tcp_transport_start3(pjsua_var.endpt, &tcp_cfg, &tcp);
|
|
|
|
if (status != PJ_SUCCESS) {
|
|
- pjsua_perror(THIS_FILE, "Error creating SIP TCP listener",
|
|
+ pjsua_perror(THIS_FILE, "Error creating SIP TCP listener",
|
|
status);
|
|
goto on_return;
|
|
}
|
|
@@ -2711,8 +2711,8 @@ PJ_DEF(pj_status_t) pjsua_transport_create( pjsip_transport_type_e type,
|
|
status = pj_sockaddr_set_str_addr(af, &local_addr,
|
|
&cfg->bound_addr);
|
|
if (status != PJ_SUCCESS) {
|
|
- pjsua_perror(THIS_FILE,
|
|
- "Unable to resolve transport bound address",
|
|
+ pjsua_perror(THIS_FILE,
|
|
+ "Unable to resolve transport bound address",
|
|
status);
|
|
goto on_return;
|
|
}
|
|
@@ -2726,7 +2726,7 @@ PJ_DEF(pj_status_t) pjsua_transport_create( pjsip_transport_type_e type,
|
|
status = pjsip_tls_transport_start2(pjsua_var.endpt, &cfg->tls_setting,
|
|
&local_addr, &a_name, 1, &tls);
|
|
if (status != PJ_SUCCESS) {
|
|
- pjsua_perror(THIS_FILE, "Error creating SIP TLS listener",
|
|
+ pjsua_perror(THIS_FILE, "Error creating SIP TLS listener",
|
|
status);
|
|
goto on_return;
|
|
}
|
|
@@ -2847,8 +2847,8 @@ PJ_DEF(pj_status_t) pjsua_enum_transports( pjsua_transport_id id[],
|
|
|
|
PJSUA_LOCK();
|
|
|
|
- for (i=0, count=0; i<PJ_ARRAY_SIZE(pjsua_var.tpdata) && count<*p_count;
|
|
- ++i)
|
|
+ for (i=0, count=0; i<PJ_ARRAY_SIZE(pjsua_var.tpdata) && count<*p_count;
|
|
+ ++i)
|
|
{
|
|
if (!pjsua_var.tpdata[i].data.ptr)
|
|
continue;
|
|
@@ -2876,7 +2876,7 @@ PJ_DEF(pj_status_t) pjsua_transport_get_info( pjsua_transport_id id,
|
|
pj_bzero(info, sizeof(*info));
|
|
|
|
/* Make sure id is in range. */
|
|
- PJ_ASSERT_RETURN(id>=0 && id<(int)PJ_ARRAY_SIZE(pjsua_var.tpdata),
|
|
+ PJ_ASSERT_RETURN(id>=0 && id<(int)PJ_ARRAY_SIZE(pjsua_var.tpdata),
|
|
PJ_EINVAL);
|
|
|
|
/* Make sure that transport exists */
|
|
@@ -2892,7 +2892,7 @@ PJ_DEF(pj_status_t) pjsua_transport_get_info( pjsua_transport_id id,
|
|
PJSUA_UNLOCK();
|
|
return PJ_EINVALIDOP;
|
|
}
|
|
-
|
|
+
|
|
info->id = id;
|
|
info->type = (pjsip_transport_type_e) tp->key.type;
|
|
info->type_name = pj_str(tp->type_name);
|
|
@@ -2915,7 +2915,7 @@ PJ_DEF(pj_status_t) pjsua_transport_get_info( pjsua_transport_id id,
|
|
PJSUA_UNLOCK();
|
|
return PJ_EINVALIDOP;
|
|
}
|
|
-
|
|
+
|
|
info->id = id;
|
|
info->type = t->type;
|
|
info->type_name = pj_str(factory->type_name);
|
|
@@ -2948,7 +2948,7 @@ PJ_DEF(pj_status_t) pjsua_transport_set_enable( pjsua_transport_id id,
|
|
pj_bool_t enabled)
|
|
{
|
|
/* Make sure id is in range. */
|
|
- PJ_ASSERT_RETURN(id>=0 && id<(int)PJ_ARRAY_SIZE(pjsua_var.tpdata),
|
|
+ PJ_ASSERT_RETURN(id>=0 && id<(int)PJ_ARRAY_SIZE(pjsua_var.tpdata),
|
|
PJ_EINVAL);
|
|
|
|
/* Make sure that transport exists */
|
|
@@ -2974,7 +2974,7 @@ PJ_DEF(pj_status_t) pjsua_transport_close( pjsua_transport_id id,
|
|
pjsip_transport_type_e tp_type;
|
|
|
|
/* Make sure id is in range. */
|
|
- PJ_ASSERT_RETURN(id>=0 && id<(int)PJ_ARRAY_SIZE(pjsua_var.tpdata),
|
|
+ PJ_ASSERT_RETURN(id>=0 && id<(int)PJ_ARRAY_SIZE(pjsua_var.tpdata),
|
|
PJ_EINVAL);
|
|
|
|
/* Make sure that transport exists */
|
|
@@ -2989,7 +2989,7 @@ PJ_DEF(pj_status_t) pjsua_transport_close( pjsua_transport_id id,
|
|
*/
|
|
PJ_LOG(1, (THIS_FILE, "pjsua_transport_close(force=PJ_TRUE) is "
|
|
"deprecated."));
|
|
-
|
|
+
|
|
/* To minimize the effect to users, we shouldn't hard-deprecate this
|
|
* and let it continue as if force is false.
|
|
*/
|
|
@@ -3019,7 +3019,7 @@ PJ_DEF(pj_status_t) pjsua_transport_close( pjsua_transport_id id,
|
|
case PJSIP_TRANSPORT_TLS:
|
|
case PJSIP_TRANSPORT_TCP:
|
|
/* This will close the TCP listener, but existing TCP/TLS
|
|
- * connections (if any) will still linger
|
|
+ * connections (if any) will still linger
|
|
*/
|
|
status = (*pjsua_var.tpdata[id].data.factory->destroy)
|
|
(pjsua_var.tpdata[id].data.factory);
|
|
@@ -3048,31 +3048,31 @@ PJ_DEF(pj_status_t) pjsua_transport_lis_start(pjsua_transport_id id,
|
|
pjsip_transport_type_e tp_type;
|
|
|
|
/* Make sure id is in range. */
|
|
- PJ_ASSERT_RETURN(id>=0 && id<(int)PJ_ARRAY_SIZE(pjsua_var.tpdata),
|
|
+ PJ_ASSERT_RETURN(id>=0 && id<(int)PJ_ARRAY_SIZE(pjsua_var.tpdata),
|
|
PJ_EINVAL);
|
|
|
|
/* Make sure that transport exists */
|
|
PJ_ASSERT_RETURN(pjsua_var.tpdata[id].data.ptr != NULL, PJ_EINVAL);
|
|
|
|
tp_type = pjsua_var.tpdata[id].type & ~PJSIP_TRANSPORT_IPV6;
|
|
-
|
|
+
|
|
if ((tp_type == PJSIP_TRANSPORT_TLS) || (tp_type == PJSIP_TRANSPORT_TCP)) {
|
|
pj_sockaddr bind_addr;
|
|
pjsip_host_port addr_name;
|
|
pjsip_tpfactory *factory = pjsua_var.tpdata[id].data.factory;
|
|
-
|
|
+
|
|
int af = pjsip_transport_type_get_af(factory->type);
|
|
|
|
if (cfg->port)
|
|
pj_sockaddr_init(af, &bind_addr, NULL, (pj_uint16_t)cfg->port);
|
|
|
|
if (cfg->bound_addr.slen) {
|
|
- status = pj_sockaddr_set_str_addr(af,
|
|
+ status = pj_sockaddr_set_str_addr(af,
|
|
&bind_addr,
|
|
&cfg->bound_addr);
|
|
if (status != PJ_SUCCESS) {
|
|
- pjsua_perror(THIS_FILE,
|
|
- "Unable to resolve transport bound address",
|
|
+ pjsua_perror(THIS_FILE,
|
|
+ "Unable to resolve transport bound address",
|
|
status);
|
|
return status;
|
|
}
|
|
@@ -3090,9 +3090,9 @@ PJ_DEF(pj_status_t) pjsua_transport_lis_start(pjsua_transport_id id,
|
|
#if defined(PJSIP_HAS_TLS_TRANSPORT) && PJSIP_HAS_TLS_TRANSPORT!=0
|
|
else {
|
|
status = pjsip_tls_transport_lis_start(factory, &bind_addr,
|
|
- &addr_name);
|
|
+ &addr_name);
|
|
}
|
|
-#endif
|
|
+#endif
|
|
} else if (tp_type == PJSIP_TRANSPORT_UDP) {
|
|
status = PJ_SUCCESS;
|
|
} else {
|
|
@@ -3113,13 +3113,13 @@ void pjsua_process_msg_data(pjsip_tx_data *tdata,
|
|
const pjsip_hdr *hdr;
|
|
|
|
/* Always add User-Agent */
|
|
- if (pjsua_var.ua_cfg.user_agent.slen &&
|
|
- tdata->msg->type == PJSIP_REQUEST_MSG)
|
|
+ if (pjsua_var.ua_cfg.user_agent.slen &&
|
|
+ tdata->msg->type == PJSIP_REQUEST_MSG)
|
|
{
|
|
const pj_str_t STR_USER_AGENT = { "User-Agent", 10 };
|
|
pjsip_hdr *h;
|
|
- h = (pjsip_hdr*)pjsip_generic_string_hdr_create(tdata->pool,
|
|
- &STR_USER_AGENT,
|
|
+ h = (pjsip_hdr*)pjsip_generic_string_hdr_create(tdata->pool,
|
|
+ &STR_USER_AGENT,
|
|
&pjsua_var.ua_cfg.user_agent);
|
|
pjsip_msg_add_hdr(tdata->msg, h);
|
|
}
|
|
@@ -3141,7 +3141,7 @@ void pjsua_process_msg_data(pjsip_tx_data *tdata,
|
|
|
|
if (allow_body && msg_data->content_type.slen && msg_data->msg_body.slen) {
|
|
pjsip_media_type ctype;
|
|
- pjsip_msg_body *body;
|
|
+ pjsip_msg_body *body;
|
|
|
|
pjsua_parse_media_type(tdata->pool, &msg_data->content_type, &ctype);
|
|
body = pjsip_msg_body_create(tdata->pool, &ctype.type, &ctype.subtype,
|
|
@@ -3218,9 +3218,9 @@ void pjsua_parse_media_type( pj_pool_t *pool,
|
|
|
|
pos = pj_strchr(&tmp, '/');
|
|
if (pos) {
|
|
- media_type->type.ptr = tmp.ptr;
|
|
+ media_type->type.ptr = tmp.ptr;
|
|
media_type->type.slen = (pos-tmp.ptr);
|
|
- media_type->subtype.ptr = pos+1;
|
|
+ media_type->subtype.ptr = pos+1;
|
|
media_type->subtype.slen = tmp.ptr+tmp.slen-pos-1;
|
|
} else {
|
|
media_type->type = tmp;
|
|
@@ -3242,7 +3242,7 @@ void pjsua_init_tpselector(pjsua_acc_id acc_id,
|
|
pjsua_transport_data *tpdata;
|
|
unsigned flag;
|
|
|
|
- PJ_ASSERT_RETURN(acc->cfg.transport_id >= 0 &&
|
|
+ PJ_ASSERT_RETURN(acc->cfg.transport_id >= 0 &&
|
|
acc->cfg.transport_id <
|
|
(int)PJ_ARRAY_SIZE(pjsua_var.tpdata), );
|
|
tpdata = &pjsua_var.tpdata[acc->cfg.transport_id];
|
|
@@ -3273,7 +3273,7 @@ PJ_DEF(void) pjsua_ip_change_param_default(pjsua_ip_change_param *param)
|
|
|
|
|
|
/* Callback upon NAT detection completion */
|
|
-static void nat_detect_cb(void *user_data,
|
|
+static void nat_detect_cb(void *user_data,
|
|
const pj_stun_nat_detect_result *res)
|
|
{
|
|
PJ_UNUSED_ARG(user_data);
|
|
@@ -3312,8 +3312,8 @@ PJ_DEF(pj_status_t) pjsua_detect_nat_type()
|
|
return PJNATH_ESTUNINSERVER;
|
|
}
|
|
|
|
- status = pj_stun_detect_nat_type2(&pjsua_var.stun_srv,
|
|
- &pjsua_var.stun_cfg,
|
|
+ status = pj_stun_detect_nat_type2(&pjsua_var.stun_srv,
|
|
+ &pjsua_var.stun_cfg,
|
|
NULL, &nat_detect_cb);
|
|
|
|
if (status != PJ_SUCCESS) {
|
|
@@ -3391,7 +3391,7 @@ PJ_DEF(pj_status_t) pjsua_verify_sip_url(const char *c_url)
|
|
}
|
|
|
|
/*
|
|
- * Schedule a timer entry.
|
|
+ * Schedule a timer entry.
|
|
*/
|
|
#if PJ_TIMER_DEBUG
|
|
PJ_DEF(pj_status_t) pjsua_schedule_timer_dbg( pj_timer_entry *entry,
|
|
@@ -3430,7 +3430,7 @@ static void timer_cb( pj_timer_heap_t *th,
|
|
}
|
|
|
|
/*
|
|
- * Schedule a timer callback.
|
|
+ * Schedule a timer callback.
|
|
*/
|
|
#if PJ_TIMER_DEBUG
|
|
PJ_DEF(pj_status_t) pjsua_schedule_timer2_dbg( void (*cb)(void *user_data),
|
|
@@ -3488,7 +3488,7 @@ PJ_DEF(void) pjsua_cancel_timer(pj_timer_entry *entry)
|
|
pjsip_endpt_cancel_timer(pjsua_var.endpt, entry);
|
|
}
|
|
|
|
-/**
|
|
+/**
|
|
* Normalize route URI (check for ";lr" and append one if it doesn't
|
|
* exist and pjsua_config.force_lr is set.
|
|
*/
|
|
@@ -3507,16 +3507,16 @@ pj_status_t normalize_route_uri(pj_pool_t *pool, pj_str_t *uri)
|
|
|
|
uri_obj = pjsip_parse_uri(tmp_pool, tmp_uri.ptr, tmp_uri.slen, 0);
|
|
if (!uri_obj) {
|
|
- PJ_LOG(1,(THIS_FILE, "Invalid route URI: %.*s",
|
|
+ PJ_LOG(1,(THIS_FILE, "Invalid route URI: %.*s",
|
|
(int)uri->slen, uri->ptr));
|
|
pj_pool_release(tmp_pool);
|
|
return PJSIP_EINVALIDURI;
|
|
}
|
|
|
|
- if (!PJSIP_URI_SCHEME_IS_SIP(uri_obj) &&
|
|
+ if (!PJSIP_URI_SCHEME_IS_SIP(uri_obj) &&
|
|
!PJSIP_URI_SCHEME_IS_SIPS(uri_obj))
|
|
{
|
|
- PJ_LOG(1,(THIS_FILE, "Route URI must be SIP URI: %.*s",
|
|
+ PJ_LOG(1,(THIS_FILE, "Route URI must be SIP URI: %.*s",
|
|
(int)uri->slen, uri->ptr));
|
|
pj_pool_release(tmp_pool);
|
|
return PJSIP_EINVALIDSCHEME;
|
|
@@ -3535,10 +3535,10 @@ pj_status_t normalize_route_uri(pj_pool_t *pool, pj_str_t *uri)
|
|
|
|
/* Print the URI */
|
|
tmp_uri.ptr = (char*) pj_pool_alloc(tmp_pool, PJSIP_MAX_URL_SIZE);
|
|
- tmp_uri.slen = pjsip_uri_print(PJSIP_URI_IN_ROUTING_HDR, uri_obj,
|
|
+ tmp_uri.slen = pjsip_uri_print(PJSIP_URI_IN_ROUTING_HDR, uri_obj,
|
|
tmp_uri.ptr, PJSIP_MAX_URL_SIZE);
|
|
if (tmp_uri.slen < 1) {
|
|
- PJ_LOG(1,(THIS_FILE, "Route URI is too long: %.*s",
|
|
+ PJ_LOG(1,(THIS_FILE, "Route URI is too long: %.*s",
|
|
(int)uri->slen, uri->ptr));
|
|
pj_pool_release(tmp_pool);
|
|
return PJSIP_EURITOOLONG;
|
|
@@ -3623,7 +3623,7 @@ PJ_DEF(void) pjsua_dump(pj_bool_t detail)
|
|
pjsip_tsx_layer_dump(detail);
|
|
pjsip_ua_dump(detail);
|
|
|
|
-// Dumping complete call states may require a 'large' buffer
|
|
+// Dumping complete call states may require a 'large' buffer
|
|
// (about 3KB per call session, including RTCP XR).
|
|
#if 0
|
|
/* Dump all invite sessions: */
|
|
@@ -3638,7 +3638,7 @@ PJ_DEF(void) pjsua_dump(pj_bool_t detail)
|
|
|
|
for (i=0; i<pjsua_var.ua_cfg.max_calls; ++i) {
|
|
if (pjsua_call_is_active(i)) {
|
|
- /* Tricky logging, since call states log string tends to be
|
|
+ /* Tricky logging, since call states log string tends to be
|
|
* longer than PJ_LOG_MAX_SIZE.
|
|
*/
|
|
char buf[1024 * 3];
|
|
@@ -3651,7 +3651,7 @@ PJ_DEF(void) pjsua_dump(pj_bool_t detail)
|
|
call_dump_len = strlen(buf);
|
|
|
|
log_decor = pj_log_get_decor();
|
|
- pj_log_set_decor(log_decor & ~(PJ_LOG_HAS_NEWLINE |
|
|
+ pj_log_set_decor(log_decor & ~(PJ_LOG_HAS_NEWLINE |
|
|
PJ_LOG_HAS_CR));
|
|
PJ_LOG(3,(THIS_FILE, "\n"));
|
|
pj_log_set_decor(0);
|
|
@@ -3869,7 +3869,7 @@ static pj_status_t restart_listener(pjsua_transport_id id,
|
|
|
|
switch (tp_info.type) {
|
|
case PJSIP_TRANSPORT_UDP:
|
|
- case PJSIP_TRANSPORT_UDP6:
|
|
+ case PJSIP_TRANSPORT_UDP6:
|
|
{
|
|
unsigned num_locks = 0;
|
|
|