/* GStreamer
 * Copyright (C) 2008 Wim Taymans <wim.taymans at gmail.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Library General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Library General Public License for more details.
 *
 * You should have received a copy of the GNU Library General Public
 * License along with this library; if not, write to the
 * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
 * Boston, MA 02110-1301, USA.
 */

/**
 * SECTION:rtsp-stream-transport
 * @short_description: A media stream transport configuration
 * @see_also: #GstRTSPStream, #GstRTSPSessionMedia
 *
 * The #GstRTSPStreamTransport configures the transport used by a
 * #GstRTSPStream. It is usually managed by a #GstRTSPSessionMedia object.
 *
 * With gst_rtsp_stream_transport_set_callbacks(), callbacks can be configured
 * to handle the RTP and RTCP packets from the stream, for example when they
 * need to be sent over TCP.
 *
 * With gst_rtsp_stream_transport_set_active() the transports are added and
 * removed from the stream.
 *
 * A #GstRTSPStream will call gst_rtsp_stream_transport_keep_alive() when RTCP
 * is received from the client. It will also call
 * gst_rtsp_stream_transport_set_timed_out() when a receiver has timed out.
 *
 * A #GstRTSPClient will call gst_rtsp_stream_transport_message_sent() when it
 * has sent a data message for the transport.
 *
 * Last reviewed on 2013-07-16 (1.0.0)
 */
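
/* Illustrative sketch only (not part of this file): an owner object, in the
 * style of GstRTSPSessionMedia, would typically wire a transport up roughly
 * like this. The names stream, tr, client, send_rtp_cb and send_rtcp_cb are
 * hypothetical and stand for objects and callbacks provided by the caller:
 *
 *   GstRTSPStreamTransport *st;
 *
 *   st = gst_rtsp_stream_transport_new (stream, tr);
 *   gst_rtsp_stream_transport_set_callbacks (st, send_rtp_cb, send_rtcp_cb,
 *       client, NULL);
 *   gst_rtsp_stream_transport_set_active (st, TRUE);
 *   ...
 *   gst_rtsp_stream_transport_set_active (st, FALSE);
 *   g_object_unref (st);
 */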

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <string.h>
#include <stdlib.h>

#include "rtsp-stream-transport.h"
#include "rtsp-server-internal.h"

struct _GstRTSPStreamTransportPrivate
{
  GstRTSPStream *stream;

  GstRTSPSendFunc send_rtp;
  GstRTSPSendFunc send_rtcp;
  gpointer user_data;
  GDestroyNotify notify;

  GstRTSPSendListFunc send_rtp_list;
  GstRTSPSendListFunc send_rtcp_list;
  gpointer list_user_data;
  GDestroyNotify list_notify;

  GstRTSPBackPressureFunc back_pressure_func;
  gpointer back_pressure_func_data;
  GDestroyNotify back_pressure_func_notify;

  GstRTSPKeepAliveFunc keep_alive;
  gpointer ka_user_data;
  GDestroyNotify ka_notify;
  gboolean timed_out;

  GstRTSPMessageSentFunc message_sent;
  gpointer ms_user_data;
  GDestroyNotify ms_notify;

  GstRTSPMessageSentFuncFull message_sent_full;
  gpointer msf_user_data;
  GDestroyNotify msf_notify;

  GstRTSPTransport *transport;
  GstRTSPUrl *url;

  GObject *rtpsource;

  /* TCP backlog */
  GstClockTime first_rtp_timestamp;
  GstVecDeque *items;
  GRecMutex backlog_lock;
};
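
/* Queued TCP data messages are kept in a per-transport backlog (a GstVecDeque
 * of BackLogItem entries, see below). gst_rtsp_stream_transport_backlog_push()
 * starts returning FALSE once the queued RTP data spans more than
 * MAX_BACKLOG_DURATION and more than MAX_BACKLOG_SIZE items are queued. */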

#define MAX_BACKLOG_DURATION (10 * GST_SECOND)
#define MAX_BACKLOG_SIZE 100

typedef struct
{
  GstBuffer *buffer;
  GstBufferList *buffer_list;
  gboolean is_rtp;
} BackLogItem;

enum
{
  PROP_0,
  PROP_LAST
};

GST_DEBUG_CATEGORY_STATIC (rtsp_stream_transport_debug);
#define GST_CAT_DEFAULT rtsp_stream_transport_debug

static void gst_rtsp_stream_transport_finalize (GObject * obj);

G_DEFINE_TYPE_WITH_PRIVATE (GstRTSPStreamTransport, gst_rtsp_stream_transport,
    G_TYPE_OBJECT);

static void
gst_rtsp_stream_transport_class_init (GstRTSPStreamTransportClass * klass)
{
  GObjectClass *gobject_class;

  gobject_class = G_OBJECT_CLASS (klass);

  gobject_class->finalize = gst_rtsp_stream_transport_finalize;

  GST_DEBUG_CATEGORY_INIT (rtsp_stream_transport_debug, "rtspmediatransport",
      0, "GstRTSPStreamTransport");
}

static void
clear_backlog_item (BackLogItem * item)
{
  gst_clear_buffer (&item->buffer);
  gst_clear_buffer_list (&item->buffer_list);
}

static void
gst_rtsp_stream_transport_init (GstRTSPStreamTransport * trans)
{
  trans->priv = gst_rtsp_stream_transport_get_instance_private (trans);
  trans->priv->items = gst_vec_deque_new_for_struct (sizeof (BackLogItem), 0);
  trans->priv->first_rtp_timestamp = GST_CLOCK_TIME_NONE;
  gst_vec_deque_set_clear_func (trans->priv->items,
      (GDestroyNotify) clear_backlog_item);
  g_rec_mutex_init (&trans->priv->backlog_lock);
}

static void
gst_rtsp_stream_transport_finalize (GObject * obj)
{
  GstRTSPStreamTransportPrivate *priv;
  GstRTSPStreamTransport *trans;

  trans = GST_RTSP_STREAM_TRANSPORT (obj);
  priv = trans->priv;

  /* remove callbacks now */
  gst_rtsp_stream_transport_set_callbacks (trans, NULL, NULL, NULL, NULL);
  gst_rtsp_stream_transport_set_keepalive (trans, NULL, NULL, NULL);
  gst_rtsp_stream_transport_set_message_sent (trans, NULL, NULL, NULL);

  if (priv->stream)
    g_object_unref (priv->stream);

  if (priv->transport)
    gst_rtsp_transport_free (priv->transport);

  if (priv->url)
    gst_rtsp_url_free (priv->url);

  gst_vec_deque_free (priv->items);

  g_rec_mutex_clear (&priv->backlog_lock);

  G_OBJECT_CLASS (gst_rtsp_stream_transport_parent_class)->finalize (obj);
}

/**
 * gst_rtsp_stream_transport_new:
 * @stream: a #GstRTSPStream
 * @tr: (transfer full): a GstRTSPTransport
 *
 * Create a new #GstRTSPStreamTransport that can be used to manage
 * @stream with transport @tr.
 *
 * Returns: (transfer full): a new #GstRTSPStreamTransport
 */
GstRTSPStreamTransport *
gst_rtsp_stream_transport_new (GstRTSPStream * stream, GstRTSPTransport * tr)
{
  GstRTSPStreamTransportPrivate *priv;
  GstRTSPStreamTransport *trans;

  g_return_val_if_fail (GST_IS_RTSP_STREAM (stream), NULL);
  g_return_val_if_fail (tr != NULL, NULL);

  trans = g_object_new (GST_TYPE_RTSP_STREAM_TRANSPORT, NULL);
  priv = trans->priv;
  priv->stream = stream;
  priv->stream = g_object_ref (priv->stream);
  priv->transport = tr;

  return trans;
}

/**
 * gst_rtsp_stream_transport_get_stream:
 * @trans: a #GstRTSPStreamTransport
 *
 * Get the #GstRTSPStream used when constructing @trans.
 *
 * Returns: (transfer none) (nullable): the stream used when constructing @trans.
 */
GstRTSPStream *
gst_rtsp_stream_transport_get_stream (GstRTSPStreamTransport * trans)
{
  g_return_val_if_fail (GST_IS_RTSP_STREAM_TRANSPORT (trans), NULL);

  return trans->priv->stream;
}

/**
 * gst_rtsp_stream_transport_set_callbacks:
 * @trans: a #GstRTSPStreamTransport
 * @send_rtp: (scope notified): a callback called when RTP should be sent
 * @send_rtcp: (scope notified): a callback called when RTCP should be sent
 * @user_data: (closure): user data passed to callbacks
 * @notify: (allow-none): called with the user_data when no longer needed.
 *
 * Install callbacks that will be called when data for a stream should be sent
 * to a client. This is usually used when sending RTP/RTCP over TCP.
 */
void
gst_rtsp_stream_transport_set_callbacks (GstRTSPStreamTransport * trans,
    GstRTSPSendFunc send_rtp, GstRTSPSendFunc send_rtcp,
    gpointer user_data, GDestroyNotify notify)
{
  GstRTSPStreamTransportPrivate *priv;

  g_return_if_fail (GST_IS_RTSP_STREAM_TRANSPORT (trans));

  priv = trans->priv;

  priv->send_rtp = send_rtp;
  priv->send_rtcp = send_rtcp;
  if (priv->notify)
    priv->notify (priv->user_data);
  priv->user_data = user_data;
  priv->notify = notify;
}

/**
 * gst_rtsp_stream_transport_set_list_callbacks:
 * @trans: a #GstRTSPStreamTransport
 * @send_rtp_list: (scope notified): a callback called when RTP should be sent
 * @send_rtcp_list: (scope notified): a callback called when RTCP should be sent
 * @user_data: (closure): user data passed to callbacks
 * @notify: (allow-none): called with the user_data when no longer needed.
 *
 * Install callbacks that will be called when data for a stream should be sent
 * to a client. This is usually used when sending RTP/RTCP over TCP.
 *
 * Since: 1.16
 */
void
gst_rtsp_stream_transport_set_list_callbacks (GstRTSPStreamTransport * trans,
    GstRTSPSendListFunc send_rtp_list, GstRTSPSendListFunc send_rtcp_list,
    gpointer user_data, GDestroyNotify notify)
{
  GstRTSPStreamTransportPrivate *priv;

  g_return_if_fail (GST_IS_RTSP_STREAM_TRANSPORT (trans));

  priv = trans->priv;

  priv->send_rtp_list = send_rtp_list;
  priv->send_rtcp_list = send_rtcp_list;
  if (priv->list_notify)
    priv->list_notify (priv->list_user_data);
  priv->list_user_data = user_data;
  priv->list_notify = notify;
}

void
gst_rtsp_stream_transport_set_back_pressure_callback (GstRTSPStreamTransport *
    trans, GstRTSPBackPressureFunc back_pressure_func, gpointer user_data,
    GDestroyNotify notify)
{
  GstRTSPStreamTransportPrivate *priv;

  g_return_if_fail (GST_IS_RTSP_STREAM_TRANSPORT (trans));

  priv = trans->priv;

  priv->back_pressure_func = back_pressure_func;
  if (priv->back_pressure_func_notify)
    priv->back_pressure_func_notify (priv->back_pressure_func_data);
  priv->back_pressure_func_data = user_data;
  priv->back_pressure_func_notify = notify;
}

gboolean
gst_rtsp_stream_transport_check_back_pressure (GstRTSPStreamTransport * trans,
    gboolean is_rtp)
{
  GstRTSPStreamTransportPrivate *priv;
  gboolean ret = FALSE;
  guint8 channel;

  priv = trans->priv;

  if (is_rtp)
    channel = priv->transport->interleaved.min;
  else
    channel = priv->transport->interleaved.max;

  if (priv->back_pressure_func)
    ret = priv->back_pressure_func (channel, priv->back_pressure_func_data);

  return ret;
}

/**
 * gst_rtsp_stream_transport_set_keepalive:
 * @trans: a #GstRTSPStreamTransport
 * @keep_alive: (scope notified): a callback called when the receiver is active
 * @user_data: (closure): user data passed to callback
 * @notify: (allow-none): called with the user_data when no longer needed.
 *
 * Install a callback that will be called when RTCP packets are received from
 * the receiver of @trans.
 */
void
gst_rtsp_stream_transport_set_keepalive (GstRTSPStreamTransport * trans,
    GstRTSPKeepAliveFunc keep_alive, gpointer user_data, GDestroyNotify notify)
{
  GstRTSPStreamTransportPrivate *priv;

  g_return_if_fail (GST_IS_RTSP_STREAM_TRANSPORT (trans));

  priv = trans->priv;

  priv->keep_alive = keep_alive;
  if (priv->ka_notify)
    priv->ka_notify (priv->ka_user_data);
  priv->ka_user_data = user_data;
  priv->ka_notify = notify;
}

/**
 * gst_rtsp_stream_transport_set_message_sent:
 * @trans: a #GstRTSPStreamTransport
 * @message_sent: (scope notified): a callback called when a message has been sent
 * @user_data: (closure): user data passed to callback
 * @notify: (allow-none): called with the user_data when no longer needed
 *
 * Install a callback that will be called when a message has been sent on @trans.
 */
void
gst_rtsp_stream_transport_set_message_sent (GstRTSPStreamTransport * trans,
    GstRTSPMessageSentFunc message_sent, gpointer user_data,
    GDestroyNotify notify)
{
  GstRTSPStreamTransportPrivate *priv;

  g_return_if_fail (GST_IS_RTSP_STREAM_TRANSPORT (trans));

  priv = trans->priv;

  priv->message_sent = message_sent;
  if (priv->ms_notify)
    priv->ms_notify (priv->ms_user_data);
  priv->ms_user_data = user_data;
  priv->ms_notify = notify;
}

/**
 * gst_rtsp_stream_transport_set_message_sent_full:
 * @trans: a #GstRTSPStreamTransport
 * @message_sent: (scope notified): a callback called when a message has been sent
 * @user_data: (closure): user data passed to callback
 * @notify: (allow-none): called with the user_data when no longer needed
 *
 * Install a callback that will be called when a message has been sent on @trans.
 *
 * Since: 1.18
 */
void
gst_rtsp_stream_transport_set_message_sent_full (GstRTSPStreamTransport * trans,
    GstRTSPMessageSentFuncFull message_sent, gpointer user_data,
    GDestroyNotify notify)
{
  GstRTSPStreamTransportPrivate *priv;

  g_return_if_fail (GST_IS_RTSP_STREAM_TRANSPORT (trans));

  priv = trans->priv;

  priv->message_sent_full = message_sent;
  if (priv->msf_notify)
    priv->msf_notify (priv->msf_user_data);
  priv->msf_user_data = user_data;
  priv->msf_notify = notify;
}

/**
 * gst_rtsp_stream_transport_set_transport:
 * @trans: a #GstRTSPStreamTransport
 * @tr: (transfer full): a client #GstRTSPTransport
 *
 * Set @tr as the client transport. This function takes ownership of the
 * passed @tr.
 */
void
gst_rtsp_stream_transport_set_transport (GstRTSPStreamTransport * trans,
    GstRTSPTransport * tr)
{
  GstRTSPStreamTransportPrivate *priv;

  g_return_if_fail (GST_IS_RTSP_STREAM_TRANSPORT (trans));
  g_return_if_fail (tr != NULL);

  priv = trans->priv;

  /* keep track of the transports in the stream. */
  if (priv->transport)
    gst_rtsp_transport_free (priv->transport);
  priv->transport = tr;
}

/**
 * gst_rtsp_stream_transport_get_transport:
 * @trans: a #GstRTSPStreamTransport
 *
 * Get the transport configured in @trans.
 *
 * Returns: (transfer none) (nullable): the transport configured in @trans. It remains
 * valid for as long as @trans is valid.
 */
const GstRTSPTransport *
gst_rtsp_stream_transport_get_transport (GstRTSPStreamTransport * trans)
{
  g_return_val_if_fail (GST_IS_RTSP_STREAM_TRANSPORT (trans), NULL);

  return trans->priv->transport;
}

/**
 * gst_rtsp_stream_transport_set_url:
 * @trans: a #GstRTSPStreamTransport
 * @url: (transfer none) (nullable): a client #GstRTSPUrl
 *
 * Set @url as the client url.
 */
void
gst_rtsp_stream_transport_set_url (GstRTSPStreamTransport * trans,
    const GstRTSPUrl * url)
{
  GstRTSPStreamTransportPrivate *priv;

  g_return_if_fail (GST_IS_RTSP_STREAM_TRANSPORT (trans));

  priv = trans->priv;

  /* keep track of the client url. */
  if (priv->url)
    gst_rtsp_url_free (priv->url);
  priv->url = (url ? gst_rtsp_url_copy (url) : NULL);
}

/**
 * gst_rtsp_stream_transport_get_url:
 * @trans: a #GstRTSPStreamTransport
 *
 * Get the url configured in @trans.
 *
 * Returns: (transfer none) (nullable): the url configured in @trans.
 * It remains valid for as long as @trans is valid.
 */
const GstRTSPUrl *
gst_rtsp_stream_transport_get_url (GstRTSPStreamTransport * trans)
{
  g_return_val_if_fail (GST_IS_RTSP_STREAM_TRANSPORT (trans), NULL);

  return trans->priv->url;
}

/**
 * gst_rtsp_stream_transport_get_rtpinfo:
 * @trans: a #GstRTSPStreamTransport
 * @start_time: a start time
 *
 * Get the RTP-Info string for @trans and @start_time.
 *
 * Returns: (transfer full) (nullable): the RTPInfo string for @trans
 * and @start_time or %NULL when the RTP-Info could not be
 * determined. g_free() after usage.
 */
gchar *
gst_rtsp_stream_transport_get_rtpinfo (GstRTSPStreamTransport * trans,
    GstClockTime start_time)
{
  GstRTSPStreamTransportPrivate *priv;
  gchar *url_str;
  GString *rtpinfo;
  guint rtptime, seq, clock_rate;
  GstClockTime running_time = GST_CLOCK_TIME_NONE;

  g_return_val_if_fail (GST_IS_RTSP_STREAM_TRANSPORT (trans), NULL);

  priv = trans->priv;

  if (!gst_rtsp_stream_is_sender (priv->stream))
    return NULL;
  if (!gst_rtsp_stream_get_rtpinfo (priv->stream, &rtptime, &seq, &clock_rate,
          &running_time))
    return NULL;

  GST_DEBUG ("RTP time %u, seq %u, rate %u, running-time %" GST_TIME_FORMAT,
      rtptime, seq, clock_rate, GST_TIME_ARGS (running_time));

  if (GST_CLOCK_TIME_IS_VALID (running_time)
      && GST_CLOCK_TIME_IS_VALID (start_time)) {
    if (running_time > start_time) {
      rtptime -=
          gst_util_uint64_scale_int (running_time - start_time, clock_rate,
          GST_SECOND);
    } else {
      rtptime +=
          gst_util_uint64_scale_int (start_time - running_time, clock_rate,
          GST_SECOND);
    }
  }
  GST_DEBUG ("RTP time %u, for start-time %" GST_TIME_FORMAT,
      rtptime, GST_TIME_ARGS (start_time));

  rtpinfo = g_string_new ("");

  url_str = gst_rtsp_url_get_request_uri (trans->priv->url);
  g_string_append_printf (rtpinfo, "url=%s;seq=%u;rtptime=%u",
      url_str, seq, rtptime);
  g_free (url_str);

  return g_string_free (rtpinfo, FALSE);
}

/**
 * gst_rtsp_stream_transport_set_active:
 * @trans: a #GstRTSPStreamTransport
 * @active: new state of @trans
 *
 * Activate or deactivate data transfer configured in @trans.
 *
 * Returns: %TRUE when the state was changed.
 */
gboolean
gst_rtsp_stream_transport_set_active (GstRTSPStreamTransport * trans,
    gboolean active)
{
  GstRTSPStreamTransportPrivate *priv;
  gboolean res;

  g_return_val_if_fail (GST_IS_RTSP_STREAM_TRANSPORT (trans), FALSE);

  priv = trans->priv;

  if (active)
    res = gst_rtsp_stream_add_transport (priv->stream, trans);
  else
    res = gst_rtsp_stream_remove_transport (priv->stream, trans);

  return res;
}

/**
 * gst_rtsp_stream_transport_set_timed_out:
 * @trans: a #GstRTSPStreamTransport
 * @timedout: timed out value
 *
 * Set the timed out state of @trans to @timedout.
 */
void
gst_rtsp_stream_transport_set_timed_out (GstRTSPStreamTransport * trans,
    gboolean timedout)
{
  g_return_if_fail (GST_IS_RTSP_STREAM_TRANSPORT (trans));

  trans->priv->timed_out = timedout;
}

/**
 * gst_rtsp_stream_transport_is_timed_out:
 * @trans: a #GstRTSPStreamTransport
 *
 * Check if @trans is timed out.
 *
 * Returns: %TRUE if @trans timed out.
 */
gboolean
gst_rtsp_stream_transport_is_timed_out (GstRTSPStreamTransport * trans)
{
  g_return_val_if_fail (GST_IS_RTSP_STREAM_TRANSPORT (trans), FALSE);

  return trans->priv->timed_out;
}

/**
 * gst_rtsp_stream_transport_send_rtp:
 * @trans: a #GstRTSPStreamTransport
 * @buffer: (transfer none): a #GstBuffer
 *
 * Send @buffer to the installed RTP callback for @trans.
 *
 * Returns: %TRUE on success
 */
gboolean
gst_rtsp_stream_transport_send_rtp (GstRTSPStreamTransport * trans,
    GstBuffer * buffer)
{
  GstRTSPStreamTransportPrivate *priv;
  gboolean res = FALSE;

  g_return_val_if_fail (GST_IS_BUFFER (buffer), FALSE);

  priv = trans->priv;

  if (priv->send_rtp)
    res =
        priv->send_rtp (buffer, priv->transport->interleaved.min,
        priv->user_data);

  if (res)
    gst_rtsp_stream_transport_keep_alive (trans);

  return res;
}

/**
 * gst_rtsp_stream_transport_send_rtcp:
 * @trans: a #GstRTSPStreamTransport
 * @buffer: (transfer none): a #GstBuffer
 *
 * Send @buffer to the installed RTCP callback for @trans.
 *
 * Returns: %TRUE on success
 */
gboolean
gst_rtsp_stream_transport_send_rtcp (GstRTSPStreamTransport * trans,
    GstBuffer * buffer)
{
  GstRTSPStreamTransportPrivate *priv;
  gboolean res = FALSE;

  g_return_val_if_fail (GST_IS_BUFFER (buffer), FALSE);

  priv = trans->priv;

  if (priv->send_rtcp)
    res =
        priv->send_rtcp (buffer, priv->transport->interleaved.max,
        priv->user_data);

  if (res)
    gst_rtsp_stream_transport_keep_alive (trans);

  return res;
}

/**
 * gst_rtsp_stream_transport_send_rtp_list:
 * @trans: a #GstRTSPStreamTransport
 * @buffer_list: (transfer none): a #GstBufferList
 *
 * Send @buffer_list to the installed RTP callback for @trans.
 *
 * Returns: %TRUE on success
 *
 * Since: 1.16
 */
gboolean
gst_rtsp_stream_transport_send_rtp_list (GstRTSPStreamTransport * trans,
    GstBufferList * buffer_list)
{
  GstRTSPStreamTransportPrivate *priv;
  gboolean res = FALSE;

  g_return_val_if_fail (GST_IS_BUFFER_LIST (buffer_list), FALSE);

  priv = trans->priv;

  if (priv->send_rtp_list) {
    res =
        priv->send_rtp_list (buffer_list, priv->transport->interleaved.min,
        priv->list_user_data);
  } else if (priv->send_rtp) {
    guint n = gst_buffer_list_length (buffer_list), i;

    for (i = 0; i < n; i++) {
      GstBuffer *buffer = gst_buffer_list_get (buffer_list, i);

      res =
          priv->send_rtp (buffer, priv->transport->interleaved.min,
          priv->user_data);
      if (!res)
        break;
    }
  }

  if (res)
    gst_rtsp_stream_transport_keep_alive (trans);

  return res;
}

/**
 * gst_rtsp_stream_transport_send_rtcp_list:
 * @trans: a #GstRTSPStreamTransport
 * @buffer_list: (transfer none): a #GstBufferList
 *
 * Send @buffer_list to the installed RTCP callback for @trans.
 *
 * Returns: %TRUE on success
 *
 * Since: 1.16
 */
gboolean
gst_rtsp_stream_transport_send_rtcp_list (GstRTSPStreamTransport * trans,
    GstBufferList * buffer_list)
{
  GstRTSPStreamTransportPrivate *priv;
  gboolean res = FALSE;

  g_return_val_if_fail (GST_IS_BUFFER_LIST (buffer_list), FALSE);

  priv = trans->priv;

  if (priv->send_rtcp_list) {
    res =
        priv->send_rtcp_list (buffer_list, priv->transport->interleaved.max,
        priv->list_user_data);
  } else if (priv->send_rtcp) {
    guint n = gst_buffer_list_length (buffer_list), i;

    for (i = 0; i < n; i++) {
      GstBuffer *buffer = gst_buffer_list_get (buffer_list, i);

      res =
          priv->send_rtcp (buffer, priv->transport->interleaved.max,
          priv->user_data);
      if (!res)
        break;
    }
  }

  if (res)
    gst_rtsp_stream_transport_keep_alive (trans);

  return res;
}

/**
 * gst_rtsp_stream_transport_keep_alive:
 * @trans: a #GstRTSPStreamTransport
 *
 * Signal the installed keep_alive callback for @trans.
 */
void
gst_rtsp_stream_transport_keep_alive (GstRTSPStreamTransport * trans)
{
  GstRTSPStreamTransportPrivate *priv;

  priv = trans->priv;

  if (priv->keep_alive)
    priv->keep_alive (priv->ka_user_data);
}

/**
 * gst_rtsp_stream_transport_message_sent:
 * @trans: a #GstRTSPStreamTransport
 *
 * Signal the installed message_sent / message_sent_full callback for @trans.
 *
 * Since: 1.16
 */
void
gst_rtsp_stream_transport_message_sent (GstRTSPStreamTransport * trans)
{
  GstRTSPStreamTransportPrivate *priv;

  priv = trans->priv;

  if (priv->message_sent_full)
    priv->message_sent_full (trans, priv->msf_user_data);
  if (priv->message_sent)
    priv->message_sent (priv->ms_user_data);
}

/**
 * gst_rtsp_stream_transport_recv_data:
 * @trans: a #GstRTSPStreamTransport
 * @channel: a channel
 * @buffer: (transfer full): a #GstBuffer
 *
 * Receive @buffer on @channel of @trans.
 *
 * Returns: a #GstFlowReturn. Returns GST_FLOW_NOT_LINKED when @channel is not
 * configured in the transport of @trans.
 */
GstFlowReturn
gst_rtsp_stream_transport_recv_data (GstRTSPStreamTransport * trans,
    guint channel, GstBuffer * buffer)
{
  GstRTSPStreamTransportPrivate *priv;
  const GstRTSPTransport *tr;
  GstFlowReturn res;

  g_return_val_if_fail (GST_IS_BUFFER (buffer), GST_FLOW_ERROR);

  priv = trans->priv;
  tr = priv->transport;

  if (tr->interleaved.min == channel) {
    res = gst_rtsp_stream_recv_rtp (priv->stream, buffer);
  } else if (tr->interleaved.max == channel) {
    res = gst_rtsp_stream_recv_rtcp (priv->stream, buffer);
  } else {
    res = GST_FLOW_NOT_LINKED;
  }
  return res;
}

static GstClockTime
get_backlog_item_timestamp (BackLogItem * item)
{
  GstClockTime ret = GST_CLOCK_TIME_NONE;

  if (item->buffer) {
    ret = GST_BUFFER_DTS_OR_PTS (item->buffer);
  } else if (item->buffer_list) {
    g_assert (gst_buffer_list_length (item->buffer_list) > 0);
    ret = GST_BUFFER_DTS_OR_PTS (gst_buffer_list_get (item->buffer_list, 0));
  }

  return ret;
}

static GstClockTime
get_first_backlog_timestamp (GstRTSPStreamTransport * trans)
{
  GstRTSPStreamTransportPrivate *priv = trans->priv;
  GstClockTime ret = GST_CLOCK_TIME_NONE;
  guint i, l;

  l = gst_vec_deque_get_length (priv->items);

  for (i = 0; i < l; i++) {
    BackLogItem *item = (BackLogItem *)
        gst_vec_deque_peek_nth_struct (priv->items, i);

    if (item->is_rtp) {
      ret = get_backlog_item_timestamp (item);
      break;
    }
  }

  return ret;
}
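
/* Illustrative, simplified caller pattern for the backlog helpers below; the
 * real callers are GstRTSPStream and GstRTSPClient, and the buffer, buffer_list
 * and is_rtp variables here are hypothetical:
 *
 *   gst_rtsp_stream_transport_lock_backlog (trans);
 *   if (!gst_rtsp_stream_transport_backlog_push (trans, buffer, NULL, TRUE))
 *     GST_WARNING ("backlog limits exceeded");
 *   ...
 *   if (!gst_rtsp_stream_transport_backlog_is_empty (trans))
 *     gst_rtsp_stream_transport_backlog_pop (trans, &buffer, &buffer_list,
 *         &is_rtp);
 *   gst_rtsp_stream_transport_unlock_backlog (trans);
 */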

/* Not MT-safe, caller should ensure consistent locking (see
 * gst_rtsp_stream_transport_lock_backlog()). Ownership
 * of @buffer and @buffer_list is transferred to the transport */
gboolean
gst_rtsp_stream_transport_backlog_push (GstRTSPStreamTransport * trans,
    GstBuffer * buffer, GstBufferList * buffer_list, gboolean is_rtp)
{
  gboolean ret = TRUE;
  BackLogItem item = { 0, };
  GstClockTime item_timestamp;
  GstRTSPStreamTransportPrivate *priv;

  priv = trans->priv;

  if (buffer)
    item.buffer = buffer;
  if (buffer_list)
    item.buffer_list = buffer_list;
  item.is_rtp = is_rtp;

  gst_vec_deque_push_tail_struct (priv->items, &item);

  item_timestamp = get_backlog_item_timestamp (&item);

  if (is_rtp && priv->first_rtp_timestamp != GST_CLOCK_TIME_NONE) {
    GstClockTimeDiff queue_duration;

    g_assert (GST_CLOCK_TIME_IS_VALID (item_timestamp));

    queue_duration = GST_CLOCK_DIFF (priv->first_rtp_timestamp, item_timestamp);

    g_assert (queue_duration >= 0);

    if (queue_duration > MAX_BACKLOG_DURATION &&
        gst_vec_deque_get_length (priv->items) > MAX_BACKLOG_SIZE) {
      ret = FALSE;
    }
  } else if (is_rtp) {
    priv->first_rtp_timestamp = item_timestamp;
  }

  return ret;
}

/* Not MT-safe, caller should ensure consistent locking (see
 * gst_rtsp_stream_transport_lock_backlog()). Ownership
 * of @buffer and @buffer_list is transferred back to the caller,
 * if either of those is NULL the underlying object is unreffed */
gboolean
gst_rtsp_stream_transport_backlog_pop (GstRTSPStreamTransport * trans,
    GstBuffer ** buffer, GstBufferList ** buffer_list, gboolean * is_rtp)
{
  BackLogItem *item;
  GstRTSPStreamTransportPrivate *priv;

  g_return_val_if_fail (!gst_rtsp_stream_transport_backlog_is_empty (trans),
      FALSE);

  priv = trans->priv;

  item = (BackLogItem *) gst_vec_deque_pop_head_struct (priv->items);

  priv->first_rtp_timestamp = get_first_backlog_timestamp (trans);

  if (buffer)
    *buffer = item->buffer;
  else if (item->buffer)
    gst_buffer_unref (item->buffer);

  if (buffer_list)
    *buffer_list = item->buffer_list;
  else if (item->buffer_list)
    gst_buffer_list_unref (item->buffer_list);

  if (is_rtp)
    *is_rtp = item->is_rtp;

  return TRUE;
}

/* Not MT-safe, caller should ensure consistent locking.
 * See gst_rtsp_stream_transport_lock_backlog() */
gboolean
gst_rtsp_stream_transport_backlog_peek_is_rtp (GstRTSPStreamTransport * trans)
{
  BackLogItem *item;
  GstRTSPStreamTransportPrivate *priv;

  g_return_val_if_fail (!gst_rtsp_stream_transport_backlog_is_empty (trans),
      FALSE);

  priv = trans->priv;

  item = (BackLogItem *) gst_vec_deque_peek_head_struct (priv->items);

  return item->is_rtp;
}

/* Not MT-safe, caller should ensure consistent locking.
 * See gst_rtsp_stream_transport_lock_backlog() */
gboolean
gst_rtsp_stream_transport_backlog_is_empty (GstRTSPStreamTransport * trans)
{
  return gst_vec_deque_is_empty (trans->priv->items);
}

/* Not MT-safe, caller should ensure consistent locking.
 * See gst_rtsp_stream_transport_lock_backlog() */
void
gst_rtsp_stream_transport_clear_backlog (GstRTSPStreamTransport * trans)
{
  while (!gst_rtsp_stream_transport_backlog_is_empty (trans)) {
    gst_rtsp_stream_transport_backlog_pop (trans, NULL, NULL, NULL);
  }
}

/* Internal API, protects access to the TCP backlog. Safe to
 * call recursively */
void
gst_rtsp_stream_transport_lock_backlog (GstRTSPStreamTransport * trans)
{
  g_rec_mutex_lock (&trans->priv->backlog_lock);
}

/* See gst_rtsp_stream_transport_lock_backlog() */
void
gst_rtsp_stream_transport_unlock_backlog (GstRTSPStreamTransport * trans)
{
  g_rec_mutex_unlock (&trans->priv->backlog_lock);
}