queue2: remove redundant check to avoid posting the same buffering value twice

Remove the percent_changed check used to decide whether a buffering message
should be posted. Comparing against the last posted buffering value is
sufficient on its own, and removing the flag does not add complexity.

Part-of: <https://gitlab.freedesktop.org/gstreamer/gstreamer/-/merge_requests/6165>
Author: Arnaud Vrac, 2024-02-21 12:05:18 +01:00 (committed by GStreamer Marge Bot)
parent 517498dd11
commit 15a18281d7
2 changed files with 19 additions and 30 deletions
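
To make the redundancy the commit message describes concrete, here is a minimal standalone model of the old and new posting gates (illustrative C, not GStreamer code: the field names merely mirror those in GstQueue2, and the unchanged waiting_del condition is left out). The flag can only suppress a post when the level has not moved since the last successful post, which the comparison already detects:

/* Standalone model of the two gates; illustrative only. */
#include <stdbool.h>
#include <stdio.h>

typedef struct {
  int buffering_percent;             /* current level, set by SET_PERCENT */
  int last_posted_buffering_percent; /* level of the last successful post */
  bool percent_changed;              /* the flag this commit removes */
} Model;

/* Old gate: flag AND comparison. */
static bool should_post_old (const Model *m) {
  return m->percent_changed
      && m->last_posted_buffering_percent != m->buffering_percent;
}

/* New gate: the comparison alone. */
static bool should_post_new (const Model *m) {
  return m->last_posted_buffering_percent != m->buffering_percent;
}

int main (void) {
  /* Level moved 100 -> 50 -> 100 with no post in between: the flag is
   * still TRUE, yet both gates agree there is nothing new to post. */
  Model m = { 100, 100, true };
  printf ("old=%d new=%d\n", should_post_old (&m), should_post_new (&m));

  /* Level now differs from the last posted value: both gates post,
   * whatever the flag says. */
  m.buffering_percent = 50;
  printf ("old=%d new=%d\n", should_post_old (&m), should_post_new (&m));
  return 0;
}

Both gates print the same decision for these sequences; the comparison alone carries the information the flag was duplicating.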

gstqueue2.c

@@ -264,7 +264,6 @@ static GParamSpec *obj_props[PROP_LAST] = { NULL, };
 #define SET_PERCENT(q, perc) G_STMT_START { \
   if (perc != q->buffering_percent) { \
     q->buffering_percent = perc; \
-    q->percent_changed = TRUE; \
     GST_DEBUG_OBJECT (q, "buffering %d percent", perc); \
     get_buffering_stats (q, perc, &q->mode, &q->avg_in, &q->avg_out, \
         &q->buffering_left); \
@@ -1181,28 +1180,27 @@ static GstMessage *
 gst_queue2_get_buffering_message (GstQueue2 * queue, gint * percent)
 {
   GstMessage *msg = NULL;
-  if (queue->percent_changed) {
-    /* Don't change the buffering level if the sinkpad is waiting for
-     * space to become available. This prevents the situation where,
-     * upstream is pushing buffers larger than our limits so only 1 buffer
-     * is ever in the queue at a time.
-     * Changing the level causes a buffering message to be posted saying that
-     * we are buffering which the application may pause to wait for another
-     * 100% buffering message which would be posted very soon after the
-     * waiting sink thread adds it's buffer to the queue */
-    /* FIXME: This situation above can still occur later if
-     * the sink pad is waiting to push a serialized event into the queue and
-     * the queue becomes empty for a short period of time. */
-    if (!queue->waiting_del
-        && queue->last_posted_buffering_percent != queue->buffering_percent) {
-      *percent = queue->buffering_percent;
-      GST_DEBUG_OBJECT (queue, "Going to post buffering: %d%%", *percent);
-      msg = gst_message_new_buffering (GST_OBJECT_CAST (queue), *percent);
-      gst_message_set_buffering_stats (msg, queue->mode, queue->avg_in,
-          queue->avg_out, queue->buffering_left);
-    }
+  /* Don't change the buffering level if the sinkpad is waiting for
+   * space to become available. This prevents the situation where,
+   * upstream is pushing buffers larger than our limits so only 1 buffer
+   * is ever in the queue at a time.
+   * Changing the level causes a buffering message to be posted saying that
+   * we are buffering which the application may pause to wait for another
+   * 100% buffering message which would be posted very soon after the
+   * waiting sink thread adds it's buffer to the queue */
+  /* FIXME: This situation above can still occur later if
+   * the sink pad is waiting to push a serialized event into the queue and
+   * the queue becomes empty for a short period of time. */
+  if (!queue->waiting_del
+      && queue->last_posted_buffering_percent != queue->buffering_percent) {
+    *percent = queue->buffering_percent;
+    GST_DEBUG_OBJECT (queue, "Going to post buffering: %d%%", *percent);
+    msg = gst_message_new_buffering (GST_OBJECT_CAST (queue), *percent);
+    gst_message_set_buffering_stats (msg, queue->mode, queue->avg_in,
+        queue->avg_out, queue->buffering_left);
   }
   return msg;
@@ -1221,15 +1219,7 @@ gst_queue2_post_buffering (GstQueue2 * queue)
   if (msg != NULL) {
     if (gst_element_post_message (GST_ELEMENT_CAST (queue), msg)) {
       GST_QUEUE2_MUTEX_LOCK (queue);
-      /* Set these states only if posting the message succeeded. Otherwise,
-       * this post attempt failed, and the next one won't be done, because
-       * gst_queue2_get_buffering_message() checks these states and decides
-       * based on their values that it won't produce a message. */
       queue->last_posted_buffering_percent = percent;
-      if (percent == queue->buffering_percent)
-        queue->percent_changed = FALSE;
       GST_QUEUE2_MUTEX_UNLOCK (queue);
       GST_DEBUG_OBJECT (queue, "successfully posted %d%% buffering message",
           percent);
     } else

gstqueue2.h

@@ -171,7 +171,6 @@ struct _GstQueue2
   gint64 buffering_left;
   gint avg_in;
   gint avg_out;
-  gboolean percent_changed;
   GMutex buffering_post_lock; /* assures only one posted at a time */
 };
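
For context on why a duplicate post is worth avoiding: as the comment in the hunk above describes, applications commonly react to every buffering message, pausing the pipeline until the level reaches 100% again. A typical bus handler looks like the sketch below (illustrative only, built on the standard gst_message_parse_buffering() API; it is not part of this commit):

#include <gst/gst.h>

/* Application-side consumer of queue2's buffering messages: pause while
 * buffering, resume once 100% is reached. Sketch only; error handling
 * and live-stream special cases are omitted. */
static gboolean
bus_cb (GstBus * bus, GstMessage * msg, gpointer user_data)
{
  GstElement *pipeline = GST_ELEMENT (user_data);

  (void) bus; /* unused */

  if (GST_MESSAGE_TYPE (msg) == GST_MESSAGE_BUFFERING) {
    gint percent;

    gst_message_parse_buffering (msg, &percent);
    gst_element_set_state (pipeline,
        percent < 100 ? GST_STATE_PAUSED : GST_STATE_PLAYING);
  }
  return TRUE; /* keep the watch installed */
}

/* Installed with: gst_bus_add_watch (bus, bus_cb, pipeline); */

Posting the same percentage twice would make a handler like this issue redundant state changes, which keying the post on last_posted_buffering_percent prevents.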