tests/check/elements/multiqueue.c: Use a GStaticMutex to protect all cases where libcheck fail_if/fail_unless macros ...
Original commit message from CVS:

* tests/check/elements/multiqueue.c: (mq_dummypad_chain),
  (mq_dummypad_event), (run_output_order_test):
  Use a GStaticMutex to protect all cases where libcheck
  fail_if/fail_unless macros might be called from multiple threads
  simultaneously to avoid errors like:
  "check_pack.c:107: :-1081725400:Bad message type arg"
This commit is contained in:
parent 54eb646c3e
commit 4fb00301dd
2 changed files with 22 additions and 1 deletion
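Background for the patch below: libcheck's fail_if/fail_unless macros report results back to the test harness through a packed message stream (the cited error comes from check's check_pack.c), and in the version of check used here that path was evidently not safe to drive from several threads at once. The fix serialises every assertion that can fire concurrently behind a single GStaticMutex. A minimal sketch of that pattern, using the GLib 2.x GStaticMutex API the test targets; the worker function and its loop are illustrative only, not code from the patch:

#include <gst/check/gstcheck.h>

/* One process-wide lock that serialises all libcheck assertions,
 * the same pattern the patch adds to multiqueue.c. */
static GStaticMutex _check_lock = G_STATIC_MUTEX_INIT;

/* Illustrative worker: imagine several of these running at once
 * (e.g. spawned with g_thread_create), each wanting to assert. */
static gpointer
assert_worker (gpointer data)
{
  gint i;

  for (i = 0; i < 100; i++) {
    /* Hold the lock around the fail_* macro so only one thread
     * writes to libcheck's result stream at a time. */
    g_static_mutex_lock (&_check_lock);
    fail_unless (i >= 0, "counter went negative: %d", i);
    g_static_mutex_unlock (&_check_lock);
  }
  return NULL;
}

Note that the lock is held only across the assertion itself, never around the surrounding work, so the test threads still run genuinely concurrently.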
ChangeLog
@@ -1,3 +1,12 @@
+2007-07-16  Jan Schmidt  <thaytan@mad.scientist.com>
+
+	* tests/check/elements/multiqueue.c: (mq_dummypad_chain),
+	(mq_dummypad_event), (run_output_order_test):
+	Use a GStaticMutex to protect all cases where libcheck
+	fail_if/fail_unless macros might be called from multiple threads
+	simultaneously to avoid errors like:
+	"check_pack.c:107: :-1081725400:Bad message type arg"
+
 2007-07-16  Jan Schmidt  <thaytan@mad.scientist.com>
 
 	* tests/check/pipelines/stress.c: (GST_START_TEST):
tests/check/elements/multiqueue.c
@@ -22,6 +22,8 @@
 #include <gst/check/gstcheck.h>
 
+static GStaticMutex _check_lock = G_STATIC_MUTEX_INIT;
+
 static GstElement *
 setup_multiqueue (GstElement * pipe, GstElement * inputs[],
     GstElement * outputs[], guint num)
@@ -295,11 +297,13 @@ mq_dummypad_chain (GstPad * sinkpad, GstBuffer * buf)
   struct PadData *pad_data;
 
   pad_data = gst_pad_get_element_private (sinkpad);
-  fail_if (pad_data == NULL);
 
+  g_static_mutex_lock (&_check_lock);
+  fail_if (pad_data == NULL);
   /* Read an ID from the first 4 bytes of the buffer data and check it's
    * what we expect */
   fail_unless (GST_BUFFER_SIZE (buf) >= 4);
+  g_static_mutex_unlock (&_check_lock);
   cur_id = GST_READ_UINT32_BE (GST_BUFFER_DATA (buf));
 
   g_mutex_lock (pad_data->mutex);
@@ -309,10 +313,12 @@ mq_dummypad_chain (GstPad * sinkpad, GstBuffer * buf)
   if (!pad_data->is_linked) {
     /* If there are no linked pads, we can't track a max_id for them :) */
     if (pad_data->n_linked > 0) {
+      g_static_mutex_lock (&_check_lock);
       fail_unless (cur_id <= *(pad_data->max_linked_id_ptr) + 1,
          "Got buffer %u on pad %u before buffer %u was seen on a "
          "linked pad (max: %u)", cur_id, pad_data->pad_num, cur_id - 1,
          *(pad_data->max_linked_id_ptr));
+      g_static_mutex_unlock (&_check_lock);
     }
   } else {
     /* Update the max_id value */
@@ -335,7 +341,9 @@ mq_dummypad_event (GstPad * sinkpad, GstEvent * event)
   struct PadData *pad_data;
 
   pad_data = gst_pad_get_element_private (sinkpad);
+  g_static_mutex_lock (&_check_lock);
   fail_if (pad_data == NULL);
+  g_static_mutex_unlock (&_check_lock);
 
   if (GST_EVENT_TYPE (event) == GST_EVENT_EOS) {
     g_mutex_lock (pad_data->mutex);
@@ -442,10 +450,13 @@ run_output_order_test (gint n_linked)
     cur_pad = pad_pattern[i % n];
 
     buf = gst_buffer_new_and_alloc (4);
+    g_static_mutex_lock (&_check_lock);
     fail_if (buf == NULL);
+    g_static_mutex_unlock (&_check_lock);
     GST_WRITE_UINT32_BE (GST_BUFFER_DATA (buf), i);
 
     ret = gst_pad_push (inputpads[cur_pad], buf);
+    g_static_mutex_lock (&_check_lock);
     if (pad_data[cur_pad].is_linked) {
       fail_unless (ret == GST_FLOW_OK,
          "Push on pad %d returned %d when FLOW_OK was expected", cur_pad, ret);
@@ -455,6 +466,7 @@ run_output_order_test (gint n_linked)
          "Push on pad %d returned %d when FLOW_OK or NOT_LINKED was expected",
          cur_pad, ret);
     }
+    g_static_mutex_unlock (&_check_lock);
   }
 
   for (i = 0; i < NPADS; i++) {
     gst_pad_push_event (inputpads[i], gst_event_new_eos ());
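A side note for readers on current GLib: GStaticMutex and g_static_mutex_lock() were deprecated in GLib 2.32, where a plain GMutex became statically allocatable. The same serialisation written against modern GLib would look roughly like the sketch below; this is not part of the commit, and the locked_fail_unless helper is a hypothetical name for illustration:

#include <gst/check/gstcheck.h>

/* GLib >= 2.32: a plain GMutex can be a static variable, so no
 * GStaticMutex or G_STATIC_MUTEX_INIT is needed. */
static GMutex check_lock;

/* Hypothetical helper wrapping a libcheck assertion in the lock. */
static void
locked_fail_unless (gboolean ok, const gchar * what)
{
  g_mutex_lock (&check_lock);
  fail_unless (ok, "%s", what);
  g_mutex_unlock (&check_lock);
}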