systemclock: Add a test for sync/async clockid interactions

This test hangs randomly if there are problems with reliably
unscheduling sync and async clock IDs on the system clock.
Jan Schmidt 2009-03-25 21:37:38 +00:00
parent 10bc5670a3
commit b35f424858
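For context on the commit message above, here is a minimal, standalone sketch of the interaction being exercised: an async callback unscheduling a pending sync wait on the system clock. It is not part of the commit; it uses the same 0.10-era GstClock/GLib API as the test below (three-argument gst_clock_id_wait_async), and the callback name and timeout values are purely illustrative.

/* Sketch only: an async clock callback cancels a sync wait in progress.
 * Assumes GStreamer 0.10-style API, matching the test in this commit. */
#include <gst/gst.h>

static GstClockID sync_id;      /* the ID the main thread sleeps on */

/* Async callback, invoked from the clock thread: cancel the pending
 * sync wait so the main thread wakes up with GST_CLOCK_UNSCHEDULED. */
static gboolean
cancel_sync_wait_cb (GstClock * clock, GstClockTime time, GstClockID id,
    gpointer user_data)
{
  gst_clock_id_unschedule (sync_id);
  return TRUE;
}

int
main (int argc, char *argv[])
{
  GstClock *clock;
  GstClockID async_id;
  GstClockTime base;
  GstClockTimeDiff jitter;
  GstClockReturn ret;

  gst_init (&argc, &argv);

  clock = gst_system_clock_obtain ();
  base = gst_clock_get_time (clock);

  /* Sync ID 200ms out, async ID 50ms out: the async callback fires while
   * the main thread is still sleeping on the sync ID. */
  sync_id = gst_clock_new_single_shot_id (clock, base + 200 * GST_MSECOND);
  async_id = gst_clock_new_single_shot_id (clock, base + 50 * GST_MSECOND);

  gst_clock_id_wait_async (async_id, cancel_sync_wait_cb, NULL);

  /* Blocks until the callback unschedules sync_id */
  ret = gst_clock_id_wait (sync_id, &jitter);
  g_print ("sync wait returned %d (expecting GST_CLOCK_UNSCHEDULED)\n", ret);

  gst_clock_id_unschedule (async_id);
  gst_clock_id_unref (async_id);
  gst_clock_id_unref (sync_id);
  gst_object_unref (clock);

  return 0;
}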


@@ -279,6 +279,117 @@ GST_START_TEST (test_async_order)
GST_END_TEST;

struct test_async_sync_interaction_data
{
  GMutex *lock;
  GstClockID sync_id;
  GstClockID sync_id2;
  GstClockID async_id;
  GstClockID async_id2;
  GstClockID async_id3;
};

static gboolean
test_async_sync_interaction_cb (GstClock * clock, GstClockTime time,
    GstClockID id, gpointer user_data)
{
  struct test_async_sync_interaction_data *td =
      (struct test_async_sync_interaction_data *) (user_data);

  g_mutex_lock (td->lock);

  /* The first async callback is ignored */
  if (id == td->async_id)
    goto out;

  if (id != td->async_id2 && id != td->async_id3)
    goto out;
  /* The third async callback unschedules the sync wait and the second
   * async callback */
  if (id == td->async_id3) {
    gst_clock_id_unschedule (td->sync_id);
    gst_clock_id_unschedule (td->async_id2);
  }
out:
  g_mutex_unlock (td->lock);
  return FALSE;
}

GST_START_TEST (test_async_sync_interaction)
{
  /* This test schedules an async callback, then before it completes, schedules
   * an earlier async callback, and quickly unschedules the first, and inserts
   * a THIRD even earlier async callback. It then attempts to wait on a
   * sync clock ID. While that's sleeping, the 3rd async callback should fire
   * and unschedule it. This tests for problems with unscheduling async and
   * sync callbacks on the system clock. */
  GstClock *clock;
  GstClockReturn result;
  GstClockTime base;
  GstClockTimeDiff jitter;
  struct test_async_sync_interaction_data td;
  int i;

  clock = gst_system_clock_obtain ();
  fail_unless (clock != NULL, "Could not create instance of GstSystemClock");

  td.lock = g_mutex_new ();

  for (i = 0; i < 50; i++) {
    gst_clock_debug (clock);
    base = gst_clock_get_time (clock);

    g_mutex_lock (td.lock);
    td.async_id = gst_clock_new_single_shot_id (clock, base + 40 * GST_MSECOND);
    td.async_id2 =
        gst_clock_new_single_shot_id (clock, base + 30 * GST_MSECOND);
    td.async_id3 =
        gst_clock_new_single_shot_id (clock, base + 20 * GST_MSECOND);
    td.sync_id2 = gst_clock_new_single_shot_id (clock, base + 10 * GST_MSECOND);
    td.sync_id = gst_clock_new_single_shot_id (clock, base + 50 * GST_MSECOND);
    g_mutex_unlock (td.lock);

    result = gst_clock_id_wait_async (td.async_id,
        test_async_sync_interaction_cb, &td);
    fail_unless (result == GST_CLOCK_OK, "Waiting did not return OK");

    /* Wait 10ms, then unschedule async_id and schedule async_id2 */
    result = gst_clock_id_wait (td.sync_id2, &jitter);
    fail_unless (result == GST_CLOCK_OK || result == GST_CLOCK_EARLY,
        "Waiting did not return OK or EARLY");

    /* async_id2 is earlier than async_id - should become head of the queue */
    result = gst_clock_id_wait_async (td.async_id2,
        test_async_sync_interaction_cb, &td);
    fail_unless (result == GST_CLOCK_OK, "Waiting did not return OK");
    gst_clock_id_unschedule (td.async_id);

    /* async_id3 is earlier than async_id2 - should become head of the queue */
    result = gst_clock_id_wait_async (td.async_id3,
        test_async_sync_interaction_cb, &td);
    fail_unless (result == GST_CLOCK_OK, "Waiting did not return OK");

    /* While this is sleeping, the async3 id should fire and unschedule it */
    result = gst_clock_id_wait (td.sync_id, &jitter);
    fail_unless (result == GST_CLOCK_UNSCHEDULED || result == GST_CLOCK_EARLY,
        "Waiting did not return UNSCHEDULED or EARLY");
    gst_clock_id_unschedule (td.async_id3);

    g_mutex_lock (td.lock);
    gst_clock_id_unref (td.sync_id);
    gst_clock_id_unref (td.sync_id2);
    gst_clock_id_unref (td.async_id);
    gst_clock_id_unref (td.async_id2);
    gst_clock_id_unref (td.async_id3);
    g_mutex_unlock (td.lock);
  }

  g_mutex_free (td.lock);
  gst_object_unref (clock);
}

GST_END_TEST;

GST_START_TEST (test_periodic_multi)
{
  GstClock *clock;
@@ -422,6 +533,7 @@ gst_systemclock_suite (void)
  tcase_add_test (tc_chain, test_periodic_shot);
  tcase_add_test (tc_chain, test_periodic_multi);
  tcase_add_test (tc_chain, test_async_order);
  tcase_add_test (tc_chain, test_async_sync_interaction);
  tcase_add_test (tc_chain, test_diff);
  tcase_add_test (tc_chain, test_mixed);