Added more docs.

Original commit message from CVS:
* docs/design/part-clocks.txt:
* docs/design/part-events.txt:
* gst/elements/gstfakesrc.c: (gst_fakesrc_get_event_mask),
(gst_fakesrc_event_handler), (gst_fakesrc_loop),
(gst_fakesrc_activate):
* gst/gstclock.c: (gst_clock_id_ref), (gst_clock_id_unref),
(gst_clock_id_compare_func), (gst_clock_id_wait),
(gst_clock_id_wait_async), (gst_clock_init),
(gst_clock_adjust_unlocked), (gst_clock_get_time):
* gst/gstevent.c: (gst_event_new_discontinuous_valist),
(gst_event_discont_get_value), (gst_event_new_segment_seek):
* gst/gstevent.h:
* gst/gstpipeline.c: (gst_pipeline_init), (is_eos),
(pipeline_bus_handler), (gst_pipeline_change_state):
* gst/gstpipeline.h:
* gst/gstsystemclock.c: (gst_system_clock_init),
(gst_system_clock_async_thread),
(gst_system_clock_id_wait_unlocked),
(gst_system_clock_id_wait_async), (gst_system_clock_id_unschedule):
* libs/gst/dataprotocol/dataprotocol.c: (gst_dp_packet_from_event),
(gst_dp_event_from_packet):
* testsuite/clock/Makefile.am:
* testsuite/clock/clock4.c: (gst_clock_debug), (ok_callback),
(error_callback), (main):
Added more docs.
Remove more silly events.
Implement periodic clock notifications.
Add async testsuite.
Wim Taymans 2005-02-11 15:50:53 +00:00
parent 98e3377631
commit 484d9a3224
16 changed files with 407 additions and 70 deletions

View file

@ -1,3 +1,34 @@
2005-02-11 Wim Taymans <wim@fluendo.com>
* docs/design/part-clocks.txt:
* docs/design/part-events.txt:
* gst/elements/gstfakesrc.c: (gst_fakesrc_get_event_mask),
(gst_fakesrc_event_handler), (gst_fakesrc_loop),
(gst_fakesrc_activate):
* gst/gstclock.c: (gst_clock_id_ref), (gst_clock_id_unref),
(gst_clock_id_compare_func), (gst_clock_id_wait),
(gst_clock_id_wait_async), (gst_clock_init),
(gst_clock_adjust_unlocked), (gst_clock_get_time):
* gst/gstevent.c: (gst_event_new_discontinuous_valist),
(gst_event_discont_get_value), (gst_event_new_segment_seek):
* gst/gstevent.h:
* gst/gstpipeline.c: (gst_pipeline_init), (is_eos),
(pipeline_bus_handler), (gst_pipeline_change_state):
* gst/gstpipeline.h:
* gst/gstsystemclock.c: (gst_system_clock_init),
(gst_system_clock_async_thread),
(gst_system_clock_id_wait_unlocked),
(gst_system_clock_id_wait_async), (gst_system_clock_id_unschedule):
* libs/gst/dataprotocol/dataprotocol.c: (gst_dp_packet_from_event),
(gst_dp_event_from_packet):
* testsuite/clock/Makefile.am:
* testsuite/clock/clock4.c: (gst_clock_debug), (ok_callback),
(error_callback), (main):
Added more docs.
Remove more silly events.
Implement periodic clock notifications.
Add async testsuite.
2005-02-11 Andy Wingo <wingo@pobox.com>
* tools/gst-inspect.c (print_field): Change prototype for

102
docs/design/part-clocks.txt Normal file
View file

@ -0,0 +1,102 @@
Clocks
------
To synchronize the different elements, the GstPipeline is responsible for
selecting and distributing a global GstClock for all the elements in it.
This selection happens whenever an element is added or removed from the
pipeline. Whenever the clock changes in a pipeline, a message is posted on
the bus signaling the new clock to the application.
The GstClock returns a monotonically increasing time with the method
_get_time(). Its accuracy and base time depend on the specific clock
implementation, but time is always expressed in nanoseconds. Since the
baseline of the clock is undefined, the clock time returned is not
meaningful in itself; what matters is the delta between two clock
times.
The time reported by the clock is called the absolute time.
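For illustration, a minimal sketch that takes two readings of the system
clock and only uses their difference (gst_system_clock_obtain() and
gst_clock_get_time() are the same calls exercised by the clock testsuite
added in this commit):

  #include <gst/gst.h>

  int
  main (int argc, char *argv[])
  {
    GstClock *clock;
    GstClockTime t1, t2;

    gst_init (&argc, &argv);

    clock = gst_system_clock_obtain ();
    t1 = gst_clock_get_time (clock);
    g_usleep (G_USEC_PER_SEC / 2);      /* half a second of "work" */
    t2 = gst_clock_get_time (clock);

    /* t1 and t2 are not meaningful on their own; only the delta is,
     * here roughly half a second expressed in nanoseconds */
    g_print ("elapsed %" G_GUINT64_FORMAT " ns\n", (guint64) (t2 - t1));

    return 0;
  }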
Time in GStreamer
-----------------
The absolute time is used to calculate the stream time. The stream time
is defined as follows:
- If the pipeline is NULL/READY, the stream time is undefined.
- In PAUSED, the stream time remains at the time when it was last
PAUSED. When the stream is PAUSED for the first time, the stream time
is 0.
- In PLAYING, the stream time is the delta between the absolute time
and the base time. The base time is defined as the absolute time minus
the stream time at the time when the pipeline is set to PLAYING.
- After a seek, the stream time is set to 0 again.
Timestamps
----------
Timestamps on buffers are always expressed in stream time. This means that
all elements that require synchronizing to the clock need to be aware of
the clock base time in order to know the absolute time of the timestamp.
Converting a timestamp (in stream time) to absolute time is performed using
the following formula:
  AT = BT + ST

  where: AT = absolute time
         BT = base time
         ST = stream time
The pipeline base time is propagated to all the elements during the PAUSED
to PLAYING state change. All elements are therefore able to convert the
stream time to the absolute time. It is possible to specify an additional
delay to the base time to compensate for the delay it takes to perform
the state change.
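As a worked sketch of the formula (the times are invented for the example;
only the standard GST_SECOND, GST_TIME_FORMAT and GST_TIME_ARGS macros are
used):

  #include <gst/gst.h>

  int
  main (int argc, char *argv[])
  {
    GstClockTime base_time, stream_time, absolute_time;

    gst_init (&argc, &argv);

    /* suppose the pipeline went to PLAYING when the clock read 100s while
     * the stream time stood at 20s: BT = AT - ST = 80s */
    base_time = 100 * GST_SECOND - 20 * GST_SECOND;

    /* a buffer carries a timestamp of 25s (stream time) */
    stream_time = 25 * GST_SECOND;

    /* AT = BT + ST: the buffer should be rendered at clock time 105s */
    absolute_time = base_time + stream_time;

    g_print ("render at %" GST_TIME_FORMAT "\n", GST_TIME_ARGS (absolute_time));

    return 0;
  }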
Clock features
--------------
The clock supports periodic and single-shot clock notifications, both
synchronous and asynchronous.
One first needs to create a GstClockID for the periodic or single-shot
notification using _clock_new_single_shot_id() or _clock_new_periodic_id().
To perform a blocking wait for the specific time of the GstClockID, use
gst_clock_id_wait(). To receive a callback when the specific time is reached
in the clock, use gst_clock_id_wait_async(). Both calls can be interrupted
with gst_clock_id_unschedule(). If the blocking wait is unscheduled,
GST_CLOCK_UNSCHEDULED is returned.
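A minimal sketch of a single-shot notification (assuming
gst_clock_new_single_shot_id() takes the clock and an absolute target time,
analogous to gst_clock_new_periodic_id() used in the testsuite below; error
handling is omitted):

  #include <gst/gst.h>

  static gboolean
  fired (GstClock * clock, GstClockTime time, GstClockID id, gpointer user_data)
  {
    g_print ("async notification for id %p\n", id);
    return FALSE;
  }

  int
  main (int argc, char *argv[])
  {
    GstClock *clock;
    GstClockID id;
    GstClockTimeDiff jitter = 0;
    GstClockReturn ret;

    gst_init (&argc, &argv);
    clock = gst_system_clock_obtain ();

    /* blocking wait until one second from now */
    id = gst_clock_new_single_shot_id (clock,
        gst_clock_get_time (clock) + GST_SECOND);
    ret = gst_clock_id_wait (id, &jitter);
    g_print ("wait returned %d, jitter %" G_GINT64_FORMAT "\n", ret, jitter);
    gst_clock_id_unref (id);

    /* async callback one second from now, on a fresh id */
    id = gst_clock_new_single_shot_id (clock,
        gst_clock_get_time (clock) + GST_SECOND);
    ret = gst_clock_id_wait_async (id, fired, NULL);
    g_assert (ret == GST_CLOCK_OK);
    g_usleep (2 * G_USEC_PER_SEC);

    /* once unscheduled, the id cannot be used for further waits */
    gst_clock_id_unschedule (id);
    g_assert (gst_clock_id_wait (id, NULL) == GST_CLOCK_UNSCHEDULED);
    gst_clock_id_unref (id);

    return 0;
  }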
The async callbacks can happen from any thread, either provided by the
core or from a streaming thread. The application should be prepared for this.
A GstClockID that has been unscheduled cannot be used again for any wait
operation.
It is possible to perform a blocking wait on the same ID from multiple
threads. However, registering the same ID for multiple async notifications is
not possible; the callback will only be called once.
None of the wait operations unref the GstClockID; the application is
responsible for unreffing the id itself.
These clock operations do not operate on the stream time, so the callbacks
will also occur when the pipeline is not in the PLAYING state; the clock
simply keeps on running.
Clock implementations
---------------------
The GStreamer core provides a GstSystemClock based on the system time.
Asynchronous callbacks are scheduled from an internal thread.
Clock implementors are encouraged to subclass this system clock as it
already implements the async notification.
Subclasses can, however, override all of the important methods for sync and
async notifications to implement their own callback methods or blocking
wait operations.

View file

@ -24,9 +24,9 @@ Different types of events exist to implement various functionalities.
EOS
---
The EOS event can only be send on a sinkpad. It is typically emited by the
source element when it has finished sending data. This event is mainly send
in the streaming thread but can also be send from the application thread.
The EOS event can only be sent on a sinkpad. It is typically emited by the
source element when it has finished sending data. This event is mainly sent
in the streaming thread but can also be sent from the application thread.
The downstream element should forward the EOS event to its downstream peer
elements. This way the event will eventually reach the renderers which should
@ -58,7 +58,7 @@ goes to PLAYING.
FLUSH
-----
A flush event is send both downstream and upstream to clear any pending data
A flush event is sent both downstream and upstream to clear any pending data
from the pipeline. This might be needed to make the graph more responsive
when the normal dataflow gets interrupted by for example a seek event.
@ -90,10 +90,10 @@ unlocks and any pending buffers are cleared in the upstream elements.
DISCONTINUOUS
-------------
A discont event is send downstream by an element to indicate that the following
A discont event is sent downstream by an element to indicate that the following
group of buffers start and end at the specified time.
After a seek event for example, a discont event is send.
After a seek event for example, a discont event is sent.
SEEK
@ -168,7 +168,7 @@ Navigation events travel downstream.
TAG
---
The tag event is send downstream when an element has discovered metadata
The tag event is sent downstream when an element has discovered metadata
tags in a media file. Encoders can use this event to adjust their tagging
system.

View file

@ -419,8 +419,7 @@ static const GstEventMask *
gst_fakesrc_get_event_mask (GstPad * pad)
{
static const GstEventMask masks[] = {
{GST_EVENT_SEEK, GST_SEEK_FLAG_FLUSH},
{GST_EVENT_SEEK_SEGMENT, GST_SEEK_FLAG_FLUSH | GST_SEEK_FLAG_SEGMENT_LOOP},
{GST_EVENT_SEEK, GST_SEEK_FLAG_FLUSH | GST_SEEK_FLAG_SEGMENT_LOOP},
{GST_EVENT_FLUSH, 0},
{0, 0},
};
@ -437,18 +436,12 @@ gst_fakesrc_event_handler (GstPad * pad, GstEvent * event)
switch (GST_EVENT_TYPE (event)) {
case GST_EVENT_SEEK:
src->buffer_count = GST_EVENT_SEEK_OFFSET (event);
if (!GST_EVENT_SEEK_FLAGS (event) & GST_SEEK_FLAG_FLUSH) {
break;
}
/* else we do a flush too */
case GST_EVENT_SEEK_SEGMENT:
src->segment_start = GST_EVENT_SEEK_OFFSET (event);
src->segment_end = GST_EVENT_SEEK_ENDOFFSET (event);
src->buffer_count = src->segment_start;
src->segment_loop =
GST_EVENT_SEEK_FLAGS (event) & GST_SEEK_FLAG_SEGMENT_LOOP;
src->need_flush = GST_EVENT_SEEK_FLAGS (event) & GST_SEEK_FLAG_FLUSH;
break;
case GST_EVENT_FLUSH:
src->need_flush = TRUE;
@ -795,7 +788,7 @@ gst_fakesrc_loop (GstPad * pad)
if (src->buffer_count == src->segment_end) {
if (src->segment_loop) {
gst_pad_push_event (pad, gst_event_new (GST_EVENT_SEGMENT_DONE));
//gst_pad_push_event (pad, gst_event_new (GST_EVENT_SEGMENT_DONE));
} else {
gst_pad_push_event (pad, gst_event_new (GST_EVENT_EOS));
gst_task_pause (src->task);

View file

@ -211,7 +211,14 @@ gst_clock_id_compare_func (gconstpointer id1, gconstpointer id2)
entry1 = (GstClockEntry *) id1;
entry2 = (GstClockEntry *) id2;
return GST_CLOCK_ENTRY_TIME (entry1) - GST_CLOCK_ENTRY_TIME (entry2);
if (GST_CLOCK_ENTRY_TIME (entry1) > GST_CLOCK_ENTRY_TIME (entry2)) {
return 1;
}
if (GST_CLOCK_ENTRY_TIME (entry1) < GST_CLOCK_ENTRY_TIME (entry2)) {
return -1;
}
return entry1 - entry2;
}
/**
@ -262,6 +269,9 @@ gst_clock_id_wait (GstClockID id, GstClockTimeDiff * jitter)
if (G_UNLIKELY (!GST_CLOCK_TIME_IS_VALID (requested)))
goto invalid_time;
if (G_UNLIKELY (entry->status == GST_CLOCK_UNSCHEDULED))
goto unscheduled;
clock = GST_CLOCK_ENTRY_CLOCK (entry);
cclass = GST_CLOCK_GET_CLASS (clock);
@ -276,6 +286,9 @@ gst_clock_id_wait (GstClockID id, GstClockTimeDiff * jitter)
*jitter = now - requested;
}
if (entry->type == GST_CLOCK_ENTRY_PERIODIC) {
entry->time += entry->interval;
}
if (clock->stats) {
gst_clock_update_stats (clock);
@ -291,6 +304,11 @@ invalid_time:
GST_CAT_DEBUG (GST_CAT_CLOCK, "invalid time requested, returning _BADTIME");
return GST_CLOCK_BADTIME;
}
unscheduled:
{
GST_CAT_DEBUG (GST_CAT_CLOCK, "entry was unscheduled return _UNSCHEDULED");
return GST_CLOCK_UNSCHEDULED;
}
}
/**
@ -329,6 +347,9 @@ gst_clock_id_wait_async (GstClockID id,
if (G_UNLIKELY (!GST_CLOCK_TIME_IS_VALID (requested)))
goto invalid_time;
if (G_UNLIKELY (entry->status == GST_CLOCK_UNSCHEDULED))
goto unscheduled;
cclass = GST_CLOCK_GET_CLASS (clock);
if (cclass->wait_async) {
@ -348,6 +369,11 @@ invalid_time:
GST_CAT_DEBUG (GST_CAT_CLOCK, "invalid time requested, returning _BADTIME");
return GST_CLOCK_BADTIME;
}
unscheduled:
{
GST_CAT_DEBUG (GST_CAT_CLOCK, "entry was unscheduled return _UNSCHEDULED");
return GST_CLOCK_UNSCHEDULED;
}
}
/**
@ -356,6 +382,8 @@ invalid_time:
*
* Cancel an outstanding request with the given ID. This can either
* be an outstanding async notification or a pending sync notification.
* After this call, the @id cannot be used anymore to receive sync or
* async notifications, you need to create a new GstClockID.
*
* MT safe.
*/

View file

@ -354,7 +354,7 @@ gst_event_new_segment_seek (GstSeekType type, gint64 start, gint64 stop)
g_return_val_if_fail (start < stop, NULL);
event = gst_event_new (GST_EVENT_SEEK_SEGMENT);
event = gst_event_new (GST_EVENT_SEEK);
GST_EVENT_SEEK_TYPE (event) = type;
GST_EVENT_SEEK_OFFSET (event) = start;

View file

@ -41,8 +41,6 @@ typedef enum {
GST_EVENT_DISCONTINUOUS = 3,
GST_EVENT_QOS = 4,
GST_EVENT_SEEK = 5,
GST_EVENT_SEEK_SEGMENT = 6,
GST_EVENT_SEGMENT_DONE = 7,
GST_EVENT_SIZE = 8,
GST_EVENT_RATE = 9,
GST_EVENT_NAVIGATION = 10,

View file

@ -134,6 +134,7 @@ gst_pipeline_init (GTypeInstance * instance, gpointer g_class)
gst_bus_set_sync_handler (bus,
(GstBusSyncHandler) pipeline_bus_handler, pipeline);
pipeline->eosed = NULL;
pipeline->delay = 0;
/* we are our own manager */
GST_ELEMENT_MANAGER (pipeline) = pipeline;
gst_element_set_bus (GST_ELEMENT (pipeline), bus);
@ -288,13 +289,17 @@ gst_pipeline_change_state (GstElement * element)
}
case GST_STATE_PAUSED_TO_PLAYING:
if (element->clock) {
/* we set time slightly ahead because of context switches */
pipeline->start_time = gst_clock_get_time (element->clock); // + 10*GST_MSECOND;
element->base_time = pipeline->start_time - pipeline->stream_time;
GstClockTime start_time = gst_clock_get_time (element->clock);
element->base_time = start_time -
pipeline->stream_time + pipeline->delay;
GST_DEBUG ("stream_time=%" GST_TIME_FORMAT ", start_time=%"
GST_TIME_FORMAT, GST_TIME_ARGS (pipeline->stream_time),
GST_TIME_ARGS (start_time));
} else {
element->base_time = 0;
GST_DEBUG ("no clock, using base time of 0");
}
GST_DEBUG ("stream_time=%" GST_TIME_FORMAT ", start_time=%"
GST_TIME_FORMAT, GST_TIME_ARGS (pipeline->stream_time),
GST_TIME_ARGS (pipeline->start_time));
break;
case GST_STATE_PLAYING_TO_PAUSED:
case GST_STATE_PAUSED_TO_READY:
@ -315,9 +320,8 @@ gst_pipeline_change_state (GstElement * element)
pipeline->stream_time = gst_clock_get_time (element->clock) -
element->base_time;
}
GST_DEBUG ("stream_time=%" GST_TIME_FORMAT ", start_time=%"
GST_TIME_FORMAT, GST_TIME_ARGS (pipeline->stream_time),
GST_TIME_ARGS (pipeline->start_time));
GST_DEBUG ("stream_time=%" GST_TIME_FORMAT,
GST_TIME_ARGS (pipeline->stream_time));
break;
case GST_STATE_PAUSED_TO_READY:
break;

View file

@ -50,8 +50,8 @@ struct _GstPipeline {
/*< public >*/ /* with LOCK */
GstClock *fixed_clock; /* fixed clock if any */
GstClockTime start_time;
GstClockTime stream_time;
GstClockTime delay;
GList *eosed; /* list of elements that posted EOS */

View file

@ -244,6 +244,7 @@ gst_system_clock_async_thread (GstClock * clock)
goto next_entry;
case GST_CLOCK_OK:
case GST_CLOCK_EARLY:
{
/* entry timed out normally, fire the callback and move to the next
* entry */
GST_CAT_DEBUG (GST_CAT_CLOCK, "async entry %p unlocked", entry);
@ -251,7 +252,18 @@ gst_system_clock_async_thread (GstClock * clock)
entry->func (clock, entry->time, (GstClockID) entry,
entry->user_data);
}
goto next_entry;
if (entry->type == GST_CLOCK_ENTRY_PERIODIC) {
/* adjust time now */
entry->time += entry->interval;
/* and resort the list now */
clock->entries =
g_list_sort (clock->entries, gst_clock_id_compare_func);
/* and restart */
continue;
} else {
goto next_entry;
}
}
case GST_CLOCK_BUSY:
/* somebody unlocked the entry but is was not canceled, This means that
* either a new entry was added in front of the queue or some other entry
@ -313,6 +325,7 @@ gst_system_clock_id_wait_unlocked (GstClock * clock, GstClockEntry * entry)
GstClockTime real, current, target;
GstClockTimeDiff diff;
/* need to call the overridden method */
real = GST_CLOCK_GET_CLASS (clock)->get_internal_time (clock);
target = GST_CLOCK_ENTRY_TIME (entry);
@ -389,7 +402,7 @@ gst_system_clock_id_wait_async (GstClock * clock, GstClockEntry * entry)
/* only need to send the signal if the entry was added to the
* front, else the thread is just waiting for another entry and
* will discard this entry automatically. */
* will get to this entry automatically. */
if (clock->entries->data == entry) {
GST_CAT_DEBUG (GST_CAT_CLOCK, "send signal");
GST_CLOCK_SIGNAL (clock);

View file

@ -330,15 +330,6 @@ gst_dp_packet_from_event (const GstEvent * event, GstDPHeaderFlag flags,
*payload = NULL;
break;
case GST_EVENT_SEEK:
pl_length = 4 + 8 + 4;
*payload = g_malloc0 (pl_length);
GST_WRITE_UINT32_BE (*payload, (guint32) GST_EVENT_SEEK_TYPE (event));
GST_WRITE_UINT64_BE (*payload + 4,
(guint64) GST_EVENT_SEEK_OFFSET (event));
GST_WRITE_UINT32_BE (*payload + 12,
(guint32) GST_EVENT_SEEK_ACCURACY (event));
break;
case GST_EVENT_SEEK_SEGMENT:
pl_length = 4 + 8 + 8 + 4;
*payload = g_malloc0 (pl_length);
GST_WRITE_UINT32_BE (*payload, (guint32) GST_EVENT_SEEK_TYPE (event));
@ -350,7 +341,6 @@ gst_dp_packet_from_event (const GstEvent * event, GstDPHeaderFlag flags,
(guint32) GST_EVENT_SEEK_ACCURACY (event));
break;
case GST_EVENT_QOS:
case GST_EVENT_SEGMENT_DONE:
case GST_EVENT_SIZE:
case GST_EVENT_RATE:
case GST_EVENT_NAVIGATION:
@ -490,20 +480,6 @@ gst_dp_event_from_packet (guint header_length, const guint8 * header,
GST_EVENT_TIMESTAMP (event) = GST_DP_HEADER_TIMESTAMP (header);
break;
case GST_EVENT_SEEK:
{
GstSeekType type;
gint64 offset;
GstSeekAccuracy accuracy;
type = (GstSeekType) GST_READ_UINT32_BE (payload);
offset = (gint64) GST_READ_UINT64_BE (payload + 4);
accuracy = (GstSeekAccuracy) GST_READ_UINT32_BE (payload + 12);
event = gst_event_new_seek (type, offset);
GST_EVENT_TIMESTAMP (event) = GST_DP_HEADER_TIMESTAMP (header);
GST_EVENT_SEEK_ACCURACY (event) = accuracy;
break;
}
case GST_EVENT_SEEK_SEGMENT:
{
GstSeekType type;
gint64 offset, endoffset;
@ -519,7 +495,6 @@ gst_dp_event_from_packet (guint header_length, const guint8 * header,
break;
}
case GST_EVENT_QOS:
case GST_EVENT_SEGMENT_DONE:
case GST_EVENT_SIZE:
case GST_EVENT_RATE:
case GST_EVENT_NAVIGATION:

View file

@ -1,5 +1,5 @@
include ../Rules
tests_pass = signedness clock1 clock2 clock3
tests_pass = signedness clock1 clock2 clock3 clock4
tests_fail =
tests_ignore =

100
testsuite/clock/clock4.c Normal file
View file

@ -0,0 +1,100 @@
/*
 * testsuite program to test clock behaviour
 *
 * creates a fakesrc ! identity ! fakesink pipeline
 * registers a callback on fakesrc and one on fakesink
 * also register a normal GLib timeout which should not be reached
 */

#include <gst/gst.h>

static GstClock *clock = NULL;

void
gst_clock_debug (GstClock * clock)
{
  GstClockTime time;

  time = gst_clock_get_time (clock);
  g_print ("Clock info: time %" G_GUINT64_FORMAT "\n", time);
}

static gboolean
ok_callback (GstClock * clock, GstClockTime time,
    GstClockID id, gpointer user_data)
{
  g_print ("unlocked async id %p\n", id);
  return FALSE;
}

static gboolean
error_callback (GstClock * clock, GstClockTime time,
    GstClockID id, gpointer user_data)
{
  g_print ("unlocked unscheduled async id %p, this is wrong\n", id);
  g_assert_not_reached ();
  return FALSE;
}

int
main (int argc, char *argv[])
{
  GstClockID id, id2;
  GstClockTime base;
  GstClockReturn result;

  gst_init (&argc, &argv);

  clock = gst_system_clock_obtain ();
  g_assert (clock != NULL);

  gst_clock_debug (clock);
  base = gst_clock_get_time (clock);

  /* signal every half a second */
  id = gst_clock_new_periodic_id (clock, base + GST_SECOND, GST_SECOND / 2);
  g_assert (id);

  g_print ("waiting one second\n");
  result = gst_clock_id_wait (id, NULL);
  gst_clock_debug (clock);
  g_assert (result == GST_CLOCK_OK);

  g_print ("waiting for the next\n");
  result = gst_clock_id_wait (id, NULL);
  gst_clock_debug (clock);
  g_assert (result == GST_CLOCK_OK);

  g_print ("waiting for the next async %p\n", id);
  result = gst_clock_id_wait_async (id, ok_callback, NULL);
  g_assert (result == GST_CLOCK_OK);
  g_usleep (2 * G_USEC_PER_SEC);

  g_print ("waiting some more for the next async %p\n", id);
  result = gst_clock_id_wait_async (id, ok_callback, NULL);
  g_assert (result == GST_CLOCK_OK);
  g_usleep (2 * G_USEC_PER_SEC);

  id2 = gst_clock_new_periodic_id (clock, base + GST_SECOND, GST_SECOND / 2);
  g_assert (id2);

  g_print ("waiting some more for another async %p\n", id2);
  result = gst_clock_id_wait_async (id2, ok_callback, NULL);
  g_assert (result == GST_CLOCK_OK);
  g_usleep (2 * G_USEC_PER_SEC);

  g_print ("unschedule %p\n", id);
  gst_clock_id_unschedule (id);

  /* entry cannot be used again */
  result = gst_clock_id_wait_async (id, error_callback, NULL);
  g_assert (result == GST_CLOCK_UNSCHEDULED);
  result = gst_clock_id_wait (id, NULL);
  g_assert (result == GST_CLOCK_UNSCHEDULED);
  g_usleep (2 * G_USEC_PER_SEC);

  /* success */
  return 0;
}
