tests: fakesink: make notify stress test work better on Windows

Set up all ten pipelines and preroll them first, and only then
set them all to PLAYING to run wild once everything is set up.
If we set them to PLAYING directly and let those threads run
wild, it can take ages (many seconds) for the remaining
pipelines to even get up and running, especially on machines
with only one or two cores and operating systems that suck at
scheduling.

Now the fakesink test takes 19 secs instead of 71 secs on a
single-CPU Windows machine.
Tim-Philipp Müller 2018-01-11 19:52:41 +00:00
parent a69dc71f79
commit c33e1224a7

@@ -953,12 +953,13 @@ test_notify_race_setup_pipeline (NotifyRacePipeline * p)
   gst_bin_add (GST_BIN (p->pipe), p->sink);
   gst_element_link_many (p->src, p->queue, p->sink, NULL);
-  GST_DEBUG ("Setting pipeline to PLAYING");
-  fail_unless_equals_int (gst_element_set_state (p->pipe, GST_STATE_PLAYING),
+  GST_DEBUG ("Setting pipeline to PAUSED..");
+  fail_unless_equals_int (gst_element_set_state (p->pipe, GST_STATE_PAUSED),
       GST_STATE_CHANGE_ASYNC);
-  GST_DEBUG ("Getting state");
+  GST_DEBUG ("Waiting for pipeline to preroll..");
   fail_unless_equals_int (gst_element_get_state (p->pipe, NULL, NULL, -1),
       GST_STATE_CHANGE_SUCCESS);
+  GST_DEBUG ("Ready to party!");
 }
 
 static void
@@ -977,10 +978,15 @@ GST_START_TEST (test_notify_race)
   int i;
 
   for (i = 0; i < G_N_ELEMENTS (pipelines); ++i) {
-    GST_DEBUG ("Starting up pipeline %d", i);
+    GST_DEBUG ("Setting up pipeline %d", i);
     test_notify_race_setup_pipeline (&pipelines[i]);
   }
 
+  for (i = 0; i < G_N_ELEMENTS (pipelines); ++i) {
+    GST_DEBUG ("Starting pipeline %d", i);
+    gst_element_set_state (pipelines[i].pipe, GST_STATE_PLAYING);
+  }
+
   g_usleep (2 * G_USEC_PER_SEC);
 
   for (i = 0; i < G_N_ELEMENTS (pipelines); ++i) {
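For reference, a minimal standalone sketch of the preroll-then-play pattern
this commit switches to. It is not the test's actual setup: the
fakesrc ! queue ! fakesink pipeline built via gst_parse_launch and the
pipeline count are assumptions for illustration, and the check-style
assertions are left out.

/* Sketch: preroll all pipelines (PAUSED), then start them all at once.
 * Build with: gcc sketch.c $(pkg-config --cflags --libs gstreamer-1.0) */
#include <gst/gst.h>

#define NUM_PIPELINES 10

int
main (int argc, char **argv)
{
  GstElement *pipelines[NUM_PIPELINES];
  int i;

  gst_init (&argc, &argv);

  /* Phase 1: construct and preroll every pipeline. Going to PAUSED is
   * async for these pipelines, so block until the state change (preroll)
   * has completed before moving on to the next one. */
  for (i = 0; i < NUM_PIPELINES; ++i) {
    pipelines[i] = gst_parse_launch ("fakesrc ! queue ! fakesink", NULL);
    gst_element_set_state (pipelines[i], GST_STATE_PAUSED);
    gst_element_get_state (pipelines[i], NULL, NULL, GST_CLOCK_TIME_NONE);
  }

  /* Phase 2: only now let all the streaming threads run wild at once. */
  for (i = 0; i < NUM_PIPELINES; ++i)
    gst_element_set_state (pipelines[i], GST_STATE_PLAYING);

  /* Let them run for a couple of seconds, as the test does. */
  g_usleep (2 * G_USEC_PER_SEC);

  /* Shut down and clean up. */
  for (i = 0; i < NUM_PIPELINES; ++i) {
    gst_element_set_state (pipelines[i], GST_STATE_NULL);
    gst_object_unref (pipelines[i]);
  }
  return 0;
}

The point of the two-phase loop is that gst_element_get_state with an
infinite timeout blocks until preroll has finished, so no pipeline's
streaming threads start competing for CPU time until every pipeline has
been brought up.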