2012-03-07 09:18:49 +00:00
|
|
|
/* GStreamer
|
|
|
|
* Copyright (C) 2008 David Schleef <ds@schleef.org>
|
|
|
|
* Copyright (C) 2011 Mark Nauwelaerts <mark.nauwelaerts@collabora.co.uk>.
|
|
|
|
* Copyright (C) 2011 Nokia Corporation. All rights reserved.
|
|
|
|
* Contact: Stefan Kost <stefan.kost@nokia.com>
|
|
|
|
* Copyright (C) 2012 Collabora Ltd.
|
|
|
|
* Author : Edward Hervey <edward@collabora.com>
|
|
|
|
*
|
|
|
|
* This library is free software; you can redistribute it and/or
|
|
|
|
* modify it under the terms of the GNU Library General Public
|
|
|
|
* License as published by the Free Software Foundation; either
|
|
|
|
* version 2 of the License, or (at your option) any later version.
|
|
|
|
*
|
|
|
|
* This library is distributed in the hope that it will be useful,
|
|
|
|
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
|
|
|
* Library General Public License for more details.
|
|
|
|
*
|
|
|
|
* You should have received a copy of the GNU Library General Public
|
|
|
|
* License along with this library; if not, write to the
|
2012-11-03 23:05:09 +00:00
|
|
|
* Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
|
|
|
|
* Boston, MA 02110-1301, USA.
|
2012-03-07 09:18:49 +00:00
|
|
|
*/
|
|
|
|
|
|
|
|
/**
|
|
|
|
* SECTION:gstvideodecoder
|
2017-01-23 19:36:11 +00:00
|
|
|
* @title: GstVideoDecoder
|
2012-03-07 09:18:49 +00:00
|
|
|
* @short_description: Base class for video decoders
|
|
|
|
*
|
|
|
|
* This base class is for video decoders turning encoded data into raw video
|
|
|
|
* frames.
|
|
|
|
*
|
2015-08-10 11:17:09 +00:00
|
|
|
* The GstVideoDecoder base class and derived subclasses should cooperate as
|
|
|
|
* follows:
|
2017-01-23 19:36:11 +00:00
|
|
|
*
|
|
|
|
* ## Configuration
|
|
|
|
*
|
|
|
|
* * Initially, GstVideoDecoder calls @start when the decoder element
|
2012-06-19 13:16:12 +00:00
|
|
|
* is activated, which allows the subclass to perform any global setup.
|
2017-01-23 19:36:11 +00:00
|
|
|
*
|
|
|
|
* * GstVideoDecoder calls @set_format to inform the subclass of caps
|
2012-03-07 09:18:49 +00:00
|
|
|
* describing input video data that it is about to receive, including
|
|
|
|
* possibly configuration data.
|
|
|
|
* While unlikely, it might be called more than once, if changing input
|
|
|
|
* parameters require reconfiguration.
|
2017-01-23 19:36:11 +00:00
|
|
|
*
|
|
|
|
* * Incoming data buffers are processed as needed, described in Data
|
2015-08-10 11:17:09 +00:00
|
|
|
* Processing below.
|
2017-01-23 19:36:11 +00:00
|
|
|
*
|
|
|
|
* * GstVideoDecoder calls @stop at end of all processing.
|
|
|
|
*
|
|
|
|
* ## Data processing
|
|
|
|
*
|
2020-03-21 13:03:44 +00:00
|
|
|
* * The base class gathers input data, and optionally allows subclass
|
|
|
|
* to parse this into subsequently manageable chunks, typically
|
|
|
|
* corresponding to and referred to as 'frames'.
|
|
|
|
*
|
|
|
|
* * Each input frame is provided in turn to the subclass' @handle_frame
|
|
|
|
* callback.
|
|
|
|
* The ownership of the frame is given to the @handle_frame callback.
|
|
|
|
*
|
|
|
|
* * If codec processing results in decoded data, the subclass should call
|
|
|
|
* @gst_video_decoder_finish_frame to have decoded data pushed.
|
|
|
|
* downstream. Otherwise, the subclass must call
|
|
|
|
* @gst_video_decoder_drop_frame, to allow the base class to do timestamp
|
|
|
|
* and offset tracking, and possibly to requeue the frame for a later
|
|
|
|
* attempt in the case of reverse playback.
|
2017-01-23 19:36:11 +00:00
|
|
|
*
|
|
|
|
* ## Shutdown phase
|
|
|
|
*
|
|
|
|
* * The GstVideoDecoder class calls @stop to inform the subclass that data
|
2012-03-07 09:18:49 +00:00
|
|
|
* parsing will be stopped.
|
2017-01-23 19:36:11 +00:00
|
|
|
*
|
|
|
|
* ## Additional Notes
|
|
|
|
*
|
|
|
|
* * Seeking/Flushing
|
|
|
|
*
|
|
|
|
* * When the pipeline is seeked or otherwise flushed, the subclass is
|
|
|
|
* informed via a call to its @reset callback, with the hard parameter
|
|
|
|
* set to true. This indicates the subclass should drop any internal data
|
|
|
|
* queues and timestamps and prepare for a fresh set of buffers to arrive
|
|
|
|
* for parsing and decoding.
|
|
|
|
*
|
|
|
|
* * End Of Stream
|
|
|
|
*
|
|
|
|
* * At end-of-stream, the subclass @parse function may be called some final
|
|
|
|
* times with the at_eos parameter set to true, indicating that the element
|
|
|
|
* should not expect any more data to be arriving, and it should parse and
|
|
|
|
* remaining frames and call gst_video_decoder_have_frame() if possible.
|
2012-03-07 09:18:49 +00:00
|
|
|
*
|
2012-06-19 13:16:12 +00:00
|
|
|
* The subclass is responsible for providing pad template caps for
|
2012-03-07 09:18:49 +00:00
|
|
|
* source and sink pads. The pads need to be named "sink" and "src". It also
|
2019-06-15 01:41:29 +00:00
|
|
|
* needs to provide information about the output caps, when they are known.
|
2012-06-19 13:16:12 +00:00
|
|
|
* This may be when the base class calls the subclass' @set_format function,
|
|
|
|
* though it might be during decoding, before calling
|
|
|
|
* @gst_video_decoder_finish_frame. This is done via
|
|
|
|
* @gst_video_decoder_set_output_state
|
2012-03-07 09:18:49 +00:00
|
|
|
*
|
2012-06-19 13:16:12 +00:00
|
|
|
* The subclass is also responsible for providing (presentation) timestamps
|
2012-03-07 09:18:49 +00:00
|
|
|
* (likely based on corresponding input ones). If that is not applicable
|
2012-06-19 13:16:12 +00:00
|
|
|
* or possible, the base class provides limited framerate based interpolation.
|
2012-03-07 09:18:49 +00:00
|
|
|
*
|
2012-06-19 13:16:12 +00:00
|
|
|
* Similarly, the base class provides some limited (legacy) seeking support
|
|
|
|
* if specifically requested by the subclass, as full-fledged support
|
2012-03-07 09:18:49 +00:00
|
|
|
* should rather be left to upstream demuxer, parser or alike. This simple
|
|
|
|
* approach caters for seeking and duration reporting using estimated input
|
2012-06-19 13:16:12 +00:00
|
|
|
* bitrates. To enable it, a subclass should call
|
2015-08-10 11:17:09 +00:00
|
|
|
* @gst_video_decoder_set_estimate_rate to enable handling of incoming
|
|
|
|
* byte-streams.
|
2012-03-07 09:18:49 +00:00
|
|
|
*
|
2012-06-19 13:16:12 +00:00
|
|
|
* The base class provides some support for reverse playback, in particular
|
2012-03-07 09:18:49 +00:00
|
|
|
* in case incoming data is not packetized or upstream does not provide
|
2015-08-10 11:17:09 +00:00
|
|
|
* fragments on keyframe boundaries. However, the subclass should then be
|
|
|
|
* prepared for the parsing and frame processing stage to occur separately
|
|
|
|
* (in normal forward processing, the latter immediately follows the former),
|
|
|
|
* The subclass also needs to ensure the parsing stage properly marks
|
|
|
|
* keyframes, unless it knows the upstream elements will do so properly for
|
|
|
|
* incoming data.
|
2012-03-07 09:18:49 +00:00
|
|
|
*
|
2012-06-19 13:16:12 +00:00
|
|
|
* The bare minimum that a functional subclass needs to implement is:
|
2017-01-23 19:36:11 +00:00
|
|
|
*
|
|
|
|
* * Provide pad templates
|
|
|
|
* * Inform the base class of output caps via
|
2015-08-10 11:17:09 +00:00
|
|
|
* @gst_video_decoder_set_output_state
|
2017-01-23 19:36:11 +00:00
|
|
|
*
|
|
|
|
* * Parse input data, if it is not considered packetized from upstream
|
2015-08-10 11:17:09 +00:00
|
|
|
* Data will be provided to @parse which should invoke
|
|
|
|
* @gst_video_decoder_add_to_frame and @gst_video_decoder_have_frame to
|
|
|
|
* separate the data belonging to each video frame.
|
2017-01-23 19:36:11 +00:00
|
|
|
*
|
|
|
|
* * Accept data in @handle_frame and provide decoded results to
|
2012-06-19 13:16:12 +00:00
|
|
|
* @gst_video_decoder_finish_frame, or call @gst_video_decoder_drop_frame.
|
2012-03-07 09:18:49 +00:00
|
|
|
*/
|
|
|
|
|
|
|
|
#ifdef HAVE_CONFIG_H
|
|
|
|
#include "config.h"
|
|
|
|
#endif
|
|
|
|
|
|
|
|
/* TODO
|
|
|
|
*
|
|
|
|
* * Add a flag/boolean for I-frame-only/image decoders so we can do extra
|
|
|
|
* features, like applying QoS on input (as opposed to after the frame is
|
|
|
|
* decoded).
|
|
|
|
* * Add a flag/boolean for decoders that require keyframes, so the base
|
|
|
|
* class can automatically discard non-keyframes before one has arrived
|
2012-04-24 17:35:24 +00:00
|
|
|
* * Detect reordered frame/timestamps and fix the pts/dts
|
2012-03-07 09:18:49 +00:00
|
|
|
* * Support for GstIndex (or shall we not care ?)
|
|
|
|
* * Calculate actual latency based on input/output timestamp/frame_number
|
|
|
|
* and if it exceeds the recorded one, save it and emit a GST_MESSAGE_LATENCY
|
|
|
|
* * Emit latency message when it changes
|
|
|
|
*
|
|
|
|
*/
|
|
|
|
|
2012-06-19 13:16:12 +00:00
|
|
|
/* Implementation notes:
|
|
|
|
* The Video Decoder base class operates in 2 primary processing modes, depending
|
|
|
|
* on whether forward or reverse playback is requested.
|
|
|
|
*
|
|
|
|
* Forward playback:
|
2015-08-10 11:17:09 +00:00
|
|
|
* * Incoming buffer -> @parse() -> add_to_frame()/have_frame() ->
|
|
|
|
* handle_frame() -> push downstream
|
2012-06-19 13:16:12 +00:00
|
|
|
*
|
2015-08-10 11:17:09 +00:00
|
|
|
* Reverse playback is more complicated, since it involves gathering incoming
|
|
|
|
* data regions as we loop backwards through the upstream data. The processing
|
|
|
|
* concept (using incoming buffers as containing one frame each to simplify
|
|
|
|
* things) is:
|
2012-06-19 13:16:12 +00:00
|
|
|
*
|
|
|
|
* Upstream data we want to play:
|
|
|
|
* Buffer encoded order: 1 2 3 4 5 6 7 8 9 EOS
|
2015-08-10 11:17:09 +00:00
|
|
|
* Keyframe flag: K K
|
2012-06-19 13:16:12 +00:00
|
|
|
* Groupings: AAAAAAA BBBBBBB CCCCCCC
|
|
|
|
*
|
|
|
|
* Input:
|
|
|
|
* Buffer reception order: 7 8 9 4 5 6 1 2 3 EOS
|
|
|
|
* Keyframe flag: K K
|
|
|
|
* Discont flag: D D D
|
|
|
|
*
|
|
|
|
* - Each Discont marks a discont in the decoding order.
|
|
|
|
* - The keyframes mark where we can start decoding.
|
|
|
|
*
|
|
|
|
* Initially, we prepend incoming buffers to the gather queue. Whenever the
|
|
|
|
* discont flag is set on an incoming buffer, the gather queue is flushed out
|
|
|
|
* before the new buffer is collected.
|
|
|
|
*
|
|
|
|
* The above data will be accumulated in the gather queue like this:
|
|
|
|
*
|
|
|
|
* gather queue: 9 8 7
|
|
|
|
* D
|
|
|
|
*
|
2015-04-09 17:09:17 +00:00
|
|
|
* When buffer 4 is received (with a DISCONT), we flush the gather queue like
|
2012-06-19 13:16:12 +00:00
|
|
|
* this:
|
|
|
|
*
|
|
|
|
* while (gather)
|
2015-08-10 11:17:09 +00:00
|
|
|
* take head of queue and prepend to parse queue (this reverses the
|
|
|
|
* sequence, so parse queue is 7 -> 8 -> 9)
|
2012-06-19 13:16:12 +00:00
|
|
|
*
|
2015-08-10 11:17:09 +00:00
|
|
|
* Next, we process the parse queue, which now contains all un-parsed packets
|
|
|
|
* (including any leftover ones from the previous decode section)
|
2012-06-19 13:16:12 +00:00
|
|
|
*
|
|
|
|
* for each buffer now in the parse queue:
|
|
|
|
* Call the subclass parse function, prepending each resulting frame to
|
|
|
|
* the parse_gather queue. Buffers which precede the first one that
|
2015-08-10 11:17:09 +00:00
|
|
|
* produces a parsed frame are retained in the parse queue for
|
|
|
|
* re-processing on the next cycle of parsing.
|
2012-06-19 13:16:12 +00:00
|
|
|
*
|
2015-08-10 11:17:09 +00:00
|
|
|
* The parse_gather queue now contains frame objects ready for decoding,
|
|
|
|
* in reverse order.
|
2012-06-19 13:16:12 +00:00
|
|
|
* parse_gather: 9 -> 8 -> 7
|
|
|
|
*
|
|
|
|
* while (parse_gather)
|
|
|
|
* Take the head of the queue and prepend it to the decode queue
|
|
|
|
* If the frame was a keyframe, process the decode queue
|
|
|
|
* decode is now 7-8-9
|
|
|
|
*
|
|
|
|
* Processing the decode queue results in frames with attached output buffers
|
|
|
|
* stored in the 'output_queue' ready for outputting in reverse order.
|
|
|
|
*
|
2015-08-10 11:17:09 +00:00
|
|
|
* After we flushed the gather queue and parsed it, we add 4 to the (now empty)
|
|
|
|
* gather queue. We get the following situation:
|
2012-06-19 13:16:12 +00:00
|
|
|
*
|
|
|
|
* gather queue: 4
|
|
|
|
* decode queue: 7 8 9
|
|
|
|
*
|
|
|
|
* After we received 5 (Keyframe) and 6:
|
|
|
|
*
|
|
|
|
* gather queue: 6 5 4
|
|
|
|
* decode queue: 7 8 9
|
|
|
|
*
|
|
|
|
* When we receive 1 (DISCONT) which triggers a flush of the gather queue:
|
|
|
|
*
|
|
|
|
* Copy head of the gather queue (6) to decode queue:
|
|
|
|
*
|
|
|
|
* gather queue: 5 4
|
|
|
|
* decode queue: 6 7 8 9
|
|
|
|
*
|
|
|
|
* Copy head of the gather queue (5) to decode queue. This is a keyframe so we
|
|
|
|
* can start decoding.
|
|
|
|
*
|
|
|
|
* gather queue: 4
|
|
|
|
* decode queue: 5 6 7 8 9
|
|
|
|
*
|
|
|
|
* Decode frames in decode queue, store raw decoded data in output queue, we
|
|
|
|
* can take the head of the decode queue and prepend the decoded result in the
|
|
|
|
* output queue:
|
|
|
|
*
|
|
|
|
* gather queue: 4
|
2015-08-10 11:17:09 +00:00
|
|
|
* decode queue:
|
2012-06-19 13:16:12 +00:00
|
|
|
* output queue: 9 8 7 6 5
|
|
|
|
*
|
|
|
|
* Now output all the frames in the output queue, picking a frame from the
|
|
|
|
* head of the queue.
|
|
|
|
*
|
|
|
|
* Copy head of the gather queue (4) to decode queue, we flushed the gather
|
|
|
|
* queue and can now store input buffer in the gather queue:
|
|
|
|
*
|
|
|
|
* gather queue: 1
|
|
|
|
* decode queue: 4
|
|
|
|
*
|
|
|
|
* When we receive EOS, the queue looks like:
|
|
|
|
*
|
|
|
|
* gather queue: 3 2 1
|
|
|
|
* decode queue: 4
|
|
|
|
*
|
|
|
|
* Fill decode queue, first keyframe we copy is 2:
|
|
|
|
*
|
|
|
|
* gather queue: 1
|
|
|
|
* decode queue: 2 3 4
|
|
|
|
*
|
|
|
|
* Decoded output:
|
|
|
|
*
|
|
|
|
* gather queue: 1
|
2015-08-10 11:17:09 +00:00
|
|
|
* decode queue:
|
2012-06-19 13:16:12 +00:00
|
|
|
* output queue: 4 3 2
|
|
|
|
*
|
|
|
|
* Leftover buffer 1 cannot be decoded and must be discarded.
|
|
|
|
*/
|
|
|
|
|
2012-03-07 09:18:49 +00:00
|
|
|
#include "gstvideodecoder.h"
|
|
|
|
#include "gstvideoutils.h"
|
2014-12-08 19:33:33 +00:00
|
|
|
#include "gstvideoutilsprivate.h"
|
2012-03-07 09:18:49 +00:00
|
|
|
|
2012-12-12 17:13:10 +00:00
|
|
|
#include <gst/video/video.h>
|
2012-09-03 06:19:09 +00:00
|
|
|
#include <gst/video/video-event.h>
|
2012-04-24 18:04:48 +00:00
|
|
|
#include <gst/video/gstvideopool.h>
|
|
|
|
#include <gst/video/gstvideometa.h>
|
2012-03-07 09:18:49 +00:00
|
|
|
#include <string.h>
|
|
|
|
|
|
|
|
GST_DEBUG_CATEGORY (videodecoder_debug);
|
|
|
|
#define GST_CAT_DEFAULT videodecoder_debug
|
|
|
|
|
2018-10-02 16:04:14 +00:00
|
|
|
/* properties */
|
|
|
|
#define DEFAULT_QOS TRUE
|
|
|
|
|
|
|
|
enum
|
|
|
|
{
|
|
|
|
PROP_0,
|
|
|
|
PROP_QOS,
|
|
|
|
};
|
|
|
|
|
2012-03-07 09:18:49 +00:00
|
|
|
struct _GstVideoDecoderPrivate
|
|
|
|
{
|
|
|
|
/* FIXME introduce a context ? */
|
|
|
|
|
2012-04-24 18:04:48 +00:00
|
|
|
GstBufferPool *pool;
|
2012-04-26 16:11:08 +00:00
|
|
|
GstAllocator *allocator;
|
|
|
|
GstAllocationParams params;
|
2012-04-24 18:04:48 +00:00
|
|
|
|
2012-03-07 09:18:49 +00:00
|
|
|
/* parse tracking */
|
|
|
|
/* input data */
|
|
|
|
GstAdapter *input_adapter;
|
|
|
|
/* assembles current frame */
|
|
|
|
GstAdapter *output_adapter;
|
|
|
|
|
|
|
|
/* Whether we attempt to convert newsegment from bytes to
|
|
|
|
* time using a bitrate estimation */
|
|
|
|
gboolean do_estimate_rate;
|
|
|
|
|
|
|
|
/* Whether input is considered packetized or not */
|
|
|
|
gboolean packetized;
|
|
|
|
|
|
|
|
/* Error handling */
|
|
|
|
gint max_errors;
|
|
|
|
gint error_count;
|
2013-10-29 17:40:23 +00:00
|
|
|
gboolean had_output_data;
|
|
|
|
gboolean had_input_data;
|
2012-03-07 09:18:49 +00:00
|
|
|
|
2013-12-05 10:34:36 +00:00
|
|
|
gboolean needs_format;
|
2015-02-22 20:13:35 +00:00
|
|
|
/* input_segment are output_segment identical */
|
|
|
|
gboolean in_out_segment_sync;
|
2012-07-26 12:28:26 +00:00
|
|
|
|
2012-03-07 09:18:49 +00:00
|
|
|
/* ... being tracked here;
|
|
|
|
* only available during parsing */
|
|
|
|
GstVideoCodecFrame *current_frame;
|
|
|
|
/* events that should apply to the current frame */
|
|
|
|
GList *current_frame_events;
|
2013-05-09 13:34:10 +00:00
|
|
|
/* events that should be pushed before the next frame */
|
|
|
|
GList *pending_events;
|
2012-03-07 09:18:49 +00:00
|
|
|
|
|
|
|
/* relative offset of input data */
|
|
|
|
guint64 input_offset;
|
|
|
|
/* relative offset of frame */
|
|
|
|
guint64 frame_offset;
|
|
|
|
/* tracking ts and offsets */
|
|
|
|
GList *timestamps;
|
|
|
|
|
2012-04-24 17:35:24 +00:00
|
|
|
/* last outgoing ts */
|
2012-06-19 14:36:38 +00:00
|
|
|
GstClockTime last_timestamp_out;
|
2012-09-27 09:31:34 +00:00
|
|
|
/* incoming pts - dts */
|
|
|
|
GstClockTime pts_delta;
|
2012-10-10 13:04:07 +00:00
|
|
|
gboolean reordered_output;
|
2012-03-07 09:18:49 +00:00
|
|
|
|
|
|
|
/* reverse playback */
|
|
|
|
/* collect input */
|
|
|
|
GList *gather;
|
|
|
|
/* to-be-parsed */
|
|
|
|
GList *parse;
|
|
|
|
/* collected parsed frames */
|
|
|
|
GList *parse_gather;
|
|
|
|
/* frames to be handled == decoded */
|
|
|
|
GList *decode;
|
2012-06-19 13:46:44 +00:00
|
|
|
/* collected output - of buffer objects, not frames */
|
|
|
|
GList *output_queued;
|
2012-03-07 09:18:49 +00:00
|
|
|
|
2012-06-19 14:36:38 +00:00
|
|
|
|
|
|
|
/* base_picture_number is the picture number of the reference picture */
|
2012-03-07 09:18:49 +00:00
|
|
|
guint64 base_picture_number;
|
2012-06-19 14:36:38 +00:00
|
|
|
/* combine with base_picture_number, framerate and calcs to yield (presentation) ts */
|
|
|
|
GstClockTime base_timestamp;
|
|
|
|
|
2012-03-07 09:18:49 +00:00
|
|
|
/* FIXME : reorder_depth is never set */
|
|
|
|
int reorder_depth;
|
|
|
|
int distance_from_sync;
|
|
|
|
|
2012-07-26 16:36:53 +00:00
|
|
|
guint32 system_frame_number;
|
|
|
|
guint32 decode_frame_number;
|
2012-03-07 09:18:49 +00:00
|
|
|
|
|
|
|
GList *frames; /* Protected with OBJECT_LOCK */
|
|
|
|
GstVideoCodecState *input_state;
|
2012-09-29 00:07:43 +00:00
|
|
|
GstVideoCodecState *output_state; /* OBJECT_LOCK and STREAM_LOCK */
|
2012-03-07 09:18:49 +00:00
|
|
|
gboolean output_state_changed;
|
|
|
|
|
|
|
|
/* QoS properties */
|
2018-10-02 16:04:14 +00:00
|
|
|
gboolean do_qos;
|
2012-09-24 09:16:09 +00:00
|
|
|
gdouble proportion; /* OBJECT_LOCK */
|
|
|
|
GstClockTime earliest_time; /* OBJECT_LOCK */
|
|
|
|
GstClockTime qos_frame_duration; /* OBJECT_LOCK */
|
2012-03-07 09:18:49 +00:00
|
|
|
gboolean discont;
|
|
|
|
/* qos messages: frames dropped/processed */
|
|
|
|
guint dropped;
|
|
|
|
guint processed;
|
|
|
|
|
|
|
|
/* Outgoing byte size ? */
|
|
|
|
gint64 bytes_out;
|
|
|
|
gint64 time;
|
|
|
|
|
|
|
|
gint64 min_latency;
|
|
|
|
gint64 max_latency;
|
2012-08-09 14:02:42 +00:00
|
|
|
|
2015-08-16 16:55:22 +00:00
|
|
|
/* upstream stream tags (global tags are passed through as-is) */
|
|
|
|
GstTagList *upstream_tags;
|
|
|
|
|
|
|
|
/* subclass tags */
|
2012-08-09 14:02:42 +00:00
|
|
|
GstTagList *tags;
|
2015-08-16 16:55:22 +00:00
|
|
|
GstTagMergeMode tags_merge_mode;
|
|
|
|
|
2012-08-09 14:02:42 +00:00
|
|
|
gboolean tags_changed;
|
2015-08-15 10:20:25 +00:00
|
|
|
|
|
|
|
/* flags */
|
|
|
|
gboolean use_default_pad_acceptcaps;
|
2017-03-16 02:56:10 +00:00
|
|
|
|
|
|
|
#ifndef GST_DISABLE_DEBUG
|
|
|
|
/* Diagnostic time for reporting the time
|
|
|
|
* from flush to first output */
|
|
|
|
GstClockTime last_reset_time;
|
|
|
|
#endif
|
2012-03-07 09:18:49 +00:00
|
|
|
};
|
|
|
|
|
2012-04-24 17:35:24 +00:00
|
|
|
static GstElementClass *parent_class = NULL;
|
2018-06-23 19:33:16 +00:00
|
|
|
static gint private_offset = 0;
|
|
|
|
|
2012-04-24 17:35:24 +00:00
|
|
|
static void gst_video_decoder_class_init (GstVideoDecoderClass * klass);
|
|
|
|
static void gst_video_decoder_init (GstVideoDecoder * dec,
|
|
|
|
GstVideoDecoderClass * klass);
|
|
|
|
|
2012-03-07 09:18:49 +00:00
|
|
|
static void gst_video_decoder_finalize (GObject * object);
|
2018-10-02 16:04:14 +00:00
|
|
|
static void gst_video_decoder_get_property (GObject * object, guint property_id,
|
|
|
|
GValue * value, GParamSpec * pspec);
|
|
|
|
static void gst_video_decoder_set_property (GObject * object, guint property_id,
|
|
|
|
const GValue * value, GParamSpec * pspec);
|
2012-03-07 09:18:49 +00:00
|
|
|
|
2012-04-24 17:35:24 +00:00
|
|
|
static gboolean gst_video_decoder_setcaps (GstVideoDecoder * dec,
|
|
|
|
GstCaps * caps);
|
|
|
|
static gboolean gst_video_decoder_sink_event (GstPad * pad, GstObject * parent,
|
|
|
|
GstEvent * event);
|
|
|
|
static gboolean gst_video_decoder_src_event (GstPad * pad, GstObject * parent,
|
|
|
|
GstEvent * event);
|
|
|
|
static GstFlowReturn gst_video_decoder_chain (GstPad * pad, GstObject * parent,
|
|
|
|
GstBuffer * buf);
|
|
|
|
static gboolean gst_video_decoder_sink_query (GstPad * pad, GstObject * parent,
|
|
|
|
GstQuery * query);
|
|
|
|
static GstStateChangeReturn gst_video_decoder_change_state (GstElement *
|
|
|
|
element, GstStateChange transition);
|
|
|
|
static gboolean gst_video_decoder_src_query (GstPad * pad, GstObject * parent,
|
|
|
|
GstQuery * query);
|
2013-08-15 12:15:05 +00:00
|
|
|
static void gst_video_decoder_reset (GstVideoDecoder * decoder, gboolean full,
|
|
|
|
gboolean flush_hard);
|
2012-03-07 09:18:49 +00:00
|
|
|
|
2012-06-19 13:28:08 +00:00
|
|
|
static GstFlowReturn gst_video_decoder_decode_frame (GstVideoDecoder * decoder,
|
|
|
|
GstVideoCodecFrame * frame);
|
2012-03-07 09:18:49 +00:00
|
|
|
|
2015-08-10 02:23:05 +00:00
|
|
|
static void gst_video_decoder_push_event_list (GstVideoDecoder * decoder,
|
|
|
|
GList * events);
|
2012-06-19 14:36:38 +00:00
|
|
|
static GstClockTime gst_video_decoder_get_frame_duration (GstVideoDecoder *
|
|
|
|
decoder, GstVideoCodecFrame * frame);
|
2012-03-07 09:18:49 +00:00
|
|
|
static GstVideoCodecFrame *gst_video_decoder_new_frame (GstVideoDecoder *
|
|
|
|
decoder);
|
2012-06-19 14:08:57 +00:00
|
|
|
static GstFlowReturn gst_video_decoder_clip_and_push_buf (GstVideoDecoder *
|
|
|
|
decoder, GstBuffer * buf);
|
2012-06-19 14:46:05 +00:00
|
|
|
static GstFlowReturn gst_video_decoder_flush_parse (GstVideoDecoder * dec,
|
|
|
|
gboolean at_eos);
|
2012-06-19 14:22:25 +00:00
|
|
|
|
2012-03-07 09:18:49 +00:00
|
|
|
static void gst_video_decoder_clear_queues (GstVideoDecoder * dec);
|
|
|
|
|
2012-04-24 17:51:30 +00:00
|
|
|
static gboolean gst_video_decoder_sink_event_default (GstVideoDecoder * decoder,
|
|
|
|
GstEvent * event);
|
|
|
|
static gboolean gst_video_decoder_src_event_default (GstVideoDecoder * decoder,
|
|
|
|
GstEvent * event);
|
2012-04-25 10:37:39 +00:00
|
|
|
static gboolean gst_video_decoder_decide_allocation_default (GstVideoDecoder *
|
|
|
|
decoder, GstQuery * query);
|
2012-06-15 14:06:12 +00:00
|
|
|
static gboolean gst_video_decoder_propose_allocation_default (GstVideoDecoder *
|
|
|
|
decoder, GstQuery * query);
|
2012-08-09 12:35:22 +00:00
|
|
|
static gboolean gst_video_decoder_negotiate_default (GstVideoDecoder * decoder);
|
2013-01-02 11:15:25 +00:00
|
|
|
static GstFlowReturn gst_video_decoder_parse_available (GstVideoDecoder * dec,
|
2013-03-31 16:29:07 +00:00
|
|
|
gboolean at_eos, gboolean new_buffer);
|
2013-12-05 14:31:25 +00:00
|
|
|
static gboolean gst_video_decoder_negotiate_unlocked (GstVideoDecoder *
|
|
|
|
decoder);
|
2015-08-15 11:14:00 +00:00
|
|
|
static gboolean gst_video_decoder_sink_query_default (GstVideoDecoder * decoder,
|
|
|
|
GstQuery * query);
|
2013-11-27 21:39:52 +00:00
|
|
|
static gboolean gst_video_decoder_src_query_default (GstVideoDecoder * decoder,
|
|
|
|
GstQuery * query);
|
2012-04-24 17:51:30 +00:00
|
|
|
|
2015-06-29 13:58:38 +00:00
|
|
|
static gboolean gst_video_decoder_transform_meta_default (GstVideoDecoder *
|
|
|
|
decoder, GstVideoCodecFrame * frame, GstMeta * meta);
|
|
|
|
|
2012-04-24 17:35:24 +00:00
|
|
|
/* we can't use G_DEFINE_ABSTRACT_TYPE because we need the klass in the _init
|
|
|
|
* method to get to the padtemplates */
|
|
|
|
GType
|
|
|
|
gst_video_decoder_get_type (void)
|
2012-03-07 09:18:49 +00:00
|
|
|
{
|
2012-04-24 17:35:24 +00:00
|
|
|
static volatile gsize type = 0;
|
|
|
|
|
|
|
|
if (g_once_init_enter (&type)) {
|
|
|
|
GType _type;
|
|
|
|
static const GTypeInfo info = {
|
|
|
|
sizeof (GstVideoDecoderClass),
|
|
|
|
NULL,
|
|
|
|
NULL,
|
|
|
|
(GClassInitFunc) gst_video_decoder_class_init,
|
|
|
|
NULL,
|
|
|
|
NULL,
|
|
|
|
sizeof (GstVideoDecoder),
|
|
|
|
0,
|
|
|
|
(GInstanceInitFunc) gst_video_decoder_init,
|
|
|
|
};
|
|
|
|
|
|
|
|
_type = g_type_register_static (GST_TYPE_ELEMENT,
|
|
|
|
"GstVideoDecoder", &info, G_TYPE_FLAG_ABSTRACT);
|
2018-06-23 19:33:16 +00:00
|
|
|
|
|
|
|
private_offset =
|
|
|
|
g_type_add_instance_private (_type, sizeof (GstVideoDecoderPrivate));
|
|
|
|
|
2012-04-24 17:35:24 +00:00
|
|
|
g_once_init_leave (&type, _type);
|
|
|
|
}
|
|
|
|
return type;
|
2012-03-07 09:18:49 +00:00
|
|
|
}
|
|
|
|
|
2018-06-23 19:33:16 +00:00
|
|
|
static inline GstVideoDecoderPrivate *
|
|
|
|
gst_video_decoder_get_instance_private (GstVideoDecoder * self)
|
|
|
|
{
|
|
|
|
return (G_STRUCT_MEMBER_P (self, private_offset));
|
|
|
|
}
|
|
|
|
|
2012-03-07 09:18:49 +00:00
|
|
|
static void
|
|
|
|
gst_video_decoder_class_init (GstVideoDecoderClass * klass)
|
|
|
|
{
|
|
|
|
GObjectClass *gobject_class;
|
|
|
|
GstElementClass *gstelement_class;
|
|
|
|
|
|
|
|
gobject_class = G_OBJECT_CLASS (klass);
|
|
|
|
gstelement_class = GST_ELEMENT_CLASS (klass);
|
|
|
|
|
2012-04-24 17:35:24 +00:00
|
|
|
GST_DEBUG_CATEGORY_INIT (videodecoder_debug, "videodecoder", 0,
|
|
|
|
"Base Video Decoder");
|
|
|
|
|
|
|
|
parent_class = g_type_class_peek_parent (klass);
|
2018-06-23 19:33:16 +00:00
|
|
|
|
|
|
|
if (private_offset != 0)
|
|
|
|
g_type_class_adjust_private_offset (klass, &private_offset);
|
2012-03-07 09:18:49 +00:00
|
|
|
|
|
|
|
gobject_class->finalize = gst_video_decoder_finalize;
|
2018-10-02 16:04:14 +00:00
|
|
|
gobject_class->get_property = gst_video_decoder_get_property;
|
|
|
|
gobject_class->set_property = gst_video_decoder_set_property;
|
2012-03-07 09:18:49 +00:00
|
|
|
|
|
|
|
gstelement_class->change_state =
|
|
|
|
GST_DEBUG_FUNCPTR (gst_video_decoder_change_state);
|
2012-04-24 17:51:30 +00:00
|
|
|
|
|
|
|
klass->sink_event = gst_video_decoder_sink_event_default;
|
|
|
|
klass->src_event = gst_video_decoder_src_event_default;
|
2012-04-25 10:37:39 +00:00
|
|
|
klass->decide_allocation = gst_video_decoder_decide_allocation_default;
|
2012-06-15 14:06:12 +00:00
|
|
|
klass->propose_allocation = gst_video_decoder_propose_allocation_default;
|
2012-08-09 12:35:22 +00:00
|
|
|
klass->negotiate = gst_video_decoder_negotiate_default;
|
2013-11-27 21:39:52 +00:00
|
|
|
klass->sink_query = gst_video_decoder_sink_query_default;
|
|
|
|
klass->src_query = gst_video_decoder_src_query_default;
|
2015-06-29 13:58:38 +00:00
|
|
|
klass->transform_meta = gst_video_decoder_transform_meta_default;
|
2018-10-02 16:04:14 +00:00
|
|
|
|
|
|
|
/**
|
|
|
|
* GstVideoDecoder:qos:
|
|
|
|
*
|
|
|
|
* If set to %TRUE the decoder will handle QoS events received
|
|
|
|
* from downstream elements.
|
|
|
|
* This includes dropping output frames which are detected as late
|
|
|
|
* using the metrics reported by those events.
|
|
|
|
*
|
|
|
|
* Since: 1.18
|
|
|
|
*/
|
|
|
|
g_object_class_install_property (gobject_class, PROP_QOS,
|
|
|
|
g_param_spec_boolean ("qos", "Quality of Service",
|
|
|
|
"Handle Quality-of-Service events from downstream",
|
|
|
|
DEFAULT_QOS, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
|
2012-03-07 09:18:49 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
gst_video_decoder_init (GstVideoDecoder * decoder, GstVideoDecoderClass * klass)
|
|
|
|
{
|
|
|
|
GstPadTemplate *pad_template;
|
|
|
|
GstPad *pad;
|
|
|
|
|
|
|
|
GST_DEBUG_OBJECT (decoder, "gst_video_decoder_init");
|
|
|
|
|
2018-06-23 19:33:16 +00:00
|
|
|
decoder->priv = gst_video_decoder_get_instance_private (decoder);
|
2012-03-07 09:18:49 +00:00
|
|
|
|
|
|
|
pad_template =
|
|
|
|
gst_element_class_get_pad_template (GST_ELEMENT_CLASS (klass), "sink");
|
|
|
|
g_return_if_fail (pad_template != NULL);
|
|
|
|
|
|
|
|
decoder->sinkpad = pad = gst_pad_new_from_template (pad_template, "sink");
|
|
|
|
|
|
|
|
gst_pad_set_chain_function (pad, GST_DEBUG_FUNCPTR (gst_video_decoder_chain));
|
|
|
|
gst_pad_set_event_function (pad,
|
|
|
|
GST_DEBUG_FUNCPTR (gst_video_decoder_sink_event));
|
|
|
|
gst_pad_set_query_function (pad,
|
|
|
|
GST_DEBUG_FUNCPTR (gst_video_decoder_sink_query));
|
|
|
|
gst_element_add_pad (GST_ELEMENT (decoder), decoder->sinkpad);
|
|
|
|
|
|
|
|
pad_template =
|
|
|
|
gst_element_class_get_pad_template (GST_ELEMENT_CLASS (klass), "src");
|
|
|
|
g_return_if_fail (pad_template != NULL);
|
|
|
|
|
|
|
|
decoder->srcpad = pad = gst_pad_new_from_template (pad_template, "src");
|
|
|
|
|
|
|
|
gst_pad_set_event_function (pad,
|
|
|
|
GST_DEBUG_FUNCPTR (gst_video_decoder_src_event));
|
|
|
|
gst_pad_set_query_function (pad,
|
|
|
|
GST_DEBUG_FUNCPTR (gst_video_decoder_src_query));
|
|
|
|
gst_element_add_pad (GST_ELEMENT (decoder), decoder->srcpad);
|
|
|
|
|
|
|
|
gst_segment_init (&decoder->input_segment, GST_FORMAT_TIME);
|
|
|
|
gst_segment_init (&decoder->output_segment, GST_FORMAT_TIME);
|
|
|
|
|
2012-04-24 17:35:24 +00:00
|
|
|
g_rec_mutex_init (&decoder->stream_lock);
|
2012-03-07 09:18:49 +00:00
|
|
|
|
|
|
|
decoder->priv->input_adapter = gst_adapter_new ();
|
|
|
|
decoder->priv->output_adapter = gst_adapter_new ();
|
|
|
|
decoder->priv->packetized = TRUE;
|
2013-12-05 10:34:36 +00:00
|
|
|
decoder->priv->needs_format = FALSE;
|
2012-03-07 09:18:49 +00:00
|
|
|
|
2018-10-02 16:04:14 +00:00
|
|
|
/* properties */
|
|
|
|
decoder->priv->do_qos = DEFAULT_QOS;
|
|
|
|
|
2015-02-03 11:23:06 +00:00
|
|
|
decoder->priv->min_latency = 0;
|
2015-02-11 12:43:11 +00:00
|
|
|
decoder->priv->max_latency = 0;
|
2015-02-03 11:23:06 +00:00
|
|
|
|
2013-08-15 12:15:05 +00:00
|
|
|
gst_video_decoder_reset (decoder, TRUE, TRUE);
|
2012-03-07 09:18:49 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static GstVideoCodecState *
|
|
|
|
_new_input_state (GstCaps * caps)
|
|
|
|
{
|
|
|
|
GstVideoCodecState *state;
|
|
|
|
GstStructure *structure;
|
|
|
|
const GValue *codec_data;
|
|
|
|
|
|
|
|
state = g_slice_new0 (GstVideoCodecState);
|
|
|
|
state->ref_count = 1;
|
|
|
|
gst_video_info_init (&state->info);
|
|
|
|
if (G_UNLIKELY (!gst_video_info_from_caps (&state->info, caps)))
|
|
|
|
goto parse_fail;
|
|
|
|
state->caps = gst_caps_ref (caps);
|
|
|
|
|
|
|
|
structure = gst_caps_get_structure (caps, 0);
|
|
|
|
|
|
|
|
codec_data = gst_structure_get_value (structure, "codec_data");
|
|
|
|
if (codec_data && G_VALUE_TYPE (codec_data) == GST_TYPE_BUFFER)
|
2012-04-24 17:35:24 +00:00
|
|
|
state->codec_data = GST_BUFFER (g_value_dup_boxed (codec_data));
|
2012-03-07 09:18:49 +00:00
|
|
|
|
|
|
|
return state;
|
|
|
|
|
|
|
|
parse_fail:
|
|
|
|
{
|
|
|
|
g_slice_free (GstVideoCodecState, state);
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static GstVideoCodecState *
|
2018-07-06 13:48:35 +00:00
|
|
|
_new_output_state (GstVideoFormat fmt, GstVideoInterlaceMode mode, guint width,
|
|
|
|
guint height, GstVideoCodecState * reference)
|
2012-03-07 09:18:49 +00:00
|
|
|
{
|
|
|
|
GstVideoCodecState *state;
|
|
|
|
|
|
|
|
state = g_slice_new0 (GstVideoCodecState);
|
|
|
|
state->ref_count = 1;
|
|
|
|
gst_video_info_init (&state->info);
|
2018-07-06 13:48:35 +00:00
|
|
|
if (!gst_video_info_set_interlaced_format (&state->info, fmt, mode, width,
|
|
|
|
height)) {
|
2016-11-23 18:10:34 +00:00
|
|
|
g_slice_free (GstVideoCodecState, state);
|
|
|
|
return NULL;
|
|
|
|
}
|
2012-03-07 09:18:49 +00:00
|
|
|
|
|
|
|
if (reference) {
|
|
|
|
GstVideoInfo *tgt, *ref;
|
|
|
|
|
|
|
|
tgt = &state->info;
|
|
|
|
ref = &reference->info;
|
|
|
|
|
|
|
|
/* Copy over extra fields from reference state */
|
|
|
|
tgt->interlace_mode = ref->interlace_mode;
|
|
|
|
tgt->flags = ref->flags;
|
2012-06-04 16:17:42 +00:00
|
|
|
/* only copy values that are not unknown so that we don't override the
|
|
|
|
* defaults. subclasses should really fill these in when they know. */
|
2014-01-13 15:29:00 +00:00
|
|
|
if (ref->chroma_site)
|
|
|
|
tgt->chroma_site = ref->chroma_site;
|
2012-06-04 16:17:42 +00:00
|
|
|
if (ref->colorimetry.range)
|
|
|
|
tgt->colorimetry.range = ref->colorimetry.range;
|
|
|
|
if (ref->colorimetry.matrix)
|
|
|
|
tgt->colorimetry.matrix = ref->colorimetry.matrix;
|
|
|
|
if (ref->colorimetry.transfer)
|
|
|
|
tgt->colorimetry.transfer = ref->colorimetry.transfer;
|
|
|
|
if (ref->colorimetry.primaries)
|
|
|
|
tgt->colorimetry.primaries = ref->colorimetry.primaries;
|
2012-03-07 09:18:49 +00:00
|
|
|
GST_DEBUG ("reference par %d/%d fps %d/%d",
|
|
|
|
ref->par_n, ref->par_d, ref->fps_n, ref->fps_d);
|
|
|
|
tgt->par_n = ref->par_n;
|
|
|
|
tgt->par_d = ref->par_d;
|
|
|
|
tgt->fps_n = ref->fps_n;
|
|
|
|
tgt->fps_d = ref->fps_d;
|
2015-06-12 15:35:52 +00:00
|
|
|
tgt->views = ref->views;
|
2016-09-29 11:48:29 +00:00
|
|
|
|
|
|
|
GST_VIDEO_INFO_FIELD_ORDER (tgt) = GST_VIDEO_INFO_FIELD_ORDER (ref);
|
|
|
|
|
2015-06-12 15:35:52 +00:00
|
|
|
if (GST_VIDEO_INFO_MULTIVIEW_MODE (ref) != GST_VIDEO_MULTIVIEW_MODE_NONE) {
|
|
|
|
GST_VIDEO_INFO_MULTIVIEW_MODE (tgt) = GST_VIDEO_INFO_MULTIVIEW_MODE (ref);
|
|
|
|
GST_VIDEO_INFO_MULTIVIEW_FLAGS (tgt) =
|
|
|
|
GST_VIDEO_INFO_MULTIVIEW_FLAGS (ref);
|
2017-03-24 00:43:06 +00:00
|
|
|
} else {
|
|
|
|
/* Default to MONO, overridden as needed by sub-classes */
|
|
|
|
GST_VIDEO_INFO_MULTIVIEW_MODE (tgt) = GST_VIDEO_MULTIVIEW_MODE_MONO;
|
|
|
|
GST_VIDEO_INFO_MULTIVIEW_FLAGS (tgt) = GST_VIDEO_MULTIVIEW_FLAGS_NONE;
|
2015-06-12 15:35:52 +00:00
|
|
|
}
|
2012-03-07 09:18:49 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
GST_DEBUG ("reference par %d/%d fps %d/%d",
|
|
|
|
state->info.par_n, state->info.par_d,
|
|
|
|
state->info.fps_n, state->info.fps_d);
|
|
|
|
|
|
|
|
return state;
|
|
|
|
}
|
|
|
|
|
|
|
|
static gboolean
|
2012-04-24 17:35:24 +00:00
|
|
|
gst_video_decoder_setcaps (GstVideoDecoder * decoder, GstCaps * caps)
|
2012-03-07 09:18:49 +00:00
|
|
|
{
|
|
|
|
GstVideoDecoderClass *decoder_class;
|
|
|
|
GstVideoCodecState *state;
|
|
|
|
gboolean ret = TRUE;
|
|
|
|
|
|
|
|
decoder_class = GST_VIDEO_DECODER_GET_CLASS (decoder);
|
|
|
|
|
|
|
|
GST_DEBUG_OBJECT (decoder, "setcaps %" GST_PTR_FORMAT, caps);
|
|
|
|
|
2013-04-10 18:07:00 +00:00
|
|
|
GST_VIDEO_DECODER_STREAM_LOCK (decoder);
|
|
|
|
|
|
|
|
if (decoder->priv->input_state) {
|
|
|
|
GST_DEBUG_OBJECT (decoder,
|
|
|
|
"Checking if caps changed old %" GST_PTR_FORMAT " new %" GST_PTR_FORMAT,
|
|
|
|
decoder->priv->input_state->caps, caps);
|
|
|
|
if (gst_caps_is_equal (decoder->priv->input_state->caps, caps))
|
|
|
|
goto caps_not_changed;
|
|
|
|
}
|
|
|
|
|
2012-03-07 09:18:49 +00:00
|
|
|
state = _new_input_state (caps);
|
|
|
|
|
|
|
|
if (G_UNLIKELY (state == NULL))
|
|
|
|
goto parse_fail;
|
|
|
|
|
|
|
|
if (decoder_class->set_format)
|
|
|
|
ret = decoder_class->set_format (decoder, state);
|
|
|
|
|
|
|
|
if (!ret)
|
|
|
|
goto refused_format;
|
|
|
|
|
|
|
|
if (decoder->priv->input_state)
|
|
|
|
gst_video_codec_state_unref (decoder->priv->input_state);
|
|
|
|
decoder->priv->input_state = state;
|
|
|
|
|
|
|
|
GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
|
2013-04-10 18:07:00 +00:00
|
|
|
caps_not_changed:
|
|
|
|
{
|
|
|
|
GST_DEBUG_OBJECT (decoder, "Caps did not change - ignore");
|
|
|
|
GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
|
|
|
|
return TRUE;
|
|
|
|
}
|
2012-03-07 09:18:49 +00:00
|
|
|
|
2013-04-10 18:07:00 +00:00
|
|
|
/* ERRORS */
|
2012-03-07 09:18:49 +00:00
|
|
|
parse_fail:
|
|
|
|
{
|
|
|
|
GST_WARNING_OBJECT (decoder, "Failed to parse caps");
|
2013-04-10 18:07:00 +00:00
|
|
|
GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
|
2012-03-07 09:18:49 +00:00
|
|
|
return FALSE;
|
|
|
|
}
|
|
|
|
|
|
|
|
refused_format:
|
|
|
|
{
|
|
|
|
GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
|
|
|
|
GST_WARNING_OBJECT (decoder, "Subclass refused caps");
|
|
|
|
gst_video_codec_state_unref (state);
|
|
|
|
return FALSE;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
gst_video_decoder_finalize (GObject * object)
|
|
|
|
{
|
|
|
|
GstVideoDecoder *decoder;
|
|
|
|
|
|
|
|
decoder = GST_VIDEO_DECODER (object);
|
|
|
|
|
|
|
|
GST_DEBUG_OBJECT (object, "finalize");
|
|
|
|
|
2012-04-24 17:35:24 +00:00
|
|
|
g_rec_mutex_clear (&decoder->stream_lock);
|
2012-03-07 09:18:49 +00:00
|
|
|
|
|
|
|
if (decoder->priv->input_adapter) {
|
|
|
|
g_object_unref (decoder->priv->input_adapter);
|
|
|
|
decoder->priv->input_adapter = NULL;
|
|
|
|
}
|
|
|
|
if (decoder->priv->output_adapter) {
|
|
|
|
g_object_unref (decoder->priv->output_adapter);
|
|
|
|
decoder->priv->output_adapter = NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (decoder->priv->input_state)
|
|
|
|
gst_video_codec_state_unref (decoder->priv->input_state);
|
|
|
|
if (decoder->priv->output_state)
|
|
|
|
gst_video_codec_state_unref (decoder->priv->output_state);
|
|
|
|
|
2012-04-24 18:04:48 +00:00
|
|
|
if (decoder->priv->pool) {
|
2012-04-26 16:11:08 +00:00
|
|
|
gst_object_unref (decoder->priv->pool);
|
|
|
|
decoder->priv->pool = NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (decoder->priv->allocator) {
|
2012-07-09 14:26:17 +00:00
|
|
|
gst_object_unref (decoder->priv->allocator);
|
2012-07-04 07:14:27 +00:00
|
|
|
decoder->priv->allocator = NULL;
|
2012-04-24 18:04:48 +00:00
|
|
|
}
|
|
|
|
|
2012-03-07 09:18:49 +00:00
|
|
|
G_OBJECT_CLASS (parent_class)->finalize (object);
|
|
|
|
}
|
|
|
|
|
2018-10-02 16:04:14 +00:00
|
|
|
static void
|
|
|
|
gst_video_decoder_get_property (GObject * object, guint property_id,
|
|
|
|
GValue * value, GParamSpec * pspec)
|
|
|
|
{
|
|
|
|
GstVideoDecoderPrivate *priv = GST_VIDEO_DECODER (object)->priv;
|
|
|
|
|
|
|
|
switch (property_id) {
|
|
|
|
case PROP_QOS:
|
|
|
|
g_value_set_boolean (value, priv->do_qos);
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
G_OBJECT_WARN_INVALID_PROPERTY_ID (object, property_id, pspec);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
gst_video_decoder_set_property (GObject * object, guint property_id,
|
|
|
|
const GValue * value, GParamSpec * pspec)
|
|
|
|
{
|
|
|
|
GstVideoDecoderPrivate *priv = GST_VIDEO_DECODER (object)->priv;
|
|
|
|
|
|
|
|
switch (property_id) {
|
|
|
|
case PROP_QOS:
|
|
|
|
priv->do_qos = g_value_get_boolean (value);
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
G_OBJECT_WARN_INVALID_PROPERTY_ID (object, property_id, pspec);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2012-03-07 09:18:49 +00:00
|
|
|
/* hard == FLUSH, otherwise discont */
|
|
|
|
static GstFlowReturn
|
|
|
|
gst_video_decoder_flush (GstVideoDecoder * dec, gboolean hard)
|
|
|
|
{
|
2013-08-14 14:55:55 +00:00
|
|
|
GstVideoDecoderClass *klass = GST_VIDEO_DECODER_GET_CLASS (dec);
|
2012-03-07 09:18:49 +00:00
|
|
|
GstFlowReturn ret = GST_FLOW_OK;
|
|
|
|
|
|
|
|
GST_LOG_OBJECT (dec, "flush hard %d", hard);
|
|
|
|
|
2013-08-14 14:55:55 +00:00
|
|
|
/* Inform subclass */
|
|
|
|
if (klass->reset) {
|
|
|
|
GST_FIXME_OBJECT (dec, "GstVideoDecoder::reset() is deprecated");
|
|
|
|
klass->reset (dec, hard);
|
|
|
|
}
|
|
|
|
|
2013-08-15 12:15:05 +00:00
|
|
|
if (klass->flush)
|
2013-08-15 10:44:56 +00:00
|
|
|
klass->flush (dec);
|
2013-07-25 08:20:01 +00:00
|
|
|
|
2012-03-07 09:18:49 +00:00
|
|
|
/* and get (re)set for the sequel */
|
2013-08-15 12:15:05 +00:00
|
|
|
gst_video_decoder_reset (dec, FALSE, hard);
|
2012-03-07 09:18:49 +00:00
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2015-08-16 16:55:22 +00:00
|
|
|
static GstEvent *
|
|
|
|
gst_video_decoder_create_merged_tags_event (GstVideoDecoder * dec)
|
|
|
|
{
|
|
|
|
GstTagList *merged_tags;
|
|
|
|
|
|
|
|
GST_LOG_OBJECT (dec, "upstream : %" GST_PTR_FORMAT, dec->priv->upstream_tags);
|
|
|
|
GST_LOG_OBJECT (dec, "decoder : %" GST_PTR_FORMAT, dec->priv->tags);
|
|
|
|
GST_LOG_OBJECT (dec, "mode : %d", dec->priv->tags_merge_mode);
|
|
|
|
|
|
|
|
merged_tags =
|
|
|
|
gst_tag_list_merge (dec->priv->upstream_tags, dec->priv->tags,
|
|
|
|
dec->priv->tags_merge_mode);
|
|
|
|
|
|
|
|
GST_DEBUG_OBJECT (dec, "merged : %" GST_PTR_FORMAT, merged_tags);
|
|
|
|
|
|
|
|
if (merged_tags == NULL)
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
if (gst_tag_list_is_empty (merged_tags)) {
|
|
|
|
gst_tag_list_unref (merged_tags);
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
return gst_event_new_tag (merged_tags);
|
|
|
|
}
|
|
|
|
|
2012-04-24 17:51:30 +00:00
|
|
|
static gboolean
|
|
|
|
gst_video_decoder_push_event (GstVideoDecoder * decoder, GstEvent * event)
|
|
|
|
{
|
|
|
|
switch (GST_EVENT_TYPE (event)) {
|
|
|
|
case GST_EVENT_SEGMENT:
|
|
|
|
{
|
|
|
|
GstSegment segment;
|
|
|
|
|
|
|
|
gst_event_copy_segment (event, &segment);
|
|
|
|
|
|
|
|
GST_DEBUG_OBJECT (decoder, "segment %" GST_SEGMENT_FORMAT, &segment);
|
|
|
|
|
|
|
|
if (segment.format != GST_FORMAT_TIME) {
|
|
|
|
GST_DEBUG_OBJECT (decoder, "received non TIME newsegment");
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2014-08-08 10:01:20 +00:00
|
|
|
GST_VIDEO_DECODER_STREAM_LOCK (decoder);
|
2012-04-24 17:51:30 +00:00
|
|
|
decoder->output_segment = segment;
|
2015-02-22 20:13:35 +00:00
|
|
|
decoder->priv->in_out_segment_sync =
|
2015-03-19 10:48:15 +00:00
|
|
|
gst_segment_is_equal (&decoder->input_segment, &segment);
|
2014-08-08 10:01:20 +00:00
|
|
|
decoder->priv->last_timestamp_out = GST_CLOCK_TIME_NONE;
|
2018-03-01 04:12:39 +00:00
|
|
|
decoder->priv->earliest_time = GST_CLOCK_TIME_NONE;
|
2012-04-24 17:51:30 +00:00
|
|
|
GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
default:
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2014-03-29 09:23:05 +00:00
|
|
|
GST_DEBUG_OBJECT (decoder, "pushing event %s",
|
|
|
|
gst_event_type_get_name (GST_EVENT_TYPE (event)));
|
|
|
|
|
2012-04-24 17:51:30 +00:00
|
|
|
return gst_pad_push_event (decoder->srcpad, event);
|
|
|
|
}
|
|
|
|
|
2013-01-02 11:15:25 +00:00
|
|
|
static GstFlowReturn
|
2013-03-31 16:29:07 +00:00
|
|
|
gst_video_decoder_parse_available (GstVideoDecoder * dec, gboolean at_eos,
|
|
|
|
gboolean new_buffer)
|
2013-01-02 11:15:25 +00:00
|
|
|
{
|
|
|
|
GstVideoDecoderClass *decoder_class = GST_VIDEO_DECODER_GET_CLASS (dec);
|
|
|
|
GstVideoDecoderPrivate *priv = dec->priv;
|
|
|
|
GstFlowReturn ret = GST_FLOW_OK;
|
2014-06-20 16:02:31 +00:00
|
|
|
gsize was_available, available;
|
|
|
|
guint inactive = 0;
|
2013-01-02 11:15:25 +00:00
|
|
|
|
|
|
|
available = gst_adapter_available (priv->input_adapter);
|
|
|
|
|
2014-06-20 16:02:31 +00:00
|
|
|
while (available || new_buffer) {
|
2013-03-31 16:29:07 +00:00
|
|
|
new_buffer = FALSE;
|
2013-01-02 11:15:25 +00:00
|
|
|
/* current frame may have been parsed and handled,
|
|
|
|
* so we need to set up a new one when asking subclass to parse */
|
|
|
|
if (priv->current_frame == NULL)
|
|
|
|
priv->current_frame = gst_video_decoder_new_frame (dec);
|
|
|
|
|
2014-06-20 16:02:31 +00:00
|
|
|
was_available = available;
|
2013-01-02 11:15:25 +00:00
|
|
|
ret = decoder_class->parse (dec, priv->current_frame,
|
|
|
|
priv->input_adapter, at_eos);
|
2014-06-20 16:02:31 +00:00
|
|
|
if (ret != GST_FLOW_OK)
|
|
|
|
break;
|
|
|
|
|
|
|
|
/* if the subclass returned success (GST_FLOW_OK), it is expected
|
|
|
|
* to have collected and submitted a frame, i.e. it should have
|
|
|
|
* called gst_video_decoder_have_frame(), or at least consumed a
|
|
|
|
* few bytes through gst_video_decoder_add_to_frame().
|
|
|
|
*
|
|
|
|
* Otherwise, this is an implementation bug, and we error out
|
|
|
|
* after 2 failed attempts */
|
2013-01-02 11:15:25 +00:00
|
|
|
available = gst_adapter_available (priv->input_adapter);
|
2014-06-20 16:02:31 +00:00
|
|
|
if (!priv->current_frame || available != was_available)
|
|
|
|
inactive = 0;
|
|
|
|
else if (++inactive == 2)
|
|
|
|
goto error_inactive;
|
2013-01-02 11:15:25 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return ret;
|
2014-06-20 16:02:31 +00:00
|
|
|
|
|
|
|
/* ERRORS */
|
|
|
|
error_inactive:
|
|
|
|
{
|
|
|
|
GST_ERROR_OBJECT (dec, "Failed to consume data. Error in subclass?");
|
|
|
|
return GST_FLOW_ERROR;
|
|
|
|
}
|
2013-01-02 11:15:25 +00:00
|
|
|
}
|
|
|
|
|
2016-06-30 14:36:27 +00:00
|
|
|
/* This function has to be called with the stream lock taken. */
|
2012-06-19 14:46:05 +00:00
|
|
|
static GstFlowReturn
|
2012-09-03 06:19:09 +00:00
|
|
|
gst_video_decoder_drain_out (GstVideoDecoder * dec, gboolean at_eos)
|
2012-06-19 14:46:05 +00:00
|
|
|
{
|
|
|
|
GstVideoDecoderClass *decoder_class = GST_VIDEO_DECODER_GET_CLASS (dec);
|
|
|
|
GstVideoDecoderPrivate *priv = dec->priv;
|
|
|
|
GstFlowReturn ret = GST_FLOW_OK;
|
|
|
|
|
|
|
|
if (dec->input_segment.rate > 0.0) {
|
|
|
|
/* Forward mode, if unpacketized, give the child class
|
|
|
|
* a final chance to flush out packets */
|
|
|
|
if (!priv->packetized) {
|
2013-03-31 16:29:07 +00:00
|
|
|
ret = gst_video_decoder_parse_available (dec, TRUE, FALSE);
|
2012-06-19 14:46:05 +00:00
|
|
|
}
|
2014-03-27 19:15:01 +00:00
|
|
|
|
|
|
|
if (at_eos) {
|
|
|
|
if (decoder_class->finish)
|
|
|
|
ret = decoder_class->finish (dec);
|
2015-02-07 18:19:25 +00:00
|
|
|
} else {
|
|
|
|
if (decoder_class->drain) {
|
|
|
|
ret = decoder_class->drain (dec);
|
|
|
|
} else {
|
|
|
|
GST_FIXME_OBJECT (dec, "Sub-class should implement drain()");
|
|
|
|
}
|
2014-03-27 19:15:01 +00:00
|
|
|
}
|
2012-06-19 14:46:05 +00:00
|
|
|
} else {
|
|
|
|
/* Reverse playback mode */
|
|
|
|
ret = gst_video_decoder_flush_parse (dec, TRUE);
|
|
|
|
}
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2013-10-14 21:56:55 +00:00
|
|
|
static GList *
|
|
|
|
_flush_events (GstPad * pad, GList * events)
|
|
|
|
{
|
|
|
|
GList *tmp;
|
|
|
|
|
|
|
|
for (tmp = events; tmp; tmp = tmp->next) {
|
2014-09-17 12:11:21 +00:00
|
|
|
if (GST_EVENT_TYPE (tmp->data) != GST_EVENT_EOS &&
|
|
|
|
GST_EVENT_TYPE (tmp->data) != GST_EVENT_SEGMENT &&
|
|
|
|
GST_EVENT_IS_STICKY (tmp->data)) {
|
2013-10-14 21:56:55 +00:00
|
|
|
gst_pad_store_sticky_event (pad, GST_EVENT_CAST (tmp->data));
|
|
|
|
}
|
2014-09-17 12:11:21 +00:00
|
|
|
gst_event_unref (tmp->data);
|
2013-10-14 21:56:55 +00:00
|
|
|
}
|
|
|
|
g_list_free (events);
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2015-04-09 03:45:58 +00:00
|
|
|
/* Must be called holding the GST_VIDEO_DECODER_STREAM_LOCK */
|
|
|
|
static gboolean
|
|
|
|
gst_video_decoder_negotiate_default_caps (GstVideoDecoder * decoder)
|
|
|
|
{
|
2016-01-28 09:06:44 +00:00
|
|
|
GstCaps *caps, *templcaps;
|
2015-04-09 03:45:58 +00:00
|
|
|
GstVideoCodecState *state;
|
|
|
|
GstVideoInfo info;
|
|
|
|
gint i;
|
|
|
|
gint caps_size;
|
|
|
|
GstStructure *structure;
|
|
|
|
|
2016-01-28 09:06:44 +00:00
|
|
|
templcaps = gst_pad_get_pad_template_caps (decoder->srcpad);
|
|
|
|
caps = gst_pad_peer_query_caps (decoder->srcpad, templcaps);
|
2016-01-28 12:21:33 +00:00
|
|
|
if (caps)
|
2016-01-28 09:06:44 +00:00
|
|
|
gst_caps_unref (templcaps);
|
2016-01-28 12:21:33 +00:00
|
|
|
else
|
2016-01-28 09:06:44 +00:00
|
|
|
caps = templcaps;
|
2016-01-28 12:21:33 +00:00
|
|
|
templcaps = NULL;
|
2016-01-28 09:06:44 +00:00
|
|
|
|
2015-04-09 03:45:58 +00:00
|
|
|
if (!caps || gst_caps_is_empty (caps) || gst_caps_is_any (caps))
|
|
|
|
goto caps_error;
|
|
|
|
|
2016-01-28 09:06:44 +00:00
|
|
|
GST_LOG_OBJECT (decoder, "peer caps %" GST_PTR_FORMAT, caps);
|
|
|
|
|
2015-04-09 03:45:58 +00:00
|
|
|
/* before fixating, try to use whatever upstream provided */
|
|
|
|
caps = gst_caps_make_writable (caps);
|
|
|
|
caps_size = gst_caps_get_size (caps);
|
|
|
|
if (decoder->priv->input_state && decoder->priv->input_state->caps) {
|
|
|
|
GstCaps *sinkcaps = decoder->priv->input_state->caps;
|
|
|
|
GstStructure *structure = gst_caps_get_structure (sinkcaps, 0);
|
|
|
|
gint width, height;
|
|
|
|
|
|
|
|
if (gst_structure_get_int (structure, "width", &width)) {
|
|
|
|
for (i = 0; i < caps_size; i++) {
|
|
|
|
gst_structure_set (gst_caps_get_structure (caps, i), "width",
|
|
|
|
G_TYPE_INT, width, NULL);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (gst_structure_get_int (structure, "height", &height)) {
|
|
|
|
for (i = 0; i < caps_size; i++) {
|
|
|
|
gst_structure_set (gst_caps_get_structure (caps, i), "height",
|
|
|
|
G_TYPE_INT, height, NULL);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
for (i = 0; i < caps_size; i++) {
|
|
|
|
structure = gst_caps_get_structure (caps, i);
|
2019-02-04 10:48:25 +00:00
|
|
|
/* Random I420 1280x720 for fixation */
|
2016-10-13 10:41:29 +00:00
|
|
|
if (gst_structure_has_field (structure, "format"))
|
|
|
|
gst_structure_fixate_field_string (structure, "format", "I420");
|
|
|
|
else
|
|
|
|
gst_structure_set (structure, "format", G_TYPE_STRING, "I420", NULL);
|
|
|
|
|
2016-07-04 09:16:55 +00:00
|
|
|
if (gst_structure_has_field (structure, "width"))
|
|
|
|
gst_structure_fixate_field_nearest_int (structure, "width", 1280);
|
|
|
|
else
|
|
|
|
gst_structure_set (structure, "width", G_TYPE_INT, 1280, NULL);
|
|
|
|
|
|
|
|
if (gst_structure_has_field (structure, "height"))
|
|
|
|
gst_structure_fixate_field_nearest_int (structure, "height", 720);
|
|
|
|
else
|
|
|
|
gst_structure_set (structure, "height", G_TYPE_INT, 720, NULL);
|
2015-04-09 03:45:58 +00:00
|
|
|
}
|
|
|
|
caps = gst_caps_fixate (caps);
|
|
|
|
|
|
|
|
if (!caps || !gst_video_info_from_caps (&info, caps))
|
|
|
|
goto caps_error;
|
|
|
|
|
|
|
|
GST_INFO_OBJECT (decoder,
|
|
|
|
"Chose default caps %" GST_PTR_FORMAT " for initial gap", caps);
|
|
|
|
state =
|
|
|
|
gst_video_decoder_set_output_state (decoder, info.finfo->format,
|
|
|
|
info.width, info.height, decoder->priv->input_state);
|
|
|
|
gst_video_codec_state_unref (state);
|
|
|
|
gst_caps_unref (caps);
|
|
|
|
|
|
|
|
return TRUE;
|
|
|
|
|
|
|
|
caps_error:
|
|
|
|
{
|
|
|
|
if (caps)
|
|
|
|
gst_caps_unref (caps);
|
|
|
|
return FALSE;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2012-03-07 09:18:49 +00:00
|
|
|
static gboolean
|
2012-04-24 17:51:30 +00:00
|
|
|
gst_video_decoder_sink_event_default (GstVideoDecoder * decoder,
|
|
|
|
GstEvent * event)
|
2012-03-07 09:18:49 +00:00
|
|
|
{
|
|
|
|
GstVideoDecoderPrivate *priv;
|
2012-04-24 17:51:30 +00:00
|
|
|
gboolean ret = FALSE;
|
2012-09-03 06:19:09 +00:00
|
|
|
gboolean forward_immediate = FALSE;
|
2012-03-07 09:18:49 +00:00
|
|
|
|
|
|
|
priv = decoder->priv;
|
|
|
|
|
|
|
|
switch (GST_EVENT_TYPE (event)) {
|
2013-03-30 18:13:47 +00:00
|
|
|
case GST_EVENT_STREAM_START:
|
|
|
|
{
|
|
|
|
GstFlowReturn flow_ret = GST_FLOW_OK;
|
|
|
|
|
2016-06-30 14:36:27 +00:00
|
|
|
GST_VIDEO_DECODER_STREAM_LOCK (decoder);
|
2013-03-30 18:13:47 +00:00
|
|
|
flow_ret = gst_video_decoder_drain_out (decoder, FALSE);
|
|
|
|
ret = (flow_ret == GST_FLOW_OK);
|
|
|
|
|
2013-07-30 21:37:43 +00:00
|
|
|
GST_DEBUG_OBJECT (decoder, "received STREAM_START. Clearing taglist");
|
2015-08-16 16:55:22 +00:00
|
|
|
/* Flush upstream tags after a STREAM_START */
|
|
|
|
if (priv->upstream_tags) {
|
|
|
|
gst_tag_list_unref (priv->upstream_tags);
|
|
|
|
priv->upstream_tags = NULL;
|
|
|
|
priv->tags_changed = TRUE;
|
|
|
|
}
|
2013-07-30 21:37:43 +00:00
|
|
|
GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
|
|
|
|
|
2013-03-30 18:13:47 +00:00
|
|
|
/* Forward STREAM_START immediately. Everything is drained after
|
|
|
|
* the STREAM_START event and we can forward this event immediately
|
|
|
|
* now without having buffers out of order.
|
|
|
|
*/
|
|
|
|
forward_immediate = TRUE;
|
|
|
|
break;
|
|
|
|
}
|
2012-04-24 17:35:24 +00:00
|
|
|
case GST_EVENT_CAPS:
|
|
|
|
{
|
2014-07-11 19:51:05 +00:00
|
|
|
GstCaps *caps;
|
|
|
|
|
|
|
|
gst_event_parse_caps (event, &caps);
|
|
|
|
ret = gst_video_decoder_setcaps (decoder, caps);
|
2012-04-24 17:35:24 +00:00
|
|
|
gst_event_unref (event);
|
2012-04-24 17:51:30 +00:00
|
|
|
event = NULL;
|
2012-04-24 17:35:24 +00:00
|
|
|
break;
|
|
|
|
}
|
2015-04-07 02:20:00 +00:00
|
|
|
case GST_EVENT_SEGMENT_DONE:
|
|
|
|
{
|
|
|
|
GstFlowReturn flow_ret = GST_FLOW_OK;
|
|
|
|
|
2016-06-30 14:36:27 +00:00
|
|
|
GST_VIDEO_DECODER_STREAM_LOCK (decoder);
|
2015-04-07 02:20:00 +00:00
|
|
|
flow_ret = gst_video_decoder_drain_out (decoder, TRUE);
|
2016-06-30 14:36:27 +00:00
|
|
|
GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
|
2015-04-07 02:20:00 +00:00
|
|
|
ret = (flow_ret == GST_FLOW_OK);
|
|
|
|
|
|
|
|
/* Forward SEGMENT_DONE immediately. This is required
|
|
|
|
* because no buffer or serialized event might come
|
|
|
|
* after SEGMENT_DONE and nothing could trigger another
|
|
|
|
* _finish_frame() call.
|
|
|
|
*
|
|
|
|
* The subclass can override this behaviour by overriding
|
|
|
|
* the ::sink_event() vfunc and not chaining up to the
|
|
|
|
* parent class' ::sink_event() until a later time.
|
|
|
|
*/
|
|
|
|
forward_immediate = TRUE;
|
|
|
|
break;
|
|
|
|
}
|
2012-03-07 09:18:49 +00:00
|
|
|
case GST_EVENT_EOS:
|
|
|
|
{
|
|
|
|
GstFlowReturn flow_ret = GST_FLOW_OK;
|
|
|
|
|
2016-06-30 14:36:27 +00:00
|
|
|
GST_VIDEO_DECODER_STREAM_LOCK (decoder);
|
2012-09-03 06:19:09 +00:00
|
|
|
flow_ret = gst_video_decoder_drain_out (decoder, TRUE);
|
2016-06-30 14:36:27 +00:00
|
|
|
GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
|
2012-04-24 17:51:30 +00:00
|
|
|
ret = (flow_ret == GST_FLOW_OK);
|
2013-10-29 17:40:23 +00:00
|
|
|
|
|
|
|
/* Error out even if EOS was ok when we had input, but no output */
|
|
|
|
if (ret && priv->had_input_data && !priv->had_output_data) {
|
|
|
|
GST_ELEMENT_ERROR (decoder, STREAM, DECODE,
|
|
|
|
("No valid frames decoded before end of stream"),
|
|
|
|
("no valid frames found"));
|
|
|
|
}
|
|
|
|
|
2012-09-20 08:03:32 +00:00
|
|
|
/* Forward EOS immediately. This is required because no
|
|
|
|
* buffer or serialized event will come after EOS and
|
|
|
|
* nothing could trigger another _finish_frame() call.
|
|
|
|
*
|
|
|
|
* The subclass can override this behaviour by overriding
|
|
|
|
* the ::sink_event() vfunc and not chaining up to the
|
|
|
|
* parent class' ::sink_event() until a later time.
|
|
|
|
*/
|
2012-09-03 06:19:09 +00:00
|
|
|
forward_immediate = TRUE;
|
2012-03-07 09:18:49 +00:00
|
|
|
break;
|
|
|
|
}
|
2012-09-11 01:44:56 +00:00
|
|
|
case GST_EVENT_GAP:
|
|
|
|
{
|
|
|
|
GstFlowReturn flow_ret = GST_FLOW_OK;
|
2015-05-08 13:02:48 +00:00
|
|
|
gboolean needs_reconfigure = FALSE;
|
2015-08-10 02:23:05 +00:00
|
|
|
GList *events;
|
|
|
|
GList *frame_events;
|
2012-09-11 01:44:56 +00:00
|
|
|
|
2016-06-30 14:36:27 +00:00
|
|
|
GST_VIDEO_DECODER_STREAM_LOCK (decoder);
|
2018-07-09 18:03:04 +00:00
|
|
|
if (decoder->input_segment.flags & GST_SEEK_FLAG_TRICKMODE_KEY_UNITS)
|
|
|
|
flow_ret = gst_video_decoder_drain_out (decoder, FALSE);
|
2012-09-11 01:44:56 +00:00
|
|
|
ret = (flow_ret == GST_FLOW_OK);
|
2012-09-20 08:03:32 +00:00
|
|
|
|
2015-04-09 03:45:58 +00:00
|
|
|
/* Ensure we have caps before forwarding the event */
|
|
|
|
if (!decoder->priv->output_state) {
|
|
|
|
if (!gst_video_decoder_negotiate_default_caps (decoder)) {
|
|
|
|
GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
|
|
|
|
GST_ELEMENT_ERROR (decoder, STREAM, FORMAT, (NULL),
|
|
|
|
("Decoder output not negotiated before GAP event."));
|
2015-04-11 17:51:54 +00:00
|
|
|
forward_immediate = TRUE;
|
|
|
|
break;
|
2015-04-09 03:45:58 +00:00
|
|
|
}
|
2015-05-08 13:02:48 +00:00
|
|
|
needs_reconfigure = TRUE;
|
|
|
|
}
|
|
|
|
|
|
|
|
needs_reconfigure = gst_pad_check_reconfigure (decoder->srcpad)
|
|
|
|
|| needs_reconfigure;
|
|
|
|
if (decoder->priv->output_state_changed || needs_reconfigure) {
|
|
|
|
if (!gst_video_decoder_negotiate_unlocked (decoder)) {
|
|
|
|
GST_WARNING_OBJECT (decoder, "Failed to negotiate with downstream");
|
|
|
|
gst_pad_mark_reconfigure (decoder->srcpad);
|
|
|
|
}
|
2015-04-09 03:45:58 +00:00
|
|
|
}
|
2015-08-10 02:23:05 +00:00
|
|
|
|
|
|
|
GST_DEBUG_OBJECT (decoder, "Pushing all pending serialized events"
|
|
|
|
" before the gap");
|
|
|
|
events = decoder->priv->pending_events;
|
|
|
|
frame_events = decoder->priv->current_frame_events;
|
|
|
|
decoder->priv->pending_events = NULL;
|
|
|
|
decoder->priv->current_frame_events = NULL;
|
|
|
|
|
2015-04-09 03:45:58 +00:00
|
|
|
GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
|
|
|
|
|
2015-08-10 02:23:05 +00:00
|
|
|
gst_video_decoder_push_event_list (decoder, events);
|
|
|
|
gst_video_decoder_push_event_list (decoder, frame_events);
|
|
|
|
|
2012-09-20 08:03:32 +00:00
|
|
|
/* Forward GAP immediately. Everything is drained after
|
|
|
|
* the GAP event and we can forward this event immediately
|
|
|
|
* now without having buffers out of order.
|
|
|
|
*/
|
2012-09-11 01:44:56 +00:00
|
|
|
forward_immediate = TRUE;
|
|
|
|
break;
|
|
|
|
}
    case GST_EVENT_CUSTOM_DOWNSTREAM:
    {
      gboolean in_still;
      GstFlowReturn flow_ret = GST_FLOW_OK;

      if (gst_video_event_parse_still_frame (event, &in_still)) {
        if (in_still) {
          GST_DEBUG_OBJECT (decoder, "draining current data for still-frame");
          GST_VIDEO_DECODER_STREAM_LOCK (decoder);
          flow_ret = gst_video_decoder_drain_out (decoder, FALSE);
          GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
          ret = (flow_ret == GST_FLOW_OK);
        }
        /* Forward STILL_FRAME immediately. Everything is drained after
         * the STILL_FRAME event and we can forward this event immediately
         * now without having buffers out of order.
         */
        forward_immediate = TRUE;
      }
      break;
    }
    case GST_EVENT_SEGMENT:
    {
      GstSegment segment;

      gst_event_copy_segment (event, &segment);

      if (segment.format == GST_FORMAT_TIME) {
        GST_DEBUG_OBJECT (decoder,
            "received TIME SEGMENT %" GST_SEGMENT_FORMAT, &segment);
      } else {
        gint64 start;

        GST_DEBUG_OBJECT (decoder,
            "received SEGMENT %" GST_SEGMENT_FORMAT, &segment);

        /* handle newsegment as a result from our legacy simple seeking */
        /* note that initial 0 should convert to 0 in any case */
        if (priv->do_estimate_rate &&
            gst_pad_query_convert (decoder->sinkpad, GST_FORMAT_BYTES,
                segment.start, GST_FORMAT_TIME, &start)) {
          /* best attempt convert */
          /* as these are only estimates, stop is kept open-ended to avoid
           * premature cutting */
          GST_DEBUG_OBJECT (decoder,
              "converted to TIME start %" GST_TIME_FORMAT,
              GST_TIME_ARGS (start));
          segment.start = start;
          segment.stop = GST_CLOCK_TIME_NONE;
          segment.time = start;
          /* replace event */
          gst_event_unref (event);
          event = gst_event_new_segment (&segment);
        } else {
          goto newseg_wrong_format;
        }
      }

      GST_VIDEO_DECODER_STREAM_LOCK (decoder);

      priv->base_timestamp = GST_CLOCK_TIME_NONE;
      priv->base_picture_number = 0;

      decoder->input_segment = segment;
      decoder->priv->in_out_segment_sync = FALSE;

      GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
      break;
    }
    case GST_EVENT_FLUSH_STOP:
    {
      GList *l;

      GST_VIDEO_DECODER_STREAM_LOCK (decoder);
      for (l = priv->frames; l; l = l->next) {
        GstVideoCodecFrame *frame = l->data;

        frame->events = _flush_events (decoder->srcpad, frame->events);
      }
      priv->current_frame_events = _flush_events (decoder->srcpad,
          decoder->priv->current_frame_events);

      /* well, this is kind of worse than a DISCONT */
      gst_video_decoder_flush (decoder, TRUE);
      GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
      /* Forward FLUSH_STOP immediately. This is required because it is
       * expected to be forwarded immediately and no buffers are queued
       * anyway.
       */
      forward_immediate = TRUE;
      break;
    }
    case GST_EVENT_TAG:
    {
      GstTagList *tags;

      gst_event_parse_tag (event, &tags);

      if (gst_tag_list_get_scope (tags) == GST_TAG_SCOPE_STREAM) {
        GST_VIDEO_DECODER_STREAM_LOCK (decoder);
        if (priv->upstream_tags != tags) {
          if (priv->upstream_tags)
            gst_tag_list_unref (priv->upstream_tags);
          priv->upstream_tags = gst_tag_list_ref (tags);
          GST_INFO_OBJECT (decoder, "upstream tags: %" GST_PTR_FORMAT, tags);
        }
        gst_event_unref (event);
        event = gst_video_decoder_create_merged_tags_event (decoder);
        GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
        if (!event)
          ret = TRUE;
      }
      break;
    }
    default:
      break;
  }

  /* Forward non-serialized events immediately, and all other
   * events which can be forwarded immediately without potentially
   * causing the event to go out of order with other events and
   * buffers as decided above.
   */
  if (event) {
    if (!GST_EVENT_IS_SERIALIZED (event) || forward_immediate) {
      ret = gst_video_decoder_push_event (decoder, event);
    } else {
      GST_VIDEO_DECODER_STREAM_LOCK (decoder);
      decoder->priv->current_frame_events =
          g_list_prepend (decoder->priv->current_frame_events, event);
      GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
      ret = TRUE;
    }
  }

  return ret;

newseg_wrong_format:
  {
    GST_DEBUG_OBJECT (decoder, "received non TIME newsegment");
    gst_event_unref (event);
    /* SWALLOW EVENT */
    return TRUE;
  }
}
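/* Note on the default sink event handling above: serialized events that are
 * not forwarded immediately are prepended to current_frame_events and end up
 * attached to the next GstVideoCodecFrame handed to the subclass, so they are
 * pushed downstream in order with that frame's decoded output rather than
 * overtaking buffers that are still queued inside the decoder. (Summary added
 * for clarity.)
 */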
static gboolean
gst_video_decoder_sink_event (GstPad * pad, GstObject * parent,
    GstEvent * event)
{
  GstVideoDecoder *decoder;
  GstVideoDecoderClass *decoder_class;
  gboolean ret = FALSE;

  decoder = GST_VIDEO_DECODER (parent);
  decoder_class = GST_VIDEO_DECODER_GET_CLASS (decoder);

  GST_DEBUG_OBJECT (decoder, "received event %d, %s", GST_EVENT_TYPE (event),
      GST_EVENT_TYPE_NAME (event));

  if (decoder_class->sink_event)
    ret = decoder_class->sink_event (decoder, event);

  return ret;
}

/* perform upstream byte <-> time conversion (duration, seeking)
 * if subclass allows and if enough data for moderately decent conversion */
static inline gboolean
gst_video_decoder_do_byte (GstVideoDecoder * dec)
{
  gboolean ret;

  GST_OBJECT_LOCK (dec);
  ret = dec->priv->do_estimate_rate && (dec->priv->bytes_out > 0)
      && (dec->priv->time > GST_SECOND);
  GST_OBJECT_UNLOCK (dec);

  return ret;
}
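/* Note on gst_video_decoder_do_byte() above: byte <-> time estimation is only
 * attempted when the subclass has allowed rate estimation and more than one
 * second of media has been accounted for in the bytes_out/time counters, so
 * very early, unreliable averages are not used for seeking or duration
 * reporting. The conversion itself is a simple average-rate interpolation:
 * roughly, if N bytes correspond to T seconds so far, a target of k * T
 * seconds maps to about k * N bytes. (Summary added for clarity; the exact
 * bookkeeping lives in the CONVERT query handling elsewhere in this file.)
 */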
static gboolean
gst_video_decoder_do_seek (GstVideoDecoder * dec, GstEvent * event)
{
  GstFormat format;
  GstSeekFlags flags;
  GstSeekType start_type, end_type;
  gdouble rate;
  gint64 start, start_time, end_time;
  GstSegment seek_segment;
  guint32 seqnum;

  gst_event_parse_seek (event, &rate, &format, &flags, &start_type,
      &start_time, &end_type, &end_time);

  /* we'll handle plain open-ended flushing seeks with the simple approach */
  if (rate != 1.0) {
    GST_DEBUG_OBJECT (dec, "unsupported seek: rate");
    return FALSE;
  }

  if (start_type != GST_SEEK_TYPE_SET) {
    GST_DEBUG_OBJECT (dec, "unsupported seek: start time");
    return FALSE;
  }

  if ((end_type != GST_SEEK_TYPE_SET && end_type != GST_SEEK_TYPE_NONE) ||
      (end_type == GST_SEEK_TYPE_SET && end_time != GST_CLOCK_TIME_NONE)) {
    GST_DEBUG_OBJECT (dec, "unsupported seek: end time");
    return FALSE;
  }

  if (!(flags & GST_SEEK_FLAG_FLUSH)) {
    GST_DEBUG_OBJECT (dec, "unsupported seek: not flushing");
    return FALSE;
  }

  memcpy (&seek_segment, &dec->output_segment, sizeof (seek_segment));
  gst_segment_do_seek (&seek_segment, rate, format, flags, start_type,
      start_time, end_type, end_time, NULL);
  start_time = seek_segment.position;

  if (!gst_pad_query_convert (dec->sinkpad, GST_FORMAT_TIME, start_time,
          GST_FORMAT_BYTES, &start)) {
    GST_DEBUG_OBJECT (dec, "conversion failed");
    return FALSE;
  }

  seqnum = gst_event_get_seqnum (event);
  event = gst_event_new_seek (1.0, GST_FORMAT_BYTES, flags,
      GST_SEEK_TYPE_SET, start, GST_SEEK_TYPE_NONE, -1);
  gst_event_set_seqnum (event, seqnum);

  GST_DEBUG_OBJECT (dec, "seeking to %" GST_TIME_FORMAT " at byte offset %"
      G_GINT64_FORMAT, GST_TIME_ARGS (start_time), start);

  return gst_pad_push_event (dec->sinkpad, event);
}
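/* Note on gst_video_decoder_do_seek() above: the flushing TIME seek is
 * translated into a BYTES seek (using the estimated average rate) and pushed
 * upstream. When upstream then sends a non-TIME SEGMENT as a result of that
 * byte seek, the SEGMENT handling in the default sink event handler converts
 * it back to TIME, which is what the "legacy simple seeking" comment there
 * refers to. (Summary added for clarity.)
 */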
static gboolean
gst_video_decoder_src_event_default (GstVideoDecoder * decoder,
    GstEvent * event)
{
  GstVideoDecoderPrivate *priv;
  gboolean res = FALSE;

  priv = decoder->priv;

  GST_DEBUG_OBJECT (decoder,
      "received event %d, %s", GST_EVENT_TYPE (event),
      GST_EVENT_TYPE_NAME (event));

  switch (GST_EVENT_TYPE (event)) {
    case GST_EVENT_SEEK:
    {
      GstFormat format;
      gdouble rate;
      GstSeekFlags flags;
      GstSeekType start_type, stop_type;
      gint64 start, stop;
      gint64 tstart, tstop;
      guint32 seqnum;

      gst_event_parse_seek (event, &rate, &format, &flags, &start_type, &start,
          &stop_type, &stop);
      seqnum = gst_event_get_seqnum (event);

      /* upstream gets a chance first */
      if ((res = gst_pad_push_event (decoder->sinkpad, event)))
        break;

      /* if upstream fails for a time seek, maybe we can help if allowed */
      if (format == GST_FORMAT_TIME) {
        if (gst_video_decoder_do_byte (decoder))
          res = gst_video_decoder_do_seek (decoder, event);
        break;
      }

      /* ... though a non-time seek can be aided as well */
      /* First bring the requested format to time */
      if (!(res =
              gst_pad_query_convert (decoder->srcpad, format, start,
                  GST_FORMAT_TIME, &tstart)))
        goto convert_error;
      if (!(res =
              gst_pad_query_convert (decoder->srcpad, format, stop,
                  GST_FORMAT_TIME, &tstop)))
        goto convert_error;

      /* then seek with time on the peer */
      event = gst_event_new_seek (rate, GST_FORMAT_TIME,
          flags, start_type, tstart, stop_type, tstop);
      gst_event_set_seqnum (event, seqnum);

      res = gst_pad_push_event (decoder->sinkpad, event);
      break;
    }
    case GST_EVENT_QOS:
    {
      GstQOSType type;
      gdouble proportion;
      GstClockTimeDiff diff;
      GstClockTime timestamp;

      gst_event_parse_qos (event, &type, &proportion, &diff, &timestamp);

      GST_OBJECT_LOCK (decoder);
      priv->proportion = proportion;
      if (G_LIKELY (GST_CLOCK_TIME_IS_VALID (timestamp))) {
        if (G_UNLIKELY (diff > 0)) {
          priv->earliest_time = timestamp + 2 * diff + priv->qos_frame_duration;
        } else {
          priv->earliest_time = timestamp + diff;
        }
      } else {
        priv->earliest_time = GST_CLOCK_TIME_NONE;
      }
      GST_OBJECT_UNLOCK (decoder);

      GST_DEBUG_OBJECT (decoder,
          "got QoS %" GST_TIME_FORMAT ", %" GST_STIME_FORMAT ", %g",
          GST_TIME_ARGS (timestamp), GST_STIME_ARGS (diff), proportion);

      res = gst_pad_push_event (decoder->sinkpad, event);
      break;
    }
    default:
      res = gst_pad_push_event (decoder->sinkpad, event);
      break;
  }
done:
  return res;

convert_error:
  GST_DEBUG_OBJECT (decoder, "could not convert format");
  goto done;
}
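/* Note on the QOS handling above: when the sink reports that a frame was late
 * (diff > 0), the earliest acceptable time is pushed further into the future
 * than strictly necessary, to timestamp + 2 * diff + qos_frame_duration, so
 * the decoder gets some headroom to catch up. For example, with a timestamp
 * of 1.0 s, a lateness of 40 ms and a 33 ms frame duration, earliest_time
 * becomes about 1.113 s; frames that would only be ready after that deadline
 * can then be skipped by the QoS logic elsewhere in this file. (Worked
 * example added for clarity.)
 */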
static gboolean
gst_video_decoder_src_event (GstPad * pad, GstObject * parent, GstEvent * event)
{
  GstVideoDecoder *decoder;
  GstVideoDecoderClass *decoder_class;
  gboolean ret = FALSE;

  decoder = GST_VIDEO_DECODER (parent);
  decoder_class = GST_VIDEO_DECODER_GET_CLASS (decoder);

  GST_DEBUG_OBJECT (decoder, "received event %d, %s", GST_EVENT_TYPE (event),
      GST_EVENT_TYPE_NAME (event));

  if (decoder_class->src_event)
    ret = decoder_class->src_event (decoder, event);

  return ret;
}

static gboolean
gst_video_decoder_src_query_default (GstVideoDecoder * dec, GstQuery * query)
{
  GstPad *pad = GST_VIDEO_DECODER_SRC_PAD (dec);
  gboolean res = TRUE;

  GST_LOG_OBJECT (dec, "handling query: %" GST_PTR_FORMAT, query);

  switch (GST_QUERY_TYPE (query)) {
    case GST_QUERY_POSITION:
    {
      GstFormat format;
      gint64 time, value;

      /* upstream gets a chance first */
      if ((res = gst_pad_peer_query (dec->sinkpad, query))) {
        GST_LOG_OBJECT (dec, "returning peer response");
        break;
      }

      /* Refuse BYTES format queries. If it made sense to
       * answer them, upstream would have already */
      gst_query_parse_position (query, &format, NULL);

      if (format == GST_FORMAT_BYTES) {
        GST_LOG_OBJECT (dec, "Ignoring BYTES position query");
        break;
      }

      /* we start from the last seen time */
      time = dec->priv->last_timestamp_out;
      /* correct for the segment values */
      time = gst_segment_to_stream_time (&dec->output_segment,
          GST_FORMAT_TIME, time);

      GST_LOG_OBJECT (dec,
          "query %p: our time: %" GST_TIME_FORMAT, query, GST_TIME_ARGS (time));

      /* and convert to the final format */
      if (!(res = gst_pad_query_convert (pad, GST_FORMAT_TIME, time,
                  format, &value)))
        break;

      gst_query_set_position (query, format, value);

      GST_LOG_OBJECT (dec,
          "query %p: we return %" G_GINT64_FORMAT " (format %u)", query, value,
          format);
      break;
    }
    case GST_QUERY_DURATION:
    {
      GstFormat format;

      /* upstream in any case */
      if ((res = gst_pad_query_default (pad, GST_OBJECT (dec), query)))
        break;

      gst_query_parse_duration (query, &format, NULL);
      /* try answering TIME by converting from BYTE if subclass allows */
      if (format == GST_FORMAT_TIME && gst_video_decoder_do_byte (dec)) {
        gint64 value;

        if (gst_pad_peer_query_duration (dec->sinkpad, GST_FORMAT_BYTES,
                &value)) {
          GST_LOG_OBJECT (dec, "upstream size %" G_GINT64_FORMAT, value);
          if (gst_pad_query_convert (dec->sinkpad,
                  GST_FORMAT_BYTES, value, GST_FORMAT_TIME, &value)) {
            gst_query_set_duration (query, GST_FORMAT_TIME, value);
            res = TRUE;
          }
        }
      }
      break;
    }
    case GST_QUERY_CONVERT:
    {
      GstFormat src_fmt, dest_fmt;
      gint64 src_val, dest_val;

      GST_DEBUG_OBJECT (dec, "convert query");

      gst_query_parse_convert (query, &src_fmt, &src_val, &dest_fmt, &dest_val);
      GST_OBJECT_LOCK (dec);
      if (dec->priv->output_state != NULL)
        res = __gst_video_rawvideo_convert (dec->priv->output_state,
            src_fmt, src_val, &dest_fmt, &dest_val);
      else
        res = FALSE;
      GST_OBJECT_UNLOCK (dec);
      if (!res)
        goto error;
      gst_query_set_convert (query, src_fmt, src_val, dest_fmt, dest_val);
      break;
    }
    case GST_QUERY_LATENCY:
    {
      gboolean live;
      GstClockTime min_latency, max_latency;

      res = gst_pad_peer_query (dec->sinkpad, query);
      if (res) {
        gst_query_parse_latency (query, &live, &min_latency, &max_latency);
        GST_DEBUG_OBJECT (dec, "Peer qlatency: live %d, min %"
            GST_TIME_FORMAT " max %" GST_TIME_FORMAT, live,
            GST_TIME_ARGS (min_latency), GST_TIME_ARGS (max_latency));

        GST_OBJECT_LOCK (dec);
        min_latency += dec->priv->min_latency;
        if (max_latency == GST_CLOCK_TIME_NONE
            || dec->priv->max_latency == GST_CLOCK_TIME_NONE)
          max_latency = GST_CLOCK_TIME_NONE;
        else
          max_latency += dec->priv->max_latency;
        GST_OBJECT_UNLOCK (dec);

        gst_query_set_latency (query, live, min_latency, max_latency);
      }
    }
      break;
    default:
      res = gst_pad_query_default (pad, GST_OBJECT (dec), query);
  }
  return res;

error:
  GST_ERROR_OBJECT (dec, "query failed");
  return res;
}
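/* Illustrative sketch (not part of the base class): the POSITION/DURATION
 * handling above is what ultimately answers application-level queries such as
 *
 *   gint64 pos = GST_CLOCK_TIME_NONE;
 *   if (gst_element_query_position (pipeline, GST_FORMAT_TIME, &pos))
 *     g_print ("position: %" GST_TIME_FORMAT "\n", GST_TIME_ARGS (pos));
 *
 * when the query travels upstream from the sink and reaches this decoder's
 * source pad: the code above first gives the upstream peer a chance and only
 * then answers from its own bookkeeping. "pipeline" is a hypothetical
 * application variable, used only for illustration.
 */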
static gboolean
gst_video_decoder_src_query (GstPad * pad, GstObject * parent, GstQuery * query)
{
  GstVideoDecoder *decoder;
  GstVideoDecoderClass *decoder_class;
  gboolean ret = FALSE;

  decoder = GST_VIDEO_DECODER (parent);
  decoder_class = GST_VIDEO_DECODER_GET_CLASS (decoder);

  GST_DEBUG_OBJECT (decoder, "received query %d, %s", GST_QUERY_TYPE (query),
      GST_QUERY_TYPE_NAME (query));

  if (decoder_class->src_query)
    ret = decoder_class->src_query (decoder, query);

  return ret;
}

/**
 * gst_video_decoder_proxy_getcaps:
 * @decoder: a #GstVideoDecoder
 * @caps: (allow-none): initial caps
 * @filter: (allow-none): filter caps
 *
 * Returns caps that express @caps (or sink template caps if @caps == NULL)
 * restricted to resolution/format/... combinations supported by downstream
 * elements.
 *
 * Returns: (transfer full): a #GstCaps owned by caller
 *
 * Since: 1.6
 */
GstCaps *
gst_video_decoder_proxy_getcaps (GstVideoDecoder * decoder, GstCaps * caps,
    GstCaps * filter)
{
  return __gst_video_element_proxy_getcaps (GST_ELEMENT_CAST (decoder),
      GST_VIDEO_DECODER_SINK_PAD (decoder),
      GST_VIDEO_DECODER_SRC_PAD (decoder), caps, filter);
}

static GstCaps *
gst_video_decoder_sink_getcaps (GstVideoDecoder * decoder, GstCaps * filter)
{
  GstVideoDecoderClass *klass;
  GstCaps *caps;

  klass = GST_VIDEO_DECODER_GET_CLASS (decoder);

  if (klass->getcaps)
    caps = klass->getcaps (decoder, filter);
  else
    caps = gst_video_decoder_proxy_getcaps (decoder, NULL, filter);

  GST_LOG_OBJECT (decoder, "Returning caps %" GST_PTR_FORMAT, caps);

  return caps;
}
static gboolean
gst_video_decoder_sink_query_default (GstVideoDecoder * decoder,
    GstQuery * query)
{
  GstPad *pad = GST_VIDEO_DECODER_SINK_PAD (decoder);
  GstVideoDecoderPrivate *priv;
  gboolean res = FALSE;

  priv = decoder->priv;

  GST_LOG_OBJECT (decoder, "handling query: %" GST_PTR_FORMAT, query);

  switch (GST_QUERY_TYPE (query)) {
    case GST_QUERY_CONVERT:
    {
      GstFormat src_fmt, dest_fmt;
      gint64 src_val, dest_val;

      gst_query_parse_convert (query, &src_fmt, &src_val, &dest_fmt, &dest_val);
      GST_OBJECT_LOCK (decoder);
      res =
          __gst_video_encoded_video_convert (priv->bytes_out, priv->time,
          src_fmt, src_val, &dest_fmt, &dest_val);
      GST_OBJECT_UNLOCK (decoder);
      if (!res)
        goto error;
      gst_query_set_convert (query, src_fmt, src_val, dest_fmt, dest_val);
      break;
    }
    case GST_QUERY_ALLOCATION:{
      GstVideoDecoderClass *klass = GST_VIDEO_DECODER_GET_CLASS (decoder);

      if (klass->propose_allocation)
        res = klass->propose_allocation (decoder, query);
      break;
    }
    case GST_QUERY_CAPS:{
      GstCaps *filter, *caps;

      gst_query_parse_caps (query, &filter);
      caps = gst_video_decoder_sink_getcaps (decoder, filter);
      gst_query_set_caps_result (query, caps);
      gst_caps_unref (caps);
      res = TRUE;
      break;
    }
    case GST_QUERY_ACCEPT_CAPS:{
      if (decoder->priv->use_default_pad_acceptcaps) {
        res =
            gst_pad_query_default (GST_VIDEO_DECODER_SINK_PAD (decoder),
            GST_OBJECT_CAST (decoder), query);
      } else {
        GstCaps *caps;
        GstCaps *allowed_caps;
        GstCaps *template_caps;
        gboolean accept;

        gst_query_parse_accept_caps (query, &caps);

        template_caps = gst_pad_get_pad_template_caps (pad);
        accept = gst_caps_is_subset (caps, template_caps);
        gst_caps_unref (template_caps);

        if (accept) {
          allowed_caps =
              gst_pad_query_caps (GST_VIDEO_DECODER_SINK_PAD (decoder), caps);

          accept = gst_caps_can_intersect (caps, allowed_caps);

          gst_caps_unref (allowed_caps);
        }

        gst_query_set_accept_caps_result (query, accept);
        res = TRUE;
      }
      break;
    }
    default:
      res = gst_pad_query_default (pad, GST_OBJECT (decoder), query);
      break;
  }
done:

  return res;
error:
  GST_DEBUG_OBJECT (decoder, "query failed");
  goto done;
}

static gboolean
gst_video_decoder_sink_query (GstPad * pad, GstObject * parent,
    GstQuery * query)
{
  GstVideoDecoder *decoder;
  GstVideoDecoderClass *decoder_class;
  gboolean ret = FALSE;

  decoder = GST_VIDEO_DECODER (parent);
  decoder_class = GST_VIDEO_DECODER_GET_CLASS (decoder);

  GST_DEBUG_OBJECT (decoder, "received query %d, %s", GST_QUERY_TYPE (query),
      GST_QUERY_TYPE_NAME (query));

  if (decoder_class->sink_query)
    ret = decoder_class->sink_query (decoder, query);

  return ret;
}
typedef struct _Timestamp Timestamp;
struct _Timestamp
{
  guint64 offset;
  GstClockTime pts;
  GstClockTime dts;
  GstClockTime duration;
  guint flags;
};

static void
timestamp_free (Timestamp * ts)
{
  g_slice_free (Timestamp, ts);
}

static void
gst_video_decoder_add_buffer_info (GstVideoDecoder * decoder,
    GstBuffer * buffer)
{
  GstVideoDecoderPrivate *priv = decoder->priv;
  Timestamp *ts;

  if (!GST_BUFFER_PTS_IS_VALID (buffer) &&
      !GST_BUFFER_DTS_IS_VALID (buffer) &&
      !GST_BUFFER_DURATION_IS_VALID (buffer) &&
      GST_BUFFER_FLAGS (buffer) == 0) {
    /* Save memory - don't bother storing info
     * for buffers with no distinguishing info */
    return;
  }

  ts = g_slice_new (Timestamp);

  GST_LOG_OBJECT (decoder,
      "adding PTS %" GST_TIME_FORMAT " DTS %" GST_TIME_FORMAT
      " (offset:%" G_GUINT64_FORMAT ")",
      GST_TIME_ARGS (GST_BUFFER_PTS (buffer)),
      GST_TIME_ARGS (GST_BUFFER_DTS (buffer)), priv->input_offset);

  ts->offset = priv->input_offset;
  ts->pts = GST_BUFFER_PTS (buffer);
  ts->dts = GST_BUFFER_DTS (buffer);
  ts->duration = GST_BUFFER_DURATION (buffer);
  ts->flags = GST_BUFFER_FLAGS (buffer);

  priv->timestamps = g_list_append (priv->timestamps, ts);
}

static void
gst_video_decoder_get_buffer_info_at_offset (GstVideoDecoder *
    decoder, guint64 offset, GstClockTime * pts, GstClockTime * dts,
    GstClockTime * duration, guint * flags)
{
#ifndef GST_DISABLE_GST_DEBUG
  guint64 got_offset = 0;
#endif
  Timestamp *ts;
  GList *g;

  *pts = GST_CLOCK_TIME_NONE;
  *dts = GST_CLOCK_TIME_NONE;
  *duration = GST_CLOCK_TIME_NONE;
  *flags = 0;

  g = decoder->priv->timestamps;
  while (g) {
    ts = g->data;
    if (ts->offset <= offset) {
#ifndef GST_DISABLE_GST_DEBUG
      got_offset = ts->offset;
#endif
      *pts = ts->pts;
      *dts = ts->dts;
      *duration = ts->duration;
      *flags = ts->flags;
      g = g->next;
      decoder->priv->timestamps = g_list_remove (decoder->priv->timestamps, ts);
      timestamp_free (ts);
    } else {
      break;
    }
  }

  GST_LOG_OBJECT (decoder,
      "got PTS %" GST_TIME_FORMAT " DTS %" GST_TIME_FORMAT " flags %x @ offs %"
      G_GUINT64_FORMAT " (wanted offset:%" G_GUINT64_FORMAT ")",
      GST_TIME_ARGS (*pts), GST_TIME_ARGS (*dts), *flags, got_offset, offset);
}
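/* Note on the two helpers above: for non-packetized input the base class
 * cannot map timestamps to frames directly, so
 * gst_video_decoder_add_buffer_info() records each input buffer's
 * PTS/DTS/duration/flags keyed by the byte offset at which it entered the
 * input adapter, and gst_video_decoder_get_buffer_info_at_offset() later
 * hands back (and drops) the metadata recorded at or before the offset where
 * the subclass's parsing produced a frame. (Summary added for clarity.)
 */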
static void
gst_video_decoder_clear_queues (GstVideoDecoder * dec)
{
  GstVideoDecoderPrivate *priv = dec->priv;

  g_list_free_full (priv->output_queued,
      (GDestroyNotify) gst_mini_object_unref);
  priv->output_queued = NULL;

  g_list_free_full (priv->gather, (GDestroyNotify) gst_mini_object_unref);
  priv->gather = NULL;
  g_list_free_full (priv->decode, (GDestroyNotify) gst_video_codec_frame_unref);
  priv->decode = NULL;
  g_list_free_full (priv->parse, (GDestroyNotify) gst_mini_object_unref);
  priv->parse = NULL;
  g_list_free_full (priv->parse_gather,
      (GDestroyNotify) gst_video_codec_frame_unref);
  priv->parse_gather = NULL;
  g_list_free_full (priv->frames, (GDestroyNotify) gst_video_codec_frame_unref);
  priv->frames = NULL;
}

static void
gst_video_decoder_reset (GstVideoDecoder * decoder, gboolean full,
    gboolean flush_hard)
{
  GstVideoDecoderPrivate *priv = decoder->priv;

  GST_DEBUG_OBJECT (decoder, "reset full %d", full);

  GST_VIDEO_DECODER_STREAM_LOCK (decoder);

  if (full || flush_hard) {
    gst_segment_init (&decoder->input_segment, GST_FORMAT_UNDEFINED);
    gst_segment_init (&decoder->output_segment, GST_FORMAT_UNDEFINED);
    gst_video_decoder_clear_queues (decoder);
    decoder->priv->in_out_segment_sync = TRUE;

    if (priv->current_frame) {
      gst_video_codec_frame_unref (priv->current_frame);
      priv->current_frame = NULL;
    }

    g_list_free_full (priv->current_frame_events,
        (GDestroyNotify) gst_event_unref);
    priv->current_frame_events = NULL;
    g_list_free_full (priv->pending_events, (GDestroyNotify) gst_event_unref);
    priv->pending_events = NULL;

    priv->error_count = 0;
    priv->max_errors = GST_VIDEO_DECODER_MAX_ERRORS;
    priv->had_output_data = FALSE;
    priv->had_input_data = FALSE;

    GST_OBJECT_LOCK (decoder);
    priv->earliest_time = GST_CLOCK_TIME_NONE;
    priv->proportion = 0.5;
    GST_OBJECT_UNLOCK (decoder);
  }

  if (full) {
    if (priv->input_state)
      gst_video_codec_state_unref (priv->input_state);
    priv->input_state = NULL;
    GST_OBJECT_LOCK (decoder);
    if (priv->output_state)
      gst_video_codec_state_unref (priv->output_state);
    priv->output_state = NULL;

    priv->qos_frame_duration = 0;
    GST_OBJECT_UNLOCK (decoder);

    if (priv->tags)
      gst_tag_list_unref (priv->tags);
    priv->tags = NULL;
    priv->tags_merge_mode = GST_TAG_MERGE_APPEND;
    if (priv->upstream_tags) {
      gst_tag_list_unref (priv->upstream_tags);
      priv->upstream_tags = NULL;
    }
    priv->tags_changed = FALSE;
    priv->reordered_output = FALSE;

    priv->dropped = 0;
    priv->processed = 0;

    priv->decode_frame_number = 0;
    priv->base_picture_number = 0;

    if (priv->pool) {
      GST_DEBUG_OBJECT (decoder, "deactivate pool %" GST_PTR_FORMAT,
          priv->pool);
      gst_buffer_pool_set_active (priv->pool, FALSE);
      gst_object_unref (priv->pool);
      priv->pool = NULL;
    }

    if (priv->allocator) {
      gst_object_unref (priv->allocator);
      priv->allocator = NULL;
    }
  }

  priv->discont = TRUE;

  priv->base_timestamp = GST_CLOCK_TIME_NONE;
  priv->last_timestamp_out = GST_CLOCK_TIME_NONE;
  priv->pts_delta = GST_CLOCK_TIME_NONE;

  priv->input_offset = 0;
  priv->frame_offset = 0;
  gst_adapter_clear (priv->input_adapter);
  gst_adapter_clear (priv->output_adapter);
  g_list_free_full (priv->timestamps, (GDestroyNotify) timestamp_free);
  priv->timestamps = NULL;

  GST_OBJECT_LOCK (decoder);
  priv->bytes_out = 0;
  priv->time = 0;
  GST_OBJECT_UNLOCK (decoder);

#ifndef GST_DISABLE_DEBUG
  priv->last_reset_time = gst_util_get_timestamp ();
#endif

  GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
}
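/* Note on gst_video_decoder_reset() above: a hard flush (flush_hard, e.g. as
 * part of handling FLUSH_STOP) clears the segments, queued frames, pending
 * events and QoS state, while a full reset (full, used around element
 * start/stop) additionally drops the negotiated input/output state, tags,
 * statistics and any buffer pool or allocator that was set up. (Summary added
 * for clarity.)
 */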
static GstFlowReturn
gst_video_decoder_chain_forward (GstVideoDecoder * decoder,
    GstBuffer * buf, gboolean at_eos)
{
  GstVideoDecoderPrivate *priv;
  GstVideoDecoderClass *klass;
  GstFlowReturn ret = GST_FLOW_OK;

  klass = GST_VIDEO_DECODER_GET_CLASS (decoder);
  priv = decoder->priv;

  g_return_val_if_fail (priv->packetized || klass->parse, GST_FLOW_ERROR);

  /* Draining on DISCONT is handled in chain_reverse() for reverse playback,
   * and this function would only be called to get everything collected GOP
   * by GOP in the parse_gather list */
  if (decoder->input_segment.rate > 0.0 && GST_BUFFER_IS_DISCONT (buf)
      && (decoder->input_segment.flags & GST_SEEK_FLAG_TRICKMODE_KEY_UNITS))
    ret = gst_video_decoder_drain_out (decoder, FALSE);

  if (priv->current_frame == NULL)
    priv->current_frame = gst_video_decoder_new_frame (decoder);

  if (!priv->packetized)
    gst_video_decoder_add_buffer_info (decoder, buf);

  priv->input_offset += gst_buffer_get_size (buf);

  if (priv->packetized) {
    gboolean was_keyframe = FALSE;
    if (!GST_BUFFER_FLAG_IS_SET (buf, GST_BUFFER_FLAG_DELTA_UNIT)) {
      was_keyframe = TRUE;
      GST_LOG_OBJECT (decoder, "Marking current_frame as sync point");
      GST_VIDEO_CODEC_FRAME_SET_SYNC_POINT (priv->current_frame);
    }

    priv->current_frame->input_buffer = buf;

    if (decoder->input_segment.rate < 0.0) {
      priv->parse_gather =
          g_list_prepend (priv->parse_gather, priv->current_frame);
    } else {
      ret = gst_video_decoder_decode_frame (decoder, priv->current_frame);
    }
    priv->current_frame = NULL;
    /* If in trick mode and it was a keyframe, drain decoder to avoid extra
     * latency. Only do this for forwards playback as reverse playback handles
     * draining on keyframes in flush_parse(), and would otherwise call back
     * from drain_out() to here causing an infinite loop.
     * Also this function is only called for reverse playback to gather frames
     * GOP by GOP, and does not do any actual decoding. That would be done by
     * flush_decode() */
    if (ret == GST_FLOW_OK && was_keyframe && decoder->input_segment.rate > 0.0
        && (decoder->input_segment.flags & GST_SEEK_FLAG_TRICKMODE_KEY_UNITS))
      ret = gst_video_decoder_drain_out (decoder, FALSE);
  } else {
    gst_adapter_push (priv->input_adapter, buf);

    ret = gst_video_decoder_parse_available (decoder, at_eos, TRUE);
  }

  if (ret == GST_VIDEO_DECODER_FLOW_NEED_DATA)
    return GST_FLOW_OK;

  return ret;
}
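/* Note on gst_video_decoder_chain_forward() above: in packetized mode every
 * input buffer is treated as exactly one frame (marked as a sync point when
 * the DELTA_UNIT flag is not set) and either decoded directly or, for reverse
 * playback, prepended to parse_gather. In non-packetized mode the buffer is
 * pushed into the input adapter and the subclass's parsing splits it into
 * frames via gst_video_decoder_parse_available(). (Summary added for
 * clarity.)
 */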
static GstFlowReturn
gst_video_decoder_flush_decode (GstVideoDecoder * dec)
{
  GstVideoDecoderPrivate *priv = dec->priv;
  GstFlowReturn res = GST_FLOW_OK;
  GList *walk;

  GST_DEBUG_OBJECT (dec, "flushing buffers to decode");

  walk = priv->decode;
  while (walk) {
    GList *next;
    GstVideoCodecFrame *frame = (GstVideoCodecFrame *) (walk->data);

    GST_DEBUG_OBJECT (dec, "decoding frame %p buffer %p, PTS %" GST_TIME_FORMAT
        ", DTS %" GST_TIME_FORMAT, frame, frame->input_buffer,
        GST_TIME_ARGS (GST_BUFFER_PTS (frame->input_buffer)),
        GST_TIME_ARGS (GST_BUFFER_DTS (frame->input_buffer)));

    next = walk->next;

    priv->decode = g_list_delete_link (priv->decode, walk);

    /* decode buffer, resulting data prepended to queue */
    res = gst_video_decoder_decode_frame (dec, frame);
    if (res != GST_FLOW_OK)
      break;

    walk = next;
  }

  return res;
}
/* gst_video_decoder_flush_parse is called from the
 * chain_reverse() function when a buffer carrying
 * a DISCONT arrives, indicating that reverse playback
 * looped back to the next data block, and therefore
 * all available data should be fed through the
 * decoder and frames gathered for reversed output
 */
static GstFlowReturn
gst_video_decoder_flush_parse (GstVideoDecoder * dec, gboolean at_eos)
{
  GstVideoDecoderPrivate *priv = dec->priv;
  GstFlowReturn res = GST_FLOW_OK;
  GList *walk;
  GstVideoDecoderClass *decoder_class;

  decoder_class = GST_VIDEO_DECODER_GET_CLASS (dec);

  GST_DEBUG_OBJECT (dec, "flushing buffers to parsing");

  /* Reverse the gather list, and prepend it to the parse list,
   * then flush to parse whatever we can */
  priv->gather = g_list_reverse (priv->gather);
  priv->parse = g_list_concat (priv->gather, priv->parse);
  priv->gather = NULL;

  /* clear buffer and decoder state */
  gst_video_decoder_flush (dec, FALSE);

  walk = priv->parse;
  while (walk) {
    GstBuffer *buf = GST_BUFFER_CAST (walk->data);
    GList *next = walk->next;

    GST_DEBUG_OBJECT (dec, "parsing buffer %p, PTS %" GST_TIME_FORMAT
        ", DTS %" GST_TIME_FORMAT " flags %x", buf,
        GST_TIME_ARGS (GST_BUFFER_PTS (buf)),
        GST_TIME_ARGS (GST_BUFFER_DTS (buf)), GST_BUFFER_FLAGS (buf));

    /* parse buffer, resulting frames prepended to parse_gather queue */
    gst_buffer_ref (buf);
    res = gst_video_decoder_chain_forward (dec, buf, at_eos);

    /* if we generated output, we can discard the buffer, else we
     * keep it in the queue */
    if (priv->parse_gather) {
      GST_DEBUG_OBJECT (dec, "parsed buffer to %p", priv->parse_gather->data);
      priv->parse = g_list_delete_link (priv->parse, walk);
      gst_buffer_unref (buf);
    } else {
      GST_DEBUG_OBJECT (dec, "buffer did not decode, keeping");
    }
    walk = next;
  }

  walk = priv->parse_gather;
  while (walk) {
    GstVideoCodecFrame *frame = (GstVideoCodecFrame *) (walk->data);
    GList *walk2;

    /* this is reverse playback, check if we need to apply some segment
     * to the output before decoding, as during decoding the segment.rate
     * must be used to determine if a buffer should be pushed or added to
     * the output list for reverse pushing.
     *
     * The new segment is not immediately pushed here because we must
     * wait for negotiation to happen before it can be pushed to avoid
     * pushing a segment before caps event. Negotiation only happens
     * when finish_frame is called.
     */
    for (walk2 = frame->events; walk2;) {
      GList *cur = walk2;
      GstEvent *event = walk2->data;

      walk2 = g_list_next (walk2);
      if (GST_EVENT_TYPE (event) <= GST_EVENT_SEGMENT) {

        if (GST_EVENT_TYPE (event) == GST_EVENT_SEGMENT) {
          GstSegment segment;

          GST_DEBUG_OBJECT (dec, "Segment at frame %p %" GST_TIME_FORMAT,
              frame, GST_TIME_ARGS (GST_BUFFER_PTS (frame->input_buffer)));
          gst_event_copy_segment (event, &segment);
          if (segment.format == GST_FORMAT_TIME) {
            dec->output_segment = segment;
            dec->priv->in_out_segment_sync =
                gst_segment_is_equal (&dec->input_segment, &segment);
          }
        }
        dec->priv->pending_events =
            g_list_append (dec->priv->pending_events, event);
        frame->events = g_list_delete_link (frame->events, cur);
      }
    }

    walk = walk->next;
  }

  /* now we can process frames. Start by moving each frame from the parse_gather
   * to the decode list, reverse the order as we go, and stopping when/if we
   * copy a keyframe. */
  GST_DEBUG_OBJECT (dec, "checking parsed frames for a keyframe to decode");
  walk = priv->parse_gather;
  while (walk) {
    GstVideoCodecFrame *frame = (GstVideoCodecFrame *) (walk->data);

    /* remove from the gather list */
    priv->parse_gather = g_list_remove_link (priv->parse_gather, walk);

    /* move it to the front of the decode queue */
    priv->decode = g_list_concat (walk, priv->decode);

    /* if we copied a keyframe, flush and decode the decode queue */
    if (GST_VIDEO_CODEC_FRAME_IS_SYNC_POINT (frame)) {
      GST_DEBUG_OBJECT (dec, "found keyframe %p with PTS %" GST_TIME_FORMAT
          ", DTS %" GST_TIME_FORMAT, frame,
          GST_TIME_ARGS (GST_BUFFER_PTS (frame->input_buffer)),
          GST_TIME_ARGS (GST_BUFFER_DTS (frame->input_buffer)));
      res = gst_video_decoder_flush_decode (dec);
      if (res != GST_FLOW_OK)
        goto done;

      /* We need to tell the subclass to drain now.
       * We prefer the drain vfunc, but for backward-compat
       * we use a finish() vfunc if drain isn't implemented */
      if (decoder_class->drain) {
        GST_DEBUG_OBJECT (dec, "Draining");
        res = decoder_class->drain (dec);
      } else if (decoder_class->finish) {
        GST_FIXME_OBJECT (dec, "Sub-class should implement drain(). "
            "Calling finish() for backwards-compat");
        res = decoder_class->finish (dec);
      }

      if (res != GST_FLOW_OK)
        goto done;

      /* now send queued data downstream */
      walk = priv->output_queued;
      while (walk) {
        GstBuffer *buf = GST_BUFFER_CAST (walk->data);

        priv->output_queued =
            g_list_delete_link (priv->output_queued, priv->output_queued);

        if (G_LIKELY (res == GST_FLOW_OK)) {
          /* avoid stray DISCONT from forward processing,
           * which have no meaning in reverse pushing */
          GST_BUFFER_FLAG_UNSET (buf, GST_BUFFER_FLAG_DISCONT);

          /* Last chance to calculate a timestamp as we loop backwards
           * through the list */
          if (GST_BUFFER_TIMESTAMP (buf) != GST_CLOCK_TIME_NONE)
            priv->last_timestamp_out = GST_BUFFER_TIMESTAMP (buf);
          else if (priv->last_timestamp_out != GST_CLOCK_TIME_NONE &&
              GST_BUFFER_DURATION (buf) != GST_CLOCK_TIME_NONE) {
            GST_BUFFER_TIMESTAMP (buf) =
                priv->last_timestamp_out - GST_BUFFER_DURATION (buf);
            priv->last_timestamp_out = GST_BUFFER_TIMESTAMP (buf);
            GST_LOG_OBJECT (dec,
                "Calculated TS %" GST_TIME_FORMAT " working backwards",
                GST_TIME_ARGS (priv->last_timestamp_out));
          }

          res = gst_video_decoder_clip_and_push_buf (dec, buf);
        } else {
          gst_buffer_unref (buf);
        }

        walk = priv->output_queued;
      }

      /* clear buffer and decoder state again
       * before moving to the previous keyframe */
      gst_video_decoder_flush (dec, FALSE);
    }
|
|
|
|
|
videodecoder: In reverse playback, flush the output queue after decoding each keyframe chain
This fixes the reverse playback scenario when upstream is not fully
parsing the stream and does not send every keyframe chain separately
with the DISCONT flag on the keyframe.
To explain this, let's suppose we have this stream:
0 1 2 3 4 5 6 7 8
K K K
In most circumstances, the upstream parser will chain in the
decoder the buffers in the following order:
6 7 8 3 4 5 0 1 2
D D D
In this case, GstVideoDecoder will flush the parse queue every time
it receives discont (D) and we will eventually get in the output queue:
(flush here) 8 7 6 (flush here) 5 4 3 (flush here) 2 1 0
In case the upstream parser doesn't do this work, though,
GstVideoDecoder will receive the whole stream at once and will flush
the parse queue afterwards:
0 1 2 3 4 5 6 7 8
D
During the flush, it will look backwards for keyframes and will
decode in this order:
6 7 8 3 4 5 0 1 2
This is the same order that it would receive from upstream if
upstream was parsing and looking for the keyframes, only that now
there is no flushing of the output queue in between keyframes,
which will result in the output queue looking like this:
2 1 0 6 5 3 8 7 6
This will confuse downstream obviously and will play incorrectly.
This patch forces the decoder to flush the output queue every time
it picks a new keyframe to decode, so it will end up decoding 6 7 8
and then flushing before picking 3 for decoding, so the output will
get 8 7 6 before 6 5 3 and the video will play back correctly.
https://bugzilla.gnome.org/show_bug.cgi?id=734441
2014-08-07 15:10:41 +00:00
|
|
|
walk = priv->parse_gather;
|
2012-03-07 09:18:49 +00:00
|
|
|
}
|
|
|
|
|
2012-06-19 14:22:25 +00:00
|
|
|
done:
|
2012-03-07 09:18:49 +00:00
|
|
|
return res;
|
|
|
|
}
|
|
|
|
|
|
|
|
static GstFlowReturn
gst_video_decoder_chain_reverse (GstVideoDecoder * dec, GstBuffer * buf)
{
  GstVideoDecoderPrivate *priv = dec->priv;
  GstFlowReturn result = GST_FLOW_OK;

  /* if we have a discont, move buffers to the decode list */
  if (!buf || GST_BUFFER_IS_DISCONT (buf)) {
    GST_DEBUG_OBJECT (dec, "received discont");

    /* parse and decode stuff in the gather and parse queues */
    result = gst_video_decoder_flush_parse (dec, FALSE);
  }

  if (G_LIKELY (buf)) {
    GST_DEBUG_OBJECT (dec, "gathering buffer %p of size %" G_GSIZE_FORMAT ", "
        "PTS %" GST_TIME_FORMAT ", DTS %" GST_TIME_FORMAT ", dur %"
        GST_TIME_FORMAT, buf, gst_buffer_get_size (buf),
        GST_TIME_ARGS (GST_BUFFER_PTS (buf)),
        GST_TIME_ARGS (GST_BUFFER_DTS (buf)),
        GST_TIME_ARGS (GST_BUFFER_DURATION (buf)));

    /* add buffer to gather queue */
    priv->gather = g_list_prepend (priv->gather, buf);
  }

  return result;
}

static GstFlowReturn
gst_video_decoder_chain (GstPad * pad, GstObject * parent, GstBuffer * buf)
{
  GstVideoDecoder *decoder;
  GstFlowReturn ret = GST_FLOW_OK;

  decoder = GST_VIDEO_DECODER (parent);

  if (G_UNLIKELY (!decoder->priv->input_state && decoder->priv->needs_format))
    goto not_negotiated;

  GST_LOG_OBJECT (decoder,
      "chain PTS %" GST_TIME_FORMAT ", DTS %" GST_TIME_FORMAT " duration %"
      GST_TIME_FORMAT " size %" G_GSIZE_FORMAT " flags %x",
      GST_TIME_ARGS (GST_BUFFER_PTS (buf)),
      GST_TIME_ARGS (GST_BUFFER_DTS (buf)),
      GST_TIME_ARGS (GST_BUFFER_DURATION (buf)),
      gst_buffer_get_size (buf), GST_BUFFER_FLAGS (buf));

  GST_VIDEO_DECODER_STREAM_LOCK (decoder);

  /* NOTE:
   * requiring the pad to be negotiated makes it impossible to use
   * oggdemux or filesrc ! decoder */

  if (decoder->input_segment.format == GST_FORMAT_UNDEFINED) {
    GstEvent *event;
    GstSegment *segment = &decoder->input_segment;

    GST_WARNING_OBJECT (decoder,
        "Received buffer without a new-segment. "
        "Assuming timestamps start from 0.");

    gst_segment_init (segment, GST_FORMAT_TIME);

    event = gst_event_new_segment (segment);

    decoder->priv->current_frame_events =
        g_list_prepend (decoder->priv->current_frame_events, event);
  }

  decoder->priv->had_input_data = TRUE;

  if (decoder->input_segment.rate > 0.0)
    ret = gst_video_decoder_chain_forward (decoder, buf, FALSE);
  else
    ret = gst_video_decoder_chain_reverse (decoder, buf);

  GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
  return ret;

  /* ERRORS */
not_negotiated:
  {
    GST_ELEMENT_ERROR (decoder, CORE, NEGOTIATION, (NULL),
        ("decoder not initialized"));
    gst_buffer_unref (buf);
    return GST_FLOW_NOT_NEGOTIATED;
  }
}

static GstStateChangeReturn
gst_video_decoder_change_state (GstElement * element, GstStateChange transition)
{
  GstVideoDecoder *decoder;
  GstVideoDecoderClass *decoder_class;
  GstStateChangeReturn ret;

  decoder = GST_VIDEO_DECODER (element);
  decoder_class = GST_VIDEO_DECODER_GET_CLASS (element);

  switch (transition) {
    case GST_STATE_CHANGE_NULL_TO_READY:
      /* open device/library if needed */
      if (decoder_class->open && !decoder_class->open (decoder))
        goto open_failed;
      break;
    case GST_STATE_CHANGE_READY_TO_PAUSED:
      GST_VIDEO_DECODER_STREAM_LOCK (decoder);
      gst_video_decoder_reset (decoder, TRUE, TRUE);
      GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);

      /* Initialize device/library if needed */
      if (decoder_class->start && !decoder_class->start (decoder))
        goto start_failed;
      break;
    default:
      break;
  }

  ret = GST_ELEMENT_CLASS (parent_class)->change_state (element, transition);

  switch (transition) {
    case GST_STATE_CHANGE_PAUSED_TO_READY:{
      gboolean stopped = TRUE;

      if (decoder_class->stop)
        stopped = decoder_class->stop (decoder);

      GST_VIDEO_DECODER_STREAM_LOCK (decoder);
      gst_video_decoder_reset (decoder, TRUE, TRUE);
      GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);

      if (!stopped)
        goto stop_failed;

      break;
    }
    case GST_STATE_CHANGE_READY_TO_NULL:
      /* close device/library if needed */
      if (decoder_class->close && !decoder_class->close (decoder))
        goto close_failed;
      break;
    default:
      break;
  }

  return ret;

  /* Errors */
open_failed:
  {
    GST_ELEMENT_ERROR (decoder, LIBRARY, INIT, (NULL),
        ("Failed to open decoder"));
    return GST_STATE_CHANGE_FAILURE;
  }

start_failed:
  {
    GST_ELEMENT_ERROR (decoder, LIBRARY, INIT, (NULL),
        ("Failed to start decoder"));
    return GST_STATE_CHANGE_FAILURE;
  }

stop_failed:
  {
    GST_ELEMENT_ERROR (decoder, LIBRARY, INIT, (NULL),
        ("Failed to stop decoder"));
    return GST_STATE_CHANGE_FAILURE;
  }

close_failed:
  {
    GST_ELEMENT_ERROR (decoder, LIBRARY, INIT, (NULL),
        ("Failed to close decoder"));
    return GST_STATE_CHANGE_FAILURE;
  }
}

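/* Illustrative sketch (not part of this file): how a subclass would hook the
 * open/start/stop/close vfuncs that the state change handler above invokes.
 * "MyDecoder"/"my_decoder_*" and MyDecoderClass are hypothetical names; the
 * usual G_DEFINE_TYPE boilerplate is assumed. */
#if 0
static gboolean
my_decoder_start (GstVideoDecoder * decoder)
{
  /* allocate per-stream resources; called on READY_TO_PAUSED */
  return TRUE;
}

static gboolean
my_decoder_stop (GstVideoDecoder * decoder)
{
  /* free per-stream resources; called on PAUSED_TO_READY */
  return TRUE;
}

static void
my_decoder_class_init (MyDecoderClass * klass)
{
  GstVideoDecoderClass *video_decoder_class = GST_VIDEO_DECODER_CLASS (klass);

  /* open/close (NULL <-> READY) are optional; start/stop usually suffice */
  video_decoder_class->start = GST_DEBUG_FUNCPTR (my_decoder_start);
  video_decoder_class->stop = GST_DEBUG_FUNCPTR (my_decoder_stop);
}
#endif
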
static GstVideoCodecFrame *
gst_video_decoder_new_frame (GstVideoDecoder * decoder)
{
  GstVideoDecoderPrivate *priv = decoder->priv;
  GstVideoCodecFrame *frame;

  frame = g_slice_new0 (GstVideoCodecFrame);

  frame->ref_count = 1;

  GST_VIDEO_DECODER_STREAM_LOCK (decoder);
  frame->system_frame_number = priv->system_frame_number;
  priv->system_frame_number++;
  frame->decode_frame_number = priv->decode_frame_number;
  priv->decode_frame_number++;

  frame->dts = GST_CLOCK_TIME_NONE;
  frame->pts = GST_CLOCK_TIME_NONE;
  frame->duration = GST_CLOCK_TIME_NONE;
  frame->events = priv->current_frame_events;
  priv->current_frame_events = NULL;

  GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);

  GST_LOG_OBJECT (decoder, "Created new frame %p (sfn:%d)",
      frame, frame->system_frame_number);

  return frame;
}

static void
gst_video_decoder_push_event_list (GstVideoDecoder * decoder, GList * events)
{
  GList *l;

  /* events are stored in reverse order */
  for (l = g_list_last (events); l; l = g_list_previous (l)) {
    GST_LOG_OBJECT (decoder, "pushing %s event", GST_EVENT_TYPE_NAME (l->data));
    gst_video_decoder_push_event (decoder, l->data);
  }
  g_list_free (events);
}

static void
gst_video_decoder_prepare_finish_frame (GstVideoDecoder *
    decoder, GstVideoCodecFrame * frame, gboolean dropping)
{
  GstVideoDecoderPrivate *priv = decoder->priv;
  GList *l, *events = NULL;
  gboolean sync;

#ifndef GST_DISABLE_GST_DEBUG
  GST_LOG_OBJECT (decoder, "n %d in %" G_GSIZE_FORMAT " out %" G_GSIZE_FORMAT,
      g_list_length (priv->frames),
      gst_adapter_available (priv->input_adapter),
      gst_adapter_available (priv->output_adapter));
#endif

  sync = GST_VIDEO_CODEC_FRAME_IS_SYNC_POINT (frame);

  GST_LOG_OBJECT (decoder,
      "finish frame %p (#%d) sync:%d PTS:%" GST_TIME_FORMAT " DTS:%"
      GST_TIME_FORMAT,
      frame, frame->system_frame_number,
      sync, GST_TIME_ARGS (frame->pts), GST_TIME_ARGS (frame->dts));

  /* Push all pending events that arrived before this frame */
  for (l = priv->frames; l; l = l->next) {
    GstVideoCodecFrame *tmp = l->data;

    if (tmp->events) {
      events = g_list_concat (tmp->events, events);
      tmp->events = NULL;
    }

    if (tmp == frame)
      break;
  }

  if (dropping || !decoder->priv->output_state) {
    /* Push before the next frame that is not dropped */
    decoder->priv->pending_events =
        g_list_concat (events, decoder->priv->pending_events);
  } else {
    gst_video_decoder_push_event_list (decoder, decoder->priv->pending_events);
    decoder->priv->pending_events = NULL;

    gst_video_decoder_push_event_list (decoder, events);
  }

  /* Check if the data should not be displayed. For example altref/invisible
   * frame in vp8. In this case we should not update the timestamps. */
  if (GST_VIDEO_CODEC_FRAME_IS_DECODE_ONLY (frame))
    return;

  /* If the frame is meant to be output but we don't have an output_buffer
   * we have a problem :) */
  if (G_UNLIKELY ((frame->output_buffer == NULL) && !dropping))
    goto no_output_buffer;

  if (GST_CLOCK_TIME_IS_VALID (frame->pts)) {
    if (frame->pts != priv->base_timestamp) {
      GST_DEBUG_OBJECT (decoder,
          "sync timestamp %" GST_TIME_FORMAT " diff %" GST_STIME_FORMAT,
          GST_TIME_ARGS (frame->pts),
          GST_STIME_ARGS (GST_CLOCK_DIFF (frame->pts,
                  decoder->output_segment.start)));
      priv->base_timestamp = frame->pts;
      priv->base_picture_number = frame->decode_frame_number;
    }
  }

  if (frame->duration == GST_CLOCK_TIME_NONE) {
    frame->duration = gst_video_decoder_get_frame_duration (decoder, frame);
    GST_LOG_OBJECT (decoder,
        "Guessing duration %" GST_TIME_FORMAT " for frame...",
        GST_TIME_ARGS (frame->duration));
  }

  /* PTS is expected to be monotonically ascending,
   * so a good guess is the lowest unsent DTS */
  {
    GstClockTime min_ts = GST_CLOCK_TIME_NONE;
    GstVideoCodecFrame *oframe = NULL;
    gboolean seen_none = FALSE;

    /* some maintenance regardless */
    for (l = priv->frames; l; l = l->next) {
      GstVideoCodecFrame *tmp = l->data;

      if (!GST_CLOCK_TIME_IS_VALID (tmp->abidata.ABI.ts)) {
        seen_none = TRUE;
        continue;
      }

      if (!GST_CLOCK_TIME_IS_VALID (min_ts) || tmp->abidata.ABI.ts < min_ts) {
        min_ts = tmp->abidata.ABI.ts;
        oframe = tmp;
      }
    }
    /* save a ts if needed */
    if (oframe && oframe != frame) {
      oframe->abidata.ABI.ts = frame->abidata.ABI.ts;
    }

    /* and set if needed;
     * valid delta means we have reasonable DTS input */
    /* also, if we ended up reordered, means this approach is conflicting
     * with some sparse existing PTS, and so it does not work out */
    if (!priv->reordered_output &&
        !GST_CLOCK_TIME_IS_VALID (frame->pts) && !seen_none &&
        GST_CLOCK_TIME_IS_VALID (priv->pts_delta)) {
      frame->pts = min_ts + priv->pts_delta;
      GST_DEBUG_OBJECT (decoder,
          "no valid PTS, using oldest DTS %" GST_TIME_FORMAT,
          GST_TIME_ARGS (frame->pts));
    }

    /* some more maintenance, ts2 holds PTS */
    min_ts = GST_CLOCK_TIME_NONE;
    seen_none = FALSE;
    for (l = priv->frames; l; l = l->next) {
      GstVideoCodecFrame *tmp = l->data;

      if (!GST_CLOCK_TIME_IS_VALID (tmp->abidata.ABI.ts2)) {
        seen_none = TRUE;
        continue;
      }

      if (!GST_CLOCK_TIME_IS_VALID (min_ts) || tmp->abidata.ABI.ts2 < min_ts) {
        min_ts = tmp->abidata.ABI.ts2;
        oframe = tmp;
      }
    }
    /* save a ts if needed */
    if (oframe && oframe != frame) {
      oframe->abidata.ABI.ts2 = frame->abidata.ABI.ts2;
    }

    /* if we detected reordered output, then PTS are void,
     * however those were obtained; bogus input, subclass etc */
    if (priv->reordered_output && !seen_none) {
      GST_DEBUG_OBJECT (decoder, "invalidating PTS");
      frame->pts = GST_CLOCK_TIME_NONE;
    }

    if (!GST_CLOCK_TIME_IS_VALID (frame->pts) && !seen_none) {
      frame->pts = min_ts;
      GST_DEBUG_OBJECT (decoder,
          "no valid PTS, using oldest PTS %" GST_TIME_FORMAT,
          GST_TIME_ARGS (frame->pts));
    }
  }

  if (frame->pts == GST_CLOCK_TIME_NONE) {
    /* Last ditch timestamp guess: Just add the duration to the previous
     * frame. If it's the first frame, just use the segment start. */
    if (frame->duration != GST_CLOCK_TIME_NONE) {
      if (GST_CLOCK_TIME_IS_VALID (priv->last_timestamp_out))
        frame->pts = priv->last_timestamp_out + frame->duration;
      else if (decoder->output_segment.rate > 0.0)
        frame->pts = decoder->output_segment.start;
      GST_LOG_OBJECT (decoder,
          "Guessing timestamp %" GST_TIME_FORMAT " for frame...",
          GST_TIME_ARGS (frame->pts));
    } else if (sync && frame->dts != GST_CLOCK_TIME_NONE) {
      frame->pts = frame->dts;
      GST_LOG_OBJECT (decoder,
          "Setting DTS as PTS %" GST_TIME_FORMAT " for frame...",
          GST_TIME_ARGS (frame->pts));
    }
  }

  if (GST_CLOCK_TIME_IS_VALID (priv->last_timestamp_out)) {
    if (frame->pts < priv->last_timestamp_out) {
      GST_WARNING_OBJECT (decoder,
          "decreasing timestamp (%" GST_TIME_FORMAT " < %"
          GST_TIME_FORMAT ")",
          GST_TIME_ARGS (frame->pts), GST_TIME_ARGS (priv->last_timestamp_out));
      priv->reordered_output = TRUE;
      /* make it a bit less weird downstream */
      frame->pts = priv->last_timestamp_out;
    }
  }

  if (GST_CLOCK_TIME_IS_VALID (frame->pts))
    priv->last_timestamp_out = frame->pts;

  return;

  /* ERRORS */
no_output_buffer:
  {
    GST_ERROR_OBJECT (decoder, "No buffer to output !");
  }
}

/**
 * gst_video_decoder_release_frame:
 * @dec: a #GstVideoDecoder
 * @frame: (transfer full): the #GstVideoCodecFrame to release
 *
 * Similar to gst_video_decoder_drop_frame(), but simply releases @frame
 * without any processing other than removing it from the list of pending
 * frames, after which it is considered finished and released.
 *
 * Since: 1.2.2
 */
void
gst_video_decoder_release_frame (GstVideoDecoder * dec,
    GstVideoCodecFrame * frame)
{
  GList *link;

  /* unref once from the list */
  GST_VIDEO_DECODER_STREAM_LOCK (dec);
  link = g_list_find (dec->priv->frames, frame);
  if (link) {
    gst_video_codec_frame_unref (frame);
    dec->priv->frames = g_list_delete_link (dec->priv->frames, link);
  }
  if (frame->events) {
    dec->priv->pending_events =
        g_list_concat (frame->events, dec->priv->pending_events);
    frame->events = NULL;
  }
  GST_VIDEO_DECODER_STREAM_UNLOCK (dec);

  /* unref because this function takes ownership */
  gst_video_codec_frame_unref (frame);
}

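/* Usage sketch (hypothetical subclass code, not part of this file): releasing
 * a frame that will never produce output, e.g. metadata-only input, without
 * posting a QoS drop message. "my_decoder_frame_is_metadata_only" and
 * "my_decoder_decode_and_finish" are assumed helpers. */
#if 0
static GstFlowReturn
my_decoder_handle_frame (GstVideoDecoder * decoder, GstVideoCodecFrame * frame)
{
  if (my_decoder_frame_is_metadata_only (frame->input_buffer)) {
    /* takes ownership of @frame and removes it from the pending frame list */
    gst_video_decoder_release_frame (decoder, frame);
    return GST_FLOW_OK;
  }

  /* normal decoding path would follow here */
  return my_decoder_decode_and_finish (decoder, frame);
}
#endif
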
/* called with STREAM_LOCK */
static void
gst_video_decoder_post_qos_drop (GstVideoDecoder * dec, GstClockTime timestamp)
{
  GstClockTime stream_time, jitter, earliest_time, qostime;
  GstSegment *segment;
  GstMessage *qos_msg;
  gdouble proportion;

  dec->priv->dropped++;

  /* post QoS message */
  GST_OBJECT_LOCK (dec);
  proportion = dec->priv->proportion;
  earliest_time = dec->priv->earliest_time;
  GST_OBJECT_UNLOCK (dec);

  segment = &dec->output_segment;
  if (G_UNLIKELY (segment->format == GST_FORMAT_UNDEFINED))
    segment = &dec->input_segment;
  stream_time =
      gst_segment_to_stream_time (segment, GST_FORMAT_TIME, timestamp);
  qostime = gst_segment_to_running_time (segment, GST_FORMAT_TIME, timestamp);
  jitter = GST_CLOCK_DIFF (qostime, earliest_time);
  qos_msg =
      gst_message_new_qos (GST_OBJECT_CAST (dec), FALSE, qostime, stream_time,
      timestamp, GST_CLOCK_TIME_NONE);
  gst_message_set_qos_values (qos_msg, jitter, proportion, 1000000);
  gst_message_set_qos_stats (qos_msg, GST_FORMAT_BUFFERS,
      dec->priv->processed, dec->priv->dropped);
  gst_element_post_message (GST_ELEMENT_CAST (dec), qos_msg);
}

/**
 * gst_video_decoder_drop_frame:
 * @dec: a #GstVideoDecoder
 * @frame: (transfer full): the #GstVideoCodecFrame to drop
 *
 * Similar to gst_video_decoder_finish_frame(), but drops @frame in any
 * case and posts a QoS message with the frame's details on the bus.
 * In any case, the frame is considered finished and released.
 *
 * Returns: a #GstFlowReturn, usually GST_FLOW_OK.
 */
GstFlowReturn
gst_video_decoder_drop_frame (GstVideoDecoder * dec, GstVideoCodecFrame * frame)
{
  GST_LOG_OBJECT (dec, "drop frame %p", frame);

  GST_VIDEO_DECODER_STREAM_LOCK (dec);

  gst_video_decoder_prepare_finish_frame (dec, frame, TRUE);

  GST_DEBUG_OBJECT (dec, "dropping frame %" GST_TIME_FORMAT,
      GST_TIME_ARGS (frame->pts));

  gst_video_decoder_post_qos_drop (dec, frame->pts);

  /* now free the frame */
  gst_video_decoder_release_frame (dec, frame);

  GST_VIDEO_DECODER_STREAM_UNLOCK (dec);

  return GST_FLOW_OK;
}

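/* Usage sketch (hypothetical subclass code, not part of this file): the
 * typical either/or at the end of decoding one frame -- finish it when a
 * picture was produced, drop it otherwise so the base class posts a QoS
 * message and keeps its statistics consistent. */
#if 0
static GstFlowReturn
my_decoder_output_or_drop (GstVideoDecoder * decoder,
    GstVideoCodecFrame * frame, GstBuffer * decoded)
{
  if (decoded == NULL) {
    /* nothing was produced for this frame: hand it back as a drop */
    return gst_video_decoder_drop_frame (decoder, frame);
  }

  frame->output_buffer = decoded;
  return gst_video_decoder_finish_frame (decoder, frame);
}
#endif
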
static gboolean
gst_video_decoder_transform_meta_default (GstVideoDecoder *
    decoder, GstVideoCodecFrame * frame, GstMeta * meta)
{
  const GstMetaInfo *info = meta->info;
  const gchar *const *tags;

  tags = gst_meta_api_type_get_tags (info->api);

  if (!tags || (g_strv_length ((gchar **) tags) == 1
          && gst_meta_api_type_has_tag (info->api,
              g_quark_from_string (GST_META_TAG_VIDEO_STR))))
    return TRUE;

  return FALSE;
}

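/* Illustrative sketch (not part of this file): a subclass overriding
 * ->transform_meta to also copy a custom, timing-independent meta on top of
 * the default policy above. "MY_CUSTOM_META_API_TYPE" is hypothetical, and
 * the chain-up assumes the usual parent_class pointer from G_DEFINE_TYPE. */
#if 0
static gboolean
my_decoder_transform_meta (GstVideoDecoder * decoder,
    GstVideoCodecFrame * frame, GstMeta * meta)
{
  if (meta->info->api == MY_CUSTOM_META_API_TYPE)
    return TRUE;

  /* fall back to the base class default for everything else */
  return GST_VIDEO_DECODER_CLASS (parent_class)->transform_meta (decoder,
      frame, meta);
}
#endif
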
typedef struct
{
  GstVideoDecoder *decoder;
  GstVideoCodecFrame *frame;
} CopyMetaData;

static gboolean
foreach_metadata (GstBuffer * inbuf, GstMeta ** meta, gpointer user_data)
{
  CopyMetaData *data = user_data;
  GstVideoDecoder *decoder = data->decoder;
  GstVideoDecoderClass *klass = GST_VIDEO_DECODER_GET_CLASS (decoder);
  GstVideoCodecFrame *frame = data->frame;
  const GstMetaInfo *info = (*meta)->info;
  gboolean do_copy = FALSE;

  if (gst_meta_api_type_has_tag (info->api, _gst_meta_tag_memory)) {
    /* never call the transform_meta with memory specific metadata */
    GST_DEBUG_OBJECT (decoder, "not copying memory specific metadata %s",
        g_type_name (info->api));
    do_copy = FALSE;
  } else if (klass->transform_meta) {
    do_copy = klass->transform_meta (decoder, frame, *meta);
    GST_DEBUG_OBJECT (decoder, "transformed metadata %s: copy: %d",
        g_type_name (info->api), do_copy);
  }

  /* we only copy metadata when the subclass implemented a transform_meta
   * function and when it returns %TRUE */
  if (do_copy && info->transform_func) {
    GstMetaTransformCopy copy_data = { FALSE, 0, -1 };
    GST_DEBUG_OBJECT (decoder, "copy metadata %s", g_type_name (info->api));
    /* simply copy then */
    info->transform_func (frame->output_buffer, *meta, inbuf,
        _gst_meta_transform_copy, &copy_data);
  }
  return TRUE;
}

/**
 * gst_video_decoder_finish_frame:
 * @decoder: a #GstVideoDecoder
 * @frame: (transfer full): a decoded #GstVideoCodecFrame
 *
 * @frame should have a valid decoded data buffer, whose metadata fields
 * are then appropriately set according to frame data and pushed downstream.
 * If no output data is provided, @frame is considered skipped.
 * In any case, the frame is considered finished and released.
 *
 * After calling this function the output buffer of the frame is to be
 * considered read-only. This function will also change the metadata
 * of the buffer.
 *
 * Returns: a #GstFlowReturn resulting from sending data downstream
 */
GstFlowReturn
gst_video_decoder_finish_frame (GstVideoDecoder * decoder,
    GstVideoCodecFrame * frame)
{
  GstFlowReturn ret = GST_FLOW_OK;
  GstVideoDecoderClass *decoder_class = GST_VIDEO_DECODER_GET_CLASS (decoder);
  GstVideoDecoderPrivate *priv = decoder->priv;
  GstBuffer *output_buffer;
  gboolean needs_reconfigure = FALSE;

  GST_LOG_OBJECT (decoder, "finish frame %p", frame);

  GST_VIDEO_DECODER_STREAM_LOCK (decoder);

  needs_reconfigure = gst_pad_check_reconfigure (decoder->srcpad);
  if (G_UNLIKELY (priv->output_state_changed || (priv->output_state
              && needs_reconfigure))) {
    if (!gst_video_decoder_negotiate_unlocked (decoder)) {
      gst_pad_mark_reconfigure (decoder->srcpad);
      if (GST_PAD_IS_FLUSHING (decoder->srcpad))
        ret = GST_FLOW_FLUSHING;
      else
        ret = GST_FLOW_NOT_NEGOTIATED;
      goto done;
    }
  }

  gst_video_decoder_prepare_finish_frame (decoder, frame, FALSE);
  priv->processed++;

  if (priv->tags_changed) {
    GstEvent *tags_event;

    tags_event = gst_video_decoder_create_merged_tags_event (decoder);

    if (tags_event != NULL)
      gst_video_decoder_push_event (decoder, tags_event);

    priv->tags_changed = FALSE;
  }

  /* no buffer data means this frame is skipped */
  if (!frame->output_buffer || GST_VIDEO_CODEC_FRAME_IS_DECODE_ONLY (frame)) {
    GST_DEBUG_OBJECT (decoder, "skipping frame %" GST_TIME_FORMAT,
        GST_TIME_ARGS (frame->pts));
    goto done;
  }

  /* We need a writable buffer for the metadata changes below */
  output_buffer = frame->output_buffer =
      gst_buffer_make_writable (frame->output_buffer);

  GST_BUFFER_FLAG_UNSET (output_buffer, GST_BUFFER_FLAG_DELTA_UNIT);

  GST_BUFFER_PTS (output_buffer) = frame->pts;
  GST_BUFFER_DTS (output_buffer) = GST_CLOCK_TIME_NONE;
  GST_BUFFER_DURATION (output_buffer) = frame->duration;

  GST_BUFFER_OFFSET (output_buffer) = GST_BUFFER_OFFSET_NONE;
  GST_BUFFER_OFFSET_END (output_buffer) = GST_BUFFER_OFFSET_NONE;

  if (priv->discont) {
    GST_BUFFER_FLAG_SET (output_buffer, GST_BUFFER_FLAG_DISCONT);
  }

  if (decoder_class->transform_meta) {
    if (G_LIKELY (frame->input_buffer)) {
      CopyMetaData data;

      data.decoder = decoder;
      data.frame = frame;
      gst_buffer_foreach_meta (frame->input_buffer, foreach_metadata, &data);
    } else {
      GST_WARNING_OBJECT (decoder,
          "Can't copy metadata because input frame disappeared");
    }
  }

  /* Get an additional ref to the buffer, which is going to be pushed
   * downstream, the original ref is owned by the frame
   */
  output_buffer = gst_buffer_ref (output_buffer);

  /* Release frame so the buffer is writable when we push it downstream
   * if possible, i.e. if the subclass does not hold additional references
   * to the frame
   */
  gst_video_decoder_release_frame (decoder, frame);
  frame = NULL;

  if (decoder->output_segment.rate < 0.0
      && !(decoder->output_segment.flags & GST_SEEK_FLAG_TRICKMODE_KEY_UNITS)) {
    GST_LOG_OBJECT (decoder, "queued frame");
    priv->output_queued = g_list_prepend (priv->output_queued, output_buffer);
  } else {
    ret = gst_video_decoder_clip_and_push_buf (decoder, output_buffer);
  }

done:
  if (frame)
    gst_video_decoder_release_frame (decoder, frame);
  GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
  return ret;
}

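/* Usage sketch (hypothetical subclass code, not part of this file): the
 * typical shape of a handle_frame() implementation, allocating an output
 * buffer from the negotiated pool, filling it, and handing the frame back
 * with gst_video_decoder_finish_frame(). "my_decoder_fill_picture" is an
 * assumed helper; output state/negotiation is assumed to be set up already. */
#if 0
static GstFlowReturn
my_decoder_handle_frame (GstVideoDecoder * decoder, GstVideoCodecFrame * frame)
{
  GstFlowReturn ret;

  ret = gst_video_decoder_allocate_output_frame (decoder, frame);
  if (ret != GST_FLOW_OK) {
    gst_video_decoder_drop_frame (decoder, frame);
    return ret;
  }

  if (!my_decoder_fill_picture (decoder, frame->input_buffer,
          frame->output_buffer))
    return gst_video_decoder_drop_frame (decoder, frame);

  /* pushes downstream (or queues it in reverse playback) and releases @frame */
  return gst_video_decoder_finish_frame (decoder, frame);
}
#endif
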
/* With stream lock, takes the frame reference */
static GstFlowReturn
gst_video_decoder_clip_and_push_buf (GstVideoDecoder * decoder, GstBuffer * buf)
{
  GstFlowReturn ret = GST_FLOW_OK;
  GstVideoDecoderPrivate *priv = decoder->priv;
  guint64 start, stop;
  guint64 cstart, cstop;
  GstSegment *segment;
  GstClockTime duration;

  /* Check for clipping */
  start = GST_BUFFER_PTS (buf);
  duration = GST_BUFFER_DURATION (buf);

  /* store that we have valid decoded data */
  priv->had_output_data = TRUE;

  stop = GST_CLOCK_TIME_NONE;

  if (GST_CLOCK_TIME_IS_VALID (start) && GST_CLOCK_TIME_IS_VALID (duration)) {
    stop = start + duration;
  } else if (GST_CLOCK_TIME_IS_VALID (start)
      && !GST_CLOCK_TIME_IS_VALID (duration)) {
    /* If we don't clip away buffers that far before the segment we
     * can cause the pipeline to lock up. This can happen if audio is
     * properly clipped, and thus the audio sink does not preroll yet
     * but the video sink prerolls because we already outputted a
     * buffer here... and then queues run full.
     *
     * In the worst case we will clip one buffer too many here now if no
     * framerate is given, no buffer duration is given and the actual
     * framerate is lower than 25fps */
    stop = start + 40 * GST_MSECOND;
  }

  segment = &decoder->output_segment;
  if (gst_segment_clip (segment, GST_FORMAT_TIME, start, stop, &cstart, &cstop)) {
    GST_BUFFER_PTS (buf) = cstart;

    if (stop != GST_CLOCK_TIME_NONE && GST_CLOCK_TIME_IS_VALID (duration))
      GST_BUFFER_DURATION (buf) = cstop - cstart;

    GST_LOG_OBJECT (decoder,
        "accepting buffer inside segment: %" GST_TIME_FORMAT " %"
        GST_TIME_FORMAT " seg %" GST_TIME_FORMAT " to %" GST_TIME_FORMAT
        " time %" GST_TIME_FORMAT,
        GST_TIME_ARGS (cstart),
        GST_TIME_ARGS (cstop),
        GST_TIME_ARGS (segment->start), GST_TIME_ARGS (segment->stop),
        GST_TIME_ARGS (segment->time));
  } else {
    GST_LOG_OBJECT (decoder,
        "dropping buffer outside segment: %" GST_TIME_FORMAT
        " %" GST_TIME_FORMAT
        " seg %" GST_TIME_FORMAT " to %" GST_TIME_FORMAT
        " time %" GST_TIME_FORMAT,
        GST_TIME_ARGS (start), GST_TIME_ARGS (stop),
        GST_TIME_ARGS (segment->start),
        GST_TIME_ARGS (segment->stop), GST_TIME_ARGS (segment->time));
    /* only check and return EOS if upstream still
     * in the same segment and interested as such */
    if (decoder->priv->in_out_segment_sync) {
      if (segment->rate >= 0) {
        if (GST_BUFFER_PTS (buf) >= segment->stop)
          ret = GST_FLOW_EOS;
      } else if (GST_BUFFER_PTS (buf) < segment->start) {
        ret = GST_FLOW_EOS;
      }
    }
    gst_buffer_unref (buf);
    goto done;
  }

  /* Is buffer too late (QoS) ? */
  if (priv->do_qos && GST_CLOCK_TIME_IS_VALID (priv->earliest_time)
      && GST_CLOCK_TIME_IS_VALID (cstart)) {
    GstClockTime deadline =
        gst_segment_to_running_time (segment, GST_FORMAT_TIME, cstart);
    if (GST_CLOCK_TIME_IS_VALID (deadline) && deadline < priv->earliest_time) {
      GST_WARNING_OBJECT (decoder,
          "Dropping frame due to QoS. start:%" GST_TIME_FORMAT " deadline:%"
          GST_TIME_FORMAT " earliest_time:%" GST_TIME_FORMAT,
          GST_TIME_ARGS (start), GST_TIME_ARGS (deadline),
          GST_TIME_ARGS (priv->earliest_time));
      gst_video_decoder_post_qos_drop (decoder, cstart);
      gst_buffer_unref (buf);
      priv->discont = TRUE;
      goto done;
    }
  }

  /* Set DISCONT flag here ! */

  if (priv->discont) {
    GST_DEBUG_OBJECT (decoder, "Setting discont on output buffer");
    GST_BUFFER_FLAG_SET (buf, GST_BUFFER_FLAG_DISCONT);
    priv->discont = FALSE;
  }

  /* update rate estimate */
  GST_OBJECT_LOCK (decoder);
  priv->bytes_out += gst_buffer_get_size (buf);
  if (GST_CLOCK_TIME_IS_VALID (duration)) {
    priv->time += duration;
  } else {
    /* FIXME : Use difference between current and previous outgoing
     * timestamp, and relate to difference between current and previous
     * bytes */
    /* better none than nothing valid */
    priv->time = GST_CLOCK_TIME_NONE;
  }
  GST_OBJECT_UNLOCK (decoder);

  GST_DEBUG_OBJECT (decoder, "pushing buffer %p of size %" G_GSIZE_FORMAT ", "
      "PTS %" GST_TIME_FORMAT ", dur %" GST_TIME_FORMAT, buf,
      gst_buffer_get_size (buf),
      GST_TIME_ARGS (GST_BUFFER_PTS (buf)),
      GST_TIME_ARGS (GST_BUFFER_DURATION (buf)));

  /* we got data, so note things are looking up again, reduce
   * the error count, if there is one */
  if (G_UNLIKELY (priv->error_count))
    priv->error_count = 0;

#ifndef GST_DISABLE_DEBUG
  if (G_UNLIKELY (priv->last_reset_time != GST_CLOCK_TIME_NONE)) {
    GstClockTime elapsed = gst_util_get_timestamp () - priv->last_reset_time;

    /* First buffer since reset, report how long we took */
    GST_INFO_OBJECT (decoder, "First buffer since flush took %" GST_TIME_FORMAT
        " to produce", GST_TIME_ARGS (elapsed));
    priv->last_reset_time = GST_CLOCK_TIME_NONE;
  }
#endif

  /* release STREAM_LOCK not to block upstream
   * while pushing buffer downstream */
  GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
  ret = gst_pad_push (decoder->srcpad, buf);
  GST_VIDEO_DECODER_STREAM_LOCK (decoder);

done:
  return ret;
}

/**
 * gst_video_decoder_add_to_frame:
 * @decoder: a #GstVideoDecoder
 * @n_bytes: the number of bytes to add
 *
 * Removes the next @n_bytes of input data and adds it to the currently
 * parsed frame.
 */
void
gst_video_decoder_add_to_frame (GstVideoDecoder * decoder, int n_bytes)
{
  GstVideoDecoderPrivate *priv = decoder->priv;
  GstBuffer *buf;

  GST_LOG_OBJECT (decoder, "add %d bytes to frame", n_bytes);

  if (n_bytes == 0)
    return;

  GST_VIDEO_DECODER_STREAM_LOCK (decoder);
  if (gst_adapter_available (priv->output_adapter) == 0) {
    priv->frame_offset =
        priv->input_offset - gst_adapter_available (priv->input_adapter);
  }
  buf = gst_adapter_take_buffer (priv->input_adapter, n_bytes);

  gst_adapter_push (priv->output_adapter, buf);
  GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
}

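/* Illustrative sketch (not part of this file): a ->parse() implementation for
 * a hypothetical format where each frame is prefixed by a 4-byte big-endian
 * payload size. It consumes input via gst_video_decoder_add_to_frame() and
 * signals a complete frame with gst_video_decoder_have_frame(); @at_eos
 * handling (flushing a trailing partial frame) is omitted for brevity. */
#if 0
static GstFlowReturn
my_decoder_parse (GstVideoDecoder * decoder, GstVideoCodecFrame * frame,
    GstAdapter * adapter, gboolean at_eos)
{
  gsize avail = gst_adapter_available (adapter);
  guint8 header[4];
  guint32 payload_size;

  if (avail < 4)
    return GST_VIDEO_DECODER_FLOW_NEED_DATA;

  gst_adapter_copy (adapter, header, 0, 4);
  payload_size = GST_READ_UINT32_BE (header);

  if (avail < 4 + (gsize) payload_size)
    return GST_VIDEO_DECODER_FLOW_NEED_DATA;

  /* move header + payload from the input adapter into the current frame */
  gst_video_decoder_add_to_frame (decoder, 4 + (int) payload_size);

  /* hand the collected frame over to handle_frame() */
  return gst_video_decoder_have_frame (decoder);
}
#endif
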
/**
 * gst_video_decoder_get_pending_frame_size:
 * @decoder: a #GstVideoDecoder
 *
 * Returns the number of bytes previously added to the current frame
 * by calling gst_video_decoder_add_to_frame().
 *
 * Returns: The number of bytes pending for the current frame
 *
 * Since: 1.4
 */
gsize
gst_video_decoder_get_pending_frame_size (GstVideoDecoder * decoder)
{
  GstVideoDecoderPrivate *priv = decoder->priv;
  gsize ret;

  GST_VIDEO_DECODER_STREAM_LOCK (decoder);
  ret = gst_adapter_available (priv->output_adapter);
  GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);

  GST_LOG_OBJECT (decoder, "Current pending frame has %" G_GSIZE_FORMAT "bytes",
      ret);

  return ret;
}

static guint64
gst_video_decoder_get_frame_duration (GstVideoDecoder * decoder,
    GstVideoCodecFrame * frame)
{
  GstVideoCodecState *state = decoder->priv->output_state;

  /* it's possible that we don't have a state yet when we are dropping the
   * initial buffers */
  if (state == NULL)
    return GST_CLOCK_TIME_NONE;

  if (state->info.fps_d == 0 || state->info.fps_n == 0) {
    return GST_CLOCK_TIME_NONE;
  }

  /* FIXME: For interlaced frames this needs to take into account
   * the number of valid fields in the frame
   */

  return gst_util_uint64_scale (GST_SECOND, state->info.fps_d,
      state->info.fps_n);
}

/**
 * gst_video_decoder_have_frame:
 * @decoder: a #GstVideoDecoder
 *
 * Gathers all data collected for currently parsed frame, gathers corresponding
 * metadata and passes it along for further processing, i.e. @handle_frame.
 *
 * Returns: a #GstFlowReturn
 */
GstFlowReturn
gst_video_decoder_have_frame (GstVideoDecoder * decoder)
{
  GstVideoDecoderPrivate *priv = decoder->priv;
  GstBuffer *buffer;
  int n_available;
  GstClockTime pts, dts, duration;
  guint flags;
  GstFlowReturn ret = GST_FLOW_OK;

  GST_LOG_OBJECT (decoder, "have_frame at offset %" G_GUINT64_FORMAT,
      priv->frame_offset);

  GST_VIDEO_DECODER_STREAM_LOCK (decoder);

  n_available = gst_adapter_available (priv->output_adapter);
  if (n_available) {
    buffer = gst_adapter_take_buffer (priv->output_adapter, n_available);
  } else {
    buffer = gst_buffer_new_and_alloc (0);
  }

  priv->current_frame->input_buffer = buffer;

  gst_video_decoder_get_buffer_info_at_offset (decoder,
      priv->frame_offset, &pts, &dts, &duration, &flags);

  GST_BUFFER_PTS (buffer) = pts;
  GST_BUFFER_DTS (buffer) = dts;
  GST_BUFFER_DURATION (buffer) = duration;
  GST_BUFFER_FLAGS (buffer) = flags;

  GST_LOG_OBJECT (decoder, "collected frame size %d, "
      "PTS %" GST_TIME_FORMAT ", DTS %" GST_TIME_FORMAT ", dur %"
      GST_TIME_FORMAT, n_available, GST_TIME_ARGS (pts), GST_TIME_ARGS (dts),
      GST_TIME_ARGS (duration));

  if (!GST_BUFFER_FLAG_IS_SET (buffer, GST_BUFFER_FLAG_DELTA_UNIT)) {
    GST_LOG_OBJECT (decoder, "Marking as sync point");
    GST_VIDEO_CODEC_FRAME_SET_SYNC_POINT (priv->current_frame);
  }

  /* In reverse playback, just capture and queue frames for later processing */
  if (decoder->input_segment.rate < 0.0) {
    priv->parse_gather =
        g_list_prepend (priv->parse_gather, priv->current_frame);
  } else {
    /* Otherwise, decode the frame, which gives away our ref */
    ret = gst_video_decoder_decode_frame (decoder, priv->current_frame);
  }
  /* Current frame is gone now, either way */
  priv->current_frame = NULL;

  GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);

  return ret;
}

/* Pass the frame in priv->current_frame through the
 * handle_frame() callback for decoding and passing to gvd_finish_frame(),
 * or dropping by passing to gvd_drop_frame() */
static GstFlowReturn
gst_video_decoder_decode_frame (GstVideoDecoder * decoder,
    GstVideoCodecFrame * frame)
{
  GstVideoDecoderPrivate *priv = decoder->priv;
  GstVideoDecoderClass *decoder_class;
  GstFlowReturn ret = GST_FLOW_OK;

  decoder_class = GST_VIDEO_DECODER_GET_CLASS (decoder);

  /* FIXME : This should only have to be checked once (either the subclass has an
   * implementation, or it doesn't) */
  g_return_val_if_fail (decoder_class->handle_frame != NULL, GST_FLOW_ERROR);

  frame->distance_from_sync = priv->distance_from_sync;
  priv->distance_from_sync++;
  frame->pts = GST_BUFFER_PTS (frame->input_buffer);
  frame->dts = GST_BUFFER_DTS (frame->input_buffer);
  frame->duration = GST_BUFFER_DURATION (frame->input_buffer);

  /* For keyframes, PTS = DTS + constant_offset, usually 0 to 3 frame
   * durations. */
  /* FIXME upstream can be quite wrong about the keyframe aspect,
   * so we could be going off here as well,
   * maybe let subclass decide if it really is/was a keyframe */
  if (GST_VIDEO_CODEC_FRAME_IS_SYNC_POINT (frame) &&
      GST_CLOCK_TIME_IS_VALID (frame->pts)
      && GST_CLOCK_TIME_IS_VALID (frame->dts)) {
    /* just in case they are not equal as might ideally be,
     * e.g. quicktime has a (positive) delta approach */
    priv->pts_delta = frame->pts - frame->dts;
    GST_DEBUG_OBJECT (decoder, "PTS delta %d ms",
        (gint) (priv->pts_delta / GST_MSECOND));
  }

  frame->abidata.ABI.ts = frame->dts;
  frame->abidata.ABI.ts2 = frame->pts;

  GST_LOG_OBJECT (decoder, "PTS %" GST_TIME_FORMAT ", DTS %" GST_TIME_FORMAT
      ", dist %d", GST_TIME_ARGS (frame->pts), GST_TIME_ARGS (frame->dts),
      frame->distance_from_sync);

  gst_video_codec_frame_ref (frame);
  priv->frames = g_list_append (priv->frames, frame);

  if (g_list_length (priv->frames) > 10) {
    GST_DEBUG_OBJECT (decoder, "decoder frame list getting long: %d frames,"
        "possible internal leaking?", g_list_length (priv->frames));
  }

  frame->deadline =
      gst_segment_to_running_time (&decoder->input_segment, GST_FORMAT_TIME,
      frame->pts);

  /* do something with frame */
  ret = decoder_class->handle_frame (decoder, frame);
  if (ret != GST_FLOW_OK)
    GST_DEBUG_OBJECT (decoder, "flow error %s", gst_flow_get_name (ret));

  /* the frame has either been added to parse_gather or sent to
     handle frame so there is no need to unref it */
  return ret;
}

/**
 * gst_video_decoder_get_output_state:
 * @decoder: a #GstVideoDecoder
 *
 * Get the #GstVideoCodecState currently describing the output stream.
 *
 * Returns: (transfer full): #GstVideoCodecState describing format of video data.
 */
GstVideoCodecState *
gst_video_decoder_get_output_state (GstVideoDecoder * decoder)
{
  GstVideoCodecState *state = NULL;

  GST_OBJECT_LOCK (decoder);
  if (decoder->priv->output_state)
    state = gst_video_codec_state_ref (decoder->priv->output_state);
  GST_OBJECT_UNLOCK (decoder);

  return state;
}

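/* Usage sketch (hypothetical subclass code, not part of this file): consulting
 * the negotiated output GstVideoInfo, e.g. to size an internal scratch buffer.
 * Note the (transfer full) return value must be unreffed. */
#if 0
static gsize
my_decoder_get_output_size (GstVideoDecoder * decoder)
{
  GstVideoCodecState *state = gst_video_decoder_get_output_state (decoder);
  gsize size = 0;

  if (state != NULL) {
    size = GST_VIDEO_INFO_SIZE (&state->info);
    gst_video_codec_state_unref (state);
  }

  return size;
}
#endif
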
/**
 * gst_video_decoder_set_output_state:
 * @decoder: a #GstVideoDecoder
 * @fmt: a #GstVideoFormat
 * @width: The width in pixels
 * @height: The height in pixels
 * @reference: (allow-none) (transfer none): An optional reference #GstVideoCodecState
 *
 * Creates a new #GstVideoCodecState with the specified @fmt, @width and @height
 * as the output state for the decoder.
 * Any previously set output state on @decoder will be replaced by the newly
 * created one.
 *
 * If the subclass wishes to copy over existing fields (like pixel aspect ratio,
 * or framerate) from an existing #GstVideoCodecState, it can be provided as a
 * @reference.
 *
 * If the subclass wishes to override some fields from the output state (like
 * pixel-aspect-ratio or framerate) it can do so on the returned #GstVideoCodecState.
 *
 * The new output state will only take effect (set on pads and buffers) starting
 * from the next call to #gst_video_decoder_finish_frame().
 *
 * Returns: (transfer full): the newly configured output state.
 */
GstVideoCodecState *
gst_video_decoder_set_output_state (GstVideoDecoder * decoder,
    GstVideoFormat fmt, guint width, guint height,
    GstVideoCodecState * reference)
{
  return gst_video_decoder_set_interlaced_output_state (decoder, fmt,
      GST_VIDEO_INTERLACE_MODE_PROGRESSIVE, width, height, reference);
}

/**
|
|
|
|
* gst_video_decoder_set_interlaced_output_state:
|
|
|
|
* @decoder: a #GstVideoDecoder
|
|
|
|
* @fmt: a #GstVideoFormat
|
|
|
|
* @width: The width in pixels
|
|
|
|
* @height: The height in pixels
|
|
|
|
* @mode: A #GstVideoInterlaceMode
|
|
|
|
* @reference: (allow-none) (transfer none): An optional reference #GstVideoCodecState
|
|
|
|
*
|
|
|
|
* Same as #gst_video_decoder_set_output_state() but also allows you to also set
|
|
|
|
* the interlacing mode.
|
|
|
|
*
|
|
|
|
* Returns: (transfer full): the newly configured output state.
|
|
|
|
*
|
|
|
|
* Since: 1.16.
|
|
|
|
*/
|
|
|
|
GstVideoCodecState *
|
|
|
|
gst_video_decoder_set_interlaced_output_state (GstVideoDecoder * decoder,
|
|
|
|
GstVideoFormat fmt, GstVideoInterlaceMode mode, guint width, guint height,
|
|
|
|
GstVideoCodecState * reference)
|
2012-03-07 09:18:49 +00:00
|
|
|
{
|
|
|
|
GstVideoDecoderPrivate *priv = decoder->priv;
|
|
|
|
GstVideoCodecState *state;
|
|
|
|
|
|
|
|
GST_DEBUG_OBJECT (decoder, "fmt:%d, width:%d, height:%d, reference:%p",
|
|
|
|
fmt, width, height, reference);
|
|
|
|
|
|
|
|
/* Create the new output state */
|
2018-07-06 13:48:35 +00:00
|
|
|
state = _new_output_state (fmt, mode, width, height, reference);
|
2016-11-23 18:10:34 +00:00
|
|
|
if (!state)
|
|
|
|
return NULL;
|
2012-03-07 09:18:49 +00:00
|
|
|
|
|
|
|
GST_VIDEO_DECODER_STREAM_LOCK (decoder);
|
2012-09-29 00:07:43 +00:00
|
|
|
|
|
|
|
GST_OBJECT_LOCK (decoder);
|
2012-03-07 09:18:49 +00:00
|
|
|
/* Replace existing output state by new one */
|
|
|
|
if (priv->output_state)
|
|
|
|
gst_video_codec_state_unref (priv->output_state);
|
|
|
|
priv->output_state = gst_video_codec_state_ref (state);
|
|
|
|
|
2012-09-24 09:16:09 +00:00
|
|
|
if (priv->output_state != NULL && priv->output_state->info.fps_n > 0) {
|
2012-09-29 00:07:43 +00:00
|
|
|
priv->qos_frame_duration =
|
2012-09-24 09:16:09 +00:00
|
|
|
gst_util_uint64_scale (GST_SECOND, priv->output_state->info.fps_d,
|
|
|
|
priv->output_state->info.fps_n);
|
|
|
|
} else {
|
2012-09-29 00:07:43 +00:00
|
|
|
priv->qos_frame_duration = 0;
|
2012-09-24 09:16:09 +00:00
|
|
|
}
|
2012-03-07 09:18:49 +00:00
|
|
|
priv->output_state_changed = TRUE;
|
2012-09-24 09:16:09 +00:00
|
|
|
GST_OBJECT_UNLOCK (decoder);
|
|
|
|
|
2012-09-29 00:07:43 +00:00
|
|
|
GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
|
|
|
|
|
2012-03-07 09:18:49 +00:00
|
|
|
return state;
|
|
|
|
}

/**
 * gst_video_decoder_get_oldest_frame:
 * @decoder: a #GstVideoDecoder
 *
 * Get the oldest pending unfinished #GstVideoCodecFrame.
 *
 * Returns: (transfer full): oldest pending unfinished #GstVideoCodecFrame.
 */
GstVideoCodecFrame *
gst_video_decoder_get_oldest_frame (GstVideoDecoder * decoder)
{
  GstVideoCodecFrame *frame = NULL;

  GST_VIDEO_DECODER_STREAM_LOCK (decoder);
  if (decoder->priv->frames)
    frame = gst_video_codec_frame_ref (decoder->priv->frames->data);
  GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);

  return frame;
}

/**
 * gst_video_decoder_get_frame:
 * @decoder: a #GstVideoDecoder
 * @frame_number: system_frame_number of a frame
 *
 * Get a pending unfinished #GstVideoCodecFrame.
 *
 * Returns: (transfer full): pending unfinished #GstVideoCodecFrame identified by @frame_number.
 */
GstVideoCodecFrame *
gst_video_decoder_get_frame (GstVideoDecoder * decoder, int frame_number)
{
  GList *g;
  GstVideoCodecFrame *frame = NULL;

  GST_DEBUG_OBJECT (decoder, "frame_number : %d", frame_number);

  GST_VIDEO_DECODER_STREAM_LOCK (decoder);
  for (g = decoder->priv->frames; g; g = g->next) {
    GstVideoCodecFrame *tmp = g->data;

    if (tmp->system_frame_number == frame_number) {
      frame = gst_video_codec_frame_ref (tmp);
      break;
    }
  }
  GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);

  return frame;
}
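
/* Illustrative sketch only, not part of the base class: a subclass that
 * decodes asynchronously can look a frame up again by the system frame
 * number it stored alongside its hardware/worker job; the job variable and
 * its fields are hypothetical.
 *
 *   GstVideoCodecFrame *frame;
 *
 *   frame = gst_video_decoder_get_frame (decoder, job->system_frame_number);
 *   if (frame) {
 *     frame->output_buffer = gst_buffer_ref (job->decoded_buffer);
 *     gst_video_decoder_finish_frame (decoder, frame);
 *   }
 */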

/**
 * gst_video_decoder_get_frames:
 * @decoder: a #GstVideoDecoder
 *
 * Get all pending unfinished #GstVideoCodecFrame
 *
 * Returns: (transfer full) (element-type GstVideoCodecFrame): pending unfinished #GstVideoCodecFrame.
 */
GList *
gst_video_decoder_get_frames (GstVideoDecoder * decoder)
{
  GList *frames;

  GST_VIDEO_DECODER_STREAM_LOCK (decoder);
  frames = g_list_copy (decoder->priv->frames);
  g_list_foreach (frames, (GFunc) gst_video_codec_frame_ref, NULL);
  GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);

  return frames;
}
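
/* Illustrative sketch only, not part of the base class: the returned list
 * holds a reference to every pending frame, so both the frames and the list
 * must be released after use (e.g. when inspecting the queue while draining).
 *
 *   GList *frames, *l;
 *
 *   frames = gst_video_decoder_get_frames (decoder);
 *   for (l = frames; l; l = l->next) {
 *     GstVideoCodecFrame *frame = l->data;
 *
 *     GST_DEBUG_OBJECT (decoder, "pending frame %u", frame->system_frame_number);
 *   }
 *   g_list_free_full (frames, (GDestroyNotify) gst_video_codec_frame_unref);
 */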

static gboolean
gst_video_decoder_decide_allocation_default (GstVideoDecoder * decoder,
    GstQuery * query)
{
  GstCaps *outcaps = NULL;
  GstBufferPool *pool = NULL;
  guint size, min, max;
  GstAllocator *allocator = NULL;
  GstAllocationParams params;
  GstStructure *config;
  gboolean update_pool, update_allocator;
  GstVideoInfo vinfo;

  gst_query_parse_allocation (query, &outcaps, NULL);
  gst_video_info_init (&vinfo);
  if (outcaps)
    gst_video_info_from_caps (&vinfo, outcaps);

  /* we got configuration from our peer or the decide_allocation method,
   * parse them */
  if (gst_query_get_n_allocation_params (query) > 0) {
    /* try the allocator */
    gst_query_parse_nth_allocation_param (query, 0, &allocator, &params);
    update_allocator = TRUE;
  } else {
    allocator = NULL;
    gst_allocation_params_init (&params);
    update_allocator = FALSE;
  }

  if (gst_query_get_n_allocation_pools (query) > 0) {
    gst_query_parse_nth_allocation_pool (query, 0, &pool, &size, &min, &max);
    size = MAX (size, vinfo.size);
    update_pool = TRUE;
  } else {
    pool = NULL;
    size = vinfo.size;
    min = max = 0;

    update_pool = FALSE;
  }

  if (pool == NULL) {
    /* no pool, we can make our own */
    GST_DEBUG_OBJECT (decoder, "no pool, making new pool");
    pool = gst_video_buffer_pool_new ();
  }

  /* now configure */
  config = gst_buffer_pool_get_config (pool);
  gst_buffer_pool_config_set_params (config, outcaps, size, min, max);
  gst_buffer_pool_config_set_allocator (config, allocator, &params);

  GST_DEBUG_OBJECT (decoder,
      "setting config %" GST_PTR_FORMAT " in pool %" GST_PTR_FORMAT, config,
      pool);
  if (!gst_buffer_pool_set_config (pool, config)) {
    config = gst_buffer_pool_get_config (pool);

    /* If the changes are not acceptable, fall back to a generic pool */
    if (!gst_buffer_pool_config_validate_params (config, outcaps, size, min,
            max)) {
      GST_DEBUG_OBJECT (decoder, "unsupported pool, making new pool");

      gst_object_unref (pool);
      pool = gst_video_buffer_pool_new ();
      gst_buffer_pool_config_set_params (config, outcaps, size, min, max);
      gst_buffer_pool_config_set_allocator (config, allocator, &params);
    }

    if (!gst_buffer_pool_set_config (pool, config))
      goto config_failed;
  }

  if (update_allocator)
    gst_query_set_nth_allocation_param (query, 0, allocator, &params);
  else
    gst_query_add_allocation_param (query, allocator, &params);
  if (allocator)
    gst_object_unref (allocator);

  if (update_pool)
    gst_query_set_nth_allocation_pool (query, 0, pool, size, min, max);
  else
    gst_query_add_allocation_pool (query, pool, size, min, max);

  if (pool)
    gst_object_unref (pool);

  return TRUE;

config_failed:
  if (allocator)
    gst_object_unref (allocator);
  if (pool)
    gst_object_unref (pool);
  GST_ELEMENT_ERROR (decoder, RESOURCE, SETTINGS,
      ("Failed to configure the buffer pool"),
      ("Configuration is most likely invalid, please report this issue."));
  return FALSE;
}
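
/* Illustrative sketch only, not part of the base class: a subclass that needs
 * additional buffers can override decide_allocation(), chain up to the default
 * implementation above and then adjust the negotiated pool configuration; the
 * my_dec_* name and the extra-buffer count are hypothetical.
 *
 *   static gboolean
 *   my_dec_decide_allocation (GstVideoDecoder * decoder, GstQuery * query)
 *   {
 *     GstBufferPool *pool;
 *     guint size, min, max;
 *
 *     if (!GST_VIDEO_DECODER_CLASS (parent_class)->decide_allocation (decoder,
 *             query))
 *       return FALSE;
 *
 *     gst_query_parse_nth_allocation_pool (query, 0, &pool, &size, &min, &max);
 *     gst_query_set_nth_allocation_pool (query, 0, pool, size, min + 4, max);
 *     gst_object_unref (pool);
 *
 *     return TRUE;
 *   }
 */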

static gboolean
gst_video_decoder_propose_allocation_default (GstVideoDecoder * decoder,
    GstQuery * query)
{
  return TRUE;
}

static gboolean
gst_video_decoder_negotiate_pool (GstVideoDecoder * decoder, GstCaps * caps)
{
  GstVideoDecoderClass *klass;
  GstQuery *query = NULL;
  GstBufferPool *pool = NULL;
  GstAllocator *allocator;
  GstAllocationParams params;
  gboolean ret = TRUE;

  klass = GST_VIDEO_DECODER_GET_CLASS (decoder);

  query = gst_query_new_allocation (caps, TRUE);

  GST_DEBUG_OBJECT (decoder, "do query ALLOCATION");

  if (!gst_pad_peer_query (decoder->srcpad, query)) {
    GST_DEBUG_OBJECT (decoder, "didn't get downstream ALLOCATION hints");
  }

  g_assert (klass->decide_allocation != NULL);
  ret = klass->decide_allocation (decoder, query);

  GST_DEBUG_OBJECT (decoder, "ALLOCATION (%d) params: %" GST_PTR_FORMAT, ret,
      query);

  if (!ret)
    goto no_decide_allocation;

  /* we got configuration from our peer or the decide_allocation method,
   * parse them */
  if (gst_query_get_n_allocation_params (query) > 0) {
    gst_query_parse_nth_allocation_param (query, 0, &allocator, &params);
  } else {
    allocator = NULL;
    gst_allocation_params_init (&params);
  }

  if (gst_query_get_n_allocation_pools (query) > 0)
    gst_query_parse_nth_allocation_pool (query, 0, &pool, NULL, NULL, NULL);
  if (!pool) {
    if (allocator)
      gst_object_unref (allocator);
    ret = FALSE;
    goto no_decide_allocation;
  }

  if (decoder->priv->allocator)
    gst_object_unref (decoder->priv->allocator);
  decoder->priv->allocator = allocator;
  decoder->priv->params = params;

  if (decoder->priv->pool) {
    /* do not set the bufferpool to inactive here, it will be done
     * on its finalize function. As the videodecoder does late renegotiation
     * it might happen that some element downstream is already using this
     * same bufferpool and deactivating it will make it fail.
     * Happens when a downstream element changes from passthrough to
     * non-passthrough and gets this same bufferpool to use */
    GST_DEBUG_OBJECT (decoder, "unref pool %" GST_PTR_FORMAT,
        decoder->priv->pool);
    gst_object_unref (decoder->priv->pool);
  }
  decoder->priv->pool = pool;

  /* and activate */
  GST_DEBUG_OBJECT (decoder, "activate pool %" GST_PTR_FORMAT, pool);
  gst_buffer_pool_set_active (pool, TRUE);

done:
  if (query)
    gst_query_unref (query);

  return ret;

  /* Errors */
no_decide_allocation:
  {
    GST_WARNING_OBJECT (decoder, "Subclass failed to decide allocation");
    goto done;
  }
}

static gboolean
gst_video_decoder_negotiate_default (GstVideoDecoder * decoder)
{
  GstVideoCodecState *state = decoder->priv->output_state;
  gboolean ret = TRUE;
  GstVideoCodecFrame *frame;
  GstCaps *prevcaps;
  GstCaps *incaps;

  if (!state) {
    GST_DEBUG_OBJECT (decoder,
        "Trying to negotiate the pool without setting the output format");
    ret = gst_video_decoder_negotiate_pool (decoder, NULL);
    goto done;
  }

  g_return_val_if_fail (GST_VIDEO_INFO_WIDTH (&state->info) != 0, FALSE);
  g_return_val_if_fail (GST_VIDEO_INFO_HEIGHT (&state->info) != 0, FALSE);

  /* If the base class didn't set any multiview params, assume mono
   * now */
  if (GST_VIDEO_INFO_MULTIVIEW_MODE (&state->info) ==
      GST_VIDEO_MULTIVIEW_MODE_NONE) {
    GST_VIDEO_INFO_MULTIVIEW_MODE (&state->info) =
        GST_VIDEO_MULTIVIEW_MODE_MONO;
    GST_VIDEO_INFO_MULTIVIEW_FLAGS (&state->info) =
        GST_VIDEO_MULTIVIEW_FLAGS_NONE;
  }

  GST_DEBUG_OBJECT (decoder, "output_state par %d/%d fps %d/%d",
      state->info.par_n, state->info.par_d,
      state->info.fps_n, state->info.fps_d);

  if (state->caps == NULL)
    state->caps = gst_video_info_to_caps (&state->info);

  incaps = gst_pad_get_current_caps (GST_VIDEO_DECODER_SINK_PAD (decoder));
  if (incaps) {
    GstStructure *in_struct;

    in_struct = gst_caps_get_structure (incaps, 0);
    if (gst_structure_has_field (in_struct, "mastering-display-info") ||
        gst_structure_has_field (in_struct, "content-light-level")) {
      const gchar *s;

      /* prefer upstream information */
      state->caps = gst_caps_make_writable (state->caps);
      if ((s = gst_structure_get_string (in_struct, "mastering-display-info"))) {
        gst_caps_set_simple (state->caps,
            "mastering-display-info", G_TYPE_STRING, s, NULL);
      }

      if ((s = gst_structure_get_string (in_struct, "content-light-level"))) {
        gst_caps_set_simple (state->caps,
            "content-light-level", G_TYPE_STRING, s, NULL);
      }
    }

    gst_caps_unref (incaps);
  }

  if (state->allocation_caps == NULL)
    state->allocation_caps = gst_caps_ref (state->caps);

  GST_DEBUG_OBJECT (decoder, "setting caps %" GST_PTR_FORMAT, state->caps);

  /* Push all pending pre-caps events of the oldest frame before
   * setting caps */
  frame = decoder->priv->frames ? decoder->priv->frames->data : NULL;
  if (frame || decoder->priv->current_frame_events) {
    GList **events, *l;

    if (frame) {
      events = &frame->events;
    } else {
      events = &decoder->priv->current_frame_events;
    }

    for (l = g_list_last (*events); l;) {
      GstEvent *event = GST_EVENT (l->data);
      GList *tmp;

      if (GST_EVENT_TYPE (event) < GST_EVENT_CAPS) {
        gst_video_decoder_push_event (decoder, event);
        tmp = l;
        l = l->prev;
        *events = g_list_delete_link (*events, tmp);
      } else {
        l = l->prev;
      }
    }
  }

  prevcaps = gst_pad_get_current_caps (decoder->srcpad);
  if (!prevcaps || !gst_caps_is_equal (prevcaps, state->caps)) {
    if (!prevcaps) {
      GST_DEBUG_OBJECT (decoder, "decoder src pad has currently NULL caps");
    }
    ret = gst_pad_set_caps (decoder->srcpad, state->caps);
  } else {
    ret = TRUE;
    GST_DEBUG_OBJECT (decoder,
        "current src pad and output state caps are the same");
  }
  if (prevcaps)
    gst_caps_unref (prevcaps);

  if (!ret)
    goto done;
  decoder->priv->output_state_changed = FALSE;
  /* Negotiate pool */
  ret = gst_video_decoder_negotiate_pool (decoder, state->allocation_caps);

done:
  return ret;
}

static gboolean
gst_video_decoder_negotiate_unlocked (GstVideoDecoder * decoder)
{
  GstVideoDecoderClass *klass = GST_VIDEO_DECODER_GET_CLASS (decoder);
  gboolean ret = TRUE;

  if (G_LIKELY (klass->negotiate))
    ret = klass->negotiate (decoder);

  return ret;
}

/**
 * gst_video_decoder_negotiate:
 * @decoder: a #GstVideoDecoder
 *
 * Negotiate with downstream elements to the currently configured #GstVideoCodecState.
 * Unmarks GST_PAD_FLAG_NEED_RECONFIGURE in any case, but marks it again
 * if negotiation fails.
 *
 * Returns: %TRUE if the negotiation succeeded, else %FALSE.
 */
gboolean
gst_video_decoder_negotiate (GstVideoDecoder * decoder)
{
  GstVideoDecoderClass *klass;
  gboolean ret = TRUE;

  g_return_val_if_fail (GST_IS_VIDEO_DECODER (decoder), FALSE);

  klass = GST_VIDEO_DECODER_GET_CLASS (decoder);

  GST_VIDEO_DECODER_STREAM_LOCK (decoder);
  gst_pad_check_reconfigure (decoder->srcpad);
  if (klass->negotiate) {
    ret = klass->negotiate (decoder);
    if (!ret)
      gst_pad_mark_reconfigure (decoder->srcpad);
  }
  GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);

  return ret;
}
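
/* Illustrative sketch only, not part of the base class: after (re)configuring
 * the output state a subclass may negotiate immediately instead of waiting for
 * the next finish_frame(); the format and dimensions are hypothetical.
 *
 *   GstVideoCodecState *out_state;
 *
 *   out_state = gst_video_decoder_set_output_state (decoder,
 *       GST_VIDEO_FORMAT_NV12, width, height, input_state);
 *   gst_video_codec_state_unref (out_state);
 *
 *   if (!gst_video_decoder_negotiate (decoder)) {
 *     GST_ERROR_OBJECT (decoder, "Failed to negotiate with downstream");
 *     return GST_FLOW_NOT_NEGOTIATED;
 *   }
 */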

/**
 * gst_video_decoder_allocate_output_buffer:
 * @decoder: a #GstVideoDecoder
 *
 * Helper function that allocates a buffer to hold a video frame for @decoder's
 * current #GstVideoCodecState.
 *
 * You should use gst_video_decoder_allocate_output_frame() instead of this
 * function, if possible at all.
 *
 * Returns: (transfer full): allocated buffer, or %NULL if no buffer could be
 * allocated (e.g. when downstream is flushing or shutting down)
 */
GstBuffer *
gst_video_decoder_allocate_output_buffer (GstVideoDecoder * decoder)
{
  GstFlowReturn flow;
  GstBuffer *buffer = NULL;
  gboolean needs_reconfigure = FALSE;

  GST_DEBUG ("alloc src buffer");

  GST_VIDEO_DECODER_STREAM_LOCK (decoder);
  needs_reconfigure = gst_pad_check_reconfigure (decoder->srcpad);
  if (G_UNLIKELY (!decoder->priv->output_state
          || decoder->priv->output_state_changed || needs_reconfigure)) {
    if (!gst_video_decoder_negotiate_unlocked (decoder)) {
      if (decoder->priv->output_state) {
        GST_DEBUG_OBJECT (decoder, "Failed to negotiate, fallback allocation");
        gst_pad_mark_reconfigure (decoder->srcpad);
        goto fallback;
      } else {
        GST_DEBUG_OBJECT (decoder, "Failed to negotiate, output_buffer=NULL");
        goto failed_allocation;
      }
    }
  }

  flow = gst_buffer_pool_acquire_buffer (decoder->priv->pool, &buffer, NULL);

  if (flow != GST_FLOW_OK) {
    GST_INFO_OBJECT (decoder, "couldn't allocate output buffer, flow %s",
        gst_flow_get_name (flow));
    if (decoder->priv->output_state && decoder->priv->output_state->info.size)
      goto fallback;
    else
      goto failed_allocation;
  }
  GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);

  return buffer;

fallback:
  GST_INFO_OBJECT (decoder,
      "Fallback allocation, creating new buffer which doesn't belong to any buffer pool");
  buffer =
      gst_buffer_new_allocate (NULL, decoder->priv->output_state->info.size,
      NULL);

failed_allocation:
  GST_ERROR_OBJECT (decoder, "Failed to allocate the buffer");
  GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);

  return buffer;
}

/**
 * gst_video_decoder_allocate_output_frame:
 * @decoder: a #GstVideoDecoder
 * @frame: a #GstVideoCodecFrame
 *
 * Helper function that allocates a buffer to hold a video frame for @decoder's
 * current #GstVideoCodecState. The subclass should already have configured the
 * video state and set the src pad caps.
 *
 * The buffer allocated here is owned by the frame and you should only
 * keep references to the frame, not the buffer.
 *
 * Returns: %GST_FLOW_OK if an output buffer could be allocated
 */
GstFlowReturn
gst_video_decoder_allocate_output_frame (GstVideoDecoder *
    decoder, GstVideoCodecFrame * frame)
{
  return gst_video_decoder_allocate_output_frame_with_params (decoder, frame,
      NULL);
}

/**
 * gst_video_decoder_allocate_output_frame_with_params:
 * @decoder: a #GstVideoDecoder
 * @frame: a #GstVideoCodecFrame
 * @params: a #GstBufferPoolAcquireParams
 *
 * Same as gst_video_decoder_allocate_output_frame() except it allows passing
 * #GstBufferPoolAcquireParams to the underlying gst_buffer_pool_acquire_buffer()
 * call.
 *
 * Returns: %GST_FLOW_OK if an output buffer could be allocated
 *
 * Since: 1.12
 */
GstFlowReturn
gst_video_decoder_allocate_output_frame_with_params (GstVideoDecoder *
    decoder, GstVideoCodecFrame * frame, GstBufferPoolAcquireParams * params)
{
  GstFlowReturn flow_ret;
  GstVideoCodecState *state;
  int num_bytes;
  gboolean needs_reconfigure = FALSE;

  g_return_val_if_fail (decoder->priv->output_state, GST_FLOW_NOT_NEGOTIATED);
  g_return_val_if_fail (frame->output_buffer == NULL, GST_FLOW_ERROR);

  GST_VIDEO_DECODER_STREAM_LOCK (decoder);

  state = decoder->priv->output_state;
  if (state == NULL) {
    g_warning ("Output state should be set before allocating frame");
    goto error;
  }
  num_bytes = GST_VIDEO_INFO_SIZE (&state->info);
  if (num_bytes == 0) {
    g_warning ("Frame size should not be 0");
    goto error;
  }

  needs_reconfigure = gst_pad_check_reconfigure (decoder->srcpad);
  if (G_UNLIKELY (decoder->priv->output_state_changed || needs_reconfigure)) {
    if (!gst_video_decoder_negotiate_unlocked (decoder)) {
      GST_DEBUG_OBJECT (decoder, "Failed to negotiate, fallback allocation");
      gst_pad_mark_reconfigure (decoder->srcpad);
    }
  }

  GST_LOG_OBJECT (decoder, "alloc buffer size %d", num_bytes);

  flow_ret = gst_buffer_pool_acquire_buffer (decoder->priv->pool,
      &frame->output_buffer, params);

  GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);

  return flow_ret;

error:
  GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
  return GST_FLOW_ERROR;
}
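
/* Illustrative sketch only, not part of the base class: a typical synchronous
 * handle_frame() implementation pairs the allocation above with
 * gst_video_decoder_finish_frame(); the my_dec_* names and my_dec_decode()
 * are hypothetical.
 *
 *   static GstFlowReturn
 *   my_dec_handle_frame (GstVideoDecoder * decoder, GstVideoCodecFrame * frame)
 *   {
 *     GstFlowReturn ret;
 *
 *     ret = gst_video_decoder_allocate_output_frame (decoder, frame);
 *     if (ret != GST_FLOW_OK)
 *       return gst_video_decoder_drop_frame (decoder, frame);
 *
 *     if (!my_dec_decode (decoder, frame->input_buffer, frame->output_buffer))
 *       return gst_video_decoder_drop_frame (decoder, frame);
 *
 *     return gst_video_decoder_finish_frame (decoder, frame);
 *   }
 */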

/**
 * gst_video_decoder_get_max_decode_time:
 * @decoder: a #GstVideoDecoder
 * @frame: a #GstVideoCodecFrame
 *
 * Determines the maximum possible decoding time for @frame that will
 * allow it to decode and arrive in time (as determined by QoS events).
 * In particular, a negative result means decoding in time is no longer possible
 * and the frame should therefore be finished as soon as possible, possibly by
 * skipping or dropping it.
 *
 * Returns: max decoding time.
 */
GstClockTimeDiff
gst_video_decoder_get_max_decode_time (GstVideoDecoder *
    decoder, GstVideoCodecFrame * frame)
{
  GstClockTimeDiff deadline;
  GstClockTime earliest_time;

  GST_OBJECT_LOCK (decoder);
  earliest_time = decoder->priv->earliest_time;
  if (GST_CLOCK_TIME_IS_VALID (earliest_time)
      && GST_CLOCK_TIME_IS_VALID (frame->deadline))
    deadline = GST_CLOCK_DIFF (earliest_time, frame->deadline);
  else
    deadline = G_MAXINT64;

  GST_LOG_OBJECT (decoder, "earliest %" GST_TIME_FORMAT
      ", frame deadline %" GST_TIME_FORMAT ", deadline %" GST_STIME_FORMAT,
      GST_TIME_ARGS (earliest_time), GST_TIME_ARGS (frame->deadline),
      GST_STIME_ARGS (deadline));

  GST_OBJECT_UNLOCK (decoder);

  return deadline;
}
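
/* Illustrative sketch only, not part of the base class: a subclass can consult
 * the remaining decode time before doing expensive work and skip frames that
 * cannot be decoded in time anyway; whether a frame may be skipped (e.g. it is
 * not a reference frame) is codec specific, and my_dec_frame_is_droppable()
 * is hypothetical.
 *
 *   GstClockTimeDiff deadline;
 *
 *   deadline = gst_video_decoder_get_max_decode_time (decoder, frame);
 *   if (deadline < 0 && my_dec_frame_is_droppable (frame))
 *     return gst_video_decoder_drop_frame (decoder, frame);
 */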

/**
 * gst_video_decoder_get_qos_proportion:
 * @decoder: a #GstVideoDecoder
 *
 * Returns: The current QoS proportion.
 *
 * Since: 1.0.3
 */
gdouble
gst_video_decoder_get_qos_proportion (GstVideoDecoder * decoder)
{
  gdouble proportion;

  g_return_val_if_fail (GST_IS_VIDEO_DECODER (decoder), 1.0);

  GST_OBJECT_LOCK (decoder);
  proportion = decoder->priv->proportion;
  GST_OBJECT_UNLOCK (decoder);

  return proportion;
}

GstFlowReturn
_gst_video_decoder_error (GstVideoDecoder * dec, gint weight,
    GQuark domain, gint code, gchar * txt, gchar * dbg, const gchar * file,
    const gchar * function, gint line)
{
  if (txt)
    GST_WARNING_OBJECT (dec, "error: %s", txt);
  if (dbg)
    GST_WARNING_OBJECT (dec, "error: %s", dbg);
  dec->priv->error_count += weight;
  dec->priv->discont = TRUE;
  if (dec->priv->max_errors >= 0 &&
      dec->priv->error_count > dec->priv->max_errors) {
    gst_element_message_full (GST_ELEMENT (dec), GST_MESSAGE_ERROR,
        domain, code, txt, dbg, file, function, line);
    return GST_FLOW_ERROR;
  } else {
    g_free (txt);
    g_free (dbg);
    return GST_FLOW_OK;
  }
}

/**
 * gst_video_decoder_set_max_errors:
 * @dec: a #GstVideoDecoder
 * @num: max tolerated errors
 *
 * Sets the number of tolerated decoder errors. Tolerated errors only result in
 * a warning; once more errors than tolerated have occurred, a fatal error is
 * returned. You can set -1 to never return fatal errors. Default is set to
 * GST_VIDEO_DECODER_MAX_ERRORS.
 *
 * The '-1' option was added in 1.4
 */
void
gst_video_decoder_set_max_errors (GstVideoDecoder * dec, gint num)
{
  g_return_if_fail (GST_IS_VIDEO_DECODER (dec));

  dec->priv->max_errors = num;
}
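
/* Illustrative sketch only, not part of the base class: subclasses normally
 * report recoverable stream problems through the GST_VIDEO_DECODER_ERROR()
 * macro (which calls _gst_video_decoder_error() above), so that an error only
 * becomes fatal once the configured maximum is exceeded. The error text and
 * threshold below are hypothetical.
 *
 *   GstFlowReturn ret = GST_FLOW_OK;
 *
 *   gst_video_decoder_set_max_errors (decoder, 10);
 *   ...
 *   GST_VIDEO_DECODER_ERROR (decoder, 1, STREAM, DECODE,
 *       ("Corrupted frame"), ("could not decode frame %u",
 *           frame->system_frame_number), ret);
 *   if (ret != GST_FLOW_OK)
 *     return ret;
 */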

/**
 * gst_video_decoder_get_max_errors:
 * @dec: a #GstVideoDecoder
 *
 * Returns: currently configured decoder tolerated error count.
 */
gint
gst_video_decoder_get_max_errors (GstVideoDecoder * dec)
{
  g_return_val_if_fail (GST_IS_VIDEO_DECODER (dec), 0);

  return dec->priv->max_errors;
}

/**
 * gst_video_decoder_set_needs_format:
 * @dec: a #GstVideoDecoder
 * @enabled: new state
 *
 * Configures decoder format needs. If enabled, the subclass needs to be
 * negotiated with format caps before it can process any data. It will then
 * never be handed any data before it has been configured.
 * Otherwise, it might be handed data without having been configured and
 * is then expected to be able to handle it, either by default or based on
 * the input data.
 *
 * Since: 1.4
 */
void
gst_video_decoder_set_needs_format (GstVideoDecoder * dec, gboolean enabled)
{
  g_return_if_fail (GST_IS_VIDEO_DECODER (dec));

  dec->priv->needs_format = enabled;
}

/**
 * gst_video_decoder_get_needs_format:
 * @dec: a #GstVideoDecoder
 *
 * Queries decoder required format handling.
 *
 * Returns: %TRUE if required format handling is enabled.
 *
 * Since: 1.4
 */
gboolean
gst_video_decoder_get_needs_format (GstVideoDecoder * dec)
{
  gboolean result;

  g_return_val_if_fail (GST_IS_VIDEO_DECODER (dec), FALSE);

  result = dec->priv->needs_format;

  return result;
}

/**
 * gst_video_decoder_set_packetized:
 * @decoder: a #GstVideoDecoder
 * @packetized: whether the input data should be considered as packetized.
 *
 * Allows the baseclass to consider input data as packetized or not. If the
 * input is packetized, then the @parse method will not be called.
 */
void
gst_video_decoder_set_packetized (GstVideoDecoder * decoder,
    gboolean packetized)
{
  decoder->priv->packetized = packetized;
}
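
/* Illustrative sketch only, not part of the base class: a subclass whose sink
 * caps guarantee exactly one encoded frame per input buffer usually declares
 * itself packetized in its instance init function, so the base class never
 * calls @parse; the MyDec type and my_dec_init() are hypothetical.
 *
 *   static void
 *   my_dec_init (MyDec * self)
 *   {
 *     gst_video_decoder_set_packetized (GST_VIDEO_DECODER (self), TRUE);
 *   }
 */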

/**
 * gst_video_decoder_get_packetized:
 * @decoder: a #GstVideoDecoder
 *
 * Queries whether input data is considered packetized or not by the
 * base class.
 *
 * Returns: %TRUE if input data is considered packetized.
 */
gboolean
gst_video_decoder_get_packetized (GstVideoDecoder * decoder)
{
  return decoder->priv->packetized;
}

/**
 * gst_video_decoder_set_estimate_rate:
 * @dec: a #GstVideoDecoder
 * @enabled: whether to enable byte to time conversion
 *
 * Allows the baseclass to perform an estimated byte to time conversion.
 */
void
gst_video_decoder_set_estimate_rate (GstVideoDecoder * dec, gboolean enabled)
{
  g_return_if_fail (GST_IS_VIDEO_DECODER (dec));

  dec->priv->do_estimate_rate = enabled;
}

/**
 * gst_video_decoder_get_estimate_rate:
 * @dec: a #GstVideoDecoder
 *
 * Returns: currently configured byte to time conversion setting
 */
gboolean
gst_video_decoder_get_estimate_rate (GstVideoDecoder * dec)
{
  g_return_val_if_fail (GST_IS_VIDEO_DECODER (dec), 0);

  return dec->priv->do_estimate_rate;
}

/**
 * gst_video_decoder_set_latency:
 * @decoder: a #GstVideoDecoder
 * @min_latency: minimum latency
 * @max_latency: maximum latency
 *
 * Lets #GstVideoDecoder sub-classes tell the baseclass what the decoder
 * latency is. Will also post a LATENCY message on the bus so the pipeline
 * can reconfigure its global latency.
 */
void
gst_video_decoder_set_latency (GstVideoDecoder * decoder,
    GstClockTime min_latency, GstClockTime max_latency)
{
  g_return_if_fail (GST_CLOCK_TIME_IS_VALID (min_latency));
  g_return_if_fail (max_latency >= min_latency);

  GST_OBJECT_LOCK (decoder);
  decoder->priv->min_latency = min_latency;
  decoder->priv->max_latency = max_latency;
  GST_OBJECT_UNLOCK (decoder);

  gst_element_post_message (GST_ELEMENT_CAST (decoder),
      gst_message_new_latency (GST_OBJECT_CAST (decoder)));
}
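
/* Illustrative sketch only, not part of the base class: a subclass that delays
 * output by a fixed number of frames usually derives its latency from the
 * negotiated framerate; frames_delay and the state variable are hypothetical.
 *
 *   if (state->info.fps_n > 0) {
 *     GstClockTime latency;
 *
 *     latency = gst_util_uint64_scale (frames_delay * GST_SECOND,
 *         state->info.fps_d, state->info.fps_n);
 *     gst_video_decoder_set_latency (decoder, latency, latency);
 *   }
 */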

/**
 * gst_video_decoder_get_latency:
 * @decoder: a #GstVideoDecoder
 * @min_latency: (out) (allow-none): address of variable in which to store the
 *     configured minimum latency, or %NULL
 * @max_latency: (out) (allow-none): address of variable in which to store the
 *     configured maximum latency, or %NULL
 *
 * Query the configured decoder latency. Results will be returned via
 * @min_latency and @max_latency.
 */
void
gst_video_decoder_get_latency (GstVideoDecoder * decoder,
    GstClockTime * min_latency, GstClockTime * max_latency)
{
  GST_OBJECT_LOCK (decoder);
  if (min_latency)
    *min_latency = decoder->priv->min_latency;
  if (max_latency)
    *max_latency = decoder->priv->max_latency;
  GST_OBJECT_UNLOCK (decoder);
}

/**
 * gst_video_decoder_merge_tags:
 * @decoder: a #GstVideoDecoder
 * @tags: (allow-none): a #GstTagList to merge, or NULL to unset
 *     previously-set tags
 * @mode: the #GstTagMergeMode to use, usually #GST_TAG_MERGE_REPLACE
 *
 * Sets the video decoder tags and how they should be merged with any
 * upstream stream tags. This will override any tags previously-set
 * with gst_video_decoder_merge_tags().
 *
 * Note that this is provided for convenience, and the subclass is
 * not required to use this and can still do tag handling on its own.
 *
 * MT safe.
 */
void
gst_video_decoder_merge_tags (GstVideoDecoder * decoder,
    const GstTagList * tags, GstTagMergeMode mode)
{
  g_return_if_fail (GST_IS_VIDEO_DECODER (decoder));
  g_return_if_fail (tags == NULL || GST_IS_TAG_LIST (tags));
  g_return_if_fail (tags == NULL || mode != GST_TAG_MERGE_UNDEFINED);

  GST_VIDEO_DECODER_STREAM_LOCK (decoder);
  if (decoder->priv->tags != tags) {
    if (decoder->priv->tags) {
      gst_tag_list_unref (decoder->priv->tags);
      decoder->priv->tags = NULL;
      decoder->priv->tags_merge_mode = GST_TAG_MERGE_APPEND;
    }
    if (tags) {
      decoder->priv->tags = gst_tag_list_ref ((GstTagList *) tags);
      decoder->priv->tags_merge_mode = mode;
    }

    GST_DEBUG_OBJECT (decoder, "set decoder tags to %" GST_PTR_FORMAT, tags);
    decoder->priv->tags_changed = TRUE;
  }
  GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
}
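
/* Illustrative sketch only, not part of the base class: a subclass typically
 * publishes codec information once after parsing the stream headers; the tag
 * value is hypothetical.
 *
 *   GstTagList *tags = gst_tag_list_new_empty ();
 *
 *   gst_tag_list_add (tags, GST_TAG_MERGE_REPLACE,
 *       GST_TAG_VIDEO_CODEC, "My Video Codec", NULL);
 *   gst_video_decoder_merge_tags (decoder, tags, GST_TAG_MERGE_REPLACE);
 *   gst_tag_list_unref (tags);
 */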

/**
 * gst_video_decoder_get_buffer_pool:
 * @decoder: a #GstVideoDecoder
 *
 * Returns: (transfer full): the instance of the #GstBufferPool used
 * by the decoder; unref it after use
 */
GstBufferPool *
gst_video_decoder_get_buffer_pool (GstVideoDecoder * decoder)
{
  g_return_val_if_fail (GST_IS_VIDEO_DECODER (decoder), NULL);

  if (decoder->priv->pool)
    return gst_object_ref (decoder->priv->pool);

  return NULL;
}

/**
 * gst_video_decoder_get_allocator:
 * @decoder: a #GstVideoDecoder
 * @allocator: (out) (allow-none) (transfer full): the #GstAllocator
 * used
 * @params: (out) (allow-none) (transfer full): the
 * #GstAllocationParams of @allocator
 *
 * Lets #GstVideoDecoder sub-classes know the memory @allocator
 * used by the base class and its @params.
 *
 * Unref the @allocator after use.
 */
void
gst_video_decoder_get_allocator (GstVideoDecoder * decoder,
    GstAllocator ** allocator, GstAllocationParams * params)
{
  g_return_if_fail (GST_IS_VIDEO_DECODER (decoder));

  if (allocator)
    *allocator = decoder->priv->allocator ?
        gst_object_ref (decoder->priv->allocator) : NULL;

  if (params)
    *params = decoder->priv->params;
}

/**
 * gst_video_decoder_set_use_default_pad_acceptcaps:
 * @decoder: a #GstVideoDecoder
 * @use: if the default pad accept-caps query handling should be used
 *
 * Lets #GstVideoDecoder sub-classes decide if they want the sink pad
 * to use the default pad query handler to reply to accept-caps queries.
 *
 * By setting this to %TRUE it is possible to further customize the default
 * handler with %GST_PAD_SET_ACCEPT_INTERSECT and
 * %GST_PAD_SET_ACCEPT_TEMPLATE.
 *
 * Since: 1.6
 */
void
gst_video_decoder_set_use_default_pad_acceptcaps (GstVideoDecoder * decoder,
    gboolean use)
{
  decoder->priv->use_default_pad_acceptcaps = use;
}