launch: Simplify encoding profile description

Use a 'simple' syntax to describe encoding profiles
This commit is contained in:
Thibault Saunier 2013-08-30 20:32:56 -04:00
parent c97b9f7e04
commit d8cb8fec52

View file

@ -89,44 +89,8 @@ str_to_time (char *time)
return 0;
}
/* make_encoding_profile:
 * @audio: (allow-none): caps string for the audio stream, or NULL to skip it
 * @video: (allow-none): caps string for the video stream, or NULL to skip it
 * @video_restriction: (allow-none): restriction caps string for the video stream
 * @audio_preset: (allow-none): preset name for the audio profile
 * @video_preset: (allow-none): preset name for the video profile
 * @container: caps string for the container format
 *
 * Builds a container #GstEncodingProfile with optional audio and video
 * stream profiles from the given caps strings.
 *
 * Returns: (transfer full): the new profile, or NULL if one of the caps
 * strings could not be parsed.
 */
static GstEncodingProfile *
make_encoding_profile (gchar * audio, gchar * video, gchar * video_restriction,
    gchar * audio_preset, gchar * video_preset, gchar * container)
{
  GstEncodingContainerProfile *profile;
  GstEncodingProfile *stream;
  GstCaps *caps;

  /* gst_caps_from_string() returns NULL on malformed input; bail out
   * instead of handing NULL caps to the profile constructors. */
  caps = gst_caps_from_string (container);
  if (caps == NULL) {
    g_printerr ("Could not parse container caps %s\n", container);
    return NULL;
  }
  profile =
      gst_encoding_container_profile_new ((gchar *) "ges-test4", NULL, caps,
      NULL);
  gst_caps_unref (caps);

  if (audio) {
    caps = gst_caps_from_string (audio);
    if (caps == NULL) {
      g_printerr ("Could not parse audio caps %s\n", audio);
      gst_encoding_profile_unref ((GstEncodingProfile *) profile);
      return NULL;
    }
    stream = (GstEncodingProfile *)
        gst_encoding_audio_profile_new (caps, audio_preset, NULL, 0);
    gst_encoding_container_profile_add_profile (profile, stream);
    gst_caps_unref (caps);
  }

  if (video) {
    caps = gst_caps_from_string (video);
    if (caps == NULL) {
      g_printerr ("Could not parse video caps %s\n", video);
      gst_encoding_profile_unref ((GstEncodingProfile *) profile);
      return NULL;
    }
    stream = (GstEncodingProfile *)
        gst_encoding_video_profile_new (caps, video_preset, NULL, 0);
    if (video_restriction)
      /* set_restriction takes ownership of the restriction caps */
      gst_encoding_profile_set_restriction (stream,
          gst_caps_from_string (video_restriction));
    gst_encoding_container_profile_add_profile (profile, stream);
    gst_caps_unref (caps);
  }

  return (GstEncodingProfile *) profile;
}
static GESTimeline *
create_timeline (int nbargs, gchar ** argv, gchar * audio, gchar * video)
create_timeline (int nbargs, gchar ** argv)
{
GESLayer *layer;
GESTrack *tracka = NULL, *trackv = NULL;
@ -136,18 +100,16 @@ create_timeline (int nbargs, gchar ** argv, gchar * audio, gchar * video)
timeline = GES_TIMELINE (ges_asset_extract (GES_ASSET (project), NULL));
if (audio)
tracka = GES_TRACK (ges_audio_track_new ());
if (video)
trackv = GES_TRACK (ges_video_track_new ());
tracka = GES_TRACK (ges_audio_track_new ());
trackv = GES_TRACK (ges_video_track_new ());
/* We are only going to be doing one layer of clips */
layer = (GESLayer *) ges_simple_layer_new ();
/* Add the tracks and the layer to the timeline */
if (!ges_timeline_add_layer (timeline, layer) ||
!(!audio || ges_timeline_add_track (timeline, tracka)) ||
!(!video || ges_timeline_add_track (timeline, trackv)))
!(ges_timeline_add_track (timeline, tracka)) ||
!(ges_timeline_add_track (timeline, trackv)))
goto build_failure;
/* Here we've finished initializing our timeline, we're
@ -256,7 +218,7 @@ build_failure:
static GESPipeline *
create_pipeline (GESTimeline ** ret_timeline, gchar * load_path,
gchar * save_path, int argc, char **argv, gchar * audio, gchar * video)
gchar * save_path, int argc, char **argv)
{
GESPipeline *pipeline = NULL;
GESTimeline *timeline = NULL;
@ -280,7 +242,7 @@ create_pipeline (GESTimeline ** ret_timeline, gchar * load_path,
g_free (uri);
} else {
/* Normal timeline creation */
if (!(timeline = create_timeline (argc, argv, audio, video)))
if (!(timeline = create_timeline (argc, argv)))
goto failure;
}
ges_timeline_commit (timeline);
@ -403,17 +365,137 @@ _print_position (void)
return TRUE;
}
/* _parse_encoding_profile:
 * @format: serialized profile description, e.g.
 *   "video/webm:video/x-vp8+mypreset|1:audio/x-vorbis"
 *
 * Builds a #GstEncodingProfile from a colon-separated description: the
 * first element is the container caps (may be empty when a single
 * stream is described), each following element describes one stream.
 * A stream element may carry a restriction ("restriction->caps"), a
 * preset name ("caps+preset") and a presence count ("caps|presence").
 *
 * Returns: (transfer full): the new profile, or NULL on parse error.
 */
static GstEncodingProfile *
_parse_encoding_profile (const gchar * format)
{
  GstCaps *caps;
  char *preset_name = NULL;
  GstEncodingProfile *encoding_profile;
  gchar **restriction_format, **preset_v;
  guint i, presence = 0;
  GstCaps *restrictioncaps = NULL;
  gchar **strpresence_v, **strcaps_v = g_strsplit (format, ":", 0);

  if (strcaps_v[0] && *strcaps_v[0]) {
    caps = gst_caps_from_string (strcaps_v[0]);
    if (caps == NULL) {
      g_printerr ("Could not parse caps %s", strcaps_v[0]);
      g_strfreev (strcaps_v);
      /* Was `return FALSE` — this function returns a pointer */
      return NULL;
    }
    encoding_profile =
        GST_ENCODING_PROFILE (gst_encoding_container_profile_new
        ("User profile", "User profile", caps, NULL));
    gst_caps_unref (caps);
  } else {
    /* No container described: the single stream becomes the profile */
    encoding_profile = NULL;
  }

  for (i = 1; strcaps_v[i]; i++) {
    GstEncodingProfile *profile = NULL;
    gchar *strcaps, *strpresence;

    /* Reset per-stream state.  Without this, a stream without a preset
     * following one with a preset would reuse (and double-free) the
     * previous iteration's freed preset_name, and would silently
     * inherit its presence value. */
    preset_name = NULL;
    presence = 0;

    restriction_format = g_strsplit (strcaps_v[i], "->", 0);
    if (restriction_format[1]) {
      restrictioncaps = gst_caps_from_string (restriction_format[0]);
      strcaps = g_strdup (restriction_format[1]);
    } else {
      restrictioncaps = NULL;
      strcaps = g_strdup (restriction_format[0]);
    }
    g_strfreev (restriction_format);

    preset_v = g_strsplit (strcaps, "+", 0);
    if (preset_v[1]) {
      strpresence = preset_v[1];
      g_free (strcaps);
      strcaps = g_strdup (preset_v[0]);
    } else {
      strpresence = preset_v[0];
    }

    strpresence_v = g_strsplit (strpresence, "|", 0);
    if (strpresence_v[1]) {     /* We have a presence */
      gchar *endptr;

      if (preset_v[1]) {        /* We have preset and presence */
        preset_name = g_strdup (strpresence_v[0]);
      } else {                  /* We have a presence but no preset */
        g_free (strcaps);
        strcaps = g_strdup (strpresence_v[0]);
      }

      presence = strtoll (strpresence_v[1], &endptr, 10);
      if (endptr == strpresence_v[1]) {
        g_printerr ("Wrong presence %s\n", strpresence_v[1]);
        g_free (preset_name);
        g_free (strcaps);
        g_strfreev (strpresence_v);
        g_strfreev (preset_v);
        if (restrictioncaps)
          gst_caps_unref (restrictioncaps);
        if (encoding_profile)
          gst_encoding_profile_unref (encoding_profile);
        g_strfreev (strcaps_v);
        return NULL;
      }
    } else {                    /* We have no presence */
      if (preset_v[1]) {        /* No presence but a preset */
        preset_name = g_strdup (preset_v[1]);
        g_free (strcaps);
        strcaps = g_strdup (preset_v[0]);
      }                         /* Else we have neither */
    }
    g_strfreev (strpresence_v);
    g_strfreev (preset_v);

    GST_DEBUG ("Creating preset with restrictions: %" GST_PTR_FORMAT
        ", caps: %s, preset %s, presence %d", restrictioncaps, strcaps,
        preset_name ? preset_name : "none", presence);

    caps = gst_caps_from_string (strcaps);
    g_free (strcaps);
    if (caps == NULL) {
      g_warning ("Could not create caps for %s", strcaps_v[i]);
      g_free (preset_name);
      if (restrictioncaps)
        gst_caps_unref (restrictioncaps);
      if (encoding_profile)
        gst_encoding_profile_unref (encoding_profile);
      g_strfreev (strcaps_v);
      return NULL;
    }

    if (g_str_has_prefix (strcaps_v[i], "audio/")) {
      profile = GST_ENCODING_PROFILE (gst_encoding_audio_profile_new (caps,
              preset_name, restrictioncaps, presence));
    } else if (g_str_has_prefix (strcaps_v[i], "video/") ||
        g_str_has_prefix (strcaps_v[i], "image/")) {
      profile = GST_ENCODING_PROFILE (gst_encoding_video_profile_new (caps,
              preset_name, restrictioncaps, presence));
    }

    g_free (preset_name);
    gst_caps_unref (caps);
    if (restrictioncaps)
      gst_caps_unref (restrictioncaps);

    if (profile == NULL) {
      g_warning ("No way to create a preset for caps: %s", strcaps_v[i]);
      if (encoding_profile)
        gst_encoding_profile_unref (encoding_profile);
      g_strfreev (strcaps_v);
      return NULL;
    }

    if (encoding_profile) {
      if (gst_encoding_container_profile_add_profile
          (GST_ENCODING_CONTAINER_PROFILE (encoding_profile),
              profile) == FALSE) {
        g_warning ("Can not create a preset for caps: %s", strcaps_v[i]);
        gst_encoding_profile_unref (profile);
        gst_encoding_profile_unref (encoding_profile);
        g_strfreev (strcaps_v);
        return NULL;
      }
    } else {
      encoding_profile = profile;
    }
  }
  g_strfreev (strcaps_v);

  return encoding_profile;
}
int
main (int argc, gchar ** argv)
{
GError *err = NULL;
gchar *outputuri = NULL;
gchar *container = (gchar *) "application/ogg";
gchar *audio = (gchar *) "audio/x-vorbis";
gchar *video = (gchar *) "video/x-theora";
gchar *video_restriction = (gchar *) "ANY";
gchar *audio_preset = NULL;
gchar *video_preset = NULL;
const gchar *format = NULL;
gchar *exclude_args = NULL;
static gboolean smartrender = FALSE;
static gboolean list_transitions = FALSE;
@ -429,18 +511,15 @@ main (int argc, gchar ** argv)
"Render to outputuri, and avoid decoding/reencoding", NULL},
{"outputuri", 'o', 0, G_OPTION_ARG_STRING, &outputuri,
"URI to encode to", "URI (<protocol>://<location>)"},
{"format", 'f', 0, G_OPTION_ARG_STRING, &container,
"Container format", "<GstCaps>"},
{"vformat", 'v', 0, G_OPTION_ARG_STRING, &video,
"Video format", "<GstCaps>"},
{"aformat", 'a', 0, G_OPTION_ARG_STRING, &audio,
"Audio format", "<GstCaps>"},
{"vrestriction", 'x', 0, G_OPTION_ARG_STRING, &video_restriction,
"Video restriction", "<GstCaps>"},
{"apreset", 0, 0, G_OPTION_ARG_STRING, &audio_preset,
"Encoding audio profile preset", "<GstPresetName>"},
{"vpreset", 0, 0, G_OPTION_ARG_STRING, &video_preset,
"Encoding video profile preset", "<GstPresetName>"},
{"format", 'f', 0, G_OPTION_ARG_STRING, &format,
"Set the properties to use for the encoding profile "
"(in case of transcoding.) For example:\n"
"video/mpegts:video/x-raw,width=1920,height=1080->video/x-h264:audio/x-ac3\n"
"A preset name can be used by adding +presetname, eg:\n"
"video/webm:video/x-vp8+mypreset:audio/x-vorbis\n"
"The presence property of the profile can be specified with |<presence>, eg:\n"
"video/webm:video/x-vp8|<presence>:audio/x-vorbis\n",
"properties-values"},
{"repeat", 'r', 0, G_OPTION_ARG_INT, &repeat,
"Number of time to repeat timeline", NULL},
{"list-transitions", 't', 0, G_OPTION_ARG_NONE, &list_transitions,
@ -514,34 +593,29 @@ main (int argc, gchar ** argv)
g_option_context_free (ctx);
/* normalize */
if (strcmp (audio, "none") == 0)
audio = NULL;
if (strcmp (video, "none") == 0)
video = NULL;
/* Create the pipeline */
pipeline =
create_pipeline (&timeline, load_path, save_path, argc - 1, argv + 1,
audio, video);
create_pipeline (&timeline, load_path, save_path, argc - 1, argv + 1);
if (!pipeline)
exit (1);
/* Setup profile/encoding if needed */
if (smartrender || outputuri) {
GstEncodingProfile *prof = NULL;
GESProject *proj =
GES_PROJECT (ges_extractable_get_asset (GES_EXTRACTABLE (timeline)));
if (proj) {
if (!format) {
GESProject *proj =
GES_PROJECT (ges_extractable_get_asset (GES_EXTRACTABLE (timeline)));
const GList *profiles = ges_project_list_encoding_profiles (proj);
prof = profiles ? profiles->data : NULL;
}
if (!prof) {
make_encoding_profile (audio, video, video_restriction, audio_preset,
video_preset, container);
if (format == NULL)
format = "application/ogg:video/x-theora:audio/x-vorbis";
prof = _parse_encoding_profile (format);
}
if (!prof || !ges_pipeline_set_render_settings (pipeline, outputuri, prof)