[FFmpeg-user] Delay filter reconfiguration

androiddevmar11 androiddevmar11 at gmail.com
Mon Jun 16 09:31:25 CEST 2014


Hello, 
after the last tests I noticed one more problem with this approach. In order to
read data from input 1 I am doing a seek to the moment I want to read from.
Based on the frame index I am calculating the time in ms where I am in the
input file. When I reach the appropriate moment, reading of the input file is
finished. Now I am destroying the whole graph and recreating it with a new
value of delay. But the problem probably is that not all data is read from the
sink. Sometimes a piece of sound from input 1 can be found at the correct place
in the output file, sometimes not. I would say it is random. So I cannot even
mix data from one input into the AAC file. Here is the function which is doing
the mixing:

/**
 * Main mixing loop: for every input that still has data, decode one audio
 * frame, feed it to that input's buffer source, then drain the filter graph
 * sink (via process_all_data_present_in_graph). When an input's current
 * in-file range is exhausted, either reconfigure the graph for the next
 * (in-range, out-range) pair fetched from Java, or mark the input finished
 * and push an EOF (NULL) frame into its buffer source.
 *
 * Returns 0 in all cases; errors are routed through process_all_handle_error.
 */
static int process_all(JNIEnv * env, jobject this) {
	int ret = 0;

	int data_present = 0;
	int finished = 0;

	int nb_inputs = utarray_len(audio_mixing_data_list);

	int nb_finished = 0;
	AudioMixingDataType *element = NULL;

	while (nb_finished < nb_inputs) {
		int data_present_in_graph = 0;

		for (int i = 0; i < nb_inputs; i++) {

			element = (AudioMixingDataType*)get_element_at(audio_mixing_data_list, i);
			if (element == NULL || element->input_finished || element->input_to_read == 0) {
				continue;
			}

			element->input_to_read = 0;

			AVFrame *frame = NULL;

			// BUG FIX: capture the return value so the error handler sees the
			// real error code instead of a stale `ret` (previously 0).
			if ((ret = init_input_frame(&frame)) > 0) {
				process_all_handle_error(ret);
				return 0;
			}

			// Decode one frame of audio samples.
			if ((ret = decode_audio_frame(frame, element->input_format_context,
					element->input_codec_context, &data_present, &finished, element))) {
				av_frame_free(&frame); // BUG FIX: don't leak the frame on error
				process_all_handle_error(ret);
				return 0;
			}

			if ((element->in_file_range_ms.length != -1) &&
					get_time_stamp_for_frame_index(element->current_frame_index, element) >
					element->in_file_range_ms.length) {
				// At this moment, reading of the current piece of sound from
				// the input (video) file is finished.
				finished = 1;
				data_present = 0;
			}

			/**
			 * If we are at the end of the file and there are no more samples
			 * in the decoder which are delayed, we are actually finished.
			 * This must not be treated as an error.
			 */
			if (finished && !data_present) {

				// Get the next range (location, length) from the Java side.
				Range outFileRange = get_next_out_file_subtrack_range(env, this);
				Range inFileRange = get_next_in_file_subtrack_range(env, this);

				if (outFileRange.location != -1 && outFileRange.length != -1 &&
						inFileRange.location != -1 && inFileRange.length != -1) {

					// NOTE(review): the sink is NOT drained before the graph is
					// destroyed here, so any samples still buffered inside the
					// filter graph are lost — a likely cause of the randomly
					// missing audio. Consider looping av_buffersink_get_frame()
					// until AVERROR_EOF/EAGAIN before free_graph() — TODO confirm.
					free_graph();
					// Seek to the next piece of sound in the input file.
					set_input_file_subtrack_range(inFileRange.location, inFileRange.length,
							element);
					set_output_file_subtrack_range_and_delay(outFileRange.location,
							outFileRange.length, element);
					int err = init_filter_graph(&graph, &sink);
					LOGE("Init err = %s\n", get_error_text(err));
					finished = 0;
					element->current_frame_index = 0;
				} else {

					element->input_finished = 1;
					nb_finished++;
					ret = 0;
					LOGE("Input n°%d finished. Write NULL frame \n", i);

					// Signal EOF on this input's buffer source with a NULL frame.
					ret = av_buffersrc_write_frame(element->buffer_filter_context, NULL);
					if (ret < 0) {
						av_log(NULL, AV_LOG_ERROR,
								"Error writing EOF null frame for input %d\n", i);
						av_frame_free(&frame); // BUG FIX: don't leak the frame on error
						process_all_handle_error(ret);
						return 0;
					}
				}
			} else if (data_present) {
				 process_all_data_present(element, frame);
			}
			// av_frame_free() is NULL-safe, so no guard is needed.
			av_frame_free(&frame);
			data_present_in_graph = data_present | data_present_in_graph;
		}
		process_all_data_present_in_graph(data_present_in_graph, data_present,
				element);
	}

	return 0;
}


/**
 * Push one decoded audio frame into the filter graph through this input's
 * buffer source. Failures are logged and routed to process_all_handle_error.
 */
static void process_all_data_present(AudioMixingDataType * const element,
		AVFrame * const frame){
	const int err = av_buffersrc_write_frame(element->buffer_filter_context, frame);
	if (err >= 0) {
		return;
	}
	LOGE("Error while feeding the audio filtergraph\n");
	process_all_handle_error(err);
}

/**
 * Drain the filter graph: pull filtered frames from the sink and encode them
 * until the sink reports EAGAIN/EOF, then flag every buffer source that
 * reported failed requests so its input gets decoded next iteration.
 * If no input produced data this round, ask all inputs to read again.
 */
static void process_all_data_present_in_graph(int data_present_in_graph, int
data_present, AudioMixingDataType * const element){
	// `element` is kept for interface compatibility; each input is looked up
	// per-index below (see BUG FIX in the EAGAIN branch).
	(void)element;
	int nb_inputs = utarray_len(audio_mixing_data_list);
	if (data_present_in_graph) {
		AVFrame *filt_frame = av_frame_alloc();
		// BUG FIX: av_frame_alloc() can fail; the original dereferenced it
		// unconditionally.
		if (filt_frame == NULL) {
			process_all_handle_error(AVERROR(ENOMEM));
			return;
		}

		/* pull filtered audio from the filter graph */
		while (1) {
			int ret = av_buffersink_get_frame(sink, filt_frame);
			if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
				for (int i = 0; i < nb_inputs; i++) {
					// BUG FIX: query input i's buffer source, not the single
					// `element` passed in (which is whatever input the caller
					// touched last). The original starved every other input,
					// which matches the randomly-missing audio symptom.
					AudioMixingDataType *el = get_element_at(audio_mixing_data_list, i);
					if (el != NULL && av_buffersrc_get_nb_failed_requests(
							el->buffer_filter_context) > 0) {
						el->input_to_read = 1;

						LOGE("Need to read input %d\n", i);
					}
				}

				break;
			}
			if (ret < 0){
				av_frame_free(&filt_frame); // BUG FIX: don't leak on error
				process_all_handle_error(ret);
				return;
			}

			ret = encode_audio_frame(filt_frame, output_format_context,
					output_codec_context, &data_present);
			if (ret < 0){
				av_frame_free(&filt_frame); // BUG FIX: don't leak on error
				process_all_handle_error(ret);
				return;
			}
			av_frame_unref(filt_frame);
		}

		av_frame_free(&filt_frame);
	} else {
		av_log(NULL, AV_LOG_INFO, "No data in graph\n");
		// Nothing came out of the graph: request a read from every input.
		for (int i = 0; i < nb_inputs; i++) {
			AudioMixingDataType *el = get_element_at(audio_mixing_data_list, i);
			if (el != NULL){
				el->input_to_read = 1;
			}
		}
	}
}

/**
 * Store the (location, length) range of the next piece of sound inside the
 * input file (in ms) and seek the input to its start. A value of -1 in either
 * field means "no range" and leaves the element untouched.
 */
static void set_input_file_subtrack_range(jint location, jint length,
AudioMixingDataType * const el){

	if (el == NULL || location == -1 || length == -1){
		return;
	}
	el->in_file_range_ms.location = location;
	el->in_file_range_ms.length = length;
	seek_frame(location, el);
}

/**
 * Record the (location, length) range this piece of sound occupies in the
 * output file (in ms) and derive the delay for the adelay filter. The first
 * range uses the absolute location; subsequent ranges use the gap between the
 * new location and the end of the previous range.
 */
static void set_output_file_subtrack_range_and_delay(jint location, jint
length, AudioMixingDataType* element){
	if (element != NULL)
	{
		int delay = location;
		// BUG FIX: the original tested `length` for truthiness, but the
		// sentinel for "unset" is -1 (which is truthy), so an unset previous
		// range wrongly enabled the delta-delay path. Compare against -1 like
		// the sibling checks do.
		if (element->delay_in_output_file != -1 &&
				element->out_file_range_ms.location != -1 &&
				element->out_file_range_ms.length != -1){
			// Delay relative to the end of the previous output range.
			delay = location - (element->out_file_range_ms.location +
					element->out_file_range_ms.length);
		}
		element->out_file_range_ms.location = location;
		element->out_file_range_ms.length = length;
		element->delay_in_output_file = delay;
		LOGE("Set_output_file_subtrack_range_and_delay %d:",delay);
	}
}

In this code "element" is just the data gathered for one input — one element in
the UTArray list. Could somebody look at it and propose a correct way of
destroying and recreating the filters? Thank you very much.

BR, M




--
View this message in context: http://ffmpeg-users.933282.n4.nabble.com/Delay-filter-reconfiguration-tp4665727p4665873.html
Sent from the FFmpeg-users mailing list archive at Nabble.com.


More information about the ffmpeg-user mailing list