/* Excerpts from FFmpeg's PGS (HDMV Presentation Graphic Stream) subtitle
 * decoder, libavcodec/pgssubdec.c. */

#define RGBA(r,g,b,a) (((unsigned)(a) << 24) | ((r) << 16) | ((g) << 8) | (b))
#define MAX_EPOCH_PALETTES 8   // Max 8 allowed per PGS epoch
#define MAX_EPOCH_OBJECTS  64  // Max 64 allowed per PGS epoch
#define MAX_OBJECT_REFS    2   // Max objects per display set

/* find_object(): linear search of the epoch's object cache by id */
    for (i = 0; i < objects->count; i++) {
        /* ... */

/* find_palette(): linear search of the epoch's palette cache by id */
    for (i = 0; i < palettes->count; i++) {
        /* ... */
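/*
 * The per-epoch caches behind these loops are fixed-size arrays capped by the
 * limits above (up to MAX_EPOCH_PALETTES palettes and MAX_EPOCH_OBJECTS
 * objects per epoch). A minimal sketch of the lookup pattern follows; the
 * struct and field names here are simplified stand-ins, not the decoder's
 * exact layout.
 */
#define SKETCH_MAX_EPOCH_OBJECTS 64   /* same cap as MAX_EPOCH_OBJECTS */

typedef struct EpochObject {
    int id;
    /* ... per-object state (RLE buffer, dimensions, ...) ... */
} EpochObject;

typedef struct EpochObjects {
    int         count;
    EpochObject object[SKETCH_MAX_EPOCH_OBJECTS];
} EpochObjects;

/* Linear lookup by id, mirroring the loop in find_object(). */
static EpochObject *lookup_object(int id, EpochObjects *objects)
{
    for (int i = 0; i < objects->count; i++)
        if (objects->object[i].id == id)
            return &objects->object[i];
    return NULL;   /* not cached yet; the caller claims a new slot */
}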
/*
 * Decode the RLE data.
 */
static int decode_rle(AVCodecContext *avctx, AVSubtitleRect *rect,
                      const uint8_t *buf, unsigned int buf_size)
{
    const uint8_t *rle_bitmap_end;
    int pixel_count, line_count;

    rle_bitmap_end = buf + buf_size;
    /* ... */
    while (buf < rle_bitmap_end && line_count < rect->h) {
        /* ... */
        color = bytestream_get_byte(&buf);
        run   = 1;

        if (color == 0x00) {            /* escape byte */
            flags = bytestream_get_byte(&buf);
            run   = flags & 0x3f;
            if (flags & 0x40)           /* 14-bit run length */
                run = (run << 8) + bytestream_get_byte(&buf);
            color = flags & 0x80 ? bytestream_get_byte(&buf) : 0;
        }

        if (run > 0 && pixel_count + run <= rect->w * rect->h) {
            memset(rect->data[0] + pixel_count, color, run);
            pixel_count += run;
        } else if (!run) {
            /* end-of-line marker: check that a whole line was decoded */
            if (pixel_count % rect->w > 0) {
                av_log(avctx, AV_LOG_ERROR,
                       "Decoded %d pixels, when line should be %d pixels\n",
                       pixel_count % rect->w, rect->w);
                /* ... */
            }
            line_count++;
        }
    }

    if (pixel_count < rect->w * rect->h) {
        /* ... */
        return AVERROR_INVALIDDATA;
    }

    ff_dlog(avctx, "Pixel Count = %d, Area = %d\n",
            pixel_count, rect->w * rect->h);

    return 0;
}
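/*
 * The loop above implements the PGS escape coding: a non-zero byte is a
 * single pixel of that palette colour, while a zero byte starts an escape
 * whose flag bits select an explicit colour (0x80), a 14-bit run length
 * (0x40), or, when the run is zero, an end-of-line marker. Below is a
 * self-contained sketch of the same scheme, independent of FFmpeg; the
 * function name, the sample bytes and the omission of bounds checks on the
 * escape reads are mine.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Decode one RLE line into 'dst' (capacity 'w'). Returns the number of
 * pixels written, or -1 on malformed input. Illustrative only. */
static int rle_decode_line(const uint8_t *src, int src_size, uint8_t *dst, int w)
{
    int x = 0, i = 0;

    while (i < src_size) {
        uint8_t color = src[i++];
        int run = 1;

        if (color == 0x00) {                   /* escape byte */
            uint8_t flags = src[i++];          /* bounds checks omitted */
            run   = flags & 0x3f;
            if (flags & 0x40)                  /* 14-bit run length */
                run = (run << 8) + src[i++];
            color = (flags & 0x80) ? src[i++] : 0;
            if (run == 0)                      /* 0x00 0x00: end of line */
                return x;
        }
        if (x + run > w)
            return -1;
        memset(dst + x, color, run);
        x += run;
    }
    return -1;                                 /* missing end-of-line marker */
}

int main(void)
{
    /* 0x2A: single pixel of colour 0x2A; 0x00 0x83 0x07: run of 3 pixels of
     * colour 7; 0x00 0x00: end of line. */
    static const uint8_t line[] = { 0x2A, 0x00, 0x83, 0x07, 0x00, 0x00 };
    uint8_t out[16];
    int n = rle_decode_line(line, sizeof(line), out, sizeof(out));
    printf("decoded %d pixels\n", n);          /* expect 4 */
    return 0;
}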
/*
 * Parse the picture segment packet.
 */
static int parse_object_segment(AVCodecContext *avctx,
                                const uint8_t *buf, int buf_size)
{
    /* ... */
    id = bytestream_get_be16(&buf);
    /* ... */
    sequence_desc = bytestream_get_byte(&buf);

    if (!(sequence_desc & 0x80)) {
        /* additional RLE data appended to an already started object */
        /* ... */
        object->rle_data_len      += buf_size;
        object->rle_remaining_len -= buf_size;
        return 0;
    }
    /* ... */
    /* decode RLE bitmap length; the stored size includes the width/height data */
    rle_bitmap_len = bytestream_get_be24(&buf) - 2*2;

    if (buf_size > rle_bitmap_len) {
        av_log(avctx, AV_LOG_ERROR,
               "Buffer dimension %d larger than the expected RLE data %d\n",
               buf_size, rle_bitmap_len);
        return AVERROR_INVALIDDATA;
    }

    /* get the bitmap dimensions from the data */
    width  = bytestream_get_be16(&buf);
    height = bytestream_get_be16(&buf);

    /* make sure the bitmap is not too large */
    if (avctx->width < width || avctx->height < height || !width || !height) {
        /* ... */
        return AVERROR_INVALIDDATA;
    }
    /* ... */
        object->rle_data_len      = 0;
        object->rle_remaining_len = 0;
    /* ... */
    memcpy(object->rle, buf, buf_size);
    object->rle_data_len      = buf_size;
    object->rle_remaining_len = rle_bitmap_len - buf_size;
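/*
 * For orientation, a sketch of the object definition segment (ODS) payload
 * that the function above steps through. The field names are descriptive
 * labels taken from common PGS documentation, not identifiers from the
 * decoder.
 *
 *   object_id            : 16 bits  (bytestream_get_be16)
 *   object_version       :  8 bits  (skipped by the decoder)
 *   sequence_descriptor  :  8 bits  (0x80 = first fragment, 0x40 = last fragment)
 *   -- present only in the first fragment: --
 *   object_data_length   : 24 bits  (covers width, height and the RLE data)
 *   width                : 16 bits
 *   height               : 16 bits
 *   rle_data             : remainder of the segment; large bitmaps continue
 *                          in follow-up ODS fragments (the append branch above)
 */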
/*
 * Parse the palette segment packet.
 */
static int parse_palette_segment(AVCodecContext *avctx,
                                 const uint8_t *buf, int buf_size)
{
    /* ... */
    const uint8_t *buf_end = buf + buf_size;
    /* ... */
    int r, g, b, r_add, g_add, b_add;
    /* ... */
    id = bytestream_get_byte(&buf);
    /* ... */
    while (buf < buf_end) {
        color_id = bytestream_get_byte(&buf);
        y        = bytestream_get_byte(&buf);
        cr       = bytestream_get_byte(&buf);
        cb       = bytestream_get_byte(&buf);
        alpha    = bytestream_get_byte(&buf);

        /* ... YUV_TO_RGB*_CCIR fixed-point conversion into r, g, b ... */

        ff_dlog(avctx, "Color %d := (%d,%d,%d,%d)\n", color_id, r, g, b, alpha);

        /* store the color in the palette */
        palette->clut[color_id] = RGBA(r,g,b,alpha);
    }
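/*
 * The r/g/b values come out of libavcodec's fixed-point YUV_TO_RGB*_CCIR
 * macros (with a BT.709 variant for HD material). Purely as an illustration
 * of the colour math, a floating-point limited-range BT.601 conversion of one
 * palette entry could look like the sketch below; the helper name and
 * constants are mine and will not match the decoder bit-exactly.
 */
#include <stdint.h>

static uint32_t ycbcr_to_rgba(int y, int cb, int cr, int alpha)
{
    double yd  = (y  -  16) * 255.0 / 219.0;   /* expand limited range */
    double cbd = (cb - 128) * 255.0 / 224.0;
    double crd = (cr - 128) * 255.0 / 224.0;

    double r = yd + 1.402    * crd;
    double g = yd - 0.344136 * cbd - 0.714136 * crd;
    double b = yd + 1.772    * cbd;

#define CLAMP8(v) ((v) < 0 ? 0 : (v) > 255 ? 255 : (int)((v) + 0.5))
    /* same packing as the RGBA() macro above: a<<24 | r<<16 | g<<8 | b */
    return ((uint32_t)alpha << 24) |
           ((uint32_t)CLAMP8(r) << 16) |
           ((uint32_t)CLAMP8(g) <<  8) |
            (uint32_t)CLAMP8(b);
#undef CLAMP8
}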
/*
 * Parse the presentation segment packet.
 */
static int parse_presentation_segment(AVCodecContext *avctx,
                                      const uint8_t *buf, int buf_size,
                                      int64_t pts)
{
    /* ... */
    const uint8_t *buf_end = buf + buf_size;

    /* video descriptor */
    int w = bytestream_get_be16(&buf);
    int h = bytestream_get_be16(&buf);

    ff_dlog(avctx, "Video Dimensions %dx%d\n", w, h);
    /* ... composition descriptor: the top two bits of composition_state mark
     * epoch boundaries; a non-zero state flushes the epoch caches ... */
    state = bytestream_get_byte(&buf) >> 6;
    /* ... */
        av_log(avctx, AV_LOG_ERROR,
               "Invalid number of presentation objects %d\n",
               ctx->presentation.object_count);
    /* ... per-object references: id, window id, composition flag
     * (0x40 = forced, 0x80 = cropped), then x/y placement ... */
        if (buf_end - buf < 8) {
            /* ... */
        }
        ff_dlog(avctx, "Subtitle Placement x=%d, y=%d\n",
                object->x, object->y);
        /* ... */
            av_log(avctx, AV_LOG_ERROR,
                   "Subtitle out of video bounds. x = %d, y = %d, video width = %d, video height = %d.\n",
                   object->x, object->y, avctx->width, avctx->height);
/*
 * Parse the display segment packet.
 */
static int display_end_segment(AVCodecContext *avctx, void *data,
                               const uint8_t *buf, int buf_size)
{
    /* ... */
    memset(sub, 0, sizeof(*sub));
    /* ... one AVSubtitleRect per composition object ... */
    if (!sub->rects[i]) {
        /* ... */
    }
    /* ... copy data[]/linesize[] into the deprecated AVPicture 'pict' field ... */
    for (j = 0; j < 4; j++) {
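/*
 * The AVSubtitle produced here contains SUBTITLE_BITMAP rects whose data[0]
 * is the indexed bitmap and data[1] the RGBA palette filled from the CLUT
 * above. A hedged sketch of how an application might consume it;
 * render_pgs_subtitle() and blend_pixel() are hypothetical names, not FFmpeg
 * API.
 */
#include <stdint.h>
#include <libavcodec/avcodec.h>

static void render_pgs_subtitle(const AVSubtitle *sub,
                                void (*blend_pixel)(int x, int y, uint32_t rgba))
{
    for (unsigned i = 0; i < sub->num_rects; i++) {
        const AVSubtitleRect *r = sub->rects[i];
        const uint32_t *pal;

        if (r->type != SUBTITLE_BITMAP)
            continue;
        pal = (const uint32_t *)r->data[1];    /* AVPALETTE, nb_colors entries */

        for (int y = 0; y < r->h; y++) {
            const uint8_t *line = r->data[0] + y * r->linesize[0];
            for (int x = 0; x < r->w; x++)
                blend_pixel(r->x + x, r->y + y, pal[line[x]]);
        }
    }
}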
static int decode(AVCodecContext *avctx, void *data, int *got_sub_ptr,
                  AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size       = avpkt->size;
    /* ... */
    ff_dlog(avctx, "PGS sub packet:\n");

    for (i = 0; i < buf_size; i++) {
        ff_dlog(avctx, "%02x ", buf[i]);
        /* ... */
    }
    /* ... */
    buf_end = buf + buf_size;

    /* step through the buffer to identify segments */
    while (buf < buf_end) {
        segment_type   = bytestream_get_byte(&buf);
        segment_length = bytestream_get_be16(&buf);

        ff_dlog(avctx, "Segment Length %d, Segment Type %x\n",
                segment_length, segment_type);
        /* ... */
        switch (segment_type) {
        /* ... dispatch to the parse_*_segment() / display_end_segment() handlers ... */
        default:
            av_log(avctx, AV_LOG_ERROR,
                   "Unknown subtitle segment type 0x%x, length %d\n",
                   segment_type, segment_length);
            /* ... */
        }
        buf += segment_length;
    }
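/*
 * Each AVPacket carries a sequence of segments framed as a 1-byte type plus a
 * 2-byte big-endian length, and the switch above dispatches on the type. A
 * sketch of the framing and the usual PGS segment codes; the enum names are
 * illustrative, the numeric values are the standard ones.
 */
enum {
    PGS_PALETTE_SEGMENT      = 0x14,  /* PDS: palette definition          */
    PGS_OBJECT_SEGMENT       = 0x15,  /* ODS: object (bitmap) definition  */
    PGS_PRESENTATION_SEGMENT = 0x16,  /* PCS: composition / placement     */
    PGS_WINDOW_SEGMENT       = 0x17,  /* WDS: window definition           */
    PGS_DISPLAY_SEGMENT      = 0x80,  /* END: end of the display set      */
};

/*
 *   +---------------+------------------+--------------------------+
 *   | type (1 byte) | length (2, BE)   | payload (length bytes)   |  ... repeated
 *   +---------------+------------------+--------------------------+
 *
 * A display set typically arrives as PCS, WDS, PDS, one or more ODS and a
 * terminating END segment, at which point display_end_segment() emits the
 * AVSubtitle.
 */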
#define OFFSET(x) offsetof(PGSSubContext, x)
#define SD AV_OPT_FLAG_SUBTITLE_PARAM | AV_OPT_FLAG_DECODING_PARAM

static const AVOption options[] = {
    { "forced_subs_only", "Only show forced subtitles", OFFSET(forced_subs_only),
      AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, SD },
    { NULL },
};
/* ... */
AVCodec ff_pgssub_decoder = {
    /* ... */
    .priv_class     = &pgsdec_class,
    /* ... */
};
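/*
 * Since forced_subs_only is an ordinary decoder-private AVOption, it can be
 * enabled through the options dictionary passed to avcodec_open2() (or as
 * -forced_subs_only 1 on the ffmpeg command line). A minimal sketch, with
 * error handling trimmed; open_pgs_decoder() is a name of my own.
 */
#include <libavcodec/avcodec.h>
#include <libavutil/dict.h>

static AVCodecContext *open_pgs_decoder(void)
{
    const AVCodec *dec = avcodec_find_decoder(AV_CODEC_ID_HDMV_PGS_SUBTITLE);
    AVCodecContext *ctx;
    AVDictionary *opts = NULL;

    if (!dec)
        return NULL;
    ctx = avcodec_alloc_context3(dec);

    av_dict_set(&opts, "forced_subs_only", "1", 0);
    if (avcodec_open2(ctx, dec, &opts) < 0)
        avcodec_free_context(&ctx);   /* leaves ctx == NULL on failure */
    av_dict_free(&opts);
    return ctx;
}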