Use FFMpeg to capture video

pull/1036/head
Hleb Valoshka 2018-12-24 23:14:22 +03:00
parent 5af65a58f3
commit 8dab250f22
23 changed files with 1011 additions and 1019 deletions

View File

@ -49,7 +49,7 @@ jobs:
uses: actions/checkout@v2
- name: 'Configure CMake'
run: cmake -B ${{github.workspace}}/build -DCMAKE_TOOLCHAIN_FILE=c:/vcpkg/scripts/buildsystems/vcpkg.cmake -DVCPKG_TARGET_TRIPLET=${{matrix.platform}}-windows -DCMAKE_GENERATOR_PLATFORM=${{matrix.generator_platform}} -DENABLE_SPICE=ON -DENABLE_TOOLS=ON -DENABLE_TESTS=ON -DENABLE_SDL=OFF -DENABLE_QT=ON -DENABLE_DATA=OFF
run: cmake -B ${{github.workspace}}/build -DCMAKE_TOOLCHAIN_FILE=c:/vcpkg/scripts/buildsystems/vcpkg.cmake -DVCPKG_TARGET_TRIPLET=${{matrix.platform}}-windows -DCMAKE_GENERATOR_PLATFORM=${{matrix.generator_platform}} -DENABLE_SPICE=ON -DENABLE_TOOLS=ON -DENABLE_TESTS=ON -DENABLE_SDL=OFF -DENABLE_QT=ON -DENABLE_FFMPEG=ON -DENABLE_DATA=OFF
- name: Build
run: cmake --build ${{github.workspace}}/build --config ${{env.BUILD_TYPE}} -- /maxcpucount:2 /nologo

View File

@ -50,7 +50,7 @@ option(ENABLE_GTK "Build GTK2 frontend (Unix only)? (Default: off)" OFF)
option(ENABLE_QT "Build Qt frontend? (Default: on)" ON)
option(ENABLE_SDL "Build SDL frontend? (Default: off)" OFF)
option(ENABLE_WIN "Build Windows native frontend? (Default: on)" ON)
option(ENABLE_THEORA "Support video capture to OGG Theora? (Default: on)" ON)
option(ENABLE_FFMPEG "Support video capture using FFMPEG (Default: off)" OFF)
option(ENABLE_TOOLS "Build different tools? (Default: off)" OFF)
option(NATIVE_OSX_APP "Support native OSX paths read data from (Default: off)" OFF)
option(FAST_MATH "Build with unsafe fast-math compiler option (Default: off)" OFF)
@ -202,16 +202,14 @@ else()
message(STATUS "NAIF SPICE is disabled. Not looking for cspice library.")
endif()
if(_UNIX)
find_package(PkgConfig)
if(ENABLE_FFMPEG)
include(FindFFMPEG)
find_package(FFMPEG REQUIRED COMPONENTS avcodec avutil avformat swscale)
add_definitions(-DUSE_FFMPEG)
endif()
if(_UNIX AND ENABLE_THEORA)
pkg_search_module(THEORA theora REQUIRED)
include_directories(${THEORA_INCLUDE_DIRS})
link_directories(${THEORA_LIBRARY_DIRS})
link_libraries(${THEORA_LIBRARIES})
add_definitions(-DTHEORA)
if(_UNIX)
find_package(PkgConfig)
endif()
if(WIN32)

View File

@ -22,7 +22,10 @@ steps:
sudo apt update
sudo apt install -y libeigen3-dev \
libepoxy-dev \
libtheora-dev \
libavcodec-dev \
libavformat-dev \
libavutil-dev \
libswscale-dev \
libjpeg-dev \
libpng-dev \
libglu1-mesa-dev \
@ -41,7 +44,7 @@ steps:
- script: |
brew install pkg-config \
eigen \
theora \
ffmpeg \
gtk+ \
gtkglext \
cspice \
@ -62,7 +65,7 @@ steps:
- script: |
mkdir build
cd build
cmake -DENABLE_SPICE=ON -DENABLE_TOOLS=ON -DENABLE_TESTS=ON -DENABLE_SDL=ON -DENABLE_GTK=ON ..
cmake -DENABLE_SPICE=ON -DENABLE_TOOLS=ON -DENABLE_TESTS=ON -DENABLE_SDL=ON -DENABLE_GTK=ON -DENABLE_FFMPEG=ON ..
make -j $(nproc || echo 4)
CTEST_OUTPUT_ON_FAILURE=1 ctest
workingDirectory: "$(system.defaultworkingdirectory)"

View File

@ -388,4 +388,11 @@ StarTextures
#------------------------------------------------------------------------
# LogSize 1000
#------------------------------------------------------------------------
# The following define options for x264 and ffvhuff video codecs when
# Celestia is compiled with ffmpeg library support for video capture.
#------------------------------------------------------------------------
# X264EncoderOptions ""
# FFVHEncoderOptions ""
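#
# A possible example (assumed values, not defaults): the string is parsed
# as comma-separated key=value pairs and handed to the FFmpeg encoder, e.g.
# X264EncoderOptions "preset=medium,crf=22"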
}

View File

@ -0,0 +1,81 @@
macro(find_ffmpeg_lib)
if(NOT(${ARGC} EQUAL 3))
message(FATAL_ERROR "find_ffmpeg_lib requires exactly 3 arguments")
endif()
set(__name ${ARGV0})
set(__header ${ARGV1})
set(__lib ${ARGV2})
find_library(${__name}_LIBRARY ${__lib})
find_path(${__name}_INCLUDE_DIR ${__header})
find_package_handle_standard_args(${__name}
FOUND_VAR ${__name}_FOUND
REQUIRED_VARS ${__name}_INCLUDE_DIR ${__name}_LIBRARY
FAIL_MESSAGE "Failed to find ${__name}")
set(${__name}_INCLUDE_DIRS ${${__name}_INCLUDE_DIR})
set(${__name}_LIBRARIES ${${__name}_LIBRARY})
list(APPEND FFMPEG_INCLUDE_DIRS ${${__name}_INCLUDE_DIR})
list(REMOVE_DUPLICATES FFMPEG_INCLUDE_DIRS)
list(APPEND FFMPEG_LIBRARIES ${${__name}_LIBRARY})
list(REMOVE_DUPLICATES FFMPEG_LIBRARIES)
if(NOT TARGET FFMPEG::${__name})
add_library(FFMPEG::${__name} UNKNOWN IMPORTED)
set_target_properties(FFMPEG::${__name} PROPERTIES
INTERFACE_INCLUDE_DIRECTORIES "${${__name}_INCLUDE_DIR}"
IMPORTED_LINK_INTERFACE_LANGUAGES "C"
IMPORTED_LOCATION "${${__name}_LIBRARY}")
endif()
mark_as_advanced(${__name}_INCLUDE_DIR ${__name}_LIBRARY ${__name}_INCLUDE_DIRS ${__name}_LIBRARIES)
endmacro()
include(FindPackageHandleStandardArgs)
if(FFMPEG_FIND_COMPONENTS)
foreach(component ${FFMPEG_FIND_COMPONENTS})
string(TOUPPER ${component} _COMPONENT)
set(FFMPEG_USE_${_COMPONENT} 1)
endforeach()
endif()
set(FFMPEG_INCLUDE_DIRS)
set(FFMPEG_LIBRARIES)
if(FFMPEG_USE_AVCODEC)
find_ffmpeg_lib(AVCODEC libavcodec/avcodec.h avcodec)
endif()
if(FFMPEG_USE_AVFORMAT)
find_ffmpeg_lib(AVFORMAT libavformat/avformat.h avformat)
endif()
if(FFMPEG_USE_AVUTIL)
find_ffmpeg_lib(AVUTIL libavutil/avutil.h avutil)
endif()
if(FFMPEG_USE_AVDEVICE)
find_ffmpeg_lib(AVDEVICE libavdevice/avdevice.h avdevice)
endif()
if(FFMPEG_USE_SWSCALE)
find_ffmpeg_lib(SWSCALE libswscale/swscale.h swscale)
endif()
if(NOT TARGET FFMPEG::FFMPEG)
add_library(FFMPEG::FFMPEG UNKNOWN IMPORTED)
set_target_properties(FFMPEG::FFMPEG PROPERTIES IMPORTED_LINK_INTERFACE_LANGUAGES "C")
if(TARGET FFMPEG::AVCODEC)
set_target_properties(FFMPEG::FFMPEG PROPERTIES INTERFACE_LINK_LIBRARIES FFMPEG::AVCODEC)
endif()
if(TARGET FFMPEG::AVUTIL)
set_target_properties(FFMPEG::FFMPEG PROPERTIES INTERFACE_LINK_LIBRARIES FFMPEG::AVUTIL)
endif()
if(TARGET FFMPEG::AVFORMAT)
set_target_properties(FFMPEG::FFMPEG PROPERTIES INTERFACE_LINK_LIBRARIES FFMPEG::AVFORMAT)
endif()
if(TARGET FFMPEG::SWSCALE)
set_target_properties(FFMPEG::FFMPEG PROPERTIES INTERFACE_LINK_LIBRARIES FFMPEG::SWSCALE)
endif()
endif()
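The module above is consumed from the top-level CMakeLists.txt shown earlier; a minimal sketch of that usage (the celestia target comes from src/celestia/CMakeLists.txt; the commented FFMPEG::* line is an alternative the module enables, not what this commit does):

find_package(FFMPEG REQUIRED COMPONENTS avcodec avutil avformat swscale)
add_definitions(-DUSE_FFMPEG)
# link the aggregated variables, as this commit's src/celestia/CMakeLists.txt does
target_link_libraries(celestia ${FFMPEG_LIBRARIES})
# the imported targets created above would also work:
# target_link_libraries(celestia FFMPEG::AVCODEC FFMPEG::AVFORMAT FFMPEG::AVUTIL FFMPEG::SWSCALE)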

View File

@ -15,7 +15,6 @@ set(CELESTIA_SOURCES
helper.h
imagecapture.cpp
imagecapture.h
moviecapture.h
scriptmenu.cpp
scriptmenu.h
url.cpp
@ -24,15 +23,11 @@ set(CELESTIA_SOURCES
view.h
)
if(WIN32)
if(ENABLE_FFMPEG)
list(APPEND CELESTIA_SOURCES
avicapture.cpp
avicapture.h
)
elseif(_UNIX AND ENABLE_THEORA)
list(APPEND CELESTIA_SOURCES
oggtheoracapture.cpp
oggtheoracapture.h
ffmpegcapture.cpp
ffmpegcapture.h
moviecapture.h
)
endif()
@ -73,6 +68,9 @@ if(APPLE)
target_link_libraries(celestia "-framework Foundation")
endif()
if(ENABLE_FFMPEG)
target_link_libraries(celestia ${FFMPEG_LIBRARIES})
endif()
install(TARGETS celestia LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} NAMELINK_SKIP)

View File

@ -1,224 +0,0 @@
// avicapture.cpp
//
// Copyright (C) 2001-2008, Chris Laurel <claurel@shatters.net>
//
// This program is free software; you can redistribute it and/or
// modify it under the terms of the GNU General Public License
// as published by the Free Software Foundation; either version 2
// of the License, or (at your option) any later version.
#include <cmath>
#include <windowsx.h>
#include <celutil/debug.h>
#include "avicapture.h"
using namespace std;
AVICapture::AVICapture(const Renderer *r) :
MovieCapture(r)
{
AVIFileInit();
}
AVICapture::~AVICapture()
{
cleanup();
AVIFileExit();
}
bool AVICapture::start(const string& filename,
int w, int h,
float fps)
{
if (capturing)
return false;
width = w;
height = h;
frameRate = fps;
if (HIWORD(VideoForWindowsVersion()) < 0x010a)
{
// We need to be running on version 1.1 or later
return false;
}
// Compute the width of a row in bytes; pad so that rows are aligned on
// 4 byte boundaries.
int rowBytes = (width * 3 + 3) & ~0x3;
image = new unsigned char[rowBytes * height];
HRESULT hr = AVIFileOpenA(&aviFile,
filename.c_str(),
OF_WRITE | OF_CREATE,
nullptr);
if (hr != AVIERR_OK)
{
DPRINTF(0, "Erroring creating avi file for capture.\n");
return false;
}
AVISTREAMINFO info;
ZeroMemory(&info, sizeof info);
info.fccType = streamtypeVIDEO;
info.fccHandler = 0;
info.dwScale = 1;
info.dwRate = (DWORD) floor(frameRate + 0.5f);
info.dwSuggestedBufferSize = rowBytes * height;
SetRect(&info.rcFrame, 0, 0, width, height);
hr = AVIFileCreateStream(aviFile, &aviStream, &info);
if (hr != AVIERR_OK)
{
DPRINTF(0, "Error %08x creating AVI stream.\n", hr);
cleanup();
return false;
}
// Display a dialog to allow the user to select compression options
AVICOMPRESSOPTIONS options;
AVICOMPRESSOPTIONS* arrOptions[1] = { &options };
ZeroMemory(&options, sizeof options);
if (!AVISaveOptions(nullptr, 0, 1, &aviStream,
(LPAVICOMPRESSOPTIONS*) &arrOptions))
{
// The user either clicked on cancel or there was an error
cleanup();
return false;
}
hr = AVIMakeCompressedStream(&compAviStream, aviStream, &options, nullptr);
if (hr != AVIERR_OK)
{
DPRINTF(0, "Error %08x creating compressed AVI stream.\n", hr);
cleanup();
return false;
}
BITMAPINFOHEADER bi;
ZeroMemory(&bi, sizeof bi);
bi.biSize = sizeof bi;
bi.biWidth = width;
bi.biHeight = height;
bi.biPlanes = 1;
bi.biBitCount = 24;
bi.biCompression = BI_RGB;
bi.biSizeImage = rowBytes * height;
bi.biXPelsPerMeter = 0;
bi.biYPelsPerMeter = 0;
bi.biClrUsed = 0;
bi.biClrImportant = 0;
hr = AVIStreamSetFormat(compAviStream, 0, &bi, sizeof bi);
if (hr != AVIERR_OK)
{
DPRINTF(0, "AVIStreamSetFormat failed: %08x\n", hr);
cleanup();
return false;
}
capturing = true;
frameCounter = 0;
return true;
}
bool AVICapture::end()
{
capturing = false;
cleanup();
return true;
}
bool AVICapture::captureFrame()
{
if (!capturing)
return false;
// Get the dimensions of the current viewport
int x, y, w, h;
renderer->getViewport(&x, &y, &w, &h);
x += (w - width) / 2;
y += (h - height) / 2;
renderer->captureFrame(x, y, width, height,
Renderer::PixelFormat::BGR_EXT,
image);
int rowBytes = (width * 3 + 3) & ~0x3;
LONG samplesWritten = 0;
LONG bytesWritten = 0;
HRESULT hr = AVIStreamWrite(compAviStream,
frameCounter,
1,
image,
rowBytes * height,
AVIIF_KEYFRAME,
&samplesWritten,
&bytesWritten);
if (hr != AVIERR_OK)
{
DPRINTF(0, "AVIStreamWrite failed on frame %d\n", frameCounter);
return false;
}
// fmt::printf("Writing frame: %d %d => %d bytes\n",
// frameCounter, rowBytes * height, bytesWritten);
frameCounter++;
return true;
}
void AVICapture::cleanup()
{
if (aviStream != nullptr)
{
AVIStreamRelease(aviStream);
aviStream = nullptr;
}
if (compAviStream != nullptr)
{
AVIStreamRelease(compAviStream);
compAviStream = nullptr;
}
if (aviFile != nullptr)
{
AVIFileRelease(aviFile);
aviFile = nullptr;
}
if (image != nullptr)
{
delete[] image;
image = nullptr;
}
}
int AVICapture::getWidth() const
{
return width;
}
int AVICapture::getHeight() const
{
return height;
}
float AVICapture::getFrameRate() const
{
return frameRate;
}
int AVICapture::getFrameCount() const
{
return frameCounter;
}

View File

@ -1,54 +0,0 @@
// avicapture.h
//
// Copyright (C) 2001, Chris Laurel <claurel@shatters.net>
//
// This program is free software; you can redistribute it and/or
// modify it under the terms of the GNU General Public License
// as published by the Free Software Foundation; either version 2
// of the License, or (at your option) any later version.
#ifndef _AVICAPTURE_H_
#define _AVICAPTURE_H_
#include <windows.h>
#include <windowsx.h>
#include <vfw.h>
#include "moviecapture.h"
class AVICapture : public MovieCapture
{
public:
AVICapture(const Renderer *);
virtual ~AVICapture();
bool start(const std::string& filename, int w, int h, float fps);
bool end();
bool captureFrame();
int getWidth() const;
int getHeight() const;
float getFrameRate() const;
int getFrameCount() const;
// These are unused for now:
virtual void setAspectRatio(int, int) {};
virtual void setQuality(float) {};
virtual void recordingStatus(bool) {};
private:
void cleanup();
private:
int width{ -1 };
int height{ -1 };
float frameRate{ 30.0f };
int frameCounter{ 0 };
bool capturing{ false };
PAVIFILE aviFile{ nullptr };
PAVISTREAM aviStream{ nullptr };
PAVISTREAM compAviStream{ nullptr };
unsigned char* image{ nullptr };
};
#endif // _AVICAPTURE_H_

View File

@ -3522,7 +3522,7 @@ void CelestiaCore::renderOverlay()
overlay->moveBy((float) ((width - movieWidth) / 2),
(float) ((height + movieHeight) / 2 + 2));
overlay->beginText();
fmt::fprintf(*overlay, _("%dx%d at %f fps %s"),
fmt::fprintf(*overlay, _("%dx%d at %.2f fps %s"),
movieWidth, movieHeight,
movieCapture->getFrameRate(),
recording ? _("Recording") : _("Paused"));

View File

@ -97,6 +97,8 @@ CelestiaConfig* ReadCelestiaConfig(const fs::path& filename, CelestiaConfig *con
configParams->getString("ProjectionMode", config->projectionMode);
configParams->getString("ViewportEffect", config->viewportEffect);
configParams->getString("WarpMeshFile", config->warpMeshFile);
configParams->getString("X264EncoderOptions", config->x264EncoderOptions);
configParams->getString("FFVHEncoderOptions", config->ffvhEncoderOptions);
float maxDist = 1.0;
configParams->getNumber("SolarSystemMaxDistance", maxDist);

View File

@ -82,6 +82,9 @@ public:
std::string projectionMode;
std::string viewportEffect;
std::string warpMeshFile;
std::string x264EncoderOptions;
std::string ffvhEncoderOptions;
};
CelestiaConfig* ReadCelestiaConfig(const fs::path& filename, CelestiaConfig* config = nullptr);

View File

@ -0,0 +1,595 @@
#define AVCODEC_DEBUG 0
#include "ffmpegcapture.h"
#define __STDC_CONSTANT_MACROS
extern "C"
{
#include <libavutil/timestamp.h>
#include <libavutil/pixdesc.h>
#include <libavutil/opt.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
}
#include <iostream>
#include <vector>
#include <fmt/format.h>
using namespace std;
// a wrapper around a single output AVStream
class FFMPEGCapturePrivate
{
FFMPEGCapturePrivate() = default;
~FFMPEGCapturePrivate();
bool init(const fs::path& fn);
bool addStream(int w, int h, float fps);
bool openVideo();
bool start();
bool writeVideoFrame(bool = false);
void finish();
void setVideoCodec(int);
bool isSupportedPixelFormat(enum AVPixelFormat) const;
int writePacket();
AVStream *st { nullptr };
AVFrame *frame { nullptr };
AVFrame *tmpfr { nullptr };
AVCodecContext *enc { nullptr };
AVFormatContext *oc { nullptr };
AVCodec *vc { nullptr };
AVPacket *pkt { nullptr };
SwsContext *swsc { nullptr };
const Renderer *renderer { nullptr };
// pts of the next frame that will be generated
int64_t nextPts { 0 };
// requested bitrate
int64_t bit_rate { 400000 };
AVCodecID vc_id { AV_CODEC_ID_FFVHUFF };
float fps { 0 };
bool capturing { false };
fs::path filename;
std::string vc_options;
public:
#if (LIBAVCODEC_VERSION_INT < AV_VERSION_INT(58, 10, 100)) // ffmpeg < 4.0
static bool registered;
#endif
friend class FFMPEGCapture;
};
#if (LIBAVCODEC_VERSION_INT < AV_VERSION_INT(58, 10, 100)) // ffmpeg < 4.0
bool FFMPEGCapturePrivate::registered = false;
#endif
bool FFMPEGCapturePrivate::init(const fs::path& filename)
{
this->filename = filename;
#if (LIBAVCODEC_VERSION_INT < AV_VERSION_INT(58, 10, 100)) // ffmpeg < 4.0
if (!FFMPEGCapturePrivate::registered)
{
av_register_all();
FFMPEGCapturePrivate::registered = true;
}
#endif
// always use matroska (*.mkv) as a container
// don't change filename.string().c_str() -> filename.c_str()!
// on Windows c_str() returns wchar_t*
avformat_alloc_output_context2(&oc, nullptr, "matroska", filename.string().c_str());
return oc != nullptr;
}
bool FFMPEGCapturePrivate::isSupportedPixelFormat(enum AVPixelFormat format) const
{
const enum AVPixelFormat *p = vc->pix_fmts;
if (p == nullptr)
return false;
for (; *p != -1; p++)
{
if (*p == format)
return true;
}
return false;
}
#if AVCODEC_DEBUG
static const char* to_str(AVOptionType type)
{
switch(type)
{
case AV_OPT_TYPE_INT:
return "int";
case AV_OPT_TYPE_INT64:
return "int64";
case AV_OPT_TYPE_DOUBLE:
return "double";
case AV_OPT_TYPE_FLOAT:
return "float";
case AV_OPT_TYPE_STRING:
return "string";
case AV_OPT_TYPE_BINARY:
return "binary";
default:
return "other";
}
}
static void listCodecOptions(const AVCodecContext *enc)
{
const AVOption *opt = nullptr;
cout << "supported options:\n";
while ((opt = av_opt_next(enc->priv_data, opt)) != nullptr)
{
if (opt->type == AV_OPT_TYPE_CONST)
{
fmt::print("\tname: {}\n", opt->name);
}
else
{
fmt::print("\tname: {}, type: {}, help: {}, min: {}, max: {}\n",
opt->name, to_str(opt->type), opt->help, opt->min, opt->max);
}
}
}
static void listEncoderParameters(const AVCodec *vc)
{
fmt::print("codec: {} ({})\n", vc->name, vc->long_name);
cout << "supported framerates:\n";
const AVRational *f = vc->supported_framerates;
if (f != nullptr)
{
for (; f->num != 0 && f->den != 0; f++)
fmt::print("\t{} {}\n", f->num, f->den);
}
else
{
cout << "\tany\n";
}
cout << "supported pixel formats:\n";
const enum AVPixelFormat *p = vc->pix_fmts;
if (p != nullptr)
{
for (; *p != -1; p++)
fmt::print("\t{}\n", av_pix_fmt_desc_get(*p)->name);
}
else
{
cout << "\tunknown\n";
}
cout << "recognized profiles:\n";
const AVProfile *r = vc->profiles;
if (r != nullptr)
{
for (; r->profile != FF_PROFILE_UNKNOWN; r++)
fmt::print("\t{} {}\n", r->profile, r->name);
}
else
{
cout << "\tunknown\n";
}
}
#endif
int FFMPEGCapturePrivate::writePacket()
{
// rescale output packet timestamp values from codec to stream timebase
av_packet_rescale_ts(pkt, enc->time_base, st->time_base);
pkt->stream_index = st->index;
// Write the compressed frame to the media file.
return av_interleaved_write_frame(oc, pkt);
}
// add an output stream
bool FFMPEGCapturePrivate::addStream(int width, int height, float fps)
{
this->fps = fps;
// find the encoder
vc = avcodec_find_encoder(vc_id);
if (vc == nullptr)
{
cout << "Video codec isn't found\n";
return false;
}
#if AVCODEC_DEBUG
listEncoderParameters(vc);
#endif
st = avformat_new_stream(oc, nullptr);
if (st == nullptr)
{
cout << "Unable to alloc a new stream\n";
return false;
}
st->id = oc->nb_streams - 1;
enc = avcodec_alloc_context3(vc);
if (enc == nullptr)
{
cout << "Unable to alloc a new context\n";
return false;
}
enc->codec_id = oc->oformat->video_codec = vc_id;
enc->bit_rate = bit_rate;
#if 0
enc->rc_min_rate = ...;
enc->rc_max_rate = ...;
enc->bit_rate_tolerance = 0;
#endif
// Resolution must be a multiple of two
enc->width = width;
enc->height = height;
// timebase: This is the fundamental unit of time (in seconds) in terms
// of which frame timestamps are represented. For fixed-fps content,
// timebase should be 1/framerate and timestamp increments should be
// identical to 1.
if (abs(fps - 29.97f) < 1e-5f)
st->time_base = { 1001, 30000 };
else if (abs(fps - 23.976f) < 1e-5f)
st->time_base = { 1001, 24000 };
else
st->time_base = { 1, (int) fps };
enc->time_base = st->time_base;
enc->framerate = st->avg_frame_rate = { st->time_base.den, st->time_base.num };
enc->gop_size = 12; // emit one intra frame every twelve frames at most
// find the best pixel format to convert to from AV_PIX_FMT_RGB24
if (isSupportedPixelFormat(AV_PIX_FMT_YUV420P))
{
enc->pix_fmt = AV_PIX_FMT_YUV420P;
}
else
{
enc->pix_fmt = avcodec_find_best_pix_fmt_of_list(vc->pix_fmts, AV_PIX_FMT_RGB24, 0, nullptr);
if (enc->pix_fmt == AV_PIX_FMT_NONE)
avcodec_default_get_format(enc, &(enc->pix_fmt));
}
if (enc->codec_id == AV_CODEC_ID_MPEG1VIDEO)
{
// Need to avoid usage of macroblocks in which some coeffs overflow.
// This does not happen with normal video, it just happens here as
// the motion of the chroma plane does not match the luma plane.
enc->mb_decision = 2;
}
// Some formats want stream headers to be separate.
if (oc->oformat->flags & AVFMT_GLOBALHEADER)
enc->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
#if AVCODEC_DEBUG
listCodecOptions(enc);
#endif
return true;
}
bool FFMPEGCapturePrivate::start()
{
// open the output file, if needed
if ((oc->oformat->flags & AVFMT_NOFILE) == 0)
{
if (avio_open(&oc->pb, filename.string().c_str(), AVIO_FLAG_WRITE) < 0)
{
cout << "Failed to open video file\n";
return false;
}
}
// Write the stream header, if any.
if (avformat_write_header(oc, nullptr) < 0)
{
cout << "Failed to write header\n";
return false;
}
av_dump_format(oc, 0, filename.string().c_str(), 1);
if ((pkt = av_packet_alloc()) == nullptr)
{
cout << "Failed to allocate a packet\n";
return false;
}
return true;
}
bool FFMPEGCapturePrivate::openVideo()
{
AVDictionary *opts = nullptr;
const char *str = "";
if (av_dict_parse_string(&opts, vc_options.c_str(), "=", ",", 0) != 0)
cout << "Failed to parse error codec parameters\n";
// open the codec
if (avcodec_open2(enc, vc, &opts) < 0)
{
cout << "Failed to open the codec\n";
av_dict_free(&opts);
return false;
}
if (av_dict_count(opts) > 0)
{
cout << "Unrecognized options:\n";
AVDictionaryEntry *t = nullptr;
while ((t = av_dict_get(opts, "", t, AV_DICT_IGNORE_SUFFIX)) != nullptr)
fmt::print("\t{}={}\n", t->key, t->value);
}
av_dict_free(&opts);
// allocate and init a re-usable frame
if ((frame = av_frame_alloc()) == nullptr)
{
cout << "Failed to allocate destination frame\n";
return false;
}
frame->format = enc->pix_fmt;
frame->width = enc->width;
frame->height = enc->height;
// allocate the buffers for the frame data
if (av_frame_get_buffer(frame, 32) < 0)
{
cout << "Failed to allocate destination frame buffer\n";
return false;
}
if (enc->pix_fmt != AV_PIX_FMT_RGB24)
{
// as we only grab a RGB24 picture, we must convert it
// to the codec pixel format if needed
swsc = sws_getContext(enc->width, enc->height, AV_PIX_FMT_RGB24,
enc->width, enc->height, enc->pix_fmt,
SWS_BITEXACT, nullptr, nullptr, nullptr);
if (swsc == nullptr)
{
cout << "Failed to allocate SWS context\n";
return false;
}
// allocate and init a temporary frame
if((tmpfr = av_frame_alloc()) == nullptr)
{
cout << "Failed to allocate temp frame\n";
return false;
}
tmpfr->format = AV_PIX_FMT_RGB24;
tmpfr->width = enc->width;
tmpfr->height = enc->height;
// allocate the buffers for the frame data
if (av_frame_get_buffer(tmpfr, 32) < 0)
{
cout << "Failed to allocate temp frame buffer\n";
return false;
}
}
// copy the stream parameters to the muxer
if (avcodec_parameters_from_context(st->codecpar, enc) < 0)
{
cout << "Failed to copy the stream parameters to the muxer\n";
return false;
}
return true;
}
static void captureImage(AVFrame *pict, int width, int height, const Renderer *r)
{
int x, y, w, h;
r->getViewport(&x, &y, &w, &h);
x += (w - width) / 2;
y += (h - height) / 2;
r->captureFrame(x, y, width, height,
Renderer::PixelFormat::RGB,
pict->data[0]);
// Read image is vertically flipped
// TODO: this should go to Renderer::captureFrame()
int realWidth = width * 3; // 3 bytes per pixel
uint8_t *tempLine = new uint8_t[realWidth];
uint8_t *fb = pict->data[0];
for (int i = 0, p = realWidth * (height - 1); i < p; i += realWidth, p -= realWidth)
{
memcpy(tempLine, &fb[i], realWidth);
memcpy(&fb[i], &fb[p], realWidth);
memcpy(&fb[p], tempLine, realWidth);
}
delete[] tempLine;
}
// encode one video frame and send it to the muxer
// returns true on success, false on error
bool FFMPEGCapturePrivate::writeVideoFrame(bool finalize)
{
AVFrame *frame = finalize ? nullptr : this->frame;
// check if we want to generate more frames
if (!finalize)
{
// when we pass a frame to the encoder, it may keep a reference to it
// internally; make sure we do not overwrite it here
if (av_frame_make_writable(frame) < 0)
{
cout << "Failed to make the frame writable\n";
return false;
}
if (enc->pix_fmt != AV_PIX_FMT_RGB24)
{
captureImage(tmpfr, enc->width, enc->height, renderer);
// we need to compute the correct line width of our source
// data. as we grab as RGB24, we multiply the width by 3.
const int linesize = 3 * enc->width;
sws_scale(swsc, tmpfr->data, &linesize, 0, enc->height,
frame->data, frame->linesize);
}
else
{
captureImage(frame, enc->width, enc->height, renderer);
}
frame->pts = nextPts++;
}
av_init_packet(pkt);
// encode the image
if (avcodec_send_frame(enc, frame) < 0)
{
cout << "Failed to send the frame\n";
return false;
}
for (;;)
{
int ret = avcodec_receive_packet(enc, pkt);
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
break;
if (ret >= 0)
{
ret = writePacket();
av_packet_unref(pkt);
}
if (ret < 0)
{
cout << "Failed to receive/unref the packet\n";
return false;
}
}
return true;
}
void FFMPEGCapturePrivate::finish()
{
writeVideoFrame(true);
// Write the trailer, if any. The trailer must be written before you
// close the CodecContexts open when you wrote the header; otherwise
// av_write_trailer() may try to use memory that was freed on
// av_codec_close().
av_write_trailer(oc);
if (!(oc->oformat->flags & AVFMT_NOFILE))
avio_closep(&oc->pb);
}
FFMPEGCapturePrivate::~FFMPEGCapturePrivate()
{
avcodec_free_context(&enc);
av_frame_free(&frame);
if (tmpfr != nullptr)
av_frame_free(&tmpfr);
avformat_free_context(oc);
av_packet_free(&pkt);
}
FFMPEGCapture::FFMPEGCapture(const Renderer *r) :
MovieCapture(r),
d(new FFMPEGCapturePrivate)
{
d->renderer = r;
}
FFMPEGCapture::~FFMPEGCapture()
{
delete d;
}
int FFMPEGCapture::getFrameCount() const
{
return d->nextPts;
}
int FFMPEGCapture::getWidth() const
{
return d->enc->width;
}
int FFMPEGCapture::getHeight() const
{
return d->enc->height;
}
float FFMPEGCapture::getFrameRate() const
{
return d->fps;
}
bool FFMPEGCapture::start(const fs::path& filename, int width, int height, float fps)
{
if (!d->init(filename) ||
!d->addStream(width, height, fps) ||
!d->openVideo() ||
!d->start())
{
return false;
}
d->capturing = true; // XXX
return true;
}
bool FFMPEGCapture::end()
{
if (!d->capturing)
return false;
d->finish();
d->capturing = false;
return true;
}
bool FFMPEGCapture::captureFrame()
{
return d->capturing && d->writeVideoFrame();
}
void FFMPEGCapture::setVideoCodec(AVCodecID vc_id)
{
d->vc_id = vc_id;
}
void FFMPEGCapture::setBitRate(int64_t bit_rate)
{
d->bit_rate = bit_rate;
}
void FFMPEGCapture::setEncoderOptions(const std::string &s)
{
d->vc_options = s;
}

View File

@ -0,0 +1,39 @@
#pragma once
#include "moviecapture.h"
#define __STDC_CONSTANT_MACROS
extern "C"
{
#include <libavformat/avformat.h>
}
#include <celengine/hash.h>
class FFMPEGCapturePrivate;
class FFMPEGCapture : public MovieCapture
{
public:
FFMPEGCapture(const Renderer *r);
~FFMPEGCapture() override;
bool start(const fs::path&, int, int, float) override;
bool end() override;
bool captureFrame() override;
int getFrameCount() const override;
int getWidth() const override;
int getHeight() const override;
float getFrameRate() const override;
void setAspectRatio(int, int) override {};
void setQuality(float) override {};
void recordingStatus(bool) override {};
void setVideoCodec(AVCodecID);
void setBitRate(int64_t);
void setEncoderOptions(const std::string&);
private:
FFMPEGCapturePrivate *d{ nullptr };
};
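All three frontends drive the new class the same way; a minimal sketch of the intended call sequence (renderer, config and the recording loop are placeholders, not part of this commit):

auto* capture = new FFMPEGCapture(renderer);
capture->setVideoCodec(AV_CODEC_ID_H264);   // or AV_CODEC_ID_FFVHUFF for lossless
capture->setBitRate(400000);
capture->setEncoderOptions(config->x264EncoderOptions);
if (capture->start("out.mkv", 1280, 720, 29.97f))
{
    while (recording)                       // e.g. once per rendered frame
        capture->captureFrame();
    capture->end();
}
else
{
    delete capture;
}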

View File

@ -27,10 +27,11 @@
#include <celestia/celestiacore.h>
#include <celestia/helper.h>
#include <celestia/url.h>
#include <celcompat/charconv.h>
#include <celutil/filetype.h>
#include <celutil/gettext.h>
#ifdef THEORA
#include <celestia/oggtheoracapture.h>
#ifdef USE_FFMPEG
#include <celestia/ffmpegcapture.h>
#endif
#include "actions.h"
@ -54,14 +55,54 @@ using namespace std;
/* Declarations: Action Helpers */
static void openScript(const char* filename, AppData* app);
static void captureImage(const char* filename, AppData* app);
#ifdef THEORA
static void captureMovie(const char* filename, int aspect, float fps, float quality, AppData* app);
#ifdef USE_FFMPEG
static void captureMovie(const char* filename, const int resolution[], float fps,
AVCodecID codec, float bitrate, AppData* app);
#endif
static void textInfoDialog(const char *txt, const char *title, AppData* app);
static void setRenderFlag(AppData* a, uint64_t flag, gboolean state);
static void setOrbitMask(AppData* a, int mask, gboolean state);
static void setLabelMode(AppData* a, int mode, gboolean state);
#ifdef USE_FFMPEG
static const int MovieSizes[][2] =
{
{ 160, 120 },
{ 320, 240 },
{ 640, 480 },
{ 720, 480 },
{ 720, 576 },
{ 1024, 768 },
{ 1280, 720 },
{ 1920, 1080 }
};
static const float MovieFramerates[] = { 15.0f, 23.976f, 24.0f, 25.0f, 29.97f, 30.0f, 60.0f };
struct MovieCodec
{
AVCodecID codecId;
const char *codecDesc;
};
static MovieCodec MovieCodecs[2] =
{
{ AV_CODEC_ID_FFVHUFF, N_("Lossless") },
{ AV_CODEC_ID_H264, N_("Lossy (H.264)") }
};
static void insert_text_event(GtkEditable *editable, const gchar *text, gint length, gint *position, gpointer data)
{
for (int i = 0; i < length; i++)
{
if (!isdigit(text[i]))
{
g_signal_stop_emission_by_name(G_OBJECT(editable), "insert-text");
return;
}
}
}
#endif
/* File -> Copy URL */
void actionCopyURL(GtkAction*, AppData* app)
@ -184,7 +225,7 @@ void actionCaptureImage(GtkAction*, AppData* app)
/* File -> Capture Movie... */
void actionCaptureMovie(GtkAction*, AppData* app)
{
#ifdef THEORA
#ifdef USE_FFMPEG
// TODO: The menu item should be disabled so that the user doesn't even
// have the opportunity to record two movies simultaneously; the only
// thing missing to make this happen is notification when recording
@ -201,7 +242,7 @@ void actionCaptureMovie(GtkAction*, AppData* app)
return;
}
GtkWidget* fs = gtk_file_chooser_dialog_new("Save Ogg Theora Movie to File",
GtkWidget* fs = gtk_file_chooser_dialog_new("Save Matroska Movie to File",
GTK_WINDOW(app->mainWindow),
GTK_FILE_CHOOSER_ACTION_SAVE,
GTK_STOCK_CANCEL, GTK_RESPONSE_CANCEL,
@ -209,8 +250,8 @@ void actionCaptureMovie(GtkAction*, AppData* app)
NULL);
GtkFileFilter* filter = gtk_file_filter_new();
gtk_file_filter_set_name(filter, "Ogg Files");
gtk_file_filter_add_pattern(filter, "*.ogg");
gtk_file_filter_set_name(filter, "Matroska Files");
gtk_file_filter_add_pattern(filter, "*.mkv");
gtk_file_chooser_add_filter(GTK_FILE_CHOOSER(fs), filter);
#if GTK_CHECK_VERSION(2, 7, 0)
@ -223,31 +264,48 @@ void actionCaptureMovie(GtkAction*, AppData* app)
GtkWidget* hbox = gtk_hbox_new(FALSE, CELSPACING);
gtk_container_set_border_width(GTK_CONTAINER(hbox), CELSPACING);
GtkWidget* rlabel = gtk_label_new("Aspect Ratio:");
GtkWidget* rlabel = gtk_label_new("Resolution:");
gtk_box_pack_start(GTK_BOX(hbox), rlabel, TRUE, TRUE, 0);
GtkWidget* aspectmenubox = gtk_combo_box_text_new();
gtk_combo_box_text_append_text(GTK_COMBO_BOX_TEXT(aspectmenubox), "1:1");
gtk_combo_box_text_append_text(GTK_COMBO_BOX_TEXT(aspectmenubox), "4:3");
gtk_combo_box_text_append_text(GTK_COMBO_BOX_TEXT(aspectmenubox), "16:9");
gtk_combo_box_text_append_text(GTK_COMBO_BOX_TEXT(aspectmenubox), "Display");
gtk_combo_box_set_active(GTK_COMBO_BOX(aspectmenubox), 0);
gtk_box_pack_start(GTK_BOX(hbox), aspectmenubox, FALSE, FALSE, 0);
GtkWidget* vscombo = gtk_combo_box_text_new();
for (const auto& size : MovieSizes)
{
gtk_combo_box_text_append_text(GTK_COMBO_BOX_TEXT(vscombo),
fmt::format("{} x {}", size[0], size[1]).c_str());
}
gtk_combo_box_set_active(GTK_COMBO_BOX(vscombo), 0);
gtk_box_pack_start(GTK_BOX(hbox), vscombo, FALSE, FALSE, 0);
GtkWidget* flabel = gtk_label_new("Frame Rate:");
gtk_box_pack_start(GTK_BOX(hbox), flabel, TRUE, TRUE, 0);
GtkWidget* fpsspin = gtk_spin_button_new_with_range(5.0, 30.0, 0.01);
gtk_box_pack_start(GTK_BOX(hbox), fpsspin, TRUE, TRUE, 0);
gtk_spin_button_set_value(GTK_SPIN_BUTTON(fpsspin), 12.0);
gtk_spin_button_set_increments(GTK_SPIN_BUTTON(fpsspin), 0.01, 1.0);
GtkWidget* frcombo = gtk_combo_box_text_new();
for (float i : MovieFramerates)
{
gtk_combo_box_text_append_text(GTK_COMBO_BOX_TEXT(frcombo),
fmt::format("{:.3f}", i).c_str());
}
gtk_combo_box_set_active(GTK_COMBO_BOX(frcombo), 0);
gtk_box_pack_start(GTK_BOX(hbox), frcombo, FALSE, FALSE, 0);
GtkWidget* qlabel = gtk_label_new("Video Quality:");
gtk_box_pack_start(GTK_BOX(hbox), qlabel, TRUE, TRUE, 0);
GtkWidget* vclabel = gtk_label_new("Video Codec:");
gtk_box_pack_start(GTK_BOX(hbox), vclabel, TRUE, TRUE, 0);
GtkWidget* qspin = gtk_spin_button_new_with_range(0.0, 10.0, 1.0);
gtk_box_pack_start(GTK_BOX(hbox), qspin, TRUE, TRUE, 0);
gtk_spin_button_set_value(GTK_SPIN_BUTTON(qspin), 10.0);
GtkWidget* vccombo = gtk_combo_box_text_new();
for (const auto &mcodec : MovieCodecs)
{
gtk_combo_box_text_append_text(GTK_COMBO_BOX_TEXT(vccombo),
mcodec.codecDesc);
}
gtk_combo_box_set_active(GTK_COMBO_BOX(vccombo), 0);
gtk_box_pack_start(GTK_BOX(hbox), vccombo, FALSE, FALSE, 0);
GtkWidget* brlabel = gtk_label_new("Bitrate:");
gtk_box_pack_start(GTK_BOX(hbox), brlabel, TRUE, TRUE, 0);
GtkWidget* brentry = gtk_entry_new();
gtk_entry_set_text(GTK_ENTRY(brentry), "400000");
g_signal_connect(G_OBJECT(brentry), "insert-text", G_CALLBACK(insert_text_event), NULL);
gtk_box_pack_start(GTK_BOX(hbox), brentry, TRUE, TRUE, 0);
gtk_widget_show_all(hbox);
gtk_file_chooser_set_extra_widget(GTK_FILE_CHOOSER(fs), hbox);
@ -255,14 +313,21 @@ void actionCaptureMovie(GtkAction*, AppData* app)
if (gtk_dialog_run(GTK_DIALOG(fs)) == GTK_RESPONSE_ACCEPT)
{
char* filename = gtk_file_chooser_get_filename(GTK_FILE_CHOOSER(fs));
int aspect = gtk_combo_box_get_active(GTK_COMBO_BOX(aspectmenubox));
double fps = gtk_spin_button_get_value(GTK_SPIN_BUTTON(fpsspin));
double quality = gtk_spin_button_get_value(GTK_SPIN_BUTTON(qspin));
int vsidx = gtk_combo_box_get_active(GTK_COMBO_BOX(vscombo));
int fridx = gtk_combo_box_get_active(GTK_COMBO_BOX(frcombo));
int vcidx = gtk_combo_box_get_active(GTK_COMBO_BOX(vccombo));
const gchar *brtext = gtk_entry_get_text(GTK_ENTRY(brentry));
const int *resolution = MovieSizes[vsidx];
float fps = MovieFramerates[fridx];
AVCodecID codec = MovieCodecs[vcidx].codecId;
float bitrate = 400000;
const gchar *last = &brtext[gtk_entry_get_text_length(GTK_ENTRY(brentry))];
std::from_chars(brtext, last, bitrate);
gtk_widget_destroy(fs);
for (int i=0; i < 10 && gtk_events_pending ();i++)
gtk_main_iteration ();
captureMovie(filename, aspect, fps, quality, app);
captureMovie(filename, resolution, fps, codec, bitrate, app);
g_free(filename);
}
else
@ -1046,34 +1111,23 @@ static void captureImage(const char* filename, AppData* app)
}
/* Movie capturing helper called by actionCaptureMovie() */
#ifdef THEORA
static void captureMovie(const char* filename, int aspect, float fps, float quality, AppData* app)
#ifdef USE_FFMPEG
static void captureMovie(const char* filename, const int resolution[], float fps,
AVCodecID codec, float bitrate, AppData* app)
{
/* Get the dimensions of the current viewport */
array<int, 4> viewport;
app->renderer->getViewport(viewport);
auto* movieCapture = new FFMPEGCapture(app->renderer);
movieCapture->setVideoCodec(codec);
movieCapture->setBitRate(bitrate);
if (codec == AV_CODEC_ID_H264)
movieCapture->setEncoderOptions(app->core->getConfig()->x264EncoderOptions);
else
movieCapture->setEncoderOptions(app->core->getConfig()->ffvhEncoderOptions);
MovieCapture* movieCapture = new OggTheoraCapture(app->renderer);
switch (aspect)
{
case 0:
movieCapture->setAspectRatio(1, 1);
break;
case 1:
movieCapture->setAspectRatio(4, 3);
break;
case 2:
movieCapture->setAspectRatio(16, 9);
break;
default:
movieCapture->setAspectRatio(viewport[2], viewport[3]);
break;
}
movieCapture->setQuality(quality);
bool success = movieCapture->start(filename, viewport[2], viewport[3], fps);
bool success = movieCapture->start(filename, resolution[0], resolution[1], fps);
if (success)
{
app->core->initMovieCapture(movieCapture);
}
else
{
delete movieCapture;

View File

@ -11,8 +11,9 @@
#define _MOVIECAPTURE_H_
#include <string>
#include <vector>
#include <celengine/render.h>
#include <celcompat/filesystem.h>
class MovieCapture
{
@ -20,7 +21,7 @@ class MovieCapture
MovieCapture(const Renderer *r) : renderer(r) {};
virtual ~MovieCapture() {};
virtual bool start(const std::string& filename,
virtual bool start(const fs::path& filename,
int width, int height,
float fps) = 0;
virtual bool end() = 0;

View File

@ -1,512 +0,0 @@
/*
* Copyright (C) 2006, William K Volkman <wkvsf@users.sourceforge.net>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* http://www.fourcc.org/fccyvrgb.php
* Ey = 0.299R+0.587G+0.114B
* Ecr = 0.713(R - Ey) = 0.500R-0.419G-0.081B
* Ecb = 0.564(B - Ey) = -0.169R-0.331G+0.500B
*
* the defined range for Y is [16,235] (220 steps) and the valid ranges
* for Cr and Cb are [16,239] (235 steps)
*
* http://www.neuro.sfc.keio.ac.jp/~aly/polygon/info/color-space-faq.html
* RGB -> YUV | YUV -> RGB
* Y = 0.299*Red+0.587*Green+0.114*Blue | Red = Y+0.000*U+1.140*V
* U = -0.147*Red-0.289*Green+0.436*Blue | Green = Y-0.396*U-0.581*V
* V = 0.615*Red-0.515*Green-0.100*Blue | Blue = Y+2.029*U+0.000*V
*
* +----------------+---------------+-----------------+----------------+
* | Recommendation | Coef. for red | Coef. for Green | Coef. for Blue |
* +----------------+---------------+-----------------+----------------+
* | Rec 601-1 | 0.299 | 0.587 | 0.114 |
* | Rec 709 | 0.2125 | 0.7154 | 0.0721 |
* | ITU | 0.2125 | 0.7154 | 0.0721 |
* +----------------+---------------+-----------------+----------------+
* RGB -> YCbCr
* Y = Coef. for red*Red+Coef. for green*Green+Coef. for blue*Blue
* Cb = (Blue-Y)/(2-2*Coef. for blue)
* Cr = (Red-Y)/(2-2*Coef. for red)
*
* RGB -> YCbCr (with Rec 601-1 specs) | YCbCr (with Rec 601-1 specs) -> RGB
* Y= 0.2989*Red+0.5866*Green+0.1145*Blue | Red= Y+0.0000*Cb+1.4022*Cr
* Cb=-0.1687*Red-0.3312*Green+0.5000*Blue | Green=Y-0.3456*Cb-0.7145*Cr
* Cr= 0.5000*Red-0.4183*Green-0.0816*Blue | Blue= Y+1.7710*Cb+0.0000*Cr
*
* http://en.wikipedia.org/wiki/YUV/RGB_conversion_formulas
* Y := min(abs(r * 2104 + g * 4130 + b * 802 + 4096 + 131072) >> 13,235)
* U := min(abs(r * -1214 + g * -2384 + b * 3598 + 4096 + 1048576) >> 13,240)
* V := min(abs(r * 3598 + g * -3013 + b * -585 + 4096 + 1048576) >> 13,240)
*/
#ifndef _GNU_SOURCE
# define _GNU_SOURCE
#endif
#ifndef _LARGEFILE_SOURCE
# define _LARGEFILE_SOURCE
#endif
#ifndef _LARGEFILE64_SOURCE
# define _LARGEFILE64_SOURCE
#endif
#ifndef _FILE_OFFSET_BITS
# define _FILE_OFFSET_BITS 64
#endif
#include <config.h>
#ifndef _REENTRANT
# define _REENTRANT
#endif
#include <cstdlib>
#include <cmath>
#include <celutil/debug.h>
#include <celutil/gettext.h>
#include <string>
#include <theora/theora.h>
using namespace std;
#include "oggtheoracapture.h"
// {"video-rate-target",required_argument,nullptr,'V'},
// {"video-quality",required_argument,nullptr,'v'},
// {"aspect-numerator",optional_argument,nullptr,'s'},
// {"aspect-denominator",optional_argument,nullptr,'S'},
// {"framerate-numerator",optional_argument,nullptr,'f'},
// {"framerate-denominator",optional_argument,nullptr,'F'},
OggTheoraCapture::OggTheoraCapture(const Renderer *r):
MovieCapture(r),
video_x(0),
video_y(0),
frame_x(0),
frame_y(0),
frame_x_offset(0),
frame_y_offset(0),
video_an(4),
video_ad(3),
video_hzn(12),
video_hzd(1),
video_r(-1), // 45000 <= video_r <= 2000000 (45Kbps - 2000Kbps)
video_q(63), // 0-63 aka 0-10 * 6.3 the higher the value the faster the encoding and the larger the output file
capturing(false),
video_frame_count(0),
video_bytesout(0),
rowStride(0),
pixels(nullptr),
outfile(nullptr)
{
yuvframe[0] = nullptr;
yuvframe[1] = nullptr;
// Just being anal
memset(&yuv, 0, sizeof(yuv));
memset(&to, 0, sizeof(to));
memset(&videopage, 0, sizeof(videopage));
memset(&op, 0, sizeof(op));
memset(&td, 0, sizeof(td));
memset(&ti, 0, sizeof(ti));
memset(&tc, 0, sizeof(tc));
}
void OggTheoraCapture::setAspectRatio(int aspect_numerator, int aspect_denominator)
{
int a = aspect_numerator;
int b = aspect_denominator;
while (a != b)
{
if (a > b)
a = a - b;
else
b = b - a;
}
if (a > 1) {
video_an = aspect_numerator / a;
video_ad = aspect_denominator / a;
}
else
{
video_an = aspect_numerator;
video_ad = aspect_denominator;
}
}
void OggTheoraCapture::setQuality(float quality)
{
if (quality < 0.0)
video_q = 7;
else if (quality < 1.0)
video_q = 0;
else if (quality <= 10.00)
video_q = (int)ceil(quality * 6.3);
else
video_q = (int)ceil(quality);
}
bool OggTheoraCapture::start(const std::string& filename,
int w, int h,
float fps)
{
if (capturing)
return false;
outfile = fopen(filename.c_str(), "wb");
if (!outfile)
{
DPRINTF(LOG_LEVEL_ERROR, _("Error in creating ogg file %s for capture.\n"), filename.c_str());
return false;
}
/* Set up Ogg output stream */
#ifdef _WIN32
std::srand(std::time(nullptr));
#else
std::srand(time(nullptr));
#endif
ogg_stream_init(&to,std::rand());
frame_x = w;
frame_y = h;
if (fps > 0.05) {
if (fabs(fps - (30000.0/1001.0)) < 1e-5)
{
video_hzn = 30000;
video_hzd = 1001;
}
else if (fabs(fps - (24000.0/1001.0)) < 1e-5)
{
video_hzn = 24000;
video_hzd = 1001;
}
else
{
video_hzn = (int)ceil(fps*1000.0);
video_hzd = 1000;
int a = video_hzn;
int b = video_hzd;
while (a != b)
{
if (a > b)
a = a - b;
else
b = b - a;
}
if (a > 1)
{
video_hzn /= a;
video_hzd /= a;
}
}
}
/* Theora has a divisible-by-sixteen restriction for the encoded video size */
/* scale the frame size up to the nearest /16 and calculate offsets */
video_x=((frame_x + 15) >>4)<<4;
video_y=((frame_y + 15) >>4)<<4;
/* We force the offset to be even.
This ensures that the chroma samples align properly with the luma
samples. */
frame_x_offset=((video_x-frame_x)/2)&~1;
frame_y_offset=((video_y-frame_y)/2)&~1;
theora_info_init(&ti);
ti.width=video_x;
ti.height=video_y;
ti.frame_width=frame_x;
ti.frame_height=frame_y;
ti.offset_x=frame_x_offset;
ti.offset_y=frame_y_offset;
ti.fps_numerator=video_hzn;
ti.fps_denominator=video_hzd;
ti.aspect_numerator=video_an;
ti.aspect_denominator=video_ad;
if (frame_x == 720 && frame_y == 576)
ti.colorspace=OC_CS_ITU_REC_470BG; //OC_CS_UNSPECIFIED;
else
ti.colorspace=OC_CS_ITU_REC_470M; //OC_CS_UNSPECIFIED;
//ti.pixelformat=OC_PF_420;
ti.target_bitrate=video_r;
ti.quality=video_q;
ti.dropframes_p=0;
ti.quick_p=1;
ti.keyframe_auto_p=1;
ti.keyframe_frequency=64;
ti.keyframe_frequency_force=64;
ti.keyframe_data_target_bitrate=(int)(video_r*1.5);
ti.keyframe_auto_threshold=80;
ti.keyframe_mindistance=8;
ti.noise_sensitivity=1;
theora_encode_init(&td,&ti);
theora_info_clear(&ti);
/* first packet will get its own page automatically */
theora_encode_header(&td,&op);
ogg_stream_packetin(&to,&op);
if(ogg_stream_pageout(&to,&videopage) != 1){
cerr << _("Internal Ogg library error.\n");
return false;
}
fwrite(videopage.header, 1, videopage.header_len, outfile);
fwrite(videopage.body, 1, videopage.body_len, outfile);
/* create the remaining theora headers */
theora_comment_init(&tc);
theora_encode_comment(&tc,&op);
theora_comment_clear(&tc);
ogg_stream_packetin(&to,&op);
theora_encode_tables(&td,&op);
ogg_stream_packetin(&to,&op);
while(1)
{
int result = ogg_stream_flush(&to,&videopage);
if( result<0 )
{
/* can't get here */
cerr << _("Internal Ogg library error.\n");
return false;
}
if( result==0 )
break;
fwrite(videopage.header,1,videopage.header_len,outfile);
fwrite(videopage.body,1, videopage.body_len,outfile);
}
/* Initialize the double frame buffer.
* We allocate enough for a 4:4:4 color sampling
*/
yuvframe[0]= new unsigned char[video_x*video_y*3];
yuvframe[1]= new unsigned char[video_x*video_y*3];
// Now the buffer for reading the GL RGB pixels
rowStride = (frame_x * 3 + 3) & ~0x3;
pixels = new unsigned char[rowStride*frame_y];
/* clear initial frame as it may be larger than actual video data */
/* fill Y plane with 0x10 and UV planes with 0x80, for black data */
// The UV plane must be 4:2:0
memset(yuvframe[0],0x10,video_x*video_y);
memset(yuvframe[0]+video_x*video_y,0x80,video_x*video_y*2);
memset(yuvframe[1],0x10,video_x*video_y);
memset(yuvframe[1]+video_x*video_y,0x80,video_x*video_y*2);
yuv.y_width=video_x;
yuv.y_height=video_y;
yuv.y_stride=video_x;
// Note we lie here by saying it's 4:2:0 and we will convert 4:4:4 to 4:2;0 later
yuv.uv_width=video_x/2;
yuv.uv_height=video_y/2;
yuv.uv_stride=video_x/2;
DPRINTF(LOG_LEVEL_VERBOSE,
_("OggTheoraCapture::start() - Theora video: %s %.2f(%d/%d) fps quality %d %dx%d offset (%dx%d)\n"),
filename.c_str(),
(double)video_hzn/(double)video_hzd,
video_hzn,video_hzd,
video_q,
video_x,video_y,
frame_x_offset,frame_y_offset);
capturing = true;
return true;
}
bool OggTheoraCapture::captureFrame()
{
if (!capturing)
return false;
while (ogg_stream_pageout(&to,&videopage)>0)
{
/* flush a video page */
video_bytesout+=fwrite(videopage.header,1,videopage.header_len,outfile);
video_bytesout+=fwrite(videopage.body,1,videopage.body_len,outfile);
}
if(ogg_stream_eos(&to)) return false;
// Get the dimensions of the current viewport
int x, y, w, h;
renderer->getViewport(&x, &y, &w, &h);
x += (w - frame_x) / 2;
y += (h - frame_y) / 2;
renderer->captureFrame(x, y, frame_x, frame_y,
Renderer::PixelFormat::RGB,
pixels);
unsigned char *ybase = yuvframe[0];
unsigned char *ubase = yuvframe[0]+ video_x*video_y;
unsigned char *vbase = yuvframe[0]+ video_x*video_y*2;
// We go ahead and build 4:4:4 frames
for (int y=0; y<frame_y; y++)
{
unsigned char *yptr = ybase + (video_x*(y+frame_y_offset))+frame_x_offset;
unsigned char *uptr = ubase + (video_x*(y+frame_y_offset))+frame_x_offset;
unsigned char *vptr = vbase + (video_x*(y+frame_y_offset))+frame_x_offset;
unsigned char *rgb = pixels + ((frame_y-1-y)*rowStride); // The video is inverted
for (int x=0; x<frame_x; x++)
{
unsigned char r = *rgb++;
unsigned char g = *rgb++;
unsigned char b = *rgb++;
*yptr++ = min(abs(r * 2104 + g * 4130 + b * 802 + 4096 + 131072) >> 13,235);
*uptr++ = min(abs(r * -1214 + g * -2384 + b * 3598 + 4096 + 1048576) >> 13,240);
*vptr++ = min(abs(r * 3598 + g * -3013 + b * -585 + 4096 + 1048576) >> 13,240);
}
}
/*
* The video strategy is to capture one frame ahead so when we're at end of
* stream we can mark last video frame as such. Have two YUV frames before
* encoding. Theora is a one-frame-in,one-frame-out system; submit a frame
* for compression and pull out the packet
*/
if (video_frame_count > 0)
{
yuv.y= yuvframe[1];
yuv.u= yuvframe[1]+ video_x*video_y;
yuv.v= yuvframe[1]+ video_x*video_y*2;
// Convert to 4:2:0
unsigned char * uin0 = yuv.u;
unsigned char * uin1 = yuv.u + video_x;
unsigned char * uout = yuv.u;
unsigned char * vin0 = yuv.v;
unsigned char * vin1 = yuv.v + video_x;
unsigned char * vout = yuv.v;
for (int y = 0; y < video_y; y += 2)
{
for (int x = 0; x < video_x; x += 2)
{
*uout = (uin0[0] + uin0[1] + uin1[0] + uin1[1]) >> 2;
uin0 += 2;
uin1 += 2;
uout++;
*vout = (vin0[0] + vin0[1] + vin1[0] + vin1[1]) >> 2;
vin0 += 2;
vin1 += 2;
vout++;
}
uin0 += video_x;
uin1 += video_x;
vin0 += video_x;
vin1 += video_x;
}
theora_encode_YUVin(&td,&yuv);
theora_encode_packetout(&td,0,&op);
ogg_stream_packetin(&to,&op);
}
video_frame_count += 1;
//if ((video_frame_count % 10) == 0)
// DPRINTF(LOG_LEVEL_VERBOSE, "Writing frame %d\n", video_frame_count);
unsigned char *temp = yuvframe[0];
yuvframe[0] = yuvframe[1];
yuvframe[1] = temp;
frameCaptured();
return true;
}
void OggTheoraCapture::cleanup()
{
capturing = false;
/* clear out state */
if(outfile)
{
DPRINTF(LOG_LEVEL_VERBOSE, _("OggTheoraCapture::cleanup() - wrote %d frames\n"), video_frame_count);
if (video_frame_count > 0)
{
yuv.y= yuvframe[1];
yuv.u= yuvframe[1]+ video_x*video_y;
yuv.v= yuvframe[1]+ video_x*video_y*2 ;
// Convert to 4:2:0
unsigned char * uin0 = yuv.u;
unsigned char * uin1 = yuv.u + video_x;
unsigned char * uout = yuv.u;
unsigned char * vin0 = yuv.v;
unsigned char * vin1 = yuv.v + video_x;
unsigned char * vout = yuv.v;
for (int y = 0; y < video_y; y += 2)
{
for (int x = 0; x < video_x; x += 2)
{
*uout = (uin0[0] + uin0[1] + uin1[0] + uin1[1]) >> 2;
uin0 += 2;
uin1 += 2;
uout++;
*vout = (vin0[0] + vin0[1] + vin1[0] + vin1[1]) >> 2;
vin0 += 2;
vin1 += 2;
vout++;
}
uin0 += video_x;
uin1 += video_x;
vin0 += video_x;
vin1 += video_x;
}
theora_encode_YUVin(&td,&yuv);
theora_encode_packetout(&td,1,&op);
ogg_stream_packetin(&to,&op);
}
while(ogg_stream_pageout(&to,&videopage)>0)
{
/* flush a video page */
video_bytesout+=fwrite(videopage.header,1,videopage.header_len,outfile);
video_bytesout+=fwrite(videopage.body,1,videopage.body_len,outfile);
}
if(ogg_stream_flush(&to,&videopage)>0)
{
/* flush a video page */
video_bytesout+=fwrite(videopage.header,1,videopage.header_len,outfile);
video_bytesout+=fwrite(videopage.body,1,videopage.body_len,outfile);
}
theora_clear(&td);
ogg_stream_clear(&to);
//ogg_stream_destroy(&to); /* Documentation says to do this however we are seeing a double free libogg 1.1.2 */
std::fclose(outfile);
outfile = nullptr;
delete [] yuvframe[0];
delete [] yuvframe[1];
delete [] pixels;
}
}
bool OggTheoraCapture::end()
{
cleanup();
return true;
}
int OggTheoraCapture::getWidth() const
{
return frame_x;
}
int OggTheoraCapture::getHeight() const
{
return frame_y;
}
int OggTheoraCapture::getFrameCount() const
{
return video_frame_count;
}
float OggTheoraCapture::getFrameRate() const
{
return float(video_hzn)/float(video_hzd);
}
OggTheoraCapture::~OggTheoraCapture()
{
cleanup();
}

View File

@ -1,68 +0,0 @@
// Header section
#ifndef _OGGTHEORACAPTURE_H_
#define _OGGTHEORACAPTURE_H_
#include "theora/theora.h"
#include "moviecapture.h"
class OggTheoraCapture : public MovieCapture
{
public:
OggTheoraCapture(const Renderer*);
virtual ~OggTheoraCapture();
bool start(const std::string& filename, int w, int h, float fps);
bool end();
bool captureFrame();
int getWidth() const;
int getHeight() const;
float getFrameRate() const;
int getFrameCount() const;
int getBytesOut() const { return video_bytesout; } ;
void setAspectRatio(int, int);
void setQuality(float);
void recordingStatus(bool) {}; // Added to allow GTK compilation
private:
void cleanup();
private:
int video_x;
int video_y;
int frame_x;
int frame_y;
int frame_x_offset;
int frame_y_offset;
int video_an;
int video_ad;
int video_hzn;
int video_hzd;
int video_r; // 45000 <= video_r <= 2000000 (45Kbps - 2000Kbps)
int video_q; // 0-63 aka 0-10 * 6.3
bool capturing;
int video_frame_count;
int video_bytesout;
// Consider RGB to YUV Color converstion table - jpeglib has one
// but according the standards it's incorrect (generates values 0-255,
// instead of clamped to 16-240).
int rowStride;
unsigned char *pixels;
unsigned char *yuvframe[2];
yuv_buffer yuv;
FILE *outfile;
ogg_stream_state to; /* take physical pages, weld into a logical
stream of packets */
ogg_page videopage; /* one Ogg bitstream page. Theora packets are inside */
ogg_packet op; /* one raw packet of data for encode */
theora_state td;
theora_info ti;
theora_comment tc;
virtual void frameCaptured() {}; /* to update UI status indicator */
};
#endif // _OGGTHEORACAPTURE_H_

View File

@ -11,7 +11,8 @@
// of the License, or (at your option) any later version.
#include <ctime>
//#include <ctime>
#include <memory>
#include <QStandardPaths>
#include <QActionGroup>
@ -64,13 +65,8 @@
#include <celestia/url.h>
#include "qtbookmark.h"
#if defined(_WIN32)
#include "celestia/avicapture.h"
// TODO: Add Mac support
#elif !defined(__APPLE__)
#ifdef THEORA
#include "celestia/oggtheoracapture.h"
#endif
#ifdef USE_FFMPEG
#include "celestia/ffmpegcapture.h"
#endif
#ifndef CONFIG_DATA_DIR
@ -93,6 +89,22 @@ static const int CELESTIA_MAIN_WINDOW_VERSION = 12;
static int fps_to_ms(int fps) { return fps > 0 ? 1000 / fps : 0; }
static int ms_to_fps(int ms) { return ms > 0? 1000 / ms : 0; }
#ifdef USE_FFMPEG
static const int videoSizes[][2] =
{
{ 160, 120 },
{ 320, 240 },
{ 640, 480 },
{ 720, 480 },
{ 720, 576 },
{ 1024, 768 },
{ 1280, 720 },
{ 1920, 1080 }
};
static const float videoFrameRates[] = { 15.0f, 23.976f, 24.0f, 25.0f, 29.97f, 30.0f, 60.0f };
#endif
// Progress notifier class receives update messages from CelestiaCore
// at startup. This simple implementation just forwards messages on
// to the main Celestia window.
@ -592,20 +604,15 @@ void CelestiaAppWindow::slotShowSelectionContextMenu(const QPoint& pos,
menu->popupAtCenter(pos);
}
void CelestiaAppWindow::slotGrabImage()
{
QString dir;
QSettings settings;
settings.beginGroup("Preferences");
if (settings.contains("GrabImageDir"))
{
dir = settings.value("GrabImageDir").toString();
}
else
{
dir = QDir::current().path();
}
QString saveAsName = QFileDialog::getSaveFileName(this,
_("Save Image"),
@ -628,72 +635,61 @@ void CelestiaAppWindow::slotGrabImage()
void CelestiaAppWindow::slotCaptureVideo()
{
// TODO: Add Mac support
#if defined(_WIN32) || (defined(THEORA) && !defined(__APPLE__))
#ifdef USE_FFMPEG
QString dir;
QSettings settings;
settings.beginGroup("Preferences");
if (settings.contains("CaptureVideoDir"))
{
dir = settings.value("CaptureVideoDir").toString();
}
else
{
dir = QDir::current().path();
}
settings.endGroup();
int videoSizes[8][2] =
{
{ 160, 120 },
{ 320, 240 },
{ 640, 480 },
{ 720, 480 },
{ 720, 576 },
{ 1024, 768 },
{ 1280, 720 },
{ 1920, 1080 }
};
QFileDialog fileDialog(this, _("Capture Video"), dir, _("Matroska Video (*.mkv)"));
float videoFrameRates[5] = { 15.0f, 24.0f, 25.0f, 29.97f, 30.0f };
QString saveAsName;
if (fileDialog.exec())
saveAsName = fileDialog.selectedFiles().at(0);
#ifdef _WIN32
QString saveAsName = QFileDialog::getSaveFileName(this,
_("Capture Video"),
dir,
_("Video (*.avi)"));
#else
QString saveAsName = QFileDialog::getSaveFileName(this,
_("Capture Video"),
dir,
_("Video (*.ogv)"));
#endif
if (!saveAsName.isEmpty())
{
#ifndef _WIN32
if (!saveAsName.endsWith(".mkv", Qt::CaseInsensitive))
saveAsName.append(".mkv");
#endif
QDialog videoInfoDialog(this);
videoInfoDialog.setWindowTitle("Capture Video");
videoInfoDialog.setWindowTitle(_("Capture Video"));
QGridLayout* layout = new QGridLayout(&videoInfoDialog);
QComboBox* resolutionCombo = new QComboBox(&videoInfoDialog);
layout->addWidget(new QLabel(_("Resolution:"), &videoInfoDialog), 0, 0);
layout->addWidget(resolutionCombo, 0, 1);
for (unsigned int i = 0; i < sizeof(videoSizes) / sizeof(videoSizes[0]); i++)
{
resolutionCombo->addItem(QString(_("%1 x %2")).arg(videoSizes[i][0]).arg(videoSizes[i][1]), QSize(videoSizes[i][0], videoSizes[i][1]));
}
for (const auto& size : videoSizes)
resolutionCombo->addItem(QString(_("%1 x %2")).arg(size[0]).arg(size[1]), QSize(size[0], size[1]));
QComboBox* frameRateCombo = new QComboBox(&videoInfoDialog);
layout->addWidget(new QLabel(_("Frame rate:"), &videoInfoDialog), 1, 0);
layout->addWidget(frameRateCombo, 1, 1);
for (unsigned int i = 0; i < sizeof(videoFrameRates) / sizeof(videoFrameRates[0]); i++)
{
frameRateCombo->addItem(QString("%1").arg(videoFrameRates[i]), videoFrameRates[i]);
}
for (float i : videoFrameRates)
frameRateCombo->addItem(QString::number(i), i);
QComboBox* codecCombo = new QComboBox(&videoInfoDialog);
layout->addWidget(new QLabel(_("Video codec:"), &videoInfoDialog), 2, 0);
layout->addWidget(codecCombo, 2, 1);
codecCombo->addItem(_("Lossless"), AV_CODEC_ID_FFVHUFF);
codecCombo->addItem(_("Lossy (H.264)"), AV_CODEC_ID_H264);
QLineEdit* bitrateEdit = new QLineEdit("400000", &videoInfoDialog);
bitrateEdit->setInputMask("D000000000");
layout->addWidget(new QLabel(_("Bitrate:"), &videoInfoDialog), 3, 0);
layout->addWidget(bitrateEdit, 3, 1);
QDialogButtonBox* buttons = new QDialogButtonBox(QDialogButtonBox::Ok | QDialogButtonBox::Cancel, Qt::Horizontal, &videoInfoDialog);
connect(buttons, SIGNAL(accepted()), &videoInfoDialog, SLOT(accept()));
connect(buttons, SIGNAL(rejected()), &videoInfoDialog, SLOT(reject()));
layout->addWidget(buttons, 2, 0, 1, 2);
layout->addWidget(buttons, 4, 0, 1, 2);
videoInfoDialog.setLayout(layout);
@ -701,14 +697,18 @@ void CelestiaAppWindow::slotCaptureVideo()
{
QSize videoSize = resolutionCombo->itemData(resolutionCombo->currentIndex()).toSize();
float frameRate = frameRateCombo->itemData(frameRateCombo->currentIndex()).toFloat();
AVCodecID vc = static_cast<AVCodecID>(codecCombo->itemData(codecCombo->currentIndex()).toInt());
int br = bitrateEdit->text().toLongLong();
#ifdef _WIN32
MovieCapture* movieCapture = new AVICapture(m_appCore->getRenderer());
#else
MovieCapture* movieCapture = new OggTheoraCapture(m_appCore->getRenderer());
movieCapture->setAspectRatio(1, 1);
#endif
bool ok = movieCapture->start(saveAsName.toLatin1().data(),
auto *movieCapture = new FFMPEGCapture(m_appCore->getRenderer());
movieCapture->setVideoCodec(vc);
movieCapture->setBitRate(br);
if (vc == AV_CODEC_ID_H264)
movieCapture->setEncoderOptions(m_appCore->getConfig()->x264EncoderOptions);
else
movieCapture->setEncoderOptions(m_appCore->getConfig()->ffvhEncoderOptions);
bool ok = movieCapture->start(saveAsName.toStdString(),
videoSize.width(), videoSize.height(),
frameRate);
if (ok)
@ -717,10 +717,10 @@ void CelestiaAppWindow::slotCaptureVideo()
delete movieCapture;
}
settings.beginGroup("Preferences");
settings.setValue("CaptureVideoDir", QFileInfo(saveAsName).absolutePath());
settings.endGroup();
}
settings.endGroup();
#endif
}
@ -1186,7 +1186,7 @@ void CelestiaAppWindow::createMenus()
QAction* captureVideoAction = new QAction(QIcon(":/icons/capture-video.png"),
_("Capture &video"), this);
// TODO: Add Mac support for video capture
#if defined(__APPLE__) || (!defined(_WIN32) && !defined(THEORA))
#ifndef USE_FFMPEG
captureVideoAction->setEnabled(false);
#endif
captureVideoAction->setShortcut(QString(_("Shift+F10")));

View File

@ -479,15 +479,19 @@ FONT 8, "Segoe UI", 0, 0, 0
LANGUAGE LANG_ENGLISH, SUBLANG_ENGLISH_US
IDD_MOVIE_PARAMS_CHOOSER DIALOGEX 20, 20, 280, 22
IDD_MOVIE_PARAMS_CHOOSER DIALOGEX 20, 20, 280, 34
STYLE DS_3DLOOK | DS_CONTROL | DS_SETFONT | WS_VISIBLE | WS_CHILDWINDOW | WS_CLIPSIBLINGS
FONT 8, "Segoe UI", 0, 0, 0
{
CONTROL "", -1, WC_NATIVEFONTCTL, NFS_ALL, 0, 0, 0, 0
LTEXT "Size:", IDC_STATIC, 6, 4, 30, 8, SS_LEFT, WS_EX_LEFT
COMBOBOX IDC_COMBO_MOVIE_SIZE, 38, 2, 54, 90, WS_TABSTOP | WS_VSCROLL | CBS_DROPDOWNLIST, WS_EX_LEFT
RTEXT "Frame rate:", IDC_STATIC, 96, 4, 74, 8, SS_RIGHT, WS_EX_LEFT
COMBOBOX IDC_COMBO_MOVIE_FRAMERATE, 172, 2, 40, 90, WS_TABSTOP | WS_VSCROLL | CBS_DROPDOWNLIST, WS_EX_LEFT
COMBOBOX IDC_COMBO_MOVIE_SIZE, 38, 2, 82, 93, WS_TABSTOP | WS_VSCROLL | CBS_DROPDOWNLIST, WS_EX_LEFT
RTEXT "Frame rate:", IDC_STATIC, 120, 4, 74, 8, SS_RIGHT, WS_EX_LEFT
COMBOBOX IDC_COMBO_MOVIE_FRAMERATE, 196, 2, 82, 90, WS_TABSTOP | WS_VSCROLL | CBS_DROPDOWNLIST, WS_EX_LEFT
LTEXT "Codec:", 0, 6, 20, 30, 8, SS_LEFT, WS_EX_LEFT
COMBOBOX IDC_COMBO_MOVIE_CODEC, 38, 18, 82, 30, CBS_DROPDOWNLIST | CBS_HASSTRINGS, WS_EX_LEFT
RTEXT "Bitrate:", 0, 120, 20, 74, 8, SS_RIGHT, WS_EX_LEFT
EDITTEXT IDC_EDIT_MOVIE_BITRATE, 196, 18, 82, 14, ES_AUTOHSCROLL | ES_NUMBER, WS_EX_LEFT
}

View File

@ -193,6 +193,8 @@
#define IDC_SHOWFADINGORBITS 1163
#define IDC_SHOWRINGS 1164
#define IDC_SHOWPARTIALTRAJECTORIES 1165
#define IDC_COMBO_MOVIE_CODEC 1166
#define IDC_EDIT_MOVIE_BITRATE 1167
#define ID_SELECTSOL 40001
#define ID_GOTOSELECTION 40002
#define ID_FOLLOWSELECTION 40003

View File

@ -29,6 +29,7 @@
#include <celengine/glsupport.h>
#include <celcompat/charconv.h>
#include <celmath/mathlib.h>
#include <celutil/array_view.h>
#include <celutil/debug.h>
@ -41,7 +42,7 @@
#include <celscript/legacy/cmdparser.h>
#include "celestia/celestiacore.h"
#include "celestia/avicapture.h"
#include "celestia/ffmpegcapture.h"
#include "celestia/helper.h"
#include "celestia/scriptmenu.h"
#include "celestia/url.h"
@ -139,9 +140,22 @@ static int MovieSizes[8][2] = {
static float MovieFramerates[5] = { 15.0f, 24.0f, 25.0f, 29.97f, 30.0f };
struct MovieCodec
{
AVCodecID codecId;
const char *codecDesc;
};
static MovieCodec MovieCodecs[2] =
{
{ AV_CODEC_ID_FFVHUFF, N_("Lossless") },
{ AV_CODEC_ID_H264, N_("Lossy (H.264)") }
};
static int movieSize = 1;
static int movieFramerate = 1;
static int movieCodec = 1;
static int64_t movieBitrate = 400000;
astro::Date newTime(0.0);
@ -430,9 +444,17 @@ static void ShowLocalTime(CelestiaCore* appCore)
static bool BeginMovieCapture(const Renderer* renderer,
const std::string& filename,
int width, int height,
float framerate)
float framerate,
AVCodecID codec,
int64_t bitrate)
{
MovieCapture* movieCapture = new AVICapture(renderer);
auto* movieCapture = new FFMPEGCapture(renderer);
movieCapture->setVideoCodec(codec);
movieCapture->setBitRate(bitrate);
if (codec == AV_CODEC_ID_H264)
movieCapture->setEncoderOptions(appCore->getConfig()->x264EncoderOptions);
else
movieCapture->setEncoderOptions(appCore->getConfig()->ffvhEncoderOptions);
bool success = movieCapture->start(filename, width, height, framerate);
if (success)
@ -666,25 +688,36 @@ UINT CALLBACK ChooseMovieParamsProc(HWND hDlg, UINT message,
HWND hwnd = GetDlgItem(hDlg, IDC_COMBO_MOVIE_SIZE);
int nSizes = sizeof MovieSizes / sizeof MovieSizes[0];
int i;
for (i = 0; i < nSizes; i++)
for (int i = 0; i < nSizes; i++)
{
sprintf(buf, "%d x %d", MovieSizes[i][0], MovieSizes[i][1]);
sprintf(buf, _("%d x %d"), MovieSizes[i][0], MovieSizes[i][1]);
SendMessage(hwnd, CB_INSERTSTRING, -1,
reinterpret_cast<LPARAM>(buf));
}
SendMessage(hwnd, CB_SETCURSEL, movieSize, 0);
hwnd = GetDlgItem(hDlg, IDC_COMBO_MOVIE_FRAMERATE);
int nFramerates = sizeof MovieFramerates / sizeof MovieFramerates[0];
for (i = 0; i < nFramerates; i++)
for (int i = 0; i < nFramerates; i++)
{
sprintf(buf, "%.2f", MovieFramerates[i]);
SendMessage(hwnd, CB_INSERTSTRING, -1,
reinterpret_cast<LPARAM>(buf));
}
SendMessage(hwnd, CB_SETCURSEL, movieFramerate, 0);
hwnd = GetDlgItem(hDlg, IDC_COMBO_MOVIE_CODEC);
int nCodecs = sizeof MovieCodecs / sizeof MovieCodecs[0];
for (int i = 0; i < nCodecs; i++)
{
SendMessage(hwnd, CB_INSERTSTRING, -1,
reinterpret_cast<LPARAM>(_(MovieCodecs[i].codecDesc)));
}
SendMessage(hwnd, CB_SETCURSEL, movieCodec, 0);
hwnd = GetDlgItem(hDlg, IDC_EDIT_MOVIE_BITRATE);
SetWindowText(hwnd, "400000");
}
return TRUE;
@ -711,6 +744,34 @@ UINT CALLBACK ChooseMovieParamsProc(HWND hDlg, UINT message,
}
return TRUE;
}
else if (LOWORD(wParam) == IDC_COMBO_MOVIE_CODEC)
{
if (HIWORD(wParam) == CBN_SELCHANGE)
{
HWND hwnd = reinterpret_cast<HWND>(lParam);
int item = SendMessage(hwnd, CB_GETCURSEL, 0, 0);
if (item != CB_ERR)
movieCodec = item;
}
}
else if (LOWORD(wParam) == IDOK)
{
char buf[24], out[24];
wchar_t wbuff[48];
int len = GetDlgItemText(hDlg, IDC_EDIT_MOVIE_BITRATE, buf, sizeof(buf));
if (len > 0)
{
// convert the edit control text from the active code page to UTF-8 before parsing
int wlen = MultiByteToWideChar(CP_ACP, 0, buf, -1, wbuff, sizeof(wbuff) / sizeof(wbuff[0]));
int olen = WideCharToMultiByte(CP_UTF8, 0, wbuff, wlen, out, sizeof(out), NULL, NULL);
auto result = std::from_chars(out, out + olen, movieBitrate);
if (result.ec != std::errc())
movieBitrate = 400000;
}
EndDialog(hDlg, 0);
return TRUE;
}
}
return FALSE;
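
The bitrate text read from the Win32 edit control arrives in the active ANSI code page and has to be converted before std::from_chars can parse it. Below is a self-contained sketch of that conversion-and-parse step; the helper name parseBitrateText and the buffer sizes are illustrative, and it uses the standard <charconv> header where the patch itself goes through celcompat/charconv.h.

#include <windows.h>
#include <charconv>
#include <cstdint>

static int64_t parseBitrateText(const char* acpText, int64_t fallback = 400000)
{
    wchar_t wide[48];
    char utf8[48];
    // widen from the active code page, then narrow to UTF-8
    int wlen = MultiByteToWideChar(CP_ACP, 0, acpText, -1,
                                   wide, sizeof(wide) / sizeof(wide[0]));
    if (wlen <= 0)
        return fallback;
    int olen = WideCharToMultiByte(CP_UTF8, 0, wide, wlen,
                                   utf8, sizeof(utf8), NULL, NULL);
    if (olen <= 0)
        return fallback;

    int64_t value = fallback;
    auto result = std::from_chars(utf8, utf8 + olen, value);
    return result.ec == std::errc() ? value : fallback;
}

Note that the byte count returned by WideCharToMultiByte, not the wide-character count, bounds the from_chars range.
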
@ -2682,7 +2743,7 @@ static void HandleCaptureMovie(HWND hWnd)
ZeroMemory(&Ofn, sizeof(OPENFILENAME));
Ofn.lStructSize = sizeof(OPENFILENAME);
Ofn.hwndOwner = hWnd;
Ofn.lpstrFilter = "Microsoft AVI\0*.avi\0";
Ofn.lpstrFilter = "Matroska (*.mkv)\0*.mkv\0";
Ofn.lpstrFile= szFile;
Ofn.nMaxFile = sizeof(szFile);
Ofn.lpstrFileTitle = szFileTitle;
@ -2710,7 +2771,7 @@ static void HandleCaptureMovie(HWND hWnd)
bool success = false;
DWORD nFileType=0;
char defaultExtensions[][4] = { "avi" };
char defaultExtensions[][4] = { "mkv" };
if (Ofn.nFileExtension == 0)
{
// If no extension was specified, use the selection of filter to
@ -2732,7 +2793,7 @@ static void HandleCaptureMovie(HWND hWnd)
{
switch (DetermineFileType(Ofn.lpstrFile))
{
case Content_AVI:
case Content_MKV:
nFileType = 1;
break;
default:
@ -2752,7 +2813,9 @@ static void HandleCaptureMovie(HWND hWnd)
string(Ofn.lpstrFile),
MovieSizes[movieSize][0],
MovieSizes[movieSize][1],
MovieFramerates[movieFramerate]);
MovieFramerates[movieFramerate],
MovieCodecs[movieCodec].codecId,
movieBitrate);
}
if (!success)

View File

@ -25,7 +25,7 @@ static const char CelestiaMeshExt[] = ".cms";
static const char CelestiaCatalogExt[] = ".ssc";
static const char CelestiaStarCatalogExt[] = ".stc";
static const char CelestiaDeepSkyCatalogExt[] = ".dsc";
static const char AVIExt[] = ".avi";
static const char MKVExt[] = ".mkv";
static const char DDSExt[] = ".dds";
static const char DXT5NormalMapExt[] = ".dxt5nm";
static const char CelestiaLegacyScriptExt[] = ".cel";
@ -63,8 +63,8 @@ ContentType DetermineFileType(const fs::path& filename)
return Content_CelestiaStarCatalog;
if (compareIgnoringCase(CelestiaDeepSkyCatalogExt, ext) == 0)
return Content_CelestiaDeepSkyCatalog;
if (compareIgnoringCase(AVIExt, ext) == 0)
return Content_AVI;
if (compareIgnoringCase(MKVExt, ext) == 0)
return Content_MKV;
if (compareIgnoringCase(DDSExt, ext) == 0)
return Content_DDS;
if (compareIgnoringCase(CelestiaLegacyScriptExt, ext) == 0)

View File

@ -23,7 +23,7 @@ enum ContentType
Content_CelestiaTexture = 6,
Content_3DStudio = 7,
Content_CelestiaMesh = 8,
Content_AVI = 9,
Content_MKV = 9,
Content_CelestiaCatalog = 10,
Content_DDS = 11,
Content_CelestiaStarCatalog = 12,