mirror of
https://github.com/anidl/multi-downloader-nx.git
synced 2026-04-21 00:12:05 +00:00
Chapters with ffmpeg
- Convert chapters to the ffmpeg metadata format when ffmpeg is used as the muxer.
- Chapter names no longer contain "Start" or "End".
- Recap chapters are ignored for now; they were rarely used anyway.
This commit is contained in:
parent
b72d170be1
commit
e6c71ee226
3 changed files with 99 additions and 20 deletions
58
crunchy.ts
58
crunchy.ts
|
|
@ -1558,12 +1558,12 @@ export default class Crunchy implements ServiceClass {
|
|||
if (chapterData.startTime > 1) {
|
||||
compiledChapters.push(
|
||||
`CHAPTER${(compiledChapters.length/2)+1}=00:00:00.00`,
|
||||
`CHAPTER${(compiledChapters.length/2)+1}NAME=Prologue`
|
||||
`CHAPTER${(compiledChapters.length/2)+1}NAME=Episode`
|
||||
);
|
||||
}
|
||||
compiledChapters.push(
|
||||
`CHAPTER${(compiledChapters.length/2)+1}=${startFormatted}`,
|
||||
`CHAPTER${(compiledChapters.length/2)+1}NAME=Opening`
|
||||
`CHAPTER${(compiledChapters.length/2)+1}NAME=Intro`
|
||||
);
|
||||
compiledChapters.push(
|
||||
`CHAPTER${(compiledChapters.length/2)+1}=${endFormatted}`,
|
||||
|
|
@ -1586,7 +1586,8 @@ export default class Crunchy implements ServiceClass {
|
|||
if (chapters.length > 0) {
|
||||
chapters.sort((a, b) => a.start - b.start);
|
||||
//Check if chapters has an intro
|
||||
if (!(chapters.find(c => c.type === 'intro') || chapters.find(c => c.type === 'recap'))) {
|
||||
//if (!(chapters.find(c => c.type === 'intro') || chapters.find(c => c.type === 'recap'))) {
|
||||
if (!(chapters.find(c => c.type === 'intro'))) {
|
||||
compiledChapters.push(
|
||||
`CHAPTER${(compiledChapters.length/2)+1}=00:00:00.00`,
|
||||
`CHAPTER${(compiledChapters.length/2)+1}NAME=Episode`
|
||||
|
|
@ -1602,32 +1603,53 @@ export default class Crunchy implements ServiceClass {
|
|||
endTime.setSeconds(chapter.end);
|
||||
const startFormatted = startTime.toISOString().substring(11, 19)+'.00';
|
||||
const endFormatted = endTime.toISOString().substring(11, 19)+'.00';
|
||||
|
||||
//Find the max start time from the chapters
|
||||
const maxStart = Math.max(
|
||||
...chapters
|
||||
.map(obj => obj.start)
|
||||
.filter((start): start is number => start !== null && start !== undefined)
|
||||
);
|
||||
//We need the duration of the ep
|
||||
let epDuration: number | undefined;
|
||||
const epiMeta = await this.req.getData(`${api.cms}/objects/${currentMediaId}?force_locale=&preferred_audio_language=ja-JP&locale=${this.locale}`, AuthHeaders);
|
||||
if(!epiMeta.ok || !epiMeta.res){
|
||||
console.warn('EP Meta info Request FAILED! Maybe the chapters are not working properly...');
|
||||
epDuration = 7200;
|
||||
} else {
|
||||
epDuration = Math.floor((await epiMeta.res.json()).data[0].episode_metadata.duration_ms / 1000 - 3);
|
||||
}
|
||||
|
||||
//Push generated chapters
|
||||
if (chapter.type == 'intro') {
|
||||
if (chapter.start > 0) {
|
||||
compiledChapters.push(
|
||||
`CHAPTER${(compiledChapters.length/2)+1}=00:00:00.00`,
|
||||
`CHAPTER${(compiledChapters.length/2)+1}NAME=Prologue`
|
||||
`CHAPTER${(compiledChapters.length/2)+1}NAME=Episode`
|
||||
);
|
||||
}
|
||||
compiledChapters.push(
|
||||
`CHAPTER${(compiledChapters.length/2)+1}=${startFormatted}`,
|
||||
`CHAPTER${(compiledChapters.length/2)+1}NAME=Opening`
|
||||
);
|
||||
compiledChapters.push(
|
||||
`CHAPTER${(compiledChapters.length/2)+1}=${endFormatted}`,
|
||||
`CHAPTER${(compiledChapters.length/2)+1}NAME=Episode`
|
||||
`CHAPTER${(compiledChapters.length/2)+1}NAME=${chapter.type.charAt(0).toUpperCase() + chapter.type.slice(1)}`
|
||||
);
|
||||
if (chapter.end < epDuration && chapter.end != maxStart) {
|
||||
compiledChapters.push(
|
||||
`CHAPTER${(compiledChapters.length/2)+1}=${endFormatted}`,
|
||||
`CHAPTER${(compiledChapters.length/2)+1}NAME=Episode`
|
||||
);
|
||||
}
|
||||
} else {
|
||||
compiledChapters.push(
|
||||
`CHAPTER${(compiledChapters.length/2)+1}=${startFormatted}`,
|
||||
`CHAPTER${(compiledChapters.length/2)+1}NAME=${chapter.type.charAt(0).toUpperCase() + chapter.type.slice(1)} Start`
|
||||
);
|
||||
compiledChapters.push(
|
||||
`CHAPTER${(compiledChapters.length/2)+1}=${endFormatted}`,
|
||||
`CHAPTER${(compiledChapters.length/2)+1}NAME=${chapter.type.charAt(0).toUpperCase() + chapter.type.slice(1)} End`
|
||||
);
|
||||
if (chapter.type !== 'recap') {
|
||||
compiledChapters.push(
|
||||
`CHAPTER${(compiledChapters.length/2)+1}=${startFormatted}`,
|
||||
`CHAPTER${(compiledChapters.length/2)+1}NAME=${chapter.type.charAt(0).toUpperCase() + chapter.type.slice(1)}`
|
||||
);
|
||||
if (chapter.end < epDuration && chapter.end != maxStart) {
|
||||
compiledChapters.push(
|
||||
`CHAPTER${(compiledChapters.length/2)+1}=${endFormatted}`,
|
||||
`CHAPTER${(compiledChapters.length/2)+1}NAME=Episode`
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
|||
51
modules/module.ffmpegChapter.ts
Normal file
51
modules/module.ffmpegChapter.ts
Normal file
|
|
@ -0,0 +1,51 @@
|
|||
import fs from 'fs';
|
||||
|
||||
export function convertChaptersToFFmpegFormat(inputFilePath: string): string {
|
||||
const content = fs.readFileSync(inputFilePath, 'utf-8');
|
||||
|
||||
const chapterMatches = Array.from(content.matchAll(/CHAPTER(\d+)=([\d:.]+)/g));
|
||||
const nameMatches = Array.from(content.matchAll(/CHAPTER(\d+)NAME=([^\n]+)/g));
|
||||
|
||||
const chapters = chapterMatches.map((m) => ({
|
||||
index: parseInt(m[1], 10),
|
||||
time: m[2],
|
||||
})).sort((a, b) => a.index - b.index);
|
||||
|
||||
const nameDict: Record<number, string> = {};
|
||||
nameMatches.forEach((m) => {
|
||||
nameDict[parseInt(m[1], 10)] = m[2];
|
||||
});
|
||||
|
||||
let ffmpegContent = ';FFMETADATA1\n';
|
||||
let startTimeInNs = 0;
|
||||
|
||||
for (let i = 0; i < chapters.length; i++) {
|
||||
const chapterStartTime = timeToNanoSeconds(chapters[i].time);
|
||||
const chapterEndTime = (i + 1 < chapters.length)
|
||||
? timeToNanoSeconds(chapters[i + 1].time)
|
||||
: chapterStartTime + 1000000000;
|
||||
|
||||
const chapterName = nameDict[chapters[i].index] || `Chapter ${chapters[i].index}`;
|
||||
|
||||
ffmpegContent += '[CHAPTER]\n';
|
||||
ffmpegContent += 'TIMEBASE=1/1000000000\n';
|
||||
ffmpegContent += `START=${startTimeInNs}\n`;
|
||||
ffmpegContent += `END=${chapterEndTime}\n`;
|
||||
ffmpegContent += `title=${chapterName}\n`;
|
||||
|
||||
startTimeInNs = chapterEndTime;
|
||||
}
|
||||
|
||||
return ffmpegContent;
|
||||
}
|
||||
|
||||
export function timeToNanoSeconds(time: string): number {
|
||||
const parts = time.split(':');
|
||||
const hours = parseInt(parts[0], 10);
|
||||
const minutes = parseInt(parts[1], 10);
|
||||
const secondsAndMs = parts[2].split('.');
|
||||
const seconds = parseInt(secondsAndMs[0], 10);
|
||||
const milliseconds = parseInt(secondsAndMs[1], 10);
|
||||
|
||||
return (hours * 3600 + minutes * 60 + seconds) * 1000000000 + milliseconds * 1000000;
|
||||
}
|
||||
|
|
@ -8,6 +8,7 @@ import { AvailableMuxer } from './module.args';
|
|||
import { console } from './log';
|
||||
import ffprobe from 'ffprobe';
|
||||
import Helper from './module.helper';
|
||||
import { convertChaptersToFFmpegFormat } from './module.ffmpegChapter';
|
||||
|
||||
export type MergerInput = {
|
||||
path: string,
|
||||
|
|
@ -164,6 +165,13 @@ class Merger {
|
|||
args.push(`-i "${sub.file}"`);
|
||||
}
|
||||
|
||||
if (this.options.chapters && this.options.chapters.length > 0) {
|
||||
const chapterFilePath = this.options.chapters[0].path;
|
||||
const chapterData = convertChaptersToFFmpegFormat(this.options.chapters[0].path);
|
||||
fs.writeFileSync(chapterFilePath, chapterData, 'utf-8');
|
||||
args.push(`-i "${chapterFilePath}" -map_metadata 1`);
|
||||
}
|
||||
|
||||
if (this.options.output.split('.').pop() === 'mkv') {
|
||||
if (this.options.fonts) {
|
||||
let fontIndex = 0;
|
||||
|
|
@ -174,8 +182,6 @@ class Merger {
|
|||
}
|
||||
}
|
||||
|
||||
//TODO: Make it possible for chapters to work with ffmpeg merging
|
||||
|
||||
args.push(...metaData);
|
||||
args.push(...this.options.subtitles.map((_, subIndex) => `-map ${subIndex + index}`));
|
||||
args.push(
|
||||
|
|
|
|||
Loading…
Reference in a new issue