feat: eslint config

wip: tosho provider
This commit is contained in:
ThaUnknown 2023-06-25 01:31:28 +02:00
parent 273d5edb3e
commit f2de180f9a
9 changed files with 2221 additions and 1004 deletions

45
.eslintrc Normal file
View file

@ -0,0 +1,45 @@
{
"globals": {
"launchQueue": "readonly",
"FileSystemHandle": "readonly",
"FileSystemFileHandle": "readonly",
"FileSystemDirectoryHandle": "readonly",
"FileSystemWritableFileStream": "readonly",
"ClipboardItem": "readonly",
"queryLocalFonts": "readonly"
},
"env": {
"es2022": true,
"browser": true,
"worker": true,
"node": true,
"serviceworker": true
},
"extends": ["plugin:svelte/recommended", "standard"],
"plugins": ["svelte"],
"parser": "@typescript-eslint/parser",
"ignorePatterns": ["*.min.*", "*.build.*"],
"overrides": [
{
"files": ["*.svelte"],
"parser": "svelte-eslint-parser",
"rules": {
"svelte/indent": ["error", { "indent": 2, "indentScript": true }],
"no-self-assign": 0,
"a11y-media-has-caption": 0,
"no-use-before-define": 0,
"svelte/html-self-closing": ["error", "always"],
"svelte/html-closing-bracket-spacing": ["error", { "startTag": "never", "endTag": "never", "selfClosingTag": "always" }],
"svelte/html-quotes": [
"error",
{
"prefer": "single",
"dynamic": { "quoted": false, "avoidInvalidUnquotedInHTML": false }
}
],
"svelte/shorthand-attribute": ["warn", { "prefer": "always" }],
"svelte/spaced-html-comment": ["error", "always"]
}
}
]
}

View file

@ -1,6 +1,6 @@
{
"name": "Miru",
"version": "4.1.0",
"version": "4.1.1",
"author": "ThaUnknown_ <ThaUnknown@users.noreply.github.com>",
"description": "Stream anime torrents, real-time with no waiting for downloads.",
"main": "build/main.js",
@ -14,6 +14,7 @@
"publish": "npm run web:build && electron-builder -p always"
},
"devDependencies": {
"@typescript-eslint/parser": "^5.60.0",
"anitomyscript": "github:ThaUnknown/anitomyscript#42290c4b3f256893be08a4e89051f448ff5e9d00",
"bottleneck": "^2.19.5",
"browser-event-target-emitter": "^1.0.0",
@ -27,7 +28,11 @@
"electron-log": "^4.4.8",
"electron-notarize": "^1.2.2",
"electron-updater": "^4.6.5",
"eslint": "^8.43.0",
"eslint-config-standard": "^17.1.0",
"eslint-plugin-svelte": "^2.31.1",
"html-webpack-plugin": "^5.5.1",
"install": "^0.13.0",
"jassub": "1.7.1",
"js-levenshtein": "^1.1.6",
"matroska-subtitles": "github:ThaUnknown/matroska-subtitles#446d0628ff0bcf13eb95184777615f3a0e6d8ae8",
@ -37,6 +42,7 @@
"quartermoon": "^1.2.3",
"simple-store-svelte": "^1.0.0",
"svelte": "^3.59.1",
"svelte-eslint-parser": "^0.31.0",
"svelte-keybinds": "1.0.5",
"svelte-loader": "^3.1.8",
"svelte-miniplayer": "1.0.3",

File diff suppressed because it is too large Load diff

View file

@ -1,5 +1,5 @@
import { add } from './torrent.js'
import { DOMPARSER, PromiseBatch } from './util.js'
import { DOMPARSER, PromiseBatch, binarySearch } from './util.js'
import { alRequest, alSearch } from './anilist.js'
import anitomyscript from 'anitomyscript'
import { media } from '../views/Player/MediaHandler.svelte'
@ -391,3 +391,23 @@ export async function getEpisodeMetadataForMedia (media) {
episodeMetadataMap[media.id] = episodes
return episodes
}
// cache of nyaa torrent IDs that the SeaDex ("sneedex") list marks as best releases
let seadex = []
// populated lazily when the browser is idle; until the fetch resolves,
// mapBestRelease simply finds no matches (seadex stays empty)
requestIdleCallback(async () => {
const res = await fetch('https://sneedex.moe/api/public/nyaa')
const json = await res.json()
seadex = json.flatMap(({ nyaaIDs }) => nyaaIDs).sort((a, b) => a - b) // sort for binary search
})
// flags entries whose nyaa ID appears in the SeaDex best-release list
export function mapBestRelease (entries) {
  const flag = entry => {
    if (entry.id) {
      // '?' marks an entry with an unknown ID — nothing to look up
      if (entry.id !== '?' && binarySearch(seadex, entry.id)) entry.best = true
      return entry
    }
    // no explicit ID: fall back to the first run of digits in the link
    const digits = entry.link.match(/\d+/i)
    if (digits && binarySearch(seadex, Number(digits[0]))) entry.best = true
    return entry
  }
  return entries.map(flag)
}

View file

@ -0,0 +1,148 @@
import { alRequest } from '@/modules/anilist.js'
import { set } from '@/views/Settings.svelte'
import { findEdge, resolveSeason, getMediaMaxEp } from './anime.js'
import { exclusions, getRSSContent, parseRSSNodes } from '../rss.js'
import { mapBestRelease } from '../anime.js'
/**
 * nyaa RSS provider: builds search queries from media titles + episode numbers
 * and returns de-duplicated RSS entries flagged with SeaDex best-release info.
 * mode cuts down on the amt of queries made: 'check' || 'batch'
 */
export default async function getRSSEntries ({ media, episode, mode, ignoreQuality }) {
  const titles = createTitle(media).join(')|(')
  const prequel = findEdge(media, 'PREQUEL')?.node
  const sequel = findEdge(media, 'SEQUEL')?.node
  const isBatch = media.status === 'FINISHED' && media.episodes !== 1
  // if media has multiple seasons, and this S is > 1, then get the absolute episode number of the episode
  const absolute = prequel && !mode && (await resolveSeason({ media, episode, force: true }))
  const absoluteep = absolute?.offset + episode
  const episodes = [episode]
  // only use absolute episode number if its smaller than max episodes this series has, ex:
  // looking for E1 of S2, S1 has 12 ep and S2 has 13, absolute will be 13
  // so this would find the 13th ep of the 2nd season too if this check wasnt here
  if (absolute && absoluteep < (getMediaMaxEp(media) || episode)) {
    episodes.push(absoluteep)
  }
  let ep = ''
  if (media.episodes !== 1 && mode !== 'batch') {
    if (isBatch) {
      // match "01-12"/"01~12" ranges zero-padded to the episode count's width, plus batch keywords
      const digits = Math.max(2, Math.log(media.episodes) * Math.LOG10E + 1 | 0)
      ep = `"${zeropad(1, digits)}-${zeropad(media.episodes, digits)}"|"${zeropad(1, digits)}~${zeropad(media.episodes, digits)}"|"Batch"|"Complete"|"${zeropad(episode)}+"|"${zeropad(episode)}v"`
    } else {
      ep = `(${episodes.map(epstring).join('|')})`
    }
  }
  const excl = exclusions.join('|')
  // FIX: the old `` `"${set.rssQuality}"` || '"1080"' `` fallback was dead code — a template
  // literal is always truthy, so an unset quality produced a literal "undefined" search term.
  // Use nullish coalescing so the 1080p default actually applies.
  const quality = !ignoreQuality ? `"${set.rssQuality ?? '1080'}"` : ''
  const url = new URL(`${set.catURL}/?page=rss&c=1_2&f=0&s=seeders&o=desc&q=(${titles})${ep}${quality}-(${excl})`)
  let nodes = [...(await getRSSContent(url)).querySelectorAll('item')]
  if (absolute) {
    // if this is S > 1 aka absolute ep number exists get entries for S1title + absoluteEP
    // the reason this isnt done with recursion like sequelEntries is because that would include the S1 media dates
    // we want the dates of the target media as the S1 title might be used for SX releases
    const titles = createTitle(absolute.media).join(')|(')
    const url = new URL(`${set.catURL}/?page=rss&c=1_2&f=0&s=seeders&o=desc&q=(${titles})${epstring(absoluteep)}${quality}-(${excl})`)
    nodes = [...nodes, ...(await getRSSContent(url)).querySelectorAll('item')]
  }
  let entries = parseRSSNodes(nodes)
  const checkSequelDate = media.status === 'FINISHED' && (sequel?.status === 'FINISHED' || sequel?.status === 'RELEASING') && sequel.startDate
  const sequelStartDate = checkSequelDate && new Date(Object.values(checkSequelDate).join(' '))
  // recursive, get all entries for media sequel, and its sequel, and its sequel
  const sequelEntries =
    (sequel?.status === 'FINISHED' || sequel?.status === 'RELEASING') &&
    (await getRSSEntries({ media: (await alRequest({ method: 'SearchIDSingle', id: sequel.id })).data.Media, episode, mode: mode || 'check' }))
  const checkPrequelDate = (media.status === 'FINISHED' || media.status === 'RELEASING') && prequel?.status === 'FINISHED' && prequel?.endDate
  const prequelEndDate = checkPrequelDate && new Date(Object.values(checkPrequelDate).join(' '))
  // ~1 month in ms, with a bit of jitter for pre-releases and late releasers
  const month = 2674848460
  if (prequelEndDate) {
    // drop anything released before (prequel end + 1 month) — likely prequel releases
    entries = entries.filter(entry => entry.date > new Date(+prequelEndDate + month))
  }
  if (sequelStartDate && media.format === 'TV') {
    // drop anything released after (sequel start - 1 month) — likely sequel releases
    entries = entries.filter(entry => entry.date < new Date(+sequelStartDate - month))
  }
  if (sequelEntries?.length) {
    if (mode === 'check') {
      entries = [...entries, ...sequelEntries]
    } else {
      // exclude entries that actually belong to the sequel
      entries = entries.filter(entry => !sequelEntries.find(sequel => sequel.link === entry.link))
    }
  }
  // this gets entries without any episode limiting, and for batches
  const batchEntries = !mode && isBatch && (await getRSSEntries({ media, episode, ignoreQuality, mode: 'batch' })).filter(entry => {
    return !epNumRx.test(entry.title)
  })
  if (batchEntries?.length) {
    entries = [...entries, ...batchEntries]
  }
  // some archaic shows only exist as DVD rips in odd qualities, so retry without
  // any quality restrictions when there are no results
  if (!entries.length && !ignoreQuality && !mode) {
    entries = await getRSSEntries({ media, episode, ignoreQuality: true })
  }
  // dedupe by link, keeping the first occurrence of each
  const ids = entries.map(e => e.link)
  return mapBestRelease(entries.filter(({ link }, index) => !ids.includes(link, index + 1)))
}
// left-pads a value with zeroes to width l, e.g. 1 => '01'
function zeropad (v = 1, l = 2) {
  const str = typeof v === 'string' ? v : v.toString()
  return str.padStart(l, '0')
}
// builds the quoted nyaa search tokens for one episode number,
// e.g. 1 => "E01+"|"E01v"|"+01+"|"+01v"
const epstring = ep => {
  const num = zeropad(ep)
  return `"E${num}+"|"E${num}v"|"+${num}+"|"+${num}v"`
}
// simplified shape: [EO]?[-EPD _—]\d{2}(?:[-v _.—]|$)
// expanded below so each separator class pairs with its matching terminator class;
// matches: OP01 ED01 EP01 E01 01v 01. -01- _01_ with spaces and stuff, plus "01 - 12" ranges
const epNumRx = /[EO]?[-EPD]\d{2}(?:[-v.]|$)|[EO]?[EPD ]\d{2}(?:[v .]|$)|[EO]?[EPD_]\d{2}(?:[v_.]|$)|[EO]?[EPD—]\d{2}(?:[v.—]|$)|\d{2} ?[-~—] ?\d{2}/i
// create an array of potentially valid titles from a given media
// builds every plausible search title for a media entry: all AniList titles and
// synonyms (de-duplicated, short junk names dropped) with URL-unsafe characters
// encoded, plus "S2"-style variants of "Season 2"/"2nd Season" and
// hyphen-stripped forms of hyphenated names
function createTitle (media) {
  const names = new Set(
    Object.values(media.title)
      .concat(media.synonyms)
      .filter(name => name != null && name.length > 3)
  )
  const titles = []
  const appendTitle = raw => {
    // encode characters the search endpoint chokes on
    const title = raw.replace(/&/g, '%26').replace(/\?/g, '%3F').replace(/#/g, '%23')
    titles.push(title)
    // also offer a compact "S2" variant, keeping the original title too
    const ordinal = title.match(/(\d)(?:nd|rd|th) Season/i)
    const worded = title.match(/Season (\d)/i)
    if (worded) {
      titles.push(title.replace(/Season \d/i, `S${worded[1]}`))
    } else if (ordinal) {
      titles.push(title.replace(/(\d)(?:nd|rd|th) Season/i, `S${ordinal[1]}`))
    }
  }
  for (const name of names) {
    appendTitle(name)
    if (name.includes('-')) appendTitle(name.replaceAll('-', ''))
  }
  return titles
}

View file

@ -0,0 +1,59 @@
import { mapBestRelease } from '../anime.js'
import { fastPrettyBytes } from '../util.js'
// base URL of the Animetosho JSON feed ('https://feed.animetosho.org/json?'), stored base64-encoded
const toshoURL = decodeURIComponent(atob('aHR0cHM6Ly9mZWVkLmFuaW1ldG9zaG8ub3JnL2pzb24/'))
// TODO: exclude unsupported codecs, query by resolution
// Animetosho torrent provider: resolves the AniList media to its AniDB episode
// ID via ani.zip, fetches per-episode torrents (plus full-series batches for
// finished multi-episode shows), then dedupes and flags best releases.
export default async function ({ media, episode }) {
  const mappingRes = await fetch('https://api.ani.zip/mappings?anilist_id=' + media.id)
  const { episodes, mappings: map } = await mappingRes.json()
  // assumes ani.zip has an entry for this episode number — TODO confirm upstream guarantees
  const { anidbEid } = episodes[Number(episode)]
  const episodeRes = await fetch(toshoURL + 'eid=' + anidbEid)
  const entries = await episodeRes.json()
  // look for batches: finished multi-episode shows may have full-series releases, largest first
  if (media.status === 'FINISHED' && media.episodes && media.episodes !== 1) {
    const batchRes = await fetch(toshoURL + 'aid=' + map.anidb_id + '&order=size-d')
    const allTorrents = await batchRes.json()
    // a batch must contain at least one file per episode
    entries.push(...allTorrents.filter(entry => entry.num_files >= media.episodes))
  }
  return mapBestRelease(mapTosho2dDeDupedEntry(entries))
}
// collapses Animetosho results that share an info_hash into single entries,
// keeping first-seen values and backfilling missing fields from later duplicates
function mapTosho2dDeDupedEntry (entries) {
  const byHash = {}
  for (const entry of entries) {
    const existing = byHash[entry.info_hash]
    if (existing) {
      existing.title ??= entry.torrent_name || entry.title
      existing.id ||= entry.nyaa_id
      existing.seeders ||= entry.seeders ?? 0
      existing.leechers ||= entry.leechers ?? 0
      existing.size ||= entry.total_size && fastPrettyBytes(entry.total_size)
      existing.date ||= entry.timestamp && new Date(entry.timestamp * 1000)
      continue
    }
    byHash[entry.info_hash] = {
      title: entry.torrent_name || entry.title,
      link: entry.magnet_uri,
      id: entry.nyaa_id,
      seeders: entry.seeders,
      leechers: entry.leechers,
      size: entry.total_size && fastPrettyBytes(entry.total_size),
      date: entry.timestamp && new Date(entry.timestamp * 1000)
    }
  }
  return Object.values(byHash)
}

View file

@ -1,45 +1,10 @@
import { DOMPARSER } from '@/modules/util.js'
import { set } from '@/views/Settings.svelte'
import { addToast } from '@/components/Toasts.svelte'
import { alRequest } from '@/modules/anilist.js'
import { add } from '@/modules/torrent.js'
import { findEdge, resolveSeason, getMediaMaxEp, resolveFileMedia, getEpisodeMetadataForMedia } from './anime.js'
import { resolveFileMedia, getEpisodeMetadataForMedia } from './anime.js'
function binarySearch (arr, el) {
let left = 0
let right = arr.length - 1
while (left <= right) {
// Using bitwise or instead of Math.floor as it is slightly faster
const mid = ((right + left) / 2) | 0
if (arr[mid] === el) {
return true
} else if (el < arr[mid]) {
right = mid - 1
} else {
left = mid + 1
}
}
return false
}
let seadex = []
requestIdleCallback(async () => {
const res = await fetch('https://sneedex.moe/api/public/nyaa')
const json = await res.json()
seadex = json.flatMap(({ nyaaIDs }) => nyaaIDs).sort((a, b) => a - b) // sort for binary search
})
function mapBestRelease (entries) {
return entries.map(entry => {
const match = entry.link.match(/\d+/i)
if (match && binarySearch(seadex, Number(match[0]))) entry.best = true
return entry
})
}
const exclusions = ['DTS']
export const exclusions = ['DTS']
const isDev = location.hostname === 'localhost'
const video = document.createElement('video')
@ -92,148 +57,6 @@ export async function getRSSContent (url) {
return DOMPARSER(await res.text(), 'text/xml')
}
// padleft a variable with 0 ex: 1 => '01'
function zeropad (v = 1, l = 2) {
return (typeof v === 'string' ? v : v.toString()).padStart(l, '0')
}
const epstring = ep => `"E${zeropad(ep)}+"|"E${zeropad(ep)}v"|"+${zeropad(ep)}+"|"+${zeropad(ep)}v"`
// [EO]?[-EPD _—]\d{2}(?:[-v _.—]|$)
// /[EO]?[-EPD]\d{2}(?:[-v.]|$)|[EO]?[EPD ]\d{2}(?:[v .]|$)|[EO]?[EPD_]\d{2}(?:[v_.]|$)|[EO]?[EPD—]\d{2}(?:[v.—]|$)|\d{2} ?[-~—] ?\d{2}/i
// matches: OP01 ED01 EP01 E01 01v 01. -01- _01_ with spaces and stuff
const epNumRx = /[EO]?[-EPD]\d{2}(?:[-v.]|$)|[EO]?[EPD ]\d{2}(?:[v .]|$)|[EO]?[EPD_]\d{2}(?:[v_.]|$)|[EO]?[EPD—]\d{2}(?:[v.—]|$)|\d{2} ?[-~—] ?\d{2}/i
export async function getRSSEntries ({ media, episode, mode, ignoreQuality }) {
// mode cuts down on the amt of queries made 'check' || 'batch'
const titles = createTitle(media).join(')|(')
const prequel = findEdge(media, 'PREQUEL')?.node
const sequel = findEdge(media, 'SEQUEL')?.node
const isBatch = media.status === 'FINISHED' && media.episodes !== 1
// if media has multiple seasons, and this S is > 1, then get the absolute episode number of the episode
const absolute = prequel && !mode && (await resolveSeason({ media, episode, force: true }))
const absoluteep = absolute?.offset + episode
const episodes = [episode]
// only use absolute episode number if its smaller than max episodes this series has, ex:
// looking for E1 of S2, S1 has 12 ep and S2 has 13, absolute will be 13
// so this would find the 13th ep of the 2nd season too if this check wasnt here
if (absolute && absoluteep < (getMediaMaxEp(media) || episode)) {
episodes.push(absoluteep)
}
let ep = ''
if (media.episodes !== 1 && mode !== 'batch') {
if (isBatch) {
const digits = Math.max(2, Math.log(media.episodes) * Math.LOG10E + 1 | 0)
ep = `"${zeropad(1, digits)}-${zeropad(media.episodes, digits)}"|"${zeropad(1, digits)}~${zeropad(media.episodes, digits)}"|"Batch"|"Complete"|"${zeropad(episode)}+"|"${zeropad(episode)}v"`
} else {
ep = `(${episodes.map(epstring).join('|')})`
}
}
const excl = exclusions.join('|')
const quality = (!ignoreQuality && (`"${set.rssQuality}"` || '"1080"')) || ''
const url = new URL(`${set.catURL}/?page=rss&c=1_2&f=0&s=seeders&o=desc&q=(${titles})${ep}${quality}-(${excl})`)
let nodes = [...(await getRSSContent(url)).querySelectorAll('item')]
if (absolute) {
// if this is S > 1 aka absolute ep number exists get entries for S1title + absoluteEP
// the reason this isnt done with recursion like sequelEntries is because that would include the S1 media dates
// we want the dates of the target media as the S1 title might be used for SX releases
const titles = createTitle(absolute.media).join(')|(')
const url = new URL(`${set.catURL}/?page=rss&c=1_2&f=0&s=seeders&o=desc&q=(${titles})${epstring(absoluteep)}${quality}-(${excl})`)
nodes = [...nodes, ...(await getRSSContent(url)).querySelectorAll('item')]
}
let entries = parseRSSNodes(nodes)
const checkSequelDate = media.status === 'FINISHED' && (sequel?.status === 'FINISHED' || sequel?.status === 'RELEASING') && sequel.startDate
const sequelStartDate = checkSequelDate && new Date(Object.values(checkSequelDate).join(' '))
// recursive, get all entries for media sequel, and its sequel, and its sequel
const sequelEntries =
(sequel?.status === 'FINISHED' || sequel?.status === 'RELEASING') &&
(await getRSSEntries({ media: (await alRequest({ method: 'SearchIDSingle', id: sequel.id })).data.Media, episode, mode: mode || 'check' }))
const checkPrequelDate = (media.status === 'FINISHED' || media.status === 'RELEASING') && prequel?.status === 'FINISHED' && prequel?.endDate
const prequelEndDate = checkPrequelDate && new Date(Object.values(checkPrequelDate).join(' '))
// 1 month in MS, a bit of jitter for pre-releases and releasers being late as fuck, lets hope it doesnt cause issues
const month = 2674848460
if (prequelEndDate) {
entries = entries.filter(entry => entry.date > new Date(+prequelEndDate + month))
}
if (sequelStartDate && media.format === 'TV') {
entries = entries.filter(entry => entry.date < new Date(+sequelStartDate - month))
}
if (sequelEntries?.length) {
if (mode === 'check') {
entries = [...entries, ...sequelEntries]
} else {
entries = entries.filter(entry => !sequelEntries.find(sequel => sequel.link === entry.link))
}
}
// this gets entries without any episode limiting, and for batches
const batchEntries = !mode && isBatch && (await getRSSEntries({ media, episode, ignoreQuality, mode: 'batch' })).filter(entry => {
return !epNumRx.test(entry.title)
})
if (batchEntries?.length) {
entries = [...entries, ...batchEntries]
}
// some archaic shows only have shit DVD's in weird qualities, so try to look up without any quality restrictions when there are no results
if (!entries.length && !ignoreQuality && !mode) {
entries = await getRSSEntries({ media, episode, ignoreQuality: true })
}
// dedupe
const ids = entries.map(e => e.link)
return mapBestRelease(entries.filter(({ link }, index) => !ids.includes(link, index + 1)))
}
// create an array of potentially valid titles from a given media
function createTitle (media) {
// group and de-duplicate
const grouped = [
...new Set(
Object.values(media.title)
.concat(media.synonyms)
.filter(name => name != null && name.length > 3)
)
]
const titles = []
const appendTitle = t => {
// replace & with encoded
const title = t.replace(/&/g, '%26').replace(/\?/g, '%3F').replace(/#/g, '%23')
titles.push(title)
// replace Season 2 with S2, else replace 2nd Season with S2, but keep the original title
const match1 = title.match(/(\d)(?:nd|rd|th) Season/i)
const match2 = title.match(/Season (\d)/i)
if (match2) {
titles.push(title.replace(/Season \d/i, `S${match2[1]}`))
} else if (match1) {
titles.push(title.replace(/(\d)(?:nd|rd|th) Season/i, `S${match1[1]}`))
}
}
for (const t of grouped) {
appendTitle(t)
if (t.includes('-')) appendTitle(t.replaceAll('-', ''))
}
return titles
}
class RSSMediaManager {
constructor () {
this.resultMap = {}

View file

@ -112,3 +112,22 @@ export function wrapEnter (fn) {
if (key === 'Enter') fn()
}
}
// membership test on a sorted array via binary search; returns true when el is present
export function binarySearch (arr, el) {
  let lo = 0
  let hi = arr.length - 1
  while (lo <= hi) {
    const mid = (lo + hi) >> 1 // integer midpoint (bit-shift instead of Math.floor)
    const probe = arr[mid]
    if (probe === el) return true
    if (el < probe) hi = mid - 1
    else lo = mid + 1
  }
  return false
}

View file

@ -3,7 +3,7 @@
import { set } from './Settings.svelte'
import { addToast } from '../components/Toasts.svelte'
import { findInCurrent } from './Player/MediaHandler.svelte'
import { getRSSEntries } from '@/modules/rss.js'
import getRSSEntries from '@/modules/providers/tosho.js'
import { writable } from 'svelte/store'
@ -96,9 +96,9 @@
<th>{index + 1}</th>
<td>{row.title}</td>
<td>{row.size}</td>
<td>{row.seeders}</td>
<td>{row.leechers}</td>
<td>{row.downloads}</td>
<td>{row.seeders ?? '?'}</td>
<td>{row.leechers ?? '?'}</td>
<td>{row.downloads ?? '?'}</td>
<td>{since(row.date)}</td>
<td class='material-symbols-outlined font-size-20'>play_arrow</td>
</tr>