diff --git a/common/App.svelte b/common/App.svelte
index cd34e06..b27c7d6 100644
--- a/common/App.svelte
+++ b/common/App.svelte
@@ -20,7 +20,7 @@
import Sidebar from './components/Sidebar.svelte'
import Router from './Router.svelte'
import ViewAnime from './views/ViewAnime/ViewAnime.svelte'
- import RSSView from './views/RSSView.svelte'
+ import TorrentModal from './views/TorrentSearch/TorrentModal.svelte'
import Menubar from './components/Menubar.svelte'
import IspBlock from './views/IspBlock.svelte'
import { Toaster } from 'svelte-sonner'
@@ -38,7 +38,7 @@
diff --git a/common/modules/anime.js b/common/modules/anime.js
index 70e9423..57e299f 100644
--- a/common/modules/anime.js
+++ b/common/modules/anime.js
@@ -8,7 +8,7 @@ import clipboard from './clipboard.js'
import { search, key } from '@/views/Search.svelte'
-import { playAnime } from '../views/RSSView.svelte'
+import { playAnime } from '@/views/TorrentSearch/TorrentModal.svelte'
const imageRx = /\.(jpeg|jpg|gif|png|webp)/i
diff --git a/common/modules/extensions/index.js b/common/modules/extensions/index.js
new file mode 100644
index 0000000..5982b39
--- /dev/null
+++ b/common/modules/extensions/index.js
@@ -0,0 +1,197 @@
+import { settings } from '@/modules/settings.js'
+import { exclusions } from '../rss.js'
+import { sleep } from '../util.js'
+import { anilistClient } from '../anilist.js'
+import { anitomyscript } from '../anime.js'
+import { client } from '@/modules/torrent.js'
+import { extensionsWorker } from '@/views/Settings/TorrentSettings.svelte'
+
+/** @typedef {import('@thaunknown/ani-resourced/sources/types.d.ts').Options} Options */
+/** @typedef {import('@thaunknown/ani-resourced/sources/types.d.ts').Result} Result */
+
+/**
+ * @param {{media: import('../al.js').Media, episode?: number, batch: boolean, movie: boolean, resolution: string}} opts
+ * @returns {Promise<(Result & { parseObject: import('anitomyscript').AnitomyResult })[]>}
+ * **/
+export default async function getResultsFromExtensions ({ media, episode, batch, movie, resolution }) {
+ const aniDBMeta = await ALToAniDB(media)
+ const anidbAid = aniDBMeta?.mappings?.anidb_id
+ const anidbEid = anidbAid && (await ALtoAniDBEpisode({ media, episode }, aniDBMeta))?.anidbEid
+
+  const worker = await /** @type {ReturnType<typeof import('./worker.js')['loadExtensions']>} */(extensionsWorker)
+
+ /** @type {Options} */
+ const options = {
+ anilistId: media.id,
+ episodeCount: media.episodes,
+ episode,
+ anidbAid,
+ anidbEid,
+ titles: createTitles(media),
+ resolution,
+ exclusions
+ }
+
+ const results = await worker.query(options, { movie, batch }, settings.value.sources)
+
+ const deduped = dedupe(results)
+
+ if (!deduped?.length) throw new Error('No results found')
+
+ const parseObjects = await anitomyscript(deduped.map(({ title }) => title))
+ // @ts-ignore
+ for (const i in parseObjects) deduped[i].parseObject = parseObjects[i]
+
+ return updatePeerCounts(deduped)
+}
+
+async function updatePeerCounts (entries) {
+ const id = crypto.randomUUID()
+
+ const updated = await Promise.race([
+ new Promise(resolve => {
+ function check ({ detail }) {
+ if (detail.id !== id) return
+ client.removeListener('scrape', check)
+ resolve(detail.result)
+ }
+ client.on('scrape', check)
+ client.send('scrape', { id, infoHashes: entries.map(({ hash }) => hash) })
+ }),
+ sleep(5000)
+ ])
+
+ for (const { hash, complete, downloaded, incomplete } of updated || []) {
+ const found = entries.find(mapped => mapped.hash === hash)
+ found.downloads = downloaded
+ found.leechers = incomplete
+ found.seeders = complete
+ }
+ return entries
+}
+
+/** @param {import('../al.js').Media} media */
+async function ALToAniDB (media) {
+ const mappingsResponse = await fetch('https://api.ani.zip/mappings?anilist_id=' + media.id)
+ const json = await mappingsResponse.json()
+ if (json.mappings?.anidb_id) return json
+
+ const parentID = getParentForSpecial(media)
+ if (!parentID) return
+
+ const parentResponse = await fetch('https://api.ani.zip/mappings?anilist_id=' + parentID)
+ return parentResponse.json()
+}
+
+/** @param {import('../al.js').Media} media */
+function getParentForSpecial (media) {
+ if (!['SPECIAL', 'OVA', 'ONA'].some(format => media.format === format)) return false
+ const animeRelations = media.relations.edges.filter(({ node }) => node.type === 'ANIME')
+
+ return getRelation(animeRelations, 'PARENT') || getRelation(animeRelations, 'PREQUEL') || getRelation(animeRelations, 'SEQUEL')
+}
+
+function getRelation (list, type) {
+ return list.find(({ relationType }) => relationType === type)?.node.id
+}
+
+// TODO: https://anilist.co/anime/13055/
+/**
+ * @param {{media: import('../al.js').Media, episode: number}} param0
+ * @param {{episodes: any, episodeCount: number, specialCount: number}} param1
+ * */
+async function ALtoAniDBEpisode ({ media, episode }, { episodes, episodeCount, specialCount }) {
+ if (!episode || !Object.values(episodes).length) return
+ // if media has no specials or their episode counts don't match
+ if (!specialCount || (media.episodes && media.episodes === episodeCount && episodes[Number(episode)])) return episodes[Number(episode)]
+ const res = await anilistClient.episodeDate({ id: media.id, ep: episode })
+ // TODO: if media only has one episode, and airdate doesn't exist use start/release/end dates
+ const alDate = new Date((res.data.AiringSchedule?.airingAt || 0) * 1000)
+
+ return episodeByAirDate(alDate, episodes, episode)
+}
+
+/**
+ * @param {Date} alDate
+ * @param {any} episodes
+ * @param {number} episode
+ **/
+export function episodeByAirDate (alDate, episodes, episode) {
+ if (!+alDate) return episodes[Number(episode)] || episodes[1] // what the fuck, are you braindead anilist?, the source episode number to play is from an array created from AL ep count, so how come it's missing?
+  // 1 is key for episode 1, not index
+
+ // find closest episodes by air date, multiple episodes can have the same air date distance
+  // inefficient but reliable
+ const closestEpisodes = Object.values(episodes).reduce((prev, curr) => {
+ if (!prev[0]) return [curr]
+ const prevDate = Math.abs(+new Date(prev[0]?.airdate) - +alDate)
+ const currDate = Math.abs(+new Date(curr.airdate) - +alDate)
+ if (prevDate === currDate) {
+ prev.push(curr)
+ return prev
+ }
+ if (currDate < prevDate) return [curr]
+ return prev
+ }, [])
+
+ return closestEpisodes.reduce((prev, curr) => {
+ return Math.abs(curr.episodeNumber - episode) < Math.abs(prev.episodeNumber - episode) ? curr : prev
+ })
+}
+
+/** @param {import('../al.js').Media} media */
+function createTitles (media) {
+ // group and de-duplicate
+ const grouped = [...new Set(
+ Object.values(media.title)
+ .concat(media.synonyms)
+ .filter(name => name != null && name.length > 3)
+ )]
+ const titles = []
+ /** @param {string} title */
+ const appendTitle = title => {
+ // replace & with encoded
+ // title = title.replace(/&/g, '%26').replace(/\?/g, '%3F').replace(/#/g, '%23')
+ titles.push(title)
+
+ // replace Season 2 with S2, else replace 2nd Season with S2, but keep the original title
+ const match1 = title.match(/(\d)(?:nd|rd|th) Season/i)
+ const match2 = title.match(/Season (\d)/i)
+
+ if (match2) {
+ titles.push(title.replace(/Season \d/i, `S${match2[1]}`))
+ } else if (match1) {
+ titles.push(title.replace(/(\d)(?:nd|rd|th) Season/i, `S${match1[1]}`))
+ }
+ }
+ for (const t of grouped) {
+ appendTitle(t)
+ if (t.includes('-')) appendTitle(t.replaceAll('-', ''))
+ }
+ return titles
+}
+
+/** @param {Result[]} entries */
+function dedupe (entries) {
+  /** @type {Record<string, Result>} */
+ const deduped = {}
+ for (const entry of entries) {
+ if (deduped[entry.hash]) {
+ const dupe = deduped[entry.hash]
+ dupe.title ??= entry.title
+ dupe.link ??= entry.link
+ dupe.id ||= entry.id
+ dupe.seeders ||= entry.seeders >= 30000 ? 0 : entry.seeders
+ dupe.leechers ||= entry.leechers >= 30000 ? 0 : entry.leechers
+ dupe.downloads ||= entry.downloads
+ dupe.size ||= entry.size
+ dupe.verified ||= entry.verified
+ dupe.date ||= entry.date
+ dupe.type ??= entry.type
+ } else {
+ deduped[entry.hash] = entry
+ }
+ }
+
+ return Object.values(deduped)
+}
diff --git a/common/modules/extensions/worker.js b/common/modules/extensions/worker.js
new file mode 100644
index 0000000..4c94e4f
--- /dev/null
+++ b/common/modules/extensions/worker.js
@@ -0,0 +1,53 @@
+import { expose, proxy } from 'comlink'
+
+/** @typedef {import('@thaunknown/ani-resourced/sources/types.d.ts').Options} Options */
+/** @typedef {import('@thaunknown/ani-resourced/sources/types.d.ts').Result} Result */
+/** @typedef {import('@thaunknown/ani-resourced/sources/abstract.js').default} AbstractSource */
+
+class Extensions {
+ sources
+ metadata
+ /** @param {AbstractSource[]} sources */
+ constructor (sources) {
+ this.sources = sources
+ this.metadata = sources.map(({ accuracy, name, description, config }) => ({ accuracy, name, description, config }))
+ }
+
+ /**
+ * @param {Options} options
+ * @param {{ movie: boolean, batch: boolean }} param1
+   * @param {Record<string, boolean>} sources
+ */
+ async query (options, { movie, batch }, sources) {
+    /** @type {Promise<Result[]>[]} */
+ const promises = []
+ for (const source of Object.values(this.sources)) {
+ if (!sources[source.name]) continue
+ if (movie) promises.push(source.movie(options))
+ if (batch) promises.push(source.batch(options))
+ promises.push(source.single(options))
+ }
+ /** @type {Result[]} */
+ const results = []
+ for (const result of await Promise.allSettled(promises)) {
+ if (result.status === 'fulfilled') results.push(...result.value)
+ }
+ return results.flat()
+ }
+}
+
+/** @param {string[]} extensions */
+export async function loadExtensions (extensions) {
+ // TODO: handle import errors
+ const sources = (await Promise.all(extensions.map(async extension => {
+ try {
+ if (!extension.startsWith('http')) extension = `https://esm.sh/${extension}`
+ return Object.values(await import(/* webpackIgnore: true */extension))
+ } catch (error) {
+ return []
+ }
+ }))).flat()
+ return proxy(new Extensions(sources))
+}
+
+expose(loadExtensions)
diff --git a/common/modules/providers/cat.js b/common/modules/providers/cat.js
deleted file mode 100644
index 3a1e03a..0000000
--- a/common/modules/providers/cat.js
+++ /dev/null
@@ -1,147 +0,0 @@
-import { anilistClient } from '@/modules/anilist.js'
-import { settings } from '@/modules/settings.js'
-import { findEdge, resolveSeason, getMediaMaxEp, mapBestRelease } from '../anime.js'
-import { exclusions, getRSSContent, parseRSSNodes } from '../rss.js'
-
-export default async function getRSSEntries ({ media, episode, mode, ignoreQuality }) {
- // mode cuts down on the amt of queries made 'check' || 'batch'
- const titles = createTitle(media).join(')|(')
-
- const prequel = findEdge(media, 'PREQUEL')?.node
- const sequel = findEdge(media, 'SEQUEL')?.node
- const isBatch = media.status === 'FINISHED' && media.episodes !== 1
-
- // if media has multiple seasons, and this S is > 1, then get the absolute episode number of the episode
- const absolute = prequel && !mode && (await resolveSeason({ media, episode, force: true }))
- const absoluteep = absolute?.offset + episode
- const episodes = [episode]
-
- // only use absolute episode number if its smaller than max episodes this series has, ex:
- // looking for E1 of S2, S1 has 12 ep and S2 has 13, absolute will be 13
- // so this would find the 13th ep of the 2nd season too if this check wasnt here
- if (absolute && absoluteep < (getMediaMaxEp(media) || episode)) {
- episodes.push(absoluteep)
- }
-
- let ep = ''
- if (media.episodes !== 1 && mode !== 'batch') {
- if (isBatch) {
- const digits = Math.max(2, Math.log(media.episodes) * Math.LOG10E + 1 | 0)
- ep = `"${zeropad(1, digits)}-${zeropad(media.episodes, digits)}"|"${zeropad(1, digits)}~${zeropad(media.episodes, digits)}"|"Batch"|"Complete"|"${zeropad(episode)}+"|"${zeropad(episode)}v"`
- } else {
- ep = `(${episodes.map(epstring).join('|')})`
- }
- }
-
- const excl = exclusions.join('|')
- const quality = (!ignoreQuality && (`"${settings.value.rssQuality}"` || '"1080"')) || ''
- const url = new URL(`${settings.value.catURL}/?page=rss&c=1_2&f=0&s=seeders&o=desc&q=(${titles})${ep}${quality}-(${excl})`)
-
- let nodes = [...(await getRSSContent(url)).querySelectorAll('item')]
-
- if (absolute) {
- // if this is S > 1 aka absolute ep number exists get entries for S1title + absoluteEP
- // the reason this isnt done with recursion like sequelEntries is because that would include the S1 media dates
- // we want the dates of the target media as the S1 title might be used for SX releases
- const titles = createTitle(absolute.media).join(')|(')
-
- const url = new URL(`${settings.value.catURL}/?page=rss&c=1_2&f=0&s=seeders&o=desc&q=(${titles})${epstring(absoluteep)}${quality}-(${excl})`)
- nodes = [...nodes, ...(await getRSSContent(url)).querySelectorAll('item')]
- }
-
- let entries = parseRSSNodes(nodes)
-
- const checkSequelDate = media.status === 'FINISHED' && (sequel?.status === 'FINISHED' || sequel?.status === 'RELEASING') && sequel.startDate
-
- const sequelStartDate = checkSequelDate && new Date(Object.values(checkSequelDate).join(' '))
-
- // recursive, get all entries for media sequel, and its sequel, and its sequel
- const sequelEntries =
- (sequel?.status === 'FINISHED' || sequel?.status === 'RELEASING') &&
- (await getRSSEntries({ media: (await anilistClient.searchIDSingle({ id: sequel.id })).data.Media, episode, mode: mode || 'check' }))
-
- const checkPrequelDate = (media.status === 'FINISHED' || media.status === 'RELEASING') && prequel?.status === 'FINISHED' && prequel?.endDate
-
- const prequelEndDate = checkPrequelDate && new Date(Object.values(checkPrequelDate).join(' '))
-
- // 1 month in MS, a bit of jitter for pre-releases and releasers being late as fuck, lets hope it doesnt cause issues
- const month = 2674848460
-
- if (prequelEndDate) {
- entries = entries.filter(entry => entry.date > new Date(+prequelEndDate + month))
- }
-
- if (sequelStartDate && media.format === 'TV') {
- entries = entries.filter(entry => entry.date < new Date(+sequelStartDate - month))
- }
-
- if (sequelEntries?.length) {
- if (mode === 'check') {
- entries = [...entries, ...sequelEntries]
- } else {
- entries = entries.filter(entry => !sequelEntries.find(sequel => sequel.link === entry.link))
- }
- }
-
- // this gets entries without any episode limiting, and for batches
- const batchEntries = !mode && isBatch && (await getRSSEntries({ media, episode, ignoreQuality, mode: 'batch' })).filter(entry => {
- return !epNumRx.test(entry.title)
- })
-
- if (batchEntries?.length) {
- entries = [...entries, ...batchEntries]
- }
-
- // some archaic shows only have shit DVD's in weird qualities, so try to look up without any quality restrictions when there are no results
- if (!entries.length && !ignoreQuality && !mode) {
- entries = await getRSSEntries({ media, episode, ignoreQuality: true })
- }
-
- // dedupe
- const ids = entries.map(e => e.link)
- return mapBestRelease(entries.filter(({ link }, index) => !ids.includes(link, index + 1)))
-}
-
-// padleft a variable with 0 ex: 1 => '01'
-function zeropad (v = 1, l = 2) {
- return (typeof v === 'string' ? v : v.toString()).padStart(l, '0')
-}
-
-const epstring = ep => `"E${zeropad(ep)}+"|"E${zeropad(ep)}v"|"+${zeropad(ep)}+"|"+${zeropad(ep)}v"`
-// [EO]?[-EPD _—]\d{2}(?:[-v _.—]|$)
-// /[EO]?[-EPD]\d{2}(?:[-v.]|$)|[EO]?[EPD ]\d{2}(?:[v .]|$)|[EO]?[EPD_]\d{2}(?:[v_.]|$)|[EO]?[EPD—]\d{2}(?:[v.—]|$)|\d{2} ?[-~—] ?\d{2}/i
-// matches: OP01 ED01 EP01 E01 01v 01. -01- _01_ with spaces and stuff
-const epNumRx = /[EO]?[-EPD]\d{2}(?:[-v.]|$)|[EO]?[EPD ]\d{2}(?:[v .]|$)|[EO]?[EPD_]\d{2}(?:[v_.]|$)|[EO]?[EPD—]\d{2}(?:[v.—]|$)|\d{2} ?[-~—] ?\d{2}/i
-
-// create an array of potentially valid titles from a given media
-function createTitle (media) {
- // group and de-duplicate
- const grouped = [
- ...new Set(
- Object.values(media.title)
- .concat(media.synonyms)
- .filter(name => name != null && name.length > 3)
- )
- ]
- const titles = []
- const appendTitle = t => {
- // replace & with encoded
- const title = t.replace(/&/g, '%26').replace(/\?/g, '%3F').replace(/#/g, '%23')
- titles.push(title)
-
- // replace Season 2 with S2, else replace 2nd Season with S2, but keep the original title
- const match1 = title.match(/(\d)(?:nd|rd|th) Season/i)
- const match2 = title.match(/Season (\d)/i)
-
- if (match2) {
- titles.push(title.replace(/Season \d/i, `S${match2[1]}`))
- } else if (match1) {
- titles.push(title.replace(/(\d)(?:nd|rd|th) Season/i, `S${match1[1]}`))
- }
- }
- for (const t of grouped) {
- appendTitle(t)
- if (t.includes('-')) appendTitle(t.replaceAll('-', ''))
- }
- return titles
-}
diff --git a/common/modules/providers/seadex.js b/common/modules/providers/seadex.js
deleted file mode 100644
index ac179c5..0000000
--- a/common/modules/providers/seadex.js
+++ /dev/null
@@ -1,24 +0,0 @@
-import { fastPrettyBytes } from '../util.js'
-
-export default async function (media) {
- const res = await fetch(`https://beta.releases.moe/api/collections/entries/records?page=1&perPage=1&filter=alID%3D%22${media.id}%22&skipTotal=1&expand=trs`)
- const { items } = await res.json()
-
- if (!items[0]?.expand?.trs?.length) return []
-
- const { trs } = items[0]?.expand
-
- return trs.filter(({ infoHash }) => infoHash !== '').map(torrent => {
- return {
- hash: torrent.infoHash,
- link: torrent.infoHash,
- title: `[${torrent.releaseGroup}] ${media.title.userPreferred}`,
- size: fastPrettyBytes(torrent.files.reduce((prev, curr) => prev + curr.length, 0)),
- type: torrent.isBest ? 'best' : 'alt',
- date: new Date(torrent.created),
- parseObject: {
- audio_term: [torrent.dualAudio ? 'DUALAUDIO' : '']
- }
- }
- })
-}
diff --git a/common/modules/providers/sneedex.js b/common/modules/providers/sneedex.js
deleted file mode 100644
index 4311e48..0000000
--- a/common/modules/providers/sneedex.js
+++ /dev/null
@@ -1,21 +0,0 @@
-import { binarySearch } from '../util.js'
-
-let seadex = []
-requestIdleCallback(async () => {
- const res = await fetch('https://sneedex.moe/api/public/nyaa')
- const json = await res.json()
- seadex = json.flatMap(({ nyaaIDs }) => nyaaIDs).sort((a, b) => a - b) // sort for binary search
-})
-
-export default function (entries) {
- return entries.map(entry => {
- if (entry.id) {
- if (entry.id === '?') return entry
- if (binarySearch(seadex, entry.id)) entry.type = 'alt'
- return entry
- }
- const match = entry.link.match(/\d+/i)
- if (match && binarySearch(seadex, Number(match[0]))) entry.type = 'alt'
- return entry
- })
-}
diff --git a/common/modules/providers/tosho.js b/common/modules/providers/tosho.js
deleted file mode 100644
index 0077969..0000000
--- a/common/modules/providers/tosho.js
+++ /dev/null
@@ -1,326 +0,0 @@
-import { anitomyscript } from '../anime.js'
-import { fastPrettyBytes, sleep } from '../util.js'
-import { exclusions } from '../rss.js'
-import { settings } from '@/modules/settings.js'
-import { anilistClient } from '../anilist.js'
-import { client } from '@/modules/torrent.js'
-import mapBestSneedexReleases from './sneedex.js'
-import getSeedexBests from './seadex.js'
-
-export default async function ({ media, episode }) {
- const json = await getAniDBFromAL(media)
- if (typeof json !== 'object') {
- const bests = await getSeedexBests(media)
- if (!bests.length) throw new Error(json || 'No mapping found.')
- return bests
- }
-
- const movie = isMovie(media) // don't query movies with qualities, to allow 4k
-
- const aniDBEpisode = await getAniDBEpisodeFromAL({ media, episode }, json)
- let entries = await getToshoEntriesForMedia(media, aniDBEpisode, json, !movie && settings.value.rssQuality)
- if (!entries.length && !movie) entries = await getToshoEntriesForMedia(media, aniDBEpisode, json)
- if (!entries?.length) throw new Error('No entries found.')
-
- const deduped = dedupeEntries(entries)
- const parseObjects = await anitomyscript(deduped.map(({ title }) => title))
- for (const i in parseObjects) deduped[i].parseObject = parseObjects[i]
-
- const withBests = dedupeEntries([...await getSeedexBests(media), ...mapBestSneedexReleases(deduped)])
-
- return updatePeerCounts(withBests)
-}
-
-async function updatePeerCounts (entries) {
- const id = crypto.randomUUID()
-
- const updated = await Promise.race([
- new Promise(resolve => {
- function check ({ detail }) {
- if (detail.id !== id) return
- client.removeListener('scrape', check)
- resolve(detail.result)
- console.log(detail)
- }
- client.on('scrape', check)
- client.send('scrape', { id, infoHashes: entries.map(({ hash }) => hash) })
- }),
- sleep(5000)
- ])
-
- for (const { hash, complete, downloaded, incomplete } of updated || []) {
- const found = entries.find(mapped => mapped.hash === hash)
- found.downloads = downloaded
- found.leechers = incomplete
- found.seeders = complete
- }
- return entries
-}
-
-async function getAniDBFromAL (media) {
- console.log('getting AniDB ID from AL')
- const mappingsResponse = await fetch('https://api.ani.zip/mappings?anilist_id=' + media.id)
- const json = await mappingsResponse.json()
- if (json.mappings?.anidb_id) return json
-
- console.log('failed getting AniDB ID, checking via parent')
-
- const parentID = getParentForSpecial(media)
- if (!parentID) return
-
- console.log('found via parent')
-
- const parentResponse = await fetch('https://api.ani.zip/mappings?anilist_id=' + parentID)
- return parentResponse.json()
-}
-
-function getParentForSpecial (media) {
- if (!['SPECIAL', 'OVA', 'ONA'].some(format => media.format === format)) return false
- const animeRelations = media.relations.edges.filter(({ node }) => node.type === 'ANIME')
-
- return getRelation(animeRelations, 'PARENT') || getRelation(animeRelations, 'PREQUEL') || getRelation(animeRelations, 'SEQUEL')
-}
-
-function getRelation (list, type) {
- return list.find(({ relationType }) => relationType === type)?.node.id
-}
-
-// TODO: https://anilist.co/anime/13055/
-async function getAniDBEpisodeFromAL ({ media, episode }, { episodes, episodeCount, specialCount }) {
- console.log('getting AniDB EpID for Mal EP', { episode, episodes })
- if (!episode || !Object.values(episodes).length) return
- // if media has no specials or their episode counts don't match
- if (!specialCount || (media.episodes && media.episodes === episodeCount && episodes[Number(episode)])) return episodes[Number(episode)]
- console.log('EP count doesn\'t match, checking by air date')
- const res = await anilistClient.episodeDate({ id: media.id, ep: episode })
- // TODO: if media only has one episode, and airdate doesn't exist use start/release/end dates
- const alDate = new Date((res.data.AiringSchedule?.airingAt || 0) * 1000)
-
- return getEpisodeNumberByAirDate(alDate, episodes, episode)
-}
-
-export function getEpisodeNumberByAirDate (alDate, episodes, episode) {
- if (!+alDate) return episodes[Number(episode)] || episodes[1] // what the fuck, are you braindead anilist?, the source episode number to play is from an array created from AL ep count, so how come it's missing?
- // 1 is key for episod 1, not index
-
- // find closest episodes by air date, multiple episodes can have the same air date distance
- // ineffcient but reliable
- const closestEpisodes = Object.values(episodes).reduce((prev, curr) => {
- if (!prev[0]) return [curr]
- const prevDate = Math.abs(+new Date(prev[0]?.airdate) - alDate)
- const currDate = Math.abs(+new Date(curr.airdate) - alDate)
- if (prevDate === currDate) {
- prev.push(curr)
- return prev
- }
- if (currDate < prevDate) return [curr]
- return prev
- }, [])
-
- console.log({ closestEpisodes })
-
- return closestEpisodes.reduce((prev, curr) => {
- return Math.abs(curr.episodeNumber - episode) < Math.abs(prev.episodeNumber - episode) ? curr : prev
- })
-}
-
-async function getToshoEntriesForMedia (media, episode, { mappings }, quality) {
- const promises = []
-
- if (episode) {
- const { anidbEid } = episode
-
- console.log('fetching episode', anidbEid, quality)
-
- promises.push(fetchSingleEpisodeForAnidb({ id: anidbEid, quality }))
- } else {
- // TODO: look for episodes via.... title?
- }
-
- // look for batches and movies
- const movie = isMovie(media)
- if (mappings.anidb_id && media.status === 'FINISHED' && (movie || media.episodes !== 1)) {
- promises.push(fetchBatchesForAnidb({ episodeCount: media.episodes, id: mappings.anidb_id, quality, movie }))
- console.log('fetching batch', quality, movie)
- if (!movie) {
- const courRelation = getSplitCourRelation(media)
- if (courRelation) {
- console.log('found split cour!')
- const episodeCount = (media.episodes || 0) + (courRelation.episodes || 0)
- try {
- const mappingsResponse = await fetch('https://api.ani.zip/mappings?anilist_id=' + courRelation.id)
- const json = await mappingsResponse.json()
- console.log('found mappings for split cour', !!json.mappings.anidb_id)
- if (json.mappings.anidb_id) promises.push(fetchBatchesForAnidb({ episodeCount, id: json.mappings.anidb_id, quality }))
- } catch (e) {
- console.error('failed getting split-cour data', e)
- }
- }
- }
- }
-
- return mapToshoEntries((await Promise.all(promises)).flat())
-}
-
-function getSplitCourRelation (media) {
- // Part 2 / Cour 3 / 4th Cour
- if (isTitleSplitCour(media)) return getCourPrequel(media)
-
- // Part 1 of split cour which usually doesn't get labeled as split cour
- // sequel can not exist
- return getCourSequel(media)
-}
-
-const courRegex = /[2-9](?:nd|rd|th) Cour|Cour [2-9]|Part [2-9]/i
-
-function isTitleSplitCour (media) {
- const titles = [...Object.values(media.title), ...media.synonyms]
-
- console.log('checking cour titles', titles)
-
- return titles.some(title => courRegex.test(title))
-}
-
-const seasons = ['WINTER', 'SPRING', 'SUMMER', 'FALL']
-const getDate = ({ seasonYear, season }) => +new Date(`${seasonYear}-${seasons.indexOf(season) * 4 || 1}-01`)
-
-function getMediaDate (media) {
- if (media.startDate) return +new Date(Object.values(media.startDate).join(' '))
- return getDate(media)
-}
-
-function getCourSequel (media) {
- const mediaDate = getMediaDate(media)
- const animeRelations = media.relations.edges.filter(({ node, relationType }) => {
- if (node.type !== 'ANIME') return false
- if (node.status !== 'FINISHED') return false
- if (relationType !== 'SEQUEL') return false
- if (!['OVA', 'TV'].some(format => node.format === format)) return false // not movies or ona's
- if (mediaDate > getMediaDate(node)) return false // node needs to be released after media to be a sequel
- return isTitleSplitCour(node)
- }).map(({ node }) => node)
-
- if (!animeRelations.length) return false
-
- // get closest sequel
- return animeRelations.reduce((prev, curr) => {
- return getMediaDate(prev) - mediaDate > getMediaDate(curr) - mediaDate ? curr : prev
- })
-}
-
-function getCourPrequel (media) {
- const mediaDate = getMediaDate(media)
- const animeRelations = media.relations.edges.filter(({ node, relationType }) => {
- if (node.type !== 'ANIME') return false
- if (node.status !== 'FINISHED') return false
- if (relationType !== 'PREQUEL') return false
- if (!['OVA', 'TV'].some(format => node.format === format)) return false
- if (mediaDate < getMediaDate(node)) return false // node needs to be released before media to be a prequel
- return true
- }).map(({ node }) => node)
-
- if (!animeRelations.length) {
- console.error('Detected split count but couldn\'t find prequel', media)
- return false
- }
-
- // get closest prequel
- return animeRelations.reduce((prev, curr) => {
- return mediaDate - getMediaDate(prev) > mediaDate - getMediaDate(curr) ? curr : prev
- })
-}
-
-function isMovie (media) {
- if (media.format === 'MOVIE') return true
- if ([...Object.values(media.title), ...media.synonyms].some(title => title?.toLowerCase().includes('movie'))) return true
- // if (!getParentForSpecial(media)) return true // TODO: this is good for checking movies, but false positives with normal TV shows
- return media.duration > 80 && media.episodes === 1
-}
-
-const QUALITIES = ['1080', '720', '540', '480']
-
-const ANY = 'e*|a*|r*|i*|o*'
-
-function buildToshoQuery (quality) {
- let query = `&qx=1&q=!("${exclusions.join('"|"')}")`
- if (quality) {
- query += `((${ANY}|"${quality}") !"${QUALITIES.filter(q => q !== quality).join('" !"')}")`
- } else {
- query += ANY // HACK: tosho NEEDS a search string, so we lazy search a single common vowel
- }
-
- return query
-}
-
-async function fetchBatchesForAnidb ({ episodeCount, id, quality, movie = null }) {
- try {
- const queryString = buildToshoQuery(quality)
- const torrents = await fetch(settings.value.toshoURL + 'json?order=size-d&aid=' + id + queryString)
-
- // safe both if AL includes EP 0 or doesn't
- const batches = (await torrents.json()).filter(entry => entry.num_files >= episodeCount)
- if (!movie) {
- for (const batch of batches) batch.type = 'batch'
- }
- console.log({ batches })
- return batches
- } catch (error) {
- console.log('failed fetching batch', error)
- return []
- }
-}
-
-async function fetchSingleEpisodeForAnidb ({ id, quality }) {
- try {
- const queryString = buildToshoQuery(quality)
- const torrents = await fetch(settings.value.toshoURL + 'json?eid=' + id + queryString)
-
- const episodes = await torrents.json()
- console.log({ episodes })
- return episodes
- } catch (error) {
- console.log('failed fetching single episode', error)
- return []
- }
-}
-
-function mapToshoEntries (entries) {
- return entries.map(entry => {
- return {
- title: entry.title || entry.torrent_name,
- link: entry.magnet_uri,
- id: entry.nyaa_id, // TODO: used for sneedex mappings, remove later
- seeders: entry.seeders >= 30000 ? 0 : entry.seeders,
- leechers: entry.leechers >= 30000 ? 0 : entry.leechers,
- downloads: entry.torrent_downloaded_count,
- hash: entry.info_hash,
- size: entry.total_size && fastPrettyBytes(entry.total_size),
- verified: !!entry.anidb_fid,
- type: entry.type,
- date: entry.timestamp && new Date(entry.timestamp * 1000)
- }
- })
-}
-
-function dedupeEntries (entries) {
- const deduped = {}
- for (const entry of entries) {
- if (deduped[entry.hash]) {
- const dupe = deduped[entry.hash]
- dupe.title ??= entry.title
- dupe.link ??= entry.link
- dupe.id ||= entry.id
- dupe.seeders ||= entry.seeders >= 30000 ? 0 : entry.seeders
- dupe.leechers ||= entry.leechers >= 30000 ? 0 : entry.leechers
- dupe.downloads ||= entry.downloads
- dupe.size ||= entry.size
- dupe.verified ||= entry.verified
- dupe.date ||= entry.date
- dupe.type ??= entry.type
- } else {
- deduped[entry.hash] = entry
- }
- }
-
- return Object.values(deduped)
-}
diff --git a/common/modules/settings.js b/common/modules/settings.js
index a2040f9..73df77a 100644
--- a/common/modules/settings.js
+++ b/common/modules/settings.js
@@ -20,7 +20,7 @@ try {
}
/**
- * @type {import('svelte/store').Writable & { value: any }}
+ * @type {import('simple-store-svelte').Writable<any>}
*/
export const settings = writable({ ...defaults, ...scopedDefaults, ...storedSettings })
diff --git a/common/modules/support.js b/common/modules/support.js
index 158c742..a32399e 100644
--- a/common/modules/support.js
+++ b/common/modules/support.js
@@ -10,5 +10,6 @@ export const SUPPORTS = {
torrentPort: true,
torrentPath: true,
torrentPersist: true,
- keybinds: true
+ keybinds: true,
+ extensions: true
}
diff --git a/common/modules/util.js b/common/modules/util.js
index 0818c43..4eb966d 100644
--- a/common/modules/util.js
+++ b/common/modules/util.js
@@ -1,3 +1,5 @@
+import { SUPPORTS } from './support.js'
+
export function countdown (s) {
const d = Math.floor(s / (3600 * 24))
s -= d * 3600 * 24
@@ -166,14 +168,15 @@ export const defaults = {
enableDoH: false,
doHURL: 'https://cloudflare-dns.com/dns-query',
disableSubtitleBlur: false,
- toshoURL: decodeURIComponent(atob('aHR0cHM6Ly9mZWVkLmFuaW1ldG9zaG8ub3JnLw==')),
showDetailsInRPC: true,
smoothScroll: true,
cards: 'small',
expandingSidebar: true,
torrentPath: undefined,
font: undefined,
- angle: 'default'
+ angle: 'default',
+ extensions: SUPPORTS.extensions ? ['@thaunknown/ani-resourced'] : [],
+ sources: {}
}
export const subtitleExtensions = ['srt', 'vtt', 'ass', 'ssa', 'sub', 'txt']
diff --git a/common/package.json b/common/package.json
index f2add35..97f2f4c 100644
--- a/common/package.json
+++ b/common/package.json
@@ -5,9 +5,11 @@
"@fontsource-variable/material-symbols-outlined": "^5.0.24",
"@fontsource-variable/nunito": "^5.0.18",
"@fontsource/roboto": "^5.0.12",
- "anitomyscript": "github:ThaUnknown/anitomyscript#42290c4b3f256893be08a4e89051f448ff5e9d00",
+ "@thaunknown/ani-resourced": "^1.0.3",
+ "anitomyscript": "github:ThaUnknown/anitomyscript#51abfeaa99114659fb4c0d30e32c61d50d6d1a8a",
"bottleneck": "^2.19.5",
"browser-event-target-emitter": "^1.0.1",
+ "comlink": "^4.4.1",
"jassub": "latest",
"js-levenshtein": "^1.1.6",
"p2pt": "github:ThaUnknown/p2pt#modernise",
diff --git a/common/views/Player/Player.svelte b/common/views/Player/Player.svelte
index f660c50..5fb0362 100644
--- a/common/views/Player/Player.svelte
+++ b/common/views/Player/Player.svelte
@@ -1,7 +1,7 @@
-
-
-
-
- {#if data}
-
-
-
-
-
-
- |
- Name |
- Size |
- Seed |
- Leech |
- Downloads |
- Released |
-
-
-
- {#each filtered as row}
- play(row)}>
- |
- {#if row.type === 'best'}
-
- star
-
- {:else if row.type === 'alt'}
-
- star
-
- {:else if row.verified}
-
- verified
-
- {:else if row.type === 'batch'}
-
- database
-
- {/if}
- |
- {row.title}
-
- {#each sanitiseTerms(row.parseObject) as { text, color }}
-
- {text}
-
- {/each}
-
- |
- {row.size} |
- {row.seeders ?? '?'} |
- {row.leechers ?? '?'} |
- {row.downloads ?? '?'} |
- {row.date ? since(row.date) : '?'} |
-
- {/each}
-
-
-
-
- {/if}
-
-
-
diff --git a/common/views/Settings/TorrentSettings.svelte b/common/views/Settings/TorrentSettings.svelte
index 02dfb5a..adf965c 100644
--- a/common/views/Settings/TorrentSettings.svelte
+++ b/common/views/Settings/TorrentSettings.svelte
@@ -1,3 +1,34 @@
+
+
Lookup Settings
@@ -18,7 +66,7 @@
-
+
@@ -38,9 +86,6 @@
{/if}
-
-
-
Client Settings
{#if SUPPORTS.torrentPath}
@@ -64,23 +109,23 @@
{/if}
-
+
{#if SUPPORTS.torrentPort}
-
+
{/if}
{#if SUPPORTS.dht}
-
+
@@ -95,3 +140,53 @@
+
+Extension Settings
+
+
+
+
+
+ {#each settings.extensions as extension, i}
+
+
+ {extension}
+
+
+
+ {/each}
+
+
+
+
+
+
+ {#key settings.extensions}
+ {#await extensionsWorker then worker}
+ {#await worker.metadata then metadata}
+ {#each metadata as { accuracy, name, description }}
+
+
+
{name}
+
{description}
+
+
+
Accuracy: {accuracy}
+
+
+
+
+
+
+ {/each}
+ {/await}
+ {/await}
+ {/key}
+
+
+
diff --git a/common/views/TorrentSearch/TorrentCard.svelte b/common/views/TorrentSearch/TorrentCard.svelte
new file mode 100644
index 0000000..18f714f
--- /dev/null
+++ b/common/views/TorrentSearch/TorrentCard.svelte
@@ -0,0 +1,135 @@
+
+
+
+
+ play(result)} title={result.parseObject.file_name}>
+ {#if media.bannerImage}
+
+

+
+
+ {/if}
+
+
+
{result.parseObject?.release_group && result.parseObject.release_group.length < 20 ? result.parseObject.release_group : 'No Group'}
+ {#if result.type === 'batch'}
+
+ database
+
+ {:else if result.verified}
+
+ verified
+
+ {/if}
+
+
{simplifyFilename(result.parseObject)}
+
+
{fastPrettyBytes(result.size)}
+
•
+
{result.seeders} Seeders
+
•
+
{since(new Date(result.date))}
+
+ {#if result.type === 'best'}
+
+ Best Release
+
+ {:else if result.type === 'alt'}
+
+ Alt Release
+
+ {/if}
+ {#each sanitiseTerms(result.parseObject) as { text }}
+
+ {text}
+
+ {/each}
+
+
+
+
+
+
diff --git a/common/views/TorrentSearch/TorrentMenu.svelte b/common/views/TorrentSearch/TorrentMenu.svelte
new file mode 100644
index 0000000..dd1d0a3
--- /dev/null
+++ b/common/views/TorrentSearch/TorrentMenu.svelte
@@ -0,0 +1,178 @@
+
+
+
+
+
+
+
Find Torrents
+
+
+
Auto-Selected Torrent
+ {#await best}
+
+ {:then bestRelease}
+
+ {:catch error}
+
+ {/await}
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Episode
+
+
+
+ Resolution
+
+
+
+
+
+
+ {#await lookup}
+ {#each Array.from({ length: 10 }) as _}
+
+ {/each}
+ {:then results}
+ {#each filterResults(results, searchText) as result}
+
+ {/each}
+ {/await}
+
+
+
diff --git a/common/views/TorrentSearch/TorrentModal.svelte b/common/views/TorrentSearch/TorrentModal.svelte
new file mode 100644
index 0000000..e64bb92
--- /dev/null
+++ b/common/views/TorrentSearch/TorrentModal.svelte
@@ -0,0 +1,46 @@
+
+
+
+
+
+ {#if search}
+
+ {/if}
+
+
+
diff --git a/common/views/TorrentSearch/TorrentSkeletonCard.svelte b/common/views/TorrentSearch/TorrentSkeletonCard.svelte
new file mode 100644
index 0000000..71b3696
--- /dev/null
+++ b/common/views/TorrentSearch/TorrentSkeletonCard.svelte
@@ -0,0 +1,14 @@
+
diff --git a/common/views/ViewAnime/EpisodeList.svelte b/common/views/ViewAnime/EpisodeList.svelte
index 38437cc..c3a090b 100644
--- a/common/views/ViewAnime/EpisodeList.svelte
+++ b/common/views/ViewAnime/EpisodeList.svelte
@@ -1,7 +1,7 @@