Mirror of https://github.com/p-stream/providers.git, synced 2026-01-11 20:10:33 +00:00
Added filelions, re-enabled dood, added primesrc (primewire), and fixed a minor flixhq issue
This commit is contained in:
parent 44630a0ace
commit 593ed4cf21

4 changed files with 297 additions and 24 deletions
src/providers/all.ts

@@ -26,6 +26,7 @@ import {
 } from './embeds/autoembed';
 import { cinemaosEmbeds } from './embeds/cinemaos';
 import { closeLoadScraper } from './embeds/closeload';
+import { filelionsScraper } from './embeds/filelions';
 import { madplayBaseEmbed, madplayNsapiEmbed, madplayNsapiVidFastEmbed, madplayRoperEmbed } from './embeds/madplay';
 import { mp4hydraServer1Scraper, mp4hydraServer2Scraper } from './embeds/mp4hydra';
 import { myanimedubScraper } from './embeds/myanimedub';
@@ -83,6 +84,7 @@ import { madplayScraper } from './sources/madplay';
 import { myanimeScraper } from './sources/myanime';
 import { nunflixScraper } from './sources/nunflix';
 import { pelisplushdScraper } from './sources/pelisplushd';
+import { primewireScraper } from './sources/primewire';
 import { rgshowsScraper } from './sources/rgshows';
 import { ridooMoviesScraper } from './sources/ridomovies';
 import { slidemoviesScraper } from './sources/slidemovies';
@@ -137,6 +139,7 @@ export function gatherAllSources(): Array<Sourcerer> {
     lookmovieScraper,
     turbovidSourceScraper,
     pelisplushdScraper,
+    primewireScraper,
   ];
 }
 
@@ -203,5 +206,6 @@ export function gatherAllEmbeds(): Array<Embed> {
     vidhideLatinoScraper,
     vidhideSpanishScraper,
     vidhideEnglishScraper,
+    filelionsScraper,
   ];
 }
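Registration in all.ts is always the same two-step pattern the hunks above follow: import the scraper, then append it to the relevant gather list. A minimal sketch with a hypothetical scraper name (only the pattern itself is from this diff; Sourcerer is already imported in all.ts):

// hypothetical module; mirrors how primewireScraper is wired up above
import { exampleScraper } from './sources/example';

export function gatherAllSources(): Array<Sourcerer> {
  return [
    // ...existing scrapers...
    exampleScraper, // appended at the end, exactly as primewireScraper is in this commit
  ];
}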
src/providers/embeds/dood.ts

@@ -1,64 +1,201 @@
 import { customAlphabet } from 'nanoid';
 
 import { flags } from '@/entrypoint/utils/targets';
 import { makeEmbed } from '@/providers/base';
 
 const nanoid = customAlphabet('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789', 10);
 const baseUrl = 'https://d000d.com';
 
+const PASS_MD5_PATTERNS: RegExp[] = [
+  /\$\.get\('\/pass_md5[^']*'\)/,
+  /\$\.get\("\/pass_md5[^"]*"\)/,
+  /\$\.get\s*\('\/pass_md5([^']+)'\)/,
+  /\$\.get\s*\("\/pass_md5([^"]+)"\)/,
+  /fetch\(\s*["'](\/pass_md5[^"']+)["']\s*\)/,
+  /axios\.get\(\s*["'](\/pass_md5[^"']+)["']\s*\)/,
+  /open\(\s*["']GET["']\s*,\s*["'](\/pass_md5[^"']+)["']\s*\)/,
+  /url\s*:\s*["'](\/pass_md5[^"']+)["']/,
+  /location\.href\s*=\s*["'](\/pass_md5[^"']+)["']/,
+  /(\/pass_md5\.php[^"']*)/,
+  /["'](\/pass_md5\/[^"']+)["']/,
+];
+
+const TOKEN_PATTERNS: RegExp[] = [/token["']?\s*[:=]\s*["']([^"']+)["']/, /makePlay\([^)]*token=([^"&']+)/];
+
+function extractFirst(html: string, patterns: RegExp[]): string | null {
+  for (const pat of patterns) {
+    const m = pat.exec(html);
+    if (m) {
+      // Use the capture group if available, else parse the path out of the full match
+      if (m.length > 1 && m[1]) return m[1];
+      const match = m[0];
+      const inner = /\/pass_md5[^'"')]+/.exec(match)?.[0] ?? null;
+      if (inner) return inner;
+    }
+  }
+  return null;
+}
+
+function resolveAbsoluteUrl(base: string, maybeRelative: string): string {
+  try {
+    return new URL(maybeRelative, base).toString();
+  } catch {
+    return maybeRelative;
+  }
+}
+
 export const doodScraper = makeEmbed({
   id: 'dood',
   name: 'dood',
   disabled: false,
   rank: 173,
   async scrape(ctx) {
-    let url = ctx.url;
-    if (ctx.url.includes('primewire')) {
-      const request = await ctx.proxiedFetcher.full(ctx.url);
-      url = request.finalUrl;
+    // Resolve any interstitial/redirect links (e.g., primewire wrappers)
+    let pageUrl = ctx.url;
+    if (pageUrl.includes('primewire')) {
+      const req = await ctx.proxiedFetcher.full(pageUrl);
+      pageUrl = req.finalUrl;
     }
 
-    const id = url.split('/d/')[1] || url.split('/e/')[1];
+    // Normalize to the embed page /e/{id} when a /d/{id} download page is provided
+    const initial = new URL(pageUrl);
+    const idMatch = initial.pathname.match(/\/(?:d|e)\/([A-Za-z0-9]+)/);
+    const origin = (() => {
+      try {
+        return `${initial.protocol}//${initial.host}`;
+      } catch {
+        return 'https://d000d.com';
+      }
+    })();
+    const embedUrl = idMatch ? `${origin}/e/${idMatch[1]}` : pageUrl;
 
-    const doodData = await ctx.proxiedFetcher<string>(`/e/${id}`, {
-      method: 'GET',
-      baseUrl,
-    });
+    // Fetch the dood embed page (scripts live in a consistent location there)
+    const pageResp = await ctx.proxiedFetcher.full<string>(embedUrl);
+    const html = pageResp.body;
+    const finalPageUrl = pageResp.finalUrl || embedUrl;
+    const pageOrigin = (() => {
+      try {
+        const u = new URL(finalPageUrl);
+        return `${u.protocol}//${u.host}`;
+      } catch {
+        return origin;
+      }
+    })();
 
-    const dataForLater = doodData.match(/\?token=([^&]+)&expiry=/)?.[1];
-    const path = doodData.match(/\$\.get\('\/pass_md5([^']+)/)?.[1];
-    const thumbnailTrack = doodData.match(/thumbnails:\s\{\s*vtt:\s'([^']*)'/);
+    // Try to read the thumbnail track (handles both quote styles)
+    const thumbnailTrack = html.match(/thumbnails:\s*\{\s*vtt:\s*['"]([^'"]+)['"]/);
 
-    const doodPage = await ctx.proxiedFetcher<string>(`/pass_md5${path}`, {
+    // Find the pass_md5 path in the main page, or fall back to iframes
+    let passPath = extractFirst(html, PASS_MD5_PATTERNS);
+
+    if (!passPath) {
+      const iframeSrcs = Array.from(html.matchAll(/<iframe[^>]+src=["']([^"']+)["']/gi))
+        .slice(0, 5)
+        .map((m) => m[1]);
+      for (const src of iframeSrcs) {
+        try {
+          const abs = resolveAbsoluteUrl(finalPageUrl, src);
+          const sub = await ctx.proxiedFetcher.full<string>(abs, {
+            headers: {
+              Referer: finalPageUrl,
+            },
+          });
+          passPath = extractFirst(sub.body, PASS_MD5_PATTERNS);
+          if (passPath) break;
+        } catch {
+          // ignore iframe failures
+        }
+      }
+    }
+
+    // Fallback: scan external scripts referenced by the page for pass_md5 usage
+    if (!passPath) {
+      const scriptSrcs = Array.from(html.matchAll(/<script[^>]+src=["']([^"']+)["']/gi))
+        .slice(0, 8)
+        .map((m) => m[1]);
+      for (const src of scriptSrcs) {
+        try {
+          const abs = resolveAbsoluteUrl(finalPageUrl, src);
+          const sub = await ctx.proxiedFetcher.full<string>(abs, {
+            headers: {
+              Referer: finalPageUrl,
+            },
+          });
+          passPath = extractFirst(sub.body, PASS_MD5_PATTERNS);
+          if (passPath) break;
+        } catch {
+          // ignore script failures
+        }
+      }
+    }
+
+    // Fallback: if a /d/{id} page exists, try scanning it, as some variants only expose pass_md5 there
+    if (!passPath && idMatch) {
+      try {
+        const downloadUrl = `${pageOrigin}/d/${idMatch[1]}`;
+        const sub = await ctx.proxiedFetcher.full<string>(downloadUrl, {
+          headers: { Referer: finalPageUrl },
+        });
+        passPath = extractFirst(sub.body, PASS_MD5_PATTERNS);
+      } catch {
+        // ignore download page failure
+      }
+    }
+
+    if (!passPath) throw new Error('dood: pass_md5 path not found');
+
+    const passUrl = resolveAbsoluteUrl(pageOrigin, passPath.startsWith('/') ? passPath : `/${passPath}`);
+    const doodPage = await ctx.proxiedFetcher<string>(passUrl, {
       headers: {
-        Referer: `${baseUrl}/e/${id}`,
+        Referer: finalPageUrl,
       },
       method: 'GET',
-      baseUrl,
     });
-    const downloadURL = `${doodPage}${nanoid()}?token=${dataForLater}&expiry=${Date.now()}`;
 
-    if (!downloadURL.startsWith('http')) throw new Error('Invalid URL');
+    const token = extractFirst(html, TOKEN_PATTERNS);
+    const rawUrl = (doodPage ?? '')
+      .toString()
+      .trim()
+      .replace(/^['"]|['"]$/g, '');
+    const normalizedUrl = (() => {
+      if (!rawUrl) return '';
+      if (rawUrl.startsWith('//')) return `https:${rawUrl}`;
+      if (rawUrl.startsWith('/')) return resolveAbsoluteUrl(pageOrigin, rawUrl);
+      if (rawUrl.startsWith('http')) return rawUrl;
+      return resolveAbsoluteUrl(pageOrigin, rawUrl);
+    })();
+    const finalDownloadUrl = token ? `${normalizedUrl}${nanoid()}?token=${token}&expiry=${Date.now()}` : normalizedUrl;
+
+    if (!finalDownloadUrl.startsWith('http')) throw new Error('Invalid URL');
+
+    const thumbUrl = (() => {
+      if (!thumbnailTrack) return null;
+      const t = thumbnailTrack[1];
+      if (t.startsWith('//')) return `https:${t}`;
+      if (t.startsWith('http')) return t;
+      return resolveAbsoluteUrl(origin, t);
+    })();
 
     return {
       stream: [
         {
           id: 'primary',
           type: 'file',
-          flags: [],
+          flags: [flags.CORS_ALLOWED],
           captions: [],
           qualities: {
             unknown: {
               type: 'mp4',
-              url: downloadURL,
+              url: finalDownloadUrl,
             },
           },
-          headers: {
-            Referer: baseUrl,
+          preferredHeaders: {
+            Referer: pageOrigin,
           },
-          ...(thumbnailTrack
+          ...(thumbUrl
            ? {
                thumbnailTrack: {
                  type: 'vtt',
-                  url: `https:${thumbnailTrack[1]}`,
+                  url: thumbUrl,
                },
              }
            : {}),
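To see what the new pass_md5 extraction chain does end to end: extractFirst walks PASS_MD5_PATTERNS in order, preferring a capture group and otherwise scanning the full match for the path. A small sketch against a made-up page fragment (the HTML and ids here are hypothetical; extractFirst, PASS_MD5_PATTERNS, and TOKEN_PATTERNS are the helpers added above):

const sampleHtml = `<script>$.get('/pass_md5/abc123/xyz789')</script>`;

// The first pattern matches but has no capture group, so extractFirst falls
// back to scanning the full match and returns '/pass_md5/abc123/xyz789'.
const passPath = extractFirst(sampleHtml, PASS_MD5_PATTERNS);

// The scraper then fetches that path; the response body is a bare CDN URL
// prefix, and the playable URL is built as
//   `${prefix}${nanoid()}?token=${token}&expiry=${Date.now()}`
// where token comes from TOKEN_PATTERNS applied to the same page HTML.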
src/providers/embeds/filelions.ts (new file, 58 lines)

@@ -0,0 +1,58 @@
+import { load } from 'cheerio';
+import { unpack } from 'unpacker';
+
+import { makeEmbed } from '@/providers/base';
+import { NotFoundError } from '@/utils/errors';
+
+export const filelionsScraper = makeEmbed({
+  id: 'filelions',
+  name: 'Filelions',
+  rank: 115,
+  async scrape(ctx) {
+    const html = await ctx.proxiedFetcher<string>(ctx.url, {
+      headers: {
+        Referer: 'https://primesrc.me/',
+      },
+    });
+
+    const $ = load(html);
+    const packedScript = $('script')
+      .filter((_, el) => {
+        const htmlContent = $(el).html();
+        return htmlContent != null && htmlContent.includes('eval(function(p,a,c,k,e,d)');
+      })
+      .first()
+      .html();
+    if (!packedScript) throw new NotFoundError('Packed script not found');
+
+    const evalMatch = packedScript.match(/eval\((.*)\)/);
+    if (!evalMatch) throw new NotFoundError('Eval code not found');
+
+    const unpacked = unpack(evalMatch[1]);
+
+    const linksMatch = unpacked.match(/var links=(\{.*?\})/);
+    if (!linksMatch) throw new NotFoundError('Links object not found');
+
+    const links = eval(`(${linksMatch[1]})`);
+    Object.keys(links).forEach((key) => {
+      if (links[key].startsWith('/stream/')) {
+        links[key] = `https://dinisglows.com${links[key]}`;
+      }
+    });
+
+    const streamUrl = links.hls4 || Object.values(links)[0];
+    if (!streamUrl) throw new NotFoundError('No stream URL found');
+
+    return {
+      stream: [
+        {
+          id: 'primary',
+          type: 'hls',
+          playlist: streamUrl,
+          flags: [],
+          captions: [],
+        },
+      ],
+    };
+  },
+});
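For reference, the data this scraper digs out of the page looks roughly like the following once the p,a,c,k,e,d script is run through unpack. The payload below is a hypothetical illustration (real pages vary), and JSON.parse stands in for the scraper's eval because this sample happens to be valid JSON:

const unpacked = `var links={"hls4":"/stream/abc/master.m3u8","hls2":"https://cdn.example.com/alt.m3u8"};`;

const linksMatch = unpacked.match(/var links=(\{.*?\})/);
if (linksMatch) {
  const links: Record<string, string> = JSON.parse(linksMatch[1]);
  // Relative /stream/ paths get pinned to the dinisglows.com host, as above
  for (const key of Object.keys(links)) {
    if (links[key].startsWith('/stream/')) links[key] = `https://dinisglows.com${links[key]}`;
  }
  console.log(links.hls4); // => 'https://dinisglows.com/stream/abc/master.m3u8'
}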
src/providers/sources/primewire.ts (new file, 74 lines)

@@ -0,0 +1,74 @@
+import { SourcererOutput, makeSourcerer } from '@/providers/base';
+import { MovieScrapeContext, ShowScrapeContext } from '@/utils/context';
+import { NotFoundError } from '@/utils/errors';
+
+async function comboScraper(ctx: ShowScrapeContext | MovieScrapeContext): Promise<SourcererOutput> {
+  const baseApiUrl = 'https://primesrc.me/api/v1/';
+
+  let serverData;
+  try {
+    if (ctx.media.type === 'movie') {
+      const url = `${baseApiUrl}s?tmdb=${ctx.media.tmdbId}&type=movie`;
+      serverData = await fetch(url);
+    } else {
+      const url = `${baseApiUrl}s?tmdb=${ctx.media.tmdbId}&season=${ctx.media.season.number}&episode=${ctx.media.episode.number}&type=tv`;
+      serverData = await fetch(url);
+    }
+  } catch (error) {
+    return { embeds: [] };
+  }
+
+  let data;
+  try {
+    data = await serverData.json();
+  } catch (error) {
+    return { embeds: [] };
+  }
+
+  const nameToEmbedId: Record<string, string> = {
+    Filelions: 'filelions',
+    Dood: 'dood',
+    Streamwish: 'streamwish-english',
+    Filemoon: 'filemoon',
+  };
+
+  if (!data.servers || !Array.isArray(data.servers)) {
+    return { embeds: [] };
+  }
+
+  const embeds = [];
+  for (const server of data.servers) {
+    if (!server.name || !server.key) {
+      continue;
+    }
+    if (nameToEmbedId[server.name]) {
+      try {
+        const linkData = await fetch(`${baseApiUrl}l?key=${server.key}`);
+        if (linkData.status !== 200) {
+          continue;
+        }
+        const linkJson = await linkData.json();
+        if (linkJson.link) {
+          const embed = {
+            embedId: nameToEmbedId[server.name],
+            url: linkJson.link,
+          };
+          embeds.push(embed);
+        }
+      } catch (error) {
+        throw new NotFoundError(`Error: ${error}`);
+      }
+    }
+  }
+
+  return { embeds };
+}
+
+export const primewireScraper = makeSourcerer({
+  id: 'primewire',
+  name: 'PrimeWire',
+  rank: 105,
+  flags: [],
+  scrapeMovie: comboScraper,
+  scrapeShow: comboScraper,
+});
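The primesrc flow above is two round trips: list servers for a TMDB id, then resolve each recognized server key to an embed link. The response shapes below are inferred from the fields the code reads (servers[].name, servers[].key, link); they are an assumption, not a documented API contract:

// Inferred shapes for https://primesrc.me/api/v1/ responses (assumptions)
interface PrimesrcServer {
  name: string; // e.g. 'Filelions', 'Dood', 'Streamwish', 'Filemoon'
  key: string; // opaque key for the link endpoint
}
interface PrimesrcServersResponse {
  servers: PrimesrcServer[];
}
interface PrimesrcLinkResponse {
  link: string; // embed URL handed to the matching embed scraper
}

// Flow:
//   GET {base}s?tmdb={id}&type=movie                      -> PrimesrcServersResponse
//   GET {base}s?tmdb={id}&season={s}&episode={e}&type=tv  -> PrimesrcServersResponse
//   GET {base}l?key={server.key}                          -> PrimesrcLinkResponse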