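"""Download a ComicWalker chapter and strip its XOR image obfuscation.

Usage: python walker.py -cid <content id> [-nolog]
"""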
import argparse
import logging
import os
import sys
from binascii import unhexlify

import requests

# Configure the root logger; without this, logging.info() output is suppressed.
logging.basicConfig(level=logging.INFO, format='%(message)s')

parser = argparse.ArgumentParser()
parser.add_argument('-cid', help='content ID; taken from the &cid={...} query parameter when reading a chapter')
parser.add_argument('-nolog', help='suppress per-page download progress on the terminal', action='store_true')
args = parser.parse_args(args=None if sys.argv[1:] else ['--help'])

if args.cid is None:
    parser.print_help()
    sys.exit(1)

def start(url, headers):
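    """Fetch chapter metadata, print the series and chapter titles, then download pages."""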
    meta = requests.get(url=url, headers=headers).json()
    img_url = f'{url}/frames?enable_webp=true'

    try:
        cid_info = {
            "TITLE": meta['data']['extra']['content']['title'],
            "CHAPTER": meta['data']['result']['title']
        }

    except KeyError:
        logging.error("Metadata malformed; check the CID's validity")
        sys.exit(1)

    else:
        print('{} - {}'.format(cid_info['TITLE'], cid_info['CHAPTER']))

        undrm(img_url, headers, cid_info)

def undrm(url, headers, cid_info):
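    """Fetch the frame list, then download each page and strip its XOR obfuscation."""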
    meta = requests.get(url=url, headers=headers).json()

    print('Page count: {}\n'.format(len(meta['data']['result'])))

    save_path = os.path.join('downloaded_chapters', cid_info['TITLE'], cid_info['CHAPTER'])
    os.makedirs(save_path, exist_ok=True)

    print(f'Saving chapter to {save_path}\n')

    for page in range(1, len(meta['data']['result']) + 1):

        if args.nolog:
            # With -nolog, print a single start message instead of per-page progress.
            if page == 1:
                logging.info('DL in progress...')
        else:
            logging.info(f'Progress: page {page}')

        # The first 16 hex characters of drm_hash decode to the 8-byte XOR key for this page.
        key = unhexlify(meta['data']['result'][page - 1]['meta']['drm_hash'][:16])
        enc = requests.get(meta['data']['result'][page - 1]['meta']['source_url'], headers=headers).content

        with open(os.path.join(save_path, f'{page}.webp'), 'wb') as f:
            f.write(xor(enc, key))

    logging.info('Done.')

def xor(data, key):
    """Apply a repeating-key XOR: byte i is XORed with key[i % len(key)]."""
    return bytes(b ^ key[i % len(key)] for i, b in enumerate(data))

def main():

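    # Request headers mimicking Chrome 100 on Windows, with comic-walker.com as Origin/Referer.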
    headers = {
        'authority': 'comicwalker-api.nicomanga.jp',
        'accept': '*/*',
        'accept-language': 'en-US,en;q=0.9',
        'cache-control': 'no-cache',
        'origin': 'https://comic-walker.com',
        'pragma': 'no-cache',
        'referer': 'https://comic-walker.com/',
        'sec-ch-ua': '" Not A;Brand";v="99", "Chromium";v="100", "Google Chrome";v="100"',
        'sec-ch-ua-mobile': '?0',
        'sec-ch-ua-platform': '"Windows"',
        'sec-fetch-dest': 'empty',
        'sec-fetch-mode': 'cors',
        'sec-fetch-site': 'cross-site',
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/100.0.4896.127 Safari/537.36',
    }

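    # Episode metadata endpoint; the chapter is selected by its content ID.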
    content_url = f'https://comicwalker-api.nicomanga.jp/api/v1/comicwalker/episodes/{args.cid}'

    start(content_url, headers)

if __name__ == "__main__":
    main()