-
Notifications
You must be signed in to change notification settings - Fork 2
/
Copy pathutils.py
108 lines (76 loc) · 2.76 KB
/
utils.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
import copy
import json
import re
from collections import OrderedDict
import requests
from bs4 import BeautifulSoup, PageElement, ResultSet
from dict_deep import deep_get as base_deep_get
from fake_useragent import UserAgent
from path import Path
from requests import Response
from labosphere.constants import BASE_URL, DEV_MODE, LABOSPHERE_DIR, ROOT
def base_metadata() -> dict:
    """Build the baseline Cubari metadata for the One Piece repository.

    The description is re-read from ``description.txt`` inside
    ``LABOSPHERE_DIR`` on every call, and a fixed chapter "0" entry pointing
    at the about-page image is always present.
    """
    about_chapter = {
        "title": "About This Repository",
        "groups": {
            "celsius narhwal": [
                "https://raw.githubusercontent.com/celsiusnarhwal/punk-records/main/about.png"
            ]
        },
        "volume": 0,
    }
    return {
        "title": "One Piece",
        "description": (LABOSPHERE_DIR / "description.txt").read_text(),
        "artist": "Eiichiro Oda",
        "author": "Eiichiro Oda",
        "cover": "https://cdn.myanimelist.net/images/manga/2/253146.jpg",
        "chapters": {"0": about_chapter},
    }
def request(url) -> Response:
    """GET *url* with a randomized Chrome user agent; raise on HTTP errors."""
    headers = {"User-Agent": UserAgent().chrome}
    response = requests.get(url, headers=headers)
    response.raise_for_status()
    return response
def get_soup(url: str) -> BeautifulSoup:
    """Fetch *url* and parse the response body with the html.parser backend."""
    page = request(url)
    return BeautifulSoup(page.content, "html.parser")
def get_chapter_list() -> ResultSet:
    """Return every anchor on the One Piece index page whose href mentions "chapter"."""
    soup = get_soup(BASE_URL / "mangas/5/one-piece")
    return soup.find_all("a", href=lambda href: href and "chapter" in href)
def get_chapter_number(chapter: PageElement) -> float:
    """Parse the first run of digits/dots in the element's text as a float."""
    match = re.search(r"[\d.]+", chapter.text)
    return float(match.group())
def without_keys(d: dict, *keys: str) -> dict:
    """Return a deep copy of *d* with the given top-level keys removed.

    Keys that are absent are silently ignored; the input dict is never
    mutated.
    """
    clone = copy.deepcopy(d)
    for unwanted in keys:
        clone.pop(unwanted, None)
    return clone
def truncate(number: float) -> float | int:
return int(number) if number.is_integer() else number
def cubari_path() -> Path:
    """Path of the Cubari JSON file; dev mode swaps in a separate test file."""
    filename = "test.cubari.json" if DEV_MODE else "cubari.json"
    return ROOT / filename
def load_cubari() -> dict:
    """Load the Cubari JSON from disk, refreshed with the base metadata.

    Creates the file (as an empty JSON object) if it does not yet exist,
    merges the non-chapter base metadata over it, merges the base chapters
    into any chapters already on disk, persists the result via dump_cubari,
    and returns a fresh parse of the file.

    Fixes: the original opened the file three times without ever closing the
    handles, and called base_metadata() (which reads description.txt) twice.
    """
    path = cubari_path()
    if not path.exists():
        # Seed the file so json.load below always has valid JSON to parse.
        with path.open("w") as f:
            json.dump({}, f, indent=4)

    with path.open() as f:
        cubari = json.load(f)

    metadata = base_metadata()  # read once instead of on every use
    cubari.update(without_keys(metadata, "chapters"))
    # Merge base chapters into existing ones rather than overwriting;
    # setdefault covers the original's KeyError fallback for a missing key.
    cubari.setdefault("chapters", {}).update(metadata["chapters"])

    dump_cubari(cubari)
    with path.open() as f:
        return json.load(f)
def dump_cubari(data: dict):
    """Write *data* to the Cubari file with keys in canonical order.

    Top-level keys follow base_metadata()'s key order (with "chapters" last);
    chapters are ordered by descending numeric chapter key.

    Fixes: the original re-evaluated base_metadata() — which reads
    description.txt from disk — inside the sort-key lambda for every element,
    and opened the output file without closing the handle.
    """
    key_order = [*base_metadata().keys(), "chapters"]  # hoisted out of the sort key
    ordered = OrderedDict(
        sorted(data.items(), key=lambda item: key_order.index(item[0]))
    )
    ordered["chapters"] = OrderedDict(
        sorted(data.get("chapters", {}).items(), key=lambda item: float(item[0]), reverse=True)
    )
    # Context manager guarantees the handle is flushed and closed on all paths.
    with cubari_path().open("w") as f:
        json.dump(ordered, f, indent=4)
def deep_get(obj, key, default=None, **kwargs):
    """Deep-lookup *key* in *obj*, returning *default* only when the result is None.

    The original used ``base_deep_get(...) or default``, which also replaced
    legitimate falsy values (0, "", False, empty containers) with the
    default; an explicit ``is None`` check preserves those values.
    """
    value = base_deep_get(obj, key, **kwargs)
    return default if value is None else value