@@ -13,18 +13,26 @@ def get_list(list_id, config=None):
     list_name = None
     description = None
     movies = []
+    config = config or {}
 
     while True:
         print("Page number: ", page_number)
         watchlist = list_id.endswith("/watchlist")
+        likeslist = list_id.endswith("/likes/films")
 
         if watchlist:
-            r = requests.get(f"https://letterboxd.com/{list_id}/by/release-earliest/page/{page_number}/", headers={'User-Agent': 'Mozilla/5.0'})
-
             list_name = list_id.split("/")[0] + " Watchlist"
             description = "Watchlist for " + list_id.split("/")[0]
-        else:
-            r = requests.get(f"https://letterboxd.com/{list_id}/detail/by/release-earliest/page/{page_number}/", headers={'User-Agent': 'Mozilla/5.0'})
+        elif likeslist:
+            list_name = list_id.split("/")[0] + " Likes"
+            description = "Likes list for " + list_id.split("/")[0]
+
+        url_format = "https://letterboxd.com/{list_id}{maybe_detail}/by/release-earliest/page/{page_number}/"
+        maybe_detail = "" if watchlist or likeslist else "/detail"
+        r = requests.get(
+            url_format.format(list_id=list_id, maybe_detail=maybe_detail, page_number=page_number),
+            headers={'User-Agent': 'Mozilla/5.0'},
+        )
 
         soup = bs4.BeautifulSoup(r.text, 'html.parser')
 
@@ -38,13 +46,13 @@ def get_list(list_id, config=None):
         else:
             description = ""
 
-        if watchlist:
+        if watchlist or likeslist:
             page = soup.find_all('li', {'class': 'poster-container'})
         else:
             page = soup.find_all('div', {'class': 'film-detail-content'})
 
         for movie_soup in page:
-            if watchlist:
+            if watchlist or likeslist:
                 movie = {"title": movie_soup.find('img').attrs['alt'], "media_type": "movie"}
                 link = movie_soup.find('div', {'class': 'film-poster'})['data-target-link']
             else:
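For reference, a minimal standalone sketch of how the consolidated URL construction in this change resolves for each list type. The user slug "someuser" and the list name "favourites" are hypothetical examples, and page_number is fixed at 1:

# Minimal sketch (not part of the diff): how maybe_detail resolves per list type.
# The slugs below are hypothetical; page_number is fixed at 1.
url_format = "https://letterboxd.com/{list_id}{maybe_detail}/by/release-earliest/page/{page_number}/"

for list_id in ("someuser/watchlist", "someuser/likes/films", "someuser/list/favourites"):
    watchlist = list_id.endswith("/watchlist")
    likeslist = list_id.endswith("/likes/films")
    # Watchlist and likes pages use the poster grid; regular lists use the /detail view.
    maybe_detail = "" if watchlist or likeslist else "/detail"
    print(url_format.format(list_id=list_id, maybe_detail=maybe_detail, page_number=1))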