-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathcrawl.py
executable file
·76 lines (62 loc) · 2.72 KB
/
crawl.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
#! /usr/bin/python2
from __future__ import print_function
import re
import sys
import time
import requests
import bs4
from bs4 import BeautifulSoup
import pandas as pd
base_url = "http://www.moviebodycounts.com/"
# Fetch the index page that links to the per-letter movie listing pages.
r = requests.get(base_url + "movies.htm")
soup = BeautifulSoup(r.text)
anchors = soup.findAll('a')
# has_attr guard: anchors without an href would raise KeyError on a['href']
# (the imdb extraction later in this script already guards the same way).
movie_pages = [a['href'] for a in anchors
               if a.has_attr('href') and a['href'].startswith('movies-')]
# Column accumulators for the final DataFrame.
data = {'title': [], 'year': [], 'kills': [], 'imdb': []}
# Phrases marking the body-count line on a movie page.  They are matched
# against a lower-cased string, so every phrase must be lower-case —
# the original "Kills*" (capital K) could never match.
kill_phrases = ["entire film:", "on screen kills:", "film:", "kills*"]
def findStringChildNodes(node):
    """Collect every non-empty text fragment found beneath *node*.

    Walks all descendants of the bs4 node, keeps only NavigableString
    children whose stripped text is non-empty, and normalizes embedded
    CRLF pairs to single spaces.  Returns a list of strings.
    """
    fragments = []
    for child in node.recursiveChildGenerator():
        if not isinstance(child, bs4.element.NavigableString):
            continue
        text = child.strip()
        if text:
            fragments.append(text.replace("\r\n", " "))
    return fragments
def flatten(list_of_lists):
    """Concatenate a list of lists into a single flat list.

    Replaces the reduce-based concatenation, which raised TypeError on
    an empty outer list (reduce of empty sequence with no initializer)
    and relied on the Python-2-only builtin ``reduce``.  For non-empty
    input the result is identical; an empty outer list now yields [].
    """
    return [item for sublist in list_of_lists for item in sublist]
# Crawl every movie page linked from each per-letter listing page and
# scrape title, year, on-screen kill count, and IMDb id into `data`.
for page in movie_pages:
    r = requests.get(base_url + page)
    soup = BeautifulSoup(r.text)
    # Per-movie links: .htm pages with visible anchor text, excluding the
    # navigation pages.  has_attr guards anchors that carry no href at all
    # (a['href'] would raise KeyError), matching the imdb filter below.
    movies = [a['href'] for a in soup.findAll('a')
              if (a.has_attr('href') and a['href'].endswith('.htm')
                  and a.text != '' and a['href'] not in ['movies.htm', 'contact.htm'])]
    for movie in movies:
        try:
            print("Crawling", movie)
            r = requests.get(base_url + movie)
            soup = BeautifulSoup(r.text)
            # Title: first text fragment inside the grey-styled span(s).
            title = flatten([findStringChildNodes(node) for node in
                             soup.findAll('span', attrs={'style': "color: rgb(153, 153, 153);"})])[0]
            tags = flatten([findStringChildNodes(node) for node in
                            soup.findAll('font', attrs={'size': '-1'})])
            # Keep only the fragments containing a kill-count phrase.
            kills = [tag for tag in tags if any(
                map(lambda s: s in tag.replace("\r\n", " ").lower(), kill_phrases))]
            # Take the text after the last ':' and before any '(',
            # then strip everything but digits, e.g. "Kills: 25 (approx)" -> 25.
            kills = int(re.sub("[^0-9]", "", kills[0].split(":")[-1].split("(")[0]))
            # Year: expect exactly one element with this styling; anything
            # else is ambiguous markup, so skip the movie rather than guess.
            year = soup.findAll(['a', 'span'], attrs={"style": "color: rgb(198, 213, 217);"})
            if len(year) != 1:
                print("Ambiguous year lines, skipping", movie)
                continue
            year = int(year[0].text)
            # IMDb id: expect exactly one outbound imdb.com link.
            imdb = [a['href'] for a in soup.findAll('a') if
                    (a.has_attr('href') and 'imdb.com' in a['href'] and 'imdb' in a.text.lower())]
            if len(imdb) != 1:
                print("Ambiguous imdb lines, skipping", movie)
                continue
            imdb = imdb[0].split('/title/')[1].strip('/')
            data['title'].append(title)
            data['year'].append(year)
            data['kills'].append(kills)
            data['imdb'].append(imdb)
        # Narrowed from a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit and made the crawl uninterruptible.
        except Exception:
            e = sys.exc_info()[0]
            print("Error processing", movie, ":", e)
        # sleep a little to not spam the server too much
        time.sleep(0.1)

# Persist everything that was scraped.
data = pd.DataFrame(data)
data.to_csv('moviebodycounts.csv')