-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathjobFinder.py
166 lines (127 loc) · 5.87 KB
/
jobFinder.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
import os
import json
from datetime import datetime
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.chrome.options import Options
from selenium.common.exceptions import NoSuchElementException
from constants import *
from structs import JobInfo
'''
Main code to scrape data off of indeed.com. Data is retrieved 'as is' and
will need to be cleaned later if user plans to do data analysis or something
useful with the resulting data.
'''
class jobFinder:
    """Scrape job listings from indeed.com with a headless Chrome webdriver.

    Data is collected raw ('as is'); callers are expected to clean it
    before any analysis. Each scraped listing is returned as a JSON string.
    """

    def __init__(self):
        """Start a headless Chrome driver sized for a desktop layout."""
        options = Options()
        options.headless = True
        options.add_argument('--window-size=1920,1200')
        self.driver = webdriver.Chrome(options=options, executable_path=os.path.abspath('chromedriver'))
        # URL of the results page currently being scraped; set by search_jobs
        self.current_url = None

    def close(self):
        """Shut down the underlying webdriver.

        The original code tried to close the driver after ``return`` in
        search_jobs, which was unreachable; call this explicitly instead.
        """
        self.driver.close()

    def get_description(self, url):
        """Return the lower-cased plain-text job description found at *url*.

        Returns the literal string 'None' when the description element
        is missing from the page.
        """
        try:
            self.driver.get(url)
            description = self.driver.find_element_by_id('jobDescriptionText')
            page_soup = BeautifulSoup(description.get_attribute('innerHTML'), features='html.parser')
        except NoSuchElementException:
            return 'None'
        # tab-separated text keeps adjacent fields from running together
        return page_soup.get_text("\t", strip=True).lower()

    def next_page_exists(self):
        """Return True when the current results page offers a 'Next' link."""
        self.driver.implicitly_wait(WAIT_5_SEC)
        self.driver.get(self.current_url)
        self.driver.implicitly_wait(WAIT_5_SEC)
        # an empty match list means we are on the last results page
        return len(self.driver.find_elements_by_xpath("//*[@aria-label='Next']")) > 0

    def get_next_page(self):
        """Click through to the next results page and record its URL."""
        next_page_button = self.driver.find_element_by_xpath("//*[@aria-label='Next']")
        # JS-driven click avoids overlays intercepting a native click
        self.driver.execute_script("arguments[0].click();", next_page_button)
        self.driver.implicitly_wait(WAIT_5_SEC)
        self.current_url = self.driver.current_url

    def _scrape_card(self, card, rank, page_num):
        """Extract one job card's raw attributes; return them as a JSON string.

        *card* is a BeautifulSoup tag for a single search-result card;
        *rank* and *page_num* record where in the result list it appeared.
        """
        job_id = card.attrs['data-jk']
        title = card.find('a', class_='jobtitle turnstileLink')['title']
        try:
            company = card.find('span', class_='company').text.strip()
        except AttributeError:
            company = 'None'
        location = card.find('div', class_="recJobLoc")['data-rc-loc']
        try:
            salary = card.find('span', attrs={'class': 'salaryText'}).string.strip()
        except AttributeError:
            salary = 'None'
        date_listed = card.find('span', attrs={'class': 'date'}).text.strip()
        try:
            # ratings may use ',' as the decimal separator depending on locale
            company_rating = float(card.find('span', attrs={'class': 'ratingsContent'}).text.strip().replace(',', '.'))
        except AttributeError:
            company_rating = 0.00
        url = 'http://indeed.com' + card.find('a', class_="jobtitle turnstileLink")['href']
        description = self.get_description(url)
        return json.dumps({'job_id': job_id,
                           'title': title,
                           'company': company,
                           'location': location,
                           'salary': salary,
                           'date_listed': date_listed,
                           'date_scraped': datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
                           'company_rating': company_rating,
                           'rank': rank,
                           'page_num': page_num,
                           'url': url,
                           'description': description})

    def search_jobs(self, job, location):
        """Search indeed.com for *job* in *location* and scrape every result page.

        Returns a list of JSON strings, one per listing, each carrying the
        raw scraped attributes plus scrape metadata (rank, page, timestamp).
        """
        self.driver.get('https://www.indeed.com/')
        self.driver.implicitly_wait(30)
        # fill in the "what" search box
        job_search_text_box = self.driver.find_element_by_id('text-input-what')
        job_search_text_box.clear()
        job_search_text_box.send_keys(job)
        # fill in the "where" box; .clear() alone does not always empty the
        # pre-populated location, so backspace it out as well
        location_search_text_box = self.driver.find_element_by_id('text-input-where')
        location_search_text_box.clear()
        location_search_text_box.send_keys(Keys.BACK_SPACE * 30)
        location_search_text_box.send_keys(location)
        # click the Find Jobs button
        find_jobs_button = self.driver.find_element_by_xpath('//button[text()="Find jobs"]')
        find_jobs_button.click()
        self.driver.implicitly_wait(WAIT_5_SEC)
        # remember the first results page URL for pagination
        self.current_url = self.driver.current_url
        page_num = 1
        rank = 1
        job_list = []
        while True:  # loop until no 'Next' link remains, scraping every page
            print('Current page: ', page_num)
            soup = BeautifulSoup(self.driver.page_source, 'html.parser')
            # loop variable renamed so it no longer shadows the *job* parameter
            for card in soup.find_all('div', class_='jobsearch-SerpJobCard unifiedRow row result clickcard'):
                job_list.append(self._scrape_card(card, rank, page_num))
                rank += 1
            if self.next_page_exists():
                self.get_next_page()
                page_num += 1
            else:
                break
        return job_list