text
stringlengths
1
93.6k
return None # Account for reviews that have been blocked
res = {}
# import pdb;pdb.set_trace()
for field in SCHEMA:
res[field] = scrape(field, review, author)
assert set(res.keys()) == set(SCHEMA)
return res
# (Function body; the enclosing `def` line and all indentation were lost in
# extraction.) Scrapes every non-featured, non-blocked review on the current
# results page into a DataFrame, refreshing the page once if it loaded empty,
# and sets the shared date_limit_reached flag when scraped dates fall outside
# the CLI-supplied min/max date bounds.
logger.info(f'Extracting reviews from page {page[0]}')
# Accumulate one row per successfully scraped review, indexed by idx[0].
res = pd.DataFrame([], columns=SCHEMA)
reviews = browser.find_elements_by_class_name('empReview')
logger.info(f'Found {len(reviews)} reviews on page {page[0]}')
# refresh page if failed to load properly, else terminate the search
if len(reviews) < 1:
browser.refresh()
time.sleep(5)
reviews = browser.find_elements_by_class_name('empReview')
logger.info(f'Found {len(reviews)} reviews on page {page[0]}')
if len(reviews) < 1:
valid_page[0] = False # make sure page is populated
for review in reviews:
# Featured reviews are site-pinned; skip them so they are not re-scraped.
if not is_featured(review):
data = extract_review(review)
# extract_review returns None for blocked reviews (see its comment above).
# NOTE(review): PEP 8 prefers `is not None` over `!= None` here.
if data != None:
logger.info(f'Scraped data for "{data["review_title"]}"\
({data["date"]})')
res.loc[idx[0]] = data
else:
logger.info('Discarding a blocked review')
else:
logger.info('Discarding a featured review')
# idx is a shared one-element list used as a mutable counter across pages.
idx[0] = idx[0] + 1
# NOTE(review): unparenthesized `and`/`or` — `and` binds tighter, so this
# fires when max_date is exceeded OR min_date is undercut; confirm that
# precedence matches the intended date-window termination check.
if args.max_date and \
(pd.to_datetime(res['date']).max() > args.max_date) or \
args.min_date and \
(pd.to_datetime(res['date']).min() < args.min_date):
logger.info('Date limit reached, ending process')
date_limit_reached[0] = True
return res
def more_pages():
    """Return True when the pagination widget shows pages after the current one.

    Reads the highlighted page number ('selected') and the last label in the
    page container; a missing widget means single-page results, hence False.
    """
    try:
        current_page = browser.find_element_by_class_name('selected')
        page_labels = browser.find_element_by_class_name('pageContainer').text.split()
        # Last label is the final page number; differing means more to visit.
        return int(page_labels[-1]) != int(current_page.text)
    except selenium.common.exceptions.NoSuchElementException:
        return False
def go_to_next_page():
    """Click the pagination 'next' button and bump the shared page counter."""
    logger.info(f'Going to page {page[0] + 1}')
    next_button = browser.find_element_by_class_name('nextButton')
    ActionChains(browser).click(next_button).perform()
    time.sleep(5)  # wait for ads to load
    page[0] += 1
def no_reviews():
    """Report whether the company has zero reviews (currently hard-coded False)."""
    # TODO: Find a company with no reviews to test on
    return False
def navigate_to_reviews():
    """Open the company page and follow its 'Reviews' link.

    Returns:
        False when the company has no reviews (nothing to scrape);
        True once the browser is sitting on the reviews page.
    """
    logger.info('Navigating to company reviews')
    browser.get(args.url)
    time.sleep(1)
    # Bail out early for companies with no reviews at all.
    if no_reviews():
        logger.info('No reviews to scrape. Bailing!')
        return False
    reviews_link = browser.find_element_by_xpath(
        '//a[@data-label="Reviews"]')
    browser.get(reviews_link.get_attribute('href'))
    time.sleep(1)
    return True
def sign_in():
logger.info(f'Signing in to {args.username}')
url = 'https://www.glassdoor.com/profile/login_input.htm'
browser.get(url)
# import pdb;pdb.set_trace()