text stringlengths 1 93.6k |
|---|
email_field = browser.find_element_by_name('username')
|
password_field = browser.find_element_by_name('password')
|
submit_btn = browser.find_element_by_xpath('//button[@type="submit"]')
|
email_field.send_keys(args.username)
|
password_field.send_keys(args.password)
|
submit_btn.click()
|
time.sleep(3)
|
browser.get(args.url)
|
def get_browser():
    """Build and return a configured Chrome WebDriver instance."""
    logger.info('Configuring browser')
    opts = wd.ChromeOptions()
    # Run without a visible window when requested on the command line.
    if args.headless:
        opts.add_argument('--headless')
    # Keep Chrome's own console output down to fatal errors only.
    opts.add_argument('log-level=3')
    return wd.Chrome(options=opts)
|
def get_current_page():
    """Return the page number currently highlighted in the pagination widget."""
    logger.info('Getting current page number')
    # The active page link carries the 'selected' CSS class.
    selected = browser.find_element_by_class_name('selected')
    return int(selected.text)
|
def verify_date_sorting():
|
logger.info('Date limit specified, verifying date sorting')
|
ascending = urllib.parse.parse_qs(
|
args.url)['sort.ascending'] == ['true']
|
if args.min_date and ascending:
|
raise Exception(
|
'min_date required reviews to be sorted DESCENDING by date.')
|
elif args.max_date and not ascending:
|
raise Exception(
|
'max_date requires reviews to be sorted ASCENDING by date.')
|
# Module-level scraping state shared by the helper functions. One-element
# lists act as mutable cells so nested functions can rebind the values in
# place without `global` declarations.
browser = get_browser()
page = [1]                    # current pagination page number
idx = [0]                     # running counter — presumably the review index; confirm in extract_from_page
date_limit_reached = [False]  # checked in main's loop; set elsewhere when a min/max date limit is hit
valid_page = [True]           # checked in main's loop; cleared elsewhere when a page is unusable
|
def main():
    """Scrape reviews page by page and write the result to a CSV file.

    Signs in, navigates to (or resumes from) the review listing, then keeps
    extracting pages until the review limit, a date limit, the last page,
    or an extraction failure stops the loop.
    """
    logger.info(f'Scraping up to {args.limit} reviews.')
    res = pd.DataFrame([], columns=SCHEMA)

    sign_in()

    if not args.start_from_url:
        if not navigate_to_reviews():
            return
    else:
        # Resuming from an explicit URL: if a date limit is in play, the
        # URL's sort direction must be compatible with it.
        if args.max_date or args.min_date:
            verify_date_sorting()
        browser.get(args.url)
        page[0] = get_current_page()
        logger.info(f'Starting from page {page[0]:,}.')
        time.sleep(1)

    reviews_df = extract_from_page()
    # DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
    # pd.concat is the supported way to accumulate rows.
    res = pd.concat([res, reviews_df])

    while more_pages() and \
            len(res) < args.limit and \
            not date_limit_reached[0] and \
            valid_page[0]:
        go_to_next_page()
        try:
            reviews_df = extract_from_page()
            res = pd.concat([res, reviews_df])
        except Exception:
            # A failed page ends pagination rather than killing the run,
            # but the cause is logged instead of being silently swallowed.
            logger.exception('Failed to extract reviews from page; stopping.')
            break

    logger.info(f'Writing {len(res)} reviews to file {args.file}')
    res.to_csv(args.file, index=False, encoding='utf-8')

    end = time.time()
    logger.info(f'Finished in {end - start} seconds')
|
# Standard script entry point: run the scraper only when executed directly,
# not when this module is imported.
if __name__ == '__main__':
    main()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.