text
stringlengths
1
93.6k
try:
ip_address = socket.gethostbyname(hostname)
except:
return -1
url_match = re.search(
r'at\.ua|usa\.cc|baltazarpresentes\.com\.br|pe\.hu|esy\.es|hol\.es|sweddy\.com|myjino\.ru|96\.lt|ow\.ly', url)
ip_match = re.search(
'146\.112\.61\.108|213\.174\.157\.151|121\.50\.168\.88|192\.185\.217\.116|78\.46\.211\.158|181\.174\.165\.13|46\.242\.145\.103|121\.50\.168\.40|83\.125\.22\.219|46\.242\.145\.98|'
'107\.151\.148\.44|107\.151\.148\.107|64\.70\.19\.203|199\.184\.144\.27|107\.151\.148\.108|107\.151\.148\.109|119\.28\.52\.61|54\.83\.43\.69|52\.69\.166\.231|216\.58\.192\.225|'
'118\.184\.25\.86|67\.208\.74\.71|23\.253\.126\.58|104\.239\.157\.210|175\.126\.123\.219|141\.8\.224\.221|10\.10\.10\.10|43\.229\.108\.32|103\.232\.215\.140|69\.172\.201\.153|'
'216\.218\.185\.162|54\.225\.104\.146|103\.243\.24\.98|199\.59\.243\.120|31\.170\.160\.61|213\.19\.128\.77|62\.113\.226\.131|208\.100\.26\.234|195\.16\.127\.102|195\.16\.127\.157|'
'34\.196\.13\.28|103\.224\.212\.222|172\.217\.4\.225|54\.72\.9\.51|192\.64\.147\.141|198\.200\.56\.183|23\.253\.164\.103|52\.48\.191\.26|52\.214\.197\.72|87\.98\.255\.18|209\.99\.17\.27|'
'216\.38\.62\.18|104\.130\.124\.96|47\.89\.58\.141|78\.46\.211\.158|54\.86\.225\.156|54\.82\.156\.19|37\.157\.192\.102|204\.11\.56\.48|110\.34\.231\.42',
ip_address)
if url_match:
return -1
elif ip_match:
return -1
else:
return 1
def get_hostname_from_url(url):
    """Extract the hostname from *url*.

    Strips a leading scheme ("http://" / "https://") and/or "www." prefix,
    then truncates at the first "/" so only the host part remains.

    Args:
        url: URL (or bare hostname) string to parse.

    Returns:
        str: the hostname portion of *url*.
    """
    hostname = url
    # TODO: Put this pattern in patterns.py as something like - get_hostname_pattern.
    # Longest alternatives first: re.search stops at the first alternative that
    # matches, so with "https://" listed before "https://www." the "www." part
    # was never stripped for scheme+www URLs. Dots are escaped so "www." cannot
    # accidentally match e.g. "wwwX".
    pattern = r"https://www\.|http://www\.|https://|http://|www\."
    pre_pattern_match = re.search(pattern, hostname)
    if pre_pattern_match:
        hostname = hostname[pre_pattern_match.end():]
    # Cut everything from the first path separator onwards.
    post_pattern_match = re.search("/", hostname)
    if post_pattern_match:
        hostname = hostname[:post_pattern_match.start()]
    return hostname
# TODO: Put the DNS and domain code into a function.
def main(url):
    """Run every feature extractor against *url* and return the scores.

    Reads the previously saved page markup from
    ``LOCALHOST_PATH + DIRECTORY_NAME + '/markup.txt'``, parses it with
    BeautifulSoup, and collects one score per feature (by convention here,
    1 = legitimate, -1 = phishing) in the fixed order printed below.

    Args:
        url: The URL being classified.

    Returns:
        list: The 22 feature scores, in the printed order.
    """
    with open(LOCALHOST_PATH + DIRECTORY_NAME + '/markup.txt', 'r') as file:
        soup_string = file.read()

    soup = BeautifulSoup(soup_string, 'html.parser')
    status = []
    hostname = get_hostname_from_url(url)

    # URL-string-only features.
    status.append(having_ip_address(url))
    status.append(url_length(url))
    status.append(shortening_service(url))
    status.append(having_at_symbol(url))
    status.append(double_slash_redirecting(url))
    status.append(prefix_suffix(hostname))
    status.append(having_sub_domain(url))

    # Whois lookup. A failed lookup means no usable DNS/registration record,
    # which is itself a phishing signal and makes every domain-based feature
    # unanswerable, so those are scored -1 below.
    dns = 1
    try:
        domain = whois.query(hostname)
    except Exception:  # whois.query raises assorted types on lookup failure
        dns = -1

    status.append(-1 if dns == -1 else domain_registration_length(domain))
    status.append(favicon(url, soup, hostname))
    status.append(https_token(url))
    status.append(request_url(url, soup, hostname))
    status.append(url_of_anchor(url, soup, hostname))
    status.append(links_in_tags(url, soup, hostname))
    status.append(sfh(url, soup, hostname))
    status.append(submitting_to_email(soup))
    status.append(-1 if dns == -1 else abnormal_url(domain, url))
    status.append(i_frame(soup))
    status.append(-1 if dns == -1 else age_of_domain(domain))
    status.append(dns)
    status.append(web_traffic(soup))
    status.append(google_index(url))
    status.append(statistical_report(url, hostname))

    # NOTE(review): the original label list had an extra "8. SSL Final State"
    # entry with no corresponding feature computation, duplicating index 8;
    # it is removed so the 22 labels line up with the 22 scores in `status`.
    print('\n1. Having IP address\n2. URL Length\n3. URL Shortening service\n4. Having @ symbol\n'
          '5. Having double slash\n6. Having dash symbol(Prefix Suffix)\n7. Having multiple subdomains\n'
          '8. Domain Registration Length\n9. Favicon\n10. HTTP or HTTPS token in domain name\n'
          '11. Request URL\n12. URL of Anchor\n13. Links in tags\n14. SFH\n15. Submitting to email\n16. Abnormal URL\n'
          '17. IFrame\n18. Age of Domain\n19. DNS Record\n20. Web Traffic\n21. Google Index\n22. Statistical Reports\n')
    print(status)
    return status
# Use the below two lines if features_extraction.py is being run as a standalone file. If you are running this file as
# a part of the workflow pipeline starting with the chrome extension, comment out these two lines.
# if __name__ == "__main__":
# if len(sys.argv) != 2:
# print("Please use the following format for the command - `python2 features_extraction.py <url-to-be-tested>`")
# exit(0)
# main(sys.argv[1])