text
stringlengths
1
93.6k
action="show",
action_id=str(botid),
)
if botdata:
trailing_stoploss(botdata)
else:
if boterror and "status_code" in boterror:
if boterror["status_code"] == 404:
logger.error(
"Error occurred updating bots: bot with id '%s' was not found" % botid
)
else:
logger.error(
"Error occurred updating bots: %s" % boterror["msg"]
)
elif boterror and "msg" in boterror:
logger.error(
"Error occurred updating bots: %s" % boterror["msg"]
)
else:
logger.error("Error occurred updating bots")
if not wait_time_interval(logger, notification, timeint):
break
# <FILESEP>
# Purpose -
# Running this file stand-alone extracts all the features from a web page, for testing.
# Notes -
# 1 stands for legitimate
# 0 stands for suspicious
# -1 stands for phishing
from bs4 import BeautifulSoup
import urllib
import bs4
import re
import socket
import whois
from datetime import datetime
import time
# https://breakingcode.wordpress.com/2010/06/29/google-search-python/
# Previous package structure was modified. Import statements according to new structure added. Also code modified.
from googlesearch import search
# This import is needed only when you run this file in isolation.
import sys
from patterns import *
# Path of your local server. Different for different OSs.
# NOTE(review): this is the macOS default Apache document root — adjust for Linux/Windows.
LOCALHOST_PATH = "/Library/WebServer/Documents/"
# Folder under the web root where the project's pages are served from.
DIRECTORY_NAME = "Malicious-Web-Content-Detection-Using-Machine-Learning"
def having_ip_address(url):
    """Return -1 (phishing) if the URL contains a literal IPv4/IPv6 address, else 1."""
    # A raw IP instead of a hostname is a classic phishing indicator.
    combined_pattern = "|".join((ipv4_pattern, ipv6_pattern))
    if re.search(combined_pattern, url):
        return -1
    return 1
def url_length(url):
    """Classify a URL by length: 1 legitimate (<54), 0 suspicious (54-75), -1 phishing (>75)."""
    n = len(url)
    if n > 75:
        return -1
    return 0 if n >= 54 else 1
def shortening_service(url):
    """Return -1 if the URL matches a known link-shortening service, else 1."""
    # Shorteners hide the destination host, so a hit counts as phishing.
    return -1 if re.search(shortening_services, url) else 1
def having_at_symbol(url):
    """Return -1 if the URL contains an '@' character, else 1."""
    # Browsers ignore everything before '@', which lets attackers mask the real host.
    return -1 if "@" in url else 1
def double_slash_redirecting(url):
    """Return -1 if '//' occurs past the protocol prefix (redirect trick), else 1.

    The scheme's own '//' starts at index 5 for "http://" and 6 for "https://"
    (0-based), so any last occurrence beyond position 6 means an extra '//'
    embedded later in the URL. Plain string search for the last occurrence is
    simpler than a regex here.
    """
    if url.rfind("//") > 6:
        return -1
    return 1
def prefix_suffix(domain):
    """Return -1 if the domain contains a hyphen (often used to imitate brands), else 1."""
    return -1 if "-" in domain else 1
def having_sub_domain(url):
# Here, instead of greater than 1 we will take greater than 3 since the greater than 1 condition is when www and
# country domain dots are skipped
# Accordingly other dots will increase by 1
if having_ip_address(url) == -1:
match = re.search(
'(([01]?\\d\\d?|2[0-4]\\d|25[0-5])\\.([01]?\\d\\d?|2[0-4]\\d|25[0-5])\\.([01]?\\d\\d?|2[0-4]\\d|25[0-5])\\.'
'([01]?\\d\\d?|2[0-4]\\d|25[0-5]))|(?:[a-fA-F0-9]{1,4}:){7}[a-fA-F0-9]{1,4}',