        Do not crawl a site that has already been crawled.
        '''
        if os.path.exists(self.file_links_path):
            print("[*] skip this site {}".format(self.start_url))
            self.skip_flag = True

    def prepare(self):
        '''
        Create the directories and files used to store results.
        '''
        self.current_path = getCurrentPath()
        project_path = os.path.join(self.current_path, self.project_name)
        makeDir(project_path)
        subdomain_name = getDomain(self.start_url)
        self.domain_path = os.path.join(project_path, subdomain_name)
        makeDir(self.domain_path)
        self.file_links_path = os.path.join(self.domain_path, 'file_links.json')
        # self.other_links_path = os.path.join(self.domain_path, 'other_links.json')
        self.finally_result_path = os.path.join(self.domain_path, 'result.json')

    def crawlLinks(self):
        '''
        Crawl links by running LinksCrawler.py in a subprocess.
        '''
        print("[*] start to crawlLinks {}".format(self.start_url))
        cmd = '{0} {1}/LinksCrawler.py {2} {3}'.format(
            config.PY_EXECUTABLE_PATH, self.current_path, self.start_url, self.file_links_path)
        print("[*] cmd: {}".format(cmd))
        p = subprocess.Popen(
            [config.PY_EXECUTABLE_PATH, "{}/LinksCrawler.py".format(self.current_path),
             self.start_url, self.file_links_path],
            stdout=subprocess.PIPE)
        try:
            (stdoutput, erroutput) = p.communicate(timeout=config.CRAWLER_SITE_TIMEOUT)  # per-site timeout
        except subprocess.TimeoutExpired:
            p.kill()
            print("[*] TIMEOUT: %s" % cmd)
        except Exception as e:
            log.logger.debug(e)  # the error log gets too noisy otherwise

    def parseFileLinks(self):
        '''
        Parse the crawled file URLs.
        '''
        print("[*] start to parseFileLinks")
        if os.path.exists(self.file_links_path):
            with open(self.file_links_path, 'r') as f:
                print('[*] reading {0}'.format(self.file_links_path))
                self.crawled_file_links_dict = json.load(f)
            # os.remove(self.file_links_path)  # remove the intermediate file

    def downloadFile(self, url_file_list, file_type):
        '''
        Download files.
        '''
        print("[*] start to downloadFile {0} :{1}".format(file_type, len(url_file_list)))
        download = DownLoader(self.domain_path, url_file_list, file_type)
        # download.prepare()
        downloaded_file_path_dict = download.startDownload()
        return downloaded_file_path_dict

    def detectSensitiveFile(self, downloaded_file_path_dict, file_type):
        '''
        Detect files that contain sensitive information.

        :param file_type: file type
        :return:
        '''
        print("[*] start to detectSensitiveFile {0} :{1}".format(file_type, len(downloaded_file_path_dict)))
        parser = SensitiveFileParser(downloaded_file_path_dict, file_type)
        sensitive_result_dict = parser.startParse()
        if sensitive_result_dict:
            # merge into the accumulated results instead of replacing them
            self.result_dict.update(sensitive_result_dict)

    def saveResultFile(self):
        '''
        Save the final result.
        '''
        if self.result_dict:
            with open(self.finally_result_path, "w") as f:
                json.dump(self.result_dict, f)


def main(target_txt, project_name):
    with open(target_txt, 'r') as f:
        url_list = [url.strip() for url in f if url.strip()]
    print("[*] target length is :{}".format(len(url_list)))
    for num, url in enumerate(url_list, 1):
        hunter = SensitivesHunter(url, project_name)
        print("[*] detecting NO.{} url".format(num))
        # try:
        hunter.startHunt()
        # except Exception as e:
        #     pass
        #     log.logger.debug(e)  # the error log gets too noisy otherwise
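
# A minimal command-line entry point, sketched here as an assumption: the original
# project may wire this up differently. It only forwards two positional arguments
# (a targets file and a project name) to the main() defined above.
if __name__ == '__main__':
    import sys

    if len(sys.argv) != 3:
        print("usage: {} <target_txt> <project_name>".format(sys.argv[0]))
        sys.exit(1)
    main(sys.argv[1], sys.argv[2])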