id int64 0 458k | file_name stringlengths 4 119 | file_path stringlengths 14 227 | content stringlengths 24 9.96M | size int64 24 9.96M | language stringclasses 1 value | extension stringclasses 14 values | total_lines int64 1 219k | avg_line_length float64 2.52 4.63M | max_line_length int64 5 9.91M | alphanum_fraction float64 0 1 | repo_name stringlengths 7 101 | repo_stars int64 100 139k | repo_forks int64 0 26.4k | repo_open_issues int64 0 2.27k | repo_license stringclasses 12 values | repo_extraction_date stringclasses 433 values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
25,500 | conpot_cmdrsp.py | mushorg_conpot/conpot/protocols/snmp/conpot_cmdrsp.py | import sys
import logging
import random
from pysnmp.entity.rfc3413 import cmdrsp
from pysnmp.proto import error
from pysnmp.proto.api import v2c
import pysnmp.smi.error
from pysnmp import debug
import gevent
import conpot.core as conpot_core
from conpot.utils.networking import get_interface_ip
logger = logging.getLogger(__name__)
class conpot_extension(object):
    """Mixin with Conpot-specific behaviour shared by all SNMP command
    responders: peer/state extraction, session logging, tarpit delays and
    (D)DoS evasion checks.
    """

    def _getStateInfo(self, snmpEngine, stateReference):
        """Return ``(transport_address, snmp_version)`` for *stateReference*.

        Looks the reference up in each message processing subsystem's
        (private) state cache.  Falls through (returning None implicitly)
        when the reference is unknown to every subsystem.
        """
        for _, subsystem in list(snmpEngine.messageProcessingSubsystems.items()):
            state_index = subsystem._cache.__dict__["_Cache__stateReferenceIndex"]
            if stateReference in state_index:
                state_dict = state_index[stateReference][0]
                addr = state_dict["transportAddress"]
                # msgVersion 0/1 corresponds to SNMPv1/2, msgVersion 3 to SNMPv3
                if state_dict["msgVersion"] < 3:
                    snmp_version = state_dict["msgVersion"] + 1
                else:
                    snmp_version = state_dict["msgVersion"]
                return addr, snmp_version

    def log(self, version, msg_type, addr, req_varBinds, res_varBinds=None, sock=None):
        """Record an SNMP request (and optional response) as a Conpot session event.

        Only the first varBind of the request/response is logged.
        """
        session = conpot_core.get_session(
            "snmp", addr[0], addr[1], get_interface_ip(addr[0]), sock.getsockname()[1]
        )
        req_oid = req_varBinds[0][0]
        req_val = req_varBinds[0][1]
        event_type = "SNMPv{0} {1}".format(version, msg_type)
        request = {"oid": str(req_oid), "val": str(req_val)}
        response = None
        logger.info("%s request from %s: %s %s", event_type, addr, req_oid, req_val)
        if res_varBinds:
            res_oid = ".".join(map(str, res_varBinds[0][0]))
            res_val = res_varBinds[0][1]
            logger.info("%s response to %s: %s %s", event_type, addr, res_oid, res_val)
            response = {"oid": str(res_oid), "val": str(res_val)}
        session.add_event(
            {"type": event_type, "request": request, "response": response}
        )

    def do_tarpit(self, delay):
        """Sleep the greenlet according to *delay*.

        *delay* is either a single float string ("1.5") for a static delay,
        or two floats separated by a semicolon ("0.5;2.0") for a random
        delay uniformly drawn from that range.
        """
        # str.partition never yields None, so only emptiness needs checking
        lbound, _, ubound = delay.partition(";")
        if not lbound:
            # no lower boundary found. Assume zero latency
            pass
        elif not ubound:
            # no upper boundary found. Assume static latency
            gevent.sleep(float(lbound))
        else:
            # both boundaries found. Assume random latency between lbound and ubound
            gevent.sleep(random.uniform(float(lbound), float(ubound)))

    def check_evasive(self, state, threshold, addr, cmd):
        """Return True when the request should be dropped as a (D)DoS request.

        :param state: ``(individual_count, overall_count)`` request counters.
        :param threshold: "individual;overall" limits; 0 disables a check.
        """
        state_individual, state_overall = state
        threshold_individual, _, threshold_overall = threshold.partition(";")
        if int(threshold_individual) > 0:
            if int(state_individual) > int(threshold_individual):
                logger.warning(
                    "SNMPv%s: DoS threshold for %s exceeded (%s/%s).",
                    cmd,
                    addr,
                    state_individual,
                    threshold_individual,
                )
                # DoS threshold exceeded.
                return True
        if int(threshold_overall) > 0:
            if int(state_overall) > int(threshold_overall):
                # bug fix: report the overall counter (previously the
                # per-host counter was logged against the overall threshold)
                logger.warning(
                    "SNMPv%s: DDoS threshold exceeded (%s/%s).",
                    cmd,
                    state_overall,
                    threshold_overall,
                )
                # DDoS threshold exceeded
                return True
        # This request will be answered
        return False
class c_GetCommandResponder(cmdrsp.GetCommandResponder, conpot_extension):
    """SNMP GET responder mixing Conpot behaviour (databus-backed values,
    session logging, tarpit delays and DoS evasion) into pysnmp's stock
    GetCommandResponder."""

    def __init__(self, snmpEngine, snmpContext, databus_mediator, host, port):
        # bridges OID lookups to Conpot's databus (dynamic response values)
        self.databus_mediator = databus_mediator
        # "lower;upper" sleep bounds in seconds applied before each response
        self.tarpit = "0;0"
        # "individual;overall" request-count limits for DoS evasion; 0 disables
        self.threshold = "0;0"
        self.host = host
        self.port = port
        cmdrsp.GetCommandResponder.__init__(self, snmpEngine, snmpContext)
        conpot_extension.__init__(self)

    def handleMgmtOperation(self, snmpEngine, stateReference, contextName, PDU, acInfo):
        """Serve a GET: read the MIB, let the databus override the value,
        log the exchange, then reply (possibly after a tarpit delay).
        Returns None (request dropped) when evasion thresholds are hit."""
        (acFun, acCtx) = acInfo
        # rfc1905: 4.2.1.1
        mgmtFun = self.snmpContext.getMibInstrum(contextName).readVars
        varBinds = v2c.apiPDU.getVarBinds(PDU)
        addr, snmp_version = self._getStateInfo(snmpEngine, stateReference)
        evasion_state = self.databus_mediator.update_evasion_table(addr)
        if self.check_evasive(
            evasion_state, self.threshold, addr, str(snmp_version) + " Get"
        ):
            return None
        rspVarBinds = None
        try:
            # generate response
            rspVarBinds = mgmtFun(v2c.apiPDU.getVarBinds(PDU), (acFun, acCtx))
            # determine the correct response class and update the dynamic value table
            reference_class = rspVarBinds[0][1].__class__.__name__
            # reference_value = rspVarBinds[0][1]
            response = self.databus_mediator.get_response(
                reference_class, tuple(rspVarBinds[0][0])
            )
            if response:
                # databus supplied a dynamic value; substitute it for the MIB one
                rspModBinds = [(tuple(rspVarBinds[0][0]), response)]
                rspVarBinds = rspModBinds
        finally:
            # always log, even if the MIB read raised (rspVarBinds stays None)
            sock = snmpEngine.transportDispatcher.socket
            self.log(snmp_version, "Get", addr, varBinds, rspVarBinds, sock)
        # apply tarpit delay
        # NOTE(review): self.tarpit is a string (default "0;0"), so the != 0
        # comparison is always true; "0;0" yields an effectively zero sleep
        if self.tarpit != 0:
            self.do_tarpit(self.tarpit)
        # send response
        self.sendRsp(snmpEngine, stateReference, 0, 0, rspVarBinds)
        self.releaseStateInformation(stateReference)
class c_NextCommandResponder(cmdrsp.NextCommandResponder, conpot_extension):
    """SNMP GETNEXT responder with Conpot extensions (databus-backed values,
    session logging, tarpit delays and DoS evasion)."""

    def __init__(self, snmpEngine, snmpContext, databus_mediator, host, port):
        # bridges OID lookups to Conpot's databus (dynamic response values)
        self.databus_mediator = databus_mediator
        # "lower;upper" sleep bounds in seconds applied before each response
        self.tarpit = "0;0"
        # "individual;overall" request-count limits for DoS evasion; 0 disables
        self.threshold = "0;0"
        self.host = host
        self.port = port
        cmdrsp.NextCommandResponder.__init__(self, snmpEngine, snmpContext)
        conpot_extension.__init__(self)

    def handleMgmtOperation(self, snmpEngine, stateReference, contextName, PDU, acInfo):
        """Serve a GETNEXT; retries with adjusted varBinds while the engine
        reports StatusInformation. Returns None (request dropped) when DoS
        evasion thresholds are exceeded."""
        (acFun, acCtx) = acInfo
        # rfc1905: 4.2.2.1
        mgmtFun = self.snmpContext.getMibInstrum(contextName).readNextVars
        varBinds = v2c.apiPDU.getVarBinds(PDU)
        addr, snmp_version = self._getStateInfo(snmpEngine, stateReference)
        evasion_state = self.databus_mediator.update_evasion_table(addr)
        if self.check_evasive(
            evasion_state, self.threshold, addr, str(snmp_version) + " GetNext"
        ):
            return None
        rspVarBinds = None
        try:
            while 1:
                rspVarBinds = mgmtFun(varBinds, (acFun, acCtx))
                # determine the correct response class and update the dynamic value table
                reference_class = rspVarBinds[0][1].__class__.__name__
                # reference_value = rspVarBinds[0][1]
                response = self.databus_mediator.get_response(
                    reference_class, tuple(rspVarBinds[0][0])
                )
                if response:
                    # databus supplied a dynamic value; substitute the MIB one
                    rspModBinds = [(tuple(rspVarBinds[0][0]), response)]
                    rspVarBinds = rspModBinds
                # apply tarpit delay
                # NOTE(review): self.tarpit is a string, so "!= 0" is always
                # true; the default "0;0" results in an ~zero-length sleep
                if self.tarpit != 0:
                    self.do_tarpit(self.tarpit)
                # send response
                try:
                    self.sendRsp(snmpEngine, stateReference, 0, 0, rspVarBinds)
                except error.StatusInformation:
                    # retry: replace the failing varBind's OID and loop again
                    idx = sys.exc_info()[1]["idx"]
                    varBinds[idx] = (rspVarBinds[idx][0], varBinds[idx][1])
                else:
                    break
        finally:
            # always log, even if the MIB read raised (rspVarBinds stays None)
            sock = snmpEngine.transportDispatcher.socket
            self.log(snmp_version, "GetNext", addr, varBinds, rspVarBinds, sock)
        self.releaseStateInformation(stateReference)
class c_BulkCommandResponder(cmdrsp.BulkCommandResponder, conpot_extension):
    """SNMP GETBULK responder stub. Evasion checks run, but the actual bulk
    handling below the early raise has not been ported to the new
    architecture and is currently unreachable."""

    def __init__(self, snmpEngine, snmpContext, databus_mediator, host, port):
        # bridges OID lookups to Conpot's databus (dynamic response values)
        self.databus_mediator = databus_mediator
        # "lower;upper" sleep bounds in seconds applied before each response
        self.tarpit = "0;0"
        # "individual;overall" request-count limits for DoS evasion; 0 disables
        self.threshold = "0;0"
        self.host = host
        self.port = port
        cmdrsp.BulkCommandResponder.__init__(self, snmpEngine, snmpContext)
        conpot_extension.__init__(self)

    def handleMgmtOperation(self, snmpEngine, stateReference, contextName, PDU, acInfo):
        """Validate repeater counts and evasion state, then abort: bulk
        responses are not supported by this implementation yet."""
        (acFun, acCtx) = acInfo
        nonRepeaters = v2c.apiBulkPDU.getNonRepeaters(PDU)
        if nonRepeaters < 0:
            nonRepeaters = 0
        maxRepetitions = v2c.apiBulkPDU.getMaxRepetitions(PDU)
        if maxRepetitions < 0:
            maxRepetitions = 0
        reqVarBinds = v2c.apiPDU.getVarBinds(PDU)
        addr, snmp_version = self._getStateInfo(snmpEngine, stateReference)
        evasion_state = self.databus_mediator.update_evasion_table(addr)
        if self.check_evasive(
            evasion_state, self.threshold, addr, str(snmp_version) + " Bulk"
        ):
            return None
        # everything below this raise is dead code, kept for reference until
        # the bulk responder is ported to the new architecture
        raise Exception("This class is not converted to new architecture")
        try:
            N = min(int(nonRepeaters), len(reqVarBinds))
            M = int(maxRepetitions)
            R = max(len(reqVarBinds) - N, 0)
            if R:
                # NOTE(review): "/" yields a float on py3; would need "//"
                # if this code is ever revived
                M = min(M, self.maxVarBinds / R)
            debug.logger & debug.flagApp and debug.logger(
                "handleMgmtOperation: N %d, M %d, R %d" % (N, M, R)
            )
            mgmtFun = self.snmpContext.getMibInstrum(contextName).readNextVars
            if N:
                rspVarBinds = mgmtFun(reqVarBinds[:N], (acFun, acCtx))
            else:
                rspVarBinds = []
            varBinds = reqVarBinds[-R:]
            while M and R:
                rspVarBinds.extend(mgmtFun(varBinds, (acFun, acCtx)))
                varBinds = rspVarBinds[-R:]
                M = M - 1
        finally:
            sock = snmpEngine.transportDispatcher.socket
            self.log(snmp_version, "Bulk", addr, varBinds, rspVarBinds, sock)
        # apply tarpit delay
        if self.tarpit != 0:
            self.do_tarpit(self.tarpit)
        # send response
        if len(rspVarBinds):
            self.sendRsp(snmpEngine, stateReference, 0, 0, rspVarBinds)
            self.releaseStateInformation(stateReference)
        else:
            raise pysnmp.smi.error.SmiError()
class c_SetCommandResponder(cmdrsp.SetCommandResponder, conpot_extension):
    """SNMP SET responder mixing Conpot behaviour (databus mirroring,
    session logging, tarpit delays and DoS evasion) into pysnmp's stock
    SetCommandResponder."""

    def __init__(self, snmpEngine, snmpContext, databus_mediator, host, port):
        # bridges OID writes to Conpot's databus (dynamic response values)
        self.databus_mediator = databus_mediator
        # "lower;upper" sleep bounds in seconds applied before each response
        self.tarpit = "0;0"
        # "individual;overall" request-count limits for DoS evasion; 0 disables
        self.threshold = "0;0"
        self.host = host
        self.port = port
        conpot_extension.__init__(self)
        cmdrsp.SetCommandResponder.__init__(self, snmpEngine, snmpContext)

    def handleMgmtOperation(self, snmpEngine, stateReference, contextName, PDU, acInfo):
        """Apply a SET to the MIB, mirror the new value into the databus and
        log the exchange. Returns None (request dropped) when evasion
        thresholds are hit."""
        (acFun, acCtx) = acInfo
        mgmtFun = self.snmpContext.getMibInstrum(contextName).writeVars
        varBinds = v2c.apiPDU.getVarBinds(PDU)
        addr, snmp_version = self._getStateInfo(snmpEngine, stateReference)
        evasion_state = self.databus_mediator.update_evasion_table(addr)
        if self.check_evasive(
            evasion_state, self.threshold, addr, str(snmp_version) + " Set"
        ):
            return None
        # rfc1905: 4.2.5.1-13
        rspVarBinds = None
        # apply tarpit delay
        # NOTE(review): self.tarpit is a string (default "0;0"), so the != 0
        # comparison is always true; "0;0" yields an effectively zero sleep
        if self.tarpit != 0:
            self.do_tarpit(self.tarpit)
        try:
            rspVarBinds = mgmtFun(v2c.apiPDU.getVarBinds(PDU), (acFun, acCtx))
            # generate response
            self.sendRsp(snmpEngine, stateReference, 0, 0, rspVarBinds)
            self.releaseStateInformation(stateReference)
            # mirror the newly written value into the databus
            oid = tuple(rspVarBinds[0][0])
            self.databus_mediator.set_value(oid, rspVarBinds[0][1])
        except (
            pysnmp.smi.error.NoSuchObjectError,
            pysnmp.smi.error.NoSuchInstanceError,
        ):
            # writes to unknown objects surface to the client as NotWritable
            e = pysnmp.smi.error.NotWritableError()
            e.update(sys.exc_info()[1])
            raise e
        finally:
            # always log, even on failure (rspVarBinds stays None then)
            sock = snmpEngine.transportDispatcher.socket
            self.log(snmp_version, "Set", addr, varBinds, rspVarBinds, sock)
| 12,612 | Python | .py | 272 | 34.959559 | 109 | 0.606325 | mushorg/conpot | 1,224 | 413 | 115 | GPL-2.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,501 | databus.py | mushorg_conpot/conpot/core/databus.py | # Copyright (C) 2014 Johnny Vestergaard <jkv@unixcluster.dk>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import logging
import inspect
# this is needed because we use it in the xml.
import random
import gevent
import gevent.event
from lxml import etree
logger = logging.getLogger(__name__)
class Databus(object):
    """In-memory key/value store shared by Conpot's protocol emulators.

    Values may be plain objects, callables, or objects exposing a
    ``get_value()`` method (e.g. simulated sensors).  Observers can be
    registered per key and are notified (in a spawned greenlet) whenever
    that key is written.
    """

    def __init__(self):
        self._data = {}
        self._observer_map = {}
        # set once initialize() has finished loading the XML template
        self.initialized = gevent.event.Event()

    # the idea here is that we can store both values and functions in the key value store
    # functions could be used if a profile wants to simulate a sensor, or the function
    # could interface with a real sensor
    def get_value(self, key):
        """Return the current value for *key*.

        Objects with a ``get_value()`` method and plain callables are
        evaluated on every read; anything else is returned as-is.
        Raises AssertionError for unknown keys.
        """
        logger.debug("DataBus: Get value from key: [%s]", key)
        assert key in self._data
        item = self._data[key]
        if getattr(item, "get_value", None):
            # this could potentially generate a context switch, but as long as
            # the called method does not "callback" the databus we should be fine
            value = item.get_value()
            logger.debug("(K, V): (%s, %s)", key, value)
            return value
        elif hasattr(item, "__call__"):
            return item()
        else:
            # guaranteed to not generate context switch
            logger.debug("(K, V): (%s, %s)", key, item)
            return item

    def set_value(self, key, value):
        """Store *value* under *key* and asynchronously notify observers."""
        logger.debug("DataBus: Storing key: [%s] value: [%s]", key, value)
        self._data[key] = value
        # notify observers without blocking the writer
        if key in self._observer_map:
            gevent.spawn(self.notify_observers, key)

    def notify_observers(self, key):
        """Invoke every callback registered for *key* (runs in a greenlet)."""
        for cb in self._observer_map[key]:
            cb(key)

    def observe_value(self, key, callback):
        """Register *callback* (a callable taking the key) for write notifications."""
        assert hasattr(callback, "__call__")
        assert len(
            inspect.getfullargspec(callback)[0]
        )  # deprecated in py3.5, un-deprecated in py3.6
        if key not in self._observer_map:
            self._observer_map[key] = []
        self._observer_map[key].append(callback)

    def initialize(self, config_file):
        """(Re)load key/value mappings from the XML template *config_file*.

        ``value`` entries are eval'd; ``function`` entries name a class that
        is imported and instantiated (optionally with eval'd parameters).

        SECURITY NOTE: eval() runs arbitrary template content by design -
        templates must come from a trusted source only.
        """
        self.reset()
        assert self.initialized.isSet() is False
        logger.debug("Initializing databus using %s.", config_file)
        dom = etree.parse(config_file)
        entries = dom.xpath("//core/databus/key_value_mappings/*")
        for entry in entries:
            key = entry.attrib["name"]
            value = entry.xpath("./value/text()")[0].strip()
            value_type = str(entry.xpath("./value/@type")[0])
            assert key not in self._data
            # fix: use the module-level logger, not the root logging module
            logger.debug("Initializing %s with %s as a %s.", key, value, value_type)
            if value_type == "value":
                self.set_value(key, eval(value))
            elif value_type == "function":
                namespace, _classname = value.rsplit(".", 1)
                params = entry.xpath("./value/@param")
                module = __import__(namespace, fromlist=[_classname])
                _class = getattr(module, _classname)
                if len(params) > 0:
                    # eval param string to an argument list
                    params = eval(params[0])
                    self.set_value(key, _class(*(tuple(params))))
                else:
                    self.set_value(key, _class())
            else:
                raise Exception("Unknown value type: {0}".format(value_type))
        self.initialized.set()

    def reset(self):
        """Drop all data and observers; call stop() on values that support it."""
        logger.debug("Resetting databus.")
        # if the value object has a stop method (e.g. a simulated sensor), call it
        for value in list(self._data.values()):
            if getattr(value, "stop", None):
                value.stop()
        self._data.clear()
        self._observer_map.clear()
        self.initialized.clear()
| 4,440 | Python | .py | 102 | 34.696078 | 93 | 0.611381 | mushorg/conpot | 1,224 | 413 | 115 | GPL-2.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,502 | virtual_fs.py | mushorg_conpot/conpot/core/virtual_fs.py | # Copyright (C) 2018 Abhinav Saxena <xandfury@gmail.com>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import logging
import os
import sys
import fs
import conpot
from fs import open_fs, subfs
from conpot.core.filesystem import AbstractFS, SubAbstractFS
logger = logging.getLogger(__name__)
class VirtualFS(object):
    """
    Conpot's virtual file system. Based on Pyfilesystem2, it would allow us to have
    arbitrary file uploads while sand boxing them for later analysis. This is how it should look like:

    [_conpot_vfs]
    |
    |-- data_fs (persistent)
    |   |-- ftp/uploads
    |   `-- misc.
    |
    `-- protocol_fs (temporary, refreshed at startup)
        |-- common
        |-- telnet
        |-- http
        |-- snmp
        `-- ftp etc.

    :param data_fs_path: Path for storing data_fs. A dictionary with attribute name _protocol_vfs stores all the
    fs folders made by all the individual protocols.
    :type data_fs_path: fs.open_fs
    """

    def __init__(self, data_fs_path=None):
        # protocol_name -> (sub_protocol_fs, sub_data_fs); gives easy access to
        # each mounted protocol's chroot and its persistent upload store
        self._conpot_vfs = dict()
        if data_fs_path is None:
            try:
                # default persistent store lives inside the conpot package tree
                self.data_fs = open_fs(
                    os.path.join(
                        os.path.dirname(conpot.__file__),
                        "tests",
                        "data",
                        "data_temp_fs",
                    )
                )
            except fs.errors.FSError:
                logger.exception(
                    "Unable to create persistent storage for Conpot. Exiting"
                )
                sys.exit(3)
        else:
            try:
                assert data_fs_path and isinstance(data_fs_path, str)
                # caller-specified location for persistent uploads
                self.data_fs = open_fs(data_fs_path)
            except AssertionError:
                logger.exception(
                    "Incorrect FS url specified. Please check documentation for more details."
                )
                sys.exit(3)
            except fs.errors.CreateFailed:
                logger.exception("Unexpected error occurred while creating Conpot FS.")
                sys.exit(3)
        self.protocol_fs = None

    def initialize_vfs(self, fs_path=None, data_fs_path=None, temp_dir=None):
        """Create the temporary protocol_fs (and optionally re-open data_fs).

        :param fs_path: FS URL for the protocol file system source.
        :param data_fs_path: if given, re-open data_fs at this location.
        :param temp_dir: directory in which the temporary FS is created.
        """
        if data_fs_path is not None:
            logger.info(
                "Opening path {} for persistent storage of files.".format(data_fs_path)
            )
            # re-run __init__ so data_fs points at the new location
            self.__init__(data_fs_path=data_fs_path)
        if fs_path is None:
            fs_path = "tar://" + os.path.join(
                os.path.dirname(conpot.__file__), "data.tar"
            )
            logger.warning("Using default FS path. {}".format(fs_path))
        self.protocol_fs = AbstractFS(src_path=fs_path, temp_dir=temp_dir)

    def add_protocol(
        self,
        protocol_name: str,
        data_fs_subdir: str,
        vfs_dst_path: str,
        src_path=None,
        owner_uid=0,
        group_gid=0,
        perms=0o755,
    ) -> (SubAbstractFS, subfs.SubFS):
        """
        Method that would be used by protocols to initialize vfs. May be called by each protocol individually. This
        creates a chroot jail sub file system env which makes easier handling. It also creates a data_fs sub file system
        for managing protocol specific uploads.
        :param protocol_name: name of the protocol for which VFS is being created.
        :param data_fs_subdir: sub-folder name within data_fs that would be storing the uploads for later analysis
        :param vfs_dst_path: protocol specific sub-folder path in the fs.
        :param src_path: Source from where the files are to copied.
        :param owner_uid: UID of a registered user. This is the default owner in the sub file system
        :param group_gid: GID of a existing group.
        :param perms: Default permissions of the sub file system.
        :return: fs object

        **Note:** The owner_uid and group_gid must be already registered with the fs. Otherwise an exception
        would be raised.
        """
        assert isinstance(protocol_name, str) and protocol_name
        assert isinstance(data_fs_subdir, str) and data_fs_subdir
        assert isinstance(vfs_dst_path, str) and vfs_dst_path
        if src_path:
            assert isinstance(src_path, str)
            if not os.path.isdir(src_path):
                logger.error("Protocol directory is not a valid directory.")
                sys.exit(3)
        logger.info(
            "Creating persistent data store for protocol: {}".format(protocol_name)
        )
        # create a sub directory for persistent storage.
        if self.data_fs.isdir(data_fs_subdir):
            sub_data_fs = self.data_fs.opendir(path=data_fs_subdir)
        else:
            sub_data_fs = self.data_fs.makedir(path=data_fs_subdir)
        if protocol_name not in self._conpot_vfs.keys():
            sub_protocol_fs = self.protocol_fs.mount_fs(
                vfs_dst_path, src_path, owner_uid, group_gid, perms
            )
            self._conpot_vfs[protocol_name] = (sub_protocol_fs, sub_data_fs)
        return self._conpot_vfs[protocol_name]

    def close(self, force=False):
        """
        Close the filesystem properly. Better and more graceful than __del__
        :param force: Force close. This would close the AbstractFS instance - without close closing data_fs File Systems
        """
        if self._conpot_vfs and (not force):
            # iterate over a snapshot of the keys: entries are removed from
            # the dict while we close them
            for protocol in list(self._conpot_vfs.keys()):
                sub_protocol_fs, sub_data_fs = self._conpot_vfs[protocol]
                try:
                    # First let us close the data_fs instance ...
                    sub_data_fs.close()
                    # ... then the protocol_fs sub dir for that protocol.
                    sub_protocol_fs.close()
                except fs.errors.FSError:
                    logger.exception(
                        "Error occurred while closing FS {}".format(protocol)
                    )
                # bug fix: entries are immutable tuples, so drop the whole
                # dict entry (the old `del ...[0]` raised TypeError)
                del self._conpot_vfs[protocol]
        self.protocol_fs.close()
        self.protocol_fs.clean()
| 7,179 | Python | .py | 155 | 34.341935 | 120 | 0.582264 | mushorg/conpot | 1,224 | 413 | 115 | GPL-2.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,503 | filesystem.py | mushorg_conpot/conpot/core/filesystem.py | # Copyright (C) 2018 Abhinav Saxena <xandfury@gmail.com>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import time
import stat
import tempfile
import logging
import contextlib
import shutil
import fs
from stat import filemode
from datetime import datetime
from os import F_OK, R_OK, W_OK
from typing import Optional, Union, Text, Any, List
from fs import open_fs, mirror, errors, subfs, base
from fs.mode import Mode
from fs.wrapfs import WrapFS
from fs.permissions import Permissions
from fs.osfs import Info
from types import FunctionType
from conpot.core.fs_utils import (
_custom_conpot_file,
SubAbstractFS,
copy_files,
FilesystemError,
)
from conpot.core.fs_utils import FSOperationNotPermitted
logger = logging.getLogger(__name__)
# month number -> three-letter English abbreviation
months_map = dict(
    enumerate(
        (
            "Jan", "Feb", "Mar", "Apr", "May", "Jun",
            "Jul", "Aug", "Sep", "Oct", "Nov", "Dec",
        ),
        start=1,
    )
)
# ---------------------------------------------------
# Regarding Permissions:
# ---------------------------------------------------
# For directories:
# - Read bit = You can read the names on the list.
# - Write bit = You can {add,rename,delete} names on the list IF the execute bit is set too.
# - Execute bit = You can make this directory your working directory.
# ---------------------------------------------------
# For files:
# - Read bit = Grants the capability to read, i.e., view the contents of the file.
# - Write bit = Grants the capability to modify, or remove the content of the file.
# - Execute bit = User with execute permissions can run a file as a program.
# ---------------------------------------------------
class AbstractFS(WrapFS):
"""
AbstractFS distinguishes between "real" filesystem paths and "virtual" ftp paths emulating a UNIX chroot jail
where the user can not escape its home directory (example: real "/home/user" path will be seen as "/" by the client)
This class exposes common fs wrappers around all os.* calls involving operations against the filesystem like
creating files or removing directories (such as listdir etc.)
*Implementation Note:* When doing I/O - Always with the check_access and set_access context managers for safe
operations.
"""
    def __init__(
        self,
        src_path: str,
        create_mode: int = 0o777,  # Default file system permissions.
        temp_dir: Union[str, None] = None,
        identifier: Optional[str] = "__conpot__",
        auto_clean: Optional[bool] = True,
        ignore_clean_errors: Optional[bool] = True,
    ) -> None:
        """Build the virtual FS: create a temp directory, open it as the
        wrapped filesystem, mirror *src_path* into it and build the
        metadata cache (owner/group/permissions per path).

        :param src_path: FS URL of the source tree to copy in.
        :param create_mode: default permission bits applied to every path.
        :param temp_dir: parent directory for the temporary FS (None = system default).
        :param identifier: prefix for the temp directory name; '/' is sanitized.
        """
        # NOTE(review): getcwd() is called before super().__init__() wires up
        # the wrapped FS - presumably provided elsewhere in the class; confirm.
        self._cwd = self.getcwd()  # keep track of the current working directory
        self._cache = {}  # Storing all cache of the file system
        self.identifier = identifier.replace("/", "-")
        self._auto_clean = auto_clean
        self._ignore_clean_errors = ignore_clean_errors
        self.temp_dir = temp_dir
        self._cleaned = False
        self.built_cache = False
        # Create our file system
        self._temp_dir = tempfile.mkdtemp(
            prefix=(self.identifier or "ConpotTempFS"), dir=self.temp_dir
        )
        # open various filesystems that would be used by Conpot
        try:
            self.vfs = open_fs(self._temp_dir)
            super(AbstractFS, self).__init__(self.vfs)
        except fs.errors.FSError as fs_err:
            logger.exception("File System exception occurred! {}".format(fs_err))
        # Copy all files from src_path into our file system
        logger.info(
            "Initializing Virtual File System at {}. Source specified : {}\n Please wait while the "
            "system copies all specified files".format(self._temp_dir, src_path)
        )
        self.utime = self.settimes  # utime maps to settimes
        # keep records related to users and groups
        self.default_uid = 0
        self.default_gid = 0
        self.default_perms = create_mode
        # uid -> {"user": name}; root is always present
        self._users = {0: {"user": "root"}}
        # gid -> {"group": name}; root is always present
        self._grps = {0: {"group": "root"}}
        # simple dictionary linking users to groups ->
        self._user_grps = {0: {0}}  # --> gid: set(uids)
        self._initialize_fs(src_path=src_path)
        # fixme: kind of hack-ish. Find the correct way of doing this.
        self._wrap_fs._meta["supports_rename"] = False
def norm_path(self, path):
path = "/" if path == "." else path
try:
_path = (
self.validatepath(self._cwd + path)
if self._cwd not in path
else self.validatepath(path)
)
return _path
except fs.errors.FSError:
logger.debug("Could not validate path: {}".format(path))
raise FilesystemError("Could not validate path: {}".format(path))
    def _initialize_fs(self, src_path: str) -> None:
        """
        Copies all data into Conpot's created fs folder and builds up the cache.
        :param src_path: FS URLS
        """
        # copy all contents from the source path the filesystem.
        src_fs = open_fs(src_path)
        logger.debug(
            "Building up file system: copying contents from the source path {}".format(
                src_path
            )
        )
        with src_fs.lock():
            mirror.mirror(src_fs=src_fs, dst_fs=self.vfs)
            # snapshot metadata for every walked path into the local cache
            self._cache.update(
                {
                    path: info
                    for path, info in self.walk.info(
                        namespaces=["basic", "access", "details", "stat"]
                    )
                }
            )
            # the walk does not include the root itself; add it explicitly
            self._cache["/"] = self._wrap_fs.getinfo(
                "/", namespaces=["basic", "access", "details", "stat", "link"]
            )
            # stamp default ownership/permissions onto the whole tree
            self.chown("/", self.default_uid, self.default_gid, recursive=True)
            self.chmod("/", self.default_perms, recursive=True)
            self.built_cache = (
                True  # FS has been built. Now all info must be accessed from cache.
            )
        src_fs.close()
        del src_fs
def __str__(self):
return "<Conpot AbstractFS '{}'>".format(self._temp_dir)
__repr__ = __str__
    @property
    def users(self):
        # registered users: uid -> {"user": name}
        return self._users

    @property
    def groups(self):
        # registered groups: gid -> {"group": name}
        return self._grps

    @property
    def user_groups(self):
        """gid: {set of uid of users.}"""
        return self._user_grps
def getmeta(self, namespace="standard"):
self.check()
meta = self.delegate_fs().getmeta(namespace=namespace)
meta["supports_rename"] = False
return meta
# ------- context managers for easier handling of fs
    @contextlib.contextmanager
    def check_access(self, path=None, user=None, perms=None):
        """
        Checks whether the current user has permissions to do a specific operation. Raises FSOperationNotPermitted
        exception in case permissions are not satisfied.

        Handy utility to check whether the user with uid provided has permissions specified. Examples:

        >>> import conpot.core as conpot_core
        >>> _vfs, _ = conpot_core.get_vfs('ftp')
        >>> with _vfs.check_access(path='/', user=13, perms='rwx'):
        >>>     _vfs.listdir('/')

        >>> with _vfs.check_access(path='/', user=45, perms='w'):
        >>>     with _vfs.open('/test', mode='wb') as _file:
        >>>         _file.write(b'Hello World!')
        """
        if not self.access(path=path, name_or_id=user, required_perms=perms):
            raise FSOperationNotPermitted(
                "User {} does not have required permission to file/path: {}".format(
                    user, path
                )
            )
        else:
            logger.debug(
                "File/Dir {} has the requested params : {}".format(path, (user, perms))
            )
        # side effect: refreshes the accessed/modified timestamps in the
        # cache before the guarded block runs
        self.setinfo(path, {})
        yield
        # after the guarded block, report what kind of entry was touched
        if self.vfs.isfile(path):
            logger.debug("yield file: {} after requested access.".format(path))
        elif self.vfs.isdir(path):
            logger.debug("yield dir: {} after requested access.".format(path))
        else:
            logger.debug(
                "yield unknown type: {} after requested access.".format(path)
            )
# -----------------------------------------------------------
# Custom "setter" methods overwriting behaviour FS library methods
# We need to update our cache in such cases.
# -----------------------------------------------------------
    def setinfo(self, path, info):
        """
        Higher level function to directly change values in the file system. Dictionary specified here changes cache
        values.
        :param path: path of the file that is to be changed
        :param info: Raw Info object. Please check pyfilesystem2's docs for more info.
        :raises FilesystemError: when an ``lstat`` namespace is supplied
            (not supported).
        """
        assert path and isinstance(path, str)
        path = self.norm_path(path)
        if "lstat" not in info:
            # the finally block below ALWAYS runs the cache update, even when
            # 'details' triggers the early return through the wrapped fs
            try:
                if "details" in info:
                    details = info["details"]
                    if "accessed" in details or "modified" in details:
                        # timestamps are delegated to the wrapped filesystem
                        return self._wrap_fs.setinfo(path, info)
            finally:
                try:
                    assert self._cache[path]
                except (AssertionError, KeyError):
                    # This is the first time we have seen this file. Let us create this entry.
                    logger.debug("Creating cache for file/directory : {}".format(path))
                    self._cache[path] = self._wrap_fs.getinfo(
                        path, namespaces=["basic", "access", "details", "stat", "link"]
                    )
                # update the 'accessed' and 'modified' time.
                self.settimes(path)
                if "access" in info:
                    access = info["access"]
                    if "permissions" in access:
                        self._cache[path].raw["access"]["permissions"] = access[
                            "permissions"
                        ]
                        self._cache[path].raw["details"]["metadata_changed"] = (
                            fs.time.datetime_to_epoch(datetime.now())
                        )
                    if "user" in access or "uid" in access:
                        try:
                            # NOTE(review): the second operand of this "or" is
                            # redundant - it can only be true when the first is
                            if "user" in access or (
                                "user" in access and "uid" in access
                            ):
                                self._cache[path].raw["access"]["user"] = access["user"]
                                # reverse-lookup the uid from the registered users;
                                # single-item unpack asserts the name is unique
                                [_uid] = [
                                    key
                                    for key, value in self._users.items()
                                    if value == {"user": access["user"]}
                                ]
                                self._cache[path].raw["access"]["uid"] = _uid
                                self._cache[path].raw["details"]["metadata_changed"] = (
                                    fs.time.datetime_to_epoch(datetime.now())
                                )
                            else:
                                # Must be 'uid' that is available.
                                _uid = int(access["uid"])  # type: ignore
                                self._cache[path].raw["access"]["uid"] = _uid
                                self._cache[path].raw["access"]["user"] = self._users[
                                    _uid
                                ]["user"]
                                self._cache[path].raw["details"]["metadata_changed"] = (
                                    fs.time.datetime_to_epoch(datetime.now())
                                )
                        except (TypeError, AssertionError, KeyError):
                            # unknown user/uid propagates to the caller
                            raise
                    if "group" in access or "gid" in access:
                        try:
                            # NOTE(review): same redundant "or" pattern as above
                            if "group" in access or (
                                "group" in access and "gid" in access
                            ):
                                self._cache[path].raw["access"]["group"] = access[
                                    "group"
                                ]
                                # reverse-lookup the gid from the registered groups
                                [_gid] = [
                                    key
                                    for key, value in self._grps.items()
                                    if value == {"group": access["group"]}
                                ]
                                self._cache[path].raw["access"]["gid"] = _gid
                                self._cache[path].raw["details"]["metadata_changed"] = (
                                    fs.time.datetime_to_epoch(datetime.now())
                                )
                            else:
                                # Must be 'gid' that is available.
                                _gid = int(access["gid"])  # type: ignore
                                self._cache[path].raw["access"]["gid"] = _gid
                                self._cache[path].raw["access"]["group"] = self._grps[
                                    _gid
                                ]["group"]
                                self._cache[path].raw["details"]["metadata_changed"] = (
                                    fs.time.datetime_to_epoch(datetime.now())
                                )
                        except (TypeError, AssertionError, KeyError):
                            # unknown group/gid propagates to the caller
                            raise
        else:
            raise FilesystemError("lstat is not currently supported!")
    def makedir(
        self,
        path,  # type: Text
        permissions=None,  # type: Optional[int]
        recreate=True,  # type: bool
    ):
        """Create a directory and register it in the metadata cache.

        Note: *recreate* is forced to True, so an already-existing directory
        never raises. *permissions* is applied only to the cached metadata -
        the wrapped filesystem is always asked for default permissions.
        """
        # make a directory in the file system. Also, update the cache about the directory.
        _path = self.norm_path(path)
        # we always want to overwrite a directory if it already exists.
        recreate = True if recreate is False else recreate
        perms = permissions
        fs_err = None
        try:
            super(AbstractFS, self).makedir(_path, permissions=None, recreate=recreate)
        except fs.errors.FSError as err:
            fs_err = err
        finally:
            if not fs_err:
                # cache default/requested ownership + permissions for the new dir
                dir_perms = perms if perms else self.default_perms
                dir_cache = {
                    "access": {
                        "permissions": Permissions.create(dir_perms),
                        "uid": self.default_uid,
                        "gid": self.default_gid,
                    }
                }
                logger.debug("Created directory {}".format(_path))
                self.setinfo(_path, info=dir_cache)
            else:
                raise fs_err
    def removedir(self, path, rf=True):
        """Remove a directory from the file system.
        :param path: directory path
        :param rf: remove directory recursively and forcefully. This removes directory even if there is any data
        in it. If set to False, an exception would be raised
        """
        # removing a directory and finally block would clear the local cache.
        _path = self.norm_path(path)
        fs_err = None
        try:
            super(AbstractFS, self).removedir(_path)
        except fs.errors.FSError as err:
            fs_err = err
        finally:
            if not fs_err:
                rm_dir = self._cache.pop(_path)
                logger.debug("Removed directory {}".format(rm_dir))
            else:
                if isinstance(fs_err, fs.errors.DirectoryNotEmpty) and rf is True:
                    # delete the contents for the directory recursively
                    self._wrap_fs.removetree(_path)
                    # delete the all the _cache files in the directory.
                    # NOTE(review): this is a substring match, so sibling
                    # paths that merely contain _path (e.g. "/foo" matching
                    # "/foobar") may be purged too - confirm intent
                    _files = [i for i in self._cache.keys() if _path in i]
                    for _f in _files:
                        file = self._cache.pop(_f)
                        logger.debug("Removing file : {}".format(repr(file)))
                else:
                    raise fs_err
def remove(self, path):
        """Remove a file from the file system.
        :param path: file path (normalized via ``norm_path``)
        :raises fs.errors.FSError: re-raised when the backing fs fails.
        """
        _path = self.norm_path(path)
        fs_err = None
        try:
            super(AbstractFS, self).remove(_path)
        except fs.errors.FSError as err:
            fs_err = err
        finally:
            if not fs_err:
                # keep the cache in sync with the backing filesystem.
                rm_file = self._cache.pop(_path)
                logger.debug("Removed file {}".format(rm_file))
            else:
                raise fs_err
def openbin(self, path, mode="r", buffering=-1, **options):
        """
        Open a file in the ConpotFS in binary mode.
        Returns a ``_custom_conpot_file`` wrapper that refreshes the cache
        entry for the path when the file is closed.
        """
        logger.debug("Opening file {} with mode {}".format(path, mode))
        _path = self.norm_path(path)
        # translate to the platform's binary mode string; strip any 't'
        # (text) marker so the mode stays purely binary.
        _bin_mode = Mode(mode).to_platform_bin()
        _bin_mode = _bin_mode.replace("t", "") if "t" in _bin_mode else _bin_mode
        _parent_fs = self.delegate_fs()
        self.check()
        binary_file = _custom_conpot_file(
            file_system=self,
            parent_fs=_parent_fs,
            path=_path,
            mode=_bin_mode,
            encoding=None,
        )
        return binary_file
def open(
        self,
        path,  # type: Text
        mode="r",  # type: Text
        buffering=-1,  # type: int
        encoding=None,  # type: Optional[Text]
        newline="",  # type: Text
        line_buffering=False,  # type: bool
        **options  # type: Any
    ):
        """Open a file (text mode by default); mirrors the ``fs`` open API.
        Returns a cache-aware ``_custom_conpot_file`` wrapper.
        """
        _open_mode = Mode(mode)
        base.validate_open_mode(mode)
        self.check()
        _path = self.norm_path(path)
        _parent_fs = self.delegate_fs()
        # NOTE(review): _encoding is computed but never used - the wrapper
        # below receives the raw ``encoding`` argument (possibly None).
        # Confirm whether the utf-8 fallback was intended to be passed on.
        _encoding = encoding or "utf-8"
        file = _custom_conpot_file(
            file_system=self,
            parent_fs=_parent_fs,
            path=_path,
            mode=_open_mode.to_platform(),
            buffering=buffering,
            encoding=encoding,
            newline=newline,
            line_buffering=line_buffering,
        )
        return file
def setbinfile(self, path, file):
        """Write the contents of binary file object *file* into *path*."""
        with self.openbin(path, "wb") as dst_file:
            copy_files(file, dst_file)
        # touch the cache entry so access/modified metadata is refreshed.
        self.setinfo(path, {})
def move(self, src_path, dst_path, overwrite=False):
        """Move a file (directories are rejected) from src_path to dst_path.
        NOTE(review): ``overwrite`` is accepted for API compatibility but
        never consulted - an existing destination is always overwritten.
        Confirm whether that is intended.
        """
        if self.getinfo(src_path).is_dir:
            raise fs.errors.FileExpected(src_path)
        with self.openbin(src_path, "rb") as read_file:
            with self.openbin(dst_path, "wb") as dst_file:
                copy_files(read_file, dst_file)
        # refresh metadata for both endpoints, then drop the source file.
        self.setinfo(src_path, {})
        self.setinfo(dst_path, {})
        self.remove(src_path)
def copy(self, src_path, dst_path, overwrite=False):
        """Copy a file (directories are rejected) from src_path to dst_path.
        NOTE(review): ``overwrite`` is never consulted - an existing
        destination is always overwritten. Confirm intent.
        """
        if self.getinfo(src_path).is_dir:
            raise fs.errors.FileExpected(src_path)
        with self.openbin(src_path, "rb") as read_file:
            with self.openbin(dst_path, "wb") as dst_file:
                copy_files(read_file, dst_file)
        # refresh cached metadata for both endpoints.
        self.setinfo(src_path, {})
        self.setinfo(dst_path, {})
# -----------------------------------------------------------
# Custom "getter" methods overwriting behaviour FS library methods
# Data is retrieved from the cached file-system.
# -----------------------------------------------------------
def opendir(self, path, factory=SubAbstractFS):
        """Open *path* as a sub filesystem (chroot-style by default)."""
        return super(AbstractFS, self).opendir(path, factory=factory)
def settimes(self, path, accessed=None, modified=None):
        """Set access/modified times on the backing fs (when given), then
        mirror the resulting values into the cache as epoch seconds."""
        if accessed or modified:
            self.delegate_fs().settimes(path, accessed, modified)
        self._cache[path].raw["details"]["accessed"] = fs.time.datetime_to_epoch(
            super(AbstractFS, self).getinfo(path, namespaces=["details"]).accessed
        )
        self._cache[path].raw["details"]["modified"] = fs.time.datetime_to_epoch(
            super(AbstractFS, self).getinfo(path, namespaces=["details"]).modified
        )
def getinfo(self, path: str, get_actual: bool = False, namespaces=None):
    """Return an ``Info`` object for *path*, served from the cache.

    :param path: path to query; a leading '/' is added when missing.
    :param get_actual: bypass the cache and query the wrapped filesystem.
    :param namespaces: optional iterable of namespaces to include
        ('details', 'stat', 'lstat', 'link', 'access').
    :raises FilesystemError: when the path has no cache entry.
    """
    if get_actual or (not self.built_cache):
        # cache not built yet (or explicitly bypassed): ask the real fs.
        return self._wrap_fs.getinfo(path, namespaces)
    try:
        # ensure that the path starts with '/'
        if path[0] != "/":
            path = "/" + path
        info = {"basic": self._cache[path].raw["basic"]}
        if namespaces is not None:
            if "details" in namespaces:
                info["details"] = self._cache[path].raw["details"]
            if "stat" in namespaces:
                stat_cache = {
                    "st_uid": self._cache[path].raw["access"]["uid"],
                    "st_gid": self._cache[path].raw["access"]["gid"],
                    "st_atime": self._cache[path].raw["details"]["accessed"],
                    "st_mtime": self._cache[path].raw["details"]["modified"],
                    # TODO: Fix these to appropriate values
                    "st_mtime_ns": None,
                    "st_ctime_ns": None,
                    "st_ctime": None,
                }
                # permissions may be cached either as a raw list of names
                # or an fs Permissions instance; derive st_mode from either.
                if isinstance(
                    self._cache[path].raw["access"]["permissions"], list
                ):
                    stat_cache["st_mode"] = Permissions(
                        self._cache[path].raw["access"]["permissions"]
                    ).mode
                else:
                    stat_cache["st_mode"] = (
                        self._cache[path].raw["access"]["permissions"].mode
                    )
                self._cache[path].raw["stat"].update(stat_cache)
                info["stat"] = self._cache[path].raw["stat"]
            # Note that we won't be keeping tabs on 'lstat'
            if "lstat" in namespaces:
                # (the original assigned this twice; duplicate removed)
                info["lstat"] = self._cache[path].raw["lstat"]
            if "link" in namespaces:
                info["link"] = self._cache[path].raw["link"]
            if "access" in namespaces:
                info["access"] = self._cache[path].raw["access"]
        return Info(info)
    except KeyError:
        raise FilesystemError("No cache entry for path: {}".format(path))
def listdir(self, path):
        """List directory contents, refreshing the cache entry first."""
        logger.debug("Listing contents from directory: {}".format(self.norm_path(path)))
        self.setinfo(self.norm_path(path), {})
        return super(AbstractFS, self).listdir(self.norm_path(path))
def getfile(self, path, file, chunk_size=None, **options):
        """Copy the file at *path* into writable file object *file*.
        :raises FilesystemError: when the path is missing from the fs or
            from the cache.
        """
        # check where there exists a copy in the cache
        if (
            self.exists(self.norm_path(path))
            and self.norm_path(path) in self._cache.keys()
        ):
            # refresh metadata, then delegate the byte copy to the real fs.
            self.setinfo(self.norm_path(path), {})
            return self._wrap_fs.getfile(
                self.norm_path(path), file, chunk_size, **options
            )
        else:
            raise FilesystemError("Can't get. File does not exist!")
def __del__(self):
        # close the wrapped filesystem and, when auto-clean is enabled,
        # delete the temp directory backing it.
        # NOTE(review): errors raised in __del__ are suppressed/printed by
        # the interpreter at teardown - confirm clean() ordering is safe.
        self._wrap_fs.close()
        if self._auto_clean:
            self.clean()
# -----------------------------------------------------------
# Methods defined for our needs.
# -----------------------------------------------------------
def create_jail(self, path):
        """Returns chroot jail sub system (SubAbstractFS) for a path"""
        logger.debug("Creating jail for path: {}".format(path))
        return self.opendir(path)
def getcwd(self):
        """The VFS keeps no cwd state; the working directory is always '/'."""
        return "/"
def take_snapshot(self):
        """Take snapshot of entire filesystem.
        :rtype: dict
        NOTE(review): the live cache dict is returned by reference (no
        copy); later fs mutations alter the snapshot - confirm intent.
        """
        return {
            "date-time": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
            "snapshot-data": self._cache,
        }
def register_user(self, name: str, uid: int) -> None:
    """Store all user related data for the file system.

    :raises FilesystemError: when *name* is already registered under a
        different uid.
    """
    assert name and isinstance(name, str)
    # Reject the name up front when another uid already owns it. This is
    # equivalent to the previous add-then-rollback approach, but never
    # mutates the user table on the failure path.
    name_taken = any(
        other_uid != uid and record["user"] == name
        for other_uid, record in self._users.items()
    )
    if name_taken:
        raise FilesystemError(
            "Can't add users with duplicate uname: {}.".format(name)
        )
    self._users[uid] = {"user": name}
def create_group(self, name: str, gid: int) -> None:
    """
    Store all group related data for the file system.
    :param name: Name of the group
    :param gid: gid of the group
    :raises FilesystemError: when *name* is already used by another gid.
    """
    assert name and isinstance(name, str)
    self._grps[gid] = {"group": name}
    if len(set([v["group"] for k, v in self._grps.items()])) != len(
        self._grps.keys()
    ):
        # Roll back the insert and report the *name*; the original
        # formatted the whole record dict (e.g. "{'group': 'x'}") into
        # the error message.
        _gname = self._grps.pop(gid)["group"]
        raise FilesystemError(
            "Can't create groups with duplicate names: {}.".format(_gname)
        )
def add_users_to_group(self, gid: int, uids: List) -> None:
    """Add list of users to an existing group
    :param gid: Group id of the group.
    :param uids: List of registered users that belong to this group
    :raises FilesystemError: when the gid or any uid is unregistered.
    """
    try:
        assert gid in self._grps.keys()
        for i in uids:
            if i not in self._users.keys():
                raise AssertionError
        _uids = set(uids)
        if gid in self._user_grps.keys():
            # Union-assign into the existing membership set. The original
            # used '+=', which is unsupported for sets and raised
            # TypeError on the second call for the same gid.
            self._user_grps[gid] |= _uids
        else:
            self._user_grps[gid] = _uids
    except AssertionError:
        raise FilesystemError(
            "uid/gid does not exist in the file system. Please register it via create_group/"
            "register_user method."
        )
def chown(
    self, fs_path: str, uid: int, gid: int, recursive: Optional[bool] = False
) -> None:
    """Change the owner of a specified file. Wrapper for os.chown
    :param fs_path: path or directory in the VFS where chown would be executed.
    :param uid: The `uid` of the user. **User must be a registered user on the filesystem or an exception would be
    thrown.
    :param gid: The `gid` of the group **Group must be a registered group on the filesystem or an exception would be
    thrown.
    :param recursive: If the given path is directory, then setting the recursive option to true would walk down the
    tree and recursive change permissions in the cache.
    ** `fs_path` needs to be the absolute path w.r.t to the vfs. If you are in a sub file system, please use
    `subvfs.getcwd()` to get the current directory. **
    """
    path = self.norm_path(fs_path)
    try:
        assert isinstance(uid, int) and isinstance(gid, int)
    except AssertionError:
        logger.exception("Integers expected got {} - {}".format(uid, gid))
    if self.isdir(path) or self.isfile(path):
        assert self._grps[gid] and self._users[uid]
        chown_cache = {
            "access": {
                "user": self._users[uid]["user"],
                # Cache the numeric ids. The original stored the whole
                # user/group record dicts under 'uid'/'gid', which
                # corrupted st_uid/st_gid in the 'stat' namespace built
                # by getinfo() (makedir caches plain ints here).
                "uid": uid,
                "group": self._grps[gid]["group"],
                "gid": gid,
            }
        }
        if self.isdir(path) and recursive:
            if self.norm_path(path) != "/":
                self.setinfo(path, chown_cache)
            # walk the subtree and apply the same ownership to every entry.
            sub_dir = self.opendir(path)
            for _path, _ in sub_dir.walk.info():
                assert self._cache[self.norm_path(path + _path)]
                self.setinfo(path + _path, chown_cache)
            sub_dir.close()
        else:
            self.setinfo(path, chown_cache)
    else:
        # TODO: map this to the actual output of os.chown
        raise FilesystemError("File not found for chown")
def clean(self):
        """Clean (delete) temporary files created by this filesystem."""
        # idempotent: subsequent calls are no-ops.
        if self._cleaned:
            return
        try:
            logger.info(
                "Shutting down File System. Cleaning directories at {}".format(
                    self._temp_dir
                )
            )
            shutil.rmtree(self._temp_dir)
        except Exception as error:
            # best-effort removal unless the caller asked for strictness.
            if not self._ignore_clean_errors:
                raise errors.OperationFailed(
                    msg="failed to remove temporary directory", exc=error
                )
        self._cleaned = True
@property
    def root(self):
        """The root directory - the host temp directory backing the vfs."""
        return self._temp_dir
def stat(self, path):
    """Perform a stat() system call on the given path.
    :param path: (str) must be protocol relative path
    """
    # The original `assert path, isinstance(path, str)` used the type
    # check as the assert *message*, so the type was never validated.
    assert path and isinstance(path, str)
    self.setinfo(self.norm_path(path), {})
    return self.getinfo(path, namespaces=["stat"]).raw["stat"]
def readlink(self, path):
    """Perform a readlink() system call. Return a string representing the path to which a symbolic link points.
    :param path: (str) must be protocol relative path
    """
    # Same assert fix as stat(): validate the type instead of using the
    # isinstance() result as the assertion message.
    assert path and isinstance(path, str)
    self.setinfo(self.norm_path(path), {})
    return self.getinfo(path, get_actual=True, namespaces=["link"]).raw["link"][
        "target"
    ]
def format_list(self, basedir, listing):
    """
    Return an iterator object that yields the entries of given directory emulating the "/bin/ls -lA" UNIX command
    output.
    This is how output should appear:
    -rw-rw-rw- 1 owner group 7045120 Sep 02 3:47 music.mp3
    drwxrwxrwx 1 owner group 0 Aug 31 18:50 e-books
    -rw-rw-rw- 1 owner group 380 Sep 02 3:40 module.py
    :param basedir: (str) must be protocol relative path
    :param listing: (list) list of files to needed for output.
    """
    assert isinstance(basedir, str), basedir
    # Ensure exactly one trailing slash. The original expression
    # (`basedir += "/" if basedir[-1:] != "/" else basedir`) appended the
    # whole of basedir to itself whenever it already ended with "/".
    if basedir[-1:] != "/":
        basedir += "/"
    now = time.time()
    for basename in listing:
        file = self.norm_path(
            basedir + basename
        )  # for e.g. basedir = '/' and basename = test.png.
        # So file is '/test.png'
        try:
            st = self.stat(file)
        except (fs.errors.FSError, FilesystemError):
            raise
        permission = filemode(Permissions.create(st["st_mode"]).mode)
        # filemode() leaves the type char as '?' - patch it from the vfs.
        if self.isdir(file):
            permission = permission.replace("?", "d")
        elif self.isfile(file):
            permission = permission.replace("?", "-")
        elif self.islink(file):
            permission = permission.replace("?", "l")
        nlinks = st["st_nlink"]
        size = st["st_size"]  # file-size
        uname = self.getinfo(path=file, namespaces=["access"]).user
        # |-> pwd.getpwuid(st['st_uid']).pw_name would fetch the user_name of the actual owner of these files.
        gname = self.getinfo(path=file, namespaces=["access"]).group
        # |-> grp.getgrgid(st['st_gid']).gr_name would fetch the user_name of the actual of these files.
        mtime = time.gmtime(
            fs.time.datetime_to_epoch(
                self.getinfo(file, namespaces=["details"]).modified
            )
        )
        # files older than ~6 months show the year instead of the time.
        if (now - st["st_mtime"]) > (180 * 24 * 60 * 60):
            fmtstr = "%d %Y"
        else:
            fmtstr = "%d %H:%M"
        mtimestr = "%s %s" % (
            months_map[mtime.tm_mon],
            time.strftime(fmtstr, mtime),
        )
        if (st["st_mode"] & 61440) == stat.S_IFLNK:
            # if the file is a symlink, resolve it, e.g. "symlink -> realfile"
            basename = basename + " -> " + self.readlink(file)
        # formatting is matched with proftpd ls output
        line = "%s %3s %-8s %-8s %8s %s %s\r\n" % (
            permission,
            nlinks,
            uname,
            gname,
            size,
            mtimestr,
            basename,
        )
        yield line
def getmtime(self, path):
        """Return the last modified time of *path*.
        NOTE(review): despite the historical wording ("seconds since the
        epoch"), this returns the 'details' namespace ``modified`` value,
        which is a datetime - confirm callers expect that.
        """
        self.setinfo(self.norm_path(path), {})
        return self.getinfo(path, namespaces=["details"]).modified
# FIXME: refactor to os.access. Mode is missing from the params
def access(
        self, path: str, name_or_id: Union[int, str] = None, required_perms: str = None
    ):
        """
        Returns bool w.r.t the a user/group has permissions to read/write/execute a file.
        This is a wrapper around os.access. But it would accept name or id instead of of just ids.
        Also it can accept required permissions in the form of strings rather than os.F_OK, os.R_OK, os.W_OK etc.
        *Implementation Note*: First we would check whether the current user has the required permissions. If not,
        then we check the group to which this user belongs to. Finally if the user's group also does not meet the
        perms we check for other permissions.
        """
        try:
            _path = self.norm_path(path)
            _perms = self.getinfo(_path, namespaces=["access"]).permissions
            _uid = self.getinfo(_path, namespaces=["access"]).uid
            _gid = self.getinfo(_path, namespaces=["access"]).gid
            if isinstance(required_perms, int):
                # NOTE(review): only F_OK/R_OK/W_OK are mapped here; an
                # X_OK argument falls through still holding the int value -
                # confirm whether 'x' mapping is missing.
                if required_perms == F_OK:
                    return True
                elif required_perms == R_OK:
                    required_perms = "r"
                elif required_perms == W_OK:
                    required_perms = "w"
            # first we need to find the uid - in case username is provided instead of uid.
            if isinstance(name_or_id, str):
                # must be username or group name
                # fetch the uid/gid of that uname/gname
                # NOTE(review): only _users is searched, so passing a group
                # *name* raises ValueError (caught below, returning False).
                [_id] = [k for k, v in self._users.items() if v == {"user": name_or_id}]
            else:
                _id = name_or_id
            # find the gid of this user.
            _grp_id = None
            # FIXME: The above operation can cause incorrect results if one user belongs to more than one group.
            for key, values in self._user_grps.items():
                if _id in values:
                    _grp_id = key
            if _id is not None:
                if _id == _uid:
                    # provided id is the owner
                    return all([_perms.check("u_" + i) for i in list(required_perms)])
                elif _grp_id and (_grp_id == _gid):
                    # provided id is not the owner but belongs to that grp.
                    # That means we would check it's group permissions.
                    return all([_perms.check("g_" + i) for i in list(required_perms)])
                else:
                    # id not equal to either in uid/gid
                    # check other permissions
                    return all([_perms.check("o_" + i) for i in list(required_perms)])
        except (ValueError, AssertionError, KeyError, fs.errors.FSError) as err:
            logger.info("Exception has occurred while doing fs.access: {}".format(err))
            logger.info("Returning False to avoid conpot crash")
            return False
def get_permissions(self, path):
        """Get permissions for a particular user on a particular file/directory in 'rwxrx---' format"""
        _path = self.norm_path(path)
        # refresh cached metadata before reading it back.
        self.setinfo(self.norm_path(path), {})
        _perms = self.getinfo(_path, namespaces=["access"]).permissions
        return _perms.as_str()
def chmod(self, path: str, mode: oct, recursive: bool = False) -> None:
        """Change file/directory mode.
        :param path: Path to be modified.
        :param mode: Operating-system mode bitfield. Must be in octal's form.
        Eg: chmod with (mode=0o755) = Permissions(user='rwx', group='rx', other='rx')
        :param recursive: If the path is directory, setting recursive to true would change permissions to sub folders
        and contained files.
        :type recursive: bool
        """
        assert isinstance(mode, str) or isinstance(mode, int)
        if isinstance(mode, str):
            # convert mode to octal
            mode = int(mode, 8)
        chmod_cache_info = {"access": {"permissions": Permissions.create(mode)}}
        if self.isdir(path) and recursive:
            if path != "/":
                self.setinfo(path, chmod_cache_info)
            # create a walker
            sub_dir = self.opendir(path)
            for _path, _ in sub_dir.walk.info():
                # NOTE(review): concatenation assumes walker paths start
                # with '/' and *path* has no trailing slash - confirm.
                self.setinfo(path + _path, chmod_cache_info)
            sub_dir.close()
        else:
            self.setinfo(path, chmod_cache_info)
def mount_fs(
        self,
        dst_path: str,
        fs_url: str = None,
        owner_uid: Optional[int] = 0,
        group_gid: Optional[int] = 0,
        perms: Optional[Union[Permissions, int]] = 0o755,
    ) -> subfs.SubFS:
        """
        To be called to mount individual filesystems.
        :param fs_url: Location/URL for the file system that is to be mounted.
        :param dst_path: Place in the Conpot's file system where the files would be placed. This should be relative to
        FS root.
        :param owner_uid: The owner `user` **UID** of the directory and the sub directory. Default is root/
        :param group_gid: The group 'group` to which the directory beings. Defaults to root.
        :param perms: Permission UMASK
        :raises fs.errors.DirectoryExpected: when dst_path does not exist.
        """
        path = self.norm_path(dst_path)
        if self.exists(path) and self.isdir(path):
            if not fs_url:
                # no source fs: just expose the existing directory as a jail.
                new_dir = self.create_jail(path)
            else:
                temp_fs = open_fs(fs_url=fs_url)
                with temp_fs.lock():
                    new_dir = self.opendir(
                        self.norm_path(dst_path), factory=SubAbstractFS
                    )
                    mirror.mirror(src_fs=temp_fs, dst_fs=new_dir)
                    # rebuild the cache by walking the whole vfs after the
                    # mirror copy (can be expensive for large trees).
                    self._cache.update(
                        {
                            path: info
                            for path, info in self.walk.info(
                                namespaces=["basic", "access", "details", "stat"]
                            )
                        }
                    )
                    del temp_fs  # delete the instance since no longer required
            new_dir.default_uid, new_dir.default_gid = owner_uid, group_gid
            new_dir.chown("/", uid=owner_uid, gid=group_gid, recursive=True)
            new_dir.chmod("/", mode=perms, recursive=True)
            return new_dir
        else:
            raise fs.errors.DirectoryExpected("{} path does not exist".format(path))
def __getattribute__(self, attr):
        # Restrict access to methods that are implemented in AbstractFS class - Calling methods from base class may
        # not be safe to use.
        # FIXME: Need to fix these for only allow methods that are defined here.
        if not WrapFS:
            return
        # names of plain functions defined directly on WrapFS.
        method_list = [x for x, y in WrapFS.__dict__.items() if type(y) == FunctionType]
        if attr in method_list:
            # NOTE(review): the `or attr not in [...]` clause admits every
            # WrapFS method except 'match'/'settext', so the whitelist is
            # mostly permissive - confirm this is the intended gate.
            if attr in super(AbstractFS, self).__getattribute__(
                "__dict__"
            ).keys() or attr not in ["match", "settext"]:
                # These methods have been overwritten and are safe to use.
                try:
                    return super(AbstractFS, self).__getattribute__(attr)
                except KeyError as ke:
                    # cache misses inside delegated calls surface as KeyError.
                    raise FilesystemError("Invalid Path : {}".format(ke))
            else:
                raise NotImplementedError(
                    "The method requested is not supported by Conpot's VFS"
                )
        else:
            try:
                return super(AbstractFS, self).__getattribute__(attr)
            except KeyError as ke:
                raise FilesystemError("Invalid Path : {}".format(ke))
25,504 | protocol_wrapper.py | mushorg_conpot/conpot/core/protocol_wrapper.py | # Copyright (C) 2018 Abhinav Saxena <xandfury@gmail.com>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from conpot.core import get_interface
from datetime import datetime
core_interface = get_interface()
def conpot_protocol(cls):
    """Class decorator registering protocol servers with the core interface.

    Wraps *cls* so every instantiation is recorded in
    ``core_interface.protocols``, and any lookup of ``handle`` (invoked when
    a client connects) refreshes the honeypot's last-active timestamp.
    """

    class Wrapper(object):
        def __init__(self, *args, **kwargs):
            self.wrapped = cls(*args, **kwargs)
            self.cls = cls
            # Skip registration only for the Proxy class. The original used
            # substring matching (`__name__ not in "Proxy"`), which wrongly
            # skipped any class whose name is a substring of "Proxy".
            if self.cls.__name__ != "Proxy":
                if self.cls not in core_interface.protocols:
                    core_interface.protocols[self.cls] = []
                core_interface.protocols[self.cls].append(self.wrapped)
            self.__class__.__name__ = self.cls.__name__

        def __getattr__(self, name):
            if name == "handle":
                # assuming that handle function from a class is only called when a client tries to connect with an
                # enabled protocol, update the last_active (last_attacked attribute)
                # FIXME: No handle function in HTTPServer
                core_interface.last_active = datetime.now().strftime(
                    "%b %d %Y - %H:%M:%S"
                )
            return self.wrapped.__getattribute__(name)

        def __repr__(self):
            return self.wrapped.__repr__()

        __doc__ = cls.__doc__
        __module__ = cls.__module__

    return Wrapper
| 2,018 | Python | .py | 43 | 39.093023 | 114 | 0.644784 | mushorg/conpot | 1,224 | 413 | 115 | GPL-2.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,505 | attack_session.py | mushorg_conpot/conpot/core/attack_session.py | # Copyright (C) 2014 Johnny Vestergaard <jkv@unixcluster.dk>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import logging
import uuid
from datetime import datetime
logger = logging.getLogger(__name__)
# one instance per connection
class AttackSession(object):
    """One attacker connection: identity, endpoints and a timed event log."""
    def __init__(
        self,
        protocol,
        source_ip,
        source_port,
        destination_ip,
        destination_port,
        log_queue,
    ):
        # shared queue consumed by the loggers; every event is pushed here.
        self.log_queue = log_queue
        self.id = uuid.uuid4()
        logger.info("New %s session from %s (%s)", protocol, source_ip, self.id)
        self.protocol = protocol
        self.source_ip = source_ip
        self.source_port = source_port
        self.destination_ip = destination_ip
        self.destination_port = destination_port
        self.timestamp = datetime.utcnow()
        self.public_ip = None
        # event payloads keyed by milliseconds elapsed since session start.
        self.data = dict()
        self._ended = False
    def _dump_data(self, data):
        # flatten session metadata plus *data* into one loggable record.
        return {
            "id": self.id,
            "remote": (self.source_ip, self.source_port),
            "src_ip": self.source_ip,
            "src_port": self.source_port,
            "local": (self.destination_ip, self.destination_port),
            "dst_ip": self.destination_ip,
            "dst_port": self.destination_port,
            "data_type": self.protocol,
            "timestamp": self.timestamp,
            "public_ip": self.public_ip,
            "data": data,
        }
    def add_event(self, event_data):
        """Record an event, keyed by ms elapsed since session start."""
        sec_elapsed = (datetime.utcnow() - self.timestamp).total_seconds()
        elapse_ms = int(sec_elapsed * 1000)
        # avoid clobbering an event logged in the same millisecond.
        while elapse_ms in self.data:
            elapse_ms += 1
        self.data[elapse_ms] = event_data
        # TODO: We should only log the session when it is finished
        self.log_queue.put(self._dump_data(event_data))
    def dump(self):
        """Return the full session record with every collected event."""
        return self._dump_data(self.data)
    def set_ended(self):
        # mark the session finished (flag is currently only written here).
        self._ended = True
| 2,606 | Python | .py | 69 | 30.811594 | 80 | 0.645289 | mushorg/conpot | 1,224 | 413 | 115 | GPL-2.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,506 | session_manager.py | mushorg_conpot/conpot/core/session_manager.py | # Copyright (C) 2014 Johnny Vestergaard <jkv@unixcluster.dk>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from gevent.queue import Queue
from conpot.core.attack_session import AttackSession
# one instance only
class SessionManager:
    """Tracks AttackSession objects and the shared log queue (one instance)."""
    def __init__(self):
        self._sessions = []
        self.log_queue = Queue()
    def _find_sessions(self, protocol, source_ip):
        # reuse an existing session for the same protocol + source address.
        for session in self._sessions:
            if session.protocol == protocol:
                if session.source_ip == source_ip:
                    return session
        return None
    def get_session(
        self,
        protocol,
        source_ip,
        source_port,
        destination_ip=None,
        destination_port=None,
    ):
        """Return the existing session for (protocol, source_ip), or create one."""
        # around here we would inject dependencies into the attack session
        attack_session = self._find_sessions(protocol, source_ip)
        if not attack_session:
            attack_session = AttackSession(
                protocol,
                source_ip,
                source_port,
                destination_ip,
                destination_port,
                self.log_queue,
            )
            self._sessions.append(attack_session)
        return attack_session
    def purge_sessions(self):
        """Drop all queued (not yet consumed) log entries."""
        # there is no native purge/clear mechanism for gevent queues, so...
        self.log_queue = Queue()
| 2,019 | Python | .py | 53 | 30.773585 | 75 | 0.661052 | mushorg/conpot | 1,224 | 413 | 115 | GPL-2.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,507 | __init__.py | mushorg_conpot/conpot/core/__init__.py | # Copyright (C) 2014 Johnny Vestergaard <jkv@unixcluster.dk>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from typing import Tuple, Union, Optional
from .databus import Databus
from .internal_interface import Interface
from .session_manager import SessionManager
from .virtual_fs import VirtualFS, AbstractFS
databus = Databus()
sessionManager = SessionManager()
virtualFS = VirtualFS()
core_interface = Interface()
# databus related --
def get_sessionManager():
    """Return the module-level SessionManager singleton."""
    return sessionManager
def get_databus():
    """Return the module-level Databus singleton."""
    return databus
def get_session(*args, **kwargs):
    """Fetch/create an AttackSession via the global SessionManager."""
    return sessionManager.get_session(*args, **kwargs)
# file-system related --
def initialize_vfs(fs_path=None, data_fs_path=None, temp_dir=None):
    """Initialize the global virtual file system (delegates to VirtualFS)."""
    return virtualFS.initialize_vfs(
        fs_path=fs_path, data_fs_path=data_fs_path, temp_dir=temp_dir
    )
def add_protocol(
    protocol_name: str,
    data_fs_subdir: str,
    vfs_dst_path: str,
    src_path=None,
    owner_uid: Optional[int] = 0,
    group_gid: Optional[int] = 0,
    perms: Optional[oct] = 0o755,
) -> Tuple:
    """Register a protocol's file-system view (delegates to VirtualFS.add_protocol)."""
    return virtualFS.add_protocol(
        protocol_name,
        data_fs_subdir,
        vfs_dst_path,
        src_path,
        owner_uid,
        group_gid,
        perms,
    )
def get_vfs(protocol_name: Optional[str] = None) -> Union[AbstractFS, Tuple]:
    """
    Get the File System.
    :param protocol_name: Name of the protocol to be fetched
    :return: the protocol's sub filesystem tuple when a name is given,
        otherwise the whole protocol fs.
    """
    if protocol_name:
        return virtualFS._conpot_vfs[protocol_name]
    else:
        return virtualFS.protocol_fs
def close_fs():
    """Close the file system. Remove all the temp files."""
    # delegates teardown (and temp-dir cleanup) to the VirtualFS instance.
    virtualFS.close()
# internal-interface related --
def get_interface():
    """Return the module-level core Interface instance."""
    return globals()["core_interface"]
| 2,410 | Python | .py | 70 | 30.628571 | 77 | 0.722366 | mushorg/conpot | 1,224 | 413 | 115 | GPL-2.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,508 | internal_interface.py | mushorg_conpot/conpot/core/internal_interface.py | # Copyright (C) 2015 Lukas Rist <glaslos@gmail.com>
#
# Rewritten by Abhinav Saxena <xandfury@gmail.com>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from datetime import datetime
class DotDict(dict):
    """Dictionary whose keys are also readable as attributes (``d.key``)."""

    def __getattr__(self, attr):
        # Attribute reads fall through to item lookup; a missing key
        # therefore surfaces as KeyError rather than AttributeError.
        return self[attr]
class Network(object):
    """Mutable bag of network facts (public IP, MAC address, ...)."""

    def __init__(self):
        self.public_ip = None
        self.hw_address = None

    # create attributes to interface on the fly.
    def __setattr__(self, attr, value):
        object.__setattr__(self, attr, value)

    # return default value in case an attribute cannot be found in the interface
    def __getattr__(self, attr):
        # Original message read "attribute does exist", inverting the meaning.
        raise AttributeError("Interface.Network attribute does not exist")
class Interface(object):
    """Conpot's internal interface"""

    def __init__(self):
        self.network = Network()
        self.config = None
        self.protocols = DotDict()
        self.last_active = datetime.now().strftime("%b %d %Y - %H:%M:%S")

    @property
    def enabled(self):
        # protocols whose registered instance list is not None
        return [k for k in self.protocols.keys() if self.protocols[k] is not None]

    def __setattr__(self, attr, value):
        object.__setattr__(self, attr, value)

    def __getattr__(self, attr):
        # Original message read "attribute does exist", inverting the meaning.
        raise AttributeError(
            "Interface attribute does not exist. Please check assignment"
        )

    # FIXME: Do we really need this?
    def __repr__(self):
        s = """ Conpot: ICS/SCADA Honeypot
    (c) 2018, MushMush Foundation.
    ---------------------------------------------
    (1) Using Config : {}
    (2) Enabled Protocols : {}
    (3) Last Active (Attacked) on : {}""".format(
            self.config, self.enabled, self.last_active
        )
        return s

    __str__ = __repr__
| 2,491 | Python | .py | 58 | 36.689655 | 87 | 0.626964 | mushorg/conpot | 1,224 | 413 | 115 | GPL-2.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,509 | fs_utils.py | mushorg_conpot/conpot/core/fs_utils.py | # Copyright (C) 2018 Abhinav Saxena <xandfury@gmail.com>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
Utils related to ConpotVFS
"""
import fs
from typing import Optional, Union
from fs.permissions import Permissions
import typing
from fs.subfs import SubFS
from fs.error_tools import unwrap_errors
import logging
_F = typing.TypeVar("_F", bound="FS", covariant=True)
logger = logging.getLogger(__name__)
class FilesystemError(fs.errors.FSError):
    """Generic error raised by Conpot's virtual filesystem layer."""
class FSOperationNotPermitted(fs.errors.FSError):
    """Raised when a caller attempts a filesystem operation Conpot forbids."""
def copy_files(source, dest, buffer_size=1024 * 1024):
    """
    Copy a file from source to dest. source and dest must be file-like objects.
    """
    # Pump fixed-size chunks until the source reports a falsy (empty) read.
    chunk = source.read(buffer_size)
    while chunk:
        dest.write(chunk)
        chunk = source.read(buffer_size)
class _custom_conpot_file(object):
    """File wrapper that keeps the owning AbstractFS cache in sync on close()."""
    def __init__(
        self,
        file_system,
        parent_fs,
        path,
        mode,
        buffering=-1,
        encoding=None,
        newline="",
        line_buffering=False,
    ):
        # file_system owns the metadata cache; parent_fs performs real I/O.
        self.file_system = file_system
        self._path = path
        self._file = parent_fs.open(
            path=self._path,
            mode=mode,
            buffering=buffering,
            encoding=encoding,
            newline=newline,
            line_buffering=line_buffering,
        )
        self.mode = self._file.mode
    def __getattr__(self, item):
        # delegate unknown attributes to the underlying file object.
        return getattr(self._file, item)
    def __repr__(self):
        return "<conpot_fs cached file: {}>".format(self._file.__repr__())
    __str__ = __repr__
    @property
    def get_file(self):
        # expose the raw underlying file object.
        return self._file
    def close(self):
        """Close the file; after any write/append/create (or while the cache
        is still being built) re-sync the cache entry and re-apply the fs
        default ownership and permissions."""
        self._file.close()
        if (
            ("w" in self.mode)
            or ("a" in self.mode)
            or (self.file_system.built_cache is False)
            or ("x" in self.mode)
        ):
            self.file_system._cache.update(
                {
                    self._path: self.file_system.getinfo(
                        self._path,
                        get_actual=True,
                        namespaces=["basic", "access", "details", "stat"],
                    )
                }
            )
            self.file_system.chown(
                self._path, self.file_system.default_uid, self.file_system.default_gid
            )
            self.file_system.chmod(self._path, self.file_system.default_perms)
        logger.debug("Updating modified/access time")
        self.file_system.setinfo(self._path, {})
    def __enter__(self):
        return self._file
    def __exit__(self, exc_type, exc_value, traceback):
        logger.debug("Exiting file at : {}".format(self._path))
        self.close()
class SubAbstractFS(SubFS[_F], typing.Generic[_F]):
    """
    Creates a chroot jail sub file system. Each protocol can have an instance of this class. Use AbstractFS's
    create_jail method to access this. You won't be able to cd into an `up` directory.
    """

    def __init__(self, parent_fs, path):
        # Copy the parent's defaults so files created inside the jail get the
        # same owner/group/permissions as the rest of the virtual fs.
        self.parent_fs = parent_fs
        self._default_uid, self._default_gid = (
            parent_fs.default_uid,
            parent_fs.default_gid,
        )
        self._default_perms = parent_fs.default_perms
        # POSIX-style alias for the settimes method provided by SubFS.
        self.utime = self.settimes
        super(SubAbstractFS, self).__init__(parent_fs, path)

    def getinfo(self, path: str, get_actual: bool = False, namespaces=None):
        """Fetch file info from the parent fs, translating the jailed path."""
        _fs, _path = self.delegate_path(path)
        with unwrap_errors(path):
            return _fs.getinfo(_path, get_actual=get_actual, namespaces=namespaces)

    # ------- Setters and getters for default users/grps/perms
    @property
    def default_perms(self):
        # Permission bits applied to newly created files/directories.
        return self._default_perms

    @default_perms.setter
    def default_perms(self, perms):
        try:
            assert isinstance(perms, Permissions)
            self._default_perms = perms
        except AssertionError:
            raise FilesystemError(
                "Permissions provided must be of valid type (fs.permissions.Permission)"
            )

    @property
    def default_uid(self):
        # Owner uid used for files created inside the jail.
        return self._default_uid

    @default_uid.setter
    def default_uid(self, _uid):
        # Only uids already registered with the parent fs are accepted.
        if _uid in self.parent_fs._users.keys():
            self._default_uid = _uid
        else:
            raise FilesystemError("User with id {} not registered with fs".format(_uid))

    @property
    def default_gid(self):
        # Group gid used for files created inside the jail.
        return self._default_gid

    @default_gid.setter
    def default_gid(self, _gid):
        # Only gids already registered with the parent fs are accepted.
        if _gid in self.parent_fs._grps.keys():
            self._default_gid = _gid
        else:
            raise FilesystemError(
                "Group with id {} not registered with fs".format(_gid)
            )

    # ---- Other utilites
    @property
    def default_user(self):
        # User name corresponding to default_uid.
        return self.parent_fs._users[self.default_uid]["user"]

    @property
    def default_group(self):
        # Group name corresponding to default_gid.
        return self.parent_fs._grps[self.default_gid]["group"]

    def getcwd(self):
        # The jail's root directory, expressed relative to the parent fs.
        return self._sub_dir

    @property
    def root(self):
        # Path of the jail as seen from the parent fs root.
        return self.parent_fs.root + self.getcwd()

    def getmtime(self, path):
        """Modification time of ``path``, delegated to the parent fs."""
        _fs, _path = self.delegate_path(path)
        with unwrap_errors(path):
            return _fs.getmtime(_path)

    def format_list(self, basedir, listing):
        """Format a directory listing, delegated to the parent fs."""
        _fs, _path = self.delegate_path(basedir)
        with unwrap_errors(basedir):
            return _fs.format_list(_path, listing)

    def check_access(self, path=None, user=None, perms=None):
        """Check whether ``user`` has ``perms`` on ``path`` (parent fs logic)."""
        _fs, _path = self.delegate_path(path)
        with unwrap_errors(path):
            return _fs.check_access(_path, user, perms)

    def chown(
        self, fs_path: str, uid: int, gid: int, recursive: Optional[bool] = False
    ):
        """Change owner/group of ``fs_path``, delegated to the parent fs."""
        _fs, _path = self.delegate_path(fs_path)
        with unwrap_errors(fs_path):
            return _fs.chown(_path, uid, gid, recursive)

    def chmod(self, path: str, mode: oct, recursive: bool = False) -> None:
        """Change permission bits of ``path``, delegated to the parent fs."""
        _fs, _path = self.delegate_path(path)
        with unwrap_errors(path):
            return _fs.chmod(_path, mode, recursive)

    def access(
        self, path: str, name_or_id: Union[int, str] = None, required_perms: str = None
    ):
        """Access check by user name or id, delegated to the parent fs."""
        _fs, _path = self.delegate_path(path)
        with unwrap_errors(path):
            return _fs.access(_path, name_or_id, required_perms)

    def stat(self, path):
        """os.stat-style info for ``path``, delegated to the parent fs."""
        _fs, _path = self.delegate_path(path)
        with unwrap_errors(path):
            return _fs.stat(_path)

    def readlink(self, path):
        """Resolve a symlink target, delegated to the parent fs."""
        _fs, _path = self.delegate_path(path)
        with unwrap_errors(path):
            return _fs.readlink(_path)

    def get_permissions(self, path):
        """Permissions of ``path``, delegated to the parent fs."""
        _fs, _path = self.delegate_path(path)
        with unwrap_errors(path):
            return _fs.get_permissions(_path)

    def removedir(self, path, rf=False):
        """Remove a directory, delegated to the parent fs.

        NOTE(review): the ``rf`` flag is accepted but not forwarded to the
        parent fs -- confirm whether recursive removal was intended.
        """
        _fs, _path = self.delegate_path(path)
        with unwrap_errors(path):
            return _fs.removedir(_path)

    def remove(self, path):
        """Remove a file, delegated to the parent fs."""
        _fs, _path = self.delegate_path(path)
        with unwrap_errors(path):
            return _fs.remove(_path)

    def move(self, src_path, dst_path, overwrite=True):
        """Move/rename within the jail, delegated to the parent fs."""
        _fs, _src_path = self.delegate_path(src_path)
        _, _dst_path = self.delegate_path(dst_path)
        with unwrap_errors({_src_path: src_path, _dst_path: dst_path}):
            return _fs.move(_src_path, _dst_path, overwrite=overwrite)

    def __getattr__(self, item):
        # Forward a whitelisted set of helpers to the parent fs; everything
        # else is explicitly unsupported so jails cannot escape via
        # unanticipated parent-fs methods.
        if hasattr(self.parent_fs, item) and item in {
            "_cache",
            "create_group",
            "register_user",
            "take_snapshot",
            "norm_path",
            "users",
            "groups",
            "add_users_to_group",
            "check_access",
        }:
            return getattr(self.parent_fs, item)
        else:
            raise NotImplementedError(
                "Conpot's File System does not currently support method: {}".format(
                    item
                )
            )
| 8,783 | Python | .py | 235 | 28.804255 | 109 | 0.602517 | mushorg/conpot | 1,224 | 413 | 115 | GPL-2.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,510 | log_worker.py | mushorg_conpot/conpot/core/log_worker.py | # Copyright (C) 2014 Lukas Rist <glaslos@gmail.com>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import json
import logging
import time
from datetime import datetime
import configparser
from gevent.queue import Empty
from conpot.core.loggers.sqlite_log import SQLiteLogger
from conpot.core.loggers.hpfriends import HPFriendsLogger
from conpot.core.loggers.syslog import SysLogger
from conpot.core.loggers.taxii_log import TaxiiLogger
from conpot.core.loggers.json_log import JsonLogger
from .loggers.helpers import json_default
logger = logging.getLogger(__name__)
class LogWorker(object):
    """Fans out queued honeypot events to every enabled log backend.

    Backends (SQLite, JSON file, HPFriends, syslog, TAXII) are instantiated
    from the ``config`` sections of the same name. ``start()`` consumes events
    from the session manager's log queue until ``stop()`` is called; idle
    periods are used to expire stale sessions.
    """

    def __init__(self, config, dom, session_manager, public_ip):
        import ast  # local import: only needed for hpfriends channel parsing

        self.config = config
        self.log_queue = session_manager.log_queue
        self.session_manager = session_manager
        self.sqlite_logger = None
        self.json_logger = None
        self.friends_feeder = None
        self.syslog_client = None
        self.public_ip = public_ip
        self.taxii_logger = None
        if config.getboolean("sqlite", "enabled"):
            self.sqlite_logger = SQLiteLogger()
        if config.getboolean("json", "enabled"):
            filename = config.get("json", "filename")
            sensorid = config.get("common", "sensorid")
            self.json_logger = JsonLogger(filename, sensorid, public_ip)
        if config.getboolean("hpfriends", "enabled"):
            host = config.get("hpfriends", "host")
            port = config.getint("hpfriends", "port")
            ident = config.get("hpfriends", "ident")
            secret = config.get("hpfriends", "secret")
            # literal_eval parses the configured list/tuple literal without
            # executing arbitrary code the way eval() would.
            channels = ast.literal_eval(config.get("hpfriends", "channels"))
            try:
                self.friends_feeder = HPFriendsLogger(
                    host, port, ident, secret, channels
                )
            except Exception as e:
                logger.exception(e)
                self.friends_feeder = None
        if config.getboolean("syslog", "enabled"):
            host = config.get("syslog", "host")
            port = config.getint("syslog", "port")
            facility = config.get("syslog", "facility")
            logdevice = config.get("syslog", "device")
            logsocket = config.get("syslog", "socket")
            self.syslog_client = SysLogger(host, port, facility, logdevice, logsocket)
        if config.getboolean("taxii", "enabled"):
            # TODO: support for certificates
            self.taxii_logger = TaxiiLogger(config, dom)
        self.enabled = True

    def _process_sessions(self):
        """End and drop sessions that have been idle longer than the timeout."""
        sessions = self.session_manager._sessions
        try:
            session_timeout = self.config.get("session", "timeout")
        except (configparser.NoSectionError, configparser.NoOptionError):
            session_timeout = 5  # default timeout in seconds
        # Iterate over a snapshot: removing an element from the list while
        # iterating it would silently skip the session that follows each
        # removed one.
        for session in list(sessions):
            if len(session.data) > 0:
                # session.data keys are millisecond offsets from session start
                sec_last_event = max(session.data) / 1000
            else:
                sec_last_event = 0
            sec_session_start = time.mktime(session.timestamp.timetuple())
            sec_now = time.mktime(datetime.utcnow().timetuple())
            if (sec_now - (sec_session_start + sec_last_event)) >= float(
                session_timeout
            ):
                # TODO: We need to close sockets in this case
                logger.info("Session timed out: %s", session.id)
                session.set_ended()
                sessions.remove(session)

    def start(self):
        """Consume and dispatch events; expire sessions while the queue is idle."""
        self.enabled = True
        while self.enabled:
            try:
                event = self.log_queue.get(timeout=2)
            except Empty:
                self._process_sessions()
            else:
                if self.public_ip:
                    event["public_ip"] = self.public_ip
                if self.friends_feeder:
                    self.friends_feeder.log(json.dumps(event, default=json_default))
                if self.sqlite_logger:
                    self.sqlite_logger.log(event)
                if self.syslog_client:
                    self.syslog_client.log(event)
                if self.taxii_logger:
                    self.taxii_logger.log(event)
                if self.json_logger:
                    self.json_logger.log(event)

    def stop(self):
        """Signal the start() loop to exit after the current iteration."""
        self.enabled = False
| 4,942 | Python | .py | 112 | 34.044643 | 86 | 0.620295 | mushorg/conpot | 1,224 | 413 | 115 | GPL-2.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,511 | json_log.py | mushorg_conpot/conpot/core/loggers/json_log.py | # Copyright (C) 2015 Danilo Massa
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import json
from .helpers import json_default
class JsonLogger(object):
    """Append-only JSON-lines event logger.

    Each logged event becomes one JSON object per line in ``filename``,
    flushed immediately so entries survive a crash.
    """

    def __init__(self, filename, sensorid, public_ip):
        self.fileHandle = open(filename, "a")
        self.sensorid = sensorid
        self.public_ip = public_ip

    def log(self, event):
        """Serialize one event dict and append it to the log file."""
        payload = event["data"]
        record = {
            "timestamp": event["timestamp"].isoformat(),
            "sensorid": self.sensorid,
            "id": event["id"],
            "src_ip": event["remote"][0],
            "src_port": event["remote"][1],
            "dst_ip": event["local"][0],
            "dst_port": event["local"][1],
            "public_ip": self.public_ip,
            "data_type": event["data_type"],
            "request": payload.get("request"),
            "response": payload.get("response"),
            "event_type": payload.get("type"),
        }
        self.fileHandle.write(json.dumps(record, default=json_default) + "\n")
        self.fileHandle.flush()
25,512 | taxii_log.py | mushorg_conpot/conpot/core/loggers/taxii_log.py | # Copyright (C) 2013 Johnny Vestergaard <jkv@unixcluster.dk>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import logging
import libtaxii
import libtaxii.messages
from libtaxii.messages_11 import ContentBlock, InboxMessage, generate_message_id
from libtaxii.clients import HttpClient
from conpot.core.loggers.stix_transform import StixTransformer
logger = logging.getLogger(__name__)
class TaxiiLogger(object):
    """Pushes Conpot events, converted to STIX XML, to a TAXII inbox service."""

    def __init__(self, config, dom):
        self.host = config.get("taxii", "host")
        self.port = config.getint("taxii", "port")
        self.inbox_path = config.get("taxii", "inbox_path")
        self.use_https = config.getboolean("taxii", "use_https")
        self.client = HttpClient()
        self.client.setProxy("noproxy")
        self.stix_transformer = StixTransformer(config, dom)

    def log(self, event):
        """Transform one event and deliver it; True on success, False otherwise."""
        # Conpot event -> STIX-compatible XML document.
        stix_doc = self.stix_transformer.transform(event)
        # Wrap the STIX payload in a TAXII inbox envelope.
        envelope = InboxMessage(
            message_id=generate_message_id(),
            content_blocks=[
                ContentBlock(libtaxii.CB_STIX_XML_11, stix_doc.encode("utf-8"))
            ],
        )
        # Deliver to the TAXII web service and inspect the status reply.
        response = self.client.callTaxiiService2(
            self.host,
            self.inbox_path,
            libtaxii.VID_TAXII_XML_11,
            envelope.to_xml(),
            self.port,
        )
        reply = libtaxii.get_message_from_http_response(response, "0")
        if reply.status_type == libtaxii.messages.ST_SUCCESS:
            return True
        logger.error(
            "Error while transmitting message to TAXII server: %s",
            reply.message,
        )
        return False
| 2,546 | Python | .py | 56 | 39.053571 | 87 | 0.697862 | mushorg/conpot | 1,224 | 413 | 115 | GPL-2.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,513 | hpfriends.py | mushorg_conpot/conpot/core/loggers/hpfriends.py | # Copyright (C) 2013 Lukas Rist <glaslos@gmail.com>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import socket
import hpfeeds
import gevent
import logging
logger = logging.getLogger(__name__)
class HPFriendsLogger(object):
    """Publishes Conpot events to HPFriends/hpfeeds channels.

    The initial broker connection is established asynchronously in a gevent
    greenlet; events logged before it completes are dropped with a warning.
    """

    def __init__(self, host, port, ident, secret, channels):
        self.host = host
        self.port = port
        self.ident = ident
        self.secret = secret
        self.channels = channels
        self.max_retires = 5  # NOTE(review): name is a typo for "max_retries"
        self._initial_connection_happend = False
        # Connect in the background so an unreachable broker does not block
        # startup of the rest of Conpot.
        self.greenlet = gevent.spawn(self._start_connection, host, port, ident, secret)

    def _start_connection(self, host, port, ident, secret):
        # if no initial connection to hpfeeds this will hang forever, reconnect=True only comes into play
        # when lost connection after the initial connect happend.
        self.hpc = hpfeeds.new(host, port, ident, secret)
        self._initial_connection_happend = True

    def log(self, data):
        """Publish ``data`` to the configured channels.

        Retries up to ``max_retires`` times on socket errors, re-running
        __init__ to rebuild the connection between attempts. Returns whatever
        ``hpc.wait()`` reports (an error message from the broker), or a
        warning string when the initial connection has not yet completed.
        """
        retries = 0
        if self._initial_connection_happend:
            # hpfeed lib supports passing list of channels
            while True:
                if retries >= self.max_retires:
                    break
                try:
                    self.hpc.publish(self.channels, data)
                except socket.error:
                    retries += 1
                    # Re-initialise the whole logger (spawns a fresh connect
                    # greenlet) and give it a moment before retrying.
                    self.__init__(
                        self.host, self.port, self.ident, self.secret, self.channels
                    )
                    gevent.sleep(0.5)
                else:
                    break
            error_msg = self.hpc.wait()
            return error_msg
        else:
            error_msg = (
                "Not logging event because initial hpfeeds connect has not happend yet."
            )
            logger.warning(error_msg)
            return error_msg
| 2,504 | Python | .py | 61 | 32.180328 | 105 | 0.632444 | mushorg/conpot | 1,224 | 413 | 115 | GPL-2.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,514 | stix_transform.py | mushorg_conpot/conpot/core/loggers/stix_transform.py | # Copyright (C) 2013 Johnny Vestergaard <jkv@unixcluster.dk>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import json
import ast
import textwrap
from mixbox import idgen
from mixbox.namespaces import Namespace
from stix.core import STIXHeader, STIXPackage
from stix.common import InformationSource
from stix.common.vocabs import VocabString
from stix.incident import Incident
from stix.incident.time import Time as StixTime
from stix.indicator import Indicator
from stix.ttp import TTP, VictimTargeting
from stix.extensions.identity.ciq_identity_3_0 import CIQIdentity3_0Instance
from cybox.core import Observable
from cybox.objects.socket_address_object import SocketAddress
from cybox.objects.address_object import Address
from cybox.objects.port_object import Port
from cybox.objects.network_connection_object import NetworkConnection
from cybox.objects.artifact_object import Artifact, ZlibCompression, Base64Encoding
from cybox.common import ToolInformationList, ToolInformation
from cybox.common import Time as CyboxTime
from datetime import datetime
import conpot
CONPOT_NAMESPACE = "mushmush-conpot"
CONPOT_NAMESPACE_URL = "http://mushmush.org/conpot"
class StixTransformer(object):
    """Converts Conpot event dicts into STIX 1.x XML incident packages."""

    def __init__(self, config, dom):
        # Well-known default ports per protocol; overridden below by the port
        # attributes found in the loaded Conpot template, when present.
        self.protocol_to_port_mapping = dict(
            modbus=502,
            snmp=161,
            http=80,
            s7comm=102,
        )
        port_path_list = [
            "//conpot_template/protocols/" + x + "/@port"
            for x in list(self.protocol_to_port_mapping.keys())
        ]
        for port_path in port_path_list:
            try:
                protocol_port = ast.literal_eval(dom.xpath(port_path)[0])
                protocol_name = port_path.rsplit("/", 2)[1]
                self.protocol_to_port_mapping[protocol_name] = protocol_port
            except IndexError:
                # Protocol not present in this template; keep the default port.
                continue
        # All generated STIX ids live in the Conpot namespace.
        conpot_namespace = Namespace(CONPOT_NAMESPACE_URL, CONPOT_NAMESPACE, "")
        idgen.set_id_namespace(conpot_namespace)

    def _add_header(self, stix_package, title, desc):
        """Attach a STIXHeader (title, description, produced-time) to the package."""
        stix_header = STIXHeader()
        stix_header.title = title
        stix_header.description = desc
        stix_header.information_source = InformationSource()
        stix_header.information_source.time = CyboxTime()
        stix_header.information_source.time.produced_time = datetime.now().isoformat()
        stix_package.stix_header = stix_header

    def transform(self, event):
        """Build and return a STIX package (XML string) describing ``event``."""
        stix_package = STIXPackage()
        self._add_header(
            stix_package,
            "Unauthorized traffic to honeypot",
            "Describes one or more honeypot incidents",
        )
        # One Incident per Conpot session, identified by the session id.
        incident = Incident(
            id_="%s:%s-%s" % (CONPOT_NAMESPACE, "incident", event["session_id"])
        )
        initial_time = StixTime()
        initial_time.initial_compromise = event["timestamp"].isoformat()
        incident.time = initial_time
        incident.title = "Conpot Event"
        incident.short_description = "Traffic to Conpot ICS honeypot"
        incident.add_category(VocabString(value="Scans/Probes/Attempted Access"))
        # Identify Conpot itself as the reporting tool.
        tool_list = ToolInformationList()
        tool_list.append(
            ToolInformation.from_dict(
                {
                    "name": "Conpot",
                    "vendor": "Conpot Team",
                    "version": conpot.__version__,
                    "description": textwrap.dedent(
                        "Conpot is a low interactive server side Industrial Control Systems "
                        "honeypot designed to be easy to deploy, modify and extend."
                    ),
                }
            )
        )
        incident.reporter = InformationSource(tools=tool_list)
        incident.add_discovery_method("Monitoring Service")
        incident.confidence = "High"
        # Victim Targeting by Sector
        ciq_identity = CIQIdentity3_0Instance()
        # identity_spec = STIXCIQIdentity3_0()
        # identity_spec.organisation_info = OrganisationInfo(industry_type="Electricity, Industrial Control Systems")
        # ciq_identity.specification = identity_spec
        ttp = TTP(
            title="Victim Targeting: Electricity Sector and Industrial Control System Sector"
        )
        ttp.victim_targeting = VictimTargeting()
        ttp.victim_targeting.identity = ciq_identity
        incident.leveraged_ttps.append(ttp)
        # Indicator: the observed network connection plus the raw payload.
        indicator = Indicator(title="Conpot Event")
        indicator.description = "Conpot network event"
        indicator.confidence = "High"
        source_port = Port.from_dict(
            {"port_value": event["remote"][1], "layer4_protocol": "tcp"}
        )
        # Destination port is looked up from the template's protocol config.
        dest_port = Port.from_dict(
            {
                "port_value": self.protocol_to_port_mapping[event["data_type"]],
                "layer4_protocol": "tcp",
            }
        )
        source_ip = Address.from_dict(
            {"address_value": event["remote"][0], "category": Address.CAT_IPV4}
        )
        dest_ip = Address.from_dict(
            {"address_value": event["public_ip"], "category": Address.CAT_IPV4}
        )
        source_address = SocketAddress.from_dict(
            {"ip_address": source_ip.to_dict(), "port": source_port.to_dict()}
        )
        dest_address = SocketAddress.from_dict(
            {"ip_address": dest_ip.to_dict(), "port": dest_port.to_dict()}
        )
        network_connection = NetworkConnection.from_dict(
            {
                "source_socket_address": source_address.to_dict(),
                "destination_socket_address": dest_address.to_dict(),
                "layer3_protocol": "IPv4",
                "layer4_protocol": "TCP",
                "layer7_protocol": event["data_type"],
                "source_tcp_state": "ESTABLISHED",
                "destination_tcp_state": "ESTABLISHED",
            }
        )
        indicator.add_observable(Observable(network_connection))
        # Raw event data travels as a zlib-compressed, base64-encoded artifact.
        artifact = Artifact()
        artifact.data = json.dumps(event["data"])
        artifact.packaging.append(ZlibCompression())
        artifact.packaging.append(Base64Encoding())
        indicator.add_observable(Observable(artifact))
        incident.related_indicators.append(indicator)
        stix_package.add_incident(incident)
        stix_package_xml = stix_package.to_xml()
        return stix_package_xml
| 7,022 | Python | .py | 159 | 35.289308 | 117 | 0.651761 | mushorg/conpot | 1,224 | 413 | 115 | GPL-2.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,515 | helpers.py | mushorg_conpot/conpot/core/loggers/helpers.py | from datetime import datetime
import uuid
def json_default(obj):
    """Fallback serializer for json.dumps(default=...).

    datetimes become ISO-8601 strings, UUIDs and bytes become their str()
    form, and anything else is silently serialized as null (None) instead of
    raising TypeError.
    """
    if isinstance(obj, datetime):
        return obj.isoformat()
    if isinstance(obj, (uuid.UUID, bytes)):
        return str(obj)
    return None
| 280 | Python | .py | 11 | 19.909091 | 36 | 0.670412 | mushorg/conpot | 1,224 | 413 | 115 | GPL-2.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,516 | syslog.py | mushorg_conpot/conpot/core/loggers/syslog.py | # Copyright (C) 2013 Daniel creo Haslinger <creo-conpot@blackmesa.at>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from logging.handlers import SysLogHandler
import logging
import socket
class SysLogger(object):
    """Installs a syslog handler on the root logger.

    With ``logsocket == "udp"`` a network SysLogHandler is attached; with
    ``"dev"`` a handler writing to the given log device is attached. Any
    other value installs nothing. Actual event delivery then happens through
    the normal logging machinery, so log() is a no-op.
    """

    def __init__(self, host, port, facility, logdevice, logsocket):
        root = logging.getLogger()
        transport = str(logsocket).lower()
        if transport == "udp":
            syslog_facility = getattr(SysLogHandler, "LOG_" + str(facility).upper())
            handler = SysLogHandler(
                address=(host, port),
                facility=syslog_facility,
                socktype=socket.SOCK_DGRAM,
            )
            root.addHandler(handler)
        elif transport == "dev":
            root.addHandler(SysLogHandler(logdevice))

    def log(self, data):
        # stub function since the additional handler has been added to the root loggers instance.
        pass
| 1,539 | Python | .py | 35 | 37.8 | 97 | 0.691127 | mushorg/conpot | 1,224 | 413 | 115 | GPL-2.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,517 | sqlite_log.py | mushorg_conpot/conpot/core/loggers/sqlite_log.py | # Copyright (C) 2013 Lukas Rist <glaslos@gmail.com>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import sqlite3
import pwd
import os
import platform
import grp
class SQLiteLogger(object):
    """Logs Conpot events to a local SQLite database file."""

    def _chown_db(self, path, uid_name="nobody", gid_name="nogroup"):
        """Create the db's parent directory and hand it to an unprivileged user.

        Requires sufficient privileges to chown, so this is expected to run
        while Conpot still holds elevated rights.
        """
        # Strip the filename: ownership is changed on the containing
        # directory, not on the database file itself.
        path = path.rpartition("/")[0]
        if not os.path.isdir(path):
            os.mkdir(path)
        # TODO: Have this in a central place
        wanted_uid = pwd.getpwnam(uid_name)[2]
        # special handling for os x. (getgrname has trouble with gid below 0)
        if platform.mac_ver()[0]:
            wanted_gid = -2
        else:
            wanted_gid = grp.getgrnam(gid_name)[2]
        os.chown(path, wanted_uid, wanted_gid)

    def __init__(self, db_path="logs/conpot.db"):
        self._chown_db(db_path)
        self.conn = sqlite3.connect(db_path)
        self._create_db()

    def _create_db(self):
        """Create the events table if it does not already exist."""
        cursor = self.conn.cursor()
        cursor.execute(
            """CREATE TABLE IF NOT EXISTS events
            (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                session TEXT,
                timestamp TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
                remote TEXT,
                protocol TEXT,
                request TEXT,
                response TEXT
            )"""
        )

    def log(self, event):
        """Insert one event row and return its rowid."""
        cursor = self.conn.cursor()
        cursor.execute(
            "INSERT INTO events(session, remote, protocol, request, response) VALUES (?, ?, ?, ?, ?)",
            (
                str(event["id"]),
                str(event["remote"]),
                event["data_type"],
                str(event["data"].get("request")),
                str(event["data"].get("response")),
            ),
        )
        self.conn.commit()
        return cursor.lastrowid
| 2,471 | Python | .py | 66 | 29.242424 | 102 | 0.609675 | mushorg/conpot | 1,224 | 413 | 115 | GPL-2.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,518 | kamstrup_prober.py | mushorg_conpot/bin/kamstrup_prober.py | # Copyright (C) 2014 Johnny Vestergaard <jkv@unixcluster.dk>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import logging
import socket
import json
from datetime import datetime
import calendar
import time
import argparse
import crc16
import xml.dom.minidom
from conpot.protocols.kamstrup_meter import kamstrup_constants
logger = logging.getLogger(__name__)
port_start_range = 1
port_end_range = 65535
default_comm_port = 63
class KamstrupRegisterCopier(object):
    """Reads raw register values from a Kamstrup meter over TCP.

    Implements just enough of the Kamstrup framing (REQUEST_MAGIC start byte,
    CRC16/XMODEM checksum, byte escaping, EOT_MAGIC terminator) to request a
    single register at a time.
    """

    def __init__(self, ip_address, port, comm_address):
        self._sock = None
        self.ip_address = ip_address
        self.port = port
        self.comm_address = comm_address
        self._connect()

    def _connect(self):
        """(Re)connect to the meter, retrying until a connection succeeds."""
        # Loop instead of recursing so a long outage cannot blow the stack.
        while True:
            logger.info("Connecting to {0}:{1}".format(self.ip_address, self.port))
            if self._sock is not None:
                self._sock.close()
                time.sleep(1)
            self._sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self._sock.settimeout(2)
            try:
                self._sock.connect((self.ip_address, self.port))
                return
            except socket.error as socket_err:
                logger.exception("Error while connecting: {0}".format(str(socket_err)))

    def get_register(self, register):
        """Request one register and return the raw (unescaped) response bytes."""
        message = [
            kamstrup_constants.REQUEST_MAGIC,
            self.comm_address,
            0x10,
            0x01,
            register >> 8,
            register & 0xFF,
        ]
        # The CRC covers everything after the start byte. crc16xmodem expects
        # bytes; the previous "".join(chr(...)) form only worked on Python 2.
        crc = crc16.crc16xmodem(bytes(message[1:]))
        message.append(crc >> 8)
        message.append(crc & 0xFF)
        # Escape reserved byte values in place (start byte excluded).
        message_length = len(message)
        y = 1
        while y < message_length:
            if message[y] in kamstrup_constants.NEED_ESCAPE:
                message[y] ^= 0xFF
                message.insert(y, kamstrup_constants.ESCAPE)
                y += 1
                message_length += 1
            y += 1
        message.append(kamstrup_constants.EOT_MAGIC)
        received_data = None
        while received_data is None:
            try:
                self._sock.send(bytearray(message))
                received_data = self._sock.recv(1024)
                received_data = bytearray(received_data)
            except socket.error as socket_err:
                logger.exception(
                    "Error while communicating: {0}".format(str(socket_err))
                )
                self._connect()
        # Strip escape bytes and restore the escaped values. Compare with ==,
        # not "is": bytearray items are ints and identity comparison only
        # works by accident of CPython's small-int caching.
        data_length = len(received_data)
        p = 0
        while p < data_length:
            if received_data[p] == kamstrup_constants.ESCAPE:
                del received_data[p]
                received_data[p] ^= 0xFF
                data_length -= 1
            p += 1
        return received_data
def find_registers_in_candidates(args):
    """Scan candidate register ids on the target meter and report the results.

    If ``--registerfile`` was given, only the register ids found in that
    earlier JSON dump are probed; otherwise the whole id range defined by
    ``port_start_range``/``port_end_range`` is brute-forced. A sample Conpot
    template built from the findings is logged at the end.
    """
    if args.registerfile:
        with open(args.registerfile, "r") as register_file:
            old_register_values = json.load(register_file)
        # dict.iterkeys() was Python 2 only; iterating the dict directly
        # yields its keys on Python 3.
        candidate_registers_values = [int(value) for value in old_register_values]
    else:
        candidate_registers_values = range(port_start_range, port_end_range)
    found_registers = registers_from_candidates(candidate_registers_values, args)
    logger.info(
        "Scanned {0} registers, found {1}.".format(
            len(candidate_registers_values), len(found_registers)
        )
    )
    # with open('kamstrup_dump_{0}.json'.format(calendar.timegm(datetime.utcnow().utctimetuple())), 'w') as json_file:
    #     json_file.write(json.dumps(found_registers, indent=4, default=json_default))
    logger.info("""*** Sample Conpot configuration from this scrape:""")
    logger.info(generate_conpot_config(found_registers))
def registers_from_candidates(candidate_registers_values, args):
    """Probe each candidate register id and collect those that respond.

    Results are written to a timestamped JSON dump file after every hit so
    partial progress survives an interrupted scan. Returns a dict mapping
    register id to its decoded value and metadata.
    """
    kamstrupRegisterCopier = KamstrupRegisterCopier(
        args.host, args.port, int(args.communication_address)
    )
    found_registers = {}
    not_found_counts = 0
    scanned = 0
    dumpfile = "kamstrup_dump_{0}.json".format(
        calendar.timegm(datetime.utcnow().utctimetuple())
    )
    for register_id in candidate_registers_values:
        result = kamstrupRegisterCopier.get_register(register_id)
        # Replies longer than 12 bytes carry an actual register payload.
        if len(result) > 12:
            units = result[5]
            length = result[6]
            unknown = result[7]
            # The value is a big-endian integer of `length` bytes at offset 8.
            register_value = 0
            for p in range(length):
                register_value += result[8 + p] << (8 * ((length - p) - 1))
            found_registers[register_id] = {
                "timestamp": datetime.utcnow(),
                "units": units,
                "value": register_value,
                "value_length": length,
                "unknown": unknown,
            }
            logger.info(
                "Found register value at {0}:{1}".format(
                    hex(register_id), register_value
                )
            )
            # Persist progress after each successful read.
            with open(dumpfile, "w") as json_file:
                json_file.write(
                    json.dumps(found_registers, indent=4, default=json_default)
                )
        else:
            not_found_counts += 1
            if not_found_counts % 10 == 0:
                logger.info(
                    "Hang on, still scanning, so far scanned {0} and found {1} registers".format(
                        scanned, len(found_registers)
                    )
                )
        scanned += 1
    return found_registers
def generate_conpot_config(result_list):
    """Render scraped register data as a pretty-printed Conpot template.

    ``result_list`` maps register id -> dict with "value", "units",
    "unknown" and "value_length" keys; the returned string is the XML for a
    kamstrup_meter template exposing those registers via the databus.
    """
    fragments = [
        """<conpot_template name="Kamstrup-Auto382" description="Register clone of an existing Kamstrup meter">
    <core><databus><key_value_mappings>"""
    ]
    # Databus entries: one key per register holding its scraped value.
    for reg_id, info in result_list.items():
        fragments.append(
            """<key name="register_{0}"><value type="value">{1}</value></key>""".format(
                reg_id, info["value"]
            )
        )
    fragments.append(
        """</key_value_mappings></databus></core><protocols><kamstrup_meter enabled="True" host="0.0.0.0" port="1025"><registers>"""
    )
    # Protocol section: expose each register and point it at its databus key.
    for reg_id, info in result_list.items():
        fragments.append(
            """<register name="{0}" units="{1}" unknown="{2}" length="{3}"><value>register_{0}</value></register>""".format(
                reg_id, info["units"], info["unknown"], info["value_length"]
            )
        )
    fragments.append("</registers></kamstrup_meter></protocols></conpot_template>")
    parsed = xml.dom.minidom.parseString("".join(fragments))
    return parsed.toprettyxml()
def json_default(obj):
    """json.dumps fallback: datetimes become ISO strings, all else null."""
    return obj.isoformat() if isinstance(obj, datetime) else None
# Command-line entry point: probe a live meter and log a sample Conpot config.
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Probes kamstrup_meter meter registers."
    )
    parser.add_argument("host", help="Hostname or IP or Kamstrup meter")
    parser.add_argument("port", type=int, help="TCP port")
    parser.add_argument(
        "--registerfile",
        dest="registerfile",
        help="Reads registers from previous dumps files instead of bruteforcing the meter.",
    )
    # Meter communication address; defaults to default_comm_port defined above.
    parser.add_argument(
        "--comm-addr", dest="communication_address", default=default_comm_port
    )
    find_registers_in_candidates(parser.parse_args())
| 7,896 | Python | .py | 194 | 31.701031 | 142 | 0.612588 | mushorg/conpot | 1,224 | 413 | 115 | GPL-2.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,519 | start_protocol.py | mushorg_conpot/bin/start_protocol.py | # Copyright (C) 2020 srenfo
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import sys
import gevent
from conpot.utils.greenlet import init_test_server_by_name, teardown_test_server
def main():
    """Start a single protocol server by name (debugging helper).

    The protocol name is taken from the first command-line argument; runs
    until interrupted with Ctrl-C.
    """
    name = sys.argv[1]
    # Default listening port per protocol; unknown names fall back to 0
    # (let the server pick).
    default_ports = {
        "bacnet": 9999,
        "enip": 60002,
        "ftp": 10001,
        "guardian_ast": 10001,
        "http": 50001,
        "kamstrup_management": 50100,
        "kamstrup_meter": 1025,
        "ipmi": 10002,
        "s7comm": 9999,
        "tftp": 6090,
    }
    print(f"Starting '{name}'...")
    server, greenlet = init_test_server_by_name(name, port=default_ports.get(name, 0))
    try:
        gevent.wait()
    except KeyboardInterrupt:
        teardown_test_server(server=server, greenlet=greenlet)
# Allow running this module directly: `python start_protocol.py <protocol>`.
if __name__ == "__main__":
    main()
| 1,526 | Python | .py | 43 | 31.093023 | 80 | 0.68907 | mushorg/conpot | 1,224 | 413 | 115 | GPL-2.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,520 | conf.py | mushorg_conpot/docs/source/conf.py | # -*- coding: utf-8 -*-
#
# Conpot documentation build configuration file, created by
# sphinx-quickstart on Sat Apr 20 14:00:03 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import sphinx_rtd_theme
sys.path.append(os.path.join(os.path.dirname(__file__)))
sys.path.append(os.path.join(os.getcwd() + "/../../"))
import conpot
# print(conpot.__version__)
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ["sphinx.ext.autodoc"]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix of source filenames.
source_suffix = ".rst"
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "Conpot"
copyright = "2018, MushMush Foundation"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
# The short X.Y version.
version = conpot.__version__
# The full version, including alpha/beta/rc tags.
release = conpot.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# t oday_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = "Conpotdoc"
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
("index", "Conpot.tex", "Conpot Documentation", "MushMush Foundation", "manual"),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [("index", "conpot", "Conpot Documentation", ["MushMush Foundation"], 1)]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
"index",
"Conpot",
"Conpot Documentation",
"MushMush Foundation",
"Conpot",
"ICS/SCADA honeypot.",
"Miscellaneous",
),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
| 8,370 | Python | .py | 189 | 42.444444 | 85 | 0.713828 | mushorg/conpot | 1,224 | 413 | 115 | GPL-2.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,521 | test_attack_session.py | mushorg_conpot/conpot/tests/test_attack_session.py | from datetime import datetime, timedelta
from freezegun import freeze_time
from conpot.core.attack_session import AttackSession
class LogQueueFake:
    """Test double for the log queue: remembers every event handed to put(), in order."""

    def __init__(self):
        self.events = []

    def put(self, event):
        self.events.append(event)
def test_add_event_is_logged():
    """An event added to a session is pushed to the log queue with full metadata."""
    proto = "testing"
    src = ("1.2.3.4", 11)
    dst = ("5.6.7.8", 22)
    queue = LogQueueFake()
    session = AttackSession(
        protocol=proto,
        source_ip=src[0],
        source_port=src[1],
        destination_ip=dst[0],
        destination_port=dst[1],
        log_queue=queue,
    )

    event = {"foo": "bar"}
    session.add_event(event)

    entry = queue.events[0]
    assert entry["data"] == event
    assert entry["data_type"] == proto
    assert entry["src_ip"] == src[0]
    assert entry["src_port"] == src[1]
    assert entry["remote"] == src
    assert entry["dst_ip"] == dst[0]
    assert entry["dst_port"] == dst[1]
    assert entry["local"] == dst
    # TODO should this even include public_ip if it's always None?
    assert entry["public_ip"] is None
def test_add_event_same_id():
    """All events logged within one session share that session's id."""
    queue = LogQueueFake()
    session = AttackSession(
        protocol=None,
        source_ip=None,
        source_port=None,
        destination_ip=None,
        destination_port=None,
        log_queue=queue,
    )

    session.add_event({"foo": "bar"})
    session.add_event({"bar": "baz"})

    first, second = queue.events
    assert first["id"] == second["id"]
def test_add_event_sessions_have_unique_ids():
    """Separate sessions get distinct ids even when sharing one log queue."""
    queue = LogQueueFake()
    kwargs = dict(
        protocol=None,
        source_ip=None,
        source_port=None,
        destination_ip=None,
        destination_port=None,
        log_queue=queue,
    )
    session_a = AttackSession(**kwargs)
    session_b = AttackSession(**kwargs)

    session_a.add_event({"foo": "bar"})
    session_b.add_event({"bar": "baz"})

    assert queue.events[0]["id"] != queue.events[1]["id"]
def test_add_event_uses_session_timestamp():
    """Logged events carry the session start time, not the event time."""
    queue = LogQueueFake()
    session_start = datetime(2000, 1, 1)
    with freeze_time(session_start) as clock:
        session = AttackSession(
            protocol=None,
            source_ip=None,
            source_port=None,
            destination_ip=None,
            destination_port=None,
            log_queue=queue,
        )
        clock.tick(timedelta(days=1))
        session.add_event({"foo": "bar"})
        session.add_event({"bar": "baz"})

    # timestamp is always the time the session started,
    # not the time the event occurred
    assert all(event["timestamp"] == session_start for event in queue.events)
@freeze_time("2000-01-01", auto_tick_seconds=2)
def test_dump_collects_events():
    """dump() returns every event keyed by milliseconds since the session start."""
    proto = "testing"
    src = ("1.2.3.4", 11)
    dst = ("5.6.7.8", 22)
    queue = LogQueueFake()
    session = AttackSession(
        protocol=proto,
        source_ip=src[0],
        source_port=src[1],
        destination_ip=dst[0],
        destination_port=dst[1],
        log_queue=queue,
    )

    event_1 = {"foo": "bar"}
    event_2 = {"bar": "baz"}
    for event in (event_1, event_2, event_1):
        session.add_event(event)

    dump = session.dump()
    assert dump["data_type"] == proto
    # auto_tick_seconds=2 advances the frozen clock 2s per add_event -> 2000 ms apart
    assert list(dump["data"].keys()) == [2000, 4000, 6000]
    assert list(dump["data"].values()) == [event_1, event_2, event_1]
    assert dump["src_ip"] == src[0]
    assert dump["src_port"] == src[1]
    assert dump["remote"] == src
    assert dump["dst_ip"] == dst[0]
    assert dump["dst_port"] == dst[1]
    assert dump["local"] == dst
    # TODO should this even include public_ip if it's always None?
    assert dump["public_ip"] is None
| 4,297 | Python | .tac | 122 | 28.491803 | 69 | 0.633946 | mushorg/conpot | 1,224 | 413 | 115 | GPL-2.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,522 | attack_session.py | mushorg_conpot/conpot/core/attack_session.py | # Copyright (C) 2014 Johnny Vestergaard <jkv@unixcluster.dk>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import logging
import uuid
from datetime import datetime
logger = logging.getLogger(__name__)
class AttackSession:
    """One attacker connection to a protocol handler (one instance per connection).

    Collects the events observed during the connection, keyed by milliseconds
    since the session started, and forwards each event to the shared log
    queue together with the session's connection metadata.
    """

    def __init__(
        self,
        protocol,
        source_ip,
        source_port,
        destination_ip,
        destination_port,
        log_queue,
    ):
        self.log_queue = log_queue
        self.id = uuid.uuid4()
        logger.info("New %s session from %s (%s)", protocol, source_ip, self.id)
        self.protocol = protocol
        self.source_ip = source_ip
        self.source_port = source_port
        self.destination_ip = destination_ip
        self.destination_port = destination_port
        # Session start time; every logged event is stamped with this value.
        self.timestamp = datetime.utcnow()
        self.public_ip = None
        self.data = {}
        self._ended = False

    def _dump_data(self, data):
        """Wrap *data* in a dict carrying this session's connection metadata."""
        record = {
            "id": self.id,
            "remote": (self.source_ip, self.source_port),
            "src_ip": self.source_ip,
            "src_port": self.source_port,
            "local": (self.destination_ip, self.destination_port),
            "dst_ip": self.destination_ip,
            "dst_port": self.destination_port,
            "data_type": self.protocol,
            "timestamp": self.timestamp,
            "public_ip": self.public_ip,
        }
        record["data"] = data
        return record

    def add_event(self, event_data):
        """Record *event_data* keyed by ms elapsed since the session began, and log it."""
        elapsed = (datetime.utcnow() - self.timestamp).total_seconds()
        offset_ms = int(elapsed * 1000)
        # Don't clobber an earlier event that landed on the same millisecond.
        while offset_ms in self.data:
            offset_ms += 1
        self.data[offset_ms] = event_data
        # TODO: We should only log the session when it is finished
        self.log_queue.put(self._dump_data(event_data))

    def dump(self):
        """Return the session metadata together with all recorded events."""
        return self._dump_data(self.data)

    def set_ended(self):
        """Mark the connection as closed."""
        self._ended = True
| 2,606 | Python | .tac | 69 | 30.811594 | 80 | 0.645289 | mushorg/conpot | 1,224 | 413 | 115 | GPL-2.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,523 | dnscan.py | rbsec_dnscan/dnscan.py | #!/usr/bin/env python3
#
# dnscan copyright (C) 2013-2014 rbsec
# Licensed under GPLv3, see LICENSE for details
#
from __future__ import print_function
import packaging.version
import os
import platform
import re
import sys
import threading
import time
try: # Ugly hack because Python3 decided to rename Queue to queue
import Queue
except ImportError:
import queue as Queue
try: # Python2 and Python3 have different IP address libraries
from ipaddress import ip_address as ipaddr
except ImportError:
try:
from netaddr import IPAddress as ipaddr
except ImportError:
if sys.version_info[0] == 2:
print("FATAL: dnscan requires either the netaddr (python-netaddr) or ipaddress (python-ipaddress) modules.")
else:
print("FATAL: dnscan requires either the netaddr (python3-netaddr) or ipaddress (standard library) modules.")
sys.exit(1)
try:
import argparse
except:
print("FATAL: Module argparse missing (python-argparse)")
sys.exit(1)
try:
import dns.query
import dns.resolver
import dns.zone
import dns.dnssec
except:
print("FATAL: Module dnspython missing (python-dnspython)")
sys.exit(1)
if (packaging.version.parse(dns.__version__) < packaging.version.Version("2.0.0")):
print("dnscan requires dnspython 2.0.0 or greater.\nYou can install it with `pip install -r requirements.txt`")
sys.exit(1)
# Usage: dnscan.py -d <domain name>
class scanner(threading.Thread):
    """Worker thread: pulls candidate names off the shared queue and resolves them."""

    def __init__(self, queue):
        global wildcard
        threading.Thread.__init__(self)
        self.queue = queue

    def get_name(self, domain):
        """Resolve *domain* and report/record any hits.

        Answers matching the wildcard IPs are discarded, and (with
        --recursive) newly found subdomains are queued for a deeper scan.
        Any resolution/printing error is deliberately swallowed.
        """
        global wildcard, addresses
        try:
            if sys.stdout.isatty():  # Don't spam output if redirected
                print(domain + '\033[K\r', end='')
            response = lookup(domain, recordtype)
            if args.tld and response:
                nameservers = sorted(list(response))
                first_ns = str(nameservers[0])[:-1]  # First nameserver, trailing dot stripped
                print('\033[K\r', end='')
                print(domain + " - " + col.brown + first_ns + col.end)
                if outfile:
                    print(first_ns + " - " + domain, file=outfile)
            if args.tld:
                if response:
                    print('\033[K\r', end='')
                    print(domain + " - " + response)
                return
            for record in response:
                address = record.address
                # Drop answers that just mirror the wildcard response
                if wildcard:
                    for wildcard_ip in wildcard:
                        if address == wildcard_ip:
                            return
                print('\033[K\r', end='')
                if args.no_ip:
                    print(col.brown + domain + col.end)
                    break
                elif args.domain_first:
                    print(domain + " - " + col.brown + address + col.end)
                else:
                    print(address + " - " + col.brown + domain + col.end)
                if outfile:
                    if args.domain_first:
                        print(domain + " - " + address, file=outfile)
                    else:
                        print(address + " - " + domain, file=outfile)
                try:
                    addresses.add(ipaddr(unicode(address)))
                except NameError:  # Python 3 has no unicode()
                    addresses.add(ipaddr(str(address)))
            should_recurse = (domain != target
                              and args.recurse
                              and domain.count('.') - target.count('.') <= args.maxdepth)
            if should_recurse:
                # Check if subdomain is wildcard so can filter false positives in the recursive scan
                wildcard = get_wildcard(domain)
                for wildcard_ip in wildcard:
                    try:
                        addresses.add(ipaddr(unicode(wildcard_ip)))
                    except NameError:
                        addresses.add(ipaddr(str(wildcard_ip)))
                if args.recurse_wildcards or not wildcard:
                    add_target(domain)  # Recursively scan subdomains
        except:
            pass

    def run(self):
        """Consume the queue until it stays empty for a second, then exit."""
        while True:
            try:
                domain = self.queue.get(timeout=1)
            except:
                return
            self.get_name(domain)
            self.queue.task_done()
class output:
    """Console reporting helpers; most messages are mirrored into the output file."""

    def _emit(self, colour, tag, message):
        # Coloured tag on the console, plain tag in the output file.
        print(colour + tag + col.end + message)
        if outfile and not args.quick:
            print(tag + message, file=outfile)

    def status(self, message):
        self._emit(col.blue, "[*] ", message)

    def good(self, message):
        self._emit(col.green, "[+] ", message)

    def verbose(self, message):
        # Only shown when -v/--verbose was given.
        if args.verbose:
            self._emit(col.brown, "[v] ", message)

    def warn(self, message):
        self._emit(col.red, "[-] ", message)

    def fatal(self, message):
        # Fatal messages colour the whole line and use a different file format.
        print("\n" + col.red + "FATAL: " + message + col.end)
        if outfile and not args.quick:
            print("FATAL " + message, file=outfile)
class col:
    """ANSI colour codes; disabled when output is redirected or on Windows."""
    _use_colour = sys.stdout.isatty() and platform.system() != "Windows"
    green = '\033[32m' if _use_colour else ''
    blue = '\033[94m' if _use_colour else ''
    red = '\033[31m' if _use_colour else ''
    brown = '\033[33m' if _use_colour else ''
    end = '\033[0m' if _use_colour else ''
def lookup(domain, recordtype):
    """Resolve *domain* for the given record type; None on any failure."""
    try:
        return resolver.resolve(domain, recordtype)
    except:
        return
def get_wildcard(target):
    """Detect wildcard DNS on *target* by resolving a random-looking label.

    Returns the list of IP addresses the wildcard resolves to (empty when
    no wildcard exists).
    """
    wildcards = []
    # Use current unix time as a test subdomain
    epochtime = str(int(time.time()))
    # Prepend a letter to work around incompetent companies like CableOne
    # and their stupid attempts at DNS hijacking
    res = lookup("a" + epochtime + "." + target, recordtype)
    if not res:
        out.verbose("No wildcard domain found")
        return wildcards
    for res_data in res:
        address = res_data.address
        wildcards.append(address)
        out.warn("Wildcard domain found - " + col.brown + "*." + target + col.end + " (" + address + ")")
    return wildcards
def get_nameservers(target):
    """Return the NS record set for *target*, or None if the lookup fails."""
    try:
        return resolver.resolve(target, 'NS')
    except:
        return
def get_v6(target):
    """Print and record any IPv6 (AAAA) records published for *target*."""
    out.verbose("Getting IPv6 (AAAA) records")
    try:
        records = lookup(target, "AAAA")
        if records:
            out.good("IPv6 (AAAA) records found. Try running dnscan with the "+ col.green + "-6 " + col.end + "option.")
            for v6 in records:
                print(str(v6) + "\n")
                if outfile:
                    print(v6, file=outfile)
    except:
        return
def get_txt(target):
    """Print and record any TXT records published for *target*."""
    out.verbose("Getting TXT records")
    try:
        records = lookup(target, "TXT")
        if not records:
            return
        out.good("TXT records found")
        for record in records:
            print(record)
            if outfile:
                print(record, file=outfile)
        print("")
    except:
        return
def get_dmarc(target):
    """Print and record the DMARC policy TXT record for *target*, if any."""
    out.verbose("Getting DMARC records")
    try:
        records = lookup("_dmarc." + target, "TXT")
        if not records:
            return
        out.good("DMARC records found")
        for record in records:
            print(record)
            if outfile:
                print(record, file=outfile)
        print("")
    except:
        return
def get_dnssec(target, nameserver):
    """Check whether *target* publishes DNSKEY records and validate them.

    Queries *nameserver* directly over UDP with the DO bit set; on success
    prints the signing algorithm. Only reports results - returns None.
    """
    out.verbose("Checking DNSSEC")
    request = dns.message.make_query(target, dns.rdatatype.DNSKEY, want_dnssec=True)
    response = dns.query.udp(request, nameserver, timeout=1)
    if response.rcode() != 0:
        out.warn("DNSKEY lookup returned error code " + dns.rcode.to_text(response.rcode()) + "\n")
    else:
        answer = response.answer
        if len(answer) == 0:
            # No DNSKEY RRset at all
            out.warn("DNSSEC not supported\n")
        elif len(answer) != 2:
            # Expect exactly the DNSKEY RRset plus its RRSIG
            out.warn("Invalid DNSKEY record length\n")
        else:
            name = dns.name.from_text(target)
            try:
                # Validate the RRSIG (answer[1]) over the DNSKEY RRset (answer[0])
                dns.dnssec.validate(answer[0],answer[1],{name:answer[0]})
            except dns.dnssec.ValidationFailure:
                out.warn("DNSSEC key validation failed\n")
            else:
                out.good("DNSSEC enabled and validated")
                # DNSKEY rdata text: "<flags> <protocol> <algorithm> <key>"
                dnssec_values = str(answer[0][0]).split(' ')
                algorithm_int = int(dnssec_values[2])
                algorithm_str = dns.dnssec.algorithm_to_text(algorithm_int)
                print("Algorithm = " + algorithm_str + " (" + str(algorithm_int) + ")\n")
def get_mx(target):
    """Report MX records for *target* and queue in-domain MX hosts for scanning.

    MX hosts that are subdomains of *target* and not already in the wordlist
    are added to the scan queue so they get resolved too.
    """
    out.verbose("Getting MX records")
    try:
        res = lookup(target, "MX")
    except:
        return
    # Return if we don't get any MX records back
    if not res:
        return
    out.good("MX records found, added to target list")
    # Raw string + re.escape so the dots in the target domain are matched
    # literally rather than as regex wildcards; compiled once outside the loop.
    mx_pattern = re.compile(r"([a-z0-9.\-]+)\." + re.escape(target), re.IGNORECASE)
    for mx in res:
        print(mx.to_text())
        if outfile:
            print(mx.to_text(), file=outfile)
        mxsub = mx_pattern.search(mx.to_text())
        try:
            if mxsub.group(1) and mxsub.group(1) not in wordlist:
                queue.put(mxsub.group(1) + "." + target)
        except AttributeError:
            # MX host is not a subdomain of the target
            pass
    print("")
def zone_transfer(domain, ns, nsip):
    """Attempt an AXFR zone transfer of *domain* from nameserver IP *nsip*.

    On success prints (and optionally writes) the full zone, then exits the
    program; failures are silently ignored so other nameservers can be tried.
    Note: sys.exit raises SystemExit, which is not caught by the
    ``except Exception`` below, so the exit propagates to the caller.
    """
    out.verbose("Trying zone transfer against " + str(ns))
    try:
        zone = dns.zone.from_xfr(dns.query.xfr(str(nsip), domain, relativize=False, timeout=3),
                                 relativize=False)
        out.good("Zone transfer successful using nameserver " + col.brown + str(ns) + col.end)
        names = list(zone.nodes.keys())
        names.sort()
        for n in names:
            print(zone[n].to_text(n))    # Print raw zone
            if outfile:
                print(zone[n].to_text(n), file=outfile)
        sys.exit(0)
    except Exception:
        pass
def add_target(domain):
    """Queue every wordlist entry (plus optional alterations) under *domain*.

    When *domain* contains a '%%' marker the candidate replaces the marker,
    otherwise it is prepended as a subdomain label.
    """
    for word in wordlist:
        candidates = [word]
        if args.alt:
            probes = ["dev", "prod", "stg", "qa", "uat", "api", "alpha", "beta",
                      "cms", "test", "internal", "staging", "origin", "stage"]
            for probe in probes:
                # Reduce alterations that most likely don't exist (e.g. dev-dev.domain.com)
                if probe not in word:
                    candidates.extend([
                        probe + word,
                        word + probe,
                        probe + "-" + word,
                        word + "-" + probe,
                    ])
            # Skip numeric suffixes when the word already ends in a digit
            if not word[-1].isdigit():
                for n in range(1, 6):
                    candidates.extend([word + str(n), word + "0" + str(n)])
        for candidate in candidates:
            if '%%' in domain:
                queue.put(domain.replace(r'%%', candidate))
            else:
                queue.put(candidate + "." + domain)
def add_tlds(domain):
    """Queue *domain* combined with every TLD in the wordlist."""
    for suffix in wordlist:
        queue.put(domain + "." + suffix)
def get_args():
    """Parse command-line options into the module-level ``args`` namespace."""
    global args
    parser = argparse.ArgumentParser(
        'dnscan.py',
        formatter_class=lambda prog: argparse.HelpFormatter(prog, max_help_position=40),
        epilog="Specify a custom insertion point with %% in the domain name, such as: dnscan.py -d dev-%%.example.org")
    # Exactly one way of supplying targets must be used
    target = parser.add_mutually_exclusive_group(required=True)
    target.add_argument('-d', '--domain', help='Target domains (separated by commas)', dest='domain', required=False)
    target.add_argument('-l', '--list', help='File containing list of target domains', dest='domain_list', required=False)
    parser.add_argument('-w', '--wordlist', help='Wordlist', dest='wordlist', required=False)
    parser.add_argument('-t', '--threads', help='Number of threads', dest='threads', required=False, type=int, default=8)
    parser.add_argument('-6', '--ipv6', action="store_true", help='Scan for AAAA records', dest='ipv6')
    parser.add_argument('-z', '--zonetransfer', action="store_true", help='Only perform zone transfers', dest='zonetransfer')
    parser.add_argument('-r', '--recursive', action="store_true", help="Recursively scan subdomains", dest='recurse')
    parser.add_argument('--recurse-wildcards', action="store_true", help="Recursively scan wildcards (slow)", dest='recurse_wildcards')
    parser.add_argument('-m', '--maxdepth', help='Maximal recursion depth (for brute-forcing)', dest='maxdepth', required=False, type=int, default=5)
    parser.add_argument('-a', '--alterations', action="store_true", help='Scan for alterations of subdomains (slow)', dest='alt')
    parser.add_argument('-R', '--resolver', help="Use the specified resolvers (separated by commas)", dest='resolvers', required=False)
    parser.add_argument('-L', '--resolver-list', help="File containing list of resolvers", dest='resolver_list', required=False)
    parser.add_argument('-T', '--tld', action="store_true", help="Scan for TLDs", dest='tld')
    parser.add_argument('-o', '--output', help="Write output to a file", dest='output_filename', required=False)
    parser.add_argument('-i', '--output-ips', help="Write discovered IP addresses to a file", dest='output_ips', required=False)
    parser.add_argument('-D', '--domain-first', action="store_true", help='Output domain first, rather than IP address', dest='domain_first')
    parser.add_argument('-N', '--no-ip', action="store_true", help='Don\'t print IP addresses in the output', dest='no_ip')
    parser.add_argument('-v', '--verbose', action="store_true", help='Verbose mode', dest='verbose')
    parser.add_argument('-n', '--nocheck', action="store_true", help='Don\'t check nameservers before scanning', dest='nocheck')
    parser.add_argument('-q', '--quick', action="store_true", help='Only perform zone transfer and subdomains scan, with minimal output to file', dest='quick')
    args = parser.parse_args()
def setup():
    """Initialise module-level state from the parsed arguments.

    Picks the wordlist, opens the output files, builds the work queue and
    configures the dnspython resolver plus the record type to scan for.
    """
    global targets, wordlist, queue, resolver, recordtype, outfile, outfile_ips
    if args.domain:
        targets = args.domain.split(",")
    # Pick a bundled default wordlist when the user didn't supply one:
    # tlds.txt for TLD scans, subdomains.txt otherwise.
    if args.tld and not args.wordlist:
        args.wordlist = os.path.join(os.path.dirname(os.path.realpath(__file__)), "tlds.txt")
    else:
        if not args.wordlist: # Try to use default wordlist if none specified
            args.wordlist = os.path.join(os.path.dirname(os.path.realpath(__file__)), "subdomains.txt")
    # Open file handle for output (TypeError means no -o was given)
    try:
        outfile = open(args.output_filename, "w")
    except TypeError:
        outfile = None
    except IOError:
        out.fatal("Could not open output file: " + args.output_filename)
        sys.exit(1)
    if args.output_ips:
        outfile_ips = open(args.output_ips, "w")
    else:
        outfile_ips = None
    try:
        wordlist = open(args.wordlist).read().splitlines()
    except:
        out.fatal("Could not open wordlist " + args.wordlist)
        sys.exit(1)
    # Number of threads should be between 1 and 32
    if args.threads < 1:
        args.threads = 1
    elif args.threads > 32:
        args.threads = 32
    queue = Queue.Queue()
    resolver = dns.resolver.Resolver()
    resolver.timeout = 1
    resolver.lifetime = 1
    # Custom resolvers override the system ones
    if args.resolver_list:
        try:
            resolver.nameservers = open(args.resolver_list, 'r').read().splitlines()
        except FileNotFoundError:
            out.fatal("Could not open file containing resolvers: " + args.resolver_list)
            sys.exit(1)
    elif args.resolvers:
        resolver.nameservers = args.resolvers.split(",")
    # Record type
    if args.ipv6:
        recordtype = 'AAAA'
    elif args.tld:
        recordtype = 'NS'
    else:
        recordtype = 'A'
# Script entry point: sanity-check the resolver, gather targets, run the
# per-domain checks, then brute-force with a pool of scanner threads.
if __name__ == "__main__":
    global wildcard, addresses, outfile_ips
    addresses = set([])
    out = output()
    get_args()
    setup()
    # Sanity-check that the resolver can answer at all (unless -n was given)
    if args.nocheck == False:
        try:
            resolver.resolve('.', 'NS')
        except dns.resolver.NoAnswer:
            pass
        except dns.resolver.NoNameservers:
            out.warn("Failed to resolve '.' - server may be buggy. Continuing anyway....")
            pass
        except:
            out.fatal("No valid DNS resolver. This can occur when the server only resolves internal zones")
            out.fatal("Set a custom resolver with -R <resolver>")
            out.fatal("Ignore this warning with -n --nocheck\n")
            sys.exit(1)
    # -l/--list: read targets (one per line) from a file
    if args.domain_list:
        out.verbose("Domain list provided, will parse {} for domains.".format(args.domain_list))
        if not os.path.isfile(args.domain_list):
            out.fatal("Domain list {} doesn't exist!".format(args.domain_list))
            sys.exit(1)
        with open(args.domain_list, 'r') as domain_list:
            try:
                targets = list(filter(bool, domain_list.read().split('\n')))
            except Exception as e:
                out.fatal("Couldn't read {}, {}".format(args.domain_list, e))
                sys.exit(1)
    for subtarget in targets:
        global target
        target = subtarget
        out.status("Processing domain {}".format(target))
        if args.resolver_list:
            out.status("Using resolvers from: {}".format(args.resolver_list))
        elif args.resolvers:
            out.status("Using specified resolvers: {}".format(args.resolvers))
        else:
            out.status("Using system resolvers: {}".format(",".join(resolver.nameservers)))
        if args.tld and not '%%' in target:
            if "." in target:
                out.warn("Warning: TLD scanning works best with just the domain root")
            out.good("TLD Scan")
            add_tlds(target)
        else:
            queue.put(target) # Add actual domain as well as subdomains
            # These checks will all fail if we have a custom injection point, so skip them
            if not '%%' in target:
                nameservers = get_nameservers(target)
                out.good("Getting nameservers")
                targetns = [] # NS servers for target
                nsip = None
                try: # Subdomains often don't have NS records..
                    for ns in nameservers:
                        ns = str(ns)[:-1] # Removed trailing dot
                        res = lookup(ns, "A")
                        for rdata in res:
                            targetns.append(rdata.address)
                            nsip = rdata.address
                            print(nsip + " - " + col.brown + ns + col.end)
                            if not args.quick:
                                if outfile:
                                    print(nsip + " - " + ns, file=outfile)
                        # zone_transfer() exits the whole program on success
                        zone_transfer(target, ns, nsip)
                except SystemExit:
                    sys.exit(0)
                except:
                    out.warn("Getting nameservers failed")
                out.warn("Zone transfer failed\n")
                if args.zonetransfer:
                    sys.exit(0)
                if not args.quick:
                    get_v6(target)
                    get_txt(target)
                    get_dmarc(target)
                    # These checks need a proper nameserver, the systemd stub doesn't work
                    if nsip:
                        get_dnssec(target, nsip)
                    else:
                        get_dnssec(target, resolver.nameservers[0])
                    get_mx(target)
            wildcard = get_wildcard(target)
            for wildcard_ip in wildcard:
                try:
                    addresses.add(ipaddr(unicode(wildcard_ip)))
                except NameError:
                    addresses.add(ipaddr(str(wildcard_ip)))
            out.status("Scanning " + target + " for " + recordtype + " records")
            add_target(target)
    # Brute-force with a pool of scanner threads draining the queue
    for i in range(args.threads):
        t = scanner(queue)
        t.daemon = True
        t.start()
    try:
        for i in range(args.threads):
            t.join(1024) # Timeout needed or threads ignore exceptions
    except KeyboardInterrupt:
        out.fatal("Caught KeyboardInterrupt, quitting...")
        if outfile:
            outfile.close()
        sys.exit(1)
    print(" ")
    # -i/--output-ips: write the de-duplicated, sorted IP list
    if outfile_ips:
        for address in sorted(addresses):
            print(address, file=outfile_ips)
    if outfile:
        outfile.close()
    if outfile_ips:
        outfile_ips.close()
| 20,966 | Python | .py | 486 | 31.950617 | 159 | 0.56094 | rbsec/dnscan | 1,114 | 411 | 6 | GPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,524 | setup.py | florianfesti_boxes/setup.py | #!/usr/bin/env python3
import glob
import os
import subprocess
import sys
from pathlib import Path
from subprocess import CalledProcessError, check_output
from setuptools import find_packages, setup
from setuptools.command.build_py import build_py
class CustomBuildExtCommand(build_py):
    """Customized build_py step.

    Before the regular build it (best-effort) regenerates the gettext
    template, compiles the *.po catalogs to *.mo, builds the Inkscape
    extension descriptions, and registers the results plus the Inkscape
    plugin files as data_files.
    """

    # Directory holding the helper scripts shipped in the source tree.
    script_path: Path = Path("scripts")

    def buildInkscapeExt(self) -> None:
        """Generate the Inkscape .inx files via scripts/boxes2inkscape."""
        try:
            subprocess.run([sys.executable, str(self.script_path / "boxes2inkscape"), "inkex"], check=True, capture_output=True, text=True)
        except CalledProcessError as e:
            print("Could not build inkscape extension because of error: ", e)
            print("Output: ", e.stdout, e.stderr)

    def updatePOT(self) -> None:
        """Refresh po/boxes.py.pot from the generator script and xgettext."""
        try:
            subprocess.run([sys.executable, str(self.script_path / "boxes2pot"), "po/boxes.py.pot"], check=True, capture_output=True, text=True)
            subprocess.run(["xgettext -L Python -j --from-code=utf-8 -o po/boxes.py.pot boxes/*.py scripts/boxesserver scripts/boxes"], shell=True, check=True, capture_output=True, text=True)
        except CalledProcessError as e:
            print("Could not process translation because of error: ", e)
            print("Output: ", e.stdout, e.stderr)

    def generate_mo_files(self) -> None:
        """Compile po/*.po to locale/<lang>/LC_MESSAGES/boxes.py.mo and
        register each catalog in distribution.data_files."""
        for po in glob.glob("po/*.po"):
            lang = po.split(os.sep)[1][:-3].replace("-", "_")
            msg_dir = os.path.join("locale", lang, "LC_MESSAGES")
            os.makedirs(msg_dir, exist_ok=True)  # idiomatic vs. try/except FileExistsError
            mo_file = os.path.join(msg_dir, "boxes.py.mo")
            try:
                # Consistent with the other steps: argument list instead of
                # an os.system() shell string (no interpolation of the path).
                subprocess.run(["msgfmt", po, "-o", mo_file], check=True, capture_output=True, text=True)
            except (CalledProcessError, FileNotFoundError) as e:
                # Best-effort, as before — but don't register a catalog
                # that was not actually produced.
                print("Could not compile message catalog", po, ":", e)
                continue
            self.distribution.data_files.append(
                (os.path.join("share", "locale", lang, "LC_MESSAGES"), [mo_file]))

    def run(self) -> None:
        """Run the generation steps, register plugin files, then build_py."""
        if self.distribution.data_files is None:
            self.distribution.data_files = []
        self.execute(self.updatePOT, ())
        self.execute(self.generate_mo_files, ())
        self.execute(self.buildInkscapeExt, ())
        # Hoisted: both branches below register the same .inx files.
        inx_files = glob.glob(os.path.join("inkex", "*.inx"))
        if 'CURRENTLY_PACKAGING' in os.environ:
            # We are most probably building a Debian package:
            # use the fixed system-wide extension path.
            path = "/usr/share/inkscape/extensions"
            self.distribution.data_files.append((path, inx_files))
            self.distribution.data_files.append((path, ['scripts/boxes']))
            self.distribution.data_files.append((path, ['scripts/boxes_proxy.py']))
        else:
            # Default behavior: ask Inkscape for its extension directory,
            # falling back to the per-user directory when not writable.
            try:
                path = check_output(["inkscape", "--system-data-directory"]).decode().strip()
                path = os.path.join(path, "extensions")
                if not os.access(path, os.W_OK):  # Can we install globally?
                    # Not tested on Windows and Mac
                    path = os.path.expanduser("~/.config/inkscape/extensions")
                self.distribution.data_files.append((path, inx_files))
                self.distribution.data_files.append((path, ['scripts/boxes']))
                self.distribution.data_files.append((path, ['scripts/boxes_proxy.py']))
            except (CalledProcessError, FileNotFoundError) as e:
                # Inkscape is not installed; skip the plugin files.
                print("Could not find Inkscape. Skipping plugin files.\n", e)
        build_py.run(self)
# Wire in the customized build step; the remaining package metadata is
# presumably supplied via setup.cfg/pyproject.toml (not visible here).
custom_commands = {
    'build_py': CustomBuildExtCommand,
}

setup(
    packages=find_packages(),
    cmdclass=custom_commands,
)
| 3,732 | Python | .py | 72 | 41.486111 | 191 | 0.619792 | florianfesti/boxes | 970 | 351 | 40 | GPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,525 | boxes.py.mo | florianfesti_boxes/locale/de/LC_MESSAGES/boxes.py.mo | fiï - Ñ ? Ï2 ËC π ÈC N £E Ï ÚE flF ÛF G G 7G B <G . G 3 ÆG D ‚G X 'H ÄH ÑH B °H ‰H ÌH 1 ˇH 1I & |