id int64 0 458k | file_name stringlengths 4 119 | file_path stringlengths 14 227 | content stringlengths 24 9.96M | size int64 24 9.96M | language stringclasses 1 value | extension stringclasses 14 values | total_lines int64 1 219k | avg_line_length float64 2.52 4.63M | max_line_length int64 5 9.91M | alphanum_fraction float64 0 1 | repo_name stringlengths 7 101 | repo_stars int64 100 139k | repo_forks int64 0 26.4k | repo_open_issues int64 0 2.27k | repo_license stringclasses 12 values | repo_extraction_date stringclasses 433 values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
25,400 | dataset_attachments.py | truenas_middleware/src/middlewared/middlewared/plugins/pool_/dataset_attachments.py | from middlewared.schema import accepts, Ref, returns, Str
from middlewared.service import item_method, private, Service
from .utils import dataset_mountpoint
class PoolDatasetService(Service):
    """Tracks services (shares, VMs, ...) attached to pool datasets."""

    # FSAttachmentDelegate instances registered by the various plugins at setup.
    attachment_delegates = []

    class Config:
        namespace = 'pool.dataset'

    @item_method
    @accepts(Str('id', required=True), roles=['DATASET_READ'])
    @returns(Ref('attachments'))
    async def attachments(self, oid):
        """
        Return a list of services dependent of this dataset.

        Responsible for telling the user whether there is a related
        share, asking for confirmation.

        Example return value:
        [
          {
            "type": "NFS Share",
            "service": "nfs",
            "attachments": ["/mnt/tank/work"]
          }
        ]
        """
        dataset = await self.middleware.call('pool.dataset.get_instance_quick', oid)
        mountpoint = dataset_mountpoint(dataset)
        if not mountpoint:
            # No mountpoint (e.g. dataset not mounted) means nothing can be attached.
            return []
        return await self.attachments_with_path(mountpoint)

    @private
    async def attachments_with_path(self, path, check_parent=False, exact_match=False):
        # Ask every registered delegate which of its attachments live under `path`.
        found = []
        if isinstance(path, str) and not path.startswith('/mnt/'):
            self.logger.warning('%s: unexpected path not located within pool mountpoint', path)
        if path:
            query_options = {'check_parent': check_parent, 'exact_match': exact_match}
            for delegate in self.attachment_delegates:
                names = [
                    await delegate.get_attachment_name(attachment)
                    for attachment in await delegate.query(path, True, query_options)
                ]
                if names:
                    found.append({
                        'type': delegate.title,
                        'service': delegate.service,
                        'attachments': names,
                    })
        return found

    @private
    def register_attachment_delegate(self, delegate):
        # Called by plugins at startup to hook their FS attachment delegate in.
        self.attachment_delegates.append(delegate)

    @private
    async def query_attachment_delegate(self, name, path, enabled):
        # Query a single delegate, looked up by its `name` attribute.
        for delegate in self.attachment_delegates:
            if delegate.name == name:
                return await delegate.query(path, enabled)
        raise RuntimeError(f'Unknown attachment delegate {name!r}')

    @private
    async def get_attachment_delegates(self):
        return self.attachment_delegates
| 2,388 | Python | .tac | 54 | 34.888889 | 102 | 0.64569 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,401 | attachment.py | truenas_middleware/src/middlewared/middlewared/plugins/kmip/attachment.py | # Copyright (c) - iXsystems Inc.
#
# Licensed under the terms of the TrueNAS Enterprise License Agreement
# See the file LICENSE.IX for complete terms and conditions
from middlewared.common.attachment.certificate import CertificateServiceAttachmentDelegate
from middlewared.common.ports import ServicePortDelegate
# Ties the KMIP service to the certificate it consumes; per SERVICE_VERB, the
# 'kmip' service is started when the base delegate acts on a certificate change
# (exact trigger lives in CertificateServiceAttachmentDelegate -- confirm there).
class KmipCertificateAttachment(CertificateServiceAttachmentDelegate):
HUMAN_NAME = 'KMIP Service'
SERVICE = 'kmip'
SERVICE_VERB = 'start'
# Exposes the KMIP service's listening port (its 'port' config field) to the
# port registry -- presumably for port-conflict detection; see ServicePortDelegate.
class KMIPServicePortDelegate(ServicePortDelegate):
name = 'kmip'
namespace = 'kmip'
port_fields = ['port']
title = 'KMIP Service'
# Plugin entry point: register the certificate and port delegates for KMIP.
async def setup(middleware):
await middleware.call('certificate.register_attachment_delegate', KmipCertificateAttachment(middleware))
await middleware.call('port.register_attachment_delegate', KMIPServicePortDelegate(middleware))
| 859 | Python | .tac | 18 | 44.222222 | 108 | 0.80649 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,402 | cert_attachments.py | truenas_middleware/src/middlewared/middlewared/plugins/ldap_/cert_attachments.py | from middlewared.common.attachment.certificate import CertificateServiceAttachmentDelegate
# Ties the LDAP service to its configured certificate; SERVICE_VERB = 'start'
# is the action applied to the 'ldap' service by the base delegate.
class LdapCertificateAttachmentDelegate(CertificateServiceAttachmentDelegate):
HUMAN_NAME = 'LDAP Service'
SERVICE = 'ldap'
SERVICE_VERB = 'start'
# Plugin entry point: make the certificate subsystem aware of the LDAP consumer.
async def setup(middleware):
await middleware.call('certificate.register_attachment_delegate', LdapCertificateAttachmentDelegate(middleware))
| 401 | Python | .tac | 7 | 53.285714 | 116 | 0.840617 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,403 | cert_attachments.py | truenas_middleware/src/middlewared/middlewared/plugins/idmap_/cert_attachments.py | from middlewared.common.attachment.certificate import CertificateCRUDServiceAttachmentDelegate
# CRUD-style certificate delegate: idmap entries using a certificate are found
# by filtering the 'idmap' namespace on the 'certificate.id' field.
class IdmapCertificateAttachmentDelegate(CertificateCRUDServiceAttachmentDelegate):
CERT_FILTER_KEY = 'certificate.id'
HUMAN_NAME = 'IDMAP Service'
NAMESPACE = 'idmap'
# Intentionally a no-op: nothing is redeployed for idmap on certificate
# renewal (NOTE(review): assumption based on the empty body -- confirm).
async def redeploy(self, cert_id):
pass
# Plugin entry point: register the idmap certificate delegate.
async def setup(middleware):
await middleware.call('certificate.register_attachment_delegate', IdmapCertificateAttachmentDelegate(middleware))
| 480 | Python | .tac | 9 | 48.555556 | 117 | 0.819355 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,404 | attachments.py | truenas_middleware/src/middlewared/middlewared/plugins/crypto_/attachments.py | from middlewared.service import private, Service
class CertificateService(Service):
    """Registry of services that consume certificates (attachment delegates)."""

    class Config:
        cli_namespace = 'system.certificate'

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Delegates registered via register_attachment_delegate().
        self.delegates = []

    @private
    async def register_attachment_delegate(self, delegate):
        self.delegates.append(delegate)

    @private
    async def in_use_attachments(self, cert_id):
        # Delegates whose service currently uses the given certificate.
        in_use = []
        for delegate in self.delegates:
            if await delegate.state(cert_id):
                in_use.append(delegate)
        return in_use

    @private
    async def get_attachments(self, cert_id):
        # Human-readable descriptions of this certificate's consumers,
        # dropping delegates that report nothing (falsy output).
        outputs = [
            await delegate.consuming_cert_human_output(cert_id)
            for delegate in self.delegates
        ]
        return [output for output in outputs if output]

    @private
    async def redeploy_cert_attachments(self, cert_id):
        for delegate in await self.in_use_attachments(cert_id):
            await delegate.redeploy(cert_id)
| 912 | Python | .tac | 20 | 38.8 | 119 | 0.702489 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,405 | attachments.py | truenas_middleware/src/middlewared/middlewared/plugins/vm/attachments.py | import collections
import os.path
from middlewared.common.attachment import FSAttachmentDelegate
from middlewared.common.ports import PortDelegate
from middlewared.plugins.zfs_.utils import zvol_path_to_name
from middlewared.service import private, Service
from .utils import ACTIVE_STATES
async def determine_recursive_search(recursive, device, child_datasets):
    """
    Decide whether a snapshot of a dataset should be treated as covering `device`.

    True when the snapshot is recursive, or when the device's backing file does
    not live on any child dataset (i.e. it sits on the snapshotted filesystem
    itself and is therefore affected by the parent snapshot).
    """
    if recursive:
        return True
    if device['dtype'] == 'DISK':
        return False
    # Raw/cdrom files: test every ancestor path component against the child
    # datasets -- a match means the file lives on a different filesystem that
    # the parent (non-recursive) snapshot does not affect.
    relative = device['attributes']['path'].removeprefix('/mnt/')
    ancestors = (
        relative.rsplit('/', depth)[0] for depth in range(relative.count('/'))
    )
    return not any(ancestor in child_datasets for ancestor in ancestors)
class VMService(Service):

    @private
    async def periodic_snapshot_task_begin(self, task_id):
        # Resolve the snapshot task, then reuse the generic begin logic.
        task = await self.middleware.call(
            'pool.snapshottask.query', [['id', '=', task_id]], {'get': True}
        )
        return await self.query_snapshot_begin(task['dataset'], task['recursive'])

    @private
    async def query_snapshot_begin(self, dataset, recursive):
        """Map VM id -> DISK/RAW/CDROM devices living on `dataset` (about to be snapshotted)."""
        affected = collections.defaultdict(list)
        child_datasets = {
            ds['id']: ds for ds in await self.middleware.call(
                'pool.dataset.query', [['id', '^', f'{dataset}/']], {'extra': {'properties': []}}
            )
        }
        # VMs that opted out of snapshot suspension (or are inactive) are skipped.
        skip_vms = await self.get_vms_to_ignore_for_querying_attachments(
            True, [['suspend_on_snapshot', '=', False]]
        )
        devices = await self.middleware.call(
            'vm.device.query', [
                ['dtype', 'in', ('DISK', 'RAW', 'CDROM')],
                ['vm', 'nin', skip_vms],
            ]
        )
        dataset_path = os.path.join('/mnt', dataset)
        for device in devices:
            path = device['attributes'].get('path')
            if not path:
                continue
            if path.startswith('/dev/zvol'):
                # Normalise zvol device paths to their /mnt dataset location.
                path = os.path.join('/mnt', zvol_path_to_name(path))
            if await determine_recursive_search(recursive, device, child_datasets):
                if await self.middleware.call('filesystem.is_child', path, dataset_path):
                    affected[device['vm']].append(device)
            elif dataset_path == path:
                affected[device['vm']].append(device)
        return affected

    @private
    async def get_vms_to_ignore_for_querying_attachments(self, enabled, extra_filters=None):
        # enabled=True -> ignore VMs that are NOT in an active state, and vice versa.
        filters = [('status.state', 'nin' if enabled else 'in', ACTIVE_STATES)]
        filters = filters + (extra_filters or [])
        return {vm['id']: vm for vm in await self.middleware.call('vm.query', filters)}
class VMFSAttachmentDelegate(FSAttachmentDelegate):
    name = 'vm'
    title = 'VM'

    async def query(self, path, enabled, options=None):
        """Return [{'id', 'name'}] of VMs having a DISK/RAW/CDROM device under `path`."""
        matches = []
        ignored = await self.middleware.call(
            'vm.get_vms_to_ignore_for_querying_attachments', enabled
        )
        for device in await self.middleware.call('datastore.query', 'vm.device'):
            if device['dtype'] not in ('DISK', 'RAW', 'CDROM') or device['vm']['id'] in ignored:
                continue
            disk = device['attributes'].get('path')
            if not disk:
                continue
            if disk.startswith('/dev/zvol'):
                # Translate zvol device nodes to their /mnt dataset path.
                disk = os.path.join('/mnt', zvol_path_to_name(disk))
            if await self.middleware.call('filesystem.is_child', disk, path):
                entry = {
                    'id': device['vm'].get('id'),
                    'name': device['vm'].get('name'),
                }
                # A VM may have several matching devices; report it only once.
                if entry not in matches:
                    matches.append(entry)
        return matches

    async def delete(self, attachments):
        # Best effort: failing to stop one VM must not abort the rest.
        for attachment in attachments:
            try:
                await self.middleware.call('vm.stop', attachment['id'])
            except Exception:
                self.middleware.logger.warning('Unable to vm.stop %r', attachment['id'])

    async def toggle(self, attachments, enabled):
        action = 'vm.start' if enabled else 'vm.stop'
        for attachment in attachments:
            try:
                await self.middleware.call(action, attachment['id'])
            except Exception:
                self.middleware.logger.warning('Unable to %s %r', action, attachment['id'])

    async def stop(self, attachments):
        await self.toggle(attachments, False)

    async def start(self, attachments):
        await self.toggle(attachments, True)
class VMPortDelegate(PortDelegate):
    name = 'vm devices'
    namespace = 'vm.device'
    title = 'VM Device Service'

    async def get_ports(self):
        # Each DISPLAY device binds its address on both the display and web ports.
        vm_names = {vm['id']: vm['name'] for vm in await self.middleware.call('vm.query')}
        displays = await self.middleware.call('vm.device.query', [['dtype', '=', 'DISPLAY']])
        return [
            {
                'description': f'{vm_names[device["vm"]]!r} VM',
                'ports': [
                    (device['attributes']['bind'], device['attributes']['port']),
                    (device['attributes']['bind'], device['attributes']['web_port']),
                ],
            }
            for device in displays
        ]
# Plugin entry point. The FS delegate registration is scheduled as a background
# task (presumably to avoid blocking startup on pool.dataset -- confirm), while
# the port delegate registration is awaited directly.
async def setup(middleware):
middleware.create_task(
middleware.call('pool.dataset.register_attachment_delegate', VMFSAttachmentDelegate(middleware))
)
await middleware.call('port.register_attachment_delegate', VMPortDelegate(middleware))
| 5,702 | Python | .tac | 122 | 36.409836 | 122 | 0.59964 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,406 | attachments.py | truenas_middleware/src/middlewared/middlewared/plugins/virt/attachments.py | from typing import TYPE_CHECKING
from middlewared.common.attachment import FSAttachmentDelegate
from middlewared.common.ports import PortDelegate
if TYPE_CHECKING:
from middlewared.main import Middleware
class VirtFSAttachmentDelegate(FSAttachmentDelegate):
    name = 'virt'
    title = 'Virtualization'

    async def query(self, path, enabled, options=None):
        """List instances attached to `path` (all instances if `path` is the virt pool root)."""
        config = await self.middleware.call('virt.global.config')
        pool_root = f'/mnt/{config["pool"]}'
        attached = []
        for instance in await self.middleware.call('virt.instance.query'):
            if path == pool_root:
                # Every instance lives on the virt pool, so all are attached.
                matched = True
            else:
                matched = False
                for device in await self.middleware.call(
                    'virt.instance.device_list', instance['id']
                ):
                    if device['dev_type'] != 'DISK' or device['source'] is None:
                        continue
                    if await self.middleware.call('filesystem.is_child', device['source'], path):
                        matched = True
                        break
            if matched:
                attached.append({'id': instance['id'], 'name': instance['name']})
        return attached

    async def delete(self, attachments):
        # Unsetting the virt pool detaches everything at once.
        if attachments:
            job = await self.middleware.call('virt.global.update', {'pool': ''})
            await job.wait(raise_error=True)

    async def toggle(self, attachments, enabled):
        action = 'start' if enabled else 'stop'
        for attachment in attachments:
            try:
                job = await self.middleware.call(f'virt.instance.{action}', attachment['id'])
                await job.wait(raise_error=True)
            except Exception as e:
                self.middleware.logger.warning('Unable to %s %r: %s', action, attachment['id'], e)

    async def stop(self, attachments):
        await self.toggle(attachments, False)

    async def start(self, attachments):
        await self.toggle(attachments, True)
class VirtPortDelegate(PortDelegate):
    name = 'virt devices'
    namespace = 'virt.device'
    title = 'Virtualization Device'

    async def get_ports(self):
        # PROXY devices listen on their source port on all addresses (v4 + v6).
        ports = []
        for instance in await self.middleware.call('virt.instance.query'):
            proxy_ports = []
            for device in await self.middleware.call(
                'virt.instance.device_list', instance['id']
            ):
                if device['dev_type'] == 'PROXY':
                    proxy_ports.append(('0.0.0.0', device['source_port']))
                    proxy_ports.append(('::', device['source_port']))
            if proxy_ports:
                ports.append({
                    'description': f'{instance["id"]!r} instance',
                    'ports': proxy_ports,
                })
        return ports
# Plugin entry point. The FS delegate registration is scheduled as a background
# task (presumably so startup does not block on pool.dataset -- confirm), while
# the port delegate registration is awaited directly.
async def setup(middleware: 'Middleware'):
middleware.create_task(
middleware.call(
'pool.dataset.register_attachment_delegate',
VirtFSAttachmentDelegate(middleware),
)
)
await middleware.call('port.register_attachment_delegate', VirtPortDelegate(middleware))
| 3,198 | Python | .tac | 73 | 31.90411 | 98 | 0.581537 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,407 | test_attachment_querying.py | truenas_middleware/tests/api2/test_attachment_querying.py | #!/usr/bin/env python3
import os
import sys
from pytest_dependency import depends
sys.path.append(os.getcwd())
from middlewared.test.integration.assets.nfs import nfs_share
from middlewared.test.integration.assets.pool import dataset
from middlewared.test.integration.utils import call, client
PARENT_DATASET = 'test_parent'
CHILD_DATASET = f'{PARENT_DATASET}/child_dataset'
# Integration test: an NFS share on a parent dataset must be reported as an
# attachment of the parent, and of a child only when check_parent=True.
# NOTE(review): the `request` fixture and the file's `depends` import look
# unused here -- possibly leftovers from a previous pytest-dependency setup.
def test_attachment_with_child_path(request):
with dataset(PARENT_DATASET) as parent_dataset:
parent_path = f'/mnt/{parent_dataset}'
# A freshly created dataset has no attachments.
assert call('pool.dataset.attachments_with_path', parent_path) == []
with nfs_share(parent_dataset):
# Sharing the parent over NFS must show up as an attachment.
attachments = call('pool.dataset.attachments_with_path', parent_path)
assert len(attachments) > 0, attachments
assert attachments[0]['type'] == 'NFS Share', attachments
with dataset(CHILD_DATASET) as child_dataset:
child_path = f'/mnt/{child_dataset}'
# The child itself is not shared...
attachments = call('pool.dataset.attachments_with_path', child_path)
assert len(attachments) == 0, attachments
# ...but with check_parent=True the parent's share is reported.
attachments = call('pool.dataset.attachments_with_path', child_path, True)
assert len(attachments) == 1, attachments
assert attachments[0]['type'] == 'NFS Share', attachments
| 1,329 | Python | .tac | 25 | 44.88 | 90 | 0.68779 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,408 | test_boot_attach_replace_detach.py | truenas_middleware/tests/api2/test_boot_attach_replace_detach.py | import pytest
from middlewared.test.integration.utils import call
from auto_config import ha
if not ha:
# the HA VMs only have 1 extra disk at time
# of writing this. QE is aware and is working
# on adding more disks to them so in the meantime
# we have to skip this test since it will fail
# 100% of the time on HA VMs.
@pytest.mark.timeout(600)
# Attach a second disk to the boot pool, replace it, then detach the
# replacement -- the pool must end with a single disk again.
def test_boot_attach_replace_detach():
existing_disks = call("boot.get_disks")
assert len(existing_disks) == 1
unused = call("disk.get_unused")
to_attach = unused[0]["name"]
replace_with = unused[1]["name"]
# Attach a disk and wait for resilver to finish
call("boot.attach", to_attach, job=True)
# Poll until the resilver scan is no longer running.
while True:
state = call("boot.get_state")
if not (
state["scan"] and
state["scan"]["function"] == "RESILVER" and
state["scan"]["state"] == "SCANNING"
):
break
# Attaching turns the boot vdev into a two-way mirror; both sides ONLINE.
assert state["topology"]["data"][0]["type"] == "MIRROR"
assert state["topology"]["data"][0]["children"][0]["status"] == "ONLINE"
to_replace = state["topology"]["data"][0]["children"][1]["name"]
assert to_replace.startswith(to_attach)
assert state["topology"]["data"][0]["children"][1]["status"] == "ONLINE"
# Replace newly attached disk
call("boot.replace", to_replace, replace_with, job=True)
# Resilver is a part of replace routine
state = call("boot.get_state")
assert state["topology"]["data"][0]["type"] == "MIRROR"
assert state["topology"]["data"][0]["children"][0]["status"] == "ONLINE"
to_detach = state["topology"]["data"][0]["children"][1]["name"]
assert to_detach.startswith(replace_with)
assert state["topology"]["data"][0]["children"][1]["status"] == "ONLINE"
# Detach replaced disk, returning the pool to its initial state
call("boot.detach", to_detach)
assert len(call("boot.get_disks")) == 1
| 2,053 | Python | .tac | 43 | 38.790698 | 80 | 0.596192 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,409 | test_pool_attach.py | truenas_middleware/tests/api2/test_pool_attach.py | from middlewared.test.integration.assets.pool import another_pool
from middlewared.test.integration.utils import call, ssh
# Integration test: attach an extra disk to an existing RAIDZ1 vdev
# (RAIDZ expansion) and verify the expansion finishes.
def test_attach_raidz1_vdev():
# Build a pool out of two 3-disk RAIDZ1 vdevs.
with another_pool(topology=(6, lambda disks: {
"data": [
{
"type": "RAIDZ1",
"disks": disks[0:3]
},
{
"type": "RAIDZ1",
"disks": disks[3:6]
},
],
})) as pool:
disk = call("disk.get_unused")[0]["name"]
# Expand the first RAIDZ1 vdev with one more disk.
call("pool.attach", pool["id"], {
"target_vdev": pool["topology"]["data"][0]["guid"],
"new_disk": disk,
}, job=True)
pool = call("pool.get_instance", pool["id"])
assert pool["expand"]["state"] == "FINISHED"
| 766 | Python | .tac | 22 | 24.545455 | 65 | 0.5 | truenas/middleware | 2,277 | 481 | 13 | LGPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,410 | system-lock.py | fossasia_meilix-systemlock/systemlock_0.1/usr/bin/system-lock.py | #!/usr/bin/python
from gi.repository import Gtk
from gi.repository import Gio
import sys
import os
import subprocess
# GTK application with a small window to "freeze" (lock) or "unfreeze" the
# system; the actual work is done by /etc/system-lock/{lock,unlock}.sh run via
# gksu after the user confirms a restart.
class SystemLock(Gtk.Application):
# Current radio selection: True -> freeze, False -> unfreeze.
lock = True
# Pending action for the confirmation dialog: 0 -> lock.sh, 1 -> unlock.sh.
command = 0
# Shared confirmation dialog, populated once by message_dialog().
message = Gtk.Dialog()
def do_activate(self):
window = Gtk.Window(application=self)
window.set_title("System Lock")
self.initWindow(window)
window.show_all()
# Build the main window: two radio buttons, a status label and Cancel/Save.
def initWindow(self,window):
window.set_default_size(300,200)
window.set_resizable(False)
#window.set_margin_top(50)
#window.set_margin_bottom(50)
main = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=6)
box1 = Gtk.Box(spacing=6)
freeze = Gtk.RadioButton(label="Freeze System")
freeze.connect("toggled",self.toggled_radio_lock)
ufreeze = Gtk.RadioButton.new_from_widget(freeze)
ufreeze.set_label("Unfreeze System")
ufreeze.connect("toggled",self.toggled_radio_unlock)
box2 = Gtk.Box(spacing=6)
save = Gtk.Button(label = "Save")
save.connect("clicked",self.on_save_clicked)
cancel = Gtk.Button(label ="Cancel")
cancel.connect("clicked",self.on_cancel_clicked)
box2.add(cancel)
box2.add(save)
main.set_margin_left(20)
main.set_margin_top(20)
main.set_margin_right(20)
main.set_margin_bottom(20)
main.add(freeze)
main.add(ufreeze)
status = self.check_freeze()
main.add(Gtk.Label("Status: "+status))
main.add(box2)
window.add(main)
window.set_default_icon_from_file("/etc/system-lock/lock-icon.png")
window.set_position(Gtk.WindowPosition.CENTER)
self.message_dialog();
# The two radio handlers keep self.lock in sync with the active button.
def toggled_radio_lock(self,button):
if button.get_active():
self.lock = True
else:
self.lock = False
def toggled_radio_unlock(self,button):
if button.get_active():
self.lock = False
else:
self.lock = True
# The /etc/.ofris marker's presence is taken to mean "locked"
# (presumably dropped by lock.sh -- confirm in the shell scripts).
def check_freeze(self):
if (os.path.exists("/etc/.ofris")):
return "System is locked"
else:
return "System is not locked"
# Save: record the requested action and show the restart confirmation dialog.
def on_save_clicked(self,widget):
if self.lock:
#win = self.create_spinner()
#win.show_all()
#self.spin = Gtk.Spinner();
#self.spin.start()
#self.add(self.spin)
#subprocess.call(['gksu','sh lock.sh'])
self.command = 0
#self.message_dialog()
#win.destroy()
self.message.show_all()
else:
self.command = 1
self.message.show_all()
#self.message_dialog()
#subprocess.call(['gksu','sh unlock.sh'])
# One-time setup of the shared confirmation dialog (buttons + response hook).
def message_dialog(self):
content_area = self.message.get_content_area()
content_area.add(Gtk.Label("You need to restart the computer for the change to take effect."))
self.message.add_button(button_text="Cancel",response_id=Gtk.ResponseType.CANCEL)
self.message.add_button(button_text="Save and Restart",response_id=Gtk.ResponseType.OK)
self.message.connect("response", self.on_response)
#self.message.show_all()
# On OK, run the selected script through gksu; the dialog is hidden either way.
def on_response(self, widget, response_id):
# destroy the widget (the dialog) when the function on_response() is called
if (response_id == Gtk.ResponseType.CANCEL):
widget.hide()
elif response_id == Gtk.ResponseType.OK:
# win = self.create_spinner()
#win.show_all()
if (self.command == 0):
subprocess.Popen(['gksu','sh /etc/system-lock/lock.sh'])
#win.destroy();
else:
subprocess.Popen(['gksu','sh /etc/system-lock/unlock.sh'])
#win.destroy();
widget.hide()
#//p.communicate()
# Builds a borderless "Please Wait" window; currently unreferenced except by
# the commented-out code above (NOTE(review): dead code candidate).
def create_spinner(self):
win = Gtk.Window();
win.set_default_size(100,50)
main = Gtk.Box(orientation=Gtk.Orientation.VERTICAL,spacing=6)
main.set_margin_left(10)
main.set_margin_top(10)
main.set_margin_right(10)
main.set_margin_bottom(10)
win.set_position(Gtk.WindowPosition.CENTER)
win.set_decorated(False)
win.set_keep_above(True)
win.set_skip_taskbar_hint(True)
win.set_modal(True)
label = Gtk.Label();
label.set_markup("<big>Please Wait !</big>");
main.add(label)
win.add(main)
self.message.destroy()
return win
def on_cancel_clicked(self,widget):
sys.exit()
# Run the application and propagate its exit status to the shell.
app = SystemLock();
exit_status = app.run(sys.argv)
sys.exit(exit_status)
| 4,852 | Python | .py | 125 | 28.248 | 102 | 0.580734 | fossasia/meilix-systemlock | 1,539 | 18 | 11 | GPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,411 | copyright | fossasia_meilix-systemlock/systemlock_0.1/debian/copyright | Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
Upstream-Name: systemlock
Source:
Files: *
Copyright: 2013 Mario Behling <mb@mariobehling.de>
License: GPL-3+
Files: debian/*
Copyright: 2013 Mario Behling <mb@mariobehling.de>
License: GPL-2+
License: GPL-2+
On Debian systems, the full text of the GNU General Public License version 2 can
be found in the `/usr/share/common-licenses/GPL-2' file.
License: GPL-3+
This package is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
.
This package is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
.
You should have received a copy of the GNU General Public License
along with this package; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
.
On Debian systems, the full text of the GNU General Public License version 3 can
be found in the `/usr/share/common-licenses/GPL-3' file.
| 1,280 | Python | .py | 29 | 42.275862 | 82 | 0.787319 | fossasia/meilix-systemlock | 1,539 | 18 | 11 | GPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,412 | system-lock.py | fossasia_meilix-systemlock/systemlock_0.1/debian/systemlock/usr/bin/system-lock.py | #!/usr/bin/python
from gi.repository import Gtk
from gi.repository import Gio
import sys
import os
import subprocess
# GTK application with a small window to "freeze" (lock) or "unfreeze" the
# system; the actual work is done by /etc/system-lock/{lock,unlock}.sh run via
# gksu after the user confirms a restart. (Duplicate of the packaged copy.)
class SystemLock(Gtk.Application):
# Current radio selection: True -> freeze, False -> unfreeze.
lock = True
# Pending action for the confirmation dialog: 0 -> lock.sh, 1 -> unlock.sh.
command = 0
# Shared confirmation dialog, populated once by message_dialog().
message = Gtk.Dialog()
def do_activate(self):
window = Gtk.Window(application=self)
window.set_title("System Lock")
self.initWindow(window)
window.show_all()
# Build the main window: two radio buttons, a status label and Cancel/Save.
def initWindow(self,window):
window.set_default_size(300,200)
window.set_resizable(False)
#window.set_margin_top(50)
#window.set_margin_bottom(50)
main = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=6)
box1 = Gtk.Box(spacing=6)
freeze = Gtk.RadioButton(label="Freeze System")
freeze.connect("toggled",self.toggled_radio_lock)
ufreeze = Gtk.RadioButton.new_from_widget(freeze)
ufreeze.set_label("Unfreeze System")
ufreeze.connect("toggled",self.toggled_radio_unlock)
box2 = Gtk.Box(spacing=6)
save = Gtk.Button(label = "Save")
save.connect("clicked",self.on_save_clicked)
cancel = Gtk.Button(label ="Cancel")
cancel.connect("clicked",self.on_cancel_clicked)
box2.add(cancel)
box2.add(save)
main.set_margin_left(20)
main.set_margin_top(20)
main.set_margin_right(20)
main.set_margin_bottom(20)
main.add(freeze)
main.add(ufreeze)
status = self.check_freeze()
main.add(Gtk.Label("Status: "+status))
main.add(box2)
window.add(main)
window.set_default_icon_from_file("/etc/system-lock/lock-icon.png")
window.set_position(Gtk.WindowPosition.CENTER)
self.message_dialog();
# The two radio handlers keep self.lock in sync with the active button.
def toggled_radio_lock(self,button):
if button.get_active():
self.lock = True
else:
self.lock = False
def toggled_radio_unlock(self,button):
if button.get_active():
self.lock = False
else:
self.lock = True
# The /etc/.ofris marker's presence is taken to mean "locked"
# (presumably dropped by lock.sh -- confirm in the shell scripts).
def check_freeze(self):
if (os.path.exists("/etc/.ofris")):
return "System is locked"
else:
return "System is not locked"
# Save: record the requested action and show the restart confirmation dialog.
def on_save_clicked(self,widget):
if self.lock:
#win = self.create_spinner()
#win.show_all()
#self.spin = Gtk.Spinner();
#self.spin.start()
#self.add(self.spin)
#subprocess.call(['gksu','sh lock.sh'])
self.command = 0
#self.message_dialog()
#win.destroy()
self.message.show_all()
else:
self.command = 1
self.message.show_all()
#self.message_dialog()
#subprocess.call(['gksu','sh unlock.sh'])
# One-time setup of the shared confirmation dialog (buttons + response hook).
def message_dialog(self):
content_area = self.message.get_content_area()
content_area.add(Gtk.Label("You need to restart the computer for the change to take effect."))
self.message.add_button(button_text="Cancel",response_id=Gtk.ResponseType.CANCEL)
self.message.add_button(button_text="Save and Restart",response_id=Gtk.ResponseType.OK)
self.message.connect("response", self.on_response)
#self.message.show_all()
# On OK, run the selected script through gksu; the dialog is hidden either way.
def on_response(self, widget, response_id):
# destroy the widget (the dialog) when the function on_response() is called
if (response_id == Gtk.ResponseType.CANCEL):
widget.hide()
elif response_id == Gtk.ResponseType.OK:
# win = self.create_spinner()
#win.show_all()
if (self.command == 0):
subprocess.Popen(['gksu','sh /etc/system-lock/lock.sh'])
#win.destroy();
else:
subprocess.Popen(['gksu','sh /etc/system-lock/unlock.sh'])
#win.destroy();
widget.hide()
#//p.communicate()
# Builds a borderless "Please Wait" window; currently unreferenced except by
# the commented-out code above (NOTE(review): dead code candidate).
def create_spinner(self):
win = Gtk.Window();
win.set_default_size(100,50)
main = Gtk.Box(orientation=Gtk.Orientation.VERTICAL,spacing=6)
main.set_margin_left(10)
main.set_margin_top(10)
main.set_margin_right(10)
main.set_margin_bottom(10)
win.set_position(Gtk.WindowPosition.CENTER)
win.set_decorated(False)
win.set_keep_above(True)
win.set_skip_taskbar_hint(True)
win.set_modal(True)
label = Gtk.Label();
label.set_markup("<big>Please Wait !</big>");
main.add(label)
win.add(main)
self.message.destroy()
return win
def on_cancel_clicked(self,widget):
sys.exit()
# Run the application and propagate its exit status to the shell.
app = SystemLock();
exit_status = app.run(sys.argv)
sys.exit(exit_status)
| 4,852 | Python | .py | 125 | 28.248 | 102 | 0.580734 | fossasia/meilix-systemlock | 1,539 | 18 | 11 | GPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,413 | copyright | fossasia_meilix-systemlock/systemlock_0.1/debian/systemlock/usr/share/doc/systemlock/copyright | Format: http://dep.debian.net/deps/dep5
Upstream-Name: systemlock
Source: <url://example.com>
Files: *
Copyright: <years> <put author's name and email here>
<years> <likewise for another author>
License: <special license>
<Put the license of the package here indented by 1 space>
<This follows the format of Description: lines in control file>
.
<Including paragraphs>
# If you want to use GPL v2 or later for the /debian/* files use
# the following clauses, or change it to suit. Delete these two lines
Files: debian/*
Copyright: 2012 x-mario <vanhonit@gmail.com>
License: GPL-2+
This package is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
.
This package is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>
.
On Debian systems, the complete text of the GNU General
Public License version 2 can be found in "/usr/share/common-licenses/GPL-2".
# Please also look if there are files or directories which have a
# different copyright/license attached and list them here.
| 1,487 | Python | .py | 33 | 43.030303 | 77 | 0.777395 | fossasia/meilix-systemlock | 1,539 | 18 | 11 | GPL-3.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,414 | setup.py | mushorg_conpot/setup.py | from setuptools import setup, find_packages
import conpot
# Package metadata for the conpot ICS honeypot.
setup(
    name="conpot",
    version=conpot.__version__,
    packages=find_packages(exclude=["*.pyc"]),
    python_requires=">=3.6",
    scripts=["bin/conpot"],
    url="http://conpot.org",
    license="GPL 2",
    author="MushMush Foundation",
    author_email="glastopf@public.honeynet.org",
    classifiers=[
        # Bug fix: "Development Status :: 6 - Beta" is not a registered trove
        # classifier (status 6 is "Mature"); "4 - Beta" is the valid beta value.
        "Development Status :: 4 - Beta",
        "Environment :: Console",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: GNU General Public License v2 (GPLv2)",
        "Operating System :: POSIX :: Linux",
        "Programming Language :: Python :: 3 :: Only",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python",
        "Topic :: Security",
    ],
    package_data={
        "": ["*.txt", "*.rst"],
        "conpot": ["conpot.cfg", "tests/data/*"],
    },
    keywords="ICS SCADA honeypot",
    include_package_data=True,
    long_description=open("README.md").read(),
    long_description_content_type="text/markdown",
    description="""Conpot is an ICS honeypot with the goal to collect intelligence about the motives and methods of adversaries targeting industrial control systems""",
    install_requires=open("requirements.txt").read().splitlines(),
)
| 1,305 | Python | .py | 34 | 32.529412 | 168 | 0.643307 | mushorg/conpot | 1,224 | 413 | 115 | GPL-2.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,415 | mac_addr.py | mushorg_conpot/conpot/utils/mac_addr.py | # Copyright (C) 2015 Adarsh Dinesh <adarshdinesh@gmail.com>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import logging
import subprocess
logger = logging.getLogger(__name__)
def _check_mac(iface, addr):
    """Return True if `iface` currently reports MAC address `addr` (per ifconfig output)."""
    s = subprocess.Popen(["ifconfig", iface], stdout=subprocess.PIPE)
    # Bug fix: Popen stdout yields bytes on Python 3; `addr in data` with a
    # str needle raised TypeError. Decode before searching.
    data = s.stdout.read().decode(errors="replace")
    return addr in data
def _is_dhcp(iface):
    """Return True if `iface` appears in the dhclient leases file (i.e. holds a DHCP lease)."""
    s = subprocess.Popen(
        ["cat", "/var/lib/dhcp/dhclient.leases"], stdout=subprocess.PIPE
    )
    # Bug fix: Popen stdout yields bytes on Python 3; `iface in data` with a
    # str needle raised TypeError. Decode before searching.
    data = s.stdout.read().decode(errors="replace")
    return iface in data
# Release the current DHCP lease, then request a fresh one on `iface`.
# NOTE(review): the Popen handles are not waited on, so the renewal runs
# asynchronously relative to the caller -- confirm this is intended.
def _renew_lease(iface):
subprocess.Popen(["dhclient", "-r"], stdout=subprocess.PIPE)
subprocess.Popen(["dhclient", iface], stdout=subprocess.PIPE)
def change_mac(iface=None, mac=None, config=None, revert=None):
    """Set the MAC address of *iface* to *mac* (requires root privileges).

    :param iface: interface name; read from *config* when that is given
    :param mac: new hardware address; read from *config* when that is given
    :param config: optional ConfigParser with a ``change_mac_addr`` section
    :param revert: truthy when called from revert_mac(); only alters logging
    """
    if config:
        iface = config.get("change_mac_addr", "iface")
        mac = config.get("change_mac_addr", "addr")
    # Changing MAC address and restarting network. The three `ip link`
    # commands must run strictly in sequence, so wait for each one; the old
    # fire-and-forget Popen calls raced against the _check_mac() below.
    for ip_cmd in (
        ["ip", "link", "set", iface, "down"],
        ["ip", "link", "set", "dev", iface, "address", mac],
        ["ip", "link", "set", iface, "up"],
    ):
        subprocess.Popen(
            ip_cmd,
            stderr=subprocess.STDOUT,
            stdout=subprocess.PIPE,
        ).wait()
    if _check_mac(iface, mac):
        if revert:
            logger.info("MAC address reverted for interface %s", iface)
        else:
            logger.info("MAC address of interface %s changed %s", iface, mac)
        if _is_dhcp(iface):
            _renew_lease(iface)
            logger.info("Interface has a DHCP lease, refreshed.")
    else:
        logger.warning("Could not change MAC address.")
def revert_mac(iface):
    """Restore the permanent (factory) MAC address of *iface*."""
    s = subprocess.Popen(["ethtool", "-P", iface], stdout=subprocess.PIPE)
    # ethtool output is bytes under Python 3 -- decode before splitting,
    # otherwise bytes.split(" ") raises TypeError. The address is the third
    # whitespace-separated token (e.g. "Permanent address: 00:11:22:33:44:55").
    mac = s.stdout.read().decode().split(" ")[2].strip()
    change_mac(iface, mac, revert=True)
| 2,696 | Python | .py | 72 | 32.027778 | 77 | 0.668199 | mushorg/conpot | 1,224 | 413 | 115 | GPL-2.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,416 | networking.py | mushorg_conpot/conpot/utils/networking.py | import socket
from datetime import datetime
from slugify import slugify
def sanitize_file_name(name, host, port):
    """
    Ensure that file_name is legal. Slug the filename and store it onto the server.
    This would ensure that there are no duplicates as far as writing a file is concerned. Also client addresses are
    noted so that one can verify which client uploaded the file.
    :param name: Name of the file
    :param host: host/client address
    :param port: port/client port
    :type name: str
    """
    timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    return "({}, {})-{}-{}".format(host, port, timestamp, slugify(name))
# py3 chr
def chr_py3(x):
    """Python-3 analogue of chr(): wrap a single byte value in a bytearray."""
    return bytearray([x])
# convert a string to an ascii byte string
def str_to_bytes(x):
    """Coerce *x* to an ASCII byte string; bytes objects pass through as-is."""
    if isinstance(x, bytes):
        return x
    return str(x).encode("ascii")
# https://www.bountysource.com/issues/4335201-ssl-broken-for-python-2-7-9
# Kudos to Eugene for this workaround!
def fix_sslwrap():
    """Monkey-patch a ``sslwrap`` shim back into the ``_ssl`` C module.

    Python 2.7.9 removed the private ``_ssl.sslwrap`` helper that some
    libraries relied on (see the linked issue above). On interpreters where
    ``sslwrap`` still exists -- or was never referenced, e.g. Python 3 --
    the ``hasattr`` guard at the bottom makes this a no-op.
    """
    # Re-add sslwrap to Python 2.7.9
    import inspect

    __ssl__ = __import__("ssl")

    try:
        _ssl = __ssl__._ssl
    except AttributeError:
        # older layout exposed the C module as _ssl2
        _ssl = __ssl__._ssl2

    def new_sslwrap(
        sock,
        server_side=False,
        keyfile=None,
        certfile=None,
        cert_reqs=__ssl__.CERT_NONE,
        ssl_version=__ssl__.PROTOCOL_SSLv23,
        ca_certs=None,
        ciphers=None,
    ):
        # Recreate the old sslwrap behaviour on top of the SSLContext API.
        context = __ssl__.SSLContext(ssl_version)
        context.verify_mode = cert_reqs or __ssl__.CERT_NONE
        if ca_certs:
            context.load_verify_locations(ca_certs)
        if certfile:
            context.load_cert_chain(certfile, keyfile)
        if ciphers:
            context.set_ciphers(ciphers)
        # sslwrap was invoked from SSLSocket.__init__; pull that caller's
        # `self` off the frame so the context wraps the right SSLSocket.
        caller_self = inspect.currentframe().f_back.f_locals["self"]
        return context._wrap_socket(sock, server_side=server_side, ssl_sock=caller_self)

    if not hasattr(_ssl, "sslwrap"):
        _ssl.sslwrap = new_sslwrap
def get_interface_ip(destination_ip: str):
    """Return the local interface IP that would be used to reach *destination_ip*.

    Connecting a UDP socket sends no packets; it merely selects a route, so
    this works without generating network traffic.

    :raises OSError: if no route to *destination_ip* exists
    """
    # returns interface ip from socket in case direct udp socket access not possible
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        s.connect((destination_ip, 80))
        return s.getsockname()[0]
    finally:
        # close even when connect() raises -- the old code leaked the socket
        s.close()
| 2,302 | Python | .py | 68 | 27.441176 | 115 | 0.635257 | mushorg/conpot | 1,224 | 413 | 115 | GPL-2.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,417 | greenlet.py | mushorg_conpot/conpot/utils/greenlet.py | # Copyright (C) 2020 srenfo
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import os
from types import SimpleNamespace
from gevent import Greenlet, sleep
from gevent.event import Event
import conpot
from conpot import core, protocols
class ServiceGreenlet(Greenlet):
    """Greenlet that records, via an Event, whether it was ever scheduled.

    ``scheduled_once`` lets callers block until run() has actually started,
    e.g. so a test knows the wrapped server's start() was entered.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.scheduled_once = Event()

    def run(self):
        # Set the flag before delegating, so waiters wake up even if the
        # wrapped callable blocks forever (as long-running servers do).
        self.scheduled_once.set()
        super().run()
def spawn_startable_greenlet(instance, *args, **kwargs):
    """Spawn ``instance.start(*args, **kwargs)`` in a ServiceGreenlet.

    The greenlet is named after the instance's class for easier debugging.
    """
    service = ServiceGreenlet.spawn(instance.start, *args, **kwargs)
    service.name = type(instance).__name__
    return service
def spawn_test_server(server_class, template, protocol, args=None, port=0):
    """Start *server_class* against a bundled template and return (server, greenlet).

    Blocks until the server greenlet has been scheduled at least once.
    """
    base_dir = os.path.dirname(conpot.__file__)
    template_directory = f"{base_dir}/templates/{template}"
    core.get_databus().initialize(f"{template_directory}/template.xml")
    server = server_class(
        template=f"{template_directory}/{protocol}/{protocol}.xml",
        template_directory=template_directory,
        args=args,
    )
    greenlet = spawn_startable_greenlet(server, "127.0.0.1", port)
    greenlet.scheduled_once.wait()
    return server, greenlet
def teardown_test_server(server, greenlet):
    """Stop a server created by spawn_test_server and join its greenlet."""
    server.stop()
    # get() joins the greenlet and re-raises any exception it died with.
    greenlet.get()
# this is really a test helper but start_protocol.py wants to use it too
def init_test_server_by_name(name, port=0):
    """Spawn the protocol server registered under *name* for use in tests.

    Returns the (server, greenlet) pair from spawn_test_server.
    """
    server_class = protocols.name_mapping[name]
    special_templates = {
        "guardian_ast": "guardian_ast",
        "IEC104": "IEC104",
        "kamstrup_management": "kamstrup_382",
        "kamstrup_meter": "kamstrup_382",
    }
    template = special_templates.get(name, "default")

    # Required by SNMP
    class Args(SimpleNamespace):
        mibcache = None

    if name in ("ftp", "tftp"):
        core.initialize_vfs()

    server, greenlet = spawn_test_server(
        server_class, template, name, args=Args(), port=port
    )

    # special case protocol with more complex start() logic
    # TODO: add serve_forever-Event() to servers to fix this properly
    if name == "http":
        sleep(0.5)

    return server, greenlet
| 2,826 | Python | .py | 70 | 35.857143 | 75 | 0.708059 | mushorg/conpot | 1,224 | 413 | 115 | GPL-2.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,418 | ext_ip.py | mushorg_conpot/conpot/utils/ext_ip.py | # Copyright (C) 2014 Lukas Rist <glaslos@gmail.com>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import json
import logging
import socket
import requests
from requests.exceptions import Timeout, ConnectionError
logger = logging.getLogger(__name__)
def _verify_address(addr):
try:
socket.inet_aton(addr)
return True
except (socket.error, UnicodeEncodeError, TypeError):
return False
def _fetch_data(urls):
    """Return the first valid public IP fetched from *urls*, or None.

    Each URL is expected to answer with a plain-text IPv4 address; invalid
    or failing responses are skipped with a warning.
    """
    # we only want warning+ messages from the requests module
    logging.getLogger("requests").setLevel(logging.WARNING)
    for url in urls:
        try:
            req = requests.get(url, timeout=5)
            if req.status_code != 200:
                # funnel non-200 responses into the same warning path
                raise ConnectionError
            # req.text is always a str, so (unlike the old code) only
            # address validity needs checking -- it can never be None.
            data = req.text.strip()
            if _verify_address(data):
                return data
        except (Timeout, ConnectionError):
            logger.warning("Could not fetch public ip from %s", url)
    return None
def get_ext_ip(config=None, urls=None):
    """Fetch and return this host's public IP address, or None on failure.

    When *config* is given, the URL list is read from its
    ``fetch_public_ip`` section instead of the *urls* argument.
    """
    if config:
        urls = json.loads(config.get("fetch_public_ip", "urls"))
    public_ip = _fetch_data(urls)
    if not public_ip:
        logger.warning("Could not fetch public ip: %s", public_ip)
    else:
        logger.info("Fetched %s as external ip.", public_ip)
    return public_ip
if __name__ == "__main__":
    # Manual smoke test: 127.0.0.1:8000 will normally be unreachable and
    # should only log a warning while the first URL yields the address.
    print((get_ext_ip(urls=["https://api.ipify.org", "http://127.0.0.1:8000"])))
| 2,164 | Python | .py | 56 | 32.732143 | 80 | 0.675727 | mushorg/conpot | 1,224 | 413 | 115 | GPL-2.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,419 | usage_simulator.py | mushorg_conpot/conpot/emulators/kamstrup/usage_simulator.py | # Copyright (C) 2014 Johnny Vestergaard <jkv@unixcluster.dk>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import logging
import gevent
import conpot.core as conpot_core
logger = logging.getLogger(__name__)
# Simulates power usage for a Kamstrup 382 meter
class UsageSimulator(object):
    """Background simulator that fakes live power usage for a Kamstrup 382 meter.

    Construction spawns :meth:`initialize`, which waits for the conpot
    databus, seeds local state from the meter registers, rebinds those
    registers to live getter callbacks, and finally starts the per-second
    accumulator loop in :meth:`usage_counter`.
    """

    def __init__(self, *args):
        # Polled by the accumulator loop; cleared by stop().
        self._enabled = True
        self.stopped = gevent.event.Event()
        # both highres, lowres will be calculated on the fly
        self.energy_in = 0
        self.energy_out = 0
        # p1, p2, p3
        self.voltage = [0, 0, 0]
        self.current = [0, 0, 0]
        self.power = [0, 0, 0]
        gevent.spawn(self.initialize)

    def usage_counter(self):
        """Accumulate consumed energy roughly once per second until stopped."""
        while self._enabled:
            # since this is gevent, this actually sleep for _at least_ 1 second
            # TODO: measure last entry and figure it out < jkv: Figure what out?!?
            gevent.sleep(1)
            for x in [0, 1, 2]:
                self.energy_in += int(self.power[x] * 0.0036)
        # ready for shutdown!
        self.stopped.set()

    def stop(self):
        """Stop the accumulator loop and block until it has fully exited."""
        self._enabled = False
        self.stopped.wait()

    def initialize(self):
        """Seed simulator state from the databus and install live getters."""
        # we need the databus initialized before we can probe values
        databus = conpot_core.get_databus()
        databus.initialized.wait()

        # accumulated counter
        energy_in_register = "register_13"
        self.energy_in = databus.get_value(energy_in_register)
        databus.set_value(energy_in_register, self._get_energy_in)
        databus.set_value("register_1", self._get_energy_in_lowres)

        energy_out_register = "register_14"
        self.energy_out = databus.get_value(energy_out_register)
        databus.set_value(energy_out_register, self._get_energy_out)
        databus.set_value("register_2", self._get_energy_out_lowres)

        volt_1_register = "register_1054"
        self.voltage[0] = databus.get_value(volt_1_register)
        databus.set_value(volt_1_register, self._get_voltage_1)

        volt_2_register = "register_1055"
        self.voltage[1] = databus.get_value(volt_2_register)
        databus.set_value(volt_2_register, self._get_voltage_2)

        volt_3_register = "register_1056"
        self.voltage[2] = databus.get_value(volt_3_register)
        databus.set_value(volt_3_register, self._get_voltage_3)

        current_1_register = "register_1076"
        self.current[0] = databus.get_value(current_1_register)
        databus.set_value(current_1_register, self._get_current_1)

        current_2_register = "register_1077"
        self.current[1] = databus.get_value(current_2_register)
        databus.set_value(current_2_register, self._get_current_2)

        current_3_register = "register_1078"
        self.current[2] = databus.get_value(current_3_register)
        databus.set_value(current_3_register, self._get_current_3)

        power_1_register = "register_1080"
        self.power[0] = databus.get_value(power_1_register)
        databus.set_value(power_1_register, self._get_power_1)

        power_2_register = "register_1081"
        self.power[1] = databus.get_value(power_2_register)
        databus.set_value(power_2_register, self._get_power_2)

        power_3_register = "register_1082"
        self.power[2] = databus.get_value(power_3_register)
        databus.set_value(power_3_register, self._get_power_3)

        gevent.spawn(self.usage_counter)

    # Getter callbacks bound to databus registers in initialize() above;
    # the databus calls these on every read so values stay live.

    def _get_energy_in(self):
        return self.energy_in

    def _get_energy_out(self):
        return self.energy_out

    def _get_energy_in_lowres(self):
        return self.energy_in / 1000

    def _get_energy_out_lowres(self):
        return self.energy_out / 1000

    def _get_voltage_1(self):
        return self.voltage[0]

    def _get_voltage_2(self):
        return self.voltage[1]

    def _get_voltage_3(self):
        return self.voltage[2]

    def _get_current_1(self):
        return self.current[0]

    def _get_current_2(self):
        return self.current[1]

    def _get_current_3(self):
        return self.current[2]

    def _get_power_1(self):
        return self.power[0]

    def _get_power_2(self):
        return self.power[1]

    def _get_power_3(self):
        return self.power[2]
| 4,908 | Python | .py | 112 | 36.482143 | 82 | 0.661487 | mushorg/conpot | 1,224 | 413 | 115 | GPL-2.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,420 | uptime.py | mushorg_conpot/conpot/emulators/misc/uptime.py | # Copyright (C) 2014 Johnny Vestergaard <jkv@unixcluster.dk>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import time
import calendar
class Uptime:
    """Tracks elapsed seconds since a fixed start time.

    A non-negative ``started`` epoch seeds the start; otherwise the current
    time is used.
    """

    def __init__(self, started=-1):
        now = calendar.timegm(time.gmtime())
        origin = started if started >= 0 else now
        self.started = calendar.timegm(time.gmtime(origin))

    def get_value(self):
        """Return whole seconds elapsed since ``self.started``."""
        return calendar.timegm(time.gmtime()) - self.started
| 1,122 | Python | .py | 27 | 38.037037 | 67 | 0.734189 | mushorg/conpot | 1,224 | 413 | 115 | GPL-2.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,421 | sysinfo.py | mushorg_conpot/conpot/emulators/misc/sysinfo.py | import socket
import psutil
from datetime import datetime
class CpuLoad:
    """Databus provider: system-wide CPU utilisation in percent."""

    def get_value(self):
        return psutil.cpu_percent()


class TotalRam:
    """Databus provider: total physical memory in KiB."""

    def get_value(self):
        # psutil reports bytes; divide by 1024 for KiB.
        return psutil.virtual_memory().total / 1024


class StorageSize:
    """Databus provider: total size of the root filesystem in KiB."""

    def get_value(self):
        return psutil.disk_usage("/").total / 1024


class StorageUsed:
    """Databus provider: used space on the root filesystem in KiB."""

    def get_value(self):
        return psutil.disk_usage("/").used / 1024


class BootTime:
    """Databus provider: whole seconds elapsed since the host booted."""

    def get_value(self):
        return int(datetime.now().timestamp() - psutil.boot_time())
class CurrentDatetime:
    """Databus provider: current local time as a device-style string."""

    def get_value(self):
        # e.g. "2024-01-31,13:45:07.0" -- literal ".0" suffix as before
        return "{:%Y-%m-%d,%H:%M:%S}.0".format(datetime.now())
class LocalIP:
    """Databus provider: this host's primary IP, resolved from its hostname."""

    def get_value(self):
        hostname = socket.gethostname()
        return socket.gethostbyname(hostname)
class PacketsSent:
    """Databus provider: packets sent, aggregated over all NICs since boot."""

    def get_value(self):
        return psutil.net_io_counters().packets_sent


class PacketsRecv:
    """Databus provider: packets received, aggregated over all NICs since boot."""

    def get_value(self):
        return psutil.net_io_counters().packets_recv


class BytesSent:
    """Databus provider: bytes sent, aggregated over all NICs since boot."""

    def get_value(self):
        return psutil.net_io_counters().bytes_sent


class BytesRecv:
    """Databus provider: bytes received, aggregated over all NICs since boot."""

    def get_value(self):
        return psutil.net_io_counters().bytes_recv
class TcpCurrEstab:
    """Databus provider: count of TCP connections in ESTABLISHED/CLOSE_WAIT.

    Counting both states mirrors SNMP's tcpCurrEstab object (RFC 1213).
    """

    def get_value(self):
        # sum() over a generator avoids materializing the throwaway list
        # that the previous len([...]) built.
        return sum(
            1
            for conn in psutil.net_connections("tcp")
            if conn.status in (psutil.CONN_ESTABLISHED, psutil.CONN_CLOSE_WAIT)
        )
| 1,403 | Python | .py | 45 | 24.666667 | 83 | 0.65967 | mushorg/conpot | 1,224 | 413 | 115 | GPL-2.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,422 | random.py | mushorg_conpot/conpot/emulators/misc/random.py | # Copyright (C) 2018 Billy Ferguson <wferguson@blueprintpower.com>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import random
class Random8BitRegisters:
    """Databus provider yielding eight cryptographically random bits per read."""

    def __init__(self):
        self.key_num = random.SystemRandom()

    def get_value(self):
        """Return a fresh list of eight random 0/1 values."""
        return [self.key_num.randint(0, 1) for _ in range(8)]
class Random16bitRegister:
    """Databus provider yielding a single cryptographically random bit per read."""

    def __init__(self):
        self.key_num = random.SystemRandom()

    def get_value(self):
        """Return a one-element list containing a random 0/1 value."""
        bit = self.key_num.randint(0, 1)
        return [bit]
| 1,173 | Python | .py | 28 | 38.642857 | 67 | 0.737258 | mushorg/conpot | 1,224 | 413 | 115 | GPL-2.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,423 | test_modbus_server.py | mushorg_conpot/conpot/tests/test_modbus_server.py | # Copyright (C) 2013 Johnny Vestergaard <jkv@unixcluster.dk>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from gevent import monkey
monkey.patch_all()
import unittest
import modbus_tk.defines as cst
import modbus_tk.modbus_tcp as modbus_tcp
from datetime import datetime
from modbus_tk.exceptions import ModbusError
from gevent import socket
import conpot.core as conpot_core
from conpot.protocols.modbus import modbus_server
from conpot.utils.greenlet import spawn_test_server, teardown_test_server
class TestModbusServer(unittest.TestCase):
    """Integration tests driving a live conpot Modbus/TCP server end-to-end."""

    def setUp(self):
        """Spawn a Modbus server from the default template on an OS-chosen port."""
        conpot_core.get_sessionManager().purge_sessions()
        self.modbus, self.greenlet = spawn_test_server(
            modbus_server.ModbusServer, "default", "modbus"
        )
        self.databus = conpot_core.get_databus()
        self.host = self.modbus.server.server_host
        self.port = self.modbus.server.server_port
        # We have to use different slave IDs under different modes. In tcp mode,
        # only 255 and 0 make sense. However, modbus_tcp.TcpMaster explicitly
        # ignores slave ID 0. Therefore we can only use 255 in tcp mode.
        self.target_slave_id = 1 if self.modbus.mode == "serial" else 255

    def tearDown(self):
        """Stop the server and join its greenlet."""
        teardown_test_server(self.modbus, self.greenlet)

    def test_read_coils(self):
        """
        Objective: Test if we can extract the expected bits from a slave using the modbus protocol.
        """
        self.databus.set_value(
            "memoryModbusSlave%dBlockA" % self.target_slave_id,
            [1 for b in range(0, 128)],
        )
        # create READ_COILS request
        master = modbus_tcp.TcpMaster(host=self.host, port=self.port)
        master.set_timeout(1.0)
        actual_bits = master.execute(
            slave=self.target_slave_id,
            function_code=cst.READ_COILS,
            starting_address=1,
            quantity_of_x=128,
        )
        # the test template sets all bits to 1 in the range 1-128
        expected_bits = [1 for b in range(0, 128)]
        self.assertSequenceEqual(actual_bits, expected_bits)

    def test_write_read_coils(self):
        """
        Objective: Test if we can change values using the modbus protocol.
        """
        master = modbus_tcp.TcpMaster(host=self.host, port=self.port)
        master.set_timeout(1.0)
        set_bits = [1, 0, 0, 1, 0, 0, 1, 1]
        # write 8 bits
        master.execute(
            slave=self.target_slave_id,
            function_code=cst.WRITE_MULTIPLE_COILS,
            starting_address=1,
            output_value=set_bits,
        )
        # read 8 bit
        actual_bit = master.execute(
            slave=self.target_slave_id,
            function_code=cst.READ_COILS,
            starting_address=1,
            quantity_of_x=8,
        )
        self.assertSequenceEqual(set_bits, actual_bit)

    def test_read_nonexistent_slave(self):
        """
        Objective: Test if the correct exception is raised when trying to read from nonexistent slave.
        """
        master = modbus_tcp.TcpMaster(host=self.host, port=self.port)
        master.set_timeout(1.0)
        # slave 5 is not defined in the default template
        with self.assertRaises(ModbusError) as cm:
            master.execute(
                slave=5,
                function_code=cst.READ_COILS,
                starting_address=1,
                quantity_of_x=1,
            )
        self.assertEqual(cm.exception.get_exception_code(), cst.SLAVE_DEVICE_FAILURE)

    def test_modbus_logging(self):
        """
        Objective: Test if modbus generates log messages as expected.
        Expected output is a dictionary with the following structure:
        {'timestamp': datetime.datetime(2013, 4, 23, 18, 47, 38, 532960),
         'remote': ('127.0.0.1', 60991),
         'data_type': 'modbus',
         'id': '01bd90d6-76f4-43cb-874f-5c8f254367f5',
         'data': {'function_code': 1,
                  'slave_id': 1,
                  'request': '0100010080',
                  'response': '0110ffffffffffffffffffffffffffffffff'}}
        """
        self.databus.set_value(
            "memoryModbusSlave%dBlockA" % self.target_slave_id,
            [1 for b in range(0, 128)],
        )
        master = modbus_tcp.TcpMaster(host=self.host, port=self.port)
        master.set_timeout(1.0)
        # issue request to modbus server
        master.execute(
            slave=self.target_slave_id,
            function_code=cst.READ_COILS,
            starting_address=1,
            quantity_of_x=128,
        )
        # extract the generated log entries
        log_queue = conpot_core.get_sessionManager().log_queue
        conn_log_item = log_queue.get(True, 2)
        conn_expected_payload = {"type": "NEW_CONNECTION"}
        self.assertDictEqual(conn_expected_payload, conn_log_item["data"])
        modbus_log_item = log_queue.get(True, 2)
        self.assertIsInstance(modbus_log_item["timestamp"], datetime)
        self.assertTrue("data" in modbus_log_item)
        # we expect session_id to be 36 characters long (32 x char, 4 x dashes)
        self.assertTrue(len(str(modbus_log_item["id"])), modbus_log_item)
        self.assertEqual("127.0.0.1", modbus_log_item["remote"][0])
        self.assertEqual("modbus", modbus_log_item["data_type"])
        # unit identifier in the MBAP header depends on the slave ID in use
        req = (
            "000100000006%s0100010080" % ("01" if self.target_slave_id == 1 else "ff")
        ).encode()
        # testing the actual modbus data
        modbus_expected_payload = {
            "function_code": 1,
            "slave_id": self.target_slave_id,
            "request": req,
            "response": b"0110ffffffffffffffffffffffffffffffff",
        }

        self.assertDictEqual(modbus_expected_payload, modbus_log_item["data"])

    def test_report_slave_id(self):
        """
        Objective: Test conpot for function code 17.
        """
        # Function 17 is not currently supported by modbus_tk
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.connect((self.host, self.port))
        s.sendall(b"\x00\x00\x00\x00\x00\x02\x01\x11")
        data = s.recv(1024)
        s.close()
        self.assertEqual(data, b"\x00\x00\x00\x00\x00\x06\x01\x11\x11\x01\x01\xff")

    def test_response_function_43_device_info(self):
        """Objective: Test the read-device-identification response (function 43)."""
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.connect((self.host, self.port))
        s.sendall(b"\x00\x01\x00\x00\x00\x05\x01\x2b\x0e\x01\x02")
        data = s.recv(1024)
        s.close()
        # vendor/product strings come from the default Siemens template
        self.assertTrue(b"SIMATIC" in data and b"Siemens" in data)
| 7,205 | Python | .py | 165 | 34.963636 | 102 | 0.638853 | mushorg/conpot | 1,224 | 413 | 115 | GPL-2.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,424 | test_ipmi_server.py | mushorg_conpot/conpot/tests/test_ipmi_server.py | # Copyright (C) 2015 Lukas Rist <glaslos@gmail.com>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from gevent import monkey
monkey.patch_all()
from gevent.subprocess import Popen, PIPE, STDOUT
import unittest
from conpot.protocols.ipmi.ipmi_server import IpmiServer
from conpot.utils.greenlet import spawn_test_server, teardown_test_server
class TestIPMI(unittest.TestCase):
    """Integration tests driving a live conpot IPMI server via ``ipmitool``.

    NOTE(review): these tests shell out to the external ``ipmitool`` binary,
    which must be installed on the host running the suite.
    """

    def setUp(self):
        """Spawn an IPMI server from the default template."""
        self.ipmi_server, self.greenlet = spawn_test_server(
            IpmiServer, "default", "ipmi"
        )

    def tearDown(self):
        """Stop the server and join its greenlet."""
        teardown_test_server(self.ipmi_server, self.greenlet)

    def run_cmd(self, cmd):
        """Run an ipmitool subcommand against the test server; return its output.

        Authenticates over lanplus as Administrator/Password (defined in the
        default template); stderr is merged into the returned string.
        """
        _cmd = [
            "ipmitool",
            "-I",
            "lanplus",
            "-H",
            self.ipmi_server.server.server_host,
            "-p",
            str(self.ipmi_server.server.server_port),
            "-R1",
            "-U",
            "Administrator",
            "-P",
            "Password",
        ]
        _cmd += cmd
        _process = Popen(_cmd, stdout=PIPE, stderr=STDOUT, universal_newlines=True)
        _result_out, _ = _process.communicate()
        return _result_out

    def test_boot_device(self):
        """
        Objective: test boot device get and set
        """
        result = self.run_cmd(["chassis", "bootdev", "pxe"])
        self.assertEqual(result, "Set Boot Device to pxe\n")

    def test_power_state(self):
        """
        Objective: test power on/off/reset/cycle/shutdown
        """
        # power status
        result = self.run_cmd(["chassis", "power", "status"])
        self.assertEqual(result, "Chassis Power is off\n")
        # power on
        result = self.run_cmd(["chassis", "power", "on"])
        self.assertEqual(result, "Chassis Power Control: Up/On\n")
        # power off
        result = self.run_cmd(["chassis", "power", "off"])
        self.assertEqual(result, "Chassis Power Control: Down/Off\n")
        # power reset
        result = self.run_cmd(["chassis", "power", "reset"])
        self.assertEqual(result, "Chassis Power Control: Reset\n")
        # power cycle
        result = self.run_cmd(["chassis", "power", "cycle"])
        self.assertEqual(result, "Chassis Power Control: Cycle\n")
        # shutdown gracefully
        result = self.run_cmd(["chassis", "power", "soft"])
        self.assertEqual(result, "Chassis Power Control: Soft\n")

    def test_chassis_status(self):
        """Objective: verify the full chassis status report matches the template."""
        result = self.run_cmd(["chassis", "status"])
        self.assertEqual(
            result,
            "System Power         : off\n"
            "Power Overload       : false\n"
            "Power Interlock      : inactive\n"
            "Main Power Fault     : false\n"
            "Power Control Fault  : false\n"
            "Power Restore Policy : always-off\n"
            "Last Power Event     : \n"
            "Chassis Intrusion    : inactive\n"
            "Front-Panel Lockout  : inactive\n"
            "Drive Fault          : false\n"
            "Cooling/Fan Fault    : false\n",
        )

    def test_user_list(self):
        """Objective: verify the configured user table is reported correctly."""
        result = self.run_cmd(["user", "list"])
        self.assertEqual(
            result,
            "ID  Name\t     Callin  Link Auth\tIPMI Msg   Channel Priv Limit\n"
            "1   Administrator    true    true       true       ADMINISTRATOR\n"
            "2   Operator         true    false      false      OPERATOR\n"
            "3   User1            true    true       true       USER\n"
            "4   User2            true    false      false      USER\n"
            "5   User3            true    true       true       CALLBACK\n",
        )

    def test_channel_get_access(self):
        """Objective: verify per-user channel access details for user 3."""
        result = self.run_cmd(["channel", "getaccess", "1", "3"])
        self.assertIn(
            "Maximum User IDs     : 5\n"
            "Enabled User IDs     : 3\n\n"
            "User ID              : 3\n"
            "User Name            : User1\n"
            "Fixed Name           : Yes\n"
            "Access Available     : call-in / callback\n"
            "Link Authentication  : enabled\n"
            "IPMI Messaging       : enabled\n"
            "Privilege Level      : USER\n",
            result,
        )

    def test_misc(self):
        """Objective: verify miscellaneous commands (session password change)."""
        # change the session pass
        result = self.run_cmd(["set", "password", "1", "TEST"])
        self.assertEqual(result, "Set session password\n")
| 5,044 | Python | .py | 121 | 32.975207 | 83 | 0.571137 | mushorg/conpot | 1,224 | 413 | 115 | GPL-2.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,425 | test_ftp.py | mushorg_conpot/conpot/tests/test_ftp.py | # Copyright (C) 2018 Abhinav Saxena <xandfury@gmail.com>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from gevent import monkey
monkey.patch_all()
import unittest
import os
from datetime import datetime
from tempfile import NamedTemporaryFile
from freezegun import freeze_time
import conpot
import conpot.core as conpot_core
from conpot.protocols.ftp.ftp_server import FTPServer
from conpot.protocols.ftp.ftp_utils import ftp_commands
from conpot.utils.greenlet import spawn_test_server, teardown_test_server
from conpot.utils.networking import sanitize_file_name
import ftplib # Use ftplib's client for more authentic testing
class TestFTPServer(unittest.TestCase):
"""
All tests are executed in a similar way. We run a valid/invalid FTP request/command and check for valid
response. Testing is done by sending/receiving files in data channel related commands.
Implementation Note: There are no explicit tests for active/passive mode. These are covered in list and nlst
tests
"""
def setUp(self):
conpot_core.initialize_vfs()
self.ftp_server, self.greenlet = spawn_test_server(FTPServer, "default", "ftp")
self.client = ftplib.FTP()
self.vfs, self.data_fs = conpot_core.get_vfs("ftp")
def tearDown(self):
if self.client:
try:
self.client.close()
except ftplib.all_errors:
pass
teardown_test_server(self.ftp_server, self.greenlet)
def client_connect(self):
return self.client.connect(
host=self.ftp_server.server.server_host,
port=self.ftp_server.server.server_port,
)
def client_init(self):
self.client_connect()
self.client.login(user="nobody", passwd="nobody")
def client_refresh(self):
"""
Disconnect and reconnect a client
"""
if self.client:
self.client.quit()
del self.client
self.client = ftplib.FTP()
self.client_connect()
def test_auth(self):
"""Test for user, pass and quit commands."""
# test with anonymous
self.assertEqual(self.client_connect(), "200 FTP server ready.")
self.assertIn("Technodrome - Mouser Factory.", self.client.login())
self.client_refresh()
# test with registered user nobody:nobody
self.assertIn(
"Technodrome - Mouser Factory.",
self.client.login(user="nobody", passwd="nobody"),
)
# testing with incorrect password
# testing with incorrect username
# try to access a command that requires auth with being authenticated.
self.assertEqual(self.client.quit(), "221 Bye.")
def test_help(self):
# TODO: test help before login and after login.
self.client_init()
cmds = self.ftp_server.handler.config.enabled_commands
[cmds.remove(i) for i in ("SITE HELP", "SITE CHMOD") if i in cmds]
help_text = self.client.sendcmd("help")
self.assertTrue(all([i in help_text for i in cmds]))
# test command specific help
for i in cmds:
response = self.client.sendcmd("help {}".format(i))
self.assertIn(ftp_commands[i]["help"], response)
# test unrecognized command
self.assertRaisesRegex(
ftplib.error_perm,
"501 Unrecognized command.",
self.client.sendcmd,
"help ABCD",
)
def test_noop(self):
self.client_init()
self.assertEqual(
self.client.sendcmd("noop"), "200 I successfully done nothin'."
)
def test_stru(self):
self.client_init()
self.assertEqual(
self.client.sendcmd("stru F"), "200 File transfer structure set to: F."
)
self.assertRaisesRegex(
ftplib.error_perm,
"504 Unimplemented STRU type.",
self.client.sendcmd,
"stru P",
)
self.assertRaisesRegex(
ftplib.error_perm,
"504 Unimplemented STRU type.",
self.client.sendcmd,
"stru R",
)
self.assertRaisesRegex(
ftplib.error_perm,
"501 Unrecognized STRU type.",
self.client.sendcmd,
"stru invalid_stru_cmd",
)
    def test_allo(self):
        """ALLO is accepted but is a no-op on this server (202)."""
        self.client_init()
        self.assertEqual(
            self.client.sendcmd("allo 250"), "202 No storage allocation necessary."
        )

    def test_syst(self):
        """SYST advertises a 'UNIX Type: L8' system."""
        self.client_init()
        self.assertEqual(self.client.sendcmd("syst"), "215 UNIX Type: L8")

    def test_mode(self):
        """MODE requires an argument; only Stream (S) mode is implemented."""
        self.client_init()
        self.assertRaisesRegex(
            ftplib.error_perm,
            "501 Syntax error: command needs an argument",
            self.client.sendcmd,
            "mode",
        )
        self.assertEqual(self.client.sendcmd("mode S"), "200 Transfer mode set to: S")
        self.assertRaisesRegex(
            ftplib.error_perm,
            "504 Unimplemented MODE type.",
            self.client.sendcmd,
            "mode B",
        )

    def test_site(self):
        """SITE without a sub-command is a syntax error."""
        self.client_init()
        self.assertRaisesRegex(
            ftplib.error_perm,
            "501 Syntax error: command needs an argument",
            self.client.sendcmd,
            "site",
        )

    def test_site_help(self):
        """SITE HELP succeeds and lists the supported SITE sub-commands."""
        self.client_init()
        self.assertIn("Help SITE command successful.", self.client.sendcmd("site help"))
        self.assertIn("HELP", self.client.sendcmd("site help"))
        self.assertIn("CHMOD", self.client.sendcmd("site help"))

    def test_type(self):
        """TYPE maps I/L8 to binary and A/L7 to ASCII; other values are 5xx."""
        self.client_init()
        self.assertEqual(self.client.sendcmd("type I"), "200 Type set to: Binary.")
        self.assertEqual(self.client.sendcmd("type L8"), "200 Type set to: Binary.")
        self.assertEqual(self.client.sendcmd("type A"), "200 Type set to: ASCII.")
        self.assertEqual(self.client.sendcmd("type L7"), "200 Type set to: ASCII.")
        self.assertRaises(ftplib.error_perm, self.client.sendcmd, "type 234")
    def test_size(self):
        """SIZE is refused in ASCII mode; in binary mode it reports the byte
        count, and missing files yield 550."""
        # TODO: test for a user who does not has permissions for size
        self.client_init()
        self.assertRaisesRegex(
            ftplib.error_perm,
            "550 SIZE not allowed in ASCII mode.",
            self.client.sendcmd,
            "size ftp_data.txt",
        )
        # change to mode to binary
        _ = self.client.sendcmd("type I")
        # 49 bytes is the size of the fixture file ftp_data.txt.
        self.assertEqual(self.client.sendcmd("size ftp_data.txt"), "213 49")
        self.assertRaisesRegex(
            ftplib.error_perm,
            "550 is not retrievable.",
            self.client.sendcmd,
            "size file_does_not_exist",
        )

    def test_pwd(self):
        """PWD reports the session root ('/') right after login."""
        self.client_init()
        self.assertEqual(
            self.client.sendcmd("pwd"), '257 "/" is the current directory.'
        )
def test_mkd(self):
# TODO: test for a user who does not has permissions to make directory
self.client_init()
self.assertEqual(
self.client.sendcmd("mkd testing"), '257 "/testing" directory created.'
)
self.assertRaisesRegex(
ftplib.error_perm,
"550 'mkd /../../testing/testing' points to a path which is "
"outside the user's root directory.",
self.client.sendcmd,
"mkd /../../testing/testing",
)
_ = self.client.sendcmd("mkd testing/testing")
self.assertEqual(
self.client.sendcmd("mkd testing/testing/../demo"),
'257 "/testing/demo" directory created.',
)
self.vfs.removedir("testing/testing")
self.vfs.removedir("testing/demo")
self.vfs.removedir("testing")
def test_cwd(self):
# TODO: test for a user who does not has permissions to change directory
self.client_init()
# create a directory to cwd to.
self.vfs.makedir("testing")
self.assertEqual(
self.client.sendcmd("cwd testing"),
'250 "/testing" is the current directory.',
)
# check consistency with pwd
self.assertEqual(
self.client.sendcmd("pwd"), '257 "/testing" is the current directory.'
)
# test for cdup.
self.assertEqual(
self.client.sendcmd("cdup"), '250 "/" is the current directory.'
)
# make sure that user does not go - out of the root path.
self.assertRaisesRegex(
ftplib.error_perm,
"550 'cwd ../' points to a path which is outside the user's "
"root directory.",
self.client.sendcmd,
"cwd ../",
)
self.vfs.removedir("testing")
    def test_rmd(self):
        """RMD removes a directory once, then fails; the root and escaping
        paths are refused."""
        self.client_init()
        # let us create a temp dir for deleting
        self.vfs.makedir("tmp")
        self.assertEqual(self.client.sendcmd("rmd tmp"), "250 Directory removed.")
        # Removing the same directory a second time must fail.
        self.assertRaisesRegex(
            ftplib.error_perm,
            "550 Remove directory operation failed.",
            self.client.sendcmd,
            "rmd tmp",
        )
        # TODO: Test with user that has no or little permissions.
        # test for a user trying to delete '/'
        self.assertRaisesRegex(
            ftplib.error_perm,
            "550 Can't remove root directory.",
            self.client.sendcmd,
            "rmd /",
        )
        self.assertRaisesRegex(
            ftplib.error_perm,
            "550 'rmd ../../' points to a path which is outside the user's root directory.",
            self.client.sendcmd,
            "rmd ../../",
        )

    @freeze_time("2018-07-15 17:51:17")
    def test_mdtm(self):
        """MDTM reports the (frozen) modification time; missing files get 550."""
        # TODO : test for user that does not have permissions for mdtm
        self.client_init()
        # datetime.now() is pinned by @freeze_time above.
        self.vfs.settimes(
            "ftp_data.txt", accessed=datetime.now(), modified=datetime.now()
        )
        # test for a file that already exists
        self.assertEqual(self.client.sendcmd("mdtm ftp_data.txt"), "213 20180715175117")
        self.assertRaisesRegex(
            ftplib.error_perm,
            "550 /this_file_does_not_exist.txt is not retrievable",
            self.client.sendcmd,
            "mdtm this_file_does_not_exist.txt",
        )
def test_dele(self):
# TODO: check for a user who does not have permissions to delete a file!
self.client_init()
# let us create a temp file just for deleting.
with self.vfs.open("/temp_file", mode="w") as _tmp:
_tmp.write("This is just a temp file for testing rm")
# delete that file
self.assertEqual(self.client.sendcmd("dele temp_file"), "250 File removed.")
# check for errors
self.assertRaisesRegex(
ftplib.error_perm,
"550 Failed to delete file.",
self.client.sendcmd,
"dele temp_file",
)
    def test_file_rename(self):
        """RNFR/RNTO: refuse renaming the home directory or a missing file,
        require RNFR before RNTO, then rename a real file successfully."""
        # TODO: check for a user who does not have permissions to rename a file!
        self.client_init()
        # First we would do everything for a valid file and all valid params
        # check with invalid rnfr params
        self.assertRaisesRegex(
            ftplib.error_perm,
            "550 Can't rename home directory.",
            self.client.sendcmd,
            "rnfr /",
        )
        self.assertRaisesRegex(
            ftplib.error_perm,
            "550 No such file or directory.",
            self.client.sendcmd,
            "rnfr file_DNE",
        )
        # RNTO without a preceding RNFR is a command-sequencing error.
        self.assertRaisesRegex(
            ftplib.error_perm,
            "503 Bad sequence of commands: use RNFR first.",
            self.client.sendcmd,
            "rnto /random_path",
        )
        # create a custom file to play with.
        try:
            # do a rnfr to rename file ftp_data.txt
            with self.vfs.open("/test_rename_file.txt", mode="w") as _test:
                _test.write("This is just a test file for rename testing of FTP server")
            self.assertEqual(
                self.client.sendcmd("rnfr test_rename_file.txt"),
                "350 Ready for destination name.",
            )
            self.assertEqual(
                self.client.sendcmd("rnto new_data.txt"), "250 Renaming ok."
            )
            # try for a case that would fail --
            # fixme: tests fail after trying to rename files once they have been renamed.
            # self.assertEqual(self.client.sendcmd('rnfr new_data.txt'), '350 Ready for destination name.')
            # self.assertRaisesRegex(ftplib.error_perm, '501 can\'t decode command.', self.client.sendcmd,
            # 'rnto Very / Unsafe / file\nname hähä \n\r .txt')
        finally:
            self.vfs.remove("new_data.txt")
    def test_site_chmod(self):
        """SITE CHMOD changes file permissions on the VFS."""
        # TODO: check for a user who does not have permissions to do chmod!
        self.client_init()
        # change permissions
        self.client.sendcmd("site chmod 644 ftp_data.txt")
        self.assertEqual(self.vfs.get_permissions("ftp_data.txt"), "rw-r--r--")

    def test_stat(self):
        """STAT without args reports session state; with a path it lists files."""
        # TODO: check for a user who does not have permissions to do stat!
        self.client_init()
        # do stat without args
        self.assertIn(
            "Logged in as: nobody\n TYPE: ASCII; STRUcture: File; MODE: Stream\n",
            self.client.sendcmd("stat"),
        )
        self.assertIn("ftp_data.txt", self.client.sendcmd("stat /"))

    # ------ Data channel related. -----
@freeze_time("2018-07-15 17:51:17")
def test_list(self):
# TODO: check for a user who does not have permissions to do list!
self.client_init()
self.vfs.settimes(
"ftp_data.txt", accessed=datetime.now(), modified=datetime.now()
)
# Do a list of directory for passive mode
_pasv_list = list()
self.client.retrlines("LIST", _pasv_list.append)
# note that this time is set in ftp_server settimes method. Picked up from the default template.
self.assertEqual(
["-rwxrwxrwx 1 nobody ftp 49 Jul 15 17:51 ftp_data.txt"],
_pasv_list,
)
# check list for active mode
_actv_list = list()
self.client.set_pasv(False)
self.client.retrlines("LIST", _actv_list.append)
# note that this time is set in ftp_server settimes method. Picked up from the default template.
self.assertEqual(
["-rwxrwxrwx 1 nobody ftp 49 Jul 15 17:51 ftp_data.txt"],
_actv_list,
)
# response from active and pasv mode should be same.
def test_nlist(self):
# TODO: check for a user who does not have permissions to do nlst!
self.client_init()
# Do a list of directory
_pasv_list = list()
self.client.retrlines("NLST", _pasv_list.append)
self.assertEqual(["ftp_data.txt"], _pasv_list)
# check list for active mode
_actv_list = list()
self.client.set_pasv(False)
self.client.retrlines("NLST", _actv_list.append)
self.assertEqual(["ftp_data.txt"], _actv_list)
def test_retr(self):
"""Test retr or downloading a file from the server."""
self.client_init()
_path = os.path.join(
"".join(conpot.__path__), "tests", "data", "data_temp_fs", "ftp"
)
with open(_path + "/ftp_testing_retr.txt", mode="wb") as _file:
self.client.retrbinary("retr ftp_data.txt", _file.write)
buffer = ""
with open(_path + "/ftp_testing_retr.txt", mode="r") as _file:
buffer += _file.readline()
self.assertEqual(buffer, "This is just a test file for Conpot's FTP server\n")
os.remove(_path + "/ftp_testing_retr.txt")
    def test_rein(self):
        """REIN resets the session: a following PASS must demand USER first."""
        self.client_init()
        self.assertEqual(self.client.sendcmd("rein"), "230 Ready for new user.")
        self.assertRaisesRegex(
            ftplib.error_perm,
            "503 Login with USER first.",
            self.client.sendcmd,
            "pass testing",
        )
        # TODO: Add test with existing transfer in progress.
@freeze_time("2018-07-15 17:51:17")
def test_stor(self):
# let us test by uploading a file called ftp_testing.txt
self.client_init()
_path = os.path.join(
"".join(conpot.__path__), "tests", "data", "test_data_fs", "ftp"
)
with open(_path + "/ftp_testing.txt", mode="rb") as _file:
self.client.storbinary("stor ftp_testing_stor.txt", _file)
self.assertIn(
"ftp_testing_stor.txt", self.ftp_server.handler.config.vfs.listdir("/")
)
self.vfs.remove("ftp_testing_stor.txt")
_data_fs_file = sanitize_file_name(
"ftp_testing_stor.txt",
self.client.sock.getsockname()[0],
self.client.sock.getsockname()[1],
)
self.data_fs.remove(_data_fs_file)
def test_appe(self):
self.client_init()
_data_1 = "This is just a test!\n"
_data_2 = "This is another test\n"
_file_name = "ftp_appe_test.txt"
with self.vfs.open(_file_name, mode="w") as _server_file:
_server_file.write(_data_1)
try:
with NamedTemporaryFile(mode="w+") as _temp:
_temp.write(_data_2)
_temp.flush()
with open(_temp.name, mode="rb+") as _source:
self.client.storbinary(f"appe {_file_name}", _source)
with self.vfs.open(_file_name, mode="r") as _server_file:
_file_contents = _server_file.read()
self.assertEqual(_file_contents, _data_1 + _data_2)
finally:
self.vfs.remove(_file_name)
_data_fs_file = sanitize_file_name(
_file_name,
self.client.sock.getsockname()[0],
self.client.sock.getsockname()[1],
)
self.data_fs.remove(_data_fs_file)
    def test_abor(self):
        """ABOR with no transfer in progress is acknowledged with 225."""
        self.client_init()
        self.assertEqual(self.client.sendcmd("abor"), "225 No transfer to abort.")

    def test_rest(self):
        """REST validates its offset argument and is refused in ASCII mode."""
        self.client_init()
        # Let us test error conditions first.
        self.client.sendcmd("type i")
        self.assertRaises(ftplib.error_perm, self.client.sendcmd, "rest")
        self.assertRaises(ftplib.error_perm, self.client.sendcmd, "rest str")
        self.assertRaises(ftplib.error_perm, self.client.sendcmd, "rest -1")
        self.assertRaises(ftplib.error_perm, self.client.sendcmd, "rest 10.1")
        # REST is not supposed to be allowed in ASCII mode
        self.client.sendcmd("type a")
        self.assertRaisesRegex(
            ftplib.error_perm,
            "501 Resuming transfers not allowed in ASCII mode.",
            self.client.sendcmd,
            "rest 10",
        )
        # Fixme: test rest while an actual transfer is going on.

    def test_stou(self):
        """STOU must be rejected while a REST offset is pending."""
        # fixme: incomplete test.
        self.client_init()
        self.client.sendcmd("type i")
        self.client.sendcmd("rest 10")
        self.assertRaisesRegex(
            ftplib.error_temp, "Can't STOU while REST", self.client.sendcmd, "stou"
        )
def test_max_retries(self):
"""client should raise an error when max retries are reached."""
self.client_connect()
self.assertRaises(
ftplib.error_perm, self.client.login, user="nobody", passwd="incorrect_pass"
)
self.assertRaises(
ftplib.error_perm, self.client.login, user="nobody", passwd="incorrect_pass"
)
self.assertRaises(
ftplib.error_perm, self.client.login, user="nobody", passwd="incorrect_pass"
)
self.assertRaisesRegex(
ftplib.error_temp,
"421 Too many connections. Service temporarily unavailable.",
self.client.login,
user="nobody",
passwd="incorrect_pass",
)
| 20,724 | Python | .py | 503 | 31.49503 | 112 | 0.598711 | mushorg/conpot | 1,224 | 413 | 115 | GPL-2.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,426 | test_utils_ext_ip.py | mushorg_conpot/conpot/tests/test_utils_ext_ip.py | # Copyright (C) 2014 Lukas Rist <glaslos@gmail.com>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import gevent.monkey
gevent.monkey.patch_all()
import unittest
import conpot.utils.ext_ip
from gevent.pywsgi import WSGIServer
import gevent
class TestExtIPUtil(unittest.TestCase):
    """Exercises conpot.utils.ext_ip against a local WSGI stub server
    (plus one test that hits a live external service)."""

    def setUp(self):
        # Minimal WSGI app that always answers with a fixed IP string.
        def application(environ, start_response):
            headers = [("Content-Type", "text/html")]
            start_response("200 OK", headers)
            return [b"127.0.0.1"]

        self.server = WSGIServer(("localhost", 8000), application)
        # Serve in a background greenlet so the test thread is not blocked.
        gevent.spawn(self.server.serve_forever)

    def tearDown(self):
        self.server.stop()

    def test_ip_verify(self):
        # _verify_address must accept a well-formed IPv4 address.
        self.assertTrue(conpot.utils.ext_ip._verify_address("127.0.0.1") is True)

    def test_ext_util(self):
        # _fetch_data should return the body served by the local stub.
        ip_address = str(
            conpot.utils.ext_ip._fetch_data(
                urls=[
                    "http://127.0.0.1:8000",
                ]
            )
        )
        self.assertTrue(conpot.utils.ext_ip._verify_address(ip_address) is True)

    def test_fetch_ext_ip(self):
        # NOTE(review): depends on the external api.ipify.org service being
        # reachable from the test environment.
        self.assertIsNotNone(
            conpot.utils.ext_ip.get_ext_ip(urls=["https://api.ipify.org"])
        )
| 1,876 | Python | .py | 47 | 33.93617 | 81 | 0.680594 | mushorg/conpot | 1,224 | 413 | 115 | GPL-2.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,427 | test_docs.py | mushorg_conpot/conpot/tests/test_docs.py | # Copyright (C) 2013 Lukas Rist <glaslos@gmail.com>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import os
import subprocess
import unittest
class TestMakeDocs(unittest.TestCase):
    """Builds the Sphinx documentation and checks that the build succeeds."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def test_make_docs(self):
        """Run ``make -C docs/ html`` from the project root and expect the
        standard Sphinx success message on stdout."""
        cmd = "make -C docs/ html"
        project_root = os.path.join(os.path.dirname(__file__), "..", "..")
        # subprocess.run waits for completion and captures stdout in one
        # call (replaces the Popen + communicate() pair).
        completed = subprocess.run(
            cmd.split(), cwd=project_root, stdout=subprocess.PIPE
        )
        output = completed.stdout.decode()
        self.assertIn("Build finished. The HTML pages are in build/html.", output)
| 1,310 | Python | .py | 32 | 36.96875 | 82 | 0.717545 | mushorg/conpot | 1,224 | 413 | 115 | GPL-2.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,428 | test_snmp_server.py | mushorg_conpot/conpot/tests/test_snmp_server.py | # Copyright (C) 2013 Lukas Rist <glaslos@gmail.com>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from gevent import monkey
monkey.patch_all()
import shutil
import tempfile
import unittest
from collections import namedtuple
from pysnmp.proto import rfc1902
import conpot.core as conpot_core
from conpot.protocols.snmp.snmp_server import SNMPServer
from conpot.tests.helpers import snmp_client
from conpot.utils.greenlet import spawn_test_server, teardown_test_server
class TestSNMPServer(unittest.TestCase):
    """Spins up the default-template SNMP server and talks to it with the
    helper SNMP client."""

    def setUp(self):
        # The SNMP server compiles/caches MIBs; give it a disposable dir.
        self.tmp_dir = tempfile.mkdtemp()
        # NOTE(review): namedtuple() returns a *class* here and 'mibcache'
        # is set as a class attribute. It works because the server only
        # reads args.mibcache, but a plain object would be clearer.
        args = namedtuple("FakeArgs", "mibcache")
        args.mibcache = self.tmp_dir
        self.snmp_server, self.greenlet = spawn_test_server(
            SNMPServer, template="default", protocol="snmp", args=args
        )
        self.host = "127.0.0.1"
        self.port = self.snmp_server.get_port()

    def tearDown(self):
        teardown_test_server(self.snmp_server, self.greenlet)
        shutil.rmtree(self.tmp_dir)

    def test_snmp_get(self):
        """
        Objective: Test if we can get data via snmp_get
        """
        client = snmp_client.SNMPClient(self.host, self.port)
        # sysDescr.0 - the device description string.
        oid = ((1, 3, 6, 1, 2, 1, 1, 1, 0), None)
        client.get_command(oid, callback=self.mock_callback)
        self.assertEqual("Siemens, SIMATIC, S7-200", self.result)

    def test_snmp_set(self):
        """
        Objective: Test if we can set data via snmp_set
        """
        client = snmp_client.SNMPClient(self.host, self.port)
        # syslocation
        oid = ((1, 3, 6, 1, 2, 1, 1, 6, 0), rfc1902.OctetString("TESTVALUE"))
        client.set_command(oid, callback=self.mock_callback)
        databus = conpot_core.get_databus()
        self.assertEqual("TESTVALUE", databus.get_value("sysLocation")._value.decode())

    def mock_callback(
        self,
        sendRequestHandle,
        errorIndication,
        errorStatus,
        errorIndex,
        varBindTable,
        cbCtx,
    ):
        # Store the (last) result on the test instance so assertions can
        # inspect it after the client call returns.
        self.result = None
        if errorIndication:
            self.result = errorIndication
        elif errorStatus:
            self.result = errorStatus.prettyPrint()
        else:
            for oid, val in varBindTable:
                self.result = val.prettyPrint()
| 2,946 | Python | .py | 75 | 33.08 | 87 | 0.681834 | mushorg/conpot | 1,224 | 413 | 115 | GPL-2.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,429 | test_enip_server.py | mushorg_conpot/conpot/tests/test_enip_server.py | # Copyright (C) 2017 Yuru Shao <shaoyuru@gmail.com>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from gevent import monkey
monkey.patch_all()
import unittest
from cpppo.server.enip import client
from gevent import socket
from conpot.protocols.enip.enip_server import EnipServer
from conpot.utils.greenlet import spawn_test_server, teardown_test_server
# In lieu of creating dedicated test templates we modify
# EnipServer config through inheritance
class EnipServerTCP(EnipServer):
    """EnipServer pinned to 127.0.0.1:50002 and forced into TCP mode."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.config.mode = "tcp"
        self.addr = "127.0.0.1"
        self.port = 50002
class EnipServerUDP(EnipServer):
    """EnipServer pinned to 127.0.0.1:60002 and forced into UDP mode."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.config.mode = "udp"
        self.addr = "127.0.0.1"
        self.port = 60002
class TestENIPServer(unittest.TestCase):
    """End-to-end tests of the EtherNet/IP server in TCP and UDP mode,
    using cpppo as the client."""

    def setUp(self):
        self.enip_server_tcp, self.server_greenlet_tcp = spawn_test_server(
            EnipServerTCP, "default", "enip", port=50002
        )
        self.enip_server_udp, self.server_greenlet_udp = spawn_test_server(
            EnipServerUDP, "default", "enip", port=60002
        )

    def tearDown(self):
        teardown_test_server(self.enip_server_udp, self.server_greenlet_udp)
        teardown_test_server(self.enip_server_tcp, self.server_greenlet_tcp)

    @staticmethod
    def attribute_operations(paths, int_type=None, **kwds):
        """Translate cpppo path specs into attribute-service operations.

        Instance paths become ``get_attributes_all``; symbolic/attribute/
        element paths become the single-attribute getter or setter,
        depending on whether data is supplied.
        """
        for op in client.parse_operations(paths, int_type=int_type or "SINT", **kwds):
            path_end = op["path"][-1]
            if "instance" in path_end:
                op["method"] = "get_attributes_all"
                assert (
                    "data" not in op
                ), "All Attributes cannot be operated on using Set Attribute services"
            elif (
                "symbolic" in path_end
                or "attribute" in path_end
                or "element" in path_end
            ):
                # Fixed: the original tested the truthy literal "element"
                # instead of membership, which made this branch match every
                # path and left the else-branch unreachable.
                op["method"] = (
                    "set_attribute_single" if "data" in op else "get_attribute_single"
                )
            else:
                # Fixed: AssertionError does not %-format its arguments, so
                # format the message explicitly.
                raise AssertionError(
                    "Path invalid for Attribute services: %r" % (op["path"],)
                )
            yield op

    @staticmethod
    def await_cpf_response(connection, command):
        """Wait for an ENIP response and return the CPF payload of *command*."""
        response, _ = client.await_response(connection, timeout=4.0)
        return response["enip"]["CIP"][command]["CPF"]

    def test_read_tags(self):
        with client.connector(
            host=self.enip_server_tcp.addr, port=self.enip_server_tcp.port, timeout=4.0
        ) as connection:
            # Default value of attribute @22/1/1 in the default template.
            tags = ["@22/1/1"]
            ops = self.attribute_operations(tags)
            for _, _, _, _, _, val in connection.pipeline(operations=ops):
                self.assertEqual(100, val[0])

    def test_write_tags(self):
        with client.connector(
            host=self.enip_server_tcp.addr, port=self.enip_server_tcp.port, timeout=4.0
        ) as connection:
            # Write 50, then read it back in the same pipeline.
            tags = ["@22/1/1=(SINT)50", "@22/1/1"]
            ops = self.attribute_operations(tags)
            for idx, _, _, _, _, val in connection.pipeline(operations=ops):
                if idx == 0:
                    self.assertEqual(True, val)
                elif idx == 1:
                    self.assertEqual(50, val[0])

    def test_list_services_tcp(self):
        with client.connector(
            host=self.enip_server_tcp.addr,
            port=self.enip_server_tcp.port,
            timeout=4.0,
            udp=False,
            broadcast=False,
        ) as connection:
            connection.list_services()
            connection.shutdown()
            response = self.await_cpf_response(connection, "list_services")
            self.assertEqual(
                "Communications",
                response["item"][0]["communications_service"]["service_name"],
            )

    def test_list_services_udp(self):
        with client.connector(
            host=self.enip_server_udp.addr,
            port=self.enip_server_udp.port,
            timeout=4.0,
            udp=True,
            broadcast=True,
        ) as connection:
            connection.list_services()
            response = self.await_cpf_response(connection, "list_services")
            self.assertEqual(
                "Communications",
                response["item"][0]["communications_service"]["service_name"],
            )

    def test_list_identity_tcp(self):
        with client.connector(
            host=self.enip_server_tcp.addr,
            port=self.enip_server_tcp.port,
            timeout=4.0,
            udp=False,
            broadcast=False,
        ) as connection:
            connection.list_identity()
            connection.shutdown()
            response = self.await_cpf_response(connection, "list_identity")
            expected = self.enip_server_tcp.config.product_name
            self.assertEqual(
                expected, response["item"][0]["identity_object"]["product_name"]
            )

    def test_list_identity_udp(self):
        with client.connector(
            host=self.enip_server_udp.addr,
            port=self.enip_server_udp.port,
            timeout=4.0,
            udp=True,
            broadcast=True,
        ) as connection:
            connection.list_identity()
            response = self.await_cpf_response(connection, "list_identity")
            expected = self.enip_server_tcp.config.product_name
            self.assertEqual(
                expected, response["item"][0]["identity_object"]["product_name"]
            )

    def test_list_interfaces_tcp(self):
        with client.connector(
            host=self.enip_server_tcp.addr,
            port=self.enip_server_tcp.port,
            timeout=4.0,
            udp=False,
            broadcast=False,
        ) as conn:
            conn.list_interfaces()
            conn.shutdown()
            response = self.await_cpf_response(conn, "list_interfaces")
            # No CIP interfaces are advertised by the honeypot.
            self.assertDictEqual({"count": 0}, response)

    def test_list_interfaces_udp(self):
        with client.connector(
            host=self.enip_server_udp.addr,
            port=self.enip_server_udp.port,
            timeout=4.0,
            udp=True,
            broadcast=True,
        ) as conn:
            conn.list_interfaces()
            response = self.await_cpf_response(conn, "list_interfaces")
            self.assertDictEqual({"count": 0}, response)

    # Tests related to restart of ENIP device..
    # def test_send_NOP(self):
    #     # test tcp
    #     pass

    def test_malformend_request_tcp(self):
        # Send a deliberately malformed ENIP frame; the server must answer
        # without crashing.
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.connect((self.enip_server_tcp.addr, self.enip_server_tcp.port))
        s.send(
            b"e\x00\x04\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
            + b"x00\x00\x01\x00\x00\x00"
        )  # test the help command
        _ = s.recv(1024)
        s.close()
        # TODO: verify data packet?

    def test_malformend_request_udp(self):
        pass
| 7,732 | Python | .py | 186 | 31.478495 | 100 | 0.600266 | mushorg/conpot | 1,224 | 413 | 115 | GPL-2.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,430 | test_protocols.py | mushorg_conpot/conpot/tests/test_protocols.py | # Copyright (C) 2020 srenfo
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import pytest
from conpot import protocols
from conpot.utils.greenlet import init_test_server_by_name
@pytest.mark.parametrize("name", protocols.name_mapping.keys())
def test_protocols_can_be_stopped(name):
    """After stop(), a protocol's greenlet must run to completion."""
    server, worker = init_test_server_by_name(name)
    server.stop()
    worker.join(0.2)
    # Greenlets with working shutdown logic will have run to completion;
    # greenlets with broken shutdown logic would still be waiting to run.
    assert worker.successful()
@pytest.mark.parametrize("name", protocols.name_mapping.keys())
def test_protocols_serve_forever(name):
    """A freshly started protocol keeps serving until explicitly stopped."""
    server, worker = init_test_server_by_name(name)
    # The server greenlet must still be running right after startup.
    assert not worker.ready()
    server.stop()
    worker.join(0.2)
| 1,462 | Python | .py | 33 | 41.787879 | 74 | 0.77167 | mushorg/conpot | 1,224 | 413 | 115 | GPL-2.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,431 | test_http_server.py | mushorg_conpot/conpot/tests/test_http_server.py | # Copyright (C) 2013 Lukas Rist <glaslos@gmail.com>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from gevent import monkey
monkey.patch_all()
import unittest
import datetime
import conpot
import os
from lxml import etree
import requests
from gevent import socket, sleep
from conpot.protocols.http import web_server
from conpot.utils.greenlet import spawn_test_server, teardown_test_server
import conpot.core as conpot_core
class TestHTTPServer(unittest.TestCase):
    """Tests for Conpot's templated HTTP front end: method handling,
    databus-backed pages, tarpit delays, and disabled-method responses."""

    def setUp(self):
        self.http_server, self.http_worker = spawn_test_server(
            web_server.HTTPServer, "default", "http"
        )
        # Give the server a moment to start accepting connections.
        sleep(0.5)

    def tearDown(self):
        teardown_test_server(self.http_server, self.http_worker)

    def test_http_request_base(self):
        """
        Objective: Test if http service delivers data on request
        """
        ret = requests.get(
            "http://127.0.0.1:{0}/tests/unittest_base.html".format(
                self.http_server.server_port
            )
        )
        self.assertIn(
            "ONLINE", ret.text, "Could not retrieve expected data from test output."
        )

    def test_http_backend_databus(self):
        """
        Objective: Test if http backend is able to retrieve data from databus
        """
        sysName = conpot_core.get_databus().get_value("sysName")
        if sysName:
            ret = requests.get(
                "http://127.0.0.1:{0}/tests/unittest_databus.html".format(
                    self.http_server.server_port
                )
            )
            self.assertIn(
                sysName,
                ret.text,
                "Could not find databus entity 'sysName' (value '{0}') in output.".format(
                    sysName
                ),
            )
        else:
            raise Exception(
                "Assertion failed. Key 'sysName' not found in databus definition table."
            )

    def test_http_backend_tarpit(self):
        """
        Objective: Test if http tarpit delays responses properly
        """
        # retrieve configuration from xml
        dir_name = os.path.dirname(conpot.__file__)
        dom = etree.parse(dir_name + "/templates/default/http/http.xml")
        # check for proper tarpit support
        tarpit = dom.xpath(
            '//http/htdocs/node[@name="/tests/unittest_tarpit.html"]/tarpit'
        )
        if tarpit:
            tarpit_delay = tarpit[0].xpath("./text()")[0]
            # requesting file via HTTP along with measuring the timedelta
            dt_req_start = datetime.datetime.now()
            requests.get(
                "http://127.0.0.1:{0}/tests/unittest_tarpit.html".format(
                    self.http_server.server_port
                )
            )
            dt_req_delta = datetime.datetime.now() - dt_req_start
            # check if the request took at least the expected delay to be processed
            self.assertLessEqual(
                int(tarpit_delay),
                dt_req_delta.seconds,
                "Expected delay: >= {0} seconds. Actual delay: {1} seconds".format(
                    tarpit_delay, dt_req_delta.seconds
                ),
            )
        else:
            raise AssertionError(
                "Assertion failed. Tarpit delay not found in HTTP template."
            )

    def test_http_subselect_trigger(self):
        """
        Objective: Test if http subselect triggers work correctly
        """
        ret = requests.get(
            "http://127.0.0.1:{0}/tests/unittest_subselects.html?action=unit&subaction=test".format(
                self.http_server.server_port
            )
        )
        self.assertIn(
            "SUCCESSFUL", ret.text, "Trigger missed. An unexpected page was delivered."
        )

    def test_do_TRACE(self):
        """
        Objective: Test the web server with a trace request
        """
        # requests has no trace method.. So resorting to the good'ol socket - sending raw data
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.connect(("127.0.0.1", self.http_server.server_port))
        s.sendall(b"TRACE /index.html HTTP/1.1\r\nHost: localhost\r\n\r\n")
        data = s.recv(1024)
        # FIXME: Omitting the time etc from data - mechanism to check them needed as well?
        self.assertIn(b"HTTP/1.1 200 OK", data)
        # test for 501 - Disable TRACE method
        self.http_server.cmd_responder.httpd.disable_method_trace = True
        s.sendall(b"TRACE /index.html HTTP/1.1\r\nHost: localhost\r\n\r\n")
        data = s.recv(1024)
        s.close()
        self.assertIn(b"501", data)

    def test_do_HEAD(self):
        """
        Objective: Test the web server by sending a HTTP HEAD request.
        Should be responded back by the valid HTTP headers
        """
        ret = requests.head(
            "http://127.0.0.1:{0}/tests/unittest_subselects.html?action=unit&subaction=test".format(
                self.http_server.server_port
            )
        )
        self.assertTrue(
            ret.status_code == 200 and ret.headers["Content-Length"] == "370"
        )
        # Test for 404
        ret = requests.head(
            "http://127.0.0.1:{0}/tests/random_page_does_not_exists.html".format(
                self.http_server.server_port
            )
        )
        self.assertEqual(ret.status_code, 404)
        # test for 501 - Disable HEAD method
        self.http_server.cmd_responder.httpd.disable_method_head = True
        ret = requests.head(
            "http://127.0.0.1:{0}/tests/unittest_subselects.html?action=unit&subaction=test".format(
                self.http_server.server_port
            )
        )
        self.assertEqual(ret.status_code, 501)

    def test_do_OPTIONS(self):
        """
        Objective: Test the web server by sending a valid OPTIONS HTTP request
        """
        ret = requests.options(
            "http://127.0.0.1:{0}/tests/unittest_subselects.html?action=unit&subaction=test".format(
                self.http_server.server_port
            )
        )
        self.assertEqual((ret.headers["allow"]), "GET,HEAD,POST,OPTIONS,TRACE")
        # test for 501 - Disable OPTIONS method
        self.http_server.cmd_responder.httpd.disable_method_options = True
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.connect(("127.0.0.1", self.http_server.server_port))
        s.sendall(b"OPTIONS /index.html HTTP/1.1\r\nHost: localhost\r\n\r\n")
        data = s.recv(1024)
        # Fixed: close the raw socket; the original leaked it (test_do_TRACE
        # closes its socket, this one did not).
        s.close()
        self.assertIn(b"501", data)

    def test_do_POST(self):
        """
        Objective: send a POST request to a invalid URI. Should get a 404 response
        """
        payload = {"key1": "value1", "key2": "value2"}
        ret = requests.post(
            "http://127.0.0.1:{0}/tests/demo.html".format(self.http_server.server_port),
            data=payload,
        )
        self.assertEqual(ret.status_code, 404)

    def test_not_implemented_method(self):
        """
        Objective: PUT HTTP method is not implemented in Conpot, should raise 501
        """
        payload = b"PUT /index.html HTTP/1.1\r\nHost: localhost\r\n\r\n"
        ret = requests.put(
            "http://127.0.0.1:{0}/tests/demo.html".format(self.http_server.server_port),
            data=payload,
        )
        self.assertEqual(ret.status_code, 501)
| 8,044 | Python | .py | 197 | 31.299492 | 100 | 0.604396 | mushorg/conpot | 1,224 | 413 | 115 | GPL-2.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,432 | test_attack_session.py | mushorg_conpot/conpot/tests/test_attack_session.py | from datetime import datetime, timedelta
from freezegun import freeze_time
from conpot.core.attack_session import AttackSession
class LogQueueFake:
    """Minimal in-memory stand-in for the log queue AttackSession writes to."""

    def __init__(self):
        # Events captured in arrival order so tests can inspect them.
        self.events = []

    def put(self, event):
        """Record *event* instead of queueing it (mimics queue.Queue.put)."""
        self.events.insert(len(self.events), event)
def test_add_event_is_logged():
    """A logged event carries the full endpoint metadata of its session."""
    queue = LogQueueFake()
    session = AttackSession(
        protocol="testing",
        source_ip="1.2.3.4",
        source_port=11,
        destination_ip="5.6.7.8",
        destination_port=22,
        log_queue=queue,
    )
    payload = {"foo": "bar"}
    session.add_event(payload)
    entry = queue.events[0]
    assert entry["data"] == payload
    assert entry["data_type"] == "testing"
    assert entry["src_ip"] == "1.2.3.4"
    assert entry["src_port"] == 11
    assert entry["remote"] == ("1.2.3.4", 11)
    assert entry["dst_ip"] == "5.6.7.8"
    assert entry["dst_port"] == 22
    assert entry["local"] == ("5.6.7.8", 22)
    # TODO should this even include public_ip if it's always None?
    assert entry["public_ip"] is None
def test_add_event_same_id():
    """All events emitted by one session share that session's id."""
    queue = LogQueueFake()
    session = AttackSession(
        protocol=None,
        source_ip=None,
        source_port=None,
        destination_ip=None,
        destination_port=None,
        log_queue=queue,
    )
    for payload in ({"foo": "bar"}, {"bar": "baz"}):
        session.add_event(payload)
    first, second = queue.events
    assert first["id"] == second["id"]
def test_add_event_sessions_have_unique_ids():
    """Events from two different sessions must carry different session ids."""
    queue = LogQueueFake()

    def make_session():
        # Both sessions share the queue so their events land side by side.
        return AttackSession(
            protocol=None,
            source_ip=None,
            source_port=None,
            destination_ip=None,
            destination_port=None,
            log_queue=queue,
        )

    session_1 = make_session()
    session_2 = make_session()
    session_1.add_event({"foo": "bar"})
    session_2.add_event({"bar": "baz"})
    first, second = queue.events
    assert first["id"] != second["id"]
def test_add_event_uses_session_timestamp():
    """add_event stamps events with the session start time, not the event time."""
    queue = LogQueueFake()
    started = datetime(2000, 1, 1)
    with freeze_time(started) as clock:
        session = AttackSession(
            protocol=None,
            source_ip=None,
            source_port=None,
            destination_ip=None,
            destination_port=None,
            log_queue=queue,
        )
        clock.tick(timedelta(days=1))
        session.add_event({"foo": "bar"})
        session.add_event({"bar": "baz"})
    # timestamp is always the time the session started,
    # not the time the event occurred
    for entry in queue.events:
        assert entry["timestamp"] == started
@freeze_time("2000-01-01", auto_tick_seconds=2)
def test_dump_collects_events():
    """dump() returns every event, keyed by milliseconds since session start."""
    queue = LogQueueFake()
    session = AttackSession(
        protocol="testing",
        source_ip="1.2.3.4",
        source_port=11,
        destination_ip="5.6.7.8",
        destination_port=22,
        log_queue=queue,
    )
    first, second = {"foo": "bar"}, {"bar": "baz"}
    for payload in (first, second, first):
        session.add_event(payload)
    dump = session.dump()
    assert dump["data_type"] == "testing"
    # With auto_tick_seconds=2 the three events land at 2000/4000/6000 ms.
    assert list(dump["data"].keys()) == [2000, 4000, 6000]
    assert list(dump["data"].values()) == [first, second, first]
    assert dump["src_ip"] == "1.2.3.4"
    assert dump["src_port"] == 11
    assert dump["remote"] == ("1.2.3.4", 11)
    assert dump["dst_ip"] == "5.6.7.8"
    assert dump["dst_port"] == 22
    assert dump["local"] == ("5.6.7.8", 22)
    # TODO should this even include public_ip if it's always None?
    assert dump["public_ip"] is None
| 4,297 | Python | .py | 122 | 28.491803 | 69 | 0.633946 | mushorg/conpot | 1,224 | 413 | 115 | GPL-2.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,433 | test_kamstrup_management.py | mushorg_conpot/conpot/tests/test_kamstrup_management.py | # Copyright (C) 2018 Abhinav Saxena <xandfury@gmail.com>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from gevent import monkey
monkey.patch_all()
import unittest
from gevent import socket
from conpot.protocols.kamstrup_management.kamstrup_management_server import (
KamstrupManagementServer,
)
from conpot.tests.data.kamstrup_management_data import RESPONSES
from conpot.utils.greenlet import spawn_test_server, teardown_test_server
def check_command_resp_help_message(
    packet_type, help_msg_command, packet_msg_command, kamstrup_management_server
):
    """Drive one management command against the running test server.

    Connects, discards the login banner, then checks that the reply to
    *help_msg_command* matches the canned help text for *packet_type* and
    that the reply to *packet_msg_command* matches the canned command
    response.

    :return: True only if both replies match the RESPONSES test data.
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        s.connect(("127.0.0.1", kamstrup_management_server.server.server_port))
        _ = s.recv(1024)  # discard the banner
        s.sendall(help_msg_command)
        help_response = s.recv(1024) == RESPONSES["H"][packet_type]
        s.sendall(packet_msg_command)
        packet_resp = s.recv(1024) == RESPONSES[packet_type]
    finally:
        # BUG FIX: the socket was leaked when connect/recv raised; always close.
        s.close()
    return help_response and packet_resp
class TestKamstrupManagementProtocol(unittest.TestCase):
    """
    All tests work in similar way. We send a get command check for a valid reply. We send in set command and
    expect things to change in the databus.
    """

    def setUp(self):
        self.kamstrup_management_server, self.server_greenlet = spawn_test_server(
            KamstrupManagementServer, "kamstrup_382", "kamstrup_management"
        )

    def tearDown(self):
        teardown_test_server(self.kamstrup_management_server, self.server_greenlet)

    def _assert_command(self, packet_type, help_command, packet_command):
        # Shared driver: verify both the help text and the command response
        # for a single management command (see check_command_resp_help_message).
        self.assertTrue(
            check_command_resp_help_message(
                packet_type,
                help_command,
                packet_command,
                self.kamstrup_management_server,
            )
        )

    def _send_raw(self, payload):
        # Open a fresh connection, skip the banner, send payload, return reply.
        # The socket is closed even on failure (the original leaked on error).
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            s.connect(
                ("127.0.0.1", self.kamstrup_management_server.server.server_port)
            )
            _ = s.recv(1024)  # banner
            s.sendall(payload)
            return s.recv(1024)
        finally:
            s.close()

    def test_help_command(self):
        self.assertEqual(self._send_raw(b"H\r\n"), RESPONSES["H"]["H"])

    def test_set_config_command(self):
        self.assertEqual(self._send_raw(b"H !SC\r\n"), RESPONSES["H"]["!SC"])

    def test_access_control_command(self):
        self._assert_command("!AC", b"H !AC\r\n", b"!AC 0 2 192.168.1.211\r\n")

    def test_alarm_server_command(self):
        # BUG FIX: the original sent b"!AC 192.168.1.4 4000\r\n" here while
        # checking the "!AS" responses; the alarm-server command is "!AS".
        self._assert_command("!AS", b"H !AS\r\n", b"!AS 192.168.1.4 4000\r\n")

    def test_get_config_command(self):
        self._assert_command("!GC", b"H !GC\r\n", b"!GC\r\n")

    def test_get_software_version_command(self):
        self._assert_command("!GV", b"H !GV\r\n", b"!GV\r\n")

    def test_set_kap1_command(self):
        # TODO: verify that values in the databus have actually changed!
        self._assert_command("!SA", b"H !SA\r\n", b"!SA 192168001002 61000\r\n")

    def test_set_kap2_command(self):
        # TODO: verify that values in the databus have actually changed!
        self._assert_command("!SB", b"H !SB\r\n", b"!SB 192.168.1.2 61000\r\n")

    def test_set_device_name_command(self):
        # TODO: verify that values in the databus have actually changed!
        self._assert_command("!SD", b"H !SD\r\n", b"!SD\r\n")

    def test_set_lookup_command(self):
        # TODO: verify that values in the databus have actually changed!
        self._assert_command("!SH", b"H !SH\r\n", b"!SH hosting.kamstrup_meter.dk\r\n")

    def test_set_ip_command(self):
        # TODO: verify that values in the databus have actually changed!
        self._assert_command("!SI", b"H !SI\r\n", b"!SI 192168001200\r\n")

    def test_set_watchdog_command(self):
        # TODO: verify that values in the databus have actually changed!
        self._assert_command("!SK", b"H !SK\r\n", b"!SK 3600 60 10\r\n")

    def test_set_name_server_command(self):
        # TODO: verify that values in the databus have actually changed!
        self._assert_command(
            "!SN", b"H !SN\r\n", b"!SN 192168001200 192168001201 000000000000\r\n"
        )

    def test_set_ports_command(self):
        # TODO: verify that values in the databus have actually changed!
        self._assert_command("!SP", b"H !SP\r\n", b"!SP 50 1025 1026 50100\r\n")

    def test_set_serial_command(self):
        # TODO: verify that values in the databus have actually changed!
        self._assert_command("!SS", b"H !SS\r\n", b"!SS B 115200,8,E,1,L\r\n")

    def test_request_connect_command(self):
        self._assert_command("!RC", b"H !RC\r\n", b"!RC A 195.215.168.45\r\n")
| 7,466 | Python | .py | 192 | 29.052083 | 108 | 0.592276 | mushorg/conpot | 1,224 | 413 | 115 | GPL-2.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,434 | test_logger_hpfriends.py | mushorg_conpot/conpot/tests/test_logger_hpfriends.py | # Copyright (C) 2013 Johnny Vestergaard <jkv@unixcluster.dk>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import unittest
import gevent
from conpot.core.loggers.hpfriends import HPFriendsLogger
class Test_HPFriends(unittest.TestCase):
    @unittest.skip("disabled until honeycloud up and running again")
    def test_hpfriends(self):
        """
        Objective: Test if data can be published to hpfriends without errors.
        """
        # Connection details for the (currently offline) HPFriends broker.
        hpf = HPFriendsLogger(
            "hpfriends.honeycloud.net",
            20000,
            "HBmU08rR",
            "XDNNuMGYUuWFaWyi",
            ["test.test"],
        )
        gevent.sleep(0.5)  # give the feed connection time to come up
        error_message = hpf.log("some some test data")
        gevent.sleep(0.5)  # let the publish finish before asserting
        self.assertIsNone(
            error_message, "Unexpected error message: {0}".format(error_message)
        )
| 1,581 | Python | .py | 39 | 35.512821 | 80 | 0.709174 | mushorg/conpot | 1,224 | 413 | 115 | GPL-2.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,435 | test_tftp.py | mushorg_conpot/conpot/tests/test_tftp.py | import unittest
import filecmp
from freezegun import freeze_time
from tftpy import TftpClient
import conpot
import conpot.core as conpot_core
from conpot.protocols.tftp.tftp_server import TftpServer
from conpot.utils.greenlet import spawn_test_server, teardown_test_server
class TestTFTPServer(unittest.TestCase):
    """Black-box tests for Conpot's TFTP server (upload/download round trips)."""
    def setUp(self):
        # Fresh VFS + server per test; the client targets the ephemeral port
        # the test server bound to.
        conpot_core.initialize_vfs()
        self.tftp_server, self.greenlet = spawn_test_server(
            TftpServer, template="default", protocol="tftp"
        )
        self.client = TftpClient(
            self.tftp_server.server.server_host, self.tftp_server.server.server_port
        )
        self._test_file = "/".join(
            conpot.__path__ + ["tests/data/test_data_fs/tftp/test.txt"]
        )
    def tearDown(self):
        teardown_test_server(self.tftp_server, self.greenlet)
    @freeze_time("2018-07-15 17:51:17")
    def test_tftp_upload(self):
        """Testing TFTP upload files."""
        self.client.upload("test.txt", self._test_file)
        _, _data_fs = conpot_core.get_vfs("tftp")
        # Uploaded files are stored under a timestamped name; the frozen clock
        # makes that name predictable.
        [_file] = [
            i for i in _data_fs.listdir("./") if "2018-07-15 17:51:17-test-txt" in i
        ]
        self.assertEqual(
            _data_fs.readtext(_file),
            "This is just a test file for Conpot's TFTP server\n",
        )
        _data_fs.remove(_file)
    @freeze_time("2018-07-15 17:51:17")
    def test_mkdir_upload(self):
        """Testing TFTP upload files - while recursively making directories as per the TFTP path."""
        self.client.upload("/dir/dir/test.txt", self._test_file)
        _, _data_fs = conpot_core.get_vfs("tftp")
        [_file] = [
            i for i in _data_fs.listdir("./") if "2018-07-15 17:51:17-test-txt" in i
        ]
        self.assertEqual(
            _data_fs.readtext(_file),
            "This is just a test file for Conpot's TFTP server\n",
        )
        _data_fs.remove(_file)
    def test_tftp_download(self):
        """Download a served file and compare it byte-for-byte with the source."""
        _dst_path = "/".join(
            conpot.__path__ + ["tests/data/data_temp_fs/tftp/download"]
        )
        try:
            self.client.download("tftp_data.txt", _dst_path)
            self.assertTrue(filecmp.cmp(_dst_path, self._test_file))
        finally:
            # Clean up the local "download" artifact via the tftp data fs
            # (presumably rooted at data_temp_fs/tftp -- TODO confirm).
            _, _data_fs = conpot_core.get_vfs("tftp")
            _data_fs.remove("download")
| 2,327 | Python | .py | 58 | 31.724138 | 100 | 0.608407 | mushorg/conpot | 1,224 | 413 | 115 | GPL-2.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,436 | test_protocol_wrapper.py | mushorg_conpot/conpot/tests/test_protocol_wrapper.py | from conpot.core.protocol_wrapper import conpot_protocol
@conpot_protocol
class ProtocolFake:
    """Fake docstring"""
    # NOTE: the docstring above is asserted verbatim by
    # test_wrapped_classes_have_inner_class_attributes -- do not "improve" it.
    def __init__(self, value):
        self.value = value
    def __repr__(self):
        return f"ProtocolFake({self.value})"
def test_instances_have_separate_fields():
    """Each wrapped instance keeps its own attribute state."""
    first, second = ProtocolFake(1), ProtocolFake(2)
    assert first.value == 1
    assert second.value == 2
def test_instances_have_separate_repr():
    """repr() reflects per-instance state, not shared wrapper state."""
    one, two = ProtocolFake(1), ProtocolFake(2)
    assert repr(one) == "ProtocolFake(1)"
    assert repr(two) == "ProtocolFake(2)"
def test_wrapped_classes_have_inner_class_attributes():
    """The protocol wrapper must forward class metadata from the inner class."""
    expected = {
        "__doc__": "Fake docstring",
        "__module__": __name__,
        "__name__": "ProtocolFake",
    }
    for attr, want in expected.items():
        assert getattr(ProtocolFake, attr) == want
| 812 | Python | .py | 22 | 32.090909 | 56 | 0.682519 | mushorg/conpot | 1,224 | 413 | 115 | GPL-2.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,437 | test_logger_json.py | mushorg_conpot/conpot/tests/test_logger_json.py | # Copyright (C) 2016 MushMush Foundation
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from os import path
from datetime import datetime
import unittest
import tempfile
import shutil
import json
from conpot.core.loggers.json_log import JsonLogger
class TestJsonLogger(unittest.TestCase):
    """Round-trip test: log one event through JsonLogger, re-read it from disk."""

    def setUp(self):
        self.logging_dir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.logging_dir)

    def test_log_event(self):
        filename = path.join(self.logging_dir, "test.json")
        sensorid = "default"
        public_ip = "0.0.0.0"
        timestamp = datetime.now()
        JsonLogger(filename, sensorid, public_ip).log(
            {
                "timestamp": timestamp,
                "id": 1337,
                "remote": ("127.0.0.1", "2048"),
                "local": (public_ip, 502),
                "data_type": "unittest",
                "data": {"request": "ping", "response": "pong"},
            }
        )
        with open(filename, "r") as logfile:
            entry = json.load(logfile)
        # The logger is expected to flatten remote/local into src_*/dst_*
        # fields and serialize the timestamp as ISO-8601.
        expected = {
            "timestamp": timestamp.isoformat(),
            "sensorid": sensorid,
            "id": 1337,
            "src_ip": "127.0.0.1",
            "src_port": "2048",
            "dst_ip": public_ip,
            "dst_port": 502,
            "data_type": "unittest",
            "request": "ping",
            "response": "pong",
            "event_type": None,
        }
        for key, want in expected.items():
            self.assertEqual(entry[key], want)
| 2,536 | Python | .py | 64 | 32.046875 | 67 | 0.63581 | mushorg/conpot | 1,224 | 413 | 115 | GPL-2.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,438 | test_vfs.py | mushorg_conpot/conpot/tests/test_vfs.py | # Copyright (C) 2018 Abhinav Saxena <xandfury@gmail.com>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
Test core features for Conpot's virtual file system
"""
import conpot.core as conpot_core
from conpot.core.filesystem import SubAbstractFS
import unittest
import conpot
from freezegun import freeze_time
import fs
from datetime import datetime
from fs.time import epoch_to_datetime
class TestFileSystem(unittest.TestCase):
    """
    Tests related to Conpot's virtual file system.
    """
    def setUp(self):
        # Build a fresh virtual FS for every test and keep a handle to it.
        conpot_core.initialize_vfs()
        self.test_vfs = conpot_core.get_vfs()
    def tearDown(self):
        self.test_vfs.close()
    def test_listdir(self):
        _list = self.test_vfs.listdir(".")
        self.assertIn("data", _list)
    def test_chmod(self):
        # let us first create some directories!
        # TODO: check file create permissions and directory create permissions
        self.test_vfs.chmod("/data", 0o500, recursive=True)
        # Test that changes are reflected in the FS
        self.assertEqual(
            fs.permissions.Permissions.create(0o500),
            self.test_vfs.getinfo(path="/data", namespaces=["access"]).permissions,
        )
        # No changes made in the actual file system.
        self.assertNotEqual(
            fs.permissions.Permissions.create(0o500),
            self.test_vfs.getinfo(
                path="/data", get_actual=True, namespaces=["access"]
            ).permissions,
        )
    def test_chown(self):
        self.test_vfs.register_user(name="test_user", uid=3000)
        self.test_vfs.create_group(name="test_grp", gid=2000)
        # do chown
        self.test_vfs.chown("/data", uid=3000, gid=2000)
        # check uid
        self.assertEqual(
            self.test_vfs.getinfo("/data", namespaces=["access"]).uid, 3000
        )
        # actual uid shouldn't have changed
        self.assertNotEqual(
            self.test_vfs.getinfo("/data", get_actual=True, namespaces=["access"]).uid,
            3000,
        )
        # check gid
        self.assertEqual(
            self.test_vfs.getinfo("/data", namespaces=["access"]).gid, 2000
        )
        # FIXME: self.assertNotEqual(self.test_vfs.getinfo('/data', get_actual=True, namespaces=['access']).gid, 2000)
        # check file username
        self.assertEqual(
            self.test_vfs.getinfo("/data", namespaces=["access"]).user, "test_user"
        )
        self.assertNotEqual(
            self.test_vfs.getinfo("/data", get_actual=True, namespaces=["access"]).user,
            "test_user",
        )
        # check file group
        self.assertEqual(
            self.test_vfs.getinfo("/data", namespaces=["access"]).group, "test_grp"
        )
        self.assertNotEqual(
            self.test_vfs.getinfo(
                "/data", get_actual=True, namespaces=["access"]
            ).group,
            "test_grp",
        )
        # TODO: check for exceptions when user does not exist in the user/grp tables.
    def test_jail(self):
        """Test for checking chroot jail a subfilesystem"""
        # TODO: check for fs.error.permission denied error when we do a '../'
        self.assertTrue(self.test_vfs.create_jail("/data"))
    def test_mkdir(self):
        self.test_vfs.makedir("/dummy_dir", permissions=0o500)
        self.assertTrue(self.test_vfs.norm_path("/dummy_dir"))
        # Virtual permissions must not leak through to the backing file system.
        self.assertNotEqual(
            self.test_vfs.getinfo(
                "/dummy_dir", get_actual=True, namespaces=["access"]
            ).permissions,
            fs.permissions.Permissions.create(0o500),
        )
        # check the usr/grp that created the folder
        self.assertEqual(
            self.test_vfs.getinfo("/dummy_dir", namespaces=["access"]).uid,
            self.test_vfs.default_uid,
        )
    def test_open_dir(self):
        self.assertIsInstance(self.test_vfs.opendir("/data"), SubAbstractFS)
    def test_get_cwd(self):
        self.assertEqual("/", self.test_vfs.getcwd())
    @freeze_time("2018-07-15 17:51:17", tz_offset=-4)
    def test_snapshot(self):
        self.assertEqual(
            {"date-time", "snapshot-data"}, set(self.test_vfs.take_snapshot().keys())
        )
        self.assertEqual(
            self.test_vfs.take_snapshot()["date-time"],
            datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
        )
    @freeze_time("2018-07-15 17:51:17", tz_offset=-4)
    def test_stat(self):
        # TODO: Fix these to appropriate values -- check if the values are valid or not.
        # check if all relevant attributes exist in stat
        self.assertTrue(
            all(
                True if i in set(self.test_vfs.stat("/data").keys()) else False
                for i in {
                    "st_uid",
                    "st_gid",
                    "st_mode",
                    "st_atime",
                    "st_mtime",
                    "st_mtime_ns",
                    "st_ctime_ns",
                    "st_ctime",
                }
            )
        )
    @freeze_time("2018-07-15 17:51:17", tz_offset=-4)
    def test_getmtime(self):
        _mtime = self.test_vfs.getmtime("/data")
        self.assertFalse(_mtime == datetime.now())
    def test_get_permissions(self):
        self.assertEqual(
            self.test_vfs.getinfo("/data", namespaces=["access"]).permissions.as_str(),
            self.test_vfs.get_permissions("/data"),
        )
    def test_remove(self):
        self.test_vfs.touch("test_remove.txt")
        self.test_vfs.setinfo("test_remove.txt", {})
        self.test_vfs.remove("test_remove.txt")
        # Removal must also evict the entry from the FS's internal cache.
        self.assertNotIn("test_remove.txt", self.test_vfs._cache.keys())
    def test_removedir(self):
        self.test_vfs.makedir("/dummy")
        self.test_vfs.removedir("/dummy")
        self.assertNotIn("/dummy", self.test_vfs._cache.keys())
    def test_readlink(self):
        self.assertIsNone(self.test_vfs.readlink("/data"))
    def test_mkdirs(self):
        self.test_vfs.makedirs("demo/demo")
        self.assertTrue(self.test_vfs.get_permissions("/demo/demo"))
    def test_openbin_file(self):
        with self.test_vfs.openbin("new_file", mode="wb") as _file:
            _file.write(b"This is just a test")
        self.assertIn("new_file", self.test_vfs.listdir("/"))
        _test = self.test_vfs.readtext("/new_file")
        self.test_vfs.getinfo("new_file", namespaces=["basic"])
        self.assertEqual(_test, "This is just a test")
    def test_open_file(self):
        with self.test_vfs.open("new_file", mode="w+") as _file:
            _file.write("This is just a test")
        self.assertIn("new_file", self.test_vfs.listdir("/"))
        _test = self.test_vfs.readtext("/new_file")
        self.test_vfs.getinfo("new_file", namespaces=["basic"])
        self.assertEqual(_test, "This is just a test")
    @freeze_time("2018-07-15 17:51:17")
    def test_format_list(self):
        self.test_vfs.settimes(
            "/data", accessed=datetime.now(), modified=datetime.now()
        )
        self._f_list = self.test_vfs.format_list("/", self.test_vfs.listdir("/"))
        [_result] = [i for i in self._f_list]
        # Listing lines should carry the owner name and the frozen mtime.
        self.assertIn("root", _result)
        self.assertIn("Jul 15 17:51", _result)
    @freeze_time("2028-07-15 17:51:17")
    def test_utime(self):
        # With the clock frozen, the times just written equal datetime.now().
        self.test_vfs.utime("/data", accessed=datetime.now(), modified=datetime.now())
        self.assertEqual(
            self.test_vfs.getinfo("/data", namespaces=["details"]).modified.ctime(),
            datetime.now().ctime(),
        )
        self.assertEqual(
            self.test_vfs.getinfo("/data", namespaces=["details"]).accessed.ctime(),
            datetime.now().ctime(),
        )
    def test_access(self):
        # check it the root user has all the permissions
        self.assertTrue(self.test_vfs.access("/data", 0, required_perms="rwx"))
        # create some random group and check permissions for that
        self.test_vfs.create_group("random", 220)
        self.assertFalse(self.test_vfs.access("/data", "random", required_perms="x"))
    @freeze_time("2028-07-15 17:51:17")
    def test_movedir(self):
        # move a directory - retain it's contents
        # Capture the metadata first; it must survive the move unchanged.
        _uid = self.test_vfs.getinfo("/data", namespaces=["access"]).uid
        _gid = self.test_vfs.getinfo("/data", namespaces=["access"]).gid
        _perms = self.test_vfs.getinfo("/data", namespaces=["access"]).permissions
        _user = self.test_vfs.getinfo("/data", namespaces=["access"]).user
        _group = self.test_vfs.getinfo("/data", namespaces=["access"]).group
        _accessed = self.test_vfs.getinfo("/data", namespaces=["details"]).accessed
        _modified = self.test_vfs.getinfo("/data", namespaces=["details"]).modified
        self.test_vfs.movedir("/data", "/data_move", create=True)
        self.assertEqual(
            self.test_vfs.getinfo("/data_move", namespaces=["access"]).uid, _uid
        )
        self.assertEqual(
            self.test_vfs.getinfo("/data_move", namespaces=["access"]).gid, _gid
        )
        self.assertEqual(
            self.test_vfs.getinfo("/data_move", namespaces=["access"]).permissions,
            _perms,
        )
        self.assertEqual(
            self.test_vfs.getinfo("/data_move", namespaces=["access"]).user, _user
        )
        self.assertEqual(
            self.test_vfs.getinfo("/data_move", namespaces=["access"]).group, _group
        )
        # accessed and modified file must not be the same.
        self.test_vfs.settimes(
            "/data_move", accessed=datetime.now(), modified=datetime.now()
        )
        self.assertNotEqual(
            self.test_vfs.getinfo("/data_move", namespaces=["details"]).accessed,
            _accessed,
        )
        self.assertNotEqual(
            self.test_vfs.getinfo("/data_move", namespaces=["details"]).modified,
            _modified,
        )
    @freeze_time("2028-07-15 17:51:17")
    def test_copydir(self):
        # copy a directory
        _uid = self.test_vfs.getinfo("/data", namespaces=["access"]).uid
        _gid = self.test_vfs.getinfo("/data", namespaces=["access"]).gid
        _perms = self.test_vfs.getinfo("/data", namespaces=["access"]).permissions
        _user = self.test_vfs.getinfo("/data", namespaces=["access"]).user
        _group = self.test_vfs.getinfo("/data", namespaces=["access"]).group
        self.test_vfs.copydir("/data", "/data_move", create=True)
        self.assertEqual(
            self.test_vfs.getinfo("/data_move", namespaces=["access"]).uid, _uid
        )
        self.assertEqual(
            self.test_vfs.getinfo("/data_move", namespaces=["access"]).gid, _gid
        )
        self.assertEqual(
            self.test_vfs.getinfo("/data_move", namespaces=["access"]).permissions,
            _perms,
        )
        self.assertEqual(
            self.test_vfs.getinfo("/data_move", namespaces=["access"]).user, _user
        )
        self.assertEqual(
            self.test_vfs.getinfo("/data_move", namespaces=["access"]).group, _group
        )
        self.assertEqual(set(self.test_vfs.listdir("/")), {"data", "data_move"})
    @freeze_time("2028-07-15 17:51:17")
    def test_copyfile(self):
        # create a copy of a file in a separate directory
        with self.test_vfs.open("test_fs.txt", mode="w+") as _file:
            _file.write("This is just a test file checking copyfile")
        self.test_vfs.copy(
            src_path="test_fs.txt", dst_path="test_fs_copy.txt", overwrite=True
        )
        _text = self.test_vfs.readtext("test_fs_copy.txt")
        self.assertEqual(_text, "This is just a test file checking copyfile")
        self.assertTrue(self.test_vfs.getbasic("test_fs_copy.txt"))
    @freeze_time("2028-07-15 17:51:17")
    def test_movefile(self):
        # create a copy of a file in a separate directory
        with self.test_vfs.open("test_fs.txt", mode="w") as _file:
            _file.write("This is just a test file checking copyfile")
        _uid = self.test_vfs.getinfo("test_fs.txt", namespaces=["access"]).uid
        self.test_vfs.move("test_fs.txt", "test_fs_copy.txt", overwrite=True)
        _text = self.test_vfs.readtext("test_fs_copy.txt")
        self.assertEqual(
            self.test_vfs.getinfo("test_fs_copy.txt", namespaces=["access"]).uid, _uid
        )
        self.assertEqual(_text, "This is just a test file checking copyfile")
class TestSubFileSystem(unittest.TestCase):
"""
Tests related to Conpot's virtual sub file system. This would test fs generated folders for each and
every protocol.
"""
def setUp(self):
conpot_core.initialize_vfs()
self._vfs = conpot_core.get_vfs()
self._vfs.register_user("test_user", 13)
self._vfs.create_group("test_grp", 13)
self.test_vfs = self._vfs.mount_fs(
fs_url="/".join(conpot.__path__ + ["tests/data/test_data_fs/vfs"]),
dst_path="/data",
owner_uid=13,
group_gid=13,
perms=0o750,
)
def tearDown(self):
self._vfs.close()
def test_listdir(self):
_list = self.test_vfs.listdir(".")
self.assertIn("vfs.txt", _list)
def test_chmod(self):
self.test_vfs.chmod("vfs.txt", 0o500, recursive=True)
# Test that changes are reflected in the FS
self.assertEqual(
fs.permissions.Permissions.create(0o500),
self.test_vfs.getinfo(path="vfs.txt", namespaces=["access"]).permissions,
)
# No changes made in the actual file system.
self.assertNotEqual(
fs.permissions.Permissions.create(0o500),
self.test_vfs.getinfo(
path="vfs.txt", get_actual=True, namespaces=["access"]
).permissions,
)
def test_chown(self):
self.test_vfs.register_user(name="new_user", uid=3000)
self.test_vfs.create_group(name="new_grp", gid=2000)
# do chown
self.test_vfs.chown("vfs.txt", uid=3000, gid=2000)
# check uid
self.assertEqual(
self.test_vfs.getinfo("vfs.txt", namespaces=["access"]).uid, 3000
)
# actual uid shouldn't have changed
self.assertNotEqual(
self.test_vfs.getinfo(
"vfs.txt", get_actual=True, namespaces=["access"]
).uid,
3000,
)
def test_mkdir(self):
self.test_vfs.makedir("/dummy", permissions=0o500)
self.assertFalse(
self.test_vfs.access("/dummy", self.test_vfs.default_uid, "rwx")
)
self.test_vfs.removedir("/dummy")
self.assertNotIn("/dummy", self.test_vfs._cache.keys())
def test_mkdirs(self):
self.test_vfs.makedirs("/demo/demo")
self.assertTrue(self.test_vfs.get_permissions("/demo/demo"))
@freeze_time("2018-07-17 17:51:17")
def test_open_file(self):
with self.test_vfs.open("new_file", mode="wb") as _file:
_file.write(b"This is just a test")
self.assertIn("new_file", self.test_vfs.listdir("/"))
self.test_vfs.settimes(
"/new_file", accessed=datetime.now(), modified=datetime.now()
)
_test = self.test_vfs.readtext("/new_file")
self.assertEqual(_test, "This is just a test")
self.assertEqual(
self.test_vfs.getinfo("new_file", namespaces=["details"]).modified.ctime(),
datetime.now().ctime(),
)
def test_get_cwd(self):
self.assertEqual(self.test_vfs.getcwd(), "/data")
@freeze_time("2028-07-15 17:51:17")
def test_utime(self):
self.test_vfs.utime("vfs.txt", accessed=datetime.now(), modified=datetime.now())
self.assertEqual(
self.test_vfs.getinfo("vfs.txt", namespaces=["details"]).modified.ctime(),
datetime.now().ctime(),
)
self.assertEqual(
self.test_vfs.getinfo("vfs.txt", namespaces=["details"]).accessed.ctime(),
datetime.now().ctime(),
)
@freeze_time("2018-07-15 17:51:17", tz_offset=-4)
def test_stat(self):
# TODO: Fix these to appropriate values -- check if the values are valid or not.
# check if all relevant attributes exist in stat
self.assertTrue(
all(
True if i in set(self.test_vfs.stat("vfs.txt").keys()) else False
for i in {
"st_uid",
"st_gid",
"st_mode",
"st_atime",
"st_mtime",
"st_mtime_ns",
"st_ctime_ns",
"st_ctime",
}
)
)
def test_get_permissions(self):
self.assertEqual(
self.test_vfs.getinfo(
"vfs.txt", namespaces=["access"]
).permissions.as_str(),
self.test_vfs.get_permissions("vfs.txt"),
)
@freeze_time("2018-07-15 17:51:17")
def test_set_time(self):
"""Test for changing time in the file system."""
self.test_vfs.settimes(
"vfs.txt",
accessed=epoch_to_datetime(103336010),
modified=epoch_to_datetime(103336010),
)
self.assertEqual(
self.test_vfs.getinfo("vfs.txt", namespaces=["details"]).accessed,
epoch_to_datetime(103336010),
)
self.assertEqual(
self.test_vfs.getinfo("vfs.txt", namespaces=["details"]).modified,
epoch_to_datetime(103336010),
)
def test_access(self):
# check it the root user has all the permissions
self.assertFalse(self.test_vfs.access("/vfs.txt", 0, required_perms="rwx"))
self.assertEqual(
self.test_vfs.getinfo("/vfs.txt", namespaces=["access"]).uid, 13
)
self.assertTrue(self.test_vfs.access("/vfs.txt", 13, required_perms="rwx"))
# create some random group and check permissions for that
self.test_vfs.create_group("random", 220)
self.assertFalse(self.test_vfs.access("/vfs.txt", "random", required_perms="x"))
# create a new user called test_access and add it group with gid 13
self.test_vfs.register_user("test_access", 45)
self.test_vfs.add_users_to_group(13, [45])
self.assertTrue(self.test_vfs.access("/vfs.txt", 45, required_perms="rx"))
self.assertFalse(self.test_vfs.access("/vfs.txt", 45, required_perms="rwx"))
def test_remove(self):
self.test_vfs.touch("test_remove.txt")
self.test_vfs.setinfo("test_remove.txt", {})
self.test_vfs.remove("test_remove.txt")
self.assertNotIn("test_remove.txt", self.test_vfs._cache.keys())
def test_removedir(self):
self.test_vfs.makedir("/dummy")
self.test_vfs.removedir("/dummy")
self.assertNotIn("/dummy", self.test_vfs._cache.keys())
    def test_readlink(self):
        """readlink() on a regular (non-link) file must return None."""
        # FIXME: add tests for a file that is actually a link!
        self.assertIsNone(self.test_vfs.readlink("vfs.txt"))
@freeze_time("2018-07-15 17:51:17")
def test_format_list(self):
self.test_vfs.settimes(
"vfs.txt", accessed=datetime.now(), modified=datetime.now()
)
self._f_list = self.test_vfs.format_list("/", self.test_vfs.listdir("/"))
[_result] = [i for i in self._f_list]
self.assertIn(self.test_vfs.default_user, _result)
self.assertIn("Jul 15 17:51", _result)
| 20,119 | Python | .py | 461 | 34.171367 | 118 | 0.598807 | mushorg/conpot | 1,224 | 413 | 115 | GPL-2.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,439 | test_proxy.py | mushorg_conpot/conpot/tests/test_proxy.py | # Copyright (C) 2014 Johnny Vestergaard <jkv@unixcluster.dk>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import unittest
import os
import gevent
from gevent.server import StreamServer
from gevent.socket import socket
from gevent.ssl import wrap_socket
import conpot
from conpot.protocols.proxy.ascii_decoder import AsciiDecoder
from conpot.protocols.proxy.proxy import Proxy
from conpot.utils.networking import fix_sslwrap
package_directory = os.path.dirname(os.path.abspath(conpot.__file__))
class TestProxy(unittest.TestCase):
    """End-to-end tests for the Conpot TCP proxy.

    Each test starts a local echo service, places a Proxy in front of it
    and verifies that a payload sent through the proxy is echoed back
    unchanged — for every combination of TLS on/off and decoder on/off.
    The shared socket/service boilerplate lives in _run_echo_roundtrip().
    """

    def _run_echo_roundtrip(self, use_ssl=False, decoder=None):
        """Start echo service + proxy, send a payload, assert it round-trips.

        :param use_ssl: wrap both the mock service and the client socket in TLS
                        using the default template's key/cert pair.
        :param decoder: dotted path of a proxy decoder class, or None.
        """
        self.test_input = "Hiya, this is a test".encode("utf-8")
        ssl_args = {}
        if use_ssl:
            fix_sslwrap()
            ssl_args = {
                "keyfile": os.path.join(
                    package_directory, "templates/default/ssl/ssl.key"
                ),
                "certfile": os.path.join(
                    package_directory, "templates/default/ssl/ssl.crt"
                ),
            }
        mock_service = StreamServer(("127.0.0.1", 0), self.echo_server, **ssl_args)
        gevent.spawn(mock_service.start)
        gevent.sleep(1)  # give the listener time to come up
        proxy_args = dict(ssl_args)
        if decoder is not None:
            proxy_args["decoder"] = decoder
        proxy = Proxy("proxy", "127.0.0.1", mock_service.server_port, **proxy_args)
        server = proxy.get_server("127.0.0.1", 0)
        gevent.spawn(server.start)
        gevent.sleep(1)
        if use_ssl:
            s = wrap_socket(socket(), **ssl_args)
        else:
            s = socket()
        s.connect(("127.0.0.1", server.server_port))
        s.sendall(self.test_input)
        received = s.recv(len(self.test_input))
        self.assertEqual(self.test_input, received)
        mock_service.stop(1)

    def test_proxy(self):
        """Plain TCP proxying, no TLS, no decoder."""
        self._run_echo_roundtrip()

    def test_ssl_proxy(self):
        """TLS-wrapped proxying, no decoder."""
        self._run_echo_roundtrip(use_ssl=True)

    def test_ascii_decoder(self):
        """The ASCII decoder must replace undecodable bytes (U+FFFD), not raise."""
        test_decoder = AsciiDecoder()
        # should not raise a UnicodeDecodeError
        self.assertTrue(
            (test_decoder.decode_in(b"\x80abc") == b"\xef\xbf\xbdabc")
            and (test_decoder.decode_out(b"\x80abc") == b"\xef\xbf\xbdabc")
        )

    def test_proxy_with_decoder(self):
        """Plain TCP proxying through the ASCII decoder."""
        self._run_echo_roundtrip(
            decoder="conpot.protocols.proxy.ascii_decoder.AsciiDecoder"
        )

    def test_ssl_proxy_with_decoder(self):
        """TLS-wrapped proxying through the ASCII decoder."""
        self._run_echo_roundtrip(
            use_ssl=True,
            decoder="conpot.protocols.proxy.ascii_decoder.AsciiDecoder",
        )

    def echo_server(self, sock, address):
        # Echo back exactly as many bytes as the current test sent.
        r = sock.recv(len(self.test_input))
        sock.send(r)
| 5,237 | Python | .py | 126 | 33.777778 | 83 | 0.644344 | mushorg/conpot | 1,224 | 413 | 115 | GPL-2.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,440 | test_snmp_command_responder.py | mushorg_conpot/conpot/tests/test_snmp_command_responder.py | # Copyright (C) 2020 srenfo
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import os
from tempfile import TemporaryDirectory
import pytest
from pysnmp.smi.error import MibNotFoundError
from conpot.protocols.snmp.command_responder import CommandResponder
def test_register_fails_on_unknown_mib():
    """Registering a symbol from a MIB that cannot be compiled must raise."""
    with TemporaryDirectory() as compiled_mib_dir:
        responder = CommandResponder("", 0, "/tmp", compiled_mib_dir)
        with pytest.raises(MibNotFoundError) as exc_info:
            responder.register("NONEXISTENT-MIB", "foobar", (0,), 42, None)
        message = str(exc_info.value)
        assert message.startswith("NONEXISTENT-MIB compilation error")
        # the failed registration must not leave a symbol behind
        assert not responder._get_mibSymbol("NONEXISTENT-MIB", "foobar")
def test_register_loads_custom_mib():
    """A raw MIB shipped in the test data directory can be compiled and used."""
    mib_source = os.path.join(os.path.dirname(__file__), "data")
    with TemporaryDirectory() as compiled_mib_dir:
        responder = CommandResponder("", 0, mib_source, compiled_mib_dir)
        responder.register("VOGON-POEM-MIB", "poemNumber", (0,), 42, None)
        assert responder._get_mibSymbol("VOGON-POEM-MIB", "poemNumber")
| 1,714 | Python | .py | 34 | 46.705882 | 82 | 0.745803 | mushorg/conpot | 1,224 | 413 | 115 | GPL-2.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,441 | test_guardian_ast.py | mushorg_conpot/conpot/tests/test_guardian_ast.py | # Copyright (C) 2018 Abhinav Saxena <xandfury@gmail.com>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from gevent import monkey
monkey.patch_all()
import re
import unittest
from gevent import socket
from conpot.protocols.guardian_ast.guardian_ast_server import GuardianASTServer
from conpot.utils.greenlet import spawn_test_server, teardown_test_server
DATA = {
"I20100": b"\nI20100\n05/30/2018 19:15\n\nSTATOIL STATION\n\n\n\nIN-TANK INVENTORY\n\nTANK PRODUCT VOLUME TC VOLUME ULLAGE HEIGHT WATER TEMP\n 1 SUPER 2428 2540 4465 39.88 6.62 53.74\n 2 UNLEAD 7457 7543 7874 65.59 8.10 58.17\n 3 DIESEL 6532 6664 4597 33.06 5.91 57.91\n 4 PREMIUM 2839 2867 4597 66.57 4.49 57.88\n",
"I20200": b"\nI20200\n05/30/2018 19:17\n\nSTATOIL STATION\n\n\n\nDELIVERY REPORT\n\nT 1:SUPER \nINCREASE DATE / TIME GALLONS TC GALLONS WATER TEMP DEG F HEIGHT\n\n END: 05/30/2018 14:14 1947 2064 5.32 56.55 65.48\n START: 05/30/2018 14:04 1347 1464 5.32 56.55 42.480000000000004\n AMOUNT: 1647 1764\n\n",
"I20300": b"\nI20300\n05/30/2018 19:18\n\nSTATOIL STATION\n\n\nTANK 1 SUPER \n TEST STATUS: OFF\nLEAK DATA NOT AVAILABLE ON THIS TANK\n\nTANK 2 UNLEAD \n TEST STATUS: OFF\nLEAK DATA NOT AVAILABLE ON THIS TANK\n\nTANK 3 DIESEL \n TEST STATUS: OFF\nLEAK DATA NOT AVAILABLE ON THIS TANK\n\nTANK 4 PREMIUM \n TEST STATUS: OFF\nLEAK DATA NOT AVAILABLE ON THIS TANK\n\n",
"I20400": b"\nI20400\n05/30/2018 19:18\n\nSTATOIL STATION\n\n\n\nSHIFT REPORT\n\nSHIFT 1 TIME: 12:00 AM\n\nTANK PRODUCT\n\n 1 SUPER VOLUME TC VOLUME ULLAGE HEIGHT WATER TEMP\nSHIFT 1 STARTING VALUES 7950 8130 9372 31.03 1.25 52.60\n ENDING VALUES 8890 9016 9717 84.03 1.25 52.60\n DELIVERY VALUE 0\n TOTALS 940\n\n",
"I20500": b"\nI20500\n05/30/2018 19:18\n\n\nSTATOIL STATION\n\n\nTANK PRODUCT STATUS\n\n 1 SUPER NORMAL\n\n 2 UNLEAD HIGH WATER ALARM\n HIGH WATER WARNING\n\n 3 DIESEL NORMAL\n\n 4 PREMIUM NORMAL\n\n",
}
class TestGuardianAST(unittest.TestCase):
    """Tests for the Guardian AST tank-gauge server.

    Every query follows the same wire pattern: connect, send a
    ``\\x01<command>\\r\\n`` request, read one response.  The helpers
    ``_query`` and ``_set_then_inventory`` factor out that socket
    boilerplate, which was previously duplicated in every test method.
    """

    def setUp(self):
        self.guardian_ast_server, self.server_greenlet = spawn_test_server(
            GuardianASTServer, "guardian_ast", "guardian_ast"
        )

    def tearDown(self):
        teardown_test_server(self.guardian_ast_server, self.server_greenlet)

    def _query(self, payload):
        """Send one command to the server and return its (single-read) reply."""
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.connect(("127.0.0.1", self.guardian_ast_server.server.server_port))
        s.send(payload)
        data = s.recv(1024)
        s.close()
        return data

    def _set_then_inventory(self, set_command):
        """Send a S602xx "set product name" command, then query I20100.

        The set command itself produces no reply, so the first recv() is
        expected to time out; the inventory report that is read afterwards
        is returned for inspection.
        """
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.settimeout(0.5)
        s.connect(("127.0.0.1", self.guardian_ast_server.server.server_port))
        s.send(set_command)
        try:
            _ = s.recv(1024)
        except socket.timeout:
            pass
        s.send(b"\x01I20100\r\n")
        data = s.recv(1024)
        s.close()
        return data

    def test_I20100(self):
        data = self._query(b"\x01I20100\r\n")
        # FIXME: Omitting the time etc from data - mechanism to check them needed as well?
        self.assertEqual(
            data[:8] + data[24:156], DATA["I20100"][:8] + DATA["I20100"][24:156]
        )

    def test_I20200(self):
        data = self._query(b"\x01I20200\r\n")
        self.assertEqual(
            data[:8] + data[24:181], DATA["I20200"][:8] + DATA["I20200"][24:181]
        )

    def test_I20300(self):
        data = self._query(b"\x01I20300\r\n")
        self.assertEqual(data[:8] + data[24:], DATA["I20300"][:8] + DATA["I20300"][24:])

    def test_I20400(self):
        data = self._query(b"\x01I20400\r\n")
        self.assertEqual(
            data[:8] + data[24:202], DATA["I20400"][:8] + DATA["I20400"][24:202]
        )

    def test_I20500(self):
        data = self._query(b"\x01I20500\r\n")
        self.assertEqual(data[:8] + data[24:], DATA["I20500"][:8] + DATA["I20500"][24:])

    def test_ast_error(self):
        # malformed / truncated commands are answered with the 9999 error code
        data = self._query(b"\x01S6020\r\n")
        self.assertEqual(data, b"9999FF1B\n")

    def test_S60201(self):
        data = self._set_then_inventory(b"\x01S60201NONSUPER\r\n")
        self.assertIn(b"NONSUPER", data)

    def test_S60202(self):
        data = self._set_then_inventory(b"\x01S60202TESTLEAD\r\n")
        self.assertIn(b"TESTLEAD", data)

    def test_S60203(self):
        data = self._set_then_inventory(b"\x01S60203TESTDIESEL\r\n")
        self.assertIn(b"TESTDIESEL", data)

    def test_S60204(self):
        data = self._set_then_inventory(b"\x01S60204TESTPREMIUM\r\n")
        self.assertIn(b"TESTPREMIUM", data)

    def test_S60200(self):
        # S60200 renames all four products, so the name must appear four times
        # in the subsequent inventory report (overlapping-match lookahead count).
        data = self._set_then_inventory(b"\x01S60200ULTIMATETEST\r\n")
        count = len(re.findall("(?=ULTIMATETEST)", data.decode()))
        self.assertEqual(count, 4)
| 7,959 | Python | .py | 152 | 44.710526 | 487 | 0.59489 | mushorg/conpot | 1,224 | 413 | 115 | GPL-2.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,442 | test_bacnet_server.py | mushorg_conpot/conpot/tests/test_bacnet_server.py | # Copyright (C) 2015 Peter Sooky <xsooky00@stud.fit.vutbr.cz>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from gevent import monkey
monkey.patch_all()
import unittest
from gevent import socket, Timeout
from bacpypes.pdu import GlobalBroadcast, PDU
from bacpypes.apdu import (
APDU,
WhoIsRequest,
IAmRequest,
IHaveRequest,
WhoHasObject,
WhoHasRequest,
ReadPropertyRequest,
ReadPropertyACK,
)
from bacpypes.constructeddata import Any
from bacpypes.primitivedata import Real
from conpot.protocols.bacnet import bacnet_server
from conpot.utils.greenlet import spawn_test_server, teardown_test_server
class TestBACnetServer(unittest.TestCase):
    """
    All tests are executed in a similar way. We initiate a service request to the BACnet server and wait for response.
    Instead of decoding the response, we create an expected response. We encode the expected response and compare the
    two encoded data.
    """

    def setUp(self):
        # Spawn a BACnet server from the "default" template; the helper
        # returns the server object and the greenlet running it.
        self.bacnet_server, self.greenlet = spawn_test_server(
            bacnet_server.BacnetServer, "default", "bacnet"
        )
        self.address = (self.bacnet_server.host, self.bacnet_server.port)

    def tearDown(self):
        teardown_test_server(self.bacnet_server, self.greenlet)

    def test_whoIs(self):
        """A Who-Is whose instance range covers the device must be answered
        with the device's I-Am broadcast."""
        request = WhoIsRequest(
            deviceInstanceRangeLowLimit=500, deviceInstanceRangeHighLimit=50000
        )
        # Encode application layer (APDU), then wrap into network-layer PDU bytes.
        apdu = APDU()
        request.encode(apdu)
        pdu = PDU()
        apdu.encode(pdu)
        buf_size = 1024
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        s.sendto(pdu.pduData, self.address)
        data = s.recvfrom(buf_size)
        s.close()
        received_data = data[0]
        # Build the I-Am the template device (instance 36113) is expected to emit.
        expected = IAmRequest()
        expected.pduDestination = GlobalBroadcast()
        expected.iAmDeviceIdentifier = 36113
        expected.maxAPDULengthAccepted = 1024
        expected.segmentationSupported = "segmentedBoth"
        expected.vendorID = 15
        exp_apdu = APDU()
        expected.encode(exp_apdu)
        exp_pdu = PDU()
        exp_apdu.encode(exp_pdu)
        # Compare the raw encodings rather than decoding the server's reply.
        self.assertEqual(exp_pdu.pduData, received_data)

    def test_whoHas(self):
        """A Who-Has for binaryInput 12 must be answered with an I-Have
        naming that object ("BI 01")."""
        request_object = WhoHasObject()
        request_object.objectIdentifier = ("binaryInput", 12)
        request = WhoHasRequest(object=request_object)
        apdu = APDU()
        request.encode(apdu)
        pdu = PDU()
        apdu.encode(pdu)
        buf_size = 1024
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        s.sendto(pdu.pduData, self.address)
        data = s.recvfrom(buf_size)
        s.close()
        received_data = data[0]
        expected = IHaveRequest()
        expected.pduDestination = GlobalBroadcast()
        expected.deviceIdentifier = 36113
        expected.objectIdentifier = 12
        expected.objectName = "BI 01"
        exp_apdu = APDU()
        expected.encode(exp_apdu)
        exp_pdu = PDU()
        exp_apdu.encode(exp_pdu)
        self.assertEqual(exp_pdu.pduData, received_data)

    def test_readProperty(self):
        """ReadProperty of present-value (property 85) on analogInput 14
        must return an ACK carrying Real(68.0)."""
        request = ReadPropertyRequest(
            objectIdentifier=("analogInput", 14), propertyIdentifier=85
        )
        request.apduMaxResp = 1024
        request.apduInvokeID = 101
        apdu = APDU()
        request.encode(apdu)
        pdu = PDU()
        apdu.encode(pdu)
        buf_size = 1024
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        s.sendto(pdu.pduData, self.address)
        data = s.recvfrom(buf_size)
        s.close()
        received_data = data[0]
        # The ACK must echo the invoke id and object/property identifiers.
        expected = ReadPropertyACK()
        expected.pduDestination = GlobalBroadcast()
        expected.apduInvokeID = 101
        expected.objectIdentifier = 14
        expected.objectName = "AI 01"
        expected.propertyIdentifier = 85
        expected.propertyValue = Any(Real(68.0))
        exp_apdu = APDU()
        expected.encode(exp_apdu)
        exp_pdu = PDU()
        exp_apdu.encode(exp_pdu)
        self.assertEqual(exp_pdu.pduData, received_data)

    def test_no_response_requests(self):
        """When the request has apduType not 0x01, no reply should be returned from Conpot"""
        request = ReadPropertyRequest(
            objectIdentifier=("analogInput", 14), propertyIdentifier=85
        )
        # The raw pduData sent below is this placeholder, not an encoded APDU.
        request.pduData = bytearray(b"test_data")
        request.apduMaxResp = 1024
        request.apduInvokeID = 101
        # Build requests - Confirmed, simple ack pdu, complex ack pdu, error pdu - etc.
        # NOTE(review): the SAME request object is mutated and appended on each
        # pass, so test_requests holds multiple references to one object in its
        # final (apduType=7) state.  For this "expect no reply" test that may be
        # acceptable, but confirm it matches the intent of exercising each type.
        test_requests = list()
        for i in range(2, 8):
            if i not in {1, 3, 4}:
                request.apduType = i
                if i == 2:
                    # when apdu.apduType is 2 - we have SimpleAckPDU
                    # set the apduInvokeID and apduService
                    request.apduService = 8
                elif i == 5:
                    # when apdu.apduType is 5 - we have ErrorPDU
                    # set the apduInvokeID and apduService
                    request.apduService = 8
                elif i == 6:
                    # when apdu.apduType is 6 - we have RejectPDU
                    # set the apduInvokeID and apduAbortRejectReason
                    request.apduAbortRejectReason = 9
                else:
                    # when apdu.apduType is 7 - we have AbortPDU
                    # set the apduInvokeID and apduAbortRejectReason
                    request.apduAbortRejectReason = 9
                test_requests.append(request)
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        buf_size = 1024
        [s.sendto(i.pduData, self.address) for i in test_requests]
        results = None
        # None of the sends should elicit a reply within the 1 s window.
        with Timeout(1, False):
            results = [s.recvfrom(buf_size) for i in range(len(test_requests))]
        self.assertIsNone(results)
| 6,514 | Python | .py | 162 | 31.574074 | 118 | 0.64612 | mushorg/conpot | 1,224 | 413 | 115 | GPL-2.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,443 | test_iec104_server.py | mushorg_conpot/conpot/tests/test_iec104_server.py | # Copyright (C) 2017 Patrick Reichenberger (University of Passau) <patrick.reichenberger@t-online.de>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from gevent import monkey
monkey.patch_all()
import socket
import time
import unittest
from unittest.mock import patch
import conpot.core as conpot_core
from conpot.protocols.IEC104 import IEC104_server, frames
from conpot.utils.greenlet import spawn_test_server, teardown_test_server
class TestIEC104Server(unittest.TestCase):
    """Tests for the IEC 60870-5-104 server: U-frame handshakes and
    single-command (type 45) writes against the template's point list."""

    def setUp(self):
        self.databus = conpot_core.get_databus()
        # Server listens on the standard IEC-104 port 2404.
        self.iec104_inst, self.greenlet = spawn_test_server(
            IEC104_server.IEC104Server, "IEC104", "IEC104", port=2404
        )
        # Common address (COA) of the simulated station, taken from the template.
        self.coa = self.iec104_inst.device_data_controller.common_address

    def tearDown(self):
        teardown_test_server(self.iec104_inst, self.greenlet)

    def test_startdt(self):
        """
        Objective: Test if answered correctly to STARTDT act
        """
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.settimeout(1)
        s.connect(("127.0.0.1", 2404))
        s.send(frames.STARTDT_act.build())
        # U-frames are fixed length (6 bytes).
        data = s.recv(6)
        self.assertSequenceEqual(data, frames.STARTDT_con.build())

    def test_testfr(self):
        """
        Objective: Test if answered correctly to TESTFR act
        """
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.settimeout(1)
        s.connect(("127.0.0.1", 2404))
        s.send(frames.TESTFR_act.build())
        data = s.recv(6)
        self.assertEqual(data, frames.TESTFR_con.build())

    def test_write_for_non_existing(self):
        """
        Objective: Test answer for a command to a device that doesn't exist
        (Correct behaviour of the IEC104 protocol is not known exactly. Other case is test for no answer)
        """
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.settimeout(1)
        s.connect(("127.0.0.1", 2404))
        # Data transfer must be started before I-frames are accepted.
        s.send(frames.STARTDT_act.build())
        s.recv(6)
        # Single command (type 45) to an IOA that is not in the template.
        single_command = (
            frames.i_frame()
            / frames.asdu_head(COA=self.coa, COT=6)
            / frames.asdu_infobj_45(IOA=0xEEEEEE, SCS=1)
        )
        s.send(single_command.build())
        data = s.recv(16)
        # Expected: mirrored command with COT=47 (unknown information object address).
        bad_addr = (
            frames.i_frame(RecvSeq=0x0002)
            / frames.asdu_head(COA=self.coa, COT=47)
            / frames.asdu_infobj_45(IOA=0xEEEEEE, SCS=1)
        )
        self.assertSequenceEqual(data, bad_addr.build())

    def test_write_relation_for_existing(self):
        """
        Objective: Test answer for a correct command to a device that does exist and has a related sensor
        (Actuator 22_20 (Type 45: Single Command) will be tested,
        the corresponding(!) sensor 13_20 (Type 1: Single Point Information) changes the value
        and the termination confirmation is returned)
        """
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.settimeout(1)
        s.connect(("127.0.0.1", 2404))
        s.send(frames.STARTDT_act.build())
        s.recv(6)
        self.databus.set_value("22_20", 0)  # Must be in template and relation to 13_20
        self.databus.set_value("13_20", 0)  # Must be in template
        single_command = (
            frames.i_frame()
            / frames.asdu_head(COA=self.coa, COT=6)
            / frames.asdu_infobj_45(IOA=0x141600, SCS=1)
        )
        s.send(single_command.build())
        data = s.recv(16)
        # 1st reply: activation confirmation (COT=7) mirroring the command.
        act_conf = (
            frames.i_frame(RecvSeq=0x0002)
            / frames.asdu_head(COA=self.coa, COT=7)
            / frames.asdu_infobj_45(IOA=0x141600, SCS=1)
        )
        self.assertSequenceEqual(data, act_conf.build())
        data = s.recv(16)
        # 2nd reply: spontaneous-ish info (COT=11) from the related sensor 13_20.
        info = (
            frames.i_frame(SendSeq=0x0002, RecvSeq=0x0002)
            / frames.asdu_head(COA=self.coa, COT=11)
            / frames.asdu_infobj_1(IOA=0x140D00)
        )
        info.SIQ = frames.SIQ(SPI=1)
        self.assertSequenceEqual(data, info.build())
        data = s.recv(16)
        # 3rd reply: activation termination (COT=10).
        act_term = (
            frames.i_frame(SendSeq=0x0004, RecvSeq=0x0002)
            / frames.asdu_head(COA=self.coa, COT=10)
            / frames.asdu_infobj_45(IOA=0x141600, SCS=1)
        )
        self.assertSequenceEqual(data, act_term.build())

    def test_write_no_relation_for_existing(self):
        """
        Objective: Test answer for a correct command to a device that does exist and has no related sensor
        (Actuator 22_19 (Type 45: Single Command) will be tested, the corresponding(!) sensor is not existent)
        """
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.settimeout(1)
        s.connect(("127.0.0.1", 2404))
        s.send(frames.STARTDT_act.build())
        s.recv(6)
        self.databus.set_value("22_19", 0)  # Must be in template and no relation
        single_command = (
            frames.i_frame()
            / frames.asdu_head(COA=self.coa, COT=6)
            / frames.asdu_infobj_45(IOA=0x131600, SCS=0)
        )
        s.send(single_command.build())
        data = s.recv(16)
        act_conf = (
            frames.i_frame(RecvSeq=0x0002)
            / frames.asdu_head(COA=self.coa, COT=7)
            / frames.asdu_infobj_45(IOA=0x131600, SCS=0)
        )
        self.assertSequenceEqual(data, act_conf.build())
        data = s.recv(16)
        # No related sensor, so termination follows immediately (no COT=11 frame).
        act_term = (
            frames.i_frame(SendSeq=0x0002, RecvSeq=0x0002)
            / frames.asdu_head(COA=self.coa, COT=10)
            / frames.asdu_infobj_45(IOA=0x131600, SCS=0)
        )
        self.assertSequenceEqual(data, act_term.build())

    def test_write_wrong_type_for_existing(self):
        """
        Objective: Test answer for a command of wrong type to a device that does exist
        (Actuator 22_20 (Type 45: Single Command) will be tested,
        but a wrong command type (Double Commands instead of Single Command) is sent to device)
        """
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.settimeout(1)
        s.connect(("127.0.0.1", 2404))
        s.send(frames.STARTDT_act.build())
        s.recv(6)
        self.databus.set_value("22_20", 0)  # Must be in template
        # Deliberately send a double command (type 46) at a single-command point.
        single_command = (
            frames.i_frame()
            / frames.asdu_head(COA=self.coa, COT=6)
            / frames.asdu_infobj_46(IOA=0x141600, DCS=1)
        )
        s.send(single_command.build())
        data = s.recv(16)
        # Expected: negative confirmation (PN=1) with COT=7.
        act_conf = (
            frames.i_frame(RecvSeq=0x0002)
            / frames.asdu_head(COA=self.coa, PN=1, COT=7)
            / frames.asdu_infobj_46(IOA=0x141600, DCS=1)
        )
        self.assertSequenceEqual(data, act_conf.build())

    @patch("conpot.protocols.IEC104.IEC104_server.gevent._socket3.socket.recv")
    def test_failing_connection_connection_lost_event(self, mock_timeout):
        """
        Objective: Test if correct exception is executed when a socket.error
        with EPIPE occurs
        """
        # Force recv() inside the server to raise a broken-pipe style OSError.
        mock_timeout.side_effect = OSError(32, "Socket Error")
        conpot_core.get_sessionManager().purge_sessions()
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.connect(("127.0.0.1", 2404))
        time.sleep(0.1)
        # The session log must record the connection and its loss, in order.
        log_queue = conpot_core.get_sessionManager().log_queue
        con_new_event = log_queue.get()
        con_lost_event = log_queue.get(timeout=1)
        self.assertEqual("NEW_CONNECTION", con_new_event["data"]["type"])
        self.assertEqual("CONNECTION_LOST", con_lost_event["data"]["type"])
        s.close()
| 8,229 | Python | .py | 192 | 34.270833 | 110 | 0.628311 | mushorg/conpot | 1,224 | 413 | 115 | GPL-2.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,444 | test_kamstrup_meter.py | mushorg_conpot/conpot/tests/test_kamstrup_meter.py | # Copyright (C) 2014 Johnny Vestergaard <jkv@unixcluster.dk>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from gevent import monkey
monkey.patch_all()
import conpot
from conpot.protocols.kamstrup_meter.command_responder import CommandResponder
from conpot.protocols.kamstrup_meter.request_parser import KamstrupRequestParser
from conpot.protocols.kamstrup_meter.kamstrup_server import KamstrupServer
from conpot.utils.greenlet import spawn_test_server, teardown_test_server
from conpot.utils.networking import chr_py3
from gevent import socket
import os
import unittest
class TestKamstrup(unittest.TestCase):
    """Tests for the Kamstrup 382 smart-meter protocol: byte-wise request
    parsing, register lookup, and a live round-trip against the server."""

    def setUp(self):
        # get the conpot directory
        self.dir_name = os.path.dirname(conpot.__file__)
        self.request_parser = KamstrupRequestParser()
        self.command_responder = CommandResponder(
            self.dir_name + "/templates/kamstrup_382/kamstrup_meter/kamstrup_meter.xml"
        )
        self.kamstrup_management_server, self.server_greenlet = spawn_test_server(
            KamstrupServer, "kamstrup_382", "kamstrup_meter"
        )

    def tearDown(self):
        teardown_test_server(self.kamstrup_management_server, self.server_greenlet)

    def test_request_get_register(self):
        """Feed a register-1033 request byte-by-byte through the parser,
        then send the same request over a live socket."""
        # requesting register 1033
        request_bytes = (0x80, 0x3F, 0x10, 0x01, 0x04, 0x09, 0x18, 0x6D, 0x0D)
        last_index = len(request_bytes) - 1
        for i, byte_value in enumerate(request_bytes):
            self.request_parser.add_byte(chr(byte_value))
            if i < last_index:
                # parser returns None until it can put together an entire message
                self.assertIsNone(self.request_parser.get_request())
        parsed_request = self.request_parser.get_request()
        response = self.command_responder.respond(parsed_request)
        self.assertEqual(len(response.registers), 1)
        self.assertEqual(response.registers[0].name, 1033)
        # we should have no left overs
        self.assertEqual(len(self.request_parser.bytes), 0)
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.connect(("127.0.0.1", self.kamstrup_management_server.server.server_port))
        # Same request as parsed above; chr_py3 yields one-byte chunks which the
        # original concatenated with "+" one call at a time.
        s.sendall(b"".join(chr_py3(b) for b in request_bytes))
        data = s.recv(1024)
        s.close()
        # FIXME: verify bytes received from server - ask jkv?
        pkt = [hex(byte) for byte in data]
        self.assertTrue(("0x40" in pkt) and ("0x3f" in pkt) and ("0xd" in pkt))
| 3,333 | Python | .py | 72 | 39.486111 | 87 | 0.690557 | mushorg/conpot | 1,224 | 413 | 115 | GPL-2.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,445 | test_s7_server.py | mushorg_conpot/conpot/tests/test_s7_server.py | # Copyright (C) 2013 Lukas Rist <glaslos@gmail.com>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from gevent import monkey
monkey.patch_all()
import unittest
from conpot.protocols.s7comm.s7_server import S7Server
from conpot.tests.helpers import s7comm_client
from conpot.utils.greenlet import spawn_test_server, teardown_test_server
class TestS7Server(unittest.TestCase):
    """Tests for the S7comm (Siemens S7) server using the bundled test client."""

    def setUp(self):
        self.s7_instance, self.greenlet = spawn_test_server(
            S7Server, "default", "s7comm"
        )
        self.server_host = self.s7_instance.server.server_host
        self.server_port = self.s7_instance.server.server_port

    def tearDown(self):
        teardown_test_server(self.s7_instance, self.greenlet)

    def test_s7(self):
        """
        Objective: Test if the S7 server returns the values expected.
        """
        # Candidate source/destination TSAP pairs to probe.
        src_tsaps = (0x100, 0x200)
        dst_tsaps = (0x102, 0x200, 0x201)
        s7_con = s7comm_client.s7(self.server_host, self.server_port)
        res = None
        # NOTE(review): if src_tsap/dst_tsap are plain attributes these
        # assignments cannot raise S7ProtocolError, so the first combination
        # (0x100, 0x102) is always chosen — verify against s7comm_client.
        for src_tsap in src_tsaps:
            for dst_tsap in dst_tsaps:
                try:
                    s7_con.src_tsap = src_tsap
                    s7_con.dst_tsap = dst_tsap
                    res = src_tsap, dst_tsap
                    break
                except s7comm_client.S7ProtocolError:
                    continue
            if res:
                break
        # Establish the COTP/S7 session with the selected TSAP pair.
        s7_con.src_ref = 10
        s7_con.s.settimeout(s7_con.timeout)
        s7_con.s.connect((s7_con.ip, s7_con.port))
        s7_con.Connect()
        identities = s7comm_client.GetIdentity(
            self.server_host, self.server_port, res[0], res[1]
        )
        s7_con.plc_stop_function()
        # Expected SZL identity data keyed by section id, then item id.
        dic = {
            17: {1: "v.0.0"},
            28: {
                1: "Technodrome",
                2: "Siemens, SIMATIC, S7-200",
                3: "Mouser Factory",
                4: "Original Siemens Equipment",
                5: "88111222",
                7: "IM151-8 PN/DP CPU",
                10: "",
                11: "",
            },
        }
        # Each identity line is "section;item;value" — compare against the table.
        for line in identities:
            sec, item, val = line.split(";")
            try:
                self.assertTrue(dic[int(sec)][int(item)] == val.strip())
            except AssertionError:
                print((sec, item, val))
                raise
| 3,005 | Python | .py | 78 | 29.205128 | 73 | 0.598012 | mushorg/conpot | 1,224 | 413 | 115 | GPL-2.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,446 | test_logger_taxii.py | mushorg_conpot/conpot/tests/test_logger_taxii.py | # Copyright (C) 2013 Johnny Vestergaard <jkv@unixcluster.dk>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import os
import uuid
from datetime import datetime
from io import StringIO
import unittest
from configparser import ConfigParser
from lxml import etree
from conpot.core.loggers.taxii_log import TaxiiLogger
from conpot.core.loggers.stix_transform import StixTransformer
import sdv.validators as validators
# from conpot.tests.helpers.mitre_stix_validator import STIXValidator
class TestLoggers(unittest.TestCase):
@unittest.skip("disabled until STIX 2.0")
def test_stix_transform(self):
"""
Objective: Test if our STIX xml can be validated.
"""
config = ConfigParser()
config_file = os.path.join(os.path.dirname(__file__), "../conpot.cfg")
config.read(config_file)
config.set("taxii", "enabled", True)
test_event = {
"remote": ("127.0.0.1", 54872),
"data_type": "s7comm",
"public_ip": "111.222.111.222",
"timestamp": datetime.now(),
"session_id": str(uuid.uuid4()),
"data": {
0: {"request": "who are you", "response": "mr. blue"},
1: {"request": "give me apples", "response": "no way"},
},
}
dom = etree.parse("conpot/templates/default/template.xml")
stixTransformer = StixTransformer(config, dom)
stix_package_xml = stixTransformer.transform(test_event)
validator = validators.STIXSchemaValidator()
result = validator.validate(
StringIO(stix_package_xml.encode("utf-8"))
).as_dict()
has_errors = False
error_string = ""
if "errors" in result:
has_errors = True
for error in result["errors"]:
error_string += error["message"]
error_string += ", "
self.assertFalse(
has_errors, "Error while validations STIX xml: {0}".format(error_string)
)
@unittest.skip("disabled until taxii server is up and running again")
def test_taxii(self):
"""
Objective: Test if we can transmit data to MITRE's TAXII test server.
Note: This actually also tests the StixTransformer since the event is parsed by the transformer
before transmission.
"""
config = ConfigParser()
config_file = os.path.join(os.path.dirname(__file__), "../conpot.cfg")
config.read(config_file)
config.set("taxii", "enabled", True)
test_event = {
"remote": ("127.0.0.1", 54872),
"data_type": "s7comm",
"timestamp": datetime.now(),
"public_ip": "111.222.111.222",
"session_id": str(uuid.uuid4()),
"data": {
0: {"request": "who are you", "response": "mr. blue"},
1: {"request": "give me apples", "response": "no way"},
},
}
dom = etree.parse("conpot/templates/default/template.xml")
taxiiLogger = TaxiiLogger(config, dom)
taxii_result = taxiiLogger.log(test_event)
# TaxiiLogger returns false if the message could not be delivered
self.assertTrue(taxii_result)
| 3,910 | Python | .py | 92 | 34.684783 | 103 | 0.635408 | mushorg/conpot | 1,224 | 413 | 115 | GPL-2.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,447 | test_utils_mac_addr.py | mushorg_conpot/conpot/tests/test_utils_mac_addr.py | # Copyright (C) 2015 Adarsh Dinesh <adarshdinesh@gmail.com>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import unittest
import conpot.utils.mac_addr as mac_addr
import subprocess
class TestMacAddrUtil(unittest.TestCase):
def setUp(self):
self.change_mac_process = subprocess.Popen(
["ip", "li", "delete", "dummy", "type", "dummy"],
stderr=subprocess.STDOUT,
stdout=subprocess.PIPE,
)
def tearDown(self):
self.change_mac_process.terminate()
@unittest.skip("shunt to a later phase")
def test_mac(self):
"""
Objective: Test if the spoofer is able to change MAC address
"""
testmac = b"00:de:ad:be:ef:00"
iface = b"dummy"
# Load dummy module
s = subprocess.Popen(
["modprobe", "dummy"], stderr=subprocess.STDOUT, stdout=subprocess.PIPE
)
# Check if dummy is loaded
data = s.stdout.read()
if data:
self.skipTest("Can't create dummy device")
# Create a dummy network interface
subprocess.Popen(
["ip", "li", "add", "dummy", "type", "dummy"],
stderr=subprocess.STDOUT,
stdout=subprocess.PIPE,
)
s = subprocess.Popen(["ip", "link", "show"], stdout=subprocess.PIPE)
data = s.stdout.read()
if b"dummy" in data:
# Change mac address of dummy interface and test it
mac_addr.change_mac(iface=iface, mac=testmac)
flag = mac_addr._check_mac(iface, testmac)
# Remove the dummy interface
with self.change_mac_process:
self.assertTrue(flag is True)
else:
self.skipTest("Can't change MAC address")
| 2,419 | Python | .py | 60 | 33.1 | 83 | 0.647833 | mushorg/conpot | 1,224 | 413 | 115 | GPL-2.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,448 | test_kamstrup_decoder.py | mushorg_conpot/conpot/tests/test_kamstrup_decoder.py | # Copyright (C) 2014 Johnny Vestergaard <jkv@unixcluster.dk>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import unittest
from conpot.protocols.kamstrup_meter.decoder_382 import Decoder382
class TestKamstrupDecoder(unittest.TestCase):
# TODO: Rename functions when i figure out the actual meaning of the requests / responses
def test_request_one(self):
request = [
chr(0x80),
chr(0x3F),
chr(0x10),
chr(0x01),
chr(0x04),
chr(0x1E),
chr(0x7A),
chr(0xBB),
chr(0x0D),
]
decoder = Decoder382()
result = decoder.decode_in(request)
self.assertEqual(result, "Request for 1 register(s): 1054 (Voltage p1) [0x3f]")
def test_invalid_crc(self):
invalid_sequences = [
[
chr(0x80),
chr(0x3F),
chr(0x10),
chr(0x02),
chr(0x00),
chr(0x01),
chr(0x55),
chr(0xA1),
chr(0x0D),
],
[
chr(0x80),
chr(0x3F),
chr(0x10),
chr(0x01),
chr(0x00),
chr(0x02),
chr(0x65),
chr(0xCF),
chr(0x0D),
],
]
for seq in invalid_sequences:
decoder = Decoder382()
result = decoder.decode_in(seq)
self.assertEqual(
result,
"Request discarded due to invalid CRC.",
"Invalid CRC {0} tested valid".format(seq),
)
# def test_request_two(self):
# request = "803f1001000265c20d".encode('hex-codec')
# decoder = Decoder()
# result = decoder.decode_in(request)
#
# def test_response_one(self):
# response = "403f1000010204000000008be1900d".encode('hex-codec')
# decoder = Decoder()
# result = decoder.decode_in(response)
#
# def test_response_two(self):
# response = "403f10000202040000000000091bf90d".encode('hex-codec')
# decoder = Decoder()
# result = decoder.decode_in(response)
| 3,020 | Python | .py | 82 | 26.195122 | 93 | 0.55116 | mushorg/conpot | 1,224 | 413 | 115 | GPL-2.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,449 | test_greenlet.py | mushorg_conpot/conpot/tests/test_greenlet.py | # Copyright (C) 2020 srenfo
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from contextlib import redirect_stderr
import pytest
from gevent import Greenlet, sleep
from conpot import core
from conpot.utils.greenlet import (
spawn_startable_greenlet,
spawn_test_server,
teardown_test_server,
)
class StartableStub:
    """Minimal stand-in for a conpot service: records start() arguments."""

    def __init__(self):
        # Positional args of the last start() call; None until start() runs.
        self.args = None

    def start(self, *args):
        self.args = args
@pytest.mark.parametrize("args", ((), ("127.0.0.1", 8080), (1, 2, 3, 4)))
def test_spawn_startable_greenlet_passes_args(args):
    """The spawned greenlet must forward its positional args to start()."""
    stub = StartableStub()
    spawn_startable_greenlet(stub, *args).get()
    assert stub.args == args
def test_spawn_startable_greenlet_sets_name():
    """The greenlet repr should carry the wrapped instance's class name."""
    label = str(spawn_startable_greenlet(StartableStub()))
    assert label.startswith('<ServiceGreenlet "StartableStub"')
def test_spawn_startable_greenlet_not_scheduled():
    """Immediately after spawning, the greenlet has not been scheduled yet."""
    assert spawn_startable_greenlet(StartableStub()).scheduled_once.is_set() is False
def test_spawn_startable_greenlet_can_observe_scheduling():
    """Waiting on scheduled_once must observe the first scheduling."""
    spawned = spawn_startable_greenlet(StartableStub())
    spawned.scheduled_once.wait()
    assert spawned.scheduled_once.is_set()
class ServerStub:
    """Records constructor and start() arguments for later inspection."""

    def __init__(self, template, template_directory, args):
        self.template = template
        self.template_directory = template_directory
        self.args = args
        # Filled in by start(); None until the stub is started.
        self.host = None
        self.port = None

    def start(self, host, port):
        # Remember where the server was asked to listen.
        self.host = host
        self.port = port
def test_spawn_test_server_returns_server_and_greenlet():
    """spawn_test_server yields the instance and its greenlet, wired to the template."""
    instance, service = spawn_test_server(
        ServerStub, "default", "Fake", args="arbitrary"
    )
    assert isinstance(instance, ServerStub)
    assert isinstance(service, Greenlet)
    assert instance.template.endswith("/conpot/templates/default/Fake/Fake.xml")
    assert instance.template_directory.endswith("/conpot/templates/default")
    assert instance.args == "arbitrary"
def test_spawn_test_server_initializes_databus():
    # Spawning any test server must leave the global databus initialized.
    spawn_test_server(ServerStub, "default", "Fake")
    assert core.get_databus().initialized.is_set()
def test_spawn_test_server_runs_at_least_once():
    """spawn_test_server only returns after the greenlet has been scheduled."""
    _, service = spawn_test_server(ServerStub, "default", "Fake")
    assert service.scheduled_once.is_set()
def test_spawn_test_server_starts_on_localhost_any_port():
    """By default the server binds to loopback with an OS-chosen port (0)."""
    instance, _ = spawn_test_server(ServerStub, "default", "Fake")
    assert instance.host == "127.0.0.1"
    assert instance.port == 0
def test_spawn_test_server_can_set_port():
    """An explicit port keyword is forwarded to the server's start()."""
    instance, _ = spawn_test_server(ServerStub, "default", "Fake", port=42)
    assert instance.port == 42
class LoopingServer:
    """Server double whose start() spins cooperatively until stop() is called."""

    def __init__(self, *_, **__):
        self.stopped = False

    def start(self, _, __):
        # Yield to other greenlets on every pass until asked to stop.
        while True:
            if self.stopped:
                break
            sleep()

    def stop(self):
        self.stopped = True
def test_teardown_test_server_stops_instance():
    """Teardown must stop the server instance and kill its greenlet."""
    instance, service = spawn_test_server(LoopingServer, "default", "Fake")
    teardown_test_server(instance, service)
    assert instance.stopped
    assert service.dead
class RaisingServer(LoopingServer):
    """LoopingServer variant whose start() raises once it has been stopped."""

    def start(self, host, port):
        # Loop until stop(), then raise so teardown can observe the error.
        super().start(host, port)
        raise RuntimeError("Test Error")
def test_teardown_test_server_propagates_exception():
    """An exception raised inside the service greenlet must surface on teardown."""
    instance, service = spawn_test_server(RaisingServer, "default", "Fake")
    with pytest.raises(RuntimeError) as excinfo:
        # Greenlets dump exception tracebacks to stderr; keep output clean.
        with redirect_stderr(None):
            teardown_test_server(instance, service)
    assert str(excinfo.value) == "Test Error"
| 4,319 | Python | .py | 102 | 37.5 | 84 | 0.719328 | mushorg/conpot | 1,224 | 413 | 115 | GPL-2.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,450 | kamstrup_management_data.py | mushorg_conpot/conpot/tests/data/kamstrup_management_data.py | """
Data related to Kamstrup Management Protocol
"""
RESPONSES = {
"H": {
"H": b"==============================================================================\r\nService Menu\r\n==============================================================================\r\nH: Help [cmd].\r\nQ: Close connection.\r\n!AC: Access control.\r\n!AS: Alarm Server.\r\n!GC: Get Config.\r\n!GV: Software version.\r\n!SA: Set KAP Server IP and port (*1).\r\n!SB: Set 2nd KAP Server IP and port.\r\n!SC: Set Config (*1).\r\n!SD: Set device name (*1).\r\n!SH: Set KAP Server lookup (DNS or DHCP)\r\n!SI: Set IP (enter either valid IP or 0 to force DHCP)(*1).\r\n!SK: Set KAP watchdog timeout(WDT).\r\n!SN: Set IP for DNS Name servers to use.\r\n!SP: Set IP Ports\r\n!SS: Set Serial Settings.\r\n!RC: Request connect\r\n!RR: Request restart (*1).\r\n!WM: Wink module.\r\n==============================================================================\r\n(*1) Forces system restart\r\n==============================================================================\r\nKamstrup (R)\r\n",
"!AC": b"!AC: Access control.\r\n Used for simple IP address firewall filtering.\r\n If enabled only the listed IP's can assess this module.\r\n Format: !AC [En/Dis [ID IP]]\r\n Example: !AC\r\n Lists the setup.\r\n Example: !AC 0\r\n Disables the filter allowing everybody to access.\r\n Example: !AC 0 1 192.168.1.211\r\n !AC 0 2 10.0.0.1\r\n !AC 0 3 195.215.168.45\r\n !AC 1\r\n Only connections from 192.168.1.211, \r\n 10.0.0.1 or 195.215.168.45 are possible.\r\n",
"!AS": b"!AS: Alarm Server.\r\n Used to set IP and port of server to handle alarm notifications.\r\n Format: !AS [SrvIP [SrvPort]]\r\n Example: !AS 195.215.168.45 \r\n Alarms are sent to 195.215.168.45.\r\n Example: !AS 195.215.168.45 4000\r\n Alarms are sent to to port 4000 on 195.215.168.45.\r\n Example: !AS 0.0.0.0\r\n Alarm reporting is disabled.\r\n",
"!GC": b"!GC: Get Config.\r\n Returns the module configuration.\r\n",
"!GV": b"!GV: Software version.\r\n Returns the software revision of the module.\r\n",
"!SA": b'!SA: Set KAP Server IP and port (*1).\r\n Used for setting the IP of the Server to receive KAP-pacakeges.\r\n UDP port on server can be provided optionally.\r\n Format: !SA SrvIP [SrvPort]\r\n Example: !SA 195215168045 \r\n KAP packages are hereafter sent to 195.215.168.45.\r\n Example: !SA 195.215.168.45 \r\n Same result as "!SA 195215168045".\r\n Example: !SA 192168001002 61000\r\n KAP packages are hereafter sent to 192.168.1.2:61000\r\n from module port 8000.\r\n Example: !SA 0.0.0.0 \r\n Disables KAP.\r\n',
"!SB": b"!SB: Set 2nd KAP Server IP and port.\r\n Used for redundancy with two KAP servers.\r\n When enabled every second KAP is send to the IP defined by !SB.\r\n NB: The KAP interval to each individual server is half of KAPINT\r\n defined by !SK.\r\n NB: !SA must be enabled (not 0.0.0.0) \r\n Format: !SB SrvIP [SrvPort]\r\n Example: !SB 195.215.168.45 \r\n KAP packages are hereafter also sent to 195.215.168.45.\r\n Example: !SB 0.0.0.0 \r\n Disabled.\r\n Example: !SB 192.168.1.2 61000\r\n KAP packages are hereafter sent to 192.168.1.2:61000\r\n from module port 8000.\r\n",
"!SC": b"!SC: Set Config (*1).\r\n Configures the module.\r\n Format: !SC DHCP IP SUB GW DNS1 DNS2 DNS3 SRV_IP DEVICENAME SRV_DNS\r\n DHCP 1 for DHCP, 0 for static IP.\r\n IP.. Static IP settings.\r\n SRV_IP IP of remote server (Only if SRV_DNS is 0).\r\n DEVICENAME User label for for individual naming.\r\n SRV_DNS DNS name of remote server (0 to disable DNS lookup)\r\n",
"!SD": b"!SD: Set device name (*1).\r\n Option for individual naming of the module (0-20 chars).\r\n",
"!SH": b'!SH: Set KAP Server lookup (DNS or DHCP)\r\n Used for setting the DNS name of the Server to receive KAP-pacakeges.\r\n Using the keyword "DHCP_OPTION:xxx" makes the module request the IP using DHCP option xxx.\r\n The settings are first activated when the module is reset (using !RR).\r\n Example: !SH 0 \r\n Lookup Disabled.\r\n The module will send KAP to the IP listed by !SA. \r\n Example: !SH hosting.kamstrup_meter.dk \r\n Use DNS lookup.\r\n The module will send KAP to the IP listed by !SA until it resolves the DNS,\r\n hereafter the KAP will be sent to hosting.kamstrup_meter.dk\r\n Example: !SH DHCP_OPTION:129\r\n Use DHCP Option.\r\n The module will send KAP to the IP provided by DHCP (in this case option 129).\r\n The module uses the IP provided by !SA if the DHCP offer dos not include option xxx data.\r\n',
"!SI": b"!SI: Set IP (enter either valid IP or 0 to force DHCP)(*1).\r\n Used for changing the module IP.\r\n (Use !SC if you need to change subnet/Gateway too).\r\n Entering a '0' will enable DHCP.\r\n Format: !SI IP\r\n Example: !SI 0\r\n The module will reboot and acuire the IP settings using DHCP.\r\n Example: !SI 192168001200\r\n The module will reboot using static IP addr 192.168.1.200.\r\n (SUB, GW and DNS unchanged)\r\n Example: !SI 192.168.1.200\r\n Same as !SI 192168001200.\r\n",
"!SK": b"!SK: Set KAP watchdog timeout(WDT).\r\n Used for setting KeepAlive watchdog timing.\r\n Format: !SK [WDT] [MISSING] [KAPINT]\r\n Example: !SK\r\n Example: !SK 3600\r\n Example: !SK 3600 60 10\r\n WDT The module reboots after WDT?KAPINT seconds\r\n without an ACK from the server.\r\n 0 = disable WDT.\r\n MISSING After MISSING?KAPINT seconds without an ACK,\r\n the Err LED starts blinking.\r\n (Used for indication of missing link to the server)\r\n KAPINT Interval in seconds for how often KeepAlivePackages\r\n are send to the KAP server.\r\n The WDT and MISSING timeout counts are both reset by an ACK from the server. \r\n",
"!SN": b"!SN: Set IP for DNS Name servers to use.\r\n Format: !SN DNS1 DNS2 DNS3\r\n Example: !SN 192168001200 192168001201 000000000000\r\n Example: !SN 172.16.0.83 172.16.0.84 0.0.0.0\r\n",
"!SP": b"!SP: Set IP Ports\r\n Format: !SP [KAP CHA CHB CFG]\r\n Example: !SP 333\r\n KAP packages are hereafter sent to port 333 on the server.\r\n Example: !SP 50 1025 1026 50100\r\n KAP packages are sent to port 50.\r\n Direct connections to UART channel A is on port 1025, B on 1026.\r\n Config connection on port 50100.\r\n (default values)\r\n Example: !SP 0 0 80\r\n UART channel B is on port 80 (KAP and ChA is ingored - unchanged).\r\n",
"!SS": b"!SS: Set Serial Settings.\r\n Used for setting the serial interface for channel A or B.\r\n Format: !SS [Channel Baud,DataBits,Parity,StopBits[,Ctrl]]\r\n Example: !SS A Auto\r\n Example: !SS A 9600,8,N,2\r\n Example: !SS B 115200,8,E,1\r\n Example: !SS B 115200,8,E,1,I\r\n Example: !SS B 115200,8,E,1,L\r\n The ctrl flag can be 'C'(check), 'I' (ignore framing errors) or 'L' (Link, ChB only).\r\n Chanel A supports auto mode (Also enables load profile logger in old E-Meters).\r\n",
"!RC": b"!RC: Request connect\r\n Makes the module crate a ChA or ChB socket to a remote server.\r\n Format: !RC Action [IP [Port]]\r\n Example: !RC A 195.215.168.45 200\r\n Example: !RC B 195.215.168.45 201\r\n Example: !RC D\r\n Disconnects both A and B if open.\r\n Example: !RC\r\n Status only.\r\n",
},
"!AC": b"\r\nOK\r\n\r\nDISABLED \r\n [1] 0.0.0.0\r\n [2] 192.168.1.211\r\n [3] 0.0.0.0\r\n [4] 0.0.0.0\r\n [5] 0.0.0.0\r\n",
"!AS": b"\r\nOK\r\n\r\nDISABLED \r\n [1] 0.0.0.0\r\n [2] 0.0.0.0\r\n [3] 0.0.0.0\r\n [4] 0.0.0.0\r\n [5] 0.0.0.0\r\n",
"!GC": b"Device Name : \r\nUse DHCP : YES\r\nIP addr. : 192.168.1.210\r\nIP Subnet : 255.255.255.0\r\nGateway addr. : 192.168.1.1\r\nService server addr.: 202.202.202.1\r\nService server hostname.: kapserver.evilpowerprovider.org\r\nDNS Server No. 1: 0.0.0.0\r\nDNS Server No. 2: 0.0.0.0\r\nDNS Server No. 3: 0.0.0.0\r\nMAC addr. (HEX) : 00:13:EA:00:00:00\r\nChannel A device meterno.: A1 06 A1 02 B7 34 12 00 00 03\r\nChannel B device meterno.: A1 06 A1 02 B7 34 12 00 00 03\r\nKeep alive timer (flash setting): ENABLED 10\r\nKeep alive timer (current setting): ENABLED 10\r\nHas the module received acknowledge from the server: NO\r\nKAP Server port: 50\r\nKAP Local port: 800\r\nSoftware watchdog: ENABLED 3600\r\n",
"!GV": b"\r\nSoftware Version: 5.5 (E5)\r\n",
"!SA": b"\r\nOK\r\nService server addr.: 192.168.1.2:61000\r\n",
"!SB": b"\r\nOK\r\nService server addr.: 202.202.202.1:50 (from DNS)\r\nand fallback KAP to: 192.168.1.2:61000\r\n",
"!SC": b"!SC: Set Config (*1).\r\n Configures the module.\r\n Format: !SC DHCP IP SUB GW DNS1 DNS2 DNS3 SRV_IP DEVICENAME SRV_DNS\r\n DHCP 1 for DHCP, 0 for static IP.\r\n IP.. Static IP settings.\r\n SRV_IP IP of remote server (Only if SRV_DNS is 0).\r\n DEVICENAME User label for for individual naming.\r\n SRV_DNS DNS name of remote server (0 to disable DNS lookup)\r\n",
"!SD": b"\r\nOK",
"!SH": b"\r\nOK\r\nService server hostname.: hosting.kamstrup_meter.dk\r\n",
"!SI": b"\r\nUse DHCP : NO\r\n\r\nIP addr. : 192.168.1.200\r\n",
"!SK": b"\r\nOK\r\nSoftware watchdog: ENABLED 3600\r\nKAP Missing warning: ENABLED 60\r\nKeep alive timer (flash setting): ENABLED 10\r\n",
"!SN": b"\r\nOK",
"!SP": b"\r\nOK\r\nKAP on server: 50\r\nChA on module: 1025\r\nChB on module: 1026\r\nCfg on module: 50100\r\n",
"!SS": b"\r\nOK\r\n",
"!RC": b"\r\nOK\r\nStatus: 0100\r\n",
}
| 10,653 | Python | .py | 38 | 274.157895 | 996 | 0.577068 | mushorg/conpot | 1,224 | 413 | 115 | GPL-2.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,451 | snmp_client.py | mushorg_conpot/conpot/tests/helpers/snmp_client.py | # Command Responder (GET/GETNEXT)
# Based on examples from http://pysnmp.sourceforge.net/
from pysnmp.entity import engine, config
from pysnmp.carrier.asynsock.dgram import udp
from pysnmp.entity.rfc3413 import cmdgen
class SNMPClient(object):
    """Small pysnmp SNMPv3 command client used by the tests (GET/SET/GETNEXT)."""

    def __init__(self, host, port):
        # Create SNMP engine instance
        self.snmpEngine = engine.SnmpEngine()

        # user: usr-sha-aes, auth: SHA, priv AES
        config.addV3User(
            self.snmpEngine,
            "usr-sha-aes128",
            config.usmHMACSHAAuthProtocol,
            "authkey1",
            config.usmAesCfb128Protocol,
            "privkey1",
        )
        config.addTargetParams(
            self.snmpEngine, "my-creds", "usr-sha-aes128", "authPriv"
        )

        # Setup transport endpoint and bind it with security settings yielding
        # a target name (choose one entry depending of the transport needed).
        # UDP/IPv4
        config.addSocketTransport(
            self.snmpEngine, udp.domainName, udp.UdpSocketTransport().openClientMode()
        )
        config.addTargetAddr(
            self.snmpEngine, "my-router", udp.domainName, (host, port), "my-creds"
        )

    # Error/response receiver: default callback that prints either the error
    # indication/status or each returned varbind.
    def cbFun(
        self,
        sendRequestHandle,
        errorIndication,
        errorStatus,
        errorIndex,
        varBindTable,
        cbCtx,
    ):
        if errorIndication:
            print(errorIndication)
        elif errorStatus:
            print(
                (
                    "%s at %s"
                    % (
                        errorStatus.prettyPrint(),
                        errorIndex and varBindTable[-1][int(errorIndex) - 1] or "?",
                    )
                )
            )
        else:
            for oid, val in varBindTable:
                print(("%s = %s" % (oid.prettyPrint(), val.prettyPrint())))

    def get_command(self, OID=((1, 3, 6, 1, 2, 1, 1, 1, 0), None), callback=None):
        # Issue an SNMP GET for OID; results are delivered via the callback.
        if not callback:
            callback = self.cbFun
        # Prepare and send a request message
        cmdgen.GetCommandGenerator().sendReq(
            self.snmpEngine,
            "my-router",
            (OID,),
            callback,
        )
        self.snmpEngine.transportDispatcher.runDispatcher()

        # Run I/O dispatcher which would send pending queries and process responses
        # NOTE(review): the dispatcher is run twice in this method; the second
        # run looks redundant -- confirm before removing.
        self.snmpEngine.transportDispatcher.runDispatcher()

    def set_command(self, OID, callback=None):
        # Issue an SNMP SET for OID and pump the dispatcher until done.
        if not callback:
            callback = self.cbFun
        cmdgen.SetCommandGenerator().sendReq(
            self.snmpEngine,
            "my-router",
            (OID,),
            callback,
        )
        self.snmpEngine.transportDispatcher.runDispatcher()

    def walk_command(self, OID, callback=None):
        # Issue an SNMP GETNEXT request.
        # NOTE(review): unlike the other commands, the dispatcher is never run
        # here, so the request is only queued -- confirm this is intentional.
        if not callback:
            callback = self.cbFun
        cmdgen.NextCommandGenerator().sendReq(
            self.snmpEngine,
            "my-router",
            (OID,),
            callback,
        )
if __name__ == "__main__":
snmp_client = SNMPClient("127.0.0.1", 161)
OID = ((1, 3, 6, 1, 2, 1, 1, 1, 0), None)
snmp_client.get_command(OID)
| 3,172 | Python | .py | 91 | 24.494505 | 86 | 0.566634 | mushorg/conpot | 1,224 | 413 | 115 | GPL-2.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,452 | s7comm_client.py | mushorg_conpot/conpot/tests/helpers/s7comm_client.py | # Copyright (C) 2013 Daniel creo Haslinger <creo-conpot@blackmesa.at>
# Derived from plcscan by Dmitry Efanov (Positive Research)
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from struct import pack, unpack
from optparse import OptionGroup
import struct
import socket
import string
from conpot.utils.networking import str_to_bytes
__FILTER = "".join(
[" "]
+ [
" " if chr(x) not in string.printable or chr(x) in string.whitespace else chr(x)
for x in range(1, 256)
]
)
def _bytes_to_str(items):
    """Lazily yield each value of *items*, decoding bytes to ASCII str.

    Replaces the former lambda assignment (PEP 8 E731) with an equivalent
    named function; it still returns a generator over *items*.
    """
    return (
        value.decode("ascii") if isinstance(value, bytes) else value
        for value in items
    )
def StripUnprintable(msg):
    """Decode *msg* as ASCII and blank out unprintable characters via __FILTER."""
    text = msg.decode("ascii")
    return text.translate(__FILTER)
class TPKTPacket:
    """TPKT packet (RFC 1006): a 4-byte header followed by the payload."""

    def __init__(self, data=""):
        self.data = data

    def pack(self):
        """Serialize: version 3, reserved 0, total length, then the payload."""
        header = pack("!BBH", 3, 0, len(bytes(self.data)) + 4)
        return header + str_to_bytes(bytes(self.data))

    def unpack(self, packet):
        """Parse the TPKT header and stash the payload in self.data."""
        try:
            version, reserved, size = unpack("!BBH", packet[:4])
        except struct.error:
            raise S7ProtocolError("Unknown TPKT format")
        # NOTE: `size` is the total packet length incl. the 4-byte header.
        self.data = packet[4 : 4 + size]
        return self
class COTPConnectionPacket:
    """COTP Connection Request / Connection Confirm packet (ISO on TCP, RFC 1006)."""

    def __init__(self, dst_ref=0, src_ref=0, dst_tsap=0, src_tsap=0, tpdu_size=0):
        self.dst_ref = dst_ref
        self.src_ref = src_ref
        self.dst_tsap = dst_tsap
        self.src_tsap = src_tsap
        self.tpdu_size = tpdu_size

    def pack(self):
        """Build a Connection Request PDU carrying TSAPs and TPDU size."""
        fields = (
            17,  # header length
            0xE0,  # PDU type: connection request
            self.dst_ref,
            self.src_ref,
            0,  # class / flags
            0xC1, 2, self.src_tsap,  # parameter: source TSAP
            0xC2, 2, self.dst_tsap,  # parameter: destination TSAP
            0xC0, 1, self.tpdu_size,  # parameter: TPDU size
        )
        return pack("!BBHHBBBHBBHBBB", *fields)

    def unpack(self, packet):
        """Validate and parse a Connection Confirm header (refs only)."""
        try:
            size, pdu_type, self.dst_ref, self.src_ref, _ = unpack(
                "!BBHHB", packet[:7]
            )
        except struct.error:
            raise S7ProtocolError("Wrong CC packet format")
        if len(packet) != size + 1:
            raise S7ProtocolError("Wrong CC packet size")
        if pdu_type != 0xD0:
            raise S7ProtocolError("Not a CC packet")
        return self

    def __bytes__(self):
        return self.pack()
class COTPDataPacket:
    """COTP data TPDU wrapper (ISO on TCP, RFC 1006)."""

    def __init__(self, data=""):
        self.data = data

    def pack(self):
        """Prefix the payload with the fixed 3-byte COTP data header."""
        header = pack("!BBB", 2, 0xF0, 0x80)  # length, DT code, EOT flag
        return header + str_to_bytes(bytes(self.data))

    def unpack(self, packet):
        """Strip the variable-length COTP header (first byte = header size)."""
        header_len = packet[0]
        self.data = packet[header_len + 1 :]
        return self

    def __bytes__(self):
        return self.pack()
class S7Packet:
    """S7 packet: 10/12-byte header plus parameters and data sections."""

    def __init__(self, _type=1, req_id=0, parameters="", data=""):
        self.type = _type
        self.req_id = req_id
        self.parameters = parameters
        self.data = data
        # Error code from the header of parsed responses; 0 when none.
        self.error = 0

    def pack(self):
        """Serialize a job (1) or userdata (7) request; other types refuse."""
        if self.type not in [1, 7]:
            raise S7ProtocolError("Unknown pdu type")
        return (
            pack(
                "!BBHHHH",
                0x32,  # protocol s7 magic
                self.type,  # pdu-type
                0,  # reserved
                self.req_id,  # request id
                len(self.parameters),  # parameters length
                len(self.data),  # data length
            )
            + str_to_bytes(self.parameters)
            + str_to_bytes(self.data)
        )

    def unpack(self, packet):
        """Parse header, parameters and data; raises S7Error on error codes."""
        try:
            if packet[1] in [3, 2]:  # pdu-type = response
                # Responses carry an extra 2-byte error field in the header.
                header_size = 12
                (
                    _,
                    self.type,
                    _,
                    self.req_id,
                    parameters_length,
                    data_length,
                    self.error,
                ) = unpack("!BBHHHHH", packet[:header_size])
                if self.error:
                    raise S7Error(self.error)
            elif packet[1] in [1, 7]:  # pdu-type = job / userdata
                header_size = 10
                (
                    _,
                    self.type,
                    _,
                    self.req_id,
                    parameters_length,
                    data_length,
                ) = unpack("!BBHHHH", packet[:header_size])
            else:
                raise S7ProtocolError("Unknown pdu type (%d)" % packet[1])
        except struct.error:
            raise S7ProtocolError("Wrong S7 packet format")

        # Slice parameters and data sections using the header-declared lengths.
        self.parameters = packet[header_size : header_size + parameters_length]
        self.data = packet[
            header_size
            + parameters_length : header_size
            + parameters_length
            + data_length
        ]
        return self

    def __bytes__(self):
        return self.pack()
class S7ProtocolError(Exception):
    """Raised when an S7/ISO-on-TCP frame cannot be parsed or is malformed."""

    def __init__(self, message, packet=""):
        # Human-readable description of the framing problem.
        self.message = message
        # Optional raw packet that triggered the error (for debugging).
        self.packet = packet

    def __str__(self):
        return "[ERROR][S7Protocol] %s" % self.message
class S7Error(Exception):
    """S7 application-level error, identified by a numeric code."""

    # Known error codes; anything else renders as "Unknown error".
    _errors = {
        # s7 data errors
        0x05: "Address Error",
        0x0A: "Item not available",
        # s7 header errors
        0x8104: "Context not supported",
        0x8500: "Wrong PDU size",
    }

    def __init__(self, code):
        self.code = code

    def __str__(self):
        message = S7Error._errors.get(self.code, "Unknown error")
        return "[ERROR][S7][0x%x] %s" % (self.code, message)
def Split(ar, size):
    """Partition sequence *ar* into consecutive chunks of at most *size* items."""
    chunks = []
    for start in range(0, len(ar), size):
        chunks.append(ar[start : start + size])
    return chunks
class s7:
    """Minimal S7comm client: ISO-on-TCP transport plus a few S7 functions."""

    def __init__(self, ip, port, src_tsap=0x200, dst_tsap=0x201, timeout=8):
        self.ip = ip
        self.port = port
        # Sequence id echoed back by the peer; checked in Request().
        self.req_id = 0
        self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.dst_ref = 0
        self.src_ref = 0x04
        self.dst_tsap = dst_tsap
        self.src_tsap = src_tsap
        self.timeout = timeout

    def Connect(self):
        """Establish ISO on TCP connection and negotiate PDU"""
        # sleep(1)
        # self.src_ref = randint(1, 20)
        self.src_ref = 10
        # A fresh socket per attempt: Connect() may be retried after failures.
        self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.s.settimeout(self.timeout)
        self.s.connect((self.ip, self.port))
        # COTP connection request wrapped in a TPKT frame.
        self.s.send(
            TPKTPacket(
                COTPConnectionPacket(
                    self.dst_ref, self.src_ref, self.dst_tsap, self.src_tsap, 0x0A
                )
            ).pack()
        )
        reply = self.s.recv(1024)
        # Raises S7ProtocolError unless the reply is a valid connection confirm.
        _ = COTPConnectionPacket().unpack(TPKTPacket().unpack(reply).data)
        self.NegotiatePDU()

    def Request(self, _type, parameters="", data=""):
        """Send s7 request and receive response"""
        packet = TPKTPacket(
            COTPDataPacket(S7Packet(_type, self.req_id, parameters, data))
        ).pack()
        self.s.send(packet)
        reply = self.s.recv(1024)
        # Unwrap TPKT -> COTP -> S7 layers.
        response = S7Packet().unpack(
            COTPDataPacket().unpack(TPKTPacket().unpack(reply).data).data
        )
        if self.req_id != response.req_id:
            raise S7ProtocolError("Sequence ID not correct")
        return response

    def NegotiatePDU(self, pdu=480):
        """Send negotiate pdu request and receive response. Reply no matter"""
        response = self.Request(
            0x01,
            pack(
                "!BBHHH",
                0xF0,  # function NegotiatePDU
                0x00,  # unknown
                0x01,  # max number of parallel jobs
                0x01,  # max number of parallel jobs
                pdu,  # pdu length
            ),
        )
        # The peer answers with the PDU size it actually accepts.
        _, _, _, _, pdu = unpack("!BBHHH", response.parameters)
        return pdu

    def Function(self, _type, group, function, data=""):
        """Invoke a 'userdata' (0x07) function; returns its payload bytes."""
        parameters = pack(
            "!LBBBB",
            0x00011200 + 0x04,  # parameter head (magic) + parameter length
            0x11,  # unknown
            _type * 0x10 + group,  # type, function group
            function,  # function
            0x00,  # sequence
        )
        data = pack("!BBH", 0xFF, 0x09, len(data)) + data
        response = self.Request(0x07, parameters, data)
        code, _, _ = unpack("!BBH", response.data[:4])
        if code != 0xFF:
            raise S7Error(code)
        return response.data[4:]

    def plc_stop_function(self):
        """Send a PLC STOP (function 0x29) and return the reply payload."""
        pdu_type = 1
        request_id = 256
        stop_func_parameter = struct.pack(
            "!B5x10p",
            0x29,  # function code: PLC stop
            str_to_bytes("P_PROGRAM"),  # function name
        )
        s7packet = S7Packet(pdu_type, request_id, stop_func_parameter).pack()
        cotp_packet = COTPDataPacket(s7packet).pack()
        tpkt_packet = TPKTPacket(cotp_packet).pack()
        self.s.send(tpkt_packet)
        reply = self.s.recv(1024)
        if reply:
            return (
                S7Packet()
                .unpack(COTPDataPacket().unpack(TPKTPacket().unpack(reply).data).data)
                .data
            )
        else:
            return None

    def ReadSZL(self, szl_id):
        """Read one SZL list and return its entries split per element size."""
        szl_data = self.Function(
            0x04,  # request
            0x04,  # szl-functions
            0x01,  # read szl
            pack("!HH", szl_id, 1),  # szl id, szl index
        )
        szl_id, _, element_size, _ = unpack("!HHHH", szl_data[:8])
        return Split(szl_data[8:], element_size)
def BruteTsap(ip, port, src_tsaps=(0x100, 0x200), dst_tsaps=(0x102, 0x200, 0x201)):
    """Try every src/dst TSAP combination; return the first pair that connects.

    Returns (src_tsap, dst_tsap) on success, None when no combination works.
    """
    for candidate_src in src_tsaps:
        for candidate_dst in dst_tsaps:
            connection = s7(ip, port)
            connection.src_tsap = candidate_src
            connection.dst_tsap = candidate_dst
            try:
                connection.Connect()
            except S7ProtocolError:
                continue
            return candidate_src, candidate_dst
    return None
def GetIdentity(ip, port, src_tsap, dst_tsap):
    """Connect with known-good TSAPs and read identity SZLs 0x11 and 0x1C.

    Returns a list of "szl_id;index;value" strings, one per decodable element.
    """
    res = []
    # Per-SZL decode table: "indexes" documents the meaning of each element
    # index, "packer" maps tuples of indexes to an unpack/format function.
    szl_dict = {
        0x11: {
            "title": "Module Identification",
            "indexes": {1: "Module", 6: "Basic Hardware", 7: "Basic Firmware"},
            "packer": {
                (1, 6): lambda packet: "{0:s} v.{2:d}.{3:d}".format(
                    *_bytes_to_str(unpack("!20sHBBH", packet))
                ),
                (7,): lambda packet: "{0:s} v.{3:d}.{4:d}.{5:d}".format(
                    *_bytes_to_str(unpack("!20sHBBBB", packet))
                ),
            },
        },
        0x1C: {
            "title": "Component Identification",
            "indexes": {
                1: "Name of the PLC",
                2: "Name of the module",
                3: "Plant identification",
                4: "Copyright",
                5: "Serial number of module",
                6: "Reserved for operating system",
                7: "Module type name",
                8: "Serial number of memory card",
                9: "Manufacturer and profile of a CPU module",
                10: "OEM ID of a module",
                11: "Location designation of a module",
            },
            "packer": {
                (1, 2, 5): lambda packet: "%s" % packet[:24].decode("ascii"),
                (3, 7, 8): lambda packet: "%s" % packet[:32].decode("ascii"),
                (4,): lambda packet: "%s" % packet[:26].decode("ascii"),
            },
        },
    }
    con = s7(ip, port, src_tsap, dst_tsap)
    con.Connect()
    for szl_id in list(szl_dict.keys()):
        try:
            entities = con.ReadSZL(szl_id)
        except S7Error:
            # target does not support this SZL - try the next one
            continue
        packers = szl_dict[szl_id]["packer"]
        for item in entities:
            # each element starts with a 2-byte index followed by the payload
            if len(item) > 2:
                (n,) = unpack("!H", item[:2])
                item = item[2:]
                try:
                    packers_keys = [i for i in list(packers.keys()) if n in i]
                    formated_item = packers[packers_keys[0]](item).strip("\x00")
                except (struct.error, IndexError):
                    # no dedicated packer (or malformed data): keep printable bytes
                    formated_item = StripUnprintable(item).strip("\x00")
                res.append("%s;%s;%s" % (szl_id, n, formated_item))
    return res
def Scan(ip, port):
    """Scan one host: brute-force the TSAPs, then collect identity SZL data.

    Returns False when no TSAP pair worked, otherwise the (possibly empty)
    list of identity strings from GetIdentity().
    """
    tsap_pair = ()
    try:
        tsap_pair = BruteTsap(ip, port)
    except socket.error as e:
        print("%s:%d %s" % (ip, port, e))
    if not tsap_pair:
        print(" MEH!")
        return False
    print("%s:%d S7comm (src_tsap=0x%x, dst_tsap=0x%x)" % (ip, port, tsap_pair[0], tsap_pair[1]))
    # sometimes unexpected exceptions occur, so try to get identity several time
    identities = []
    for attempt in range(2):
        try:
            identities = GetIdentity(ip, port, tsap_pair[0], tsap_pair[1])
            break
        except (S7ProtocolError, socket.error) as e:
            print("Attempt {0}: {1}".format(attempt, e))
    return identities
def AddOptions(parser):
    """Attach the S7-scanner specific options to an optparse *parser*."""
    s7_options = OptionGroup(parser, "S7 scanner options")
    for flag, description in (
        ("--src-tsap", "Try this src-tsap (list) (default: 0x100,0x200)"),
        ("--dst-tsap", "Try this dst-tsap (list) (default: 0x102,0x200,0x201)"),
    ):
        s7_options.add_option(flag, help=description, type="string", metavar="LIST")
    parser.add_option_group(s7_options)
| 14,280 | Python | .py | 397 | 25.685139 | 88 | 0.534082 | mushorg/conpot | 1,224 | 413 | 115 | GPL-2.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,453 | __init__.py | mushorg_conpot/conpot/protocols/__init__.py | # Copyright (C) 2020 srenfo
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from .IEC104.IEC104_server import IEC104Server
from .bacnet.bacnet_server import BacnetServer
from .enip.enip_server import EnipServer
from .ftp.ftp_server import FTPServer
from .guardian_ast.guardian_ast_server import GuardianASTServer
from .http.web_server import HTTPServer
from .ipmi.ipmi_server import IpmiServer
from .kamstrup_management.kamstrup_management_server import KamstrupManagementServer
from .kamstrup_meter.kamstrup_server import KamstrupServer
from .modbus.modbus_server import ModbusServer
from .s7comm.s7_server import S7Server
from .snmp.snmp_server import SNMPServer
from .tftp.tftp_server import TftpServer
# Defines protocol directory names inside template directories.
# Keys are the per-protocol folder names expected inside a template; values
# are the server classes instantiated for the matching protocol.
name_mapping = {
    "bacnet": BacnetServer,
    "enip": EnipServer,
    "ftp": FTPServer,
    "guardian_ast": GuardianASTServer,
    "http": HTTPServer,
    "IEC104": IEC104Server,
    "ipmi": IpmiServer,
    "kamstrup_management": KamstrupManagementServer,
    "kamstrup_meter": KamstrupServer,
    "modbus": ModbusServer,
    "s7comm": S7Server,
    "snmp": SNMPServer,
    "tftp": TftpServer,
}
| 1,833 | Python | .py | 45 | 38.533333 | 84 | 0.788914 | mushorg/conpot | 1,224 | 413 | 115 | GPL-2.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,454 | cotp.py | mushorg_conpot/conpot/protocols/s7comm/cotp.py | # This implementation of the S7 protocol is highly inspired
# by the amazing plcscan work by the ScadaStrangeLove group.
# https://code.google.com/p/plcscan/source/browse/trunk/s7.py
from struct import pack, unpack
import struct
from conpot.protocols.s7comm.exceptions import ParseException
from conpot.utils.networking import str_to_bytes
class COTP(object):
    """Base COTP (ISO 8073) packet with pack/parse support.

    COTP BASE PACKET FORMAT:
    -------------------------------------
    1 byte     LENGTH (= n + 1)
    1 byte     TPDU TYPE
    1 byte     OPT FIELD (optional, DT DATA only), bitmask
    n bytes    TPDU PAYLOAD
    x bytes    TRAILER (optional), most probably containing S7.
    """

    def __init__(self, tpdu_type=0, opt_field=0, payload="", trailer=""):
        self.tpdu_type = tpdu_type
        self.opt_field = opt_field
        self.payload = payload
        self.trailer = trailer
        # A DT DATA TPDU (0xF0 == 240) has a fixed 2-byte header length;
        # every other type counts the type byte plus the payload.
        self.packet_length = 2 if self.tpdu_type == 240 else 1 + len(self.payload)

    def pack(self):
        """Serialize header + payload + trailer to bytes."""
        if self.tpdu_type == 0xF0:
            header = pack("!BBB", self.packet_length, self.tpdu_type, self.opt_field)
        else:
            header = pack("!BB", self.packet_length, self.tpdu_type)
        return header + str_to_bytes(self.payload) + str_to_bytes(self.trailer)

    def parse(self, packet):
        """Dissect *packet* in place and return self.

        Raises ParseException when the three header bytes cannot be unpacked.
        """
        try:
            length, tpdu_type, third_byte = unpack("!BBB", packet[:3])
        except struct.error:
            raise ParseException("s7comm", "malformed packet header structure")
        self.packet_length = length
        self.tpdu_type = int(tpdu_type)
        self.trailer = packet[1 + self.packet_length :]
        if self.tpdu_type == 0xF0:
            # the DT DATA TPDU features another header byte that shifts our structure
            self.opt_field = third_byte
            self.payload = packet[3 : 1 + self.packet_length]
        else:
            self.payload = packet[2 : 1 + self.packet_length]
        return self
# COTP Connection Request or Connection Confirm packet (ISO on TCP). RFC 1006
class COTPConnectionPacket:
    """COTP Connection Request / Connection Confirm packet (ISO on TCP, RFC 1006).

    COTP CR PACKET FORMAT:
    -------------------------------------
    2 bytes    DST REFERENCE
    2 bytes    SRC REFERENCE
    1 byte     OPTION FIELD (bitmask)
    ---------------------------------------
    repeated   1 byte PARAM CODE, 1 byte PARAM LENGTH (n), n bytes PARAM DATA
    ---------------------------------------
    The parameter block repeats until the end of the packet.
    """

    def __init__(
        self, dst_ref=0, src_ref=0, opt_field=0, src_tsap=0, dst_tsap=0, tpdu_size=0
    ):
        self.dst_ref = dst_ref
        self.src_ref = src_ref
        self.opt_field = opt_field
        self.src_tsap = src_tsap
        self.dst_tsap = dst_tsap
        self.tpdu_size = tpdu_size

    def dissect(self, packet):
        """Parse the fixed header and every (code, length, data) parameter.

        Raises ParseException on a truncated header, an unsupported parameter
        length, or an unknown parameter code.
        """
        try:
            self.dst_ref, self.src_ref, self.opt_field = unpack("!HHB", packet[:5])
        except struct.error:
            raise ParseException("s7comm", "malformed fixed header structure")
        remaining = packet[5:]
        while remaining:
            param_code, param_length = unpack("!BB", remaining[:2])
            param_code = int(param_code)
            # Only 1- and 2-byte parameter values occur in CR/CC packets.
            if param_length == 1:
                value_format = "!B"
            elif param_length == 2:
                value_format = "!H"
            else:
                raise ParseException("s7comm", "malformed variable header structure")
            (param_value,) = unpack(value_format, remaining[2 : 2 + param_length])
            if param_code == 0xC1:
                self.src_tsap = param_value
            elif param_code == 0xC2:
                self.dst_tsap = param_value
            elif param_code == 0xC0:
                self.tpdu_size = param_value
            else:
                raise ParseException("s7comm", "unknown parameter code")
            # advance past this parameter block
            remaining = remaining[2 + param_length :]
        return self
class COTP_ConnectionConfirm(COTPConnectionPacket):
    """COTP Connection Confirm (CC) packet builder."""

    def __init__(
        self, dst_ref=0, src_ref=0, opt_field=0, src_tsap=0, dst_tsap=0, tpdu_size=0
    ):
        # BUG FIX: the original assigned all six fields here and then called
        # super().__init__() with NO arguments, which re-assigned every field
        # back to its 0 default - assemble() therefore always emitted an
        # all-zero packet. Forward the values to the base class instead.
        super().__init__(dst_ref, src_ref, opt_field, src_tsap, dst_tsap, tpdu_size)

    def assemble(self):
        """Serialize the CC fixed header plus the src/dst TSAP parameters."""
        return pack(
            "!HHBBBHBBH",
            self.dst_ref,
            self.src_ref,
            self.opt_field,
            0xC1,  # param code: src-tsap
            0x02,  # param length: 2 bytes
            self.src_tsap,
            0xC2,  # param code: dst-tsap
            0x02,  # param length: 2 bytes
            self.dst_tsap,
        )
class COTP_ConnectionRequest(COTPConnectionPacket):
    """COTP Connection Request (CR) packet builder."""

    def __init__(
        self, dst_ref=0, src_ref=0, opt_field=0, src_tsap=0, dst_tsap=0, tpdu_size=0
    ):
        # BUG FIX: the original assigned all six fields here and then called
        # super().__init__() with NO arguments, which re-assigned every field
        # back to its 0 default - assemble() therefore always emitted an
        # all-zero packet. Forward the values to the base class instead.
        super().__init__(dst_ref, src_ref, opt_field, src_tsap, dst_tsap, tpdu_size)

    def assemble(self):
        """Serialize the CR fixed header plus TSAP and tpdu-size parameters."""
        return pack(
            "!HHBBBHBBHBBB",
            self.dst_ref,
            self.src_ref,
            self.opt_field,
            0xC1,  # param code: src-tsap
            0x02,  # param length: 2 bytes
            self.src_tsap,
            0xC2,  # param code: dst-tsap
            0x02,  # param length: 2 bytes
            self.dst_tsap,
            0xC0,  # param code: tpdu-size
            0x01,  # param length: 1 byte
            self.tpdu_size,
        )
| 6,401 | Python | .py | 160 | 29.5875 | 85 | 0.525266 | mushorg/conpot | 1,224 | 413 | 115 | GPL-2.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,455 | s7.py | mushorg_conpot/conpot/protocols/s7comm/s7.py | # References: S7_300-400_full_reference_handbook_ENGLISH.pdf
# http://www.bj-ig.de/147.html
# https://code.google.com/p/plcscan/source/browse/trunk/s7.py
from struct import pack, unpack
import struct
import conpot.core as conpot_core
from conpot.protocols.s7comm.exceptions import AssembleException, ParseException
from conpot.utils.networking import str_to_bytes
import logging
logger = logging.getLogger(__name__)
# S7 packet
class S7(object):
ssl_lists = {}
    def __init__(
        self,
        pdu_type=0,
        reserved=0,
        request_id=0,
        result_info=0,
        parameters="",
        data="",
    ):
        """Build an S7 PDU wrapper and the request-dispatch tables.

        :param pdu_type: 1=request, 2/3=response (carry result info), 7=userdata.
        :param parameters: raw parameter block (bytes; other types are
            stringified only for length computation).
        :param data: raw data block.
        """
        self.magic = 0x32  # every S7 PDU starts with this magic byte
        self.pdu_type = pdu_type
        self.reserved = reserved
        self.request_id = request_id
        # sometimes "parameters" happen to be of type int, and not a byte string
        self.param_length = (
            len(parameters) if isinstance(parameters, bytes) else len(str(parameters))
        )
        self.data_length = len(data)
        self.result_info = result_info
        self.parameters = parameters
        self.data = data
        # param codes (http://www.bj-ig.de/147.html):
        # maps request types to methods
        self.param_mapping = {
            0x00: ("diagnostics", self.request_diagnostics),
            0x04: ("read", self.request_not_implemented),
            0x05: ("write", self.request_not_implemented),
            0x1A: ("request_download", self.request_not_implemented),
            0x1B: ("download_block", self.request_not_implemented),
            0x1C: ("end_download", self.request_not_implemented),
            0x1D: ("start_upload", self.request_not_implemented),
            0x1E: ("upload", self.request_not_implemented),
            0x1F: ("end_upload", self.request_not_implemented),
            0x28: ("insert_block", self.request_not_implemented),
            0x29: ("plc_stop", self.plc_stop_signal),
        }
        # maps valid pdu codes to name
        # NOTE(review): set("request_pdu") builds a set of CHARACTERS, not
        # {"request_pdu"}; only the dict KEYS are ever used (membership tests
        # in pack()), so the odd values are harmless — confirm before relying
        # on them.
        self.pdu_mapping = {
            0x01: set("request_pdu"),
            0x02: set("known_but_unindentified_pdu"),
            0x03: set("response_pdu"),
            0x07: set("system_status_list"),
        }
        self.data_bus = conpot_core.get_databus()
def __len__(self):
if self.pdu_type in (2, 3):
return 12 + int(self.param_length) + int(self.data_length)
else:
return 10 + int(self.param_length) + int(self.data_length)
def handle(self, current_client=None):
if self.param in self.param_mapping:
if self.param == 0x29:
return self.param_mapping[self.param][1](current_client)
# direct execution to the correct method based on the param
return self.param_mapping[self.param][1]()
    def request_not_implemented(self):
        """Placeholder handler: this request type is not emulated by the honeypot."""
        raise ParseException("s7comm", "request not implemented in honeypot yet.")
    def pack(self):
        """Serialize this S7 PDU (fixed header + parameters + data) to bytes.

        Raises AssembleException for pdu types outside pdu_mapping.
        """
        if self.pdu_type not in self.pdu_mapping:
            raise AssembleException("s7comm", "invalid or unsupported pdu type")
        elif self.pdu_type in (2, 3):
            # type 2 and 3 feature an additional RESULT INFORMATION header
            return (
                pack(
                    "!BBHHHHH",
                    self.magic,
                    self.pdu_type,
                    self.reserved,
                    self.request_id,
                    self.param_length,
                    self.data_length,
                    self.result_info,
                )
                + str_to_bytes(self.parameters)
                + str_to_bytes(self.data)
            )
        else:
            return (
                pack(
                    "!BBHHHH",
                    self.magic,
                    self.pdu_type,
                    self.reserved,
                    self.request_id,
                    self.param_length,
                    self.data_length,
                )
                + str_to_bytes(self.parameters)
                + str_to_bytes(self.data)
            )
def parse(self, packet):
# dissect fixed header
try:
fixed_header = unpack("!BBHHHH", packet[:10])
except struct.error:
raise ParseException("s7comm", "malformed fixed packet header structure")
self.magic = int(fixed_header[0])
if self.magic != 0x32:
raise ParseException(
"s7comm",
"bad magic number, expected 0x32 but got {0}.".format(self.magic),
)
self.pdu_type = fixed_header[1]
self.reserved = fixed_header[2]
self.request_id = fixed_header[3]
self.param_length = fixed_header[4]
self.data_length = fixed_header[5]
# dissect variable header
if self.pdu_type in (2, 3):
# type 2 and 3 feature an additional RESULT INFORMATION header
self.result_info = unpack("!H", packet[10:12])
header_offset = 2
else:
header_offset = 0
self.parameters = packet[
10 + header_offset : 10 + header_offset + self.param_length
]
self.data = packet[
10
+ header_offset
+ self.param_length : 10
+ header_offset
+ self.param_length
+ self.data_length
]
try:
self.param = unpack("!B", self.parameters[:1])[0]
except:
raise ParseException("s7comm", "invalid packet")
return self
# SSL/SZL System Status List/Systemzustandsliste
    def plc_stop_signal(self, current_client):
        """Log a received PLC STOP request and return a canned response pair.

        Returns (response_param, response_data) for the server's reply.
        NOTE(review): "recieved" is a typo but lives in a runtime log string,
        so it is preserved here.
        """
        logger.info("Stop signal recieved from {}".format(current_client))
        return str_to_bytes("0x00"), str_to_bytes("0x29")
    def request_diagnostics(self):
        """Handle an SZL read request by dispatching to a request_ssl_<id> method.

        Walks the (error-code, data-type, length, ssl-id, ssl-index, ...) chunks
        in self.data; the first chunk whose ssl id has a matching
        ``request_ssl_<id>`` handler wins. Returns that handler's
        (params, data) pair, or (0x00, 0x00) when nothing matched.
        """
        # semi-check: the parameter block must hold at least 8 bytes
        try:
            unpack("!BBBBBBBB", self.parameters[:8])
        except struct.error:
            raise ParseException("s7comm", "malformed SSL/SZL parameter structure")
        chunk = self.data
        chunk_id = 0
        while chunk:
            try:
                ssl_chunk_header = unpack("!BBH", chunk[:4])
            except struct.error:
                raise ParseException("s7comm", "malformed SSL/SZL data structure")
            # dissect data blocks
            # data_error_code = ssl_chunk_header[0]
            # data_data_type = ssl_chunk_header[1]
            data_next_bytes = ssl_chunk_header[2]
            data_ssl_id = ""
            data_ssl_index = ""
            # data_ssl_unknown = ""
            if data_next_bytes > 0:
                data_ssl_id = unpack("!H", chunk[4:6])[0]
            if data_next_bytes > 1:
                data_ssl_index = unpack("!H", chunk[6:8])[0]
            if data_next_bytes > 2:
                # data_ssl_unknown = chunk[8 : 4 + data_next_bytes]
                pass
            # map request ssl to method (e.g. ssl id 17 -> request_ssl_17)
            if hasattr(self, "request_ssl_{0}".format(data_ssl_id)):
                m = getattr(self, "request_ssl_{0}".format(data_ssl_id))
                _, params, data = m(data_ssl_index)
                return params, data
            chunk = chunk[4 + data_next_bytes :]
            chunk_id += 1
        return 0x00, 0x00
# W#16#xy11 - module identification
def request_ssl_17(self, data_ssl_index):
# just for convenience
current_ssl = S7.ssl_lists["W#16#xy11"]
if data_ssl_index == 1: # 0x0001 - component identification
ssl_index_description = "Component identification"
ssl_resp_data = pack(
"!HHHHH20sHHH",
17, # 1 WORD ( ID )
data_ssl_index, # 1 WORD ( Index )
28, # 1 WORD ( Length of payload after element count )
0x01, # 1 WORD ( 1 element follows )
data_ssl_index, # 1 WORD ( Data Index )
str_to_bytes(self.data_bus.get_value(current_ssl["W#16#0001"])),
# 10 WORDS ( MLFB of component: 20 bytes => 19 chars + 1 blank (0x20) )
0x0, # 1 WORD ( RESERVED )
0x0, # 1 WORD ( Output state of component )
0x0,
) # 1 WORD ( RESERVED )
ssl_resp_head = pack(
"!BBH",
0xFF, # 1 BYTE ( Data Error Code. 0xFF = OK )
0x09, # 1 BYTE ( Data Type. 0x09 = Char/String )
len(ssl_resp_data),
) # 1 WORD ( Length of following data )
elif data_ssl_index == 6: # 0x0006 - hardware identification
ssl_index_description = "Hardware identification"
ssl_resp_data = pack(
"!HHHHH20sHHH",
17, # 1 WORD ( ID )
data_ssl_index, # 1 WORD ( Index )
28, # 1 WORD ( Length of payload after element count )
0x01, # 1 WORD ( 1 element follows )
data_ssl_index, # 1 WORD ( Data Index )
str_to_bytes(self.data_bus.get_value(current_ssl["W#16#0006"])),
# 10 WORDS ( MLFB of component: 20 bytes => 19 chars + 1 blank (0x20) )
0x0, # 1 WORD ( RESERVED )
"V3", # 1 WORD ( 'V' and first digit of version number )
0x539,
) # 1 WORD ( remaining digits of version number )
ssl_resp_head = pack(
"!BBH",
0xFF, # 1 BYTE ( Data Error Code. 0xFF = OK )
0x09, # 1 BYTE ( Data Type. 0x09 = Char/String )
len(ssl_resp_data),
) # 1 WORD ( Length of following data )
elif data_ssl_index == 7: # 0x0007 - firmware identification
ssl_index_description = "Firmware identification"
ssl_resp_data = pack(
"!HHHHH20sHHH",
17, # 1 WORD ( ID )
data_ssl_index, # 1 WORD ( Index )
28, # 1 WORD ( Length of payload after element count )
0x01, # 1 WORD ( 1 element follows )
data_ssl_index, # 1 WORD ( Data Index )
str_to_bytes(str(0x0)), # 10 WORDS ( RESERVED )
0x0, # 1 WORD ( RESERVED )
"V3", # 1 WORD ( 'V' and first digit of version number )
0x53A,
) # 1 WORD ( remaining digits of version number )
ssl_resp_head = pack(
"!BBH",
0xFF, # 1 BYTE ( Data Error Code. 0xFF = OK )
0x09, # 1 BYTE ( Data Type. 0x09 = Char/String )
len(ssl_resp_data),
) # 1 WORD ( Length of following data )
else:
ssl_index_description = "UNKNOWN / UNDEFINED / RESERVED {0}".format(
hex(data_ssl_index)
)
ssl_resp_data = ""
ssl_resp_head = ""
ssl_resp_params = pack(
"!BBBBBBBB",
0x00, # SSL DIAG
0x01, # unknown
0x12, # unknown
0x08, # bytes following
0x12, # unknown, maybe 0x11 + 1
0x84, # function; response to 0x44
0x01, # subfunction; readszl
0x01,
) # sequence ( = sequence + 1 )
return ssl_index_description, ssl_resp_params, ssl_resp_head + ssl_resp_data
# W#16#011C
    def request_ssl_28(self, data_ssl_index):
        """Answer SZL ID W#16#xy1C (component identification) read requests.

        Builds one mass response containing eight component-identification
        elements (system name, component name, plant id, copyright, serial,
        type name, OEM id, location) sourced from the databus.
        Returns ("", response_params, response_packet).
        """
        # just for convenience
        current_ssl = S7.ssl_lists["W#16#xy1C"]
        # initiate header for mass component block
        ssl_resp_data = pack(
            "!HHHH",
            28,  # 1 WORD ( ID )
            data_ssl_index,  # 1 WORD ( Index )
            34,  # 1 WORD ( Length of payload after element count )
            0x08,  # 1 WORD ( 8 elements follow )
        )
        # craft module data 0x0001 - automation system name
        ssl_resp_data += pack(
            "!H24s8s",
            0x01,  # 1 WORD ( Data Index )
            str_to_bytes(
                self.data_bus.get_value(current_ssl["W#16#0001"])
            ),  # TODO: PADDING
            # 12 WORDS ( Name of automation system, padded with (0x00) )
            str_to_bytes(""),  # 4 WORDS ( RESERVED )
        )
        # craft module data 0x0002 - component name
        ssl_resp_data += pack(
            "!H24s8s",
            0x02,  # 1 WORD ( Data Index )
            str_to_bytes(self.data_bus.get_value(current_ssl["W#16#0002"])),
            # 12 WORDS ( Name of component, padded with (0x00) )
            str_to_bytes(""),  # 4 WORDS ( RESERVED )
        )
        # craft module data 0x0003 - plant identification
        ssl_resp_data += pack(
            "!H32s",
            0x03,  # 1 WORD ( Data Index )
            str_to_bytes(self.data_bus.get_value(current_ssl["W#16#0003"])),
        )
        # 16 WORDS ( Name of plant, padded with (0x00) )
        # craft module data 0x0004 - copyright
        ssl_resp_data += pack(
            "!H26s6s",
            0x04,  # 1 WORD ( Data Index )
            str_to_bytes(
                self.data_bus.get_value(current_ssl["W#16#0004"])
            ),  # 13 WORDS ( CONSTANT )
            str_to_bytes(""),  # 3 WORDS ( RESERVED )
        )
        # craft module data 0x0005 - module serial number
        ssl_resp_data += pack(
            "!H24s8s",
            0x05,  # 1 WORD ( Data Index )
            str_to_bytes(self.data_bus.get_value(current_ssl["W#16#0005"])),
            # 12 WORDS ( Unique Serial Number )
            str_to_bytes(""),  # 4 WORDS ( RESERVED )
        )
        # craft module data 0x0007 - module type name
        ssl_resp_data += pack(
            "!H32s",
            0x07,  # 1 WORD ( Data Index )
            str_to_bytes(self.data_bus.get_value(current_ssl["W#16#0007"])),
        )
        # 16 WORDS ( CPU type name, padded wit (0x00) )
        # craft module data 0x000a - OEM ID of module
        ssl_resp_data += pack(
            "!H20s6s2s4s",
            0x0A,  # 1 WORD ( Data Index )
            str_to_bytes(self.data_bus.get_value(current_ssl["W#16#000A"])),
            # 10 WORDS ( OEM-Copyright Text, padded with (0x00) )
            str_to_bytes(
                ""
            ),  # 3 WORDS ( OEM Copyright Text padding to 26 characters )
            str_to_bytes(""),  # 1 WORD ( OEM ID provided by Siemens )
            str_to_bytes(""),  # 2 WORDS ( OEM user defined ID )
        )
        # craft module data 0x000b - location
        ssl_resp_data += pack(
            "!H32s",
            0x0B,  # 1 WORD ( Data Index )
            str_to_bytes(self.data_bus.get_value(current_ssl["W#16#000B"])),
        )
        # 16 WORDS ( Location String, padded with (0x00) )
        # craft leading response header
        ssl_resp_head = pack(
            "!BBH",
            0xFF,  # 1 BYTE ( Data Error Code. 0xFF = OK )
            0x09,  # 1 BYTE ( Data Type. 0x09 = Char/String )
            len(ssl_resp_data),  # 1 WORD ( Length of following data )
        )
        ssl_resp_packet = ssl_resp_head + ssl_resp_data
        ssl_resp_params = pack(
            "!BBBBBBBB",
            0x00,  # SSL DIAG
            0x01,  # unknown
            0x12,  # unknown
            0x08,  # bytes following
            0x12,  # unknown, maybe 0x11 + 1
            0x84,  # function; response to 0x44
            0x01,  # subfunction; readszl
            0x01,  # sequence ( = sequence + 1 )
        )
        return "", ssl_resp_params, ssl_resp_packet
| 15,783 | Python | .py | 367 | 30.708447 | 134 | 0.517906 | mushorg/conpot | 1,224 | 413 | 115 | GPL-2.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,456 | s7_server.py | mushorg_conpot/conpot/protocols/s7comm/s7_server.py | # Copyright (C) 2013 Johnny Vestergaard <jkv@unixcluster.dk>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import time
from gevent.server import StreamServer
import codecs
import socket
from struct import unpack
from conpot.protocols.s7comm.tpkt import TPKT
from conpot.protocols.s7comm.cotp import COTP as COTP_BASE_packet
from conpot.protocols.s7comm.cotp import COTP_ConnectionRequest
from conpot.protocols.s7comm.cotp import COTP_ConnectionConfirm
from conpot.protocols.s7comm.s7 import S7
import conpot.core as conpot_core
from conpot.core.protocol_wrapper import conpot_protocol
from lxml import etree
import logging
logger = logging.getLogger(__name__)
def cleanse_byte_string(packet):
    """Strip every literal 'b' character (0x62) from *packet* and return bytes.

    The latin-1 round trip maps each byte to exactly one character, so no
    other byte values are altered.
    """
    return packet.decode("latin-1").replace("b", "").encode("latin-1")
@conpot_protocol
class S7Server(object):
    def __init__(self, template, template_directory, args):
        """Load SZL definitions from the XML *template* and prepare the server.

        Builds self.ssl_lists as {ssl_id: {item_id: databus_key}} and shares
        it with the S7 packet class via the S7.ssl_lists class attribute.
        """
        self.timeout = 5  # per-connection socket timeout in seconds
        self.ssl_lists = {}
        self.server = None
        # S7 handlers look up SZL content through this shared class attribute.
        S7.ssl_lists = self.ssl_lists
        self.start_time = None  # Initialize later
        dom = etree.parse(template)
        system_status_lists = dom.xpath("//s7comm/system_status_lists/*")
        for ssl in system_status_lists:
            ssl_id = ssl.attrib["id"]
            ssl_dict = {}
            self.ssl_lists[ssl_id] = ssl_dict
            items = ssl.xpath("./*")
            for item in items:
                item_id = item.attrib["id"]
                # element text is the databus key; empty elements map to ""
                databus_key = (
                    item.xpath("./text()")[0] if len(item.xpath("./text()")) else ""
                )
                ssl_dict[item_id] = databus_key
        logger.debug("Conpot debug info: S7 SSL/SZL: {0}".format(self.ssl_lists))
        logger.info("Conpot S7Comm initialized")
    def handle(self, sock, address):
        """Serve one S7comm TCP client connection.

        Expected flow: COTP connection request (0xE0) -> connection confirm,
        then an S7 PDU-negotiate request, then a loop answering further S7
        requests via S7.handle(). Every request/response pair is recorded in
        the session event log as hex.

        :param sock: connected (gevent) TCP socket.
        :param address: (ip, port) of the remote peer.
        """
        sock.settimeout(self.timeout)
        session = conpot_core.get_session(
            "s7comm",
            address[0],
            address[1],
            sock.getsockname()[0],
            sock.getsockname()[1],
        )
        self.start_time = time.time()
        logger.info(
            "New S7 connection from {0}:{1}. ({2})".format(
                address[0], address[1], session.id
            )
        )
        session.add_event({"type": "NEW_CONNECTION"})
        try:
            while True:
                # read the 4-byte TPKT header first to learn the frame length
                data = sock.recv(4, socket.MSG_WAITALL)
                if len(data) == 0:
                    session.add_event({"type": "CONNECTION_LOST"})
                    break
                _, _, length = unpack("!BBH", data[:4])
                # check for length
                if length <= 4:
                    logger.info("S7 error: Invalid length")
                    session.add_event({"error": "S7 error: Invalid length"})
                    break
                data += sock.recv(length - 4, socket.MSG_WAITALL)
                tpkt_packet = TPKT().parse(cleanse_byte_string(data))
                cotp_base_packet = COTP_BASE_packet().parse(tpkt_packet.payload)
                if cotp_base_packet.tpdu_type == 0xE0:
                    # connection request
                    cotp_cr_request = COTP_ConnectionRequest().dissect(
                        cotp_base_packet.payload
                    )
                    logger.info(
                        "Received COTP Connection Request: dst-ref:{0} src-ref:{1} dst-tsap:{2} src-tsap:{3} "
                        "tpdu-size:{4}. ({5})".format(
                            cotp_cr_request.dst_ref,
                            cotp_cr_request.src_ref,
                            cotp_cr_request.dst_tsap,
                            cotp_cr_request.src_tsap,
                            cotp_cr_request.tpdu_size,
                            session.id,
                        )
                    )
                    # confirm connection response (src/dst references swapped)
                    cotp_cc_response = COTP_ConnectionConfirm(
                        cotp_cr_request.src_ref,
                        cotp_cr_request.dst_ref,
                        0,
                        cotp_cr_request.src_tsap,
                        cotp_cr_request.dst_tsap,
                        0x0A,
                    ).assemble()
                    # encapsulate and transmit
                    cotp_resp_base_packet = COTP_BASE_packet(
                        0xD0, 0, cotp_cc_response
                    ).pack()
                    tpkt_resp_packet = TPKT(3, cotp_resp_base_packet).pack()
                    sock.send(tpkt_resp_packet)
                    session.add_event(
                        {
                            "request": codecs.encode(data, "hex"),
                            "response": codecs.encode(tpkt_resp_packet, "hex"),
                        }
                    )
                    data = sock.recv(1024)
                    # another round of parsing payloads
                    tpkt_packet = TPKT().parse(data)
                    cotp_base_packet = COTP_BASE_packet().parse(tpkt_packet.payload)
                    if cotp_base_packet.tpdu_type == 0xF0:
                        logger.info(
                            "Received known COTP TPDU: {0}. ({1})".format(
                                cotp_base_packet.tpdu_type, session.id
                            )
                        )
                        # will throw exception if the packet does not contain the S7 magic number (0x32)
                        S7_packet = S7().parse(cotp_base_packet.trailer)
                        logger.info(
                            "Received S7 packet: magic:%s pdu_type:%s reserved:%s req_id:%s param_len:%s "
                            "data_len:%s result_inf:%s session_id:%s",
                            S7_packet.magic,
                            S7_packet.pdu_type,
                            S7_packet.reserved,
                            S7_packet.request_id,
                            S7_packet.param_length,
                            S7_packet.data_length,
                            S7_packet.result_info,
                            session.id,
                        )
                        # request pdu
                        if S7_packet.pdu_type == 1:
                            # 0xf0 == Request for connect / pdu negotiate
                            if S7_packet.param == 0xF0:
                                # create S7 response packet (echo the parameters)
                                s7_resp_negotiate_packet = S7(
                                    3, 0, S7_packet.request_id, 0, S7_packet.parameters
                                ).pack()
                                # wrap s7 the packet in cotp
                                cotp_resp_negotiate_packet = COTP_BASE_packet(
                                    0xF0, 0x80, s7_resp_negotiate_packet
                                ).pack()
                                # wrap the cotp packet
                                tpkt_resp_packet = TPKT(
                                    3, cotp_resp_negotiate_packet
                                ).pack()
                                sock.send(tpkt_resp_packet)
                                session.add_event(
                                    {
                                        "request": codecs.encode(data, "hex"),
                                        "response": codecs.encode(
                                            tpkt_resp_packet, "hex"
                                        ),
                                    }
                                )
                                # handshake done, give some more data.
                                data = sock.recv(1024)
                                while data:
                                    tpkt_packet = TPKT().parse(data)
                                    cotp_base_packet = COTP_BASE_packet().parse(
                                        tpkt_packet.payload
                                    )
                                    if cotp_base_packet.tpdu_type == 0xF0:
                                        S7_packet = S7().parse(cotp_base_packet.trailer)
                                        logger.info(
                                            "Received S7 packet: magic:%s pdu_type:%s reserved:%s "
                                            "req_id:%s param_len:%s data_len:%s result_inf:%s session_id:%s",
                                            S7_packet.magic,
                                            S7_packet.pdu_type,
                                            S7_packet.reserved,
                                            S7_packet.request_id,
                                            S7_packet.param_length,
                                            S7_packet.data_length,
                                            S7_packet.result_info,
                                            session.id,
                                        )
                                        (
                                            response_param,
                                            response_data,
                                        ) = S7_packet.handle(address[0])
                                        s7_resp_ssl_packet = S7(
                                            7,
                                            0,
                                            S7_packet.request_id,
                                            0,
                                            response_param,
                                            response_data,
                                        ).pack()
                                        cotp_resp_ssl_packet = COTP_BASE_packet(
                                            0xF0, 0x80, s7_resp_ssl_packet
                                        ).pack()
                                        tpkt_resp_packet = TPKT(
                                            3, cotp_resp_ssl_packet
                                        ).pack()
                                        sock.send(tpkt_resp_packet)
                                        session.add_event(
                                            {
                                                "request": codecs.encode(data, "hex"),
                                                "response": codecs.encode(
                                                    tpkt_resp_packet, "hex"
                                                ),
                                            }
                                        )
                                        data = sock.recv(1024)
                                    else:
                                        # NOTE(review): this branch neither
                                        # breaks nor re-reads data, so a
                                        # non-DT TPDU here re-enters the loop
                                        # with the same bytes — looks like an
                                        # unintended busy loop; confirm.
                                        logger.info(
                                            "Received unknown COTP TPDU after handshake: {0}".format(
                                                cotp_base_packet.tpdu_type
                                            )
                                        )
                                        session.add_event(
                                            {
                                                "error": "Received unknown COTP TPDU after handshake: {0}".format(
                                                    cotp_base_packet.tpdu_type
                                                )
                                            }
                                        )
                else:
                    logger.info(
                        "Received unknown COTP TPDU before handshake: {0}".format(
                            cotp_base_packet.tpdu_type
                        )
                    )
                    session.add_event(
                        {
                            "error": "Received unknown COTP TPDU before handshake: {0}".format(
                                cotp_base_packet.tpdu_type
                            )
                        }
                    )
        except socket.timeout:
            session.add_event({"type": "CONNECTION_LOST"})
            logger.debug(
                "Socket timeout, remote: {0}. ({1})".format(address[0], session.id)
            )
        except socket.error:
            session.add_event({"type": "CONNECTION_LOST"})
            logger.debug(
                "Connection reset by peer, remote: {0}. ({1})".format(
                    address[0], session.id
                )
            )
        except Exception as e:
            logger.exception(
                "Exception caught {0}, remote: {1}. ({2})".format(
                    e, address[0], session.id
                )
            )
def start(self, host, port):
self.host = host
self.port = port
connection = (host, port)
self.server = StreamServer(connection, self.handle)
logger.info("S7Comm server started on: {0}".format(connection))
self.server.serve_forever()
    def stop(self):
        """Stop the StreamServer previously created by start()."""
        self.server.stop()
| 13,426 | Python | .py | 276 | 25.963768 | 110 | 0.413097 | mushorg/conpot | 1,224 | 413 | 115 | GPL-2.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,457 | tpkt.py | mushorg_conpot/conpot/protocols/s7comm/tpkt.py | from struct import pack, unpack
import struct
from conpot.protocols.s7comm.exceptions import ParseException
from conpot.utils.networking import str_to_bytes
class TPKT:
    """TPKT framing layer (RFC 2126 section 4.3, RFC 1006 section 6).

    Packet format:
    +--------+--------+----------------+-----------....---------------+
    |version |reserved| packet length  |            TPDU              |
    +----------------------------------------------....---------------+
    <8 bits> <8 bits> <   16 bits    > <       variable length       >
    """

    def __init__(self, version=3, payload=""):
        self.payload = payload
        self.version = version
        self.reserved = 0
        # total frame length: 4 header bytes plus the payload
        self.packet_length = len(payload) + 4

    def pack(self):
        """Serialize the 4-byte header followed by the payload."""
        header = pack("!BBH", self.version, self.reserved, self.packet_length)
        return header + str_to_bytes(self.payload)

    def parse(self, packet):
        """Dissect *packet* in place and return self.

        Raises ParseException when the header cannot be unpacked.
        """
        try:
            version, reserved, length = unpack("!BBH", packet[:4])
        except struct.error:
            raise ParseException("s7comm", "malformed packet header structure")
        self.version = version
        self.reserved = reserved
        self.packet_length = length
        # the declared length includes the 4 header bytes
        self.payload = packet[4 : 4 + length]
        return self
| 1,422 | Python | .py | 33 | 35.727273 | 80 | 0.553145 | mushorg/conpot | 1,224 | 413 | 115 | GPL-2.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,458 | exceptions.py | mushorg_conpot/conpot/protocols/s7comm/exceptions.py | class ParseException(Exception):
    def __init__(self, protocol, reason, payload=""):
        """Record which protocol parser failed, why, and the offending payload."""
        self.proto = protocol
        self.reason = reason
        self.payload = payload

    def __str__(self):
        # NOTE(review): the message says "DissectException" although the class
        # is named ParseException; it is a runtime string, so it is kept as-is.
        return "DissectException: proto:{0} reason:{1}".format(self.proto, self.reason)
class AssembleException(Exception):
    """Raised when an outgoing protocol packet cannot be assembled."""

    def __init__(self, protocol, reason, payload=""):
        # protocol name, human-readable reason, and the raw payload (if any)
        self.proto, self.reason, self.payload = protocol, reason, payload

    def __str__(self):
        return "AssembleException: proto:{0} reason:{1}".format(self.proto, self.reason)
| 584 | Python | .py | 14 | 34.714286 | 88 | 0.646643 | mushorg/conpot | 1,224 | 413 | 115 | GPL-2.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,459 | bacnet_server.py | mushorg_conpot/conpot/protocols/bacnet/bacnet_server.py | # Copyright (C) 2015 Peter Sooky <xsooky00@stud.fit.vubtr.cz>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
# Author: Peter Sooky <xsooky00@stud.fit.vubtr.cz>
# Brno University of Technology, Faculty of Information Technology
import socket
from lxml import etree
from gevent.server import DatagramServer
from bacpypes.local.device import LocalDeviceObject
from bacpypes.apdu import APDU
from bacpypes.pdu import PDU
from bacpypes.errors import DecodingError
import conpot.core as conpot_core
from conpot.protocols.bacnet.bacnet_app import BACnetApp
from conpot.core.protocol_wrapper import conpot_protocol
from conpot.utils.networking import get_interface_ip
import logging
logger = logging.getLogger(__name__)
@conpot_protocol
class BacnetServer(object):
    """UDP BACnet/IP honeypot server.

    Reads the emulated device's identity from the XML template, builds a
    bacpypes ``LocalDeviceObject`` and serves BACnet datagrams via a gevent
    ``DatagramServer``, delegating protocol handling to ``BACnetApp``.
    """

    def __init__(self, template, template_directory, args):
        # Parse the XML template; the device identity lives under //bacnet/device_info.
        self.dom = etree.parse(template)
        device_info_root = self.dom.xpath("//bacnet/device_info")[0]
        name_key = device_info_root.xpath("./device_name/text()")[0]
        id_key = device_info_root.xpath("./device_identifier/text()")[0]
        vendor_name_key = device_info_root.xpath("./vendor_name/text()")[0]
        vendor_identifier_key = device_info_root.xpath("./vendor_identifier/text()")[0]
        apdu_length_key = device_info_root.xpath("./max_apdu_length_accepted/text()")[0]
        segmentation_key = device_info_root.xpath("./segmentation_supported/text()")[0]
        # bacpypes object representing the emulated device itself.
        self.thisDevice = LocalDeviceObject(
            objectName=name_key,
            objectIdentifier=int(id_key),
            maxApduLengthAccepted=int(apdu_length_key),
            segmentationSupported=segmentation_key,
            vendorName=vendor_name_key,
            vendorIdentifier=int(vendor_identifier_key),
        )
        self.bacnet_app = None
        # Created in start(); handle() reads self.server.server_port from it.
        self.server = None

        logger.info("Conpot Bacnet initialized using the %s template.", template)

    def handle(self, data, address):
        """Process one received UDP datagram.

        Decodes the datagram into an APDU, hands it to the BACnet application
        for processing, then sends whatever response the application prepared
        (in ``self.bacnet_app._response``) back to the client.
        """
        session = conpot_core.get_session(
            "bacnet",
            address[0],
            address[1],
            get_interface_ip(address[0]),
            self.server.server_port,
        )
        logger.info(
            "New Bacnet connection from %s:%d. (%s)", address[0], address[1], session.id
        )
        session.add_event({"type": "NEW_CONNECTION"})

        # I'm not sure if gevent DatagramServer handles issues where the
        # received data is over the MTU -> fragmentation
        if data:
            pdu = PDU()
            pdu.pduData = bytearray(data)
            apdu = APDU()
            try:
                apdu.decode(pdu)
            except DecodingError:
                # Malformed packet; log and drop it without replying.
                logger.warning("DecodingError - PDU: {}".format(pdu))
                return
            self.bacnet_app.indication(apdu, address, self.thisDevice)
            # send an appropriate response from BACnet app to the attacker
            self.bacnet_app.response(self.bacnet_app._response, address)
        logger.info(
            "Bacnet client disconnected %s:%d. (%s)", address[0], address[1], session.id
        )

    def start(self, host, port):
        """Bind the UDP server on (host, port) and serve until stopped (blocking)."""
        connection = (host, port)
        self.server = DatagramServer(connection, self.handle)
        # start to init the socket
        self.server.start()
        # Permit sending to broadcast addresses (responses may go to a
        # BACnet global broadcast destination).
        self.server.socket.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
        self.host = self.server.server_host
        self.port = self.server.server_port

        # create application instance
        # not too beautiful, but the BACnetApp needs access to the socket's sendto method
        # this could properly be refactored in a way such that sending operates on it's own
        # (non-bound) socket.
        self.bacnet_app = BACnetApp(self.thisDevice, self.server)
        # get object_list and properties
        self.bacnet_app.get_objects_and_properties(self.dom)

        logger.info("Bacnet server started on: %s", (self.host, self.port))
        self.server.serve_forever()

    def stop(self):
        """Stop serving; unblocks the serve_forever() loop in start()."""
        self.server.stop()
| 4,685 | Python | .py | 101 | 38.990099 | 91 | 0.677525 | mushorg/conpot | 1,224 | 413 | 115 | GPL-2.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,460 | bacnet_app.py | mushorg_conpot/conpot/protocols/bacnet/bacnet_app.py | # Copyright (C) 2015 Peter Sooky <xsooky00@stud.fit.vubtr.cz>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
# Author: Peter Sooky <xsooky00@stud.fit.vubtr.cz>
# Brno University of Technology, Faculty of Information Technology
import logging
import re
import sys
from bacpypes.pdu import GlobalBroadcast
import bacpypes.object
from bacpypes.app import BIPSimpleApplication
from bacpypes.constructeddata import Any
from bacpypes.constructeddata import InvalidParameterDatatype
from bacpypes.apdu import (
APDU,
apdu_types,
confirmed_request_types,
unconfirmed_request_types,
ErrorPDU,
RejectPDU,
IAmRequest,
IHaveRequest,
ReadPropertyACK,
ConfirmedServiceChoice,
UnconfirmedServiceChoice,
)
from bacpypes.pdu import PDU
import ast
logger = logging.getLogger(__name__)
class BACnetApp(BIPSimpleApplication):
    """
    BACnet device emulation class. BACnet properties are populated from the template file. Services are defined.
    Conpot implements a smart sensor and hence
    - DM-RP-B (execute ReadProperty)
    - DM-DDB-B (execute Who-Is, initiate I-Am)
    - DM-DOB-B (execute Who-Has, initiate I-Have)
    services are supported.

    Handlers store their reply in ``self._response``; the caller (the server's
    ``handle``) then passes that to :meth:`response` for encoding and sending.
    """

    def __init__(self, device, datagram_server):
        # Request/response state for the transaction currently being served.
        self._request = None
        self._response = None
        self._response_service = None  # service name string, used only for logging
        self.localDevice = device
        # Lookup tables for emulated objects, keyed by name and by identifier.
        self.objectName = {device.objectName: device}
        self.objectIdentifier = {device.objectIdentifier: device}
        self.datagram_server = datagram_server
        self.deviceIdentifier = None  # set from template in get_objects_and_properties()
        # NOTE(review): super() is given BIPSimpleApplication itself, which skips
        # BIPSimpleApplication.__init__ in the MRO -- presumably deliberate so no
        # real BACnet/IP transport gets bound here; confirm before changing.
        super(BIPSimpleApplication, self).__init__()

    def get_objects_and_properties(self, dom):
        """
        parse the bacnet template for objects and their properties
        """
        self.deviceIdentifier = int(dom.xpath("//bacnet/device_info/*")[1].text)
        device_property_list = dom.xpath("//bacnet/device_info/*")
        for prop in device_property_list:
            # Convert tag names such as "vendor_name" into camelCase ("vendorName").
            prop_key = prop.tag.lower().title()
            # NOTE(review): this character class also strips "'" and "," -- not only
            # "_" and "-" as presumably intended; harmless for the known tag names.
            prop_key = re.sub("['_','-']", "", prop_key)
            prop_key = prop_key[0].lower() + prop_key[1:]
            if (
                prop_key not in self.localDevice.propertyList.value
                and prop_key not in ["deviceIdentifier", "deviceName"]
            ):
                self.add_property(prop_key, prop.text)
        object_list = dom.xpath("//bacnet/object_list/object/@name")
        for obj in object_list:
            property_list = dom.xpath(
                '//bacnet/object_list/object[@name="%s"]/properties/*' % obj
            )
            # First pass: find the object_type entry and instantiate the matching
            # bacpypes class (e.g. "analog-input" -> AnalogInputObject).
            for prop in property_list:
                if prop.tag == "object_type":
                    object_type = re.sub("-", " ", prop.text).lower().title()
                    object_type = re.sub(" ", "", object_type) + "Object"
                    try:
                        device_object = getattr(bacpypes.object, object_type)()
                        device_object.propertyList = list()
                    # NOTE(review): getattr() raises AttributeError for an unknown
                    # attribute, so this NameError clause likely never fires -- confirm.
                    except NameError:
                        logger.critical("Non-existent BACnet object type")
                        sys.exit(3)
            # Second pass: copy every template property onto the new object.
            for prop in property_list:
                prop_key = prop.tag.lower().title()
                prop_key = re.sub("['_','-']", "", prop_key)
                prop_key = prop_key[0].lower() + prop_key[1:]
                if prop_key == "objectType":
                    prop_val = prop.text.lower().title()
                    prop_val = re.sub(" ", "", prop_val)
                    prop_val = prop_val[0].lower() + prop_val[1:]
                # NOTE(review): this unconditionally overwrites the camelCased value
                # computed in the branch just above, making that branch dead code.
                prop_val = prop.text
                try:
                    if prop_key == "objectIdentifier":
                        device_object.objectIdentifier = int(prop_val)
                    else:
                        setattr(device_object, prop_key, prop_val)
                    device_object.propertyList.append(prop_key)
                except bacpypes.object.PropertyError:
                    logger.critical("Non-existent BACnet property type")
                    sys.exit(3)
            self.add_object(device_object)

    def add_object(self, obj):
        """Register an emulated BACnet object in both lookup tables.

        Raises RuntimeError when the name/identifier is missing or duplicated.
        """
        object_name = obj.objectName
        if not object_name:
            raise RuntimeError("object name required")
        object_identifier = obj.objectIdentifier
        if not object_identifier:
            raise RuntimeError("object identifier required")
        if object_name in self.objectName:
            raise RuntimeError("object already added with the same name")
        if object_identifier in self.objectIdentifier:
            raise RuntimeError("object already added with the same identifier")
        # Keep dictionaries -- for name and identifiers
        self.objectName[object_name] = obj
        self.objectIdentifier[object_identifier] = obj
        self.localDevice.objectList.append(object_identifier)

    def add_property(self, prop_name, prop_value):
        """Attach a (non-empty) property and its value to the local device."""
        if not prop_name:
            raise RuntimeError("property name required")
        if not prop_value:
            raise RuntimeError("property value required")
        setattr(self.localDevice, prop_name, prop_value)
        self.localDevice.propertyList.append(prop_name)

    def iAm(self, *args):
        # Incoming I-Am announcements require no reply from us.
        self._response = None
        return

    def iHave(self, *args):
        # Incoming I-Have announcements require no reply from us.
        self._response = None
        return

    def whoIs(self, request, address, invoke_key, device):
        """Handle Who-Is; prepare a broadcast I-Am response when in range."""
        # Limits are optional (but if used, must be paired)
        execute = False
        try:
            if (request.deviceInstanceRangeLowLimit is not None) and (
                request.deviceInstanceRangeHighLimit is not None
            ):
                # NOTE(review): "low > id > high" can only hold when low > high, so
                # this out-of-range branch is effectively unreachable; the intended
                # check was probably "not (low <= id <= high)". Also the log text
                # says WhoHas inside this WhoIs handler.
                if (
                    request.deviceInstanceRangeLowLimit
                    > list(self.objectIdentifier.keys())[0][1]
                    > request.deviceInstanceRangeHighLimit
                ):
                    logger.info("Bacnet WhoHasRequest out of range")
                else:
                    execute = True
            else:
                # No limits supplied: always answer.
                execute = True
        except AttributeError:
            execute = True

        if execute:
            self._response_service = "IAmRequest"
            self._response = IAmRequest()
            self._response.pduDestination = GlobalBroadcast()
            self._response.iAmDeviceIdentifier = self.deviceIdentifier
            # self._response.objectIdentifier = list(self.objectIdentifier.keys())[0][1]
            self._response.maxAPDULengthAccepted = int(
                getattr(self.localDevice, "maxApduLengthAccepted")
            )
            self._response.segmentationSupported = getattr(
                self.localDevice, "segmentationSupported"
            )
            self._response.vendorID = int(getattr(self.localDevice, "vendorIdentifier"))

    def whoHas(self, request, address, invoke_key, device):
        """Handle Who-Has; prepare a broadcast I-Have when we own the object."""
        execute = False
        try:
            if (request.deviceInstanceRangeLowLimit is not None) and (
                request.deviceInstanceRangeHighLimit is not None
            ):
                # NOTE(review): same unreachable "low > id > high" check as whoIs().
                if (
                    request.deviceInstanceRangeLowLimit
                    > list(self.objectIdentifier.keys())[0][1]
                    > request.deviceInstanceRangeHighLimit
                ):
                    logger.info("Bacnet WhoHasRequest out of range")
                else:
                    execute = True
            else:
                execute = True
        except AttributeError:
            execute = True

        if execute:
            for obj in device.objectList.value[2:]:
                if (
                    int(request.object.objectIdentifier[1]) == obj[1]
                    and request.object.objectIdentifier[0] == obj[0]
                ):
                    objName = self.objectIdentifier[obj].objectName
                    self._response_service = "IHaveRequest"
                    self._response = IHaveRequest()
                    self._response.pduDestination = GlobalBroadcast()
                    # self._response.deviceIdentifier = list(self.objectIdentifier.keys())[0][1]
                    self._response.deviceIdentifier = self.deviceIdentifier
                    self._response.objectIdentifier = obj[1]
                    self._response.objectName = objName
                    break
            else:
                # for-else: no objectList entry matched the requested identifier.
                logger.info("Bacnet WhoHasRequest: no object found")

    def readProperty(self, request, address, invoke_key, device):
        """Handle ReadProperty; prepare a ReadPropertyACK, or an ErrorPDU
        when the addressed object lacks the requested property."""
        # Read Property
        # TODO: add support for PropertyArrayIndex handling;
        # objectList.value[2:] presumably skips the array length and the device's
        # own entry -- TODO confirm against bacpypes ArrayOf semantics.
        for obj in device.objectList.value[2:]:
            if (
                int(request.objectIdentifier[1]) == obj[1]
                and request.objectIdentifier[0] == obj[0]
            ):
                objName = self.objectIdentifier[obj].objectName
                for prop in self.objectIdentifier[obj].properties:
                    if request.propertyIdentifier == prop.identifier:
                        propName = prop.identifier
                        propValue = prop.ReadProperty(self.objectIdentifier[obj])
                        propType = prop.datatype()
                        self._response_service = "ComplexAckPDU"
                        self._response = ReadPropertyACK()
                        self._response.pduDestination = address
                        self._response.apduInvokeID = invoke_key
                        self._response.objectIdentifier = obj[1]
                        self._response.objectName = objName
                        self._response.propertyIdentifier = propName
                        # get the property type
                        # Scan the datatype's module for the class matching the
                        # property's type; loop leaves the match in _obj.
                        for p in dir(sys.modules[propType.__module__]):
                            _obj = getattr(sys.modules[propType.__module__], p)
                            try:
                                if type(propType) == _obj:
                                    break
                            except TypeError:
                                pass
                        # Property values are stored as strings in the template;
                        # parse the literal and wrap it in the bacpypes type.
                        value = ast.literal_eval(propValue)
                        self._response.propertyValue = Any(_obj(value))
                        # self._response.propertyValue.cast_in(objPropVal)
                        # self._response.debug_contents()
                        break
                else:
                    # for-else: the object has no property with that identifier.
                    # NOTE(review): the log line omits the object's name, only the
                    # property identifier is reported.
                    logger.info(
                        "Bacnet ReadProperty: object has no property %s",
                        request.propertyIdentifier,
                    )
                    self._response = ErrorPDU()
                    self._response.pduDestination = address
                    self._response.apduInvokeID = invoke_key
                    self._response.apduService = 0x0C
                    # self._response.errorClass
                    # self._response.errorCode

    def indication(self, apdu, address, device):
        """logging the received PDU type and Service request

        Dispatches confirmed (0x0) and unconfirmed (0x1) requests to the
        handler method named after the BACnet service choice (e.g.
        ``readProperty``); every other PDU type is silently ignored.
        """
        request = None
        apdu_type = apdu_types.get(apdu.apduType)
        invoke_key = apdu.apduInvokeID
        logger.info(
            "Bacnet PDU received from %s:%d. (%s)",
            address[0],
            address[1],
            apdu_type.__name__,
        )
        if apdu_type.pduType == 0x0:
            # Confirmed request handling
            apdu_service = confirmed_request_types.get(apdu.apduService)
            logger.info(
                "Bacnet indication from %s:%d. (%s)",
                address[0],
                address[1],
                apdu_service.__name__,
            )
            try:
                request = apdu_service()
                request.decode(apdu)
            except (AttributeError, RuntimeError, InvalidParameterDatatype) as e:
                logger.warning("Bacnet indication: Invalid service. Error: %s" % e)
                return
            except bacpypes.errors.DecodingError:
                pass

            # Dispatch to the handler whose name matches the service choice.
            for key, value in list(ConfirmedServiceChoice.enumerations.items()):
                if apdu_service.serviceChoice == value:
                    try:
                        # NOTE(review): this AttributeError guard also swallows
                        # AttributeErrors raised *inside* the handler itself.
                        getattr(self, key)(request, address, invoke_key, device)
                        break
                    except AttributeError:
                        logger.error("Not implemented Bacnet command")
                        self._response = None
                        return
            else:
                logger.info(
                    "Bacnet indication: Invalid confirmed service choice (%s)",
                    apdu_service.__name__,
                )
                self._response = None
                return
        # Unconfirmed request handling
        elif apdu_type.pduType == 0x1:
            apdu_service = unconfirmed_request_types.get(apdu.apduService)
            logger.info(
                "Bacnet indication from %s:%d. (%s)",
                address[0],
                address[1],
                apdu_service.__name__,
            )
            try:
                request = apdu_service()
                request.decode(apdu)
            except (AttributeError, RuntimeError):
                logger.exception("Bacnet indication: Invalid service.")
                self._response = None
                return
            except bacpypes.errors.DecodingError:
                pass

            for key, value in list(UnconfirmedServiceChoice.enumerations.items()):
                if apdu_service.serviceChoice == value:
                    try:
                        getattr(self, key)(request, address, invoke_key, device)
                        break
                    except AttributeError:
                        logger.error("Not implemented Bacnet command")
                        self._response = None
                        return
            else:
                # Unrecognized services
                logger.info(
                    "Bacnet indication: Invalid unconfirmed service choice (%s)",
                    apdu_service,
                )
                self._response_service = "ErrorPDU"
                self._response = ErrorPDU()
                self._response.pduDestination = address
                return
        # ignore the following
        elif apdu_type.pduType == 0x2:
            # simple ack pdu
            self._response = None
            return
        elif apdu_type.pduType == 0x3:
            # complex ack pdu
            self._response = None
            return
        elif apdu_type.pduType == 0x4:
            # segment ack
            self._response = None
            return
        elif apdu_type.pduType == 0x5:
            # error pdu
            self._response = None
            return
        elif apdu_type.pduType == 0x6:
            # reject pdu
            self._response = None
            return
        elif apdu_type.pduType == 0x7:
            # abort pdu
            self._response = None
            return
        elif 0x8 <= apdu_type.pduType <= 0xF:
            # reserved
            self._response = None
            return
        else:
            # non-BACnet PDU types
            logger.info("Bacnet Unrecognized service")
            self._response = None
            return

    # socket not actually socket, but DatagramServer with sendto method
    def response(self, response_apdu, address):
        """Encode the prepared APDU and send it via the datagram server.

        A "*:*" destination is treated as a broadcast; Reject/Error PDUs are
        always sent straight back to the requesting address.
        """
        if response_apdu is None:
            return
        apdu = APDU()
        response_apdu.encode(apdu)
        pdu = PDU()
        apdu.encode(pdu)
        if isinstance(response_apdu, RejectPDU) or isinstance(response_apdu, ErrorPDU):
            self.datagram_server.sendto(pdu.pduData, address)
        else:
            apdu_type = apdu_types.get(response_apdu.apduType)
            if pdu.pduDestination == "*:*":
                # broadcast
                # sendto operates under lock
                self.datagram_server.sendto(pdu.pduData, ("", address[1]))
            else:
                # sendto operates under lock
                self.datagram_server.sendto(pdu.pduData, address)
            logger.info(
                "Bacnet response sent to %s (%s:%s)",
                response_apdu.pduDestination,
                apdu_type.__name__,
                self._response_service,
            )
| 16,862 | Python | .py | 390 | 29.505128 | 112 | 0.553536 | mushorg/conpot | 1,224 | 413 | 115 | GPL-2.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,461 | enip_server.py | mushorg_conpot/conpot/protocols/enip/enip_server.py | # Copyright (C) 2017 Yuru Shao <shaoyuru@gmail.com>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import logging
import socket
import cpppo
import contextlib
import time
import sys
import traceback
from lxml import etree
from cpppo.server import network
from cpppo.server.enip import logix
from cpppo.server.enip import parser
from cpppo.server.enip import device
from conpot.core.protocol_wrapper import conpot_protocol
import conpot.core as conpot_core
logger = logging.getLogger(__name__)
class EnipConfig(object):
    """
    Configurations parsed from template

    Exposes the server address/port, device identity fields, mode/timeout
    settings and the device tag list (as :class:`EnipConfig.Tag` instances)
    read from the EtherNet/IP XML template.
    """

    def __init__(self, template):
        """Load and parse the XML template found at path ``template``."""
        self.template = template
        self.parse_template()

    class Tag(object):
        """
        Represents device tag setting parsed from template
        """

        def __init__(self, name, type, size, value, addr=None):
            self.name = name
            self.type = str(type).upper()  # tag types are matched case-insensitively
            self.size = size
            self.value = value
            self.addr = addr  # optional "@cls/ins/att" CIP address string

    def parse_template(self):
        """Populate configuration attributes from ``self.template``.

        Raises:
            AssertionError: when a tag's <size> element is missing or does not
                parse as an integer (AssertionError kept for compatibility).
        """
        dom = etree.parse(self.template)
        self.server_addr = dom.xpath("//enip/@host")[0]
        self.server_port = int(dom.xpath("//enip/@port")[0])
        self.vendor_id = int(dom.xpath("//enip/device_info/VendorId/text()")[0])
        self.device_type = int(dom.xpath("//enip/device_info/DeviceType/text()")[0])
        self.product_rev = int(
            dom.xpath("//enip/device_info/ProductRevision/text()")[0]
        )
        self.product_code = int(dom.xpath("//enip/device_info/ProductCode/text()")[0])
        self.product_name = dom.xpath("//enip/device_info/ProductName/text()")[0]
        self.serial_number = dom.xpath("//enip/device_info/SerialNumber/text()")[0]
        self.mode = dom.xpath("//enip/mode/text()")[0]  # "tcp" or "udp" (see start())
        self.timeout = float(dom.xpath("//enip/timeout/text()")[0])
        self.latency = float(dom.xpath("//enip/latency/text()")[0])

        # parse device tags, these tags will be further processed by the ENIP server
        self.dtags = []
        for t in dom.xpath("//enip/tags/tag"):
            name = t.xpath("@name")[0]
            tag_type = t.xpath("type/text()")[0]
            value = t.xpath("value/text()")[0]
            addr = t.xpath("addr/text()")[0]
            # Previously a bare "except:" swallowed every error here and the
            # message reported the pre-assigned default (always 1) instead of
            # the offending value; narrow the exceptions and report what the
            # template actually contained.
            size_text = t.xpath("size/text()")
            try:
                size = int(size_text[0])
            except IndexError:
                raise AssertionError("Tag %r has no <size> element" % name)
            except (TypeError, ValueError):
                raise AssertionError("Invalid tag size: %r" % size_text[0])
            self.dtags.append(self.Tag(name, tag_type, size, value, addr))
@conpot_protocol
class EnipServer(object):
    """
    Ethernet/IP server

    A cpppo-based EtherNet/IP CIP honeypot endpoint. Tags declared in the
    template are materialised as cpppo Attributes in set_tags(); start()
    runs cpppo's server main loop which calls handle() per connection.
    """

    def __init__(self, template, template_directory, args):
        self.config = EnipConfig(template)
        self.addr = self.config.server_addr
        self.port = self.config.server_port
        # Per-client statistics, keyed by "ip_port" (see stats_for()).
        self.connections = cpppo.dotdict()
        self.control = None  # server control apidict; created in start()
        # all known tags
        self.tags = cpppo.dotdict()
        self.set_tags()
        logger.debug("ENIP server serial number: " + self.config.serial_number)
        logger.debug("ENIP server product name: " + self.config.product_name)

    def stats_for(self, peer):
        """Return (stats, connkey) for peer (ip, port), creating the stats
        entry on first use; (None, None) when peer is None."""
        if peer is None:
            return None, None
        connkey = "%s_%d" % (peer[0].replace(".", "_"), peer[1])
        stats = self.connections.get(connkey)
        if stats is not None:
            return stats, connkey
        stats = cpppo.apidict(timeout=self.config.timeout)
        self.connections[connkey] = stats
        stats["requests"] = 0
        stats["received"] = 0
        stats["eof"] = False
        stats["interface"] = peer[0]
        stats["port"] = peer[1]
        return stats, connkey

    def handle(self, conn, address, enip_process=None, delay=None, **kwds):
        """
        Handle an incoming connection

        Registers a conpot session, then dispatches to handle_tcp() or
        handle_udp() depending on the socket type.
        """
        host, port = address if address else ("UDP", "UDP")
        name = "ENIP_%s" % port
        session = conpot_core.get_session(
            "enip", host, port, conn.getsockname()[0], conn.getsockname()[1]
        )
        logger.debug("ENIP server %s begins serving client %s", name, address)
        session.add_event({"type": "NEW_CONNECTION"})

        tcp = conn.family == socket.AF_INET and conn.type == socket.SOCK_STREAM
        udp = conn.family == socket.AF_INET and conn.type == socket.SOCK_DGRAM

        if tcp:
            # Best-effort socket tuning; failures are logged but not fatal.
            try:
                conn.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
            except Exception as e:
                logger.error(
                    "%s unable to set TCP_NODELAY for client %r: %s", name, address, e
                )
            try:
                conn.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
            except Exception as e:
                logger.error(
                    "%s unable to set SO_KEEPALIVE for client %r: %s", name, address, e
                )
            self.handle_tcp(
                conn,
                address,
                session,
                name=name,
                enip_process=enip_process,
                delay=delay,
                **kwds
            )
        elif udp:
            self.handle_udp(
                conn, name=name, enip_process=enip_process, session=session, **kwds
            )
        else:
            raise NotImplementedError("Unknown socket protocol for EtherNet/IP CIP")

    def handle_tcp(
        self, conn, address, session, name, enip_process, delay=None, **kwds
    ):
        """
        Handle a TCP client

        Repeatedly parses EtherNet/IP requests off the stream with cpppo's
        enip_machine, hands each parsed request to ``enip_process`` and sends
        the encoded response, until EOF or the server is disabled.
        """
        source = cpppo.rememberable()
        with parser.enip_machine(name=name, context="enip") as machine:
            try:
                assert (
                    address
                ), "EtherNet/IP CIP server for TCP/IP must be provided a peer address"
                stats, connkey = self.stats_for(address)
                while not stats.eof:
                    data = cpppo.dotdict()
                    source.forget()
                    # If no/partial EtherNet/IP header received, parsing will fail with a NonTerminal
                    # Exception (dfa exits in non-terminal state). Build data.request.enip:
                    begun = cpppo.timer()
                    with contextlib.closing(
                        machine.run(path="request", source=source, data=data)
                    ) as engine:
                        # PyPy compatibility; avoid deferred destruction of generators
                        for _, sta in engine:
                            if sta is not None:
                                continue
                            # No more transitions available. Wait for input. EOF (b'') will lead to
                            # termination. We will simulate non-blocking by looping on None (so we can
                            # check our options, in case they've been changed). If we still have input
                            # available to process right now in 'source', we'll just check (0 timeout);
                            # otherwise, use the specified server.control.latency.
                            msg = None
                            while msg is None and not stats.eof:
                                wait = (
                                    kwds["server"]["control"]["latency"]
                                    if source.peek() is None
                                    else 0
                                )
                                brx = cpppo.timer()
                                msg = network.recv(conn, timeout=wait)
                                now = cpppo.timer()
                                (logger.info if msg else logger.debug)(
                                    "Transaction receive after %7.3fs (%5s bytes in %7.3f/%7.3fs)",
                                    now - begun,
                                    len(msg) if msg is not None else "None",
                                    now - brx,
                                    wait,
                                )
                                # After each block of input (or None), check if the server is being
                                # signalled done/disabled; we need to shut down so signal eof. Assumes
                                # that (shared) server.control.{done,disable} dotdict be in kwds. We do
                                # *not* read using attributes here, to avoid reporting completion to
                                # external APIs (eg. web) awaiting reception of these signals.
                                if (
                                    kwds["server"]["control"]["done"]
                                    or kwds["server"]["control"]["disable"]
                                ):
                                    logger.info(
                                        "%s done, due to server done/disable",
                                        machine.name_centered(),
                                    )
                                    stats["eof"] = True
                                if msg is not None:
                                    stats["received"] += len(msg)
                                    stats["eof"] = stats["eof"] or not len(msg)
                                    if logger.getEffectiveLevel() <= logging.INFO:
                                        logger.info(
                                            "%s recv: %5d: %s",
                                            machine.name_centered(),
                                            len(msg),
                                            cpppo.reprlib.repr(msg),
                                        )
                                    source.chain(msg)
                                else:
                                    # No input. If we have symbols available, no problem; continue.
                                    # This can occur if the state machine cannot make a transition on
                                    # the input symbol, indicating an unacceptable sentence for the
                                    # grammar. If it cannot make progress, the machine will terminate
                                    # in a non-terminal state, rejecting the sentence.
                                    if source.peek() is not None:
                                        break
                                    # We're at a None (can't proceed), and no input is available. This
                                    # is where we implement "Blocking"; just loop.
                    logger.info(
                        "Transaction parsed after %7.3fs", cpppo.timer() - begun
                    )
                    # Terminal state and EtherNet/IP header recognized, or clean EOF (no partial
                    # message); process and return response
                    if "request" in data:
                        stats["requests"] += 1
                    try:
                        # enip_process must be able to handle no request (empty data), indicating the
                        # clean termination of the session if closed from this end (not required if
                        # enip_process returned False, indicating the connection was terminated by
                        # request.)
                        delayseconds = 0  # response delay (if any)
                        if enip_process(address, data=data, **kwds):
                            # Produce an EtherNet/IP response carrying the encapsulated response data.
                            # If no encapsulated data, ensure we also return a non-zero EtherNet/IP
                            # status. A non-zero status indicates the end of the session.
                            assert (
                                "response.enip" in data
                            ), "Expected EtherNet/IP response; none found"
                            if (
                                "input" not in data.response.enip
                                or not data.response.enip.input
                            ):
                                logger.warning(
                                    "Expected EtherNet/IP response encapsulated message; none found"
                                )
                                assert (
                                    data.response.enip.status
                                ), "If no/empty response payload, expected non-zero EtherNet/IP status"
                            rpy = parser.enip_encode(data.response.enip)
                            if logger.getEffectiveLevel() <= logging.INFO:
                                logger.info(
                                    "%s send: %5d: %s %s",
                                    machine.name_centered(),
                                    len(rpy),
                                    cpppo.reprlib.repr(rpy),
                                    ("delay: %r" % delay) if delay else "",
                                )
                            if delay:
                                # A delay (anything with a delay.value attribute) == #[.#] (converible
                                # to float) is ok; may be changed via web interface.
                                try:
                                    delayseconds = float(
                                        delay.value
                                        if hasattr(delay, "value")
                                        else delay
                                    )
                                    if delayseconds > 0:
                                        time.sleep(delayseconds)
                                except Exception as exc:
                                    logger.info(
                                        "Unable to delay; invalid seconds: %r", delay
                                    )
                            try:
                                conn.send(rpy)
                            except socket.error as exc:
                                logger.info("Session ended (client abandoned): %s", exc)
                                stats["eof"] = True
                            if data.response.enip.status:
                                logger.warning(
                                    "Session ended (server EtherNet/IP status: 0x%02x == %d)",
                                    data.response.enip.status,
                                    data.response.enip.status,
                                )
                                stats["eof"] = True
                        else:
                            # Session terminated. No response, just drop connection.
                            if logger.getEffectiveLevel() <= logging.INFO:
                                logger.info(
                                    "Session ended (client initiated): %s",
                                    parser.enip_format(data),
                                )
                            stats["eof"] = True
                        logger.info(
                            "Transaction complete after %7.3fs (w/ %7.3fs delay)",
                            cpppo.timer() - begun,
                            delayseconds,
                        )
                        session.add_event({"type": "CONNECTION_CLOSED"})
                    # NOTE(review): bare except, but it terminates the session and
                    # re-raises, so nothing is silently swallowed.
                    except:
                        logger.error("Failed request: %s", parser.enip_format(data))
                        enip_process(address, data=cpppo.dotdict())  # Terminate.
                        raise
                    stats["processed"] = source.sent
            except:
                # Parsing failure. Report where in the input stream we failed, then re-raise.
                stats["processed"] = source.sent
                memory = bytes(bytearray(source.memory))
                pos = len(source.memory)
                future = bytes(bytearray(b for b in source))
                where = "at %d total bytes:\n%s\n%s (byte %d)" % (
                    stats.processed,
                    repr(memory + future),
                    "-" * (len(repr(memory)) - 1) + "^",
                    pos,
                )
                logger.error(
                    "EtherNet/IP error %s\n\nFailed with exception:\n%s\n",
                    where,
                    "".join(traceback.format_exception(*sys.exc_info())),
                )
                raise
            finally:
                # Not strictly necessary to close (network.server_main will discard the socket,
                # implicitly closing it), but we'll do it explicitly here in case the thread doesn't die
                # for some other reason. Clean up the connections entry for this connection address.
                self.connections.pop(connkey, None)
                logger.info(
                    "%s done; processed %3d request%s over %5d byte%s/%5d received (%d connections remain)",
                    name,
                    stats.requests,
                    " " if stats.requests == 1 else "s",
                    stats.processed,
                    " " if stats.processed == 1 else "s",
                    stats.received,
                    len(self.connections),
                )
                sys.stdout.flush()
                conn.close()

    def handle_udp(self, conn, name, enip_process, session, **kwds):
        """
        Process UDP packets from multiple clients

        One request per datagram; responses are sent back to the originating
        peer with sendto(). Runs until the server is done/disabled.
        """
        with parser.enip_machine(name=name, context="enip") as machine:
            while (
                not kwds["server"]["control"]["done"]
                and not kwds["server"]["control"]["disable"]
            ):
                try:
                    source = cpppo.rememberable()
                    data = cpppo.dotdict()
                    # If no/partial EtherNet/IP header received, parsing will fail with a NonTerminal
                    # Exception (dfa exits in non-terminal state). Build data.request.enip:
                    begun = cpppo.timer()  # waiting for next transaction
                    addr, stats = None, None
                    with contextlib.closing(
                        machine.run(path="request", source=source, data=data)
                    ) as engine:
                        # PyPy compatibility; avoid deferred destruction of generators
                        for _, sta in engine:
                            if sta is not None:
                                # No more transitions available. Wait for input.
                                continue
                            assert not addr, "Incomplete UDP request from client %r" % (
                                addr
                            )
                            msg = None
                            while msg is None:
                                # For UDP, we'll allow no input only at the start of a new request parse
                                # (addr is None); anything else will be considered a failed request Back
                                # to the trough for more symbols, after having already received a packet
                                # from a peer? No go!
                                wait = (
                                    kwds["server"]["control"]["latency"]
                                    if source.peek() is None
                                    else 0
                                )
                                brx = cpppo.timer()
                                msg, frm = network.recvfrom(conn, timeout=wait)
                                now = cpppo.timer()
                                if not msg:
                                    if (
                                        kwds["server"]["control"]["done"]
                                        or kwds["server"]["control"]["disable"]
                                    ):
                                        return
                                (logger.info if msg else logger.debug)(
                                    "Transaction receive after %7.3fs (%5s bytes in %7.3f/%7.3fs): %r",
                                    now - begun,
                                    len(msg) if msg is not None else "None",
                                    now - brx,
                                    wait,
                                    self.stats_for(frm)[0],
                                )
                                # If we're at a None (can't proceed), and we haven't yet received input,
                                # then this is where we implement "Blocking"; we just loop for input.
                            # We have received exactly one packet from an identified peer!
                            begun = now
                            addr = frm
                            stats, _ = self.stats_for(addr)
                            # For UDP, we don't ever receive incoming EOF, or set stats['eof'].
                            # However, we can respond to a manual eof (eg. from web interface) by
                            # ignoring the peer's packets.
                            assert stats and not stats.get(
                                "eof"
                            ), "Ignoring UDP request from client %r: %r" % (addr, msg)
                            stats["received"] += len(msg)
                            logger.debug(
                                "%s recv: %5d: %s",
                                machine.name_centered(),
                                len(msg),
                                cpppo.reprlib.repr(msg),
                            )
                            source.chain(msg)
                    # Terminal state and EtherNet/IP header recognized; process and return response
                    assert stats
                    if "request" in data:
                        stats["requests"] += 1
                    # enip_process must be able to handle no request (empty data), indicating the
                    # clean termination of the session if closed from this end (not required if
                    # enip_process returned False, indicating the connection was terminated by
                    # request.)
                    if enip_process(addr, data=data, **kwds):
                        # Produce an EtherNet/IP response carrying the encapsulated response data.
                        # If no encapsulated data, ensure we also return a non-zero EtherNet/IP
                        # status. A non-zero status indicates the end of the session.
                        assert (
                            "response.enip" in data
                        ), "Expected EtherNet/IP response; none found"
                        if (
                            "input" not in data.response.enip
                            or not data.response.enip.input
                        ):
                            logger.warning(
                                "Expected EtherNet/IP response encapsulated message; none found"
                            )
                            assert (
                                data.response.enip.status
                            ), "If no/empty response payload, expected non-zero EtherNet/IP status"
                        rpy = parser.enip_encode(data.response.enip)
                        logger.debug(
                            "%s send: %5d: %s",
                            machine.name_centered(),
                            len(rpy),
                            cpppo.reprlib.repr(rpy),
                        )
                        conn.sendto(rpy, addr)
                    logger.debug(
                        "Transaction complete after %7.3fs", cpppo.timer() - begun
                    )
                    session.add_event({"type": "CONNECTION_CLOSED"})
                    stats["processed"] = source.sent
                # NOTE(review): bare except -- unlike the TCP path it does NOT
                # re-raise; the failure is logged and the loop continues.
                except:
                    # Parsing failure. Suck out some remaining input to give us some context, but don't re-raise
                    if stats:
                        stats["processed"] = source.sent
                    memory = bytes(bytearray(source.memory))
                    pos = len(source.memory)
                    future = bytes(bytearray(b for b in source))
                    where = "at %d total bytes:\n%s\n%s (byte %d)" % (
                        stats.get("processed", 0) if stats else 0,
                        repr(memory + future),
                        "-" * (len(repr(memory)) - 1) + "^",
                        pos,
                    )
                    logger.error(
                        "Client %r EtherNet/IP error %s\n\nFailed with exception:\n%s\n",
                        addr,
                        where,
                        "".join(traceback.format_exception(*sys.exc_info())),
                    )
                    session.add_event({"type": "CONNECTION_FAILED"})

    def set_tags(self):
        """Create cpppo tag entries (name -> Attribute/path) from the template tags."""
        # Map template type names onto the cpppo parser class, a default value,
        # and a converter for the template's string value.
        typenames = {
            "BOOL": (parser.BOOL, 0, lambda v: bool(v)),
            "INT": (parser.INT, 0, lambda v: int(v)),
            "DINT": (parser.DINT, 0, lambda v: int(v)),
            "SINT": (parser.SINT, 0, lambda v: int(v)),
            "REAL": (parser.REAL, 0.0, lambda v: float(v)),
            "SSTRING": (parser.SSTRING, "", lambda v: str(v)),
            "STRING": (parser.STRING, "", lambda v: str(v)),
        }
        for t in self.config.dtags:
            tag_name = t.name
            tag_type = t.type
            tag_size = t.size
            assert tag_type in typenames, "Invalid tag type; must be one of %r" % list(
                typenames
            )
            tag_class, _, f = typenames[tag_type]
            tag_value = f(t.value)
            tag_address = t.addr
            logger.debug("tag address: %s", tag_address)

            path, attribute = None, None
            if tag_address:
                # Resolve the @cls/ins/att, and optionally [elm] or /elm
                segments, _, cnt = device.parse_path_elements("@" + tag_address)
                assert (
                    not cnt or cnt == 1
                ), "A Tag may be specified to indicate a single element: %s" % (
                    tag_address
                )
                path = {"segment": segments}
                cls, ins, att = device.resolve(path, attribute=True)
                assert ins > 0, "Cannot specify the Class' instance for a tag's address"
                # NOTE(review): elm is not used below -- presumably called for
                # validation side effects only; confirm.
                elm = device.resolve_element(path)
                # Look thru defined tags for one assigned to same cls/ins/att (maybe different elm);
                # must be same type/size.
                for tn, te in dict.items(self.tags):
                    if not te["path"]:
                        continue  # Ignore tags w/o pre-defined path...
                    if device.resolve(te["path"], attribute=True) == (cls, ins, att):
                        assert (
                            te.attribute.parser.__class__ is tag_class
                            and len(te.attribute) == tag_size
                        ), "Incompatible Attribute types for tags %r and %r" % (
                            tn,
                            tag_name,
                        )
                        attribute = te.attribute
                        break
            if not attribute:
                # No Attribute found; create one sized/typed per the template.
                attribute = device.Attribute(
                    tag_name,
                    tag_class,
                    default=(tag_value if tag_size == 1 else [tag_value] * tag_size),
                )

            # Ready to create the tag and its Attribute (and error code to return, if any). If tag_size
            # is 1, it will be a scalar Attribute. Since the tag_name may contain '.', we don't want
            # the normal dotdict.__setitem__ resolution to parse it; use plain dict.__setitem__.
            logger.debug(
                "Creating tag: %-14s%-10s %10s[%4d]",
                tag_name,
                "@" + tag_address if tag_address else "",
                attribute.parser.__class__.__name__,
                len(attribute),
            )
            tag_entry = cpppo.dotdict()
            tag_entry.attribute = (
                attribute  # The Attribute (may be shared by multiple tags)
            )
            tag_entry.path = (
                path  # Desired Attribute path (may include element), or None
            )
            tag_entry.error = 0x00
            dict.__setitem__(self.tags, tag_name, tag_entry)

    def start(self, host, port):
        """Run cpppo's server main loop on (host, port); blocks until stop()."""
        srv_ctl = cpppo.dotdict()
        srv_ctl.control = cpppo.apidict(timeout=self.config.timeout)
        srv_ctl.control["done"] = False
        srv_ctl.control["disable"] = False
        srv_ctl.control.setdefault("latency", self.config.latency)

        options = cpppo.dotdict()
        options.setdefault("enip_process", logix.process)
        kwargs = dict(options, tags=self.tags, server=srv_ctl)

        # The template's mode selects exactly one transport.
        tcp_mode = True if self.config.mode == "tcp" else False
        udp_mode = True if self.config.mode == "udp" else False

        self.control = srv_ctl.control

        logger.debug(
            "ENIP server started on: %s:%d, mode: %s" % (host, port, self.config.mode)
        )
        while not self.control["done"]:
            network.server_main(
                address=(host, port),
                target=self.handle,
                kwargs=kwargs,
                udp=udp_mode,
                tcp=tcp_mode,
            )

    def stop(self):
        """Signal the serving loop in start() to exit."""
        logger.debug("Stopping ENIP server")
        self.control["done"] = True
| 30,252 | Python | .py | 585 | 30.952137 | 113 | 0.454951 | mushorg/conpot | 1,224 | 413 | 115 | GPL-2.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,462 | ipmi_server.py | mushorg_conpot/conpot/protocols/ipmi/ipmi_server.py | # Copyright 2015 Lenovo
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Author: Peter Sooky <xsooky00@stud.fit.vubtr.cz>
# Brno University of Technology, Faculty of Information Technology
from gevent import socket
from gevent.server import DatagramServer
import struct
import pyghmi.ipmi.private.constants as constants
import pyghmi.ipmi.private.serversession as serversession
import uuid
import hmac
import hashlib
import os
import collections
from lxml import etree
from conpot.protocols.ipmi.fakebmc import FakeBmc
from conpot.protocols.ipmi.fakesession import FakeSession
from conpot.utils.networking import chr_py3
import conpot.core as conpot_core
import logging as logger
class IpmiServer(object):
    """Conpot IPMI honeypot service (RMCP / RMCP+ over UDP).

    Loads a user database from the XML template and implements the IPMI
    1.5/2.0 session establishment handshake (Get Channel Authentication
    Capabilities, RMCP+ Open Session, RAKP 1-4) plus a small set of
    application commands, so scanners and IPMI clients see what looks
    like a real BMC.  Per-client state lives in FakeSession objects;
    power and boot-device behaviour is delegated to FakeBmc.
    """

    def __init__(self, template, template_directory, args):
        dom = etree.parse(template)
        databus = conpot_core.get_databus()
        self.device_name = databus.get_value(
            dom.xpath("//ipmi/device_info/device_name/text()")[0]
        )
        self.port = None
        # Maps client IP -> FakeSession for sources we have already seen.
        self.sessions = dict()
        self.uuid = uuid.uuid4()
        self.kg = None
        self.sock = None
        self.authdata = collections.OrderedDict()
        # Canned body of the Get Channel Auth Capabilities response.
        lanchannel = 1
        authtype = 0b10000000  # advertise IPMI v2.0 extended capabilities
        authstatus = 0b00000100
        chancap = 0b00000010
        oemdata = (0, 0, 0, 0)
        self.authcap = struct.pack(
            "BBBBBBBBB", 0, lanchannel, authtype, authstatus, chancap, *oemdata
        )
        self.server = None
        self.session = None
        # Start at the lowest privilege.  Previously this attribute was
        # only created inside the Set Session Privilege (0x3B) handler, so
        # commands such as Get User Access issued before it raised
        # AttributeError.
        self.clientpriv = 1
        self.bmc = self._configure_users(dom)
        logger.info("Conpot IPMI initialized using %s template", template)

    def _configure_users(self, dom):
        """Load user names, passwords, privileges and flags from the template.

        Credentials are stored as bytes: all wire-level comparisons and
        HMAC computations later operate on bytes.
        """
        # XML parsing
        authdata_name = dom.xpath("//ipmi/user_list/user/user_name/text()")
        authdata_passwd = dom.xpath("//ipmi/user_list/user/password/text()")
        authdata_name = [i.encode("utf-8") for i in authdata_name]
        authdata_passwd = [i.encode("utf-8") for i in authdata_passwd]
        self.authdata = collections.OrderedDict(zip(authdata_name, authdata_passwd))
        authdata_priv = dom.xpath("//ipmi/user_list/user/privilege/text()")
        if not all(0 < int(k) <= 4 for k in authdata_priv):
            raise ValueError("Privilege level must be between 1 and 4")
        authdata_priv = [int(k) for k in authdata_priv]
        self.privdata = collections.OrderedDict(zip(authdata_name, authdata_priv))
        activeusers = dom.xpath("//ipmi/user_list/user/active/text()")
        self.activeusers = [1 if x == "true" else 0 for x in activeusers]
        fixedusers = dom.xpath("//ipmi/user_list/user/fixed/text()")
        self.fixedusers = [1 if x == "true" else 0 for x in fixedusers]
        self.channelaccessdata = collections.OrderedDict(
            zip(authdata_name, activeusers)
        )
        return FakeBmc(self.authdata, self.port)

    def _checksum(self, *data):
        """Return the IPMI two's-complement checksum byte for *data*."""
        csum = sum(data)
        csum ^= 0xFF
        csum += 1
        csum &= 0xFF
        return csum

    def handle(self, data, address):
        """Datagram entry point: dispatch to a new or existing session."""
        # make sure self.session exists
        if address[0] not in self.sessions or not hasattr(self, "session"):
            # new session for new source
            logger.info("New IPMI traffic from %s", address)
            self.session = FakeSession(address[0], "", "", address[1])
            self.session.server = self
            self.uuid = uuid.uuid4()
            self.kg = None
            self.session.socket = self.sock
            self.sessions[address[0]] = self.session
            self.initiate_session(data, address, self.session)
        else:
            # session already exists
            logger.info("Incoming IPMI traffic from %s", address)
            if self.session.stage == 0:
                self.close_server_session()
            else:
                self._got_request(data, address, self.session)

    def initiate_session(self, data, address, session):
        """Validate the first packet from a client and begin the handshake."""
        if len(data) < 22:
            self.close_server_session()
            return
        if not (chr_py3(data[0]) == b"\x06" and data[2:4] == b"\xff\x07"):
            # check rmcp version, sequencenumber and class;
            self.close_server_session()
            return
        if chr_py3(data[4]) == b"\x06":
            # ipmi v2
            session.ipmiversion = 2.0
            session.authtype = 6
            payload_type = chr_py3(data[5])
            if payload_type not in (b"\x00", b"\x10"):
                self.close_server_session()
                return
            if payload_type == b"\x10":
                # new session to handle conversation
                serversession.ServerSession(
                    self.authdata,
                    self.kg,
                    session.sockaddr,
                    self.sock,
                    data[16:],
                    self.uuid,
                    bmc=self,
                )
                serversession.ServerSession.logged = logger
                return
        # data = data[13:]
        if len(data[14:16]) < 2:
            self.close_server_session()
        else:
            myaddr, netfnlun = struct.unpack("2B", data[14:16])
            netfn = (netfnlun & 0b11111100) >> 2
            mylun = netfnlun & 0b11
            if netfn == 6:
                # application request
                if chr_py3(data[19]) == b"\x38":
                    # cmd = get channel auth capabilities
                    verchannel, level = struct.unpack("2B", data[20:22])
                    version = verchannel & 0b10000000
                    if version != 0b10000000:
                        self.close_server_session()
                        return
                    channel = verchannel & 0b1111
                    if channel != 0xE:
                        self.close_server_session()
                        return
                    (clientaddr, clientlun) = struct.unpack("BB", data[17:19])
                    level &= 0b1111
                    self.send_auth_cap(
                        myaddr, mylun, clientaddr, clientlun, session.sockaddr
                    )

    def send_auth_cap(self, myaddr, mylun, clientaddr, clientlun, sockaddr):
        """Reply to Get Channel Authentication Capabilities (cmd 0x38)."""
        header = b"\x06\x00\xff\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x10"
        headerdata = (clientaddr, clientlun | (7 << 2))
        headersum = self._checksum(*headerdata)
        header += struct.pack(
            "BBBBBB", *(headerdata + (headersum, myaddr, mylun, 0x38))
        )
        header += self.authcap
        bodydata = struct.unpack("B" * len(header[17:]), header[17:])
        header += chr_py3(self._checksum(*bodydata))
        self.session.stage += 1
        logger.info("Connection established with %s", sockaddr)
        self.session.send_data(header, sockaddr)

    def close_server_session(self):
        """Drop all state kept for the current client session."""
        logger.info("IPMI Session closed %s", self.session.sockaddr[0])
        # cleanup session
        del self.sessions[self.session.sockaddr[0]]
        del self.session

    def _got_request(self, data, address, session):
        """Validate and dispatch a packet for an already-established session."""
        if chr_py3(data[4]) in (b"\x00", b"\x02"):
            # ipmi 1.5 payload
            session.ipmiversion = 1.5
            remsequencenumber = struct.unpack("<I", data[5:9])[0]
            if (
                hasattr(session, "remsequencenumber")
                and remsequencenumber < session.remsequencenumber
            ):
                self.close_server_session()
                return
            session.remsequencenumber = remsequencenumber
            if ord(chr_py3(data[4])) != session.authtype:
                self.close_server_session()
                return
            remsessid = struct.unpack("<I", data[9:13])[0]
            if remsessid != session.sessionid:
                self.close_server_session()
                return
            rsp = list(struct.unpack("!%dB" % len(data), data))
            authcode = False
            if chr_py3(data[4]) == b"\x02":
                # authcode in ipmi 1.5 packet
                authcode = data[13:29]
                del rsp[13:29]
            payload = list(rsp[14 : 14 + rsp[13]])
            if authcode:
                expectedauthcode = session._ipmi15authcode(
                    payload, checkremotecode=True
                )
                expectedauthcode = struct.pack(
                    "%dB" % len(expectedauthcode), *expectedauthcode
                )
                if expectedauthcode != authcode:
                    self.close_server_session()
                    return
            session._ipmi15(payload)
        elif chr_py3(data[4]) == b"\x06":
            # ipmi 2.0 payload
            session.ipmiversion = 2.0
            session.authtype = 6
            session._ipmi20(data)
        else:
            # unrecognized data
            self.close_server_session()
            return

    def _got_rmcp_openrequest(self, data):
        """Answer an RMCP+ Open Session request with our session ids."""
        request = struct.pack("B" * len(data), *data)
        clienttag = ord(chr_py3(request[0]))
        self.clientsessionid = list(struct.unpack("4B", request[4:8]))
        self.managedsessionid = list(struct.unpack("4B", os.urandom(4)))
        self.session.privlevel = 4
        response = (
            [clienttag, 0, self.session.privlevel, 0]
            + self.clientsessionid
            + self.managedsessionid
            + [
                0,
                0,
                0,
                8,
                1,
                0,
                0,
                0,  # auth
                1,
                0,
                0,
                8,
                1,
                0,
                0,
                0,  # integrity
                2,
                0,
                0,
                8,
                1,
                0,
                0,
                0,  # privacy
            ]
        )
        logger.info("IPMI open session request")
        self.session.send_payload(
            response, constants.payload_types["rmcpplusopenresponse"], retry=False
        )

    def _got_rakp1(self, data):
        """Handle RAKP message 1: look up the user and send RAKP 2."""
        clienttag = data[0]
        self.Rm = data[8:24]
        self.rolem = data[24]
        self.maxpriv = self.rolem & 0b111
        namepresent = data[27]
        if namepresent == 0:
            self.close_server_session()
            return
        usernamebytes = data[28:]
        self.username = struct.pack("%dB" % len(usernamebytes), *usernamebytes)
        if self.username not in self.authdata:
            logger.info(
                "User {} supplied by client not in user_db.".format(
                    self.username,
                )
            )
            self.close_server_session()
            return
        uuidbytes = self.uuid.bytes
        uuidbytes = list(struct.unpack("%dB" % len(uuidbytes), uuidbytes))
        self.uuiddata = uuidbytes
        self.Rc = list(struct.unpack("16B", os.urandom(16)))
        hmacdata = (
            self.clientsessionid
            + self.managedsessionid
            + self.Rm
            + self.Rc
            + uuidbytes
            + [self.rolem, len(self.username)]
        )
        hmacdata = struct.pack("%dB" % len(hmacdata), *hmacdata)
        hmacdata += self.username
        self.kuid = self.authdata[self.username]
        if self.kg is None:
            self.kg = self.kuid
        authcode = hmac.new(self.kuid, hmacdata, hashlib.sha1).digest()
        authcode = list(struct.unpack("%dB" % len(authcode), authcode))
        newmessage = (
            [clienttag, 0, 0, 0] + self.clientsessionid + self.Rc + uuidbytes + authcode
        )
        logger.info("IPMI rakp1 request")
        self.session.send_payload(
            newmessage, constants.payload_types["rakp2"], retry=False
        )

    def _got_rakp3(self, data):
        """Verify RAKP message 3, derive session keys and send RAKP 4."""
        RmRc = struct.pack("B" * len(self.Rm + self.Rc), *(self.Rm + self.Rc))
        self.sik = hmac.new(
            self.kg,
            RmRc + struct.pack("2B", self.rolem, len(self.username)) + self.username,
            hashlib.sha1,
        ).digest()
        self.session.k1 = hmac.new(self.sik, b"\x01" * 20, hashlib.sha1).digest()
        self.session.k2 = hmac.new(self.sik, b"\x02" * 20, hashlib.sha1).digest()
        self.session.aeskey = self.session.k2[0:16]
        hmacdata = (
            struct.pack("B" * len(self.Rc), *self.Rc)
            + struct.pack("4B", *self.clientsessionid)
            + struct.pack("2B", self.rolem, len(self.username))
            + self.username
        )
        expectedauthcode = hmac.new(self.kuid, hmacdata, hashlib.sha1).digest()
        authcode = struct.pack("%dB" % len(data[8:]), *data[8:])
        if expectedauthcode != authcode:
            self.close_server_session()
            return
        clienttag = data[0]
        if data[1] != 0:
            self.close_server_session()
            return
        self.session.localsid = struct.unpack(
            "<I", struct.pack("4B", *self.managedsessionid)
        )[0]
        logger.info("IPMI rakp3 request")
        self.session.ipmicallback = self.handle_client_request
        self._send_rakp4(clienttag, 0)

    def _send_rakp4(self, tagvalue, statuscode):
        """Send RAKP message 4 and arm the session's crypto parameters."""
        payload = [tagvalue, statuscode, 0, 0] + self.clientsessionid
        hmacdata = self.Rm + self.managedsessionid + self.uuiddata
        hmacdata = struct.pack("%dB" % len(hmacdata), *hmacdata)
        authdata = hmac.new(self.sik, hmacdata, hashlib.sha1).digest()[:12]
        payload += struct.unpack("%dB" % len(authdata), authdata)
        logger.info("IPMI rakp4 sent")
        self.session.send_payload(
            payload, constants.payload_types["rakp4"], retry=False
        )
        self.session.confalgo = "aes"
        self.session.integrityalgo = "sha1"
        self.session.sessionid = struct.unpack(
            "<I", struct.pack("4B", *self.clientsessionid)
        )[0]

    def handle_client_request(self, request):
        """Dispatch a parsed IPMI application request and send a response.

        *request* is the dict built by FakeSession._parse_payload with
        "netfn", "command" and "data" (list of ints) keys.
        """
        if request["netfn"] == 6 and request["command"] == 0x3B:
            # set session privilage level
            pendingpriv = request["data"][0]
            returncode = 0
            if pendingpriv > 1:
                if pendingpriv > self.maxpriv:
                    returncode = 0x81
                else:
                    self.clientpriv = request["data"][0]
            self.session._send_ipmi_net_payload(code=returncode, data=[self.clientpriv])
            logger.info(
                "IPMI response sent (Set Session Privilege) to %s",
                self.session.sockaddr,
            )
        elif request["netfn"] == 6 and request["command"] == 0x3C:
            # close session
            self.session.send_ipmi_response()
            logger.info(
                "IPMI response sent (Close Session) to %s", self.session.sockaddr
            )
            self.close_server_session()
        elif request["netfn"] == 6 and request["command"] == 0x44:
            # get user access
            # reschan = request["data"][0]
            # channel = reschan & 0b00001111
            resuid = request["data"][1]
            usid = resuid & 0b00011111
            if self.clientpriv > self.maxpriv:
                returncode = 0xD4
            else:
                returncode = 0
            self.usercount = len(self.authdata)
            self.channelaccess = (
                0b0000000 | self.privdata[list(self.authdata.keys())[usid - 1]]
            )
            if self.channelaccessdata[list(self.authdata.keys())[usid - 1]] == "true":
                # channelaccess: 7=res; 6=callin; 5=link; 4=messaging; 3-0=privilege
                self.channelaccess |= 0b00110000
            data = list()
            data.append(self.usercount)
            data.append(sum(self.activeusers))
            data.append(sum(self.fixedusers))
            data.append(self.channelaccess)
            self.session._send_ipmi_net_payload(code=returncode, data=data)
            logger.info(
                "IPMI response sent (Get User Access) to %s", self.session.sockaddr
            )
        elif request["netfn"] == 6 and request["command"] == 0x46:
            # get user name
            userid = request["data"][0]
            returncode = 0
            username = list(self.authdata.keys())[userid - 1]
            data = list(username)  # bytes -> list of ints on Python 3
            while len(data) < 16:
                # filler
                data.append(0)
            self.session._send_ipmi_net_payload(code=returncode, data=data)
            logger.info(
                "IPMI response sent (Get User Name) to %s", self.session.sockaddr
            )
        elif request["netfn"] == 6 and request["command"] == 0x45:
            # set user name
            # TODO: fix issue where users can be overwritten
            # python does not support dictionary with duplicate keys
            userid = request["data"][0]
            # bytes() both joins the int list and keeps the key type
            # consistent with the bytes keys in self.authdata; the old
            # str.strip(b"\x00") raised TypeError on Python 3.
            username = bytes(request["data"][1:]).strip(b"\x00")
            oldname = list(self.authdata.keys())[userid - 1]
            # need to recreate dictionary to preserve order
            self.copyauth = collections.OrderedDict()
            self.copypriv = collections.OrderedDict()
            self.copychannel = collections.OrderedDict()
            index = 0
            # dict.iteritems() was removed in Python 3; use items().
            for k, v in self.authdata.items():
                if index == userid - 1:
                    self.copyauth.update({username: self.authdata[oldname]})
                    self.copypriv.update({username: self.privdata[oldname]})
                    self.copychannel.update({username: self.channelaccessdata[oldname]})
                else:
                    self.copyauth.update({k: v})
                    self.copypriv.update({k: self.privdata[k]})
                    self.copychannel.update({k: self.channelaccessdata[k]})
                index += 1
            self.authdata = self.copyauth
            self.privdata = self.copypriv
            self.channelaccessdata = self.copychannel
            returncode = 0
            self.session._send_ipmi_net_payload(code=returncode)
            logger.info(
                "IPMI response sent (Set User Name) to %s", self.session.sockaddr
            )
        elif request["netfn"] == 6 and request["command"] == 0x47:
            # set user passwd
            passwd_length = request["data"][0] & 0b10000000
            userid = request["data"][0] & 0b00111111
            username = list(self.authdata.keys())[userid - 1]
            operation = request["data"][1] & 0b00000011
            returncode = 0
            # Build the password as bytes so the b"\x00" strips and the
            # comparisons against the bytes values in self.authdata work
            # (the old chr()-joined str raised TypeError on Python 3).
            if passwd_length:
                # 20 byte
                passwd = bytes(request["data"][2:22])
            else:
                # 16 byte
                passwd = bytes(request["data"][2:18])
            if operation == 0:
                # disable user
                if self.activeusers[list(self.authdata.keys()).index(username)]:
                    self.activeusers[list(self.authdata.keys()).index(username)] = 0
            elif operation == 1:
                # enable user
                if not self.activeusers[list(self.authdata.keys()).index(username)]:
                    self.activeusers[list(self.authdata.keys()).index(username)] = 1
            elif operation == 2:
                # set passwd
                if len(passwd) not in [16, 20]:
                    returncode = 0x81
                self.authdata[username] = passwd.strip(b"\x00")
            else:
                # test passwd
                if len(passwd) not in [16, 20]:
                    returncode = 0x81
                if self.authdata[username] != passwd.strip(b"\x00"):
                    returncode = 0x80
            self.session._send_ipmi_net_payload(code=returncode)
            logger.info(
                "IPMI response sent (Set User Password) to %s", self.session.sockaddr
            )
        elif request["netfn"] in [0, 6] and request["command"] in [1, 2, 8, 9]:
            # chassis / device-id commands handled by the fake BMC
            self.bmc.handle_raw_request(request, self.session)
        else:
            returncode = 0xC1
            self.session._send_ipmi_net_payload(code=returncode)
            logger.info("IPMI unrecognized command from %s", self.session.sockaddr)
            logger.info(
                "IPMI response sent (Invalid Command) to %s", self.session.sockaddr
            )

    def start(self, host, port):
        """Bind the UDP socket and serve IPMI requests forever."""
        connection = (host, port)
        self.port = port
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
        self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.sock.setblocking(True)
        self.sock.bind(connection)
        self.server = DatagramServer(self.sock, self.handle)
        self.server.start()
        logger.info("IPMI server started on: %s", (host, self.server.server_port))
        self.server.serve_forever()

    def stop(self):
        """Stop the gevent DatagramServer."""
        self.server.stop()
| 21,402 | Python | .py | 501 | 30.526946 | 88 | 0.554768 | mushorg/conpot | 1,224 | 413 | 115 | GPL-2.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,463 | fakesession.py | mushorg_conpot/conpot/protocols/ipmi/fakesession.py | # Copyright 2015 Lenovo
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Author: Peter Sooky <xsooky00@stud.fit.vubtr.cz>
# Brno University of Technology, Faculty of Information Technology
import struct
import os
import socket
import logging
import pyghmi.exceptions as exc
import pyghmi.ipmi.private.constants as constants
from pyghmi.ipmi.private.session import Session
import random
import hmac
import hashlib
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
logger = logging.getLogger(__name__)
def _monotonic_time():
return os.times()[4]
class FakeSession(Session):
    """Server-side IPMI session used by Conpot's honeypot BMC.

    Subclasses pyghmi's Session but runs in "server mode": instead of
    issuing requests it parses incoming IPMI 1.5/2.0 packets, verifies
    integrity/confidentiality where negotiated, and sends responses
    built by the owning IpmiServer (reachable via ``self.server``).
    """

    def __init__(self, bmc, userid, password, port):
        # NOTE(review): unlike the pyghmi base class, ``bmc`` here looks
        # like the client's address string -- (bmc, port) forms the peer
        # sockaddr below; confirm against IpmiServer.handle().
        self.lastpayload = None
        self.servermode = True
        self.privlevel = 4
        self.request_entry = []
        self.socket = None
        self.response = None
        self.stage = 0  # handshake progress; 0 means nothing sent yet
        self.bmc = bmc
        self.port = port
        self.bmc_handlers = {}
        self.userid = userid
        self.password = password
        self._initsession()
        self.sockaddr = (bmc, port)
        self.server = None  # owning IpmiServer; assigned by the caller
        self.sol_handler = None
        self.ipmicallback = self._generic_callback
        logger.info("New IPMI session initialized for client (%s)", self.sockaddr)

    def _generic_callback(self, response):
        """Default callback: just remember the last parsed response."""
        self.lastresponse = response

    def _ipmi20(self, rawdata):
        """Dispatch an RMCP+ (IPMI 2.0) packet by its payload type.

        Session-setup payloads (0x10-0x15) are forwarded to the owning
        IpmiServer; authenticated IPMI/SOL payloads are integrity-checked,
        optionally decrypted, and passed on to _ipmi15().
        """
        data = list(struct.unpack("%dB" % len(rawdata), rawdata))
        # payload type numbers in IPMI specification Table 13-16; 6 bits
        payload_type = data[5] & 0b00111111
        # header = data[:15]; message = data[16:]
        if payload_type == 0x10:
            # rmcp+ open session request
            return self.server._got_rmcp_openrequest(data[16:])
        elif payload_type == 0x11:
            # ignore: rmcp+ open session response
            return
        elif payload_type == 0x12:
            # rakp message 1
            return self.server._got_rakp1(data[16:])
        elif payload_type == 0x13:
            # ignore: rakp message 2
            return
        elif payload_type == 0x14:
            # rakp message 3
            return self.server._got_rakp3(data[16:])
        elif payload_type == 0x15:
            # ignore: rakp message 4
            return
        elif payload_type == 0 or payload_type == 1:
            # payload_type == 0; IPMI message
            # payload_type == 1; SOL(Serial Over Lan)
            if not (data[5] & 0b01000000):
                # non-authenticated payload
                self.server.close_server_session()
                return
            encryption_bit = 0
            if data[5] & 0b10000000:
                # using AES-CBC-128
                encryption_bit = 1
            authcode = rawdata[-12:]
            if self.k1 is None:
                # we are in no shape to process a packet now
                self.server.close_server_session()
                return
            # SHA1-96 integrity check over everything after the RMCP header.
            expectedauthcode = hmac.new(self.k1, rawdata[4:-12], hashlib.sha1).digest()[
                :12
            ]
            if authcode != expectedauthcode:
                # BMC failed to assure integrity to us, drop it
                self.server.close_server_session()
                return
            sid = struct.unpack("<I", rawdata[6:10])[0]
            if sid != self.localsid:
                # session id mismatch, drop it
                self.server.close_server_session()
                return
            remseqnumber = struct.unpack("<I", rawdata[10:14])[0]
            if hasattr(self, "remseqnumber"):
                # reject replays/out-of-order packets (0xFFFFFFFF allows wrap)
                if remseqnumber < self.remseqnumber and self.remseqnumber != 0xFFFFFFFF:
                    self.server.close_server_session()
                    return
            self.remseqnumber = remseqnumber
            psize = data[14] + (data[15] << 8)
            payload = data[16 : 16 + psize]
            if encryption_bit:
                # first 16 payload bytes are the AES-CBC IV
                iv = rawdata[16:32]
                cipher = Cipher(algorithms.AES(self.aeskey), modes.CBC(iv))
                decryptor = cipher.decryptor()
                decrypted = (
                    decryptor.update(
                        struct.pack("%dB" % len(payload[16:]), *payload[16:])
                    )
                    + decryptor.finalize()
                )
                payload = struct.unpack("%dB" % len(decrypted), decrypted)
                # trailing byte is the pad length; drop pad + length byte
                padsize = payload[-1] + 1
                payload = list(payload[:-padsize])
            if payload_type == 0:
                self._ipmi15(payload)
            elif payload_type == 1:
                if self.last_payload_type == 1:
                    # previous SOL payload acknowledged; send any queued one
                    self.lastpayload = None
                    self.last_payload_type = None
                    self.waiting_sessions.pop(self, None)
                    if len(self.pendingpayloads) > 0:
                        (
                            nextpayload,
                            nextpayloadtype,
                            retry,
                        ) = self.pendingpayloads.popleft()
                        self.send_payload(
                            payload=nextpayload,
                            payload_type=nextpayloadtype,
                            retry=retry,
                        )
                if self.sol_handler:
                    # FIXME: self.sol_handler(payload)
                    pass
        else:
            logger.error("IPMI Unrecognized payload type.")
            self.server.close_server_session()
            return

    def _ipmi15(self, payload):
        """Record addressing fields from an IPMI 1.5 message and parse it."""
        self.seqlun = payload[4]
        self.clientaddr = payload[3]
        self.clientnetfn = (payload[1] >> 2) + 1
        self.clientcommand = payload[5]
        self._parse_payload(payload)
        return

    def _parse_payload(self, payload):
        """Strip header and checksums from *payload* and hand a request
        dict ({"netfn", "command", "data"}) to the registered callback."""
        if hasattr(self, "hasretried"):
            if self.hasretried:
                self.hasretried = 0
                self.tabooseq[(self.expectednetfn, self.expectedcmd, self.seqlun)] = 16
        # 0x1FF is not a valid netfn/cmd: effectively "expect nothing".
        self.expectednetfn = 0x1FF
        self.expectedcmd = 0x1FF
        self.waiting_sessions.pop(self, None)
        self.lastpayload = None
        self.last_payload_type = None
        response = {}
        response["netfn"] = payload[1] >> 2
        del payload[0:5]
        # remove the trailing checksum
        del payload[-1]
        response["command"] = payload[0]
        del payload[0:1]
        response["data"] = payload
        # randomized retry timeout between 0.5s and 1.0s
        self.timeout = 0.5 + (0.5 * random.random())
        self.ipmicallback(response)

    def _send_ipmi_net_payload(
        self,
        netfn=None,
        command=None,
        data=None,
        code=0,
        bridge_request=None,
        retry=None,
        delay_xmit=None,
    ):
        """Build an IPMI response payload (completion code + data) and send it.

        netfn/command default to the values captured from the client's
        request; *code* is prepended as the completion code byte.
        """
        if data is None:
            data = []
        if retry is None:
            retry = not self.servermode
        data = [code] + data
        if netfn is None:
            netfn = self.clientnetfn
        if command is None:
            command = self.clientcommand
        if data[0] is None and len(data) == 1:
            self.server.close_server_session()
            return
        ipmipayload = self._make_ipmi_payload(netfn, command, bridge_request, data)
        payload_type = constants.payload_types["ipmi"]
        self.send_payload(
            payload=ipmipayload,
            payload_type=payload_type,
            retry=retry,
            delay_xmit=delay_xmit,
        )

    def _make_ipmi_payload(self, netfn, command, bridge_request=None, data=()):
        """Assemble IPMB framing (addresses, checksums) around *data*."""
        bridge_msg = []
        self.expectedcmd = command
        self.expectednetfn = netfn + 1
        # IPMI spec forbids gaps bigger then 7 in seq number.
        # seqincrement = 7
        if bridge_request:
            addr = bridge_request.get("addr", 0x0)
            channel = bridge_request.get("channel", 0x0)
            bridge_msg = self._make_bridge_request_msg(channel, netfn, command)
            rqaddr = constants.IPMI_BMC_ADDRESS
            rsaddr = addr
        else:
            rqaddr = self.rqaddr
            rsaddr = constants.IPMI_BMC_ADDRESS
        # server mode: always address the reply to the client
        rsaddr = self.clientaddr
        header = [rsaddr, netfn << 2]
        reqbody = [rqaddr, self.seqlun, command] + list(data)
        headsum = self.server._checksum(*header)
        bodysum = self.server._checksum(*reqbody)
        payload = header + [headsum] + reqbody + [bodysum]
        if bridge_request:
            payload = bridge_msg + payload
            tail_csum = self.server._checksum(*payload[3:])
            payload.append(tail_csum)
        return payload

    def _aespad(self, data):
        """Pad *data* for AES-CBC per IPMI: bytes 1,2,...,N then the pad length."""
        newdata = list(data)
        currlen = len(data) + 1
        neededpad = currlen % 16
        if neededpad:
            neededpad = 16 - neededpad
        padval = 1
        while padval <= neededpad:
            newdata.append(padval)
            padval += 1
        newdata.append(neededpad)
        return newdata

    def send_payload(
        self,
        payload=(),
        payload_type=None,
        retry=True,
        delay_xmit=None,
        needskeepalive=False,
    ):
        """Frame *payload* as an RMCP/IPMI packet and transmit it.

        Handles both IPMI 1.5 and 2.0 framing, including optional RMCP+
        confidentiality (AES-CBC) and integrity (SHA1-96) when the
        corresponding algorithms were negotiated.  ``needskeepalive`` is
        accepted for pyghmi interface compatibility but unused here.
        """
        if payload and self.lastpayload:
            # a retryable payload is still in flight; queue this one
            self.pendingpayloads.append((payload, payload_type, retry))
            return
        if payload_type is None:
            payload_type = self.last_payload_type
        if not payload:
            payload = self.lastpayload
        # constant RMCP header for IPMI
        message = [0x6, 0x00, 0xFF, 0x07]
        if retry:
            self.lastpayload = payload
            self.last_payload_type = payload_type
        message.append(self.authtype)
        baretype = payload_type
        if self.integrityalgo:
            payload_type |= 0b01000000
        if self.confalgo:
            payload_type |= 0b10000000
        if self.ipmiversion == 2.0:
            message.append(payload_type)
            if baretype == 2:
                raise NotImplementedError("OEM Payloads")
            elif baretype not in constants.payload_types.values():
                raise NotImplementedError("Unrecognized payload type %d" % baretype)
            message += struct.unpack("!4B", struct.pack("<I", self.sessionid))
        message += struct.unpack("!4B", struct.pack("<I", self.sequencenumber))
        if self.ipmiversion == 1.5:
            message += struct.unpack("!4B", struct.pack("<I", self.sessionid))
            if not self.authtype == 0:
                message += self._ipmi15authcode(payload)
            message.append(len(payload))
            message += payload
            totlen = 34 + len(message)
            if totlen in (56, 84, 112, 128, 156):
                # Legacy pad as mandated by ipmi spec
                message.append(0)
        elif self.ipmiversion == 2.0:
            psize = len(payload)
            if self.confalgo:
                pad = (psize + 1) % 16
                if pad:
                    # if no pad needed, then we take no more action
                    pad = 16 - pad
                newpsize = psize + pad + 17
                message.append(newpsize & 0xFF)
                message.append(newpsize >> 8)
                iv = os.urandom(16)
                message += list(struct.unpack("16B", iv))
                payloadtocrypt = self._aespad(payload)
                cipher = Cipher(algorithms.AES(self.aeskey), modes.CBC(iv))
                encryptor = cipher.encryptor()
                crypted = (
                    encryptor.update(
                        struct.pack("%dB" % len(payloadtocrypt), *payloadtocrypt)
                    )
                    + encryptor.finalize()
                )
                crypted = list(struct.unpack("%dB" % len(crypted), crypted))
                message += crypted
            else:
                # no confidentiality algorithm
                message.append(psize & 0xFF)
                message.append(psize >> 8)
                message += list(payload)
            if self.integrityalgo:
                # pad to a 4-byte boundary, then append pad length + next-header
                neededpad = (len(message) - 2) % 4
                if neededpad:
                    neededpad = 4 - neededpad
                message += [0xFF] * neededpad
                message.append(neededpad)
                message.append(7)
                integdata = message[4:]
                authcode = hmac.new(
                    self.k1,
                    struct.pack("%dB" % len(integdata), *integdata),
                    hashlib.sha1,
                ).digest()[
                    :12
                ]  # SHA1-96 - per RFC2404 truncates to 96 bits
                message += struct.unpack("12B", authcode)
        self.netpacket = struct.pack("!%dB" % len(message), *message)
        self.stage += 1
        self._xmit_packet(retry, delay_xmit=delay_xmit)

    def send_ipmi_response(self, data=None, code=0):
        """Convenience wrapper: send a response with completion *code*."""
        if data is None:
            data = []
        self._send_ipmi_net_payload(data=data, code=code)

    def _xmit_packet(self, retry=True, delay_xmit=None):
        """Transmit self.netpacket now, or schedule it when delay_xmit is set."""
        if self.sequencenumber:
            self.sequencenumber += 1
        if delay_xmit is not None:
            # skip transmit, let retry timer do it's thing
            self.waiting_sessions[self] = {}
            self.waiting_sessions[self]["ipmisession"] = self
            self.waiting_sessions[self]["timeout"] = delay_xmit + _monotonic_time()
            return
        if self.sockaddr:
            self.send_data(self.netpacket, self.sockaddr)
        else:
            # no known peer address yet: resolve and try every candidate
            self.allsockaddrs = []
            try:
                for res in socket.getaddrinfo(
                    self.bmc, self.port, 0, socket.SOCK_DGRAM
                ):
                    sockaddr = res[4]
                    if res[0] == socket.AF_INET:
                        # convert the sockaddr to AF_INET6
                        newhost = "::ffff:" + sockaddr[0]
                        sockaddr = (newhost, sockaddr[1], 0, 0)
                    self.allsockaddrs.append(sockaddr)
                    self.bmc_handlers[sockaddr] = self
                    self.send_data(self.netpacket, sockaddr)
            except socket.gaierror:
                raise exc.IpmiException("Unable to transmit to specified address")
        if retry:
            self.waiting_sessions[self] = {}
            self.waiting_sessions[self]["ipmisession"] = self
            self.waiting_sessions[self]["timeout"] = self.timeout + _monotonic_time()

    def send_data(self, packet, address):
        """Write *packet* to the UDP socket and log the transmission."""
        logger.info("IPMI response sent to %s", address)
        logger.debug("IPMI: Sending response {} to client {}".format(packet, address))
        self.socket.sendto(packet, address)
| 15,066 | Python | .py | 373 | 28.120643 | 88 | 0.548163 | mushorg/conpot | 1,224 | 413 | 115 | GPL-2.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,464 | fakebmc.py | mushorg_conpot/conpot/protocols/ipmi/fakebmc.py | # Copyright 2015 Lenovo
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Author: Peter Sooky <xsooky00@stud.fit.vubtr.cz>
# Brno University of Technology, Faculty of Information Technology
import logging
from pyghmi.ipmi.bmc import Bmc
logger = logging.getLogger()
class FakeBmc(Bmc):
    """Minimal in-memory BMC emulation for the Conpot IPMI honeypot.

    Only the chassis power state and selected boot device are tracked;
    every request is logged so honeypot interaction stays visible.
    """

    def __init__(self, authdata, port):
        self.authdata = authdata
        self.port = port
        # Static identity values reported to IPMI clients.
        self.deviceid = 0x25
        self.revision = 0x13
        self.firmwaremajor = 0x14
        self.firmwareminor = 0x1
        self.ipmiversion = 2
        self.additionaldevices = 0
        self.mfgid = 0xF
        self.prodid = 0xE
        # Mutable chassis state.
        self.powerstate = "off"
        self.bootdevice = "default"
        logger.info("IPMI BMC initialized.")

    def get_boot_device(self):
        """Return the currently selected boot device."""
        logger.info("IPMI BMC Get_Boot_Device request.")
        return self.bootdevice

    def set_boot_device(self, bootdevice):
        """Remember the boot device requested by the client."""
        logger.info("IPMI BMC Set_Boot_Device request.")
        self.bootdevice = bootdevice

    def cold_reset(self):
        """Reset chassis state back to its power-on defaults."""
        logger.info("IPMI BMC Cold_Reset request.")
        self.powerstate = "off"
        self.bootdevice = "default"

    def get_power_state(self):
        """Return the current chassis power state ("on" or "off")."""
        logger.info("IPMI BMC Get_Power_State request.")
        return self.powerstate

    def power_off(self):
        """Force the chassis power state to "off"."""
        logger.info("IPMI BMC Power_Off request.")
        self.powerstate = "off"

    def power_on(self):
        """Force the chassis power state to "on"."""
        logger.info("IPMI BMC Power_On request.")
        self.powerstate = "on"

    def power_reset(self):
        """A reset leaves the emulated chassis powered down."""
        logger.info("IPMI BMC Power_Reset request.")
        self.powerstate = "off"

    def power_cycle(self):
        """Toggle the chassis power state."""
        logger.info("IPMI BMC Power_Cycle request.")
        self.powerstate = "on" if self.powerstate == "off" else "off"

    def power_shutdown(self):
        """Soft shutdown: the chassis ends up powered off."""
        logger.info("IPMI BMC Power_Shutdown request.")
        self.powerstate = "off"
| 2,417 | Python | .py | 64 | 31.390625 | 74 | 0.670946 | mushorg/conpot | 1,224 | 413 | 115 | GPL-2.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,465 | web_server.py | mushorg_conpot/conpot/protocols/http/web_server.py | # Copyright (C) 2013 Lukas Rist <glaslos@gmail.com>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import os
import logging
from conpot.protocols.http.command_responder import CommandResponder
from conpot.core.protocol_wrapper import conpot_protocol
logger = logging.getLogger(__name__)
@conpot_protocol
class HTTPServer(object):
    """Conpot HTTP protocol front-end.

    Thin wrapper that owns a CommandResponder and adapts it to the
    start()/stop() interface Conpot expects from protocol services.
    """

    def __init__(self, template, template_directory, args):
        self.template = template
        self.template_directory = template_directory
        self.server_port = None
        self.cmd_responder = None

    def start(self, host, port):
        """Create the responder and serve HTTP requests until stopped."""
        logger.info("HTTP server started on: %s", (host, port))
        http_root = os.path.join(self.template_directory, "http")
        responder = CommandResponder(host, port, self.template, http_root)
        responder.httpd.allow_reuse_address = True
        self.cmd_responder = responder
        self.server_port = responder.server_port
        responder.serve_forever()

    def stop(self):
        """Shut the responder down, if start() ever created one."""
        responder = self.cmd_responder
        if responder:
            responder.stop()
# --- dataset metadata row removed (file statistics of the preceding file) ---
# Copyright (C) 2013 Daniel creo Haslinger <creo-conpot@blackmesa.at>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import logging
import time
import random
import os
from datetime import datetime
from html.parser import HTMLParser
from socketserver import ThreadingMixIn
import http.server
import http.client
from lxml import etree
import conpot.core as conpot_core
from conpot.utils.networking import str_to_bytes
import gevent
logger = logging.getLogger(__name__)
class HTTPServer(http.server.BaseHTTPRequestHandler):
    """Request handler that serves the responses defined by the conpot XML
    template (static files, proxied targets, fake status pages)."""

    def log(self, version, request_type, addr, request, response=None):
        """Record one request (and optional response) in the conpot session
        store and the module logger.

        :param version: HTTP version string of the request
        :param request_type: HTTP method name
        :param addr: (host, port) tuple of the remote peer
        :param request: request description (path, headers, body data)
        :param response: optional response status to record alongside
        """
        # register/lookup the session keyed by remote and local endpoints.
        # NOTE(review): reaches into the private `_sock` of the (gevent)
        # connection object for the local address — confirm against the
        # socket wrapper in use.
        session = conpot_core.get_session(
            "http",
            addr[0],
            addr[1],
            self.connection._sock.getsockname()[0],
            self.connection._sock.getsockname()[1],
        )
        # NOTE(review): log_dict is built but never emitted/persisted in this
        # method — presumably intended for the FIXME below; confirm before
        # relying on it.
        log_dict = {
            "remote": addr,
            "timestamp": datetime.utcnow(),
            "data_type": "http",
            "dst_port": self.server.server_port,
            "data": {
                0: {"request": "{0} {1}: {2}".format(version, request_type, request)}
            },
        }
        logger.info(
            "%s %s request from %s: %s. %s",
            version,
            request_type,
            addr,
            request,
            session.id,
        )
        if response:
            logger.info(
                "%s response to %s: %s. %s", version, addr, response, session.id
            )
            log_dict["data"][0]["response"] = "{0} response: {1}".format(
                version, response
            )
            session.add_event({"request": str(request), "response": str(response)})
        else:
            session.add_event({"request": str(request)})

    # FIXME: Proper logging
def get_entity_headers(self, rqfilename, headers, configuration):
xml_headers = configuration.xpath(
'//http/htdocs/node[@name="' + rqfilename + '"]/headers/*'
)
if xml_headers:
# retrieve all headers assigned to this entity
for header in xml_headers:
headers.append((header.attrib["name"], header.text))
return headers
def get_trigger_appendix(self, rqfilename, rqparams, configuration):
xml_triggers = configuration.xpath(
'//http/htdocs/node[@name="' + rqfilename + '"]/triggers/*'
)
if xml_triggers:
paramlist = rqparams.split("&")
# retrieve all subselect triggers assigned to this entity
for triggers in xml_triggers:
triggerlist = triggers.text.split(";")
trigger_missed = False
for trigger in triggerlist:
if not trigger in paramlist:
trigger_missed = True
if not trigger_missed:
return triggers.attrib["appendix"]
return None
def get_entity_trailers(self, rqfilename, configuration):
trailers = []
xml_trailers = configuration.xpath(
'//http/htdocs/node[@name="' + rqfilename + '"]/trailers/*'
)
if xml_trailers:
# retrieve all headers assigned to this entity
for trailer in xml_trailers:
trailers.append((trailer.attrib["name"], trailer.text))
return trailers
def get_status_headers(self, status, headers, configuration):
xml_headers = configuration.xpath(
'//http/statuscodes/status[@name="' + str(status) + '"]/headers/*'
)
if xml_headers:
# retrieve all headers assigned to this status
for header in xml_headers:
headers.append((header.attrib["name"], header.text))
return headers
def get_status_trailers(self, status, configuration):
trailers = []
xml_trailers = configuration.xpath(
'//http/statuscodes/status[@name="' + str(status) + '"]/trailers/*'
)
if xml_trailers:
# retrieve all trailers assigned to this status
for trailer in xml_trailers:
trailers.append((trailer.attrib["name"], trailer.text))
return trailers
    def send_response(self, code, message=None):
        """Send the response header and log the response code.

        This function is overloaded to change the behaviour when
        loggers and sending default headers.

        :param code: numeric HTTP status code
        :param message: optional reason phrase; when None it is looked up in
            the ``responses`` table inherited from BaseHTTPRequestHandler.
        """
        # replace integrated loggers with conpot logger..
        # self.log_request(code)
        if message is None:
            if code in self.responses:
                message = self.responses[code][0]
            else:
                message = ""
        # HTTP/0.9 responses carry no status line at all
        if self.request_version != "HTTP/0.9":
            msg = str_to_bytes(
                "{} {} {}\r\n".format(self.protocol_version, code, message)
            )
            self.wfile.write(msg)

        # the following two headers are omitted, which is why we override
        # send_response() at all. We do this one on our own...
        # - self.send_header('Server', self.version_string())
        # - self.send_header('Date', self.date_time_string())
    def substitute_template_fields(self, payload):
        """Replace <condata /> template tags in *payload* with live values.

        Delegates the substitution to TemplateParser and returns the
        substituted payload as a str.
        """
        # initialize parser with our payload
        parser = TemplateParser(payload)
        # triggers the parser, just in case of open / incomplete tags..
        parser.close()
        # retrieve and return (substituted) payload
        return parser.payload
    def load_status(
        self,
        status,
        requeststring,
        requestheaders,
        headers,
        configuration,
        docpath,
        method="GET",
        body=None,
    ):
        """Retrieves headers and payload for a given status code.

        Certain status codes can be configured to forward the
        request to a remote system. If not available, generate
        a minimal response.

        Returns a (status, headers, trailers, payload, chunks) tuple, where
        ``chunks`` is "0" for plain transfer or a comma separated list of
        chunk sizes for chunked transfer encoding.
        """
        # handle PROXY tag
        entity_proxy = configuration.xpath(
            '//http/statuscodes/status[@name="' + str(status) + '"]/proxy'
        )
        if entity_proxy:
            source = "proxy"
            target = entity_proxy[0].xpath("./text()")[0]
        else:
            source = "filesystem"
        # handle TARPIT tag
        entity_tarpit = configuration.xpath(
            '//http/statuscodes/status[@name="' + str(status) + '"]/tarpit'
        )
        if entity_tarpit:
            tarpit = self.server.config_sanitize_tarpit(
                entity_tarpit[0].xpath("./text()")[0]
            )
        else:
            tarpit = None
        # check if we have to delay further actions due to global or local TARPIT configuration
        if tarpit is not None:
            # this node has its own delay configuration
            self.server.do_tarpit(tarpit)
        else:
            # no delay configuration for this node. check for global latency
            if self.server.tarpit is not None:
                # fall back to the globally configured latency
                self.server.do_tarpit(self.server.tarpit)
        # If the requested resource resides on our filesystem,
        # we try retrieve all metadata and the resource itself from there.
        if source == "filesystem":
            # retrieve headers from entities configuration block
            headers = self.get_status_headers(status, headers, configuration)
            # retrieve headers from entities configuration block
            trailers = self.get_status_trailers(status, configuration)
            # retrieve payload directly from filesystem, if possible.
            # If this is not possible, return an empty, zero sized string.
            try:
                # status may arrive as an http.HTTPStatus-like enum; reduce
                # it to its integer value before building the file name
                if not isinstance(status, int):
                    status = status.value
                with open(
                    os.path.join(docpath, "statuscodes", str(int(status)) + ".status"),
                    "rb",
                ) as f:
                    payload = f.read()
            except IOError as e:
                logger.exception("%s", e)
                payload = ""
            # there might be template data that can be substituted within the
            # payload. We only substitute data that is going to be displayed
            # by the browser:

            # perform template substitution on payload
            payload = self.substitute_template_fields(payload)
            # How do we transport the content?
            chunked_transfer = configuration.xpath(
                '//http/htdocs/node[@name="' + str(status) + '"]/chunks'
            )

            if chunked_transfer:
                # Append a chunked transfer encoding header
                headers.append(("Transfer-Encoding", "chunked"))
                chunks = str(chunked_transfer[0].xpath("./text()")[0])
            else:
                # Calculate and append a content length header
                headers.append(("Content-Length", payload.__len__()))
                chunks = "0"

            return status, headers, trailers, payload, chunks

        # the requested status code is configured to forward the
        # originally targeted resource to a remote system.
        elif source == "proxy":
            # open a connection to the remote system.
            # If something goes wrong, fall back to 503.

            # NOTE: we use try:except here because there is no perfect
            # platform independent way to check file accessibility.
            trailers = []
            chunks = "0"
            try:
                # Modify a few headers to fit our new destination and the fact
                # that we're proxying while being unaware of any session foo..
                requestheaders["Host"] = target
                requestheaders["Connection"] = "close"
                conn = http.client.HTTPConnection(target)
                conn.request(method, requeststring, body, dict(requestheaders))
                response = conn.getresponse()
                remotestatus = int(response.status)
                headers = (
                    response.getheaders()
                )  # We REPLACE the headers to avoid duplicates!
                payload = response.read()

                # WORKAROUND: to get around a strange httplib-behaviour when it comes
                # to chunked transfer encoding, we replace the chunked-header with a
                # valid Content-Length header:
                for i, header in enumerate(headers):
                    if (
                        header[0].lower() == "transfer-encoding"
                        and header[1].lower() == "chunked"
                    ):
                        del headers[i]
                        break
                status = remotestatus
            # NOTE(review): bare except — intentionally swallows any proxy
            # failure (connection refused, DNS, timeouts) to fall back to 503,
            # but it also masks programming errors; consider `except Exception`.
            except:
                # before falling back to 503, we check if we are ALREADY dealing with a 503
                # to prevent an infinite request handling loop...
                if status != 503:
                    # we're handling another error here.
                    # generate a 503 response from configuration.
                    (status, headers, trailers, payload, chunks) = self.load_status(
                        503,
                        requeststring,
                        self.headers,
                        headers,
                        configuration,
                        docpath,
                    )
                else:
                    # oops, we're heading towards an infinite loop here,
                    # generate a minimal 503 response regardless of the configuration.
                    status = 503
                    payload = ""
                    chunks = "0"
                    headers.append(("Content-Length", 0))
            return status, headers, trailers, payload, chunks
    def load_entity(self, requeststring, headers, configuration, docpath):
        """
        Retrieves status, headers and payload for a given entity, that
        can be stored either local or on a remote system.

        Returns a (status, headers, trailers, payload, chunks) tuple, where
        ``chunks`` is "0" for plain transfer or a comma separated list of
        chunk sizes for chunked transfer encoding.
        """
        # extract filename and GET parameters from request string
        rqfilename = requeststring.partition("?")[0]
        rqparams = requeststring.partition("?")[2]

        # handle ALIAS tag: serve another node's content under this name
        entity_alias = configuration.xpath(
            '//http/htdocs/node[@name="' + rqfilename + '"]/alias'
        )
        if entity_alias:
            rqfilename = entity_alias[0].xpath("./text()")[0]

        # handle SUBSELECT tag: triggers can redirect to a variant entity
        rqfilename_appendix = self.get_trigger_appendix(
            rqfilename, rqparams, configuration
        )
        if rqfilename_appendix:
            rqfilename += "_" + rqfilename_appendix

        # handle PROXY tag
        entity_proxy = configuration.xpath(
            '//http/htdocs/node[@name="' + rqfilename + '"]/proxy'
        )
        if entity_proxy:
            source = "proxy"
            target = entity_proxy[0].xpath("./text()")[0]
        else:
            source = "filesystem"

        # handle TARPIT tag
        entity_tarpit = configuration.xpath(
            '//http/htdocs/node[@name="' + rqfilename + '"]/tarpit'
        )
        if entity_tarpit:
            tarpit = self.server.config_sanitize_tarpit(
                entity_tarpit[0].xpath("./text()")[0]
            )
        else:
            tarpit = None

        # check if we have to delay further actions due to global or local TARPIT configuration
        if tarpit is not None:
            # this node has its own delay configuration
            self.server.do_tarpit(tarpit)
        else:
            # no delay configuration for this node. check for global latency
            if self.server.tarpit is not None:
                # fall back to the globally configured latency
                self.server.do_tarpit(self.server.tarpit)

        # If the requested resource resides on our filesystem,
        # we try retrieve all metadata and the resource itself from there.
        if source == "filesystem":
            # handle STATUS tag
            # ( filesystem only, since proxied requests come with their own status )
            entity_status = configuration.xpath(
                '//http/htdocs/node[@name="' + rqfilename + '"]/status'
            )
            if entity_status:
                status = int(entity_status[0].xpath("./text()")[0])
            else:
                status = 200
            # retrieve headers from entities configuration block
            headers = self.get_entity_headers(rqfilename, headers, configuration)
            # retrieve trailers from entities configuration block
            trailers = self.get_entity_trailers(rqfilename, configuration)
            # retrieve payload directly from filesystem, if possible.
            # If this is not possible, return an empty, zero sized string.
            if os.path.isabs(rqfilename):
                relrqfilename = rqfilename[1:]
            else:
                relrqfilename = rqfilename
            try:
                with open(os.path.join(docpath, "htdocs", relrqfilename), "rb") as f:
                    payload = f.read()
            except IOError as e:
                # a directory (not a missing file) is an expected miss, so
                # only log the error for genuinely unreadable files
                if not os.path.isdir(os.path.join(docpath, "htdocs", relrqfilename)):
                    logger.error("Failed to get template content: %s", e)
                payload = ""
            # there might be template data that can be substituted within the
            # payload. We only substitute data that is going to be displayed
            # by the browser:
            templated = False
            for header in headers:
                if (
                    header[0].lower() == "content-type"
                    and header[1].lower() == "text/html"
                ):
                    templated = True
            if templated:
                # perform template substitution on payload
                payload = self.substitute_template_fields(payload)
            # How do we transport the content?
            chunked_transfer = configuration.xpath(
                '//http/htdocs/node[@name="' + rqfilename + '"]/chunks'
            )
            if chunked_transfer:
                # Calculate and append a chunked transfer encoding header
                headers.append(("Transfer-Encoding", "chunked"))
                chunks = str(chunked_transfer[0].xpath("./text()")[0])
            else:
                # Calculate and append a content length header
                headers.append(("Content-Length", payload.__len__()))
                chunks = "0"
            return status, headers, trailers, payload, chunks
        # the requested resource resides on another server,
        # so we act as a proxy between client and target system
        elif source == "proxy":
            # open a connection to the remote system.
            # If something goes wrong, fall back to 503
            trailers = []
            try:
                conn = http.client.HTTPConnection(target)
                conn.request("GET", requeststring)
                response = conn.getresponse()
                status = int(response.status)
                headers = (
                    response.getheaders()
                )  # We REPLACE the headers to avoid duplicates!
                payload = response.read()
                chunks = "0"
            # NOTE(review): bare except — swallows all proxy failures to fall
            # back to a configured 503; consider narrowing to `except Exception`.
            except:
                status = 503
                (status, headers, trailers, payload, chunks) = self.load_status(
                    status, requeststring, self.headers, headers, configuration, docpath
                )
            return status, headers, trailers, payload, chunks
def send_chunked(self, chunks, payload, trailers):
"""Send payload via chunked transfer encoding to the
client, followed by eventual trailers."""
chunk_list = chunks.split(",")
pointer = 0
for cwidth in chunk_list:
cwidth = int(cwidth)
# send chunk length indicator
self.wfile.write(format(cwidth, "x").upper() + "\r\n")
# send chunk payload
self.wfile.write(payload[pointer : pointer + cwidth] + "\r\n")
pointer += cwidth
# is there another chunk that has not been configured? Send it anyway for the sake of completeness..
if len(payload) > pointer:
# send chunk length indicator
self.wfile.write(format(len(payload) - pointer, "x").upper() + "\r\n")
# send chunk payload
self.wfile.write(payload[pointer:] + "\r\n")
# we're done with the payload. Send a zero chunk as EOF indicator
self.wfile.write("0" + "\r\n")
# if there are trailing headers :-) we send them now..
for trailer in trailers:
self.wfile.write("%s: %s\r\n" % (trailer[0], trailer[1]))
# and finally, the closing ceremony...
self.wfile.write("\r\n")
    def send_error(self, code, message=None):
        """Send and log an error reply.

        This method is overloaded to make use of load_status()
        to allow handling of "Unsupported Method" errors.

        :param code: numeric HTTP error status to send
        :param message: optional text that is logged (not sent to the client)
        """
        headers = []
        headers.extend(self.server.global_headers)
        configuration = self.server.configuration
        docpath = self.server.docpath

        # when the request line could not even be parsed, no headers object
        # exists yet — create an empty one so the code below can proceed
        if not hasattr(self, "headers"):
            self.headers = self.MessageClass(self.rfile)

        # drain any request body so the connection buffers stay clean
        trace_data_length = self.headers.get("content-length")
        unsupported_request_data = None

        if trace_data_length:
            unsupported_request_data = self.rfile.read(int(trace_data_length))

        # there are certain situations where variables are (not yet) registered
        # ( e.g. corrupted request syntax ). In this case, we set them manually.
        if hasattr(self, "path") and self.path is not None:
            requeststring = self.path
        else:
            requeststring = ""
            self.path = None

        if message is not None:
            logger.info(message)

        # generate the appropriate status code, header and payload
        (status, headers, trailers, payload, chunks) = self.load_status(
            code,
            requeststring.partition("?")[0],
            self.headers,
            headers,
            configuration,
            docpath,
        )

        # send http status to client
        self.send_response(status)

        # send all headers to client
        for header in headers:
            self.send_header(header[0], header[1])

        self.end_headers()

        # decide upon sending content as a whole or chunked
        if chunks == "0":
            # send payload as a whole to the client
            if type(payload) != bytes:
                payload = payload.encode()
            self.wfile.write(payload)
        else:
            # send payload in chunks to the client
            self.send_chunked(chunks, payload, trailers)

        # loggers
        self.log(
            self.request_version,
            self.command,
            self.client_address,
            (self.path, self.headers._headers, unsupported_request_data),
            status,
        )
    def do_TRACE(self):
        """Handle TRACE requests.

        Echoes the request headers back as a ``message/http`` body, or
        replies 501 when TRACE is disabled in the template configuration.
        """
        # fetch configuration dependent variables from server instance
        headers = []
        headers.extend(self.server.global_headers)
        configuration = self.server.configuration
        docpath = self.server.docpath

        # retrieve TRACE body data
        # ( sticking to the HTTP protocol, there should not be any body in TRACE requests,
        # an attacker could though use the body to inject data if not flushed correctly,
        # which is done by accessing the data like we do now - just to be secure.. )
        trace_data_length = self.headers.get("content-length")
        trace_data = None

        if trace_data_length:
            trace_data = self.rfile.read(int(trace_data_length))

        # check configuration: are we allowed to use this method?
        if self.server.disable_method_trace is True:
            # Method disabled by configuration. Fall back to 501.
            status = 501
            (status, headers, _, payload, _) = self.load_status(
                status, self.path, self.headers, headers, configuration, docpath
            )
        else:
            # Method is enabled
            status = 200
            payload = ""
            headers.append(("Content-Type", "message/http"))

            # Gather all request data and return it to sender..
            for rqheader in self.headers:
                payload = (
                    payload + str(rqheader) + ": " + self.headers.get(rqheader) + "\n"
                )

        # send initial HTTP status line to client
        self.send_response(status)

        # send all headers to client
        for header in headers:
            self.send_header(header[0], header[1])

        self.end_headers()

        # send payload (the actual content) to client
        if type(payload) != bytes:
            payload = payload.encode()
        self.wfile.write(payload)

        # loggers
        self.log(
            self.request_version,
            self.command,
            self.client_address,
            (self.path, self.headers._headers, trace_data),
            status,
        )
    def do_HEAD(self):
        """Handle HEAD requests.

        Sends the same status/headers a GET for this path would produce,
        but no body; replies 501 when HEAD is disabled by configuration.
        """
        # fetch configuration dependent variables from server instance
        headers = list()
        headers.extend(self.server.global_headers)
        configuration = self.server.configuration
        docpath = self.server.docpath

        # retrieve HEAD body data
        # ( sticking to the HTTP protocol, there should not be any body in HEAD requests,
        # an attacker could though use the body to inject data if not flushed correctly,
        # which is done by accessing the data like we do now - just to be secure.. )
        head_data_length = self.headers.get("content-length")
        head_data = None

        if head_data_length:
            head_data = self.rfile.read(int(head_data_length))

        # check configuration: are we allowed to use this method?
        if self.server.disable_method_head is True:
            # Method disabled by configuration. Fall back to 501.
            status = 501
            (status, headers, _, _, _) = self.load_status(
                status, self.path, self.headers, headers, configuration, docpath
            )
        else:
            # try to find a configuration item for this HEAD request
            try:
                entity_xml = configuration.xpath(
                    '//http/htdocs/node[@name="' + self.path.partition("?")[0] + '"]'
                )
            except etree.XPathEvalError:
                # request path contains characters that break the XPath query
                entity_xml = None
                logger.debug(
                    "Malformed HTTP:HEAD URN. Failed to handle <{}>. (Client: {})".format(
                        self.path, self.client_address
                    )
                )

            if entity_xml:
                # A config item exists for this entity. Handle it..
                (status, headers, _, _, _) = self.load_entity(
                    self.path, headers, configuration, docpath
                )
            else:
                # No config item could be found. Fall back to a standard 404..
                status = 404
                (status, headers, _, _, _) = self.load_status(
                    status, self.path, self.headers, headers, configuration, docpath
                )

        # send initial HTTP status line to client
        self.send_response(status)

        # send all headers to client
        for header in headers:
            self.send_header(header[0], header[1])

        self.end_headers()

        # loggers
        self.log(
            self.request_version,
            self.command,
            self.client_address,
            (self.path, self.headers._headers, head_data),
            status,
        )
def do_OPTIONS(self):
"""Handle OPTIONS requests."""
# fetch configuration dependent variables from server instance
headers = []
headers.extend(self.server.global_headers)
configuration = self.server.configuration
docpath = self.server.docpath
# retrieve OPTIONS body data
# ( sticking to the HTTP protocol, there should not be any body in OPTIONS requests,
# an attacker could though use the body to inject data if not flushed correctly,
# which is done by accessing the data like we do now - just to be secure.. )
options_data_length = self.headers.get("content-length")
options_data = None
if options_data_length:
options_data = self.rfile.read(int(options_data_length))
# check configuration: are we allowed to use this method?
if self.server.disable_method_options is True:
# Method disabled by configuration. Fall back to 501.
status = 501
(status, headers, _, payload, _) = self.load_status(
status, self.path, self.headers, headers, configuration, docpath
)
else:
status = 200
payload = ""
# Add ALLOW header to response. GET, POST and OPTIONS are static, HEAD and TRACE are dynamic
allowed_methods = "GET"
if self.server.disable_method_head is False:
# add head to list of allowed methods
allowed_methods += ",HEAD"
allowed_methods += ",POST,OPTIONS"
if self.server.disable_method_trace is False:
allowed_methods += ",TRACE"
headers.append(("Allow", allowed_methods))
# Calculate and append a content length header
headers.append(("Content-Length", payload.__len__()))
# Append CC header
headers.append(("Connection", "close"))
# Append CT header
headers.append(("Content-Type", "text/html"))
# send initial HTTP status line to client
self.send_response(status)
# send all headers to client
for header in headers:
self.send_header(header[0], header[1])
self.end_headers()
# loggers
self.log(
self.request_version,
self.command,
self.client_address,
(self.path, self.headers._headers, options_data),
status,
)
    def do_GET(self):
        """Handle GET requests.

        Serves the entity configured for the request path, or the configured
        404 status page when the path is unknown.
        """
        # fetch configuration dependent variables from server instance
        headers = []
        headers.extend(self.server.global_headers)
        configuration = self.server.configuration
        docpath = self.server.docpath

        # retrieve GET body data
        # ( sticking to the HTTP protocol, there should not be any body in GET requests,
        # an attacker could though use the body to inject data if not flushed correctly,
        # which is done by accessing the data like we do now - just to be secure.. )
        get_data_length = self.headers.get("content-length")
        get_data = None

        if get_data_length:
            get_data = self.rfile.read(int(get_data_length))

        # try to find a configuration item for this GET request
        try:
            entity_xml = configuration.xpath(
                '//http/htdocs/node[@name="' + self.path.partition("?")[0] + '"]'
            )
        except etree.XPathEvalError:
            # request path contains characters that break the XPath query
            entity_xml = None
            logger.debug(
                "Malformed HTTP:GET URN. Failed to handle <{}>. (Client: {})".format(
                    self.path, self.client_address
                )
            )

        if entity_xml:
            # A config item exists for this entity. Handle it..
            (status, headers, trailers, payload, chunks) = self.load_entity(
                self.path, headers, configuration, docpath
            )
        else:
            # No config item could be found. Fall back to a standard 404..
            status = 404
            (status, headers, trailers, payload, chunks) = self.load_status(
                status, self.path, self.headers, headers, configuration, docpath, "GET"
            )

        # send initial HTTP status line to client
        self.send_response(status)

        # send all headers to client
        for header in headers:
            self.send_header(header[0], header[1])

        self.end_headers()

        # decide upon sending content as a whole or chunked
        if chunks == "0":
            # send payload as a whole to the client
            self.wfile.write(str_to_bytes(payload))
        else:
            # send payload in chunks to the client
            self.send_chunked(chunks, payload, trailers)

        # loggers
        self.log(
            self.request_version,
            self.command,
            self.client_address,
            (self.path, self.headers._headers, get_data),
            status,
        )
    def do_POST(self):
        """Handle POST requests.

        Serves the entity configured for the request path; on an unknown path
        the 404 status handler also receives the POST body (relevant when the
        status is configured as a proxy).
        """
        # fetch configuration dependent variables from server instance
        headers = list()
        headers.extend(self.server.global_headers)
        configuration = self.server.configuration
        docpath = self.server.docpath

        # retrieve POST data ( important to flush request buffers )
        post_data_length = self.headers.get("content-length")
        post_data = None

        if post_data_length:
            post_data = self.rfile.read(int(post_data_length))

        # try to find a configuration item for this POST request
        try:
            entity_xml = configuration.xpath(
                '//http/htdocs/node[@name="' + self.path.partition("?")[0] + '"]'
            )
        except etree.XPathEvalError:
            # request path contains characters that break the XPath query
            entity_xml = None
            logger.debug(
                "Malformed HTTP:POST URN. Failed to handle <{}>. (Client: {})".format(
                    self.path, self.client_address
                )
            )

        if entity_xml:
            # A config item exists for this entity. Handle it..
            (status, headers, trailers, payload, chunks) = self.load_entity(
                self.path, headers, configuration, docpath
            )
        else:
            # No config item could be found. Fall back to a standard 404..
            status = 404
            (status, headers, trailers, payload, chunks) = self.load_status(
                status,
                self.path,
                self.headers,
                headers,
                configuration,
                docpath,
                "POST",
                post_data,
            )

        # send initial HTTP status line to client
        self.send_response(status)

        # send all headers to client
        for header in headers:
            self.send_header(header[0], header[1])

        self.end_headers()

        # decide upon sending content as a whole or chunked
        if chunks == "0":
            # send payload as a whole to the client
            if type(payload) != bytes:
                payload = payload.encode()
            self.wfile.write(payload)
        else:
            # send payload in chunks to the client
            self.send_chunked(chunks, payload, trailers)

        # loggers
        self.log(
            self.request_version,
            self.command,
            self.client_address,
            (self.path, self.headers._headers, post_data),
            status,
        )
class TemplateParser(HTMLParser):
    """Parses a payload and substitutes conpot ``<condata />`` template tags
    with live values (databus lookups or evaluated expressions).

    The substituted result is available as ``self.payload`` after
    construction (the payload is fed to the parser in ``__init__``).
    """

    def __init__(self, data):
        # databus supplies live values for source="databus" template tags
        self.databus = conpot_core.get_databus()
        if type(data) == bytes:
            data = data.decode()
        self.data = data
        HTMLParser.__init__(self)
        self.payload = self.data
        self.feed(self.data)

    def handle_startendtag(self, tag, attrs):
        """handles template tags provided in XHTML notation.

        Expected format: <condata source="(engine)" key="(descriptor)" />
        Example: <condata source="databus" key="SystemDescription" />

        at the moment, the parser is space- and case-sensitive(!),
        this could be improved by using REGEX for replacing the template tags
        with actual values.
        """
        source = ""
        key = ""

        # only parse tags that are conpot template tags ( <condata /> )
        if tag == "condata":
            # initialize original tag (needed for value replacement)
            origin = "<" + tag

            for attribute in attrs:
                # extend original tag
                origin = origin + " " + attribute[0] + '="' + attribute[1] + '"'

                # fill variables with all meta information needed to
                # gather actual data from the other engines (databus, modbus, ..)
                if attribute[0] == "source":
                    source = attribute[1]
                elif attribute[0] == "key":
                    key = attribute[1]

            # finalize original tag
            origin += " />"

            # we really need a key in order to do our work..
            if key:
                # deal with databus powered tags:
                if source == "databus":
                    self.result = self.databus.get_value(key)
                    self.payload = self.payload.replace(origin, str(self.result))

                # deal with eval powered tags:
                elif source == "eval":
                    result = ""
                    # evaluate key
                    # SECURITY NOTE(review): eval() executes arbitrary Python
                    # taken from the template. This is acceptable only while
                    # templates are trusted local files — never feed
                    # untrusted/remote data through this path.
                    try:
                        result = eval(key)
                    except Exception as e:
                        logger.exception(e)
                    self.payload = self.payload.replace(origin, result)
# ThreadingMixIn makes every accepted request run in its own thread.
class ThreadedHTTPServer(ThreadingMixIn, http.server.HTTPServer):
    """Handle requests in a separate thread."""
class SubHTTPServer(ThreadedHTTPServer):
    """this class is necessary to allow passing custom request handler into
    the RequestHandlerClass"""

    daemon_threads = True

    def __init__(self, server_address, RequestHandlerClass, template, docpath):
        """Parse the XML *template* once and cache all global HTTP settings
        (method switches, tarpit latency, global headers) on the server."""
        http.server.HTTPServer.__init__(self, server_address, RequestHandlerClass)
        self.docpath = docpath

        # default configuration
        self.update_header_date = True  # this preserves authenticity
        self.disable_method_head = False
        self.disable_method_trace = False
        self.disable_method_options = False
        self.tarpit = "0"

        # load the configuration from template and parse it
        # for the first time in order to reduce further handling..
        self.configuration = etree.parse(template)

        xml_config = self.configuration.xpath("//http/global/config/*")
        if xml_config:
            # retrieve all global configuration entities
            for entity in xml_config:
                if entity.attrib["name"] == "protocol_version":
                    # NOTE: this mutates the handler CLASS, affecting all
                    # connections served by it
                    RequestHandlerClass.protocol_version = entity.text
                elif entity.attrib["name"] == "update_header_date":
                    if entity.text.lower() == "false":
                        # DATE header auto update disabled by configuration
                        self.update_header_date = False
                    elif entity.text.lower() == "true":
                        # DATE header auto update enabled by configuration
                        self.update_header_date = True
                elif entity.attrib["name"] == "disable_method_head":
                    if entity.text.lower() == "false":
                        # HEAD method enabled by configuration
                        self.disable_method_head = False
                    elif entity.text.lower() == "true":
                        # HEAD method disabled by configuration
                        self.disable_method_head = True
                elif entity.attrib["name"] == "disable_method_trace":
                    if entity.text.lower() == "false":
                        # TRACE method enabled by configuration
                        self.disable_method_trace = False
                    elif entity.text.lower() == "true":
                        # TRACE method disabled by configuration
                        self.disable_method_trace = True
                elif entity.attrib["name"] == "disable_method_options":
                    if entity.text.lower() == "false":
                        # OPTIONS method enabled by configuration
                        self.disable_method_options = False
                    elif entity.text.lower() == "true":
                        # OPTIONS method disabled by configuration
                        self.disable_method_options = True
                elif entity.attrib["name"] == "tarpit":
                    if entity.text:
                        self.tarpit = self.config_sanitize_tarpit(entity.text)

        # load global headers from XML
        self.global_headers = []
        xml_headers = self.configuration.xpath("//http/global/headers/*")
        if xml_headers:
            # retrieve all headers assigned to this status code
            for header in xml_headers:
                if (
                    header.attrib["name"].lower() == "date"
                    and self.update_header_date is True
                ):
                    # All HTTP date/time stamps MUST be represented in Greenwich Mean Time (GMT),
                    # without exception ( RFC-2616 )
                    self.global_headers.append(
                        (
                            header.attrib["name"],
                            time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime()),
                        )
                    )
                else:
                    self.global_headers.append((header.attrib["name"], header.text))

    def config_sanitize_tarpit(self, value):
        """Validate a tarpit latency setting.

        Accepts a single number ("x") or a range ("x;y"); returns the
        sanitized value, or "0;0" (no latency) when it cannot be parsed.
        """
        # checks tarpit value for being either a single int or float,
        # or a series of two concatenated integers and/or floats seperated by semicolon and returns
        # either the (sanitized) value or zero.
        if value is not None:
            x, _, y = value.partition(";")
            try:
                _ = float(x)
            except ValueError:
                # first value is invalid, ignore the whole setting.
                logger.error("Invalid tarpit value: '%s'. Assuming no latency.", value)
                return "0;0"
            try:
                _ = float(y)
                # both values are fine.
                return value
            except ValueError:
                # second value is invalid, use the first one.
                return x
        else:
            return "0;0"

    def do_tarpit(self, delay):
        """Sleep (via gevent, so other greenlets keep running) for the
        configured latency before answering."""
        # sleeps the thread for $delay ( should be either 1 float to apply a static period of time to sleep,
        # or 2 floats seperated by semicolon to sleep a randomized period of time determined by ( rand[x;y] )
        lbound, _, ubound = delay.partition(";")
        if not lbound or lbound is None:
            # no lower boundary found. Assume zero latency
            pass
        elif not ubound or ubound is None:
            # no upper boundary found. Assume static latency
            gevent.sleep(float(lbound))
        else:
            # both boundaries found. Assume random latency between lbound and ubound
            gevent.sleep(random.uniform(float(lbound), float(ubound)))
class CommandResponder(object):
    """Owns the SubHTTPServer instance and exposes start/stop semantics to
    the protocol wrapper.

    Fix: stop() now logs through the module-level ``logger`` (consistent with
    the rest of this module) instead of the root logger via ``logging.info``.
    """

    def __init__(self, host, port, template, docpath):
        # Create HTTP server class
        self.httpd = SubHTTPServer((host, port), HTTPServer, template, docpath)
        # expose the actually-bound port (relevant when port 0 was requested)
        self.server_port = self.httpd.server_port

    def serve_forever(self):
        """Block, serving requests until stop() is called."""
        self.httpd.serve_forever()

    def stop(self):
        """Request a graceful shutdown of the serve_forever() loop."""
        logger.info(
            "HTTP server will shut down gracefully as soon as all connections are closed."
        )
        self.httpd.shutdown()
| 42,636 | Python | .py | 949 | 32.232877 | 109 | 0.566719 | mushorg/conpot | 1,224 | 413 | 115 | GPL-2.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,467 | decoder_382.py | mushorg_conpot/conpot/protocols/kamstrup_meter/decoder_382.py | # Copyright (C) 2014 Johnny Vestergaard <jkv@unixcluster.dk>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import logging
from crc16.crc16pure import crc16xmodem
from conpot.utils.networking import chr_py3
from . import kamstrup_constants
logger = logging.getLogger(__name__)
class Decoder382(object):
    """Decoder for Kamstrup 382 (KMP) meter traffic.

    ``decode_in`` consumes the byte stream flowing *to* the meter (requests),
    ``decode_out`` the stream flowing *from* it (responses). Both return a
    human-readable summary string once a complete frame has been seen, or
    ``None`` while a frame is still incomplete.
    """

    # Known register ids -> human readable name.
    REGISTERS = {
        0x01: "Energy in",
        0x02: "Energy out",
        0x0D: "Energy in hi-res",
        0x0E: "Energy out hi-res",
        0x33: "Meter number",  # user configurable
        0x3E9: "Meter serialnumber",  # not user configurable
        0x466: "Meter type",
        0x417: "Time zone",  # how is this represented?
        0x4F7: "KMP address",
        0x4F4: "M-bus address",
        0x041E: "Voltage p1",
        0x041F: "Voltage p2",
        0x0420: "Voltage p3",
        0x0434: "Current p1",
        0x0435: "Current p2",
        0x0436: "Current p3",
        0x0438: "Power p1",
        0x0439: "Power p2",
        0x043A: "Power p3",
        0x178A: "Firmware revision A",  # not too sure on how to parse there.
        0x178F: "Firmware revision B",
    }

    def __init__(self):
        # request-direction (towards the meter) stream state
        self.in_data = []
        self.in_parsing = False
        self.in_data_escaped = False
        # response-direction (from the meter) stream state
        self.out_data = []
        self.out_parsing = False
        self.out_data_escaped = False
        # command byte -> decoder method for requests
        self.request_command_map = {
            0x01: self._decode_cmd_get_type,
            0x10: self._decode_cmd_get_register,
            0x92: self._decode_cmd_login,
        }
        # command byte -> decoder method for responses
        self.response_map = {0x10: self._decode_cmd_return_register}

    def decode_in(self, data):
        """Feed request bytes; return a summary string when a frame completes."""
        for d in data:
            d = ord(d)
            if not self.in_parsing and d != kamstrup_constants.REQUEST_MAGIC:
                # Bugfix: `d` is an int after ord(); the original called
                # d.encode("hex-codec") here, which raised AttributeError.
                logger.info(
                    "No kamstrup_meter request magic received, got: %s",
                    hex(d),
                )
            else:
                self.in_parsing = True
                escape_escape_byte = False
                if self.in_data_escaped:
                    # un-escape: escaped bytes are XOR'ed with 0xFF
                    d ^= 0xFF
                    if d == kamstrup_constants.EOT_MAGIC:
                        # decoded payload byte happens to equal EOT; must not
                        # be treated as the end-of-frame marker below
                        escape_escape_byte = True
                    self.in_data_escaped = False
                elif d == kamstrup_constants.ESCAPE:
                    self.in_data_escaped = True
                    continue
                assert self.in_data_escaped is False
                if d == kamstrup_constants.EOT_MAGIC and not escape_escape_byte:
                    if not self.valid_crc(self.in_data[1:]):
                        self.in_parsing = False
                        self.in_data = []
                        # TODO: Log discarded bytes?
                        return "Request discarded due to invalid CRC."
                    # now we expect (0x80, 0x3f, 0x10) =>
                    # (request magic, communication address, command byte)
                    comm_address = self.in_data[1]
                    if self.in_data[2] in self.request_command_map:
                        result = self.request_command_map[
                            self.in_data[2]
                        ]() + " [{0}]".format(hex(comm_address))
                    else:
                        result = "Unknown request command: {0}".format(self.in_data[2])
                    self.in_data = []
                    return result
                else:
                    self.in_data.append(d)

    def decode_out(self, data):
        """Feed response bytes; return a summary string when a frame completes."""
        for d in data:
            d = ord(d)
            if not self.out_parsing and d != kamstrup_constants.RESPONSE_MAGIC:
                # Bugfix: as in decode_in, `d` is an int; hex(d) instead of
                # the crashing d.encode("hex-codec"). Also fixes "got got".
                logger.info(
                    "Kamstrup: Expected response magic but got: %s",
                    hex(d),
                )
            else:
                self.out_parsing = True
                escape_escape_byte = False
                if self.out_data_escaped:
                    d ^= 0xFF
                    if d == kamstrup_constants.EOT_MAGIC:
                        escape_escape_byte = True
                    self.out_data_escaped = False
                elif d == kamstrup_constants.ESCAPE:
                    self.out_data_escaped = True
                    continue
                assert self.out_data_escaped is False
                if d == kamstrup_constants.EOT_MAGIC and not escape_escape_byte:
                    if not self.valid_crc(self.out_data[1:]):
                        self.out_parsing = False
                        self.out_data = []
                        # TODO: Log discarded bytes?
                        return "Response discarded due to invalid CRC."
                    comm_address = self.out_data[1]
                    if self.out_data[2] in self.response_map:
                        result = self.response_map[
                            self.out_data[2]
                        ]() + " [{0}]".format(hex(comm_address))
                    else:
                        result = "Unknown response command: {0}".format(
                            self.out_data[2]
                        )
                    self.out_data = []
                    return result
                else:
                    self.out_data.append(d)

    def _decode_cmd_get_register(self):
        """Describe a GetRegister (0x10) request held in self.in_data."""
        assert self.in_data[2] == 0x10
        # cmd = self.in_data[2]
        register_count = self.in_data[3]
        message = "Request for {0} register(s): ".format(register_count)
        if len(self.in_data[2:]) < register_count:
            return "Invalid message, register count was too high"
        for count in range(register_count):
            # register ids are 2 bytes, big endian
            register = self.in_data[4 + count * 2] * 256 + self.in_data[5 + count * 2]
            if register in Decoder382.REGISTERS:
                message += "{0} ({1})".format(register, Decoder382.REGISTERS[register])
            else:
                message += "Unknown ({0})".format(register)
            if count + 1 < register_count:
                message += ", "
        return message

    def _decode_cmd_return_register(self):
        """Describe a register-value response held in self.out_data."""
        assert self.out_data[2] == 0x10
        # skip command bytes and trailing checksum
        msg = self.out_data[3:-2]
        return_value = "Register reponse: "
        if len(msg) == 0:
            return_value += "Invalid register"
        else:
            i = 0
            while i < len(msg):
                # Header is (ushort registerId, byte units, byte length, byte unknown)
                register = msg[i] * 256 + msg[i + 1]
                # units byte = msg[i + 2], trailing unknown byte = msg[i + 4]
                length = msg[i + 3]
                # Payload: big-endian integer of `length` bytes
                register_value = 0
                for p in range(length):
                    register_value += msg[i + 5 + p] << (8 * ((length - p) - 1))
                if register in Decoder382.REGISTERS:
                    return_value += "{0}({1}):{2}, ".format(
                        register, Decoder382.REGISTERS[register], register_value
                    )
                else:
                    return_value += "{0}:{1}, ".format(register, register_value)
                i += 5 + length
        return return_value

    # meter type
    def _decode_cmd_get_type(self):
        assert self.in_data[2] == 0x01
        return "Request for GetType"

    def _decode_cmd_login(self):
        """Describe a login (0x92) request; the pin code is 2 bytes big endian."""
        assert self.in_data[2] == 0x92
        pin_code = self.in_data[3] * 256 + self.in_data[4]
        return "Login command with pin_code: {0}".format(pin_code)

    # supplied message should be stripped of leading and trailing magic
    @classmethod
    def valid_crc(cls, message):
        """Return True when the trailing big-endian CRC16/XMODEM is correct."""
        supplied_crc = message[-2] * 256 + message[-1]
        calculated_crc = crc16xmodem(b"".join([chr_py3(item) for item in message[:-2]]))
        return supplied_crc == calculated_crc

    @classmethod
    def _decode_response(cls):
        return "Decoding of this response has not been implemented yet."
| 8,614 | Python | .py | 196 | 30.510204 | 88 | 0.529909 | mushorg/conpot | 1,224 | 413 | 115 | GPL-2.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,468 | messages.py | mushorg_conpot/conpot/protocols/kamstrup_meter/messages.py | # Copyright (C) 2014 Johnny Vestergaard <jkv@unixcluster.dk>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import logging
import binascii
from crc16.crc16pure import crc16xmodem
from conpot.utils.networking import chr_py3
from . import kamstrup_constants
import conpot.core as conpot_core
logger = logging.getLogger(__name__)
class KamstrupProtocolBase(object):
    """Common base for all KMP messages (requests and responses).

    Only carries the communication address the message belongs to.
    """

    def __init__(self, communication_address):
        # address of the meter this message targets / originates from
        self.communication_address = communication_address
# ############ REQUEST MESSAGES ##############
class KamstrupRequestBase(KamstrupProtocolBase):
    """Base class for parsed KMP requests: command byte plus raw payload."""

    def __init__(self, communication_address, command, message_bytes):
        super().__init__(communication_address)
        self.command = command
        self.message_bytes = message_bytes
        logger.info(
            "Kamstrup request package created with bytes: %s", self.message_bytes
        )

    def __str__(self):
        payload_hex = binascii.hexlify(bytearray(self.message_bytes))
        return (
            f"Comm address: {hex(self.communication_address)}, "
            f"Command: {hex(self.command)}, Message: {payload_hex}"
        )
# Valid but request command unknown
class KamstrupRequestUnknown(KamstrupRequestBase):
    """Fallback for syntactically valid requests with an unrecognized command."""

    def __init__(self, communication_address, command_byte, message_bytes):
        super().__init__(communication_address, command_byte, message_bytes)
        logger.warning("Unknown Kamstrup request: %s", self)
class KamstrupRequestGetRegisters(KamstrupRequestBase):
    """GetRegister (0x10) request: a count byte followed by 2-byte register ids."""

    command_byte = 0x10

    def __init__(self, communication_address, command_byte, message_bytes):
        # Bugfix: the original asserted `command_byte is command_byte`, a
        # tautology that could never fail; verify the dispatched command id.
        assert command_byte == KamstrupRequestGetRegisters.command_byte
        super(KamstrupRequestGetRegisters, self).__init__(
            communication_address,
            KamstrupRequestGetRegisters.command_byte,
            message_bytes,
        )
        self.registers = []
        self._parse_register_bytes()
        logger.debug(
            "Kamstrup request for registers: %s", str(self.registers).strip("[]")
        )

    def _parse_register_bytes(self):
        """Extract big-endian 2-byte register ids from the payload."""
        register_count = self.message_bytes[0]
        # Each register id occupies two bytes after the count byte. The
        # original compared `len(...) * 2 < register_count`, which
        # under-checked and allowed an IndexError on truncated payloads.
        if len(self.message_bytes[1:]) < register_count * 2:
            raise Exception("Invalid register count in register request")
        for count in range(register_count):
            register = (
                self.message_bytes[1 + count * 2] * 256
                + self.message_bytes[2 + count * 2]
            )
            self.registers.append(register)
# ############ RESPONSE MESSAGES ##############
class KamstrupResponseBase(KamstrupProtocolBase):
    """Base class for KMP responses; handles framing, CRC and byte-stuffing."""

    def __init__(self, communication_address):
        super().__init__(communication_address)

    def serialize(self, message):
        """Frame *message*: prepend magic + address, append the big-endian
        CRC16/XMODEM and the EOT marker, then escape the frame body."""
        framed = [kamstrup_constants.RESPONSE_MAGIC, self.communication_address]
        framed.extend(message)
        # the CRC covers everything after the leading magic byte
        crc = crc16xmodem(b"".join([chr_py3(item) for item in framed[1:]]))
        framed.append(crc >> 8)
        framed.append(crc & 0xFF)
        # trailing magic
        framed.append(kamstrup_constants.EOT_MAGIC)
        return self.escape(framed)

    # escape everything but leading and trailing magic
    @classmethod
    def escape(cls, message):
        """Byte-stuff reserved bytes (XOR 0xFF, prefixed by ESCAPE)."""
        stuffed = [message[0]]
        for byte in message[1:-1]:
            if byte in kamstrup_constants.NEED_ESCAPE:
                stuffed.append(kamstrup_constants.ESCAPE)
                stuffed.append(byte ^ 0xFF)
            else:
                stuffed.append(byte)
        stuffed.append(message[-1])
        return stuffed
class KamstrupResponseRegister(KamstrupResponseBase):
    """Response to a GetRegister (0x10) request carrying register values.

    Register values are read live from the conpot databus at serialization
    time, so two serializations of the same response may differ.
    """

    def __init__(self, communication_address):
        super(KamstrupResponseRegister, self).__init__(communication_address)
        # KamstrupRegister instances to serialize, in insertion order
        self.registers = []

    def add_register(self, register):
        """Queue a KamstrupRegister for inclusion in the response."""
        self.registers.append(register)

    def serialize(self, message=None):
        """Build the response body and delegate framing/CRC to the base class.

        Returns the fully framed, escaped message as a bytearray.
        """
        if not message:
            message = []
        # echo the GetRegister command id
        message.append(0x10)
        for register in self.registers:
            # each register must be packed: (ushort registerId, byte units, byte length, byte unknown)
            # and the following $length payload with the register value
            message.append(register.name >> 8)
            message.append(register.name & 0xFF)
            message.append(register.units)
            message.append(register.length)
            # mystery byte
            message.append(register.unknown)
            low_endian_value_packed = []
            # current value comes from the databus key bound to this register
            register_value = conpot_core.get_databus().get_value(register.databus_key)
            for _ in range(register.length):
                # get least significant
                low_endian_value_packed.append(register_value & 0xFF)
                register_value >>= 8
            # reverse to get pack high endian
            for b in reversed(low_endian_value_packed):
                message.append(b)
        # add leading/trailing magic and escape as appropriate
        serialized_message = super(KamstrupResponseRegister, self).serialize(message)
        return bytearray(serialized_message)
| 6,111 | Python | .py | 136 | 36.617647 | 102 | 0.664199 | mushorg/conpot | 1,224 | 413 | 115 | GPL-2.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,469 | kamstrup_constants.py | mushorg_conpot/conpot/protocols/kamstrup_meter/kamstrup_constants.py | # Copyright (C) 2014 Johnny Vestergaard <jkv@unixcluster.dk>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from enum import Enum
REQUEST_MAGIC = 0x80  # first byte of every request frame (towards the meter)
RESPONSE_MAGIC = 0x40  # first byte of every response frame (from the meter)
EOT_MAGIC = 0x0D  # end-of-frame marker
ESCAPE = 0x1B  # escape prefix: the following byte is XOR'ed with 0xFF
# Bytes that must be byte-stuffed inside a frame so they cannot be mistaken
# for framing bytes. NOTE(review): purpose of 0x06 is unclear (ACK?) — confirm
# against the KMP protocol description.
NEED_ESCAPE = [0x06, EOT_MAGIC, ESCAPE, RESPONSE_MAGIC, REQUEST_MAGIC]
# Register unit codes as sent on the wire: code -> unit mnemonic.
UNITS = {
    0: "None",
    1: "Wh",
    2: "kWh",
    3: "MWh",
    4: "GWh",
    5: "j",
    6: "kj",
    7: "Mj",
    8: "Gj",
    9: "Cal",
    10: "kCal",
    11: "MCal",
    12: "GCal",
    13: "varh",
    14: "kvarh",
    15: "Mvarh",
    16: "Gvarh",
    17: "VAh",
    18: "kVAh",
    19: "MVAh",
    20: "GVAh",
    21: "W",
    22: "kW",
    23: "MW",
    24: "GW",
    25: "var",
    26: "kvar",
    27: "MVar",
    28: "Gvar",
    29: "VA",
    30: "kVA",
    31: "MVA",
    32: "GVA",
    33: "V",
    34: "A",
    35: "kV",
    36: "kA",
    37: "C",
    38: "K",
    39: "I",
    40: "m3",
    41: "I_h",
    42: "m3_h",
    43: "m3xC",
    44: "ton",
    45: "ton_h",
    46: "h",
    47: "clock",  # hh:mm:ss
    48: "date1",  # yy:mm:dd
    49: "date2",  # yyyy:mm:dd
    50: "date3",  # mm:dd
    51: "number",
    52: "bar",
    53: "RTC",
    54: "ASCII",
    55: "m3x10",
    56: "tonx10",
    57: "GJx10",
    58: "minutes",
    59: "Bitfield",
    60: "s",
    61: "ms",
    62: "days",
    63: "RTC_Q",
    64: "Datetime",
    65: "imp_L",
    66: "L_imp",
    67: "Hz",
    68: "Degree",
    69: "Percent",
    70: "USgal",
    71: "USgal_min",
    72: "KamDateTime",
    73: "IPv4Address",
    74: "IPv6Address",
}
class MeterTypes(Enum):
    """Known Kamstrup meter models.

    Bugfix: the original members were assigned one-element tuples
    (``Unknown = (0,)`` etc.) — an artifact of stray trailing commas — so
    ``MeterTypes.K382M.value`` was ``(1,)`` rather than ``1``. Members now
    carry plain integer values.
    """

    Unknown = 0
    K382M = 1
    K162M = 2
    K351C = 3
    OMNIA = 4
    # where does 382J fit in? together with 382M?
| 2,366 | Python | .py | 107 | 18.028037 | 70 | 0.562805 | mushorg/conpot | 1,224 | 413 | 115 | GPL-2.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,470 | kamstrup_server.py | mushorg_conpot/conpot/protocols/kamstrup_meter/kamstrup_server.py | # Copyright (C) 2014 Johnny Vestergaard <jkv@unixcluster.dk>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import binascii
import logging
import random
import socket
from gevent.server import StreamServer
import gevent
import conpot.core as conpot_core
from conpot.utils.networking import chr_py3
from .request_parser import KamstrupRequestParser
from .command_responder import CommandResponder
from conpot.core.protocol_wrapper import conpot_protocol
logger = logging.getLogger(__name__)
@conpot_protocol
class KamstrupServer(object):
    """Gevent StreamServer speaking the Kamstrup meter (KMP) protocol.

    Incoming bytes are fed to a KamstrupRequestParser; parsed requests are
    answered by the CommandResponder. A "reboot_signal" databus value makes
    the server temporarily drop connections to simulate a device reboot.
    """

    def __init__(self, template, template_directory, args):
        self.command_responder = CommandResponder(template)
        self.server_active = True
        self.server = None
        # react to simulated reboots triggered elsewhere via the databus
        conpot_core.get_databus().observe_value("reboot_signal", self.reboot)
        logger.info("Kamstrup protocol server initialized.")

    # pretending reboot... really just closing connecting while "rebooting"
    def reboot(self, key):
        assert key == "reboot_signal"
        self.server_active = False
        logger.info("Pretending server reboot")
        # come back online after ~2 seconds
        gevent.spawn_later(2, self.set_reboot_done)

    def set_reboot_done(self):
        logger.info("Stopped pretending reboot")
        self.server_active = True

    def handle(self, sock, address):
        """Per-connection handler: read, parse and answer KMP requests."""
        session = conpot_core.get_session(
            "kamstrup_protocol",
            address[0],
            address[1],
            sock.getsockname()[0],
            sock.getsockname()[1],
        )
        logger.info(
            "New Kamstrup connection from %s:%s. (%s)",
            address[0],
            address[1],
            session.id,
        )
        session.add_event({"type": "NEW_CONNECTION"})
        self.server_active = True
        parser = KamstrupRequestParser()
        try:
            while self.server_active:
                raw_request = sock.recv(1024)

                if not raw_request:
                    logger.info("Kamstrup client disconnected. (%s)", session.id)
                    session.add_event({"type": "CONNECTION_LOST"})
                    break

                for x in raw_request:
                    parser.add_byte(chr_py3(x))

                # handle at most one complete request per recv() chunk
                while True:
                    request = parser.get_request()
                    if not request:
                        # NOTE(review): no complete frame yet is the normal
                        # case here; logging CONNECTION_LOST for it looks
                        # suspicious — confirm intent.
                        session.add_event({"type": "CONNECTION_LOST"})
                        break
                    else:
                        logdata = {
                            "request": binascii.hexlify(
                                bytearray(request.message_bytes)
                            )
                        }
                        response = self.command_responder.respond(request)
                        # real Kamstrup meters has delay in this interval
                        gevent.sleep(random.uniform(0.24, 0.34))
                        if response:
                            serialized_response = response.serialize()
                            logdata["response"] = binascii.hexlify(serialized_response)
                            logger.info(
                                "Kamstrup traffic from %s: %s (%s)",
                                address[0],
                                logdata,
                                session.id,
                            )
                            sock.send(serialized_response)
                            session.add_event(logdata)
                        else:
                            session.add_event(logdata)
                        break

        except socket.timeout:
            logger.debug("Socket timeout, remote: %s. (%s)", address[0], session.id)
            session.add_event({"type": "CONNECTION_LOST"})

        sock.close()

    def start(self, host, port):
        """Bind and serve forever (blocking)."""
        self.host = host
        self.port = port
        connection = (host, port)
        self.server = StreamServer(connection, self.handle)
        logger.info("Kamstrup protocol server started on: %s", connection)
        self.server.serve_forever()

    def stop(self):
        self.server.stop()
| 4,749 | Python | .py | 112 | 30.044643 | 87 | 0.574551 | mushorg/conpot | 1,224 | 413 | 115 | GPL-2.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,471 | request_parser.py | mushorg_conpot/conpot/protocols/kamstrup_meter/request_parser.py | # Copyright (C) 2014 Johnny Vestergaard <jkv@unixcluster.dk>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import logging
from crc16.crc16pure import crc16xmodem
from conpot.utils.networking import chr_py3
from . import kamstrup_constants
from .messages import KamstrupRequestGetRegisters, KamstrupRequestUnknown
logger = logging.getLogger(__name__)
class KamstrupRequestParser(object):
    """Incremental parser for KMP request frames.

    Bytes are pushed in via add_byte(); get_request() scans the buffer and
    returns a parsed request object once a complete, CRC-valid frame is
    available, or None otherwise.
    """

    def __init__(self):
        self.bytes = list()
        self.parsing = False
        self.data_escaped = False
        self.done = False
        # command byte -> request class
        self.request_map = {
            KamstrupRequestGetRegisters.command_byte: KamstrupRequestGetRegisters
        }

    def add_byte(self, byte):
        """Append one raw byte (as a 1-char string) to the buffer."""
        self.bytes.append(ord(byte))

    def get_request(self):
        """Scan the buffer; return a parsed request or None if incomplete."""
        bytes_len = len(self.bytes)
        position = 0
        while position < bytes_len:
            d = self.bytes[position]
            if not self.parsing and d != kamstrup_constants.REQUEST_MAGIC:
                logger.info(
                    "Kamstrup skipping byte, expected kamstrup_meter request magic but got: {0}".format(
                        hex(d)
                    )
                )
                del self.bytes[position]
                bytes_len -= 1
                continue
            else:
                self.parsing = True
                escape_escape_byte = False
                if self.data_escaped:
                    # un-escape in place. NOTE: `d` deliberately keeps the
                    # escaped (XOR'ed) value so the EOT check below cannot
                    # mistake an escaped payload byte for end-of-frame.
                    self.bytes[position] ^= 0xFF
                    if d == kamstrup_constants.EOT_MAGIC:
                        escape_escape_byte = True
                    self.data_escaped = False
                elif d == kamstrup_constants.ESCAPE:
                    self.data_escaped = True
                    del self.bytes[position]
                    bytes_len -= 1
                    continue
                assert self.data_escaped is False
                if d == kamstrup_constants.EOT_MAGIC and not escape_escape_byte:
                    if not self.valid_crc(self.bytes[1:position]):
                        # Bugfix: the original fell through here and still
                        # parsed the corrupt frame from the mutated buffer.
                        # Drop the bad frame (including its EOT byte) and
                        # resume scanning for the next request magic.
                        self.parsing = False
                        del self.bytes[0 : position + 1]
                        logger.warning("Kamstrup CRC check failed for request.")
                        bytes_len = len(self.bytes)
                        position = 0
                        continue
                    # now we expect (0x80, 0x3f, 0x10) =>
                    # (request magic, communication address, command byte)
                    comm_address = self.bytes[1]
                    command_byte = self.bytes[2]
                    if command_byte in self.request_map:
                        result = self.request_map[command_byte](
                            comm_address, command_byte, self.bytes[3:-3]
                        )
                    else:
                        result = KamstrupRequestUnknown(
                            comm_address, command_byte, self.bytes[3:-3]
                        )
                    del self.bytes[: position + 1]
                    return result
            position += 1

    @classmethod
    def valid_crc(cls, message):
        """Return True when the trailing big-endian CRC16/XMODEM is correct."""
        supplied_crc = message[-2] * 256 + message[-1]
        calculated_crc = crc16xmodem(b"".join([chr_py3(item) for item in message[:-2]]))
        return supplied_crc == calculated_crc
| 3,896 | Python | .py | 87 | 31.873563 | 104 | 0.573347 | mushorg/conpot | 1,224 | 413 | 115 | GPL-2.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,472 | command_responder.py | mushorg_conpot/conpot/protocols/kamstrup_meter/command_responder.py | # Copyright (C) 2014 Johnny Vestergaard <jkv@unixcluster.dk>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import logging
from . import messages
import copy
from lxml import etree
from .register import KamstrupRegister
logger = logging.getLogger(__name__)
class CommandResponder(object):
    """Answers parsed Kamstrup requests using registers defined in the template."""

    def __init__(self, template):
        # key: kamstrup_meter register id, value: KamstrupRegister
        self.registers = {}
        dom = etree.parse(template)
        registers = dom.xpath("//kamstrup_meter/registers/*")
        self.communication_address = int(
            dom.xpath("//kamstrup_meter/config/communication_address/text()")[0]
        )
        for register in registers:
            name = int(register.attrib["name"])
            length = int(register.attrib["length"])
            units = int(register.attrib["units"])
            unknown = int(register.attrib["unknown"])
            databuskey = register.xpath("./value/text()")[0]
            kamstrup_register = KamstrupRegister(
                name, units, length, unknown, databuskey
            )
            # template must not define the same register twice
            assert name not in self.registers
            self.registers[name] = kamstrup_register

    def respond(self, request):
        """Return a response message for *request*, or None when the request
        is addressed to a different meter or is not supported."""
        if request.communication_address != self.communication_address:
            logger.warning(
                "Kamstrup request received with wrong communication address, got {} but expected {}.".format(
                    request.communication_address, self.communication_address
                )
            )
            return None
        elif isinstance(request, messages.KamstrupRequestGetRegisters):
            response = messages.KamstrupResponseRegister(self.communication_address)
            for register in request.registers:
                if register in self.registers:
                    response.add_register(copy.deepcopy(self.registers[register]))
            return response
        else:
            # Bugfix: this branch used to be `assert False`, which crashed the
            # session greenlet whenever the parser produced a
            # KamstrupRequestUnknown. Unknown-but-valid requests are simply
            # left unanswered (the server checks `if response:`).
            logger.warning("No response handler for Kamstrup request: %s", request)
            return None
| 2,610 | Python | .py | 58 | 37.034483 | 109 | 0.673742 | mushorg/conpot | 1,224 | 413 | 115 | GPL-2.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,473 | register.py | mushorg_conpot/conpot/protocols/kamstrup_meter/register.py | # Copyright (C) 2014 Johnny Vestergaard <jkv@unixcluster.dk>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
class KamstrupRegister(object):
    """Value object describing one meter register.

    name: numeric register id as used on the wire
    units: unit code (see kamstrup_constants.UNITS)
    length: payload length in bytes
    unknown: undocumented header byte echoed back in responses
    databus_key: conpot databus key holding the register's current value
    """

    def __init__(self, name, units, length, unknown, databus_key):
        self.name, self.units, self.length = name, units, length
        self.unknown, self.databus_key = unknown, databus_key
| 1,024 | Python | .py | 23 | 41.521739 | 67 | 0.744745 | mushorg/conpot | 1,224 | 413 | 115 | GPL-2.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,474 | ascii_decoder.py | mushorg_conpot/conpot/protocols/proxy/ascii_decoder.py | # Copyright (C) 2014 Johnny Vestergaard <jkv@unixcluster.dk>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import logging
from .proxy import ProxyDecoder
logger = logging.getLogger(__name__)
class AsciiDecoder(ProxyDecoder):
    """Pass-through decoder that re-encodes traffic as valid UTF-8,
    substituting undecodable bytes with the replacement character."""

    def decode_in(self, data):
        text = data.decode("utf-8", "replace")
        return text.encode("utf-8")

    def decode_out(self, data):
        text = data.decode("utf-8", "replace")
        return text.encode("utf-8")
| 1,083 | Python | .py | 24 | 42.916667 | 67 | 0.757116 | mushorg/conpot | 1,224 | 413 | 115 | GPL-2.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,475 | proxy.py | mushorg_conpot/conpot/protocols/proxy/proxy.py | # Copyright (C) 2014 Johnny Vestergaard <jkv@unixcluster.dk>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import logging
from gevent import select
from gevent import socket as _socket
import codecs
import gevent
from gevent.socket import socket
from gevent.ssl import wrap_socket
from gevent.server import StreamServer
import abc
import conpot.core as conpot_core
logger = logging.getLogger(__name__)
class ProxyDecoder(abc.ABC):
    """Interface for protocol decoders plugged into the Proxy for logging."""

    @abc.abstractmethod
    def decode_in(self, data):
        """Decode data that goes into the proxied device."""

    @abc.abstractmethod
    def decode_out(self, data):
        """Decode data that goes out from the proxied device to the connected
        client (attacker)."""
class Proxy(object):
    """TCP proxy that forwards traffic to a real device/service while logging
    both directions into a conpot session, optionally decoding payloads with a
    ProxyDecoder."""

    def __init__(
        self, name, proxy_host, proxy_port, decoder=None, keyfile=None, certfile=None
    ):
        self.proxy_host = proxy_host
        self.proxy_port = proxy_port
        self.name = name
        self.proxy_id = self.name.lower().replace(" ", "_")
        self.host = None
        self.port = None
        self.keyfile = keyfile
        self.certfile = certfile
        if decoder:
            # decoder is a dotted path "package.module.ClassName"
            try:
                namespace, _classname = decoder.rsplit(".", 1)
                module = __import__(namespace, fromlist=[_classname])
                _class = getattr(module, _classname)
                self.decoder = _class()
                assert isinstance(self.decoder, ProxyDecoder)
            except AssertionError:
                logger.fatal(
                    "Invalid decoder: decoder must be an instance of ProxyDecoder."
                )
        else:
            self.decoder = None

    def get_server(self, host, port):
        """Create (but do not start) the listening StreamServer."""
        self.host = host
        connection = (host, port)
        if self.keyfile and self.certfile:
            server = StreamServer(
                connection, self.handle, keyfile=self.keyfile, certfile=self.certfile
            )
        else:
            server = StreamServer(connection, self.handle)
        self.port = server.server_port
        logger.info(
            "%s proxy server started, listening on %s, proxy for: (%s, %s) using %s decoder.",
            self.name,
            connection,
            self.proxy_host,
            self.proxy_port,
            self.decoder,
        )
        return server

    def handle(self, sock, address):
        """Per-connection handler: shuttle bytes between client and device."""
        session = conpot_core.get_session(
            self.proxy_id,
            address[0],
            address[1],
            sock.getsockname()[0],
            sock.getsockname()[1],
        )
        logger.info(
            "New connection from %s:%s on %s proxy. (%s)",
            address[0],
            address[1],
            self.proxy_id,
            session.id,
        )
        proxy_socket = socket()
        if self.keyfile and self.certfile:
            proxy_socket = wrap_socket(
                proxy_socket, keyfile=self.keyfile, certfile=self.certfile
            )
        try:
            proxy_socket.connect((self.proxy_host, self.proxy_port))
        except _socket.error:
            logger.exception(
                "Error while connecting to proxied service at ({}, {})".format(
                    self.proxy_host, self.proxy_port
                )
            )
            self._close([proxy_socket, sock])
            return

        sockets = [proxy_socket, sock]
        while len(sockets) == 2:
            gevent.sleep(0)
            sockets_read, _, sockets_err = select.select(sockets, [], sockets, 10)
            if len(sockets_err) > 0:
                self._close([proxy_socket, sock])
                break
            for s in sockets_read:
                socket_close_reason = "socket closed"
                try:
                    data = s.recv(1024)
                except _socket.error as socket_err:
                    data = []
                    socket_close_reason = str(socket_err)
                if len(data) == 0:
                    self._close([proxy_socket, sock])
                    if s is proxy_socket:
                        # Fix: use the module logger instead of the root
                        # `logging` module, consistent with this module.
                        logger.warning(
                            "Closing proxied socket while receiving (%s, %s): %s.",
                            self.proxy_host,
                            self.proxy_port,
                            socket_close_reason,
                        )
                        sockets = []
                        break
                    elif s is sock:
                        # Bugfix: arguments were passed as (reason, host,
                        # port) while the format expects (host, port, reason).
                        logger.warning(
                            "Closing connection to remote while receiving from remote (%s, %s): %s",
                            address[0],
                            address[1],
                            socket_close_reason,
                        )
                        sockets = []
                        break
                    else:
                        assert False
                try:
                    if s is proxy_socket:
                        self.handle_out_data(data, sock, session)
                    elif s is sock:
                        self.handle_in_data(data, proxy_socket, session)
                    else:
                        assert False
                except _socket.error as socket_err:
                    if s is proxy_socket:
                        destination = "proxied socket"
                    else:
                        destination = "remote connection"
                    logger.warning(
                        "Error while sending data to %s: %s.",
                        destination,
                        str(socket_err),
                    )
                    sockets = []
                    break

        session.set_ended()
        proxy_socket.close()
        sock.close()

    def handle_in_data(self, data, sock, session):
        """Log client->device traffic and forward it to the device."""
        # convert the data from bytes to hex string
        hex_data = codecs.encode(data, "hex_codec")
        session.add_event({"raw_request": hex_data, "raw_response": ""})
        logger.debug(
            "Received %s bytes from outside to proxied service: %s", len(data), hex_data
        )
        if self.decoder:
            # TODO: data could be chunked, proxy needs to handle this
            decoded = self.decoder.decode_in(data)
            logger.debug("Decoded request: %s", decoded)
            session.add_event({"request": decoded, "raw_response": ""})
        sock.send(data)

    def handle_out_data(self, data, sock, session):
        """Log device->client traffic and forward it to the client."""
        hex_data = codecs.encode(data, "hex_codec")
        session.add_event({"raw_request": "", "raw_response": hex_data})
        logger.debug("Received %s bytes from proxied service: %s", len(data), hex_data)
        if self.decoder:
            # TODO: data could be chunked, proxy needs to handle this
            decoded = self.decoder.decode_out(data)
            logger.debug("Decoded response: %s", decoded)
            # NOTE(review): decoded responses are logged under "raw_response"
            # while decoded requests use "request" — confirm intended keys.
            session.add_event({"request": "", "raw_response": decoded})
        sock.send(data)

    def _close(self, sockets):
        for s in sockets:
            s.close()

    def stop(self):
        # TODO: Keep active sockets in list and close them on stop()
        return
| 7,796 | Python | .py | 195 | 27.323077 | 100 | 0.537599 | mushorg/conpot | 1,224 | 413 | 115 | GPL-2.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,476 | ftp_base_handler.py | mushorg_conpot/conpot/protocols/ftp/ftp_base_handler.py | # Copyright (C) 2018 Abhinav Saxena <xandfury@gmail.com>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import socketserver
import gevent
import conpot.core as conpot_core
from conpot.core.filesystem import FilesystemError
import logging
import errno
import time
import fs
from datetime import datetime
import os
from conpot.protocols.ftp.ftp_utils import FTPPrivilegeException
from conpot.utils.networking import sanitize_file_name
from gevent import socket
logger = logging.getLogger(__name__)
# -----------------------------------------------------------
# Implementation Note: DTP channel that would have two queues for Input and Output (separate for producer and consumer.)
# There would be 3 threads (greenlets) in place. One for handling the command_channel, one for handling the input/
# output of the data_channel and one that would run the command processor.
# Commands class inheriting this base handler is independent of the **green drama** and may be considered on as is basis
# when migrating to async/io.
# -----------------------------------------------------------
class FTPMetrics(object):
    """Per-session FTP counters: bytes moved on each channel plus
    start/last-activity timestamps, with helpers to render a summary."""

    def __init__(self):
        now = time.time()
        self.start_time = now
        self.last_active = now
        # byte counters for the data channel
        self.data_channel_bytes_recv = 0
        self.data_channel_bytes_send = 0
        # byte counters for the command channel; the historical 'chanel'
        # spelling is kept because other modules read these attributes
        self.command_chanel_bytes_send = 0
        self.command_chanel_bytes_recv = 0

    # Implementation note: the data socket is non-blocking, so the connection
    # is closed when there is no data; the command socket has a long timeout.
    @property
    def timeout(self):
        """Return a zero-arg callable yielding seconds of inactivity.

        Falls back to the session start when ``last_active`` is falsy
        (it is initialized truthy, so the fallback is effectively unused).
        """
        if self.last_active:
            return lambda: int(time.time() - self.last_active)
        return lambda: int(time.time() - self.start_time)

    def get_elapsed_time(self):
        """Seconds between session start and the last recorded activity."""
        return self.last_active - self.start_time

    def __repr__(self):
        grand_total = sum(
            (
                self.data_channel_bytes_recv,
                self.data_channel_bytes_send,
                self.command_chanel_bytes_recv,
                self.command_chanel_bytes_send,
            )
        )
        s = """
        Total data transferred : {} (bytes)
        Command channel sent : {} (bytes)
        Command channel received : {} (bytes)
        Data channel sent : {} (bytes)
        Data channel received : {} (bytes)""".format(
            grand_total,
            self.command_chanel_bytes_send,
            self.command_chanel_bytes_recv,
            self.data_channel_bytes_send,
            self.data_channel_bytes_recv,
        )
        return s

    def get_metrics(
        self, user_name, uid, failed_login_attempts, max_login_attempts, client_address
    ):
        """Render a human-readable end-of-session summary for *client_address*."""
        header = """
        FTP statistics for client : {}
        ----------------------------------
        Logged in as user :{} with uid: {}
        Failed login attempts : {}/{}
        Start time : {}
        Last active on : {}
        ----------------------------------
        """.format(
            client_address,
            user_name,
            uid,
            failed_login_attempts,
            max_login_attempts,
            datetime.fromtimestamp(self.start_time).ctime(),
            datetime.fromtimestamp(self.last_active).ctime(),
        )
        return header + self.__repr__()
class FTPHandlerBase(socketserver.BaseRequestHandler):
"""Base class for a full duplex connection"""
config = None # Config of FTP server. FTPConfig class instance.
host, port = None, None # FTP Sever's host and port.
_local_ip = "127.0.0.1" # IP to bind the _data_listener_sock with.
_ac_in_buffer_size = 4096 # incoming data buffer size (defaults 4096)
_ac_out_buffer_size = 4096 # outgoing data buffer size (defaults 4096)
    def __init__(self, request, client_address, server):
        """Initialize all per-connection FTP session state.

        :param request: wrapper object whose ``_sock`` is the accepted client socket
            (see ``false_request`` for the gevent StreamServer path).
        :param client_address: ``(ip, port)`` of the remote client.
        :param server: owning server instance (``None`` when driven via
            ``stream_server_handle``).
        """
        # ------------------------ Environment -------------------------
        self.client_sock = request._sock
        # only commands that are enabled should work! This is configured in the FTPConfig class.
        if not self._local_ip:
            self._local_ip = self.client_sock.getsockname()[
                0
            ]  # for masquerading; the local IP of the accepted socket works fine
        # Username of the current user
        self.username = None
        self._uid = None  # UID of the current username
        # The flag to check whether a user is already authenticated!
        self.authenticated = False
        # What commands does this FTP support
        self.COMMANDS = {}
        # conpot session (created in setup())
        self.session = None
        # terminate character - CR+LF
        self.terminator = b"\r\n"
        # tracking login attempts
        self.invalid_login_attempt = 0
        # max login attempts
        self.max_login_attempts = self.config.max_login_attempts
        # ftp absolute path of the file system
        self.root = self.config.vfs.getcwd()
        # get the current working directory of the current user
        self.working_dir = "/"
        # flag to check whether we need to disconnect the client
        self.disconnect_client = False
        # keep state of the last command/response
        self._last_command = None
        # Note: From stream, block or compressed - only stream is supported.
        self._transfer_mode = None  # For APPE and REST commands
        self._restart_position = 0  # For APPE and REST commands
        # binary ('i') or ascii mode ('a')
        self._current_type = "a"
        # buffer-size for FTP **commands**, send error if this is exceeded
        self.buffer_limit = 2048  # command channel would not accept more data than this for one command.
        self.active_passive_mode = (
            None  # Flag to check the current mode. Would be set to 'PASV' or 'PORT'
        )
        self._data_channel = False  # check whether the data channel is running or not. This would trigger
        # the start and end of the data channel.
        self._data_channel_send = (
            gevent.event.Event()
        )  # Event when we are trying to send a file.
        self._data_channel_recv = gevent.event.Event()  # Event for receiving a file.
        self.cli_ip, self.cli_port = (
            None,
            None,
        )  # IP and port received from client in active/passive mode.
        self._data_sock = None
        self._data_listener_sock = None
        # socket for accepting cli_ip and cli_port in passive mode.
        self._rnfr = None  # For RNFR and RNTO
        self.metrics = FTPMetrics()  # track session related metrics.
        # Input and output queues for the command and data channels.
        self._command_channel_input_q = gevent.queue.Queue()
        self._command_channel_output_q = gevent.queue.Queue()
        self._data_channel_output_q = gevent.queue.Queue()
        self._data_channel_input_q = gevent.queue.Queue()
        self.ftp_greenlets = None  # Keep track of all greenlets
        # NOTE: BaseRequestHandler.__init__ runs setup()/handle()/finish(),
        # i.e. the whole session executes inside this constructor call.
        socketserver.BaseRequestHandler.__init__(
            self, request=request, client_address=client_address, server=server
        )
def ftp_path(self, path):
"""Clean and sanitize ftp paths relative fs instance it is hosted in."""
_path = self.config.vfs.norm_path(os.path.join(self.working_dir, path))
_path = _path.replace(self.root, "/")
return _path
# -- Wrappers for gevent StreamServer -------
class false_request(object):
def __init__(self):
self._sock = None
def __del__(self):
if self._sock:
if self._sock.fileno() != -1:
self._sock.close()
del self._sock
@classmethod
def stream_server_handle(cls, sock, address):
"""Translate this class for use in a StreamServer"""
request = cls.false_request()
request._sock = sock
server = None
_ftp = None
try:
_ftp = cls(request, address, server)
except socket.error:
logger.warning("Unexpected Error Occurred!")
del _ftp
    def setup(self):
        """Connect incoming connection to a FTP session.

        Registers a conpot session for this client, logs the connection,
        and greets the client with the configured banner.
        """
        self.session = conpot_core.get_session(
            "ftp",
            self.client_address[0],
            self.client_address[1],
            self.request._sock.getsockname()[0],
            self.request._sock.getsockname()[1],
        )
        logger.info(
            "New FTP connection from {}:{}. ({})".format(
                self.client_address[0], self.client_address[1], self.session.id
            )
        )
        self.session.add_event({"type": "NEW_CONNECTION"})
        # send 200 + banner -- new client has connected!
        # NOTE(review): real FTP servers greet with 220; confirm the 200
        # greeting is intentional for this emulation.
        self.respond(b"200 " + self.config.banner.encode())
        # Is there a delay in command response? < gevent.sleep(0.5) ?
        return socketserver.BaseRequestHandler.setup(self)
def finish(self):
"""End this client session"""
if self.disconnect_client is False:
logger.info(
"FTP client {} disconnected. ({})".format(
self.client_address, self.session.id
)
)
self.stop_data_channel(
abort=True,
purge=True,
reason="Closing connection to {}. "
"Client disconnected".format(self.client_address),
)
if self._data_listener_sock:
if self._data_listener_sock.fileno() != -1:
self._data_sock.close()
socketserver.BaseRequestHandler.finish(self)
self.client_sock.close()
logger.info(
"{}".format(
self.metrics.get_metrics(
client_address=self.client_address,
user_name=self.username,
uid=self._uid,
failed_login_attempts=self.invalid_login_attempt,
max_login_attempts=self.max_login_attempts,
)
)
)
self.disconnect_client = True
else:
logger.debug("Client {} already disconnected.".format(self.client_address))
def __del__(self):
if self.disconnect_client is False:
self.finish()
# -- FTP Command Channel ------------
    def handle_cmd_channel(self):
        """Read data from the socket and add it to the _command_channel_input_q for processing.

        One pump step of the duplex command channel: when the socket is
        readable, queue incoming bytes for the command processor; otherwise,
        when it is writable and a response is pending, send it. Socket errors
        other than EWOULDBLOCK end the session.
        """
        log_data = dict()
        try:
            if self.client_sock.closed:
                logger.info(
                    "FTP socket is closed, connection lost. Remote: {} ({}).".format(
                        self.client_address, self.session.id
                    )
                )
                self.session.add_event({"type": "CONNECTION_LOST"})
                self.finish()
                return
            # 1-second select so the surrounding greenlet loop stays responsive
            socket_read, socket_write, _ = gevent.select.select(
                [self.client_sock], [self.client_sock], [], 1
            )
            # make sure the socket is ready to read - we would read from the command channel.
            if self.client_sock in socket_read:
                data = self.client_sock.recv(self.buffer_limit)
                # put the data in the _input_q for processing
                if data and data != b"":
                    log_data["request"] = data
                    if self._command_channel_input_q.qsize() > self.buffer_limit:
                        # Flush buffer if it gets too long (possible DOS condition). RFC-959 specifies that
                        # 500 response should be given in such cases.
                        logger.info(
                            "FTP command input exceeded buffer from client {}".format(
                                self.client_address
                            )
                        )
                        self.respond(b"500 Command too long.")
                    else:
                        self.metrics.command_chanel_bytes_recv += len(data)
                        self.metrics.last_active = time.time()
                        self._command_channel_input_q.put(data)
            # make sure the socket is ready to write
            elif self.client_sock in socket_write and (
                not self._command_channel_output_q.empty()
            ):
                response = self._command_channel_output_q.get()
                if response is not None:
                    logger.debug(
                        "Sending packet {} to client {}".format(
                            self.client_address, response
                        )
                    )
                    log_data["response"] = response
                    # add len to metrics
                    self.metrics.command_chanel_bytes_send += len(response)
                    self.metrics.last_active = time.time()
                    self.client_sock.send(response)
            if "request" in log_data or "response" in log_data:
                logger.info(
                    "FTP traffic to {}: {} ({})".format(
                        self.client_address, log_data, self.session.id
                    )
                )
                self.session.add_event(log_data)
        except socket.error as se:
            # EWOULDBLOCK is transient on the non-blocking socket; back off briefly
            if se.errno == errno.EWOULDBLOCK:
                gevent.sleep(0.1)
            else:
                logger.info(
                    "Socket error, remote: {}. ({}). Error {}".format(
                        self.client_address, self.session.id, se
                    )
                )
                self.session.add_event({"type": "CONNECTION_LOST"})
                self.finish()
def respond(self, response):
"""Send processed command/data as reply to the client"""
response = (
response.encode("utf-8") if not isinstance(response, bytes) else response
)
response = (
response + self.terminator if response[-2:] != self.terminator else response
)
self._command_channel_output_q.put(response)
    def process_ftp_command(self):
        """Consume and execute commands from ``_command_channel_input_q``.

        Abstract hook: concrete command channels (e.g. ``FTPCommandChannel``)
        must override this.
        """
        raise NotImplementedError
# -- FTP Data Channel --------------
def start_data_channel(self, send_recv="send"):
"""
Starts the data channel. To be called from the command process greenlet.
:param send_recv: Whether the event is a send event or recv event. When set to 'send' data channel's socket
writes data in the output queues else when set to 'read' data channel's socket reads data into the input queue.
:type send_recv: str
"""
try:
assert self.cli_port and self.cli_port and self._data_sock
if self._data_channel is True:
logger.debug("Already sending some data that has to finish first.")
# waait till that process finishes.
self._data_channel_send.wait()
self._data_channel_recv.wait()
if send_recv == "send":
# we just want to do send and not receive
self._data_channel_send.clear()
self._data_channel_recv.set()
else:
# we just want to do receive and not send
self._data_channel_recv.clear()
self._data_channel_send.set()
self._data_channel = True
except AssertionError:
self.respond(b"425 Use PORT or PASV first.")
logger.info(
"Can't initiate {} mode since either of IP or Port supplied by the "
"client are None".format(self.active_passive_mode)
)
    def stop_data_channel(self, abort=False, purge=False, reason=None):
        """Shut the data channel down.

        :param abort: when False, wait for in-flight transfers to complete first.
        :param purge: also forget the client's IP/port and drain both data queues.
        :param reason: optional human-readable reason, logged if given.
        """
        if reason:
            logger.info("Closing data channel. Reason: {}".format(reason))
        # NOTE(review): the second operand inspects the *command* input queue;
        # _data_channel_input_q may have been intended here -- confirm.
        if (not self._data_channel_output_q.empty()) or (
            not self._command_channel_input_q.empty()
        ):
            if not abort:
                # Wait for all transfers to complete.
                self._data_channel_send.wait()
                self._data_channel_recv.wait()
        self._data_channel = False
        if self._data_sock and self._data_sock.fileno() != -1:
            self._data_sock.close()
        # don't want to do either send and receive.
        # Although this is done while sending - we're doing it just to be safe.
        self._data_channel_recv.set()
        self._data_channel_send.set()
        if purge:
            self.cli_ip = None
            self.cli_port = None
            # purge data in buffers .i.e the data queues.
            while self._data_channel_input_q.qsize() != 0:
                _ = self._data_channel_input_q.get()
            while self._data_channel_output_q.qsize() != 0:
                _ = self._data_channel_output_q.get()
def handle_data_channel(self):
if self._data_channel:
try:
# Need to know what kind of event are we expecting.
if not self._data_channel_send.is_set():
# must be a sending event. Get from the output_q and write it to socket.
# pick an item from the _data_channel_output_q and send it to the requisite socket
if not self._data_channel_output_q.empty():
# Consumes data from the data channel output queue. Log it and sends it across to the client.
# If a file needs to be send, pass the file name directly as file parameter. sendfile is used
# in this case.
data = self._data_channel_output_q.get()
if data["type"] == "raw_data":
logger.info(
"Send data {} at {}:{} for client : {}".format(
data["data"],
self.cli_ip,
self.cli_port,
self.client_address,
)
)
self.metrics.last_active = time.time()
self._data_sock.send(data=data["data"])
self.metrics.data_channel_bytes_send += len(data)
elif data["type"] == "file":
file_name = data["file"]
if self.config.vfs.isfile(file_name):
logger.info(
"Sending file {} to client {} at {}:{}".format(
file_name,
self.client_address,
self.cli_ip,
self.cli_port,
)
)
try:
self.metrics.last_active = time.time()
with self.config.vfs.open(
file_name, mode="rb"
) as file_:
self._data_sock.sendfile(file_, 0)
_size = self.config.vfs.getsize(file_name)
self.metrics.data_channel_bytes_send += _size
except (fs.errors.FSError, FilesystemError):
raise
if self._data_channel_output_q.qsize() == 0:
logger.debug(
"No more data to read. Either transfer finished or error occurred."
)
self._data_channel_send.set()
self.respond(b"226 Transfer complete.")
elif not self._data_channel_recv.is_set():
# must be a receiving event. Get data from socket and add it to input_q
# Receive data, log it and add it to the data channel input queue.
self.respond(b"125 Transfer starting.")
data = self._data_sock.recv(self._ac_in_buffer_size)
if data and data != b"":
self.metrics.last_active = time.time()
# There is some data -- could be a file.
logger.debug(
"Received {} from client {} on {}:{}".format(
data, self.client_address, self.cli_ip, self.cli_port
)
)
self.metrics.data_channel_bytes_recv += len(data)
self._data_channel_input_q.put(data)
while data and data != b"":
self.metrics.last_active = time.time()
data = self._data_sock.recv(self._ac_in_buffer_size)
logger.debug(
"Received {} from client {} on {}:{}".format(
data,
self.client_address,
self.cli_ip,
self.cli_port,
)
)
self.metrics.data_channel_bytes_recv += len(data)
self._data_channel_input_q.put(data)
# we have received all data. Time to finish this process.
# set the writing event to set - so that we can write this data to files.
self._data_channel_recv.set()
self.respond(b"226 Transfer complete.")
else:
# assume that the read/write event has finished
# send a nice resp to the client saying everything has finished.
# set the self._data_channel(_recv/_send) markers. This would also wait for read write to finish.
self.stop_data_channel(reason="Transfer has completed!.")
except (socket.error, socket.timeout) as se:
# TODO: send appropriate response
# Flush contents of the data channel
reason = (
"connection timed out"
if isinstance(se, socket.timeout)
else "socket error"
)
msg = "Stopping FTP data channel {}:{}. Reason: {}".format(
self.cli_ip, self.cli_port, reason
)
self.stop_data_channel(abort=True, purge=True, reason=msg)
except (
fs.errors.FSError,
FilesystemError,
FTPPrivilegeException,
FilesystemError,
) as fe:
self.respond(b"550 Transfer failed.")
self.stop_data_channel(
abort=True,
reason="VFS related exception occurred: {}".format(str(fe)),
)
    def recv_file(self, _file, _file_pos=0, cmd="STOR"):
        """
        Receive a file - to be used with STOR, REST and APPE. A copy would be made on the _data_fs.
        :param _file: File Name to the file that would be written to fs.
        :param _file_pos: Seek file to position before receiving.
        :param cmd: Command used for receiving file ("STOR", "APPE" or "REST").
        """
        # FIXME: acquire lock to files - both data_fs and vfs.
        with self.config.vfs.lock():
            self.start_data_channel(send_recv="recv")
            recv_err = None
            logger.info("Receiving data from {}:{}".format(self.cli_ip, self.cli_port))
            # a forensic copy of every upload is kept on data_fs under a
            # name derived from the client's address
            _data_fs_file = sanitize_file_name(
                _file, self.client_address[0], str(self.client_address[1])
            )
            _data_fs_d = None
            _file_d = None
            # wait till all transfer has finished.
            self._data_channel_recv.wait()
            try:
                # Block until the first chunk is available.
                # NOTE(review): no timeout is passed here, despite the
                # original comment claiming a 10-second block -- confirm.
                _data = self._data_channel_input_q.get()
                _data_fs_d = self.config.data_fs.open(path=_data_fs_file, mode="wb")
                if _file_pos == 0 and cmd == "STOR":
                    # overwrite file or create a new one.
                    # we don't need to seek at all. Normal write process by STOR
                    _file_d = self.config.vfs.open(path=_file, mode="wb")
                else:
                    assert _file_pos != 0
                    # must seek file. This is done in append or rest(resume transfer) command.
                    # in that case, we should create a duplicate copy of this file till that seek position.
                    with self.config.vfs.open(path=_file, mode="rb") as _file_d:
                        _data_fs_d.write(_file_d.read(_file_pos))
                    # finally we should let the file to be written as requested.
                    if cmd == "APPE":
                        _file_d = self.config.vfs.open(path=_file, mode="ab")
                    else:
                        # cmd is REST
                        _file_d = self.config.vfs.open(path=_file, mode="rb+")
                        _file_d.seek(_file_pos)
                _file_d.write(_data)
                _data_fs_d.write(_data)
                # drain the rest of the received chunks into both files
                while not self._data_channel_input_q.empty():
                    _data = self._data_channel_input_q.get()
                    _file_d.write(_data)
                    _data_fs_d.write(_data)
                logger.info(
                    "Files {} and {} written successfully to disk".format(
                        _file, _data_fs_file
                    )
                )
            except (
                AssertionError,
                IOError,
                fs.errors.FSError,
                FilesystemError,
                FTPPrivilegeException,
            ) as fe:
                recv_err = fe
                self.stop_data_channel(abort=True, reason=str(fe))
                self.respond("554 {} command failed.".format(cmd))
            finally:
                if _file_d and _file_d.fileno() != -1:
                    _file_d.close()
                if _data_fs_d and _data_fs_d.fileno() != -1:
                    _data_fs_d.close()
                if not recv_err:
                    # fix up ownership, permissions and timestamps of the new file
                    self.config.vfs.chmod(_file, self.config.file_default_perms)
                    if cmd == "STOR":
                        self.config.vfs.chown(
                            _file, uid=self._uid, gid=self.config.get_gid(self._uid)
                        )
                    self.config.vfs.settimes(
                        _file, accessed=datetime.now(), modified=datetime.now()
                    )
                    self.respond(b"226 Transfer complete.")
def push_data(self, data):
"""Handy utility to push some data using the data channel"""
# ensure data is encoded in bytes
data = data.encode("utf8") if not isinstance(data, bytes) else data
self._data_channel_output_q.put({"type": "raw_data", "data": data})
def send_file(self, file_name):
"""Handy utility to send a file using the data channel"""
if self._data_channel:
self.respond("125 Data connection already open. Transfer starting.")
else:
self.respond("150 File status okay. About to open data connection.")
self._data_channel_output_q.put({"type": "file", "file": file_name})
self.start_data_channel()
# -- FTP Authentication and other unities --------
# FIXME: Refactor this. Move this to the auth module.
def authentication_ok(self, user_pass):
"""
Verifies authentication and sets the username of the currently connected client. Returns True or False
Checks user names and passwords pairs. Sets the current user and uid.
"""
# if anonymous ftp is enabled - accept any password.
try:
if self.username == "anonymous" and self.config.anon_auth:
self.authenticated = True
self._uid = self.config.anon_uid
self.username = self.config.user_db[self._uid]["uname"]
return True
else:
if (self.username, user_pass) in self.config.user_pass:
# user/pass match and correct!
self.authenticated = True
self._uid = self.config.get_uid(self.username)
return True
return False
except (KeyError, ValueError):
return False
# -- Actual FTP Handler -----------
    def handle(self):
        """Actual FTP service to which the user has connected.

        Spawns three greenlets per iteration — command-channel socket pump,
        command processor, and data-channel pump — and joins them until the
        session flags itself for disconnection.
        """
        while not self.disconnect_client:
            try:
                # These greenlets would be running forever. During the connection.
                # first two are for duplex command channel. Final one is for storing files on file-system.
                self.ftp_greenlets = [
                    gevent.spawn(self.handle_cmd_channel),
                    gevent.spawn(self.process_ftp_command),
                    gevent.spawn(self.handle_data_channel),
                ]
                gevent.joinall(self.ftp_greenlets)
                # Block till all jobs are not finished
            except KeyboardInterrupt:
                logger.info("Shutting FTP server.")
            finally:
                # always reap the greenlets before looping again
                gevent.killall(self.ftp_greenlets)
| 30,617 | Python | .py | 631 | 33.633914 | 120 | 0.527284 | mushorg/conpot | 1,224 | 413 | 115 | GPL-2.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,477 | ftp_handler.py | mushorg_conpot/conpot/protocols/ftp/ftp_handler.py | # This module is based on the original work done by Giampaolo Rodola and pyftpdlib authors.
# This is a heavily customized version that supports Conpot's virtual file system os.* wrappers and gevent support.
from conpot.protocols.ftp.ftp_base_handler import FTPHandlerBase
import logging
import fs
import os
import glob
import sys
import tempfile
from datetime import datetime
import gevent
from gevent import socket
from conpot.core.filesystem import FilesystemError, FSOperationNotPermitted
from conpot.protocols.ftp.ftp_utils import FTPPrivilegeException, get_data_from_iter
logger = logging.getLogger(__name__)
# -----------------------------------------------------
# *Implementation Note*: Regarding Permissions:
# -----------------------------------------------------
# To change a directory as current directory we need permissions : rwx (CWD)| Also it has to be a directory. |
# To read a file we need : r permissions - It has to be a file | list files (LIST, NLST, STAT, SIZE, MDTM
# RETR commands)
# To store a file to the server we need 'w' permissions - (STOR, STOU commands)
# To rename file or directory we need 'w' permissions (RNFR, RNTO)
# To delete file or directory we need 'w' permissions (DELE, RMD commands)
# To append data to an existing file (APPE command) we need 'w' permissions
# -----------------------------------------------------
class FTPCommandChannel(FTPHandlerBase):
"""
FTP Command Responder. Implementation of RFC 959.
"""
# -----------------------------------------------------------------------
# There are some commands that do not require any kind of auth and permissions to run.
# These also do not require us to have an established data channel. So let us get rid of those first.
# Btw: These are - USER, PASS, HELP, NOOP, SYST, QUIT, SITE HELP, PWD, TYPE
# All commands assume that the path supplied have been cleaned and sanitized.
# The USER command is used to verify users as they try to login.
def do_USER(self, arg):
"""
USER FTP command. If the user is already logged in, return 530 else 331 for the PASS command
:param arg: username specified by the client/attacker
"""
# first we need to check if the user is authenticated?
if self.authenticated:
self.respond(b"530 Cannot switch to another user.")
else:
self.username = arg
self.respond(b"331 Now specify the Password.")
def do_PASS(self, arg):
if self.authenticated:
self.respond(b"503 User already authenticated.")
if not self.username:
self.respond(b"503 Login with USER first.")
if self.authentication_ok(user_pass=arg):
if not self.config.motd:
self.respond(b"230 Log in Successful.")
else:
_msg = "220-{}\r\n".format(self.config.motd)
self.respond(_msg)
self.respond(b"220 ")
else:
self.invalid_login_attempt += 1
self.respond(b"530 Authentication Failed.")
    def do_HELP(self, arg):
        """Return help text to the client.

        With an argument: help for that single command (or 501 if unknown).
        Without: a multi-line 214 listing of all non-SITE commands, eight
        per row.
        """
        if arg:
            line = arg.upper()
            if line in self.config.COMMANDS:
                # %a applies ascii() to the help value
                self.respond(b"214 %a" % self.config.COMMANDS[line]["help"])
            else:
                self.respond(b"501 Unrecognized command.")
        else:
            # provide a compact list of recognized commands
            def formatted_help():
                # rows of up to eight left-justified command names
                cmds = []
                keys = sorted(
                    [
                        x
                        for x in self.config.COMMANDS.keys()
                        if not x.startswith("SITE ")
                    ]
                )
                while keys:
                    elems = tuple((keys[0:8]))
                    cmds.append(b" %-6a" * len(elems) % elems + b"\r\n")
                    del keys[0:8]
                return b"".join(cmds)

            _buffer = b"214-The following commands are recognized:\r\n"
            _buffer += formatted_help()
            self.respond(_buffer + b"214 Help command successful.")
def do_NOOP(self, arg):
"""Do nothing. No params required. No auth required and no permissions required."""
self.respond(b"200 I successfully done nothin'.")
def do_SYST(self, arg):
"""Return system type (always returns UNIX type: L8)."""
# This command is used to find out the type of operating system
# at the server. The reply shall have as its first word one of
# the system names listed in RFC-943.
# Since that we always return a "/bin/ls -lA"-like output on
# LIST we prefer to respond as if we would on Unix in any case.
self.respond(b"215 UNIX Type: L8")
def do_QUIT(self, arg):
self.respond(b"221 Bye.")
self.session.add_event({"type": "CONNECTION_TERMINATED"})
self.disconnect_client = True
def do_SITE_HELP(self, line):
"""Return help text to the client for a given SITE command."""
if line:
line = line.upper()
if line in self.config.COMMANDS:
self.respond(b"214 %a" % self.config.COMMANDS[line]["help"])
else:
self.respond(b"501 Unrecognized SITE command.")
else:
_buffer = b"214-The following SITE commands are recognized:\r\n"
site_cmds = []
for cmd in sorted(self.config.COMMANDS.keys()):
if cmd.startswith("SITE "):
site_cmds.append(b" %a\r\n" % cmd[5:])
_buffer_cmds = b"".join(site_cmds)
self.respond(_buffer + _buffer_cmds + b"214 Help SITE command successful.")
def do_MODE(self, line):
"""Set data transfer mode ("S" is the only one supported (noop))."""
mode = line.upper()
if mode == "S":
self.respond(b"200 Transfer mode set to: S")
elif mode in ("B", "C"):
self.respond(b"504 Unimplemented MODE type.")
else:
self.respond(b"501 Unrecognized MODE type.")
def do_PWD(self, arg):
"""Return the name of the current working directory to the client."""
pwd = self.working_dir
try:
assert isinstance(pwd, str), pwd
_pwd = '257 "{}" is the current directory.'.format(pwd)
self.respond(_pwd.encode())
except AssertionError:
logger.info("FTP CWD specified is not unicode. {}".format(pwd))
self.respond(b"FTP CWD not unicode.")
def do_TYPE(self, line):
"""Set current type data type to binary/ascii"""
data_type = line.upper().replace(" ", "")
if data_type in ("A", "L7"):
self.respond(b"200 Type set to: ASCII.")
self._current_type = "a"
elif data_type in ("I", "L8"):
self.respond(b"200 Type set to: Binary.")
self._current_type = "i"
else:
self.respond(b'504 Unsupported type "%a".' % line)
def do_STRU(self, line):
"""Set file structure ("F" is the only one supported (noop))."""
stru = line.upper()
if stru == "F":
self.respond(b"200 File transfer structure set to: F.")
elif stru in ("P", "R"):
self.respond(b"504 Unimplemented STRU type.")
else:
self.respond(b"501 Unrecognized STRU type.")
    def do_ALLO(self, arg):
        """Allocate bytes for storage (noop).

        ALLO is obsolete on systems without pre-allocation; RFC 959 allows
        a 202 "superfluous" reply in that case.
        """
        # not necessary (always respond with 202)
        self.respond(b"202 No storage allocation necessary.")
def do_REIN(self, arg):
"""Reinitialize user's current session."""
self.stop_data_channel()
self.username = None
self._uid = None
self.authenticated = False
self.respond(b"230 Ready for new user.")
# -----------------------------------------------------------------------
# Next up we have commands that may require some kind of auth and permissions to run.
# These also do not require us to have an established data channel.
# These commands are -
# - {MDTM, SIZE, STAT, DELE, RNFR, RNTO} - require read permissions
# - {MKD, RMD} - require write permissions + path should be files
# - {CDUP, CWD and CHMOD} - require 'rwx' permissions + path should be folders
# Again all commands assume that the path supplied have been cleaned and sanitized.
    def do_MDTM(self, path):
        """Return last modification time of file to the client as an ISO
        3307 style timestamp (YYYYMMDDHHMMSS) as defined in RFC-3659.
        On success return the file path, else None.
        """
        try:
            path = self.ftp_path(path)
            # MDTM only applies to regular files
            if not self.config.vfs.isfile(path):
                _msg = "550 {} is not retrievable".format(path)
                self.respond(_msg.encode())
                return
            # read permission on the file is required
            with self.config.vfs.check_access(path=path, user=self._uid, perms="r"):
                m_time = "213 {}".format(
                    self.config.vfs.getmtime(path).strftime("%Y%m%d%H%M%S")
                )
                self.respond(m_time.encode())
        except FSOperationNotPermitted:
            self.respond(b"500 Operation not permitted.")
        except (ValueError, fs.errors.FSError, FilesystemError, FTPPrivilegeException):
            # It could happen if file's last modification time
            # happens to be too old (prior to year 1900)
            self.respond(b"550 Can't determine file's last modification time.")
    def do_SIZE(self, path):
        """Return size of file in a format suitable for using with RESTart as defined in RFC-3659.

        Refused in ASCII mode (the on-the-wire size would differ from the
        stored size); requires read permission on a regular file.
        """
        try:
            path = self.ftp_path(path)
            if self._current_type == "a":
                self.respond(b"550 SIZE not allowed in ASCII mode.")
                return
            # If the file is a sym-link i.e. not readable, send not retrievable
            if not self.config.vfs.isfile(path):
                self.respond(b"550 is not retrievable.")
                return
            else:
                with self.config.vfs.check_access(path=path, user=self._uid, perms="r"):
                    size = self.config.vfs.getsize(path)
                    self.respond(b"213 %a" % size)
        except FSOperationNotPermitted:
            self.respond(b"500 Operation not permitted.")
        except (OSError, fs.errors.FSError) as err:
            # NOTE(review): self._log_err is not defined anywhere in this
            # file -- confirm it exists on the base class.
            self.respond(b"550 %a." % self._log_err(err))
def do_STAT(self, path):
    """If invoked without parameters, returns general status information about the FTP server process.
    If a parameter is given, acts like the LIST command, except that data is sent over the command
    channel (no PORT or PASV command is required).
    """
    # return STATus information about ftp data connection
    if not path:
        s = list()
        s.append("Connected to: {}:{}".format(self.host, self.port))
        if self.authenticated:
            s.append("Logged in as: {}".format(self.username))
        else:
            if not self.username:
                s.append("Waiting for username.")
            else:
                s.append("Waiting for password.")
        if self._current_type == "a":
            _type = "ASCII"
        else:
            _type = "Binary"
        s.append("TYPE: {}; STRUcture: File; MODE: Stream".format(_type))
        if self._data_sock is not None and self._data_channel is False:
            s.append("Passive data channel waiting for connection.")
        elif self._data_channel is True:
            # NOTE: "chanel" below is the actual spelling of the metrics
            # attributes, not a typo introduced here.
            bytes_sent = (
                self.metrics.data_channel_bytes_send
                + self.metrics.command_chanel_bytes_send
            )
            bytes_recv = (
                self.metrics.command_chanel_bytes_recv
                + self.metrics.data_channel_bytes_recv
            )
            elapsed_time = self.metrics.get_elapsed_time()
            s.append("Data connection open:")
            s.append("Total bytes sent: {}".format(bytes_sent))
            s.append("Total bytes received: {}".format(bytes_recv))
            s.append("Transfer elapsed time: {} secs".format(elapsed_time))
        else:
            s.append("Data connection closed.")
        # NOTE(review): these replies are str while most others are bytes —
        # presumably respond() encodes str transparently; confirm.
        self.respond("211-FTP server status:\r\n")
        self.respond("".join([" {}\r\n".format(item) for item in s]))
        self.respond("211 End of status.")
    # return directory LISTing over the command channel
    else:
        try:
            line = self.ftp_path(path)
            with self.config.vfs.check_access(path=line, user=self._uid, perms="r"):
                # NOTE(review): the raw 'path' is used below while access was
                # checked on the resolved 'line' — confirm they are equivalent.
                if self.config.vfs.isdir(path):
                    listing = self.config.vfs.listdir(path)
                    # RFC 959 recommends the listing to be sorted.
                    listing.sort()
                    iterator = self.config.vfs.format_list(path, listing)
                else:
                    basedir, filename = os.path.split(path)
                    # NOTE(review): self.config.stat(path) — config has no
                    # obvious stat(); presumably self.config.vfs.stat was
                    # intended. Confirm before changing.
                    self.config.stat(path)
                    iterator = self.config.vfs.format_list(basedir, [filename])
                _status = '213-Status of "{}":\r\n'.format(line)
                _status += get_data_from_iter(iterator)
                _status += "213 End of status."
                self.respond(_status.encode())
        except FSOperationNotPermitted:
            self.respond(b"500 Operation not permitted.")
        except (
            OSError,
            FilesystemError,
            AssertionError,
            fs.errors.FSError,
            FTPPrivilegeException,
        ):
            self.respond(b"550 STAT command failed.")
def do_MKD(self, path):
    """
    Create the specified directory. On success return the directory path, else None.

    Requires 'w' permission on the current working directory. The new
    directory is chmod'ed/chown'ed to the configured defaults after the
    257 reply has been sent.
    """
    try:
        # In order to create a directory the current user must have 'w' permissions for the parent directory
        # of current path.
        _dir = self.ftp_path(path)
        with self.config.vfs.check_access(
            path=self.working_dir, user=self._uid, perms="w"
        ):
            self.config.vfs.makedir(_dir)
            _mkd = '257 "{}" directory created.'.format(_dir)
            # NOTE(review): 257 goes out before chmod/chown/settimes — a
            # failure below still triggers the 500/550 handlers after the
            # success reply has already been sent; confirm intended.
            self.respond(_mkd)
            self.config.vfs.chmod(_dir, self.config.dir_default_perms)
            self.config.vfs.chown(
                _dir, uid=self._uid, gid=self.config.get_gid(self._uid)
            )
            self.config.vfs.settimes(_dir, datetime.now(), datetime.now())
    except FSOperationNotPermitted:
        self.respond(b"500 Operation not permitted.")
    except (FilesystemError, fs.errors.FSError, FTPPrivilegeException):
        self.respond(b"550 Create directory operation failed.")
def do_RMD(self, path):
    """Remove the specified directory. On success return the directory path, else None."""
    # Refuse to remove the root / current working directory.
    if self.ftp_path(path) == self.working_dir or path == "/":
        self.respond(b"550 Can't remove root directory.")
        return
    try:
        _path = self.ftp_path(self.working_dir + path)
        # Removing a directory requires 'w' permission on its parent.
        with self.config.vfs.check_access(
            path=self.ftp_path(os.path.join(path, "../")), user=self._uid, perms="w"
        ):
            self.config.vfs.removedir(_path)
            self.respond(b"250 Directory removed.")
    except FSOperationNotPermitted:
        self.respond(b"500 Operation not permitted.")
    except (fs.errors.FSError, FilesystemError, FTPPrivilegeException):
        self.respond(b"550 Remove directory operation failed.")
def do_CWD(self, path):
    """Change the current working directory.

    The target must be an existing directory and the user needs 'rwx'
    access to it. Replies 250 with the new directory on success.
    """
    # Temporarily join the specified directory to see if we have permissions to do so, then get back to original
    # process's current working directory.
    try:
        init_cwd = self.working_dir
        if not self.config.vfs.isdir(path):
            # A non-directory target is reported like a permission failure.
            raise FSOperationNotPermitted
        # make sure the current user has permissions to the new dir. To change the directory, user needs to have
        # executable permissions for the directory
        with self.config.vfs.check_access(path=path, user=self._uid, perms="rwx"):
            self.working_dir = self.ftp_path(path)
            logger.info(
                "Changing current directory {} to {}".format(init_cwd, self.working_dir)
            )
            _cwd = '250 "{}" is the current directory.'.format(self.working_dir)
            self.respond(_cwd.encode())
    except FSOperationNotPermitted:
        self.respond(b"500 Operation not permitted.")
    except (fs.errors.FSError, FilesystemError, FTPPrivilegeException):
        self.respond(b"550 Failed to change directory.")
def do_CDUP(self, arg):
    """Change into the parent directory. On success return the new directory, else None.

    RFC-959 nominally requires reply code 200 for CDUP, but it also states
    that CDUP uses the same reply codes as CWD, so this simply delegates
    to do_CWD with "<cwd>/../" as the target.
    """
    parent = "/".join([self.ftp_path(self.working_dir), "../"])
    return self.do_CWD(path=parent)
def do_DELE(self, path):
    """Delete the specified file.

    Requires 'w' access on the file; replies 250 on success, 550 for
    non-files or filesystem errors, 500 when access is denied.
    """
    try:
        path = self.ftp_path(path)
        if not self.config.vfs.isfile(path):
            # Directories and missing paths are reported as a failed delete.
            self.respond(b"550 Failed to delete file.")
        else:
            with self.config.vfs.check_access(path=path, user=self._uid, perms="w"):
                self.config.vfs.remove(path)
                self.respond(b"250 File removed.")
    except FSOperationNotPermitted:
        self.respond(b"500 Operation not permitted.")
    except (fs.errors.FSError, FilesystemError, FTPPrivilegeException):
        self.respond(b"550 Failed to delete file.")
def do_RNFR(self, path):
    """Rename the specified (only the source name is specified
    here, see RNTO command).

    On success the validated source path is stashed in self._rnfr and a
    350 reply invites the client to send RNTO.
    """
    try:
        path = self.ftp_path(path)
        if self.config.vfs.isfile(path) or self.config.vfs.isdir(path):
            with self.config.vfs.check_access(path=path, user=self._uid, perms="w"):
                assert isinstance(path, str)
                if path == "/":
                    self.respond(b"550 Can't rename home directory.")
                else:
                    self._rnfr = path
                    self.respond(b"350 Ready for destination name.")
        else:
            # Path is neither a file nor a directory -> 550 below.
            raise AssertionError
    except FSOperationNotPermitted:
        self.respond(b"500 Operation not permitted.")
    except (
        AssertionError,
        KeyError,
        fs.errors.FSError,
        FilesystemError,
        FTPPrivilegeException,
    ):
        self.respond(b"550 No such file or directory.")
def do_RNTO(self, dst_path):
    """Rename file (destination name only, source is specified with RNFR).

    Only the final component of *dst_path* is used; the destination stays
    in the source's parent directory. Replies 503 without a prior RNFR.
    """
    try:
        assert isinstance(dst_path, str)
        if not self._rnfr:
            self.respond(b"503 Bad sequence of commands: use RNFR first.")
            return
        src = self.ftp_path(self._rnfr)
        # Consume the pending RNFR marker exactly once.
        self._rnfr = None
        if self.config.vfs.isdir(src):
            _move = self.config.vfs.movedir
        elif self.config.vfs.isfile(src):
            _move = self.config.vfs.move
        else:
            raise FilesystemError
        with self.config.vfs.check_access(path=src, user=self._uid, perms="w"):
            _path, _file = os.path.split(src)
            _, _dst_file = os.path.split(dst_path)
            # create new paths, both anchored at the source's directory
            _file = os.path.join(_path, _file)
            _dst_file = os.path.join(_path, _dst_file)
            if _file != _dst_file:
                logger.info("Renaming file from {} to {}".format(_file, _dst_file))
                _move(_file, _dst_file, overwrite=True)
            self.respond(b"250 Renaming ok.")
    except FSOperationNotPermitted:
        self.respond(b"500 Operation not permitted.")
    except (ValueError, fs.errors.FSError, FilesystemError, FTPPrivilegeException):
        self.respond(b"550 File rename operation failed.")
def do_SITE_CHMOD(self, path, mode):
    """Change file mode. On success return a (file_path, mode) tuple.

    *mode* must be a 3- or 4-digit octal string; anything else yields 501.
    """
    try:
        # Note: although most UNIX servers implement it, SITE CHMOD is not
        # defined in any official RFC.
        path = self.ftp_path(path)
        with self.config.vfs.check_access(path=path, user=self._uid, perms="rwx"):
            # Validate shape before converting: 3-4 octal digits.
            assert len(mode) in (3, 4)
            for x in mode:
                assert 0 <= int(x) <= 7
            mode = int(mode, 8)
            # To do a chmod user needs to be the owner of the file.
            self.config.vfs.chmod(path, mode)
            self.respond(b"200 SITE CHMOD successful.")
    except FSOperationNotPermitted:
        self.respond(b"500 Operation not permitted.")
    except (AssertionError, ValueError):
        self.respond(b"501 Invalid SITE CHMOD format.")
    except (fs.errors.FSError, FilesystemError, FTPPrivilegeException):
        self.respond(b"550 SITE CHMOD command failed.")
# -----------------------------------------------------------------------
# Following up we have the PORT (active) and PASV (passive) commands.
# These set up the data channel for data transfer.
def do_PASV(self, arg):
    """
    Starts a Passive Data Channel using IPv4. We don't actually need to start the full duplex connection here.
    Just need to figure the host ip and the port. The DTP connection would start in each command.
    """
    if self._data_channel:
        self.stop_data_channel(purge=True, reason="Switching from PASV mode.")
    self.active_passive_mode = "PASV"
    # We are in passive mode. Here we would create a simple socket listener.
    self._data_listener_sock = gevent.socket.socket()
    # Port 0 lets the kernel pick a free ephemeral port.
    self._data_listener_sock.bind((self._local_ip, 0))
    ip, port = self._data_listener_sock.getsockname()
    # 227 reply encodes the endpoint as h1,h2,h3,h4,p1,p2 (port = p1*256 + p2).
    self.respond(
        "227 Entering Passive Mode (%s,%u,%u)."
        % (",".join(ip.split(".")), port >> 8 & 0xFF, port & 0xFF)
    )
    try:
        self._data_listener_sock.listen(1)
        self._data_listener_sock.settimeout(
            5
        )  # Timeout for ftp client to send info
        logger.info(
            "Client {} entering FTP passive mode".format(self.client_address)
        )
        # Block (up to 5s) for the client to connect to the advertised port.
        (
            self._data_sock,
            (self.cli_ip, self.cli_port),
        ) = self._data_listener_sock.accept()
        logger.info(
            "Client {} provided ({}:{}) for PASV connection.".format(
                self.client_address, self.cli_ip, self.cli_port
            )
        )
        logger.info(
            "FTP: starting data channel for client {}".format(self.client_address)
        )
        # The listener served its purpose; only the accepted socket is kept.
        self._data_listener_sock.close()
    except (socket.error, socket.timeout) as se:
        logger.info(
            "Can't switch to PASV mode. Error occurred: {}".format(str(se))
        )
        self.respond(b"550 PASV command failed.")
def do_PORT(self, arg):
    """
    Starts an active data channel by using IPv4. We don't actually need to start the full duplex connection here.
    Just need to figure the host ip and the port. The DTP connection would start in each command.
    """
    if self._data_channel:
        self.stop_data_channel(purge=True, reason="Switching from PORT mode.")
    self.active_passive_mode = "PORT"
    try:
        # PORT argument is h1,h2,h3,h4,p1,p2 -> ip h1.h2.h3.h4, port p1*256+p2.
        addr = list(map(int, arg.split(",")))
        if len(addr) != 6:
            raise ValueError
        for x in addr[:4]:
            if not 0 <= x <= 255:
                raise ValueError
        ip = "%d.%d.%d.%d" % tuple(addr[:4])
        port = (addr[4] * 256) + addr[5]
        if not 0 <= port <= 65535:
            raise ValueError
        self.cli_ip, self.cli_port = ip, port
        # Actively connect back to the address the client advertised.
        self._data_sock = gevent.socket.socket()
        self._data_sock.connect((self.cli_ip, self.cli_port))
        logger.info("Client {} entered FTP active mode".format(self.client_address))
        logger.info(
            "Client {} provided {}:{} for PORT connection.".format(
                self.client_address, self.cli_ip, self.cli_port
            )
        )
        self.respond(b"200 PORT Command Successful. Consider using PASV.")
        logger.info(
            "FTP: configured data channel for client {}".format(self.client_address)
        )
    except (ValueError, OverflowError):
        # NOTE(review): str reply while siblings use bytes — presumably
        # respond() encodes str transparently; confirm.
        self.respond("501 Invalid PORT format.")
    except socket.error as se:
        # NOTE(review): when no data channel exists, a socket error sends
        # no reply at all, leaving the client waiting — confirm intended.
        if self._data_channel:
            self.stop_data_channel(
                reason="Can't switch to Active(PORT) mode. Error occurred: {}".format(
                    str(se)
                )
            )
# -- Data Channel related commands --
def do_LIST(self, path):
    """Send a long-format directory listing of *path* over the data channel.

    Requires 'r' access; replies 150 before the transfer, 226 after,
    550 on failure.
    """
    try:
        _path = self.ftp_path(path)
        with self.config.vfs.check_access(path=_path, user=self._uid, perms="r"):
            listing = self.config.vfs.listdir(_path)
            if isinstance(listing, list):
                # RFC 959 recommends the listing to be sorted.
                listing.sort()
            iterator = self.config.vfs.format_list(_path, listing)
            self.respond("150 Here comes the directory listing.")
            _list_data = get_data_from_iter(iterator)
            # Push data to the data channel
            self.push_data(_list_data.encode())
            # start the data channel to actually transmit it
            self.start_data_channel()
            self.respond(b"226 Directory send OK.")
    except FSOperationNotPermitted:
        self.respond(b"500 Operation not permitted.")
    except (
        OSError,
        fs.errors.FSError,
        FilesystemError,
        FTPPrivilegeException,
    ) as err:
        self._log_err(err)
        self.respond(b"550 LIST command failed.")
def do_NLST(self, path):
    """Return a list of files in the specified directory in a compact form to the client.

    Bare names only, CRLF separated, sent over the data channel.
    """
    try:
        _path = self.ftp_path(path)
        with self.config.vfs.check_access(path=_path, user=self._uid, perms="r"):
            listing = self.config.vfs.listdir(_path)
            data = ""
            if listing:
                listing.sort()
                data = "\r\n".join(listing) + "\r\n"
            self.respond(b"150 Here comes the directory listing.")
            # Push data to the data channel
            self.push_data(data=data)
            # start the data channel to actually transmit it
            self.start_data_channel()
            self.respond(b"226 Directory send OK.")
    except FSOperationNotPermitted:
        self.respond(b"500 Operation not permitted.")
    except (
        OSError,
        fs.errors.FSError,
        FilesystemError,
        FTPPrivilegeException,
    ) as err:
        self._log_err(err)
        self.respond(b"550 NLST command failed.")
def do_RETR(self, arg):
    """
    Fetch and send a file.
    :param arg: Filename that is to be retrieved

    Requires 'r' access on the file; replies 550 when the path is not a
    regular file or on any filesystem error.
    """
    try:
        filename = self.ftp_path(arg)
        with self.config.vfs.check_access(path=filename, user=self._uid, perms="r"):
            if self.config.vfs.isfile(filename):
                self.send_file(file_name=filename)
            else:
                # Bug fix: the "{}" placeholder was never formatted, so the
                # logged message leaked the template verbatim.
                raise FilesystemError(
                    "cmd: RETR. Path requested {} is not a file.".format(filename)
                )
    except FSOperationNotPermitted:
        self.respond(b"500 Operation not permitted.")
    except (
        OSError,
        fs.errors.FSError,
        FilesystemError,
        FTPPrivilegeException,
    ) as err:
        self._log_err(err)
        self.respond(b"550 The system cannot find the file specified.")
def do_ABOR(self, arg):
    """Aborts a file transfer currently in progress.

    Replies 225 when there is nothing to abort (no PORT/PASV yet, or the
    data connection was never established), otherwise 426 followed by 226
    after tearing down the active data channel.
    """
    if self.active_passive_mode is None:
        # Neither PORT nor PASV has been issued; nothing to tear down.
        self.respond(b"225 No transfer to abort.")
        return
    # Record whether a live data connection existed before tearing down.
    channel_was_open = self._data_channel
    self.stop_data_channel(abort=True, purge=True, reason="ABOR called.")
    if not channel_was_open:
        # A PASV or PORT was received but the connection wasn't made yet.
        self.respond(b"225 ABOR command successful; data channel closed.")
    else:
        self.respond(b"426 Transfer aborted via ABOR.")
        self.respond(b"226 ABOR command successful.")
def do_STOR(self, file, mode="w"):
    """Store a file (transfer from the client to the server).

    A resume could occur in case of APPE or REST commands. In that case
    we have to open the file object in different ways:
        STOR: mode = 'w'
        APPE: mode = 'a'
        REST: mode = 'r+' (to permit seeking on file object)

    :param file: destination name, joined onto the working directory
    :param mode: 'w' for a plain store, 'a' when invoked via do_APPE
    """
    # Distinguish APPE from plain STOR by the requested open mode.
    if "a" in mode:
        cmd = "APPE"
    else:
        cmd = "STOR"
    try:
        # Uploading requires 'w' permission on the working directory.
        with self.config.vfs.check_access(
            path=self.working_dir, user=self._uid, perms="w"
        ):
            # Consume any pending REST marker exactly once.
            rest_pos = self._restart_position
            self._restart_position = 0
            if rest_pos:
                # NOTE(review): getsize() is called with the bare name, not
                # joined with working_dir as recv_file below — confirm.
                if rest_pos > self.config.vfs.getsize(file):
                    raise ValueError("Can't seek file more than its size.")
                # rest_pos != 0 and not None. Must be REST cmd
                cmd = "REST"
            else:
                rest_pos = 0
            if cmd == "APPE":
                _file_seek = self.config.vfs.getsize(file)
            elif cmd == "REST":
                _file_seek = rest_pos
            else:
                assert cmd == "STOR"
                _file_seek = 0
            self.recv_file(
                os.path.join(self.working_dir, file), _file_seek, cmd=cmd
            )
    except FSOperationNotPermitted:
        self.respond(b"500 Operation not permitted.")
    except ValueError as err:
        self._log_err(err)
        self.respond(
            b"550 STOR command failed. Can't seek file more than its size."
        )
    except (
        OSError,
        AssertionError,
        fs.errors.FSError,
        FilesystemError,
        FTPPrivilegeException,
    ) as err:
        self._log_err(err)
        # Bug fix: the reply previously ended with a stray " ." ("failed. .").
        self.respond(b"550 STOR command failed.")
def do_REST(self, line):
    """Restart a file transfer from a previous mark (RFC-3659 REST).

    Rejected in ASCII mode, where byte offsets are ambiguous. On success
    the offset is recorded in self._restart_position and 350 is sent;
    malformed or negative values get 501.
    """
    if self._current_type == "a":
        self.respond(b"501 Resuming transfers not allowed in ASCII mode.")
        return
    try:
        offset = int(line)
        if offset < 0:
            raise ValueError
    except (ValueError, OverflowError):
        self.respond(b"501 Invalid parameter.")
    else:
        self.respond("350 Restarting at position {}.".format(offset))
        self._restart_position = offset
def do_APPE(self, file):
    """Append data to an existing file on the server.
    On success return the file path, else None.

    An APPE preceded by a pending REST makes no sense, so that
    combination is refused with a 450 reply; otherwise the transfer is
    delegated to do_STOR in append mode.
    """
    if not self._restart_position:
        return self.do_STOR(file, mode="a")
    self.respond(b"450 Can't APPE while REST request is pending.")
def do_STOU(self, line):
    """Store a file on the server with a unique name.

    The unique basename is borrowed from a tempfile.NamedTemporaryFile
    name; optional template-configured prefix/suffix are then applied
    before receiving the upload.
    """
    try:
        # STOU after REST makes no sense: there is no existing file to seek.
        if self._restart_position:
            self.respond(b"450 Can't STOU while REST request is pending.")
            return
        _, _file_name = os.path.split(tempfile.NamedTemporaryFile().name)
        if line:
            line = self.ftp_path(line)
            basedir, _ = os.path.split(line)
            _file_name = "." + _file_name
        else:
            basedir = self.working_dir
        # NOTE(review): FTPConfig keeps stou_suffix/stou_prefix as raw
        # xpath() results (lists); str + list would raise TypeError when
        # non-empty — confirm against the template/FTPConfig.
        if self.config.stou_suffix:
            _file_name = _file_name + self.config.stou_suffix
        if self.config.stou_prefix:
            _file_name = self.config.stou_prefix + _file_name
        with self.config.vfs.check_access(path=basedir, user=self._uid, perms="w"):
            self.respond(b"150 FILE: %a" % _file_name)
            self.recv_file(os.path.join(basedir, _file_name), 0, cmd="STOR")
    except FSOperationNotPermitted:
        self.respond(b"500 Operation not permitted.")
# -----------------------------------------------------------------
# Deprecated/alias commands
# RFC-1123 requires that the server treat XCUP, XCWD, XMKD, XPWD and XRMD
# commands as synonyms for CDUP, CWD, MKD, PWD and RMD. Such commands are
# obsoleted but some ftp clients (e.g. Windows ftp.exe) still use them.
# Change to the parent directory. Synonym for CDUP. Deprecated.
do_XCUP = do_CDUP
# Change the current working directory. Synonym for CWD. Deprecated.
do_XCWD = do_CWD
# Create the specified directory. Synonym for MKD. Deprecated.
do_XMKD = do_MKD
# Return the current working directory. Synonym for PWD. Deprecated.
do_XPWD = do_PWD
# Remove the specified directory. Synonym for RMD. Deprecated.
do_XRMD = do_RMD
# Quit and end the current ftp session. Synonym for QUIT.
do_BYE = do_QUIT
# -----------------------------------------------------------------
# Helper methods and Command Processors.
def _log_err(self, err):
    """
    Log errors and build the standard failure text for the client.

    :param err: Exception object
    :return: the formatted message string. Bug fix: this previously
        returned None even though callers (e.g. do_SIZE) embedded the
        return value in a 550 reply, producing "550 None.".
    """
    msg = "FTP error occurred. Client: {} error {}".format(
        self.client_address, str(err)
    )
    logger.info(msg)
    return msg
# clean things, sanity checks and more
def _pre_process_cmd(self, line, cmd, arg):
    """Validate and normalize a parsed command line before dispatch.

    Checks the command is enabled, verifies argument presence, enforces
    authentication, and canonicalizes path arguments per command, then
    hands off to _process_command().
    """
    kwargs = {}
    # "SITE XXX arg" is folded into a compound command name ("SITE XXX").
    if cmd == "SITE" and arg:
        cmd = "SITE %s" % arg.split(" ")[0].upper()
        arg = line[len(cmd) + 1 :]
    logger.info(
        "Received command {} : {} from FTP client {}: {}".format(
            cmd, line, self.client_address, self.session.id
        )
    )
    if cmd not in self.config.COMMANDS:
        # ABOR/STAT/QUIT may arrive with Telnet IAC prefix bytes; recognize
        # them by their last four characters.
        if cmd[-4:] in ("ABOR", "STAT", "QUIT"):
            cmd = cmd[-4:]
        else:
            self.respond(b"500 Command %a not understood" % cmd)
            return
    # - checking for valid arguments
    if not arg and self.config.COMMANDS[cmd]["arg"] is True:
        self.respond(b"501 Syntax error: command needs an argument")
        return
    if arg and self.config.COMMANDS[cmd]["arg"] is False:
        self.respond(b"501 Syntax error: command does not accept arguments.")
        return
    if not self.authenticated:
        # Only auth-exempt commands may run pre-login (STAT with an arg
        # behaves like LIST and therefore needs auth).
        if self.config.COMMANDS[cmd]["auth"] or (cmd == "STAT" and arg):
            self.respond(b"530 Log in with USER and PASS first.")
            return
        else:
            # call the proper do_* method
            self._process_command(cmd, arg)
            return
    else:
        if (cmd == "STAT") and not arg:
            self.do_STAT(path=None)
            return
        # for file-system related commands check whether real path
        # destination is valid
        if self.config.COMMANDS[cmd]["perm"] and (cmd != "STOU"):
            if cmd in ("CWD", "XCWD"):
                if arg and self.working_dir != "/":
                    arg = os.path.join(self.working_dir, arg)
                else:
                    arg = arg or "/"
            elif cmd in ("CDUP", "XCUP"):
                # CDUP computes its own target; the argument is ignored.
                arg = ""
            elif cmd == "STAT":
                if glob.has_magic(arg):
                    self.respond(b"550 Globbing not supported.")
                    return
                arg = self.ftp_path(arg or self.working_dir)
            elif cmd == "SITE CHMOD":
                if " " not in arg:
                    self.respond(b"501 Syntax error: command needs two arguments.")
                    return
                else:
                    mode, arg = arg.split(" ", 1)
                    arg = self.ftp_path(arg)
                    kwargs = dict(mode=mode)
            else:
                if cmd == "LIST":
                    # Common list switches map to the current directory.
                    if arg.lower() in ("-a", "-l", "-al", "-la"):
                        arg = self.working_dir
                    else:
                        arg = arg or self.working_dir
                if glob.has_magic(arg):
                    self.respond(b"550 Globbing not supported.")
                    return
                else:
                    arg = glob.escape(arg)
        arg = arg or self.working_dir
        arg = line.split(" ", 1)[1] if arg is None else arg
        # call the proper do_* method
        self._process_command(cmd, arg, **kwargs)
def _process_command(self, cmd, *args, **kwargs):
"""Process command by calling the corresponding do_* class method (e.g. for received command "MKD pathname",
do_MKD() method is called with "pathname" as the argument).
"""
if self.invalid_login_attempt >= self.max_login_attempts:
self.respond(b"421 Too many connections. Service temporarily unavailable.")
self.disconnect_client = True
self.session.add_event({"type": "CONNECTION_TERMINATED"})
else:
try:
method = getattr(self, "do_" + cmd.replace(" ", "_"))
self._last_command = cmd
method(*args, **kwargs)
except (fs.errors.FSError, FilesystemError):
raise
# - main command processor
def process_ftp_command(self):
    """
    Handle an incoming request: pick an item from the input queue, read
    the contents of the message and dispatch to the appropriate do_*
    method via _pre_process_cmd().

    Also enforces the idle timeout: when it expires with no active data
    channel, the client gets 421 and is marked for disconnection.
    """
    try:
        if not self._command_channel_input_q.empty() and (
            self.metrics.timeout() < self.config.timeout
        ):
            # decoding should be done using utf-8
            line = self._command_channel_input_q.get().decode()
            # Remove any CR+LF if present
            line = line[:-2] if line[-2:] == "\r\n" else line
            if line:
                cmd = line.split(" ")[0].upper()
                arg = line[len(cmd) + 1 :]
                try:
                    self._pre_process_cmd(line, cmd, arg)
                except UnicodeEncodeError:
                    self.respond(
                        b"501 can't decode path (server filesystem encoding is %a)"
                        % sys.getfilesystemencoding()
                    )
                except (fs.errors.PermissionDenied, FSOperationNotPermitted):
                    # TODO: log user as well.
                    logger.info(
                        "Client {} requested path: {} trying to access directory to which it has "
                        "no access to.".format(self.client_address, line)
                    )
                    self.respond(b"500 Permission denied")
                except fs.errors.IllegalBackReference:
                    # '..' references escaping the user's root directory.
                    self.respond(
                        b"550 %a points to a path which is outside the user's root directory."
                        % line
                    )
                except FTPPrivilegeException:
                    self.respond(b"550 Not enough privileges.")
                except (fs.errors.FSError, FilesystemError) as fe:
                    logger.info(
                        "FTP client {} Unexpected error occurred : {}".format(
                            self.client_address, fe
                        )
                    )
                    # TODO: what to respond here? For now just terminate the session
                    self.disconnect_client = True
                    self.session.add_event({"type": "CONNECTION_TERMINATED"})
        elif not (self.metrics.timeout() < self.config.timeout) and (
            not self._data_channel
        ):
            # Idle timeout expired and no transfer in progress: drop client.
            logger.info(
                "FTP connection timeout, remote: {}. ({}). Disconnecting client".format(
                    self.client_address, self.session.id
                )
            )
            self.session.add_event({"type": "CONNECTION_TIMEOUT"})
            self.respond(b"421 Timeout.")
            self.disconnect_client = True
        else:
            # Nothing queued right now; yield to other greenlets.
            gevent.sleep(0)
    except UnicodeDecodeError:
        # RFC-2640 doesn't mention what to do in this case. So we'll just return 501
        self.respond(b"501 can't decode command.")
| 42,562 | Python | .py | 899 | 33.995551 | 118 | 0.54509 | mushorg/conpot | 1,224 | 413 | 115 | GPL-2.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,478 | ftp_server.py | mushorg_conpot/conpot/protocols/ftp/ftp_server.py | # Copyright (C) 2018 Abhinav Saxena <xandfury@gmail.com>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import logging
from os import R_OK, W_OK
from datetime import datetime
from lxml import etree
from gevent.server import StreamServer
from conpot.protocols.ftp.ftp_utils import ftp_commands, FTPException
from conpot.protocols.ftp.ftp_handler import FTPCommandChannel
from conpot.core.protocol_wrapper import conpot_protocol
import conpot.core as conpot_core
logger = logging.getLogger(__name__)
class FTPConfig(object):
    """Runtime configuration for the FTP honeypot, parsed from the XML
    template: enabled commands, banner/MOTD, login limits, user/group
    databases and the virtual file system (VFS) layout and permissions.
    """

    def __init__(self, template):
        # user_db[uid] = {'uname': ..., 'grp': 'gid:name', 'password': ...}
        self.user_db = dict()
        # grp_db[gid] = {'group': group_name, 'users': set(uids)}
        self.grp_db = (
            dict()
        )
        dom = etree.parse(template)
        # First let us get FTP related data
        self.all_commands = [
            "USER",
            "PASS",
            "HELP",
            "NOOP",
            "QUIT",
            "SITE HELP",
            "SITE",
            "SYST",
            "TYPE",
            "PASV",
            "PORT",
            "ALLO",
            "MODE",
            "SIZE",
            "PWD",
            "MKD",
            "RMD",
            "CWD",
            "CDUP",
            "MDTM",
            "DELE",
            "SITE CHMOD",
            "RNFR",
            "RNTO",
            "STAT",
            "LIST",
            "NLST",
            "RETR",
            "REIN",
            "ABOR",
            "STOR",
            "APPE",
            "REST",
            "STRU",
            "STOU",
        ]
        # Implementation Note: removing a command from here would make it unrecognizable in FTP server.
        self.enabled_commands = (
            "".join(
                dom.xpath("//ftp/device_info/enabled_commands/text()")[0]
                .strip()
                .split()
            )
        ).split(",")
        # Strip stray quotes from the template's comma-separated list.
        self.enabled_commands = [i.replace("'", "") for i in self.enabled_commands]
        # Whitespace was squeezed out above, so compound command names
        # ("SITE HELP"/"SITE CHMOD") have to be restored explicitly.
        if "SITEHELP" in self.enabled_commands:
            self.enabled_commands.remove("SITEHELP")
            self.enabled_commands.append("SITE HELP")
        if "SITECHMOD" in self.enabled_commands:
            self.enabled_commands.remove("SITECHMOD")
            self.enabled_commands.append("SITE CHMOD")
        for i in self.enabled_commands:
            assert i in self.all_commands
        self.device_type = dom.xpath("//ftp/device_info/device_type/text()")[0]
        self.banner = dom.xpath("//ftp/device_info/banner/text()")[0]
        self.max_login_attempts = int(
            dom.xpath("//ftp/device_info/max_login_attempts/text()")[0]
        )
        # Connection timeout in seconds (note: the template tag is
        # spelled 'sever_timeout').
        self.timeout = int(dom.xpath("//ftp/device_info/sever_timeout/text()")[0])
        if dom.xpath("//ftp/device_info/motd/text()"):
            self.motd = dom.xpath("//ftp/device_info/motd/text()")[0]
        else:
            self.motd = None
        # NOTE(review): unlike the fields above, these keep the raw xpath()
        # result (a list, possibly empty) instead of indexing [0]; do_STOU
        # concatenates them onto a str — confirm intended.
        self.stou_prefix = dom.xpath("//ftp/device_info/stou_prefix/text()")
        self.stou_suffix = dom.xpath("//ftp/device_info/stou_suffix/text()")
        # Restrict FTP to only enabled FTP commands
        self.COMMANDS = {i: ftp_commands[i] for i in self.enabled_commands}
        # -- Now that we fetched FTP meta, let us populate users.
        grp = dom.xpath("//ftp/ftp_users/users")[0].attrib["group"]
        for i in dom.xpath("//ftp/ftp_users/users/*"):
            self.user_db[int(i.attrib["uid"])] = {
                "uname": i.xpath("./uname/text()")[0],
                "grp": grp,
                "password": i.xpath("./password/text()")[0],
            }
        # NOTE(review): bool() of a non-empty attribute string is always
        # True, so enabled="false" would still enable anonymous login —
        # confirm template values.
        self.anon_auth = bool(
            dom.xpath("//ftp/ftp_users/anon_login")[0].attrib["enabled"]
        )
        if self.anon_auth:
            self.anon_uid = int(
                dom.xpath("//ftp/ftp_users/anon_login")[0].attrib["uid"]
            )
            self.user_db[self.anon_uid] = {
                "uname": dom.xpath("//ftp/ftp_users/anon_login/uname/text()")[0],
                "grp": grp,
                "password": "",
            }
        # As a last step, get VFS related data.
        self.root_path = dom.xpath("//ftp/ftp_vfs/path/text()")[0]
        self.data_fs_subdir = dom.xpath("//ftp/ftp_vfs/data_fs_subdir/text()")[0]
        if len(dom.xpath("//ftp/ftp_vfs/add_src/text()")) == 0:
            self.add_src = None
        else:
            self.add_src = dom.xpath("//ftp/ftp_vfs/add_src/text()")[0].lower()
        # default ftp owners and groups
        self.default_owner = int(dom.xpath("//ftp/ftp_vfs/default_owner/text()")[0])
        self.default_group = int(grp.split(":")[0])
        self.default_perms = oct(
            int(dom.xpath("//ftp/ftp_vfs/default_perms/text()")[0], 8)
        )
        self.file_default_perms = oct(
            int(dom.xpath("//ftp/ftp_vfs/upload_file_perms/text()")[0], 8)
        )
        # NOTE(review): reads upload_file_perms again (identical to
        # file_default_perms) — looks like a copy/paste slip; confirm
        # whether a separate upload_dir_perms tag was intended.
        self.dir_default_perms = oct(
            int(dom.xpath("//ftp/ftp_vfs/upload_file_perms/text()")[0], 8)
        )
        self._custom_files = dom.xpath("//ftp/ftp_vfs/file")
        self._custom_dirs = dom.xpath("//ftp/ftp_vfs/dir")
        self._init_user_db()  # Initialize User DB
        self._init_fs()  # Initialize FTP file system.

    def _init_fs(self):
        """Register VFS users/groups, mount the FTP subtree, sanity-check
        root permissions, and apply template-specified ownership,
        permissions and timestamps to custom files/directories."""
        # Create/register all necessary users and groups in the file system
        _ = {
            conpot_core.get_vfs().register_user(uid=k, name=v["uname"])
            for k, v in self.user_db.items()
        }
        _ = {
            conpot_core.get_vfs().create_group(gid=k, name=v["group"])
            for k, v in self.grp_db.items()
        }
        _ = {
            conpot_core.get_vfs().add_users_to_group(gid=k, uids=list(v["users"]))
            for k, v in self.grp_db.items()
        }
        # Initialize file system
        self.vfs, self.data_fs = conpot_core.add_protocol(
            protocol_name="ftp",
            data_fs_subdir=self.data_fs_subdir,
            vfs_dst_path=self.root_path,
            src_path=self.add_src,
            owner_uid=self.default_owner,
            group_gid=self.default_group,
            perms=self.default_perms,
        )
        if self.add_src:
            logger.info(
                "FTP Serving File System from {} at {} in vfs. FTP data_fs sub directory: {}".format(
                    self.add_src, self.root_path, self.data_fs._sub_dir
                )
            )
        else:
            logger.info(
                "FTP Serving File System at {} in vfs. FTP data_fs sub directory: {}".format(
                    self.root_path, self.data_fs._sub_dir
                )
            )
        logger.debug(
            "FTP serving list of files : {}".format(", ".join(self.vfs.listdir(".")))
        )
        self.root = "/"  # Setup root dir.
        # check for permissions etc.
        logger.debug("FTP root {} is a directory".format(self.vfs.getcwd() + self.root))
        if self.vfs.access(self.root, 0, R_OK):
            logger.debug(
                "FTP root {} is readable".format(self.vfs.getcwd() + self.root)
            )
        else:
            # An unreadable root makes the server useless: hard failure.
            raise FTPException("FTP root must be readable")
        if self.vfs.access(self.root, 0, W_OK):
            logger.debug(
                "FTP root {} is writable".format(self.vfs.getcwd() + self.root)
            )
        else:
            # Read-only root is allowed; uploads will simply fail.
            logger.warning(
                "FTP root {} is not writable".format(self.vfs.getcwd() + self.root)
            )
        # Finally apply permissions to specific files.
        for _file in self._custom_files:
            _path = _file.attrib["path"]
            _path = _path.replace(self.root_path, self.root)
            _owner = int(_file.xpath("./owner_uid/text()")[0])
            _perms = oct(int(_file.xpath("./perms/text()")[0], 8))
            _accessed = datetime.fromtimestamp(
                float(_file.xpath("./last_accessed/text()")[0])
            )
            _modified = datetime.fromtimestamp(
                float(_file.xpath("./last_modified/text()")[0])
            )
            self.vfs.chown(_path, _owner, self.default_group)
            self.vfs.chmod(_path, _perms)
            # Timestamps are applied on the underlying (delegate) filesystem.
            _fs = self.vfs.delegate_fs().delegate_fs()
            _fs.settimes(self.vfs.delegate_path(_path)[1], _accessed, _modified)
        for _dir in self._custom_dirs:
            _path = _dir.attrib["path"]
            # NOTE(review): bool("false") is True — any non-empty attribute
            # value makes 'recursive' truthy; confirm template values.
            _recursive = bool(_dir.attrib["recursive"])
            _path = _path.replace(self.root_path, self.root)
            _owner = int(_dir.xpath("./owner_uid/text()")[0])
            _perms = oct(int(_dir.xpath("./perms/text()")[0], 8))
            _accessed = datetime.fromtimestamp(
                float(_dir.xpath("./last_accessed/text()")[0])
            )
            _modified = datetime.fromtimestamp(
                float(_dir.xpath("./last_modified/text()")[0])
            )
            self.vfs.chown(_path, _owner, self.default_group, _recursive)
            self.vfs.chmod(_path, _perms)
            _fs = self.vfs.delegate_fs().delegate_fs()
            _fs.settimes(self.vfs.delegate_path(_path)[1], _accessed, _modified)
        # self.default_owner = 13
        # self.default_group = 45
        # self.vfs.chmod('/', self.default_perms, recursive=True)
        # self.vfs.chown('/', uid=self.default_owner, gid=self.default_group, recursive=True)

    # --------------------------------------------
    # TODO: move this method to auth module.
    def _init_user_db(self):
        """
        We expect the following dict format to build for every user
        self.user_db[10] = {
            'uname': 'test_user',
            'grp': '45:ftp',
            'password': 'test'
        }
        :return:
        """
        # TODO: Get users from the template.
        self.user_db[13] = {"uname": "nobody", "grp": "45:ftp", "password": "nobody"}
        # Toggle enable/disable anonymous user.
        self.user_db[22] = {"uname": "anonymous", "grp": "45:ftp", "password": ""}
        # Let us create groups from the populated users. Note: pop() strips
        # the 'grp' key from each user_db entry as a side effect.
        for i in self.user_db.keys():
            grp = self.user_db[i].pop("grp")
            _gid, _gname = grp.split(":")
            _gid = int(_gid)
            if _gid not in self.grp_db.keys():
                # It is a new group. Let us create/register this.
                self.grp_db[_gid] = {"group": _gname, "users": set()}
            self.grp_db[_gid]["users"].add(i)
        # create a simple set of (user, pass) combinations for easy auth
        self.user_pass = set(
            zip(
                [v["uname"] for v in self.user_db.values()],
                [v["password"] for v in self.user_db.values()],
            )
        )

    # TODO: move this method to auth module.
    def get_uid(self, user_name):
        """Get uid from a username.

        Raises ValueError (unpacking) if the name is absent or ambiguous.
        """
        [_uid] = [k for k, v in self.user_db.items() if user_name in v.values()]
        return _uid

    # TODO: move this method to auth module.
    def get_gid(self, uid):
        """Get group id of a user from its uid.

        Raises ValueError (unpacking) if the uid is in no or many groups.
        """
        [_gid] = [k for k, v in self.grp_db.items() if uid in v["users"]]
        return _gid
@conpot_protocol
class FTPServer(object):
    """Gevent StreamServer front-end for the conpot FTP command channel."""

    def __init__(self, template, template_directory, args):
        self.template = template
        self.server = None  # created lazily in start()
        # The command-channel class carries the per-template FTP config
        # (this also initializes the virtual file system).
        self.handler = FTPCommandChannel
        self.handler.config = FTPConfig(self.template)

    def start(self, host, port):
        """Bind the stream server and serve FTP sessions forever."""
        self.handler.host, self.handler.port = host, port
        listen_on = (self.handler.host, self.handler.port)
        self.server = StreamServer(listen_on, self.handler.stream_server_handle)
        logger.info("FTP server started on: {}".format(listen_on))
        self.server.serve_forever()

    def stop(self):
        """Shut the stream server down and drop the handler class."""
        logger.debug("Stopping FTP server")
        self.server.stop()
        del self.handler
| 12,596 | Python | .py | 298 | 31.674497 | 103 | 0.547846 | mushorg/conpot | 1,224 | 413 | 115 | GPL-2.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,479 | ftp_utils.py | mushorg_conpot/conpot/protocols/ftp/ftp_utils.py | # Copyright (C) 2018 Abhinav Saxena <xandfury@gmail.com>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
class FTPException(Exception):
    """Base class for all FTP protocol related errors."""


class FTPMaxLoginAttemptsExceeded(FTPException):
    """Raised when a client exceeds the permitted number of login attempts."""


class FTPPrivilegeException(FTPException):
    """Raised when a user lacks the privileges required for an operation."""
# All supported FTP commands. Each entry maps a command verb to:
#   perm: single-letter virtual-filesystem permission required (None = none)
#   auth: whether the client must be authenticated before using it
#   arg:  True = argument required, False = argument forbidden, None = optional
#   help: text returned by the HELP / SITE HELP commands
ftp_commands = {
    # -- transfer / session control ------------------------------------
    "ABOR": dict(auth=True, perm=None, arg=False, help="Syntax: ABOR (abort transfer)."),
    "ALLO": dict(perm=None, auth=True, arg=True, help="Syntax: ALLO <SP> bytes (noop; allocate storage)."),
    "APPE": dict(perm="a", auth=True, arg=True, help="Syntax: APPE <SP> file-name (append data to file)."),
    # -- directory navigation ------------------------------------------
    "CDUP": dict(perm="e", auth=True, arg=False, help="Syntax: CDUP (go to parent directory)."),
    "CWD": dict(perm="e", auth=True, arg=None, help="Syntax: CWD [<SP> dir-name] (change working directory)."),
    "DELE": dict(perm="d", auth=True, arg=True, help="Syntax: DELE <SP> file-name (delete file)."),
    "HELP": dict(perm=None, auth=False, arg=None, help="Syntax: HELP [<SP> cmd] (show help)."),
    "LIST": dict(perm="l", auth=True, arg=None, help="Syntax: LIST [<SP> path] (list files)."),
    "MDTM": dict(perm="l", auth=True, arg=True, help="Syntax: MDTM [<SP> path] (file last modification time)."),
    "MODE": dict(perm=None, auth=True, arg=True, help="Syntax: MODE <SP> mode (noop; set data transfer mode)."),
    "MKD": dict(perm="m", auth=True, arg=True, help="Syntax: MKD <SP> path (create directory)."),
    "NLST": dict(perm="l", auth=True, arg=None, help="Syntax: NLST [<SP> path] (list path in a compact form)."),
    "NOOP": dict(perm=None, auth=False, arg=False, help="Syntax: NOOP (just do nothing)."),
    # -- authentication ------------------------------------------------
    "PASS": dict(perm=None, auth=False, arg=None, help="Syntax: PASS [<SP> password] (set user password)."),
    # -- data connection setup -----------------------------------------
    "PASV": dict(perm=None, auth=True, arg=False, help="Syntax: PASV (open passive data connection)."),
    "PORT": dict(perm=None, auth=True, arg=True, help="Syntax: PORT <sp> h,h,h,h,p,p (open active data connection)."),
    "PWD": dict(perm=None, auth=True, arg=False, help="Syntax: PWD (get current working directory)."),
    "QUIT": dict(perm=None, auth=False, arg=False, help="Syntax: QUIT (quit current session)."),
    "REIN": dict(perm=None, auth=True, arg=False, help="Syntax: REIN (flush account)."),
    # -- file transfer & management ------------------------------------
    "RETR": dict(perm="r", auth=True, arg=True, help="Syntax: RETR <SP> file-name (retrieve a file)."),
    "RMD": dict(perm="d", auth=True, arg=True, help="Syntax: RMD <SP> dir-name (remove directory)."),
    "RNFR": dict(perm="f", auth=True, arg=True, help="Syntax: RNFR <SP> file-name (rename (source name))."),
    "REST": dict(perm=None, auth=True, arg=True, help="Syntax: REST <SP> offset (set file offset)."),
    "RNTO": dict(perm="f", auth=True, arg=True, help="Syntax: RNTO <SP> file-name (rename (destination name))."),
    # -- SITE sub-commands ---------------------------------------------
    "SITE": dict(perm=None, auth=False, arg=True, help="Syntax: SITE <SP> site-command (execute SITE command)."),
    "SITE HELP": dict(perm=None, auth=False, arg=None, help="Syntax: SITE HELP [<SP> cmd] (show SITE command help)."),
    "SITE CHMOD": dict(perm="M", auth=True, arg=True, help="Syntax: SITE CHMOD <SP> mode path (change file mode)."),
    "SIZE": dict(perm="l", auth=True, arg=True, help="Syntax: SIZE <SP> file-name (get file size)."),
    "STAT": dict(perm="l", auth=False, arg=None, help="Syntax: STAT [<SP> path name] (server stats [list files])."),
    "STOR": dict(perm="w", auth=True, arg=True, help="Syntax: STOR <SP> file-name (store a file)."),
    "STOU": dict(perm="w", auth=True, arg=None, help="Syntax: STOU [<SP> name] (store a file with a unique name)."),
    "STRU": dict(perm=None, auth=True, arg=True, help="Syntax: STRU <SP> type (noop; set file structure)."),
    "SYST": dict(perm=None, auth=False, arg=False, help="Syntax: SYST (get operating system type)."),
    "TYPE": dict(perm=None, auth=True, arg=True, help="Syntax: TYPE <SP> [A | I] (set transfer type)."),
    "USER": dict(perm=None, auth=False, arg=True, help="Syntax: USER <SP> user-name (set username)."),
    # -- obsolete RFC 775 aliases --------------------------------------
    "XCUP": dict(perm="e", auth=True, arg=False, help="Syntax: XCUP (obsolete; go to parent directory)."),
    "XCWD": dict(perm="e", auth=True, arg=None, help="Syntax: XCWD [<SP> dir-name] (obsolete; change directory)."),
    "XMKD": dict(perm="m", auth=True, arg=True, help="Syntax: XMKD <SP> dir-name (obsolete; create directory)."),
    "XPWD": dict(perm=None, auth=True, arg=False, help="Syntax: XPWD (obsolete; get current dir)."),
    "XRMD": dict(perm="d", auth=True, arg=True, help="Syntax: XRMD <SP> dir-name (obsolete; remove directory)."),
}
def get_data_from_iter(iterator):
    """Drain *iterator* and return the concatenation of ``str()`` of each item.

    The previous implementation accumulated into a string with ``+=``
    (potentially quadratic) and drove the iterator manually with
    ``next()``/``StopIteration``; a single ``str.join`` over a generator
    produces the identical result in one linear pass.

    :param iterator: any iterable yielding objects convertible via str().
    :return: the joined string (empty string for an exhausted/empty iterable).
    """
    return "".join(str(item) for item in iterator)
| 6,831 | Python | .py | 245 | 20.991837 | 88 | 0.554221 | mushorg/conpot | 1,224 | 413 | 115 | GPL-2.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,480 | i_frames_check.py | mushorg_conpot/conpot/protocols/IEC104/i_frames_check.py | # Copyright (C) 2017 Patrick Reichenberger (University of Passau) <patrick.reichenberger@t-online.de>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from conpot.protocols.IEC104.errors import InvalidFieldValueException
# direction is either "m" (monitor) or "c" (control); empty if irrelevant.
def check_information_without_time(frame, direction):
    """Validate the COT of a process-information ASDU without a time tag."""
    type_id = int(frame.getfieldval("TypeID"), 16)
    if frame.getfieldval("COT") not in (2, 3, 5, 11, 12, 20):
        raise InvalidFieldValueException(
            "Invalid COT for ASDU type " + str(type_id) + "."
        )


def check_information_with_time(frame, direction):
    """Validate SQ and COT of a process-information ASDU with a time tag."""
    type_id = int(frame.getfieldval("TypeID"), 16)
    if frame.getfieldval("SQ") != 0:
        raise InvalidFieldValueException(
            "SQ=1 not supported for ASDU type " + str(type_id) + "."
        )
    if frame.getfieldval("COT") not in (3, 5, 11, 12):
        raise InvalidFieldValueException(
            "Invalid COT for ASDU type " + str(type_id) + "."
        )


def check_asdu_1(frame, direction):
    """M_SP_NA_1: single-point information."""
    check_information_without_time(frame, direction)


def check_asdu_2(frame, direction):
    """M_SP_TA_1: single-point information with time tag."""
    check_information_with_time(frame, direction)


def check_asdu_3(frame, direction):
    """M_DP_NA_1: double-point information."""
    check_information_without_time(frame, direction)


def check_asdu_4(frame, direction):
    """M_DP_TA_1: double-point information with time tag."""
    check_information_with_time(frame, direction)


def check_asdu_11(frame, direction):
    """M_ME_NB_1: measured value, scaled."""
    check_information_without_time(frame, direction)
def check_asdu_12(frame, direction):
    """M_ME_TB_1: measured scaled value with time tag (COT 3 or 5 only)."""
    type_id = int(frame.getfieldval("TypeID"), 16)
    if frame.getfieldval("SQ") != 0:
        raise InvalidFieldValueException(
            "SQ=1 not supported for ASDU type " + str(type_id) + "."
        )
    if frame.getfieldval("COT") not in (3, 5):
        raise InvalidFieldValueException(
            "Invalid COT for ASDU type " + str(type_id) + "."
        )


def check_asdu_13(frame, direction):
    """M_ME_NC_1: measured short floating point value."""
    check_information_without_time(frame, direction)


def check_asdu_14(frame, direction):
    """M_ME_TC_1: measured short floating point value with time tag."""
    type_id = int(frame.getfieldval("TypeID"), 16)
    if frame.getfieldval("SQ") != 0:
        raise InvalidFieldValueException(
            "SQ=1 not supported for ASDU type " + str(type_id) + "."
        )
    if frame.getfieldval("COT") not in (2, 3, 5, 11, 12, 20):
        raise InvalidFieldValueException(
            "Invalid COT for ASDU type " + str(type_id) + "."
        )


def check_asdu_30(frame, direction):
    """M_SP_TB_1: single-point information with CP56Time2a tag."""
    check_information_with_time(frame, direction)


def check_asdu_31(frame, direction):
    """M_DP_TB_1: double-point information with CP56Time2a tag."""
    check_information_with_time(frame, direction)
def check_asdu_35(frame, direction):
    """M_ME_TE_1: measured scaled value with CP56Time2a tag (COT 3 or 5)."""
    type_id = int(frame.getfieldval("TypeID"), 16)
    if frame.getfieldval("SQ") != 0:
        raise InvalidFieldValueException(
            "SQ=1 not supported for ASDU type " + str(type_id) + "."
        )
    if frame.getfieldval("COT") not in (3, 5):
        raise InvalidFieldValueException(
            "Invalid COT for ASDU type " + str(type_id) + "."
        )


def check_asdu_36(frame, direction):
    """M_ME_TF_1: measured short floating point value with CP56Time2a tag."""
    type_id = int(frame.getfieldval("TypeID"), 16)
    if frame.getfieldval("SQ") != 0:
        raise InvalidFieldValueException(
            "SQ=1 not supported for ASDU type " + str(type_id) + "."
        )
    if frame.getfieldval("COT") not in (2, 3, 5, 11, 12, 20):
        raise InvalidFieldValueException(
            "Invalid COT for ASDU type " + str(type_id) + "."
        )
# Common validation for process information in control direction.
def check_command(frame, direction):
    """Validate SQ, COT and object count shared by command ASDUs (45-51).

    Note: the TypeID of command ASDUs is parsed as decimal, unlike the
    monitor-direction checks which parse it as hex.
    """
    type_id = int(frame.getfieldval("TypeID"))
    if frame.getfieldval("SQ") != 0:
        raise InvalidFieldValueException(
            "SQ=1 not supported for ASDU type " + str(type_id) + "."
        )
    cot = frame.getfieldval("COT")
    cot_ok = (direction == "c" and cot in (6, 8)) or (
        direction == "m" and cot in (7, 9, 10, 44, 45, 46, 47)
    )
    if not cot_ok:
        raise InvalidFieldValueException(
            "Invalid COT for ASDU type " + str(type_id) + "."
        )
    if frame.getfieldval("NoO") != 1:
        raise InvalidFieldValueException(
            "Only one object allowed for ASDU type " + str(type_id) + "."
        )


def check_asdu_45(frame, direction):
    """C_SC_NA_1: single command (also checks the APDU length)."""
    check_command(frame, direction)
    if direction == "c" and frame.getfieldval("LenAPDU") != 14:
        raise InvalidFieldValueException("Illogical length")


def check_asdu_46(frame, direction):
    """C_DC_NA_1: double command."""
    check_command(frame, direction)


def check_asdu_47(frame, direction):
    """C_RC_NA_1: regulating step command."""
    check_command(frame, direction)


def check_asdu_48(frame, direction):
    """C_SE_NA_1: setpoint command, normalized value."""
    check_command(frame, direction)


def check_asdu_49(frame, direction):
    """C_SE_NB_1: setpoint command, scaled value."""
    check_command(frame, direction)


def check_asdu_50(frame, direction):
    """C_SE_NC_1: setpoint command, short floating point value."""
    check_command(frame, direction)


def check_asdu_51(frame, direction):
    """C_BO_NA_1: bit string 32 bit."""
    check_command(frame, direction)
def check_asdu_100(frame, direction):
    """C_IC_NA_1: (general) interrogation command.

    Validates SQ, object count, COT (per direction), a zero IOA and the
    qualifier of interrogation (QOI).
    """
    type_id = int(frame.getfieldval("TypeID"))
    if frame.getfieldval("SQ") != 0:
        raise InvalidFieldValueException(
            "SQ=1 not supported for ASDU type " + str(type_id) + "."
        )
    if frame.getfieldval("NoO") != 1:
        raise InvalidFieldValueException(
            "Only one object allowed for ASDU type " + str(type_id) + "."
        )
    cot = frame.getfieldval("COT")
    cot_ok = (direction == "c" and cot in (6, 8)) or (
        direction == "m" and cot in (7, 9, 10, 44, 45, 46, 47)
    )
    if not cot_ok:
        raise InvalidFieldValueException(
            "Invalid COT for ASDU type " + str(type_id) + "."
        )
    if frame.getfieldval("IOA") != 0:
        raise InvalidFieldValueException(
            "IOA not 0 for ASDU type " + str(type_id) + "."
        )
    qualif_of_inro = frame.getfieldval("QOI")
    # QOI 0 = not used; 20-36 = station/group interrogation qualifiers.
    if qualif_of_inro != 0 and not 20 <= qualif_of_inro <= 36:
        raise InvalidFieldValueException(
            "Invalid QOI "
            + str(qualif_of_inro)
            + " for ASDU type "
            + str(type_id)
            + "."
        )
| 7,362 | Python | .py | 197 | 30.116751 | 102 | 0.628776 | mushorg/conpot | 1,224 | 413 | 115 | GPL-2.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,481 | errors.py | mushorg_conpot/conpot/protocols/IEC104/errors.py | # Copyright (C) 2017 Patrick Reichenberger (University of Passau) <patrick.reichenberger@t-online.de>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
class InvalidFieldValueException(ValueError):
    """Raised when a field of an IEC104 frame holds a disallowed value."""

    def __init__(self, *args):
        self.args = args


class FrameError(Exception):
    """Raised when a frame is malformed or is not an IEC104 packet at all."""

    def __init__(self, *args):
        self.args = args


class Timeout_t1(BaseException):
    """Signals expiry of the IEC104 t1 (acknowledge) timer."""


class Timeout_t1_2nd(BaseException):
    """Signals a second expiry of the IEC104 t1 timer."""


class Timeout_t3(BaseException):
    """Signals expiry of the IEC104 t3 (test-frame idle) timer."""
| 1,426 | Python | .py | 30 | 44.666667 | 102 | 0.747832 | mushorg/conpot | 1,224 | 413 | 115 | GPL-2.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,482 | frames.py | mushorg_conpot/conpot/protocols/IEC104/frames.py | # Copyright (C) 2017 Patrick Reichenberger (University of Passau) <patrick.reichenberger@t-online.de>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from scapy.all import *
from datetime import datetime
# Structure of control field formats
class i_frame(Packet):
    """IEC 60870-5-104 I-format APCI (numbered information transfer)."""

    name = "i_frame"
    fields_desc = [
        XByteField("Start", 0x68),
        ByteField("LenAPDU", None),
        LEShortField("SendSeq", 0x0),
        LEShortField("RecvSeq", 0x0),
    ]

    # Compute LenAPDU when it was left unset: it counts the four control
    # octets after the length byte plus the ASDU payload.
    def post_build(self, p, pay):
        if self.LenAPDU is None:
            length = len(pay) + 4
            p = p[:1] + struct.pack("!B", length) + p[2:]
        return p + pay
class u_frame(Packet):
    """IEC 60870-5-104 U-format APCI (unnumbered control functions).

    The Type octet selects the control function; the default 0x07
    corresponds to STARTDT act.
    """

    name = "u_frame"
    fields_desc = [
        XByteField("Start", 0x68),
        ByteField("LenAPDU", 0x04),
        XByteField("Type", 0x07),
        X3BytesField("Default", 0x000000),
    ]


class s_frame(Packet):
    """IEC 60870-5-104 S-format APCI (supervisory: acknowledges RecvSeq)."""

    name = "s_frame"
    fields_desc = [
        XByteField("Start", 0x68),
        ByteField("LenAPDU", 0x04),
        XByteField("Type", 0x01),
        XByteField("Default", 0x00),
        LEShortField("RecvSeq", 0x0),
    ]  # On the wire 0x0001 appears as "01 00" because the LSB comes first.
# Map of supported ASDU type-identification names (IEC 60870-5-101/-104)
# to their numeric type IDs. Commented-out entries document type IDs that
# exist in the standard but are not implemented by this honeypot.
TypeIdentification = {
    # Single point information
    "M_SP_NA_1": 1,
    # Single point information with time tag
    "M_SP_TA_1": 2,
    # Double point information
    "M_DP_NA_1": 3,
    # Double point information with time tag
    "M_DP_TA_1": 4,
    # Step position information
    # "M_ST_NA_1": 5,
    # Step position information with time tag
    # "M_ST_TA_1": 6,
    # Bit string of 32 bit
    # "M_BO_NA_1": 7,
    # Bit string of 32 bit with time tag
    # "M_BO_TA_1": 8,
    # Measured value, normalized value
    # "M_ME_NA_1": 9,
    # Measured value, normalized value with time tag
    # "M_ME_TA_1": 10,
    # Measured value, scaled value
    "M_ME_NB_1": 11,
    # Measured value, scaled value with time tag
    "M_ME_TB_1": 12,
    # Measured value, short floating point value
    "M_ME_NC_1": 13,
    # Measured value, short floating point value with time tag
    "M_ME_TC_1": 14,
    # Integrated totals
    # "M_IT_NA_1": 15,
    # Integrated totals with time tag
    # "M_IT_TA_1": 16,
    # Event of protection equipment with time tag
    # "M_EP_TA_1": 17,
    # Packed start events of protection equipment with time tag
    # "M_EP_TB_1": 18,
    # Packed output circuit information of protection equipment with time tag
    # "M_EP_TC_1": 19,
    # Packed single-point information with status change detection
    # "M_PS_NA_1": 20,
    # Measured value, normalized value without quality descriptor
    # "M_ME_ND_1": 21,
    # Process telegrams with long time tag ( 7 octets ) :
    # Single point information with time tag CP56Time2a
    "M_SP_TB_1": 30,
    # Double point information with time tag CP56Time2a
    "M_DP_TB_1": 31,
    # Step position information with time tag CP56Time2a
    # "M_ST_TB_1": 32,
    # Bit string of 32 bit with time tag CP56Time2a
    # "M_BO_TB_1": 33,
    # Measured value, normalized value with time tag CP56Time2a
    # "M_ME_TD_1": 34,
    # Measured value, scaled value with time tag CP56Time2a
    "M_ME_TE_1": 35,
    # Measured value, short floating point value with time tag CP56Time2a
    "M_ME_TF_1": 36,
    # Integrated totals with time tag CP56Time2a
    # "M_IT_TB_1": 37,
    # Event of protection equipment with time tag CP56Time2a
    # "M_EP_TD_1": 38,
    # Packed start events of protection equipment with time tag CP56time2a
    # "M_EP_TE_1": 39,
    # Packed output circuit information of protection equipment with time tag CP56Time2a
    # "M_EP_TF_1": 40,
    # Process information in control direction:
    # Single command
    "C_SC_NA_1": 45,
    # Double command
    "C_DC_NA_1": 46,
    # Regulating step command
    # "C_RC_NA_1": 47,
    # Setpoint command, normalized value
    # "C_SE_NA_1": 48,
    # Setpoint command, scaled value
    "C_SE_NB_1": 49,
    # Setpoint command, short floating point value
    "C_SE_NC_1": 50,
    # Bit string 32 bit
    # "C_BO_NA_1": 51,
    # Command telegrams with long time tag ( 7 octets ):
    # Single command with time tag CP56Time2a
    # "C_SC_TA_1": 58,
    # Double command with time tag CP56Time2a
    # "C_DC_TA_1": 59,
    # Regulating step command with time tag CP56Time2a
    # "C_RC_TA_1": 60,
    # Setpoint command, normalized value with time tag CP56Time2a
    # "C_SE_TA_1": 61,
    # Setpoint command, scaled value with time tag CP56Time2a
    # "C_SE_TB_1": 62,
    # Setpoint command, short floating point value with time tag CP56Time2a
    # "C_SE_TC_1": 63,
    # Bit string 32 bit with time tag CP56Time2a
    # "C_BO_TA_1": 64,
    # System information in monitoring direction :
    # End of initialization
    # "M_EI_NA_1": 70,
    # System information in control direction :
    # (General-) Interrogation command
    "C_IC_NA_1": 100,
    # Counter interrogation command
    # "C_CI_NA_1": 101,
    # Read command
    # "C_RD_NA_1": 102,
    # Clock synchronization command
    # "C_CS_NA_1": 103,
    # ( IEC 101 ) Test command
    # "C_TS_NB_1": 104,
    # Reset process command
    # "C_RP_NC_1": 105,
    # ( IEC 101 ) Delay acquisition command
    # "C_CD_NA_1": 106,
    # Test command with time tag CP56Time2a
    # "C_TS_TA_1": 107,
    # Parameter in control direction :
    # Parameter of measured value, normalized value
    # "P_ME_NA_1": 110,
    # Parameter of measured value, scaled value
    # "P_ME_NB_1": 111,
    # Parameter of measured value, short floating point value
    # "P_ME_NC_1": 112,
    # Parameter activation
    # "P_AC_NA_1": 113,
    # File transfer:
    # File ready
    # "F_FR_NA_1": 120,
    # Section ready
    # "F_SR_NA_1": 121,
    # Call directory, select file, call file, call section
    # "F_SC_NA_1": 122,
    # Last section, last segment
    # "F_LS_NA_1": 123,
    # Ack file, Ack section
    # "F_AF_NA_1": 124,
    # Segment
    # "F_SG_NA_1": 125,
    # Directory
    # "F_DR_TA_1": 126,
    # QueryLog - Request archive file
    # "F_SC_NB_1": 127
}
class LESignedShortField(Field):
    """Little-endian signed 16-bit scapy field (struct format "<h")."""

    def __init__(self, name, default):
        Field.__init__(self, name, default, "<h")


class NormValueField(LESignedShortField):
    """Signed 16-bit field rendered as a normalized value.

    NOTE(review): maps the raw range onto roughly [-1, 1) using a
    denominator of 2**16 - 1 — confirm this matches the IEC 60870-5
    normalized-value encoding before relying on the displayed value.
    """

    def i2repr(self, pkt, x):
        normalized = 2 * ((x + 2**15) / ((2**15 + 2**15.0) - 1)) - 1
        return self.i2h(pkt, normalized)
class CP56Time(Packet):
    """CP56Time2a: 7-octet binary time (ms-of-minute, min, hour, day, month, year)."""

    name = "CP56Time"
    fields_desc = [
        ShortField("Ms", 0x0000),
        ByteField("Min", 0x00),
        ByteField("Hour", 0x0),
        ByteField("Day", 0x01),
        ByteField("Month", 0x01),
        ByteField("Year", 0x5B),
    ]


class CP24Time(Packet):
    """CP24Time2a: 3-octet binary time (ms-of-minute, minutes)."""

    name = "CP24Time"
    fields_desc = [ShortField("Ms", 0x0000), ByteField("Min", 0x00)]

    def extract_padding(self, p):
        # Time tags carry no nested payload; keep trailing bytes as padding.
        return "", p


class CP16Time(Packet):
    """CP16Time2a: 2-octet binary time (milliseconds).

    Bug fix: ``name`` previously read "CP24Time" (copy-paste from the class
    above), which made CP16 time tags indistinguishable from CP24 tags in
    scapy dissection output.
    """

    name = "CP16Time"
    fields_desc = [ShortField("Ms", 0x0000)]

    def extract_padding(self, p):
        return "", p
# Information elements shared by the ASDU object definitions below.
class IOA(Packet):
    """Information object address (3 octets, little-endian)."""

    name = "IOA"
    fields_desc = [LEX3BytesField("IOA", 0x010000)]


class QOS(Packet):
    """Qualifier of set-point command."""

    name = "QOS"
    fields_desc = [
        XBitField("seq", 0, 1),
        XBitField("QL", 0, 7),
    ]  # "seq" is the S/E bit; the name S/E raised an error in scapy.


class QDS(Packet):
    """Quality descriptor (invalid/not-topical/substituted/blocked/overflow)."""

    name = "QDS"
    fields_desc = [
        XBitField("IV", 0, 1),
        XBitField("NT", 0, 1),
        XBitField("SB", 0, 1),
        XBitField("BL", 0, 1),
        XBitField("Padding", 0, 3),
        XBitField("OV", 0, 1),
    ]

    def extract_padding(self, p):
        return "", p


class QDP(Packet):
    """Quality descriptor for protection events (adds the EI bit)."""

    name = "QDP"
    fields_desc = [
        XBitField("IV", 0, 1),
        XBitField("NT", 0, 1),
        XBitField("SB", 0, 1),
        XBitField("BL", 0, 1),
        XBitField("EI", 0, 1),
        XBitField("Padding", 0, 3),
    ]

    def extract_padding(self, p):
        return "", p


class SIQ(Packet):
    """Single-point information with quality descriptor (SPI + quality bits)."""

    name = "SIQ"
    fields_desc = [
        XBitField("IV", 0, 1),
        XBitField("NT", 0, 1),
        XBitField("SB", 0, 1),
        XBitField("BL", 0, 1),
        XBitField("Padding", 0, 3),
        XBitField("SPI", 0, 1),
    ]


class BSI(Packet):
    """Binary state information: 32-bit string, little-endian."""

    name = "BSI"
    fields_desc = [LEIntField("BSI", 0)]


class DIQ(Packet):
    """Double-point information with quality descriptor (2-bit DPI)."""

    name = "DIQ"
    fields_desc = [
        XBitField("IV", 0, 1),
        XBitField("NT", 0, 1),
        XBitField("SB", 0, 1),
        XBitField("BL", 0, 1),
        XBitField("Padding", 0, 2),
        XBitField("DPI", 0, 2),
    ]


class VTI(Packet):
    """Value with transient state indication (T bit + 7-bit value)."""

    name = "VTI"
    fields_desc = [XBitField("T", 0, 1), XBitField("Value", 0, 7)]
class NVA(Packet):
    """Normalized value (16-bit, displayed normalized via NormValueField)."""

    name = "NVA"
    fields_desc = [NormValueField("NVA", 0x5000)]


class SVA(Packet):
    """Scaled value (signed 16-bit, little-endian)."""

    name = "SVA"
    fields_desc = [LESignedShortField("SVA", 0x50)]


class BCR(Packet):
    """Binary counter reading: 32-bit value plus IV/CA/CY flags and sequence."""

    name = "BCR"
    fields_desc = [
        LESignedIntField("Value", 0x0),
        XBitField("IV", 0, 1),
        XBitField("CA", 0, 1),
        XBitField("CY", 0, 1),
        XBitField("SeqNr", 0, 5),
    ]


class SEP(Packet):
    """Single event of protection equipment (quality bits + 2-bit ES)."""

    name = "SEP"
    fields_desc = [
        XBitField("IV", 0, 1),
        XBitField("NT", 0, 1),
        XBitField("SB", 0, 1),
        XBitField("BL", 0, 1),
        XBitField("EI", 0, 1),
        XBitField("Padding", 0, 1),
        XBitField("ES", 0, 2),
    ]

    def extract_padding(self, p):
        return "", p


class SPE(Packet):
    """Start events of protection equipment.

    NOTE(review): the field list contains SL2 twice where SL1 would be
    expected (the duplicate was already flagged in the original source) —
    confirm against the IEC 60870-5-101 SPE definition.
    """

    name = "SPE"
    fields_desc = [
        XBitField("Padding", 0, 2),
        XBitField("SRD", 0, 1),
        XBitField("SIE", 0, 1),
        XBitField("SL3", 0, 1),
        # XBitField("SL2", 0, 1), Duplicate field, commented
        XBitField("SL2", 0, 1),
        XBitField("GS", 0, 1),
    ]

    def extract_padding(self, p):
        return "", p


class OCI(Packet):
    """Output circuit information of protection equipment."""

    name = "OCI"
    fields_desc = [
        XBitField("Padding", 0, 4),
        XBitField("CL3", 0, 1),
        XBitField("CL2", 0, 1),
        XBitField("CL1", 0, 1),
        XBitField("GC", 0, 1),
    ]


class SCD(Packet):
    """Status and status-change detection (two 16-bit words)."""

    name = "SCD"
    fields_desc = [
        LEShortField("Status", 0x0),  # NOTE(review): confirm little-endian per spec
        LEShortField("StatChaDet", 0x0),
    ]  # NOTE(review): confirm little-endian per spec


class FloatField(Field):
    """Little-endian IEEE-754 short floating point field ("<f")."""

    def __init__(self, name, default):
        Field.__init__(self, name, default, "<f")
# ASDU information-object definitions, monitor direction (types 1-21).
# Class names follow the pattern asdu_infobj_<type id>; the ``name``
# attribute carries the IEC type-identification mnemonic.
class asdu_infobj_1(Packet):
    # Type 1: single-point information
    name = "M_SP_NA_1"
    fields_desc = [
        IOA,
        PacketField("SIQ", SIQ(), SIQ),
    ]


class asdu_infobj_2(Packet):
    # Type 2: single-point information with time tag
    name = "M_SP_TA_1"
    fields_desc = [
        IOA,
        PacketField("SIQ", SIQ(), SIQ),
        PacketField("CP24Time", CP24Time(), CP24Time),
    ]


class asdu_infobj_3(Packet):
    # Type 3: double-point information
    name = "M_DP_NA_1"
    fields_desc = [IOA, PacketField("DIQ", DIQ(), DIQ)]


class asdu_infobj_4(Packet):
    # Type 4: double-point information with time tag
    name = "M_DP_TA_1"
    fields_desc = [
        IOA,
        PacketField("DIQ", DIQ(), DIQ),
        PacketField("CP24Time", CP24Time(), CP24Time),
    ]


class asdu_infobj_5(Packet):
    # Type 5: step position information
    name = "M_ST_NA_1"
    fields_desc = [IOA, PacketField("VTI", VTI(), VTI), PacketField("QDS", QDS(), QDS)]


class asdu_infobj_6(Packet):
    # Type 6: step position information with time tag
    name = "M_ST_TA_1"
    fields_desc = [
        IOA,
        PacketField("VTI", VTI(), VTI),
        PacketField("QDS", QDS(), QDS),
        PacketField("CP24Time", CP24Time(), CP24Time),
    ]


class asdu_infobj_7(Packet):
    # Type 7: bit string of 32 bit
    name = "M_BO_NA_1"
    fields_desc = [IOA, BSI, PacketField("QDS", QDS(), QDS)]


class asdu_infobj_8(Packet):
    # Type 8: bit string of 32 bit with time tag
    name = "M_BO_TA_1"
    fields_desc = [
        IOA,
        BSI,
        PacketField("QDS", QDS(), QDS),
        PacketField("CP24Time", CP24Time(), CP24Time),
    ]


class asdu_infobj_9(Packet):
    # Type 9: measured value, normalized
    name = "M_ME_NA_1"
    fields_desc = [IOA, NVA, PacketField("QDS", QDS(), QDS)]


class asdu_infobj_10(Packet):
    # Type 10: measured value, normalized, with time tag
    name = "M_ME_TA_1"
    fields_desc = [
        IOA,
        NVA,
        PacketField("QDS", QDS(), QDS),
        PacketField("CP24Time", CP24Time(), CP24Time),
    ]


class asdu_infobj_11(Packet):
    # Type 11: measured value, scaled
    name = "M_ME_NB_1"
    fields_desc = [IOA, SVA, PacketField("QDS", QDS(), QDS)]


class asdu_infobj_12(Packet):
    # Type 12: measured value, scaled, with time tag
    name = "M_ME_TB_1"
    fields_desc = [
        IOA,
        SVA,
        PacketField("QDS", QDS(), QDS),
        PacketField("CP24Time", CP24Time(), CP24Time),
    ]


class asdu_infobj_13(Packet):
    # Type 13: measured value, short floating point
    # NOTE(review): default FPNumber is 1 here but 0 everywhere else — confirm intent.
    name = "M_ME_NC_1"
    fields_desc = [IOA, FloatField("FPNumber", 1), PacketField("QDS", QDS(), QDS)]


class asdu_infobj_14(Packet):
    # Type 14: measured value, short floating point, with time tag
    name = "M_ME_TC_1"
    fields_desc = [
        IOA,
        FloatField("FPNumber", 0),
        PacketField("QDS", QDS(), QDS),
        PacketField("CP24Time", CP24Time(), CP24Time),
    ]


class asdu_infobj_15(Packet):
    # Type 15: integrated totals
    name = "M_IT_NA_1"
    fields_desc = [IOA, PacketField("BCR", BCR(), BCR)]


class asdu_infobj_16(Packet):
    # Type 16: integrated totals with time tag
    name = "M_IT_TA_1"
    fields_desc = [
        IOA,
        PacketField("BCR", BCR(), BCR),
        PacketField("CP24Time", CP24Time(), CP24Time),
    ]


class asdu_infobj_17(Packet):
    # Type 17: event of protection equipment with time tag
    name = "M_EP_TA_1"
    fields_desc = [
        IOA,
        PacketField("SEP", SEP(), SEP),
        CP16Time,  # elapsed time
        PacketField("CP24Time", CP24Time(), CP24Time),
    ]  # binary time


class asdu_infobj_18(Packet):
    # Type 18: packed start events of protection equipment with time tag
    name = "M_EP_TB_1"
    fields_desc = [
        IOA,
        PacketField("SPE", SPE(), SPE),
        PacketField("QDP", QDP(), QDP),
        CP16Time,  # elapsed time
        PacketField("CP24Time", CP24Time(), CP24Time),
    ]  # binary time


class asdu_infobj_19(Packet):
    # Type 19: packed output circuit information of protection equipment
    name = "M_EP_TC_1"
    fields_desc = [
        IOA,
        PacketField("OCI", OCI(), OCI),
        PacketField("QDP", QDP(), QDP),
        CP16Time,  # relay duration time
        PacketField("CP24Time", CP24Time(), CP24Time),
    ]  # binary time


class asdu_infobj_20(Packet):
    # Type 20: packed single-point information with status change detection
    name = "M_PS_NA_1"
    fields_desc = [IOA, PacketField("SCD", SCD(), SCD), PacketField("QDS", QDS(), QDS)]


class asdu_infobj_21(Packet):
    # Type 21: measured value, normalized, without quality descriptor
    name = "M_ME_ND_1"
    fields_desc = [IOA, NVA]
# ASDU information objects with long (CP56Time2a) time tags, types 30-40.
class asdu_infobj_30(Packet):
    # Type 30: single-point information with CP56Time2a tag
    name = "M_SP_TB_1"
    fields_desc = [
        IOA,
        PacketField("SIQ", SIQ(), SIQ),
        PacketField("CP56Time", CP56Time(), CP56Time),
    ]


class asdu_infobj_31(Packet):
    # Type 31: double-point information with CP56Time2a tag
    name = "M_DP_TB_1"
    fields_desc = [
        IOA,
        PacketField("DIQ", DIQ(), DIQ),
        PacketField("CP56Time", CP56Time(), CP56Time),
    ]


class asdu_infobj_32(Packet):
    # Type 32: step position information with CP56Time2a tag.
    # Bug fix: the name previously read "M_ST_TA_1" (type 6's mnemonic);
    # per the TypeIdentification table, type 32 is M_ST_TB_1.
    name = "M_ST_TB_1"
    fields_desc = [
        IOA,
        PacketField("VTI", VTI(), VTI),
        PacketField("QDS", QDS(), QDS),
        PacketField("CP56Time", CP56Time(), CP56Time),
    ]


class asdu_infobj_33(Packet):
    # Type 33: bit string of 32 bit with CP56Time2a tag
    name = "M_BO_TB_1"
    fields_desc = [
        IOA,
        PacketField("BSI", BSI(), BSI),
        PacketField("QDS", QDS(), QDS),
        PacketField("CP56Time", CP56Time(), CP56Time),
    ]


class asdu_infobj_34(Packet):
    # Type 34: measured value, normalized, with CP56Time2a tag
    name = "M_ME_TD_1"
    fields_desc = [
        IOA,
        NVA,
        PacketField("QDS", QDS(), QDS),
        PacketField("CP56Time", CP56Time(), CP56Time),
    ]


class asdu_infobj_35(Packet):
    # Type 35: measured value, scaled, with CP56Time2a tag
    name = "M_ME_TE_1"
    fields_desc = [
        IOA,
        SVA,
        PacketField("QDS", QDS(), QDS),
        PacketField("CP56Time", CP56Time(), CP56Time),
    ]


class asdu_infobj_36(Packet):
    # Type 36: measured value, short floating point, with CP56Time2a tag
    name = "M_ME_TF_1"
    fields_desc = [
        IOA,
        FloatField("FPNumber", 0),
        PacketField("QDS", QDS(), QDS),
        PacketField("CP56Time", CP56Time(), CP56Time),
    ]


class asdu_infobj_37(Packet):
    # Type 37: integrated totals with CP56Time2a tag
    name = "M_IT_TB_1"
    fields_desc = [
        IOA,
        PacketField("BCR", BCR(), BCR),
        PacketField("CP56Time", CP56Time(), CP56Time),
    ]


class asdu_infobj_38(Packet):
    # Type 38: event of protection equipment with CP56Time2a tag
    name = "M_EP_TD_1"
    fields_desc = [
        IOA,
        PacketField("SEP", SEP(), SEP),
        CP16Time,  # elapsed time
        PacketField("CP56Time", CP56Time(), CP56Time),
    ]  # binary time


class asdu_infobj_39(Packet):
    # Type 39: packed start events of protection equipment with CP56Time2a tag
    name = "M_EP_TE_1"
    fields_desc = [
        IOA,
        PacketField("SPE", SPE(), SPE),
        PacketField("QDP", QDP(), QDP),
        CP16Time,  # relay duration time
        PacketField("CP56Time", CP56Time(), CP56Time),
    ]  # binary time


class asdu_infobj_40(Packet):
    # Type 40: packed output circuit information with CP56Time2a tag
    name = "M_EP_TF_1"
    fields_desc = [
        IOA,
        PacketField("OCI", OCI(), OCI),
        PacketField("QDP", QDP(), QDP),
        CP16Time,  # relay duration time
        PacketField("CP56Time", CP56Time(), CP56Time),
    ]  # binary time
# Command ASDU information objects, control direction (types 45-51).
class asdu_infobj_45(Packet):
    # Type 45: single command (SCO byte split into QOC / padding / SCS)
    name = "C_SC_NA_1"
    fields_desc = [
        IOA,
        XBitField("QOC", 0, 6),
        XBitField("Padding", 0, 1),
        BitField("SCS", 0, 1),
    ]


class asdu_infobj_46(Packet):
    # Type 46: double command (DCO byte split into QOC / DCS)
    name = "C_DC_NA_1"
    fields_desc = [
        IOA,
        XBitField("QOC", 0, 6),
        XBitField("DCS", 0, 2),
    ]


class asdu_infobj_47(Packet):
    # Type 47: regulating step command (RCO byte split into QOC / RCS)
    name = "C_RC_NA_1"
    fields_desc = [
        IOA,
        XBitField("QOC", 0, 6),
        XBitField("RCS", 0, 2),
    ]


class asdu_infobj_48(Packet):
    # Type 48: setpoint command, normalized value
    name = "C_SE_NA_1"
    fields_desc = [
        IOA,
        NVA,
        PacketField("QOS", QOS(), QOS),
    ]


class asdu_infobj_49(Packet):
    # Type 49: setpoint command, scaled value
    name = "C_SE_NB_1"
    fields_desc = [
        IOA,
        SVA,
        PacketField("QOS", QOS(), QOS),
    ]


class asdu_infobj_50(Packet):
    # Type 50: setpoint command, short floating point value
    name = "C_SE_NC_1"
    fields_desc = [IOA, FloatField("FPNumber", 0), PacketField("QOS", QOS(), QOS)]


class asdu_infobj_51(Packet):
    # Type 51: bit string 32 bit
    name = "C_BO_NA_1"
    fields_desc = [IOA, BSI]
# maybe in handle client
def calctime():
    """Build a CP56Time2a timestamp (scapy ``CP56Time``) for "now".

    Returns:
        CP56Time: packet whose Ms field holds milliseconds within the
        current minute; Min/Hour/Day/Month come straight from
        ``datetime.now()`` and Year holds the two-digit year.
    """
    now = datetime.now()
    # Integer division: on Python 3, microsecond / 1000 is a float and
    # the ShortField "Ms" cannot pack a float.
    ms = now.second * 1000 + now.microsecond // 1000
    cp56time = CP56Time()
    cp56time.setfieldval("Ms", ms)
    cp56time.setfieldval("Min", now.minute)
    cp56time.setfieldval("Hour", now.hour)
    cp56time.setfieldval("Day", now.day)
    cp56time.setfieldval("Month", now.month)
    # CP56Time2a carries a two-digit year; the full year (e.g. 2024)
    # would overflow the one-byte Year field when the packet is built.
    cp56time.setfieldval("Year", now.year % 100)
    return cp56time
class asdu_infobj_58(Packet):
name = "C_SC_TA_1"
fields_desc = [
IOA,
# XByteField("SCO", 0x00)]
# Exacter representation(2) for SCO:
# XBitField("S/E", 0, 1), XBitField("QU", 0, 5), XBitField("Padding", 0, 1), XBitField("SCS", 0, 1)]
XBitField("QOC", 0, 6),
XBitField("Padding", 0, 1),
BitField("SCS", 0, 1),
PacketField("CP56Time", CP56Time(), CP56Time),
]
class asdu_infobj_59(Packet):
name = "C_DC_TA_1"
fields_desc = [
IOA,
# XByteField("DCO", 0x00)]
# Exacter representation(2) for DCO:
# XBitField("S/E", 0, 1), XBitField("QU", 0, 5), XBitField("DCS", 0, 2)]
XBitField("QOC", 0, 6),
XBitField("DCS", 0, 2),
PacketField("CP56Time", CP56Time(), CP56Time),
]
class asdu_infobj_60(Packet):
name = "C_RC_TA_1"
fields_desc = [
IOA,
# XByteField("RCO", 0x00)]
# Exacter representation(2) for RCO:
# XBitField("S/E", 0, 1), XBitField("QU", 0, 5), XBitField("RCS", 0, 2)]
XBitField("QOC", 0, 6),
XBitField("RCS", 0, 2),
PacketField("CP56Time", CP56Time(), CP56Time),
]
class asdu_infobj_61(Packet):
name = "C_SE_TA_1"
fields_desc = [
IOA,
# Normalized value
NVA,
PacketField("QOS", QOS(), QOS),
PacketField("CP56Time", CP56Time(), CP56Time),
]
class asdu_infobj_62(Packet):
    # Type 62: C_SE_TB_1 - set-point command, scaled value, with time tag.
    name = "C_SE_TB_1"
    fields_desc = [
        IOA,
        # Scaled value
        SVA,
        PacketField("QOS", QOS(), QOS),
        PacketField("CP56Time", CP56Time(), CP56Time),
    ]
class asdu_infobj_63(Packet):
    # Type 63: C_SE_TC_1 - set-point command, short float, with time tag.
    name = "C_SE_TC_1"
    fields_desc = [
        IOA,
        FloatField("FPNumber", 0),
        PacketField("QOS", QOS(), QOS),
        PacketField("CP56Time", CP56Time(), CP56Time),
    ]
class asdu_infobj_64(Packet):
    # Type 64: C_BO_TA_1 - bitstring of 32 bit with time tag CP56Time2a.
    name = "C_BO_TA_1"
    fields_desc = [
        IOA,
        BSI,
        PacketField("QOS", QOS(), QOS),
        PacketField("CP56Time", CP56Time(), CP56Time),
    ]
class asdu_infobj_100(Packet):
    # Type 100: C_IC_NA_1 - (general) interrogation command; QOI 0x14 = station interrogation.
    name = "C_IC_NA_1"
    fields_desc = [LEX3BytesField("IOA", 0x0), ByteField("QOI", 0x14)]
class asdu_infobj_101(Packet):
    # Type 101: C_CI_NA_1 - counter interrogation command.
    name = "C_CI_NA_1"
    fields_desc = [LEX3BytesField("IOA", 0x0), ByteField("QCC", 0x05)]
class asdu_infobj_102(Packet):
    # Type 102: C_RD_NA_1 - read command (only the object address).
    name = "C_RD_NA_1"
    fields_desc = [LEX3BytesField("IOA", 0x0)]
class asdu_infobj_103(Packet):
    # Type 103: C_CS_NA_1 - clock synchronisation command with CP56Time2a.
    name = "C_CS_NA_1"
    fields_desc = [
        LEX3BytesField("IOA", 0x0),
        PacketField("CP56Time", CP56Time(), CP56Time),
    ]
# IEC104 asdu head
class asdu_head(Packet):
    """ASDU header: type identification, variable structure qualifier
    (SQ + number of objects), cause of transmission (T/PN/COT), originator
    address and little-endian common address."""

    name = "asdu_head"
    fields_desc = [
        ByteField("TypeID", 0x05),  # Command Type
        # Variable structure qualifier: SQ flag + 7-bit number of objects.
        BitField("SQ", 0b0, 1),
        BitField("NoO", 1, 7),
        # Cause of transmission: test flag, positive/negative, 6-bit cause.
        BitField("T", 0, 1),
        BitField("PN", 0, 1),
        BitField("COT", 6, 6),
        XByteField("OrigAddr", 0x00),
        LEShortField("COA", 0),
    ]

    def __str__(self):
        """Render the ASDU as '<name> with <head fields> Objects=[...]'."""
        rendered = []
        layer = self.payload
        while not isinstance(layer, NoPayload):
            rendered.append(str(layer.fields))
            layer = layer.payload
        return "{} with {} Objects=[{}]".format(
            self.payload.name, self.fields, ", ".join(rendered)
        )

    def guess_payload_class(self, payload):
        """Select the information-object class from TypeID.

        For the monitor-direction types that end in a variable quality
        descriptor, the layer chaining of scapy is rebound depending on SQ
        (SQ=0: list of addressed objects, SQ=1: sequence of values).
        Returns None for unknown type ids (scapy default handling).
        """
        type_id = self.TypeID
        # TypeID -> (info object class, quality descriptor, next layer for SQ=1)
        sq_sensitive = {
            1: (asdu_infobj_1, SIQ, SIQ),
            3: (asdu_infobj_3, DIQ, DIQ),
            5: (asdu_infobj_5, QDS, VTI),
            7: (asdu_infobj_7, QDS, BSI),
            9: (asdu_infobj_9, QDS, NVA),
            11: (asdu_infobj_11, QDS, SVA),
            13: (asdu_infobj_13, QDS, FloatField),
        }
        if type_id in sq_sensitive:
            infobj_cls, qualifier, sq1_next = sq_sensitive[type_id]
            if self.SQ == 0:
                bind_layers(infobj_cls, infobj_cls)  # if SQ = 0
                bind_layers(qualifier, Padding)  # if SQ = 0
            else:
                bind_layers(qualifier, sq1_next)  # if SQ = 1
            return infobj_cls
        plain = {
            2: asdu_infobj_2, 4: asdu_infobj_4, 6: asdu_infobj_6,
            8: asdu_infobj_8, 10: asdu_infobj_10, 12: asdu_infobj_12,
            14: asdu_infobj_14, 15: asdu_infobj_15, 16: asdu_infobj_16,
            17: asdu_infobj_17, 18: asdu_infobj_18, 19: asdu_infobj_19,
            20: asdu_infobj_20, 21: asdu_infobj_21, 30: asdu_infobj_30,
            31: asdu_infobj_31, 32: asdu_infobj_32, 33: asdu_infobj_33,
            34: asdu_infobj_34, 35: asdu_infobj_35, 36: asdu_infobj_36,
            37: asdu_infobj_37, 38: asdu_infobj_38, 39: asdu_infobj_39,
            40: asdu_infobj_40, 45: asdu_infobj_45, 46: asdu_infobj_46,
            47: asdu_infobj_47, 48: asdu_infobj_48, 49: asdu_infobj_49,
            50: asdu_infobj_50, 51: asdu_infobj_51, 58: asdu_infobj_58,
            59: asdu_infobj_59, 60: asdu_infobj_60, 61: asdu_infobj_61,
            62: asdu_infobj_62, 63: asdu_infobj_63, 64: asdu_infobj_64,
            100: asdu_infobj_100, 101: asdu_infobj_101,
            102: asdu_infobj_102, 103: asdu_infobj_103,
        }
        return plain.get(type_id)
bind_layers(i_frame, asdu_head)
# Bind every supported ASDU type id to its information-object class so that
# scapy dissects asdu_head payloads by TypeID.
_ASDU_TYPE_IDS = (
    1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
    30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
    45, 46, 47, 48, 49, 50, 51,
    58, 59, 60, 61, 62, 63, 64,
    100, 101, 102, 103,
)
for _type_id in _ASDU_TYPE_IDS:
    bind_layers(
        asdu_head, globals()["asdu_infobj_%d" % _type_id], {"TypeID": _type_id}
    )
# For SQ=1 and SQ=0, experimental..
# bind_layers(asdu_infobj_1, asdu_infobj_1)
# bind_layers(asdu_infobj_1, SIQ)
# bind_layers(SIQ, SIQ)
# bind_layers(asdu_infobj_1, asdu_infobj_1, {'SQ': 0})
# bind_layers(SIQ, asdu_infobj_1, {'SQ': 0})
# Default: quality descriptors terminate the info-object chain.
bind_layers(SIQ, Padding)
bind_layers(DIQ, Padding)
bind_layers(VTI, Padding)
bind_layers(QDS, Padding)
bind_layers(BCR, Padding)
bind_layers(SEP, Padding)
bind_layers(SPE, Padding)
bind_layers(QDP, Padding)
bind_layers(QOS, Padding)
# Pre-built U-format control frames (IEC 60870-5-104 start/stop/test).
STARTDT_act = u_frame(Type=0x07)
STARTDT_con = u_frame(Type=0x0B)
STOPDT_act = u_frame(Type=0x13)
STOPDT_con = u_frame(Type=0x23)
TESTFR_act = u_frame(Type=0x43)
TESTFR_con = u_frame(Type=0x83)
# Lookup of the U-frame type octet (as hex string) to a display name.
# NOTE(review): Python's hex() produces lowercase digits ('0xb'), so the
# "0xB" key would never match a hex() result, and the value casing is
# inconsistent (STARTDT_ACT vs STOPDT_act) - verify against the lookup site.
u_list = {
    "0x7": "STARTDT_ACT",
    "0xB": "STARTDT_CON",
    "0x13": "STOPDT_act",
    "0x23": "STOPDT_con",
    "0x43": "TESTFR_act",
    "0x83": "TESTFR_con",
}
# ==== Timeouts ==== old.....
# Timeout of connection establishment
# T_0 = 30
# Timeout of send or test APDUs (wait time for an acknowledgement)
# T_1 = 15
# Timeout for acknowledges in case of no data messages, T_2 < T_1 (acknowledge after x seconds)
# T_2 = 10
# Timeout for sending test frames in case of a long idle state
# T_3 = 21
# ==== Other parameters ====
# Maximum difference receive sequence number to send state variable (max. number of unacknowledged telegrams)
# k = 12
# Latest acknowledge after receiving w I-format APDUs (acknowledge after w telegrams)
# w = 8
# Maximum frame size (in bytes)
# MaxFrameSize = 254
# Testing a packet:
# test2 = i_frame() / asdu_head()
# test2.SIQ = SIQ(IV=1) # Change value in PacketField-packet
# hexdump(test2)
# test2.show()
| 32,558 | Python | .py | 978 | 26.769939 | 109 | 0.588464 | mushorg/conpot | 1,224 | 413 | 115 | GPL-2.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,483 | DeviceDataController.py | mushorg_conpot/conpot/protocols/IEC104/DeviceDataController.py | # Copyright (C) 2017 Patrick Reichenberger (University of Passau) <patrick.reichenberger@t-online.de>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from lxml import etree
from conpot.protocols.IEC104.frames import *
import conpot.core as conpot_core
from conpot.protocols.IEC104.register import IEC104Register
logger = logging.getLogger(__name__)
# Manages the devices in a dictionary with key: address in 16_8 Bit format and value: register objects
class DeviceDataController(object):
    """Loads the IEC 60870-5-104 information objects from the XML template.

    Registers are kept in a dict keyed by their "16_8 bit" string address
    (e.g. "100_1") with IEC104Register instances as values.
    """

    def __init__(self, template):
        """Parse the template, validate each register and fill the dict.

        :param template: path to the XML template file.
        :raises AssertionError: for malformed addresses, missing databus
            keys, out-of-range values, duplicate addresses or dangling
            relations.
        """
        # key: IEC104 address, value: register object
        self.registers = {}
        self.common_address = int(
            conpot_core.get_databus().get_value("CommonAddress"), 0
        )
        dom = etree.parse(template)
        categories = dom.xpath("//IEC104/categories/*")
        for category in categories:
            categ_id = int(category.attrib["id"])
            for register in category:
                address = register.attrib["name"]
                splt_addr1, splt_addr2 = address.split("_")
                assert 0 <= int(splt_addr1) <= 65535 and 0 <= int(splt_addr2) <= 255, (
                    "Address %s not allowed. 0..65535_0..255" % address
                )
                databuskey = register.xpath("./value/text()")[0]
                if register.get("rel"):
                    rel = register.attrib["rel"]
                else:
                    rel = ""
                # checks if a value for that key exists in the databus
                try:
                    val = conpot_core.get_databus().get_value(databuskey)
                except AssertionError as err:
                    err.args = ("Key not found in key-value store",)
                    raise
                # simple data type checks per ASDU category
                assert not (
                    categ_id in (1, 2, 30, 45, 58) and val not in (0, 1)
                ), "Value for obj %s not allowed with datatype %s" % (address, categ_id)
                assert not (
                    categ_id in (3, 4, 31, 46, 59) and val not in (0, 1, 2, 3)
                ), "Value for obj %s not allowed with datatype %s" % (address, categ_id)
                # BUG FIX: the original condition `-32768 >= val >= 32767`
                # can never be true, so the 16-bit range check never fired.
                assert not (
                    categ_id in (11, 12, 49, 62) and not -32768 <= val <= 32767
                ), "Value for obj %s not allowed with datatype %s" % (address, categ_id)
                iec104_register = IEC104Register(categ_id, address, val, rel)
                assert address not in self.registers
                self.registers[address] = iec104_register
        self.check_registers()

    def check_registers(self):
        """Assert that every configured relation points at an existing register."""
        for elem in self.registers:
            rel = self.registers[elem].relation
            assert not (
                rel != "" and rel not in self.registers
            ), "Relation object doesn't exist"

    def get_object_from_reg(self, obj_addr):
        """Return the register for the numeric (little-endian) IOA, or None."""
        address_structured = hex_in_addr(obj_addr)
        if address_structured in self.registers:
            return self.registers[address_structured]
        else:
            return None

    def set_object_val(self, obj_addr, val):
        """Set the value of the register addressed by the numeric IOA, if present.

        BUG FIX: the original converted obj_addr to a hex *string* before
        calling hex_in_addr(), whose 'x' format spec requires an int and
        therefore raised for every call.
        """
        address_structured = hex_in_addr(obj_addr)
        if address_structured in self.registers:
            self.registers[address_structured].set_val(val)

    def get_registers(self):
        """Return the whole address -> IEC104Register dict."""
        return self.registers
# Builds response for a certain asdu type and returns list of responses with this type
def inro_response(sorted_reg, asdu_type):
    """Build the interrogation responses for all registers of one ASDU type.

    :param sorted_reg: iterable of (address, IEC104Register) tuples.
    :param asdu_type: monitor-direction ASDU type id (1, 3, 5, 7, 9, 11, 13).
    :return: list of i_frame packets (COT=20, interrogated by station),
        each carrying as many info objects as fit into MaxFrameSize.
    """
    calls_dict = {
        "asdu_infobj_1": asdu_infobj_1,
        "asdu_infobj_3": asdu_infobj_3,
        "asdu_infobj_5": asdu_infobj_5,
        "asdu_infobj_7": asdu_infobj_7,
        "asdu_infobj_9": asdu_infobj_9,
        "asdu_infobj_11": asdu_infobj_11,
        "asdu_infobj_13": asdu_infobj_13,
    }
    call = calls_dict["asdu_infobj_" + str(asdu_type)]
    max_frame_size = conpot_core.get_databus().get_value("MaxFrameSize")
    # 12 = length of i_frame header (6) + length of asdu_head (6)
    objs_per_frame = int((max_frame_size - 12) / len(call()))
    resp_list = []
    resp = i_frame() / asdu_head(SQ=0, COT=20)
    counter = 0
    for dev in sorted_reg:
        if dev[1].category_id != asdu_type:
            continue
        if counter >= objs_per_frame:
            # BUG FIX: write the object count into the frame *before*
            # flushing it; previously intermediate frames kept the
            # default NoO=1 regardless of how many objects they carried.
            resp.NoO = counter
            resp_list.append(resp)
            counter = 0
            resp = i_frame() / asdu_head(SQ=0, COT=20)
        xaddr = addr_in_hex(dev[1].addr)
        add_info_obj = call(IOA=xaddr)  # SQ = 0
        val = dev[1].val
        if asdu_type == 1:
            add_info_obj.SIQ = SIQ(SPI=val)
            # Other possibility for allocation (certain value for whole field)
            # add_info_obj.SIQ = struct.pack("B", val)
        elif asdu_type == 3:
            add_info_obj.DIQ = DIQ(DPI=val)
        elif asdu_type == 5:
            add_info_obj.VTI = VTI(Value=val)
        elif asdu_type == 7:
            add_info_obj.BSI = val
        elif asdu_type == 9:
            add_info_obj.NVA = val
        elif asdu_type == 11:
            add_info_obj.SVA = val
        elif asdu_type == 13:
            add_info_obj.FPNumber = val
        resp /= add_info_obj
        counter += 1
    resp.NoO = counter
    if counter > 0:
        resp_list.append(resp)
    return resp_list
# Converts the address from number representation in 16_8 Bit String format with delimiter "_"
def hex_in_addr(hex_addr):
    """Convert a numeric 3-byte IOA into the "16_8 bit" register key string.

    The wire format is little endian: the two low bytes form the 16-bit
    part (byte-swapped), the high byte is the 8-bit part.
    E.g. 0x010203 -> "770_1".
    """
    high = (hex_addr >> 16) & 0xFF
    mid = (hex_addr >> 8) & 0xFF
    low = hex_addr & 0xFF
    return "{}_{}".format((low << 8) | mid, high)
# Converts the address from 16_8 Bit String format with delimiter "_" in a number representation
def addr_in_hex(address):
    """Convert a "16_8 bit" address string back into the numeric 3-byte IOA.

    Inverse of hex_in_addr(): the 16-bit part is byte-swapped into the two
    low bytes, the 8-bit part becomes the high byte.
    E.g. "770_1" -> 0x010203.
    """
    part16, part8 = address.split("_")
    part16 = int(part16)
    part8 = int(part8)
    return (part8 << 16) | ((part16 & 0xFF) << 8) | (part16 >> 8)
| 6,921 | Python | .py | 153 | 35.431373 | 102 | 0.591347 | mushorg/conpot | 1,224 | 413 | 115 | GPL-2.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,484 | IEC104_server.py | mushorg_conpot/conpot/protocols/IEC104/IEC104_server.py | # Copyright (C) 2017 Patrick Reichenberger (University of Passau) <patrick.reichenberger@t-online.de>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from conpot.protocols.IEC104.DeviceDataController import DeviceDataController
from conpot.protocols.IEC104.IEC104 import IEC104
from .frames import struct, TESTFR_act, socket, errno
import logging
import conpot.core as conpot_core
from gevent.server import StreamServer
import gevent
from .errors import Timeout_t3
from conpot.core.protocol_wrapper import conpot_protocol
logger = logging.getLogger(__name__)
@conpot_protocol
class IEC104Server(object):
    """Gevent StreamServer front end for the IEC 60870-5-104 honeypot.

    One IEC104 protocol handler is created per client connection; the
    accept loop reassembles APDUs from the socket and dispatches them by
    frame format (I/S/U).
    """

    def __init__(self, template, template_directory, args):
        # T_0: connection establishment timeout, used as socket timeout.
        self.timeout = conpot_core.get_databus().get_value("T_0")
        self.device_data_controller = DeviceDataController(template)
        self.server_active = True
        self.server = None
        logger.info("IEC 104 Server up")
        self.template = template

    def handle(self, sock, address):
        """Per-connection loop: reassemble APDUs and dispatch I/S/U frames.

        A T3 idle timer wraps every receive; when it fires a TESTFR act is
        sent. A gevent.Timeout escaping from the handler (T1 expiry) tears
        the connection down.
        """
        sock.settimeout(self.timeout)
        session = conpot_core.get_session(
            "IEC104",
            address[0],
            address[1],
            sock.getsockname()[0],
            sock.getsockname()[1],
        )
        logger.info(
            "New IEC 104 connection from %s:%s. (%s)",
            address[0],
            address[1],
            session.id,
        )
        session.add_event({"type": "NEW_CONNECTION"})
        iec104_handler = IEC104(self.device_data_controller, sock, address, session.id)
        try:
            while True:
                timeout_t3 = gevent.Timeout(
                    conpot_core.get_databus().get_value("T_3"), Timeout_t3
                )
                timeout_t3.start()
                try:
                    try:
                        request = sock.recv(6)
                        if not request:
                            logger.info("IEC104 Station disconnected. (%s)", session.id)
                            session.add_event({"type": "CONNECTION_LOST"})
                            iec104_handler.disconnect()
                            break
                        # ensure the 2-byte start/length header is complete
                        while request and len(request) < 2:
                            new_byte = sock.recv(1)
                            request += new_byte
                        _, length = struct.unpack(">BB", request[:2])
                        # read until the whole APDU (length + 2 header bytes) arrived
                        while len(request) < (length + 2):
                            new_byte = sock.recv(1)
                            if not new_byte:
                                break
                            request += new_byte
                        # check if IEC 104 packet or for the first occurrence of the indication 0x68 for IEC 104
                        for elem in list(request):
                            if 0x68 == elem:
                                index = request.index(elem)
                                iec_request = request[index:]
                                timeout_t3.cancel()
                                response = None
                                # check which frame type (control octet 1 low bits)
                                if not (iec_request[2] & 0x01):  # i_frame
                                    response = iec104_handler.handle_i_frame(
                                        iec_request
                                    )
                                elif iec_request[2] & 0x01 and not (
                                    iec_request[2] & 0x02
                                ):  # s_frame
                                    iec104_handler.handle_s_frame(iec_request)
                                elif iec_request[2] & 0x03:  # u_frame
                                    response = iec104_handler.handle_u_frame(
                                        iec_request
                                    )
                                else:
                                    logger.warning(
                                        "%s ---> No valid IEC104 type (%s)",
                                        address,
                                        session.id,
                                    )
                                if response:
                                    for resp_packet in response:
                                        if resp_packet:
                                            sock.send(resp_packet)
                                break
                    except Timeout_t3:
                        # idle too long: probe the peer with a test frame
                        pkt = iec104_handler.send_104frame(TESTFR_act)
                        if pkt:
                            sock.send(pkt)
                    finally:
                        timeout_t3.cancel()
                except gevent.Timeout:
                    # T1 (acknowledge) timer expired: drop the connection
                    logger.warning("T1 timed out. (%s)", session.id)
                    logger.info("IEC104 Station disconnected. (%s)", session.id)
                    session.add_event({"type": "CONNECTION_LOST"})
                    iec104_handler.disconnect()
                    break
        except socket.timeout:
            logger.debug("Socket timeout, remote: %s. (%s)", address[0], session.id)
            session.add_event({"type": "CONNECTION_LOST"})
        except socket.error as err:
            if isinstance(err.args, tuple):
                if err.errno == errno.EPIPE:
                    # remote peer disconnected
                    logger.info("IEC104 Station disconnected. (%s)", session.id)
                    session.add_event({"type": "CONNECTION_LOST"})
                else:
                    # determine and handle different error
                    pass
            else:
                print(("socket error ", err))
        iec104_handler.disconnect()

    def start(self, host, port):
        """Bind the StreamServer and serve until stopped (blocking)."""
        connection = (host, port)
        self.server = StreamServer(connection, self.handle)
        logger.info("IEC 60870-5-104 protocol server started on: %s", connection)
        self.server.serve_forever()

    def stop(self):
        """Stop the StreamServer."""
        self.server.stop()
| 6,629 | Python | .py | 140 | 29.528571 | 112 | 0.492745 | mushorg/conpot | 1,224 | 413 | 115 | GPL-2.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,485 | register.py | mushorg_conpot/conpot/protocols/IEC104/register.py | # Copyright (C) 2017 Patrick Reichenberger (University of Passau) <patrick.reichenberger@t-online.de>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
class IEC104Register(object):
    """A single IEC 60870-5-104 information object.

    Attributes:
        category_id: ASDU type identification of the object.
        addr: address string in "16_8 bit" format, e.g. "100_1".
        val: current process value.
        relation: address of a related register ("" if none).
    """

    def __init__(self, category_id, addr, val, relation):
        self.category_id = category_id
        self.addr = addr
        self.val = val
        self.relation = relation

    def set_val(self, val):
        """Update the stored process value."""
        self.val = val

    def __repr__(self):
        return "IEC104Register(category_id={!r}, addr={!r}, val={!r}, relation={!r})".format(
            self.category_id, self.addr, self.val, self.relation
        )
| 1,075 | Python | .py | 24 | 41.666667 | 102 | 0.741412 | mushorg/conpot | 1,224 | 413 | 115 | GPL-2.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,486 | IEC104.py | mushorg_conpot/conpot/protocols/IEC104/IEC104.py | # Copyright (C) 2017 Patrick Reichenberger (University of Passau) <patrick.reichenberger@t-online.de>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import gevent
import natsort
from conpot.protocols.IEC104.DeviceDataController import addr_in_hex, inro_response
from conpot.protocols.IEC104.i_frames_check import *
import conpot.core as conpot_core
from .frames import *
logger = logging.getLogger(__name__)
class IEC104(object):
    def __init__(self, device_data_controller, sock, address, session_id):
        """Per-connection IEC104 protocol state machine.

        :param device_data_controller: register store for this server.
        :param sock: connected client socket.
        :param address: (host, port) of the peer, used for logging.
        :param session_id: conpot session id, used for logging.
        """
        self.sock = sock
        self.address = address
        self.session_id = session_id
        # T1: acknowledge timeout for sent frames
        self.T_1 = conpot_core.get_databus().get_value("T_1")
        self.timeout_t1 = gevent.Timeout(self.T_1, gevent.Timeout)
        # T2: latest-acknowledge timeout; w: ack after w received I-frames
        self.T_2 = conpot_core.get_databus().get_value("T_2")
        self.w = conpot_core.get_databus().get_value("w")
        self.device_data_controller = device_data_controller
        # ssn/rsn: send/receive sequence numbers; ack: last acknowledged ssn
        self.ssn = 0
        self.rsn = 0
        self.ack = 0
        # data transfer only allowed after STARTDT act was received
        self.allow_DT = False
        # pending greenlet that will send the delayed S-frame (T2 timer)
        self.t2_caller = None
        # I-frames received since the last acknowledgement
        self.telegram_count = 0
        # sent-but-unacknowledged frames (frame_object_with_timer instances)
        self.sentmsgs = list()
        # frames buffered while STARTDT has not been received yet
        self.send_buffer = list()
# === u_frame
    def handle_u_frame(self, frame):
        """Generator handling a U-format APDU (start/stop/test of data transfer).

        Yields the raw bytes of each response frame to send (the caller
        iterates and writes them to the socket). Invalid U-frames are only
        logged.
        """
        container = u_frame(frame)
        try:
            # check if valid u_frame (length, rest bits)
            if len(frame) == 6 and container.getfieldval("LenAPDU") == 4:
                if frame[3] == 0x00 and frame[4] == 0x00 and frame[5] == 0x00:
                    # check which type (Start, Stop, Test), only one active at same time
                    # STARTDT_act
                    if frame[2] == 0x07:
                        logger.info(
                            "%s ---> u_frame. STARTDT act. (%s)",
                            self.address,
                            self.session_id,
                        )
                        self.allow_DT = True
                        yield self.send_104frame(STARTDT_con)
                        # === If buffered data, send
                        if self.send_buffer:
                            for pkt in self.send_buffer:
                                yield self.send_104frame(pkt)
                    # STARTDT_con
                    elif frame[2] == 0x0B:
                        logger.info(
                            "%s ---> u_frame. STARTDT con. (%s)",
                            self.address,
                            self.session_id,
                        )
                        # Station sends no STARTDT_act, so there is no STARTDT_con expected and no action performed
                        # Can be extended, if used as Master
                    # STOPDT_act
                    elif frame[2] == 0x13:
                        logger.info(
                            "%s ---> u_frame. STOPDT act. (%s)",
                            self.address,
                            self.session_id,
                        )
                        self.allow_DT = False
                        # Send S_Frame
                        resp_frame = s_frame()
                        yield self.send_104frame(resp_frame)
                        yield self.send_104frame(STOPDT_con)
                    # STOPDT_con
                    elif frame[2] == 0x23:
                        logger.info(
                            "%s ---> u_frame. STOPDT con. (%s)",
                            self.address,
                            self.session_id,
                        )
                        self.timeout_t1.cancel()
                    # TESTFR_act
                    elif frame[2] == 0x43:
                        logger.info(
                            "%s ---> u_frame. TESTFR act. (%s)",
                            self.address,
                            self.session_id,
                        )
                        # In case of both sending a TESTFR_act.
                        # Drop our own pending TESTFR_act from the unacknowledged list.
                        # NOTE(review): the else below pairs with the isinstance
                        # check, so the T1 timer of a matched TESTFR frame is
                        # never cancelled - verify this is intended.
                        if self.sentmsgs:
                            temp_list = []
                            for x in self.sentmsgs:
                                if x.name != "u_frame" or x.getfieldval("Type") != 0x43:
                                    if isinstance(x, frame_object_with_timer):
                                        temp_list.append(x)
                                    else:
                                        x.cancel_t1()
                            self.sentmsgs = temp_list
                        yield self.send_104frame(TESTFR_con)
                    # TESTFR_con
                    elif frame[2] == 0x83:
                        logger.info(
                            "%s ---> u_frame. TESTFR con. (%s)",
                            self.address,
                            self.session_id,
                        )
                        # Our TESTFR_act was confirmed: drop it from the pending list.
                        if self.sentmsgs:
                            temp_list = []
                            for x in self.sentmsgs:
                                if x.name != "u_frame" or x.getfieldval("Type") != 0x43:
                                    if isinstance(x, frame_object_with_timer):
                                        temp_list.append(x)
                                    else:
                                        x.cancel_t1()
                            self.sentmsgs = temp_list
                    else:
                        raise InvalidFieldValueException(
                            "Invalid u_frame packet, more than 1 bit set! (%s)",
                            self.session_id,
                        )
                else:
                    raise InvalidFieldValueException(
                        "Control field octet 2,3 or 4 not 0x00! (%s)", self.session_id
                    )
            else:
                raise InvalidFieldValueException(
                    "Wrong length for u_frame packet! (%s)", self.session_id
                )
        except InvalidFieldValueException as ex:
            logger.warning("InvalidFieldValue: %s. (%s)", ex, self.session_id)
# === s_frame
    def handle_s_frame(self, frame):
        """Handle an S-format APDU (pure acknowledgement of received I-frames).

        Updates self.ack and prunes acknowledged I-frames (SendSeq below the
        acknowledged number) from the unacknowledged list. Invalid frames
        are only logged.
        """
        container = s_frame(frame)
        try:
            # check if valid s_frame (length, rest bits)
            if len(frame) == 6 and container.getfieldval("LenAPDU") == 4:
                if frame[2] & 0x01 and frame[3] == 0x00:
                    recv_snr = container.getfieldval("RecvSeq")
                    logger.info(
                        "%s ---> s_frame receive nr: %s. (%s)",
                        self.address,
                        str(recv_snr),
                        self.session_id,
                    )
                    if recv_snr <= self.ssn:
                        self.ack = recv_snr
                        # NOTE(review): as written, the else pairs with the
                        # isinstance check, so acknowledged frames are dropped
                        # without cancelling their T1 timer - verify intent.
                        if self.sentmsgs:
                            temp_list = []
                            for x in self.sentmsgs:
                                if (
                                    x.name != "i_frame"
                                    or x.getfieldval("SendSeq") >= self.ack
                                ):
                                    if isinstance(x, frame_object_with_timer):
                                        temp_list.append(x)
                                    else:
                                        x.cancel_t1()
                            self.sentmsgs = temp_list
                        self.show_send_list()
                else:
                    raise InvalidFieldValueException(
                        "Control field octet 1 in 's_frame' not 0x01 or 2 not 0x00! "
                        "(%s)",
                        self.session_id,
                    )
            else:
                raise InvalidFieldValueException(
                    "Wrong length for s_frame packet! (%s)", self.session_id
                )
        except InvalidFieldValueException as ex:
            logger.warning("InvalidFieldValue: %s. (%s)", ex, self.session_id)
# === i_frame
    def handle_i_frame(self, frame):
        """Handle an I-format APDU (numbered information transfer).

        Validates length and sequence numbers, acknowledges via S-frame
        (after w telegrams, or delayed by the T2 timer) and dispatches
        supported command types (45, 46, 49, 50, 100) to their handlers.
        Returns the response produced by send_104frame / a command handler,
        or None.
        """
        container = i_frame(frame)
        request_string = " ".join(hex(n) for n in frame)
        logger.debug(
            "%s ---> i_frame %s. (%s)", self.address, request_string, self.session_id
        )
        logger.info(
            "%s ---> i_frame %s (%s)", self.address, container.payload, self.session_id
        )
        frame_length = len(frame)
        try:
            if container.getfieldval("LenAPDU") != frame_length - 2:
                raise InvalidFieldValueException(
                    "Wrong length for i_frame packet! (%s)", self.session_id
                )
            self.telegram_count += 1
            recv_snr = container.getfieldval("RecvSeq")
            # Figure 11 (sequence number handling of IEC 60870-5-104)
            if container.getfieldval("SendSeq") == self.rsn:
                self.recvseq_increment()
            else:
                logger.error(
                    "Sequence error, send s_frame for last correct packet. Then disconnect. (%s)",
                    self.session_id,
                )
                # Better solution exists..
                if self.t2_caller:
                    gevent.kill(self.t2_caller)
                gevent.Greenlet.spawn_later(1, self.disconnect())
                return self.send_104frame(s_frame(RecvSeq=self.rsn))
            # All packets up to recv_snr-1 are acknowledged
            # NOTE(review): the else pairs with the isinstance check, so
            # acknowledged frames are dropped without cancelling their T1
            # timer - verify intent.
            if self.sentmsgs:
                temp_list = []
                for x in self.sentmsgs:
                    if x.name != "i_frame" or x.getfieldval("SendSeq") >= recv_snr:
                        if isinstance(x, frame_object_with_timer):
                            temp_list.append(x)
                        else:
                            x.cancel_t1()
                self.sentmsgs = temp_list
        except InvalidFieldValueException as ex:
            logger.warning("InvalidFieldValue: %s. (%s)", ex, self.session_id)
        # Send S_Frame at w telegrams or (re)start timer T2
        resp_frame = s_frame()
        if not self.t2_caller:
            self.t2_caller = gevent.Greenlet.spawn_later(
                self.T_2, self.send_frame_imm, resp_frame
            )
        if self.telegram_count >= self.w:
            return self.send_104frame(resp_frame)
        common_address = self.device_data_controller.common_address
        type_id = container.getfieldval("TypeID")
        request_coa = container.getfieldval("COA")
        # 45: Single command
        if type_id == TypeIdentification["C_SC_NA_1"] and request_coa == common_address:
            return self.handle_single_command45(container)
        # 46: Double command
        elif (
            type_id == TypeIdentification["C_DC_NA_1"] and request_coa == common_address
        ):
            return self.handle_double_command46(container)
        # 49: Setpoint command, scaled value
        elif (
            type_id == TypeIdentification["C_SE_NB_1"] and request_coa == common_address
        ):
            return self.handle_setpointscaled_command49(container)
        # 50: Setpoint command, short floating point value
        elif (
            type_id == TypeIdentification["C_SE_NC_1"] and request_coa == common_address
        ):
            return self.handle_setpointfloatpoint_command50(container)
        # 100: (General-) Interrogation command
        elif type_id == TypeIdentification["C_IC_NA_1"] and request_coa in (
            common_address,
            0xFFFF,
        ):
            return self.handle_inro_command100(container)
    def send_104frame(self, frame):
        """Prepare an outgoing frame and return its bytes (or None).

        S-frames get the current receive sequence number; I-frames are only
        sent after STARTDT (otherwise buffered, up to 50) and are tracked
        with a T1 acknowledge timer; STARTDT/TESTFR act U-frames are tracked
        with a T1 timer as well. Returns None when an I-frame was buffered.
        """
        # send s_frame
        if frame.name == "s_frame":
            frame.RecvSeq = self.rsn
            # sending an acknowledgement makes the pending T2 timer obsolete
            if self.t2_caller:
                gevent.kill(self.t2_caller)
            self.telegram_count = 0
            response_string = " ".join(hex(n) for n in frame.build())
            logger.info(
                "%s <--- s_frame %s (%s)",
                self.address,
                response_string,
                self.session_id,
            )
            return frame.build()
        # send i_frame
        elif frame.name == "i_frame":
            if self.allow_DT:
                if self.t2_caller:
                    gevent.kill(self.t2_caller)
                frame.SendSeq = self.ssn
                frame.RecvSeq = self.rsn
                frame.COA = self.device_data_controller.common_address
                self.increment_sendseq()
                self.telegram_count = 0
                # track until acknowledged; T1 expiry disconnects
                iframe = frame_object_with_timer(frame)
                self.sentmsgs.append(iframe)
                iframe.restart_t1()
                response_string = " ".join(hex(n) for n in frame.build())
                logger.debug(
                    "%s <--- i_frame %s (%s)",
                    self.address,
                    response_string,
                    self.session_id,
                )
                logger.info(
                    "%s <--- i_frame %s (%s)",
                    self.address,
                    frame.payload,
                    self.session_id,
                )
                return frame.build()
            else:
                logger.info("StartDT missing, buffer data. (%s)", self.session_id)
                # Limitation for buffer, arbitrary number
                if len(self.send_buffer) < 50:
                    self.send_buffer.append(frame)
        # send u_frame
        elif frame.name == "u_frame":
            # only the act frames (STARTDT 0x07, TESTFR 0x43) await a confirmation
            if frame.getfieldval("Type") == 0x07 or frame.getfieldval("Type") == 0x43:
                uframe = frame_object_with_timer(frame)
                self.sentmsgs.append(uframe)
                uframe.restart_t1()
            response_string = " ".join(hex(n) for n in frame.build())
            logger.info(
                "%s <--- u_frame %s (%s)",
                self.address,
                response_string,
                self.session_id,
            )
            return frame.build()
    def send_frame_imm(self, frame):
        """Send an S-frame immediately on the socket (used as the T2 callback).

        Unlike send_104frame(), this writes to the socket itself instead of
        returning bytes to the caller. Only s_frames are handled.
        """
        # send s_frame
        if frame.name == "s_frame":
            frame.RecvSeq = self.rsn
            if self.t2_caller:
                gevent.kill(self.t2_caller)
            self.telegram_count = 0
            response_string = " ".join(hex(n) for n in frame.build())
            logger.info(
                "%s <--- s_frame %s (%s)",
                self.address,
                response_string,
                self.session_id,
            )
            return self.sock.send(frame.build())
    def handle_single_command45(self, container):
        """Process a type-45 (single command, C_SC_NA_1) ASDU.

        Generator that yields serialized reply frames in protocol order:
        activation confirmation (COT=7), an optional spontaneous (COT=11)
        single-point update for the related info object, and activation
        termination (COT=10). An unknown object address yields COT=47;
        a wrong object type yields a negative confirmation (PN=1).
        Field-validation and attribute errors are logged, not raised.
        """
        try:
            check_asdu_45(container, "c")
            cause_of_transmission = int(container.getfieldval("COT"))
            info_obj_addr = container.getfieldval("IOA")
            field_val = container.getfieldval("SCS")
            # only COT=6 (activation) is handled
            if cause_of_transmission == 6:
                obj = self.device_data_controller.get_object_from_reg(
                    info_obj_addr
                )  # get destination object
                if not (obj is None):  # if exists in xml-file
                    obj_cat = int(obj.category_id)  # get type (single command)
                    if obj_cat == 45:  # if object has type single command
                        # === Activation confirmation
                        act_con = (
                            i_frame()
                            / asdu_head(COT=7)
                            / asdu_infobj_45(IOA=info_obj_addr, SCS=field_val)
                        )
                        check_asdu_45(act_con, "m")
                        yield self.send_104frame(act_con)
                        # === Get related info object if exists
                        obj_rel_addr = obj.relation
                        if obj_rel_addr != "":  # if relation available
                            obj_rel_addr_hex = addr_in_hex(
                                obj_rel_addr
                            )  # get single point object address
                            # get the single point object
                            obj_rel = self.device_data_controller.get_object_from_reg(
                                obj_rel_addr_hex
                            )
                            obj.val = field_val  # set the value in the object to the command value
                            obj_rel.val = field_val  # set the value in the relation object to the command value
                            # test whether if it really updated the value
                            changed_val = obj_rel.val
                            single_point = (
                                i_frame()
                                / asdu_head(COT=11)
                                / asdu_infobj_1(IOA=obj_rel_addr_hex)
                            )
                            single_point.SIQ = SIQ(SPI=changed_val)
                            yield self.send_104frame(single_point)
                        # === Activation termination
                        act_term = (
                            i_frame()
                            / asdu_head(COT=10)
                            / asdu_infobj_45(IOA=info_obj_addr, SCS=field_val)
                        )
                        check_asdu_45(act_term, "m")
                        yield self.send_104frame(act_term)
                    else:  # if command type doesn't fit
                        # === neg. Activation confirmation
                        act_con = (
                            i_frame()
                            / asdu_head(PN=1, COT=7)
                            / asdu_infobj_45(IOA=info_obj_addr, SCS=field_val)
                        )
                        check_asdu_45(act_con, "m")
                        yield self.send_104frame(act_con)
                else:  # object doesn't exist in xml file
                    # === unknown info obj address, object not found (or no reply?)
                    bad_addr = (
                        i_frame()
                        / asdu_head(COT=47)
                        / asdu_infobj_45(IOA=info_obj_addr, SCS=field_val)
                    )
                    check_asdu_45(bad_addr, "m")
                    yield self.send_104frame(bad_addr)
        except InvalidFieldValueException as ex:
            logger.warning("InvalidFieldValue: %s (%s)", ex, self.session_id)
        except AttributeError as ex:
            logger.warning(
                "Allocation for field %s not possible. (%s)", ex, self.session_id
            )
    def handle_double_command46(self, container):
        """Process a type-46 (double command, C_DC_NA_1) ASDU.

        Mirrors :meth:`handle_single_command45` for double commands: yields
        activation confirmation (COT=7), an optional spontaneous (COT=11)
        double-point update for the related object, and activation
        termination (COT=10). Unknown address -> COT=47; wrong object
        type -> negative confirmation (PN=1). Errors are logged.
        """
        try:
            check_asdu_46(container, "c")
            cause_of_transmission = int(container.getfieldval("COT"))
            info_obj_addr = container.getfieldval("IOA")
            field_val = container.getfieldval("DCS")
            # only COT=6 (activation) is handled
            if cause_of_transmission == 6:
                obj = self.device_data_controller.get_object_from_reg(
                    info_obj_addr
                )  # get destination object
                if not (obj is None):  # if exists in xml-file
                    obj_cat = int(obj.category_id)  # get type (double command)
                    if obj_cat == 46:  # if object has type double command
                        # === Activation confirmation
                        act_con = (
                            i_frame()
                            / asdu_head(COT=7)
                            / asdu_infobj_46(IOA=info_obj_addr, DCS=field_val)
                        )
                        check_asdu_46(act_con, "m")
                        yield self.send_104frame(act_con)
                        # === Get related info object if exists
                        obj_rel_addr = obj.relation
                        if obj_rel_addr != "":  # if relation available
                            obj_rel_addr_hex = addr_in_hex(
                                obj_rel_addr
                            )  # get double point object address
                            # get the double point object
                            obj_rel = self.device_data_controller.get_object_from_reg(
                                obj_rel_addr_hex
                            )
                            obj.val = field_val  # set the value in the object to the command value
                            obj_rel.val = field_val  # set the value in the relation object to the command value
                            # test whether if it really updated the value
                            changed_val = obj_rel.val
                            double_point = (
                                i_frame()
                                / asdu_head(COT=11)
                                / asdu_infobj_3(IOA=obj_rel_addr_hex)
                            )
                            double_point.DIQ = DIQ(DPI=changed_val)
                            yield self.send_104frame(double_point)
                        # === Activation termination
                        act_term = (
                            i_frame()
                            / asdu_head(COT=10)
                            / asdu_infobj_46(IOA=info_obj_addr, DCS=field_val)
                        )
                        check_asdu_46(act_term, "m")
                        yield self.send_104frame(act_term)
                    else:  # if command type doesn't fit
                        # === neg. Activation confirmation
                        act_con = (
                            i_frame()
                            / asdu_head(PN=1, COT=7)
                            / asdu_infobj_46(IOA=info_obj_addr, DCS=field_val)
                        )
                        check_asdu_46(act_con, "m")
                        yield self.send_104frame(act_con)
                else:  # object doesn't exist in xml file
                    # === unknown info obj address, object not found
                    bad_addr = (
                        i_frame()
                        / asdu_head(COT=47)
                        / asdu_infobj_46(IOA=info_obj_addr, DCS=field_val)
                    )
                    check_asdu_46(bad_addr, "m")
                    yield self.send_104frame(bad_addr)
        except InvalidFieldValueException as ex:
            logger.warning("InvalidFieldValue: %s (%s)", ex, self.session_id)
        except AttributeError as ex:
            logger.warning(
                "Allocation for field %s not possible. (%s)", ex, self.session_id
            )
def handle_setpointscaled_command49(self, container):
try:
check_asdu_49(container, "c")
cause_of_transmission = int(container.getfieldval("COT"))
info_obj_addr = container.getfieldval("IOA")
field_val = container.getfieldval("SVA")
if cause_of_transmission == 6:
obj = self.device_data_controller.get_object_from_reg(
info_obj_addr
) # get destination object
if not (obj is None): # if exists in xml-file
obj_cat = int(obj.category_id) # get type (double command)
if obj_cat == 49: # if object has type double command
# === Activation confirmation
act_con = (
i_frame()
/ asdu_head(COT=7)
/ asdu_infobj_49(IOA=info_obj_addr, SVA=field_val)
)
check_asdu_49(act_con, "m")
yield self.send_104frame(act_con)
# === Get related info object if exists
obj_rel_addr = obj.relation
if obj_rel_addr != "": # if relation available
obj_rel_addr_hex = addr_in_hex(
obj_rel_addr
) # get double point object address
# get the double point object
obj_rel = self.device_data_controller.get_object_from_reg(
obj_rel_addr_hex
)
obj.val = field_val # set the value in the object to the command value
obj_rel.val = field_val # set the value in the relation object to the command value
# test whether if it really updated the value
changed_val = obj_rel.val
setpoint_scaled = (
i_frame()
/ asdu_head(COT=3)
/ asdu_infobj_11(IOA=obj_rel_addr_hex, SVA=changed_val)
)
setpoint_scaled.show2()
yield self.send_104frame(setpoint_scaled)
# === Activation termination
act_term = (
i_frame()
/ asdu_head(COT=10)
/ asdu_infobj_49(IOA=info_obj_addr, SVA=field_val)
)
check_asdu_49(act_term, "m")
yield self.send_104frame(act_term)
else: # if command type doesn't fit
# === neg. Activation confirmation
act_con = (
i_frame()
/ asdu_head(PN=1, COT=7)
/ asdu_infobj_49(IOA=info_obj_addr, SVA=field_val)
)
check_asdu_49(act_con, "m")
yield self.send_104frame(act_con)
else: # object doesn't exist in xml file
# === unknown info obj address, object not found
bad_addr = (
i_frame()
/ asdu_head(COT=47)
/ asdu_infobj_49(IOA=info_obj_addr, SVA=field_val)
)
check_asdu_49(bad_addr, "m")
yield self.send_104frame(bad_addr)
except InvalidFieldValueException as ex:
logger.warning("InvalidFieldValue: %s (%s)", ex, self.session_id)
except AttributeError as ex:
logger.warning(
"Allocation for field %s not possible. (%s)", ex, self.session_id
)
    def handle_setpointfloatpoint_command50(self, container):
        """Process a type-50 (set-point command, short float, C_SE_NC_1) ASDU.

        Yields activation confirmation (COT=7), an optional cyclic (COT=3)
        type-13 float measurement update for the related object, and
        activation termination (COT=10). Unknown address -> COT=47; wrong
        object type -> negative confirmation (PN=1). Errors are logged.
        """
        try:
            check_asdu_50(container, "c")
            cause_of_transmission = int(container.getfieldval("COT"))
            info_obj_addr = container.getfieldval("IOA")
            field_val = container.getfieldval("FPNumber")
            # only COT=6 (activation) is handled
            if cause_of_transmission == 6:
                obj = self.device_data_controller.get_object_from_reg(
                    info_obj_addr
                )  # get destination object
                if not (obj is None):  # if exists in xml-file
                    obj_cat = int(obj.category_id)  # get type (set-point, float)
                    if obj_cat == 50:  # if object has type set-point float
                        # === Activation confirmation
                        act_con = (
                            i_frame()
                            / asdu_head(COT=7)
                            / asdu_infobj_50(IOA=info_obj_addr, FPNumber=field_val)
                        )
                        check_asdu_50(act_con, "m")
                        yield self.send_104frame(act_con)
                        # === Get related info object if exists
                        obj_rel_addr = obj.relation
                        if obj_rel_addr != "":  # if relation available
                            obj_rel_addr_hex = addr_in_hex(
                                obj_rel_addr
                            )  # get measurement object address
                            # get the related measurement object
                            obj_rel = self.device_data_controller.get_object_from_reg(
                                obj_rel_addr_hex
                            )
                            obj.val = field_val  # set the value in the object to the command value
                            obj_rel.val = field_val  # set the value in the relation object to the command value
                            # test whether if it really updated the value
                            changed_val = obj_rel.val
                            # NOTE: variable name kept from the original; this is
                            # actually the short-float (type 13) update frame
                            setpoint_scaled = (
                                i_frame()
                                / asdu_head(COT=3)
                                / asdu_infobj_13(
                                    IOA=obj_rel_addr_hex, FPNumber=changed_val
                                )
                            )
                            yield self.send_104frame(setpoint_scaled)
                        # === Activation termination
                        act_term = (
                            i_frame()
                            / asdu_head(COT=10)
                            / asdu_infobj_50(IOA=info_obj_addr, FPNumber=field_val)
                        )
                        check_asdu_50(act_term, "m")
                        yield self.send_104frame(act_term)
                    else:  # if command type doesn't fit
                        # === neg. Activation confirmation
                        act_con = (
                            i_frame()
                            / asdu_head(PN=1, COT=7)
                            / asdu_infobj_50(IOA=info_obj_addr, FPNumber=field_val)
                        )
                        check_asdu_50(act_con, "m")
                        yield self.send_104frame(act_con)
                else:  # object doesn't exist in xml file
                    # === unknown info obj address, object not found
                    bad_addr = (
                        i_frame()
                        / asdu_head(COT=47)
                        / asdu_infobj_50(IOA=info_obj_addr, FPNumber=field_val)
                    )
                    check_asdu_50(bad_addr, "m")
                    yield self.send_104frame(bad_addr)
        except InvalidFieldValueException as ex:
            logger.warning("InvalidFieldValue: %s (%s)", ex, self.session_id)
        except AttributeError as ex:
            logger.warning(
                "Allocation for field %s not possible. (%s)", ex, self.session_id
            )
def handle_inro_command100(self, container):
try:
# check_asdu_100(container, "c")
cause_of_transmission = container.getfieldval("COT")
qualif_of_inro = container.getfieldval("QOI")
if cause_of_transmission == 6:
# === Activation confirmation for inro
act_con_inro = (
i_frame() / asdu_head(COT=7) / asdu_infobj_100(QOI=qualif_of_inro)
)
check_asdu_100(act_con_inro, "m")
yield self.send_104frame(act_con_inro)
# === Inro response
if qualif_of_inro == 20:
reg = self.device_data_controller.get_registers()
sorted_reg = natsort.natsorted(list(reg.items()))
# get response list for certain types
resp1_list = inro_response(sorted_reg, 1)
resp3_list = inro_response(sorted_reg, 3)
resp5_list = inro_response(sorted_reg, 5)
resp7_list = inro_response(sorted_reg, 7)
resp9_list = inro_response(sorted_reg, 9)
resp11_list = inro_response(sorted_reg, 11)
resp13_list = inro_response(sorted_reg, 13)
# send each packet from each list
for resp1 in resp1_list:
yield self.send_104frame(resp1)
for resp3 in resp3_list:
yield self.send_104frame(resp3)
for resp5 in resp5_list:
yield self.send_104frame(resp5)
for resp7 in resp7_list:
yield self.send_104frame(resp7)
for resp9 in resp9_list:
yield self.send_104frame(resp9)
for resp11 in resp11_list:
yield self.send_104frame(resp11)
for resp13 in resp13_list:
yield self.send_104frame(resp13)
# === Activation termination
act_term = (
i_frame() / asdu_head(COT=10) / asdu_infobj_100(QOI=qualif_of_inro)
)
check_asdu_100(act_con_inro, "m")
yield self.send_104frame(act_term)
except InvalidFieldValueException as ex:
logger.warning("InvalidFieldValue: %s (%s)", ex, self.session_id)
except AttributeError as ex:
logger.warning(
"Allocation for field %s not possible. (%s)", ex, self.session_id
)
    def restart_t1(self):
        """Restart the t1 (response) timeout.

        A ``gevent.Timeout`` cannot be re-armed, so the running one is
        cancelled and replaced with a freshly started instance.
        """
        self.timeout_t1.cancel()
        self.timeout_t1 = gevent.Timeout(self.T_1, gevent.Timeout)
        self.timeout_t1.start()
def show_send_list(self):
list_temp = list()
for frm in self.sentmsgs:
if frm.name == "u_frame":
u_type = str(hex(frm.getfieldval("Type")))
list_temp.append("u(" + u_list[u_type] + ")")
elif frm.name == "s_frame":
s_type = frm.getfieldval("RecvSeq")
list_temp.append("s(" + str(s_type) + ")")
elif frm.name == "i_frame":
i_send_seq = frm.getfieldval("SendSeq")
i_recv_seq = frm.getfieldval("RecvSeq")
list_temp.append("i(" + str(i_send_seq) + "," + str(i_recv_seq) + ")")
print(list_temp)
def disconnect(self):
self.timeout_t1.cancel()
if self.t2_caller:
gevent.kill(self.t2_caller)
self.sock.close()
self.ssn = 0
self.rsn = 0
self.ack = 0
self.telegram_count = 0
def increment_sendseq(self):
if self.ssn < 65534:
self.ssn = self.ssn + 2
else:
self.ssn = 0
def recvseq_increment(self):
if self.rsn < 65534:
self.rsn = self.rsn + 2
else:
self.rsn = 0
# returns object list for SQ = 0
@staticmethod
def get_infoobj_list(frame):
info_obj_list = []
number_of_objects = frame.getfieldval("NoO")
for i in range(3, number_of_objects + 3):
info_obj_list.append(frame.getlayer(i))
return info_obj_list
class frame_object_with_timer:
    """Couple a sent frame with its t1 retransmission timeout.

    ``getfieldval`` and ``build`` are proxied to the wrapped frame so the
    wrapper can stand in for the frame itself in the sent-message list.
    """

    def __init__(self, frame):
        self.frame = frame
        self.name = frame.name
        self.T_1 = conpot_core.get_databus().get_value("T_1")
        self.__t1 = gevent.Timeout(self.T_1, gevent.Timeout)

    def restart_t1(self):
        """Replace the t1 timeout with a freshly started one (a
        gevent.Timeout cannot be re-armed once cancelled)."""
        self.__t1.cancel()
        self.__t1 = gevent.Timeout(self.T_1, gevent.Timeout)
        self.__t1.start()

    def cancel_t1(self):
        """Stop the pending t1 timeout."""
        self.__t1.cancel()

    def getfieldval(self, fieldval):
        """Proxy to the wrapped frame's field accessor."""
        return self.frame.getfieldval(fieldval)

    def build(self):
        """Proxy to the wrapped frame's serializer."""
        return self.frame.build()
| 35,803 | Python | .py | 744 | 28.950269 | 116 | 0.458565 | mushorg/conpot | 1,224 | 413 | 115 | GPL-2.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,487 | kamstrup_management_server.py | mushorg_conpot/conpot/protocols/kamstrup_management/kamstrup_management_server.py | # This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import logging
import socket
import gevent
from gevent.server import StreamServer
import conpot.core as conpot_core
from .command_responder import CommandResponder
from conpot.core.protocol_wrapper import conpot_protocol
from conpot.utils.networking import str_to_bytes
logger = logging.getLogger(__name__)
@conpot_protocol
class KamstrupManagementServer(object):
    """TCP server emulating the Kamstrup meter's management (service) shell.

    Each connection receives a greeting banner (with the device MAC taken
    from the databus), then a line-oriented request/response loop whose
    requests are answered by :class:`CommandResponder`.
    """

    def __init__(self, template, template_directory, args):
        # template / template_directory / args are part of the common conpot
        # protocol-server interface; this server does not use them directly.
        self.command_responder = CommandResponder()
        self.banner = "\r\nWelcome...\r\nConnected to [{0}]\r\n"
        logger.info("Kamstrup management protocol server initialized.")
        self.server = None

    def handle(self, sock, address):
        """Per-connection loop.

        Registers a session, sends the banner, then answers each received
        chunk until the client disconnects, the responder returns None
        (quit command) or the socket times out. All traffic is recorded as
        session events.
        """
        session = conpot_core.get_session(
            "kamstrup_management_protocol",
            address[0],
            address[1],
            sock.getsockname()[0],
            sock.getsockname()[1],
        )
        logger.info(
            "New Kamstrup connection from %s:%s. (%s)",
            address[0],
            address[1],
            session.id,
        )
        session.add_event({"type": "NEW_CONNECTION"})
        try:
            sock.send(
                str_to_bytes(
                    self.banner.format(
                        conpot_core.get_databus().get_value("mac_address")
                    )
                )
            )
            while True:
                data = sock.recv(1024)
                if not data:
                    logger.info("Kamstrup client disconnected. (%s)", session.id)
                    session.add_event({"type": "CONNECTION_LOST"})
                    break
                request = data.decode()
                logdata = {"request": request}
                response = self.command_responder.respond(request)
                logdata["response"] = response
                logger.info(
                    "Kamstrup management traffic from %s: %s (%s)",
                    address[0],
                    logdata,
                    session.id,
                )
                session.add_event(logdata)
                gevent.sleep(0.25)  # TODO measure delay and/or RTT
                # responder returns None on a quit command: drop the connection
                if response is None:
                    session.add_event({"type": "CONNECTION_LOST"})
                    break
                # encode data before sending
                reply = str_to_bytes(response)
                sock.send(reply)
        except socket.timeout:
            logger.debug("Socket timeout, remote: %s. (%s)", address[0], session.id)
            session.add_event({"type": "CONNECTION_LOST"})
        sock.close()

    def start(self, host, port):
        """Bind a gevent StreamServer on (host, port) and serve forever."""
        self.host = host
        self.port = port
        connection = (host, port)
        self.server = StreamServer(connection, self.handle)
        logger.info("Kamstrup management protocol server started on: %s", connection)
        self.server.serve_forever()

    def stop(self):
        """Stop the running StreamServer."""
        self.server.stop()
| 3,645 | Python | .py | 90 | 30.066667 | 85 | 0.594297 | mushorg/conpot | 1,224 | 413 | 115 | GPL-2.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,488 | command_responder.py | mushorg_conpot/conpot/protocols/kamstrup_management/command_responder.py | # Copyright (C) 2014 Andrea De Pasquale <andrea@de-pasquale.name>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import logging
from . import commands
logger = logging.getLogger(__name__)
class CommandResponder(object):
    """Dispatch Kamstrup management-shell requests to command handlers.

    Requests are single lines: 'H [cmd]' for help, 'Q' to quit, or a
    three-character '!XX' command optionally followed by parameters.
    """

    COMMAND_NOT_FOUND = "\r\n" "? Command not found.\r\n" "Send 'H' for help.\r\n"

    def __init__(self):
        self.commands = {
            "!AC": commands.AccessControlCommand(),
            "!AS": commands.AlarmServerCommand(),
            "!GC": commands.GetConfigCommand(),
            "!GV": commands.SoftwareVersionCommand(),
            "!SA": commands.SetKap1Command(),
            "!SB": commands.SetKap2Command(),
            "!SC": commands.SetConfigCommand(),
            "!SD": commands.SetDeviceNameCommand(),
            "!SH": commands.SetLookupCommand(),
            "!SI": commands.SetIPCommand(),
            "!SK": commands.SetWatchdogCommand(),
            "!SN": commands.SetNameserverCommand(),
            "!SP": commands.SetPortsCommand(),
            "!SS": commands.SetSerialCommand(),
            "!RC": commands.RequestConnectCommand(),
            "!RR": commands.RequestRestartCommand(),
            "!WM": commands.WinkModuleCommand(),
        }
        self.help_command = commands.HelpCommand(self.commands)

    def respond(self, request):
        """Return the reply for *request*.

        Returns '' for blank input (idle keep-alive), None for a quit
        command, the help text for 'H', the matching command's output for a
        known '!' command, and COMMAND_NOT_FOUND otherwise.
        """
        text = request.strip()
        if not text:
            return ""  # idle
        parts = text.split(" ", 1)
        verb = parts[0].upper()
        if len(verb) > 3:
            return self.COMMAND_NOT_FOUND
        if verb.startswith("Q"):
            return None  # quit: caller closes the connection
        args = parts[1] if len(parts) > 1 else None
        if verb.startswith("H"):
            return self.help_command.run(args)
        if verb.startswith("!") and verb in self.commands:
            return self.commands[verb].run(args)
        return self.COMMAND_NOT_FOUND
| 2,690 | Python | .py | 61 | 36.032787 | 82 | 0.640015 | mushorg/conpot | 1,224 | 413 | 115 | GPL-2.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,489 | commands.py | mushorg_conpot/conpot/protocols/kamstrup_management/commands.py | # Copyright (C) 2014 Andrea De Pasquale <andrea@de-pasquale.name>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import logging
import conpot.core as conpot_core
logger = logging.getLogger(__name__)
class BaseCommand(object):
    """Common behaviour for Kamstrup management-shell commands.

    Subclasses override HELP_MESSAGE / CMD_OUTPUT and usually ``run``;
    the defaults make a command inert but well-formed.
    """

    HELP_MESSAGE = ""
    CMD_OUTPUT = ""
    INVALID_PARAMETER = (
        "\r\n"
        "? Invalid parameter.\r\n"
        "Try 'H cmd' for specific help.\r\n"
        " Ie: H !SC\r\n"
    )

    def help(self):
        """Return the command's usage text."""
        return self.HELP_MESSAGE

    def run(self, params=None):
        """Execute the command; base implementation just echoes CMD_OUTPUT."""
        return self.CMD_OUTPUT
class HelpCommand(BaseCommand):
    """'H' command: show the service menu, or one command's usage text."""

    CMD_OUTPUT = (
        "==============================================================================\r\n"
        "Service Menu\r\n"
        "==============================================================================\r\n"
        "H: Help [cmd].\r\n"
        "Q: Close connection.\r\n"
        "!AC: Access control.\r\n"
        "!AS: Alarm Server.\r\n"
        "!GC: Get Config.\r\n"
        "!GV: Software version.\r\n"
        "!SA: Set KAP Server IP and port (*1).\r\n"
        "!SB: Set 2nd KAP Server IP and port.\r\n"
        "!SC: Set Config (*1).\r\n"
        "!SD: Set device name (*1).\r\n"
        "!SH: Set KAP Server lookup (DNS or DHCP)\r\n"
        "!SI: Set IP (enter either valid IP or 0 to force DHCP)(*1).\r\n"
        "!SK: Set KAP watchdog timeout(WDT).\r\n"
        "!SN: Set IP for DNS Name servers to use.\r\n"
        "!SP: Set IP Ports\r\n"
        "!SS: Set Serial Settings.\r\n"
        "!RC: Request connect\r\n"
        "!RR: Request restart (*1).\r\n"
        "!WM: Wink module.\r\n"
        "==============================================================================\r\n"
        "(*1) Forces system restart\r\n"
        "==============================================================================\r\n"
        "Kamstrup (R)\r\n"
    )

    def __init__(self, commands):
        # mapping of '!XX' -> command instance, shared with the responder
        self.commands = commands

    def run(self, params=None):
        """Return the general menu, or a specific command's help when
        *params* names a known command (only its first 3 chars are used)."""
        if params is None:
            return self.CMD_OUTPUT
        key = params[0:3]
        if key in self.commands:
            return self.commands[key].help()
        return self.INVALID_PARAMETER
class AccessControlCommand(BaseCommand):
    """'!AC' command: simple IP-based access filter with 5 ACL slots."""

    HELP_MESSAGE = (
        "!AC: Access control.\r\n"
        "     Used for simple IP address firewall filtering.\r\n"
        "     If enabled only the listed IP's can assess this module.\r\n"
        "     Format: !AC [En/Dis [ID IP]]\r\n"
        "     Example: !AC\r\n"
        "     Lists the setup.\r\n"
        "     Example: !AC 0\r\n"
        "     Disables the filter allowing everybody to access.\r\n"
        "     Example: !AC 0 1 192.168.1.211\r\n"
        "              !AC 0 2 10.0.0.1\r\n"
        "              !AC 0 3 195.215.168.45\r\n"
        "              !AC 1\r\n"
        "     Only connections from 192.168.1.211, \r\n"
        "     10.0.0.1 or 195.215.168.45 are possible.\r\n"
    )

    CMD_OUTPUT = (
        "\r\n"
        "{access_control_status} \r\n"
        " [1] {access_control_1}\r\n"
        " [2] {access_control_2}\r\n"
        " [3] {access_control_3}\r\n"
        " [4] {access_control_4}\r\n"
        " [5] {access_control_5}\r\n"
    )

    def set_access_ip(self, number, ip_string):
        """Store *ip_string* in ACL slot *number* ('1'..'5').

        Fixed: the original used a substring test
        (``any(x in number for x in [...])``) which accepted inputs like
        '15' or '51' and then wrote to a non-existent databus key such as
        ``access_control_15``. Now only the exact slot ids are accepted.
        """
        databus = conpot_core.get_databus()
        if ip_string.count(".") == 3 and number in ("1", "2", "3", "4", "5"):
            acl_number = int(number)
            final_ip = parse_ip(ip_string)
            databus.set_value("access_control_{0}".format(acl_number), final_ip)

    def run(self, params=None):
        """List the ACL setup; with params, enable/disable the filter or
        store an ACL entry ('0 <slot> <ip>'). Any parameter echoes 'OK'."""
        databus = conpot_core.get_databus()
        cmd_output = ""
        if params:
            # return is always OK apparently...
            cmd_output = "\r\nOK\r\n"
            if len(params) == 1 and params == "0":
                databus.set_value("access_control_status", "DISABLED")
            elif len(params) == 1 and params == "1":
                databus.set_value("access_control_status", "ENABLED")
            elif len(params.split(" ")) == 3:
                cmd, acl_number, ip_address = params.split(" ")
                if cmd == "0":
                    self.set_access_ip(acl_number, ip_address)
        return cmd_output + self.CMD_OUTPUT.format(
            access_control_status=databus.get_value("access_control_status"),
            access_control_1=databus.get_value("access_control_1"),
            access_control_2=databus.get_value("access_control_2"),
            access_control_3=databus.get_value("access_control_3"),
            access_control_4=databus.get_value("access_control_4"),
            access_control_5=databus.get_value("access_control_5"),
        )
class AlarmServerCommand(BaseCommand):
    """'!AS' command: query or set the alarm notification server."""

    HELP_MESSAGE = (
        "!AS: Alarm Server.\r\n"
        "     Used to set IP and port of server to handle alarm notifications.\r\n"
        "     Format: !AS [SrvIP [SrvPort]]\r\n"
        "     Example: !AS 195.215.168.45 \r\n"
        "     Alarms are sent to 195.215.168.45.\r\n"
        "     Example: !AS 195.215.168.45 4000\r\n"
        "     Alarms are sent to to port 4000 on 195.215.168.45.\r\n"
        "     Example: !AS 0.0.0.0\r\n"
        "     Alarm reporting is disabled.\r\n"
    )

    CMD_OUTPUT = "\r\n" "Alarm server: {alarm_server_output} "  # no CRLF

    def run(self, params=None):
        """Without params, report the current target (or DISABLED); with
        params, enable reporting, store IP (and valid port) and echo OK.
        Note the set variant - and only it - terminates with CRLF."""
        databus = conpot_core.get_databus()
        prefix = ""
        postfix = ""
        if params:
            prefix = "\r\nOK"
            # in this case the command has CRLF... really funky...
            postfix = "\r\n"
            databus.set_value("alarm_server_status", "ENABLED")
            fields = params.split(" ")
            databus.set_value("alarm_server_ip", parse_ip(fields[0]))
            # optional port argument; 0 means "invalid, keep current"
            if len(fields) > 1:
                new_port = parse_port(fields[1])
                if new_port != 0:
                    databus.set_value("alarm_server_port", new_port)
            status = "{0}:{1}".format(
                databus.get_value("alarm_server_ip"),
                databus.get_value("alarm_server_port"),
            )
        elif databus.get_value("alarm_server_status") == "DISABLED":
            status = "DISABLED"
        else:
            status = "{0}:{1}".format(
                databus.get_value("alarm_server_ip"),
                databus.get_value("alarm_server_port"),
            )
        return prefix + self.CMD_OUTPUT.format(alarm_server_output=status) + postfix
class GetConfigCommand(BaseCommand):
    """'!GC' command: dump the module configuration from the databus."""

    HELP_MESSAGE = "!GC: Get Config.\r\n" "     Returns the module configuration.\r\n"

    CMD_OUTPUT = (
        "Device Name : {device_name}\r\n"
        "Use DHCP : {use_dhcp}\r\n"
        "IP addr. : {ip_addr}\r\n"
        "IP Subnet : {ip_subnet}\r\n"
        "Gateway addr. : {ip_gateway}\r\n"
        "Service server addr.: {service_server_ip}\r\n"
        "Service server hostname.: {service_server_host}\r\n"
        "DNS Server No. 1: {nameserver_1}\r\n"
        "DNS Server No. 2: {nameserver_2}\r\n"
        "DNS Server No. 3: {nameserver_3}\r\n"
        "MAC addr. (HEX) : {mac_address}\r\n"
        # TODO: i think was can get these from the other protocol also
        "Channel A device meterno.: {channel_a_meternumber}\r\n"
        "Channel B device meterno.: {channel_b_meternumber}\r\n"
        # TODO: these...
        "Keep alive timer (flash setting): ENABLED 10\r\n"
        "Keep alive timer (current setting): ENABLED 10\r\n"
        "Has the module received acknowledge from the server: {kap_ack_server}\r\n"
        "KAP Server port: {kap_a_server_port}\r\n"
        "KAP Local port: {kap_local_port}\r\n"
        # TODO: This, read from other proto also?
        "Software watchdog: ENABLED 3600\r\n"
    )

    def run(self, params=None):
        """Render CMD_OUTPUT with the current databus values."""
        databus = conpot_core.get_databus()
        # template placeholder -> databus key
        slots = {
            "device_name": "device_name",
            "nameserver_1": "nameserver_1",
            "nameserver_2": "nameserver_2",
            "nameserver_3": "nameserver_3",
            "mac_address": "mac_address",
            "use_dhcp": "use_dhcp",
            "ip_addr": "ip_addr",
            "ip_subnet": "ip_subnet",
            "ip_gateway": "ip_gateway",
            "service_server_ip": "kap_a_server_ip",
            "service_server_host": "kap_a_server_hostname",
            "channel_a_meternumber": "channel_a_meternumber",
            "channel_b_meternumber": "channel_b_meternumber",
            "kap_ack_server": "kap_ack_server",
            "kap_a_server_port": "kap_a_server_port",
            "kap_local_port": "kap_local_port",
        }
        values = {slot: databus.get_value(key) for slot, key in slots.items()}
        return self.CMD_OUTPUT.format(**values)
class SoftwareVersionCommand(BaseCommand):
    """'!GV' command: report the module firmware revision."""

    HELP_MESSAGE = (
        "!GV: Software version.\r\n"
        "     Returns the software revision of the module.\r\n"
    )

    CMD_OUTPUT = "\r\n" "Software Version: {software_version}\r\n"

    def run(self, params=None):
        """Return the version string stored on the databus."""
        version = conpot_core.get_databus().get_value("software_version")
        return self.CMD_OUTPUT.format(software_version=version)
class SetKap1Command(BaseCommand):
    """'!SA' command: show or update the primary KAP server endpoint."""

    HELP_MESSAGE = (
        "!SA: Set KAP Server IP and port (*1).\r\n"  # restart is not forced...
        "     Used for setting the IP of the Server to receive KAP-pacakeges.\r\n"
        "     UDP port on server can be provided optionally.\r\n"
        "     Format: !SA SrvIP [SrvPort]\r\n"
        "     Example: !SA 195215168045 \r\n"
        "     KAP packages are hereafter sent to 195.215.168.45.\r\n"
        "     Example: !SA 195.215.168.45 \r\n"
        '     Same result as "!SA 195215168045".\r\n'
        "     Example: !SA 192168001002 61000\r\n"
        "     KAP packages are hereafter sent to 192.168.1.2:61000\r\n"
        "     from module port 8000.\r\n"
        "     Example: !SA 0.0.0.0 \r\n"
        "     Disables KAP.\r\n"
    )

    CMD_OUTPUT = "\r\n" "Service server addr.: {kap_a_output}\r\n"

    def run(self, params=None):
        """Without params report the endpoint; with params store the IP
        (and valid port), clear the hostname and prefix the reply with OK."""
        databus = conpot_core.get_databus()
        prefix = "\r\n"
        if params:
            prefix = "\r\nOK"
            fields = params.split(" ")
            databus.set_value("kap_a_server_ip", parse_ip(fields[0]))
            # TODO: The meter might do a lookup on the ip, and the result of that
            # lookup might be stored in a_server_host...
            databus.set_value("kap_a_server_hostname", "0 - none")
            # optional port argument; 0 means "invalid, keep current"
            if len(fields) > 1:
                new_port = parse_port(fields[1])
                if new_port != 0:
                    databus.set_value("kap_a_server_port", new_port)
        endpoint = "{0}:{1}".format(
            databus.get_value("kap_a_server_ip"), databus.get_value("kap_a_server_port")
        )
        return prefix + self.CMD_OUTPUT.format(kap_a_output=endpoint)
class SetKap2Command(BaseCommand):
    """'!SB' command: show or update the redundant (2nd) KAP server."""

    HELP_MESSAGE = (
        "!SB: Set 2nd KAP Server IP and port.\r\n"
        "     Used for redundancy with two KAP servers.\r\n"
        "     When enabled every second KAP is send to the IP defined by !SB.\r\n"
        "     NB: The KAP interval to each individual server is half of KAPINT\r\n"
        "         defined by !SK.\r\n"
        "     NB: !SA must be enabled (not 0.0.0.0) \r\n"
        "     Format: !SB SrvIP [SrvPort]\r\n"
        "     Example: !SB 195.215.168.45 \r\n"
        "     KAP packages are hereafter also sent to 195.215.168.45.\r\n"
        "     Example: !SB 0.0.0.0 \r\n"
        "     Disabled.\r\n"
        "     Example: !SB 192.168.1.2 61000\r\n"
        "     KAP packages are hereafter sent to 192.168.1.2:61000\r\n"
        "     from module port 8000.\r\n"
    )

    CMD_OUTPUT_SINGLE = (
        "\r\n" "{}\r\n" "Service server addr.: {}:{} (from DNS)\r\n" "No redundancy."
    )

    CMD_OUTPUT_DOUBLE = (
        "\r\n"
        "{}\r\n"
        "Service server addr.: {}:{} (from DNS)\r\n"
        "and fallback KAP to: {}:{}\r\n"
    )

    def run(self, params=None):
        """Store the secondary server IP/port when given, then report both
        endpoints (or 'No redundancy.' when !SB is 0.0.0.0).

        Fixed: a valid port is now stored as the value returned by
        parse_port, matching SetKap1Command; the original validated the
        port but then stored the raw string argument instead.
        """
        databus = conpot_core.get_databus()
        cmd_ok = ""
        if params:
            cmd_ok = "OK"
            params_split = params.split(" ")
            databus.set_value("kap_b_server_ip", parse_ip(params_split[0]))
            # optional port argument; 0 means "invalid, keep current"
            if len(params_split) > 1:
                port = parse_port(params_split[1])
                if port != 0:
                    databus.set_value("kap_b_server_port", port)
        if databus.get_value("kap_b_server_ip") == "0.0.0.0":
            return self.CMD_OUTPUT_SINGLE.format(
                cmd_ok,
                databus.get_value("kap_a_server_ip"),
                databus.get_value("kap_a_server_port"),
            )
        return self.CMD_OUTPUT_DOUBLE.format(
            cmd_ok,
            databus.get_value("kap_a_server_ip"),
            databus.get_value("kap_a_server_port"),
            databus.get_value("kap_b_server_ip"),
            databus.get_value("kap_b_server_port"),
        )
class SetConfigCommand(BaseCommand):
    """'!SC' command: apply a full module configuration and flag a reboot."""

    HELP_MESSAGE = (
        "!SC: Set Config (*1).\r\n"
        "     Configures the module.\r\n"
        "     Format: !SC DHCP IP SUB GW DNS1 DNS2 DNS3 SRV_IP DEVICENAME SRV_DNS\r\n"
        "      DHCP 1 for DHCP, 0 for static IP.\r\n"
        "      IP.. Static IP settings.\r\n"
        "      SRV_IP IP of remote server (Only if SRV_DNS is 0).\r\n"
        "      DEVICENAME User label for for individual naming.\r\n"
        "      SRV_DNS DNS name of remote server (0 to disable DNS lookup)\r\n"
    )

    CMD_OUTPUT = "\r\n" "Service server hostname.: {}\r\n"

    def run(self, params=None):
        """Parse the 10 positional fields, write them to the databus and
        raise the reboot signal. Too few fields (or no params) is a silent
        no-op, matching the original firmware behaviour."""
        databus = conpot_core.get_databus()
        if not params:
            return
        fields = params.split(" ")
        if len(fields) < 10:
            return
        dhcp, ip, sub, gw, dns1, dns2, dns3, srv_ip, name, srv_dns = fields[:10]
        databus.set_value("use_dhcp", "YES" if dhcp == "1" else "NO")
        databus.set_value("ip_addr", parse_ip(ip))
        databus.set_value("ip_subnet", parse_ip(sub))
        databus.set_value("ip_gateway", parse_ip(gw))
        databus.set_value("nameserver_1", parse_ip(dns1))
        databus.set_value("nameserver_2", parse_ip(dns2))
        databus.set_value("nameserver_3", parse_ip(dns3))
        if srv_dns == "0":
            databus.set_value("kap_a_server_ip", parse_ip(srv_ip))
            databus.set_value("kap_a_server_hostname", "0 - none")
        else:
            databus.set_value("kap_a_server_hostname", srv_dns)
            # FIXME: server IP should be resolved from the hostname
            # using nameserver_1, nameserver_2, nameserver_3
            databus.set_value("kap_a_server_ip", parse_ip(srv_ip))
        # device name is limited to 20 characters
        databus.set_value("device_name", name[:20])
        databus.set_value("reboot_signal", 1)
class SetDeviceNameCommand(BaseCommand):
    """'!SD' command: set the user label of the module."""

    HELP_MESSAGE = (
        "!SD: Set device name (*1).\r\n"
        "     Option for individual naming of the module (0-20 chars).\r\n"
    )

    def run(self, params=None):
        """Store a (possibly truncated) device name and flag a reboot.

        Quirk preserved from the original: 'OK' is echoed only when the
        name did NOT need truncation to 20 characters.
        """
        name = "" if params is None else params
        if len(name) > 20:
            name = name[:20]
            reply = ""
        else:
            reply = "\r\nOK"
        databus = conpot_core.get_databus()
        databus.set_value("device_name", name)
        databus.set_value("reboot_signal", 1)
        return reply
class SetLookupCommand(BaseCommand):
    """Handler for !SH: configure how the KAP server address is looked up."""

    HELP_MESSAGE = (
        "!SH: Set KAP Server lookup (DNS or DHCP)\r\n"
        "        Used for setting the DNS name of the Server to receive KAP-pacakeges.\r\n"
        '        Using the keyword "DHCP_OPTION:xxx" makes the module request the IP using DHCP option xxx.\r\n'
        "        The settings are first activated when the module is reset (using !RR).\r\n"
        "        Example: !SH 0 \r\n"
        "                 Lookup Disabled.\r\n"
        "                 The module will send KAP to the IP listed by !SA. \r\n"
        "        Example: !SH hosting.kamstrup_meter.dk \r\n"
        "                 Use DNS lookup.\r\n"
        "                 The module will send KAP to the IP listed by !SA until it resolves the DNS,\r\n"
        "                 hereafter the KAP will be sent to hosting.kamstrup_meter.dk\r\n"
        "        Example: !SH DHCP_OPTION:129\r\n"
        "                 Use DHCP Option.\r\n"
        "                 The module will send KAP to the IP provided by DHCP (in this case option 129).\r\n"
        "                 The module uses the IP provided by !SA if the DHCP offer dos not include option xxx data.\r\n"
    )

    def run(self, params=None):
        """Store the lookup target and echo the stored value.

        A single-character parameter disables lookup ("0 - none"); anything
        longer is stored verbatim as the DNS name / DHCP option keyword.
        """
        params = "" if params is None else params
        bus = conpot_core.get_databus()
        ack = "\r\n"
        # no, i am not making this up... this is actually how it is implemented on the Kamstrup meter..
        if len(params) == 1:
            bus.set_value("kap_server_lookup", "0 - none")
            ack = "\r\nOK\r\n"
        elif len(params) > 1:
            bus.set_value("kap_server_lookup", params)
            ack = "\r\nOK\r\n"
        reply = ack + "Service server hostname.: {0}\r\n"
        return reply.format(bus.get_value("kap_server_lookup"))
class SetIPCommand(BaseCommand):
    """Handler for !SI: set a static IP, or enable DHCP with parameter 0."""

    HELP_MESSAGE = (
        "!SI: Set IP (enter either valid IP or 0 to force DHCP)(*1).\r\n"
        "        Used for changing the module IP.\r\n"
        "        (Use !SC if you need to change subnet/Gateway too).\r\n"
        "        Entering a '0' will enable DHCP.\r\n"
        "        Format: !SI IP\r\n"
        "        Example: !SI 0\r\n"
        "                 The module will reboot and acuire the IP settings using DHCP.\r\n"
        "        Example: !SI 192168001200\r\n"
        "                 The module will reboot using static IP addr 192.168.1.200.\r\n"
        "                 (SUB, GW and DNS unchanged)\r\n"
        "        Example: !SI 192.168.1.200\r\n"
        "                 Same as !SI 192168001200.\r\n"
    )
    CMD_OUTPUT = (
        "\r\n"
        "Use DHCP : {use_dhcp}\r\n"
        "\r\n"
        "IP addr. : {ip_addr}\r\n"
    )

    def run(self, params=None):
        """Apply the requested addressing mode and report the current one."""
        bus = conpot_core.get_databus()
        if params:
            requested = parse_ip(params)
            if requested != "0.0.0.0":
                # Valid static address: switch DHCP off and reboot.
                bus.set_value("use_dhcp", "NO")
                bus.set_value("ip_addr", requested)
                bus.set_value("reboot_signal", 1)
            elif bus.get_value("use_dhcp") == "NO":
                # '0' (or unparsable input) requests DHCP; only act when the
                # module is currently configured static.
                bus.set_value("use_dhcp", "YES")
                bus.set_value("ip_addr", bus.get_value("ip_addr_dhcp"))
                bus.set_value("ip_gateway", bus.get_value("ip_gateway_dhcp"))
                bus.set_value("ip_subnet", bus.get_value("ip_subnet_dhcp"))
                bus.set_value("reboot_signal", 1)
        return self.CMD_OUTPUT.format(
            use_dhcp=bus.get_value("use_dhcp"), ip_addr=bus.get_value("ip_addr")
        )
class SetWatchdogCommand(BaseCommand):
    """Handler for !SK: configure the KAP keep-alive watchdog timers."""

    HELP_MESSAGE = (
        "!SK: Set KAP watchdog timeout(WDT).\r\n"
        "        Used for setting KeepAlive watchdog timing.\r\n"
        "        Format: !SK [WDT] [MISSING] [KAPINT]\r\n"
        "        Example: !SK\r\n"
        "        Example: !SK 3600\r\n"
        "        Example: !SK 3600 60 10\r\n"
        "        WDT      The module reboots after WDT?KAPINT seconds\r\n"
        "                 without an ACK from the server.\r\n"
        "                 0 = disable WDT.\r\n"
        "        MISSING  After MISSING?KAPINT seconds without an ACK,\r\n"
        "                 the Err LED starts blinking.\r\n"
        "                 (Used for indication of missing link to the server)\r\n"
        "        KAPINT   Interval in seconds for how often KeepAlivePackages\r\n"
        "                 are send to the KAP server.\r\n"
        "        The WDT and MISSING timeout counts are both reset by an ACK from the server. \r\n"
    )
    CMD_OUTPUT = (
        "Software watchdog:                  {0}\r\n"
        "KAP Missing warning:                {1}\r\n"
        "Keep alive timer (flash setting):   {2}\r\n"
    )

    def run(self, params=None):
        """Update any supplied watchdog fields and report current values.

        Each value of ``0`` is rendered as DISABLED, anything else ENABLED.
        Returns the status text; prefixed with OK when parameters were given.
        """
        output = "\r\n"
        databus = conpot_core.get_databus()
        if params is not None:
            params_split = params.split(" ")
            # str.split always yields at least one element, so the first
            # field (WDT) is parsed whenever any parameter string was given.
            if len(params_split) > 0:
                # meh, actually the real value is non-existing. If you supply a larger value the smart meter
                # just overwrite memory and starts writing to the next memory location - yep, you heard it here first!
                watchdog_value = str(
                    try_parse_uint(params_split[0], min_value=5, max_value=4294967295)
                )
                databus.set_value("software_watchdog", watchdog_value)
            if len(params_split) > 1:
                kap_missing = str(
                    try_parse_uint(
                        params_split[1], min_value=0, max_value=4294967295
                    )
                )
                databus.set_value("kap_missing_warning", kap_missing)
            if len(params_split) > 2:
                keep_alive_timer = str(
                    try_parse_uint(
                        params_split[2], min_value=0, max_value=4294967295
                    )
                )
                databus.set_value("keep_alive_timer", keep_alive_timer)
            output = "\r\nOK" + output
        return_values = [
            databus.get_value("software_watchdog"),
            databus.get_value("kap_missing_warning"),
            databus.get_value("keep_alive_timer"),
        ]
        for i in range(0, len(return_values)):
            if return_values[i] == "0":
                return_values[i] = "DISABLED {0}".format(return_values[i])
            else:
                return_values[i] = "ENABLED  {0}".format(return_values[i])
        output += SetWatchdogCommand.CMD_OUTPUT.format(
            return_values[0], return_values[1], return_values[2]
        )
        # BUG FIX: the original ended with
        #   return output.format(databus.get_value("kap_server_lookup"))
        # -- a copy-paste remnant from SetLookupCommand.  At this point
        # ``output`` has no remaining placeholders, so the call was a no-op
        # at best, performed a spurious databus read, and would raise if a
        # brace character ever reached the formatted text.
        return output
class SetNameserverCommand(BaseCommand):
    """Handler for !SN: store the three DNS server addresses."""

    HELP_MESSAGE = (
        "!SN: Set IP for DNS Name servers to use.\r\n"
        "        Format: !SN DNS1 DNS2 DNS3\r\n"
        "        Example: !SN 192168001200 192168001201 000000000000\r\n"
        "        Example: !SN 172.16.0.83 172.16.0.84 0.0.0.0\r\n"
    )

    def run(self, params=None):
        """Require exactly three addresses; anything else is rejected."""
        if params is None:
            return self.INVALID_PARAMETER
        fields = params.split(" ")
        if len(fields) != 3:
            return self.INVALID_PARAMETER
        bus = conpot_core.get_databus()
        for slot, raw in enumerate(fields, start=1):
            bus.set_value("nameserver_%d" % slot, parse_ip(raw))
        return "\r\nOK"
class SetPortsCommand(BaseCommand):
    """Handler for !SP: set KAP / channel A / channel B port numbers."""

    HELP_MESSAGE = (
        "!SP: Set IP Ports\r\n"
        "        Format: !SP [KAP CHA CHB CFG]\r\n"
        "        Example: !SP 333\r\n"
        "                 KAP packages are hereafter sent to port 333 on the server.\r\n"
        "        Example: !SP 50 1025 1026 50100\r\n"
        "                 KAP packages are sent to port 50.\r\n"
        "                 Direct connections to UART channel A is on port 1025, B on 1026.\r\n"
        "                 Config connection on port 50100.\r\n"
        "                 (default values)\r\n"
        "        Example: !SP 0 0 80\r\n"
        "                 UART channel B is on port 80 (KAP and ChA is ingored - unchanged).\r\n"
    )
    CMD_OUTPUT = (
        "\r\n"
        "{}\r\n"
        "KAP on server: {}\r\n"
        "ChA on module: {}\r\n"
        "ChB on module: {}\r\n"
        "Cfg on module: {}\r\n"
    )

    def run(self, params=None):
        """Update whichever ports were supplied (0 means leave unchanged)."""
        bus = conpot_core.get_databus()
        ack = ""
        if params:
            fields = params.split(" ")
            ack = "OK"
            # Positional fields map onto these databus keys; a port of 0
            # (or unparsable input) leaves the stored value untouched.
            port_keys = ("kap_a_server_port", "channel_a_port", "channel_b_port")
            for key, raw in zip(port_keys, fields):
                port = parse_port(raw)
                if port != 0:
                    bus.set_value(key, port)
            # FIXME: how do we change the port we are connected to?
            # (the 4th field, the CFG port, is currently ignored)
        return self.CMD_OUTPUT.format(
            ack,
            bus.get_value("kap_a_server_port"),
            bus.get_value("channel_a_port"),
            bus.get_value("channel_b_port"),
            50100,  # FIXME: CFG port is hard-coded, see above
        )
class SetSerialCommand(BaseCommand):
    """Handler for !SS: set serial parameters for UART channel A or B."""

    HELP_MESSAGE = (
        "!SS: Set Serial Settings.\r\n"
        "        Used for setting the serial interface for channel A or B.\r\n"
        "        Format: !SS [Channel Baud,DataBits,Parity,StopBits[,Ctrl]]\r\n"
        "        Example: !SS A Auto\r\n"
        "        Example: !SS A 9600,8,N,2\r\n"
        "        Example: !SS B 115200,8,E,1\r\n"
        "        Example: !SS B 115200,8,E,1,I\r\n"
        "        Example: !SS B 115200,8,E,1,L\r\n"
        "        The ctrl flag can be 'C'(check), 'I' (ignore framing errors) or 'L' (Link, ChB only).\r\n"
        "        Chanel A supports auto mode (Also enables load profile logger in old E-Meters).\r\n"
    )

    def run(self, params=None):
        """Store settings for the named channel, or show both when called
        without parameters.  Malformed input yields an 'Invalid data' reply."""
        bus = conpot_core.get_databus()
        bad_data = "\r\nInvalid data!\r\n\r\n"
        if not params:
            reply = "\r\n" "UART A setup : {0}\r\n" "UART B setup : {1}\r\n"
        else:
            fields = params.split(" ")
            if len(fields) != 2:
                return bad_data
            target = {"A": "serial_settings_a", "B": "serial_settings_b"}.get(fields[0])
            if target is None:
                return bad_data
            bus.set_value(target, fields[1])
            reply = "\r\nOK\r\n"
        # The OK reply has no placeholders; formatting it is a no-op but the
        # two databus reads are kept deliberately (matches device behaviour).
        return reply.format(
            bus.get_value("serial_settings_a"),
            bus.get_value("serial_settings_b"),
        )
class RequestConnectCommand(BaseCommand):
    """Handler for !RC: (pretend to) open/close a remote ChA/ChB socket."""

    HELP_MESSAGE = (
        "!RC: Request connect\r\n"
        "        Makes the module crate a ChA or ChB socket to a remote server.\r\n"
        "        Format: !RC Action [IP [Port]]\r\n"
        "        Example: !RC A 195.215.168.45 200\r\n"
        "        Example: !RC B 195.215.168.45 201\r\n"
        "        Example: !RC D\r\n"
        "                 Disconnects both A and B if open.\r\n"
        "        Example: !RC\r\n"
        "                 Status only.\r\n"
    )

    def run(self, params):
        """Record the requested connect target; the status word is fixed."""
        databus = conpot_core.get_databus()
        # TODO: Further investigations needed... How does this remote socket work? How should copot react?
        status = "Status: 0100\r\n"
        if not params:
            return "\r\n" + status
        fields = params.split(" ")
        if len(fields) == 1 and fields[0] == "D":
            # Disconnect: acknowledged, but no state is tracked yet.
            return "\r\nOK\r\n" + status
        if len(fields) == 2:
            channel, target = fields
            if channel == "A":
                # TODO: figure out how these are parsed when meter is online again
                databus.set_value("channel_a_connect_socket", target)
            elif channel == "B":
                databus.set_value("channel_b_connect_socket", target)
            else:
                return self.INVALID_PARAMETER
            return "\r\nOK\r\n" + status
        return self.INVALID_PARAMETER
class RequestRestartCommand(BaseCommand):
    """Handler for !RR: flag the emulated module for restart."""

    HELP_MESSAGE = "!RR: Request restart (*1).\r\n"

    def run(self, params=None):
        """Raise the reboot flag on the databus; the device sends no reply."""
        databus = conpot_core.get_databus()
        databus.set_value("reboot_signal", 1)
        return None
class WinkModuleCommand(BaseCommand):
    """Handler for !WM: physically identify the module via its WINK LED."""

    HELP_MESSAGE = (
        "!WM: Wink module.\r\n"
        "        Causes the WINK LED on the module to blink for physical identification.\r\n"
    )
    # The emulated device only ever acknowledges; there is no other output.
    CMD_OUTPUT = "\r\n\r\nOK\r\n"
def parse_ip(ip_string):
    """Parse an IP address in dotted form or Kamstrup 12-digit form.

    Accepts either dotted decimal ("192.168.1.200") or the device's packed
    3-digits-per-octet form ("192168001200").  Returns "0.0.0.0" for any
    input that does not describe four octets in range 0-255.

    BUG FIX: the original let ``int()`` raise ValueError on non-numeric
    octets (e.g. "a.b.c.d" or "garbage"), crashing the command handler on
    malformed attacker-supplied input; such input now yields the default.
    """
    default = "0.0.0.0"
    try:
        if "." in ip_string:
            octets = ip_string.split(".")
        else:
            # Packed form: consume up to 3 digits per octet.
            octets = [int(ip_string[i : i + 3]) for i in range(0, len(ip_string), 3)]
        if len(octets) != 4:
            return default
        for octet in octets:
            if int(octet) < 0 or int(octet) > 255:
                return default
    except (ValueError, TypeError):
        # Non-numeric octet or non-string input.
        return default
    # Dotted input keeps its original octet spelling (e.g. leading zeros),
    # matching the previous behaviour; packed input is normalized.
    return ".".join(list(map(str, octets)))
def parse_port(port_string):
    """Return ``port_string`` as an int if it is a valid TCP/UDP port
    (1-65535); return 0 for out-of-range or non-numeric input."""
    try:
        candidate = int(port_string)
    except ValueError:
        return 0
    return candidate if 0 < candidate < 65536 else 0
def try_parse_uint(uint_string, min_value=0, max_value=254):
    """Parse an unsigned integer constrained to [min_value, max_value].

    Returns the parsed int, or 0 when the input is non-numeric or out of
    range.

    BUG FIX: the original returned the *string* "0" on ValueError but the
    *int* 0 on range violations; callers masked this by wrapping the result
    in str().  The return type is now consistently int (str(0) == str("0"),
    so downstream formatting is unchanged).
    """
    try:
        value = int(uint_string)
    except ValueError:
        return 0
    if value < min_value or value > max_value:
        return 0
    return value
| 31,420 | Python | .py | 686 | 35.341108 | 118 | 0.531315 | mushorg/conpot | 1,224 | 413 | 115 | GPL-2.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,490 | slave.py | mushorg_conpot/conpot/protocols/modbus/slave.py | import struct
import logging
from modbus_tk.modbus import (
Slave,
ModbusError,
ModbusInvalidRequestError,
InvalidArgumentError,
DuplicatedKeyError,
InvalidModbusBlockError,
OverlapModbusBlockError,
)
from modbus_tk import defines, utils
from conpot.utils.networking import str_to_bytes
from .modbus_block_databus_mediator import ModbusBlockDatabusMediator
logger = logging.getLogger(__name__)
class MBSlave(Slave):
    """
    Customized Modbus slave representation extending modbus_tk.modbus.Slave

    Adds two function codes on top of the inherited read/write handlers:
    Report Slave ID (0x11) and Read Device Identification (0x2B, MEI 0x0E),
    the latter answered from the protocol XML template (``dom``).
    """

    def __init__(self, slave_id, dom):
        # Parent constructor sets up memory blocks and the per-slave lock.
        Slave.__init__(self, slave_id)
        # Dispatch table: Modbus function code -> handler.  The read/write
        # handlers are inherited from modbus_tk's Slave implementation.
        self._fn_code_map = {
            defines.READ_COILS: self._read_coils,
            defines.READ_DISCRETE_INPUTS: self._read_discrete_inputs,
            defines.READ_INPUT_REGISTERS: self._read_input_registers,
            defines.READ_HOLDING_REGISTERS: self._read_holding_registers,
            defines.WRITE_SINGLE_COIL: self._write_single_coil,
            defines.WRITE_SINGLE_REGISTER: self._write_single_register,
            defines.WRITE_MULTIPLE_COILS: self._write_multiple_coils,
            defines.WRITE_MULTIPLE_REGISTERS: self._write_multiple_registers,
            defines.DEVICE_INFO: self._device_info,
            defines.REPORT_SLAVE_ID: self._report_slave_id,
        }
        # Parsed protocol template; _device_info reads identity strings from it.
        self.dom = dom
        logger.debug("Modbus slave (ID: %d) created" % self._id)

    def _report_slave_id(self, request_pdu):
        """Answer function 0x11 (Report Slave ID) with a fixed identity.

        :param request_pdu: raw request PDU (content unused).
        :return: response body bytes.
        """
        logger.debug("Requested to report slave ID (0x11)")
        # NOTE(review): handle_request prepends the function code again, so
        # 0x11 appears twice in the final PDU; byte count is also packed as 1
        # although two data bytes follow -- confirm against a real device.
        response = struct.pack(">B", 0x11)  # function code
        response += struct.pack(">B", 1)  # byte count
        response += struct.pack(">B", 1)  # slave id
        response += struct.pack(">B", 0xFF)  # run status, OxFF on, 0x00 off
        return response

    def _device_info(self, request_pdu):
        """Answer function 0x2B / MEI type 0x0E (Read Device Identification).

        Identity strings (vendor name, product code, revision) come from the
        protocol template.  Reports conformity level 0x01 and no follow-up
        data, returning all objects in one response.

        :param request_pdu: raw request PDU; byte 2 is the requested
            device id category, echoed back.
        :return: response body bytes (function code added by handle_request).
        """
        info_root = self.dom.xpath("//modbus/device_info")[0]
        vendor_name = info_root.xpath("./VendorName/text()")[0]
        product_code = info_root.xpath("./ProductCode/text()")[0]
        major_minor_revision = info_root.xpath("./MajorMinorRevision/text()")[0]
        (req_device_id, _) = struct.unpack(">BB", request_pdu[2:4])
        device_info = {0: vendor_name, 1: product_code, 2: major_minor_revision}
        # MEI type
        response = struct.pack(">B", 0x0E)
        # requested device id
        response += struct.pack(">B", req_device_id)
        # conformity level
        response += struct.pack(">B", 0x01)
        # followup data 0x00 is False
        response += struct.pack(">B", 0x00)
        # No next object id
        response += struct.pack(">B", 0x00)
        # Number of objects
        response += struct.pack(">B", len(device_info))
        for i in range(len(device_info)):
            # Object id
            response += struct.pack(">B", i)
            # Object length
            response += struct.pack(">B", len(device_info[i]))
            response += str_to_bytes(device_info[i])
        return response

    def handle_request(self, request_pdu, broadcast=False):
        """
        parse the request pdu, makes the corresponding action
        and returns the response pdu

        :param request_pdu: raw PDU bytes (function code + data).
        :param broadcast: True when addressed to slave 0; only write
            functions may be broadcast and no response is returned then.
        :return: response PDU with function code prepended; "" for a handled
            broadcast; or an exception PDU (function code | 0x80 + code) on
            ModbusError.
        """
        logger.debug("Slave (ID: %d) is handling request" % self._id)
        with self._data_lock:  # thread-safe
            try:
                # get the function code
                (self.function_code,) = struct.unpack(">B", request_pdu[:1])

                # check if the function code is valid. If not returns error response
                if not self.function_code in self._fn_code_map:
                    raise ModbusError(defines.ILLEGAL_FUNCTION)

                # Only write requests are legal as broadcasts.
                can_broadcast = [
                    defines.WRITE_MULTIPLE_COILS,
                    defines.WRITE_MULTIPLE_REGISTERS,
                    defines.WRITE_SINGLE_COIL,
                    defines.WRITE_SINGLE_REGISTER,
                ]
                if broadcast and (self.function_code not in can_broadcast):
                    raise ModbusInvalidRequestError(
                        "Function %d can not be broadcasted" % self.function_code
                    )

                # execute the corresponding function
                try:
                    response_pdu = self._fn_code_map[self.function_code](request_pdu)
                except struct.error:
                    # Malformed request payload -> ILLEGAL DATA VALUE (3).
                    raise ModbusError(exception_code=3)
                if response_pdu:
                    if broadcast:
                        # not really sure whats going on here - better log it!
                        logger.info(
                            "Modbus broadcast: %s"
                            % (utils.get_log_buffer("!!", response_pdu))
                        )
                        # NOTE(review): returns str "" while the normal path
                        # returns bytes; callers only rely on falsiness here.
                        return ""
                    else:
                        return struct.pack(">B", self.function_code) + response_pdu
                raise Exception("No response for function %d" % self.function_code)
            except ModbusError as e:
                logger.error(
                    "Exception caught: %s. (A proper response will be sent to the peer)",
                    e,
                )
                # Modbus exception response: function code + 0x80, then code.
                return struct.pack(
                    ">BB", self.function_code + 128, e.get_exception_code()
                )

    def add_block(self, block_name, block_type, starting_address, size):
        """Add a new block identified by its name

        :param block_name: databus key that backs the block's values.
        :param block_type: modbus_tk memory type constant (COILS, ...).
        :param starting_address: first address covered; must be >= 0.
        :param size: number of addresses; must be positive.
        :raises InvalidArgumentError: bad size or starting address.
        :raises DuplicatedKeyError: block name already registered.
        :raises InvalidModbusBlockError: unknown block type.
        :raises OverlapModbusBlockError: range collides with existing block.
        """
        with self._data_lock:  # thread-safe
            if size <= 0:
                raise InvalidArgumentError("size must be a positive number")
            if starting_address < 0:
                raise InvalidArgumentError(
                    "starting address must be zero or positive number"
                )
            if block_name in self._blocks:
                raise DuplicatedKeyError("Block %s already exists. " % block_name)

            if block_type not in self._memory:
                raise InvalidModbusBlockError("Invalid block type %d" % block_type)

            # check that the new block doesn't overlap an existing block
            # it means that only 1 block per type must correspond to a given address
            # for example: it must not have 2 holding registers at address 100
            index = 0
            for i in range(len(self._memory[block_type])):
                block = self._memory[block_type][i]
                if block.is_in(starting_address, size):
                    raise OverlapModbusBlockError(
                        "Overlap block at %d size %d"
                        % (block.starting_address, block.size)
                    )
                if block.starting_address > starting_address:
                    # Keep the per-type list sorted by starting address.
                    index = i
                    break

            # if the block is ok: register it
            self._blocks[block_name] = (block_type, starting_address)
            # add it in the 'per type' shortcut
            self._memory[block_type].insert(
                index, ModbusBlockDatabusMediator(block_name, starting_address)
            )
| 7,004 | Python | .py | 148 | 34.216216 | 89 | 0.568461 | mushorg/conpot | 1,224 | 413 | 115 | GPL-2.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,491 | modbus_server.py | mushorg_conpot/conpot/protocols/modbus/modbus_server.py | # modified by Sooky Peter <xsooky00@stud.fit.vutbr.cz>
# Brno University of Technology, Faculty of Information Technology
import struct
import socket
import time
import logging
import sys
import codecs
from lxml import etree
from gevent.server import StreamServer
import modbus_tk.modbus_tcp as modbus_tcp
from modbus_tk import modbus
# Following imports are required for modbus template evaluation
import modbus_tk.defines as mdef
from conpot.core.protocol_wrapper import conpot_protocol
from conpot.protocols.modbus import slave_db
import conpot.core as conpot_core
logger = logging.getLogger(__name__)
@conpot_protocol
class ModbusServer(modbus.Server):
    """Conpot's Modbus/TCP honeypot server.

    Wraps modbus_tk's Server with a template-driven slave databank
    (SlaveBase) and a gevent StreamServer front end.  Two addressing modes
    are supported, read from the XML template: 'tcp' (unit ids 0/255) and
    'serial' (serial-style addressing emulated over TCP, with a turnaround
    delay for broadcasts).
    """

    def __init__(self, template, template_directory, args):
        # Parameters template_directory and args are part of the common
        # conpot protocol constructor signature; only ``template`` is used.
        self.timeout = 5  # per-connection socket timeout, seconds
        self.delay = None  # serial-mode turnaround delay, milliseconds
        self.mode = None  # 'tcp' or 'serial', read from the template
        self.host = None
        self.port = None
        self.server = None  # gevent StreamServer, created in start()

        databank = slave_db.SlaveBase(template)

        # Constructor: initializes the server settings
        modbus.Server.__init__(self, databank if databank else modbus.Databank())

        # retrieve mode of connection and turnaround delay from the template
        self._get_mode_and_delay(template)

        # not sure how this class remember slave configuration across
        # instance creation, i guess there are some
        # well hidden away class variables somewhere.
        self.remove_all_slaves()
        self._configure_slaves(template)

    def _get_mode_and_delay(self, template):
        """Read <mode> and <delay> from the template; exit(3) on bad values."""
        dom = etree.parse(template)
        self.mode = dom.xpath("//modbus/mode/text()")[0].lower()
        if self.mode not in ["tcp", "serial"]:
            logger.error(
                "Conpot modbus initialization failed due to incorrect"
                " settings. Check the modbus template file"
            )
            sys.exit(3)
        try:
            self.delay = int(dom.xpath("//modbus/delay/text()")[0])
        except ValueError:
            logger.error(
                "Conpot modbus initialization failed due to incorrect"
                " settings. Check the modbus template file"
            )
            sys.exit(3)

    def _configure_slaves(self, template):
        """Create slaves and their memory blocks as declared in <slaves>."""
        dom = etree.parse(template)
        slaves = dom.xpath("//modbus/slaves/*")
        try:
            for s in slaves:
                slave_id = int(s.attrib["id"])
                slave = self.add_slave(slave_id)
                logger.debug("Added slave with id %s.", slave_id)
                for b in s.xpath("./blocks/*"):
                    name = b.attrib["name"]
                    # NOTE(review): eval() maps the template's type string to
                    # a mdef constant; acceptable only because templates are
                    # operator-trusted input, not client input.
                    request_type = eval("mdef." + b.xpath("./type/text()")[0])
                    start_addr = int(b.xpath("./starting_address/text()")[0])
                    size = int(b.xpath("./size/text()")[0])
                    slave.add_block(name, request_type, start_addr, size)
                    logger.debug(
                        "Added block %s to slave %s. " "(type=%s, start=%s, size=%s)",
                        name,
                        slave_id,
                        request_type,
                        start_addr,
                        size,
                    )

            logger.info("Conpot modbus initialized")
        except Exception as e:
            logger.error(e)

    def handle(self, sock, address):
        """Serve one client connection.

        Reads MBAP-framed requests, dispatches them to the databank,
        sends responses, and records the exchange as a conpot session.
        Runs until the client disconnects, times out, or sends an
        unparseable/invalidly addressed frame.
        """
        sock.settimeout(self.timeout)
        session = conpot_core.get_session(
            "modbus",
            address[0],
            address[1],
            sock.getsockname()[0],
            sock.getsockname()[1],
        )
        self.start_time = time.time()
        logger.info(
            "New Modbus connection from %s:%s. (%s)", address[0], address[1], session.id
        )
        session.add_event({"type": "NEW_CONNECTION"})
        try:
            while True:
                request = None
                try:
                    # 7 bytes = MBAP header (6) + unit identifier (1).
                    request = sock.recv(7)
                except Exception as e:
                    logger.error(
                        "Exception occurred in ModbusServer.handle() "
                        "at sock.recv(): %s",
                        str(e),
                    )
                if not request:
                    logger.info("Modbus client disconnected. (%s)", session.id)
                    session.add_event({"type": "CONNECTION_LOST"})
                    break
                # NOTE(review): ``request`` is bytes but "quit." is str, so
                # this comparison can never match on Python 3 -- confirm
                # whether b"quit." was intended.
                if request.strip().lower() == "quit.":
                    logger.info("Modbus client quit. (%s)", session.id)
                    session.add_event({"type": "CONNECTION_QUIT"})
                    break
                if len(request) < 7:
                    logger.info(
                        "Modbus client provided data {} but invalid.".format(session.id)
                    )
                    session.add_event({"type": "CONNECTION_TERMINATED"})
                    break
                # MBAP: transaction id, protocol id, remaining length.
                _, _, length = struct.unpack(">HHH", request[:6])
                while len(request) < (length + 6):
                    # Read the rest of the frame byte by byte.
                    try:
                        new_byte = sock.recv(1)
                        request += new_byte
                    except Exception:
                        break
                query = modbus_tcp.TcpQuery()

                # logdata is a dictionary containing request, slave_id,
                # function_code and response
                response, logdata = self._databank.handle_request(
                    query, request, self.mode
                )
                logdata["request"] = codecs.encode(request, "hex")
                session.add_event(logdata)
                logger.info(
                    "Modbus traffic from %s: %s (%s)", address[0], logdata, session.id
                )

                if response:
                    sock.sendall(response)
                    logger.info("Modbus response sent to %s", address[0])
                else:
                    # TODO:
                    # response could be None under several different cases
                    # MB serial connection addressing UID=0
                    if (self.mode == "serial") and (logdata["slave_id"] == 0):
                        # delay is in milliseconds
                        time.sleep(self.delay / 1000)
                        logger.debug("Modbus server's turnaround delay expired.")
                        logger.info(
                            "Modbus connection terminated with client %s.", address[0]
                        )
                        session.add_event({"type": "CONNECTION_TERMINATED"})
                        sock.shutdown(socket.SHUT_RDWR)
                        sock.close()
                        break
                    # Invalid addressing
                    else:
                        logger.info(
                            "Modbus client ignored due to invalid addressing." " (%s)",
                            session.id,
                        )
                        session.add_event({"type": "CONNECTION_TERMINATED"})
                        sock.shutdown(socket.SHUT_RDWR)
                        sock.close()
                        break
        except socket.timeout:
            logger.debug("Socket timeout, remote: %s. (%s)", address[0], session.id)
            session.add_event({"type": "CONNECTION_LOST"})

    def start(self, host, port):
        """Bind a gevent StreamServer on (host, port) and serve forever."""
        self.host = host
        self.port = port
        connection = (host, port)
        self.server = StreamServer(connection, self.handle)
        logger.info("Modbus server started on: %s", connection)
        self.server.serve_forever()

    def stop(self):
        """Stop the underlying StreamServer."""
        self.server.stop()
| 7,581 | Python | .py | 176 | 28.261364 | 88 | 0.512869 | mushorg/conpot | 1,224 | 413 | 115 | GPL-2.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,492 | slave_db.py | mushorg_conpot/conpot/protocols/modbus/slave_db.py | # modified by Sooky Peter <xsooky00@stud.fit.vutbr.cz>
# Brno University of Technology, Faculty of Information Technology
import struct
from lxml import etree
import codecs
from modbus_tk.modbus import (
Databank,
DuplicatedKeyError,
MissingKeyError,
ModbusInvalidRequestError,
)
from modbus_tk import defines
from conpot.protocols.modbus.slave import MBSlave
import logging
logger = logging.getLogger(__name__)
class SlaveBase(Databank):
    """
    Database keeping track of the slaves.

    Extends modbus_tk's Databank so that slaves are MBSlave instances
    built from the conpot protocol template, and request handling also
    produces a dictionary of items suitable for session logging.
    """

    def __init__(self, template):
        Databank.__init__(self)
        # Parsed protocol template, handed to each MBSlave so it can
        # serve device-identification data.
        self.dom = etree.parse(template)

    def add_slave(self, slave_id, unsigned=True, memory=None):
        """
        Add a new slave with the given id

        ``unsigned`` and ``memory`` are accepted for modbus_tk API
        compatibility but are not used by this implementation.

        :raises Exception: slave_id outside 0-255.
        :raises DuplicatedKeyError: a slave with this id already exists.
        :return: the newly created MBSlave.
        """
        if (slave_id < 0) or (slave_id > 255):
            raise Exception("Invalid slave id %d" % slave_id)
        if slave_id not in self._slaves:
            self._slaves[slave_id] = MBSlave(slave_id, self.dom)
            return self._slaves[slave_id]
        else:
            raise DuplicatedKeyError("Slave %d already exists" % slave_id)

    def handle_request(self, query, request, mode):
        """
        Handles a request. Return value is a tuple where element 0
        is the response object and element 1 is a dictionary
        of items to log.

        :param query: modbus_tk query object (e.g. TcpQuery) used to parse
            the frame and to build the framed response.
        :param request: raw request bytes as read from the socket.
        :param mode: 'tcp' or 'serial'; selects the unit-id addressing
            rules (tcp accepts 0/255, serial broadcasts on 0 and accepts
            1-247).
        """
        request_pdu = None
        response_pdu = b""
        slave_id = None
        function_code = None
        func_code = None
        slave = None
        response = None
        try:
            # extract the pdu and the slave id
            slave_id, request_pdu = query.parse_request(request)
            if len(request_pdu) > 0:
                (func_code,) = struct.unpack(">B", request_pdu[:1])
                logger.debug("Working mode: %s" % mode)
                if mode == "tcp":
                    # Modbus/TCP: only unit ids 0 and 255 address the device.
                    if slave_id == 0 or slave_id == 255:
                        slave = self.get_slave(slave_id)
                        response_pdu = slave.handle_request(request_pdu)
                        response = query.build_response(response_pdu)
                    else:
                        # TODO:
                        # Shall we return SLAVE DEVICE FAILURE, or ILLEGAL ACCESS?
                        # Would it be better to make this configurable?
                        r = struct.pack(
                            ">BB", func_code + 0x80, defines.SLAVE_DEVICE_FAILURE
                        )
                        response = query.build_response(r)
                elif mode == "serial":
                    if slave_id == 0:  # broadcasting
                        # Every slave executes the write; per Modbus rules a
                        # broadcast gets no response on the wire.
                        for key in self._slaves:
                            response_pdu = self._slaves[key].handle_request(
                                request_pdu, broadcast=True
                            )
                        # no response is sent back
                        return (
                            None,
                            {
                                "request": request_pdu.hex(),
                                "slave_id": slave_id,
                                "function_code": func_code,
                                "response": "",
                            },
                        )
                    elif 0 < slave_id <= 247:  # normal request handling
                        slave = self.get_slave(slave_id)
                        response_pdu = slave.handle_request(request_pdu)
                        # make the full response
                        response = query.build_response(response_pdu)
                    else:
                        # TODO:
                        # Same here. Return SLAVE DEVICE FAILURE or ILLEGAL ACCESS?
                        r = struct.pack(
                            ">BB", func_code + 0x80, defines.SLAVE_DEVICE_FAILURE
                        )
                        response = query.build_response(r)
        except (MissingKeyError, IOError) as e:
            logger.error(e)
            # If slave was not found or the request was not handled correctly,
            # return a server error response
            r = struct.pack(">BB", func_code + 0x80, defines.SLAVE_DEVICE_FAILURE)
            response = query.build_response(r)
        except ModbusInvalidRequestError as e:
            logger.error(e)
            # TODO: return something here?
        if slave:
            # The slave records the last dispatched function code on itself.
            function_code = slave.function_code
        return (
            response,
            {
                "request": codecs.encode(request_pdu, "hex"),
                "slave_id": slave_id,
                "function_code": function_code,
                "response": codecs.encode(response_pdu, "hex"),
            },
        )
| 4,560 | Python | .py | 113 | 26.761062 | 82 | 0.518727 | mushorg/conpot | 1,224 | 413 | 115 | GPL-2.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,493 | modbus_block_databus_mediator.py | mushorg_conpot/conpot/protocols/modbus/modbus_block_databus_mediator.py | from modbus_tk.hooks import call_hooks
import conpot.core as conpot_core
class ModbusBlockDatabusMediator(object):
    """Address block whose values live on the conpot databus.

    Stand-in for modbus_tk's ModbusBlock: instead of owning an array of
    register/coil values, every read and write is forwarded to the databus
    entry identified by ``databus_key``.
    """

    def __init__(self, databus_key, starting_address):
        """Bind the block to a databus key and record its address range."""
        self.starting_address = starting_address
        self.databus_key = databus_key
        # Block size is taken from the current length of the databus value.
        self.size = len(conpot_core.get_databus().get_value(self.databus_key))

    def is_in(self, starting_address, size):
        """Return True if a block at ``starting_address`` spanning ``size``
        addresses would overlap this block."""
        if starting_address > self.starting_address:
            return (self.starting_address + self.size) > starting_address
        if starting_address < self.starting_address:
            return (starting_address + size) > self.starting_address
        # Identical starting address always collides.
        return True

    def __getitem__(self, r):
        """Read straight from the databus-backed value."""
        return conpot_core.get_databus().get_value(self.databus_key)[r]

    def __setitem__(self, r, v):
        """Write through to the databus-backed value, firing modbus hooks."""
        call_hooks("modbus.ModbusBlock.setitem", (self, r, v))
        backing = conpot_core.get_databus().get_value(self.databus_key)
        return backing.__setitem__(r, v)
| 1,328 | Python | .py | 30 | 36.266667 | 83 | 0.638545 | mushorg/conpot | 1,224 | 413 | 115 | GPL-2.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,494 | guardian_ast_server.py | mushorg_conpot/conpot/protocols/guardian_ast/guardian_ast_server.py | # Copyright (C) 2015 Lukas Rist <glaslos@gmail.com>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
Service support based on gaspot.py [https://github.com/sjhilt/GasPot]
Original authors: Kyle Wilhoit and Stephen Hilt
"""
from gevent.server import StreamServer
import datetime
import logging
import random
import conpot.core as conpot_core
from conpot.core.protocol_wrapper import conpot_protocol
from conpot.utils.networking import str_to_bytes
logger = logging.getLogger(__name__)
# 9999 indicates that the command was not understood and
# FF1B is the checksum for the 9999
AST_ERROR = "9999FF1B\n"
@conpot_protocol
class GuardianASTServer(object):
def __init__(self, template, template_directory, args):
self.server = None
self.databus = conpot_core.get_databus()
# dom = etree.parse(template)
self.fill_offset_time = datetime.datetime.utcnow()
logger.info("Conpot GuardianAST initialized")
def handle(self, sock, addr):
session = conpot_core.get_session(
"guardian_ast",
addr[0],
addr[1],
sock.getsockname()[0],
sock.getsockname()[1],
)
logger.info(
"New GuardianAST connection from %s:%d. (%s)", addr[0], addr[1], session.id
)
session.add_event({"type": "NEW_CONNECTION"})
current_time = datetime.datetime.utcnow()
fill_start = self.fill_offset_time - datetime.timedelta(minutes=313)
fill_stop = self.fill_offset_time - datetime.timedelta(minutes=303)
# Default Product names, change based off country needs
product1 = self.databus.get_value("product1").ljust(22)
product1 = self.databus.get_value("product1").ljust(22)
product2 = self.databus.get_value("product2").ljust(22)
product3 = self.databus.get_value("product3").ljust(22)
product4 = self.databus.get_value("product4").ljust(22)
# Create random Numbers for the volumes
#
# this will crate an initial Volume and then the second value based
# off the orig value.
vol1 = self.databus.get_value("vol1")
vol1tc = random.randint(vol1, vol1 + 200)
vol2 = self.databus.get_value("vol2")
vol2tc = random.randint(vol2, vol2 + 200)
vol3 = self.databus.get_value("vol3")
vol3tc = random.randint(vol3, vol3 + 200)
vol4 = self.databus.get_value("vol4")
vol4tc = random.randint(vol4, vol4 + 200)
# unfilled space ULLAGE
ullage1 = str(self.databus.get_value("ullage1"))
ullage2 = str(self.databus.get_value("ullage2"))
ullage3 = str(self.databus.get_value("ullage3"))
ullage4 = str(self.databus.get_value("ullage3"))
# Height of tank
height1 = str(self.databus.get_value("height1")).ljust(5, "0")
height2 = str(self.databus.get_value("height2")).ljust(5, "0")
height3 = str(self.databus.get_value("height3")).ljust(5, "0")
height4 = str(self.databus.get_value("height4")).ljust(5, "0")
# Water in tank, this is a variable that needs to be low
h2o1 = str(self.databus.get_value("h2o1")).ljust(4, "0")
h2o2 = str(self.databus.get_value("h2o2")).ljust(4, "0")
h2o3 = str(self.databus.get_value("h2o3")).ljust(4, "0")
h2o4 = str(self.databus.get_value("h2o4")).ljust(4, "0")
# Temperature of the tank, this will need to be between 50 - 60
temp1 = str(self.databus.get_value("temp1")).ljust(5, "0")
temp2 = str(self.databus.get_value("temp2")).ljust(5, "0")
temp3 = str(self.databus.get_value("temp3")).ljust(5, "0")
temp4 = str(self.databus.get_value("temp4")).ljust(5, "0")
station = self.databus.get_value("station_name")
# This function is to set-up up the message to be sent upon a successful I20100 command being sent
# The final message is sent with a current date/time stamp inside of the main loop.
def I20100():
ret = "\nI20100\n" + str(current_time.strftime("%m/%d/%Y %H:%M"))
ret += "\n\n" + station + "\n\n\n\nIN-TANK INVENTORY\n\n"
ret += "TANK PRODUCT VOLUME TC VOLUME ULLAGE HEIGHT WATER TEMP"
ret += (
"\n 1 "
+ product1
+ str(vol1)
+ " "
+ str(vol1tc)
+ " "
+ ullage1
+ " "
+ height1
+ " "
+ h2o1
+ " "
+ temp1
)
ret += (
"\n 2 "
+ product2
+ str(vol2)
+ " "
+ str(vol2tc)
+ " "
+ ullage2
+ " "
+ height2
+ " "
+ h2o2
+ " "
+ temp2
)
ret += (
"\n 3 "
+ product3
+ str(vol3)
+ " "
+ str(vol3tc)
+ " "
+ ullage3
+ " "
+ height3
+ " "
+ h2o3
+ " "
+ temp3
)
ret += (
"\n 4 "
+ product4
+ str(vol4)
+ " "
+ str(vol4tc)
+ " "
+ ullage4
+ " "
+ height4
+ " "
+ h2o4
+ " "
+ temp4
)
ret += "\n"
return ret
###########################################################################
#
# Only one Tank is listed currently in the I20200 command
#
###########################################################################
def I20200():
ret = "\nI20200\n" + str(current_time.strftime("%m/%d/%Y %H:%M"))
ret += "\n\n" + station + "\n\n\n\nDELIVERY REPORT\n\n"
ret += (
"T 1:"
+ product1
+ "\nINCREASE DATE / TIME GALLONS TC GALLONS WATER TEMP DEG F HEIGHT\n\n"
)
ret += (
" END: "
+ str(fill_stop.strftime("%m/%d/%Y %H:%M"))
+ " "
+ str(vol1 + 300)
+ " "
+ str(vol1tc + 300)
+ " "
+ h2o1
+ " "
+ temp1
+ " "
+ height1
+ "\n"
)
ret += (
" START: "
+ str(fill_start.strftime("%m/%d/%Y %H:%M"))
+ " "
+ str(vol1 - 300)
+ " "
+ str(vol1tc - 300)
+ " "
+ h2o1
+ " "
+ temp1
+ " "
+ str(float(height1) - 23)
+ "\n"
)
ret += (
" AMOUNT: "
+ str(vol1)
+ " "
+ str(vol1tc)
+ "\n\n"
)
return ret
###########################################################################
#
# I20300 In-Tank Leak Detect Report
#
###########################################################################
def I20300():
ret = "\nI20300\n" + str(current_time.strftime("%m/%d/%Y %H:%M"))
ret += "\n\n" + station + "\n\n\n"
ret += (
"TANK 1 "
+ product1
+ "\n TEST STATUS: OFF\nLEAK DATA NOT AVAILABLE ON THIS TANK\n\n"
)
ret += (
"TANK 2 "
+ product2
+ "\n TEST STATUS: OFF\nLEAK DATA NOT AVAILABLE ON THIS TANK\n\n"
)
ret += (
"TANK 3 "
+ product3
+ "\n TEST STATUS: OFF\nLEAK DATA NOT AVAILABLE ON THIS TANK\n\n"
)
ret += (
"TANK 4 "
+ product4
+ "\n TEST STATUS: OFF\nLEAK DATA NOT AVAILABLE ON THIS TANK\n\n"
)
return ret
###########################################################################
# Shift report command I20400 only one item in report at this time,
# but can always add more if needed
###########################################################################
def I20400():
ret = "\nI20400\n" + str(current_time.strftime("%m/%d/%Y %H:%M"))
ret += "\n\n" + station + "\n\n\n\nSHIFT REPORT\n\n"
ret += "SHIFT 1 TIME: 12:00 AM\n\nTANK PRODUCT\n\n"
ret += (
" 1 " + product1 + " VOLUME TC VOLUME ULLAGE HEIGHT WATER TEMP\n"
)
ret += (
"SHIFT 1 STARTING VALUES "
+ str(vol1)
+ " "
+ str(vol1tc)
+ " "
+ ullage1
+ " "
+ height1
+ " "
+ h2o1
+ " "
+ temp1
+ "\n"
)
ret += (
" ENDING VALUES "
+ str(vol1 + 940)
+ " "
+ str(vol1tc + 886)
+ " "
+ str(int(ullage1) + 345)
+ " "
+ str(float(height1) + 53)
+ " "
+ h2o1
+ " "
+ temp1
+ "\n"
)
ret += " DELIVERY VALUE 0\n"
ret += " TOTALS 940\n\n"
return ret
###########################################################################
# I20500 In-Tank Status Report
###########################################################################
def I20500():
ret = "\nI20500\n" + str(current_time.strftime("%m/%d/%Y %H:%M"))
ret += "\n\n\n" + station + "\n\n\n"
ret += "TANK PRODUCT STATUS\n\n"
ret += " 1 " + product1 + " NORMAL\n\n"
ret += " 2 " + product2 + " HIGH WATER ALARM\n"
ret += " HIGH WATER WARNING\n\n"
ret += " 3 " + product3 + " NORMAL\n\n"
ret += " 4 " + product4 + " NORMAL\n\n"
return ret
        # Main request/response loop: read one framed ^A command per
        # iteration, dispatch it, and answer until the peer disconnects
        # or sends something unparsable.
        while True:
            try:
                # Get the initial data
                request = sock.recv(4096)
                # The connection has been closed
                if not request:
                    break
                # Keep reading until a terminator (newline or "00") shows up.
                while not (b"\n" in request or b"00" in request):
                    request += sock.recv(4096)
                # if first value is not ^A then do nothing
                # thanks John(achillean) for the help
                if request[:1] != b"\x01":
                    logger.info(
                        "Non ^A command attempt %s:%d. (%s)",
                        addr[0],
                        addr[1],
                        session.id,
                    )
                    break
                # if request is less than 6, than do nothing
                if len(request) < 6:
                    logger.info(
                        "Invalid command attempt %s:%d. (%s)",
                        addr[0],
                        addr[1],
                        session.id,
                    )
                    break
                # Map of supported in-tank report commands to their
                # report-generator closures (defined above).
                cmds = {
                    "I20100": I20100,
                    "I20200": I20200,
                    "I20300": I20300,
                    "I20400": I20400,
                    "I20500": I20500,
                }
                cmd = request[1:7].decode()  # strip ^A and \n out
                response = None
                if cmd in cmds:
                    logger.info(
                        "%s command attempt %s:%d. (%s)",
                        cmd,
                        addr[0],
                        addr[1],
                        session.id,
                    )
                    response = cmds[cmd]()
                elif cmd.startswith("S6020"):
                    # change the tank name
                    if cmd.startswith("S60201"):
                        # split string into two, the command, and the data
                        TEMP = request.split(b"S60201")
                        # if length is less than two, print error
                        if len(TEMP) < 2:
                            response = AST_ERROR
                        # Else the command was entered correctly and continue
                        else:
                            # Strip off the carrage returns and new lines
                            TEMP1 = TEMP[1].rstrip(b"\r\n").decode()
                            # if Length is less than 22
                            if len(TEMP1) < 22:
                                # pad the result to have 22 chars
                                product1 = TEMP1.ljust(22)
                            elif len(TEMP1) > 22:
                                # else only print 22 chars if the result was longer
                                product1 = TEMP1[:20] + " "
                            else:
                                # else it fits fine (22 chars)
                                product1 = TEMP1
                            logger.info(
                                "S60201: %s command attempt %s:%d. (%s)",
                                TEMP1,
                                addr[0],
                                addr[1],
                                session.id,
                            )
                    # Follows format for S60201 for comments
                    elif cmd.startswith("S60202"):
                        TEMP = request.split(b"S60202")
                        if len(TEMP) < 2:
                            response = AST_ERROR
                        else:
                            TEMP1 = TEMP[1].rstrip(b"\r\n").decode()
                            if len(TEMP1) < 22:
                                product2 = TEMP1.ljust(22)
                            elif len(TEMP1) > 22:
                                product2 = TEMP1[:20] + " "
                            else:
                                product2 = TEMP1
                            logger.info(
                                "S60202: %s command attempt %s:%d. (%s)",
                                TEMP1,
                                addr[0],
                                addr[1],
                                session.id,
                            )
                    # Follows format for S60201 for comments
                    elif cmd.startswith("S60203"):
                        TEMP = request.split(b"S60203")
                        if len(TEMP) < 2:
                            response = AST_ERROR
                        else:
                            TEMP1 = TEMP[1].rstrip(b"\r\n").decode()
                            if len(TEMP1) < 22:
                                product3 = TEMP1.ljust(22)
                            elif len(TEMP1) > 22:
                                product3 = TEMP1[:20] + " "
                            else:
                                product3 = TEMP1
                            logger.info(
                                "S60203: %s command attempt %s:%d. (%s)",
                                TEMP1,
                                addr[0],
                                addr[1],
                                session.id,
                            )
                    # Follows format for S60201 for comments
                    elif cmd.startswith("S60204"):
                        TEMP = request.split(b"S60204")
                        if len(TEMP) < 2:
                            response = AST_ERROR
                        else:
                            TEMP1 = TEMP[1].rstrip(b"\r\n").decode()
                            if len(TEMP1) < 22:
                                product4 = TEMP1.ljust(22)
                            elif len(TEMP1) > 22:
                                product4 = TEMP1[:20] + " "
                            else:
                                product4 = TEMP1
                            logger.info(
                                "S60204: %s command attempt %s:%d. (%s)",
                                TEMP1,
                                addr[0],
                                addr[1],
                                session.id,
                            )
                    # Follows format for S60201 for comments
                    # S60200 renames all four tanks at once.
                    elif cmd.startswith("S60200"):
                        TEMP = request.split(b"S60200")
                        if len(TEMP) < 2:
                            response = AST_ERROR
                        else:
                            TEMP1 = TEMP[1].rstrip(b"\r\n").decode()
                            if len(TEMP1) < 22:
                                product1 = TEMP1.ljust(22)
                                product2 = TEMP1.ljust(22)
                                product3 = TEMP1.ljust(22)
                                product4 = TEMP1.ljust(22)
                            elif len(TEMP1) > 22:
                                product1 = TEMP1[:20] + " "
                                product2 = TEMP1[:20] + " "
                                product3 = TEMP1[:20] + " "
                                product4 = TEMP1[:20] + " "
                            else:
                                product1 = TEMP1
                                product2 = TEMP1
                                product3 = TEMP1
                                product4 = TEMP1
                            logger.info(
                                "S60200: %s command attempt %s:%d. (%s)",
                                TEMP1,
                                addr[0],
                                addr[1],
                                session.id,
                            )
                    else:
                        response = AST_ERROR
                else:
                    response = AST_ERROR
                    # log what was entered
                    logger.info(
                        "%s command attempt %s:%d. (%s)",
                        request,
                        addr[0],
                        addr[1],
                        session.id,
                    )
                if response:
                    sock.send(str_to_bytes(response))
                session.add_event(
                    {
                        "type": "AST {0}".format(cmd),
                        "request": request,
                        "response": response,
                    }
                )
            # Deliberately broad: a honeypot session must never crash the
            # server on malformed input; the exception is logged instead.
            except Exception as e:
                logger.exception(("Unknown Error: {}".format(str(e))))
        logger.info(
            "GuardianAST client disconnected %s:%d. (%s)", addr[0], addr[1], session.id
        )
        session.add_event({"type": "CONNECTION_LOST"})
def start(self, host, port):
connection = (host, port)
self.server = StreamServer(connection, self.handle)
logger.info("GuardianAST server started on: {0}".format(connection))
self.server.serve_forever()
    def stop(self):
        """Stop the underlying StreamServer; unblocks serve_forever() in start()."""
        self.server.stop()
| 20,482 | Python | .py | 496 | 24.094758 | 106 | 0.381907 | mushorg/conpot | 1,224 | 413 | 115 | GPL-2.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,495 | tftp_server.py | mushorg_conpot/conpot/protocols/tftp/tftp_server.py | # Copyright (C) 2018 Abhinav Saxena <xandfury@gmail.com>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
# Author: Abhinav Saxena <xandfury@gmail.com>
# Institute of Informatics and Communication, University of Delhi, South Campus.
import gevent
import os
from lxml import etree
from conpot.protocols.tftp import tftp_handler
from gevent.server import DatagramServer
import conpot.core as conpot_core
from conpot.core.protocol_wrapper import conpot_protocol
from conpot.utils.networking import get_interface_ip
from tftpy import TftpException, TftpTimeout
import logging
logger = logging.getLogger(__name__)
@conpot_protocol
class TftpServer(object):
    """TFTP Server"""

    # Maximum number of retransmissions before a timed-out session is dropped.
    TIMEOUT_RETRIES = 5

    def __init__(self, template, template_directory, args, timeout=5):
        """
        :param template: path to the protocol-specific XML template.
        :param template_directory: template root directory (unused directly here;
            the vfs source paths come from the template itself).
        :param args: runtime arguments (unused here).
        :param timeout: per-datagram socket timeout in seconds.
        """
        self.timeout = float(timeout)
        self.server = None  # server attr - Initialize in start
        self.root = None
        self.listener = None  # listener socket
        # A dict of sessions, where each session is keyed by a string like
        # ip:tid for the remote end.
        self.sessions = {}
        # A threading event to help threads synchronize with the server is_running state.
        self.is_running = gevent.event.Event()
        self.shutdown = False
        self._init_vfs(template)
        logger.debug("TFTP server initialized.")

    def _init_vfs(self, template):
        """Parse the template and mount the TFTP subtree of conpot's virtual FS."""
        dom = etree.parse(template)
        self.root_path = dom.xpath("//tftp/tftp_root_path/text()")[0].lower()
        # <add_src> is optional: when present, its contents are copied into
        # the vfs at root_path.
        if len(dom.xpath("//tftp/add_src/text()")) == 0:
            self.add_src = None
        else:
            self.add_src = dom.xpath("//tftp/add_src/text()")[0].lower()
        self.data_fs_subdir = dom.xpath("//tftp/data_fs_subdir/text()")[0].lower()
        # Create a file system.
        self.vfs, self.data_fs = conpot_core.add_protocol(
            protocol_name="tftp",
            data_fs_subdir=self.data_fs_subdir,
            vfs_dst_path=self.root_path,
            src_path=self.add_src,
        )
        if self.add_src:
            logger.info(
                "TFTP Serving File System from {} at {} in vfs. TFTP data_fs sub directory: {}".format(
                    self.add_src, self.root_path, self.data_fs._sub_dir
                )
            )
        else:
            logger.info(
                "TFTP Serving File System at {} in vfs. TFTP data_fs sub directory: {}".format(
                    self.root_path, self.data_fs._sub_dir
                )
            )
        logger.debug(
            "TFTP serving list of files : {}".format(", ".join(self.vfs.listdir(".")))
        )
        self.root = "/"  # Setup root dir.
        # check for permissions etc.
        logger.debug(
            "TFTP root {} is a directory".format(self.vfs.getcwd() + self.root)
        )
        # NOTE(review): access() here is conpot's AbstractFS wrapper, not
        # os.access — the second argument appears to be a user name/id.
        if self.vfs.access(self.root, 0, os.R_OK):
            logger.debug(
                "TFTP root {} is readable".format(self.vfs.getcwd() + self.root)
            )
        else:
            raise TftpException("The TFTP root must be readable")
        # A non-writable root is tolerated (reads still work); only log it.
        if self.vfs.access(self.root, 0, os.W_OK):
            logger.debug(
                "TFTP root {} is writable".format(self.vfs.getcwd() + self.root)
            )
        else:
            logger.warning(
                "The TFTP root {} is not writable".format(self.vfs.getcwd() + self.root)
            )

    def handle(self, buffer, client_addr):
        """Serve one TFTP session for a single received datagram.

        :param buffer: raw bytes of the initial request packet.
        :param client_addr: (ip, port) tuple of the remote peer.
        """
        session = conpot_core.get_session(
            "tftp",
            client_addr[0],
            client_addr[1],
            get_interface_ip(client_addr[0]),
            self.server._socket.getsockname()[1],
        )
        logger.info(
            "New TFTP client has connected. Connection from {}:{}. ".format(
                client_addr[0], client_addr[1]
            )
        )
        session.add_event({"type": "NEW_CONNECTION"})
        logger.debug("Read %d bytes", len(buffer))
        context = tftp_handler.TFTPContextServer(
            client_addr[0], client_addr[1], self.timeout, self.root, None, None
        )
        context.vfs, context.data_fs = self.vfs, self.data_fs
        # NOTE(review): this branch logs and records the termination but does
        # not return, so the request is still processed below — confirm
        # whether that is intentional.
        if self.shutdown:
            logger.info("Shutting down now. Disconnecting {}".format(client_addr))
            session.add_event({"type": "CONNECTION_TERMINATED"})
        try:
            context.start(buffer)
            context.cycle()
        except TftpTimeout as err:
            logger.info("Timeout occurred %s: %s" % (context, str(err)))
            session.add_event({"type": "CONNECTION_TIMEOUT"})
            context.retry_count += 1
            # TODO: We should accept retries from the user.
            if context.retry_count >= self.TIMEOUT_RETRIES:
                logger.info(
                    "TFTP: Hit max {} retries on {}, giving up".format(
                        self.TIMEOUT_RETRIES, context
                    )
                )
            else:
                logger.info("TFTP: resending on session %s" % context)
                context.state.resendLast()
        except TftpException as err:
            logger.info(
                "TFTP: Fatal exception thrown from session {}: {}".format(
                    context, str(err)
                )
            )
        session.add_event({"type": "CONNECTION_LOST"})
        logger.info("TFTP: terminating connection: {}".format(context))
        session.set_ended()
        context.end()
        # Gathering up metrics before terminating the connection.
        metrics = context.metrics
        if metrics.duration == 0:
            logger.info("Duration too short, rate undetermined")
        else:
            logger.info(
                "Transferred %d bytes in %.2f seconds"
                % (metrics.bytes, metrics.duration)
            )
            logger.info("Average rate: %.2f kbps" % metrics.kbps)
        logger.info("%.2f bytes in resent data" % metrics.resent_bytes)
        logger.info("%d duplicate packets" % metrics.dupcount)
        del context

    def start(self, host, port):
        """Bind a UDP socket on (host, port) and serve TFTP forever."""
        conn = (host, port)
        # FIXME - sockets should be non-blocking
        self.listener = gevent.socket.socket(
            gevent.socket.AF_INET, gevent.socket.SOCK_DGRAM
        )
        self.listener.bind(conn)
        self.listener.settimeout(self.timeout)
        self.server = DatagramServer(self.listener, self.handle)
        logger.info("Starting TFTP server at {}".format(conn))
        self.server.serve_forever()

    def stop(self):
        """Close the DatagramServer; unblocks serve_forever() in start()."""
        self.server.close()
| 7,169 | Python | .py | 170 | 32.464706 | 103 | 0.599971 | mushorg/conpot | 1,224 | 413 | 115 | GPL-2.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,496 | tftp_handler.py | mushorg_conpot/conpot/protocols/tftp/tftp_handler.py | import fs
import os
import logging
import tftpy
import time
from gevent import socket
from tftpy import TftpException, TftpErrors
from tftpy.TftpStates import TftpStateExpectACK, TftpStateExpectDAT
from tftpy.TftpPacketTypes import TftpPacketRRQ, TftpPacketWRQ
from conpot.utils.networking import sanitize_file_name
logger = logging.getLogger(__name__)
class TFTPState(tftpy.TftpStates.TftpState):
    """Base class for conpot's TFTP protocol states.

    Fix: upstream spelled the constructor ``__int__`` — a typo for
    ``__init__`` — so the override was dead code and the parent initializer
    ran instead. Renaming it is behavior-identical because the body only
    delegates to the parent initializer anyway.
    """

    def __init__(self, context):
        super().__init__(context)

    def handle(self, pkt, raddress, rport):
        """Process a received packet; concrete states must implement this."""
        raise NotImplementedError
class TFTPServerState(TFTPState):
    """The base class for server states."""

    # We had to rewrite the because -- had to check os.* wrappers.
    # vfs/data_fs are injected by the server (conpot filesystem wrappers);
    # full_path caches the resolved target path for the current transfer.
    vfs, data_fs = None, None
    full_path = None

    def handle(self, pkt, raddress, rport):
        raise NotImplementedError

    def serverInitial(self, pkt, raddress, rport):
        """Common first-packet processing for RRQ/WRQ.

        Negotiates options, validates the peer's address/port and resolves
        the requested filename against the server root. Returns True when an
        OACK must be sent, False otherwise — or ``self`` when the packet came
        from an unexpected peer and must be ignored.
        """
        options = pkt.options
        sendoack = False
        if not self.context.tidport:
            self.context.tidport = rport
            logger.info("Setting tidport to %s" % rport)
        logger.debug("Setting default options, blksize")
        self.context.options = {"blksize": tftpy.DEF_BLKSIZE}
        if options:
            logger.debug("Options requested: %s", options)
            supported_options = self.returnSupportedOptions(options)
            self.context.options.update(supported_options)
            sendoack = True
        # FIXME - only octet mode is supported at this time.
        if pkt.mode != "octet":
            logger.info("Received non-octet mode request. Replying with binary data.")
        # test host/port of client end
        if self.context.host != raddress or self.context.port != rport:
            self.sendError(TftpErrors.UnknownTID)
            logger.error(
                "Expected traffic from %s:%s but received it from %s:%s instead."
                % (self.context.host, self.context.port, raddress, rport)
            )
            # Return same state, we're still waiting for valid traffic.
            return self
        logger.debug("Requested filename is %s", pkt.filename)
        # Resolve the request relative to the server root unless it is
        # already an absolute path under the root.
        if pkt.filename.startswith(self.context.root):
            full_path = pkt.filename
        else:
            full_path = os.path.join(self.context.root, pkt.filename.lstrip("/"))
        # NOTE(review): nothing inside this try appears to raise
        # fs.errors.FSError (it only logs and assigns), so the except branch
        # looks unreachable — confirm against the conpot fs wrappers.
        try:
            logger.info("Full path of file to be uploaded is {}".format(full_path))
            self.full_path = full_path
        except fs.errors.FSError:
            logger.warning("requested file is not within the server root - bad")
            self.sendError(TftpErrors.IllegalTftpOp)
            raise TftpException("Bad file path")
        self.context.file_to_transfer = pkt.filename
        return sendoack
class TFTPStateServerRecvRRQ(TFTPServerState):
    """Server state entered when an initial RRQ (read request) arrives."""

    def handle(self, pkt, raddress, rport):
        """Handle an initial RRQ packet as a server."""
        logger.debug("In TftpStateServerRecvRRQ.handle")
        sendoack = self.serverInitial(pkt, raddress, rport)
        path = self.full_path
        logger.info("Opening file %s for reading" % path)
        # Open the requested file from conpot's virtual filesystem; the
        # root prefix is stripped because the vfs is rooted there already.
        if self.context.vfs.norm_path(path):
            self.context.fileobj = self.context.vfs.open(
                path.replace(self.context.root + "/", ""), "rb"
            )
        else:
            logger.info("File not found: %s", path.replace(self.context.root + "/", ""))
            self.sendError(TftpErrors.FileNotFound)
            raise TftpException("File not found: {}".format(path))
        # Options negotiation.
        if sendoack and "tsize" in self.context.options:
            # getting the file size for the tsize option. As we handle
            # file-like objects and not only real files, we use this seeking
            # method instead of asking the OS
            self.context.fileobj.seek(0, os.SEEK_END)
            tsize = str(self.context.fileobj.tell())
            self.context.fileobj.seek(0, 0)
            self.context.options["tsize"] = tsize
        if sendoack:
            # Note, next_block is 0 here since that's the proper
            # acknowledgement to an OACK.
            # FIXME: perhaps we do need a TftpStateExpectOACK class...
            self.sendOACK()
            # Note, self.context.next_block is already 0.
        else:
            self.context.next_block = 1
            logger.debug("No requested options, starting send...")
            self.context.pending_complete = self.sendDAT()
        # Note, we expect an ack regardless of whether we sent a DAT or an
        # OACK.
        return TftpStateExpectACK(self.context)

    # Note, we don't have to check any other states in this method, that's
    # up to the caller.
class TFTPStateServerRecvWRQ(TFTPServerState):
    """This class represents the state of the TFTP server when it has just
    received a WRQ packet."""

    def make_subdirs(self):
        """The purpose of this method is to, if necessary, create all of the
        subdirectories leading up to the file to the written."""
        # Pull off everything below the root.
        subpath = self.full_path[len(self.context.root) :]
        subpath = subpath.decode() if isinstance(subpath, bytes) else subpath
        logger.debug("make_subdirs: subpath is %s", subpath)
        # Split on directory separators, but drop the last one, as it should
        # be the filename.
        dirs = subpath.split("/")[:-1]
        logger.debug("dirs is %s", dirs)
        current = self.context.root
        for dir in dirs:
            if dir:
                current = os.path.join(current, dir)
                if self.context.vfs.isdir(current):
                    logger.debug("%s is already an existing directory", current)
                else:
                    # 0o700: owner-only perms for honeypot-created dirs.
                    self.context.vfs.makedir(current, 0o700)

    def handle(self, pkt, raddress, rport):
        """Handle an initial WRQ packet as a server."""
        logger.debug("In TFTPStateServerRecvWRQ.handle")
        sendoack = self.serverInitial(pkt, raddress, rport)
        path = self.full_path
        # Remember the upload target so the context can mirror it into
        # data_fs when the transfer completes.
        self.context.file_path = path
        path = path.decode() if isinstance(path, bytes) else path
        logger.info("Opening file %s for writing" % path)
        if self.context.vfs.exists(path):
            logger.warning(
                "File %s exists already, overwriting..."
                % (self.context.file_to_transfer)
            )
        self.make_subdirs()
        self.context.fileobj = self.context.vfs.open(path, "wb")
        # Options negotiation.
        if sendoack:
            logger.debug("Sending OACK to client")
            self.sendOACK()
        else:
            logger.debug("No requested options, expecting transfer to begin...")
            self.sendACK()
        self.context.next_block = 1
        return TftpStateExpectDAT(self.context)
class TFTPStateServerStart(TFTPState):
    """The start state for the server. This is a transitory state since at
    this point we don't know if we're handling an upload or a download. We
    will commit to one of them once we interpret the initial packet."""

    def handle(self, pkt, raddress, rport):
        """Dispatch the first packet of a session to the RRQ or WRQ state."""
        logger.debug("Using TFTPStateServerStart.handle")
        if isinstance(pkt, TftpPacketRRQ):
            logger.debug("Handling an RRQ packet")
            read_state = TFTPStateServerRecvRRQ(self.context)
            return read_state.handle(pkt, raddress, rport)
        if isinstance(pkt, TftpPacketWRQ):
            logger.debug("Handling a WRQ packet")
            write_state = TFTPStateServerRecvWRQ(self.context)
            return write_state.handle(pkt, raddress, rport)
        # Only RRQ/WRQ may open a session; anything else is a protocol error.
        self.sendError(tftpy.TftpErrors.IllegalTftpOp)
        raise TftpException("Invalid packet to begin upload/download: %s" % pkt)
class TFTPContextServer(tftpy.TftpContexts.TftpContextServer):
    """Simple TFTP server handler wrapper. Use conpot's filesystem wrappers rather than os.*"""

    # Path of the file being uploaded (set by the WRQ state).
    file_path = None
    _already_uploaded = (
        False  # Since with UDP, we can't differentiate between when a user disconnected
    )
    # after successful upload and when the client timed out, we would allow file copy on data_fs only once

    # NOTE(review): "__int__" is almost certainly a typo for "__init__", so
    # this method is never invoked as the constructor — instances are set up
    # by the parent TftpContextServer.__init__ instead. Renaming it here
    # would change runtime behavior (this body differs from the parent
    # initializer and e.g. calls bind("", 0) without a tuple), so it is
    # deliberately left untouched and only flagged.
    def __int__(self, host, port, timeout, root, dyn_file_func, upload_open):
        tftpy.TftpContexts.TftpContextServer.__init__(
            self,
            host=host,
            port=port,
            timeout=timeout,
            root=root,
            dyn_file_func=None,
            upload_open=None,
        )
        self.state = TFTPStateServerStart(self)
        self.log = logger
        self.state.log = logger
        self.root = root
        if self.sock:
            self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            self.sock.bind("", 0)
        self.data_fs_fileobj = None
        self.vfs = None
        self.data_fs = None
        self.sock.setblocking(False)

    def start(self, buffer):
        """Parse the initial packet and enter the server start state."""
        logger.debug(
            "In TFTPContextServer - Starting TFTP context with : {}".format(buffer)
        )
        self.metrics.start_time = time.time()
        self.last_update = time.time()
        pkt = self.factory.parse(buffer)
        self.state = TFTPStateServerStart(self)
        self.state = self.state.handle(pkt, self.host, self.port)

    def end(self):
        """Close socket/files and, after a completed upload, mirror the
        uploaded file from the vfs into data_fs exactly once."""
        logger.debug("In TFTPContextServer.end - closing socket and files.")
        self.sock.close()
        if self.fileobj is not None and not self.fileobj.closed:
            logger.debug("self.fileobj is open - closing")
            self.fileobj.close()
        # state becomes falsy only when the transfer finished cleanly;
        # _already_uploaded guards against copying the same upload twice.
        if not self.state and (not self._already_uploaded):
            if self.file_path:
                # Return None only when transfer is complete!
                logger.info("TFTP : Transfer Complete!")
                _file_path = (
                    self.file_path
                    if isinstance(self.file_path, str)
                    else self.file_path.decode()
                )
                _data_fs_filename = sanitize_file_name(
                    "".join(_file_path.split("/")[-1:]), self.host, self.port
                )
                logger.info("Opening {} for data_fs writing.".format(_data_fs_filename))
                with self.vfs.open(_file_path, "rb") as _vfs_file:
                    with self.data_fs.open(_data_fs_filename, "wb") as _data_file:
                        content = _vfs_file.read()
                        _data_file.write(content)
                self._already_uploaded = True
        self.metrics.end_time = time.time()
        logger.debug("Set metrics.end_time to %s", self.metrics.end_time)
        self.metrics.compute()
| 10,621 | Python | .py | 227 | 36.45815 | 106 | 0.623118 | mushorg/conpot | 1,224 | 413 | 115 | GPL-2.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,497 | databus_mediator.py | mushorg_conpot/conpot/protocols/snmp/databus_mediator.py | # Copyright (C) 2013 Daniel creo Haslinger <creo-conpot@blackmesa.at>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
# this class mediates between the SNMP attack surface and conpots databus
# furthermore it keeps request statistics iot evade being used as a DOS
# reflection tool
from pysnmp.smi import builder
from datetime import datetime
import conpot.core as conpot_core
class DatabusMediator(object):
    """Mediates between the SNMP attack surface and conpot's databus.

    Also keeps per-minute request statistics so the SNMP responder can
    avoid being abused as a DoS reflection tool.
    """

    # Maps the profile's reference class name to the (MIB module, symbol)
    # pair needed to import the matching pysnmp response class. Replaces
    # the previous seven-branch if/elif ladder.
    _RESPONSE_CLASSES = {
        "DisplayString": ("SNMPv2-TC", "DisplayString"),
        "OctetString": ("ASN1", "OctetString"),
        "Integer32": ("SNMPv2-SMI", "Integer32"),
        "Counter32": ("SNMPv2-SMI", "Counter32"),
        "Gauge32": ("SNMPv2-SMI", "Gauge32"),
        "TimeTicks": ("SNMPv2-SMI", "TimeTicks"),
        "DateAndTime": ("SNMPv2-TC", "DateAndTime"),
    }

    def __init__(self, oid_mappings):
        """initiate variables"""
        self.evasion_table = {}  # per-minute request counters
        self.start_time = datetime.now()
        self.oid_map = oid_mappings  # mapping between OIDs and databus keys
        self.databus = conpot_core.get_databus()

    def get_response(self, reference_class, OID):
        """Return the databus value for OID wrapped in the matching SNMP type.

        :param reference_class: SNMP type name from the template (e.g. "Gauge32").
        :param OID: OID key into the databus mapping.
        :return: typed response object, ``None`` when the OID is unknown, or
            ``False`` when the reference class is unsupported (contract kept
            from the original implementation).
        """
        if OID not in self.oid_map:
            return None
        try:
            module, symbol = self._RESPONSE_CLASSES[reference_class]
        except KeyError:
            # dynamic responses are not supported for this class (yet)
            return False
        (response_class,) = builder.MibBuilder().importSymbols(module, symbol)
        response_value = self.databus.get_value(self.oid_map[OID])
        return response_class(response_value)

    def set_value(self, OID, value):
        """Write a value to the databus key mapped to OID."""
        # TODO: Access control. The profile should indicate which OIDs are writable
        self.databus.set_value(self.oid_map[OID], value)

    def update_evasion_table(self, client_ip):
        """Update the per-minute evasion table and return request counters.

        :param client_ip: (address, port) tuple of the remote peer.
        :return: tuple of (requests from this client this minute,
            overall requests this minute).
        """
        now = datetime.now()
        # Epoch timestamp truncated to the current minute; a single key per
        # minute lets stale minutes be dropped wholesale below.
        epoch_minute = int(
            (
                datetime(now.year, now.month, now.day, now.hour, now.minute)
                - datetime(1970, 1, 1)
            ).total_seconds()
        )
        # A new minute resets the whole table (only the current minute matters).
        if epoch_minute not in self.evasion_table:
            self.evasion_table.clear()
            self.evasion_table[epoch_minute] = {"overall": 0}
        minute_stats = self.evasion_table[epoch_minute]
        minute_stats.setdefault(client_ip[0], 0)
        minute_stats[client_ip[0]] += 1
        minute_stats["overall"] += 1
        return minute_stats[client_ip[0]], minute_stats["overall"]
| 4,617 | Python | .py | 97 | 37.237113 | 82 | 0.619333 | mushorg/conpot | 1,224 | 413 | 115 | GPL-2.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,498 | snmp_server.py | mushorg_conpot/conpot/protocols/snmp/snmp_server.py | # Copyright (C) 2013 Lukas Rist <glaslos@gmail.com>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import logging
import os
from lxml import etree
import conpot.core as conpot_core
from conpot.core.protocol_wrapper import conpot_protocol
from conpot.protocols.snmp.command_responder import CommandResponder
logger = logging.getLogger()
@conpot_protocol
class SNMPServer(object):
    """Conpot SNMP protocol front-end.

    Parses the protocol template, configures the pysnmp-based command
    responder (tarpit delays, DoS-evasion thresholds, emulated MIBs) and
    serves SNMP on the requested endpoint.
    """

    def __init__(self, template, template_directory, args):
        """
        :param template: path to the protocol specific xml configuration file (string).
        :param template_directory: template root; raw MIB files live in its snmp/mibs subdir.
        :param args: runtime arguments; ``args.mibcache`` is the compiled-MIB cache dir.
        """
        self.dom = etree.parse(template)
        self.cmd_responder = None  # created in start()
        self.compiled_mibs = args.mibcache
        self.raw_mibs = os.path.join(template_directory, "snmp", "mibs")

    def xml_general_config(self, dom):
        """Apply the template's <config> entities to the command responder.

        Supports per-command 'tarpit' (response delay) and 'evasion'
        (request-rate threshold) settings for get/set/next/bulk. Replaces
        the previous duplicated 2x4 if/elif ladder with a getattr dispatch.
        """
        for entity in dom.xpath("//snmp/config/*"):
            name = entity.attrib["name"].lower()
            if name not in ("tarpit", "evasion"):
                continue
            command = entity.attrib.get("command", "").lower()
            if command not in ("get", "set", "next", "bulk"):
                # unknown commands are ignored, as before
                continue
            responder = getattr(self.cmd_responder, "resp_app_" + command)
            if name == "tarpit":
                # TARPIT: individual response delays
                responder.tarpit = self.config_sanitize_tarpit(entity.text)
            else:
                # EVASION: response thresholds
                responder.threshold = self.config_sanitize_threshold(entity.text)

    def xml_mib_config(self):
        """Register every MIB symbol declared in the template with the responder."""
        for mib in self.dom.xpath("//snmp/mibs/*"):
            mib_name = mib.attrib["name"]
            for symbol in mib:
                symbol_name = symbol.attrib["name"]
                if "instance" in symbol.attrib:
                    # convert the dotted instance string to an int tuple
                    symbol_instance = tuple(
                        map(int, symbol.attrib["instance"].split("."))
                    )
                else:
                    # use default instance (0)
                    symbol_instance = (0,)
                profile_map_name = symbol.xpath("./value/text()")[0]
                # seed the initial value from the databus
                value = conpot_core.get_databus().get_value(profile_map_name)
                # register this MIB instance to the command responder
                self.cmd_responder.register(
                    mib_name, symbol_name, symbol_instance, value, profile_map_name
                )

    def config_sanitize_tarpit(self, value):
        """Sanitize a tarpit setting of the form "x" or "x;y" (floats).

        Returns "0;0" when the first component is invalid, just the first
        component when only the second is invalid, otherwise the value as-is.
        """
        if value is None:
            return "0;0"
        x, _, y = value.partition(";")
        try:
            float(x)
        except ValueError:
            logger.error(
                "SNMP invalid tarpit value: '%s'. Assuming no latency.", value
            )
            # first value is invalid, ignore the whole setting.
            return "0;0"
        try:
            float(y)
        except ValueError:
            # second value is invalid, use the first one.
            return x
        # both values are fine.
        return value

    def config_sanitize_threshold(self, value):
        """Sanitize a DoS-evasion threshold of the form "x" or "x;y" (ints).

        Returns "0;0" when the first component is invalid, "x;0" when only
        the second is invalid, otherwise the value as-is.
        """
        if value is None:
            return "0;0"
        x, _, y = value.partition(";")
        try:
            int(x)
        except ValueError:
            logger.error(
                "SNMP invalid evasion threshold: '%s'. Assuming no DoS evasion.",
                value,
            )
            # first value is invalid, ignore the whole setting.
            return "0;0"
        try:
            int(y)
        except ValueError:
            # second value is invalid, use the first and ignore the second.
            return x + ";0"
        # both values are fine.
        return value

    def start(self, host, port):
        """Build the command responder, apply template config and serve forever."""
        self.cmd_responder = CommandResponder(
            host, port, self.raw_mibs, self.compiled_mibs
        )
        self.xml_general_config(self.dom)
        self.xml_mib_config()
        logger.info("SNMP server started on: %s", (host, self.get_port()))
        self.cmd_responder.serve_forever()

    def stop(self):
        """Stop the command responder, if it was ever started."""
        if self.cmd_responder:
            self.cmd_responder.stop()

    def get_port(self):
        """Return the bound UDP port, or None before start()."""
        if self.cmd_responder:
            return self.cmd_responder.server_port
        return None
| 7,394 | Python | .py | 161 | 32.043478 | 102 | 0.5463 | mushorg/conpot | 1,224 | 413 | 115 | GPL-2.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |
25,499 | command_responder.py | mushorg_conpot/conpot/protocols/snmp/command_responder.py | # Command Responder (GET/GETNEXT)
# Based on examples from http://pysnmp.sourceforge.net/
import logging
from pysmi.reader import FileReader, FtpReader
from pysnmp.entity import config
from pysnmp.entity.rfc3413 import context
from pysnmp.carrier.asynsock.dgram import udp
from pysnmp.entity import engine
from pysnmp.smi.compiler import addMibCompiler
import gevent
from conpot.protocols.snmp import conpot_cmdrsp
from conpot.protocols.snmp.databus_mediator import DatabusMediator
from gevent.server import DatagramServer
logger = logging.getLogger(__name__)
class SNMPDispatcher(DatagramServer):
    """Minimal pysnmp transport dispatcher backed by a gevent DatagramServer.

    NOTE(review): DatagramServer.__init__ is deliberately deferred until
    registerTransport() supplies the prepared socket; until then the
    instance is only partially constructed.
    """

    def __init__(self):
        # Tick interval (seconds) reported to pysnmp via getTimerResolution().
        self.__timerResolution = 0.5

    def registerRecvCbFun(self, recvCbFun, recvId=None):
        # Callback the SNMP engine wants invoked for every received datagram.
        self.recvCbFun = recvCbFun

    def handle(self, msg, address):
        """gevent DatagramServer handler: forward the datagram to pysnmp."""
        try:
            self.recvCbFun(self, self.transportDomain, address, msg)
        except Exception as e:
            # Malformed packets must not kill the server loop; log and continue.
            logger.info("SNMP Exception: %s", e)

    def registerTransport(self, tDomain, transport):
        # Late initialization: bind the gevent server to the prepared socket.
        DatagramServer.__init__(self, transport, self.handle)
        self.transportDomain = tDomain

    def registerTimerCbFun(self, timerCbFun, tickInterval=None):
        # pysnmp timer callbacks are not used by this dispatcher.
        pass

    def sendMessage(self, outgoingMessage, transportDomain, transportAddress):
        # Reply on the same UDP socket the request arrived on.
        self.socket.sendto(outgoingMessage, transportAddress)

    def getTimerResolution(self):
        return self.__timerResolution
class CommandResponder(object):
    """Wires a pysnmp SNMP engine to conpot's databus over gevent UDP.

    Builds the engine, configures MIB compilation, binds the UDP transport,
    sets up SNMPv1/v2c and SNMPv3 (USM) credentials plus VACM access rules,
    and registers conpot's custom GET/SET/GETNEXT/GETBULK responders, which
    resolve OID values through a DatabusMediator.
    """

    def __init__(self, host, port, raw_mibs, compiled_mibs):
        """Build and fully configure the SNMP engine.

        :param host: address to bind the UDP listener to
        :param port: UDP port to bind; 0 selects an ephemeral port, with
            the real port exposed as ``self.server_port``
        :param raw_mibs: directory containing MIB sources to compile
        :param compiled_mibs: destination directory for compiled MIBs
        """
        # mapping between registered OIDs and databus keys; shared with the
        # mediator and filled in by register()
        self.oid_mapping = {}
        self.databus_mediator = DatabusMediator(self.oid_mapping)

        # Create SNMP engine
        self.snmpEngine = engine.SnmpEngine()

        # Configure the MIB compiler: local MIB sources first, Cisco's FTP
        # mirror as a remote fallback.
        # NOTE(review): FtpReader's third positional argument is a timeout,
        # not a port number — confirm the literal 80 is intended.
        mib_builder = self.snmpEngine.getMibBuilder()

        addMibCompiler(mib_builder, destination=compiled_mibs)
        mib_builder.getMibCompiler().addSources(FileReader(raw_mibs))
        mib_builder.getMibCompiler().addSources(
            FtpReader("ftp.cisco.com", "/pub/mibs/v2/@mib@", 80)
        )

        # Transport setup: create and bind the UDP socket ourselves so that
        # an ephemeral bind (port=0) can be reported back via server_port.
        udp_sock = gevent.socket.socket(gevent.socket.AF_INET, gevent.socket.SOCK_DGRAM)
        udp_sock.setsockopt(gevent.socket.SOL_SOCKET, gevent.socket.SO_BROADCAST, 1)
        udp_sock.bind((host, port))
        self.server_port = udp_sock.getsockname()[1]
        # UDP over IPv4
        self.addSocketTransport(self.snmpEngine, udp.domainName, udp_sock)

        # SNMPv1/v2c community string setup
        config.addV1System(self.snmpEngine, "public-read", "public")

        # SNMPv3/USM setup
        # user: usr-md5-des, auth: MD5, priv DES
        config.addV3User(
            self.snmpEngine,
            "usr-md5-des",
            config.usmHMACMD5AuthProtocol,
            "authkey1",
            config.usmDESPrivProtocol,
            "privkey1",
        )
        # user: usr-sha-none, auth: SHA, priv NONE
        config.addV3User(
            self.snmpEngine, "usr-sha-none", config.usmHMACSHAAuthProtocol, "authkey1"
        )
        # user: usr-sha-aes128, auth: SHA, priv AES/128
        config.addV3User(
            self.snmpEngine,
            "usr-sha-aes128",
            config.usmHMACSHAAuthProtocol,
            "authkey1",
            config.usmAesCfb128Protocol,
            "privkey1",
        )

        # VACM: grant each user read/write on the MIB-2 subtree (1.3.6.1.2.1).
        # Security levels 1/2/3 correspond to SNMPv1, SNMPv2c and SNMPv3.
        config.addVacmUser(
            self.snmpEngine,
            1,
            "public-read",
            "noAuthNoPriv",
            readSubTree=(1, 3, 6, 1, 2, 1),
            writeSubTree=(1, 3, 6, 1, 2, 1),
        )
        config.addVacmUser(
            self.snmpEngine,
            2,
            "public-read",
            "noAuthNoPriv",
            readSubTree=(1, 3, 6, 1, 2, 1),
            writeSubTree=(1, 3, 6, 1, 2, 1),
        )
        config.addVacmUser(
            self.snmpEngine,
            3,
            "usr-md5-des",
            "authPriv",
            readSubTree=(1, 3, 6, 1, 2, 1),
            writeSubTree=(1, 3, 6, 1, 2, 1),
        )
        config.addVacmUser(
            self.snmpEngine,
            3,
            "usr-sha-none",
            "authNoPriv",
            readSubTree=(1, 3, 6, 1, 2, 1),
            writeSubTree=(1, 3, 6, 1, 2, 1),
        )
        config.addVacmUser(
            self.snmpEngine,
            3,
            "usr-sha-aes128",
            "authPriv",
            readSubTree=(1, 3, 6, 1, 2, 1),
            writeSubTree=(1, 3, 6, 1, 2, 1),
        )

        # Get default SNMP context this SNMP engine serves
        snmpContext = context.SnmpContext(self.snmpEngine)

        # Register conpot's databus-aware command responders (GET, SET,
        # GETNEXT, GETBULK) at the SNMP engine for this context.
        self.resp_app_get = conpot_cmdrsp.c_GetCommandResponder(
            self.snmpEngine, snmpContext, self.databus_mediator, host, port
        )
        self.resp_app_set = conpot_cmdrsp.c_SetCommandResponder(
            self.snmpEngine, snmpContext, self.databus_mediator, host, port
        )
        self.resp_app_next = conpot_cmdrsp.c_NextCommandResponder(
            self.snmpEngine, snmpContext, self.databus_mediator, host, port
        )
        self.resp_app_bulk = conpot_cmdrsp.c_BulkCommandResponder(
            self.snmpEngine, snmpContext, self.databus_mediator, host, port
        )

    def addSocketTransport(self, snmpEngine, transportDomain, transport):
        """Add transport object to socket dispatcher of snmpEngine.

        Installs an SNMPDispatcher on the engine on first use, then
        registers the (domain, socket) pair with it.
        """
        if not snmpEngine.transportDispatcher:
            snmpEngine.registerTransportDispatcher(SNMPDispatcher())
        snmpEngine.transportDispatcher.registerTransport(transportDomain, transport)

    def register(self, mibname, symbolname, instance, value, profile_map_name):
        """Register a scalar OID instance and link it to a databus key.

        :param mibname: MIB module containing the symbol
        :param symbolname: object name within that MIB
        :param instance: OID instance suffix as a tuple (appended to the
            symbol's OID both for the exported instance and the mapping key)
        :param value: initial value, cloned into the symbol's ASN.1 syntax
        :param profile_map_name: databus key this OID is mediated through

        Symbols missing from the MIB are skipped with a warning rather
        than raising.
        """
        self.snmpEngine.msgAndPduDsp.mibInstrumController.mibBuilder.loadModules(
            mibname
        )
        s = self._get_mibSymbol(mibname, symbolname)
        if s:
            # key is the full instance OID: symbol OID + instance suffix
            self.oid_mapping[s.name + instance] = profile_map_name

            (MibScalarInstance,) = (
                self.snmpEngine.msgAndPduDsp.mibInstrumController.mibBuilder.importSymbols(
                    "SNMPv2-SMI", "MibScalarInstance"
                )
            )
            x = MibScalarInstance(s.name, instance, s.syntax.clone(value))
            # export the new scalar instance so the engine can serve it
            self.snmpEngine.msgAndPduDsp.mibInstrumController.mibBuilder.exportSymbols(
                mibname, x
            )

            logger.debug(
                "Registered: OID %s Instance %s ASN.1 (%s @ %s) value %s dynrsp.",
                s.name,
                instance,
                s.label,
                mibname,
                value,
            )

        else:
            logger.warning(
                "Skipped: OID for symbol %s not found in MIB %s", symbolname, mibname
            )

    def _get_mibSymbol(self, mibname, symbolname):
        """Look up a symbol in the loaded MIB modules; None if not found."""
        modules = (
            self.snmpEngine.msgAndPduDsp.mibInstrumController.mibBuilder.mibSymbols
        )
        if mibname in modules:
            if symbolname in modules[mibname]:
                return modules[mibname][symbolname]

    def serve_forever(self):
        """Run the transport dispatcher; blocks until stop() is called."""
        self.snmpEngine.transportDispatcher.serve_forever()

    def stop(self):
        """Stop the transport dispatcher, unblocking serve_forever()."""
        self.snmpEngine.transportDispatcher.stop()
| 7,258 | Python | .py | 180 | 30.172222 | 91 | 0.617852 | mushorg/conpot | 1,224 | 413 | 115 | GPL-2.0 | 9/5/2024, 5:13:34 PM (Europe/Amsterdam) |