# -*- coding: utf-8 -*-
#-------------------------------------------------------------------------------
# Name: zdde.py
# Purpose: Python based DDE link with ZEMAX server, similar to Matlab based
# MZDDE toolbox.
# Licence: MIT License
# This file is subject to the terms and conditions of the MIT License.
# For further details, please refer to LICENSE.txt
# Revision: 2.0.3
#-------------------------------------------------------------------------------
"""PyZDDE, which is a toolbox written in Python, is used for communicating
with ZEMAX using the Microsoft's Dynamic Data Exchange (DDE) messaging
protocol. The docstring examples in the functions assume that PyZDDE is
imported as ``import pyzdde.zdde as pyz`` and a PyZDDE communication object
is then created as ``ln = pyz.createLink()`` or ``ln = pyz.PyZDDE();
ln.zDDEInit()``.
"""
from __future__ import division
from __future__ import print_function
import sys as _sys
#import struct as _struct
import os as _os
import collections as _co
import subprocess as _subprocess
import math as _math
import time as _time
import datetime as _datetime
import re as _re
import shutil as _shutil
import warnings as _warnings
import codecs as _codecs
# Try to import IPython if it is available (for notebook helper functions)
try:
from IPython.core.display import display as _display
from IPython.core.display import Image as _Image
except ImportError:
_global_IPLoad = False
else:
_global_IPLoad = True
# Determine if in IPython Environment
try: # get_ipython() method is not available in IPython versions prior to 2.0
from IPython import get_ipython as _get_ipython
except:
_global_in_IPython_env = False
else:
if _get_ipython(): # if global interactive shell instance is available
_global_in_IPython_env = True
else:
_global_in_IPython_env = False
# Try to import Matplotlib's imread
try:
import matplotlib.image as _matimg
except ImportError:
_global_mpl_img_load = False
else:
_global_mpl_img_load = True
# The first module to import that is not one of the standard modules MUST
# be the config module as it sets up the different global and settings variables
_currDir = _os.path.dirname(_os.path.realpath(__file__))
_pDir = _os.path.split(_currDir)[0]
settings_file = _os.path.join(_currDir, "settings.ini")
if not _os.path.isfile(settings_file):
src = _os.path.join(_currDir, "settings.ini-dist")
_shutil.copy(src, settings_file)
import pyzdde.config as _config
_global_pyver3 = _config._global_pyver3
_global_use_unicode_text = _config._global_use_unicode_text
imageMagickSettings = _config.getImageMagickSettings()
_global_use_installed_imageMagick = imageMagickSettings[0]
_global_imageMagick_dir = imageMagickSettings[1]
# DDEML communication module
_global_ddeclient_load = False # True if module could be loaded.
try:
import pyzdde.ddeclient as _dde
_global_ddeclient_load = True
except ImportError:
# System may not be windows; only provide functions that do not use zemax
print("DDE client couldn't be loaded. All functions prefixed with"
" \"z\" or \"ipz\" may not work.")
# Python 2/ Python 3 differential imports
if _global_pyver3:
_izip = zip
_imap = map
xrange = range
import tkinter as _tk
import tkinter.messagebox as _MessageBox
else:
from itertools import izip as _izip, imap as _imap
import Tkinter as _tk
import tkMessageBox as _MessageBox
# Pyzdde local module imports
import pyzdde.zcodes.zemaxbuttons as zb
import pyzdde.zcodes.zemaxoperands as zo
import pyzdde.utils.pyzddeutils as _putils
import pyzdde.zfileutils as _zfu
#%% Constants
_DEBUG_PRINT_LEVEL = 0 # 0 = No debug prints, but allow all essential prints
# 1 to 2 levels of debug print, 2 = print all
_MAX_PARALLEL_CONV = 2 # Max no of simul. conversations possible with Zemax
_system_aperture = {0 : 'EPD',
1 : 'Image space F/#',
2 : 'Object space NA',
3 : 'Float by stop',
4 : 'Paraxial working F/#',
5 : 'Object cone angle'}
macheps = _sys.float_info.epsilon # machine epsilon
#%% Helper function for debugging
def _debugPrint(level, msg):
    """Internal helper function to print debug messages

    Parameters
    ----------
    level : integer (0, 1 or 2)
        0 = message will definitely be printed;
        1 or 2 = message will be printed only if
        ``level <= _DEBUG_PRINT_LEVEL``.
    msg : string
        message to print
    """
    # No ``global`` declaration needed: the module-level _DEBUG_PRINT_LEVEL
    # is only read here, never rebound.
    if level <= _DEBUG_PRINT_LEVEL:
        print("DEBUG PRINT, module - zdde (Level " + str(level)+ "): " + msg)
#%% Module methods
# bind functions from utils module
cropImgBorders = _putils.cropImgBorders
imshow = _putils.imshow
# bind functions from zemax buttons module
findZButtonCode = zb.findZButtonCode
getZButtonCount = zb.getZButtonCount
isZButtonCode = zb.isZButtonCode
showZButtonList = zb.showZButtonList
showZButtonDescription = zb.showZButtonDescription
# bind functions from zemax operand module
findZOperand = zo.findZOperand
getZOperandCount = zo.getZOperandCount
isZOperand = zo.isZOperand
showZOperandList = zo.showZOperandList
showZOperandDescription = zo.showZOperandDescription
# decorator for automatically push and refresh to and from LDE (Experimental)
def autopushandrefresh(func):
    """Decorator (experimental) that automatically synchronizes the lens
    between the LDE and the DDE server around a data-item call.

    When the wrapped instance's ``apr`` flag is ``True``, a ``GetRefresh``
    is requested before any Get/Set/Insert/Delete command, and a
    ``PushLens,1`` is requested after any Set/Insert/Delete command.
    """
    import functools as _functools  # local import; module is not imported at file top

    @_functools.wraps(func)  # preserve the wrapped function's name/docstring
    def wrapped(self, *args, **kwargs):
        # guard against calls with no positional command string
        cmd = args[0] if args else ''
        if self.apr:  # automatic push-refresh is enabled
            if cmd.startswith(('Get', 'Set', 'Insert', 'Delete')):
                # pull the lens from the LDE into the DDE server first
                self._conversation.Request('GetRefresh')
            reply = func(self, *args, **kwargs)
            if cmd.startswith(('Set', 'Insert', 'Delete')):
                # push the (possibly modified) lens back into the LDE
                self._conversation.Request('PushLens,1')
        else:
            reply = func(self, *args, **kwargs)
        return reply
    return wrapped
# registry of live links: {PyZDDE object: app-name}
_global_dde_linkObj = {}
def createLink(apr=False):
    """Create and initialize a DDE communication link with Zemax

    Usage: ``import pyzdde.zdde as pyz; ln = pyz.createLink()``

    Helper function that creates, initializes and returns a PyZDDE
    communication object.

    Parameters
    ----------
    apr : bool
        if `True`, automatically push and refresh lens to and from LDE to DDE

    Returns
    -------
    link : object
        a PyZDDE communication object if successful, else ``None``.

    Notes
    -----
    1. May be used instead of ``ln = pyz.PyZDDE(); ln.zDDEInit()``.
    2. Zemax application must be running.

    See Also
    --------
    closeLink(), zDDEInit()
    """
    global _global_dde_linkObj
    global _MAX_PARALLEL_CONV
    if len(_global_dde_linkObj) >= _MAX_PARALLEL_CONV:
        print("Link not created. Reached maximum allowable live link of ",
              _MAX_PARALLEL_CONV)
        return None
    link = PyZDDE(apr=apr)
    if link.zDDEInit():  # non-zero return (-1) means initialization failed
        print("Could not initiate instance.")
        return None
    _global_dde_linkObj[link] = link._appName  # may hold something more useful later
    _debugPrint(1, "Link created. Link Dict = {}".format(_global_dde_linkObj))
    return link
def closeLink(link=None):
    """Close DDE communication link with Zemax

    Usage: ``pyz.closeLink([ln])``

    Helper function, for closing DDE communication.

    Parameters
    ----------
    link : PyZDDE link object, optional
        If a specific link object is not given, all existing links are
        closed.

    Returns
    -------
    None

    See Also
    --------
    zDDEClose() :
        PyZDDE instance method to close a link.
        Use this method (as ``ln.zDDEClose()``) if the link was created as \
        ``ln = pyz.PyZDDE(); ln.zDDEInit()``
    close() :
        Another instance method to close a link for easy typing.
        Use this method (as ``ln.close()``) or ``pyz.closeLink(ln)`` if the \
        link was created as ``ln = pyz.createLink()``
    """
    global _global_dde_linkObj
    dde_closedLinkObj = []
    if link:
        link.zDDEClose()
        dde_closedLinkObj.append(link)
    else:
        for link in _global_dde_linkObj:
            link.zDDEClose()
            dde_closedLinkObj.append(link)
    for item in dde_closedLinkObj:
        # pop with a default: a link created directly via
        # ``ln = pyz.PyZDDE(); ln.zDDEInit()`` was never registered in
        # _global_dde_linkObj, so a bare pop() would raise KeyError when
        # such a link is closed via ``ln.close()``.
        _global_dde_linkObj.pop(item, None)
def setTextEncoding(txt_encoding=0):
    """Set PyZDDE text encoding to match the TXT encoding in Zemax

    Usage: ``pyz.setTextEncoding([txt_encoding])``

    Parameters
    ----------
    txt_encoding : integer (0 or 1)
        0 = ASCII; 1 = UNICODE

    Returns
    -------
    None
        The current/new setting is reported via printed messages.

    Notes
    -----
    Not required to set the encoding for every new session as PyZDDE stores
    the setting.

    See Also
    --------
    getTextEncoding()
    """
    global _global_use_unicode_text
    want_unicode = bool(txt_encoding)
    if _global_use_unicode_text == want_unicode:
        # already in the requested mode; nothing to persist
        if want_unicode:
            print('TXT encoding is UNICODE; no change required')
        else:
            print('TXT encoding is ASCII; no change required')
    elif _config.setTextEncoding(txt_encoding=1 if want_unicode else 0):
        _global_use_unicode_text = want_unicode
        if want_unicode:
            print('Successfully changed to UNICODE')
        else:
            print('Successfully changed to ASCII')
    else:
        print("ERROR: Couldn't change settings")
def getTextEncoding():
    """Return the text encoding currently set in PyZDDE

    Usage: ``pyz.getTextEncoding()``

    Returns
    -------
    encoding : string
        'ascii' or 'unicode'

    See Also
    --------
    setTextEncoding
    """
    # the setting is persisted by (and read back from) the config module
    return _config.getTextEncoding()
def setImageMagickSettings(use_installed_ImageMagick, imageMagick_dir=None):
    """Set the use-flag and the ImageMagick installation-directory settings

    Parameters
    ----------
    use_installed_ImageMagick : bool
        ``True`` to use a separately installed version of ImageMagick,
        ``False`` otherwise
    imageMagick_dir : string, optional
        full path to the installation directory, e.g.
        ``C:\\Program Files\\ImageMagick-6.8.9-Q8``

    Returns
    -------
    imageMagick_settings : tuple
        the updated (use_flag, directory) ImageMagick settings

    Raises
    ------
    ValueError
        if the flag is not a bool, the directory is invalid, or
        ``convert.exe`` cannot be found in the given directory.
    """
    global _global_use_installed_imageMagick
    global _global_imageMagick_dir
    if not isinstance(use_installed_ImageMagick, bool):
        raise ValueError("Expecting bool")
    if imageMagick_dir and not _os.path.isdir(imageMagick_dir):
        raise ValueError("Expecting valid directory or None")
    if imageMagick_dir:
        # the directory must actually contain the ImageMagick converter
        convert_exe = _os.path.join(imageMagick_dir, 'convert.exe')
        if not _os.path.isfile(convert_exe):
            raise ValueError("Couldn't find program convert.exe in the path!")
    # persist, then read back so the module globals mirror the stored state
    _config.setImageMagickSettings(use_installed_ImageMagick, imageMagick_dir)
    current = _config.getImageMagickSettings()
    _global_use_installed_imageMagick, _global_imageMagick_dir = current
    return (_global_use_installed_imageMagick, _global_imageMagick_dir)
def getImageMagickSettings():
    """Return the use-flag and ImageMagick installation-directory settings

    Returns
    -------
    use_flag : bool
        if ``True``, PyZDDE uses the separately installed version of the
        ImageMagick software; if ``False``, the copy that ships with
        PyZDDE is used.
    imageMagick_dir : string
        ImageMagick installation directory.
    """
    # settings are persisted by the config module
    return _config.getImageMagickSettings()
# PyZDDE class' utility function (for internal use)
def _createAppNameDict(maxElements):
"""Internal function to create a pool (dictionary) of possible app-names
Parameters
----------
maxElements : integer
maximum elements in the dictionary
Returns
-------
appNameDict : dictionary
dictionary of app-names (keys) with values, set to False, indicating
name hasn't been taken.
"""
appNameDict = {}
appNameDict['ZEMAX'] = False
for i in range(1, maxElements):
appNameDict['ZEMAX'+str(i)] = False
return appNameDict
def _getAppName(appNameDict):
"""Return available name from the pool of app-names.
"""
if not appNameDict['ZEMAX']:
appNameDict['ZEMAX'] = True
return 'ZEMAX'
else:
k_available = None
for k, v in appNameDict.items():
if not v:
k_available = k
break
if k_available:
appNameDict[k_available] = True
return k_available
else:
return None
#%% PyZDDE class
class PyZDDE(object):
"""PyZDDE class for communicating with Zemax
There are two ways of instantiating and initiating a PyZDDE object:
1. Instantiate using ``ln = pyz.PyZDDE()`` and then initiate \
using ``ln.zDDEInit()`` or
2. ``pyz.createLink()`` instantiates & initiates a PyZDDE object & \
returns (recommended way)
"""
__chNum = 0 # channel Number; there is no restriction on number of ch
__liveCh = 0 # no. of live/ simul channels; Can't be > _MAX_PARALLEL_CONV
__server = 0
__appNameDict = _createAppNameDict(_MAX_PARALLEL_CONV) # {'ZEMAX': False, 'ZEMAX1': False}
version = '2.0.3'
# Other class variables
# Surface data codes for getting and setting surface data
SDAT_TYPE = 0 # Surface type name
SDAT_COMMENT = 1 # Comment
SDAT_CURV = 2 # Curvature
SDAT_THICK = 3 # Thickness
SDAT_GLASS = 4 # Glass
SDAT_SEMIDIA = 5 # Semi-Diameter
SDAT_CONIC = 6 # Conic
SDAT_COAT = 7 # Coating
SDAT_TCE = 8 # Thermal Coefficient of Expansion (TCE)
SDAT_UD_DLL = 9 # User-defined DLL
SDAT_IGNORE_S_FLAG = 20 # Ignore surface flag
SDAT_TILT_DCNTR_ORD_BEFORE = 51 # Before tilt and decenter order
SDAT_DCNTR_X_BEFORE = 52 # Before decenter x
SDAT_DCNTR_Y_BEFORE = 53 # Before decenter y
SDAT_TILT_X_BEFORE = 54 # Before tilt x
SDAT_TILT_Y_BEFORE = 55 # Before tilt y
SDAT_TILT_Z_BEFORE = 56 # Before tilt z
SDAT_TILT_DCNTR_STAT_AFTER = 60 # After status
SDAT_TILT_DCNTR_ORD_AFTER = 61 # After tilt and decenter order
SDAT_DCNTR_X_AFTER = 62 # After decenter x
SDAT_DCNTR_Y_AFTER = 63 # After decenter y
SDAT_TILT_X_AFTER = 64 # After tilt x
SDAT_TILT_Y_AFTER = 65 # After tilt y
SDAT_TILT_Z_AFTER = 66 # After tilt z
SDAT_USE_LAYER_MULTI_INDEX = 70 # Use Layer multipliers and index offsets
SDAT_LAYER_MULTI_VAL = 71 # Layer multiplier value
SDAT_LAYER_MULTI_STAT = 72 # Layer multiplier status
SDAT_LAYER_INDEX_OFFSET_VAL = 73 # Layer index offset value
SDAT_LAYER_INDEX_OFFSET_STAT = 74 # Layer index offset status
SDAT_LAYER_EXTINCT_OFFSET_VAL = 75 # Layer extinction offset value
SDAT_LAYER_EXTINCT_OFFSET_STAT = 76 # Layer extinction offset status
# Surface parameter codes for getting and setting solves
SOLVE_SPAR_CURV = 0 # Curvature
SOLVE_SPAR_THICK = 1 # Thickness
SOLVE_SPAR_GLASS = 2 # Glass
SOLVE_SPAR_SEMIDIA = 3 # Semi-Diameter
SOLVE_SPAR_CONIC = 4 # Conic
SOLVE_SPAR_PAR0 = 17 # Parameter 0
SOLVE_SPAR_PAR1 = 5 # Parameter 1
SOLVE_SPAR_PAR2 = 6 # Parameter 2
SOLVE_SPAR_PAR3 = 7 # Parameter 3
SOLVE_SPAR_PAR4 = 8 # Parameter 4
SOLVE_SPAR_PAR5 = 9 # Parameter 5
SOLVE_SPAR_PAR6 = 10 # Parameter 6
SOLVE_SPAR_PAR7 = 11 # Parameter 7
SOLVE_SPAR_PAR8 = 12 # Parameter 8
SOLVE_SPAR_PAR9 = 13 # Parameter 9
SOLVE_SPAR_PAR10 = 14 # Parameter 10
SOLVE_SPAR_PAR11 = 15 # Parameter 11
SOLVE_SPAR_PAR12 = 16 # Parameter 12
# Solve type code for use with get/set solve function
SOLVE_CURV_FIXED = 0 # Solve on curvature; fixed
SOLVE_CURV_VAR = 1 # Solve on curvature; variable (V)
SOLVE_CURV_MR_ANG = 2 # Solve on curvature; marginal ray angle (M)
SOLVE_CURV_CR_ANG = 3 # Solve on curvature; chief ray angle (C)
SOLVE_CURV_PICKUP = 4 # Solve on curvature; pickup (P)
SOLVE_CURV_MR_NORM = 5 # Solve on curvature; marginal ray normal (N)
SOLVE_CURV_CR_NORM = 6 # Solve on curvature; chief ray normal (N)
SOLVE_CURV_APLAN = 7 # Solve on curvature; aplanatic (A)
SOLVE_CURV_ELE_POWER = 8 # Solve on curvature; element power (X)
SOLVE_CURV_CON_SURF = 9 # Solve on curvature; concentric with surface (S)
SOLVE_CURV_CON_RADIUS = 10 # Solve on curvature; concentric with radius (R)
SOLVE_CURV_FNUM = 11 # Solve on curvature; f/# (F)
SOLVE_CURV_ZPL = 12 # Solve on curvature; zpl macro (Z)
SOLVE_THICK_FIXED = 0 # Solve on thickness; fixed
SOLVE_THICK_VAR = 1 # Solve on thickness; variable (V)
SOLVE_THICK_MR_HGT = 2 # Solve on thickness; marginal ray height (M)
SOLVE_THICK_CR_HGT = 3 # Solve on thickness; chief ray height (C)
SOLVE_THICK_EDGE_THICK = 4 # Solve on thickness; edge thickness (E)
SOLVE_THICK_PICKUP = 5 # Solve on thickness; pickup (P)
SOLVE_THICK_OPD = 6 # Solve on thickness; optical path difference (O)
SOLVE_THICK_POS = 7 # Solve on thickness; position (T)
SOLVE_THICK_COMPENSATE = 8 # Solve on thickness; compensator (S)
SOLVE_THICK_CNTR_CURV = 9 # Solve on thickness; center of curvature (X)
SOLVE_THICK_PUPIL_POS = 10 # Solve on thickness; pupil position (U)
SOLVE_THICK_ZPL = 11 # Solve on thickness; zpl macro (Z)
SOLVE_GLASS_FIXED = 0 # Solve on glass; fixed
SOLVE_GLASS_MODEL = 1 # Solve on glass; model
SOLVE_GLASS_PICKUP = 2 # Solve on glass; pickup (P)
SOLVE_GLASS_SUBS = 3 # Solve on glass; substitute (S)
SOLVE_GLASS_OFFSET = 4 # Solve on glass; offset (O)
SOLVE_SEMIDIA_AUTO = 0 # Solve on semi-diameter; automatic
SOLVE_SEMIDIA_FIXED = 1 # Solve on semi-diameter; fixed (U)
SOLVE_SEMIDIA_PICKUP = 2 # Solve on semi-diameter; pickup (P)
SOLVE_SEMIDIA_MAX = 3 # Solve on semi-diameter; maximum (M)
SOLVE_SEMIDIA_ZPL = 4 # Solve on semi-diameter; zpl macro (Z)
SOLVE_CONIC_FIXED = 0 # Solve on conic; fixed
SOLVE_CONIC_VAR = 1 # Solve on conic; variable (V)
SOLVE_CONIC_PICKUP = 2 # Solve on conic; pickup (P)
SOLVE_CONIC_ZPL = 3 # Solve on conic; zpl macro (Z)
SOLVE_PAR0_FIXED = 0 # Solve on parameter 0; fixed
SOLVE_PAR0_VAR = 1 # Solve on parameter 0; variable (V)
SOLVE_PAR0_PICKUP = 2 # Solve on parameter 0; pickup (P)
SOLVE_PARn_FIXED = 0 # Solve on parameter n (b/w 1 - 12); fixed
SOLVE_PARn_VAR = 1 # Solve on parameter n (b/w 1 - 12); variable (V)
SOLVE_PARn_PICKUP = 2 # Solve on parameter n (b/w 1 - 12); pickup (P)
SOLVE_PARn_CR = 3 # Solve on parameter n (b/w 1 - 12); chief-ray (C)
SOLVE_PARn_ZPL = 4 # Solve on parameter n (b/w 1 - 12); zpl macro (Z)
SOLVE_EDATA_FIXED = 0 # Solve on extra data values; fixed
SOLVE_EDATA_VAR = 1 # Solve on extra data values; variable (V)
SOLVE_EDATA_PICKUP = 2 # Solve on extra data values; pickup (P)
SOLVE_EDATA_ZPL = 3 # Solve on extra data values; zpl macro (Z)
# Object parameter codes for NSC solve
NSCSOLVE_OPAR_XPOS = -1
NSCSOLVE_OPAR_YPOS = -2
NSCSOLVE_OPAR_ZPOS = -3
NSCSOLVE_OPAR_XTILT = -4
NSCSOLVE_OPAR_YTILT = -5
NSCSOLVE_OPAR_ZTILT = -6
ANA_POP_SAMPLE_32 = 1
# Sampling codes for POP analysis
ANA_POP_SAMPLE_64 = 2
ANA_POP_SAMPLE_128 = 3
ANA_POP_SAMPLE_256 = 4
ANA_POP_SAMPLE_512 = 5
ANA_POP_SAMPLE_1024 = 6
ANA_POP_SAMPLE_2048 = 7
ANA_POP_SAMPLE_4096 = 8
ANA_POP_SAMPLE_8192 = 9
ANA_POP_SAMPLE_16384 = 10
# Sampling codes for PSF/MTF analysis
ANA_PSF_SAMPLE_32x32 = 1
ANA_PSF_SAMPLE_64x64 = 2
ANA_PSF_SAMPLE_128x128 = 3
ANA_PSF_SAMPLE_256x256 = 4
ANA_PSF_SAMPLE_512x512 = 5
ANA_PSF_SAMPLE_1024x1024 = 6
ANA_PSF_SAMPLE_2048x2048 = 7
ANA_PSF_SAMPLE_4096x4096 = 8
ANA_PSF_SAMPLE_8192x8192 = 9
ANA_PSF_SAMPLE_16384x16384 = 10
def __init__(self, apr=False):
"""Creates an instance of PyZDDE class
Usage: ``ln = pyz.PyZDDE()``
Parameters
----------
apr : bool
if `True`, automatically push and refresh lens to and from LDE to DDE
Returns
-------
ln : PyZDDE object
Notes
-----
1. Following the creation of PyZDDE object, initiate the
communication channel as ``ln.zDDEInit()``
2. Consider using the module level function ``pyz.createLink()`` to
create and initiate a DDE channel instead of ``ln = pyz.PyZDDE();
ln.zDDEInit()``
See Also
--------
createLink()
"""
PyZDDE.__chNum += 1 # increment channel count
self._appName = _getAppName(PyZDDE.__appNameDict) or '' # wicked :-)
self._appNum = PyZDDE.__chNum # unique & immutable identity of each instance
self._connection = False # 1/0 depending on successful connection or not
self._macroPath = None # variable to store macro path
self._filesCreated = set() # .cfg & other files to be cleaned at session end
self._apr = apr
def __repr__(self):
return ("PyZDDE(appName=%r, appNum=%r, connection=%r, macroPath=%r)" %
(self._appName, self._appNum, self._connection, self._macroPath))
def __hash__(self):
# for storing in internal dictionary
return hash(self._appNum)
def __eq__(self, other):
return (self._appNum == other._appNum)
    @property
    def apr(self):
        """bool : automatic push-and-refresh flag for this link (settable)"""
        return self._apr
    @apr.setter
    def apr(self, val):
        # toggling at any time affects subsequent data-item calls only
        self._apr = val
    @property
    def connection(self):
        """Status of the DDE connection (read-only)

        Returns
        -------
        status : bool
            True = connection online;
            False = connection offline
        """
        return self._connection
# ZEMAX <--> PyZDDE client connection methods
#--------------------------------------------
    def zDDEInit(self):
        """Initiates DDE link with Zemax server.

        Usage: ``ln.zDDEInit()``

        Parameters
        ----------
        None

        Returns
        -------
        status : integer (0 or -1)
            0 = DDE Zemax link successful;
            -1 = DDE link couldn't be established.

        See Also
        --------
        createLink(), zDDEClose(), zDDEStart(), zSetTimeout()
        """
        _debugPrint(1,"appName = " + self._appName)
        _debugPrint(1,"liveCh = " + str(PyZDDE.__liveCh))
        # Create the shared DDE server only once -- when no channel is live
        if PyZDDE.__liveCh==0:
            try:
                PyZDDE.__server = _dde.CreateServer()
                PyZDDE.__server.Create("ZCLIENT") # Name of the client
                _debugPrint(2, "PyZDDE.__server = " + str(PyZDDE.__server))
            except Exception as err1:
                _sys.stderr.write("{err}: Another application may be"
                                  " using a DDE server!".format(err=str(err1)))
                return -1
        # Try to create individual conversations for each ZEMAX application.
        self._conversation = _dde.CreateConversation(PyZDDE.__server)
        _debugPrint(2, "PyZDDE.converstation = " + str(self._conversation))
        try:
            self._conversation.ConnectTo(self._appName," ")
        except Exception as err2:
            _debugPrint(2, "Exception occured at attempt to call ConnecTo."
                        " Error = {err}".format(err=str(err2)))
            # Distinguish "too many live conversations" from "ZEMAX not
            # running" purely for the error message shown to the user.
            if self.__liveCh >= _MAX_PARALLEL_CONV:
                _sys.stderr.write("ERROR: {err}. \nMore than {liveConv} "
                                  "simultaneous conversations not allowed!\n"
                                  .format(err=str(err2), liveConv =_MAX_PARALLEL_CONV))
            else:
                _sys.stderr.write("ERROR: {err}.\nZEMAX may not be running!\n"
                                  .format(err=str(err2)))
            # should close the DDE server if it exist
            self.zDDEClose()
            _debugPrint(2,"PyZDDE server: " + str(PyZDDE.__server))
            return -1
        else:
            _debugPrint(1,"Zemax instance successfully connected")
            PyZDDE.__liveCh += 1 # increment the number of live channels
            self._connection = True
            return 0
def close(self):
"""Helper function to close current communication link
Usage: ``ln.close()``
Parameters
----------
None
Returns
-------
None
Notes
-----
This bounded method provides a quick alternative way to close link
rather than calling the module function ``pyz.closeLink()``.
See Also
--------
zDDEClose() :
PyZDDE instance method to close a link.
Use this method (as ``ln.zDDEClose()``) if the link was
created as ``ln = pyz.PyZDDE(); ln.zDDEInit()``
closeLink() :
A moudle level function to close a link.
Use this method (as ``pyz.closeLink(ln)``) or ``ln.close()``
if the link was created as ``ln = pyz.createLink()``
"""
return closeLink(self)
    def zDDEClose(self):
        """Close the DDE link with Zemax server.

        Usage: ``ln.zDDEClose()``

        Parameters
        ----------
        None

        Returns
        -------
        status : integer
            0 on success.

        Notes
        -----
        Use this bounded method to close link if the link was created using
        the idiom ``ln = pyz.PyZDDE(); ln.zDDEInit()``. If however, the
        link was created using ``ln = pyz.createLink()``, use either
        ``pyz.closeLink()`` or ``ln.close()``.
        """
        # Case 1: a server exists but no conversation ever went live
        # (e.g. ZEMAX wasn't running) -- just shut the server down.
        if PyZDDE.__server and not PyZDDE.__liveCh:
            PyZDDE.__server.Shutdown(self._conversation)
            PyZDDE.__server = 0
            _debugPrint(2,"server shutdown as ZEMAX is not running!")
        # Case 2: this is the last live conversation -- shut the shared
        # server down and reset all class-level bookkeeping.
        elif PyZDDE.__server and self._connection and PyZDDE.__liveCh == 1:
            PyZDDE.__server.Shutdown(self._conversation)
            self._connection = False
            PyZDDE.__appNameDict[self._appName] = False # make the name available
            _deleteFilesCreatedDuringSession(self)
            self._appName = ''
            PyZDDE.__liveCh -= 1 # This will become zero now. (reset)
            PyZDDE.__server = 0 # previous server obj should be garbage collected
            _debugPrint(2,"server shutdown")
        # Case 3: other conversations are still live -- close only this
        # one and keep the shared server running.
        elif self._connection: # if additional channels were successfully created.
            PyZDDE.__server.Shutdown(self._conversation)
            self._connection = False
            PyZDDE.__appNameDict[self._appName] = False # make the name available
            _deleteFilesCreatedDuringSession(self)
            self._appName = ''
            PyZDDE.__liveCh -= 1
            _debugPrint(2,"liveCh decremented without shutting down DDE channel")
        else: # if zDDEClose is called by an object which didn't have a channel
            _debugPrint(2, "Nothing to do")
        return 0
def zSetTimeout(self, time):
"""Set global timeout value, in seconds, for all Zemax DDE calls.
Parameters
----------
time: integer
timeout value in seconds (if float is given, it is rounded to
integer)
Returns
-------
timeout : integer
the set timeout value in seconds
Notes
-----
This is a global timeout value. Some methods provide means to set
individual timeout values.
See Also
--------
zDDEInit()
"""
self._conversation.SetDDETimeout(round(time))
return self._conversation.GetDDETimeout()
def zGetTimeout(self):
"""Returns the current value of the global timeout in seconds
Parameters
----------
None
Returns
-------
timeout : integer
globally set timeout value in seconds
"""
return self._conversation.GetDDETimeout()
@autopushandrefresh
def _sendDDEcommand(self, cmd, timeout=None):
"""Method to send command to DDE client
"""
global _global_pyver3
reply = self._conversation.Request(cmd, timeout)
if _global_pyver3:
reply = reply.decode('ascii').rstrip()
return reply
def __del__(self):
"""Destructor"""
_debugPrint(2,"Destructor called")
self.zDDEClose()
# ****************************************************************
# ZEMAX DATA ITEM BASED METHODS
# ****************************************************************
def zCloseUDOData(self, bufferCode):
"""Close the User Defined Operand buffer allowing optimizer to
proceed
Parameters
----------
bufferCode : integer
buffercode is an integer value provided by Zemax to the client
that uniquely identifies the correct lens.
Returns
-------
retVal : ?
See Also
--------
zGetUDOSystem(), zSetUDOItem()
"""
return int(self._sendDDEcommand("CloseUDOData,{:d}".format(bufferCode)))
def zDeleteConfig(self, number):
"""Deletes an existing configuration (column) in the multi-
configuration editor
Parameters
----------
number : integer
configuration number to delete
Returns
-------
deleted_config_num : integer
configuration number deleted
Notes
-----
After deleting the configuration, all succeeding configurations
are re-numbered.
See Also
--------
zInsertConfig()
zDeleteMCO() :
(TIP) use zDeleteMCO to delete a row/operand
"""
return int(self._sendDDEcommand("DeleteConfig,{:d}".format(number)))
def zDeleteMCO(self, operNum):
"""Deletes an existing operand (row) in the multi-configuration
editor
Parameters
----------
operNum : integer
operand number (row in the MCE) to delete
Returns
-------
newNumberOfOperands : integer
new number of operands
Notes
-----
After deleting the row, all succeeding rows (operands) are
re-numbered.
See Also
--------
zInsertMCO()
zDeleteConfig() :
(TIP) Use zDeleteConfig() to delete a column/configuration.
"""
return int(self._sendDDEcommand("DeleteMCO,"+str(operNum)))
def zDeleteMFO(self, operand):
"""Deletes an optimization operand (row) in the merit function
editor
Parameters
----------
operand : integer
Operand (row) number (- 1 <= operand <= number_of_operands)
Returns
-------
newNumOfOperands : integer
the new number of operands
See Also
--------
zInsertMFO()
"""
return int(self._sendDDEcommand("DeleteMFO,{:d}".format(operand)))
def zDeleteObject(self, surfNum, objNum):
"""Deletes the NSC object identified by the ``objNum`` and
the surface identified by ``surfNum``
Parameters
----------
surfNum : integer
surface number of Non-Sequential Component surface
objNum : integer
object number in the NSC editor
Returns
-------
status : integer (0 or -1)
0 if successful, -1 if it failed
Notes
-----
1. The ``surfNum`` is 1 if the lens is purely NSC mode.
2. If no more objects are present it simply returns 0.
See Also
--------
zInsertObject()
"""
cmd = "DeleteObject,{:d},{:d}".format(surfNum,objNum)
reply = self._sendDDEcommand(cmd)
rs = reply.rstrip()
if rs == 'BAD COMMAND':
return -1
else:
return int(float(rs))
def zDeleteSurface(self, surfNum):
"""Deletes an existing surface identified by ``surfNum``
Parameters
----------
surfNum : integer
the surface number of the surface to be deleted
Returns
-------
status : integer
0 if successful
.. warning:: Although you cannot delete an object the function \
doesn't return an error (returns 0 instead).
See Also
--------
zInsertSurface()
"""
cmd = "DeleteSurface,{:d}".format(surfNum)
reply = self._sendDDEcommand(cmd)
return int(float(reply))
def zExportCAD(self, fileName, fileType=1, numSpline=32, firstSurf=1,
lastSurf=-1, raysLayer=1, lensLayer=0, exportDummy=0,
useSolids=1, rayPattern=0, numRays=0, wave=0, field=0,
delVignett=1, dummyThick=1.00, split=0, scatter=0,
usePol=0, config=0):
"""Export lens data in IGES/STEP/SAT format for import into CAD
programs
Parameters
----------
fileName : string
filename including extension (including full path is
recommended)
fileType : integer (0, 1, 2 or 3)
0 = IGES; 1 = STEP (default); 2 = SAT; 3 = STL
numSpline : integer
number of spline segments to use (default = 32)
firstSurf : integer
the first surface to export; the first object to export
(in NSC mode)
lastSurf : integer
the last surface to export; the last object to export
(in NSC mode)
(default = -1, i.e. image surface)
raysLayer : integer
layer to place ray data on (default = 1)
lensLayer : integer
layer to place lens data on (default = 0)
exportDummy : integer (0 or 1)
export dummy surface? 1 = export; 0 (default) = not export
useSolids : integer (0 or 1)
export surfaces as solids? 1 (default) = surfaces as solids;
rayPattern : integer (0 <= rayPattern <= 7)
0 (default) = XY fan; 1 = X fan; 2 = Y fan; 3 = ring; 4 = list;
5 = none; 6 = grid; 7 = solid beams.
numRays : integer
the number of rays to render (default = 1)
wave : integer
wavelength number; 0 (default) indicates all
field : integer
the field number; 0 (default) indicates all
delVignett : integer (0 or 1)
delete vignetted rays? 1 (default) = delete vig. rays
dummyThick : float
dummy surface thickness in lens units; (default = 1.00)
split : integer (0 or 1)
split rays from NSC sources? 1 = split sources;
0 (default) = no
scatter : integer (0 or 1)
scatter rays from NSC sources? 1 = Scatter; 0 (deafult) = no
usePol : integer (0 or 1)
use polarization when tracing NSC rays? 1 = use polarization;
0 (default) no. Note that polarization is automatically
selected if ``split`` is ``1``.
config : integer (0 <= config <= n+3)
n is the total number of configurations;
0 (default) = current config;
1 - n for a specific configuration;
n+1 to export "All By File";
n+2 to export "All by Layer";
n+3 for "All at Once".
Returns
-------
status : string
the string "Exporting filename" or "BUSY!" (see notes below)
Notes
-----
1. Exporting lens data data may take longer than the timeout
interval of the DDE communication. Zemax spwans an independent
thread to process this request. Once the thread is launched,
Zemax returns "Exporting filename". However, the export may
take much longer. To verify the completion of export and the
readiness of the file, use ``zExportCheck()``, which returns
``1`` as long as the export is in process, and ``0`` once
completed. Generally, ``zExportCheck()`` should be placed in
a loop, which executes until a ``0`` is returned.
A typical loop-test may look like as follows: ::
# check for completion of CAD export process
still_working = True
while(still_working):
# Delay for 200 milliseconds
time.sleep(.2)
status = ln.zExportCheck()
if status: # still running
pass
else: # Done exporting
still_working = False
2. Zemax cannot export some NSC objects (e.g. slide). The
unexportable objects are ignored.
References
----------
For a detailed exposition on the configuration settings,
see "Export IGES/SAT.STEP Solid" in the Zemax manual [Zemax]_.
"""
# Determine last surface/object depending upon zemax mode
if lastSurf == -1:
zmxMode = self._zGetMode()
if zmxMode[0] != 1:
lastSurf = self.zGetSystem()[0]
else:
lastSurf = self.zGetNSCData(1,0)
args = [str(arg) for arg in ("ExportCAD", fileName, fileType, numSpline,
firstSurf, lastSurf, raysLayer, lensLayer,
exportDummy, useSolids, rayPattern, numRays,
wave, field, delVignett, dummyThick, split,
scatter, usePol, config)]
cmd = ",".join(args)
reply = self._sendDDEcommand(cmd)
return str(reply)
def zExportCheck(self):
"""Indicate the status of the last executed ``zExportCAD()`` command
Parameters
----------
None
Returns
-------
status : integer (0 or 1)
0 = last CAD export completed; 1 = last CAD export in progress
"""
return int(self._sendDDEcommand('ExportCheck'))
def zFindLabel(self, label):
"""Returns the surface that has the integer label associated with
the it.
Parameters
----------
label : integer
label associated with a surface
Returns
-------
surfNum : integer
surface-number of surface associated with the given ``label``;
-1 if no surface with the specified label is found.
See Also
--------
zSetLabel(), zGetLabel()
"""
reply = self._sendDDEcommand("FindLabel,{:d}".format(label))
return int(float(reply))
def zGetAddress(self, addressLineNumber):
"""Extract the address in specified line number
Parameters
----------
addressLineNumber : integer
line number of address to return
Returns
-------
addressLine : string
address line
"""
reply = self._sendDDEcommand("GetAddress,{:d}"
.format(addressLineNumber))
return str(reply)
def zGetAperture(self, surf):
"""Get the surface aperture data for a given surface
Parameters
----------
surf : integer
the surface-number of a surface
Returns
-------
aType : integer
integer codes of aperture types which are:
* 0 = no aperture (na);
* 1 = circular aperture (ca);
* 2 = circular obscuration (co);
* 3 = spider (s);
* 4 = rectangular aperture (ra);
* 5 = rectangular obscuration (ro);
* 6 = elliptical aperture (ea);
* 7 = elliptical obscuration (eo);
* 8 = user defined aperture (uda);
* 9 = user defined obscuration (udo);
* 10 = floating aperture (fa);
aMin : float
min radius(ca); min radius(co); width of arm(s); x-half width
(ra); x-half width(ro); x-half width(ea); x-half width(eo)
aMax : float
max radius(ca); max radius(co); number of arm(s); y-half
width(ra); y-half width(ro); y-half width(ea); y-half width(eo)
xDecenter : float
amount of decenter in x from current optical axis (lens units)
yDecenter : float
amount of decenter in y from current optical axis (lens units)
apertureFile : string
a text file with .UDA extention.
References
----------
The following sections from the Zemax manual should be referred
for details [Zemax]_:
1. "Aperture type and other aperture controls" for details on
aperture
2. "User defined apertures and obscurations" for more on UDA
extension
See Also
--------
zGetSystemAper() :
For system aperture instead of the aperture of surface.
zSetAperture()
"""
reply = self._sendDDEcommand("GetAperture," + str(surf))
rs = reply.split(',')
apertureInfo = [int(rs[i]) if i==5 else float(rs[i])
for i in range(len(rs[:-1]))]
apertureInfo.append(rs[-1].rstrip()) # append the test file (string)
ainfo = _co.namedtuple('ApertureInfo', ['aType', 'aMin', 'aMax',
'xDecenter', 'yDecenter',
'apertureFile'])
return ainfo._make(apertureInfo)
def zGetApodization(self, px, py):
"""Computes the intensity apodization of a ray from the
apodization type and value.
Parameters
----------
px, py : float
normalized pupil coordinates
Returns
-------
intApod : float
intensity apodization
"""
reply = self._sendDDEcommand("GetApodization,{:1.20g},{:1.20g}"
.format(px,py))
return float(reply)
def zGetAspect(self, filename=None):
"""Returns the graphic display aspect-ratio and the width
(or height) of the printed page in current lens units.
Parameters
----------
filename : string
name of the temporary file associated with the window being
created or updated. If the temporary file is left off, then
the default aspect-ratio and width (or height) is returned.
Returns
-------
aspect : float
aspect ratio (height/width)
side : float
width if ``aspect <= 1``; height if ``aspect > 1``
(in lens units)
"""
asd = _co.namedtuple('aspectData', ['aspect', 'side'])
cmd = (filename and "GetAspect,{}".format(filename)) or "GetAspect"
reply = self._sendDDEcommand(cmd).split(",")
aspectSide = asd._make([float(elem) for elem in reply])
return aspectSide
def zGetBuffer(self, n, tempFileName):
"""Retrieve DDE client specific data from a window being updated
Parameters
----------
n : integer (0 <= n <= 15)
the buffer number
tempFileName : string
name of the temporary file associated with the window being
updated. The tempFileName is passed to the client when Zemax
calls the client.
Returns
-------
bufferData : string
buffer data
Notes
-----
Each window may have its own buffer data, and Zemax uses the
filename to identify the window for which the buffer data is
requested.
References
----------
See section "How ZEMAX calls the client" in Zemax manual [Zemax]_.
See Also
--------
zSetBuffer()
"""
cmd = "GetBuffer,{:d},{}".format(n,tempFileName)
reply = self._sendDDEcommand(cmd)
return str(reply.rstrip())
# !!!FIX what is the proper return for this command?
def zGetComment(self, surfNum):
"""Returns the surface comment, if any, associated with the surface
Parameters
----------
surfNum: integer
the surface number
Returns
-------
comment : string
the comment, if any, associated with the surface
"""
reply = self._sendDDEcommand("GetComment,{:d}".format(surfNum))
return str(reply.rstrip())
def zGetConfig(self):
"""Returns tuple containing current configuration number, number of
configurations, and number of multiple configuration operands.
Parameters
----------
none
Returns
-------
currentConfig : integer
current configuration (column) number in MCE
numberOfConfigs : integer
number of configurations (number of columns)
numberOfMutiConfigOper : integer
number of multi config operands (number of rows)
Notes
-----
The function returns ``(1,1,1)`` even if the multi-configuration
editor is empty. This is because, the current lens in the LDE is,
by default, set to the current configuration. The initial number of
configurations is therefore ``1``, and the number of operators in
the multi-configuration editor is also ``1`` (usually, ``MOFF``).
See Also
--------
zInsertConfig() :
Use ``zInsertConfig()`` to insert new configuration in the
multi-configuration editor.
zSetConfig()
"""
reply = self._sendDDEcommand('GetConfig')
rs = reply.split(',')
# !!! FIX: Should this function return "0" when the MCE is empty, just
# like what is done for the zGetNSCData() function?
return tuple([int(elem) for elem in rs])
def zGetDate(self):
"""Get current date from the Zemax DDE server.
Parameters
----------
None
Returns
-------
date : string
date
"""
return self._sendDDEcommand('GetDate').rstrip()
def zGetExtra(self, surfNum, colNum):
"""Returns extra surface data from the Extra Data Editor
Parameters
----------
surfNum : integer
surface number
colNum : integer
column number
Returns
-------
value : float
numeric data value
See Also
--------
zSetExtra()
"""
cmd="GetExtra,{sn:d},{cn:d}".format(sn=surfNum, cn=colNum)
reply = self._sendDDEcommand(cmd)
return float(reply)
def zGetField(self, n):
"""Returns field data for lens in Zemax DDE server
Parameters
----------
n : integer
the field number
Returns
-------
[Case: ``n=0``]
type : integer
0 = angles in degrees; 1 = object height; 2 = paraxial image
height, 3 = real image height
numFields : integer
number of fields currently defined
maxX : float
values used to normalize x field coordinate
maxY : float
values used to normalize y field coordinate
normMethod : integer
normalization method (0 = radial, 1 = rectangular)
[Case: ``0 < n <= number-of-fields``]
xf : float
the field-x value
yf : float
the field-y value
wgt : float
field weight value
vdx : float
decenter-x vignetting factor
vdy : float
decenter-y vignetting factor
vcx : float
compression-x vignetting factor
vcy : float
compression-y vignetting factor
van : float
angle vignetting factor
Notes
-----
The returned tuple's content and structure is exactly same as that
returned by ``zSetField()``
See Also
--------
zSetField()
"""
if n: # n > 0
fd = _co.namedtuple('fieldData', ['xf', 'yf', 'wgt',
'vdx', 'vdy',
'vcx', 'vcy', 'van'])
else: # n = 0
fd = _co.namedtuple('fieldData', ['type', 'numFields',
'maxX', 'maxY', 'normMethod'])
reply = self._sendDDEcommand('GetField,'+ str(n))
rs = reply.split(',')
if n: # n > 0
fieldData = fd._make([float(elem) for elem in rs])
else: # n = 0
fieldData = fd._make([int(elem) if (i==0 or i==1 or i==4)
else float(elem) for i, elem in enumerate(rs)])
return fieldData
def zGetFile(self):
"""Returns the full name of the zmx lens file in Zemax server
Parameters
----------
None
Returns
-------
file_name : string
filename of the zmx file in the Zemax DDE server.
Notes
-----
Extreme caution should be used if the file is to be tampered with;
at any time Zemax may read or write from/to this file.
"""
reply = self._sendDDEcommand('GetFile')
return reply.rstrip()
def zGetFirst(self):
"""Returns the first order lens data
Parameters
----------
None
Returns
-------
EFL : float
Effective Focal Length (EFL) in lens units,
paraWorkFNum : float
paraxial working F/#,
realWorkFNum : float
real working F/#,
paraImgHeight : float
paraxial image height, and
paramag : float
paraxial magnification. See Notes.
Notes
-----
The value of the magnification returned by this function is the
paraxial magnification. This value doesn't depend on the acutal
image height or the actual location of the image surface from the
principal planes; instead it depends on the paraxial image height.
For real magnification see `zGetMagnification()`.
See Also
--------
zGetSystem() :
Use ``zGetSystem()`` to get general system data,
zGetSystemProperty()
ipzGetFirst()
zGetMagnification()
"""
fd = _co.namedtuple('firstOrderData',
['EFL', 'paraWorkFNum', 'realWorkFNum',
'paraImgHeight', 'paraMag'])
reply = self._sendDDEcommand('GetFirst')
rs = reply.split(',')
return fd._make([float(elem) for elem in rs])
def zGetGlass(self, surfNum):
"""Returns glass data of a surface.
Parameters
----------
surfNum : integer
surface number
Returns
-------
glassInfo : 4-tuple or None
glassInfo contains (name, nd, vd, dpgf) if there is a valid glass
associated with the surface, else ``None``
Notes
-----
If the specified surface is not valid, is not made of glass, or is
gradient index, the returned string is empty. This data may be
meaningless for glasses defined only outside of the FdC band.
"""
gd = _co.namedtuple('glassData', ['name', 'nd', 'vd', 'dpgf'])
reply = self._sendDDEcommand("GetGlass,{:d}".format(surfNum))
rs = reply.split(',')
if len(rs) > 1:
glassInfo = gd._make([str(rs[i]) if i == 0 else float(rs[i])
for i in range(len(rs))])
else:
glassInfo = None
return glassInfo
def zGetGlobalMatrix(self, surfNum):
"""Returns the the matrix required to convert any local coordinates
(such as from a ray trace) into global coordinates.
Parameters
----------
surfNum : integer
surface number
Returns
-------
globalMatrix : 9-tuple
the elements of the global matrix:
| (R11, R12, R13,
| R21, R22, R23,
| R31, R32, R33,
| Xo, Yo , Zo)
the function returns -1, if bad command.
References
----------
For details on the global coordinate matrix, see "Global Coordinate
Reference Surface" in the Zemax manual [Zemax]_.
"""
gmd = _co.namedtuple('globalMatrix', ['R11', 'R12', 'R13',
'R21', 'R22', 'R23',
'R31', 'R32', 'R33',
'Xo' , 'Yo', 'Zo'])
cmd = "GetGlobalMatrix,{:d}".format(surfNum)
reply = self._sendDDEcommand(cmd)
rs = reply.rstrip()
globalMatrix = gmd._make([float(elem) for elem in rs.split(',')])
return globalMatrix
def zGetIndex(self, surfNum):
"""Returns the index of refraction data for the specified surface
Parameters
----------
surfNum : integer
surface number
Returns
-------
indexData : tuple of real values
the ``indexData`` is a tuple of index of refraction values
defined for each wavelength in the format (n1, n2, n3, ...).
If the specified surface is not valid, or is gradient index,
the returned string is empty.
See Also
--------
zGetIndexPrimWave()
"""
reply = self._sendDDEcommand("GetIndex,{:d}".format(surfNum))
rs = reply.split(",")
indexData = [float(rs[i]) for i in range(len(rs))]
return tuple(indexData)
def zGetLabel(self, surfNum):
"""Returns the integer label associated with the specified surface.
Parameters
----------
surfNum : integer
the surface number
Returns
-------
label : integer
the integer label
Notes
-----
Labels are retained by Zemax as surfaces are inserted or deleted
around the target surface.
See Also
--------
zSetLabel(), zFindLabel()
"""
reply = self._sendDDEcommand("GetLabel,{:d}".format(surfNum))
return int(float(reply.rstrip()))
def zGetMetaFile(self, metaFileName, analysisType, settingsFile=None,
flag=0, timeout=None):
"""Creates a windows Metafile of any Zemax graphical analysis window
Usage: ``zMetaFile(metaFilename, analysisType [, settingsFile, flag])``
Parameters
----------
metaFileName : string
absolute path name with extension
analysisType : string
3-letter case-sensitive button code for the analysis. If no label
is provided or recognized, a 3D Layout plot is generated.
settingsFile : string
settings file used/ saved by Zemax to compute the metafile graphic
depending upon the value of the flag parameter.
flag : integer
0 = default settings used for the graphic;
1 = use settings in settings file if valid, else default settings;
2 = use settings in settings file if valid, and the settings box
will be displayed for further setting changes.
timeout : integer, optional
timeout in seconds (default=None, i.e. default timeout value)
Returns
-------
status : integer
0 = Success;
-1 = metafile could not be saved;
-998 = command timed out
Notes
-----
No matter what the flag value is, if a valid file-name is provided
for the ``settingsFile``, the settings used will be written to
the settings file, overwriting any data in the file.
Examples
--------
>>> ln.zGetMetaFile("C:\\Projects\\myGraphicfile.EMF",'Lay')
0
See Also
--------
zGetTextFile(), zOpenWindow()
"""
if settingsFile:
settingsFile = settingsFile
else:
settingsFile = ''
retVal = -1
# Check if Valid analysis type
if zb.isZButtonCode(analysisType):
# Check if the file path is valid and has extension
if _os.path.isabs(metaFileName) and _os.path.splitext(metaFileName)[1]!='':
cmd = 'GetMetaFile,"{tF}",{aT},"{sF}",{fl:d}'.format(tF=metaFileName,
aT=analysisType,sF=settingsFile,fl=flag)
reply = self._sendDDEcommand(cmd, timeout)
if 'OK' in reply.split():
retVal = 0
else:
print("Invalid analysis code '{}' passed to zGetMetaFile."
.format(analysisType))
return retVal
def zGetMulticon(self, config, row):
"""Returns data from the multi-configuration editor
Parameters
----------
config : integer
configuration number (column)
row : integer
operand
Returns
-------
multiConData : tuple or None
if the MCE is empty `None` is returned. Else, the exact elements of
``multiConData`` depends on the value of ``config``
If ``config > 0``
then the elements of ``multiConData`` are:
(value, numConfig, numRow, status, pickupRow, pickupConfig,
scale, offset)
The ``status`` is 0 for fixed, 1 for variable, 2 for pickup,
& 3 for thermal pickup. If ``status`` is 2 or 3, the pickuprow &
pickupconfig values indicate the source data for the pickup solve.
If ``config = 0``
then the elements of ``multiConData`` are:
(operandType, num1, num2, num3)
`num1` could be "Surface#", "Surface", "Field#", "Wave#', or
"Ignored".
`num2` could be "Object", "Extra Data Number", or "Parameter".
`num3` could be "Property", or "Face#".
See [MCO]_
References
----------
.. [MCO] "Summary of Multi-Configuration Operands," Zemax manual.
See Also
--------
zSetMulticon(), zGetConfig()
"""
cmd = "GetMulticon,{config:d},{row:d}".format(config=config,row=row)
reply = self._sendDDEcommand(cmd)
if config: # if config > 0
mcd = _co.namedtuple('MCD', ['value', 'numConfig', 'numRow', 'status',
'pickupRow', 'pickupConfig', 'scale',
'offset'])
rs = reply.split(",")
if len(rs) < 8:
if (self.zGetConfig() == (1, 1, 1)): # probably nothing set in MCE
return None
else:
assert False, "Unexpected reply () from Zemax.".format(reply)
else:
multiConData = [float(rs[i]) if (i==0 or i==6 or i==7) else int(rs[i])
for i in range(len(rs))]
else: # if config == 0
mcd = _co.namedtuple('MCD', ['operandType', 'num1', 'num2', 'num3'])
rs = reply.split(",")
multiConData = [int(elem) for elem in rs[1:]]
multiConData.insert(0, rs[0])
return mcd._make(multiConData)
def zGetName(self):
"""Returns the name of the lens
Parameters
----------
None
Returns
-------
lensName : string
name of the current lens (as in the General data dialog box)
"""
reply = self._sendDDEcommand('GetName')
return str(reply.rstrip())
def zGetNSCData(self, surfNum, code):
"""Returns the data for NSC groups
Parameters
----------
surfNum : integer
surface number of the NSC group; Use 1 if for pure NSC mode
code : integer (0)
currently only ``code = 0`` is supported, in which case the
returned data is the number of objects in the NSC group
Returns
-------
nscData :
the number of objects in the NSC group if the command is valid;
-1 if it was a bad commnad (generally if the ``surface`` is not
a non-sequential surface)
Notes
-----
This function returns 1 if the only object in the NSC editor is a
"Null Object".
"""
cmd = "GetNSCData,{:d},{:d}".format(surfNum,code)
reply = self._sendDDEcommand(cmd)
rs = reply.rstrip()
if rs == 'BAD COMMAND':
nscData = -1
else:
nscData = int(float(rs))
if nscData == 1:
nscObjType = self.zGetNSCObjectData(surfNum,1,0)
if nscObjType == 'NSC_NULL': # the NSC editor is actually empty
nscData = 0
return nscData
def zGetNSCMatrix(self, surfNum, objNum):
"""Returns a tuple containing the rotation and position matrices
relative to the NSC surface origin.
Parameters
----------
surfNum : integer
surface number of the NSC group; Use 1 for pure NSC mode
objNum : integer
the NSC ojbect number
Returns
-------
nscMatrix : 9-tuple
the elements of the global matrix:
| (R11, R12, R13,
| R21, R22, R23,
| R31, R32, R33,
| Xo, Yo , Zo)
the function returns -1, if bad command.
"""
nscmat = _co.namedtuple('NSCMatrix', ['R11', 'R12', 'R13',
'R21', 'R22', 'R23',
'R31', 'R32', 'R33',
'Xo' , 'Yo', 'Zo'])
cmd = "GetNSCMatrix,{:d},{:d}".format(surfNum,objNum)
reply = self._sendDDEcommand(cmd)
rs = reply.rstrip()
if rs == 'BAD COMMAND':
nscMatrix = -1
else:
nscMatrix = nscmat._make([float(elem) for elem in rs.split(',')])
return nscMatrix
def zGetNSCObjectData(self, surfNum, objNum, code):
"""Returns the various data for NSC objects.
Parameters
----------
surfNum : integer
surface number of the NSC group. Use 1 if for pure NSC mode
objNum : integer
the NSC ojbect number
code : integer
for the specific code see the nsc-object-data-codes_ table (below)
Returns
-------
nscObjectData : string/integer/float
the nature of the returned data, which depends on the ``code``,
is enumerated in the nsc-object-data-codes_ table (below).
If the command fails, it returns ``-1``.
Notes
-----
.. _nsc-object-data-codes:
::
Table: Codes for NSC object data getter and setter methods
--------------------------------------------------------------------
code - Datum set/returned by zSetNSCObjectData()/zGetNSCObjectData()
--------------------------------------------------------------------
0 - Object type name (string).
1 - Comment and/or the file name if the object is defined by a
file (string).
2 - Color (integer).
5 - Reference object number (integer).
6 - Inside of object number (integer).
These codes set/get values to/from the "Type tab" of the Object
Properties dialog:
3 - 1 if object uses a user defined aperture file, 0 otherwise
4 - User defined aperture file name, if any (string).
29 - "Use Pixel Interpolation" checkbox (1 = checked, 0 = unchecked).
These codes set/get values to/from the "Sources tab" of the Object
Properties dialog:
101 - Source object random polarization (1 = checked, 0 = unchecked)
102 - Source object reverse rays option (1 = checked, 0 for unchecked)
103 - Source object Jones X value.
104 - Source object Jones Y value.
105 - Source object Phase X value.
106 - Source object Phase Y value.
107 - Source object initial phase in degrees value.
108 - Source object coherence length value.
109 - Source object pre-propagation value.
110 - Source object sampling method (0 = random, 1 = Sobol sampling)
111 - Source object bulk scatter method (0 = many, 1 = once, 2 = never)
These codes set/set values to/from the "Bulk Scatter tab" of the Object
Properties dialog:
202 - Mean Path value.
203 - Angle value.
211-226 - DLL parameter 1-16, respectively.
end-of-table
See Also
--------
zSetNSCObjectData()
"""
str_codes = (0, 1, 4)
int_codes = (2, 3, 5, 6, 29, 101, 102, 110, 111)
cmd = ("GetNSCObjectData,{:d},{:d},{:d}"
.format(surfNum, objNum, code))
reply = self._sendDDEcommand(cmd)
rs = reply.rstrip()
if rs == 'BAD COMMAND':
nscObjectData = -1
else:
if code in str_codes:
nscObjectData = str(rs)
elif code in int_codes:
nscObjectData = int(float(rs))
else:
nscObjectData = float(rs)
return nscObjectData
def zGetNSCObjectFaceData(self, surfNum, objNum, faceNum, code):
"""Returns the various data for NSC object faces.
Parameters
----------
surfNum : integer
surface number of the NSC group. Use 1 if for pure NSC mode
objNum : integer
the NSC ojbect number
faceNum : integer
face number
code : integer
code (see below)
Returns
-------
nscObjFaceData : data for NSC object faces (see the table for the
particular type of data) if successful, else -1
Notes
-----
.. _nsc-object-face-data-codes:
::
Table: Codes for NSC object face data getter and setter methods
---------------------------------------------------------------
code - set/get by zGetNSCObjectFaceData/zGetNSCObjectFaceData
---------------------------------------------------------------
10 - Coating name (string).
20 - Scatter code (0 = None, 1 = Lambertian, 2 = Gaussian,
3 = ABg, and 4 = user defined)
21 - Scatter fraction (float).
22 - Number of rays to scatter (integer).
23 - Gaussian scatter sigma (float).
24 - Face is setting(0 = object default, 1 = reflective,
2 = absorbing)
30 - ABg scatter profile name for reflection (string).
31 - ABg scatter profile name for transmission (string).
40 - User Defined Scatter DLL name (string).
41-46 - User Defined Scatter Parameter 1 - 6 (double).
60 - User Defined Scatter data file name (string).
end-of-table
See Also
--------
zSetNSCObjectFaceData()
"""
str_codes = (10,30,31,40,60)
int_codes = (20,22,24)
cmd = ("GetNSCObjectFaceData,{:d},{:d},{:d},{:d}"
.format(surfNum, objNum, faceNum, code))
reply = self._sendDDEcommand(cmd)
rs = reply.rstrip()
if rs == 'BAD COMMAND':
nscObjFaceData = -1
else:
if code in str_codes:
nscObjFaceData = str(rs)
elif code in int_codes:
nscObjFaceData = int(float(rs))
else:
nscObjFaceData = float(rs)
return nscObjFaceData
def zGetNSCParameter(self, surfNum, objNum, paramNum):
"""Returns NSC object's parameter data
Parameters
----------
surfNum : integer
surface number of the NSC group. Use 1 if for pure NSC mode
objNum : integer
the NSC ojbect number
paramNum : integer
parameter number
Returns
-------
nscParaVal : float
parameter value
See Also
--------
zSetNSCParameter()
"""
cmd = ("GetNSCParameter,{:d},{:d},{:d}"
.format(surfNum, objNum, paramNum))
reply = self._sendDDEcommand(cmd)
rs = reply.rstrip()
if rs == 'BAD COMMAND':
nscParaVal = -1
else:
nscParaVal = float(rs)
return nscParaVal
def zGetNSCPosition(self, surfNum, objNum):
"""Returns position data for NSC object
Parameters
----------
surfNum : integer
surface number of the NSC group. Use 1 if for pure NSC mode
objNum : integer
the NSC ojbect number
Returns
-------
nscPos : 7-tuple (x, y, z, tilt-x, tilt-y, tilt-z, material)
Examples
--------
>>> ln.zGetNSCPosition(surfNum=1, objNum=4)
NSCPosition(x=0.0, y=0.0, z=10.0, tiltX=0.0, tiltY=0.0, tiltZ=0.0, material='N-BK7')
See Also
--------
zSetNSCPosition()
"""
nscpd = _co.namedtuple('NSCPosition', ['x', 'y', 'z',
'tiltX', 'tiltY', 'tiltZ',
'material'])
cmd = ("GetNSCPosition,{:d},{:d}".format(surfNum,objNum))
reply = self._sendDDEcommand(cmd)
rs = reply.split(',')
if rs[0].rstrip() == 'BAD COMMAND':
nscPos = -1
else:
nscPos = nscpd._make([str(rs[i].rstrip()) if i==6 else float(rs[i])
for i in range(len(rs))])
return nscPos
    def zGetNSCProperty(self, surfNum, objNum, faceNum, code):
        """Returns a numeric or string value from the property pages of
        objects defined in NSC editor. It mimics the ZPL function NPRO.
        Parameters
        ----------
        surfNum : integer
            surface number of the NSC group. Use 1 if for pure NSC mode
        objNum : integer
            the NSC ojbect number
        faceNum : integer
            face number
        code : integer
            for the specific code see the nsc-property-codes_ table (below)
        Returns
        -------
        nscPropData : string/float/integer
            the nature of the returned data, which depends on the ``code``,
            is enumerated in the nsc-property-codes_ table (below).
            If the command fails, it returns ``-1``.
        Notes
        -----
        .. _nsc-property-codes:
        ::
            Table: Codes for NSC property getter and setter methods
            ---------------------------------------------------------------
            code - Datum set/get by zSetNSCProperty()/zGetNSCProperty()
            ---------------------------------------------------------------
            The following codes sets/get values to/from the NSC Editor.
              1 - Object comment (string).
              2 - Reference object number (integer).
              3 - "Inside of" object number (integer).
              4 - Object material (string).
            The following codes set/get values to/from the "Type tab" of
            the Object Properties dialog.
              0 - Object type. e.g., "NSC_SLEN" for standard lens (string).
             13 - User Defined Aperture (1 = checked, 0 = unchecked)
             14 - User Defined Aperture file name (string).
             15 - "Use Global XYZ Rotation Order" checkbox; (1 = checked,
                  0 = unchecked)
             16 - "Rays Ignore Object" checkbox; (1=checked, 0=un-checked)
             17 - "Object Is Detector" checkbox; (1=checked, 0=un-checked)
             18 - "Consider Objects" list. Argument is a string listing the
                  object numbers delimited by spaces.e.g.,"2 5 14" (string)
             19 - "Ignore Objects" list. Argument is a string listing the
                  object numbers delimited by spaces.e.g.,"1 3 7" (string)
             20 - "Use Pixel Interpolation" checkbox, (1=checked, 0=un-
                  checked).
            The following codes set/get values to/from the "Coat/Scatter
            tab" of the Object Properties dialog.
              5 - Coating name for the specified face (string)
              6 - Profile name for the specified face (string)
              7 - Scatter mode for the specified face, (0 = none,
                  1 = Lambertian, 2 = Gaussian, 3 = ABg, 4 = User Defined.)
              8 - Scatter fraction for the specified face (float)
              9 - Number of scatter rays for the specified face (integer)
             10 - Gaussian sigma for the specified face (float)
             11 - Reflect ABg data name for the specified face (string)
             12 - Transmit ABg data name for the specified face (string)
             27 - Name of the user defined scattering DLL (string)
             28 - Name of the user defined scattering data file (string)
            21-26 - Parameter values on user defined scattering DLL (float)
             29 - "Face Is" property for the specified face
                  (0 = "Object Default", 1 = "Reflective", 2 = "Absorbing")
            The following codes set/get values to/from the "Bulk Scattering
            tab" of the Object Properties dialog.
             81 - "Model" value on the bulk scattering tab (0 = "No Bulk
                  Scattering", 1 = "Angle Scattering", 2 = "DLL Defined Scattering")
             82 - Mean free path to use for bulk scattering.
             83 - Angle to use for bulk scattering.
             84 - Name of the DLL to use for bulk scattering.
             85 - Parameter value to pass to the DLL, where the face value
                  is used to specify which parameter is being defined. The
                  first parameter is 1, the second is 2, etc. (float)
             86 - Wavelength shift string (string).
            The following codes set/get values from the Diffraction tab of
            the Object Properties dialog.
             91 - "Split" value on diffraction tab (0="Don't Split By Order",
                  1="Split By Table Below", 2="Split By DLL Function")
             92 - Name of the DLL to use for diffraction splitting (string)
             93 - Start Order value (float)
             94 - Stop Order value (float)
             95 - Parameter values ondiffraction tab. These parameters are
                  passed to the diffraction splitting DLL as well as the
                  order efficiency values used by "split by table below"
                  option. The face value is used to specify which parameter
                  is being defined. The first parameter is 1, the second is
                  2, etc. (float)
            The following codes set/get values to/from the "Sources tab" of
            the Object Properties dialog.
            101 - Source object random polarization (1=checked, 0=unchecked)
            102 - Source object reverse rays option (1=checked, 0=unchecked)
            103 - Source object Jones X value
            104 - Source object Jones Y value
            105 - Source object Phase X value
            106 - Source object Phase Y value
            107 - Source object initial phase in degrees value
            108 - Source object coherence length value
            109 - Source object pre-propagation value
            110 - Source object sampling method; (0=random, 1=Sobol sampling)
            111 - Source object bulk scatter method; (0=many,1=once, 2=never)
            112 - Array mode; (0 = none, 1 = rectangular, 2 = circular,
                  3 = hexapolar, 4 = hexagonal)
            113 - Source color mode. For a complete list of the available
                  modes, see "Defining the color and spectral content of
                  sources" in the Zemax manual. The source color modes are
                  numbered starting with 0 for the System Wavelengths, and
                  then from 1 through the last model listed in the dialog
                  box control (integer)
            114-116 - Number of spectrum steps, start wavelength, and end
                  wavelength, respectively (float).
            117 - Name of the spectrum file (string).
            161-162 - Array mode integer arguments 1 and 2.
            165-166 - Array mode double precision arguments 1 and 2.
            181-183 - Source color mode arguments, for example, the XYZ
                  values of the Tristimulus (float).
            The following codes set/get values to/from the "Grin tab" of
            the Object Properties dialog.
            121 - "Use DLL Defined Grin Media" checkbox (1 = checked, 0 =
                  unchecked)
            122 - Maximum step size value (float)
            123 - DLL name (string)
            124 - Grin DLL parameters. These are the parameters passed to
                  the DLL. The face value is used to specify the parameter
                  that is being defined. The first parameter is 1, the
                  second is 2, etc (float)
            The following codes set/get values to/from the "Draw tab" of
            the Object Properties dialog.
            141 - Do not draw object checkbox (1 = checked, 0 = unchecked)
            142 - Object opacity (0 = 100%, 1 = 90%, 2 = 80%, etc.)
            The following codes set/get values to/from the "Scatter To tab"
            of the Object Properties dialog.
            151 - Scatter to method (0 = scatter to list, 1 = importance
                  sampling)
            152 - Importance Sampling target data. The argument is a string
                  listing the ray number, the object number, the size, and
                  the limit value, separated by spaces. e.g., to set the
                  Importance Sampling data for ray 3, object 6, size 3.5,
                  and limit 0.6, the string argument is "3 6 3.5 0.6".
            153 - "Scatter To List" values. Argument is a string listing
                  the object numbers to scatter to delimited by spaces,
                  such as "4 6 19" (string)
            The following codes set/get values to/from the "Birefringence
            tab" of the Object Properties dialog.
            171 - Birefringent Media checkbox (0 = unchecked, 1 = checked)
            172 - Birefringent Media Mode (0 = Trace ordinary and
                  extraordinary rays, 1 = Trace only ordinary rays, 2 =
                  Trace only extraordinary rays, and 3 = Waveplate mode)
            173 - Birefringent Media Reflections status (0 = Trace
                  reflected and refracted rays, 1 = Trace only refracted
                  rays, and 2 = Trace only reflected rays)
            174-176 - Ax, Ay, and Az values (float)
            177 - Axis Length (float)
            200 - Index of refraction of an object (float)
            201-203 - nd (201), vd (202), and dpgf (203) parameters of an
                  object using a model glass.
            end-of-table
        See Also
        --------
        zSetNSCProperty()
        """
        # NOTE(review): the DDE command places ``code`` before ``faceNum``,
        # the reverse of this method's signature order -- presumably the
        # GetNSCProperty data item expects (surface, object, code, face);
        # confirm against the Zemax Extensions chapter before "fixing".
        cmd = ("GetNSCProperty,{:d},{:d},{:d},{:d}"
               .format(surfNum, objNum, code, faceNum))
        reply = self._sendDDEcommand(cmd)
        # type coercion (string/int/float by code) is delegated to the
        # module-level helper shared with zSetNSCProperty()
        nscPropData = _process_get_set_NSCProperty(code, reply)
        return nscPropData
def zGetNSCSettings(self):
"""Returns the maximum number of intersections, segments, nesting
level, minimum absolute intensity, minimum relative intensity, glue
distance, miss ray distance, ignore errors flag used for NSC ray
tracing.
Parameters
----------
None
Returns
-------
maxIntersec : integer
maximum number of intersections
maxSeg : integer
maximum number of segments
maxNest : integer
maximum nesting level
minAbsI : float
minimum absolute intensity
minRelI : float
minimum relative intensity
glueDist : float
glue distance
missRayLen : float
miss ray distance
ignoreErr : integer
1 if true, 0 if false
See Also
--------
zSetNSCSettings()
"""
reply = str(self._sendDDEcommand('GetNSCSettings'))
rs = reply.rsplit(",")
nscSettingsData = [float(rs[i]) if i in (3,4,5,6) else int(float(rs[i]))
for i in range(len(rs))]
nscSetData = _co.namedtuple('nscSettings', ['maxIntersec', 'maxSeg', 'maxNest',
'minAbsI', 'minRelI', 'glueDist',
'missRayLen', 'ignoreErr'])
return nscSetData._make(nscSettingsData)
def zGetNSCSolve(self, surfNum, objNum, param):
    """Returns the current solve status and settings for NSC position
    and parameter data.

    Parameters
    ----------
    surfNum : integer
        surface number of NSC group; use 1 if program mode is pure NSC
    objNum : integer
        object number
    param : integer
        the parameter selector:

        * -1 = extract data for x data
        * -2 = extract data for y data
        * -3 = extract data for z data
        * -4 = extract data for tilt x data
        * -5 = extract data for tilt y data
        * -6 = extract data for tilt z data
        * n > 0 = extract data for the nth parameter

    Returns
    -------
    nscSolveData : 5-tuple or -1
        tuple of (status, pickupObject, pickupColumn, scaleFactor,
        offset). The status value is 0 for fixed, 1 for variable, and
        2 for a pickup solve. Only when the status is a pickup solve
        is the other data meaningful. Returns -1 on a BAD COMMAND
        reply.

    See Also
    --------
    zSetNSCSolve()
    """
    reply = self._sendDDEcommand("GetNSCSolve,{:d},{:d},{:d}"
                                 .format(surfNum, objNum, param))
    stripped = reply.rstrip()
    if 'BAD COMMAND' in stripped:
        return -1
    # scaleFactor (field 3) and offset (field 4) are floats;
    # the remaining fields are integral.
    return tuple(float(tok) if pos in (3, 4) else int(float(tok))
                 for pos, tok in enumerate(stripped.split(",")))
def zGetOperand(self, row, column):
    """Return the operand data from the Merit Function Editor

    Parameters
    ----------
    row : integer
        operand row number in the MFE
    column : integer
        column number

    Returns
    -------
    operandData : integer/float/string
        operandData's type depends on ``column`` argument if
        successful, else -1.
        Refer to the column-operand-data_ table for information on the
        types of ``operandData`` and ``column`` number

    Notes
    -----
    .. _column-operand-data:

    ::

        Table: Column and operand data types

        -----------------------------------------------
        column          operand data
        -----------------------------------------------
        1               operand type (string)
        2               int1 (integer)
        3               int2 (integer)
        4-7             data1-data4 (float)
        8               target (float)
        9               weight (float)
        10              value (float)
        11              percentage contribution (float)
        12-13           data5-data6 (float)
        end-of-table

    See Also
    --------
    zGetOperandRow():
        Returns all values from a row in MFE
    zOperandValue():
        Returns the value of any optimization operand, even if the
        operand is not currently in the merit function.
        Use ``zOperandValue()`` instead of ``zGetOperand()`` if you
        just want to observe/retrieve (instead of optimizing) any
        operand variable.
    zOptimize() :
        To update merit function prior to calling ``zGetOperand()``,
        call ``zOptimize()`` with the number of cycles set to -1
    ipzGetMFE() :
        prints/ returns the MFE parameter suitable for interactive
        environment
    zSetOperand()
    """
    cmd = "GetOperand,{:d},{:d}".format(row, column)
    reply = self._sendDDEcommand(cmd)
    # the module-level helper converts the raw reply to int/float/str
    # according to the column's data type (see the table above)
    return _process_get_set_Operand(column, reply)
def zGetPath(self):
    """Returns the path to the <data> folder and the default lenses
    folder.

    Parameters
    ----------
    None

    Returns
    -------
    pathToDataFolder : string
        full path to the <data> folder
    pathToDefaultLensFolder : string
        full path to the default folder for lenses
    """
    raw = str(self._sendDDEcommand('GetPath'))
    # the reply is "<data path>,<lens path>"; strip the trailing newline
    return tuple(str(raw.rstrip()).split(','))
def zGetPolState(self):
    """Returns the default polarization state set by the user.

    Parameters
    ----------
    None

    Returns
    -------
    nlsPol : integer
        if ``nlsPol > 0``, then the default pol. state is unpolarized
    Ex : float
        normalized electric field magnitude in x direction
    Ey : float
        normalized electric field magnitude in y direction
    Phax : float
        relative phase in x direction in degrees
    Phay : float
        relative phase in y direction in degrees

    Notes
    -----
    The quantity Ex*Ex + Ey*Ey should have a value of 1.0, although
    any values are accepted.

    See Also
    --------
    zSetPolState()
    """
    tokens = self._sendDDEcommand("GetPolState").rsplit(",")
    # the last token is discarded (empty remnant of the trailing
    # separator in the reply); field 0 is an integer flag, the
    # remaining fields are floats
    converted = []
    for pos, tok in enumerate(tokens[:-1]):
        converted.append(int(float(tok)) if pos == 0 else float(tok))
    return tuple(converted)
def zGetPolTrace(self, waveNum, mode, surf, hx, hy, px, py, Ex, Ey, Phax, Phay):
    """Trace a single polarized ray defined by the normalized field
    height, pupil height, electric field magnitude and relative phase.

    If ``Ex``, ``Ey``, ``Phax``, ``Phay`` are all zero, two orthogonal
    rays are traced; the resulting transmitted intensity is averaged.

    Parameters
    ----------
    waveNum : integer
        wavelength number as in the wavelength data editor
    mode : integer (0/1)
        0 = real, 1 = paraxial
    surf : integer
        surface to trace the ray to. if -1, surf is the image plane
    hx : float
        normalized field height along x axis
    hy : float
        normalized field height along y axis
    px : float
        normalized height in pupil coordinate along x axis
    py : float
        normalized height in pupil coordinate along y axis
    Ex : float
        normalized electric field magnitude in x direction
    Ey : float
        normalized electric field magnitude in y direction
    Phax : float
        relative phase in x direction in degrees
    Phay : float
        relative phase in y direction in degrees

    Returns
    -------
    error : integer
        0, if the ray traced successfully;
        +ve number indicates ray missed the surface
        -ve number indicates ray total internal reflected (TIR)
        at the surface given by the absolute value of the ``error``
    intensity : float
        the transmitted intensity of the ray, normalized to an input
        electric field intensity of unity. The transmitted intensity
        accounts for surface, thin film, and bulk absorption effects,
        but does not consider whether or not the ray was vignetted.
    Exr,Eyr,Ezr : float
        real parts of the electric field components
    Exi,Eyi,Ezi : float
        imaginary parts of electric field components

    For unpolarized rays, only the ``error`` and ``intensity`` are
    relevant.

    Examples
    --------
    To trace the real unpolarized marginal ray to the image surface at
    wavelength 2, the function would be:

    >>> ln.zGetPolTrace(2, 0, -1, 0.0, 0.0, 0.0, 1.0, 0, 0, 0, 0)

    .. _notes-GetPolTrace:

    Notes
    -----
    1. The quantity ``Ex*Ex + Ey*Ey`` should have a value of 1.0
       although any values are accepted.
    2. There is an important exception to the above rule -- If ``Ex``,
       ``Ey``, ``Phax``, ``Phay`` are all zero, Zemax will trace two
       orthogonal rays, and the resulting transmitted intensity
       will be averaged.
    3. Always check to verify the ray data is valid (check ``error``)
       before using the rest of the data in the tuple.
    4. Use of ``zGetPolTrace()`` has significant overhead as only one
       ray per DDE call is traced. Please refer to the Zemax manual for
       more details.

    See Also
    --------
    zGetPolTraceDirect(), zGetTrace(), zGetTraceDirect()
    """
    # assemble the comma-separated argument list of the GetPolTrace
    # data item; field/pupil/E-field values are sent with 4 decimals
    args1 = "{wN:d},{m:d},{s:d},".format(wN=waveNum,m=mode,s=surf)
    args2 = "{hx:1.4f},{hy:1.4f},".format(hx=hx,hy=hy)
    args3 = "{px:1.4f},{py:1.4f},".format(px=px,py=py)
    args4 = "{Ex:1.4f},{Ey:1.4f},".format(Ex=Ex,Ey=Ey)
    args5 = "{Phax:1.4f},{Phay:1.4f}".format(Phax=Phax,Phay=Phay)
    cmd = "GetPolTrace," + args1 + args2 + args3 + args4 + args5
    reply = self._sendDDEcommand(cmd)
    rs = reply.split(',')
    # field 0 (error) is an integer code; every other field is a float
    polRayTraceData = [int(elem) if i==0 else float(elem)
                       for i,elem in enumerate(rs)]
    # NOTE(review): _make assumes the reply always carries exactly 8
    # fields, even on a ray-trace error -- confirm against the server
    rtd = _co.namedtuple('polRayTraceData', ['error', 'intensity',
                                             'Exr', 'Eyr', 'Ezr',
                                             'Exi', 'Eyi', 'Ezi'])
    polRayTraceData = rtd._make(polRayTraceData)
    return polRayTraceData
def zGetPolTraceDirect(self, waveNum, mode, startSurf, stopSurf,
                       x, y, z, l, m, n, Ex, Ey, Phax, Phay):
    """Trace a single polarized ray defined by the ``x``, ``y``,
    ``z``, ``l``, ``m`` and ``n`` coordinates on any starting
    surface as well as electric field magnitude and relative phase.

    If ``Ex``, ``Ey``, ``Phax``, ``Phay`` are all zero, Zemax will
    trace two orthogonal rays and the resulting transmitted intensity
    will be averaged.

    Parameters
    ----------
    waveNum : integer
        wavelength number as in the wavelength data editor
    mode : integer (0/1)
        0 = real, 1 = paraxial
    startSurf : integer
        surface to trace the ray from.
    stopSurf : integer
        last surface to trace the polarized ray to.
    x, y, z : floats
        coordinates of the ray at the starting surface
    l, m, n : floats
        the direction cosines to the entrance pupil aim point for the
        x-, y-, z- direction cosines respectively
    Ex : float
        normalized electric field magnitude in x direction
    Ey : float
        normalized electric field magnitude in y direction
    Phax : float
        relative phase in x direction in degrees
    Phay : float
        relative phase in y direction in degrees

    Returns
    -------
    error : integer
        0, if the ray traced successfully;
        +ve number indicates ray missed the surface
        -ve number indicates ray total internal reflected (TIR)
        at the surface given by the absolute value of the ``error``
    intensity : float
        the transmitted intensity of the ray, normalized to an input
        electric field intensity of unity. The transmitted intensity
        accounts for surface, thin film, and bulk absorption effects,
        but does not consider whether or not the ray was vignetted.
    Exr,Eyr,Ezr : float
        real parts of the electric field components
    Exi,Eyi,Ezi : float
        imaginary parts of electric field components

    For unpolarized rays, only the ``error`` and ``intensity`` are
    relevant.

    Notes
    -----
    Refer to the notes (notes-GetPolTrace_) of ``zGetPolTrace()``

    See Also
    --------
    zGetPolTrace(), zGetTrace(), zGetTraceDirect()
    """
    # coordinates and direction cosines are sent at full precision
    # (1.20g); E-field values with 4 decimals
    args0 = "{wN:d},{m:d},".format(wN=waveNum,m=mode)
    args1 = "{sa:d},{sd:d},".format(sa=startSurf,sd=stopSurf)
    args2 = "{x:1.20g},{y:1.20g},{z:1.20g},".format(x=x,y=y,z=z)
    args3 = "{l:1.20g},{m:1.20g},{n:1.20g},".format(l=l,m=m,n=n)
    args4 = "{Ex:1.4f},{Ey:1.4f},".format(Ex=Ex,Ey=Ey)
    args5 = "{Phax:1.4f},{Phay:1.4f}".format(Phax=Phax,Phay=Phay)
    cmd = ("GetPolTraceDirect," + args0 + args1 + args2 + args3
           + args4 + args5)
    reply = self._sendDDEcommand(cmd)
    rs = reply.split(',')
    # field 0 (error) is an integer code; every other field is a float
    polRayTraceData = [int(elem) if i==0 else float(elem)
                       for i,elem in enumerate(rs)]
    rtd = _co.namedtuple('polRayTraceData', ['error', 'intensity',
                                             'Exr', 'Eyr', 'Ezr',
                                             'Exi', 'Eyi', 'Ezi'])
    polRayTraceData = rtd._make(polRayTraceData)
    return polRayTraceData
def zGetPupil(self):
    """Return the pupil data such as aperture type, ENPD, EXPD, etc.

    Parameters
    ----------
    None

    Returns
    -------
    aType : integer
        the system aperture type code:

        * 0 = entrance pupil diameter
        * 1 = image space F/#
        * 2 = object space NA
        * 3 = float by stop
        * 4 = paraxial working F/#
        * 5 = object cone angle
    value : float
        the stop surface semi-diameter if the aperture type is
        "float by stop", else the system aperture
    ENPD : float
        entrance pupil diameter (in lens units)
    ENPP : float
        entrance pupil position from the first surface (in lens units)
    EXPD : float
        exit pupil diameter (in lens units)
    EXPP : float
        exit pupil position from the image plane (in lens units)
    apodization_type : integer
        0 = none; 1 = Gaussian; 2 = Tangential/Cosine cubed
    apodization_factor : float
        number shown on general data dialog box
    """
    PupilData = _co.namedtuple('PupilData', ['aType', 'value', 'ENPD',
                                             'ENPP', 'EXPD', 'EXPP',
                                             'apoType', 'apoFactor'])
    tokens = self._sendDDEcommand('GetPupil').split(',')
    # fields 0 (aType) and 6 (apoType) are integer codes; rest floats
    converted = []
    for pos, tok in enumerate(tokens):
        converted.append(int(tok) if pos in (0, 6) else float(tok))
    return PupilData._make(converted)
def zGetRefresh(self):
    """Copy lens data from the LDE into the Zemax server.

    The lens is updated and Zemax re-computes all data.

    Parameters
    ----------
    None

    Returns
    -------
    status : integer (0, -1, or -998)
        0 if successful;
        -1 if Zemax could not copy the lens data LDE to the server;
        -998 if the command times out (Note MZDDE returns -2)

    Notes
    -----
    If ``zGetRefresh()`` returns -1, no ray tracing can be performed.

    See Also
    --------
    zGetUpdate(), zPushLens()
    """
    reply = self._sendDDEcommand('GetRefresh')
    if not reply:
        return -998  # command timed out
    # Zemax itself returns -1 when GetRefresh fails
    return int(reply)
def zGetSag(self, surfNum, x, y):
    """Return the sag of the surface at coordinates (x,y) in lens
    units.

    Parameters
    ----------
    surfNum : integer
        surface number
    x : float
        x coordinate in lens units
    y : float
        y coordinate in lens units

    Returns
    -------
    sag : float
        sag of the surface at (x,y) in lens units
    alternateSag : float
        alternate sag
    """
    reply = self._sendDDEcommand(
        "GetSag,{:d},{:1.20g},{:1.20g}".format(surfNum, x, y))
    parts = reply.rsplit(",")
    return (float(parts[0]), float(parts[1]))
def zGetSequence(self):
    """Returns the sequence numbers of the lens in the server and in
    the LDE.

    Parameters
    ----------
    None

    Returns
    -------
    seqNum_lenServ : float
        sequence number of lens in server
    seqNum_lenLDE : float
        sequence number of lens in LDE
    """
    parts = self._sendDDEcommand("GetSequence").rsplit(",")
    return (float(parts[0]), float(parts[1]))
def zGetSerial(self):
    """Get the serial number of the running Zemax application.

    Parameters
    ----------
    None

    Returns
    -------
    serial number : integer
        serial number
    """
    return int(self._sendDDEcommand('GetSerial').rstrip())
def zGetSettingsData(self, tempFile, number):
    """Returns the settings data used by a window.

    The data must have been previously stored by a call to
    ``zSetSettingsData()`` or by a previous execution of the client
    program.

    Parameters
    ----------
    tempFile : string
        the name of the output file passed by Zemax to the client.
        Zemax uses this name to identify for the window for which the
        ``zGetSettingsData()`` request is for.
    number : integer
        the data number used by the previous ``zSetSettingsData()``
        call. Currently, only ``number=0`` is supported.

    Returns
    -------
    settingsData : string
        data saved by a previous ``zSetSettingsData()`` call for the
        ``window`` and ``number``.

    See Also
    --------
    zSetSettingsData()
    """
    reply = self._sendDDEcommand(
        "GetSettingsData,{},{:d}".format(tempFile, number))
    return str(reply.rstrip())
def zGetSolve(self, surfNum, code):
    """Returns data about solves and/or pickups on the surface

    Parameters
    ----------
    surfNum : integer
        surface number
    code : integer
        indicating the surface parameter, such as curvature, thickness,
        glass, conic, semi-diameter, etc. (Refer to the table
        surf_param_codes_for_getsolve_ or use the surface
        parameter mnemonic codes with signature `ln.SOLVE_SPAR_XXX`, e.g.
        `ln.SOLVE_SPAR_CURV`, `ln.SOLVE_SPAR_THICK`, etc. The `SPAR`
        stands for surface parameter.

    Returns
    -------
    solveData : tuple
        tuple is depending on the code value according to the table;
        returns -1 if error occurs

    Examples
    --------
    >>> solvetype, param1, param2, param3, pickup = ln.zGetSolve(3, ln.SOLVE_SPAR_THICK)

    In the above example, since the solve is on Thickness (code=ln.SOLVE_SPAR_THICK),
    if the `solvetype` is "Position" (7), then `param1` is "From Surface",
    `param2` is "Length", and `param3` and `pickup` are un-specified. So, a typical
    output could be `(7, 3.0, 0.0, 0.0, 0)`. Instead of "Position", if the `solvetype`
    is "Pickup" (5), then `param1` is "From Surface", `param2` is "Scale Factor",
    `param3` is "Offset", and `pickup` is "Pickup column"

    Notes
    -----
    .. _surf_param_codes_for_getsolve:

    ::

        Table : Surface parameter codes for zGetsolve() and zSetSolve()

        ------------------------------------------------------------------------------
        code              - Datum set/get by zGetSolve()/zSetSolve()
        ------------------------------------------------------------------------------
        0 (curvature)     - solvetype, param1, param2, pickupcolumn
        1 (thickness)     - solvetype, param1, param2, param3, pickupcolumn
        2 (glass)         - solvetype (for solvetype = 0);
                            solvetype, Index, Abbe, Dpgf (for solvetype = 1, model glass);
                            solvetype, pickupsurf (for solvetype = 2, pickup);
                            solvetype, index_offset, abbe_offset (for solvetype = 4, offset);
                            solvetype (for solvetype=all other values)
        3 (semi-diameter) - solvetype, pickupsurf, pickupcolumn
        4 (conic)         - solvetype, pickupsurf, pickupcolumn
        5-16 (param 1-12) - solvetype, pickupsurf, offset, scalefactor, pickupcolumn
        17 (parameter 0)  - solvetype, pickupsurf, offset, scalefactor, pickupcolumn
        1001+ (extra      - solvetype, pickupsurf, scalefactor, offset, pickupcolumn
        data values 1+)
        end-of-table

    The ``solvetype`` is an integer code, & the parameters have
    meanings that depend upon the solve type; see the chapter
    "SOLVES" in the Zemax manual for details.

    See Also
    --------
    zSetSolve(), zGetNSCSolve(), zSetNSCSolve()
    """
    cmd = "GetSolve,{:d},{:d}".format(surfNum,code)
    reply = self._sendDDEcommand(cmd)
    # the module-level helper parses the reply into the code-dependent
    # tuple documented in the table above
    solveData = _process_get_set_Solve(reply)
    return solveData
def zGetSurfaceData(self, surfNum, code, arg2=None):
    """Gets surface data on a sequential lens surface.

    Parameters
    ----------
    surfNum : integer
        the surface number
    code : integer
        integer code (see table surf_data_codes_ below). You may also
        use the surface data mnemonic codes with signature ln.SDAT_XXX,
        e.g. ln.SDAT_TYPE, ln.SDAT_CURV, ln.SDAT_THICK, etc
    arg2 : integer, optional
        required for item ``codes`` above 70.

    Returns
    -------
    surface_data : string or numeric
        the returned data depends on the ``code``. Refer to the table
        surf_data_codes_ for details.

    Notes
    -----
    .. _surf_data_codes:

    ::

        Table : Surface data codes for getter and setter of SurfaceData

        ---------------------------------------------------------------
        Code  - Datum set/get by zSetSurfaceData()/zGetSurfaceData()
        ---------------------------------------------------------------
        0     - Surface type name (string)
        1     - Comment (string)
        2     - Curvature (numeric)
        3     - Thickness (numeric)
        4     - Glass (string)
        5     - Semi-Diameter (numeric)
        6     - Conic (numeric)
        7     - Coating (string)
        8     - Thermal Coefficient of Expansion (TCE)
        9     - User-defined .dll (string)
        20    - Ignore surface flag. 0 for not ignored; 1 for ignored
        51    - Before tilt and decenter order; 0 for Decenter
                then Tilt; 1 for Tilt then Decenter
        52    - Before decenter x
        53    - Before decenter y
        54    - Before tilt x
        55    - Before tilt y
        56    - Before tilt z
        60    - After status. 0 for explicit; 1 for pickup current surface;
                2 for reverse current surface; 3 for pickup previous surface;
                4 for reverse previous surface, etc.
        61    - After tilt and decenter order; 0 for Decenter
                then Tilt, 1 for Tilt then Decenter
        62    - After decenter x
        63    - After decenter y
        64    - After tilt x
        65    - After tilt y
        66    - After tilt z
        70    - Use Layer Multipliers and Index Offsets. Use 1 for
                true, 0 for false.
        71    - Layer Multiplier value. The coating layer number is
                defined by ``arg2``
        72    - Layer Multiplier status. Use 0 for fixed; 1 for
                variable; or n+1 for pickup from layer n. The coating
                layer number is defined by ``arg2``
        73    - Layer Index Offset value. The coating layer number is
                defined by ``arg2``
        74    - Layer Index Offset status. Use 0 for fixed; 1 for
                variable, or n+1 for pickup from layer n. The coating
                layer number is defined by ``arg2``
        75    - Layer Extinction Offset value. The coating layer
                number is defined by ``arg2``
        76    - Layer Extinction Offset status. Use 0 for fixed; 1 for
                variable, or n+1 for pickup from layer n. The coating
                layer number is defined by ``arg2``
        Other - Reserved for future expansion of this feature.
        end-of-table

    See Also
    --------
    zSetSurfaceData(), zGetSurfaceParameter()
    """
    # arg2 is only appended for coating-layer codes (>70); the data
    # item accepts the shorter form otherwise
    if arg2 is None:
        cmd = "GetSurfaceData,{sN:d},{c:d}".format(sN=surfNum,c=code)
    else:
        cmd = "GetSurfaceData,{sN:d},{c:d},{a:d}".format(sN=surfNum,
                                                         c=code,a=arg2)
    reply = self._sendDDEcommand(cmd)
    # codes 0, 1, 4, 7 and 9 (type name, comment, glass, coating,
    # dll) return strings; everything else is numeric
    if code in (0,1,4,7,9):
        surfaceDatum = reply.rstrip()
    else:
        surfaceDatum = float(reply)
    return surfaceDatum
def zGetSurfaceDLL(self, surfNum):
    """Return the name of the DLL if the surface is a user defined
    type.

    Parameters
    ----------
    surfNum : integer
        surface number of the user defined surface

    Returns
    -------
    dllName : string
        The name of the defining DLL
    surfaceName : string
        surface name displayed by the DLL in the surface type column of
        the LDE
    """
    # Bug fix: the format string uses the *named* field {sN:d}, so the
    # argument must be passed by keyword. The previous positional call
    # ".format(surfNum)" raised KeyError: 'sN' before any DDE command
    # was sent.
    cmd = "GetSurfaceDLL,{sN:d}".format(sN=surfNum)
    reply = self._sendDDEcommand(cmd)
    rs = reply.split(',')
    return (rs[0], rs[1])
def zGetSurfaceParameter(self, surfNum, param):
    """Return the surface parameter data for the surface associated
    with the given surface number ``surfNum``.

    Parameters
    ----------
    surfNum : integer
        surface number of the surface
    param : integer
        parameter number ('Par' in LDE) being queried

    Returns
    -------
    paramData : float
        the parameter value

    See Also
    --------
    zGetSurfaceData() :
        To get thickness, radius, glass, semi-diameter, conic, etc,
    zSetSurfaceParameter()
    """
    reply = self._sendDDEcommand(
        "GetSurfaceParameter,{sN:d},{p:d}".format(sN=surfNum, p=param))
    return float(reply)
def zGetSystem(self):
    """Returns a number of general system data (General Lens Data)

    Parameters
    ----------
    None

    Returns
    -------
    numSurfs : integer
        number of surfaces
    unitCode : integer
        lens units code (0, 1, 2, or 3 for mm, cm, in, or M)
    stopSurf : integer
        the stop surface number
    nonAxialFlag : integer
        flag to indicate if system is non-axial symmetric (0 for axial,
        1 if not axial);
    rayAimingType : integer
        ray aiming type (0, 1, or 2 for off, paraxial or real)
    adjustIndex : integer
        adjust index data to environment flag (0 if false, 1 if true)
    temp : float
        the current temperature
    pressure : float
        the current pressure
    globalRefSurf : integer
        the global coordinate reference surface number

    Notes
    -----
    The returned data structure is same as the data structure returned
    by the ``zSetSystem()`` method

    See Also
    --------
    zGetFirst() :
        to get first order lens data such as EFL, F/#, etc.
    zSetSystem(), zGetSystemProperty(), zGetSystemAper(),
    zGetAperture(), zSetAperture()
    """
    sdt = _co.namedtuple('systemData' , ['numSurf', 'unitCode',
                                         'stopSurf', 'nonAxialFlag',
                                         'rayAimingType', 'adjustIndex',
                                         'temp', 'pressure',
                                         'globalRefSurf'])
    reply = self._sendDDEcommand("GetSystem")
    rs = reply.split(',')
    # Bug fix: pressure (field 7) is documented as a float but was
    # previously truncated through int(float(...)); both temp (field
    # 6) and pressure (field 7) are now kept as floats, all other
    # fields remain integers.
    systemData = sdt._make([float(elem) if i in (6, 7)
                            else int(float(elem))
                            for i, elem in enumerate(rs)])
    return systemData
def zGetSystemAper(self):
    """Gets system aperture data -- aperture type, stopSurf and value.

    Returns
    -------
    aType : integer
        indicating the system aperture as follows:

        | 0 = entrance pupil diameter (EPD)
        | 1 = image space F/# (IF/#)
        | 2 = object space NA (ONA)
        | 3 = float by stop (FBS)
        | 4 = paraxial working F/# (PWF/#)
        | 5 = object cone angle (OCA)
    stopSurf : integer
        stop surface
    value : float
        if aperture type is "float by stop" value is stop surface
        semi-diameter else value is the system aperture

    Notes
    -----
    The returned tuple is the same as the returned tuple of
    ``zSetSystemAper()``

    See Also
    --------
    zGetSystem(), zSetSystemAper()
    """
    SystemAper = _co.namedtuple('systemAper',
                                ['apertureType', 'stopSurf', 'value'])
    tokens = self._sendDDEcommand("GetSystemAper").split(',')
    # only the last field (value) is a float; the first two are
    # integer codes
    converted = []
    for pos, tok in enumerate(tokens):
        converted.append(float(tok) if pos == 2 else int(float(tok)))
    return SystemAper._make(converted)
def zGetSystemProperty(self, code):
    """Returns properties of the system, such as system aperture, field,
    wavelength, and other data, based on the integer ``code`` passed.

    This function mimics the ZPL function ``SYPR``.

    Parameters
    ----------
    code : integer
        value that defines the specific system property. (see the table
        system_property_codes_ below).

    Returns
    -------
    sysPropData : string or numeric
        Returned system property data

    Notes
    -----
    .. _system_property_codes:

    ::

        Table : System property codes

        ---------------------------------------------------------------
        Code set/get by zSetSystemProperty()/zGetSystemProperty()
        ---------------------------------------------------------------
          4 - Adjust Index Data To Environment (0:off, 1:on)
         10 - Aperture Type code. (0:EPD, 1:IF/#, 2:ONA, 3:FBS,
              4:PWF/#, 5:OCA)
         11 - Aperture Value (stop surface semi-diameter if aperture
              type is FBS, else system aperture)
         12 - Apodization Type code. (0:uniform, 1:Gaussian, 2:cosine
              cubed)
         13 - Apodization Factor
         14 - Telecentric Object Space (0:off, 1:on)
         15 - Iterate Solves When Updating (0:off, 1:on)
         16 - Lens Title
         17 - Lens Notes
         18 - Afocal Image Space (0:off or "focal mode", 1:on or
              "afocal mode")
         21 - Global coordinate reference surface
         23 - Glass catalog list (Use a string or string variable
              with the glass catalog name, such as "SCHOTT". To
              specify multiple catalogs use a single string or string
              variable containing names separated by spaces, such as
              "SCHOTT HOYA OHARA".)
         24 - System Temperature in degrees Celsius.
         25 - System Pressure in atmospheres.
         26 - Reference OPD method. (0:absolute, 1:infinity, 2:exit
              pupil, 3:absolute 2.)
         30 - Lens Unit code (0:mm, 1:cm, 2:inches, 3:Meters)
         31 - Source Unit Prefix (0:Femto, 1:Pico, 2:Nano, 3:Micro,
              4:Milli, 5:None, 6:Kilo, 7:Mega, 8:Giga, 9:Tera)
         32 - Source Units. (0:Watts, 1:Lumens, 2:Joules)
         33 - Analysis Unit Prefix (0:Femto, 1:Pico, 2:Nano, 3:Micro,
              4:Milli, 5:None, 6:Kilo, 7:Mega, 8:Giga, 9:Tera)
         34 - Analysis Units "per" Area. (0:mm^2, 1:cm^2, 2:inches^2,
              3:Meters^2, 4:feet^2)
         35 - MTF Units code. (0:cycles per millimeter, 1:cycles per
              milliradian.
         40 - Coating File name.
         41 - Scatter Profile name.
         42 - ABg Data File name.
         43 - GRADIUM Profile name.
         50 - NSC Maximum Intersections Per Ray.
         51 - NSC Maximum Segments Per Ray.
         52 - NSC Maximum Nested/Touching Objects.
         53 - NSC Minimum Relative Ray Intensity.
         54 - NSC Minimum Absolute Ray Intensity.
         55 - NSC Glue Distance In Lens Units.
         56 - NSC Missed Ray Draw Distance In Lens Units.
         57 - NSC Retrace Source Rays Upon File Open. (0:no, 1:yes)
         58 - NSC Maximum Source File Rays In Memory.
         59 - Simple Ray Splitting. (0:no, 1:yes)
         60 - Polarization Jx.
         61 - Polarization Jy.
         62 - Polarization X-Phase.
         63 - Polarization Y-Phase.
         64 - Convert thin film phase to ray equivalent (0:no, 1:yes)
         65 - Unpolarized. (0:no, 1:yes)
         66 - Method. (0:X-axis, 1:Y-axis, 2:Z-axis)
         70 - Ray Aiming. (0:off, 1:on (paraxial), 2:aberrated (real))
         71 - Ray aiming pupil shift x.
         72 - Ray aiming pupil shift y.
         73 - Ray aiming pupil shift z.
         74 - Use Ray Aiming Cache. (0:no, 1:yes)
         75 - Robust Ray Aiming. (0:no, 1:yes)
         76 - Scale Pupil Shift Factors By Field. (0:no, 1:yes)
         77 - Ray aiming pupil compress x.
         78 - Ray aiming pupil compress y.
        100 - Field type code. (0=angl, 1=obj ht, 2=parx img ht,
              3=rel img ht)
        101 - Number of fields.
        102,103 - The field number is value1, value2 is the field x,
              y coordinate
        104 - The field number is value1, value2 is the field weight
        105,106 - The field number is value1, value2 is the field
              vignetting decenter x, decenter y
        107,108 - The field number is value1, value2 is the field
              vignetting compression x, compression y
        109 - The field number is value1, value2 is the field
              vignetting angle
        110 - The field normalization method, value 1 is 0 for radial
              and 1 for rectangular
        200 - Primary wavelength number.
        201 - Number of wavelengths
        202 - The wavelength number is value1, value 2 is the
              wavelength in micrometers.
        203 - The wavelength number is value1, value 2 is the
              wavelength weight
        901 - The number of CPU's to use in multi-threaded
              computations, such as optimization. (0=default). See
              the manual for details.

        NOTE: Currently Zemax returns just "0" for the codes: 102,103,
        104,105, 106,107,108,109, and 110. This is unexpected!
        So, PyZDDE will return the reply (string) as-is for the
        user to handle.
        end-of-table

    See Also
    --------
    zSetSystemProperty(), zGetFirst()
    """
    cmd = "GetSystemProperty,{c}".format(c=code)
    reply = self._sendDDEcommand(cmd)
    # the module-level helper converts the reply to int/float/str
    # according to the code's data type (see the table above)
    sysPropData = _process_get_set_SystemProperty(code,reply)
    return sysPropData
def zGetTextFile(self, textFileName, analysisType, settingsFile=None,
                 flag=0, timeout=None):
    """Generate a text file for any analysis that supports text output.

    Parameters
    ----------
    textFileName : string
        name of the file to be created including the full path and
        extension.
    analysisType : string
        3 letter case-sensitive label that indicates the type of the
        analysis to be performed. They are identical to the button
        codes. If no label is provided or recognized, a standard
        raytrace will be generated
    settingsFile : string
        If ``settingsFile`` is valid, Zemax will use or save the
        settings used to compute the text file, depending upon the
        value of the flag parameter
    flag : integer (0, 1, or 2)
        0 = default settings used for the text;
        1 = settings provided in the settings file, if valid, else
        default;
        2 = settings provided in the settings file, if valid, will be
        used and the settings box for the requested feature will
        be displayed. After the user makes any changes to the
        settings the text will then be generated using the new
        settings. Please see the manual for more details
    timeout : integer, optional
        timeout in seconds (default=None, i.e. default timeout value)

    Returns
    -------
    retVal : integer
        0 = success;
        -1 = text file could not be saved (Zemax may not have received
        a full path name or extention);
        -998 = command timed out

    Notes
    -----
    No matter what the flag value is, if a valid file name is provided
    for ``settingsFile``, the settings used will be written to the
    settings file, overwriting any data in the file.

    See Also
    --------
    zGetMetaFile(), zOpenWindow()
    """
    settings = settingsFile if settingsFile else ''
    # an absolute path with a file extension is required; otherwise
    # the command is not sent and -1 is returned
    if not (_os.path.isabs(textFileName)
            and _os.path.splitext(textFileName)[1] != ''):
        return -1
    cmd = ('GetTextFile,"{tF}",{aT},"{sF}",{fl:d}'
           .format(tF=textFileName, aT=analysisType, sF=settings,
                   fl=flag))
    reply = self._sendDDEcommand(cmd, timeout)
    return 0 if 'OK' in reply.split() else -1
def zGetTol(self, operNum):
    """Returns the tolerance data.

    Parameters
    ----------
    operNum : integer
        0 or the tolerance operand number (row number in the tolerance
        editor, when greater than 0)

    Returns
    -------
    toleranceData : single number or a 6-tuple
        It is a number or a 6-tuple, depending upon ``operNum``
        as follows:

        * if ``operNum==0``, then toleranceData = number where
          ``number`` is the number of tolerance operands defined.
        * if ``operNum > 0``, then toleranceData =
          (tolType, int1, int2, min, max, int3)

    See Also
    --------
    zSetTol(), zSetTolRow()
    """
    reply = self._sendDDEcommand("GetTol,{:d}".format(operNum))
    if operNum != 0:
        return _process_get_set_Tol(operNum, reply)
    count = int(float(reply.rstrip()))
    if count == 1:
        # A count of 1 can also mean an empty tolerance editor that
        # holds only the TOFF placeholder operand; check the first row
        # to disambiguate.
        firstRow = self._sendDDEcommand("GetTol,1")
        if firstRow.rsplit(",")[0] == 'TOFF':
            count = 0
    return count
def zGetTrace(self, waveNum, mode, surf, hx, hy, px, py):
    """Trace a ray defined by its normalized field and pupil heights
    as well as wavelength through the lens in the Zemax DDE server

    Parameters
    ----------
    waveNum : integer
        wavelength number as in the wavelength data editor
    mode : integer
        0 = real; 1 = paraxial
    surf : integer
        surface to trace the ray to. Usually, the ray data is only
        needed at the image surface; setting the surface number to
        -1 will yield data at the image surface.
    hx : real
        normalized field height along x axis
    hy : real
        normalized field height along y axis
    px : real
        normalized height in pupil coordinate along x axis
    py : real
        normalized height in pupil coordinate along y axis

    Returns
    -------
    error : integer
        0 = ray traced successfully;
        +ve number = the ray missed the surface;
        -ve number = the ray total internal reflected (TIR) at surface
        given by the absolute value of the ``error``
    vig : integer
        the first surface where the ray was vignetted. Unless an error
        occurs at that surface or subsequent to that surface, the ray
        will continue to trace to the requested surface.
    x, y, z : reals
        coordinates of the ray on the requested surface
    l, m, n : reals
        the direction cosines after refraction into the media following
        the requested surface.
    l2, m2, n2 : reals
        the surface intercept direction normals at requested surface
    intensity : real
        the relative transmitted intensity of the ray, including any
        pupil or surface apodization defined.

    Examples
    --------
    To trace the real chief ray to surface 5 for wavelength 3:

    >>> rayTraceData = ln.zGetTrace(3,0,5,0.0,1.0,0.0,0.0)
    >>> error,vig,x,y,z,l,m,n,l2,m2,n2,intensity = rayTraceData

    Notes
    -----
    1. Always check to verify the ray data is valid ``error`` before
       using the rest of the returned parameters
    2. Use of ``zGetTrace()`` has significant overhead as only 1 ray
       per DDE call is traced. Use ``arraytrace.zGetTraceArray()`` for
       tracing large number of rays.

    See Also
    --------
    arraytrace.zGetTraceArray(), zGetTraceDirect(), zGetPolTrace(),
    zGetPolTraceDirect()
    """
    # assemble the comma-separated argument list for the GetTrace data
    # item; field/pupil coordinates are sent with 4 decimals
    args1 = "{wN:d},{m:d},{s:d},".format(wN=waveNum,m=mode,s=surf)
    args2 = "{hx:1.4f},{hy:1.4f},".format(hx=hx,hy=hy)
    args3 = "{px:1.4f},{py:1.4f}".format(px=px,py=py)
    cmd = "GetTrace," + args1 + args2 + args3
    reply = self._sendDDEcommand(cmd)
    rs = reply.split(',')
    # fields 0 (error) and 1 (vig) are integer codes; the remaining
    # ten fields are floats
    rayData = [int(elem) if (i==0 or i==1)
               else float(elem) for i,elem in enumerate(rs)]
    rtd = _co.namedtuple('rayTraceData', ['error', 'vig', 'x', 'y', 'z',
                                          'dcos_l', 'dcos_m', 'dcos_n',
                                          'dnorm_l2', 'dnorm_m2', 'dnorm_n2',
                                          'intensity'])
    rayTraceData = rtd._make(rayData)
    return rayTraceData
def zGetTraceDirect(self, waveNum, mode, startSurf, stopSurf, x, y, z, l, m, n):
    """Trace a single ray defined directly by its coordinates (``x``,
    ``y``, ``z``) and direction cosines (``l``, ``m``, ``n``) on any
    starting surface.

    ``zGetTraceDirect`` provides a more direct access to the Zemax ray
    tracing engine than ``zGetTrace``, which uses normalized field and
    pupil coordinates instead.

    Parameters
    ----------
    waveNum : integer
        wavelength number as in the wavelength data editor
    mode : integer
        0 = real, 1 = paraxial
    startSurf : integer
        starting surface of the ray
    stopSurf : integer
        stopping surface of the ray
    x, y, z : floats
        coordinates of the ray at the starting surface
    l, m, n : floats
        direction cosines to the entrance pupil aim point for the x-,
        y-, z- direction cosines respectively

    Returns
    -------
    rayTraceData : namedtuple
        fields, in order:

        * err : (integer) 0 = ray traced successfully; +ve = the ray
          missed the surface; -ve = the ray was TIR'd at the surface
          given by the absolute value of ``err``
        * vig : (integer) first surface where the ray was vignetted;
          unless an error occurs at or after that surface, the ray
          continues to trace to the requested surface
        * x, y, z : (reals) coordinates of the ray on the requested
          surface
        * dcos_l, dcos_m, dcos_n : (reals) direction cosines after
          refraction into the media following the requested surface
        * dnorm_l2, dnorm_m2, dnorm_n2 : (reals) surface intercept
          direction normals at the requested surface
        * intensity : (real) relative transmitted intensity of the
          ray, including any pupil or surface apodization defined

    Notes
    -----
    Normally, rays are defined by the normalized field and pupil
    coordinates hx, hy, px, and py, from which Zemax computes the
    object coordinates and the direction cosines to the entrance pupil
    aim point. Direct specification of x, y, z, l, m, and n adds the
    flexibility of defining the starting surface of the ray anywhere
    in the optical system.
    """
    # integer arguments first, then the six coordinates/cosines in
    # high-precision fixed-point notation as expected by Zemax
    head = "GetTraceDirect,{:d},{:d},{:d},{:d},".format(waveNum, mode,
                                                        startSurf, stopSurf)
    tail = ",".join("{:1.20f}".format(val) for val in (x, y, z, l, m, n))
    reply = self._sendDDEcommand(head + tail)
    fields = reply.split(',')
    rtd = _co.namedtuple('rayTraceData', ['err', 'vig', 'x', 'y', 'z',
                                          'dcos_l', 'dcos_m', 'dcos_n',
                                          'dnorm_l2', 'dnorm_m2', 'dnorm_n2',
                                          'intensity'])
    # first two fields (err, vig) are integers, the rest are floats
    return rtd._make([int(f) if i < 2 else float(f)
                      for i, f in enumerate(fields)])
def zGetUDOSystem(self, bufferCode):
    """Load a particular lens from the optimization function memory
    into the Zemax server's memory.

    This causes Zemax to retrieve the correct lens from system memory;
    all subsequent DDE calls (such as ray tracing) act on this lens.
    This item should only be used when implementing a User Defined
    Operand (UDO).

    Parameters
    ----------
    bufferCode : integer
        integer value provided by Zemax to the client that uniquely
        identifies the correct lens

    Returns
    -------
    ?

    Notes
    -----
    Once the data is computed, up to 1001 values may be sent back to
    the server, and ultimately to the optimizer within Zemax, with the
    ``zSetUDOItem()`` function.

    See Also
    --------
    zSetUDOItem()
    """
    reply = self._sendDDEcommand("GetUDOSystem,{:d}".format(bufferCode))
    return _regressLiteralType(reply.rstrip())
# FIX !!! At this time, I am not sure what is the expected return.
def zGetUpdate(self):
    """Update the lens. Zemax recomputes all pupil positions, solves,
    and index data.

    Parameters
    ----------
    None

    Returns
    -------
    status : integer
        0 = Zemax successfully updated the lens;
        -1 = No raytrace performed;
        -998 = Command timed out

    Notes
    -----
    To update the merit function, use ``zOptimize()`` with the number
    of cycles set to -1.

    See Also
    --------
    zGetRefresh(), zOptimize(), zPushLens()
    """
    ret = self._sendDDEcommand("GetUpdate")
    # use identity comparison for the None sentinel (PEP 8); Zemax
    # returns -1 if GetUpdate fails
    if ret is not None:
        return int(ret)
    return -998  # command timed out
def zGetVersion(self):
    """Get the version of Zemax

    Parameters
    ----------
    None

    Returns
    -------
    version : integer
        version code reported by the Zemax server (a release-date
        based integer)
    """
    reply = self._sendDDEcommand("GetVersion")
    return int(reply)
def zGetWave(self, n):
    """Get wavelength data

    There are 2 ways of using this function:

    * ``zGetWave(0) -> waveData`` Or
    * ``zGetWave(wavelengthNumber) -> waveData``

    Returns
    -------
    if n==0 : 2-tuple
        * primary : (integer) number indicating the primary wavelength
        * number : (integer) number of wavelengths currently defined
    if 0 < n <= number_of_wavelengths : 2-tuple
        * wavelength : (float) value of the specific wavelength (in
          micrometers)
        * weight : (float) weight of the specific wavelength

    Notes
    -----
    1. The returned tuple has exactly the same structure and contents
       as that returned by ``zSetWave()``.
    2. Wavelength data are always measured in micrometers referenced
       to "air" at the system temperature and pressure.

    See Also
    --------
    zSetWave(), zSetWaveTuple(), zGetWaveTuple(), zGetPrimaryWave()
    """
    reply = self._sendDDEcommand('GetWave,' + str(n))
    fields = reply.split(',')
    if n == 0:
        # summary query: primary wavelength number & wavelength count
        wtd = _co.namedtuple('waveData', ['primaryWavelengthNum',
                                          'numberOfWavelengths'])
        return wtd._make(int(f) for f in fields)
    # specific wavelength query: value (microns) and weight
    wtd = _co.namedtuple('waveData', ['wavelength', 'weight'])
    return wtd._make(float(f) for f in fields)
def zHammer(self, numOfCycles, algorithm, timeout=60):
    """Calls the Hammer optimizer

    Parameters
    ----------
    numOfCycles : integer
        the number of cycles to run. If ``numOfCycles < 1``, no
        optimization is performed, but Zemax updates all operands in
        the MFE and returns the merit-function
    algorithm : integer
        0 = Damped Least Squares; 1 = Orthogonal descent
    timeout : integer
        timeout in seconds (default = 60 seconds)

    Returns
    -------
    finalMeritFn : float
        the final merit function.

    Notes
    -----
    1. A returned value of 9.0E+009 indicates that the optimization
       failed, usually because the lens or merit function could not
       be evaluated.
    2. The number of cycles should be kept small enough for the
       algorithm to complete and return before the DDE communication
       times out, or an error will occur. One way to achieve a high
       number of cycles is to call ``zHammer()`` multiple times in a
       loop, each time comparing the returned merit function with a
       few of the previously returned (& stored) merit functions to
       determine whether an optimum has been attained.

    See Also
    --------
    zOptimize(), zLoadMerit(), zsaveMerit()
    """
    reply = self._sendDDEcommand(
        "Hammer,{:1.2g},{:d}".format(numOfCycles, algorithm), timeout)
    return float(reply.rstrip())
def zImportExtraData(self, surfNum, fileName):
    """Imports extra data and grid surface data values into an
    existing surface.

    Parameters
    ----------
    surfNum : integer
        surface number
    fileName : string
        file name (of an ASCII file)

    Returns
    -------
    errorCode : integer
        0 if 'OK', -1 if error

    Notes
    -----
    The ASCII file should have .DAT extension for sequential objects.
    This is generally used to specify uniform array sag/phase data for
    Grid Sag / Grid Phase surfaces.
    """
    reply = self._sendDDEcommand(
        "ImportExtraData,{:d},{}".format(surfNum, fileName))
    return 0 if 'OK' in reply.rstrip() else -1
def zInsertConfig(self, configNum):
    """Insert a new configuration (column) in the multi-configuration
    editor.

    The new configuration is placed at the location (column) indicated
    by ``configNum``.

    Parameters
    ----------
    configNum : integer
        the configuration (column) number to insert.

    Returns
    -------
    configCol : integer
        the column number of the configuration that inserted at
        ``configNum``

    Notes
    -----
    1. The returned column number is generally different from the
       number passed in ``configNum``.
    2. Use ``zInsertMCO()`` to insert a new multi-configuration
       operand in the multi-configuration editor.
    3. Use ``zSetConfig()`` to switch the current configuration number

    See Also
    --------
    zDeleteConfig()
    """
    reply = self._sendDDEcommand("InsertConfig,{:d}".format(configNum))
    return int(reply)
def zInsertMCO(self, operNum):
    """Insert a new multi-configuration operand (row) in the multi-
    configuration editor.

    Parameters
    ----------
    operNum : integer
        number between 1 and the current number of operands plus 1,
        inclusive.

    Returns
    -------
    numOper : integer
        new number of operands (rows)

    See Also
    --------
    zInsertConfig() :
        to insert a new configuration (column)
    zDeleteMCO()
    """
    reply = self._sendDDEcommand("InsertMCO,{:d}".format(operNum))
    return int(reply)
def zInsertMFO(self, operNum):
    """Insert a new optimization operand (row) in the merit function
    editor.

    Parameters
    ----------
    operNum : integer
        number between 1 and the current number of operands plus 1,
        inclusive.

    Returns
    -------
    numOper : integer
        new number of operands (rows).

    See Also
    --------
    zSetOperand() :
        Generally, you may want to use ``zSetOperand()`` afterwards.
    zDeleteMFO()
    """
    reply = self._sendDDEcommand("InsertMFO,{:d}".format(operNum))
    return int(reply)
def zInsertObject(self, surfNum, objNum):
    """Insert a new NSC object at the location indicated by
    ``surfNum`` and ``objNum``

    Parameters
    ----------
    surfNum : integer
        surface number of the NSC group. Use 1 if the program mode is
        Non-Sequential
    objNum : integer
        object number

    Returns
    -------
    status : integer
        0 if successful, -1 if failed.

    See Also
    --------
    zSetNSCObjectData() :
        to define data for the new object
    zDeleteObject()
    """
    reply = self._sendDDEcommand(
        "InsertObject,{:d},{:d}".format(surfNum, objNum)).rstrip()
    return -1 if reply == 'BAD COMMAND' else int(reply)
def zInsertSurface(self, surfNum):
    """Insert a surface at the location indicated by ``surfNum``

    Parameters
    ----------
    surfNum : integer
        location where to insert the surface

    Returns
    -------
    status : integer
        0 if success

    See Also
    --------
    zSetSurfaceData() :
        to define data for the new surface
    zDeleteSurface()
    """
    reply = self._sendDDEcommand("InsertSurface," + str(surfNum))
    return int(reply)
def zLoadDetector(self, surfNum, objNum, fileName):
    """Loads the data saved in a file to an NSC Detector Rectangle,
    Detector Color, Detector Polar, or Detector Volume object.

    Parameters
    ----------
    surfNum : integer
        surface number of the NSC group. Use 1 if the program mode is
        Non-Sequential
    objNum : integer
        object number
    fileName : string
        the filename may include the full path; if no path is provided
        the path of the current lens file is used. The extension should
        be DDR, DDC, DDP, or DDV for Detector Rectangle, Color, Polar,
        and Volume objects, respectively

    Returns
    -------
    status : integer
        0 if load was successful; Error code (such as -1,-2) if failed.
    """
    # case-insensitive check of the detector-data extensions
    isRightExt = _os.path.splitext(fileName)[1].lower() in ('.ddr', '.ddc',
                                                            '.ddp', '.ddv')
    if not _os.path.isabs(fileName): # full path is not provided
        # join with os.path.join so that a path separator is guaranteed
        # between the directory and the file name (plain concatenation
        # produces an invalid path if the directory has no trailing
        # separator)
        fileName = _os.path.join(self.zGetPath()[0], fileName)
    isFile = _os.path.isfile(fileName) # check if file exist
    if isRightExt and isFile:
        cmd = ("LoadDetector,{:d},{:d},{}"
               .format(surfNum, objNum, fileName))
        reply = self._sendDDEcommand(cmd)
        return _regressLiteralType(reply.rstrip())
    else:
        return -1
def zLoadFile(self, fileName, append=None):
    """Loads a ZEMAX file into the server

    Parameters
    ----------
    fileName : string
        full path of the ZEMAX file to be loaded.
    append : integer, optional
        if a non-zero value of append is passed, then the new file is
        appended to the current file starting at the surface number
        defined by the value appended.

    Returns
    -------
    status : integer
        0 = file successfully loaded;
        -999 = file could not be loaded (check if the filename pattern
        is problematic or check the path);
        -998 = the command timed out;
        other = the upload failed.

    Notes
    -----
    Filename patterns that are fine:

    a. "C:\\\\ZEMAX\\\\Samples\\\\cooke.zmx"
    b. "C:\\ZEMAX\\Samples\\cooke.zmx"
    c. spaces in the file path are OK.

    Problematic filename patterns:

    a. a comma (,) in the filename is problematic, since the comma is
       the DDE argument delimiter.

    Examples
    --------
    >>> lens = "C:\\ZEMAX\\Samples\\cooke.zmx"
    >>> ln.zLoadFile(lens)
    0

    See Also
    --------
    zSaveFile(), zGetPath(), zPushLens()
    """
    # the file must be specified by absolute path, carry a .zmx
    # extension, and actually exist on disk
    usable = (_os.path.isabs(fileName)
              and _os.path.splitext(fileName)[1] in ('.zmx', '.ZMX')
              and _os.path.isfile(fileName))
    if not usable:
        return -999
    if append:
        cmd = "LoadFile,{},{}".format(fileName, append)
    else:
        cmd = "LoadFile,{}".format(fileName)
    reply = self._sendDDEcommand(cmd)
    if reply:
        return int(reply)  # Zemax returns -999 if the load failed
    return -998  # command timed out
def zLoadMerit(self, fileName):
    """Loads a Zemax .MF or .ZMX file, extracts the merit function and
    places it in the lens loaded in the server.

    Parameters
    ----------
    fileName : string
        name of the merit function file with full path and extension.

    Returns
    -------
    number : integer
        number of operands in the merit function
    merit : float
        merit value of the merit function.

    Returns -999 if the function failed.

    Notes
    -----
    1. If the merit function value is 9.00e+009, the merit function
       cannot be evaluated.
    2. Loading a merit function file does not change the data
       displayed in the LDE; the server process has a separate copy of
       the lens data.

    See Also
    --------
    zOptimize(), zSaveMerit()
    """
    # the file must be an existing .MF/.ZMX file given by absolute path
    valid = (_os.path.isabs(fileName)
             and _os.path.splitext(fileName)[1] in ('.mf', '.MF',
                                                    '.zmx', '.ZMX')
             and _os.path.isfile(fileName))
    if not valid:
        return -999
    reply = self._sendDDEcommand('LoadMerit,' + fileName)
    fields = reply.rsplit(",")
    # first field is the operand count (integer), second the merit value
    return tuple(int(float(f)) if i == 0 else float(f)
                 for i, f in enumerate(fields))
def zLoadTolerance(self, fileName):
    """Loads a tolerance file previously saved with ``zSaveTolerance``
    and places the tolerances in the lens loaded in the DDE server.

    Parameters
    ----------
    fileName : string
        file name of the tolerance file. If no path is provided in the
        filename, the <data>\\Tolerance folder is assumed.

    Returns
    -------
    numTolOperands : integer
        number of tolerance operands loaded;
        -999 if file does not exist
    """
    if _os.path.isabs(fileName):  # full path given
        fullFilePathName = fileName
    else:  # assume the <data>\Tolerance folder
        fullFilePathName = self.zGetPath()[0] + "\\Tolerance\\" + fileName
    if not _os.path.isfile(fullFilePathName):
        return -999
    # note: the original (possibly relative) fileName, not the resolved
    # full path, is what gets sent to Zemax
    reply = self._sendDDEcommand("LoadTolerance,{}".format(fileName))
    return int(float(reply.rstrip()))
def zMakeGraphicWindow(self, fileName, moduleName, winTitle, textFlag,
                       settingsData=None):
    """Notifies Zemax that graphic data has been written to a file and
    may now be displayed as a Zemax child window.

    The primary purpose of this function is to implement user defined
    features in a client application that look and act like native
    Zemax features.

    Parameters
    ----------
    fileName : string
        the full path and file name to the temporary file that holds
        the graphic data. This must be the same name as passed to the
        client executable in the command line arguments, if any.
    moduleName : string
        the full path and executable name of the client program that
        created the graphic data.
    winTitle : string
        the string which defines the title Zemax should place in the
        top bar of the window.
    textFlag : integer (0 or 1)
        1 = the client can also generate a text version of the data,
        so the "Text" menu option should be available to the user;
        0 = Zemax will gray out the "Text" menu option and will not
        ask the client to generate a text version of the data.
    settingsData : string, optional
        a string of space-delimited values used only by the client to
        define how the data was generated (the options and data that
        would normally appear in a Zemax "settings" style dialog box).
        Because the total length of a data item cannot exceed 255
        characters, ``zSetSettingsData()`` may be used prior to this
        call to specify the settings data string instead of including
        it here. See "How Zemax calls the client" in the manual for
        details.

    Returns
    -------
    None

    Notes
    -----
    There are two ways of using this command:

    >>> ln.zMakeGraphicWindow(fileName, moduleName, winTitle, textFlag, settingsData)

    OR

    >>> ln.zSetSettingsData(0, settingsData)
    >>> ln.zMakeGraphicWindow(fileName, moduleName, winTitle, textFlag)

    Examples
    --------
    >>> ln.zMakeGraphicWindow('C:\\TEMP\\ZGF001.TMP',
                              'C:\\ZEMAX\\FEATURES\\CLIENT.EXE',
                              'ClientWindow', 1, "0 1 2 12.55")

    This call indicates that Zemax should open a graphic window,
    display the data stored in the file 'C:\\TEMP\\ZGF001.TMP', and
    that any updates or setting changes can be made by calling the
    client module 'C:\\ZEMAX\\FEATURES\\CLIENT.EXE'. This client can
    generate a text version of the graphic, and the settings data
    string (used only by the client) is "0 1 2 12.55".
    """
    # settingsData is appended only when supplied; otherwise the caller
    # is expected to have used zSetSettingsData() beforehand
    parts = ["MakeGraphicWindow", fileName, moduleName, winTitle,
             "{:d}".format(textFlag)]
    if settingsData:
        parts.append(settingsData)
    reply = self._sendDDEcommand(",".join(parts))
    return str(reply.rstrip())
# FIX !!! What is the appropriate reply?
def zMakeTextWindow(self, fileName, moduleName, winTitle, settingsData=None):
    """Notifies Zemax that text data has been written to a file and
    may now be displayed as a Zemax child window.

    The primary purpose of this item is to implement user defined
    features in a client application that look and act like native
    Zemax features.

    Parameters
    ----------
    fileName : string
        the full path and file name to the temporary file that holds
        the text data. This must be the same name as passed to the
        client executable in the command line arguments, if any.
    moduleName : string
        the full path and executable name of the client program that
        created the text data.
    winTitle : string
        the string which defines the title Zemax should place in the
        top bar of the window.
    settingsData : string, optional
        a string of space-delimited values used only by the client to
        define how the data was generated (the options and data that
        would normally appear in a Zemax "settings" style dialog box).
        Because the total length of a data item cannot exceed 255
        characters, ``zSetSettingsData()`` may be used prior to this
        call to specify the settings data string instead of including
        it here. See "How Zemax calls the client" in the manual for
        details.

    Notes
    -----
    There are two ways of using this command:

    >>> ln.zMakeTextWindow(fileName, moduleName, winTitle, settingsData)

    OR

    >>> ln.zSetSettingsData(0, settingsData)
    >>> ln.zMakeTextWindow(fileName, moduleName, winTitle)

    Examples
    --------
    >>> ln.zMakeTextWindow('C:\\TEMP\\ZGF002.TMP',
                           'C:\\ZEMAX\\FEATURES\\CLIENT.EXE',
                           'ClientWindow', "6 5 4 12.55")

    This call indicates that Zemax should open a text window, display
    the data stored in the file 'C:\\TEMP\\ZGF002.TMP', and that any
    updates or setting changes can be made by calling the client
    module 'C:\\ZEMAX\\FEATURES\\CLIENT.EXE'. The settings data string
    (used only by the client) is "6 5 4 12.55".
    """
    # settingsData is appended only when supplied; otherwise the caller
    # is expected to have used zSetSettingsData() beforehand
    parts = ["MakeTextWindow", fileName, moduleName, winTitle]
    if settingsData:
        parts.append(settingsData)
    reply = self._sendDDEcommand(",".join(parts))
    return str(reply.rstrip())
# FIX !!! What is the appropriate reply?
def zModifySettings(self, fileName, mType, value):
    """Change specific options in Zemax configuration files (.CFG)

    Settings files are used by Zemax analysis windows. They are also
    used by ``zMakeTextWindow()`` and ``zMakeGraphicWindow()``. The
    modified settings file is written back to the original settings
    file-name.

    Parameters
    ----------
    fileName : string
        full name of the settings file, including the path & extension
    mType : string
        a mnemonic that indicates which setting within the file is to
        be modified. See the ZPL macro command "MODIFYSETTINGS" in the
        Zemax manual for a complete list of the ``mType`` codes
    value : string or integer
        the new data for the specified setting

    Returns
    -------
    status : integer
        0 = no error;
        -1 = invalid file;
        -2 = incorrect version number;
        -3 = file access conflict

    Examples
    --------
    >>> ln.zModifySettings("C:\\MyPOP.CFG", "POP_BEAMTYPE", 2)
    """
    # strings are passed through verbatim; numbers are rendered with
    # high-precision general formatting
    if isinstance(value, str):
        valueText = value
    else:
        valueText = "{:1.20g}".format(value)
    cmd = "ModifySettings,{},{},{}".format(fileName, mType, valueText)
    reply = self._sendDDEcommand(cmd)
    return int(float(reply.rstrip()))
def zNewLens(self):
    """Erases the current lens

    The "minimum" lens that remains is identical to the LDE when
    "File >> New" is selected. No prompt to save the existing lens is
    given.

    Parameters
    ----------
    None

    Returns
    -------
    status : integer
        0 = successful
    """
    reply = self._sendDDEcommand('NewLens')
    return int(reply)
def zNSCCoherentData(self, surfNum, detectNum, pixel, dtype):
    """Return data from an NSC detector (Non-sequential coherent data)

    Similar to the NSDC optimization operand.

    Parameters
    ----------
    surfNum : integer
        the surface number of the NSC group (1 for pure NSC systems).
    detectNum : integer
        the object number of the desired detector.
    pixel : integer
        0 = the sum of the data for all pixels for that detector
        object is returned;
        +ve int = the data from the specified pixel is returned.
    dtype : integer
        0 = real; 1 = imaginary; 2 = amplitude; 3 = power

    Returns
    -------
    nsccoherentdata : float
        nsc coherent data
    """
    cmd = ("NSCCoherentData,{:d},{:d},{:d},{:d}"
           .format(surfNum, detectNum, pixel, dtype))
    return float(self._sendDDEcommand(cmd).rstrip())
def zNSCDetectorData(self, surfNum, detectNum, pixel, dtype):
    """Return data from an NSC detector (Non-sequential incoherent
    intensity data, similar to the NSDD operand)

    Parameters
    ----------
    surfNum : integer
        the surface number of the NSC group (1 for pure NSC systems).
    detectNum : integer
        the object number of the desired detector.
        0 = all detectors are cleared;
        -ve int = only the detector defined by the absolute value of
        ``detectNum`` is cleared
    pixel : integer
        interpreted differently depending upon the detector type:

        1. For Detector Rectangles, Detector Surfaces, & all faceted
           detectors (type 1):

           * +ve int = the data from the specified pixel is returned.
           * 0 = the sum of the total flux in position space, average
             flux/area in position space, or total flux in angle space
             for all pixels for that detector object, for Data = 0, 1,
             or 2, respectively.
           * -1 = Maximum flux or flux/area.
           * -2 = Minimum flux or flux/area.
           * -3 = Number of rays striking the detector.
           * -4 = Standard deviation (RMS from the mean) of all the
             non-zero pixel data.
           * -5 = The mean value of all the non-zero pixel data.
           * -6,-7,-8 = The x, y, or z coordinate of the position or
             angle Irradiance or Intensity centroid, resp.
           * -9,-10,-11,-12,-13 = The RMS radius, x, y, z, or xy cross
             term distance or angle of all the pixel data with respect
             to the centroid (the second moments r^2, x^2, y^2, z^2, &
             xy, respectively).

        2. For Detector volumes (type 2) ``pixel`` is interpreted as
           the voxel number. If ``pixel==0``, the value returned is
           the sum for all pixels.
    dtype : integer
        0 = flux or incident flux for type 1 and type 2 detectors
        respectively.
        1 = flux/area for type 1 detectors. Equals absorbed flux for
        type 2 detectors.
        2 = flux/solid angle pixel for type 1 detectors. Equals
        absorbed flux per unit volume for type 2 detectors.

    Notes
    -----
    Only ``dtype`` values 0 & 1 (for flux & flux/area) are supported
    for faceted detectors.
    """
    cmd = ("NSCDetectorData,{:d},{:d},{:d},{:d}"
           .format(surfNum, detectNum, pixel, dtype))
    return float(self._sendDDEcommand(cmd).rstrip())
def zNSCLightningTrace(self, surfNum, source, raySampling, edgeSampling,
                       timeout=60):
    """Traces rays from one or all NSC sources using Lighting Trace

    Parameters
    ----------
    surfNum : integer
        surface number. use 1 for pure NSC mode
    source : integer
        object number of the desired source. If ``0``, all sources
        will be traced.
    raySampling : integer
        resolution of the LightningTrace mesh with valid values
        between 0 (= "Low (1X)") and 5 (= "1024X").
    edgeSampling : integer
        resolution used in refining the LightningTrace mesh near the
        edges of objects, with valid values between 0 ("Low (1X)")
        and 4 ("256X").
    timeout : integer
        timeout value in seconds. Default=60sec

    Returns
    -------
    status : integer

    Notes
    -----
    ``zNSCLightningTrace()`` always updates the lens before executing
    a LightningTrace to make certain all objects are correctly loaded
    and updated.
    """
    cmd = ("NSCLightningTrace,{:d},{:d},{:d},{:d}"
           .format(surfNum, source, raySampling, edgeSampling))
    reply = self._sendDDEcommand(cmd, timeout)
    if 'OK' in reply.split():
        return 0
    if 'BAD COMMAND' in reply.rstrip():
        return -1
    # otherwise hand back the error code reported by Zemax
    return int(float(reply.rstrip()))
def zNSCTrace(self, surfNum, srcNum, split=0, scatter=0, usePolar=0,
ignoreErrors=0, randomSeed=0, save=0, saveFilename=None,
oFilter=None, timeout=180):
"""Trace rays from one or all NSC sources, after updating the lens.
Parameters
----------
surfNum : integer
the surface number of the NSC group (1 for pure NSC systems).
srcNum : integer
the object number of the source. Use 0 to trace all sources.
split : integer, optional
0 = splitting is OFF (default); otherwise splitting is ON
scatter : integer, optional
0 = scattering is OFF (default); otherwise scattering is ON
usePolar : integer
0 = polarization is OFF (default); otherwise polarization is
ON. If splitting is ON polarization is automatically selected.
ignoreErrors : integer, optional
0 = ray errors will terminate the NSC trace & macro execution
and an error will be reported (default). Otherwise errors will
be ignored
randomSeed : integer, optional
0 or omitted = the random number generator will be seeded with
a random value, & every call to ``zNSCTrace()`` will produce
different random rays (default). Any integer other than zero
will ensure that the random number generator be seeded with
the specified value, and every call to ``zNSCTrace()`` using
the same seed will produce identical rays.
save : integer, optional
0 or omitted = the parameters ``saveFilename`` and ``oFilter``
need not be supplied (default). Otherwise the rays will be
saved in a ``ZRD`` file. The ``ZRD`` file will have the name
specified by the ``saveFilename``, and will be placed in the
same directory as the lens file. The extension of
``saveFilename`` should be ``ZRD``, and no path should be
specified.
saveFilename : string, optional
(see above)
oFilter : string, optional
if ``save`` is not zero, then the optional filter name is
either a string variable with the filter, or the literal
filter in double quotes. For information on filter strings
see "The filter string" in the Zemax manual.
timeout : integer
timeout in seconds (default = 60 seconds)
Returns
-------
traceResult : error code
0 if successful, -1 if problem with saveFileName, other
error codes sent by Zemax.
Examples
--------
>>> zNSCTrace(1, 2, 1, 0, 1, 1)
The above command traces rays in NSC group 1, from source 2, with
ray splitting, no ray scattering, using polarization and ignoring
errors.
>>> zNSCTrace(1, 2, 1, 0, 1, 1, 33, 1, "myrays.ZRD", "h2")
Same as above, only a random seed of 33 is given and the data is
saved to the file "myrays.ZRD" after filtering as per h2.
>>> zNSCTrace(1, 2)
The above command traces rays in NSC group 1, from source 2,
without ray splitting, no ray scattering, without using
polarization and will not ignore errors.
See Also
--------
zNSCDetectorClear()
"""
requiredArgs = ("{:d},{:d},{:d},{:d},{:d},{:d},{:d},{:d}"
.format(surfNum,srcNum, split, scatter, usePolar, ignoreErrors,
randomSeed, save))
if save:
isAbsPath = _os.path.isabs(saveFilename)
isRightExt = _os.path.splitext(saveFilename)[1] in ('.ZRD',)
if isRightExt and not isAbsPath:
if oFilter:
optionalArgs = ",{},{}".format(saveFilename,oFilter)
else:
optionalArgs = ",{}".format(saveFilename)
cmd = "NSCTrace,"+requiredArgs+optionalArgs
else:
return -1 # either full path present in saveFileName or extension is not .ZRD
else:
cmd = "NSCTrace,"+requiredArgs
reply = self._sendDDEcommand(cmd, timeout)
if 'OK' in reply.split():
return 0
elif 'BAD COMMAND' in reply.rstrip():
return -1
else:
return int(float(reply.rstrip())) # return error code sent by zemax.
def zOpenWindow(self, analysisType, zplMacro=False, timeout=None):
    """Open a new analysis window in the main Zemax application screen.

    Parameters
    ----------
    analysisType : string
        the 3-letter button code corresponding to the analysis. A list
        of these codes can be seen by calling ``pyz.showZButtons()``
        function in an interactive shell.
    zplMacro : bool, optional
        ``True`` if the ``analysisType`` code is the first 3-letters
        of a ZPL macro name, else ``False`` (default).
    timeout : integer, optional
        timeout value in seconds.

    Returns
    -------
    status : integer
        0 = successful; -1 = incorrect analysis code; -999 = fail

    Notes
    -----
    1. This function checks if the ``analysisType`` code is a valid
       code or not in order to prevent the calling program from
       getting stalled. However, it doesn't check the ``analysisType``
       code validity if it is a ZPL macro. If the ``analysisType`` is
       a ZPL macro, please make sure that the macro exists in the
       ``<data>/Macros`` folder.
    2. You may also use ``zExecuteZPLMacro()`` to execute a ZPL macro.

    See Also
    --------
    zGetMetaFile(), zExecuteZPLMacro()
    """
    # XOR: proceed either for a valid button code (not flagged as a
    # macro), or for a macro code (which is not validated here)
    if not (zb.isZButtonCode(analysisType) ^ zplMacro):
        return -1
    reply = self._sendDDEcommand("OpenWindow,{}".format(analysisType),
                                 timeout)
    if 'OK' in reply.split():
        return 0
    if 'FAIL' in reply.split():
        return -999
    return int(float(reply.rstrip()))  # error code from Zemax
def zOperandValue(self, operandType, *values):
    """Returns the value of any optimization operand, even if the
    operand is not currently in the merit function.

    Parameters
    ----------
    operandType : string
        a valid optimization operand
    *values : flattened sequence
        a sequence of arguments. Possible arguments include:
        ``int1`` (column 2, integer), ``int2`` (column 3, integer),
        ``data1`` (column 4, float), ``data2`` (column 5, float),
        ``data3`` (column 6, float), ``data4`` (column 7, float),
        ``data5`` (column 12, float), ``data6`` (column 13, float)

    Returns
    -------
    operandValue : float
        the value of the operand

    Examples
    --------
    The following example retrieves the total optical path length
    of the marginal ray between surfaces 1 and 3

    >>> ln.zOperandValue('PLEN', 1, 3, 0, 0, 0, 1)

    See Also
    --------
    zOptimize():
        to update MFE prior to calling ``zOperandValue()``, call
        ``zOptimize(-1)``
    zGetOperand(), zSetOperand()
    """
    # require a known operand and between 1 and 8 arguments
    if not (zo.isZOperand(operandType, 1) and (0 < len(values) < 9)):
        return -1
    # first two arguments are integer columns; the rest are floats
    valList = [str(int(v)) if i in (0, 1) else str(float(v))
               for i, v in enumerate(values)]
    cmd = "OperandValue," + operandType + "," + ",".join(valList)
    return float(self._sendDDEcommand(cmd).rstrip())
def zOptimize(self, numOfCycles=0, algorithm=0, timeout=None):
"""Calls Damped Least Squares/ Orthogonal Descent optimizer.
Parameters
----------
numOfCycles : integer, optional
the number of cycles to run. If ``numOfCycles == 0``
(default), optimization runs in automatic mode. Else if
``numOfCycles < 0``, ``zOptimize()`` updates all operands
in the merit function and returns the current merit function
without performing optimization.
algorithm : integer, optional
0 = Damped Least Squares; 1 = Orthogonal descent
timeout : integer, optional
timeout value in seconds
Returns
-------
finalMeritFn : float
the final merit function.
Notes
-----
1. If the merit function value returned is 9.0E+009, the
optimization failed, usually because the lens or merit function
could not be evaluated.
2. The number of cycles should be kept small enough to allow the
algorithm to complete and return before the DDE communication
times out, or an error will occur. One possible way to achieve
high number of cycles could be to call ``zOptimize()`` multiple
times in a loop, each time comparing the returned merit
function with few of the previously returned (and stored) merit
function values to determine if an optimum has been attained.
For an example implementation, see ``zOptimize2()``
See Also
--------
zHammer(), zLoadMerit(), zSaveMerit(), zOptimize2()
"""
cmd = "Optimize,{:1.2g},{:d}".format(numOfCycles,algorithm)
reply = self._sendDDEcommand(cmd, timeout)
return float(reply.rstrip())
def zPushLens(self, update=None, timeout=None):
"""Copy lens in the Zemax DDE server into Lens Data Editor (LDE).
Parameters
----------
update : integer, optional
if 0 or omitted, the open windows in Zemax main application
are not updated;
if 1, then all open analysis windows are updated.
timeout : integer, optional
if a timeout, in seconds, in passed, the client will wait till
the timeout before returning a timeout error. If no timeout is
passed, the default timeout is used.
Returns
-------
status : integer
0 = lens successfully pushed into the LDE;
-999 = the lens could not be pushed into the LDE. (check
``zPushLensPermission()``);
-998 = the command timed out;
other = the update failed.
Notes
-----
This operation requires the permission of the user running the
Zemax program. The proper use of ``zPushLens`` is to first call
``zPushLensPermission()``.
See Also
--------
zPushLensPermission(), zLoadFile(), zGetUpdate(), zGetPath(),
zGetRefresh(), zSaveFile().
"""
reply = None
if update == 1:
reply = self._sendDDEcommand('PushLens,1', timeout)
elif update == 0 or update is None:
reply = self._sendDDEcommand('PushLens,0', timeout)
else:
raise ValueError('Invalid value for flag')
if reply:
return int(reply) # Note: Zemax returns -999 if push lens fails
else:
return -998 # if timeout reached (assumption!!)
def zPushLensPermission(self):
"""Establish if Zemax extensions are allowed to push lenses in
the LDE.
Parameters
----------
None
Returns
-------
status : integer
1 = Zemax is set to accept PushLens commands;
0 = Extensions are not allowed to use ``zPushLens()``
See Also
--------
zPushLens(), zGetRefresh()
"""
status = None
status = self._sendDDEcommand('PushLensPermission')
return int(status)
def zQuickFocus(self, mode=0, centroid=0):
"""Quick focus adjustment of back focal distance for best focus
The "best" focus is chosen as a wavelength weighted average over
all fields.
Parameters
----------
mode : integer, optional
0 = RMS spot size (default)
1 = spot x
2 = spot y
3 = wavefront OPD
centroid : integer, optional
specify RMS reference
0 = RMS referenced to the chief ray (default);
1 = RMS referenced to image centroid
Returns
-------
retVal : integer
0 for success.
"""
retVal = -1
cmd = "QuickFocus,{mode:d},{cent:d}".format(mode=mode,cent=centroid)
reply = self._sendDDEcommand(cmd)
if 'OK' in reply.split():
retVal = 0
return retVal
def zReleaseWindow(self, tempFileName):
"""Release locked window/menu mar.
Parameters
----------
tempFileName : string
the temporary file name
Returns
-------
status : integer
0 = no window is using the filename;
1 = the file is being used.
Notes
-----
When Zemax calls the client to update or change the settings used
by the client function, the menu bar is grayed out on the window
to prevent multiple updates or setting changes from being
requested simultaneously. Normally, when the client code calls
the functions ``zMakeTextWindow()`` or ``zMakeGraphicWindow()``,
the menu bar is once again activated. However, if during an update
or setting change, the new data cannot be computed, then the
window must be released. The ``zReleaseWindow()`` function serves
just this one purpose. If the user selects "Cancel" when changing
the settings, the client code should send a ``zReleaseWindow()``
call to release the lock out of the menu bar. If this command is
not sent, the window cannot be closed, which will prevent
Zemax from terminating normally.
"""
reply = self._sendDDEcommand("ReleaseWindow,{}".format(tempFileName))
return int(float(reply.rstrip()))
def zRemoveVariables(self):
"""Sets all currently defined solve variables to fixed status
Parameters
----------
None
Returns
-------
status : integer
0 = successful; -1 = fail
"""
reply = self._sendDDEcommand('RemoveVariables')
if 'OK' in reply.split():
return 0
else:
return -1
def zSaveDetector(self, surfNum, objNum, fileName):
"""Saves the data currently on an NSC Detector Rectangle, Detector
Color, Detector Polar, or Detector Volume object to a file.
Parameters
----------
surfNum : integer
surface number of the non-sequential group. Use 1 if the
program mode is Non-Sequential.
objNum : integer
object number of the detector object
fileName : string
the filename may include the full path; if no path is provided
the path of the current lens file is used. The extension should
be DDR, DDC, DDP, or DDV for Detector Rectangle, Color, Polar,
and Volume objects, respectively.
Returns
-------
status : integer
0 if save was successful;
Error code (such as -1,-2) if failed.
"""
isRightExt = _os.path.splitext(fileName)[1] in ('.ddr','.DDR','.ddc','.DDC',
'.ddp','.DDP','.ddv','.DDV')
if not _os.path.isabs(fileName): # full path is not provided
fileName = self.zGetPath()[0] + fileName
if isRightExt:
cmd = ("SaveDetector,{:d},{:d},{}"
.format(surfNum, objNum, fileName))
reply = self._sendDDEcommand(cmd)
return _regressLiteralType(reply.rstrip())
else:
return -1
def zSaveFile(self, fileName):
"""Saves the lens currently loaded in the server to a Zemax file.
Parameters
----------
fileName : string
file name, including full path with extension.
Returns
-------
status : integer
0 = Zemax successfully saved the lens file & updated the
newly saved lens;
-999 = Zemax couldn't save the file;
-1 = Incorrect file name;
Any other value = update failed.
See Also
--------
zGetPath(), zGetRefresh(), zLoadFile(), zPushLens().
"""
isAbsPath = _os.path.isabs(fileName)
isRightExt = _os.path.splitext(fileName)[1] in ('.zmx','.ZMX')
if isAbsPath and isRightExt:
cmd = "SaveFile,{}".format(fileName)
reply = self._sendDDEcommand(cmd)
return int(float(reply.rstrip()))
else:
return -1
def zSaveMerit(self, fileName):
"""Saves the current merit function to a Zemax .MF file
Parameters
----------
fileName : string
name of the merit function file with full path and extension.
Returns
-------
meritData : integer
If successful, it is the number of operands in the merit
function; If ``meritData = -1``, saving failed.
See Also
--------
zOptimize(), zLoadMerit()
"""
isAbsPath = _os.path.isabs(fileName)
isRightExt = _os.path.splitext(fileName)[1] in ('.mf','.MF')
if isAbsPath and isRightExt:
cmd = "SaveMerit,{}".format(fileName)
reply = self._sendDDEcommand(cmd)
return int(float(reply.rstrip()))
else:
return -1
def zSaveTolerance(self, fileName):
"""Saves the tolerances of the current lens to a file.
Parameters
----------
fileName : string
filename of the file to save the tolerance data. If no path
is provided, the ``<data>\Tolerance`` folder is assumed.
Although it is not enforced, it is useful to use ".tol" as
extension.
Returns
-------
numTolOperands : integer
number of tolerance operands saved.
See Also
--------
zLoadTolerance()
"""
cmd = "SaveTolerance,{}".format(fileName)
reply = self._sendDDEcommand(cmd)
return int(float(reply.rstrip()))
def zSetAperture(self, surf, aType, aMin, aMax, xDecenter=0, yDecenter=0,
apertureFile=''):
"""Set aperture characteristics at a lens surface (surface data
dialog box)
Parameters
----------
surf : integer
surface number
aType : integer
code to specify aperture type
* 0 = no aperture (na)
* 1 = circular aperture (ca)
* 2 = circular obscuration (co)
* 3 = spider (s)
* 4 = rectangular aperture (ra)
* 5 = rectangular obscuration (ro)
* 6 = elliptical aperture (ea)
* 7 = elliptical obscuration (eo)
* 8 = user defined aperture (uda)
* 9 = user defined obscuration (udo)
* 10 = floating aperture (fa)
aMin : float
min radius (ca), min radius (co), width of arm (s), x-half
width (ra), x-half width (ro), x-half width (ea), x-half
width (eo)
aMax : float
max radius (ca), max radius (co), number of arm (s),
y-half width (ra), y-half width (ro), y-half width (ea),
y-half width(eo). See [AT]_ for details.
xDecenter : float, optional
amount of decenter from current optical axis (lens units)
yDecenter : float, optional
amount of decenter from current optical axis (lens units)
apertureFile : string, optional
a text file with .UDA extention. See [UDA]_ for detils.
Returns
-------
apertureInfo : tuple
apertureInfo is a tuple containing the following:
* aType : (see above)
* aMin : (see above)
* aMax : (see above)
* xDecenter : (see above)
* yDecenter : (see above)
Examples
--------
>>> apertureInfo = ln.zSetAperture(2, 1, 5, 10, 0.5, 0, 'apertureFile.uda')
or
>>> apertureInfo = ln.zSetAperture(2, 1, 5, 10)
Notes
-----
1. The ``aMin`` and ``aMax`` values have different meanings for
the elliptical, rectangular, and spider apertures than for
circular apertures
2. If ``zSetAperture()`` is used to set user defined apertures
or obscurations, the ``aperturefile`` must be the name of a
file which lists the x, y, coordinates of the user defined
aperture file in a two column format. For more information
on user defined apertures, see [UDA]_
References
----------
.. [AT] "Aperture type and other aperture controls," Zemax manual.
.. [UDA] "User defined apertures and obscurations," Zemax manual.
See Also
--------
zGetAperture()
"""
cmd = ("SetAperture,{sN:d},{aT:d},{aMn:1.20g},{aMx:1.20g},{xD:1.20g},"
"{yD:1.20g},{aF}".format(sN=surf, aT=aType, aMn=aMin, aMx=aMax,
xD=xDecenter, yD=yDecenter, aF=apertureFile))
reply = self._sendDDEcommand(cmd)
rs = reply.split(',')
ainfo = _co.namedtuple('ApertureInfo', ['aType', 'aMin', 'aMax',
'xDecenter', 'yDecenter'])
apertureInfo = ainfo._make([float(elem) for elem in rs])
return apertureInfo
def zSetBuffer(self, bufferNum, textData):
"""Used to store client specific data with the window being
created or updated.
The buffer data can be used to store user selected options
instead of using the settings data on the command line of the
``zMakeTextWindow()`` or ``zMakeGraphicWindow()`` functions.
The data must be in a string format.
Parameters
----------
bufferNum : integer
number between 0 and 15 inclusive (for 16 buffers provided)
textData : string
is the only text that is stored, maximum of 240 characters
Returns
-------
status : integer
0 if successful, else -1
Notes
-----
The buffer data is not associated with any particular window until
either the ``zMakeTextWindow()`` or ``zMakeGraphicWindow()``
functions are issued. Once Zemax receives the ``MakeTextWindow``
or ``MakeGraphicWindow`` items, the buffer data is then copied to
the appropriate window memory, and then may later be retrieved
from that window's buffer using ``zGetBuffer()`` function.
See Also
--------
zGetBuffer()
"""
if (0 < len(textData) < 240) and (0 <= bufferNum < 16):
cmd = "SetBuffer,{:d},{}".format(bufferNum, str(textData))
reply = self._sendDDEcommand(cmd)
return 0 if 'OK' in reply.rsplit() else -1
else:
return -1
def zSetConfig(self, config):
"""Switches the current configuration number (selected column in
the MCE), and updates the system.
Parameters
----------
config : integer
The configuration (column) number to set current
Returns
-------
currentConfig : integer
the current configuration (column) number in MCE
``1 <= currentConfig <= numberOfConfigs``
numberOfConfigs : integer
number of configurations (columns)
error : integer
0 = successful (i.e. new current config is traceable);
-1 = failure
Notes
-----
Use ``zInsertConfig()`` to insert new configuration in the
multi-configuration editor.
See Also
--------
zGetConfig(), zSetMulticon()
"""
reply = self._sendDDEcommand("SetConfig,{:d}".format(config))
rs = reply.split(',')
return tuple([int(elem) for elem in rs])
def zSetExtra(self, surfNum, colNum, value):
"""Sets extra surface data (value) in the Extra Data Editor for
the surface indicatd by ``surf``
Parameters
----------
surfNum : integer
the surface number
colNum : integer
the column number
value : float
the value
Returns
-------
retValue : float
the numeric data value
See Also
--------
zGetExtra()
"""
cmd = ("SetExtra,{:d},{:d},{:1.20g}".format(surfNum, colNum, value))
reply = self._sendDDEcommand(cmd)
return float(reply)
def zSetField(self, n, arg1, arg2, arg3=None, vdx=0.0, vdy=0.0,
vcx=0.0, vcy=0.0, van=0.0):
"""Sets the field data for a particular field point
There are 2 ways of using this function (the parameters ``arg1``,
``arg2`` and ``arg3`` have different meanings depending on ``n``):
* ``zSetField(0, fieldType, totalNumFields, normMethod)``
OR
* ``zSetField(n, xf, yf [,wgt, vdx, vdy, vcx, vcy, van])``
Parameters
----------
[Case: ``n = 0``]
n : 0
to set general field parameters
arg1 : integer
the field type. 0 = angle, 1 = object height, 2 = paraxial
image height, and 3 = real image height
arg2 : integer
total number of fields
arg3 : integer (0 or 1), optional
normalization type. 0 = radial (default), 1 = rectangular
[Case: ``0 < n <= number_of_fields``]
n : integer (greater than 0)
the field number
arg1 (fx) : float
the field-x value
arg2 (fy) : float
the field-y value
arg3 (wgt) : float, optional
the field weight (default = 1.0)
vdx, vdy, vcx, vcy, van : floats, optional
the vignetting factors (default = 0.0). See below.
Returns
-------
[Case: ``n=0``]
type : integer
0 = angles in degrees; 1 = object height; 2 = paraxial image
height, 3 = real image height
number : integer
number of fields currently defined
maxX : float
values used to normalize x field coordinate
maxY : float
values used to normalize y field coordinate
normMethod : integer
normalization method (0 = radial, 1 = rectangular)
[Case: ``0 < n <= number-of-fields``]
xf : float
the field-x value
yf : float
the field-y value
wgt : float
field weight value
vdx : float
decenter-x vignetting factor
vdy : float
decenter-y vignetting factor
vcx : float
compression-x vignetting factor
vcy : float
compression-y vignetting factor
van : float
angle vignetting factor
Notes
-----
1. In Zemax main application, the default field normalization type
is radial. However, the default field normalization
2. The returned tuple's content and structure is exactly same as
that of ``zGetField()``
See Also
--------
zGetField()
"""
if n:
arg3 = 1.0 if arg3 is None else arg3 # default weight
cmd = ("SetField,{:d},{:1.20g},{:1.20g},{:1.20g},{:1.20g},{:1.20g}"
",{:1.20g},{:1.20g},{:1.20g}"
.format(n, arg1, arg2, arg3, vdx, vdy, vcx, vcy, van))
reply = self._sendDDEcommand(cmd)
rs = reply.split(',')
if len(rs) == 2: # The behaviour with the Zemax bug
fieldData = self.zGetField(n)
else: # the expected behaviour, which is also expected to return
fd = _co.namedtuple('fieldData', ['xf', 'yf', 'wgt',
'vdx', 'vdy',
'vcx', 'vcy', 'van'])
fieldData = fd._make([float(elem) for elem in rs])
else:
arg3 = 0 if arg3 is None else arg3 # default normalization
cmd = ("SetField,{:d},{:d},{:d},{:d}".format(0, arg1, arg2, arg3))
reply = self._sendDDEcommand(cmd)
rs = reply.split(',')
if len(rs) == 2: # The behaviour with the Zemax bug
fieldData = self.zGetField(n)
else: # the expected behaviour, which is also expected to return
fd = _co.namedtuple('fieldData', ['type', 'numFields',
'maxX', 'maxY', 'normMethod'])
fieldData = fd._make([int(elem) if (i == 0 or i == 1 or i == 4)
else float(elem) for i, elem in enumerate(rs)])
return fieldData
def zSetFloat(self):
"""Sets all surfaces without surface apertures to have floating
apertures. Floating apertures will vignette rays which trace
beyond the semi-diameter.
Parameters
----------
None
Returns
-------
status : integer
0 = success; -1 = fail
"""
retVal = -1
reply = self._sendDDEcommand('SetFloat')
if 'OK' in reply.split():
retVal = 0
return retVal
def zSetLabel(self, surfNum, label):
"""This command associates an integer label with the specified
surface. The label will be retained by Zemax as surfaces are
inserted or deleted around the target surface.
Parameters
----------
surfNum : integer
the surface number
label : integer
the integer label
Returns
-------
assignedLabel : integer
should be equal to label
See Also
--------
zGetLabel(), zFindLabel()
"""
reply = self._sendDDEcommand("SetLabel,{:d},{:d}"
.format(surfNum,label))
return int(float(reply.rstrip()))
def zSetMulticon(self, config, *multicon_args):
"""Set data or operand type in the multi-configuration editior.
Note that there are 2 ways of using this function.
[``USAGE TYPE - I``]
If ``config > 0``, then the function is used to set data in the
MCE using the following syntax:
``ln.zSetMulticon(config, row, value, status, pickupRow, pickupConfig, scale, offset) -> multiConData``
Parameters
----------
config : integer (``> 0``)
configuration number (column)
row : integer
the row or operand number
value : float
the value to set
status : integer
the ``status`` is 0 for fixed, 1 for variable, 2 for pickup,
and 3 for thermal pickup.
If ``status`` is 2 or 3, the ``pickupRow`` and ``pickupConfig``
values indicate the source data for the pickup solve.
pickupRow : integer
see ``status``
pickupConfig : integer
see ``status``
scale : float
scale factor for the pickup value
offset : float
offset to add to the pickup value.
Returns
-------
multiConData : namedtuple
the ``multiConData`` is a 8-tuple whose elements are:
(value, numConfig, numRow, status, pickupRow,
pickupConfig, scale, offset)
[``USAGE TYPE - II``]
If the ``config = 0``, the function may be used to set the operand
type and number data using the following syntax:
``ln.zSetMulticon(0, row, operandType, num1, num2, num3) -> multiConData``
Parameters
----------
config : 0
for usage type II
row : integer
row or operand number in the MCE
operandType : string
the operand type, such as 'THIC', 'WLWT', etc.
num1 : integer
number data. `num1` could be "Surface#", "Surface", "Field#",
"Wave#', or "Ignored". See [MCO]_
num2 : integer
number data. `num2` could be "Object", "Extra Data Number",
or "Parameter". See [MCO]_
num3 : integer
number data. `num3` could be "Property", or "Face#". See [MCO]_
Returns
-------
multiConData is a 4-tuple (named) whose elements are:
(operandType, num1, num2, num3)
Examples
--------
The following example shows the USEAGE TYPE - I:
>>> multiConData = ln.zSetMulticon(1, 5, 5.6, 0, 0, 0, 1.0, 0.0)
The following two lines show how to set a variable solve on the operand
on the 4th row for configuration number 1 (the third line is the output):
>>> config=1; row=4; value=0.5; status=1; pickupRow=0; pickupConfig=0; scale=1; offset=0
>>> ln.zSetMulticon(config, row, value, status, pickupRow, pickupConfig, scale, offset)
MCD(value=0.5, numConfig=2, numRow=4, status=1, pickupRow=0, pickupConfig=0, scale=1.0, offset=0.0)
The following example shows the USAGE TYPE - II:
>>> multiConData = ln.zSetMulticon(0, 5, 'THIC', 15, 0, 0)
Notes
-----
1. If there are current operands in the MCE, it is recommended to
first use ``zInsertMCO()`` to insert a row, and then use
``zSetMulticon(0,...)``. For example, use ``zInsertMCO(5)``
and then use ``zSetMulticon(0, 5, 'THIC', 15, 0, 0)``.
If a row is not inserted first, then existing rows may be
overwritten.
2. The function raises an exception if it determines the arguments
to be invalid.
References
----------
.. [MCO] "Summary of Multi-Configuration Operands," Zemax manual.
See Also
--------
zGetMulticon()
"""
if config > 0 and len(multicon_args) == 7:
(row,value,status,pickuprow,pickupconfig,scale,offset) = multicon_args
cmd=("SetMulticon,{:d},{:d},{:1.20g},{:d},{:d},{:d},{:1.20g},{:1.20g}"
.format(config,row,value,status,pickuprow,pickupconfig,scale,offset))
elif ((config == 0) and (len(multicon_args) == 5) and
(zo.isZOperand(multicon_args[1],3))):
(row,operand_type,number1,number2,number3) = multicon_args
cmd=("SetMulticon,{:d},{:d},{},{:d},{:d},{:d}"
.format(config,row,operand_type,number1,number2,number3))
else:
raise ValueError('Invalid input, expecting proper argument')
# FIX !!! Should it just return -1, instead of raising a value error?
# If the raise is removed, change code accordingly in the unittest.
reply = self._sendDDEcommand(cmd)
if config: # if config > 0
mcd = _co.namedtuple('MCD', ['value', 'numConfig', 'numRow', 'status',
'pickupRow', 'pickupConfig', 'scale',
'offset'])
rs = reply.split(",")
multiConData = [float(rs[i]) if (i == 0 or i == 6 or i== 7) else int(rs[i])
for i in range(len(rs))]
else: # if config == 0
mcd = _co.namedtuple('MCD', ['operandType', 'num1', 'num2', 'num3'])
rs = reply.split(",")
multiConData = [int(elem) for elem in rs[1:]]
multiConData.insert(0,rs[0])
return mcd._make(multiConData)
def zSetNSCObjectData(self, surfNum, objNum, code, data):
"""Sets the various data for NSC objects.
Parameters
----------
surfNum : integer
surface number of the NSC group. Use 1 if for pure NSC mode
objNum : integer
the NSC ojbect number
code : integer
integer code
data : string/integer/float
data to set NSC object
Returns
-------
nscObjectData : string/integer/float
the returned data (same as returned by ``zGetNSCObjectData()``)
depends on the ``code``. If the command fails, it returns ``-1``.
Refer table nsc-object-data-codes_.
Notes
-----
Refer table nsc-object-data-codes_ in the docstring of
``zGetNSCObjectData()`` for ``code`` and ``data`` specific details.
See Also
--------
zGetNSCObjectData(), zSetNSCObjectFaceData()
"""
str_codes = (0,1,4)
int_codes = (2,3,5,6,29,101,102,110,111)
if code in str_codes:
cmd = ("SetNSCObjectData,{:d},{:d},{:d},{}"
.format(surfNum,objNum,code,data))
elif code in int_codes:
cmd = ("SetNSCObjectData,{:d},{:d},{:d},{:d}"
.format(surfNum,objNum,code,data))
else: # data is float
cmd = ("SetNSCObjectData,{:d},{:d},{:d},{:1.20g}"
.format(surfNum,objNum,code,data))
reply = self._sendDDEcommand(cmd)
rs = reply.rstrip()
if rs == 'BAD COMMAND':
nscObjectData = -1
else:
if code in str_codes:
nscObjectData = str(rs)
elif code in int_codes:
nscObjectData = int(float(rs))
else:
nscObjectData = float(rs)
return nscObjectData
def zSetNSCObjectFaceData(self, surfNum, objNum, faceNum, code, data):
"""Sets the various data for NSC object faces
Parameters
----------
surfNum : integer
surface number of the NSC group. Use 1 if for pure NSC mode
objNum : integer
the NSC ojbect number
faceNum : integer
face number
code : integer
integer code
data : float/integer/string
data to set NSC object face
Returns
-------
nscObjFaceData : string/integer/float
the returned data (same as returned by ``zGetNSCObjectFaceData()``)
depends on the ``code``. If the command fails, it returns ``-1``.
Refer table nsc-object-face-data-codes_.
Notes
-----
Refer table nsc-object-face-data-codes_ in the docstring of
``zGetNSCObjectData()`` for ``code`` and ``data`` specific details.
See Also
--------
zGetNSCObjectFaceData()
"""
str_codes = (10,30,31,40,60)
int_codes = (20,22,24)
if code in str_codes:
cmd = ("SetNSCObjectFaceData,{:d},{:d},{:d},{:d},{}"
.format(surfNum,objNum,faceNum,code,data))
elif code in int_codes:
cmd = ("SetNSCObjectFaceData,{:d},{:d},{:d},{:d},{:d}"
.format(surfNum,objNum,faceNum,code,data))
else: # data is float
cmd = ("SetNSCObjectFaceData,{:d},{:d},{:d},{:d},{:1.20g}"
.format(surfNum,objNum,faceNum,code,data))
reply = self._sendDDEcommand(cmd)
rs = reply.rstrip()
if rs == 'BAD COMMAND':
nscObjFaceData = -1
else:
if code in str_codes:
nscObjFaceData = str(rs)
elif code in int_codes:
nscObjFaceData = int(float(rs))
else:
nscObjFaceData = float(rs)
return nscObjFaceData
def zSetNSCParameter(self, surfNum, objNum, paramNum, data):
"""Sets the parameter data for NSC objects.
Parameters
----------
surfNum : integer
the surface number. Use 1 if Non-Sequential program mode
objNum : integer
the object number
paramNum : integer
the parameter number
data : float
the new numeric value for the ``paramNum``
Returns
-------
nscParaVal : float
the parameter value
See Also
--------
zGetNSCParameter()
"""
cmd = ("SetNSCParameter,{:d},{:d},{:d},{:1.20g}"
.format(surfNum, objNum, paramNum, data))
reply = self._sendDDEcommand(cmd)
rs = reply.rstrip()
if rs == 'BAD COMMAND':
nscParaVal = -1
else:
nscParaVal = float(rs)
return nscParaVal
def zSetNSCPosition(self, surfNum, objNum, code, data):
"""Sets the position data for NSC objects.
Parameters
----------
surfNum : integer
the surface number. Use 1 if Non-Sequential program mode
objNum : integer
the object number
code : integer
1-7 for x, y, z, tilt-x, tilt-y, tilt-z, and material,
respectively.
data : float or string
numeric (float) for codes 1-6, string for material (code-7)
Returns
-------
nscPosData : tuple
a 7-tuple containing x, y, z, tilt-x, tilt-y, tilt-z, material
See Also
--------
zSetNSCPositionTuple(), zGetNSCPosition()
"""
if code == 7:
cmd = ("SetNSCPosition,{:d},{:d},{:d},{}"
.format(surfNum, objNum, code, data))
else:
cmd = ("SetNSCPosition,{:d},{:d},{:d},{:1.20g}"
.format(surfNum, objNum, code, data))
reply = self._sendDDEcommand(cmd)
rs = reply.split(',')
if rs[0].rstrip() == 'BAD COMMAND':
nscPosData = -1
else:
nscPosData = tuple([str(rs[i].rstrip()) if i==6 else float(rs[i])
for i in range(len(rs))])
return nscPosData
def zSetNSCProperty(self, surfNum, objNum, faceNum, code, value):
"""Sets a numeric or string value to the property pages of objects
defined in the NSC editor. It mimics the ZPL function NPRO.
Parameters
----------
surfNum : integer
surface number of the NSC group. Use 1 if for pure NSC mode
objNum : integer
the NSC ojbect number
faceNum : integer
face number. Use 0 for "All Faces"
code : integer
for the specific code
value : string/integer/float
value to set NSC property
Returns
-------
nscPropData : string/float/integer
the returned data (same as returned by ``zGetNSCProperty()``)
depends on the ``code``. If the command fails, it returns
``-1``. Refer table nsc-property-codes_.
Notes
-----
Refer table nsc-property-codes_ in the docstring of
``zGetNSCProperty()`` for ``code`` and ``value`` specific details.
See Also
--------
zGetNSCProperty()
"""
cmd = ("SetNSCProperty,{:d},{:d},{:d},{:d},".format(surfNum, objNum, code, faceNum))
if code in (0,1,4,5,6,11,12,14,18,19,27,28,84,86,92,117,123):
cmd = cmd + value
elif code in (2,3,7,9,13,15,16,17,20,29,81,91,101,102,110,111,113,121,
141,142,151,152,153161,162,171,172,173):
cmd = cmd + str(int(value))
else:
cmd = cmd + str(float(value))
reply = self._sendDDEcommand(cmd)
nscPropData = _process_get_set_NSCProperty(code, reply)
return nscPropData
def zSetNSCSettings(self, maxInt, maxSeg, maxNest, minAbsI, minRelI,
glueDist, missRayLen, ignoreErr):
"""Sets the maximum number of intersections, segments, nesting
level, minimum absolute intensity, minimum relative intensity,
glue distance, miss ray distance, and ignore errors flag used
for NSC ray tracing.
Parameters
----------
maxInt : integer
maximum number of intersections
maxSeg : integer
maximum number of segments
maxNest : integer
maximum nesting level
minAbsI : float
minimum absolute intensity
minRelI : float
minimum relative intensity
glueDist : float
glue distance
missRayLen : float
miss ray distance
ignoreErr : integer
1 if yes, 0 if no
Returns
-------
nscSettingsDataRet : 8-tuple
the returned tuple is also an 8-tuple with the same elements
as ``nscSettingsData``.
Notes
-----
Since the ``maxSeg`` value may require large amounts of RAM,
verify that the new value was accepted by checking the returned
tuple.
See Also
--------
zGetNSCSettings()
"""
cmd = ("SetNSCSettings,{:d},{:d},{:d},{:1.20g},{:1.20g},{:1.20g},{:1.20g},{:d}"
.format(maxInt, maxSeg, maxNest, minAbsI, minRelI, glueDist, missRayLen, ignoreErr))
reply = str(self._sendDDEcommand(cmd))
rs = reply.rsplit(",")
nscSettingsData = [float(rs[i]) if i in (3,4,5,6) else int(float(rs[i]))
for i in range(len(rs))]
return tuple(nscSettingsData)
def zSetNSCSolve(self, surfNum, objNum, param, solveType,
pickupObject=0, pickupColumn=0, scale=0, offset=0):
"""Sets the solve type on NSC position and parameter data.
Parameters
----------
surfNum : integer
the surface number. Use 1 if in Non-Sequential mode.
objNum : integer
the object number
param : integer
* -1 = data for x position;
* -2 = data for y position;
* -3 = data for z position;
* -4 = data for tilt x ;
* -5 = data for tilt y ;
* -6 = data for tilt z ;
* n > 0 = data for the nth parameter;
solveType : integer
0 = fixed; 1 = variable; 2 = pickup;
pickupObject : integer, optional
if ``solveType = 2``, pickup object number
pickupColumn : integer, optional
if ``solveType = 2``, pickup column number (0 for current column)
scale : float, optional
if ``solveType = 2``, scale factor
offset : float, optional
if ``solveType = 2``, offset
Returns
-------
nscSolveData : tuple or errorCode
5-tuple containing
``(status, pickupObject, pickupColumn, scaleFactor, offset)``
The status value is 0 for fixed, 1 for variable, and 2 for
a pickup solve. Only when the stauts is a pickup solve is the
other data meaningful.
-1 if it a BAD COMMAND
See Also
--------
zGetNSCSolve()
"""
nscSolveData = -1
args1 = "{:d},{:d},{:d},".format(surfNum, objNum, param)
args2 = "{:d},{:d},{:d},".format(solveType, pickupObject, pickupColumn)
args3 = "{:1.20g},{:1.20g}".format(scale, offset)
cmd = ''.join(["SetNSCSolve,",args1, args2, args3])
reply = self._sendDDEcommand(cmd)
rs = reply.rstrip()
if 'BAD COMMAND' not in rs:
nscSolveData = tuple([float(e) if i in (3,4) else int(float(e))
for i,e in enumerate(rs.split(","))])
return nscSolveData
def zSetOperand(self, row, column, value):
"""Sets the operand data in the Merit Function Editor
Parameters
----------
row : integer
operand row number in the MFE
column : integer
column number
value : string/integer/float
the type of ``value`` depends on the ``column`` number
Refer to the column-operand-data_ table (in the docstring of
``zGetOperand()`` for the column-value mapping)
Returns
-------
operandData : string/integer/float
the value set in the MFE cell. Refer table column-operand-data_.
Notes
-----
1. To update the merit function after calling ``zSetOperand()``,
call ``zOptimize()`` with the number of cycles set to -1.
2. Use ``zInsertMFO()`` to insert additional rows, before calling
``zSetOperand()``.
See Also
--------
zSetOperandRow():
sets an entire row of the MFE
zGetOperand(), zOptimize(), zInsertMFO()
"""
if column == 1:
if zo.isZOperand(str(value)):
value = str(value)
else:
print("Not a valid operand in zSetOperand().")
return -1
elif column in (2,3):
value = '{}'.format(int(float(value)))
else:
value = '{}'.format(float(value))
cmd = "SetOperand,{:d},{:d},{}".format(row, column, value)
reply = self._sendDDEcommand(cmd)
return _process_get_set_Operand(column, reply)
def zSetPolState(self, nlsPolarized, Ex, Ey, Phx, Phy):
    """Sets the default polarization state.

    These parameters correspond to the Polarization tab under the
    General settings.

    Parameters
    ----------
    nlsPolarized : integer
        if ``nlsPolarized > 0``, then default polarization state
        is unpolarized.
    Ex : float
        normalized electric field magnitude in x direction
    Ey : float
        normalized electric field magnitude in y direction
    Phx : float
        relative phase in x direction in degrees
    Phy : float
        relative phase in y direction in degrees

    Returns
    -------
    polStateData : tuple
        the 5-tuple contains ``(nlsPolarized, Ex, Ey, Phx, Phy)``

    Notes
    -----
    The quantity ``Ex*Ex + Ey*Ey`` should have a value of 1.0
    although any values are accepted.

    See Also
    --------
    zGetPolState()
    """
    fieldArgs = ",".join("{:1.20g}".format(v) for v in (Ex, Ey, Phx, Phy))
    reply = self._sendDDEcommand(
        "SetPolState,{:d},{}".format(nlsPolarized, fieldArgs))
    # drop the trailing empty token produced by Zemax's trailing comma
    tokens = reply.rsplit(",")[:-1]
    result = [int(float(tokens[0]))]
    result.extend(float(tok) for tok in tokens[1:])
    return tuple(result)
def zSetSettingsData(self, number, data):
    """Sets the settings data used by a window in temporary storage
    before calling ``zMakeGraphicWindow()`` or ``zMakeTextWindow()``.

    The stored data may be retrieved later using ``zGetSettingsData()``.

    Parameters
    ----------
    number : integer
        currently, only ``number = 0`` is supported. This number may be
        used to expand the feature in the future.
    data :
        the settings data to store (string-formattable)

    Returns
    -------
    settingsData : string
        settings data returned by Zemax

    Notes
    -----
    Please refer to "How ZEMAX calls the client" in the Zemax manual.

    See Also
    --------
    zGetSettingsData()
    """
    reply = self._sendDDEcommand(
        "SettingsData,{:d},{}".format(number, data))
    return str(reply.rstrip())
def zSetSolve(self, surfNum, code, *solveData):
    """Sets data for solves and/or pickups on the surface

    Parameters
    ----------
    surfNum : integer
        surface number for which the solve is to be set.
    code : integer
        surface parameter code for curvature, thickness, glass, conic,
        semi-diameter, etc. (refer to table surf_param_codes_for_setsolve_
        or use surface parameter mnemonic codes with signature
        `ln.SOLVE_SPAR_XXX`. for e.g. `ln.SOLVE_SPAR_CURV`, etc.
    solveData : splattered tuple
        the tuple of arguments are ``solvetype, param1, param2, param3, param4``.
        Refer to the table surf_param_codes_for_setsolve_ to construct
        the `solveData` sequence for a specific solve type code.
        There are two ways of passing this parameter:

        1. As a tuple of the above arguments preceded by the ``*``
           operator to flatten/splatter the tuple (see example below).
        2. As a sequence of arguments:
           ``solvetype, param1, param2, param3, param4``

        IMPORTANT:
        (1) All parameters should be passed as there is no default
            arguments to the function.
        (2) The order of parameters for `solveData` for code 5-17
            do not match the order in the pop-up window for setting
            pickup solve in Zemax application. For others, i.e.
            when `solveData` is specified as `solvetype, param1, param2 ...`
            the order of `param1`, `param2` matches the corresponding
            order of parameters in Zemax solve window.
        (3) For `solvetypes` that has pickup column, use 0 for
            "current column".

    Returns
    -------
    solveData : tuple
        tuple depending on the code value according to the table
        surf_param_codes_for_setsolve_ (same return as ``zGetSolve()``),
        if successful. The first element in the tuple is always the
        `solvetype`. -1 if the command failed.

    Notes
    -----
    1. The ``solvetype`` is an integer code, & the parameters have
       meanings that depend upon the solve type; see the chapter
       "SOLVES" in the Zemax manual for details. You may also use
       the mnemonic codes with signature ln.SOLVE_XXX, such as
       ln.SOLVE_CURV_FIXED, ln.SOLVE_THICK_VAR, etc. Additionally,
       it may also help to directly refer to the function body to
       quickly get an idea about the ``solvetype`` codes and parameters.
    2. If the ``solvetype`` is fixed, then the ``value`` in the
       ``solveData`` is ignored.
    3. Surface parameter codes

    .. _surf_param_codes_for_setsolve:

    ::

        Table : Surface parameter codes for zGetSolve() and zSetSolve()
        --------------------------------------------------------------------------
        code                - Datum set/get by zGetSolve()/zSetSolve()
        --------------------------------------------------------------------------
        0 (curvature)       - solvetype, param1, param2, pickupcolumn
        1 (thickness)       - solvetype, param1, param2, param3, pickupcolumn
        2 (glass)           - solvetype (for solvetype = 0);
                              solvetype, Index, Abbe, Dpgf (for solvetype = 1, model glass);
                              solvetype, pickupsurf (for solvetype = 2, pickup);
                              solvetype, index_offset, abbe_offset (for solvetype = 4, offset);
                              solvetype (for solvetype=all other values)
        3 (semi-diameter)   - solvetype, pickupsurf, pickupcolumn
        4 (conic)           - solvetype, pickupsurf, pickupcolumn
        5-16 (param 1-12)   - solvetype, pickupsurf, offset, scalefactor, pickupcolumn
        17 (parameter 0)    - solvetype, pickupsurf, offset, scalefactor, pickupcolumn
        1001+ (extra        - solvetype, pickupsurf, scalefactor, offset, pickupcolumn
               data values 1+)
        end-of-table

    4. If a parameter in the LDE is also present in the Multi-Configuration-Editor,
       Zemax doesn't allow the solve on that parameter to be set in the LDE. Instead,
       change the "status" of that parameter to set a solve in the MCE using the
       command `zSetMulticon()`.

    Examples
    --------
    To set a solve on the curvature (0) of surface number 6 such that
    the Marginal Ray angle (2) value is 0.1, use any of the following:

    >>> sdata = ln.zSetSolve(6, 0, *(2, 0.1))
    OR
    >>> sdata = ln.zSetSolve(6, 0, 2, 0.1 )
    OR
    >>> sdata = ln.zSetSolve(6, ln.SOLVE_SPAR_CURV, ln.SOLVE_CURV_MR_ANG, 0.1)

    See Also
    --------
    zSetMulticon() : for setting solves on parameters in Multi-Configuration-Editor;
    zGetSolve(), zGetNSCSolve(), zSetNSCSolve(), zRemoveVariables().
    """
    if not solveData:
        print("Error [zSetSolve] No solve data passed.")
        return -1
    # ``data`` acts as a sentinel: it stays None unless a known
    # (code, solvetype) combination is matched below. Previously it was
    # left unbound, so an unrecognized combination crashed with a
    # NameError at the ``if data:`` test instead of returning -1.
    data = None
    try:
        if code == self.SOLVE_SPAR_CURV:     # Solve specified on CURVATURE
            if solveData[0] == self.SOLVE_CURV_FIXED:
                data = ''
            elif solveData[0] == self.SOLVE_CURV_VAR:        # (V)
                data = ''
            elif solveData[0] == self.SOLVE_CURV_MR_ANG:     # (M)
                data = '{:1.20g}'.format(solveData[1]) # angle
            elif solveData[0] == self.SOLVE_CURV_CR_ANG:     # (C)
                data = '{:1.20g}'.format(solveData[1]) # angle
            elif solveData[0] == self.SOLVE_CURV_PICKUP:     # (P)
                data = ('{:d},{:1.20g},{:d}'
                .format(solveData[1], solveData[2], solveData[3])) # suface, scale-factor, column
            elif solveData[0] == self.SOLVE_CURV_MR_NORM:    # (N)
                data = ''
            elif solveData[0] == self.SOLVE_CURV_CR_NORM:    # (N)
                data = ''
            elif solveData[0] == self.SOLVE_CURV_APLAN:      # (A)
                data = ''
            elif solveData[0] == self.SOLVE_CURV_ELE_POWER:  # (X)
                data = '{:1.20g}'.format(solveData[1]) # power
            elif solveData[0] == self.SOLVE_CURV_CON_SURF:   # (S)
                data = '{:d}'.format(solveData[1]) # surface to be concentric to
            elif solveData[0] == self.SOLVE_CURV_CON_RADIUS: # (R)
                data = '{:d}'.format(solveData[1]) # surface to be concentric with
            elif solveData[0] == self.SOLVE_CURV_FNUM:       # (F)
                data = '{:1.20g}'.format(solveData[1]) # paraxial f/#
            elif solveData[0] == self.SOLVE_CURV_ZPL:        # (Z)
                data = str(solveData[1])  # macro name
        elif code == self.SOLVE_SPAR_THICK:  # Solve specified on THICKNESS
            if solveData[0] == self.SOLVE_THICK_FIXED:
                data = ''
            elif solveData[0] == self.SOLVE_THICK_VAR:         # (V)
                data = ''
            elif solveData[0] == self.SOLVE_THICK_MR_HGT:      # (M)
                data = '{:1.20g},{:1.20g}'.format(solveData[1], solveData[2]) # height, pupil zone
            elif solveData[0] == self.SOLVE_THICK_CR_HGT:      # (C)
                data = '{:1.20g}'.format(solveData[1]) # height
            elif solveData[0] == self.SOLVE_THICK_EDGE_THICK:  # (E)
                data = '{:1.20g},{:1.20g}'.format(solveData[1], solveData[2]) # thickness, radial height (0 for semi-diameter)
            elif solveData[0] == self.SOLVE_THICK_PICKUP:      # (P)
                data = ('{:d},{:1.20g},{:1.20g},{:d}'
                .format(solveData[1], solveData[2], solveData[3], solveData[4])) # surface, scale-factor, offset, column
            elif solveData[0] == self.SOLVE_THICK_OPD:         # (O)
                data = '{:1.20g},{:1.20g}'.format(solveData[1], solveData[2]) # opd, pupil zone
            elif solveData[0] == self.SOLVE_THICK_POS:         # (T)
                data = '{:d},{:1.20g}'.format(solveData[1], solveData[2]) # surface, length from surface
            elif solveData[0] == self.SOLVE_THICK_COMPENSATE:  # (S)
                data = '{:d},{:1.20g}'.format(solveData[1], solveData[2]) # surface, sum of surface thickness
            elif solveData[0] == self.SOLVE_THICK_CNTR_CURV:   # (X)
                data = '{:d}'.format(solveData[1]) # surface to be at the COC of
            elif solveData[0] == self.SOLVE_THICK_PUPIL_POS:   # (U)
                data = ''
            elif solveData[0] == self.SOLVE_THICK_ZPL:         # (Z)
                data = str(solveData[1])  # macro name
        elif code == self.SOLVE_SPAR_GLASS:  # GLASS
            if solveData[0] == self.SOLVE_GLASS_FIXED:
                data = ''
            elif solveData[0] == self.SOLVE_GLASS_MODEL:
                data = ('{:1.20g},{:1.20g},{:1.20g}'
                .format(solveData[1], solveData[2], solveData[3])) # index Nd, Abbe Vd, Dpgf
            elif solveData[0] == self.SOLVE_GLASS_PICKUP:  # (P)
                data = '{:d}'.format(solveData[1]) # surface
            elif solveData[0] == self.SOLVE_GLASS_SUBS:    # (S)
                data = str(solveData[1]) # catalog name
            elif solveData[0] == self.SOLVE_GLASS_OFFSET:  # (O)
                data = '{:1.20g},{:1.20g}'.format(solveData[1], solveData[2]) # index Nd offset, Abbe Vd offset
            else:
                # per the solve table, all other glass solvetypes carry
                # no extra data; send just the solvetype
                data = ''
        elif code == self.SOLVE_SPAR_SEMIDIA:  # Solve specified on SEMI-DIAMETER
            if solveData[0] == self.SOLVE_SEMIDIA_AUTO:
                data = ''
            elif solveData[0] == self.SOLVE_SEMIDIA_FIXED:   # (U)
                data = ''
            elif solveData[0] == self.SOLVE_SEMIDIA_PICKUP:  # (P)
                data = ('{:d},{:1.20g},{:d}'
                .format(solveData[1], solveData[2], solveData[3])) # surface, scale-factor, column
            elif solveData[0] == self.SOLVE_SEMIDIA_MAX:     # (M)
                data = ''
            elif solveData[0] == self.SOLVE_SEMIDIA_ZPL:     # (Z)
                data = str(solveData[1])  # macro name
        elif code == self.SOLVE_SPAR_CONIC:  # Solve specified on CONIC
            if solveData[0] == self.SOLVE_CONIC_FIXED:
                data = ''
            elif solveData[0] == self.SOLVE_CONIC_VAR:     # (V)
                data = ''
            elif solveData[0] == self.SOLVE_CONIC_PICKUP:  # (P)
                data = ('{:d},{:1.20g},{:d}'
                .format(solveData[1], solveData[2], solveData[3])) # surface, scale-factor, column
            elif solveData[0] == self.SOLVE_CONIC_ZPL:     # (Z)
                data = str(solveData[1])  # macro name
        elif self.SOLVE_SPAR_PAR1 <= code <= self.SOLVE_SPAR_PAR12: # Solve specified on PARAMETERS 1-12
            if solveData[0] == self.SOLVE_PARn_FIXED:
                data = ''
            elif solveData[0] == self.SOLVE_PARn_VAR:     # (V)
                data = ''
            elif solveData[0] == self.SOLVE_PARn_PICKUP:  # (P)
                data = ('{:d},{:1.20g},{:1.20g},{:d}'
                .format(solveData[1],solveData[2],solveData[3],solveData[4])) # surface, scale-factor, offset, column
            elif solveData[0] == self.SOLVE_PARn_CR:      # (C)
                data = '{:d},{:1.20g}'.format(solveData[1], solveData[2]) # field, wavelength
            elif solveData[0] == self.SOLVE_PARn_ZPL:     # (Z)
                data = str(solveData[1])  # macro name
        elif code == self.SOLVE_SPAR_PAR0:  # Solve specified on PARAMETER 0
            if solveData[0] == self.SOLVE_PAR0_FIXED:
                data = ''
            elif solveData[0] == self.SOLVE_PAR0_VAR:     # (V)
                data = ''
            elif solveData[0] == self.SOLVE_PAR0_PICKUP:  # (P)
                data = '{:d}'.format(solveData[1]) # surface
        elif code > 1000:  # Solve specified on EXTRA DATA VALUES
            if solveData[0] == self.SOLVE_EDATA_FIXED:
                data = ''
            elif solveData[0] == self.SOLVE_EDATA_VAR:     # (V)
                data = ''
            elif solveData[0] == self.SOLVE_EDATA_PICKUP:  # (P)
                data = ('{:d},{:1.20g},{:1.20g},{:d}'
                .format(solveData[1], solveData[2], solveData[3], solveData[4])) # surface, scale-factor, offset, column
            elif solveData[0] == self.SOLVE_EDATA_ZPL:     # (Z)
                data = str(solveData[1])  # macro name
    except IndexError:
        print("Error [zSetSolve]: Check number of solve parameters!")
        return -1
    if data is None:
        # unrecognized code/solvetype combination; fail gracefully
        # instead of crashing on an unbound ``data``
        print("Error [zSetSolve]: Unknown surface parameter code or solve type!")
        return -1
    #synthesize the command to pass to zemax
    if data:
        cmd = ("SetSolve,{:d},{:d},{:d},{}"
               .format(surfNum, code, solveData[0], data))
    else:
        cmd = ("SetSolve,{:d},{:d},{:d}"
               .format(surfNum, code, solveData[0]))
    reply = self._sendDDEcommand(cmd)
    solveData = _process_get_set_Solve(reply)
    return solveData
def zSetSurfaceData(self, surfNum, code, value, arg2=None):
    """Sets surface data on a sequential lens surface

    Parameters
    ----------
    surfNum : integer
        the surface number
    code : integer
        number (Refer to the table surf_data_codes_ in the docstring
        of ``zGetSurfaceData()``). You may also use the surface data
        mnemonic codes with signature ln.SDAT_XXX, e.g. ln.SDAT_TYPE,
        ln.SDAT_CURV, ln.SDAT_THICK, etc
    value : string or float
        string if ``code`` is 0, 1, 4, 7 or 9, else float
    arg2 : optional
        required for item codes above 70 (i.e. codes 71-76)

    Returns
    -------
    surface_data : string or numeric
        the returned data depends on the ``code``. Refer to the table
        surf_data_codes_ (in docstring of ``zGetSurfaceData()``) for
        details

    Raises
    ------
    ValueError
        if the type of ``value`` does not match the given ``code``, or
        if ``arg2`` is missing for codes 71-76

    See Also
    --------
    zGetSurfaceData()
    """
    # codes whose datum is textual (type, comment, glass, etc.)
    strCodes = (0, 1, 4, 7, 9)
    cmd = "SetSurfaceData,{:d},{:d}".format(surfNum, code)
    if code in strCodes:
        if isinstance(value, str):
            cmd = cmd + ',' + value
        else:
            raise ValueError('Invalid input, expecting string type code')
    else:
        if not isinstance(value, str):
            cmd = cmd + ',' + str(value)
        else:
            # fixed: this branch previously raised with the message
            # 'expecting additional argument' (copy-pasted from the
            # arg2 check below); these codes require a numeric value
            raise ValueError('Invalid input, expecting numeric value')
        if code in (71, 72, 73, 74, 75, 76):
            if arg2 is not None:  # was `arg2 != None` (PEP 8 E711)
                cmd = cmd + "," + str(arg2)
            else:
                raise ValueError('Invalid input, expecting argument')
    reply = self._sendDDEcommand(cmd)
    if code in strCodes:
        surfaceDatum = reply.rstrip()
    else:
        surfaceDatum = float(reply)
    return surfaceDatum
def zSetSurfaceParameter(self, surfNum, param, value):
    """Set surface parameter data.

    Parameters
    ----------
    surfNum : integer
        surface number of the surface
    param : integer
        parameter (Par in LDE) number being set
    value : float
        value to set for the ``param``

    Returns
    -------
    paramData : float
        the parameter value

    See Also
    --------
    zSetSurfaceData(), zGetSurfaceParameter()
    """
    reply = self._sendDDEcommand(
        "SetSurfaceParameter,{:d},{:d},{:1.20g}".format(surfNum, param, value))
    return float(reply)
def zSetSystem(self, unitCode=0, stopSurf=1, rayAimingType=0, useEnvData=0,
               temp=20.0, pressure=1, globalRefSurf=1):
    """Sets a number of general systems property (General Lens Data)

    Parameters
    ----------
    unitCode : integer, optional
        lens units code (0, 1, 2, or 3 for mm, cm, in, or M)
    stopSurf : integer, optional
        the stop surface number
    rayAimingType : integer, optional
        ray aiming type (0, 1, or 2 for off, paraxial or real)
    useEnvData : integer, optional
        use environment data flag (0 or 1 for no or yes) [ignored]
    temp : float, optional
        the current temperature
    pressure : float, optional
        the current pressure
    globalRefSurf : integer, optional
        the global coordinate reference surface number

    Returns
    -------
    numSurfs : integer
        number of surfaces
    unitCode : integer
        lens units code (0, 1, 2, or 3 for mm, cm, in, or M)
    stopSurf : integer
        the stop surface number
    nonAxialFlag : integer
        flag to indicate if system is non-axial symmetric (0 for axial,
        1 if not axial)
    rayAimingType : integer
        ray aiming type (0, 1, or 2 for off, paraxial or real)
    adjustIndex : integer
        adjust index data to environment flag (0 if false, 1 if true)
    temp : float
        the current temperature
    pressure : float
        the current pressure
    globalRefSurf : integer
        the global coordinate reference surface number

    See Also
    --------
    zSetSystemAper():
        for setting the system aperture such as aperture type, aperture
        value, etc.
    zGetSystem(), zGetSystemAper(), zGetAperture(), zSetAperture()
    """
    cmd = ("SetSystem,{:d},{:d},{:d},{:d},{:1.20g},{:1.20g},{:d}"
           .format(unitCode, stopSurf, rayAimingType, useEnvData, temp,
                   pressure, globalRefSurf))
    tokens = self._sendDDEcommand(cmd).split(',')
    # NOTE(review): only index 6 (temp) is kept as float; index 7
    # (pressure) is truncated to int, mirroring zGetSystem() — confirm
    # this is the intended parsing.
    parsed = []
    for idx, tok in enumerate(tokens):
        num = float(tok)
        parsed.append(num if idx == 6 else int(num))
    return tuple(parsed)
def zSetSystemAper(self, aType, stopSurf, aperVal):
    """Sets the lens system aperture and corresponding data.

    Parameters
    ----------
    aType : integer
        system aperture type:

        | 0 = entrance pupil diameter
        | 1 = image space F/#
        | 2 = object space NA
        | 3 = float by stop
        | 4 = paraxial working F/#
        | 5 = object cone angle
    stopSurf : integer
        stop surface number
    aperVal : float
        if aperture type is "float by stop", ``aperVal`` is the stop
        surface semi-diameter; else it is the system aperture

    Returns
    -------
    aType : integer
        system aperture type (same codes as the input parameter)
    stopSurf : integer
        stop surface number
    value : float
        if aperture type is "float by stop", value is the stop surface
        semi-diameter, else value is the system aperture

    Notes
    -----
    The returned tuple is the same as the returned tuple of
    ``zGetSystemAper()``

    See Also
    --------
    zGetSystem(), zGetSystemAper()
    """
    reply = self._sendDDEcommand(
        "SetSystemAper,{:d},{:d},{:1.20g}".format(aType, stopSurf, aperVal))
    parsed = []
    for idx, tok in enumerate(reply.split(',')):
        # index 2 (aperture value) is a float; the rest are integers
        parsed.append(float(tok) if idx == 2 else int(float(tok)))
    return tuple(parsed)
def zSetSystemProperty(self, code, value1, value2=0):
    """Sets system properties

    Parameters
    ----------
    code : integer
        value that defines the specific system property to be set
        (see the table system_property_codes_ in docstring of
        ``zGetSystemProperty()``)
    value1 : integer or float or string
        the nature and type of ``value1`` depends on the ``code``
    value2 : integer or float, optional
        the nature and type of ``value2`` depends on the ``code``.
        Ignored if not used

    Returns
    -------
    sysPropData : string or numeric
        system property data (refer to the table system_property_codes_
        in docstring of ``zGetSystemProperty()``)

    See Also
    --------
    zGetSystemProperty()
    """
    reply = self._sendDDEcommand(
        "SetSystemProperty,{:d},{},{}".format(code, value1, value2))
    return _process_get_set_SystemProperty(code, reply)
def zSetTol(self, operNum, col, value):
    """Sets the tolerance operand data.

    Parameters
    ----------
    operNum : integer
        tolerance operand number (row number in the tolerance editor,
        when greater than 0)
    col : integer
        * 1 for tolerance Type;
        * 2-4 for int1 - int3;
        * 5 for min;
        * 6 for max;
    value : string or float
        4-character string (tolerancing operand code) if ``col==1``,
        else float value to set

    Returns
    -------
    toleranceData : number or tuple or errorCode
        the ``toleranceData`` is a number or a 6-tuple, depending
        upon ``operNum`` as follows:

        * if ``operNum = 0``, then ``toleranceData`` is a number
          indicating the number of tolerance operands defined.
        * if ``operNum > 0``, then ``toleranceData`` is a tuple
          with elements ``(tolType, int1, int2, min, max, int3)``
        * Returns -1 if an error occurs.

    See Also
    --------
    zSetTolRow(), zGetTol()
    """
    if col == 1:
        # column 1 takes a tolerancing operand code string; validate it
        if not zo.isZOperand(str(value), 2):
            return -1
        cmd = "SetTol,{:d},{:d},{}".format(operNum, col, value)
    else:
        cmd = "SetTol,{:d},{:d},{:1.20g}".format(operNum, col, value)
    reply = self._sendDDEcommand(cmd)
    if operNum == 0:
        # operNum 0 reports just the count of defined operands
        return int(float(reply.rstrip()))
    return _process_get_set_Tol(operNum, reply)
# FIX !!! Currently, I am not able to set more than 1 row in the tolerance
# editor through this command. I don't find anything like zInsertTol ...
# Similar functions exist for the Multi-Configuration Editor (zInsertMCO) and
# for the Merit Function Editor (zInsertMFO). May need to contact Zemax Support.
def zSetUDOItem(self, bufferCode, dataNum, data):
    """Passes a single datum computed by the client program to the
    Zemax optimizer.

    Parameters
    ----------
    bufferCode : integer
        the integer value provided by Zemax to the client that
        uniquely identifies the correct lens.
    dataNum : integer
        data item number being passed
    data : float
        the value of the data item

    Returns
    -------
    regressed reply from Zemax (type depends on reply content)

    Notes
    -----
    1. The only time this item name should be used is when implementing
       a User Defined Operand, or UDO. UDO's are described in
       "Optimizing with externally compiled programs" in the Zemax
       manual.
    2. After the last data item has been sent, the buffer must be
       closed using the ``zCloseUDOData()`` function before the
       optimization may proceed. A typical implementation may consist
       of the following series of function calls:
       ::

            ln.zSetUDOItem(bufferCode, 0, value0)
            ln.zSetUDOItem(bufferCode, 1, value1)
            ln.zSetUDOItem(bufferCode, 2, value2)
            ln.zCloseUDOData(bufferCode)

    See Also
    --------
    zGetUDOSystem(), zCloseUDOData()
    """
    reply = self._sendDDEcommand(
        "SetUDOItem,{:d},{:d},{:1.20g}".format(bufferCode, dataNum, data))
    return _regressLiteralType(reply.rstrip())
# FIX !!! At this time, I am not sure what is the expected return.
def zSetVig(self):
    """Request Zemax to set the vignetting factors automatically.

    Parameters
    ----------
    None

    Returns
    -------
    retVal : integer
        0 = success, -1 = fail

    Notes
    -----
    Calling this function is equivalent to clicking the "Set Vig"
    button from the "Field Data" window. For more information on
    how Zemax calculates the vignetting factors automatically, please
    refer to "Vignetting factors" under the "Systems Menu" chapter in
    the Zemax Manual.
    """
    reply = self._sendDDEcommand("SetVig")
    return 0 if 'OK' in reply.split() else -1
def zSetWave(self, n, arg1, arg2):
    """Sets wavelength data in the Zemax DDE server

    There are 2 ways to use this function:

    ``zSetWave(0, primary, number) -> waveData``

    OR

    ``zSetWave(n, wavelength, weight) -> waveData``

    Parameters
    ----------
    [Case: ``n=0``]:

    n : 0
        the function sets general wavelength data
    arg1 : integer
        primary wavelength number to set
    arg2 : integer
        total number of wavelengths to set

    [Case: ``0 < n <= number-of-wavelengths``]:

    n : integer (> 0)
        wavelength number to set
    arg1 : float
        wavelength in micrometers
    arg2 : float
        weight

    Returns
    -------
    The function returns a tuple. The elements in the tuple has
    different meaning depending on the value of ``n``.

    [Case: ``n=0``]:

    primary : integer
        number indicating the primary wavelength
    number : integer
        number of wavelengths currently defined

    [Case: ``0 < n <= number-of-wavelengths``]:

    wavelength : float
        value of the specific wavelength
    weight : float
        weight of the specific wavelength

    Notes
    -----
    The returned tuple is exactly same in structure and contents to
    that returned by ``zGetWave()``.

    See Also
    --------
    zGetWave(), zSetPrimaryWave(), zSetWaveTuple(), zGetWaveTuple()
    """
    # n == 0 sets general data (integers); n > 0 sets one wavelength (floats)
    if n:
        cmd = "SetWave,{:d},{:1.20g},{:1.20g}".format(n, arg1, arg2)
        caster = float
    else:
        cmd = "SetWave,{:d},{:d},{:d}".format(0, arg1, arg2)
        caster = int
    reply = self._sendDDEcommand(cmd)
    return tuple(caster(tok) for tok in reply.split(','))
def zWindowMaximize(self, windowNum=0):
    """Maximize the main Zemax window or any analysis window Zemax
    currently displayed.

    Parameters
    ----------
    windowNum : integer
        the window number. use 0 for the main Zemax window

    Returns
    -------
    retVal : integer
        0 if success, -1 if failed.
    """
    reply = self._sendDDEcommand("WindowMaximize,{:d}".format(windowNum))
    return 0 if 'OK' in reply.split() else -1
def zWindowMinimize(self, windowNum=0):
    """Minimize the main Zemax window or any analysis window Zemax
    currently displayed.

    Parameters
    ----------
    windowNum : integer
        the window number. use 0 for the main Zemax window

    Returns
    -------
    retVal : integer
        0 if success, -1 if failed.
    """
    reply = self._sendDDEcommand("WindowMinimize,{:d}".format(windowNum))
    return 0 if 'OK' in reply.split() else -1
def zWindowRestore(self, windowNum=0):
    """Restore the main Zemax window or any analysis window to its
    previous size and position.

    Parameters
    ----------
    windowNum : integer
        the window number. use 0 for the main Zemax window

    Returns
    -------
    retVal : integer
        0 if success, -1 if failed.
    """
    reply = self._sendDDEcommand("WindowRestore,{:d}".format(windowNum))
    return 0 if 'OK' in reply.split() else -1
#%% ADDITIONAL FUNCTIONS
# -------------------------------------------------------
# Editor function for both getting and setting parameters
# -------------------------------------------------------
def zGetOperandRow(self, row):
    """Returns a row of the Merit Function Editor

    Parameters
    ----------
    row : integer
        the operand row number

    Returns
    -------
    opertype : string
        operand type, column 1 in MFE
    int1 : integer or string
        column 2 in MFE. The column 2 is a string, usually when opertype
        is 'BLNK', and there is some comments in the second column
    int2 : integer
        column 3 in MFE
    data1 : float
        column 4 in MFE
    data2 : float
        column 5 in MFE
    data3 : float
        column 6 in MFE
    data4 : float
        column 7 in MFE
    data5 : float
        column 12 in MFE
    data6 : float
        column 13 in MFE
    tgt : float
        target (column 8)
    wgt : float
        weight (column 9)
    value : float
        value (column 10)
    percentage : float
        percentage contribution (column 11)

    See Also
    --------
    zGetOperand(), zSetOperandRow()
    """
    # query columns in the namedtuple's field order:
    # 1-7 (type & operand args), 12-13 (extra data), 8-11 (tgt/wgt/val/%)
    columns = list(range(1, 8)) + [12, 13] + list(range(8, 12))
    cellValues = [self.zGetOperand(row=row, column=c) for c in columns]
    rowdat = _co.namedtuple('OperandData', ['opertype', 'int1', 'int2', 'data1',
                            'data2', 'data3', 'data4', 'data5', 'data6', 'tgt',
                            'wgt', 'value', 'percentage'])
    return rowdat._make(cellValues)
def zSetOperandRow(self, row, opertype, int1=None, int2=None, data1=None, data2=None,
                   data3=None, data4=None, data5=None, data6=None, tgt=None, wgt=None):
    """Sets a row in the Merit Function Editor

    Parameters
    ----------
    row : integer
        operand row number in the MFE
    opertype : string
        operand type
    int1 : integer, optional
        column 2 in MFE
    int2 : integer, optional
        column 3 in MFE
    data1 : float, optional
        column 4 in MFE
    data2 : float, optional
        column 5 in MFE
    data3 : float, optional
        column 6 in MFE
    data4 : float, optional
        column 7 in MFE
    data5 : float, optional
        column 12 in MFE
    data6 : float, optional
        column 13 in MFE
    tgt : float, optional
        target
    wgt : float, optional
        weight

    Returns
    -------
    the contents of the row (same as that returned by
    ``zGetOperandRow()``)

    Notes
    -----
    1. Use ``zInsertMFO()`` to insert a new row in the MFE at a
       specified row number.
    2. To update the merit function after calling ``zSetOperand()``,
       call ``zOptimize()`` with the number of cycles set to -1.

    See Also
    --------
    zInsertMFO(), zSetOperand(), zOperandValue(), zGetOperand()
    """
    # explicit (column, value) pairs; only non-None entries are written
    colValuePairs = [(1, opertype), (2, int1), (3, int2), (4, data1),
                     (5, data2), (6, data3), (7, data4), (8, tgt),
                     (9, wgt), (12, data5), (13, data6)]
    for colNum, cellValue in colValuePairs:
        if cellValue is not None:
            self.zSetOperand(row=row, column=colNum, value=cellValue)
    return self.zGetOperandRow(row)
# -------------------
# System functions
# -------------------
def zGetAngularMagnification(self, wave=None):
    """Get angular magnification of paraxial system.

    The angular magnification is defined as the ratio of the image space
    paraxial chief ray angle to the object space paraxial chief ray angle

    Parameters
    ----------
    wave : integer, optional
        the wavelength number to evaluate at. If `None`, the primary
        wave number is used.

    Returns
    -------
    amag : real
        angular magnification, provided at least one non-zero field
        point is defined in the Field Data Editor.
        Returns error code -999 if only the on-axis field is defined.
        See Notes.

    Notes
    -----
    Zemax returns zero (0) for angular magnification if the only field
    defined in the field editor is the on-axis field.

    See Also
    --------
    zGetPupilMagnification()
    """
    # fail early when AMAG would be meaningless (on-axis-only field data);
    # this also avoids a needless primary-wave DDE query in that case
    if not self.zAnyOffAxisField():
        return -999
    if wave is None:  # was `wave == None`; identity comparison per PEP 8
        wave = self.zGetPrimaryWave()
    return self.zOperandValue('AMAG', wave)
def zGetMagnification(self):
    """Returns the real magnification evaluated as the ratio of the image
    height to the object height

    Parameters
    ----------
    None

    Returns
    -------
    mag : real
        real magnification. see Notes.

    Notes
    -----
    1. The function returns the real magnification of the system. It is
       affected by distortions, and the actual location of the image
       surface. For paraxial magnification use `zGetFirst().paraMag`
    """
    objHt = self.zGetSemiDiameter(0)
    if not objHt:
        # zero object height (e.g. infinite conjugate): magnification
        # is reported as 0.0
        return 0.0
    trace = self.zGetTrace(waveNum=1, mode=0, surf=-1, hx=0, hy=1, px=0, py=0)
    return trace.y / objHt
def zGetNumField(self):
    """Returns the total number of fields defined

    Equivalent to ZPL macro ``NFLD``

    Parameters
    ----------
    None

    Returns
    -------
    nfld : integer
        number of fields defined
    """
    # system property code 101 reports the field count
    nfld = self.zGetSystemProperty(101)
    return nfld
def zAnyOffAxisField(self):
    """Returns `True` if at least one off-axis X-Field or Y-Field is
    defined in the Field Data Editor.

    Fields with zero weights are also considered to be "defined".

    Parameters
    ----------
    None

    Returns
    -------
    retVal : bool
        `True` if any off-axis field is found, else `False`
    """
    # a field is off-axis if either its x (index 0) or y (index 1)
    # coordinate is non-zero
    return any(fld[0] or fld[1] for fld in self.zGetFieldTuple())
def zGetFieldTuple(self):
    """Get all field data in a single n-tuple.

    Parameters
    ----------
    None

    Returns
    -------
    fieldDataTuple : n-tuple (``0 < n <= 12``)
        the tuple elements represent field locations with each element
        containing all 8 field parameters.

    Examples
    --------
    This example shows the namedtuple returned by ``zGetFieldTuple()``

    >>> ln.zGetFieldTuple()
    (fieldData(xf=0.0, yf=0.0, wgt=1.0, vdx=0.0, vdy=0.0, vcx=0.0, vcy=0.0, van=0.0),
     fieldData(xf=0.0, yf=14.0, wgt=1.0, vdx=0.0, vdy=0.0, vcx=0.0, vcy=0.0, van=0.0),
     fieldData(xf=0.0, yf=20.0, wgt=1.0, vdx=0.0, vdy=0.0, vcx=0.0, vcy=0.0, van=0.0))

    See Also
    --------
    zGetField(), zSetField(), zSetFieldTuple()
    """
    # zGetField(0) returns (type, numberOfFields); we need the count
    fieldCount = self.zGetField(0)[1]
    fd = _co.namedtuple('fieldData', ['xf', 'yf', 'wgt',
                                      'vdx', 'vdy',
                                      'vcx', 'vcy', 'van'])
    records = []
    for num in range(1, fieldCount + 1):
        tokens = self._sendDDEcommand('GetField,{:d}'.format(num)).split(',')
        records.append(fd._make(float(tok) for tok in tokens))
    return tuple(records)
def zSetFieldTuple(self, ftype, norm, fields):
    """Sets all field points from a 2D field tuple

    Parameters
    ----------
    ftype : integer
        the field type (0 = angle, 1 = object height, 2 = paraxial
        image height, and 3 = real image height)
    norm : integer 0 or 1
        the field normalization (0=radial, 1=rectangular)
    fields : n-tuple
        the input field data tuple is an N-D tuple (0 < N <= 12) with
        every dimension representing a single field location. It can
        be constructed as shown in the example (see below)

    Returns
    -------
    fields : n-tuple
        the output field data tuple is also a N-D tuple similar to the
        ``fields``, except that for each field location all
        8 field parameters are returned.

    Raises
    ------
    ValueError
        if the number of fields is not between 1 and 12

    Examples
    --------
    The following example sets 3 field points defined as angles with
    field normalization = 0:

    * xf=0.0, yf=0.0, wgt=1.0, vdx=vdy=vcx=vcy=van=0.0
    * xf=0.0, yf=5.0, wgt=1.0
    * xf=0.0, yf=7.0

    >>> ln.zSetFieldTuple(0, 0,
                          (0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0),
                          (0.0, 5.0, 1.0),
                          (0.0, 7.0))

    See Also
    --------
    zSetField(), zGetField(), zGetFieldTuple()
    """
    fieldCount = len(fields)
    if not 0 < fieldCount <= 12:
        raise ValueError('Invalid number of fields')
    # first declare the field type, count, and normalization ...
    self._sendDDEcommand(
        "SetField,{:d},{:d},{:d},{:d}".format(0, ftype, fieldCount, norm))
    # ... then set each field point in turn
    return tuple(self.zSetField(num + 1, *fields[num])
                 for num in range(fieldCount))
def zGetNumSurf(self):
    """Return the total number of surfaces defined

    Equivalent to ZPL macro ``NSUR``

    Parameters
    ----------
    None

    Returns
    -------
    nsur : integer
        number of surfaces defined

    Notes
    -----
    The count doesn't include the object (OBJ) surface.
    """
    # zGetSystem() returns numSurfs as its first element
    nsur = self.zGetSystem()[0]
    return nsur
def zGetNumWave(self):
    """Return the total number of wavelengths defined

    Equivalent to ZPL macro ``NWAV``

    Parameters
    ----------
    None

    Returns
    -------
    nwav : integer
        number of wavelengths defined
    """
    # system property code 201 reports the wavelength count
    nwav = self.zGetSystemProperty(201)
    return nwav
def zGetPrimaryWave(self):
    """Return the primary wavelength number

    Equivalent to ZPL macro ``PWAV``

    Parameters
    ----------
    None

    Returns
    -------
    primary_wave_number : integer
        primary wavelength number

    Notes
    -----
    To get the primary wavelength (in microns) do the following:

    >>> ln.zGetWave(ln.zGetPrimaryWave()).wavelength

    See Also
    --------
    zSetPrimaryWave()
    """
    # system property code 200 reports the primary wavelength number
    pwav = self.zGetSystemProperty(200)
    return pwav
def zSetPrimaryWave(self, waveNum):
    """Sets the primary wavelength for the lens in Zemax DDE server.

    Parameters
    ----------
    waveNum : integer
        the wavelength number to set as primary

    Returns
    -------
    primary : integer
        number indicating the primary wavelength
    number : integer
        number of wavelengths currently defined

    See Also
    --------
    zGetPrimaryWave(), zSetWave(), zSetWaveTuple(), zGetWaveTuple()
    """
    # keep the currently defined wavelength count; change only the primary
    numWaves = self.zGetWave(0)[1]
    reply = self._sendDDEcommand(
        "SetWave,{:d},{:d},{:d}".format(0, waveNum, numWaves))
    return tuple(int(tok) for tok in reply.split(','))
def zGetWaveTuple(self):
"""Gets data on all defined wavelengths
Parameters
----------
None
Returns
-------
waveDataTuple : 2D tuple
the first dimension (first subtuple) contains the wavelengths
and the second dimension containing the weights as follows:
((wave1, wave2, wave3 ,..., waveN),(wgt1, wgt2, wgt3,..., wgtN))
Notes
-----
This function is similar to "zGetWaveDataMatrix()" in MZDDE.
Examples
--------
This example shows the named tuple returned by the function
>>> ln.zGetWaveTuple()
waveDataTuple(wavelengths=(0.48, 0.55, 0.65), weights=(0.800000012, 1.0, 0.800000012))
See Also
--------
zSetWaveTuple(), zGetWave(), zSetWave()
"""
waveCount = self.zGetWave(0)[1]
waveData = [[],[]]
wdt = _co.namedtuple('waveDataTuple', ['wavelengths', 'weights'])
for i in range(waveCount):
cmd = "GetWave,{wC:d}".format(wC=i+1)
reply = self._sendDDEcommand(cmd)
rs = reply.split(',')
waveData[0].append(float(rs[0])) # store the wavelength
waveData[1].append(float(rs[1])) # store the weight
waveDataTuple = wdt(tuple(waveData[0]),tuple(waveData[1]))
return waveDataTuple
def zSetWaveTuple(self, waves):
"""Sets wavelength and weight data from a matrix.
Parameters
----------
waves : 2-D tuple
the input wave data tuple is a 2D tuple with the first
dimension (first sub-tuple) containing the wavelengths
and the second dimension containing the weights as:
``((wave1,wave2,wave3,...,waveN),(wgt1,wgt2,wgt3,...,wgtN))``
The first wavelength (wave1) is assigned to be the primary
wavelength. To change the primary wavelength use
``zSetWavePrimary()``
Returns
-------
retWaves : 2-D tuple
the output wave data tuple is also a 2D tuple similar to the
``waves``.
See Also
--------
zGetWaveTuple(), zSetWave(), zSetWavePrimary()
"""
waveCount = len(waves[0])
retWaves = [[],[]]
self.zSetWave(0,1,waveCount) # Set no. of wavelen & the wavelen to 1
for i in range(waveCount):
cmd = ("SetWave,{:d},{:1.20g},{:1.20g}".format(i+1,waves[0][i],waves[1][i]))
reply = self._sendDDEcommand(cmd)
rs = reply.split(',')
retWaves[0].append(float(rs[0])) # store the wavelength
retWaves[1].append(float(rs[1])) # store the weight
return (tuple(retWaves[0]),tuple(retWaves[1]))
def zSetNSCPositionTuple(self, surfNum, objNum, x=0.0, y=0.0, z=0.0,
tiltX=0.0, tiltY=0.0, tiltZ=0.0, material=''):
"""Sets position and tilt data for NSC objects
Parameters
----------
surfNum : integer
the surface number. Use 1 if Non-Sequential program mode
objNum : integer
the object number
x, y, z, tiltX, tiltY, tiltZ : floats, optional
x, y, z position and tilts about X, Y, and Z axis respectively
material : string, optional
valid string code to specify the material
Returns
-------
nscPosData : tuple
a 7-tuple containing x, y, z, tilt-x, tilt-y, tilt-z, material
See Also
--------
zSetNSCPositionTuple(), zGetNSCPosition()
"""
for code, item in enumerate((x, y, z, tiltX, tiltY, tiltZ, material), 1):
self.zSetNSCPosition(surfNum, objNum, code, item)
return self.zGetNSCPosition(surfNum, objNum)
def zSetTolRow(self, operNum, tolType, int1, int2, int3, minT, maxT):
"""Helper function to set all the elements of a row (given
by ``operNum``) in the tolerance editor.
Parameters
----------
operNum : integer (> 0)
the tolerance operand number (row number in the tolerance
editor)
tolType : string
4-character string (tolerancing operand code)
int1 : integer
'int1' parameter
int2 : integer
'int2' parameter
int3 : integer
'int3' parameter
minT : float
minimum value
maxT : float
maximum value
Returns
-------
tolData : tolerance data or errorCode
the data for the row indicated by the ``operNum``
if successful, else -1
"""
tolData = self.zSetTol(operNum, 1, tolType)
if tolData != -1:
self.zSetTol(operNum, 2, int1)
self.zSetTol(operNum, 3, int2)
self.zSetTol(operNum, 4, int3)
self.zSetTol(operNum, 5, minT)
self.zSetTol(operNum, 6, maxT)
return self.zGetTol(operNum)
else:
return -1
def _zGetMode(self):
"""Returns the mode (Sequential, Non-sequential or Mixed) of the current
lens in the DDE server
Parameters
----------
None
Returns
-------
zmxModeInformation : 2-tuple (mode, nscSurfNums)
mode (0 = Sequential; 1 = Non-sequential; 2 = Mixed mode)
nscSurfNums = (tuple of integers) the surfaces (in mixed mode)
that are non-sequential. In Non-sequential mode and in purely
sequential mode, this tuple is empty (of length 0).
Notes
-----
This only works when a zmx file is loaded into the server. Currently this
function is meant to be used for internal purpose only.
For the purpose of this function, "Sequential" implies that there are no
non-sequential surfaces in the LDE.
"""
nscSurfNums = []
nscData = self.zGetNSCData(1, 0)
if nscData > 0: # Non-sequential mode
mode = 1
else: # Not Non-sequential mode
numSurf = self.zGetSystem()[0]
for i in range(1,numSurf+1):
surfType = self.zGetSurfaceData(i, 0)
if surfType == 'NONSEQCO':
nscSurfNums.append(i)
if len(nscSurfNums) > 0:
mode = 2 # mixed mode
else:
mode = 0 # sequential
return (mode,tuple(nscSurfNums))
# -------------------
# Analysis functions
# -------------------
# Spot diagram analysis functions
def zSpiralSpot(self, hx, hy, waveNum, spirals, rays, mode=0):
"""Returns positions and intensity of rays traced in a spiral
over the entrance pupil to the image surface.
The final destination of the rays is the image surface.
Parameters
----------
hx : float
normalized field height along x axis
hy : float
normalized field height along y axis
waveNum : integer
wavelength number as in the wavelength data editor
spirals : integer
number of spirals
rays : integer
total number of rays to trace
mode : integer (0 or 1)
0 = real; 1 = paraxial ray trace
Returns
-------
rayInfo : 4-tuple
(x, y, z, intensity)
Notes
-----
This function imitates its namesake from MZDDE toolbox.
Unlike the ``spiralSpot()`` of MZDDE, there is no need to call
``zLoadLens()`` before calling ``zSpiralSpot()``.
"""
# Calculate the ray pattern on the pupil plane
pi, cos, sin = _math.pi, _math.cos, _math.sin
lastAng = spirals*2*pi
delta_t = lastAng/(rays - 1)
theta = lambda dt, rays: (i*dt for i in range(rays))
r = (i/(rays-1) for i in range(rays))
pXY = ((r*cos(t), r*sin(t)) for r, t in _izip(r, theta(delta_t, rays)))
x = [] # x-coordinate of the image surface
y = [] # y-coordinate of the image surface
z = [] # z-coordinate of the image surface
intensity = [] # the relative transmitted intensity of the ray
for px, py in pXY:
rayTraceData = self.zGetTrace(waveNum, mode, -1, hx, hy, px, py)
if rayTraceData[0] == 0:
x.append(rayTraceData[2])
y.append(rayTraceData[3])
z.append(rayTraceData[4])
intensity.append(rayTraceData[11])
else:
print("Raytrace Error")
exit()
# !!! FIX raise an error here
return (x, y, z, intensity)
# POP analysis functions
    def zGetPOP(self, settingsFile=None, displayData=False, txtFile=None,
                keepFile=False, timeout=None):
        """Returns Physical Optics Propagation (POP) data

        Parameters
        ----------
        settingsFile : string, optional
            * if passed, the POP will be called with this configuration
              file;
            * if no ``settingsFile`` is passed, and config file ending
              with the same name as the lens file post-fixed with
              "_pyzdde_POP.CFG" is present, the settings from this file
              will be used;
            * if no ``settingsFile`` and no file name post-fixed with
              "_pyzdde_POP.CFG" is found, but a config file with the same
              name as the lens file is present, the settings from that
              file will be used;
            * if no settings file is found, then a default settings will
              be used
        displayData : bool
            if ``true`` the function returns the 2D display data; default
            is ``false``
        txtFile : string, optional
            if passed, the POP data file will be named such. Pass a
            specific ``txtFile`` if you want to dump the file into a
            separate directory.
        keepFile : bool, optional
            if ``False`` (default), the POP text file will be deleted
            after use.
            If ``True``, the file will persist. If ``keepFile`` is ``True``
            but a ``txtFile`` is not passed, the POP text file will be
            saved in the same directory as the lens (provided the required
            folder access permissions are available)
        timeout : integer, optional
            timeout in seconds

        Returns
        -------
        popData : tuple
            popData is a 1-tuple containing just ``popInfo`` (see below)
            if ``displayData`` is ``False`` (default).
            If ``displayData`` is ``True``, ``popData`` is a 2-tuple
            containing ``popInfo`` (a tuple) and ``powerGrid`` (a 2D list):

            popInfo : named tuple
                surf : integer
                    surface number at which the POP is analysis was done
                peakIrr/ cenPhase : float
                    the peak irradiance is the maximum power per unit area
                    at any point in the beam, measured in source units per
                    lens unit squared. It returns center phase if the data
                    type is "Phase" in POP settings
                totPow : float
                    the total power, or the integral of the irradiance
                    over the entire beam if data type is "Irradiance" in
                    POP settings. This field is blank for "Phase" data
                fibEffSys : float
                    the efficiency of power transfer through the system
                fibEffRec : float
                    the efficiency of the receiving fiber
                coupling : float
                    the total coupling efficiency, the product of the
                    system and receiver efficiencies
                pilotSize : float
                    the size of the gaussian beam at the surface
                pilotWaist : float
                    the waist of the gaussian beam
                pos : float
                    relative z position of the gaussian beam
                rayleigh : float
                    the rayleigh range of the gaussian beam
                gridX : integer
                    the X-sampling
                gridY : interger
                    the Y-sampling
                widthX : float
                    width along X in lens units
                widthY : float
                    width along Y in lens units
            powerGrid : 2D list/ None
                a two-dimensional list of the powers in the analysis grid
                if ``displayData`` is ``true``

        Notes
        -----
        The function returns ``None`` for any field which was not
        found in POP text file. This is most common in the case of
        ``fiberEfficiency_system`` and ``fiberEfficiency_receiver``
        as they need to be set explicitly in the POP settings

        See Also
        --------
        zSetPOPSettings(), zModifyPOPSettings()
        """
        # Resolve which text file and settings (CFG) file to use, then ask
        # Zemax to dump the POP analysis as text.
        settings = _txtAndSettingsToUse(self, txtFile, settingsFile, 'Pop')
        textFileName, cfgFile, getTextFlag = settings
        ret = self.zGetTextFile(textFileName, 'Pop', cfgFile, getTextFlag,
                                timeout)
        assert ret == 0
        # get line list
        line_list = _readLinesFromFile(_openFile(textFileName))
        # Get data type ... phase or Irradiance?
        find_irr_data = _getFirstLineOfInterest(line_list, 'POP Irradiance Data',
                                                patAtStart=False)
        data_is_irr = False if find_irr_data is None else True
        # Get the Surface number and Grid size
        # The surface description line immediately precedes the grid-size line.
        grid_line_num = _getFirstLineOfInterest(line_list, 'Grid size')
        surf_line = line_list[grid_line_num - 1]
        surf = int(_re.findall(r'\d{1,4}', surf_line)[0]) # assume: first int num in the line
                                       # is surf number. surf comment can have int or float nums
        grid_line = line_list[grid_line_num]
        grid_x, grid_y = [int(i) for i in _re.findall(r'\d{2,5}', grid_line)]
        # Point spacing
        pts_line = line_list[_getFirstLineOfInterest(line_list, 'Point spacing')]
        # scientific-notation float pattern, e.g. -1.2345E-03
        pat = r'-?\d\.\d{4,6}[Ee][-\+]\d{2,3}'
        pts_x, pts_y = [float(i) for i in _re.findall(pat, pts_line)]
        # total analysis width = point spacing * number of grid points
        width_x = pts_x*grid_x
        width_y = pts_y*grid_y
        if data_is_irr:
            # Peak Irradiance and Total Power
            pat_i = r'-?\d\.\d{4,6}[Ee][-\+]\d{2,3}'  # pattern for P. Irr, T. Pow,
            peakIrr, totPow = None, None
            pi_tp_line = _getFirstLineOfInterest(line_list, 'Peak Irradiance')
            # NOTE(review): a line index of 0 would be treated as "not found"
            # by this truthiness test -- confirm the header always precedes it
            if pi_tp_line: # Transfer magnitude doesn't have Peak Irradiance info
                pi_tp_line = line_list[pi_tp_line]
                pi_info, tp_info = pi_tp_line.split(',')
                pi = _re.search(pat_i, pi_info)
                tp = _re.search(pat_i, tp_info)
                if pi:
                    peakIrr = float(pi.group())
                if tp:
                    totPow = float(tp.group())
        else:
            # Center Phase
            pat_p = r'-?\d+\.\d{4,6}'  # pattern for Center Phase Info
            centerPhase = None
            #cp_line = line_list[_getFirstLineOfInterest(line_list, 'Center Phase')]
            cp_line = _getFirstLineOfInterest(line_list, 'Center Phase')
            if cp_line: # Transfer magnitude / Phase doesn't have Center Phase info
                cp_line = line_list[cp_line]
                cp = _re.search(pat_p, cp_line)
                if cp:
                    centerPhase = float(cp.group())
        # Pilot_size, Pilot_Waist, Pos, Rayleigh [... available for
        # both Phase and Irr data]
        pat_fe = r'\d\.\d{6}'   # pattern for fiber efficiency
        pat_pi = r'-?\d\.\d{4,6}[Ee][-\+]\d{2,3}'  # pattern for Pilot size/waist
        pilotSize, pilotWaist, pos, rayleigh = None, None, None, None
        pilot_line = line_list[_getFirstLineOfInterest(line_list, 'Pilot')]
        # the pilot-beam line is comma-separated: size, waist, position, rayleigh
        p_size_info, p_waist_info, p_pos_info, p_rayleigh_info = pilot_line.split(',')
        p_size = _re.search(pat_pi, p_size_info)
        p_waist = _re.search(pat_pi, p_waist_info)
        p_pos = _re.search(pat_pi, p_pos_info)
        p_rayleigh = _re.search(pat_pi, p_rayleigh_info)
        if p_size:
            pilotSize = float(p_size.group())
        if p_waist:
            pilotWaist = float(p_waist.group())
        if p_pos:
            pos = float(p_pos.group())
        if p_rayleigh:
            rayleigh = float(p_rayleigh.group())
        # Fiber Efficiency, Coupling [... if enabled in settings]
        fibEffSys, fibEffRec, coupling = None, None, None
        effi_coup_line_num = _getFirstLineOfInterest(line_list, 'Fiber Efficiency')
        if effi_coup_line_num:
            efficiency_coupling_line = line_list[effi_coup_line_num]
            efs_info, fer_info, cou_info = efficiency_coupling_line.split(',')
            fes = _re.search(pat_fe, efs_info)
            fer = _re.search(pat_fe, fer_info)
            cou = _re.search(pat_fe, cou_info)
            if fes:
                fibEffSys = float(fes.group())
            if fer:
                fibEffRec = float(fer.group())
            if cou:
                coupling = float(cou.group())
        if displayData:
            # Get the 2D data
            # a data row is exactly grid_x scientific-notation numbers
            pat = (r'(-?\d\.\d{4,6}[Ee][-\+]\d{2,3}\s*)' + r'{{{num}}}'
                   .format(num=grid_x))
            start_line = _getFirstLineOfInterest(line_list, pat)
            powerGrid = _get2DList(line_list, start_line, grid_y)
        if not keepFile:
            _deleteFile(textFileName)
        # pack the metadata; field names differ between the two data types
        if data_is_irr: # Irradiance data
            popi = _co.namedtuple('POPinfo', ['surf', 'peakIrr', 'totPow',
                                              'fibEffSys', 'fibEffRec', 'coupling',
                                              'pilotSize', 'pilotWaist', 'pos',
                                              'rayleigh', 'gridX', 'gridY',
                                              'widthX', 'widthY' ])
            popInfo = popi(surf, peakIrr, totPow, fibEffSys, fibEffRec, coupling,
                           pilotSize, pilotWaist, pos, rayleigh,
                           grid_x, grid_y, width_x, width_y)
        else: # Phase data
            popi = _co.namedtuple('POPinfo', ['surf', 'cenPhase', 'blank',
                                              'fibEffSys', 'fibEffRec', 'coupling',
                                              'pilotSize', 'pilotWaist', 'pos',
                                              'rayleigh', 'gridX', 'gridY',
                                              'widthX', 'widthY' ])
            popInfo = popi(surf, centerPhase, None, fibEffSys, fibEffRec, coupling,
                           pilotSize, pilotWaist, pos, rayleigh,
                           grid_x, grid_y, width_x, width_y)
        if displayData:
            return (popInfo, powerGrid)
        else:
            return popInfo
def zModifyPOPSettings(self, settingsFile, startSurf=None,
endSurf=None, field=None, wave=None, auto=None,
beamType=None, paramN=((),()), pIrr=None, tPow=None,
sampx=None, sampy=None, srcFile=None, widex=None,
widey=None, fibComp=None, fibFile=None, fibType=None,
fparamN=((),()), ignPol=None, pos=None, tiltx=None,
tilty=None):
"""Modify an existing POP settings (configuration) file
Only those parameters that are non-None or non-zero-length (in
case of tuples) will be set.
Parameters
----------
settingsFile : string
filename of the settings file including path and extension
startSurf : integer, optional
the starting surface (in General Tab)
endSurf : integer, optional
the end surface (in General Tab)
field : integer, optional
the field number (in General Tab)
wave : integer, optional
the wavelength number (in General Tab)
auto : integer, optional
simulates the pressing of the "auto" button which chooses
appropriate X and Y widths based upon the sampling and
other settings (in Beam Definition Tab)
beamType : integer (0...6), optional
0 = Gaussian Waist; 1 = Gaussian Angle; 2 = Gaussian Size +
Angle; 3 = Top Hat; 4 = File; 5 = DLL; 6 = Multimode.
(in Beam Definition Tab)
paramN : 2-tuple, optional
sets beam parameter n, for example ((1, 4),(0.1, 0.5)) sets
parameters 1 and 4 to 0.1 and 0.5 respectively. These
parameter names and values change depending upon the beam type
setting. For example, for the Gaussian Waist beam, n=1 for
Waist X, 2 for Waist Y, 3 for Decenter X, 4 for Decenter Y,
5 for Aperture X, 6 for Aperture Y, 7 for Order X, and 8 for
Order Y (in Beam Definition Tab)
pIrr : float, optional
sets the normalization by peak irradiance. It is the initial
beam peak irradiance in power per area. It is an alternative
to Total Power (tPow) [in Beam Definition Tab]
tPow : float, optional
sets the normalization by total beam power. It is the initial
beam total power. This is an alternative to Peak Irradiance
(pIrr) [in Beam Definition Tab]
sampx : integer (1...10), optional
the X direction sampling. 1 for 32; 2 for 64; 3 for 128;
4 for 256; 5 for 512; 6 for 1024; 7 for 2048; 8 for 4096;
9 for 8192; 10 for 16384; (in Beam Definition Tab)
sampy : integer (1...10), optional
the Y direction sampling. 1 for 32; 2 for 64; 3 for 128;
4 for 256; 5 for 512; 6 for 1024; 7 for 2048; 8 for 4096;
9 for 8192; 10 for 16384; (in Beam Definition Tab)
srcFile : string, optional
The file name if the starting beam is defined by a ZBF file,
DLL, or multimode file; (in Beam Definition Tab)
widex : float, optional
the initial X direction width in lens units;
(X-Width in Beam Definition Tab)
widey : float, optional
the initial Y direction width in lens units;
(Y-Width in Beam Definition Tab)
fibComp : integer (1/0), optional
use 1 to check the fiber coupling integral ON, 0 for OFF
(in Fiber Data Tab)
fibFile : string, optional
the file name if the fiber mode is defined by a ZBF or DLL
(in Fiber Data Tab)
fibType : string, optional
use the same values as ``beamType`` above, except for
multimode which is not yet supported
(in Fiber Data Tab)
fparamN : 2-tuple, optional
sets fiber parameter n, for example ((2,3),(0.5, 0.6)) sets
parameters 2 and 3 to 0.5 and 0.6 respectively. See the hint
for ``paramN`` (in Fiber Data Tab)
ignPol : integer (0/1), optional
use 1 to ignore polarization, 0 to consider polarization
(in Fiber Data Tab)
pos : integer (0/1), optional
fiber position setting. use 0 for chief ray, 1 for surface vertex
(in Fiber Data Tab)
tiltx : float, optional
tilt about X in degrees (in Fiber Data Tab)
tilty : float, optional
tilt about Y in degrees (in Fiber Data Tab)
Returns
-------
statusTuple : tuple or -1
tuple of codes returned by ``zModifySettings()`` for each
non-None parameters. The status codes are as follows:
0 = no error;
-1 = invalid file;
-2 = incorrect version number;
-3 = file access conflict
The function returns -1 if ``settingsFile`` is invalid.
See Also
--------
zSetPOPSettings(), zGetPOP()
"""
sTuple = [] # status tuple
if (_os.path.isfile(settingsFile) and
settingsFile.lower().endswith('.cfg')):
dst = settingsFile
else:
return -1
if startSurf is not None:
sTuple.append(self.zModifySettings(dst, "POP_START", startSurf))
if endSurf is not None:
sTuple.append(self.zModifySettings(dst, "POP_END", endSurf))
if field is not None:
sTuple.append(self.zModifySettings(dst, "POP_FIELD", field))
if wave is not None:
sTuple.append(self.zModifySettings(dst, "POP_WAVE", wave))
if auto is not None:
sTuple.append(self.zModifySettings(dst, "POP_AUTO", auto))
if beamType is not None:
sTuple.append(self.zModifySettings(dst, "POP_BEAMTYPE", beamType))
if paramN[0]:
tst = []
for i, j in _izip(paramN[0], paramN[1]):
tst.append(self.zModifySettings(dst, "POP_PARAM{}".format(i), j))
sTuple.append(tuple(tst))
if pIrr is not None:
sTuple.append(self.zModifySettings(dst, "POP_PEAKIRRAD", pIrr))
if tPow is not None:
sTuple.append(self.zModifySettings(dst, "POP_POWER", tPow))
if sampx is not None:
sTuple.append(self.zModifySettings(dst, "POP_SAMPX", sampx))
if sampy is not None:
sTuple.append(self.zModifySettings(dst, "POP_SAMPY", sampy))
if srcFile is not None:
sTuple.append(self.zModifySettings(dst, "POP_SOURCEFILE", srcFile))
if widex is not None:
sTuple.append(self.zModifySettings(dst, "POP_WIDEX", widex))
if widey is not None:
sTuple.append(self.zModifySettings(dst, "POP_WIDEY", widey))
if fibComp is not None:
sTuple.append(self.zModifySettings(dst, "POP_COMPUTE", fibComp))
if fibFile is not None:
sTuple.append(self.zModifySettings(dst, "POP_FIBERFILE", fibFile))
if fibType is not None:
sTuple.append(self.zModifySettings(dst, "POP_FIBERTYPE", fibType))
if fparamN[0]:
tst = []
for i, j in _izip(fparamN[0], fparamN[1]):
tst.append(self.zModifySettings(dst, "POP_FPARAM{}".format(i), j))
sTuple.append(tuple(tst))
if ignPol is not None:
sTuple.append(self.zModifySettings(dst, "POP_IGNOREPOL", ignPol))
if pos is not None:
sTuple.append(self.zModifySettings(dst, "POP_POSITION", pos))
if tiltx is not None:
sTuple.append(self.zModifySettings(dst, "POP_TILTX", tiltx))
if tilty is not None:
sTuple.append(self.zModifySettings(dst, "POP_TILTY", tilty))
return tuple(sTuple)
def zSetPOPSettings(self, data=0, settingsFile=None, startSurf=None,
endSurf=None, field=None, wave=None, auto=None,
beamType=None, paramN=((),()), pIrr=None, tPow=None,
sampx=None, sampy=None, srcFile=None, widex=None,
widey=None, fibComp=None, fibFile=None, fibType=None,
fparamN=((),()), ignPol=None, pos=None, tiltx=None,
tilty=None):
"""Create and set a new settings file starting from the "reset"
settings state of the most basic lens in Zemax.
To modify an existing POP settings file, use
``zModifyPOPSettings()``. Only those parameters that are non-None
or non-zero-length (in case of tuples) will be set.
Parameters
----------
data : integer
0 = irradiance, 1 = phase
settingsFile : string, optional
name to give to the settings file to be created. It must be
the full file name, including path and extension of settings
file.
If ``None``, then a CFG file with the name of the lens
followed by the string "_pyzdde_POP.CFG" will be created in
the same directory as the lens file and returned
startSurf : integer, optional
the starting surface (in General Tab)
endSurf : integer, optional
the end surface (in General Tab)
field : integer, optional
the field number (in General Tab)
wave : integer, optional
the wavelength number (in General Tab)
auto : integer, optional
simulates the pressing of the "auto" button which chooses
appropriate X and Y widths based upon the sampling and
other settings (in Beam Definition Tab)
beamType : integer (0...6), optional
0 = Gaussian Waist; 1 = Gaussian Angle; 2 = Gaussian Size +
Angle; 3 = Top Hat; 4 = File; 5 = DLL; 6 = Multimode.
(in Beam Definition Tab)
paramN : 2-tuple, optional
sets beam parameter n, for example ((1, 4),(0.1, 0.5)) sets
parameters 1 and 4 to 0.1 and 0.5 respectively. These
parameter names and values change depending upon the beam type
setting. For example, for the Gaussian Waist beam, n=1 for
Waist X, 2 for Waist Y, 3 for Decenter X, 4 for Decenter Y,
5 for Aperture X, 6 for Aperture Y, 7 for Order X, and 8 for
Order Y (in Beam Definition Tab)
pIrr : float, optional
sets the normalization by peak irradiance. It is the initial
beam peak irradiance in power per area. It is an alternative
to Total Power (tPow) [in Beam Definition Tab]
tPow : float, optional
sets the normalization by total beam power. It is the initial
beam total power. This is an alternative to Peak Irradiance
(pIrr) [in Beam Definition Tab]
sampx : integer (1...10), optional
the X direction sampling. 1 for 32; 2 for 64; 3 for 128;
4 for 256; 5 for 512; 6 for 1024; 7 for 2048; 8 for 4096;
9 for 8192; 10 for 16384; (in Beam Definition Tab)
sampy : integer (1...10), optional
the Y direction sampling. 1 for 32; 2 for 64; 3 for 128;
4 for 256; 5 for 512; 6 for 1024; 7 for 2048; 8 for 4096;
9 for 8192; 10 for 16384; (in Beam Definition Tab)
srcFile : string, optional
The file name if the starting beam is defined by a ZBF file,
DLL, or multimode file; (in Beam Definition Tab)
widex : float, optional
the initial X direction width in lens units;
(X-Width in Beam Definition Tab)
widey : float, optional
the initial Y direction width in lens units;
(Y-Width in Beam Definition Tab)
fibComp : integer (1/0), optional
use 1 to check the fiber coupling integral ON, 0 for OFF
(in Fiber Data Tab)
fibFile : string, optional
the file name if the fiber mode is defined by a ZBF or DLL
(in Fiber Data Tab)
fibType : string, optional
use the same values as ``beamType`` above, except for
multimode which is not yet supported
(in Fiber Data Tab)
fparamN : 2-tuple, optional
sets fiber parameter n, for example ((2,3),(0.5, 0.6)) sets
parameters 2 and 3 to 0.5 and 0.6 respectively. See the hint
for ``paramN`` (in Fiber Data Tab)
ignPol : integer (0/1), optional
use 1 to ignore polarization, 0 to consider polarization
(in Fiber Data Tab)
pos : integer (0/1), optional
fiber position setting. use 0 for chief ray, 1 for surface vertex
(in Fiber Data Tab)
tiltx : float, optional
tilt about X in degrees (in Fiber Data Tab)
tilty : float, optional
tilt about Y in degrees (in Fiber Data Tab)
Returns
-------
settingsFile : string
the full name, including path and extension, of the just
created settings file
Notes
-----
1. Further modifications of the settings file can be made using
``zModifySettings()`` or ``zModifyPOPSettings()`` functions
2. The function creates settings file ending with '_pyzdde_POP.CFG'
in order to prevent overwritting any existing settings file not
created by pyzdde for POP.
This file eventually gets deleted when ``ln.close()`` or
``pyz.closeLink()`` or ``ln.zDDEClose()`` is called.
See Also
--------
zGetPOP(), zModifyPOPSettings()
"""
# Create a settings file with "reset" settings
global _pDir
if data == 1:
clean_cfg = 'RESET_SETTINGS_POP_PHASE.CFG'
else:
clean_cfg = 'RESET_SETTINGS_POP_IRR.CFG'
src = _os.path.join(_pDir, 'ZMXFILES', clean_cfg)
if settingsFile:
dst = settingsFile
else:
filename_partial = _os.path.splitext(self.zGetFile())[0]
dst = filename_partial + '_pyzdde_POP.CFG'
self._filesCreated.add(dst)
try:
_shutil.copy(src, dst)
except IOError:
print("ERROR: Invalid settingsFile {}".format(dst))
return
else:
self.zModifyPOPSettings(dst, startSurf, endSurf, field, wave, auto,
beamType, paramN, pIrr, tPow, sampx, sampy,
srcFile, widex, widey, fibComp, fibFile,
fibType, fparamN, ignPol, pos, tiltx, tilty)
return dst
# FFT and Huygens PSF, MTF analysis functions
def zGetPSFCrossSec(self, which='fft', settingsFile=None, txtFile=None,
keepFile=False, timeout=120):
"""Returns the cross-section data of FFT or Huygens PSF analysis
Parameters
----------
which : string, optional
if 'fft' (default), then the FFT PSF cross-section data is
returned;
if 'huygens', then the Huygens PSF cross-section data is
returned;
settingsFile : string, optional
* if passed, the FFT/Huygens PSF analysis will be called with
the given configuration file (settings);
* if no ``settingsFile`` is passed, and config file ending
with the same name as the lens file post-fixed with
"_pyzdde_FFTPSFCS.CFG"/"_pyzdde_HUYGENSPSFCS.CFG" is present,
the settings from this file will be used;
* if no ``settingsFile`` and no file name post-fixed with
"_pyzdde_FFTPSFCS.CFG"/"_pyzdde_HUYGENSPSFCS.CFG" is found,
but a config file with the same name as the lens file is
present, the settings from that file will be used;
* if no settings file is found, then a default settings will
be used
txtFile : string, optional
if passed, the PSF analysis text file will be named such.
Pass a specific txtFile if you want to dump the file into
a separate directory.
keepFile : bool, optional
if ``False`` (default), the PSF text file will be deleted
after use.
If ``True``, the file will persist. If ``keepFile`` is ``True``
but a ``txtFile`` is not passed, the PSF text file will be
saved in the same directory as the lens (provided the required
folder access permissions are available)
timeout : integer, optional
timeout in seconds. Note that Huygens PSF calculations
may take few minutes to complete
Returns
-------
indices : list
row index of the data
position : list
position in microns
value : list
the value of the FFT/Huygens based PSF
Notes
-----
The function doesn't check for inconsistencies of results. In
most cases, if not all cases, the ``indices``, ``position``, and
``value`` lists should be of the same length.
See Also
--------
zModifyFFTPSFCrossSecSettings(), zSetFFTPSFCrossSecSettings(),
zModifyHuygensPSFCrossSecSettings(), zSetHuygensPSFCrossSecSettings()
"""
if which=='huygens':
anaType = 'Hcs'
else:
anaType = 'Pcs'
settings = _txtAndSettingsToUse(self, txtFile, settingsFile, anaType)
textFileName, cfgFile, getTextFlag = settings
ret = self.zGetTextFile(textFileName, anaType, cfgFile, getTextFlag,
timeout)
assert ret == 0
line_list = _readLinesFromFile(_openFile(textFileName))
# Get Image grid size
img_grid_line = line_list[_getFirstLineOfInterest(line_list,
'Image grid size')]
_, img_grid_y = [int(i) for i in _re.findall(r'\d{2,5}', img_grid_line)]
pat = (r'\d{1,5}\s*(-?\d{1,3}\.\d{4,6}\s*)' + r'{{{num}}}'.format(num=2))
start_line = _getFirstLineOfInterest(line_list, pat)
data_mat = _get2DList(line_list, start_line, img_grid_y*2 + 1)
data_matT = _transpose2Dlist(data_mat)
indices = [int(i) for i in data_matT[0]]
position = data_matT[1]
value = data_matT[2]
if not keepFile:
_deleteFile(textFileName)
return indices, position, value
    def zGetPSF(self, which='fft', settingsFile=None, txtFile=None,
                keepFile=False, timeout=120):
        """Returns FFT or Huygens PSF data

        Parameters
        ----------
        which : string, optional
            if 'fft' (default), then the FFT PSF data is returned;
            if 'huygens', then the Huygens PSF data is returned;
        settingsFile : string, optional
            * if passed, the FFT/Huygens PSF analysis will be called with
              the given configuration file (settings);
            * if no ``settingsFile`` is passed, and config file ending
              with the same name as the lens-file post-fixed with
              "_pyzdde_FFTPSF.CFG"/"_pyzdde_HUYGENSPSF.CFG"is present, the
              settings from this file will be used;
            * if no ``settingsFile`` and no file-name post-fixed with
              "_pyzdde_FFTPSF.CFG"/"_pyzdde_HUYGENSPSF.CFG" is found, but
              a config file with the same name as the lens file is present,
              the settings from that file will be used;
            * if no settings file is found, then a default settings will
              be used
        txtFile : string, optional
            if passed, the PSF analysis text file will be named such.
            Pass a specific txtFile if you want to dump the file into
            a separate directory.
        keepFile : bool, optional
            if ``False`` (default), the PSF text file will be deleted
            after use.
            If ``True``, the file will persist. If ``keepFile`` is ``True``
            but a ``txtFile`` is not passed, the PSF text file will be
            saved in the same directory as the lens (provided the required
            folder access permissions are available)
        timeout : integer, optional
            timeout in seconds. Note that Huygens PSF/MTF calculations with
            ``pupil_sample`` and/or ``image_sample`` greater than 4
            usually take several minutes to complete
        Returns
        -------
        psfInfo : named tuple
            meta data about the PSF analysis data, such as data spacing
            (microns), data area (microns wide), pupil and image grid
            sizes, center point, and center/reference coordinate information
        psfGridData : 2D list
            the two-dimensional list of the PSF data

        See Also
        --------
        zModifyFFTPSFSettings(), zSetFFTPSFSettings(),
        zModifyHuygensPSFSettings(), zSetHuygensPSFSettings()
        """
        # analysis type codes: 'Hps' = Huygens PSF, 'Fps' = FFT PSF
        if which=='huygens':
            anaType = 'Hps'
        else:
            anaType = 'Fps'
        # resolve text/settings files, then dump the analysis as text
        settings = _txtAndSettingsToUse(self, txtFile, settingsFile, anaType)
        textFileName, cfgFile, getTextFlag = settings
        ret = self.zGetTextFile(textFileName, anaType, cfgFile, getTextFlag,
                                timeout)
        assert ret == 0
        line_list = _readLinesFromFile(_openFile(textFileName))
        # Meta data
        data_spacing_line = line_list[_getFirstLineOfInterest(line_list, 'Data spacing')]
        data_spacing = float(_re.search(r'\d{1,3}\.\d{2,6}', data_spacing_line).group())
        data_area_line = line_list[_getFirstLineOfInterest(line_list, 'Data area')]
        data_area = float(_re.search(r'\d{1,5}\.\d{2,6}', data_area_line).group())
        # the header label differs between the two analyses: Huygens reports
        # "Center coordinates", FFT reports "Reference Coordinates"
        if which=='huygens':
            ctr_ref_line = line_list[_getFirstLineOfInterest(line_list, 'Center coordinates')]
        else:
            ctr_ref_line = line_list[_getFirstLineOfInterest(line_list, 'Reference Coordinates')]
        ctr_ref_x, ctr_ref_y = [float(i) for i in _re.findall('-?\d\.\d{4,10}[Ee][-\+]\d{2,3}', ctr_ref_line)]
        img_grid_line = line_list[_getFirstLineOfInterest(line_list, 'Image grid size')]
        img_grid_x, img_grid_y = [int(i) for i in _re.findall(r'\d{2,5}', img_grid_line)]
        pupil_grid_line = line_list[_getFirstLineOfInterest(line_list, 'Pupil grid size')]
        pupil_grid_x, pupil_grid_y = [int(i) for i in _re.findall(r'\d{2,5}', pupil_grid_line)]
        center_point_line = line_list[_getFirstLineOfInterest(line_list, 'Center point')]
        center_point_x, center_point_y = [int(i) for i in _re.findall(r'\d{2,5}', center_point_line)]
        # The 2D data
        # a data row consists of exactly img_grid_x scientific-notation numbers
        pat = (r'(-?\d\.\d{4,6}[Ee][-\+]\d{2,3}\s*)' + r'{{{num}}}'
               .format(num=img_grid_x))
        start_line = _getFirstLineOfInterest(line_list, pat)
        psfGridData = _get2DList(line_list, start_line, img_grid_y)
        # the named-tuple field names mirror the header-label difference above
        if which=='huygens':
            psfi = _co.namedtuple('PSFinfo', ['dataSpacing', 'dataArea', 'pupilGridX',
                                              'pupilGridY', 'imgGridX', 'imgGridY',
                                              'centerPtX', 'centerPtY',
                                              'centerCoordX', 'centerCoordY'])
        else:
            psfi = _co.namedtuple('PSFinfo', ['dataSpacing', 'dataArea', 'pupilGridX',
                                              'pupilGridY', 'imgGridX', 'imgGridY',
                                              'centerPtX', 'centerPtY',
                                              'refCoordX', 'refCoordY'])
        psfInfo = psfi(data_spacing, data_area, pupil_grid_x, pupil_grid_y,
                       img_grid_x, img_grid_y, center_point_x, center_point_y,
                       ctr_ref_x, ctr_ref_y)
        if not keepFile:
            _deleteFile(textFileName)
        return (psfInfo, psfGridData)
def zModifyFFTPSFCrossSecSettings(self, settingsFile, dtype=None, row=None,
sample=None, wave=None, field=None,
pol=None, norm=None, scale=None):
"""Modify an existing FFT PSF Cross section analysis settings
(configuration) file
Parameters
----------
settingsFile : string
filename of the settings file including path and extension
dtype : integer (0-9), optional
0 = x-linear, 1 = y-linear, 2 = x-log, 3 = y-log, 4 = x-phase,
5 = y-phase, 6 = x-real, 7 = y-real, 8 = x-imaginary,
9 = y-imaginary.
row : integer, optional
the row number (for x scan) or column number (for y scan) or
use 0 for center.
sample : integer, optional
the sampling. 1 = 32x32; 2 = 64x64; 3 = 128x128; 4 = 256x256;
5 = 512x512; 6 = 1024x1024; 7 = 2048x2048; 8 = 4096x4096;
9 = 8192x8192; 10 = 16384x16384;
wave : integer, optional
the wavelength number, use 0 for polychromatic.
field : integer, optional
the field number
pol : integer (0/1), optional
the polarization. 0 for unpolarized, 1 for polarized.
norm : integer (0/1), optional
normalization. 0 for unnormalized, 1 for unity normalization
scale : float, optional
the plot scale
Returns
-------
statusTuple : tuple or -1
tuple of codes returned by ``zModifySettings()`` for each
non-None parameters. The status codes are as follows:
0 = no error;
-1 = invalid file;
-2 = incorrect version number;
-3 = file access conflict
The function returns -1 if ``settingsFile`` is invalid.
See Also
--------
zSetFFTPSFCrossSecSettings() :
to create and set FFT PSF Crosssection settings
zGetPSFCrossSec(),
"""
sTuple = [] # status tuple
if (_os.path.isfile(settingsFile) and
settingsFile.lower().endswith('.cfg')):
dst = settingsFile
else:
return -1
if dtype is not None:
sTuple.append(self.zModifySettings(dst, "PSF_TYPE", dtype))
if row is not None:
sTuple.append(self.zModifySettings(dst, "PSF_ROW", row))
if sample is not None:
sTuple.append(self.zModifySettings(dst, "PSF_SAMP", sample))
if wave is not None:
sTuple.append(self.zModifySettings(dst, "PSF_WAVE", wave))
if field is not None:
sTuple.append(self.zModifySettings(dst, "PSF_FIELD", field))
if pol is not None:
sTuple.append(self.zModifySettings(dst, "PSF_POLARIZATION", pol))
if norm is not None:
sTuple.append(self.zModifySettings(dst, "PSF_NORMALIZE", norm))
if scale is not None:
sTuple.append(self.zModifySettings(dst, "PSF_PLOTSCALE", scale))
return tuple(sTuple)
def zSetFFTPSFCrossSecSettings(self, settingsFile=None, dtype=None, row=None,
sample=None, wave=None, field=None, pol=None,
norm=None, scale=None):
"""create and set a new FFT PSF Crosssection settings file starting
from the "reset" settings state of the most basic lens in Zemax
To modify an existing FFT PSF Crosssection settings file, use
``zModifyFFTPSFCrossSecSettings()``. Only those parameters with
non-None will be set
Parameters
----------
settingsFile : string, optional
name to give to the settings file to be created. It must be
the full file name, including path and extension of the
settings file.
If ``None``, then a CFG file with the name of the lens
followed by the string '_pyzdde_FFTPSFCS.CFG' will be created
in the same directory as the lens file and returned
dtype : integer (0-9), optional
0 = x-linear, 1 = y-linear, 2 = x-log, 3 = y-log, 4 = x-phase,
5 = y-phase, 6 = x-real, 7 = y-real, 8 = x-imaginary,
9 = y-imaginary.
row : integer, optional
the row number (for x scan) or column number (for y scan) or
use 0 for center.
sample : integer, optional
the sampling. 1 = 32x32; 2 = 64x64; 3 = 128x128; 4 = 256x256;
5 = 512x512; 6 = 1024x1024; 7 = 2048x2048; 8 = 4096x4096;
9 = 8192x8192; 10 = 16384x16384;
wave : integer, optional
the wavelength number, use 0 for polychromatic.
field : integer, optional
the field number
pol : integer (0/1), optional
the polarization. 0 for unpolarized, 1 for polarized.
norm : integer (0/1), optional
normalization. 0 for unnormalized, 1 for unity normalization
scale : float, optional
the plot scale
Returns
-------
settingsFile : string
the full name, including path and extension, of the just
created settings file
Notes
-----
1. Further modifications of the settings file can be made using
``zModifySettings()`` or ``zModifyFFTPSFCrossSecSettings()``
functions
2. The function creates settings file ending with
'_pyzdde_FFTPSFCS.CFG' in order to prevent overwritting any
existing settings file not created by pyzdde for FFT PSF Cross
section analysis.
This file eventually gets deleted when ``ln.close()`` or
``pyz.closeLink()`` or ``ln.zDDEClose()`` is called.
See Also
--------
zGetPSFCrossSec(), zModifyFFTPSFCrossSecSettings()
"""
clean_cfg = 'RESET_SETTINGS_FFTPSFCS.CFG'
src = _os.path.join(_pDir, 'ZMXFILES', clean_cfg)
if settingsFile:
dst = settingsFile
else:
filename_partial = _os.path.splitext(self.zGetFile())[0]
dst = filename_partial + '_pyzdde_FFTPSFCS.CFG'
self._filesCreated.add(dst)
try:
_shutil.copy(src, dst)
except IOError:
print("ERROR: Invalid settingsFile {}".format(dst))
return
else:
self.zModifyFFTPSFCrossSecSettings(dst, dtype, row, sample, wave,
field, pol, norm, scale)
return dst
def zModifyFFTPSFSettings(self, settingsFile, dtype=None, sample=None,
wave=None, field=None, surf=None, pol=None,
norm=None, imgDelta=None):
"""Modify an existing FFT PSF analysis settings (configuration)
file
Only those parameters that are non-None will be set.
Parameters
----------
settingsFile : string
filename of the settings file including path and extension
dtype : integer (0-4), optional
0 = linear, 1 = log, 2 = phase, 3 = real, 4 = imaginary.
sample : integer, optional
the (pupil) sampling. 1 = 32x32; 2 = 64x64; 3 = 128x128;
4 = 256x256; 5 = 512x512; 6 = 1024x1024; 7 = 2048x2048;
8 = 4096x4096; 9 = 8192x8192; 10 = 16384x16384;
wave : integer, optional
the wavelength number, use 0 for polychromatic.
field : integer, optional
the field number
surf : integer, optional
the surface number. Use 0 for image
pol : integer (0/1), optional
the polarization. 0 for unpolarized, 1 for polarized.
norm : integer (0/1), optional
normalization. 0 for unnormalized, 1 for unity normalization
imgDelta : float, optional
the image point spacing in micrometers
Returns
-------
statusTuple : tuple or -1
tuple of codes returned by ``zModifySettings()`` for each
non-None parameters. The status codes are as follows:
0 = no error;
-1 = invalid file;
-2 = incorrect version number;
-3 = file access conflict
The function returns -1 if ``settingsFile`` is invalid.
Notes
-----
See the notes of ``zSetFFTPSFSettings()``
See Also
--------
zSetFFTPSFSettings(), zGetPSF()
"""
sTuple = [] # status tuple
if (_os.path.isfile(settingsFile) and
settingsFile.lower().endswith('.cfg')):
dst = settingsFile
else:
return -1
if dtype is not None:
sTuple.append(self.zModifySettings(dst, "PSF_TYPE", dtype))
if sample is not None:
sTuple.append(self.zModifySettings(dst, "PSF_SAMP", sample))
if wave is not None:
sTuple.append(self.zModifySettings(dst, "PSF_WAVE", wave))
if field is not None:
sTuple.append(self.zModifySettings(dst, "PSF_FIELD", field))
if surf is not None:
sTuple.append(self.zModifySettings(dst, "PSF_SURFACE", surf))
if pol is not None:
sTuple.append(self.zModifySettings(dst, "PSF_POLARIZATION", pol))
if norm is not None:
sTuple.append(self.zModifySettings(dst, "PSF_NORMALIZE", norm))
if imgDelta is not None:
sTuple.append(self.zModifySettings(dst, "PSF_IMAGEDELTA", imgDelta))
return tuple(sTuple)
def zSetFFTPSFSettings(self, settingsFile=None, dtype=None, sample=None,
wave=None, field=None, surf=None, pol=None,
norm=None, imgDelta=None):
"""create and set a new FFT PSF analysis settings file starting
from the "reset" settings state of the most basic lens in Zemax
To modify an existing FFT PSF settings file, use
``zModifyFFTPSFSettings()``. Only those parameters that are
non-None will be set
Parameters
----------
settingsFile : string, optional
name to give to the settings file to be created. It must be
the full file name, including path and extension of the
settings file.
If ``None``, then a CFG file with the name of the lens
followed by the string '_pyzdde_FFTPSF.CFG' will be created
in the same directory as the lens file and returned
dtype : integer (0-4), optional
0 = linear, 1 = log, 2 = phase, 3 = real, 4 = imaginary.
sample : integer, optional
the (pupil) sampling. 1 = 32x32; 2 = 64x64; 3 = 128x128;
4 = 256x256; 5 = 512x512; 6 = 1024x1024; 7 = 2048x2048;
8 = 4096x4096; 9 = 8192x8192; 10 = 16384x16384;
wave : integer, optional
the wavelength number, use 0 for polychromatic.
field : integer, optional
the field number
surf : integer, optional
the surface number. Use 0 for image
pol : integer (0/1), optional
the polarization. 0 for unpolarized, 1 for polarized.
norm : integer (0/1), optional
normalization. 0 for unnormalized, 1 for unity normalization
imgDelta : float, optional
the image point spacing in micrometers
Returns
-------
settingsFile : string
the full name, including path and extension, of the just
created settings file
Notes
-----
1. Currently, Zemax doesn't provide a way to change the image
sampling parameter for this function. It seems that the image
sampling value is set to twice the value set for pupil sampling.
2. Further modifications of the settings file can be made using
``zModifySettings()`` or ``zModifyFFTPSFSettings()`` functions
3. The function creates settings file ending with
'_pyzdde_FFTPSF.CFG' in order to prevent overwritting any
existing settings file not created by pyzdde for FFT PSF.
This file eventually gets deleted when ``ln.close()`` or
``pyz.closeLink()`` or ``ln.zDDEClose()`` is called.
See Also
--------
zGetPSF(), zModifyFFTPSFSettings()
"""
clean_cfg = 'RESET_SETTINGS_FFTPSF.CFG'
src = _os.path.join(_pDir, 'ZMXFILES', clean_cfg)
if settingsFile:
dst = settingsFile
else:
filename_partial = _os.path.splitext(self.zGetFile())[0]
dst = filename_partial + '_pyzdde_FFTPSF.CFG'
self._filesCreated.add(dst)
try:
_shutil.copy(src, dst)
except IOError:
print("ERROR: Invalid settingsFile {}".format(dst))
return
else:
self.zModifyFFTPSFSettings(dst, dtype, sample, wave, field, surf, pol,
norm, imgDelta)
return dst
def zModifyHuygensPSFCrossSecSettings(self, settingsFile, pupilSample=None,
imgSample=None, wave=None, field=None,
imgDelta=None, dtype=None):
"""Modify an existing Huygens PSF Cross section analysis settings
(configuration) file
Parameters
----------
settingsFile : string
filename of the settings file including path and extension
pupilSample : integer, optional
the pupil sampling. 1 = 32x32; 2 = 64x64; 3 = 128x128;
4 = 256x256; 5 = 512x512; 6 = 1024x1024; 7 = 2048x2048;
8 = 4096x4096; 9 = 8192x8192; 10 = 16384x16384;
imgSample : integer, optional
the image sampling. 1 = 32x32; 2 = 64x64; 3 = 128x128;
4 = 256x256; 5 = 512x512; 6 = 1024x1024; 7 = 2048x2048;
8 = 4096x4096; 9 = 8192x8192; 10 = 16384x16384;
wave : integer, optional
the wavelength number, use 0 for polychromatic
field : integer, optional
the field number
imgDelta : float, optional
the image point spacing in micrometers
dtype : integer (0-9), optional
0 = x-linear, 1 = y-log, 2 = y-linear, 3 = y-log, 4 = x-real,
5 = y-real, 6 = x-imaginary, 7 = y-imaginary, 8 = x-phase,
9 = y-phase.
Returns
-------
statusTuple : tuple or -1
tuple of codes returned by ``zModifySettings()`` for each
non-None parameters. The status codes are as follows:
0 = no error;
-1 = invalid file;
-2 = incorrect version number;
-3 = file access conflict
The function returns -1 if ``settingsFile`` is invalid.
See Also
--------
zSetHuygensPSFCrossSecSettings() :
to create and set Huygens PSF Crosssection settings
zGetPSFCrossSec(),
"""
sTuple = [] # status tuple
if (_os.path.isfile(settingsFile) and
settingsFile.lower().endswith('.cfg')):
dst = settingsFile
else:
return -1
if pupilSample is not None:
sTuple.append(self.zModifySettings(dst, "HPC_PUPILSAMP", pupilSample))
if imgSample is not None:
sTuple.append(self.zModifySettings(dst, "HPC_IMAGESAMP", imgSample))
if wave is not None:
sTuple.append(self.zModifySettings(dst, "HPC_WAVE", wave))
if field is not None:
sTuple.append(self.zModifySettings(dst, "HPC_FIELD", field))
if imgDelta is not None:
sTuple.append(self.zModifySettings(dst, "HPC_IMAGEDELTA", imgDelta))
if dtype is not None:
sTuple.append(self.zModifySettings(dst, "HPC_TYPE", dtype))
return tuple(sTuple)
def zSetHuygensPSFCrossSecSettings(self, settingsFile=None, pupilSample=None,
imgSample=None, wave=None, field=None,
imgDelta=None, dtype=None):
"""create and set a new Huygens PSF Crosssection settings file
starting from the "reset" settings state of the most basic lens in
Zemax.
To modify an existing Huygens PSF Crosssection settings file, use
``zModifyHuygensPSFCrossSecSettings()``. Only those parameters
with non-None will be set
Parameters
----------
settingsFile : string, optional
name to give to the settings file to be created. It must be
the full file name, including path and extension of the
settings file.
If ``None``, then a CFG file with the name of the lens
followed by the string '_pyzdde_HUYGENSPSFCS.CFG' will be
created in the same directory as the lens file and returned
pupilSample : integer, optional
the pupil sampling. 1 = 32x32; 2 = 64x64; 3 = 128x128;
4 = 256x256; 5 = 512x512; 6 = 1024x1024; 7 = 2048x2048;
8 = 4096x4096; 9 = 8192x8192; 10 = 16384x16384;
imgSample : integer, optional
the image sampling. 1 = 32x32; 2 = 64x64; 3 = 128x128;
4 = 256x256; 5 = 512x512; 6 = 1024x1024; 7 = 2048x2048;
8 = 4096x4096; 9 = 8192x8192; 10 = 16384x16384;
wave : integer, optional
the wavelength number, use 0 for polychromatic
field : integer, optional
the field number
imgDelta : float, optional
the image point spacing in micrometers
dtype : integer (0-9), optional
0 = x-linear, 1 = y-log, 2 = y-linear, 3 = y-log, 4 = x-real,
5 = y-real, 6 = x-imaginary, 7 = y-imaginary, 8 = x-phase,
9 = y-phase.
Returns
-------
settingsFile : string
the full name, including path and extension, of the just
created settings file
Notes
-----
1. Further modifications of the settings file can be made using
``zModifySettings()`` or ``zModifyHuygensPSFCrosSecSettings()``
functions
2. The function creates settings file ending with
'_pyzdde_HUYGENSPSFCS.CFG' in order to prevent overwritting any
existing settings file not created by pyzdde for Huygens PSF
Crosssection analysis.
This file eventually gets deleted when ``ln.close()`` or
``pyz.closeLink()`` or ``ln.zDDEClose()`` is called.
See Also
--------
zGetPSFCrossSec(), zModifyHuygensPSFCrossSecSettings()
"""
clean_cfg = 'RESET_SETTINGS_HUYGENSPSFCS.CFG'
src = _os.path.join(_pDir, 'ZMXFILES', clean_cfg)
if settingsFile:
dst = settingsFile
else:
filename_partial = _os.path.splitext(self.zGetFile())[0]
dst = filename_partial + '_pyzdde_HUYGENSPSFCS.CFG'
self._filesCreated.add(dst)
try:
_shutil.copy(src, dst)
except IOError:
print("ERROR: Invalid settingsFile {}".format(dst))
return
else:
self.zModifyHuygensPSFCrossSecSettings(dst, pupilSample, imgSample,
wave, field, imgDelta, dtype)
return dst
def zModifyHuygensPSFSettings(self, settingsFile, pupilSample=None,
imgSample=None, wave=None, field=None,
imgDelta=None, dtype=None):
"""Modify an existing Huygens PSF analysis settings (configuration)
file
Only those parameters that are non-None will be set.
Parameters
----------
settingsFile : string
filename of the settings file including path and extension
pupilSample : integer, optional
the pupil sampling. 1 = 32x32; 2 = 64x64; 3 = 128x128;
4 = 256x256; 5 = 512x512; 6 = 1024x1024; 7 = 2048x2048;
8 = 4096x4096; 9 = 8192x8192; 10 = 16384x16384;
imgSample : integer, optional
the image sampling. 1 = 32x32; 2 = 64x64; 3 = 128x128;
4 = 256x256; 5 = 512x512; 6 = 1024x1024; 7 = 2048x2048;
8 = 4096x4096; 9 = 8192x8192; 10 = 16384x16384;
wave : integer, optional
the wavelength number, use 0 for polychromatic
field : integer, optional
the field number
imgDelta : float, optional
the image point spacing in micrometers
dtype : integer (0-8), optional
0 = linear, 1 = log -1, 2 = log -2, 3 = log -3, 4 = log -4,
5 = log -5, 6 = real, 7 = imaginary, 8 = phase.
Returns
-------
statusTuple : tuple or -1
tuple of codes returned by ``zModifySettings()`` for each
non-None parameters. The status codes are as follows:
0 = no error;
-1 = invalid file;
-2 = incorrect version number;
-3 = file access conflict
The function returns -1 if ``settingsFile`` is invalid.
See Also
--------
zSetHuygensPSFSettings(), zGetPSF()
"""
sTuple = [] # status tuple
if (_os.path.isfile(settingsFile) and
settingsFile.lower().endswith('.cfg')):
dst = settingsFile
else:
return -1
if pupilSample is not None:
sTuple.append(self.zModifySettings(dst, "HPS_PUPILSAMP", pupilSample))
if imgSample is not None:
sTuple.append(self.zModifySettings(dst, "HPS_IMAGESAMP", imgSample))
if wave is not None:
sTuple.append(self.zModifySettings(dst, "HPS_WAVE", wave))
if field is not None:
sTuple.append(self.zModifySettings(dst, "HPS_FIELD", field))
if imgDelta is not None:
sTuple.append(self.zModifySettings(dst, "HPS_IMAGEDELTA", imgDelta))
if dtype is not None:
sTuple.append(self.zModifySettings(dst, "HPS_TYPE", dtype))
return tuple(sTuple)
def zSetHuygensPSFSettings(self, settingsFile=None, pupilSample=None,
imgSample=None, wave=None, field=None,
imgDelta=None, dtype=None):
"""create and set a new Huygens PSF analysis settings file starting
from the "reset" settings state of the most basic lens in Zemax
To modify an existing Huygens PSF settings file, use
``zModifyHuygensPSFSettings()``. Only those parameters that are
non-None will be set
Parameters
----------
settingsFile : string, optional
name to give to the settings file to be created. It must be
the full file name, including path and extension of the
settings file.
If ``None``, then a CFG file with the name of the lens
followed by the string '_pyzdde_HUYGENSPSF.CFG' will be
created in the same directory as the lens file and returned
pupilSample : integer, optional
the pupil sampling. 1 = 32x32; 2 = 64x64; 3 = 128x128;
4 = 256x256; 5 = 512x512; 6 = 1024x1024; 7 = 2048x2048;
8 = 4096x4096; 9 = 8192x8192; 10 = 16384x16384;
imgSample : integer, optional
the image sampling. 1 = 32x32; 2 = 64x64; 3 = 128x128;
4 = 256x256; 5 = 512x512; 6 = 1024x1024; 7 = 2048x2048;
8 = 4096x4096; 9 = 8192x8192; 10 = 16384x16384;
wave : integer, optional
the wavelength number, use 0 for polychromatic
field : integer, optional
the field number
imgDelta : float, optional
the image point spacing in micrometers
dtype : integer (0-8), optional
0 = linear, 1 = log -1, 2 = log -2, 3 = log -3, 4 = log -4,
5 = log -5, 6 = real, 7 = imaginary, 8 = phase.
Returns
-------
settingsFile : string
the full name, including path and extension, of the just
created settings file
Notes
-----
1. Further modifications of the settings file can be made using
``zModifySettings()`` or ``zModifyHuygensPSFSettings()``
functions
2. The function creates settings file ending with
'_pyzdde_HUYGENSPSF.CFG' in order to prevent overwritting any
existing settings file not created by pyzdde for Huygens PSF
analysis.
This file eventually gets deleted when ``ln.close()`` or
``pyz.closeLink()`` or ``ln.zDDEClose()`` is called.
See Also
--------
zGetPSF(), zModifyHuygensPSFSettings()
"""
clean_cfg = 'RESET_SETTINGS_HUYGENSPSF.CFG'
src = _os.path.join(_pDir, 'ZMXFILES', clean_cfg)
if settingsFile:
dst = settingsFile
else:
filename_partial = _os.path.splitext(self.zGetFile())[0]
dst = filename_partial + '_pyzdde_HUYGENSPSF.CFG'
self._filesCreated.add(dst)
try:
_shutil.copy(src, dst)
except IOError:
print("ERROR: Invalid settingsFile {}".format(dst))
return
else:
self.zModifyHuygensPSFSettings(dst, pupilSample, imgSample, wave,
field, imgDelta, dtype)
return dst
    def zGetMTF(self, which='fft', settingsFile=None, txtFile=None,
                keepFile=False, timeout=120):
        """Returns FFT or Huygens MTF data by running the analysis,
        dumping its text output, and parsing the per-field data tables.

        Parameters
        ----------
        which : string, optional
            if 'fft' (default), then the FFT MTF data is returned;
            if 'huygens', then the Huygens MTF data is returned;
        settingsFile : string, optional
            * if passed, the FFT/Huygens MTF analysis will be called with
              the given configuration file (settings);
            * if no ``settingsFile`` is passed, and config file ending
              with the same name as the lens-file post-fixed with
              "_pyzdde_FFTMTF.CFG"/"_pyzdde_HUYGENSMTF.CFG" is present,
              the settings from this file will be used;
            * if no ``settingsFile`` and no file name post-fixed with
              "_pyzdde_FFTMTF.CFG"/"_pyzdde_HUYGENSMTF.CFG" is found, but
              a config file with the same name as the lens file is present,
              the settings from that file will be used;
            * if no settings file is found, then a default settings will
              be used
        txtFile : string, optional
            if passed, the MTF analysis text file will be named such.
            Pass a specific txtFile if you want to dump the file into
            a separate directory.
        keepFile : bool, optional
            if ``False`` (default), the MTF text file will be deleted
            after use.
            If ``True``, the file will persist. If ``keepFile`` is ``True``
            but a ``txtFile`` is not passed, the MTF text file will be
            saved in the same directory as the lens (provided the required
            folder access permissions are available)
        timeout : integer, optional
            timeout in seconds. Note that Huygens PSF/MTF calculations with
            ``pupil_sample`` and/or ``image_sample`` greater than 4
            usuallly take several minutes to complete

        Returns
        -------
        mtfs : tuple of tuples
            one named tuple ``MTF(SpatialFreq, Tangential, Sagittal)``
            per field defined in the analysis settings; each member is a
            list of floats.

        Examples
        --------
        The following example plots the MTFs for each defined field point
        of a Zemax lens

        >>> mtfs = ln.zGetMTF()
        >>> for field, mtf in enumerate(mtfs):
        >>>     plt.plot(mtf.SpatialFreq, mtf.Tangential, label='F-{}, T'.format(field + 1))
        >>>     plt.plot(mtf.SpatialFreq, mtf.Sagittal, label='F-{}, S'.format(field + 1))
        >>> plt.xlabel('Spatial Frequency in cycles per mm')
        >>> plt.ylabel('Modulus of the OTF')
        >>> plt.grid('on')
        >>> plt.legend(frameon=False)
        >>> plt.show()

        See Also
        --------
        zModifyFFTMTFSettings(), zSetFFTMTFSettings(),
        zModifyHuygensMTFSettings(), zSetHuygensMTFSettings()
        """
        # 'Hmf' / 'Mtf' are the Zemax analysis type codes for Huygens
        # and FFT MTF respectively.
        if which=='huygens':
            anaType = 'Hmf'
        else:
            anaType = 'Mtf'
        # Resolve text-dump filename, settings file, and settings-use flag
        # per the lookup order documented for ``settingsFile`` above.
        settings = _txtAndSettingsToUse(self, txtFile, settingsFile, anaType)
        textFileName, cfgFile, getTextFlag = settings
        ret = self.zGetTextFile(textFileName, anaType, cfgFile, getTextFlag,
                                timeout)
        # NOTE(review): assert is stripped under ``python -O``; a raise
        # would be safer here, but this matches the file's style.
        assert ret == 0
        line_list = _readLinesFromFile(_openFile(textFileName))
        # Each field's data block is introduced by a "Field: <value>" line.
        pat = r'Field:\s-?\d{1,3}\.\d{1,5},?\s?'
        fields = _getRePatPosInLineList(line_list, pat)
        if len(fields) > 1:
            # Data starts 2 lines after each "Field:" header; block length
            # is inferred from the spacing of consecutive headers
            # (assumes all field blocks are equally sized -- layout of
            # the Zemax text output; verify if the format changes).
            data_start_pos = [p + 2 for p in fields]
            data_len = [fields[1] - fields[0] - 3]*len(fields)
        else:
            # Single field: the data runs to the end of the file.
            # (Assumes at least one "Field:" match; raises IndexError
            # otherwise.)
            data_start_pos = [fields[0] + 2,]
            data_len = [len(line_list) - data_start_pos[0],]
        mtfs = []
        mtf = _co.namedtuple('MTF', ['SpatialFreq', 'Tangential', 'Sagittal'])
        for start, length in zip(data_start_pos, data_len):
            # Parse the numeric table and transpose rows -> columns:
            # column 0 = spatial frequency, 1 = tangential, 2 = sagittal.
            data_mat = _get2DList(line_list, start, length)
            data_matT = _transpose2Dlist(data_mat)
            spat_freq = data_matT[0]
            mtf_tang = data_matT[1]
            mtf_sagi = data_matT[2]
            mtfs.append(mtf(spat_freq, mtf_tang, mtf_sagi))
        if not keepFile:
            _deleteFile(textFileName)
        return tuple(mtfs)
def zModifyFFTMTFSettings(self, settingsFile, sample=None, wave=None,
field=None, dtype=None, surf=None, maxFreq=None,
showDiff=None, pol=None, useDash=None):
"""Modify an existing FFT MTF analysis settings (configuration)
file
Parameters
----------
settingsFile : string
filename of the settings file including path and extension
sample : integer, optional
the sampling. 1 = 32x32; 2 = 64x64; 3 = 128x128; 4 = 256x256;
5 = 512x512; 6 = 1024x1024; 7 = 2048x2048; 8 = 4096x4096;
9 = 8192x8192; 10 = 16384x16384;
wave : integer, optional
the wavelength number, use 0 for polychromatic.
field : integer, optional
the field number, 0 for all
dtype : integer (0-4), optional
0 = modulation, 1 = real, 2 = imaginary, 3 = phase, 4 = square
wave.
surf : integer, optional
the surface number. Use 0 for image
maxFreq : real, optional
the maximum frequency, use 0 for default
showDiff : integer (0/1)
show diffraction limit, 0 for no, 1 for yes
pol : integer (0/1), optional
the polarization. 0 for unpolarized, 1 for polarized.
useDash : integer (0/1)
use dashes, 0 for no, 1 for yes
Returns
-------
statusTuple : tuple or -1
tuple of codes returned by ``zModifySettings()`` for each
non-None parameters. The status codes are as follows:
0 = no error;
-1 = invalid file;
-2 = incorrect version number;
-3 = file access conflict
The function returns -1 if ``settingsFile`` is invalid.
See Also
--------
zSetFFTMTFSettings() :
to create and set FFT MTF settings/configuration file
zGetMTF(),
"""
sTuple = [] # status tuple
if (_os.path.isfile(settingsFile) and
settingsFile.lower().endswith('.cfg')):
dst = settingsFile
else:
return -1
if sample is not None:
sTuple.append(self.zModifySettings(dst, "MTF_SAMP", sample))
if wave is not None:
sTuple.append(self.zModifySettings(dst, "MTF_WAVE", wave))
if field is not None:
sTuple.append(self.zModifySettings(dst, "MTF_FIELD", field))
if dtype is not None:
sTuple.append(self.zModifySettings(dst, "MTF_TYPE", dtype))
if surf is not None:
sTuple.append(self.zModifySettings(dst, "MTF_SURF", surf))
if maxFreq is not None:
sTuple.append(self.zModifySettings(dst, "MTF_MAXF", maxFreq))
if showDiff is not None:
sTuple.append(self.zModifySettings(dst, "MTF_SDLI", showDiff))
if pol is not None:
sTuple.append(self.zModifySettings(dst, "MTF_POLAR", pol))
if useDash is not None:
sTuple.append(self.zModifySettings(dst, "MTF_DASH", useDash))
return tuple(sTuple)
def zSetFFTMTFSettings(self, settingsFile=None, sample=None, wave=None,
field=None, dtype=None, surf=None, maxFreq=None,
showDiff=None, pol=None, useDash=None):
"""create and set a new FFT MTF analysis settings file starting
from the "reset" settings state of the most basic lens in Zemax
To modify an existing FFT MTF settings file, use
``zModifyFFTMTFSettings()``. Only those parameters that are
non-None will be set
Parameters
----------
settingsFile : string, optional
name to give to the settings file to be created. It must be
the full file name, including path and extension of the
settings file.
If ``None``, then a CFG file with the name of the lens
followed by the string '_pyzdde_FFTMTF.CFG' will be created
in the same directory as the lens file and returned
sample : integer, optional
the sampling. 1 = 32x32; 2 = 64x64; 3 = 128x128; 4 = 256x256;
5 = 512x512; 6 = 1024x1024; 7 = 2048x2048; 8 = 4096x4096;
9 = 8192x8192; 10 = 16384x16384;
wave : integer, optional
the wavelength number, use 0 for polychromatic.
field : integer, optional
the field number, 0 for all
dtype : integer (0-4), optional
0 = modulation, 1 = real, 2 = imaginary, 3 = phase, 4 = square
wave.
surf : integer, optional
the surface number. Use 0 for image
maxFreq : real, optional
the maximum frequency, use 0 for default
showDiff : integer (0/1)
show diffraction limit, 0 for no, 1 for yes
pol : integer (0/1), optional
the polarization. 0 for unpolarized, 1 for polarized.
useDash : integer (0/1)
use dashes, 0 for no, 1 for yes
Returns
-------
settingsFile : string
the full name, including path and extension, of the just
created settings file
Notes
-----
1. Further modifications of the settings file can be made using
``zModifySettings()`` or ``zModifyFFTMTFSettings()`` functions
2. The function creates settings file ending with
'_pyzdde_FFTMTF.CFG' in order to prevent overwritting any
existing settings file not created by pyzdde for FFT MTF.
This file eventually gets deleted when ``ln.close()`` or
``pyz.closeLink()`` or ``ln.zDDEClose()`` is called.
See Also
--------
zGetMTF(), zModifyFFTMTFSettings()
"""
clean_cfg = 'RESET_SETTINGS_FFTMTF.CFG'
src = _os.path.join(_pDir, 'ZMXFILES', clean_cfg)
if settingsFile:
dst = settingsFile
else:
filename_partial = _os.path.splitext(self.zGetFile())[0]
dst = filename_partial + '_pyzdde_FFTMTF.CFG'
self._filesCreated.add(dst)
try:
_shutil.copy(src, dst)
except IOError:
print("ERROR: Invalid settingsFile {}".format(dst))
return
else:
self.zModifyFFTMTFSettings(dst, sample, wave, field, dtype, surf,
maxFreq, showDiff, pol, useDash)
return dst
def zModifyHuygensMTFSettings(self, settingsFile, pupilSample=None,
imgSample=None, imgDelta=None, config=None,
wave=None, field=None, dtype=None, maxFreq=None,
pol=None, useDash=None):
"""Modify an existing Huygens MTF analysis settings (configuration)
file
Only those parameters that are non-None will be set.
Parameters
----------
settingsFile : string
filename of the settings file including path and extension
pupilSample : integer, optional
the pupil sampling. 1 = 32x32; 2 = 64x64; 3 = 128x128;
4 = 256x256; 5 = 512x512; 6 = 1024x1024; 7 = 2048x2048;
8 = 4096x4096; 9 = 8192x8192; 10 = 16384x16384;
imgSample : integer, optional
the image sampling. 1 = 32x32; 2 = 64x64; 3 = 128x128;
4 = 256x256; 5 = 512x512; 6 = 1024x1024; 7 = 2048x2048;
8 = 4096x4096; 9 = 8192x8192; 10 = 16384x16384;
imgDelta : float, optional
the image point spacing in micrometers
config : integer, optional
the configuration number. Use 0 for all, 1 for current, etc.
wave : integer, optional
the wavelength number. Use 0 for polychromatic
field : integer, optional
the field number
dtype : integer, optional
the data type. Currently only 0 is supported
maxFreq : float, optional
the maximum spatial frequency
pol : integer, optional
polarization. 0 for no, 1 for yes
useDash : integer, optional
use dashes. 0 for no, 1 for yes
Returns
-------
statusTuple : tuple or -1
tuple of codes returned by ``zModifySettings()`` for each
non-None parameters. The status codes are as follows:
0 = no error;
-1 = invalid file;
-2 = incorrect version number;
-3 = file access conflict
The function returns -1 if ``settingsFile`` is invalid.
See Also
--------
zSetHuygensMTFSettings(), zGetMTF()
"""
sTuple = [] # status tuple
if (_os.path.isfile(settingsFile) and
settingsFile.lower().endswith('.cfg')):
dst = settingsFile
else:
return -1
if pupilSample is not None:
sTuple.append(self.zModifySettings(dst, "HMF_PUPILSAMP", pupilSample))
if imgSample is not None:
sTuple.append(self.zModifySettings(dst, "HMF_IMAGESAMP", imgSample))
if imgDelta is not None:
sTuple.append(self.zModifySettings(dst, "HMF_IMAGEDELTA", imgDelta))
if config is not None:
sTuple.append(self.zModifySettings(dst, "HMF_CONFIG", config))
if wave is not None:
sTuple.append(self.zModifySettings(dst, "HMF_WAVE", wave))
if field is not None:
sTuple.append(self.zModifySettings(dst, "HMF_FIELD", field))
if dtype is not None:
sTuple.append(self.zModifySettings(dst, "HMF_TYPE", dtype))
if maxFreq is not None:
sTuple.append(self.zModifySettings(dst, "HMF_MAXF", maxFreq))
if pol is not None:
sTuple.append(self.zModifySettings(dst, "HMF_POLAR", pol))
if useDash is not None:
sTuple.append(self.zModifySettings(dst, "HMF_DASH", useDash))
return tuple(sTuple)
def zSetHuygensMTFSettings(self, settingsFile=None, pupilSample=None,
imgSample=None, imgDelta=None, config=None,
wave=None, field=None, dtype=None, maxFreq=None,
pol=None, useDash=None):
"""create and set a new Huygens MTF analysis settings file starting
from the "reset" settings state of the most basic lens in Zemax
To modify an existing Huygens MTF settings file, use
``zModifyHuygensMTFSettings()``. Only those parameters that are
non-None will be set
Parameters
----------
settingsFile : string, optional
name to give to the settings file to be created. It must be
the full file name, including path and extension of the
settings file.
If ``None``, then a CFG file with the name of the lens
followed by the string '_pyzdde_HUYGENSMTF.CFG' will be
created in the same directory as the lens file and returned
pupilSample : integer, optional
the pupil sampling. 1 = 32x32; 2 = 64x64; 3 = 128x128;
4 = 256x256; 5 = 512x512; 6 = 1024x1024; 7 = 2048x2048;
8 = 4096x4096; 9 = 8192x8192; 10 = 16384x16384;
imgSample : integer, optional
the image sampling. 1 = 32x32; 2 = 64x64; 3 = 128x128;
4 = 256x256; 5 = 512x512; 6 = 1024x1024; 7 = 2048x2048;
8 = 4096x4096; 9 = 8192x8192; 10 = 16384x16384;
imgDelta : float, optional
the image point spacing in micrometers
config : integer, optional
the configuration number. Use 0 for all, 1 for current, etc.
wave : integer, optional
the wavelength number. Use 0 for polychromatic
field : integer, optional
the field number
dtype : integer, optional
the data type. Currently only 0 is supported
maxFreq : float, optional
the maximum spatial frequency
pol : integer, optional
polarization. 0 for no, 1 for yes
useDash : integer, optional
use dashes. 0 for no, 1 for yes
Returns
-------
settingsFile : string
the full name, including path and extension, of the just
created settings file
Notes
-----
1. Further modifications of the settings file can be made using
``zModifySettings()`` or ``zModifyHuygensMTFSettings()``
functions
2. The function creates settings file ending with
'_pyzdde_HUYGENSMTF.CFG' in order to prevent overwritting any
existing settings file not created by pyzdde for Huygens MTF
analysis.
This file eventually gets deleted when ``ln.close()`` or
``pyz.closeLink()`` or ``ln.zDDEClose()`` is called.
See Also
--------
zGetMTF(), zModifyHuygensMTFSettings()
"""
clean_cfg = 'RESET_SETTINGS_HUYGENSMTF.CFG'
src = _os.path.join(_pDir, 'ZMXFILES', clean_cfg)
if settingsFile:
dst = settingsFile
else:
filename_partial = _os.path.splitext(self.zGetFile())[0]
dst = filename_partial + '_pyzdde_HUYGENSMTF.CFG'
self._filesCreated.add(dst)
try:
_shutil.copy(src, dst)
except IOError:
print("ERROR: Invalid settingsFile {}".format(dst))
return
else:
self.zModifyHuygensMTFSettings(dst, pupilSample, imgSample, imgDelta,
config, wave, field, dtype, maxFreq,
pol, useDash)
return dst
# Image simulation functions
    def zGetImageSimulation(self, settingsFile=None, txtFile=None, keepFile=False,
                            timeout=120):
        """Returns image simulation analysis results
        Parameters
        ----------
        settingsFile : string, optional
            * if passed, the image simulation analysis will be called with
              the given configuration file (settings);
            * if no ``settingsFile`` is passed, and config file ending
              with the same name as the lens-file post-fixed with
              "_pyzdde_IMGSIM.CFG" is present, the settings from this file
              will be used;
            * if no ``settingsFile`` and no file-name post-fixed with
              "_pyzdde_IMGSIM.CFG" is found, but a config file with the
              same name as the lens file is present, the settings from
              that file will be used;
            * if no settings file is found, then a default settings will
              be used
        txtFile : string, optional
            if passed, the image simulation analysis text file will be
            named such. Pass a specific txtFile if you want to dump the
            file into a separate directory.
        keepFile : bool, optional
            if ``False`` (default), the image simulation text file will be
            deleted after use.
            If ``True``, the file will persist. If ``keepFile`` is ``True``
            but a ``txtFile`` is not passed, the text file will be
            saved in the same directory as the lens (provided the required
            folder access permissions are available)
        timeout : integer, optional
            timeout in seconds.
        Returns
        -------
        imgInfo : named tuple
            meta data about the image analysis data containing 'xpix',
            'ypix', 'objHeight', 'fieldPos', 'imgW', and 'imgH'. PSF
            Grid data doesn't have `imgW` and `imgH` and Source bitmap
            image data only has `xpix` and `ypix`.
        imgData : 3D list
            the 3D list containing the RGB values of the output image.
            The first dimension of ``imgData`` represents height (rows),
            the second dimension represents width (cols), and the third
            dimension represents the channel (r, g, b)
        Examples
        --------
        In the following example the image simulation function is called with
        default arguments, and the returned data is plotted using matplotlib's
        imshow function after converting the data into a Numpy (np) array.
        >>> cfgfile = ln.zSetImageSimulationSettings(image='RGB_CIRCLES.BMP', height=1)
        >>> img_info, img_data = ln.zGetImageSimulation(settingsFile=cfgfile)
        >>> img = np.array(img_data, dtype='uint8')
        >>> fig, ax = plt.subplots(1,1, figsize=(10, 8))
        >>> if len(img_info)==6: # image simulation data
        >>>     bottom, top = -img_info.imgH/2, img_info.imgH/2
        >>>     left, right = -img_info.imgW/2, img_info.imgW/2
        >>>     extent=[left, right, bottom, top]
        >>>     xl, yl = 'Image width (mm)', 'Image height (mm)'
        >>>     t = 'Simulated Image'
        >>> elif len(img_info)==4: # psf grid data
        >>>     bottom, top = -img_info.objHeight/2, img_info.objHeight/2
        >>>     aratio = img_info.xpix/img_info.ypix
        >>>     left, right = bottom*aratio, top*aratio
        >>>     extent=[left, right, bottom, top]
        >>>     xl, yl = 'Field width (mm)', 'Field height (mm)'
        >>>     t = 'PSF Grid at field pos {:2.2f}'.format(img_info.fieldPos)
        >>> else: # source bitmap
        >>>     extent = [0, img_info.xpix, 0, img_info.ypix]
        >>>     xl = '{} pixels wide'.format(img_info.xpix)
        >>>     yl = '{} pixels high'.format(img_info.ypix)
        >>>     t = 'Source Bitmap'
        >>> ax.imshow(img, extent=extent, interpolation='none')
        >>> ax.set_xlabel(xl); ax.set_ylabel(yl); ax.set_title(t)
        >>> plt.show()
        Notes
        -----
        It is recommended that a settings files is first generated using the
        ``zSetImageSimulationSettings()`` functions prior to calling
        ``zGetImageSimulation()``.
        See Also
        --------
        zModifyImageSimulationSettings(), zSetImageSimulationSettings()
        """
        # Resolve the text output file, the CFG file, and the flag that
        # tells Zemax whether to use default or file-based settings
        settings = _txtAndSettingsToUse(self, txtFile, settingsFile, 'Sim')
        textFileName, cfgFile, getTextFlag = settings
        # Ask Zemax to dump the image simulation ('Sim') analysis as text
        ret = self.zGetTextFile(textFileName, 'Sim', cfgFile, getTextFlag,
                                timeout)
        assert ret == 0, 'zGetTextFile() returned error code {}'.format(ret)
        line_list = _readLinesFromFile(_openFile(textFileName))
        # Meta data
        # The "Data" line identifies which of the three output types the
        # settings requested (Simulated Image / PSF Grid / Source Bitmap)
        data = None
        data_line = line_list[_getFirstLineOfInterest(line_list, 'Data')]
        dataType = data_line.split(':')[1].strip()
        if dataType == 'Simulated Image':
            data = 'img'
        elif dataType == 'PSF Grid':
            data = 'psf'
        else: # source bitmap
            data = 'src'
        # bitmap dimensions: first integer (up to 5 digits) on each line
        bm_ht_line = line_list[_getFirstLineOfInterest(line_list, 'Bitmap Height')]
        bm_ht = int(_re.search(r'\b\d{1,5}\b', bm_ht_line).group()) # pixels
        bm_wd_line = line_list[_getFirstLineOfInterest(line_list, 'Bitmap Width')]
        bm_wd = int(_re.search(r'\b\d{1,5}\b', bm_wd_line).group()) # pixels
        if data=='img' or data=='psf':
            # object height and field position: signed decimals of the
            # form d{1,3}.d{1,5}
            obj_ht_line = line_list[_getFirstLineOfInterest(line_list, 'Object Height')]
            obj_ht = float(_re.search(r'\b-?\d{1,3}\.\d{1,5}\b', obj_ht_line).group())
            fld_pos_line = line_list[_getFirstLineOfInterest(line_list, 'Field position')]
            fld_pos = float(_re.search(r'\b-?\d{1,3}\.\d{1,5}\b', fld_pos_line).group())
        if data=='img':
            # the "Image Size" line carries width and height as two
            # decimals with 4-6 fractional digits
            img_siz_line = line_list[_getFirstLineOfInterest(line_list, 'Image Size')]
            pat = r'\d{1,3}\.\d{4,6}'
            img_wd, img_ht = [float(i) for i in _re.findall(pat, img_siz_line)] # physical units
        # build the appropriate metadata namedtuple for the data type
        if data=='img':
            img_info = _co.namedtuple('ImgSimInfo', ['xpix', 'ypix', 'objHeight',
                                                     'fieldPos', 'imgW', 'imgH'])
            img_info_data = img_info._make([bm_wd, bm_ht, obj_ht, fld_pos, img_wd, img_ht])
        elif data=='psf':
            img_info = _co.namedtuple('PSFGridInfo', ['xpix', 'ypix', 'objHeight',
                                                      'fieldPos'])
            img_info_data = img_info._make([bm_wd, bm_ht, obj_ht, fld_pos])
        else: # source bitmap / data = src
            img_info = _co.namedtuple('SrcImgInfo', ['xpix', 'ypix'])
            img_info_data = img_info._make([bm_wd, bm_ht])
        # pre-size the output as rows x cols x 3 (RGB) zeros
        img_data = [[[0 for c in range(3)] for i in range(bm_wd)] for j in range(bm_ht)]
        r, g, b = 0, 1, 2
        # pixel table starts one line after the "xpix ypix R G B" header
        pat = r'xpix\s{1,4}ypix\s{1,4}R\s{1,4}G\s{1,4}B'
        start = _getFirstLineOfInterest(line_list, pat) + 1
        # the text lists pixels grouped by x (column), with y varying
        # fastest within each group -- hence the start + xpix*bm_ht + ypix
        # index; each line is "xpix ypix R G B", so [2:] keeps the RGB
        for xpix in range(bm_wd): # along width
            for ypix in range(bm_ht): # along height
                pixel_data = line_list[start + xpix*bm_ht + ypix].split()[2:]
                pix_r, pix_g, pix_b = pixel_data
                img_data[ypix][xpix][r] = int(pix_r)
                img_data[ypix][xpix][g] = int(pix_g)
                img_data[ypix][xpix][b] = int(pix_b)
        if not keepFile:
            _deleteFile(textFileName)
        return img_info_data, img_data
def zModifyImageSimulationSettings(self, settingsFile, image=None, height=None,
over=None, guard=None, flip=None, rotate=None,
wave=None, field=None, pupilSample=None,
imgSample=None, psfx=None, psfy=None, aberr=None,
pol=None, fixedAper=None, illum=None, showAs=None,
reference=None, suppress=None, pixelSize=None,
xpix=None, ypix=None, flipSimImg=None, outFile=None):
"""Modify an existing image simulation analysis settings
(configuration) file
Only those parameters that are non-None will be set.
Parameters
----------
settingsFile : string
filename of the settings file including path and extension
image : string, optional
The input file name. This should be specified without a path.
height : float, optional
The field height, which defines the full height of the source
bitmap in field coordinates, may be either lens units or
degrees, depending upon the current field definition (heights
or angles, respectively).
over : integer, optional, [0-6]
Oversample value. Use 0 for None, 1 for 2X, 2 for 4x, etc.
guard : integer, optional, [0-6]
Guard band value. Use 0 for None, 1 for 2X, 2 for 4x, etc.
flip : integer, optional, [0-3]
Flip Source. Use 0 for None, 1 for top-bottom, 2 for left-right,
3 for top-bottom & left-right.
rotate : integer, optional, [0-3]
Rotate Source. Use 0 for none, 1 for 90, 2 for 180, 3 for 270.
wave : integer, optional,
Wavelength. Use 0 for RGB, 1 for 1+2+3, 2 for wave #1, 3 for
wave #2, etc.
field : integer, optional
Field number.
pupilSample : integer, optional, [1-10]
Pupil Sampling. Use 1 for 32x32, 2 for 64x64, etc.
imgSample : integer, optional, [1-5]
Image Sampling. Use 1 for 32x32, 2 for 64x64, etc.
psfx, psfy : integer, optional, [1-51]
The number of PSF grid points.
aberr : integer, optional, [0-2]
Use 0 for none, 1 for geometric, 2 for diffraction.
pol : integer, optional, [0-1]
Polarization. Use 0 for no, 1 for yes.
fixedAper : integer, optional, [0-1]
Apply fixed aperture? Use 0 for no, 1 for yes (apply fixed
aperture).
illum : integer, optional, [0-1]
Relative illumination. Use 0 for no, 1 for yes.
showAs : integer, optional, [0-2]
Use 0 for Simulated Image, 1 for Source Bitmap, and 2 for PSF
Grid.
reference : integer, optional, [0-2]
Use 0 for chief ray, 1 for vertex, 2 for primary chief ray.
suppress : integer, optional, [0-1]
Use 0 for no, 1 for yes.
pixelSize : integer, optional
Use 0 for default or the size in lens units.
xpix, ypix : integer, optional
Use 0 for default or the number of pixels.
flipSimImg : integer, optional
Use 0 for none, 1 for top-bottom, etc.
outFile : string, optional
The output file name or empty string for no output file.
Returns
-------
statusTuple : tuple or -1
tuple of codes returned by ``zModifySettings()`` for each
non-None parameters. The status codes are as follows:
0 = no error;
-1 = invalid file;
-2 = incorrect version number;
-3 = file access conflict
The function returns -1 if ``settingsFile`` is invalid.
See Also
--------
zSetImageSimulationSettings(), zGetImageSimulation()
"""
sTuple = [] # status tuple
if (_os.path.isfile(settingsFile) and
settingsFile.lower().endswith('.cfg')):
dst = settingsFile
else:
return -1
if image is not None:
sTuple.append(self.zModifySettings(dst, "ISM_INPUTFILE", image))
if height is not None:
sTuple.append(self.zModifySettings(dst, "ISM_FIELDHEIGHT", height))
if over is not None:
sTuple.append(self.zModifySettings(dst, "ISM_OVERSAMPLING", over))
if guard is not None:
sTuple.append(self.zModifySettings(dst, "ISM_GUARDBAND", guard))
if flip is not None:
sTuple.append(self.zModifySettings(dst, "ISM_FLIP", flip))
if rotate is not None:
sTuple.append(self.zModifySettings(dst, "ISM_ROTATE", rotate))
if wave is not None:
sTuple.append(self.zModifySettings(dst, "ISM_WAVE", wave))
if field is not None:
sTuple.append(self.zModifySettings(dst, "ISM_FIELD", field))
if pupilSample is not None:
sTuple.append(self.zModifySettings(dst, "ISM_PSAMP", pupilSample))
if imgSample is not None:
sTuple.append(self.zModifySettings(dst, "ISM_ISAMP", imgSample))
if psfx is not None:
sTuple.append(self.zModifySettings(dst, "ISM_PSFX", psfx))
if psfy is not None:
sTuple.append(self.zModifySettings(dst, "ISM_PSFY", psfy))
if aberr is not None:
sTuple.append(self.zModifySettings(dst, "ISM_ABERRATIONS", aberr))
if pol is not None:
sTuple.append(self.zModifySettings(dst, "ISM_POLARIZATION", pol))
if fixedAper is not None:
sTuple.append(self.zModifySettings(dst, "ISM_FIXEDAPERTURES", fixedAper))
if illum is not None:
sTuple.append(self.zModifySettings(dst, "ISM_USERI", illum))
if showAs is not None:
sTuple.append(self.zModifySettings(dst, "ISM_SHOWAS", showAs))
if reference is not None:
sTuple.append(self.zModifySettings(dst, "ISM_REFERENCE", reference))
if suppress is not None:
sTuple.append(self.zModifySettings(dst, "ISM_SUPPRESS", suppress))
if pixelSize is not None:
sTuple.append(self.zModifySettings(dst, "ISM_PIXELSIZE", pixelSize))
if xpix is not None:
sTuple.append(self.zModifySettings(dst, "ISM_XSIZE", xpix))
if ypix is not None:
sTuple.append(self.zModifySettings(dst, "ISM_YSIZE", ypix))
if flipSimImg is not None:
sTuple.append(self.zModifySettings(dst, "ISM_FLIPIMAGE", flipSimImg))
if outFile is not None:
sTuple.append(self.zModifySettings(dst, "ISM_OUTPUTFILE", outFile))
return tuple(sTuple)
def zSetImageSimulationSettings(self, settingsFile=None, image=None, height=None,
over=None, guard=None, flip=None, rotate=None,
wave=None, field=None, pupilSample=None,
imgSample=None, psfx=None, psfy=None, aberr=None,
pol=None, fixedAper=None, illum=None, showAs=None,
reference=None, suppress=None, pixelSize=None,
xpix=None, ypix=None, flipSimImg=None, outFile=None):
"""create and set a new image simulation analysis settings file
starting from the "reset" settings state of the most basic lens in
Zemax
To modify an existing image simulation analysis settings file, use
``zModifyImageSimulationSettings()``. Only those parameters that
are non-None will be set
Parameters
----------
settingsFile : string, optional
name to give to the settings file to be created. It must be
the full file name, including path and extension of the
settings file.
If ``None``, then a CFG file with the name of the lens
followed by the string '_pyzdde_IMGSIM.CFG' will be created
in the same directory as the lens file and returned
image : string, optional
The input file name. This should be specified without a path.
height : float, optional
The field height, which defines the full height of the source
bitmap in field coordinates, may be either lens units or
degrees, depending upon the current field definition (heights
or angles, respectively).
over : integer, optional, [0-6]
Oversample value. Use 0 for None, 1 for 2X, 2 for 4x, etc.
guard : integer, optional, [0-6]
Guard band value. Use 0 for None, 1 for 2X, 2 for 4x, etc.
flip : integer, optional, [0-3]
Flip Source. Use 0 for None, 1 for top-bottom, 2 for left-right,
3 for top-bottom & left-right.
rotate : integer, optional, [0-3]
Rotate Source. Use 0 for none, 1 for 90, 2 for 180, 3 for 270.
wave : integer, optional,
Wavelength. Use 0 for RGB, 1 for 1+2+3, 2 for wave #1, 3 for
wave #2, etc.
field : integer, optional
Field number.
pupilSample : integer, optional, [1-10]
Pupil Sampling. Use 1 for 32x32, 2 for 64x64, etc.
i.e. [32*(2**i) for i in range(10)]
imgSample : integer, optional, [1-5]
Image Sampling. Use 1 for 32x32, 2 for 64x64, etc.
i.e. [32*(2**i) for i in range(5)]
psfx, psfy : integer, optional, [1-51]
The number of PSF grid points.
aberr : integer, optional, [0-2]
Use 0 for none, 1 for geometric, 2 for diffraction.
pol : integer, optional, [0-1]
Polarization. Use 0 for no, 1 for yes.
fixedAper : integer, optional, [0-1]
Apply fixed aperture? Use 0 for no, 1 for yes (apply fixed
aperture).
illum : integer, optional, [0-1]
Relative illumination. Use 0 for no, 1 for yes.
showAs : integer, optional, [0-2]
Use 0 for Simulated Image, 1 for Source Bitmap, and 2 for PSF
Grid.
reference : integer, optional, [0-2]
Use 0 for chief ray, 1 for vertex, 2 for primary chief ray.
suppress : integer, optional, [0-1]
Use 0 for no, 1 for yes.
pixelSize : integer, optional
Use 0 for default or the size in lens units.
xpix, ypix : integer, optional
Use 0 for default or the number of pixels.
flipSimImg : integer, optional
Use 0 for none, 1 for top-bottom, etc.
outFile : string, optional
The output file name or empty string for no output file.
Returns
-------
settingsFile : string
the full name, including path and extension, of the just
created settings file
Notes
-----
1. Further modifications of the settings file can be made using
``zModifySettings()`` or ``zModifyImageSimulationSettings()``
functions
2. The function creates settings file ending with
'_pyzdde_IMGSIM.CFG' in order to prevent overwritting any
existing settings file not created by pyzdde for image
simulation.
This file eventually gets deleted when ``ln.close()`` or
``pyz.closeLink()`` or ``ln.zDDEClose()`` is called.
See Also
--------
zGetImageSimulation(), zModifyImageSimulationSettings()
"""
clean_cfg = 'RESET_SETTINGS_IMGSIM.CFG'
src = _os.path.join(_pDir, 'ZMXFILES', clean_cfg)
if settingsFile:
dst = settingsFile
else:
filename_partial = _os.path.splitext(self.zGetFile())[0]
dst = filename_partial + '_pyzdde_IMGSIM.CFG'
self._filesCreated.add(dst)
try:
_shutil.copy(src, dst)
except IOError:
print("ERROR: Invalid settingsFile {}".format(dst))
return
else:
self.zModifyImageSimulationSettings(dst, image, height, over, guard,
flip, rotate, wave, field, pupilSample,
imgSample, psfx, psfy, aberr, pol,
fixedAper, illum, showAs, reference,
suppress, pixelSize, xpix, ypix,
flipSimImg, outFile)
return dst
# NSC detector viewer data
def zGetDetectorViewer(self, settingsFile=None, displayData=False, txtFile=None,
keepFile=False, timeout=60):
"""Returns NSC detector viewer data.
Please execute `zNSCTrace()` before calling this function.
Parameters
----------
settingsFile : string, optional
* if passed, the detector viewer uses this configuration file;
* if no ``settingsFile`` is passed, and config file ending
with the same name as the lens file post-fixed with
"_pyzdde_DVW.CFG" is present, the settings from this file
will be used;
* if no ``settingsFile`` and no file name post-fixed with
"_pyzdde_DVW.CFG" is found, but a config file with the same
name as the lens file is present, the settings from that
file will be used;
* if no settings file is found, then a default settings will
be used
displayData : bool
if ``true`` the function returns the 1D or 2D display data as
specified in the settings file; default is ``false``
txtFile : string, optional
if passed, the detector viewer data file will be named such.
Pass a specific ``txtFile`` if you want to dump the file into a
separate directory.
keepFile : bool, optional
if ``False`` (default), the detector viewer text file will be
deleted after use.
If ``True``, the file will persist. If ``keepFile`` is ``True``
but a ``txtFile`` is not passed, the detector viewer text file
will be saved in the same directory as the lens (provided the
required folder access permissions are available)
timeout : integer, optional
timeout in seconds.
Return
------
dvwData : tuple
dvwData is a 1-tuple containing just ``dvwInfo`` (see below)
if ``displayData`` is ``False`` (default).
If ``displayData`` is ``True``, ``dvwData`` is a 2-tuple
containing ``dvwInfo`` (a named tuple) and ``data``. ``data``
is either a 2-tuple containing ``coordinate`` and ``values``
as list elements if "Show as" is row/column cross-section, or
a 2D list of values otherwise.
dvwInfo : named tuple
surfNum : integer
NSCG surface number
detNum : integer
detector number
width, height : float
width and height of the detector
xPix, yPix : integer
number of pixels in x and y direction
totHits : integer
total ray hits
peakIrr : float or None
peak irradiance (only available for Irradiance type of data)
totPow : float or None
total power (only available for Irradiance type of data)
smooth : integer
the integer smoothing value
dType : string
the "Show Data" type
x, y, z, tiltX, tiltY, tiltZ : float
the x, y, z positions and tilt values
posUnits : string
position units
units : string
units
rowOrCol : string or None
indicate whether the cross-section data is a row or column
cross-section
rowColNum : float or None
the row or column number for cross-section data
rowColVal : float or None
the row or column value for cross-section data
data : 2-tuple or 2-D list
if cross-section data then `data = (coordinates, values)` where,
`coordinates` and `values` are 1-D lists otherwise, `data` is
a 2-D list of grid data. Note that the coherent phase data is
in degrees.
Examples
--------
>>> info = ln.zGetDetectorViewer(settingsFile)
>>> # following line assumes row/column cross-section data
>>> info, coordinates, values =ln.zGetDetectorViewer(settingsFile, True)
>>> # following line assumes 2d data
>>> info, gridData = zfu.zGetDetectorViewer(settingsFile, True)
See Also
--------
zSetDetectorViewerSettings(), zModifyDetectorViewerSettings()
"""
settings = _txtAndSettingsToUse(self, txtFile, settingsFile, 'Dvw')
textFileName, cfgFile, getTextFlag = settings
ret = self.zGetTextFile(textFileName, 'Dvr', cfgFile, getTextFlag,
timeout)
assert ret == 0, 'zGetTextFile returned {}'.format(ret)
pyz = _sys.modules[__name__]
ret = _zfu.readDetectorViewerTextFile(pyz, textFileName, displayData)
if not keepFile:
_deleteFile(textFileName)
return ret
def zModifyDetectorViewerSettings(self, settingsFile, surfNum=None,
detectNum=None, showAs=None, rowcolNum=None,
zPlaneNum=None, scale=None, smooth=None,
dType=None, zrd=None, dfilter=None,
maxPltScale=None, minPltScale=None,
outFileName=None):
"""Modify an existing detector viewer settings (configuration) file
Only those parameters that are non-None or non-zero-length (in
case of tuples) will be set.
Parameters
----------
settingsFile : string
filename of the settings file including path and extension
surfNum : integer, optional
the surface number. Use 1 for Non-Sequential mode
detectNum : integer, optional
the detector number
showAs : integer, optional
0 = full pixel data; 1 = cross section row; 2 = cross
section column. For Graphics Windows see Notes below.
rowcolNum: integer, optional
the row or column number for cross section plots
zPlaneNum : integer, optional
the Z-Plane number for detector volumes
scale : integer, optional
the scale mode. Use 0 for linear, 1 for Log -5, 2 for Log -10, and
3 for Log - 15.
smooth : integer, optional
the smoothing value
dType : integer, optional
use 0 for incoherent irradiance, 1 for coherent irradiance, 2 for
coherent phase, 3 for radiant intensity, 4 for radiance (position
space), and 5 for radiance (angle space).
zrd : string, optional
the ray data base name, or null for none.
dfilter : string, optional
the filter string
maxPltScale : float, optional
the maximum plot scale
minPltScale : float, optional
the minimim plot scale
outFileName : string, optional
the output file name
Returns
-------
statusTuple : tuple or -1
tuple of codes returned by ``zModifySettings()`` for each
non-None parameters. The status codes are as follows:
0 = no error;
-1 = invalid file;
-2 = incorrect version number;
-3 = file access conflict
The function returns -1 if ``settingsFile`` is invalid.
See Also
--------
zSetDetectorViewerSettings(), zGetDetectorViewer()
"""
sTuple = [] # status tuple
if (_os.path.isfile(settingsFile) and
settingsFile.lower().endswith('.cfg')):
dst = settingsFile
else:
return -1
if surfNum is not None:
sTuple.append(self.zModifySettings(dst, "DVW_SURFACE", surfNum))
if detectNum is not None:
sTuple.append(self.zModifySettings(dst, "DVW_DETECTOR", detectNum))
if showAs is not None:
sTuple.append(self.zModifySettings(dst, "DVW_SHOW", showAs))
if rowcolNum is not None:
sTuple.append(self.zModifySettings(dst, "DVW_ROWCOL", rowcolNum))
if zPlaneNum is not None:
sTuple.append(self.zModifySettings(dst, "DVW_ZPLANE", zPlaneNum))
if scale is not None:
sTuple.append(self.zModifySettings(dst, "DVW_SCALE", scale))
if smooth is not None:
sTuple.append(self.zModifySettings(dst, "DVW_SMOOTHING", smooth))
if dType is not None:
sTuple.append(self.zModifySettings(dst, "DVW_DATA", dType))
if zrd is not None:
sTuple.append(self.zModifySettings(dst, "DVW_ZRD", zrd))
if dfilter is not None:
sTuple.append(self.zModifySettings(dst, "DVW_FILTER", dfilter))
if maxPltScale is not None:
sTuple.append(self.zModifySettings(dst, "DVW_MAXPLOT", maxPltScale))
if minPltScale is not None:
sTuple.append(self.zModifySettings(dst, "DVW_MINPLOT", minPltScale))
if outFileName is not None:
sTuple.append(self.zModifySettings(dst, "DVW_OUTPUTFILE", outFileName))
return tuple(sTuple)
def zSetDetectorViewerSettings(self, settingsFile=None, surfNum=None,
detectNum=None, showAs=None, rowcolNum=None,
zPlaneNum=None, scale=None, smooth=None,
dType=None, zrd=None, dfilter=None,
maxPltScale=None, minPltScale=None,
outFileName=None):
"""Create and set a new detector viewer settings file starting
from the "reset" settings state of the most basic lens in Zemax
To modify an existing detector viewer settings file, use
``zModifyDetectorViewerSettings()``. Only those parameters that
are non-None will be set
Parameters
----------
settingsFile : string, optional
full name of the settings file (with .CFG extension). If ``None``,
then a CFG file with the name of the lens followed by the string
'_pyzdde_DVW.CFG' will be created in the same directory as the
lens file and returned
surfNum : integer, optional
the surface number. Use 1 for Non-Sequential mode
detectNum : integer, optional
the detector number
showAs : integer, optional
0 = full pixel data; 1 = cross section row; 2 = cross
section column. For Graphics Windows see Notes below.
rowcolNum: integer, optional
the row or column number for cross section plots
zPlaneNum : integer, optional
the Z-Plane number for detector volumes
scale : integer, optional
the scale mode. Use 0 for linear, 1 for Log -5, 2 for Log -10, and
3 for Log - 15.
smooth : integer, optional
the smoothing value
dType : integer, optional
use 0 for incoherent irradiance, 1 for coherent irradiance, 2 for
coherent phase, 3 for radiant intensity, 4 for radiance (position
space), and 5 for radiance (angle space).
zrd : string, optional
the ray data base name, or null for none.
dfilter : string, optional
the filter string
maxPltScale : float, optional
the maximum plot scale
minPltScale : float, optional
the minimim plot scale
outFileName : string, optional
the output file name
Returns
-------
settingsFile : string
the full name, including path and extension, of the just
created settings file
Notes
-----
The meaning of the integer value of ``showAs`` depends upon the type
of window displayed -- For Graphics Windows, use 0 for grey scale,
1 for inverted grey scale, 2 for false color, 3 for inverted
false color, 4 for cross section row, and 5 for cross section
column; For Text Windows (which is mostly likely the case when
using externally), use 0 for full pixel data, 1 for cross
See Also
--------
zGetDetectorViewer(), zModifyDetectorViewerSettings()
"""
clean_cfg = 'RESET_SETTINGS_DVW.CFG'
src = _os.path.join(_pDir, 'ZMXFILES', clean_cfg)
if settingsFile:
dst = settingsFile
else:
filename_partial = _os.path.splitext(self.zGetFile())[0]
dst = filename_partial + '_pyzdde_DVW.CFG'
self._filesCreated.add(dst)
try:
_shutil.copy(src, dst)
except IOError:
print("ERROR: Invalid settingsFile {}".format(dst))
return
else:
self.zModifyDetectorViewerSettings(dst, surfNum, detectNum, showAs,
rowcolNum, zPlaneNum, scale, smooth, dType, zrd, dfilter, maxPltScale,
minPltScale, outFileName)
return dst
# Aberration coefficients analysis functions
    def zGetSeidelAberration(self, which='wave', txtFile=None, keepFile=False):
        """Return the Seidel Aberration coefficients
        Parameters
        ----------
        which : string, optional
            'wave' = Wavefront aberration coefficient (summary) is
                     returned;
            'aber' = Seidel aberration coefficients (total) is returned
            'both' = both Wavefront (summary) and Seidel aberration
                     (total) coefficients are returned
        txtFile : string, optional
            if passed, the seidel text file will be named such. Pass a
            specific txtFile if you want to dump the file into a separate
            directory.
        keepFile : bool, optional
            if ``False`` (default), the Seidel text file will be deleted
            after use.
            If ``True``, the file will persist. If ``keepFile`` is ``True``
            but a ``txtFile`` is not passed, the Seidel text file will be
            saved in the same directory as the lens (provided the required
            folder access permissions are availabl
        Returns
        -------
        sac : dictionary or tuple (see below)
            - if 'which' is 'wave', then a dictionary of Wavefront
              aberration coefficient summary is returned;
            - if 'which' is 'aber', then a dictionary of Seidel total
              aberration coefficient is returned;
            - if 'which' is 'both', then a tuple of dictionaries containing
              Wavefront aberration coefficients and Seidel aberration
              coefficients is returned.
        """
        # note: the literal string 'None' (not the None object) is passed
        # as the settings file, matching what zGetTextFile expects below
        settings = _txtAndSettingsToUse(self, txtFile, 'None', 'Sei')
        textFileName, _, _ = settings
        # dump the Seidel ('Sei') analysis as text with default settings
        ret = self.zGetTextFile(textFileName,'Sei', 'None', 0)
        assert ret == 0
        recSystemData = self.zGetSystem() # Get the current system parameters
        # first element of the system data tuple is the number of surfaces;
        # it determines where the totals row sits in the Seidel table
        numSurf = recSystemData[0]
        line_list = _readLinesFromFile(_openFile(textFileName))
        seidelAberrationCoefficients = {} # Aberration Coefficients
        seidelWaveAberrationCoefficients = {} # Wavefront Aberr. Coefficients
        # scan for the two section headers; the line offsets below follow
        # the layout of the Zemax Seidel text output (header two lines
        # after the section marker, totals row numSurf+3 lines after) --
        # TODO confirm offsets hold across Zemax versions
        for line_num, line in enumerate(line_list):
            # Get the Seidel aberration coefficients
            sectionString1 = ("Seidel Aberration Coefficients:")
            if line.rstrip()== sectionString1:
                sac_keys_tmp = line_list[line_num + 2].rstrip()[7:] # remove "Surf" and "\n" from start and end
                sac_keys = sac_keys_tmp.split('    ')
                # totals row: drop the leading row label, keep the values
                sac_vals = line_list[line_num + numSurf+3].split()[1:]
            # Get the Seidel Wavefront Aberration Coefficients (swac)
            sectionString2 = ("Wavefront Aberration Coefficient Summary:")
            if line.rstrip()== sectionString2:
                swac_keys01 = line_list[line_num + 2].split() # names
                swac_vals01 = line_list[line_num + 3].split()[1:] # values
                swac_keys02 = line_list[line_num + 5].split() # names
                swac_vals02 = line_list[line_num + 6].split()[1:] # values
                break
        else:
            # for-else: runs only if the loop never hit 'break', i.e. the
            # wavefront summary section was never found in the text file
            raise Exception("Could not find section strings '{}'"
            " and '{}' in seidel aberrations file. "
            " \n\nPlease check if there is a mismatch in text encoding between"
            " Zemax and PyZDDE.".format(sectionString1, sectionString2))
        # Assert if the lengths of key-value lists are not equal
        assert len(sac_keys) == len(sac_vals)
        assert len(swac_keys01) == len(swac_vals01)
        assert len(swac_keys02) == len(swac_vals02)
        # Create the dictionary
        for k, v in zip(sac_keys, sac_vals):
            seidelAberrationCoefficients[k] = float(v)
        for k, v in zip(swac_keys01, swac_vals01):
            seidelWaveAberrationCoefficients[k] = float(v)
        for k, v in zip(swac_keys02, swac_vals02):
            seidelWaveAberrationCoefficients[k] = float(v)
        if not keepFile:
            _deleteFile(textFileName)
        if which == 'wave':
            return seidelWaveAberrationCoefficients
        elif which == 'aber':
            return seidelAberrationCoefficients
        elif which == 'both':
            return seidelWaveAberrationCoefficients, seidelAberrationCoefficients
        else:
            # unrecognized 'which' silently yields None
            return None
def zGetZernike(self, which='fringe', settingsFile=None, txtFile=None,
keepFile=False, timeout=5):
"""returns the Zernike Fringe, Standard, or Annular coefficients
for the currently loaded lens file.
It provides similar functionality to ZPL command "GETZERNIKE". The
only difference is that this function returns "Peak to valley to
centroid" in the `zInfo` metadata instead of "RMS to the zero OPD
line)
Parameters
----------
which : string, optional
``fringe`` for "Fringe" zernike terms (default), ``standard``
for "Standard" zernike terms, and ``annular`` for "Annular"
zernike terms.
settingsFile : string, optional
* if passed, the aberration coefficient analysis will be called
with the given configuration file (settings);
* if no ``settingsFile`` is passed, and a config file ending
with the same name as the lens-file post-fixed with
"_pyzdde_ZFR.CFG"/"_pyzdde_ZST.CFG"/"_pyzdde_ZAT.CFG" is
present, the settings from this file will be used;
* if no ``settingsFile`` and no file-name post-fixed with
"_pyzdde_ZFR.CFG"/"_pyzdde_ZST.CFG"/"_pyzdde_ZAT.CFG" is
found, but a config file with the same name as the lens file
is present, the settings from that file will be used;
* if none of the above types of settings file is found, then a
default settings will be used
txtFile : string, optional
if passed, the aberration coefficient analysis text file will
be named such. Pass a specific ``txtFile`` if you want to dump
the file into a separate directory.
keepFile : bool, optional
if ``False`` (default), the aberration coefficient text file
will be deleted after use.
If ``True``, the file will persist. If ``keepFile`` is ``True``
but a ``txtFile`` is not passed, the analysis text file will be
saved in the same directory as the lens (provided the required
folder access permissions are available)
timeout : integer, optional
timeout in seconds.
Returns
-------
zInfo : named tuple
the 8-tuple contains 1. Peak to Valley (to chief), 2. Peak to
valley (to centroid), 3. RMS to chief ray, 4. RMS to image
centroid, 5. Variance, 6. Strehl ratio, 7. RMS fit error, and
8. Maximum fit error. All parameters except for Strehl ratio
has units of waves.
zCoeff : 1-D named tuple
the actual Zernike Fringe, Standard, or Annular coefficients.
The coefficient names conform to the Zemax manual naming
staring from Z1, Z2, Z3 .... (see example below)
Notes
-----
1. As of current writing, Zemax doesn't provide a way to modify the
parameters of any aberration coefficient settings file through
extensions. Thus a settings file for the aberration coefficients
analysis has to be created manually using the Zemax menu (as
opposed to programmatic creation and modification of settings)
Examples
--------
>>> zInfo, zCoeff = ln.zGetZernike(which='fringe')
>>> zInfo
zInfo(pToVChief=0.08397624, pToVCentroid=0.08397624, rmsToChief=0.02455132, rmsToCentroid=0.02455132, variance=0.00060277, strehl=0.9764846, rmsFitErr=0.0, maxFitErr=0.0)
>>> print(zInfo.rmsToChief)
0.02455132
>>> print(zCoeff)
zCoeff(Z1=-0.55311265, Z2=0.0, Z3=0.0, Z4=-0.34152763, Z5=0.0, Z6=0.0, Z7=0.0, Z8=0.0, Z9=0.19277286, Z10=0.0, Z11=0.0, Z12=0.0, Z13=0.0, Z14=0.0, Z15=0.0, Z16=-0.01968138, Z17=0.0, Z18=0.0, Z19=0.0, Z20=0.0, Z21=0.0, Z22=0.0, Z23=0.0, Z24=0.0, Z25=-0.00091852, Z26=0.0, Z27=0.0, Z28=0.0, Z29=0.0, Z30=0.0, Z31=0.0, Z32=0.0, Z33=0.0, Z34=0.0, Z35=0.0, Z36=-3.368e-05, Z37=-1.44e-06)
>>> print(zCoeff.Z1) # zCoeff.Z1 is same as zCoeff[0]
-0.55311265
"""
anaTypeDict = {'fringe':'Zfr', 'standard':'Zst', 'annular':'Zat'}
assert which in anaTypeDict
anaType = anaTypeDict[which]
settings = _txtAndSettingsToUse(self, txtFile, settingsFile, anaType)
textFileName, cfgFile, getTextFlag = settings
ret = self.zGetTextFile(textFileName, anaType, cfgFile, getTextFlag,
timeout)
assert ret == 0
line_list = _readLinesFromFile(_openFile(textFileName))
line_list_len = len(line_list)
# Extract Meta data
meta_patterns = ["Peak to Valley\s+\(to chief\)",
"Peak to Valley\s+\(to centroid\)",
"RMS\s+\(to chief\)",
"RMS\s+\(to centroid\)",
"Variance",
"Strehl Ratio",
"RMS fit error",
"Maximum fit error"]
meta = []
for i, pat in enumerate(meta_patterns):
line_index = _getFirstLineOfInterest(line_list, pat)
if line_index is not None:
meta_line = line_list[line_index]
meta.append(float(_re.search(r'\d{1,3}\.\d{4,8}', meta_line).group()))
else:
meta.append(_math.nan)
info = _co.namedtuple('zInfo', ['pToVChief', 'pToVCentroid', 'rmsToChief',
'rmsToCentroid', 'variance', 'strehl',
'rmsFitErr', 'maxFitErr'])
zInfo = info(*meta)
# Extract coefficients
start_line_pat = "Z\s+1\s+-?\d{1,3}\.\d{4,8}"
start_line = _getFirstLineOfInterest(line_list, start_line_pat)
if start_line is not None: # Zernikes obtained successfully
coeff_pat = _re.compile("-?\d{1,3}\.\d{4,8}")
zCoeffs = [0] * (line_list_len - start_line)
for i, line in enumerate(line_list[start_line:]):
zCoeffs[i] = float(_re.findall(coeff_pat, line)[0])
zCoeffId = _co.namedtuple('zCoeff', ['Z{}'.format(i + 1) for i in range(line_list_len - start_line)])
else: # Zernikes were not obtained
# Return maximum amount of zernike coefficients, filled with NaN
if which.lower() == 'fringe':
maxZern = 37 # maximum for the fringe zernike coefficients
else:
maxZern = 231 # maximum for standard and annular zernike coefficients
zCoeffs = [_math.nan] * maxZern
zCoeffId = _co.namedtuple('zCoeff', ['Z{}'.format(i) for i in range(1, maxZern + 1)])
zCoeff = zCoeffId(*zCoeffs)
if not keepFile:
_deleteFile(textFileName)
return zInfo, zCoeff
# -------------------
# Tools functions
# -------------------
# System modification functions
    def zLensScale(self, factor=2.0, ignoreSurfaces=None):
        """Scale the lens design by factor specified.
        ``Usage: zLensScale([factor,ignoreSurfaces]) -> ret``
        Parameters
        ----------
        factor : float
            the scale factor. If no factor are passed, the design will
            be scaled by a factor of 2.0
        ignoreSurfaces : tuple
            a tuple of surfaces that are not to be scaled. Such as
            (0,2,3) to ignore surfaces 0 (object surface), 2 and 3.
            Or (OBJ, 2, STO, IMG) to ignore object surface, surface
            number 2, stop surface and image surface.
        Returns
        -------
        status : integer
            0 = success; 1 = success with warning; -1 = failure;
        .. warning::
            1. This function implementation is not yet complete.
                * Not all surfaces have been implemented.
                * ``ignoreSurface`` not implemented yet.
            2. (Limitations) Cannot scale pupil shift x,y, and z in the
               General settings as Zemax hasn't provided any command to
               do so using the extensions. The pupil shift values are also
               scaled, when a lens design is scaled, when the ray-aiming
               is on. However, this is not a serious limitation for most
               cases.
        """
        ret = 0 # assuming successful return
        lensFile = self.zGetFile()
        if factor == 1:
            return ret
        #Scale the "system aperture" appropriately
        sysAperData = self.zGetSystemAper()
        if sysAperData[0] == 0: # System aperture if EPD
            stopSurf = sysAperData[1]
            aptVal = sysAperData[2]
            self.zSetSystemAper(0,stopSurf,factor*aptVal)
        elif sysAperData[0] in (1,2,4): # Image Space F/#, Object Space NA, Working Para F/#
            # these aperture definitions are dimensionless ratios/angles and
            # are left untouched (scaling them is likely incorrect anyway)
            ##print(Warning: Scaling of aperture may be incorrect)
            pass
        elif sysAperData[0] == 3: # System aperture if float by stop
            pass
        elif sysAperData[0] == 5: # Object Cone Angle
            print(("WARNING: Scaling OCA aperture type may be incorrect for {lF}"
                   .format(lF=lensFile)))
            ret = 1
        #Get the number of surfaces
        numSurf = 0
        recSystemData_g = self.zGetSystem() #Get the current system parameters
        numSurf = recSystemData_g[0]
        #print("Number of surfaces in the lens: ", numSurf)
        if recSystemData_g[4] > 0:
            print("Warning: Ray aiming is ON in {lF}. But cannot scale"
                  " Pupil Shift values.".format(lF=lensFile))
        #Scale individual surface properties in the LDE
        for surfNum in range(0,numSurf+1): #Start from the object surface ... to scale thickness if not infinity
            #Scale the basic data common to all surface types such as radius, thickness
            #and semi-diameter
            surfName = self.zGetSurfaceData(surfNum,0)
            curv = self.zGetSurfaceData(surfNum,2)
            thickness = self.zGetSurfaceData(surfNum,3)
            semiDiam = self.zGetSurfaceData(surfNum,5)
            ##print("Surf#:",surfNum,"Name:",surfName,"Curvature:",curv,"Thickness:",thickness,"Semi-Diameter:", semiDiam)
            #scale the basic data
            # note: curvature (1/radius) divides by the factor; lengths
            # (thickness, semi-diameter) multiply by the factor
            scaledCurv = self.zSetSurfaceData(surfNum,2,curv/factor)
            if thickness < 1.0E+10: #Scale the thickness if it not Infinity (-1.0E+10 in Zemax)
                scaledThickness = self.zSetSurfaceData(surfNum,3,factor*thickness)
            scaledSemiDiam = self.zSetSurfaceData(surfNum,5,factor*semiDiam)
            ##print("scaled", surfNum,surfName,scaledCurv,scaledThickness,scaledSemiDiam)
            #scaling parameters of surface individually
            if surfName == 'STANDARD': #Std surface - plane, spherical, or conic aspheric
                pass #Nothing to do
            elif surfName in ('BINARY_1','BINARY_2'):
                binSurMaxNum = {'BINARY_1':233,'BINARY_2':243}
                # a polynomial coefficient of r^(2*pNum) must scale as
                # factor**(1-2*pNum) so the surface sag scales linearly
                for pNum in range(1,9): # from Par 1 to Par 8
                    par = self.zGetSurfaceParameter(surfNum,pNum)
                    self.zSetSurfaceParameter(surfNum,pNum,
                                              factor**(1-2.0*pNum)*par)
                #Scale norm radius in the extra data editor
                epar2 = self.zGetExtra(surfNum,2) #Norm radius
                self.zSetExtra(surfNum,2,factor*epar2)
                #scale the coefficients of the Zernike Fringe polynomial terms in the EDE
                numBTerms = int(self.zGetExtra(surfNum,1))
                if numBTerms > 0:
                    for i in range(3,binSurMaxNum[surfName]): # scaling of terms 3 to 232, p^480
                                                              # for Binary1 and Binary 2 respectively
                        if i > numBTerms + 2: #(+2 because the terms starts from par 3)
                            break
                        else:
                            epar = self.zGetExtra(surfNum,i)
                            self.zSetExtra(surfNum,i,factor*epar)
            elif surfName == 'BINARY_3':
                #Scaling of parameters in the LDE
                par1 = self.zGetSurfaceParameter(surfNum,1) # R2
                self.zSetSurfaceParameter(surfNum,1,factor*par1)
                par4 = self.zGetSurfaceParameter(surfNum,4) # A2, need to scale A2 before A1,
                                                            # because A2>A1>0.0 always
                self.zSetSurfaceParameter(surfNum,4,factor*par4)
                par3 = self.zGetSurfaceParameter(surfNum,3) # A1
                self.zSetSurfaceParameter(surfNum,3,factor*par3)
                numBTerms = int(self.zGetExtra(surfNum,1)) #Max possible is 60
                # extra data comes in groups of 4 per term: (r1, p1, r2, p2);
                # radial terms divide by factor**(i/2), phase terms multiply
                for i in range(2,243,4): #242
                    if i > 4*numBTerms + 1: #(+1 because the terms starts from par 2)
                        break
                    else:
                        par_r1 = self.zGetExtra(surfNum,i)
                        self.zSetExtra(surfNum,i,par_r1/factor**(i/2))
                        par_p1 = self.zGetExtra(surfNum,i+1)
                        self.zSetExtra(surfNum,i+1,factor*par_p1)
                        par_r2 = self.zGetExtra(surfNum,i+2)
                        self.zSetExtra(surfNum,i+2,par_r2/factor**(i/2))
                        par_p2 = self.zGetExtra(surfNum,i+3)
                        self.zSetExtra(surfNum,i+3,factor*par_p2)
            elif surfName == 'COORDBRK': #Coordinate break,
                par = self.zGetSurfaceParameter(surfNum,1) # decenter X
                self.zSetSurfaceParameter(surfNum,1,factor*par)
                par = self.zGetSurfaceParameter(surfNum,2) # decenter Y
                self.zSetSurfaceParameter(surfNum,2,factor*par)
            elif surfName == 'EVENASPH': #Even Asphere,
                # coefficient of r^(2*pNum) scales as factor**(1-2*pNum)
                for pNum in range(1,9): # from Par 1 to Par 8
                    par = self.zGetSurfaceParameter(surfNum,pNum)
                    self.zSetSurfaceParameter(surfNum,pNum,
                                              factor**(1-2.0*pNum)*par)
            elif surfName == 'GRINSUR1': #Gradient1
                par1 = self.zGetSurfaceParameter(surfNum,1) #Delta T
                self.zSetSurfaceParameter(surfNum,1,factor*par1)
                par3 = self.zGetSurfaceParameter(surfNum,3) #coeff of radial quadratic index
                self.zSetSurfaceParameter(surfNum,3,par3/(factor**2))
                par4 = self.zGetSurfaceParameter(surfNum,4) #index of radial linear index
                self.zSetSurfaceParameter(surfNum,4,par4/factor)
            elif surfName == 'GRINSUR9': #Gradient9
                par = self.zGetSurfaceParameter(surfNum,1) #Delta T
                self.zSetSurfaceParameter(surfNum,1,factor*par)
            elif surfName == 'GRINSU11': #Grid Gradient surface with 1 parameter
                par = self.zGetSurfaceParameter(surfNum,1) #Delta T
                self.zSetSurfaceParameter(surfNum,1,factor*par)
            elif surfName == 'PARAXIAL': #Paraxial
                par = self.zGetSurfaceParameter(surfNum,1) #Focal length
                self.zSetSurfaceParameter(surfNum,1,factor*par)
            elif surfName == 'PARAX_XY': #Paraxial XY
                # optical power has units of 1/length, so it divides by factor
                par = self.zGetSurfaceParameter(surfNum,1) # X power
                self.zSetSurfaceParameter(surfNum,1,par/factor)
                par = self.zGetSurfaceParameter(surfNum,2) # Y power
                self.zSetSurfaceParameter(surfNum,2,par/factor)
            elif surfName == 'PERIODIC':
                par = self.zGetSurfaceParameter(surfNum,1) #Amplitude/ peak to valley height
                self.zSetSurfaceParameter(surfNum,1,factor*par)
                par = self.zGetSurfaceParameter(surfNum,2) #spatial frequency of oscillation in x
                self.zSetSurfaceParameter(surfNum,2,par/factor)
                par = self.zGetSurfaceParameter(surfNum,3) #spatial frequency of oscillation in y
                self.zSetSurfaceParameter(surfNum,3,par/factor)
            elif surfName == 'POLYNOMI':
                for pNum in range(1,5): # from Par 1 to Par 4 for x then Par 5 to Par 8 for y
                    parx = self.zGetSurfaceParameter(surfNum,pNum)
                    pary = self.zGetSurfaceParameter(surfNum,pNum+4)
                    self.zSetSurfaceParameter(surfNum,pNum,
                                              factor**(1-2.0*pNum)*parx)
                    self.zSetSurfaceParameter(surfNum,pNum+4,
                                              factor**(1-2.0*pNum)*pary)
            elif surfName == 'TILTSURF': #Tilted surface
                pass           #No parameters to scale
            elif surfName == 'TOROIDAL':
                par = self.zGetSurfaceParameter(surfNum,1) #Radius of rotation
                self.zSetSurfaceParameter(surfNum, 1, factor*par)
                for pNum in range(2,9): # from Par 2 to Par 8 (polynomial terms)
                    par = self.zGetSurfaceParameter(surfNum,pNum)
                    self.zSetSurfaceParameter(surfNum,pNum,
                                              factor**(1-2.0*(pNum-1))*par)
                #scale parameters from the extra data editor
                epar = self.zGetExtra(surfNum, 2)
                self.zSetExtra(surfNum, 2, factor*epar)
            elif surfName == 'FZERNSAG': # Zernike fringe sag
                for pNum in range(1,9): # from Par 1 to Par 8
                    par = self.zGetSurfaceParameter(surfNum,pNum)
                    self.zSetSurfaceParameter(surfNum,pNum,
                                              factor**(1-2.0*pNum)*par)
                par9 = self.zGetSurfaceParameter(surfNum,9) # decenter X
                self.zSetSurfaceParameter(surfNum,9,factor*par9)
                par10 = self.zGetSurfaceParameter(surfNum,10) # decenter Y
                self.zSetSurfaceParameter(surfNum,10,factor*par10)
                #Scale norm radius in the extra data editor
                epar2 = self.zGetExtra(surfNum,2) #Norm radius
                self.zSetExtra(surfNum,2,factor*epar2)
                #scale the coefficients of the Zernike Fringe polynomial terms in the EDE
                numZerTerms = int(self.zGetExtra(surfNum,1))
                if numZerTerms > 0:
                    epar3 = self.zGetExtra(surfNum,3) #Zernike Term 1
                    self.zSetExtra(surfNum,3,factor*epar3)
                    #Zernike terms 2,3,4,5 and 6 are not scaled.
                    for i in range(9,40): #scaling of Zernike terms 7 to 37
                        if i > numZerTerms + 2: #(+2 because the Zernike terms starts from par 3)
                            break
                        else:
                            epar = self.zGetExtra(surfNum,i)
                            self.zSetExtra(surfNum,i,factor*epar)
            else:
                print(("WARNING: Scaling for surf type {sN} in file {lF} not implemented!!"
                      .format(sN=surfName,lF=lensFile)))
                ret = -1
                pass
        #Scale appropriate parameters in the Multi-configuration editor, such as THIC, APER ...
        #maybe, use GetConfig(), SetConfig() and GetMulticon
        #Scale appropriate parameters in the Tolerance Data Editor
        #Scale the parameters in the Field data Editor if the field positions are
        #NOT of angle type.
        (fType,fNum,fxMax,fyMax,fNorm) = self.zGetField(0)
        if fType != 0:
            fieldDataTuple = self.zGetFieldTuple()
            fieldDataTupleScaled = []
            for i in range(fNum):
                tField = list(fieldDataTuple[i])
                tField[0],tField[1] = factor*tField[0],factor*tField[1]
                fieldDataTupleScaled.append(tuple(tField))
            fieldDataTupleScaled = self.zSetFieldTuple(fType,fNorm,
                                                       tuple(fieldDataTupleScaled))
        return ret
# Design functions
def zOptimize2(self, numCycle=1, algo=0, histLen=5, precision=1e-12,
minMF=1e-15, tMinCycles=5, tMaxCycles=None, timeout=None):
"""A wrapper around zOptimize() providing few control features
Parameters
----------
numCycles : integer
number of cycles per DDE call to optimization (default=1)
algo : integer
0=DLS, 1=Orthogonal descent (default=0)
histLen : integer
length of the array of past merit functions returned from each
DDE call to ``zOptimize()`` for determining steady state of
merit function values (default=5)
precision : float
minimum acceptable absolute difference between the merit-
function values in the array for steady state computation
(default=1e-12)
minMF : float
minimum Merit Function following which to the optimization
loop is to be terminated even if a steady state hasn't reached.
This might be useful if a target merit function is desired.
tMinCycles : integer
total number of cycles to run optimization at the very least.
This is NOT the number of cycles per DDE call, but it is
calculated by multiplying the number of cycles per DDL
optimize call to the total number of DDE calls (default=5).
tMaxCycles : integer
the maximum number of cycles after which the optimizaiton
should be terminated even if a steady state hasn't reached
timeout : integer
timeout value, in seconds, used in each pass
Returns
-------
finalMerit : float
the final merit function.
tCycles : integer
total number of cycles calculated by multiplying the number
of cycles per DDL optimize call to the total number of DDE
calls.
Notes
-----
``zOptimize2()`` basically calls ``zOptimize()`` mutiple number of
times in a loop. It can be useful if a large number of optimization
cycles are required.
"""
mfvList = [0.0]*histLen # create a list of zeros
count = 0
mfvSettled = False
finalMerit = 9e9
tCycles = 0
if not tMaxCycles:
tMaxCycles = 2**31 - 1 # Largest plain positive integer value
while not mfvSettled and (finalMerit > minMF) and (tCycles < tMaxCycles):
finalMerit = self.zOptimize(numCycle, algo, timeout)
self.zOptimize(-1,algo) # update all the operands in the MFE (not necessary?)
if finalMerit > 8.9999e9: # optimization failure (Zemax returned 9.0E+009)
break
# populate mfvList in circular fashion
mfvList[count % histLen] = finalMerit
if (tCycles >= tMinCycles-1): # only after the minimum number of cycles are over,
# test to see if the merit-function has settled down
mfvList_shifted = mfvList[:-1]
mfvList_shifted.append(mfvList[0])
for i,j in zip(mfvList,mfvList_shifted):
if abs(i-j) >= precision:
break
else:
mfvSettled = True
count += 1
tCycles = count*numCycle
return (finalMerit, tCycles)
# Other functions
def zExecuteZPLMacro(self, zplMacroCode, timeout=None):
"""Executes a ZPL macro present in the <data>/Macros folder.
Parameters
----------
zplMacroCode : string
The first 3 letters (case-sensitive) of the ZPL macro present
in the <data>/Macros folder
timeout : integer
timeout value in seconds
Returns
--------
status : integer (0 or 1)
0 = successfully executed the ZPL macro;
-1 = macro code is incorrect & error code returned by Zemax
Notes
-----
If the macro path is different from the default macro path at
``<data>/Macros``, then first use ``zSetMacroPath()`` to set the
macropath and then use ``zExecuteZPLMacro()``.
.. warning::
1. can only "execute" an existing ZPL macro. i.e. you can't
create a ZPL macro on-the-fly and execute it.
2. If it is required to redirect the result of executing the ZPL
to a text file, modify the ZPL macro in the following way:
- Add the following two lines at the beginning of the file:
``CLOSEWINDOW`` # to suppress the display of default text window
``OUTPUT "full_path_with_extension_of_result_fileName"``
- Add the following line at the end of the file:
``OUTPUT SCREEN`` # close the file and re-enable screen printing
3. If there are more than one macros which have the same first 3 letters
then the top macro in the list as sorted by the filesystem
will be executed.
"""
status = -1
if self._macroPath:
zplMpath = self._macroPath
else:
zplMpath = _os.path.join(self.zGetPath()[0], 'Macros')
macroList = [f for f in _os.listdir(zplMpath)
if f.endswith(('.zpl','.ZPL')) and f.startswith(zplMacroCode)]
if macroList:
zplCode = macroList[0][:3]
status = self.zOpenWindow(zplCode, True, timeout)
return status
def zSetMacroPath(self, macroFolderPath):
"""Set the full path name to the macro folder
Parameters
----------
macroFolderPath : string
full-path name of the macro folder path. Also, this folder
path should match the folder path specified for Macros in the
Zemax Preferences setting.
Returns
-------
status : integer
0 = success; -1 = failure
Notes
-----
Use this method to set the full-path name of the macro folder
path if it is different from the default path at <data>/Macros
See Also
--------
zExecuteZPLMacro()
"""
if _os.path.isabs(macroFolderPath):
self._macroPath = macroFolderPath
return 0
else:
return -1
# -------------------
# Report functions
# -------------------
def zGetImageSpaceNA(self):
"""Return the Image Space Numerical Aperture (ISNA) of the lens
Parameters
----------
None
Returns
-------
isna : real
image space numerical aperture
Notes
-----
1. The ISNA is calculated using paraxial ray tracing. It is defined
as the index of the image space multiplied by the sine of the
angle between the paraxial on-axis chief ray and the paraxial
on-axis +y marginal ray calculated at the defined conjugates for
the primary wavelength [UPRT]_.
2. Relation to F-number :
``isna = pyz.fnum2numAper(paraxial_working_fnumber)``
References
----------
.. [UPRT] Understanding Paraxial Ray-Tracing, Mark Nicholson, Zemax
Knowledgebase, July 21, 2005.
See Also
--------
pyz.numAper2fnum()
"""
prim_wave_num = self.zGetPrimaryWave()
last_surf = self.zGetNumSurf()
# Trace paraxial on-axis chief ray at primary wavelength
chief_ray_dat = self.zGetTrace(prim_wave_num, mode=1, surf=last_surf,
hx=0, hy=0, px=0, py=0)
chief_angle = _math.asin(chief_ray_dat[6])
# Trace paraxial marginal ray at primary wavelength
margi_ray_dat = self.zGetTrace(prim_wave_num, mode=1, surf=last_surf,
hx=0, hy=0, px=0, py=1)
margi_angle = _math.asin(margi_ray_dat[6])
index = self.zGetIndexPrimWave(last_surf)
return index*_math.sin(chief_angle - margi_angle)
def zGetIndexPrimWave(self, surfNum):
"""Returns the index of refraction at primary wavelength for the
specified surface
Emulates the ZPL macro ``INDX(surface)``
Parameters
----------
surfNum : integer
surface number
Returns
-------
index : float
index of refraction at primary wavelength
See Also
--------
zGetIndex()
"""
prime_wave_num = self.zGetPrimaryWave()
return self.zGetIndex(surfNum)[prime_wave_num-1]
def zGetHiatus(self, txtFile=None, keepFile=False):
"""Returns the Hiatus, which is the distance between the two
principal planes of the optical system
Parameters
----------
txtFile : string, optional
if passed, the prescription file will be named such. Pass a
specific ``txtFile`` if you want to dump the file into a
separate directory.
keepFile : bool, optional
if ``False`` (default), the prescription file will be deleted
after use.
If ``True``, the file will persist. If ``keepFile`` is ``True``
but a ``txtFile`` is not passed, the prescription file will be
saved in the same directory as the lens (provided the required
folder access permissions are available)
Returns
-------
hiatus : float
the value of the hiatus
Notes
-----
The hiatus is also known as the Null space or nodal space or the
interstitium.
"""
settings = _txtAndSettingsToUse(self, txtFile, 'None', 'Pre')
textFileName, _, _ = settings
sysProp = self.zGetSystem()
numSurf = sysProp.numSurf
# Since the object space cardinal points are reported w.r.t. the
# surface 1, ensure that surface 1 is global reference surface
if sysProp.globalRefSurf is not 1:
self.zSetSystem(unitCode=sysProp.unitCode, stopSurf=sysProp.stopSurf,
rayAimingType=sysProp.rayAimingType, temp=sysProp.temp,
pressure=sysProp.pressure, globalRefSurf=1)
ret = self.zGetTextFile(textFileName, 'Pre', "None", 0)
assert ret == 0
# The number of expected Principal planes in each Pre file is equal to the
# number of wavelengths in the general settings of the lens design
line_list = _readLinesFromFile(_openFile(textFileName))
principalPlane_objSpace = 0.0
principalPlane_imgSpace = 0.0
hiatus = 0.0
count = 0
for line_num, line in enumerate(line_list):
# Extract the image surface distance from the global ref sur (surface 1)
sectionString = ("GLOBAL VERTEX COORDINATES, ORIENTATIONS,"
" AND ROTATION/OFFSET MATRICES:")
if line.rstrip() == sectionString:
ima_3 = line_list[line_num + numSurf*4 + 6]
ima_z = float(ima_3.split()[3])
# Extract the Principal plane distances.
if "Principal Planes" in line and "Anti" not in line:
principalPlane_objSpace += float(line.split()[3])
principalPlane_imgSpace += float(line.split()[4])
count +=1 #Increment (wavelength) counter for averaging
# Calculate the average (for all wavelengths) of the principal plane distances
if count > 0:
principalPlane_objSpace = principalPlane_objSpace/count
principalPlane_imgSpace = principalPlane_imgSpace/count
# Calculate the hiatus (only if count > 0) as
hiatus = abs(ima_z + principalPlane_imgSpace - principalPlane_objSpace)
# Restore the Global ref surface if it was changed
if sysProp.globalRefSurf is not 1:
self.zSetSystem(unitCode=sysProp.unitCode, stopSurf=sysProp.stopSurf,
rayAimingType=sysProp.rayAimingType, temp=sysProp.temp,
pressure=sysProp.pressure,
globalRefSurf=sysProp.globalRefSurf)
if not keepFile:
_deleteFile(textFileName)
return hiatus
def zGetPupilMagnification(self):
"""Return the pupil magnification, which is the ratio of the
exit-pupil diameter to the entrance pupil diameter.
The pupils are paraxial pupils.
Parameters
----------
None
Returns
-------
pupilMag : real
the pupil magnification
"""
_, _, ENPD, ENPP, EXPD, EXPP, _, _ = self.zGetPupil()
return (EXPD/ENPD)
def zGetOpticalPathLength(self, surf1=0, surf2=2, hx=0, hy=0, px=0, py=0):
"""Returns the total optical path length (OPL) between surfaces
surf1 and surf2 for a ray traced at primary wavelength
Parameters
----------
surf1 : integer, optional
start surface number
surf2 : integer, optional
end surface number
hx : float, optional
normalized field coordinate along x
hy : float, optional
normalized field coordinate along y
px : float, optional
normalized pupil coordinate along x
py : float, optional
normalized pupil coordinate along y
Returns
-------
oplen : float
total optical path length (including refraction and phase
surfaces) between surfaces
Notes
-----
The function uses the optimization operand "PLEN" to retrieve
the value of the optical path length
See Also
--------
zGetOpticalPathDifference()
"""
oplen = self.zOperandValue('PLEN', surf1, surf2, hx, hy, px, py)
return oplen
def zGetOpticalPathDifference(self, hx=0, hy=0, px=0, py=0, ref=0, wave=None):
"""Returns the optical path difference (OPD) with respect to the
chief ray or mean OPD in waves
Parameters
----------
hx : float, optional
normalized field coordinate along x
hy : float, optional
normalized field coordinate along y
px : float, optional
normalized pupil coordinate along x
py : float, optional
normalized pupil coordinate along y
ref : integer, optional
integer code to indicate reference ray/OPD.
0 = chief ray (Default); 1 = mean OPD over the pupil;
2 = mean OPD over the pupil with tilt removed
wave : integer, optional
wavelength number to trace ray. If ``None``, the ray is
traced at the primary wavelength.
Returns
-------
opd : float
optical path difference
See Also
--------
zGetOpticalPathLength()
"""
if ref == 2:
code = 'OPDX'
elif ref == 1:
code = 'OPDM'
elif ref == 0:
code = 'OPDC'
else:
raise ValueError("Unexpected ref input value")
if wave is None:
wave = self.zGetWave(self.zGetPrimaryWave()).wavelength
opd = self.zOperandValue(code, 0, wave, hx, hy, px, py)
return opd
def zGetSemiDiameter(self, surfNum):
"""Get the Semi-Diameter value of the surface with number `surfNum`
Parameters
----------
surfNum : integer
surface number
Returns
-------
semidia : real
semi-diameter of the surface
"""
return self.zGetSurfaceData(surfNum=surfNum, code=self.SDAT_SEMIDIA)
def zSetSemiDiameter(self, surfNum, value=0):
"""Set the Semi-Diameter of the surface with number `surfNum`.
A "fixed" solve type is set on the semi-diameter of the surface.
Parameters
----------
surfNum : integer
surface number
value : real, optional
value of the semi-diameter to set
Returns
-------
semidia : real
value of the semi-diameter of the surface after setting it.
"""
self.zSetSolve(surfNum, self.SOLVE_SPAR_SEMIDIA, self.SOLVE_SEMIDIA_FIXED)
return self.zSetSurfaceData(surfNum=surfNum, code=self.SDAT_SEMIDIA, value=value)
def zGetThickness(self, surfNum):
"""Get the Thickness value of the surface with number `surfNum`
Parameters
----------
surfNum : integer
surface number
Returns
-------
thick : real
thickness of the surface
"""
return self.zGetSurfaceData(surfNum=surfNum, code=self.SDAT_THICK)
def zSetThickness(self, surfNum, value=0):
"""Set the thickness of the surface with number `surfNum`.
Parameters
----------
surfNum: integer
surface number
value : real, optional
value of the thickness to set
Returns
-------
thick : real
value of the thickness of the surface after setting it.
"""
return self.zSetSurfaceData(surfNum=surfNum, code=self.SDAT_THICK, value=value)
def zGetRadius(self, surfNum):
"""Get the radius of the surface with number `surfNum`.
Parameters
----------
surfNum : integer
surface number
Returns
-------
radius : real
radius of the surface
"""
value = self.zGetSurfaceData(surfNum=surfNum, code=self.SDAT_CURV)
radius = 1.0/value if value else 1E10
return radius
def zSetRadius(self, surfNum, value=1E10):
"""Set the radius of the surface with number `surfNum`.
Parameters
----------
surfNum : integer
surface number
value : real
radius of the surface
Returns
-------
radius : real
radius of the surface
"""
curv = 1.0/value if value else 1E10
ret = self.zSetSurfaceData(surfNum=surfNum, code=self.SDAT_CURV, value=curv)
return 1.0/ret if ret else 1E10
def zSetGlass(self, surfNum, value=''):
"""Set the glass of the surface with number `surfNum`
Parameters
----------
surfNum : integer
surface number
value : string
valid glass string code
Returns
-------
glass : string
glass for the surface
"""
return self.zSetSurfaceData(surfNum=surfNum, code=self.SDAT_GLASS, value=value)
def zGetConic(self, surfNum):
"""Get the conic value of the surface with number `surfNum`
Parameters
----------
surfNum : integer
surface number
Returns
-------
conic : real
conic of the surface
"""
return self.zGetSurfaceData(surfNum=surfNum, code=self.SDAT_CONIC)
def zSetConic(self, surfNum, value=0):
"""Set the conic value of the surface with number `surfNum`
Parameters
----------
surfNum : integer
surface number
value : real
conic value
Returns
-------
conic : real
conic of the surface
"""
return self.zSetSurfaceData(surfNum=surfNum, code=self.SDAT_CONIC, value=value)
def zInsertDummySurface(self, surfNum, comment='dummy', thick=None, semidia=None):
"""Insert dummy surface at surface number indicated by `surfNum`
Parameters
----------
surfNum : integer
surface number at which to insert the dummy surface
comment : string, optional, default is 'dummy'
comment on the surface
thick : real, optional
thickness of the surface
semidia : real, optional
semi diameter of the surface
Returns
-------
nsur : integer
total number of surfaces in the LDE including the new dummy surface.
"""
self.zInsertSurface(surfNum)
self.zSetSurfaceData(surfNum=surfNum, code=self.SDAT_COMMENT, value=comment)
if thick is not None:
self.zSetSurfaceData(surfNum=surfNum, code=self.SDAT_THICK, value=thick)
if semidia is not None:
self.zSetSemiDiameter(surfNum=surfNum, value=semidia)
return self.zGetNumSurf()
def zInsertCoordinateBreak(self, surfNum, xdec=0.0, ydec=0.0, xtilt=0.0, ytilt=0.0,
ztilt=0.0, order=0, thick=None, comment=None):
"""Insert Coordinate Break at the surface position indicated by `surfNum`
Parameters
----------
surfNum : integer
surface number at which to insert the coordinate break
xdec : float, optional, default = 0.0
decenter x (in lens units)
ydec : float, optional, default = 0.0
decenter y (in lens units)
xtilt : float, optional, default = 0.0
tilt about x (degrees)
ytilt : float, optional, default = 0.0
tilt about y (degrees)
ztilt : float, optional, default = 0.0
tilt about z (degrees)
order : integer (0/1), optional, default = 0
0 = decenter then tilt; 1 = tilt then decenter
thick : real, optional
set the thickness of the cb surface
comment : string, optional
surface comment
Returns
-------
ret : integer
0 if no error
"""
self.zInsertSurface(surfNum=surfNum)
self.zSetSurfaceData(surfNum=surfNum, code=self.SDAT_TYPE, value='COORDBRK')
# set the decenter and tilt values and order
params = range(1, 7)
values = [xdec, ydec, xtilt, ytilt, ztilt, order]
for par, val in zip(params, values):
self.zSetSurfaceParameter(surfNum=surfNum, param=par, value=val)
if thick is not None:
self.zSetSurfaceData(surfNum=surfNum, code=self.SDAT_THICK, value=thick)
if comment is not None:
self.zSetSurfaceData(surfNum=surfNum, code=self.SDAT_COMMENT, value=comment)
return 0
    def zTiltDecenterElements(self, firstSurf, lastSurf, xdec=0.0, ydec=0.0, xtilt=0.0,
                              ytilt=0.0, ztilt=0.0, order=0, cbComment1=None,
                              cbComment2=None, dummySemiDiaToZero=False):
        '''Tilt/decenter the elements between `firstSurf` and `lastSurf`
        (inclusive) by wrapping them in a pair of coordinate break (CB)
        surfaces, with solves locking the second CB to the first.

        Parameters
        ----------
        firstSurf : integer
            first surface of the element group
        lastSurf : integer
            last surface of the element group
        xdec : float
            decenter x (in lens units)
        ydec : float
            decenter y (in lens units)
        xtilt : float
            tilt about x (degrees)
        ytilt : float
            tilt about y (degrees)
        ztilt : float
            tilt about z (degrees)
        order : integer (0/1), optional, default = 0
            0 = decenter then tilt; 1 = tilt then decenter
        cbComment1 : string, optional, default = 'Element Tilt'
            comment on the first CB surface
        cbComment2 : string, optional, default = 'Element Tilt:return'
            comment on the second CB surface.
        dummySemiDiaToZero : bool, optional, default = False
            if `True` the semi-diameter of the dummy surface (after CB2) is
            set to zero.

        Returns
        -------
        cb1 : integer
            surface number of the first coordinate break surface
        cb2 : integer
            surface number of the second (for restoring axis) coordinate
            break surface
        dummy : integer
            surface number of the dummy surface. It is always `CB2` + 1

        Notes
        -----
        1. In total, 3 more surfaces are added to the existing system -- the
           first CB, before `firstSurf`, the second CB after `lastSurf`, and
           a dummy surface after the second CB.
        '''
        # compute the surface numbers of the three new surfaces as they will
        # be AFTER all insertions: cb1 takes firstSurf's slot, cb2 follows
        # the (shifted) element group, and the dummy follows cb2
        numSurfBetweenCBs = lastSurf - firstSurf + 1
        cb1 = firstSurf
        cb2 = cb1 + numSurfBetweenCBs + 1
        dummy = cb2 + 1
        # store the thickness and the solve on thickness (if any) of the last
        # surface; both will be transferred to the dummy surface later
        thick = self.zGetSurfaceData(surfNum=lastSurf, code=self.SDAT_THICK)
        solve = self.zGetSolve(surfNum=lastSurf, code=self.SOLVE_SPAR_THICK)
        # insert the three surfaces
        self.zInsertSurface(surfNum=cb1) # 1st cb
        self.zInsertSurface(surfNum=cb2) # 2nd cb to restore the original axis
        self.zInsertSurface(surfNum=dummy) # dummy after 2nd cb
        cbComment1 = cbComment1 if cbComment1 else 'Element Tilt'
        self.zSetSurfaceData(surfNum=cb1, code=self.SDAT_COMMENT, value=cbComment1)
        self.zSetSurfaceData(surfNum=cb1, code=self.SDAT_TYPE, value='COORDBRK')
        cbComment2 = cbComment2 if cbComment2 else 'Element Tilt:return'
        self.zSetSurfaceData(surfNum=cb2, code=self.SDAT_COMMENT, value=cbComment2)
        self.zSetSurfaceData(surfNum=cb2, code=self.SDAT_TYPE, value='COORDBRK')
        self.zSetSurfaceData(surfNum=dummy, code=self.SDAT_COMMENT, value='Dummy')
        if dummySemiDiaToZero:
            self.zSetSemiDiameter(surfNum=dummy, value=0)
        # transfer thickness of the surface just before the cb2 (originally
        # lastSurf) to the dummy surface
        lastSurf += 1 # last surface number incremented by 1 bcoz of cb 1
        self.zSetSurfaceData(surfNum=lastSurf, code=self.SDAT_THICK, value=0.0)
        self.zSetSolve(lastSurf, self.SOLVE_SPAR_THICK, self.SOLVE_THICK_FIXED)
        self.zSetSurfaceData(surfNum=dummy, code=self.SDAT_THICK, value=thick)
        # transfer the solve on the thickness (if any) of the surface just before
        # the cb2 (originally lastSurf) to the dummy surface. The param1 of
        # solve type "Thickness" may need to be modified before transferring,
        # because surface-number references shift when cb1 is inserted.
        if solve[0] in {5, 7, 8, 9}: # param1 is a integer surface number
            param1 = int(solve[1]) if solve[1] < cb1 else int(solve[1]) + 1
        else: # param1 is a floating value, or macro name
            param1 = solve[1]
        self.zSetSolve(dummy, self.SOLVE_SPAR_THICK, solve[0], param1, solve[2],
                       solve[3], solve[4])
        # use pick-up solve on glass surface of dummy to pickup from lastSurf
        self.zSetSolve(dummy, self.SOLVE_SPAR_GLASS, self.SOLVE_GLASS_PICKUP, lastSurf)
        # use pick-up solves on the second CB's parameters 1-5 (decenters and
        # tilts); a scale factor of -1 locks the second cb to the first so it
        # always undoes the first CB's transform
        pickupcolumns = range(6, 11)
        params = [self.SOLVE_SPAR_PAR1, self.SOLVE_SPAR_PAR2,
                  self.SOLVE_SPAR_PAR3, self.SOLVE_SPAR_PAR4, self.SOLVE_SPAR_PAR5]
        offset, scale = 0, -1
        for para, pcol in zip(params, pickupcolumns):
            self.zSetSolve(cb2, para, self.SOLVE_PARn_PICKUP, cb1, offset, scale, pcol)
        # Set solves to co-locate the two CBs
        # use position solve to track back through the lens
        self.zSetSolve(lastSurf, self.SOLVE_SPAR_THICK, self.SOLVE_THICK_POS, cb1 , 0)
        # use a pickup solve to restore position at the back of the lastSurf
        self.zSetSolve(cb2, self.SOLVE_SPAR_THICK, self.SOLVE_THICK_PICKUP,
                       lastSurf, scale, offset, 0)
        # set the appropriate orders on the surfaces (parameter 6 = order flag)
        if order: # Tilt and then decenter
            cb1Ord, cb2Ord = 1, 0
        else: # Decenter and then tilt (default)
            cb1Ord, cb2Ord = 0, 1
        self.zSetSurfaceParameter(surfNum=cb1, param=6, value=cb1Ord)
        self.zSetSurfaceParameter(surfNum=cb2, param=6, value=cb2Ord)
        # set the decenter and tilt values in the first cb
        params = range(1, 6)
        values = [xdec, ydec, xtilt, ytilt, ztilt]
        for par, val in zip(params, values):
            self.zSetSurfaceParameter(surfNum=cb1, param=par, value=val)
        self.zGetUpdate()
        return cb1, cb2, dummy
def zInsertNSCSourceEllipse(self, surfNum=1, objNum=1, x=0.0, y=0.0, z=0.0,
tiltX=0.0, tiltY=0.0, tiltZ=0.0, xHalfWidth=0,
yHalfWidth=0, numLayRays=20, numAnaRays=500,
refObjNum=0, insideOf=0, power=1, waveNum=0,
srcDist=0.0, cosExp=0.0, gaussGx=0.0, gaussGy=0.0,
srcX=0.0, srcY=0.0, minXHalfWidth=0.0, minYHalfWidth=0.0,
color=0, comment='', overwrite=False):
"""Insert a new NSC source ellipse at the location indicated by the
parameters ``surfNum`` and ``objNum``
Parameters
----------
surfNum : integer, optional
surface number of the NSC group. Use 1 if the program mode is
Non-Sequential
objNum : integer, optional
object number
x, y, z, tiltX, tiltY, tiltZ : floats, optional
x, y, z position and tilts about X, Y, and Z axis respectively
xHalfWidth, yHalfWidth : floats
half widths along x and y axis
numLayRays, numAnaRays : integers, optional
number of layout rays and analysis rays respectively
refObjNum : integer, optional
reference object number
insideOf : integer, optional
inside of object number
power : float, optional
power in Watts
waveNum : integer, optional
the wave number
srcDist, cosExp, gaussGx, gaussGy, srcX, srcY, minXHalfWidth, minYHalfWidth : floats
see the manual for details
color : integer, optional
The pen color to use when drawing rays from this source. If 0,
the default color will be chosen.
comment : string, optional
comment for the object
overwrite : bool, optional
if `False` (default), a new object is inserted at the position and existing
objects (if any) are pushed. If `True`, then existing at the ``objNum`` is
overwritten
Returns
-------
None
Note
----
If an object with the same number as ``objNum`` already exist in the NSCE,
that (and any subsequent) object is pushed by one row, unless ``overwrite``
is ``True``.
See Also
--------
zInsertNSCSourceRectangle()
"""
numObjsExist = self.zGetNSCData(surfNum, code=0)
if objNum > numObjsExist + 1:
raise ValueError('objNum ({}) cannot be greater than {}.'
.format(objNum, numObjsExist+1))
if not overwrite:
assert self.zInsertObject(surfNum, objNum) == 0, \
'Error inserting object at object Number {}'.format(objNum)
objData = {0:'NSC_SRCE', 1:comment, 5:refObjNum, 6:insideOf}
for code, data in objData.iteritems():
assert self.zSetNSCObjectData(surfNum, objNum, code, data) == data, \
'Error in setting NSC object code {}'.format(code)
assert self.zSetNSCPositionTuple(surfNum, objNum, x, y, z, tiltX, tiltY, tiltZ) \
== (x, y, z, tiltX, tiltY, tiltZ, '')
param = (numLayRays, numAnaRays, power, waveNum, color, xHalfWidth, yHalfWidth,
srcDist, cosExp, gaussGx, gaussGy, srcX, srcY, minXHalfWidth, minYHalfWidth)
for i, each in enumerate(param, 1):
assert self.zSetNSCParameter(surfNum, objNum, paramNum=i, data=each) == each, \
'Error in setting NSC parameter {} to {} at object {}'.format(i, param[i], objNum)
def zInsertNSCSourceRectangle(self, surfNum=1, objNum=1, x=0.0, y=0.0, z=0.0,
tiltX=0.0, tiltY=0.0, tiltZ=0.0, xHalfWidth=0,
yHalfWidth=0, numLayRays=20, numAnaRays=500,
refObjNum=0, insideOf=0, power=1, waveNum=0,
srcDist=0.0, cosExp=0.0, gaussGx=0.0, gaussGy=0.0,
srcX=0.0, srcY=0.0, color=0, comment='', overwrite=False):
"""Insert a new NSC source rectangle at the location indicated by the
parameters ``surfNum`` and ``objNum``
Parameters
----------
surfNum : integer, optional
surface number of the NSC group. Use 1 if the program mode is
Non-Sequential
objNum : integer, optional
object number
x, y, z, tiltX, tiltY, tiltZ : floats, optional
x, y, z position and tilts about X, Y, and Z axis respectively
xHalfWidth, yHalfWidth : floats
half widths along x and y axis
numLayRays, numAnaRays : integers, optional
number of layout rays and analysis rays respectively
refObjNum : integer, optional
reference object number
insideOf : integer, optional
inside of object number
power : float, optional
power in Watts
waveNum : integer, optional
the wave number
srcDist, cosExp, gaussGx, gaussGy, srcX, srcY : floats
see the manual for details
color : integer, optional
The pen color to use when drawing rays from this source. If 0,
the default color will be chosen.
comment : string, optional
comment for the object
overwrite : bool, optional
if `False` (default), a new object is inserted at the position and existing
objects (if any) are pushed. If `True`, then existing at the ``objNum`` is
overwritten
Returns
-------
None
Note
----
If an object with the same number as ``objNum`` already exist in the NSCE,
that (and any subsequent) object is pushed by one row, unless ``overwrite``
is ``True``.
See Also
--------
zInsertNSCSourceEllipse()
"""
numObjsExist = self.zGetNSCData(surfNum, code=0)
if objNum > numObjsExist + 1:
raise ValueError('objNum ({}) cannot be greater than {}.'
.format(objNum, numObjsExist+1))
if not overwrite:
assert self.zInsertObject(surfNum, objNum) == 0, \
'Error inserting object at object Number {}'.format(objNum)
objData = {0:'NSC_SRCR', 1:comment, 5:refObjNum, 6:insideOf}
for code, data in objData.iteritems():
assert self.zSetNSCObjectData(surfNum, objNum, code, data) == data, \
'Error in setting NSC object code {}'.format(code)
assert self.zSetNSCPositionTuple(surfNum, objNum, x, y, z, tiltX, tiltY, tiltZ) \
== (x, y, z, tiltX, tiltY, tiltZ, '')
param = (numLayRays, numAnaRays, power, waveNum, color, xHalfWidth, yHalfWidth,
srcDist, cosExp, gaussGx, gaussGy, srcX, srcY)
for i, each in enumerate(param, 1):
assert self.zSetNSCParameter(surfNum, objNum, paramNum=i, data=each) == each, \
'Error in setting NSC parameter {} to {} at object {}'.format(i, param[i], objNum)
def zInsertNSCEllipse(self, surfNum=1, objNum=1, x=0.0, y=0.0, z=0.0,
tiltX=0.0, tiltY=0.0, tiltZ=0.0, xHalfWidth=0.0,
yHalfWidth=0.0, material='', refObjNum=0, insideOf=0,
comment='', overwrite=False):
"""Insert a new NSC ellipse object at the location indicated by the
parameters ``surfNum`` and ``objNum``
Parameters
----------
surfNum : integer, optional
surface number of the NSC group. Use 1 if the program mode is
Non-Sequential
objNum : integer, optional
object number
x, y, z, tiltX, tiltY, tiltZ : floats, optional
x, y, z position and tilts about X, Y, and Z axis respectively
xHalfWidth, yHalfWidth : floats
half widths along x and y axis
material : string, optional
material such as ABSORB, MIRROR, etc.
refObjNum : integer, optional
reference object number
insideOf : integer, optional
inside of object number
comment : string, optional
comment for the object
overwrite : bool, optional
if `False` (default), a new object is inserted at the position and existing
objects (if any) are pushed. If `True`, then existing at the ``objNum`` is
overwritten
Returns
-------
None
Note
----
If an object with the same number as ``objNum`` already exist in the NSCE,
that (and any subsequent) object is pushed by one row, unless ``overwrite``
is ``True``.
See Also
--------
zInsertNSCRectangle()
"""
numObjsExist = self.zGetNSCData(surfNum, code=0)
if objNum > numObjsExist + 1:
raise ValueError('objNum ({}) cannot be greater than {}.'
.format(objNum, numObjsExist+1))
if not overwrite:
assert self.zInsertObject(surfNum, objNum) == 0, \
'Error inserting object at object Number {}'.format(objNum)
objData = {0:'NSC_ELLI', 1:comment, 5:refObjNum, 6:insideOf}
for code, data in objData.iteritems():
assert self.zSetNSCObjectData(surfNum, objNum, code, data) == data, \
'Error in setting NSC object code {}'.format(code)
assert self.zSetNSCPositionTuple(surfNum, objNum, x, y, z, tiltX, tiltY, tiltZ, material) \
== (x, y, z, tiltX, tiltY, tiltZ, material)
param = (xHalfWidth, yHalfWidth,)
for i, each in enumerate(param, 1):
assert self.zSetNSCParameter(surfNum, objNum, paramNum=i, data=each) == each, \
'Error in setting NSC parameter {} to {} at object {}'.format(i, param[i], objNum)
def zInsertNSCRectangle(self, surfNum=1, objNum=1, x=0.0, y=0.0, z=0.0,
tiltX=0.0, tiltY=0.0, tiltZ=0.0, xHalfWidth=0.0,
yHalfWidth=0.0, material='', refObjNum=0, insideOf=0,
comment='', overwrite=False):
"""Insert a new NSC rectangle object at the location indicated by the
parameters ``surfNum`` and ``objNum``
Parameters
----------
surfNum : integer, optional
surface number of the NSC group. Use 1 if the program mode is
Non-Sequential
objNum : integer, optional
object number
x, y, z, tiltX, tiltY, tiltZ : floats, optional
x, y, z position and tilts about X, Y, and Z axis respectively
xHalfWidth, yHalfWidth : floats
half widths along x and y axis
material : string, optional
material such as ABSORB, MIRROR, etc.
refObjNum : integer, optional
reference object number
insideOf : integer, optional
inside of object number
comment : string, optional
comment for the object
overwrite : bool, optional
if `False` (default), a new object is inserted at the position and existing
objects (if any) are pushed. If `True`, then existing at the ``objNum`` is
overwritten
Returns
-------
None
Note
----
If an object with the same number as ``objNum`` already exist in the NSCE,
that (and any subsequent) object is pushed by one row, unless ``overwrite``
is ``True``.
See Also
--------
zInsertNSCEllipse()
"""
numObjsExist = self.zGetNSCData(surfNum, code=0)
if objNum > numObjsExist + 1:
raise ValueError('objNum ({}) cannot be greater than {}.'
.format(objNum, numObjsExist+1))
if not overwrite:
assert self.zInsertObject(surfNum, objNum) == 0, \
'Error inserting object at object Number {}'.format(objNum)
objData = {0:'NSC_SRCR', 1:comment, 5:refObjNum, 6:insideOf}
for code, data in objData.iteritems():
assert self.zSetNSCObjectData(surfNum, objNum, code, data) == data, \
'Error in setting NSC object code {}'.format(code)
assert self.zSetNSCPositionTuple(surfNum, objNum, x, y, z, tiltX, tiltY, tiltZ, material) \
== (x, y, z, tiltX, tiltY, tiltZ, material)
param = (xHalfWidth, yHalfWidth,)
for i, each in enumerate(param, 1):
assert self.zSetNSCParameter(surfNum, objNum, paramNum=i, data=each) == each, \
'Error in setting NSC parameter {} to {} at object {}'.format(i, param[i], objNum)
def zNSCDetectorClear(self, surfNum, detectNum=0):
"""Clear NSC detector data
Parameters
----------
surfNum : integer
surface number of NSC group (use 1 for pure NSC system)
detectNum : integer
the object number of the detector to be cleared. Use 0 to
clear all detectors
Returns
-------
ret : integer
0 if successful
"""
return self.zNSCDetectorData(surfNum, -detectNum, 0, 0)
def zInsertNSCDetectorRectangle(self, surfNum=1, objNum=1, x=0.0, y=0.0, z=0.0,
tiltX=0.0, tiltY=0.0, tiltZ=0.0, xHalfWidth=1.0,
yHalfWidth=1.0, numXPix=1, numYPix=1, material='',
dType=0, fntOnly=0, refObjNum=0, insideOf=0,
color=0, smooth=0, scale=0, pltScale=0.0,
psfWaveNum=0, xAngMin=-90.0, xAngMax=90.0,
yAngMin=-90.0, yAngMax=90.0, pol=0, mirror=0,
comment='', overwrite=False):
"""Insert a new NSC detector rectangle at the location indicated by the
parameters ``surfNum`` and ``objNum``
Parameters
----------
surfNum : integer, optional
surface number of the NSC group. Use 1 if the program mode is
Non-Sequential
objNum : integer, optional
object number
x, y, z, tiltX, tiltY, tiltZ : floats, optional
x, y, z position and tilts about X, Y, and Z axis respectively
xHalfWidth, yHalfWidth : floats
half widths along x and y axis
numXPix, numYPix: integers, optional
number of pixels along x- and y- axis respectively
material : string, optional
material such as ABSORB, MIRROR, etc.
dType : integer, optional
whether coherent or incoherent
fntOnly : integer, optional
whether detection occurs only on the front surface
refObjNum : integer, optional
reference object number
insideOf : integer, optional
inside of object number
color, smooth, scale, pltScale, psfWaveNum :
see manual for details
xAngMin, xAngMax, yAngMin, yAngMax, pol, mirror :
see manual for details
comment : string, optional
comment for the object
overwrite : bool, optional
if `False` (default), a new object is inserted at the position and existing
objects (if any) are pushed. If `True`, then existing at the ``objNum`` is
overwritten
Returns
-------
None
Note
----
If an object with the same number as ``objNum`` already exist in the NSCE,
that (and any subsequent) object is pushed by one row, unless ``overwrite``
is ``True``.
"""
numObjsExist = self.zGetNSCData(surfNum, code=0)
if objNum > numObjsExist + 1:
raise ValueError('objNum ({}) cannot be greater than {}.'
.format(objNum, numObjsExist+1))
if not overwrite:
assert self.zInsertObject(surfNum, objNum) == 0, \
'Error inserting object at object Number {}'.format(objNum)
objData = {0:'NSC_DETE', 1:comment, 5:refObjNum, 6:insideOf}
for code, data in objData.iteritems():
assert self.zSetNSCObjectData(surfNum, objNum, code, data) == data, \
'Error in setting NSC object code {}'.format(code)
assert self.zSetNSCPositionTuple(surfNum, objNum, x, y, z, tiltX, tiltY, tiltZ, material) \
== (x, y, z, tiltX, tiltY, tiltZ, material)
param = (xHalfWidth, yHalfWidth, numXPix, numYPix, dType, color, smooth, scale,
pltScale, fntOnly, psfWaveNum, xAngMin, xAngMax, yAngMin, yAngMax, pol, mirror)
for i, each in enumerate(param, 1):
assert self.zSetNSCParameter(surfNum, objNum, paramNum=i, data=each) == each, \
'Error in setting NSC parameter {} to {} at object {}'.format(i, param[i], objNum)
#%% Interaction friendly (but duplicate) functions
@property
def refresh(self):
"""push lens from LDE to DDE server"""
return self.zGetRefresh()
@property
def push(self):
"""push lens from DDE server to LDE and update lens"""
return self.zPushLens(1)
@property
def update(self):
"""update -- recompute all pupil positions, slovles, etc."""
return self.zGetUpdate()
#%% IPYTHON NOTEBOOK UTILITY FUNCTIONS
def ipzCaptureWindowLQ(self, num=1, *args, **kwargs):
"""Capture graphic window from Zemax and display in IPython
(Low Quality)
ipzCaptureWindowLQ(num [, *args, **kwargs])-> displayGraphic
Parameters
----------
num : integer
the graphic window to capture is indicated by the window
number ``num``.
Returns
-------
None (embeds image in IPython cell)
Notes
-----
1. This function is useful for quickly capturing a graphic window,
and embedding into a IPython notebook or QtConsole. The quality
of JPG image is limited by the JPG export quality from Zemax.
2. In order to use this function, please copy the ZPL macros from
PyZDDE\ZPLMacros to the macro directory where Zemax is expecting
(i.e. as set in Zemax->Preference->Folders)
3. For earlier versions (before 2010) please use
``ipzCaptureWindow()`` for better quality.
"""
global _global_IPLoad
if _global_IPLoad:
macroCode = "W{n}".format(n=str(num).zfill(2))
dataPath = self.zGetPath()[0]
imgPath = (r"{dp}\IMAFiles\{mc}_Win{n}.jpg"
.format(dp=dataPath, mc=macroCode, n=str(num).zfill(2)))
if not self.zExecuteZPLMacro(macroCode):
if _checkFileExist(imgPath):
_display(_Image(filename=imgPath))
_deleteFile(imgPath)
else:
print("Timeout reached before image file was ready.")
print("The specified graphic window may not be open in ZEMAX!")
else:
print("ZPL Macro execution failed.\nZPL Macro path in PyZDDE is set to {}."
.format(self._macroPath))
if not self._macroPath:
print("Use zSetMacroPath() to set the correct macro path.")
else:
print("Couldn't import IPython modules.")
    def ipzCaptureWindow(self, analysisType, percent=12, MFFtNum=0, gamma=0.35,
                         settingsFile=None, flag=0, retArr=False, wait=10):
        """Capture any analysis window from Zemax main window, using
        3-letter analysis code.

        Parameters
        ----------
        analysisType : string
            3-letter button code for the type of analysis
        percent : float
            percentage of the Zemax metafile to display (default=12). Used
            for resizing the large metafile.
        MFFtNum : integer
            type of metafile. 0 = Enhanced Metafile; 1 = Standard Metafile
        gamma : float
            gamma for the PNG image (default = 0.35). Use a gamma value of
            around 0.9 for color surface plots.
        settingsFile : string
            If a valid file name is used for the ``settingsFile``, Zemax
            will use or save the settings used to compute the metafile,
            depending upon the value of the flag parameter.
        flag : integer
            * 0 = default settings used for the metafile graphic;
            * 1 = settings provided in the settings file, if valid, else
              default settings used;
            * 2 = settings in the settings file, if valid, will be used &
              the settings box for the requested feature will be displayed.
              After the user changes the settings, the graphic will be
              generated using the new settings.
        retArr : boolean
            whether to return the image as an array or not.
            If ``False`` (default), the image is embedded and no array is
            returned;
            If ``True``, an numpy array is returned that may be plotted
            using Matplotlib.
        wait : integer
            time in sec sent to Zemax for the requested analysis to
            complete and produce a file.

        Returns
        -------
        None if ``retArr`` is ``False`` (default). The graphic is embedded
        into the notebook, else ``pixel_array`` (ndarray) if ``retArr``
        is ``True``.

        Notes
        -----
        1. PyZDDE uses ImageMagick's convert program to resize and
           convert the meta file produced by Zemax into a smaller PNG
           file suitable for embedding into IPython cells. A copy of
           convert.exe comes along with PyZDDE. However, the user may
           choose to use a version of ImageMagick that is installed in
           the system already. Please use the module level function
           ``pyz.setImageMagickSettings()`` to do so.
        2. In some environments, Zemax outputs large EMF files [1]_.
           Converting large files may take some time (around 10 sec.)
           to complete.
        3. If the function doesn't work as expected, please check the
           EMF to PNG conversion command being used and the output of
           executing this command by setting the debug print level to
           1 as ``pyz._DEBUG_PRINT_LEVEL=1``, and running the function
           again.
        4. The dataitem `GetMetaFile` has been removed since OpticStudio
           14. Therefore, this function does not work in OpticStudio.

        References
        ----------
        .. [1] https://github.com/indranilsinharoy/PyZDDE/issues/34

        See Also
        --------
        ipzCaptureWindowLQ():
            low quality screen-shot of a graphic window
        pyz.setImageMagickSettings():
            set ImageMagick settings
        pyz.getImageMagickSettings():
            view current ImageMagick settings
        """
        # module-level flags configured at import / via setImageMagickSettings()
        global _global_IPLoad, _global_mpl_img_load
        global _global_use_installed_imageMagick
        global _global_imageMagick_dir
        if _global_IPLoad:
            # work in the directory of the currently loaded lens file
            tmpImgPath = _os.path.dirname(self.zGetFile()) # dir of the lens file
            if MFFtNum==0:
                ext = 'EMF'
            else:
                ext = 'WMF'
            tmpMetaImgName = "{tip}\\TEMPGPX.{ext}".format(tip=tmpImgPath, ext=ext)
            tmpPngImgName = "{tip}\\TEMPGPX.png".format(tip=tmpImgPath)
            # pick the convert.exe to use: system-installed ImageMagick or the
            # copy bundled with PyZDDE (in this module's directory)
            if _global_use_installed_imageMagick:
                cd = _global_imageMagick_dir
            else:
                cd = _os.path.dirname(_os.path.realpath(__file__))
            # EMF needs -flatten and gamma correction; WMF is only resized
            if MFFtNum==0:
                imagickCmd = ('{cd}\convert.exe \"{MetaImg}\" -flatten '
                              '-resize {per}% -gamma {ga} \"{PngImg}\"'
                              .format(cd=cd,MetaImg=tmpMetaImgName,per=percent,
                                      ga=gamma,PngImg=tmpPngImgName))
            else:
                imagickCmd = ("{cd}\convert.exe \"{MetaImg}\" -resize {per}% \"{PngImg}\""
                              .format(cd=cd,MetaImg=tmpMetaImgName,per=percent,
                                      PngImg=tmpPngImgName))
            _debugPrint(1, "imagickCmd = {}".format(imagickCmd))
            # Ask Zemax to render the metafile; zGetMetaFile returns 0 on success
            if not self.zGetMetaFile(tmpMetaImgName,analysisType,
                                     settingsFile,flag):
                if _checkFileExist(tmpMetaImgName, timeout=wait):
                    # Convert Metafile to PNG using ImageMagick's convert;
                    # STARTF_USESHOWWINDOW keeps the spawned console window
                    # hidden (wShowWindow defaults to SW_HIDE)
                    startupinfo = _subprocess.STARTUPINFO()
                    startupinfo.dwFlags |= _subprocess.STARTF_USESHOWWINDOW
                    proc = _subprocess.Popen(args=imagickCmd,
                                             stdout=_subprocess.PIPE,
                                             stderr=_subprocess.PIPE,
                                             startupinfo=startupinfo)
                    msg = proc.communicate()
                    _debugPrint(1, "imagickCmd execution return message = "
                                "{}".format(msg))
                    if _checkFileExist(tmpPngImgName, timeout=10):
                        # short pause before reading -- presumably lets the
                        # file handle settle after conversion (TODO confirm)
                        _time.sleep(0.2)
                        if retArr:
                            if _global_mpl_img_load:
                                arr = _matimg.imread(tmpPngImgName, 'PNG')
                                _deleteFile(tmpMetaImgName)
                                _deleteFile(tmpPngImgName)
                                return arr
                            else:
                                print("Couldn't import Matplotlib")
                        else: # Display the image if not retArr
                            _display(_Image(filename=tmpPngImgName))
                            _deleteFile(tmpMetaImgName)
                            _deleteFile(tmpPngImgName)
                    else:
                        print("Timeout reached before PNG file was ready")
                else:
                    print(("Timeout reached before Metafile file was ready. "
                           "This function doesn't work in newer OpticStudio. "
                           "Please consider using ipzCaptureWindowLQ()."))
            else:
                print("Metafile couldn't be created.")
        else:
                print("Couldn't import IPython modules.")
def ipzGetTextWindow(self, analysisType, sln=0, eln=None, settingsFile=None,
flag=0, *args, **kwargs):
"""Print the text output of a Zemax analysis type into a IPython
cell.
Parameters
----------
analysisType : string
3 letter case-sensitive label that indicates the type of the
analysis to be performed. They are identical to those used for
the button bar in Zemax. The labels are case sensitive. If no
label is provided or recognized, a standard raytrace will be
generated.
sln : integer, optional
starting line number (default = 0)
eln : integer, optional
ending line number (default = None). If ``None`` all lines in
the file are printed.
settingsFile : string, optional
If a valid file name is used for the ``settingsFile``, Zemax
will use or save the settings used to compute the text file,
depending upon the value of the flag parameter.
flag : integer, optional
0 = default settings used for the text;
1 = settings provided in the settings file, if valid, else
default settings used
2 = settings provided in the settings file, if valid, will
be used and the settings box for the requested feature
will be displayed. After the user makes any changes to
the settings the text will then be generated using the
new settings. Please see the ZEMAX manual for details.
Returns
-------
None (the contents of the text file is dumped into an IPython cell)
"""
if not eln:
eln = 1e10 # Set a very high number
linePrintCount = 0
global _global_IPLoad
if _global_IPLoad:
# Use the lens file path to store and process temporary images
tmpTxtPath = self.zGetPath()[1] # lens file path
tmpTxtFile = "{ttp}\\TEMPTXT.txt".format(ttp=tmpTxtPath)
if not self.zGetTextFile(tmpTxtFile,analysisType,settingsFile,flag):
if _checkFileExist(tmpTxtFile):
for line in _getDecodedLineFromFile(_openFile(tmpTxtFile)):
if linePrintCount >= sln and linePrintCount <= eln:
print(line) # print in the execution cell
linePrintCount += 1
_deleteFile(tmpTxtFile)
else:
print("Text file of analysis window not created")
else:
print("GetTextFile didn't succeed")
else:
print("Couldn't import IPython modules.")
def ipzGetFirst(self, pprint=True):
"""Prints or returns first order data in human readable form
Parameters
----------
pprint : boolean
If True (default), the parameters are printed, else a
dictionary is returned
Returns
-------
firstData : dictionary or None
if ``pprint`` is True then None
"""
firstData = self.zGetFirst()
first = {}
first['Effective focal length'] = firstData[0]
first['Paraxial working F/#'] = firstData[1]
first['Real working F/#'] = firstData[2]
first['Paraxial image height'] = firstData[3]
first['Paraxial magnification'] = firstData[4]
if pprint:
_print_dict(first)
else:
return first
def ipzGetMFE(self, start_row=1, end_row=2, pprint=True):
"""Prints or returns the Oper, Target, Weight and Value parameters
in the MFE for the specified rows in an IPython notebook cell
Parameters
----------
start_row : integer, optional
starting row in the MFE to print (default=1)
end_row : integer, optional
end row in the MFE to print (default=2)
pprint : boolean
If True (default), the parameters are printed, else a
dictionary is returned.
Returns
-------
mfeData : tuple or None
if ``pprint`` is True, it returns None
See Also
--------
zOptimize() :
To update the merit function prior to calling ``ipzGetMFE()``,
call ``zOptimize()`` with the number of cycles set to -1
"""
if pprint:
print("Multi-Function Editor contents:")
print('{:^8}{:>6}{:>6}{:>8}{:>8}{:>8}{:>8}{:>8}{:>10}{:>8}{:>10}'
.format("Oper", "int1", "int2", "data1", "data2", "data3", "data4",
"data5", "Target", "Weight", "Value"))
else:
mfed = _co.namedtuple('MFEdata', ['Oper', 'int1', 'int2', 'data1', 'data2',
'data3', 'data4', 'data5', 'Target',
'Weight', 'Value'])
mfeData = []
for i in range(start_row, end_row + 1):
#opr, tgt = self.zGetOperand(i, 1), self.zGetOperand(i, 8)
#wgt, val = self.zGetOperand(i, 9), self.zGetOperand(i, 10)
odata = self.zGetOperandRow(row=i)
opr, i1, i2, d1, d2, d3, d4, d5, d6, tgt, wgt, val, per = odata
if pprint:
if isinstance(i1, str):
print('{:^8}{}'
.format(opr, i1, tgt, wgt, val))
else:
print('{:^8}{:>6.2f}{:>6.2f}{:>8.4f}{:>8.4f}{:>8.4f}{:>8.4f}{:>8.4f}{:>10.6f}{:>8.4f}{:>10.6f}'
.format(opr, i1, i2, d1, d2, d3, d4, d5, tgt, wgt, val))
else:
data = mfed._make([opr, i1, i2, d1, d2, d3, d4, d5, tgt, wgt, val])
mfeData.append(data)
if not pprint:
return tuple(mfeData)
def ipzGetPupil(self, pprint=True):
"""Print/ return pupil data in human readable form
Parameters
----------
pprint : boolean
If True (default), the parameters are printed, else a
dictionary is returned
Returns
-------
Print or return dictionary containing pupil information in human
readable form that meant to be used in interactive environment.
"""
pupilData = self.zGetPupil()
pupil = {}
apo_type = {0 : 'None', 1: 'Gaussian', 2 : 'Tangential/Cosine cubed'}
pupil['Aperture Type'] = _system_aperture[pupilData[0]]
if pupilData[0]==3: # if float by stop
pupil['Value (stop surface semi-diameter)'] = pupilData[1]
else:
pupil['Value (system aperture)'] = pupilData[1]
pupil['Entrance pupil diameter'] = pupilData[2]
pupil['Entrance pupil position (from surface 1)'] = pupilData[3]
pupil['Exit pupil diameter'] = pupilData[4]
pupil['Exit pupil position (from IMA)'] = pupilData[5]
pupil['Apodization type'] = apo_type[pupilData[6]]
pupil['Apodization factor'] = pupilData[7]
if pprint:
_print_dict(pupil)
else:
return pupil
def ipzGetSystemAper(self, pprint=True):
"""Print or return system aperture data in human readable form
Parameters
----------
pprint : boolean
If True (default), the parameters are printed, else a
dictionary is returned
Returns
-------
Print or return dictionary containing system aperture information
in human readable form that meant to be used in interactive
environment.
"""
sysAperData = self.zGetSystemAper()
sysaper = {}
sysaper['Aperture Type'] = _system_aperture[sysAperData[0]]
sysaper['Stop surface'] = sysAperData[1]
if sysAperData[0]==3: # if float by stop
sysaper['Value (stop surface semi-diameter)'] = sysAperData[2]
else:
sysaper['Value (system aperture)'] = sysAperData[2]
if pprint:
_print_dict(sysaper)
else:
return sysaper
def ipzGetSurfaceData(self, surfNum, pprint=True):
"""Print or return basic (not all) surface data in human readable
form
Parameters
----------
surfNum : integer
surface number
pprint : boolean
If True (default), the parameters are printed, else a
dictionary is returned.
Returns
-------
Print or return dictionary containing basic surface data (radius
of curvature, thickness, glass, semi-diameter, and conic) in human
readable form that meant to be used in interactive environment.
"""
surfdata = {}
surfdata['Radius of curvature'] = 1.0/self.zGetSurfaceData(surfNum, 2)
surfdata['Thickness'] = self.zGetSurfaceData(surfNum, 3)
surfdata['Glass'] = self.zGetSurfaceData(surfNum, 4)
surfdata['Semi-diameter'] = self.zGetSurfaceData(surfNum, 5)
surfdata['Conic'] = self.zGetSurfaceData(surfNum, 6)
if pprint:
_print_dict(surfdata)
else:
return surfdata
def ipzGetLDE(self, num=None):
    """Prints the sequential mode LDE data into the IPython cell

    Usage: ``ipzGetLDE()``

    Parameters
    ----------
    num : integer, optional
        if not None, surfaces up to surface number equal to `num`
        will be retrieved

    Returns
    -------
    None

    Note
    ----
    Only works in sequential/hybrid mode. Can't retrieve NSC objects.
    """
    cd = _os.path.dirname(_os.path.realpath(__file__))
    # use os.path.join instead of a hard-coded "\\" separator so the
    # path is assembled with the platform's convention
    textFileName = _os.path.join(cd, "prescriptionFile.txt")
    ret = self.zGetTextFile(textFileName, 'Pre', "None", 0)
    assert ret == 0
    recSystemData = self.zGetSystem() # Get the current system parameters
    numSurf = recSystemData[0]
    numSurf2show = num if num is not None else numSurf
    line_list = _readLinesFromFile(_openFile(textFileName))
    for line_num, line in enumerate(line_list):
        sectionString = ("SURFACE DATA SUMMARY:") # to use re later
        if line.rstrip() == sectionString:
            # 1 object surf + 3 extra lines before actual data
            for i in range(numSurf2show + 4):
                lde_line = line_list[line_num + i].rstrip()
                print(lde_line)
            break
    else:
        # for/else: reached only when the section header was never found
        raise Exception("Could not find string '{}' in Prescription file."
        " \n\nPlease check if there is a mismatch in text encoding between"
        " Zemax and PyZDDE, ``Surface Data`` is enabled in prescription"
        " file, and the mode is not pure NSC".format(sectionString))
    _deleteFile(textFileName)
def ipzGetFieldData(self):
    """Prints formatted field data in IPython QtConsole or Notebook

    First prints the field meta data (type, count, extents and
    normalization), then one formatted row per defined field point.
    Returns None; output goes to stdout.
    """
    fieldType = {0 : 'Angles in degrees',
                 1 : 'Object height',
                 2 : 'Paraxial image height',
                 3 : 'Real image height'}
    fieldNormalization = {0 : 'Radial', 1 : 'Rectangular'}
    # field index 0 yields the meta data record (type/numFields/max/norm)
    fieldMetaData = self.zGetField(0)
    fieldMeta = {}
    fieldMeta['Type'] = fieldType[fieldMetaData.type]
    fieldMeta['Number of Fields'] = fieldMetaData.numFields
    fieldMeta['Max X'] = fieldMetaData.maxX
    fieldMeta['Max Y'] = fieldMetaData.maxY
    fieldMeta['Field Normalization'] = fieldNormalization[fieldMetaData.normMethod]
    _print_dict(fieldMeta)
    # column headers followed by one fixed-width row per field point
    print(("{:^8}{:^8}{:^8}{:^8}{:^8}{:^8}{:^8}{:^8}"
           .format('X', 'Y', 'Weight', 'VDX', 'VDY', 'VCX', 'VCY', 'VAN')))
    for each in self.zGetFieldTuple():
        print(("{:< 8.2f}{:< 8.2f}{:<8.4f}{:<8.4f}{:<8.4f}"
               "{:<8.4f}{:<8.4f}{:<8.4f}"
               .format(each.xf, each.yf, each.wgt, each.vdx, each.vdy,
                       each.vcx, each.vcy, each.van)))
#%% OTHER MODULE HELPER FUNCTIONS THAT DO NOT REQUIRE A RUNNING ZEMAX SESSION
def numAper(aperConeAngle, rIndex=1.0):
    """Return the Numerical Aperture (NA) for the given aperture cone angle.

    Parameters
    ----------
    aperConeAngle : float
        aperture cone angle, in radians
    rIndex : float
        refractive index of the medium

    Returns
    -------
    na : float
        Numerical Aperture, equal to ``rIndex * sin(aperConeAngle)``
    """
    na = _math.sin(aperConeAngle) * rIndex
    return na
def numAper2fnum(na, ri=1.0):
    """Convert numerical aperture (NA) to F-number.

    Parameters
    ----------
    na : float
        numerical aperture value
    ri : float
        refractive index of the medium

    Returns
    -------
    fn : float
        F-number value
    """
    half_cone_angle = _math.asin(na / ri)
    return 1.0 / (2.0 * _math.tan(half_cone_angle))
def fnum2numAper(fn, ri=1.0):
    """Convert F-number to numerical aperture (NA).

    Parameters
    ----------
    fn : float
        F-number value
    ri : float
        refractive index of the medium

    Returns
    -------
    na : float
        numerical aperture value
    """
    half_cone_angle = _math.atan(1.0 / (2.0 * fn))
    return ri * _math.sin(half_cone_angle)
def fresnelNumber(r, z, wl=550e-6, approx=False):
    """Calculate the Fresnel number.

    Parameters
    ----------
    r : float
        radius of the aperture in units of length (usually mm)
    z : float
        distance of the observation plane from the aperture; equal to
        the focal length of the lens for infinite conjugate, or the
        image plane distance, in the same units of length as ``r``
    wl : float
        wavelength of light (default=550e-6 mm)
    approx : boolean
        if True, uses the approximate expression (default is False)

    Returns
    -------
    fN : float
        Fresnel number

    Notes
    -----
    1. The Fresnel number is calculated based on a circular aperture or
       an unaberrated rotationally symmetric beam with finite extent
       [Zemax]_.
    2. From the Huygens-Fresnel principle perspective, the Fresnel number
       represents the number of annular Fresnel zones in the aperture
       opening [BornWolf2011]_, or from the center of the beam to the
       edge in case of a propagating beam [Zemax]_.

    References
    ----------
    .. [Zemax] Zemax manual
    .. [BornWolf2011] Principles of Optics, Born and Wolf, 2011
    """
    if approx:
        # small-aperture approximation: N ~ r^2 / (wl * z)
        return (r**2)/(wl*z)
    # exact form: twice the edge-to-center path difference in wavelengths
    edge_distance = _math.hypot(r, z)
    return 2.0 * (edge_distance - z) / wl
def approx_equal(x, y, tol=macheps):
    """Compare two float values using relative difference as measure.

    Parameters
    ----------
    x, y : floats
        floating point values to be compared
    tol : float
        tolerance (default=`macheps`, the difference between 1 and the
        next representable float: 2**-23 (~1.19e-07) for 32-bit and
        2**-52 (~2.22e-16) for 64-bit representation)

    Returns
    -------
    rel_diff : bool
        ``True`` if ``x`` and ``y`` are approximately equal within ``tol``

    Notes
    -----
    1. Relative difference:
       http://en.wikipedia.org/wiki/Relative_change_and_difference
    2. In future, this function could be replaced by a standard library
       function; see PEP 485, https://www.python.org/dev/peps/pep-0485/
    """
    scale = max(abs(x), abs(y))
    return abs(x - y) <= scale * tol
# scales to SI-meter, indexed by Zemax unit id:
#                    0: mm, 1: cm, 2: inch, 3: m
_zbf_unit_factors = [1e-3, 1e-2, 0.0254, 1]

def zemaxUnitToMeter(zemaxUnitId, value):
    """Converts a zemax unit to SI-meter.

    Parameters
    ----------
    zemaxUnitId : int
        0: mm
        1: cm
        2: inch
        3: m
    value : float
        length expressed in the unit identified by ``zemaxUnitId``

    Returns
    -------
    value in meter(m)
    """
    return _zbf_unit_factors[zemaxUnitId] * value
def readBeamFile(beamfilename):
    """Read in a Zemax Beam file

    .. deprecated:: use ``pyzdde.zfileutils.readBeamFile`` instead.

    Parameters
    ----------
    beamfilename : string
        the filename of the beam file to read

    Returns
    -------
    version : integer
        the file format version number
    n : 2-tuple, (nx, ny)
        the number of samples in the x and y directions
    ispol : boolean
        is the beam polarized?
    units : integer (0 or 1 or 2 or 3)
        the units of the beam, 0 = mm, 1 = cm, 2 = in, 3 for m
    d : 2-tuple, (dx, dy)
        the x and y grid spacing
    zposition : 2-tuple, (zpositionx, zpositiony)
        the x and y z position of the beam
    rayleigh : 2-tuple, (rayleighx, rayleighy)
        the x and y rayleigh ranges of the beam
    waist : 2-tuple, (waistx, waisty)
        the x and y waists of the beam
    lamda : double
        the wavelength of the beam
    index : double
        the index of refraction in the current medium
    receiver_eff : double
        the receiver efficiency. Zero if fiber coupling is not computed
    system_eff : double
        the system efficiency. Zero if fiber coupling is not computed.
    grid_pos : 2-tuple of lists, (x_matrix, y_matrix)
        lists of x and y positions of the grid defining the beam
    efield : 4-tuple of 2D lists, (Ex_real, Ex_imag, Ey_real, Ey_imag)
        a tuple containing two dimensional lists with the real and
        imaginary parts of the x and y polarizations of the beam
    """
    # use the proper DeprecationWarning category (so deprecation filters
    # apply) and point the warning at the caller via stacklevel=2
    _warnings.warn('Function readBeamFile() has been moved to zfileutils module. '
                   'Please update code and use the zfileutils module. This function '
                   'will be removed from the zdde module in future.',
                   DeprecationWarning, stacklevel=2)
    return _zfu.readBeamFile(beamfilename)
def writeBeamFile(beamfilename, version, n, ispol, units, d, zposition, rayleigh,
                  waist, lamda, index, receiver_eff, system_eff, efield):
    """Write a Zemax Beam file

    .. deprecated:: use ``pyzdde.zfileutils.writeBeamFile`` instead.

    Parameters
    ----------
    beamfilename : string
        the filename of the beam file to read
    version : integer
        the file format version number
    n : 2-tuple, (nx, ny)
        the number of samples in the x and y directions
    ispol : boolean
        is the beam polarized?
    units : integer
        the units of the beam, 0 = mm, 1 = cm, 2 = in, 3 = m
    d : 2-tuple, (dx, dy)
        the x and y grid spacing
    zposition : 2-tuple, (zpositionx, zpositiony)
        the x and y z position of the beam
    rayleigh : 2-tuple, (rayleighx, rayleighy)
        the x and y rayleigh ranges of the beam
    waist : 2-tuple, (waistx, waisty)
        the x and y waists of the beam
    lamda : double
        the wavelength of the beam
    index : double
        the index of refraction in the current medium
    receiver_eff : double
        the receiver efficiency. Zero if fiber coupling is not computed
    system_eff : double
        the system efficiency. Zero if fiber coupling is not computed.
    efield : 4-tuple of 2D lists, (Ex_real, Ex_imag, Ey_real, Ey_imag)
        a tuple containing two dimensional lists with the real and
        imaginary parts of the x and y polarizations of the beam

    Returns
    -------
    status : integer
        0 = success; -997 = file write failure; -996 = couldn't convert
        data to integer, -995 = unexpected error.
    """
    # proper DeprecationWarning category + stacklevel=2 so the warning is
    # attributed to the calling site, consistent with readBeamFile()
    _warnings.warn('Function writeBeamFile() has been moved to zfileutils module. '
                   'Please update code and use the zfileutils module. This function '
                   'will be removed from the zdde module in future',
                   DeprecationWarning, stacklevel=2)
    return _zfu.writeBeamFile(beamfilename, version, n, ispol, units, d, zposition,
                              rayleigh, waist, lamda, index, receiver_eff, system_eff, efield)
def showMessageBox(msg, title='', msgtype='info'):
    """Show a simple, blocking Tkinter message box.

    The call halts the program until the user dismisses the box by
    clicking its OK button.

    Parameters
    ----------
    msg : string
        text to be displayed as a message (can occupy multiple lines)
    title : string, optional
        text shown in the title bar of the message box
    msgtype : string, optional
        'info', 'warn', or 'error' to indicate the type of message.
        Any other value (or omission) produces an info type box.

    Returns
    -------
    None
    """
    _tk.Tk().withdraw()
    dispatch = {'info': _MessageBox.showinfo,
                'warn': _MessageBox.showwarning,
                'error': _MessageBox.showerror}
    # unknown msgtype silently falls back to an info box
    show = dispatch.get(msgtype, _MessageBox.showinfo)
    show(title=title, message=msg)
#%% Helper functions to process data from ZEMAX DDE server.
# This is especially convenient for processing replies from Zemax for
# those function calls that return a known data structure. These functions
# are mainly used internally and may not be exposed directly.
def _regressLiteralType(x):
"""The function returns the literal with its proper type, such as int,
float, or string from the input string x
Examples
--------
>>> _regressLiteralType("1")->1
>>> _regressLiteralType("1.0")->1.0
>>> _regressLiteralType("1e-3")->0.001
>>> _regressLiteralType("YUV")->'YUV'
>>> _regressLiteralType("YO8")->'YO8'
"""
try:
float(x) # Test for numeric or string
lit = float(x) if set(['.','e','E']).intersection(x) else int(x)
except ValueError:
lit = str(x)
return lit
def _checkFileExist(filename, mode='r', timeout=.25):
"""This function checks if a file exist
If the file exist then it is ready to be read, written to, or deleted
Parameters
----------
filename : string
filename with full path
mode : string, optional
mode for opening file
timeout : integer,
timeout in seconds for how long to wait before returning
Returns
-------
status : bool
True = file exist, and file operations are possible;
False = timeout reached
"""
ti = _datetime.datetime.now()
status = True
while True:
try:
f = open(filename, mode)
except IOError:
timeDelta = _datetime.datetime.now() - ti
if timeDelta.total_seconds() > timeout:
status = False
break
else:
_time.sleep(0.25)
else:
f.close()
break
return status
def _deleteFile(fileName, n=10):
"""Cleanly deletes a file. It takes n attempts to delete the file.
If it can't delete the file in n attempts then it returns fail.
Parameters
----------
fileName : string
file name of file to be deleted with full path
n : integer
number of times to attempt before giving up
Returns
-------
status : bool
True = file deleting successful;
False = reached maximum number of attempts, without deleting file.
Notes
-----
It assumes that the file with filename actually exist and doesn't do
any error checking on its existance. This is OK as this function is
for internal use only.
"""
status = False
count = 0
while not status and count < n:
try:
_os.remove(fileName)
except OSError:
count += 1
_time.sleep(0.2)
else:
status = True
return status
def _deleteFilesCreatedDuringSession(self):
    """Helper function to clean up files created by PyZDDE during a session.
    Examples of such files include configuration files, etc.

    Reads the set ``self._filesCreated`` on the link object, attempts to
    delete each file, and leaves only the files that could not be deleted
    in ``self._filesCreated`` so a later cleanup can retry them.
    """
    filesToDelete = self._filesCreated
    filesNotDeleted = set()
    for filename in filesToDelete:
        if not _deleteFile(filename):
            filesNotDeleted.add(filename)
    # keep only the files that are both tracked and still undeleted
    remaining = filesToDelete.intersection(filesNotDeleted)
    self._filesCreated = remaining
def _process_get_set_NSCProperty(code, reply):
"""Process reply for functions zGetNSCProperty and zSETNSCProperty"""
rs = reply.rstrip()
if rs == 'BAD COMMAND':
nscPropData = -1
else:
if code in (0,1,4,5,6,11,12,14,18,19,27,28,84,86,92,117,123):
nscPropData = str(rs)
elif code in (2,3,7,9,13,15,16,17,20,29,81,91,101,102,110,111,113,
121,141,142,151,152,153161,162,171,172,173):
nscPropData = int(float(rs))
else:
nscPropData = float(rs)
return nscPropData
def _process_get_set_Operand(column, reply):
"""Process reply for functions zGetOperand and zSetOperand"""
rs = reply.rstrip()
if column == 1:
# ensure that it is a string ... as it is supposed to return the operand
if isinstance(_regressLiteralType(rs), str):
return str(rs)
else:
return -1
elif column in (2,3): # if thre is a comment, it will be in column 2
#return int(float(rs))
return _regressLiteralType(rs)
else:
return float(rs)
def _process_get_set_Solve(reply):
"""Process reply for functions zGetSolve and zSetSolve"""
reply = reply.rstrip()
rs = reply.split(",")
if 'BAD COMMAND' in rs:
return -1
else:
return tuple([_regressLiteralType(x) for x in rs])
def _process_get_set_SystemProperty(code, reply):
"""Process reply for functions zGetSystemProperty and zSetSystemProperty"""
# Convert reply to proper type
if code in (102,103, 104,105,106,107,108,109,110,202,203): # unexpected (identified) cases
sysPropData = reply
elif code in (16,17,23,40,41,42,43): # string
sysPropData = reply.rstrip() #str(reply)
elif code in (11,13,24,53,54,55,56,60,61,62,63,71,72,73,77,78): # floats
sysPropData = float(reply)
else:
sysPropData = int(float(reply)) # integer
return sysPropData
def _process_get_set_Tol(operNum,reply):
"""Process reply for functions zGetTol and zSetTol"""
rs = reply.rsplit(",")
tolType = [rs[0]]
tolParam = [float(e) if i in (2,3) else int(float(e))
for i,e in enumerate(rs[1:])]
toleranceData = tuple(tolType + tolParam)
return toleranceData
def _print_dict(data):
"""Helper function to print a dictionary so that the key and value are
arranged into nice rows and columns
"""
leftColMaxWidth = max(_imap(len, data))
for key, value in data.items():
print("{}: {}".format(key.ljust(leftColMaxWidth + 1), value))
def _openFile(fileName):
    """Open ``fileName`` in the appropriate mode and return the file object.

    Parameters
    ----------
    fileName (string) : name of the file to open

    Returns
    -------
    f (file object)

    Notes
    -----
    This is just a wrapper around the open function. It is the
    responsibility of the calling function to close the file by calling
    the ``close()`` method of the file object. Alternatively, use a
    with/as context, or use ``_readLinesFromFile()`` or
    ``_getDecodedLineFromFile()`` which close the file via a context
    manager.
    """
    global _global_use_unicode_text
    # Unicode (UTF-16) text files must be read in binary mode so the
    # decoding can be done explicitly later
    mode = u'rb' if _global_use_unicode_text else 'r'
    return open(fileName, mode)
def _getDecodedLineFromFile(fileObj):
    """generator function; yields a decoded (ascii/Unicode) line

    Each yielded line is right-stripped and has had its decimal
    separators normalized via ``_zfu.checkDecimalSeparators()``.
    The file is automatically closed when after reading the file or if any
    exception occurs while reading the file.
    """
    global _global_pyver3
    global _global_use_unicode_text
    global _global_in_IPython_env
    # I am not exactly sure why there is a difference in behavior
    # between IPython environment and normal Python shell, but it is there!
    if _global_in_IPython_env:
        unicode_type = 'utf-16-le'
    else:
        unicode_type = 'utf-16'
    if _global_use_unicode_text:
        # file was opened in binary mode; decode each line explicitly
        fenc = _codecs.EncodedFile(fileObj, unicode_type)
        with fenc as f:
            for line in f:
                decodedLine = line.decode(unicode_type)
                decodedLine = _zfu.checkDecimalSeparators(decodedLine)
                yield decodedLine.rstrip()
    else: # ascii
        with fileObj as f:
            for line in f:
                if _global_pyver3: # ascii and Python 3.x
                    line = _zfu.checkDecimalSeparators(line)
                    yield line.rstrip()
                else: # ascii and Python 2.x
                    # fall back to a lossy decode if the escape decode fails
                    try:
                        decodedLine = line.decode('raw-unicode-escape')
                    except:
                        decodedLine = line.decode('ascii', 'replace')
                    decodedLine = _zfu.checkDecimalSeparators(decodedLine)
                    yield decodedLine.rstrip()
def _readLinesFromFile(fileObj):
    """Return a list of lines (as unicode literals) in the file.

    This emulates the functionality of the ``readlines()`` method of
    file objects. The caller does not have to close the file explicitly;
    that is handled inside ``_getDecodedLineFromFile()``.

    Parameters
    ----------
    fileObj : file object returned by ``open()`` method

    Returns
    -------
    lines (list) : list of decoded lines from the file
    """
    return [decoded for decoded in _getDecodedLineFromFile(fileObj)]
def _getFirstLineOfInterest(line_list, pattern, patAtStart=True):
"""returns the line number (index in the list of lines) that matches the
regex pattern.
This function can be used to return the starting line of the data-of-interest,
identified by the regex pattern, from a list of lines.
Parameters
----------
line_list : list
list of lines in the file returned by ``_readLinesFromFile()``
pattern : string
regex pattern that should be used to identify the line of interest
patAtStart : bool
if ``True``, match pattern at the beginning of line string (default)
Returns
-------
line_number : integer
line_number/ index in the list where the ``pattern`` first matched.
If no match could be found, the function returns ``None``
Notes
-----
If it is known that the pattern will be matched at the beginning, then
letting ``patAtStart==True`` is more efficient.
"""
pat = _re.compile(pattern) if patAtStart else _re.compile('.*'+pattern)
for line_num, line in enumerate(line_list):
if _re.match(pat, line.strip()):
return line_num
def _get2DList(line_list, start_line, number_of_lines,
startCol=None, endCol=None, stride=None):
"""returns a 2D list of data read between ``start_line`` and
``start_line + number_of_lines`` of a list
Parameters
----------
line_list : list
list of lines read from a file using ``_readLinesFromFile()``
start_line : integer
index of line_list
number_of_lines : integer
number of lines to read (number of lines which contain the 2D data)
startCol : integer, optional
the column number to start reading in each row (similar to list
slicing pattern). Default is `None`
endCol : integer, optional
the end column number upto which (but excluding `endCol`) to read
in each row (similar to list slicing pattern). Default is `None`
stride : integer, optional
stride along each column (similar to list slicing pattern).
Default is `None`
Returns
-------
data : list
data is a 2-d list of float type data read from the lines in
line_list
"""
data = []
end_line = start_line + number_of_lines - 1
for lineNum, row in enumerate(line_list):
if start_line <= lineNum <= end_line:
data.append([float(i) for i in row.split()][startCol:endCol:stride])
return data
def _transpose2Dlist(mat):
"""transpose a matrix that is constructed as a list of lists in pure
Python
The inner lists represents rows.
Parameters
----------
mat : list of lists (2-d list)
the 2D list represented as
| [[a_00, a_01, ..., a_0n],
| [a_10, a_11, ..., a_1n],
| ... ,
| [a_m0, a_m1, ..., a_mn]]
Returns
-------
matT : list of lists (2-d list)
transposed of ``mat``
Notes
-----
The function assumes that all the inner lists are of the same lengths.
It doesn't do any error checking.
"""
cols = len(mat[0])
matT = []
for i in range(cols):
matT.append([row[i] for row in mat])
return matT
def _getRePatPosInLineList(line_list, re_pattern):
"""internal helper function for retrieving the positions of specific
patterns in a list of lines read from a file
Parameters
----------
line_list : list
list of lines read from a file
re_pattern : string
regular expression pattern to loop for
Returns
-------
positions : list
the list containing the position of the pattern in the input list
"""
positions = []
for line_number, line in enumerate(line_list):
if _re.search(re_pattern, line):
positions.append(line_number)
return positions
def _txtAndSettingsToUse(self, txtFile, settingsFile, anaType):
"""internal helper function for use by zGet type of functions
that call ``zGetTextFile()``, to decide the type of settings
file and settings flag to use
Parameters
----------
self : object
pyzdde link object
txtFile : string
text file that may have been passed by the user
settingsFile : string
settings file that may have been passed by the user
anaType : string
3-letter analysis code
Returns
-------
cfgFile : string
full name and path of the configuration/ settings file to
use for calling ``zGetTextFile()``
getTextFlag : integer
flag to be used for calling ``zGetTextFile()``
"""
# note to the developer -- maintain exactly same keys in both
# txtFileDict and anaCfgDict. Note that some analysis have common
# txt file and settings files associated with them. Of course they
# may be changed if required in future.
txtFileDict = {'Pop':'popData.txt',
'Hcs':'huygensPsfCSAnalysisFile.txt', # Huygens PSF cross-section
'Hps':'huygensPsfAnalysisFile.txt', # Huygens PSF
'Hmf':'huygensMtfAnalysisFile.txt', # Huygens MTF
'Pcs':'fftPsfCSAnalysisFile.txt', # FFT PSF cross-section
'Fps':'fftPsfAnalysisFile.txt', # FFT PSF
'Mtf':'fftMtfAnalysisFile.txt', # FFT MTF
'Sei':'seidelAberrationFile.txt', # Seidel aberration coefficients
'Pre':'prescriptionFile.txt', # Prescription
'Sim':'imageSimulationAnalysisFile.txt', # Image Simulation
'Zfr':'zernikeFringeAnalysisFile.txt', # Zernike Fringe coefficients
'Zst':'zernikeStandardAnalysisFile.txt', # Zernike Standard coefficients
'Zat':'zernikeAnnularAnalysisFile.txt', # Zernike Annular coefficients
'Dvw':'detectorViewerFile.txt', # NSC detector viewer
}
anaCfgDict = {'Pop':'_pyzdde_POP.CFG',
'Hcs':'_pyzdde_HUYGENSPSFCS.CFG',
'Hps':'_pyzdde_HUYGENSPSF.CFG',
'Hmf':'_pyzdde_HUYGENSMTF.CFG',
'Pcs':'_pyzdde_FFTPSFCS.CFG',
'Fps':'_pyzdde_FFTPSF.CFG',
'Mtf':'_pyzdde_FFTMTF.CFG',
'Sei':'None',
'Pre':'None', # change this to the appropriate file when implemented
'Sim':'_pyzdde_IMGSIM.CFG',
'Zfr':'_pyzdde_ZFR.CFG', # Note that currently MODIFYSETTINGS
'Zst':'_pyzdde_ZST.CFG', # is not supported of Aberration
'Zat':'_pyzdde_ZAT.CFG', # coefficients by Zemax extensions
'Dvw':'_pyzdde_DVW.CFG', # NSC detector viewer
}
assert txtFileDict.keys() == anaCfgDict.keys(), \
"Dicts don't have matching keys" # for code integrity
assert anaType in anaCfgDict
#fdir = _os.path.dirname(_os.path.realpath(__file__))
fdir = _os.path.dirname(self.zGetFile())
if txtFile != None:
textFileName = txtFile
else:
textFileName = _os.path.join(fdir, txtFileDict[anaType])
if settingsFile:
cfgFile = settingsFile
getTextFlag = 1
else:
f = _os.path.splitext(self.zGetFile())[0] + anaCfgDict[anaType]
if _checkFileExist(f): # use "*_pyzdde_XXX.CFG" settings file
cfgFile = f
getTextFlag = 1
else: # use default settings file
cfgFile = ''
getTextFlag = 0
return textFileName, cfgFile, getTextFlag
#
#
# usage guard: this module is a library, not a script
if __name__ == "__main__":
    print("Please import this module as 'import pyzdde.zdde as pyz' ")
    _sys.exit(0)
| indranilsinharoy/PyZDDE | pyzdde/zdde.py | Python | mit | 493,037 |
import _plotly_utils.basevalidators
class SizeValidator(_plotly_utils.basevalidators.NumberValidator):
    """Number validator for ``scatterternary.marker.colorbar.tickfont.size``.

    Constrains the font size to values >= 1 and routes edits through the
    'colorbars' edit pathway (both defaults can be overridden via kwargs).
    """

    def __init__(
        self,
        plotly_name="size",
        parent_name="scatterternary.marker.colorbar.tickfont",
        **kwargs
    ):
        # pop caller-supplied overrides before forwarding the rest
        edit_type = kwargs.pop("edit_type", "colorbars")
        min_value = kwargs.pop("min", 1)
        role = kwargs.pop("role", "style")
        super(SizeValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            min=min_value,
            role=role,
            **kwargs
        )
| plotly/python-api | packages/python/plotly/plotly/validators/scatterternary/marker/colorbar/tickfont/_size.py | Python | mit | 546 |
from django.apps import AppConfig
class ClothesConfig(AppConfig):
    """Django AppConfig for the ``clothes`` application."""
    name = 'clothes'
| Meerkat007/Clothes-Shop-Website | server/clothes/apps.py | Python | mit | 89 |
# -*- coding: utf-8 -*-
from flask import jsonify
from flask.ext.bcrypt import generate_password_hash
from itsdangerous import (TimedJSONWebSignatureSerializer
as Serializer, BadSignature, SignatureExpired)
from app import db, app
class User(db.Model):
"""
Represent a user in database
"""
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(80), unique=True, nullable=False, index=True)
email = db.Column(db.String(120), unique=True, nullable=False)
password = db.Column(db.String(120), nullable=False)
def __init__(self, username, email, password):
    """Create a user; the password is stored only as a bcrypt hash."""
    self.username = username
    self.email = email
    # never store the plaintext password; generate_password_hash comes
    # from flask-bcrypt (imported at module level)
    self.password = generate_password_hash(password)
def __repr__(self):
    """Return a debug-friendly representation showing the username."""
    template = '<User %s>'
    return template % self.username
def generate_token_key(self):
    """Return a signed, time-limited token embedding this username."""
    s = Serializer(app.config['SECRET_KEY'])
    return s.dumps({'username':self.username})
@staticmethod
def verify_auth_token(token):
    """Return the User encoded in ``token``, or None if invalid.

    NOTE(review): in itsdangerous, SignatureExpired subclasses
    BadSignature, so expired tokens should also return None here --
    confirm that is the intended behavior.
    """
    s = Serializer(app.config['SECRET_KEY'])
    try:
        data = s.loads(token)
    except BadSignature:
        return None # invalid token
    # Get user by username. If user doesn't exists None is returned
    user = User.query.filter_by(username = (data['username'])).first()
    return user
def to_dict(self):
return {
'id': self.id,
'username': self.username,
'email': self.email
} | gardyna/WalkerAppGame | NewAPI/app/mod_user/models.py | Python | mit | 1,496 |
import numpy as np
import glob
import math
import matplotlib.image as mpimg
import cv2
from lane_assist_pipeline_ops import LaneAssistOp, CameraCalibrationOp, PlotImageOp
# Import everything needed to edit/save/watch video clips
from moviepy.editor import VideoFileClip
# This constant ultimately contributes to deriving a given
# period when computing SMA and EMA for line noise smoothing
FPS = 30
class LaneLine:
    """A line segment (x1, y1) -> (x2, y2) belonging to a lane boundary."""

    def __init__(self, x1, y1, x2, y2):
        self.x1, self.y1 = x1, y1
        self.x2, self.y2 = x2, y2

    def angle(self):
        """Segment orientation in degrees (atan2 of the endpoint deltas)."""
        dy = self.y2 - self.y1
        dx = self.x2 - self.x1
        return math.atan2(dy, dx) * 180.0 / np.pi

    def slope(self):
        """Rise over run (raises ZeroDivisionError for vertical segments)."""
        return (self.y2 - self.y1) / (self.x2 - self.x1)

    def y_intercept(self):
        """``b`` in ``y = m*x + b``, computed from the first endpoint."""
        return self.y1 - self.slope() * self.x1

    def __str__(self):
        return "(x1, y1, x2, y2, slope, y_intercept, angle) == (%s, %s, %s, %s, %s, %s, %s)" % (
            self.x1, self.y1, self.x2, self.y2, self.slope(), self.y_intercept(), self.angle())
class PipelineRunner:
    """Drives the lane-detection pipeline over single images or videos.

    Wraps a camera-calibration operation and keeps per-frame state: the
    frame counter, the history of left/right lane polynomial
    coefficients, and the EMA accumulators used for smoothing detected
    lane lines between frames.
    """

    def __init__(self, calibration_op, ema_period_alpha=0.65):
        self.current_frame = 0
        self.__calibration_op = calibration_op
        # per-side polynomial coefficient history (3 coefficient rows)
        # and their exponential-moving-average accumulators
        self.l_poly_coefficients = np.array([[],[],[]])
        self.l_ema = np.array([0,0,0])
        self.r_poly_coefficients = np.array([[],[],[]])
        self.r_ema = np.array([0,0,0])
        # EMA period expressed in frames: a fraction of one second of video
        self.ema_fps_period = ema_period_alpha * FPS

    def process_video(self, src_video_path, dst_video_path, audio=False):
        """Run the pipeline over every frame of ``src_video_path``."""
        self.current_frame = 0
        VideoFileClip(src_video_path).fl_image(self.process_image).write_videofile(dst_video_path, audio=audio)

    def process_image(self, image):
        """Process one RGB frame and return the annotated result.

        Writes the incoming and outgoing frames to ``processed_images/``
        for debugging.
        """
        self.current_frame += 1
        cv2.imwrite('processed_images/frame_{}_in.jpg'.format(self.current_frame), cv2.cvtColor(image, cv2.COLOR_RGB2BGR))
        result = LaneAssistOp(
            image,
            self.__calibration_op,
            margin=100,
            kernel_size=15,
            sobelx_thresh=(20,100),
            sobely_thresh=(20,100),
            mag_grad_thresh=(20,250),
            dir_grad_thresh=(0.3, 1.3)
        ).perform().output()
        cv2.imwrite('processed_images/frame_{}_out.jpg'.format(self.current_frame), cv2.cvtColor(result, cv2.COLOR_RGB2BGR))
        # PlotImageOp(result, title="{} - frame {}".format(self.__class__.__name__, self.current_frame)).perform()
        return result

    def draw_lane(self, undistorted, binary_warped, fit_leftx, fit_rightx, fity, warper_op):
        """Paint the detected lane polygon back onto the undistorted frame."""
        # Create an image to draw the lines on
        warp_zero = np.zeros_like(binary_warped).astype(np.uint8)
        color_warp = np.dstack((warp_zero, warp_zero, warp_zero))

        # Recast the x and y points into usable format for cv2.fillPoly()
        pts_left = np.array([np.transpose(np.vstack([fit_leftx, fity]))])
        pts_right = np.array([np.flipud(np.transpose(np.vstack([fit_rightx, fity])))])
        pts = np.hstack((pts_left, pts_right))

        # Draw the lane onto the warped blank image
        cv2.fillPoly(color_warp, np.int_([pts]), (0, 195, 255))

        # Warp the blank back to original image space using inverse perspective matrix (Minv)
        newwarp = cv2.warpPerspective(color_warp, warper_op['Minv'], (binary_warped.shape[1], binary_warped.shape[0]))
        # Combine the result with the original image
        result = cv2.addWeighted(undistorted, 1, newwarp, 0.5, 0)
        # bug fix: current_frame is an int, and concatenating it onto a
        # str raised TypeError; format it into the filename instead
        cv2.imwrite('detected_lanes/frame_{}.jpg'.format(self.current_frame), cv2.cvtColor(result, cv2.COLOR_RGB2BGR))

    def compute_ema(self, measurement, all_measurements, curr_ema):
        """Exponential moving average of ``measurement`` given its history.

        Falls back to the simple moving average until the history holds
        at least ``ema_fps_period`` samples. ``all_measurements`` must be
        non-empty.
        """
        sma = sum(all_measurements) / (len(all_measurements))
        if len(all_measurements) < self.ema_fps_period:
            # let's just use SMA until our EMA buffer is filled
            return sma
        multiplier = 2 / float(len(all_measurements) + 1)
        ema = (measurement - curr_ema) * multiplier + curr_ema
        return ema

    @staticmethod
    def compute_least_squares_line(lines):
        """Fit ``y = a*x + b`` to all endpoints of the given LaneLines.

        Returns the (a, b) pair of the ordinary least-squares line
        through every (x1, y1) and (x2, y2) endpoint.
        """
        all_x1 = []
        all_y1 = []
        all_x2 = []
        all_y2 = []
        for line in lines:
            x1, y1, x2, y2, angle, m, b = line.x1, line.y1, line.x2, line.y2, line.angle(), line.slope(), line.y_intercept()
            all_x1.append(x1)
            all_y1.append(y1)
            all_x2.append(x2)
            all_y2.append(y2)
        all_x = (all_x1 + all_x2)
        all_y = (all_y1 + all_y2)
        n = len(all_x)
        # closed-form OLS slope (a) and intercept (b)
        all_x_y_dot_prod = sum([xi * yi for xi, yi in zip(all_x, all_y)])
        all_x_squares = sum([xi ** 2 for xi in all_x])
        a = ((n * all_x_y_dot_prod) - (sum(all_x) * sum(all_y))) / ((n * all_x_squares) - (sum(all_x) ** 2))
        b = ((sum(all_y) * all_x_squares) - (sum(all_x) * all_x_y_dot_prod)) / ((n * all_x_squares) - (sum(all_x) ** 2))
        return a, b
if __name__ == '__main__':
calibration_images = glob.glob('camera_cal/calibration*.jpg')
calibration_op = CameraCalibrationOp(calibration_images=calibration_images, x_inside_corners=9, y_inside_corners=6).perform()
pipeline_runner = PipelineRunner(calibration_op)
# result = pipeline_runner.process_image(mpimg.imread('test_images/frame_9_in.jpg'))
pipeline_runner.process_video('project_video.mp4', 'project_video_final.mp4') | matthewzimmer/CarND-AdvancedLaneLines-P4 | pipeline_runner.py | Python | mit | 5,364 |
# -*- coding: utf-8 -*-
from collections import namedtuple
import hashlib
import io
import struct
import zlib
from . import sbon
from .btreedb5 import BTreeDB5
from .sbasset6 import SBAsset6
__version__ = '1.0.0'
# Override range with xrange when running Python 2.x.
try:
range = xrange
except:
pass
# Utility descriptor for memoized properties.
class lazyproperty(object):
    """Descriptor that computes a property once and caches the result.

    The value is stored on the owning instance under a private attribute
    name derived from the getter's name. Subsequent reads return the
    cached value; assignment overwrites the cache; deletion clears it so
    the next access recomputes.
    """

    def __init__(self, fget):
        self.fget = fget
        self.__doc__ = fget.__doc__
        self.propname = '_lazyproperty_{}'.format(self.fget.__name__)

    def __get__(self, obj, objtype=None):
        if obj is None:
            # accessed on the class itself: expose the descriptor
            return self
        try:
            return getattr(obj, self.propname)
        except AttributeError:
            value = self.fget(obj)
            setattr(obj, self.propname, value)
            return value

    def __set__(self, obj, value):
        setattr(obj, self.propname, value)

    def __delete__(self, obj):
        if hasattr(obj, self.propname):
            delattr(obj, self.propname)
class CelestialChunks(BTreeDB5):
    """BTreeDB5 database of celestial chunks (header name 'Celestial2').

    Records are addressed by a string key which is hashed with SHA-256
    before lookup; stored values are zlib-compressed versioned JSON.
    """
    def get(self, key):
        """Return the versioned JSON stored under the (unhashed) key."""
        # keys are stored as the SHA-256 digest of the UTF-8 key string
        key = hashlib.sha256(key.encode('utf-8')).digest()
        data = super(CelestialChunks, self).get(key)
        data = zlib.decompress(data)
        stream = io.BytesIO(data)
        return read_versioned_json(stream)

    def read_header(self):
        """Read the DB header and verify it belongs to celestial data."""
        super(CelestialChunks, self).read_header()
        assert self.name == 'Celestial2', 'Invalid header'
# One world tile as unpacked from region tile data; field order matches
# the binary layout consumed by World.read_tile.
Tile = namedtuple('Tile', [
    'foreground_material',
    'foreground_hue_shift',
    'foreground_variant',
    'foreground_mod',
    'foreground_mod_hue_shift',
    'background_material',
    'background_hue_shift',
    'background_variant',
    'background_mod',
    'background_mod_hue_shift',
    'liquid',
    'liquid_level',
    'liquid_pressure',
    'liquid_infinite',
    'collision',
    'dungeon_id',
    'biome',
    'biome_2',
    'indestructible',
])

# (name, version, data) triple as produced by read_versioned_json.
VersionedJSON = namedtuple('VersionedJSON', ['name', 'version', 'data'])
class World(BTreeDB5):
@lazyproperty
def info(self):
    """WorldInfo wrapper around this world's metadata (lazily cached)."""
    # ensure the metadata record has been loaded from the database
    if not hasattr(self, 'metadata'):
        self.read_metadata()
    return WorldInfo(self.metadata)
def get(self, layer, x, y):
    """Return the decompressed record at (layer, x, y).

    World keys are one byte for the layer followed by big-endian
    unsigned 16-bit X and Y coordinates.
    """
    # World keys are based on a layer followed by X and Y coordinates.
    data = super(World, self).get(struct.pack('>BHH', layer, x, y))
    return zlib.decompress(data)
def get_all_regions_with_tiles(self):
    """
    Generator which yields a set of (rx, ry) tuples which describe
    all regions for which the world has tile data
    """
    for key in self.get_all_keys():
        (layer, rx, ry) = struct.unpack('>BHH', key)
        # layer 1 holds tile data (see get_tiles)
        if layer == 1:
            yield (rx, ry)
def get_entities(self, x, y):
stream = io.BytesIO(self.get(2, x, y))
count = sbon.read_varint(stream)
return [read_versioned_json(stream) for _ in range(count)]
def get_entity_uuid_coords(self, uuid):
"""
Returns the coordinates of the given entity UUID inside this world, or
`None` if the UUID is not found.
"""
if uuid in self._entity_to_region_map:
coords = self._entity_to_region_map[uuid]
entities = self.get_entities(*coords)
for entity in entities:
if 'uniqueId' in entity.data and entity.data['uniqueId'] == uuid:
return tuple(entity.data['tilePosition'])
return None
def get_tiles(self, x, y):
stream = io.BytesIO(self.get(1, x, y))
# TODO: Figure out what this means.
unknown = stream.read(3)
# There are 1024 (32x32) tiles in a region.
return [self.read_tile(stream) for _ in range(1024)]
def read_header(self):
super(World, self).read_header()
assert self.name == 'World4', 'Not a World4 file'
def read_metadata(self):
# World metadata is held at a special layer/x/y combination.
stream = io.BytesIO(self.get(0, 0, 0))
self.width, self.height = struct.unpack('>ii', stream.read(8))
name, version, data = read_versioned_json(stream)
assert name == 'WorldMetadata', 'Invalid world data'
self.metadata = data
self.metadata_version = version
@classmethod
def read_tile(cls, stream):
values = struct.unpack('>hBBhBhBBhBBffBBHBB?x', stream.read(31))
return Tile(*values)
@lazyproperty
def _entity_to_region_map(self):
"""
A dict whose keys are the UUIDs (or just IDs, in some cases) of
entities, and whose values are the `(rx, ry)` coordinates in which that
entity can be found. This can be used to easily locate particular
entities inside the world.
"""
entity_to_region = {}
for key in self.get_all_keys():
layer, rx, ry = struct.unpack('>BHH', key)
if layer != 4:
continue
stream = io.BytesIO(self.get(layer, rx, ry))
num_entities = sbon.read_varint(stream)
for _ in range(num_entities):
uuid = sbon.read_string(stream)
if uuid in entity_to_region:
raise ValueError('Duplicate UUID {}'.format(uuid))
entity_to_region[uuid] = (rx, ry)
return entity_to_region
class WorldInfo(object):
"""
Convenience class to provide some information about a World without having
to know which keys to look at.
"""
def __init__(self, metadata):
self.metadata = metadata
@property
def biomes(self):
"""
Returns a set of all biomes found in the world. This should be a
complete list even if the world isn't fully-explored.
"""
return self._worldParameters.biomes
@property
def coords(self):
"""
The coordinates of the system. The first two elements of the tuple will
be the `(x, y)` coordinates in the universe map, and the third is
largely useless.
"""
return self._celestialParameters.coords
@property
def description(self):
"""
A description of the world - will include a "Tier" ranking for
planets/moons.
"""
return self._celestialParameters.description
@property
def dungeons(self):
"""
Returns a set of all dungeons found in the world. This should be a
complete list even if the world isn't fully-explored.
"""
return self._worldParameters.dungeons
@property
def name(self):
"""
The name of the world. Note that this will often include coloration
markup.
"""
return self._celestialParameters.name
@lazyproperty
def size(self):
"""
The size of the world, as a tuple.
"""
return tuple(self.metadata.get('worldTemplate', {})['size'])
@property
def world_biomes(self):
"""
A set of main biomes which define the world as a whole. This will be a
much shorter list than the full list of biomes found in the world --
generally only a couple of entries.
"""
return self._celestialParameters.biomes
@lazyproperty
def _celestialParameters(self):
t = namedtuple('celestialParameters', 'name description coords biomes')
name = None
description = None
coords = None
biomes = set()
cp = self.metadata.get('worldTemplate', {}).get('celestialParameters')
if cp:
name = cp.get('name')
if 'parameters' in cp:
description = cp['parameters'].get('description')
if 'terrestrialType' in cp['parameters']:
biomes.update(cp['parameters']['terrestrialType'])
if 'coordinate' in cp and 'location' in cp['coordinate']:
coords = tuple(cp['coordinate']['location'])
return t(name, description, coords, biomes)
@lazyproperty
def _worldParameters(self):
t = namedtuple('worldParameters', 'biomes dungeons')
biomes = set()
dungeons = set()
wp = self.metadata.get('worldTemplate', {}).get('worldParameters')
if wp:
SCAN_LAYERS = [
('atmosphereLayer', False),
('coreLayer', False),
('spaceLayer', False),
('subsurfaceLayer', False),
('surfaceLayer', False),
('undergroundLayers', True),
]
for name, is_list in SCAN_LAYERS:
if name not in wp:
continue
layers = wp[name] if is_list else [wp[name]]
for layer in layers:
dungeons.update(layer['dungeons'])
for label in ['primaryRegion', 'primarySubRegion']:
biomes.add(layer[label]['biome'])
for label in ['secondaryRegions', 'secondarySubRegions']:
for inner_region in layer[label]:
biomes.add(inner_region['biome'])
return t(biomes, dungeons)
def read_sbvj01(stream):
assert stream.read(6) == b'SBVJ01', 'Invalid header'
return read_versioned_json(stream)
def read_versioned_json(stream):
name = sbon.read_string(stream)
# The object only has a version if the following bool is true.
if stream.read(1) == b'\x00':
version = None
else:
version, = struct.unpack('>i', stream.read(4))
data = sbon.read_dynamic(stream)
return VersionedJSON(name, version, data)
def write_sbvj01(stream, vj):
stream.write(b'SBVJ01')
write_versioned_json(stream, vj)
def write_versioned_json(stream, vj):
sbon.write_string(stream, vj.name)
if vj.version is None:
stream.write(struct.pack('>b', 0))
else:
stream.write(struct.pack('>bi', 1, vj.version))
sbon.write_dynamic(stream, vj.data)
| blixt/py-starbound | starbound/__init__.py | Python | mit | 9,935 |
#!/usr/bin/env python
from nose.tools import *
import networkx
from test_graph import BaseGraphTester, BaseAttrGraphTester, TestGraph
class BaseDiGraphTester(BaseGraphTester):
def test_has_successor(self):
G=self.K3
assert_equal(G.has_successor(0,1),True)
assert_equal(G.has_successor(0,-1),False)
def test_successors(self):
G=self.K3
assert_equal(sorted(G.successors(0)),[1,2])
assert_raises((KeyError,networkx.NetworkXError), G.successors,-1)
def test_successors_iter(self):
G=self.K3
assert_equal(sorted(G.successors_iter(0)),[1,2])
assert_raises((KeyError,networkx.NetworkXError), G.successors_iter,-1)
def test_has_predecessor(self):
G=self.K3
assert_equal(G.has_predecessor(0,1),True)
assert_equal(G.has_predecessor(0,-1),False)
def test_predecessors(self):
G=self.K3
assert_equal(sorted(G.predecessors(0)),[1,2])
assert_raises((KeyError,networkx.NetworkXError), G.predecessors,-1)
def test_predecessors_iter(self):
G=self.K3
assert_equal(sorted(G.predecessors_iter(0)),[1,2])
assert_raises((KeyError,networkx.NetworkXError), G.predecessors_iter,-1)
def test_edges(self):
G=self.K3
assert_equal(sorted(G.edges()),[(0,1),(0,2),(1,0),(1,2),(2,0),(2,1)])
assert_equal(sorted(G.edges(0)),[(0,1),(0,2)])
assert_raises((KeyError,networkx.NetworkXError), G.edges,-1)
def test_edges_iter(self):
G=self.K3
assert_equal(sorted(G.edges_iter()),
[(0,1),(0,2),(1,0),(1,2),(2,0),(2,1)])
assert_equal(sorted(G.edges_iter(0)),[(0,1),(0,2)])
def test_edges_data(self):
G=self.K3
assert_equal(sorted(G.edges(data=True)),
[(0,1,{}),(0,2,{}),(1,0,{}),(1,2,{}),(2,0,{}),(2,1,{})])
assert_equal(sorted(G.edges(0,data=True)),[(0,1,{}),(0,2,{})])
assert_raises((KeyError,networkx.NetworkXError), G.edges,-1)
def test_out_edges(self):
G=self.K3
assert_equal(sorted(G.out_edges()),
[(0,1),(0,2),(1,0),(1,2),(2,0),(2,1)])
assert_equal(sorted(G.out_edges(0)),[(0,1),(0,2)])
assert_raises((KeyError,networkx.NetworkXError), G.out_edges,-1)
def test_out_edges_iter(self):
G=self.K3
assert_equal(sorted(G.out_edges_iter()),
[(0,1),(0,2),(1,0),(1,2),(2,0),(2,1)])
assert_equal(sorted(G.edges_iter(0)),[(0,1),(0,2)])
def test_out_edges_dir(self):
G=self.P3
assert_equal(sorted(G.out_edges()),[(0, 1), (1, 2)])
assert_equal(sorted(G.out_edges(0)),[(0, 1)])
assert_equal(sorted(G.out_edges(2)),[])
def test_out_edges_iter_dir(self):
G=self.P3
assert_equal(sorted(G.out_edges_iter()),[(0, 1), (1, 2)])
assert_equal(sorted(G.out_edges_iter(0)),[(0, 1)])
assert_equal(sorted(G.out_edges_iter(2)),[])
def test_in_edges_dir(self):
G=self.P3
assert_equal(sorted(G.in_edges()),[(0, 1), (1, 2)])
assert_equal(sorted(G.in_edges(0)),[])
assert_equal(sorted(G.in_edges(2)),[(1,2)])
def test_in_edges_iter_dir(self):
G=self.P3
assert_equal(sorted(G.in_edges_iter()),[(0, 1), (1, 2)])
assert_equal(sorted(G.in_edges_iter(0)),[])
assert_equal(sorted(G.in_edges_iter(2)),[(1,2)])
def test_degree(self):
G=self.K3
assert_equal(list(G.degree().values()),[4,4,4])
assert_equal(G.degree(),{0:4,1:4,2:4})
assert_equal(G.degree(0),4)
assert_equal(G.degree([0]),{0:4})
assert_raises((KeyError,networkx.NetworkXError), G.degree,-1)
def test_degree_iter(self):
G=self.K3
assert_equal(list(G.degree_iter()),[(0,4),(1,4),(2,4)])
assert_equal(dict(G.degree_iter()),{0:4,1:4,2:4})
assert_equal(list(G.degree_iter(0)),[(0,4)])
assert_equal(list(G.degree_iter(iter([0]))),[(0,4)]) #run through iterator
def test_in_degree(self):
G=self.K3
assert_equal(list(G.in_degree().values()),[2,2,2])
assert_equal(G.in_degree(),{0:2,1:2,2:2})
assert_equal(G.in_degree(0),2)
assert_equal(G.in_degree([0]),{0:2})
assert_equal(G.in_degree(iter([0])),{0:2})
assert_raises((KeyError,networkx.NetworkXError), G.in_degree,-1)
def test_in_degree_iter(self):
G=self.K3
assert_equal(list(G.in_degree_iter()),[(0,2),(1,2),(2,2)])
assert_equal(dict(G.in_degree_iter()),{0:2,1:2,2:2})
assert_equal(list(G.in_degree_iter(0)),[(0,2)])
assert_equal(list(G.in_degree_iter(iter([0]))),[(0,2)]) #run through iterator
def test_in_degree_iter_weighted(self):
G=self.K3
G.add_edge(0,1,weight=0.3,other=1.2)
assert_equal(list(G.in_degree_iter(weight='weight')),[(0,2),(1,1.3),(2,2)])
assert_equal(dict(G.in_degree_iter(weight='weight')),{0:2,1:1.3,2:2})
assert_equal(list(G.in_degree_iter(1,weight='weight')),[(1,1.3)])
assert_equal(list(G.in_degree_iter(weight='other')),[(0,2),(1,2.2),(2,2)])
assert_equal(dict(G.in_degree_iter(weight='other')),{0:2,1:2.2,2:2})
assert_equal(list(G.in_degree_iter(1,weight='other')),[(1,2.2)])
assert_equal(list(G.in_degree_iter(iter([1]),weight='other')),[(1,2.2)])
def test_out_degree(self):
G=self.K3
assert_equal(list(G.out_degree().values()),[2,2,2])
assert_equal(G.out_degree(),{0:2,1:2,2:2})
assert_equal(G.out_degree(0),2)
assert_equal(G.out_degree([0]),{0:2})
assert_equal(G.out_degree(iter([0])),{0:2})
assert_raises((KeyError,networkx.NetworkXError), G.out_degree,-1)
def test_out_degree_iter_weighted(self):
G=self.K3
G.add_edge(0,1,weight=0.3,other=1.2)
assert_equal(list(G.out_degree_iter(weight='weight')),[(0,1.3),(1,2),(2,2)])
assert_equal(dict(G.out_degree_iter(weight='weight')),{0:1.3,1:2,2:2})
assert_equal(list(G.out_degree_iter(0,weight='weight')),[(0,1.3)])
assert_equal(list(G.out_degree_iter(weight='other')),[(0,2.2),(1,2),(2,2)])
assert_equal(dict(G.out_degree_iter(weight='other')),{0:2.2,1:2,2:2})
assert_equal(list(G.out_degree_iter(0,weight='other')),[(0,2.2)])
assert_equal(list(G.out_degree_iter(iter([0]),weight='other')),[(0,2.2)])
def test_out_degree_iter(self):
G=self.K3
assert_equal(list(G.out_degree_iter()),[(0,2),(1,2),(2,2)])
assert_equal(dict(G.out_degree_iter()),{0:2,1:2,2:2})
assert_equal(list(G.out_degree_iter(0)),[(0,2)])
assert_equal(list(G.out_degree_iter(iter([0]))),[(0,2)])
def test_size(self):
G=self.K3
assert_equal(G.size(),6)
assert_equal(G.number_of_edges(),6)
def test_to_undirected_reciprocal(self):
G=self.Graph()
G.add_edge(1,2)
assert_true(G.to_undirected().has_edge(1,2))
assert_false(G.to_undirected(reciprocal=True).has_edge(1,2))
G.add_edge(2,1)
assert_true(G.to_undirected(reciprocal=True).has_edge(1,2))
def test_reverse_copy(self):
G=networkx.DiGraph([(0,1),(1,2)])
R=G.reverse()
assert_equal(sorted(R.edges()),[(1,0),(2,1)])
R.remove_edge(1,0)
assert_equal(sorted(R.edges()),[(2,1)])
assert_equal(sorted(G.edges()),[(0,1),(1,2)])
def test_reverse_nocopy(self):
G=networkx.DiGraph([(0,1),(1,2)])
R=G.reverse(copy=False)
assert_equal(sorted(R.edges()),[(1,0),(2,1)])
R.remove_edge(1,0)
assert_equal(sorted(R.edges()),[(2,1)])
assert_equal(sorted(G.edges()),[(2,1)])
class BaseAttrDiGraphTester(BaseDiGraphTester,BaseAttrGraphTester):
pass
class TestDiGraph(BaseAttrDiGraphTester,TestGraph):
"""Tests specific to dict-of-dict-of-dict digraph data structure"""
def setUp(self):
self.Graph=networkx.DiGraph
# build dict-of-dict-of-dict K3
ed1,ed2,ed3,ed4,ed5,ed6 = ({},{},{},{},{},{})
self.k3adj={0: {1: ed1, 2: ed2}, 1: {0: ed3, 2: ed4}, 2: {0: ed5, 1:ed6}}
self.k3edges=[(0, 1), (0, 2), (1, 2)]
self.k3nodes=[0, 1, 2]
self.K3=self.Graph()
self.K3.adj = self.K3.succ = self.K3.edge = self.k3adj
self.K3.pred={0: {1: ed3, 2: ed5}, 1: {0: ed1, 2: ed6}, 2: {0: ed2, 1:ed4}}
ed1,ed2 = ({},{})
self.P3=self.Graph()
self.P3.adj={0: {1: ed1}, 1: {2: ed2}, 2: {}}
self.P3.succ=self.P3.adj
self.P3.pred={0: {}, 1: {0: ed1}, 2: {1: ed2}}
self.K3.node={}
self.K3.node[0]={}
self.K3.node[1]={}
self.K3.node[2]={}
self.P3.node={}
self.P3.node[0]={}
self.P3.node[1]={}
self.P3.node[2]={}
def test_data_input(self):
G=self.Graph(data={1:[2],2:[1]}, name="test")
assert_equal(G.name,"test")
assert_equal(sorted(G.adj.items()),[(1, {2: {}}), (2, {1: {}})])
assert_equal(sorted(G.succ.items()),[(1, {2: {}}), (2, {1: {}})])
assert_equal(sorted(G.pred.items()),[(1, {2: {}}), (2, {1: {}})])
def test_add_edge(self):
G=self.Graph()
G.add_edge(0,1)
assert_equal(G.adj,{0: {1: {}}, 1: {}})
assert_equal(G.succ,{0: {1: {}}, 1: {}})
assert_equal(G.pred,{0: {}, 1: {0:{}}})
G=self.Graph()
G.add_edge(*(0,1))
assert_equal(G.adj,{0: {1: {}}, 1: {}})
assert_equal(G.succ,{0: {1: {}}, 1: {}})
assert_equal(G.pred,{0: {}, 1: {0:{}}})
def test_add_edges_from(self):
G=self.Graph()
G.add_edges_from([(0,1),(0,2,{'data':3})],data=2)
assert_equal(G.adj,{0: {1: {'data':2}, 2: {'data':3}}, 1: {}, 2: {}})
assert_equal(G.succ,{0: {1: {'data':2}, 2: {'data':3}}, 1: {}, 2: {}})
assert_equal(G.pred,{0: {}, 1: {0: {'data':2}}, 2: {0: {'data':3}}})
assert_raises(networkx.NetworkXError, G.add_edges_from,[(0,)]) # too few in tuple
assert_raises(networkx.NetworkXError, G.add_edges_from,[(0,1,2,3)]) # too many in tuple
assert_raises(TypeError, G.add_edges_from,[0]) # not a tuple
def test_remove_edge(self):
G=self.K3
G.remove_edge(0,1)
assert_equal(G.succ,{0:{2:{}},1:{0:{},2:{}},2:{0:{},1:{}}})
assert_equal(G.pred,{0:{1:{}, 2:{}}, 1:{2:{}}, 2:{0:{},1:{}}})
assert_raises((KeyError,networkx.NetworkXError), G.remove_edge,-1,0)
def test_remove_edges_from(self):
G=self.K3
G.remove_edges_from([(0,1)])
assert_equal(G.succ,{0:{2:{}},1:{0:{},2:{}},2:{0:{},1:{}}})
assert_equal(G.pred,{0:{1:{}, 2:{}}, 1:{2:{}}, 2:{0:{},1: {}}})
G.remove_edges_from([(0,0)]) # silent fail
| LumPenPacK/NetworkExtractionFromImages | win_build/nefi2_win_amd64_msvc_2015/site-packages/networkx/classes/tests/test_digraph.py | Python | bsd-2-clause | 10,754 |
#!/usr/bin/python
"""
getTweetsId.py: get tweets from Twitter by tweet ids
usage: ./getTweets < file
20170711 erikt(at)xs4all.nl
"""
import json
import operator
import re
import sys
import time
# import twitter library: https://github.com/sixohsix/twitter
# /usr/local/lib/python2.7/dist-packages/twitter-1.17.1-py2.7.egg
from twitter import *
# put your authentication keys for Twitter in the local file definitions.py
# like: token = "..."
import definitions
# constants
COMMAND = sys.argv[0]
# stop the program after this many warnings
MAXWARNINGS = 50
# maximum count for remaining Twitter requests
MAXREMAINING = 900
# group of Twitter REST api used
APIGROUP = "statuses"
# Twitter REST api used
API = "/"+APIGROUP+"/lookup"
# maximum number of tweets we can retrieve from Twitter with one call
MAXTWEETS = 100
def readIds():
ids = []
for line in sys.stdin:
line = line.rstrip()
fields = line.split()
for field in fields:
if field != "": ids.append(field)
return(ids)
def checkRemaining(t,apigroup,api):
# check the rate limit; if 0 then wait
rates = t.application.rate_limit_status(resources = apigroup)
remaining = rates['resources'][apigroup][api]['remaining']
# check if there are remaining calls
while remaining < 1:
# if not: wait one minute
time.sleep(60)
# fetch the value of the remaining count from Twitter
rates = t.application.rate_limit_status(resources = apigroup)
remaining = rates['resources'][apigroup][api]['remaining']
return(remaining)
def main():
# Twitter autnetication keys
token = definitions.token
token_secret = definitions.token_secret
consumer_key = definitions.consumer_key
consumer_secret = definitions.consumer_secret
# warning count
nbrOfWarnings = 0
# authenticate
t = Twitter(auth=OAuth(token, token_secret, consumer_key, consumer_secret))
# check if we can access the api at Twitter, wait if necessary
remaining = checkRemaining(t,APIGROUP,API)
# read tweet ids from stdin
ids = readIds()
# repeat for every user
while len(ids) > 0:
# set number of retrieved tweets: MAXTWEETS is default value
batch = ids[0:MAXTWEETS]
ids = ids[MAXTWEETS:]
batchString = ""
for b in batch:
if batchString != "": batchString += ","
batchString += b
results = []
try:
results = t.statuses.lookup(_id=batchString)
except TwitterHTTPError as e:
# if there is an error: report this
sys.stderr.write("error: "+str(e))
nbrOfWarnings += 1
# stop if there were too many errors
if nbrOfWarnings >= MAXWARNINGS:
sys.exit(COMMAND+": too many warnings: "+nbrOfWarnings+"\n")
# check if we have some results
# if not "statuses" in results:
# sys.exit(COMMAND+": incomplete results: aborting!\n")
# process results
for tweet in results:
# print the tweet in json format
print json.dumps(tweet,sort_keys=True)
# decrement remaining counter
remaining -= 1
# check if we can still access the api
if remaining < 1: remaining = checkRemaining(t,APIGROUP,API)
# default action on script call: run main function
if __name__ == "__main__":
sys.exit(main())
| online-behaviour/machine-learning | getTweetsId.py | Python | apache-2.0 | 3,407 |
#!/usr/bin/env python
#
# Copyright 2004,2005,2007,2008,2012,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
# GNU Radio example program to record a dial tone to a WAV file
from __future__ import unicode_literals
from gnuradio import gr
from gnuradio import blocks
from gnuradio.eng_arg import eng_float
from argparse import ArgumentParser
try:
from gnuradio import analog
except ImportError:
sys.stderr.write("Error: Program requires gr-analog.\n")
sys.exit(1)
class my_top_block(gr.top_block):
def __init__(self):
gr.top_block.__init__(self)
parser = ArgumentParser()
parser.add_argument("-r", "--sample-rate", type=eng_float, default=48000,
help="set sample rate to RATE (%(default)r)")
parser.add_argument("-N", "--samples", type=eng_float, required=True,
help="number of samples to record")
parser.add_argument('file_name', metavar='WAV-FILE',
help='Output WAV file name', nargs=1)
args = parser.parse_args()
sample_rate = int(args.sample_rate)
ampl = 0.1
src0 = analog.sig_source_f(sample_rate, analog.GR_SIN_WAVE, 350, ampl)
src1 = analog.sig_source_f(sample_rate, analog.GR_SIN_WAVE, 440, ampl)
head0 = blocks.head(gr.sizeof_float, int(args.samples))
head1 = blocks.head(gr.sizeof_float, int(args.samples))
dst = blocks.wavfile_sink(args.file_name[0], 2, int(args.sample_rate), 16)
self.connect(src0, head0, (dst, 0))
self.connect(src1, head1, (dst, 1))
if __name__ == '__main__':
try:
my_top_block().run()
except KeyboardInterrupt:
pass
| iohannez/gnuradio | gr-audio/examples/python/dial_tone_wav.py | Python | gpl-3.0 | 2,414 |
# -*- coding: utf-8 -*-
import Queue
import functools
import utils.builtin
import jobs.definition.cloudshare_zhilian
from jobs.definition.batchconvert import *
class Batchconvert(BatchconvertCloudshare,
jobs.definition.cloudshare_zhilian.Zhilian):
CVDB_PATH = 'convert/zhilian'
ORIGIN_CVDB_PATH = 'output/zhilian'
def jobgenerator(self, classify_id_list):
for classify_id in classify_id_list:
yamlname = classify_id + '.yaml'
yamldata = utils.builtin.load_yaml('output/zhilian/JOBTITLES', yamlname)
sorted_id = sorted(yamldata,
key = lambda cvid: yamldata[cvid]['peo'][-1],
reverse=True)
for cv_id in sorted_id:
if self.oristorage.existsraw(cv_id) and not self.cvstorage.exists(cv_id):
cv_info = yamldata[cv_id]
job_process = functools.partial(self.convertjob, cv_info)
yield job_process
def extract_details(self, uploaded_details, cv_content):
details = super(Batchconvert, self).extract_details(uploaded_details, cv_content)
details['date'] = uploaded_details['date']
return details
if __name__ == '__main__':
industry_yamls = ['160000', ##计算机/网络技术
'249', ##质量管理/测试经理(QA/QC经理)(质量管理/安全防护)
'250', ##质量管理/测试主管(QA/QC主管)(质量管理/安全防护)
'251', ##质量管理/测试工程师(QA/QC工程师)(质量管理/安全防护)
'732', ##机电工程师(工程机械)
'410', ##测试/可靠性工程师(电子/电气/半导体/仪器仪表)
'84' ##FAE现场应用工程师(电子/电气/半导体/仪器仪表)
]
instance = Batchconvert()
PROCESS_GEN = instance.jobgenerator(industry_yamls)
queue_saver = Queue.Queue(0)
t1 = ThreadConverter('1', queue_saver, PROCESS_GEN)
t2 = ThreadConverter('2', queue_saver, PROCESS_GEN)
t3 = ThreadConverter('3', queue_saver, PROCESS_GEN)
t4 = ThreadConverter('4', queue_saver, PROCESS_GEN)
saver = ThreadSaver('saver', queue_saver, instance.cvstorage)
saver.start()
t1.start()
t2.start()
t3.start()
t4.start()
| followcat/predator | jobs/definition/batchconvert_cloudshare_zhilian.py | Python | lgpl-3.0 | 2,388 |
import sys
import numpy
from python.src.data_processing.normalizer import Normalizer
from python.src.neural_networks.neural_network import NeuralNetwork
from python.src.neurons.receive_all_neuron import ReceiveAllNeuron
from python.src.neurons.output_only_neuron import OutputOnlyNeuron
from python.src.neurons.activation_functions.linear_activation import LinearActivation
class FeedForwardNN(NeuralNetwork):
def __init__(self, normalizer = Normalizer(), structure = [1, 5, 1],
has_bias_nodes = True, is_regression = False):
NeuralNetwork.__init__(self, normalizer)
self.structure = structure
self.has_bias_nodes = has_bias_nodes
self.is_regression = is_regression
self.bias_nodes = []
num_layers = len(structure)
last_layer_index = num_layers - 1
for layer_index in range(num_layers):
num_neurons = structure[layer_index]
if has_bias_nodes and layer_index != last_layer_index:
num_neurons += 1
self.add_layer(self.create_layer(
num_neurons, lambda neuron_index: self.neuron_constructor(
last_layer_index, layer_index, neuron_index)))
self.connect_neurons()
@property
def num_inputs(self):
input_count = NeuralNetwork.num_inputs.fget(self)
if self.has_bias_nodes:
input_count -= 1
return input_count
def neuron_constructor(self, last_layer_index, current_layer_index,
neuron_index):
if (self.has_bias_nodes and
current_layer_index != last_layer_index and
neuron_index == self.structure[current_layer_index]):
bias_node = OutputOnlyNeuron()
self.bias_nodes.append(bias_node)
return bias_node
#if self.is_regression and current_layer_index == last_layer_index:
# return ReceiveAllNeuron(activation=LinearActivation())
return ReceiveAllNeuron()
def connect_neurons(self):
# Connect the neurons
for iLayer in range(self.num_layers - 1):
for sender in self.layers[iLayer]:
for reciever in self.layers[iLayer + 1]:
sender.connect_to(reciever)
def prepair_for_input(self):
for neuron in self.neurons:
neuron.reset()
def receive_inputs(self, inputs):
if len(inputs) != self.num_inputs:
raise ValueError("Inputs lenth must equal num_inputs")
self.prepair_for_input()
if self.has_bias_nodes:
for bias in self.bias_nodes:
bias.receive_signal(1)
for i in range(len(inputs)):
input = inputs[i]
neuron = self.input_layer[i]
neuron.receive_signal(self.normalizer.norm_input(input))
return [self.normalizer.norm_output(output.output)
for output in self.output_layer]
# TASK
# 1) [Done] Implement a simple feed forward neural network
# 2) Implement a backpropagation training algorithm for the simple network
# 3) Train simple network to represent a simple forth degree quadratic function
"""
http://www.iro.umontreal.ca/~bengioy/dlbook/mlp.html#pf2
f?(x) = b + V sigmoid(c + W x),
""" | DomenicD/dom_ml_playground | python/src/neural_networks/feed_forward.py | Python | mit | 3,406 |
'''
This module should be called to regenerate cog code (it doesn't need to be rerun every time, as the
generated code itself is commited).
'''
from string import Template
import os
import sys
parent_dir = os.path.split(__file__)[0]
#=======================================================================================================================
# RunCog
#=======================================================================================================================
def RunCog():
#Add cog to the pythonpath
cog_dir = parent_dir[:parent_dir.index('plugins')]
cog_src_dir = os.path.join(cog_dir, 'builders', 'org.python.pydev.build', 'cog_src')
assert os.path.exists(cog_src_dir), '%s does not exist' % (cog_src_dir,)
sys.path.append(cog_src_dir)
import cog
cog.RunCogInFiles([os.path.join(parent_dir, 'src', 'org', 'python', 'pydev', 'red_core', 'AddRedCorePreferences.java')])
#=======================================================================================================================
# main
#=======================================================================================================================
if __name__ == '__main__':
RunCog() | smkr/pyclipse | plugins/org.python.pydev.red_core/install.py | Python | epl-1.0 | 1,220 |
"""Version info"""
short_version = '0.1'
version = '0.1.70'
| urinieto/msaf | msaf/version.py | Python | mit | 60 |
"""
Django settings for sisweb project.
Generated by 'django-admin startproject' using Django 1.11.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'yz0@4=g&kngm(iru!sufgxe7%uh(x7qdx)g569$5q9fp1d8!&='
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blablaPro',
'crispy_forms',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'sisweb.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')]
,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'sisweb.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
| HumbertValles/BlaBlaPro | sisweb/settings.py | Python | gpl-3.0 | 3,175 |
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """Initial South schema migration for the ``blog_pages`` app.

    Creates the tables backing the Post, Home, About, Page, Menu and
    Link models.  This file was generated by South; only comments and
    docstrings have been added -- the schema definition itself must not
    be edited by hand.
    """

    def forwards(self, orm):
        """Apply the migration: create one table per ``blog_pages`` model."""
        # Adding model 'Post'
        db.create_table('blog_pages_post', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('shop', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['shops.Shop'])),
            ('title', self.gf('django.db.models.fields.CharField')(max_length=60)),
            ('body', self.gf('django.db.models.fields.TextField')()),
            ('date_time', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
        ))
        db.send_create_signal('blog_pages', ['Post'])

        # Adding model 'Home'
        db.create_table('blog_pages_home', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('shop', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['shops.Shop'], unique=True)),
            ('title', self.gf('django.db.models.fields.CharField')(default='Welcome to My Store', max_length=60)),
            ('body', self.gf('django.db.models.fields.TextField')(default='Praesent a enim ac nunc egestas egestas. Integer auctor justo et lorem pulvinar eleifend. Curabitur accumsan massa lectus. Pellentesque ac ipsum sed odio mattis aliquam at egestas odio. Vestibulum gravida augue sapien, sit amet posuere quam. Duis dui mauris, pretium sed cursus quis, semper vitae metus. Sed et ante quam. Morbi nunc diam, tristique at vulputate a, ornare sed odio. Donec semper dolor nisl. Maecenas ac felis mauris, eget ornare metus. Pellentesque ac vehicula ligula. Nam semper nibh quis tortor eleifend et ultricies sapien tempus.')),
            ('image', self.gf('django.db.models.fields.files.ImageField')(max_length=100)),
            ('last_updated', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
        ))
        db.send_create_signal('blog_pages', ['Home'])

        # Adding model 'About'
        db.create_table('blog_pages_about', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('shop', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['shops.Shop'], unique=True)),
            ('title', self.gf('django.db.models.fields.CharField')(default='About Us', max_length=60)),
            ('body', self.gf('django.db.models.fields.TextField')(default='Nam est mauris, pretium eu imperdiet ut, iaculis sit amet sapien. Ut aliquet laoreet odio, ut hendrerit lectus suscipit quis. Sed condimentum elementum sollicitudin. Praesent accumsan, nisi nec sagittis dignissim, ante massa lobortis diam, id tincidunt arcu ipsum non purus. Duis et leo non diam feugiat congue ut in nulla. Suspendisse et faucibus mi. Fusce imperdiet volutpat sollicitudin. Suspendisse potenti.')),
            ('location', self.gf('django.db.models.fields.CharField')(default='39.29038,-76.61219', max_length=255)),
            ('last_updated', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
        ))
        db.send_create_signal('blog_pages', ['About'])

        # Adding model 'Page'
        db.create_table('blog_pages_page', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('shop', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['shops.Shop'])),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=60)),
            ('name_link', self.gf('django.db.models.fields.CharField')(max_length=60)),
            ('title', self.gf('django.db.models.fields.CharField')(max_length=60)),
            ('body', self.gf('django.db.models.fields.TextField')()),
            ('last_updated', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
            ('visible', self.gf('django.db.models.fields.BooleanField')(default=True, blank=True)),
        ))
        db.send_create_signal('blog_pages', ['Page'])

        # Adding model 'Menu'
        db.create_table('blog_pages_menu', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('shop', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['shops.Shop'])),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=60)),
        ))
        db.send_create_signal('blog_pages', ['Menu'])

        # Adding model 'Link'
        db.create_table('blog_pages_link', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=60)),
            ('to', self.gf('django.db.models.fields.CharField')(max_length=120)),
            ('title', self.gf('django.db.models.fields.CharField')(max_length=60)),
            ('menu', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['blog_pages.Menu'])),
            ('order', self.gf('django.db.models.fields.PositiveIntegerField')()),
        ))
        db.send_create_signal('blog_pages', ['Link'])

    def backwards(self, orm):
        """Revert the migration: drop every table created by forwards()."""
        # Deleting model 'Post'
        db.delete_table('blog_pages_post')

        # Deleting model 'Home'
        db.delete_table('blog_pages_home')

        # Deleting model 'About'
        db.delete_table('blog_pages_about')

        # Deleting model 'Page'
        db.delete_table('blog_pages_page')

        # Deleting model 'Menu'
        db.delete_table('blog_pages_menu')

        # Deleting model 'Link'
        db.delete_table('blog_pages_link')

    # Frozen ORM definitions used by South to reconstruct model state at
    # this point in history (includes dependencies from other apps).
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'})
        },
        'blog_pages.about': {
            'Meta': {'object_name': 'About'},
            'body': ('django.db.models.fields.TextField', [], {'default': "'Nam est mauris, pretium eu imperdiet ut, iaculis sit amet sapien. Ut aliquet laoreet odio, ut hendrerit lectus suscipit quis. Sed condimentum elementum sollicitudin. Praesent accumsan, nisi nec sagittis dignissim, ante massa lobortis diam, id tincidunt arcu ipsum non purus. Duis et leo non diam feugiat congue ut in nulla. Suspendisse et faucibus mi. Fusce imperdiet volutpat sollicitudin. Suspendisse potenti.'"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'location': ('django.db.models.fields.CharField', [], {'default': "'39.29038,-76.61219'", 'max_length': '255'}),
            'shop': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['shops.Shop']", 'unique': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'default': "'About Us'", 'max_length': '60'})
        },
        'blog_pages.home': {
            'Meta': {'object_name': 'Home'},
            'body': ('django.db.models.fields.TextField', [], {'default': "'Praesent a enim ac nunc egestas egestas. Integer auctor justo et lorem pulvinar eleifend. Curabitur accumsan massa lectus. Pellentesque ac ipsum sed odio mattis aliquam at egestas odio. Vestibulum gravida augue sapien, sit amet posuere quam. Duis dui mauris, pretium sed cursus quis, semper vitae metus. Sed et ante quam. Morbi nunc diam, tristique at vulputate a, ornare sed odio. Donec semper dolor nisl. Maecenas ac felis mauris, eget ornare metus. Pellentesque ac vehicula ligula. Nam semper nibh quis tortor eleifend et ultricies sapien tempus.'"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
            'last_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'shop': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['shops.Shop']", 'unique': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'default': "'Welcome to My Store'", 'max_length': '60'})
        },
        'blog_pages.link': {
            'Meta': {'object_name': 'Link'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'menu': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blog_pages.Menu']"}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
            'order': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
            'to': ('django.db.models.fields.CharField', [], {'max_length': '120'})
        },
        'blog_pages.menu': {
            'Meta': {'object_name': 'Menu'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
            'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shops.Shop']"})
        },
        'blog_pages.page': {
            'Meta': {'object_name': 'Page'},
            'body': ('django.db.models.fields.TextField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
            'name_link': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
            'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shops.Shop']"}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
            'visible': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'})
        },
        'blog_pages.post': {
            'Meta': {'object_name': 'Post'},
            'body': ('django.db.models.fields.TextField', [], {}),
            'date_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shops.Shop']"}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '60'})
        },
        'contenttypes.contenttype': {
            'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'market.marketplace': {
            'Meta': {'object_name': 'MarketPlace'},
            'base_domain': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '92'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '92', 'db_index': 'True'}),
            'template_prefix': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '92', 'db_index': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '92'})
        },
        'shops.shop': {
            'Meta': {'object_name': 'Shop'},
            'admin': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
            'bids': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'date_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'marketplace': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['market.MarketPlace']"}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
            'views': ('django.db.models.fields.IntegerField', [], {'default': '0'})
        }
    }

    complete_apps = ['blog_pages']
| StephenPower/CollectorCity-Market-Place | stores/apps/blog_pages/migrations/0001_initial.py | Python | apache-2.0 | 14,914 |
# Symbolic environment-name constants for the cloudcare app.
# NOTE(review): the precise meaning of these environment names is defined
# by the call sites, which are not visible here -- confirm before reuse.
WEB_APPS_ENVIRONMENT = 'web-apps'
PREVIEW_APP_ENVIRONMENT = 'preview-app'
# Device identifier string associated with Formplayer -- TODO confirm
# exact usage at call sites.
DEVICE_ID = 'Formplayer'
| dimagi/commcare-hq | corehq/apps/cloudcare/const.py | Python | bsd-3-clause | 99 |
# Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM14.CPSM.Equipment.Wires.Switch import Switch
class Disconnector(Switch):
    """A manually operated or motor operated mechanical switching device used for changing the connections in a circuit, or for isolating a circuit or equipment from a source of power. It is required to open or close circuits when negligible current is broken or made.- [R6.2] and [R6.3] are satisfied by navigation to ConnectivityNode and Substation - [R6.4] is satisfied by the class name.
    """

    def __init__(self, *args, **kw_args):
        """Initialises a new 'Disconnector' instance.
        """
        # Disconnector adds no attributes of its own; delegate entirely
        # to the Switch initialiser.
        super(Disconnector, self).__init__(*args, **kw_args)

    # CIM metadata tables consumed by the PyCIM machinery; all empty
    # because this class introduces nothing beyond what Switch defines.
    _attrs = []
    _attr_types = {}
    _defaults = {}
    _enums = {}
    _refs = []
    _many_refs = []
| rwl/PyCIM | CIM14/CPSM/Equipment/Wires/Disconnector.py | Python | mit | 1,865 |
# pyuDMX.py - Anyma (and clones) uDMX interface module
# Copyright (C) 2016 Dave Hocker (email: AtHomeX10@gmail.com)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the LICENSE file for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program (the LICENSE file). If not, see <http://www.gnu.org/licenses/>.
#
# To build distribution:
# python setup.py sdist
#
# To install distribution in current venv:
# pip install -U dist/pyudmx-x.y.z.tar.gz
# where x.y.z is the version number (e.g. 1.0.0)
#
import os
from setuptools import setup, find_packages
def read(*paths):
    """Build a file path from *paths* and return the contents.

    The file is decoded explicitly as UTF-8 rather than with the
    platform default codec, so that building the package (which reads
    Readme.md for the long description) does not fail or mis-decode on
    systems whose default encoding is not UTF-8 (e.g. Windows cp1252).
    """
    with open(_os.path.join(*paths) if False else os.path.join(*paths), 'r', encoding='utf-8') as f:
        return f.read()
# Distribution metadata for the udmx-pyusb package.
setup(
    name='udmx-pyusb',
    version='2.0.0',
    description='uDMX Interface Library',
    # The long description shown on PyPI is the repository Readme, verbatim.
    long_description=(read('Readme.md')),
    long_description_content_type="text/markdown",
    url='https://www.github.com/dhocker/udmx-pyusb',
    license='GPLv3. See LICENSE file.',
    author='Dave Hocker',
    author_email='AtHomeX10@gmail.com',
    py_modules=[],
    include_package_data=True,
    # Ship every package found in the tree except the test packages.
    packages=find_packages(exclude=['tests*']),
    # Runtime dependency: pyusb provides the USB transport to the uDMX device.
    install_requires=['pyusb>=1.0.2'],
    classifiers = [
        "Programming Language :: Python :: 3",
        "Operating System :: OS Independent"
    ]
)
| dhocker/uDMX-pyusb | setup.py | Python | gpl-3.0 | 1,718 |
#! /usr/bin/env python
# SCardConnect_DIRECT2.py : Unitary test for SCardReconnect
# Copyright (C) 2010 Ludovic Rousseau
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, see <http://www.gnu.org/licenses/>.
# Scenario
# use a T=1 card, a TPDU reader
# Connect in SCARD_SHARE_SHARED
# driver should negociate PPS
# Disconnect
# Connect in SCARD_SHARE_DIRECT
# driver should NOT negociate PPS (the card has not been reset)
# Disconnect
# Connect in SCARD_SHARE_SHARED
# driver should NOT negociate PPS (the card has not been reset)
# Disconnect
# same issue with Reconnect instead of connect
# bug fixed in revision 4940
from smartcard.scard import *
from smartcard.pcsc.PCSCExceptions import *
# APDU: SELECT of file 3F00 (the master file) -- used as a harmless
# command to check transmission after each (re)connection.
SELECT = [0x00, 0xA4, 0x00, 0x00, 0x02, 0x3F, 0x00]

# Establish a PC/SC resource-manager context.
hresult, hcontext = SCardEstablishContext(SCARD_SCOPE_USER)
if hresult != SCARD_S_SUCCESS:
    raise EstablishContextException(hresult)

hresult, readers = SCardListReaders(hcontext, [])
if hresult != SCARD_S_SUCCESS:
    raise ListReadersException(hresult)
print 'PC/SC Readers:', readers

# The test uses the first reader only.
reader = readers[0]
print "Using reader:", reader

# Connect in SCARD_SHARE_SHARED mode
# (per the scenario above, the driver should negotiate PPS here)
hresult, hcard, dwActiveProtocol = SCardConnect(hcontext, reader,
    SCARD_SHARE_SHARED, SCARD_PROTOCOL_ANY)
if hresult != SCARD_S_SUCCESS:
    raise BaseSCardException(hresult)

# Transmit
hresult, response = SCardTransmit(hcard, dwActiveProtocol, SELECT)
if hresult != SCARD_S_SUCCESS:
    raise BaseSCardException(hresult)
print response

# Disconnect
hresult = SCardDisconnect(hcard, SCARD_LEAVE_CARD)
if hresult != SCARD_S_SUCCESS:
    raise BaseSCardException(hresult)

# Connect in SCARD_SHARE_DIRECT mode
# (the card has not been reset, so no PPS negotiation is expected)
hresult, hcard, dwActiveProtocol = SCardConnect(hcontext, reader,
    SCARD_SHARE_DIRECT, SCARD_PROTOCOL_ANY)
if hresult != SCARD_S_SUCCESS:
    raise BaseSCardException(hresult)

# Disconnect
hresult = SCardDisconnect(hcard, SCARD_LEAVE_CARD)
if hresult != SCARD_S_SUCCESS:
    raise BaseSCardException(hresult)

# Connect in SCARD_SHARE_SHARED mode
hresult, hcard, dwActiveProtocol = SCardConnect(hcontext, reader,
    SCARD_SHARE_SHARED, SCARD_PROTOCOL_ANY)
if hresult != SCARD_S_SUCCESS:
    raise BaseSCardException(hresult)

# Transmit -- must still work without a fresh PPS negotiation
hresult, response = SCardTransmit(hcard, dwActiveProtocol, SELECT)
if hresult != SCARD_S_SUCCESS:
    raise BaseSCardException(hresult)
print response

# Reconnect in SCARD_SHARE_DIRECT mode
# (same scenario as above but via SCardReconnect -- bug fixed in r4940)
hresult, dwActiveProtocol = SCardReconnect(hcard,
    SCARD_SHARE_DIRECT, SCARD_PROTOCOL_ANY, SCARD_LEAVE_CARD)
if hresult != SCARD_S_SUCCESS:
    raise BaseSCardException(hresult)

# Disconnect
hresult = SCardDisconnect(hcard, SCARD_LEAVE_CARD)
if hresult != SCARD_S_SUCCESS:
    raise BaseSCardException(hresult)

# Connect in SCARD_SHARE_SHARED mode
hresult, hcard, dwActiveProtocol = SCardConnect(hcontext, reader,
    SCARD_SHARE_SHARED, SCARD_PROTOCOL_ANY)
if hresult != SCARD_S_SUCCESS:
    raise BaseSCardException(hresult)

# Transmit
hresult, response = SCardTransmit(hcard, dwActiveProtocol, SELECT)
if hresult != SCARD_S_SUCCESS:
    raise BaseSCardException(hresult)
print response

# Disconnect
hresult = SCardDisconnect(hcard, SCARD_LEAVE_CARD)
if hresult != SCARD_S_SUCCESS:
    raise BaseSCardException(hresult)

# Release the PC/SC context.
hresult = SCardReleaseContext(hcontext)
if hresult != SCARD_S_SUCCESS:
    raise ReleaseContextException(hresult)
| sudheesh001/RFID-DBSync | PCSC/UnitaryTests/SCardConnect_DIRECT2.py | Python | gpl-2.0 | 3,899 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models, tools, _
from odoo.exceptions import AccessError, MissingError
from odoo.tools import pickle
# Legacy report/search-view payload fields excluded from processing.
EXCLUDED_FIELDS = set((
    'report_sxw_content', 'report_rml_content', 'report_sxw', 'report_rml',
    'report_sxw_content_data', 'report_rml_content_data', 'search_view', ))

#: Possible slots to bind an action to with :meth:`~.set_action`
ACTION_SLOTS = [
    "client_action_multi",   # sidebar wizard action
    "client_print_multi",    # sidebar report printing button
    "client_action_relate",  # sidebar related link
    "tree_but_open",         # double-click on item in tree view
    "tree_but_action",       # deprecated: same as tree_but_open
]
class IrValues(models.Model):
"""Holds internal model-specific action bindings and user-defined default
field values. This is a legacy internal model, mixing
two different concepts, and will likely be updated or replaced in a
future version by cleaner, separate models. You should not depend
explicitly on it.
The purpose of each ``ir.values`` entry depends on its type, defined
by the ``key`` column:
* 'default': user-defined default values, used when creating new
records of this model:
* 'action': binding of an action to a particular *action slot* of
this model, making the action easily available in the user
interface for this model.
The ``key2`` column acts as a qualifier, further refining the type
of the entry. The possible values are:
* for 'default' entries: an optional condition restricting the
cases where this particular default value will be applicable,
or ``False`` for no condition
* for 'action' entries: the ``key2`` qualifier is one of the available
action slots, defining how this action can be invoked:
* ``'client_print_multi'`` for report printing actions that will
be available on views displaying items from this model
* ``'client_action_multi'`` for assistants (wizards) actions
that will be available in views displaying objects of this model
* ``'client_action_relate'`` for links towards related documents
that should be available in views displaying objects of this model
* ``'tree_but_open'`` for actions that will be triggered when
double-clicking an item from this model in a hierarchical tree view
Each entry is specific to a model (``model`` column), and for ``'actions'``
type, may even be made specific to a given record of that model when the
``res_id`` column contains a record ID (``False`` means it's global for
all records).
The content of the entry is defined by the ``value`` column, which may either
contain an arbitrary value, or a reference string defining the action that
should be executed.
.. rubric:: Usage: default values
The ``'default'`` entries are usually defined manually by the
users, and set by their UI clients calling :meth:`~.set_default`.
These default values are then automatically used by the
ORM every time a new record is about to be created, i.e. when
:meth:`~odoo.models.Model.default_get`
or :meth:`~odoo.models.Model.create` are called.
.. rubric:: Usage: action bindings
Business applications will usually bind their actions during
installation, and Odoo UI clients will apply them as defined,
based on the list of actions included in the result of
:meth:`~odoo.models.Model.fields_view_get`,
or directly returned by explicit calls to :meth:`~.get_actions`.
"""
_name = 'ir.values'
name = fields.Char(required=True)
model = fields.Char(string='Model Name', index=True, required=True,
help="Model to which this entry applies")
# TODO: model_id and action_id should be read-write function fields
model_id = fields.Many2one('ir.model', string='Model (change only)',
help="Model to which this entry applies - "
"helper field for setting a model, will "
"automatically set the correct model name")
action_id = fields.Many2one('ir.actions.actions', string='Action (change only)',
help="Action bound to this entry - "
"helper field for binding an action, will "
"automatically set the correct reference")
value = fields.Text(help="Default value (pickled) or reference to an action")
value_unpickle = fields.Text(string='Default value or action reference',
compute='_value_unpickle', inverse='_value_pickle')
key = fields.Selection([('action', 'Action'), ('default', 'Default')],
string='Type', index=True, required=True, default='action',
help="- Action: an action attached to one slot of the given model\n"
"- Default: a default value for a model field")
key2 = fields.Char(string='Qualifier', index=True, default='tree_but_open',
help="For actions, one of the possible action slots: \n"
" - client_action_multi\n"
" - client_print_multi\n"
" - client_action_relate\n"
" - tree_but_open\n"
"For defaults, an optional condition")
res_id = fields.Integer(string='Record ID', index=True,
help="Database identifier of the record to which this applies. "
"0 = for all records")
user_id = fields.Many2one('res.users', string='User', ondelete='cascade', index=True,
help="If set, action binding only applies for this user.")
company_id = fields.Many2one('res.company', string='Company', ondelete='cascade', index=True,
help="If set, action binding only applies for this company")
    @api.depends('key', 'value')
    def _value_unpickle(self):
        """Compute a display version of ``value``: pickled 'default'
        entries are unpickled and stringified; anything else (action
        references) passes through unchanged."""
        for record in self:
            value = record.value
            if record.key == 'default' and value:
                # default values are pickled on the fly
                # (a broken pickle leaves the raw value untouched)
                with tools.ignore(Exception):
                    value = str(pickle.loads(value))
            record.value_unpickle = value
    def _value_pickle(self):
        """Inverse of :meth:`_value_unpickle`: 'default' entries are
        stored pickled, action references are stored verbatim."""
        context = dict(self._context)
        # NOTE(review): the concurrency-check marker is removed from the
        # context before writing back -- presumably to avoid interference
        # with the write; confirm against the ORM's concurrency handling.
        context.pop(self.CONCURRENCY_CHECK_FIELD, None)
        for record in self.with_context(context):
            value = record.value_unpickle
            if record.key == 'default':
                value = pickle.dumps(value)
            record.value = value
@api.onchange('model_id')
def onchange_object_id(self):
if self.model_id:
self.model = self.model_id.model
@api.onchange('action_id')
def onchange_action_id(self):
if self.action_id:
self.value_unpickle = self.action_id
    @api.model_cr_context
    def _auto_init(self):
        """Run the regular ORM table initialisation, then ensure the
        composite index used by action/default lookups exists."""
        super(IrValues, self)._auto_init()
        # Guard with an explicit existence check so the index is only
        # created once across repeated initialisations.
        self._cr.execute("SELECT indexname FROM pg_indexes WHERE indexname = 'ir_values_key_model_key2_res_id_user_id_idx'")
        if not self._cr.fetchone():
            self._cr.execute("CREATE INDEX ir_values_key_model_key2_res_id_user_id_idx ON ir_values (key, model, key2, res_id, user_id)")
@api.model
def create(self, vals):
self.clear_caches()
return super(IrValues, self).create(vals)
@api.multi
def write(self, vals):
self.clear_caches()
return super(IrValues, self).write(vals)
@api.multi
def unlink(self):
self.clear_caches()
return super(IrValues, self).unlink()
    @api.model
    @api.returns('self', lambda value: value.id)
    def set_default(self, model, field_name, value, for_all_users=True, company_id=False, condition=False):
        """Defines a default value for the given model and field_name. Any previous
        default for the same scope (model, field_name, value, for_all_users, company_id, condition)
        will be replaced and lost in the process.

        Defaults can be later retrieved via :meth:`~.get_defaults`, which will return
        the highest priority default for any given field. Defaults that are more specific
        have a higher priority, in the following order (highest to lowest):

        * specific to user and company
        * specific to user only
        * specific to company only
        * global to everyone

        :param string model: model name
        :param string field_name: field name to which the default applies
        :param value: the default field value to set
        :type value: any serializable Python value
        :param bool for_all_users: whether the default should apply to everybody or only
                                   the user calling the method
        :param int company_id: optional ID of the company to which the default should
                               apply. If omitted, the default will be global. If True
                               is passed, the current user's company will be used.
        :param string condition: optional condition specification that can be used to
                                 restrict the applicability of the default values
                                 (e.g. based on another field's value). This is an
                                 opaque string as far as the API is concerned, but client
                                 stacks typically use single-field conditions in the
                                 form ``'key=stringified_value'``.
                                 (Currently, the condition is trimmed to 200 characters,
                                 so values that share the same first 200 characters always
                                 match)
        :return: the newly created ir.values entry
        """
        # store text as UTF-8 byte strings before pickling
        if isinstance(value, unicode):
            value = value.encode('utf8')
        if company_id is True:
            # should be company-specific, need to get company id
            company_id = self.env.user.company_id.id

        # remove existing defaults for the same scope
        search_criteria = [
            ('key', '=', 'default'),
            ('key2', '=', condition and condition[:200]),
            ('model', '=', model),
            ('name', '=', field_name),
            ('user_id', '=', False if for_all_users else self._uid),
            ('company_id', '=', company_id)
        ]
        self.search(search_criteria).unlink()

        return self.create({
            'name': field_name,
            'value': pickle.dumps(value),
            'model': model,
            'key': 'default',
            'key2': condition and condition[:200],
            'user_id': False if for_all_users else self._uid,
            'company_id': company_id,
        })
    @api.model
    def get_default(self, model, field_name, for_all_users=True, company_id=False, condition=False):
        """ Return the default value defined for model, field_name, users, company and condition.
        Return ``None`` if no such default exists.
        """
        search_criteria = [
            ('key', '=', 'default'),
            ('key2', '=', condition and condition[:200]),
            ('model', '=', model),
            ('name', '=', field_name),
            ('user_id', '=', False if for_all_users else self._uid),
            ('company_id', '=', company_id)
        ]
        defaults = self.search(search_criteria)
        # values are stored pickled by set_default(); re-encode to bytes
        # before unpickling
        return pickle.loads(defaults.value.encode('utf-8')) if defaults else None
    @api.model
    def get_defaults(self, model, condition=False):
        """Returns any default values that are defined for the current model and user,
        (and match ``condition``, if specified), previously registered via
        :meth:`~.set_default`.

        Defaults are global to a model, not field-specific, but an optional
        ``condition`` can be provided to restrict matching default values
        to those that were defined for the same condition (usually based
        on another field's value).

        Default values also have priorities depending on whom they apply
        to: only the highest priority value will be returned for any
        field. See :meth:`~.set_default` for more details.

        :param string model: model name
        :param string condition: optional condition specification that can be used to
                                 restrict the applicability of the default values
                                 (e.g. based on another field's value). This is an
                                 opaque string as far as the API is concerned, but client
                                 stacks typically use single-field conditions in the
                                 form ``'key=stringified_value'``.
                                 (Currently, the condition is trimmed to 200 characters,
                                 so values that share the same first 200 characters always
                                 match)
        :return: list of default values tuples of the form ``(id, field_name, value)``
                 (``id`` is the ID of the default entry, usually irrelevant)
        """
        # use a direct SQL query for performance reasons,
        # this is called very often
        # NOTE: %%s placeholders survive the '%' formatting of the key2
        # clause below and become cursor parameters.
        query = """ SELECT v.id, v.name, v.value FROM ir_values v
                    LEFT JOIN res_users u ON (v.user_id = u.id)
                    WHERE v.key = %%s AND v.model = %%s
                        AND (v.user_id = %%s OR v.user_id IS NULL)
                        AND (v.company_id IS NULL OR
                             v.company_id = (SELECT company_id FROM res_users WHERE id = %%s)
                            )
                    %s
                    ORDER BY v.user_id, u.company_id"""
        params = ('default', model, self._uid, self._uid)
        if condition:
            query = query % 'AND v.key2 = %s'
            params += (condition[:200],)
        else:
            query = query % 'AND v.key2 IS NULL'
        self._cr.execute(query, params)

        # keep only the highest priority default for each field
        # (rows are ordered so the first row seen per field wins)
        defaults = {}
        for row in self._cr.dictfetchall():
            value = pickle.loads(row['value'].encode('utf-8'))
            defaults.setdefault(row['name'], (row['id'], row['name'], value))
        return defaults.values()
# use ormcache: this is called a lot by BaseModel.default_get()!
@api.model
@tools.ormcache('self._uid', 'model', 'condition')
def get_defaults_dict(self, model, condition=False):
    """Return a dict mapping each field name to its default value.

    Convenience wrapper around :meth:`~.get_defaults` that drops the ``id``
    element of each ``(id, field_name, value)`` tuple it returns.
    """
    return {field_name: value
            for _entry_id, field_name, value in self.get_defaults(model, condition)}
@api.model
@api.returns('self', lambda value: value.id)
def set_action(self, name, action_slot, model, action, res_id=False):
    """Bind the given action to the given model's action slot, for later
    retrieval via :meth:`~.get_actions`. Any existing binding of the same
    action to the same slot is removed first, which allows updating the
    action's label.

    See the class description for more details about the various action
    slots: :class:`~ir_values`.

    :param string name: action label, usually displayed by UI client
    :param string action_slot: the action slot to which the action should be
                               bound to - one of ``client_action_multi``,
                               ``client_print_multi``, ``client_action_relate``,
                               ``tree_but_open``.
    :param string model: model name
    :param string action: action reference, in the form ``'model,id'``
    :param int res_id: optional record id - will bind the action only to a
                       specific record of the model, not all records.
    :return: the newly created ir.values entry
    """
    assert isinstance(action, basestring) and ',' in action, \
        'Action definition must be an action reference, e.g. "ir.actions.act_window,42"'
    assert action_slot in ACTION_SLOTS, \
        'Action slot (%s) must be one of: %r' % (action_slot, ACTION_SLOTS)

    # Drop any existing binding of this exact action to this slot so the
    # create() below effectively refreshes its name.
    stale_bindings = self.search([
        ('key', '=', 'action'),
        ('key2', '=', action_slot),
        ('model', '=', model),
        ('res_id', '=', res_id or 0),  # int field -> NULL == 0
        ('value', '=', action),
    ])
    stale_bindings.unlink()

    new_binding = {
        'key': 'action',
        'key2': action_slot,
        'model': model,
        'res_id': res_id,
        'name': name,
        'value': action,
    }
    return self.create(new_binding)
@api.model
@tools.ormcache_context('self._uid', 'action_slot', 'model', 'res_id', keys=('lang',))
def get_actions(self, action_slot, model, res_id=False):
    """Retrieves the list of actions bound to the given model's action slot.

    See the class description for more details about the various action
    slots: :class:`~.ir_values`.

    :param string action_slot: the action slot to which the actions should be
                               bound to - one of ``client_action_multi``,
                               ``client_print_multi``, ``client_action_relate``,
                               ``tree_but_open``.
    :param string model: model name
    :param int res_id: optional record id - will bind the action only to a
                       specific record of the model, not all records.
    :return: list of action tuples of the form ``(id, name, action_def)``,
             where ``id`` is the ID of the default entry, ``name`` is the
             action label, and ``action_def`` is a dict containing the
             action definition as obtained by calling
             :meth:`~odoo.models.Model.read` on the action record.
    """
    assert action_slot in ACTION_SLOTS, 'Illegal action slot value: %s' % action_slot
    # use a direct SQL query for performance reasons,
    # this is called very often
    # entries with res_id NULL or 0 apply to all records of the model, so
    # they are always included; res_id=False maps to NULL in the parameter.
    query = """ SELECT v.id, v.name, v.value FROM ir_values v
                WHERE v.key = %s AND v.key2 = %s AND v.model = %s
                AND (v.res_id = %s OR v.res_id IS NULL OR v.res_id = 0)
                ORDER BY v.id """
    self._cr.execute(query, ('action', action_slot, model, res_id or None))

    # map values to their corresponding action record
    actions = []
    for id, name, value in self._cr.fetchall():
        if not value:
            continue  # skip if undefined
        action_model, action_id = value.split(',')
        if action_model not in self.pool:
            continue  # unknown model? skip it!
        action = self.env[action_model].browse(int(action_id))
        actions.append((id, name, action))

    # process values and their action
    results = {}
    for id, name, action in actions:
        fields = [field for field in action._fields if field not in EXCLUDED_FIELDS]
        # FIXME: needs cleanup
        try:
            action_def = {
                field: action._fields[field].convert_to_read(action[field])
                for field in fields
            }
            if action._name in ('ir.actions.report.xml', 'ir.actions.act_window'):
                # hide actions restricted to groups the user is not in;
                # menu entries raise an explicit access error instead
                if action.groups_id and not action.groups_id & self.env.user.groups_id:
                    if name == 'Menuitem':
                        raise AccessError(_('You do not have the permission to perform this operation!!!'))
                    continue
            # keep only the last action registered for each action name
            results[name] = (id, name, action_def)
        except (AccessError, MissingError):
            # unreadable or vanished action record: silently omit it
            continue
    # NOTE(review): tuples are compared element-wise, so this sorts by id
    # first; the trailing dicts are only compared on id/name ties (legal in
    # Python 2 only) — confirm before porting to Python 3.
    return sorted(results.values())
def _map_legacy_model_list(self, model_list, map_fn, merge_results=False):
"""Apply map_fn to the various models passed, according to
legacy way to specify models/records.
"""
assert isinstance(model_list, (list, tuple)), \
"model_list should be in the form [model,..] or [(model,res_id), ..]"
results = []
for model in model_list:
res_id = False
if isinstance(model, (list, tuple)):
model, res_id = model
result = map_fn(model, res_id)
# some of the functions return one result at a time (tuple or id)
# and some return a list of many of them - care for both
if merge_results:
results.extend(result)
else:
results.append(result)
return results
# Backwards-compatibility adapter layer to retrofit into split API
@api.model
def set(self, key, key2, name, models, value, replace=True, isobject=False, meta=False, preserve_user=False, company=False):
    """Deprecated legacy method to set default values and bind actions to models' action slots.
    Now dispatches to the newer API methods according to the value of ``key``: :meth:`~.set_default`
    (``key=='default'``) or :meth:`~.set_action` (``key == 'action'``).

    Note: the ``replace``, ``isobject`` and ``meta`` parameters are accepted
    for signature compatibility but ignored.

    :deprecated: As of v6.1, ``set_default()`` or ``set_action()`` should be used directly.
    """
    assert key in ['default', 'action'], "ir.values entry keys must be in ['default','action']"
    if key == 'action':
        def do_set(model, res_id):
            return self.set_action(name, action_slot=key2, model=model, action=value, res_id=res_id)
    else:  # key == 'default', guaranteed by the assert above
        def do_set(model, res_id):
            return self.set_default(model, field_name=name, value=value,
                                    for_all_users=(not preserve_user), company_id=company,
                                    condition=key2)
    return self._map_legacy_model_list(models, do_set)
@api.model
def get(self, key, key2, models, meta=False, res_id_req=False, without_user=True, key2_req=True):
    """Deprecated legacy method to get the list of default values or actions bound to models' action slots.
    Now dispatches to the newer API methods according to the value of ``key``: :meth:`~.get_defaults`
    (``key=='default'``) or :meth:`~.get_actions` (``key == 'action'``)

    Note: the ``meta``, ``res_id_req``, ``without_user`` and ``key2_req``
    parameters are accepted for signature compatibility but ignored.

    :deprecated: As of v6.1, ``get_defaults()`` or ``get_actions()`` should be used directly.
    """
    assert key in ['default', 'action'], "ir.values entry keys must be in ['default','action']"
    if key == 'action':
        def do_get(model, res_id):
            return self.get_actions(action_slot=key2, model=model, res_id=res_id)
    else:  # key == 'default', guaranteed by the assert above
        def do_get(model, res_id):
            return self.get_defaults(model, condition=key2)
    # each call returns a list of tuples; flatten them into one list
    return self._map_legacy_model_list(models, do_get, merge_results=True)
| akhmadMizkat/odoo | openerp/addons/base/ir/ir_values.py | Python | gpl-3.0 | 24,020 |
from __future__ import absolute_import
import logging
import flask
from flask.signals import got_request_exception
from raygun4py import raygunprovider
log = logging.getLogger(__name__)
class Provider(object):
    """Flask integration for Raygun error reporting.

    Subscribes to Flask's ``got_request_exception`` signal and forwards
    unhandled request exceptions to Raygun once :meth:`attach` has created
    the sender.
    """

    def __init__(self, flaskApp, apiKey):
        self.flaskApp = flaskApp
        self.apiKey = apiKey
        self.sender = None
        # Forward unhandled request exceptions from this app to Raygun.
        got_request_exception.connect(self.send_exception, sender=flaskApp)
        # Bug fix: guard before indexing — previously the guard lived only in
        # attach(), which runs *after* this subscript and thus too late.
        if not hasattr(flaskApp, 'extensions'):
            flaskApp.extensions = {}
        flaskApp.extensions['raygun'] = self

    def attach(self):
        """Create the Raygun sender; must be called before exceptions occur."""
        if not hasattr(self.flaskApp, 'extensions'):
            self.flaskApp.extensions = {}
        self.sender = raygunprovider.RaygunSender(self.apiKey)
        return self.sender

    def send_exception(self, *args, **kwargs):
        """Signal handler: report the current exception to Raygun."""
        if not self.sender:
            log.error("Raygun-Flask: Cannot send as provider not attached")
            # Bug fix: previously fell through and crashed with an
            # AttributeError on the None sender below.
            return
        env = self._get_flask_environment()
        self.sender.send_exception(extra_environment_data=env)

    def _get_flask_environment(self):
        # Report the framework version alongside the error payload.
        return {
            'frameworkVersion': 'Flask ' + getattr(flask, '__version__', '')
        }
| MindscapeHQ/raygun4py | python2/raygun4py/middleware/flask.py | Python | mit | 1,082 |
from django.shortcuts import render
from django.http import HttpResponseRedirect
from ticket.models import *
from trains.models import *
from .forms import *
def get_availability(request, train_id, coach_id):
    """Render the seat availability status for a coach of a train.

    Stores the selected train/coach in the session for the subsequent
    booking steps, then renders ``book.html`` with a human-readable status.

    :param request: Django HttpRequest
    :param train_id: selected train id (kept in session)
    :param coach_id: TrainClass (coach) primary key
    """
    request.session['train_id'] = train_id
    request.session['coach_id'] = coach_id
    availSeats = TrainClass.objects.values().get(
        id=coach_id)['availSeats']
    if availSeats <= 0:
        # Bug fix: the waitlist length was previously taken from an undefined
        # name `w`, raising NameError on every sold-out coach. A non-positive
        # availSeats is interpreted as overbooking depth — presumably the
        # waitlist length; TODO confirm against the booking model's semantics.
        waitlist = -availSeats
        status = 'Sorry! All booked. WaitList is ' + str(waitlist)
    else:
        status = "Available " + str(availSeats) + " seats"
    return render(request, 'book.html', {'status': status})
def passenger_form(request):
    """Display and process the passenger details form.

    On a valid POST, redirects to the booking confirmation page; otherwise
    re-renders the form (bound with validation errors on an invalid POST,
    empty on GET).

    :param request: Django HttpRequest
    """
    if request.method == 'POST':
        form = PassengerForm(request.POST)
        if form.is_valid():
            # NOTE(review): form.cleaned_data was only printed before and is
            # never persisted — presumably a booking record should be created
            # here; removed the leftover debug prints.
            return HttpResponseRedirect('/booked/')
        # fall through with the bound form so validation errors are shown
    else:
        form = PassengerForm()
    return render(request, 'passenger.html', {'form': form})
| KeshavSeth/RailwayReservationSystem | src/ticket/views.py | Python | mit | 995 |
# Copyright 2017--2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
Code for training
"""
import glob
import logging
import os
import pickle
import random
import shutil
import time
from collections import deque
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Iterable, Tuple, Union, Set
import numpy as np
import torch
import torch.distributed
try:
import apex.amp
except ImportError:
# Not an issue because Apex AMP is only used when the trainer setting is
# activated. We check that Apex can be imported before creating the trainer.
pass
from . import average
from . import checkpoint_decoder
from . import constants as C
from . import data_io
from . import loss
from . import lr_scheduler
from . import model
from . import optimizers
from . import utils
from . import vocab
from .config import Config
logger = logging.getLogger(__name__)
@dataclass
class TrainerConfig(Config):
    """Configuration of the training loop: checkpointing, stopping criteria,
    and gradient accumulation."""
    # Directory where parameters, metrics, training state and decoder output
    # are written.
    output_dir: str
    # Validation metric used to decide improvement / early stopping.
    early_stopping_metric: str
    # Number of params.<N> files kept on disk (pruned at each checkpoint).
    max_params_files_to_keep: int
    # Whether the initial parameter file is exempt from pruning.
    keep_initializations: bool
    # Params-file caching knobs — presumably forwarded to cleanup_params_files;
    # see that function for exact semantics (TODO confirm).
    max_params_files_to_cache: int
    cache_strategy: str
    cache_metric: str
    # Number of updates between checkpoints.
    checkpoint_interval: int
    # Early-stopping patience: max checkpoints without sufficient improvement.
    max_num_checkpoint_not_improved: int
    # Minimum improvement over the patience window required to keep training.
    checkpoint_improvement_threshold: float
    # Optional hard limits / minimums on training duration; None disables each.
    max_checkpoints: Optional[int] = None
    min_samples: Optional[int] = None
    max_samples: Optional[int] = None
    min_updates: Optional[int] = None
    max_updates: Optional[int] = None
    min_epochs: Optional[int] = None
    max_epochs: Optional[int] = None
    max_seconds: Optional[int] = None
    # Number of batches over which gradients are accumulated per weight update.
    update_interval: int = 1
    # Abort training when the checkpoint decoder fails.
    stop_training_on_decoder_failure: bool = False
class TrainState:
    """
    Stores the state an EarlyStoppingTrainer instance.

    Picklable so that an interrupted training run can be resumed with its
    counters, metric history and elapsed time intact.
    """

    # Bug fix: 'best_checkpoint' was previously listed twice in __slots__
    # (and assigned twice in __init__); the duplicates are removed.
    __slots__ = ['num_not_improved', 'epoch', 'checkpoint', 'best_checkpoint', 'batches', 'updates', 'samples',
                 'metrics', 'start_tic', '_tic_last_time_elapsed', '_time_elapsed', 'early_stopping_metric',
                 'best_metric', 'best_metric_history', 'converged', 'diverged']

    def __init__(self, early_stopping_metric: str) -> None:
        self.num_not_improved = 0  # consecutive checkpoints without improvement
        self.epoch = 0
        self.checkpoint = 0
        self.best_checkpoint = 0
        self.batches = 0
        self.updates = 0
        self.samples = 0
        # stores dicts of metric names & values for each checkpoint
        self.metrics = []  # type: List[Dict]
        self.start_tic = time.time()
        self._tic_last_time_elapsed = self.start_tic
        self._time_elapsed = 0.0
        self.early_stopping_metric = early_stopping_metric
        # start from the worst possible value so any real result improves on it
        self.best_metric = C.METRIC_WORST[early_stopping_metric]
        # List of the last N best metrics, used for threshold-based stopping
        self.best_metric_history = deque([self.best_metric])
        self.converged = False
        self.diverged = False

    def save(self, fname: str):
        """
        Saves this training state to fname.
        """
        self.update_time_elapsed()
        # exactly one metrics dict must have been recorded per checkpoint
        assert len(self.metrics) == self.checkpoint
        with open(fname, "wb") as fp:
            pickle.dump(self, fp)

    @staticmethod
    def load(fname: str) -> 'TrainState':
        """
        Loads a training state from fname.
        """
        with open(fname, "rb") as fp:
            state = pickle.load(fp)
            # restart the elapsed-time stopwatch from the moment of loading
            state._tic_last_time_elapsed = time.time()
            assert len(state.metrics) == state.checkpoint
            return state

    def update_time_elapsed(self):
        """Folds wall-clock time since the last call into the elapsed total."""
        current_time = time.time()
        self._time_elapsed += current_time - self._tic_last_time_elapsed
        self._tic_last_time_elapsed = current_time

    @property
    def time_elapsed(self):
        # total wall-clock seconds spent training, across resumptions
        return self._time_elapsed

    def __getstate__(self):
        # __slots__ classes have no __dict__, so pickling needs explicit state
        return {k: getattr(self, k) for k in self.__slots__}

    def __setstate__(self, state):
        for k, v in state.items():
            setattr(self, k, v)
class EarlyStoppingTrainer:
def __init__(self,
             config: TrainerConfig,
             optimizer_config: optimizers.OptimizerConfig,
             sockeye_model: model.SockeyeModel,
             training_model: torch.nn.Module,
             optimizer: torch.optim.Optimizer,
             zero_grad_kwargs: Dict[str, Any],
             loss_functions: List[loss.Loss],
             device: torch.device,
             using_amp: bool = False,
             using_apex_amp: bool = False,
             custom_metrics_logger: Optional[Callable] = None,
             checkpoint_callback: Optional[Callable] = None) -> None:
    """
    :param config: Trainer configuration (stopping criteria, checkpointing).
    :param optimizer_config: Optimizer settings (lr, clipping, scheduler).
    :param sockeye_model: The model being trained; used for evaluation and
                          parameter saving.
    :param training_model: Module used for the training forward/backward pass
                           (presumably a traced/DDP-wrapped view of
                           sockeye_model — see _forward_backward/_evaluate).
    :param optimizer: Optimizer updating the model parameters.
    :param zero_grad_kwargs: Keyword arguments forwarded to optimizer.zero_grad().
    :param loss_functions: Losses computed per batch; their sum is backpropagated.
    :param device: Device batches are loaded to.
    :param using_amp: Use native PyTorch AMP (creates a GradScaler).
    :param using_apex_amp: Use NVIDIA Apex AMP loss scaling instead.
    :param custom_metrics_logger: Optional callable receiving checkpoint metrics.
    :param checkpoint_callback: Optional callable invoked after each checkpoint.
    """
    self.config = config
    self.optimizer_config = optimizer_config
    self.sockeye_model = sockeye_model
    self.training_model = training_model
    self.optimizer = optimizer
    self.zero_grad_kwargs = zero_grad_kwargs
    self.loss_functions = loss_functions
    self.device = device
    self.using_amp = using_amp
    if using_amp:
        # gradient scaler for float16-safe backward passes
        self._scaler = torch.cuda.amp.GradScaler()
    self.using_apex_amp = using_apex_amp
    self.state = None  # type: Optional[TrainState]
    self._speedometer = Speedometer(frequency=C.MEASURE_SPEED_EVERY, auto_reset=False)
    self._custom_metrics_logger = custom_metrics_logger
    self._tflogger = TensorboardLogger(logdir=os.path.join(self.config.output_dir, C.TENSORBOARD_NAME))
    self.checkpoint_callback = checkpoint_callback
def fit(self,
        train_iter: data_io.BaseParallelSampleIter,
        validation_iter: data_io.BaseParallelSampleIter,
        checkpoint_decoder: Optional[checkpoint_decoder.CheckpointDecoder] = None):
    """
    Trains the model until a stopping criterion is reached (max
    epochs/updates/samples/seconds, convergence, or divergence), creating a
    checkpoint every `checkpoint_interval` updates. Resumes from a saved
    training state if one exists in the output directory.

    :param train_iter: Training data iterator.
    :param validation_iter: Validation data iterator, scored at checkpoints.
    :param checkpoint_decoder: Optional decoder used at checkpoints to compute
                               metrics such as BLEU (primary worker only).
    :return: The final training state.
    """
    logger.info("Early stopping by optimizing '%s'", self.config.early_stopping_metric)
    if utils.is_primary_worker() and self.config.early_stopping_metric in C.METRICS_REQUIRING_DECODER:
        utils.check_condition(checkpoint_decoder is not None,
                              "%s requires CheckpointDecoder" % self.config.early_stopping_metric)
    resume_training = os.path.exists(self.training_state_dirname)
    if resume_training:
        logger.info("Found partial training in '%s'. Resuming from saved state.", self.training_state_dirname)
        self._load_training_state(train_iter)
    else:
        self.state = TrainState(self.config.early_stopping_metric)
        if utils.is_primary_worker():
            # NOTE(review): indentation reconstructed — assuming config,
            # version and initial params are written by the primary worker
            # only; confirm against the original file.
            self.sockeye_model.save_config(self.config.output_dir)
            self.sockeye_model.save_version(self.config.output_dir)
            self.sockeye_model.save_parameters(self.current_params_fname)
        logger.info("Training started.")
    tic = time.time()

    if self.config.max_checkpoints is not None:
        # translate a max_checkpoints budget into an absolute max_updates limit
        self.config.max_updates = self.state.updates + self.config.max_checkpoints * self.config.checkpoint_interval
        logger.info("Resetting max_updates to %d + %d * %d = %d in order to implement stopping "
                    "after (an additional) %d checkpoints.",
                    self.state.updates,
                    self.config.max_checkpoints,
                    self.config.checkpoint_interval,
                    self.config.max_updates,
                    self.config.max_checkpoints)

    # whether a checkpoint has been created since the last gradient step
    checkpoint_up_to_date = False
    while True:
        # Hard limits: create a final checkpoint (if stale) before stopping.
        if self.config.max_epochs is not None and self.state.epoch == self.config.max_epochs:
            logger.info("Maximum # of epochs (%s) reached.", self.config.max_epochs)
            if not checkpoint_up_to_date:
                time_cost = time.time() - tic
                self._create_checkpoint(checkpoint_decoder, time_cost, train_iter, validation_iter)
            break
        if self.config.max_updates is not None and self.state.updates == self.config.max_updates:
            logger.info("Maximum # of updates (%s) reached.", self.config.max_updates)
            if not checkpoint_up_to_date:
                time_cost = time.time() - tic
                self._create_checkpoint(checkpoint_decoder, time_cost, train_iter, validation_iter)
            break
        if self.config.max_samples is not None and self.state.samples >= self.config.max_samples:
            logger.info("Maximum # of samples (%s) reached", self.config.max_samples)
            if not checkpoint_up_to_date:
                time_cost = time.time() - tic
                self._create_checkpoint(checkpoint_decoder, time_cost, train_iter, validation_iter)
            break
        did_grad_step = self._step(batch=train_iter.next())
        # any completed gradient step makes the last checkpoint stale
        checkpoint_up_to_date = checkpoint_up_to_date and not did_grad_step
        if not train_iter.iter_next():
            self.state.epoch += 1
            train_iter.reset()
        if self.state.updates > 0 and self.state.batches % (
                self.config.checkpoint_interval * self.config.update_interval) == 0:
            time_cost = time.time() - tic
            self._create_checkpoint(checkpoint_decoder, time_cost, train_iter, validation_iter)
            checkpoint_up_to_date = True
            # time/convergence/divergence are only checked at checkpoints
            if self.config.max_seconds is not None and self.state.time_elapsed >= self.config.max_seconds:
                logger.info("Maximum # of seconds (%s) reached. Training ran for %d seconds.",
                            self.config.max_seconds, self.state.time_elapsed)
                break
            if self.state.converged or self.state.diverged:
                break
            tic = time.time()

    logger.info("Training finished%s. Best checkpoint: %d. Best validation %s: %.6f",
                ", can be continued later" if not self.state.converged else "",
                self.state.best_checkpoint, self.state.early_stopping_metric, self.state.best_metric)
    # Always keep the training state to allow continuing training with
    # different stopping criteria
    if utils.is_primary_worker():
        self._cleanup(keep_training_state=True)
    return self.state
def _create_checkpoint(self, checkpoint_decoder: checkpoint_decoder.CheckpointDecoder, time_cost: float,
                       train_iter: data_io.BaseParallelSampleIter,
                       validation_iter: data_io.BaseParallelSampleIter):
    """
    Creates a checkpoint, which will update self.state.converged/self.state.diverged, evaluate validation
    metrics and update the best known parameters accordingly.

    :param checkpoint_decoder: Optional decoder for extra validation metrics.
    :param time_cost: Seconds spent since the previous checkpoint.
    :param train_iter: Training iterator (its position is saved with the state).
    :param validation_iter: Validation iterator scored by _evaluate().
    """
    self.state.checkpoint += 1
    # save parameters and evaluate on validation data
    if utils.is_primary_worker():
        self._save_params()
    train_metrics = [lf.metric for lf in self.loss_functions]
    logger.info("Checkpoint [%d]\tUpdates=%d Epoch=%d Samples=%d Time-cost=%.3f Updates/sec=%.3f",
                self.state.checkpoint, self.state.updates, self.state.epoch,
                self.state.samples, time_cost, self.config.checkpoint_interval / time_cost)
    logger.info('Checkpoint [%d]\t%s', self.state.checkpoint,
                "\t".join("Train-%s" % str(metric) for metric in train_metrics))
    val_metrics = self._evaluate(self.state.checkpoint, validation_iter, checkpoint_decoder)
    has_improved = self._determine_improvement(val_metrics)
    self.state.converged = self._determine_convergence()
    self.state.diverged = self._determine_divergence(val_metrics)
    self._adjust_learning_rate(has_improved)
    if utils.is_primary_worker():
        if has_improved:
            # track best params/optimizer/scheduler for a possible rollback
            self._update_best_params()
            self._save_optimizer_state(self.best_optimizer_state_fname)
            self._save_lr_scheduler(self.best_lr_scheduler_fname)
        self._write_and_log_metrics(train_metrics=train_metrics, val_metrics=val_metrics)
        self._save_training_state(train_iter)
    # reset running training metrics for the next checkpoint period
    for metric in train_metrics:
        metric.reset()
    if self.checkpoint_callback:
        self.checkpoint_callback(self.state.checkpoint)
def _forward_backward(self, batch: data_io.Batch, is_update_batch: bool = True):
    """
    Performs forward-backward pass on a batch.

    :param batch: Current data batch.
    :param is_update_batch: Whether this is the final batch before updating
                            weights.
    :return: List loss values.
    """
    batch = batch.load(device=self.device)
    with torch.cuda.amp.autocast(cache_enabled=False) if self.using_amp else utils.no_context():  # type: ignore
        # Forward
        outputs = self.training_model(batch.source, batch.source_length, batch.target, batch.target_length)
        # Loss (scaled by update interval)
        loss_outputs = [loss_function(outputs, batch.labels) for loss_function in self.loss_functions]
    # TODO(mdenkows): We currently give 1/N weight to every batch in the
    # update, but batches have subtly different sizes (different numbers
    # of padding tokens). Consider normalizing by relative batch size.
    loss_values = [v / self.config.update_interval if self.config.update_interval > 1
                   else v for v, _ in loss_outputs]
    sum_losses = sum(loss_values) if len(loss_values) > 1 else loss_values[0]
    # Backward. PyTorch AMP and Apex AMP use different loss scaling APIs.
    if self.using_amp:
        # scale the loss so small float16 gradients don't underflow
        sum_losses = self._scaler.scale(sum_losses)
    if self.using_apex_amp:
        # Apex delays unscaling until the actual update batch so gradients
        # can be accumulated in scaled form.
        with apex.amp.scale_loss(sum_losses, self.optimizer,
                                 delay_unscale=not is_update_batch) as scaled_sum_losses:
            scaled_sum_losses.backward()
    else:
        sum_losses.backward()  # type: ignore
    return loss_outputs
def _step(self, batch: data_io.Batch) -> bool:
    """
    Processes one batch: forward/backward pass and, on every Nth
    (`update_interval`) batch, gradient clipping plus an optimizer step.

    :param batch: Current data batch.
    :return: True if a gradient step (weight update) was taken.
    """
    self.state.batches += 1
    self.state.samples += batch.samples
    # We accumulate gradients over N=update_interval batches before running
    # the optimizer to update model weights. Every Nth batch is an update
    # batch.
    is_update_batch = self.state.batches % self.config.update_interval == 0
    # Forward/loss/backward (compute gradients). In distributed mode,
    # workers accumulate gradients locally for N-1 batches (no_sync), then
    # average the accumulated gradients across workers during the update
    # batch.
    with (self.training_model.no_sync() if utils.is_distributed() and not is_update_batch  # type: ignore
            else utils.no_context()):
        loss_outputs = self._forward_backward(batch, is_update_batch)
    # fold this batch's loss values into the running training metrics
    for loss_func, (loss_value, num_samples) in zip(self.loss_functions, loss_outputs):
        loss_func.metric.update(loss_value.item(), num_samples.item())
    did_grad_step = False
    if is_update_batch:
        self.state.updates += 1
        if self.using_amp:
            # unscale first so clipping thresholds apply to true gradients
            self._scaler.unscale_(self.optimizer)
        # Clip gradients
        if self.optimizer_config.gradient_clipping_type == C.GRADIENT_CLIPPING_TYPE_ABS:
            torch.nn.utils.clip_grad.clip_grad_value_(self.training_model.parameters(),
                                                      self.optimizer_config.gradient_clipping_threshold)
        elif self.optimizer_config.gradient_clipping_type == C.GRADIENT_CLIPPING_TYPE_NORM:
            torch.nn.utils.clip_grad.clip_grad_norm_(self.training_model.parameters(),
                                                     self.optimizer_config.gradient_clipping_threshold)
        # Set learning rate for current step
        for param_group in self.optimizer.param_groups:
            param_group['lr'] = self.optimizer_config.lr_scheduler(self.state.updates) \
                if self.optimizer_config.lr_scheduler is not None else self.optimizer_config.lr
        # Update weights and reset gradients
        if self.using_amp:
            self._scaler.step(self.optimizer)
            self._scaler.update()
        else:
            self.optimizer.step()
        self.optimizer.zero_grad(**self.zero_grad_kwargs)
        did_grad_step = True
    self._speedometer(self.state.epoch, self.state.batches,
                      self.state.updates, batch.samples, batch.tokens, (lf.metric for lf in self.loss_functions))
    return did_grad_step
def _evaluate(self, checkpoint: int, data_iter,
              checkpoint_decoder: Optional[checkpoint_decoder.CheckpointDecoder]) -> List[loss.LossMetric]:
    """
    Computes loss(es) on validation data and returns their metrics.

    :param checkpoint: Current checkpoint number (names the decoder output file).
    :param data_iter: Validation data iterator.
    :param checkpoint_decoder: Optional decoder producing extra metrics
                               (e.g. BLEU); run on the primary worker only.
    :return: List of validation metrics, same order as self.loss_functions,
             followed by any checkpoint-decoder metrics.
    """
    # Switch model to eval mode (disable dropout, etc.) to score validation
    # set and run checkpoint decoder.
    self.sockeye_model.eval()
    data_iter.reset()
    val_metrics = [lf.create_metric() for lf in self.loss_functions]
    for batch in data_iter:
        batch = batch.load(device=self.device)
        with torch.inference_mode():
            # Forward: use sockeye_model because (traced) training_model
            # doesn't support eval mode (still runs dropout, etc.)
            outputs = self.sockeye_model(batch.source, batch.source_length, batch.target, batch.target_length)
            # Loss
            loss_outputs = [loss_function(outputs, batch.labels) for loss_function in self.loss_functions]
        # Update validation metrics for batch
        for loss_metric, (loss_value, num_samples) in zip(val_metrics, loss_outputs):
            loss_metric.update(loss_value.item(), num_samples.item())
    # Primary worker optionally runs the checkpoint decoder
    decoder_metrics = {}  # type: Dict[str, float]
    if utils.is_primary_worker() and checkpoint_decoder is not None:
        output_name = os.path.join(self.config.output_dir, C.DECODE_OUT_NAME.format(checkpoint=checkpoint))
        decoder_metrics = checkpoint_decoder.decode_and_evaluate(output_name=output_name)
    # Broadcast decoder metrics (if any) from primary worker to secondary
    # workers
    if utils.is_distributed():
        decoder_metrics = utils.broadcast_object(decoder_metrics)
    # Add decoder metrics (if any) to validation metrics
    for metric_name, metric_value in decoder_metrics.items():
        # Bug fix: the previous check (`metric_name not in val_metrics`)
        # compared a string against LossMetric objects and could never fail;
        # compare against the metric *names* instead.
        assert metric_name not in (vm.name for vm in val_metrics), \
            "Duplicate validation metric %s" % metric_name
        metric = loss.LossMetric(name=metric_name)
        metric.update(metric_value, num_samples=1)
        val_metrics.append(metric)
    logger.info('Checkpoint [%d]\t%s',
                self.state.checkpoint, "\t".join("Validation-%s" % str(lm) for lm in val_metrics))
    # Switch model back to train mode to continue training
    self.sockeye_model.train()
    return val_metrics
def _determine_improvement(self, val_metrics: List[loss.LossMetric]) -> bool:
    """
    Determines whether early stopping metric on validation data improved and updates best value and checkpoint in
    the state.

    :param val_metrics: Validation metrics.
    :return: Whether model has improved on held-out data since last checkpoint.
    """
    value = None
    value_is_better = False
    for val_metric in val_metrics:
        if val_metric.name == self.config.early_stopping_metric:
            value = val_metric.get()
            # In distributed mode, the primary worker makes an authoritative
            # check of whether the metric value has improved and broadcasts
            # the result to secondary workers. Non-determinism in the order
            # of GPU operations can lead to slight numeric variations across
            # workers, causing potential desync if each worker makes its own
            # check for key training decisions (reducing learning rate,
            # early stopping, etc.).
            if utils.is_primary_worker():
                # Authoritative check
                value_is_better = utils.metric_value_is_better(value,
                                                               self.state.best_metric,
                                                               self.config.early_stopping_metric)
            if utils.is_distributed():
                # Broadcast result
                value_is_better = utils.broadcast_object(value_is_better)
            if value_is_better:
                logger.info("Validation-%s improved to %f (delta=%f).", self.config.early_stopping_metric,
                            value, abs(value - self.state.best_metric))
                self.state.best_metric = value
                self.state.best_checkpoint = self.state.checkpoint
                self.state.num_not_improved = 0
    assert value is not None, "Early stopping metric %s not found in validation metrics." % self.config.early_stopping_metric
    if not value_is_better:
        self.state.num_not_improved += 1
        logger.info("Validation-%s has not improved for %d checkpoints, best so far: %f",
                    self.config.early_stopping_metric, self.state.num_not_improved, self.state.best_metric)
    # Update best metric history (bounded window of size patience+1, used by
    # _determine_convergence for threshold-based stopping)
    self.state.best_metric_history.append(self.state.best_metric)
    if (self.config.max_num_checkpoint_not_improved is not None
            and len(self.state.best_metric_history) > self.config.max_num_checkpoint_not_improved + 1):
        self.state.best_metric_history.popleft()
    return value_is_better
def _determine_convergence(self) -> bool:
    """
    True if model has converged w.r.t early stopping criteria (patience).

    Order: first check required minimums (samples, updates, epochs), then
    check early stopping criteria (checkpoints not improved).
    """
    if self.config.min_samples is not None and self.state.samples < self.config.min_samples:
        logger.info("Minimum number of samples (%d) not reached yet: %d",
                    self.config.min_samples, self.state.samples)
        return False
    if self.config.min_updates is not None and self.state.updates < self.config.min_updates:
        logger.info("Minimum number of updates (%d) not reached yet: %d",
                    self.config.min_updates, self.state.updates)
        return False
    if self.config.min_epochs is not None and self.state.epoch < self.config.min_epochs:
        logger.info("Minimum number of epochs (%d) not reached yet: %d",
                    self.config.min_epochs, self.state.epoch)
        return False
    if (self.config.max_num_checkpoint_not_improved is not None
            and 0 <= self.config.max_num_checkpoint_not_improved
            and self.state.checkpoint >= self.config.max_num_checkpoint_not_improved):
        # In distributed mode, the primary worker makes the authoritative
        # calculation of improvement over the window for evaluating stopping
        window_improvement = 0.
        if utils.is_primary_worker():
            # improvement of the current best over the oldest best in the window
            window_improvement = abs(self.state.best_metric - self.state.best_metric_history[0])
        if utils.is_distributed():
            window_improvement = utils.broadcast_object(window_improvement)
        # <= to correctly handle threshold == 0
        if window_improvement <= self.config.checkpoint_improvement_threshold:
            logger.info("Maximum number of not improved checkpoints reached: "
                        "improvement %f <= %f over %d checkpoints", window_improvement,
                        self.config.checkpoint_improvement_threshold, self.config.max_num_checkpoint_not_improved)
            return True
        else:
            logger.info("Sufficient improvement to continue: %f > %f over %d checkpoints", window_improvement,
                        self.config.checkpoint_improvement_threshold, self.config.max_num_checkpoint_not_improved)
    return False
def _determine_divergence(self, val_metrics: List[loss.LossMetric]) -> bool:
    """
    True if last perplexity is infinite or >2*target_vocab_size.
    """
    # (5) detect divergence with respect to the perplexity value at the last
    # checkpoint; NaN if no perplexity metric is present.
    last_ppl = next((m.get() for m in val_metrics if m.name == C.PERPLEXITY), float('nan'))
    # using a double of uniform distribution's value as a threshold
    divergence_threshold = 2 * self.sockeye_model.config.vocab_target_size
    if np.isfinite(last_ppl) and last_ppl <= divergence_threshold:
        return False
    logger.warning("Model optimization diverged. Last checkpoint's perplexity: %f", last_ppl)
    return True
def _adjust_learning_rate(self, has_improved: bool):
    """
    Adjusts the optimizer learning rate if required and logs it.

    :param has_improved: Whether the early stopping metric improved at the
                         last checkpoint (fed to adaptive schedulers).
    """
    scheduler = self.optimizer_config.lr_scheduler
    lr = self.optimizer_config.lr
    if scheduler is not None:
        if issubclass(type(scheduler), lr_scheduler.AdaptiveLearningRateScheduler):
            lr_adjusted = scheduler.new_evaluation_result(has_improved)  # type: ignore
        else:
            lr_adjusted = False
        if lr_adjusted and not has_improved:
            # The scheduler reduced the learning rate without improvement:
            # roll back to the best known parameters and optimizer state.
            logger.info("Loading model parameters and optimizer states from best checkpoint: %d",
                        self.state.best_checkpoint)
            if os.path.exists(self.best_params_fname):
                self.sockeye_model.load_parameters(filename=self.best_params_fname, device=self.device)
            if os.path.exists(self.best_optimizer_state_fname):
                self._load_optimizer_state(self.best_optimizer_state_fname)
        lr = scheduler.lr
    logger.info("Checkpoint [%d]\tLearning-rate=%.6f", self.state.checkpoint, lr)
def _write_and_log_metrics(self,
                           train_metrics: Iterable[loss.LossMetric],
                           val_metrics: Iterable[loss.LossMetric]):
    """
    Updates metrics for current checkpoint.
    Writes all metrics to the metrics file, optionally logs to tensorboard, and sends metrics to custom logger.

    :param train_metrics: Running training metrics of the checkpoint period.
    :param val_metrics: Validation metrics from _evaluate().
    """
    data = {"epoch": self.state.epoch,
            "learning-rate": (self.optimizer_config.lr if self.optimizer_config.lr_scheduler is None
                              else self.optimizer_config.lr_scheduler.lr),
            "time-elapsed": self.state.time_elapsed,
            "max-gpu-memory": torch.cuda.max_memory_allocated(self.device),
            "converged": self.state.converged,
            "diverged": self.state.diverged}
    # suffix names so train and validation values don't collide
    for metric in train_metrics:
        data["%s-train" % metric.name] = metric.get()
    for metric in val_metrics:
        data["%s-val" % metric.name] = metric.get()
    self.state.metrics.append(data)
    utils.write_metrics_file(self.state.metrics, self.metrics_fname)
    self._tflogger.log_metrics(metrics=data, checkpoint=self.state.checkpoint)
    # "safe": failures of the custom logger must not interrupt training
    safe_custom_metrics_logger(logging_function=self._custom_metrics_logger,
                               metrics=data,
                               global_step=self.state.checkpoint)
def _update_best_params(self):
    """
    Updates the params.best link to the latest best parameter file.
    """
    target = C.PARAMS_NAME % self.state.best_checkpoint
    link = self.best_params_fname
    # lexists (not exists) so a stale/dangling symlink is also replaced
    if os.path.lexists(link):
        os.remove(link)
    os.symlink(target, link)
    logger.info("'%s' now points to '%s'", link, target)
    def _save_params(self):
        """
        Saves model parameters at current checkpoint and optionally cleans up older parameter files to save disk space.
        """
        # Write params.<checkpoint> first, then prune old parameter files while
        # protecting the best checkpoint and any cached top-N files.
        self.sockeye_model.save_parameters(self.current_params_fname)
        cleanup_params_files(self.config.output_dir, self.config.max_params_files_to_keep, self.state.checkpoint,
                             self.state.best_checkpoint, self.config.keep_initializations,
                             self.config.max_params_files_to_cache, self.config.cache_metric,
                             self.config.cache_strategy)
    def _save_optimizer_state(self, fname):
        """Serialize the optimizer's state_dict to ``fname`` via torch.save."""
        torch.save(self.optimizer.state_dict(), fname)
        logger.info('Saved optimizer state to "%s"', fname)
    def _load_optimizer_state(self, fname):
        """Restore the optimizer's state_dict from ``fname``, mapping tensors onto this trainer's device."""
        self.optimizer.load_state_dict(torch.load(fname, map_location=self.device))
        logger.info('Loaded optimizer state from "%s"', fname)
def _save_lr_scheduler(self, fname):
if self.optimizer_config.lr_scheduler is not None:
with open(fname, "wb") as fp:
pickle.dump(self.optimizer_config.lr_scheduler, fp)
logger.info("Saved '%s' to '%s'", self.optimizer_config.lr_scheduler, fname)
def _load_lr_scheduler(self, fname):
if os.path.exists(fname):
with open(fname, "rb") as fp:
self.optimizer_config.lr_scheduler = pickle.load(fp)
logger.info("Loaded '%s' from '%s'", self.optimizer_config.lr_scheduler, fname)
    def _save_training_state(self, train_iter: data_io.BaseParallelSampleIter):
        """
        Saves current training state.

        Writes all resumable state into a temporary directory and then swaps it
        into place with renames, so a crash mid-save cannot corrupt the
        previous training state.
        """
        # Create temporary directory for storing the state of the optimization process
        training_state_dirname = os.path.join(self.config.output_dir, C.TRAINING_STATE_TEMP_DIRNAME)
        if not os.path.exists(training_state_dirname):
            os.mkdir(training_state_dirname)
        # (1) Parameters: link current file
        params_base_fname = C.PARAMS_NAME % self.state.checkpoint
        params_file = os.path.join(training_state_dirname, C.TRAINING_STATE_PARAMS_NAME)
        if os.path.exists(params_file):
            os.unlink(params_file)
        # Relative symlink so the output directory stays relocatable.
        os.symlink(os.path.join("..", params_base_fname), params_file)
        # (2) Optimizer state
        opt_state_fname = os.path.join(training_state_dirname, C.OPT_STATE_LAST)
        self._save_optimizer_state(opt_state_fname)
        # (3) Data iterator
        train_iter.save_state(os.path.join(training_state_dirname, C.BUCKET_ITER_STATE_NAME))
        # (4) Random generators
        # RNG states: python, numpy, torch (pickled in this order; loading must match)
        with open(os.path.join(training_state_dirname, C.RNG_STATE_NAME), "wb") as fp:
            pickle.dump(random.getstate(), fp)
            pickle.dump(np.random.get_state(), fp)
            pickle.dump(torch.random.get_rng_state(), fp)
        # (5) Training state
        self.state.save(os.path.join(training_state_dirname, C.TRAINING_STATE_NAME))
        # (5.5) lr_scheduler
        lr_scheduler_fname = os.path.join(training_state_dirname, C.LR_SCHEDULER_LAST)
        self._save_lr_scheduler(lr_scheduler_fname)
        # (6) AMP grad scaler state
        if self.using_amp:
            torch.save(self._scaler.state_dict(), os.path.join(training_state_dirname, C.GRAD_SCALER_STATE_NAME))
        if self.using_apex_amp:
            torch.save(apex.amp.state_dict(), os.path.join(training_state_dirname, C.APEX_AMP_STATE_NAME))
        # First we rename the existing directory to minimize the risk of state
        # loss if the process is aborted during deletion (which will be slower
        # than directory renaming)
        delete_training_state_dirname = os.path.join(self.config.output_dir, C.TRAINING_STATE_TEMP_DELETENAME)
        if os.path.exists(self.training_state_dirname):
            os.rename(self.training_state_dirname, delete_training_state_dirname)
        os.rename(training_state_dirname, self.training_state_dirname)
        if os.path.exists(delete_training_state_dirname):
            try:
                shutil.rmtree(delete_training_state_dirname)
            except FileNotFoundError:
                # This can be occur on file systems with higher latency, such as
                # distributed file systems. While repeated occurrences of this
                # warning may indicate a problem, seeing one or two warnings
                # during training is usually fine.
                logger.warning('Directory has already been removed: %s', delete_training_state_dirname)
def _load_training_state(self, train_iter: data_io.BaseParallelSampleIter):
"""
Loads the full training state from disk.
:param train_iter: training data iterator.
"""
# (1) Parameters
params_fname = os.path.join(self.training_state_dirname, C.TRAINING_STATE_PARAMS_NAME)
self.sockeye_model.load_parameters(params_fname, device=self.device, allow_missing=False, ignore_extra=False)
# (2) Optimizer states
opt_state_fname = os.path.join(self.training_state_dirname, C.OPT_STATE_LAST)
self._load_optimizer_state(opt_state_fname)
# (3) Data Iterator
train_iter.load_state(os.path.join(self.training_state_dirname, C.BUCKET_ITER_STATE_NAME))
# (4) Random generators
# RNG states: python, numpy, torch
with open(os.path.join(self.training_state_dirname, C.RNG_STATE_NAME), "rb") as fp:
random.setstate(pickle.load(fp))
np.random.set_state(pickle.load(fp))
torch.random.set_rng_state(pickle.load(fp))
# (5) Training state
self.state = TrainState.load(os.path.join(self.training_state_dirname, C.TRAINING_STATE_NAME))
# (5.5) lr_scheduler
lr_scheduler_fname = os.path.join(self.training_state_dirname, C.LR_SCHEDULER_LAST)
self._load_lr_scheduler(lr_scheduler_fname)
# (6) AMP grad scaler state
if self.using_amp:
self._scaler.load_state_dict(
torch.load(os.path.join(self.training_state_dirname, C.GRAD_SCALER_STATE_NAME)))
if self.using_apex_amp:
apex.amp.load_state_dict(torch.load(os.path.join(self.training_state_dirname, C.APEX_AMP_STATE_NAME)))
logger.info("Training State: epoch=%d, checkpoint=%d batches=%d updates=%d best_metric=%.2f, " \
"best_checkpoint=%d time_elapsed=%d" % (
self.state.epoch, self.state.checkpoint, self.state.batches, self.state.updates,
self.state.best_metric, self.state.best_checkpoint, self.state.time_elapsed))
def _cleanup(self, keep_training_state=False):
"""
Cleans parameter files, training state directory and waits for remaining decoding processes.
"""
cleanup_params_files(self.config.output_dir, self.config.max_params_files_to_keep,
self.state.checkpoint, self.state.best_checkpoint, self.config.keep_initializations,
self.config.max_params_files_to_cache, self.config.cache_metric,
self.config.cache_strategy)
if not keep_training_state:
if os.path.exists(self.training_state_dirname):
shutil.rmtree(self.training_state_dirname)
if os.path.exists(self.best_optimizer_state_fname):
os.remove(self.best_optimizer_state_fname)
if os.path.exists(self.best_lr_scheduler_fname):
os.remove(self.best_lr_scheduler_fname)
    @property
    def metrics_fname(self) -> str:
        # Path of the metrics file in the output directory.
        return os.path.join(self.config.output_dir, C.METRICS_NAME)

    @property
    def current_params_fname(self) -> str:
        # Parameter file for the current checkpoint.
        return os.path.join(self.config.output_dir, C.PARAMS_NAME % self.state.checkpoint)

    @property
    def best_params_fname(self) -> str:
        # Link that points at the best checkpoint's parameter file.
        return os.path.join(self.config.output_dir, C.PARAMS_BEST_NAME)

    @property
    def training_state_dirname(self) -> str:
        # Directory holding the resumable training state.
        return os.path.join(self.config.output_dir, C.TRAINING_STATE_DIRNAME)

    @property
    def best_optimizer_state_fname(self) -> str:
        # Optimizer state snapshot taken at the best checkpoint.
        return os.path.join(self.config.output_dir, C.OPT_STATE_BEST)

    @property
    def best_lr_scheduler_fname(self) -> str:
        # LR scheduler snapshot taken at the best checkpoint.
        return os.path.join(self.config.output_dir, C.LR_SCHEDULER_BEST)
class TensorboardLogger:
    """
    Thin wrapper for TensorBoard API to log training events.
    Flushes logging events to disk every 60 seconds.

    :param logdir: Directory to write Tensorboard event files to.
    :param source_vocab: Optional source vocabulary to log source embeddings.
    :param target_vocab: Optional target vocabulary to log target and output embeddings.
    """
    def __init__(self,
                 logdir: str,
                 source_vocab: Optional[vocab.Vocab] = None,
                 target_vocab: Optional[vocab.Vocab] = None) -> None:
        self.logdir = logdir
        self.source_labels = vocab.get_ordered_tokens_from_vocab(source_vocab) if source_vocab is not None else None
        self.target_labels = vocab.get_ordered_tokens_from_vocab(target_vocab) if target_vocab is not None else None
        try:
            from torch.utils.tensorboard import SummaryWriter
            logger.info("Logging training events for Tensorboard at '%s'", self.logdir)
            self._writer = SummaryWriter(log_dir=self.logdir, flush_secs=60)
        except ImportError:
            # tensorboard is an optional dependency; without it logging is disabled.
            logger.info("tensorboard not found. Consider 'pip install tensorboard' to log events to Tensorboard.")
            self._writer = None

    def log_metrics(self, metrics: Dict[str, Union[float, int, torch.Tensor]], checkpoint: int):
        """
        Logs scalar metrics and tensor histograms for the given checkpoint.
        No-op if tensorboard is unavailable; ``None`` values are skipped.
        """
        if self._writer is None:
            return
        for name, value in metrics.items():
            if isinstance(value, torch.Tensor):
                # Bug fix: 'value.size' is a bound method on torch.Tensor, so the old
                # comparison '... == value.size' was always False and histograms were
                # never written. numel() returns the element count as an int.
                if torch.isfinite(value).sum().item() == value.numel():
                    self._writer.add_histogram(tag=name, values=value, bins=100, global_step=checkpoint)
                else:
                    # Bug fix: the '%s' placeholder previously had no argument.
                    logger.warning("Histogram of %s not logged to tensorboard because of infinite data.", name)
            elif value is None:
                continue
            else:
                self._writer.add_scalar(tag=name, scalar_value=value, global_step=checkpoint)
        self._writer.flush()
class Speedometer:
    """
    Custom Speedometer to log samples and words per second.

    :param frequency: Log every this many batches.
    :param auto_reset: If True, reset the supplied metrics after logging them.
    """
    def __init__(self, frequency: int = 50, auto_reset: bool = True) -> None:
        self.frequency = frequency
        self.init = False  # becomes True after the first call (warm-up)
        self.tic = 0.0  # wall-clock timestamp of the last log / warm-up
        self.last_count = 0
        self.auto_reset = auto_reset
        self.samples = 0  # samples accumulated since the last log
        self.tokens = 0  # tokens accumulated since the last log
        self.msg = 'E=%d B=%d\ts/sec=%.2f tok/sec=%.2f u/sec=%.2f\t'

    def __call__(self, epoch: int, batches: int, updates: int, samples: int,
                 tokens: int, metrics: Optional[Iterable[loss.LossMetric]] = None):
        """Record one batch; log throughput (and metrics) every ``frequency`` batches."""
        count = batches
        # A decreasing batch count indicates a restart: re-enter warm-up.
        if self.last_count > count:
            self.init = False
        self.last_count = count
        self.samples += samples
        self.tokens += tokens
        if self.init:
            if count % self.frequency == 0:
                toc = (time.time() - self.tic)
                # updates/sec is derived from the average batches-per-update ratio.
                update_interval = batches / max(1, updates)
                updates_per_sec = self.frequency / update_interval / toc
                samples_per_sec = self.samples / toc
                tokens_per_sec = self.tokens / toc
                self.samples = 0
                self.tokens = 0
                if metrics is not None:
                    metric_values = []  # type: List[Tuple[str, float]]
                    for metric in metrics:
                        metric_values.append((metric.short_name, metric.get()))
                        # Optionally reset each metric so the next window starts fresh.
                        if self.auto_reset:
                            metric.reset()
                    logger.info(self.msg + '%s=%f ' * len(metric_values),
                                epoch, count, samples_per_sec, tokens_per_sec, updates_per_sec, *sum(metric_values, ()))
                else:
                    logger.info(self.msg, epoch, count, samples_per_sec, tokens_per_sec, updates_per_sec)
                self.tic = time.time()
        else:
            self.init = True
            self.tic = time.time()
def safe_custom_metrics_logger(logging_function: Optional[Callable],
                               metrics: Dict,
                               global_step: Optional[int] = None):
    """
    A thin wrapper for calling a custom metrics logging function, if supplied. As it uses an external function,
    it should never throw an exception. If there is no logging_function supplied, the function does nothing.

    :param logging_function: The function supplied by a caller of sockeye.train
    :param metrics: A non-empty dict of (nonempty str, float/int/bool) pairs.
    :param global_step: Optional argument, which can be used e.g. by Tensorboard.
    """
    if logging_function is None:
        return
    try:
        logging_function(metrics, global_step)
    except Exception as e:
        # Swallow any error raised by user-supplied code; training must never be
        # interrupted by a broken metrics logger. Lazy %-style logging args
        # replace the eager str.format call.
        logging.warning("Didn't use custom metrics logger, exception '%s' occurred", str(e))
def cleanup_params_files(output_folder: str, max_to_keep: int, checkpoint: int, best_checkpoint: int, keep_first: bool,
                         max_params_files_to_cache: int, cache_metric: str, cache_strategy: str):
    """
    Deletes oldest parameter files from a model folder.

    :param output_folder: Folder where param files are located.
    :param max_to_keep: Maximum number of files to keep, negative to keep all.
    :param checkpoint: Current checkpoint (i.e. index of last params file created).
    :param best_checkpoint: Best checkpoint. The parameter file corresponding to this checkpoint will not be deleted.
    :param keep_first: Don't delete the first checkpoint.
    :param max_params_files_to_cache: Maximum number of best param files to cache.
    :param cache_metric: Metric to determine best param files.
    :param cache_strategy: Strategy to select 'best' param files.
    """
    if max_to_keep <= 0:
        return
    # make sure we keep N best params files from .metrics file according to strategy.
    top_n: Set[int] = set()
    metrics_path = os.path.join(output_folder, C.METRICS_NAME)
    if max_params_files_to_cache > 0 and os.path.exists(metrics_path):
        maximize = C.METRIC_MAXIMIZE[cache_metric]
        points = utils.get_validation_metric_points(model_path=output_folder, metric=cache_metric)
        if cache_strategy == C.AVERAGE_BEST:
            # N best scoring points
            top = average.strategy_best(points, max_params_files_to_cache, maximize)
        elif cache_strategy == C.AVERAGE_LAST:
            # N sequential points ending with overall best
            top = average.strategy_last(points, max_params_files_to_cache, maximize)
        elif cache_strategy == C.AVERAGE_LIFESPAN:
            # Track lifespan of every "new best" point
            # Points dominated by a previous better point have lifespan 0
            top = average.strategy_lifespan(points, max_params_files_to_cache, maximize)
        else:
            raise RuntimeError("Unknown strategy, options are: %s" % C.AVERAGE_CHOICES)
        # Each point is (value, checkpoint); keep only the checkpoint numbers.
        top_n = set([x[1] for x in top])
    # get rid of params files that are neither among the latest, nor among the best
    existing_files = glob.glob(os.path.join(output_folder, C.PARAMS_PREFIX + "*"))
    params_name_with_dir = os.path.join(output_folder, C.PARAMS_NAME)
    # Candidate range covers everything older than the last max_to_keep checkpoints.
    for n in range(1 if keep_first else 0, max(1, checkpoint - max_to_keep + 1)):
        if n != best_checkpoint:
            param_fname_n = params_name_with_dir % n
            if param_fname_n in existing_files and n not in top_n:
                try:
                    os.remove(param_fname_n)
                except FileNotFoundError:
                    # This can be occur on file systems with higher latency,
                    # such as distributed file systems. While repeated
                    # occurrences of this warning may indicate a problem, seeing
                    # one or two warnings during training is usually fine.
                    logger.warning('File has already been removed: %s', param_fname_n)
| awslabs/sockeye | sockeye/training.py | Python | apache-2.0 | 45,283 |
#!/usr/bin/env python
# coding: utf-8
from __future__ import unicode_literals
import mock
import os
import unittest
from mkdocs import nav, utils, exceptions
from mkdocs.tests.base import dedent
class UtilsTests(unittest.TestCase):
    """Unit tests for mkdocs.utils: path mapping, media URLs, theme discovery and YAML loading."""

    def test_html_path(self):
        # Markdown paths map to pretty-URL style index.html locations.
        expected_results = {
            'index.md': 'index.html',
            'api-guide.md': 'api-guide/index.html',
            'api-guide/index.md': 'api-guide/index.html',
            'api-guide/testing.md': 'api-guide/testing/index.html',
        }
        for file_path, expected_html_path in expected_results.items():
            html_path = utils.get_html_path(file_path)
            self.assertEqual(html_path, expected_html_path)

    def test_url_path(self):
        # URL paths end with a trailing slash (directory-style URLs).
        expected_results = {
            'index.md': '/',
            'api-guide.md': '/api-guide/',
            'api-guide/index.md': '/api-guide/',
            'api-guide/testing.md': '/api-guide/testing/',
        }
        for file_path, expected_html_path in expected_results.items():
            html_path = utils.get_url_path(file_path)
            self.assertEqual(html_path, expected_html_path)

    def test_is_markdown_file(self):
        # Detection is by file extension, case-insensitive.
        expected_results = {
            'index.md': True,
            'index.MARKDOWN': True,
            'index.txt': False,
            'indexmd': False
        }
        for path, expected_result in expected_results.items():
            is_markdown = utils.is_markdown_file(path)
            self.assertEqual(is_markdown, expected_result)

    def test_is_html_file(self):
        # Detection is by file extension, case-insensitive.
        expected_results = {
            'index.htm': True,
            'index.HTML': True,
            'index.txt': False,
            'indexhtml': False
        }
        for path, expected_result in expected_results.items():
            is_html = utils.is_html_file(path)
            self.assertEqual(is_html, expected_result)

    def test_create_media_urls(self):
        pages = [
            {'Home': 'index.md'},
            {'About': 'about.md'},
            {'Sub': [
                {'Sub Home': 'index.md'},
                {'Sub About': 'about.md'},
            ]}
        ]
        # Absolute/scheme-relative URLs pass through; local paths become relative.
        expected_results = {
            'https://media.cdn.org/jq.js': 'https://media.cdn.org/jq.js',
            'http://media.cdn.org/jquery.js': 'http://media.cdn.org/jquery.js',
            '//media.cdn.org/jquery.js': '//media.cdn.org/jquery.js',
            'media.cdn.org/jquery.js': './media.cdn.org/jquery.js',
            'local/file/jquery.js': './local/file/jquery.js',
            'image.png': './image.png',
        }
        site_navigation = nav.SiteNavigation(pages)
        for path, expected_result in expected_results.items():
            urls = utils.create_media_urls(site_navigation, [path])
            self.assertEqual(urls[0], expected_result)

    def test_create_relative_media_url_sub_index(self):
        '''
        test special case where there's a sub/index.md page
        '''
        site_navigation = nav.SiteNavigation([
            {'Home': 'index.md'},
            {'Sub': [
                {'Sub Home': '/subpage/index.md'},
            ]}
        ])
        site_navigation.url_context.set_current_url('/subpage/')
        site_navigation.file_context.current_file = "subpage/index.md"

        # Small helper so each case reads as one assertion line.
        def assertPathGenerated(declared, expected):
            url = utils.create_relative_media_url(site_navigation, declared)
            self.assertEqual(url, expected)

        assertPathGenerated("img.png", "./img.png")
        assertPathGenerated("./img.png", "./img.png")
        assertPathGenerated("/img.png", "../img.png")

    def test_reduce_list(self):
        # Duplicates are removed while preserving first-seen order.
        self.assertEqual(
            utils.reduce_list([1, 2, 3, 4, 5, 5, 2, 4, 6, 7, 8]),
            [1, 2, 3, 4, 5, 6, 7, 8]
        )

    def test_get_themes(self):
        self.assertEqual(
            sorted(utils.get_theme_names()),
            ['mkdocs', 'readthedocs'])

    @mock.patch('pkg_resources.iter_entry_points', autospec=True)
    def test_get_theme_dir(self, mock_iter):
        path = 'some/path'
        theme = mock.Mock()
        theme.name = 'mkdocs2'
        theme.dist.key = 'mkdocs2'
        # The theme dir is derived from the package's __init__.py location.
        theme.load().__file__ = os.path.join(path, '__init__.py')
        mock_iter.return_value = iter([theme])
        self.assertEqual(utils.get_theme_dir(theme.name), os.path.abspath(path))

    def test_get_theme_dir_keyerror(self):
        self.assertRaises(KeyError, utils.get_theme_dir, 'nonexistanttheme')

    @mock.patch('pkg_resources.iter_entry_points', autospec=True)
    def test_get_theme_dir_importerror(self, mock_iter):
        # An ImportError raised while loading the entry point propagates.
        theme = mock.Mock()
        theme.name = 'mkdocs2'
        theme.dist.key = 'mkdocs2'
        theme.load.side_effect = ImportError()
        mock_iter.return_value = iter([theme])
        self.assertRaises(ImportError, utils.get_theme_dir, theme.name)

    @mock.patch('pkg_resources.iter_entry_points', autospec=True)
    def test_get_themes_warning(self, mock_iter):
        # Two dists registering the same theme name: only one name is reported.
        theme1 = mock.Mock()
        theme1.name = 'mkdocs2'
        theme1.dist.key = 'mkdocs2'
        theme1.load().__file__ = "some/path1"
        theme2 = mock.Mock()
        theme2.name = 'mkdocs2'
        theme2.dist.key = 'mkdocs3'
        theme2.load().__file__ = "some/path2"
        mock_iter.return_value = iter([theme1, theme2])
        self.assertEqual(
            sorted(utils.get_theme_names()),
            sorted(['mkdocs2', ]))

    @mock.patch('pkg_resources.iter_entry_points', autospec=True)
    @mock.patch('pkg_resources.get_entry_map', autospec=True)
    def test_get_themes_error(self, mock_get, mock_iter):
        # A theme name colliding with a built-in one raises ConfigurationError.
        theme1 = mock.Mock()
        theme1.name = 'mkdocs'
        theme1.dist.key = 'mkdocs'
        theme1.load().__file__ = "some/path1"
        theme2 = mock.Mock()
        theme2.name = 'mkdocs'
        theme2.dist.key = 'mkdocs2'
        theme2.load().__file__ = "some/path2"
        mock_iter.return_value = iter([theme1, theme2])
        mock_get.return_value = {'mkdocs': theme1, }
        self.assertRaises(exceptions.ConfigurationError, utils.get_theme_names)

    def test_nest_paths(self):
        j = os.path.join
        # Flat file paths are nested into per-directory sections.
        result = utils.nest_paths([
            'index.md',
            j('user-guide', 'configuration.md'),
            j('user-guide', 'styling-your-docs.md'),
            j('user-guide', 'writing-your-docs.md'),
            j('about', 'contributing.md'),
            j('about', 'license.md'),
            j('about', 'release-notes.md'),
        ])
        self.assertEqual(
            result,
            [
                'index.md',
                {'User guide': [
                    j('user-guide', 'configuration.md'),
                    j('user-guide', 'styling-your-docs.md'),
                    j('user-guide', 'writing-your-docs.md')]},
                {'About': [
                    j('about', 'contributing.md'),
                    j('about', 'license.md'),
                    j('about', 'release-notes.md')]}
            ]
        )

    def test_unicode_yaml(self):
        # Loaded YAML strings must be text (unicode), not bytes, on py2 and py3.
        yaml_src = dedent(
            '''
            key: value
            key2:
              - value
            '''
        )
        config = utils.yaml_load(yaml_src)
        self.assertTrue(isinstance(config['key'], utils.text_type))
        self.assertTrue(isinstance(config['key2'][0], utils.text_type))
| d0ugal/mkdocs | mkdocs/tests/utils/utils_tests.py | Python | bsd-2-clause | 7,365 |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
__author__ = 'Dirk Braunschweiger'
__version__ = "0.01"
# db_folders -> List of DB Folder (for space check)
# db_client -> name of ycsb client
# db_args -> special ycsb arguments for this db
# db_name -> name of this db (e.g. for workload file)
# db_desc -> more detailed name/description
# jvm_args -> special jvm_args for this db and ycsb
# prerun_once -> list of commands to run local once before ycsb (%%IP%% uses first db vm) (without ycsb, sync or space diff or poweroff commands!)
# postrun_once -> list of commands to run local once after ycsb (%%IP%% uses first db vm) (without ycsb, sync or space diff or poweroff commands!)
# prerun -> list of commands to run before ycsb (all vms or local) (without ycsb, sync or space diff or poweroff commands!)
# postrun -> list of commands to run after ycsb (all vms or local) (without ycsb, sync or space diff or poweroff commands!)
# prerun_master -> list of commands to run before ycsb (only on master(first=ID 0) vm or local)) (without ycsb, sync or space diff or poweroff commands!)
# postrun_master -> list of commands to run after ycsb (only on master(first=ID 0) vm or local)) (without ycsb, sync or space diff or poweroff commands!)
# prerun_slaves -> list of commands to run before ycsb (only on slave (all without master(=ID 0)) vms or local)) (without ycsb, sync or space diff or poweroff commands!)
# postrun_slaves -> list of commands to run after ycsb (only on slave (all without master(=ID 0)) vms or local)) (without ycsb, sync or space diff or poweroff commands!)
# prerun_dict -> list of commands to run before ycsb for each db vm (key=number of vm) (without ycsb, sync or space diff or poweroff commands!) (%%SSH%% not needed)
# postrun_dict -> list of commands to run after ycsb for each db vm (key=number of vm) (without ycsb, sync or space diff or poweroff commands!) (%%SSH%% not needed)
# check -> list of commands to run after prerun (all vms or local) for checking if everything runs correctly (systemctl start xyz oftern returns true even if start failed somehow. Check that here!)
# check_master -> list of commands to run after prerun (all vms or local) for checking if everything runs correctly (only on master(first=ID 0) vm or local))
# check_slaves -> list of commands to run after prerun (all vms or local) for checking if everything runs correctly (all without master(=ID 0)) vms or local))
# check_dict -> list of commands to run after prerun for each db vm (key=number of vm) (without ycsb, sync or space diff or poweroff commands!) (%%SSH%% not needed)
# basic -> True/False, if True this is a basic database, so no need to ssh for space checking
# sequence -> which vm should be provisioned first? (for all postrun/prerun dicts/lists. First number is considered master db vm, rest are slaves.)
# the following variables are possible in prerun_once, postrun_once, prerun, prerun_master, prerun_slaves, check, check_master, check_slaves, postrun, postrun_master, postrun_slaves, prerun_dict, postrun_dict, check_dict, db_args:
# %%IP%% -> IP of (actual) db vm
# %%IPgen%% -> IP of (actual) generator vm (on which this script runs)
# %%IPn%% -> IP of db vm number n (e.g. %%IP2%%)
# %%IPall%% -> give String with IP of all vms)
# %%SSH%% -> if SSH should be used (set at the beginning)
# Order of Preruns/Postruns:
# 1. prerun/postrun/check, 2. prerun_master/postrun_master/check_master, 3. preun_skaves/postrun_slaves/check_slaves, 4.prerun_dict/postrun_dict/check_dict
# General Order:
# prerun -> check -> ycsb -> postrun
def getDict():
    """Return the database configuration dict for akumuli (single VM, RF 1)."""
    # Build the whole configuration in one literal instead of assigning the
    # keys one by one; insertion order matches the original definition.
    return {
        "db_folders": ["/root/.akumuli/"],
        "db_client": "akumuli",
        "db_args": "-p tcpPort=8282 -p httpPort=8181 -p ip=%%IP%% -p username=admin -p passwd=admin",
        "db_name": "akumuli",
        "db_desc": "akumuli DB on 1 VM.",
        "jvm_args": "-jvm-args='-Xmx4096m'",
        "prerun_once": [],
        "postrun_once": [],
        "prerun": [],  # no schema creation
        "postrun": [],
        "prerun_master": [],
        "postrun_master": [],
        "prerun_slaves": [],
        "postrun_slaves": [],
        "prerun_dict": {},
        "postrun_dict": {},
        "check": [],
        "check_master": [],
        "check_slaves": [],
        "check_dict": {},
        "basic": False,
        "sequence": [0],
    }
| TSDBBench/Overlord | vagrant_files/generator/files/databases/akumuli_cl1_rf1.py | Python | apache-2.0 | 4,455 |
''' Assemble WebSocket wire message fragments into complete Bokeh Server
message objects that can be processed.
'''
from __future__ import absolute_import
import six
from tornado import gen
from .exceptions import ValidationError
import logging
log = logging.getLogger(__name__)
class Receiver(object):
    ''' Receive wire message fragments and assemble complete Bokeh server
    message objects.

    On ``MessageError`` or ``ValidationError``, the receiver will reset its
    state and attempt to consume a new message.

    The *fragment* received can be either bytes or unicode, depending on
    the transport's semantics (WebSocket allows both).

    .. code-block:: python

        [
            # these are required
            b'{header}',        # serialized header dict
            b'{metadata}',      # serialized metadata dict
            b'{content},        # serialized content dict

            # these are optional, and come in pairs; header contains num_buffers
            b'{buf_header}',    # serialized buffer header dict
            b'array'            # raw buffer payload data
            ...
        ]

    The ``header`` fragment will have the form:

    .. code-block:: python

        header = {
            # these are required
            'msgid'       : <str> # a unique id for the message
            'msgtype'     : <str> # a message type, e.g. 'ACK', 'PATCH-DOC', etc

            # these are optional
            'num_buffers' : <int> # the number of additional buffers, if any
        }

    The ``metadata`` fragment may contain any arbitrary information. It is not
    processed by Bokeh for any purpose, but may be useful for external
    monitoring or instrumentation tools.

    The ``content`` fragment is defined by the specific message type.

    '''

    def __init__(self, protocol):
        ''' Configure a Receiver with a specific Bokeh protocol version.

        Args:
            protocol (Protocol) :
                A Bokeh protocol object to use to assemble colleted message
                fragments.

        '''
        self._protocol = protocol
        # State machine: _current_consumer is the method that will handle the
        # next incoming fragment; it starts at the header of a new message.
        self._current_consumer = self._HEADER
        self._message = None
        self._buf_header = None

    @gen.coroutine
    def consume(self, fragment):
        ''' Consume individual protocol message fragments.

        Args:
            fragment (``JSON``) :
                A message fragment to assemble. When a complete message is
                assembled, the receiver state will reset to begin consuming a
                new message.

        '''
        # Dispatch to whichever consumer the state machine currently expects.
        # self._message is None until a full message has been assembled.
        self._current_consumer(fragment)
        raise gen.Return(self._message)

    def _HEADER(self, fragment):
        # First fragment of a new message: reset assembly state.
        self._assume_text(fragment)
        self._message = None
        self._partial = None
        self._fragments = [fragment]
        self._current_consumer = self._METADATA

    def _METADATA(self, fragment):
        # Second fragment: metadata JSON.
        self._assume_text(fragment)
        self._fragments.append(fragment)
        self._current_consumer = self._CONTENT

    def _CONTENT(self, fragment):
        # Third fragment: content JSON; enough to assemble the base message.
        self._assume_text(fragment)
        self._fragments.append(fragment)
        header_json, metadata_json, content_json = self._fragments[:3]
        self._partial = self._protocol.assemble(header_json, metadata_json, content_json)
        self._check_complete()

    def _BUFFER_HEADER(self, fragment):
        # Optional buffer pair, part 1: text header describing the payload.
        self._assume_text(fragment)
        self._buf_header = fragment
        self._current_consumer = self._BUFFER_PAYLOAD

    def _BUFFER_PAYLOAD(self, fragment):
        # Optional buffer pair, part 2: raw binary payload.
        self._assume_binary(fragment)
        self._partial.assemble_buffer(self._buf_header, fragment)
        self._check_complete()

    def _check_complete(self):
        # Either publish the finished message and await a new header, or
        # expect another buffer header/payload pair.
        if self._partial.complete:
            self._message = self._partial
            self._current_consumer = self._HEADER
        else:
            self._current_consumer = self._BUFFER_HEADER

    def _assume_text(self, fragment):
        # Raise ValidationError unless the fragment is a text (unicode) frame.
        if not isinstance(fragment, six.text_type):
            raise ValidationError("expected text fragment but received binary fragment for %s" % (self._current_consumer.__name__))

    def _assume_binary(self, fragment):
        # Raise ValidationError unless the fragment is a binary (bytes) frame.
        if not isinstance(fragment, six.binary_type):
            raise ValidationError("expected binary fragment but received text fragment for %s" % (self._current_consumer.__name__))
| dennisobrien/bokeh | bokeh/protocol/receiver.py | Python | bsd-3-clause | 4,356 |
# Copyright (C) 2015 – 2021 Noa-Emil Nissinen (4shadoww)
from core.hakkuframework import *
import os
from core import colors
from scapy.all import *
from core import network_scanner
import random
from core import getpath
from core.setvar import setvar
# Module metadata — presumably read by the Hakku framework's module loader
# (name, version, dependencies, root requirement); verify against the loader.
conf = {
    "name": "mac_spoof",
    "version": "1.0",
    "shortdesc": "mac spoof",
    "author": "4shadoww",
    "github": "4shadoww",
    "email": "4shadoww0@gmail.com",
    "initdate": "2016-03-09",
    "lastmod": "2017-01-03",
    "apisupport": True,
    "needroot": 1,
    "dependencies": ["ethtool"]
}

# Custom commands (command name -> help text)
customcommands = {
    'scan': 'scan network',
    'random_mac': 'generate random mac',
    'reset': 'end mac spoof'
}

# List of the variables (name -> [default value, description])
variables = OrderedDict((
    ('fake_mac', ['02:a0:04:d3:00:11', 'fake mac']),
    ('interface', ['eth0', 'network interface']),
))

# Additional help notes
help_notes = colors.red+"this module will not work without root permissions, and ethtool!"+colors.end

# Additional notes to options
option_notes = colors.yellow+" you can generate fake_mac using 'random_mac' command\n use 'reset' command to end mac spoof"+colors.end

# Simple changelog
changelog = "Version 1.0:\nrelease"
def run():
    """Spoof the MAC address of the configured interface.

    Stops NetworkManager, applies the fake MAC via ifconfig, then restarts
    NetworkManager. Requires root. Reads 'interface' and 'fake_mac' from the
    module-level ``variables`` dict.
    """
    print_info("status: starting mac spoof")
    # Security fix: use argument lists instead of os.system shell strings so
    # user-controlled option values (interface, fake_mac) cannot inject shell
    # commands. subprocess is available via the framework's star import.
    subprocess.call(["service", "network-manager", "stop"])
    print_info("status: trying to set fake mac address...")
    subprocess.call(["ifconfig", variables['interface'][0], "hw", "ether", variables['fake_mac'][0]])
    subprocess.call(["service", "network-manager", "start"])
    print_success("status: done!")
def scan(args):
    """Handler for the 'scan' custom command: delegate to the framework's network scanner."""
    network_scanner.scan()
def random_mac(args):
    """Handler for the 'random_mac' command: store a random MAC in 'fake_mac'."""
    # Keep the fixed vendor prefix and randomize the three device octets.
    octets = [random.randint(0, 255) for _ in range(3)]
    mac = "f4:ac:c1:" + ":".join("%02x" % octet for octet in octets)
    setvar('fake_mac', mac, variables)
def reset(args):
    """Handler for the 'reset' command: restore the interface's permanent MAC."""
    # ethtool -P prints the hardware ("permanent") MAC address.
    command = ['ethtool', '-P', variables['interface'][0]]
    output = subprocess.Popen( command, stdout=subprocess.PIPE ).communicate()[0]
    # Strip the bytes-literal wrapper and the "Permanent address: " prefix.
    # NOTE(review): the trailing [:-2] presumably drops "\n'" from the repr —
    # fragile; verify against actual ethtool output.
    realmac = str(output)
    realmac = realmac.replace("b'Permanent address: ", "")
    realmac = realmac.replace("'", "")
    realmac = realmac[:-2]
    if not realmac:
        print_error("error")
        # Framework convention appears to be returning (not raising) ModuleError.
        return ModuleError("error")
    else:
        print_info("realmac: "+realmac)
        xterm1a = "service network-manager stop"
        xterm2a = "ifconfig "+variables['interface'][0]+" hw ether "+realmac
        xterm3a = "service network-manager start"
        print_info("setting real mac")
        os.system(xterm1a)
        print_info("trying to set real mac address...")
        os.system(xterm2a)
        os.system(xterm3a)
        print_success("done!")
| 4shadoww/hakkuframework | modules/mac_spoof.py | Python | mit | 2,683 |
from __future__ import unicode_literals
from copy import copy
import os
import shutil
import sys
import tempfile
from django.apps import apps
from django.conf import settings
from django.core.management import call_command
from django.core.management.base import CommandError
from django.db import connections, DEFAULT_DB_ALIAS
from django.db.migrations.loader import MigrationLoader
from django.db.models.signals import post_migrate, pre_migrate
from django.test import override_settings, TransactionTestCase
from django.utils import six
from django_migrate_project.loader import PROJECT_MIGRATIONS_MODULE_NAME
import mock
TEST_MIGRATIONS_DIR = os.path.join(settings.BASE_DIR, 'test_migrations')
PROJECT_MIGRATIONS_DIRECTORY = os.path.join(
TEST_MIGRATIONS_DIR, 'project_migration')
class MigrateProjectTest(TransactionTestCase):
""" Tests for 'migrateproject' """
def setUp(self):
# Roll back migrations to a blank state
call_command('migrate', 'blog', 'zero', verbosity=0)
call_command('migrate', 'cookbook', 'zero', verbosity=0)
call_command('migrate', 'event_calendar', 'zero', verbosity=0)
call_command('migrate', 'newspaper', 'zero', verbosity=0)
self._old_sys_path = copy(sys.path)
    def tearDown(self):
        """Remove temp dirs, unload migration modules, and restore sys.path."""
        # Delete any temp directories
        if getattr(self, 'tempdir', None):
            shutil.rmtree(self.tempdir)
        self.clear_migrations_modules()
        sys.path = self._old_sys_path
def clear_migrations_modules(self):
# Destroy modules that were loaded for migrations
sys.modules.pop("blog_0001_project", None)
sys.modules.pop("cookbook_0001_project", None)
sys.modules.pop("cookbook_0002_project", None)
    def setup_migration_tree(self, dir):
        """Copy the fixture project migrations into ``dir`` and make them importable.

        Returns the path of the created migrations package directory.
        """
        # Move the files to the correct location
        shutil.copytree(
            PROJECT_MIGRATIONS_DIRECTORY,
            os.path.join(dir, PROJECT_MIGRATIONS_MODULE_NAME)
        )
        # Prepend so the copied package takes precedence on import.
        sys.path.insert(0, dir)
        return os.path.join(dir, PROJECT_MIGRATIONS_MODULE_NAME)
def test_routine_migration(self):
""" Test applying a routine project migration """
self.tempdir = tempfile.mkdtemp()
with override_settings(BASE_DIR=self.tempdir):
self.setup_migration_tree(settings.BASE_DIR)
connection = connections[DEFAULT_DB_ALIAS]
loader = MigrationLoader(connection)
applied_migrations = copy(loader.applied_migrations)
migrated_apps = [app for app, _ in loader.applied_migrations]
self.assertNotIn('event_calendar', migrated_apps)
self.assertNotIn('newspaper', migrated_apps)
call_command('migrateproject', verbosity=0)
try:
# Check that database changed
loader = MigrationLoader(connection)
self.assertNotEqual(
loader.applied_migrations, applied_migrations)
migrated_apps = [app for app, _ in loader.applied_migrations]
self.assertIn('event_calendar', migrated_apps)
self.assertIn('newspaper', migrated_apps)
finally:
# Roll back migrations to a blank state
# NOTE: This needs to be done before deleting anything or else
# Django won't find the migrations on disk
call_command('migrate', 'event_calendar', 'zero', verbosity=0)
call_command('migrate', 'newspaper', 'zero', verbosity=0)
def test_migrations_dir_error(self):
""" Test running the management command with a bad migrations dir """
self.tempdir = tempfile.mkdtemp()
with override_settings(BASE_DIR=self.tempdir):
# No migrations folder at all
with self.assertRaises(CommandError):
call_command('migrateproject', verbosity=0)
migrations_dir = self.setup_migration_tree(settings.BASE_DIR)
os.remove(os.path.join(migrations_dir, '__init__.py'))
# Missing __init__.py file
with self.assertRaises(CommandError):
call_command('migrateproject', verbosity=0)
def test_unapply(self):
""" Test unapplying an applied project migration """
self.tempdir = tempfile.mkdtemp()
def perform_unapply():
connection = connections[DEFAULT_DB_ALIAS]
loader = MigrationLoader(connection)
applied_migrations = copy(loader.applied_migrations)
# Apply the migrations, then unapply them
call_command('migrateproject', verbosity=0)
# Check that database changed
loader = MigrationLoader(connection)
self.assertNotEqual(
loader.applied_migrations, applied_migrations)
migrated_apps = [app for app, _ in loader.applied_migrations]
self.assertIn('event_calendar', migrated_apps)
self.assertIn('newspaper', migrated_apps)
out = six.StringIO()
# Call command to unapply the changes
call_command('migrateproject', unapply=True, stdout=out,
verbosity=1)
# Check that it says it was unapplied
self.assertIn("unapply all", out.getvalue().lower())
# Check that database is back to original
loader = MigrationLoader(connection)
migrated_apps = [app for app, _ in loader.applied_migrations]
self.assertEqual(loader.applied_migrations, applied_migrations)
self.assertNotIn('event_calendar', migrated_apps)
self.assertNotIn('newspaper', migrated_apps)
with override_settings(BASE_DIR=self.tempdir):
self.setup_migration_tree(settings.BASE_DIR)
field = None
try:
# Do a normal unapply
perform_unapply()
# Then make some new changes via monkey patching
from event_calendar.models import Event
for field in Event._meta.fields:
if field.name == 'description':
field.blank = True
field.null = True
break
out = six.StringIO()
# Create the new migration
call_command('makeprojectmigrations', stdout=out, verbosity=1)
self.assertIn("migrations for", out.getvalue().lower())
# The cached package won't see the new module
sys.modules.pop("migrations", None)
# And apply/unapply those new migrations for better
# statement coverage
perform_unapply()
finally:
if field:
field.blank = False
field.null = False
# Roll back migrations to a blank state
# NOTE: This needs to be done before deleting anything or else
# Django won't find the migrations on disk
call_command('migrate', 'event_calendar', 'zero', verbosity=0)
call_command('migrate', 'newspaper', 'zero', verbosity=0)
def test_nothing_to_apply(self):
""" Test applying already applied project migration """
self.tempdir = tempfile.mkdtemp()
with override_settings(BASE_DIR=self.tempdir):
self.setup_migration_tree(settings.BASE_DIR)
connection = connections[DEFAULT_DB_ALIAS]
loader = MigrationLoader(connection)
applied_migrations = copy(loader.applied_migrations)
migrated_apps = [app for app, _ in loader.applied_migrations]
self.assertNotIn('event_calendar', migrated_apps)
self.assertNotIn('newspaper', migrated_apps)
call_command('migrateproject', verbosity=0)
try:
# Check that database changed
loader = MigrationLoader(connection)
self.assertNotEqual(
loader.applied_migrations, applied_migrations)
out = six.StringIO()
# Call command again to show nothing changes
call_command('migrateproject', stdout=out, verbosity=1)
self.assertIn('no migrations', out.getvalue().lower())
finally:
# Roll back migrations to a blank state
# NOTE: This needs to be done before deleting anything or else
# Django won't find the migrations on disk
call_command('migrate', 'event_calendar', 'zero', verbosity=0)
call_command('migrate', 'newspaper', 'zero', verbosity=0)
def test_signals(self):
""" Test the signals emitted during the migration """
app_config = apps.get_app_config('event_calendar')
pre_migrate_callback = mock.MagicMock()
post_migrate_callback = mock.MagicMock()
pre_migrate.connect(pre_migrate_callback, sender=app_config)
post_migrate.connect(post_migrate_callback, sender=app_config)
self.test_routine_migration()
pre_migrate.disconnect(pre_migrate_callback, sender=app_config)
post_migrate.disconnect(post_migrate_callback, sender=app_config)
self.assertEqual(pre_migrate_callback.call_count, 3)
self.assertEqual(post_migrate_callback.call_count, 3)
def test_changes_detected(self):
""" Test a migration with model changes detected """
self.tempdir = tempfile.mkdtemp()
module = 'django_migrate_project.management.commands.migrateproject'
changes_path = module + '.MigrationAutodetector.changes'
with override_settings(BASE_DIR=self.tempdir):
self.setup_migration_tree(settings.BASE_DIR)
# Migrate first, so that no migrations are available to apply
call_command('migrateproject', verbosity=0)
try:
with mock.patch(changes_path) as changes:
changes.return_value = True
out = six.StringIO()
call_command('migrateproject', stdout=out, verbosity=1)
output = out.getvalue().lower()
self.assertIn("have changes", output)
self.assertIn("'manage.py makeprojectmigrations'", output)
self.assertIn("'manage.py migrateproject'", output)
finally:
# Roll back migrations to a blank state
# NOTE: This needs to be done before deleting anything or else
# Django won't find the migrations on disk
call_command('migrate', 'event_calendar', 'zero', verbosity=0)
call_command('migrate', 'newspaper', 'zero', verbosity=0)
def test_alt_database(self):
""" Test migrating a project with an alternate database selected """
self.tempdir = tempfile.mkdtemp()
with override_settings(BASE_DIR=self.tempdir):
self.setup_migration_tree(settings.BASE_DIR)
# Roll back migrations to a blank state in the 'other' database
call_command('migrate', 'event_calendar', 'zero', database='other',
verbosity=0)
call_command('migrate', 'newspaper', 'zero', database='other',
verbosity=0)
default_connection = connections[DEFAULT_DB_ALIAS]
connection = connections['other']
default_loader = MigrationLoader(default_connection)
loader = MigrationLoader(connection)
default_applied_migrations = copy(
default_loader.applied_migrations)
applied_migrations = copy(loader.applied_migrations)
call_command('migrateproject', database='other', verbosity=0)
default_loader = MigrationLoader(default_connection)
loader = MigrationLoader(connection)
# The default database should remain unchanged
self.assertEqual(default_loader.applied_migrations,
default_applied_migrations)
# The 'other' database should have been migrated
self.assertNotEqual(loader.applied_migrations,
applied_migrations)
| dsanders11/django-migrate-project | tests/test_migrateproject.py | Python | mit | 12,384 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of tf.metrics module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import confusion_matrix
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import sets
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import weights_broadcast_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import distribute as distribute_lib
from tensorflow.python.util.deprecation import deprecated
from tensorflow.python.util.tf_export import tf_export
def metric_variable(shape, dtype, validate_shape=True, name=None):
  """Create variable in `GraphKeys.(LOCAL|METRIC_VARIABLES)` collections.

  Inside a `DistributionStrategy` context the variable is created as a
  "tower local" variable: each replica/tower gets its own copy, updates
  (e.g. `assign_add`) hit the per-tower copy, and reading the variable from
  within `tf.contrib.distribute.get_tower_context().merge_call(fn)` yields
  the sum across all towers. Final metric values should therefore be
  computed inside such a `merge_call`.

  Args:
    shape: Shape of the created variable.
    dtype: Type of the created variable.
    validate_shape: (Optional) Whether shape validation is enabled for
      the created variable.
    name: (Optional) String name of the created variable.

  Returns:
    A (non-trainable) variable initialized to zero, or if inside a
    `DistributionStrategy` scope a tower-local variable container.
  """
  def _initializer():
    # Metrics always start from an all-zero accumulator.
    return array_ops.zeros(shape, dtype)

  collections = [
      ops.GraphKeys.LOCAL_VARIABLES,
      ops.GraphKeys.METRIC_VARIABLES,
  ]
  tower_context = distribute_lib.get_tower_context()
  # 'sum' aggregation: reads under merge_call() return the cross-tower sum.
  with tower_context.tower_local_var_scope('sum'):
    # "Tower local" implies trainable=False, so it is not set explicitly.
    return variable_scope.variable(
        _initializer,
        collections=collections,
        validate_shape=validate_shape,
        name=name)
def _remove_squeezable_dimensions(predictions, labels, weights):
  """Squeeze or expand last dim if needed.

  Squeezes last dim of `predictions` or `labels` if their rank differs by 1
  (using confusion_matrix.remove_squeezable_dimensions).

  Squeezes or expands last dim of `weights` if its rank differs by 1 from the
  new rank of `predictions`. If `weights` is scalar, it is kept scalar.

  This will use static shape if available. Otherwise, it will add graph
  operations, which could result in a performance hit.

  Args:
    predictions: Predicted values, a `Tensor` of arbitrary dimensions.
    labels: Optional label `Tensor` whose dimensions match `predictions`.
    weights: Optional weight scalar or `Tensor` whose dimensions match
      `predictions`.

  Returns:
    Tuple of `predictions`, `labels` and `weights`. Each of them possibly has
    the last dimension squeezed, `weights` could be extended by one dimension.
  """
  predictions = ops.convert_to_tensor(predictions)
  if labels is not None:
    # Align labels/predictions ranks first; the resulting predictions rank is
    # what weights must then be matched against.
    labels, predictions = confusion_matrix.remove_squeezable_dimensions(
        labels, predictions)
    predictions.get_shape().assert_is_compatible_with(labels.get_shape())

  if weights is None:
    return predictions, labels, None

  weights = ops.convert_to_tensor(weights)
  weights_shape = weights.get_shape()
  weights_rank = weights_shape.ndims
  if weights_rank == 0:
    # Scalar weights are left untouched (they broadcast everywhere).
    return predictions, labels, weights

  predictions_shape = predictions.get_shape()
  predictions_rank = predictions_shape.ndims
  if (predictions_rank is not None) and (weights_rank is not None):
    # Use static rank.
    if weights_rank - predictions_rank == 1:
      weights = array_ops.squeeze(weights, [-1])
    elif predictions_rank - weights_rank == 1:
      weights = array_ops.expand_dims(weights, [-1])
  else:
    # Use dynamic rank.
    weights_rank_tensor = array_ops.rank(weights)
    rank_diff = weights_rank_tensor - array_ops.rank(predictions)
    def _maybe_expand_weights():
      # Expand only when weights rank is exactly one below predictions rank.
      return control_flow_ops.cond(
          math_ops.equal(rank_diff, -1),
          lambda: array_ops.expand_dims(weights, [-1]), lambda: weights)
    # Don't attempt squeeze if it will fail based on static check.
    if ((weights_rank is not None) and
        (not weights_shape.dims[-1].is_compatible_with(1))):
      maybe_squeeze_weights = lambda: weights
    else:
      maybe_squeeze_weights = lambda: array_ops.squeeze(weights, [-1])
    def _maybe_adjust_weights():
      # Squeeze when one rank above predictions, otherwise consider expanding.
      return control_flow_ops.cond(
          math_ops.equal(rank_diff, 1), maybe_squeeze_weights,
          _maybe_expand_weights)
    # If weights are scalar, do nothing. Otherwise, try to add or remove a
    # dimension to match predictions.
    weights = control_flow_ops.cond(
        math_ops.equal(weights_rank_tensor, 0), lambda: weights,
        _maybe_adjust_weights)
  return predictions, labels, weights
def _maybe_expand_labels(labels, predictions):
  """If necessary, expand `labels` along last dimension to match `predictions`.

  Args:
    labels: `Tensor` or `SparseTensor` with shape
      [D1, ... DN, num_labels] or [D1, ... DN]. The latter implies
      num_labels=1, in which case the result is an expanded `labels` with shape
      [D1, ... DN, 1].
    predictions: `Tensor` with shape [D1, ... DN, num_classes].

  Returns:
    `labels` with the same rank as `predictions`.

  Raises:
    ValueError: if `labels` has invalid shape.
  """
  with ops.name_scope(None, 'expand_labels', (labels, predictions)) as scope:
    labels = sparse_tensor.convert_to_tensor_or_sparse_tensor(labels)

    # If sparse, expand sparse shape.
    if isinstance(labels, sparse_tensor.SparseTensor):
      # Sparse rank is only known at runtime (size of dense_shape), so the
      # decision to reshape must be made with a graph-level cond.
      return control_flow_ops.cond(
          math_ops.equal(
              array_ops.rank(predictions),
              array_ops.size(labels.dense_shape) + 1),
          lambda: sparse_ops.sparse_reshape(  # pylint: disable=g-long-lambda
              labels,
              shape=array_ops.concat((labels.dense_shape, (1,)), 0),
              name=scope),
          lambda: labels)

    # Otherwise, try to use static shape.
    labels_rank = labels.get_shape().ndims
    if labels_rank is not None:
      predictions_rank = predictions.get_shape().ndims
      if predictions_rank is not None:
        if predictions_rank == labels_rank:
          return labels
        if predictions_rank == labels_rank + 1:
          return array_ops.expand_dims(labels, -1, name=scope)
        # Any other rank difference cannot be reconciled by a single expand.
        raise ValueError(
            'Unexpected labels shape %s for predictions shape %s.' %
            (labels.get_shape(), predictions.get_shape()))

    # Otherwise, use dynamic shape.
    return control_flow_ops.cond(
        math_ops.equal(array_ops.rank(predictions),
                       array_ops.rank(labels) + 1),
        lambda: array_ops.expand_dims(labels, -1, name=scope), lambda: labels)
def _safe_div(numerator, denominator, name):
  """Divides two tensors element-wise, returning 0 if the denominator is <= 0.

  Args:
    numerator: A real `Tensor`.
    denominator: A real `Tensor`, with dtype matching `numerator`.
    name: Name for the returned op.

  Returns:
    0 if `denominator` <= 0, else `numerator` / `denominator`
  """
  quotient = math_ops.truediv(numerator, denominator)
  # Zeros in the denominator's dtype, used both for the comparison and
  # (after casting) as the fallback value where division is unsafe.
  zeros = array_ops.zeros_like(quotient, dtype=denominator.dtype)
  denominator_is_positive = math_ops.greater(denominator, zeros)
  return array_ops.where(
      denominator_is_positive,
      quotient,
      math_ops.cast(zeros, quotient.dtype),
      name=name)
def _safe_scalar_div(numerator, denominator, name):
  """Divides two values, returning 0 if the denominator is 0.

  Args:
    numerator: A scalar `float64` `Tensor`.
    denominator: A scalar `float64` `Tensor`.
    name: Name for the returned op.

  Returns:
    0 if `denominator` == 0, else `numerator` / `denominator`
  """
  # Both operands must be at most rank 1; this raises at graph build time.
  numerator.get_shape().with_rank_at_most(1)
  denominator.get_shape().with_rank_at_most(1)

  def _zero():
    return array_ops.constant(0.0, dtype=dtypes.float64)

  def _divide():
    return math_ops.div(numerator, denominator)

  is_zero_denominator = math_ops.equal(
      array_ops.constant(0.0, dtype=dtypes.float64), denominator)
  return control_flow_ops.cond(is_zero_denominator, _zero, _divide, name=name)
def _streaming_confusion_matrix(labels, predictions, num_classes, weights=None):
  """Calculate a streaming confusion matrix.

  Accumulates a [num_classes, num_classes] confusion matrix across batches;
  `update_op` folds the current batch into the running total.

  Args:
    labels: A `Tensor` of ground truth labels with shape [batch size] and of
      type `int32` or `int64`. The tensor will be flattened if its rank > 1.
    predictions: A `Tensor` of prediction results for semantic labels, whose
      shape is [batch size] and type `int32` or `int64`. The tensor will be
      flattened if its rank > 1.
    num_classes: The possible number of labels the prediction task can
      have. This value must be provided, since a confusion matrix of
      dimension = [num_classes, num_classes] will be allocated.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions
      must be either `1`, or the same as the corresponding `labels` dimension).

  Returns:
    total_cm: A `Tensor` representing the confusion matrix.
    update_op: An operation that increments the confusion matrix.
  """
  # Running total of the confusion matrix, accumulated across update calls.
  accum_cm = metric_variable(
      [num_classes, num_classes], dtypes.float64, name='total_confusion_matrix')

  # The confusion_matrix op requires int64 inputs.
  predictions = math_ops.to_int64(predictions)
  labels = math_ops.to_int64(labels)
  num_classes = math_ops.to_int64(num_classes)

  # Collapse higher-rank inputs to vectors so entries pair up element-wise.
  if predictions.get_shape().ndims > 1:
    predictions = array_ops.reshape(predictions, [-1])
  if labels.get_shape().ndims > 1:
    labels = array_ops.reshape(labels, [-1])
  if (weights is not None) and (weights.get_shape().ndims > 1):
    weights = array_ops.reshape(weights, [-1])

  # Confusion matrix of this batch alone, added into the accumulator.
  batch_cm = confusion_matrix.confusion_matrix(
      labels, predictions, num_classes, weights=weights, dtype=dtypes.float64)
  accumulate_op = state_ops.assign_add(accum_cm, batch_cm)
  return accum_cm, accumulate_op
@tf_export('metrics.mean')
def mean(values,
         weights=None,
         metrics_collections=None,
         updates_collections=None,
         name=None):
  """Computes the (weighted) mean of the given values.

  The `mean` function creates two local variables, `total` and `count`
  that are used to compute the average of `values`. This average is ultimately
  returned as `mean` which is an idempotent operation that simply divides
  `total` by `count`.

  For estimation of the metric over a stream of data, the function creates an
  `update_op` operation that updates these variables and returns the `mean`.
  `update_op` increments `total` with the reduced sum of the product of
  `values` and `weights`, and it increments `count` with the reduced sum of
  `weights`.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask
  values.

  Args:
    values: A `Tensor` of arbitrary dimensions.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `values`, and must be broadcastable to `values` (i.e., all dimensions
      must be either `1`, or the same as the corresponding `values` dimension).
    metrics_collections: An optional list of collections that `mean`
      should be added to.
    updates_collections: An optional list of collections that `update_op`
      should be added to.
    name: An optional variable_scope name.

  Returns:
    mean: A `Tensor` representing the current mean, the value of `total`
      divided by `count`.
    update_op: An operation that increments the `total` and `count` variables
      appropriately and whose value matches `mean_value`.

  Raises:
    ValueError: If `weights` is not `None` and its shape doesn't match
      `values`, or if either `metrics_collections` or `updates_collections`
      are not a list or tuple.
    RuntimeError: If eager execution is enabled.
  """
  if context.executing_eagerly():
    raise RuntimeError('tf.metrics.mean is not supported when eager execution '
                       'is enabled.')

  with variable_scope.variable_scope(name, 'mean', (values, weights)):
    values = math_ops.to_float(values)

    # Accumulators: running sum of (weighted) values and of weights/counts.
    total = metric_variable([], dtypes.float32, name='total')
    count = metric_variable([], dtypes.float32, name='count')

    if weights is None:
      # Unweighted: every element contributes a count of 1.
      num_values = math_ops.to_float(array_ops.size(values))
    else:
      # Align weights to values, then fold the weights into values so that
      # `total` accumulates sum(values * weights) and `count` sum(weights).
      values, _, weights = _remove_squeezable_dimensions(
          predictions=values, labels=None, weights=weights)
      weights = weights_broadcast_ops.broadcast_weights(
          math_ops.to_float(weights), values)
      values = math_ops.multiply(values, weights)
      num_values = math_ops.reduce_sum(weights)

    update_total_op = state_ops.assign_add(total, math_ops.reduce_sum(values))
    # The count update is forced to run after `values` is computed, keeping
    # the total/count updates consistent with each other.
    with ops.control_dependencies([values]):
      update_count_op = state_ops.assign_add(count, num_values)

    def aggregate_across_towers(_, t, c):
      # Runs once (not per-tower); reads of t/c here are cross-tower sums.
      mean_t = _safe_div(t, c, 'value')
      if metrics_collections:
        ops.add_to_collections(metrics_collections, mean_t)
      return mean_t

    mean_t = distribute_lib.get_tower_context().merge_call(
        aggregate_across_towers, total, count)

    # The update op's value is the running mean after this batch's update.
    update_op = _safe_div(update_total_op, update_count_op, 'update_op')
    if updates_collections:
      ops.add_to_collections(updates_collections, update_op)

    return mean_t, update_op
@tf_export('metrics.accuracy')
def accuracy(labels,
             predictions,
             weights=None,
             metrics_collections=None,
             updates_collections=None,
             name=None):
  """Calculates how often `predictions` matches `labels`.

  Internally this computes an `is_correct` tensor that is 1.0 where
  `predictions` equals `labels` and 0.0 elsewhere, then delegates to
  `tf.metrics.mean` so that `total`/`count` local variables track the
  (weighted) fraction of correct predictions across a stream of data.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask
  values.

  Args:
    labels: The ground truth values, a `Tensor` whose shape matches
      `predictions`.
    predictions: The predicted values, a `Tensor` of any shape.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions
      must be either `1`, or the same as the corresponding `labels` dimension).
    metrics_collections: An optional list of collections that `accuracy` should
      be added to.
    updates_collections: An optional list of collections that `update_op`
      should be added to.
    name: An optional variable_scope name.

  Returns:
    accuracy: A `Tensor` representing the accuracy, the value of `total`
      divided by `count`.
    update_op: An operation that increments the `total` and `count` variables
      appropriately and whose value matches `accuracy`.

  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or if
      either `metrics_collections` or `updates_collections` are not a list or
      tuple.
    RuntimeError: If eager execution is enabled.
  """
  if context.executing_eagerly():
    raise RuntimeError('tf.metrics.accuracy is not supported when eager '
                       'execution is enabled.')

  predictions, labels, weights = _remove_squeezable_dimensions(
      predictions=predictions, labels=labels, weights=weights)
  predictions.get_shape().assert_is_compatible_with(labels.get_shape())
  # equal() requires matching dtypes; cast predictions to the labels' dtype.
  if labels.dtype != predictions.dtype:
    predictions = math_ops.cast(predictions, labels.dtype)
  matches = math_ops.equal(predictions, labels)
  is_correct = math_ops.to_float(matches)
  scope_name = name or 'accuracy'
  return mean(is_correct, weights, metrics_collections,
              updates_collections, scope_name)
def _confusion_matrix_at_thresholds(labels,
                                    predictions,
                                    thresholds,
                                    weights=None,
                                    includes=None):
  """Computes true_positives, false_negatives, true_negatives, false_positives.

  This function creates up to four local variables, `true_positives`,
  `true_negatives`, `false_positives` and `false_negatives`.
  `true_positive[i]` is defined as the total weight of values in `predictions`
  above `thresholds[i]` whose corresponding entry in `labels` is `True`.
  `false_negatives[i]` is defined as the total weight of values in
  `predictions` at most `thresholds[i]` whose corresponding entry in `labels`
  is `True`.
  `true_negatives[i]` is defined as the total weight of values in `predictions`
  at most `thresholds[i]` whose corresponding entry in `labels` is `False`.
  `false_positives[i]` is defined as the total weight of values in
  `predictions` above `thresholds[i]` whose corresponding entry in `labels` is
  `False`.

  For estimation of these metrics over a stream of data, for each metric the
  function respectively creates an `update_op` operation that updates the
  variable and returns its value.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask
  values.

  Args:
    labels: A `Tensor` whose shape matches `predictions`. Will be cast to
      `bool`.
    predictions: A floating point `Tensor` of arbitrary shape and whose values
      are in the range `[0, 1]`.
    thresholds: A python list or tuple of float thresholds in `[0, 1]`.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions
      must be either `1`, or the same as the corresponding `labels` dimension).
    includes: Tuple of keys to return, from 'tp', 'fn', 'tn', fp'. If `None`,
      default to all four.

  Returns:
    values: Dict of variables of shape `[len(thresholds)]`. Keys are from
      `includes`.
    update_ops: Dict of operations that increments the `values`. Keys are from
      `includes`.

  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or if
      `includes` contains invalid keys.
  """
  all_includes = ('tp', 'fn', 'tn', 'fp')
  if includes is None:
    includes = all_includes
  else:
    # Fail fast on unknown keys before building any graph ops.
    for include in includes:
      if include not in all_includes:
        raise ValueError('Invalid key: %s.' % include)

  # Runtime assertions: predictions must lie in [0, 1] for thresholding to
  # make sense. They gate the squeeze/cast ops via control dependencies.
  with ops.control_dependencies([
      check_ops.assert_greater_equal(
          predictions,
          math_ops.cast(0.0, dtype=predictions.dtype),
          message='predictions must be in [0, 1]'),
      check_ops.assert_less_equal(
          predictions,
          math_ops.cast(1.0, dtype=predictions.dtype),
          message='predictions must be in [0, 1]')
  ]):
    predictions, labels, weights = _remove_squeezable_dimensions(
        predictions=math_ops.to_float(predictions),
        labels=math_ops.cast(labels, dtype=dtypes.bool),
        weights=weights)

  num_thresholds = len(thresholds)

  # Reshape predictions and labels.
  predictions_2d = array_ops.reshape(predictions, [-1, 1])
  labels_2d = array_ops.reshape(
      math_ops.cast(labels, dtype=dtypes.bool), [1, -1])

  # Use static shape if known.
  num_predictions = predictions_2d.get_shape().as_list()[0]

  # Otherwise use dynamic shape.
  if num_predictions is None:
    num_predictions = array_ops.shape(predictions_2d)[0]
  # thresh_tiled has shape [num_thresholds, num_predictions]: row i repeats
  # thresholds[i], so each prediction is compared against every threshold.
  thresh_tiled = array_ops.tile(
      array_ops.expand_dims(array_ops.constant(thresholds), [1]),
      array_ops.stack([1, num_predictions]))

  # Tile the predictions after thresholding them across different thresholds.
  pred_is_pos = math_ops.greater(
      array_ops.tile(array_ops.transpose(predictions_2d), [num_thresholds, 1]),
      thresh_tiled)
  # Negations are only materialized when a metric that needs them is asked for.
  if ('fn' in includes) or ('tn' in includes):
    pred_is_neg = math_ops.logical_not(pred_is_pos)

  # Tile labels by number of thresholds
  label_is_pos = array_ops.tile(labels_2d, [num_thresholds, 1])
  if ('fp' in includes) or ('tn' in includes):
    label_is_neg = math_ops.logical_not(label_is_pos)

  if weights is not None:
    # Broadcast weights to the predictions' shape, then tile per threshold so
    # each [threshold, prediction] cell carries its weight.
    weights = weights_broadcast_ops.broadcast_weights(
        math_ops.to_float(weights), predictions)
    weights_tiled = array_ops.tile(
        array_ops.reshape(weights, [1, -1]), [num_thresholds, 1])
    thresh_tiled.get_shape().assert_is_compatible_with(
        weights_tiled.get_shape())
  else:
    weights_tiled = None

  values = {}
  update_ops = {}

  if 'tp' in includes:
    true_p = metric_variable(
        [num_thresholds], dtypes.float32, name='true_positives')
    is_true_positive = math_ops.to_float(
        math_ops.logical_and(label_is_pos, pred_is_pos))
    if weights_tiled is not None:
      is_true_positive *= weights_tiled
    # Reduce across predictions (axis 1), leaving one total per threshold.
    update_ops['tp'] = state_ops.assign_add(true_p,
                                            math_ops.reduce_sum(
                                                is_true_positive, 1))
    values['tp'] = true_p

  if 'fn' in includes:
    false_n = metric_variable(
        [num_thresholds], dtypes.float32, name='false_negatives')
    is_false_negative = math_ops.to_float(
        math_ops.logical_and(label_is_pos, pred_is_neg))
    if weights_tiled is not None:
      is_false_negative *= weights_tiled
    update_ops['fn'] = state_ops.assign_add(false_n,
                                            math_ops.reduce_sum(
                                                is_false_negative, 1))
    values['fn'] = false_n

  if 'tn' in includes:
    true_n = metric_variable(
        [num_thresholds], dtypes.float32, name='true_negatives')
    is_true_negative = math_ops.to_float(
        math_ops.logical_and(label_is_neg, pred_is_neg))
    if weights_tiled is not None:
      is_true_negative *= weights_tiled
    update_ops['tn'] = state_ops.assign_add(true_n,
                                            math_ops.reduce_sum(
                                                is_true_negative, 1))
    values['tn'] = true_n

  if 'fp' in includes:
    false_p = metric_variable(
        [num_thresholds], dtypes.float32, name='false_positives')
    is_false_positive = math_ops.to_float(
        math_ops.logical_and(label_is_neg, pred_is_pos))
    if weights_tiled is not None:
      is_false_positive *= weights_tiled
    update_ops['fp'] = state_ops.assign_add(false_p,
                                            math_ops.reduce_sum(
                                                is_false_positive, 1))
    values['fp'] = false_p

  return values, update_ops
def _aggregate_variable(v, collections):
  """Reads variable `v` inside a merge_call and optionally registers it.

  The read happens via `distribution.read_var` inside `merge_call`; for
  tower-local metric variables this is how the cross-tower aggregate is
  obtained (see `metric_variable`). If `collections` is truthy, the read
  value is added to those collections.
  """
  def _read_and_register(distribution, var):
    value = distribution.read_var(var)
    if collections:
      ops.add_to_collections(collections, value)
    return value

  tower_context = distribute_lib.get_tower_context()
  return tower_context.merge_call(_read_and_register, v)
@tf_export('metrics.auc')
def auc(labels,
        predictions,
        weights=None,
        num_thresholds=200,
        metrics_collections=None,
        updates_collections=None,
        curve='ROC',
        name=None,
        summation_method='trapezoidal'):
  """Computes the approximate AUC via a Riemann sum.

  The `auc` function creates four local variables, `true_positives`,
  `true_negatives`, `false_positives` and `false_negatives` that are used to
  compute the AUC. To discretize the AUC curve, a linearly spaced set of
  thresholds is used to compute pairs of recall and precision values. The area
  under the ROC-curve is therefore computed using the height of the recall
  values by the false positive rate, while the area under the PR-curve is the
  computed using the height of the precision values by the recall.

  This value is ultimately returned as `auc`, an idempotent operation that
  computes the area under a discretized curve of precision versus recall values
  (computed using the aforementioned variables). The `num_thresholds` variable
  controls the degree of discretization with larger numbers of thresholds more
  closely approximating the true AUC. The quality of the approximation may vary
  dramatically depending on `num_thresholds`.

  For best results, `predictions` should be distributed approximately uniformly
  in the range [0, 1] and not peaked around 0 or 1. The quality of the AUC
  approximation may be poor if this is not the case. Setting `summation_method`
  to 'minoring' or 'majoring' can help quantify the error in the approximation
  by providing lower or upper bound estimate of the AUC.

  For estimation of the metric over a stream of data, the function creates an
  `update_op` operation that updates these variables and returns the `auc`.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.

  Args:
    labels: A `Tensor` whose shape matches `predictions`. Will be cast to
      `bool`.
    predictions: A floating point `Tensor` of arbitrary shape and whose values
      are in the range `[0, 1]`.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions must
      be either `1`, or the same as the corresponding `labels` dimension).
    num_thresholds: The number of thresholds to use when discretizing the roc
      curve.
    metrics_collections: An optional list of collections that `auc` should be
      added to.
    updates_collections: An optional list of collections that `update_op` should
      be added to.
    curve: Specifies the name of the curve to be computed, 'ROC' [default] or
      'PR' for the Precision-Recall-curve.
    name: An optional variable_scope name.
    summation_method: Specifies the Riemann summation method used
      (https://en.wikipedia.org/wiki/Riemann_sum): 'trapezoidal' [default] that
      applies the trapezoidal rule; 'careful_interpolation', a variant of it
      differing only by a more correct interpolation scheme for PR-AUC -
      interpolating (true/false) positives but not the ratio that is precision;
      'minoring' that applies left summation for increasing intervals and right
      summation for decreasing intervals; 'majoring' that does the opposite.
      Note that 'careful_interpolation' is strictly preferred to 'trapezoidal'
      (to be deprecated soon) as it applies the same method for ROC, and a
      better one (see Davis & Goadrich 2006 for details) for the PR curve.

  Returns:
    auc: A scalar `Tensor` representing the current area-under-curve.
    update_op: An operation that increments the `true_positives`,
      `true_negatives`, `false_positives` and `false_negatives` variables
      appropriately and whose value matches `auc`.

  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or if
      either `metrics_collections` or `updates_collections` are not a list or
      tuple.
    RuntimeError: If eager execution is enabled.
  """
  if context.executing_eagerly():
    raise RuntimeError('tf.metrics.auc is not supported when eager execution '
                       'is enabled.')

  with variable_scope.variable_scope(name, 'auc',
                                     (labels, predictions, weights)):
    if curve != 'ROC' and curve != 'PR':
      raise ValueError('curve must be either ROC or PR, %s unknown' % (curve))
    kepsilon = 1e-7  # to account for floating point imprecisions
    # num_thresholds - 2 evenly spaced interior thresholds; the two endpoints
    # are shifted by kepsilon so predictions of exactly 0.0 or 1.0 fall
    # strictly inside the outermost buckets.
    thresholds = [
        (i + 1) * 1.0 / (num_thresholds - 1) for i in range(num_thresholds - 2)
    ]
    thresholds = [0.0 - kepsilon] + thresholds + [1.0 + kepsilon]

    values, update_ops = _confusion_matrix_at_thresholds(
        labels, predictions, thresholds, weights)

    # Add epsilons to avoid dividing by 0.
    epsilon = 1.0e-6

    def interpolate_pr_auc(tp, fp, fn):
      """Interpolation formula inspired by section 4 of Davis & Goadrich 2006.

      Note here we derive & use a closed formula not present in the paper
      - as follows:
      Modeling all of TP (true positive weight),
      FP (false positive weight) and their sum P = TP + FP (positive weight)
      as varying linearly within each interval [A, B] between successive
      thresholds, we get
        Precision = (TP_A + slope * (P - P_A)) / P
      with slope = dTP / dP = (TP_B - TP_A) / (P_B - P_A).
      The area within the interval is thus (slope / total_pos_weight) times
        int_A^B{Precision.dP} = int_A^B{(TP_A + slope * (P - P_A)) * dP / P}
        int_A^B{Precision.dP} = int_A^B{slope * dP + intercept * dP / P}
      where intercept = TP_A - slope * P_A = TP_B - slope * P_B, resulting in
        int_A^B{Precision.dP} = TP_B - TP_A + intercept * log(P_B / P_A)
      Bringing back the factor (slope / total_pos_weight) we'd put aside, we get
        slope * [dTP + intercept * log(P_B / P_A)] / total_pos_weight
      where dTP == TP_B - TP_A.
      Note that when P_A == 0 the above calculation simplifies into
        int_A^B{Precision.dTP} = int_A^B{slope * dTP} = slope * (TP_B - TP_A)
      which is really equivalent to imputing constant precision throughout the
      first bucket having >0 true positives.

      Args:
        tp: true positive counts
        fp: false positive counts
        fn: false negative counts

      Returns:
        pr_auc: an approximation of the area under the P-R curve.
      """
      # tp/fp/fn are per-threshold cumulative counts; adjacent differences
      # give the per-interval changes used by the closed formula above.
      dtp = tp[:num_thresholds - 1] - tp[1:]
      p = tp + fp
      prec_slope = _safe_div(dtp, p[:num_thresholds - 1] - p[1:], 'prec_slope')
      intercept = tp[1:] - math_ops.multiply(prec_slope, p[1:])
      # Guard the log argument: only use P_B / P_A where both are positive,
      # otherwise substitute 1 so that log(1) == 0 drops the interpolation
      # term for that interval.
      safe_p_ratio = array_ops.where(
          math_ops.logical_and(p[:num_thresholds - 1] > 0, p[1:] > 0),
          _safe_div(p[:num_thresholds - 1], p[1:], 'recall_relative_ratio'),
          array_ops.ones_like(p[1:]))
      return math_ops.reduce_sum(
          _safe_div(
              prec_slope * (dtp + intercept * math_ops.log(safe_p_ratio)),
              tp[1:] + fn[1:],
              name='pr_auc_increment'),
          name='interpolate_pr_auc')

    def compute_auc(tp, fn, tn, fp, name):
      """Computes the roc-auc or pr-auc based on confusion counts."""
      if curve == 'PR':
        if summation_method == 'trapezoidal':
          logging.warning(
              'Trapezoidal rule is known to produce incorrect PR-AUCs; '
              'please switch to "careful_interpolation" instead.')
        elif summation_method == 'careful_interpolation':
          # This one is a bit tricky and is handled separately.
          return interpolate_pr_auc(tp, fp, fn)
      # Recall; `epsilon` keeps the ratio finite when tp + fn == 0.
      rec = math_ops.div(tp + epsilon, tp + fn + epsilon)
      if curve == 'ROC':
        fp_rate = math_ops.div(fp, fp + tn + epsilon)
        x = fp_rate
        y = rec
      else:  # curve == 'PR'.
        prec = math_ops.div(tp + epsilon, tp + fp + epsilon)
        x = rec
        y = prec
      if summation_method in ('trapezoidal', 'careful_interpolation'):
        # Note that the case ('PR', 'careful_interpolation') has been handled
        # above.
        return math_ops.reduce_sum(
            math_ops.multiply(x[:num_thresholds - 1] - x[1:],
                              (y[:num_thresholds - 1] + y[1:]) / 2.),
            name=name)
      elif summation_method == 'minoring':
        return math_ops.reduce_sum(
            math_ops.multiply(x[:num_thresholds - 1] - x[1:],
                              math_ops.minimum(y[:num_thresholds - 1], y[1:])),
            name=name)
      elif summation_method == 'majoring':
        return math_ops.reduce_sum(
            math_ops.multiply(x[:num_thresholds - 1] - x[1:],
                              math_ops.maximum(y[:num_thresholds - 1], y[1:])),
            name=name)
      else:
        raise ValueError('Invalid summation_method: %s' % summation_method)

    # sum up the areas of all the trapeziums
    def aggregate_auc(_, values):
      auc_value = compute_auc(values['tp'], values['fn'], values['tn'],
                              values['fp'], 'value')
      if metrics_collections:
        ops.add_to_collections(metrics_collections, auc_value)
      return auc_value

    # Aggregate in cross-tower context so the value is added to the
    # collections exactly once. NOTE(review): relies on distribute_lib
    # merge_call semantics -- confirm against the distribution strategy docs.
    auc_value = distribute_lib.get_tower_context().merge_call(
        aggregate_auc, values)
    update_op = compute_auc(update_ops['tp'], update_ops['fn'],
                            update_ops['tn'], update_ops['fp'], 'update_op')
    if updates_collections:
      ops.add_to_collections(updates_collections, update_op)
    return auc_value, update_op
@tf_export('metrics.mean_absolute_error')
def mean_absolute_error(labels,
                        predictions,
                        weights=None,
                        metrics_collections=None,
                        updates_collections=None,
                        name=None):
  """Computes the mean absolute error between the labels and predictions.

  Maintains two local variables, `total` and `count`, whose ratio is the
  (optionally `weights`-weighted) streaming mean of the element-wise absolute
  difference between `predictions` and `labels`. The returned
  `mean_absolute_error` tensor is idempotent; the returned `update_op`
  accumulates a batch of data into the two variables and evaluates to the
  updated mean.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask
  values.

  Args:
    labels: A `Tensor` of the same shape as `predictions`.
    predictions: A `Tensor` of arbitrary shape.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions
      must be either `1`, or the same as the corresponding `labels`
      dimension).
    metrics_collections: An optional list of collections that
      `mean_absolute_error` should be added to.
    updates_collections: An optional list of collections that `update_op`
      should be added to.
    name: An optional variable_scope name.

  Returns:
    mean_absolute_error: A `Tensor` representing the current mean, the value
      of `total` divided by `count`.
    update_op: An operation that increments the `total` and `count` variables
      appropriately and whose value matches `mean_absolute_error`.

  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or
      if either `metrics_collections` or `updates_collections` are not a list
      or tuple.
    RuntimeError: If eager execution is enabled.
  """
  if context.executing_eagerly():
    raise RuntimeError('tf.metrics.mean_absolute_error is not supported '
                       'when eager execution is enabled.')
  predictions, labels, weights = _remove_squeezable_dimensions(
      predictions=predictions, labels=labels, weights=weights)
  # Element-wise |prediction - label|; weighting is applied inside `mean`.
  abs_diff = math_ops.abs(predictions - labels)
  metric_name = name or 'mean_absolute_error'
  return mean(abs_diff, weights, metrics_collections, updates_collections,
              metric_name)
@tf_export('metrics.mean_cosine_distance')
def mean_cosine_distance(labels,
                         predictions,
                         dim,
                         weights=None,
                         metrics_collections=None,
                         updates_collections=None,
                         name=None):
  """Computes the cosine distance between the labels and predictions.

  The `mean_cosine_distance` function creates two local variables,
  `total` and `count` that are used to compute the average cosine distance
  between `predictions` and `labels`. This average is weighted by `weights`,
  and it is ultimately returned as `mean_distance`, which is an idempotent
  operation that simply divides `total` by `count`.

  For estimation of the metric over a stream of data, the function creates an
  `update_op` operation that updates these variables and returns the
  `mean_distance`.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.

  Args:
    labels: A `Tensor` of arbitrary shape.
    predictions: A `Tensor` of the same shape as `labels`.
    dim: The dimension along which the cosine distance is computed.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions must
      be either `1`, or the same as the corresponding `labels` dimension). Also,
      dimension `dim` must be `1`.
    metrics_collections: An optional list of collections that the metric
      value variable should be added to.
    updates_collections: An optional list of collections that the metric update
      ops should be added to.
    name: An optional variable_scope name.

  Returns:
    mean_distance: A `Tensor` representing the current mean, the value of
      `total` divided by `count`.
    update_op: An operation that increments the `total` and `count` variables
      appropriately.

  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or if
      either `metrics_collections` or `updates_collections` are not a list or
      tuple.
    RuntimeError: If eager execution is enabled.
  """
  if context.executing_eagerly():
    raise RuntimeError('tf.metrics.mean_cosine_distance is not supported when '
                       'eager execution is enabled.')
  predictions, labels, weights = _remove_squeezable_dimensions(
      predictions=predictions, labels=labels, weights=weights)
  # Per-element products, summed over `dim`, give the cosine similarity
  # (assumes the inputs are unit-normalized along `dim` -- TODO confirm).
  radial_diffs = math_ops.multiply(predictions, labels)
  # Use `axis` instead of the deprecated alias `reduction_indices`; this call
  # already uses the modern `keepdims` spelling, so stay consistent.
  radial_diffs = math_ops.reduce_sum(
      radial_diffs, axis=[
          dim,
      ], keepdims=True)
  mean_distance, update_op = mean(radial_diffs, weights, None, None, name or
                                  'mean_cosine_distance')
  # Cosine distance = 1 - cosine similarity.
  mean_distance = math_ops.subtract(1.0, mean_distance)
  update_op = math_ops.subtract(1.0, update_op)
  if metrics_collections:
    ops.add_to_collections(metrics_collections, mean_distance)
  if updates_collections:
    ops.add_to_collections(updates_collections, update_op)
  return mean_distance, update_op
@tf_export('metrics.mean_per_class_accuracy')
def mean_per_class_accuracy(labels,
                            predictions,
                            num_classes,
                            weights=None,
                            metrics_collections=None,
                            updates_collections=None,
                            name=None):
  """Calculates the mean of the per-class accuracies.

  Calculates the accuracy for each class, then takes the mean of that.

  For estimation of the metric over a stream of data, the function creates an
  `update_op` operation that updates the accuracy of each class and returns
  them.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.

  Args:
    labels: A `Tensor` of ground truth labels with shape [batch size] and of
      type `int32` or `int64`. The tensor will be flattened if its rank > 1.
    predictions: A `Tensor` of prediction results for semantic labels, whose
      shape is [batch size] and type `int32` or `int64`. The tensor will be
      flattened if its rank > 1.
    num_classes: The possible number of labels the prediction task can
      have. This value must be provided, since two variables with shape =
      [num_classes] will be allocated.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions must
      be either `1`, or the same as the corresponding `labels` dimension).
    metrics_collections: An optional list of collections that
      `mean_per_class_accuracy` should be added to.
    updates_collections: An optional list of collections `update_op` should be
      added to.
    name: An optional variable_scope name.

  Returns:
    mean_accuracy: A `Tensor` representing the mean per class accuracy.
    update_op: An operation that updates the accuracy tensor.

  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or if
      either `metrics_collections` or `updates_collections` are not a list or
      tuple.
    RuntimeError: If eager execution is enabled.
  """
  if context.executing_eagerly():
    raise RuntimeError('tf.metrics.mean_per_class_accuracy is not supported '
                       'when eager execution is enabled.')

  with variable_scope.variable_scope(name, 'mean_accuracy',
                                     (predictions, labels, weights)):
    labels = math_ops.to_int64(labels)

    # Flatten the input if its rank > 1.
    if labels.get_shape().ndims > 1:
      labels = array_ops.reshape(labels, [-1])

    if predictions.get_shape().ndims > 1:
      predictions = array_ops.reshape(predictions, [-1])

    # Check if shape is compatible.
    predictions.get_shape().assert_is_compatible_with(labels.get_shape())

    # Despite the names: total[c] accumulates the (weighted) number of
    # examples whose label is c, while count[c] accumulates the (weighted)
    # number of those predicted correctly, so per-class accuracy is
    # count / total below.
    total = metric_variable([num_classes], dtypes.float32, name='total')
    count = metric_variable([num_classes], dtypes.float32, name='count')

    # One entry per example; scaled by `weights` when given.
    ones = array_ops.ones([array_ops.size(labels)], dtypes.float32)

    if labels.dtype != predictions.dtype:
      predictions = math_ops.cast(predictions, labels.dtype)
    is_correct = math_ops.to_float(math_ops.equal(predictions, labels))

    if weights is not None:
      if weights.get_shape().ndims > 1:
        weights = array_ops.reshape(weights, [-1])
      weights = math_ops.to_float(weights)
      is_correct *= weights
      ones *= weights

    # Scatter per-example contributions into the per-class accumulators,
    # indexed by the (flattened) label values.
    update_total_op = state_ops.scatter_add(total, labels, ones)
    update_count_op = state_ops.scatter_add(count, labels, is_correct)

    def aggregate_mean_accuracy(_, count, total):
      # _safe_div yields 0 for classes with no examples (total == 0).
      per_class_accuracy = _safe_div(count, total, None)
      mean_accuracy_v = math_ops.reduce_mean(
          per_class_accuracy, name='mean_accuracy')
      if metrics_collections:
        ops.add_to_collections(metrics_collections, mean_accuracy_v)
      return mean_accuracy_v

    # Aggregate in cross-tower context so collections are updated exactly
    # once. NOTE(review): relies on distribute_lib merge_call semantics.
    mean_accuracy_v = distribute_lib.get_tower_context().merge_call(
        aggregate_mean_accuracy, count, total)

    # update_op evaluates to the per-class accuracies after this batch.
    update_op = _safe_div(update_count_op, update_total_op, name='update_op')
    if updates_collections:
      ops.add_to_collections(updates_collections, update_op)

    return mean_accuracy_v, update_op
@tf_export('metrics.mean_iou')
def mean_iou(labels,
             predictions,
             num_classes,
             weights=None,
             metrics_collections=None,
             updates_collections=None,
             name=None):
  """Calculate per-step mean Intersection-Over-Union (mIOU).

  Mean Intersection-Over-Union is a common evaluation metric for
  semantic image segmentation, which first computes the IOU for each
  semantic class and then computes the average over classes.
  IOU is defined as follows:
    IOU = true_positive / (true_positive + false_positive + false_negative).
  The predictions are accumulated in a confusion matrix, weighted by `weights`,
  and mIOU is then calculated from it.

  For estimation of the metric over a stream of data, the function creates an
  `update_op` operation that updates these variables and returns the `mean_iou`.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.

  Args:
    labels: A `Tensor` of ground truth labels with shape [batch size] and of
      type `int32` or `int64`. The tensor will be flattened if its rank > 1.
    predictions: A `Tensor` of prediction results for semantic labels, whose
      shape is [batch size] and type `int32` or `int64`. The tensor will be
      flattened if its rank > 1.
    num_classes: The possible number of labels the prediction task can
      have. This value must be provided, since a confusion matrix of
      dimension = [num_classes, num_classes] will be allocated.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions must
      be either `1`, or the same as the corresponding `labels` dimension).
    metrics_collections: An optional list of collections that `mean_iou`
      should be added to.
    updates_collections: An optional list of collections `update_op` should be
      added to.
    name: An optional variable_scope name.

  Returns:
    mean_iou: A `Tensor` representing the mean intersection-over-union.
    update_op: An operation that increments the confusion matrix.

  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or if
      either `metrics_collections` or `updates_collections` are not a list or
      tuple.
    RuntimeError: If eager execution is enabled.
  """
  if context.executing_eagerly():
    raise RuntimeError('tf.metrics.mean_iou is not supported when '
                       'eager execution is enabled.')

  with variable_scope.variable_scope(name, 'mean_iou',
                                     (predictions, labels, weights)):
    # Check if shape is compatible.
    predictions.get_shape().assert_is_compatible_with(labels.get_shape())

    # Accumulate a [num_classes, num_classes] confusion matrix across batches.
    total_cm, update_op = _streaming_confusion_matrix(labels, predictions,
                                                      num_classes, weights)

    def compute_mean_iou(total_cm, name):
      """Compute the mean intersection-over-union via the confusion matrix."""
      # Row/column sums give the per-class totals along the two confusion
      # matrix axes; the diagonal holds the per-class true positives.
      sum_over_row = math_ops.to_float(math_ops.reduce_sum(total_cm, 0))
      sum_over_col = math_ops.to_float(math_ops.reduce_sum(total_cm, 1))
      cm_diag = math_ops.to_float(array_ops.diag_part(total_cm))
      # Union = (row total + column total - intersection) per class.
      denominator = sum_over_row + sum_over_col - cm_diag

      # The mean is only computed over classes that appear in the
      # label or prediction tensor. If the denominator is 0, we need to
      # ignore the class.
      num_valid_entries = math_ops.reduce_sum(
          math_ops.cast(
              math_ops.not_equal(denominator, 0), dtype=dtypes.float32))

      # If the value of the denominator is 0, set it to 1 to avoid
      # zero division.
      denominator = array_ops.where(
          math_ops.greater(denominator, 0), denominator,
          array_ops.ones_like(denominator))

      iou = math_ops.div(cm_diag, denominator)

      # If the number of valid entries is 0 (no classes) we return 0.
      result = array_ops.where(
          math_ops.greater(num_valid_entries, 0),
          math_ops.reduce_sum(iou, name=name) / num_valid_entries, 0)
      return result

    def mean_iou_across_towers(_, v):
      mean_iou_v = compute_mean_iou(v, 'mean_iou')
      if metrics_collections:
        ops.add_to_collections(metrics_collections, mean_iou_v)
      return mean_iou_v

    # Aggregate in cross-tower context so the value is added to collections
    # exactly once. NOTE(review): relies on distribute_lib merge_call
    # semantics.
    mean_iou_v = distribute_lib.get_tower_context().merge_call(
        mean_iou_across_towers, total_cm)

    if updates_collections:
      ops.add_to_collections(updates_collections, update_op)

    return mean_iou_v, update_op
@tf_export('metrics.mean_relative_error')
def mean_relative_error(labels,
                        predictions,
                        normalizer,
                        weights=None,
                        metrics_collections=None,
                        updates_collections=None,
                        name=None):
  """Computes the mean relative error by normalizing with the given values.

  Maintains two local variables, `total` and `count`, whose ratio is the
  (optionally `weights`-weighted) streaming mean of
  |labels - predictions| / normalizer. Elements where `normalizer` is zero
  contribute a relative error of zero instead of dividing by zero. The
  returned `mean_relative_error` tensor is idempotent; the returned
  `update_op` accumulates a batch of data into the two variables and
  evaluates to the updated mean.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask
  values.

  Args:
    labels: A `Tensor` of the same shape as `predictions`.
    predictions: A `Tensor` of arbitrary shape.
    normalizer: A `Tensor` of the same shape as `predictions`.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions
      must be either `1`, or the same as the corresponding `labels`
      dimension).
    metrics_collections: An optional list of collections that
      `mean_relative_error` should be added to.
    updates_collections: An optional list of collections that `update_op`
      should be added to.
    name: An optional variable_scope name.

  Returns:
    mean_relative_error: A `Tensor` representing the current mean, the value
      of `total` divided by `count`.
    update_op: An operation that increments the `total` and `count` variables
      appropriately and whose value matches `mean_relative_error`.

  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or
      if either `metrics_collections` or `updates_collections` are not a list
      or tuple.
    RuntimeError: If eager execution is enabled.
  """
  if context.executing_eagerly():
    raise RuntimeError('tf.metrics.mean_relative_error is not supported when '
                       'eager execution is enabled.')
  predictions, labels, weights = _remove_squeezable_dimensions(
      predictions=predictions, labels=labels, weights=weights)
  predictions, normalizer = confusion_matrix.remove_squeezable_dimensions(
      predictions, normalizer)
  predictions.get_shape().assert_is_compatible_with(normalizer.get_shape())
  # Where the normalizer is zero, report a zero error rather than dividing.
  zero_errors = array_ops.zeros_like(labels)
  scaled_errors = math_ops.div(math_ops.abs(labels - predictions), normalizer)
  relative_errors = array_ops.where(
      math_ops.equal(normalizer, 0.0), zero_errors, scaled_errors)
  return mean(relative_errors, weights, metrics_collections,
              updates_collections, name or 'mean_relative_error')
@tf_export('metrics.mean_squared_error')
def mean_squared_error(labels,
                       predictions,
                       weights=None,
                       metrics_collections=None,
                       updates_collections=None,
                       name=None):
  """Computes the mean squared error between the labels and predictions.

  Maintains two local variables, `total` and `count`, whose ratio is the
  (optionally `weights`-weighted) streaming mean of the element-wise squared
  difference between `predictions` and `labels`. The returned
  `mean_squared_error` tensor is idempotent; the returned `update_op`
  accumulates a batch of data into the two variables and evaluates to the
  updated mean.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask
  values.

  Args:
    labels: A `Tensor` of the same shape as `predictions`.
    predictions: A `Tensor` of arbitrary shape.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions
      must be either `1`, or the same as the corresponding `labels`
      dimension).
    metrics_collections: An optional list of collections that
      `mean_squared_error` should be added to.
    updates_collections: An optional list of collections that `update_op`
      should be added to.
    name: An optional variable_scope name.

  Returns:
    mean_squared_error: A `Tensor` representing the current mean, the value
      of `total` divided by `count`.
    update_op: An operation that increments the `total` and `count` variables
      appropriately and whose value matches `mean_squared_error`.

  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or
      if either `metrics_collections` or `updates_collections` are not a list
      or tuple.
    RuntimeError: If eager execution is enabled.
  """
  if context.executing_eagerly():
    raise RuntimeError('tf.metrics.mean_squared_error is not supported when '
                       'eager execution is enabled.')
  predictions, labels, weights = _remove_squeezable_dimensions(
      predictions=predictions, labels=labels, weights=weights)
  # Element-wise (label - prediction)^2; weighting is applied inside `mean`.
  squared_diff = math_ops.square(labels - predictions)
  metric_name = name or 'mean_squared_error'
  return mean(squared_diff, weights, metrics_collections, updates_collections,
              metric_name)
@tf_export('metrics.mean_tensor')
def mean_tensor(values,
                weights=None,
                metrics_collections=None,
                updates_collections=None,
                name=None):
  """Computes the element-wise (weighted) mean of the given tensors.

  In contrast to the `mean` function which returns a scalar with the
  mean, this function returns an average tensor with the same shape as the
  input tensors.

  The `mean_tensor` function creates two local variables,
  `total_tensor` and `count_tensor` that are used to compute the average of
  `values`. This average is ultimately returned as `mean` which is an idempotent
  operation that simply divides `total` by `count`.

  For estimation of the metric over a stream of data, the function creates an
  `update_op` operation that updates these variables and returns the `mean`.
  `update_op` increments `total` with the reduced sum of the product of `values`
  and `weights`, and it increments `count` with the reduced sum of `weights`.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.

  Args:
    values: A `Tensor` of arbitrary dimensions.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `values`, and must be broadcastable to `values` (i.e., all dimensions must
      be either `1`, or the same as the corresponding `values` dimension).
    metrics_collections: An optional list of collections that `mean`
      should be added to.
    updates_collections: An optional list of collections that `update_op`
      should be added to.
    name: An optional variable_scope name.

  Returns:
    mean: A float `Tensor` representing the current mean, the value of `total`
      divided by `count`.
    update_op: An operation that increments the `total` and `count` variables
      appropriately and whose value matches `mean_value`.

  Raises:
    ValueError: If `weights` is not `None` and its shape doesn't match `values`,
      or if either `metrics_collections` or `updates_collections` are not a list
      or tuple.
    RuntimeError: If eager execution is enabled.
  """
  if context.executing_eagerly():
    raise RuntimeError('tf.metrics.mean_tensor is not supported when '
                       'eager execution is enabled.')

  with variable_scope.variable_scope(name, 'mean', (values, weights)):
    values = math_ops.to_float(values)
    # Element-wise accumulators with the same static shape as `values`.
    total = metric_variable(
        values.get_shape(), dtypes.float32, name='total_tensor')
    count = metric_variable(
        values.get_shape(), dtypes.float32, name='count_tensor')

    # Per-element effective sample count; scaled by `weights` when given.
    num_values = array_ops.ones_like(values)
    if weights is not None:
      values, _, weights = _remove_squeezable_dimensions(
          predictions=values, labels=None, weights=weights)
      weights = weights_broadcast_ops.broadcast_weights(
          math_ops.to_float(weights), values)
      values = math_ops.multiply(values, weights)
      num_values = math_ops.multiply(num_values, weights)

    update_total_op = state_ops.assign_add(total, values)
    # Ensure the (possibly weighted) `values` tensor is computed before the
    # count accumulator advances, so the two accumulators stay in step.
    with ops.control_dependencies([values]):
      update_count_op = state_ops.assign_add(count, num_values)

    def aggregate_across_towers(_, t, c):
      # _safe_div yields 0 where count is 0 (no samples seen yet).
      mean_t = _safe_div(t, c, 'value')
      if metrics_collections:
        ops.add_to_collections(metrics_collections, mean_t)
      return mean_t

    # Aggregate in cross-tower context so the value is added to collections
    # exactly once. NOTE(review): relies on distribute_lib merge_call
    # semantics.
    mean_t = distribute_lib.get_tower_context().merge_call(
        aggregate_across_towers, total, count)

    update_op = _safe_div(update_total_op, update_count_op, 'update_op')
    if updates_collections:
      ops.add_to_collections(updates_collections, update_op)

    return mean_t, update_op
@tf_export('metrics.percentage_below')
def percentage_below(values,
                     threshold,
                     weights=None,
                     metrics_collections=None,
                     updates_collections=None,
                     name=None):
  """Computes the percentage of values less than the given threshold.

  Maintains two local variables, `total` and `count`, whose ratio is the
  (optionally `weights`-weighted) streaming fraction of `values` that fall
  strictly below `threshold`. The returned `percentage` tensor is idempotent;
  the returned `update_op` accumulates a batch of data into the two variables
  and evaluates to the updated fraction.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask
  values.

  Args:
    values: A numeric `Tensor` of arbitrary size.
    threshold: A scalar threshold.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `values`, and must be broadcastable to `values` (i.e., all dimensions
      must be either `1`, or the same as the corresponding `values`
      dimension).
    metrics_collections: An optional list of collections that the metric
      value variable should be added to.
    updates_collections: An optional list of collections that the metric
      update ops should be added to.
    name: An optional variable_scope name.

  Returns:
    percentage: A `Tensor` representing the current mean, the value of
      `total` divided by `count`.
    update_op: An operation that increments the `total` and `count` variables
      appropriately.

  Raises:
    ValueError: If `weights` is not `None` and its shape doesn't match
      `values`, or if either `metrics_collections` or `updates_collections`
      are not a list or tuple.
    RuntimeError: If eager execution is enabled.
  """
  if context.executing_eagerly():
    raise RuntimeError('tf.metrics.percentage_below is not supported when '
                       'eager execution is enabled.')
  # Indicator tensor: 1.0 where the value is strictly below the threshold.
  below_indicator = math_ops.to_float(math_ops.less(values, threshold))
  return mean(below_indicator, weights, metrics_collections,
              updates_collections, name or 'percentage_below_threshold')
def _count_condition(values,
                     weights=None,
                     metrics_collections=None,
                     updates_collections=None):
  """Accumulates the total weight of `True` entries in `values`.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask
  values.

  Args:
    values: A `bool` `Tensor` of arbitrary size.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `values`, and must be broadcastable to `values`.
    metrics_collections: An optional list of collections that the metric
      value variable should be added to.
    updates_collections: An optional list of collections that the metric
      update ops should be added to.

  Returns:
    value_tensor: A `Tensor` representing the current value of the metric.
    update_op: An operation that accumulates the error from a batch of data.

  Raises:
    ValueError: If `weights` is not `None` and its shape doesn't match
      `values`, or if either `metrics_collections` or `updates_collections`
      are not a list or tuple.
  """
  check_ops.assert_type(values, dtypes.bool)
  tally = metric_variable([], dtypes.float32, name='count')
  # Turn the boolean mask into a 1.0/0.0 indicator so it can be summed.
  indicator = math_ops.to_float(values)
  if weights is not None:
    # The rank check must run before the weighted product is used.
    rank_check = check_ops.assert_rank_in(weights,
                                          (0, array_ops.rank(indicator)))
    with ops.control_dependencies((rank_check,)):
      indicator = math_ops.multiply(indicator, math_ops.to_float(weights))
  update_op = state_ops.assign_add(tally, math_ops.reduce_sum(indicator))
  value_tensor = _aggregate_variable(tally, metrics_collections)
  if updates_collections:
    ops.add_to_collections(updates_collections, update_op)
  return value_tensor, update_op
@tf_export('metrics.false_negatives')
def false_negatives(labels,
                    predictions,
                    weights=None,
                    metrics_collections=None,
                    updates_collections=None,
                    name=None):
  """Computes the total number of false negatives.

  A false negative is an example whose label is `True` but whose prediction
  is `False`; this metric streams the weighted count of such examples.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask
  values.

  Args:
    labels: The ground truth values, a `Tensor` whose dimensions must match
      `predictions`. Will be cast to `bool`.
    predictions: The predicted values, a `Tensor` of arbitrary dimensions.
      Will be cast to `bool`.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels`.
    metrics_collections: An optional list of collections that the metric
      value variable should be added to.
    updates_collections: An optional list of collections that the metric
      update ops should be added to.
    name: An optional variable_scope name.

  Returns:
    value_tensor: A `Tensor` representing the current value of the metric.
    update_op: An operation that accumulates the error from a batch of data.

  Raises:
    ValueError: If `weights` is not `None` and its shape doesn't match
      `values`, or if either `metrics_collections` or `updates_collections`
      are not a list or tuple.
    RuntimeError: If eager execution is enabled.
  """
  if context.executing_eagerly():
    raise RuntimeError('tf.metrics.false_negatives is not supported when '
                       'eager execution is enabled.')
  with variable_scope.variable_scope(name, 'false_negatives',
                                     (predictions, labels, weights)):
    predictions, labels, weights = _remove_squeezable_dimensions(
        predictions=math_ops.cast(predictions, dtype=dtypes.bool),
        labels=math_ops.cast(labels, dtype=dtypes.bool),
        weights=weights)
    # Positive label paired with a negative prediction.
    label_is_true = math_ops.equal(labels, True)
    pred_is_false = math_ops.equal(predictions, False)
    fn_mask = math_ops.logical_and(label_is_true, pred_is_false)
    return _count_condition(fn_mask, weights, metrics_collections,
                            updates_collections)
@tf_export('metrics.false_negatives_at_thresholds')
def false_negatives_at_thresholds(labels,
                                  predictions,
                                  thresholds,
                                  weights=None,
                                  metrics_collections=None,
                                  updates_collections=None,
                                  name=None):
  """Computes false negatives at provided threshold values.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask
  values.

  Args:
    labels: A `Tensor` whose shape matches `predictions`. Will be cast to
      `bool`.
    predictions: A floating point `Tensor` of arbitrary shape and whose
      values are in the range `[0, 1]`.
    thresholds: A python list or tuple of float thresholds in `[0, 1]`.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels`.
    metrics_collections: An optional list of collections that
      `false_negatives` should be added to.
    updates_collections: An optional list of collections that `update_op`
      should be added to.
    name: An optional variable_scope name.

  Returns:
    false_negatives: A float `Tensor` of shape `[len(thresholds)]`.
    update_op: An operation that updates the `false_negatives` variable and
      returns its current value.

  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or
      if either `metrics_collections` or `updates_collections` are not a
      list or tuple.
    RuntimeError: If eager execution is enabled.
  """
  if context.executing_eagerly():
    raise RuntimeError('tf.metrics.false_negatives_at_thresholds is not '
                       'supported when eager execution is enabled.')
  with variable_scope.variable_scope(name, 'false_negatives',
                                     (predictions, labels, weights)):
    # Only the false-negative slice of the confusion matrix is needed.
    counts, updates = _confusion_matrix_at_thresholds(
        labels, predictions, thresholds, weights=weights, includes=('fn',))
    fn_update = updates['fn']
    fn_value = _aggregate_variable(counts['fn'], metrics_collections)
    if updates_collections:
      ops.add_to_collections(updates_collections, fn_update)
    return fn_value, fn_update
@tf_export('metrics.false_positives')
def false_positives(labels,
                    predictions,
                    weights=None,
                    metrics_collections=None,
                    updates_collections=None,
                    name=None):
  """Sum the weights of false positives.

  A false positive is an example whose label is `False` but whose prediction
  is `True`; this metric streams the weighted count of such examples.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask
  values.

  Args:
    labels: The ground truth values, a `Tensor` whose dimensions must match
      `predictions`. Will be cast to `bool`.
    predictions: The predicted values, a `Tensor` of arbitrary dimensions.
      Will be cast to `bool`.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels`.
    metrics_collections: An optional list of collections that the metric
      value variable should be added to.
    updates_collections: An optional list of collections that the metric
      update ops should be added to.
    name: An optional variable_scope name.

  Returns:
    value_tensor: A `Tensor` representing the current value of the metric.
    update_op: An operation that accumulates the error from a batch of data.

  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or
      if either `metrics_collections` or `updates_collections` are not a
      list or tuple.
    RuntimeError: If eager execution is enabled.
  """
  if context.executing_eagerly():
    raise RuntimeError('tf.metrics.false_positives is not supported when '
                       'eager execution is enabled.')
  with variable_scope.variable_scope(name, 'false_positives',
                                     (predictions, labels, weights)):
    predictions, labels, weights = _remove_squeezable_dimensions(
        predictions=math_ops.cast(predictions, dtype=dtypes.bool),
        labels=math_ops.cast(labels, dtype=dtypes.bool),
        weights=weights)
    # Negative label paired with a positive prediction.
    label_is_false = math_ops.equal(labels, False)
    pred_is_true = math_ops.equal(predictions, True)
    fp_mask = math_ops.logical_and(label_is_false, pred_is_true)
    return _count_condition(fp_mask, weights, metrics_collections,
                            updates_collections)
@tf_export('metrics.false_positives_at_thresholds')
def false_positives_at_thresholds(labels,
                                  predictions,
                                  thresholds,
                                  weights=None,
                                  metrics_collections=None,
                                  updates_collections=None,
                                  name=None):
  """Computes false positives at provided threshold values.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask
  values.

  Args:
    labels: A `Tensor` whose shape matches `predictions`. Will be cast to
      `bool`.
    predictions: A floating point `Tensor` of arbitrary shape and whose
      values are in the range `[0, 1]`.
    thresholds: A python list or tuple of float thresholds in `[0, 1]`.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels`.
    metrics_collections: An optional list of collections that
      `false_positives` should be added to.
    updates_collections: An optional list of collections that `update_op`
      should be added to.
    name: An optional variable_scope name.

  Returns:
    false_positives: A float `Tensor` of shape `[len(thresholds)]`.
    update_op: An operation that updates the `false_positives` variable and
      returns its current value.

  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or
      if either `metrics_collections` or `updates_collections` are not a
      list or tuple.
    RuntimeError: If eager execution is enabled.
  """
  if context.executing_eagerly():
    raise RuntimeError('tf.metrics.false_positives_at_thresholds is not '
                       'supported when eager execution is enabled.')
  with variable_scope.variable_scope(name, 'false_positives',
                                     (predictions, labels, weights)):
    # Only the false-positive slice of the confusion matrix is needed.
    counts, updates = _confusion_matrix_at_thresholds(
        labels, predictions, thresholds, weights=weights, includes=('fp',))
    fp_update = updates['fp']
    fp_value = _aggregate_variable(counts['fp'], metrics_collections)
    if updates_collections:
      ops.add_to_collections(updates_collections, fp_update)
    return fp_value, fp_update
@tf_export('metrics.true_negatives')
def true_negatives(labels,
                   predictions,
                   weights=None,
                   metrics_collections=None,
                   updates_collections=None,
                   name=None):
  """Sum the weights of true_negatives.

  A true negative is an example whose label and prediction are both `False`;
  this metric streams the weighted count of such examples.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask
  values.

  Args:
    labels: The ground truth values, a `Tensor` whose dimensions must match
      `predictions`. Will be cast to `bool`.
    predictions: The predicted values, a `Tensor` of arbitrary dimensions.
      Will be cast to `bool`.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels`.
    metrics_collections: An optional list of collections that the metric
      value variable should be added to.
    updates_collections: An optional list of collections that the metric
      update ops should be added to.
    name: An optional variable_scope name.

  Returns:
    value_tensor: A `Tensor` representing the current value of the metric.
    update_op: An operation that accumulates the error from a batch of data.

  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or
      if either `metrics_collections` or `updates_collections` are not a
      list or tuple.
    RuntimeError: If eager execution is enabled.
  """
  if context.executing_eagerly():
    raise RuntimeError('tf.metrics.true_negatives is not '
                       'supported when eager execution is enabled.')
  with variable_scope.variable_scope(name, 'true_negatives',
                                     (predictions, labels, weights)):
    predictions, labels, weights = _remove_squeezable_dimensions(
        predictions=math_ops.cast(predictions, dtype=dtypes.bool),
        labels=math_ops.cast(labels, dtype=dtypes.bool),
        weights=weights)
    # Negative label paired with a negative prediction.
    label_is_false = math_ops.equal(labels, False)
    pred_is_false = math_ops.equal(predictions, False)
    tn_mask = math_ops.logical_and(label_is_false, pred_is_false)
    return _count_condition(tn_mask, weights, metrics_collections,
                            updates_collections)
@tf_export('metrics.true_negatives_at_thresholds')
def true_negatives_at_thresholds(labels,
                                 predictions,
                                 thresholds,
                                 weights=None,
                                 metrics_collections=None,
                                 updates_collections=None,
                                 name=None):
  """Computes true negatives at provided threshold values.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask
  values.

  Args:
    labels: A `Tensor` whose shape matches `predictions`. Will be cast to
      `bool`.
    predictions: A floating point `Tensor` of arbitrary shape and whose
      values are in the range `[0, 1]`.
    thresholds: A python list or tuple of float thresholds in `[0, 1]`.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels`.
    metrics_collections: An optional list of collections that
      `true_negatives` should be added to.
    updates_collections: An optional list of collections that `update_op`
      should be added to.
    name: An optional variable_scope name.

  Returns:
    true_negatives: A float `Tensor` of shape `[len(thresholds)]`.
    update_op: An operation that updates the `true_negatives` variable and
      returns its current value.

  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or
      if either `metrics_collections` or `updates_collections` are not a
      list or tuple.
    RuntimeError: If eager execution is enabled.
  """
  if context.executing_eagerly():
    raise RuntimeError('tf.metrics.true_negatives_at_thresholds is not '
                       'supported when eager execution is enabled.')
  with variable_scope.variable_scope(name, 'true_negatives',
                                     (predictions, labels, weights)):
    # Only the true-negative slice of the confusion matrix is needed.
    counts, updates = _confusion_matrix_at_thresholds(
        labels, predictions, thresholds, weights=weights, includes=('tn',))
    tn_update = updates['tn']
    tn_value = _aggregate_variable(counts['tn'], metrics_collections)
    if updates_collections:
      ops.add_to_collections(updates_collections, tn_update)
    return tn_value, tn_update
@tf_export('metrics.true_positives')
def true_positives(labels,
                   predictions,
                   weights=None,
                   metrics_collections=None,
                   updates_collections=None,
                   name=None):
  """Sum the weights of true_positives.

  A true positive is an example whose label and prediction are both `True`;
  this metric streams the weighted count of such examples.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask
  values.

  Args:
    labels: The ground truth values, a `Tensor` whose dimensions must match
      `predictions`. Will be cast to `bool`.
    predictions: The predicted values, a `Tensor` of arbitrary dimensions.
      Will be cast to `bool`.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels`.
    metrics_collections: An optional list of collections that the metric
      value variable should be added to.
    updates_collections: An optional list of collections that the metric
      update ops should be added to.
    name: An optional variable_scope name.

  Returns:
    value_tensor: A `Tensor` representing the current value of the metric.
    update_op: An operation that accumulates the error from a batch of data.

  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or
      if either `metrics_collections` or `updates_collections` are not a
      list or tuple.
    RuntimeError: If eager execution is enabled.
  """
  if context.executing_eagerly():
    raise RuntimeError('tf.metrics.true_positives is not '
                       'supported when eager execution is enabled.')
  with variable_scope.variable_scope(name, 'true_positives',
                                     (predictions, labels, weights)):
    predictions, labels, weights = _remove_squeezable_dimensions(
        predictions=math_ops.cast(predictions, dtype=dtypes.bool),
        labels=math_ops.cast(labels, dtype=dtypes.bool),
        weights=weights)
    # Positive label paired with a positive prediction.
    label_is_true = math_ops.equal(labels, True)
    pred_is_true = math_ops.equal(predictions, True)
    tp_mask = math_ops.logical_and(label_is_true, pred_is_true)
    return _count_condition(tp_mask, weights, metrics_collections,
                            updates_collections)
@tf_export('metrics.true_positives_at_thresholds')
def true_positives_at_thresholds(labels,
                                 predictions,
                                 thresholds,
                                 weights=None,
                                 metrics_collections=None,
                                 updates_collections=None,
                                 name=None):
  """Computes true positives at provided threshold values.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask
  values.

  Args:
    labels: A `Tensor` whose shape matches `predictions`. Will be cast to
      `bool`.
    predictions: A floating point `Tensor` of arbitrary shape and whose
      values are in the range `[0, 1]`.
    thresholds: A python list or tuple of float thresholds in `[0, 1]`.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels`.
    metrics_collections: An optional list of collections that
      `true_positives` should be added to.
    updates_collections: An optional list of collections that `update_op`
      should be added to.
    name: An optional variable_scope name.

  Returns:
    true_positives: A float `Tensor` of shape `[len(thresholds)]`.
    update_op: An operation that updates the `true_positives` variable and
      returns its current value.

  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or
      if either `metrics_collections` or `updates_collections` are not a
      list or tuple.
    RuntimeError: If eager execution is enabled.
  """
  if context.executing_eagerly():
    raise RuntimeError('tf.metrics.true_positives_at_thresholds is not '
                       'supported when eager execution is enabled.')
  with variable_scope.variable_scope(name, 'true_positives',
                                     (predictions, labels, weights)):
    # Only the true-positive slice of the confusion matrix is needed.
    counts, updates = _confusion_matrix_at_thresholds(
        labels, predictions, thresholds, weights=weights, includes=('tp',))
    tp_update = updates['tp']
    tp_value = _aggregate_variable(counts['tp'], metrics_collections)
    if updates_collections:
      ops.add_to_collections(updates_collections, tp_update)
    return tp_value, tp_update
@tf_export('metrics.precision')
def precision(labels,
              predictions,
              weights=None,
              metrics_collections=None,
              updates_collections=None,
              name=None):
  """Computes the precision of the predictions with respect to the labels.

  The `precision` function creates two local variables,
  `true_positives` and `false_positives`, that are used to compute the
  precision. This value is ultimately returned as `precision`, an idempotent
  operation that simply divides `true_positives` by the sum of
  `true_positives` and `false_positives`.

  For estimation of the metric over a stream of data, the function creates an
  `update_op` operation that updates these variables and returns the
  `precision`. `update_op` weights each prediction by the corresponding value
  in `weights`.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask
  values.

  Args:
    labels: The ground truth values, a `Tensor` whose dimensions must match
      `predictions`. Will be cast to `bool`.
    predictions: The predicted values, a `Tensor` of arbitrary dimensions.
      Will be cast to `bool`.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels`.
    metrics_collections: An optional list of collections that `precision`
      should be added to.
    updates_collections: An optional list of collections that `update_op`
      should be added to.
    name: An optional variable_scope name.

  Returns:
    precision: Scalar float `Tensor` with the value of `true_positives`
      divided by the sum of `true_positives` and `false_positives`.
    update_op: `Operation` that increments `true_positives` and
      `false_positives` variables appropriately and whose value matches
      `precision`.

  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or
      if either `metrics_collections` or `updates_collections` are not a
      list or tuple.
    RuntimeError: If eager execution is enabled.
  """
  if context.executing_eagerly():
    raise RuntimeError('tf.metrics.precision is not '
                       'supported when eager execution is enabled.')
  with variable_scope.variable_scope(name, 'precision',
                                     (predictions, labels, weights)):
    # Cast both inputs to bool and drop any trailing size-1 dimension
    # mismatch between them before counting.
    predictions, labels, weights = _remove_squeezable_dimensions(
        predictions=math_ops.cast(predictions, dtype=dtypes.bool),
        labels=math_ops.cast(labels, dtype=dtypes.bool),
        weights=weights)
    # Streaming tp/fp accumulators; collections are deliberately None here —
    # only the final precision value/update_op are registered below.
    true_p, true_positives_update_op = true_positives(
        labels,
        predictions,
        weights,
        metrics_collections=None,
        updates_collections=None,
        name=None)
    false_p, false_positives_update_op = false_positives(
        labels,
        predictions,
        weights,
        metrics_collections=None,
        updates_collections=None,
        name=None)
    # tp / (tp + fp), defined as 0 when there are no positive predictions
    # (guards the division with `where` instead of an epsilon).
    def compute_precision(tp, fp, name):
      return array_ops.where(
          math_ops.greater(tp + fp, 0), math_ops.div(tp, tp + fp), 0, name)
    # Built once in the cross-tower context via merge_call so the value (and
    # its collection registration) is not duplicated per tower.
    def once_across_towers(_, true_p, false_p):
      p = compute_precision(true_p, false_p, 'value')
      if metrics_collections:
        ops.add_to_collections(metrics_collections, p)
      return p
    p = distribute_lib.get_tower_context().merge_call(
        once_across_towers, true_p, false_p)
    # The update op recomputes precision from the freshly-updated counters so
    # its value matches `precision` after each batch.
    update_op = compute_precision(true_positives_update_op,
                                  false_positives_update_op, 'update_op')
    if updates_collections:
      ops.add_to_collections(updates_collections, update_op)
    return p, update_op
@tf_export('metrics.precision_at_thresholds')
def precision_at_thresholds(labels,
                            predictions,
                            thresholds,
                            weights=None,
                            metrics_collections=None,
                            updates_collections=None,
                            name=None):
  """Computes precision values for different `thresholds` on `predictions`.

  The `precision_at_thresholds` function creates four local variables,
  `true_positives`, `true_negatives`, `false_positives` and `false_negatives`
  for various values of thresholds. `precision[i]` is defined as the total
  weight of values in `predictions` above `thresholds[i]` whose corresponding
  entry in `labels` is `True`, divided by the total weight of values in
  `predictions` above `thresholds[i]`
  (`true_positives[i] / (true_positives[i] + false_positives[i])`).

  For estimation of the metric over a stream of data, the function creates an
  `update_op` operation that updates these variables and returns the
  `precision`.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask
  values.

  Args:
    labels: The ground truth values, a `Tensor` whose dimensions must match
      `predictions`. Will be cast to `bool`.
    predictions: A floating point `Tensor` of arbitrary shape and whose
      values are in the range `[0, 1]`.
    thresholds: A python list or tuple of float thresholds in `[0, 1]`.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels`.
    metrics_collections: An optional list of collections that `auc` should be
      added to.
    updates_collections: An optional list of collections that `update_op`
      should be added to.
    name: An optional variable_scope name.

  Returns:
    precision: A float `Tensor` of shape `[len(thresholds)]`.
    update_op: An operation that increments the `true_positives`,
      `true_negatives`, `false_positives` and `false_negatives` variables
      that are used in the computation of `precision`.

  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or
      if either `metrics_collections` or `updates_collections` are not a
      list or tuple.
    RuntimeError: If eager execution is enabled.
  """
  if context.executing_eagerly():
    raise RuntimeError('tf.metrics.precision_at_thresholds is not '
                       'supported when eager execution is enabled.')
  with variable_scope.variable_scope(name, 'precision_at_thresholds',
                                     (predictions, labels, weights)):
    # Per-threshold tp/fp accumulators; tn/fn are not needed for precision.
    values, update_ops = _confusion_matrix_at_thresholds(
        labels, predictions, thresholds, weights, includes=('tp', 'fp'))
    # Avoid division by zero.
    epsilon = 1e-7
    # NOTE: unlike `tf.metrics.precision`, this variant biases the ratio by
    # epsilon instead of special-casing tp + fp == 0.
    def compute_precision(tp, fp, name):
      return math_ops.div(tp, epsilon + tp + fp, name='precision_' + name)
    # Built once in the cross-tower context via merge_call so the value (and
    # its collection registration) is not duplicated per tower.
    def precision_across_towers(_, values):
      prec = compute_precision(values['tp'], values['fp'], 'value')
      if metrics_collections:
        ops.add_to_collections(metrics_collections, prec)
      return prec
    prec = distribute_lib.get_tower_context().merge_call(
        precision_across_towers, values)
    # The update op recomputes precision from the freshly-updated counters.
    update_op = compute_precision(update_ops['tp'], update_ops['fp'],
                                  'update_op')
    if updates_collections:
      ops.add_to_collections(updates_collections, update_op)
    return prec, update_op
@tf_export('metrics.recall')
def recall(labels,
           predictions,
           weights=None,
           metrics_collections=None,
           updates_collections=None,
           name=None):
  """Computes the recall of the predictions with respect to the labels.

  The `recall` function creates two local variables, `true_positives`
  and `false_negatives`, that are used to compute the recall. This value is
  ultimately returned as `recall`, an idempotent operation that simply
  divides `true_positives` by the sum of `true_positives` and
  `false_negatives`.

  For estimation of the metric over a stream of data, the function creates an
  `update_op` that updates these variables and returns the `recall`.
  `update_op` weights each prediction by the corresponding value in
  `weights`.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask
  values.

  Args:
    labels: The ground truth values, a `Tensor` whose dimensions must match
      `predictions`. Will be cast to `bool`.
    predictions: The predicted values, a `Tensor` of arbitrary dimensions.
      Will be cast to `bool`.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions
      must be either `1`, or the same as the corresponding `labels`
      dimension).
    metrics_collections: An optional list of collections that `recall` should
      be added to.
    updates_collections: An optional list of collections that `update_op`
      should be added to.
    name: An optional variable_scope name.

  Returns:
    recall: Scalar float `Tensor` with the value of `true_positives` divided
      by the sum of `true_positives` and `false_negatives`.
    update_op: `Operation` that increments `true_positives` and
      `false_negatives` variables appropriately and whose value matches
      `recall`.

  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or
      if either `metrics_collections` or `updates_collections` are not a
      list or tuple.
    RuntimeError: If eager execution is enabled.
  """
  if context.executing_eagerly():
    # Message fixed: it previously stuttered ('... is not supported is not
    # supported ...'); now matches the phrasing of the sibling metrics.
    raise RuntimeError('tf.metrics.recall is not supported when '
                       'eager execution is enabled.')
  with variable_scope.variable_scope(name, 'recall',
                                     (predictions, labels, weights)):
    predictions, labels, weights = _remove_squeezable_dimensions(
        predictions=math_ops.cast(predictions, dtype=dtypes.bool),
        labels=math_ops.cast(labels, dtype=dtypes.bool),
        weights=weights)
    # Streaming tp/fn accumulators; collections are deliberately None here —
    # only the final recall value/update_op are registered below.
    true_p, true_positives_update_op = true_positives(
        labels,
        predictions,
        weights,
        metrics_collections=None,
        updates_collections=None,
        name=None)
    false_n, false_negatives_update_op = false_negatives(
        labels,
        predictions,
        weights,
        metrics_collections=None,
        updates_collections=None,
        name=None)
    # tp / (tp + fn), defined as 0 when there are no positive labels.
    def compute_recall(true_p, false_n, name):
      return array_ops.where(
          math_ops.greater(true_p + false_n, 0),
          math_ops.div(true_p, true_p + false_n), 0, name)
    # Built once in the cross-tower context via merge_call so the value (and
    # its collection registration) is not duplicated per tower.
    def once_across_towers(_, true_p, false_n):
      rec = compute_recall(true_p, false_n, 'value')
      if metrics_collections:
        ops.add_to_collections(metrics_collections, rec)
      return rec
    rec = distribute_lib.get_tower_context().merge_call(
        once_across_towers, true_p, false_n)
    # The update op recomputes recall from the freshly-updated counters so
    # its value matches `recall` after each batch.
    update_op = compute_recall(true_positives_update_op,
                               false_negatives_update_op, 'update_op')
    if updates_collections:
      ops.add_to_collections(updates_collections, update_op)
    return rec, update_op
def _at_k_name(name, k=None, class_id=None):
if k is not None:
name = '%s_at_%d' % (name, k)
else:
name = '%s_at_k' % (name)
if class_id is not None:
name = '%s_class%d' % (name, class_id)
return name
def _select_class_id(ids, selected_id):
  """Filter all but `selected_id` out of `ids`.

  Args:
    ids: `int64` `Tensor` or `SparseTensor` of IDs.
    selected_id: Int id to select.

  Returns:
    `SparseTensor` of same dimensions as `ids`. This contains only the
    entries equal to `selected_id`.
  """
  ids = sparse_tensor.convert_to_tensor_or_sparse_tensor(ids)
  if isinstance(ids, sparse_tensor.SparseTensor):
    # Sparse input: drop every entry whose value differs from `selected_id`.
    return sparse_ops.sparse_retain(ids, math_ops.equal(ids.values,
                                                        selected_id))
  # TODO(ptucker): Make this more efficient, maybe add a sparse version of
  # tf.equal and tf.reduce_any?
  # Shape of filled IDs is the same as `ids` with the last dim collapsed to 1.
  ids_shape = array_ops.shape(ids, out_type=dtypes.int64)
  ids_last_dim = array_ops.size(ids_shape) - 1
  filled_selected_id_shape = math_ops.reduced_shape(ids_shape,
                                                    array_ops.reshape(
                                                        ids_last_dim, [1]))
  # Intersect `ids` with the selected ID.
  # A tensor holding only `selected_id` in every row; set-intersecting it
  # with `ids` keeps exactly the matching entries.
  filled_selected_id = array_ops.fill(filled_selected_id_shape,
                                      math_ops.to_int64(selected_id))
  result = sets.set_intersection(filled_selected_id, ids)
  # Re-wrap the intersection with the original dense shape of `ids`.
  return sparse_tensor.SparseTensor(
      indices=result.indices, values=result.values, dense_shape=ids_shape)
def _maybe_select_class_id(labels, predictions_idx, selected_id=None):
  """If class ID is specified, filter all other classes.

  Args:
    labels: `int64` `Tensor` or `SparseTensor` with shape
      [D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
      target classes for the associated prediction. Commonly, N=1 and
      `labels` has shape [batch_size, num_labels]. [D1, ... DN] must match
      `predictions_idx`.
    predictions_idx: `int64` `Tensor` of class IDs, with shape
      [D1, ... DN, k] where N >= 1. Commonly, N=1 and `predictions_idx` has
      shape [batch size, k].
    selected_id: Int id to select.

  Returns:
    Tuple of `labels` and `predictions_idx`, possibly with classes removed.
  """
  # No class selected: pass both tensors through untouched.
  if selected_id is None:
    return labels, predictions_idx
  filtered_labels = _select_class_id(labels, selected_id)
  filtered_predictions = _select_class_id(predictions_idx, selected_id)
  return filtered_labels, filtered_predictions
def _sparse_true_positive_at_k(labels,
                               predictions_idx,
                               class_id=None,
                               weights=None,
                               name=None):
  """Calculates true positives for recall@k and precision@k.

  If `class_id` is specified, calculate binary true positives for `class_id`
  only. If `class_id` is not specified, calculate metrics for `k` predicted
  vs `n` label classes, where `n` is the 2nd dimension of `labels_sparse`.

  Args:
    labels: `int64` `Tensor` or `SparseTensor` with shape
      [D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
      target classes for the associated prediction. Commonly, N=1 and
      `labels` has shape [batch_size, num_labels]. [D1, ... DN] must match
      `predictions_idx`.
    predictions_idx: 1-D or higher `int64` `Tensor` with last dimension `k`,
      top `k` predicted classes. For rank `n`, the first `n-1` dimensions
      must match `labels`.
    class_id: Class for which we want binary metrics.
    weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
      `labels`. If the latter, it must be broadcastable to `labels`.
    name: Name of operation.

  Returns:
    A [D1, ... DN] `Tensor` of true positive counts.
  """
  with ops.name_scope(name, 'true_positives',
                      (predictions_idx, labels, weights)):
    labels, predictions_idx = _maybe_select_class_id(labels, predictions_idx,
                                                     class_id)
    # Per-example true positives = size of the overlap between the predicted
    # top-k IDs and the label IDs.
    overlap = sets.set_intersection(predictions_idx, labels)
    tp_count = math_ops.to_double(sets.set_size(overlap))
    if weights is not None:
      # Validate broadcastability before using the weights.
      broadcast_check = weights_broadcast_ops.assert_broadcastable(
          weights, tp_count)
      with ops.control_dependencies((broadcast_check,)):
        tp_count = math_ops.multiply(tp_count, math_ops.to_double(weights))
    return tp_count
def _streaming_sparse_true_positive_at_k(labels,
                                         predictions_idx,
                                         k=None,
                                         class_id=None,
                                         weights=None,
                                         name=None):
  """Accumulates weighted true positives for recall@k and precision@k.

  With `class_id` set, only binary true positives for that class are
  accumulated; otherwise the `k` predicted classes are compared against the
  `n` label classes (the 2nd dimension of `labels`).

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask
  values.

  Args:
    labels: `int64` `Tensor` or `SparseTensor` of shape
      [D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
      target classes for the associated prediction. Commonly, N=1 and `labels`
      has shape [batch_size, num_labels]. [D1, ... DN] must match
      `predictions_idx`.
    predictions_idx: 1-D or higher `int64` `Tensor` whose last dimension is
      `k`, the top `k` predicted classes. For rank `n`, the first `n-1`
      dimensions must match `labels`.
    k: Integer, k for @k metric. This is only used for default op name.
    class_id: Class for which we want binary metrics.
    weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
      `labels`. If the latter, it must be broadcastable to `labels` (i.e., all
      dimensions must be either `1`, or the same as the corresponding `labels`
      dimension).
    name: Name of new variable, and namespace for other dependent ops.

  Returns:
    A tuple of `Variable` and update `Operation`.

  Raises:
    ValueError: If `weights` is not `None` and has an incompatible shape.
  """
  with ops.name_scope(name, _at_k_name('true_positive', k, class_id=class_id),
                      (predictions_idx, labels, weights)) as scope:
    batch_tp = _sparse_true_positive_at_k(
        predictions_idx=predictions_idx,
        labels=labels,
        class_id=class_id,
        weights=weights)
    batch_total = math_ops.to_double(math_ops.reduce_sum(batch_tp))
    # Scalar accumulator across batches; each run of the update op folds in
    # this batch's total.
    accumulator = metric_variable([], dtypes.float64, name=scope)
    return accumulator, state_ops.assign_add(
        accumulator, batch_total, name='update')
def _sparse_false_negative_at_k(labels,
                                predictions_idx,
                                class_id=None,
                                weights=None,
                                name=None):
  """Calculates false negatives for recall@k.

  If `class_id` is specified, calculate binary false negatives for `class_id`
  only.
  If `class_id` is not specified, calculate metrics for `k` predicted vs
  `n` label classes, where `n` is the 2nd dimension of `labels`.

  Args:
    labels: `int64` `Tensor` or `SparseTensor` with shape
      [D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
      target classes for the associated prediction. Commonly, N=1 and `labels`
      has shape [batch_size, num_labels]. [D1, ... DN] must match
      `predictions_idx`.
    predictions_idx: 1-D or higher `int64` `Tensor` with last dimension `k`,
      top `k` predicted classes. For rank `n`, the first `n-1` dimensions must
      match `labels`.
    class_id: Class for which we want binary metrics.
    weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
      `labels`. If the latter, it must be broadcastable to `labels` (i.e., all
      dimensions must be either `1`, or the same as the corresponding `labels`
      dimension).
    name: Name of operation. Defaults to 'false_negatives', preserving the
      previous unnamed behavior, and matching `_sparse_true_positive_at_k`.

  Returns:
    A [D1, ... DN] `Tensor` of false negative counts.
  """
  with ops.name_scope(name, 'false_negatives',
                      (predictions_idx, labels, weights)):
    labels, predictions_idx = _maybe_select_class_id(labels, predictions_idx,
                                                     class_id)
    # A false negative is a label class missing from the predictions:
    # `aminusb=False` makes set_difference compute labels - predictions_idx.
    fn = sets.set_size(
        sets.set_difference(predictions_idx, labels, aminusb=False))
    fn = math_ops.to_double(fn)
    if weights is not None:
      with ops.control_dependencies((weights_broadcast_ops.assert_broadcastable(
          weights, fn),)):
        weights = math_ops.to_double(weights)
        fn = math_ops.multiply(fn, weights)
    return fn
def _streaming_sparse_false_negative_at_k(labels,
                                          predictions_idx,
                                          k,
                                          class_id=None,
                                          weights=None,
                                          name=None):
  """Calculates weighted per step false negatives for recall@k.

  If `class_id` is specified, calculate binary false negatives for `class_id`
  only.
  If `class_id` is not specified, calculate metrics for `k` predicted vs
  `n` label classes, where `n` is the 2nd dimension of `labels`.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask
  values.

  Args:
    labels: `int64` `Tensor` or `SparseTensor` with shape
      [D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
      target classes for the associated prediction. Commonly, N=1 and `labels`
      has shape [batch_size, num_labels]. [D1, ... DN] must match
      `predictions_idx`.
    predictions_idx: 1-D or higher `int64` `Tensor` with last dimension `k`,
      top `k` predicted classes. For rank `n`, the first `n-1` dimensions must
      match `labels`.
    k: Integer, k for @k metric. This is only used for default op name.
    class_id: Class for which we want binary metrics.
    weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
      `labels`. If the latter, it must be broadcastable to `labels` (i.e., all
      dimensions must be either `1`, or the same as the corresponding `labels`
      dimension).
    name: Name of new variable, and namespace for other dependent ops.

  Returns:
    A tuple of `Variable` and update `Operation`.

  Raises:
    ValueError: If `weights` is not `None` and has an incompatible shape.
  """
  with ops.name_scope(name, _at_k_name('false_negative', k, class_id=class_id),
                      (predictions_idx, labels, weights)) as scope:
    # Per-batch, per-row weighted false negative counts.
    fn = _sparse_false_negative_at_k(
        predictions_idx=predictions_idx,
        labels=labels,
        class_id=class_id,
        weights=weights)
    batch_total_fn = math_ops.to_double(math_ops.reduce_sum(fn))
    # Scalar accumulator; each run of the update op adds this batch's total.
    var = metric_variable([], dtypes.float64, name=scope)
    return var, state_ops.assign_add(var, batch_total_fn, name='update')
@tf_export('metrics.recall_at_k')
def recall_at_k(labels,
                predictions,
                k,
                class_id=None,
                weights=None,
                metrics_collections=None,
                updates_collections=None,
                name=None):
  """Computes recall@k of the predictions with respect to sparse labels.

  When `class_id` is given, recall is computed over only the batch entries
  whose labels contain `class_id`, as the fraction of those entries for which
  `class_id` appears in the top-k `predictions`. Without `class_id`, recall
  is how often, on average, a label class of a batch entry appears in the
  top-k `predictions`.

  Two local variables back this metric, `true_positive_at_<k>` and
  `false_negative_at_<k>`; the returned `recall_at_<k>` is the idempotent
  ratio `true_positive_at_<k> / (true_positive_at_<k> +
  false_negative_at_<k>)`.

  For estimation of the metric over a stream of data, the returned
  `update_op` computes the top `k` `predictions` via a `top_k` operation,
  derives true positives and false negatives from set operations against
  `labels` (weighted by `weights`), and increments `true_positive_at_<k>`
  and `false_negative_at_<k>` accordingly.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask
  values.

  Args:
    labels: `int64` `Tensor` or `SparseTensor` with shape
      [D1, ... DN, num_labels] or [D1, ... DN] (the latter implies
      num_labels=1), where N >= 1 and num_labels is the number of target
      classes for the associated prediction. [D1, ... DN] must match
      `predictions`. Values should be in range [0, num_classes), where
      num_classes is the last dimension of `predictions`; values outside this
      range always count towards `false_negative_at_<k>`.
    predictions: Float `Tensor` with shape [D1, ... DN, num_classes] where
      N >= 1. The final dimension contains the logit values for each class.
      [D1, ... DN] must match `labels`.
    k: Integer, k for @k metric.
    class_id: Integer class ID for which we want binary metrics. This should
      be in range [0, num_classes), where num_classes is the last dimension
      of `predictions`. If class_id is outside this range, the method returns
      NAN.
    weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
      `labels`. If the latter, it must be broadcastable to `labels` (i.e.,
      all dimensions must be either `1`, or the same as the corresponding
      `labels` dimension).
    metrics_collections: An optional list of collections that values should
      be added to.
    updates_collections: An optional list of collections that updates should
      be added to.
    name: Name of new update operation, and namespace for other dependent
      ops.

  Returns:
    recall: Scalar `float64` `Tensor` with the value of `true_positives`
      divided by the sum of `true_positives` and `false_negatives`.
    update_op: `Operation` that increments `true_positives` and
      `false_negatives` variables appropriately, and whose value matches
      `recall`.

  Raises:
    ValueError: If `weights` is not `None` and its shape doesn't match
      `predictions`, or if either `metrics_collections` or
      `updates_collections` are not a list or tuple.
    RuntimeError: If eager execution is enabled.
  """
  if context.executing_eagerly():
    raise RuntimeError('tf.metrics.recall_at_k is not '
                       'supported when eager execution is enabled.')
  with ops.name_scope(name, _at_k_name('recall', k, class_id=class_id),
                      (predictions, labels, weights)) as scope:
    # Convert logits to top-k class indices; the index-based variant does the
    # actual bookkeeping.
    _, top_k_indices = nn.top_k(predictions, k)
    return recall_at_top_k(
        labels=labels,
        predictions_idx=top_k_indices,
        k=k,
        class_id=class_id,
        weights=weights,
        metrics_collections=metrics_collections,
        updates_collections=updates_collections,
        name=scope)
@tf_export('metrics.recall_at_top_k')
def recall_at_top_k(labels,
                    predictions_idx,
                    k=None,
                    class_id=None,
                    weights=None,
                    metrics_collections=None,
                    updates_collections=None,
                    name=None):
  """Computes recall@k of top-k predictions with respect to sparse labels.

  Differs from `recall_at_k` in that predictions must be in the form of top
  `k` class indices, whereas `recall_at_k` expects logits. Refer to
  `recall_at_k` for more details.

  Args:
    labels: `int64` `Tensor` or `SparseTensor` with shape
      [D1, ... DN, num_labels] or [D1, ... DN], where the latter implies
      num_labels=1. N >= 1 and num_labels is the number of target classes for
      the associated prediction. Commonly, N=1 and `labels` has shape
      [batch_size, num_labels]. [D1, ... DN] must match `predictions`. Values
      should be in range [0, num_classes), where num_classes is the last
      dimension of `predictions`. Values outside this range always count
      towards `false_negative_at_<k>`.
    predictions_idx: Integer `Tensor` with shape [D1, ... DN, k] where N >= 1.
      Commonly, N=1 and predictions has shape [batch size, k]. The final
      dimension contains the top `k` predicted class indices. [D1, ... DN]
      must match `labels`.
    k: Integer, k for @k metric. Only used for the default op name.
    class_id: Integer class ID for which we want binary metrics. This should
      be in range [0, num_classes), where num_classes is the last dimension
      of `predictions`. If class_id is outside this range, the method returns
      NAN.
    weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
      `labels`. If the latter, it must be broadcastable to `labels` (i.e.,
      all dimensions must be either `1`, or the same as the corresponding
      `labels` dimension).
    metrics_collections: An optional list of collections that values should
      be added to.
    updates_collections: An optional list of collections that updates should
      be added to.
    name: Name of new update operation, and namespace for other dependent
      ops.

  Returns:
    recall: Scalar `float64` `Tensor` with the value of `true_positives`
      divided by the sum of `true_positives` and `false_negatives`.
    update_op: `Operation` that increments `true_positives` and
      `false_negatives` variables appropriately, and whose value matches
      `recall`.

  Raises:
    ValueError: If `weights` is not `None` and its shape doesn't match
      `predictions`, or if either `metrics_collections` or
      `updates_collections` are not a list or tuple.
  """
  with ops.name_scope(name, _at_k_name('recall', k, class_id=class_id),
                      (predictions_idx, labels, weights)) as scope:
    # If labels is [D1, ... DN], expand to [D1, ... DN, 1] to match the
    # per-row set semantics of the streaming counters.
    labels = _maybe_expand_labels(labels, predictions_idx)
    top_k_idx = math_ops.to_int64(predictions_idx)
    # Streaming accumulators; tp/fn are the variables, *_update the add ops.
    tp, tp_update = _streaming_sparse_true_positive_at_k(
        predictions_idx=top_k_idx,
        labels=labels,
        k=k,
        class_id=class_id,
        weights=weights)
    fn, fn_update = _streaming_sparse_false_negative_at_k(
        predictions_idx=top_k_idx,
        labels=labels,
        k=k,
        class_id=class_id,
        weights=weights)
    # Recall = tp / (tp + fn), computed once per tower-merge so the metric
    # node (and any collection registration) is created in merge context.
    def aggregate_across_towers(_, tp, fn):
      metric = math_ops.div(tp, math_ops.add(tp, fn), name=scope)
      if metrics_collections:
        ops.add_to_collections(metrics_collections, metric)
      return metric
    metric = distribute_lib.get_tower_context().merge_call(
        aggregate_across_towers, tp, fn)
    # The update op mirrors the metric, but over the freshly-updated counts.
    update = math_ops.div(
        tp_update, math_ops.add(tp_update, fn_update), name='update')
    if updates_collections:
      ops.add_to_collections(updates_collections, update)
    return metric, update
@tf_export('metrics.recall_at_thresholds')
def recall_at_thresholds(labels,
                         predictions,
                         thresholds,
                         weights=None,
                         metrics_collections=None,
                         updates_collections=None,
                         name=None):
  """Computes various recall values for different `thresholds` on `predictions`.

  Four local variables back this metric: `true_positives`, `true_negatives`,
  `false_positives` and `false_negatives`, one entry per threshold.
  `recall[i]` is the total weight of values in `predictions` above
  `thresholds[i]` whose corresponding entry in `labels` is `True`, divided by
  the total weight of `True` values in `labels`
  (`true_positives[i] / (true_positives[i] + false_negatives[i])`).

  For estimation of the metric over a stream of data, the returned
  `update_op` operation updates these variables and returns the `recall`.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask
  values.

  Args:
    labels: The ground truth values, a `Tensor` whose dimensions must match
      `predictions`. Will be cast to `bool`.
    predictions: A floating point `Tensor` of arbitrary shape and whose
      values are in the range `[0, 1]`.
    thresholds: A python list or tuple of float thresholds in `[0, 1]`.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions
      must be either `1`, or the same as the corresponding `labels`
      dimension).
    metrics_collections: An optional list of collections that `recall` should
      be added to.
    updates_collections: An optional list of collections that `update_op`
      should be added to.
    name: An optional variable_scope name.

  Returns:
    recall: A float `Tensor` of shape `[len(thresholds)]`.
    update_op: An operation that increments the `true_positives`,
      `true_negatives`, `false_positives` and `false_negatives` variables
      that are used in the computation of `recall`.

  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or
      if either `metrics_collections` or `updates_collections` are not a list
      or tuple.
    RuntimeError: If eager execution is enabled.
  """
  if context.executing_eagerly():
    raise RuntimeError('tf.metrics.recall_at_thresholds is not '
                       'supported when eager execution is enabled.')
  with variable_scope.variable_scope(name, 'recall_at_thresholds',
                                     (predictions, labels, weights)):
    # Only tp and fn are needed for recall.
    values, update_ops = _confusion_matrix_at_thresholds(
        labels, predictions, thresholds, weights, includes=('tp', 'fn'))
    # Guard against 0 / 0 when a threshold sees no positive labels.
    epsilon = 1e-7

    def compute_recall(tp, fn, name):
      return math_ops.div(tp, epsilon + tp + fn, name='recall_' + name)

    def across_towers(_, conf_values):
      recall_value = compute_recall(
          conf_values['tp'], conf_values['fn'], 'value')
      if metrics_collections:
        ops.add_to_collections(metrics_collections, recall_value)
      return recall_value

    recall_value = distribute_lib.get_tower_context().merge_call(
        across_towers, values)
    update_op = compute_recall(update_ops['tp'], update_ops['fn'], 'update_op')
    if updates_collections:
      ops.add_to_collections(updates_collections, update_op)
    return recall_value, update_op
@tf_export('metrics.root_mean_squared_error')
def root_mean_squared_error(labels,
                            predictions,
                            weights=None,
                            metrics_collections=None,
                            updates_collections=None,
                            name=None):
  """Computes the root mean squared error between the labels and predictions.

  Two local variables, `total` and `count`, accumulate the weighted squared
  error; the returned `root_mean_squared_error` is the idempotent square root
  of the division of `total` by `count`.

  For estimation of the metric over a stream of data, the returned
  `update_op` increments `total` with the reduced sum of the product of
  `weights` and the element-wise squared difference of `predictions` and
  `labels`, and increments `count` with the reduced sum of `weights`.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask
  values.

  Args:
    labels: A `Tensor` of the same shape as `predictions`.
    predictions: A `Tensor` of arbitrary shape.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions
      must be either `1`, or the same as the corresponding `labels`
      dimension).
    metrics_collections: An optional list of collections that
      `root_mean_squared_error` should be added to.
    updates_collections: An optional list of collections that `update_op`
      should be added to.
    name: An optional variable_scope name.

  Returns:
    root_mean_squared_error: A `Tensor` representing the current metric, the
      square root of `total` divided by `count`.
    update_op: An operation that increments the `total` and `count` variables
      appropriately and whose value matches `root_mean_squared_error`.

  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or
      if either `metrics_collections` or `updates_collections` are not a list
      or tuple.
    RuntimeError: If eager execution is enabled.
  """
  if context.executing_eagerly():
    raise RuntimeError('tf.metrics.root_mean_squared_error is not '
                       'supported when eager execution is enabled.')
  predictions, labels, weights = _remove_squeezable_dimensions(
      predictions=predictions, labels=labels, weights=weights)
  # Delegate the accumulation to the MSE metric; RMSE is a sqrt on top.
  mse, update_mse_op = mean_squared_error(labels, predictions, weights, None,
                                          None, name or
                                          'root_mean_squared_error')

  def across_towers(_, mse_value):
    result = math_ops.sqrt(mse_value)
    if metrics_collections:
      ops.add_to_collections(metrics_collections, result)
    return result

  rmse = distribute_lib.get_tower_context().merge_call(across_towers, mse)
  update_rmse_op = math_ops.sqrt(update_mse_op)
  if updates_collections:
    ops.add_to_collections(updates_collections, update_rmse_op)
  return rmse, update_rmse_op
@tf_export('metrics.sensitivity_at_specificity')
def sensitivity_at_specificity(labels,
                               predictions,
                               specificity,
                               weights=None,
                               num_thresholds=200,
                               metrics_collections=None,
                               updates_collections=None,
                               name=None):
  """Computes the sensitivity at a given specificity.

  The `sensitivity_at_specificity` function creates four local
  variables, `true_positives`, `true_negatives`, `false_positives` and
  `false_negatives` that are used to compute the sensitivity at the given
  specificity value. The threshold for the given specificity value is
  computed and used to evaluate the corresponding sensitivity.

  For estimation of the metric over a stream of data, the function creates an
  `update_op` operation that updates these variables and returns the
  `sensitivity`. `update_op` increments the `true_positives`,
  `true_negatives`, `false_positives` and `false_negatives` counts with the
  weight of each case found in the `predictions` and `labels`.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask
  values.

  For additional information about specificity and sensitivity, see the
  following: https://en.wikipedia.org/wiki/Sensitivity_and_specificity

  Args:
    labels: The ground truth values, a `Tensor` whose dimensions must match
      `predictions`. Will be cast to `bool`.
    predictions: A floating point `Tensor` of arbitrary shape and whose
      values are in the range `[0, 1]`.
    specificity: A scalar value in range `[0, 1]`.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions
      must be either `1`, or the same as the corresponding `labels`
      dimension).
    num_thresholds: The number of thresholds to use for matching the given
      specificity.
    metrics_collections: An optional list of collections that `sensitivity`
      should be added to.
    updates_collections: An optional list of collections that `update_op`
      should be added to.
    name: An optional variable_scope name.

  Returns:
    sensitivity: A scalar `Tensor` representing the sensitivity at the given
      `specificity` value.
    update_op: An operation that increments the `true_positives`,
      `true_negatives`, `false_positives` and `false_negatives` variables
      appropriately and whose value matches `sensitivity`.

  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, if
      `weights` is not `None` and its shape doesn't match `predictions`, or
      if `specificity` is not between 0 and 1, or if either
      `metrics_collections` or `updates_collections` are not a list or tuple.
    RuntimeError: If eager execution is enabled.
  """
  if context.executing_eagerly():
    raise RuntimeError('tf.metrics.sensitivity_at_specificity is not '
                       'supported when eager execution is enabled.')
  if specificity < 0 or specificity > 1:
    raise ValueError('`specificity` must be in the range [0, 1].')
  with variable_scope.variable_scope(name, 'sensitivity_at_specificity',
                                     (predictions, labels, weights)):
    kepsilon = 1e-7  # to account for floating point imprecisions
    # Evenly spaced interior thresholds in (0, 1), plus endpoints nudged just
    # outside [0, 1] so every prediction falls inside the threshold range.
    thresholds = [
        (i + 1) * 1.0 / (num_thresholds - 1) for i in range(num_thresholds - 2)
    ]
    thresholds = [0.0 - kepsilon] + thresholds + [1.0 + kepsilon]
    values, update_ops = _confusion_matrix_at_thresholds(
        labels, predictions, thresholds, weights)
    def compute_sensitivity_at_specificity(tp, tn, fp, fn, name):
      # specificity = tn / (tn + fp); find the threshold whose specificity is
      # closest to the requested value, then report the sensitivity there.
      specificities = math_ops.div(tn, tn + fp + kepsilon)
      tf_index = math_ops.argmin(math_ops.abs(specificities - specificity), 0)
      tf_index = math_ops.cast(tf_index, dtypes.int32)
      # Now, we have the implicit threshold, so compute the sensitivity:
      return math_ops.div(tp[tf_index], tp[tf_index] + fn[tf_index] + kepsilon,
                          name)
    def aggregate_across_towers(_, values):
      sensitivity = compute_sensitivity_at_specificity(
          values['tp'], values['tn'], values['fp'], values['fn'], 'value')
      if metrics_collections:
        ops.add_to_collections(metrics_collections, sensitivity)
      return sensitivity
    sensitivity = distribute_lib.get_tower_context().merge_call(
        aggregate_across_towers, values)
    update_op = compute_sensitivity_at_specificity(
        update_ops['tp'], update_ops['tn'], update_ops['fp'], update_ops['fn'],
        'update_op')
    if updates_collections:
      ops.add_to_collections(updates_collections, update_op)
    return sensitivity, update_op
def _expand_and_tile(tensor, multiple, dim=0, name=None):
  """Slice `tensor` shape in 2, then tile along the sliced dimension.

  A new dimension is inserted in shape of `tensor` before `dim`, then values
  are tiled `multiple` times along the new dimension.

  Args:
    tensor: Input `Tensor` or `SparseTensor`.
    multiple: Integer, number of times to tile.
    dim: Integer, dimension along which to tile.
    name: Name of operation.

  Returns:
    `Tensor` result of expanding and tiling `tensor`.

  Raises:
    ValueError: if `multiple` is less than 1, or `dim` is not in
    `[-rank(tensor), rank(tensor)]`.
  """
  if multiple < 1:
    raise ValueError('Invalid multiple %s, must be > 0.' % multiple)
  with ops.name_scope(name, 'expand_and_tile',
                      (tensor, multiple, dim)) as scope:
    # Sparse.
    tensor = sparse_tensor.convert_to_tensor_or_sparse_tensor(tensor)
    if isinstance(tensor, sparse_tensor.SparseTensor):
      if dim < 0:
        # Resolve a negative axis against the dynamic rank (the length of
        # `dense_shape`), keeping the result as a 1-element vector for slice.
        expand_dims = array_ops.reshape(
            array_ops.size(tensor.dense_shape) + dim, [1])
      else:
        expand_dims = [dim]
      # New shape = dims before `dim` + [1] + dims from `dim` onward.
      expanded_shape = array_ops.concat(
          (array_ops.slice(tensor.dense_shape, [0], expand_dims), [1],
           array_ops.slice(tensor.dense_shape, expand_dims, [-1])),
          0,
          name='expanded_shape')
      expanded = sparse_ops.sparse_reshape(
          tensor, shape=expanded_shape, name='expand')
      if multiple == 1:
        return expanded
      # Sparse tiling: concatenate `multiple` copies along the inserted axis.
      return sparse_ops.sparse_concat(
          dim - 1 if dim < 0 else dim, [expanded] * multiple, name=scope)
    # Dense.
    # For negative `dim`, expand_dims interprets the axis relative to the
    # *output* rank (one larger), hence the `dim - 1` adjustment.
    expanded = array_ops.expand_dims(
        tensor, dim if (dim >= 0) else (dim - 1), name='expand')
    if multiple == 1:
      return expanded
    # Tile `multiple` times along the new axis; all other multiples are 1.
    ones = array_ops.ones_like(array_ops.shape(tensor))
    tile_multiples = array_ops.concat(
        (ones[:dim], (multiple,), ones[dim:]), 0, name='multiples')
    return array_ops.tile(expanded, tile_multiples, name=scope)
def _num_relevant(labels, k):
  """Computes the number of relevant values for each row in labels.

  For labels with shape [D1, ... DN, num_labels], this is the minimum of
  `num_labels` and `k` for each row.

  Args:
    labels: `int64` `Tensor` or `SparseTensor` with shape
      [D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
      target classes for the associated prediction. Commonly, N=1 and
      `labels` has shape [batch_size, num_labels].
    k: Integer, k for @k metric.

  Returns:
    Integer `Tensor` of shape [D1, ... DN], where each value is the number of
    relevant values for that row.

  Raises:
    ValueError: if inputs have invalid dtypes or values.
  """
  if k < 1:
    raise ValueError('Invalid k=%s.' % k)
  with ops.name_scope(None, 'num_relevant', (labels,)) as scope:
    labels = sparse_tensor.convert_to_tensor_or_sparse_tensor(labels)
    # Sparse rows can differ in length, so count each row individually.
    if isinstance(labels, sparse_tensor.SparseTensor):
      return math_ops.minimum(sets.set_size(labels), k, name=scope)
    # Dense rows all share the last-dimension size; compute one scalar count
    # and broadcast it across the leading dimensions.
    shape = array_ops.shape(labels)
    per_row_count = math_ops.minimum(shape[-1], k)
    return array_ops.fill(shape[0:-1], per_row_count, name=scope)
def _sparse_average_precision_at_top_k(labels, predictions_idx):
  """Computes average precision@k of predictions with respect to sparse labels.

  From en.wikipedia.org/wiki/Information_retrieval#Average_precision, formula
  for each row is:

    AveP = sum_{i=1...k} P_{i} * rel_{i} / num_relevant_items

  A "row" is the elements in dimension [D1, ... DN] of `predictions_idx`,
  `labels`, and the result `Tensors`. In the common case, this is
  [batch_size]. Each row of the results contains the average precision for
  that row.

  Args:
    labels: `int64` `Tensor` or `SparseTensor` with shape
      [D1, ... DN, num_labels] or [D1, ... DN], where the latter implies
      num_labels=1. N >= 1 and num_labels is the number of target classes for
      the associated prediction. Commonly, N=1 and `labels` has shape
      [batch_size, num_labels]. [D1, ... DN] must match `predictions_idx`.
      Values should be in range [0, num_classes).
    predictions_idx: Integer `Tensor` with shape [D1, ... DN, k] where N >= 1.
      Commonly, N=1 and `predictions_idx` has shape [batch size, k]. The
      final dimension must be set and contains the top `k` predicted class
      indices. [D1, ... DN] must match `labels`. Values should be in range
      [0, num_classes).

  Returns:
    `float64` `Tensor` of shape [D1, ... DN], where each value is the average
    precision for that row.

  Raises:
    ValueError: if the last dimension of predictions_idx is not set.
  """
  with ops.name_scope(None, 'average_precision',
                      (predictions_idx, labels)) as scope:
    predictions_idx = math_ops.to_int64(predictions_idx, name='predictions_idx')
    if predictions_idx.get_shape().ndims == 0:
      raise ValueError('The rank of predictions_idx must be at least 1.')
    # `k` must be statically known, since it determines the tile count below.
    k = predictions_idx.get_shape().as_list()[-1]
    if k is None:
      raise ValueError('The last dimension of predictions_idx must be set.')
    labels = _maybe_expand_labels(labels, predictions_idx)
    # Expand dims to produce [D1, ... DN, k, 1] tensor. This gives us a separate
    # prediction for each k, so we can calculate separate true positive values
    # for each k.
    predictions_idx_per_k = array_ops.expand_dims(
        predictions_idx, -1, name='predictions_idx_per_k')
    # Replicate labels k times to produce [D1, ... DN, k, num_labels] tensor.
    labels_per_k = _expand_and_tile(
        labels, multiple=k, dim=-1, name='labels_per_k')
    # The following tensors are all of shape [D1, ... DN, k], containing values
    # per row, per k value.
    # `relevant_per_k` (int32) - Relevance indicator, 1 if the prediction at
    #     that k value is correct, 0 otherwise. This is the "rel_{i}" term from
    #     the formula above.
    # `tp_per_k` (int32) - True positive counts.
    # `retrieved_per_k` (int32) - Number of predicted values at each k. This is
    #     the precision denominator.
    # `precision_per_k` (float64) - Precision at each k. This is the "P_{i}"
    #     term from the formula above.
    # `relevant_precision_per_k` (float64) - Relevant precisions; i.e.,
    #     precisions at all k for which relevance indicator is true.
    relevant_per_k = _sparse_true_positive_at_k(
        labels_per_k, predictions_idx_per_k, name='relevant_per_k')
    # Cumulative sums along k turn per-position indicators into @1..@k counts.
    tp_per_k = math_ops.cumsum(relevant_per_k, axis=-1, name='tp_per_k')
    retrieved_per_k = math_ops.cumsum(
        array_ops.ones_like(relevant_per_k), axis=-1, name='retrieved_per_k')
    precision_per_k = math_ops.div(
        math_ops.to_double(tp_per_k),
        math_ops.to_double(retrieved_per_k),
        name='precision_per_k')
    relevant_precision_per_k = math_ops.multiply(
        precision_per_k,
        math_ops.to_double(relevant_per_k),
        name='relevant_precision_per_k')
    # Reduce along k dimension to get the sum, yielding a [D1, ... DN] tensor.
    # (`reduction_indices` is the legacy alias of `axis`.)
    precision_sum = math_ops.reduce_sum(
        relevant_precision_per_k, reduction_indices=(-1,), name='precision_sum')
    # Divide by number of relevant items to get average precision. These are
    # the "num_relevant_items" and "AveP" terms from the formula above.
    num_relevant_items = math_ops.to_double(_num_relevant(labels, k))
    return math_ops.div(precision_sum, num_relevant_items, name=scope)
def _streaming_sparse_average_precision_at_top_k(labels,
                                                 predictions_idx,
                                                 weights=None,
                                                 metrics_collections=None,
                                                 updates_collections=None,
                                                 name=None):
  """Computes average precision@k of predictions with respect to sparse labels.

  `sparse_average_precision_at_top_k` creates two local variables,
  `average_precision_at_<k>/total` and `average_precision_at_<k>/max`, that
  are used to compute the frequency. This frequency is ultimately returned as
  `average_precision_at_<k>`: an idempotent operation that simply divides
  `average_precision_at_<k>/total` by `average_precision_at_<k>/max`.

  For estimation of the metric over a stream of data, the function creates an
  `update_op` operation that updates these variables and returns the
  `precision_at_<k>`. Set operations applied to `top_k` and `labels`
  calculate the true positives and false positives weighted by `weights`.
  Then `update_op` increments `true_positive_at_<k>` and
  `false_positive_at_<k>` using these values.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask
  values.

  Args:
    labels: `int64` `Tensor` or `SparseTensor` with shape
      [D1, ... DN, num_labels] or [D1, ... DN], where the latter implies
      num_labels=1. N >= 1 and num_labels is the number of target classes for
      the associated prediction. Commonly, N=1 and `labels` has shape
      [batch_size, num_labels]. [D1, ... DN] must match `predictions_idx`.
      Values should be in range [0, num_classes).
    predictions_idx: Integer `Tensor` with shape [D1, ... DN, k] where N >= 1.
      Commonly, N=1 and `predictions_idx` has shape [batch size, k]. The
      final dimension contains the top `k` predicted class indices.
      [D1, ... DN] must match `labels`. Values should be in range
      [0, num_classes).
    weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
      `labels`. If the latter, it must be broadcastable to `labels` (i.e.,
      all dimensions must be either `1`, or the same as the corresponding
      `labels` dimension).
    metrics_collections: An optional list of collections that values should
      be added to.
    updates_collections: An optional list of collections that updates should
      be added to.
    name: Name of new update operation, and namespace for other dependent
      ops.

  Returns:
    mean_average_precision: Scalar `float64` `Tensor` with the mean average
      precision values.
    update: `Operation` that increments variables appropriately, and whose
      value matches `metric`.
  """
  with ops.name_scope(name, 'average_precision_at_top_k',
                      (predictions_idx, labels, weights)) as scope:
    # Calculate per-example average precision, and apply weights.
    average_precision = _sparse_average_precision_at_top_k(
        predictions_idx=predictions_idx, labels=labels)
    if weights is not None:
      weights = weights_broadcast_ops.broadcast_weights(
          math_ops.to_double(weights), average_precision)
      average_precision = math_ops.multiply(average_precision, weights)
    # Create accumulation variables and update ops for max average precision and
    # total average precision.
    with ops.name_scope(None, 'max', (average_precision,)) as max_scope:
      # `max` is the max possible precision. Since max for any row is 1.0:
      # - For the unweighted case, this is just the number of rows.
      # - For the weighted case, it's the sum of the weights broadcast across
      #   `average_precision` rows.
      max_var = metric_variable([], dtypes.float64, name=max_scope)
      if weights is None:
        batch_max = math_ops.to_double(
            array_ops.size(average_precision, name='batch_max'))
      else:
        batch_max = math_ops.reduce_sum(weights, name='batch_max')
      max_update = state_ops.assign_add(max_var, batch_max, name='update')
    with ops.name_scope(None, 'total', (average_precision,)) as total_scope:
      total_var = metric_variable([], dtypes.float64, name=total_scope)
      batch_total = math_ops.reduce_sum(average_precision, name='batch_total')
      total_update = state_ops.assign_add(total_var, batch_total, name='update')
    # Divide total by max to get mean, for both vars and the update ops.
    def aggregate_across_towers(_, total_var, max_var):
      mean_average_precision = _safe_scalar_div(total_var, max_var, name='mean')
      if metrics_collections:
        ops.add_to_collections(metrics_collections, mean_average_precision)
      return mean_average_precision
    mean_average_precision = distribute_lib.get_tower_context().merge_call(
        aggregate_across_towers, total_var, max_var)
    # The streaming update divides the post-increment totals, so its value
    # matches the metric after this batch is folded in.
    update = _safe_scalar_div(total_update, max_update, name=scope)
    if updates_collections:
      ops.add_to_collections(updates_collections, update)
    return mean_average_precision, update
@tf_export('metrics.sparse_average_precision_at_k')
@deprecated(None, 'Use average_precision_at_k instead')
def sparse_average_precision_at_k(labels,
                                  predictions,
                                  k,
                                  weights=None,
                                  metrics_collections=None,
                                  updates_collections=None,
                                  name=None):
  """Renamed to `average_precision_at_k`, please use that method instead."""
  # Deprecated thin wrapper: forward every argument, unchanged, to the
  # renamed implementation.
  forwarded_kwargs = dict(
      labels=labels,
      predictions=predictions,
      k=k,
      weights=weights,
      metrics_collections=metrics_collections,
      updates_collections=updates_collections,
      name=name)
  return average_precision_at_k(**forwarded_kwargs)
@tf_export('metrics.average_precision_at_k')
def average_precision_at_k(labels,
                           predictions,
                           k,
                           weights=None,
                           metrics_collections=None,
                           updates_collections=None,
                           name=None):
  """Computes average precision@k of predictions with respect to sparse labels.
  `average_precision_at_k` creates two local variables,
  `average_precision_at_<k>/total` and `average_precision_at_<k>/max`, that
  are used to compute the frequency. This frequency is ultimately returned as
  `average_precision_at_<k>`: an idempotent operation that simply divides
  `average_precision_at_<k>/total` by `average_precision_at_<k>/max`.
  For estimation of the metric over a stream of data, the function creates an
  `update_op` operation that updates these variables and returns the
  `precision_at_<k>`. Internally, a `top_k` operation computes a `Tensor`
  indicating the top `k` `predictions`. Set operations applied to `top_k` and
  `labels` calculate the true positives and false positives weighted by
  `weights`. Then `update_op` increments `true_positive_at_<k>` and
  `false_positive_at_<k>` using these values.
  If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
  Args:
    labels: `int64` `Tensor` or `SparseTensor` with shape
      [D1, ... DN, num_labels] or [D1, ... DN], where the latter implies
      num_labels=1. N >= 1 and num_labels is the number of target classes for
      the associated prediction. Commonly, N=1 and `labels` has shape
      [batch_size, num_labels]. [D1, ... DN] must match `predictions`. Values
      should be in range [0, num_classes), where num_classes is the last
      dimension of `predictions`. Values outside this range are ignored.
    predictions: Float `Tensor` with shape [D1, ... DN, num_classes] where
      N >= 1. Commonly, N=1 and `predictions` has shape
      [batch size, num_classes]. The final dimension contains the logit values
      for each class. [D1, ... DN] must match `labels`.
    k: Integer, k for @k metric. This will calculate an average precision for
      range `[1,k]`, as documented above.
    weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
      `labels`. If the latter, it must be broadcastable to `labels` (i.e., all
      dimensions must be either `1`, or the same as the corresponding `labels`
      dimension).
    metrics_collections: An optional list of collections that values should
      be added to.
    updates_collections: An optional list of collections that updates should
      be added to.
    name: Name of new update operation, and namespace for other dependent ops.
  Returns:
    mean_average_precision: Scalar `float64` `Tensor` with the mean average
      precision values.
    update: `Operation` that increments variables appropriately, and whose
      value matches `metric`.
  Raises:
    ValueError: if k is invalid.
    RuntimeError: If eager execution is enabled.
  """
  if context.executing_eagerly():
    # Fix: the message previously named the deprecated alias
    # `sparse_average_precision_at_k`, which is misleading to callers of this
    # (renamed) endpoint.
    raise RuntimeError('tf.metrics.average_precision_at_k is not '
                       'supported when eager execution is enabled.')
  if k < 1:
    raise ValueError('Invalid k=%s.' % k)
  with ops.name_scope(name, _at_k_name('average_precision', k),
                      (predictions, labels, weights)) as scope:
    # Calculate top k indices to produce [D1, ... DN, k] tensor.
    _, predictions_idx = nn.top_k(predictions, k)
    return _streaming_sparse_average_precision_at_top_k(
        labels=labels,
        predictions_idx=predictions_idx,
        weights=weights,
        metrics_collections=metrics_collections,
        updates_collections=updates_collections,
        name=scope)
def _sparse_false_positive_at_k(labels,
                                predictions_idx,
                                class_id=None,
                                weights=None):
  """Calculates false positives for precision@k.
  If `class_id` is specified, calculate binary false positives for `class_id`
  only.
  If `class_id` is not specified, calculate metrics for `k` predicted vs
  `n` label classes, where `n` is the 2nd dimension of `labels_sparse`.
  Args:
    labels: `int64` `Tensor` or `SparseTensor` with shape
      [D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
      target classes for the associated prediction. Commonly, N=1 and `labels`
      has shape [batch_size, num_labels]. [D1, ... DN] must match
      `predictions_idx`.
    predictions_idx: 1-D or higher `int64` `Tensor` with last dimension `k`,
      top `k` predicted classes. For rank `n`, the first `n-1` dimensions must
      match `labels`.
    class_id: Class for which we want binary metrics.
    weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
      `labels`. If the latter, it must be broadcastable to `labels` (i.e., all
      dimensions must be either `1`, or the same as the corresponding `labels`
      dimension).
  Returns:
    A [D1, ... DN] `Tensor` of false positive counts.
  """
  with ops.name_scope(None, 'false_positives',
                      (predictions_idx, labels, weights)):
    labels, predictions_idx = _maybe_select_class_id(labels, predictions_idx,
                                                     class_id)
    # False positives are predicted classes that do not appear in the labels:
    # |predictions_idx \ labels| per example.
    fp = sets.set_size(
        sets.set_difference(predictions_idx, labels, aminusb=True))
    fp = math_ops.to_double(fp)
    if weights is not None:
      with ops.control_dependencies((weights_broadcast_ops.assert_broadcastable(
          weights, fp),)):
        weights = math_ops.to_double(weights)
        fp = math_ops.multiply(fp, weights)
    return fp
def _streaming_sparse_false_positive_at_k(labels,
                                          predictions_idx,
                                          k=None,
                                          class_id=None,
                                          weights=None,
                                          name=None):
  """Calculates weighted per step false positives for precision@k.
  If `class_id` is specified, calculate binary false positives for `class_id`
  only.
  If `class_id` is not specified, calculate metrics for `k` predicted vs
  `n` label classes, where `n` is the 2nd dimension of `labels`.
  If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
  Args:
    labels: `int64` `Tensor` or `SparseTensor` with shape
      [D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
      target classes for the associated prediction. Commonly, N=1 and `labels`
      has shape [batch_size, num_labels]. [D1, ... DN] must match
      `predictions_idx`.
    predictions_idx: 1-D or higher `int64` `Tensor` with last dimension `k`,
      top `k` predicted classes. For rank `n`, the first `n-1` dimensions must
      match `labels`.
    k: Integer, k for @k metric. This is only used for default op name.
    class_id: Class for which we want binary metrics.
    weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
      `labels`. If the latter, it must be broadcastable to `labels` (i.e., all
      dimensions must be either `1`, or the same as the corresponding `labels`
      dimension).
    name: Name of new variable, and namespace for other dependent ops.
  Returns:
    A tuple of `Variable` and update `Operation`.
  Raises:
    ValueError: If `weights` is not `None` and has an incompatible shape.
  """
  with ops.name_scope(name, _at_k_name('false_positive', k, class_id=class_id),
                      (predictions_idx, labels, weights)) as scope:
    fp = _sparse_false_positive_at_k(
        predictions_idx=predictions_idx,
        labels=labels,
        class_id=class_id,
        weights=weights)
    batch_total_fp = math_ops.to_double(math_ops.reduce_sum(fp))
    # Accumulate the running total of false positives across batches in a
    # local metric variable; 'update' adds this batch's count.
    var = metric_variable([], dtypes.float64, name=scope)
    return var, state_ops.assign_add(var, batch_total_fp, name='update')
@tf_export('metrics.precision_at_top_k')
def precision_at_top_k(labels,
                       predictions_idx,
                       k=None,
                       class_id=None,
                       weights=None,
                       metrics_collections=None,
                       updates_collections=None,
                       name=None):
  """Computes precision@k of the predictions with respect to sparse labels.
  Differs from `sparse_precision_at_k` in that predictions must be in the form
  of top `k` class indices, whereas `sparse_precision_at_k` expects logits.
  Refer to `sparse_precision_at_k` for more details.
  Args:
    labels: `int64` `Tensor` or `SparseTensor` with shape
      [D1, ... DN, num_labels] or [D1, ... DN], where the latter implies
      num_labels=1. N >= 1 and num_labels is the number of target classes for
      the associated prediction. Commonly, N=1 and `labels` has shape
      [batch_size, num_labels]. [D1, ... DN] must match `predictions`. Values
      should be in range [0, num_classes), where num_classes is the last
      dimension of `predictions`. Values outside this range are ignored.
    predictions_idx: Integer `Tensor` with shape [D1, ... DN, k] where
      N >= 1. Commonly, N=1 and predictions has shape [batch size, k].
      The final dimension contains the top `k` predicted class indices.
      [D1, ... DN] must match `labels`.
    k: Integer, k for @k metric. Only used for the default op name.
    class_id: Integer class ID for which we want binary metrics. This should be
      in range [0, num_classes], where num_classes is the last dimension of
      `predictions`. If `class_id` is outside this range, the method returns
      NAN.
    weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
      `labels`. If the latter, it must be broadcastable to `labels` (i.e., all
      dimensions must be either `1`, or the same as the corresponding `labels`
      dimension).
    metrics_collections: An optional list of collections that values should
      be added to.
    updates_collections: An optional list of collections that updates should
      be added to.
    name: Name of new update operation, and namespace for other dependent ops.
  Returns:
    precision: Scalar `float64` `Tensor` with the value of `true_positives`
      divided by the sum of `true_positives` and `false_positives`.
    update_op: `Operation` that increments `true_positives` and
      `false_positives` variables appropriately, and whose value matches
      `precision`.
  Raises:
    ValueError: If `weights` is not `None` and its shape doesn't match
      `predictions`, or if either `metrics_collections` or `updates_collections`
      are not a list or tuple.
    RuntimeError: If eager execution is enabled.
  """
  if context.executing_eagerly():
    raise RuntimeError('tf.metrics.precision_at_top_k is not '
                       'supported when eager execution is enabled.')
  with ops.name_scope(name, _at_k_name('precision', k, class_id=class_id),
                      (predictions_idx, labels, weights)) as scope:
    # Expand [D1, ... DN] labels to [D1, ... DN, 1] if needed so the set
    # operations below see matching ranks.
    labels = _maybe_expand_labels(labels, predictions_idx)
    # Set operations below require int64 indices.
    top_k_idx = math_ops.to_int64(predictions_idx)
    tp, tp_update = _streaming_sparse_true_positive_at_k(
        predictions_idx=top_k_idx,
        labels=labels,
        k=k,
        class_id=class_id,
        weights=weights)
    fp, fp_update = _streaming_sparse_false_positive_at_k(
        predictions_idx=top_k_idx,
        labels=labels,
        k=k,
        class_id=class_id,
        weights=weights)
    def aggregate_across_towers(_, tp, fp):
      # Runs in cross-tower context under a distribution strategy:
      # precision = tp / (tp + fp), computed over the aggregated counts.
      metric = math_ops.div(tp, math_ops.add(tp, fp), name=scope)
      if metrics_collections:
        ops.add_to_collections(metrics_collections, metric)
      return metric
    metric = distribute_lib.get_tower_context().merge_call(
        aggregate_across_towers, tp, fp)
    update = math_ops.div(
        tp_update, math_ops.add(tp_update, fp_update), name='update')
    if updates_collections:
      ops.add_to_collections(updates_collections, update)
    return metric, update
@tf_export('metrics.sparse_precision_at_k')
@deprecated(None, 'Use precision_at_k instead')
def sparse_precision_at_k(labels,
                          predictions,
                          k,
                          class_id=None,
                          weights=None,
                          metrics_collections=None,
                          updates_collections=None,
                          name=None):
  """Renamed to `precision_at_k`, please use that method instead."""
  # Deprecated thin wrapper: forward every argument, unchanged, to the
  # renamed implementation.
  forwarded_kwargs = dict(
      labels=labels,
      predictions=predictions,
      k=k,
      class_id=class_id,
      weights=weights,
      metrics_collections=metrics_collections,
      updates_collections=updates_collections,
      name=name)
  return precision_at_k(**forwarded_kwargs)
@tf_export('metrics.precision_at_k')
def precision_at_k(labels,
                   predictions,
                   k,
                   class_id=None,
                   weights=None,
                   metrics_collections=None,
                   updates_collections=None,
                   name=None):
  """Computes precision@k of the predictions with respect to sparse labels.
  If `class_id` is specified, we calculate precision by considering only the
  entries in the batch for which `class_id` is in the top-k highest
  `predictions`, and computing the fraction of them for which `class_id` is
  indeed a correct label.
  If `class_id` is not specified, we'll calculate precision as how often on
  average a class among the top-k classes with the highest predicted values
  of a batch entry is correct and can be found in the label for that entry.
  `precision_at_k` creates two local variables,
  `true_positive_at_<k>` and `false_positive_at_<k>`, that are used to compute
  the precision@k frequency. This frequency is ultimately returned as
  `precision_at_<k>`: an idempotent operation that simply divides
  `true_positive_at_<k>` by total (`true_positive_at_<k>` +
  `false_positive_at_<k>`).
  For estimation of the metric over a stream of data, the function creates an
  `update_op` operation that updates these variables and returns the
  `precision_at_<k>`. Internally, a `top_k` operation computes a `Tensor`
  indicating the top `k` `predictions`. Set operations applied to `top_k` and
  `labels` calculate the true positives and false positives weighted by
  `weights`. Then `update_op` increments `true_positive_at_<k>` and
  `false_positive_at_<k>` using these values.
  If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
  Args:
    labels: `int64` `Tensor` or `SparseTensor` with shape
      [D1, ... DN, num_labels] or [D1, ... DN], where the latter implies
      num_labels=1. N >= 1 and num_labels is the number of target classes for
      the associated prediction. Commonly, N=1 and `labels` has shape
      [batch_size, num_labels]. [D1, ... DN] must match `predictions`. Values
      should be in range [0, num_classes), where num_classes is the last
      dimension of `predictions`. Values outside this range are ignored.
    predictions: Float `Tensor` with shape [D1, ... DN, num_classes] where
      N >= 1. Commonly, N=1 and predictions has shape [batch size, num_classes].
      The final dimension contains the logit values for each class. [D1, ... DN]
      must match `labels`.
    k: Integer, k for @k metric.
    class_id: Integer class ID for which we want binary metrics. This should be
      in range [0, num_classes], where num_classes is the last dimension of
      `predictions`. If `class_id` is outside this range, the method returns
      NAN.
    weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
      `labels`. If the latter, it must be broadcastable to `labels` (i.e., all
      dimensions must be either `1`, or the same as the corresponding `labels`
      dimension).
    metrics_collections: An optional list of collections that values should
      be added to.
    updates_collections: An optional list of collections that updates should
      be added to.
    name: Name of new update operation, and namespace for other dependent ops.
  Returns:
    precision: Scalar `float64` `Tensor` with the value of `true_positives`
      divided by the sum of `true_positives` and `false_positives`.
    update_op: `Operation` that increments `true_positives` and
      `false_positives` variables appropriately, and whose value matches
      `precision`.
  Raises:
    ValueError: If `weights` is not `None` and its shape doesn't match
      `predictions`, or if either `metrics_collections` or `updates_collections`
      are not a list or tuple.
    RuntimeError: If eager execution is enabled.
  """
  if context.executing_eagerly():
    # Fix: the message previously named the deprecated alias
    # `sparse_precision_at_k`, which is misleading to callers of this
    # (renamed) endpoint.
    raise RuntimeError('tf.metrics.precision_at_k is not '
                       'supported when eager execution is enabled.')
  with ops.name_scope(name, _at_k_name('precision', k, class_id=class_id),
                      (predictions, labels, weights)) as scope:
    # Compute the top-k predicted class indices, then defer to the
    # index-based implementation.
    _, top_k_idx = nn.top_k(predictions, k)
    return precision_at_top_k(
        labels=labels,
        predictions_idx=top_k_idx,
        k=k,
        class_id=class_id,
        weights=weights,
        metrics_collections=metrics_collections,
        updates_collections=updates_collections,
        name=scope)
@tf_export('metrics.specificity_at_sensitivity')
def specificity_at_sensitivity(labels,
                               predictions,
                               sensitivity,
                               weights=None,
                               num_thresholds=200,
                               metrics_collections=None,
                               updates_collections=None,
                               name=None):
  """Computes the specificity at a given sensitivity.
  The `specificity_at_sensitivity` function creates four local
  variables, `true_positives`, `true_negatives`, `false_positives` and
  `false_negatives` that are used to compute the specificity at the given
  sensitivity value. The threshold for the given sensitivity value is computed
  and used to evaluate the corresponding specificity.
  For estimation of the metric over a stream of data, the function creates an
  `update_op` operation that updates these variables and returns the
  `specificity`. `update_op` increments the `true_positives`, `true_negatives`,
  `false_positives` and `false_negatives` counts with the weight of each case
  found in the `predictions` and `labels`.
  If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
  For additional information about specificity and sensitivity, see the
  following: https://en.wikipedia.org/wiki/Sensitivity_and_specificity
  Args:
    labels: The ground truth values, a `Tensor` whose dimensions must match
      `predictions`. Will be cast to `bool`.
    predictions: A floating point `Tensor` of arbitrary shape and whose values
      are in the range `[0, 1]`.
    sensitivity: A scalar value in range `[0, 1]`.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions must
      be either `1`, or the same as the corresponding `labels` dimension).
    num_thresholds: The number of thresholds to use for matching the given
      sensitivity.
    metrics_collections: An optional list of collections that `specificity`
      should be added to.
    updates_collections: An optional list of collections that `update_op` should
      be added to.
    name: An optional variable_scope name.
  Returns:
    specificity: A scalar `Tensor` representing the specificity at the given
      `sensitivity` value.
    update_op: An operation that increments the `true_positives`,
      `true_negatives`, `false_positives` and `false_negatives` variables
      appropriately and whose value matches `specificity`.
  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, if
      `weights` is not `None` and its shape doesn't match `predictions`, or if
      `sensitivity` is not between 0 and 1, or if either `metrics_collections`
      or `updates_collections` are not a list or tuple.
    RuntimeError: If eager execution is enabled.
  """
  if context.executing_eagerly():
    raise RuntimeError('tf.metrics.specificity_at_sensitivity is not '
                       'supported when eager execution is enabled.')
  if sensitivity < 0 or sensitivity > 1:
    raise ValueError('`sensitivity` must be in the range [0, 1].')
  with variable_scope.variable_scope(name, 'specificity_at_sensitivity',
                                     (predictions, labels, weights)):
    kepsilon = 1e-7  # to account for floating point imprecisions
    # Evenly spaced interior thresholds in (0, 1); the endpoints are nudged by
    # kepsilon so predictions exactly at 0.0 or 1.0 fall inside a bucket.
    thresholds = [
        (i + 1) * 1.0 / (num_thresholds - 1) for i in range(num_thresholds - 2)
    ]
    thresholds = [0.0 - kepsilon] + thresholds + [1.0 - kepsilon]
    values, update_ops = _confusion_matrix_at_thresholds(
        labels, predictions, thresholds, weights)
    def compute_specificity_at_sensitivity(tp, tn, fp, fn, name):
      """Computes the specificity at the given sensitivity.
      Args:
        tp: True positives.
        tn: True negatives.
        fp: False positives.
        fn: False negatives.
        name: The name of the operation.
      Returns:
        The specificity using the aggregated values.
      """
      # Sensitivity (recall) at each threshold; kepsilon avoids 0/0.
      sensitivities = math_ops.div(tp, tp + fn + kepsilon)
      # We'll need to use this trick until tf.argmax allows us to specify
      # whether we should use the first or last index in case of ties.
      min_val = math_ops.reduce_min(math_ops.abs(sensitivities - sensitivity))
      indices_at_minval = math_ops.equal(
          math_ops.abs(sensitivities - sensitivity), min_val)
      indices_at_minval = math_ops.to_int64(indices_at_minval)
      # Cumulative sum makes argmax pick the LAST threshold index whose
      # sensitivity is closest to the target.
      indices_at_minval = math_ops.cumsum(indices_at_minval)
      tf_index = math_ops.argmax(indices_at_minval, 0)
      tf_index = math_ops.cast(tf_index, dtypes.int32)
      # Now, we have the implicit threshold, so compute the specificity:
      return math_ops.div(tn[tf_index], tn[tf_index] + fp[tf_index] + kepsilon,
                          name)
    def aggregate_across_towers(_, values):
      # Runs in cross-tower context under a distribution strategy.
      specificity = compute_specificity_at_sensitivity(
          values['tp'], values['tn'], values['fp'], values['fn'], 'value')
      if metrics_collections:
        ops.add_to_collections(metrics_collections, specificity)
      return specificity
    specificity = distribute_lib.get_tower_context().merge_call(
        aggregate_across_towers, values)
    update_op = compute_specificity_at_sensitivity(
        update_ops['tp'], update_ops['tn'], update_ops['fp'], update_ops['fn'],
        'update_op')
    if updates_collections:
      ops.add_to_collections(updates_collections, update_op)
    return specificity, update_op
| lukeiwanski/tensorflow | tensorflow/python/ops/metrics_impl.py | Python | apache-2.0 | 162,105 |
# Copyright 2017--2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import logging
import logging.config
import sys
from typing import Optional
# Shared formatter definitions: 'verbose' (timestamped; used for log files)
# and 'simple' (used for console output).
FORMATTERS = {
    'verbose': {
        'format': '[%(asctime)s:%(levelname)s:%(name)s:%(funcName)s] %(message)s',
        'datefmt': "%Y-%m-%d:%H:%M:%S",
    },
    'simple': {
        'format': '[%(levelname)s:%(name)s] %(message)s'
    },
}
# dictConfig schema: log to a rotating file only (10 MB per file, 5 backups).
FILE_LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': FORMATTERS,
    'handlers': {
        'rotating': {
            'level': 'INFO',
            'formatter': 'verbose',
            'class': 'logging.handlers.RotatingFileHandler',
            'maxBytes': 10000000,
            'backupCount': 5,
            'filename': 'sockeye.log',
        }
    },
    'root': {
        'handlers': ['rotating'],
        'level': 'DEBUG',
    }
}
# dictConfig schema: log to the console only.
CONSOLE_LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': FORMATTERS,
    'handlers': {
        'console': {
            'level': 'INFO',
            'formatter': 'simple',
            'class': 'logging.StreamHandler',
            'stream': None
        },
    },
    'root': {
        'handlers': ['console'],
        'level': 'DEBUG',
    }
}
# dictConfig schema: log to both the console and a rotating file.
FILE_CONSOLE_LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': FORMATTERS,
    'handlers': {
        'console': {
            'level': 'INFO',
            'formatter': 'simple',
            'class': 'logging.StreamHandler',
            'stream': None
        },
        'rotating': {
            'level': 'INFO',
            'formatter': 'verbose',
            'class': 'logging.handlers.RotatingFileHandler',
            'maxBytes': 10000000,
            'backupCount': 5,
            'filename': 'sockeye.log',
        }
    },
    'root': {
        'handlers': ['console', 'rotating'],
        'level': 'DEBUG',
    }
}
# dictConfig schema: disable logging entirely. NOTE: intentionally has no
# 'handlers' key.
NO_LOGGING = {
    'version': 1,
    'disable_existing_loggers': True,
}
# Name -> dictConfig schema. NOTE(review): the values are the shared
# module-level dicts above, not copies — mutations (filename, levels) made by
# setup_main_logger persist across calls.
LOGGING_CONFIGS = {
    "file_only": FILE_LOGGING,
    "console_only": CONSOLE_LOGGING,
    "file_console": FILE_CONSOLE_LOGGING,
    "none": NO_LOGGING,
}
def setup_main_logger(file_logging=True, console=True, path: Optional[str] = None, level=logging.INFO,
                      console_level=None):
    """
    Configures logging for the main application.

    :param file_logging: Whether to log to a file.
    :param console: Whether to log to the console.
    :param path: Optional path to write logfile to. Required when
                 `file_logging` is True.
    :param level: Log level. Default: INFO.
    :param console_level: Optionally specify a separate log level for the console.
    """
    # Local import: only this function needs it, and the module's import
    # block stays unchanged.
    import copy

    if file_logging and console:
        log_config = LOGGING_CONFIGS["file_console"]  # type: ignore
    elif file_logging:
        log_config = LOGGING_CONFIGS["file_only"]
    elif console:
        log_config = LOGGING_CONFIGS["console_only"]
    else:
        log_config = LOGGING_CONFIGS["none"]
    # Work on a deep copy: the module-level config dicts are shared, so
    # mutating them directly would leak this call's path/levels into any
    # later setup_main_logger() call.
    log_config = copy.deepcopy(log_config)
    if file_logging:
        assert path is not None, "Must provide a logfile path"
        log_config["handlers"]["rotating"]["filename"] = path  # type: ignore
    # Bug fix: the "none" config has no 'handlers' key, so indexing
    # log_config['handlers'] raised KeyError when both file_logging and
    # console were False. Use .get() to tolerate that config.
    handlers = log_config.get('handlers', {})  # type: ignore
    for handler_config in handlers.values():
        handler_config['level'] = level
    if 'console' in handlers and console_level is not None:
        handlers['console']['level'] = console_level
    logging.config.dictConfig(log_config)  # type: ignore
def exception_hook(exc_type, exc_value, exc_traceback):
    """`sys.excepthook` replacement: log any uncaught exception (with its
    traceback) through the root logger before the interpreter exits."""
    logging.exception("Uncaught exception", exc_info=(exc_type, exc_value, exc_traceback))
# Module-import side effect: importing this module routes all uncaught
# exceptions in the process through the logging system.
sys.excepthook = exception_hook
def log_sockeye_version(logger):
    """Log the installed Sockeye version, git commit, and package path.

    Falls back to "unknown" for the commit when the generated
    ``sockeye.git_version`` module is not present (e.g. source checkouts).
    """
    from sockeye import __version__, __file__
    git_hash = "unknown"
    try:
        from sockeye.git_version import git_hash
    except ImportError:
        pass
    logger.info(f"Sockeye: {__version__}, commit {git_hash}, path {__file__}")
def log_torch_version(logger):
    """Log the installed PyTorch version and location, or note its absence."""
    try:
        from torch import __version__, __file__
    except ImportError:
        message = 'PyTorch unavailable'
    else:
        message = f'PyTorch: {__version__} ({__file__})'
    logger.info(message)
| awslabs/sockeye | sockeye/log.py | Python | apache-2.0 | 4,642 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-12-07 15:43
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add `Organization.allowed_modules`: a whitespace-separated list of
    private module keys the organization is permitted to use (defaults to
    empty, i.e. no private modules)."""

    dependencies = [
        ('siteapp', '0010_auto_20161207_1540'),
    ]
    operations = [
        migrations.AddField(
            model_name='organization',
            name='allowed_modules',
            field=models.TextField(default='', help_text='A list of module keys of `access: private` project modules that this Organization has permission to use, separated by spaces or newlines.'),
        ),
    ]
| GovReady/govready-q | siteapp/migrations/0011_organization_allowed_modules.py | Python | gpl-3.0 | 612 |
# -*- coding: utf-8 -*-
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests."""
import mock
import pytest
from google.rpc import status_pb2
from google.cloud import bigtable_admin_v2
from google.cloud.bigtable_admin_v2 import enums
from google.cloud.bigtable_admin_v2.proto import bigtable_instance_admin_pb2
from google.cloud.bigtable_admin_v2.proto import instance_pb2
from google.iam.v1 import iam_policy_pb2
from google.iam.v1 import policy_pb2
from google.longrunning import operations_pb2
from google.protobuf import empty_pb2
from google.protobuf import field_mask_pb2
class MultiCallableStub(object):
    """Stub for the grpc.UnaryUnaryMultiCallable interface.

    Records each request on the owning channel stub and replays that stub's
    canned responses (popped from the end); a response that is an Exception
    instance is raised instead of returned. Falsy responses (including an
    exhausted response list) yield ``None``.
    """

    def __init__(self, method, channel_stub):
        self.method = method
        self.channel_stub = channel_stub

    def __call__(self, request, timeout=None, metadata=None, credentials=None):
        stub = self.channel_stub
        stub.requests.append((self.method, request))
        response = stub.responses.pop() if stub.responses else None
        if isinstance(response, Exception):
            raise response
        if response:
            return response
class ChannelStub(object):
    """Stub for the grpc.Channel interface.

    Holds canned `responses` (consumed last-first by the multicallables it
    creates) and records every request issued through it in `requests`.
    """

    def __init__(self, responses=None):
        # Bug fix: the original signature used a mutable default
        # (`responses=[]`), shared by every instance constructed without an
        # argument; because the stub pops from this list, one instance could
        # consume responses appended for another. Default to a fresh list.
        # A caller-supplied list is still stored as-is (not copied) to
        # preserve the original pop-mutates-caller's-list behavior.
        self.responses = responses if responses is not None else []
        self.requests = []

    def unary_unary(self, method, request_serializer=None, response_deserializer=None):
        """Return a recording multicallable bound to `method` and this stub."""
        return MultiCallableStub(method, self)
class CustomException(Exception):
    """Marker exception used to verify that transport errors propagate
    unchanged through the client methods under test."""
    pass
class TestBigtableInstanceAdminClient(object):
def test_create_instance(self):
# Setup Expected Response
name = "name3373707"
display_name = "displayName1615086568"
expected_response = {"name": name, "display_name": display_name}
expected_response = instance_pb2.Instance(**expected_response)
operation = operations_pb2.Operation(
name="operations/test_create_instance", done=True
)
operation.response.Pack(expected_response)
# Mock the API response
channel = ChannelStub(responses=[operation])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = bigtable_admin_v2.BigtableInstanceAdminClient()
# Setup Request
parent = client.project_path("[PROJECT]")
instance_id = "instanceId-2101995259"
instance = {}
clusters = {}
response = client.create_instance(parent, instance_id, instance, clusters)
result = response.result()
assert expected_response == result
assert len(channel.requests) == 1
expected_request = bigtable_instance_admin_pb2.CreateInstanceRequest(
parent=parent, instance_id=instance_id, instance=instance, clusters=clusters
)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_create_instance_exception(self):
# Setup Response
error = status_pb2.Status()
operation = operations_pb2.Operation(
name="operations/test_create_instance_exception", done=True
)
operation.error.CopyFrom(error)
# Mock the API response
channel = ChannelStub(responses=[operation])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = bigtable_admin_v2.BigtableInstanceAdminClient()
# Setup Request
parent = client.project_path("[PROJECT]")
instance_id = "instanceId-2101995259"
instance = {}
clusters = {}
response = client.create_instance(parent, instance_id, instance, clusters)
exception = response.exception()
assert exception.errors[0] == error
def test_get_instance(self):
# Setup Expected Response
name_2 = "name2-1052831874"
display_name = "displayName1615086568"
expected_response = {"name": name_2, "display_name": display_name}
expected_response = instance_pb2.Instance(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = bigtable_admin_v2.BigtableInstanceAdminClient()
# Setup Request
name = client.instance_path("[PROJECT]", "[INSTANCE]")
response = client.get_instance(name)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = bigtable_instance_admin_pb2.GetInstanceRequest(name=name)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_get_instance_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = bigtable_admin_v2.BigtableInstanceAdminClient()
# Setup request
name = client.instance_path("[PROJECT]", "[INSTANCE]")
with pytest.raises(CustomException):
client.get_instance(name)
def test_list_instances(self):
# Setup Expected Response
next_page_token = "nextPageToken-1530815211"
expected_response = {"next_page_token": next_page_token}
expected_response = bigtable_instance_admin_pb2.ListInstancesResponse(
**expected_response
)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = bigtable_admin_v2.BigtableInstanceAdminClient()
# Setup Request
parent = client.project_path("[PROJECT]")
response = client.list_instances(parent)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = bigtable_instance_admin_pb2.ListInstancesRequest(
parent=parent
)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_list_instances_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = bigtable_admin_v2.BigtableInstanceAdminClient()
# Setup request
parent = client.project_path("[PROJECT]")
with pytest.raises(CustomException):
client.list_instances(parent)
def test_update_instance(self):
    """update_instance returns the stubbed Instance and sends it as the request."""
    # Setup Expected Response
    name_2 = "name2-1052831874"
    display_name_2 = "displayName21615000987"
    expected_response = {"name": name_2, "display_name": display_name_2}
    expected_response = instance_pb2.Instance(**expected_response)

    # Mock the API response
    channel = ChannelStub(responses=[expected_response])
    patch = mock.patch("google.api_core.grpc_helpers.create_channel")
    with patch as create_channel:
        create_channel.return_value = channel
        client = bigtable_admin_v2.BigtableInstanceAdminClient()

    # Setup Request
    name = client.instance_path("[PROJECT]", "[INSTANCE]")
    display_name = "displayName1615086568"
    type_ = enums.Instance.Type.TYPE_UNSPECIFIED
    labels = {}

    response = client.update_instance(name, display_name, type_, labels)
    assert expected_response == response

    assert len(channel.requests) == 1
    # update_instance sends the Instance message itself as the request body.
    expected_request = instance_pb2.Instance(
        name=name, display_name=display_name, type=type_, labels=labels
    )
    actual_request = channel.requests[0][1]
    assert expected_request == actual_request
def test_update_instance_exception(self):
    """update_instance should surface the error queued on the channel."""
    # Mock the API response
    channel = ChannelStub(responses=[CustomException()])
    patch = mock.patch("google.api_core.grpc_helpers.create_channel")
    with patch as create_channel:
        create_channel.return_value = channel
        client = bigtable_admin_v2.BigtableInstanceAdminClient()

    # Setup request
    name = client.instance_path("[PROJECT]", "[INSTANCE]")
    display_name = "displayName1615086568"
    type_ = enums.Instance.Type.TYPE_UNSPECIFIED
    labels = {}

    with pytest.raises(CustomException):
        client.update_instance(name, display_name, type_, labels)
def test_partial_update_instance(self):
    """partial_update_instance LRO should resolve to the packed Instance."""
    # Setup Expected Response
    name = "name3373707"
    display_name = "displayName1615086568"
    expected_response = {"name": name, "display_name": display_name}
    expected_response = instance_pb2.Instance(**expected_response)
    operation = operations_pb2.Operation(
        name="operations/test_partial_update_instance", done=True
    )
    operation.response.Pack(expected_response)

    # Mock the API response
    channel = ChannelStub(responses=[operation])
    patch = mock.patch("google.api_core.grpc_helpers.create_channel")
    with patch as create_channel:
        create_channel.return_value = channel
        client = bigtable_admin_v2.BigtableInstanceAdminClient()

    # Setup Request
    instance = {}
    update_mask = {}

    response = client.partial_update_instance(instance, update_mask)
    result = response.result()
    assert expected_response == result

    assert len(channel.requests) == 1
    expected_request = bigtable_instance_admin_pb2.PartialUpdateInstanceRequest(
        instance=instance, update_mask=update_mask
    )
    actual_request = channel.requests[0][1]
    assert expected_request == actual_request
def test_partial_update_instance_exception(self):
    """partial_update_instance LRO should expose the operation error."""
    # Setup Response
    error = status_pb2.Status()
    operation = operations_pb2.Operation(
        name="operations/test_partial_update_instance_exception", done=True
    )
    operation.error.CopyFrom(error)

    # Mock the API response
    channel = ChannelStub(responses=[operation])
    patch = mock.patch("google.api_core.grpc_helpers.create_channel")
    with patch as create_channel:
        create_channel.return_value = channel
        client = bigtable_admin_v2.BigtableInstanceAdminClient()

    # Setup Request
    instance = {}
    update_mask = {}

    response = client.partial_update_instance(instance, update_mask)
    exception = response.exception()
    assert exception.errors[0] == error
def test_delete_instance(self):
    """delete_instance sends a DeleteInstanceRequest for the given name."""
    channel = ChannelStub()
    patch = mock.patch("google.api_core.grpc_helpers.create_channel")
    with patch as create_channel:
        create_channel.return_value = channel
        client = bigtable_admin_v2.BigtableInstanceAdminClient()

    # Setup Request
    name = client.instance_path("[PROJECT]", "[INSTANCE]")

    client.delete_instance(name)

    assert len(channel.requests) == 1
    expected_request = bigtable_instance_admin_pb2.DeleteInstanceRequest(name=name)
    actual_request = channel.requests[0][1]
    assert expected_request == actual_request
def test_delete_instance_exception(self):
    """delete_instance should surface the error queued on the channel."""
    # Mock the API response
    channel = ChannelStub(responses=[CustomException()])
    patch = mock.patch("google.api_core.grpc_helpers.create_channel")
    with patch as create_channel:
        create_channel.return_value = channel
        client = bigtable_admin_v2.BigtableInstanceAdminClient()

    # Setup request
    name = client.instance_path("[PROJECT]", "[INSTANCE]")

    with pytest.raises(CustomException):
        client.delete_instance(name)
def test_create_cluster(self):
    """create_cluster LRO should resolve to the packed Cluster response."""
    # Setup Expected Response
    name = "name3373707"
    location = "location1901043637"
    serve_nodes = 1288838783
    expected_response = {
        "name": name,
        "location": location,
        "serve_nodes": serve_nodes,
    }
    expected_response = instance_pb2.Cluster(**expected_response)
    operation = operations_pb2.Operation(
        name="operations/test_create_cluster", done=True
    )
    operation.response.Pack(expected_response)

    # Mock the API response
    channel = ChannelStub(responses=[operation])
    patch = mock.patch("google.api_core.grpc_helpers.create_channel")
    with patch as create_channel:
        create_channel.return_value = channel
        client = bigtable_admin_v2.BigtableInstanceAdminClient()

    # Setup Request
    parent = client.instance_path("[PROJECT]", "[INSTANCE]")
    cluster_id = "clusterId240280960"
    cluster = {}

    response = client.create_cluster(parent, cluster_id, cluster)
    result = response.result()
    assert expected_response == result

    assert len(channel.requests) == 1
    expected_request = bigtable_instance_admin_pb2.CreateClusterRequest(
        parent=parent, cluster_id=cluster_id, cluster=cluster
    )
    actual_request = channel.requests[0][1]
    assert expected_request == actual_request
def test_create_cluster_exception(self):
    """create_cluster LRO should expose the operation error."""
    # Setup Response
    error = status_pb2.Status()
    operation = operations_pb2.Operation(
        name="operations/test_create_cluster_exception", done=True
    )
    operation.error.CopyFrom(error)

    # Mock the API response
    channel = ChannelStub(responses=[operation])
    patch = mock.patch("google.api_core.grpc_helpers.create_channel")
    with patch as create_channel:
        create_channel.return_value = channel
        client = bigtable_admin_v2.BigtableInstanceAdminClient()

    # Setup Request
    parent = client.instance_path("[PROJECT]", "[INSTANCE]")
    cluster_id = "clusterId240280960"
    cluster = {}

    response = client.create_cluster(parent, cluster_id, cluster)
    exception = response.exception()
    assert exception.errors[0] == error
def test_get_cluster(self):
    """get_cluster returns the stubbed Cluster and sends a GetClusterRequest."""
    # Setup Expected Response
    name_2 = "name2-1052831874"
    location = "location1901043637"
    serve_nodes = 1288838783
    expected_response = {
        "name": name_2,
        "location": location,
        "serve_nodes": serve_nodes,
    }
    expected_response = instance_pb2.Cluster(**expected_response)

    # Mock the API response
    channel = ChannelStub(responses=[expected_response])
    patch = mock.patch("google.api_core.grpc_helpers.create_channel")
    with patch as create_channel:
        create_channel.return_value = channel
        client = bigtable_admin_v2.BigtableInstanceAdminClient()

    # Setup Request
    name = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]")

    response = client.get_cluster(name)
    assert expected_response == response

    assert len(channel.requests) == 1
    expected_request = bigtable_instance_admin_pb2.GetClusterRequest(name=name)
    actual_request = channel.requests[0][1]
    assert expected_request == actual_request
def test_get_cluster_exception(self):
    """get_cluster should surface the error queued on the channel."""
    # Mock the API response
    channel = ChannelStub(responses=[CustomException()])
    patch = mock.patch("google.api_core.grpc_helpers.create_channel")
    with patch as create_channel:
        create_channel.return_value = channel
        client = bigtable_admin_v2.BigtableInstanceAdminClient()

    # Setup request
    name = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]")

    with pytest.raises(CustomException):
        client.get_cluster(name)
def test_list_clusters(self):
    """list_clusters returns the stubbed response and sends the expected request."""
    # Setup Expected Response
    next_page_token = "nextPageToken-1530815211"
    expected_response = {"next_page_token": next_page_token}
    expected_response = bigtable_instance_admin_pb2.ListClustersResponse(
        **expected_response
    )

    # Mock the API response
    channel = ChannelStub(responses=[expected_response])
    patch = mock.patch("google.api_core.grpc_helpers.create_channel")
    with patch as create_channel:
        create_channel.return_value = channel
        client = bigtable_admin_v2.BigtableInstanceAdminClient()

    # Setup Request
    parent = client.instance_path("[PROJECT]", "[INSTANCE]")

    response = client.list_clusters(parent)
    assert expected_response == response

    assert len(channel.requests) == 1
    expected_request = bigtable_instance_admin_pb2.ListClustersRequest(
        parent=parent
    )
    actual_request = channel.requests[0][1]
    assert expected_request == actual_request
def test_list_clusters_exception(self):
    """list_clusters should surface the error queued on the channel."""
    # Mock the API response
    channel = ChannelStub(responses=[CustomException()])
    patch = mock.patch("google.api_core.grpc_helpers.create_channel")
    with patch as create_channel:
        create_channel.return_value = channel
        client = bigtable_admin_v2.BigtableInstanceAdminClient()

    # Setup request
    parent = client.instance_path("[PROJECT]", "[INSTANCE]")

    with pytest.raises(CustomException):
        client.list_clusters(parent)
def test_update_cluster(self):
    """update_cluster LRO should resolve to the packed Cluster response."""
    # Setup Expected Response
    name_2 = "name2-1052831874"
    location = "location1901043637"
    serve_nodes_2 = 1623486220
    expected_response = {
        "name": name_2,
        "location": location,
        "serve_nodes": serve_nodes_2,
    }
    expected_response = instance_pb2.Cluster(**expected_response)
    operation = operations_pb2.Operation(
        name="operations/test_update_cluster", done=True
    )
    operation.response.Pack(expected_response)

    # Mock the API response
    channel = ChannelStub(responses=[operation])
    patch = mock.patch("google.api_core.grpc_helpers.create_channel")
    with patch as create_channel:
        create_channel.return_value = channel
        client = bigtable_admin_v2.BigtableInstanceAdminClient()

    # Setup Request
    name = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]")
    serve_nodes = 1288838783

    response = client.update_cluster(name, serve_nodes)
    result = response.result()
    assert expected_response == result

    assert len(channel.requests) == 1
    # update_cluster sends the Cluster message itself as the request body.
    expected_request = instance_pb2.Cluster(name=name, serve_nodes=serve_nodes)
    actual_request = channel.requests[0][1]
    assert expected_request == actual_request
def test_update_cluster_exception(self):
    """update_cluster LRO should expose the operation error."""
    # Setup Response
    error = status_pb2.Status()
    operation = operations_pb2.Operation(
        name="operations/test_update_cluster_exception", done=True
    )
    operation.error.CopyFrom(error)

    # Mock the API response
    channel = ChannelStub(responses=[operation])
    patch = mock.patch("google.api_core.grpc_helpers.create_channel")
    with patch as create_channel:
        create_channel.return_value = channel
        client = bigtable_admin_v2.BigtableInstanceAdminClient()

    # Setup Request
    name = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]")
    serve_nodes = 1288838783

    response = client.update_cluster(name, serve_nodes)
    exception = response.exception()
    assert exception.errors[0] == error
def test_delete_cluster(self):
    """delete_cluster sends a DeleteClusterRequest for the given name."""
    channel = ChannelStub()
    patch = mock.patch("google.api_core.grpc_helpers.create_channel")
    with patch as create_channel:
        create_channel.return_value = channel
        client = bigtable_admin_v2.BigtableInstanceAdminClient()

    # Setup Request
    name = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]")

    client.delete_cluster(name)

    assert len(channel.requests) == 1
    expected_request = bigtable_instance_admin_pb2.DeleteClusterRequest(name=name)
    actual_request = channel.requests[0][1]
    assert expected_request == actual_request
def test_delete_cluster_exception(self):
    """delete_cluster should surface the error queued on the channel."""
    # Mock the API response
    channel = ChannelStub(responses=[CustomException()])
    patch = mock.patch("google.api_core.grpc_helpers.create_channel")
    with patch as create_channel:
        create_channel.return_value = channel
        client = bigtable_admin_v2.BigtableInstanceAdminClient()

    # Setup request
    name = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]")

    with pytest.raises(CustomException):
        client.delete_cluster(name)
def test_create_app_profile(self):
    """create_app_profile returns the stubbed AppProfile and sends the request."""
    # Setup Expected Response
    name = "name3373707"
    etag = "etag3123477"
    description = "description-1724546052"
    expected_response = {"name": name, "etag": etag, "description": description}
    expected_response = instance_pb2.AppProfile(**expected_response)

    # Mock the API response
    channel = ChannelStub(responses=[expected_response])
    patch = mock.patch("google.api_core.grpc_helpers.create_channel")
    with patch as create_channel:
        create_channel.return_value = channel
        client = bigtable_admin_v2.BigtableInstanceAdminClient()

    # Setup Request
    parent = client.instance_path("[PROJECT]", "[INSTANCE]")
    app_profile_id = "appProfileId1262094415"
    app_profile = {}

    response = client.create_app_profile(parent, app_profile_id, app_profile)
    assert expected_response == response

    assert len(channel.requests) == 1
    expected_request = bigtable_instance_admin_pb2.CreateAppProfileRequest(
        parent=parent, app_profile_id=app_profile_id, app_profile=app_profile
    )
    actual_request = channel.requests[0][1]
    assert expected_request == actual_request
def test_create_app_profile_exception(self):
    """create_app_profile should surface the error queued on the channel."""
    # Mock the API response
    channel = ChannelStub(responses=[CustomException()])
    patch = mock.patch("google.api_core.grpc_helpers.create_channel")
    with patch as create_channel:
        create_channel.return_value = channel
        client = bigtable_admin_v2.BigtableInstanceAdminClient()

    # Setup request
    parent = client.instance_path("[PROJECT]", "[INSTANCE]")
    app_profile_id = "appProfileId1262094415"
    app_profile = {}

    with pytest.raises(CustomException):
        client.create_app_profile(parent, app_profile_id, app_profile)
def test_get_app_profile(self):
    """get_app_profile returns the stubbed AppProfile and sends the request."""
    # Setup Expected Response
    name_2 = "name2-1052831874"
    etag = "etag3123477"
    description = "description-1724546052"
    expected_response = {"name": name_2, "etag": etag, "description": description}
    expected_response = instance_pb2.AppProfile(**expected_response)

    # Mock the API response
    channel = ChannelStub(responses=[expected_response])
    patch = mock.patch("google.api_core.grpc_helpers.create_channel")
    with patch as create_channel:
        create_channel.return_value = channel
        client = bigtable_admin_v2.BigtableInstanceAdminClient()

    # Setup Request
    name = client.app_profile_path("[PROJECT]", "[INSTANCE]", "[APP_PROFILE]")

    response = client.get_app_profile(name)
    assert expected_response == response

    assert len(channel.requests) == 1
    expected_request = bigtable_instance_admin_pb2.GetAppProfileRequest(name=name)
    actual_request = channel.requests[0][1]
    assert expected_request == actual_request
def test_get_app_profile_exception(self):
    """get_app_profile should surface the error queued on the channel."""
    # Mock the API response
    channel = ChannelStub(responses=[CustomException()])
    patch = mock.patch("google.api_core.grpc_helpers.create_channel")
    with patch as create_channel:
        create_channel.return_value = channel
        client = bigtable_admin_v2.BigtableInstanceAdminClient()

    # Setup request
    name = client.app_profile_path("[PROJECT]", "[INSTANCE]", "[APP_PROFILE]")

    with pytest.raises(CustomException):
        client.get_app_profile(name)
def test_list_app_profiles(self):
    """list_app_profiles pages through the AppProfiles of the stubbed response."""
    # Setup Expected Response
    next_page_token = ""
    app_profiles_element = {}
    app_profiles = [app_profiles_element]
    expected_response = {
        "next_page_token": next_page_token,
        "app_profiles": app_profiles,
    }
    expected_response = bigtable_instance_admin_pb2.ListAppProfilesResponse(
        **expected_response
    )

    # Mock the API response
    channel = ChannelStub(responses=[expected_response])
    patch = mock.patch("google.api_core.grpc_helpers.create_channel")
    with patch as create_channel:
        create_channel.return_value = channel
        client = bigtable_admin_v2.BigtableInstanceAdminClient()

    # Setup Request
    parent = client.instance_path("[PROJECT]", "[INSTANCE]")

    paged_list_response = client.list_app_profiles(parent)
    resources = list(paged_list_response)
    assert len(resources) == 1

    assert expected_response.app_profiles[0] == resources[0]
    assert len(channel.requests) == 1
    expected_request = bigtable_instance_admin_pb2.ListAppProfilesRequest(
        parent=parent
    )
    actual_request = channel.requests[0][1]
    assert expected_request == actual_request
def test_list_app_profiles_exception(self):
    """Iterating the paged response should surface the channel error."""
    channel = ChannelStub(responses=[CustomException()])
    patch = mock.patch("google.api_core.grpc_helpers.create_channel")
    with patch as create_channel:
        create_channel.return_value = channel
        client = bigtable_admin_v2.BigtableInstanceAdminClient()

    # Setup request
    parent = client.instance_path("[PROJECT]", "[INSTANCE]")

    paged_list_response = client.list_app_profiles(parent)
    with pytest.raises(CustomException):
        list(paged_list_response)
def test_update_app_profile(self):
    """update_app_profile LRO should resolve to the packed AppProfile."""
    # Setup Expected Response
    name = "name3373707"
    etag = "etag3123477"
    description = "description-1724546052"
    expected_response = {"name": name, "etag": etag, "description": description}
    expected_response = instance_pb2.AppProfile(**expected_response)
    operation = operations_pb2.Operation(
        name="operations/test_update_app_profile", done=True
    )
    operation.response.Pack(expected_response)

    # Mock the API response
    channel = ChannelStub(responses=[operation])
    patch = mock.patch("google.api_core.grpc_helpers.create_channel")
    with patch as create_channel:
        create_channel.return_value = channel
        client = bigtable_admin_v2.BigtableInstanceAdminClient()

    # Setup Request
    app_profile = {}
    update_mask = {}

    response = client.update_app_profile(app_profile, update_mask)
    result = response.result()
    assert expected_response == result

    assert len(channel.requests) == 1
    expected_request = bigtable_instance_admin_pb2.UpdateAppProfileRequest(
        app_profile=app_profile, update_mask=update_mask
    )
    actual_request = channel.requests[0][1]
    assert expected_request == actual_request
def test_update_app_profile_exception(self):
    """update_app_profile LRO should expose the operation error."""
    # Setup Response
    error = status_pb2.Status()
    operation = operations_pb2.Operation(
        name="operations/test_update_app_profile_exception", done=True
    )
    operation.error.CopyFrom(error)

    # Mock the API response
    channel = ChannelStub(responses=[operation])
    patch = mock.patch("google.api_core.grpc_helpers.create_channel")
    with patch as create_channel:
        create_channel.return_value = channel
        client = bigtable_admin_v2.BigtableInstanceAdminClient()

    # Setup Request
    app_profile = {}
    update_mask = {}

    response = client.update_app_profile(app_profile, update_mask)
    exception = response.exception()
    assert exception.errors[0] == error
def test_delete_app_profile(self):
    """delete_app_profile sends name and ignore_warnings in the request."""
    channel = ChannelStub()
    patch = mock.patch("google.api_core.grpc_helpers.create_channel")
    with patch as create_channel:
        create_channel.return_value = channel
        client = bigtable_admin_v2.BigtableInstanceAdminClient()

    # Setup Request
    name = client.app_profile_path("[PROJECT]", "[INSTANCE]", "[APP_PROFILE]")
    ignore_warnings = True

    client.delete_app_profile(name, ignore_warnings)

    assert len(channel.requests) == 1
    expected_request = bigtable_instance_admin_pb2.DeleteAppProfileRequest(
        name=name, ignore_warnings=ignore_warnings
    )
    actual_request = channel.requests[0][1]
    assert expected_request == actual_request
def test_delete_app_profile_exception(self):
    """delete_app_profile should surface the error queued on the channel."""
    # Mock the API response
    channel = ChannelStub(responses=[CustomException()])
    patch = mock.patch("google.api_core.grpc_helpers.create_channel")
    with patch as create_channel:
        create_channel.return_value = channel
        client = bigtable_admin_v2.BigtableInstanceAdminClient()

    # Setup request
    name = client.app_profile_path("[PROJECT]", "[INSTANCE]", "[APP_PROFILE]")
    ignore_warnings = True

    with pytest.raises(CustomException):
        client.delete_app_profile(name, ignore_warnings)
def test_get_iam_policy(self):
    """get_iam_policy returns the stubbed Policy and sends the request."""
    # Setup Expected Response
    version = 351608024
    etag = b"etag3123477"
    expected_response = {"version": version, "etag": etag}
    expected_response = policy_pb2.Policy(**expected_response)

    # Mock the API response
    channel = ChannelStub(responses=[expected_response])
    patch = mock.patch("google.api_core.grpc_helpers.create_channel")
    with patch as create_channel:
        create_channel.return_value = channel
        client = bigtable_admin_v2.BigtableInstanceAdminClient()

    # Setup Request
    resource = client.instance_path("[PROJECT]", "[INSTANCE]")

    response = client.get_iam_policy(resource)
    assert expected_response == response

    assert len(channel.requests) == 1
    expected_request = iam_policy_pb2.GetIamPolicyRequest(resource=resource)
    actual_request = channel.requests[0][1]
    assert expected_request == actual_request
def test_get_iam_policy_exception(self):
    """get_iam_policy should surface the error queued on the channel."""
    # Mock the API response
    channel = ChannelStub(responses=[CustomException()])
    patch = mock.patch("google.api_core.grpc_helpers.create_channel")
    with patch as create_channel:
        create_channel.return_value = channel
        client = bigtable_admin_v2.BigtableInstanceAdminClient()

    # Setup request
    resource = client.instance_path("[PROJECT]", "[INSTANCE]")

    with pytest.raises(CustomException):
        client.get_iam_policy(resource)
def test_set_iam_policy(self):
    """set_iam_policy returns the stubbed Policy and sends the request."""
    # Setup Expected Response
    version = 351608024
    etag = b"etag3123477"
    expected_response = {"version": version, "etag": etag}
    expected_response = policy_pb2.Policy(**expected_response)

    # Mock the API response
    channel = ChannelStub(responses=[expected_response])
    patch = mock.patch("google.api_core.grpc_helpers.create_channel")
    with patch as create_channel:
        create_channel.return_value = channel
        client = bigtable_admin_v2.BigtableInstanceAdminClient()

    # Setup Request
    resource = client.instance_path("[PROJECT]", "[INSTANCE]")
    policy = {}

    response = client.set_iam_policy(resource, policy)
    assert expected_response == response

    assert len(channel.requests) == 1
    expected_request = iam_policy_pb2.SetIamPolicyRequest(
        resource=resource, policy=policy
    )
    actual_request = channel.requests[0][1]
    assert expected_request == actual_request
def test_set_iam_policy_exception(self):
    """set_iam_policy should surface the error queued on the channel."""
    # Mock the API response
    channel = ChannelStub(responses=[CustomException()])
    patch = mock.patch("google.api_core.grpc_helpers.create_channel")
    with patch as create_channel:
        create_channel.return_value = channel
        client = bigtable_admin_v2.BigtableInstanceAdminClient()

    # Setup request
    resource = client.instance_path("[PROJECT]", "[INSTANCE]")
    policy = {}

    with pytest.raises(CustomException):
        client.set_iam_policy(resource, policy)
def test_test_iam_permissions(self):
    """test_iam_permissions returns the stubbed response and sends the request."""
    # Setup Expected Response
    expected_response = {}
    expected_response = iam_policy_pb2.TestIamPermissionsResponse(
        **expected_response
    )

    # Mock the API response
    channel = ChannelStub(responses=[expected_response])
    patch = mock.patch("google.api_core.grpc_helpers.create_channel")
    with patch as create_channel:
        create_channel.return_value = channel
        client = bigtable_admin_v2.BigtableInstanceAdminClient()

    # Setup Request
    resource = client.instance_path("[PROJECT]", "[INSTANCE]")
    permissions = []

    response = client.test_iam_permissions(resource, permissions)
    assert expected_response == response

    assert len(channel.requests) == 1
    expected_request = iam_policy_pb2.TestIamPermissionsRequest(
        resource=resource, permissions=permissions
    )
    actual_request = channel.requests[0][1]
    assert expected_request == actual_request
def test_test_iam_permissions_exception(self):
    """test_iam_permissions should surface the error queued on the channel."""
    # Mock the API response
    channel = ChannelStub(responses=[CustomException()])
    patch = mock.patch("google.api_core.grpc_helpers.create_channel")
    with patch as create_channel:
        create_channel.return_value = channel
        client = bigtable_admin_v2.BigtableInstanceAdminClient()

    # Setup request
    resource = client.instance_path("[PROJECT]", "[INSTANCE]")
    permissions = []

    with pytest.raises(CustomException):
        client.test_iam_permissions(resource, permissions)
| dhermes/google-cloud-python | bigtable/tests/unit/gapic/v2/test_bigtable_instance_admin_client_v2.py | Python | apache-2.0 | 35,949 |
#!/usr/bin/env python
import sys
import os.path
from zvm import zmachine, trivialzui
def usage():
    # Print the command-line usage message and exit with a failure status.
    # NOTE: Python 2 print-statement syntax; this script targets Python 2.
    print """Usage: %s <story file>
Run a Z-Machine story under ZVM.
""" % sys.argv[0]
    sys.exit(1)
def main():
if len(sys.argv) != 2:
usage()
story_file = sys.argv[1]
if not os.path.isfile(story_file):
print "%s is not a file." % story_file
usage()
try:
f = file(story_file)
story_image = f.read()
f.close()
except IOError:
print "Error accessing %s" % story_file
sys.exit(1)
machine = zmachine.ZMachine(story_image,
ui=trivialzui.create_zui(),
debugmode=True)
machine.run()
# Script entry point: run only when executed directly, not when imported.
if __name__ == '__main__':
    main()
| BGCX262/zvm-hg-to-git | run_story.py | Python | bsd-3-clause | 777 |
import datetime
from flask_sqlalchemy import Pagination
from app.exceptions.base import ResourceNotFoundException, \
BusinessRuleException, DuplicateResourceException
from app.models.course import Course
from app.repository import examination_repository
def get_examination_by_id(examination_id):
    """Return the examination with the given id.

    Raises ResourceNotFoundException when no such examination exists.
    """
    examination = examination_repository.find_examination_by_id(examination_id)
    if examination:
        return examination
    raise ResourceNotFoundException("Examination", examination_id)
def get_education_by_id(education_id):
    """Return the education with the given id.

    Raises ResourceNotFoundException when no such education exists.
    """
    result = examination_repository.find_education_by_id(education_id)
    if result:
        return result
    raise ResourceNotFoundException("Education", education_id)
def get_course_by_id(course_id: int) -> Course:
    """Return the course with the given id.

    Raises ResourceNotFoundException when no such course exists.
    """
    result = examination_repository.find_course_by_id(course_id)
    if result:
        return result
    raise ResourceNotFoundException("Course", course_id)
def find_all_courses():
    """Return every course known to the repository."""
    return examination_repository.find_all_courses()
def paginated_search_courses(search: str, page: int) -> Pagination:
    """Return one page of courses matching ``search``."""
    return examination_repository.paginated_search_all_courses(
        search=search, page=page)
def find_all_educations():
    """Return every education known to the repository."""
    return examination_repository.find_all_educations()
def paginated_search_educations(search: str, page: int) -> Pagination:
    """Return one page of educations matching ``search``."""
    return examination_repository.paginated_search_all_educations(
        search=search, page=page)
def find_all_examinations_by_course(course_id):
    """Return all examinations of a course.

    Raises ResourceNotFoundException (via get_course_by_id) when the
    course does not exist.
    """
    get_course_by_id(course_id)
    return examination_repository.find_all_examinations_by_course(course_id)
def find_all_examinations_by_education(education_id):
    """Return all examinations of an education.

    Raises ResourceNotFoundException (via get_education_by_id) when the
    education does not exist.
    """
    get_education_by_id(education_id)
    return examination_repository \
        .find_all_examinations_by_education(education_id)
def find_all_examinations(page_nr, per_page):
    """Return one page of all examinations."""
    return examination_repository.find_all_examinations(page_nr, per_page)
def search_examinations_by_courses(courses, page_nr, per_page):
    """Return one page of examinations belonging to any of ``courses``."""
    return examination_repository \
        .search_examinations_by_courses(courses, page_nr, per_page)
def add_course(name, description):
    """Create and persist a new course.

    Raises DuplicateResourceException when a course with ``name``
    already exists.
    """
    duplicate = examination_repository.find_course_by_name(name)
    if duplicate:
        raise DuplicateResourceException(name, duplicate.id)

    new_course = examination_repository.create_course()
    new_course.name = name
    new_course.description = description
    examination_repository.save_course(new_course)
    return new_course
def add_education(name):
    """Create and persist a new education.

    Raises DuplicateResourceException when an education with ``name``
    already exists.
    """
    duplicate = examination_repository.find_education_by_name(name)
    if duplicate:
        raise DuplicateResourceException(name, duplicate.id)

    new_education = examination_repository.create_education()
    new_education.name = name
    examination_repository.save_education(new_education)
    return new_education
def add_examination(examination_file, date, comment,
                    course_id, education_id, test_type,
                    answers_file=None):
    """Create and persist a new examination record.

    The timestamp is set to the current UTC time; ``answers_file`` is
    optional.
    """
    examination = examination_repository.create_examination()
    examination.timestamp = datetime.datetime.utcnow()
    examination.examination_file = examination_file
    examination.date = date
    examination.comment = comment
    examination.course_id = course_id
    examination.education_id = education_id
    examination.answers_file = answers_file
    examination.test_type = test_type
    examination_repository.save_examination(examination)
    return examination
def update_examination(exam_id, examination_file, date, comment,
                       course_id, education_id, test_type,
                       answers_file=None):
    """Update an existing examination record and persist it.

    Refreshes the timestamp to the current UTC time.
    Raises ResourceNotFoundException when ``exam_id`` does not exist.
    """
    exam = examination_repository.find_examination_by_id(exam_id)
    # Robustness: previously a missing exam caused an AttributeError below.
    if not exam:
        raise ResourceNotFoundException("Examination", exam_id)
    exam.timestamp = datetime.datetime.utcnow()
    exam.examination_file = examination_file
    exam.date = date
    exam.comment = comment
    exam.course_id = course_id
    exam.education_id = education_id
    exam.answers_file = answers_file
    exam.test_type = test_type
    # BUG FIX: was save_education(exam); the updated examination must be
    # persisted through save_examination.
    examination_repository.save_examination(exam)
    return exam
def update_education(education_id, name):
    """Rename an education.

    Raises DuplicateResourceException when another education already
    uses ``name``.
    """
    education = examination_repository.find_education_by_id(education_id)
    # BUG FIX: the duplicate-name check queried courses
    # (find_course_by_name); renaming an education must check for an
    # existing *education* with the same name.
    if education.name != name and \
            examination_repository.find_education_by_name(name):
        raise DuplicateResourceException("Education", name)
    education.name = name
    examination_repository.save_education(education)
    return education
def update_course(course_id, name, description):
    """Update a course's name and description.

    Raises DuplicateResourceException when another course already uses
    ``name``.
    """
    course = examination_repository.find_course_by_id(course_id)
    renaming = course.name != name
    if renaming and examination_repository.find_course_by_name(name):
        raise DuplicateResourceException("Course", name)

    course.name = name
    course.description = description
    examination_repository.save_course(course)
    return course
def delete_examination(examination_id):
    """Delete the examination with the given id."""
    examination_repository.delete_examination(examination_id)
def count_examinations_by_course(course: Course):
    """Return how many examinations are linked to ``course``."""
    return len(
        examination_repository.find_all_examinations_by_course(course.id))
def count_examinations_by_education(education_id):
    """Return how many examinations are linked to the education."""
    return len(
        examination_repository.find_all_examinations_by_education(
            education_id))
def delete_education(education_id):
    """Delete an education.

    Raises BusinessRuleException when examinations still reference it.
    """
    if count_examinations_by_education(education_id) >= 1:
        raise BusinessRuleException("Education has examinations")
    examination_repository.delete_education(education_id)
def delete_course(course_id: int):
    """Delete a course.

    Raises ResourceNotFoundException when the course does not exist and
    BusinessRuleException when examinations still reference it.
    """
    course = get_course_by_id(course_id)
    if count_examinations_by_course(course) >= 1:
        raise BusinessRuleException("Course has examinations")
    examination_repository.delete_course(course)
| viaict/viaduct | app/service/examination_service.py | Python | mit | 5,625 |
"""
Copyright 2007, 2008, 2009 Free Software Foundation, Inc.
This file is part of GNU Radio
GNU Radio Companion is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
GNU Radio Companion is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
"""
import pygtk
pygtk.require('2.0')
import gtk
from . import Actions, Colors, Utils
from .Constants import (
PORT_SEPARATION, PORT_SPACING, CONNECTOR_EXTENSION_MINIMAL,
CONNECTOR_EXTENSION_INCREMENT, PORT_LABEL_PADDING, PORT_MIN_WIDTH, PORT_LABEL_HIDDEN_WIDTH, PORT_FONT
)
from .Element import Element
from ..core.Constants import DEFAULT_DOMAIN, GR_MESSAGE_DOMAIN
from ..core.Port import Port as _Port
# Cheetah template for the port label: the port's name rendered in black
# using the configured port font.
PORT_MARKUP_TMPL="""\
<span foreground="black" font_desc="$font">$encode($port.get_name())</span>"""
class Port(_Port, Element):
"""The graphical port."""
def __init__(self, block, n, dir):
    """
    Port constructor.
    Create list of connector coordinates.
    """
    _Port.__init__(self, block, n, dir)
    Element.__init__(self)
    # W/H: outer drawn area size; w/h: rendered label size (pixels)
    self.W = self.H = self.w = self.h = 0
    # point where connections attach, in block-relative coordinates
    self._connector_coordinate = (0, 0)
    # length of the connector stub drawn out from the port edge
    self._connector_length = 0
    self._hovering = True
    self._force_label_unhidden = False
def create_shapes(self):
    """Create new areas and labels for the port.

    Computes the port's drawn rectangle, its connector attachment point
    and connector stub length, based on the parent block's rotation and
    the position of this port among its visible siblings.
    """
    Element.create_shapes(self)
    if self.get_hide():
        return  # this port is hidden, no need to create shapes
    if self.get_domain() == GR_MESSAGE_DOMAIN:
        pass
    elif self.get_domain() != DEFAULT_DOMAIN:
        self.line_attributes[0] = 2
    #get current rotation
    rotation = self.get_rotation()
    #get all sibling ports
    ports = self.get_parent().get_sources_gui() \
        if self.is_source else self.get_parent().get_sinks_gui()
    ports = filter(lambda p: not p.get_hide(), ports)
    #get the max width
    self.W = max([port.W for port in ports] + [PORT_MIN_WIDTH])
    W = self.W if not self._label_hidden() else PORT_LABEL_HIDDEN_WIDTH
    #get a numeric index for this port relative to its sibling ports
    try:
        index = ports.index(self)
    except ValueError:
        # BUG FIX: was a bare `except:`; list.index raises ValueError when
        # this port is not among the visible siblings (e.g. just hidden).
        if hasattr(self, '_connector_length'):
            del self._connector_length
        return
    length = len(filter(lambda p: not p.get_hide(), ports))
    #reverse the order of ports for these rotations
    if rotation in (180, 270):
        index = length-index-1
    port_separation = PORT_SEPARATION \
        if not self.get_parent().has_busses[self.is_source] \
        else max([port.H for port in ports]) + PORT_SPACING
    offset = (self.get_parent().H - (length-1)*port_separation - self.H)/2
    #create areas and connector coordinates
    if (self.is_sink and rotation == 0) or (self.is_source and rotation == 180):
        x = -W
        y = port_separation*index+offset
        self.add_area((x, y), (W, self.H))
        self._connector_coordinate = (x-1, y+self.H/2)
    elif (self.is_source and rotation == 0) or (self.is_sink and rotation == 180):
        x = self.get_parent().W
        y = port_separation*index+offset
        self.add_area((x, y), (W, self.H))
        self._connector_coordinate = (x+1+W, y+self.H/2)
    elif (self.is_source and rotation == 90) or (self.is_sink and rotation == 270):
        y = -W
        x = port_separation*index+offset
        self.add_area((x, y), (self.H, W))
        self._connector_coordinate = (x+self.H/2, y-1)
    elif (self.is_sink and rotation == 90) or (self.is_source and rotation == 270):
        y = self.get_parent().W
        x = port_separation*index+offset
        self.add_area((x, y), (self.H, W))
        self._connector_coordinate = (x+self.H/2, y+1+W)
    #the connector length
    self._connector_length = CONNECTOR_EXTENSION_MINIMAL + CONNECTOR_EXTENSION_INCREMENT*index
def create_labels(self):
"""Create the labels for the socket."""
Element.create_labels(self)
self._bg_color = Colors.get_color(self.get_color())
# create the layout
layout = gtk.DrawingArea().create_pango_layout('')
layout.set_markup(Utils.parse_template(PORT_MARKUP_TMPL, port=self, font=PORT_FONT))
self.w, self.h = layout.get_pixel_size()
self.W = 2 * PORT_LABEL_PADDING + self.w
self.H = 2 * PORT_LABEL_PADDING + self.h * (
3 if self.get_type() == 'bus' else 1)
self.H += self.H % 2
# create the pixmap
pixmap = self.get_parent().get_parent().new_pixmap(self.w, self.h)
gc = pixmap.new_gc()
gc.set_foreground(self._bg_color)
pixmap.draw_rectangle(gc, True, 0, 0, self.w, self.h)
pixmap.draw_layout(gc, 0, 0, layout)
# create vertical and horizontal pixmaps
self.horizontal_label = pixmap
if self.is_vertical():
self.vertical_label = self.get_parent().get_parent().new_pixmap(self.h, self.w)
Utils.rotate_pixmap(gc, self.horizontal_label, self.vertical_label)
def draw(self, gc, window):
"""
Draw the socket with a label.
Args:
gc: the graphics context
window: the gtk window to draw on
"""
Element.draw(
self, gc, window, bg_color=self._bg_color,
border_color=self.is_highlighted() and Colors.HIGHLIGHT_COLOR or
self.get_parent().is_dummy_block and Colors.MISSING_BLOCK_BORDER_COLOR or
Colors.BORDER_COLOR,
)
if not self._areas_list or self._label_hidden():
return # this port is either hidden (no areas) or folded (no label)
X, Y = self.get_coordinate()
(x, y), (w, h) = self._areas_list[0] # use the first area's sizes to place the labels
if self.is_horizontal():
window.draw_drawable(gc, self.horizontal_label, 0, 0, x+X+(self.W-self.w)/2, y+Y+(self.H-self.h)/2, -1, -1)
elif self.is_vertical():
window.draw_drawable(gc, self.vertical_label, 0, 0, x+X+(self.H-self.h)/2, y+Y+(self.W-self.w)/2, -1, -1)
def get_connector_coordinate(self):
"""
Get the coordinate where connections may attach to.
Returns:
the connector coordinate (x, y) tuple
"""
x, y = self._connector_coordinate
X, Y = self.get_coordinate()
return (x + X, y + Y)
def get_connector_direction(self):
"""
Get the direction that the socket points: 0,90,180,270.
This is the rotation degree if the socket is an output or
the rotation degree + 180 if the socket is an input.
Returns:
the direction in degrees
"""
if self.is_source: return self.get_rotation()
elif self.is_sink: return (self.get_rotation() + 180)%360
def get_connector_length(self):
"""
Get the length of the connector.
The connector length increases as the port index changes.
Returns:
the length in pixels
"""
return self._connector_length
def get_rotation(self):
"""
Get the parent's rotation rather than self.
Returns:
the parent's rotation
"""
return self.get_parent().get_rotation()
def move(self, delta_coor):
"""
Move the parent rather than self.
Args:
delta_corr: the (delta_x, delta_y) tuple
"""
self.get_parent().move(delta_coor)
def rotate(self, direction):
"""
Rotate the parent rather than self.
Args:
direction: degrees to rotate
"""
self.get_parent().rotate(direction)
def get_coordinate(self):
"""
Get the parent's coordinate rather than self.
Returns:
the parents coordinate
"""
return self.get_parent().get_coordinate()
def set_highlighted(self, highlight):
"""
Set the parent highlight rather than self.
Args:
highlight: true to enable highlighting
"""
self.get_parent().set_highlighted(highlight)
def is_highlighted(self):
"""
Get the parent's is highlight rather than self.
Returns:
the parent's highlighting status
"""
return self.get_parent().is_highlighted()
def _label_hidden(self):
"""
Figure out if the label should be hidden
Returns:
true if the label should not be shown
"""
return self._hovering and not self._force_label_unhidden and Actions.TOGGLE_AUTO_HIDE_PORT_LABELS.get_active()
def force_label_unhidden(self, enable=True):
"""
Disable showing the label on mouse-over for this port
Args:
enable: true to override the mouse-over behaviour
"""
self._force_label_unhidden = enable
def mouse_over(self):
"""
Called from flow graph on mouse-over
"""
self._hovering = False
return Actions.TOGGLE_AUTO_HIDE_PORT_LABELS.get_active() # only redraw if necessary
def mouse_out(self):
"""
Called from flow graph on mouse-out
"""
self._hovering = True
return Actions.TOGGLE_AUTO_HIDE_PORT_LABELS.get_active() # only redraw if necessary
| SaikWolf/gnuradio | grc/gui/Port.py | Python | gpl-3.0 | 10,040 |
# -*- coding: utf-8 -*-
from flask import Flask, render_template, session, request, redirect, g
import psycopg2, bcrypt
def render(error=None):
    """Render the login page, optionally showing an error message."""
    page = render_template('login.html', error=error)
    return page
def post(request):
    """
    Handle a login form POST.

    Looks up the user by name, verifies the bcrypt-hashed password, and on
    success stores (id, name, permissions) in the session before redirecting
    to /topics/. On failure re-renders the login page with a Finnish error
    message.

    :param request: the Flask request carrying 'username' and 'password'
    :returns: a redirect response on success, the rendered login page otherwise
    """
    import hmac  # stdlib; local import so the module's import block is untouched
    cursor = g.db.cursor()
    username = unicode(request.form['username'])
    password = unicode(request.form['password'])
    # parameterized query: psycopg2 escapes the user-supplied name
    query = 'SELECT id, name, permissions, password FROM RegUser WHERE name = %s'
    cursor.execute(query, [username])
    user = cursor.fetchone()
    cursor.close()
    g.db.commit()
    if not user:
        return render(u"Käyttäjää ei löytynyt")
    # Re-hash the candidate password with the stored salt (user[3]) and
    # compare in constant time to avoid leaking timing information about
    # how many leading bytes of the hash matched.
    if not hmac.compare_digest(bcrypt.hashpw(str(password), str(user[3])),
                               str(user[3])):
        return render(u"Väärä salasana")
    # return user's data to the session without the password
    userNoPassword = user[0:3]
    session['loggedUser'] = userNoPassword
    return redirect('/topics/')
| jclakkis/discus-inferno | routes/login.py | Python | mit | 885 |
from django.utils.translation import ugettext as _
#=========================================================================
# HELPERS
#=========================================================================
def get_display(key, list):
    """
    Look up the display label paired with *key*.

    :param key: the id to look up
    :param list: an iterable of (key, label) tuples (name kept for backward
                 compatibility; it shadows the builtin)
    :returns: the matching label, or None when the key is absent
    """
    # dict.get does the membership test and lookup in one pass
    return dict(list).get(key)
def get_creative_fields(category=None):
    """
    Access MAP_CATEGORY_FIELDS and extract creative fields.

    :param category: optional category key (tuple) used as a filter
    :returns: sorted list of (id, label) tuples belonging to the specified
              category; all creative fields when no category is given
    """
    fields = []
    for cat, entries in MAP_CATEGORY_FIELDS.items():
        if category is not None and category != cat:
            continue
        # A category value is normally a tuple of (id, label) tuples, but a
        # single bare (id, label) pair is also tolerated (legacy format).
        if entries and isinstance(entries[0], tuple):
            # list.extend keeps this linear; the old tuple += was O(n^2)
            fields.extend(entries)
        else:
            fields.append((entries[0], entries[1]))
    # Sort alphabetically by the (translated) label
    return sorted(fields, key=lambda pair: pair[1])
def get_creative_field_verbose(id):
    """Resolve a creative-field id to its display label (None if unknown)."""
    label = get_display(id, CREATIVE_FIELDS)
    return label
def get_category_verbose(id):
    """Resolve a category id to its display label (None if unknown)."""
    label = get_display(id, CATEGORIES)
    return label
#=========================================================================
# OPTIONS
#=========================================================================
"""
Dictionary of tuples:
- each key is a tuple representing a category
- each item is a tuple of tuples, each one representing a creative field
"""
# NOTE(review): some labels repeat across categories (e.g. 'Industrial
# Design' under DS/D4 and EN/E3, 'Illustration' under FI/R4 and WR/W3) and
# 'Bakering' (U2) looks like a typo -- these are runtime strings, so they
# are left untouched here; confirm intent with the data owner.
MAP_CATEGORY_FIELDS = {
    ('AR', 'Architecture'): (
        ('A1', _('Architecture')),
        ('A2', _('Landscape Design')),
        ('A3', _('Street Design')),
    ),
    ('BS', 'Business'): (
        ('B1', _('Advertising')),
        ('B2', _('Branding')),
        ('B3', _('Entrepreneurship')),
    ),
    ('CY', 'Cinematography'): (
        ('C1', _('Cinematography')),
        ('C2', _('Directing')),
        ('C3', _('Film')),
        ('C4', _('Storyboarding')),
    ),
    ('CU', 'Culinary Arts'): (
        ('U1', _('Cooking')),
        ('U2', _('Bakering')),
        ('U3', _('Food and Beverage')),
        ('U4', _('Food Critic')),
        ('U5', _('Food Instructor')),
        ('U6', _('Food Styling')),
        ('U7', _('Food Writing')),
    ),
    ('DS', 'Design'): (
        ('D1', _('Automotive Design')),
        ('D2', _('Exhibition Design')),
        ('D3', _('Furniture Design')),
        ('D4', _('Industrial Design')),
        ('D5', _('Interior Design')),
        ('D6', _('Light Design')),
        ('D7', _('Packaging')),
    ),
    ('EN', 'Engineering'): (
        ('E1', _('Engineering')),
        ('E2', _('Information Architecture')),
        ('E3', _('Industrial Design')),
        ('E4', _('Product Design')),
    ),
    ('FH', 'Fashion'): (
        ('F1', _('Fashion')),
        ('F2', _('Fashion Styling')),
        ('F3', _('Jewelry Design')),
        ('F4', _('MakeUp Arts')),
    ),
    ('FI', 'Fine Arts'): (
        ('R1', _('Calligraphy')),
        ('R2', _('Comics')),
        ('R3', _('Drawing')),
        ('R4', _('Illustration')),
        ('R5', _('Mosaics')),
        ('R6', _('Painting')),
        ('R7', _('Sculpting')),
    ),
    ('GR', 'Graphics'): (
        ('G1', _('Animation')),
        ('G2', _('Computer Animation')),
        ('G3', _('Digital Art')),
        ('G4', _('Graphic Design')),
        ('G5', _('Icon Design')),
        ('G6', _('Motion Graphics')),
        ('G7', _('Visual Effects')),
    ),
    ('IT', 'Information Technology'): (
        ('I1', _('Mobile Programming')),
        ('I2', _('Programming')),
        ('I3', _('Software Engineering')),
        ('I4', _('User Interface Design')),
        ('I5', _('Videogame Design')),
        ('I6', _('Web Design')),
        ('I7', _('Web Development')),
    ),
    ('JU', 'Journalism'): (
        ('J1', _('Journalism')),
        ('J2', _('Photojournalism')),
        ('J3', _('Photoreporting')),
    ),
    ('MA', 'Manual Arts'): (
        ('M1', _('Crafts')),
        ('M2', _('Graffiti')),
    ),
    ('PF', 'Performing Arts'): (
        ('P1', _('Acting')),
        ('P2', _('Dancing')),
        ('P3', _('Music')),
    ),
    ('PH', 'Photography'): (
        ('H1', _('Digital Photography')),
        ('H2', _('Photography')),
    ),
    ('WR', 'Writing'): (
        ('W1', _('Character Design')),
        ('W2', _('Copywriting')),
        ('W3', _('Illustration')),
        ('W4', _('Typography')),
        ('W5', _('Writing')),
    ),
}
# List of categories
# NOTE(review): on Python 2 .keys() yields a list; under Python 3 this would
# be a dict view -- callers indexing CATEGORIES would need list(...) there.
CATEGORIES = MAP_CATEGORY_FIELDS.keys()
# List of creative fields (all categories, sorted by label)
CREATIVE_FIELDS = get_creative_fields()
| marco-lancini/Showcase | app_collaborations/options.py | Python | mit | 5,831 |
import numpy as np
import pandas as pd
from pandas import date_range, Index, DataFrame, Series, Timestamp
from pandas.util import testing as tm
class TestDatetimeIndex(object):
    """Indexing tests for Series/DataFrames indexed by (tz-aware) datetimes."""
    def test_setitem_with_datetime_tz(self):
        """Mask-based .loc setitem round-trips on tz-aware and naive indexes."""
        # GH 16889
        # support .loc with alignment and tz-aware DatetimeIndex
        mask = np.array([True, False, True, False])
        idx = pd.date_range('20010101', periods=4, tz='UTC')
        df = pd.DataFrame({'a': np.arange(4)}, index=idx).astype('float64')
        result = df.copy()
        result.loc[mask, :] = df.loc[mask, :]
        tm.assert_frame_equal(result, df)
        result = df.copy()
        result.loc[mask] = df.loc[mask]
        tm.assert_frame_equal(result, df)
        # same checks without a timezone on the index
        idx = pd.date_range('20010101', periods=4)
        df = pd.DataFrame({'a': np.arange(4)}, index=idx).astype('float64')
        result = df.copy()
        result.loc[mask, :] = df.loc[mask, :]
        tm.assert_frame_equal(result, df)
        result = df.copy()
        result.loc[mask] = df.loc[mask]
        tm.assert_frame_equal(result, df)
    def test_indexing_with_datetime_tz(self):
        """Row/element indexing of frames holding datetime64tz columns."""
        # GH 8260
        # support datetime64 with tz
        idx = Index(date_range('20130101', periods=3, tz='US/Eastern'),
                    name='foo')
        dr = date_range('20130110', periods=3)
        df = DataFrame({'A': idx, 'B': dr})
        df['C'] = idx
        df.iloc[1, 1] = pd.NaT
        df.iloc[1, 2] = pd.NaT
        # indexing
        result = df.iloc[1]
        expected = Series([Timestamp('2013-01-02 00:00:00-0500',
                                     tz='US/Eastern'), np.nan, np.nan],
                          index=list('ABC'), dtype='object', name=1)
        tm.assert_series_equal(result, expected)
        result = df.loc[1]
        expected = Series([Timestamp('2013-01-02 00:00:00-0500',
                                     tz='US/Eastern'), np.nan, np.nan],
                          index=list('ABC'), dtype='object', name=1)
        tm.assert_series_equal(result, expected)
        # indexing - fast_xs
        df = DataFrame({'a': date_range('2014-01-01', periods=10, tz='UTC')})
        result = df.iloc[5]
        expected = Timestamp('2014-01-06 00:00:00+0000', tz='UTC', freq='D')
        assert result == expected
        result = df.loc[5]
        assert result == expected
        # indexing - boolean
        result = df[df.a > df.a[3]]
        expected = df.iloc[4:]
        tm.assert_frame_equal(result, expected)
        # indexing - setting an element
        df = DataFrame(data=pd.to_datetime(
            ['2015-03-30 20:12:32', '2015-03-12 00:11:11']), columns=['time'])
        df['new_col'] = ['new', 'old']
        df.time = df.set_index('time').index.tz_localize('UTC')
        v = df[df.new_col == 'new'].set_index('time').index.tz_convert(
            'US/Pacific')
        # trying to set a single element on a part of a different timezone
        # this converts to object
        df2 = df.copy()
        df2.loc[df2.new_col == 'new', 'time'] = v
        expected = Series([v[0], df.loc[1, 'time']], name='time')
        tm.assert_series_equal(df2.time, expected)
        v = df.loc[df.new_col == 'new', 'time'] + pd.Timedelta('1s')
        df.loc[df.new_col == 'new', 'time'] = v
        tm.assert_series_equal(df.loc[df.new_col == 'new', 'time'], v)
    def test_consistency_with_tz_aware_scalar(self):
        """All scalar accessors return the same tz-aware Timestamp."""
        # GH 12938
        # various ways of indexing the same tz-aware scalar
        df = Series([Timestamp('2016-03-30 14:35:25',
                               tz='Europe/Brussels')]).to_frame()
        df = pd.concat([df, df]).reset_index(drop=True)
        expected = Timestamp('2016-03-30 14:35:25+0200',
                             tz='Europe/Brussels')
        result = df[0][0]
        assert result == expected
        result = df.iloc[0, 0]
        assert result == expected
        result = df.loc[0, 0]
        assert result == expected
        result = df.iat[0, 0]
        assert result == expected
        result = df.at[0, 0]
        assert result == expected
        result = df[0].loc[0]
        assert result == expected
        result = df[0].at[0]
        assert result == expected
    def test_indexing_with_datetimeindex_tz(self):
        """getitem/setitem on a Series keyed by a tz-aware DatetimeIndex."""
        # GH 12050
        # indexing on a series with a datetimeindex with tz
        index = pd.date_range('2015-01-01', periods=2, tz='utc')
        ser = pd.Series(range(2), index=index,
                        dtype='int64')
        # list-like indexing
        for sel in (index, list(index)):
            # getitem
            tm.assert_series_equal(ser[sel], ser)
            # setitem
            result = ser.copy()
            result[sel] = 1
            expected = pd.Series(1, index=index)
            tm.assert_series_equal(result, expected)
            # .loc getitem
            tm.assert_series_equal(ser.loc[sel], ser)
            # .loc setitem
            result = ser.copy()
            result.loc[sel] = 1
            expected = pd.Series(1, index=index)
            tm.assert_series_equal(result, expected)
        # single element indexing
        # getitem
        assert ser[index[1]] == 1
        # setitem
        result = ser.copy()
        result[index[1]] = 5
        expected = pd.Series([0, 5], index=index)
        tm.assert_series_equal(result, expected)
        # .loc getitem
        assert ser.loc[index[1]] == 1
        # .loc setitem
        result = ser.copy()
        result.loc[index[1]] = 5
        expected = pd.Series([0, 5], index=index)
        tm.assert_series_equal(result, expected)
    def test_partial_setting_with_datetimelike_dtype(self):
        """Partial-column setitem from a DatetimeIndex produces NaT padding."""
        # GH 9478
        # a datetimeindex alignment issue with partial setting
        df = pd.DataFrame(np.arange(6.).reshape(3, 2), columns=list('AB'),
                          index=pd.date_range('1/1/2000', periods=3,
                                              freq='1H'))
        expected = df.copy()
        expected['C'] = [expected.index[0]] + [pd.NaT, pd.NaT]
        mask = df.A < 1
        df.loc[mask, 'C'] = df.loc[mask].index
        tm.assert_frame_equal(df, expected)
    def test_loc_setitem_datetime(self):
        """Enlargement via .loc works for every datetime-like scalar flavor."""
        # GH 9516
        dt1 = Timestamp('20130101 09:00:00')
        dt2 = Timestamp('20130101 10:00:00')
        for conv in [lambda x: x, lambda x: x.to_datetime64(),
                     lambda x: x.to_pydatetime(), lambda x: np.datetime64(x)]:
            df = pd.DataFrame()
            df.loc[conv(dt1), 'one'] = 100
            df.loc[conv(dt2), 'one'] = 200
            expected = DataFrame({'one': [100.0, 200.0]}, index=[dt1, dt2])
            tm.assert_frame_equal(df, expected)
    def test_series_partial_set_datetime(self):
        """.loc with lists of Timestamps: duplicates kept, misses become NaN."""
        # GH 11497
        idx = date_range('2011-01-01', '2011-01-02', freq='D', name='idx')
        ser = Series([0.1, 0.2], index=idx, name='s')
        result = ser.loc[[Timestamp('2011-01-01'), Timestamp('2011-01-02')]]
        exp = Series([0.1, 0.2], index=idx, name='s')
        tm.assert_series_equal(result, exp, check_index_type=True)
        keys = [Timestamp('2011-01-02'), Timestamp('2011-01-02'),
                Timestamp('2011-01-01')]
        exp = Series([0.2, 0.2, 0.1], index=pd.DatetimeIndex(keys, name='idx'),
                     name='s')
        tm.assert_series_equal(ser.loc[keys], exp, check_index_type=True)
        keys = [Timestamp('2011-01-03'), Timestamp('2011-01-02'),
                Timestamp('2011-01-03')]
        exp = Series([np.nan, 0.2, np.nan],
                     index=pd.DatetimeIndex(keys, name='idx'), name='s')
        # missing labels via .loc are deprecated, hence the warning check
        with tm.assert_produces_warning(FutureWarning,
                                        check_stacklevel=False):
            tm.assert_series_equal(ser.loc[keys], exp, check_index_type=True)
    def test_series_partial_set_period(self):
        """Same partial-set semantics as above, for PeriodIndex keys."""
        # GH 11497
        idx = pd.period_range('2011-01-01', '2011-01-02', freq='D', name='idx')
        ser = Series([0.1, 0.2], index=idx, name='s')
        result = ser.loc[[pd.Period('2011-01-01', freq='D'),
                          pd.Period('2011-01-02', freq='D')]]
        exp = Series([0.1, 0.2], index=idx, name='s')
        tm.assert_series_equal(result, exp, check_index_type=True)
        keys = [pd.Period('2011-01-02', freq='D'),
                pd.Period('2011-01-02', freq='D'),
                pd.Period('2011-01-01', freq='D')]
        exp = Series([0.2, 0.2, 0.1], index=pd.PeriodIndex(keys, name='idx'),
                     name='s')
        tm.assert_series_equal(ser.loc[keys], exp, check_index_type=True)
        keys = [pd.Period('2011-01-03', freq='D'),
                pd.Period('2011-01-02', freq='D'),
                pd.Period('2011-01-03', freq='D')]
        exp = Series([np.nan, 0.2, np.nan],
                     index=pd.PeriodIndex(keys, name='idx'), name='s')
        with tm.assert_produces_warning(FutureWarning,
                                        check_stacklevel=False):
            result = ser.loc[keys]
        tm.assert_series_equal(result, exp)
| NixaSoftware/CVis | venv/lib/python2.7/site-packages/pandas/tests/indexing/test_datetime.py | Python | apache-2.0 | 9,075 |
# ******************************************************
# * EXEMPLE SCRIPT : Using Property file
# * - Check 2 conditions with "Logical AND"
# *****************************************************/
# NOTE(review): this is a Jython selection script run inside the ProActive
# scheduler; the org.ow2.* imports resolve against the scheduler's JVM
# classpath, and the script communicates its verdict through the
# 'selected' variable read by the engine.
from org.ow2.proactive.scripting.helper.selection import SelectionUtils
from org.ow2.proactive.scripting.helper.selection import Condition
from org.ow2.proactive.scripting.helper.selection import Conditions
#--We set our 2 conditions by creating "Condition" objects
condition1 = Condition("ram", SelectionUtils.GREATER_THAN, "1024");
condition2 = Condition("architecture", SelectionUtils.CONTAINS, "6");
#--We set a table of "Condition" objects
conditions = Conditions();
#--and put our 2 objects
conditions.add(condition1);
conditions.add(condition2);
#--Evaluation by calling CheckConfig method (all conditions must hold)
if SelectionUtils.checkProperties("samplePropertiesFile.txt",conditions):
    print "PY>selected = true";
    selected = True;
else:
    print "PY>selected = false";
    selected = False;
| acontes/scheduling | samples/scripts/selection/checkProperties.py | Python | agpl-3.0 | 985 |
#!/usr/bin/python
#~ This file is part of NoStaples.
#~ NoStaples is free software: you can redistribute it and/or modify
#~ it under the terms of the GNU General Public License as published by
#~ the Free Software Foundation, either version 3 of the License, or
#~ (at your option) any later version.
#~ NoStaples is distributed in the hope that it will be useful,
#~ but WITHOUT ANY WARRANTY; without even the implied warranty of
#~ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#~ GNU General Public License for more details.
#~ You should have received a copy of the GNU General Public License
#~ along with NoStaples. If not, see <http://www.gnu.org/licenses/>.
"""
This module holds the L{DocumentController}, which manages interaction
between the L{DocumentModel} and L{DocumentView}.
"""
import logging
import gtk
from gtkmvc.controller import Controller
import nostaples.utils.gui
class DocumentController(Controller):
    """
    Manages interaction between the L{DocumentModel} and
    L{DocumentView}.
    """
    # SETUP METHODS
    def __init__(self, application):
        """
        Constructs the DocumentsController, as well as necessary
        sub-controllers.

        @param application: the application facade providing access to the
            models, views and sibling controllers.
        """
        self.application = application
        Controller.__init__(self, application.get_document_model())
        preferences_model = application.get_preferences_model()
        preferences_model.register_observer(self)
        status_controller = application.get_status_controller()
        # statusbar context used for the progress messages pushed below
        self.status_context = \
            status_controller.get_context_id(self.__class__.__name__)
        application.get_document_model().connect(
            'row-changed', self.on_document_model_row_changed)
        self.log = logging.getLogger(self.__class__.__name__)
        self.log.debug('Created.')
    def register_view(self, view):
        """
        Registers this controller with a view.

        Hooks the thumbnail tree view up to the document model and wires
        the selection/click/menu signals.
        """
        Controller.register_view(self, view)
        view['thumbnails_tree_view'].set_model(
            self.application.get_document_model())
        view['thumbnails_tree_view'].get_selection().connect(
            'changed', self.on_thumbnails_tree_view_selection_changed)
        view['thumbnails_tree_view'].connect(
            'button-press-event', self.on_thumbnails_tree_view_button_press_event)
        view['delete_menu_item'].connect(
            "activate", self.on_delete_menu_item_activated)
        self.log.debug('%s registered.', view.__class__.__name__)
    # USER INTERFACE CALLBACKS
    def on_thumbnails_tree_view_selection_changed(self, selection):
        """
        Set the current visible page to the be newly selected one.
        """
        document_model = self.application.get_document_model()
        document_view = self.application.get_document_view()
        page_controller = self.application.get_page_controller()
        selection_iter = selection.get_selected()[1]
        if selection_iter:
            page_model = document_model.get_value(selection_iter, 0)
            page_controller.set_current_page_model(page_model)
            # sync the adjustment sliders with the newly selected page
            document_view['brightness_scale'].set_value(page_model.brightness)
            document_view['contrast_scale'].set_value(page_model.contrast)
            document_view['sharpness_scale'].set_value(page_model.sharpness)
    def on_thumbnails_tree_view_button_press_event(self, treeview, event):
        """
        Popup the context menu when the user right-clicks.
        Method from U{http://faq.pygtk.org/index.py?req=show&file=faq13.017.htp}.

        NOTE(review): returning True here (right-click on empty space) stops
        further signal handling; a right-click on a row returns None so GTK
        also updates the selection -- confirm this asymmetry is intended.
        """
        document_view = self.application.get_document_view()
        if event.button == 3:
            info = treeview.get_path_at_pos(int(event.x), int(event.y))
            if info is not None:
                document_view['thumbnails_context_menu'].popup(
                    None, None, None, event.button, event.time)
            else:
                return True
    def on_document_model_row_changed(self, model, path, iter):
        """
        Select a new rows when they are added.
        Per the following FAQ entry, must use row-changed event,
        not row-inserted.
        U{http://faq.pygtk.org/index.py?file=faq13.028.htp&req=show}
        Regarding the voodoo in this method:
        Whenever an adjustment causes page_model.thumbnail_pixbuf
        to be updated, the treeview emits a row-changed signal.
        If this method is allowed to handle those changes, then the
        change will be treated as a new row and it will be
        selected.  This causes all sorts of unusual problems.  To
        avoid this, all changes to a page_model that will cause
        thumbnail_pixbuf to be updated should be preceeded by
        setting the manually_updating_row flag so that this event
        can bypass them appropriately.
        """
        document_model = self.application.get_document_model()
        document_view = self.application.get_document_view()
        if document_model.manually_updating_row:
            document_model.manually_updating_row = False
        else:
            document_view['thumbnails_tree_view'].get_selection().select_path(path)
            # TICKET #49
            # This code causes the application to hang:
            # if document_model.adjust_all_pages:
            # page_model = document_model.get_value(iter, 0)
            # document_model.manually_updating_row = True
            # page_model.set_adjustments(
            # document_view['brightness_scale'].get_value(),
            # document_view['contrast_scale'].get_value(),
            # document_view['sharpness_scale'].get_value())
    def on_brightness_scale_value_changed(self, widget):
        """
        Sets the brightness of the current page or,
        if "Apply to all pages?" is checked, all scanned
        pages.
        See L{on_document_model_row_changed} for an
        explanation of the voodoo in this method.
        """
        document_model = self.application.get_document_model()
        document_view = self.application.get_document_view()
        page_model = self.application.get_current_page_model()
        status_controller = self.application.get_status_controller()
        if document_model.adjust_all_pages:
            i = 1
            page_iter = document_model.get_iter_first()
            while page_iter:
                status_controller.push(self.status_context, 'Updating page %i...' % i)
                nostaples.utils.gui.flush_pending_events()
                page = document_model.get(page_iter, 0)[0]
                document_model.manually_updating_row = True
                page.brightness = \
                    document_view['brightness_scale'].get_value()
                page_iter = document_model.iter_next(page_iter)
                status_controller.pop(self.status_context)
                i = i + 1
        else:
            status_controller.push(self.status_context, 'Updating current page...')
            nostaples.utils.gui.flush_pending_events()
            page_model.brightness = \
                document_view['brightness_scale'].get_value()
            status_controller.pop(self.status_context)
    def on_contrast_scale_value_changed(self, widget):
        """
        Sets the contrast of the current page or,
        if "Apply to all pages?" is checked, all scanned
        pages.
        See L{on_document_model_row_changed} for an
        explanation of the voodoo in this method.
        """
        document_model = self.application.get_document_model()
        document_view = self.application.get_document_view()
        page_model = self.application.get_current_page_model()
        status_controller = self.application.get_status_controller()
        if document_model.adjust_all_pages:
            i = 1
            page_iter = document_model.get_iter_first()
            while page_iter:
                status_controller.push(self.status_context, 'Updating page %i...' % i)
                nostaples.utils.gui.flush_pending_events()
                page = document_model.get(page_iter, 0)[0]
                document_model.manually_updating_row = True
                page.contrast = \
                    document_view['contrast_scale'].get_value()
                page_iter = document_model.iter_next(page_iter)
                status_controller.pop(self.status_context)
                i = i + 1
        else:
            status_controller.push(self.status_context, 'Updating current page...')
            nostaples.utils.gui.flush_pending_events()
            page_model.contrast = \
                document_view['contrast_scale'].get_value()
            status_controller.pop(self.status_context)
    def on_sharpness_scale_value_changed(self, widget):
        """
        Sets the sharpness of the current page or,
        if "Apply to all pages?" is checked, all scanned
        pages.
        See L{on_document_model_row_changed} for an
        explanation of the voodoo in this method.
        """
        document_model = self.application.get_document_model()
        document_view = self.application.get_document_view()
        page_model = self.application.get_current_page_model()
        status_controller = self.application.get_status_controller()
        if document_model.adjust_all_pages:
            i = 1
            page_iter = document_model.get_iter_first()
            while page_iter:
                status_controller.push(self.status_context, 'Updating page %i...' % i)
                nostaples.utils.gui.flush_pending_events()
                page = document_model.get(page_iter, 0)[0]
                document_model.manually_updating_row = True
                page.sharpness = \
                    document_view['sharpness_scale'].get_value()
                page_iter = document_model.iter_next(page_iter)
                status_controller.pop(self.status_context)
                i = i + 1
        else:
            status_controller.push(self.status_context, 'Updating current page...')
            nostaples.utils.gui.flush_pending_events()
            page_model.sharpness = \
                document_view['sharpness_scale'].get_value()
            status_controller.pop(self.status_context)
    def on_adjust_all_pages_check_toggled(self, checkbox):
        """
        When this box is checked, synchronize all page
        adjustments.
        See L{on_document_model_row_changed} for an
        explanation of the voodoo in this method.
        # TODO: should set hourglass cursor
        """
        document_model = self.application.get_document_model()
        document_view = self.application.get_document_view()
        status_controller = self.application.get_status_controller()
        document_model.adjust_all_pages = checkbox.get_active()
        if document_model.adjust_all_pages:
            i = 1
            page_iter = document_model.get_iter_first()
            while page_iter:
                status_controller.push(self.status_context, 'Updating page %i...' % i)
                nostaples.utils.gui.flush_pending_events()
                page = document_model.get(page_iter, 0)[0]
                document_model.manually_updating_row = True
                page.set_adjustments(
                    document_view['brightness_scale'].get_value(),
                    document_view['contrast_scale'].get_value(),
                    document_view['sharpness_scale'].get_value())
                page_iter = document_model.iter_next(page_iter)
                status_controller.pop(self.status_context)
                i = i + 1
    def on_delete_menu_item_activated(self, menu_item):
        """Delete the currently selected page."""
        self.delete_selected()
    # PROPERTY CALLBACKS
    def property_count_value_change(self, model, old_value, new_value):
        """
        If all pages have been removed/deleted, switch to the null_page
        model for display.
        """
        if new_value == 0:
            self.application.get_page_controller().set_current_page_model(
                self.application.get_null_page_model())
    def property_thumbnail_size_value_change(self, model, old_value, new_value):
        """
        Update the size of the thumbnail column and redraw all existing
        thumbnails.
        """
        document_model = self.application.get_document_model()
        document_view = self.application.get_document_view()
        document_view['thumbnails_column'].set_fixed_width(new_value)
        page_iter = document_model.get_iter_first()
        while page_iter:
            page = document_model.get(page_iter, 0)[0]
            page._update_thumbnail_pixbuf()
            page_iter = document_model.iter_next(page_iter)
    # PUBLIC METHODS
    def toggle_thumbnails_visible(self, visible):
        """Toggle the visibility of the thumbnails view."""
        document_view = self.application.get_document_view()
        if visible:
            document_view['thumbnails_scrolled_window'].show()
        else:
            document_view['thumbnails_scrolled_window'].hide()
    def toggle_adjustments_visible(self, visible):
        """Toggles the visibility of the adjustments view."""
        document_view = self.application.get_document_view()
        if visible:
            document_view['adjustments_alignment'].show()
        else:
            document_view['adjustments_alignment'].hide()
    def delete_selected(self):
        """
        Move the selection to the next page and delete the
        currently selected page.
        Selection is done here in place of catching the model's row-deleted
        signal, which seems like it I{should} be the proper way to
        do it.  Unfortunantly, when trying to do it that way it
        was impossible to actually select another row as part of
        the event.  This seems to work much more reliably.
        """
        document_model = self.application.get_document_model()
        document_view = self.application.get_document_view()
        selection_iter = document_view['thumbnails_tree_view'].get_selection().get_selected()[1]
        if selection_iter:
            # Get the row element of the path
            row = document_model.get_path(selection_iter)[0]
            # Don't switch selections if this is the only page
            if document_model.count == 1:
                pass
            else:
                # Select previous row if deleting the last page
                if row == document_model.count - 1:
                    document_view['thumbnails_tree_view'].get_selection().select_path(row - 1)
                # Otherwise select next row
                else:
                    document_view['thumbnails_tree_view'].get_selection().select_path(row + 1)
            # Remove the row that was selected when the delete request was made
            document_model.remove(selection_iter)
        else:
            self.log.warn('Method delete_selected was called, but no selection has been made.')
    def rotate_counter_clockwise(self, rotate_all):
        """
        Rotate the page counter-clockwise.

        @param rotate_all: when true, rotate every scanned page; otherwise
            only the current page.
        """
        status_controller = self.application.get_status_controller()
        if not rotate_all:
            page_model = self.application.get_current_page_model()
            status_controller.push(self.status_context, 'Rotating current page...')
            nostaples.utils.gui.flush_pending_events()
            page_model.rotate_counter_clockwise()
            status_controller.pop(self.status_context)
        else:
            document_model = self.application.get_document_model()
            page_iter = document_model.get_iter_first()
            i = 1
            while page_iter:
                page_model = document_model.get_value(page_iter, 0)
                status_controller.push(self.status_context, 'Rotating page %i...' % i)
                nostaples.utils.gui.flush_pending_events()
                page_model.rotate_counter_clockwise()
                status_controller.pop(self.status_context)
                page_iter = document_model.iter_next(page_iter)
                i = i + 1
    def rotate_clockwise(self, rotate_all):
        """
        Rotate the page clockwise.

        @param rotate_all: when true, rotate every scanned page; otherwise
            only the current page.
        """
        status_controller = self.application.get_status_controller()
        if not rotate_all:
            page_model = self.application.get_current_page_model()
            status_controller.push(self.status_context, 'Rotating current page...')
            nostaples.utils.gui.flush_pending_events()
            page_model.rotate_clockwise()
            status_controller.pop(self.status_context)
        else:
            document_model = self.application.get_document_model()
            page_iter = document_model.get_iter_first()
            i = 1
            while page_iter:
                page_model = document_model.get_value(page_iter, 0)
                status_controller.push(self.status_context, 'Rotating page %i...' % i)
                nostaples.utils.gui.flush_pending_events()
                page_model.rotate_clockwise()
                status_controller.pop(self.status_context)
                page_iter = document_model.iter_next(page_iter)
                i = i + 1
    def goto_first_page(self):
        """Select the first scanned page."""
        document_view = self.application.get_document_view()
        document_view['thumbnails_tree_view'].get_selection().select_path(0)
    def goto_previous_page(self):
        """Select the previous scanned page."""
        document_model = self.application.get_document_model()
        document_view = self.application.get_document_view()
        iter = document_view['thumbnails_tree_view'].get_selection().get_selected()[1]
        row = document_model.get_path(iter)[0]
        # already at the first page: nothing to do
        if row == 0:
            return
        document_view['thumbnails_tree_view'].get_selection().select_path(row - 1)
    def goto_next_page(self):
        """Select the next scanned page."""
        document_model = self.application.get_document_model()
        document_view = self.application.get_document_view()
        iter = document_view['thumbnails_tree_view'].get_selection().get_selected()[1]
        row = document_model.get_path(iter)[0]
        # selecting an out-of-range path is ignored by GTK, so no bounds check
        document_view['thumbnails_tree_view'].get_selection().select_path(row + 1)
    def goto_last_page(self):
        """
        Select the last scanned page.
        Handles invalid paths gracefully without an exception case.
        """
        document_model = self.application.get_document_model()
        document_view = self.application.get_document_view()
        # For an empty model len(document_model) - 1 is -1, an invalid
        # path; per the docstring above, select_path tolerates this
        # without raising.
        document_view['thumbnails_tree_view'].get_selection().select_path(len(document_model) - 1) | onyxfish/nostaples | controllers/document.py | Python | gpl-3.0 | 19,586 |
from django.conf.urls import url
from corehq.apps.receiverwrapper.views import post, secure_post
# Form-submission endpoints for the receiver wrapper.  The optional
# app_id URL kwarg scopes a submission to a specific application; the
# 'secure' routes use secure_post (presumably the authenticated variant
# -- see corehq.apps.receiverwrapper.views for the actual contract).
urlpatterns = [
    url(r'^/?$', post, name='receiver_post'),
    url(r'^/secure/(?P<app_id>[\w-]+)/$', secure_post, name='receiver_secure_post_with_app_id'),
    url(r'^/secure/$', secure_post, name='receiver_secure_post'),
    # odk urls
    url(r'^/submission/?$', post, name="receiver_odk_post"),
    url(r'^/(?P<app_id>[\w-]+)/$', post, name='receiver_post_with_app_id'),
]
| qedsoftware/commcare-hq | corehq/apps/receiverwrapper/urls.py | Python | bsd-3-clause | 479 |
import re
import collections
from enum import Enum
from ydk._core._dm_meta_info import _MetaInfoClassMember, _MetaInfoClass, _MetaInfoEnum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk._core._dm_meta_info import ATTRIBUTE, REFERENCE_CLASS, REFERENCE_LIST, REFERENCE_LEAFLIST, REFERENCE_IDENTITY_CLASS, REFERENCE_ENUM_CLASS, REFERENCE_BITS, REFERENCE_UNION, ANYXML_CLASS
from ydk.errors import YPYError, YPYModelError
from ydk.providers._importer import _yang_ns
# Auto-generated YDK meta-information registry for the
# CISCO-SUBSCRIBER-SESSION-TC-MIB YANG module: maps each generated enum
# class name to its _MetaInfoEnum descriptor (literal/value-name pairs
# plus the owning module name and namespace).  Do not edit by hand.
_meta_table = {
    'SubsessiontypeEnum' : _MetaInfoEnum('SubsessiontypeEnum', 'ydk.models.cisco_ios_xe.CISCO_SUBSCRIBER_SESSION_TC_MIB',
        {
            'all':'all',
            'other':'other',
            'pppSubscriber':'pppSubscriber',
            'pppoeSubscriber':'pppoeSubscriber',
            'l2tpSubscriber':'l2tpSubscriber',
            'l2fSubscriber':'l2fSubscriber',
            'ipInterfaceSubscriber':'ipInterfaceSubscriber',
            'ipPktSubscriber':'ipPktSubscriber',
            'ipDhcpv4Subscriber':'ipDhcpv4Subscriber',
            'ipRadiusSubscriber':'ipRadiusSubscriber',
            'l2MacSubscriber':'l2MacSubscriber',
            'l2Dhcpv4Subscriber':'l2Dhcpv4Subscriber',
            'l2RadiusSubscriber':'l2RadiusSubscriber',
        }, 'CISCO-SUBSCRIBER-SESSION-TC-MIB', _yang_ns._namespaces['CISCO-SUBSCRIBER-SESSION-TC-MIB']),
    'SubsessionstateEnum' : _MetaInfoEnum('SubsessionstateEnum', 'ydk.models.cisco_ios_xe.CISCO_SUBSCRIBER_SESSION_TC_MIB',
        {
            'other':'other',
            'pending':'pending',
            'up':'up',
        }, 'CISCO-SUBSCRIBER-SESSION-TC-MIB', _yang_ns._namespaces['CISCO-SUBSCRIBER-SESSION-TC-MIB']),
    'SubsessionredundancymodeEnum' : _MetaInfoEnum('SubsessionredundancymodeEnum', 'ydk.models.cisco_ios_xe.CISCO_SUBSCRIBER_SESSION_TC_MIB',
        {
            'none':'none',
            'other':'other',
            'active':'active',
            'standby':'standby',
        }, 'CISCO-SUBSCRIBER-SESSION-TC-MIB', _yang_ns._namespaces['CISCO-SUBSCRIBER-SESSION-TC-MIB']),
}
| 111pontes/ydk-py | cisco-ios-xe/ydk/models/cisco_ios_xe/_meta/_CISCO_SUBSCRIBER_SESSION_TC_MIB.py | Python | apache-2.0 | 2,087 |
from unittest import TestCase
import inspect
from ..sipmessaging import SIPURI
from ..sipmessaging import ContentLengthSIPHeaderField
from ..sipmessaging import ViaSIPHeaderField
from ..sipmessaging import AcceptSIPHeaderField
from ..sipmessaging import AcceptEncodingSIPHeaderField
from ..sipmessaging import AcceptLanguageSIPHeaderField
from ..sipmessaging import AllowSIPHeaderField
from ..sipmessaging import AuthorizationSIPHeaderField
from ..sipmessaging import CSeqSIPHeaderField
from ..sipmessaging import CallIDSIPHeaderField
from ..sipmessaging import CallInfoSIPHeaderField
from ..sipmessaging import ContactSIPHeaderField
from ..sipmessaging import ContentDispositionSIPHeaderField
from ..sipmessaging import ContentTypeSIPHeaderField
from ..sipmessaging import DateSIPHeaderField
from ..sipmessaging import ExpiresSIPHeaderField
from ..sipmessaging import FromSIPHeaderField
from ..sipmessaging import MaxForwardsSIPHeaderField
from ..sipmessaging import RecordRouteSIPHeaderField
from ..sipmessaging import RequireSIPHeaderField
from ..sipmessaging import RetryAfterSIPHeaderField
from ..sipmessaging import RouteSIPHeaderField
from ..sipmessaging import ServerSIPHeaderField
from ..sipmessaging import SessionExpiresSIPHeaderField
from ..sipmessaging import SupportedSIPHeaderField
from ..sipmessaging import TimestampSIPHeaderField
from ..sipmessaging import ToSIPHeaderField
from ..sipmessaging import UserAgentSIPHeaderField
from ..sipmessaging import WWWAuthenticateSIPHeaderField
from ..sipmessaging import WarningSIPHeaderField
from ..sipmessaging import SubjectSIPHeaderField
from ..sipmessaging import ReferredBySIPHeaderField
from ..sipmessaging import ReferToSIPHeaderField
from ..sipmessaging import AllowEventsSIPHeaderField
from ..sipmessaging import EventSIPHeaderField
from ..sipmessaging import ContentEncodingSIPHeaderField
from ..sipmessaging import RAckSIPHeaderField
from ..sipmessaging import PChargeSIPHeaderField
from ..sipmessaging import ReplyToSIPHeaderField
from ..sipmessaging import UnsupportedSIPHeaderField
from ..sipmessaging import PAssertedIdentitySIPHeaderField
from ..sipmessaging import PPreferredIdentitySIPHeaderField
from ..sipmessaging import RemotePartyIDSIPHeaderField
from ..sipmessaging import AlertInfoSIPHeaderField
from ..sipmessaging import HistoryInfoSIPHeaderField
from ..sipmessaging import PCalledPartyIdSIPHeaderField
from ..sipmessaging import PRTPStatSIPHeaderField
from ..sipmessaging import PrivacySIPHeaderField
from ..sipmessaging import ProxyAuthenticateSIPHeaderField
from ..sipmessaging import ProxyAuthorizationSIPHeaderField
from ..sipmessaging import ProxyRequireSIPHeaderField
from ..sipmessaging import ReasonSIPHeaderField
from ..sipmessaging import RecordSessionExpiresSIPHeaderField
from ..sipmessaging import ReplacesSIPHeaderField
from ..sipmessaging import SubscriptionStateSIPHeaderField
from ..sipmessaging import MinExpiresSIPHeaderField
from ..sipmessaging import UnknownSIPHeaderField
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
class AbstractSIPMessageTestCase(TestCase):
@property
def canonicalStrings(self):
answer = []
for startLineString in self.canonicalStartLineStrings:
message_string_io = StringIO()
message_string_io.write(startLineString)
message_string_io.write("\r\n")
for header_field_string in self.canonicalHeaderFieldStrings:
message_string_io.write(header_field_string)
message_string_io.write("\r\n")
message_string_io.write("\r\n")
message_string_io.write(self.canonicalContent)
answer.append(message_string_io.getvalue())
return answer
@property
def sipMethodString(self):
raise NotImplementedError('call to abstract method ' + inspect.stack()[0][3])
@property
def sipMessageClassUnderTest(self):
raise NotImplementedError('call to abstract method ' + inspect.stack()[0][3])
@property
def canonicalStartLineStrings(self):
raise NotImplementedError('call to abstract method ' + inspect.stack()[0][3])
    @property
    def canonicalHeaderFieldStrings(self):
        """
        Canonical header-field lines (raw strings, without CRLF) shared
        by every canonical test message; the CSeq line embeds
        self.sipMethodString.
        """
        return ['From: <sip:200.25.3.150:5061>;tag=0ee8d3e272e31c9195299efc500',
                'To: <sip:example.com:5061>',
                'Call-ID: 0ee8d3e272e31c9195299efc500',
                'CSeq: 6711 ' + self.sipMethodString,
                'Max-Forwards: 70',
                'Via: SIP/2.0/TLS 200.25.3.150;branch=z9hG4bK0ee8d3e272e31ca195299efc500',
                'Via: SIP/2.0/TLS 200.25.3.250;branch=z9hG4bKfdkajhdiruyalkghjladksjf',
                'Via: SIP/2.0/TLS 200.25.3.255;branch=z9hG4bKduyroiuryaludhgviukfhlasf',
                'User-Agent: Example User Agent',
                'Contact: <sip:invalid@200.25.3.150:5061;transport=tls>',
                'Route: <sip:200.25.3.230:5061;transport=tls;lr>',
                'Route: <sip:200.25.3.231:5061;transport=tls;lr>',
                'Route: <sip:200.25.3.232:5061;transport=tls;lr>',
                'Expires: 0',
                'Accept: application/sdp,application/isup,application/dtmf,application/dtmf-relay,multipart/mixed',
                'Accept-Encoding: x-nortel-short',
                'Accept-Language: en-us,fr-fr',
                'Allow: ACK,BYE,CANCEL,INFO,INVITE,OPTIONS,REGISTER,SUBSCRIBE,UPDATE',
                'Authorization: Digest username="3122221000",realm="SomeRealm",nonce="1111790769596",uri="sip:3122211004@example.com",response="9bf77d8238664fe08dafd4d2abb6f1cb",algorithm=MD5',
                'Call-Info: <https://lsc14pa.example.com:443/pa/direct/pictureServlet?user=3126805100@example.com>;Purpose=icon',
                'Content-Disposition: session;handling=required',
                'Content-Type: application/sdp',
                'Date: Sat, 01 Feb 2014 22:07:34 GMT',
                'Record-Route: <sip:200.25.3.230:5061;transport=tls;lr>',
                'Record-Route: <sip:200.25.3.231:5061;transport=tls;lr>',
                'Record-Route: <sip:200.25.3.232:5061;transport=tls;lr>',
                'Require: sdp-anat',
                'Retry-After: 30',
                'Server: Blargomatic 2.0',
                # TODO: We will need to deal with the refresher parameter, i.e. we will need to
                # be able to specify parameter dictionaries to the new_for_attributes method for
                # Integer header fields.  Maybe even more generically; for all SIP header fields.
                # 'Session-Expires: 1200;refresher=uac',
                'Session-Expires: 1200',
                'Supported: 100rel,histinfo,join,replaces,sdp-anat,timer',
                'Timestamp: 1392061773',
                'WWW-Authenticate: Digest algorithm=MD5,nonce="1111790769596",realm="SomeRealm"',
                'Warning: 370 200.21.3.10 "Insufficient Bandwidth"',
                'X-RTP-Stat: PR=0;ER=0;PL=0;RB=0/0;DE=PCMU;EN=PCMU;JI=0;DL=0,0;IP=10.1.0.33:16384,132.52.127.200:20048',
                'x-channel: ds/ds1-3/12;IP=132.52.127.16',
                'Referred-By: <sip:6006665100@example.com;user=phone> ; CorrelationID="0508817f84e7ce64745ef9753e2fbff4664321a4@200.23.3.240"',
                'Refer-To: <sip:6006665499;rfrid=28661859@example.com;user=phone?x-nt-resource-priority=YNBvf.2j00qao>',
                'Subject: Need more boxes',
                'Referred-By: <sip:5556785103@example.com;user=phone> ; CorrelationID="348058f0947acec8745efd367e33542c5cb01436@192.168.0.3"',
                'Refer-To: <sip:5556645204@example.com:5064;user=phone;transport=udp>',
                'Allow-Events: dialog,message-summary',
                'Event: refer;id=10498',
                'Content-Encoding: gzip',
                'RAck: 1 1 INVITE',
                'P-Charge: <sip:6425555555@10.10.10.10>;npi=ISDN;noa=2',
                'Reply-To: Bob <sip:bob@biloxi.com>',
                'Unsupported: foo',
                'P-Asserted-Identity: "500 - SIP Test" <sip:500@192.168.0.3>',
                'P-Preferred-Identity: "User 5103" <sip:3126705103@192.168.0.3:5060>',
                'Remote-Party-ID: "1234567890" <sip:1234567890@192.168.1.195>;party=calling;privacy=off;screen=no',
                'Alert-Info: <cid:internal@example.com>;alert-type=internal',
                'History-Info: "555122221002" <sip:555122221002@example.com>;index=1.1',
                'P-Called-Party-Id: <sip:2135881@example.com;user=phone>',
                'P-RTP-Stat: PS=0,OS=0,PR=5429,OR=955504,PL=0,JI=0,LA=0,DU=108',
                'Privacy: id',
                'Proxy-Authenticate: Digest realm="1.1.1.1", nonce="8dd33eb2-e3c4-11e5-a55b-83b175043a03", algorithm=MD5, qop="auth"',
                'Proxy-Authorization: Digest username="100",realm="209.105.255.124",nonce="7bebcf02-e01d-11e5-931d-83b175043a03",uri="sip:90011@209.105.255.124",response="63faaa2604cae36e9b38f2d5cd0abba4",cnonce="4b41f53e6f00c05",nc=00000001,qop="auth",algorithm=MD5',
                'Proxy-Require: foo',
                'Reason: Q.850; cause=16; reason=Terminated',
                'Record-Session-Expires: 1200;refresher=uac',
                'Replaces: 19cd9bf094ff5f0c1745ef975c1cf65d34beb908f@192.168.0.3;to-tag=29bd570-f0a1ec8-13c5-50029-aa872-7d78286-aa872;from-tag=7ca31b4791',
                'Subscription-State: active;reason=deactivated;expires=50',
                'Min-Expires: 1800',
                'Content-Length: 11']  # This last one actually instantiates a ContentLengthSIPHeaderField.
    # TODO: Do we want to test different contents?  If so, our assertions need to be aware of that.
    @property
    def canonicalContent(self):
        # The fixed 11-byte message body shared by every canonical test
        # message (matches the canonical 'Content-Length: 11' header).
        return 'Foo Content'
    @property
    def oneBigHeaderStringForAssertion(self):
        """
        The full canonical header section as one CRLF-joined string, for
        direct equality assertions; the SIPMETHODTOREPLACE placeholder is
        substituted with self.sipMethodString.
        """
        answer = ('From: <sip:200.25.3.150:5061>;tag=0ee8d3e272e31c9195299efc500\r\n'
                  'To: <sip:example.com:5061>\r\n'
                  'Call-ID: 0ee8d3e272e31c9195299efc500\r\n'
                  'CSeq: 6711 SIPMETHODTOREPLACE\r\n'
                  'Max-Forwards: 70\r\n'
                  'Via: SIP/2.0/TLS 200.25.3.150;branch=z9hG4bK0ee8d3e272e31ca195299efc500\r\n'
                  'Via: SIP/2.0/TLS 200.25.3.250;branch=z9hG4bKfdkajhdiruyalkghjladksjf\r\n'
                  'Via: SIP/2.0/TLS 200.25.3.255;branch=z9hG4bKduyroiuryaludhgviukfhlasf\r\n'
                  'User-Agent: Example User Agent\r\n'
                  'Contact: <sip:invalid@200.25.3.150:5061;transport=tls>\r\n'
                  'Route: <sip:200.25.3.230:5061;transport=tls;lr>\r\n'
                  'Route: <sip:200.25.3.231:5061;transport=tls;lr>\r\n'
                  'Route: <sip:200.25.3.232:5061;transport=tls;lr>\r\n'
                  'Expires: 0\r\n'
                  'Accept: application/sdp,application/isup,application/dtmf,application/dtmf-relay,multipart/mixed\r\n'
                  'Accept-Encoding: x-nortel-short\r\n'
                  'Accept-Language: en-us,fr-fr\r\n'
                  'Allow: ACK,BYE,CANCEL,INFO,INVITE,OPTIONS,REGISTER,SUBSCRIBE,UPDATE\r\n'
                  'Authorization: Digest username="3122221000",realm="SomeRealm",nonce="1111790769596",uri="sip:3122211004@example.com",response="9bf77d8238664fe08dafd4d2abb6f1cb",algorithm=MD5\r\n'
                  'Call-Info: <https://lsc14pa.example.com:443/pa/direct/pictureServlet?user=3126805100@example.com>;Purpose=icon\r\n'
                  'Content-Disposition: session;handling=required\r\n'
                  'Content-Type: application/sdp\r\n'
                  'Date: Sat, 01 Feb 2014 22:07:34 GMT\r\n'
                  'Record-Route: <sip:200.25.3.230:5061;transport=tls;lr>\r\n'
                  'Record-Route: <sip:200.25.3.231:5061;transport=tls;lr>\r\n'
                  'Record-Route: <sip:200.25.3.232:5061;transport=tls;lr>\r\n'
                  'Require: sdp-anat\r\n'
                  'Retry-After: 30\r\n'
                  'Server: Blargomatic 2.0\r\n'
                  # TODO: We will need to deal with the refresher parameter, i.e. we will need to
                  # be able to specify parameter dictionaries to the new_for_attributes method for
                  # Integer header fields.  Maybe even more generically; for all SIP header fields.
                  # 'Session-Expires: 1200;refresher=uac\r\n'
                  'Session-Expires: 1200\r\n'
                  'Supported: 100rel,histinfo,join,replaces,sdp-anat,timer\r\n'
                  'Timestamp: 1392061773\r\n'
                  'WWW-Authenticate: Digest algorithm=MD5,nonce="1111790769596",realm="SomeRealm"\r\n'
                  'Warning: 370 200.21.3.10 "Insufficient Bandwidth"\r\n'
                  'X-RTP-Stat: PR=0;ER=0;PL=0;RB=0/0;DE=PCMU;EN=PCMU;JI=0;DL=0,0;IP=10.1.0.33:16384,132.52.127.200:20048\r\n'
                  'x-channel: ds/ds1-3/12;IP=132.52.127.16\r\n'
                  'Referred-By: <sip:6006665100@example.com;user=phone> ; CorrelationID="0508817f84e7ce64745ef9753e2fbff4664321a4@200.23.3.240"\r\n'
                  'Refer-To: <sip:6006665499;rfrid=28661859@example.com;user=phone?x-nt-resource-priority=YNBvf.2j00qao>\r\n'
                  'Subject: Need more boxes\r\n'
                  'Referred-By: <sip:5556785103@example.com;user=phone> ; CorrelationID="348058f0947acec8745efd367e33542c5cb01436@192.168.0.3"\r\n'
                  'Refer-To: <sip:5556645204@example.com:5064;user=phone;transport=udp>\r\n'
                  'Allow-Events: dialog,message-summary\r\n'
                  'Event: refer;id=10498\r\n'
                  'Content-Encoding: gzip\r\n'
                  'RAck: 1 1 INVITE\r\n'
                  'P-Charge: <sip:6425555555@10.10.10.10>;npi=ISDN;noa=2\r\n'
                  'Reply-To: Bob <sip:bob@biloxi.com>\r\n'
                  'Unsupported: foo\r\n'
                  'P-Asserted-Identity: "500 - SIP Test" <sip:500@192.168.0.3>\r\n'
                  'P-Preferred-Identity: "User 5103" <sip:3126705103@192.168.0.3:5060>\r\n'
                  'Remote-Party-ID: "1234567890" <sip:1234567890@192.168.1.195>;party=calling;privacy=off;screen=no\r\n'
                  'Alert-Info: <cid:internal@example.com>;alert-type=internal\r\n'
                  'History-Info: "555122221002" <sip:555122221002@example.com>;index=1.1\r\n'
                  'P-Called-Party-Id: <sip:2135881@example.com;user=phone>\r\n'
                  'P-RTP-Stat: PS=0,OS=0,PR=5429,OR=955504,PL=0,JI=0,LA=0,DU=108\r\n'
                  'Privacy: id\r\n'
                  'Proxy-Authenticate: Digest realm="1.1.1.1", nonce="8dd33eb2-e3c4-11e5-a55b-83b175043a03", algorithm=MD5, qop="auth"\r\n'
                  'Proxy-Authorization: Digest username="100",realm="209.105.255.124",nonce="7bebcf02-e01d-11e5-931d-83b175043a03",uri="sip:90011@209.105.255.124",response="63faaa2604cae36e9b38f2d5cd0abba4",cnonce="4b41f53e6f00c05",nc=00000001,qop="auth",algorithm=MD5\r\n'
                  'Proxy-Require: foo\r\n'
                  'Reason: Q.850; cause=16; reason=Terminated\r\n'
                  'Record-Session-Expires: 1200;refresher=uac\r\n'
                  'Replaces: 19cd9bf094ff5f0c1745ef975c1cf65d34beb908f@192.168.0.3;to-tag=29bd570-f0a1ec8-13c5-50029-aa872-7d78286-aa872;from-tag=7ca31b4791\r\n'
                  'Subscription-State: active;reason=deactivated;expires=50\r\n'
                  'Min-Expires: 1800\r\n'
                  'Content-Length: 11')  # This last one actually instantiates a ContentLengthSIPHeaderField.
        return answer.replace("SIPMETHODTOREPLACE", self.sipMethodString)
    # TODO: need to do this folding test.  Wrote the folded message, need to write the test.
    @property
    def oneBigHeaderStringWithFoldingForAssertion(self):
        """
        Same canonical header section, but with RFC-style line folding
        (continuation lines indented after CRLF) sprinkled through some
        fields; SIPMETHODTOREPLACE is substituted with
        self.sipMethodString.
        """
        answer = ('From: <sip:200.25.3.150:5061>;tag=0ee8d3e272e31c9195299efc500\r\n'
                  'To: <sip:example.com:5061>\r\n'
                  'Call-ID: 0ee8d3e272e31c9195299efc500\r\n'
                  'CSeq: 6711\r\n'
                  ' SIPMETHODTOREPLACE\r\n'
                  'Max-Forwards: 70\r\n'
                  'Via: SIP/2.0/TLS 200.25.3.150;branch=z9hG4bK0ee8d3e272e31ca195299efc500\r\n'
                  'Via:\r\n'
                  ' SIP/2.0/TLS 200.25.3.250;branch=z9hG4bKfdkajhdiruyalkghjladksjf\r\n'
                  'Via: SIP/2.0/TLS 200.25.3.255;branch=z9hG4bKduyroiuryaludhgviukfhlasf\r\n'
                  'User-Agent:\r\n'
                  ' Example User Agent\r\n'
                  'Contact: <sip:invalid@200.25.3.150:5061;transport=tls>\r\n'
                  'Route: <sip:200.25.3.230:5061;transport=tls;lr>\r\n'
                  'Route: <sip:200.25.3.231:5061;transport=tls;lr>\r\n'
                  'Route: <sip:200.25.3.232:5061;transport=tls;lr>\r\n'
                  'Expires: 0\r\n'
                  'Accept: application/sdp,application/isup,application/dtmf,application/dtmf-relay,multipart/mixed\r\n'
                  'Accept-Encoding: x-nortel-short\r\n'
                  'Accept-Language: en-us,fr-fr\r\n'
                  'Allow: ACK,BYE,CANCEL,INFO,INVITE,OPTIONS,REGISTER,SUBSCRIBE,UPDATE\r\n'
                  'Authorization: Digest\r\n'
                  ' username="3122221000",realm="SomeRealm",nonce="1111790769596",uri="sip:3122211004@example.com",response="9bf77d8238664fe08dafd4d2abb6f1cb",algorithm=MD5\r\n'
                  'Call-Info: <https://lsc14pa.example.com:443/pa/direct/pictureServlet?user=3126805100@example.com>;Purpose=icon\r\n'
                  'Content-Disposition: session;handling=required\r\n'
                  'Content-Type: application/sdp\r\n'
                  'Date: Sat, 01 Feb 2014 22:07:34 GMT\r\n'
                  'Record-Route: <sip:200.25.3.230:5061;transport=tls;lr>\r\n'
                  'Record-Route: <sip:200.25.3.231:5061;transport=tls;lr>\r\n'
                  'Record-Route: <sip:200.25.3.232:5061;transport=tls;lr>\r\n'
                  'Require: sdp-anat\r\n'
                  'Retry-After: 30\r\n'
                  'Server: Blargomatic 2.0\r\n'
                  # TODO: We will need to deal with the refresher parameter, i.e. we will need to
                  # be able to specify parameter dictionaries to the new_for_attributes method for
                  # Integer header fields.  Maybe even more generically; for all SIP header fields.
                  # 'Session-Expires: 1200;refresher=uac\r\n'
                  'Session-Expires: 1200\r\n'
                  'Supported: 100rel,histinfo,join,replaces,sdp-anat,timer\r\n'
                  'Timestamp: 1392061773\r\n'
                  'WWW-Authenticate:\r\n'
                  ' Digest\r\n'
                  ' algorithm=MD5,nonce="1111790769596",realm="SomeRealm"\r\n'
                  'Warning:\r\n'
                  ' 370\r\n'
                  ' 200.21.3.10\r\n'
                  ' "Insufficient Bandwidth"\r\n'
                  'X-RTP-Stat: PR=0;ER=0;PL=0;RB=0/0;DE=PCMU;EN=PCMU;JI=0;DL=0,0;IP=10.1.0.33:16384,132.52.127.200:20048\r\n'
                  'x-channel: ds/ds1-3/12;IP=132.52.127.16\r\n'
                  'Referred-By: <sip:6006665100@example.com;user=phone> ; CorrelationID="0508817f84e7ce64745ef9753e2fbff4664321a4@200.23.3.240"\r\n'
                  'Refer-To: <sip:6006665499;rfrid=28661859@example.com;user=phone?x-nt-resource-priority=YNBvf.2j00qao>\r\n'
                  'Subject: Need more\r\n'
                  ' boxes\r\n'
                  'Referred-By: <sip:5556785103@example.com;user=phone> ;\r\n'
                  ' CorrelationID="348058f0947acec8745efd367e33542c5cb01436@192.168.0.3"\r\n'
                  'Refer-To: <sip:5556645204@example.com:5064;user=phone;transport=udp>\r\n'
                  'Allow-Events: dialog,message-summary\r\n'
                  'Event: refer;id=10498\r\n'
                  'Content-Encoding: gzip\r\n'
                  'RAck: 1 1\r\n'
                  ' INVITE\r\n'
                  'P-Charge: <sip:6425555555@10.10.10.10>;npi=ISDN;noa=2\r\n'
                  'Reply-To: Bob <sip:bob@biloxi.com>\r\n'
                  'Unsupported: foo\r\n'
                  'P-Asserted-Identity: "500 - SIP\r\n'
                  ' Test" <sip:500@192.168.0.3>\r\n'
                  'P-Preferred-Identity: "User 5103" <sip:3126705103@192.168.0.3:5060>\r\n'
                  'Remote-Party-ID: "1234567890" <sip:1234567890@192.168.1.195>;party=calling;privacy=off;screen=no\r\n'
                  'Alert-Info: <cid:internal@example.com>;alert-type=internal\r\n'
                  'History-Info: "555122221002" <sip:555122221002@example.com>;index=1.1\r\n'
                  'P-Called-Party-Id: <sip:2135881@example.com;user=phone>\r\n'
                  'P-RTP-Stat: PS=0,OS=0,PR=5429,OR=955504,PL=0,JI=0,LA=0,DU=108\r\n'
                  'Privacy: id\r\n'
                  'Proxy-Authenticate: Digest realm="1.1.1.1",\r\n'
                  ' nonce="8dd33eb2-e3c4-11e5-a55b-83b175043a03", algorithm=MD5, qop="auth"\r\n'
                  'Proxy-Authorization: Digest username="100",realm="209.105.255.124",nonce="7bebcf02-e01d-11e5-931d-83b175043a03",uri="sip:90011@209.105.255.124",response="63faaa2604cae36e9b38f2d5cd0abba4",cnonce="4b41f53e6f00c05",nc=00000001,qop="auth",algorithm=MD5\r\n'
                  'Proxy-Require:\r\n'
                  ' foo\r\n'
                  'Reason: Q.850; cause=16; reason=Terminated\r\n'
                  'Record-Session-Expires:\r\n'
                  ' 1200;refresher=uac\r\n'
                  'Replaces: 19cd9bf094ff5f0c1745ef975c1cf65d34beb908f@192.168.0.3;to-tag=29bd570-f0a1ec8-13c5-50029-aa872-7d78286-aa872;from-tag=7ca31b4791\r\n'
                  'Subscription-State:\r\n'
                  ' active;reason=deactivated;expires=50\r\n'
                  'Min-Expires: 1800\r\n'
                  'Content-Length: 11')  # This last one actually instantiates a ContentLengthSIPHeaderField.
        return answer.replace("SIPMETHODTOREPLACE", self.sipMethodString)
    @property
    def listOfHeaderFieldsForAssertion(self):
        """
        The canonical header fields as constructed header-field objects,
        in the same order as canonicalHeaderFieldStrings, for structural
        assertions.
        """
        return [
            # FromSIPHeaderField.new_for_attributes(field_value_string='<sip:200.25.3.150:5061>;tag=0ee8d3e272e31c9195299efc500'),
            FromSIPHeaderField.new_for_attributes(tag='0ee8d3e272e31c9195299efc500', display_name=None, sip_uri=SIPURI.new_parsed_from('sip:200.25.3.150:5061')),
            # ToSIPHeaderField.new_for_attributes(field_value_string='<sip:example.com:5061>'),
            ToSIPHeaderField.new_for_attributes(tag=None, display_name=None, sip_uri=SIPURI.new_parsed_from('sip:example.com:5061')),
            CallIDSIPHeaderField.new_for_attributes(field_value_string='0ee8d3e272e31c9195299efc500'),
            CSeqSIPHeaderField.new_for_attributes(field_value_string='6711 ' + self.sipMethodString),
            MaxForwardsSIPHeaderField.new_for_attributes(value=70),
            # ViaSIPHeaderField.new_for_attributes(field_value_string='SIP/2.0/TLS 200.25.3.150;branch=z9hG4bK0ee8d3e272e31ca195299efc500'),
            ViaSIPHeaderField.new_for_attributes(transport='TLS', host='200.25.3.150', branch='z9hG4bK0ee8d3e272e31ca195299efc500'),
            # ViaSIPHeaderField.new_for_attributes(field_value_string='SIP/2.0/TLS 200.25.3.250;branch=z9hG4bKfdkajhdiruyalkghjladksjf'),
            ViaSIPHeaderField.new_for_attributes(transport='TLS', host='200.25.3.250', branch='z9hG4bKfdkajhdiruyalkghjladksjf'),
            # ViaSIPHeaderField.new_for_attributes(field_value_string='SIP/2.0/TLS 200.25.3.255;branch=z9hG4bKduyroiuryaludhgviukfhlasf'),
            ViaSIPHeaderField.new_for_attributes(transport='TLS', host='200.25.3.255', branch='z9hG4bKduyroiuryaludhgviukfhlasf'),
            UserAgentSIPHeaderField.new_for_attributes(field_value_string='Example User Agent'),
            # ContactSIPHeaderField.new_for_attributes(field_value_string='<sip:invalid@200.25.3.150:5061;transport=tls>'),
            ContactSIPHeaderField.new_for_attributes(display_name=None, sip_uri=SIPURI.new_parsed_from('sip:invalid@200.25.3.150:5061;transport=tls')),
            RouteSIPHeaderField.new_for_attributes(sip_uri=SIPURI.new_parsed_from('sip:200.25.3.230:5061;transport=tls;lr')),
            RouteSIPHeaderField.new_for_attributes(sip_uri=SIPURI.new_parsed_from('sip:200.25.3.231:5061;transport=tls;lr')),
            RouteSIPHeaderField.new_for_attributes(sip_uri=SIPURI.new_parsed_from('sip:200.25.3.232:5061;transport=tls;lr')),
            ExpiresSIPHeaderField.new_for_attributes(value=0),
            AcceptSIPHeaderField.new_for_attributes(field_value_string='application/sdp,application/isup,application/dtmf,application/dtmf-relay,multipart/mixed'),
            AcceptEncodingSIPHeaderField.new_for_attributes(field_value_string='x-nortel-short'),
            AcceptLanguageSIPHeaderField.new_for_attributes(field_value_string='en-us,fr-fr'),
            AllowSIPHeaderField.new_for_attributes(field_value_string=' ACK,BYE,CANCEL,INFO,INVITE,OPTIONS,REGISTER,SUBSCRIBE,UPDATE'),
            AuthorizationSIPHeaderField.new_for_attributes(field_value_string='Digest username="3122221000",realm="SomeRealm",nonce="1111790769596",uri="sip:3122211004@example.com",response="9bf77d8238664fe08dafd4d2abb6f1cb",algorithm=MD5'),
            CallInfoSIPHeaderField.new_for_attributes(field_value_string='<https://lsc14pa.example.com:443/pa/direct/pictureServlet?user=3126805100@example.com>;Purpose=icon'),
            ContentDispositionSIPHeaderField.new_for_attributes(field_value_string='session;handling=required'),
            ContentTypeSIPHeaderField.new_for_attributes(field_value_string='application/sdp'),
            DateSIPHeaderField.new_for_attributes(field_value_string='Sat, 01 Feb 2014 22:07:34 GMT'),
            RecordRouteSIPHeaderField.new_for_attributes(sip_uri=SIPURI.new_parsed_from('sip:200.25.3.230:5061;transport=tls;lr')),
            RecordRouteSIPHeaderField.new_for_attributes(sip_uri=SIPURI.new_parsed_from('sip:200.25.3.231:5061;transport=tls;lr')),
            RecordRouteSIPHeaderField.new_for_attributes(sip_uri=SIPURI.new_parsed_from('sip:200.25.3.232:5061;transport=tls;lr')),
            RequireSIPHeaderField.new_for_attributes(field_value_string='sdp-anat'),
            RetryAfterSIPHeaderField.new_for_attributes(value=30),
            ServerSIPHeaderField.new_for_attributes(field_value_string='Blargomatic 2.0'),
            # TODO: We will need to deal with the refresher parameter, i.e. we will need to
            # be able to specify parameter dictionaries to the new_for_attributes method for
            # Integer header fields.  Maybe even more generically; for all SIP header fields.
            # SessionExpiresSIPHeaderField.new_for_attributes(field_value_string='1200;refresher=uac'),
            SessionExpiresSIPHeaderField.new_for_attributes(value=1200),
            SupportedSIPHeaderField.new_for_attributes(field_value_string='100rel,histinfo,join,replaces,sdp-anat,timer'),
            TimestampSIPHeaderField.new_for_attributes(value=1392061773),
            WWWAuthenticateSIPHeaderField.new_for_attributes(field_value_string='Digest algorithm=MD5,nonce="1111790769596",realm="SomeRealm"'),
            WarningSIPHeaderField.new_for_attributes(field_value_string='370 200.21.3.10 "Insufficient Bandwidth"'),
            UnknownSIPHeaderField.new_for_field_name_and_value_string(field_name='X-RTP-Stat', field_value_string=' PR=0;ER=0;PL=0;RB=0/0;DE=PCMU;EN=PCMU;JI=0;DL=0,0;IP=10.1.0.33:16384,132.52.127.200:20048'),
            UnknownSIPHeaderField.new_for_field_name_and_value_string(field_name='x-channel', field_value_string=' ds/ds1-3/12;IP=132.52.127.16'),
            ReferredBySIPHeaderField.new_for_attributes(field_value_string='<sip:6006665100@example.com;user=phone> ; CorrelationID="0508817f84e7ce64745ef9753e2fbff4664321a4@200.23.3.240"'),
            ReferToSIPHeaderField.new_for_attributes(field_value_string='<sip:6006665499;rfrid=28661859@example.com;user=phone?x-nt-resource-priority=YNBvf.2j00qao>'),
            SubjectSIPHeaderField.new_for_attributes(field_value_string='Need more boxes'),
            ReferredBySIPHeaderField.new_for_attributes(field_value_string='<sip:5556785103@example.com;user=phone> ; CorrelationID="348058f0947acec8745efd367e33542c5cb01436@192.168.0.3"'),
            ReferToSIPHeaderField.new_for_attributes(field_value_string='<sip:5556645204@example.com:5064;user=phone;transport=udp>'),
            AllowEventsSIPHeaderField.new_for_attributes(field_value_string='dialog,message-summary'),
            EventSIPHeaderField.new_for_attributes(field_value_string='refer;id=10498'),
            ContentEncodingSIPHeaderField.new_for_attributes(field_value_string='gzip'),
            RAckSIPHeaderField.new_for_attributes(field_value_string='1 1 INVITE'),
            PChargeSIPHeaderField.new_for_attributes(field_value_string='<sip:6425555555@10.10.10.10>;npi=ISDN;noa=2'),
            ReplyToSIPHeaderField.new_for_attributes(field_value_string='Bob <sip:bob@biloxi.com>'),
            UnsupportedSIPHeaderField.new_for_attributes(field_value_string='foo'),
            PAssertedIdentitySIPHeaderField.new_for_attributes(field_value_string='"500 - SIP Test" <sip:500@192.168.0.3>'),
            PPreferredIdentitySIPHeaderField.new_for_attributes(field_value_string='"User 5103" <sip:3126705103@192.168.0.3:5060>'),
            RemotePartyIDSIPHeaderField.new_for_attributes(field_value_string='"1234567890" <sip:1234567890@192.168.1.195>;party=calling;privacy=off;screen=no'),
            AlertInfoSIPHeaderField.new_for_attributes(field_value_string='<cid:internal@example.com>;alert-type=internal'),
            HistoryInfoSIPHeaderField.new_for_attributes(field_value_string='"555122221002" <sip:555122221002@example.com>;index=1.1'),
            PCalledPartyIdSIPHeaderField.new_for_attributes(field_value_string='<sip:2135881@example.com;user=phone>'),
            PRTPStatSIPHeaderField.new_for_attributes(field_value_string='PS=0,OS=0,PR=5429,OR=955504,PL=0,JI=0,LA=0,DU=108'),
            PrivacySIPHeaderField.new_for_attributes(field_value_string='id'),
            ProxyAuthenticateSIPHeaderField.new_for_attributes(field_value_string='Digest realm="1.1.1.1", nonce="8dd33eb2-e3c4-11e5-a55b-83b175043a03", algorithm=MD5, qop="auth"'),
            ProxyAuthorizationSIPHeaderField.new_for_attributes(field_value_string='Digest username="100",realm="209.105.255.124",nonce="7bebcf02-e01d-11e5-931d-83b175043a03",uri="sip:90011@209.105.255.124",response="63faaa2604cae36e9b38f2d5cd0abba4",cnonce="4b41f53e6f00c05",nc=00000001,qop="auth",algorithm=MD5'),
            ProxyRequireSIPHeaderField.new_for_attributes(field_value_string='foo'),
            ReasonSIPHeaderField.new_for_attributes(field_value_string='Q.850; cause=16; reason=Terminated'),
            # NOTE(review): this one uses new_for_value_string while its siblings use
            # new_for_attributes -- confirm the difference is intentional.
            RecordSessionExpiresSIPHeaderField.new_for_value_string(field_value_string='1200;refresher=uac'),
            ReplacesSIPHeaderField.new_for_attributes(field_value_string='19cd9bf094ff5f0c1745ef975c1cf65d34beb908f@192.168.0.3;to-tag=29bd570-f0a1ec8-13c5-50029-aa872-7d78286-aa872;from-tag=7ca31b4791'),
            SubscriptionStateSIPHeaderField.new_for_attributes(field_value_string='active;reason=deactivated;expires=50'),
            MinExpiresSIPHeaderField.new_for_attributes(value=1800),
            ContentLengthSIPHeaderField.new_for_attributes(value=11)]
    @property
    def listOfHeaderFieldStringsForAssertion(self):
        """
        The canonical header fields as individual rendered strings, in
        the same order as listOfHeaderFieldsForAssertion, for per-field
        string assertions.
        """
        return ['From: <sip:200.25.3.150:5061>;tag=0ee8d3e272e31c9195299efc500',
                'To: <sip:example.com:5061>',
                'Call-ID: 0ee8d3e272e31c9195299efc500',
                'CSeq: 6711 ' + self.sipMethodString,
                'Max-Forwards: 70',
                'Via: SIP/2.0/TLS 200.25.3.150;branch=z9hG4bK0ee8d3e272e31ca195299efc500',
                'Via: SIP/2.0/TLS 200.25.3.250;branch=z9hG4bKfdkajhdiruyalkghjladksjf',
                'Via: SIP/2.0/TLS 200.25.3.255;branch=z9hG4bKduyroiuryaludhgviukfhlasf',
                'User-Agent: Example User Agent',
                'Contact: <sip:invalid@200.25.3.150:5061;transport=tls>',
                'Route: <sip:200.25.3.230:5061;transport=tls;lr>',
                'Route: <sip:200.25.3.231:5061;transport=tls;lr>',
                'Route: <sip:200.25.3.232:5061;transport=tls;lr>',
                'Expires: 0',
                'Accept: application/sdp,application/isup,application/dtmf,application/dtmf-relay,multipart/mixed',
                'Accept-Encoding: x-nortel-short',
                'Accept-Language: en-us,fr-fr',
                'Allow: ACK,BYE,CANCEL,INFO,INVITE,OPTIONS,REGISTER,SUBSCRIBE,UPDATE',
                'Authorization: Digest username="3122221000",realm="SomeRealm",nonce="1111790769596",uri="sip:3122211004@example.com",response="9bf77d8238664fe08dafd4d2abb6f1cb",algorithm=MD5',
                'Call-Info: <https://lsc14pa.example.com:443/pa/direct/pictureServlet?user=3126805100@example.com>;Purpose=icon',
                'Content-Disposition: session;handling=required',
                'Content-Type: application/sdp',
                'Date: Sat, 01 Feb 2014 22:07:34 GMT',
                'Record-Route: <sip:200.25.3.230:5061;transport=tls;lr>',
                'Record-Route: <sip:200.25.3.231:5061;transport=tls;lr>',
                'Record-Route: <sip:200.25.3.232:5061;transport=tls;lr>',
                'Require: sdp-anat',
                'Retry-After: 30',
                'Server: Blargomatic 2.0',
                # TODO: We will need to deal with the refresher parameter, i.e. we will need to
                # be able to specify parameter dictionaries to the new_for_attributes method for
                # Integer header fields.  Maybe even more generically; for all SIP header fields.
                # 'Session-Expires: 1200;refresher=uac',
                'Session-Expires: 1200',
                'Supported: 100rel,histinfo,join,replaces,sdp-anat,timer',
                'Timestamp: 1392061773',
                'WWW-Authenticate: Digest algorithm=MD5,nonce="1111790769596",realm="SomeRealm"',
                'Warning: 370 200.21.3.10 "Insufficient Bandwidth"',
                'X-RTP-Stat: PR=0;ER=0;PL=0;RB=0/0;DE=PCMU;EN=PCMU;JI=0;DL=0,0;IP=10.1.0.33:16384,132.52.127.200:20048',
                'x-channel: ds/ds1-3/12;IP=132.52.127.16',
                'Referred-By: <sip:6006665100@example.com;user=phone> ; CorrelationID="0508817f84e7ce64745ef9753e2fbff4664321a4@200.23.3.240"',
                'Refer-To: <sip:6006665499;rfrid=28661859@example.com;user=phone?x-nt-resource-priority=YNBvf.2j00qao>',
                'Subject: Need more boxes',
                'Referred-By: <sip:5556785103@example.com;user=phone> ; CorrelationID="348058f0947acec8745efd367e33542c5cb01436@192.168.0.3"',
                'Refer-To: <sip:5556645204@example.com:5064;user=phone;transport=udp>',
                'Allow-Events: dialog,message-summary',
                'Event: refer;id=10498',
                'Content-Encoding: gzip',
                'RAck: 1 1 INVITE',
                'P-Charge: <sip:6425555555@10.10.10.10>;npi=ISDN;noa=2',
                'Reply-To: Bob <sip:bob@biloxi.com>',
                'Unsupported: foo',
                'P-Asserted-Identity: "500 - SIP Test" <sip:500@192.168.0.3>',
                'P-Preferred-Identity: "User 5103" <sip:3126705103@192.168.0.3:5060>',
                'Remote-Party-ID: "1234567890" <sip:1234567890@192.168.1.195>;party=calling;privacy=off;screen=no',
                'Alert-Info: <cid:internal@example.com>;alert-type=internal',
                'History-Info: "555122221002" <sip:555122221002@example.com>;index=1.1',
                'P-Called-Party-Id: <sip:2135881@example.com;user=phone>',
                'P-RTP-Stat: PS=0,OS=0,PR=5429,OR=955504,PL=0,JI=0,LA=0,DU=108',
                'Privacy: id',
                'Proxy-Authenticate: Digest realm="1.1.1.1", nonce="8dd33eb2-e3c4-11e5-a55b-83b175043a03", algorithm=MD5, qop="auth"',
                'Proxy-Authorization: Digest username="100",realm="209.105.255.124",nonce="7bebcf02-e01d-11e5-931d-83b175043a03",uri="sip:90011@209.105.255.124",response="63faaa2604cae36e9b38f2d5cd0abba4",cnonce="4b41f53e6f00c05",nc=00000001,qop="auth",algorithm=MD5',
                'Proxy-Require: foo',
                'Reason: Q.850; cause=16; reason=Terminated',
                'Record-Session-Expires: 1200;refresher=uac',
                'Replaces: 19cd9bf094ff5f0c1745ef975c1cf65d34beb908f@192.168.0.3;to-tag=29bd570-f0a1ec8-13c5-50029-aa872-7d78286-aa872;from-tag=7ca31b4791',
                'Subscription-State: active;reason=deactivated;expires=50',
                'Min-Expires: 1800',
                'Content-Length: 11']  # This last one actually instantiates a ContentLengthSIPHeaderField.
    @property
    def listOfHeaderFieldNamesAndValuesForAssertion(self):
        """Expected (field name, field value) pairs for header assertions.

        Order parallels the canonical header-field strings used to build the
        test message.  Values are mostly strings; Max-Forwards, Expires,
        Session-Expires and Content-Length are deliberately integers to
        exercise integer-valued header field construction.
        """
        return [('From', '<sip:200.25.3.150:5061>;tag=0ee8d3e272e31c9195299efc500'),
                ('To', '<sip:example.com:5061>'),
                ('Call-ID', '0ee8d3e272e31c9195299efc500'),
                ('CSeq', '6711 ' + self.sipMethodString),
                ('Max-Forwards', 70), # note the integer value.
                ('Via', 'SIP/2.0/TLS 200.25.3.150;branch=z9hG4bK0ee8d3e272e31ca195299efc500'),
                ('Via', 'SIP/2.0/TLS 200.25.3.250;branch=z9hG4bKfdkajhdiruyalkghjladksjf'),
                ('Via', 'SIP/2.0/TLS 200.25.3.255;branch=z9hG4bKduyroiuryaludhgviukfhlasf'),
                ('User-Agent', 'Example User Agent'),
                ('Contact', '<sip:invalid@200.25.3.150:5061;transport=tls>'),
                ('Route', '<sip:200.25.3.230:5061;transport=tls;lr>'),
                ('Route', '<sip:200.25.3.231:5061;transport=tls;lr>'),
                ('Route', '<sip:200.25.3.232:5061;transport=tls;lr>'),
                ('Expires', 0),
                ('Accept', 'application/sdp,application/isup,application/dtmf,application/dtmf-relay,multipart/mixed'),
                ('Accept-Encoding', 'x-nortel-short'),
                ('Accept-Language', 'en-us,fr-fr'),
                ('Allow', ' ACK,BYE,CANCEL,INFO,INVITE,OPTIONS,REGISTER,SUBSCRIBE,UPDATE'),
                ('Authorization', 'Digest username="3122221000",realm="SomeRealm",nonce="1111790769596",uri="sip:3122211004@example.com",response="9bf77d8238664fe08dafd4d2abb6f1cb",algorithm=MD5'),
                ('Call-Info', '<https://lsc14pa.example.com:443/pa/direct/pictureServlet?user=3126805100@example.com>;Purpose=icon'),
                ('Content-Disposition', 'session;handling=required'),
                ('Content-Type', 'application/sdp'),
                ('Date', 'Sat, 01 Feb 2014 22:07:34 GMT'),
                ('Record-Route', '<sip:200.25.3.230:5061;transport=tls;lr>'),
                ('Record-Route', '<sip:200.25.3.231:5061;transport=tls;lr>'),
                ('Record-Route', '<sip:200.25.3.232:5061;transport=tls;lr>'),
                ('Require', 'sdp-anat'),
                ('Retry-After', '30'),
                ('Server', 'Blargomatic 2.0'),
                # TODO: We will need to deal with the refresher parameter, i.e. we will need to
                # be able to specify parameter dictionaries to the new_for_attributes method for
                # Integer header fields.  Maybe even more generically; for all SIP header fields.
                # ('Session-Expires', '1200;refresher=uac'),
                ('Session-Expires', 1200),
                ('Supported', '100rel,histinfo,join,replaces,sdp-anat,timer'),
                ('Timestamp', '1392061773'),
                ('WWW-Authenticate', 'Digest algorithm=MD5,nonce="1111790769596",realm="SomeRealm"'),
                ('Warning', '370 200.21.3.10 "Insufficient Bandwidth"'),
                ('X-RTP-Stat', ' PR=0;ER=0;PL=0;RB=0/0;DE=PCMU;EN=PCMU;JI=0;DL=0,0;IP=10.1.0.33:16384,132.52.127.200:20048'),
                ('x-channel', ' ds/ds1-3/12;IP=132.52.127.16'),
                ('Referred-By', '<sip:6006665100@example.com;user=phone> ; CorrelationID="0508817f84e7ce64745ef9753e2fbff4664321a4@200.23.3.240"'),
                ('Refer-To', '<sip:6006665499;rfrid=28661859@example.com;user=phone?x-nt-resource-priority=YNBvf.2j00qao>'),
                ('Subject', 'Need more boxes'),
                ('Referred-By', '<sip:5556785103@example.com;user=phone> ; CorrelationID="348058f0947acec8745efd367e33542c5cb01436@192.168.0.3"'),
                ('Refer-To', '<sip:5556645204@example.com:5064;user=phone;transport=udp>'),
                ('Allow-Events', 'dialog,message-summary'),
                ('Event', 'refer;id=10498'),
                ('Content-Encoding', 'gzip'),
                ('RAck', '1 1 INVITE'),
                ('P-Charge', '<sip:6425555555@10.10.10.10>;npi=ISDN;noa=2'),
                ('Reply-To', 'Bob <sip:bob@biloxi.com>'),
                ('Unsupported', 'foo'),
                ('P-Asserted-Identity', '"500 - SIP Test" <sip:500@192.168.0.3>'),
                ('P-Preferred-Identity', '"User 5103" <sip:3126705103@192.168.0.3:5060>'),
                ('Remote-Party-ID', '"1234567890" <sip:1234567890@192.168.1.195>;party=calling;privacy=off;screen=no'),
                ('Alert-Info', '<cid:internal@example.com>;alert-type=internal'),
                ('History-Info', '"555122221002" <sip:555122221002@example.com>;index=1.1'),
                ('P-Called-Party-Id', '<sip:2135881@example.com;user=phone>'),
                ('P-RTP-Stat', 'PS=0,OS=0,PR=5429,OR=955504,PL=0,JI=0,LA=0,DU=108'),
                ('Privacy', 'id'),
                ('Proxy-Authenticate', 'Digest realm="1.1.1.1", nonce="8dd33eb2-e3c4-11e5-a55b-83b175043a03", algorithm=MD5, qop="auth"'),
                ('Proxy-Authorization', 'Digest username="100",realm="209.105.255.124",nonce="7bebcf02-e01d-11e5-931d-83b175043a03",uri="sip:90011@209.105.255.124",response="63faaa2604cae36e9b38f2d5cd0abba4",cnonce="4b41f53e6f00c05",nc=00000001,qop="auth",algorithm=MD5'),
                ('Proxy-Require', 'foo'),
                ('Reason', 'Q.850; cause=16; reason=Terminated'),
                ('Record-Session-Expires', '1200;refresher=uac'),
                ('Replaces', '19cd9bf094ff5f0c1745ef975c1cf65d34beb908f@192.168.0.3;to-tag=29bd570-f0a1ec8-13c5-50029-aa872-7d78286-aa872;from-tag=7ca31b4791'),
                ('Subscription-State', 'active;reason=deactivated;expires=50'),
                ('Min-Expires', '1800'),
                ('Content-Length', 11)] # This last one actually instantiates a ContentLengthSIPHeaderField.
    @property
    def listOfHeaderFieldNamesAndValuesUsingPropertyDictForAssertion(self):
        """Same pairs as listOfHeaderFieldNamesAndValuesForAssertion, except that
        Session-Expires and Content-Length values are given as property dicts
        ({"value": ...}) to exercise dict-based header field construction.
        """
        return [('From', '<sip:200.25.3.150:5061>;tag=0ee8d3e272e31c9195299efc500'),
                ('To', '<sip:example.com:5061>'),
                ('Call-ID', '0ee8d3e272e31c9195299efc500'),
                ('CSeq', '6711 ' + self.sipMethodString),
                ('Max-Forwards', 70), # note the integer value.
                ('Via', 'SIP/2.0/TLS 200.25.3.150;branch=z9hG4bK0ee8d3e272e31ca195299efc500'),
                ('Via', 'SIP/2.0/TLS 200.25.3.250;branch=z9hG4bKfdkajhdiruyalkghjladksjf'),
                ('Via', 'SIP/2.0/TLS 200.25.3.255;branch=z9hG4bKduyroiuryaludhgviukfhlasf'),
                ('User-Agent', 'Example User Agent'),
                ('Contact', '<sip:invalid@200.25.3.150:5061;transport=tls>'),
                ('Route', '<sip:200.25.3.230:5061;transport=tls;lr>'),
                ('Route', '<sip:200.25.3.231:5061;transport=tls;lr>'),
                ('Route', '<sip:200.25.3.232:5061;transport=tls;lr>'),
                ('Expires', 0),
                ('Accept', 'application/sdp,application/isup,application/dtmf,application/dtmf-relay,multipart/mixed'),
                ('Accept-Encoding', 'x-nortel-short'),
                ('Accept-Language', 'en-us,fr-fr'),
                ('Allow', ' ACK,BYE,CANCEL,INFO,INVITE,OPTIONS,REGISTER,SUBSCRIBE,UPDATE'),
                ('Authorization', 'Digest username="3122221000",realm="SomeRealm",nonce="1111790769596",uri="sip:3122211004@example.com",response="9bf77d8238664fe08dafd4d2abb6f1cb",algorithm=MD5'),
                ('Call-Info', '<https://lsc14pa.example.com:443/pa/direct/pictureServlet?user=3126805100@example.com>;Purpose=icon'),
                ('Content-Disposition', 'session;handling=required'),
                ('Content-Type', 'application/sdp'),
                ('Date', 'Sat, 01 Feb 2014 22:07:34 GMT'),
                ('Record-Route', '<sip:200.25.3.230:5061;transport=tls;lr>'),
                ('Record-Route', '<sip:200.25.3.231:5061;transport=tls;lr>'),
                ('Record-Route', '<sip:200.25.3.232:5061;transport=tls;lr>'),
                ('Require', 'sdp-anat'),
                ('Retry-After', '30'),
                ('Server', 'Blargomatic 2.0'),
                # TODO: We will need to deal with the refresher parameter, i.e. we will need to
                # be able to specify parameter dictionaries to the new_for_attributes method for
                # Integer header fields.  Maybe even more generically; for all SIP header fields.
                # ('Session-Expires', '1200;refresher=uac'),
                ('Session-Expires', {"value": 1200}),
                ('Supported', '100rel,histinfo,join,replaces,sdp-anat,timer'),
                ('Timestamp', '1392061773'),
                ('WWW-Authenticate', 'Digest algorithm=MD5,nonce="1111790769596",realm="SomeRealm"'),
                ('Warning', '370 200.21.3.10 "Insufficient Bandwidth"'),
                ('X-RTP-Stat', ' PR=0;ER=0;PL=0;RB=0/0;DE=PCMU;EN=PCMU;JI=0;DL=0,0;IP=10.1.0.33:16384,132.52.127.200:20048'),
                ('x-channel', ' ds/ds1-3/12;IP=132.52.127.16'),
                ('Referred-By', '<sip:6006665100@example.com;user=phone> ; CorrelationID="0508817f84e7ce64745ef9753e2fbff4664321a4@200.23.3.240"'),
                ('Refer-To', '<sip:6006665499;rfrid=28661859@example.com;user=phone?x-nt-resource-priority=YNBvf.2j00qao>'),
                ('Subject', 'Need more boxes'),
                ('Referred-By', '<sip:5556785103@example.com;user=phone> ; CorrelationID="348058f0947acec8745efd367e33542c5cb01436@192.168.0.3"'),
                ('Refer-To', '<sip:5556645204@example.com:5064;user=phone;transport=udp>'),
                ('Allow-Events', 'dialog,message-summary'),
                ('Event', 'refer;id=10498'),
                ('Content-Encoding', 'gzip'),
                ('RAck', '1 1 INVITE'),
                ('P-Charge', '<sip:6425555555@10.10.10.10>;npi=ISDN;noa=2'),
                ('Reply-To', 'Bob <sip:bob@biloxi.com>'),
                ('Unsupported', 'foo'),
                ('P-Asserted-Identity', '"500 - SIP Test" <sip:500@192.168.0.3>'),
                ('P-Preferred-Identity', '"User 5103" <sip:3126705103@192.168.0.3:5060>'),
                ('Remote-Party-ID', '"1234567890" <sip:1234567890@192.168.1.195>;party=calling;privacy=off;screen=no'),
                ('Alert-Info', '<cid:internal@example.com>;alert-type=internal'),
                ('History-Info', '"555122221002" <sip:555122221002@example.com>;index=1.1'),
                ('P-Called-Party-Id', '<sip:2135881@example.com;user=phone>'),
                ('P-RTP-Stat', 'PS=0,OS=0,PR=5429,OR=955504,PL=0,JI=0,LA=0,DU=108'),
                ('Privacy', 'id'),
                ('Proxy-Authenticate', 'Digest realm="1.1.1.1", nonce="8dd33eb2-e3c4-11e5-a55b-83b175043a03", algorithm=MD5, qop="auth"'),
                ('Proxy-Authorization', 'Digest username="100",realm="209.105.255.124",nonce="7bebcf02-e01d-11e5-931d-83b175043a03",uri="sip:90011@209.105.255.124",response="63faaa2604cae36e9b38f2d5cd0abba4",cnonce="4b41f53e6f00c05",nc=00000001,qop="auth",algorithm=MD5'),
                ('Proxy-Require', 'foo'),
                ('Reason', 'Q.850; cause=16; reason=Terminated'),
                ('Record-Session-Expires', '1200;refresher=uac'),
                ('Replaces', '19cd9bf094ff5f0c1745ef975c1cf65d34beb908f@192.168.0.3;to-tag=29bd570-f0a1ec8-13c5-50029-aa872-7d78286-aa872;from-tag=7ca31b4791'),
                ('Subscription-State', 'active;reason=deactivated;expires=50'),
                ('Min-Expires', '1800'),
                ('Content-Length', {"value": 11})] # This last one actually instantiates a ContentLengthSIPHeaderField.
def runAssertionsForSIPMessage(self, a_sip_message):
self.assertEqual(a_sip_message.raw_string, self.canonicalStrings[0])
self.assertIsNotNone(a_sip_message.header.content_length_header_field)
self.assertEqual(11, a_sip_message.header.content_length)
self.assertEqual(3, a_sip_message.header.via_header_fields.__len__())
self.assertEqual(3, a_sip_message.header.vias.__len__())
self.assertEqual(3, a_sip_message.vias.__len__())
self.assertEqual(3, a_sip_message.header.route_header_fields.__len__())
self.assertEqual(3, a_sip_message.header.route_uris.__len__())
self.assertEqual(3, a_sip_message.route_uris.__len__())
self.assertEqual(3, a_sip_message.header.record_route_header_fields.__len__())
self.assertEqual(3, a_sip_message.header.record_route_uris.__len__())
self.assertEqual(3, a_sip_message.record_route_uris.__len__())
self.assertIsInstance(a_sip_message.transaction_hash, basestring)
self.assertIsInstance(a_sip_message.dialog_hash, (basestring, type(None)))
self.assertIsInstance(a_sip_message.header.invariant_branch_hash, (basestring, type(None)))
self.assertIsInstance(a_sip_message.header.call_id, basestring)
self.assertIsInstance(a_sip_message.header.cseq, basestring)
self.assertIsInstance(a_sip_message.header.to_tag, (basestring, type(None)))
self.assertIsInstance(a_sip_message.header.from_tag, basestring)
self.assertIsInstance(a_sip_message.header.max_forwards, int)
self.assertIsInstance(a_sip_message.header.route_uris, list)
self.assertIsInstance(a_sip_message.header.record_route_uris, list)
# TODO: assert other headers besides just content-length and via.
self.assertEqual('SIP/2.0/TLS 200.25.3.150;branch=z9hG4bK0ee8d3e272e31ca195299efc500', a_sip_message.header.vias[0])
self.assertEqual('SIP/2.0/TLS 200.25.3.250;branch=z9hG4bKfdkajhdiruyalkghjladksjf', a_sip_message.header.vias[1])
self.assertEqual('SIP/2.0/TLS 200.25.3.255;branch=z9hG4bKduyroiuryaludhgviukfhlasf', a_sip_message.header.vias[2])
self.assertIsInstance(a_sip_message.route_uris[0], SIPURI)
self.assertEqual(a_sip_message.route_uris[0].host, '200.25.3.230')
self.assertIsInstance(a_sip_message.route_uris[1], SIPURI)
self.assertEqual(a_sip_message.route_uris[1].host, '200.25.3.231')
self.assertIsInstance(a_sip_message.route_uris[2], SIPURI)
self.assertEqual(a_sip_message.route_uris[2].host, '200.25.3.232')
self.assertIsInstance(a_sip_message.record_route_uris[0], SIPURI)
self.assertEqual(a_sip_message.record_route_uris[0].host, '200.25.3.230')
self.assertIsInstance(a_sip_message.record_route_uris[1], SIPURI)
self.assertEqual(a_sip_message.record_route_uris[1].host, '200.25.3.231')
self.assertIsInstance(a_sip_message.record_route_uris[2], SIPURI)
self.assertEqual(a_sip_message.record_route_uris[2].host, '200.25.3.232')
self.assertEqual(2, a_sip_message.header.unknown_header_fields.__len__())
self.assertEqual(63, a_sip_message.header.known_header_fields.__len__())
self.assertEqual('Foo Content', a_sip_message.content)
self.assertEqual(65, len(a_sip_message.header.header_fields))
self.assertTrue(a_sip_message.header.header_fields[0].is_from)
self.assertTrue(a_sip_message.header.header_fields[1].is_to)
self.assertTrue(a_sip_message.header.header_fields[2].is_call_id)
self.assertTrue(a_sip_message.header.header_fields[3].is_cseq)
self.assertTrue(a_sip_message.header.header_fields[4].is_max_forwards)
self.assertTrue(a_sip_message.header.header_fields[5].is_via)
self.assertTrue(a_sip_message.header.header_fields[6].is_via)
self.assertTrue(a_sip_message.header.header_fields[7].is_via)
self.assertTrue(a_sip_message.header.header_fields[8].is_user_agent)
self.assertTrue(a_sip_message.header.header_fields[9].is_contact)
self.assertTrue(a_sip_message.header.header_fields[10].is_route)
self.assertTrue(a_sip_message.header.header_fields[11].is_route)
self.assertTrue(a_sip_message.header.header_fields[12].is_route)
self.assertTrue(a_sip_message.header.header_fields[13].is_expires)
self.assertTrue(a_sip_message.header.header_fields[14].is_accept)
self.assertTrue(a_sip_message.header.header_fields[15].is_accept_encoding)
self.assertTrue(a_sip_message.header.header_fields[16].is_accept_language)
self.assertTrue(a_sip_message.header.header_fields[17].is_allow)
self.assertTrue(a_sip_message.header.header_fields[18].is_authorization)
self.assertTrue(a_sip_message.header.header_fields[19].is_call_info)
self.assertTrue(a_sip_message.header.header_fields[20].is_content_disposition)
self.assertTrue(a_sip_message.header.header_fields[21].is_content_type)
self.assertTrue(a_sip_message.header.header_fields[22].is_date)
self.assertTrue(a_sip_message.header.header_fields[23].is_record_route)
self.assertTrue(a_sip_message.header.header_fields[24].is_record_route)
self.assertTrue(a_sip_message.header.header_fields[25].is_record_route)
self.assertTrue(a_sip_message.header.header_fields[26].is_require)
self.assertTrue(a_sip_message.header.header_fields[27].is_retry_after)
self.assertTrue(a_sip_message.header.header_fields[28].is_server)
self.assertTrue(a_sip_message.header.header_fields[29].is_session_expires)
self.assertTrue(a_sip_message.header.header_fields[30].is_supported)
self.assertTrue(a_sip_message.header.header_fields[31].is_timestamp)
self.assertTrue(a_sip_message.header.header_fields[32].is_www_authenticate)
self.assertTrue(a_sip_message.header.header_fields[33].is_warning)
self.assertTrue(a_sip_message.header.header_fields[34].is_unknown)
self.assertTrue(a_sip_message.header.header_fields[35].is_unknown)
self.assertTrue(a_sip_message.header.header_fields[36].is_referred_by)
self.assertTrue(a_sip_message.header.header_fields[37].is_refer_to)
self.assertTrue(a_sip_message.header.header_fields[38].is_subject)
self.assertTrue(a_sip_message.header.header_fields[39].is_referred_by)
self.assertTrue(a_sip_message.header.header_fields[40].is_refer_to)
self.assertTrue(a_sip_message.header.header_fields[41].is_allow_events)
self.assertTrue(a_sip_message.header.header_fields[42].is_event)
self.assertTrue(a_sip_message.header.header_fields[43].is_content_encoding)
self.assertTrue(a_sip_message.header.header_fields[44].is_rack)
self.assertTrue(a_sip_message.header.header_fields[45].is_p_charge)
self.assertTrue(a_sip_message.header.header_fields[46].is_reply_to)
self.assertTrue(a_sip_message.header.header_fields[47].is_unsupported)
self.assertTrue(a_sip_message.header.header_fields[48].is_p_asserted_identity)
self.assertTrue(a_sip_message.header.header_fields[49].is_p_preferred_identity)
self.assertTrue(a_sip_message.header.header_fields[50].is_remote_party_id)
self.assertTrue(a_sip_message.header.header_fields[51].is_alert_info)
self.assertTrue(a_sip_message.header.header_fields[52].is_history_info)
self.assertTrue(a_sip_message.header.header_fields[53].is_p_called_party_id)
self.assertTrue(a_sip_message.header.header_fields[54].is_p_rtp_stat)
self.assertTrue(a_sip_message.header.header_fields[55].is_privacy)
self.assertTrue(a_sip_message.header.header_fields[56].is_proxy_authenticate)
self.assertTrue(a_sip_message.header.header_fields[57].is_proxy_authorization)
self.assertTrue(a_sip_message.header.header_fields[58].is_proxy_require)
self.assertTrue(a_sip_message.header.header_fields[59].is_reason)
self.assertTrue(a_sip_message.header.header_fields[60].is_record_session_expires)
self.assertTrue(a_sip_message.header.header_fields[61].is_replaces)
self.assertTrue(a_sip_message.header.header_fields[62].is_subscription_state)
self.assertTrue(a_sip_message.header.header_fields[63].is_min_expires)
self.assertTrue(a_sip_message.header.header_fields[64].is_content_length)
self.assertTrue(3, len(a_sip_message.vias))
self.assertTrue(a_sip_message.header.header_fields[4].is_max_forwards)
self.assertTrue(a_sip_message.header.header_fields[5].is_via)
self.assertTrue(a_sip_message.header.header_fields[6].is_via)
self.assertTrue(a_sip_message.header.header_fields[7].is_via)
self.assertTrue(a_sip_message.header.header_fields[8].is_user_agent)
self.assertEqual('SIP/2.0/TLS 200.25.3.150;branch=z9hG4bK0ee8d3e272e31ca195299efc500', a_sip_message.vias[0])
self.assertEqual('SIP/2.0/TLS 200.25.3.250;branch=z9hG4bKfdkajhdiruyalkghjladksjf', a_sip_message.vias[1])
self.assertEqual('SIP/2.0/TLS 200.25.3.255;branch=z9hG4bKduyroiuryaludhgviukfhlasf', a_sip_message.vias[2])
a_sip_message.header.add_header_field_after_header_fields_of_same_class(ViaSIPHeaderField.new_for_attributes(host='localhost', transport='TLS'))
self.assertTrue(4, len(a_sip_message.vias))
self.assertTrue(a_sip_message.header.header_fields[4].is_max_forwards)
self.assertTrue(a_sip_message.header.header_fields[5].is_via)
self.assertTrue(a_sip_message.header.header_fields[6].is_via)
self.assertTrue(a_sip_message.header.header_fields[7].is_via)
self.assertTrue(a_sip_message.header.header_fields[8].is_via)
self.assertTrue(a_sip_message.header.header_fields[9].is_user_agent)
self.assertEqual('SIP/2.0/TLS 200.25.3.150;branch=z9hG4bK0ee8d3e272e31ca195299efc500', a_sip_message.vias[0])
self.assertEqual('SIP/2.0/TLS 200.25.3.250;branch=z9hG4bKfdkajhdiruyalkghjladksjf', a_sip_message.vias[1])
self.assertEqual('SIP/2.0/TLS 200.25.3.255;branch=z9hG4bKduyroiuryaludhgviukfhlasf', a_sip_message.vias[2])
self.assertEqual('SIP/2.0/TLS localhost', a_sip_message.vias[3])
a_sip_message.header.add_header_field_before_header_fields_of_same_class(ViaSIPHeaderField.new_for_attributes(host='localhost', transport='TCP'))
self.assertTrue(5, len(a_sip_message.vias))
self.assertTrue(a_sip_message.header.header_fields[4].is_max_forwards)
self.assertTrue(a_sip_message.header.header_fields[5].is_via)
self.assertTrue(a_sip_message.header.header_fields[6].is_via)
self.assertTrue(a_sip_message.header.header_fields[7].is_via)
self.assertTrue(a_sip_message.header.header_fields[8].is_via)
self.assertTrue(a_sip_message.header.header_fields[9].is_via)
self.assertTrue(a_sip_message.header.header_fields[10].is_user_agent)
self.assertEqual('SIP/2.0/TCP localhost', a_sip_message.vias[0])
self.assertEqual('SIP/2.0/TLS 200.25.3.150;branch=z9hG4bK0ee8d3e272e31ca195299efc500', a_sip_message.vias[1])
self.assertEqual('SIP/2.0/TLS 200.25.3.250;branch=z9hG4bKfdkajhdiruyalkghjladksjf', a_sip_message.vias[2])
self.assertEqual('SIP/2.0/TLS 200.25.3.255;branch=z9hG4bKduyroiuryaludhgviukfhlasf', a_sip_message.vias[3])
self.assertEqual('SIP/2.0/TLS localhost', a_sip_message.vias[4])
a_sip_message.header.remove_first_header_field_of_class(ViaSIPHeaderField)
self.assertTrue(4, len(a_sip_message.vias))
self.assertTrue(a_sip_message.header.header_fields[4].is_max_forwards)
self.assertTrue(a_sip_message.header.header_fields[5].is_via)
self.assertTrue(a_sip_message.header.header_fields[6].is_via)
self.assertTrue(a_sip_message.header.header_fields[7].is_via)
self.assertTrue(a_sip_message.header.header_fields[8].is_via)
self.assertTrue(a_sip_message.header.header_fields[9].is_user_agent)
self.assertEqual('SIP/2.0/TLS 200.25.3.150;branch=z9hG4bK0ee8d3e272e31ca195299efc500', a_sip_message.vias[0])
self.assertEqual('SIP/2.0/TLS 200.25.3.250;branch=z9hG4bKfdkajhdiruyalkghjladksjf', a_sip_message.vias[1])
self.assertEqual('SIP/2.0/TLS 200.25.3.255;branch=z9hG4bKduyroiuryaludhgviukfhlasf', a_sip_message.vias[2])
self.assertEqual('SIP/2.0/TLS localhost', a_sip_message.vias[3])
# TODO: test adding and removing header field of class that doesn't already exist in header.
for via in a_sip_message.header.via_header_fields:
via.generate_invariant_branch_for_sip_header(a_sip_message.header)
self.assertIsInstance(via.branch, basestring)
| bobjects/BobStack | bobstack/tests/abstractSIPMessageTestCase.py | Python | apache-2.0 | 60,836 |
"""
Tools for processing mentions:
- Finds candidate entities
- Calculates commonness
@author: Faegheh Hasibi (faegheh.hasibi@idi.ntnu.no)
"""
from nordlys.tagme.config import SF_WIKI
class Mention(object):
    """A (lower-cased) n-gram mention of text.

    Candidate entities are looked up lazily from the surface-form dictionary
    SF_WIKI and cached on the instance; commonness is computed from the
    anchor-link counts.
    """

    def __init__(self, text):
        # Matching against SF_WIKI is case-insensitive, so normalize once.
        self.text = text.lower()
        self.__matched_ens = None  # all entities matching a mention (from all sources)
        self.__wiki_occurrences = None  # cached commonness denominator

    @property
    def matched_ens(self):
        return self.__gen_matched_ens()

    @property
    def wiki_occurrences(self):
        return self.__calc_wiki_occurrences()

    def __gen_matched_ens(self):
        """Gets all entities matching the n-gram (lazy, cached)."""
        if self.__matched_ens is None:
            matches = SF_WIKI.get(self.text)
            self.__matched_ens = matches if matches is not None else {}
        return self.__matched_ens

    def __calc_wiki_occurrences(self):
        """Calculates the denominator for commonness (for Wiki annotations)."""
        if self.__wiki_occurrences is None:
            # .values()/sum instead of an iteritems() loop: works on both
            # Python 2 and 3 and is clearer.
            self.__wiki_occurrences = sum(
                self.matched_ens.get('anchor', {}).values())
        return self.__wiki_occurrences

    def get_men_candidate_ens(self, commonness_th):
        """
        Gets candidate entities for the given n-gram.

        :param commonness_th: commonness threshold
        :return: dictionary {Wiki_uri: commonness, ..}
        """
        candidate_entities = {}
        candidate_entities.update(self.get_wiki_matches(commonness_th))
        return candidate_entities

    def get_wiki_matches(self, commonness_th):
        """
        Gets entity matches from Wikipedia anchors (with dbpedia uris).

        :param commonness_th: float, Commonness threshold
        :return: Dictionary {Wiki_uri: commonness, ...}
        """
        if commonness_th is None:
            commonness_th = 0
        wiki_matches = {}
        # Calculate commonness for each anchor entity and keep only those at
        # or above the threshold.
        for wiki_uri in self.matched_ens.get("anchor", {}):
            cmn = self.calc_commonness(wiki_uri)
            if cmn >= commonness_th:
                wiki_matches[wiki_uri] = cmn
        # Title/redirect matches are always kept, regardless of threshold.
        for source in ["title", "title-nv", "redirect"]:
            for wiki_uri in self.matched_ens.get(source, {}):
                if wiki_uri not in wiki_matches:
                    wiki_matches[wiki_uri] = self.calc_commonness(wiki_uri)
        return wiki_matches

    def calc_commonness(self, en_uri):
        """
        Calculates commonness for the given entity:
            (times mention linked to entity) / (times mention is linked)
        - Returns zero if the entity is not linked by the mention.

        :param en_uri: Wikipedia uri
        :return Commonness
        """
        if not en_uri.startswith("<wikipedia:"):
            raise Exception("Only Wikipedia URI should be passed to commonness!")
        # Bug fix: previously this raised ZeroDivisionError for mentions that
        # matched only via title/redirect and had no anchor occurrences.
        if self.wiki_occurrences == 0:
            return 0.0
        return (self.matched_ens.get('anchor', {}).get(en_uri, 0) /
                float(self.wiki_occurrences))
| hasibi/TAGME-Reproducibility | nordlys/tagme/mention.py | Python | mit | 3,220 |
from django.core.management.base import BaseCommand, CommandError
from treeherder.perf.models import PerformanceFramework
class Command(BaseCommand):
    """Management command that registers a new performance framework."""

    help = "Add a performance framework to treeherder"
    args = '<framework>'

    def handle(self, *args, **options):
        """Create a PerformanceFramework named by the single positional arg."""
        if len(args) != 1:
            raise CommandError("Need to (only) specify the unique identifier for framework")
        (framework_name,) = args
        PerformanceFramework.objects.create(name=framework_name)
| moijes12/treeherder | treeherder/perf/management/commands/add_perf_framework.py | Python | mpl-2.0 | 453 |
# coding: utf-8
import re, sys, codecs
import alignment, read_inf_xml, clean_annotated_tweet
def calc_alignment_sentence(sentence, rules):
    """Extract character-alignment rules from one annotated sentence and
    accumulate their counts into the shared *rules* dict (rule -> count).

    Rules are gathered at window sizes 1-3 for every
    (converted, original) word pair found in the sentence.
    """
    # need to consider the case of multiple non-standard words
    original_converted_list = get_original_informal_pair(sentence)
    ## hopefully, change it to multiple rules
    # current_rules = alignment.reverse_trace(table, original, converted)
    current_rules = []
    for pair in original_converted_list:
        converted = pair[0]
        original = pair[1]
        print "----before alignining ---"
        print converted, original
        # Collect alignment rules at three context window sizes.
        current_rules.extend(alignment.rules_with_window_size(original, converted, 1))
        current_rules.extend(alignment.rules_with_window_size(original, converted, 2))
        current_rules.extend(alignment.rules_with_window_size(original, converted, 3))
    # Tally each extracted rule into the caller-owned counter dict.
    for rule in current_rules:
        #print rule
        if not rules.has_key(rule):
            rules[rule] = 1
        else:
            rules[rule] += 1
def get_original_informal_pair(sentence):
    """Parse an annotated sentence into (converted, original) word pairs.

    Attributes come back from the XML parser in groups of four; index i is
    the original (informal) form and index i+3 the normalized form (with
    Juman interjection tags stripped).  Returns [("", "")] when the sentence
    carries no annotations.
    """
    attributes = read_inf_xml.parse_attributes(sentence)
    if attributes == []: return [("", "")]
    converted_original_list = []
    i = 0
    while i + 3 < len(attributes):
        converted = clean_annotated_tweet.filter_juman_interjection(attributes[i + 3])
        print "converted = " + converted
        original = attributes[i]
        pair = (converted, original)
        converted_original_list.append(pair)
        i += 4
    # Debugging output, kept disabled as a string literal.
    '''
    print "--------printing attributes"
    for p in attributes:
        print p
    for pair in converted_original_list:
        print pair[0], pair[1]
    '''
    return converted_original_list
def filter_juman_interjection(raw_segment):
    """Strip Juman interjection tags from *raw_segment*.

    Removes every occurrence of the tag 感動詞, optionally followed by a
    comma and one of the qualifiers 非標準表記 / 長音挿入.
    """
    interjection_tag = re.compile(u"(感動詞(,)?(非標準表記|長音挿入)?)")
    return interjection_tag.sub("", raw_segment)
def calc_alignment():
    """Read the annotated corpus named by sys.argv[1], accumulate alignment
    rule frequencies over all sentences, print/write non-identity rules to
    "<input>_alignment", and return the rule-count dict.

    The raw counts serve as the initialization for the later convergence
    (EM-style) step.
    """
    # calculats the frequency, as the initialization of the convergence
    argv = sys.argv
    f_in = codecs.open(argv[1], "r", "utf-8")
    f_out = codecs.open(argv[1] + "_alignment", "w", "utf-8")
    rules = {}
    for line in f_in:
        # strip the trailing newline before processing
        calc_alignment_sentence(line[:-1], rules)
    # calculate the rules applied for each sentence
    #print out the simple frequency
    for k, v in sorted(rules.items(), key=lambda x:x[1]):
        # skip identity rules (character maps to itself)
        if k[0] == k[1]: continue
        print k[0] + " -> " + k[1] + " " + str(rules[k])
        f_out.write(k[0] + " -> " + k[1] + " " + str(rules[k]) + "\n")
    return rules
def calc_conditional_prob(rules):
    """Convert raw rule counts into conditional probabilities.

    For each rule (source, target), the probability is
        count(source -> target) / sum of counts of all rules with the
        same source (informal) side.

    :param rules: dict mapping (source, target) pairs to counts
    :return: dict mapping the same pairs to probabilities in (0, 1]
    """
    # Total number of times each source (informal) side appears on the
    # left-hand side of any rule.  (Previously this loop carried an unused
    # local `before` and a redundant if/else; dict.get simplifies it.)
    # TODO: consider the pair, not only the informal word
    total_formal_count = {}
    for rule, count in rules.items():
        total_formal_count[rule[0]] = total_formal_count.get(rule[0], 0) + count
    rules_prob = {}
    for rule, count in rules.items():
        # 1.0 * count forces float division on Python 2 as well.
        rules_prob[rule] = (1.0 * count) / total_formal_count[rule[0]]
    return rules_prob
def calc_current_conditional_prob():
    """Compute alignment counts from the input corpus and print the
    conditional probability of every non-identity rule, sorted by
    probability (ascending)."""
    rules = calc_alignment()
    for k, v in sorted(calc_conditional_prob(rules).items(), key=lambda x:x[1]):
        # skip identity rules (character maps to itself)
        if k[0] == k[1]: continue
        print k[0] + " -> " + k[1] + " " + str(v)
if __name__ == "__main__":
    # Entry point: expects the annotated corpus path as sys.argv[1].
    calc_current_conditional_prob()
| akkikiki/Research | calc_alignment.py | Python | apache-2.0 | 3,106 |
from pysnmp.smi import view
from pysnmp.smi.rfc1902 import *
__all__ = ['CommandGeneratorVarBinds', 'NotificationOriginatorVarBinds']
class AbstractVarBinds:
    """Shared base: lazily creates and caches a MIB view controller on the
    SNMP engine's user context."""

    def getMibViewController(self, snmpEngine):
        """Return the engine-wide MibViewController, building it on first use."""
        controller = snmpEngine.getUserContext('mibViewController')
        if not controller:
            controller = view.MibViewController(snmpEngine.getMibBuilder())
            snmpEngine.setUserContext(mibViewController=controller)
        return controller
class CommandGeneratorVarBinds(AbstractVarBinds):
    """Normalizes var-bindings for command generator applications."""

    def makeVarBinds(self, snmpEngine, varBinds):
        """Coerce each var-binding into an ObjectType resolved against the MIB.

        Accepts ready-made ObjectType instances, (ObjectIdentity, value)
        pairs, the legacy ((oid-tuple, ...), value) form, and plain
        (name, value) pairs.  Branch order matters: ObjectType is checked
        first, then progressively rawer representations.
        """
        mibViewController = self.getMibViewController(snmpEngine)
        resolvedVarBinds = []
        for varBind in varBinds:
            if isinstance(varBind, ObjectType):
                objectType = varBind
            elif isinstance(varBind[0], ObjectIdentity):
                objectType = ObjectType(*varBind)
            elif isinstance(varBind[0][0], tuple):  # legacy
                objectType = ObjectType(
                    ObjectIdentity(varBind[0][0][0], varBind[0][0][1], *varBind[0][1:]),
                    varBind[1]
                )
            else:
                objectType = ObjectType(ObjectIdentity(varBind[0]), varBind[1])
            resolvedVarBinds.append(objectType.resolveWithMib(mibViewController))
        return resolvedVarBinds

    def unmakeVarBinds(self, snmpEngine, varBinds, lookupMib=True):
        """Optionally resolve raw (oid, value) pairs back through the MIB."""
        if not lookupMib:
            return varBinds
        mibViewController = self.getMibViewController(snmpEngine)
        return [ObjectType(ObjectIdentity(vb[0]), vb[1]).resolveWithMib(mibViewController)
                for vb in varBinds]
class NotificationOriginatorVarBinds(AbstractVarBinds):
    """Normalizes var-bindings for notification originator applications."""

    def makeVarBinds(self, snmpEngine, varBinds):
        """Coerce each var-binding into an ObjectType resolved against the MIB.

        A whole NotificationType is resolved in place first; its iteration
        then yields the individual var-bindings.
        """
        mibViewController = self.getMibViewController(snmpEngine)
        if isinstance(varBinds, NotificationType):
            varBinds.resolveWithMib(mibViewController)
        resolvedVarBinds = []
        for varBind in varBinds:
            if isinstance(varBind, ObjectType):
                objectType = varBind
            elif isinstance(varBind[0], ObjectIdentity):
                objectType = ObjectType(*varBind)
            else:
                objectType = ObjectType(ObjectIdentity(varBind[0]), varBind[1])
            resolvedVarBinds.append(objectType.resolveWithMib(mibViewController))
        return resolvedVarBinds

    def unmakeVarBinds(self, snmpEngine, varBinds, lookupMib=False):
        """Optionally resolve raw (oid, value) pairs back through the MIB.

        Note: unlike CommandGeneratorVarBinds, lookupMib defaults to False.
        """
        if not lookupMib:
            return varBinds
        mibViewController = self.getMibViewController(snmpEngine)
        return [ObjectType(ObjectIdentity(vb[0]), vb[1]).resolveWithMib(mibViewController)
                for vb in varBinds]
| imron/scalyr-agent-2 | scalyr_agent/third_party/pysnmp/hlapi/varbinds.py | Python | apache-2.0 | 2,626 |
#!/usr/bin/env python
import os
import unittest
from six import StringIO
from piprot.piprot import parse_req_file
class TestRequirementsParser(unittest.TestCase):
    """Unit tests for piprot's requirements-file parser.

    ``parse_req_file`` yields ``(name, version, ignored)`` triples (plus
    verbatim comment entries when ``verbatim=True``).

    Fixed: ``assertTrue(a == b)`` / ``assertTrue(x == False)`` replaced with
    ``assertEqual`` / ``assertFalse`` / ``assertIn`` so failures report the
    mismatching values instead of a bare "False is not true".
    """

    def setUp(self):
        pass

    def test_requirement_exact(self):
        # An exact pin yields name, version and ignored=False.
        f = StringIO("requests==1.2.3")
        d = parse_req_file(f)
        self.assertEqual(d[0][0], 'requests')
        self.assertEqual(d[0][1], '1.2.3')
        self.assertFalse(d[0][2])

    def test_requirements_with_extra(self):
        # Extras such as [security] are stripped from the package name.
        f = StringIO("requests[security]==1.2.3")
        d = parse_req_file(f)
        self.assertEqual(d[0][0], 'requests')
        self.assertEqual(d[0][1], '1.2.3')

    def test_requirements_ignore(self):
        # A trailing "# norot" comment marks the requirement as ignored.
        f = StringIO("requests==1.2.3 # norot")
        d = parse_req_file(f)
        self.assertEqual(d[0][0], 'requests')
        self.assertEqual(d[0][1], '1.2.3')
        self.assertTrue(d[0][2])

    def test_requirements_file(self):
        with open(
            os.path.join(os.path.dirname(__file__), 'files/_develop.txt')
        ) as f:
            d = parse_req_file(f, verbatim=False)
            self.assertEqual(d[0][0], 'ipython')
            self.assertEqual(d[0][1], '1.1.0')

    def test_recursive_requirements_file(self):
        # "-r other.txt" lines must be followed recursively.
        with open(
            os.path.join(
                os.path.dirname(__file__),
                'files/test-requirements.txt'
            )
        ) as f:
            d = parse_req_file(f, verbatim=False)
            reqs = [x[0] for x in d]
            self.assertIn('ipython', reqs)

    def test_ignore_in_requirements_file(self):
        with open(
            os.path.join(
                os.path.dirname(__file__),
                'files/test-requirements.txt'
            )
        ) as f:
            d = parse_req_file(f, verbatim=False)
            ignored = [x[0] for x in d if x[2]]
            self.assertIn('piprot', ignored)

    def test_requirements_length(self):
        with open(
            os.path.join(os.path.dirname(__file__), 'files/_develop.txt')
        ) as f:
            d = parse_req_file(f, verbatim=False)
            self.assertEqual(len(d), 1)

    def test_recursive_requirements_length(self):
        with open(
            os.path.join(
                os.path.dirname(__file__),
                'files/test-requirements.txt'
            )
        ) as f:
            d = parse_req_file(f, verbatim=False)
            self.assertEqual(len(d), 5)

    def test_requirements_file_verbatim(self):
        # verbatim=True keeps comment lines as (None-name) entries.
        with open(
            os.path.join(os.path.dirname(__file__), 'files/_develop.txt')
        ) as f:
            d = parse_req_file(f, verbatim=True)
            comments = [x[1] for x in d if not x[0]]
            self.assertIn('# Development Requirements\n', comments)

    def test_recursive_requirements_file_verbatim(self):
        # NOTE(review): this opens _develop.txt just like the non-recursive
        # test above — possibly intended to use test-requirements.txt; verify.
        with open(
            os.path.join(os.path.dirname(__file__), 'files/_develop.txt')
        ) as f:
            d = parse_req_file(f, verbatim=True)
            comments = [x[1] for x in d if not x[0]]
            self.assertIn('# Development Requirements\n', comments)
if __name__ == '__main__':
    # Allow running this test module directly: python test_parser.py
    unittest.main()
| sesh/piprot | piprot/test/test_parser.py | Python | mit | 3,174 |
#!/usr/bin/env python
### ToDo
''' A script to quickly analyze relevant measurement parameters, without calculating n, for comparison of different spectrometers.
The script should work for single input files, and also for a series of sample and reference measurements.
runGetSignalInfos should print:
maxDR of fourier and timedomain data
max SNR of fourier and timedomain data
Bandwidth, highest and lowest accessible frequency
if sample and reference measurements are specified this should be done for both of them, and also for H
runGetSingalInfos should plot:
TD Plot along with uncertainties
FD Plot along with uncertainties
SNR Plot in TD and frequency domain
DR Plot in TD and frequency domain
if sample and reference measurements are specified, this should be done for both of them, and also for H (comparison option)
Maybe it would also be favorable to specify a series of reference files from one spectrometer (i.e. Marburg, or INRIM on 20.10.2014) and compare it with another series of reference files
'''
### Done
'''
SNR Plot in FD&TD, DR Plot in FD&TD, TD with u
'''
import argparse
import sys
import matplotlib.pyplot as plt
import glob
import Terapy
import TeraData
parser = argparse.ArgumentParser(description='Calculate optical constants from THz-TD Data')
parser.add_argument('--silent',action='store_true',help='switch save results off')
parser.add_argument('--outname','-o',nargs='?',type=str,help='prefix output filenames')
parser.add_argument('--isample','-is',nargs='*',help='list of sample filenames')
parser.add_argument('--ireference','-ir',nargs='*',help='list of reference filenames')
parser.add_argument('--mode','-m',type=str,default='INRIM',choices=['INRIM','Marburg','lucastestformat'],help='format of the datafiles')
parser.add_argument('--thickness','-t',type=float,help='sample thickness')
parser.add_argument('--savePlots','-s',action='store_false',help='turn off saving TD and FD plots')
parser.add_argument('--workpath','-w',type=str,default='./',help='specify a base folder')
args = parser.parse_args()
ireffiles=args.ireference
isamfiles=args.isample
mode=args.mode
thickness=args.thickness
basefolder=args.workpath
reffiles=[]
samfiles=[]
for i in range(len(ireffiles)):
tf=glob.glob(basefolder+ireffiles[i])
reffiles+=tf
for i in range(len(isamfiles)):
tf=glob.glob(basefolder+isamfiles[i])
samfiles+=tf
if len(reffiles)==0:
print "no Reference File specified"
sys.exit()
if len(samfiles)==0:
print "no Sample File specified"
sys.exit()
# Load the raw time-domain traces with the importer matching the selected
# spectrometer file format (mode is restricted by argparse choices).
if mode == 'lucastestformat':
    reftd = TeraData.THzTdData(reffiles)
    samtd = TeraData.THzTdData(samfiles)
elif mode == 'Marburg':
    reftd = TeraData.ImportMarburgData(reffiles)
    samtd = TeraData.ImportMarburgData(samfiles)
elif mode == 'INRIM':
    reftd = TeraData.ImportInrimData(reffiles)
    samtd = TeraData.ImportInrimData(samfiles)

# Frequency-domain representations of both traces.
ref_fd = TeraData.FdData(reftd)
sam_fd = TeraData.FdData(samtd)

# Transfer function object relating reference and sample spectra.
H = Terapy.HMeas(ref_fd, sam_fd)
# Figure: signal-to-noise ratio in the frequency domain (top subplot) and the
# time domain (bottom subplot), both on a logarithmic y-scale.
fig = plt.figure()
ax = fig.add_subplot(2,1,1)
ax.set_xlabel('frequency (GHz)')
ax.set_ylabel('SNR')
ax.grid(True)
# Reference, sample and transfer-function SNR over their own frequency axes.
ax.semilogy(ref_fd.getfreqsGHz(),ref_fd.getSNR(),sam_fd.getfreqsGHz(),sam_fd.getSNR(),H.getfreqsGHz(),H.getSNR())
ax.legend(('ref', 'sam', 'transfer function'))
plt.title('SNR')
#fig2 = plt.figure()
ax2 = fig.add_subplot(2,1,2)
ax2.set_xlabel('time (ps)')
ax2.set_ylabel('SNR')
ax2.grid(True)
ax2.semilogy(reftd.getTimesPs(),reftd.getSNR(),samtd.getTimesPs(),samtd.getSNR())
ax2.legend(('ref', 'sam'))
#plt.title('SNR')
# Figure: dynamic range in the frequency domain (top subplot) and the time
# domain (bottom subplot), both on a logarithmic y-scale.
fig1 = plt.figure()
ax1 = fig1.add_subplot(2,1,1)
ax1.set_xlabel('frequency (GHz)')
ax1.set_ylabel('dynamic range')
ax1.grid(True)
ax1.semilogy(ref_fd.getfreqsGHz(),ref_fd.getDR(),sam_fd.getfreqsGHz(),sam_fd.getDR(),H.getfreqsGHz(),H.getDR())
ax1.legend(('ref', 'sam', 'transfer function'))
plt.title('dynamic range')
#fig3 = plt.figure()
ax3 = fig1.add_subplot(2,1,2)
ax3.set_xlabel('time (ps)')
ax3.set_ylabel('dynamic range')
ax3.grid(True)
ax3.semilogy(reftd.getTimesPs(),reftd.getDR(),samtd.getTimesPs(),samtd.getDR())
ax3.legend(('ref', 'sam'))
#plt.title('dynamic range')
# Figure: raw time-domain traces with uncertainty bands (signal +/- no_std
# times the per-point uncertainty estimate); reference on top, sample below.
fig2 = plt.figure()
ax4 = fig2.add_subplot(2,1,1)
ax4.set_xlabel('time (ps)')
ax4.set_ylabel('X channel (V)')
#ax4.grid(True)
# Width of the uncertainty band in multiples of the uncertainty estimate.
no_std=2
ax4.plot(reftd.getTimesPs(),reftd.getEX(),\
        reftd.getTimesPs(),reftd.getEX() + no_std*reftd.getUncEX(),'g--',\
        reftd.getTimesPs(),reftd.getEX() - no_std*reftd.getUncEX(),'g--')
#ax4.legend(('ref'))
plt.title('reference signal with uncertainty')
#fig5 = plt.figure()
ax5 = fig2.add_subplot(2,1,2)
ax5.set_xlabel('time (ps)')
ax5.set_ylabel('X channel (V)')
#ax4.grid(True)
no_std=2
ax5.plot(samtd.getTimesPs(),samtd.getEX(),\
        samtd.getTimesPs(),samtd.getEX() + no_std*samtd.getUncEX(),'g--',\
        samtd.getTimesPs(),samtd.getEX() - no_std*samtd.getUncEX(),'g--')
#ax5.legend(('sam'))
plt.title('sample signal with uncertainty')
# Figure: reference amplitude spectrum (top) and phase (bottom) with
# uncertainty bands.
fig3 = plt.figure()
ax6 = fig3.add_subplot(2,1,1)
ax6.set_xlabel('frequency (GHz)')
# NOTE(review): the y-label says 'dynamic range' but the plotted quantity is
# the spectral amplitude -- looks like a copy-paste leftover.
ax6.set_ylabel('dynamic range')
ax6.grid(True)
ax6.semilogy(ref_fd.getfreqsGHz(),ref_fd.getFAbs(),\
        ref_fd.getfreqsGHz(), ref_fd.getFAbs() + ref_fd.getFAbsUnc(), 'g--',\
        ref_fd.getfreqsGHz(), ref_fd.getFAbs() - ref_fd.getFAbsUnc(), 'g--')
#ax6.legend(('ref'))
plt.title('ABS with U')
ax7 = fig3.add_subplot(2,1,2)
ax7.set_xlabel('frequency (GHz)')
ax7.set_ylabel('dynamic range')
ax7.grid(True)
# NOTE(review): the centre curve reads the raw column fdData[:,3] while the
# band curves use getFPh(); presumably these are the same quantity -- confirm.
ax7.semilogy(ref_fd.getfreqsGHz(),ref_fd.fdData[:,3],\
        ref_fd.getfreqsGHz(), ref_fd.getFPh() + ref_fd.getFPhUnc(), 'g--',\
        ref_fd.getfreqsGHz(), ref_fd.getFPh() - ref_fd.getFPhUnc(), 'g--')
#ax7.legend(('ref'))
plt.title('PHASE with U')
# Persist the headline figures of merit (dynamic range and SNR, in both
# domains) to a plain-text log in the work folder.  ``open`` inside a ``with``
# block replaces the deprecated Python-2-only ``file()`` constructor and
# guarantees the handle is closed even if one of the getter calls raises.
with open(basefolder + 'SignalInfo.log', 'w') as fd:
    fd.write('max DR in FD - ref\t' + str(max(ref_fd.getDR())) + '\n'\
    'max DR in FD - sam\t' + str(max(sam_fd.getDR())) + '\n'\
    'max DR in TD - ref\t' + str(max(reftd.getDR())) + '\n'\
    'max DR in TD - sam\t' + str(max(samtd.getDR())) + '\n\n'\
    'max SNR in FD - ref\t' + str(max(ref_fd.getSNR())) + '\n'\
    'max SNR in FD - sam\t' + str(max(sam_fd.getSNR())) + '\n'\
    'max SNR in TD - ref\t' + str(max(reftd.getSNR())) + '\n'\
    'max SNR in TD - sam\t' + str(max(samtd.getSNR())) + '\n')
''' maxDR of fourier and timedomain data
max SNR of fourier and timedomain data
    Bandwidth, highest and lowest accessible frequency
'''
'''if args.outname==None:
args.outname=myana.getFilenameSuggestion()
args.outname+='_'
if args.savePlots:
pylab.ioff()
reftd.doPlotWithunc()
samtd.doPlotWithunc()
pylab.legend(('Reference','Sample'))
pylab.savefig(args.workpath+args.outname + 'Time-Domain.png')
pylab.close()
ref_fd.doPlot()
sam_fd.doPlot()
pylab.figure('FD-ABS-Plot')
pylab.legend(('Reference','Sample'))
pylab.savefig(args.outname + 'ABS-Frequency-Domain.png')
pylab.close()
pylab.figure('FD-PHASE-Plot')
pylab.legend(('Reference','Sample'))
pylab.savefig(args.workpath+args.outname + 'PHASE-Frequency-Domain.png')
pylab.close()
mdata.doPlot()
pylab.savefig(args.workpath+args.outname + 'TransferFunction.png')
pylab.close()
#
myana.plotRefractiveIndex(1,1,args.workpath+args.outname)
myana.saveResults(args.workpath+args.outname)
#'''
plt.show()
| DavidJahn86/terapy | getSignalInfos.py | Python | gpl-2.0 | 7,347 |
import time
import datetime
import sys
import getopt, argparse
from collections import defaultdict
import json
import MySQLdb
import unicodecsv
import pprint
# SQL templates for the item import.  Each item_map_* insert resolves the
# internal item_id and attr_id through sub-selects on the client-supplied item
# id and the attribute name, so callers only bind %(id)s, %(attr_name)s and
# %(value)s.
ITEM_MAP_VARCHAR_INSERT = "insert into item_map_varchar (item_id, attr_id, value) values ((select item_id from items where client_item_id = %(id)s),(select attr_id from item_attr where name = %(attr_name)s), %(value)s )"
ITEM_MAP_DOUBLE_INSERT = "insert into item_map_double (item_id, attr_id, value) values ((select item_id from items where client_item_id = %(id)s),(select attr_id from item_attr where name = %(attr_name)s), %(value)s )"
ITEM_MAP_DATETIME_INSERT = "insert into item_map_datetime (item_id, attr_id, value) values ((select item_id from items where client_item_id = %(id)s),(select attr_id from item_attr where name = %(attr_name)s), %(value)s )"
# Enum values are additionally resolved to their value_id via ITEM_ATTR_ENUM.
ITEM_MAP_ENUM_INSERT = "insert into item_map_enum (item_id, attr_id, value_id) values ((select item_id from items where client_item_id = %(id)s),(select attr_id from item_attr where name = %(attr_name)s), (select e.value_id from ITEM_ATTR_ENUM e, item_attr a where a.name = %(attr_name)s and e.value_name = %(value)s and a.attr_id = e.attr_id) )"
ITEM_MAP_TEXT_INSERT = "insert into item_map_text (item_id, attr_id, value) values ((select item_id from items where client_item_id = %(id)s),(select attr_id from item_attr where name = %(attr_name)s), %(value)s )"
ITEM_MAP_INT_INSERT = "insert into item_map_int (item_id, attr_id, value) values ((select item_id from items where client_item_id = %(id)s),(select attr_id from item_attr where name = %(attr_name)s), %(value)s )"
ITEM_MAP_BOOLEAN_INSERT = "insert into item_map_boolean (item_id, attr_id, value) values ((select item_id from items where client_item_id = %(id)s),(select attr_id from item_attr where name = %(attr_name)s), %(value)s )"
ITEM_INSERT = "INSERT INTO ITEMS (name, first_op, last_op, client_item_id, type) VALUES (%(name)s, NOW(), NOW(), %(id)s, 1)"
# Variant supplying item_id explicitly instead of relying on auto-increment
# (this is the statement doItemInserts actually uses).
ITEM_INSERT_NO_AUTO_INCREMENT = "INSERT INTO ITEMS (item_id, name, first_op, last_op, client_item_id, type) VALUES (%(item_id)s, %(name)s, NOW(), NOW(), %(id)s, 1)"
# Attribute inserts are flushed to the DB in batches of this size.
DB_BATCH_SIZE = 1000
# Maps an attribute's DB type name to the matching insert template.
attr_insert_map = {
    'ENUM': ITEM_MAP_ENUM_INSERT,
    'BOOLEAN': ITEM_MAP_BOOLEAN_INSERT,
    'VARCHAR': ITEM_MAP_VARCHAR_INSERT,
    'TEXT': ITEM_MAP_TEXT_INSERT,
    'DATETIME': ITEM_MAP_DATETIME_INSERT,
    'INT': ITEM_MAP_INT_INSERT,
    'DOUBLE': ITEM_MAP_DOUBLE_INSERT
}
# Schema caches, populated by validateCSVAgainstDb and read by the insert
# helpers.
available_attrs = dict()
available_enums = dict()
def pp(o):
    """Pretty-print *o* to stdout with a 4-space indent (debug helper)."""
    pprint.PrettyPrinter(indent=4).pprint(o)
def retrieveDbAttrs(db):
    """Return {attribute name: (attr_id, type)} for every row of ITEM_ATTR."""
    cursor = db.cursor()
    cursor.execute("SELECT ATTR_ID, NAME, TYPE FROM ITEM_ATTR")
    return dict((name, (attr_id, attr_type))
                for attr_id, name, attr_type in cursor.fetchall())
def retrieveDbEnums(db):
    """Return the enum definitions as {attr_id: {value_name: value_id}}."""
    cursor = db.cursor()
    cursor.execute("SELECT ATTR_ID, VALUE_NAME, VALUE_ID FROM ITEM_ATTR_ENUM")
    enums = defaultdict(dict)
    for attr_id, value_name, value_id in cursor.fetchall():
        enums[attr_id][value_name] = value_id
    return enums
def validateCSVAgainstDb(csv_file, db):
    """Validate the CSV header and every row against the DB schema.

    Caches the attribute and enum definitions in the module-level globals
    ``available_attrs``/``available_enums`` for the later insert phases, then
    checks that every CSV column is a known attribute, that every row has the
    header's field count, and (via validateLine) that enum values exist.
    Exits the process with status 1 on a structural problem.
    """
    global available_attrs, available_enums
    failed = False
    attrs = retrieveDbAttrs(db)
    available_attrs = attrs
    enums = retrieveDbEnums(db)
    available_enums = enums
    with open(csv_file) as csvFile:
        reader = unicodecsv.DictReader(csvFile,encoding='utf-8')
        noOfFields = 0
        for index, line in enumerate(reader, start=1):
            # BUG FIX: the original tested ``index is 1`` -- an identity
            # comparison that only works because CPython interns small ints
            # (and warns on modern Pythons).  ``==`` is the correct test.
            if index == 1:
                noOfFields = len(line)
                if not validateFieldsAgainstDbFields(set(line), attrs, db):
                    exit(1)
            validateLine(index,line, noOfFields, attrs, enums)
            if len(line) != noOfFields:
                # NOTE(review): validateLine already reports this mismatch,
                # so bad rows get printed twice -- kept for compatibility.
                failLine(index, line)
                failed = True
    if failed:
        exit(1)
def validateLine(index,line, noOfFields, attrs, enums):
if len(line) != noOfFields:
failLine(index, line)
failed = True
else:
for word in line:
if str(word) == 'id':
continue
if str(word) == 'name':
continue
value = line[word]
if str(attrs[word][1]) == 'ENUM':
if value not in enums[attrs[word][0]]:
print 'couldn\'t find enum value', value
exit(1)
def validateFieldsAgainstDbFields(fields,attrs, db):
failed = False
for field in fields:
if field not in attrs and field != 'id' and field != 'name':
failed = True
print 'Field \'',field,'\'not an attribute in the DB'
return not failed
def doItemInserts(csv_file, db):
    """Insert one ITEMS row per CSV line, then commit.

    The client-supplied id is used both as client_item_id and as the explicit
    item_id (the auto-increment variant is left commented out below).
    """
    with open(csv_file) as csvFile:
        reader = unicodecsv.DictReader(csvFile,encoding='utf-8')
        inserts = []
        for line in reader:
            client_id = line['id']
            # 'name' is optional in the CSV; default to an empty string.
            name = ''
            if 'name' in line:
                name = line['name']
            inserts.append({'name':name,'id':client_id, 'item_id':client_id})
        cur = db.cursor()
        print "inserting items into the db"
        ###cur.executemany(ITEM_INSERT, inserts)
        cur.executemany(ITEM_INSERT_NO_AUTO_INCREMENT, inserts)
        db.commit()
        print 'finished item inserts'
def doAttrInserts(csv_file, db):
    """Insert attribute values for every CSV row, batched per attribute type.

    Rows are grouped by the attribute's DB type (ENUM, VARCHAR, ...) so each
    batch can use the matching insert template from attr_insert_map; a batch
    is flushed once it exceeds DB_BATCH_SIZE and the remainders are flushed
    at the end, followed by a single commit.  Requires available_attrs to be
    populated (see validateCSVAgainstDb).
    """
    inserts = defaultdict(list)
    insertNum = 0
    with open(csv_file) as csvFile:
        reader = unicodecsv.DictReader(csvFile,encoding='utf-8')
        for line in reader:
            for field_name in line:
                # 'id' and 'name' were already handled by doItemInserts.
                if field_name == 'id' or field_name== 'name':
                    continue
                attr_type = available_attrs[str(field_name)][1]
                inserts[attr_type].append({'attr_name': field_name, 'value': line[field_name], 'id': line['id']})
                if len(inserts[attr_type]) > DB_BATCH_SIZE:
                    insertNum+=1
                    reallyDoInserts(inserts[attr_type], attr_insert_map[attr_type], insertNum, db)
                    # Drop the flushed batch; defaultdict recreates it on demand.
                    del inserts[attr_type]
    # Flush whatever partial batches remain (the enumerate index is unused).
    for index, insert_label in enumerate(inserts, start=1):
        insertNum+=1
        reallyDoInserts(inserts[insert_label], attr_insert_map[insert_label], insertNum, db)
    db.commit()
    print 'finished attribute inserts'
def reallyDoInserts(params, insertStatement, insertNum, db):
    """Execute one batch of attribute inserts; committing is left to the caller."""
    cur = db.cursor()
    print "inserting attribute batch", insertNum,'into the db'
    cur.executemany(insertStatement, params)
def failLine(lineNum, line):
    """Report a CSV row whose field count differs from the header's."""
    print "line",lineNum,"failed as it only had",len(line),"fields"
def cleanUpDb(db):
    """Remove all imported items and attribute values (failure cleanup)."""
    cursor = db.cursor()
    tables = (
        'items',
        'item_map_varchar',
        'item_map_double',
        'item_map_datetime',
        'item_map_int',
        'item_map_boolean',
        'item_map_enum',
        'item_map_text',
    )
    for table in tables:
        cursor.execute('truncate table ' + table)
    db.commit()
def import_items(client_name, db_settings, data_file_fpath):
    """Top-level import: validate the CSV, then load items and attributes.

    Connects to the client's MySQL database (forcing UTF-8 throughout),
    validates the CSV against the schema, and runs the item and attribute
    insert phases.  On any failure the partially imported data is truncated
    and the original exception is re-raised.
    """
    db = MySQLdb.connect(
        host=db_settings['host'],
        user=db_settings['user'],
        passwd=db_settings['password'],
        db=client_name
    )
    db.set_character_set('utf8')
    dbc = db.cursor()
    dbc.execute('SET NAMES utf8;')
    dbc.execute('SET CHARACTER SET utf8;')
    dbc.execute('SET character_set_connection=utf8;')
    # Large executemany batches can exceed MySQL's default packet size.
    dbc.execute("SET GLOBAL max_allowed_packet=1073741824")
    try:
        validateCSVAgainstDb(data_file_fpath, db)
        doItemInserts(data_file_fpath, db)
        doAttrInserts(data_file_fpath,db)
    except:
        # NOTE(review): this bare except also catches the SystemExit raised
        # by the validation helpers, so validation failures trigger cleanup
        # and re-raise too.
        print 'Unexpected error ...', sys.exc_info()[0]
        print 'Clearing DB of items and attributes'
        try:
            cleanUpDb(db)
        except:
            print 'couldn\'t clean up db'
        raise
    print "Successfully ran all inserts"
| smrjan/seldon-server | python/build/lib/seldon/cli/import_items_utils.py | Python | apache-2.0 | 7,344 |
"""
Created on Dec 09, 2014.
"""
from pyramid.httpexceptions import HTTPOk
from everest.mime import XmlMime
from everest.resources.utils import get_root_collection
from thelma.interfaces import ITubeRack
from thelma.tests.functional.conftest import TestFunctionalBase
class TestRackFunctional(TestFunctionalBase):
    """Functional tests for the /tube-racks resource."""
    path = '/tube-racks'
    setup_rdb_context = True

    def test_load_racks(self, app_creator):
        """GET on the collection with a size limit returns a response."""
        rsp = app_creator.get(self.path,
                              params=dict(size=10),
                              status=HTTPOk.code)
        # Idiom fix (PEP 8 / E714): ``x is not None`` instead of ``not x is None``.
        assert rsp is not None

    def test_patch_set_location(self, app_creator,
                                rack_patch_set_location_data):
        """PATCHing a rack with location data sets its location rack."""
        rack_bc = '02490469'
        loc_id = 1513
        rsp = app_creator.patch('%s/%s' % (self.path, rack_bc),
                                params=rack_patch_set_location_data,
                                content_type=XmlMime.mime_type_string,
                                status=HTTPOk.code)
        assert rsp is not None
        coll = get_root_collection(ITubeRack)
        rack = coll[rack_bc]
        lr = rack.get_entity().location_rack
        assert lr is not None
        assert lr.location.id == loc_id

    def test_patch_unset_location(self, app_creator):
        """DELETE on the location sub-resource clears the location rack."""
        rack_bc = '02481966'
        rsp = app_creator.delete('%s/%s/location' % (self.path, rack_bc),
                                 status=HTTPOk.code)
        assert rsp is not None
        coll = get_root_collection(ITubeRack)
        rack = coll[rack_bc]
        assert rack.get_entity().location_rack is None
| helixyte/TheLMA | thelma/tests/functional/test_rack.py | Python | mit | 1,605 |
import os
import pickle
import pprint
import time
from collections import deque
from twisted.internet import reactor
from twisted.application import service
from twisted.application.internet import TimerService
from zope.interface import implements
from foolscap.api import eventually, DeadReferenceError, Referenceable, Tub
from allmydata.util import log
from allmydata.util.encodingutil import quote_output
from allmydata.interfaces import RIStatsProvider, RIStatsGatherer, IStatsProducer
class LoadMonitor(service.MultiService):
    """Samples reactor callback latency once per second.

    Reschedules itself every ``loop_interval`` seconds and records how much
    later than scheduled the callback actually fired; the most recent
    ``num_samples`` delays feed the ``load_monitor.*`` stats.
    """
    implements(IStatsProducer)
    # Seconds between samples.
    loop_interval = 1
    # Number of recent delay samples kept for the average/max stats.
    num_samples = 60
    def __init__(self, provider, warn_if_delay_exceeds=1):
        service.MultiService.__init__(self)
        self.provider = provider
        # Delays longer than this many seconds are logged at UNUSUAL level.
        self.warn_if_delay_exceeds = warn_if_delay_exceeds
        self.started = False
        # Wall-clock time of the previous loop() invocation.
        self.last = None
        # Recent delay samples (deque, trimmed to num_samples).
        self.stats = deque()
        self.timer = None
    def startService(self):
        if not self.started:
            self.started = True
            self.timer = reactor.callLater(self.loop_interval, self.loop)
        service.MultiService.startService(self)
    def stopService(self):
        self.started = False
        if self.timer:
            self.timer.cancel()
            self.timer = None
        return service.MultiService.stopService(self)
    def loop(self):
        self.timer = None
        if not self.started:
            return
        now = time.time()
        if self.last is not None:
            # How much later than the scheduled interval we actually ran;
            # this approximates reactor load.
            delay = now - self.last - self.loop_interval
            if delay > self.warn_if_delay_exceeds:
                log.msg(format='excessive reactor delay (%ss)', args=(delay,),
                        level=log.UNUSUAL)
            self.stats.append(delay)
            while len(self.stats) > self.num_samples:
                self.stats.popleft()
        self.last = now
        self.timer = reactor.callLater(self.loop_interval, self.loop)
    def get_stats(self):
        """Return average and maximum observed delay (IStatsProducer)."""
        if self.stats:
            avg = sum(self.stats) / len(self.stats)
            m_x = max(self.stats)
        else:
            avg = m_x = 0
        return { 'load_monitor.avg_load': avg,
                 'load_monitor.max_load': m_x, }
class CPUUsageMonitor(service.MultiService):
    """Samples process CPU time each minute for 1/5/15-minute usage averages."""
    implements(IStatsProducer)
    # Number of one-minute samples retained (enough for a 15-minute window).
    HISTORY_LENGTH = 15
    # Seconds between samples.
    POLL_INTERVAL = 60
    def __init__(self):
        service.MultiService.__init__(self)
        # we don't use time.clock() here, because the constructor is run by
        # the twistd parent process (as it loads the .tac file), whereas the
        # rest of the program will be run by the child process, after twistd
        # forks. Instead, set self.initial_cpu as soon as the reactor starts
        # up.
        self.initial_cpu = 0.0 # just in case
        eventually(self._set_initial_cpu)
        # List of (wall_time, cpu_time) tuples, oldest first.
        self.samples = []
        # we provide 1min, 5min, and 15min moving averages
        TimerService(self.POLL_INTERVAL, self.check).setServiceParent(self)
    def _set_initial_cpu(self):
        # Baseline CPU time, captured once the reactor is running (see the
        # comment in __init__ for why this is deferred).
        self.initial_cpu = time.clock()
    def check(self):
        # Record one sample; keep one more than HISTORY_LENGTH so a
        # 15-sample window has both endpoints available.
        now_wall = time.time()
        now_cpu = time.clock()
        self.samples.append( (now_wall, now_cpu) )
        while len(self.samples) > self.HISTORY_LENGTH+1:
            self.samples.pop(0)
    def _average_N_minutes(self, size):
        # Fraction of CPU time consumed over the last `size` samples, or
        # None if not enough samples have been collected yet.
        if len(self.samples) < size+1:
            return None
        first = -size-1
        elapsed_wall = self.samples[-1][0] - self.samples[first][0]
        elapsed_cpu = self.samples[-1][1] - self.samples[first][1]
        fraction = elapsed_cpu / elapsed_wall
        return fraction
    def get_stats(self):
        """Return the cpu_monitor.* averages and total (IStatsProducer)."""
        s = {}
        avg = self._average_N_minutes(1)
        if avg is not None:
            s["cpu_monitor.1min_avg"] = avg
        avg = self._average_N_minutes(5)
        if avg is not None:
            s["cpu_monitor.5min_avg"] = avg
        avg = self._average_N_minutes(15)
        if avg is not None:
            s["cpu_monitor.15min_avg"] = avg
        now_cpu = time.clock()
        s["cpu_monitor.total"] = now_cpu - self.initial_cpu
        return s
class StatsProvider(Referenceable, service.MultiService):
    """Collects counters and producer stats; optionally reports to a gatherer.

    When ``gatherer_furl`` is given, connects to the remote stats gatherer
    once the node's tub is ready and registers itself so the gatherer can
    poll remote_get_stats().
    """
    implements(RIStatsProvider)
    def __init__(self, node, gatherer_furl):
        service.MultiService.__init__(self)
        self.node = node
        self.gatherer_furl = gatherer_furl # might be None
        # name -> running total, maintained via count().
        self.counters = {}
        # IStatsProducer instances whose get_stats() output is merged.
        self.stats_producers = []
        # only run the LoadMonitor (which submits a timer every second) if
        # there is a gatherer who is going to be paying attention. Our stats
        # are visible through HTTP even without a gatherer, so run the rest
        # of the stats (including the once-per-minute CPUUsageMonitor)
        if gatherer_furl:
            self.load_monitor = LoadMonitor(self)
            self.load_monitor.setServiceParent(self)
            self.register_producer(self.load_monitor)
        self.cpu_monitor = CPUUsageMonitor()
        self.cpu_monitor.setServiceParent(self)
        self.register_producer(self.cpu_monitor)
    def startService(self):
        if self.node and self.gatherer_furl:
            # Defer the gatherer connection until the tub can make
            # outbound connections.
            d = self.node.when_tub_ready()
            def connect(junk):
                nickname_utf8 = self.node.nickname.encode("utf-8")
                self.node.tub.connectTo(self.gatherer_furl,
                                        self._connected, nickname_utf8)
            d.addCallback(connect)
        service.MultiService.startService(self)
    def count(self, name, delta=1):
        """Add ``delta`` to counter ``name``, creating it at 0 if absent."""
        val = self.counters.setdefault(name, 0)
        self.counters[name] = val + delta
    def register_producer(self, stats_producer):
        """Register an IStatsProducer whose stats feed get_stats()."""
        self.stats_producers.append(IStatsProducer(stats_producer))
    def get_stats(self):
        """Return {'counters': ..., 'stats': merged producer output}."""
        stats = {}
        for sp in self.stats_producers:
            stats.update(sp.get_stats())
        ret = { 'counters': self.counters, 'stats': stats }
        log.msg(format='get_stats() -> %(stats)s', stats=ret, level=log.NOISY)
        return ret
    def remote_get_stats(self):
        # RIStatsProvider entry point, polled remotely by the gatherer.
        return self.get_stats()
    def _connected(self, gatherer, nickname):
        # Register ourselves with the gatherer (fire-and-forget).
        gatherer.callRemoteOnly('provide', self, nickname or '')
class StatsGatherer(Referenceable, service.MultiService):
    """Abstract stats gatherer: polls registered providers once a minute.

    Providers register via the remote 'provide' method; subclasses override
    got_stats() to record each poll result.
    """
    implements(RIStatsGatherer)
    # Seconds between polling rounds.
    poll_interval = 60
    def __init__(self, basedir):
        service.MultiService.__init__(self)
        self.basedir = basedir
        self.clients = {}  # tubid -> remote provider reference
        self.nicknames = {}  # tubid -> nickname supplied at registration
        self.timer = TimerService(self.poll_interval, self.poll)
        self.timer.setServiceParent(self)
    def get_tubid(self, rref):
        return rref.getRemoteTubID()
    def remote_provide(self, provider, nickname):
        # RIStatsGatherer entry point: a provider announces itself.
        tubid = self.get_tubid(provider)
        if tubid == '<unauth>':
            print "WARNING: failed to get tubid for %s (%s)" % (provider, nickname)
            # don't add to clients to poll (polluting data) don't care about disconnect
            return
        self.clients[tubid] = provider
        self.nicknames[tubid] = nickname
    def poll(self):
        # Ask every registered provider for its stats; results arrive
        # asynchronously in got_stats(), failures in lost_client().
        for tubid,client in self.clients.items():
            nickname = self.nicknames.get(tubid)
            d = client.callRemote('get_stats')
            d.addCallbacks(self.got_stats, self.lost_client,
                           callbackArgs=(tubid, nickname),
                           errbackArgs=(tubid,))
            d.addErrback(self.log_client_error, tubid)
    def lost_client(self, f, tubid):
        # this is called lazily, when a get_stats request fails
        del self.clients[tubid]
        del self.nicknames[tubid]
        f.trap(DeadReferenceError)
    def log_client_error(self, f, tubid):
        log.msg("StatsGatherer: error in get_stats(), peerid=%s" % tubid,
                level=log.UNUSUAL, failure=f)
    def got_stats(self, stats, tubid, nickname):
        # Subclass responsibility: record one poll result.
        raise NotImplementedError()
class StdOutStatsGatherer(StatsGatherer):
    """Gatherer that prints connect/disconnect events and stats to stdout."""
    verbose = True
    def remote_provide(self, provider, nickname):
        tubid = self.get_tubid(provider)
        if self.verbose:
            print 'connect "%s" [%s]' % (nickname, tubid)
            provider.notifyOnDisconnect(self.announce_lost_client, tubid)
        StatsGatherer.remote_provide(self, provider, nickname)
    def announce_lost_client(self, tubid):
        print 'disconnect "%s" [%s]' % (self.nicknames[tubid], tubid)
    def got_stats(self, stats, tubid, nickname):
        # Dump each poll result in human-readable form.
        print '"%s" [%s]:' % (nickname, tubid)
        pprint.pprint(stats)
class PickleStatsGatherer(StdOutStatsGatherer):
    """Gatherer persisting the latest stats of each provider to stats.pickle."""
    # inherit from StdOutStatsGatherer for connect/disconnect notifications
    def __init__(self, basedir=".", verbose=True):
        self.verbose = verbose
        StatsGatherer.__init__(self, basedir)
        self.picklefile = os.path.join(basedir, "stats.pickle")
        # Reload previously gathered stats so history survives restarts.
        if os.path.exists(self.picklefile):
            f = open(self.picklefile, 'rb')
            try:
                self.gathered_stats = pickle.load(f)
            except Exception:
                print ("Error while attempting to load pickle file %s.\n"
                       "You may need to restore this file from a backup, or delete it if no backup is available.\n" %
                       quote_output(os.path.abspath(self.picklefile)))
                raise
            f.close()
        else:
            self.gathered_stats = {}
    def got_stats(self, stats, tubid, nickname):
        # Record the newest sample for this provider and write everything out.
        s = self.gathered_stats.setdefault(tubid, {})
        s['timestamp'] = time.time()
        s['nickname'] = nickname
        s['stats'] = stats
        self.dump_pickle()
    def dump_pickle(self):
        # Write to a temp file and swap it in; the unlink keeps the rename
        # working on platforms where rename won't overwrite an existing file.
        tmp = "%s.tmp" % (self.picklefile,)
        f = open(tmp, 'wb')
        pickle.dump(self.gathered_stats, f)
        f.close()
        if os.path.exists(self.picklefile):
            os.unlink(self.picklefile)
        os.rename(tmp, self.picklefile)
class StatsGathererService(service.MultiService):
    """Standalone service wrapping a PickleStatsGatherer in its own Tub.

    Publishes the gatherer's FURL in stats_gatherer.furl and records the
    chosen listening port in 'portnum' so it stays stable across restarts.
    """
    furl_file = "stats_gatherer.furl"
    def __init__(self, basedir=".", verbose=False):
        service.MultiService.__init__(self)
        self.basedir = basedir
        self.tub = Tub(certFile=os.path.join(self.basedir,
                                             "stats_gatherer.pem"))
        self.tub.setServiceParent(self)
        self.tub.setOption("logLocalFailures", True)
        self.tub.setOption("logRemoteFailures", True)
        self.tub.setOption("expose-remote-exception-types", False)
        self.stats_gatherer = PickleStatsGatherer(self.basedir, verbose)
        self.stats_gatherer.setServiceParent(self)
        # Reuse a previously allocated port if one was recorded; otherwise
        # let the kernel pick one ("tcp:0").
        portnumfile = os.path.join(self.basedir, "portnum")
        try:
            portnum = open(portnumfile, "r").read()
        except EnvironmentError:
            portnum = None
        self.listener = self.tub.listenOn(portnum or "tcp:0")
        d = self.tub.setLocationAutomatically()
        if portnum is None:
            d.addCallback(self.save_portnum)
        d.addCallback(self.tub_ready)
        d.addErrback(log.err)
    def save_portnum(self, junk):
        # Persist the kernel-assigned port for reuse on the next start.
        portnum = self.listener.getPortnum()
        portnumfile = os.path.join(self.basedir, 'portnum')
        open(portnumfile, 'wb').write('%d\n' % (portnum,))
    def tub_ready(self, ignored):
        # Register the gatherer and write its FURL where providers find it.
        ff = os.path.join(self.basedir, self.furl_file)
        self.gatherer_furl = self.tub.registerReference(self.stats_gatherer,
                                                        furlFile=ff)
| kytvi2p/tahoe-lafs | src/allmydata/stats.py | Python | gpl-2.0 | 11,418 |
#!/usr/bin/env python
"""
Copyright (c) 2006-2013 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
import re
from lib.core.enums import HTTP_HEADER
from lib.core.settings import WAF_ATTACK_VECTORS
__product__ = "TrafficShield (F5 Networks)"
def detect(get_page):
retval = False
for vector in WAF_ATTACK_VECTORS:
page, headers, code = get_page(get=vector)
retval = re.search(r"F5-TrafficShield", headers.get(HTTP_HEADER.SERVER, ""), re.I) is not None
retval |= re.search(r"\AASINFO=", headers.get(HTTP_HEADER.SET_COOKIE, ""), re.I) is not None
if retval:
break
return retval
| golismero/golismero | tools/sqlmap/waf/trafficshield.py | Python | gpl-2.0 | 681 |
# Copyright (c) 2007-2009 Citrix Systems Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 only.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import XenAPI
import commands, re, shutil, sys, tempfile
from pprint import pprint
from XSConsoleAuth import *
from XSConsoleKeymaps import *
from XSConsoleLang import *
from XSConsoleLog import *
from XSConsoleState import *
from XSConsoleUtils import *
class DataMethod:
def __init__(self, inSend, inName):
self.send = inSend
self.name = inName
def __getattr__(self, inName):
return DataMethod(self.send, self.name+[inName])
def __call__(self, inDefault = None):
return self.send(self.name, inDefault)
class Data:
DISK_TIMEOUT_SECONDS = 60
instance = None
def __init__(self):
self.data = {}
self.session = None
@classmethod
def Inst(cls):
if cls.instance is None:
cls.instance = Data()
cls.instance.Create()
return cls.instance
@classmethod
def Reset(cls):
if cls.instance is not None:
del cls.instance
cls.instance = None
def DataCache(self):
# Not for general use
return self.data
def GetData(self, inNames, inDefault = None):
data = self.data
for name in inNames:
if name is '__repr__':
# Error - missing ()
raise Exception('Data call Data.' + '.'.join(inNames[:-1]) + ' must end with ()')
elif name in data:
data = data[name]
else:
return FirstValue(inDefault, Lang('<Unknown>'))
return data
# Attribute access can be used in two ways
# self.host.software_version.oem_model()
# returns the value of self.data['host']['software_version']['oem_model'], or the string '<Unknown>'
# if the element doesn't exist.
# self.host.software_version.oem_model('Default')
# is similar but returns the parameter ('Default' in this case) if the element doesn't exist
def __getattr__(self, inName):
if inName[0].isupper():
# Don't expect elements to start with upper case, so probably an unknown method name
raise Exception("Unknown method Data."+inName)
return DataMethod(self.GetData, [inName])
def RequireSession(self):
if self.session is None:
self.session = Auth.Inst().OpenSession()
return self.session
def Create(self):
# Create fills in data that never changes. Update fills volatile data
self.data = {}
self.ReadTimezones()
self.ReadKeymaps()
(status, output) = commands.getstatusoutput("dmidecode")
if status != 0:
# Use test dmidecode file if there's no real output
(status, output) = commands.getstatusoutput("/bin/cat ./dmidecode.txt")
if status == 0:
self.ScanDmiDecode(output.split("\n"))
(status, output) = commands.getstatusoutput("/sbin/lspci -m")
if status != 0:
(status, output) = commands.getstatusoutput("/usr/bin/lspci -m")
if status == 0:
self.ScanLspci(output.split("\n"))
if os.path.isfile("/usr/bin/ipmitool"):
(status, output) = commands.getstatusoutput("/usr/bin/ipmitool mc info")
if status == 0:
self.ScanIpmiMcInfo(output.split("\n"))
# /proc/cpuinfo has details of the virtual CPUs exposed to DOM-0, not necessarily the real CPUs
(status, output) = commands.getstatusoutput("/bin/cat /proc/cpuinfo")
if status == 0:
self.ScanCPUInfo(output.split("\n"))
(status, output) = commands.getstatusoutput("/usr/bin/openssl x509 -in /etc/xensource/xapi-ssl.pem -fingerprint -noout")
if status == 0:
fp = output.split("=")
if len(fp) >= 2:
self.data['sslfingerprint'] = fp[1]
else:
self.data['sslfingerprint'] = "<Unknown>"
try:
self.data['sshfingerprint'] = ShellPipe('/usr/bin/ssh-keygen', '-lf', '/etc/ssh/ssh_host_rsa_key.pub').AllOutput()[0].split(' ')[1]
except:
self.data['sshfingerprint'] = Lang('<Unknown>')
try:
self.data['state_on_usb_media'] = ( ShellPipe('/bin/bash', '-c', 'source /opt/xensource/libexec/oem-functions; if state_on_usb_media; then exit 1; else exit 0; fi').CallRC() != 0 )
except:
self.data['state_on_usb_media'] = True
self.UpdateFromPatchVersions()
self.Update()
def FakeMetrics(self, inPIF):
retVal = {
'carrier' : False,
'device_name' : '',
'vendor_name' : ''
}
return retVal
def CloseSession(self):
if self.session is not None:
self.session = Auth.Inst().CloseSession(self.session)
def Update(self):
self.data['host'] = {}
self.RequireSession()
if self.session is not None:
try:
try:
thisHost = self.session.xenapi.session.get_this_host(self.session._session)
except XenAPI.Failure, e:
XSLog('Data update connection failed - retrying. Exception was:', e)
self.session = Auth.Inst().CloseSession(self.session)
self.RequireSession()
if self.session is None:
raise Exception('Could not connect to local xapi')
thisHost = self.session.xenapi.session.get_this_host(self.session._session)
hostRecord = self.session.xenapi.host.get_record(thisHost)
self.data['host'] = hostRecord
self.data['host']['opaqueref'] = thisHost
# Expand the items we need in the host record
self.data['host']['metrics'] = self.session.xenapi.host_metrics.get_record(self.data['host']['metrics'])
try:
self.data['host']['suspend_image_sr'] = self.session.xenapi.SR.get_record(self.data['host']['suspend_image_sr'])
except:
# NULL or dangling reference
self.data['host']['suspend_image_sr'] = None
try:
self.data['host']['crash_dump_sr'] = self.session.xenapi.SR.get_record(self.data['host']['crash_dump_sr'])
except:
# NULL or dangling reference
self.data['host']['crash_dump_sr'] = None
convertCPU = lambda cpu: self.session.xenapi.host_cpu.get_record(cpu)
self.data['host']['host_CPUs'] = map(convertCPU, self.data['host']['host_CPUs'])
def convertPIF(inPIF):
retVal = self.session.xenapi.PIF.get_record(inPIF)
try:
retVal['metrics'] = self.session.xenapi.PIF_metrics.get_record(retVal['metrics'])
except XenAPI.Failure:
retVal['metrics' ] = self.FakeMetrics(inPIF)
try:
retVal['network'] = self.session.xenapi.network.get_record(retVal['network'])
except XenAPI.Failure, e:
XSLogError('Missing network record: ', e)
retVal['opaqueref'] = inPIF
return retVal
self.data['host']['PIFs'] = map(convertPIF, self.data['host']['PIFs'])
# Create missing PIF names
for pif in self.data['host']['PIFs']:
if pif['metrics']['device_name'] == '':
if not pif['physical']:
# Bonded PIF
pif['metrics']['device_name'] = Lang("Virtual PIF within ")+pif['network'].get('name_label', Lang('<Unknown>'))
else:
pif['metrics']['device_name'] = Lang('<Unknown>')
# Sort PIFs by device name for consistent order
self.data['host']['PIFs'].sort(lambda x, y : cmp(x['device'], y['device']))
def convertVBD(inVBD):
retVBD = self.session.xenapi.VBD.get_record(inVBD)
retVBD['opaqueref'] = inVBD
return retVBD
def convertVDI(inVDI):
retVDI = self.session.xenapi.VDI.get_record(inVDI)
retVDI['VBDs'] = map(convertVBD, retVDI['VBDs'])
retVDI['opaqueref'] = inVDI
return retVDI
def convertPBD(inPBD):
retPBD = self.session.xenapi.PBD.get_record(inPBD)
srRef = retPBD['SR']
try:
retPBD['SR'] = self.session.xenapi.SR.get_record(retPBD['SR'])
except:
retPBD['SR'] = None # retPBD['SR'] is OpaqueRef:NULL
# Get VDIs for udev SRs only - a pool may have thousands of non-udev VDIs
if retPBD['SR'] is not None:
retPBD['SR']['opaqueref'] = srRef
if retPBD['SR'].get('type', '') == 'udev':
retPBD['SR']['VDIs'] = map(convertVDI, retPBD['SR']['VDIs'])
for vdi in retPBD['SR']['VDIs']:
vdi['SR'] = retPBD['SR']
retPBD['opaqueref'] = inPBD
return retPBD
self.data['host']['PBDs'] = map(convertPBD, self.data['host']['PBDs'])
# Only load the to DOM-0 VM to save time
vmList = self.data['host']['resident_VMs']
for i in range(len(vmList)):
vm = vmList[i]
domID = self.session.xenapi.VM.get_domid(vm)
if domID == '0':
vmList[i] = self.session.xenapi.VM.get_record(vm)
vmList[i]['allowed_VBD_devices'] = self.session.xenapi.VM.get_allowed_VBD_devices(vm)
vmList[i]['opaqueref'] = vm
pools = self.session.xenapi.pool.get_all_records()
def convertPool(inID, inPool):
retPool = inPool
retPool['opaqueref'] = inID
try:
retPool['master_uuid'] = self.session.xenapi.host.get_uuid(inPool['master'])
except:
retPool['master_uuid'] = None
# SRs in the pool record are often apparently valid but dangling references.
# We fetch the uuid to determine whether the SRs are real.
try:
retPool['default_SR_uuid'] = self.session.xenapi.SR.get_uuid(inPool['default_SR'])
except:
retPool['default_SR_uuid'] = None
try:
retPool['suspend_image_SR_uuid'] = self.session.xenapi.SR.get_uuid(inPool['suspend_image_SR'])
except:
retPool['suspend_image_SR_uuid'] = None
try:
retPool['crash_dump_SR_uuid'] = self.session.xenapi.SR.get_uuid(inPool['crash_dump_SR'])
except:
retPool['crash_dump_SR_uuid'] = None
return retPool
self.data['pools'] = {}
for id, pool in pools.iteritems():
self.data['pools'][id] = convertPool(id, pool)
except Exception, e:
XSLogError('Data update failed: ', e)
try:
self.data['sr'] = []
pbdRefs = []
for pbd in self.data['host'].get('PBDs', []):
pbdRefs.append(pbd['opaqueref'])
srMap= self.session.xenapi.SR.get_all_records()
for opaqueRef, values in srMap.iteritems():
values['opaqueref'] = opaqueRef
values['islocal'] = False
for pbdRef in values.get('PBDs', []):
if pbdRef in pbdRefs:
values['islocal'] = True
self.data['sr'].append(values)
except Exception, e:
XSLogError('SR data update failed: ', e)
self.UpdateFromResolveConf()
self.UpdateFromSysconfig()
self.UpdateFromNTPConf()
self.UpdateFromTimezone()
self.UpdateFromKeymap()
if os.path.isfile("/sbin/chkconfig"):
(status, output) = commands.getstatusoutput("/sbin/chkconfig --list sshd && /sbin/chkconfig --list ntpd")
if status == 0:
self.ScanChkConfig(output.split("\n"))
self.DeriveData()
    def DeriveData(self):
        """Build the self.data['derived'] subtree: values computed from the raw
        XenAPI records (CPU model summary, management PIFs, the dom0 VM record,
        and the full product version string)."""
        self.data.update({
            'derived' : {
                'app_name' : Lang("XenCenter"),
                'full_app_name' : Lang("Citrix XenCenter"),
                'cpu_name_summary' : {}
            }
        })
        # Gather up the CPU model names into a more convenient form
        if 'host_CPUs' in self.data['host']:
            hostCPUs = self.data['host']['host_CPUs']
            cpuNameSummary = self.data['derived']['cpu_name_summary']
            for cpu in hostCPUs:
                name = " ".join(cpu['modelname'].split()) # Collapse repeated whitespace
                if name in cpuNameSummary:
                    cpuNameSummary[name] += 1
                else:
                    cpuNameSummary[name] = 1
        # Select the current management PIFs
        self.data['derived']['managementpifs'] = []
        if 'PIFs' in self.data['host']:
            for pif in self.data['host']['PIFs']:
                if pif['management']:
                    self.data['derived']['managementpifs'].append(pif)
        # Add a reference to the DOM-0 VM
        if 'resident_VMs' in self.data['host']:
            for vm in self.data['host']['resident_VMs']:
                if 'domid' in vm and vm['domid'] == '0':
                    self.data['derived']['dom0_vm'] = vm
        # Calculate the full version string: <product>-<build>[-<oem build>]
        version = self.host.software_version.product_version('') + '-' + self.host.software_version.build_number('')
        oemBuildNumber = self.host.software_version.oem_build_number('')
        if oemBuildNumber != '':
            version += '-'+oemBuildNumber
        if version.startswith('-'):
            # Neither product version nor build number was available
            version = Lang("<Unknown>")
        self.data['derived']['fullversion'] = version
    def Dump(self):
        """Debug aid: pretty-print the entire cached data tree."""
        pprint(self.data)
    def HostnameSet(self, inHostname):
        """Set the host's hostname live via XenAPI (authenticated operation)."""
        Auth.Inst().AssertAuthenticated()

        # Protect from shell escapes: allow only letters, digits, '-' and '.'
        if not re.match(r'[-A-Za-z0-9.]+$', inHostname):
            raise Exception("Invalid hostname '"+inHostname+"'")
        IPUtils.AssertValidNetworkName(inHostname)
        self.RequireSession()
        self.session.xenapi.host.set_hostname_live(self.host.opaqueref(), inHostname)
    def NameLabelSet(self, inNameLabel):
        """Set the host's XenAPI name label (the name shown in clients)."""
        self.RequireSession()
        self.session.xenapi.host.set_name_label(self.host.opaqueref(), inNameLabel)
    def NameserversSet(self, inServers):
        """Replace the cached DNS nameserver list (persisted separately)."""
        self.data['dns']['nameservers'] = inServers
    def NTPServersSet(self, inServers):
        """Replace the cached NTP server list (written out by SaveToNTPConf)."""
        self.data['ntp']['servers'] = inServers
    def LoggingDestinationSet(self, inDestination):
        """Point syslog forwarding at inDestination and reconfigure syslog."""
        Auth.Inst().AssertAuthenticated()
        self.RequireSession()
        # Remove any existing destination before applying the new one
        self.session.xenapi.host.remove_from_logging(self.host.opaqueref(), 'syslog_destination')
        self.session.xenapi.host.add_to_logging(self.host.opaqueref(), 'syslog_destination', inDestination)
        self.session.xenapi.host.syslog_reconfigure(self.host.opaqueref())
    def UpdateFromResolveConf(self):
        """Re-read /etc/resolv.conf into the data cache (silently skipped on failure)."""
        (status, output) = commands.getstatusoutput("/bin/cat /etc/resolv.conf")
        if status == 0:
            self.ScanResolvConf(output.split("\n"))
    def UpdateFromSysconfig(self):
        """Re-read /etc/sysconfig/network into the data cache."""
        (status, output) = commands.getstatusoutput("/bin/cat /etc/sysconfig/network")
        if status == 0:
            self.ScanSysconfigNetwork(output.split("\n"))
    def UpdateFromNTPConf(self):
        """Re-read /etc/ntp.conf into the data cache."""
        (status, output) = commands.getstatusoutput("/bin/cat /etc/ntp.conf")
        if status == 0:
            self.ScanNTPConf(output.split("\n"))
def StringToBool(self, inString):
return inString.lower().startswith('true')
    def RootLabel(self):
        """Return the filesystem label of the root partition, parsed from the
        kernel command line (root=LABEL=<label>).  Falls back to 'xe-0x' if
        the label cannot be found."""
        output = commands.getoutput('/bin/cat /proc/cmdline')
        match = re.search(r'root=\s*LABEL\s*=\s*(\S+)', output)
        if match:
            retVal = match.group(1)
        else:
            retVal = 'xe-0x'
        return retVal
def GetVersion(self, inLabel):
match = re.match(r'(xe-|rt-)(\d+)[a-z]', inLabel)
if match:
retVal = int(match.group(2))
else:
retVal = 0
return retVal
    def UpdateFromPatchVersions(self):
        """Determine the build number installed on the alternate (backup) boot
        partition by mounting it read-only and reading its xensource-inventory
        file.  The result (or None on any failure) is stored in
        self.data['backup']['alternateversion']."""
        self.data['backup'] = {}
        alternateVersion = None
        try:
            try:
                # find-partition prints 'device,...' for the alternate partition
                alternateDev = ShellPipe('/opt/xensource/libexec/find-partition', 'alternate').Stdout()[0].split(',')[0]
                alternateMount = tempfile.mkdtemp(".xsconsole")
                ShellPipe('/bin/mount', '-t', 'auto', '-o', 'ro', alternateDev, alternateMount).Call()
                rootfsDev = alternateMount + '/rootfs'
                if not os.path.isfile(rootfsDev):
                    # Plain filesystem: the inventory lives directly on the partition
                    inventoryMount = alternateMount
                else:
                    # Squashfs image: loop-mount it to reach the inventory file
                    rootfsMount = tempfile.mkdtemp(".xsconsole")
                    ShellPipe('/bin/mount', '-t', 'squashfs', '-o', 'loop,ro', rootfsDev, rootfsMount).Call()
                    inventoryMount = rootfsMount
                inventoryFile = open(inventoryMount+'/etc/xensource-inventory')
                for line in inventoryFile:
                    match = re.match(r"\s*BUILD_NUMBER\s*=\s*'([^']*)'", line)
                    if match:
                        alternateVersion = match.group(1)
                        break
            except Exception, e:
                XSLog('UpdateFromPatchVersions failed: ', e)
        finally:
            # Undefined variables raise exceptions, so this code will only undo operations that succeeded
            try: inventoryFile.close()
            except: pass
            try: ShellPipe('/bin/umount', '-d', rootfsMount).Call() # -d for loopback device
            except: pass
            try: os.rmdir(rootfsMount)
            except: pass
            try: ShellPipe('/bin/umount', alternateMount).Call()
            except: pass
            try: os.rmdir(alternateMount)
            except: pass
        self.data['backup']['alternateversion'] = alternateVersion
    def CanRevert(self):
        """Return True only when the backup partition holds an *earlier* build
        than the running one.  Any failure to obtain or parse either build
        number disables revert (returns False)."""
        # Revert if the alternate version is earlier than the current version.
        try:
            numCurrent = int(re.match(r'([0-9]+)', self.host.software_version.build_number()).group(1))
            numAlternate = int(re.match(r'([0-9]+)', self.backup.alternateversion()).group(1))
            retVal = (numAlternate < numCurrent)
        except:
            retVal = False
        return retVal
    def Revert(self):
        """Switch the bootloader to the alternate partition, guarded by CanRevert."""
        if self.CanRevert():
            ShellPipe('/opt/xensource/libexec/set-boot', 'alternate').Call()
        else:
            raise Exception("Unable to revert")
    def SaveToSysconfig(self):
        """Write the cached hostname back to /etc/sysconfig/network, preserving
        all other (non-HOSTNAME) lines, then re-read the file into the cache."""
        # Double-check authentication
        Auth.Inst().AssertAuthenticated()
        file = None
        try:
            file = open("/etc/sysconfig/network", "w")
            for other in self.sysconfig.network.othercontents([]):
                file.write(other+"\n")
            file.write("HOSTNAME="+self.sysconfig.network.hostname('')+"\n")
        finally:
            if file is not None: file.close()
        self.UpdateFromSysconfig()
    def SaveToNTPConf(self):
        """Write the cached NTP server list back to /etc/ntp.conf, preserving
        all non-server lines, then re-read the file into the cache."""
        # Double-check authentication
        Auth.Inst().AssertAuthenticated()
        file = None
        try:
            file = open("/etc/ntp.conf", "w")
            for other in self.ntp.othercontents([]):
                file.write(other+"\n")
            for server in self.ntp.servers([]):
                file.write("server "+server+"\n")
        finally:
            if file is not None: file.close()
        self.UpdateFromNTPConf()
def ScanDmiDecode(self, inLines):
STATE_NEXT_ELEMENT = 2
state = 0
handles = []
self.data['dmi'] = {
'cpu_sockets' : 0,
'cpu_populated_sockets' : 0,
'memory_sockets' : 0,
'memory_modules' : 0,
'memory_size' : 0
}
for line in inLines:
indent = 0
while len(line) > 0 and line[0] == "\t":
indent += 1
line = line[1:]
if indent == 0 and state > 3:
state = STATE_NEXT_ELEMENT
if state == 0:
self.data['dmi']['dmi_banner'] = line
state += 1
elif state == 1:
match = re.match(r'(SMBIOS\s+\S+).*', line)
if match:
self.data['dmi']['smbios'] = match.group(1)
state += 1
elif state == 2:
# scan for 'Handle...' line
if indent == 0:
match = re.match(r'Handle (.*)$', line)
if match and (match.group(1) not in handles):
handles.append(match.group(1))
state += 1
elif state == 3:
if indent == 0:
elementName = line
if elementName == 'BIOS Information': state = 4
elif elementName == 'System Information': state = 5
elif elementName == 'Chassis Information': state = 6
elif elementName == 'Processor Information': state = 7
elif elementName == 'Memory Device': state = 8
else:
state = STATE_NEXT_ELEMENT
else:
state = STATE_NEXT_ELEMENT
elif state == 4: # BIOS Information
self.Match(line, r'Vendor:\s*(.*?)\s*$', 'bios_vendor')
self.Match(line, r'Version:\s*(.*?)\s*$', 'bios_version')
elif state == 5: # System Information
self.Match(line, r'Manufacturer:\s*(.*?)\s*$', 'system_manufacturer')
self.Match(line, r'Product Name:\s*(.*?)\s*$', 'system_product_name')
self.Match(line, r'Serial Number:\s*(.*?)\s*$', 'system_serial_number')
elif state == 6: # Chassis information
self.Match(line, r'Asset Tag:\s*(.*?)\s*$', 'asset_tag')
elif state == 7: # Processor information
if self.MultipleMatch(line, r'Socket Designation:\s*(.*?)\s*$', 'cpu_socket_designations'):
self.data['dmi']['cpu_sockets'] += 1
if re.match(r'Status:.*Populated.*', line):
self.data['dmi']['cpu_populated_sockets'] += 1
elif state == 8: # Memory Device
if self.MultipleMatch(line, r'Locator:\s*(.*?)\s*$', 'memory_locators'):
self.data['dmi']['memory_sockets'] += 1
match = self.MultipleMatch(line, r'Size:\s*(.*?)\s*$', 'memory_sizes')
if match:
size = re.match(r'(\d+)\s+([MGBmgb]+)', match.group(1))
if size and size.group(2).lower() == 'mb':
self.data['dmi']['memory_size'] += int(size.group(1))
self.data['dmi']['memory_modules'] += 1
elif size and size.group(2).lower() == 'gb':
self.data['dmi']['memory'] += int(size.group(1)) * 1024
self.data['dmi']['memory_modules'] += 1
def Match(self, inLine, inRegExp, inKey):
match = re.match(inRegExp, inLine)
if match:
self.data['dmi'][inKey] = match.group(1)
return match
def MultipleMatch(self, inLine, inRegExp, inKey):
match = re.match(inRegExp, inLine)
if match:
if not self.data['dmi'].has_key(inKey):
self.data['dmi'][inKey] = []
self.data['dmi'][inKey].append(match.group(1))
return match
def ScanLspci(self, inLines):
self.data['lspci'] = {
'storage_controllers' : []
}
# Spot storage controllers by looking for keywords or the phrase 'storage controller' in the lspci output
classExp = re.compile(r'[Ss]torage|IDE|PATA|SATA|SCSI|SAS|RAID|[Ff]iber [Cc]hannel|[Ff]ibre [Cc]hannel')
nameExp = re.compile(r'IDE|PATA|SATA|SCSI|SAS|RAID|[Ff]iber [Cc]hannel|[Ff]ibre [Cc]hannel')
unknownExp = re.compile(r'[Uu]nknown [Dd]evice')
regExp = re.compile(
r'[^"]*' + # Bus position, etc.
r'"([^"]*)"[^"]+' + # Class
r'"([^"]*)"[^"]+' + # Vendor
r'"([^"]*)"[^"]+' + # Device
r'"([^"]*)"[^"]+' + # SVendor
r'"([^"]*)"') # SDevice
for line in inLines:
match = regExp.match(line)
if match:
devClass = match.group(1)
devVendor = match.group(2)
devName = match.group(3)
devSVendor = match.group(4)
devSName = match.group(5)
# Determine whether this device is a storage controller
if (classExp.search(devClass) or
nameExp.search(devName) or
nameExp.search(devSName)):
# Device is a candidate for the list. Do we have a useful name for it?
if not unknownExp.search(devSName) and devSName != '':
self.data['lspci']['storage_controllers'].append((devClass, devSVendor+' '+devSName)) # Tuple so double brackets
elif not unknownExp.search(devName):
self.data['lspci']['storage_controllers'].append((devClass, devName)) # Tuple so double brackets
else:
self.data['lspci']['storage_controllers'].append((devClass, devVendor+' '+devName)) # Tuple so double brackets
def ScanIpmiMcInfo(self, inLines):
self.data['bmc'] = {}
for line in inLines:
match = re.match(r'Firmware\s+Revision\s*:\s*([-0-9.]+)', line)
if match:
self.data['bmc']['version'] = match.group(1)
def ScanChkConfig(self, inLines):
self.data['chkconfig'] = {}
for line in inLines:
# Is sshd on for runlevel 5?
if re.match(r'sshd.*5\s*:\s*on', line, re.IGNORECASE):
self.data['chkconfig']['sshd'] = True
elif re.match(r'sshd.*5\s*:\s*off', line, re.IGNORECASE):
self.data['chkconfig']['sshd'] = False
# else leave as Unknown
elif re.match(r'ntpd.*5\s*:\s*on', line, re.IGNORECASE):
self.data['chkconfig']['ntpd'] = True
elif re.match(r'ntpd.*5\s*:\s*off', line, re.IGNORECASE):
self.data['chkconfig']['ntpd'] = False
def ScanResolvConf(self, inLines):
self.data['dns'] = {
'nameservers' : [],
'othercontents' : []
}
for line in inLines:
match = re.match(r'nameserver\s+(\S+)', line)
if match:
self.data['dns']['nameservers'].append(match.group(1))
else:
self.data['dns']['othercontents'].append(line)
def ScanSysconfigNetwork(self, inLines):
if not 'sysconfig' in self.data:
self.data['sysconfig'] = {}
self.data['sysconfig']['network'] = {'othercontents' : [] }
for line in inLines:
match = re.match(r'HOSTNAME\s*=\s*(.*)', line)
if match:
self.data['sysconfig']['network']['hostname'] = match.group(1)
else:
self.data['sysconfig']['network']['othercontents'].append(line)
def ScanNTPConf(self, inLines):
if not 'ntp' in self.data:
self.data['ntp'] = {}
self.data['ntp']['servers'] = []
self.data['ntp']['othercontents'] = []
for line in inLines:
match = re.match(r'server\s+(\S+)', line)
if match and not match.group(1).startswith('127.127.'):
self.data['ntp']['servers'].append(match.group(1))
else:
self.data['ntp']['othercontents'].append(line)
def ScanCPUInfo(self, inLines):
self.data['cpuinfo'] = {}
for line in inLines:
match = re.match(r'flags\s*:\s*(.*)', line)
if match:
self.data['cpuinfo']['flags'] = match.group(1).split()
    def ReadTimezones(self):
        """Build the timezone tables: a fixed display-name -> zoneinfo-prefix map
        for continents, plus a city map populated by walking /usr/share/zoneinfo
        for entries under one of the known prefixes."""
        self.data['timezones'] = {
            'continents': {
                Lang('Africa') : 'Africa',
                Lang('Americas') : 'America',
                Lang('US') : 'US',
                Lang('Canada') : 'Canada',
                Lang('Asia') : 'Asia',
                Lang('Atlantic Ocean') : 'Atlantic',
                Lang('Australia') : 'Australia',
                Lang('Europe') : 'Europe',
                Lang('Indian Ocean') : 'Indian',
                Lang('Pacific Ocean') : 'Pacific',
                Lang('Other') : 'Etc'
            },
            'cities' : {}
        }
        # Accept only zoneinfo paths that start with one of the known prefixes
        filterExp = re.compile('('+'|'.join(self.data['timezones']['continents'].values())+')')
        zonePath = '/usr/share/zoneinfo'
        for root, dirs, files in os.walk(zonePath):
            for filename in files:
                filePath = os.path.join(root, filename)
                localPath = filePath[len(zonePath)+1:] # Just the path after /usr/share/zoneinfo/
                if filterExp.match(localPath):
                    # Store only those entries starting with one of our known prefixes
                    self.data['timezones']['cities'][localPath] = filePath
    def UpdateFromTimezone(self):
        """Read the currently configured timezone name from /etc/timezone."""
        if os.path.isfile('/etc/timezone'):
            file = open('/etc/timezone')
            # NOTE(review): assumes self.data['timezones'] already exists (i.e.
            # ReadTimezones has run) - verify the call ordering.
            self.data['timezones']['current'] = file.readline().rstrip()
            file.close()
    def TimezoneSet(self, inTimezone):
        """Activate a timezone: re-point the /etc/localtime symlink at the
        matching zoneinfo file and record the choice in /etc/timezone."""
        localtimeFile = '/etc/localtime'
        if os.path.isfile(localtimeFile):
            os.remove(localtimeFile)
        os.symlink(self.timezones.cities({})[inTimezone], localtimeFile)
        file = open('/etc/timezone', 'w')
        file.write(inTimezone+"\n")
        file.close()
    def CurrentTimeString(self):
        """Return the current local time as an RFC 2822 string (output of 'date -R')."""
        return commands.getoutput('/bin/date -R')
    def ReadKeymaps(self):
        """Build the keymap tables by walking /lib/kbd/keymaps/i386 for *.map.gz
        files (skipping the 'include' directory), then attach the human-name ->
        map-name table and log a warning for any name with no map file."""
        self.data['keyboard'] = {
            'keymaps' : {}
        }
        keymapsPath = '/lib/kbd/keymaps/i386'
        excludeExp = re.compile(re.escape(keymapsPath)+r'/include')
        filterExp = re.compile(r'(.*).map.gz$')
        for root, dirs, files in os.walk(keymapsPath):
            for filename in files:
                if not excludeExp.match(root):
                    match = filterExp.match(filename)
                    if match:
                        filePath = os.path.join(root, filename)
                        # Key is the map name without the .map.gz suffix
                        self.data['keyboard']['keymaps'][match.group(1)] = filePath
        self.data['keyboard']['namestomaps'] = Keymaps.NamesToMaps()
        for value in self.data['keyboard']['namestomaps'].values():
            if not value in self.data['keyboard']['keymaps']:
                XSLogError("Warning: Missing keymap " + value)
    def KeymapSet(self, inKeymap):
        """Load a console keymap immediately via loadkeys and record the choice
        both in the persistent State and in /etc/sysconfig/keyboard."""
        # mapFile = self.keyboard.keymaps().get(inKeymap, None)
        # if mapFile is None:
        #     raise Exception(Lang("Unknown keymap '")+str(inKeymap)+"'")

        # Sanitise first: the value is interpolated into a shell command
        keymapParam = ShellUtils.MakeSafeParam(inKeymap)
        # Load the keymap now
        status, output = commands.getstatusoutput('/bin/loadkeys "'+keymapParam+'"')
        if status != 0:
            raise Exception(output)
        # Use state-based method to ensure that keymap is set on first run
        State.Inst().KeymapSet(keymapParam)
        # Store the keymap for next boot
        # Currently this has no effect
        file = open('/etc/sysconfig/keyboard', 'w')
        file.write('KEYTABLE="'+keymapParam+'"\n')
        file.close()
    def KeymapToName(self, inKeymap):
        """Map a keymap file base name (e.g. 'uk') back to its human-readable
        name; falls back to the keymap itself, or '<Default>' when None."""
        # Derive a name to present to the user
        mapName = FirstValue(inKeymap, Lang('<Default>'))
        for key, value in self.keyboard.namestomaps({}).iteritems():
            if value == inKeymap:
                mapName = key
        return mapName
    def UpdateFromKeymap(self):
        """Refresh the cached human-readable name of the active keymap."""
        keymap = State.Inst().Keymap()
        self.data['keyboard']['currentname'] = self.KeymapToName(keymap)
    def SuspendSRSet(self, inSR):
        """Set the pool's suspend-image SR (authenticated operation)."""
        # Double-check authentication
        Auth.Inst().AssertAuthenticated()
        self.RequireSession()
        pool = self.GetPoolForThisHost()
        # NOTE(review): this passes inSR['uuid'] while CrashDumpSRSet (below) and
        # SetPoolSRIfRequired pass an opaque ref - confirm which form the
        # pool.set_suspend_image_SR call actually expects.
        self.session.xenapi.pool.set_suspend_image_SR(pool['opaqueref'], inSR['uuid'])
    def CrashDumpSRSet(self, inSR):
        """Set the pool's crash-dump SR (authenticated operation)."""
        # Double-check authentication
        Auth.Inst().AssertAuthenticated()
        self.RequireSession()
        pool = self.GetPoolForThisHost()
        self.session.xenapi.pool.set_crash_dump_SR(pool['opaqueref'], inSR['opaqueref'])
def RemovePartitionSuffix(self, inDevice):
regExpList = [
r'(/dev/disk/by-id.*?)-part[0-9]+$',
r'(/dev/cciss/.*?)p[0-9]+$',
r'(/dev/.*?)[0-9]+$'
]
retVal = inDevice
for regExp in regExpList:
match = re.match(regExp, inDevice)
if match:
retVal = match.group(1)
break
return retVal
def GetSRFromDevice(self, inDevice):
retVal = None
for pbd in self.host.PBDs([]):
device = pbd.get('device_config', {}).get('device', '')
if self.RemovePartitionSuffix(device) == inDevice:
# This is the PBD containing the device. Does it have an SR?
sr = pbd.get('SR', None)
if sr.get('name_label', None) is not None:
retVal = sr
return retVal
    def SetPoolSRIfRequired(self, inOpaqueRef):
        """Use the given SR for each pool role (default, suspend-image,
        crash-dump) that is currently unset - a uuid of None means the
        reference is missing or dangling (see convertPool)."""
        Auth.Inst().AssertAuthenticated()
        self.RequireSession()
        pool = self.GetPoolForThisHost()
        if pool is not None:
            if pool['default_SR_uuid'] is None:
                self.session.xenapi.pool.set_default_SR(pool['opaqueref'], inOpaqueRef)
            if pool['suspend_image_SR_uuid'] is None:
                self.session.xenapi.pool.set_suspend_image_SR(pool['opaqueref'], inOpaqueRef)
            if pool['crash_dump_SR_uuid'] is None:
                self.session.xenapi.pool.set_crash_dump_SR(pool['opaqueref'], inOpaqueRef)
    def SetPoolSRsFromDeviceIfNotSet(self, inDevice):
        """As SetPoolSRIfRequired, but starting from a device name; raises if
        the device has no associated SR."""
        sr = self.GetSRFromDevice(inDevice)
        if sr is None:
            raise Exception(Lang("Device does not have an associated SR"))
        self.SetPoolSRIfRequired(sr['opaqueref'])
    def GetPoolForThisHost(self):
        """Return the (single) known pool record, or None when no pool exists."""
        self.RequireSession()
        retVal = None
        for pool in self.pools({}).values():
            # Currently there is only one pool
            retVal = pool
            break
        return retVal
    def ReconfigureManagement(self, inPIF, inMode, inIP, inNetmask, inGateway, inDNS = None):
        """Move the management interface onto inPIF with the given IP settings,
        then signal the networking change.  The XenAPI session is always closed
        afterwards because the connection may no longer be valid."""
        # Double-check authentication
        Auth.Inst().AssertAuthenticated()
        try:
            self.RequireSession()
            self.session.xenapi.PIF.reconfigure_ip(inPIF['opaqueref'], inMode, inIP, inNetmask, inGateway, FirstValue(inDNS, ''))
            self.session.xenapi.host.management_reconfigure(inPIF['opaqueref'])
            status, output = commands.getstatusoutput('/opt/xensource/bin/xe host-signal-networking-change')
            if status != 0:
                raise Exception(output)
        finally:
            # Network reconfigured so this link is potentially no longer valid
            self.session = Auth.Inst().CloseSession(self.session)
    def DisableManagement(self):
        """Disable the management interface and deconfigure the PIFs it was
        using.  The XenAPI session is always closed afterwards for the same
        reason as in ReconfigureManagement."""
        # Double-check authentication
        Auth.Inst().AssertAuthenticated()
        try:
            self.RequireSession()
            # Disable management interfaces
            self.session.xenapi.host.management_disable()
            # Disable the PIF that the management interface was using
            for pif in self.derived.managementpifs([]):
                self.session.xenapi.PIF.reconfigure_ip(pif['opaqueref'], 'None','' ,'' ,'' ,'')
        finally:
            # Network reconfigured so this link is potentially no longer valid
            self.session = Auth.Inst().CloseSession(self.session)
    def LocalHostEnable(self):
        """Enable this host (allow VMs to run) via XenAPI."""
        Auth.Inst().AssertAuthenticatedOrPasswordUnset()
        self.RequireSession()
        self.session.xenapi.host.enable(self.host.opaqueref())
    def LocalHostDisable(self):
        """Disable this host (prevent new VMs from starting) via XenAPI."""
        Auth.Inst().AssertAuthenticatedOrPasswordUnset()
        self.RequireSession()
        self.session.xenapi.host.disable(self.host.opaqueref())
    def ConfigureRemoteShell(self, inEnable):
        """Enable or disable the sshd service at boot via chkconfig."""
        if inEnable:
            status, output = commands.getstatusoutput("/sbin/chkconfig sshd on")
        else:
            status, output = commands.getstatusoutput("/sbin/chkconfig sshd off")
        if status != 0:
            raise Exception(output)
    def Ping(self, inDest):
        """Send a single ping (2s deadline) to inDest.  Returns a tuple of
        (success flag, combined command output).  The destination is validated
        first because it is passed to an external command."""
        # Must be careful that no unsanitised data is passed to the command
        if not re.match(r'[0-9a-zA-Z][-0-9a-zA-Z.]*$', inDest):
            raise Exception("Invalid destination '"+inDest+"'")
        IPUtils.AssertValidNetworkName(inDest)
        pipe = ShellPipe('/bin/ping', '-c', '1', '-w', '2', inDest)
        status = pipe.CallRC()
        return (status == 0, "\n".join(pipe.AllOutput()))
    def ManagementIP(self, inDefault = None):
        """Return the host's management IP address, or inDefault if unknown."""
        retVal = inDefault
        retVal = self.host.address(retVal)
        return retVal
    def ManagementNetmask(self, inDefault = None):
        """Return the netmask of the management interface, or inDefault.
        Static configurations come from the PIF record; DHCP configurations
        are recovered by parsing ifconfig output."""
        retVal = inDefault
        # FIXME: Address should come from API, but not available at present.  For DHCP the value is recovered by parsing ifconfig output
        for pif in self.derived.managementpifs([]):
            if pif['ip_configuration_mode'].lower().startswith('static'):
                # For static IP the API address is correct
                retVal = pif['netmask']
            elif pif['ip_configuration_mode'].lower().startswith('dhcp'):
                # For DHCP, find the netmask by parsing the output from ifconfig
                if 'bridge' in pif['network']:
                    device = pif['network']['bridge']
                else:
                    device = pif['device']
                device = ShellUtils.MakeSafeParam(device)
                ipre = r'[0-9a-f.:]+'
                ifRE = re.compile(r'\s*inet\s+addr\s*:'+ipre+'\s+bcast\s*:\s*'+ipre+r'\s+mask\s*:\s*('+ipre+r')\s*$',
                    re.IGNORECASE)
                ifconfig = commands.getoutput("/sbin/ifconfig '"+device+"'").split("\n")
                for line in ifconfig:
                    match = ifRE.match(line)
                    if match:
                        retVal = match.group(1)
                        break
        return retVal
    def ManagementGateway(self, inDefault = None):
        """Return the default gateway of the management interface, or inDefault.
        Static configurations come from the PIF record; DHCP configurations
        are recovered by parsing 'route -n' output."""
        retVal = inDefault
        # FIXME: Address should come from API, but not available at present. For DHCP this is just a guess at the gateway address
        for pif in self.derived.managementpifs([]):
            if pif['ip_configuration_mode'].lower().startswith('static'):
                # For static IP the API address is correct
                retVal = pif['gateway']
            elif pif['ip_configuration_mode'].lower().startswith('dhcp'):
                # For DHCP, find the gateway address by parsing the output from the 'route' command
                if 'bridge' in pif['network']:
                    device = pif['network']['bridge']
                else:
                    device = pif['device']
                routeRE = re.compile(r'([0-9.]+)\s+([0-9.]+)\s+([0-9.]+)\s+UG\s+\d+\s+\d+\s+\d+\s+'+device,
                    re.IGNORECASE)
                routes = commands.getoutput("/sbin/route -n").split("\n")
                for line in routes:
                    match = routeRE.match(line)
                    if match:
                        retVal = match.group(2)
                        break
        return retVal
    def VBDGetRecord(self, inVBD):
        """Fetch a VBD record by opaque ref, annotating it with 'opaqueref'."""
        self.RequireSession()
        vbdRecord = self.session.xenapi.VBD.get_record(inVBD)
        vbdRecord['opaqueref'] = inVBD
        return vbdRecord
    def CreateVBD(self, inVM, inVDI, inDeviceNum, inMode = None, inType = None):
        """Create a VBD connecting inVDI to inVM at device number inDeviceNum;
        defaults to a read-only disk.  The 'xsconsole_tmp' marker in
        other_config lets PurgeVBDs recognise VBDs created by xsconsole."""
        self.RequireSession()
        vbd = {
            'VM' : inVM['opaqueref'],
            'VDI' : inVDI['opaqueref'],
            'userdevice' : inDeviceNum,
            'mode' : FirstValue(inMode, 'ro'),
            'bootable' : False,
            'type' : FirstValue(inType, 'disk'),
            'unpluggable' : True,
            'empty' : False,
            'other_config' : { 'xsconsole_tmp' : 'Created: '+time.asctime(time.gmtime()) },
            'qos_algorithm_type' : '',
            'qos_algorithm_params' : {}
        }
        newVBD = self.session.xenapi.VBD.create(vbd)
        return self.VBDGetRecord(newVBD)
    def PlugVBD(self, inVBD):
        """Plug a VBD into its VM, guarded by a timeout."""
        def TimedOp():
            self.session.xenapi.VBD.plug(inVBD['opaqueref'])
        TimeUtils.TimeoutWrapper(TimedOp, self.DISK_TIMEOUT_SECONDS)
        # Must reread to get the filled-in device field
        return self.VBDGetRecord(inVBD['opaqueref'])
    def UnplugVBD(self, inVBD):
        """Unplug a VBD and return its refreshed record."""
        self.session.xenapi.VBD.unplug(inVBD['opaqueref'])
        return self.VBDGetRecord(inVBD['opaqueref'])
    def DestroyVBD(self, inVBD):
        """Destroy a VBD (callers unplug it first if it is attached)."""
        self.session.xenapi.VBD.destroy(inVBD['opaqueref'])
def PurgeVBDs(self):
# Destroy any VBDs that xsconsole created but isn't using
vbdRefs = {} # Use a dict to remove duplicates
# Iterate through all VBDs we know about
for pbd in Data.Inst().host.PBDs([]):
sr = pbd.get('SR', {})
for vdi in sr.get('VDIs', []):
for vbd in vdi.get('VBDs', []):
if 'xsconsole_tmp' in vbd.get('other_config', {}):
vbdRefs[ vbd['opaqueref'] ] = vbd
for vbd in vbdRefs.values():
try:
# Currently this won't destroy mounted VBDs
if vbd['currently_attached']:
self.UnplugVBD(vbd)
self.DestroyVBD(vbd)
except Exception, e:
XSLogError('VBD purge failed', e)
    def IsXAPIRunning(self):
        """Return True if a xapi daemon process is running (per pidof)."""
        try:
            if ShellPipe('/sbin/pidof', '-s', '/opt/xensource/bin/xapi').CallRC() == 0:
                retVal = True
            else:
                retVal = False
        except:
            retVal = False
        return retVal
    def StopXAPI(self):
        """Stop xapi, persistently recording that xsconsole stopped it so that
        a later StartXAPI knows to clear the flag."""
        if self.IsXAPIRunning():
            State.Inst().WeStoppedXAPISet(True)
            State.Inst().SaveIfRequired()
            ShellPipe('/etc/init.d/xapi', 'stop').Call()
    def StartXAPI(self):
        """Start xapi if it isn't already running and clear the we-stopped-it flag."""
        if not self.IsXAPIRunning():
            ShellPipe('/etc/init.d/xapi', 'start').Call()
            State.Inst().WeStoppedXAPISet(False)
            State.Inst().SaveIfRequired()
    def EnableNTP(self):
        """Enable the ntpd service at boot and start it immediately."""
        status, output = commands.getstatusoutput(
            "(export TERM=xterm && /sbin/chkconfig ntpd on && /etc/init.d/ntpd start)")
        if status != 0:
            raise Exception(output)
    def DisableNTP(self):
        """Disable the ntpd service at boot and stop it immediately."""
        status, output = commands.getstatusoutput(
            "(export TERM=xterm && /sbin/chkconfig ntpd off && /etc/init.d/ntpd stop)")
        if status != 0:
            raise Exception(output)
    def RestartNTP(self):
        """Restart the ntpd service (e.g. after the server list changes)."""
        status, output = commands.getstatusoutput("(export TERM=xterm && /etc/init.d/ntpd restart)")
        if status != 0:
            raise Exception(output)
    def NTPStatus(self):
        """Return the raw output of ntpstat (synchronisation status)."""
        status, output = commands.getstatusoutput("/usr/bin/ntpstat")
        return output
    def SetVerboseBoot(self, inVerbose):
        """Select verbose ('noisy') or quiet boot output via set-boot and
        record the choice in the persistent State."""
        if inVerbose:
            name = 'noisy'
        else:
            name = 'quiet'
        status, output = commands.getstatusoutput(
            "(export TERM=xterm && /opt/xensource/libexec/set-boot " + name + ")")
        if status != 0:
            raise Exception(output)
        State.Inst().VerboseBootSet(inVerbose)
| mcclurmc/xcp-console | XSConsoleData.py | Python | gpl-2.0 | 46,581 |
import sure # noqa # pylint: disable=unused-import
from unittest.mock import patch
from moto.server import main, create_backend_app, DomainDispatcherApplication
def test_wrong_arguments():
    """main() must exit (raise SystemExit) when given too many positional args."""
    raised = False
    try:
        main(["name", "test1", "test2", "test3"])
    except SystemExit:
        raised = True
    assert raised, (
        "main() when called with the incorrect number of args"
        " should raise a system exit"
    )
@patch("moto.server.run_simple")
def test_right_arguments(run_simple):
    """main() with only a service name binds to the default host and port."""
    main(["s3"])
    # run_simple is called as run_simple(hostname, port, app, ...)
    func_call = run_simple.call_args[0]
    # Plain asserts replace the deprecated `sure` monkey-patched .should syntax
    assert func_call[0] == "127.0.0.1"
    assert func_call[1] == 5000
@patch("moto.server.run_simple")
def test_port_argument(run_simple):
    """main() honours an explicit --port argument."""
    main(["s3", "--port", "8080"])
    # run_simple is called as run_simple(hostname, port, app, ...)
    func_call = run_simple.call_args[0]
    # Plain asserts replace the deprecated `sure` monkey-patched .should syntax
    assert func_call[0] == "127.0.0.1"
    assert func_call[1] == 8080
def test_domain_dispatched():
    """The dispatcher selects a backend app from the Host header (SES here)."""
    dispatcher = DomainDispatcherApplication(create_backend_app)
    backend_app = dispatcher.get_application(
        {"HTTP_HOST": "email.us-east1.amazonaws.com"}
    )
    keys = list(backend_app.view_functions.keys())
    # Plain asserts replace the deprecated `sure` monkey-patched .should syntax
    assert keys[0] == "EmailResponse.dispatch"
def test_domain_dispatched_with_service():
    """If we pass a particular service, the dispatcher always returns that
    backend regardless of the Host header's subdomain."""
    dispatcher = DomainDispatcherApplication(create_backend_app, service="s3")
    backend_app = dispatcher.get_application({"HTTP_HOST": "s3.us-east1.amazonaws.com"})
    keys = set(backend_app.view_functions.keys())
    # Plain asserts replace the deprecated `sure` monkey-patched .should syntax
    assert "ResponseObject.key_response" in keys
| spulec/moto | tests/test_core/test_server.py | Python | apache-2.0 | 1,547 |
"""
audit_log - File ``/var/log/audit/audit.log``
=============================================
"""
import shlex
from datetime import date
from .. import LogFileOutput, parser, add_filter
from insights.specs import Specs
# Currently only SELinux-related (AVC type) audit log lines are of interest.
# The filter is applied here in the parser module so that the many other
# record types are dropped before parsing; the list can be extended to meet
# further requirements.
filter_list = [
    'type=AVC',
]
add_filter(Specs.audit_log, filter_list)
@parser(Specs.audit_log)
class AuditLog(LogFileOutput):
    """
    Class for parsing ``/var/log/audit/audit.log`` file.

    Sample log lines::

        type=CRYPTO_KEY_USER msg=audit(1506046832.641:53584): pid=16865 uid=0 auid=0 ses=7247 subj=unconfined_u:unconfined_r:unconfined_t:s0-s0:c0.c1023 msg='op=destroy kind=session fp=? direction=both spid=16865 suid=0 rport=59296 laddr=192.0.2.1 lport=22  exe="/usr/sbin/sshd" hostname=? addr=192.0.2.3 terminal=? res=success'
        type=LOGIN msg=audit(1506047401.407:53591): pid=482 uid=0 subj=system_u:system_r:crond_t:s0-s0:c0.c1023 old-auid=4294967295 auid=993 old-ses=4294967295 ses=7389 res=1
        type=AVC msg=audit(1506487181.009:32794): avc:  denied  { create } for  pid=27960 comm="mongod" scontext=system_u:system_r:mongod_t:s0 tcontext=system_u:system_r:mongod_t:s0 tclass=unix_dgram_socket

    Examples:
        >>> log = shared[AuditLog]
        >>> log.get('type=AVC')
        [{
            'is_valid': True,
            'timestamp': '1506487181.009',
            'unparsed': 'avc:  denied  { create } for',
            'msg_ID': '32794',
            'pid': '27960',
            'raw_message': 'type=AVC msg=audit(1506487181.009:32794): avc:  denied  { create } for  pid=27960 comm="mongod" scontext=system_u:system_r:mongod_t:s0 tcontext=system_u:system_r:mongod_t:s0 tclass=unix_dgram_socket',
            'comm': 'mongod',
            'scontext': 'system_u:system_r:mongod_t:s0',
            'tclass': 'unix_dgram_socket',
            'type': 'AVC',
            'tcontext': 'system_u:system_r:mongod_t:s0'
        }]
        >>> assert len(list(log.get_after(timestamp=date.fromtimestamp(1506047401.407)))) == 3
    """

    def _parse_line(self, line):
        """
        Parse a log line into an info dictionary.

        Parsing logic:

        * First, use `shlex.split` to split on whitespace (respecting quotes).
        * Next, assert the first two tokens are 'type=...' and 'msg=audit(...)'.
        * Next, walk the remaining tokens in reverse collecting key=value pairs.
        * Any leftover unparsed text is stored under "unparsed".
        * If a line isn't parseable, return a dict with only 'raw_message' and
          'is_valid' (False).

        Parameters:
            line(string): Log line to parse

        Returns:
            Dictionary corresponding to the data in line split into
            functional elements:

            * **raw_message** - the original unparsed line
            * **is_valid** - true if the line is parseable, else false
            * **type** - the log type
            * **timestamp** - the timestamp of the log line (as a string)
            * **msg_ID** - the serial number behind the timestamp
            * **unparsed** - the unparsed part in log, may or may not exist

            If the line is not regular, for some reason, then as many fields as
            possible are pulled from the line.
        """
        info = {'raw_message': line, 'is_valid': False}
        linesp = shlex.split(line)
        if (len(linesp) < 2 or
                not (linesp[0] and linesp[0].startswith('type=')) or
                not (linesp[1] and linesp[1].startswith('msg=audit('))):
            return info
        # Bug fix: slice the fixed prefix off rather than using str.lstrip().
        # lstrip() removes a *character set*, not a prefix, so it could also
        # eat leading characters of the value itself.
        timestamp_id = linesp[1][len('msg=audit('):].rstrip('):')
        timestamp_id_sp = timestamp_id.split(':')
        if len(timestamp_id_sp) != 2:
            return info
        info['type'] = linesp[0][len('type='):]
        info['timestamp'] = timestamp_id_sp[0]
        info['msg_ID'] = timestamp_id_sp[1]
        for index in range(len(linesp) - 1, 1, -1):
            itemsp = linesp[index].split('=', 1)
            if len(itemsp) < 2:
                # Not a key=value token: keep everything between the
                # msg=audit(...) header and the next parsed token as "unparsed"
                unparsed1 = (line.rsplit(linesp[index + 1])[0]
                             if index + 1 < len(linesp)
                             else line)
                unparsed2 = unparsed1.split(linesp[1])[-1]
                info["unparsed"] = unparsed2.strip()
                break
            info[itemsp[0]] = itemsp[1]
        info['is_valid'] = True
        return info

    def get_after(self, timestamp, s=None):
        """
        Find all the (available) logs that are after the given time stamp.
        Overrides this function in class LogFileOutput.

        Parameters:
            timestamp(datetime.date): lines with timestamps on or before this
                date are ignored (the comparison is against the date parsed
                from each line's epoch timestamp).
            s(str or list): one or more strings to search for.
                If not supplied, all available lines are searched.

        Yields:
            (dict): the parsed data of lines with timestamps after this date in the
            same format they were supplied.
        """
        search_by_expression = self._valid_search(s)
        for line in self.lines:
            # If `s` is not None, keywords must be found in the line
            if s and not search_by_expression(line):
                continue
            info = self._parse_line(line)
            try:
                logtime = date.fromtimestamp(float(info.get('timestamp', 0)))
                if logtime > timestamp:
                    yield info
            except (ValueError, OverflowError, OSError):
                # Missing, malformed or out-of-range timestamp: skip the line
                # (previously a bare except, which also hid unrelated errors)
                pass
| RedHatInsights/insights-core | insights/parsers/audit_log.py | Python | apache-2.0 | 5,722 |
import numpy as np
from pydlv.dl_model_base import DLModelBase
class DLModel3(DLModelBase):
    '''
    This class defines the mathematical model for decision landscape in the form of
    a system of two ordinary differential equations. It also encapsulates the definitions of
    fit error functions and its jacobians.

    coeffs is an array of model parameters, indexed as
    0: tau, 1: c_11, 2: c_21, 3: c_12
    '''
    def __init__(self):
        # BUG FIX: the original called super(DLModelBase, self), which looks
        # up __init__ *past* DLModelBase in the MRO and therefore skipped the
        # base-class initializer entirely.  super() must be given the class
        # itself, not its parent.
        super(DLModel3, self).__init__()
        self.n_params = 4

    def dV_dx(self, x, y, coeffs):
        """Partial derivative of the potential V with respect to x."""
        return (x - 1)*(x + 1)*x + coeffs[1]*y + coeffs[2]*x*y + (coeffs[3]/2)*y**2

    def dV_dy(self, x, y, coeffs):
        """Partial derivative of the potential V with respect to y."""
        return (y - 1)*y + coeffs[1]*x + (coeffs[2]/2)*x**2 + coeffs[3]*x*y

    def V(self, x, y, coeffs):
        """Value of the potential (decision landscape) at point (x, y)."""
        V_x = x**4/4 - x**2/2
        V_y = y**3/3 - y**2/2
        V_xy = coeffs[1]*x*y + (coeffs[2]/2)*x**2*y + (coeffs[3]/2)*x*y**2
        return V_x + V_y + V_xy

    def model_error_jac(self, coeffs, trajectory):
        """Jacobian of the fit error function with respect to the parameters.

        Needed by gradient-based fitting algorithms.  `trajectory` is expected
        to expose x/y/vx/vy as pandas-like `.values` arrays -- same contract
        as the base class's error function.
        """
        model_vx, model_vy = self.model([trajectory.x.values, trajectory.y.values], 0, coeffs)
        x, y, vx, vy = trajectory.x.values, trajectory.y.values, \
                        trajectory.vx.values, trajectory.vy.values
        # Common factor -2*(model - data)/tau reused by every component.
        left_operand = -2*np.vstack(((model_vx - vx)/coeffs[0], (model_vy - vy)/coeffs[0]))
        de_dtau = (left_operand[0] * model_vx + left_operand[1] * model_vy).mean()
        de_dc11 = (left_operand[0] * y + left_operand[1] * x).mean()
        de_dc21 = (left_operand[0] * x*y + left_operand[1] * x**2/2).mean()
        de_dc12 = (left_operand[0] * y**2/2 + left_operand[1] * x*y).mean()
        return np.array([de_dtau, de_dc11, de_dc21, de_dc12])

    def get_param_names(self):
        """Names of the model parameters, in coeffs order."""
        return ['tau', 'c_11', 'c_21', 'c_12']

    def get_baseline_params(self):
        """Baseline (null-model) values of the model parameters."""
        return [0.05, 0, 0, 0]

    def get_parameter_bounds(self):
        '''
        Some fitting algorithms rely on bounded optimization methods (including recommended
        L-BFGS-B). This function defines the bounds for each parameter.
        '''
        k = 3.0
        return np.array([[0.005, 1.0], [-k, k], [-k, k], [-k, k]])
# -*- coding: utf-8 -*-
from nprl.td import TDLearning
import unittest
class BasicTestSuite(unittest.TestCase):
    """Basic test cases."""

    def test_run_td(self):
        td = TDLearning(discount_rate=0.5, maximum_state_id=None)
        # Simple one-way environment: ONE -> TWO -> THREE -> FOUR (terminal).
        states = ['ONE', 'TWO', 'THREE', 'FOUR']
        last = len(states) - 1
        for step in range(1000):
            idx = step % len(states)
            is_terminal = (idx == last)
            td.step(states[idx], 1 if is_terminal else 0, is_terminal)
        # Values should decay backwards from the terminal reward by the
        # discount rate: 1.0, 0.5, 0.25 (terminal state itself is ~0).
        encoding = td._state_encoding_dict
        self.assertAlmostEqual(td.get_v_value()[encoding[states[3]]], 0, delta=0.1)
        self.assertAlmostEqual(td.get_v_value()[encoding[states[2]]], 1.0, delta=0.05)
        self.assertAlmostEqual(td.get_v_value()[encoding[states[1]]], 0.5, delta=0.05)
        self.assertAlmostEqual(td.get_v_value()[encoding[states[0]]], 0.25, delta=0.05)
# Allow running this test module directly: `python test_td.py`.
if __name__ == '__main__':
    unittest.main()
#!/usr/bin/env python
# coding: utf-8
import sys
import os
import recursion
import unittest
class KWLTest(unittest.TestCase):
    """Tests for the recursion grammar's nested-brace expressions."""

    def setUp(self):
        self.psr = recursion.recursionParser()
        self.maxDiff = None

    def testToken(self):
        # assertEqual, not the long-deprecated assertEquals alias; the unused
        # alpha/number fixture locals from the original were removed.
        self.assertEqual("1",
                         self.psr.parse("1", rule_name='expression'))
        self.assertEqual([u'{', u'1', u'}'],
                         self.psr.parse("{1}", rule_name='expression'))
        self.assertEqual([u'{', [u'{', u'1', u'}'], u'}'],
                         self.psr.parse("{{1}}", rule_name='expression'))
        self.assertEqual(['{', [u'{', [u'{', u'1', u'}'], u'}'], '}'],
                         self.psr.parse("{{{1}}}", rule_name='expression'))
        # Sentence-level parses are only smoke-checked (printed, no expected
        # value).  print() calls work identically under Python 2 for a single
        # argument, unlike the former print statements.
        print(self.psr.parse("{{{1}}}", rule_name='sentence'))
        print(self.psr.parse("{{{1}}} {1}", rule_name='sentence'))
        print(self.psr.parse("{{{1}}} {1} 1", rule_name='sentence'))
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| kasahorow/kwl | kwl2text/recursion_test.py | Python | bsd-2-clause | 1,091 |
from collections import Counter
import heapq
class Solution(object):
    def topKFrequent(self, nums, k):
        """
        Return the k most frequent elements of nums.

        :type nums: List[int]
        :type k: int
        :rtype: List[int]

        BUG FIX: the previous threshold shortcut (count >= len(nums) // k)
        could return MORE than k elements -- e.g. nums=[1, 2, 3], k=2 gives
        threshold 1, so every element passed and all three were returned.
        A size-k heap over the frequency table is correct and O(n log k).
        """
        counts = Counter(nums)
        return heapq.nlargest(k, counts, key=counts.get)
print(Solution().topKFrequent([1,1,1,2,2,3], 2))
| wufangjie/leetcode | 347. Top K Frequent Elements.py | Python | gpl-3.0 | 596 |
#!/usr/bin/env python
#
# Copyright 2014 Huawei Technologies Co. Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""main script to poll machines which is connected to the switches."""
import functools
import logging
import os
import sys
current_dir = os.path.dirname(os.path.realpath(__file__))
sys.path.append(current_dir)
import switch_virtualenv
import lockfile
from multiprocessing import Pool
from compass.actions import poll_switch
from compass.actions import util
from compass.db.api import database
from compass.db.api import switch as switch_api
from compass.db.api import user as user_api
from compass.tasks.client import celery
from compass.utils import daemonize
from compass.utils import flags
from compass.utils import logsetting
from compass.utils import setting_wrapper as setting
# Command-line flags understood by this daemon.  (Typos in the user-facing
# help strings fixed: 'seperated' -> 'separated', 'ryn' -> 'run'.)
flags.add('switch_ips',
          help='comma separated switch ips',
          default='')
flags.add_bool('async',
               help='run in async mode',
               default=True)
flags.add('thread_pool_size', type='int',
          help='thread pool size when run in noasync mode',
          default=4)
flags.add('run_interval', type='int',
          help='run interval in seconds',
          default=setting.POLLSWITCH_INTERVAL)
def pollswitches(switch_ips):
"""poll switch."""
user = user_api.get_user_object(setting.COMPASS_ADMIN_EMAIL)
poll_switches = []
all_switches = dict([
(switch['ip'], switch['credentials'])
for switch in switch_api.list_switches(user=user)
])
if switch_ips:
poll_switches = dict([
(switch_ip, all_switches[switch_ip])
for switch_ip in switch_ips
if switch_ip in all_switches
])
else:
poll_switches = all_switches
if flags.OPTIONS.async:
for switch_ip, switch_credentials in poll_switches.items():
celery.send_task(
'compass.tasks.pollswitch',
(user.email, switch_ip, switch_credentials)
)
else:
try:
pool = Pool(processes=flags.OPTIONS.thread_pool_size)
for switch_ip, switch_credentials in poll_switches.items():
pool.apply_async(
poll_switch.poll_switch,
(user.email, switch_ip, switch_credentials)
)
pool.close()
pool.join()
except Exception as error:
logging.error('failed to poll switches %s',
poll_switches)
logging.exception(error)
if __name__ == '__main__':
    # Initialize flags/logging/database, then daemonize the poll loop.
    flags.init()
    logsetting.init()
    database.init()
    logging.info('run poll_switch')
    daemonize.daemonize(
        functools.partial(
            pollswitches,
            # Drop empty strings produced by splitting an empty flag value.
            [switch_ip
             for switch_ip in flags.OPTIONS.switch_ips.split(',')
             if switch_ip]),
        flags.OPTIONS.run_interval,
        pidfile=lockfile.FileLock('/var/run/poll_switch.pid'),
        stderr=open('/tmp/poll_switch_err.log', 'w+'),
        stdout=open('/tmp/poll_switch_out.log', 'w+'))
| baigk/compass-core | bin/poll_switch.py | Python | apache-2.0 | 3,576 |
# Copyright 2013, Nachi Ueno, NTT I3, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import copy
import os
import re
import shutil
import jinja2
import netaddr
from oslo.config import cfg
from oslo import messaging
import six
from neutron.agent.linux import ip_lib
from neutron.agent.linux import utils
from neutron.common import rpc as n_rpc
from neutron import context
from neutron.openstack.common import lockutils
from neutron.openstack.common import log as logging
from neutron.openstack.common import loopingcall
from neutron.plugins.common import constants
from neutron.plugins.common import utils as plugin_utils
from neutron.services.vpn.common import topics
from neutron.services.vpn import device_drivers
LOG = logging.getLogger(__name__)
# Directory holding the bundled Jinja2 templates shipped next to this module.
TEMPLATE_PATH = os.path.dirname(__file__)
# Generic ipsec process options (shared by all swan flavours).
ipsec_opts = [
    cfg.StrOpt(
        'config_base_dir',
        default='$state_path/ipsec',
        help=_('Location to store ipsec server config files')),
    cfg.IntOpt('ipsec_status_check_interval',
               default=60,
               help=_("Interval for checking ipsec status"))
]
cfg.CONF.register_opts(ipsec_opts, 'ipsec')
# OpenSwan-specific template locations.
openswan_opts = [
    cfg.StrOpt(
        'ipsec_config_template',
        default=os.path.join(
            TEMPLATE_PATH,
            'template/openswan/ipsec.conf.template'),
        help=_('Template file for ipsec configuration')),
    cfg.StrOpt(
        'ipsec_secret_template',
        default=os.path.join(
            TEMPLATE_PATH,
            'template/openswan/ipsec.secret.template'),
        help=_('Template file for ipsec secret configuration'))
]
cfg.CONF.register_opts(openswan_opts, 'openswan')
# Jinja environment is built lazily by _get_template().
JINJA_ENV = None
# Maps pluto connection states to neutron status constants.
STATUS_MAP = {
    'erouted': constants.ACTIVE,
    'unrouted': constants.DOWN
}
IPSEC_CONNS = 'ipsec_site_connections'
def _get_template(template_file):
    """Load *template_file* through a lazily created module-level Jinja env."""
    global JINJA_ENV
    if JINJA_ENV is None:
        loader = jinja2.FileSystemLoader(searchpath="/")
        JINJA_ENV = jinja2.Environment(loader=loader)
    return JINJA_ENV.get_template(template_file)
@six.add_metaclass(abc.ABCMeta)
class BaseSwanProcess():
    """Swan Family Process Manager
    This class manages start/restart/stop ipsec process.
    This class create/delete config template
    """
    # Executable used for every pluto/whack/addconn invocation.
    binary = "ipsec"
    # Directory skeleton created under the per-router config dir.
    CONFIG_DIRS = [
        'var/run',
        'log',
        'etc',
        'etc/ipsec.d/aacerts',
        'etc/ipsec.d/acerts',
        'etc/ipsec.d/cacerts',
        'etc/ipsec.d/certs',
        'etc/ipsec.d/crls',
        'etc/ipsec.d/ocspcerts',
        'etc/ipsec.d/policies',
        'etc/ipsec.d/private',
        'etc/ipsec.d/reqs',
        'etc/pki/nssdb/'
    ]
    # Translation of neutron API attribute values into the configuration
    # dialect understood by the swan tools; unknown values pass through.
    DIALECT_MAP = {
        "3des": "3des",
        "aes-128": "aes128",
        "aes-256": "aes256",
        "aes-192": "aes192",
        "group2": "modp1024",
        "group5": "modp1536",
        "group14": "modp2048",
        "group15": "modp3072",
        "bi-directional": "start",
        "response-only": "add",
        "v2": "insist",
        "v1": "never"
    }
    def __init__(self, conf, root_helper, process_id,
                 vpnservice, namespace):
        # process_id is the router id: one swan process serves one router.
        self.conf = conf
        self.id = process_id
        self.root_helper = root_helper
        # True when a PENDING_* status was resolved locally and still has to
        # be reported back to the plugin.
        self.updated_pending_status = False
        self.namespace = namespace
        # ipsec_site_connection id -> {'status', 'updated_pending_status'}
        self.connection_status = {}
        self.config_dir = os.path.join(
            cfg.CONF.ipsec.config_base_dir, self.id)
        self.etc_dir = os.path.join(self.config_dir, 'etc')
        self.update_vpnservice(vpnservice)
    def translate_dialect(self):
        """Rewrite the cached vpnservice's values into swan dialect in place."""
        if not self.vpnservice:
            return
        for ipsec_site_conn in self.vpnservice['ipsec_site_connections']:
            self._dialect(ipsec_site_conn, 'initiator')
            self._dialect(ipsec_site_conn['ikepolicy'], 'ike_version')
            for key in ['encryption_algorithm',
                        'auth_algorithm',
                        'pfs']:
                self._dialect(ipsec_site_conn['ikepolicy'], key)
                self._dialect(ipsec_site_conn['ipsecpolicy'], key)
    def update_vpnservice(self, vpnservice):
        """Replace the cached vpnservice dict and re-apply dialect mapping."""
        self.vpnservice = vpnservice
        self.translate_dialect()
    def _dialect(self, obj, key):
        # Unmapped values are kept as-is.
        obj[key] = self.DIALECT_MAP.get(obj[key], obj[key])
    @abc.abstractmethod
    def ensure_configs(self):
        # Subclasses must (re)generate all config files for the service.
        pass
    def ensure_config_file(self, kind, template, vpnservice):
        """Update config file, based on current settings for service."""
        config_str = self._gen_config_content(template, vpnservice)
        config_file_name = self._get_config_filename(kind)
        utils.replace_file(config_file_name, config_str)
    def remove_config(self):
        """Remove whole config file."""
        shutil.rmtree(self.config_dir, ignore_errors=True)
    def _get_config_filename(self, kind):
        """Return the full path of config file *kind* under the etc/ dir."""
        config_dir = self.etc_dir
        return os.path.join(config_dir, kind)
    def _ensure_dir(self, dir_path):
        # Create with mode 0755 only when missing.
        if not os.path.isdir(dir_path):
            os.makedirs(dir_path, 0o755)
    def ensure_config_dir(self, vpnservice):
        """Create config directory if it does not exist."""
        self._ensure_dir(self.config_dir)
        for subdir in self.CONFIG_DIRS:
            dir_path = os.path.join(self.config_dir, subdir)
            self._ensure_dir(dir_path)
    def _gen_config_content(self, template_file, vpnservice):
        """Render *template_file* with the vpnservice/state_path context."""
        template = _get_template(template_file)
        return template.render(
            {'vpnservice': vpnservice,
             'state_path': cfg.CONF.state_path})
    @abc.abstractmethod
    def get_status(self):
        # Subclasses return raw status output; RuntimeError means "down".
        pass
    @property
    def status(self):
        # ACTIVE/DOWN view of the liveness probe below.
        if self.active:
            return constants.ACTIVE
        return constants.DOWN
    @property
    def active(self):
        """Check if the process is active or not."""
        if not self.namespace:
            return False
        try:
            # Probing also refreshes per-connection status as a side effect.
            status = self.get_status()
            self._update_connection_status(status)
        except RuntimeError:
            # Command execution failure => process is not running.
            return False
        return True
    def update(self):
        """Update Status based on vpnservice configuration."""
        if self.vpnservice and not self.vpnservice['admin_state_up']:
            self.disable()
        else:
            self.enable()
        if plugin_utils.in_pending_status(self.vpnservice['status']):
            self.updated_pending_status = True
        self.vpnservice['status'] = self.status
        for ipsec_site_conn in self.vpnservice['ipsec_site_connections']:
            if plugin_utils.in_pending_status(ipsec_site_conn['status']):
                conn_id = ipsec_site_conn['id']
                conn_status = self.connection_status.get(conn_id)
                if not conn_status:
                    # No local status known yet; leave the pending state.
                    continue
                conn_status['updated_pending_status'] = True
                ipsec_site_conn['status'] = conn_status['status']
    def enable(self):
        """Enabling the process."""
        try:
            self.ensure_configs()
            if self.active:
                self.restart()
            else:
                self.start()
        except RuntimeError:
            LOG.exception(
                _("Failed to enable vpn process on router %s"),
                self.id)
    def disable(self):
        """Disabling the process."""
        try:
            if self.active:
                self.stop()
            self.remove_config()
        except RuntimeError:
            LOG.exception(
                _("Failed to disable vpn process on router %s"),
                self.id)
    @abc.abstractmethod
    def restart(self):
        """Restart process."""
    @abc.abstractmethod
    def start(self):
        """Start process."""
    @abc.abstractmethod
    def stop(self):
        """Stop process."""
    def _update_connection_status(self, status_output):
        """Parse swan status output into self.connection_status."""
        for line in status_output.split('\n'):
            # e.g. '000 "<conn-uuid>" ... erouted;' -> (uuid, state)
            m = re.search('\d\d\d "([a-f0-9\-]+).* (unrouted|erouted);', line)
            if not m:
                continue
            connection_id = m.group(1)
            status = m.group(2)
            if not self.connection_status.get(connection_id):
                self.connection_status[connection_id] = {
                    'status': None,
                    'updated_pending_status': False
                }
            self.connection_status[
                connection_id]['status'] = STATUS_MAP[status]
class OpenSwanProcess(BaseSwanProcess):
    """OpenSwan Process manager class.
    This process class uses three commands
    (1) ipsec pluto: IPsec IKE keying daemon
    (2) ipsec addconn: Adds new ipsec addconn
    (3) ipsec whack: control interface for IPSEC keying daemon
    """
    def __init__(self, conf, root_helper, process_id,
                 vpnservice, namespace):
        super(OpenSwanProcess, self).__init__(
            conf, root_helper, process_id,
            vpnservice, namespace)
        # Files/paths inside this router's private config tree, used by the
        # pluto/whack/addconn invocations below.
        self.secrets_file = os.path.join(
            self.etc_dir, 'ipsec.secrets')
        self.config_file = os.path.join(
            self.etc_dir, 'ipsec.conf')
        self.pid_path = os.path.join(
            self.config_dir, 'var', 'run', 'pluto')
    def _execute(self, cmd, check_exit_code=True):
        """Execute command on namespace."""
        ip_wrapper = ip_lib.IPWrapper(self.root_helper, self.namespace)
        return ip_wrapper.netns.execute(
            cmd,
            check_exit_code=check_exit_code)
    def ensure_configs(self):
        """Generate config files which are needed for OpenSwan.
        If there is no directory, this function will create
        dirs.
        """
        self.ensure_config_dir(self.vpnservice)
        self.ensure_config_file(
            'ipsec.conf',
            self.conf.openswan.ipsec_config_template,
            self.vpnservice)
        self.ensure_config_file(
            'ipsec.secrets',
            self.conf.openswan.ipsec_secret_template,
            self.vpnservice)
    def get_status(self):
        """Return raw `ipsec whack --status` output for this pluto instance."""
        return self._execute([self.binary,
                              'whack',
                              '--ctlbase',
                              self.pid_path,
                              '--status'])
    def restart(self):
        """Restart the process."""
        self.stop()
        self.start()
        return
    def _get_nexthop(self, address):
        # 'ip route get' prints 'via <gw>' for routed destinations; for
        # directly connected peers the address itself is the next hop.
        routes = self._execute(
            ['ip', 'route', 'get', address])
        if routes.find('via') >= 0:
            return routes.split(' ')[2]
        return address
    def _virtual_privates(self):
        """Returns line of virtual_privates.
        virtual_private contains the networks
        that are allowed as subnet for the remote client.
        """
        virtual_privates = []
        nets = [self.vpnservice['subnet']['cidr']]
        for ipsec_site_conn in self.vpnservice['ipsec_site_connections']:
            nets += ipsec_site_conn['peer_cidrs']
        for net in nets:
            # Entries are formatted as %v4:<cidr> / %v6:<cidr>.
            version = netaddr.IPNetwork(net).version
            virtual_privates.append('%%v%s:%s' % (version, net))
        return ','.join(virtual_privates)
    def start(self):
        """Start the process.
        Note: if there is not namespace yet,
        just do nothing, and wait next event.
        """
        if not self.namespace:
            return
        virtual_private = self._virtual_privates()
        #start pluto IKE keying daemon
        self._execute([self.binary,
                       'pluto',
                       '--ctlbase', self.pid_path,
                       '--ipsecdir', self.etc_dir,
                       '--use-netkey',
                       '--uniqueids',
                       '--nat_traversal',
                       '--secretsfile', self.secrets_file,
                       '--virtual_private', virtual_private
                       ])
        #add connections
        for ipsec_site_conn in self.vpnservice['ipsec_site_connections']:
            nexthop = self._get_nexthop(ipsec_site_conn['peer_address'])
            self._execute([self.binary,
                           'addconn',
                           '--ctlbase', '%s.ctl' % self.pid_path,
                           '--defaultroutenexthop', nexthop,
                           '--config', self.config_file,
                           ipsec_site_conn['id']
                           ])
        #TODO(nati) fix this when openswan is fixed
        #Due to openswan bug, this command always exit with 3
        #start whack ipsec keying daemon
        self._execute([self.binary,
                       'whack',
                       '--ctlbase', self.pid_path,
                       '--listen',
                       ], check_exit_code=False)
        for ipsec_site_conn in self.vpnservice['ipsec_site_connections']:
            # Only 'bi-directional' connections (dialect: 'start') are
            # initiated from this side.
            if not ipsec_site_conn['initiator'] == 'start':
                continue
            #initiate ipsec connection
            self._execute([self.binary,
                           'whack',
                           '--ctlbase', self.pid_path,
                           '--name', ipsec_site_conn['id'],
                           '--asynchronous',
                           '--initiate'
                           ])
    def disconnect(self):
        """Terminate every known ipsec site connection via whack."""
        if not self.namespace:
            return
        if not self.vpnservice:
            return
        for conn_id in self.connection_status:
            self._execute([self.binary,
                           'whack',
                           '--ctlbase', self.pid_path,
                           '--name', '%s/0x1' % conn_id,
                           '--terminate'
                           ])
    def stop(self):
        #Stop process using whack
        #Note this will also stop pluto
        self.disconnect()
        self._execute([self.binary,
                       'whack',
                       '--ctlbase', self.pid_path,
                       '--shutdown',
                       ])
        #clean connection_status info
        self.connection_status = {}
class IPsecVpnDriverApi(n_rpc.RpcProxy):
    """RPC client used by the agent to talk to the IPsec VPN service plugin."""

    IPSEC_PLUGIN_VERSION = '1.0'

    def get_vpn_services_on_host(self, context, host):
        """Fetch the list of vpnservices configured for *host*.

        Each vpnservice carries its related ipsec_site_connections,
        ikepolicy and ipsecpolicy.
        """
        msg = self.make_msg('get_vpn_services_on_host', host=host)
        return self.call(context, msg, version=self.IPSEC_PLUGIN_VERSION)

    def update_status(self, context, status):
        """Push local status back to the plugin (updates VPNService rows)."""
        msg = self.make_msg('update_status', status=status)
        return self.cast(context, msg, version=self.IPSEC_PLUGIN_VERSION)
@six.add_metaclass(abc.ABCMeta)
class IPsecDriver(device_drivers.DeviceDriver):
    """VPN Device Driver for IPSec.
    This class is designed for use with L3-agent now.
    However this driver will be used with another agent in future.
    so the use of "Router" is kept minimul now.
    Instead of router_id, we are using process_id in this code.
    """
    # history
    #   1.0 Initial version
    RPC_API_VERSION = '1.0'
    # TODO(ihrachys): we can't use RpcCallback here due to inheritance
    # issues
    target = messaging.Target(version=RPC_API_VERSION)
    def __init__(self, agent, host):
        self.agent = agent
        self.conf = self.agent.conf
        self.root_helper = self.agent.root_helper
        self.host = host
        self.conn = n_rpc.create_connection(new=True)
        self.context = context.get_admin_context_without_session()
        self.topic = topics.IPSEC_AGENT_TOPIC
        node_topic = '%s.%s' % (self.topic, self.host)
        # process_id (router id) -> swan process wrapper.
        self.processes = {}
        # Last status reported per process, used to detect changes.
        self.process_status_cache = {}
        self.endpoints = [self]
        self.conn.create_consumer(node_topic, self.endpoints, fanout=False)
        self.conn.consume_in_threads()
        self.agent_rpc = IPsecVpnDriverApi(topics.IPSEC_DRIVER_TOPIC, '1.0')
        # Periodically push status changes back to the plugin.
        self.process_status_cache_check = loopingcall.FixedIntervalLoopingCall(
            self.report_status, self.context)
        self.process_status_cache_check.start(
            interval=self.conf.ipsec.ipsec_status_check_interval)
    def _update_nat(self, vpnservice, func):
        """Setting up nat rule in iptables.
        We need to setup nat rule for ipsec packet.
        :param vpnservice: vpnservices
        :param func: self.add_nat_rule or self.remove_nat_rule
        """
        local_cidr = vpnservice['subnet']['cidr']
        router_id = vpnservice['router_id']
        for ipsec_site_connection in vpnservice['ipsec_site_connections']:
            for peer_cidr in ipsec_site_connection['peer_cidrs']:
                # Accept policy-matched ipsec traffic before generic SNAT.
                func(
                    router_id,
                    'POSTROUTING',
                    '-s %s -d %s -m policy '
                    '--dir out --pol ipsec '
                    '-j ACCEPT ' % (local_cidr, peer_cidr),
                    top=True)
        self.agent.iptables_apply(router_id)
    def vpnservice_updated(self, context, **kwargs):
        """Vpnservice updated rpc handler
        VPN Service Driver will call this method
        when vpnservices updated.
        Then this method start sync with server.
        """
        self.sync(context, [])
    @abc.abstractmethod
    def create_process(self, process_id, vpnservice, namespace):
        # Factory for the concrete swan process class.
        pass
    def ensure_process(self, process_id, vpnservice=None):
        """Ensuring process.
        If the process doesn't exist, it will create process
        and store it in self.processs
        """
        process = self.processes.get(process_id)
        if not process or not process.namespace:
            namespace = self.agent.get_namespace(process_id)
            process = self.create_process(
                process_id,
                vpnservice,
                namespace)
            self.processes[process_id] = process
        elif vpnservice:
            process.update_vpnservice(vpnservice)
        return process
    def create_router(self, process_id):
        """Handling create router event.
        Agent calls this method, when the process namespace
        is ready.
        """
        if process_id in self.processes:
            # In case of vpnservice is created
            # before router's namespace
            process = self.processes[process_id]
            self._update_nat(process.vpnservice, self.agent.add_nat_rule)
            process.enable()
    def destroy_router(self, process_id):
        """Handling destroy_router event.
        Agent calls this method, when the process namespace
        is deleted.
        """
        if process_id in self.processes:
            process = self.processes[process_id]
            process.disable()
            vpnservice = process.vpnservice
            if vpnservice:
                self._update_nat(vpnservice, self.agent.remove_nat_rule)
            del self.processes[process_id]
    def get_process_status_cache(self, process):
        """Return (creating if needed) the cached status dict for *process*."""
        if not self.process_status_cache.get(process.id):
            self.process_status_cache[process.id] = {
                'status': None,
                'id': process.vpnservice['id'],
                'updated_pending_status': False,
                'ipsec_site_connections': {}}
        return self.process_status_cache[process.id]
    def is_status_updated(self, process, previous_status):
        """True when the process or any connection status changed since cached."""
        if process.updated_pending_status:
            return True
        if process.status != previous_status['status']:
            return True
        if (process.connection_status !=
            previous_status['ipsec_site_connections']):
            return True
    def unset_updated_pending_status(self, process):
        """Clear pending-status flags after a successful report."""
        process.updated_pending_status = False
        for connection_status in process.connection_status.values():
            connection_status['updated_pending_status'] = False
    def copy_process_status(self, process):
        """Snapshot the process status in the plugin's report format."""
        return {
            'id': process.vpnservice['id'],
            'status': process.status,
            'updated_pending_status': process.updated_pending_status,
            'ipsec_site_connections': copy.deepcopy(process.connection_status)
        }
    def update_downed_connections(self, process_id, new_status):
        """Update info to be reported, if connections just went down.
        If there is no longer any information for a connection, because it
        has been removed (e.g. due to an admin down of VPN service or IPSec
        connection), but there was previous status information for the
        connection, mark the connection as down for reporting purposes.
        """
        if process_id in self.process_status_cache:
            for conn in self.process_status_cache[process_id][IPSEC_CONNS]:
                if conn not in new_status[IPSEC_CONNS]:
                    new_status[IPSEC_CONNS][conn] = {
                        'status': constants.DOWN,
                        'updated_pending_status': True
                    }
        def report_status(self, context):
            pass
    def report_status(self, context):
        """Report changed process/connection statuses back to the plugin."""
        status_changed_vpn_services = []
        for process in self.processes.values():
            previous_status = self.get_process_status_cache(process)
            if self.is_status_updated(process, previous_status):
                new_status = self.copy_process_status(process)
                self.update_downed_connections(process.id, new_status)
                status_changed_vpn_services.append(new_status)
                self.process_status_cache[process.id] = (
                    self.copy_process_status(process))
                # We need unset updated_pending status after it
                # is reported to the server side
                self.unset_updated_pending_status(process)
        if status_changed_vpn_services:
            self.agent_rpc.update_status(
                context,
                status_changed_vpn_services)
    @lockutils.synchronized('vpn-agent', 'neutron-')
    def sync(self, context, routers):
        """Sync status with server side.
        :param context: context object for RPC call
        :param routers: Router objects which is created in this sync event
        There could be many failure cases should be
        considered including the followings.
        1) Agent class restarted
        2) Failure on process creation
        3) VpnService is deleted during agent down
        4) RPC failure
        In order to handle, these failure cases,
        This driver takes simple sync strategies.
        """
        vpnservices = self.agent_rpc.get_vpn_services_on_host(
            context, self.host)
        router_ids = [vpnservice['router_id'] for vpnservice in vpnservices]
        # Ensure the ipsec process is enabled
        for vpnservice in vpnservices:
            process = self.ensure_process(vpnservice['router_id'],
                                          vpnservice=vpnservice)
            self._update_nat(vpnservice, self.agent.add_nat_rule)
            process.update()
        # Delete any IPSec processes that are
        # associated with routers, but are not running the VPN service.
        for router in routers:
            #We are using router id as process_id
            process_id = router['id']
            if process_id not in router_ids:
                process = self.ensure_process(process_id)
                self.destroy_router(process_id)
        # Delete any IPSec processes running
        # VPN that do not have an associated router.
        process_ids = [process_id
                       for process_id in self.processes
                       if process_id not in router_ids]
        for process_id in process_ids:
            self.destroy_router(process_id)
        self.report_status(context)
class OpenSwanDriver(IPsecDriver):
    """IPsec device driver that manages OpenSwan processes."""

    def create_process(self, process_id, vpnservice, namespace):
        # Factory hook required by IPsecDriver: one OpenSwan process per
        # router namespace.
        return OpenSwanProcess(self.conf, self.root_helper, process_id,
                               vpnservice, namespace)
| samsu/neutron | services/vpn/device_drivers/ipsec.py | Python | apache-2.0 | 24,853 |
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Author: Federico Ceratto <federico.ceratto@hp.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import unittest
from oslo_log import log as logging
import oslotest.base
import testtools
from designate import exceptions
from designate import objects
LOG = logging.getLogger(__name__)
def create_test_domain():
    """Return a minimal Domain object suitable for the unit tests below."""
    attrs = {
        'name': 'www.example.org.',
        'email': 'foo@example.com',
    }
    return objects.Domain(**attrs)
class DomainTest(oslotest.base.BaseTestCase):
    """Unit tests for the Domain object and its masters relation."""

    def test_init(self):
        domain = create_test_domain()
        self.assertEqual('www.example.org.', domain.name)

    def test_masters_none(self):
        domain = objects.Domain()
        # Accessing an unloaded relation must raise, not return None.
        with testtools.ExpectedException(exceptions.RelationNotLoaded):
            self.assertEqual(None, domain.masters)

    def test_masters(self):
        masters = objects.DomainMasterList.from_list(
            [{'host': '1.0.0.0', 'port': 53}])
        domain = objects.Domain(masters=masters)
        self.assertEqual([{'host': '1.0.0.0', 'port': 53}],
                         domain.masters.to_list())

    def test_masters_2(self):
        masters = objects.DomainMasterList.from_list(
            [{'host': '1.0.0.0'}, {'host': '2.0.0.0'}])
        domain = objects.Domain(masters=masters)
        self.assertEqual(2, len(domain.masters))

    def test_get_master_by_ip(self):
        masters = objects.DomainMasterList.from_list([
            {'host': '1.0.0.0', 'port': 53},
            {'host': '2.0.0.0', 'port': 53},
        ])
        domain = objects.Domain(masters=masters)
        master = domain.get_master_by_ip('2.0.0.0').to_data()
        self.assertEqual('2.0.0.0:53', master)

    @unittest.expectedFailure  # bug: domain.masters is not iterable
    def test_get_master_by_ip_none(self):
        domain = objects.Domain()
        master = domain.get_master_by_ip('2.0.0.0')
        self.assertEqual(False, master)

    def test_validate(self):
        create_test_domain().validate()

    def test_validate_invalid_secondary(self):
        domain = objects.Domain(type='SECONDARY')
        with testtools.ExpectedException(exceptions.InvalidObject):
            domain.validate()
| tonyli71/designate | designate/tests/unit/test_objects/test_domain.py | Python | apache-2.0 | 2,790 |
import operator
import numpy as np
import sys
'''
Description:
Author: Mikko Auvinen
mikko.auvinen@helsinki.fi
University of Helsinki &
Finnish Meteorological Institute
'''
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def writeNumpyZFootprintRaw( filename, arr ):
  """Write raw particle footprint data into a compressed <name>.npz archive.

  arr must have exactly 9 columns: origin coords (xO, yO, zO), terminal
  coords (xt, yt, zt) and terminal velocities (ut, vt, wt).
  """
  # BUG FIX: str.strip('.npz') removes ANY of the characters '.', 'n', 'p',
  # 'z' from both ends of the name (e.g. 'zone.npz' -> 'one'); split off the
  # extension instead, as the other writers in this module already do.
  fstr = filename.split('.npz')[0]
  print(' Writing raw footprint data to file {}.npz ...'.format(fstr))
  dims = np.shape(arr)
  if( dims[1] != 9 ):
    sys.exit(" Error: dims[1] does not equal 9. Exiting ...")
  np.savez_compressed(fstr, \
    xO=arr[:,0], yO=arr[:,1], zO=arr[:,2], \
    xt=arr[:,3], yt=arr[:,4], zt=arr[:,5], \
    ut=arr[:,6], vt=arr[:,7], wt=arr[:,8] )
  print(' ... done! ')
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def writeNumpyZFootprintIJK(fn, xO, yO, zO, xt, yt, zt, ut, vt, wt, dxyz):
  """Save footprint origin/terminal data plus grid resolution as <fn>.npz."""
  basename = fn.split('.npz')[0]
  payload = dict(xO=xO, yO=yO, zO=zO, xt=xt, yt=yt, zt=zt,
                 ut=ut, vt=vt, wt=wt, dxyz=dxyz)
  np.savez_compressed(basename, **payload)
  print(' {}.npz saved successfully!'.format(basename))
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def readNumpyZFootprintRaw( filename ):
  '''
  Read a raw footprint .npz archive written by writeNumpyZFootprintRaw.
  Returns particle origin coords, terminal coords and terminal velocities
  as nine separate arrays.
  '''
  # print() function instead of the Python-2-only print statement the file
  # otherwise avoids; identical output for a single argument.
  print(' Read raw footprint file {} ...'.format(filename))
  try:
    dat = np.load(filename)
  except Exception:
    # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit still
    # propagate normally.
    sys.exit(' Cannot read file {}. Exiting ...'.format(filename))
  xO = dat['xO']; yO = dat['yO']; zO = dat['zO']
  xt = dat['xt']; yt = dat['yt']; zt = dat['zt']
  ut = dat['ut']; vt = dat['vt']; wt = dat['wt']
  dat.close()
  return xO, yO, zO, xt, yt, zt, ut, vt, wt
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def writeNumpyZFootprint(filename, F, X, Y, Z, C, Ids=None ):
  """Save a gridded footprint (F), its coordinate meshes (X, Y, Z), the
  C array and an optional Ids dict into a compressed <name>.npz archive."""
  fstr = filename.split('.npz')[0]
  # `is not None` instead of `!= None`: identity is the correct (PEP 8)
  # None check and avoids invoking __ne__ on arbitrary Ids objects.
  if( Ids is not None ):
    np.savez_compressed( fstr , F=F, X=X, Y=Y, Z=Z, C=C, Ids=Ids )
  else:
    np.savez_compressed( fstr , F=F, X=X, Y=Y, Z=Z, C=C )
  print(' {}.npz saved successfully!'.format(fstr))
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def readNumpyZFootprint( filename, IdsOn=False ):
  """Read a footprint .npz archive; return (F, X, Y, Z, C[, Ids]).

  Ids (possibly None when absent from the archive) is appended to the
  return tuple only when IdsOn is True.
  """
  # print() function instead of the Python-2-only print statement.
  print(' Read footprint file {} ...'.format(filename))
  try:
    dat = np.load(filename)
  except Exception:
    # Narrowed from a bare `except:`.
    sys.exit(' Cannot read file {}. Exiting ...'.format(filename))
  F = dat['F']; X = dat['X']; Y = dat['Y']; Z = dat['Z']; C = dat['C']
  if( IdsOn ):
    try:
      Ids = dat['Ids'].item()  # .item() returns the dict inside the 0-d array.
    except Exception:
      Ids = None  # archive was written without Ids; treat as absent
  dat.close()
  if( IdsOn ):
    return F, X, Y, Z, C, Ids
  else:
    return F, X, Y, Z, C
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def fp2mshIJ(pxO, pyO, pzO, xG, yG, dx, dy ): # IJ as in indecies.
  """Bin particle origins onto the footprint grid (fast index version).

  pxO, pyO, pzO: particle x/y/z origin coordinates.
  xG, yG:        grid cell-center coordinates; dx, dy: grid resolution.
  Returns (T, X, Y, Z): per-cell hit counts, coordinate meshes and the
  origin heights written into the hit cells.
  """
  # First, create meshgrid from the grid coords.
  X, Y = np.meshgrid( xG, yG )
  T = np.zeros( np.shape(X) ) # Target (hit counts)
  Z = np.zeros( np.shape(X) ) # Heights
  ix = ( pxO / dx ).astype(int); iy = ( pyO / dy ).astype(int)
  # np.add.at performs unbuffered in-place accumulation, so repeated
  # (iy, ix) hits are all counted -- plain fancy indexing T[iy, ix] += 1
  # would not -- and it replaces the Python-2-only xrange loop used before.
  np.add.at(T, (iy, ix), 1.)
  Z[iy[:],ix[:]] = pzO[:]
  return T, X, Y, Z
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def fp2mshBM( pxO, pyO, pzO, xG, yG, dx, dy ): # BM as in boolean matrix.
    # Elegant routine, but awfully slow. Don't use this!
    # Bins particle origins (pxO, pyO) onto the (xG, yG) grid by building
    # a boolean mask per grid cell; fp2mshIJ does the same job with index
    # arithmetic and should be preferred.
    # pxO: particle x-origin, xG: x-grid coordinates.
    # First, Create meshgrid from the grid coords.
    X, Y = np.meshgrid( xG, yG )
    # Then a mesh variable for storing the hits and the topography height.
    T = np.zeros( np.shape(X) )
    Z = np.zeros( np.shape(X) )
    for xi in xG:
        print(' Grid x-coord = {} '.format(xi))
        # x-extent of the grid column whose cell center is xi.
        x1 = xi-dx/2.; x2 = xi+dx/2.
        # Particles whose x-origin falls into this column.
        PXb = ((x1 <= pxO) * (pxO < x2))
        if( PXb.any() ):
            for yi in yG:
                y1 = yi-dy/2.; y2 = yi+dy/2.
                # Utilizing the seeding coordinates (origin coords), extract how many hits each cell gets.
                PXYb = PXb * ((y1 <= pyO) * (pyO < y2))
                if( PXYb.any()):
                    # Create a boolean matrix which isolates (with True value) the desired grid cell.
                    MXb = ((x1 <= X) * (X < x2) )
                    MXYb = MXb * ((y1 <= Y) * (Y < y2) )
                    # Cell height = mean origin height of the particles in it.
                    Z[MXYb] = np.mean( pzO[ PXYb ] )
                    # Add this cell's particle count to the hit-count field.
                    T += np.sum( PXYb.astype(int) ) * MXYb.astype(int)
        PXb = None; MXb = None; MXYb = None # Clear
    return T, X, Y, Z
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def coordsFootprintGrid( NxG, dxG, pxO, pyO, verbose=False ):
    '''
    Build cell-center coordinates (xD, yD) for the footprint grid,
    extending the domain backward in x far enough to cover the most
    negative particle x-origin.

    NxG, dxG : grid dimensions and spacings for (x, y).
    pxO, pyO : particle origin coordinates.
    '''
    # Max values.
    xG_max = NxG[0]*dxG[0] # Max dimensions.
    yG_max = NxG[1]*dxG[1]
    '''
    Note: At this point we assume non-cyclic boundary cond. for
    the south/north boundaries. Therefore, the particles will be
    absorbed if they come in contact with the y-normal boundaries.
    The footprint-grid will have to be extended backward only in
    the x-direction.
    '''
    # Smallest and largest x/y-value recorded:
    x_min = np.min( pxO ); y_min = np.min( pyO )
    x_max = np.max( pxO ); y_max = np.max( pyO )
    if(verbose):
        print( ' min(xO) = {}, max(xO) = {}'.format(x_min, x_max))
        print( ' min(yO) = {}, max(yO) = {}'.format(y_min, y_max))
    # Define an integer factor for domain multiplication/extension.
    # BUGFIX: fx must be an int — a float here made np.linspace's num
    # argument a float, which modern numpy rejects.
    fx = 0
    if( x_min < 0. ):
        fx = int( abs(x_min) / xG_max ) + 1
    # Coordinates for the extended footprint grid. Cell-centers.
    xD = np.linspace(-fx*xG_max+dxG[0]/2., xG_max-dxG[0]/2., fx*NxG[0]+NxG[0]) # Last term: resolution.
    yD = np.linspace(dxG[1]/2.          , yG_max-dxG[1]/2., NxG[1] )
    return xD, yD
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def idAppendices(fstring, ijkOn=False):
    '''
    Derive a (fileId, varId) pair from a footprint file name.

    With ijkOn=True both IDs are cut from the fixed-width tail of the
    name; otherwise fileId is empty and varId is the whole name.
    '''
    if( ijkOn ):
        # NOTE: str.strip('.npz') strips any of the characters . n p z
        # from both ends, not just the literal suffix; the fixed-width
        # slicing below assumes names where this makes no difference.
        fileId = fstring.strip('.npz') # file ID string.
        fileId = fileId[-13:]
        varId = fileId[-8:]; varId = varId.replace('.','_') # variable ID string.
    else:
        fileId = str()
        varId = str(fstring)  # BUGFIX: was str(fn) — NameError, fn undefined.
    return fileId, varId
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def percentileFootprintIds( F , p ):
    '''
    Return a boolean mask selecting the footprint cells whose values,
    summed together, account for approximately p percent (e.g. 50, 75,
    90) of the total footprint sum. The threshold level is lowered from
    half the peak value in small steps until the masked sum reaches the
    target (within a fixed tolerance).
    '''
    frac   = p/100.
    total  = np.sum(F)
    target = frac*total          # sum we want the mask to capture
    peak   = np.max(F)           # maximum value.
    level  = 0.5*peak            # starting threshold
    step   = peak/350.           # threshold decrement per iteration
    tol    = total/2000.
    niter  = 0
    while True:
        niter += 1
        level -= step
        mask = (F > level)
        captured = np.sum(F[mask])
        if (target - captured) < tol:
            print(' i={}) TARGET vs. CURRENT: {} vs. {}'.format(niter, target, captured))
            break
    return mask
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def writeCrossWindSum( F , X, fname, idx=None ):
    '''
    Sum the footprint F along the crosswind (row) direction, smooth the
    nonzero entries with a Gaussian filter, and optionally write the
    result (paired with the x-coordinates X[0,:]) to <fname>_ysum.dat.

    idx : optional boolean mask applied to F before summing.
    '''
    import scipy.ndimage as sn # contains the filters
    # 'is not None' is essential here: 'idx != None' on a numpy boolean
    # mask performs an elementwise comparison and raises on truth-testing.
    if idx is not None:
        Fx = F*idx
    else:
        Fx = F.copy()
    # Column sums (vectorized, replacing the per-column loop); cast to
    # float to match the original zero-initialized float accumulator
    # even when F has an integer dtype.
    Fm = np.sum(Fx, axis=0).astype(float)
    Fx = None
    idx = (np.abs(Fm) > 0.) # Select only non-zero entries
    Fm[idx] = sn.gaussian_filter( Fm[idx], sigma=2.5 )
    if( fname ):
        np.savetxt(fname+'_ysum.dat', np.c_[X[0,:],Fm] ) # x,y equal sized 1D arrays
    return Fm
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
# =*=*=*=*=*= BEGIN KORMANN & MEIXNER =*=*=*=*=*=*=*=*
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def kormann_and_meixner_fpr(z_0, z_m, u, sigma_v, L, X, Y, x_off=0., y_off=0. ):
    '''
    Analytical flux footprint of Kormann & Meixner (2001), evaluated on
    the (X, Y) grid for measurement height z_m, roughness length z_0,
    mean wind u, crosswind velocity std sigma_v and Obukhov length L.
    The tower sits at (x_off, y_off). Returns the footprint phi with the
    x-axis reversed (phi[:,::-1]). Equation numbers below refer to the
    Kormann & Meixner paper.
    '''
    from scipy.optimize import fsolve
    from scipy.special import gamma
    Kappa = 0.41 # Von Karman const.
    # Bounds of integration for Eq. 42 to 46, defined on p.218
    z_1 = 3.*z_0
    z_2 = (1.+Kappa)*z_m
    # Data tuple for passing information to fsolve.
    data =(L, z_0, z_1, z_2, z_m)
    # Final roots for m and n
    m0 = 0.5
    m = fsolve( feqn_m, m0, args=data )[0]
    n = fsolve( feqn_n, m0, args=data )[0]
    # Inversion of Eq 31
    u_star = u * Kappa / (np.log(z_m/z_0) + fopt1(L, z_m, z_m))
    # Eq (41), part 1
    U = u_star/Kappa * ( Iz_n(m , L, z_0/z_m, z_1, z_2, z_m, 2 ) + \
        + Iz_n(m , L, z_0, z_1, z_2, z_m, 4, fopt1) ) \
        / ( Iz_n(2.*m, L, z_0, z_1, z_2, z_m, 1 ) * z_m**m )
    # Eq (41), part 2
    K = Kappa*u_star * Iz_n(n, L, z_0, z_1, z_2, z_m, 4, fopt2)\
        / ( Iz_n(2.*n, L, z_0, z_1, z_2, z_m, 1 ) * z_m**(n-1.))
    # r is defined at the top of p.213, mu after Eq. 18
    r = 2.+m-n
    mu = (1.+m)/r
    # Eq. 19
    xsi = U * z_m**r /( r**2 * K )
    # Eq. 21
    Xm = np.abs(X-x_off)
    Ym = np.abs(Y-y_off)
    Idm = (X-x_off)>0.
    # BUGFIX: np.maximum (elementwise floor at 1e-10, guarding the
    # division) — np.max(Xm, 1e-10) passed 1e-10 as the *axis* argument.
    phi_x = ( gamma(mu)**(-1) * xsi**(mu)/( Xm**(1.+mu) ) * np.exp(-xsi/np.maximum(Xm, 1e-10)) )* Idm
    # Cross wind diffusion
    # Eq. 18
    u_bar = gamma(mu)/gamma(1./r) * (r**2*K/U)**(m/r)*U*Xm**(m/r)
    # Eq. 9, definition of sig right after it
    sig = sigma_v*Xm/u_bar
    D_y = (np.sqrt(2.*np.pi)*sig)**(-1) * np.exp(-Ym**2./(2.*sig**2))
    phi = D_y * phi_x
    return phi[:,::-1]
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def fopt1(L, z, z_m=None):
    '''
    Diabatic correction psi_m used in eq 39 with J1 (Iz_4) and J2 (Iz_5).
    z_m is accepted only to keep the signature uniform with fopt2.
    '''
    if L > 0:
        # Stable stratification: linear correction.
        return 5.*z/L
    # Unstable stratification.
    zeta = (1. - 16.*z/L)**(0.25)
    return (-2.)*np.log((1.+zeta)/2.) - np.log((1.+zeta**2)/2.) \
        + 2.*np.arctan(zeta) - np.pi/2.
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def fopt2(L, z, z_m):
    '''
    Ratio z / (phi_c * z_m) used in eq 40 with J1 (Iz_4) and J2 (Iz_5),
    where phi_c is the stability function for scalars.
    '''
    if L > 0:
        phi_c = 1. + 5.*z/L
    else:
        phi_c = (1. - (16. * z/L))**(-0.5)
    return z/(phi_c * z_m)
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
'''Following integrals (Eq 42-46) are solved numerically.
They're all bundled within the same function to form a unified
interface. This reduces code duplication. '''
def Iz_n(P, L, z_0, z_1, z_2, z_m, opt=1, fuser=None):
    '''
    Numerically evaluate one of the integrals I_1..I_3, J_1, J_2
    (Eq 42-46) over z_m-normalized heights [z_1/z_m, z_2/z_m], using a
    midpoint rule with 1000 subintervals. They are bundled in a single
    function to form a unified interface and avoid code duplication.

    opt selects the integrand: 1=I_1, 2=I_2, 3=I_3, 4=J_1, 5=J_2.
    fuser is the stability helper (fopt1 or fopt2) used by J_1/J_2.
    Raises ValueError for an unknown opt (previously a NameError).
    '''
    az1 = (z_1/z_m); az2 = (z_2/z_m)
    dz = (az2-az1)/1000.
    az = np.arange(az1, az2, dz) + dz/2.   # midpoints of the subintervals
    if( opt == 1 ):   # I_1
        c = az**P * dz
    elif( opt == 2 ): # I_2
        c = az**P * np.log(az/z_0) *dz
    elif( opt == 3 ): # I_3
        c = az**P * np.log(az) * np.log(az/z_0) *dz
    elif( opt == 4 ): # J_1
        c = az**P * fuser(L, az*z_m, z_m) * dz
    elif( opt == 5 ): # J_2
        c = az**P * fuser(L, az*z_m, z_m)*np.log(az) * dz
    else:
        raise ValueError('Iz_n: unknown opt = {}'.format(opt))
    return np.sum(c)
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def feqn_m( M, *data ):
    '''
    Root function whose zero yields the exponent m: returns B - A built
    from the integrals I_1..I_3 and J_1/J_2 (with fopt1).
    '''
    L, z_0, z_1, z_2, z_m = data
    lhs = Iz_n(2*M, L, 1     , z_1, z_2, z_m, 2 ) * \
        ( Iz_n(  M, L, z_0/z_m, z_1, z_2, z_m, 2 ) + Iz_n(M, L, z_0, z_1, z_2, z_m, 4, fopt1) )
    rhs = Iz_n(2*M, L, z_0   , z_1, z_2, z_m, 1 ) * \
        ( Iz_n(  M, L, z_0/z_m, z_1, z_2, z_m, 3 ) + Iz_n(M, L, z_0, z_1, z_2, z_m, 5, fopt1) )
    return lhs - rhs
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def feqn_n( N, *data ):
    '''
    Root function whose zero yields the exponent n: returns B - A built
    from the integrals I_1/I_2 and J_1/J_2 (with fopt2).
    '''
    L, z_0, z_1, z_2, z_m = data
    lhs = Iz_n(2*N, L, 1  , z_1, z_2, z_m, 2 ) * Iz_n(N, L, z_0, z_1, z_2, z_m, 4, fopt2)
    rhs = Iz_n(2*N, L, z_0, z_1, z_2, z_m, 1 ) * Iz_n(N, L, z_0, z_1, z_2, z_m, 5, fopt2)
    return lhs - rhs
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
# =*=*=*=*=*= END KORMANN & MEIXNER =*=*=*=*=*=*=*=*
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
# =*=*=*=*=*= BEGIN KLJUN =*=*=*=*=*=*=*=*
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def kljun_fpr(z_0, z_m, u_mean, sigma_v, L, Xt, Yt, z_i, us, x_off, y_off, nx=4000):
    # Unfinished wrapper around the Kljun et al. FFP footprint model;
    # always returns None.
    # NOTE(review): FFP is not defined in this module's visible scope —
    # presumably imported elsewhere; confirm before use.
    rs=[1.]; wd = 0.; crop = True
    fdict = FFP(z_m, z_0, u_mean, z_i, L, sigma_v, us, None, rs, wd, nx, crop)
    fp = fdict['f_2d'].copy(); Xp = fdict['x_2d']; Yp = fdict['y_2d']
    # Grid spacings of the target (Xt, Yt) and FFP (Xp, Yp) grids.
    dXt = Xt[0,2]-Xt[0,1]; dYt = Yt[2,0]-Yt[1,0]
    # NOTE(review): suspected typos — dXp/dYp mix Xp/Yp with Xt/Yt
    # (likely meant Xp[0,2]-Xp[0,1] and Yp[2,0]-Yp[1,0]).
    dXp = Xp[0,2]-Xt[0,1]; dYp = Yp[2,0]-Yt[1,0]
    ipt = (Xp[0,:]/dXt).astype(int); jpt = (Yp[:,0]/dYt).astype(int)
    print(' Xp = {} '.format(Xp[0,:]))
    print(' ipt = {} '.format(ipt))
    fdict = None
    # To be finalized ...
    return None # Do not use yet
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
# =*=*=*=*=*= END KLJUN =*=*=*=*=*=*=*=*
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
| saskartt/P4UL | pyLib/footprintTools.py | Python | mit | 12,182 |
# -*- coding: utf-8 -*-
import re
import urllib.parse
from ..base.simple_downloader import SimpleDownloader
class UloziskoSk(SimpleDownloader):
    # pyLoad "downloader" plugin for ulozisko.sk. The SimpleDownloader
    # base class drives the workflow from the *_PATTERN class attributes;
    # process()/handle_free() below implement the site-specific steps.
    __name__ = "UloziskoSk"
    __type__ = "downloader"
    __version__ = "0.31"
    __status__ = "testing"
    __pattern__ = r"http://(?:www\.)?ulozisko\.sk/.+"
    __config__ = [
        ("enabled", "bool", "Activated", True),
        ("use_premium", "bool", "Use premium account if available", True),
        ("fallback", "bool", "Fallback to free download if premium fails", True),
        ("chk_filesize", "bool", "Check file size", True),
        ("max_wait", "int", "Reconnect if waiting time is greater than minutes", 10),
    ]
    __description__ = """Ulozisko.sk downloader plugin"""
    __license__ = "GPLv3"
    __authors__ = [("zoidberg", "zoidberg@mujmail.cz")]
    # Regexes matched against the downloaded HTML page (self.data).
    NAME_PATTERN = r'<div class="down1">(?P<N>.+?)</div>'
    SIZE_PATTERN = (
        r"Veľkosť súboru: <strong>(?P<S>[\d.,]+) (?P<U>[\w^_]+)</strong><br />"
    )
    OFFLINE_PATTERN = r'<span class = "red">Zadaný súbor neexistuje z jedného z nasledujúcich dôvodov:</span>'
    LINK_FREE_PATTERN = r'<form name = "formular" action = "(.+?)" method = "post">'
    ID_PATTERN = r'<input type = "hidden" name = "id" value = "(.+?)" />'
    CAPTCHA_PATTERN = r'<img src="(/obrazky/obrazky\.php\?fid=.+?)" alt="" />'
    IMG_PATTERN = (
        r'<strong>PRE ZVÄČŠENIE KLIKNITE NA OBRÁZOK</strong><br /><a href = "(.+?)">'
    )
    def process(self, pyfile):
        # Fetch the page; a direct image link bypasses the captcha flow.
        self.data = self.load(pyfile.url)
        self.get_file_info()
        m = re.search(self.IMG_PATTERN, self.data)
        if m is not None:
            self.link = "http://ulozisko.sk" + m.group(1)
        else:
            self.handle_free(pyfile)
    def handle_free(self, pyfile):
        # Free (anonymous) download: scrape the form action, file id and
        # captcha, then POST the captcha answer to start the transfer.
        m = re.search(self.LINK_FREE_PATTERN, self.data)
        if m is None:
            self.error(self._("LINK_FREE_PATTERN not found"))
        parsed_url = "http://www.ulozisko.sk" + m.group(1)
        m = re.search(self.ID_PATTERN, self.data)
        if m is None:
            self.error(self._("ID_PATTERN not found"))
        id = m.group(1)  # NOTE(review): shadows the builtin id()
        self.log_debug("URL:" + parsed_url + " ID:" + id)
        m = re.search(self.CAPTCHA_PATTERN, self.data)
        if m is None:
            self.error(self._("CAPTCHA_PATTERN not found"))
        captcha_url = urllib.parse.urljoin("http://www.ulozisko.sk/", m.group(1))
        captcha = self.captcha.decrypt(captcha_url, cookies=True)
        self.log_debug("CAPTCHA_URL:" + captcha_url + " CAPTCHA:" + captcha)
        self.download(
            parsed_url,
            post={
                "antispam": captcha,
                "id": id,
                "name": pyfile.name,
                "but": "++++STIAHNI+S%DABOR++++",
            },
        )
| vuolter/pyload | src/pyload/plugins/downloaders/UloziskoSk.py | Python | agpl-3.0 | 2,810 |
import argparse
import json
import html
import logging
import pkg_resources
import atexit
from time import sleep
from threading import Thread
from vexmessage import decode_vex_message
from vexbot.adapters.messaging import Messaging
from vexbot.adapters.socket_io.observer import SocketObserver
try:
from websocket import WebSocketApp
except ImportError:
logging.error('Socket IO needs `websocket` installed. Please run `pip '
'install websocket-client`')
try:
import requests
except ImportError as e:
logging.error('Socket IO needs `requests` installed. Please run `pip '
'install requests`')
class WebSocket(WebSocketApp):
    # Bridges a socket.io-style (v0.9 framing) chat channel to the vexbot
    # messaging bus. Wire frames look like "type:id:endpoint:data" and
    # are parsed by hand in on_message().
    def __init__(self,
                 streamer_name: str,
                 namespace: str,
                 website_url: str,
                 service_name: str,
                 connection: dict=None):

        self.log = logging.getLogger(__name__)
        self.log.setLevel(0)
        if connection is None:
            connection = {}

        self.messaging = Messaging(service_name, run_control_loop=True, **connection)
        # Run the messaging control loop on a daemon thread so it exits
        # with the process.
        self._scheduler_thread = Thread(target=self.messaging.start,
                                        daemon=True)

        self._scheduler_thread.start()
        self.observer = SocketObserver(self, self.messaging)
        self.messaging.command.subscribe(self.observer)
        self._streamer_name = streamer_name
        self.namespace = namespace
        self._website_url = website_url
        self.log.info('Getting Socket IO key!')
        # Handshake over HTTP first; the session key goes into the ws URL.
        self.key, heartbeat = self._connect_to_server_helper()
        self.log.info('Socket IO key got!')
        # self.command_manager = AdapterCommandManager(self.messaging)

        # alters URL to be more websocket...ie
        self._website_socket = self._website_url.replace('http', 'ws')
        self._website_socket += 'websocket/'
        # Our own chat nick; assigned by the server in the 'join' event.
        self.nick = None

        super().__init__(self._website_socket + self.key,
                         on_open=self.on_open,
                         on_close=self.on_close,
                         on_message=self.on_message,
                         on_error=self.on_error)

    def _connect_to_server_helper(self):
        # HTTP handshake: the response body is a ':'-separated tuple.
        r = requests.post(self._website_url)
        params = r.text
        # unused variables are connection_timeout and supported_formats
        key, heartbeat_timeout, _, _ = params.split(':')
        heartbeat_timeout = int(heartbeat_timeout)
        return key, heartbeat_timeout

    def on_open(self, *args):
        logging.info('Websocket open!')

    def on_close(self, *args):
        logging.info('Websocket closed :(')

    def on_message(self, *args):
        # args[1] is the raw frame "type:id:namespace:data".
        message = args[1].split(':', 3)
        key = int(message[0])
        # namespace = message[2]
        if len(message) >= 4:
            data = message[3]
        else:
            data = ''
        # key 1 = connect acks, 2 = heartbeat, 5 = JSON event.
        if key == 1 and args[1] == '1::':
            self.send_packet_helper(1)
        elif key == 1 and args[1] == '1::{}'.format(self.namespace):
            self.send_packet_helper(5, data={'name': 'initialize'})
            data = {'name': 'join',
                    'args': ['{}'.format(self._streamer_name)]}

            self.send_packet_helper(5, data=data)
            self.log.info('Connected to channel with socket io!')
            # self.messaging.send_status('CONNECTED')
        elif key == 2:
            # Echo the heartbeat to keep the connection alive.
            self.send_packet_helper(2)
        elif key == 5:
            data = json.loads(data, )
            if data['name'] == 'message':
                message = data['args'][0]
                sender = html.unescape(message['sender'])
                # Ignore our own messages echoed back by the server.
                if sender == self.nick:
                    return
                message = html.unescape(message['text'])
                self.messaging.send_chatter(author=sender, message=message)
            elif data['name'] == 'join':
                self.nick = data['args'][1]

    def on_error(self, *args):
        logging.error(args[1])

    def disconnect(self):
        callback = ''
        data = ''
        # '1::namespace'
        self.send(':'.join([str(self.TYPE_KEYS['DISCONNECT']),
                            callback, self.namespace, data]))

    def send_packet_helper(self,
                           type_key,
                           data=None):
        # Serialize and send one frame of the given type; dict payloads
        # are JSON-encoded into the data slot.
        if data is None:
            data = ''
        else:
            data = json.dumps(data)

        # NOTE: callbacks currently not implemented
        callback = ''
        message = ':'.join([str(type_key), callback, self.namespace, data])
        self.send(message)
| benhoff/vex | vexbot/adapters/socket_io/__init__.py | Python | gpl-3.0 | 4,594 |
__author__ = 'michael'
def solve_depthfirst(gm, heuristic=lambda gm: gm.get_options(), num=1):
    # Iterative depth-first search over the game tree of `gm`, collecting
    # up to `num` winning move sequences. `heuristic` must return a list
    # of candidate moves for a position (the list is consumed in place).
    # `gm` must provide copy(), move(), get_options() and result();
    # result() is falsy for non-terminal states and a sequence whose
    # first element is 1 for a win.
    curseq = []            # moves along the current path
    solutions = []         # completed winning sequences
    hist_gm = [gm.copy()]  # game snapshot per open frame
    gm.copy()              # NOTE(review): result discarded — appears to be dead code
    hist_opt = [1, heuristic(gm)]  # leading 1 is a truthy sentinel for the unwind loop
    while len(solutions) != num:
        gm.move(hist_opt[-1][0])
        curseq.append(hist_opt[-1][0])
        del hist_opt[-1][0]
        if gm.result():
            if gm.result()[0] == 1:
                solutions.append(curseq.copy())
            else:
                # NOTE(review): this deletes hist_opt[-1][0], a sibling
                # move that was never tried — suspected bug; verify the
                # intended backtracking semantics before relying on
                # completeness of the search.
                del hist_opt[-1][0]
                # Unwind frames whose option lists are exhausted; the
                # sentinel 1 keeps hist_opt from emptying.
                while not hist_opt[-1]:
                    del hist_opt[-1]
                    del hist_gm[-1]
                    del curseq[-1]
                if not hist_gm:
                    break
                gm = hist_gm[-1].copy()
        else:
            hist_opt.append(heuristic(gm))
            hist_gm.append(gm.copy())
    return solutions | sonnerm/games | games/solver.py | Python | agpl-3.0 | 862 |
from django.db import models
from django.utils.translation import ugettext_lazy as _
from mezzanine.conf import settings
from mezzanine.core.models import Displayable, RichText, Ownable
class JobPost(Displayable, RichText):
    """
    A career job posting
    """

    class Meta:
        verbose_name = _("Job Post")
        verbose_name_plural = _("Job Posts")
        ordering = ("-publish_date",)

    @models.permalink
    def get_absolute_url(self):
        # @models.permalink turns the (view name, args, kwargs) tuple
        # into a URL via reverse().
        url_name = "jobpost_detail"
        kwargs = {"slug": self.slug}
        return (url_name, (), kwargs)

    def keyword_list(self):
        # Prefer a prefetched `_keywords` attribute when present; note
        # the getattr default (self.keywords.all()) is evaluated eagerly
        # either way, though the queryset itself stays lazy.
        return getattr(self, "_keywords", self.keywords.all())
| robmoggach/mezzanine-careers | careers/models.py | Python | bsd-2-clause | 664 |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at https://mozilla.org/MPL/2.0/.
import pytest
from pages.firefox.browsers.what_is_a_browser import WhatIsABrowserPage
@pytest.mark.nondestructive
@pytest.mark.skip_if_firefox(reason="Primary download button shown only to Firefox users.")
def test_download_buttons_are_displayed(base_url, selenium):
    # Smoke test: both download buttons render on the What is a Browser page.
    page = WhatIsABrowserPage(selenium, base_url).open()
    # NOTE(review): is_displayed is accessed as an attribute, not called.
    # This assumes the page object exposes it as a property; if it were a
    # plain method the asserts would be vacuously true — confirm in the
    # WhatIsABrowserPage definition.
    assert page.primary_download_button.is_displayed
    assert page.secondary_download_button.is_displayed
| pascalchevrel/bedrock | tests/functional/firefox/browsers/test_what_is_a_browser.py | Python | mpl-2.0 | 636 |
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
"""
Unit tests for the plugin framework
"""
from __future__ import absolute_import
from __future__ import print_function
import re
import mock
from twisted.trial import unittest
from zope.interface import implementer
import buildbot.plugins.db
from buildbot.errors import PluginDBError
from buildbot.interfaces import IPlugin
from buildbot.test.util.warnings import assertNotProducesWarnings
from buildbot.test.util.warnings import assertProducesWarning
from buildbot.worker_transition import DeprecatedWorkerAPIWarning
from buildbot.worker_transition import DeprecatedWorkerNameWarning
# buildbot.plugins.db needs to be imported for patching, however just 'db' is
# much shorter for using in tests
db = buildbot.plugins.db
class FakeEntry(object):
    """
    A stand-in for a setuptools entry point, suitable for unit tests.
    Carries a name, a mocked distribution (project_name/version), an
    optional require() failure and the object load() should return.
    """

    def __init__(self, name, project_name, version, fail_require, value):
        self._name = name
        self._fail_require = fail_require
        self._value = value
        self._dist = mock.Mock(spec_set=['project_name', 'version'])
        self._dist.project_name = project_name
        self._dist.version = version

    @property
    def name(self):
        """Entry point name."""
        return self._name

    @property
    def dist(self):
        """Mocked distribution carrying project_name and version."""
        return self._dist

    def require(self):
        """
        Simulate resolving external dependencies; raise when configured
        to fail.
        """
        if not self._fail_require:
            return
        raise RuntimeError('Fail require as requested')

    def load(self):
        """Return the plugin object this entry point wraps."""
        return self._value
class ITestInterface(IPlugin):
    """
    test interface
    """
    # zope.interface-style declaration: methods are declared without
    # 'self'; implementations are verified via implementedBy() below.
    def hello(name):
        "Greets by :param:`name`"
@implementer(ITestInterface)
class ClassWithInterface(object):
    """
    A minimal ITestInterface implementation used as plugin payload in
    the tests.
    """

    def __init__(self, name=None):
        self._name = name

    def hello(self, name=None):
        """Return *name* when truthy, otherwise the stored name."""
        if name:
            return name
        return self._name
class ClassWithNoInterface(object):
    """
    just a class
    """
    # Deliberately implements no interface: used to exercise the
    # interface-check failure paths in the tests below.
# NOTE: buildbot.plugins.db prepends the group with common namespace --
# 'buildbot.'
# Maps entry-point group name -> list of FakeEntry instances; consumed by
# provide_fake_entries(), which stands in for pkg_resources.iter_entry_points.
_FAKE_ENTRIES = {
    'buildbot.interface': [
        FakeEntry('good', 'non-existent', 'irrelevant', False,
                  ClassWithInterface),
        FakeEntry('deep.path', 'non-existent', 'irrelevant', False,
                  ClassWithInterface)
    ],
    'buildbot.interface_failed': [
        FakeEntry('good', 'non-existent', 'irrelevant', True,
                  ClassWithInterface)
    ],
    'buildbot.no_interface': [
        FakeEntry('good', 'non-existent', 'irrelevant', False,
                  ClassWithNoInterface)
    ],
    'buildbot.no_interface_again': [
        FakeEntry('good', 'non-existent', 'irrelevant', False,
                  ClassWithNoInterface)
    ],
    'buildbot.no_interface_failed': [
        FakeEntry('good', 'non-existent', 'irrelevant', True,
                  ClassWithNoInterface)
    ],
    'buildbot.duplicates': [
        FakeEntry('good', 'non-existent', 'first', False,
                  ClassWithNoInterface),
        FakeEntry('good', 'non-existent', 'second', False,
                  ClassWithNoInterface)
    ]
}
def provide_fake_entries(group):
    """
    Return the fake entry points registered under *group* (an empty
    list when the group is unknown).
    """
    try:
        return _FAKE_ENTRIES[group]
    except KeyError:
        return []
@mock.patch('buildbot.plugins.db.iter_entry_points', provide_fake_entries)
class TestBuildbotPlugins(unittest.TestCase):
    # Exercises the plugin DB against the fake entry points above:
    # group registration, interface checks, missing plugins, failed
    # dependencies and duplicate detection.

    def setUp(self):
        # Reset the module-global plugin DB so tests don't share state.
        buildbot.plugins.db._DB = buildbot.plugins.db._PluginDB()

    def test_check_group_registration(self):
        with mock.patch.object(buildbot.plugins.db, '_DB', db._PluginDB()):
            # The groups will be prepended with namespace, so info() will
            # return a dictionary with right keys, but no data
            groups = set(_FAKE_ENTRIES.keys())
            for group in groups:
                db.get_plugins(group)

            registered = set(db.info().keys())
            self.assertEqual(registered, groups)
            self.assertEqual(registered, set(db.namespaces()))

    def test_interface_provided_simple(self):
        # Basic check before the actual test
        self.assertTrue(ITestInterface.implementedBy(ClassWithInterface))

        plugins = db.get_plugins('interface', interface=ITestInterface)
        self.assertTrue('good' in plugins.names)

        result_get = plugins.get('good')
        result_getattr = plugins.good
        self.assertFalse(result_get is None)
        self.assertTrue(result_get is result_getattr)

        # Make sure we actually got our class
        greeter = result_get('yes')
        self.assertEqual('yes', greeter.hello())
        self.assertEqual('no', greeter.hello('no'))

    def test_missing_plugin(self):
        plugins = db.get_plugins('interface', interface=ITestInterface)

        self.assertRaises(AttributeError, getattr, plugins, 'bad')
        self.assertRaises(PluginDBError, plugins.get, 'bad')
        self.assertRaises(PluginDBError, plugins.get, 'good.extra')

    def test_interface_provided_deep(self):
        # Dotted plugin names ('deep.path') resolve through nested access.
        # Basic check before the actual test
        self.assertTrue(ITestInterface.implementedBy(ClassWithInterface))

        plugins = db.get_plugins('interface', interface=ITestInterface)
        self.assertTrue('deep.path' in plugins.names)
        self.assertTrue('deep.path' in plugins)
        self.assertFalse('even.deeper.path' in plugins)

        result_get = plugins.get('deep.path')
        result_getattr = plugins.deep.path
        self.assertFalse(result_get is None)
        self.assertTrue(result_get is result_getattr)

        # Make sure we actually got our class
        greeter = result_get('yes')
        self.assertEqual('yes', greeter.hello())
        self.assertEqual('no', greeter.hello('no'))

    def test_interface_provided_deps_failed(self):
        plugins = db.get_plugins('interface_failed', interface=ITestInterface,
                                 check_extras=True)
        self.assertRaises(PluginDBError, plugins.get, 'good')

    def test_required_interface_not_provided(self):
        plugins = db.get_plugins('no_interface_again',
                                 interface=ITestInterface)
        self.assertTrue(plugins._interface is ITestInterface)
        self.assertRaises(PluginDBError, plugins.get, 'good')

    def test_no_interface_provided(self):
        plugins = db.get_plugins('no_interface')
        self.assertFalse(plugins.get('good') is None)

    def test_no_interface_provided_deps_failed(self):
        plugins = db.get_plugins('no_interface_failed', check_extras=True)
        self.assertRaises(PluginDBError, plugins.get, 'good')

    def test_failure_on_dups(self):
        self.assertRaises(PluginDBError, db.get_plugins, 'duplicates',
                          load_now=True)

    def test_get_info_on_a_known_plugin(self):
        plugins = db.get_plugins('interface')
        self.assertEqual(('non-existent', 'irrelevant'), plugins.info('good'))

    def test_failure_on_unknown_plugin_info(self):
        plugins = db.get_plugins('interface')
        self.assertRaises(PluginDBError, plugins.info, 'bad')

    def test_failure_on_unknown_plugin_get(self):
        plugins = db.get_plugins('interface')
        self.assertRaises(PluginDBError, plugins.get, 'bad')
class SimpleFakeEntry(FakeEntry):
    """
    FakeEntry with boilerplate defaults: placeholder dist info and a
    require() that never fails.
    """

    def __init__(self, name, value):
        super(SimpleFakeEntry, self).__init__(name, 'non-existent',
                                              'irrelevant', False, value)
# Fake entry points for the worker/buildslave rename-transition tests:
# new-style names under 'buildbot.worker', old-style third-party names
# under 'buildbot.buildslave', renamed helpers under 'buildbot.util'.
_WORKER_FAKE_ENTRIES = {
    'buildbot.worker': [
        SimpleFakeEntry('Worker', ClassWithInterface),
        SimpleFakeEntry('EC2LatentWorker', ClassWithInterface),
        SimpleFakeEntry('LibVirtWorker', ClassWithInterface),
        SimpleFakeEntry('OpenStackLatentWorker', ClassWithInterface),
        SimpleFakeEntry('newthirdparty', ClassWithInterface),
        SimpleFakeEntry('deep.newthirdparty', ClassWithInterface),
    ],
    'buildbot.buildslave': [
        SimpleFakeEntry('thirdparty', ClassWithInterface),
        SimpleFakeEntry('deep.thirdparty', ClassWithInterface),
    ],
    'buildbot.util': [
        SimpleFakeEntry('WorkerLock', ClassWithInterface),
        SimpleFakeEntry('enforceChosenWorker', ClassWithInterface),
        SimpleFakeEntry('WorkerChoiceParameter', ClassWithInterface),
    ],
}
def provide_worker_fake_entries(group):
    """
    Return the fake worker-transition entry points registered under
    *group* (an empty list when the group is unknown).
    """
    try:
        return _WORKER_FAKE_ENTRIES[group]
    except KeyError:
        return []
class TestWorkerPluginsTransition(unittest.TestCase):
    # Verifies the worker/buildslave rename shims: new names resolve
    # silently, old names resolve with DeprecatedWorkerNameWarning, and
    # third-party plugins remain reachable through the right namespaces.

    def setUp(self):
        buildbot.plugins.db._DB = buildbot.plugins.db._PluginDB()
        with mock.patch('buildbot.plugins.db.iter_entry_points',
                        provide_worker_fake_entries):
            self.worker_ns = db.get_plugins('worker')
            self.buildslave_ns = db.get_plugins('buildslave')
            self.util_ns = db.get_plugins('util')

    def test_new_api(self):
        with assertNotProducesWarnings(DeprecatedWorkerAPIWarning):
            self.assertTrue(self.worker_ns.Worker is ClassWithInterface)

    def test_old_api_access_produces_warning(self):
        with assertProducesWarning(
                DeprecatedWorkerNameWarning,
                message_pattern=r"'buildbot\.plugins\.buildslave' plugins "
                "namespace is deprecated"):
            # Old API, with warning
            self.assertTrue(
                self.buildslave_ns.BuildSlave is ClassWithInterface)

    def test_new_api_through_old_namespace(self):
        # Access of newly named workers through old entry point is an error.
        with assertProducesWarning(DeprecatedWorkerNameWarning,
                                   message_pattern="namespace is deprecated"):
            self.assertRaises(
                AttributeError, lambda: self.buildslave_ns.Worker)

    def test_old_api_through_new_namespace(self):
        # Access of old-named workers through new API is an error.
        with assertNotProducesWarnings(DeprecatedWorkerAPIWarning):
            self.assertRaises(AttributeError,
                              lambda: self.worker_ns.BuildSlave)

    def test_old_api_thirdparty(self):
        with assertProducesWarning(
                DeprecatedWorkerNameWarning,
                message_pattern=r"'buildbot\.plugins\.buildslave' plugins "
                "namespace is deprecated"):
            # Third party plugins that use old API should work through old API.
            self.assertTrue(
                self.buildslave_ns.thirdparty is ClassWithInterface)

        with assertNotProducesWarnings(DeprecatedWorkerAPIWarning):
            # Third party plugins that use old API should work through new API.
            self.assertTrue(
                self.worker_ns.thirdparty is ClassWithInterface)

    def test_old_api_thirdparty_deep(self):
        # Same as above, but for dotted ('deep.') plugin names.
        with assertProducesWarning(
                DeprecatedWorkerNameWarning,
                message_pattern=r"'buildbot\.plugins\.buildslave' plugins "
                "namespace is deprecated"):
            self.assertTrue(
                self.buildslave_ns.deep.thirdparty is ClassWithInterface)

        with assertNotProducesWarnings(DeprecatedWorkerAPIWarning):
            self.assertTrue(
                self.worker_ns.deep.thirdparty is ClassWithInterface)

    def test_new_api_thirdparty(self):
        # Third party plugins that use new API should work only through
        # new API.
        with assertProducesWarning(DeprecatedWorkerNameWarning,
                                   message_pattern="namespace is deprecated"):
            self.assertRaises(AttributeError,
                              lambda: self.buildslave_ns.newthirdparty)

        with assertNotProducesWarnings(DeprecatedWorkerAPIWarning):
            self.assertTrue(
                self.worker_ns.newthirdparty is ClassWithInterface)

    def test_new_api_thirdparty_deep(self):
        # TODO: Why it's not AttributeError (as in tests above), but
        # PluginDBError?
        with assertProducesWarning(DeprecatedWorkerNameWarning,
                                   message_pattern="namespace is deprecated"):
            self.assertRaises(PluginDBError,
                              lambda: self.buildslave_ns.deep.newthirdparty)

        with assertNotProducesWarnings(DeprecatedWorkerAPIWarning):
            self.assertTrue(
                self.worker_ns.deep.newthirdparty is ClassWithInterface)

    def test_util_SlaveLock_import(self):
        with assertProducesWarning(
                DeprecatedWorkerNameWarning,
                message_pattern=re.escape(
                    "'buildbot.util.SlaveLock' is deprecated, "
                    "use 'buildbot.util.WorkerLock' instead")):
            deprecated = self.util_ns.SlaveLock

        with assertNotProducesWarnings(DeprecatedWorkerAPIWarning):
            self.assertIdentical(deprecated, ClassWithInterface)

    def test_util_enforceChosenSlave_import(self):
        with assertProducesWarning(
                DeprecatedWorkerNameWarning,
                message_pattern=re.escape(
                    "'buildbot.util.enforceChosenSlave' is deprecated, "
                    "use 'buildbot.util.enforceChosenWorker' instead")):
            deprecated = self.util_ns.enforceChosenSlave

        with assertNotProducesWarnings(DeprecatedWorkerAPIWarning):
            self.assertIdentical(deprecated, ClassWithInterface)

    def test_util_BuildslaveChoiceParameter_import(self):
        with assertProducesWarning(
                DeprecatedWorkerNameWarning,
                message_pattern=re.escape(
                    "'buildbot.util.BuildslaveChoiceParameter' is deprecated, "
                    "use 'buildbot.util.WorkerChoiceParameter' instead")):
            deprecated = self.util_ns.BuildslaveChoiceParameter

        with assertNotProducesWarnings(DeprecatedWorkerAPIWarning):
            self.assertIdentical(deprecated, ClassWithInterface)
| seankelly/buildbot | master/buildbot/test/unit/test_plugins.py | Python | gpl-2.0 | 14,759 |
# FlaskTaskr/forms.py
from flask_wtf import Form
from wtforms import StringField, DateField, IntegerField,\
SelectField, PasswordField
from wtforms.validators import DataRequired, length, EqualTo, Email
class AddTaskForm(Form):
    """Form for creating or updating a task."""

    task_id = IntegerField()
    name = StringField('Task Name', validators=[DataRequired()])
    due_date = DateField(
        'Date Due (mm/dd/yyyy)',
        format='%m/%d/%Y',
        validators=[DataRequired()],
    )
    priority = SelectField(
        'Priority',
        validators=[DataRequired()],
        # Same ('1','1') .. ('10','10') pairs as before, generated.
        choices=[(str(n), str(n)) for n in range(1, 11)],
    )
    status = IntegerField('Status')
class RegisterForm(Form):
    """New-user registration form (username, email, password + confirm)."""

    name = StringField(
        'Username',
        validators=[DataRequired(), length(min=6, max=25)]
    )
    email = StringField(
        'Email',
        # Email() validates the address format; it was imported at the
        # top of the file but never applied — fixed here.
        validators=[DataRequired(), Email(), length(min=6, max=40)]
    )
    password = PasswordField(
        # Capitalized for consistency with the other field labels.
        'Password',
        validators=[DataRequired(), length(min=6, max=40)]
    )
    confirm = PasswordField(
        'Repeat Password',
        validators=[DataRequired(), EqualTo('password')]
    )
class LoginForm(Form):
    """Existing-user login form."""

    name = StringField(
        # Capitalized for consistency with RegisterForm's labels.
        'Username',
        validators=[DataRequired()]
    )
    password = PasswordField(
        'Password',
        validators=[DataRequired()]
    )
from twisted.internet import protocol, defer, reactor
from twisted.internet.endpoints import TCP4ClientEndpoint
from ooni.nettest import NetTestCase
from ooni.errors import failureToString
from ooni.utils import log
class TCPSender(protocol.Protocol):
    """
    Protocol that writes a single payload and records all traffic.
    Intended for use against a TCP echo server: `sent_data` holds what
    was written, `received_data` accumulates what came back.
    """
    def __init__(self):
        self.received_data = ''
        self.sent_data = ''
        # Initialized here so that data arriving before sendPayload()
        # is simply ignored instead of raising AttributeError.
        self.payload_len = 0

    def dataReceived(self, data):
        """
        We receive data until the total amount of data received reaches that
        which we have sent. At that point we append the received data to the
        report and we fire the callback of the test template sendPayload
        function.

        This is used in pair with a TCP Echo server.

        The reason why we put the data received inside of an array is that in
        future we may want to expand this to support state and do something
        similar to what daphne does, but without the mutation.

        XXX Actually daphne will probably be refactored to be a subclass of the
        TCP Test Template.
        """
        if self.payload_len:
            self.received_data += data

    def sendPayload(self, payload):
        """
        Write the payload to the wire and set the expected size of the payload
        we are to receive.

        Args:

            payload: the data to be sent on the wire.
        """
        self.payload_len = len(payload)
        self.sent_data = payload
        self.transport.write(payload)
class TCPSenderFactory(protocol.Factory):
    # Trivial factory: one fresh TCPSender per connection.
    def buildProtocol(self, addr):
        return TCPSender()
class TCPTest(NetTestCase):
    # Test template: sendPayload() connects to (address, port), writes a
    # payload, records the traffic in self.report and resolves the
    # returned deferred with whatever data came back.
    name = "Base TCP Test"
    version = "0.1"

    requiresRoot = False
    timeout = 5
    address = None
    port = None

    def _setUp(self):
        super(TCPTest, self)._setUp()
        self.report['sent'] = []
        self.report['received'] = []

    def sendPayload(self, payload):
        d1 = defer.Deferred()

        def closeConnection(proto):
            # Snapshot the traffic into the report before dropping the
            # connection, then fire the caller's deferred.
            self.report['sent'].append(proto.sent_data)
            self.report['received'].append(proto.received_data)
            proto.transport.loseConnection()
            log.debug("Closing connection")
            d1.callback(proto.received_data)

        def timedOut(proto):
            # NOTE(review): defined but never scheduled — the timeout
            # below calls closeConnection instead. Confirm whether this
            # failure path was meant to be wired up.
            self.report['failure'] = 'tcp_timed_out_error'
            proto.transport.loseConnection()

        def errback(failure):
            self.report['failure'] = failureToString(failure)
            d1.errback(failure)

        def connected(proto):
            log.debug("Connected to %s:%s" % (self.address, self.port))
            proto.report = self.report
            proto.deferred = d1
            proto.sendPayload(payload)
            if self.timeout:
                # XXX-Twisted this logic should probably go inside of the protocol
                reactor.callLater(self.timeout, closeConnection, proto)

        point = TCP4ClientEndpoint(reactor, self.address, self.port)
        log.debug("Connecting to %s:%s" % (self.address, self.port))
        d2 = point.connect(TCPSenderFactory())
        d2.addCallback(connected)
        d2.addErrback(errback)
        return d1
| lordappsec/ooni-probe | ooni/templates/tcpt.py | Python | bsd-2-clause | 3,113 |
"""
This module contains utilities for use in conjunction with field renderers.
"""
from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function
__author__ = "Graham Klyne (GK@ACM.ORG)"
__copyright__ = "Copyright 2014, G. Klyne"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
import logging
log = logging.getLogger(__name__)
import re
from django.conf import settings
from .render_fieldvalue import RenderFieldValue
from .render_text import TextValueMapper
from .render_entityid import EntityIdValueMapper
from .render_identifier import IdentifierValueMapper
from .render_text_language import get_text_language_renderer, TextLanguageValueMapper
from .render_placement import get_field_placement_renderer
from .render_tokenset import get_field_tokenset_renderer, TokenSetValueMapper
from .render_bool_checkbox import get_bool_checkbox_renderer, BoolCheckboxValueMapper
from .render_ref_audio import get_ref_audio_renderer, RefAudioValueMapper
from .render_ref_image import get_ref_image_renderer, RefImageValueMapper
from .render_text_markdown import (
get_text_markdown_renderer, get_show_markdown_renderer, TextMarkdownValueMapper
)
from .render_select import (
get_select_renderer, get_choice_renderer,
get_entitytype_renderer, get_view_choice_renderer,
SelectValueMapper
)
from .render_uri_link import get_uri_link_renderer, URILinkValueMapper
from .render_uri_import import get_uri_import_renderer, URIImportValueMapper
from .render_file_upload import get_file_upload_renderer, FileUploadValueMapper
from .render_repeatgroup import (
get_repeatgroup_renderer,
get_repeatgrouprow_renderer,
get_repeatlistrow_renderer,
)
from .render_fieldrow import get_fieldrow_renderer, FieldRowValueMapper
from .render_ref_multifields import get_ref_multifield_renderer, RefMultifieldValueMapper
# Render type mappings to templates and/or renderer access functions
_field_renderers = {} # renderer cache
# Render type -> template file used when *viewing* a field value.
_field_view_files = (
    { "Text": "field/annalist_view_text.html"
    , "Showtext": "field/annalist_view_text.html"
    , "Textarea": "field/annalist_view_textarea.html"
    , "Codearea": "field/annalist_view_codearea.html"
    , "EntityRef": "field/annalist_view_entityref.html"
    , "EntityId": "field/annalist_view_entityid.html"
    , "Identifier": "field/annalist_view_identifier.html"
    , "Padding": "field/annalist_view_padding.html"
    , "Placeholder": "field/annalist_view_placeholder.html"
    })
# Render type -> template file used when *editing* a field value.
# Note: "Showtext" and "Placeholder" deliberately reuse view templates.
_field_edit_files = (
    { "Text": "field/annalist_edit_text.html"
    , "Showtext": "field/annalist_view_text.html"
    , "Textarea": "field/annalist_edit_textarea.html"
    , "Codearea": "field/annalist_edit_codearea.html"
    , "EntityRef": "field/annalist_edit_entityref.html"
    , "EntityId": "field/annalist_edit_entityid.html"
    , "Identifier": "field/annalist_edit_identifier.html"
    , "Padding": "field/annalist_edit_padding.html"
    , "Placeholder": "field/annalist_view_placeholder.html"
    })
# Render type -> factory function returning a RenderFieldValue-style renderer.
_field_get_renderer_functions = (
    { "LangText": get_text_language_renderer
    , "Markdown": get_text_markdown_renderer
    , "ShowMarkdown": get_show_markdown_renderer
    , "Placement": get_field_placement_renderer
    , "TokenSet": get_field_tokenset_renderer
    , "CheckBox": get_bool_checkbox_renderer
    , "RefAudio": get_ref_audio_renderer
    , "RefImage": get_ref_image_renderer
    , "URILink": get_uri_link_renderer
    , "URIImport": get_uri_import_renderer
    , "FileUpload": get_file_upload_renderer
    , "EntityTypeId": get_entitytype_renderer
    , "Enum": get_select_renderer
    , "Enum_optional": get_select_renderer
    , "Enum_choice": get_choice_renderer
    , "Enum_choice_opt": get_choice_renderer
    , "View_choice": get_view_choice_renderer
    , "RefMultifield": get_ref_multifield_renderer
    , "RepeatGroup": get_repeatgroup_renderer
    , "Group_Seq": get_repeatgroup_renderer
    , "Group_Set": get_repeatgroup_renderer
    , "RepeatGroupRow": get_repeatgrouprow_renderer
    , "Group_Seq_Row": get_repeatgrouprow_renderer
    , "Group_Set_Row": get_repeatgrouprow_renderer
    , "RepeatListRow": get_repeatlistrow_renderer
    , "FieldRow": get_fieldrow_renderer
    # Render types recognized for backward compatibility
    , "URIImage": get_ref_image_renderer
    , "Type": get_select_renderer
    , "View": get_select_renderer
    , "List": get_select_renderer
    , "Field": get_select_renderer
    , "List_sel": get_choice_renderer
    })
# Render type -> value mapper class (encode/decode between entity values
# and form-field text).  Types absent here fall back to TextValueMapper
# (see get_value_mapper below).
_field_value_mappers = (
    { "LangText": TextLanguageValueMapper
    , "TokenSet": TokenSetValueMapper
    , "CheckBox": BoolCheckboxValueMapper
    , "Markdown": TextMarkdownValueMapper
    , "ShowMarkdown": TextMarkdownValueMapper
    , "RefAudio": RefAudioValueMapper
    , "RefImage": RefImageValueMapper
    , "URILink": URILinkValueMapper
    , "URIImport": URIImportValueMapper
    , "FileUpload": FileUploadValueMapper
    , "EntityId": EntityIdValueMapper
    , "EntityTypeId": SelectValueMapper
    , "Identifier": IdentifierValueMapper
    , "Enum": SelectValueMapper
    , "Enum_optional": SelectValueMapper
    , "Enum_choice": SelectValueMapper
    , "Enum_choice_opt": SelectValueMapper
    , "View_choice": SelectValueMapper
    , "RefMultifield": RefMultifieldValueMapper
    , "FieldRow": FieldRowValueMapper
    # Render types recognized for backward compatibility
    , "URIImage": RefImageValueMapper
    , "Type": SelectValueMapper
    , "View": SelectValueMapper
    , "List": SelectValueMapper
    , "Field": SelectValueMapper
    , "List_sel": SelectValueMapper
    })
def is_repeat_field_render_type(render_type):
    """
    Return True if `render_type` denotes a repeated-value (group) field.
    """
    return render_type in {
        "RepeatGroup", "RepeatGroupRow",
        "Group_Seq", "Group_Seq_Row",
        "Group_Set", "Group_Set_Row",
        }
def get_field_base_renderer(field_render_type):
    """
    Look up (and lazily cache) the base renderer for the given render type.
    Returns None when the render type is unknown.
    """
    if field_render_type not in _field_renderers:
        viewfile = _field_view_files.get(field_render_type, None)
        editfile = _field_edit_files.get(field_render_type, None)
        if viewfile or editfile:
            # Template-file based renderer
            _field_renderers[field_render_type] = RenderFieldValue(
                field_render_type,
                view_file=viewfile, edit_file=editfile
                )
        elif field_render_type in _field_get_renderer_functions:
            # Renderer built by a dedicated factory function
            _field_renderers[field_render_type] = _field_get_renderer_functions[field_render_type]()
    return _field_renderers.get(field_render_type, None)
def get_entityref_edit_renderer(renderer, field_render_type):
    """
    Return an edit renderer for a field that references an entity: unless
    the field already uses a selection render type, force "Enum" selection.
    """
    selection_types = {
        "Enum", "Enum_optional", "Enum_choice", "Enum_choice_opt",
        "View_choice", "List_sel"
        }
    if field_render_type in selection_types:
        return renderer
    return get_field_base_renderer("Enum")
def get_uriimport_edit_renderer(renderer, field_render_type):
    """
    Return an edit renderer for a URI-import valued field, forcing the
    "URIImport" renderer when the field uses any other render type.
    """
    if field_render_type == "URIImport":
        return renderer
    return get_field_base_renderer("URIImport")
def get_fileupload_edit_renderer(renderer, field_render_type):
    """
    Return an edit renderer for a file-upload valued field, forcing the
    "FileUpload" renderer when the field uses any other render type.
    """
    if field_render_type == "FileUpload":
        return renderer
    return get_field_base_renderer("FileUpload")
def get_field_edit_renderer(field_render_type, field_value_mode):
    """
    Select an edit renderer for the supplied field details, adjusting the
    base renderer according to the field's value mode (entity reference,
    URI import or file upload).
    """
    renderer = get_field_base_renderer(field_render_type)
    if field_value_mode in ("Value_entity", "Value_field"):
        return get_entityref_edit_renderer(renderer, field_render_type)
    if field_value_mode == "Value_import":
        return get_uriimport_edit_renderer(renderer, field_render_type)
    if field_value_mode == "Value_upload":
        return get_fileupload_edit_renderer(renderer, field_render_type)
    return renderer
def get_label_renderer(field_render_type, field_value_mode):
    """
    Return a field-label renderer: an object with a `render(context)` method
    (usable from a Django "{% include ... %}") that returns the value of
    'field_label' in the context, or a diagnostic placeholder if absent.
    """
    class _LabelRenderer(object):
        def render(self, context):
            return context.get('field_label', "@@no 'field_label'@@")
    return _LabelRenderer()
def get_edit_renderer(field_render_type, field_value_mode):
    """
    Return a field *edit* renderer object usable from a Django template
    "{% include ... %}" element (any object exposing `render(context)` is
    accepted by Django >= 1.7; see the `include` tag documentation).
    Unknown render types fall back to plain text, with a warning logged.
    """
    renderer = get_field_edit_renderer(field_render_type, field_value_mode)
    if renderer:
        return renderer.edit()
    # Default to simple text for unknown renderer type
    log.warning("get_edit_renderer: %s not found"%field_render_type)
    return get_field_base_renderer("Text").edit()
def get_view_renderer(field_render_type, field_value_mode):
    """
    Return a field *view* renderer object usable from a Django template
    "{% include ... %}" element (any object exposing `render(context)` is
    accepted by Django >= 1.7; see the `include` tag documentation).
    Unknown render types fall back to plain text, with a warning logged.
    """
    renderer = get_field_base_renderer(field_render_type)
    if renderer:
        return renderer.view()
    # Default to simple text for unknown renderer type
    log.warning("get_view_renderer: '%s' not found"%field_render_type)
    return get_field_base_renderer("Text").view()
def get_label_edit_renderer(field_render_type, field_value_mode):
    """
    Return a labelled-edit renderer object for the given render type,
    falling back to plain text (with a warning) when the type is unknown.
    """
    renderer = get_field_base_renderer(field_render_type)
    if renderer:
        return renderer.label_edit()
    # Default to simple text for unknown renderer type
    log.warning("get_label_edit_renderer: '%s' not found"%field_render_type)
    return get_field_base_renderer("Text").label_edit()
def get_label_view_renderer(field_render_type, field_value_mode):
    """
    Return a labelled-view renderer object for the given render type,
    falling back to plain text (with a warning) when the type is unknown.
    """
    renderer = get_field_base_renderer(field_render_type)
    if renderer:
        return renderer.label_view()
    # Default to simple text for unknown renderer type
    log.warning("get_label_view_renderer: '%s' not found"%field_render_type)
    return get_field_base_renderer("Text").label_view()
def get_col_head_renderer(field_render_type, field_value_mode):
    """
    Return a list column-heading renderer for the given render type, or the
    generic heading template name when the type is unknown.
    """
    renderer = get_field_base_renderer(field_render_type)
    if not renderer:
        log.debug("get_col_head_renderer: '%s' not found"%field_render_type)
        return "field/annalist_head_any.html"
    return renderer.col_head()
def get_col_head_view_renderer(field_render_type, field_value_mode):
    """
    Return a list column-heading renderer used when *viewing* an entity,
    or the generic heading template name when the type is unknown.
    """
    renderer = get_field_base_renderer(field_render_type)
    if not renderer:
        log.debug("get_col_head_view_renderer: '%s' not found"%field_render_type)
        return "field/annalist_head_any.html"
    return renderer.col_head_view()
def get_col_head_edit_renderer(field_render_type, field_value_mode):
    """
    Return a list column-heading renderer used when *editing* an entity,
    or the generic heading template name when the type is unknown.
    """
    renderer = get_field_base_renderer(field_render_type)
    if not renderer:
        log.debug("get_col_head_edit_renderer: '%s' not found"%field_render_type)
        return "field/annalist_head_any.html"
    return renderer.col_head_edit()
def get_col_edit_renderer(field_render_type, field_value_mode):
    """
    Return a list row-item *edit* renderer for the given field details,
    or the empty-item template name when no renderer is available.
    """
    renderer = get_field_edit_renderer(field_render_type, field_value_mode)
    if not renderer:
        log.debug("get_col_edit_renderer: '%s' not found"%field_render_type)
        return "field/annalist_item_none.html"
    return renderer.col_edit()
def get_col_view_renderer(field_render_type, field_value_mode):
    """
    Return a list row-item *view* renderer for the given render type,
    or the empty-item template name when no renderer is available.
    """
    renderer = get_field_base_renderer(field_render_type)
    if not renderer:
        log.debug("get_col_view_renderer: '%s' not found"%field_render_type)
        return "field/annalist_item_none.html"
    return renderer.col_view()
def get_value_mapper(field_render_type):
    """
    Return a value mapper instance (with encode and decode methods) used to
    map values between entity fields and textual form fields.  Render types
    without a registered mapper get the identity `TextValueMapper`.
    """
    mapper_class = _field_value_mappers.get(field_render_type, TextValueMapper)
    return mapper_class()
if __name__ == "__main__":
    # Run any embedded doctests when executed as a script.
    import doctest
    doctest.testmod()
# End.
| gklyne/annalist | src/annalist_root/annalist/views/fields/find_renderers.py | Python | mit | 15,530 |
#! /usr/bin/env python
#This runs on M2
#Take the masked value of M1 and multiply by M2
# NOTE: Python 2 script (print statements); keep running under python2.
import os
import sys
import shutil
import ds_config
#Get the params
# Positional CLI arguments; no validation is performed here.
biobank_name = sys.argv[1]
masking_vector_name = sys.argv[2]
data_from_M1 = sys.argv[3]
data_set_name = sys.argv[4]
masked_location_local = sys.argv[5]
masked_location_remote = sys.argv[6]
print 'Calculating '+masking_vector_name+'.'+data_from_M1+'.'+data_set_name
masked_data_path_local = ds_config.temp_dir+masked_location_local+'/'+masking_vector_name+'.'+data_from_M1+'.'+data_set_name
masked_data_path_remote = ds_config.temp_dir+masked_location_remote+'/'+masking_vector_name+'.'+data_from_M1+'.'+data_set_name
print "Saving to: "+masked_data_path_local
#Run R script
# NOTE(review): arguments are interpolated straight into a shell command;
# os.system is unsafe if any name contains shell metacharacters — consider
# subprocess.call with an argument list.
cmd = 'Rscript '+ds_config.source_dir+'B/masked_M1_times_M2.R '
cmd += ds_config.temp_dir+biobank_name+'/'+masking_vector_name+' '
cmd += ds_config.temp_dir+biobank_name+'/'+data_from_M1+' '
cmd += ds_config.data_dir+biobank_name+'/'+data_set_name+' '
cmd += masked_data_path_local
os.system(cmd)
#Copy files to data dirs
# NOTE(review): this print duplicates the one inside the local_only branch
# below — presumably leftover from the commented-out copy; confirm intent.
print "Copying to: "+masked_data_path_remote
#shutil.copyfile(masked_data_path_local,masked_data_path_remote)
if ds_config.local_only == True:
    #Copy file to data dir
    print "Copying to: "+masked_data_path_remote
    shutil.copyfile(masked_data_path_local,masked_data_path_remote)
else:
    #Do something
    # Push the masked file to the remote client host via scp.
    cmd = 'scp '+masked_data_path_local+' '+ds_config.remote_settings['client','username']+'@'+ds_config.remote_settings['client','ip_address']+':'+masked_data_path_remote
    print cmd
    os.system(cmd)
print 'Finished masking '+masking_vector_name+'.'+data_from_M1+'.'+data_set_name+'\n'
| Vertical-Datashield/vertical_datashield | source/common/masked_M1_times_M2.py | Python | gpl-3.0 | 1,666 |
"""XML namespace and token-type constants for SOAP / WS-Security processing."""
# SOAP envelope
SOAP_NS = 'http://schemas.xmlsoap.org/soap/envelope/'
# xmldsig
DS_NS = 'http://www.w3.org/2000/09/xmldsig#'
# xmlenc
ENC_NS = 'http://www.w3.org/2001/04/xmlenc#'
# Common prefix for the OASIS WSS 1.0 schema/profile URIs below.
WSS_BASE = 'http://docs.oasis-open.org/wss/2004/01/'
# WS-Security
WSSE_NS = WSS_BASE + 'oasis-200401-wss-wssecurity-secext-1.0.xsd'
# WS-Utility
WSU_NS = WSS_BASE + 'oasis-200401-wss-wssecurity-utility-1.0.xsd'
# Encoding / value types for BinarySecurityToken elements.
BASE64B = WSS_BASE + 'oasis-200401-wss-soap-message-security-1.0#Base64Binary'
X509TOKEN = WSS_BASE + 'oasis-200401-wss-x509-token-profile-1.0#X509v3'
| orcasgit/py-wsse | wsse/constants.py | Python | bsd-3-clause | 544 |
class Scraper(object):
    """Base scraper holding a reference to the client it fetches with."""

    def __init__(self, client):
        # Store the injected client for use by subclasses.
        self.client = client
#!/usr/bin/env python
import os
import time
import proctest
import mozunit
from mozprocess import processhandler
here = os.path.dirname(os.path.abspath(__file__))
class ProcTestKill(proctest.ProcTest):
    """ Class to test various process tree killing scenatios """
    # This test should ideally be a part of test_mozprocess_kill.py
    # It has been separated for the purpose of tempporarily disabling it.
    # See https://bugzilla.mozilla.org/show_bug.cgi?id=921632
    def test_process_kill_broad_wait(self):
        """Process is started, we use a broad process tree, we let it spawn
        for a bit, we kill it"""
        p = processhandler.ProcessHandler([self.python, self.proclaunch,
                                          "process_normal_broad_python.ini"],
                                          cwd=here)
        p.run()
        # Let the tree spawn a bit, before attempting to kill
        time.sleep(3)
        p.kill()
        # A killed process is expected to have a non-normal return code.
        self.determine_status(p, expectedfail=('returncode',))
if __name__ == '__main__':
    # Run under the mozbase unit-test harness when executed directly.
    mozunit.main()
| Yukarumya/Yukarum-Redfoxes | testing/mozbase/mozprocess/tests/test_mozprocess_kill_broad_wait.py | Python | mpl-2.0 | 1,067 |
"""
The Template Module contains the base definition for Executing Templates
"""
import json
import logging
import os
from argparse import ArgumentParser
from qds_sdk.qubole import Qubole
from qds_sdk.resource import Resource
log = logging.getLogger("qds_template")
from qds_sdk.commands import *
class TemplateCmdLine:
    """
    qds_sdk.TemplateCmdLine is the interface used for template related operation in qds.py
    """

    @staticmethod
    def parsers():
        """Build and return the argparse parser for template sub-commands."""
        argparser = ArgumentParser(prog="qds.py template", description="Template Client for Qubole Data Service.")
        subparsers = argparser.add_subparsers()

        # create
        create = subparsers.add_parser("create", help="To Create a new Template")
        create.add_argument("--data", dest="data", required=True, help="Path to JSON file with template details")
        create.set_defaults(func=TemplateCmdLine.create)

        # edit
        edit = subparsers.add_parser("edit", help="To Edit an existing Template")
        edit.add_argument("--data", dest="data", required=True, help="Path to JSON file with template details")
        edit.add_argument("--id", dest="id", required=True, help="Id for the Template")
        edit.set_defaults(func=TemplateCmdLine.edit)

        # clone
        clone = subparsers.add_parser("clone", help="To Clone an existing Template")
        clone.add_argument("--id", dest="id", required=True, help="Id for the Template to be Cloned")
        clone.add_argument("--data", dest="data", required=True, help="Path to JSON file with template details to override")
        clone.set_defaults(func=TemplateCmdLine.clone)

        # view
        view = subparsers.add_parser("view", help="To View an existing Template")
        view.add_argument("--id", dest="id", required=True, help="Id for the Template")
        view.set_defaults(func=TemplateCmdLine.view)

        # list (local renamed from `list` so the builtin is not shadowed)
        list_parser = subparsers.add_parser("list", help="To List existing Templates")
        list_parser.add_argument("--per-page", dest="per_page", help="Number of items per page")
        list_parser.add_argument("--page", dest="page", help="Page Number")
        list_parser.set_defaults(func=TemplateCmdLine.list)

        # run
        run = subparsers.add_parser("run", help="To Run Template and wait to print Result")
        run.add_argument("--id", dest="id", required=True, help="Id of the template to run")
        run.add_argument("--j", dest="data", required=True, help="Path to JSON file or json string with input field details")
        run.set_defaults(func=TemplateCmdLine.execute)

        # submit
        submit = subparsers.add_parser("submit", help="To Submit Template and get the command Id")
        submit.add_argument("--id", dest="id", required=True, help="Id of the template to Submit")
        submit.add_argument("--j", dest="data", required=True, help="Path to JSON file or json string with input field details")
        submit.set_defaults(func=TemplateCmdLine.submit)
        return argparser

    @staticmethod
    def run(args):
        """Parse `args` and dispatch to the selected sub-command handler."""
        parser = TemplateCmdLine.parsers()
        parsed = parser.parse_args(args)
        return parsed.func(parsed)

    @staticmethod
    def create(args):
        """Create a template from the JSON file named by args.data."""
        with open(args.data) as f:
            spec = json.load(f)
        return Template.createTemplate(spec)

    @staticmethod
    def edit(args):
        """Edit template args.id using the JSON file named by args.data."""
        with open(args.data) as f:
            spec = json.load(f)
        return Template.editTemplate(args.id, spec)

    @staticmethod
    def clone(args):
        """Clone template args.id, overriding fields from the JSON file args.data."""
        with open(args.data) as f:
            spec = json.load(f)
        id = args.id
        return Template.cloneTemplate(id, spec)

    @staticmethod
    def submit(args):
        """Submit template args.id and log the resulting command id."""
        spec = getSpec(args)
        res = Template.submitTemplate(args.id, spec)
        log.info("Submitted Template with Id: %s, Command Id: %s, CommandType: %s" % (args.id, res['id'], res['command_type']))
        return res

    @staticmethod
    def execute(args):
        """Run template args.id, wait for completion and print results."""
        spec = getSpec(args)
        return Template.runTemplate(args.id, spec)

    @staticmethod
    def view(args):
        """Fetch and return the details of template args.id."""
        id = args.id
        return Template.viewTemplate(id)

    @staticmethod
    def list(args):
        """List templates, honouring args.page / args.per_page."""
        return Template.listTemplates(args)
def getSpec(args):
    """
    Build the template input spec from args.data, which may name a JSON
    file or be a literal JSON string.  Returns {} when no data is given;
    'input_vars', when present, are quoted via formatData.
    """
    if args.data is None:
        return {}
    if os.path.isfile(args.data):
        with open(args.data) as f:
            spec = json.load(f)
    else:
        spec = json.loads(args.data)
    if 'input_vars' in spec:
        spec["input_vars"] = formatData(spec['input_vars'])
    return spec
def formatData(inputs):
    """
    Wrap every value of every input-variable dict in single quotes (as the
    template-run API expects).  Returns a new list of new dicts; an empty
    input yields an empty list.
    """
    return [
        {key: "'" + obj[key] + "'" for key in obj}
        for obj in inputs
        ]
class Template(Resource):
    """
    qds_sdk.Template is the base Qubole Template class.
    it uses /command_templates endpoint
    """
    # REST collection path under the Qubole API root.
    rest_entity_path = "command_templates"
    @staticmethod
    def createTemplate(data):
        """
        Create a new template.
        Args:
            `data`: json data required for creating a template
        Returns:
            Dictionary containing the details of the template with its ID.
        """
        conn = Qubole.agent()
        return conn.post(Template.rest_entity_path, data)
    @staticmethod
    def editTemplate(id, data):
        """
        Edit an existing template.
        Args:
            `id`: ID of the template to edit
            `data`: json data to be updated
        Returns:
            Dictionary containing the updated details of the template.
        """
        conn = Qubole.agent()
        return conn.put(Template.element_path(id), data)
    @staticmethod
    def cloneTemplate(id, data):
        """
        Clone an existing template.
        Args:
            `id`: ID of the template to be cloned
            `data`: json data to override
        Returns:
            Dictionary containing the updated details of the template.
        """
        conn = Qubole.agent()
        path = str(id) + "/duplicate"
        return conn.post(Template.element_path(path), data)
    @staticmethod
    def viewTemplate(id):
        """
        View an existing Template details.
        Args:
            `id`: ID of the template to fetch
        Returns:
            Dictionary containing the details of the template.
        """
        conn = Qubole.agent()
        return conn.get(Template.element_path(id))
    @staticmethod
    def submitTemplate(id, data):
        """
        Submit an existing Template.
        Args:
            `id`: ID of the template to submit
            `data`: json data containing the input_vars
        Returns:
            Dictionary containing Command Object details.
        """
        conn = Qubole.agent()
        path = str(id) + "/run"
        return conn.post(Template.element_path(path), data)
    @staticmethod
    def runTemplate(id, data):
        """
        Run an existing Template and waits for the Result.
        Prints result to stdout.
        Args:
            `id`: ID of the template to run
            `data`: json data containing the input_vars
        Returns:
            An integer as status (0: success, 1: failure)
        """
        conn = Qubole.agent()
        path = str(id) + "/run"
        res = conn.post(Template.element_path(path), data)
        cmdType = res['command_type']
        cmdId = res['id']
        # NOTE(review): eval() resolves the server-supplied command_type
        # string to a Command subclass (imported via `from qds_sdk.commands
        # import *`).  This trusts the API response; an explicit name->class
        # mapping would be safer.
        cmdClass = eval(cmdType)
        cmd = cmdClass.find(cmdId)
        # Poll until the command reaches a terminal state.
        while not Command.is_done(cmd.status):
            time.sleep(Qubole.poll_interval)
            cmd = cmdClass.find(cmd.id)
        return Template.getResult(cmdClass, cmd)
    @staticmethod
    def getResult(cmdClass, cmd):
        # Print results for a finished command; returns a shell-style status.
        if Command.is_success(cmd.status):
            log.info("Fetching results for %s, Id: %s" % (cmdClass.__name__, cmd.id))
            cmd.get_results(sys.stdout, delim='\t')
            return 0
        else:
            log.error("Cannot fetch results - command Id: %s failed with status: %s" % (cmd.id, cmd.status))
            return 1
    @staticmethod
    def listTemplates(args):
        """
        Fetch existing Templates details.
        Args:
            `args`: dictionary containing the value of page number and per-page value
        Returns:
            Dictionary containing paging_info and command_templates details
        """
        conn = Qubole.agent()
        url_path = Template.rest_entity_path
        # Build the optional pagination query string.
        page_attr = []
        if args.page is not None:
            page_attr.append("page=%s" % args.page)
        if args.per_page is not None:
            page_attr.append("per_page=%s" % args.per_page)
        if page_attr:
            url_path = "%s?%s" % (url_path, "&".join(page_attr))
        return conn.get(url_path)
from os.path import abspath, dirname, join as pjoin
try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup

# Read the long description from the adjacent README; the context manager
# guarantees the file handle is closed even if read() raises (the previous
# open/read/close sequence leaked the handle on error).
fn = abspath(pjoin(dirname(__file__), 'README.rst'))
with open(fn, 'r') as fp:
    long_description = fp.read()

setup(
    name='aino-convert',
    version='0.1.0.14',
    url='https://github.com/aino/aino-convert',
    license='BSD',
    author='Mikko Hellsing',
    author_email='mikko@aino.se',
    description='Magick for Django',
    long_description=long_description,
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
        'Topic :: Multimedia :: Graphics',
        'Framework :: Django',
    ],
    packages=[
        'convert',
        'convert.conf',
        'convert.templatetags',
    ],
    platforms='any'
)
| aino/aino-convert | setup.py | Python | bsd-3-clause | 1,068 |
# -*- coding: utf-8 -*-
"""
requests.adapters
~~~~~~~~~~~~~~~~~
This module contains the transport adapters that Requests uses to define
and maintain connections.
"""
import socket
from .models import Response
from .packages.urllib3.poolmanager import PoolManager, proxy_from_url
from .packages.urllib3.response import HTTPResponse
from .packages.urllib3.util import Timeout as TimeoutSauce
from .packages.urllib3.util.retry import Retry
from .compat import urlparse, basestring
from .utils import (DEFAULT_CA_BUNDLE_PATH, get_encoding_from_headers,
prepend_scheme_if_needed, get_auth_from_url, urldefragauth)
from .structures import CaseInsensitiveDict
from .packages.urllib3.exceptions import ConnectTimeoutError
from .packages.urllib3.exceptions import HTTPError as _HTTPError
from .packages.urllib3.exceptions import MaxRetryError
from .packages.urllib3.exceptions import ProxyError as _ProxyError
from .packages.urllib3.exceptions import ProtocolError
from .packages.urllib3.exceptions import ReadTimeoutError
from .packages.urllib3.exceptions import SSLError as _SSLError
from .packages.urllib3.exceptions import ResponseError
from .cookies import extract_cookies_to_jar
from .exceptions import (ConnectionError, ConnectTimeout, ReadTimeout, SSLError,
ProxyError, RetryError)
from .auth import _basic_auth_str
# Defaults used when constructing HTTPAdapter connection pools.
DEFAULT_POOLBLOCK = False
DEFAULT_POOLSIZE = 10
DEFAULT_RETRIES = 0
class BaseAdapter(object):
    """The Base Transport Adapter"""
    def __init__(self):
        super(BaseAdapter, self).__init__()
    def send(self):
        """Send a request; concrete adapters must override this."""
        raise NotImplementedError
    def close(self):
        """Clean up adapter-specific state; concrete adapters must override this."""
        raise NotImplementedError
class HTTPAdapter(BaseAdapter):
"""The built-in HTTP Adapter for urllib3.
Provides a general-case interface for Requests sessions to contact HTTP and
HTTPS urls by implementing the Transport Adapter interface. This class will
usually be created by the :class:`Session <Session>` class under the
covers.
:param pool_connections: The number of urllib3 connection pools to cache.
:param pool_maxsize: The maximum number of connections to save in the pool.
:param int max_retries: The maximum number of retries each connection
should attempt. Note, this applies only to failed DNS lookups, socket
connections and connection timeouts, never to requests where data has
made it to the server. By default, Requests does not retry failed
connections. If you need granular control over the conditions under
which we retry a request, import urllib3's ``Retry`` class and pass
that instead.
:param pool_block: Whether the connection pool should block for connections.
Usage::
>>> import requests
>>> s = requests.Session()
>>> a = requests.adapters.HTTPAdapter(max_retries=3)
>>> s.mount('http://', a)
"""
__attrs__ = ['max_retries', 'config', '_pool_connections', '_pool_maxsize',
'_pool_block']
    def __init__(self, pool_connections=DEFAULT_POOLSIZE,
                 pool_maxsize=DEFAULT_POOLSIZE, max_retries=DEFAULT_RETRIES,
                 pool_block=DEFAULT_POOLBLOCK):
        # With the default retry count we also disable retries on read
        # (data may already have reached the server); an explicit
        # max_retries is converted via urllib3's Retry.from_int.
        if max_retries == DEFAULT_RETRIES:
            self.max_retries = Retry(0, read=False)
        else:
            self.max_retries = Retry.from_int(max_retries)
        self.config = {}
        self.proxy_manager = {}
        super(HTTPAdapter, self).__init__()
        # Remember pool parameters so __getstate__/__setstate__ can
        # recreate the (unpicklable) PoolManager after unpickling.
        self._pool_connections = pool_connections
        self._pool_maxsize = pool_maxsize
        self._pool_block = pool_block
        self.init_poolmanager(pool_connections, pool_maxsize, block=pool_block)
def __getstate__(self):
return dict((attr, getattr(self, attr, None)) for attr in
self.__attrs__)
    def __setstate__(self, state):
        # Can't handle by adding 'proxy_manager' to self.__attrs__ because
        # self.poolmanager uses a lambda function, which isn't pickleable.
        self.proxy_manager = {}
        self.config = {}
        for attr, value in state.items():
            setattr(self, attr, value)
        # Recreate the PoolManager from the pickled pool parameters.
        self.init_poolmanager(self._pool_connections, self._pool_maxsize,
                              block=self._pool_block)
    def init_poolmanager(self, connections, maxsize, block=DEFAULT_POOLBLOCK, **pool_kwargs):
        """Initializes a urllib3 PoolManager.

        This method should not be called from user code, and is only
        exposed for use when subclassing the
        :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.

        :param connections: The number of urllib3 connection pools to cache.
        :param maxsize: The maximum number of connections to save in the pool.
        :param block: Block when no free connections are available.
        :param pool_kwargs: Extra keyword arguments used to initialize the Pool Manager.
        """
        # save these values for pickling
        self._pool_connections = connections
        self._pool_maxsize = maxsize
        self._pool_block = block
        self.poolmanager = PoolManager(num_pools=connections, maxsize=maxsize,
                                       block=block, strict=True, **pool_kwargs)
def proxy_manager_for(self, proxy, **proxy_kwargs):
"""Return urllib3 ProxyManager for the given proxy.
This method should not be called from user code, and is only
exposed for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param proxy: The proxy to return a urllib3 ProxyManager for.
:param proxy_kwargs: Extra keyword arguments used to configure the Proxy Manager.
:returns: ProxyManager
"""
if not proxy in self.proxy_manager:
proxy_headers = self.proxy_headers(proxy)
self.proxy_manager[proxy] = proxy_from_url(
proxy,
proxy_headers=proxy_headers,
num_pools=self._pool_connections,
maxsize=self._pool_maxsize,
block=self._pool_block,
**proxy_kwargs)
return self.proxy_manager[proxy]
    def cert_verify(self, conn, url, verify, cert):
        """Verify a SSL certificate. This method should not be called from user
        code, and is only exposed for use when subclassing the
        :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.

        :param conn: The urllib3 connection object associated with the cert.
        :param url: The requested URL.
        :param verify: Whether we should actually verify the certificate.
        :param cert: The SSL certificate to verify.
        """
        if url.lower().startswith('https') and verify:
            cert_loc = None
            # Allow self-specified cert location.
            # verify may be True (use bundled CA path) or a path string.
            if verify is not True:
                cert_loc = verify
            if not cert_loc:
                cert_loc = DEFAULT_CA_BUNDLE_PATH
            if not cert_loc:
                raise Exception("Could not find a suitable SSL CA certificate bundle.")
            conn.cert_reqs = 'CERT_REQUIRED'
            conn.ca_certs = cert_loc
        else:
            # Plain HTTP, or verification explicitly disabled.
            conn.cert_reqs = 'CERT_NONE'
            conn.ca_certs = None
        if cert:
            # cert is either a (cert_file, key_file) pair or a single path.
            if not isinstance(cert, basestring):
                conn.cert_file = cert[0]
                conn.key_file = cert[1]
            else:
                conn.cert_file = cert
    def build_response(self, req, resp):
        """Builds a :class:`Response <requests.Response>` object from a urllib3
        response. This should not be called from user code, and is only exposed
        for use when subclassing the
        :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`

        :param req: The :class:`PreparedRequest <PreparedRequest>` used to generate the response.
        :param resp: The urllib3 response object.
        """
        response = Response()
        # Fallback to None if there's no status_code, for whatever reason.
        response.status_code = getattr(resp, 'status', None)
        # Make headers case-insensitive.
        response.headers = CaseInsensitiveDict(getattr(resp, 'headers', {}))
        # Set encoding.
        response.encoding = get_encoding_from_headers(response.headers)
        response.raw = resp
        response.reason = response.raw.reason
        # Normalize the URL to text.
        if isinstance(req.url, bytes):
            response.url = req.url.decode('utf-8')
        else:
            response.url = req.url
        # Add new cookies from the server.
        extract_cookies_to_jar(response.cookies, req, resp)
        # Give the Response some context.
        response.request = req
        response.connection = self
        return response
def get_connection(self, url, proxies=None):
    """Return a urllib3 connection for the given URL. Not intended to be
    called from user code; exposed purely as an override hook for subclasses
    of :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.

    :param url: The URL to connect to.
    :param proxies: (optional) A Requests-style dictionary of proxies used on this request.
    """
    # Look up a proxy for this URL's scheme, if any were supplied.
    proxy = (proxies or {}).get(urlparse(url.lower()).scheme)

    if proxy:
        proxy_url = prepend_scheme_if_needed(proxy, 'http')
        manager = self.proxy_manager_for(proxy_url)
        return manager.connection_from_url(url)

    # Only the scheme should be lower-cased; re-serialise the parsed URL
    # before handing it to the pool manager.
    return self.poolmanager.connection_from_url(urlparse(url).geturl())
def close(self):
    """Dispose of any internal state.

    At the moment this simply clears the underlying PoolManager, which in
    turn closes any pooled connections.
    """
    self.poolmanager.clear()
def request_url(self, request, proxies):
    """Obtain the URL to use when making the final request.

    When the message is sent through an HTTP proxy the complete URL must be
    used; otherwise only the path portion of the URL is sent. Not intended
    to be called from user code; exposed purely as an override hook for
    subclasses of :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.

    :param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
    :param proxies: A dictionary of schemes to proxy URLs.
    """
    scheme = urlparse(request.url).scheme
    via_proxy = (proxies or {}).get(scheme)

    if via_proxy and scheme != 'https':
        # Proxied plain-HTTP requests need the absolute URL, stripped of
        # any fragment and embedded auth.
        return urldefragauth(request.url)

    return request.path_url
def add_headers(self, request, **kwargs):
    """Add any headers needed by the connection.

    As of v2.0 this is intentionally a no-op; it exists purely as an
    override hook for subclasses of
    :class:`HTTPAdapter <requests.adapters.HTTPAdapter>` and should not be
    called from user code.

    :param request: The :class:`PreparedRequest <PreparedRequest>` to add headers to.
    :param kwargs: The keyword arguments from the call to send().
    """
def proxy_headers(self, proxy):
    """Return a dictionary of headers to add to any request sent through a
    proxy.

    This works with urllib3 magic to ensure that the headers are correctly
    sent to the proxy, rather than in a tunnelled request if CONNECT is
    being used. Not intended to be called from user code; exposed purely as
    an override hook for subclasses of
    :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.

    :param proxy: The url of the proxy being used for this request.
    """
    auth_user, auth_pass = get_auth_from_url(proxy)

    # Only emit the header when the proxy URL embeds both credentials.
    if not (auth_user and auth_pass):
        return {}

    return {'Proxy-Authorization': _basic_auth_str(auth_user, auth_pass)}
def send(self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None):
    """Sends PreparedRequest object. Returns Response object.

    :param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
    :param stream: (optional) Whether to stream the request content.
    :param timeout: (optional) How long to wait for the server to send
        data before giving up, as a float, or a (`connect timeout, read
        timeout <user/advanced.html#timeouts>`_) tuple.
    :type timeout: float or tuple
    :param verify: (optional) Whether to verify SSL certificates.
    :param cert: (optional) Any user-provided SSL certificate to be trusted.
    :param proxies: (optional) The proxies dictionary to apply to the request.
    """
    conn = self.get_connection(request.url, proxies)

    # Configure TLS verification on the connection before anything is sent.
    self.cert_verify(conn, request.url, verify, cert)
    url = self.request_url(request, proxies)
    self.add_headers(request)

    # Use chunked transfer-encoding when there is a body but its length is
    # unknown (no Content-Length header was prepared).
    chunked = not (request.body is None or 'Content-Length' in request.headers)

    if isinstance(timeout, tuple):
        try:
            connect, read = timeout
            timeout = TimeoutSauce(connect=connect, read=read)
        except ValueError as e:
            # this may raise a string formatting error.
            err = ("Invalid timeout {0}. Pass a (connect, read) "
                   "timeout tuple, or a single float to set "
                   "both timeouts to the same value".format(timeout))
            raise ValueError(err)
    else:
        # A single value applies to both the connect and the read timeout.
        timeout = TimeoutSauce(connect=timeout, read=timeout)

    try:
        if not chunked:
            # Let urllib3 drive the whole request/response cycle.
            resp = conn.urlopen(
                method=request.method,
                url=url,
                body=request.body,
                headers=request.headers,
                redirect=False,
                assert_same_host=False,
                preload_content=False,
                decode_content=False,
                retries=self.max_retries,
                timeout=timeout
            )

        # Send the request.
        else:
            # Chunked path: drive the low-level httplib connection by hand.
            if hasattr(conn, 'proxy_pool'):
                conn = conn.proxy_pool

            low_conn = conn._get_conn(timeout=timeout)

            try:
                low_conn.putrequest(request.method,
                                    url,
                                    skip_accept_encoding=True)

                for header, value in request.headers.items():
                    low_conn.putheader(header, value)

                low_conn.endheaders()

                # Emit each body chunk using chunked transfer-encoding
                # framing: "<hex length>\r\n<data>\r\n", with a terminating
                # zero-length chunk.
                for i in request.body:
                    low_conn.send(hex(len(i))[2:].encode('utf-8'))
                    low_conn.send(b'\r\n')
                    low_conn.send(i)
                    low_conn.send(b'\r\n')
                low_conn.send(b'0\r\n\r\n')

                r = low_conn.getresponse()
                resp = HTTPResponse.from_httplib(
                    r,
                    pool=conn,
                    connection=low_conn,
                    preload_content=False,
                    decode_content=False
                )
            except:
                # If we hit any problems here, clean up the connection.
                # Then, reraise so that we can handle the actual exception.
                low_conn.close()
                raise
            else:
                # All is well, return the connection to the pool.
                conn._put_conn(low_conn)

    # Map low-level urllib3/socket failures onto requests' exception
    # hierarchy so callers only need to catch requests exceptions.
    except (ProtocolError, socket.error) as err:
        raise ConnectionError(err, request=request)

    except MaxRetryError as e:
        if isinstance(e.reason, ConnectTimeoutError):
            raise ConnectTimeout(e, request=request)

        if isinstance(e.reason, ResponseError):
            raise RetryError(e, request=request)

        raise ConnectionError(e, request=request)

    except _ProxyError as e:
        raise ProxyError(e)

    except (_SSLError, _HTTPError) as e:
        if isinstance(e, _SSLError):
            raise SSLError(e, request=request)
        elif isinstance(e, ReadTimeoutError):
            raise ReadTimeout(e, request=request)
        else:
            raise

    return self.build_response(request, resp)
| ARMmbed/yotta_osx_installer | workspace/lib/python2.7/site-packages/pip/_vendor/requests/adapters.py | Python | apache-2.0 | 16,810 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .service_update_description import ServiceUpdateDescription
class StatefulServiceUpdateDescription(ServiceUpdateDescription):
    """Describes an update for a stateful Service Fabric service.

    Common update properties (flags, placement_constraints,
    correlation_scheme, load_metrics, service_placement_policies,
    default_move_cost) are inherited from :class:`ServiceUpdateDescription
    <azure.servicefabric.models.ServiceUpdateDescription>`.

    The ``flags`` property is a string holding a bitwise-OR combination of
    the following values, each indicating that the corresponding property
    is set (e.g. 6 means ReplicaRestartWaitDuration and
    QuorumLossWaitDuration are both set):
    0 None, 1 TargetReplicaSetSize/InstanceCount,
    2 ReplicaRestartWaitDuration, 4 QuorumLossWaitDuration,
    8 StandByReplicaKeepDuration, 16 MinReplicaSetSize,
    32 PlacementConstraints, 64 PlacementPolicyList, 128 Correlation,
    256 Metrics, 512 DefaultMoveCost.

    :param flags: Flags indicating whether other properties are set.
    :type flags: str
    :param placement_constraints: The placement constraints as a string.
     Boolean expression on node properties, e.g. "NodeColor == blue)".
    :type placement_constraints: str
    :param correlation_scheme: The correlation scheme.
    :type correlation_scheme: list of :class:`ServiceCorrelationDescription
     <azure.servicefabric.models.ServiceCorrelationDescription>`
    :param load_metrics: The service load metrics.
    :type load_metrics: list of :class:`ServiceLoadMetricDescription
     <azure.servicefabric.models.ServiceLoadMetricDescription>`
    :param service_placement_policies: The service placement policies.
    :type service_placement_policies: list of
     :class:`ServicePlacementPolicyDescription
     <azure.servicefabric.models.ServicePlacementPolicyDescription>`
    :param default_move_cost: The move cost for the service. Possible values
     include: 'Zero', 'Low', 'Medium', 'High'
    :type default_move_cost: str
    :param ServiceKind: Polymorphic Discriminator; always ``'Stateful'``.
    :type ServiceKind: str
    :param target_replica_set_size: The target replica set size as a number.
    :type target_replica_set_size: int
    :param min_replica_set_size: The minimum replica set size as a number.
    :type min_replica_set_size: int
    :param replica_restart_wait_duration_seconds: The duration, in seconds,
     between when a replica goes down and when a new replica is created.
    :type replica_restart_wait_duration_seconds: str
    :param quorum_loss_wait_duration_seconds: The maximum duration, in
     seconds, for which a partition is allowed to be in a state of quorum
     loss.
    :type quorum_loss_wait_duration_seconds: str
    :param stand_by_replica_keep_duration_seconds: How long StandBy replicas
     should be maintained before being removed.
    :type stand_by_replica_keep_duration_seconds: str
    """

    # Constraints enforced by msrest validation before serialization.
    _validation = {
        'ServiceKind': {'required': True},
        'target_replica_set_size': {'minimum': 1},
        'min_replica_set_size': {'minimum': 1},
    }

    # Maps Python attribute names to REST payload keys and wire types.
    _attribute_map = {
        'flags': {'key': 'Flags', 'type': 'str'},
        'placement_constraints': {'key': 'PlacementConstraints', 'type': 'str'},
        'correlation_scheme': {'key': 'CorrelationScheme', 'type': '[ServiceCorrelationDescription]'},
        'load_metrics': {'key': 'LoadMetrics', 'type': '[ServiceLoadMetricDescription]'},
        'service_placement_policies': {'key': 'ServicePlacementPolicies', 'type': '[ServicePlacementPolicyDescription]'},
        'default_move_cost': {'key': 'DefaultMoveCost', 'type': 'str'},
        'ServiceKind': {'key': 'ServiceKind', 'type': 'str'},
        'target_replica_set_size': {'key': 'TargetReplicaSetSize', 'type': 'int'},
        'min_replica_set_size': {'key': 'MinReplicaSetSize', 'type': 'int'},
        'replica_restart_wait_duration_seconds': {'key': 'ReplicaRestartWaitDurationSeconds', 'type': 'str'},
        'quorum_loss_wait_duration_seconds': {'key': 'QuorumLossWaitDurationSeconds', 'type': 'str'},
        'stand_by_replica_keep_duration_seconds': {'key': 'StandByReplicaKeepDurationSeconds', 'type': 'str'},
    }

    def __init__(self, flags=None, placement_constraints=None, correlation_scheme=None, load_metrics=None, service_placement_policies=None, default_move_cost=None, target_replica_set_size=None, min_replica_set_size=None, replica_restart_wait_duration_seconds=None, quorum_loss_wait_duration_seconds=None, stand_by_replica_keep_duration_seconds=None):
        # Shared update properties are handled by the base class.
        super(StatefulServiceUpdateDescription, self).__init__(
            flags=flags,
            placement_constraints=placement_constraints,
            correlation_scheme=correlation_scheme,
            load_metrics=load_metrics,
            service_placement_policies=service_placement_policies,
            default_move_cost=default_move_cost)
        self.target_replica_set_size = target_replica_set_size
        self.min_replica_set_size = min_replica_set_size
        self.replica_restart_wait_duration_seconds = replica_restart_wait_duration_seconds
        self.quorum_loss_wait_duration_seconds = quorum_loss_wait_duration_seconds
        self.stand_by_replica_keep_duration_seconds = stand_by_replica_keep_duration_seconds
        # Fixed polymorphic discriminator for this subtype.
        self.ServiceKind = 'Stateful'
| SUSE/azure-sdk-for-python | azure-servicefabric/azure/servicefabric/models/stateful_service_update_description.py | Python | mit | 6,746 |
"""
.. module:: MetaBuilder Credit Card Test
:platform: Linux
:synopsis: An small framework for creating builders or entities with validators. Test for version version >=0.15.
:copyright: (c) 2013 by Ernesto Bossi.
:license: GPL v3.
.. moduleauthor:: Ernesto Bossi <bossi.ernestog@gmail.com>
"""
from unittest import TestCase
from PyMetabuilder import MetaBuilder, ValidatorError
class Logo:
    """Empty placeholder model; used as the type of the builder's extraLogo property."""
    pass
class CreditCard:
    """Empty model class that the MetaBuilder subclasses below construct."""
    pass
class CreditCardMetaBuilder(MetaBuilder):
    """Builder for :class:`CreditCard` declared with one ``property`` call per field."""

    def __init__(self):
        MetaBuilder.__init__(self)
        self.model(CreditCard)
        # Card number: mandatory 16-character string.
        self.property("ccnumber", type=str, length=16, required=True)
        # Card holder name: mandatory string.
        self.property("ccName", type=str, required=True)
        # Optional logo, validated to be a Logo instance.
        self.property("extraLogo", type=Logo)
class CreditCardCascadeMetaBuilder(MetaBuilder):
    """Same builder as :class:`CreditCardMetaBuilder`, but declared with
    the fluent (cascaded) call style to exercise method chaining."""

    def __init__(self):
        MetaBuilder.__init__(self)
        self.model(CreditCard)\
            .property("ccnumber", type=str, length=16, required=True)\
            .property("ccName", type=str, required=True)\
            .property("extraLogo", type=Logo)
class TestCreditCardMetaBuilder(TestCase):
    """Exercises building CreditCard objects through both builder styles."""

    def setUp(self):
        self.cardBuilder = CreditCardMetaBuilder()
        self.cascadeCardBuilder = CreditCardCascadeMetaBuilder()

    def CCBuild(self):
        # NOTE(review): never referenced by any test and the assigned result
        # is unused -- looks like dead code; confirm before removing.
        creditcard = self.cardBuilder.build()

    def test_normal_build(self):
        # Both required properties set: build() must succeed.
        self.cardBuilder.ccnumber = "5430422223333444"
        self.cardBuilder.ccName = 'John Doe'
        self.cardBuilder.build()

    def test_attr_error_build(self):
        # ccName is required but never set, so build() must fail.
        self.cardBuilder.ccnumber = "5430422223333444"
        self.assertRaises(AttributeError,self.cardBuilder.build)

    def test_invalid_length_build(self):
        # 15-digit number violates the length=16 constraint.
        # NOTE(review): passes the attribute itself as the assertRaises
        # callable -- assumes MetaBuilder exposes properties as callables;
        # confirm against MetaBuilder's implementation.
        self.assertRaises(ValidatorError,self.cardBuilder.ccnumber,"543042222333444")

    def test_normal_build_cascade(self):
        self.cascadeCardBuilder.ccnumber = "5430422223333444"
        self.cascadeCardBuilder.ccName = 'John Doe'
        self.cascadeCardBuilder.build()

    def test_invalid_length_build_cascade(self):
        self.assertRaises(ValidatorError,self.cascadeCardBuilder.ccnumber,"543042222333444")
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test re-org scenarios with a mempool that contains transactions
# that spend (directly or indirectly) coinbase transactions.
#
from test_framework.test_framework import MincoinTestFramework
from test_framework.util import *
# Create one-input, one-output, no-fee transaction:
class MempoolCoinbaseTest(MincoinTestFramework):
    """Tests mempool handling of direct and indirect coinbase spends when
    the blocks containing those coinbases are re-orged away."""

    def __init__(self):
        super().__init__()
        self.num_nodes = 2
        self.setup_clean_chain = False

    alert_filename = None  # Set by setup_network

    def setup_network(self):
        # -checkmempool makes the nodes sanity-check the mempool on each block.
        args = ["-checkmempool", "-debug=mempool"]
        self.nodes = []
        self.nodes.append(start_node(0, self.options.tmpdir, args))
        self.nodes.append(start_node(1, self.options.tmpdir, args))
        connect_nodes(self.nodes[1], 0)
        self.is_network_split = False
        self.sync_all()

    def run_test(self):
        # NOTE(review): start_count is never used below.
        start_count = self.nodes[0].getblockcount()

        # Mine four blocks. After this, the coinbases of blocks
        # 101, 102, 103 and 104 are spend-able.
        new_blocks = self.nodes[1].generate(4)
        self.sync_all()

        node0_address = self.nodes[0].getnewaddress()
        node1_address = self.nodes[1].getnewaddress()

        # Three scenarios for re-orging coinbase spends in the memory pool:
        # 1. Direct coinbase spend  :  spend_101
        # 2. Indirect (coinbase spend in chain, child in mempool) : spend_102 and spend_102_1
        # 3. Indirect (coinbase and child both in chain)  : spend_103 and spend_103_1
        # Use invalidateblock to make all of the above coinbase spends invalid (immature coinbase),
        # and make sure the mempool code behaves correctly.
        b = [ self.nodes[0].getblockhash(n) for n in range(101, 105) ]
        coinbase_txids = [ self.nodes[0].getblock(h)['tx'][0] for h in b ]
        spend_101_raw = create_tx(self.nodes[0], coinbase_txids[1], node1_address, 499.99)
        spend_102_raw = create_tx(self.nodes[0], coinbase_txids[2], node0_address, 499.99)
        spend_103_raw = create_tx(self.nodes[0], coinbase_txids[3], node0_address, 499.99)

        # Create a block-height-locked transaction which will be invalid after reorg
        timelock_tx = self.nodes[0].createrawtransaction([{"txid": coinbase_txids[0], "vout": 0}], {node0_address: 499.99})
        # Set the time lock
        # (patch the input sequence so nLockTime is enforced, then splice the
        # target height into the serialized transaction's locktime field).
        timelock_tx = timelock_tx.replace("ffffffff", "11111191", 1)
        timelock_tx = timelock_tx[:-8] + hex(self.nodes[0].getblockcount() + 2)[2:] + "000000"
        timelock_tx = self.nodes[0].signrawtransaction(timelock_tx)["hex"]
        # Not minable yet: locktime is two blocks in the future.
        assert_raises(JSONRPCException, self.nodes[0].sendrawtransaction, timelock_tx)

        # Broadcast and mine spend_102 and 103:
        spend_102_id = self.nodes[0].sendrawtransaction(spend_102_raw)
        spend_103_id = self.nodes[0].sendrawtransaction(spend_103_raw)
        self.nodes[0].generate(1)
        assert_raises(JSONRPCException, self.nodes[0].sendrawtransaction, timelock_tx)

        # Create 102_1 and 103_1:
        spend_102_1_raw = create_tx(self.nodes[0], spend_102_id, node1_address, 499.98)
        spend_103_1_raw = create_tx(self.nodes[0], spend_103_id, node1_address, 499.98)

        # Broadcast and mine 103_1:
        spend_103_1_id = self.nodes[0].sendrawtransaction(spend_103_1_raw)
        last_block = self.nodes[0].generate(1)
        timelock_tx_id = self.nodes[0].sendrawtransaction(timelock_tx)

        # ... now put spend_101 and spend_102_1 in memory pools:
        spend_101_id = self.nodes[0].sendrawtransaction(spend_101_raw)
        spend_102_1_id = self.nodes[0].sendrawtransaction(spend_102_1_raw)

        self.sync_all()

        assert_equal(set(self.nodes[0].getrawmempool()), {spend_101_id, spend_102_1_id, timelock_tx_id})

        # Undo the last block: 103_1 re-enters the mempool while the
        # timelock tx becomes premature again and is evicted.
        for node in self.nodes:
            node.invalidateblock(last_block[0])
        assert_equal(set(self.nodes[0].getrawmempool()), {spend_101_id, spend_102_1_id, spend_103_1_id})

        # Use invalidateblock to re-org back and make all those coinbase spends
        # immature/invalid:
        for node in self.nodes:
            node.invalidateblock(new_blocks[0])
        self.sync_all()

        # mempool should be empty.
        assert_equal(set(self.nodes[0].getrawmempool()), set())
if __name__ == '__main__':
    # Allow running this test directly from the command line.
    MempoolCoinbaseTest().main()
| xieta/mincoin | qa/rpc-tests/mempool_reorg.py | Python | mit | 4,520 |
import os
import shutil
import unittest
import tempfile
import deepchem as dc
import numpy as np
from sklearn.ensemble import RandomForestClassifier
class TestDrop(unittest.TestCase):
    """
    Test how loading of malformed compounds is handled.

    Called TestDrop since these compounds were silently and erroneously dropped.
    """

    def test_drop(self):
        """Test on dataset where RDKit fails on some strings."""
        # Locate the fixture CSV next to this test module.
        current_dir = os.path.dirname(os.path.realpath(__file__))
        print("About to load emols dataset.")
        dataset_file = os.path.join(current_dir, "mini_emols.csv")

        # Featurize emols dataset
        print("About to featurize datasets.")
        featurizer = dc.feat.CircularFingerprint(size=1024)
        emols_tasks = ['activity']

        loader = dc.data.CSVLoader(
            tasks=emols_tasks, smiles_field="smiles", featurizer=featurizer)
        dataset = loader.featurize(dataset_file)

        # All sub-arrays must stay aligned: a malformed compound must not be
        # dropped from one array without being dropped from the others.
        X, y, w, ids = (dataset.X, dataset.y, dataset.w, dataset.ids)
        assert len(X) == len(y) == len(w) == len(ids)
| bowenliu16/deepchem | deepchem/data/tests/test_drop.py | Python | gpl-3.0 | 1,096 |
"""
This module defines the interface every store needs to adhere to.
"""
from abc import ABCMeta, abstractmethod
class IStore(metaclass=ABCMeta):
    """
    This interface handles object-storage.

    Implementations of this interface can be made for different object-storages.
    Currently this interface is only implemented for PairTreeFileSystemStore.

    .. note::
        The original declared ``__metaclass__ = ABCMeta`` (Python 2 syntax),
        which is inert on Python 3 and left the ``@abstractmethod`` markers
        unenforced. Declaring the metaclass in the class header restores the
        intended contract: ``IStore`` cannot be instantiated directly and
        concrete subclasses must implement every abstract method.
    """

    @abstractmethod
    def create_object(self, container_key, object_key, object_data):
        """
        Save a new object in the data store

        :param str container_key: Key of the container to create an object in.
        :param str object_key: Key of the object to create.
        :param str object_data: The data for the object to create.
        :raises augeias.stores.error.NotFoundException: When the container could not be found.
        """

    @abstractmethod
    def delete_object(self, container_key, object_key):
        """
        Delete an object from the data store.

        :param str container_key: Key of the container that the object lives in.
        :param str object_key: Key of the object to delete.
        :raises augeias.stores.error.NotFoundException: When the object or container could not be found.
        """

    @abstractmethod
    def get_object(self, container_key, object_key):
        """
        Retrieve an object from the data store.

        :param str container_key: Key of the container that the object lives in.
        :param str object_key: Key of the object to retrieve.
        :raises augeias.stores.error.NotFoundException: When the object or container could not be found.
        """

    @abstractmethod
    def get_object_info(self, container_key, object_key):
        """
        Retrieve object info (mimetype, size, time last modification) from the data store.

        :param str container_key: Key of the container that the object lives in.
        :param str object_key: Key of the object to retrieve.
        :raises augeias.stores.error.NotFoundException: When the object or container could not be found.
        """

    @abstractmethod
    def update_object(self, container_key, object_key, object_data):
        """
        Update an object in the data store.

        :param str container_key: Key of the container that the object lives in.
        :param str object_key: Key of the object to update.
        :param str object_data: New data for the object.
        :raises augeias.stores.error.NotFoundException: When the object or container could not be found.
        """

    @abstractmethod
    def list_object_keys_for_container(self, container_key):
        """
        List all object keys for a container in the data store.

        :param str container_key: Key of the container to list the objects for.
        :returns: A list of container keys.
        :rtype: lst
        :raises augeias.stores.error.NotFoundException: When the container could not be found.
        """

    @abstractmethod
    def get_container_data(self, container_key, translations=None):
        """
        Find a container and return a zip file of its contents.

        :param container_key: Key of the container which must be retrieved.
        :param translations: Dict of object IDs and file names to use for them.
        :return: a zip file containing all files of the container.
        """

    @abstractmethod
    def create_container(self, container_key):
        """
        Create a new container in the data store.

        :param str container_key: Key of the container to create.
        """

    @abstractmethod
    def delete_container(self, container_key):
        """
        Delete a container and all it's objects in the data store.

        :param str container_key: Key of the container to delete.
        :raises augeias.stores.error.NotFoundException: When the container could not be found.
        """
#
# Copyright (c) 2015 Matthew Bentley
#
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import os
import hashlib
import duplicity.backend
from duplicity.errors import BackendException, FatalBackendException
from duplicity import log
from duplicity import progress
class B2ProgressListener:
    """Relays B2 upload progress callbacks to duplicity's progress tracker."""

    def set_total_bytes(self, total_byte_count):
        # Remember the expected transfer size for later progress reports.
        self.total_byte_count = total_byte_count

    def bytes_completed(self, byte_count):
        # Forward the running byte count together with the known total.
        progress.report_transfer(byte_count, self.total_byte_count)

    def close(self):
        # Nothing to release.
        pass
class B2Backend(duplicity.backend.Backend):
    """
    Backend for BackBlaze's B2 storage service
    """

    def __init__(self, parsed_url):
        """
        Authorize to B2 api and set up needed variables
        """
        duplicity.backend.Backend.__init__(self, parsed_url)

        # Import B2 API
        try:
            global b2
            import b2
            import b2.api
            import b2.account_info
            import b2.download_dest
            import b2.file_version
        except ImportError:
            raise BackendException('B2 backend requires B2 Python APIs (pip install b2)')

        self.service = b2.api.B2Api(b2.account_info.InMemoryAccountInfo())
        self.parsed_url.hostname = 'B2'

        account_id = parsed_url.username
        account_key = self.get_password()

        self.url_parts = [
            x for x in parsed_url.path.replace("@", "/").split('/') if x != ''
        ]
        if self.url_parts:
            self.username = self.url_parts.pop(0)
            bucket_name = self.url_parts.pop(0)
        else:
            raise BackendException("B2 requires a bucket name")
        self.path = "".join([url_part + "/" for url_part in self.url_parts])

        self.service.authorize_account('production', account_id, account_key)

        log.Log("B2 Backend (path= %s, bucket= %s, minimum_part_size= %s)" %
                (self.path, bucket_name, self.service.account_info.get_minimum_part_size()), log.INFO)
        try:
            self.bucket = self.service.get_bucket_by_name(bucket_name)
            log.Log("Bucket found", log.INFO)
        except b2.exception.NonExistentBucket:
            try:
                log.Log("Bucket not found, creating one", log.INFO)
                self.bucket = self.service.create_bucket(bucket_name, 'allPrivate')
            except Exception:
                # Narrowed from a bare ``except:`` so SystemExit and
                # KeyboardInterrupt are no longer swallowed here.
                raise FatalBackendException("Bucket cannot be created")

    def _get(self, remote_filename, local_path):
        """
        Download remote_filename to local_path
        """
        log.Log("Get: %s -> %s" % (self.path + remote_filename, local_path.name), log.INFO)
        self.bucket.download_file_by_name(self.path + remote_filename,
                                          b2.download_dest.DownloadDestLocalFile(local_path.name))

    def _put(self, source_path, remote_filename):
        """
        Copy source_path to remote_filename
        """
        log.Log("Put: %s -> %s" % (source_path.name, self.path + remote_filename), log.INFO)
        self.bucket.upload_local_file(source_path.name, self.path + remote_filename,
                                      content_type='application/pgp-encrypted',
                                      progress_listener=B2ProgressListener())

    def _list(self):
        """
        List files on remote server
        """
        # Strip the backend path prefix so callers see bare file names.
        return [file_version_info.file_name[len(self.path):]
                for (file_version_info, folder_name) in self.bucket.ls(self.path)]

    def _delete(self, filename):
        """
        Delete filename from remote server
        """
        log.Log("Delete: %s" % self.path + filename, log.INFO)
        file_version_info = self.file_info(self.path + filename)
        self.bucket.delete_file_version(file_version_info.id_, file_version_info.file_name)

    def _query(self, filename):
        """
        Get size info of filename
        """
        log.Log("Query: %s" % self.path + filename, log.INFO)
        file_version_info = self.file_info(self.path + filename)
        return {'size': file_version_info.size
                if file_version_info is not None and file_version_info.size is not None else -1}

    def file_info(self, filename):
        """
        Return the FileVersionInfo for ``filename``.

        :raises BackendException: when the file does not exist on the remote.
        """
        response = self.bucket.list_file_names(filename, 1)
        for entry in response['files']:
            file_version_info = b2.file_version.FileVersionInfoFactory.from_api_response(entry)
            if file_version_info.file_name == filename:
                return file_version_info
        raise BackendException('File not found')
# Make this backend available for "b2://" URLs.
duplicity.backend.register_backend("b2", B2Backend)
| mjuric/duplicity | duplicity/backends/b2backend.py | Python | gpl-2.0 | 5,646 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-06-07 23:30
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration altering ``project.name`` and the
    ``qualificationitem.qualification`` foreign key."""

    # NOTE(review): depends on migration 0100 -- confirm the migration graph
    # numbering is intentional.
    dependencies = [
        ('crowdsourcing', '0100_auto_20160627_2213'),
    ]

    operations = [
        migrations.AlterField(
            model_name='project',
            name='name',
            field=models.CharField(default=b'Untitled Project', error_messages={b'required': b'Please enter the project name!'}, max_length=256),
        ),
        migrations.AlterField(
            model_name='qualificationitem',
            name='qualification',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='items', to='crowdsourcing.Qualification'),
        ),
    ]
| shirishgoyal/crowdsource-platform | crowdsourcing/migrations/0093_auto_20160607_2330.py | Python | mit | 839 |
"""
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
This file is part of the Smart Developer Hub Project:
http://www.smartdeveloperhub.org
Center for Open Middleware
http://www.centeropenmiddleware.com/
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
Copyright (C) 2015 Center for Open Middleware.
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
"""
import pkg_resources
import logging
from rdflib import ConjunctiveGraph
import os
__author__ = 'Fernando Serena'

try:
    # Declare this package as a setuptools-style namespace package.
    pkg_resources.declare_namespace(__name__)
except ImportError:
    # setuptools unavailable: fall back to pkgutil-style namespace packages.
    import pkgutil
    __path__ = pkgutil.extend_path(__path__, __name__)

log = logging.getLogger('sdh.fragments')

# if not os.path.exists('store'):
# os.makedirs('store')

# Module-level triple store backed by Sleepycat files under ./store
# (created on first open).
cache = ConjunctiveGraph('Sleepycat')
cache.open('store', create=True)
cache.store.graph_aware = False

log.info('Ready')
| SmartDeveloperHub/sdh-fragment-server | sdh/fragments/__init__.py | Python | apache-2.0 | 1,569 |
from setuptools import setup, find_packages
# Project metadata is kept in standalone files alongside setup.py.
version = open('VERSION').read().strip()
license = open('LICENSE').read().strip()
# NOTE(review): ``license`` is the full text of the LICENSE file; setuptools
# expects a short identifier (e.g. "MIT") in the ``license`` argument --
# confirm this is intentional.

setup(
    name = 'python-package-boilerplate',
    version = version,
    license = license,
    author = 'Your Name Here',
    author_email = 'you@somewhere.com',
    url = 'http://www.somewhere.com',
    description = 'Lorem ipsum dolor fiat lux',
    long_description = open('README.md').read().strip(),
    packages = find_packages(),
    install_requires=[
        # put packages here
        'six',
    ],
    test_suite = 'tests',
    entry_points = {
        'console_scripts': [
            'packagename = packagename.__main__:main',
        ]
    }
)
| dagostinelli/python-package-boilerplate | setup.py | Python | unlicense | 678 |
# -*- coding: utf-8 -*-
import urllib as _ul
import pandas as _pd
from atmPy.general import measurement_site as _ms
def get_all_sites(url="https://www.esrl.noaa.gov/gmd/dv/site/"):
    """Scrape the NOAA GML site table and return it as a measurement Network.

    Parameters
    ----------
    url : str
        Address of the GML site listing page.

    Returns
    -------
    atmPy.general.measurement_site.Network
        Network built from all currently active (non-discontinued) sites.
    """
    # ``import urllib`` alone does not guarantee the ``request`` submodule
    # is loaded; import it explicitly here.
    from urllib import request as _request
    html = _request.urlopen(url).read()
    sites = _pd.read_html(html)[0]

    # Discontinued sites are marked with '*' in their code; drop them.
    sites = sites[~sites.apply(lambda row: '*' in row.Code, axis=1)].copy()

    # Split "Name, State" into separate columns; not every site has a state.
    sites['name'] = sites.apply(lambda row: row.Name.split(',')[0], axis=1)

    def _get_state(row):
        try:
            return row.Name.split(',')[1]
        except IndexError:
            return None

    sites['state'] = sites.apply(_get_state, axis=1)

    sites.rename({'Longitude': 'lon',
                  'Latitude': 'lat',
                  'Elevation (meters)': 'alt',
                  'Code': 'abbreviation',
                  'Country': 'country'}, axis=1, inplace=True)

    site_dict_list = [si.to_dict() for _, si in sites.iterrows()]
    return _ms.Network(site_dict_list)
# -*- coding: utf-8 -*-
"""
Created on Sat Feb 27 18:16:24 2015
@author: Eugenio Panadero
A raíz del cambio previsto:
DESCONEXIÓN DE LA WEB PÚBLICA CLÁSICA DE E·SIOS
La Web pública clásica de e·sios (http://www.esios.ree.es) será desconectada el día 29 de marzo de 2016.
Continuaremos ofreciendo servicio en la nueva Web del Operador del Sistema:
https://www.esios.ree.es.
Por favor, actualice sus favoritos apuntando a la nueva Web.
IMPORTANTE!!!
En la misma fecha (29/03/2016), también dejará de funcionar el servicio Solicitar y Descargar,
utilizado para descargar información de la Web pública clásica de e·sios.
Por favor, infórmese sobre descarga de información en
https://www.esios.ree.es/es/pagina/api
y actualice sus procesos de descarga.
"""
import json
import pandas as pd
import re
from dataweb.requestweb import get_data_en_intervalo
from esiosdata.esios_config import DATE_FMT, TZ, SERVER, HEADERS, D_TIPOS_REQ_DEM, KEYS_DATA_DEM
from esiosdata.prettyprinting import print_redb, print_err
__author__ = 'Eugenio Panadero'
__copyright__ = "Copyright 2015, AzogueLabs"
__credits__ = ["Eugenio Panadero"]
__license__ = "GPL"
__version__ = "1.0"
__maintainer__ = "Eugenio Panadero"
# Matches JSONP-style payloads "funcName({...});" into groups 'func' / 'json'.
# Raw string avoids the invalid-escape-sequence warning for '\(' on Python 3.6+.
RG_FUNC_CONTENT = re.compile(r'(?P<func>.*)\((?P<json>.*)\);')
def dem_url_dia(dt_day='2015-06-22'):
    """Return the download URLs for the energy-demand data of one day.

    :param dt_day: day as a 'YYYY-MM-DD' string or a datetime-like object
        exposing ``.date()``.
    :return: list of URLs, one per request type in ``D_TIPOS_REQ_DEM``.
    """

    def _url_tipo_dato(str_dia, k):
        url = SERVER + '/archives/{}/download_json?locale=es'.format(D_TIPOS_REQ_DEM[k])
        # isinstance instead of `type(...) is str` (idiomatic, subclass-safe).
        if isinstance(str_dia, str):
            return url + '&date=' + str_dia
        return url + '&date=' + str_dia.date().isoformat()

    return [_url_tipo_dato(dt_day, k) for k in D_TIPOS_REQ_DEM.keys()]
def _extract_func_json_data(data_raw):
    """Split a JSONP-style response ``func({...});`` into (func_name, payload).

    Returns ``(None, None)`` when the text does not match the expected shape
    or the decoded payload is not a mapping (any AttributeError raised while
    unpacking is treated as "no data").  A single-key JSON object is
    unwrapped to its only value.
    """
    try:
        parsed = RG_FUNC_CONTENT.match(data_raw).groupdict()
        name = parsed['func']
        payload = json.loads(parsed['json'])
        keys = list(payload.keys())
        if len(keys) == 1:
            # Unwrap single-key containers: {'values': [...]} -> [...]
            return name, payload[keys[0]]
        return name, payload
    except AttributeError:
        # No regex match, or the payload has no .keys() (e.g. a JSON list).
        return None, None
def _import_daily_max_min(data):
    """Build a one-row, day-indexed DataFrame from a daily max/min JSON dict.

    Handles both payload variants: one with timestamp columns named 'ts*'
    (e.g. 'tsMaxRenov', the renewables variant) and one carrying a plain
    'date' column.  Columns containing 'timeStamp' are parsed to Timestamps;
    every other column is cast to float.
    """
    # IND_MaxMinRenovEol / IND_MaxMin
    df = pd.DataFrame(data, index=[0])
    # Renewables payloads carry their date inside 'ts*' columns, not 'date'.
    cols_ts = df.columns.str.startswith('ts')
    is_max_min_renov = any(cols_ts)
    if is_max_min_renov:
        # Index the single row by the calendar day of the renewables maximum.
        df.index = pd.DatetimeIndex([pd.Timestamp(df['tsMaxRenov'][0]).date()], freq='D')
    else:
        # Index by the 'date' column, then drop it from the data columns.
        df = pd.DataFrame(df.set_index(pd.DatetimeIndex([pd.Timestamp(df['date'][0]).date()], freq='D')
                                       ).drop('date', axis=1))
    # NOTE(review): passing freq= to the DatetimeIndex constructor is
    # deprecated/removed in recent pandas — confirm the pinned pandas version.
    cols_ts = df.columns.str.contains('timeStamp', regex=False)
    for c, is_ts in zip(df.columns, cols_ts):
        if is_ts:
            df[c] = df[c].apply(pd.Timestamp)
        else:
            df[c] = df[c].astype(float)
    return df
def _import_json_ts_data(data):
    """Build a 10-minute, timezone-aware time-series DataFrame from JSON rows.

    Each row must carry a 'ts' timestamp; all remaining columns are cast to
    float.  On DST-change days the raw local timestamps are ambiguous or
    duplicated, which makes the first set_index raise ValueError; in that
    case the index is regenerated as a fixed 10-minute range starting at the
    first timestamp.
    """
    df = pd.DataFrame(data)
    try:
        return pd.DataFrame(df.set_index(pd.DatetimeIndex(df['ts'].apply(lambda x: pd.Timestamp(x, tz=TZ)),
                                                          freq='10T', tz=TZ), verify_integrity=True
                                         ).drop('ts', axis=1)).sort_index().applymap(float)
    except ValueError:  # Spanish DST day: ambiguous/duplicate local timestamps
        # Rebuild the index from the first timestamp with a fixed 10-min step.
        # NOTE(review): DatetimeIndex(start=..., periods=...) was removed in
        # pandas 0.25 (use pd.date_range) — confirm the pinned pandas version.
        df['ts'] = pd.DatetimeIndex(start=pd.Timestamp(df['ts'].iloc[0]), periods=len(df), freq='10T', tz=TZ)
        # , ambiguous="infer")
        return df.set_index('ts', verify_integrity=True).sort_index().applymap(float)
def dem_procesa_datos_dia(key_day, response):
    """Process the downloaded JSON responses of one day.

    :param key_day: day being processed (used only in error messages).
    :param response: list of raw JSONP response strings, one per data type.
    :return: ``(data_dict, status)`` — status 0 on success, -2 when nothing
        usable could be imported.
    """
    dfs_import, df_import, dfs_maxmin, hay_errores = [], None, [], 0
    for r in response:
        tipo_datos, data = _extract_func_json_data(r)
        if tipo_datos is not None:
            if ('IND_MaxMin' in tipo_datos) and data:
                # Daily max/min payload.
                df_import = _import_daily_max_min(data)
                dfs_maxmin.append(df_import)
            elif data:
                # 10-minute time-series payload.
                df_import = _import_json_ts_data(data)
                dfs_import.append(df_import)
        # NOTE(review): df_import keeps its value from a previous iteration,
        # so a response with valid tipo_datos but empty data may not be
        # counted as an error — confirm this is intended.
        if tipo_datos is None or df_import is None:
            hay_errores += 1
    if hay_errores == 4:
        # Nothing at all for this day: early exit, no retry.
        print_redb('** No hay datos para el día {}!'.format(key_day))
        return None, -2
    else:  # if hay_errores < 3:
        # TODO build partial data (max-min with NaN's, etc.)
        data_import = {}
        if dfs_import:
            # Join the two time-series frames (demand + generation types).
            data_import[KEYS_DATA_DEM[0]] = dfs_import[0].join(dfs_import[1])
        if len(dfs_maxmin) == 2:
            data_import[KEYS_DATA_DEM[1]] = dfs_maxmin[0].join(dfs_maxmin[1])
        elif dfs_maxmin:
            data_import[KEYS_DATA_DEM[1]] = dfs_maxmin[0]
        if not data_import:
            print_err('DÍA: {} -> # ERRORES: {}'.format(key_day, hay_errores))
            return None, -2
        return data_import, 0
def dem_data_dia(str_dia='2015-10-10', str_dia_fin=None):
    """Download energy-demand data for one day or a day interval from the web.

    :param str_dia: first (or only) day, 'YYYY-MM-DD'.
    :param str_dia_fin: optional last day of the interval.
    :return: the downloaded data, or None when the download reported errors.
    """
    params = dict(date_fmt=DATE_FMT,
                  usar_multithread=False,
                  num_retries=1,
                  timeout=10,
                  func_procesa_data_dia=dem_procesa_datos_dia,
                  func_url_data_dia=dem_url_dia,
                  data_extra_request={'json_req': False, 'headers': HEADERS})
    if str_dia_fin is None:
        # Single-day request: start and end are the same date.
        str_dia_fin = str_dia
    else:
        # Interval request: multithreading speeds up the multi-day download.
        params['usar_multithread'] = True
    data, hay_errores, str_import = get_data_en_intervalo(str_dia, str_dia_fin, **params)
    if hay_errores:
        print_err(str_import)
        return None
    return data
| azogue/esiosdata | esiosdata/importdemdata.py | Python | mit | 5,807 |
#! /usr/bin/env python2.5
# -*- coding: utf-8 -*-
"""Script to run the pybrain testsuite."""
__author__ = 'Justin Bayer, bayerj@in.tum.de'
__version__ = '$Id$'
import doctest
import logging
import os
import sys
from unittest import TestLoader, TestSuite, TextTestRunner
def setUpLogging():
    """Configure root logging at INFO level with a '<LEVEL> <message>' format."""
    fmt = '%(levelname)s %(message)s'
    logging.basicConfig(level=logging.INFO, format=fmt)
def testImport(module_name):
"""Tell wether a module can be imported.
This function has a cache, so modules are only tested once on
importability.
"""
try:
return testImport.cache[module_name]
except KeyError:
try:
__import__(module_name)
except ImportError:
result = False
else:
result = True
testImport.cache[module_name] = result
return result
testImport.cache = {} # Import checks are expensive, so cache results
def missingDependencies(target_module):
    """Return the declared dependencies of *target_module* that cannot be
    imported by the current interpreter.

    The code of the module is not inspected; instead the module may declare
    a list of module-name strings called ``_dependencies``.  A module with
    no such attribute has no missing dependencies.
    """
    declared = getattr(target_module, '_dependencies', [])
    return [name for name in declared if not testImport(name)]
def make_test_suite():
    """Discover all unittest modules under 'unittests/' and bundle them,
    together with their doctests, into one TestSuite.

    Exits the interpreter when no test modules are found; modules whose
    declared dependencies cannot be imported are reported and skipped.
    """
    # Directory of the tests, relative to the directory of this file.
    testdir = os.path.join(*(list(os.path.split(__file__)[:-1]) + ['unittests/']))

    # Unittest modules must be python files whose names start with 'test_'.
    module_names = [fname[:-3] for fname in os.listdir(testdir)
                    if fname.startswith('test_') and fname.endswith('.py')]
    if not module_names:
        logging.info("No tests found in %s" % testdir)
        sys.exit()

    # "Magically" import the tests package together with the found modules.
    test_package = __import__('rnn.tests.unittests', fromlist=module_names)
    candidates = (getattr(test_package, name) for name in module_names)

    # Pair each module with the dependencies it is missing.
    annotated = [(mod, missingDependencies(mod)) for mod in candidates]
    modules = [mod for mod, missing in annotated if not missing]

    # Report the modules that cannot be tested.
    for mod, missing in annotated:
        if missing:
            logging.warning("Module %s is missing dependencies: %s" % (
                mod.__name__, ', '.join(missing)))

    # Report the tests that will be run.
    for mod in modules:
        logging.info("Tests found: %s" % mod.__name__)

    # Build the suite from the unit tests ...
    suite = TestSuite([TestLoader().loadTestsFromModule(mod) for mod in modules])

    # ... and add any doctests found in the same modules.
    optionflags = doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE
    for mod in modules:
        try:
            suite.addTest(doctest.DocTestSuite(mod, optionflags=optionflags))
        except ValueError:
            pass  # Module contains no doctests.
    return suite
if __name__ == "__main__":
    # Configure logging, then run the whole discovered suite with the
    # plain-text unittest runner.
    setUpLogging()
    runner = TextTestRunner()
    runner.run(make_test_suite())
| bayerj/theano-rnn | rnn/tests/runtests.py | Python | bsd-3-clause | 3,491 |
# ***** BEGIN GPL LICENSE BLOCK *****
#
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ***** END GPL LICENCE BLOCK *****
# PEP8 compliant (https://www.python.org/dev/peps/pep-0008)
# ----------------------------------------------------------
# File: window_panel.py
# Main panel for windows
# Author: Antonio Vazquez (antonioya)
#
# This code is base on the windows generator add-on created by SayProduction
# and has been adapted to continuous editing and cycles materials
#
# ----------------------------------------------------------
# noinspection PyUnresolvedReferences
import bpy
from window_tools import remove_doubles, set_normals, create_diffuse_material, create_glass_material
from math import pi, sin, cos, sqrt
def fitil(vr, fc, px, pz, x, y, z, zz, xx):
k3 = z * 2
vr.extend([[px[x] + xx, -z + zz, pz[y] + xx], [px[x] + xx + k3, -z + zz, pz[y] + xx + k3],
[px[x] + xx + k3, z + zz, pz[y] + xx + k3], [px[x] + xx, z + zz, pz[y] + xx]])
vr.extend([[px[x] + xx, -z + zz, pz[y + 1] - xx], [px[x] + xx + k3, -z + zz, pz[y + 1] - xx - k3],
[px[x] + xx + k3, z + zz, pz[y + 1] - xx - k3], [px[x] + xx, z + zz, pz[y + 1] - xx]])
vr.extend([[px[x + 1] - xx, -z + zz, pz[y + 1] - xx], [px[x + 1] - xx - k3, -z + zz, pz[y + 1] - xx - k3],
[px[x + 1] - xx - k3, z + zz, pz[y + 1] - xx - k3], [px[x + 1] - xx, z + zz, pz[y + 1] - xx]])
vr.extend([[px[x + 1] - xx, -z + zz, pz[y] + xx], [px[x + 1] - xx - k3, -z + zz, pz[y] + xx + k3],
[px[x + 1] - xx - k3, z + zz, pz[y] + xx + k3], [px[x + 1] - xx, z + zz, pz[y] + xx]])
n = len(vr)
fc.extend([[n - 16, n - 15, n - 11, n - 12], [n - 15, n - 14, n - 10, n - 11], [n - 14, n - 13, n - 9, n - 10]])
fc.extend([[n - 12, n - 11, n - 7, n - 8], [n - 11, n - 10, n - 6, n - 7], [n - 10, n - 9, n - 5, n - 6]])
fc.extend([[n - 8, n - 7, n - 3, n - 4], [n - 7, n - 6, n - 2, n - 3], [n - 6, n - 5, n - 1, n - 2]])
fc.extend([[n - 4, n - 3, n - 15, n - 16], [n - 3, n - 2, n - 14, n - 15], [n - 2, n - 1, n - 13, n - 14]])
z = 0.005
vr.extend([[px[x] + xx + k3, -z + zz, pz[y] + xx + k3], [px[x] + xx + k3, -z + zz, pz[y + 1] - xx - k3],
[px[x + 1] - xx - k3, -z + zz, pz[y + 1] - xx - k3], [px[x + 1] - xx - k3, -z + zz, pz[y] + xx + k3]])
vr.extend([[px[x] + xx + k3, z + zz, pz[y] + xx + k3], [px[x] + xx + k3, z + zz, pz[y + 1] - xx - k3],
[px[x + 1] - xx - k3, z + zz, pz[y + 1] - xx - k3], [px[x + 1] - xx - k3, z + zz, pz[y] + xx + k3]])
fc.extend([[n + 1, n + 0, n + 3, n + 2], [n + 4, n + 5, n + 6, n + 7]])
def kapak(vr, fc, px, pz, x, y, z, zz):
k2 = z * 2
vr.extend(
[[px[x], -z + zz, pz[y]], [px[x] + k2, -z + zz, pz[y] + k2], [px[x] + k2, z + zz, pz[y] + k2],
[px[x], z + zz, pz[y]]])
vr.extend([[px[x], -z + zz, pz[y + 1]], [px[x] + k2, -z + zz, pz[y + 1] - k2], [px[x] + k2, z + zz, pz[y + 1] - k2],
[px[x], z + zz, pz[y + 1]]])
vr.extend(
[[px[x + 1], -z + zz, pz[y + 1]], [px[x + 1] - k2, -z + zz, pz[y + 1] - k2],
[px[x + 1] - k2, z + zz, pz[y + 1] - k2],
[px[x + 1], z + zz, pz[y + 1]]])
vr.extend([[px[x + 1], -z + zz, pz[y]], [px[x + 1] - k2, -z + zz, pz[y] + k2], [px[x + 1] - k2, z + zz, pz[y] + k2],
[px[x + 1], z + zz, pz[y]]])
n = len(vr)
fc.extend([[n - 16, n - 15, n - 11, n - 12], [n - 15, n - 14, n - 10, n - 11], [n - 14, n - 13, n - 9, n - 10],
[n - 13, n - 16, n - 12, n - 9]])
fc.extend([[n - 12, n - 11, n - 7, n - 8], [n - 11, n - 10, n - 6, n - 7], [n - 10, n - 9, n - 5, n - 6],
[n - 9, n - 12, n - 8, n - 5]])
fc.extend([[n - 8, n - 7, n - 3, n - 4], [n - 7, n - 6, n - 2, n - 3], [n - 6, n - 5, n - 1, n - 2],
[n - 5, n - 8, n - 4, n - 1]])
fc.extend([[n - 4, n - 3, n - 15, n - 16], [n - 3, n - 2, n - 14, n - 15], [n - 2, n - 1, n - 13, n - 14],
[n - 1, n - 4, n - 16, n - 13]])
# -----------------------------------------
# Set default values for each window type
# -----------------------------------------
def set_defaults(s):
    """Load the preset parameter values for the selected window type.

    ``s.prs`` selects the preset ('1'..'8'); any other value leaves *s*
    untouched.  Attributes are assigned in the same order as the original
    per-preset branches did, so property-update callbacks fire identically.
    """
    presets = {
        '1': [('gen', 3), ('yuk', 1), ('kl1', 5), ('kl2', 5), ('fk', 2),
              ('gny0', 190), ('mr', True), ('gnx0', 60), ('gnx1', 110),
              ('gnx2', 60), ('k00', True), ('k01', False), ('k02', True)],
        '2': [('gen', 3), ('yuk', 1), ('kl1', 5), ('kl2', 5), ('fk', 2),
              ('gny0', 190), ('mr', True), ('gnx0', 60), ('gnx1', 60),
              ('gnx2', 60), ('k00', True), ('k01', False), ('k02', True)],
        '3': [('gen', 3), ('yuk', 1), ('kl1', 5), ('kl2', 5), ('fk', 2),
              ('gny0', 190), ('mr', True), ('gnx0', 55), ('gnx1', 50),
              ('gnx2', 55), ('k00', True), ('k01', False), ('k02', True)],
        '4': [('gen', 3), ('yuk', 1), ('kl1', 5), ('kl2', 5), ('fk', 2),
              ('gny0', 150), ('mr', True), ('gnx0', 55), ('gnx1', 50),
              ('gnx2', 55), ('k00', True), ('k01', False), ('k02', True)],
        '5': [('gen', 3), ('yuk', 1), ('kl1', 5), ('kl2', 5), ('fk', 2),
              ('gny0', 150), ('mr', True), ('gnx0', 50), ('gnx1', 40),
              ('gnx2', 50), ('k00', True), ('k01', False), ('k02', True)],
        '6': [('gen', 1), ('yuk', 1), ('kl1', 5), ('kl2', 5), ('fk', 2),
              ('gny0', 40), ('mr', True), ('gnx0', 40), ('k00', False)],
        '7': [('gen', 1), ('yuk', 2), ('kl1', 5), ('kl2', 5), ('fk', 2),
              ('gny0', 195), ('gny1', 40), ('gnx0', 70), ('k00', True),
              ('k10', False), ('mr', False)],
        '8': [('gen', 1), ('yuk', 2), ('kl1', 5), ('kl2', 5), ('fk', 2),
              ('gny0', 180), ('gny1', 35), ('gnx0', 70), ('k00', True),
              ('k10', False), ('mr', False)],
    }
    for attr, value in presets.get(s.prs, []):
        setattr(s, attr, value)
# ------------------------------------------------------------------
# Define panel class for main functions.
# ------------------------------------------------------------------
class WindowMainPanel(bpy.types.Panel):
    """Tool-shelf panel (3D View > Tools > 'Window') hosting the create button."""
    bl_idname = "window_main_panel"
    bl_label = "Window"
    bl_space_type = 'VIEW_3D'
    bl_region_type = "TOOLS"
    bl_category = 'Window'

    # ------------------------------
    # Draw UI
    # ------------------------------
    def draw(self, context):
        """Draw one row: the creation operator button and a scene property."""
        layout = self.layout
        scene = context.scene
        row = layout.row()
        row.operator("window.run_action", icon='MOD_LATTICE')
        # NOTE(review): 'window_from' must be a Scene property registered
        # elsewhere in the add-on — confirm at registration time.
        row.prop(scene, "window_from")
# ------------------------------------------------------
# Button Action class
#
# ------------------------------------------------------
class RunAction(bpy.types.Operator):
    bl_idname = "window.run_action"
    bl_label = "Window"
    bl_description = "Create a Window with continuous editable parameters"

    # ------------------------------
    # Execute
    # ------------------------------
    # noinspection PyUnusedLocal,PyMethodMayBeStatic
    def execute(self, context):
        """Create a new parametric window object and report success."""
        create_window()
        return {'FINISHED'}
# ------------------------------------------------------------------------------
# Create the window
# ------------------------------------------------------------------------------
def create_window():
    """Create a new parametric window object at the 3D-cursor position.

    Builds an empty mesh/object pair, links it to the scene, attaches the
    WindowGenerator property group that drives later re-generation,
    generates the geometry and leaves the object selected and active.
    """
    # deselect all objects
    for o in bpy.data.objects:
        o.select = False
    # Create main object
    window_mesh = bpy.data.meshes.new("Window")
    window_object = bpy.data.objects.new("Window", window_mesh)
    # Link object to scene
    bpy.context.scene.objects.link(window_object)
    # Attach the parameter group used by do_mesh()/update_window().
    window_object.WindowGenerator.add()
    # Shape the mesh.
    do_mesh(window_object)
    # Select, and activate object
    window_object.select = True
    bpy.context.scene.objects.active = window_object
    # Drop the new window at the current 3D-cursor location.
    bpy.context.scene.objects.active.location = bpy.context.scene.cursor_location
# ------------------------------------------------------------------------------
# Update mesh of the window
# ------------------------------------------------------------------------------
# noinspection PyUnusedLocal
def update_window(self, context):
    """Regenerate the active window object's mesh after a parameter change.

    The old mesh datablock is discarded, a fresh one is assigned to the
    same object, and the geometry is rebuilt from the current parameters.
    """
    # When update, the active object is the main object.
    o = bpy.context.active_object
    # Now deselect that object to not delete it.
    o.select = False
    # Remove data (mesh of active object),
    o.data.user_clear()
    bpy.data.meshes.remove(o.data)
    # and create a new mesh for the object:
    objmesh = bpy.data.meshes.new("Window")
    o.data = objmesh
    # Fake user keeps the fresh mesh datablock alive across saves.
    o.data.use_fake_user = True
    # deselect all objects
    for obj in bpy.data.objects:
        obj.select = False
    # Finally create all that again
    do_mesh(o, True)
    # and select, and activate, the object.
    o.select = True
    bpy.context.scene.objects.active = o
# ------------------------------------------------------------------------------
# Generate object
# For object, it only shapes mesh
# ------------------------------------------------------------------------------
# noinspection PyUnusedLocal
def do_mesh(myobject, update=False):
    """Shape *myobject*'s mesh from its WindowGenerator parameters.

    The object itself must already exist; only its mesh data is rebuilt,
    then cleaned up (doubles removed, normals recalculated).
    """
    op = myobject.WindowGenerator[0]
    # Create only mesh, because the object was created before.
    generate_window_object(op, myobject.data)
    # refine unit
    remove_doubles(myobject)
    set_normals(myobject)
# ------------------------------------------------------------------------------
# Update the parameters using a default value
# ------------------------------------------------------------------------------
# noinspection PyUnusedLocal
def update_using_default(self, context):
    """Apply the preset defaults when the selected preset ('prs') changes.

    'son' remembers the last applied preset, so the defaults are loaded
    only once per change of selection.
    """
    props = context.object.WindowGenerator[0]
    if props.son == props.prs:
        return
    set_defaults(props)
    props.son = props.prs
# ------------------------------------------------------------------------------
# Generate window object
# ------------------------------------------------------------------------------
def generate_window_object(op, mymesh):
    """Fill *mymesh* with the window geometry and assign its materials.

    Material slots: 0 = frame (mt1), 1 = sash (mt2), 2 = glass, and 3 =
    marble sill when ``op.mr`` is enabled.  The index lists returned by
    generate_vertex_data select which polygons use slots 1-3, and which
    polygons are shaded smooth.
    """
    myvertex = []
    myfaces = []
    # presumably ft1=sash faces, cam=glass faces, mer=marble faces,
    # sm=smooth faces — confirm against generate_vertex_data.
    ft1, cam, mer, sm = generate_vertex_data(op, myvertex, myfaces)
    mymesh.from_pydata(myvertex, [], myfaces)
    # Assign materials
    if op.mt1 == '1':
        mymesh.materials.append(create_diffuse_material("PVC", False, 1, 1, 1, 1, 1, 1))
    elif op.mt1 == '2':
        mymesh.materials.append(create_diffuse_material("Wood", False, 0.3, 0.2, 0.1, 0.3, 0.2, 0.1))
    elif op.mt1 == '3':
        mymesh.materials.append(create_diffuse_material("Plastic", False, 0, 0, 0, 0, 0, 0))

    if op.mt2 == '1':
        mymesh.materials.append(create_diffuse_material("PVC", False, 1, 1, 1, 1, 1, 1))
    elif op.mt2 == '2':
        mymesh.materials.append(create_diffuse_material("Wood", False, 0.3, 0.2, 0.1, 0.3, 0.2, 0.1))
    elif op.mt2 == '3':
        mymesh.materials.append(create_diffuse_material("Plastic", False, 0, 0, 0, 0, 0, 0))

    mymesh.materials.append(create_glass_material("Glass", False))
    # Optional marble sill material (slot 3).
    if op.mr is True:
        mymesh.materials.append(create_diffuse_material("Marble", False, 0.9, 0.8, 0.7, 0.9, 0.8, 0.7))

    # Apply the per-polygon material indices and smooth-shading flags.
    for i in ft1:
        mymesh.polygons[i].material_index = 1
    for i in cam:
        mymesh.polygons[i].material_index = 2
    for i in mer:
        mymesh.polygons[i].material_index = 3

    for i in sm:
        mymesh.polygons[i].use_smooth = 1

    mymesh.update(calc_edges=True)

    return
# -----------------------------------------
# Generate vertex and faces data
# -----------------------------------------
def generate_vertex_data(op, myvertex, myfaces):
h1 = 0
c = 0
t1 = 0
mx = op.gen
my = op.yuk
k1 = op.kl1 / 100
k2 = op.kl2 / 100
k3 = op.fk / 200
res = op.res
u = op.kl1 / 100
xlist = [0, round(u, 2)]
if mx > 0:
u += op.gnx0 / 100
xlist.append(round(u, 2))
u += k2
xlist.append(round(u, 2))
if mx > 1:
u += op.gnx1 / 100
xlist.append(round(u, 2))
u += k2
xlist.append(round(u, 2))
if mx > 2:
u += op.gnx2 / 100
xlist.append(round(u, 2))
u += k2
xlist.append(round(u, 2))
if mx > 3:
u += op.gnx3 / 100
xlist.append(round(u, 2))
u += k2
xlist.append(round(u, 2))
if mx > 4:
u += op.gnx4 / 100
xlist.append(round(u, 2))
u += k2
xlist.append(round(u, 2))
if mx > 5:
u += op.gnx5 / 100
xlist.append(round(u, 2))
u += k2
xlist.append(round(u, 2))
if mx > 6:
u += op.gnx6 / 100
xlist.append(round(u, 2))
u += k2
xlist.append(round(u, 2))
if mx > 7:
u += op.gnx7 / 100
xlist.append(round(u, 2))
u += k2
xlist.append(round(u, 2))
xlist[-1] = xlist[-2] + k1
u = op.kl1 / 100
zlist = [0, round(u, 2)]
if my > 0:
u += op.gny0 / 100
zlist.append(round(u, 2))
u += k2
zlist.append(round(u, 2))
if my > 1:
u += op.gny1 / 100
zlist.append(round(u, 2))
u += k2
zlist.append(round(u, 2))
if my > 2:
u += op.gny2 / 100
zlist.append(round(u, 2))
u += k2
zlist.append(round(u, 2))
if my > 3:
u += op.gny3 / 100
zlist.append(round(u, 2))
u += k2
zlist.append(round(u, 2))
if my > 4:
u += op.gny4 / 100
zlist.append(round(u, 2))
u += k2
zlist.append(round(u, 2))
zlist[-1] = zlist[-2] + k1
u = xlist[-1] / 2
for i in range(0, len(xlist)):
xlist[i] -= u
kx = [[op.k00, op.k10, op.k20, op.k30, op.k40],
[op.k01, op.k11, op.k21, op.k31, op.k41],
[op.k02, op.k12, op.k22, op.k32, op.k42],
[op.k03, op.k13, op.k23, op.k33, op.k43],
[op.k04, op.k14, op.k24, op.k34, op.k44],
[op.k05, op.k15, op.k25, op.k35, op.k45],
[op.k06, op.k16, op.k26, op.k36, op.k46],
[op.k07, op.k17, op.k27, op.k37, op.k47]]
cam = []
mer = []
ftl = []
sm = []
# -------------------------
# VERTICES
# -------------------------
myvertex.extend([[xlist[0], -k1 / 2, zlist[0]], [xlist[0], k1 / 2, zlist[0]]])
for x in range(1, len(xlist) - 1):
myvertex.extend([[xlist[x], -k1 / 2, zlist[1]], [xlist[x], k1 / 2, zlist[1]]])
myvertex.extend([[xlist[-1], -k1 / 2, zlist[0]], [xlist[-1], k1 / 2, zlist[0]]])
for z in range(2, len(zlist) - 2, 2):
for x in range(0, len(xlist)):
myvertex.extend([[xlist[x], -k1 / 2, zlist[z]], [xlist[x], k1 / 2, zlist[z]]])
for x in range(0, len(xlist)):
myvertex.extend([[xlist[x], -k1 / 2, zlist[z + 1]], [xlist[x], k1 / 2, zlist[z + 1]]])
z = len(zlist) - 2
myvertex.extend([[xlist[0], -k1 / 2, zlist[z + 1]], [xlist[0], k1 / 2, zlist[z + 1]]])
alt = []
ust = [len(myvertex) - 2, len(myvertex) - 1]
for x in range(1, len(xlist) - 1):
myvertex.extend([[xlist[x], -k1 / 2, zlist[z]], [xlist[x], k1 / 2, zlist[z]]])
alt.extend([len(myvertex) - 2, len(myvertex) - 1])
myvertex.extend([[xlist[-1], -k1 / 2, zlist[z + 1]], [xlist[-1], k1 / 2, zlist[z + 1]]])
son = [len(myvertex) - 2, len(myvertex) - 1]
# -------------------------
# FACES
# -------------------------
myfaces.append([0, 1, 3 + mx * 4, 2 + mx * 4])
fb = [0]
fr = [1]
for i in range(0, mx * 4, 4):
myfaces.append([i + 3, i + 2, i + 4, i + 5])
fb.extend([i + 2, i + 4])
fr.extend([i + 3, i + 5])
fr.append(3 + mx * 4)
fb.append(2 + mx * 4)
fb.reverse()
myfaces.extend([fb, fr])
# Yatay
y = (mx * 4 + 4)
v = mx * 4 + 2
for z in range(0, (my - 1) * y * 2, y * 2):
myfaces.extend([[z + y + 1, z + y, z + y + 4 + mx * 4, z + y + 5 + mx * 4],
[z + y + v, z + y + v + 1, z + y + v + 5 + mx * 4, z + y + v + 4 + mx * 4]])
for i in range(0, mx * 4 + 2, 2):
myfaces.extend([[z + i + y + 0, z + i + y + 2, z + i + y + v + 4, z + i + y + v + 2],
[z + i + y + 3, z + i + y + 1, z + i + y + v + 3, z + i + y + v + 5]])
for i in range(0, mx * 4 - 3, 4):
myfaces.extend([[z + i + y + 2, z + i + y + 3, z + i + y + 5, z + i + y + 4],
[z + i + y + v + 5, z + i + y + v + 4, z + i + y + v + 6,
z + i + y + v + 7]])
# Dikey
for y in range(0, my):
z = y * (mx * 4 + 4) * 2
for i in range(0, mx * 4 + 2, 4):
myfaces.extend([[z + i + 1, z + i + 0, z + i + v + 2, z + i + v + 3],
[z + i + 3, z + i + 1, z + i + v + 3, z + i + v + 5],
[z + i + 2, z + i + 3, z + i + v + 5, z + i + v + 4],
[z + i + 0, z + i + 2, z + i + v + 4, z + i + v + 2]])
# Fitil
if op.UST == '1':
y1 = my
else:
y1 = my - 1
for y in range(0, y1):
for x in range(0, mx):
if kx[x][y] is True:
kapak(myvertex, myfaces, xlist, zlist, x * 2 + 1, y * 2 + 1, k2 / 2, (k1 + k2) * 0.5 - 0.01)
fitil(myvertex, myfaces, xlist, zlist, x * 2 + 1, y * 2 + 1, k3, (k1 + k2) * 0.5 - 0.01, k2)
else:
fitil(myvertex, myfaces, xlist, zlist, x * 2 + 1, y * 2 + 1, k3, 0, 0)
m = len(myfaces)
cam.extend([m - 1, m - 2])
ftl.extend([m - 3, m - 4, m - 5, m - 6, m - 7, m - 8, m - 9, m - 10, m - 11, m - 12, m - 13, m - 14])
# -----------------------------------------------------
if op.UST == '1': # Duz
myfaces.append([ust[1], ust[0], son[0], son[1]])
for i in range(0, mx * 4, 4):
myfaces.append([alt[i], alt[i + 1], alt[i + 3], alt[i + 2]])
on = [ust[0]]
ar = [ust[1]]
for i in range(0, len(alt) - 1, 2):
on.append(alt[i])
ar.append(alt[i + 1])
on.append(son[0])
myfaces.append(on)
ar.append(son[1])
ar.reverse()
myfaces.append(ar)
elif op.UST == '2': # Yay
if op.DT2 == '1':
h1 = op.VL1 / 100
if h1 < 0.01:
h1 = 0.01
op.VL1 = 1
elif h1 >= u:
h1 = u - 0.01
op.VL1 = h1 * 100
h = sqrt(u ** 2 + h1 ** 2) / 2
e = h * (u / h1)
c = sqrt(h ** 2 + e ** 2)
t1 = zlist[-1] - h1
elif op.DT2 == '2':
c = op.VL2 / 100
if c < u + 0.01:
c = u + 0.01
op.VL2 = c * 100
t1 = sqrt(c ** 2 - u ** 2) + zlist[-1] - c
r = c - k1
z = zlist[-1] - c
myvertex[ust[0]][2] = t1
myvertex[ust[1]][2] = t1
myvertex[son[0]][2] = t1
myvertex[son[1]][2] = t1
for i in alt:
myvertex[i][2] = sqrt(r ** 2 - myvertex[i][0] ** 2) + z
on = [son[0]]
u1 = []
for i in range(0, res):
a = i * pi / res
x = cos(a) * c
if -u < x < u:
myvertex.append([x, -k1 / 2, sin(a) * c + z])
on.append(len(myvertex) - 1)
u1.extend(on)
u1.append(ust[0])
on.extend([ust[0], alt[0]])
ar = []
d1 = []
d2 = []
for i in range(0, len(alt) - 2, 4):
x1 = myvertex[alt[i + 0]][0]
x2 = myvertex[alt[i + 2]][0]
on.append(alt[i + 0])
ar.append(alt[i + 1])
t1 = [alt[i + 0]]
t2 = [alt[i + 1]]
for j in range(0, res):
a = j * pi / res
x = -cos(a) * r
if x1 < x < x2:
myvertex.extend([[x, -k1 / 2, sin(a) * r + z], [x, k1 / 2, sin(a) * r + z]])
on.append(len(myvertex) - 2)
ar.append(len(myvertex) - 1)
t1.append(len(myvertex) - 2)
t2.append(len(myvertex) - 1)
on.append(alt[i + 2])
ar.append(alt[i + 3])
t1.append(alt[i + 2])
t2.append(alt[i + 3])
d1.append(t1)
d2.append(t2)
ar.append(son[1])
u2 = [son[1]]
for i in range(0, res):
a = i * pi / res
x = cos(a) * c
if -u < x < u:
myvertex.append([x, k1 / 2, sin(a) * c + z])
ar.append(len(myvertex) - 1)
u2.append(len(myvertex) - 1)
ar.append(ust[1])
u2.append(ust[1])
ar.reverse()
myfaces.extend([on, ar])
for i in range(0, len(u1) - 1):
myfaces.append([u1[i + 1], u1[i], u2[i], u2[i + 1]])
sm.append(len(myfaces) - 1)
for a in range(0, mx):
for i in range(0, len(d1[a]) - 1):
myfaces.append([d1[a][i + 1], d1[a][i], d2[a][i], d2[a][i + 1]])
sm.append(len(myfaces) - 1)
y = my - 1
for x in range(0, mx):
if kx[x][y] is True:
fr = (k1 + k2) * 0.5 - 0.01
ek = k2
r = c - k1
k = r - k2
x1 = xlist[x * 2 + 1]
x2 = xlist[x * 2 + 2]
myvertex.extend([[x2, fr - k2 / 2, z + 1], [x2 - k2, fr - k2 / 2, z + 1],
[x2 - k2, fr + k2 / 2, z + 1],
[x2, fr + k2 / 2, z + 1]])
myvertex.extend([[x2, fr - k2 / 2, zlist[-3]], [x2 - k2, fr - k2 / 2, zlist[-3] + k2],
[x2 - k2, fr + k2 / 2,
zlist[-3] + k2],
[x2, fr + k2 / 2, zlist[-3]]])
myvertex.extend([[x1, fr - k2 / 2, zlist[-3]], [x1 + k2, fr - k2 / 2, zlist[-3] + k2],
[x1 + k2, fr + k2 / 2,
zlist[-3] + k2],
[x1, fr + k2 / 2, zlist[-3]]])
myvertex.extend([[x1, fr - k2 / 2, z + 1], [x1 + k2, fr - k2 / 2, z + 1],
[x1 + k2, fr + k2 / 2, z + 1],
[x1, fr + k2 / 2, z + 1]])
n = len(myvertex)
myfaces.extend([[n - 16, n - 15, n - 11, n - 12], [n - 15, n - 14, n - 10, n - 11],
[n - 14, n - 13, n - 9, n - 10], [n - 13, n - 16, n - 12, n - 9]])
myfaces.extend(
[[n - 12, n - 11, n - 7, n - 8], [n - 11, n - 10, n - 6, n - 7], [n - 10, n - 9, n - 5, n - 6],
[n - 9, n - 12, n - 8, n - 5]])
myfaces.extend(
[[n - 8, n - 7, n - 3, n - 4], [n - 7, n - 6, n - 2, n - 3], [n - 6, n - 5, n - 1, n - 2],
[n - 5, n - 8, n - 4, n - 1]])
alt = [n - 16, n - 15, n - 14, n - 13, n - 4, n - 3, n - 2, n - 1]
myvertex[alt[0]][2] = sqrt(r ** 2 - myvertex[alt[0]][0] ** 2) + z
myvertex[alt[1]][2] = sqrt(k ** 2 - myvertex[alt[1]][0] ** 2) + z
myvertex[alt[2]][2] = sqrt(k ** 2 - myvertex[alt[2]][0] ** 2) + z
myvertex[alt[3]][2] = sqrt(r ** 2 - myvertex[alt[3]][0] ** 2) + z
myvertex[alt[4]][2] = sqrt(r ** 2 - myvertex[alt[4]][0] ** 2) + z
myvertex[alt[5]][2] = sqrt(k ** 2 - myvertex[alt[5]][0] ** 2) + z
myvertex[alt[6]][2] = sqrt(k ** 2 - myvertex[alt[6]][0] ** 2) + z
myvertex[alt[7]][2] = sqrt(r ** 2 - myvertex[alt[7]][0] ** 2) + z
d1 = []
d2 = []
t1 = []
t2 = []
for i in range(0, res):
a = i * pi / res
y1 = cos(a) * r
y2 = -cos(a) * k
if x1 < y1 < x2:
myvertex.extend([[y1, fr - k2 / 2, sin(a) * r + z], [y1, fr + k2 / 2, sin(a) * r + z]])
t1.append(len(myvertex) - 2)
t2.append(len(myvertex) - 1)
if x1 + k2 < y2 < x2 - k2:
myvertex.extend([[y2, fr - k2 / 2, sin(a) * k + z], [y2, fr + k2 / 2, sin(a) * k + z]])
d1.append(len(myvertex) - 2)
d2.append(len(myvertex) - 1)
on = [alt[1], alt[0]]
on.extend(t1)
on.extend([alt[4], alt[5]])
on.extend(d1)
ar = [alt[2], alt[3]]
ar.extend(t2)
ar.extend([alt[7], alt[6]])
ar.extend(d2)
ar.reverse()
if d1 == [] and t1 == []:
myfaces.extend([on, ar, [alt[5], alt[6], alt[2], alt[1]], [alt[7], alt[4], alt[0], alt[
3]]])
m = len(myfaces)
sm.extend(
[m - 1, m - 2])
elif d1 == [] and t1 != []:
myfaces.extend([on, ar, [alt[5], alt[6], alt[2], alt[1]], [alt[7], alt[4], t1[-1], t2[-1]],
[alt[0], alt[3], t2[0], t1[0]]])
m = len(myfaces)
sm.extend(
[m - 1, m - 2, m - 3])
elif d1 != [] and t1 == []:
myfaces.extend([on, ar, [alt[5], alt[6], d2[0], d1[0]], [alt[2], alt[1], d1[-1], d2[-1]],
[alt[7], alt[4], alt[0], alt[3]]])
m = len(myfaces)
sm.extend(
[m - 1, m - 2, m - 3])
else:
myfaces.extend([on, ar, [alt[5], alt[6], d2[0], d1[0]], [alt[2], alt[1], d1[-1], d2[-1]],
[alt[7], alt[4], t1[-1], t2[-1]], [alt[0], alt[3], t2[0], t1[0]]])
m = len(myfaces)
sm.extend(
[m - 1, m - 2, m - 3, m - 4])
for i in range(0, len(d1) - 1):
myfaces.append([d1[i + 1], d1[i], d2[i], d2[i + 1]])
sm.append(len(myfaces) - 1)
for i in range(0, len(t1) - 1):
myfaces.append([t1[i + 1], t1[i], t2[i], t2[i + 1]])
sm.append(len(myfaces) - 1)
r = c - k1 - k2
k = r - k3 * 2
else:
fr = 0
ek = 0
r = c - k1
k = r - k3 * 2
# Fitil
x1 = xlist[x * 2 + 1] + ek
x2 = xlist[x * 2 + 2] - ek
myvertex.extend([[x2, fr - k3, z + 1], [x2 - k3 * 2, fr - k3, z + 1], [x2 - k3 * 2, fr + k3, z + 1],
[x2, fr + k3, z + 1]])
myvertex.extend([[x2, fr - k3, zlist[-3] + ek], [x2 - k3 * 2, fr - k3, zlist[-3] + ek + k3 * 2],
[x2 - k3 * 2, fr + k3, zlist[-3] + ek + k3 * 2], [x2, fr + k3, zlist[-3] + ek]])
myvertex.extend([[x1, fr - k3, zlist[-3] + ek], [x1 + k3 * 2, fr - k3, zlist[-3] + ek + k3 * 2],
[x1 + k3 * 2, fr + k3, zlist[-3] + ek + k3 * 2], [x1, fr + k3, zlist[-3] + ek]])
myvertex.extend([[x1, fr - k3, z + 1], [x1 + k3 * 2, fr - k3, z + 1], [x1 + k3 * 2, fr + k3, z + 1],
[x1, fr + k3, z + 1]])
n = len(myvertex)
myfaces.extend(
[[n - 16, n - 15, n - 11, n - 12], [n - 15, n - 14, n - 10, n - 11], [n - 14, n - 13, n - 9, n - 10]])
myfaces.extend(
[[n - 12, n - 11, n - 7, n - 8], [n - 11, n - 10, n - 6, n - 7], [n - 10, n - 9, n - 5, n - 6]])
myfaces.extend([[n - 8, n - 7, n - 3, n - 4], [n - 7, n - 6, n - 2, n - 3], [n - 6, n - 5, n - 1, n - 2]])
m = len(myfaces)
ftl.extend([m - 1, m - 2, m - 3, m - 4, m - 5, m - 6, m - 7, m - 8, m - 9])
alt = [n - 16, n - 15, n - 14, n - 13, n - 4, n - 3, n - 2, n - 1]
myvertex[alt[0]][2] = sqrt(r ** 2 - myvertex[alt[0]][0] ** 2) + z
myvertex[alt[1]][2] = sqrt(k ** 2 - myvertex[alt[1]][0] ** 2) + z
myvertex[alt[2]][2] = sqrt(k ** 2 - myvertex[alt[2]][0] ** 2) + z
myvertex[alt[3]][2] = sqrt(r ** 2 - myvertex[alt[3]][0] ** 2) + z
myvertex[alt[4]][2] = sqrt(r ** 2 - myvertex[alt[4]][0] ** 2) + z
myvertex[alt[5]][2] = sqrt(k ** 2 - myvertex[alt[5]][0] ** 2) + z
myvertex[alt[6]][2] = sqrt(k ** 2 - myvertex[alt[6]][0] ** 2) + z
myvertex[alt[7]][2] = sqrt(r ** 2 - myvertex[alt[7]][0] ** 2) + z
d1 = []
d2 = []
t1 = []
t2 = []
for i in range(0, res):
a = i * pi / res
y1 = cos(a) * r
y2 = -cos(a) * k
if x1 < y1 < x2:
myvertex.extend([[y1, fr - k3, sin(a) * r + z], [y1, fr + k3, sin(a) * r + z]])
t1.append(len(myvertex) - 2)
t2.append(len(myvertex) - 1)
ftl.extend([len(myfaces) - 1, len(myfaces) - 2])
if x1 + k3 * 2 < y2 < x2 - k3 * 2:
myvertex.extend([[y2, fr - k3, sin(a) * k + z], [y2, fr + k3, sin(a) * k + z]])
d1.append(len(myvertex) - 2)
d2.append(len(myvertex) - 1)
ftl.extend([len(myfaces) - 1, len(myfaces) - 2])
on = [alt[1], alt[0]]
on.extend(t1)
on.extend([alt[4], alt[5]])
on.extend(d1)
ar = [alt[2], alt[3]]
ar.extend(t2)
ar.extend([alt[7], alt[6]])
ar.extend(d2)
ar.reverse()
# if d1 == []:
if not d1:
myfaces.extend([on, ar, [alt[5], alt[6], alt[2], alt[1]]])
m = len(myfaces)
ftl.extend([m - 1, m - 2, m - 3])
sm.extend([m - 1])
else:
myfaces.extend([on, ar, [alt[5], alt[6], d2[0], d1[0]], [alt[2], alt[1], d1[-1], d2[-1]]])
m = len(myfaces)
ftl.extend([m - 1, m - 2, m - 3, m - 4])
sm.extend([m - 1, m - 2])
for i in range(0, len(d1) - 1):
myfaces.append([d1[i + 1], d1[i], d2[i], d2[i + 1]])
ftl.append(len(myfaces) - 1)
sm.append(len(myfaces) - 1)
# Cam
x1 = xlist[x * 2 + 1] + ek + k3 * 2
x2 = xlist[x * 2 + 2] - ek - k3 * 2
on = []
ar = []
for i in range(0, res):
a = i * pi / res
y1 = -cos(a) * k
if x1 < y1 < x2:
myvertex.extend([[y1, fr - 0.005, sin(a) * k + z], [y1, fr + 0.005, sin(a) * k + z]])
n = len(myvertex)
on.append(n - 1)
ar.append(n - 2)
myvertex.extend(
[[x1, fr - 0.005, sqrt(k ** 2 - x1 ** 2) + z], [x1, fr + 0.005, sqrt(k ** 2 - x1 ** 2) + z]])
myvertex.extend([[x1, fr - 0.005, zlist[-3] + ek + k3 * 2], [x1, fr + 0.005, zlist[-3] + ek + k3 * 2]])
myvertex.extend([[x2, fr - 0.005, zlist[-3] + ek + k3 * 2], [x2, fr + 0.005, zlist[-3] + ek + k3 * 2]])
myvertex.extend(
[[x2, fr - 0.005, sqrt(k ** 2 - x2 ** 2) + z], [x2, fr + 0.005, sqrt(k ** 2 - x2 ** 2) + z]])
n = len(myvertex)
on.extend([n - 1, n - 3, n - 5, n - 7])
ar.extend([n - 2, n - 4, n - 6, n - 8])
myfaces.append(on)
ar.reverse()
myfaces.append(ar)
m = len(myfaces)
cam.extend([m - 1, m - 2])
elif op.UST == '3': # Egri
if op.DT3 == '1':
h1 = (op.VL1 / 200) / u
elif op.DT3 == '2':
h1 = op.VL3 / 100
elif op.DT3 == '3':
h1 = sin(op.VL4 * pi / 180) / cos(op.VL4 * pi / 180)
z = sqrt(k1 ** 2 + (k1 * h1) ** 2)
k = sqrt(k2 ** 2 + (k2 * h1) ** 2)
f = sqrt(k3 ** 2 + (k3 * h1) ** 2) * 2
myvertex[ust[0]][2] = zlist[-1] + myvertex[ust[0]][0] * h1
myvertex[ust[1]][2] = zlist[-1] + myvertex[ust[1]][0] * h1
for i in alt:
myvertex[i][2] = zlist[-1] + myvertex[i][0] * h1 - z
myvertex[son[0]][2] = zlist[-1] + myvertex[son[0]][0] * h1
myvertex[son[1]][2] = zlist[-1] + myvertex[son[1]][0] * h1
myfaces.append([ust[1], ust[0], son[0], son[1]])
for i in range(0, mx * 4, 4):
myfaces.append([alt[i], alt[i + 1], alt[i + 3], alt[i + 2]])
on = [ust[0]]
ar = [ust[1]]
for i in range(0, len(alt) - 1, 2):
on.append(alt[i])
ar.append(alt[i + 1])
on.append(son[0])
myfaces.append(on)
ar.append(son[1])
ar.reverse()
myfaces.append(ar)
y = my - 1
for x in range(0, mx):
if kx[x][y] is True:
kapak(myvertex, myfaces, xlist, zlist, x * 2 + 1, y * 2 + 1, k2 / 2, (k1 + k2) * 0.5 - 0.01)
n = len(myvertex)
myvertex[n - 5][2] = zlist[-1] + myvertex[n - 5][0] * h1 - z
myvertex[n - 6][2] = zlist[-1] + myvertex[n - 6][0] * h1 - z - k
myvertex[n - 7][2] = zlist[-1] + myvertex[n - 7][0] * h1 - z - k
myvertex[n - 8][2] = zlist[-1] + myvertex[n - 8][0] * h1 - z
myvertex[n - 9][2] = zlist[-1] + myvertex[n - 9][0] * h1 - z
myvertex[n - 10][2] = zlist[-1] + myvertex[n - 10][0] * h1 - z - k
myvertex[n - 11][2] = zlist[-1] + myvertex[n - 11][0] * h1 - z - k
myvertex[n - 12][2] = zlist[-1] + myvertex[n - 12][0] * h1 - z
fitil(myvertex, myfaces, xlist, zlist, x * 2 + 1, y * 2 + 1, k3, (k1 + k2) * 0.5 - 0.01, k2)
n = len(myvertex)
myvertex[n - 2][2] = zlist[-1] + myvertex[n - 2][0] * h1 - z - k - f
myvertex[n - 3][2] = zlist[-1] + myvertex[n - 3][0] * h1 - z - k - f
myvertex[n - 6][2] = zlist[-1] + myvertex[n - 6][0] * h1 - z - k - f
myvertex[n - 7][2] = zlist[-1] + myvertex[n - 7][0] * h1 - z - k - f
myvertex[n - 13][2] = zlist[-1] + myvertex[n - 13][0] * h1 - z - k
myvertex[n - 14][2] = zlist[-1] + myvertex[n - 14][0] * h1 - z - k - f
myvertex[n - 15][2] = zlist[-1] + myvertex[n - 15][0] * h1 - z - k - f
myvertex[n - 16][2] = zlist[-1] + myvertex[n - 16][0] * h1 - z - k
myvertex[n - 17][2] = zlist[-1] + myvertex[n - 17][0] * h1 - z - k
myvertex[n - 18][2] = zlist[-1] + myvertex[n - 18][0] * h1 - z - k - f
myvertex[n - 19][2] = zlist[-1] + myvertex[n - 19][0] * h1 - z - k - f
myvertex[n - 20][2] = zlist[-1] + myvertex[n - 20][0] * h1 - z - k
else:
fitil(myvertex, myfaces, xlist, zlist, x * 2 + 1, y * 2 + 1, k3, 0, 0)
n = len(myvertex)
myvertex[n - 2][2] = zlist[-1] + myvertex[n - 2][0] * h1 - z - f
myvertex[n - 3][2] = zlist[-1] + myvertex[n - 3][0] * h1 - z - f
myvertex[n - 6][2] = zlist[-1] + myvertex[n - 6][0] * h1 - z - f
myvertex[n - 7][2] = zlist[-1] + myvertex[n - 7][0] * h1 - z - f
myvertex[n - 13][2] = zlist[-1] + myvertex[n - 13][0] * h1 - z
myvertex[n - 14][2] = zlist[-1] + myvertex[n - 14][0] * h1 - z - f
myvertex[n - 15][2] = zlist[-1] + myvertex[n - 15][0] * h1 - z - f
myvertex[n - 16][2] = zlist[-1] + myvertex[n - 16][0] * h1 - z
myvertex[n - 17][2] = zlist[-1] + myvertex[n - 17][0] * h1 - z
myvertex[n - 18][2] = zlist[-1] + myvertex[n - 18][0] * h1 - z - f
myvertex[n - 19][2] = zlist[-1] + myvertex[n - 19][0] * h1 - z - f
myvertex[n - 20][2] = zlist[-1] + myvertex[n - 20][0] * h1 - z
m = len(myfaces)
cam.extend([m - 1, m - 2])
ftl.extend([m - 3, m - 4, m - 5, m - 6, m - 7, m - 8, m - 9, m - 10, m - 11, m - 12, m - 13, m - 14])
elif op.UST == '4': # Ucgen
if op.DT3 == '1':
h1 = (op.VL1 / 100) / u
elif op.DT3 == '2':
h1 = op.VL3 / 100
elif op.DT3 == '3':
h1 = sin(op.VL4 * pi / 180) / cos(op.VL4 * pi / 180)
z = sqrt(k1 ** 2 + (k1 * h1) ** 2)
k = sqrt(k2 ** 2 + (k2 * h1) ** 2)
f = sqrt(k3 ** 2 + (k3 * h1) ** 2) * 2
myvertex[ust[0]][2] = zlist[-1] + myvertex[ust[0]][0] * h1
myvertex[ust[1]][2] = zlist[-1] + myvertex[ust[1]][0] * h1
for i in alt:
myvertex[i][2] = zlist[-1] - abs(myvertex[i][0]) * h1 - z
myvertex[son[0]][2] = zlist[-1] - myvertex[son[0]][0] * h1
myvertex[son[1]][2] = zlist[-1] - myvertex[son[1]][0] * h1
myvertex.extend([[0, -k1 / 2, zlist[-1]], [0, k1 / 2, zlist[-1]]])
x = 0
for j in range(2, len(alt) - 2, 4):
if myvertex[alt[j]][0] < 0 < myvertex[alt[j + 2]][0]:
x = 1
n = len(myvertex)
myfaces.extend([[ust[1], ust[0], n - 2, n - 1], [n - 1, n - 2, son[0], son[1]]])
on = [son[0], n - 2, ust[0]]
ar = [son[1], n - 1, ust[1]]
if x == 0:
myvertex.extend([[0, -k1 / 2, zlist[-1] - z], [0, k1 / 2, zlist[-1] - z]])
for j in range(0, len(alt) - 2, 4):
if myvertex[alt[j]][0] < 0 and myvertex[alt[j + 2]][0] < 0:
myfaces.append([alt[j], alt[j + 1], alt[j + 3], alt[j + 2]])
on.extend([alt[j], alt[j + 2]])
ar.extend([alt[j + 1], alt[j + 3]])
elif myvertex[alt[j]][0] > 0 and myvertex[alt[j + 2]][0] > 0:
myfaces.append([alt[j], alt[j + 1], alt[j + 3], alt[j + 2]])
on.extend([alt[j], alt[j + 2]])
ar.extend([alt[j + 1], alt[j + 3]])
else:
n = len(myvertex)
myfaces.extend([[alt[j], alt[j + 1], n - 1, n - 2], [n - 2, n - 1, alt[j + 3], alt[j + 2]]])
on.extend([alt[j + 0], n - 2, alt[j + 2]])
ar.extend([alt[j + 1], n - 1, alt[j + 3]])
myfaces.append(on)
ar.reverse()
myfaces.append(ar)
y = my - 1
for x in range(0, mx):
if myvertex[alt[x * 4]][0] < 0 and myvertex[alt[x * 4 + 2]][0] < 0:
if kx[x][y] is True:
kapak(myvertex, myfaces, xlist, zlist, x * 2 + 1, y * 2 + 1, k2 / 2, (k1 + k2) * 0.5 - 0.01)
n = len(myvertex)
myvertex[n - 5][2] = zlist[-1] + myvertex[n - 5][0] * h1 - z
myvertex[n - 6][2] = zlist[-1] + myvertex[n - 6][0] * h1 - z - k
myvertex[n - 7][2] = zlist[-1] + myvertex[n - 7][0] * h1 - z - k
myvertex[n - 8][2] = zlist[-1] + myvertex[n - 8][0] * h1 - z
myvertex[n - 9][2] = zlist[-1] + myvertex[n - 9][0] * h1 - z
myvertex[n - 10][2] = zlist[-1] + myvertex[n - 10][0] * h1 - z - k
myvertex[n - 11][2] = zlist[-1] + myvertex[n - 11][0] * h1 - z - k
myvertex[n - 12][2] = zlist[-1] + myvertex[n - 12][0] * h1 - z
fitil(myvertex, myfaces, xlist, zlist, x * 2 + 1, y * 2 + 1, k3, (k1 + k2) * 0.5 - 0.01, k2)
n = len(myvertex)
myvertex[n - 2][2] = zlist[-1] + myvertex[n - 2][0] * h1 - z - k - f
myvertex[n - 3][2] = zlist[-1] + myvertex[n - 3][0] * h1 - z - k - f
myvertex[n - 6][2] = zlist[-1] + myvertex[n - 6][0] * h1 - z - k - f
myvertex[n - 7][2] = zlist[-1] + myvertex[n - 7][0] * h1 - z - k - f
myvertex[n - 13][2] = zlist[-1] + myvertex[n - 13][0] * h1 - z - k
myvertex[n - 14][2] = zlist[-1] + myvertex[n - 14][0] * h1 - z - k - f
myvertex[n - 15][2] = zlist[-1] + myvertex[n - 15][0] * h1 - z - k - f
myvertex[n - 16][2] = zlist[-1] + myvertex[n - 16][0] * h1 - z - k
myvertex[n - 17][2] = zlist[-1] + myvertex[n - 17][0] * h1 - z - k
myvertex[n - 18][2] = zlist[-1] + myvertex[n - 18][0] * h1 - z - k - f
myvertex[n - 19][2] = zlist[-1] + myvertex[n - 19][0] * h1 - z - k - f
myvertex[n - 20][2] = zlist[-1] + myvertex[n - 20][0] * h1 - z - k
else:
fitil(myvertex, myfaces, xlist, zlist, x * 2 + 1, y * 2 + 1, k3, 0, 0)
n = len(myvertex)
myvertex[n - 2][2] = zlist[-1] + myvertex[n - 2][0] * h1 - z - f
myvertex[n - 3][2] = zlist[-1] + myvertex[n - 3][0] * h1 - z - f
myvertex[n - 6][2] = zlist[-1] + myvertex[n - 6][0] * h1 - z - f
myvertex[n - 7][2] = zlist[-1] + myvertex[n - 7][0] * h1 - z - f
myvertex[n - 13][2] = zlist[-1] + myvertex[n - 13][0] * h1 - z
myvertex[n - 14][2] = zlist[-1] + myvertex[n - 14][0] * h1 - z - f
myvertex[n - 15][2] = zlist[-1] + myvertex[n - 15][0] * h1 - z - f
myvertex[n - 16][2] = zlist[-1] + myvertex[n - 16][0] * h1 - z
myvertex[n - 17][2] = zlist[-1] + myvertex[n - 17][0] * h1 - z
myvertex[n - 18][2] = zlist[-1] + myvertex[n - 18][0] * h1 - z - f
myvertex[n - 19][2] = zlist[-1] + myvertex[n - 19][0] * h1 - z - f
myvertex[n - 20][2] = zlist[-1] + myvertex[n - 20][0] * h1 - z
m = len(myfaces)
cam.extend([m - 1, m - 2])
ftl.extend([m - 3, m - 4, m - 5, m - 6, m - 7, m - 8, m - 9, m - 10, m - 11, m - 12, m - 13, m - 14])
elif myvertex[alt[x * 4]][0] > 0 and myvertex[alt[x * 4 + 2]][0] > 0:
if kx[x][y] is True:
kapak(myvertex, myfaces, xlist, zlist, x * 2 + 1, y * 2 + 1, k2 / 2, (k1 + k2) * 0.5 - 0.01)
n = len(myvertex)
myvertex[n - 5][2] = zlist[-1] - myvertex[n - 5][0] * h1 - z
myvertex[n - 6][2] = zlist[-1] - myvertex[n - 6][0] * h1 - z - k
myvertex[n - 7][2] = zlist[-1] - myvertex[n - 7][0] * h1 - z - k
myvertex[n - 8][2] = zlist[-1] - myvertex[n - 8][0] * h1 - z
myvertex[n - 9][2] = zlist[-1] - myvertex[n - 9][0] * h1 - z
myvertex[n - 10][2] = zlist[-1] - myvertex[n - 10][0] * h1 - z - k
myvertex[n - 11][2] = zlist[-1] - myvertex[n - 11][0] * h1 - z - k
myvertex[n - 12][2] = zlist[-1] - myvertex[n - 12][0] * h1 - z
fitil(myvertex, myfaces, xlist, zlist, x * 2 + 1, y * 2 + 1, k3, (k1 + k2) * 0.5 - 0.01, k2)
n = len(myvertex)
myvertex[n - 2][2] = zlist[-1] - myvertex[n - 2][0] * h1 - z - k - f
myvertex[n - 3][2] = zlist[-1] - myvertex[n - 3][0] * h1 - z - k - f
myvertex[n - 6][2] = zlist[-1] - myvertex[n - 6][0] * h1 - z - k - f
myvertex[n - 7][2] = zlist[-1] - myvertex[n - 7][0] * h1 - z - k - f
myvertex[n - 13][2] = zlist[-1] - myvertex[n - 13][0] * h1 - z - k
myvertex[n - 14][2] = zlist[-1] - myvertex[n - 14][0] * h1 - z - k - f
myvertex[n - 15][2] = zlist[-1] - myvertex[n - 15][0] * h1 - z - k - f
myvertex[n - 16][2] = zlist[-1] - myvertex[n - 16][0] * h1 - z - k
myvertex[n - 17][2] = zlist[-1] - myvertex[n - 17][0] * h1 - z - k
myvertex[n - 18][2] = zlist[-1] - myvertex[n - 18][0] * h1 - z - k - f
myvertex[n - 19][2] = zlist[-1] - myvertex[n - 19][0] * h1 - z - k - f
myvertex[n - 20][2] = zlist[-1] - myvertex[n - 20][0] * h1 - z - k
else:
fitil(myvertex, myfaces, xlist, zlist, x * 2 + 1, y * 2 + 1, k3, 0, 0)
n = len(myvertex)
myvertex[n - 2][2] = zlist[-1] - myvertex[n - 2][0] * h1 - z - f
myvertex[n - 3][2] = zlist[-1] - myvertex[n - 3][0] * h1 - z - f
myvertex[n - 6][2] = zlist[-1] - myvertex[n - 6][0] * h1 - z - f
myvertex[n - 7][2] = zlist[-1] - myvertex[n - 7][0] * h1 - z - f
myvertex[n - 13][2] = zlist[-1] - myvertex[n - 13][0] * h1 - z
myvertex[n - 14][2] = zlist[-1] - myvertex[n - 14][0] * h1 - z - f
myvertex[n - 15][2] = zlist[-1] - myvertex[n - 15][0] * h1 - z - f
myvertex[n - 16][2] = zlist[-1] - myvertex[n - 16][0] * h1 - z
myvertex[n - 17][2] = zlist[-1] - myvertex[n - 17][0] * h1 - z
myvertex[n - 18][2] = zlist[-1] - myvertex[n - 18][0] * h1 - z - f
myvertex[n - 19][2] = zlist[-1] - myvertex[n - 19][0] * h1 - z - f
myvertex[n - 20][2] = zlist[-1] - myvertex[n - 20][0] * h1 - z
m = len(myfaces)
cam.extend([m - 1, m - 2])
ftl.extend([m - 3, m - 4, m - 5, m - 6, m - 7, m - 8, m - 9, m - 10, m - 11, m - 12, m - 13, m - 14])
else:
k4 = k3 * 2
if kx[x][y] is True:
zz = (k1 + k2) * 0.5 - 0.01
xx = xlist[x * 2 + 1]
myvertex.extend([[xx, -k2 / 2 + zz, zlist[-3]], [xx + k2, -k2 / 2 + zz, zlist[-3] + k2],
[xx + k2, k2 / 2 + zz, zlist[-3] + k2], [xx, k2 / 2 + zz, zlist[-3]]])
myvertex.extend(
[[xx, -k2 / 2 + zz, zlist[-1] + xx * h1 - z], [xx + k2, -k2 / 2 + zz,
zlist[-1] + (xx + k2) * h1 - z - k],
[xx + k2, k2 / 2 + zz, zlist[-1] + (xx + k2) * h1 - z - k],
[xx, k2 / 2 + zz, zlist[-1] + xx * h1 - z]])
myvertex.extend([[0, -k2 / 2 + zz, zlist[-1] - z], [0, -k2 / 2 + zz, zlist[-1] - z - k],
[0, k2 / 2 + zz, zlist[-1] - z - k], [0, k2 / 2 + zz, zlist[-1] - z]])
xx = xlist[x * 2 + 2]
myvertex.extend(
[[xx, -k2 / 2 + zz, zlist[-1] - xx * h1 - z], [xx - k2, -k2 / 2 + zz,
zlist[-1] - (xx - k2) * h1 - z - k],
[xx - k2, k2 / 2 + zz, zlist[-1] - (xx - k2) * h1 - z - k],
[xx, k2 / 2 + zz, zlist[-1] - xx * h1 - z]])
myvertex.extend([[xx, -k2 / 2 + zz, zlist[-3]], [xx - k2, -k2 / 2 + zz, zlist[-3] + k2],
[xx - k2, k2 / 2 + zz, zlist[-3] + k2], [xx, k2 / 2 + zz, zlist[-3]]])
n = len(myvertex)
myfaces.extend([[n - 20, n - 19, n - 15, n - 16], [n - 19, n - 18, n - 14, n - 15],
[n - 18, n - 17, n - 13, n - 14], [n - 17, n - 20, n - 16, n - 13]])
myfaces.extend([[n - 16, n - 15, n - 11, n - 12], [n - 15, n - 14, n - 10, n - 11],
[n - 14, n - 13, n - 9, n - 10], [n - 13, n - 16, n - 12, n - 9]])
myfaces.extend(
[[n - 12, n - 11, n - 7, n - 8], [n - 11, n - 10, n - 6, n - 7], [n - 10, n - 9, n - 5, n - 6],
[n - 9, n - 12, n - 8, n - 5]])
myfaces.extend(
[[n - 8, n - 7, n - 3, n - 4], [n - 7, n - 6, n - 2, n - 3], [n - 6, n - 5, n - 1, n - 2],
[n - 5, n - 8, n - 4, n - 1]])
myfaces.extend(
[[n - 4, n - 3, n - 19, n - 20], [n - 3, n - 2, n - 18, n - 19], [n - 2, n - 1, n - 17, n - 18],
[n - 1, n - 4, n - 20, n - 17]])
xx = xlist[x * 2 + 1]
myvertex.extend([[xx + k2, -k3 + zz, zlist[-3] + k2], [xx + k4 + k2, -k3 + zz, zlist[-3] + k2 + k4],
[xx + k4 + k2, k3 + zz, zlist[-3] + k2 + k4], [xx + k2, k3 + zz, zlist[-3] + k2]])
myvertex.extend([[xx + k2, -k3 + zz, zlist[-1] + (xx + k2) * h1 - z - k],
[xx + k4 + k2, -k3 + zz, zlist[-1] + (xx + k2 + k4) * h1 - z - k - f],
[xx + k4 + k2, k3 + zz, zlist[-1] + (xx + k2 + k4) * h1 - z - k - f],
[xx + k2, k3 + zz, zlist[-1] + (xx + k2) * h1 - z - k]])
myvertex.extend([[0, -k3 + zz, zlist[-1] - k - z], [0, -k3 + zz, zlist[-1] - k - z - f],
[0, k3 + zz, zlist[-1] - k - z - f], [0, k3 + zz, zlist[-1] - k - z]])
xx = xlist[x * 2 + 2]
myvertex.extend([[xx - k2, -k3 + zz, zlist[-1] - (xx - k2) * h1 - z - k],
[xx - k4 - k2, -k3 + zz, zlist[-1] - (xx - k2 - k4) * h1 - z - k - f],
[xx - k4 - k2, k3 + zz, zlist[-1] - (xx - k2 - k4) * h1 - z - k - f],
[xx - k2, k3 + zz, zlist[-1] - (xx - k2) * h1 - z - k]])
myvertex.extend([[xx - k2, -k3 + zz, zlist[-3] + k2], [xx - k4 - k2, -k3 + zz, zlist[-3] + k2 + k4],
[xx - k4 - k2, k3 + zz, zlist[-3] + k2 + k4], [xx - k2, k3 + zz, zlist[-3] + k2]])
n = len(myvertex)
myfaces.extend([[n - 20, n - 19, n - 15, n - 16], [n - 19, n - 18, n - 14, n - 15],
[n - 18, n - 17, n - 13, n - 14]])
myfaces.extend([[n - 16, n - 15, n - 11, n - 12], [n - 15, n - 14, n - 10, n - 11],
[n - 14, n - 13, n - 9, n - 10]])
myfaces.extend(
[[n - 12, n - 11, n - 7, n - 8], [n - 11, n - 10, n - 6, n - 7], [n - 10, n - 9, n - 5, n - 6]])
myfaces.extend(
[[n - 8, n - 7, n - 3, n - 4], [n - 7, n - 6, n - 2, n - 3], [n - 6, n - 5, n - 1, n - 2]])
myfaces.extend([[n - 4, n - 3, n - 19, n - 20], [n - 3, n - 2, n - 18, n - 19],
[n - 2, n - 1, n - 17, n - 18]])
xx = xlist[x * 2 + 1]
myvertex.extend(
[[xx + k4 + k2, -k3 + zz, zlist[-3] + k2 + k4], [xx + k4 + k2, k3 + zz, zlist[-3] + k2 + k4]])
myvertex.extend([[xx + k4 + k2, -k3 + zz, zlist[-1] + (xx + k2 + k4) * h1 - z - k - f],
[xx + k4 + k2, k3 + zz, zlist[-1] + (xx + k2 + k4) * h1 - z - k - f]])
myvertex.extend([[0, -k3 + zz, zlist[-1] - k - z - f], [0, k3 + zz, zlist[-1] - k - z - f]])
xx = xlist[x * 2 + 2]
myvertex.extend([[xx - k4 - k2, -k3 + zz, zlist[-1] - (xx - k2 - k4) * h1 - z - k - f],
[xx - k4 - k2, k3 + zz, zlist[-1] - (xx - k2 - k4) * h1 - z - k - f]])
myvertex.extend(
[[xx - k4 - k2, -k3 + zz, zlist[-3] + k2 + k4], [xx - k4 - k2, k3 + zz, zlist[-3] + k2 + k4]])
myfaces.extend([[n + 8, n + 6, n + 4, n + 2, n + 0], [n + 1, n + 3, n + 5, n + 7, n + 9]])
else:
xx = xlist[x * 2 + 1]
myvertex.extend(
[[xx, -k3, zlist[-3]], [xx + k4, -k3, zlist[-3] + k4], [xx + k4, k3, zlist[-3] + k4],
[xx, k3, zlist[-3]]])
myvertex.extend(
[[xx, -k3, zlist[-1] + xx * h1 - z], [xx + k4, -k3, zlist[-1] + (xx + k4) * h1 - z - f],
[xx + k4, k3, zlist[-1] + (xx + k4) * h1 - z - f], [xx, k3, zlist[-1] + xx * h1 - z]])
myvertex.extend(
[[0, -k3, zlist[-1] - z], [0, -k3, zlist[-1] - z - f], [0, k3, zlist[-1] - z - f],
[0, k3, zlist[-1] - z]])
xx = xlist[x * 2 + 2]
myvertex.extend(
[[xx, -k3, zlist[-1] - xx * h1 - z], [xx - k4, -k3, zlist[-1] - (xx - k4) * h1 - z - f],
[xx - k4, k3, zlist[-1] - (xx - k4) * h1 - z - f], [xx, k3, zlist[-1] - xx * h1 - z]])
myvertex.extend(
[[xx, -k3, zlist[-3]], [xx - k4, -k3, zlist[-3] + k4], [xx - k4, k3, zlist[-3] + k4],
[xx, k3, zlist[-3]]])
n = len(myvertex)
myfaces.extend([[n - 20, n - 19, n - 15, n - 16], [n - 19, n - 18, n - 14, n - 15],
[n - 18, n - 17, n - 13, n - 14]])
myfaces.extend([[n - 16, n - 15, n - 11, n - 12], [n - 15, n - 14, n - 10, n - 11],
[n - 14, n - 13, n - 9, n - 10]])
myfaces.extend(
[[n - 12, n - 11, n - 7, n - 8], [n - 11, n - 10, n - 6, n - 7], [n - 10, n - 9, n - 5, n - 6]])
myfaces.extend(
[[n - 8, n - 7, n - 3, n - 4], [n - 7, n - 6, n - 2, n - 3], [n - 6, n - 5, n - 1, n - 2]])
myfaces.extend([[n - 4, n - 3, n - 19, n - 20], [n - 3, n - 2, n - 18, n - 19],
[n - 2, n - 1, n - 17, n - 18]])
xx = xlist[x * 2 + 1]
myvertex.extend([[xx + k4, -0.005, zlist[-3] + k4], [xx + k4, 0.005, zlist[-3] + k4]])
myvertex.extend([[xx + k4, -0.005, zlist[-1] + (xx + k4) * h1 - z - f],
[xx + k4, 0.005, zlist[-1] + (xx + k4) * h1 - z - f]])
myvertex.extend([[0, -0.005, zlist[-1] - z - f], [0, 0.005, zlist[-1] - z - f]])
xx = xlist[x * 2 + 2]
myvertex.extend([[xx - k4, -0.005, zlist[-1] - (xx - k4) * h1 - z - f],
[xx - k4, 0.005, zlist[-1] - (xx - k4) * h1 - z - f]])
myvertex.extend([[xx - k4, -0.005, zlist[-3] + k4], [xx - k4, 0.005, zlist[-3] + k4]])
myfaces.extend([[n + 8, n + 6, n + 4, n + 2, n + 0], [n + 1, n + 3, n + 5, n + 7, n + 9]])
m = len(myfaces)
cam.extend([m - 1, m - 2])
ftl.extend(
[m - 3, m - 4, m - 5, m - 6, m - 7, m - 8, m - 9, m - 10, m - 11, m - 12, m - 13, m - 14, m - 15,
m - 16, m - 17])
# Mermer
if op.mr is True:
mrh = -op.mr1 / 100
mrg = op.mr2 / 100
mdv = (op.mr3 / 200) + mrg
msv = -(mdv + (op.mr4 / 100))
myvertex.extend([[-u, mdv, 0], [u, mdv, 0], [-u, msv, 0], [u, msv, 0], [-u, mdv, mrh], [u, mdv, mrh],
[-u, msv, mrh],
[u, msv, mrh]])
n = len(myvertex)
myfaces.extend([[n - 1, n - 2, n - 4, n - 3], [n - 3, n - 4, n - 8, n - 7], [n - 6, n - 5, n - 7, n - 8],
[n - 2, n - 1, n - 5, n - 6], [n - 4, n - 2, n - 6, n - 8], [n - 5, n - 1, n - 3, n - 7]])
n = len(myfaces)
mer.extend([n - 1, n - 2, n - 3, n - 4, n - 5, n - 6])
return ftl, cam, mer, sm
# ------------------------------------------------------------------
# Define property group class to create or modify
# ------------------------------------------------------------------
class GeneralProperties(bpy.types.PropertyGroup):
    """Per-object settings for the parametric window generator.

    The short property names come from the original (Turkish) author; the
    ``description`` strings carry the user-facing meaning.  Changing any
    property re-runs the generator through its ``update`` callback.
    """
    # Preset selector: applies one of the predefined window/door sizes.
    prs = bpy.props.EnumProperty(items=(('1', "WINDOW 250X200", ""),
                                        ('2', "WINDOW 200X200", ""),
                                        ('3', "WINDOW 180X200", ""),
                                        ('4', "WINDOW 180X160", ""),
                                        ('5', "WINDOW 160X160", ""),
                                        ('6', "WINDOW 50X50", ""),
                                        ('7', "DOOR 80X250", ""),
                                        ('8', "DOOR 80X230", "")),
                                 name="",
                                 description='Predefined types',
                                 update=update_using_default)
    # NOTE(review): second attribute bound to the same property definition —
    # presumably kept to remember the last applied preset; confirm against
    # update_using_default before removing.
    son = prs
    # Pane grid size: horizontal and vertical pane counts.
    gen = bpy.props.IntProperty(name='H Count', min=1, max=8, default=3, description='Horizontal Panes',
                                update=update_window)
    yuk = bpy.props.IntProperty(name='V Count', min=1, max=5, default=1, description='Vertical Panes',
                                update=update_window)
    # Frame thicknesses (units match the preset labels — presumably cm;
    # TODO confirm).
    kl1 = bpy.props.IntProperty(name='Outer Frame', min=2, max=50, default=5, description='Outside Frame Thickness',
                                update=update_window)
    kl2 = bpy.props.IntProperty(name='Risers', min=2, max=50, default=5, description='Risers Width',
                                update=update_window)
    fk = bpy.props.IntProperty(name='Inner Frame', min=1, max=20, default=2, description='Inside Frame Thickness',
                               update=update_window)
    # Window sill toggle and its four measurements.
    mr = bpy.props.BoolProperty(name='Sill', default=True, description='Window Sill', update=update_window)
    mr1 = bpy.props.IntProperty(name='', min=1, max=20, default=4, description='Height', update=update_window)
    mr2 = bpy.props.IntProperty(name='', min=0, max=20, default=4, description='First Depth', update=update_window)
    mr3 = bpy.props.IntProperty(name='', min=1, max=50, default=20, description='Second Depth', update=update_window)
    mr4 = bpy.props.IntProperty(name='', min=0, max=50, default=0, description='Extrusion for Jamb',
                                update=update_window)
    # Materials: mt1 = outer frame, mt2 = inner frame (see panel labels).
    mt1 = bpy.props.EnumProperty(items=(('1', "PVC", ""), ('2', "WOOD", ""), ('3', "Plastic", "")), name="",
                                 default='1',
                                 description='Material to use',
                                 update=update_window)
    mt2 = bpy.props.EnumProperty(items=(('1', "PVC", ""), ('2', "WOOD", ""), ('3', "Plastic", "")), name="",
                                 default='3',
                                 description='Material to use',
                                 update=update_window)
    # Shape of the window's upper section.
    UST = bpy.props.EnumProperty(
        items=(('1', "Flat", ""), ('2', "Arch", ""), ('3', "Inclined", ""), ('4', "Triangle", "")),
        name="Top", default='1',
        description='Type of window upper section',
        update=update_window)
    # Measurement mode for the arched top (DT2) and for the inclined /
    # triangular tops (DT3).
    DT2 = bpy.props.EnumProperty(items=(('1', "Difference", ""), ('2', "Radius", "")), name="", default='1',
                                 update=update_window)
    DT3 = bpy.props.EnumProperty(items=(('1', "Difference", ""), ('2', "Incline %", ""), ('3', "Incline Angle", "")),
                                 name="",
                                 default='1', update=update_window)
    VL1 = bpy.props.IntProperty(name='', min=-10000, max=10000, default=30, update=update_window)  # 'Fark': difference
    VL2 = bpy.props.IntProperty(name='', min=1, max=10000, default=30, update=update_window)  # 'Cap': diameter/radius value
    VL3 = bpy.props.IntProperty(name='', min=-100, max=100, default=30, update=update_window)  # 'Egim %': incline percent
    VL4 = bpy.props.IntProperty(name='', min=-45, max=45, default=30, update=update_window)  # 'Egim Aci': incline angle
    res = bpy.props.IntProperty(name='Resolution', min=2, max=360, default=36, update=update_window)  # arc resolution
    # Individual pane widths, one per horizontal pane (gnx0..gnx7).
    gnx0 = bpy.props.IntProperty(name='', min=1, max=300, default=60, description='1st Window Width',
                                 update=update_window)
    gnx1 = bpy.props.IntProperty(name='', min=1, max=300, default=110, description='2nd Window Width',
                                 update=update_window)
    gnx2 = bpy.props.IntProperty(name='', min=1, max=300, default=60, description='3rd Window Width',
                                 update=update_window)
    gnx3 = bpy.props.IntProperty(name='', min=1, max=300, default=60, description='4th Window Width',
                                 update=update_window)
    gnx4 = bpy.props.IntProperty(name='', min=1, max=300, default=60, description='5th Window Width',
                                 update=update_window)
    gnx5 = bpy.props.IntProperty(name='', min=1, max=300, default=60, description='6th Window Width',
                                 update=update_window)
    gnx6 = bpy.props.IntProperty(name='', min=1, max=300, default=60, description='7th Window Width',
                                 update=update_window)
    gnx7 = bpy.props.IntProperty(name='', min=1, max=300, default=60, description='8th Window Width',
                                 update=update_window)
    # Individual row heights, one per vertical pane (gny0..gny4).
    gny0 = bpy.props.IntProperty(name='', min=1, max=300, default=190, description='1st Row Height',
                                 update=update_window)
    gny1 = bpy.props.IntProperty(name='', min=1, max=300, default=45, description='2nd Row Height',
                                 update=update_window)
    gny2 = bpy.props.IntProperty(name='', min=1, max=300, default=45, description='3rd Row Height',
                                 update=update_window)
    gny3 = bpy.props.IntProperty(name='', min=1, max=300, default=45, description='4th Row Height',
                                 update=update_window)
    gny4 = bpy.props.IntProperty(name='', min=1, max=300, default=45, description='5th Row Height',
                                 update=update_window)
    # Per-pane toggles k<row><col> (5 rows x 8 columns).  When True the
    # pane is built with a sash frame (``kapak``) in front of the glazing
    # bead (``fitil``) in the mesh builder; when False it gets the bead
    # only.  The edit panel draws rows top-first.
    k00 = bpy.props.BoolProperty(name='', default=True, update=update_window)
    k01 = bpy.props.BoolProperty(name='', default=False, update=update_window)
    k02 = bpy.props.BoolProperty(name='', default=True, update=update_window)
    k03 = bpy.props.BoolProperty(name='', default=False, update=update_window)
    k04 = bpy.props.BoolProperty(name='', default=False, update=update_window)
    k05 = bpy.props.BoolProperty(name='', default=False, update=update_window)
    k06 = bpy.props.BoolProperty(name='', default=False, update=update_window)
    k07 = bpy.props.BoolProperty(name='', default=False, update=update_window)
    k10 = bpy.props.BoolProperty(name='', default=False, update=update_window)
    k11 = bpy.props.BoolProperty(name='', default=False, update=update_window)
    k12 = bpy.props.BoolProperty(name='', default=False, update=update_window)
    k13 = bpy.props.BoolProperty(name='', default=False, update=update_window)
    k14 = bpy.props.BoolProperty(name='', default=False, update=update_window)
    k15 = bpy.props.BoolProperty(name='', default=False, update=update_window)
    k16 = bpy.props.BoolProperty(name='', default=False, update=update_window)
    k17 = bpy.props.BoolProperty(name='', default=False, update=update_window)
    k20 = bpy.props.BoolProperty(name='', default=False, update=update_window)
    k21 = bpy.props.BoolProperty(name='', default=False, update=update_window)
    k22 = bpy.props.BoolProperty(name='', default=False, update=update_window)
    k23 = bpy.props.BoolProperty(name='', default=False, update=update_window)
    k24 = bpy.props.BoolProperty(name='', default=False, update=update_window)
    k25 = bpy.props.BoolProperty(name='', default=False, update=update_window)
    k26 = bpy.props.BoolProperty(name='', default=False, update=update_window)
    k27 = bpy.props.BoolProperty(name='', default=False, update=update_window)
    k30 = bpy.props.BoolProperty(name='', default=False, update=update_window)
    k31 = bpy.props.BoolProperty(name='', default=False, update=update_window)
    k32 = bpy.props.BoolProperty(name='', default=False, update=update_window)
    k33 = bpy.props.BoolProperty(name='', default=False, update=update_window)
    k34 = bpy.props.BoolProperty(name='', default=False, update=update_window)
    k35 = bpy.props.BoolProperty(name='', default=False, update=update_window)
    k36 = bpy.props.BoolProperty(name='', default=False, update=update_window)
    k37 = bpy.props.BoolProperty(name='', default=False, update=update_window)
    k40 = bpy.props.BoolProperty(name='', default=False, update=update_window)
    k41 = bpy.props.BoolProperty(name='', default=False, update=update_window)
    k42 = bpy.props.BoolProperty(name='', default=False, update=update_window)
    k43 = bpy.props.BoolProperty(name='', default=False, update=update_window)
    k44 = bpy.props.BoolProperty(name='', default=False, update=update_window)
    k45 = bpy.props.BoolProperty(name='', default=False, update=update_window)
    k46 = bpy.props.BoolProperty(name='', default=False, update=update_window)
    k47 = bpy.props.BoolProperty(name='', default=False, update=update_window)
# Register the settings group and expose it as a per-object collection so
# every generated window object carries its own generator parameters.
bpy.utils.register_class(GeneralProperties)
bpy.types.Object.WindowGenerator = bpy.props.CollectionProperty(type=GeneralProperties)
# ------------------------------------------------------------------
# Define panel class to modify myobjects.
# ------------------------------------------------------------------
class WindowEditPanel(bpy.types.Panel):
    """3D View sidebar panel for editing a generated window object."""
    bl_idname = "window.edit_panel"
    bl_label = "Window Properties"
    bl_space_type = 'VIEW_3D'
    bl_region_type = 'UI'
    bl_category = 'Window'

    # -----------------------------------------------------
    # Draw (create UI interface)
    # -----------------------------------------------------
    def draw(self, context):
        o = context.object
        # Only show this panel for objects created by the window generator;
        # the broad guard also covers o being None (no active object).
        # noinspection PyBroadException
        try:
            if 'WindowGenerator' not in o:
                return
        except:
            return
        layout = self.layout
        if bpy.context.mode == 'EDIT_MESH':
            layout.label('Warning: Operator does not work in edit mode.', icon='ERROR')
        else:
            # The generator stores exactly one settings entry per object.
            myobject = o.WindowGenerator[0]
            layout.prop(myobject, 'prs')
            box = layout.box()
            box.prop(myobject, 'gen')
            box.prop(myobject, 'yuk')
            box.prop(myobject, 'kl1')
            box.prop(myobject, 'kl2')
            box.prop(myobject, 'fk')
            # Sill measurements are only shown while the sill is enabled.
            box.prop(myobject, 'mr')
            if myobject.mr is True:
                row = box.row()
                row.prop(myobject, 'mr1')
                row.prop(myobject, 'mr2')
                row = box.row()
                row.prop(myobject, 'mr3')
                row.prop(myobject, 'mr4')
            row = layout.row()
            row.label('Frame')
            row.label('Inner Frame')
            row = layout.row()
            row.prop(myobject, 'mt1')
            row.prop(myobject, 'mt2')
            # Upper-section type; each type exposes its own measurement mode
            # (DT2 for the arch, DT3 for inclined/triangle) and value field.
            box.prop(myobject, 'UST')
            if myobject.UST == '2':  # Arch
                row = box.row()
                row.prop(myobject, 'DT2')
                if myobject.DT2 == '1':
                    row.prop(myobject, 'VL1')
                elif myobject.DT2 == '2':
                    row.prop(myobject, 'VL2')
                box.prop(myobject, 'res')
            elif myobject.UST == '3':  # Inclined
                row = box.row()
                row.prop(myobject, 'DT3')
                if myobject.DT3 == '1':
                    row.prop(myobject, 'VL1')
                elif myobject.DT3 == '2':
                    row.prop(myobject, 'VL3')
                elif myobject.DT3 == '3':
                    row.prop(myobject, 'VL4')
            elif myobject.UST == '4':  # Triangle
                row = box.row()
                row.prop(myobject, 'DT3')
                if myobject.DT3 == '1':
                    row.prop(myobject, 'VL1')
                elif myobject.DT3 == '2':
                    row.prop(myobject, 'VL3')
                elif myobject.DT3 == '3':
                    row.prop(myobject, 'VL4')
            # One width field per horizontal pane.
            row = layout.row()
            for i in range(0, myobject.gen):
                row.prop(myobject, 'gnx' + str(i))
            # One row per vertical pane, drawn top row first: the row height
            # followed by that row's per-pane casement toggles (k<row><col>).
            for j in range(0, myobject.yuk):
                row = layout.row()
                row.prop(myobject, 'gny' + str(myobject.yuk - j - 1))
                for i in range(0, myobject.gen):
                    row.prop(myobject, 'k' + str(myobject.yuk - j - 1) + str(i))
from nflpool.viewmodels.viewmodelbase import ViewModelBase
class UpdateWeeklyStats(ViewModelBase):
    """View model backing the weekly player-stats update form.

    Holds one attribute per stat field; ``from_dict`` copies the matching
    keys out of a request dictionary, leaving missing keys as ``None``.
    """

    # Keys expected in the incoming dictionary; each maps one-to-one onto
    # an attribute of the same name.
    _FIELDS = (
        "player_id",
        "passyds",
        "rushyds",
        "recyds",
        "sacks",
        "interceptions",
        "week",
        "season",
    )

    def __init__(self):
        # Start every field as None so templates can test for "not set".
        for field_name in self._FIELDS:
            setattr(self, field_name, None)

    def from_dict(self, data_dict):
        """Populate all fields from *data_dict* (missing keys become None)."""
        for field_name in self._FIELDS:
            setattr(self, field_name, data_dict.get(field_name))
| prcutler/nflpool | nflpool/viewmodels/update_weekly_stats_viewmodel.py | Python | mit | 773 |
# Canonical version string for the package (single source of truth).
__version__ = '19.6.2'
| mlaidouni/Artistfunds | venv/lib/python2.7/site-packages/setuptools/version.py | Python | mit | 23 |
#!/usr/bin/env python
# Copyright 2004-present Facebook. All Rights Reserved.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import ast
import jinja2
import logging
import os
import sys
# Set DEVELOPING to True to enable extra debug statements in this generator.
DEVELOPING = False
# Message format used when configuring the stdlib logging module.
LOG_FORMAT = "%(levelname)s [Line %(lineno)d]: %(message)s"
# IMPL_TEMPLATE is the jinja template used to generate the virtual table
# implementation (.cpp) file. TableState.generate() renders it with the
# table name, the column schema and the implementation function gathered
# from the spec file; the rendered output registers the table with
# osquery's sqlite3 virtual-table machinery.
IMPL_TEMPLATE = """// Copyright 2004-present Facebook. All Rights Reserved.
/*
** This file is generated. Do not modify it manually!
*/
#include <cstring>
#include <string>
#include <vector>
#include <boost/lexical_cast.hpp>
#include "osquery/database.h"
#include "osquery/tables/base.h"
#include "osquery/registry/registry.h"
namespace osquery { namespace tables {
{% if class_name == "" %}
osquery::QueryData {{function}}();
{% else %}
class {{class_name}} {
public:
static osquery::QueryData {{function}}();
};
{% endif %}
struct sqlite3_{{table_name}} {
int n;
{% for col in schema %}\
std::vector<{{col.type}}> {{col.name}};
{% endfor %}\
};
const std::string
sqlite3_{{table_name}}_create_table_statement =
"CREATE TABLE {{table_name}}("
{% for col in schema %}\
"{{col.name}} \
{% if col.type == "std::string" %}VARCHAR{% endif %}\
{% if col.type == "int" %}INTEGER{% endif %}\
{% if col.type == "long long int" %}BIGINT{% endif %}\
{% if not loop.last %}, {% endif %}"
{% endfor %}\
")";
int {{table_name_cc}}Create(
sqlite3 *db,
void *pAux,
int argc,
const char *const *argv,
sqlite3_vtab **ppVtab,
char **pzErr
) {
return xCreate<
x_vtab<sqlite3_{{table_name}}>,
sqlite3_{{table_name}}
>(
db, pAux, argc, argv, ppVtab, pzErr,
sqlite3_{{table_name}}_create_table_statement.c_str()
);
}
int {{table_name_cc}}Column(
sqlite3_vtab_cursor *cur,
sqlite3_context *ctx,
int col
) {
base_cursor *pCur = (base_cursor*)cur;
x_vtab<sqlite3_{{table_name}}> *pVtab =
(x_vtab<sqlite3_{{table_name}}>*)cur->pVtab;
if(pCur->row >= 0 && pCur->row < pVtab->pContent->n) {
switch (col) {
{% for col in schema %}\
// {{ col.name }}
case {{ loop.index0 }}:
{% if col.type == "std::string" %}\
sqlite3_result_text(
ctx,
(pVtab->pContent->{{col.name}}[pCur->row]).c_str(),
-1,
nullptr
);
{% endif %}\
{% if col.type == "int" %}\
sqlite3_result_int(
ctx,
(int)pVtab->pContent->{{col.name}}[pCur->row]
);
{% endif %}\
{% if col.type == "long long int" %}\
sqlite3_result_int64(
ctx,
(long long int)pVtab->pContent->{{col.name}}[pCur->row]
);
{% endif %}\
break;
{% endfor %}\
}
}
return SQLITE_OK;
}
int {{table_name_cc}}Filter(
sqlite3_vtab_cursor *pVtabCursor,
int idxNum,
const char *idxStr,
int argc,
sqlite3_value **argv
) {
base_cursor *pCur = (base_cursor *)pVtabCursor;
x_vtab<sqlite3_{{table_name}}> *pVtab =
(x_vtab<sqlite3_{{table_name}}>*)pVtabCursor->pVtab;
pCur->row = 0;
{% for col in schema %}\
pVtab->pContent->{{col.name}}.clear();
{% endfor %}\
{% if class_name != "" %}
for (auto& row : osquery::tables::{{class_name}}::{{function}}()) {
{% else %}
for (auto& row : osquery::tables::{{function}}()) {
{% endif %}
{% for col in schema %}\
{% if col.type == "std::string" %}\
pVtab->pContent->{{col.name}}.push_back(row["{{col.name}}"]);
{% endif %}\
{% if col.type == "int" %}\
try {
pVtab->pContent->{{col.name}}\
.push_back(boost::lexical_cast<int>(row["{{col.name}}"]));
} catch (const boost::bad_lexical_cast& e) {
LOG(WARNING) << "Error casting " << row["{{col.name}}"] << " to int";
pVtab->pContent->{{col.name}}.push_back(-1);
}
{% endif %}\
{% if col.type == "long long int" %}\
try {
pVtab->pContent->{{col.name}}\
.push_back(boost::lexical_cast<long long>(row["{{col.name}}"]));
} catch (const boost::bad_lexical_cast& e) {
LOG(WARNING) << "Error casting " << row["{{col.name}}"] << " to long long int";
pVtab->pContent->{{col.name}}.push_back(-1);
}
{% endif %}\
{% endfor %}\
}
pVtab->pContent->n = pVtab->pContent->{{schema[0].name}}.size();
return SQLITE_OK;
}
static sqlite3_module {{table_name_cc}}Module = {
0,
{{table_name_cc}}Create,
{{table_name_cc}}Create,
xBestIndex,
xDestroy<x_vtab<sqlite3_{{table_name}}>>,
xDestroy<x_vtab<sqlite3_{{table_name}}>>,
xOpen<base_cursor>,
xClose<base_cursor>,
{{table_name_cc}}Filter,
xNext<base_cursor>,
xEof<base_cursor, x_vtab<sqlite3_{{table_name}}>>,
{{table_name_cc}}Column,
xRowid<base_cursor>,
0,
0,
0,
0,
0,
0,
0,
};
class {{table_name_cc}}TablePlugin : public TablePlugin {
public:
{{table_name_cc}}TablePlugin() {}
int attachVtable(sqlite3 *db) {
return sqlite3_attach_vtable<sqlite3_{{table_name}}>(
db, "{{table_name}}", &{{table_name_cc}}Module);
}
virtual ~{{table_name_cc}}TablePlugin() {}
};
REGISTER_TABLE(
"{{table_name}}",
std::make_shared<{{table_name_cc}}TablePlugin>()
);
}}
"""
def usage():
    """Print how this generator script should be invoked."""
    message = "Usage: %s <spec.table> <file.cpp>" % sys.argv[0]
    print(message)
def to_camel_case(snake_case):
    """Convert a snake_case string to its camelCase equivalent."""
    parts = snake_case.split('_')
    camel = parts[0]
    # Title-case every component after the first and append it.
    for word in parts[1:]:
        camel += word.title()
    return camel
class Singleton(object):
    """
    Base class guaranteeing that anything which subclasses Singleton is
    only ever instantiated once; later instantiations return the same
    object.
    """

    _instance = None

    def __new__(cls, *args, **kwargs):
        # Reuse the cached instance when one already exists.
        if cls._instance:
            return cls._instance
        # First instantiation: create and cache the shared instance.
        cls._instance = super(Singleton, cls).__new__(cls, *args, **kwargs)
        return cls._instance
class TableState(Singleton):
    """
    Maintain the state of the table commands during the execution of
    the config (spec) file.
    """
    def __init__(self):
        self.table_name = ""   # SQL name of the virtual table
        self.schema = []       # ordered list of Column objects
        self.header = ""
        self.impl = ""         # path of the C++ implementation file
        self.function = ""     # name of the QueryData-producing function
        self.class_name = ""   # optional class that owns the function ("" if free function)

    def generate(self, path):
        """Render IMPL_TEMPLATE with the collected state and write it to path.

        Missing intermediate directories of path are created first.
        """
        logging.debug("TableState.generate")
        self.impl_content = jinja2.Template(IMPL_TEMPLATE).render(
            table_name=self.table_name,
            table_name_cc=to_camel_case(self.table_name),
            schema=self.schema,
            header=self.header,
            impl=self.impl,
            function=self.function,
            class_name=self.class_name
        )
        # Create the whole destination directory tree in one portable call
        # instead of mkdir-ing each "/"-separated component by hand.
        dir_path = os.path.dirname(path)
        if dir_path and not os.path.isdir(dir_path):
            os.makedirs(dir_path)
        logging.debug("generating %s" % path)
        with open(path, "w+") as file_h:
            file_h.write(self.impl_content)

# The single shared state object mutated by the spec-file commands below.
table = TableState()
class Column(object):
    """
    One column of a virtual table. A small object is used (instead of a
    dict entry) so that the column order given in the spec file is kept.
    """

    def __init__(self, **kwargs):
        # Both attributes default to the empty string when not supplied.
        for attr in ("name", "type"):
            setattr(self, attr, kwargs.get(attr, ""))
def table_name(name):
    """Spec-file command: set the name of the virtual table."""
    logging.debug("- table_name")
    # Lazy %-style args: the message is only formatted if DEBUG is enabled.
    logging.debug(" - called with: %s", name)
    table.table_name = name
def schema(schema_list):
    """
    Spec-file command: record the list of Column objects that make up the
    columns of the virtual table.
    """
    logging.debug("- schema")
    for column in schema_list:
        # Lazy formatting: only rendered when DEBUG logging is on.
        logging.debug(" - %s (%s)", column.name, column.type)
    table.schema = schema_list
def implementation(impl_string):
    """
    Spec-file command: define the path of the implementation file and the
    function which implements the virtual table, in the format:

        # the path is "osquery/table/implementations/foo.cpp"
        # the function is "QueryData genFoo();"
        implementation("foo@genFoo")

    A class-qualified function ("foo@MyClass::genFoo") is also accepted.
    """
    logging.debug("- implementation")
    filename, function = impl_string.split("@")
    # The function name is the last "::" component; the class (if any) is
    # the component right before it.
    qualifiers = function.split("::")
    function = qualifiers[-1]
    class_name = qualifiers[-2] if len(qualifiers) > 1 else ""
    impl = "%s.cpp" % filename
    logging.debug(" - impl => %s", impl)
    logging.debug(" - function => %s", function)
    logging.debug(" - class_name => %s", class_name)
    table.impl = impl
    table.function = function
    table.class_name = class_name
def main(argc, argv):
    """Entry point: execute a table spec file and generate the C++ output.

    argv[1] is the spec file (a Python DSL using the table_name()/schema()/
    implementation() commands defined above); argv[2] is the output path of
    the generated .cpp file. Exits with status 1 on bad usage.
    """
    if DEVELOPING:
        logging.basicConfig(format=LOG_FORMAT, level=logging.DEBUG)
    else:
        logging.basicConfig(format=LOG_FORMAT, level=logging.INFO)
    if argc < 3:
        usage()
        sys.exit(1)
    filename = argv[1]
    output = argv[2]
    # "rU" (explicit universal-newline mode) was deprecated and finally
    # removed in Python 3.11; plain "r" already gives universal-newline
    # text mode on Python 3.
    with open(filename, "r") as file_handle:
        tree = ast.parse(file_handle.read())
    # NOTE: the spec file is executed as arbitrary Python, so it must come
    # from a trusted source (it is part of the build tree, never external
    # input).
    exec(compile(tree, "<string>", "exec"))
    table.generate(output)

if __name__ == "__main__":
    main(len(sys.argv), sys.argv)
| subailong/testgit | tools/gentable.py | Python | bsd-3-clause | 9,148 |
class Person:
    """A general employee record: name, optional job title, and pay."""

    def __init__(self, name, job=None, pay=0):
        self.name = name
        self.job = job
        self.pay = pay

    def lastName(self):
        """Return the final whitespace-separated component of the name."""
        name_parts = self.name.split()
        return name_parts[-1]

    def giveRaise(self, percent):
        """Raise pay by the given fraction, truncating the result to int."""
        self.pay = int((1 + percent) * self.pay)

    def __repr__(self):
        """Display format used whenever a Person is printed."""
        return '[Person: %s, %s]' % (self.name, self.pay)
class Manager(Person):
    """A Person whose job is always 'mgr' and whose raises get a bonus."""

    def __init__(self, name, pay):
        # Delegate to Person, filling in the job title automatically.
        Person.__init__(self, name, job='mgr', pay=pay)

    def giveRaise(self, percent, bonus=.10):
        # A manager's raise is the requested percent plus a fixed bonus.
        Person.giveRaise(self, percent + bonus)
class Department:
    """A composite that aggregates people and operates on them as a group."""

    def __init__(self, *args):
        self.members = list(args)

    def addMember(self, person):
        self.members.append(person)

    def giveRaises(self, percent):
        # Delegate the raise to each embedded object's own giveRaise.
        for member in self.members:
            member.giveRaise(percent)

    def showAll(self):
        # Printing relies on each member's __repr__ for its display format.
        for member in self.members:
            print(member)
if __name__ == '__main__':
    # Self-test: build a small department and exercise the composite API.
    bob = Person('Bob Smith')
    sue = Person('Sue Jones', job='dev', pay=100000)
    tom = Manager('Tom Jones', 50000)
    development = Department(bob, sue)  # Embed objects in a composite
    development.addMember(tom)
    development.giveRaises(.10)  # Runs embedded objects' giveRaise
    development.showAll()  # Runs embedded objects' __repr__
| dreadrel/UWF_2014_spring_COP3990C-2507 | notebooks/scripts/book_code/code/person-department.py | Python | apache-2.0 | 1,409 |
#!/usr/bin/env python
# Django management command-line utility for the "asnn" project.
import os
import sys

if __name__ == "__main__":
    # Point Django at this project's settings unless the caller already
    # set DJANGO_SETTINGS_MODULE in the environment.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "asnn.settings")

    from django.core.management import execute_from_command_line

    # Dispatch to Django's management commands (runserver, migrate, ...).
    execute_from_command_line(sys.argv)
| marvinglenn/asnn-mda | manage.py | Python | gpl-2.0 | 247 |
# vim:sw=4:et:
"""This module contains some helpers that can be used to execute generator
functions in the GObject main loop.
This module provided the following classes:
GIdleThread - Thread like behavior for generators in a main loop
Queue - A simple queue implementation suitable for use with GIdleThread
Exceptions:
QueueEmpty - raised when one tried to get a value of an empty queue
QueueFull - raised when the queue reaches its max size and the oldest item
may not be disposed.
"""
from __future__ import generators
import gobject
import time
import traceback
class GIdleThread(object):
    """This is a pseudo-"thread" for use with the GTK+ main loop.

    This class does act a bit like a thread, but all code is executed in
    the caller's thread. The provided function should be a generator
    (or iterator).

    It can be started with start(). While the "thread" is running,
    is_alive() can be called to see if it's alive. wait([timeout]) will
    wait till the generator is finished, or timeout seconds.

    If an exception is raised from within the generator, it is stored in
    the error property and execution of the generator is finished.

    Note that this routine runs in the current thread, so there is no need
    for nasty locking schemes.

    Example (runs a counter through the GLib main loop routine):

    >>> def counter(max):
    ...     for x in xrange(max):
    ...         yield x
    >>> t = GIdleThread(counter(123))
    >>> t.start()
    >>> while t.is_alive():
    ...     main.iteration(False)
    """

    def __init__(self, generator, queue=None):
        # NOTE: 'next' is the Python 2 iterator method; this module is
        # Python 2 only (see the 'except Exception, e' syntax below).
        assert hasattr(generator, 'next'), 'The generator should be an iterator'
        self._generator = generator
        self._queue = queue
        # 0 means "not scheduled"; any nonzero value is the GLib source id.
        self._idle_id = 0
        # Holds the exception (if any) raised inside the generator.
        self._error = None

    def start(self, priority=gobject.PRIORITY_LOW):
        """Start the generator. Default priority is low, so screen updates
        will be allowed to happen.

        Returns the GLib idle-source id of the scheduled callback.
        """
        idle_id = gobject.idle_add(self.__generator_executer,
                                   priority=priority)
        self._idle_id = idle_id
        return idle_id

    def wait(self, timeout=0):
        """Wait until the coroutine is finished or return after timeout
        seconds. This is achieved by running the GTK+ main loop.

        A timeout of 0 means "wait until the generator finishes".
        """
        # NOTE(review): time.clock() was removed in Python 3.8; fine for the
        # Python 2 environment this module targets.
        clock = time.clock
        start_time = clock()
        main = gobject.main_context_default()
        while self.is_alive():
            main.iteration(False)
            if timeout and (clock() - start_time >= timeout):
                return

    def interrupt(self):
        """Force the generator to stop running by unscheduling its callback.
        """
        if self.is_alive():
            gobject.source_remove(self._idle_id)
            self._idle_id = 0

    def is_alive(self):
        """Returns True if the generator is still running.
        """
        return self._idle_id != 0

    error = property(lambda self: self._error,
                     doc="Return a possible exception that had occured "\
                         "during execution of the generator")

    def __generator_executer(self):
        # Idle callback: advance the generator one step per main-loop
        # iteration. Returning True reschedules the callback; False stops.
        try:
            result = self._generator.next()
            if self._queue:
                try:
                    self._queue.put(result)
                except QueueFull:
                    # Give the consumer some main-loop time to drain items.
                    self.wait(0.5)
                    # If this doesn't work...
                    self._queue.put(result)
            return True
        except StopIteration:
            # Normal termination of the generator.
            self._idle_id = 0
            return False
        except Exception, e:
            # Record the failure for the 'error' property and stop.
            self._error = e
            traceback.print_exc()
            self._idle_id = 0
            return False
class QueueEmpty(Exception):
    """Raised when a value is requested from a queue that holds no items."""
class QueueFull(Exception):
    """Raised when the queue is at capacity and the oldest item may not be
    dropped to make room."""
class Queue(object):
    """A FIFO queue with an optional maximum size.

    When a maximum size is set and reached, either the oldest entry is
    silently dropped (dispose_oldest=True, the default) or QueueFull is
    raised (dispose_oldest=False).
    """

    def __init__(self, size=0, dispose_oldest=True):
        self._queue = []               # newest item at index 0, oldest at the end
        self._size = size              # 0 means "unbounded"
        self._dispose_oldest = dispose_oldest

    def put(self, item):
        """Add item to the queue, honouring the configured size limit."""
        if self._size > 0 and len(self._queue) >= self._size:
            if not self._dispose_oldest:
                raise QueueFull
            # Make room by discarding the oldest entry.
            self.get()
        self._queue.insert(0, item)

    def get(self):
        """Remove and return the oldest item; raise QueueEmpty if none."""
        if not self._queue:
            raise QueueEmpty
        return self._queue.pop()
if __name__ == '__main__':
    # Manual demo: pump two cooperating generators through the GLib main
    # loop — one producing values into a Queue, one consuming and printing.

    def counter(max):
        # Yield 0..max-1, one value per main-loop iteration.
        for i in range(max):
            yield i

    def shower(queue):
        # Never stop reading the queue:
        while True:
            try:
                cnt = queue.get()
                print 'cnt =', cnt
            except QueueEmpty:
                pass
            yield None

    print 'Test 1: (should print range 0..22)'
    queue = Queue()
    c = GIdleThread(counter(23), queue)
    s = GIdleThread(shower(queue))
    main = gobject.main_context_default()
    c.start()
    s.start()
    s.wait(2)

    # With size=1 the producer (running at higher priority) overwrites the
    # queue before the consumer runs, so only the last value survives.
    print 'Test 2: (should only print 22)'
    queue = Queue(size=1)
    c = GIdleThread(counter(23), queue)
    s = GIdleThread(shower(queue))
    main = gobject.main_context_default()
    c.start(priority=gobject.PRIORITY_DEFAULT)
    s.start()
    s.wait(3)
| ActiveState/code | recipes/Python/327082_Pseudo_threads_generators/recipe-327082.py | Python | mit | 5,662 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for MultivariateNormal."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib import distributions
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
ds = distributions
class MultivariateNormalDiagPlusLowRankTest(test.TestCase):
  """Well tested because this is a simple override of the base class."""

  def setUp(self):
    # Fixed seed so any randomized helpers are reproducible across runs.
    self._rng = np.random.RandomState(42)

  def testDiagBroadcastBothBatchAndEvent(self):
    # batch_shape: [3], event_shape: [2]
    diag = np.array([[1., 2], [3, 4], [5, 6]])
    # batch_shape: [1], event_shape: []
    identity_multiplier = np.array([5.])
    with self.cached_session():
      dist = ds.MultivariateNormalDiagPlusLowRank(
          scale_diag=diag,
          scale_identity_multiplier=identity_multiplier,
          validate_args=True)
      # The dense scale should be diag(scale_diag) + multiplier * I, with
      # the scalar multiplier broadcast across all three batch members.
      self.assertAllClose(
          np.array([[[1. + 5, 0],
                     [0, 2 + 5]],
                    [[3 + 5, 0],
                     [0, 4 + 5]],
                    [[5 + 5, 0],
                     [0, 6 + 5]]]),
          dist.scale.to_dense().eval())

  def testDiagBroadcastBothBatchAndEvent2(self):
    # This test differs from `testDiagBroadcastBothBatchAndEvent` in that it
    # broadcasts batch_shape's from both the `scale_diag` and
    # `scale_identity_multiplier` args.
    # batch_shape: [3], event_shape: [2]
    diag = np.array([[1., 2], [3, 4], [5, 6]])
    # batch_shape: [3, 1], event_shape: []
    identity_multiplier = np.array([[5.], [4], [3]])
    with self.cached_session():
      dist = ds.MultivariateNormalDiagPlusLowRank(
          scale_diag=diag,
          scale_identity_multiplier=identity_multiplier,
          validate_args=True)
      # Only the broadcast *shape* is checked here, not the values.
      self.assertAllEqual(
          [3, 3, 2, 2],
          dist.scale.to_dense().get_shape())

  def testDiagBroadcastOnlyEvent(self):
    # batch_shape: [3], event_shape: [2]
    diag = np.array([[1., 2], [3, 4], [5, 6]])
    # batch_shape: [3], event_shape: []
    identity_multiplier = np.array([5., 4, 3])
    with self.cached_session():
      dist = ds.MultivariateNormalDiagPlusLowRank(
          scale_diag=diag,
          scale_identity_multiplier=identity_multiplier,
          validate_args=True)
      # Each batch member pairs its own diag row with its own multiplier.
      self.assertAllClose(
          np.array([[[1. + 5, 0],
                     [0, 2 + 5]],
                    [[3 + 4, 0],
                     [0, 4 + 4]],
                    [[5 + 3, 0],
                     [0, 6 + 3]]]),  # shape: [3, 2, 2]
          dist.scale.to_dense().eval())

  def testDiagBroadcastMultiplierAndLoc(self):
    # batch_shape: [], event_shape: [3]
    loc = np.array([1., 0, -1])
    # batch_shape: [3], event_shape: []
    identity_multiplier = np.array([5., 4, 3])
    with self.cached_session():
      dist = ds.MultivariateNormalDiagPlusLowRank(
          loc=loc,
          scale_identity_multiplier=identity_multiplier,
          validate_args=True)
      # With no scale_diag, each batch member's scale is multiplier_i * I.
      self.assertAllClose(
          np.array([[[5, 0, 0],
                     [0, 5, 0],
                     [0, 0, 5]],
                    [[4, 0, 0],
                     [0, 4, 0],
                     [0, 0, 4]],
                    [[3, 0, 0],
                     [0, 3, 0],
                     [0, 0, 3]]]),
          dist.scale.to_dense().eval())

  def testMean(self):
    mu = [-1.0, 1.0]
    diag_large = [1.0, 5.0]
    v = [[2.0], [3.0]]
    diag_small = [3.0]
    with self.cached_session():
      dist = ds.MultivariateNormalDiagPlusLowRank(
          loc=mu,
          scale_diag=diag_large,
          scale_perturb_factor=v,
          scale_perturb_diag=diag_small,
          validate_args=True)
      # The mean of an MVN is exactly its loc parameter, whatever the scale.
      self.assertAllEqual(mu, dist.mean().eval())

  def testSample(self):
    # TODO(jvdillon): This test should be the basis of a new test fixture which
    # is applied to every distribution. When we make this fixture, we'll also
    # separate the analytical- and sample-based tests as well as for each
    # function tested. For now, we group things so we can recycle one batch of
    # samples (thus saving resources).

    mu = np.array([-1., 1, 0.5], dtype=np.float32)
    diag_large = np.array([1., 0.5, 0.75], dtype=np.float32)
    diag_small = np.array([-1.1, 1.2], dtype=np.float32)
    v = np.array([[0.7, 0.8],
                  [0.9, 1],
                  [0.5, 0.6]], dtype=np.float32)  # shape: [k, r] = [3, 2]

    # Ground truth via numpy: scale = diag(D) + V @ diag(d) @ V^T, and the
    # covariance/variance/stddev derived from that scale.
    true_mean = mu
    true_scale = np.diag(diag_large) + np.matmul(np.matmul(
        v, np.diag(diag_small)), v.T)
    true_covariance = np.matmul(true_scale, true_scale.T)
    true_variance = np.diag(true_covariance)
    true_stddev = np.sqrt(true_variance)

    with self.cached_session() as sess:
      dist = ds.MultivariateNormalDiagPlusLowRank(
          loc=mu,
          scale_diag=diag_large,
          scale_perturb_factor=v,
          scale_perturb_diag=diag_small,
          validate_args=True)

      # The following distributions will test the KL divergence calculation.
      mvn_identity = ds.MultivariateNormalDiag(
          loc=np.array([1., 2, 0.25], dtype=np.float32),
          validate_args=True)
      mvn_scaled = ds.MultivariateNormalDiag(
          loc=mvn_identity.loc,
          scale_identity_multiplier=2.2,
          validate_args=True)
      mvn_diag = ds.MultivariateNormalDiag(
          loc=mvn_identity.loc,
          scale_diag=np.array([0.5, 1.5, 1.], dtype=np.float32),
          validate_args=True)
      mvn_chol = ds.MultivariateNormalTriL(
          loc=np.array([1., 2, -1], dtype=np.float32),
          scale_tril=np.array([[6., 0, 0],
                               [2, 5, 0],
                               [1, 3, 4]], dtype=np.float32) / 10.,
          validate_args=True)

      scale = dist.scale.to_dense()

      # Monte-Carlo estimates of mean/covariance from one batch of samples.
      n = int(30e3)
      samps = dist.sample(n, seed=0)
      sample_mean = math_ops.reduce_mean(samps, 0)
      x = samps - sample_mean
      sample_covariance = math_ops.matmul(x, x, transpose_a=True) / n

      # Monte-Carlo KL estimates: E_p[log p - log q] against each reference
      # distribution, paired with the closed-form kl_divergence result.
      sample_kl_identity = math_ops.reduce_mean(
          dist.log_prob(samps) - mvn_identity.log_prob(samps), 0)
      analytical_kl_identity = ds.kl_divergence(dist, mvn_identity)

      sample_kl_scaled = math_ops.reduce_mean(
          dist.log_prob(samps) - mvn_scaled.log_prob(samps), 0)
      analytical_kl_scaled = ds.kl_divergence(dist, mvn_scaled)

      sample_kl_diag = math_ops.reduce_mean(
          dist.log_prob(samps) - mvn_diag.log_prob(samps), 0)
      analytical_kl_diag = ds.kl_divergence(dist, mvn_diag)

      sample_kl_chol = math_ops.reduce_mean(
          dist.log_prob(samps) - mvn_chol.log_prob(samps), 0)
      analytical_kl_chol = ds.kl_divergence(dist, mvn_chol)

      # Repeat the KL checks with a diagonal-MVN baseline as the "p" side.
      n = int(10e3)
      baseline = ds.MultivariateNormalDiag(
          loc=np.array([-1., 0.25, 1.25], dtype=np.float32),
          scale_diag=np.array([1.5, 0.5, 1.], dtype=np.float32),
          validate_args=True)
      samps = baseline.sample(n, seed=0)

      sample_kl_identity_diag_baseline = math_ops.reduce_mean(
          baseline.log_prob(samps) - mvn_identity.log_prob(samps), 0)
      analytical_kl_identity_diag_baseline = ds.kl_divergence(
          baseline, mvn_identity)

      sample_kl_scaled_diag_baseline = math_ops.reduce_mean(
          baseline.log_prob(samps) - mvn_scaled.log_prob(samps), 0)
      analytical_kl_scaled_diag_baseline = ds.kl_divergence(
          baseline, mvn_scaled)

      sample_kl_diag_diag_baseline = math_ops.reduce_mean(
          baseline.log_prob(samps) - mvn_diag.log_prob(samps), 0)
      analytical_kl_diag_diag_baseline = ds.kl_divergence(baseline, mvn_diag)

      sample_kl_chol_diag_baseline = math_ops.reduce_mean(
          baseline.log_prob(samps) - mvn_chol.log_prob(samps), 0)
      analytical_kl_chol_diag_baseline = ds.kl_divergence(baseline, mvn_chol)

      # Evaluate every tensor in a single session run so all quantities come
      # from the same graph execution.
      [
          sample_mean_,
          analytical_mean_,
          sample_covariance_,
          analytical_covariance_,
          analytical_variance_,
          analytical_stddev_,
          scale_,
          sample_kl_identity_, analytical_kl_identity_,
          sample_kl_scaled_, analytical_kl_scaled_,
          sample_kl_diag_, analytical_kl_diag_,
          sample_kl_chol_, analytical_kl_chol_,
          sample_kl_identity_diag_baseline_,
          analytical_kl_identity_diag_baseline_,
          sample_kl_scaled_diag_baseline_, analytical_kl_scaled_diag_baseline_,
          sample_kl_diag_diag_baseline_, analytical_kl_diag_diag_baseline_,
          sample_kl_chol_diag_baseline_, analytical_kl_chol_diag_baseline_,
      ] = sess.run([
          sample_mean,
          dist.mean(),
          sample_covariance,
          dist.covariance(),
          dist.variance(),
          dist.stddev(),
          scale,
          sample_kl_identity, analytical_kl_identity,
          sample_kl_scaled, analytical_kl_scaled,
          sample_kl_diag, analytical_kl_diag,
          sample_kl_chol, analytical_kl_chol,
          sample_kl_identity_diag_baseline,
          analytical_kl_identity_diag_baseline,
          sample_kl_scaled_diag_baseline, analytical_kl_scaled_diag_baseline,
          sample_kl_diag_diag_baseline, analytical_kl_diag_diag_baseline,
          sample_kl_chol_diag_baseline, analytical_kl_chol_diag_baseline,
      ])

      sample_variance_ = np.diag(sample_covariance_)
      sample_stddev_ = np.sqrt(sample_variance_)

      # Everything is logged at vlog level 2 to ease debugging of tolerance
      # failures without cluttering normal test output.
      logging.vlog(2, "true_mean:\n{} ".format(true_mean))
      logging.vlog(2, "sample_mean:\n{}".format(sample_mean_))
      logging.vlog(2, "analytical_mean:\n{}".format(analytical_mean_))

      logging.vlog(2, "true_covariance:\n{}".format(true_covariance))
      logging.vlog(2, "sample_covariance:\n{}".format(sample_covariance_))
      logging.vlog(2, "analytical_covariance:\n{}".format(
          analytical_covariance_))

      logging.vlog(2, "true_variance:\n{}".format(true_variance))
      logging.vlog(2, "sample_variance:\n{}".format(sample_variance_))
      logging.vlog(2, "analytical_variance:\n{}".format(analytical_variance_))

      logging.vlog(2, "true_stddev:\n{}".format(true_stddev))
      logging.vlog(2, "sample_stddev:\n{}".format(sample_stddev_))
      logging.vlog(2, "analytical_stddev:\n{}".format(analytical_stddev_))

      logging.vlog(2, "true_scale:\n{}".format(true_scale))
      logging.vlog(2, "scale:\n{}".format(scale_))

      logging.vlog(2, "kl_identity: analytical:{} sample:{}".format(
          analytical_kl_identity_, sample_kl_identity_))

      logging.vlog(2, "kl_scaled: analytical:{} sample:{}".format(
          analytical_kl_scaled_, sample_kl_scaled_))

      logging.vlog(2, "kl_diag: analytical:{} sample:{}".format(
          analytical_kl_diag_, sample_kl_diag_))

      logging.vlog(2, "kl_chol: analytical:{} sample:{}".format(
          analytical_kl_chol_, sample_kl_chol_))

      logging.vlog(
          2, "kl_identity_diag_baseline: analytical:{} sample:{}".format(
              analytical_kl_identity_diag_baseline_,
              sample_kl_identity_diag_baseline_))

      logging.vlog(
          2, "kl_scaled_diag_baseline: analytical:{} sample:{}".format(
              analytical_kl_scaled_diag_baseline_,
              sample_kl_scaled_diag_baseline_))

      logging.vlog(2, "kl_diag_diag_baseline: analytical:{} sample:{}".format(
          analytical_kl_diag_diag_baseline_,
          sample_kl_diag_diag_baseline_))

      logging.vlog(2, "kl_chol_diag_baseline: analytical:{} sample:{}".format(
          analytical_kl_chol_diag_baseline_,
          sample_kl_chol_diag_baseline_))

      # Sample-based quantities get a loose 2% tolerance; analytical ones
      # must match the numpy ground truth to ~1e-6.
      self.assertAllClose(true_mean, sample_mean_,
                          atol=0., rtol=0.02)
      self.assertAllClose(true_mean, analytical_mean_,
                          atol=0., rtol=1e-6)

      self.assertAllClose(true_covariance, sample_covariance_,
                          atol=0., rtol=0.02)
      self.assertAllClose(true_covariance, analytical_covariance_,
                          atol=0., rtol=1e-6)

      self.assertAllClose(true_variance, sample_variance_,
                          atol=0., rtol=0.02)
      self.assertAllClose(true_variance, analytical_variance_,
                          atol=0., rtol=1e-6)

      self.assertAllClose(true_stddev, sample_stddev_,
                          atol=0., rtol=0.02)
      self.assertAllClose(true_stddev, analytical_stddev_,
                          atol=0., rtol=1e-6)

      self.assertAllClose(true_scale, scale_,
                          atol=0., rtol=1e-6)

      self.assertAllClose(sample_kl_identity_, analytical_kl_identity_,
                          atol=0., rtol=0.02)
      self.assertAllClose(sample_kl_scaled_, analytical_kl_scaled_,
                          atol=0., rtol=0.02)
      self.assertAllClose(sample_kl_diag_, analytical_kl_diag_,
                          atol=0., rtol=0.02)
      self.assertAllClose(sample_kl_chol_, analytical_kl_chol_,
                          atol=0., rtol=0.02)

      self.assertAllClose(
          sample_kl_identity_diag_baseline_,
          analytical_kl_identity_diag_baseline_,
          atol=0., rtol=0.02)
      self.assertAllClose(
          sample_kl_scaled_diag_baseline_,
          analytical_kl_scaled_diag_baseline_,
          atol=0., rtol=0.02)
      self.assertAllClose(
          sample_kl_diag_diag_baseline_,
          analytical_kl_diag_diag_baseline_,
          atol=0., rtol=0.04)
      self.assertAllClose(
          sample_kl_chol_diag_baseline_,
          analytical_kl_chol_diag_baseline_,
          atol=0., rtol=0.02)

  def testImplicitLargeDiag(self):
    mu = np.array([[1., 2, 3],
                   [11, 22, 33]])  # shape: [b, k] = [2, 3]
    u = np.array([[[1., 2],
                   [3, 4],
                   [5, 6]],
                  [[0.5, 0.75],
                   [1, 0.25],
                   [1.5, 1.25]]])  # shape: [b, k, r] = [2, 3, 2]
    m = np.array([[0.1, 0.2],
                  [0.4, 0.5]])  # shape: [b, r] = [2, 2]
    # With no scale_diag/multiplier, the implicit "large diag" is identity:
    # scale = I + U @ diag(m) @ U^T, per batch member.
    scale = np.stack([
        np.eye(3) + np.matmul(np.matmul(u[0], np.diag(m[0])),
                              np.transpose(u[0])),
        np.eye(3) + np.matmul(np.matmul(u[1], np.diag(m[1])),
                              np.transpose(u[1])),
    ])
    cov = np.stack([np.matmul(scale[0], scale[0].T),
                    np.matmul(scale[1], scale[1].T)])
    logging.vlog(2, "expected_cov:\n{}".format(cov))
    with self.cached_session():
      mvn = ds.MultivariateNormalDiagPlusLowRank(
          loc=mu,
          scale_perturb_factor=u,
          scale_perturb_diag=m)
      self.assertAllClose(cov, mvn.covariance().eval(), atol=0., rtol=1e-6)
if __name__ == "__main__":
  # Run all test cases in this file under the TensorFlow test runner.
  test.main()
| kevin-coder/tensorflow-fork | tensorflow/contrib/distributions/python/kernel_tests/mvn_diag_plus_low_rank_test.py | Python | apache-2.0 | 15,463 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.