| code (string, 2-1.05M chars) | repo_name (string, 5-104 chars) | path (string, 4-251 chars) | language (1 class) | license (15 classes) | size (int32, 2-1.05M) |
|---|---|---|---|---|---|
import sublime
import sublime_plugin
import re
import os
import datetime
TMLP_DIR = 'templates'
KEY_SYNTAX = 'syntax'
KEY_FILE_EXT = 'extension'
IS_GTE_ST3 = int(sublime.version()) >= 3000
PACKAGE_NAME = 'new-file-pro'
PACKAGES_PATH = sublime.packages_path()
BASE_PATH = os.path.abspath(os.path.dirname(__file__))
class NewFileBase(sublime_plugin.WindowCommand):
def __init__(self, window):
super(NewFileBase, self).__init__(window)
def appendFileExtension(self, name, t):
tmp = name.split('.')
length = len(tmp)
s_ext = tmp[length - 1]
exts = {'css': 'css', 'html': 'html', 'js': 'js', 'json': 'json', 'php': 'php', 'php-class': 'php', 'php-interface': 'php', 'xml':'xml', 'python': 'python', 'ruby': 'ruby'}
try:
t_ext = exts[t]
if (s_ext == t_ext and length == 1) or s_ext != t_ext:
return name + '.' + t_ext
except KeyError:
pass
return name
def appendPHPExtension(self, name):
t = name.split('.')
length = len(t)
ext = t[length - 1]
if ext != "php":
return name + '.php'
return name
def get_code(self, type='text'):
code = ''
file_name = "%s.tmpl" % type
isIOError = False
if IS_GTE_ST3:
tmpl_dir = 'Packages/' + PACKAGE_NAME + '/' + TMLP_DIR + '/'
user_tmpl_dir = 'Packages/User/' + PACKAGE_NAME + '/' + TMLP_DIR + '/'
else:
tmpl_dir = os.path.join(PACKAGES_PATH, PACKAGE_NAME, TMLP_DIR)
user_tmpl_dir = os.path.join(PACKAGES_PATH, 'User', PACKAGE_NAME, TMLP_DIR)
self.user_tmpl_path = os.path.join(user_tmpl_dir, file_name)
self.tmpl_path = os.path.join(tmpl_dir, file_name)
if IS_GTE_ST3:
try:
code = sublime.load_resource(self.user_tmpl_path)
except IOError:
try:
code = sublime.load_resource(self.tmpl_path)
except IOError:
isIOError = True
else:
if os.path.isfile(self.user_tmpl_path):
code = self.open_file(self.user_tmpl_path)
elif os.path.isfile(self.tmpl_path):
code = self.open_file(self.tmpl_path)
else:
isIOError = True
if isIOError:
sublime.message_dialog('[Warning] No such file: ' + self.tmpl_path + ' or ' + self.user_tmpl_path)
return self.format_tag(code)
def format_tag(self, code):
win = sublime.active_window()
code = code.replace('\r', '') # replace \r\n -> \n
# format
settings = self.get_settings()
format = settings.get('date_format', '%Y-%m-%d')
date = datetime.datetime.now().strftime(format)
if not IS_GTE_ST3:
code = code.decode('utf8') # for st2 && Chinese characters
code = code.replace('${date}', date)
attr = settings.get('attr', {})
for key in attr:
code = code.replace('${%s}' % key, attr.get(key, ''))
if settings.get('enable_project_variables', False) and hasattr(win, 'extract_variables'):
variables = win.extract_variables()
for key in ['project_base_name', 'project_path', 'platform']:
code = code.replace('${%s}' % key, variables.get(key, ''))
code = re.sub(r"(?<!\\)\${(?!\d)", '\${', code)
return code
def open_file(self, path, mode='r'):
fp = open(path, mode)
code = fp.read()
fp.close()
return code
def get_settings(self, type=None):
settings = sublime.load_settings(PACKAGE_NAME + '.sublime-settings')
if not type:
return settings
opts = settings.get(type, [])
return opts
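# Illustrative sketch of how a template is rendered (the "author" key and the
# python.tmpl name are assumptions, not part of this plugin): given a
# templates/python.tmpl containing
#     # Created ${date} by ${author}
# format_tag() replaces ${date} using the configured date_format and ${author}
# from the "attr" mapping in new-file-pro.sublime-settings; any remaining ${
# not followed by a digit is escaped to \${ for the snippet engine.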
| KevinHoo/new-file-pro | commands/NewFileBase.py | Python | gpl-3.0 | 3,250 |
#!/usr/bin/env python
'''
create ardupilot terrain database files
'''
from MAVProxy.modules.mavproxy_map import srtm
import math, struct, os, sys
import crc16, time
# MAVLink sends 4x4 grids
TERRAIN_GRID_MAVLINK_SIZE = 4
# a 2k grid_block on disk contains 8x7 of the mavlink grids. Each
# grid block overlaps by one with its neighbour. This ensures that
# the altitude at any point can be calculated from a single grid
# block
TERRAIN_GRID_BLOCK_MUL_X = 7
TERRAIN_GRID_BLOCK_MUL_Y = 8
# this is the spacing between 32x28 grid blocks, in grid_spacing units
TERRAIN_GRID_BLOCK_SPACING_X = ((TERRAIN_GRID_BLOCK_MUL_X-1)*TERRAIN_GRID_MAVLINK_SIZE)
TERRAIN_GRID_BLOCK_SPACING_Y = ((TERRAIN_GRID_BLOCK_MUL_Y-1)*TERRAIN_GRID_MAVLINK_SIZE)
# giving a total grid size of a disk grid_block of 32x28
TERRAIN_GRID_BLOCK_SIZE_X = (TERRAIN_GRID_MAVLINK_SIZE*TERRAIN_GRID_BLOCK_MUL_X)
TERRAIN_GRID_BLOCK_SIZE_Y = (TERRAIN_GRID_MAVLINK_SIZE*TERRAIN_GRID_BLOCK_MUL_Y)
# format of grid on disk
TERRAIN_GRID_FORMAT_VERSION = 1
IO_BLOCK_SIZE = 2048
GRID_SPACING = 100
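# Sanity check of the geometry implied by the constants above (illustrative
# only): a disk block holds 28x32 height samples, while neighbouring blocks
# advance by only 24x28 samples, i.e. they overlap by exactly one 4x4 MAVLink grid.
assert TERRAIN_GRID_BLOCK_SIZE_X == 28 and TERRAIN_GRID_BLOCK_SIZE_Y == 32
assert TERRAIN_GRID_BLOCK_SIZE_X - TERRAIN_GRID_BLOCK_SPACING_X == TERRAIN_GRID_MAVLINK_SIZE
assert TERRAIN_GRID_BLOCK_SIZE_Y - TERRAIN_GRID_BLOCK_SPACING_Y == TERRAIN_GRID_MAVLINK_SIZE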
def to_float32(f):
'''emulate single precision float'''
return struct.unpack('f', struct.pack('f',f))[0]
LOCATION_SCALING_FACTOR = to_float32(0.011131884502145034)
LOCATION_SCALING_FACTOR_INV = to_float32(89.83204953368922)
def longitude_scale(lat):
'''get longitude scale factor'''
scale = to_float32(math.cos(to_float32(math.radians(lat))))
return max(scale, 0.01)
def get_distance_NE_e7(lat1, lon1, lat2, lon2):
'''get distance tuple between two positions in 1e7 format'''
return ((lat2 - lat1) * LOCATION_SCALING_FACTOR, (lon2 - lon1) * LOCATION_SCALING_FACTOR * longitude_scale(lat1*1.0e-7))
def add_offset(lat_e7, lon_e7, ofs_north, ofs_east):
'''add offset in meters to a position'''
dlat = int(float(ofs_north) * LOCATION_SCALING_FACTOR_INV)
dlng = int((float(ofs_east) * LOCATION_SCALING_FACTOR_INV) / longitude_scale(lat_e7*1.0e-7))
return (int(lat_e7+dlat), int(lon_e7+dlng))
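# Round-trip sketch (illustrative, using the script's default home location):
#   lat_e7, lon_e7 = add_offset(-353632610, 1491652300, 100.0, 100.0)
#   get_distance_NE_e7(-353632610, 1491652300, lat_e7, lon_e7)
#   # -> roughly (100.0, 100.0) metres north/east, up to integer rounding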
def east_blocks(lat_e7, lon_e7):
'''work out how many blocks per stride on disk'''
lat2_e7 = lat_e7
lon2_e7 = lon_e7 + 10*1000*1000
# shift another two blocks east to ensure room is available
lat2_e7, lon2_e7 = add_offset(lat2_e7, lon2_e7, 0, 2*GRID_SPACING*TERRAIN_GRID_BLOCK_SIZE_Y)
offset = get_distance_NE_e7(lat_e7, lon_e7, lat2_e7, lon2_e7)
return int(offset[1] / (GRID_SPACING*TERRAIN_GRID_BLOCK_SPACING_Y))
def pos_from_file_offset(lat_degrees, lon_degrees, file_offset):
'''return a lat/lon in 1e7 format given a file offset'''
ref_lat = int(lat_degrees*10*1000*1000)
ref_lon = int(lon_degrees*10*1000*1000)
stride = east_blocks(ref_lat, ref_lon)
blocks = file_offset // IO_BLOCK_SIZE
grid_idx_x = blocks // stride
grid_idx_y = blocks % stride
idx_x = grid_idx_x * TERRAIN_GRID_BLOCK_SPACING_X
idx_y = grid_idx_y * TERRAIN_GRID_BLOCK_SPACING_Y
offset = (idx_x * GRID_SPACING, idx_y * GRID_SPACING)
(lat_e7, lon_e7) = add_offset(ref_lat, ref_lon, offset[0], offset[1])
offset = get_distance_NE_e7(ref_lat, ref_lon, lat_e7, lon_e7)
grid_idx_x = int(idx_x / TERRAIN_GRID_BLOCK_SPACING_X)
grid_idx_y = int(idx_y / TERRAIN_GRID_BLOCK_SPACING_Y)
(lat_e7, lon_e7) = add_offset(ref_lat, ref_lon,
grid_idx_x * TERRAIN_GRID_BLOCK_SPACING_X * float(GRID_SPACING),
grid_idx_y * TERRAIN_GRID_BLOCK_SPACING_Y * float(GRID_SPACING))
return (lat_e7, lon_e7)
class GridBlock(object):
def __init__(self, lat_int, lon_int, lat, lon):
'''
a grid block is a structure in a local file containing height
information. Each grid block is 2048 bytes in size, to keep file IO to
block oriented SD cards efficient
'''
# crc of whole block, taken with crc=0
self.crc = 0
# format version number
self.version = TERRAIN_GRID_FORMAT_VERSION
# grid spacing in meters
self.spacing = GRID_SPACING
# heights in meters over a 32*28 grid
self.height = []
for x in range(TERRAIN_GRID_BLOCK_SIZE_X):
self.height.append([0]*TERRAIN_GRID_BLOCK_SIZE_Y)
# bitmap of 4x4 grids filled in from GCS (56 bits are used)
self.bitmap = (1<<56)-1
lat_e7 = int(lat * 1.0e7)
lon_e7 = int(lon * 1.0e7)
# grids start on integer degrees. This makes storing terrain data on
# the SD card a bit easier. Note that this relies on the python floor
# behaviour with integer division
self.lat_degrees = lat_int
self.lon_degrees = lon_int
# create reference position for this rounded degree position
ref_lat = self.lat_degrees*10*1000*1000
ref_lon = self.lon_degrees*10*1000*1000
# find offset from reference
offset = get_distance_NE_e7(ref_lat, ref_lon, lat_e7, lon_e7)
offset = (round(offset[0]), round(offset[1]))
# get indices in terms of grid_spacing elements
idx_x = int(offset[0] / GRID_SPACING)
idx_y = int(offset[1] / GRID_SPACING)
# find indexes into 32*28 grids for this degree reference. Note
# the use of TERRAIN_GRID_BLOCK_SPACING_{X,Y} which gives a one square
# overlap between grids
self.grid_idx_x = idx_x // TERRAIN_GRID_BLOCK_SPACING_X
self.grid_idx_y = idx_y // TERRAIN_GRID_BLOCK_SPACING_Y
# calculate lat/lon of SW corner of 32*28 grid_block
(ref_lat, ref_lon) = add_offset(ref_lat, ref_lon,
self.grid_idx_x * TERRAIN_GRID_BLOCK_SPACING_X * float(GRID_SPACING),
self.grid_idx_y * TERRAIN_GRID_BLOCK_SPACING_Y * float(GRID_SPACING))
self.lat = ref_lat
self.lon = ref_lon
def fill(self, gx, gy, altitude):
'''fill a square'''
self.height[gx][gy] = int(altitude)
def blocknum(self):
'''find IO block number'''
stride = east_blocks(self.lat_degrees*1e7, self.lon_degrees*1e7)
return stride * self.grid_idx_x + self.grid_idx_y
class DataFile(object):
def __init__(self, lat, lon):
if lat < 0:
NS = 'S'
else:
NS = 'N'
if lon < 0:
EW = 'W'
else:
EW = 'E'
name = "terrain/%c%02u%c%03u.DAT" % (NS, min(abs(int(lat)), 99),
EW, min(abs(int(lon)), 999))
try:
os.mkdir("terrain")
except Exception:
pass
if not os.path.exists(name):
self.fh = open(name, 'w+b')
else:
self.fh = open(name, 'r+b')
def seek_offset(self, block):
'''seek to right offset'''
# work out how many longitude blocks there are at this latitude
file_offset = block.blocknum() * IO_BLOCK_SIZE
self.fh.seek(file_offset)
def pack(self, block):
'''pack into a block'''
buf = bytes()
buf += struct.pack("<QiiHHH", block.bitmap, block.lat, block.lon, block.crc, block.version, block.spacing)
for gx in range(TERRAIN_GRID_BLOCK_SIZE_X):
buf += struct.pack("<%uh" % TERRAIN_GRID_BLOCK_SIZE_Y, *block.height[gx])
buf += struct.pack("<HHhb", block.grid_idx_x, block.grid_idx_y, block.lon_degrees, block.lat_degrees)
return buf
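# Byte layout of the packed block for reference (little endian):
#   header  <QiiHHH>  bitmap, lat, lon, crc, version, spacing  =   22 bytes
#   heights 28 rows * 32 int16 values                          = 1792 bytes
#   trailer <HHhb>    grid_idx_x, grid_idx_y, lon_deg, lat_deg =    7 bytes
# i.e. 1821 bytes in total, which is the span check_filled() runs the CRC over;
# the rest of each 2048 byte IO_BLOCK_SIZE slot is left unused.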
def write(self, block):
'''write a grid block'''
self.seek_offset(block)
block.crc = 0
buf = self.pack(block)
block.crc = crc16.crc16xmodem(buf)
buf = self.pack(block)
self.fh.write(buf)
def check_filled(self, block):
'''read a grid block and check if already filled'''
self.seek_offset(block)
buf = self.fh.read(IO_BLOCK_SIZE)
if len(buf) != IO_BLOCK_SIZE:
return False
(bitmap, lat, lon, crc, version, spacing) = struct.unpack("<QiiHHH", buf[:22])
if (version != TERRAIN_GRID_FORMAT_VERSION or
abs(lat - block.lat)>2 or
abs(lon - block.lon)>2 or
spacing != GRID_SPACING or
bitmap != (1<<56)-1):
return False
buf = buf[:16] + struct.pack("<H", 0) + buf[18:]
crc2 = crc16.crc16xmodem(buf[:1821])
if crc2 != crc:
return False
return True
def create_degree(lat, lon):
'''create data file for one degree lat/lon'''
lat_int = int(math.floor(lat))
lon_int = int(math.floor((lon)))
tiles = {}
dfile = DataFile(lat_int, lon_int)
print("Creating for %d %d" % (lat_int, lon_int))
total_blocks = east_blocks(lat_int*1e7, lon_int*1e7) * TERRAIN_GRID_BLOCK_SIZE_Y
for blocknum in range(total_blocks):
(lat_e7, lon_e7) = pos_from_file_offset(lat_int, lon_int, blocknum * IO_BLOCK_SIZE)
lat = lat_e7 * 1.0e-7
lon = lon_e7 * 1.0e-7
grid = GridBlock(lat_int, lon_int, lat, lon)
if grid.blocknum() != blocknum:
continue
if not args.force and dfile.check_filled(grid):
continue
for gx in range(TERRAIN_GRID_BLOCK_SIZE_X):
for gy in range(TERRAIN_GRID_BLOCK_SIZE_Y):
lat_e7, lon_e7 = add_offset(lat*1.0e7, lon*1.0e7, gx*GRID_SPACING, gy*GRID_SPACING)
lat2_int = int(math.floor(lat_e7*1.0e-7))
lon2_int = int(math.floor(lon_e7*1.0e-7))
tile_idx = (lat2_int, lon2_int)
while not tile_idx in tiles:
tile = downloader.getTile(lat2_int, lon2_int)
waited = False
if tile == 0:
print("waiting on download of %d,%d" % (lat2_int, lon2_int))
time.sleep(0.3)
waited = True
continue
if waited:
print("downloaded %d,%d" % (lat2_int, lon2_int))
tiles[tile_idx] = tile
altitude = tiles[tile_idx].getAltitudeFromLatLon(lat_e7*1.0e-7, lon_e7*1.0e-7)
grid.fill(gx, gy, altitude)
dfile.write(grid)
from argparse import ArgumentParser
parser = ArgumentParser(description='terrain data creator')
parser.add_argument("lat", type=float, default=-35.363261)
parser.add_argument("lon", type=float, default=149.165230)
parser.add_argument("--force", action='store_true', help="overwrite existing full blocks")
parser.add_argument("--radius", type=int, default=100, help="radius in km")
parser.add_argument("--debug", action='store_true', default=False)
parser.add_argument("--spacing", type=int, default=100, help="grid spacing in meters")
args = parser.parse_args()
downloader = srtm.SRTMDownloader(debug=args.debug)
downloader.loadFileList()
GRID_SPACING = args.spacing
done = set()
for dx in range(-args.radius, args.radius):
for dy in range(-args.radius, args.radius):
(lat2,lon2) = add_offset(args.lat*1e7, args.lon*1e7, dx*1000.0, dy*1000.0)
lat_int = int(round(lat2 * 1.0e-7))
lon_int = int(round(lon2 * 1.0e-7))
tag = (lat_int, lon_int)
if tag in done:
continue
done.add(tag)
create_degree(lat_int, lon_int)
create_degree(args.lat, args.lon)
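# Example invocation (illustrative; the values are just the argparse defaults):
#   python create_terrain.py -35.363261 149.165230 --radius 100
# which fills ./terrain/ with files such as terrain/S36E149.DAT, one per
# integer degree touched by the requested radius.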
| matternet/ardupilot | libraries/AP_Terrain/tools/create_terrain.py | Python | gpl-3.0 | 11,310 |
#!/usr/bin/python
# -*- coding: UTF-8 -*-
"""
DGA: Implementation of the Distributed Greedy Algorithm for channel assignment
The DMP class handles the control flow after the initialization of DGA.
Authors: Simon Seif <seif.simon@googlemail.com>,
Felix Juraschek <fjuraschek@gmail.com>
Copyright 2008-2013, Freie Universitaet Berlin (FUB). All rights reserved.
These sources were developed at the Freie Universitaet Berlin,
Computer Systems and Telematics / Distributed, embedded Systems (DES) group
(http://cst.mi.fu-berlin.de, http://www.des-testbed.net)
-------------------------------------------------------------------------------
This program is free software: you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation, either version 3 of the License, or (at your option) any later
version.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
this program. If not, see http://www.gnu.org/licenses/ .
--------------------------------------------------------------------------------
For further information and questions please use the web site
http://www.des-testbed.net
"""
import dga
from dga import hostname
from twisted.internet import protocol, reactor
from twisted.protocols import basic
from logger import Logger
import random
import socket
import sys
import re
import os
import subprocess
# insert path to des_chan framework
p = subprocess.Popen('logname', stdout=subprocess.PIPE,stderr=subprocess.PIPE)
logname, errors = p.communicate()
sys.path.insert(0, '/home/' + logname.strip())
from des_chan import util
### CONSTANTS
# LOGGING
DEBUG = True
log = Logger(DEBUG)
# STATES
STATE_MOVING = 1
STATE_STALLED = 0
# MESSAGE HEADERS
MESSAGE_REQUEST = "REQUEST"
MESSAGE_ABORT = "ABORT"
MESSAGE_UPDATE = "UPDATE"
MESSAGE_ACCEPT = "ACCEPT"
MESSAGE_REJECT = "REJECT"
MESSAGE_QUERY = "QUERY"
# OTHER MESSAGE RELATED STUFF
DELIMITER = ";"
# TIMEOUT
STALLED_TIMEOUT = 15 # time between retrieving query and getting either abort or update
MOVING_TIMEOUT = 15 # timeout between sending request and getting accept/reject
GRACE_TIMEOUT = 5*60 # time between last "action" (except query) and shutdown
RETRY_DELAY = 2 # delay between two connection retries
MAX_RETRIES = 10 # max number of retries for each connection
QUERY_TIMEOUT = 40 # max time allowed to query
CONNECTION_TIMEOUT = 5 # general connection timeout for any outgoing TCP connection
# time the protocol shall wait before issuing its own request
STALL_MOVE_TIMEOUT_LOWER_BOUND = 0
STALL_MOVE_TIMEOUT_UPPER_BOUND = 4
LAMBDA = 0.2 # lambda for the exponential distribution
### MESSAGE PROCESSING
def createAssignmentString(assignment):
"""Creates a textual representation of a list.
"""
return repr(assignment)[1:-1].replace(" ","")
def parseAssignmentString(line):
"""Parses a textual representation of a list back to an integer array.
"""
try:
return map(lambda x:int(x),line.split(","))
except:
log.error("cannot parse assignment string:"+line)
return list()
def parseRequest(line):
"""Parses a request message.
Returns a quadruple containing old_channel,new_channel,reduction,assignment.
"""
tokens = line.split(DELIMITER)
old_channel = int(tokens[1])
new_channel = int(tokens[2])
reduction = int(tokens[3])
assignment = parseAssignmentString(tokens[4])
return old_channel, new_channel, reduction, assignment
def parseQuery(line):
"""Parses a query message. The assignment piggy bakced in the message is returned.
"""
return parseReject(line) # both message formats are the same
def parseReject(line):
"""Parses a reject message. The assignment piggy backed in the message is returned.
"""
tokens = line.split(DELIMITER)
return parseAssignmentString(tokens[1])
def createRequest(request, assignment):
"""Creates a request message.
Format: <REQUEST>;old_channel;new_channel;reduction;c1,...,cn
"""
line = MESSAGE_REQUEST
line += DELIMITER
line += repr(request.channel)
line += DELIMITER
line += repr(request.new_channel)
line += DELIMITER
line += repr(request.reduction)
line += DELIMITER
line += createAssignmentString(assignment)
return line
def createQuery(assignment):
"""Creates a query message piggy backing an assignment.
Format: <Query>;c1,...,cn
"""
assignmentString = createAssignmentString(assignment)
return MESSAGE_QUERY + DELIMITER + assignmentString
def createReject(assignment):
"""Creates a reject message piggy backing an assignment.
Format: <Reject>;c1,...,cn
"""
assignmentString = createAssignmentString(assignment)
return MESSAGE_REJECT + DELIMITER + assignmentString
def createAbort():
"""Creates an aboort message.
"""
return MESSAGE_ABORT
def createAccept():
"""Creates an accept message.
"""
return MESSAGE_ACCEPT
def createUpdate():
"""Creates an update message.
"""
return MESSAGE_UPDATE
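# Wire-format sketch (illustrative; assumes a Request with channel=1,
# new_channel=6 and reduction=3, and an assignment of [1, 6, 11]):
#   createRequest(req, [1, 6, 11])       -> "REQUEST;1;6;3;1,6,11"
#   parseRequest("REQUEST;1;6;3;1,6,11") -> (1, 6, 3, [1, 6, 11])
#   createQuery([1, 6, 11])              -> "QUERY;1,6,11"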
### STALLED STATE
class StalledFactory(protocol.Factory):
"""Factory for the Stalled Protocol.
Handles incoming foreign requests.
"""
def __init__(self, dmp):
self.dmp = dmp
self.protocol = Stalled
def notifyRequest(self, protocol, request, assignment):
"""Callback for the protocol instances.
Will drop the latter of two concurrent requests.
Determines if the request can be approved or not.
"""
self.dmp.cancelGraceTimeOut()
if self.dmp.state != STATE_STALLED:
log.warn("Incoming request from " + protocol.node + ", but I am moving.")
# remember the request so that the abort message will not cause confusion
self.dmp.foreignRequests[protocol.node] = request
protocol.sendReject()
else:
self.dmp.foreignRequests[protocol.node] = request
if self.dmp.dga.isAssignmentUpToDate(assignment): # it's valid
# possible that we've lost a node...but now got req from it, so put it again in our interference set
if protocol.node not in self.dmp.dga.interferenceSet:
log.error("Got request from node that is not in interference set..."+repr(protocol.node))
self.dmp.dga.interferenceSet[protocol.node]=list()
reactor.callLater(1,self.dmp.move) # in any case (apply/abort) something changed -> check for new possible assignment
if self.dmp.request is not None and self.dmp.request.conflicts(request): # we've got something on our own
if self.dmp.request.wins(request): # and i win
log.debug("Incoming request from node " + protocol.node + " is losing.")
protocol.sendReject()
else: # but I lose
log.debug("Incoming request from node " + protocol.node + " is winning.")
protocol.sendAccept()
self.dmp.request.invalidate()
else: # not conflicting and up to date
log.debug("Incoming request from node " + protocol.node + " is not conflicting.")
protocol.sendAccept()
else: # out of date
log.warn("Incoming request from node " + protocol.node + " is out of date.")
protocol.sendReject()
def notifyUpdate(self,client):
"""Callback for the protocol instances.
Will commit the foreign request.
"""
request = self.dmp.foreignRequests.get(client.node)
if request is not None:
log.info("Appyling update from " + client.node)
self.dmp.dga.updateInterferenceSet(request)
del self.dmp.foreignRequests[client.node]
if self.dmp.request is not None:
self.dmp.request.invalidate()
if len(self.dmp.foreignRequests)==0:
reactor.callLater(0,self.dmp.stall)
else:
log.error("Got Update for unknown request! From:"+(client.node))
def notifyAbort(self,client):
"""Callback for the protocol instances.
Will discard the foreign request.
"""
request = self.dmp.foreignRequests.get(client.node)
if request is not None:
log.info("Aborting update from " + client.node)
del self.dmp.foreignRequests[client.node]
if self.dmp.request is not None:
self.dmp.request.validate()
if len(self.dmp.foreignRequests)==0:
reactor.callLater(0,self.dmp.stall)
else:
log.error("Got Abort for unknown request! From:"+(client.node))
class Stalled(basic.LineReceiver):
"""Protocol that is enabled during Stalled State.
Events that do not need further interaction with this protocol cause a disconnect automatically.
"""
def __init__(self):
self.node = None
self.timeOutActive = False
reactor.callLater(STALLED_TIMEOUT, self.onTimeOut)
def onTimeOut(self):
"""Callback for timeouts.
"""
if self.timeOutActive:
if self.node in self.factory.dmp.foreignRequests:
log.warn("Foreign Request " + self.node + " timed out.")
self.factory.notifyAbort(self)
self.loseConnection()
def lineReceived(self, line):
"""Handle incoming message.
"""
self.node = socket.gethostbyaddr(self.transport.getPeer().host)[0]
self.node = re.match("(.*)(-ch)",self.node).group(1)
if line.startswith(MESSAGE_REQUEST):
self.timeOutActive = True
old_channel, new_channel, reduction, assignment = parseRequest(line)
request = dga.Request(self.factory.dmp.dga, self.node, None, old_channel, new_channel, reduction)
self.factory.notifyRequest(self, request, assignment)
elif line.startswith(MESSAGE_ABORT):
self.timeOutActive = False
self.factory.notifyAbort(self)
self.loseConnection()
elif line.startswith(MESSAGE_UPDATE):
self.timeOutActive = False
self.factory.notifyUpdate(self)
self.loseConnection()
elif line.startswith(MESSAGE_QUERY):
assignment = parseQuery(line)
self.factory.dmp.dga.interferenceSet[self.node] = assignment
answer = createQuery(self.factory.dmp.dga.assignment.values())
self.sendLine(answer)
else: # only allow above messages
log.error("Received unexpected message from "+repr(self.node)+":"+line)
self.loseConnection()
def sendReject(self):
"""Sends a reject message to the destination.
"""
log.message()
msg = createReject(self.factory.dmp.dga.assignment.values())
self.sendLine(msg)
def sendAccept(self):
"""Sends an accept message.
"""
log.message()
msg = createAccept()
self.sendLine(msg)
def loseConnection(self):
""" Shutdown connection.
"""
self.transport.loseConnection()
### MOVING STATE
class MovingFactory(protocol.ClientFactory):
"""Factory for the Moving Protocol.
Keeps track of approval/disproval of an issued request.
"""
def __init__(self, dmp, request):
self.dmp = dmp
self.issued_requests = set() # list of issued_requests (protocol objects)
self.answerCount = 0
self.request = request
self.protocol = Moving
self.react_to_connection_failed = True
self.gotRejected = False
self.failingNodes=dict()
def clientConnectionFailed(self, connector, reason):
"""Called when a connection has failed to connect.
Will remove the unresponsive node from the interference set and abort the issued request.
"""
if self.react_to_connection_failed:
node = util.resolve_node_name(connector.getDestination().host)
if node in self.failingNodes:
self.failingNodes[node] = self.failingNodes[node] + 1
if self.failingNodes[node] > MAX_RETRIES: # last chance over
log.error("[MOVING] Removing " + node+" from Interference set")
try:
self.dmp.dga.removeNode(node)
except:
log.error("moving:clientConnectionFailed: Wanted to remove "+node+" from interference set, but not found")
print self.dmp.foreignRequests.items()
log.interferenceSet(self.dmp.dga.interferenceSet)
self.answerCount += 1 # fugly hack
self.notifyReject()
return
else:
self.failingNodes[node] = 1
reactor.callLater(RETRY_DELAY,connector.connect)
def clientConnectionLost(self, connector, reason):
"""Called when an established connection is lost.
This is intended behaviour if the remote node calls transport.loseConnection()
"""
pass
def notifyReject(self):
"""Callback for protocol.
Will abort the current request.
"""
self.gotRejected = True
self.answerCount = self.answerCount + 1
self.react_to_connection_failed = False
if self.answerCount >= len(self.issued_requests):
self.abort()
def notifyAccept(self):
"""Callback for protocol.
"""
self.answerCount = self.answerCount + 1
if self.answerCount >= len(self.issued_requests):
self.react_to_connection_failed = False
if self.gotRejected: # this is necessary due to late arrival of accept messages (after a reject message).
self.abort()
else:
self.update()
def update(self):
"""Sends update messages to all nodes in the interference set.
Causes an immediate state transition to STALLED.
"""
for request in self.issued_requests:
request.sendUpdate()
self.issued_requests = set()
self.request.commit()
self.request.invalidate() # otherwise we are stuck
log.debug("Request got accepted =)")
reactor.callLater(0, self.dmp.stall)
def abort(self):
"""Sends abort messages to all nodes in the interference set.
Causses an immediuate state transistion to STALLED.
"""
for request in self.issued_requests:
request.sendAbort()
self.issued_requests = set()
self.request.invalidate() #otherwise this request will be retried over and over again
log.debug("Own request got rejected =(")
reactor.callLater(0, self.dmp.stall)
def notifyTimedOut(self, node):
"""Callback for the protocols.
A timeout is treated like a reject.
"""
log.warn("[MOVING] Removing " + node + " from interference set due to timeout.")
try:
self.dmp.dga.removeNode(node)
except:
log.error("moving::notifytimedout:: cannot remove"+node+" from interference set")
print self.dmp.foreignRequests.items()
self.notifyReject()
class Moving(basic.LineReceiver):
"""Protocol that is enabled during Moving State.
Events that do not need further interaction with this protocol cause a disconnect automatically.
"""
def __init__(self):
self.node = None
self.timeOutActive = True
def connectionMade(self):
"""Callback for the reactor. Sends the line
"""
reactor.callLater(MOVING_TIMEOUT, self.timedOut)
self.factory.issued_requests.add(self)
self.node = socket.gethostbyaddr(self.transport.getPeer().host)[0]
self.node = re.match("(.*)(-ch)",self.node).group(1)
self.sendRequest(self.factory.request)
def timedOut(self):
"""Callback for timeout.
"""
if self.timeOutActive:
self.factory.notifyTimedOut(self.node)
def lineReceived(self, line):
"""Callback for the reactor. Will be called, when a complete line (response to our initial message) is received.
"""
self.timeOutActive = False
if line.startswith(MESSAGE_ACCEPT):
self.factory.notifyAccept()
elif line.startswith(MESSAGE_REJECT):
assignment = parseReject(line)
self.factory.dmp.dga.interferenceSet[self.node] = assignment # in case due to bad info -> update the rejecter's assignment
self.factory.notifyReject()
else: # only allow above two messages right now
log.error("Got unexpected line from node: " + self.node + " " + line)
self.loseConnection()
def sendRequest(self, request):
"""Sends a request message.
"""
# node can be lost in meantime, so requery for assignment
log.message()
try:
assignment = self.factory.dmp.dga.interferenceSet[self.node]
except:
assignment = [0,0]
msg = createRequest(request, assignment)
self.sendLine(msg)
def sendUpdate(self):
"""Sends an update message.
"""
log.message()
msg = createUpdate()
self.sendLine(msg)
def sendAbort(self):
"""Sends an abort message.
"""
log.message()
msg = createAbort()
self.sendLine(msg)
def loseConnection(self):
self.timeOutActive = False
self.transport.loseConnection()
### QUERYING STATE
class QueryingFactory(protocol.ClientFactory):
"""Factory for the Querying Protocol.
Keeps track of unresponsive nodes and creates a usable interference set.
"""
def __init__(self, dmp):
self.protocol = Querying
self.dmp = dmp
self.failingNodes = dict()
reactor.callLater(QUERY_TIMEOUT,self.barrierStallCallback)
def clientConnectionFailed(self, connector, reason):
"""Called when a connection has failed to connect.
If called more than MAX_RETRIES times for the same node, the node is considered
down and will be removed from the interference set.
Otherwise the connection will be retried after RETRY_DELAY.
"""
node = connector.getDestination().host
node = util.resolve_node_name(node)
if node in self.failingNodes:
self.failingNodes[node] = self.failingNodes[node] + 1
if self.failingNodes[node] > MAX_RETRIES:
log.error("Cannot connect to " + repr(node) + ". Removing from interference set!")
try:
self.dmp.dga.removeNode(node)
except:
log.error("Cannot remove "+repr(node)+" from interference set")
log.interferenceSet(self.dmp.dga.interferenceSet)
return
else:
self.failingNodes[node] = 1
reactor.callLater(RETRY_DELAY,connector.connect)
def clientConnectionLost(self, connector, reason):
"""Called when an established connection is lost.
Expected behaviour if remote host closes transport.
"""
pass
def onTimeout(self, protocol):
"""Called when a connection has been established but timed out.
If called more than MAX_RETRIES times for the same node, the node is considered
down and will be removed from the interference set.
"""
if protocol.node in self.failingNodes:
self.failingNodes[protocol.node] = self.failingNodes[protocol.node] + 1
if self.failingNodes[protocol.node] > MAX_RETRIES:
log.error("Connection to " + repr(protocol.node) + "timed out.")
try:
self.dmp.dga.removeNode(protocol.node)
except:
log.error("querying timeout cannot remove "+repr(protocol.node)+" from interferenceset")
log.interferenceSet(self.dmp.dga.interferenceSet)
return
else:
self.failingNodes[protocol.node] = 1
protocol.transport.loseConnection()
reactor.connectTCP(protocol.node,self.dmp.port, self,CONNECTION_TIMEOUT)
def barrierStallCallback(self):
"""Will be called after QUERY_TIMEOUT.
Cleans up incomplete information in the interference set.
Causes an immediate transition to STALLED.
"""
self.dmp.dga.cleanInterferenceSet()
log.info("Interferenceset is ready.")
log.interferenceSet(self.dmp.dga.interferenceSet)
reactor.callLater(0,self.dmp.stall)
class Querying(basic.LineReceiver):
def __init__(self):
self.node = None
self.timeOutActive = True
def connectionMade(self):
"""Callback for the reactor.
Sends the query message.
"""
reactor.callLater(QUERY_TIMEOUT, self.timedOut)
self.node = socket.gethostbyaddr(self.transport.getPeer().host)[0]
self.node= re.match("(.*)(-ch)",self.node).group(1)
msg = createQuery(self.factory.dmp.dga.assignment.values())
self.sendLine(msg)
def lineReceived(self, line):
"""Callback for the reactor. Parses incoming messages (replies to our initial message).
"""
if line.startswith(MESSAGE_QUERY):
assignment = parseQuery(line)
self.factory.dmp.dga.interferenceSet[self.node] = assignment
else:
log.error("Got unexpected line from " + repr(self.node) + " " + line)
self.timeOutActive = False
self.transport.loseConnection()
def timedOut(self):
"""Callback for timeouts.
"""
if self.timeOutActive:
self.factory.onTimeout(self)
self.transport.loseConnection()
### MAIN OBJECT
class DMP:
def __init__(self, dga, port):
self.dga = dga
self.port = port
self.request = None
self.foreignRequests = dict() # {node:request}
self.graceTimeoutCancelled = False # prevents premature shutdown
self.moveCalled = False # this prevents "move" being called several times subsequently
self.finishCalled=False
self.state = STATE_STALLED
def start(self):
"""Starts the protocol.
This will give control to the reactor until the algorithm has terminated.
"""
factory = StalledFactory(self)
reactor.listenTCP(self.port, factory,50,self.dga.hostname+"-ch"+repr(self.dga.default_channel))
reactor.callLater(0, self.query)
def query(self):
"""Asks neighbours for their assignment.
"""
log.info("Querying neighbours.")
factory = QueryingFactory(self)
for node, assignment in self.dga.interferenceSet.iteritems():
if len(assignment) == 0:
log.debug("Querying "+node)
reactor.connectTCP(util.get_node_ip(node, self.dga.default_channel), self.port, factory,CONNECTION_TIMEOUT)
# if no neighbours found, prevent deadlock
if len(self.dga.interferenceSet) == 0:
reactor.callLater(QUERY_TIMEOUT,self.stall)
def move(self):
"""Transfers the current state into moving state.
"""
if len(self.foreignRequests)==0:
self.moveCalled=False
if self.request is None or not self.request.valid:
if self.graceTimeoutCancelled: # apparently grace timeout has been cancelled, go back to stalling
self.state = STATE_STALLED
reactor.callLater(0, self.stall)
else: # no pending foreign request and our's is valid.
log.info("MOVING"+repr(self.request))
if len(self.dga.interferenceSet) ==0:
self.dga.finish()
self.state=STATE_MOVING
self.sendRequests()
else: # still a pending foreign request, retry later
reactor.callLater(1, self.move)
def sendRequests(self):
"""Send the request to all neighbors.
"""
log.info("Requesting neighbours")
factory = MovingFactory(self, self.request)
print self.dga.interferenceSet.keys()
for node in self.dga.interferenceSet.iterkeys():
reactor.connectTCP(util.get_node_ip(node, self.dga.default_channel), self.port, factory, CONNECTION_TIMEOUT)
if len(self.dga.interferenceSet) == 0:
reactor.callLater(GRACE_TIMEOUT,self.finish)
def finish(self):
"""Begin shutdown.
"""
self.finishCalled=False
if not self.graceTimeoutCancelled:
self.dga.finish()
def stall(self):
"""Transfers the current state into stalled state.
"""
log.info("STALLED")
self.state = STATE_STALLED
if self.request is None or not self.request.valid: # so we dont overwrite our previously calculated (and still valid) request
self.createRequest()
if self.request.valid:
self.graceTimeoutCancelled = True
timeout = random.expovariate(LAMBDA)%STALL_MOVE_TIMEOUT_UPPER_BOUND
if not self.moveCalled:
self.moveCalled = True
reactor.callLater(timeout, self.move)
else:
# cant find any better assignment, give grace time to other nodes
log.debug("Giving grace time...")
self.graceTimeoutCancelled = False
if not self.finishCalled:
self.finishCalled=True
reactor.callLater(GRACE_TIMEOUT, self.finish)
def createRequest(self):
"""Creates a new request.
"""
log.debug("Creating new request...")
self.request = self.dga.findNewAssignment()
def cancelGraceTimeOut(self):
"""Cancels the ticking grace time out.
"""
self.graceTimeoutCancelled = True
| des-testbed/des_chan_algorithms | dga/dmp.py | Python | gpl-3.0 | 27,779 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import markupfield.fields
class Migration(migrations.Migration):
dependencies = [
('cal', '0005_attachment_comment'),
]
operations = [
migrations.AddField(
model_name='task',
name='_body_rendered',
field=models.TextField(default='', editable=False),
preserve_default=False,
),
migrations.AddField(
model_name='task',
name='body',
field=markupfield.fields.MarkupField(verbose_name='Body', default='', rendered_field=True),
preserve_default=False,
),
migrations.AddField(
model_name='task',
name='body_markup_type',
field=models.CharField(default='markdown', choices=[('', '--'), ('markdown', 'markdown')], max_length=30),
),
]
| csebastian2/study | cal/migrations/0006_auto_20151211_0747.py | Python | gpl-3.0 | 940 |
|
#!/usr/bin/env python
#
# Calculate the change in the betweenness centrality of each residue over the
# course of an MD simulation
#
# Script distributed under GNU GPL 3.0
#
# Author: David Brown
# Date: 17-11-2016
import argparse, calc_delta
from lib.cli import CLI
from lib.utils import Logger
def main(args):
args.matrix_type = "BC"
calc_delta.main(args)
log = Logger()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--reference", help="The reference BC matrix (.dat)")
parser.add_argument("--alternatives", help="The alternative BC matrices (.dat)", nargs="*")
parser.add_argument("--normalize", help="Normalizes the values", action='store_true', default=False)
parser.add_argument('--normalization-mode', help="Method used to normalize - default: (Delta BC/(BC+1))", default=None)
parser.add_argument("--generate-plots", help="Plot results - without setting this flag, no graph will be generated", action='store_true', default=False)
CLI(parser, main, log)
| RUBi-ZA/MD-TASK | calc_delta_BC.py | Python | gpl-3.0 | 1,043 |
|
#!/usr/bin/env python3
# Exits with exit code 0 (i.e., allows sleep) if all load averages
# (1, 5, 15 minutes) are below ``MAX_IDLE_LOAD``.
from os import getloadavg
MAX_IDLE_LOAD = .09
def check_load(time_span, load):
if load > MAX_IDLE_LOAD:
print(
" Won't sleep because %i minute load average" % time_span,
"of %.2f is above threshold of %.2f." % (load, MAX_IDLE_LOAD)
)
exit(1)
loads = getloadavg()
check_load(1, loads[0])
check_load(5, loads[1])
check_load(15, loads[2])
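# Example output when the machine is still busy (illustrative):
#    Won't sleep because 5 minute load average of 0.42 is above threshold of 0.09.
# The non-zero exit code then tells the calling autosuspend hook not to sleep.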
| lpirl/autosuspend | autosuspend.pre/200-ensure-low-loadavg.py | Python | gpl-3.0 | 507 |
|
#!/usr/bin/env python
# -*- coding: utf-8 *-*
import pygame
from const import *
class MultiSprite(object):
def __init__(self, path, res_x, res_y=None, offX=0, offY=0, gX=0, gY=0):
"""path = file path of the Multi sprite.
res_x and res_y are the X, Y size of each sub sprite.
offX and offY can specify an internal offset which are applied inside of a field (used for char sets).
gX and gY specify global offsets (used for char sets)."""
self.sprite = pygame.image.load(path)
self.res_x = res_x
self.res_y = res_y if res_y else res_x
self.offX = offX
self.offY = offY
self.gX = gX
self.gY = gY
def draw2dungeon(self, x, y, target, t_x=SCALE, t_y=SCALE, pX=0, pY=0): # Dont even ask ^^
"""x and y are the position of the subsprite in the MultiSprite.
target is the target surface and
t_x and t_y are the positions to where the subsprite shall be blitted.
All coordinates are scaled accordingly inside this function.
pX and pY are additional Pixel Offsets because we can"""
# make this a _little_ bit more readable ^^
rx, ry = self.res_x, self.res_y
offX, offY = self.offX, self.offY
gX, gY = self.gX, self.gY
subsprite_rect = (gX+rx*x+pX, gY+ry*y+pY, rx, ry) # square around the sub sprite we want to draw
topleft = (t_x*SCALE+offX, t_y*SCALE+offY) # topleft target coordinates; here goes the subsprite
#print subsprite_rect, topleft
target.blit(self.sprite, topleft, subsprite_rect)
class TileSetMultiSprite(MultiSprite):
def __init__(self, path, res_x, res_y=None):
super(TileSetMultiSprite, self).__init__(path, res_x, res_y)
class CharSetMultiSprite(MultiSprite):
def __init__(self, path, res_x, res_y=None, offX=0, offY=0, gX=0, gY=0):
super(CharSetMultiSprite, self).__init__(path, res_x, res_y, offX, offY, gX, gY)
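# Minimal usage sketch (illustrative; "tiles.png", the 16 px tile size and the
# pygame surface "screen" are assumptions, SCALE comes from const):
#   tiles = TileSetMultiSprite("tiles.png", 16)
#   tiles.draw2dungeon(2, 3, screen, t_x=5, t_y=4)  # blit sub-sprite (2,3) at field (5,4)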
| r41d/flaming-octo-geezus | src/sprites.py | Python | gpl-3.0 | 1,802 |
|
# -*- coding: utf-8 -*-
# providerbootstrapper.py
# Copyright (C) 2013 LEAP
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Provider bootstrapping
"""
import logging
import socket
import os
import sys
import requests
from leap.bitmask import provider
from leap.bitmask import util
from leap.bitmask.config import flags
from leap.bitmask.config.providerconfig import ProviderConfig, MissingCACert
from leap.bitmask.provider import get_provider_path
from leap.bitmask.provider.pinned import PinnedProviders
from leap.bitmask.services.abstractbootstrapper import AbstractBootstrapper
from leap.bitmask.util.constants import REQUEST_TIMEOUT
from leap.bitmask.util.request_helpers import get_content
from leap.common import ca_bundle
from leap.common.certs import get_digest
from leap.common.check import leap_assert, leap_assert_type, leap_check
from leap.common.files import check_and_fix_urw_only, get_mtime, mkdir_p
logger = logging.getLogger(__name__)
class UnsupportedProviderAPI(Exception):
"""
Raised when attempting to use a provider with an incompatible API.
"""
pass
class UnsupportedClientVersionError(Exception):
"""
Raised when attempting to use a provider with an older
client than supported.
"""
pass
class WrongFingerprint(Exception):
"""
Raised when a fingerprint comparison does not match.
"""
pass
class ProviderBootstrapper(AbstractBootstrapper):
"""
Given a provider URL performs a series of checks and emits signals
after they are passed.
If a check fails, the subsequent checks are not executed
"""
MIN_CLIENT_VERSION = 'x-minimum-client-version'
def __init__(self, signaler=None, bypass_checks=False):
"""
Constructor for provider bootstrapper object
:param signaler: Signaler object used to receive notifications
from the backend
:type signaler: Signaler
:param bypass_checks: Set to true if the app should bypass
first round of checks for CA
certificates at bootstrap
:type bypass_checks: bool
"""
AbstractBootstrapper.__init__(self, signaler, bypass_checks)
self._domain = None
self._provider_config = None
self._download_if_needed = False
if signaler is not None:
self._cancel_signal = signaler.prov_cancelled_setup
@property
def verify(self):
"""
Verify parameter for requests.
:returns: either False, if checks are skipped, or the
path to the ca bundle.
:rtype: bool or str
"""
if self._bypass_checks:
return False
cert = flags.CA_CERT_FILE
if cert is not None:
verify = cert
else:
verify = ca_bundle.where()
return verify
def _check_name_resolution(self):
"""
Checks that the name resolution for the provider name works
"""
leap_assert(self._domain, "Cannot check DNS without a domain")
logger.debug("Checking name resolution for %r" % (self._domain))
# We don't skip this check, since it's basic for the whole
# system to work
# err --- but we can do it after a failure, to diagnose what went
# wrong. Right now we're just adding connection overhead. -- kali
socket.gethostbyname(self._domain.encode('idna'))
def _check_https(self, *args):
"""
Checks that https is working and that the provided certificate
checks out
"""
leap_assert(self._domain, "Cannot check HTTPS without a domain")
logger.debug("Checking https for %r" % (self._domain))
# We don't skip this check, since it's basic for the whole
# system to work.
# err --- but we can do it after a failure, to diagnose what went
# wrong. Right now we're just adding connection overhead. -- kali
verify = self.verify
if verify:
verify = self.verify.encode(sys.getfilesystemencoding())
try:
uri = "https://{0}".format(self._domain.encode('idna'))
res = self._session.get(uri, verify=verify,
timeout=REQUEST_TIMEOUT)
res.raise_for_status()
except requests.exceptions.SSLError as exc:
self._err_msg = self.tr("Provider certificate could "
"not be verified")
raise
except Exception as exc:
# XXX careful! The error might also be an SSL handshake
# timeout error, in which case we should retry a couple of times
# more, for cases where the ssl server gives high latencies.
self._err_msg = self.tr("Provider does not support HTTPS")
raise
def _download_provider_info(self, *args):
"""
Downloads the provider.json definition
"""
leap_assert(self._domain,
"Cannot download provider info without a domain")
logger.debug("Downloading provider info for %r" % (self._domain))
# --------------------------------------------------------------
# TODO factor out with the download routines in services.
# Watch out! We're handling the verify parameter differently here.
headers = {}
domain = self._domain.encode(sys.getfilesystemencoding())
provider_json = os.path.join(util.get_path_prefix(),
get_provider_path(domain))
if domain in PinnedProviders.domains() and \
not os.path.exists(provider_json):
mkdir_p(os.path.join(os.path.dirname(provider_json),
"keys", "ca"))
cacert = os.path.join(os.path.dirname(provider_json),
"keys", "ca", "cacert.pem")
PinnedProviders.save_hardcoded(domain, provider_json, cacert)
mtime = get_mtime(provider_json)
if self._download_if_needed and mtime:
headers['if-modified-since'] = mtime
uri = "https://%s/%s" % (self._domain, "provider.json")
verify = self.verify
if mtime: # the provider.json exists
# So, we're getting it from the api.* and checking against
# the provider ca.
try:
provider_config = ProviderConfig()
provider_config.load(provider_json)
uri = provider_config.get_api_uri() + '/provider.json'
verify = provider_config.get_ca_cert_path()
except MissingCACert:
# no ca? then download from main domain again.
pass
if verify:
verify = verify.encode(sys.getfilesystemencoding())
logger.debug("Requesting for provider.json... "
"uri: {0}, verify: {1}, headers: {2}".format(
uri, verify, headers))
res = self._session.get(uri.encode('idna'), verify=verify,
headers=headers, timeout=REQUEST_TIMEOUT)
res.raise_for_status()
logger.debug("Request status code: {0}".format(res.status_code))
min_client_version = res.headers.get(self.MIN_CLIENT_VERSION, '0')
# Not modified
if res.status_code == 304:
logger.debug("Provider definition has not been modified")
# --------------------------------------------------------------
# end refactor, more or less...
# XXX Watch out, have to check the supported api yet.
else:
if flags.APP_VERSION_CHECK:
# TODO split
if not provider.supports_client(min_client_version):
if self._signaler is not None:
self._signaler.signal(
self._signaler.prov_unsupported_client)
raise UnsupportedClientVersionError()
provider_definition, mtime = get_content(res)
provider_config = ProviderConfig()
provider_config.load(data=provider_definition, mtime=mtime)
provider_config.save(["leap", "providers",
domain, "provider.json"])
if flags.API_VERSION_CHECK:
# TODO split
api_version = provider_config.get_api_version()
if provider.supports_api(api_version):
logger.debug("Provider definition has been modified")
else:
api_supported = ', '.join(provider.SUPPORTED_APIS)
error = ('Unsupported provider API version. '
'Supported versions are: {0}. '
'Found: {1}.').format(api_supported, api_version)
logger.error(error)
if self._signaler is not None:
self._signaler.signal(
self._signaler.prov_unsupported_api)
raise UnsupportedProviderAPI(error)
def run_provider_select_checks(self, domain, download_if_needed=False):
"""
Populates the check queue.
:param domain: domain to check
:type domain: unicode
:param download_if_needed: if True, makes the checks do not
overwrite already downloaded data
:type download_if_needed: bool
"""
leap_assert(domain and len(domain) > 0, "We need a domain!")
self._domain = ProviderConfig.sanitize_path_component(domain)
self._download_if_needed = download_if_needed
name_resolution = None
https_connection = None
down_provider_info = None
if self._signaler is not None:
name_resolution = self._signaler.prov_name_resolution
https_connection = self._signaler.prov_https_connection
down_provider_info = self._signaler.prov_download_provider_info
cb_chain = [
(self._check_name_resolution, name_resolution),
(self._check_https, https_connection),
(self._download_provider_info, down_provider_info)
]
return self.addCallbackChain(cb_chain)
def _should_proceed_cert(self):
"""
Returns False if the certificate already exists for the given
provider. True otherwise
:rtype: bool
"""
leap_assert(self._provider_config, "We need a provider config!")
if not self._download_if_needed:
return True
return not os.path.exists(self._provider_config
.get_ca_cert_path(about_to_download=True))
def _download_ca_cert(self, *args):
"""
Downloads the CA cert that is going to be used for the api URL
"""
# XXX maybe we can skip this step if
# we have a fresh one.
leap_assert(self._provider_config, "Cannot download the ca cert "
"without a provider config!")
logger.debug("Downloading ca cert for %r at %r" %
(self._domain, self._provider_config.get_ca_cert_uri()))
if not self._should_proceed_cert():
check_and_fix_urw_only(
self._provider_config
.get_ca_cert_path(about_to_download=True))
return
res = self._session.get(self._provider_config.get_ca_cert_uri(),
verify=self.verify,
timeout=REQUEST_TIMEOUT)
res.raise_for_status()
cert_path = self._provider_config.get_ca_cert_path(
about_to_download=True)
cert_dir = os.path.dirname(cert_path)
mkdir_p(cert_dir)
with open(cert_path, "w") as f:
f.write(res.content)
check_and_fix_urw_only(cert_path)
def _check_ca_fingerprint(self, *args):
"""
Checks the CA cert fingerprint against the one provided in the
json definition
"""
leap_assert(self._provider_config, "Cannot check the ca cert "
"without a provider config!")
logger.debug("Checking ca fingerprint for %r and cert %r" %
(self._domain,
self._provider_config.get_ca_cert_path()))
if not self._should_proceed_cert():
return
parts = self._provider_config.get_ca_cert_fingerprint().split(":")
error_msg = "Wrong fingerprint format"
leap_check(len(parts) == 2, error_msg, WrongFingerprint)
method = parts[0].strip()
fingerprint = parts[1].strip()
cert_data = None
with open(self._provider_config.get_ca_cert_path()) as f:
cert_data = f.read()
leap_assert(len(cert_data) > 0, "Could not read certificate data")
digest = get_digest(cert_data, method)
error_msg = "Downloaded certificate has a different fingerprint!"
leap_check(digest == fingerprint, error_msg, WrongFingerprint)
def _check_api_certificate(self, *args):
"""
Tries to make an API call with the downloaded cert and checks
if it validates against it
"""
leap_assert(self._provider_config, "Cannot check the ca cert "
"without a provider config!")
logger.debug("Checking api certificate for %s and cert %s" %
(self._provider_config.get_api_uri(),
self._provider_config.get_ca_cert_path()))
if not self._should_proceed_cert():
return
test_uri = "%s/%s/cert" % (self._provider_config.get_api_uri(),
self._provider_config.get_api_version())
ca_cert_path = self._provider_config.get_ca_cert_path()
ca_cert_path = ca_cert_path.encode(sys.getfilesystemencoding())
res = self._session.get(test_uri, verify=ca_cert_path,
timeout=REQUEST_TIMEOUT)
res.raise_for_status()
def run_provider_setup_checks(self,
provider_config,
download_if_needed=False):
"""
Starts the checks needed for a new provider setup.
:param provider_config: Provider configuration
:type provider_config: ProviderConfig
:param download_if_needed: if True, makes the checks do not
overwrite already downloaded data.
:type download_if_needed: bool
"""
leap_assert(provider_config, "We need a provider config!")
leap_assert_type(provider_config, ProviderConfig)
self._provider_config = provider_config
self._download_if_needed = download_if_needed
download_ca_cert = None
check_ca_fingerprint = None
check_api_certificate = None
if self._signaler is not None:
download_ca_cert = self._signaler.prov_download_ca_cert
check_ca_fingerprint = self._signaler.prov_check_ca_fingerprint
check_api_certificate = self._signaler.prov_check_api_certificate
cb_chain = [
(self._download_ca_cert, download_ca_cert),
(self._check_ca_fingerprint, check_ca_fingerprint),
(self._check_api_certificate, check_api_certificate)
]
return self.addCallbackChain(cb_chain)
| andrejb/bitmask_client | src/leap/bitmask/provider/providerbootstrapper.py | Python | gpl-3.0 | 16,068 |
|
from lxml import etree
import requests
import re
#coding utf-8
def getResource(word):
r = requests.get("http://www.dictionaryapi.com/api/v1/references/learners/xml/"+word+"?key=508b6e11-3920-41fe-a57a-d379deacf188")
return r.text[39:]
def isWord(entry,word):
g=re.compile(entry)
return re.fullmatch(word+"\[*\d*\]*",entry) is not None
def parse_entry(entry):
meanings=[]
for children in entry:
if children.tag=="fl":
meanings.append(children.text)
if children.tag=="def":
for x in children:
if x.tag=="dt":
if x.text is not None:
meanings.append(x.text[1:])
return meanings
# main loop
def getDefintion(word):
root = etree.XML(getResource(word), etree.XMLParser(remove_blank_text=True))
meaning_list=[]
for entry in root:
if isWord(entry.attrib["id"],word):
meaning_list.append(parse_entry(entry))
return meaning_list
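# Example usage (illustrative; needs network access and the API key above):
#   print(getDefintion("dictionary"))
#   # -> one list of meanings per matching entry, e.g. [['noun', '...'], ...]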
| jzcxer/0Math | python/dictionary/Merriam_Webster_api.py | Python | gpl-3.0 | 987 |
|
from qit.base.type import Type
class File(Type):
pass_by_value = True
def build(self, builder):
return "FILE*"
| spirali/qit | src/qit/base/file.py | Python | gpl-3.0 | 133 |
|
# Author: Travis Oliphant
# 1999 -- 2002
import sigtools
from scipy import linalg
from scipy.fftpack import fft, ifft, ifftshift, fft2, ifft2, fftn, \
ifftn, fftfreq
from numpy import polyadd, polymul, polydiv, polysub, roots, \
poly, polyval, polyder, cast, asarray, isscalar, atleast_1d, \
ones, real_if_close, zeros, array, arange, where, rank, \
newaxis, product, ravel, sum, r_, iscomplexobj, take, \
argsort, allclose, expand_dims, unique, prod, sort, reshape, \
transpose, dot, mean, ndarray, atleast_2d
import numpy as np
from scipy.misc import factorial
from windows import get_window
from _arraytools import axis_slice, axis_reverse, odd_ext, even_ext, const_ext
__all__ = ['correlate', 'fftconvolve', 'convolve', 'convolve2d', 'correlate2d',
'order_filter', 'medfilt', 'medfilt2d', 'wiener', 'lfilter',
'lfiltic', 'deconvolve', 'hilbert', 'hilbert2', 'cmplx_sort',
'unique_roots', 'invres', 'invresz', 'residue', 'residuez',
'resample', 'detrend', 'lfilter_zi', 'filtfilt', 'decimate']
_modedict = {'valid': 0, 'same': 1, 'full': 2}
_boundarydict = {'fill': 0, 'pad': 0, 'wrap': 2, 'circular': 2, 'symm': 1,
'symmetric': 1, 'reflect': 4}
def _valfrommode(mode):
try:
val = _modedict[mode]
except KeyError:
if mode not in [0, 1, 2]:
raise ValueError("Acceptable mode flags are 'valid' (0),"
" 'same' (1), or 'full' (2).")
val = mode
return val
def _bvalfromboundary(boundary):
try:
val = _boundarydict[boundary] << 2
except KeyError:
if boundary not in [0, 1, 2]:
raise ValueError("Acceptable boundary flags are 'fill', 'wrap'"
" (or 'circular'), \n and 'symm' (or 'symmetric').")
val = boundary << 2
return val
def correlate(in1, in2, mode='full'):
"""
Cross-correlate two N-dimensional arrays.
Cross-correlate in1 and in2 with the output size determined by the mode
argument.
Parameters
----------
in1: array
first input.
in2: array
second input. Should have the same number of dimensions as in1.
mode: str {'valid', 'same', 'full'}
a string indicating the size of the output:
- 'valid': the output consists only of those elements that do not
rely on the zero-padding.
- 'same': the output is the same size as the largest input centered
with respect to the 'full' output.
- 'full': the output is the full discrete linear cross-correlation
of the inputs. (Default)
Returns
-------
out: array
an N-dimensional array containing a subset of the discrete linear
cross-correlation of in1 with in2.
Notes
-----
The correlation z of two arrays x and y of rank d is defined as
z[...,k,...] = sum[..., i_l, ...]
x[..., i_l,...] * conj(y[..., i_l + k,...])
"""
val = _valfrommode(mode)
if mode == 'valid':
ps = [i - j + 1 for i, j in zip(in1.shape, in2.shape)]
out = np.empty(ps, in1.dtype)
for i in range(len(ps)):
if ps[i] <= 0:
raise ValueError("Dimension of x(%d) < y(%d) " \
"not compatible with valid mode" % \
(in1.shape[i], in2.shape[i]))
z = sigtools._correlateND(in1, in2, out, val)
else:
ps = [i + j - 1 for i, j in zip(in1.shape, in2.shape)]
# zero pad input
in1zpadded = np.zeros(ps, in1.dtype)
sc = [slice(0, i) for i in in1.shape]
in1zpadded[sc] = in1.copy()
if mode == 'full':
out = np.empty(ps, in1.dtype)
z = sigtools._correlateND(in1zpadded, in2, out, val)
elif mode == 'same':
out = np.empty(in1.shape, in1.dtype)
z = sigtools._correlateND(in1zpadded, in2, out, val)
else:
raise ValueError("Uknown mode %s" % mode)
return z
def _centered(arr, newsize):
# Return the center newsize portion of the array.
newsize = asarray(newsize)
currsize = array(arr.shape)
startind = (currsize - newsize) / 2
endind = startind + newsize
myslice = [slice(startind[k], endind[k]) for k in range(len(endind))]
return arr[tuple(myslice)]
def fftconvolve(in1, in2, mode="full"):
"""Convolve two N-dimensional arrays using FFT. See convolve.
"""
s1 = array(in1.shape)
s2 = array(in2.shape)
complex_result = (np.issubdtype(in1.dtype, np.complex) or
np.issubdtype(in2.dtype, np.complex))
size = s1 + s2 - 1
# Always use 2**n-sized FFT
fsize = 2 ** np.ceil(np.log2(size))
IN1 = fftn(in1, fsize)
IN1 *= fftn(in2, fsize)
fslice = tuple([slice(0, int(sz)) for sz in size])
ret = ifftn(IN1)[fslice].copy()
del IN1
if not complex_result:
ret = ret.real
if mode == "full":
return ret
elif mode == "same":
if product(s1, axis=0) > product(s2, axis=0):
osize = s1
else:
osize = s2
return _centered(ret, osize)
elif mode == "valid":
return _centered(ret, abs(s2 - s1) + 1)
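# Quick check (illustrative): for 1-D inputs the FFT-based result matches the
# direct convolution up to floating point error, e.g.
#   a = np.random.randn(100); b = np.random.randn(8)
#   np.allclose(fftconvolve(a, b), np.convolve(a, b))   # -> True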
def convolve(in1, in2, mode='full'):
"""
Convolve two N-dimensional arrays.
Convolve in1 and in2 with output size determined by mode.
Parameters
----------
in1: array
first input.
in2: array
second input. Should have the same number of dimensions as in1.
mode: str {'valid', 'same', 'full'}
a string indicating the size of the output:
``valid`` : the output consists only of those elements that do not
rely on the zero-padding.
``same`` : the output is the same size as the largest input centered
with respect to the 'full' output.
``full`` : the output is the full discrete linear cross-correlation
of the inputs. (Default)
Returns
-------
out: array
an N-dimensional array containing a subset of the discrete linear
cross-correlation of in1 with in2.
"""
volume = asarray(in1)
kernel = asarray(in2)
if rank(volume) == rank(kernel) == 0:
return volume * kernel
elif not volume.ndim == kernel.ndim:
raise ValueError("in1 and in2 should have the same rank")
slice_obj = [slice(None, None, -1)] * len(kernel.shape)
if mode == 'valid':
for d1, d2 in zip(volume.shape, kernel.shape):
if not d1 >= d2:
raise ValueError(
"in1 should have at least as many items as in2 in " \
"every dimension for valid mode.")
if np.iscomplexobj(kernel):
return correlate(volume, kernel[slice_obj].conj(), mode)
else:
return correlate(volume, kernel[slice_obj], mode)
def order_filter(a, domain, rank):
"""
Perform an order filter on an N-dimensional array.
Perform an order filter on the array in. The domain argument acts as a
mask centered over each pixel. The non-zero elements of domain are
used to select elements surrounding each input pixel which are placed
in a list. The list is sorted, and the output for that pixel is the
element corresponding to rank in the sorted list.
Parameters
----------
a : ndarray
The N-dimensional input array.
domain : array_like
        A mask array with the same number of dimensions as `a`.
Each dimension should have an odd number of elements.
rank : int
A non-negative integer which selects the element from the
sorted list (0 corresponds to the smallest element, 1 is the
next smallest element, etc.).
Returns
-------
out : ndarray
The results of the order filter in an array with the same
        shape as `a`.
Examples
--------
>>> import scipy.signal
>>> x = np.arange(25).reshape(5, 5)
>>> domain = np.identity(3)
>>> x
array([[ 0, 1, 2, 3, 4],
[ 5, 6, 7, 8, 9],
[10, 11, 12, 13, 14],
[15, 16, 17, 18, 19],
[20, 21, 22, 23, 24]])
>>> sp.signal.order_filter(x, domain, 0)
array([[ 0., 0., 0., 0., 0.],
[ 0., 0., 1., 2., 0.],
[ 0., 5., 6., 7., 0.],
[ 0., 10., 11., 12., 0.],
[ 0., 0., 0., 0., 0.]])
>>> sp.signal.order_filter(x, domain, 2)
array([[ 6., 7., 8., 9., 4.],
[ 11., 12., 13., 14., 9.],
[ 16., 17., 18., 19., 14.],
[ 21., 22., 23., 24., 19.],
[ 20., 21., 22., 23., 24.]])
"""
domain = asarray(domain)
size = domain.shape
for k in range(len(size)):
if (size[k] % 2) != 1:
raise ValueError("Each dimension of domain argument "
" should have an odd number of elements.")
return sigtools._order_filterND(a, domain, rank)
def medfilt(volume, kernel_size=None):
"""
Perform a median filter on an N-dimensional array.
Apply a median filter to the input array using a local window-size
given by kernel_size.
Parameters
----------
volume : array_like
An N-dimensional input array.
kernel_size : array_like, optional
A scalar or an N-length list giving the size of the median filter
window in each dimension. Elements of `kernel_size` should be odd.
If `kernel_size` is a scalar, then this scalar is used as the size in
each dimension. Default size is 3 for each dimension.
Returns
-------
out : ndarray
An array the same size as input containing the median filtered
result.
"""
volume = atleast_1d(volume)
if kernel_size is None:
kernel_size = [3] * len(volume.shape)
kernel_size = asarray(kernel_size)
if len(kernel_size.shape) == 0:
kernel_size = [kernel_size.item()] * len(volume.shape)
kernel_size = asarray(kernel_size)
for k in range(len(volume.shape)):
if (kernel_size[k] % 2) != 1:
raise ValueError("Each element of kernel_size should be odd.")
domain = ones(kernel_size)
numels = product(kernel_size, axis=0)
order = int(numels / 2)
return sigtools._order_filterND(volume, domain, order)
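# A minimal sketch of `medfilt` (assumes numpy): with the default 3-sample
# window a single-sample spike should be suppressed.
def _medfilt_spike_sketch():
    import numpy as np
    x = np.array([1.0, 1.0, 50.0, 1.0, 1.0])
    return medfilt(x)   # expected: the 50.0 spike is replaced by 1.0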
def wiener(im, mysize=None, noise=None):
"""
Perform a Wiener filter on an N-dimensional array.
Apply a Wiener filter to the N-dimensional array `im`.
Parameters
----------
im : ndarray
An N-dimensional array.
mysize : int or arraylike, optional
A scalar or an N-length list giving the size of the Wiener filter
window in each dimension. Elements of mysize should be odd.
If mysize is a scalar, then this scalar is used as the size
in each dimension.
noise : float, optional
The noise-power to use. If None, then noise is estimated as the
average of the local variance of the input.
Returns
-------
out : ndarray
Wiener filtered result with the same shape as `im`.
"""
im = asarray(im)
if mysize is None:
mysize = [3] * len(im.shape)
mysize = asarray(mysize)
# Estimate the local mean
lMean = correlate(im, ones(mysize), 'same') / product(mysize, axis=0)
# Estimate the local variance
lVar = (correlate(im ** 2, ones(mysize), 'same') / product(mysize, axis=0)
- lMean ** 2)
# Estimate the noise power if needed.
    if noise is None:
noise = mean(ravel(lVar), axis=0)
res = (im - lMean)
res *= (1 - noise / lVar)
res += lMean
out = where(lVar < noise, lMean, res)
return out
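# A minimal sketch of `wiener` (assumes numpy): filtering a noisy constant
# image; away from the borders the filtered error is typically smaller than
# the raw error.
def _wiener_denoise_sketch():
    import numpy as np
    rng = np.random.RandomState(0)
    img = 5.0 + 0.1 * rng.randn(32, 32)
    out = wiener(img, mysize=[5, 5])
    return np.abs(out - 5.0).mean(), np.abs(img - 5.0).mean()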
def convolve2d(in1, in2, mode='full', boundary='fill', fillvalue=0):
"""Convolve two 2-dimensional arrays.
Convolve `in1` and `in2` with output size determined by mode and boundary
conditions determined by `boundary` and `fillvalue`.
Parameters
----------
in1, in2 : ndarray
Two-dimensional input arrays to be convolved.
mode: str, optional
A string indicating the size of the output:
``valid`` : the output consists only of those elements that do not
rely on the zero-padding.
``same`` : the output is the same size as the largest input centered
with respect to the 'full' output.
``full`` : the output is the full discrete linear cross-correlation
of the inputs. (Default)
boundary : str, optional
A flag indicating how to handle boundaries:
- 'fill' : pad input arrays with fillvalue. (default)
- 'wrap' : circular boundary conditions.
- 'symm' : symmetrical boundary conditions.
fillvalue : scalar, optional
Value to fill pad input arrays with. Default is 0.
Returns
-------
out : ndarray
A 2-dimensional array containing a subset of the discrete linear
convolution of `in1` with `in2`.
"""
if mode == 'valid':
for d1, d2 in zip(np.shape(in1), np.shape(in2)):
if not d1 >= d2:
raise ValueError(
"in1 should have at least as many items as in2 in " \
"every dimension for valid mode.")
val = _valfrommode(mode)
bval = _bvalfromboundary(boundary)
return sigtools._convolve2d(in1, in2, 1, val, bval, fillvalue)
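# A minimal sketch of `convolve2d` (assumes numpy): smoothing with a 3x3
# averaging kernel and symmetric boundary handling keeps the input shape.
def _convolve2d_smoothing_sketch():
    import numpy as np
    img = np.arange(25, dtype=float).reshape(5, 5)
    kernel = np.ones((3, 3)) / 9.0
    return convolve2d(img, kernel, mode='same', boundary='symm')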
def correlate2d(in1, in2, mode='full', boundary='fill', fillvalue=0):
"""Cross-correlate two 2-dimensional arrays.
Cross correlate in1 and in2 with output size determined by mode and
boundary conditions determined by `boundary` and `fillvalue`.
Parameters
----------
in1, in2 : ndarray
Two-dimensional input arrays to be convolved.
mode: str, optional
A string indicating the size of the output:
``valid`` : the output consists only of those elements that do not
rely on the zero-padding.
``same`` : the output is the same size as the largest input centered
with respect to the 'full' output.
``full`` : the output is the full discrete linear cross-correlation
of the inputs. (Default)
boundary : str, optional
A flag indicating how to handle boundaries:
- 'fill' : pad input arrays with fillvalue. (default)
- 'wrap' : circular boundary conditions.
- 'symm' : symmetrical boundary conditions.
fillvalue : scalar, optional
Value to fill pad input arrays with. Default is 0.
Returns
-------
out : ndarray
A 2-dimensional array containing a subset of the discrete linear
cross-correlation of `in1` with `in2`.
"""
val = _valfrommode(mode)
bval = _bvalfromboundary(boundary)
return sigtools._convolve2d(in1, in2, 0, val, bval, fillvalue)
def medfilt2d(input, kernel_size=3):
"""
Median filter a 2-dimensional array.
Apply a median filter to the input array using a local window-size
given by `kernel_size` (must be odd).
Parameters
----------
input : array_like
A 2-dimensional input array.
kernel_size : array_like, optional
A scalar or a list of length 2, giving the size of the
median filter window in each dimension. Elements of
`kernel_size` should be odd. If `kernel_size` is a scalar,
then this scalar is used as the size in each dimension.
Default is a kernel of size (3, 3).
Returns
-------
out : ndarray
An array the same size as input containing the median filtered
result.
"""
image = asarray(input)
if kernel_size is None:
kernel_size = [3] * 2
kernel_size = asarray(kernel_size)
if len(kernel_size.shape) == 0:
kernel_size = [kernel_size.item()] * 2
kernel_size = asarray(kernel_size)
for size in kernel_size:
if (size % 2) != 1:
raise ValueError("Each element of kernel_size should be odd.")
return sigtools._medfilt2d(image, kernel_size)
def lfilter(b, a, x, axis=-1, zi=None):
"""
Filter data along one-dimension with an IIR or FIR filter.
Filter a data sequence, x, using a digital filter. This works for many
fundamental data types (including Object type). The filter is a direct
form II transposed implementation of the standard difference equation
(see Notes).
Parameters
----------
b : array_like
The numerator coefficient vector in a 1-D sequence.
a : array_like
The denominator coefficient vector in a 1-D sequence. If a[0]
is not 1, then both a and b are normalized by a[0].
x : array_like
An N-dimensional input array.
axis : int
The axis of the input data array along which to apply the
linear filter. The filter is applied to each subarray along
this axis (*Default* = -1)
zi : array_like (optional)
Initial conditions for the filter delays. It is a vector
(or array of vectors for an N-dimensional input) of length
max(len(a),len(b))-1. If zi=None or is not given then initial
rest is assumed. SEE signal.lfiltic for more information.
Returns
-------
y : array
The output of the digital filter.
zf : array (optional)
If zi is None, this is not returned, otherwise, zf holds the
final filter delay values.
Notes
-----
The filter function is implemented as a direct II transposed structure.
This means that the filter implements
::
a[0]*y[n] = b[0]*x[n] + b[1]*x[n-1] + ... + b[nb]*x[n-nb]
- a[1]*y[n-1] - ... - a[na]*y[n-na]
using the following difference equations::
y[m] = b[0]*x[m] + z[0,m-1]
z[0,m] = b[1]*x[m] + z[1,m-1] - a[1]*y[m]
...
z[n-3,m] = b[n-2]*x[m] + z[n-2,m-1] - a[n-2]*y[m]
z[n-2,m] = b[n-1]*x[m] - a[n-1]*y[m]
where m is the output sample number and n=max(len(a),len(b)) is the
model order.
The rational transfer function describing this filter in the
z-transform domain is::
-1 -nb
b[0] + b[1]z + ... + b[nb] z
Y(z) = ---------------------------------- X(z)
-1 -na
a[0] + a[1]z + ... + a[na] z
"""
if isscalar(a):
a = [a]
if zi is None:
return sigtools._linear_filter(b, a, x, axis)
else:
return sigtools._linear_filter(b, a, x, axis, zi)
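# A minimal sketch of `lfilter` (assumes numpy): a 3-tap FIR moving average,
# i.e. b = [1/3, 1/3, 1/3] and a = [1].
def _lfilter_moving_average_sketch():
    import numpy as np
    b = np.ones(3) / 3.0
    a = np.array([1.0])
    x = np.arange(10, dtype=float)
    return lfilter(b, a, x)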
def lfiltic(b, a, y, x=None):
"""
Construct initial conditions for lfilter
Given a linear filter (b,a) and initial conditions on the output y
    and the input x, return the initial conditions on the state vector zi
which is used by lfilter to generate the output given the input.
    If M = len(b) - 1 and N = len(a) - 1, then the initial conditions are given
in the vectors x and y as::
x = {x[-1],x[-2],...,x[-M]}
y = {y[-1],y[-2],...,y[-N]}
    If x is not given, its initial conditions are assumed zero.
If either vector is too short, then zeros are added
to achieve the proper length.
The output vector zi contains::
zi = {z_0[-1], z_1[-1], ..., z_K-1[-1]} where K=max(M,N).
"""
N = np.size(a) - 1
M = np.size(b) - 1
K = max(M, N)
y = asarray(y)
zi = zeros(K, y.dtype.char)
if x is None:
x = zeros(M, y.dtype.char)
else:
x = asarray(x)
L = np.size(x)
if L < M:
x = r_[x, zeros(M - L)]
L = np.size(y)
if L < N:
y = r_[y, zeros(N - L)]
for m in range(M):
zi[m] = sum(b[m + 1:] * x[:M - m], axis=0)
for m in range(N):
zi[m] -= sum(a[m + 1:] * y[:N - m], axis=0)
return zi
def deconvolve(signal, divisor):
"""Deconvolves divisor out of signal.
"""
num = atleast_1d(signal)
den = atleast_1d(divisor)
N = len(num)
D = len(den)
if D > N:
quot = []
rem = num
else:
input = ones(N - D + 1, float)
input[1:] = 0
quot = lfilter(num, den, input)
rem = num - convolve(den, quot, mode='full')
return quot, rem
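# A minimal sketch (assumes numpy): `deconvolve` recovers a signal that was
# convolved with a known divisor, up to the remainder term.
def _deconvolve_roundtrip_sketch():
    import numpy as np
    sig = np.array([1.0, 2.0, 3.0])
    div = np.array([1.0, 1.0])
    quot, rem = deconvolve(convolve(div, sig, mode='full'), div)
    return np.allclose(quot, sig)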
def hilbert(x, N=None, axis=-1):
"""
Compute the analytic signal.
The transformation is done along the last axis by default.
Parameters
----------
x : array_like
Signal data
N : int, optional
Number of Fourier components. Default: ``x.shape[axis]``
axis : int, optional
Axis along which to do the transformation. Default: -1.
Returns
-------
xa : ndarray
Analytic signal of `x`, of each 1-D array along `axis`
Notes
-----
The analytic signal `x_a(t)` of `x(t)` is::
x_a = F^{-1}(F(x) 2U) = x + i y
where ``F`` is the Fourier transform, ``U`` the unit step function,
and ``y`` the Hilbert transform of ``x``. [1]_
`axis` argument is new in scipy 0.8.0.
References
----------
.. [1] Wikipedia, "Analytic signal".
http://en.wikipedia.org/wiki/Analytic_signal
"""
x = asarray(x)
if iscomplexobj(x):
raise ValueError("x must be real.")
if N is None:
N = x.shape[axis]
if N <= 0:
raise ValueError("N must be positive.")
Xf = fft(x, N, axis=axis)
h = zeros(N)
if N % 2 == 0:
h[0] = h[N / 2] = 1
h[1:N / 2] = 2
else:
h[0] = 1
h[1:(N + 1) / 2] = 2
if len(x.shape) > 1:
ind = [newaxis] * x.ndim
ind[axis] = slice(None)
h = h[ind]
x = ifft(Xf * h, axis=axis)
return x
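# A minimal sketch of `hilbert` (assumes numpy): the magnitude of the analytic
# signal approximates the envelope of an amplitude-modulated tone, except near
# the ends of the record.
def _hilbert_envelope_sketch():
    import numpy as np
    t = np.linspace(0, 1.0, 400)
    envelope = 1.0 + 0.5 * np.cos(2 * np.pi * 2 * t)
    x = envelope * np.sin(2 * np.pi * 50 * t)
    return np.abs(hilbert(x))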
def hilbert2(x, N=None):
"""
Compute the '2-D' analytic signal of `x`
Parameters
----------
x : array_like
2-D signal data.
N : int or tuple of two ints, optional
Number of Fourier components. Default is ``x.shape``
Returns
-------
xa : ndarray
Analytic signal of `x` taken along axes (0,1).
References
----------
.. [1] Wikipedia, "Analytic signal",
http://en.wikipedia.org/wiki/Analytic_signal
"""
x = atleast_2d(x)
if len(x.shape) > 2:
raise ValueError("x must be rank 2.")
if iscomplexobj(x):
raise ValueError("x must be real.")
if N is None:
N = x.shape
elif isinstance(N, int):
if N <= 0:
raise ValueError("N must be positive.")
N = (N, N)
elif len(N) != 2 or any(n <= 0 for n in N):
raise ValueError("When given as a tuple, N must hold exactly "
"two positive integers")
Xf = fft2(x, N, axes=(0, 1))
h1 = zeros(N[0], 'd')
h2 = zeros(N[1], 'd')
    for p, h in enumerate((h1, h2)):
        N1 = N[p]
        if N1 % 2 == 0:
            h[0] = h[N1 / 2] = 1
            h[1:N1 / 2] = 2
        else:
            h[0] = 1
            h[1:(N1 + 1) / 2] = 2
h = h1[:, newaxis] * h2[newaxis, :]
k = len(x.shape)
while k > 2:
h = h[:, newaxis]
k -= 1
x = ifft2(Xf * h, axes=(0, 1))
return x
def cmplx_sort(p):
"sort roots based on magnitude."
p = asarray(p)
if iscomplexobj(p):
indx = argsort(abs(p))
else:
indx = argsort(p)
return take(p, indx, 0), indx
def unique_roots(p, tol=1e-3, rtype='min'):
"""
Determine unique roots and their multiplicities from a list of roots.
Parameters
----------
p : array_like
The list of roots.
tol : float, optional
The tolerance for two roots to be considered equal. Default is 1e-3.
    rtype : {'max', 'min', 'avg'}, optional
How to determine the returned root if multiple roots are within
`tol` of each other.
- 'max': pick the maximum of those roots.
- 'min': pick the minimum of those roots.
- 'avg': take the average of those roots.
Returns
-------
pout : ndarray
The list of unique roots, sorted from low to high.
mult : ndarray
The multiplicity of each root.
Notes
-----
This utility function is not specific to roots but can be used for any
sequence of values for which uniqueness and multiplicity has to be
determined. For a more general routine, see `numpy.unique`.
Examples
--------
>>> vals = [0, 1.3, 1.31, 2.8, 1.25, 2.2, 10.3]
>>> uniq, mult = sp.signal.unique_roots(vals, tol=2e-2, rtype='avg')
Check which roots have multiplicity larger than 1:
>>> uniq[mult > 1]
array([ 1.305])
"""
if rtype in ['max', 'maximum']:
comproot = np.maximum
elif rtype in ['min', 'minimum']:
comproot = np.minimum
elif rtype in ['avg', 'mean']:
comproot = np.mean
p = asarray(p) * 1.0
tol = abs(tol)
p, indx = cmplx_sort(p)
pout = []
mult = []
indx = -1
curp = p[0] + 5 * tol
sameroots = []
for k in range(len(p)):
tr = p[k]
if abs(tr - curp) < tol:
sameroots.append(tr)
curp = comproot(sameroots)
pout[indx] = curp
mult[indx] += 1
else:
pout.append(tr)
curp = tr
sameroots = [tr]
indx += 1
mult.append(1)
return array(pout), array(mult)
def invres(r, p, k, tol=1e-3, rtype='avg'):
"""Compute b(s) and a(s) from partial fraction expansion: r,p,k
If M = len(b) and N = len(a)
b(s) b[0] x**(M-1) + b[1] x**(M-2) + ... + b[M-1]
H(s) = ------ = ----------------------------------------------
a(s) a[0] x**(N-1) + a[1] x**(N-2) + ... + a[N-1]
r[0] r[1] r[-1]
= -------- + -------- + ... + --------- + k(s)
(s-p[0]) (s-p[1]) (s-p[-1])
If there are any repeated roots (closer than tol), then the partial
fraction expansion has terms like
r[i] r[i+1] r[i+n-1]
-------- + ----------- + ... + -----------
(s-p[i]) (s-p[i])**2 (s-p[i])**n
See Also
--------
residue, poly, polyval, unique_roots
"""
extra = k
p, indx = cmplx_sort(p)
r = take(r, indx, 0)
pout, mult = unique_roots(p, tol=tol, rtype=rtype)
p = []
for k in range(len(pout)):
p.extend([pout[k]] * mult[k])
a = atleast_1d(poly(p))
if len(extra) > 0:
b = polymul(extra, a)
else:
b = [0]
indx = 0
for k in range(len(pout)):
temp = []
for l in range(len(pout)):
if l != k:
temp.extend([pout[l]] * mult[l])
for m in range(mult[k]):
t2 = temp[:]
t2.extend([pout[k]] * (mult[k] - m - 1))
b = polyadd(b, r[indx] * poly(t2))
indx += 1
b = real_if_close(b)
while allclose(b[0], 0, rtol=1e-14) and (b.shape[-1] > 1):
b = b[1:]
return b, a
def residue(b, a, tol=1e-3, rtype='avg'):
"""
Compute partial-fraction expansion of b(s) / a(s).
If ``M = len(b)`` and ``N = len(a)``, then the partial-fraction
expansion H(s) is defined as::
b(s) b[0] s**(M-1) + b[1] s**(M-2) + ... + b[M-1]
H(s) = ------ = ----------------------------------------------
a(s) a[0] s**(N-1) + a[1] s**(N-2) + ... + a[N-1]
r[0] r[1] r[-1]
= -------- + -------- + ... + --------- + k(s)
(s-p[0]) (s-p[1]) (s-p[-1])
If there are any repeated roots (closer together than `tol`), then H(s)
has terms like::
r[i] r[i+1] r[i+n-1]
-------- + ----------- + ... + -----------
(s-p[i]) (s-p[i])**2 (s-p[i])**n
Returns
-------
r : ndarray
Residues.
p : ndarray
Poles.
k : ndarray
Coefficients of the direct polynomial term.
See Also
--------
invres, numpy.poly, unique_roots
"""
b, a = map(asarray, (b, a))
rscale = a[0]
k, b = polydiv(b, a)
p = roots(a)
r = p * 0.0
pout, mult = unique_roots(p, tol=tol, rtype=rtype)
p = []
for n in range(len(pout)):
p.extend([pout[n]] * mult[n])
p = asarray(p)
# Compute the residue from the general formula
indx = 0
for n in range(len(pout)):
bn = b.copy()
pn = []
for l in range(len(pout)):
if l != n:
pn.extend([pout[l]] * mult[l])
an = atleast_1d(poly(pn))
# bn(s) / an(s) is (s-po[n])**Nn * b(s) / a(s) where Nn is
# multiplicity of pole at po[n]
sig = mult[n]
for m in range(sig, 0, -1):
if sig > m:
# compute next derivative of bn(s) / an(s)
term1 = polymul(polyder(bn, 1), an)
term2 = polymul(bn, polyder(an, 1))
bn = polysub(term1, term2)
an = polymul(an, an)
r[indx + m - 1] = polyval(bn, pout[n]) / polyval(an, pout[n]) \
/ factorial(sig - m)
indx += sig
return r / rscale, p, k
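# A minimal round-trip sketch (assumes numpy): `residue` and `invres` are
# inverse operations, so the expansion of b(s)/a(s) can be converted back to
# (approximately) the original coefficient vectors.
def _residue_roundtrip_sketch():
    import numpy as np
    b = np.array([1.0, 3.0])
    a = np.array([1.0, 3.0, 2.0])   # poles at -1 and -2
    r, p, k = residue(b, a)
    b2, a2 = invres(r, p, k)
    return np.allclose(b2, b) and np.allclose(a2, a)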
def residuez(b, a, tol=1e-3, rtype='avg'):
"""Compute partial-fraction expansion of b(z) / a(z).
If M = len(b) and N = len(a)
b(z) b[0] + b[1] z**(-1) + ... + b[M-1] z**(-M+1)
H(z) = ------ = ----------------------------------------------
a(z) a[0] + a[1] z**(-1) + ... + a[N-1] z**(-N+1)
r[0] r[-1]
= --------------- + ... + ---------------- + k[0] + k[1]z**(-1) ...
(1-p[0]z**(-1)) (1-p[-1]z**(-1))
If there are any repeated roots (closer than tol), then the partial
fraction expansion has terms like
r[i] r[i+1] r[i+n-1]
-------------- + ------------------ + ... + ------------------
(1-p[i]z**(-1)) (1-p[i]z**(-1))**2 (1-p[i]z**(-1))**n
See also
--------
invresz, poly, polyval, unique_roots
"""
b, a = map(asarray, (b, a))
gain = a[0]
brev, arev = b[::-1], a[::-1]
krev, brev = polydiv(brev, arev)
if krev == []:
k = []
else:
k = krev[::-1]
b = brev[::-1]
p = roots(a)
r = p * 0.0
pout, mult = unique_roots(p, tol=tol, rtype=rtype)
p = []
for n in range(len(pout)):
p.extend([pout[n]] * mult[n])
p = asarray(p)
# Compute the residue from the general formula (for discrete-time)
# the polynomial is in z**(-1) and the multiplication is by terms
# like this (1-p[i] z**(-1))**mult[i]. After differentiation,
# we must divide by (-p[i])**(m-k) as well as (m-k)!
indx = 0
for n in range(len(pout)):
bn = brev.copy()
pn = []
for l in range(len(pout)):
if l != n:
pn.extend([pout[l]] * mult[l])
an = atleast_1d(poly(pn))[::-1]
# bn(z) / an(z) is (1-po[n] z**(-1))**Nn * b(z) / a(z) where Nn is
# multiplicity of pole at po[n] and b(z) and a(z) are polynomials.
sig = mult[n]
for m in range(sig, 0, -1):
if sig > m:
# compute next derivative of bn(s) / an(s)
term1 = polymul(polyder(bn, 1), an)
term2 = polymul(bn, polyder(an, 1))
bn = polysub(term1, term2)
an = polymul(an, an)
r[indx + m - 1] = (polyval(bn, 1.0 / pout[n]) /
polyval(an, 1.0 / pout[n]) /
factorial(sig - m) / (-pout[n]) ** (sig - m))
indx += sig
return r / gain, p, k
def invresz(r, p, k, tol=1e-3, rtype='avg'):
"""Compute b(z) and a(z) from partial fraction expansion: r,p,k
If M = len(b) and N = len(a)
b(z) b[0] + b[1] z**(-1) + ... + b[M-1] z**(-M+1)
H(z) = ------ = ----------------------------------------------
a(z) a[0] + a[1] z**(-1) + ... + a[N-1] z**(-N+1)
r[0] r[-1]
= --------------- + ... + ---------------- + k[0] + k[1]z**(-1) ...
(1-p[0]z**(-1)) (1-p[-1]z**(-1))
If there are any repeated roots (closer than tol), then the partial
fraction expansion has terms like
r[i] r[i+1] r[i+n-1]
-------------- + ------------------ + ... + ------------------
(1-p[i]z**(-1)) (1-p[i]z**(-1))**2 (1-p[i]z**(-1))**n
See also
--------
residuez, poly, polyval, unique_roots
"""
extra = asarray(k)
p, indx = cmplx_sort(p)
r = take(r, indx, 0)
pout, mult = unique_roots(p, tol=tol, rtype=rtype)
p = []
for k in range(len(pout)):
p.extend([pout[k]] * mult[k])
a = atleast_1d(poly(p))
if len(extra) > 0:
b = polymul(extra, a)
else:
b = [0]
indx = 0
brev = asarray(b)[::-1]
for k in range(len(pout)):
temp = []
# Construct polynomial which does not include any of this root
for l in range(len(pout)):
if l != k:
temp.extend([pout[l]] * mult[l])
for m in range(mult[k]):
t2 = temp[:]
t2.extend([pout[k]] * (mult[k] - m - 1))
brev = polyadd(brev, (r[indx] * poly(t2))[::-1])
indx += 1
b = real_if_close(brev[::-1])
return b, a
def resample(x, num, t=None, axis=0, window=None):
"""
Resample `x` to `num` samples using Fourier method along the given axis.
The resampled signal starts at the same value as `x` but is sampled
with a spacing of ``len(x) / num * (spacing of x)``. Because a
Fourier method is used, the signal is assumed to be periodic.
Parameters
----------
x : array_like
The data to be resampled.
num : int
The number of samples in the resampled signal.
t : array_like, optional
If `t` is given, it is assumed to be the sample positions
associated with the signal data in `x`.
axis : int, optional
The axis of `x` that is resampled. Default is 0.
window : array_like, callable, string, float, or tuple, optional
Specifies the window applied to the signal in the Fourier
domain. See below for details.
Returns
-------
resampled_x or (resampled_x, resampled_t)
Either the resampled array, or, if `t` was given, a tuple
containing the resampled array and the corresponding resampled
positions.
Notes
-----
The argument `window` controls a Fourier-domain window that tapers
the Fourier spectrum before zero-padding to alleviate ringing in
the resampled values for sampled signals you didn't intend to be
interpreted as band-limited.
If `window` is a function, then it is called with a vector of inputs
indicating the frequency bins (i.e. fftfreq(x.shape[axis]) ).
If `window` is an array of the same length as `x.shape[axis]` it is
assumed to be the window to be applied directly in the Fourier
domain (with dc and low-frequency first).
For any other type of `window`, the function `scipy.signal.get_window`
is called to generate the window.
The first sample of the returned vector is the same as the first
sample of the input vector. The spacing between samples is changed
from dx to:
dx * len(x) / num
If `t` is not None, then it represents the old sample positions,
and the new sample positions will be returned as well as the new
samples.
"""
x = asarray(x)
X = fft(x, axis=axis)
Nx = x.shape[axis]
if window is not None:
if callable(window):
W = window(fftfreq(Nx))
elif isinstance(window, ndarray) and window.shape == (Nx,):
W = window
else:
W = ifftshift(get_window(window, Nx))
newshape = ones(len(x.shape))
newshape[axis] = len(W)
W.shape = newshape
X = X * W
sl = [slice(None)] * len(x.shape)
newshape = list(x.shape)
newshape[axis] = num
N = int(np.minimum(num, Nx))
Y = zeros(newshape, 'D')
sl[axis] = slice(0, (N + 1) / 2)
Y[sl] = X[sl]
sl[axis] = slice(-(N - 1) / 2, None)
Y[sl] = X[sl]
y = ifft(Y, axis=axis) * (float(num) / float(Nx))
if x.dtype.char not in ['F', 'D']:
y = y.real
if t is None:
return y
else:
new_t = arange(0, num) * (t[1] - t[0]) * Nx / float(num) + t[0]
return y, new_t
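# A minimal sketch of `resample` (assumes numpy): halving the number of
# samples of one period of a sine keeps the first sample and the shape.
def _resample_halving_sketch():
    import numpy as np
    t = np.linspace(0, 2 * np.pi, 100, endpoint=False)
    x = np.sin(t)
    y = resample(x, 50)
    return y.shape, abs(y[0] - x[0])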
def detrend(data, axis=-1, type='linear', bp=0):
"""
Remove linear trend along axis from data.
Parameters
----------
data : array_like
The input data.
axis : int, optional
The axis along which to detrend the data. By default this is the
last axis (-1).
type : {'linear', 'constant'}, optional
The type of detrending. If ``type == 'linear'`` (default),
the result of a linear least-squares fit to `data` is subtracted
from `data`.
If ``type == 'constant'``, only the mean of `data` is subtracted.
bp : array_like of ints, optional
A sequence of break points. If given, an individual linear fit is
performed for each part of `data` between two break points.
Break points are specified as indices into `data`.
Returns
-------
ret : ndarray
The detrended input data.
Examples
--------
>>> randgen = np.random.RandomState(9)
>>> npoints = 1e3
>>> noise = randgen.randn(npoints)
>>> x = 3 + 2*np.linspace(0, 1, npoints) + noise
>>> (sp.signal.detrend(x) - noise).max() < 0.01
True
"""
if type not in ['linear', 'l', 'constant', 'c']:
raise ValueError("Trend type must be 'linear' or 'constant'.")
data = asarray(data)
dtype = data.dtype.char
if dtype not in 'dfDF':
dtype = 'd'
if type in ['constant', 'c']:
ret = data - expand_dims(mean(data, axis), axis)
return ret
else:
dshape = data.shape
N = dshape[axis]
bp = sort(unique(r_[0, bp, N]))
if np.any(bp > N):
raise ValueError("Breakpoints must be less than length "
"of data along given axis.")
Nreg = len(bp) - 1
# Restructure data so that axis is along first dimension and
# all other dimensions are collapsed into second dimension
rnk = len(dshape)
if axis < 0:
axis = axis + rnk
newdims = r_[axis, 0:axis, axis + 1:rnk]
newdata = reshape(transpose(data, tuple(newdims)),
(N, prod(dshape, axis=0) / N))
newdata = newdata.copy() # make sure we have a copy
if newdata.dtype.char not in 'dfDF':
newdata = newdata.astype(dtype)
# Find leastsq fit and remove it for each piece
for m in range(Nreg):
Npts = bp[m + 1] - bp[m]
A = ones((Npts, 2), dtype)
A[:, 0] = cast[dtype](arange(1, Npts + 1) * 1.0 / Npts)
sl = slice(bp[m], bp[m + 1])
coef, resids, rank, s = linalg.lstsq(A, newdata[sl])
newdata[sl] = newdata[sl] - dot(A, coef)
# Put data back in original shape.
tdshape = take(dshape, newdims, 0)
ret = reshape(newdata, tuple(tdshape))
vals = range(1, rnk)
olddims = vals[:axis] + [0] + vals[axis:]
ret = transpose(ret, tuple(olddims))
return ret
def lfilter_zi(b, a):
"""
Compute an initial state `zi` for the lfilter function that corresponds
to the steady state of the step response.
A typical use of this function is to set the initial state so that the
output of the filter starts at the same value as the first element of
the signal to be filtered.
Parameters
----------
b, a : array_like (1-D)
The IIR filter coefficients. See `scipy.signal.lfilter` for more
information.
Returns
-------
zi : 1-D ndarray
The initial state for the filter.
Notes
-----
A linear filter with order m has a state space representation (A, B, C, D),
for which the output y of the filter can be expressed as::
z(n+1) = A*z(n) + B*x(n)
y(n) = C*z(n) + D*x(n)
where z(n) is a vector of length m, A has shape (m, m), B has shape
(m, 1), C has shape (1, m) and D has shape (1, 1) (assuming x(n) is
a scalar). lfilter_zi solves::
zi = A*zi + B
In other words, it finds the initial condition for which the response
to an input of all ones is a constant.
Given the filter coefficients `a` and `b`, the state space matrices
for the transposed direct form II implementation of the linear filter,
which is the implementation used by scipy.signal.lfilter, are::
A = scipy.linalg.companion(a).T
B = b[1:] - a[1:]*b[0]
assuming `a[0]` is 1.0; if `a[0]` is not 1, `a` and `b` are first
divided by a[0].
Examples
--------
The following code creates a lowpass Butterworth filter. Then it
applies that filter to an array whose values are all 1.0; the
output is also all 1.0, as expected for a lowpass filter. If the
`zi` argument of `lfilter` had not been given, the output would have
shown the transient signal.
>>> from numpy import array, ones
>>> from scipy.signal import lfilter, lfilter_zi, butter
>>> b, a = butter(5, 0.25)
>>> zi = lfilter_zi(b, a)
>>> y, zo = lfilter(b, a, ones(10), zi=zi)
>>> y
array([1., 1., 1., 1., 1., 1., 1., 1., 1., 1.])
Another example:
>>> x = array([0.5, 0.5, 0.5, 0.0, 0.0, 0.0, 0.0])
>>> y, zf = lfilter(b, a, x, zi=zi*x[0])
>>> y
array([ 0.5 , 0.5 , 0.5 , 0.49836039, 0.48610528,
0.44399389, 0.35505241])
Note that the `zi` argument to `lfilter` was computed using
`lfilter_zi` and scaled by `x[0]`. Then the output `y` has no
transient until the input drops from 0.5 to 0.0.
"""
# FIXME: Can this function be replaced with an appropriate
# use of lfiltic? For example, when b,a = butter(N,Wn),
# lfiltic(b, a, y=numpy.ones_like(a), x=numpy.ones_like(b)).
#
# We could use scipy.signal.normalize, but it uses warnings in
# cases where a ValueError is more appropriate, and it allows
# b to be 2D.
b = np.atleast_1d(b)
if b.ndim != 1:
raise ValueError("Numerator b must be rank 1.")
a = np.atleast_1d(a)
if a.ndim != 1:
raise ValueError("Denominator a must be rank 1.")
while len(a) > 1 and a[0] == 0.0:
a = a[1:]
if a.size < 1:
raise ValueError("There must be at least one nonzero `a` coefficient.")
if a[0] != 1.0:
# Normalize the coefficients so a[0] == 1.
a = a / a[0]
b = b / a[0]
n = max(len(a), len(b))
# Pad a or b with zeros so they are the same length.
if len(a) < n:
a = np.r_[a, np.zeros(n - len(a))]
elif len(b) < n:
b = np.r_[b, np.zeros(n - len(b))]
IminusA = np.eye(n - 1) - linalg.companion(a).T
B = b[1:] - a[1:] * b[0]
# Solve zi = A*zi + B
zi = np.linalg.solve(IminusA, B)
# For future reference: we could also use the following
# explicit formulas to solve the linear system:
#
# zi = np.zeros(n - 1)
# zi[0] = B.sum() / IminusA[:,0].sum()
# asum = 1.0
# csum = 0.0
# for k in range(1,n-1):
# asum += a[k]
# csum += b[k] - a[k]*b[0]
# zi[k] = asum*zi[0] - csum
return zi
def filtfilt(b, a, x, axis=-1, padtype='odd', padlen=None):
"""A forward-backward filter.
This function applies a linear filter twice, once forward
and once backwards. The combined filter has linear phase.
Before applying the filter, the function can pad the data along the
given axis in one of three ways: odd, even or constant. The odd
and even extensions have the corresponding symmetry about the end point
of the data. The constant extension extends the data with the values
at end points. On both the forward and backwards passes, the
initial condition of the filter is found by using lfilter_zi and
scaling it by the end point of the extended data.
Parameters
----------
b : array_like, 1-D
The numerator coefficient vector of the filter.
a : array_like, 1-D
The denominator coefficient vector of the filter. If a[0]
is not 1, then both a and b are normalized by a[0].
x : array_like
The array of data to be filtered.
axis : int, optional
The axis of `x` to which the filter is applied.
Default is -1.
padtype : str or None, optional
Must be 'odd', 'even', 'constant', or None. This determines the
type of extension to use for the padded signal to which the filter
is applied. If `padtype` is None, no padding is used. The default
is 'odd'.
padlen : int or None, optional
The number of elements by which to extend `x` at both ends of
`axis` before applying the filter. This value must be less than
`x.shape[axis]-1`. `padlen=0` implies no padding.
The default value is 3*max(len(a),len(b)).
Returns
-------
y : ndarray
The filtered output, an array of type numpy.float64 with the same
shape as `x`.
See Also
--------
lfilter_zi
lfilter
Examples
--------
First we create a one second signal that is the sum of two pure sine
waves, with frequencies 5 Hz and 250 Hz, sampled at 2000 Hz.
>>> t = np.linspace(0, 1.0, 2001)
>>> xlow = np.sin(2 * np.pi * 5 * t)
>>> xhigh = np.sin(2 * np.pi * 250 * t)
>>> x = xlow + xhigh
Now create a lowpass Butterworth filter with a cutoff of 0.125 times
the Nyquist rate, or 125 Hz, and apply it to x with filtfilt. The
result should be approximately xlow, with no phase shift.
>>> from scipy.signal import butter
>>> b, a = butter(8, 0.125)
>>> y = filtfilt(b, a, x, padlen=150)
>>> np.abs(y - xlow).max()
9.1086182074789912e-06
We get a fairly clean result for this artificial example because
the odd extension is exact, and with the moderately long padding,
the filter's transients have dissipated by the time the actual data
is reached. In general, transient effects at the edges are
unavoidable.
"""
if padtype not in ['even', 'odd', 'constant', None]:
raise ValueError(("Unknown value '%s' given to padtype. padtype must "
"be 'even', 'odd', 'constant', or None.") %
padtype)
b = np.asarray(b)
a = np.asarray(a)
x = np.asarray(x)
ntaps = max(len(a), len(b))
if padtype is None:
padlen = 0
if padlen is None:
# Original padding; preserved for backwards compatibility.
edge = ntaps * 3
else:
edge = padlen
# x's 'axis' dimension must be bigger than edge.
if x.shape[axis] <= edge:
raise ValueError("The length of the input vector x must be at least "
"padlen, which is %d." % edge)
if padtype is not None and edge > 0:
# Make an extension of length `edge` at each
# end of the input array.
if padtype == 'even':
ext = even_ext(x, edge, axis=axis)
elif padtype == 'odd':
ext = odd_ext(x, edge, axis=axis)
else:
ext = const_ext(x, edge, axis=axis)
else:
ext = x
# Get the steady state of the filter's step response.
zi = lfilter_zi(b, a)
# Reshape zi and create x0 so that zi*x0 broadcasts
# to the correct value for the 'zi' keyword argument
# to lfilter.
zi_shape = [1] * x.ndim
zi_shape[axis] = zi.size
zi = np.reshape(zi, zi_shape)
x0 = axis_slice(ext, stop=1, axis=axis)
# Forward filter.
(y, zf) = lfilter(b, a, ext, zi=zi * x0)
# Backward filter.
# Create y0 so zi*y0 broadcasts appropriately.
y0 = axis_slice(y, start=-1, axis=axis)
(y, zf) = lfilter(b, a, axis_reverse(y, axis=axis), zi=zi * y0)
# Reverse y.
y = axis_reverse(y, axis=axis)
if edge > 0:
# Slice the actual signal from the extended signal.
y = axis_slice(y, start=edge, stop=-edge, axis=axis)
return y
from scipy.signal.filter_design import cheby1
from scipy.signal.fir_filter_design import firwin
def decimate(x, q, n=None, ftype='iir', axis=-1):
"""Downsample the signal x by an integer factor q, using an order n filter.
By default an order 8 Chebyshev type I filter is used. A 30 point FIR
filter with hamming window is used if ftype is 'fir'.
Parameters
----------
x : N-d array
the signal to be downsampled
q : int
the downsampling factor
n : int or None
the order of the filter (1 less than the length for 'fir')
ftype : {'iir' or 'fir'}
the type of the lowpass filter
axis : int
the axis along which to decimate
Returns
-------
y : N-d array
the down-sampled signal
See also
--------
resample
"""
if not isinstance(q, int):
raise TypeError("q must be an integer")
if n is None:
if ftype == 'fir':
n = 30
else:
n = 8
if ftype == 'fir':
b = firwin(n + 1, 1. / q, window='hamming')
a = 1.
else:
b, a = cheby1(n, 0.05, 0.8 / q)
y = lfilter(b, a, x, axis=axis)
sl = [slice(None)] * y.ndim
sl[axis] = slice(None, None, q)
return y[sl]
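# A minimal sketch of `decimate` (assumes numpy): downsampling a 1 kHz-sampled
# 5 Hz sine by a factor of 4 with the default Chebyshev type I filter.
def _decimate_by_four_sketch():
    import numpy as np
    t = np.arange(0, 1.0, 1.0 / 1000.0)
    x = np.sin(2 * np.pi * 5 * t)
    return decimate(x, 4)   # expected length: about 250 samples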
|
jrversteegh/softsailor
|
deps/scipy-0.10.0b2/scipy/signal/signaltools.py
|
Python
|
gpl-3.0
| 49,881
|
#! /usr/bin/python
# _*_ coding: utf-8 _*_
#
# Dell EMC OpenManage Ansible Modules
#
# Copyright © 2017 Dell Inc. or its subsidiaries. All rights reserved.
# Dell, EMC, and other trademarks are trademarks of Dell Inc. or its
# subsidiaries. Other trademarks may be trademarks of their respective owners.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: dellemc_idrac_nic
short_description: Configure iDRAC Network settings
version_added: "2.3"
description:
- Configure iDRAC Network settings
options:
idrac_ip:
required: True
description:
- iDRAC IP Address
type: 'str'
idrac_user:
required: True
description:
- iDRAC user name
type: 'str'
idrac_pwd:
required: False
description:
- iDRAC user password
type: 'str'
idrac_port:
required: False
description:
- iDRAC port
default: 443
type: 'int'
share_name:
required: True
description:
- CIFS or NFS Network share
share_user:
required: True
description:
- Network share user in the format user@domain if user is part of a domain else 'user'
type: 'str'
share_pwd:
required: True
description:
- Network share user password
type: 'str'
share_mnt:
required: True
description:
- Local mount path of the network file share with read-write permission for ansible user
type: 'path'
nic_selection:
required: False
description:
- NIC Selection mode
choices: ['Dedicated','LOM1','LOM2','LOM3','LOM4']
default: "Dedicated"
nic_failover:
required: False
description:
- Failover network if NIC selection fails
choices: ["None", "LOM1", "LOM2", "LOM3", "LOM4", "All"]
default: "None"
nic_autoneg:
required: False
description:
- if C(True), will enable auto negotiation
- if C(False), will disable auto negotiation
default: False
nic_speed:
required: False
description:
- Network Speed
choices: ["10", "100", "1000"]
default: "1000"
nic_duplex:
required: False
description:
- if C(Full), will enable the Full-Duplex mode
- if C(Half), will enable the Half-Duplex mode
choices: ["Full", "Half"]
default: "Full"
nic_autodedicated:
required: False
description:
- if C(True), will enable the auto-dedicated NIC option
- if C(False), will disable the auto-dedicated NIC option
default: False
requirements: ['omsdk']
author: "anupam.aloke@dell.com"
'''
EXAMPLES = '''
# Configure NIC Selection using a CIFS Network share
- name: Configure NIC Selection
dellemc_idrac_nic:
idrac_ip: "192.168.1.1"
idrac_user: "root"
idrac_pwd: "calvin"
share_name: "\\\\192.168.10.10\\share"
share_user: "user1"
share_pwd: "password"
share_mnt: "/mnt/share"
nic_selection: "Dedicated"
state: "enable"
'''
RETURN = '''
'''
import traceback
from ansible.module_utils.dellemc_idrac import iDRACConnection
from ansible.module_utils.basic import AnsibleModule
try:
from omsdk.sdkcenum import TypeHelper
from omdrivers.enums.iDRAC.iDRAC import (
AutoConfig_NICTypes, Autoneg_NICTypes, DHCPEnable_IPv4Types,
DNSDomainFromDHCP_NICStaticTypes, DNSFromDHCP_IPv4StaticTypes,
DNSRegister_NICTypes, Duplex_NICTypes, Enable_IPv4Types,
Enable_NICTypes, Failover_NICTypes, Selection_NICTypes, Speed_NICTypes,
VLanEnable_NICTypes
)
HAS_OMSDK = True
except ImportError:
HAS_OMSDK = False
def _setup_nic(idrac, module):
"""
Setup iDRAC NIC attributes
Keyword arguments:
idrac -- iDRAC handle
module -- Ansible module
"""
# Get the current NIC settings
curr_nic_selection = idrac.config_mgr._sysconfig.iDRAC.NIC.Selection_NIC
curr_nic_failover = idrac.config_mgr._sysconfig.iDRAC.NIC.Failover_NIC
curr_nic_autoneg = idrac.config_mgr._sysconfig.iDRAC.NIC.Autoneg_NIC
idrac.config_mgr._sysconfig.iDRAC.NIC.Enable_NIC = \
TypeHelper.convert_to_enum(module.params['nic_enable'],
Enable_NICTypes)
idrac.config_mgr._sysconfig.iDRAC.NIC.Selection_NIC = \
TypeHelper.convert_to_enum(module.params['nic_selection'],
Selection_NICTypes)
# NIC Selection mode and failover mode should not be same
if module.params['nic_selection'] == module.params['nic_failover']:
module.fail_json(msg="NIC Selection mode and Failover mode cannot be same")
elif curr_nic_selection != Selection_NICTypes.Dedicated and \
module.params['nic_selection'] != 'Dedicated':
idrac.config_mgr._sysconfig.iDRAC.NIC.Failover_NIC = \
TypeHelper.convert_to_enum(module.params['nic_failover'],
Failover_NICTypes)
# if NIC Selection is not 'Dedicated', then Auto-Negotiation is always ON
if curr_nic_selection != Selection_NICTypes.Dedicated and \
module.params['nic_selection'] != 'Dedicated':
idrac.config_mgr._sysconfig.iDRAC.NIC.Autoneg_NIC = Autoneg_NICTypes.Enabled
else:
idrac.config_mgr._sysconfig.iDRAC.NIC.Autoneg_NIC = \
TypeHelper.convert_to_enum(module.params['nic_autoneg'],
Autoneg_NICTypes)
# NIC Speed and Duplex mode can only be set when Auto-Negotiation is not ON
if curr_nic_autoneg != Autoneg_NICTypes.Enabled and \
module.params['nic_autoneg'] != 'Enabled':
        if curr_nic_selection != Selection_NICTypes.Dedicated and \
module.params['nic_selection'] != 'Dedicated':
idrac.config_mgr._sysconfig.iDRAC.NIC.Speed_NIC = Speed_NICTypes.T_100
else:
idrac.config_mgr._sysconfig.iDRAC.NIC.Speed_NIC = \
TypeHelper.convert_to_enum(module.params['nic_speed'],
Speed_NICTypes)
idrac.config_mgr._sysconfig.iDRAC.NIC.Duplex_NIC = \
TypeHelper.convert_to_enum(module.params['nic_duplex'],
Duplex_NICTypes)
idrac.config_mgr._sysconfig.iDRAC.NIC.MTU_NIC = module.params['nic_mtu']
# DNS Registration
idrac.config_mgr._sysconfig.iDRAC.NIC.DNSRegister_NIC = \
TypeHelper.convert_to_enum(module.params['dns_register'],
DNSRegister_NICTypes)
if module.params['dns_idrac_name']:
idrac.config_mgr._sysconfig.iDRAC.NIC.DNSRacName = module.params['dns_idrac_name']
# Enable Auto-Config
if module.params['nic_auto_config'] != 'Disabled':
if module.params['ipv4_enable'] != 'Enabled' or \
module.params['ipv4_dhcp_enable'] != 'Enabled':
module.fail_json(msg="IPv4 and DHCPv4 must be enabled for Auto-Config")
idrac.config_mgr._sysconfig.iDRAC.NIC.AutoConfig_NIC = \
TypeHelper.convert_to_enum(module.params['nic_auto_config'],
AutoConfig_NICTypes)
# VLAN
idrac.config_mgr._sysconfig.iDRAC.NIC.VLanEnable_NIC = \
TypeHelper.convert_to_enum(module.params['vlan_enable'],
VLanEnable_NICTypes)
idrac.config_mgr._sysconfig.iDRAC.NIC.VLanID_NIC = module.params['vlan_id']
idrac.config_mgr._sysconfig.iDRAC.NIC.VLanPriority_NIC = module.params['vlan_priority']
def _setup_nic_static(idrac, module):
"""
Setup iDRAC NIC Static attributes
Keyword arguments:
idrac -- iDRAC handle
module -- Ansible module
"""
idrac.config_mgr._sysconfig.iDRAC.NICStatic.DNSDomainFromDHCP_NICStatic = \
TypeHelper.convert_to_enum(module.params['dns_domain_from_dhcp'],
DNSDomainFromDHCP_NICStaticTypes)
if module.params['dns_domain_name']:
idrac.config_mgr._sysconfig.iDRAC.NICStatic.DNSDomainName_NICStatic = \
module.params['dns_domain_name']
def _setup_ipv4(idrac, module):
"""
Setup IPv4 parameters
Keyword arguments:
idrac -- iDRAC handle
module -- Ansible module
"""
idrac.config_mgr._sysconfig.iDRAC.IPv4.Enable_IPv4 = \
TypeHelper.convert_to_enum(module.params['ipv4_enable'],
Enable_IPv4Types)
idrac.config_mgr._sysconfig.iDRAC.IPv4.DHCPEnable_IPv4 = \
TypeHelper.convert_to_enum(module.params['ipv4_dhcp_enable'],
DHCPEnable_IPv4Types)
def _setup_ipv4_static(idrac, module):
"""
Setup IPv4 Static parameters
Keyword arguments:
idrac -- iDRAC handle
module -- Ansible module
"""
if module.params['ipv4_dhcp_enable'] == 'Disabled':
if module.params['ipv4_static']:
idrac.config_mgr._sysconfig.iDRAC.IPv4Static.Address_IPv4Static = \
module.params['ipv4_static']
if module.params['ipv4_static_gw']:
idrac.config_mgr._sysconfig.iDRAC.IPv4Static.Gateway_IPv4Static = \
module.params['ipv4_static_gw']
if module.params['ipv4_static_mask']:
idrac.config_mgr._sysconfig.iDRAC.IPv4Static.Netmask_IPv4Static = \
module.params['ipv4_static_mask']
idrac.config_mgr._sysconfig.iDRAC.IPv4Static.DNSFromDHCP_IPv4Static = \
TypeHelper.convert_to_enum(module.params['ipv4_dns_from_dhcp'],
DNSFromDHCP_IPv4StaticTypes)
if module.params['ipv4_dns_from_dhcp'] != 'Enabled':
if module.params['ipv4_preferred_dns']:
idrac.config_mgr._sysconfig.iDRAC.IPv4Static.DNS1_IPv4Static = \
                module.params['ipv4_preferred_dns']
if module.params['ipv4_alternate_dns']:
idrac.config_mgr._sysconfig.iDRAC.IPv4Static.DNS2_IPv4Static = \
module.params['ipv4_alternate_dns']
def setup_idrac_nic (idrac, module):
"""
Setup iDRAC NIC configuration settings
Keyword arguments:
idrac -- iDRAC handle
module -- Ansible module
"""
msg = {}
msg['changed'] = False
msg['failed'] = False
msg['msg'] = {}
err = False
try:
_setup_nic(idrac, module)
_setup_nic_static(idrac, module)
_setup_ipv4(idrac, module)
_setup_ipv4_static(idrac, module)
msg['changed'] = idrac.config_mgr._sysconfig.is_changed()
if module.check_mode:
# since it is running in check mode, reject the changes
idrac.config_mgr._sysconfig.reject()
else:
msg['msg'] = idrac.config_mgr.apply_changes()
if 'Status' in msg['msg'] and msg['msg']["Status"] != "Success":
msg['failed'] = True
msg['changed'] = False
except Exception as e:
err = True
msg['msg'] = "Error: %s" % str(e)
msg['exception'] = traceback.format_exc()
msg['failed'] = True
return msg, err
# Main
def main():
module = AnsibleModule(
argument_spec=dict(
# iDRAC handle
idrac=dict(required=False, type='dict'),
# iDRAC Credentials
idrac_ip=dict(required=True, type='str'),
idrac_user=dict(required=True, type='str'),
idrac_pwd=dict(required=True, type='str', no_log=True),
idrac_port=dict(required=False, default=443, type='int'),
# Network File Share
share_name=dict(required=True, type='str'),
share_user=dict(required=True, type='str'),
share_pwd=dict(required=True, type='str', no_log=True),
share_mnt=dict(required=True, type='path'),
# iDRAC Network Settings
nic_enable=dict(required=False, choices=['Enabled', 'Disabled'],
default='Enabled', type='str'),
nic_selection=dict(required=False,
choices=['Dedicated', 'LOM1', 'LOM2', 'LOM3', 'LOM4'],
default='Dedicated', type='str'),
nic_failover=dict(required=False,
choices=['ALL', 'LOM1', 'LOM2', 'LOM3', 'LOM4', 'None'],
default='None'),
nic_autoneg=dict(required=False, choices=['Enabled', 'Disabled'],
default='Enabled', type='str'),
nic_speed=dict(required=False, choices=['10', '100', '1000'],
default='1000', type='str'),
nic_duplex=dict(required=False, choices=['Full', 'Half'],
default='Full', type='str'),
nic_mtu=dict(required=False, default=1500, type='int'),
# Network Common Settings
dns_register=dict(required=False, choices=['Enabled', 'Disabled'],
default='Disabled', type='str'),
dns_idrac_name=dict(required=False, default=None, type='str'),
dns_domain_from_dhcp=dict(required=False,
choices=['Disabled', 'Enabled'],
default='Disabled', type='str'),
dns_domain_name=dict(required=False, default=None, type='str'),
# Auto-Config Settings
nic_auto_config=dict(required=False,
choices=['Disabled', 'Enable Once', 'Enable Once After Reset'],
default='Disabled', type='str'),
# IPv4 Settings
ipv4_enable=dict(required=False, choices=['Enabled', 'Disabled'],
default='Enabled', type='str'),
ipv4_dhcp_enable=dict(required=False, choices=['Enabled', 'Disabled'],
default='Disabled', type='str'),
ipv4_static=dict(required=False, default=None, type='str'),
ipv4_static_gw=dict(required=False, default=None, type='str'),
ipv4_static_mask=dict(required=False, default=None, type='str'),
ipv4_dns_from_dhcp=dict(required=False,
choices=['Enabled', 'Disabled'],
default='Disabled', type='str'),
ipv4_preferred_dns=dict(required=False, default=None, type='str'),
ipv4_alternate_dns=dict(required=False, default=None, type='str'),
# VLAN Settings
vlan_enable=dict(required=False, choices=['Enabled', 'Disabled'],
default='Disabled', type='str'),
vlan_id=dict(required=False, default=None, type='int'),
vlan_priority=dict(required=False, default=None, type='int'),
),
supports_check_mode=True)
if not HAS_OMSDK:
module.fail_json(msg="Dell EMC OpenManage Python SDK required for this module")
# Connect to iDRAC
idrac_conn = iDRACConnection(module)
idrac = idrac_conn.connect()
# Setup network share as local mount
if not idrac_conn.setup_nw_share_mount():
module.fail_json(msg="Failed to setup network share local mount point")
# Setup iDRAC NIC
(msg, err) = setup_idrac_nic(idrac, module)
# Disconnect from iDRAC
idrac_conn.disconnect()
if err:
module.fail_json(**msg)
module.exit_json(**msg)
if __name__ == '__main__':
main()
|
anupamaloke/Dell-EMC-Ansible-Modules-for-iDRAC
|
library/dellemc_idrac_nic.py
|
Python
|
gpl-3.0
| 16,156
|
# -*- coding: utf-8 -*-
import os
import sys
sys.path.append(os.path.join(os.getcwd(), os.path.pardir))
import unittest
from digraph import digraph
from graph import graph
from graph_algorithms import *
class test_graph(unittest.TestCase):
def setUp(self):
self.gr = graph()
self.gr.add_nodes(["s", "a", "b", "c", "d", "e",
"f", "g", "h", "j", "k", "l"])
self.gr.add_edges([("s", "a"), ("s", "b"), ("a", "c"), ("c", "e")])
self.gr.add_edges([("e", "d"), ("d", "b"), ("a", "b"), ("c", "d")])
self.gr.add_edges([("g", "h"), ("f", "g")])
self.gr.add_edges([("j", "k"), ("j", "l")])
self.digr = digraph()
self.digr.add_nodes(['s', 'a', 'b', 'c', 'd', 'e', 'f'])
self.digr.add_edges([("s", "a"), ("a", "b"), ("b", "a"), ("c", "b")])
self.digr.add_edges([("b", "s"), ("s", "d"), ("d", "e"), ("e", "d")])
self.digr.add_edges([("b", "f"), ("e", "f")])
def test_bfs_undirected_graph(self):
self.assertEqual(len(BFS(self.gr, "s")), 6)
self.assertEqual(len(BFS(self.gr, "j")), 3)
self.assertEqual(len(BFS(self.gr, "g")), 3)
def test_bfs_directed_graph(self):
self.assertEqual(len(BFS(self.digr, "s")), 6)
self.assertEqual(len(BFS(self.digr, "c")), 7)
self.assertEqual(len(BFS(self.digr, "f")), 1)
def test_dfs_undirected_graph(self):
self.assertEqual(len(DFS(self.gr, "s")), 6)
self.assertEqual(len(DFS(self.gr, "j")), 3)
self.assertEqual(len(DFS(self.gr, "g")), 3)
def test_dfs_directed_graph(self):
self.assertEqual(len(DFS(self.digr, "s")), 6)
self.assertEqual(len(DFS(self.digr, "c")), 7)
self.assertEqual(len(DFS(self.digr, "f")), 1)
def test_shortest_hops_undirected_graph(self):
self.assertEqual(shortest_hops(self.gr, "s")["c"], 2)
self.assertEqual(shortest_hops(self.gr, "c")["s"], 2)
self.assertEqual(shortest_hops(self.gr, "s")["s"], 0)
self.assertEqual(shortest_hops(self.gr, "c")["j"], float('inf'))
def test_shortest_hops_directed_graph(self):
self.assertEqual(shortest_hops(self.digr, "s")["f"], 3)
self.assertEqual(shortest_hops(self.digr, "f")["s"], float('inf'))
self.assertEqual(shortest_hops(self.digr, "s")["s"], 0)
self.assertEqual(shortest_hops(self.digr, "s")["c"], float('inf'))
def test_undirected_connected_component(self):
self.assertEqual(len(undirected_connected_components(self.gr)), 3)
self.assertRaises(
Exception, undirected_connected_components, self.digr)
def test_topological_ordering(self):
dag = digraph() # directed acyclic graph
dag.add_nodes(["a", "b", "c", "d", "e", "f", "g", "h"])
dag.add_edges([("a", "b"), ("a", "c"), ("a", "e"), ("d", "a")])
dag.add_edges(
[("g", "b"), ("g", "f"), ("f", "e"), ("h", "f"), ("h", "a")])
order = {o[0]: o[1] for o in topological_ordering(dag)}
self.assertEqual(sum([order[u] < order[v] for (u, v) in
dag.edges()]), len(dag.edges())) # all comparisons are True
def test_directed_connected_components(self):
digr = digraph()
digr.add_nodes(["a", "b", "c", "d", "e", "f", "g", "h", "i"])
digr.add_edges([("b", "a"), ("a", "c"), ("c", "b"), ("d", "b")])
digr.add_edges([("d", "f"), ("f", "e"), ("e", "d"), ("g", "e")])
digr.add_edges([("g", "h"), ("h", "i"), ("i", "g")])
self.assertEqual(len(directed_connected_components(digr)), 3)
digr2 = digraph()
digr2.add_nodes(
["a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k"])
digr2.add_edges(
[("a", "b"), ("b", "c"), ("c", "a"), ("b", "d"), ("d", "e")])
digr2.add_edges(
[("e", "f"), ("f", "g"), ("g", "e"), ("d", "g"), ("i", "f")])
digr2.add_edges(
[("h", "g"), ("c", "h"), ("c", "k"), ("h", "i"), ("i", "j")])
digr2.add_edges([("h", "j"), ("j", "k"), ("k", "h")])
self.assertEqual(len(directed_connected_components(digr2)), 4)
def test_shortest_path_in_directed_graph(self):
digr = digraph()
digr.add_nodes(["a", "b", "c", "d", "e", "f"])
digr.add_edge(("a", "b"), 7)
digr.add_edge(("a", "c"), 9)
digr.add_edge(("a", "f"), 14)
digr.add_edge(("f", "e"), 9)
digr.add_edge(("c", "f"), 2)
digr.add_edge(("c", "d"), 11)
digr.add_edge(("b", "c"), 10)
digr.add_edge(("b", "d"), 15)
digr.add_edge(("d", "e"), 6)
self.assertEqual(shortest_path(digr, "a")["a"], 0)
self.assertEqual(shortest_path(digr, "a")["b"], 7)
self.assertEqual(shortest_path(digr, "a")["c"], 9)
self.assertEqual(shortest_path(digr, "a")["d"], 20)
self.assertEqual(shortest_path(digr, "a")["e"], 20)
self.assertEqual(shortest_path(digr, "a")["f"], 11)
def test_prims_minimum_spanning_tree(self):
gr = graph()
gr.add_nodes(["a", "b", "c", "d"])
gr.add_edge(("a", "b"), 4)
gr.add_edge(("b", "c"), 3)
gr.add_edge(("a", "c"), 1)
gr.add_edge(("c", "d"), 2)
min_cost = minimum_spanning_tree(gr)
self.assertEqual(min_cost, 6)
def test_kruskals_minimum_spanning_tree(self):
gr = graph()
gr.add_nodes(["a", "b", "c", "d"])
gr.add_edge(("a", "b"), 4)
gr.add_edge(("b", "c"), 3)
gr.add_edge(("a", "c"), 1)
gr.add_edge(("c", "d"), 2)
min_cost = kruskal_MST(gr)
self.assertEqual(min_cost, 6)
if __name__ == "__main__":
unittest.main()
os.system("pause")
|
NicovincX2/Python-3.5
|
Algorithmique/Algorithme/Algorithme de la théorie des graphes/graph_algorithms_test.py
|
Python
|
gpl-3.0
| 5,705
|
import random
import math
import collections
import tree_decomposition as td
import create_production_rules as pr
import graph_sampler as gs
import stochastic_growth
import probabilistic_growth
import net_metrics
import matplotlib.pyplot as plt
import product
import networkx as nx
import numpy as np
import snap
#G = snap.GenRndGnm(snap.PUNGraph, 10000, 5000)
#G = nx.grid_2d_graph(4,4)
#line
#G = nx.Graph()
#G.add_edge(1, 2)
#G.add_edge(2, 3)
#G.add_edge(3, 4)
#G.add_edge(4, 5)
#G.add_edge(5, 6)
#G.add_edge(6, 7)
#G.add_edge(7, 8)
#G.add_edge(8, 9)
#G.add_edge(9, 10)
#G.add_edge(10, 1) #circle
#G = nx.star_graph(6)
#G = nx.ladder_graph(10)
#G = nx.karate_club_graph()
#nx.write_edgelist((G.to_directed()), '../demo_graphs/karate.txt', comments="#", delimiter=' ', data=False)
#exit()
#G = nx.barabasi_albert_graph(1000,3)
#G = nx.connected_watts_strogatz_graph(200,8,.2)
#G = nx.read_edgelist("../demo_graphs/as20000102.txt")
G = nx.read_edgelist("../demo_graphs/CA-GrQc.txt")
#G = nx.read_edgelist("../demo_graphs/Email-Enron.txt")
#G = nx.read_edgelist("../demo_graphs/Brightkite_edges.txt")
G= list(nx.connected_component_subgraphs(G))[0]
##board example
#G = nx.Graph()
#G.add_edge(1, 2)
#G.add_edge(2, 3)
#G.add_edge(2, 4)
#G.add_edge(3, 4)
#G.add_edge(3, 5)
#G.add_edge(4, 6)
#G.add_edge(5, 6)
#G.add_edge(1, 5)
# print G.number_of_nodes()
num_nodes = G.number_of_nodes()
print num_nodes
print
print "--------------------"
print "------- Edges ------"
print "--------------------"
num_edges = G.number_of_edges()
print num_edges
#print
#print "--------------------"
#print "------ Cliques -----"
#print "--------------------"
#print list(nx.find_cliques(G))
if not nx.is_connected(G):
print "Graph must be connected";
exit()
G.remove_edges_from(G.selfloop_edges())
if G.number_of_selfloops() > 0:
print "Graph must be not contain self-loops";
exit()
Ggl = gs.subgraphs_cnt(G,100)
setlendf = []
if num_nodes>400:
#for i in range(0,10):
# setlen = []
# for i in range(10,510, 20):
for Gprime in gs.rwr_sample(G, 10, 500):
pr.prod_rules = {}
T = td.quickbb(Gprime)
prod_rules = pr.learn_production_rules(Gprime, T)
# setlen.append(len(rule_probabilities))
print prod_rules
else:
T = td.quickbb(G)
prod_rules = pr.learn_production_rules(G, T)
print "Rule Induction Complete"
exit()
Gergm = []
Gergmgl = []
for run in range(1, 3):
f = open('../demo_graphs/ergm_sim/enron/data '+str(run)+' .csv', 'r')
E=nx.Graph()
header = 0
for line in f:
line=line.rstrip()
if header == 0:
header+=1
continue
c = line.split("\t")
        if len(c) != 3:
            continue
        # skip rows that reference node ids beyond the original graph's size
        if int(c[1]) > num_nodes or int(c[2]) > num_nodes:
            continue
        E.add_edge(int(c[1]), int(c[2]))
Gergm.append(E)
print "G ergm iteration " + str(run) + " of 20"
Gergmgl.append(gs.subgraphs_cnt(E,50))
k = int(math.floor(math.log(num_nodes, 10)))
P = [[.9716,.658],[.5684,.1256]] #karate
P = [[.8581,.5116],[.5063,.2071]] #as20000102
#P = [[.7317,.5533],[.5354,.2857]] #dblp
#P = [[.9031,.5793],[.5051,.2136]] #ca-grqc
#P = [[.9124,.5884],[.5029,.2165]] #enron
P = [[.8884,.5908],[.5628,.2736]] #brightkite
Gkron = product.kronecker_random_graph(k,P).to_undirected()
print("GKron finished")
sum = .9716+.5382+.5684+.1256 #karate
#sum = .8581+.5116+.5063+.2071 #as20000102
#sum = .7317+.5533+.5354+.2857 # dblp
#sum = .9031+.5793+.5051+.2136 #ca-grqc
#sum = .9124+.5884+.5029+.2165 #enron
sum = .8884+.5908+.5628+.2736 #brightkite
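# snap.GenRMat expects the R-MAT partition probabilities a, b and c (d is
# implied as 1 - a - b - c), so the Kronecker initiator entries above are
# divided by their total to turn them into a probability distribution for
# the call below.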
GRmatSNAP = snap.GenRMat(num_nodes, num_edges, P[0][0]/sum, P[0][1]/sum, P[1][0]/sum)
GRmat = nx.Graph()
for EI in GRmatSNAP.Edges():
GRmat.add_edge(EI.GetSrcNId(), EI.GetDstNId())
print("GRMAT finished")
GRmatgl = gs.subgraphs_cnt(GRmat,100)
n_distribution = {}
Gstar = []
Dstar = []
Gstargl = []
for run in range(0, 20):
nG, nD = stochastic_growth.grow(prod_rules, num_nodes/10,0)#num_nodes/50)
Gstar.append(nG)
Dstar.append(nD)
Gstargl.append(gs.subgraphs_cnt(nG,100))
#Gstar.append(probabilistic_growth.grow(rule_probabilities,prod_rule_set, num_nodes))
print "G* iteration " + str(run) + " of 20"
print(nD)
print ""
print "G* Samples Complete"
label = "AS"
net_metrics.draw_graphlet_plot(Ggl, Gstargl, Gergmgl, Gkron, GRmatgl, label, plt.figure())
exit()
net_metrics.draw_diam_plot(G, Dstar, Gergm, Gkron, GRmat, label, plt.figure())
net_metrics.draw_degree_rank_plot(G, Gstar, Gergm, Gkron, GRmat, label, plt.figure())
#net_metrics.draw_scree_plot(G, Gstar, label, ax1)
net_metrics.draw_network_value(G, Gstar, Gergm, Gkron, GRmat, label, plt.figure())
net_metrics.draw_hop_plot(G, Gstar, Gergm, Gkron, GRmat, label, plt.figure())
#ax1.plot(ef.mean().index, ef.mean()[1],'b')
net_metrics.save_plot_figure_2disk()
|
abitofalchemy/hrg_nets
|
karate_chop.py
|
Python
|
gpl-3.0
| 4,865
|
#!/usr/bin/python
print " __ "
print " |__|____ ___ __ "
print " | \__ \\\\ \/ / "
print " | |/ __ \\\\ / "
print " /\__| (____ /\_/ "
print " \______| \/ "
print " "
print 'Module 5'
print 'Exploitation Techniques'
print
"""
Write a pyCommand script to find whether DEP, ASLR and SafeSEH are enabled for the loaded modules
"""
import immlib
import struct
DESC = "DEP, ASLR and SafeSEH Detection in all Modules"
# More information
# http://msdn.microsoft.com/en-us/library/windows/desktop/ms680339(v=vs.85).aspx
# How to detect presence of security mechanisms
IMAGE_DLLCHARACTERISTICS_NX_COMPAT = 0x0100 # DEP compatible
IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE = 0x0040 # ASLR
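# For reference (not checked below): SafeSEH cannot be read from DllCharacteristics
# alone. IMAGE_DLLCHARACTERISTICS_NO_SEH (0x0400) only marks an image that uses no
# SEH at all; a real SafeSEH check would parse the SEHandlerTable/SEHandlerCount
# fields of the load config directory.
IMAGE_DLLCHARACTERISTICS_NO_SEH = 0x0400 # image does not use SEH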
def main(args) :
imm = immlib.Debugger()
# code borrowed from safeseh pycommand
allmodules=imm.getAllModules()
for key in allmodules.keys():
dep = aslr = "NO"
module = imm.getModule(key)
module_baseAddress = module.getBaseAddress()
        # e_lfanew at image base + 0x3c gives the offset of the PE signature
        pe_offset = struct.unpack('<L',imm.readMemory(module_baseAddress + 0x3c,4))[0]
        pebase = module_baseAddress + pe_offset
        # DllCharacteristics sits at PE signature + 0x5e (0x18 bytes of signature
        # and COFF header plus offset 0x46 into the optional header)
        flags = struct.unpack('<H',imm.readMemory(pebase + 0x5e,2))[0]
if (flags & IMAGE_DLLCHARACTERISTICS_NX_COMPAT != 0) :
dep = "YES"
if (flags & IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE != 0) :
aslr = "YES"
imm.log("---- %s ----" %key)
imm.log("DEP: %s ASLR: %s" %(dep, aslr))
imm.log("--------------")
return "[+] Executed Successfully"
|
nomad-vino/SPSE-1
|
Module 5/x5_7.py
|
Python
|
gpl-3.0
| 1,696
|
from django.test import TestCase, tag
from member.tests.test_mixins import MemberMixin
from django.contrib.auth.models import User
from django.test.client import RequestFactory
from django.urls.base import reverse
from enumeration.views import DashboardView, ListBoardView
class TestEnumeration(MemberMixin, TestCase):
def setUp(self):
self.factory = RequestFactory()
self.user = User.objects.create_user(username='erik')
self.household_structure = self.make_household_ready_for_enumeration(make_hoh=False)
def test_dashboard_view(self):
url = reverse('enumeration:dashboard_url', kwargs=dict(
household_identifier=self.household_structure.household.household_identifier,
survey=self.household_structure.survey))
request = self.factory.get(url)
request.user = self.user
response = DashboardView.as_view()(request)
self.assertEqual(response.status_code, 200)
def test_dashboard_view2(self):
url = reverse('enumeration:dashboard_url', kwargs=dict(
household_identifier=self.household_structure.household.household_identifier,
survey=self.household_structure.survey))
self.client.force_login(self.user)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
def test_list_view1(self):
url = reverse('enumeration:listboard_url')
request = self.factory.get(url)
request.user = self.user
response = ListBoardView.as_view()(request)
self.assertEqual(response.status_code, 200)
def test_list_view2(self):
url = reverse('enumeration:listboard_url', kwargs=dict(page=1))
request = self.factory.get(url)
request.user = self.user
response = ListBoardView.as_view()(request)
self.assertEqual(response.status_code, 200)
def test_list_view3(self):
url = reverse('enumeration:listboard_url', kwargs=dict(
household_identifier=self.household_structure.household.household_identifier))
request = self.factory.get(url)
request.user = self.user
response = ListBoardView.as_view()(request)
self.assertEqual(response.status_code, 200)
def test_list_view4(self):
url = reverse('enumeration:listboard_url', kwargs=dict(
household_identifier=self.household_structure.household.household_identifier,
survey=self.household_structure.survey))
request = self.factory.get(url)
request.user = self.user
response = ListBoardView.as_view()(request)
self.assertEqual(response.status_code, 200)
def test_list_view5(self):
url = reverse('enumeration:listboard_url', kwargs=dict(
plot_identifier=self.household_structure.household.plot.plot_identifier))
request = self.factory.get(url)
request.user = self.user
response = ListBoardView.as_view()(request)
self.assertEqual(response.status_code, 200)
|
botswana-harvard/bcpp
|
bcpp/tests/test_views/test_enumeration.py
|
Python
|
gpl-3.0
| 3,020
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2003 - 2014 Detlev Offenbach <detlev@die-offenbachs.de>
#
"""
Eric6 Documentation Generator.
This is the main Python script of the documentation generator. It is
this script that gets called via the source documentation interface.
This script can be used via the commandline as well.
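Typical command line use (illustrative): generate the API documentation for a
package tree recursively into the "doc" directory, excluding a build folder:
    eric6_doc -o doc -R -x build mypackage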
"""
from __future__ import unicode_literals
import Toolbox.PyQt4ImportHook # __IGNORE_WARNING__
try: # Only for Py2
import Utilities.compatibility_fixes # __IGNORE_WARNING__
except (ImportError):
pass
import glob
import os
import sys
import fnmatch
import Utilities.ModuleParser
from DocumentationTools.ModuleDocumentor import ModuleDocument
from DocumentationTools.IndexGenerator import IndexGenerator
from DocumentationTools.QtHelpGenerator import QtHelpGenerator
from DocumentationTools.Config import eric6docDefaultColors
from UI.Info import Version
import Utilities
# list of supported filename extensions
supportedExtensions = [".py", ".pyw", ".ptl", ".rb"]
def usage():
"""
Function to print some usage information.
It prints a reference of all commandline parameters that may
be used and ends the application.
"""
print("eric6_doc")
print()
print("Copyright (c) 2003 - 2014 Detlev Offenbach"
" <detlev@die-offenbachs.de>.")
print()
print("Usage:")
print()
print(" eric6_doc [options] files...")
print()
print("where files can be either python modules, package")
print("directories or ordinary directories.")
print()
print("Options:")
print()
print(" -c filename or --style-sheet=filename")
print(" Specify a CSS style sheet file to be used.")
print(" -e or --noempty")
print(" Don't include empty modules.")
print(" --eol=eol-type")
print(" Use the given eol type to terminate lines.")
print(" Valid values are 'cr', 'lf' and 'crlf'.")
print(" --exclude-file=pattern")
print(" Specify a filename pattern of files to be excluded.")
print(" This option may be repeated multiple times.")
print(" -h or --help")
print(" Show this help and exit.")
print(" -i or --noindex")
print(" Don't generate index files.")
print(" -o directory or --outdir=directory")
print(" Generate files in the named directory.")
print(" -R, -r or --recursive")
print(" Perform a recursive search for Python files.")
print(" -t ext or --extension=ext")
print(" Add the given extension to the list of file extensions.")
print(" This option may be given multiple times.")
print(" -V or --version")
print(" Show version information and exit.")
print(" -x directory or --exclude=directory")
print(" Specify a directory basename to be excluded.")
print(" This option may be repeated multiple times.")
print()
print(" --body-color=color")
print(" Specify the text color.")
print(" --body-background-color=color")
print(" Specify the text background color.")
print(" --l1header-color=color")
print(" Specify the text color of level 1 headers.")
print(" --l1header-background-color=color")
print(" Specify the text background color of level 1 headers.")
print(" --l2header-color=color")
print(" Specify the text color of level 2 headers.")
print(" --l2header-background-color=color")
print(" Specify the text background color of level 2 headers.")
print(" --cfheader-color=color")
print(" Specify the text color of class and function headers.")
print(" --cfheader-background-color=color")
print(" Specify the text background color of class and"
" function headers.")
print(" --link-color=color")
print(" Specify the text color of hyperlinks.")
print()
print(" --create-qhp")
print(" Enable generation of QtHelp files.")
print(" --qhp-outdir=directory")
print(" Generate QtHelp files in the named directory.")
print(" --qhp-namespace=namespace")
print(" Use the given namespace (mandatory).")
print(" --qhp-virtualfolder=folder")
print(" Use the given virtual folder (mandatory).")
print(" The virtual folder must not contain '/'.")
print(" --qhp-filtername=name")
print(" Use the given name for the custom filter.")
print(" --qhp-filterattribs=attributes")
print(" Add the given attributes to the filter list.")
print(" Attributes must be separated by ':'.")
print(" --qhp-title=title")
print(" Use this as the title for the generated help (mandatory).")
print(" --create-qhc")
print(" Enable generation of QtHelp Collection files.")
sys.exit(1)
def version():
"""
Function to show the version information.
"""
print(
"""eric6_doc {0}\n"""
"""\n"""
"""Eric6 API documentation generator.\n"""
"""\n"""
"""Copyright (c) 2003-2014 Detlev Offenbach"""
""" <detlev@die-offenbachs.de>\n"""
"""This is free software; see the LICENSE.GPL3 for copying"""
""" conditions.\n"""
"""There is NO warranty; not even for MERCHANTABILITY or FITNESS"""
""" FOR A\n"""
"""PARTICULAR PURPOSE.""".format(Version))
sys.exit(1)
def main():
"""
Main entry point into the application.
"""
import getopt
try:
opts, args = getopt.getopt(
sys.argv[1:], "c:ehio:Rrt:Vx:",
["exclude=", "extension=", "help", "noindex", "noempty", "outdir=",
"recursive", "style-sheet=", "version",
"exclude-file=", "eol=",
"body-color=", "body-background-color=",
"l1header-color=", "l1header-background-color=",
"l2header-color=", "l2header-background-color=",
"cfheader-color=", "cfheader-background-color=",
"link-color=",
"create-qhp", "qhp-outdir=", "qhp-namespace=",
"qhp-virtualfolder=", "qhp-filtername=", "qhp-filterattribs=",
"qhp-title=", "create-qhc",
])
except getopt.error:
usage()
excludeDirs = ["CVS", ".svn", "_svn", ".ropeproject", "_ropeproject",
".eric6project", "_eric6project", "dist", "build", "doc",
"docs"]
excludePatterns = []
outputDir = "doc"
recursive = False
doIndex = True
noempty = False
newline = None
stylesheetFile = ""
colors = eric6docDefaultColors.copy()
qtHelpCreation = False
qtHelpOutputDir = "help"
qtHelpNamespace = ""
qtHelpFolder = "source"
qtHelpFilterName = "unknown"
qtHelpFilterAttribs = ""
qtHelpTitle = ""
qtHelpCreateCollection = False
for k, v in opts:
if k in ["-o", "--outdir"]:
outputDir = v
elif k in ["-R", "-r", "--recursive"]:
recursive = True
elif k in ["-x", "--exclude"]:
excludeDirs.append(v)
elif k == "--exclude-file":
excludePatterns.append(v)
elif k in ["-i", "--noindex"]:
doIndex = False
elif k in ["-e", "--noempty"]:
noempty = True
elif k in ["-h", "--help"]:
usage()
elif k in ["-V", "--version"]:
version()
elif k in ["-c", "--style-sheet"]:
stylesheetFile = v
elif k in ["-t", "--extension"]:
if not v.startswith("."):
v = ".{0}".format(v)
supportedExtensions.append(v)
elif k == "--eol":
if v.lower() == "cr":
newline = '\r'
elif v.lower() == "lf":
newline = '\n'
elif v.lower() == "crlf":
newline = '\r\n'
elif k == "--body-color":
colors['BodyColor'] = v
elif k == "--body-background-color":
colors['BodyBgColor'] = v
elif k == "--l1header-color":
colors['Level1HeaderColor'] = v
elif k == "--l1header-background-color":
colors['Level1HeaderBgColor'] = v
elif k == "--l2header-color":
colors['Level2HeaderColor'] = v
elif k == "--l2header-background-color":
colors['Level2HeaderBgColor'] = v
elif k == "--cfheader-color":
colors['CFColor'] = v
elif k == "--cfheader-background-color":
colors['CFBgColor'] = v
elif k == "--link-color":
colors['LinkColor'] = v
elif k == "--create-qhp":
qtHelpCreation = True
elif k == "--qhp-outdir":
qtHelpOutputDir = v
elif k == "--qhp-namespace":
qtHelpNamespace = v
elif k == "--qhp-virtualfolder":
qtHelpFolder = v
elif k == "--qhp-filtername":
qtHelpFilterName = v
elif k == "--qhp-filterattribs":
qtHelpFilterAttribs = v
elif k == "--qhp-title":
qtHelpTitle = v
elif k == "--create-qhc":
qtHelpCreateCollection = True
if not args:
usage()
if qtHelpCreation and \
(qtHelpNamespace == "" or
qtHelpFolder == "" or '/' in qtHelpFolder or
qtHelpTitle == ""):
usage()
if qtHelpCreation:
from PyQt5.QtCore import QCoreApplication
app = QCoreApplication(sys.argv) # __IGNORE_WARNING__
input = output = 0
basename = ""
if outputDir:
if not os.path.isdir(outputDir):
try:
os.makedirs(outputDir)
except EnvironmentError:
sys.stderr.write(
"Could not create output directory {0}.".format(outputDir))
sys.exit(2)
else:
outputDir = os.getcwd()
outputDir = os.path.abspath(outputDir)
if stylesheetFile:
try:
sf = open(stylesheetFile, "r", encoding="utf-8")
stylesheet = sf.read()
sf.close()
except IOError:
sys.stderr.write(
"The CSS stylesheet '{0}' does not exist\n".format(
stylesheetFile))
sys.stderr.write("Disabling CSS usage.\n")
stylesheet = None
else:
stylesheet = None
indexGenerator = IndexGenerator(outputDir, colors, stylesheet)
if qtHelpCreation:
if qtHelpOutputDir:
if not os.path.isdir(qtHelpOutputDir):
try:
os.makedirs(qtHelpOutputDir)
except EnvironmentError:
sys.stderr.write(
"Could not create QtHelp output directory {0}.".format(
qtHelpOutputDir))
sys.exit(2)
else:
qtHelpOutputDir = os.getcwd()
qtHelpOutputDir = os.path.abspath(qtHelpOutputDir)
qtHelpGenerator = QtHelpGenerator(outputDir,
qtHelpOutputDir, qtHelpNamespace,
qtHelpFolder, qtHelpFilterName,
qtHelpFilterAttribs, qtHelpTitle,
qtHelpCreateCollection)
for arg in args:
if os.path.isdir(arg):
if os.path.exists(os.path.join(
arg, Utilities.joinext("__init__", ".py"))):
basename = os.path.dirname(arg)
if arg == '.':
sys.stderr.write("The directory '.' is a package.\n")
sys.stderr.write(
"Please repeat the call giving its real name.\n")
sys.stderr.write("Ignoring the directory.\n")
continue
else:
basename = arg
if basename:
basename = "{0}{1}".format(basename, os.sep)
if recursive and not os.path.islink(arg):
names = [arg] + Utilities.getDirs(arg, excludeDirs)
else:
names = [arg]
else:
basename = ""
names = [arg]
for filename in names:
inpackage = False
if os.path.isdir(filename):
files = []
for ext in supportedExtensions:
files.extend(glob.glob(os.path.join(
filename, Utilities.joinext("*", ext))))
initFile = os.path.join(
filename, Utilities.joinext("__init__", ext))
if initFile in files:
inpackage = True
files.remove(initFile)
files.insert(0, initFile)
else:
if Utilities.isWindowsPlatform() and glob.has_magic(filename):
files = glob.glob(filename)
else:
files = [filename]
for file in files:
skipIt = False
for pattern in excludePatterns:
if fnmatch.fnmatch(os.path.basename(file), pattern):
skipIt = True
break
if skipIt:
continue
try:
module = Utilities.ModuleParser.readModule(
file, basename=basename,
inpackage=inpackage, extensions=supportedExtensions)
moduleDocument = ModuleDocument(module, colors, stylesheet)
doc = moduleDocument.genDocument()
except IOError as v:
                    # exceptions are not subscriptable under Python 3; report the exception itself
                    sys.stderr.write("{0} error: {1}\n".format(file, v))
continue
except ImportError as v:
sys.stderr.write("{0} error: {1}\n".format(file, v))
continue
input = input + 1
f = Utilities.joinext(os.path.join(
outputDir, moduleDocument.name()), ".html")
# remember for index file generation
indexGenerator.remember(file, moduleDocument, basename)
# remember for QtHelp generation
if qtHelpCreation:
qtHelpGenerator.remember(file, moduleDocument, basename)
if (noempty or file.endswith('__init__.py')) \
and moduleDocument.isEmpty():
continue
# generate output
try:
out = open(f, "w", encoding="utf-8", newline=newline)
out.write(doc)
out.close()
except IOError as v:
                    sys.stderr.write("{0} error: {1}\n".format(file, v))
else:
sys.stdout.write("{0} ok\n".format(f))
output = output + 1
sys.stdout.flush()
sys.stderr.flush()
# write index files
if doIndex:
indexGenerator.writeIndices(basename, newline=newline)
# generate the QtHelp files
if qtHelpCreation:
qtHelpGenerator.generateFiles(newline=newline)
sys.exit(0)
if __name__ == '__main__':
main()
|
davy39/eric
|
eric6_doc.py
|
Python
|
gpl-3.0
| 15,567
|
from .circumcision_model_mixin import CircumcisionModelMixin
from .crf_model_mixin import CrfModelManager, CrfModelMixin
# CrfModelMixinNonUniqueVisit
from .detailed_sexual_history_mixin import DetailedSexualHistoryMixin
from .hiv_testing_supplemental_mixin import HivTestingSupplementalMixin
from .mobile_test_model_mixin import MobileTestModelMixin
from .pregnancy_model_mixin import PregnancyModelMixin
from .search_slug_model_mixin import SearchSlugModelMixin
from .sexual_partner_model_mixin import SexualPartnerMixin
|
botswana-harvard/bcpp-subject
|
bcpp_subject/models/model_mixins/__init__.py
|
Python
|
gpl-3.0
| 523
|
import urllib
import urllib2
import xml.dom.minidom
import re
import socket
from util import hook
chatbot_re = (r'(^.*\b(taiga|taigabot)\b.*$)', re.I)
@hook.regex(*chatbot_re)
@hook.command
def chatbot(inp, reply=None, nick=None, conn=None):
inp = inp.group(1).lower().replace('taigabot', '').replace('taiga', '').replace(':', '')
args = {'bot_id': '6', 'say': inp.strip(), 'convo_id': conn.nick, 'format': 'xml'}
data = urllib.urlencode(args)
resp = False
url_response = urllib2.urlopen('http://api.program-o.com/v2/chatbot/?', data)
response = url_response.read()
response_dom = xml.dom.minidom.parseString(response)
text = response_dom.getElementsByTagName('response')[0].childNodes[0].data.strip()
return nick + ': ' + str(text.lower().replace('programo', 'taiga').replace('program-o', 'taigabot').replace('elizabeth', 'wednesday'))
|
FrozenPigs/Taigabot
|
plugins/_broken/chatbot.py
|
Python
|
gpl-3.0
| 874
|
"""Smoke tests to check installation health
:Requirement: Installer
:CaseAutomation: Automated
:CaseLevel: Acceptance
:CaseComponent: Installer
:Assignee: desingh
:TestType: Functional
:CaseImportance: High
:Upstream: No
"""
import re
import pytest
from robottelo import ssh
from robottelo.config import settings
from robottelo.constants import RHEL_6_MAJOR_VERSION
from robottelo.constants import RHEL_7_MAJOR_VERSION
from robottelo.helpers import get_host_info
PREVIOUS_INSTALLER_OPTIONS = {
'--[no-]colors',
'--[no-]enable-certs',
'--[no-]enable-foreman',
'--[no-]enable-foreman-cli',
'--[no-]enable-foreman-compute-ec2',
'--[no-]enable-foreman-compute-gce',
'--[no-]enable-foreman-compute-libvirt',
'--[no-]enable-foreman-compute-openstack',
'--[no-]enable-foreman-compute-ovirt',
'--[no-]enable-foreman-compute-vmware',
'--[no-]enable-foreman-cli-kubevirt',
'--[no-]enable-foreman-cli-katello',
'--[no-]enable-foreman-cli-remote-execution',
'--[no-]enable-foreman-plugin-ansible',
'--[no-]enable-foreman-plugin-bootdisk',
'--[no-]enable-foreman-plugin-discovery',
'--[no-]enable-foreman-plugin-hooks',
'--[no-]enable-foreman-plugin-kubevirt',
'--[no-]enable-foreman-plugin-leapp',
'--[no-]enable-foreman-plugin-openscap',
'--[no-]enable-foreman-plugin-remote-execution',
'--[no-]enable-foreman-plugin-rh-cloud',
'--[no-]enable-foreman-plugin-tasks',
'--[no-]enable-foreman-plugin-templates',
'--[no-]enable-foreman-plugin-webhooks',
'--[no-]enable-foreman-proxy',
'--[no-]enable-foreman-proxy-content',
'--[no-]enable-foreman-proxy-plugin-ansible',
'--[no-]enable-foreman-proxy-plugin-dhcp-infoblox',
'--[no-]enable-foreman-proxy-plugin-dhcp-remote-isc',
'--[no-]enable-foreman-proxy-plugin-discovery',
'--[no-]enable-foreman-proxy-plugin-dns-infoblox',
'--[no-]enable-foreman-proxy-plugin-openscap',
'--[no-]enable-foreman-proxy-plugin-remote-execution-ssh',
'--[no-]enable-foreman-proxy-plugin-shellhooks',
'--[no-]enable-katello',
'--[no-]enable-puppet',
'--[no-]lock-package-versions',
'--[no-]parser-cache',
'--certs-ca-common-name',
'--certs-ca-expiration',
'--certs-city',
'--certs-cname',
'--certs-country',
'--certs-default-ca-name',
'--certs-deploy',
'--certs-expiration',
'--certs-generate',
'--certs-group',
'--certs-node-fqdn',
'--certs-org',
'--certs-org-unit',
'--certs-pki-dir',
'--certs-regenerate',
'--certs-reset',
'--certs-server-ca-cert',
'--certs-server-ca-name',
'--certs-server-cert',
'--certs-server-cert-req',
'--certs-server-key',
'--certs-skip-check',
'--certs-ssl-build-dir',
'--certs-state',
'--certs-tar-file',
'--certs-update-all',
'--certs-update-server',
'--certs-update-server-ca',
'--certs-user',
'--color-of-background',
'--compare-scenarios',
'--detailed-exitcodes',
'--disable-scenario',
'--disable-system-checks',
'--dont-save-answers',
'--enable-scenario',
'--force',
'--foreman-app-root',
'--foreman-apache',
'--foreman-cli-foreman-url',
'--foreman-cli-hammer-plugin-prefix',
'--foreman-cli-manage-root-config',
'--foreman-cli-password',
'--foreman-cli-refresh-cache',
'--foreman-cli-request-timeout',
'--foreman-cli-ssl-ca-file',
'--foreman-cli-username',
'--foreman-cli-use-sessions',
'--foreman-cli-version',
'--foreman-client-ssl-ca',
'--foreman-client-ssl-cert',
'--foreman-client-ssl-key',
'--foreman-compute-ec2-version',
'--foreman-compute-gce-version',
'--foreman-compute-libvirt-version',
'--foreman-compute-openstack-version',
'--foreman-compute-ovirt-version',
'--foreman-compute-vmware-version',
'--foreman-cors-domains',
'--foreman-db-database',
'--foreman-db-host',
'--foreman-db-manage',
'--foreman-db-manage-rake',
'--foreman-db-password',
'--foreman-db-pool',
'--foreman-db-port',
'--foreman-db-root-cert',
'--foreman-db-sslmode',
'--foreman-db-username',
'--foreman-dynflow-manage-services',
'--foreman-dynflow-orchestrator-ensure',
'--foreman-dynflow-redis-url',
'--foreman-dynflow-worker-concurrency',
'--foreman-dynflow-worker-instances',
'--foreman-email-delivery-method',
'--foreman-email-smtp-address',
'--foreman-email-smtp-authentication',
'--foreman-email-smtp-domain',
'--foreman-email-smtp-password',
'--foreman-email-smtp-port',
'--foreman-email-smtp-user-name',
'--foreman-foreman-service-puma-threads-max',
'--foreman-foreman-service-puma-threads-min',
'--foreman-foreman-service-puma-workers',
'--foreman-foreman-url',
'--foreman-group',
'--foreman-hsts-enabled',
'--foreman-http-keytab',
'--foreman-initial-admin-email',
'--foreman-initial-admin-first-name',
'--foreman-initial-admin-last-name',
'--foreman-initial-admin-locale',
'--foreman-initial-admin-password',
'--foreman-initial-admin-username',
'--foreman-initial-admin-timezone',
'--foreman-initial-location',
'--foreman-initial-organization',
'--foreman-ipa-authentication',
'--foreman-ipa-manage-sssd',
'--foreman-keycloak-realm',
'--foreman-keycloak',
'--foreman-keycloak-app-name',
'--foreman-loggers',
'--foreman-logging-layout',
'--foreman-logging-level',
'--foreman-logging-type',
'--foreman-manage-user',
'--foreman-oauth-active',
'--foreman-oauth-consumer-key',
'--foreman-oauth-consumer-secret',
'--foreman-oauth-map-users',
'--foreman-pam-service',
'--foreman-plugin-prefix',
'--foreman-plugin-tasks-automatic-cleanup',
'--foreman-plugin-tasks-cron-line',
'--foreman-plugin-tasks-backup',
'--foreman-plugin-version',
'--foreman-proxy-autosignfile',
'--foreman-proxy-bind-host',
'--foreman-proxy-bmc',
'--foreman-proxy-bmc-default-provider',
'--foreman-proxy-bmc-listen-on',
'--foreman-proxy-bmc-ssh-key',
'--foreman-proxy-bmc-ssh-powercycle',
'--foreman-proxy-bmc-ssh-poweroff',
'--foreman-proxy-bmc-ssh-poweron',
'--foreman-proxy-bmc-ssh-powerstatus',
'--foreman-proxy-bmc-ssh-user',
'--foreman-proxy-content-enable-ansible',
'--foreman-proxy-content-enable-deb',
'--foreman-proxy-content-enable-docker',
'--foreman-proxy-content-enable-file',
'--foreman-proxy-content-enable-katello-agent',
'--foreman-proxy-content-enable-yum',
'--foreman-proxy-content-pulpcore-allowed-content-checksums',
'--foreman-proxy-content-pulpcore-api-service-worker-timeout',
'--foreman-proxy-content-pulpcore-content-service-worker-timeout',
'--foreman-proxy-content-pulpcore-cache-enabled',
'--foreman-proxy-content-pulpcore-cache-expires-ttl',
'--foreman-proxy-content-pulpcore-django-secret-key',
'--foreman-proxy-content-pulpcore-mirror',
'--foreman-proxy-content-pulpcore-use-rq-tasking-system',
'--foreman-proxy-content-pulpcore-postgresql-db-name',
'--foreman-proxy-content-pulpcore-manage-postgresql',
'--foreman-proxy-content-pulpcore-postgresql-host',
'--foreman-proxy-content-pulpcore-postgresql-password',
'--foreman-proxy-content-pulpcore-postgresql-port',
'--foreman-proxy-content-pulpcore-postgresql-ssl',
'--foreman-proxy-content-pulpcore-postgresql-ssl-cert',
'--foreman-proxy-content-pulpcore-postgresql-ssl-key',
'--foreman-proxy-content-pulpcore-postgresql-ssl-require',
'--foreman-proxy-content-pulpcore-postgresql-ssl-root-ca',
'--foreman-proxy-content-pulpcore-postgresql-user',
'--foreman-rails-cache-store',
'--foreman-proxy-registration',
'--foreman-proxy-registration-listen-on',
'--foreman-server-ssl-verify-client',
'--puppet-server-ca-client-self-delete',
'--puppet-server-multithreaded',
'--puppet-server-storeconfigs',
'--puppet-server-trusted-external-command',
'--puppet-server-versioned-code-content',
'--puppet-server-versioned-code-id',
'--foreman-proxy-content-pulpcore-worker-count',
'--foreman-proxy-content-puppet',
'--foreman-proxy-content-qpid-router-agent-addr',
'--foreman-proxy-content-qpid-router-agent-port',
'--foreman-proxy-content-qpid-router-broker-addr',
'--foreman-proxy-content-qpid-router-broker-port',
'--foreman-proxy-content-qpid-router-hub-addr',
'--foreman-proxy-content-qpid-router-hub-port',
'--foreman-proxy-content-qpid-router-logging',
'--foreman-proxy-content-qpid-router-logging-level',
'--foreman-proxy-content-qpid-router-logging-path',
'--foreman-proxy-content-qpid-router-ssl-ciphers',
'--foreman-proxy-content-qpid-router-ssl-protocols',
'--foreman-proxy-content-reverse-proxy',
'--foreman-proxy-content-reverse-proxy-port',
'--foreman-proxy-dhcp',
'--foreman-proxy-dhcp-additional-interfaces',
'--foreman-proxy-dhcp-config',
'--foreman-proxy-dhcp-failover-address',
'--foreman-proxy-dhcp-failover-port',
'--foreman-proxy-dhcp-gateway',
'--foreman-proxy-dhcp-interface',
'--foreman-proxy-dhcp-key-name',
'--foreman-proxy-dhcp-key-secret',
'--foreman-proxy-dhcp-leases',
'--foreman-proxy-dhcp-listen-on',
'--foreman-proxy-dhcp-load-balance',
'--foreman-proxy-dhcp-load-split',
'--foreman-proxy-dhcp-manage-acls',
'--foreman-proxy-dhcp-managed',
'--foreman-proxy-dhcp-max-response-delay',
'--foreman-proxy-dhcp-max-unacked-updates',
'--foreman-proxy-dhcp-mclt',
'--foreman-proxy-dhcp-nameservers',
'--foreman-proxy-dhcp-netmask',
'--foreman-proxy-dhcp-network',
'--foreman-proxy-dhcp-node-type',
'--foreman-proxy-dhcp-omapi-port',
'--foreman-proxy-dhcp-option-domain',
'--foreman-proxy-dhcp-peer-address',
'--foreman-proxy-dhcp-ping-free-ip',
'--foreman-proxy-dhcp-provider',
'--foreman-proxy-dhcp-pxefilename',
'--foreman-proxy-dhcp-pxeserver',
'--foreman-proxy-dhcp-range',
'--foreman-proxy-dhcp-search-domains',
'--foreman-proxy-dhcp-server',
'--foreman-proxy-dhcp-subnets',
'--foreman-proxy-dns',
'--foreman-proxy-dns-forwarders',
'--foreman-proxy-dns-interface',
'--foreman-proxy-dns-listen-on',
'--foreman-proxy-dns-managed',
'--foreman-proxy-dns-provider',
'--foreman-proxy-dns-reverse',
'--foreman-proxy-dns-server',
'--foreman-proxy-dns-tsig-keytab',
'--foreman-proxy-dns-tsig-principal',
'--foreman-proxy-dns-ttl',
'--foreman-proxy-dns-zone',
'--foreman-proxy-ensure-packages-version',
'--foreman-proxy-foreman-base-url',
'--foreman-proxy-foreman-ssl-ca',
'--foreman-proxy-foreman-ssl-cert',
'--foreman-proxy-foreman-ssl-key',
'--foreman-proxy-freeipa-config',
'--foreman-proxy-freeipa-remove-dns',
'--foreman-proxy-gpgcheck',
'--foreman-proxy-groups',
'--foreman-proxy-http',
'--foreman-proxy-http-port',
'--foreman-proxy-httpboot',
'--foreman-proxy-httpboot-listen-on',
'--foreman-proxy-keyfile',
'--foreman-proxy-libvirt-connection',
'--foreman-proxy-libvirt-network',
'--foreman-proxy-log',
'--foreman-proxy-log-buffer',
'--foreman-proxy-log-buffer-errors',
'--foreman-proxy-log-level',
'--foreman-proxy-logs',
'--foreman-proxy-logs-listen-on',
'--foreman-proxy-manage-puppet-group',
'--foreman-proxy-manage-sudoersd',
'--foreman-proxy-oauth-consumer-key',
'--foreman-proxy-oauth-consumer-secret',
'--foreman-proxy-oauth-effective-user',
'--foreman-proxy-plugin-ansible-ansible-dir',
'--foreman-proxy-plugin-ansible-callback',
'--foreman-proxy-plugin-ansible-install-runner',
'--foreman-proxy-plugin-ansible-manage-runner-repo',
'--foreman-proxy-plugin-ansible-roles-path',
'--foreman-proxy-plugin-ansible-runner-package-name',
'--foreman-proxy-plugin-ansible-enabled',
'--foreman-proxy-plugin-ansible-host-key-checking',
'--foreman-proxy-plugin-ansible-listen-on',
'--foreman-proxy-plugin-ansible-stdout-callback',
'--foreman-proxy-plugin-ansible-working-dir',
'--foreman-proxy-plugin-ansible-ssh-args',
'--foreman-proxy-plugin-dhcp-infoblox-dns-view',
'--foreman-proxy-plugin-dhcp-infoblox-network-view',
'--foreman-proxy-plugin-dhcp-infoblox-password',
'--foreman-proxy-plugin-dhcp-infoblox-record-type',
'--foreman-proxy-plugin-dhcp-infoblox-username',
'--foreman-proxy-plugin-dhcp-remote-isc-dhcp-config',
'--foreman-proxy-plugin-dhcp-remote-isc-dhcp-leases',
'--foreman-proxy-plugin-dhcp-remote-isc-key-name',
'--foreman-proxy-plugin-dhcp-remote-isc-key-secret',
'--foreman-proxy-plugin-dhcp-remote-isc-omapi-port',
'--foreman-proxy-plugin-discovery-image-name',
'--foreman-proxy-plugin-discovery-install-images',
'--foreman-proxy-plugin-discovery-source-url',
'--foreman-proxy-plugin-discovery-tftp-root',
'--foreman-proxy-plugin-dns-infoblox-dns-server',
'--foreman-proxy-plugin-dns-infoblox-password',
'--foreman-proxy-plugin-dns-infoblox-username',
'--foreman-proxy-plugin-dns-infoblox-dns-view',
'--foreman-proxy-plugin-openscap-contentdir',
'--foreman-proxy-plugin-openscap-enabled',
'--foreman-proxy-plugin-openscap-failed-dir',
'--foreman-proxy-plugin-openscap-listen-on',
'--foreman-proxy-plugin-openscap-openscap-send-log-file',
'--foreman-proxy-plugin-openscap-proxy-name',
'--foreman-proxy-plugin-openscap-reportsdir',
'--foreman-proxy-plugin-openscap-spooldir',
'--foreman-proxy-plugin-openscap-timeout',
'--foreman-proxy-plugin-openscap-version',
'--foreman-proxy-plugin-openscap-corrupted-dir',
'--foreman-proxy-plugin-remote-execution-ssh-async-ssh',
'--foreman-proxy-plugin-remote-execution-ssh-enabled',
'--foreman-proxy-plugin-remote-execution-ssh-generate-keys',
'--foreman-proxy-plugin-remote-execution-ssh-install-key',
'--foreman-proxy-plugin-remote-execution-ssh-listen-on',
'--foreman-proxy-plugin-remote-execution-ssh-local-working-dir',
'--foreman-proxy-plugin-remote-execution-ssh-remote-working-dir',
'--foreman-proxy-plugin-remote-execution-ssh-ssh-identity-dir',
'--foreman-proxy-plugin-remote-execution-ssh-ssh-identity-file',
'--foreman-proxy-plugin-remote-execution-ssh-ssh-kerberos-auth',
'--foreman-proxy-plugin-remote-execution-ssh-ssh-keygen',
'--foreman-proxy-plugin-shellhooks-directory',
'--foreman-proxy-plugin-shellhooks-enabled',
'--foreman-proxy-plugin-shellhooks-listen-on',
'--foreman-proxy-plugin-shellhooks-version',
'--foreman-proxy-puppet',
'--foreman-proxy-puppet-api-timeout',
'--foreman-proxy-puppet-group',
'--foreman-proxy-puppet-listen-on',
'--foreman-proxy-puppet-ssl-ca',
'--foreman-proxy-puppet-ssl-cert',
'--foreman-proxy-puppet-ssl-key',
'--foreman-proxy-puppet-url',
'--foreman-proxy-puppetca',
'--foreman-proxy-puppetca-certificate',
'--foreman-proxy-puppetca-cmd',
'--foreman-proxy-puppetca-listen-on',
'--foreman-proxy-puppetca-provider',
'--foreman-proxy-puppetca-sign-all',
'--foreman-proxy-puppetca-token-ttl',
'--foreman-proxy-puppetca-tokens-file',
'--foreman-proxy-puppetdir',
'--foreman-proxy-realm',
'--foreman-proxy-realm-keytab',
'--foreman-proxy-realm-listen-on',
'--foreman-proxy-realm-principal',
'--foreman-proxy-realm-provider',
'--foreman-proxy-register-in-foreman',
'--foreman-proxy-registered-name',
'--foreman-proxy-registered-proxy-url',
'--foreman-proxy-repo',
'--foreman-proxy-ssl',
'--foreman-proxy-ssl-ca',
'--foreman-proxy-ssl-cert',
'--foreman-proxy-ssl-disabled-ciphers',
'--foreman-proxy-ssl-key',
'--foreman-proxy-ssl-port',
'--foreman-proxy-ssldir',
'--foreman-proxy-template-url',
'--foreman-proxy-templates',
'--foreman-proxy-templates-listen-on',
'--foreman-proxy-tftp',
'--foreman-proxy-tftp-dirs',
'--foreman-proxy-tftp-listen-on',
'--foreman-proxy-tftp-manage-wget',
'--foreman-proxy-tftp-managed',
'--foreman-proxy-tftp-replace-grub2-cfg',
'--foreman-proxy-tftp-root',
'--foreman-proxy-tftp-servername',
'--foreman-proxy-tftp-syslinux-filenames',
'--foreman-proxy-tls-disabled-versions',
'--foreman-proxy-trusted-hosts',
'--foreman-proxy-use-sudoers',
'--foreman-proxy-use-sudoersd',
'--foreman-proxy-version',
'--foreman-rails-env',
'--foreman-server-port',
'--foreman-server-ssl-ca',
'--foreman-server-ssl-cert',
'--foreman-server-ssl-certs-dir',
'--foreman-server-ssl-chain',
'--foreman-server-ssl-crl',
'--foreman-server-ssl-key',
'--foreman-server-ssl-port',
'--foreman-server-ssl-protocol',
'--foreman-serveraliases',
'--foreman-servername',
'--foreman-ssl',
'--foreman-telemetry-logger-enabled',
'--foreman-telemetry-logger-level',
'--foreman-telemetry-prefix',
'--foreman-telemetry-prometheus-enabled',
'--foreman-telemetry-statsd-enabled',
'--foreman-telemetry-statsd-host',
'--foreman-telemetry-statsd-protocol',
'--foreman-unattended',
'--foreman-unattended-url',
'--foreman-user',
'--foreman-user-groups',
'--foreman-version',
'--foreman-vhost-priority',
'--foreman-websockets-encrypt',
'--foreman-websockets-ssl-cert',
'--foreman-websockets-ssl-key',
'--full-help',
'--help',
'--ignore-undocumented',
'--interactive',
'--katello-candlepin-db-host',
'--katello-candlepin-db-name',
'--katello-candlepin-db-password',
'--katello-candlepin-db-port',
'--katello-candlepin-db-ssl',
'--katello-candlepin-db-ssl-verify',
'--katello-candlepin-db-user',
'--katello-candlepin-manage-db',
'--katello-candlepin-oauth-key',
'--katello-candlepin-oauth-secret',
'--katello-hosts-queue-workers',
'--katello-qpid-hostname',
'--katello-qpid-interface',
'--katello-qpid-wcache-page-size',
'--katello-rest-client-timeout',
'--list-scenarios',
'--log-level',
'--migrations-only',
'--noop',
'--[no-]enable-foreman-cli-ansible',
'--[no-]enable-foreman-cli-azure',
'--[no-]enable-foreman-cli-virt-who-configure',
'--[no-]enable-foreman-plugin-azure',
'--[no-]enable-foreman-plugin-remote-execution-cockpit',
'--[no-]enable-foreman-plugin-virt-who-configure',
'--profile',
'--puppet-additional-settings',
'--puppet-agent',
'--puppet-agent-additional-settings',
'--puppet-agent-noop',
'--puppet-agent-restart-command',
'--puppet-allow-any-crl-auth',
'--puppet-auth-allowed',
'--puppet-auth-template',
'--puppet-autosign',
'--puppet-autosign-content',
'--puppet-autosign-entries',
'--puppet-autosign-mode',
'--puppet-autosign-source',
'--puppet-ca-crl-filepath',
'--puppet-ca-port',
'--puppet-ca-server',
'--puppet-classfile',
'--puppet-client-certname',
'--puppet-client-package',
'--puppet-codedir',
'--puppet-cron-cmd',
'--puppet-dir',
'--puppet-dir-group',
'--puppet-dir-owner',
'--puppet-dns-alt-names',
'--puppet-environment',
'--puppet-group',
'--puppet-hiera-config',
'--puppet-http-connect-timeout',
'--puppet-http-read-timeout',
'--puppet-logdir',
'--puppet-manage-packages',
'--puppet-module-repository',
'--puppet-package-install-options',
'--puppet-package-provider',
'--puppet-package-source',
'--puppet-pluginfactsource',
'--puppet-pluginsource',
'--puppet-pluginsync',
'--puppet-port',
'--puppet-postrun-command',
'--puppet-prerun-command',
'--puppet-puppetmaster',
'--puppet-remove-lock',
'--puppet-report',
'--puppet-run-hour',
'--puppet-run-minute',
'--puppet-rundir',
'--puppet-runinterval',
'--puppet-runmode',
'--puppet-server',
'--puppet-server-acceptor-threads',
'--puppet-server-additional-settings',
'--puppet-server-admin-api-whitelist',
'--puppet-server-allow-header-cert-info',
'--puppet-server-ca',
'--puppet-server-ca-allow-auth-extensions',
'--puppet-server-ca-allow-sans',
'--puppet-server-ca-auth-required',
'--puppet-server-ca-client-whitelist',
'--puppet-server-ca-crl-sync',
'--puppet-server-ca-enable-infra-crl',
'--puppet-server-certname',
'--puppet-server-check-for-updates',
'--puppet-server-cipher-suites',
'--puppet-server-common-modules-path',
'--puppet-server-compile-mode',
'--puppet-server-config-version',
'--puppet-server-connect-timeout',
'--puppet-server-crl-enable',
'--puppet-server-custom-trusted-oid-mapping',
'--puppet-server-default-manifest',
'--puppet-server-default-manifest-content',
'--puppet-server-default-manifest-path',
'--puppet-server-dir',
'--puppet-server-environment-class-cache-enabled',
'--puppet-server-environment-timeout',
'--puppet-server-environments-group',
'--puppet-server-environments-mode',
'--puppet-server-environments-owner',
'--puppet-server-envs-dir',
'--puppet-server-envs-target',
'--puppet-server-external-nodes',
'--puppet-server-foreman',
'--puppet-server-foreman-facts',
'--puppet-server-foreman-ssl-ca',
'--puppet-server-foreman-ssl-cert',
'--puppet-server-foreman-ssl-key',
'--puppet-server-foreman-url',
'--puppet-server-git-branch-map',
'--puppet-server-git-repo',
'--puppet-server-git-repo-group',
'--puppet-server-git-repo-mode',
'--puppet-server-git-repo-path',
'--puppet-server-git-repo-user',
'--puppet-server-group',
'--puppet-server-http',
'--puppet-server-http-port',
'--puppet-server-idle-timeout',
'--puppet-server-ip',
'--puppet-server-jruby-gem-home',
'--puppet-server-jvm-cli-args',
'--puppet-server-jvm-config',
'--puppet-server-jvm-extra-args',
'--puppet-server-jvm-java-bin',
'--puppet-server-jvm-max-heap-size',
'--puppet-server-jvm-min-heap-size',
'--puppet-server-manage-user',
'--puppet-server-max-active-instances',
'--puppet-server-max-open-files',
'--puppet-server-max-queued-requests',
'--puppet-server-max-requests-per-instance',
'--puppet-server-max-retry-delay',
'--puppet-server-max-threads',
'--puppet-server-metrics-allowed',
'--puppet-server-metrics-graphite-enable',
'--puppet-server-metrics-graphite-host',
'--puppet-server-metrics-graphite-interval',
'--puppet-server-metrics-graphite-port',
'--puppet-server-metrics-jmx-enable',
'--puppet-server-metrics-server-id',
'--puppet-server-package',
'--puppet-server-parser',
'--puppet-server-port',
'--puppet-server-post-hook-content',
'--puppet-server-post-hook-name',
'--puppet-server-puppet-basedir',
'--puppet-server-puppetserver-dir',
'--puppet-server-puppetserver-experimental',
'--puppet-server-puppetserver-jruby9k',
'--puppet-server-puppetserver-logdir',
'--puppet-server-puppetserver-metrics',
'--puppet-server-puppetserver-profiler',
'--puppet-server-puppetserver-rundir',
'--puppet-server-puppetserver-trusted-agents',
'--puppet-server-puppetserver-trusted-certificate-extensions',
'--puppet-server-puppetserver-vardir',
'--puppet-server-puppetserver-version',
'--puppet-server-puppetserver-auth-template',
'--puppet-server-reports',
'--puppet-server-request-timeout',
'--puppet-server-ruby-load-paths',
'--puppet-server-selector-threads',
'--puppet-server-ssl-acceptor-threads',
'--puppet-server-ssl-chain-filepath',
'--puppet-server-ssl-dir',
'--puppet-server-ssl-dir-manage',
'--puppet-server-ssl-key-manage',
'--puppet-server-ssl-protocols',
'--puppet-server-ssl-selector-threads',
'--puppet-server-strict-variables',
'--puppet-server-use-legacy-auth-conf',
'--puppet-server-user',
'--puppet-server-version',
'--puppet-server-web-idle-timeout',
'--puppet-service-name',
'--puppet-sharedir',
'--puppet-show-diff',
'--puppet-splay',
'--puppet-splaylimit',
'--puppet-srv-domain',
'--puppet-ssldir',
'--puppet-syslogfacility',
'--puppet-systemd-cmd',
'--puppet-systemd-randomizeddelaysec',
'--puppet-systemd-unit-name',
'--puppet-unavailable-runmodes',
'--puppet-use-srv-records',
'--puppet-usecacheonfailure',
'--puppet-user',
'--puppet-vardir',
'--puppet-version',
'--register-with-insights',
'--reset-certs-ca-common-name',
'--reset-certs-ca-expiration',
'--reset-certs-city',
'--reset-certs-cname',
'--reset-certs-country',
'--reset-certs-default-ca-name',
'--reset-certs-deploy',
'--reset-certs-expiration',
'--reset-certs-generate',
'--reset-certs-group',
'--reset-certs-node-fqdn',
'--reset-certs-org',
'--reset-certs-org-unit',
'--reset-certs-pki-dir',
'--reset-certs-regenerate',
'--reset-certs-server-ca-cert',
'--reset-certs-server-ca-name',
'--reset-certs-server-cert',
'--reset-certs-server-cert-req',
'--reset-certs-server-key',
'--reset-certs-ssl-build-dir',
'--reset-certs-state',
'--reset-certs-tar-file',
'--reset-certs-user',
'--reset-data',
'--reset-foreman-apache',
'--reset-foreman-app-root',
'--reset-foreman-cli-foreman-url',
'--reset-foreman-cli-hammer-plugin-prefix',
'--reset-foreman-cli-manage-root-config',
'--reset-foreman-cli-password',
'--reset-foreman-cli-refresh-cache',
'--reset-foreman-cli-request-timeout',
'--reset-foreman-cli-ssl-ca-file',
'--reset-foreman-cli-username',
'--reset-foreman-cli-use-sessions',
'--reset-foreman-cli-version',
'--reset-foreman-client-ssl-ca',
'--reset-foreman-client-ssl-cert',
'--reset-foreman-client-ssl-key',
'--reset-foreman-compute-ec2-version',
'--reset-foreman-compute-gce-version',
'--reset-foreman-compute-libvirt-version',
'--reset-foreman-compute-openstack-version',
'--reset-foreman-compute-ovirt-version',
'--reset-foreman-compute-vmware-version',
'--reset-foreman-cors-domains',
'--reset-foreman-db-database',
'--reset-foreman-db-host',
'--reset-foreman-db-manage',
'--reset-foreman-db-manage-rake',
'--reset-foreman-db-password',
'--reset-foreman-db-pool',
'--reset-foreman-db-port',
'--reset-foreman-db-root-cert',
'--reset-foreman-db-sslmode',
'--reset-foreman-db-username',
'--reset-foreman-dynflow-manage-services',
'--reset-foreman-dynflow-orchestrator-ensure',
'--reset-foreman-dynflow-redis-url',
'--reset-foreman-dynflow-worker-concurrency',
'--reset-foreman-dynflow-worker-instances',
'--reset-foreman-email-delivery-method',
'--reset-foreman-email-smtp-address',
'--reset-foreman-email-smtp-authentication',
'--reset-foreman-email-smtp-domain',
'--reset-foreman-email-smtp-password',
'--reset-foreman-email-smtp-port',
'--reset-foreman-email-smtp-user-name',
'--reset-foreman-foreman-url',
'--reset-foreman-foreman-service-puma-threads-max',
'--reset-foreman-foreman-service-puma-threads-min',
'--reset-foreman-foreman-service-puma-workers',
'--reset-foreman-group',
'--reset-foreman-hsts-enabled',
'--reset-foreman-http-keytab',
'--reset-foreman-initial-admin-email',
'--reset-foreman-initial-admin-first-name',
'--reset-foreman-initial-admin-last-name',
'--reset-foreman-initial-admin-locale',
'--reset-foreman-initial-admin-password',
'--reset-foreman-initial-admin-timezone',
'--reset-foreman-initial-admin-username',
'--reset-foreman-initial-location',
'--reset-foreman-initial-organization',
'--reset-foreman-ipa-authentication',
'--reset-foreman-ipa-manage-sssd',
'--reset-foreman-keycloak',
'--reset-foreman-keycloak-realm',
'--reset-foreman-keycloak-app-name',
'--reset-foreman-loggers',
'--reset-foreman-logging-layout',
'--reset-foreman-logging-level',
'--reset-foreman-logging-type',
'--reset-foreman-manage-user',
'--reset-foreman-oauth-active',
'--reset-foreman-oauth-consumer-key',
'--reset-foreman-oauth-consumer-secret',
'--reset-foreman-oauth-map-users',
'--reset-foreman-pam-service',
'--reset-foreman-plugin-prefix',
'--reset-foreman-plugin-tasks-automatic-cleanup',
'--reset-foreman-plugin-tasks-backup',
'--reset-foreman-plugin-tasks-cron-line',
'--reset-foreman-plugin-version',
'--reset-foreman-proxy-autosignfile',
'--reset-foreman-proxy-bind-host',
'--reset-foreman-proxy-bmc',
'--reset-foreman-proxy-bmc-default-provider',
'--reset-foreman-proxy-bmc-listen-on',
'--reset-foreman-proxy-bmc-ssh-key',
'--reset-foreman-proxy-bmc-ssh-powercycle',
'--reset-foreman-proxy-bmc-ssh-poweroff',
'--reset-foreman-proxy-bmc-ssh-poweron',
'--reset-foreman-proxy-bmc-ssh-powerstatus',
'--reset-foreman-proxy-bmc-ssh-user',
'--reset-foreman-proxy-content-enable-ansible',
'--reset-foreman-proxy-content-enable-deb',
'--reset-foreman-proxy-content-enable-docker',
'--reset-foreman-proxy-content-enable-file',
'--reset-foreman-proxy-content-enable-katello-agent',
'--reset-foreman-proxy-content-enable-yum',
'--reset-foreman-proxy-content-pulpcore-mirror',
'--reset-foreman-proxy-content-pulpcore-allowed-content-checksums',
'--reset-foreman-proxy-content-pulpcore-api-service-worker-timeout',
'--reset-foreman-proxy-content-pulpcore-content-service-worker-timeout',
'--reset-foreman-proxy-content-pulpcore-django-secret-key',
'--reset-foreman-proxy-content-pulpcore-postgresql-db-name',
'--reset-foreman-proxy-content-pulpcore-manage-postgresql',
'--reset-foreman-proxy-content-pulpcore-postgresql-host',
'--reset-foreman-proxy-content-pulpcore-postgresql-password',
'--reset-foreman-proxy-content-pulpcore-postgresql-port',
'--reset-foreman-proxy-content-pulpcore-postgresql-ssl',
'--reset-foreman-proxy-content-pulpcore-postgresql-ssl-cert',
'--reset-foreman-proxy-content-pulpcore-postgresql-ssl-key',
'--reset-foreman-proxy-content-pulpcore-postgresql-ssl-require',
'--reset-foreman-proxy-content-pulpcore-postgresql-ssl-root-ca',
'--reset-foreman-proxy-content-pulpcore-postgresql-user',
'--reset-foreman-proxy-content-pulpcore-worker-count',
'--reset-foreman-proxy-content-pulpcore-cache-enabled',
'--reset-foreman-proxy-content-pulpcore-cache-expires-ttl',
'--reset-foreman-proxy-content-pulpcore-use-rq-tasking-system',
'--reset-foreman-proxy-content-puppet',
'--reset-foreman-proxy-content-qpid-router-agent-addr',
'--reset-foreman-proxy-content-qpid-router-agent-port',
'--reset-foreman-proxy-content-qpid-router-broker-addr',
'--reset-foreman-proxy-content-qpid-router-broker-port',
'--reset-foreman-proxy-content-qpid-router-hub-addr',
'--reset-foreman-proxy-content-qpid-router-hub-port',
'--reset-foreman-proxy-content-qpid-router-logging',
'--reset-foreman-proxy-content-qpid-router-logging-level',
'--reset-foreman-proxy-content-qpid-router-logging-path',
'--reset-foreman-proxy-content-qpid-router-ssl-ciphers',
'--reset-foreman-proxy-content-qpid-router-ssl-protocols',
'--reset-foreman-proxy-content-reverse-proxy',
'--reset-foreman-proxy-content-reverse-proxy-port',
'--reset-foreman-proxy-dhcp',
'--reset-foreman-proxy-dhcp-additional-interfaces',
'--reset-foreman-proxy-dhcp-config',
'--reset-foreman-proxy-dhcp-failover-address',
'--reset-foreman-proxy-dhcp-failover-port',
'--reset-foreman-proxy-dhcp-gateway',
'--reset-foreman-proxy-dhcp-interface',
'--reset-foreman-proxy-dhcp-key-name',
'--reset-foreman-proxy-dhcp-key-secret',
'--reset-foreman-proxy-dhcp-leases',
'--reset-foreman-proxy-dhcp-listen-on',
'--reset-foreman-proxy-dhcp-load-balance',
'--reset-foreman-proxy-dhcp-load-split',
'--reset-foreman-proxy-dhcp-manage-acls',
'--reset-foreman-proxy-dhcp-managed',
'--reset-foreman-proxy-dhcp-max-response-delay',
'--reset-foreman-proxy-dhcp-max-unacked-updates',
'--reset-foreman-proxy-dhcp-mclt',
'--reset-foreman-proxy-dhcp-nameservers',
'--reset-foreman-proxy-dhcp-netmask',
'--reset-foreman-proxy-dhcp-network',
'--reset-foreman-proxy-dhcp-node-type',
'--reset-foreman-proxy-dhcp-omapi-port',
'--reset-foreman-proxy-dhcp-option-domain',
'--reset-foreman-proxy-dhcp-peer-address',
'--reset-foreman-proxy-dhcp-ping-free-ip',
'--reset-foreman-proxy-dhcp-provider',
'--reset-foreman-proxy-dhcp-pxefilename',
'--reset-foreman-proxy-dhcp-pxeserver',
'--reset-foreman-proxy-dhcp-range',
'--reset-foreman-proxy-dhcp-search-domains',
'--reset-foreman-proxy-dhcp-server',
'--reset-foreman-proxy-dhcp-subnets',
'--reset-foreman-proxy-dns',
'--reset-foreman-proxy-dns-forwarders',
'--reset-foreman-proxy-dns-interface',
'--reset-foreman-proxy-dns-listen-on',
'--reset-foreman-proxy-dns-managed',
'--reset-foreman-proxy-dns-provider',
'--reset-foreman-proxy-dns-reverse',
'--reset-foreman-proxy-dns-server',
'--reset-foreman-proxy-dns-tsig-keytab',
'--reset-foreman-proxy-dns-tsig-principal',
'--reset-foreman-proxy-dns-ttl',
'--reset-foreman-proxy-dns-zone',
'--reset-foreman-proxy-ensure-packages-version',
'--reset-foreman-proxy-foreman-base-url',
'--reset-foreman-proxy-foreman-ssl-ca',
'--reset-foreman-proxy-foreman-ssl-cert',
'--reset-foreman-proxy-foreman-ssl-key',
'--reset-foreman-proxy-freeipa-config',
'--reset-foreman-proxy-freeipa-remove-dns',
'--reset-foreman-proxy-gpgcheck',
'--reset-foreman-proxy-groups',
'--reset-foreman-proxy-http',
'--reset-foreman-proxy-http-port',
'--reset-foreman-proxy-httpboot',
'--reset-foreman-proxy-httpboot-listen-on',
'--reset-foreman-proxy-keyfile',
'--reset-foreman-proxy-libvirt-connection',
'--reset-foreman-proxy-libvirt-network',
'--reset-foreman-proxy-log',
'--reset-foreman-proxy-log-buffer',
'--reset-foreman-proxy-log-buffer-errors',
'--reset-foreman-proxy-log-level',
'--reset-foreman-proxy-logs',
'--reset-foreman-proxy-logs-listen-on',
'--reset-foreman-proxy-manage-puppet-group',
'--reset-foreman-proxy-manage-sudoersd',
'--reset-foreman-proxy-oauth-consumer-key',
'--reset-foreman-proxy-oauth-consumer-secret',
'--reset-foreman-proxy-oauth-effective-user',
'--reset-foreman-proxy-plugin-ansible-ansible-dir',
'--reset-foreman-proxy-plugin-ansible-callback',
'--reset-foreman-proxy-plugin-ansible-enabled',
'--reset-foreman-proxy-plugin-ansible-host-key-checking',
'--reset-foreman-proxy-plugin-ansible-listen-on',
'--reset-foreman-proxy-plugin-ansible-install-runner',
'--reset-foreman-proxy-plugin-ansible-manage-runner-repo',
'--reset-foreman-proxy-plugin-ansible-roles-path',
'--reset-foreman-proxy-plugin-ansible-runner-package-name',
'--reset-foreman-proxy-plugin-ansible-ssh-args',
'--reset-foreman-proxy-plugin-ansible-stdout-callback',
'--reset-foreman-proxy-plugin-ansible-working-dir',
'--reset-foreman-proxy-plugin-dhcp-infoblox-dns-view',
'--reset-foreman-proxy-plugin-dhcp-infoblox-network-view',
'--reset-foreman-proxy-plugin-dhcp-infoblox-password',
'--reset-foreman-proxy-plugin-dhcp-infoblox-record-type',
'--reset-foreman-proxy-plugin-dhcp-infoblox-username',
'--reset-foreman-proxy-plugin-dhcp-remote-isc-dhcp-config',
'--reset-foreman-proxy-plugin-dhcp-remote-isc-dhcp-leases',
'--reset-foreman-proxy-plugin-dhcp-remote-isc-key-name',
'--reset-foreman-proxy-plugin-dhcp-remote-isc-key-secret',
'--reset-foreman-proxy-plugin-dhcp-remote-isc-omapi-port',
'--reset-foreman-proxy-plugin-discovery-image-name',
'--reset-foreman-proxy-plugin-discovery-install-images',
'--reset-foreman-proxy-plugin-discovery-source-url',
'--reset-foreman-proxy-plugin-discovery-tftp-root',
'--reset-foreman-proxy-plugin-dns-infoblox-dns-server',
'--reset-foreman-proxy-plugin-dns-infoblox-password',
'--reset-foreman-proxy-plugin-dns-infoblox-username',
'--reset-foreman-proxy-plugin-dns-infoblox-dns-view',
'--reset-foreman-proxy-plugin-openscap-contentdir',
'--reset-foreman-proxy-plugin-openscap-corrupted-dir',
'--reset-foreman-proxy-plugin-openscap-enabled',
'--reset-foreman-proxy-plugin-openscap-failed-dir',
'--reset-foreman-proxy-plugin-openscap-listen-on',
'--reset-foreman-proxy-plugin-openscap-openscap-send-log-file',
'--reset-foreman-proxy-plugin-openscap-proxy-name',
'--reset-foreman-proxy-plugin-openscap-reportsdir',
'--reset-foreman-proxy-plugin-openscap-spooldir',
'--reset-foreman-proxy-plugin-openscap-timeout',
'--reset-foreman-proxy-plugin-openscap-version',
'--reset-foreman-proxy-plugin-remote-execution-ssh-async-ssh',
'--reset-foreman-proxy-plugin-remote-execution-ssh-enabled',
'--reset-foreman-proxy-plugin-remote-execution-ssh-generate-keys',
'--reset-foreman-proxy-plugin-remote-execution-ssh-install-key',
'--reset-foreman-proxy-plugin-remote-execution-ssh-listen-on',
'--reset-foreman-proxy-plugin-remote-execution-ssh-local-working-dir',
'--reset-foreman-proxy-plugin-remote-execution-ssh-remote-working-dir',
'--reset-foreman-proxy-plugin-remote-execution-ssh-ssh-identity-dir',
'--reset-foreman-proxy-plugin-remote-execution-ssh-ssh-identity-file',
'--reset-foreman-proxy-plugin-remote-execution-ssh-ssh-kerberos-auth',
'--reset-foreman-proxy-plugin-remote-execution-ssh-ssh-keygen',
'--reset-foreman-proxy-plugin-shellhooks-directory',
'--reset-foreman-proxy-plugin-shellhooks-enabled',
'--reset-foreman-proxy-plugin-shellhooks-listen-on',
'--reset-foreman-proxy-plugin-shellhooks-version',
'--reset-foreman-proxy-puppet',
'--reset-foreman-proxy-puppet-api-timeout',
'--reset-foreman-proxy-puppet-group',
'--reset-foreman-proxy-puppet-listen-on',
'--reset-foreman-proxy-puppet-ssl-ca',
'--reset-foreman-proxy-puppet-ssl-cert',
'--reset-foreman-proxy-puppet-ssl-key',
'--reset-foreman-proxy-puppet-url',
'--reset-foreman-proxy-puppetca',
'--reset-foreman-proxy-puppetca-certificate',
'--reset-foreman-proxy-puppetca-cmd',
'--reset-foreman-proxy-puppetca-listen-on',
'--reset-foreman-proxy-puppetca-provider',
'--reset-foreman-proxy-puppetca-sign-all',
'--reset-foreman-proxy-puppetca-token-ttl',
'--reset-foreman-proxy-puppetca-tokens-file',
'--reset-foreman-proxy-puppetdir',
'--reset-foreman-proxy-realm',
'--reset-foreman-proxy-realm-keytab',
'--reset-foreman-proxy-realm-listen-on',
'--reset-foreman-proxy-realm-principal',
'--reset-foreman-proxy-realm-provider',
'--reset-foreman-proxy-register-in-foreman',
'--reset-foreman-proxy-registered-name',
'--reset-foreman-proxy-registered-proxy-url',
'--reset-foreman-proxy-registration',
'--reset-foreman-proxy-registration-listen-on',
'--reset-foreman-proxy-repo',
'--reset-foreman-proxy-ssl',
'--reset-foreman-proxy-ssl-ca',
'--reset-foreman-proxy-ssl-cert',
'--reset-foreman-proxy-ssl-disabled-ciphers',
'--reset-foreman-proxy-ssl-key',
'--reset-foreman-proxy-ssl-port',
'--reset-foreman-proxy-ssldir',
'--reset-foreman-proxy-template-url',
'--reset-foreman-proxy-templates',
'--reset-foreman-proxy-templates-listen-on',
'--reset-foreman-proxy-tftp',
'--reset-foreman-proxy-tftp-dirs',
'--reset-foreman-proxy-tftp-listen-on',
'--reset-foreman-proxy-tftp-manage-wget',
'--reset-foreman-proxy-tftp-managed',
'--reset-foreman-proxy-tftp-replace-grub2-cfg',
'--reset-foreman-proxy-tftp-root',
'--reset-foreman-proxy-tftp-servername',
'--reset-foreman-proxy-tftp-syslinux-filenames',
'--reset-foreman-proxy-tls-disabled-versions',
'--reset-foreman-proxy-trusted-hosts',
'--reset-foreman-proxy-use-sudoers',
'--reset-foreman-proxy-use-sudoersd',
'--reset-foreman-proxy-version',
'--reset-foreman-rails-cache-store',
'--reset-foreman-rails-env',
'--reset-puppet-server-ca-client-self-delete',
'--reset-foreman-server-port',
'--reset-foreman-server-ssl-ca',
'--reset-foreman-server-ssl-cert',
'--reset-foreman-server-ssl-certs-dir',
'--reset-foreman-server-ssl-chain',
'--reset-foreman-server-ssl-crl',
'--reset-foreman-server-ssl-key',
'--reset-foreman-server-ssl-port',
'--reset-foreman-server-ssl-protocol',
'--reset-foreman-server-ssl-verify-client',
'--reset-foreman-serveraliases',
'--reset-foreman-servername',
'--reset-foreman-ssl',
'--reset-foreman-telemetry-logger-enabled',
'--reset-foreman-telemetry-logger-level',
'--reset-foreman-telemetry-prefix',
'--reset-foreman-telemetry-prometheus-enabled',
'--reset-foreman-telemetry-statsd-enabled',
'--reset-foreman-telemetry-statsd-host',
'--reset-foreman-telemetry-statsd-protocol',
'--reset-foreman-unattended',
'--reset-foreman-unattended-url',
'--reset-foreman-user',
'--reset-foreman-user-groups',
'--reset-foreman-version',
'--reset-foreman-vhost-priority',
'--reset-foreman-websockets-encrypt',
'--reset-foreman-websockets-ssl-cert',
'--reset-foreman-websockets-ssl-key',
'--reset-katello-candlepin-db-host',
'--reset-katello-candlepin-db-name',
'--reset-katello-candlepin-db-password',
'--reset-katello-candlepin-db-port',
'--reset-katello-candlepin-db-ssl',
'--reset-katello-candlepin-db-ssl-verify',
'--reset-katello-candlepin-db-user',
'--reset-katello-candlepin-manage-db',
'--reset-katello-candlepin-oauth-key',
'--reset-katello-candlepin-oauth-secret',
'--reset-katello-hosts-queue-workers',
'--reset-katello-qpid-hostname',
'--reset-katello-qpid-interface',
'--reset-katello-qpid-wcache-page-size',
'--reset-katello-rest-client-timeout',
'--reset-puppet-additional-settings',
'--reset-puppet-agent',
'--reset-puppet-agent-additional-settings',
'--reset-puppet-agent-noop',
'--reset-puppet-agent-restart-command',
'--reset-puppet-allow-any-crl-auth',
'--reset-puppet-auth-allowed',
'--reset-puppet-auth-template',
'--reset-puppet-autosign',
'--reset-puppet-autosign-content',
'--reset-puppet-autosign-entries',
'--reset-puppet-autosign-mode',
'--reset-puppet-autosign-source',
'--reset-puppet-ca-crl-filepath',
'--reset-puppet-ca-port',
'--reset-puppet-ca-server',
'--reset-puppet-classfile',
'--reset-puppet-client-certname',
'--reset-puppet-client-package',
'--reset-puppet-codedir',
'--reset-puppet-cron-cmd',
'--reset-puppet-dir',
'--reset-puppet-dir-group',
'--reset-puppet-dir-owner',
'--reset-puppet-dns-alt-names',
'--reset-puppet-environment',
'--reset-puppet-group',
'--reset-puppet-hiera-config',
'--reset-puppet-http-connect-timeout',
'--reset-puppet-http-read-timeout',
'--reset-puppet-logdir',
'--reset-puppet-manage-packages',
'--reset-puppet-module-repository',
'--reset-puppet-package-provider',
'--reset-puppet-package-source',
'--reset-puppet-package-install-options',
'--reset-puppet-pluginfactsource',
'--reset-puppet-pluginsource',
'--reset-puppet-pluginsync',
'--reset-puppet-port',
'--reset-puppet-postrun-command',
'--reset-puppet-prerun-command',
'--reset-puppet-puppetmaster',
'--reset-puppet-remove-lock',
'--reset-puppet-report',
'--reset-puppet-run-hour',
'--reset-puppet-run-minute',
'--reset-puppet-rundir',
'--reset-puppet-runinterval',
'--reset-puppet-runmode',
'--reset-puppet-server',
'--reset-puppet-server-acceptor-threads',
'--reset-puppet-server-additional-settings',
'--reset-puppet-server-admin-api-whitelist',
'--reset-puppet-server-allow-header-cert-info',
'--reset-puppet-server-ca',
'--reset-puppet-server-ca-allow-auth-extensions',
'--reset-puppet-server-ca-allow-sans',
'--reset-puppet-server-ca-auth-required',
'--reset-puppet-server-ca-client-whitelist',
'--reset-puppet-server-ca-crl-sync',
'--reset-puppet-server-ca-enable-infra-crl',
'--reset-puppet-server-certname',
'--reset-puppet-server-check-for-updates',
'--reset-puppet-server-cipher-suites',
'--reset-puppet-server-common-modules-path',
'--reset-puppet-server-compile-mode',
'--reset-puppet-server-config-version',
'--reset-puppet-server-connect-timeout',
'--reset-puppet-server-crl-enable',
'--reset-puppet-server-custom-trusted-oid-mapping',
'--reset-puppet-server-default-manifest',
'--reset-puppet-server-default-manifest-content',
'--reset-puppet-server-default-manifest-path',
'--reset-puppet-server-dir',
'--reset-puppet-server-environment-class-cache-enabled',
'--reset-puppet-server-environment-timeout',
'--reset-puppet-server-environments-group',
'--reset-puppet-server-environments-mode',
'--reset-puppet-server-environments-owner',
'--reset-puppet-server-envs-dir',
'--reset-puppet-server-envs-target',
'--reset-puppet-server-external-nodes',
'--reset-puppet-server-foreman',
'--reset-puppet-server-foreman-facts',
'--reset-puppet-server-foreman-ssl-ca',
'--reset-puppet-server-foreman-ssl-cert',
'--reset-puppet-server-foreman-ssl-key',
'--reset-puppet-server-foreman-url',
'--reset-puppet-server-git-branch-map',
'--reset-puppet-server-git-repo',
'--reset-puppet-server-git-repo-group',
'--reset-puppet-server-git-repo-mode',
'--reset-puppet-server-git-repo-path',
'--reset-puppet-server-git-repo-user',
'--reset-puppet-server-group',
'--reset-puppet-server-http',
'--reset-puppet-server-http-port',
'--reset-puppet-server-idle-timeout',
'--reset-puppet-server-ip',
'--reset-puppet-server-jruby-gem-home',
'--reset-puppet-server-jvm-cli-args',
'--reset-puppet-server-jvm-config',
'--reset-puppet-server-jvm-extra-args',
'--reset-puppet-server-jvm-java-bin',
'--reset-puppet-server-jvm-max-heap-size',
'--reset-puppet-server-jvm-min-heap-size',
'--reset-puppet-server-manage-user',
'--reset-puppet-server-max-active-instances',
'--reset-puppet-server-max-open-files',
'--reset-puppet-server-max-queued-requests',
'--reset-puppet-server-max-requests-per-instance',
'--reset-puppet-server-max-retry-delay',
'--reset-puppet-server-max-threads',
'--reset-puppet-server-metrics-allowed',
'--reset-puppet-server-metrics-graphite-enable',
'--reset-puppet-server-metrics-graphite-host',
'--reset-puppet-server-metrics-graphite-interval',
'--reset-puppet-server-metrics-graphite-port',
'--reset-puppet-server-metrics-jmx-enable',
'--reset-puppet-server-metrics-server-id',
'--reset-puppet-server-multithreaded',
'--reset-puppet-server-package',
'--reset-puppet-server-parser',
'--reset-puppet-server-port',
'--reset-puppet-server-post-hook-content',
'--reset-puppet-server-post-hook-name',
'--reset-puppet-server-puppet-basedir',
'--reset-puppet-server-puppetserver-dir',
'--reset-puppet-server-puppetserver-experimental',
'--reset-puppet-server-puppetserver-jruby9k',
'--reset-puppet-server-puppetserver-logdir',
'--reset-puppet-server-puppetserver-metrics',
'--reset-puppet-server-puppetserver-rundir',
'--reset-puppet-server-puppetserver-trusted-agents',
'--reset-puppet-server-puppetserver-trusted-certificate-extensions',
'--reset-puppet-server-puppetserver-vardir',
'--reset-puppet-server-puppetserver-version',
'--reset-puppet-server-puppetserver-auth-template',
'--reset-puppet-server-puppetserver-profiler',
'--reset-puppet-server-reports',
'--reset-puppet-server-request-timeout',
'--reset-puppet-server-ruby-load-paths',
'--reset-puppet-server-storeconfigs',
'--reset-puppet-server-selector-threads',
'--reset-puppet-server-ssl-acceptor-threads',
'--reset-puppet-server-ssl-chain-filepath',
'--reset-puppet-server-ssl-dir',
'--reset-puppet-server-ssl-dir-manage',
'--reset-puppet-server-ssl-key-manage',
'--reset-puppet-server-ssl-protocols',
'--reset-puppet-server-ssl-selector-threads',
'--reset-puppet-server-strict-variables',
'--reset-puppet-server-trusted-external-command',
'--reset-puppet-server-use-legacy-auth-conf',
'--reset-puppet-server-user',
'--reset-puppet-server-versioned-code-content',
'--reset-puppet-server-versioned-code-id',
'--reset-puppet-server-version',
'--reset-puppet-server-web-idle-timeout',
'--reset-puppet-service-name',
'--reset-puppet-sharedir',
'--reset-puppet-show-diff',
'--reset-puppet-splay',
'--reset-puppet-splaylimit',
'--reset-puppet-srv-domain',
'--reset-puppet-ssldir',
'--reset-puppet-syslogfacility',
'--reset-puppet-systemd-cmd',
'--reset-puppet-systemd-randomizeddelaysec',
'--reset-puppet-systemd-unit-name',
'--reset-puppet-unavailable-runmodes',
'--reset-puppet-use-srv-records',
'--reset-puppet-usecacheonfailure',
'--reset-puppet-user',
'--reset-puppet-vardir',
'--reset-puppet-version',
'--scenario',
'--skip-checks-i-know-better',
'--skip-puppet-version-check',
'--tuning',
'--[no-]verbose',
'--verbose-log-level',
'-S',
'-h',
'-i',
'-l',
'-n',
'-p',
'-s',
'-v',
'-',
}
LAST_SAVED_SECTIONS = {
'= Generic:',
'= Module certs:',
'= Module foreman:',
'= Module foreman_cli:',
'= Module foreman_compute_ec2:',
'= Module foreman_compute_gce:',
'= Module foreman_compute_libvirt:',
'= Module foreman_compute_openstack:',
'= Module foreman_compute_ovirt:',
'= Module foreman_compute_vmware:',
'= Module foreman_plugin_tasks:',
'= Module foreman_proxy:',
'= Module foreman_proxy_content:',
'= Module foreman_proxy_plugin_ansible:',
'= Module foreman_proxy_plugin_dhcp_infoblox:',
'= Module foreman_proxy_plugin_dhcp_remote_isc:',
'= Module foreman_proxy_plugin_discovery:',
'= Module foreman_proxy_plugin_dns_infoblox:',
'= Module foreman_proxy_plugin_openscap:',
'= Module foreman_proxy_plugin_shellhooks:',
'= Module foreman_proxy_plugin_remote_execution_ssh:',
'= Module katello:',
'= Module puppet:',
}
SATELLITE_SERVICES = {
'dynflow-sidekiq@orchestrator',
'dynflow-sidekiq@worker-1',
'dynflow-sidekiq@worker-hosts-queue-1',
'foreman-proxy',
'foreman',
'httpd',
'postgresql',
'pulpcore-api',
'pulpcore-content',
'rh-redis5-redis',
'puppetserver',
}
def extract_help(filter='params'):
"""Generator function to extract satellite installer params and sections from lines of help text.
In general lst is cmd.stdout, e.g., a list of strings representing host
stdout
:param string filter: Filter `sections` or `params` in full help, default is params
:return: generator with params or sections depends on filter parameter
"""
stdout = ssh.command('satellite-installer --full-help').stdout
for line in stdout or []:
line = line.strip()
if filter == 'sections':
if line.startswith('= '):
yield line
else:
first_2_tokens = line.split()[:2]
for token in first_2_tokens:
if token[0] == '-':
yield token.replace(',', '')
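# Illustrative note: for filter='params' this generator yields flags exactly as
# they appear in PREVIOUS_INSTALLER_OPTIONS (e.g. '--reset-puppet-server-port',
# '-v'); for filter='sections' it yields the '= ...' headings collected in
# LAST_SAVED_SECTIONS (e.g. '= Module puppet:').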
@pytest.mark.upgrade
@pytest.mark.tier1
def test_positive_foreman_module():
"""Check if SELinux foreman module has the right version
:id: a0736b3a-3d42-4a09-a11a-28c1d58214a5
:steps:
1. Check "foreman-selinux" package availability on satellite.
2. Check SELinux foreman module on satellite.
:CaseImportance: Critical
:CaseLevel: System
:expectedresults: Foreman RPM and SELinux module versions match
"""
rpm_result = ssh.command('rpm -q foreman-selinux')
assert rpm_result.return_code == 0
semodule_result = ssh.command('semodule -l | grep foreman')
assert semodule_result.return_code == 0
# Sample rpm output: foreman-selinux-1.7.2.8-1.el7sat.noarch
version_regex = re.compile(r'((\d\.?)+[-.]\d)')
rpm_version = version_regex.search(''.join(rpm_result.stdout)).group(1)
# Sample semodule output: foreman 1.7.2.8
semodule_version = version_regex.search(''.join(semodule_result.stdout)).group(1)
rpm_version = rpm_version[:-2]
assert rpm_version.replace('-', '.') == semodule_version
@pytest.mark.skip_if_open('BZ:1987288')
@pytest.mark.upgrade
@pytest.mark.tier1
def test_positive_check_installer_services():
"""Check if services start correctly
:id: 85fd4388-6d94-42f5-bed2-24be38e9f104
:steps:
1. Run 'systemctl status <tomcat>' command to check tomcat service status on satellite.
2. Run 'foreman-maintain service status' command on satellite to check the satellite
services.
3. Run the 'hammer ping' command on satellite.
:BZ: 1987288
:expectedresults: All services are started
:CaseImportance: Critical
:CaseLevel: System
"""
major_version = get_host_info()[1]
service_name = 'tomcat6' if major_version == RHEL_6_MAJOR_VERSION else 'tomcat'
SATELLITE_SERVICES.add(service_name)
if major_version >= RHEL_7_MAJOR_VERSION:
status_format = "systemctl status {0}"
else:
status_format = "service {0} status"
for service in SATELLITE_SERVICES:
result = ssh.command(status_format.format(service))
assert result.return_code == 0
assert len(result.stderr) == 0
# check status reported by hammer ping command
username = settings.server.admin_username
password = settings.server.admin_password
result = ssh.command(f'hammer -u {username} -p {password} ping')
result_output = [
service.strip() for service in result.stdout if not re.search(r'message:', service)
]
# iterate over the lines grouping every 3 lines
# example [1, 2, 3, 4, 5, 6] will return [(1, 2, 3), (4, 5, 6)]
for service, status, response in zip(*[iter(result_output)] * 3):
service = service.replace(':', '').strip()
status = status.split(':')[1].strip().lower()
response = response.split(':', 1)[1].strip()
assert status == 'ok', f'{service} responded with {response}'
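# Illustrative helper, not used by the tests: the zip(*[iter(...)] * 3) idiom
# above walks a flat list of lines in chunks of three (service, status,
# response).  A tiny standalone sketch of that grouping with made-up data:
def _group_in_threes(lines):
    """Return [(a, b, c), (d, e, f), ...] for a flat list of items."""
    return list(zip(*[iter(lines)] * 3))


# _group_in_threes([1, 2, 3, 4, 5, 6]) -> [(1, 2, 3), (4, 5, 6)]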
@pytest.mark.upgrade
@pytest.mark.tier3
@pytest.mark.parametrize('filter', ['params', 'sections'])
def test_installer_options_and_sections(filter):
"""Look for changes on installer sections and options/flags
:id: a51d3b9f-f347-4a96-a31a-770349db08c7
:parametrized: yes
:Steps:
1. parse installer sections and options/flags
2. compare with last saved data
    :expectedresults: Ideally sections and options should not change on zstreams.
        Documentation must be updated accordingly when such changes occur.
        So when this test fails, QE can act on it, asking dev whether the
        changes occur on a zstream and checking that the docs are up to date.
:CaseImportance: Medium
"""
current = set(extract_help(filter=filter))
previous = PREVIOUS_INSTALLER_OPTIONS if filter == 'params' else LAST_SAVED_SECTIONS
removed = list(previous - current)
removed.sort()
added = list(current - previous)
added.sort()
msg = f"###Removed {filter}:\n{removed}\n###Added {filter}:\n{added}"
assert previous == current, msg
@pytest.mark.stubbed
@pytest.mark.tier3
def test_satellite_installation_on_ipv6():
"""
Check the satellite installation on ipv6 machine.
:id: 24fa5ef0-1673-427c-82ab-740758683cff
    :steps:
        1. Install satellite on ipv6 machine.
    :expectedresults:
        1. Installation should be successful.
        2. After installation, all the services should be up and running.
        3. Status of hammer ping should be ok.
        4. Satellite service restart should work.
        5. After system reboot all the services come back up.
:CaseImportance: Critical
:CaseLevel: System
:CaseAutomation: NotAutomated
"""
@pytest.mark.stubbed
@pytest.mark.tier3
def test_capsule_installation_on_ipv6():
"""
Check the capsule installation over ipv6 machine
:id: 75341e29-342f-41fc-aaa8-cda013b7dfa1
:steps:
1. Install capsule on ipv6 machine.
:expectedresults:
1. Capsule installation should be successful.
        2. After installation, all the services should be up and running.
        3. Satellite service restart should work.
        4. After system reboot all the services come back up.
:CaseImportance: Critical
:CaseLevel: System
:CaseAutomation: NotAutomated
"""
@pytest.mark.stubbed
@pytest.mark.tier3
def test_installer_check_on_ipv6():
"""
Check the satellite-installer command execution with tuning options and updated config file.
:id: 411bbffb-027f-4df0-8566-1719d1d0651a
    :steps:
        1. Install satellite on ipv6 machine.
        2. Trigger the satellite-installer command with the "--tuning medium" flag.
        3. Update the custom-hiera.yaml file (add any supported config parameter).
        4. Trigger the satellite-installer command with no option.
:expectedresults:
1. Tuning parameter set successfully for medium size.
2. custom-hiera.yaml related changes should be successfully applied.
:CaseImportance: Critical
:CaseLevel: System
:CaseAutomation: NotAutomated
"""
@pytest.mark.stubbed
@pytest.mark.tier1
def test_installer_verbose_stdout():
"""Look for Satellite installer verbose STDOUT
:id: 5d0fb30a-4a63-41b3-bc6f-c4057942ce3c
    :steps:
        1. Install satellite package.
        2. Run Satellite installer.
        3. Observe installer STDOUT.
    :expectedresults:
        1. Installer prints the completion of the following hook groups to STDOUT:
           pre_migrations, boot, init, pre_values, pre_validations, pre_commit, pre, post
        2. Installer prints system configuration completion to STDOUT.
        3. Finally, the installer reports the running satellite URL, credentials,
           external capsule installation prerequisites, capsule upgrade instructions,
           the running internal capsule URL, and the log file.
:CaseImportance: Critical
:CaseLevel: System
:CaseAutomation: NotAutomated
"""
@pytest.mark.stubbed
@pytest.mark.tier1
def test_installer_answers_file():
"""Answers file to configure plugins and hooks
:id: 5cb40e4b-1acb-49f9-a085-a7dead1664b5
    :steps:
        1. Install satellite package.
        2. Modify the `/etc/foreman-installer/scenarios.d/satellite-answers.yaml` file to
           configure a hook/plugin on satellite
3. Run Satellite installer
:expectedresults: Installer configures plugins and hooks in answers file.
:CaseImportance: Critical
:CaseLevel: System
:CaseAutomation: NotAutomated
"""
@pytest.mark.stubbed
@pytest.mark.tier1
def test_capsule_installer_verbose_stdout():
"""Look for Capsule installer verbose STDOUT
:id: 323e85e3-2ad1-4018-aa35-1d51f1e7f5a2
    :steps:
        1. Install capsule package.
        2. Run Satellite installer --scenario capsule.
        3. Observe installer STDOUT.
    :expectedresults:
        1. Installer prints the completion of the following hook groups to STDOUT:
           pre_migrations, boot, init, pre_values, pre_validations, pre_commit, pre, post
        2. Installer prints system configuration completion to STDOUT.
        3. Finally, the installer reports the running capsule URL and the log file.
:CaseImportance: Critical
:CaseLevel: System
:CaseAutomation: NotAutomated
"""
@pytest.mark.stubbed
@pytest.mark.tier3
def test_installer_timestamp_logs():
"""Look for Satellite installer timestamp based logs
:id: 9b4d32f6-d471-4bdb-8a79-9bb20ecb86aa
    :steps:
        1. Install satellite package.
        2. Run Satellite installer.
        3. Observe installer log file `/var/log/foreman-installer/satellite.log`.
    :expectedresults:
        1. Installer logs satellite installation with timestamps in the following format:
           YYYY-MM-DD HH:MM:SS
:CaseImportance: Critical
:CaseLevel: System
:CaseAutomation: NotAutomated
"""
|
jyejare/robottelo
|
tests/foreman/installer/test_installer.py
|
Python
|
gpl-3.0
| 60,651
|
from operator import itemgetter
__author__ = 'davide'
def pairwise(l):
for t in zip(l, l[1:]):
yield t
def pijavskij(f, L, a, b, eps=1E-5):
    """Piyavskii-Shubert global minimization of f on [a, b].

    L must be a valid Lipschitz constant for f; the search stops once the
    most promising interval is narrower than eps.
    """
    # Evaluated points (x, f(x)), kept sorted by x.
    l = [(a, f(a)), (b, f(b))]
    while True:
        imin, Rmin, xmin = -1, float("inf"), -1
        for i, t in enumerate(pairwise(l)):
            (xi, fi), (xj, fj) = t
            # Lower bound allowed by the Lipschitz condition inside [xi, xj].
            R = (fi + fj - L * (xj - xi)) / 2
            if R < Rmin:
                imin = i
                Rmin = R
                # Abscissa where the two Lipschitz cones intersect.
                xmin = (xi + xj) / 2 - (fj - fi) / (2 * L)
        if l[imin + 1][0] - l[imin][0] < eps:
            return l[imin], l[imin + 1]
        l.append((xmin, f(xmin)))
        l.sort(key=itemgetter(0))
        print(l)
if __name__ == "__main__":
f = lambda x: x ** 4
t = pijavskij(f, 50, -100, 100, eps=1E-10)
print(t)
|
DavideCanton/Python3
|
num/pijavskij.py
|
Python
|
gpl-3.0
| 830
|
# -*- coding: utf-8 -*-
#
# API configuration
#####################
DEBUG = False
# Top-level URL for deployment. Numerous other URLs depend on this.
CYCLADES_BASE_URL = "https://compute.example.synnefo.org/compute/"
# The API will return HTTP Bad Request if the ?changes-since
# parameter refers to a point in time more than POLL_LIMIT seconds ago.
POLL_LIMIT = 3600
# Astakos groups that have access to '/admin' views.
ADMIN_STATS_PERMITTED_GROUPS = ["admin-stats"]
# Enable/Disable the snapshots feature altogether at the API level.
# If set to False, Cyclades will not expose the '/snapshots' API URL
# of the 'volume' app.
CYCLADES_SNAPSHOTS_ENABLED = True
# Enable/Disable the feature of a sharing a resource to the members of the
# project to which it belongs, at the API level.
CYCLADES_SHARED_RESOURCES_ENABLED = False
# Enable/Disable the feature of rescuing a Virtual Machine at the API
# level
RESCUE_ENABLED = False
#
# Network Configuration
#
# CYCLADES_DEFAULT_SERVER_NETWORKS setting contains a list of networks to
# connect a newly created server to, *if the user has not* specified them
# explicitly in the POST /server API call.
# Each member of the list may be a network UUID, a tuple of network UUIDs,
# "SNF:ANY_PUBLIC_IPV4" [any public network with an IPv4 subnet defined],
# "SNF:ANY_PUBLIC_IPV6 [any public network with only an IPV6 subnet defined],
# or "SNF:ANY_PUBLIC" [any public network].
#
# Access control and quota policy are enforced, just as if the user had
# specified the value of CYCLADES_DEFAULT_SERVER_NETWORKS in the content
# of the POST /server API call, after processing of "SNF:*" directives.
CYCLADES_DEFAULT_SERVER_NETWORKS = []
# This setting contains a list of networks which every new server
# will be forced to connect to, regardless of the contents of the POST
# /servers call, or the value of CYCLADES_DEFAULT_SERVER_NETWORKS.
# Its format is identical to that of CYCLADES_DEFAULT_SERVER_NETWORKS.
# WARNING: No access control or quota policy are enforced.
# The server will get all IPv4/IPv6 addresses needed to connect to the
# networks specified in CYCLADES_FORCED_SERVER_NETWORKS, regardless
# of the state of the floating IP pool of the user, and without
# allocating any floating IPs."
CYCLADES_FORCED_SERVER_NETWORKS = []
# Maximum allowed network size for private networks.
MAX_CIDR_BLOCK = 22
# Default settings used by network flavors
DEFAULT_MAC_PREFIX = 'aa:00:0'
DEFAULT_BRIDGE = 'br0'
# Network flavors that users are allowed to create through API requests
# Available flavors are IP_LESS_ROUTED, MAC_FILTERED, PHYSICAL_VLAN
API_ENABLED_NETWORK_FLAVORS = ['MAC_FILTERED']
# Settings for MAC_FILTERED network:
# ------------------------------------------
# All networks of this type are bridged to the same bridge. Isolation between
# networks is achieved by assigning a unique MAC-prefix to each network and
# filtering packets via ebtables.
DEFAULT_MAC_FILTERED_BRIDGE = 'prv0'
# Firewalling. Firewall tags should contain '%d' to be filled with the NIC
# ID.
GANETI_FIREWALL_ENABLED_TAG = 'synnefo:network:%s:protected'
GANETI_FIREWALL_DISABLED_TAG = 'synnefo:network:%s:unprotected'
GANETI_FIREWALL_PROTECTED_TAG = 'synnefo:network:%s:limited'
# The default firewall profile that will be in effect if no tags are defined
DEFAULT_FIREWALL_PROFILE = 'DISABLED'
# Fixed mapping of user VMs to a specific backend.
# e.g. BACKEND_PER_USER = {'example@synnefo.org': 2}
BACKEND_PER_USER = {}
# Encryption key for the instance hostname in the stat graphs URLs. Set it to
# a random string and update the STATS_SECRET_KEY setting in the snf-stats-app
# host (20-snf-stats-app-settings.conf) accordingly.
CYCLADES_STATS_SECRET_KEY = "secret_key"
# URL templates for the stat graphs.
# The API implementation replaces '%s' with the encrypted backend id.
CPU_BAR_GRAPH_URL = 'http://stats.example.synnefo.org/stats/v1.0/cpu-bar/%s'
CPU_TIMESERIES_GRAPH_URL = \
'http://stats.example.synnefo.org/stats/v1.0/cpu-ts/%s'
NET_BAR_GRAPH_URL = 'http://stats.example.synnefo.org/stats/v1.0/net-bar/%s'
NET_TIMESERIES_GRAPH_URL = \
'http://stats.example.synnefo.org/stats/v1.0/net-ts/%s'
# Recommended refresh period for server stats
STATS_REFRESH_PERIOD = 60
# The maximum number of file path/content pairs that can be supplied on server
# build
MAX_PERSONALITY = 5
# The maximum size, in bytes, for each personality file
MAX_PERSONALITY_SIZE = 10240
# Authentication URL of the astakos instance to be used for user management
ASTAKOS_AUTH_URL = 'https://accounts.example.synnefo.org/identity/v2.0'
# Tune the size of the Astakos http client connection pool
# This limit the number of concurrent requests to Astakos.
CYCLADES_ASTAKOSCLIENT_POOLSIZE = 50
# Key for password encryption-decryption. After changing this setting, synnefo
# will be unable to decrypt all existing Backend passwords. You will need to
# store again the new password by using 'snf-manage backend-modify'.
# SECRET_ENCRYPTION_KEY may be up to 32 bytes. Keys bigger than 32 bytes are not
# supported.
SECRET_ENCRYPTION_KEY = "Password Encryption Key"
# Astakos service token
# The token used for astakos service api calls (e.g. api to retrieve user email
# using a user uuid)
CYCLADES_SERVICE_TOKEN = ''
# Template to use to build the FQDN of VMs. The setting will be formatted with
# the id of the VM.
CYCLADES_SERVERS_FQDN = 'snf-%(id)s.vm.example.synnefo.org'
# Description of applied port forwarding rules (DNAT) for Cyclades VMs. This
# setting contains a mapping from the port of each VM to a tuple containing the
# destination IP/hostname and the new port: (host, port). Instead of a tuple a
# python callable object may be used which must return such a tuple. The caller
# will pass to the callable the following positional arguments, in the
# following order:
# * server_id: The ID of the VM in the DB
# * ip_address: The IPv4 address of the public VM NIC
# * fqdn: The FQDN of the VM
# * user: The UUID of the owner of the VM
#
# Here is an example describing the mapping of the SSH port of all VMs to
# the external address 'gate.example.synnefo.org' and port 60000+server_id.
# e.g. iptables -t nat -A prerouting -d gate.example.synnefo.org \
# --dport (61000 + $(VM_ID)) -j DNAT --to-destination $(VM_IP):22
#CYCLADES_PORT_FORWARDING = {
# 22: lambda ip_address, server_id, fqdn, user:
# ("gate.example.synnefo.org", 61000 + server_id),
#}
CYCLADES_PORT_FORWARDING = {}
# Extra configuration options required for snf-vncauthproxy (>=1.5). Each dict
# of the list, describes one vncauthproxy instance.
CYCLADES_VNCAUTHPROXY_OPTS = [
{
# These values are required for VNC console support. They should match
# a user / password configured in the snf-vncauthproxy authentication /
# users file (/var/lib/vncauthproxy/users).
'auth_user': 'synnefo',
'auth_password': 'secret_password',
# server_address and server_port should reflect the --listen-address and
# --listen-port options passed to the vncauthproxy daemon
'server_address': '127.0.0.1',
'server_port': 24999,
# Set to True to enable SSL support on the control socket.
'enable_ssl': False,
# If you enabled SSL support for snf-vncauthproxy you can optionally
        # provide a path to a CA file and enable strict checking for the server
        # certificate.
'ca_cert': None,
'strict': False,
},
]
# The maximum allowed size(GB) for a Cyclades Volume
CYCLADES_VOLUME_MAX_SIZE = 200
# The maximum allowed metadata items for a Cyclades Volume
CYCLADES_VOLUME_MAX_METADATA = 10
# The volume types that Cyclades allow to be detached
CYCLADES_DETACHABLE_DISK_TEMPLATES = ("ext_archipelago", "ext_vlmc")
# The maximum number of tags allowed for a Cyclades Virtual Machine
CYCLADES_VM_MAX_TAGS = 50
# The maximum allowed metadata items for a Cyclades Virtual Machine
CYCLADES_VM_MAX_METADATA = 10
# Define cache for public stats
PUBLIC_STATS_CACHE = {
"BACKEND": "django.core.cache.backends.locmem.LocMemCache",
"LOCATION": "",
"KEY_PREFIX": "publicstats",
"TIMEOUT": 300,
}
# Permit users of specific groups to override the flavor allow_create policy
CYCLADES_FLAVOR_OVERRIDE_ALLOW_CREATE = {}
# Define cache for VM password
VM_PASSWORD_CACHE = {
"BACKEND": "django.core.cache.backends.locmem.LocMemCache",
"LOCATION": "",
"KEY_PREFIX": "vmpassword",
"TIMEOUT": None,
}
|
grnet/synnefo
|
snf-cyclades-app/synnefo/app_settings/default/api.py
|
Python
|
gpl-3.0
| 8,466
|
#!/usr/bin/env python
digits = {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9'}
found = True
while found:
input_string = input('Please give me some digits... \n')
found = False
for character in input_string:
if character not in digits:
# we have a non digit!
print('Error, you gave me non digits')
found = True
break
print('starting real work on', input_string)
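# Illustrative alternative, shown only for comparison: the built-in
# str.isdigit() performs a similar all-digits check (it also accepts some
# non-ASCII digit characters, so it is not strictly equivalent).
def _all_digits(text):
    """Return True if text is non-empty and consists only of digits."""
    return len(text) > 0 and text.isdigit()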
|
veltzer/demos-python
|
src/exercises/basic/digits_report/solution9.py
|
Python
|
gpl-3.0
| 433
|
#
# Copyright © 2012 - 2021 Michal Čihař <michal@cihar.com>
#
# This file is part of Weblate <https://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
"""File format specific behavior."""
from weblate.formats.convert import (
HTMLFormat,
IDMLFormat,
OpenDocumentFormat,
PlainTextFormat,
WindowsRCFormat,
)
from weblate.formats.helpers import BytesIOMode
from weblate.formats.tests.test_formats import AutoFormatTest
from weblate.trans.tests.utils import get_test_file
IDML_FILE = get_test_file("en.idml")
HTML_FILE = get_test_file("cs.html")
OPENDOCUMENT_FILE = get_test_file("cs.odt")
TEST_RC = get_test_file("cs-CZ.rc")
TEST_TXT = get_test_file("cs.txt")
class ConvertFormatTest(AutoFormatTest):
NEW_UNIT_MATCH = None
EXPECTED_FLAGS = ""
def parse_file(self, filename):
return self.FORMAT(filename, template_store=self.FORMAT(filename))
class HTMLFormatTest(ConvertFormatTest):
FORMAT = HTMLFormat
FILE = HTML_FILE
MIME = "text/html"
EXT = "html"
COUNT = 5
MASK = "*/translations.html"
EXPECTED_PATH = "cs_CZ/translations.html"
FIND_CONTEXT = "+html.body.p:5-1"
FIND_MATCH = "Orangutan has five bananas."
MATCH = b"<body>"
NEW_UNIT_MATCH = None
BASE = HTML_FILE
EXPECTED_FLAGS = ""
EDIT_OFFSET = 1
class OpenDocumentFormatTest(ConvertFormatTest):
FORMAT = OpenDocumentFormat
FILE = OPENDOCUMENT_FILE
MIME = "application/vnd.oasis.opendocument.text"
EXT = "odt"
COUNT = 4
MASK = "*/translations.odt"
EXPECTED_PATH = "cs_CZ/translations.odt"
FIND_CONTEXT = (
"odf///office:document-content[0]/office:body[0]/office:text[0]/text:p[1]"
)
FIND_MATCH = "Orangutan has five bananas."
MATCH = b"PK"
NEW_UNIT_MATCH = None
BASE = OPENDOCUMENT_FILE
EXPECTED_FLAGS = ""
EDIT_OFFSET = 1
@staticmethod
def extract_document(content):
return bytes(
OpenDocumentFormat.convertfile(BytesIOMode("test.odt", content), None)
).decode()
def assert_same(self, newdata, testdata):
self.assertEqual(
self.extract_document(newdata),
self.extract_document(testdata),
)
class IDMLFormatTest(ConvertFormatTest):
FORMAT = IDMLFormat
FILE = IDML_FILE
MIME = "application/octet-stream"
EXT = "idml"
COUNT = 6
MASK = "*/translations.idml"
EXPECTED_PATH = "cs_CZ/translations.idml"
FIND_CONTEXT = (
"idPkg:Story[0]/{}Story[0]/{}XMLElement[0]/{}ParagraphStyleRange[0]"
"Stories/Story_mainmainmainmainmainmainmainmainmainmainmainu188.xml"
)
FIND_MATCH = """<g id="0"><g id="1">THE HEADLINE HERE</g></g>"""
MATCH = b"PK"
NEW_UNIT_MATCH = None
BASE = IDML_FILE
EXPECTED_FLAGS = ""
EDIT_OFFSET = 1
@staticmethod
def extract_document(content):
return bytes(
IDMLFormat.convertfile(BytesIOMode("test.idml", content), None)
).decode()
def assert_same(self, newdata, testdata):
self.assertEqual(
self.extract_document(newdata),
self.extract_document(testdata),
)
class WindowsRCFormatTest(ConvertFormatTest):
FORMAT = WindowsRCFormat
FILE = TEST_RC
BASE = TEST_RC
MIME = "text/plain"
EXT = "rc"
COUNT = 5
MASK = "rc/*.rc"
EXPECTED_PATH = "rc/cs-CZ.rc"
MATCH = "STRINGTABLE"
FIND_CONTEXT = "STRINGTABLE.IDS_MSG1"
FIND_MATCH = "Hello, world!\n"
EDIT_OFFSET = 1
class PlainTextFormatTest(ConvertFormatTest):
FORMAT = PlainTextFormat
FILE = TEST_TXT
BASE = TEST_TXT
MIME = "text/plain"
EXT = "txt"
COUNT = 5
MASK = "txt/*.txt"
EXPECTED_PATH = "txt/cs_CZ.txt"
MATCH = "Hello"
FIND_CONTEXT = "cs.txt:2"
FIND_MATCH = "Hello, world!"
EDIT_OFFSET = 1
|
phw/weblate
|
weblate/formats/tests/test_convert.py
|
Python
|
gpl-3.0
| 4,423
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# HiPart is a program to analyze the electronic structure of molecules with
# fuzzy-atom partitioning methods.
# Copyright (C) 2007 - 2012 Toon Verstraelen <Toon.Verstraelen@UGent.be>
#
# This file is part of HiPart.
#
# HiPart is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# HiPart is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>
#
#--
from hipart.gint.gint_ext import gaux
import numpy
from scipy.special import erf
def test_gaux():
for t in 0.1, 0.5, 0.7, 1.0, 2.0, 4.0:
        u1 = gaux(t, 0)
        u2 = numpy.sqrt(numpy.pi/t)/2*erf(numpy.sqrt(t))
        # Compare the numerical and analytical values; the tolerance below is
        # an assumption added to complete the check.
        assert abs(u1 - u2) < 1e-10
|
molmod/hipart
|
hipart/gint/tests/test_gaux.py
|
Python
|
gpl-3.0
| 1,117
|
#!/usr/bin/env python
import numpy as np
def tran_op(op, tmat):
"""
transform quantum operator from representation A to
another representation B
Args:
op: the matrix form of operator in representation A
tmat: the unitary transform matrix
"""
return np.dot(np.dot(np.conj(np.transpose(tmat)), op), tmat)
def tmat_c2r(case, ispin=False):
"""
the transform matrix from complex shperical harmonics to
real spherical harmonics
Args:
case: label for different systems
ispin: whether to include spin or not
"""
sqrt2 = np.sqrt(2.0)
ci = np.complex128(0.0+1.0j)
cone = np.complex128(1.0+0.0j)
if case.strip() == 's':
nband = 1
t_c2r = np.zeros((nband, nband), dtype=np.complex128)
t_c2r[0,0] = cone
elif case.strip() == 'p':
nband = 3
t_c2r = np.zeros((nband, nband), dtype=np.complex128)
# px=1/sqrt(2)( |1,-1> - |1,1> )
t_c2r[0,0] = cone/sqrt2
t_c2r[2,0] = -cone/sqrt2
# py=i/sqrt(2)( |1,-1> + |1,1> )
t_c2r[0,1] = ci/sqrt2
t_c2r[2,1] = ci/sqrt2
# pz=|1,0>
t_c2r[1,2] = cone
elif case.strip() == 'pwien': # in wien by default px py pz
nband = 3
t_c2r = np.zeros((nband, nband), dtype=np.complex128)
# px=1/sqrt(2)( |1,-1> - |1,1> )
t_c2r[0,0] = cone/sqrt2
t_c2r[2,0] = -cone/sqrt2
# py=i/sqrt(2)( |1,-1> + |1,1> )
t_c2r[0,1] = ci/sqrt2
t_c2r[2,1] = ci/sqrt2
# pz=|1,0>
t_c2r[1,2] = cone
elif case.strip() == 'pwann': # p in wannier basis vasp
nband = 3
t_c2r = np.zeros((nband, nband), dtype=np.complex128)
t_r2w = np.zeros((nband, nband), dtype=np.complex128)
t_c2w = np.zeros((nband, nband), dtype=np.complex128)
# px=1/sqrt(2)( |1,-1> - |1,1> )
t_c2r[0,0] = cone/sqrt2
t_c2r[2,0] = -cone/sqrt2
# py=i/sqrt(2)( |1,-1> + |1,1> )
t_c2r[0,1] = ci/sqrt2
t_c2r[2,1] = ci/sqrt2
# pz=|1,0>
t_c2r[1,2] = cone
# pz = (px,py,pz) (0,0,1)^T
t_r2w[2,0] = cone
# px = (px,py,pz) (1,0,0)^T
t_r2w[0,1] = cone
# py = (px,py,pz) (0,1,0)^T
t_r2w[1,2] = cone
t_c2w = np.dot(t_c2r,t_r2w)
t_c2r = t_c2w
elif case.strip() == 't2g':
nband = 3
t_c2r = np.zeros((nband, nband), dtype=np.complex128)
# dzx --> py=i/sqrt(2)( |1,-1> + |1,1> )
t_c2r[0,0] = ci/sqrt2
t_c2r[2,0] = ci/sqrt2
# dzy --> px=1/sqrt(2)( |1,-1> - |1,1> )
t_c2r[0,1] = cone/sqrt2
t_c2r[2,1] = -cone/sqrt2
# dxy --> pz=|1,0>
t_c2r[1,2] = cone
elif case.strip() == 'd':
nband = 5
t_c2r = np.zeros((nband, nband), dtype=np.complex128)
# dz2=|2,0>
t_c2r[2,0] = cone
# dzx=1/sqrt(2)( |2,-1> - |2,1> )
t_c2r[1,1] = cone/sqrt2
t_c2r[3,1] = -cone/sqrt2
# dzy=i/sqrt(2)( |2,-1> + |2,1> )
t_c2r[1,2] = ci/sqrt2
t_c2r[3,2] = ci/sqrt2
# dx2-y2=1/sqrt(2)( |2,-2> + |2,2> )
t_c2r[0,3] = cone/sqrt2
t_c2r[4,3] = cone/sqrt2
# dxy=i/sqrt(2)( |2,-2> - |2,2> )
t_c2r[0,4] = ci/sqrt2
t_c2r[4,4] = -ci/sqrt2
elif case.strip() == 'dwien': # by default wien2k: dxy dzx dyz dx2-y2 dz2
nband = 5
t_c2r = np.zeros((nband, nband), dtype=np.complex128)
# dz2=|2,4>
t_c2r[2,4] = cone
# dzx=1/sqrt(2)( |2,-1> - |2,1> )
t_c2r[1,1] = cone/sqrt2
t_c2r[3,1] = -cone/sqrt2
# dzy=i/sqrt(2)( |2,-1> + |2,1> )
t_c2r[1,2] = ci/sqrt2
t_c2r[3,2] = ci/sqrt2
# dx2-y2=1/sqrt(2)( |2,-2> + |2,2> )
t_c2r[0,3] = cone/sqrt2
t_c2r[4,3] = cone/sqrt2
# dxy=i/sqrt(2)( |2,-2> - |2,2> )
t_c2r[0,0] = ci/sqrt2
t_c2r[4,0] = -ci/sqrt2
elif case.strip() == 'f':
nband = 7
t_c2r = np.zeros((nband, nband), dtype=np.complex128)
# fz3 = |3,0>
t_c2r[3, 0] = cone
# fxz2 = 1/sqrt(2)( |3,-1> - |3,1> )
t_c2r[2, 1] = cone/sqrt2
t_c2r[4, 1] = -cone/sqrt2
# fyz2 = i/sqrt(2)( |3,-1> + |3,1> )
t_c2r[2, 2] = ci/sqrt2
t_c2r[4, 2] = ci/sqrt2
# fz(x2-y2) = 1/sqrt(2)( |3,-2> + |3,2> )
t_c2r[1, 3] = cone/sqrt2
t_c2r[5, 3] = cone/sqrt2
# fxyz = i/sqrt(2)( |3,-2> - |3,2> )
t_c2r[1, 4] = ci/sqrt2
t_c2r[5, 4] = -ci/sqrt2
# fx(x2-3y2) = 1/sqrt(2) ( |3,-3> - |3,3> )
t_c2r[0, 5] = cone/sqrt2
t_c2r[6, 5] = -cone/sqrt2
# fy(3x2-y2) = i/sqrt(2) ( |3,-3> + |3,3> )
t_c2r[0, 6] = ci/sqrt2
t_c2r[6, 6] = ci/sqrt2
elif case.strip() == 'fwien': # fxz2 fyz2 fz3 fx(x2-3y2) fy(3x2-y2) fz(x2-y2) fxyz
nband = 7
t_c2r = np.zeros((nband, nband), dtype=np.complex128)
# fz3 = |3,0>
t_c2r[3, 2] = cone
# fxz2 = 1/sqrt(2)( |3,-1> - |3,1> )
t_c2r[2, 0] = cone/sqrt2
t_c2r[4, 0] = -cone/sqrt2
# fyz2 = i/sqrt(2)( |3,-1> + |3,1> )
t_c2r[2, 1] = ci/sqrt2
t_c2r[4, 1] = ci/sqrt2
# fz(x2-y2) = 1/sqrt(2)( |3,-2> + |3,2> )
t_c2r[1, 5] = cone/sqrt2
t_c2r[5, 5] = cone/sqrt2
# fxyz = i/sqrt(2)( |3,-2> - |3,2> )
t_c2r[1, 6] = ci/sqrt2
t_c2r[5, 6] = -ci/sqrt2
# fx(x2-3y2) = 1/sqrt(2) ( |3,-3> - |3,3> )
t_c2r[0, 3] = cone/sqrt2
t_c2r[6, 3] = -cone/sqrt2
# fy(3x2-y2) = i/sqrt(2) ( |3,-3> + |3,3> )
t_c2r[0, 4] = ci/sqrt2
t_c2r[6, 4] = ci/sqrt2
else:
print "don't support t_c2r for this case: ", case
return
if ispin:
norbs=2*nband
t_c2r_spin = np.zeros((norbs,norbs), dtype=np.complex128)
t_c2r_spin[0:norbs:2,0:norbs:2] = t_c2r
t_c2r_spin[1:norbs:2,1:norbs:2] = t_c2r
return t_c2r_spin
else:
return t_c2r
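# Illustrative self-check sketch: a basis-change matrix returned by tmat_c2r()
# should be unitary, i.e. T^dagger T = 1.  The default case label and the
# np.allclose tolerance are assumptions made for the example.
def _check_c2r_unitary(case='d', ispin=False):
    t = tmat_c2r(case, ispin)
    n = t.shape[0]
    return np.allclose(np.dot(np.conj(np.transpose(t)), t), np.eye(n))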
def tmat_r2c(case, ispin=False):
"""
the transform matrix from real spherical harmonics to
complex shperical harmonics
Args:
case: label for different systems
ispin: whether to include spin or not
"""
return np.conj(np.transpose(tmat_c2r(case, ispin)))
def tmat_r2cub(ispin=False):
"""
the transform matrix from real spherical harmonics to the cubic
spherical harmonics, just for f system
Args:
ispin: whether to include spin or not
"""
a = np.sqrt(10.0) / 4.0 + 0.0j
b = np.sqrt(6.0) / 4.0 + 0.0j
c = 1.0 + 0.0j
nband = 7
t_r2cub = np.zeros((nband,nband), dtype=np.complex128)
# fx3 = -sqrt(6)/4 fxz2 + sqrt(10)/4 fx(x2-3y2)
t_r2cub[1, 0] = -b
t_r2cub[5, 0] = a
# fy3 = -sqrt(6)/4 fyz2 - sqrt(10)/4 fy(3x2-y2)
t_r2cub[2, 1] = -b
t_r2cub[6, 1] = -a
# fz3 = fz3
t_r2cub[0, 2] = c
# fx(y2-z2) = -sqrt(10)/4 fxz2 - sqrt(6)/4 fx(x2-3y2)
t_r2cub[1, 3] = -a
t_r2cub[5, 3] = -b
# fy(z2-x2) = sqrt(10)/4 fyz2 - sqrt(6)/4 fy(3x2-y2)
t_r2cub[2, 4] = a
t_r2cub[6, 4] = -b
# fz(x2-y2) = fz(x2-y2)
t_r2cub[3, 5] = c
# fxyz = fxyz
t_r2cub[4, 6] = c
if ispin:
norbs = 2 * nband
t_r2cub_spin = np.zeros((norbs, norbs), dtype=np.complex128)
t_r2cub_spin[0:norbs:2,0:norbs:2] = t_r2cub
t_r2cub_spin[1:norbs:2,1:norbs:2] = t_r2cub
return t_r2cub_spin
else:
return t_r2cub
def tmat_cub2r(ispin=False):
"""
the transform matrix from cubic spherical harmonics to
real spherical harmonics, just for f system
Args:
ispin: whether to include spin or not
"""
return np.conj( np.transpose( tmat_r2cub(ispin) ) )
def tmat_c2j(l):
"""
the transform matrix from complex shperical harmonics to
j2,jz basis
Args:
case: label for different systems
"""
if l == 1:
t_c2j = np.zeros((6, 6), dtype=np.complex128)
t_c2j[0,0] = -np.sqrt(2.0/3.0)
t_c2j[3,0] = np.sqrt(1.0/3.0)
t_c2j[2,1] = -np.sqrt(1.0/3.0)
t_c2j[5,1] = np.sqrt(2.0/3.0)
t_c2j[1,2] = 1.0
t_c2j[0,3] = np.sqrt(1.0/3.0)
t_c2j[3,3] = np.sqrt(2.0/3.0)
t_c2j[2,4] = np.sqrt(2.0/3.0)
t_c2j[5,4] = np.sqrt(1.0/3.0)
t_c2j[4,5] = 1.0
return t_c2j
elif l == 2:
t_c2j = np.zeros((10, 10), dtype=np.complex128)
t_c2j[0,0] = -np.sqrt(4.0/5.0)
t_c2j[3,0] = np.sqrt(1.0/5.0)
t_c2j[2,1] = -np.sqrt(3.0/5.0)
t_c2j[5,1] = np.sqrt(2.0/5.0)
t_c2j[4,2] = -np.sqrt(2.0/5.0)
t_c2j[7,2] = np.sqrt(3.0/5.0)
t_c2j[6,3] = -np.sqrt(1.0/5.0)
t_c2j[9,3] = np.sqrt(4.0/5.0)
t_c2j[1,4] = 1.0
t_c2j[0,5] = np.sqrt(1.0/5.0)
t_c2j[3,5] = np.sqrt(4.0/5.0)
t_c2j[2,6] = np.sqrt(2.0/5.0)
t_c2j[5,6] = np.sqrt(3.0/5.0)
t_c2j[4,7] = np.sqrt(3.0/5.0)
t_c2j[7,7] = np.sqrt(2.0/5.0)
t_c2j[6,8] = np.sqrt(4.0/5.0)
t_c2j[9,8] = np.sqrt(1.0/5.0)
t_c2j[8,9] = 1.0
return t_c2j
elif l == 3:
t_c2j = np.zeros((14,14), dtype=np.complex128)
t_c2j[0,0] = -np.sqrt(6.0/7.0)
t_c2j[3,0] = np.sqrt(1.0/7.0)
t_c2j[2,1] = -np.sqrt(5.0/7.0)
t_c2j[5,1] = np.sqrt(2.0/7.0)
t_c2j[4,2] = -np.sqrt(4.0/7.0)
t_c2j[7,2] = np.sqrt(3.0/7.0)
t_c2j[6,3] = -np.sqrt(3.0/7.0)
t_c2j[9,3] = np.sqrt(4.0/7.0)
t_c2j[8,4] = -np.sqrt(2.0/7.0)
t_c2j[11,4] = np.sqrt(5.0/7.0)
t_c2j[10,5] = -np.sqrt(1.0/7.0)
t_c2j[13,5] = np.sqrt(6.0/7.0)
t_c2j[1,6] = 1.0
t_c2j[0,7] = np.sqrt(1.0/7.0)
t_c2j[3,7] = np.sqrt(6.0/7.0)
t_c2j[2,8] = np.sqrt(2.0/7.0)
t_c2j[5,8] = np.sqrt(5.0/7.0)
t_c2j[4,9] = np.sqrt(3.0/7.0)
t_c2j[7,9] = np.sqrt(4.0/7.0)
t_c2j[6,10] = np.sqrt(4.0/7.0)
t_c2j[9,10] = np.sqrt(3.0/7.0)
t_c2j[8,11] = np.sqrt(5.0/7.0)
t_c2j[11,11] = np.sqrt(2.0/7.0)
t_c2j[10,12] = np.sqrt(6.0/7.0)
t_c2j[13,12] = np.sqrt(1.0/7.0)
t_c2j[12,13] = 1.0
return t_c2j
else:
print "NOT Implemented !!!"
def fourier_hr2hk(norbs, nkpt, kvec, nrpt, rvec, deg_rpt, hr):
"""
Fourier transform from R-space to K-space
Args:
norbs: number of orbitals
nkpt: number of K-points
kvec: fractional coordinate for K-points
nrpt: number of R-points
rvec: fractional coordinate for R-points
deg_rpt: the degenerate for each R-point
hr: Hamiltonian in R-space
Return:
hk: Hamiltonian in K-space
"""
hk = np.zeros((nkpt, norbs, norbs), dtype=np.complex128)
for i in range(nkpt):
#print "kvec", i, kvec[i,:]
for j in range(nrpt):
coef = 2*np.pi*np.dot(kvec[i,:], np.float64(rvec[j,:]))
ratio = (np.cos(coef) + np.sin(coef) * 1j) / np.float64(deg_rpt[j])
hk[i,:,:] = hk[i,:,:] + ratio * hr[j,:,:]
return hk
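# Illustrative sketch of fourier_hr2hk(): with a single 1x1 "Hamiltonian" block
# sitting at R = (1, 0, 0) with degeneracy 1, H(k) is just the phase factor
# exp(2*pi*i*k.R) times H(R).  All values below are made up for the example.
def _fourier_hr2hk_example():
    norbs, nkpt, nrpt = 1, 1, 1
    kvec = np.array([[0.25, 0.0, 0.0]])
    rvec = np.array([[1, 0, 0]])
    deg_rpt = np.array([1])
    hr = np.ones((nrpt, norbs, norbs), dtype=np.complex128)
    hk = fourier_hr2hk(norbs, nkpt, kvec, nrpt, rvec, deg_rpt, hr)
    # Expected value for k = (0.25, 0, 0): cos(pi/2) + i*sin(pi/2) = 1j.
    return hk[0, 0, 0]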
def fourier_hr2h1k(norbs, kvec, nrpt, rvec, deg_rpt, hr):
"""
Fourier transform from R-space to K-space
Args:
norbs: number of orbitals
nkpt: number of K-points
kvec: fractional coordinate for K-points
nrpt: number of R-points
rvec: fractional coordinate for R-points
deg_rpt: the degenerate for each R-point
hr: Hamiltonian in R-space
Return:
hk: Hamiltonian in K-space
"""
hk = np.zeros((norbs, norbs), dtype=np.complex128)
for i in range(nrpt):
coef = 2*np.pi*np.dot(kvec, np.float64(rvec[i,:]))
ratio = (np.cos(coef) + np.sin(coef) * 1.0j) / np.float64(deg_rpt[i])
hk[:,:] = hk[:,:] + ratio * hr[i,:,:]
return hk
def myfourier_hr2hk(norbs, nkpt, kvec, nrpt, rvec, deg_rpt, hr):
"""
Fourier transform from R-space to K-space
Args:
norbs: number of orbitals
nkpt: number of K-points
kvec: fractional coordinate for K-points
nrpt: number of R-points
rvec: fractional coordinate for R-points
deg_rpt: the degenerate for each R-point
hr: Hamiltonian in R-space
Return:
hk: Hamiltonian in K-space
"""
print "ALERT: Gauge changed: not exp(-ikR) but exp(ikR)*exp(ik*tau_mu)"
hk = np.zeros((nkpt, norbs, norbs), dtype=np.complex128)
for i in range(nkpt):
for j in range(nrpt):
coef = 2*np.pi*np.dot(kvec[i,:], rvec[j,:])
ratio = (np.cos(coef) + np.sin(coef) * 1j) / float(deg_rpt[j])
hk[i,:,:] = hk[i,:,:] + ratio * hr[j,:,:]
return hk
def fourier_hr2hk_gauge(norbs, nkpt, kvec, nrpt, rvec, deg_rpt, hr):
"""
Fourier transform from R-space to K-space
Args:
norbs: number of orbitals
nkpt: number of K-points
kvec: fractional coordinate for K-points
nrpt: number of R-points
rvec: fractional coordinate for R-points
deg_rpt: the degenerate for each R-point
hr: Hamiltonian in R-space
Return:
hk: Hamiltonian in K-space
"""
hk = np.zeros((nkpt, norbs, norbs), dtype=np.complex128)
for i in range(nkpt):
print "kvec", i, kvec[i,:]
for j in range(nrpt):
coef = 2*np.pi*np.dot(kvec[i,:], rvec[j,:])
ratio = (np.cos(coef) + np.sin(coef) * 1j) / float(deg_rpt[j])
hk[i,:,:] = hk[i,:,:] + ratio * hr[j,:,:]
return hk
|
quanshengwu/wannier_tools
|
utility/wannhr_symm/lib/tran.py
|
Python
|
gpl-3.0
| 13,790
|
import textacy.texts as ttx
import textacy.preprocess as pre
import feedparser as fp
class FeedCorpus(ttx.TextCorpus):
"""
Extends textacy TextCorpus with methods for feeds
"""
def from_feed(self, url):
fdict = fp.parse(url)
for entry in fdict.entries:
# Each entry may have multiple pieces of content. Here they're just concatenated.
body = ""
for c in entry.content:
body += " " + c.value
# Preprocessing
body = pre.preprocess_text(body, no_urls=True, no_emails=True, no_phone_numbers=True)
metadata = {'title': entry.title,
'author': entry.author,
'date_updated': entry.updated,
'publication_title': fdict.feed.title}
self.add_text(body, metadata = metadata)
|
morrna/RSStacy
|
RSStacy.py
|
Python
|
gpl-3.0
| 862
|
#!/usr/bin/python
AGO_TELLSTICK_VERSION = '0.0.9'
"""
############################################
#
# Tellstick Duo class
#
# Date of origin: 2014-01-25
#
__author__ = "Joakim Lindbom"
__copyright__ = "Copyright 2014, Joakim Lindbom"
__credits__ = ["Joakim Lindbom", "The ago control team"]
__license__ = "GPL Public License Version 3"
__maintainer__ = "Joakim Lindbom"
__email__ = 'Joakim.Lindbom@gmail.com'
__status__ = "Experimental"
__version__ = AGO_TELLSTICK_VERSION
############################################
"""
from tellstickbase import tellstickbase
import td
class tellstickduo(tellstickbase):
"""Class used for Tellstick & Tellstick Duo devices"""
def __get__(self, obj, objtype=None):
pass
def __set__(self, obj, val):
pass
def __delete__(self, obj):
pass
def __init__(self):
self.SensorEvent = None
def init(self, SensorPollDelay, TempUnits):
# TELLSTICK_BELL | TELLSTICK_TOGGLE | TELLSTICK_LEARN | TELLSTICK_EXECUTE | TELLSTICK_UP | TELLSTICK_DOWN | TELLSTICK_STOP
td.init(defaultMethods=td.TELLSTICK_TURNON | td.TELLSTICK_TURNOFF | td.TELLSTICK_DIM)
self.log.info("Init executed")
def close(self):
return td.close()
def turnOn(self, devId):
resCode = td.turnOn(devId)
return self.getErrorString(resCode).lower()
def turnOff(self, devId):
resCode = td.turnOff(devId)
return self.getErrorString(resCode).lower()
def getErrorString(self, resCode):
return td.getErrorString(resCode)
def dim(self, devId, level):
resCode = td.dim(devId, level)
return self.getErrorString(resCode).lower()
def getName(self, devId):
return td.getName(devId)
def methodsReadable(self, method, default):
return td.methodsReadable(method, default)
def getNumberOfDevices(self):
return td.getNumberOfDevices()
def getNumberOfSensors(self):
return td.getNumberOfDevices() # wrong
def getDeviceId(self, i):
return td.getDeviceId(i)
def getModel(self, devId):
return td.getModel(devId)
def registerDeviceEvent(self, deviceEvent):
return td.registerDeviceEvent(deviceEvent)
def registerDeviceChangedEvent(self, deviceEvent):
return td.registerDeviceChangedEvent(deviceEvent)
def newTempSensor(self, devId, model, value):
self.log.debug("New temperature sensor intercepted: devId=" + devId + " model=" + model)
s = {
"id" : devId,
"description" : "",
"model" : model,
"new" : True,
"temp" : float(value), # C/F
"offset" : 0.0, # TODO: Add to parameter & config file
"lastTemp" : float(-274.0),
"isTempSensor" : True,
"isHumiditySensor" : False,
"ignore" : False}
return s
def newHumiditySensor(self, devId, model, value):
self.log.debug("New humidity sensor intercepted: devId=" + devId + " model=" + model)
s = {
"id" : devId,
"description" : "",
"model" : model,
"new" : True,
"humidity" : float(value),
"offset" : 0.0, # TODO: Add to parameter & config file
"lastHumidity" : float(-999.0),
"isHumiditySensor" : True,
"isTempSensor" : False,
"ignore" : False}
return s
def SensorEventInterceptor(self, protocol, model, id, dataType, value, timestamp, callbackId):
devId = 'S' + str(id) # Prefix 'S' to make sure name doesn't clash with self-defined devices
devIdT = devId + "-temp"
devIdH = devId + "-hum"
# self.checkIgnore(self, devId) #TODO: Add once moved
self.log.trace("SensorEventInterceptor called for " + devId)
if str(id) not in self.ignoreDevices:
# New temperature sensor?
if devIdT not in self.sensors and dataType & td.TELLSTICK_TEMPERATURE == td.TELLSTICK_TEMPERATURE:
self.sensors[devIdT] = self.newTempSensor(devIdT, model, value)
# New humidity sensor?
if devIdH not in self.sensors and dataType & td.TELLSTICK_HUMIDITY == td.TELLSTICK_HUMIDITY:
self.sensors[devIdH] = self.newHumiditySensor(devIdH, model, value)
# Call registered callback
self.SensorEvent(protocol, model, devId, dataType, value, timestamp, callbackId)
def registerSensorEvent(self, deviceEvent):
self.SensorEvent = deviceEvent
return td.registerSensorEvent(self.SensorEventInterceptor)
def listSensors(self):
sensors = td.listSensors()
if len(sensors) != 0:
for id, value in sensors.iteritems():
self.log.trace("listSensors: devId: %s ", str(id))
if id not in self.ignoreDevices:
devId = str(id) + "-temp"
if devId not in self.sensors:
if value["isTempSensor"]:
# New temp sensor found
self.sensors[devId] = self.newTempSensor(devId, value["model"], value["temp"])
devId = str(id) + "-hum"
if devId not in self.sensors:
if value["isHumiditySensor"]:
# New hum sensor found
self.sensors[devId] = self.newHumiditySensor(devId, value["model"], value["humidity"])
if not value["new"]:
continue
return self.sensors
def listSwitches(self):
if len(self.switches) == 0:
for i in range(self.getNumberOfDevices()):
devId = self.getDeviceId(i)
model = self.getModel(devId)
if ('switch' in model or 'dimmer' in model):
dev = {
"id" : devId,
"name" : self.getName(devId),
"model" : model}
if 'dimmer' in model:
dev["isDimmer"] = True
else:
dev["isDimmer"] = False
self.switches[devId] = dev
return self.switches
def listRemotes(self):
self.log.trace("listRemotes start")
if len(self.remotes) == 0:
self.log.info("getNumberOfDevices=" + str(self.getNumberOfDevices()))
for i in range(self.getNumberOfDevices()):
devId = self.getDeviceId(i)
model = self.getModel(devId)
self.log.info("devId=" + str(devId) + " model=" + model)
if 'switch' not in model and 'dimmer' not in model:
dev = {
"id" : str(devId),
"name" : self.getName(devId),
"model" : model}
self.log.info("devId=" + str(devId) + " model=" + model)
self.remotes[devId] = dev
return self.remotes
|
JoakimLindbom/ago
|
tellstick/tellstickduo.py
|
Python
|
gpl-3.0
| 7,092
|
# EMU code from https://github.com/rainforestautomation/Emu-Serial-API
from emu import *
import sys
import json
import msgpack
from xbos import get_client
from bw2python.bwtypes import PayloadObject
import time
with open("params.json") as f:
try:
params = json.loads(f.read())
except ValueError as e:
print "Invalid parameter file"
sys.exit(1)
emu_instance = emu(params["port"])
emu_instance.start_serial()
# get network info
emu_instance.get_network_info()
while not hasattr(emu_instance, 'NetworkInfo'):
time.sleep(10)
macid = emu_instance.NetworkInfo.DeviceMacId
c = get_client(agent=params["agent"], entity=params["entity"])
PONUM = (2,0,9,1)
baseuri = params["baseuri"]
signaluri = "{0}/s.emu2/{1}/i.meter/signal/meter".format(baseuri, macid)
print ">",signaluri
def send_message(msg):
"""
msg has keys:
current_demand
current_price
current_tier
current_summation_delivered
current_summation_received
"""
po = PayloadObject(PONUM, None, msgpack.packb(msg))
c.publish(signaluri, payload_objects=(po,))
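# Illustrative payload for send_message(); every value below is made up:
# send_message({
#     'current_time': 1600000000.0,
#     'current_price': 0.12,
#     'current_tier': 1,
#     'current_demand': 1500,
#     'current_summation_delivered': 12345.678,
#     'current_summation_received': 0.0,
# })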
msg = {}
while True:
#print emu_instance.get_instantaneous_demand()
emu_instance.get_current_summation_delivered()
emu_instance.get_instantaneous_demand('Y')
emu_instance.get_current_price('Y')
time.sleep(10)
    msg['current_time'] = time.time()  # EMU's TimeStamp field counts seconds since 00:00:00 1 Jan 2000; time.time() is used here instead
# handle PriceCluster
if hasattr(emu_instance, "PriceCluster"):
pc = emu_instance.PriceCluster
print dir(emu_instance.PriceCluster)
msg['current_price'] = float(int(pc.Price, 16)) / (10**int(pc.TrailingDigits,16))
msg['current_tier'] = int(pc.Tier, 16)
# handle demand
if hasattr(emu_instance, "InstantaneousDemand"):
d = emu_instance.InstantaneousDemand
msg['current_demand'] = int(d.Demand, 16)
print dir(emu_instance)
# handle current summation
if hasattr(emu_instance, "CurrentSummationDelivered"):
d = emu_instance.CurrentSummationDelivered
multiplier = int(d.Multiplier, 16)
divisor = float(int(d.Divisor, 16))
msg['current_summation_delivered'] = int(d.SummationDelivered, 16) * multiplier / divisor
msg['current_summation_received'] = int(d.SummationReceived, 16) * multiplier / divisor
send_message(msg)
emu_instance.stop_serial()
|
SoftwareDefinedBuildings/bw2-contrib
|
driver/emu2/driver.py
|
Python
|
gpl-3.0
| 2,380
|
# -*- coding: utf-8 -*
# Filename: div.py
__author__ = 'Piratf'
from pygame.locals import *
import pygame
class Div(object):
"""small panel in the frame"""
def __init__(self, (width, height), (x, y) = (0, 0)):
# super(Div, self).__init__()
self.width = width
self.height = height
        self.x = x
        self.y = y
self.rect = Rect((x, y), (width, height))
|
piratf/Game_Boxes
|
lib/div.py
|
Python
|
gpl-3.0
| 404
|
# -*- coding: utf-8 -*-
import ConfigParser, sys, os, urllib2, json, time, shutil, filecmp, unicodedata  # unicodedata is required by decode()/remove_accents() below
import Levenshtein
config = ConfigParser.ConfigParser()
config.read("config.ini")
def clean(chaine):
#print chaine
return chaine.lower().strip()
def decode(chaine):
chaine = chaine.replace(u"\u2018", "'").replace(u"\u2019", "'")
try:
chaine = unicodedata.normalize('NFKD', chaine).encode('ascii','ignore')
return chaine
except:
return chaine
def remove_accents(input_str):
try:
nkfd_form = unicodedata.normalize('NFKD', unicode(input_str))
return u"".join([c for c in nkfd_form if not unicodedata.combining(c)])
except:
return input_str
def cc(i):
return decode(clean(remove_accents(i)))
def getKey(item):
return item[0]
class playlist:
def __init__(self, limit, page=1, period="overall"):
self.api_key = config.get("lastfm",'key')
self.music_dir = config.get("lastfm",'directory')
self.page = page
self.mp_dir = config.get("lastfm",'mudir')
self.user = config.get("lastfm",'user')
self.dossier = os.listdir(self.music_dir)
self.period = period
self.limit = limit
self.notfound = []
#for i in req!
def lastfm(self, meth):
try:
url = 'http://ws.audioscrobbler.com/2.0/?api_key='+self.api_key+'&autocorrect=1'+meth+'&format=json&page='+str(self.page)
txt = urllib2.urlopen(url).read()
return json.loads(txt)
except urllib2.HTTPError:
#print '\n Error : '+art
return None
def toptracks(self):
url = '&method=user.gettoptracks&user='+self.user+'&limit='+self.limit+'&period='+self.period;
req = self.lastfm(url)
for i in req["toptracks"]["track"]:
#if cc(i['artist']['name']) == "high tone":
yield {'name':i['name'],'artist':cc(i['artist']['name'])}
"""Rechercher le dossier artiste, exacte ou levenshtein inferieure a longueur moins 2"""
def findartist(self, artist):
textlog = " find (" + artist + "):\n"
lev = {}
        # Each artist directory in the music folder
for art in self.dossier:
ar = cc(art)
            # Exact match (no capitals, no accents, no spaces)
if ar == artist:
##print "YES BITCH"
return art
            # Levenshtein distance: keep the candidate if the difference is not too large
elif abs(len(ar) - len(artist)) < 5:
l = Levenshtein.distance(ar, artist)
if l < (len(art)/2):
if not l in lev.keys():
lev[l] = []
lev[l].append(art)
        # Process the collected candidates
textlog += str(lev) + "\n"
if lev != {} and len( lev[min(lev.keys())] ) == 1:
##print lev[min(lev.keys())][0]
##print "YES BIS BITCHY BITCH"
return lev[min(lev.keys())][0]
else:
pass ##print textlog
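    # Illustrative note: findartist() keeps a dict keyed by Levenshtein
    # distance, e.g. {1: ['Radiohead'], 3: ['Portishead']}, and only accepts a
    # match when the smallest distance has exactly one candidate.  The artist
    # names here are invented for illustration.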
"""Rechercher le dossier artiste, exacte ou levenshtein inferieure a longueur moins 2"""
def findtrack(self, artist, track, i=0, lev=False):
        # Walk every entry in the directory
base = self.music_dir + "/" + artist
for fil in os.listdir(base):
if os.path.isdir(base +"/"+ fil):
##print ("findtrack " + artist + " / " + fil + " - " + track)
try:
for result in self.findtrack(artist + "/" + fil, track, i=i+1, lev=lev):
yield result
except UnicodeDecodeError:
pass
if os.path.isfile(base +"/"+ fil):
if lev:
nfil = cc(clean(unicode(fil[:-4],'utf-8')))
ntr = cc(clean(track))
l = Levenshtein.distance(ntr, nfil)
if l < len(ntr):
##print "lev |" + ntr + "|" + nfil + "|"
##print str(l) + " - " + str(len(cc(track)))
yield [l, base+"/"+fil]
else:
if clean(track) in clean(unicode(fil,'utf-8')):
##print base+"/"+fil
yield base+"/"+fil
def mkdirs(self, li, pat):
if li != []:
dd = os.path.join(pat, li[0])
if not os.path.isdir( dd ):
##print "mkdir(" + dd+")"
os.mkdir(dd)
return self.mkdirs(li[1:], dd)
else:
return pat
def move(self, t):
dirs = t[len(self.music_dir)+1:].split("/")
new = self.mkdirs(dirs[:-1], self.mp_dir)
dst = os.path.join(new, dirs[-1])
if os.path.isfile( dst ):
if os.path.getsize(t) != os.path.getsize(dst):
os.remove(dst)
else:
return 1
shutil.copyfile(t, dst)
##print "exist"
#shutil.copyfile(t, dst)
def findtrackall(self, a, i):
for t in self.findtrack(a, i['name']):
return t
##print "### :: " + i['artist'] + '-' + i['name'] + ""
ties = []
for t in self.findtrack(a, i['name'], lev=True):
ties.append(t)
if len(ties) == 0:
return 0
if len(ties) == 1:
##print ties[0][1]
return ties[0][1]
else:
ties = sorted(ties, key=getKey)
##print ties[0][1]
return ties[0][1]
def run(self):
file = time.strftime("TOP"+self.limit+"_%m%d%H%M.m3u")
fo = open(file, 'w+')
number = 0
for i in self.toptracks():
number += 1
print number
#for i in [{'name':u"The sound of silence",'artist':u"Simon and Garfunkel"}]:
a = self.findartist(i['artist'])
t = 0
if a:
t = self.findtrackall(a, i)
if t == 0:
t = self.findtrackall("Various Artists", i)
##print t
if t != 0:
fo.write(t+"\n")
if os.path.isdir( self.mp_dir ):
self.move(t)
else:
#print "###########"
#print i['artist'] + '-' + i['name']
pass
#print self.notfound
#print '--finished--'
fo.close()
# <?xml version="1.0" encoding="UTF-8"?>
# <playlist version="1" xmlns="http://xspf.org/ns/0/">
# <trackList>
# <track><location>file:///media/data/Musique/Cypress Hill/2010 - Rise Up/Cypress Hill - Rise Up - 13 - Armed and Dangerous.mp3</location></track>
# <track><location>file:///media/data/Musique/The Black Keys/Attack & Release/The Black Keys - Psychotic Girl.mp3</location></track>
# <track><location>file:///media/data/Musique/Odezenne/2012 - OVNI edition Louis XIV/13 - Hirondelles.mp3</location></track>
# </trackList>
# </playlist>
pass
if len(sys.argv) == 0 :
print "usage : python playlist.py length page"
else:
if len(sys.argv) <= 1 :
p = playlist(100)
elif len(sys.argv) <= 2 :
p = playlist(sys.argv[1])
elif len(sys.argv) <= 3 :
p = playlist(sys.argv[1], sys.argv[2])
else: p = playlist(sys.argv[1], sys.argv[2], sys.argv[3])
p.run()
|
pdevetto/misc
|
lastfm/playlist.py
|
Python
|
gpl-3.0
| 7,352
|
from api.callers.api_caller import ApiCaller
class ApiSystemStats(ApiCaller):
endpoint_url = '/system/stats'
endpoint_auth_level = ApiCaller.CONST_API_AUTH_LEVEL_DEFAULT
request_method_name = ApiCaller.CONST_REQUEST_METHOD_GET
|
PayloadSecurity/VxAPI
|
api/callers/system/api_system_stats.py
|
Python
|
gpl-3.0
| 241
|
import sys, os, json, time
from shapely.geometry import Polygon
# http://toblerity.org/shapely/manual.html
contains = {}
intersects = {}
dPoly = {}
unmatched = []
TRACTCOL = 'BoroCT2010' # rename this for 2000 census
def addPoly(coords):
polys = []
if (isinstance(coords[0][0], float)):
polys.append(Polygon(coords))
else:
for (c) in coords:
polys.extend(addPoly(c))
return polys
def inDistrict(tract):
tPoly = addPoly(tract['geometry']['coordinates'])
tractNum = tract['properties'][TRACTCOL]
intersects = set()
area = 0
intersection = {}
iap = {}
for (i) in range (0, len(tPoly)):
tractPolygon = tPoly[i]
area += tractPolygon.area
for (dn, dp) in dPoly.items():
for (p) in dp:
if (p.contains(tractPolygon)):
iap[dn] = 1
break;
elif (p.intersects(tractPolygon)):
intersects.add(dn)
if dn not in intersection:
intersection[dn] = p.intersection(tractPolygon).area
else:
intersection[dn] += p.intersection(tractPolygon).area
if (len(intersection) > 0):
for (dn, inter) in intersection.items():
iap[dn] = inter / area
return (tractNum, iap)
if __name__ == '__main__':
if (len(sys.argv) < 2):
print ("Usage: tract2council.py tract.json council.json")
exit()
tractfile = sys.argv[1]
councilfile = sys.argv[2]
for (f) in (tractfile, councilfile):
if (not os.path.isfile(f)):
print ("File " + f + " is not readable")
exit()
try:
with open(tractfile) as tractfo:
tractData = json.load(tractfo)
except Exception:
print ("Unable to read tract file " + tractfile)
exit()
try:
with open(councilfile) as councilfo:
councilData = json.load(councilfo)
except Exception as e:
print ("Unable to read council file " + councilfile+": {0}".format(e))
exit()
for (district) in councilData['features']:
dn = district['properties']['CounDist']
c = district['geometry']['coordinates']
dPoly[dn] = addPoly(c)
print ("there are " + str(len(tractData['features'])) + " census tracts")
for (tract) in tractData['features']:
(tn, i) = inDistrict(tract)
intersects[tn] = i
intersectsFile = 'tracts_' + str(round(time.time())) + '.json'
with open(intersectsFile, 'w') as intersectsfo:
json.dump(intersects, intersectsfo)
|
capntransit/tract2council
|
tract2council.py
|
Python
|
gpl-3.0
| 2,643
|
# AsteriskLint -- an Asterisk PBX config syntax checker
# Copyright (C) 2015-2016 Walter Doekes, OSSO B.V.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from .config import ConfigAggregator
from .dialplan import DialplanAggregator
from .file import FileReader
from .func_odbc import FuncOdbcAggregator
class FileConfigParser(ConfigAggregator, FileReader):
pass
class FileDialplanParser(DialplanAggregator, FileReader):
pass
class FileFuncOdbcParser(FuncOdbcAggregator, FileReader):
pass
|
ossobv/asterisklint
|
asterisklint/__init__.py
|
Python
|
gpl-3.0
| 1,096
|
from __future__ import print_function
from __future__ import division
import numpy as np
np.set_printoptions(threshold=np.inf)
import matplotlib.pyplot as plt
from matplotlib import cm
# from mpl_toolkits.mplot3d import axes3d
from matplotlib.colors import LogNorm
# import time
import math
# import cPickle
import gym as gym
from PIL import Image
from PIL import ImageOps
from collections import deque
import copy
import qnn
class q_learning():
def __init__(self,
gamma=0.99,
init_epsilon=1.0,
end_epsilon=0.1,
update_epsilon=True,
exploration_decrease_length = 1e6,
policy_mode='deterministic',
environment='MountainCar-v0',
# environment = 'Acrobot-v0',
lambda_=0.5,
plot_resolution=30,
nn_size_hidden = [300,400,400],
nn_batch_size = 50,
nn_learning_rate = 1e-4,
qnn_target = 'q-learning', # 'sarsa'
replay_memory_size = 1e6,
descent_method = 'grad',
dropout_keep_prob = 1.0,
ema_decay_rate = 0.999,
init_weights = None,
num_steps_until_train_step = None,
train_frequency = 1.0,
from_pixels = False,
repeat_action_times = 2,
reg_weight = 0.0,
do_pretrain = False,
pretrain_steps = 5000
):
self.from_pixels = from_pixels
self.repeat_action_times = repeat_action_times
self.frame_downscaling = 6
if num_steps_until_train_step is None:
num_steps_until_train_step = nn_batch_size
self.env = gym.make(environment)
self.env_name = environment
self.num_actions = self.env.action_space.n
self.prob_distrib = np.zeros(self.num_actions)
self.statedim = self.env.observation_space.shape[0]
# lengths of all the played episodes
self.episode_lengths = []
self.total_train_episodes = 0
# lengths of all the tested episodes
if self.env_name=='MountainCar-v0':
self.max_test_length = 10000
elif self.env_name=='CartPole-v0':
self.max_test_length = 10000
else:
self.max_test_length = 10000
self.test_lengths = []
self.test_lengths_std = []
self.test_its = []
self.test_runs_to_average = 5
self.plot_resolution = plot_resolution
self.lambda_ = lambda_
## stochastic or deterministic softmax-based actions
self.policy_mode = policy_mode
normalization_mean = None
normalization_var = None
if not self.from_pixels:
# STATE NORMALIZATION
print('Calculating normalization by random action sampling...')
states = []
while len(states) < 1e5:
self.env.reset()
done = False
while not done:
state, _, done, _ = self.env.step(self.env.action_space.sample())
states.append(state)
normalization_mean = np.mean(states, axis=(0)).astype(np.float32)
normalization_var = np.var(states, axis=(0)).astype(np.float32)
# if self.env_name == 'CartPole-v0':
# normalization_mean = np.zeros_like(normalization_mean)
# normalization_var = np.ones_like(normalization_var)
## exploration parameters
# too much exploration is wrong!!!
self.epsilon = init_epsilon # explore probability
self.init_epsilon = init_epsilon
self.end_epsilon = end_epsilon
self.exploration_decrease_length = exploration_decrease_length
self.update_epsilon = update_epsilon
self.total_runs = 0.
# too long episodes give too much negative reward!!!!
# self.max_episode_length = 1000000
# ----> Use gamma!!!!! TODO: slower decrease?
self.gamma = gamma # similar to 0.9
# DEPRECATED
if qnn_target == 'q-learning':
self.is_a_prime_external = False
elif qnn_target == 'sarsa':
self.is_a_prime_external = True
else:
raise ValueError("qnn_target must be 'q-learning' or 'sarsa'")
# set pixel state parameters
if self.from_pixels or True:
self.env.render()
self.img_height = self.env.viewer.height
self.img_width = self.env.viewer.width
self.reduced_height = 84#self.img_height//self.frame_downscaling
self.reduced_width = 84#self.img_width//self.frame_downscaling
self.qnn = qnn.qnn(self.statedim,
self.num_actions,
discount=self.gamma,
size_hidden=nn_size_hidden,
batch_size=nn_batch_size,
learning_rate=nn_learning_rate,
is_a_prime_external=self.is_a_prime_external,
replay_memory_size=replay_memory_size,
descent_method=descent_method,
keep_prob_val=dropout_keep_prob,
ema_decay_rate=ema_decay_rate,
normalization_mean=normalization_mean,
normalization_var=normalization_var,
env_name=environment,
init_weights=init_weights,
from_pixels=self.from_pixels,
input_width=self.reduced_width,
input_height=self.reduced_height,
input_channels=self.repeat_action_times,
reg_weight=reg_weight,
do_pretrain=do_pretrain,
pretrain_steps=pretrain_steps
)
self.learning_rate = nn_learning_rate
self.train_frequency = train_frequency
print('using environment', environment)
print('qnn target', qnn_target, self.is_a_prime_external, self.qnn.is_a_prime_external)
# epsilon-greedy but deterministic or stochastic is a choice
def policy(self, state, mode='deterministic', deepQ=False):
explore = bool(np.random.choice([1, 0], p=[self.epsilon, 1 - self.epsilon]))
# print(explore, features, end="")
if mode == 'deterministic' and not explore:
if deepQ:
q = self.qnn.evaluate_all_actions(state)
# print(state, q)
return np.argmax(q.squeeze())#np.random.choice(np.argwhere(q == np.amax(q)).flatten())
if not deepQ:
raise ValueError('Option not defined')
# q = features.dot(w)
# return np.random.choice(np.argwhere(q == np.amax(q)).flatten())
elif explore:
# print('explore')
return self.env.action_space.sample()
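# Sketch of the epsilon-greedy rule implemented above (illustrative, not from the
# repo): explore with probability epsilon, otherwise act greedily on Q.
def epsilon_greedy(q_values, epsilon, rng=np.random):
    if rng.random_sample() < epsilon:
        return rng.randint(len(q_values))  # random exploratory action
    return int(np.argmax(q_values))        # greedy action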
def get_render(self):
return np.asarray(\
ImageOps.flip(\
self.env.render('rgb_array')\
.convert('L')\
.resize((self.reduced_width, self.reduced_height), \
Image.BILINEAR)))
def get_cnn_input_tensor_from_deque(self, pixel_state_deque):
return np.swapaxes(\
np.swapaxes(\
np.array(pixel_state_deque, ndmin=4),1,2),2,3)
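# Shape check (illustrative): a deque of k grayscale frames of shape (H, W)
# becomes an NHWC batch tensor of shape (1, H, W, k) after the two swapaxes calls.
_frames = [np.zeros((84, 84)) for _ in range(2)]
_t = np.swapaxes(np.swapaxes(np.array(_frames, ndmin=4), 1, 2), 2, 3)
assert _t.shape == (1, 84, 84, 2)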
def deepq_learning(self, num_iter=1000, max_steps=5000, max_learning_steps=np.inf, learning_rate=None, reset_replay_memory=False):
if learning_rate is None:
learning_rate = self.learning_rate
if reset_replay_memory:
self.qnn.replay_memory.clear()
# Show initial state, since algorithm is highly biased by the initial conditions
if self.statedim == 2:
# print('last w', self.w)
self.plot_deepQ_policy(mode='deterministic')
self.plot_deepQ_function()
prev_writeout = self.qnn.samples_count
prev_writeout_1 = self.qnn.samples_count
ref_learning_steps = self.qnn.training_steps_count
is_first = True
for it in range(num_iter):
if (self.qnn.training_steps_count - ref_learning_steps) > max_learning_steps:
print('done training: reached max learning steps; started at', ref_learning_steps)
break
self.total_train_episodes += 1
# episode = []
prev_state = self.env.reset()
count = 0
done = False
if self.from_pixels:
# running list of the last pixel states
pixel_state = deque(maxlen=self.repeat_action_times)
# fill initially
for _ in range(self.repeat_action_times):
pixel_state.append(self.get_render())
state_tensor = self.get_cnn_input_tensor_from_deque(pixel_state)
# choose first action
if not self.from_pixels:
prev_action = self.policy(np.array(prev_state).reshape((1,-1)),
mode=self.policy_mode,
deepQ=True)
if self.from_pixels:
prev_action = self.policy(state_tensor,
mode=self.policy_mode,
deepQ=True)
# run episode
while (not done):
if self.from_pixels:
state_prev_tensor = state_tensor
pixel_state_prev = copy.copy(pixel_state) # shallow copy
if count > max_steps:
self.episode_lengths.append(count)
break
for _ in range(self.repeat_action_times):
count += 1
state, reward, done, info = self.env.step(prev_action)
if self.from_pixels:
pixel_state.append(self.get_render())
if done: break
if self.from_pixels:
state_tensor = self.get_cnn_input_tensor_from_deque(pixel_state)
if not self.from_pixels:
action = self.policy(np.array(state).reshape((1,-1)), mode=self.policy_mode, deepQ=True)
if self.from_pixels:
action = self.policy(state_tensor,
mode=self.policy_mode,
deepQ=True)
# action = self.policy(state, mode=self.policy_mode, deepQ=True)
# episode.append((state, action, reward))
# evaluation alone, to test a neural network
if not self.is_a_prime_external:
# Q learning
if not self.from_pixels:
self.qnn.train_batch(prev_state.reshape(1,-1),
np.array(prev_action).reshape(-1),
np.array(reward).reshape(-1),
state.reshape(1,-1),
done,
learning_rate=learning_rate,
train_frequency=self.train_frequency)
if self.from_pixels:
self.qnn.train_batch(state_prev_tensor,
np.array(prev_action).reshape(-1),
np.array(reward).reshape(-1),
state_tensor,
done,
learning_rate=learning_rate,
train_frequency=self.train_frequency)
# self.qnn.train_batch()
else:
# SARSA (not converging)
raise ValueError('Option not defined')
# self.qnn.train_batch(prev_state.reshape(1,-1), np.array(prev_action).reshape(-1), np.array(reward).reshape(-1), state.reshape(1,-1), np.array(action).reshape(-1))
prev_state = state
prev_action = action
if (done):
self.episode_lengths.append(count)
# decrease exploration
if self.update_epsilon:
if self.epsilon > 0.1:
self.epsilon -= (self.init_epsilon - self.end_epsilon)*(1./self.exploration_decrease_length)
# if (it + 1) % 5 == 0:
if self.qnn.training_steps_count > 0 or is_first:
if (self.qnn.samples_count - prev_writeout) > 1e4/self.train_frequency:
prev_writeout = self.qnn.samples_count
print("Episode %d" % (it), "total samples", self.qnn.samples_count, "train steps", self.qnn.training_steps_count)
if (done): print("Length %d" % (self.episode_lengths[-1]))
# if (it + 1) % 100 == 0:
if (self.qnn.samples_count - prev_writeout_1) > 1e5/self.train_frequency:
prev_writeout_1 = self.qnn.samples_count
print("exploration ", self.epsilon)
self.plot_training()
test_runs = [self.run_test_episode(limit=self.max_test_length) for _ in range(self.test_runs_to_average)]
self.test_lengths.append(np.mean(test_runs))
# self.test_lengths_std.append(np.std(test_runs))
self.test_lengths_std.append( (- np.min(test_runs) + self.test_lengths[-1], np.max(test_runs) - self.test_lengths[-1]) )
self.test_its.append(self.total_train_episodes)
self.plot_testing()
if self.statedim == 2:
# print('last w', self.w)
self.plot_deepQ_policy(mode='deterministic')
self.plot_deepQ_function()
is_first = False
self.plot_replay_memory_2d_state_histogramm()
def run_test_episode(self, enable_render=False, limit=5000):
save_epsilon = self.epsilon
self.epsilon = 0.
episode_length = 0.
state = self.env.reset()
if self.from_pixels:
# running list of the last pixel states
pixel_state = deque(maxlen=self.repeat_action_times)
# fill initially
for _ in range(self.repeat_action_times):
pixel_state.append(self.get_render())
done = False
while (not done):
if episode_length > limit:
self.epsilon = save_epsilon
return episode_length
if not self.from_pixels:
action = self.policy(np.array(state).reshape((1,-1)), mode=self.policy_mode, deepQ=True)
if self.from_pixels:
action = self.policy(self.get_cnn_input_tensor_from_deque(pixel_state),
mode=self.policy_mode,
deepQ=True)
for _ in range(self.repeat_action_times):
episode_length += 1
state, _, done, _ = self.env.step(action)
if self.from_pixels:
pixel_state.append(self.get_render())
if done: break
if enable_render: self.env.render()
# if count > self.max_episode_length: break;
if enable_render: print("This episode took {} steps".format(count))
self.epsilon = save_epsilon
return episode_length
def plot_deepQ_function(self):
if self.from_pixels:
print("plot test. May burn!!")
obs_low = self.env.observation_space.low
obs_high = self.env.observation_space.high
# values to evaluate policy at
x_range = np.linspace(obs_low[0], obs_high[0], self.plot_resolution)
v_range = np.linspace(obs_low[1], obs_high[1], self.plot_resolution)
states = []
# the second index will change faster when doing np.reshape
# this fits with the raw-wise change of X in np.meshgrid
for state2 in v_range:
for state1 in x_range:
if not self.from_pixels:
states.append((state1, state2))
if self.from_pixels:
states.append(self.get_approx_pixel_state_from_state((state1,state2)).squeeze())
states = np.array(states)
deepQ_all = self.qnn.evaluate_all_actions(states)
print('statesshape', states.shape)
print('deepQshape', deepQ_all.shape)
for action in range(self.num_actions):
print('plotting the evaluated deepQ-function for action {}'.format(action))
# get values in a grid
q_func = np.reshape(deepQ_all[:,action], (v_range.shape[0], x_range.shape[0]))
print("")
fig = plt.figure()
ax = fig.add_subplot(111)
X, Y = np.meshgrid(x_range, v_range)
# ax.plot_surface(X, Y, q_func, rstride=1, cstride=1, cmap=cm.jet, linewidth=0.1, antialiased=True)
im = ax.pcolormesh(X, Y, q_func)
fig.colorbar(im)
ax.set_xlabel("x")
ax.set_ylabel("v")
# ax.set_zlabel("negative value")
plt.show()
# plotting Q*
print('plotting the evaluated deepQ-function star (optimal)')
# get values in a grid
q_func = np.reshape(np.max(deepQ_all, axis=1), (v_range.shape[0], x_range.shape[0]))
print("")
fig = plt.figure()
ax = fig.add_subplot(111)
X, Y = np.meshgrid(x_range, v_range)
# ax.plot_surface(X, Y, q_func, rstride=1, cstride=1, cmap=cm.jet, linewidth=0.1, antialiased=True)
im = ax.pcolormesh(X, Y, q_func)
fig.colorbar(im)
ax.set_xlabel("x")
ax.set_ylabel("v")
# ax.set_zlabel("negative value")
plt.show()
def get_approx_pixel_state_from_state(self, state):
"""
state should be a 1-D array
"""
np_state = np.array(state)
self.env.reset()
self.env.state = np_state
# print('---------------------------')
# print(self.env.state)
pixel_state = []
pixel_state.append(self.get_render())
for _ in range(self.repeat_action_times -1):
state, reward, done, info = self.env.step(1)
# print(state)
pixel_state.append(self.get_render())
# print('---------------------------')
return self.get_cnn_input_tensor_from_deque(pixel_state)
def plot_deepQ_policy(self, mode='deterministic'):
if self.from_pixels:
print("plot experiment. Watch out!")
resolution = self.plot_resolution
# backup of value
save_epsilon = self.epsilon
self.epsilon = 0.0 # no exploration
obs_low = self.env.observation_space.low
obs_high = self.env.observation_space.high
# values to evaluate policy at
x_range = np.linspace(obs_low[0], obs_high[0], resolution)
v_range = np.linspace(obs_low[1], obs_high[1], resolution)
# get actions in a grid
greedy_policy = np.zeros((resolution, resolution))
for i, x in enumerate(x_range):
for j, v in enumerate(v_range):
# print(np.argmax(self.get_features((x,v)).dot(self.theta)), end="")
if not self.from_pixels:
greedy_policy[i, j] = self.policy(np.array((x, v)).reshape((1,-1)), mode, deepQ=True)
if self.from_pixels:
greedy_policy[i, j] = self.policy(self.get_approx_pixel_state_from_state((x,v)), mode, deepQ=True)
print("")
# plot policy
fig = plt.figure()
plt.imshow(greedy_policy,
cmap=plt.get_cmap('gray'),
interpolation='none',
extent=[obs_low[1], obs_high[1], obs_high[0], obs_low[0]],
aspect="auto")
plt.xlabel("velocity")
plt.ylabel("position")
plt.show()
# restore value
self.epsilon = save_epsilon
def plot_training(self):
if any(np.array(np.array(self.episode_lengths) > 0).flatten()):
fig = plt.figure()
if len(self.episode_lengths) > 1000:
plt.plot(np.arange(len(self.episode_lengths))[range(0,len(self.episode_lengths),10)],
np.array(self.episode_lengths)[range(0,len(self.episode_lengths),10)],
'.', linewidth=0)
else:
plt.plot(self.episode_lengths, '.', linewidth=0)
plt.yscale('log')
plt.xlabel("episodes")
plt.ylabel("timesteps")
plt.show()
def plot_testing(self):
if any(np.array(np.array(self.test_lengths) > 0).flatten()):
fig = plt.figure()
if len(self.test_lengths) > 1000:
# plt.plot(np.convolve(self.test_lengths, np.ones(10)/10, mode='same'), '.', linewidth=0)
plt.plot(np.convolve(self.test_lengths, np.ones(10)/10, mode='same'), '.', linewidth=0)
else:
# plt.plot(self.test_its, self.test_lengths, '.', linewidth=0)
plt.errorbar(self.test_its, self.test_lengths, yerr=np.transpose(self.test_lengths_std), fmt='.')#, linewidth=0)
plt.yscale('log')
plt.xlabel("test episodes")
plt.ylabel("timesteps")
plt.show()
def plot_replay_memory_2d_state_histogramm(self):
if self.from_pixels:
print("plot not available. Move on.")
return
if self.statedim == 2:
rm=np.array(self.qnn.replay_memory)
states, _,_,_,_,_ = zip(*rm)
states_np = np.array(states)
states_np = np.squeeze(states_np)
x,v = zip(*states_np)
plt.hist2d(x, v, bins=40, norm=LogNorm())
plt.xlabel("position")
plt.ylabel("velocity")
plt.colorbar()
plt.show()
|
febert/DeepRL
|
q_learning_sarsa/dqn_learning.py
|
Python
|
gpl-3.0
| 22,540
|
import re
from utilRegex import database
class regex:
def __init__(self, botCfg):
"""class initialization function
"""
#intitialize database variables
self.db = database()
#initialize regex variables
self.phrase = ''
self.url = ''
#initialize status variables
self.phraseReady = False
self.urlReady = False
#load database configuration settings
self.db.loadConfig(botCfg)
def buildPhrase(self):
"""compile phrase regex object
builds a regex object that includes
existing phrases from the phrase table
in the database.
returns:
success: True
failure: False
"""
#initialize function
self.phraseReady = False
#open database connection
try:
self.db.connect()
except:
print 'utilRegex/regex.buildPhrase: failed to connect to database.'
return False
#pull records from database
try:
self.db.cursor.execute('SELECT *' + \
' FROM phraseprint()' + \
' f(id bigint, phrase text, username text)')
records = self.db.cursor.fetchall()
except:
print 'utilRegex/regex.buildPhrase: failed to retrieve records from database.'
self.db.cursor.close()
self.db.disconnect()
return False
#close database connection
self.db.cursor.close()
self.db.disconnect()
#build pattern string
if len(records) > 0: #only build the string if records are present
pattern = ''.join(['%s|' % (re.escape(record[1])) for record in records])
pattern = pattern[:-1]
else: #otherwise a placeholder (literally xD)
pattern = re.escape('a placeholder')
pattern = r'(^|\s|[a-z]-)(%s)+([a-z]{1,4})?(\'[a-z]{1,4})?(\s|\.|,|\?|\!|$)' % (pattern)
#compile the regex object
self.phrase = re.compile(pattern, re.IGNORECASE)
#exit the function
self.phraseReady = True
return True
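# Illustration (not part of the original): with phrases "foo" and "bar" the
# pattern built above matches word-like occurrences followed by a short suffix.
_pattern = r'(^|\s|[a-z]-)(%s)+([a-z]{1,4})?(\'[a-z]{1,4})?(\s|\.|,|\?|\!|$)' % 'foo|bar'
_rx = re.compile(_pattern, re.IGNORECASE)
assert _rx.search('I said foo.') is not None
assert _rx.search('food stuff') is not None   # "foo" plus a short suffix still matches
assert _rx.search('barricade') is None        # suffix longer than four letters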
|
stickybath/BetaMaleBot
|
src/utilRegex/regex.py
|
Python
|
gpl-3.0
| 2,247
|
from django.core.management.base import BaseCommand
from django.db.models import Q
from ajapaik.ajapaik.models import Album
class Command(BaseCommand):
help = 'Connects to TartuNLP API and retrieves neuro machine translations for empty name fields'
def handle(self, *args, **options):
albums = Album.objects.exclude(
Q(atype=Album.AUTO) |
Q(name_original_language__isnull=False) |
Q(atype=Album.PERSON) |
Q(atype=Album.COLLECTION)
).filter(
Q(name_et__isnull=False)
| Q(name_lv__isnull=False)
| Q(name_lt__isnull=False)
| Q(name_fi__isnull=False)
| Q(name_ru__isnull=False)
| Q(name_de__isnull=False)
| Q(name_en__isnull=False)
)
for each in albums:
print(f'Processing Album {each.pk}')
each: Album
each.fill_untranslated_fields()
|
Ajapaik/ajapaik-web
|
ajapaik/ajapaik/management/commands/tartunlp_on_all_albums.py
|
Python
|
gpl-3.0
| 963
|
#!/usr/bin/env python
# Wheat price prediction using Bayesian classification.
# Version 1.0
# Christophe Foyer - 2016
from xlrd import open_workbook
import random
import math
#set filename:
filename = 'Wheat-price-data.xlsx'
#import wheat price data (will automate downloading later, probably a different script that writes to the excel file)
def importExcel(filename):
#this function is quite ugly and not very efficient, but it should work...
excel = open_workbook(filename)
#extract data from excel sheet
for sheet in excel.sheets():
number_of_rows = sheet.nrows
number_of_columns = sheet.ncols
dataset = [[0.0 for x in range(number_of_columns + 3)] for y in range(number_of_rows)]
date = []
date_string = []
price = []
rows = []
for row in range(1, number_of_rows):
#excel stores dates as the number of days since 1900-Jan-0 (not sure if that means january 1st or december 31st but that won't matter much in our case)
#new method: substract number of days in year until negative
date_string = str(sheet.cell(row,0).value)
days = float(date_string)
dataset[row-1][0] = float(days)
[dataset[row-1][1], dataset[row-1][2], dataset[row-1][3]] = excelDate(days)
value = (sheet.cell(row,1).value)
try:
value = str(int(value))
dataset[row-1][4] = float(value)
except ValueError:
pass
finally:
dataset[row-1][4] = round(float(value)/10,0)*10
#now the rest of the data
for col in range(2, number_of_columns):
value = (sheet.cell(row,col).value)
try:
dataset[row-1][col + 3] = float(value)
except ValueError:
pass
#now all the data should be accessible from the "dataset" array
del dataset[-1]
#print dataset
return dataset
def excelDate(days):
month_day_count = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
leap_years = [1900, 1904, 1908, 1912, 1916, 1920, 1924, 1928, 1932, 1936, 1940, 1944, 1948, 1952, 1956, 1960, 1964, 1968, 1972, 1976, 1980, 1984, 1988, 1992, 1996, 2000, 2004, 2008, 2012, 2016, 2020, 2024, 2028, 2032, 2036, 2040, 2044, 2048, 2052, 2056, 2060, 2064, 2068, 2072, 2076, 2080, 2084, 2088, 2092, 2096]
i = 0
leap = 0
#this will find how many years and how many leftover days for that year
while days >= (365 + leap):
leap = 0
if i + 1900 in leap_years:
leap = 1
days = days - 365 - leap
i = i + 1
year = i
#now find the month and leftover days given leftover days
month = 1
for i in range(1, 12):
if (year + 1900 in leap_years) and (i == 2):
leap = 1
else:
leap = 0
if days <= (month_day_count[i-1] + leap):
break
else:
days = days - month_day_count[i-1] - leap
month = i + 1
#now we should have the exact date separated into day, month and year
return [year, month, days]
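# Cross-check (illustrative, not from the original): decoding an Excel 1900-system
# serial with the standard library. Excel counts the nonexistent 1900-02-29, so
# serials of 60 or more need a one-day correction.
import datetime
def excel_serial_to_date(serial):
    if serial >= 60:
        serial -= 1
    return datetime.date(1899, 12, 31) + datetime.timedelta(days=serial)
assert excel_serial_to_date(1) == datetime.date(1900, 1, 1)
assert excel_serial_to_date(61) == datetime.date(1900, 3, 1)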
def splitDataset(dataset, splitRatio):
trainSize = int(len(dataset) * splitRatio)
trainSet = []
copy = list(dataset)
while len(trainSet) < trainSize:
index = random.randrange(len(copy))
trainSet.append(copy.pop(index))
return [trainSet, copy]
def separateByClass(dataset):
separated = {}
for i in range(len(dataset)):
vector = dataset[i]
if (vector[4] not in separated):
separated[vector[4]] = []
separated[vector[4]].append(vector)
return separated
def mean(numbers):
return sum(numbers)/float(len(numbers))
def stdev(numbers):
if len(numbers) > 1:
avg = mean(numbers)
variance = sum([pow(x-avg,2) for x in numbers])/float(len(numbers)-1)
return math.sqrt(variance)
else:
return 0
def summarize(dataset):
summaries = [(mean(attribute), stdev(attribute)) for attribute in zip(*dataset)]
del summaries[4]
#print summaries
return summaries
def summarizeByClass(dataset):
separated = separateByClass(dataset)
print separated
summaries = {}
for classValue, instances in separated.iteritems():
summaries[classValue] = summarize(instances)
return summaries
def calculateProbability(x, mean, stdev):
if stdev !=0:
exponent = math.exp(-(math.pow(x-mean,2)/(2*math.pow(stdev,2))))
return (1 / (math.sqrt(2*math.pi) * stdev)) * exponent
else:
return 1
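# Sanity check (illustrative, not from the original): the Gaussian density above
# evaluates to 1/sqrt(2*pi) ~ 0.3989 at the mean of a standard normal.
assert abs(calculateProbability(0.0, 0.0, 1.0) - 1.0 / math.sqrt(2 * math.pi)) < 1e-12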
def calculateClassProbabilities(summaries, inputVector):
probabilities = {}
for classValue, classSummaries in summaries.iteritems():
probabilities[classValue] = 1
for i in range(len(classSummaries)):
mean, stdev = classSummaries[i]
x = inputVector[i]
probabilities[classValue] *= calculateProbability(x, mean, stdev)
return probabilities
def predict(summaries, inputVector):
probabilities = calculateClassProbabilities(summaries, inputVector)
bestLabel, bestProb = None, -1
for classValue, probability in probabilities.iteritems():
if bestLabel is None or probability > bestProb:
bestProb = probability
bestLabel = classValue
return bestLabel
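# Tiny illustration (not from the original): prediction simply picks the class
# whose likelihood product is largest.
_probs = {'low': 0.002, 'mid': 0.015, 'high': 0.0004}
assert max(_probs, key=_probs.get) == 'mid'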
def getPredictions(summaries, testSet):
predictions = []
for i in range(len(testSet)):
result = predict(summaries, testSet[i])
predictions.append(result)
return predictions
def getAccuracy(testSet, predictions):
correct = 0
for i in range(len(testSet)):
if testSet[i][4] == predictions[i]:
correct += 1
return (correct/float(len(testSet))) * 100.0
def reorganizeData(dataset):
reorganizedData = [["unknown"] for x in range(len(dataset))]
for i in range(len(dataset)):
for j in range(1, int(dataset[i][0]-min([l[0] for l in dataset]))):
reorganizedData[i][0] = dataset[i][4]
if (dataset[i][0]-j) in ([l[0] for l in dataset]):
index = [l[0] for l in dataset].index(dataset[i][0]-j)
for k in range(0, len(dataset[index])):
reorganizedData[i].append(dataset[index][k])
else:
for k in range(0, len(dataset[i])):
reorganizedData[i].append("unknown")
return reorganizedData
def main():
splitRatio = 0.67
dataset = importExcel(filename)
#reorganise data to include past days
dataset = reorganizeData(dataset)
print dataset
print('Loaded data file {0} with {1} rows').format(filename, len(dataset))
trainingSet, testSet = splitDataset(dataset, splitRatio)
print('Split {0} rows into train={1} and test={2} rows').format(len(dataset), len(trainingSet), len(testSet))
# prepare model
summaries = summarizeByClass(trainingSet)
# test model
predictions = getPredictions(summaries, testSet)
accuracy = getAccuracy(testSet, predictions)
print('Accuracy: {0}%').format(accuracy)
main()
|
Christophe-Foyer/Naive_Bayes_Price_Prediction
|
Old files and backups/Naive Bayes Classifier - Copie.py
|
Python
|
gpl-3.0
| 7,147
|
""" A SpriteSheet is the overall collection of individual frames (which may be spread across files) that define one layer of the final sprite.
"""
from models.sprite_action import SpriteAction
class SpriteSheet():
def __init__( self, data, group_name ):
if "file_path" not in data:
raise "file_path element missing in layer. Unable to load .spec file if we don't know which sheet you mean"
self.file_path = data.get( "file_path" )
self.group_name = group_name
self.name = data.get( "name", "Unnamed Layer" )
self.layer = data.get( "layer", "Unspecified Layer" )
self.credit_name = data.get( "credit_name", "Unknown Artist" )
self.credit_url = data.get( "credit_url", "" )
self.license = data.get( "license", "Not specified (do not use this artwork without explicit written permission from the artist!)" )
self.actions = {}
avail_actions = data.get( "actions", [] )
for action_data in avail_actions:
new_action = SpriteAction( action_data )
self.actions[new_action.name] = new_action
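# Hypothetical usage (not from the repo): the keys follow the .get() calls above,
# and an empty "actions" list avoids assuming anything about SpriteAction's data format.
_example_sheet = SpriteSheet({
    "file_path": "sheets/body_male.png",
    "name": "Body",
    "layer": "base",
    "credit_name": "Example Artist",
    "license": "CC-BY-SA 3.0",
    "actions": [],
}, group_name="body")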
|
xaroth8088/sprite-magic
|
models/sprite_sheet.py
|
Python
|
gpl-3.0
| 1,116
|
from pupa.scrape import Jurisdiction, Organization
from .people import IDPersonScraper
from .committees import IDCommitteeScraper
from .bills import IDBillScraper
class Idaho(Jurisdiction):
"""
IDAHO Scraper
"""
division_id = "ocd-division/country:us/state:id"
classification = "government"
name = "Idaho"
url = "http://www.legislature.idaho.gov"
scrapers = {
'people': IDPersonScraper,
'committees': IDCommitteeScraper,
'bills': IDBillScraper
}
parties = [
{'name': 'Republican'},
{'name': 'Democratic'}
]
legislative_sessions = [
{
"_scraped_name": "2011 Session",
"classification": "primary",
"end_date": "2011-04-07",
"identifier": "2011",
"name": "61st Legislature, 1st Regular Session (2011)",
"start_date": "2011-01-10"
},
{
"_scraped_name": "2012 Session",
"classification": "primary",
"identifier": "2012",
"name": "61st Legislature, 2nd Regular Session (2012)"
},
{
"_scraped_name": "2013 Session",
"classification": "primary",
"identifier": "2013",
"name": "62nd Legislature, 1st Regular Session (2013)"
},
{
"_scraped_name": "2014 Session",
"classification": "primary",
"identifier": "2014",
"name": "63nd Legislature, 1st Regular Session (2014)"
},
{
"_scraped_name": "2015 Session",
"classification": "primary",
"end_date": "2015-04-10",
"identifier": "2015",
"name": "64th Legislature, 1st Regular Session (2015)",
"start_date": "2015-01-12"
},
{
"_scraped_name": "2015 Extraordinary Session",
"classification": "special",
"end_date": "2015-05-18",
"identifier": "2015spcl",
"name": "65th Legislature, 1st Extraordinary Session (2015)",
"start_date": "2015-05-18"
},
{
"_scraped_name": "2016 Session",
"classification": "primary",
"end_date": "2016-03-25",
"identifier": "2016",
"name": "63rd Legislature, 2nd Regular Session (2016)",
"start_date": "2016-01-11"
},
{
"_scraped_name": "2017 Session",
"classification": "primary",
"end_date": "2017-04-07",
"identifier": "2017",
"name": "64th Legislature, 1st Regular Session (2017)",
"start_date": "2017-01-09"
}
]
ignored_scraped_sessions = [
"2010 Session",
"2009 Session",
"2008 Session",
"2007 Session",
"2006 Extraordinary Session",
"2006 Session",
"2005 Session",
"2004 Session",
"2003 Session",
"2002 Session",
"2001 Session",
"2000 Extraordinary Session",
"2000 Session",
"1999 Session",
"1998 Session"
]
def get_organizations(self):
legislature_name = "Idaho State Legislature"
lower_chamber_name = "House"
lower_seats = 35
lower_title = "Representative"
upper_chamber_name = "Senate"
upper_seats = 35
upper_title = "Senator"
legislature = Organization(name=legislature_name,
classification="legislature")
upper = Organization(upper_chamber_name, classification='upper',
parent_id=legislature._id)
lower = Organization(lower_chamber_name, classification='lower',
parent_id=legislature._id)
for n in range(1, upper_seats+1):
upper.add_post(
label=str(n), role=upper_title,
division_id='{}/sldu:{}'.format(self.division_id, n))
for n in range(1, lower_seats+1):
lower.add_post(
label=str(n), role=lower_title,
division_id='{}/sldl:{}'.format(self.division_id, n))
yield legislature
yield upper
yield lower
|
cliftonmcintosh/openstates
|
openstates/id/__init__.py
|
Python
|
gpl-3.0
| 4,242
|
#coding: UTF-8
'''This script would guide the seafile admin to setup seafile with MySQL'''
import sys
import os
import time
import re
import shutil
import glob
import subprocess
import hashlib
import getpass
import uuid
import warnings
import MySQLdb
from ConfigParser import ConfigParser
try:
import readline # pylint: disable=W0611
except ImportError:
pass
SERVER_MANUAL_HTTP = 'https://github.com/haiwen/seafile/wiki'
class Utils(object):
'''Groups all helper functions here'''
@staticmethod
def welcome():
'''Show welcome message'''
welcome_msg = '''\
-----------------------------------------------------------------
This script will guide you to setup your seafile server using MySQL.
Make sure you have read seafile server manual at
%s
Press ENTER to continue
-----------------------------------------------------------------''' % SERVER_MANUAL_HTTP
print welcome_msg
raw_input()
@staticmethod
def highlight(content):
'''Add ANSI color to content to get it highlighted on terminal'''
return '\x1b[33m%s\x1b[m' % content
@staticmethod
def info(msg):
print msg
@staticmethod
def error(msg):
'''Print error and exit'''
print
print 'Error: ' + msg
sys.exit(1)
@staticmethod
def run_argv(argv, cwd=None, env=None, suppress_stdout=False, suppress_stderr=False):
'''Run a program and wait it to finish, and return its exit code. The
standard output of this program is supressed.
'''
with open(os.devnull, 'w') as devnull:
if suppress_stdout:
stdout = devnull
else:
stdout = sys.stdout
if suppress_stderr:
stderr = devnull
else:
stderr = sys.stderr
proc = subprocess.Popen(argv,
cwd=cwd,
stdout=stdout,
stderr=stderr,
env=env)
return proc.wait()
@staticmethod
def run(cmdline, cwd=None, env=None, suppress_stdout=False, suppress_stderr=False):
'''Like run_argv but specify a command line string instead of argv'''
with open(os.devnull, 'w') as devnull:
if suppress_stdout:
stdout = devnull
else:
stdout = sys.stdout
if suppress_stderr:
stderr = devnull
else:
stderr = sys.stderr
proc = subprocess.Popen(cmdline,
cwd=cwd,
stdout=stdout,
stderr=stderr,
env=env,
shell=True)
return proc.wait()
@staticmethod
def prepend_env_value(name, value, env=None, seperator=':'):
'''prepend a new value to a list'''
if env is None:
env = os.environ
try:
current_value = env[name]
except KeyError:
current_value = ''
new_value = value
if current_value:
new_value += seperator + current_value
env[name] = new_value
@staticmethod
def must_mkdir(path):
'''Create a directory, exit on failure'''
try:
os.mkdir(path)
except OSError, e:
Utils.error('failed to create directory %s:%s' % (path, e))
@staticmethod
def must_copy(src, dst):
'''Copy src to dst, exit on failure'''
try:
shutil.copy(src, dst)
except Exception, e:
Utils.error('failed to copy %s to %s: %s' % (src, dst, e))
@staticmethod
def find_in_path(prog):
if 'win32' in sys.platform:
sep = ';'
else:
sep = ':'
dirs = os.environ['PATH'].split(sep)
for d in dirs:
d = d.strip()
if d == '':
continue
path = os.path.join(d, prog)
if os.path.exists(path):
return path
return None
@staticmethod
def get_python_executable():
'''Return the python executable. This should be the PYTHON environment
variable which is set in setup-seafile-mysql.sh
'''
return os.environ['PYTHON']
@staticmethod
def read_config(fn):
'''Return a case sensitive ConfigParser by reading the file "fn"'''
cp = ConfigParser()
cp.optionxform = str
cp.read(fn)
return cp
@staticmethod
def write_config(cp, fn):
'''Return a case sensitive ConfigParser by reading the file "fn"'''
with open(fn, 'w') as fp:
cp.write(fp)
@staticmethod
def ask_question(desc,
key=None,
note=None,
default=None,
validate=None,
yes_or_no=False,
password=False):
'''Ask a question, return the answer.
@desc description, e.g. "What is the port of ccnet?"
@key a name to represent the target of the question, e.g. "port for
ccnet server"
@note additional information for the question, e.g. "Must be a valid
port number"
@default the default value of the question. If the default value is
not None, when the user enter nothing and press [ENTER], the default
value would be returned
@validate a function that takes the user input as the only parameter
and validate it. It should return a validated value, or throws an
"InvalidAnswer" exception if the input is not valid.
@yes_or_no If true, the user must answer "yes" or "no", and a boolean
value would be returned
@password If true, the user input would not be echoed to the
console
'''
assert key or yes_or_no
# Format description
print
if note:
desc += '\n' + note
desc += '\n'
if yes_or_no:
desc += '[ yes or no ]'
else:
if default:
desc += '[ default "%s" ]' % default
else:
desc += '[ %s ]' % key
desc += ' '
while True:
# prompt for user input
if password:
answer = getpass.getpass(desc).strip()
else:
answer = raw_input(desc).strip()
# No user input: use default
if not answer:
if default:
answer = default
else:
continue
# Have user input: validate answer
if yes_or_no:
if answer not in ['yes', 'no']:
print Utils.highlight('\nPlease answer yes or no\n')
continue
else:
return answer == 'yes'
else:
if validate:
try:
return validate(answer)
except InvalidAnswer, e:
print Utils.highlight('\n%s\n' % e)
continue
else:
return answer
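# Illustrative calls (not part of the original, shown commented out because the
# prompts are interactive): a yes_or_no question returns a bool, a validated
# question returns whatever the validate callback returns.
# use_https = Utils.ask_question('Enable HTTPS?', yes_or_no=True)
# port = Utils.ask_question('Which port should the server listen on?',
#                           key='server port', default='8000',
#                           validate=Utils.validate_port)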
@staticmethod
def validate_port(port):
try:
port = int(port)
except ValueError:
raise InvalidAnswer('%s is not a valid port' % Utils.highlight(port))
if port <= 0 or port > 65535:
raise InvalidAnswer('%s is not a valid port' % Utils.highlight(port))
return port
class InvalidAnswer(Exception):
def __init__(self, msg):
Exception.__init__(self)
self.msg = msg
def __str__(self):
return self.msg
### END of Utils
####################
class EnvManager(object):
'''System environment and directory layout'''
def __init__(self):
self.install_path = os.path.dirname(os.path.abspath(__file__))
self.top_dir = os.path.dirname(self.install_path)
self.bin_dir = os.path.join(self.install_path, 'seafile', 'bin')
def check_pre_condiction(self):
def error_if_not_exists(path):
if not os.path.exists(path):
Utils.error('"%s" not found' % path)
paths = [
os.path.join(self.install_path, 'seafile'),
os.path.join(self.install_path, 'seahub'),
os.path.join(self.install_path, 'runtime'),
]
for path in paths:
error_if_not_exists(path)
if os.path.exists(ccnet_config.ccnet_dir):
Utils.error('Ccnet config dir \"%s\" already exists.' % ccnet_config.ccnet_dir)
def get_seahub_env(self):
'''Prepare for seahub syncdb'''
env = dict(os.environ)
env['CCNET_CONF_DIR'] = ccnet_config.ccnet_dir
env['SEAFILE_CONF_DIR'] = seafile_config.seafile_dir
self.setup_python_path(env)
return env
def setup_python_path(self, env):
'''And PYTHONPATH and CCNET_CONF_DIR/SEAFILE_CONF_DIR to env, which is
needed by seahub
'''
install_path = self.install_path
pro_pylibs_dir = os.path.join(install_path, 'pro', 'python')
extra_python_path = [
pro_pylibs_dir,
os.path.join(install_path, 'seahub', 'thirdpart'),
os.path.join(install_path, 'seafile/lib/python2.6/site-packages'),
os.path.join(install_path, 'seafile/lib64/python2.6/site-packages'),
os.path.join(install_path, 'seafile/lib/python2.7/site-packages'),
os.path.join(install_path, 'seafile/lib64/python2.7/site-packages'),
]
for path in extra_python_path:
Utils.prepend_env_value('PYTHONPATH', path, env=env)
def get_binary_env(self):
'''Set LD_LIBRARY_PATH for seafile server executables'''
env = dict(os.environ)
lib_dir = os.path.join(self.install_path, 'seafile', 'lib')
lib64_dir = os.path.join(self.install_path, 'seafile', 'lib64')
Utils.prepend_env_value('LD_LIBRARY_PATH', lib_dir, env=env)
Utils.prepend_env_value('LD_LIBRARY_PATH', lib64_dir, env=env)
return env
class AbstractConfigurator(object):
'''Abstract Base class for ccnet/seafile/seahub/db configurator'''
def __init__(self):
pass
def ask_questions(self):
raise NotImplementedError
def generate(self):
raise NotImplementedError
class AbstractDBConfigurator(AbstractConfigurator):
'''Abstract class for database related configuration'''
def __init__(self):
AbstractConfigurator.__init__(self)
self.mysql_host = 'localhost'
self.mysql_port = 3306
self.use_existing_db = False
self.seafile_mysql_user = ''
self.seafile_mysql_password = ''
self.ccnet_db_name = ''
self.seafile_db_name = ''
self.seahub_db_name = ''
self.seahub_admin_email = ''
self.seahub_admin_password = ''
@staticmethod
def ask_use_existing_db():
def validate(choice):
if choice not in ['1', '2']:
raise InvalidAnswer('Please choose 1 or 2')
return choice == '2'
question = '''\
-------------------------------------------------------
Please choose a way to initialize seafile databases:
-------------------------------------------------------
'''
note = '''\
[1] Create new ccnet/seafile/seahub databases
[2] Use existing ccnet/seafile/seahub databases
'''
return Utils.ask_question(question,
key='1 or 2',
note=note,
validate=validate)
def ask_mysql_host_port(self):
def validate(host):
if not re.match(r'^[a-zA-Z0-9_\-\.]+$', host):
raise InvalidAnswer('%s is not a valid host' % Utils.highlight(host))
if host == 'localhost':
host = '127.0.0.1'
question = 'What is the port of mysql server?'
key = 'mysql server port'
default = '3306'
port = Utils.ask_question(question,
key=key,
default=default,
validate=Utils.validate_port)
# self.check_mysql_server(host, port)
self.mysql_port = port
return host
question = 'What is the host of mysql server?'
key = 'mysql server host'
default = 'localhost'
self.mysql_host = Utils.ask_question(question,
key=key,
default=default,
validate=validate)
def check_mysql_server(self, host, port):
print '\nverifying mysql server running ... ',
try:
dummy = MySQLdb.connect(host=host, port=port)
except Exception:
print
raise InvalidAnswer('Failed to connect to mysql server at "%s:%s"' \
% (host, port))
print 'done'
def check_mysql_user(self, user, password):
print '\nverifying password of user %s ... ' % user,
kwargs = dict(host=self.mysql_host,
port=self.mysql_port,
user=user,
passwd=password)
try:
conn = MySQLdb.connect(**kwargs)
except Exception, e:
if isinstance(e, MySQLdb.OperationalError):
raise InvalidAnswer('Failed to connect to mysql server using user "%s" and password "***": %s' \
% (user, e.args[1]))
else:
raise InvalidAnswer('Failed to connect to mysql server using user "%s" and password "***": %s' \
% (user, e))
print 'done'
return conn
def create_seahub_admin(self):
try:
conn = MySQLdb.connect(host=self.mysql_host,
port=self.mysql_port,
user=self.seafile_mysql_user,
passwd=self.seafile_mysql_password,
db=self.ccnet_db_name)
except Exception, e:
if isinstance(e, MySQLdb.OperationalError):
Utils.error('Failed to connect to mysql database %s: %s' % (self.ccnet_db_name, e.args[1]))
else:
Utils.error('Failed to connect to mysql database %s: %s' % (self.ccnet_db_name, e))
cursor = conn.cursor()
sql = '''\
CREATE TABLE IF NOT EXISTS EmailUser (id INTEGER NOT NULL PRIMARY KEY AUTO_INCREMENT, email VARCHAR(255), passwd CHAR(64), is_staff BOOL NOT NULL, is_active BOOL NOT NULL, ctime BIGINT, UNIQUE INDEX (email)) ENGINE=INNODB'''
try:
cursor.execute(sql)
except Exception, e:
if isinstance(e, MySQLdb.OperationalError):
Utils.error('Failed to create ccnet user table: %s' % e.args[1])
else:
Utils.error('Failed to create ccnet user table: %s' % e)
sql = '''REPLACE INTO EmailUser(email, passwd, is_staff, is_active, ctime) VALUES ('%s', '%s', 1, 1, 0)''' \
% (seahub_config.admin_email, seahub_config.hashed_admin_password())
try:
cursor.execute(sql)
except Exception, e:
if isinstance(e, MySQLdb.OperationalError):
Utils.error('Failed to create admin user: %s' % e.args[1])
else:
Utils.error('Failed to create admin user: %s' % e)
conn.commit()
def ask_questions(self):
'''Ask questions and do database operations'''
raise NotImplementedError
class NewDBConfigurator(AbstractDBConfigurator):
'''Handles the case of creating new mysql databases for ccnet/seafile/seahub'''
def __init__(self):
AbstractDBConfigurator.__init__(self)
self.root_password = ''
self.root_conn = ''
def ask_questions(self):
self.ask_mysql_host_port()
self.ask_root_password()
self.ask_seafile_mysql_user_password()
self.ask_db_names()
def generate(self):
if not self.mysql_user_exists(self.seafile_mysql_user):
self.create_user()
self.create_databases()
def ask_root_password(self):
def validate(password):
self.root_conn = self.check_mysql_user('root', password)
return password
question = 'What is the password of the mysql root user?'
key = 'root password'
self.root_password = Utils.ask_question(question,
key=key,
validate=validate,
password=True)
def mysql_user_exists(self, user):
cursor = self.root_conn.cursor()
sql = '''SELECT EXISTS(SELECT 1 FROM mysql.user WHERE user = '%s')''' % user
try:
cursor.execute(sql)
return cursor.fetchall()[0][0]
except Exception, e:
if isinstance(e, MySQLdb.OperationalError):
Utils.error('Failed to check mysql user %s: %s' % (user, e.args[1]))
else:
Utils.error('Failed to check mysql user %s: %s' % (user, e))
finally:
cursor.close()
def ask_seafile_mysql_user_password(self):
def validate(user):
if user == 'root':
self.seafile_mysql_password = self.root_password
else:
question = 'Enter the password for mysql user "%s":' % Utils.highlight(user)
key = 'password for %s' % user
password = Utils.ask_question(question, key=key, password=True)
# If the user already exists, check the password here
if self.mysql_user_exists(user):
self.check_mysql_user(user, password)
self.seafile_mysql_password = password
return user
question = 'Enter the name for mysql user of seafile. It would be created if not exists.'
key = 'mysql user for seafile'
default = 'root'
self.seafile_mysql_user = Utils.ask_question(question,
key=key,
default=default,
validate=validate)
def ask_db_name(self, program, default):
question = 'Enter the database name for %s:' % program
key = '%s database' % program
return Utils.ask_question(question,
key=key,
default=default,
validate=self.validate_db_name)
def ask_db_names(self):
self.ccnet_db_name = self.ask_db_name('ccnet-server', 'ccnet-db')
self.seafile_db_name = self.ask_db_name('seafile-server', 'seafile-db')
self.seahub_db_name = self.ask_db_name('seahub', 'seahub-db')
def validate_db_name(self, db_name):
return db_name
def create_user(self):
cursor = self.root_conn.cursor()
sql = '''CREATE USER '%s'@'localhost' IDENTIFIED BY '%s' ''' \
% (self.seafile_mysql_user, self.seafile_mysql_password)
try:
cursor.execute(sql)
except Exception, e:
if isinstance(e, MySQLdb.OperationalError):
Utils.error('Failed to create mysql user %s: %s' % (self.seafile_mysql_user, e.args[1]))
else:
Utils.error('Failed to create mysql user %s: %s' % (self.seafile_mysql_user, e))
finally:
cursor.close()
def create_db(self, db_name):
cursor = self.root_conn.cursor()
sql = '''CREATE DATABASE IF NOT EXISTS `%s` CHARACTER SET UTF8''' \
% db_name
try:
cursor.execute(sql)
except Exception, e:
if isinstance(e, MySQLdb.OperationalError):
Utils.error('Failed to create database %s: %s' % (db_name, e.args[1]))
else:
Utils.error('Failed to create database %s: %s' % (db_name, e))
finally:
cursor.close()
def grant_db_permission(self, db_name):
cursor = self.root_conn.cursor()
sql = '''GRANT ALL PRIVILEGES ON `%s`.* to `%s` ''' \
% (db_name, self.seafile_mysql_user)
try:
cursor.execute(sql)
except Exception, e:
if isinstance(e, MySQLdb.OperationalError):
Utils.error('Failed to grant permission of database %s: %s' % (db_name, e.args[1]))
else:
Utils.error('Failed to grant permission of database %s: %s' % (db_name, e))
finally:
cursor.close()
def create_databases(self):
self.create_db(self.ccnet_db_name)
self.create_db(self.seafile_db_name)
self.create_db(self.seahub_db_name)
if self.seafile_mysql_user != 'root':
self.grant_db_permission(self.ccnet_db_name)
self.grant_db_permission(self.seafile_db_name)
self.grant_db_permission(self.seahub_db_name)
class ExistingDBConfigurator(AbstractDBConfigurator):
'''Handles the case of use existing mysql databases for ccnet/seafile/seahub'''
def __init__(self):
AbstractDBConfigurator.__init__(self)
self.use_existing_db = True
def ask_questions(self):
self.ask_mysql_host_port()
self.ask_existing_mysql_user_password()
self.ccnet_db_name = self.ask_db_name('ccnet')
self.seafile_db_name = self.ask_db_name('seafile')
self.seahub_db_name = self.ask_db_name('seahub')
def generate(self):
pass
def ask_existing_mysql_user_password(self):
def validate(user):
question = 'What is the password for mysql user "%s"?' % Utils.highlight(user)
key = 'password for %s' % user
password = Utils.ask_question(question, key=key, password=True)
self.check_mysql_user(user, password)
self.seafile_mysql_password = password
return user
question = 'Which mysql user to use for seafile?'
key = 'mysql user for seafile'
self.seafile_mysql_user = Utils.ask_question(question,
key=key,
validate=validate)
def ask_db_name(self, program):
def validate(db_name):
if self.seafile_mysql_user != 'root':
self.check_user_db_access(db_name)
return db_name
question = 'Enter the existing database name for %s:' % program
key = '%s database' % program
return Utils.ask_question(question,
key=key,
validate=validate)
def check_user_db_access(self, db_name):
user = self.seafile_mysql_user
password = self.seafile_mysql_password
print '\nverifying user "%s" access to database %s ... ' % (user, db_name),
try:
conn = MySQLdb.connect(host=self.mysql_host,
port=self.mysql_port,
user=user,
passwd=password,
db=db_name)
except Exception, e:
if isinstance(e, MySQLdb.OperationalError):
raise InvalidAnswer('Failed to access database %s using user "%s" and password "***": %s' \
% (db_name, user, e.args[1]))
else:
raise InvalidAnswer('Failed to access database %s using user "%s" and password "***": %s' \
% (db_name, user, e))
print 'done'
return conn
class CcnetConfigurator(AbstractConfigurator):
SERVER_NAME_REGEX = r'^[a-zA-Z0-9_\-]{3,14}$'
SERVER_IP_OR_DOMAIN_REGEX = '^[^.].+\..+[^.]$'
def __init__(self):
'''Initialize default values of ccnet configuration'''
AbstractConfigurator.__init__(self)
self.ccnet_dir = os.path.join(env_mgr.top_dir, 'ccnet')
self.port = 10001
self.server_name = 'my-seafile'
self.ip_or_domain = None
def ask_questions(self):
self.ask_server_name()
self.ask_server_ip_or_domain()
self.ask_port()
def generate(self):
print 'Generating ccnet configuration ...\n'
ccnet_init = os.path.join(env_mgr.bin_dir, 'ccnet-init')
argv = [
ccnet_init,
'--config-dir', self.ccnet_dir,
'--name', self.server_name,
'--host', self.ip_or_domain,
'--port', str(self.port),
]
if Utils.run_argv(argv, env=env_mgr.get_binary_env()) != 0:
Utils.error('Failed to generate ccnet configuration')
time.sleep(1)
self.generate_db_conf()
def generate_db_conf(self):
ccnet_conf = os.path.join(self.ccnet_dir, 'ccnet.conf')
config = Utils.read_config(ccnet_conf)
# [Database]
# ENGINE=
# HOST=
# USER=
# PASSWD=
# DB=
db_section = 'Database'
if not config.has_section(db_section):
config.add_section(db_section)
config.set(db_section, 'ENGINE', 'mysql')
config.set(db_section, 'HOST', db_config.mysql_host)
config.set(db_section, 'PORT', db_config.mysql_port)
config.set(db_section, 'USER', db_config.seafile_mysql_user)
config.set(db_section, 'PASSWD', db_config.seafile_mysql_password)
config.set(db_section, 'DB', db_config.ccnet_db_name)
config.set(db_section, 'CONNECTION_CHARSET', 'utf8')
Utils.write_config(config, ccnet_conf)
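# Resulting section (illustrative values, not from the original): generate_db_conf
# appends something like the following to ccnet.conf.
#
# [Database]
# ENGINE = mysql
# HOST = 127.0.0.1
# PORT = 3306
# USER = seafile
# PASSWD = ***
# DB = ccnet-db
# CONNECTION_CHARSET = utf8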
def ask_server_name(self):
def validate(name):
if not re.match(self.SERVER_NAME_REGEX, name):
raise InvalidAnswer('%s is not a valid name' % Utils.highlight(name))
return name
question = 'What is the name of the server? It will be displayed on the client.'
key = 'server name'
note = '3 - 15 letters or digits'
self.server_name = Utils.ask_question(question,
key=key,
note=note,
validate=validate)
def ask_server_ip_or_domain(self):
def validate(ip_or_domain):
if not re.match(self.SERVER_IP_OR_DOMAIN_REGEX, ip_or_domain):
raise InvalidAnswer('%s is not a valid ip or domain' % ip_or_domain)
return ip_or_domain
question = 'What is the ip or domain of the server?'
key = 'This server\'s ip or domain'
note = 'For example: www.mycompany.com, 192.168.1.101'
self.ip_or_domain = Utils.ask_question(question,
key=key,
note=note,
validate=validate)
def ask_port(self):
def validate(port):
return Utils.validate_port(port)
question = 'Which port do you want to use for the ccnet server?'
key = 'ccnet server port'
default = 10001
self.port = Utils.ask_question(question,
key=key,
default=default,
validate=validate)
class SeafileConfigurator(AbstractConfigurator):
def __init__(self):
AbstractConfigurator.__init__(self)
self.seafile_dir = os.path.join(env_mgr.top_dir, 'seafile-data')
self.port = 12001
self.httpserver_port = 8082
def ask_questions(self):
self.ask_seafile_dir()
self.ask_port()
self.ask_httpserver_port()
def generate(self):
print 'Generating seafile configuration ...\n'
seafserv_init = os.path.join(env_mgr.bin_dir, 'seaf-server-init')
argv = [
seafserv_init,
'--seafile-dir', self.seafile_dir,
'--port', str(self.port),
'--httpserver-port', str(self.httpserver_port),
]
if Utils.run_argv(argv, env=env_mgr.get_binary_env()) != 0:
Utils.error('Failed to generate ccnet configuration')
time.sleep(1)
self.generate_db_conf()
self.write_seafile_ini()
print 'done'
def generate_db_conf(self):
seafile_conf = os.path.join(self.seafile_dir, 'seafile.conf')
config = Utils.read_config(seafile_conf)
# [database]
# type=
# host=
# user=
# password=
# db_name=
# unix_socket=
db_section = 'database'
if not config.has_section(db_section):
config.add_section(db_section)
config.set(db_section, 'type', 'mysql')
config.set(db_section, 'host', db_config.mysql_host)
config.set(db_section, 'port', db_config.mysql_port)
config.set(db_section, 'user', db_config.seafile_mysql_user)
config.set(db_section, 'password', db_config.seafile_mysql_password)
config.set(db_section, 'db_name', db_config.seafile_db_name)
config.set(db_section, 'connection_charset', 'utf8')
Utils.write_config(config, seafile_conf)
def ask_seafile_dir(self):
def validate(path):
if os.path.exists(path):
raise InvalidAnswer('%s already exists' % Utils.highlight(path))
return path
question = 'Where do you want to put your seafile data?'
key = 'seafile-data'
note = 'Please use a volume with enough free space'
default = os.path.join(env_mgr.top_dir, 'seafile-data')
self.seafile_dir = Utils.ask_question(question,
key=key,
note=note,
default=default,
validate=validate)
def ask_port(self):
def validate(port):
port = Utils.validate_port(port)
if port == ccnet_config.port:
raise InvalidAnswer('%s is used by ccnet server, choose another one' \
% Utils.highlight(port))
return port
question = 'Which port do you want to use for the seafile server?'
key = 'seafile server port'
default = 12001
self.port = Utils.ask_question(question,
key=key,
default=default,
validate=validate)
def ask_httpserver_port(self):
def validate(port):
port = Utils.validate_port(port)
if port == ccnet_config.port:
raise InvalidAnswer('%s is used by ccnet server, choose another one' \
% Utils.highlight(port))
if port == seafile_config.port:
raise InvalidAnswer('%s is used by seafile server, choose another one' \
% Utils.highlight(port))
return port
question = 'Which port do you want to use for the seafile httpserver?'
key = 'seafile httpserver port'
default = 8082
self.httpserver_port = Utils.ask_question(question,
key=key,
default=default,
validate=validate)
def write_seafile_ini(self):
seafile_ini = os.path.join(ccnet_config.ccnet_dir, 'seafile.ini')
with open(seafile_ini, 'w') as fp:
fp.write(self.seafile_dir)
class SeahubConfigurator(AbstractConfigurator):
def __init__(self):
AbstractConfigurator.__init__(self)
self.admin_email = ''
self.admin_password = ''
def hashed_admin_password(self):
return hashlib.sha1(self.admin_password).hexdigest() # pylint: disable=E1101
def ask_questions(self):
pass
# self.ask_admin_email()
# self.ask_admin_password()
def generate(self):
'''Generating seahub_settings.py'''
print 'Generating seahub configuration ...\n'
time.sleep(1)
seahub_settings_py = os.path.join(env_mgr.top_dir, 'seahub_settings.py')
with open(seahub_settings_py, 'w') as fp:
self.write_secret_key(fp)
self.write_database_config(fp)
def write_secret_key(self, fp):
text = 'SECRET_KEY = "%s"\n\n' % self.gen_secret_key()
fp.write(text)
def write_database_config(self, fp):
template = '''\
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': '%(name)s',
'USER': '%(username)s',
'PASSWORD': '%(password)s',
'HOST': '%(host)s',
'PORT': '%(port)s',
'OPTIONS': {
'init_command': 'SET storage_engine=INNODB',
}
}
}
'''
text = template % dict(name=db_config.seahub_db_name,
username=db_config.seafile_mysql_user,
password=db_config.seafile_mysql_password,
host=db_config.mysql_host,
port=db_config.mysql_port)
fp.write(text)
def gen_secret_key(self):
data = str(uuid.uuid4()) + str(uuid.uuid4())
return data[:40]
def ask_admin_email(self):
print
print '----------------------------------------'
print 'Now let\'s create the admin account'
print '----------------------------------------'
def validate(email):
# whitespace is not allowed
if re.match(r'[\s]', email):
raise InvalidAnswer('%s is not a valid email address' % Utils.highlight(email))
# must be a valid email address
if not re.match(r'^.+@.*\..+$', email):
raise InvalidAnswer('%s is not a valid email address' % Utils.highlight(email))
return email
key = 'admin email'
question = 'What is the ' + Utils.highlight('email') + ' for the admin account?'
self.admin_email = Utils.ask_question(question,
key=key,
validate=validate)
def ask_admin_password(self):
def validate(password):
key = 'admin password again'
question = 'Enter the ' + Utils.highlight('password again:')
password_again = Utils.ask_question(question,
key=key,
password=True)
if password_again != password:
raise InvalidAnswer('password mismatch')
return password
key = 'admin password'
question = 'What is the ' + Utils.highlight('password') + ' for the admin account?'
self.admin_password = Utils.ask_question(question,
key=key,
password=True,
validate=validate)
def do_syncdb(self):
print '----------------------------------------'
print 'Now creating seahub database tables ...\n'
print '----------------------------------------'
env = env_mgr.get_seahub_env()
cwd = os.path.join(env_mgr.install_path, 'seahub')
argv = [
Utils.get_python_executable(),
'manage.py',
'syncdb',
]
if Utils.run_argv(argv, cwd=cwd, env=env) != 0:
Utils.error("Failed to create seahub databases")
def prepare_avatar_dir(self):
# media_dir=${INSTALLPATH}/seahub/media
# orig_avatar_dir=${INSTALLPATH}/seahub/media/avatars
# dest_avatar_dir=${TOPDIR}/seahub-data/avatars
# if [[ ! -d ${dest_avatar_dir} ]]; then
# mkdir -p "${TOPDIR}/seahub-data"
# mv "${orig_avatar_dir}" "${dest_avatar_dir}"
# ln -s ../../../seahub-data/avatars ${media_dir}
# fi
try:
media_dir = os.path.join(env_mgr.install_path, 'seahub', 'media')
orig_avatar_dir = os.path.join(media_dir, 'avatars')
seahub_data_dir = os.path.join(env_mgr.top_dir, 'seahub-data')
dest_avatar_dir = os.path.join(seahub_data_dir, 'avatars')
if os.path.exists(dest_avatar_dir):
return
if not os.path.exists(seahub_data_dir):
os.mkdir(seahub_data_dir)
shutil.move(orig_avatar_dir, dest_avatar_dir)
os.symlink('../../../seahub-data/avatars', orig_avatar_dir)
except Exception, e:
Utils.error('Failed to prepare seahub avatars dir: %s' % e)
class SeafDavConfigurator(AbstractConfigurator):
def __init__(self):
AbstractConfigurator.__init__(self)
self.conf_dir = None
self.seafdav_conf = None
def ask_questions(self):
pass
def generate(self):
self.conf_dir = os.path.join(env_mgr.top_dir, 'conf')
        if not os.path.exists(self.conf_dir):
Utils.must_mkdir(self.conf_dir)
self.seafdav_conf = os.path.join(self.conf_dir, 'seafdav.conf')
text = '''
[WEBDAV]
enabled = false
port = 8080
fastcgi = false
share_name = /
'''
with open(self.seafdav_conf, 'w') as fp:
fp.write(text)
class UserManualHandler(object):
def __init__(self):
self.src_docs_dir = os.path.join(env_mgr.install_path, 'seafile', 'docs')
self.library_template_dir = None
def copy_user_manuals(self):
self.library_template_dir = os.path.join(seafile_config.seafile_dir, 'library-template')
Utils.must_mkdir(self.library_template_dir)
pattern = os.path.join(self.src_docs_dir, '*.doc')
for doc in glob.glob(pattern):
Utils.must_copy(doc, self.library_template_dir)
def report_config():
print
print '---------------------------------'
print 'This is your configuration'
print '---------------------------------'
print
template = '''\
server name: %(server_name)s
server ip/domain: %(ip_or_domain)s
ccnet port: %(ccnet_port)s
seafile data dir: %(seafile_dir)s
seafile port: %(seafile_port)s
httpserver port: %(httpserver_port)s
database: %(use_existing_db)s
ccnet database: %(ccnet_db_name)s
seafile database: %(seafile_db_name)s
seahub database: %(seahub_db_name)s
database user: %(db_user)s
'''
config = {
'server_name' : ccnet_config.server_name,
'ip_or_domain' : ccnet_config.ip_or_domain,
'ccnet_port' : ccnet_config.port,
'seafile_dir' : seafile_config.seafile_dir,
'seafile_port' : seafile_config.port,
'httpserver_port' : seafile_config.httpserver_port,
'admin_email' : seahub_config.admin_email,
        'use_existing_db': 'use existing' if db_config.use_existing_db else 'create new',
'ccnet_db_name': db_config.ccnet_db_name,
'seafile_db_name': db_config.seafile_db_name,
'seahub_db_name': db_config.seahub_db_name,
'db_user': db_config.seafile_mysql_user
}
print template % config
print
print '---------------------------------'
print 'Press ENTER to continue, or Ctrl-C to abort'
print '---------------------------------'
raw_input()
def create_seafile_server_symlink():
print '\ncreating seafile-server-latest symbolic link ... ',
seafile_server_symlink = os.path.join(env_mgr.top_dir, 'seafile-server-latest')
try:
os.symlink(os.path.basename(env_mgr.install_path), seafile_server_symlink)
except Exception, e:
print '\n'
Utils.error('Failed to create symbolic link %s: %s' % (seafile_server_symlink, e))
else:
print 'done\n\n'
env_mgr = EnvManager()
ccnet_config = CcnetConfigurator()
seafile_config = SeafileConfigurator()
seafdav_config = SeafDavConfigurator()
seahub_config = SeahubConfigurator()
user_manuals_handler = UserManualHandler()
# Would be created after AbstractDBConfigurator.ask_use_existing_db()
db_config = None
def main():
global db_config
Utils.welcome()
warnings.filterwarnings('ignore', category=MySQLdb.Warning)
env_mgr.check_pre_condiction()
# Part 1: collect configuration
ccnet_config.ask_questions()
seafile_config.ask_questions()
seahub_config.ask_questions()
if AbstractDBConfigurator.ask_use_existing_db():
db_config = ExistingDBConfigurator()
else:
db_config = NewDBConfigurator()
db_config.ask_questions()
report_config()
# Part 2: generate configuration
db_config.generate()
ccnet_config.generate()
seafile_config.generate()
seafdav_config.generate()
seahub_config.generate()
seahub_config.do_syncdb()
seahub_config.prepare_avatar_dir()
# db_config.create_seahub_admin()
user_manuals_handler.copy_user_manuals()
create_seafile_server_symlink()
report_success()
def report_success():
message = '''\
-----------------------------------------------------------------
Your seafile server configuration has been finished successfully.
-----------------------------------------------------------------
run seafile server: ./seafile.sh { start | stop | restart }
run seahub server: ./seahub.sh { start <port> | stop | restart <port> }
-----------------------------------------------------------------
If you are behind a firewall, remember to allow input/output of these tcp ports:
-----------------------------------------------------------------
port of ccnet server: %(ccnet_port)s
port of seafile server: %(seafile_port)s
port of seafile httpserver: %(httpserver_port)s
port of seahub: 8000
When problems occur, refer to
%(server_manual_http)s
for information.
'''
print message % dict(ccnet_port=ccnet_config.port,
seafile_port=seafile_config.port,
httpserver_port=seafile_config.httpserver_port,
server_manual_http=SERVER_MANUAL_HTTP)
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
print
print Utils.highlight('The setup process is aborted')
print
|
Chilledheart/seafile
|
scripts/setup-seafile-mysql.py
|
Python
|
gpl-3.0
| 43,261
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-06-21 06:20
from __future__ import unicode_literals
from django.db import migrations
import django.db.models.deletion
import mptt.fields
class Migration(migrations.Migration):
dependencies = [
('laboratory', '0034_group_perms'),
]
operations = [
migrations.RemoveField(
model_name='laboratory',
name='related_labs',
),
migrations.AlterField(
model_name='laboratory',
name='organization',
field=mptt.fields.TreeForeignKey(on_delete=django.db.models.deletion.CASCADE, to='laboratory.OrganizationStructure'),
),
]
|
solvo/organilab
|
src/laboratory/migrations/0035_auto_20180621_0020.py
|
Python
|
gpl-3.0
| 694
|
# -*- coding: utf-8 -*-
import re
import time
import pycurl
from module.network.HTTPRequest import BadHeader
from ..internal.Account import Account
class OneFichierCom(Account):
__name__ = "OneFichierCom"
__type__ = "account"
__version__ = "0.23"
__status__ = "testing"
__description__ = """1fichier.com account plugin"""
__license__ = "GPLv3"
__authors__ = [("Elrick69", "elrick69[AT]rocketmail[DOT]com"),
("Walter Purcaro", "vuolter@gmail.com")]
VALID_UNTIL_PATTERN = r'Your Premium offer subscription is valid until <span style="font-weight:bold">(\d+\-\d+\-\d+)'
def grab_info(self, user, password, data):
validuntil = None
trafficleft = -1
premium = None
html = self.load("https://1fichier.com/console/abo.pl")
m = re.search(self.VALID_UNTIL_PATTERN, html)
if m is not None:
expiredate = m.group(1)
self.log_debug("Expire date: " + expiredate)
try:
validuntil = time.mktime(time.strptime(expiredate, "%Y-%m-%d"))
except Exception, e:
self.log_error(e, trace=True)
else:
premium = True
return {'validuntil': validuntil,
'trafficleft': trafficleft, 'premium': premium or False}
def signin(self, user, password, data):
self.req.http.c.setopt(
pycurl.REFERER,
"https://1fichier.com/login.pl?lg=en")
try:
html = self.load("https://1fichier.com/login.pl?lg=en",
post={'mail': user,
'pass': password,
'It': "on",
'purge': "off",
'valider': "Send"})
if any(_x in html for _x in
('>Invalid username or Password', '>Invalid email address', '>Invalid password')):
self.fail_login()
except BadHeader, e:
if e.code == 403:
self.fail_login()
else:
raise
|
thispc/download-manager
|
module/plugins/accounts/OneFichierCom.py
|
Python
|
gpl-3.0
| 2,134
|
# vi: ts=4 expandtab
#
# Copyright (C) 2014 Amazon.com, Inc. or its affiliates.
#
# Author: Jeff Bauer <jbauer@rubic.com>
# Author: Andrew Jorgensen <ajorgens@amazon.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3, as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
from cloudinit import util
# Note: see http://saltstack.org/topics/installation/
def handle(name, cfg, cloud, log, _args):
# If there isn't a salt key in the configuration don't do anything
if 'salt_minion' not in cfg:
log.debug(("Skipping module named %s,"
" no 'salt_minion' key in configuration"), name)
return
salt_cfg = cfg['salt_minion']
# Start by installing the salt package ...
cloud.distro.install_packages(('salt-minion',))
# Ensure we can configure files at the right dir
config_dir = salt_cfg.get("config_dir", '/etc/salt')
util.ensure_dir(config_dir)
# ... and then update the salt configuration
if 'conf' in salt_cfg:
# Add all sections from the conf object to /etc/salt/minion
minion_config = os.path.join(config_dir, 'minion')
minion_data = util.yaml_dumps(salt_cfg.get('conf'))
util.write_file(minion_config, minion_data)
# ... copy the key pair if specified
if 'public_key' in salt_cfg and 'private_key' in salt_cfg:
pki_dir = salt_cfg.get('pki_dir', '/etc/salt/pki')
with util.umask(077):
util.ensure_dir(pki_dir)
pub_name = os.path.join(pki_dir, 'minion.pub')
pem_name = os.path.join(pki_dir, 'minion.pem')
util.write_file(pub_name, salt_cfg['public_key'])
util.write_file(pem_name, salt_cfg['private_key'])
# start / restart salt-minion. if it was started, it needs to be restarted
# for config change.
if cloud.distro.service_running('salt-minion'):
cloud.distro.service_control('salt-minion', 'restart', capture=False)
else:
cloud.distro.service_control('salt-minion', 'start', capture=False)
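# Illustrative cloud-config fragment consumed by handle() above (a sketch only:
# the keys mirror the salt_cfg lookups in this module, while the master address
# and key material are made-up placeholders):
#
# salt_minion:
#   config_dir: /etc/salt
#   conf:
#     master: salt.example.com
#   pki_dir: /etc/salt/pki
#   public_key: |
#     -----BEGIN PUBLIC KEY-----
#     ...
#   private_key: |
#     -----BEGIN PRIVATE KEY-----
#     ...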
|
henrysher/aws-cloudinit
|
cloudinit/config/cc_salt_minion.py
|
Python
|
gpl-3.0
| 2,541
|
# -*- coding: utf-8 -*-
__author__ = """Kevin Wierman"""
__email__ = 'kevin.wierman@pnnl.gov'
__version__ = '0.1.0'
|
HEP-DL/root2hdf5
|
root2hdf5/__init__.py
|
Python
|
gpl-3.0
| 117
|
# -*- coding: utf-8 -*-
from ..provider.g5k import G5K
from constants import SYMLINK_NAME
from functools import wraps
import os
import yaml
import logging
def load_env():
env = {
'config' : {}, # The config
'resultdir': '', # Path to the result directory
'config_file' : '', # The initial config file
'nodes' : {}, # Roles with nodes
'phase' : '', # Last phase that have been run
'user' : '', # User id for this job
'kolla_repo': 'https://git.openstack.org/openstack/kolla',
'kolla_branch': 'stable/newton'
}
# Loads the previously saved environment (if any)
env_path = os.path.join(SYMLINK_NAME, 'env')
if os.path.isfile(env_path):
with open(env_path, 'r') as f:
env.update(yaml.load(f))
logging.debug("Reloaded config %s", env['config'])
# Resets the configuration of the environment
if os.path.isfile(env['config_file']):
with open(env['config_file'], 'r') as f:
env['config'].update(yaml.load(f))
logging.debug("Reloaded config %s", env['config'])
return env
def save_env(env):
env_path = os.path.join(env['resultdir'], 'env')
if os.path.isdir(env['resultdir']):
with open(env_path, 'w') as f:
yaml.dump(env, f)
def hamtask(doc):
"""Decorator for a Ham Task."""
def decorator(fn):
fn.__doc__ = doc
@wraps(fn)
def decorated(*args, **kwargs):
# TODO: Dynamically loads the provider
if kwargs.has_key('--provider'):
provider_name = kwargs['--provider']
kwargs['provider'] = G5K()
# Loads the environment & set the config
env = load_env()
kwargs['env'] = env
            # Proceeds with the function execution
fn(*args, **kwargs)
# Save the environment
save_env(env)
return decorated
return decorator
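# Minimal usage sketch (illustrative only: the task name, docstring and the
# provider value below are assumptions, not part of this module):
#
# @hamtask("Deploy the testbed on the reserved nodes.")
# def deploy(*args, **kwargs):
#     env = kwargs['env']        # injected by the decorator via load_env()
#     env['phase'] = 'deploy'    # persisted afterwards by save_env()
#
# deploy(**{'--provider': 'g5k'})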
|
NHebrard/ham-multisite
|
ham/utils/hamtask.py
|
Python
|
gpl-3.0
| 1,998
|
'''Enhanced Thread with support for return values and exception propagation.'''
# Copyright (C) 2007 Canonical Ltd.
# Author: Martin Pitt <martin.pitt@ubuntu.com>
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2 of the License, or (at your
# option) any later version. See http://www.gnu.org/copyleft/gpl.html for
# the full text of the license.
import threading, sys
class REThread(threading.Thread):
'''Thread with return values and exception propagation.'''
def __init__(self, group=None, target=None, name=None, args=(), kwargs={},
verbose=None):
'''Initialize Thread, identical to threading.Thread.__init__().'''
threading.Thread.__init__(self, group, target, name, args, kwargs,
verbose)
self.__target = target
self.__args = args
self.__kwargs = kwargs
self._retval = None
self._exception = None
def run(self):
'''Run target function, identical to threading.Thread.run().'''
if self.__target:
try:
self._retval = self.__target(*self.__args, **self.__kwargs)
except:
if sys:
self._exception = sys.exc_info()
def return_value(self):
'''Return value from target function.
This can only be called after the thread has finished, i. e. when
isAlive() is False and did not terminate with an exception.
'''
assert not self.isAlive()
assert not self._exception
return self._retval
def exc_info(self):
'''Return (type, value, traceback) of the exception caught in run().'''
return self._exception
def exc_raise(self):
'''Raise the exception caught in the thread.
Do nothing if no exception was caught.
'''
if self._exception:
raise self._exception[0], self._exception[1], self._exception[2]
#
# Unit test
#
if __name__ == '__main__':
import unittest, time, traceback
def idle(seconds):
'''Test thread to just wait a bit.'''
time.sleep(seconds)
def div(x, y):
'''Test thread to divide two numbers.'''
return x / y
class _T(unittest.TestCase):
def test_return_value(self):
'''return value works properly.'''
t = REThread(target=div, args=(42, 2))
t.start()
t.join()
# exc_raise() should be a no-op on successful functions
t.exc_raise()
self.assertEqual(t.return_value(), 21)
self.assertEqual(t.exc_info(), None)
def test_no_return_value(self):
'''REThread works if run() does not return anything.'''
t = REThread(target=idle, args=(0.5,))
t.start()
# thread must be joined first
self.assertRaises(AssertionError, t.return_value)
t.join()
self.assertEqual(t.return_value(), None)
self.assertEqual(t.exc_info(), None)
def test_exception(self):
'''exception in thread is caught and passed.'''
t = REThread(target=div, args=(1, 0))
t.start()
t.join()
# thread did not terminate normally, no return value
self.assertRaises(AssertionError, t.return_value)
self.assertTrue(t.exc_info()[0] == ZeroDivisionError)
exc = traceback.format_exception(t.exc_info()[0], t.exc_info()[1],
t.exc_info()[2])
self.assertTrue(exc[-1].startswith('ZeroDivisionError'), 'not a ZeroDivisionError:' + str(exc))
self.assertTrue(exc[-2].endswith('return x / y\n'))
def test_exc_raise(self):
'''exc_raise() raises caught thread exception.'''
t = REThread(target=div, args=(1, 0))
t.start()
t.join()
# thread did not terminate normally, no return value
self.assertRaises(AssertionError, t.return_value)
raised = False
try:
t.exc_raise()
except:
raised = True
e = sys.exc_info()
exc = traceback.format_exception(e[0], e[1], e[2])
self.assertTrue(exc[-1].startswith('ZeroDivisionError'), 'not a ZeroDivisionError:' + str(e))
self.assertTrue(exc[-2].endswith('return x / y\n'))
self.assertTrue(raised)
unittest.main()
|
Alberto-Beralix/Beralix
|
i386-squashfs-root/usr/share/pyshared/apport/REThread.py
|
Python
|
gpl-3.0
| 4,593
|
# pylint: disable=line-too-long, unused-argument
import json
from django.template.loader import render_to_string
def format_reading(probe_name, json_payload):
item = json.loads(json_payload)
status = item['DEVICE_ACTIVE']
if item['DEVICE_ACTIVE'] is True:
status = "Active"
elif item['DEVICE_ACTIVE'] is False:
status = "Inactive"
return status
def visualize(probe_name, readings):
report = []
for reading in readings:
payload = json.loads(reading.payload)
timestamp = payload['TIMESTAMP']
device_active = payload['DEVICE_ACTIVE']
if device_active is True:
device_active = 1
elif device_active is False:
device_active = 0
rep_dict = {}
rep_dict["y"] = device_active
rep_dict["x"] = timestamp
report.append(rep_dict)
return render_to_string('visualization_device.html', {'probe_name': probe_name, 'readings': readings, 'device_report': json.dumps(report)})
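# Illustrative call (hypothetical payload; the field names follow the lookups above):
#
# format_reading('device', '{"DEVICE_ACTIVE": true, "TIMESTAMP": 1514764800}')
# returns "Active"; a payload with "DEVICE_ACTIVE": false returns "Inactive".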
|
cbitstech/Purple-Robot-Django
|
formatters/features_deviceinusefeature.py
|
Python
|
gpl-3.0
| 1,013
|
import numpy as np
import os
import pandas as pa
import scipy.stats as stats
import unittest
from pbsea import PandasBasedEnrichmentAnalysis, EnrichmentAnalysisExperimental, preprocessing_files
from unittest.mock import patch
test_data_directory = 'test_data/'
test_data_directory_cdf = test_data_directory + 'test_cdf/'
test_data_directory_sf = test_data_directory + 'test_sf/'
test_data_directory_multiple = test_data_directory + 'test_multiple/'
test_data_directory_enrichment = test_data_directory + 'test_enrichment/'
class enrichmentAnalysis_test(unittest.TestCase):
def setUp(self):
df, column_interest, column_reference = preprocessing_files('GOs',
test_data_directory_enrichment+'counting_objects_in_interest.tsv',
test_data_directory_enrichment+'counting_objects_in_genome.tsv')
self.obj = PandasBasedEnrichmentAnalysis(df, column_interest, column_reference, 122, 1293, 0.05, 10000)
self.class_sgof_test = EnrichmentAnalysisExperimental(df, column_interest, column_reference, 122, 1293, 0.05, 10000)
def tearDown(self):
del self.obj
def test_hypergeometric_on_dataframe(self):
'''
        Data are from: https://fr.mathworks.com/help/stats/hygecdf.html?s_tid=gn_loc_drop
'''
print("\nTesting hypergeometric sf on dataframe ")
counts_df = pa.read_csv(test_data_directory_sf + 'data_interest_test_sf_hypergeometric' + ".tsv", sep = "\t")
counts_df_reference = pa.read_csv(test_data_directory_sf + 'data_reference_test_sf_hypergeometric' + ".tsv", sep = "\t")
counts_df.set_index('Genes', inplace = True)
counts_df_reference.set_index('Genes', inplace = True)
df_joined = counts_df.join(counts_df_reference)
df_joined_wih_results = pa.read_csv(test_data_directory_sf + 'result_test_sf_hypergeometric' + ".tsv", sep = "\t")
df_joined_wih_results.set_index('Genes', inplace = True)
enrichment_analysis_test = PandasBasedEnrichmentAnalysis(df_joined, 'Counts', 'CountsReference', 300, 10000, 0.05, 10000)
df_joined = enrichment_analysis_test.test_on_dataframe(df_joined)
np.testing.assert_array_almost_equal(df_joined['pvalue_hypergeometric'].tolist(),
df_joined_wih_results['pvalue_hypergeometric'].tolist(), decimal = 4)
def test_normal_approximation_on_dataframe(self):
'''
        Data have been invented for the test.
        Results for the hypergeometric test are from: https://www.geneprof.org/GeneProf/tools/hypergeometric.jsp
        pvalue_hypergeometric found: 4.834533775884863e-8
'''
print("\nTesting normal approximation sf on dataframe ")
counts_df = pa.read_csv(test_data_directory_sf + 'data_interest_test_sf_normal' + ".tsv", sep = "\t")
counts_df_reference = pa.read_csv(test_data_directory_sf + 'data_reference_test_sf_normal' + ".tsv", sep = "\t")
counts_df.set_index('Genes', inplace = True)
counts_df_reference.set_index('Genes', inplace = True)
df_joined = counts_df.join(counts_df_reference)
df_joined_wih_results = pa.read_csv(test_data_directory_sf + 'result_test_sf_normal' + ".tsv", sep = "\t")
df_joined_wih_results.set_index('Genes', inplace = True)
enrichment_analysis_test = PandasBasedEnrichmentAnalysis(df_joined, 'Counts', 'CountsReference', 10000, 100000, 0.05, 1000)
df_joined = enrichment_analysis_test.test_on_dataframe(df_joined)
np.testing.assert_array_almost_equal(df_joined_wih_results['pvalue_normal'].tolist(),
df_joined['pvalue_normal_approximation'].tolist(), decimal = 4)
def test_correction_bonferroni(self):
'''
        Data are from: http://www.pmean.com/05/MultipleComparisons.asp
'''
print("\nTesting Bonferroni multiple testing correction ")
pvalue_df = pa.read_csv(test_data_directory_multiple + 'multiple_test_data_pmean' + ".tsv", sep = "\t")
self.obj.statistic_method = "pvalue_hypergeometric"
pvalue_df = self.obj.correction_bonferroni(pvalue_df)
pvalue_truth_df = pa.read_csv(test_data_directory_multiple + 'multiple_test_result_pmean' + ".tsv", sep = "\t")
np.testing.assert_array_almost_equal(pvalue_df['pValueBonferroni'].tolist(), pvalue_truth_df['PvalueBonferroni'].tolist(), decimal = 4)
def test_correction_holm(self):
'''
        Data are from: http://www.pmean.com/05/MultipleComparisons.asp
'''
print("\nTesting Holm multiple testing correction ")
pvalue_df = pa.read_csv(test_data_directory_multiple + 'multiple_test_data_pmean' + ".tsv", sep = "\t")
self.obj.statistic_method = "pvalue_hypergeometric"
pvalue_df = self.obj.correction_holm(pvalue_df)
pvalue_truth_df = pa.read_csv(test_data_directory_multiple + 'multiple_test_result_pmean' + ".tsv", sep = "\t")
pvalue_truth_df = pvalue_truth_df.sort_values(by = "pvalue_hypergeometric")
np.testing.assert_array_almost_equal(pvalue_df['pValueHolm'].tolist(), pvalue_truth_df['PvalueHolm'].tolist(), decimal = 4)
def test_correction_benjamini_hochberg(self):
'''
        Data and results are from: www.biostathandbook.com/multiplecomparisons.html
        Data are from the article: onlinelibrary.wiley.com/doi/10.1002/ijc.28513/full
'''
print("\nTesting Benjamini and Hochberg multiple testing correction ")
df_data = pa.read_csv(test_data_directory_multiple + 'multiple_test_data_BH.tsv', sep='\t')
self.obj.statistic_method = "pvalue_hypergeometric"
df_data = self.obj.correction_benjamini_hochberg(df_data)
pvalue_truth_df = pa.read_csv(test_data_directory_multiple + 'multiple_test_result_BH.tsv', sep='\t')
np.testing.assert_array_almost_equal(df_data['pValueBenjaminiHochberg'].tolist(), pvalue_truth_df['pValueBenjaminiHochberg'].tolist(), decimal = 4)
def test_correction_sgof_G(self):
'''
        Data have been created for the example.
        The results were obtained by running them through the MATLAB SGoF script here: http://acraaj.webs.uvigo.es/software/matlab_sgof.m
'''
print("\nTesting SGoF multiple testing correction using G test")
pvalue_df = pa.read_csv(test_data_directory_multiple + 'multiple_test_data_sgof_G_test' + ".tsv", sep = "\t")
self.class_sgof_test.statistic_method = "pvalue_hypergeometric"
self.class_sgof_test.object_to_analyze= "pvalue_hypergeometric"
pvalue_df = self.class_sgof_test.correction_sgof(pvalue_df)
pvalue_truth_df = pa.read_csv(test_data_directory_multiple + 'multiple_test_result_sgof_G_test' + ".tsv", sep = "\t")
pvalue_truth_df = pvalue_truth_df.sort_values(by = "pvalue_hypergeometric")
np.testing.assert_array_equal(pvalue_df['pValueSGoF'].tolist(), pvalue_truth_df['pValueSGoF'].tolist())
def test_correction_sgof_bino(self):
'''
        Data have been created for the example.
        The results were obtained by running them through the MATLAB SGoF script here: http://acraaj.webs.uvigo.es/software/matlab_sgof.m
'''
print("\nTesting SGoF multiple testing correction using binomial test ")
pvalue_df = pa.read_csv(test_data_directory_multiple + 'multiple_test_data_sgof_binomial' + ".tsv", sep = "\t")
self.class_sgof_test.statistic_method = "pvalue_hypergeometric"
self.class_sgof_test.object_to_analyze= "pvalue_hypergeometric"
pvalue_df = self.class_sgof_test.correction_sgof(pvalue_df)
pvalue_truth_df = pa.read_csv(test_data_directory_multiple + 'multiple_test_result_sgof_binomial' + ".tsv", sep = "\t")
pvalue_truth_df = pvalue_truth_df.sort_values(by = "pvalue_hypergeometric")
np.testing.assert_array_equal(pvalue_df['pValueSGoF'].tolist(), pvalue_truth_df['pValueSGoF'].tolist())
def test_error_rate_adjustement_bonferroni(self):
'''
        Data and results are from: www.biostathandbook.com/multiplecomparisons.html
'''
print("\nTesting error rate adjustement Bonferroni ")
datas = {'pvalue_hypergeometric':[0.001,0.008,0.039,0.041,0.042,0.06,0.074,0.205,0.212,0.216,0.222,
0.251,0.269,0.275,0.34,0.341,0.384,0.569,0.594,0.696,0.762,0.94,0.942,0.975,0.986]}
df = pa.DataFrame(datas)
error_rate_adjusted = self.obj.error_rate_adjustement_bonferroni(df)
self.assertEqual(error_rate_adjusted, 0.002)
def test_error_rate_adjustement_sidak(self):
'''
        Data have been created for the example (the only important thing here is the number of p-values).
        The example and the result are from: www.spc.univ-lyon1.fr/polycop/comparaisons multiples.htm
'''
print("\nTesting error rate adjustement Sidak ")
datas_10 = {'pvalue_hypergeometric_10':[0.01,0.02,0.3,0.02,0.05,0.07,0.9,0.001,0.09,0.008]}
df_10_pvalue = pa.DataFrame(datas_10)
error_rate_adjusted_10 = self.obj.error_rate_adjustement_sidak(df_10_pvalue)
datas_20 = {'pvalue_hypergeometric_20':[0.01,0.02,0.05,0.04,0.2,0.04,0.9,0.05,0.06,0.0545,
0.048766,0.02,0.04,0.03,0.365,0.21,0.0234,0.2,0.156]}
df_20_pvalue = pa.DataFrame(datas_20)
error_rate_adjusted_20 = self.obj.error_rate_adjustement_sidak(df_20_pvalue)
np.testing.assert_array_almost_equal([error_rate_adjusted_10, error_rate_adjusted_20], [0.0051, 0.0026], decimal = 4)
if __name__ == '__main__':
unittest.main()
|
ArnaudBelcour/liasis
|
test/test_pbsea.py
|
Python
|
gpl-3.0
| 9,677
|
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
setup(name='agarnet',
packages=['agarnet'],
py_modules=['agarnet'],
version='0.2.4',
description='agar.io client and connection toolkit',
install_requires=['websocket-client>=0.32.0'],
author='Gjum',
author_email='code.gjum@gmail.com',
url='https://github.com/Gjum/agarnet',
license='GPLv3',
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Topic :: Education',
'Topic :: Games/Entertainment',
],
)
|
Gjum/agarnet
|
setup.py
|
Python
|
gpl-3.0
| 1,095
|
# Initialization of All Modules of UnivMathSys
# Copyright (C) 2016 Zhang Chang-kai #
# Contact via: phy.zhangck@gmail.com #
# General Public License version 3.0 #
'''Initialization of All Modules'''
from Foundation import *
from Elementary import *
from Structure import *
# End of Initialization of All Modules
|
Phy-David-Zhang/UnivMathSys
|
Technology/TopModuleInit/__init__.py
|
Python
|
gpl-3.0
| 329
|
import os
import string
#Enter your username and password below within double quotes
# eg. username="username" and password="password"
username="username"
password="password"
com="wget -O - https://"+username+":"+password+"@mail.google.com/mail/feed/atom --no-check-certificate"
temp=os.popen(com)
msg=temp.read()
index=string.find(msg,"<fullcount>")
index2=string.find(msg,"</fullcount>")
fc=int(msg[index+11:index2])
if fc==0:
print "0 new"
else:
print str(fc)+" new"
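# Illustrative conky hook (an assumption, not part of this script): reference it
# from .conkyrc with something like
# ${execi 300 python ~/.conky/gmail.py}
# to refresh the unread count every five minutes.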
|
maximilianofaccone/puppy-siberian
|
root/.conky/gmail.py
|
Python
|
gpl-3.0
| 480
|
"""todolist URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^app/', include("todoListApp.urls")),
]
|
Ketcomp/todolist
|
source/todolist/urls.py
|
Python
|
gpl-3.0
| 821
|
# AsteriskLint -- an Asterisk PBX config syntax checker
# Copyright (C) 2018 Walter Doekes, OSSO B.V.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from ..base import E_APP_ARG_BADOPT, App, AppArg
class AppOrExten(AppArg):
def validate(self, arg, where):
if arg not in ('app', 'exten'):
E_APP_ARG_BADOPT(
where, argno=self.argno, app=self.app, opts=arg)
class Originate(App):
def __init__(self):
super().__init__(
# arg1 means Application-name or Context
args=[AppArg('tech_data'), AppOrExten('type'), AppArg('arg1'),
AppArg('arg2'), AppArg('arg3'), AppArg('timeout')],
min_args=3)
def register(app_loader):
app_loader.register(Originate())
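# Illustrative dialplan line this definition would accept (example values only;
# per the comment above, with type 'exten' arg1 is a dialplan context, and the
# trailing extension/priority values here are assumptions):
#
# exten => s,1,Originate(Local/201@outgoing,exten,default,201,1)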
|
ossobv/asterisklint
|
asterisklint/app/vall/app_originate.py
|
Python
|
gpl-3.0
| 1,348
|
#!/usr/bin/env python
import arff
import numpy as np
import sys
from sklearn import preprocessing
#from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.feature_selection import RFE
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.cross_validation import StratifiedKFold
from sklearn import cross_validation
from sklearn import metrics
#Parameters:
filename = sys.argv[1]
n_estimators = [100,200,300,500,1000,3000]
#n_estimators = [1000]
n_featuresToSelect = [2,3,5,10,20]
#n_featuresToSelect = [20]
maxDepth = [2,3,4,5,10]
#maxDepth = [2]
verboseLevel=10
n_jobs=10
n_crossValidation=10
n_accuracy=10
#n_accuracy=2
n_folds=10
# 10% removal of features
stepSize=0.1
print "Load dataset"
# Load dataset
arffDecoder = arff.ArffDecoder()
dataset = arffDecoder.decode(open(filename, 'rb'), encode_nominal=True)
print "Preprocess dataset"
# Get categorical features
categoricals = []
# NOTE: skip last (class) 'feature'
for feature in range(0,len(dataset['attributes'])-1):
if isinstance(dataset['attributes'][feature][1], list):
categoricals.append(feature)
print "Categorical indices: {0}".format(categoricals)
# Apply OneHotEncoder
oneHotEncoder = preprocessing.OneHotEncoder(categorical_features=categoricals, sparse=False)
print "Number of features: {0}".format(len(dataset['data'][0]))
print "Number of samples: {0}".format(len(dataset['data']))
binData = oneHotEncoder.fit_transform(np.array(dataset['data']))
print "n-values: {0}".format(oneHotEncoder.n_values_)
print "feature indices: {0}".format(oneHotEncoder.feature_indices_)
print "Number of binarised features: {0}".format(len(binData[0]))
print "Number of binarised samples: {0}".format(len(binData))
# Setting up input and outputs
inputs = binData[:,:-1]
output = binData[:,-1]
print "Start grid search"
# Setup experimental pipeline
scaler = preprocessing.RobustScaler()
#classifier = RandomForestClassifier(n_estimators=n_estimators[0],max_depth=maxDepth[0],oob_score=True,bootstrap=True)
classifier = ExtraTreesClassifier(n_estimators=n_estimators[0],max_depth=maxDepth[0],oob_score=True,bootstrap=True)
selector = RFE(classifier,n_features_to_select=n_featuresToSelect[0],step=stepSize)
pipeline = Pipeline([("scaler",scaler),("RFE",selector),("classifier",classifier)])
paramGrid = dict(RFE__n_features_to_select=n_featuresToSelect, classifier__max_depth=maxDepth, classifier__n_estimators=n_estimators)
# Do grid search
gridSearch = GridSearchCV(pipeline,param_grid=paramGrid,verbose=verboseLevel,n_jobs=n_jobs,cv=n_crossValidation)
gridSearch.fit(inputs,output)
estimator = gridSearch.best_estimator_
print "Results: "
print "Selected features: {0}".format(estimator.named_steps['RFE'].n_features_to_select)
print "Max depth: {0}".format(estimator.named_steps['classifier'].max_depth)
print "Number of trees: {0}".format(estimator.named_steps['classifier'].n_estimators)
# Calculate accuracies
print "Calculate accuracies"
accuracy = []
for count in range(0,n_accuracy):
cv = StratifiedKFold(output,n_folds=n_folds,shuffle=True)
predicted = cross_validation.cross_val_predict(estimator,inputs,output,cv=cv,verbose=verboseLevel,n_jobs=n_jobs,)
score = metrics.accuracy_score(output,predicted,normalize=True)
accuracy.append(score)
print "Accuracy array: {0}".format(accuracy)
print "Cross-validation accuracy of final model {0}".format(np.mean(accuracy))
|
bcraenen/KFClassifier
|
other/methods/ExtraTreesSample.py
|
Python
|
gpl-3.0
| 3,466
|
#***************************************************************
#* Name: LMS7002_DCCAL.py
#* Purpose: Class implementing LMS7002 DCCAL functions
#* Author: Lime Microsystems ()
#* Created: 2017-02-10
#* Copyright: Lime Microsystems (limemicro.com)
#* License:
#**************************************************************
from LMS7002_base import *
class LMS7002_DCCAL(LMS7002_base):
__slots__ = [] # Used to generate error on typos
def __init__(self, chip):
self.chip = chip
self.channel = None
self.prefix = "DCCAL_"
#
# DCCAL_CFG (0x05C0)
#
# DCMODE
@property
def DCMODE(self):
"""
Get the value of DCMODE
"""
if self.chip.chipID == self.chip.chipIDMR3:
return self._readReg('CFG', 'DCMODE')
else:
raise ValueError("Bitfield DCMODE is not supported on chip version "+str(self.chip.chipID))
@DCMODE.setter
def DCMODE(self, value):
"""
Set the value of DCMODE
"""
if self.chip.chipID == self.chip.chipIDMR3:
if value not in [0, 1, 'MANUAL', 'AUTO']:
raise ValueError("Value must be [0,1,'MANUAL','AUTO']")
if value==0 or value=='MANUAL':
val = 0
else:
val = 1
self._writeReg('CFG', 'DCMODE', val)
else:
raise ValueError("Bitfield DCMODE is not supported on chip version "+str(self.chip.chipID))
# PD_DCDAC_RXB
@property
def PD_DCDAC_RXB(self):
"""
Get the value of PD_DCDAC_RXB
"""
if self.chip.chipID == self.chip.chipIDMR3:
return self._readReg('CFG', 'PD_DCDAC_RXB')
else:
raise ValueError("Bitfield PD_DCDAC_RXB is not supported on chip version "+str(self.chip.chipID))
@PD_DCDAC_RXB.setter
def PD_DCDAC_RXB(self, value):
"""
Set the value of PD_DCDAC_RXB
"""
if self.chip.chipID == self.chip.chipIDMR3:
if value not in [0, 1]:
raise ValueError("Value must be [0,1]")
self._writeReg('CFG', 'PD_DCDAC_RXB', value)
else:
raise ValueError("Bitfield PD_DCDAC_RXB is not supported on chip version "+str(self.chip.chipID))
# PD_DCDAC_RXA
@property
def PD_DCDAC_RXA(self):
"""
Get the value of PD_DCDAC_RXA
"""
if self.chip.chipID == self.chip.chipIDMR3:
return self._readReg('CFG', 'PD_DCDAC_RXA')
else:
raise ValueError("Bitfield PD_DCDAC_RXA is not supported on chip version "+str(self.chip.chipID))
@PD_DCDAC_RXA.setter
def PD_DCDAC_RXA(self, value):
"""
Set the value of PD_DCDAC_RXA
"""
if self.chip.chipID == self.chip.chipIDMR3:
if value not in [0, 1]:
raise ValueError("Value must be [0,1]")
self._writeReg('CFG', 'PD_DCDAC_RXA', value)
else:
raise ValueError("Bitfield PD_DCDAC_RXA is not supported on chip version "+str(self.chip.chipID))
# PD_DCDAC_TXB
@property
def PD_DCDAC_TXB(self):
"""
Get the value of PD_DCDAC_TXB
"""
if self.chip.chipID == self.chip.chipIDMR3:
return self._readReg('CFG', 'PD_DCDAC_TXB')
else:
raise ValueError("Bitfield PD_DCDAC_TXB is not supported on chip version "+str(self.chip.chipID))
@PD_DCDAC_TXB.setter
def PD_DCDAC_TXB(self, value):
"""
Set the value of PD_DCDAC_TXB
"""
if self.chip.chipID == self.chip.chipIDMR3:
if value not in [0, 1]:
raise ValueError("Value must be [0,1]")
self._writeReg('CFG', 'PD_DCDAC_TXB', value)
else:
raise ValueError("Bitfield PD_DCDAC_TXB is not supported on chip version "+str(self.chip.chipID))
# PD_DCDAC_TXA
@property
def PD_DCDAC_TXA(self):
"""
Get the value of PD_DCDAC_TXA
"""
if self.chip.chipID == self.chip.chipIDMR3:
return self._readReg('CFG', 'PD_DCDAC_TXA')
else:
raise ValueError("Bitfield PD_DCDAC_TXA is not supported on chip version "+str(self.chip.chipID))
@PD_DCDAC_TXA.setter
def PD_DCDAC_TXA(self, value):
"""
Set the value of PD_DCDAC_TXA
"""
if self.chip.chipID == self.chip.chipIDMR3:
if value not in [0, 1]:
raise ValueError("Value must be [0,1]")
self._writeReg('CFG', 'PD_DCDAC_TXA', value)
else:
raise ValueError("Bitfield PD_DCDAC_TXA is not supported on chip version "+str(self.chip.chipID))
# PD_DCCMP_RXB
@property
def PD_DCCMP_RXB(self):
"""
Get the value of PD_DCCMP_RXB
"""
if self.chip.chipID == self.chip.chipIDMR3:
return self._readReg('CFG', 'PD_DCCMP_RXB')
else:
raise ValueError("Bitfield PD_DCCMP_RXB is not supported on chip version "+str(self.chip.chipID))
@PD_DCCMP_RXB.setter
def PD_DCCMP_RXB(self, value):
"""
Set the value of PD_DCCMP_RXB
"""
if self.chip.chipID == self.chip.chipIDMR3:
if value not in [0, 1]:
raise ValueError("Value must be [0,1]")
self._writeReg('CFG', 'PD_DCCMP_RXB', value)
else:
raise ValueError("Bitfield PD_DCCMP_RXB is not supported on chip version "+str(self.chip.chipID))
# PD_DCCMP_RXA
@property
def PD_DCCMP_RXA(self):
"""
Get the value of PD_DCCMP_RXA
"""
if self.chip.chipID == self.chip.chipIDMR3:
return self._readReg('CFG', 'PD_DCCMP_RXA')
else:
raise ValueError("Bitfield PD_DCCMP_RXA is not supported on chip version "+str(self.chip.chipID))
@PD_DCCMP_RXA.setter
def PD_DCCMP_RXA(self, value):
"""
Set the value of PD_DCCMP_RXA
"""
if self.chip.chipID == self.chip.chipIDMR3:
if value not in [0, 1]:
raise ValueError("Value must be [0,1]")
self._writeReg('CFG', 'PD_DCCMP_RXA', value)
else:
raise ValueError("Bitfield PD_DCCMP_RXA is not supported on chip version "+str(self.chip.chipID))
# PD_DCCMP_TXB
@property
def PD_DCCMP_TXB(self):
"""
Get the value of PD_DCCMP_TXB
"""
if self.chip.chipID == self.chip.chipIDMR3:
return self._readReg('CFG', 'PD_DCCMP_TXB')
else:
raise ValueError("Bitfield PD_DCCMP_TXB is not supported on chip version "+str(self.chip.chipID))
@PD_DCCMP_TXB.setter
def PD_DCCMP_TXB(self, value):
"""
Set the value of PD_DCCMP_TXB
"""
if self.chip.chipID == self.chip.chipIDMR3:
if value not in [0, 1]:
raise ValueError("Value must be [0,1]")
self._writeReg('CFG', 'PD_DCCMP_TXB', value)
else:
raise ValueError("Bitfield PD_DCCMP_TXB is not supported on chip version "+str(self.chip.chipID))
# PD_DCCMP_TXA
@property
def PD_DCCMP_TXA(self):
"""
Get the value of PD_DCCMP_TXA
"""
if self.chip.chipID == self.chip.chipIDMR3:
return self._readReg('CFG', 'PD_DCCMP_TXA')
else:
raise ValueError("Bitfield PD_DCCMP_TXA is not supported on chip version "+str(self.chip.chipID))
@PD_DCCMP_TXA.setter
def PD_DCCMP_TXA(self, value):
"""
Set the value of PD_DCCMP_TXA
"""
if self.chip.chipID == self.chip.chipIDMR3:
if value not in [0, 1]:
raise ValueError("Value must be [0,1]")
self._writeReg('CFG', 'PD_DCCMP_TXA', value)
else:
raise ValueError("Bitfield PD_DCCMP_TXA is not supported on chip version "+str(self.chip.chipID))
#
# DCCAL_STAT (0x05C1)
#
# DCCAL_CALSTATUS<7:0>
@property
def DCCAL_CALSTATUS(self):
"""
Get the value of DCCAL_CALSTATUS<7:0>
"""
if self.chip.chipID == self.chip.chipIDMR3:
return self._readReg('STAT', 'DCCAL_CALSTATUS<7:0>')
else:
raise ValueError("Bitfield DCCAL_CALSTATUS<7:0> is not supported on chip version "+str(self.chip.chipID))
@DCCAL_CALSTATUS.setter
def DCCAL_CALSTATUS(self, value):
"""
Set the value of DCCAL_CALSTATUS<7:0>
"""
if self.chip.chipID == self.chip.chipIDMR3:
if not(0<= value <=255):
raise ValueError("Value must be [0..255]")
self._writeReg('STAT', 'DCCAL_CALSTATUS<7:0>', value)
else:
raise ValueError("Bitfield DCCAL_CALSTATUS<7:0> is not supported on chip version "+str(self.chip.chipID))
# DCCAL_CMPSTATUS<7:0>
@property
def DCCAL_CMPSTATUS(self):
"""
Get the value of DCCAL_CMPSTATUS<7:0>
"""
if self.chip.chipID == self.chip.chipIDMR3:
return self._readReg('STAT', 'DCCAL_CMPSTATUS<7:0>')
else:
raise ValueError("Bitfield DCCAL_CMPSTATUS<7:0> is not supported on chip version "+str(self.chip.chipID))
@DCCAL_CMPSTATUS.setter
def DCCAL_CMPSTATUS(self, value):
"""
Set the value of DCCAL_CMPSTATUS<7:0>
"""
if self.chip.chipID == self.chip.chipIDMR3:
if not(0<= value <=255):
raise ValueError("Value must be [0..255]")
self._writeReg('STAT', 'DCCAL_CMPSTATUS<7:0>', value)
else:
raise ValueError("Bitfield DCCAL_CMPSTATUS<7:0> is not supported on chip version "+str(self.chip.chipID))
#
# DCCAL_CFG2 (0x05C2)
#
# DCCAL_CMPCFG<7:0>
@property
def DCCAL_CMPCFG(self):
"""
Get the value of DCCAL_CMPCFG<7:0>
"""
if self.chip.chipID == self.chip.chipIDMR3:
return self._readReg('CFG2', 'DCCAL_CMPCFG<7:0>')
else:
raise ValueError("Bitfield DCCAL_CMPCFG<7:0> is not supported on chip version "+str(self.chip.chipID))
@DCCAL_CMPCFG.setter
def DCCAL_CMPCFG(self, value):
"""
Set the value of DCCAL_CMPCFG<7:0>
"""
if self.chip.chipID == self.chip.chipIDMR3:
if not(0<= value <=255):
raise ValueError("Value must be [0..255]")
self._writeReg('CFG2', 'DCCAL_CMPCFG<7:0>', value)
else:
raise ValueError("Bitfield DCCAL_CMPCFG<7:0> is not supported on chip version "+str(self.chip.chipID))
# DCCAL_START<7:0>
@property
def DCCAL_START(self):
"""
Get the value of DCCAL_START<7:0>
"""
if self.chip.chipID == self.chip.chipIDMR3:
return self._readReg('CFG2', 'DCCAL_START<7:0>')
else:
raise ValueError("Bitfield DCCAL_START<7:0> is not supported on chip version "+str(self.chip.chipID))
@DCCAL_START.setter
def DCCAL_START(self, value):
"""
Set the value of DCCAL_START<7:0>
"""
if self.chip.chipID == self.chip.chipIDMR3:
if not(0<= value <=255):
raise ValueError("Value must be [0..255]")
self._writeReg('CFG2', 'DCCAL_START<7:0>', value)
else:
raise ValueError("Bitfield DCCAL_START<7:0> is not supported on chip version "+str(self.chip.chipID))
def startRXBQ(self):
"""
Starts RXBQ calibration.
"""
self.DCCAL_START = 0
self.DCCAL_START = 1<<7
def startRXBI(self):
"""
Starts RXBI calibration.
"""
self.DCCAL_START = 0
self.DCCAL_START = 1<<6
def startRXAQ(self):
"""
Starts RXAQ calibration.
"""
self.DCCAL_START = 0
self.DCCAL_START = 1<<5
def startRXAI(self):
"""
Starts RXAI calibration.
"""
self.DCCAL_START = 0
self.DCCAL_START = 1<<4
def startTXBQ(self):
"""
Starts TXBQ calibration.
"""
self.DCCAL_START = 0
self.DCCAL_START = 1<<3
def startTXBI(self):
"""
Starts TXBI calibration.
"""
self.DCCAL_START = 0
self.DCCAL_START = 1<<2
def startTXAQ(self):
"""
Starts TXAQ calibration.
"""
self.DCCAL_START = 0
self.DCCAL_START = 1<<1
def startTXAI(self):
"""
Starts TXAI calibration.
"""
self.DCCAL_START = 0
self.DCCAL_START = 1
#
# DCCAL_TXAI (0x05C3)
#
@property
def DC_TXAI(self):
"""
Get the value of DC_TXAI
"""
if self.chip.chipID == self.chip.chipIDMR3:
self._writeReg('TXAI', 'DCRD_TXAI', 0)
self._writeReg('TXAI', 'DCRD_TXAI', 1)
val = self._readReg('TXAI', 'DC_TXAI<10:0>')
return self.signMagnitudeToInt(val, 11)
else:
raise ValueError("Bitfield DC_TXAI is not supported on chip version "+str(self.chip.chipID))
@DC_TXAI.setter
def DC_TXAI(self, value):
"""
Set the value of DC_TXAI
"""
if self.chip.chipID == self.chip.chipIDMR3:
if not(-1024<= value <=1024):
raise ValueError("Value must be [-1024..1024]")
val = self.intToSignMagnitude(value, 11)
self._writeReg('TXAI', 'DC_TXAI<10:0>', val)
self._writeReg('TXAI', 'DCWR_TXAI', 0)
self._writeReg('TXAI', 'DCWR_TXAI', 1)
else:
raise ValueError("Bitfield TXAI is not supported on chip version "+str(self.chip.chipID))
#
# DCCAL_TXAQ (0x05C4)
#
@property
def DC_TXAQ(self):
"""
Get the value of DC_TXAQ
"""
if self.chip.chipID == self.chip.chipIDMR3:
self._writeReg('TXAQ', 'DCRD_TXAQ', 0)
self._writeReg('TXAQ', 'DCRD_TXAQ', 1)
val = self._readReg('TXAQ', 'DC_TXAQ<10:0>')
return self.signMagnitudeToInt(val, 11)
else:
raise ValueError("Bitfield DC_TXAQ is not supported on chip version "+str(self.chip.chipID))
@DC_TXAQ.setter
def DC_TXAQ(self, value):
"""
Set the value of DC_TXAQ
"""
if self.chip.chipID == self.chip.chipIDMR3:
if not(-1024<= value <=1024):
raise ValueError("Value must be [-1024..1024]")
val = self.intToSignMagnitude(value, 11)
self._writeReg('TXAQ', 'DC_TXAQ<10:0>', val)
self._writeReg('TXAQ', 'DCWR_TXAQ', 0)
self._writeReg('TXAQ', 'DCWR_TXAQ', 1)
else:
raise ValueError("Bitfield TXAQ is not supported on chip version "+str(self.chip.chipID))
#
# DCCAL_TXBI (0x05C5)
#
@property
def DC_TXBI(self):
"""
Get the value of DC_TXBI
"""
if self.chip.chipID == self.chip.chipIDMR3:
self._writeReg('TXBI', 'DCRD_TXBI', 0)
self._writeReg('TXBI', 'DCRD_TXBI', 1)
val = self._readReg('TXBI', 'DC_TXBI<10:0>')
return self.signMagnitudeToInt(val, 11)
else:
raise ValueError("Bitfield DC_TXBI is not supported on chip version "+str(self.chip.chipID))
@DC_TXBI.setter
def DC_TXBI(self, value):
"""
Set the value of DC_TXBI
"""
if self.chip.chipID == self.chip.chipIDMR3:
if not(-1024<= value <=1024):
raise ValueError("Value must be [-1024..1024]")
val = self.intToSignMagnitude(value, 11)
self._writeReg('TXBI', 'DC_TXBI<10:0>', val)
self._writeReg('TXBI', 'DCWR_TXBI', 0)
self._writeReg('TXBI', 'DCWR_TXBI', 1)
else:
raise ValueError("Bitfield TXBI is not supported on chip version "+str(self.chip.chipID))
#
# DCCAL_TXBQ (0x05C6)
#
@property
def DC_TXBQ(self):
"""
Get the value of DC_TXBQ
"""
if self.chip.chipID == self.chip.chipIDMR3:
self._writeReg('TXBQ', 'DCRD_TXBQ', 0)
self._writeReg('TXBQ', 'DCRD_TXBQ', 1)
val = self._readReg('TXBQ', 'DC_TXBQ<10:0>')
return self.signMagnitudeToInt(val, 11)
else:
raise ValueError("Bitfield DC_TXBQ is not supported on chip version "+str(self.chip.chipID))
@DC_TXBQ.setter
def DC_TXBQ(self, value):
"""
Set the value of DC_TXBQ
"""
if self.chip.chipID == self.chip.chipIDMR3:
if not(-1024<= value <=1024):
raise ValueError("Value must be [-1024..1024]")
val = self.intToSignMagnitude(value, 11)
self._writeReg('TXBQ', 'DC_TXBQ<10:0>', val)
self._writeReg('TXBQ', 'DCWR_TXBQ', 0)
self._writeReg('TXBQ', 'DCWR_TXBQ', 1)
else:
raise ValueError("Bitfield TXBQ is not supported on chip version "+str(self.chip.chipID))
#
# DCCAL_RXAI (0x05C7)
#
@property
def DC_RXAI(self):
"""
Get the value of DC_RXAI
"""
if self.chip.chipID == self.chip.chipIDMR3:
self._writeReg('RXAI', 'DCRD_RXAI', 0)
self._writeReg('RXAI', 'DCRD_RXAI', 1)
val = self._readReg('RXAI', 'DC_RXAI<6:0>')
return self.signMagnitudeToInt(val, 7)
else:
raise ValueError("Bitfield DC_RXAI is not supported on chip version "+str(self.chip.chipID))
@DC_RXAI.setter
def DC_RXAI(self, value):
"""
Set the value of DC_RXAI
"""
if self.chip.chipID == self.chip.chipIDMR3:
if not(-63<= value <=63):
raise ValueError("Value must be [-63..63]")
val = self.intToSignMagnitude(value, 7)
self._writeReg('RXAI', 'DC_RXAI<6:0>', val)
self._writeReg('RXAI', 'DCWR_RXAI', 0)
self._writeReg('RXAI', 'DCWR_RXAI', 1)
else:
raise ValueError("Bitfield RXAI is not supported on chip version "+str(self.chip.chipID))
#
# DCCAL_RXAQ (0x05C8)
#
@property
def DC_RXAQ(self):
"""
Get the value of DC_RXAQ
"""
if self.chip.chipID == self.chip.chipIDMR3:
self._writeReg('RXAQ', 'DCRD_RXAQ', 0)
self._writeReg('RXAQ', 'DCRD_RXAQ', 1)
val = self._readReg('RXAQ', 'DC_RXAQ<6:0>')
return self.signMagnitudeToInt(val, 7)
else:
raise ValueError("Bitfield DC_RXAQ is not supported on chip version "+str(self.chip.chipID))
@DC_RXAQ.setter
def DC_RXAQ(self, value):
"""
Set the value of DC_RXAQ
"""
if self.chip.chipID == self.chip.chipIDMR3:
if not(-63<= value <=63):
raise ValueError("Value must be [-63..63]")
val = self.intToSignMagnitude(value, 7)
self._writeReg('RXAQ', 'DC_RXAQ<6:0>', val)
self._writeReg('RXAQ', 'DCWR_RXAQ', 0)
self._writeReg('RXAQ', 'DCWR_RXAQ', 1)
else:
raise ValueError("Bitfield RXAQ is not supported on chip version "+str(self.chip.chipID))
#
# DCCAL_RXBI (0x05C9)
#
@property
def DC_RXBI(self):
"""
Get the value of DC_RXBI
"""
if self.chip.chipID == self.chip.chipIDMR3:
self._writeReg('RXBI', 'DCRD_RXBI', 0)
self._writeReg('RXBI', 'DCRD_RXBI', 1)
val = self._readReg('RXBI', 'DC_RXBI<6:0>')
return self.signMagnitudeToInt(val, 7)
else:
raise ValueError("Bitfield DC_RXBI is not supported on chip version "+str(self.chip.chipID))
@DC_RXBI.setter
def DC_RXBI(self, value):
"""
Set the value of DC_RXBI
"""
if self.chip.chipID == self.chip.chipIDMR3:
if not(-63<= value <=63):
raise ValueError("Value must be [-63..63]")
val = self.intToSignMagnitude(value, 7)
self._writeReg('RXBI', 'DC_RXBI<6:0>', val)
self._writeReg('RXBI', 'DCWR_RXBI', 0)
self._writeReg('RXBI', 'DCWR_RXBI', 1)
else:
raise ValueError("Bitfield RXBI is not supported on chip version "+str(self.chip.chipID))
#
# DCCAL_RXBQ (0x05CA)
#
@property
def DC_RXBQ(self):
"""
Get the value of DC_RXBQ
"""
if self.chip.chipID == self.chip.chipIDMR3:
self._writeReg('RXBQ', 'DCRD_RXBQ', 0)
self._writeReg('RXBQ', 'DCRD_RXBQ', 1)
val = self._readReg('RXBQ', 'DC_RXBQ<6:0>')
return self.signMagnitudeToInt(val, 7)
else:
raise ValueError("Bitfield DC_RXBQ is not supported on chip version "+str(self.chip.chipID))
@DC_RXBQ.setter
def DC_RXBQ(self, value):
"""
Set the value of DC_RXBQ
"""
if self.chip.chipID == self.chip.chipIDMR3:
if not(-63<= value <=63):
raise ValueError("Value must be [-63..63]")
val = self.intToSignMagnitude(value, 7)
self._writeReg('RXBQ', 'DC_RXBQ<6:0>', val)
self._writeReg('RXBQ', 'DCWR_RXBQ', 0)
self._writeReg('RXBQ', 'DCWR_RXBQ', 1)
else:
raise ValueError("Bitfield RXBQ is not supported on chip version "+str(self.chip.chipID))
# DC_RXCDIV<7:0>
@property
def DC_RXCDIV(self):
"""
Get the value of DC_RXCDIV<7:0>
"""
if self.chip.chipID == self.chip.chipIDMR3:
return self._readReg('CLKDIV', 'DC_RXCDIV<7:0>')
else:
raise ValueError("Bitfield DC_RXCDIV<7:0> is not supported on chip version "+str(self.chip.chipID))
@DC_RXCDIV.setter
def DC_RXCDIV(self, value):
"""
Set the value of DC_RXCDIV<7:0>
"""
if self.chip.chipID == self.chip.chipIDMR3:
if not(0<= value <=255):
raise ValueError("Value must be [0..255]")
self._writeReg('CLKDIV', 'DC_RXCDIV<7:0>', value)
else:
raise ValueError("Bitfield DC_RXCDIV<7:0> is not supported on chip version "+str(self.chip.chipID))
# DC_TXCDIV<7:0>
@property
def DC_TXCDIV(self):
"""
Get the value of DC_TXCDIV<7:0>
"""
if self.chip.chipID == self.chip.chipIDMR3:
return self._readReg('CLKDIV', 'DC_TXCDIV<7:0>')
else:
raise ValueError("Bitfield DC_TXCDIV<7:0> is not supported on chip version "+str(self.chip.chipID))
@DC_TXCDIV.setter
def DC_TXCDIV(self, value):
"""
Set the value of DC_TXCDIV<7:0>
"""
if self.chip.chipID == self.chip.chipIDMR3:
if not(0<= value <=255):
raise ValueError("Value must be [0..255]")
self._writeReg('CLKDIV', 'DC_TXCDIV<7:0>', value)
else:
raise ValueError("Bitfield DC_TXCDIV<7:0> is not supported on chip version "+str(self.chip.chipID))
# HYSCMP_RXB<2:0>
@property
def HYSCMP_RXB(self):
"""
Get the value of HYSCMP_RXB<2:0>
"""
if self.chip.chipID == self.chip.chipIDMR3:
return self._readReg('HYSTCFG', 'HYSCMP_RXB<2:0>')
else:
raise ValueError("Bitfield HYSCMP_RXB<2:0> is not supported on chip version "+str(self.chip.chipID))
@HYSCMP_RXB.setter
def HYSCMP_RXB(self, value):
"""
Set the value of HYSCMP_RXB<2:0>
"""
if self.chip.chipID == self.chip.chipIDMR3:
if not(0 <= value <= 7):
raise ValueError("Value must be [0..7]")
self._writeReg('HYSTCFG', 'HYSCMP_RXB<2:0>', value)
else:
raise ValueError("Bitfield HYSCMP_RXB<2:0> is not supported on chip version "+str(self.chip.chipID))
# HYSCMP_RXA<2:0>
@property
def HYSCMP_RXA(self):
"""
Get the value of HYSCMP_RXA<2:0>
"""
if self.chip.chipID == self.chip.chipIDMR3:
return self._readReg('HYSTCFG', 'HYSCMP_RXA<2:0>')
else:
raise ValueError("Bitfield HYSCMP_RXA<2:0> is not supported on chip version "+str(self.chip.chipID))
@HYSCMP_RXA.setter
def HYSCMP_RXA(self, value):
"""
Set the value of HYSCMP_RXA<2:0>
"""
if self.chip.chipID == self.chip.chipIDMR3:
if not(0 <= value <= 7):
raise ValueError("Value must be [0..7]")
self._writeReg('HYSTCFG', 'HYSCMP_RXA<2:0>', value)
else:
raise ValueError("Bitfield HYSCMP_RXA<2:0> is not supported on chip version "+str(self.chip.chipID))
# HYSCMP_TXB<2:0>
@property
def HYSCMP_TXB(self):
"""
Get the value of HYSCMP_TXB<2:0>
"""
if self.chip.chipID == self.chip.chipIDMR3:
return self._readReg('HYSTCFG', 'HYSCMP_TXB<2:0>')
else:
raise ValueError("Bitfield HYSCMP_TXB<2:0> is not supported on chip version "+str(self.chip.chipID))
@HYSCMP_TXB.setter
def HYSCMP_TXB(self, value):
"""
Set the value of HYSCMP_TXB<2:0>
"""
if self.chip.chipID == self.chip.chipIDMR3:
if not(0 <= value <= 7):
raise ValueError("Value must be [0..7]")
self._writeReg('HYSTCFG', 'HYSCMP_TXB<2:0>', value)
else:
raise ValueError("Bitfield HYSCMP_TXB<2:0> is not supported on chip version "+str(self.chip.chipID))
# HYSCMP_TXA<2:0>
@property
def HYSCMP_TXA(self):
"""
Get the value of HYSCMP_TXA<2:0>
"""
if self.chip.chipID == self.chip.chipIDMR3:
return self._readReg('HYSTCFG', 'HYSCMP_TXA<2:0>')
else:
raise ValueError("Bitfield HYSCMP_TXA<2:0> is not supported on chip version "+str(self.chip.chipID))
@HYSCMP_TXA.setter
def HYSCMP_TXA(self, value):
"""
Set the value of HYSCMP_TXA<2:0>
"""
if self.chip.chipID == self.chip.chipIDMR3:
if not(0 <= value <= 7):
raise ValueError("Value must be [0..7]")
self._writeReg('HYSTCFG', 'HYSCMP_TXA<2:0>', value)
else:
raise ValueError("Bitfield HYSCMP_TXA<2:0> is not supported on chip version "+str(self.chip.chipID))
|
bvacaliuc/pyrasdr
|
plugins/pyLMS7002M/pyLMS7002M/LMS7002_DCCAL.py
|
Python
|
gpl-3.0
| 27,053
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _, throw
from frappe.utils import flt, cint, add_days, cstr, add_months
import json
from erpnext.accounts.doctype.pricing_rule.pricing_rule import get_pricing_rule_for_item, set_transaction_type
from erpnext.setup.utils import get_exchange_rate
from frappe.model.meta import get_field_precision
from erpnext.stock.doctype.batch.batch import get_batch_no
from erpnext import get_company_currency
from erpnext.stock.doctype.item.item import get_item_defaults, get_uom_conv_factor
from erpnext.setup.doctype.item_group.item_group import get_item_group_defaults
from six import string_types, iteritems
sales_doctypes = ['Quotation', 'Sales Order', 'Delivery Note', 'Sales Invoice']
purchase_doctypes = ['Material Request', 'Supplier Quotation', 'Purchase Order', 'Purchase Receipt', 'Purchase Invoice']
@frappe.whitelist()
def get_item_details(args):
"""
args = {
"item_code": "",
"warehouse": None,
"customer": "",
"conversion_rate": 1.0,
"selling_price_list": None,
"price_list_currency": None,
"plc_conversion_rate": 1.0,
"doctype": "",
"name": "",
"supplier": None,
"transaction_date": None,
"conversion_rate": 1.0,
"buying_price_list": None,
"is_subcontracted": "Yes" / "No",
"ignore_pricing_rule": 0/1
"project": ""
"set_warehouse": ""
}
"""
args = process_args(args)
item = frappe.get_cached_doc("Item", args.item_code)
validate_item_details(args, item)
out = get_basic_details(args, item)
get_party_item_code(args, item, out)
set_valuation_rate(out, args)
update_party_blanket_order(args, out)
get_price_list_rate(args, item, out)
if args.customer and cint(args.is_pos):
out.update(get_pos_profile_item_details(args.company, args))
if out.get("warehouse"):
out.update(get_bin_details(args.item_code, out.warehouse))
# update args with out, if key or value not exists
for key, value in iteritems(out):
if args.get(key) is None:
args[key] = value
out.update(get_pricing_rule_for_item(args))
update_stock(args, out)
if args.transaction_date and item.lead_time_days:
out.schedule_date = out.lead_time_date = add_days(args.transaction_date,
item.lead_time_days)
if args.get("is_subcontracted") == "Yes":
out.bom = args.get('bom') or get_default_bom(args.item_code)
get_gross_profit(out)
if args.doctype == 'Material Request':
out.rate = args.rate or out.price_list_rate
out.amount = flt(args.qty * out.rate)
return out
def update_stock(args, out):
if (args.get("doctype") == "Delivery Note" or
(args.get("doctype") == "Sales Invoice" and args.get('update_stock'))) \
and out.warehouse and out.stock_qty > 0:
if out.has_batch_no and not args.get("batch_no"):
out.batch_no = get_batch_no(out.item_code, out.warehouse, out.qty)
actual_batch_qty = get_batch_qty(out.batch_no, out.warehouse, out.item_code)
if actual_batch_qty:
out.update(actual_batch_qty)
if out.has_serial_no and args.get('batch_no'):
reserved_so = get_so_reservation_for_item(args)
out.batch_no = args.get('batch_no')
out.serial_no = get_serial_no(out, args.serial_no, sales_order=reserved_so)
elif out.has_serial_no:
reserved_so = get_so_reservation_for_item(args)
out.serial_no = get_serial_no(out, args.serial_no, sales_order=reserved_so)
def set_valuation_rate(out, args):
if frappe.db.exists("Product Bundle", args.item_code, cache=True):
valuation_rate = 0.0
bundled_items = frappe.get_doc("Product Bundle", args.item_code)
for bundle_item in bundled_items.items:
valuation_rate += \
flt(get_valuation_rate(bundle_item.item_code, args.company, out.get("warehouse")).get("valuation_rate") \
* bundle_item.qty)
out.update({
"valuation_rate": valuation_rate
})
else:
out.update(get_valuation_rate(args.item_code, args.company, out.get("warehouse")))
def process_args(args):
if isinstance(args, string_types):
args = json.loads(args)
args = frappe._dict(args)
if not args.get("price_list"):
args.price_list = args.get("selling_price_list") or args.get("buying_price_list")
if args.barcode:
args.item_code = get_item_code(barcode=args.barcode)
elif not args.item_code and args.serial_no:
args.item_code = get_item_code(serial_no=args.serial_no)
set_transaction_type(args)
return args
@frappe.whitelist()
def get_item_code(barcode=None, serial_no=None):
if barcode:
item_code = frappe.db.get_value("Item Barcode", {"barcode": barcode}, fieldname=["parent"])
if not item_code:
frappe.throw(_("No Item with Barcode {0}").format(barcode))
elif serial_no:
item_code = frappe.db.get_value("Serial No", serial_no, "item_code")
if not item_code:
frappe.throw(_("No Item with Serial No {0}").format(serial_no))
return item_code
def validate_item_details(args, item):
if not args.company:
throw(_("Please specify Company"))
from erpnext.stock.doctype.item.item import validate_end_of_life
validate_end_of_life(item.name, item.end_of_life, item.disabled)
if args.transaction_type == "selling" and cint(item.has_variants):
throw(_("Item {0} is a template, please select one of its variants").format(item.name))
elif args.transaction_type == "buying" and args.doctype != "Material Request":
if args.get("is_subcontracted") == "Yes" and item.is_sub_contracted_item != 1:
throw(_("Item {0} must be a Sub-contracted Item").format(item.name))
def get_basic_details(args, item):
"""
:param args: {
"item_code": "",
"warehouse": None,
"customer": "",
"conversion_rate": 1.0,
"selling_price_list": None,
"price_list_currency": None,
"price_list_uom_dependant": None,
"plc_conversion_rate": 1.0,
"doctype": "",
"name": "",
"supplier": None,
"transaction_date": None,
"conversion_rate": 1.0,
"buying_price_list": None,
"is_subcontracted": "Yes" / "No",
"ignore_pricing_rule": 0/1
"project": "",
barcode: "",
serial_no: "",
currency: "",
update_stock: "",
price_list: "",
company: "",
order_type: "",
is_pos: "",
project: "",
qty: "",
stock_qty: "",
conversion_factor: ""
}
:param item: `item_code` of Item object
:return: frappe._dict
"""
if not item:
item = frappe.get_doc("Item", args.get("item_code"))
if item.variant_of:
item.update_template_tables()
from frappe.defaults import get_user_default_as_list
user_default_warehouse_list = get_user_default_as_list('Warehouse')
user_default_warehouse = user_default_warehouse_list[0] \
if len(user_default_warehouse_list) == 1 else ""
item_defaults = get_item_defaults(item.name, args.company)
item_group_defaults = get_item_group_defaults(item.name, args.company)
warehouse = args.get("set_warehouse") or user_default_warehouse or item_defaults.get("default_warehouse") or\
item_group_defaults.get("default_warehouse") or args.warehouse
if args.get('doctype') == "Material Request" and not args.get('material_request_type'):
args['material_request_type'] = frappe.db.get_value('Material Request',
args.get('name'), 'material_request_type', cache=True)
#Set the UOM to the Default Sales UOM or Default Purchase UOM if configured in the Item Master
if not args.uom:
if args.get('doctype') in sales_doctypes:
args.uom = item.sales_uom if item.sales_uom else item.stock_uom
elif (args.get('doctype') in ['Purchase Order', 'Purchase Receipt', 'Purchase Invoice']) or \
(args.get('doctype') == 'Material Request' and args.get('material_request_type') == 'Purchase'):
args.uom = item.purchase_uom if item.purchase_uom else item.stock_uom
else:
args.uom = item.stock_uom
out = frappe._dict({
"item_code": item.name,
"item_name": item.item_name,
"description": cstr(item.description).strip(),
"image": cstr(item.image).strip(),
"warehouse": warehouse,
"income_account": get_default_income_account(args, item_defaults, item_group_defaults),
"expense_account": get_default_expense_account(args, item_defaults, item_group_defaults),
"cost_center": get_default_cost_center(args, item_defaults, item_group_defaults),
'has_serial_no': item.has_serial_no,
'has_batch_no': item.has_batch_no,
"batch_no": None,
"item_tax_rate": json.dumps(dict(([d.tax_type, d.tax_rate] for d in
item.get("taxes")))),
"uom": args.uom,
"min_order_qty": flt(item.min_order_qty) if args.doctype == "Material Request" else "",
"qty": args.qty or 1.0,
"stock_qty": args.qty or 1.0,
"price_list_rate": 0.0,
"base_price_list_rate": 0.0,
"rate": 0.0,
"base_rate": 0.0,
"amount": 0.0,
"base_amount": 0.0,
"net_rate": 0.0,
"net_amount": 0.0,
"discount_percentage": 0.0,
"supplier": get_default_supplier(args, item_defaults, item_group_defaults),
"update_stock": args.get("update_stock") if args.get('doctype') in ['Sales Invoice', 'Purchase Invoice'] else 0,
"delivered_by_supplier": item.delivered_by_supplier if args.get("doctype") in ["Sales Order", "Sales Invoice"] else 0,
"is_fixed_asset": item.is_fixed_asset,
"weight_per_unit":item.weight_per_unit,
"weight_uom":item.weight_uom,
"last_purchase_rate": item.last_purchase_rate if args.get("doctype") in ["Purchase Order"] else 0,
"transaction_date": args.get("transaction_date")
})
if item.get("enable_deferred_revenue") or item.get("enable_deferred_expense"):
out.update(calculate_service_end_date(args, item))
# calculate conversion factor
if item.stock_uom == args.uom:
out.conversion_factor = 1.0
else:
out.conversion_factor = args.conversion_factor or \
get_conversion_factor(item.name, args.uom).get("conversion_factor")
args.conversion_factor = out.conversion_factor
out.stock_qty = out.qty * out.conversion_factor
# calculate last purchase rate
if args.get('doctype') in purchase_doctypes:
from erpnext.buying.doctype.purchase_order.purchase_order import item_last_purchase_rate
out.last_purchase_rate = item_last_purchase_rate(args.name, args.conversion_rate, item.name, out.conversion_factor)
# if default specified in item is for another company, fetch from company
for d in [
["Account", "income_account", "default_income_account"],
["Account", "expense_account", "default_expense_account"],
["Cost Center", "cost_center", "cost_center"],
["Warehouse", "warehouse", ""]]:
if not out[d[1]]:
out[d[1]] = frappe.get_cached_value('Company', args.company, d[2]) if d[2] else None
for fieldname in ("item_name", "item_group", "barcodes", "brand", "stock_uom"):
out[fieldname] = item.get(fieldname)
child_doctype = args.doctype + ' Item'
meta = frappe.get_meta(child_doctype)
if meta.get_field("barcode"):
update_barcode_value(out)
return out
def update_barcode_value(out):
from erpnext.accounts.doctype.sales_invoice.pos import get_barcode_data
barcode_data = get_barcode_data([out])
# If item has one barcode then update the value of the barcode field
if barcode_data and len(barcode_data.get(out.item_code)) == 1:
out['barcode'] = barcode_data.get(out.item_code)[0]
@frappe.whitelist()
def calculate_service_end_date(args, item=None):
args = process_args(args)
if not item:
item = frappe.get_cached_doc("Item", args.item_code)
doctype = args.get("parenttype") or args.get("doctype")
if doctype == "Sales Invoice":
enable_deferred = "enable_deferred_revenue"
no_of_months = "no_of_months"
account = "deferred_revenue_account"
else:
enable_deferred = "enable_deferred_expense"
no_of_months = "no_of_months_exp"
account = "deferred_expense_account"
service_start_date = args.service_start_date if args.service_start_date else args.transaction_date
service_end_date = add_months(service_start_date, item.get(no_of_months))
deferred_detail = {
"service_start_date": service_start_date,
"service_end_date": service_end_date
}
deferred_detail[enable_deferred] = item.get(enable_deferred)
deferred_detail[account] = get_default_deferred_account(args, item, fieldname=account)
return deferred_detail
def get_default_income_account(args, item, item_group):
return (item.get("income_account")
or item_group.get("income_account")
or args.income_account)
def get_default_expense_account(args, item, item_group):
return (item.get("expense_account")
or item_group.get("expense_account")
or args.expense_account)
def get_default_deferred_account(args, item, fieldname=None):
if item.get("enable_deferred_revenue") or item.get("enable_deferred_expense"):
return (item.get(fieldname)
or args.get(fieldname)
or frappe.get_cached_value('Company', args.company, "default_"+fieldname))
else:
return None
def get_default_cost_center(args, item, item_group, company=None):
cost_center = None
if args.get('project'):
cost_center = frappe.db.get_value("Project", args.get("project"), "cost_center", cache=True)
if not cost_center:
if args.get('customer'):
cost_center = item.get('selling_cost_center') or item_group.get('selling_cost_center')
else:
cost_center = item.get('buying_cost_center') or item_group.get('buying_cost_center')
cost_center = cost_center or args.get("cost_center")
if (company and cost_center
and frappe.get_cached_value("Cost Center", cost_center, "company") != company):
return None
return cost_center
def get_default_supplier(args, item, item_group):
return (item.get("default_supplier")
or item_group.get("default_supplier"))
def get_price_list_rate(args, item_doc, out):
meta = frappe.get_meta(args.parenttype or args.doctype)
if meta.get_field("currency") or args.get('currency'):
pl_details = get_price_list_currency_and_exchange_rate(args)
args.update(pl_details)
validate_price_list(args)
if meta.get_field("currency"):
validate_conversion_rate(args, meta)
price_list_rate = get_price_list_rate_for(args, item_doc.name) or 0
# variant
if not price_list_rate and item_doc.variant_of:
price_list_rate = get_price_list_rate_for(args, item_doc.variant_of)
# insert in database
if not price_list_rate:
if args.price_list and args.rate:
insert_item_price(args)
return {}
out.price_list_rate = flt(price_list_rate) * flt(args.plc_conversion_rate) \
/ flt(args.conversion_rate)
if not out.price_list_rate and args.transaction_type=="buying":
from erpnext.stock.doctype.item.item import get_last_purchase_details
out.update(get_last_purchase_details(item_doc.name,
args.name, args.conversion_rate))
def insert_item_price(args):
"""Insert Item Price if Price List and Price List Rate are specified and currency is the same"""
if frappe.db.get_value("Price List", args.price_list, "currency", cache=True) == args.currency \
and cint(frappe.db.get_single_value("Stock Settings", "auto_insert_price_list_rate_if_missing")):
if frappe.has_permission("Item Price", "write"):
price_list_rate = (args.rate / args.get('conversion_factor')
if args.get("conversion_factor") else args.rate)
item_price = frappe.db.get_value('Item Price',
{'item_code': args.item_code, 'price_list': args.price_list, 'currency': args.currency},
['name', 'price_list_rate'], as_dict=1)
if item_price and item_price.name:
if item_price.price_list_rate != price_list_rate:
frappe.db.set_value('Item Price', item_price.name, "price_list_rate", price_list_rate)
frappe.msgprint(_("Item Price updated for {0} in Price List {1}").format(args.item_code,
args.price_list), alert=True)
else:
item_price = frappe.get_doc({
"doctype": "Item Price",
"price_list": args.price_list,
"item_code": args.item_code,
"currency": args.currency,
"price_list_rate": price_list_rate
})
item_price.insert()
frappe.msgprint(_("Item Price added for {0} in Price List {1}").format(args.item_code,
args.price_list), alert=True)
def get_item_price(args, item_code, ignore_party=False):
"""
Get name, price_list_rate from Item Price based on conditions
Check if the desired qty is within the increment of the packing list.
:param args: dict (or frappe._dict) with mandatory fields price_list, uom
optional fields min_qty, transaction_date, customer, supplier
:param item_code: str, Item Doctype field item_code
"""
args['item_code'] = item_code
conditions = """where item_code=%(item_code)s
and price_list=%(price_list)s
and ifnull(uom, '') in ('', %(uom)s)"""
if not ignore_party:
if args.get("customer"):
conditions += " and customer=%(customer)s"
elif args.get("supplier"):
conditions += " and supplier=%(supplier)s"
else:
conditions += " and (customer is null or customer = '') and (supplier is null or supplier = '')"
if args.get('min_qty'):
conditions += " and ifnull(min_qty, 0) <= %(min_qty)s"
if args.get('transaction_date'):
conditions += """ and %(transaction_date)s between
ifnull(valid_from, '2000-01-01') and ifnull(valid_upto, '2500-12-31')"""
return frappe.db.sql(""" select name, price_list_rate, uom
from `tabItem Price` {conditions}
order by uom desc, min_qty desc """.format(conditions=conditions), args)
def get_price_list_rate_for(args, item_code):
"""
Return Price Rate based on min_qty of each Item Price Rate.\
For example, desired qty is 10 and Item Price Rates exists
for min_qty 9 and min_qty 20. It returns Item Price Rate for qty 9 as
the best fit in the range of avaliable min_qtyies
:param customer: link to Customer DocType
:param supplier: link to Supplier DocType
:param price_list: str (Standard Buying or Standard Selling)
:param item_code: str, Item Doctype field item_code
:param qty: Desired Qty
:param transaction_date: Date of the price
"""
item_price_args = {
"item_code": item_code,
"price_list": args.get('price_list'),
"customer": args.get('customer'),
"supplier": args.get('supplier'),
"uom": args.get('uom'),
"min_qty": args.get('qty'),
"transaction_date": args.get('transaction_date'),
}
item_price_data = 0
price_list_rate = get_item_price(item_price_args, item_code)
if price_list_rate:
desired_qty = args.get("qty")
if desired_qty and check_packing_list(price_list_rate[0][0], desired_qty, item_code):
item_price_data = price_list_rate
else:
for field in ["customer", "supplier", "min_qty"]:
del item_price_args[field]
general_price_list_rate = get_item_price(item_price_args, item_code, ignore_party=args.get("ignore_party"))
if not general_price_list_rate and args.get("uom") != args.get("stock_uom"):
item_price_args["uom"] = args.get("stock_uom")
general_price_list_rate = get_item_price(item_price_args, item_code, ignore_party=args.get("ignore_party"))
if general_price_list_rate:
item_price_data = general_price_list_rate
if item_price_data:
if item_price_data[0][2] == args.get("uom"):
return item_price_data[0][1]
elif not args.get('price_list_uom_dependant'):
return flt(item_price_data[0][1] * flt(args.get("conversion_factor", 1)))
else:
return item_price_data[0][1]
def check_packing_list(price_list_rate_name, desired_qty, item_code):
"""
Check if the desired qty is within the increment of the packing list.
:param price_list_rate_name: Name of Item Price
:param desired_qty: Desired Qt
:param item_code: str, Item Doctype field item_code
:param qty: Desired Qt
"""
flag = True
item_price = frappe.get_doc("Item Price", price_list_rate_name)
if item_price.packing_unit:
packing_increment = desired_qty % item_price.packing_unit
if packing_increment != 0:
flag = False
return flag
def validate_price_list(args):
if args.get("price_list"):
if not frappe.db.get_value("Price List",
{"name": args.price_list, args.transaction_type: 1, "enabled": 1}):
throw(_("Price List {0} is disabled or does not exist").format(args.price_list))
elif not args.get("supplier"):
throw(_("Price List not selected"))
def validate_conversion_rate(args, meta):
from erpnext.controllers.accounts_controller import validate_conversion_rate
if (not args.conversion_rate
and args.currency==frappe.get_cached_value('Company', args.company, "default_currency")):
args.conversion_rate = 1.0
# validate currency conversion rate
validate_conversion_rate(args.currency, args.conversion_rate,
meta.get_label("conversion_rate"), args.company)
args.conversion_rate = flt(args.conversion_rate,
get_field_precision(meta.get_field("conversion_rate"),
frappe._dict({"fields": args})))
if args.price_list:
if (not args.plc_conversion_rate
and args.price_list_currency==frappe.db.get_value("Price List", args.price_list, "currency", cache=True)):
args.plc_conversion_rate = 1.0
# validate price list currency conversion rate
if not args.get("price_list_currency"):
throw(_("Price List Currency not selected"))
else:
validate_conversion_rate(args.price_list_currency, args.plc_conversion_rate,
meta.get_label("plc_conversion_rate"), args.company)
if meta.get_field("plc_conversion_rate"):
args.plc_conversion_rate = flt(args.plc_conversion_rate,
get_field_precision(meta.get_field("plc_conversion_rate"),
frappe._dict({"fields": args})))
def get_party_item_code(args, item_doc, out):
if args.transaction_type=="selling" and args.customer:
out.customer_item_code = None
if args.quotation_to and args.quotation_to != 'Customer':
return
customer_item_code = item_doc.get("customer_items", {"customer_name": args.customer})
if customer_item_code:
out.customer_item_code = customer_item_code[0].ref_code
else:
customer_group = frappe.get_cached_value("Customer", args.customer, "customer_group")
customer_group_item_code = item_doc.get("customer_items", {"customer_group": customer_group})
if customer_group_item_code and not customer_group_item_code[0].customer_name:
out.customer_item_code = customer_group_item_code[0].ref_code
if args.transaction_type=="buying" and args.supplier:
item_supplier = item_doc.get("supplier_items", {"supplier": args.supplier})
out.supplier_part_no = item_supplier[0].supplier_part_no if item_supplier else None
def get_pos_profile_item_details(company, args, pos_profile=None, update_data=False):
res = frappe._dict()
if not frappe.flags.pos_profile and not pos_profile:
pos_profile = frappe.flags.pos_profile = get_pos_profile(company, args.get('pos_profile'))
if pos_profile:
for fieldname in ("income_account", "cost_center", "warehouse", "expense_account"):
if (not args.get(fieldname) or update_data) and pos_profile.get(fieldname):
res[fieldname] = pos_profile.get(fieldname)
if res.get("warehouse"):
res.actual_qty = get_bin_details(args.item_code,
res.warehouse).get("actual_qty")
return res
@frappe.whitelist()
def get_pos_profile(company, pos_profile=None, user=None):
if pos_profile: return frappe.get_cached_doc('POS Profile', pos_profile)
if not user:
user = frappe.session['user']
condition = "pfu.user = %(user)s AND pfu.default=1"
if user and company:
condition = "pfu.user = %(user)s AND pf.company = %(company)s AND pfu.default=1"
pos_profile = frappe.db.sql("""SELECT pf.*
FROM
`tabPOS Profile` pf LEFT JOIN `tabPOS Profile User` pfu
ON
pf.name = pfu.parent
WHERE
{cond} AND pf.disabled = 0
""".format(cond = condition), {
'user': user,
'company': company
}, as_dict=1)
if not pos_profile and company:
pos_profile = frappe.db.sql("""SELECT pf.*
FROM
`tabPOS Profile` pf LEFT JOIN `tabPOS Profile User` pfu
ON
pf.name = pfu.parent
WHERE
pf.company = %(company)s AND pf.disabled = 0
""", {
'company': company
}, as_dict=1)
return pos_profile and pos_profile[0] or None
def get_serial_nos_by_fifo(args, sales_order=None):
if frappe.db.get_single_value("Stock Settings", "automatically_set_serial_nos_based_on_fifo"):
return "\n".join(frappe.db.sql_list("""select name from `tabSerial No`
where item_code=%(item_code)s and warehouse=%(warehouse)s and
sales_order=IF(%(sales_order)s IS NULL, sales_order, %(sales_order)s)
order by timestamp(purchase_date, purchase_time)
asc limit %(qty)s""",
{
"item_code": args.item_code,
"warehouse": args.warehouse,
"qty": abs(cint(args.stock_qty)),
"sales_order": sales_order
}))
def get_serial_no_batchwise(args, sales_order=None):
if frappe.db.get_single_value("Stock Settings", "automatically_set_serial_nos_based_on_fifo"):
return "\n".join(frappe.db.sql_list("""select name from `tabSerial No`
where item_code=%(item_code)s and warehouse=%(warehouse)s and
sales_order=IF(%(sales_order)s IS NULL, sales_order, %(sales_order)s)
and batch_no=IF(%(batch_no)s IS NULL, batch_no, %(batch_no)s) order
by timestamp(purchase_date, purchase_time) asc limit %(qty)s""", {
"item_code": args.item_code,
"warehouse": args.warehouse,
"batch_no": args.batch_no,
"qty": abs(cint(args.stock_qty)),
"sales_order": sales_order
}))
@frappe.whitelist()
def get_conversion_factor(item_code, uom):
variant_of = frappe.db.get_value("Item", item_code, "variant_of", cache=True)
filters = {"parent": item_code, "uom": uom}
if variant_of:
filters["parent"] = ("in", (item_code, variant_of))
conversion_factor = frappe.db.get_value("UOM Conversion Detail",
filters, "conversion_factor")
if not conversion_factor:
stock_uom = frappe.db.get_value("Item", item_code, "stock_uom")
conversion_factor = get_uom_conv_factor(uom, stock_uom)
return {"conversion_factor": conversion_factor or 1.0}
@frappe.whitelist()
def get_projected_qty(item_code, warehouse):
return {"projected_qty": frappe.db.get_value("Bin",
{"item_code": item_code, "warehouse": warehouse}, "projected_qty")}
@frappe.whitelist()
def get_bin_details(item_code, warehouse):
return frappe.db.get_value("Bin", {"item_code": item_code, "warehouse": warehouse},
["projected_qty", "actual_qty", "reserved_qty"], as_dict=True, cache=True) \
or {"projected_qty": 0, "actual_qty": 0, "reserved_qty": 0}
@frappe.whitelist()
def get_serial_no_details(item_code, warehouse, stock_qty, serial_no):
args = frappe._dict({"item_code":item_code, "warehouse":warehouse, "stock_qty":stock_qty, "serial_no":serial_no})
serial_no = get_serial_no(args)
return {'serial_no': serial_no}
@frappe.whitelist()
def get_bin_details_and_serial_nos(item_code, warehouse, has_batch_no, stock_qty=None, serial_no=None):
bin_details_and_serial_nos = {}
bin_details_and_serial_nos.update(get_bin_details(item_code, warehouse))
if flt(stock_qty) > 0:
if has_batch_no:
args = frappe._dict({"item_code":item_code, "warehouse":warehouse, "stock_qty":stock_qty})
serial_no = get_serial_no(args)
bin_details_and_serial_nos.update({'serial_no': serial_no})
return bin_details_and_serial_nos
bin_details_and_serial_nos.update(get_serial_no_details(item_code, warehouse, stock_qty, serial_no))
return bin_details_and_serial_nos
@frappe.whitelist()
def get_batch_qty_and_serial_no(batch_no, stock_qty, warehouse, item_code, has_serial_no):
batch_qty_and_serial_no = {}
batch_qty_and_serial_no.update(get_batch_qty(batch_no, warehouse, item_code))
if (flt(batch_qty_and_serial_no.get('actual_batch_qty')) >= flt(stock_qty)) and has_serial_no:
args = frappe._dict({"item_code":item_code, "warehouse":warehouse, "stock_qty":stock_qty, "batch_no":batch_no})
serial_no = get_serial_no(args)
batch_qty_and_serial_no.update({'serial_no': serial_no})
return batch_qty_and_serial_no
@frappe.whitelist()
def get_batch_qty(batch_no, warehouse, item_code):
from erpnext.stock.doctype.batch import batch
if batch_no:
return {'actual_batch_qty': batch.get_batch_qty(batch_no, warehouse)}
@frappe.whitelist()
def apply_price_list(args, as_doc=False):
"""Apply pricelist on a document-like dict object and return as
{'parent': dict, 'children': list}
:param args: See below
:param as_doc: Updates value in the passed dict
args = {
"doctype": "",
"name": "",
"items": [{"doctype": "", "name": "", "item_code": "", "brand": "", "item_group": ""}, ...],
"conversion_rate": 1.0,
"selling_price_list": None,
"price_list_currency": None,
"price_list_uom_dependant": None,
"plc_conversion_rate": 1.0,
"doctype": "",
"name": "",
"supplier": None,
"transaction_date": None,
"conversion_rate": 1.0,
"buying_price_list": None,
"ignore_pricing_rule": 0/1
}
"""
args = process_args(args)
parent = get_price_list_currency_and_exchange_rate(args)
children = []
if "items" in args:
item_list = args.get("items")
args.update(parent)
for item in item_list:
args_copy = frappe._dict(args.copy())
args_copy.update(item)
item_details = apply_price_list_on_item(args_copy)
children.append(item_details)
if as_doc:
args.price_list_currency = parent.price_list_currency,
args.plc_conversion_rate = parent.plc_conversion_rate
if args.get('items'):
for i, item in enumerate(args.get('items')):
for fieldname in children[i]:
# if the field exists in the original doc
# update the value
if fieldname in item and fieldname not in ("name", "doctype"):
item[fieldname] = children[i][fieldname]
return args
else:
return {
"parent": parent,
"children": children
}
def apply_price_list_on_item(args):
item_details = frappe._dict()
item_doc = frappe.get_doc("Item", args.item_code)
get_price_list_rate(args, item_doc, item_details)
item_details.update(get_pricing_rule_for_item(args))
return item_details
def get_price_list_currency(price_list):
if price_list:
result = frappe.db.get_value("Price List", {"name": price_list,
"enabled": 1}, ["name", "currency"], as_dict=True)
if not result:
throw(_("Price List {0} is disabled or does not exist").format(price_list))
return result.currency
def get_price_list_uom_dependant(price_list):
if price_list:
result = frappe.db.get_value("Price List", {"name": price_list,
"enabled": 1}, ["name", "price_not_uom_dependent"], as_dict=True)
if not result:
throw(_("Price List {0} is disabled or does not exist").format(price_list))
return not result.price_not_uom_dependent
def get_price_list_currency_and_exchange_rate(args):
if not args.price_list:
return {}
if args.doctype in ['Quotation', 'Sales Order', 'Delivery Note', 'Sales Invoice']:
args.update({"exchange_rate": "for_selling"})
elif args.doctype in ['Purchase Order', 'Purchase Receipt', 'Purchase Invoice']:
args.update({"exchange_rate": "for_buying"})
price_list_currency = get_price_list_currency(args.price_list)
price_list_uom_dependant = get_price_list_uom_dependant(args.price_list)
plc_conversion_rate = args.plc_conversion_rate
company_currency = get_company_currency(args.company)
if (not plc_conversion_rate) or (price_list_currency and args.price_list_currency \
and price_list_currency != args.price_list_currency):
# cksgb 19/09/2016: added args.transaction_date as posting_date argument for get_exchange_rate
plc_conversion_rate = get_exchange_rate(price_list_currency, company_currency,
args.transaction_date, args.exchange_rate) or plc_conversion_rate
return frappe._dict({
"price_list_currency": price_list_currency,
"price_list_uom_dependant": price_list_uom_dependant,
"plc_conversion_rate": plc_conversion_rate
})
@frappe.whitelist()
def get_default_bom(item_code=None):
if item_code:
bom = frappe.db.get_value("BOM", {"docstatus": 1, "is_default": 1, "is_active": 1, "item": item_code})
if bom:
return bom
def get_valuation_rate(item_code, company, warehouse=None):
item = get_item_defaults(item_code, company)
item_group = get_item_group_defaults(item_code, company)
# item = frappe.get_doc("Item", item_code)
if item.get("is_stock_item"):
if not warehouse:
warehouse = item.get("default_warehouse") or item_group.get("default_warehouse")
return frappe.db.get_value("Bin", {"item_code": item_code, "warehouse": warehouse},
["valuation_rate"], as_dict=True) or {"valuation_rate": 0}
elif not item.get("is_stock_item"):
valuation_rate =frappe.db.sql("""select sum(base_net_amount) / sum(qty*conversion_factor)
from `tabPurchase Invoice Item`
where item_code = %s and docstatus=1""", item_code)
if valuation_rate:
return {"valuation_rate": valuation_rate[0][0] or 0.0}
else:
return {"valuation_rate": 0.0}
def get_gross_profit(out):
if out.valuation_rate:
out.update({
"gross_profit": ((out.base_rate - out.valuation_rate) * out.stock_qty)
})
return out
@frappe.whitelist()
def get_serial_no(args, serial_nos=None, sales_order=None):
serial_no = None
if isinstance(args, string_types):
args = json.loads(args)
args = frappe._dict(args)
if args.get('doctype') == 'Sales Invoice' and not args.get('update_stock'):
return ""
if args.get('warehouse') and args.get('stock_qty') and args.get('item_code'):
has_serial_no = frappe.get_value('Item', {'item_code': args.item_code}, "has_serial_no")
if args.get('batch_no') and has_serial_no == 1:
return get_serial_no_batchwise(args, sales_order)
elif has_serial_no == 1:
args = json.dumps({"item_code": args.get('item_code'),"warehouse": args.get('warehouse'),"stock_qty": args.get('stock_qty')})
args = process_args(args)
serial_no = get_serial_nos_by_fifo(args, sales_order)
if not serial_no and serial_nos:
# For POS
serial_no = serial_nos
return serial_no
def update_party_blanket_order(args, out):
blanket_order_details = get_blanket_order_details(args)
if blanket_order_details:
out.update(blanket_order_details)
@frappe.whitelist()
def get_blanket_order_details(args):
if isinstance(args, string_types):
args = frappe._dict(json.loads(args))
blanket_order_details = None
condition = ''
if args.item_code:
if args.customer and args.doctype == "Sales Order":
condition = ' and bo.customer=%(customer)s'
elif args.supplier and args.doctype == "Purchase Order":
condition = ' and bo.supplier=%(supplier)s'
if args.blanket_order:
condition += ' and bo.name =%(blanket_order)s'
if args.transaction_date:
condition += ' and bo.to_date>=%(transaction_date)s'
blanket_order_details = frappe.db.sql('''
select boi.rate as blanket_order_rate, bo.name as blanket_order
from `tabBlanket Order` bo, `tabBlanket Order Item` boi
where bo.company=%(company)s and boi.item_code=%(item_code)s
and bo.docstatus=1 and bo.name = boi.parent {0}
'''.format(condition), args, as_dict=True)
blanket_order_details = blanket_order_details[0] if blanket_order_details else ''
return blanket_order_details
def get_so_reservation_for_item(args):
reserved_so = None
if args.get('against_sales_order'):
if get_reserved_qty_for_so(args.get('against_sales_order'), args.get('item_code')):
reserved_so = args.get('against_sales_order')
elif args.get('against_sales_invoice'):
sales_order = frappe.db.sql("""select sales_order from `tabSales Invoice Item` where
parent=%s and item_code=%s""", (args.get('against_sales_invoice'), args.get('item_code')))
if sales_order and sales_order[0]:
if get_reserved_qty_for_so(sales_order[0][0], args.get('item_code')):
reserved_so = sales_order[0]
elif args.get("sales_order"):
if get_reserved_qty_for_so(args.get('sales_order'), args.get('item_code')):
reserved_so = args.get('sales_order')
return reserved_so
def get_reserved_qty_for_so(sales_order, item_code):
reserved_qty = frappe.db.sql("""select sum(qty) from `tabSales Order Item`
where parent=%s and item_code=%s and ensure_delivery_based_on_produced_serial_no=1
""", (sales_order, item_code))
if reserved_qty and reserved_qty[0][0]:
return reserved_qty[0][0]
else:
return 0
|
shubhamgupta123/erpnext
|
erpnext/stock/get_item_details.py
|
Python
|
gpl-3.0
| 35,632
|
# A simple timer for executing gcode templates
#
# Copyright (C) 2019 Eric Callahan <arksine.code@gmail.com>
#
# This file may be distributed under the terms of the GNU GPLv3 license.
import logging
class DelayedGcode:
def __init__(self, config):
self.printer = config.get_printer()
self.reactor = self.printer.get_reactor()
self.name = config.get_name().split()[1]
self.gcode = self.printer.lookup_object('gcode')
gcode_macro = self.printer.load_object(config, 'gcode_macro')
self.timer_gcode = gcode_macro.load_template(config, 'gcode')
self.duration = config.getfloat('initial_duration', 0., minval=0.)
self.timer_handler = None
self.inside_timer = self.repeat = False
self.printer.register_event_handler("klippy:ready", self._handle_ready)
self.gcode.register_mux_command(
"UPDATE_DELAYED_GCODE", "ID", self.name,
self.cmd_UPDATE_DELAYED_GCODE,
desc=self.cmd_UPDATE_DELAYED_GCODE_help)
def _handle_ready(self):
waketime = self.reactor.NEVER
if self.duration:
waketime = self.reactor.monotonic() + self.duration
self.timer_handler = self.reactor.register_timer(
self._gcode_timer_event, waketime)
def _gcode_timer_event(self, eventtime):
self.inside_timer = True
try:
self.gcode.run_script(self.timer_gcode.render())
except Exception:
logging.exception("Script running error")
nextwake = self.reactor.NEVER
if self.repeat:
nextwake = eventtime + self.duration
self.inside_timer = self.repeat = False
return nextwake
cmd_UPDATE_DELAYED_GCODE_help = "Update the duration of a delayed_gcode"
def cmd_UPDATE_DELAYED_GCODE(self, gcmd):
self.duration = gcmd.get_float('DURATION', minval=0.)
if self.inside_timer:
self.repeat = (self.duration != 0.)
else:
waketime = self.reactor.NEVER
if self.duration:
waketime = self.reactor.monotonic() + self.duration
self.reactor.update_timer(self.timer_handler, waketime)
def load_config_prefix(config):
return DelayedGcode(config)
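# Editor's note (illustrative, not part of the original file): a printer.cfg
# section handled by this module might look like the following; the section
# name and the G-Code body are placeholders.
#
#   [delayed_gcode report_temp]
#   initial_duration: 2.0
#   gcode:
#       M117 Nozzle at {printer.extruder.temperature}
#
# The timer can then be rescheduled or cancelled at runtime with, e.g.,
#   UPDATE_DELAYED_GCODE ID=report_temp DURATION=10
# where DURATION=0 cancels a pending timer, and a non-zero DURATION issued
# from inside the template itself makes the timer repeat.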
|
KevinOConnor/klipper
|
klippy/extras/delayed_gcode.py
|
Python
|
gpl-3.0
| 2,248
|
# -*- coding: utf-8 -*-
# Copyright 2007-2020 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import os
import math
import logging
import numpy as np
import scipy as sp
from hyperspy.defaults_parser import preferences
from hyperspy.misc.physical_constants import R, a0
from hyperspy.misc.eels.base_gos import GOSBase
from hyperspy.misc.elements import elements
from hyperspy.misc.export_dictionary import (
export_to_dictionary, load_from_dictionary)
_logger = logging.getLogger(__name__)
class HartreeSlaterGOS(GOSBase):
"""Read Hartree-Slater Generalized Oscillator Strenght parametrized
from files.
Parameters
----------
element_subshell : {str, dict}
Usually a string, for example, 'Ti_L3' for the GOS of the titanium L3
subshell. If a dictionary is passed, it is assumed that Hartree Slater
GOS was exported using `GOS.as_dictionary`, and will be reconstructed.
Methods
-------
readgosfile()
Read the GOS files of the element subshell from the location
defined in Preferences.
get_qaxis_and_gos(ienergy, qmin, qmax)
Given the energy axis index and qmin and qmax values, returns
the qaxis and gos between qmin and qmax using linear
interpolation to include qmin and qmax in the range.
as_dictionary()
Export the GOS as a dictionary that can be saved.
Attributes
----------
energy_axis : array
The tabulated energy axis
qaxis : array
The tabulated qaxis
energy_onset: float
The energy onset for the given element subshell as obtained
from internal tables.
"""
_name = 'Hartree-Slater'
def __init__(self, element_subshell):
"""
Parameters
----------
element_subshell : str
For example, 'Ti_L3' for the GOS of the titanium L3 subshell
"""
self._whitelist = {'gos_array': None,
'rel_energy_axis': None,
'qaxis': None,
'element': None,
'subshell': None
}
if isinstance(element_subshell, dict):
self.element = element_subshell['element']
self.subshell = element_subshell['subshell']
self.read_elements()
self._load_dictionary(element_subshell)
else:
# Check if Peter Rez's Hartree-Slater GOS files distributed by
# Gatan are available. Otherwise exit.
if not os.path.isdir(preferences.EELS.eels_gos_files_path):
raise IOError(
"The parametrized Hartree-Slater GOS files could not "
"found in %s ." % preferences.EELS.eels_gos_files_path +
"Please define a valid location for the files "
"in the preferences.")
self.element, self.subshell = element_subshell.split('_')
self.read_elements()
self.readgosfile()
def _load_dictionary(self, dictionary):
load_from_dictionary(self, dictionary)
self.energy_axis = self.rel_energy_axis + self.onset_energy
def as_dictionary(self, fullcopy=True):
"""Export the GOS as a dictionary
"""
dic = {}
export_to_dictionary(self, self._whitelist, dic, fullcopy)
return dic
def readgosfile(self):
info_str = (
"Hartree-Slater GOS\n" +
("\tElement: %s " % self.element) +
("\tSubshell: %s " % self.subshell) +
("\tOnset Energy = %s " % self.onset_energy))
_logger.info(info_str)
element = self.element
subshell = self.subshell
filename = os.path.join(
preferences.EELS.eels_gos_files_path,
(elements[element]['Atomic_properties']['Binding_energies']
[subshell]['filename']))
with open(filename) as f:
GOS_list = f.read().replace('\r', '').split()
# Map the parameters
info1_1 = float(GOS_list[2])
info1_2 = float(GOS_list[3])
ncol = int(GOS_list[5])
info2_1 = float(GOS_list[6])
info2_2 = float(GOS_list[7])
nrow = int(GOS_list[8])
self.gos_array = np.array(GOS_list[9:], dtype=np.float64)
# The division by R is not in the equations, but it seems that
# the GOS was tabulated this way
self.gos_array = self.gos_array.reshape(nrow, ncol) / R
del GOS_list
# Calculate the scale of the matrix
self.rel_energy_axis = self.get_parametrized_energy_axis(
info2_1, info2_2, nrow)
self.qaxis = self.get_parametrized_qaxis(
info1_1, info1_2, ncol)
self.energy_axis = self.rel_energy_axis + self.onset_energy
def integrateq(self, onset_energy, angle, E0):
energy_shift = onset_energy - self.onset_energy
self.energy_shift = energy_shift
qint = np.zeros((self.energy_axis.shape[0]))
# Calculate the cross section at each energy position of the
# tabulated GOS
gamma = 1 + E0 / 511.06
T = 511060 * (1 - 1 / gamma ** 2) / 2
for i in range(0, self.gos_array.shape[0]):
E = self.energy_axis[i] + energy_shift
# Calculate the limits of the q integral
qa0sqmin = (E ** 2) / (4 * R * T) + (E ** 3) / (
8 * gamma ** 3 * R * T ** 2)
p02 = T / (R * (1 - 2 * T / 511060))
pp2 = p02 - E / R * (gamma - E / 1022120)
qa0sqmax = qa0sqmin + 4 * np.sqrt(p02 * pp2) * \
(math.sin(angle / 2)) ** 2
qmin = math.sqrt(qa0sqmin) / a0
qmax = math.sqrt(qa0sqmax) / a0
# Perform the integration in a log grid
qaxis, gos = self.get_qaxis_and_gos(i, qmin, qmax)
logsqa0qaxis = np.log((a0 * qaxis) ** 2)
qint[i] = sp.integrate.simps(gos, logsqa0qaxis)
E = self.energy_axis + energy_shift
# Energy differential cross section in (barn/eV/atom)
qint *= (4.0 * np.pi * a0 ** 2.0 * R ** 2 / E / T *
self.subshell_factor) * 1e28
self.qint = qint
return sp.interpolate.interp1d(E, qint, kind=3)
|
dnjohnstone/hyperspy
|
hyperspy/misc/eels/hartree_slater_gos.py
|
Python
|
gpl-3.0
| 6,912
|
import facedetect
import cv2
def test_fd():
image = cv2.imread('abba.jpg')
print image.shape
FD = facedetect.FeatureDetect(image)
FD.detectEyes()
FD.detectFace()
print FD.features
if __name__ == '__main__':
test_fd()
|
emofeedback/facedetection
|
tests/test_fd.py
|
Python
|
gpl-3.0
| 247
|
# coding=utf-8
import unittest
"""754. Reach a Number
https://leetcode.com/problems/reach-a-number/description/
You are standing at position `0` on an infinite number line. There is a goal
at position `target`.
On each move, you can either go left or right. During the _n_-th move
(starting from 1), you take _n_ steps.
Return the minimum number of steps required to reach the destination.
**Example 1:**
**Input:** target = 3
**Output:** 2
**Explanation:**
On the first move we step from 0 to 1.
On the second step we step from 1 to 3.
**Example 2:**
**Input:** target = 2
**Output:** 3
**Explanation:**
On the first move we step from 0 to 1.
On the second move we step from 1 to -1.
On the third move we step from -1 to 2.
**Note:**
* `target` will be a non-zero integer in the range `[-10^9, 10^9]`.
Similar Questions:
"""
class Solution(object):
def reachNumber(self, target):
"""
:type target: int
:rtype: int
"""
def test(self):
pass
if __name__ == "__main__":
unittest.main()
|
openqt/algorithms
|
leetcode/python/lc754-reach-a-number.py
|
Python
|
gpl-3.0
| 1,163
|
# -*- coding: utf-8 -*-
# HORTON: Helpful Open-source Research TOol for N-fermion systems.
# Copyright (C) 2011-2016 The HORTON Development Team
#
# This file is part of HORTON.
#
# HORTON is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# HORTON is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>
#
# --
from horton import * # pylint: disable=wildcard-import,unused-wildcard-import
from horton.meanfield.test.common import helper_compute
def test_project_msg_identical():
mol = IOData.from_file(context.get_fn('test/water_sto3g_hf_g03.fchk'))
exp = mol.lf.create_expansion()
project_orbitals_mgs(mol.obasis, mol.obasis, mol.exp_alpha, exp)
assert (exp.energies == 0.0).all()
assert (exp.occupations == mol.exp_alpha.occupations).all()
assert abs(exp.coeffs[:,:-2] - mol.exp_alpha.coeffs[:,:-2]).max() < 1e-9
assert (exp.coeffs[:,-2:] == 0.0).all()
def test_project_ortho_basis_identical():
mol = IOData.from_file(context.get_fn('test/water_sto3g_hf_g03.fchk'))
exp = mol.lf.create_expansion()
project_orbitals_ortho(mol.obasis, mol.obasis, mol.exp_alpha, exp)
assert (exp.energies == 0.0).all()
assert (exp.occupations == mol.exp_alpha.occupations).all()
assert abs(exp.coeffs - mol.exp_alpha.coeffs).max() < 1e-9
def test_project_ortho_olp_identical():
mol = IOData.from_file(context.get_fn('test/water_sto3g_hf_g03.fchk'))
olp = mol.lf.create_two_index()
for i in xrange(olp.nbasis):
olp.set_element(i, i, 1.0)
exp = mol.lf.create_expansion()
project_orbitals_ortho(mol.obasis, mol.obasis, mol.exp_alpha, exp)
assert (exp.energies == 0.0).all()
assert (exp.occupations == mol.exp_alpha.occupations).all()
assert abs(exp.coeffs - mol.exp_alpha.coeffs).max() < 1e-9
def test_project_msg_larger():
# Load STO3G system and keep essential results
mol = IOData.from_file(context.get_fn('test/water_sto3g_hf_g03.fchk'))
obasis0 = mol.obasis
exp0 = mol.exp_alpha
# Upgrade the basis to 3-21G and project
obasis1 = get_gobasis(mol.coordinates, mol.numbers, '3-21G')
lf1 = DenseLinalgFactory(obasis1.nbasis)
exp1 = lf1.create_expansion()
project_orbitals_mgs(obasis0, obasis1, exp0, exp1)
assert (exp1.energies == 0.0).all()
assert exp0.occupations.sum() == exp1.occupations.sum()
assert (exp1.coeffs[:,5:] == 0.0).all()
# Check the normalization of the projected orbitals
olp = obasis1.compute_overlap(lf1)
exp1.check_orthonormality(olp)
# Setup HF hamiltonian and compute energy
kin = obasis1.compute_kinetic(lf1)
na = obasis1.compute_nuclear_attraction(mol.coordinates, mol.pseudo_numbers, lf1)
er = obasis1.compute_electron_repulsion(lf1)
terms = [
RTwoIndexTerm(kin, 'kin'),
RDirectTerm(er, 'hartree'),
RExchangeTerm(er, 'x_hf'),
RTwoIndexTerm(na, 'ne'),
]
ham = REffHam(terms)
# Compute energy after projection
energy1 = helper_compute(ham, lf1, exp1)[0]
# Optimize wfn
scf_solver = PlainSCFSolver(1e-6)
occ_model = AufbauOccModel(5)
scf_solver(ham, lf1, olp, occ_model, exp1)
energy2 = ham.cache['energy']
assert energy2 < energy1 # the energy should decrease after scf convergence
# Construct a core initial guess
guess_core_hamiltonian(olp, kin, na, exp1)
energy3 = helper_compute(ham, lf1, exp1)[0]
assert energy3 > energy1 # the projected guess should be better than the core guess
def test_project_msg_smaller():
# Load 3-21G system and keep essential results
mol = IOData.from_file(context.get_fn('test/li_h_3-21G_hf_g09.fchk'))
obasis0 = mol.obasis
exp0_alpha = mol.exp_alpha
exp0_beta = mol.exp_beta
# Downgrade the basis to sto-3g and project
obasis1 = get_gobasis(mol.coordinates, mol.numbers, 'sto-3g')
lf1 = DenseLinalgFactory(obasis1.nbasis)
exp1_alpha = lf1.create_expansion()
exp1_beta = lf1.create_expansion()
project_orbitals_mgs(obasis0, obasis1, exp0_alpha, exp1_alpha)
project_orbitals_mgs(obasis0, obasis1, exp0_beta, exp1_beta)
assert (exp1_alpha.energies == 0.0).all()
assert (exp1_beta.energies == 0.0).all()
assert exp1_alpha.occupations.sum() == 2
assert exp1_beta.occupations.sum() == 1
assert (exp1_alpha.coeffs[:,2:] == 0.0).all()
assert (exp1_beta.coeffs[:,1:] == 0.0).all()
# Check the normalization of the projected orbitals
olp = obasis1.compute_overlap(lf1)
exp1_alpha.check_orthonormality(olp)
exp1_beta.check_orthonormality(olp)
# Setup HF hamiltonian and compute energy
kin = obasis1.compute_kinetic(lf1)
na = obasis1.compute_nuclear_attraction(mol.coordinates, mol.pseudo_numbers, lf1)
er = obasis1.compute_electron_repulsion(lf1)
terms = [
UTwoIndexTerm(kin, 'kin'),
UDirectTerm(er, 'hartree'),
UExchangeTerm(er, 'x_hf'),
UTwoIndexTerm(na, 'ne'),
]
ham = UEffHam(terms)
# Compute energy before SCF
energy1 = helper_compute(ham, lf1, exp1_alpha, exp1_beta)[0]
scf_solver = PlainSCFSolver(1e-6)
occ_model = AufbauOccModel(2, 1)
scf_solver(ham, lf1, olp, occ_model, exp1_alpha, exp1_beta)
energy2 = ham.cache['energy']
assert energy2 < energy1 # the energy should decrease after scf convergence
def get_basis_pair_geometry():
'''Prepare two basis sets that only differ in geometry'''
# Create initial system
mol = IOData.from_file(context.get_fn('test/water.xyz'))
obasis0 = get_gobasis(mol.coordinates, mol.numbers, 'sto-3g')
lf = DenseLinalgFactory(obasis0.nbasis)
exp0 = lf.create_expansion()
# Occupy all orbitals such that orthogonality is well tested
exp0.occupations[:] = 1.0
# core-hamiltonian guess
olp = obasis0.compute_overlap(lf)
kin = obasis0.compute_kinetic(lf)
na = obasis0.compute_nuclear_attraction(mol.coordinates, mol.pseudo_numbers, lf)
er = obasis0.compute_electron_repulsion(lf)
guess_core_hamiltonian(olp, kin, na, exp0)
# Internal consistency check
exp0.check_orthonormality(obasis0.compute_overlap(lf))
# Change geometry
mol.coordinates[1,2] += 0.5
mol.coordinates[0,1] -= 1.5
obasis1 = get_gobasis(mol.coordinates, mol.numbers, 'sto-3g')
exp1 = lf.create_expansion()
return obasis0, obasis1, exp0, exp1, lf
def test_project_msg_geometry():
obasis0, obasis1, exp0, exp1, lf = get_basis_pair_geometry()
# Project from one to other:
project_orbitals_mgs(obasis0, obasis1, exp0, exp1)
# Basic checks
assert (exp1.energies == 0.0).all()
assert (exp1.occupations == exp0.occupations).all()
assert abs(exp1.coeffs[:,:5] - exp0.coeffs[:,:5]).max() > 1e-3 # something should change
# Check orthonormality
exp1.check_orthonormality(obasis1.compute_overlap(lf))
def test_project_ortho_basis_geometry():
obasis0, obasis1, exp0, exp1, lf = get_basis_pair_geometry()
# Project from one to other:
project_orbitals_ortho(obasis0, obasis1, exp0, exp1)
# Basic checks
assert (exp1.energies == 0.0).all()
assert (exp1.occupations == exp0.occupations).all()
assert abs(exp1.coeffs[:,:5] - exp0.coeffs[:,:5]).max() > 1e-3 # something should change
# Check orthonormality
exp1.check_orthonormality(obasis1.compute_overlap(lf))
def test_project_ortho_olp_geometry():
obasis0, obasis1, exp0, exp1, lf = get_basis_pair_geometry()
# Project from one to other:
olp0 = obasis0.compute_overlap(lf)
olp1 = obasis1.compute_overlap(lf)
project_orbitals_ortho(olp0, olp1, exp0, exp1)
# Basic checks
assert (exp1.energies == 0.0).all()
assert (exp1.occupations == exp0.occupations).all()
assert abs(exp1.coeffs[:,:5] - exp0.coeffs[:,:5]).max() > 1e-3 # something should change
# Check orthonormality
exp1.check_orthonormality(obasis1.compute_overlap(lf))
|
crisely09/horton
|
horton/meanfield/test/test_project.py
|
Python
|
gpl-3.0
| 8,371
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from Tkinter import *
from ttk import *
from ScrolledText import ScrolledText as Text
from PIL import Image, ImageTk
import tkFileDialog,os,cairo,tempfile,time,shutil,tkFont
from laveqed import laveqed
from rsvg_windows import rsvg_windows
try:
import rsvg
except ImportError:
rsvg=rsvg_windows() # Untested
TITLE = 'laveqed GUI'
APP_WIN_WIDTH = 800
APP_WIN_HEIGHT = 400
FONTNAME='Ubuntu Mono'
LOGOFILENAME='laveqed_logo.svg'
CONFIGFILE='laveqed_config.xml'
class laveqed_gui(object):
def __init__(self, title):
print('Starting \t\t:\tWelcome to laveqed\'s GUI!')
os.environ['XMODIFIERS'] = "@im=none" # Fix for non-working ^ after a while
self.win=Tk()
self.win.title(title)
self.center(self.win)
self.win.protocol("WM_DELETE_WINDOW", self.close)
self.previewSize=(713,45)
self._topLevelOpened = False
self.displayScale = 1
self.buildGUI()
self._set_vars() # Sets variables for use by laveqed, also creates temp folder and cd into it
self._makelogo() # Either loads pre-calculated logo or generates it.
self.text_widget.focus() # So we can type right away!
def _buildFrames(self):
self.main_frame=Frame(self.win)
self.main_frame.pack(fill='both',expand=True)
# So the picture isn't too much to the top
self.space_frame=Label(self.main_frame)
self.space_frame.pack(side='top',fill='both', expand=False, padx=4, pady=10)
self.top_frame=LabelFrame(self.main_frame,relief=FLAT)
self.top_frame.pack(side='top',fill='both', expand=True, padx=4, pady=4)
self.png_frame=Label(self.top_frame,anchor='center')
self.png_frame.pack(fill='both', expand=True, padx=4, pady=4)
self.bottom_frame=LabelFrame(self.main_frame,relief=FLAT)
self.bottom_frame.pack(side='bottom', fill=X, expand=False, padx=4, pady=4)
self.text_frame=LabelFrame(self.bottom_frame,relief=FLAT)
self.text_frame.pack(side='left', fill=X, expand=True, padx=4, pady=4)
def _tag_configure(self, text):
# Color tags for syntax highlight
text.tag_configure('red',foreground='red')
text.tag_configure('green',foreground='green')
text.tag_configure('purple',foreground='purple')
text.tag_configure('blue',foreground='blue')
# Bold baby!
text.tag_configure('bold',font=self.bold_font)
def _buildWidgets(self):
self.text_widget=Text(self.text_frame,bd=2,padx=4,pady=4,\
wrap=WORD,font=(FONTNAME,14),undo=True)
self.text_widget.pack(fill='both',expand=True,padx=4,pady=4)
self.bold_font = tkFont.Font(self.text_widget, self.text_widget.cget("font"))
self.bold_font.configure(weight="bold")
self._tag_configure(self.text_widget)
# self.text_widget=Text(self.text_frame,bd=2,padx=4,pady=4,\
# wrap=WORD,font=(FONTNAME,14),undo=True)
# self.text_widget.pack(fill='both',expand=True,padx=4,pady=4)
# # Color tags for syntax highlight
# self.text_widget.tag_configure('red',foreground='red')
# self.text_widget.tag_configure('green',foreground='green')
# self.text_widget.tag_configure('purple',foreground='purple')
# self.text_widget.tag_configure('blue',foreground='blue')
# # Bold baby!
# #self.orig_font = tkFont.Font(self.text_widget, self.text_widget.cget("font"))
# self.bold_font = tkFont.Font(self.text_widget, self.text_widget.cget("font"))
# self.bold_font.configure(weight="bold")
# self.text_widget.tag_configure('bold',font=self.bold_font)
# #self.text_widget.tag_configure('plain',font=self.orig_font,foreground='black',background='white')
def _buildMenus(self):
self.menubar=Menu(self.win)
# File menu
filemenu=Menu(self.menubar,tearoff=0)
filemenu.add_command(label="Open", command=self.open_svg,accelerator='Ctrl+O')
filemenu.add_command(label="Save as...", command=self.save_svg,accelerator='Ctrl+S')
filemenu.add_separator()
filemenu.add_command(label="Exit", command=self.close,accelerator='Ctrl+Q')
self.menubar.add_cascade(label="File", menu=filemenu)
# laveqed menu
laveqedmenu=Menu(self.menubar,tearoff=0)
laveqedmenu.add_command(label='Run',command=self.build_svg,accelerator='Ctrl-Enter')
laveqedmenu.add_command(label='Preferences',command=self.preferences,accelerator='Ctrl-P')
self.menubar.add_cascade(label="laveqed", menu=laveqedmenu)
self.win.config(menu=self.menubar)
def _set_vars(self):
if os.path.isfile(CONFIGFILE):
pass # Parse the xml and set vars accordingly
else: # No config file? -> Get defaults laveqed -ambles and scale
tmp=laveqed()
self.preamble=tmp.preamble
self.postamble=tmp.postamble
self.scale=tmp.scale
self.eqonly=False # Loads -ambles by default if eqonly == False
# Creating a temporary folder to work inside of
self.owd=os.getcwd() # Original Working Directory, for friendly fileOpenDialog
self.cwd=tempfile.mkdtemp()
print('Making temp folder\t:\t'+self.cwd)
os.chdir(self.cwd)
try :
shutil.copy2(self.owd+'/'+LOGOFILENAME,self.cwd+'/'+LOGOFILENAME)
except:
pass
def _binding(self):
# Allows select all in Text Widget
self.win.bind_class("Text","<Control-a>", self.selectall)
# Main window binds
self.win.bind('<Control-Return>',self.build_svg_fixCtrlReturn)
self.win.bind('<Control-s>',self.save_svg)
self.win.bind('<Control-o>',self.open_svg_fixCtrlO)
self.win.bind('<Control-p>', self.preferences)
self.win.bind('<Control-q>',self.close)
# Text widget binds
self.text_widget.bind('<Control-h>',self.hat)
self.text_widget.bind('<KeyRelease>',self.set_syntax)
# SVG binds
self.win.bind('<Control-plus>', self.ZoomInSVG)
self.win.bind('<Control-minus>', self.ZoomOutSVG)
self.win.bind('<Control-0>', self.ZoomResetSVG)
def _makelogo(self):
self.name=LOGOFILENAME[:-4]
if not os.path.isfile(LOGOFILENAME):
equation=r'\text{L\hspace{-3.5pt}\raisebox{2pt}{\scriptsize A}\!}{\color{gray!68}\text{\TeX}}\text{ V{\color{gray!80}ectorial} Eq{\color{gray!80}uation} Ed{\color{gray!80}itor}}'
self.text_widget.insert('1.0',equation)
self.build_svg()
self.text_widget.delete('1.0',END)
self.load_svg()
def buildGUI(self):
# Style
#self.win.style=Style()
#self.win.style.theme_use('clam')
#Order matters for some elements; e.g. better build frames before widgets
self._buildFrames()
self._buildWidgets()
self._buildMenus()
self._binding()
def center(self, win, x=APP_WIN_WIDTH, y=APP_WIN_HEIGHT):
win.update_idletasks()
width = x
height = y
x = (win.winfo_screenwidth() // 2) - (width // 2)
y = (win.winfo_screenheight() // 2) - (height // 2)
win.geometry('{}x{}+{}+{}'.format(width, height, x, y))
def load_svg(self,event=None):
filename=self.name+'.svg'
if os.path.isfile(filename):
#self.tk_image=self.svgPhotoImage(filename, scale=self.displayScale)
self.openDisplaySVG(filename)
print('Loading svg file\t:\t'+filename+' (Success!)')
# If opening failed, put a blank image the same size as LOGOFILENAME
else: # Note, this should never occur now... Left here as a fallback
print('Loading svg file\t:\t'+filename+' (Failed!)')
self.tk_image = ImageTk.PhotoImage('RGBA')
self.image=Image.new('RGB',self.previewSize,self.win['background'])
self.tk_image.paste(self.image)
self.png_frame.config(image=self.tk_image)
def close(self,var=None):
print('Removing temp folder\t:\t'+self.cwd)
shutil.rmtree(self.cwd) # Removing the temporary folder we worked in
print("Exiting\t\t\t:\tCiao!")
self.win.destroy()
def selectall(self, event):
event.widget.tag_add("sel","1.0","end")
def hat(self,event):
event.widget.insert(INSERT,'^^')
def fixCtrlReturn(self,event=None):
self.text_widget.delete('%s-1c'%INSERT, INSERT)
def fixCtrlO(self,event=None):
self.text_widget.delete(INSERT, '%s+1c'%INSERT)
def run(self):
self.win.mainloop()
def build_preferences(self, pref, event=None):
pre_label = Label(pref, text='Preamble:')
post_label = Label(pref, text='Postamble:')
pre_text = Text(pref, bd=2,padx=4, pady=4, wrap=WORD,\
font=(FONTNAME,12), undo=True)
post_text = Text(pref, bd=2,padx=4, pady=4, wrap=WORD,\
font=(FONTNAME,12), undo=True)
scale_label = Label(pref, text='Scale:')
scale_entry = Entry(pref, width = 2)
save_button = Button(pref, text='OK')
padval=10
pre_label.grid(row=0, column=0, sticky='w', padx=padval,\
pady=(padval,0))
pre_text.grid(row=2, column=0, sticky='nwse', padx=padval,\
pady=(0,padval))
post_label.grid(row=3, column=0, sticky='w', padx=padval,\
pady=(padval,0))
post_text.grid(row=4, column=0, sticky='nwse', padx=padval,\
pady=(0,padval))
scale_label.grid(row=5, column=0, sticky='w', padx=padval,\
pady=(0,padval))
scale_entry.grid(row=5, column=0, sticky='w', padx=50,\
pady=(0,padval))
save_button.grid(row=5, column=0, sticky='e', padx=padval,\
pady=(0,padval))
pref.columnconfigure(0, weight=1)
pref.rowconfigure(2, weight=4)
pref.rowconfigure(4, weight=5)
pre_text.insert('1.0', self.preamble)
post_text.insert('1.0', self.postamble)
scale_entry.insert(0, self.scale)
self._tag_configure(pre_text)
self._tag_configure(post_text)
def set_syntax_pref(event=None):
self._set_syntax(pre_text)
self._set_syntax(post_text)
pre_text.bind('<KeyRelease>', set_syntax_pref)
post_text.bind('<KeyRelease>', set_syntax_pref)
set_syntax_pref()
pre_text.focus()
def save_pref(event=None):
self.preamble = os.linesep.join([s for s \
in pre_text.get('1.0', END).splitlines() if s.strip()])
self.postamble = os.linesep.join([s for s \
in post_text.get('1.0', END).splitlines() if s.strip()])
self.scale = scale_entry.get()
print('Editing Preferences\t:\tSaving Preferences')
pref._destroy()
save_button.bind('<ButtonRelease-1>', save_pref)
save_button.bind('<Return>', save_pref)
pref.bind('<Control-s>', save_pref)
pref.bind('<Control-q>', pref._destroy)
def preferences(self,event=None):
if self._topLevelOpened:
return
else:
print('Editing Preferences\t:\tOpening Dialog')
self._topLevelOpened = True
def _destroy(self_, event=None):
print('Editing Preferences\t:\tClosing Dialog')
self_.destroy()
self._topLevelOpened = False
Toplevel._destroy = _destroy
# Create the window
pref = Toplevel(self.win)
pref.title('Preferences')
self.center(pref, x=APP_WIN_WIDTH/2, y=APP_WIN_HEIGHT)
# Create the Widgets
self.build_preferences(pref, event)
# Closes with a message
pref.protocol("WM_DELETE_WINDOW", pref._destroy)
# Pref dialog always on top and to focus
pref.grab_set()
pref.wm_attributes("-topmost", 1)
def build_svg_fixCtrlReturn(self,event=None):
        # Fixes accidental <Return> caught by text_widget when <C-Return> is pressed
self.fixCtrlReturn()
self.build_svg()
def build_svg(self,event=None):
        self.name=time.strftime('%Y-%m-%d_%H-%M-%S') # Temp filename is the current timestamp.
        # A name in this format never starts with '-', which latex would read as an argument
print('Building svg\t\t:\t'+self.name+'.svg')
equation=os.linesep.join([s for s \
in self.text_widget.get('1.0',END).splitlines() if s.strip()])
# Removes empty lines so latex doesn't freak out
tmp=laveqed(equation,name=self.name,scale=self.scale,cleanAfter=False,eqonly=self.eqonly)
tmp.preamble=self.preamble
tmp.postamble=self.postamble
print('\n\t======= LaTeX output begins =======')
try:
tmp.makesvg()
print('\t======= LaTeX output ends =======\n')
self.load_svg()
except:
print('\t======= LaTeX output ends =======\n')
print('Error building svg file\t:\tCheck LaTeX Syntax')
def save_svg(self,event=None):
print('Saving svg\t\t:\tOpening Dialog')
filename=tkFileDialog.asksaveasfilename(filetypes=[('laveqed SVG file','.svg')],\
initialdir=self.owd,defaultextension='.svg',initialfile=self.name+'.svg')
try:
shutil.copy2(self.name+'.svg',filename)
print('Saving svg\t\t:\tFile saved as '+filename)
except:
print('Saving svg\t\t:\tOperation cancelled')
def open_svg_fixCtrlO(self,event=None):
        # Fixes accidental linebreak at INSERT+1 caught by text_widget when <C-o> is pressed
self.fixCtrlO()
self.open_svg()
def open_svg(self,event=None):
print('Opening svg\t\t:\tOpening Dialog')
self.name=tkFileDialog.askopenfilename(filetypes=[('laveqed SVG file','.svg')],\
initialdir=self.owd,defaultextension='.svg').replace('.svg','')
if not self.name:
print('Opening svg\t\t:\tOperation cancelled')
else:
print('Opening svg\t\t:\t'+self.name+'.svg')
tmp=laveqed(eqonly=self.eqonly)
tmp.loadsvg(self.name)
if self.eqonly:
print('Loading laveqed equation\t:\tequation')
self.equation=tmp.equation
else:
print('Loading laveqed vars\t:\t-ambles, equation and scale')
self.preamble,equation,self.postamble,self.scale\
=tmp.preamble,tmp.equation,tmp.postamble,tmp.scale
self.load_svg()
self.text_widget.delete('1.0',END)
self.text_widget.insert('1.0',equation)
self.set_syntax()
def highlight_pattern(self, event, pattern, tag, start="1.0", end="end", regexp=False):
# Adapted from http://bit.ly/UMdj9q
'''Apply the given tag to all text that matches the given pattern
If 'regexp' is set to True, pattern will be treated as a regular expression
'''
start = event.index(start)
end = event.index(end)
event.mark_set("matchStart",start)
event.mark_set("matchEnd",start)
event.mark_set("searchLimit", end)
count = IntVar()
while True:
index = event.search(pattern, "matchEnd","searchLimit",
count=count, regexp=regexp)
if index == "": break
event.mark_set("matchStart", index)
event.mark_set("matchEnd", "%s+%sc" % (index,count.get()))
event.tag_add(tag, "matchStart","matchEnd")
def _set_syntax(self, text):
# \\ ^ & and numbers including reals and size in "pt" -> red
# {}[] -> purple
# \alpha or (\!\#\&\$\,\;\:) -> green
# % until EOL is comment (blue)
# Alignment '&' is bold
        # First clear all tags, e.g. so that '{}<left>A' doesn't leave '{A}' coloured purple
for i in ['red','purple','green','blue','bold']:
text.tag_remove(i,'1.0','end')
# Parsing the text and setting tags
self.highlight_pattern(text,r'\\\\|\^|([-\.]?|^)[0-9]\.?(pt| pt)?|\\%','red',regexp=True)
self.highlight_pattern(text,r'[\[\]\{\}\(\)]', 'purple',regexp=True)
self.highlight_pattern(text,r'\\([a-zA-Z]+|[!#&\$,;:])', 'green',regexp=True)
self.highlight_pattern(text,r'([^\\]|^)%.*','blue',regexp=True)
self.highlight_pattern(text,r'&','bold',regexp=True)
def set_syntax(self, event=None):
self._set_syntax(self.text_widget)
# # \\ ^ & and numbers including reals and size in "pt" -> red
# # {}[] -> purple
# # \alpha or (\!\#\&\$\,\;\:) -> green
# # % until EOL is comment (blue)
# # Alignment '&' is bold
# text=self.text_widget
# # First clearing all tags. e.g. avoids '{}<left>A' to color '{A}' all in purple
# for i in ['red','purple','green','blue','bold']:
# text.tag_remove(i,'1.0','end')
# # Parsing the text and setting tags
# self.highlight_pattern(text,r'\\\\|\^|([-\.]?|^)[0-9]\.?(pt| pt)?|\\%','red',regexp=True)
# self.highlight_pattern(text,r'[\[\]\{\}\(\)]', 'purple',regexp=True)
# self.highlight_pattern(text,r'\\([a-zA-Z]+|[!#&\$,;:])', 'green',regexp=True)
# self.highlight_pattern(text,r'([^\\]|^)%.*','blue',regexp=True)
# self.highlight_pattern(text,r'&','bold',regexp=True)
    def svgPhotoImage(self,file_path_name, scale=1): # TODO Fix (if possible) AA artefacts at sharp alpha edges
        "Returns an ImageTk.PhotoImage object representing the svg file"
# Based on pygame.org/wiki/CairoPygame and http://bit.ly/1hnpYZY
svg = rsvg.Handle(path=file_path_name)
width, height = svg.get_dimension_data()[:2]
surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, int(width*scale), int(height*scale))
context = cairo.Context(surface)
context.scale(scale,scale)
#context.set_antialias(cairo.ANTIALIAS_SUBPIXEL)
svg.render_cairo(context)
tk_image=ImageTk.PhotoImage('RGBA')
image=Image.frombuffer('RGBA',(int(width*scale),int(height*scale)),surface.get_data(),'raw','BGRA',0,1)
tk_image.paste(image)
return(tk_image)
def openSVG(self, file_path_name):
svg = rsvg.Handle(path=file_path_name)
width, height = svg.get_dimension_data()[:2]
self.openedSVG = dict(svg=svg, width=width, height=height)
def updateOpenedSVG(self):
svg = self.openedSVG['svg']
width = self.openedSVG['width']
height = self.openedSVG['height']
scale = self.displayScale
surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, int(width*scale), int(height*scale))
context = cairo.Context(surface)
context.scale(scale,scale)
context.set_antialias(cairo.ANTIALIAS_DEFAULT)
svg.render_cairo(context)
tk_image=ImageTk.PhotoImage('RGBA')
image=Image.frombuffer('RGBA',(int(width*scale),int(height*scale)),surface.get_data(),'raw','BGRA',0,1)
tk_image.paste(image)
self.tk_image = tk_image
self.tk_image.paste(image)
self.png_frame.config(image=self.tk_image)
def openDisplaySVG(self, file_path_name):
self.openSVG(file_path_name)
self.updateOpenedSVG()
def ZoomInSVG(self, event=None):
        # Avoid sluggishness caused by accidentally zooming in too far
if self.displayScale < 5:
self.displayScale *= 1.05
self.updateOpenedSVG()
def ZoomOutSVG(self, event=None):
# Avoid problems with too small image
self.displayScale /= 1.05
if self.displayScale < 0.05:
self.displayScale = 0.05
self.updateOpenedSVG()
def ZoomResetSVG(self, event=None):
self.displayScale = 1
self.updateOpenedSVG()
if __name__ == '__main__':
tmp=laveqed_gui(TITLE).run()
|
JeanOlivier/Laveqed
|
gui_laveqed.py
|
Python
|
gpl-3.0
| 20,280
|
## INFO ########################################################################
## ##
## plastey ##
## ======= ##
## ##
## Oculus Rift + Leap Motion + Python 3 + C + Blender + Arch Linux ##
## Version: 0.2.0.980 (20150510) ##
## File: hud.py ##
## ##
## For more information about the project, visit ##
## <http://plastey.kibu.hu>. ##
## Copyright (C) 2015 Peter Varo, Kitchen Budapest ##
## ##
## This program is free software: you can redistribute it and/or modify it ##
## under the terms of the GNU General Public License as published by the ##
## Free Software Foundation, either version 3 of the License, or ##
## (at your option) any later version. ##
## ##
## This program is distributed in the hope that it will be useful, but ##
## WITHOUT ANY WARRANTY; without even the implied warranty of ##
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. ##
## See the GNU General Public License for more details. ##
## ##
## You should have received a copy of the GNU General Public License ##
## along with this program, most likely a file in the root directory, ##
## called 'LICENSE'. If not, see <http://www.gnu.org/licenses>. ##
## ##
######################################################################## INFO ##
# Import python modules
from collections import deque
#------------------------------------------------------------------------------#
class Text:
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
def __init__(self, text_first_object,
text_other_object,
time_getter,
interval):
self._text_first = text_first_object
self._text_other = text_other_object
self._get_time = time_getter
self._interval = interval
self._last_time = time_getter()
self._messages = deque()
self._still_empty = True
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
def _update(self):
# Write the changed and constructed messages to display
messages = iter(self._messages)
try:
self._text_first.text = next(messages)
self._text_other.text = '\n'.join(messages)
except StopIteration:
self._text_first.text = self._text_other.text = ''
# Update timer
self._last_time = self._get_time()
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
def clear(self):
self._messages = deque()
self._update()
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
def update(self):
# If there are any messages left
if len(self._messages):
# If interval passed
if (self._last_time + self._interval) <= self._get_time():
# Remove oldest item
self._messages.pop()
# Update display
self._update()
        # If the deque just became empty
elif not self._still_empty:
# Switch state flag and update display
self._still_empty = True
self._update()
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
def write(self, message):
# Add new message and update display
self._messages.appendleft(message)
self._update()
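#------------------------------------------------------------------------------#
# Hedged usage sketch (added for illustration, not part of the original module).
# The stub class and the 2.0 second interval are assumptions; the real callers
# pass Blender text objects and the game clock as `time_getter`.
if __name__ == '__main__':
    from time import monotonic, sleep

    class _StubText:
        # Minimal stand-in exposing the single attribute Text writes to
        text = ''

    first, other = _StubText(), _StubText()
    hud = Text(first, other, time_getter=monotonic, interval=2.0)
    hud.write('first message')   # newest message becomes the headline
    hud.write('second message')  # older messages collect in the body object
    sleep(2.1)
    hud.update()                 # after `interval`, the oldest message expires
    print(repr(first.text), repr(other.text))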
|
kitchenbudapest/vr
|
hud.py
|
Python
|
gpl-3.0
| 4,413
|
# coding: utf-8
"""
EVE Swagger Interface
An OpenAPI for EVE Online
OpenAPI spec version: 0.4.6
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class GetKillmailsKillmailIdKillmailHashInternalServerError(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, error=None):
"""
GetKillmailsKillmailIdKillmailHashInternalServerError - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'error': 'str'
}
self.attribute_map = {
'error': 'error'
}
self._error = error
@property
def error(self):
"""
Gets the error of this GetKillmailsKillmailIdKillmailHashInternalServerError.
Internal server error message
:return: The error of this GetKillmailsKillmailIdKillmailHashInternalServerError.
:rtype: str
"""
return self._error
@error.setter
def error(self, error):
"""
Sets the error of this GetKillmailsKillmailIdKillmailHashInternalServerError.
Internal server error message
:param error: The error of this GetKillmailsKillmailIdKillmailHashInternalServerError.
:type: str
"""
self._error = error
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, GetKillmailsKillmailIdKillmailHashInternalServerError):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
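# Illustration (added comment, not generated code): a typical round trip of
# this model, assuming it is instantiated directly rather than by the client:
#   err = GetKillmailsKillmailIdKillmailHashInternalServerError(error="boom")
#   err.to_dict()   # -> {'error': 'boom'}
#   err.to_str()    # -> "{'error': 'boom'}" (pformat of the dict above)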
|
minlexx/pyevemon
|
esi_client/models/get_killmails_killmail_id_killmail_hash_internal_server_error.py
|
Python
|
gpl-3.0
| 3,191
|
# (c) 2019, Evgeni Golov <evgeni@redhat.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, division, print_function
__metaclass__ = type
class ModuleDocFragment(object):
# Foreman documentation fragment
DOCUMENTATION = '''
requirements:
- requests
options:
server_url:
description:
- URL of the Foreman server.
- If the value is not specified in the task, the value of environment variable C(FOREMAN_SERVER_URL) will be used instead.
required: true
type: str
username:
description:
- Username accessing the Foreman server.
- If the value is not specified in the task, the value of environment variable C(FOREMAN_USERNAME) will be used instead.
required: true
type: str
password:
description:
- Password of the user accessing the Foreman server.
- If the value is not specified in the task, the value of environment variable C(FOREMAN_PASSWORD) will be used instead.
required: true
type: str
validate_certs:
description:
- Whether or not to verify the TLS certificates of the Foreman server.
- If the value is not specified in the task, the value of environment variable C(FOREMAN_VALIDATE_CERTS) will be used instead.
default: true
type: bool
'''
NESTED_PARAMETERS = '''
options:
parameters:
description:
- Entity domain specific host parameters
required: false
type: list
elements: dict
suboptions:
name:
description:
- Name of the parameter
required: true
type: str
value:
description:
- Value of the parameter
required: true
type: raw
parameter_type:
description:
- Type of the parameter
default: 'string'
choices:
- 'string'
- 'boolean'
- 'integer'
- 'real'
- 'array'
- 'hash'
- 'yaml'
- 'json'
type: str
'''
OS_FAMILY = '''
options:
os_family:
description:
- The OS family the entity shall be assigned with.
required: false
choices:
- AIX
- Altlinux
- Archlinux
- Coreos
- Debian
- Freebsd
- Gentoo
- Junos
- NXOS
- Rancheros
- Redhat
- Solaris
- Suse
- Windows
- Xenserver
type: str
'''
TAXONOMY = '''
options:
organizations:
description: List of organizations the entity should be assigned to
type: list
elements: str
locations:
description: List of locations the entity should be assigned to
type: list
elements: str
'''
ENTITY_STATE = '''
options:
state:
description:
- State of the entity
default: present
choices:
- present
- absent
type: str
'''
ENTITY_STATE_WITH_DEFAULTS = '''
options:
state:
description:
- State of the entity
- C(present_with_defaults) will ensure the entity exists, but won't update existing ones
default: present
choices:
- present
- present_with_defaults
- absent
type: str
'''
HOST_OPTIONS = '''
options:
compute_resource:
description: Compute resource name
required: false
type: str
compute_profile:
description: Compute profile name
required: false
type: str
domain:
description: Domain name
required: false
type: str
subnet:
description: IPv4 Subnet name
required: false
type: str
subnet6:
description: IPv6 Subnet name
required: false
type: str
root_pass:
description:
- Root password.
- Will result in the entity always being updated, as the current password cannot be retrieved.
type: str
required: false
realm:
description: Realm name
required: false
type: str
architecture:
description: Architecture name
required: False
type: str
medium:
aliases: [ media ]
description:
- Medium name
- Mutually exclusive with I(kickstart_repository).
required: False
type: str
pxe_loader:
description: PXE Bootloader
required: false
choices:
- PXELinux BIOS
- PXELinux UEFI
- Grub UEFI
- Grub2 BIOS
- Grub2 ELF
- Grub2 UEFI
- Grub2 UEFI SecureBoot
- Grub2 UEFI HTTP
- Grub2 UEFI HTTPS
- Grub2 UEFI HTTPS SecureBoot
- iPXE Embedded
- iPXE UEFI HTTP
- iPXE Chain BIOS
- iPXE Chain UEFI
- None
type: str
ptable:
description: Partition table name
required: False
type: str
environment:
description: Puppet environment name
required: false
type: str
puppetclasses:
description: List of puppet classes to include in this host group. Must exist for hostgroup's puppet environment.
required: false
type: list
elements: str
config_groups:
description: Config groups list
required: false
type: list
elements: str
puppet_proxy:
description: Puppet server proxy name
required: false
type: str
puppet_ca_proxy:
description: Puppet CA proxy name
required: false
type: str
openscap_proxy:
description:
- OpenSCAP proxy name.
- Only available when the OpenSCAP plugin is installed.
required: false
type: str
content_source:
description:
- Content source.
- Only available for Katello installations.
required: false
type: str
lifecycle_environment:
description:
- Lifecycle environment.
- Only available for Katello installations.
required: false
type: str
kickstart_repository:
description:
- Kickstart repository name.
- You need to provide this to use the "Synced Content" feature.
- Mutually exclusive with I(medium).
- Only available for Katello installations.
required: false
type: str
content_view:
description:
- Content view.
- Only available for Katello installations.
required: false
type: str
activation_keys:
description:
- Activation Keys used for deployment.
- Comma separated list.
- Only available for Katello installations.
required: false
type: str
'''
ORGANIZATION = '''
options:
organization:
description:
- Organization that the entity is in
required: true
type: str
'''
SCAP_DATASTREAM = '''
options:
scap_file:
description:
- File containing XML DataStream content.
- Required when creating a new DataStream.
required: false
type: path
original_filename:
description:
- Original file name of the XML file.
- If unset, the filename of I(scap_file) will be used.
required: false
type: str
'''
OPERATINGSYSTEMS = '''
options:
operatingsystems:
description:
- List of operating systems the entity should be assigned to.
- Operating systems are looked up by their title which is composed as "<name> <major>.<minor>".
- You can omit the version part as long as you only have one operating system by that name.
required: false
type: list
elements: str
'''
OPERATINGSYSTEM = '''
options:
operatingsystem:
description:
- Operating systems are looked up by their title which is composed as "<name> <major>.<minor>".
- You can omit the version part as long as you only have one operating system by that name.
type: str
required: False
'''
INFOMODULE = '''
options:
name:
description:
- Name of the resource to fetch information for.
- Mutually exclusive with I(search).
required: false
type: str
location:
description:
- Label of the Location to scope the search for.
required: false
type: str
organization:
description:
- Name of the Organization to scope the search for.
required: false
type: str
search:
description:
- Search query to use
- If None, and I(name) is not set, all resources are returned.
- Mutually exclusive with I(name).
type: str
'''
INFOMODULEWITHOUTNAME = '''
options:
location:
description:
- Label of the Location to scope the search for.
required: false
type: str
organization:
description:
- Name of the Organization to scope the search for.
required: false
type: str
search:
description:
- Search query to use
- If None, all resources are returned.
type: str
'''
KATELLOINFOMODULE = '''
options:
organization:
required: true
'''
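# Illustration (added comment): modules consume these blocks through Ansible's
# doc-fragment mechanism. The exact collection prefix below is an assumption,
# based on this repository shipping as the theforeman.foreman collection:
#
#   DOCUMENTATION = '''
#   ...
#   extends_documentation_fragment:
#     - theforeman.foreman.foreman
#     - theforeman.foreman.foreman.taxonomy   # pulls in the TAXONOMY options
#   '''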
|
ATIX-AG/foreman-ansible-modules
|
plugins/doc_fragments/foreman.py
|
Python
|
gpl-3.0
| 9,183
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
'''Pychemqt, Chemical Engineering Process simulator
Copyright (C) 2009-2017, Juan José Gómez Romera <jjgomera@gmail.com>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.'''
from lib import unidades
from lib.meos import MEoS
class R114(MEoS):
"""Multiparameter equation of state for R114"""
name = "1,2-dichloro-1,1,2,2-tetrafluoroethane"
CASNumber = "76-14-2"
formula = "CClF2CClF2"
synonym = "R114"
_refPropName = "R114"
_coolPropName = "R114"
rhoc = unidades.Density(579.969)
Tc = unidades.Temperature(418.83)
Pc = unidades.Pressure(3257.0, "kPa")
M = 170.921 # g/mol
Tt = unidades.Temperature(180.63)
Tb = unidades.Temperature(276.741)
f_acent = 0.25253
momentoDipolar = unidades.DipoleMoment(0.658, "Debye")
id = 231
f = 1/8.31451*170.93
CP1 = {"ao": 0.97651380e-1*f,
"an": [0.3240861e-2*f, -0.5895364e-5*f, 0.6737929e-8*f,
-0.3546364e-11*f],
"pow": [1, 2, 3, 4]}
platzer = {
"__type__": "Helmholtz",
"__name__": "Bender equation of state for R-114 of Platzer (1990)",
"__doi__": {"autor": "Platzer, B., Polt, A., Maurer, G.",
"title": "Thermophysical Properties of Refrigerants",
"ref": "Berlin: Springer-Verlag, 1990.",
"doi": ""},
"R": 8.31451,
"cp": CP1,
"ref": "NBP",
"Tmin": 273.15, "Tmax": 507.0, "Pmax": 21000.0, "rhomax": 8.942,
"nr1": [-0.340776521414, 0.323001398420, -0.424950537596e-1,
0.107938879710e1, -0.199243619673e1, -0.155135133506,
-0.121465790553, -0.165038582393e-1, -0.186915808643,
0.308074612567, 0.115861416115, 0.276358316589e-1,
0.108043243088, 0.460683793064e-1, -0.174821616881,
0.317530854287e-1],
"d1": [0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 3, 3, 4, 4, 5],
"t1": [3, 4, 5, 0, 1, 2, 3, 4, 0, 1, 2, 0, 1, 0, 1, 1],
"nr2": [0.340776521414, -0.323001398420, 0.424950537596e-1,
-0.166940100976e1, 0.408693082002e1, -0.241738963889e1],
"d2": [0, 0, 0, 2, 2, 2],
"t2": [3, 4, 5, 3, 4, 5],
"c2": [2]*6,
"gamma2": [1.21103865]*6}
eq = platzer,
_PR = [-0.1804, -16.3839]
_surface = {"sigma": [0.05239], "exp": [1.258]}
_vapor_Pressure = {
"eq": 3,
"n": [-0.72195e1, 0.16357e1, -0.14576e1, -0.69580e1, 0.57181e1],
"t": [1.0, 1.5, 2.2, 4.8, 6.2]}
_liquid_Density = {
"eq": 1,
"n": [0.43023, 0.22722e2, -0.27118e2, 0.13247e2, -0.90529e1],
"t": [0.095, 0.93, 1.1, 2.0, 3.0]}
_vapor_Density = {
"eq": 2,
"n": [-0.46609, -6.8355, -167.15, 1.5805e4, -3.1859e4, 2.1548e4],
"t": [0.09, 0.76, 4.0, 6.5, 7.0, 8.0]}
|
jjgomera/pychemqt
|
lib/mEoS/R114.py
|
Python
|
gpl-3.0
| 3,439
|
#!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_firewall_vip6
short_description: Configure virtual IP for IPv6 in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify firewall feature and vip6 category.
      Examples include all parameters and values which need to be adjusted to datasources before usage.
Tested with FOS v6.0.5
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
- Ensures FortiGate certificate must be verified by a proper CA.
type: bool
default: true
version_added: 2.9
state:
description:
- Indicates whether to create or remove the object.
type: str
required: true
choices:
- present
- absent
version_added: 2.9
firewall_vip6:
description:
- Configure virtual IP for IPv6.
default: null
type: dict
suboptions:
arp_reply:
description:
- Enable to respond to ARP requests for this virtual IP address. Enabled by default.
type: str
choices:
- disable
- enable
color:
description:
- Color of icon on the GUI.
type: int
comment:
description:
- Comment.
type: str
extip:
description:
- IP address or address range on the external interface that you want to map to an address or address range on the destination network.
type: str
extport:
description:
- Incoming port number range that you want to map to a port number range on the destination network.
type: str
http_cookie_age:
description:
                    - Time in minutes that client web browsers should keep a cookie. Default is 60 minutes. 0 = no time limit.
type: int
http_cookie_domain:
description:
- Domain that HTTP cookie persistence should apply to.
type: str
http_cookie_domain_from_host:
description:
- Enable/disable use of HTTP cookie domain from host field in HTTP.
type: str
choices:
- disable
- enable
http_cookie_generation:
description:
- Generation of HTTP cookie to be accepted. Changing invalidates all existing cookies.
type: int
http_cookie_path:
description:
- Limit HTTP cookie persistence to the specified path.
type: str
http_cookie_share:
description:
- Control sharing of cookies across virtual servers. same-ip means a cookie from one virtual server can be used by another. Disable stops
cookie sharing.
type: str
choices:
- disable
- same-ip
http_ip_header:
description:
                    - For HTTP multiplexing, enable to add the original client IP address in the X-Forwarded-For HTTP header.
type: str
choices:
- enable
- disable
http_ip_header_name:
description:
- For HTTP multiplexing, enter a custom HTTPS header name. The original client IP address is added to this header. If empty,
X-Forwarded-For is used.
type: str
http_multiplex:
description:
- Enable/disable HTTP multiplexing.
type: str
choices:
- enable
- disable
https_cookie_secure:
description:
- Enable/disable verification that inserted HTTPS cookies are secure.
type: str
choices:
- disable
- enable
id:
description:
- Custom defined ID.
type: int
ldb_method:
description:
- Method used to distribute sessions to real servers.
type: str
choices:
- static
- round-robin
- weighted
- least-session
- least-rtt
- first-alive
- http-host
mappedip:
description:
- Mapped IP address range in the format startIP-endIP.
type: str
mappedport:
description:
- Port number range on the destination network to which the external port number range is mapped.
type: str
max_embryonic_connections:
description:
- Maximum number of incomplete connections.
type: int
monitor:
description:
- Name of the health check monitor to use when polling to determine a virtual server's connectivity status.
type: list
suboptions:
name:
description:
- Health monitor name. Source firewall.ldb-monitor.name.
required: true
type: str
name:
description:
- Virtual ip6 name.
required: true
type: str
outlook_web_access:
description:
- Enable to add the Front-End-Https header for Microsoft Outlook Web Access.
type: str
choices:
- disable
- enable
persistence:
description:
- Configure how to make sure that clients connect to the same server every time they make a request that is part of the same session.
type: str
choices:
- none
- http-cookie
- ssl-session-id
portforward:
description:
- Enable port forwarding.
type: str
choices:
- disable
- enable
protocol:
description:
- Protocol to use when forwarding packets.
type: str
choices:
- tcp
- udp
- sctp
realservers:
description:
- Select the real servers that this server load balancing VIP will distribute traffic to.
type: list
suboptions:
client_ip:
description:
- Only clients in this IP range can connect to this real server.
type: str
healthcheck:
description:
- Enable to check the responsiveness of the real server before forwarding traffic.
type: str
choices:
- disable
- enable
- vip
holddown_interval:
description:
- Time in seconds that the health check monitor continues to monitor an unresponsive server that should be active.
type: int
http_host:
description:
- HTTP server domain name in HTTP header.
type: str
id:
description:
- Real server ID.
required: true
type: int
ip:
description:
- IPv6 address of the real server.
type: str
max_connections:
description:
- Max number of active connections that can directed to the real server. When reached, sessions are sent to other real servers.
type: int
monitor:
description:
- Name of the health check monitor to use when polling to determine a virtual server's connectivity status. Source firewall
.ldb-monitor.name.
type: str
port:
description:
- Port for communicating with the real server. Required if port forwarding is enabled.
type: int
status:
description:
- Set the status of the real server to active so that it can accept traffic, or on standby or disabled so no traffic is sent.
type: str
choices:
- active
- standby
- disable
weight:
description:
- Weight of the real server. If weighted load balancing is enabled, the server with the highest weight gets more connections.
type: int
server_type:
description:
- Protocol to be load balanced by the virtual server (also called the server load balance virtual IP).
type: str
choices:
- http
- https
- imaps
- pop3s
- smtps
- ssl
- tcp
- udp
- ip
src_filter:
description:
- "Source IP6 filter (x:x:x:x:x:x:x:x/x). Separate addresses with spaces."
type: list
suboptions:
range:
description:
- Source-filter range.
required: true
type: str
ssl_algorithm:
description:
- Permitted encryption algorithms for SSL sessions according to encryption strength.
type: str
choices:
- high
- medium
- low
- custom
ssl_certificate:
description:
- The name of the SSL certificate to use for SSL acceleration. Source vpn.certificate.local.name.
type: str
ssl_cipher_suites:
description:
- SSL/TLS cipher suites acceptable from a client, ordered by priority.
type: list
suboptions:
cipher:
description:
- Cipher suite name.
type: str
choices:
- TLS-RSA-WITH-3DES-EDE-CBC-SHA
- TLS-DHE-RSA-WITH-DES-CBC-SHA
- TLS-DHE-DSS-WITH-DES-CBC-SHA
priority:
description:
- SSL/TLS cipher suites priority.
required: true
type: int
versions:
description:
- SSL/TLS versions that the cipher suite can be used with.
type: str
choices:
- ssl-3.0
- tls-1.0
- tls-1.1
- tls-1.2
ssl_client_fallback:
description:
- Enable/disable support for preventing Downgrade Attacks on client connections (RFC 7507).
type: str
choices:
- disable
- enable
ssl_client_renegotiation:
description:
- Allow, deny, or require secure renegotiation of client sessions to comply with RFC 5746.
type: str
choices:
- allow
- deny
- secure
ssl_client_session_state_max:
description:
- Maximum number of client to FortiGate SSL session states to keep.
type: int
ssl_client_session_state_timeout:
description:
- Number of minutes to keep client to FortiGate SSL session state.
type: int
ssl_client_session_state_type:
description:
- How to expire SSL sessions for the segment of the SSL connection between the client and the FortiGate.
type: str
choices:
- disable
- time
- count
- both
ssl_dh_bits:
description:
- Number of bits to use in the Diffie-Hellman exchange for RSA encryption of SSL sessions.
type: str
choices:
- 768
- 1024
- 1536
- 2048
- 3072
- 4096
ssl_hpkp:
description:
- Enable/disable including HPKP header in response.
type: str
choices:
- disable
- enable
- report-only
ssl_hpkp_age:
description:
- Number of minutes the web browser should keep HPKP.
type: int
ssl_hpkp_backup:
description:
- Certificate to generate backup HPKP pin from. Source vpn.certificate.local.name vpn.certificate.ca.name.
type: str
ssl_hpkp_include_subdomains:
description:
- Indicate that HPKP header applies to all subdomains.
type: str
choices:
- disable
- enable
ssl_hpkp_primary:
description:
- Certificate to generate primary HPKP pin from. Source vpn.certificate.local.name vpn.certificate.ca.name.
type: str
ssl_hpkp_report_uri:
description:
- URL to report HPKP violations to.
type: str
ssl_hsts:
description:
- Enable/disable including HSTS header in response.
type: str
choices:
- disable
- enable
ssl_hsts_age:
description:
- Number of seconds the client should honour the HSTS setting.
type: int
ssl_hsts_include_subdomains:
description:
- Indicate that HSTS header applies to all subdomains.
type: str
choices:
- disable
- enable
ssl_http_location_conversion:
description:
- Enable to replace HTTP with HTTPS in the reply's Location HTTP header field.
type: str
choices:
- enable
- disable
ssl_http_match_host:
description:
- Enable/disable HTTP host matching for location conversion.
type: str
choices:
- enable
- disable
ssl_max_version:
description:
- Highest SSL/TLS version acceptable from a client.
type: str
choices:
- ssl-3.0
- tls-1.0
- tls-1.1
- tls-1.2
ssl_min_version:
description:
- Lowest SSL/TLS version acceptable from a client.
type: str
choices:
- ssl-3.0
- tls-1.0
- tls-1.1
- tls-1.2
ssl_mode:
description:
- Apply SSL offloading between the client and the FortiGate (half) or from the client to the FortiGate and from the FortiGate to the
server (full).
type: str
choices:
- half
- full
ssl_pfs:
description:
- Select the cipher suites that can be used for SSL perfect forward secrecy (PFS). Applies to both client and server sessions.
type: str
choices:
- require
- deny
- allow
ssl_send_empty_frags:
description:
- Enable/disable sending empty fragments to avoid CBC IV attacks (SSL 3.0 & TLS 1.0 only). May need to be disabled for compatibility with
older systems.
type: str
choices:
- enable
- disable
ssl_server_algorithm:
description:
- Permitted encryption algorithms for the server side of SSL full mode sessions according to encryption strength.
type: str
choices:
- high
- medium
- low
- custom
- client
ssl_server_cipher_suites:
description:
- SSL/TLS cipher suites to offer to a server, ordered by priority.
type: list
suboptions:
cipher:
description:
- Cipher suite name.
type: str
choices:
- TLS-RSA-WITH-3DES-EDE-CBC-SHA
- TLS-DHE-RSA-WITH-DES-CBC-SHA
- TLS-DHE-DSS-WITH-DES-CBC-SHA
priority:
description:
- SSL/TLS cipher suites priority.
required: true
type: int
versions:
description:
- SSL/TLS versions that the cipher suite can be used with.
type: str
choices:
- ssl-3.0
- tls-1.0
- tls-1.1
- tls-1.2
ssl_server_max_version:
description:
- Highest SSL/TLS version acceptable from a server. Use the client setting by default.
type: str
choices:
- ssl-3.0
- tls-1.0
- tls-1.1
- tls-1.2
- client
ssl_server_min_version:
description:
- Lowest SSL/TLS version acceptable from a server. Use the client setting by default.
type: str
choices:
- ssl-3.0
- tls-1.0
- tls-1.1
- tls-1.2
- client
ssl_server_session_state_max:
description:
- Maximum number of FortiGate to Server SSL session states to keep.
type: int
ssl_server_session_state_timeout:
description:
- Number of minutes to keep FortiGate to Server SSL session state.
type: int
ssl_server_session_state_type:
description:
- How to expire SSL sessions for the segment of the SSL connection between the server and the FortiGate.
type: str
choices:
- disable
- time
- count
- both
type:
description:
- Configure a static NAT or server load balance VIP.
type: str
choices:
- static-nat
- server-load-balance
uuid:
description:
- Universally Unique Identifier (UUID; automatically assigned but can be manually reset).
type: str
weblogic_server:
description:
- Enable to add an HTTP header to indicate SSL offloading for a WebLogic server.
type: str
choices:
- disable
- enable
websphere_server:
description:
- Enable to add an HTTP header to indicate SSL offloading for a WebSphere server.
type: str
choices:
- disable
- enable
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: Configure virtual IP for IPv6.
fortios_firewall_vip6:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
state: "present"
firewall_vip6:
arp_reply: "disable"
color: "4"
comment: "Comment."
extip: "<your_own_value>"
extport: "<your_own_value>"
http_cookie_age: "8"
http_cookie_domain: "<your_own_value>"
http_cookie_domain_from_host: "disable"
http_cookie_generation: "11"
http_cookie_path: "<your_own_value>"
http_cookie_share: "disable"
http_ip_header: "enable"
http_ip_header_name: "<your_own_value>"
http_multiplex: "enable"
https_cookie_secure: "disable"
id: "18"
ldb_method: "static"
mappedip: "<your_own_value>"
mappedport: "<your_own_value>"
max_embryonic_connections: "22"
monitor:
-
name: "default_name_24 (source firewall.ldb-monitor.name)"
name: "default_name_25"
outlook_web_access: "disable"
persistence: "none"
portforward: "disable"
protocol: "tcp"
realservers:
-
client_ip: "<your_own_value>"
healthcheck: "disable"
holddown_interval: "33"
http_host: "myhostname"
id: "35"
ip: "<your_own_value>"
max_connections: "37"
monitor: "<your_own_value> (source firewall.ldb-monitor.name)"
port: "39"
status: "active"
weight: "41"
server_type: "http"
src_filter:
-
range: "<your_own_value>"
ssl_algorithm: "high"
ssl_certificate: "<your_own_value> (source vpn.certificate.local.name)"
ssl_cipher_suites:
-
cipher: "TLS-RSA-WITH-3DES-EDE-CBC-SHA"
priority: "49"
versions: "ssl-3.0"
ssl_client_fallback: "disable"
ssl_client_renegotiation: "allow"
ssl_client_session_state_max: "53"
ssl_client_session_state_timeout: "54"
ssl_client_session_state_type: "disable"
ssl_dh_bits: "768"
ssl_hpkp: "disable"
ssl_hpkp_age: "58"
ssl_hpkp_backup: "<your_own_value> (source vpn.certificate.local.name vpn.certificate.ca.name)"
ssl_hpkp_include_subdomains: "disable"
ssl_hpkp_primary: "<your_own_value> (source vpn.certificate.local.name vpn.certificate.ca.name)"
ssl_hpkp_report_uri: "<your_own_value>"
ssl_hsts: "disable"
ssl_hsts_age: "64"
ssl_hsts_include_subdomains: "disable"
ssl_http_location_conversion: "enable"
ssl_http_match_host: "enable"
ssl_max_version: "ssl-3.0"
ssl_min_version: "ssl-3.0"
ssl_mode: "half"
ssl_pfs: "require"
ssl_send_empty_frags: "enable"
ssl_server_algorithm: "high"
ssl_server_cipher_suites:
-
cipher: "TLS-RSA-WITH-3DES-EDE-CBC-SHA"
priority: "76"
versions: "ssl-3.0"
ssl_server_max_version: "ssl-3.0"
ssl_server_min_version: "ssl-3.0"
ssl_server_session_state_max: "80"
ssl_server_session_state_timeout: "81"
ssl_server_session_state_type: "disable"
type: "static-nat"
uuid: "<your_own_value>"
weblogic_server: "disable"
websphere_server: "disable"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
host = data['host']
username = data['username']
password = data['password']
ssl_verify = data['ssl_verify']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password, verify=ssl_verify)
def filter_firewall_vip6_data(json):
option_list = ['arp_reply', 'color', 'comment',
'extip', 'extport', 'http_cookie_age',
'http_cookie_domain', 'http_cookie_domain_from_host', 'http_cookie_generation',
'http_cookie_path', 'http_cookie_share', 'http_ip_header',
'http_ip_header_name', 'http_multiplex', 'https_cookie_secure',
'id', 'ldb_method', 'mappedip',
'mappedport', 'max_embryonic_connections', 'monitor',
'name', 'outlook_web_access', 'persistence',
'portforward', 'protocol', 'realservers',
'server_type', 'src_filter', 'ssl_algorithm',
'ssl_certificate', 'ssl_cipher_suites', 'ssl_client_fallback',
'ssl_client_renegotiation', 'ssl_client_session_state_max', 'ssl_client_session_state_timeout',
'ssl_client_session_state_type', 'ssl_dh_bits', 'ssl_hpkp',
'ssl_hpkp_age', 'ssl_hpkp_backup', 'ssl_hpkp_include_subdomains',
'ssl_hpkp_primary', 'ssl_hpkp_report_uri', 'ssl_hsts',
'ssl_hsts_age', 'ssl_hsts_include_subdomains', 'ssl_http_location_conversion',
'ssl_http_match_host', 'ssl_max_version', 'ssl_min_version',
'ssl_mode', 'ssl_pfs', 'ssl_send_empty_frags',
'ssl_server_algorithm', 'ssl_server_cipher_suites', 'ssl_server_max_version',
'ssl_server_min_version', 'ssl_server_session_state_max', 'ssl_server_session_state_timeout',
'ssl_server_session_state_type', 'type', 'uuid',
'weblogic_server', 'websphere_server']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
def underscore_to_hyphen(data):
if isinstance(data, list):
        for i, elem in enumerate(data):
            data[i] = underscore_to_hyphen(elem)
elif isinstance(data, dict):
new_data = {}
for k, v in data.items():
new_data[k.replace('_', '-')] = underscore_to_hyphen(v)
data = new_data
return data
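# Illustration (added comment): the helper above recursively converts the
# Ansible-style underscore keys into the hyphenated keys the FortiOS API
# expects, e.g.
#   underscore_to_hyphen({'http_cookie_age': 8, 'src_filter': [{'range': '::/0'}]})
#   -> {'http-cookie-age': 8, 'src-filter': [{'range': '::/0'}]}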
def firewall_vip6(data, fos):
vdom = data['vdom']
state = data['state']
firewall_vip6_data = data['firewall_vip6']
filtered_data = underscore_to_hyphen(filter_firewall_vip6_data(firewall_vip6_data))
if state == "present":
return fos.set('firewall',
'vip6',
data=filtered_data,
vdom=vdom)
elif state == "absent":
return fos.delete('firewall',
'vip6',
mkey=filtered_data['name'],
vdom=vdom)
def is_successful_status(status):
return status['status'] == "success" or \
status['http_method'] == "DELETE" and status['http_status'] == 404
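# Illustration (added comment): a DELETE that comes back with HTTP 404 is still
# counted as success, so removing an already-absent vip6 object stays idempotent.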
def fortios_firewall(data, fos):
if data['firewall_vip6']:
resp = firewall_vip6(data, fos)
return not is_successful_status(resp), \
resp['status'] == "success", \
resp
def main():
fields = {
"host": {"required": False, "type": "str"},
"username": {"required": False, "type": "str"},
"password": {"required": False, "type": "str", "default": "", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": True},
"ssl_verify": {"required": False, "type": "bool", "default": True},
"state": {"required": True, "type": "str",
"choices": ["present", "absent"]},
"firewall_vip6": {
"required": False, "type": "dict", "default": None,
"options": {
"arp_reply": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"color": {"required": False, "type": "int"},
"comment": {"required": False, "type": "str"},
"extip": {"required": False, "type": "str"},
"extport": {"required": False, "type": "str"},
"http_cookie_age": {"required": False, "type": "int"},
"http_cookie_domain": {"required": False, "type": "str"},
"http_cookie_domain_from_host": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"http_cookie_generation": {"required": False, "type": "int"},
"http_cookie_path": {"required": False, "type": "str"},
"http_cookie_share": {"required": False, "type": "str",
"choices": ["disable", "same-ip"]},
"http_ip_header": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"http_ip_header_name": {"required": False, "type": "str"},
"http_multiplex": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"https_cookie_secure": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"id": {"required": False, "type": "int"},
"ldb_method": {"required": False, "type": "str",
"choices": ["static", "round-robin", "weighted",
"least-session", "least-rtt", "first-alive",
"http-host"]},
"mappedip": {"required": False, "type": "str"},
"mappedport": {"required": False, "type": "str"},
"max_embryonic_connections": {"required": False, "type": "int"},
"monitor": {"required": False, "type": "list",
"options": {
"name": {"required": True, "type": "str"}
}},
"name": {"required": True, "type": "str"},
"outlook_web_access": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"persistence": {"required": False, "type": "str",
"choices": ["none", "http-cookie", "ssl-session-id"]},
"portforward": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"protocol": {"required": False, "type": "str",
"choices": ["tcp", "udp", "sctp"]},
"realservers": {"required": False, "type": "list",
"options": {
"client_ip": {"required": False, "type": "str"},
"healthcheck": {"required": False, "type": "str",
"choices": ["disable", "enable", "vip"]},
"holddown_interval": {"required": False, "type": "int"},
"http_host": {"required": False, "type": "str"},
"id": {"required": True, "type": "int"},
"ip": {"required": False, "type": "str"},
"max_connections": {"required": False, "type": "int"},
"monitor": {"required": False, "type": "str"},
"port": {"required": False, "type": "int"},
"status": {"required": False, "type": "str",
"choices": ["active", "standby", "disable"]},
"weight": {"required": False, "type": "int"}
}},
"server_type": {"required": False, "type": "str",
"choices": ["http", "https", "imaps",
"pop3s", "smtps", "ssl",
"tcp", "udp", "ip"]},
"src_filter": {"required": False, "type": "list",
"options": {
"range": {"required": True, "type": "str"}
}},
"ssl_algorithm": {"required": False, "type": "str",
"choices": ["high", "medium", "low",
"custom"]},
"ssl_certificate": {"required": False, "type": "str"},
"ssl_cipher_suites": {"required": False, "type": "list",
"options": {
"cipher": {"required": False, "type": "str",
"choices": ["TLS-RSA-WITH-3DES-EDE-CBC-SHA", "TLS-DHE-RSA-WITH-DES-CBC-SHA",
"TLS-DHE-DSS-WITH-DES-CBC-SHA"]},
"priority": {"required": True, "type": "int"},
"versions": {"required": False, "type": "str",
"choices": ["ssl-3.0", "tls-1.0", "tls-1.1",
"tls-1.2"]}
}},
"ssl_client_fallback": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"ssl_client_renegotiation": {"required": False, "type": "str",
"choices": ["allow", "deny", "secure"]},
"ssl_client_session_state_max": {"required": False, "type": "int"},
"ssl_client_session_state_timeout": {"required": False, "type": "int"},
"ssl_client_session_state_type": {"required": False, "type": "str",
"choices": ["disable", "time", "count",
"both"]},
"ssl_dh_bits": {"required": False, "type": "str",
"choices": ["768", "1024", "1536",
"2048", "3072", "4096"]},
"ssl_hpkp": {"required": False, "type": "str",
"choices": ["disable", "enable", "report-only"]},
"ssl_hpkp_age": {"required": False, "type": "int"},
"ssl_hpkp_backup": {"required": False, "type": "str"},
"ssl_hpkp_include_subdomains": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"ssl_hpkp_primary": {"required": False, "type": "str"},
"ssl_hpkp_report_uri": {"required": False, "type": "str"},
"ssl_hsts": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"ssl_hsts_age": {"required": False, "type": "int"},
"ssl_hsts_include_subdomains": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"ssl_http_location_conversion": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"ssl_http_match_host": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"ssl_max_version": {"required": False, "type": "str",
"choices": ["ssl-3.0", "tls-1.0", "tls-1.1",
"tls-1.2"]},
"ssl_min_version": {"required": False, "type": "str",
"choices": ["ssl-3.0", "tls-1.0", "tls-1.1",
"tls-1.2"]},
"ssl_mode": {"required": False, "type": "str",
"choices": ["half", "full"]},
"ssl_pfs": {"required": False, "type": "str",
"choices": ["require", "deny", "allow"]},
"ssl_send_empty_frags": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"ssl_server_algorithm": {"required": False, "type": "str",
"choices": ["high", "medium", "low",
"custom", "client"]},
"ssl_server_cipher_suites": {"required": False, "type": "list",
"options": {
"cipher": {"required": False, "type": "str",
"choices": ["TLS-RSA-WITH-3DES-EDE-CBC-SHA", "TLS-DHE-RSA-WITH-DES-CBC-SHA",
"TLS-DHE-DSS-WITH-DES-CBC-SHA"]},
"priority": {"required": True, "type": "int"},
"versions": {"required": False, "type": "str",
"choices": ["ssl-3.0", "tls-1.0", "tls-1.1",
"tls-1.2"]}
}},
"ssl_server_max_version": {"required": False, "type": "str",
"choices": ["ssl-3.0", "tls-1.0", "tls-1.1",
"tls-1.2", "client"]},
"ssl_server_min_version": {"required": False, "type": "str",
"choices": ["ssl-3.0", "tls-1.0", "tls-1.1",
"tls-1.2", "client"]},
"ssl_server_session_state_max": {"required": False, "type": "int"},
"ssl_server_session_state_timeout": {"required": False, "type": "int"},
"ssl_server_session_state_type": {"required": False, "type": "str",
"choices": ["disable", "time", "count",
"both"]},
"type": {"required": False, "type": "str",
"choices": ["static-nat", "server-load-balance"]},
"uuid": {"required": False, "type": "str"},
"weblogic_server": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"websphere_server": {"required": False, "type": "str",
"choices": ["disable", "enable"]}
}
}
}
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
# legacy_mode refers to using fortiosapi instead of HTTPAPI
legacy_mode = 'host' in module.params and module.params['host'] is not None and \
'username' in module.params and module.params['username'] is not None and \
'password' in module.params and module.params['password'] is not None
if not legacy_mode:
if module._socket_path:
connection = Connection(module._socket_path)
fos = FortiOSHandler(connection)
is_error, has_changed, result = fortios_firewall(module.params, fos)
else:
module.fail_json(**FAIL_SOCKET_MSG)
else:
try:
from fortiosapi import FortiOSAPI
except ImportError:
module.fail_json(msg="fortiosapi module is required")
fos = FortiOSAPI()
login(module.params, fos)
is_error, has_changed, result = fortios_firewall(module.params, fos)
fos.logout()
if not is_error:
module.exit_json(changed=has_changed, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
|
amenonsen/ansible
|
lib/ansible/modules/network/fortios/fortios_firewall_vip6.py
|
Python
|
gpl-3.0
| 46,189
|
#This file is part of Tryton. The COPYRIGHT file at the top level of
#this repository contains the full copyright notices and license terms.
from .wizard import Wizard, StateView, StateTransition, StateAction, Button
__all__ = ['Wizard', 'StateView', 'StateTransition', 'StateAction', 'Button']
|
openlabs/trytond
|
trytond/wizard/__init__.py
|
Python
|
gpl-3.0
| 297
|
from django import forms
from django.contrib.admin.widgets import AdminDateWidget
from django.forms import ModelForm
from basic.models import *
from battles.models import *
from itertools import chain
def getMemberAsOptions():
members = Member.objects.all()
return [(member.id, member.get_username()) for member in members]
def getTerritoriesAsOptions():
territories = Territory.objects.all()
    return [(territory.id, territory.get_name()) for territory in territories]
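# Illustration (added comment): both helpers return Django choice tuples such
# as [(1, 'alice'), (2, 'bob')]; note they are evaluated once, at import time,
# when the form classes below are defined.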
# todo: modelForm!!!
class Battle(forms.Form):
status = forms.ChoiceField(Battle.STATUS_CHOICES)
date = forms.DateField(initial=datetime.date.today, widget=forms.SelectDateWidget)
class TerritoryBattleForm(Battle):
assigned_users = forms.MultipleChoiceField(choices=getMemberAsOptions(), widget=forms.CheckboxSelectMultiple())
planet=forms.ChoiceField(Territory.PLANET_CHOICES)
territory=forms.ChoiceField(choices=getTerritoriesAsOptions())
class ClanWarForm(Battle):
clan_war_type = forms.ChoiceField(ClanWar.CLAN_WAR_TYPES)
result = forms.ChoiceField(ClanWar.RESULT_CHOICES)
enemy_clans=forms.CharField(max_length=50)
class HypothesisForm(forms.Form):
def __init__(self, *args, **kwargs):
super(HypothesisForm, self).__init__(*args, **kwargs)
for i in Hypothesis.OPPONENTS:
self.fields['opponent_' + str(i)] = forms.ChoiceField(Hypothesis.STAR_CHOICES)
|
Ralev93/Clan-site
|
clan_site/battles/forms.py
|
Python
|
gpl-3.0
| 1,375
|
# Copyright 2016 Casey Jaymes
# This file is part of PySCAP.
#
# PySCAP is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PySCAP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PySCAP. If not, see <http://www.gnu.org/licenses/>.
from scap.Model import Model
import logging
logger = logging.getLogger(__name__)
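# Declarative model of the OCIL choice_group construct: MODEL_MAP requires at
# least one child 'choice' element (collected into the 'choices' list as
# ChoiceType instances) and a mandatory 'id' attribute.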
class ChoiceGroupType(Model):
MODEL_MAP = {
'elements': [
{'tag_name': 'choice', 'list': 'choices', 'class': 'ChoiceType', 'min': 1},
],
'attributes': {
'id': {'type': 'ChoiceGroupIDPattern', 'required': True},
},
}
|
cjaymes/pyscap
|
src/scap/model/ocil_2_0/ChoiceGroupType.py
|
Python
|
gpl-3.0
| 1,043
|
from south.db import db
from django.db import models
from albums.models import *
class Migration:
def forwards(self, orm):
# Deleting field 'AlbumItem.slug'
db.delete_column('albums_albumitem', 'slug')
def backwards(self, orm):
# Adding field 'AlbumItem.slug'
db.add_column('albums_albumitem', 'slug', orm['albums.albumitem:slug'])
models = {
'albums.album': {
'album_slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}),
'albumitem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['albums.AlbumItem']", 'unique': 'True', 'primary_key': 'True'}),
'highlight': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'highlight_parent'", 'null': 'True', 'to': "orm['albums.AlbumItem']"}),
'owners': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']"})
},
'albums.albumconvertableitem': {
'Meta': {'unique_together': "(('item_slug', 'parent'),)"},
'albumitem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['albums.AlbumItem']", 'unique': 'True', 'primary_key': 'True'}),
'allow_comments': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'allow_ratings': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'appearances': ('django.contrib.contenttypes.generic.GenericRelation', [], {'to': "orm['appearances.Appearance']"}),
'converted': ('django.db.models.fields.NullBooleanField', [], {'null': 'True'}),
'item_slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'children'", 'null': 'True', 'to': "orm['albums.Album']"}),
'rating_score': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'rating_votes': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'blank': 'True'}),
'submitter': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'thumbFilename': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'})
},
'albums.albumitem': {
'created': ('django.db.models.fields.DateTimeField', [], {}),
'edited': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '1000'})
},
'albums.image': {
'albumconvertableitem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['albums.AlbumConvertableItem']", 'unique': 'True', 'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'imageFilename': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'})
},
'albums.video': {
'albumconvertableitem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['albums.AlbumConvertableItem']", 'unique': 'True', 'primary_key': 'True'}),
'flvFilename': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
'video': ('django.db.models.fields.files.FileField', [], {'max_length': '100'})
},
'appearances.appearance': {
'Meta': {'unique_together': "(('user', 'content_type', 'object_id'),)"},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'created_date': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'updated_date': ('django.db.models.fields.DateTimeField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'auth.group': {
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)"},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['albums']
|
jgroszko/django-albums
|
albums/migrations/0011_drop_old_slug.py
|
Python
|
gpl-3.0
| 7,471
|
import gobject
from kupfer import pretty
from kupfer.core import settings
from kupfer.core.settings import UserNamePassword
__all__ = [
"UserNamePassword",
"PluginSettings",
"check_dbus_connection",
]
def _is_core_setting(key):
return key.startswith("kupfer_")
class PluginSettings (gobject.GObject, pretty.OutputMixin):
"""Allows plugins to have preferences by assigning an instance
of this class to the plugin's __kupfer_settings__ attribute.
Setting values are accessed by the getitem operator [] with
the setting's 'key' attribute
"""
__gtype_name__ = "PluginSettings"
def __init__(self, *setdescs):
"""Create a settings collection by passing in dictionaries
as arguments, where each dictionary must have the following keys:
key
type
value (default value)
label (localized label)
the @key may be any string except strings starting with
'kupfer_', which are reserved
"""
gobject.GObject.__init__(self)
self.setting_descriptions = {}
self.setting_key_order = []
req_keys = set(("key", "value", "type", "label"))
for desc in setdescs:
if not req_keys.issubset(desc.keys()):
missing = req_keys.difference(desc.keys())
raise KeyError("Plugin setting missing keys: %s" % missing)
self.setting_descriptions[desc["key"]] = dict(desc)
self.setting_key_order.append(desc["key"])
def __iter__(self):
return iter(self.setting_key_order)
def initialize(self, plugin_name):
"""Init by reading from global settings and setting up callbacks"""
setctl = settings.GetSettingsController()
for key in self:
value_type = self.setting_descriptions[key]["type"]
value = setctl.get_plugin_config(plugin_name, key, value_type)
if value is not None:
self[key] = value
elif _is_core_setting(key):
default = self.setting_descriptions[key]["value"]
setctl.set_plugin_config(plugin_name, key, default, value_type)
setctl.connect("value-changed", self._value_changed, plugin_name)
def __getitem__(self, key):
return self.setting_descriptions[key]["value"]
def __setitem__(self, key, value):
value_type = self.setting_descriptions[key]["type"]
self.setting_descriptions[key]["value"] = value_type(value)
if not _is_core_setting(key):
self.emit("plugin-setting-changed", key, value)
def _value_changed(self, setctl, section, key, value, plugin_name):
"""Preferences changed, update object"""
if key in self and plugin_name in section:
self[key] = value
def get_value_type(self, key):
"""Return type of setting @key"""
return self.setting_descriptions[key]["type"]
def get_label(self, key):
"""Return label for setting @key"""
return self.setting_descriptions[key]["label"]
def get_alternatives(self, key):
"""Return alternatives for setting @key (if any)"""
return self.setting_descriptions[key].get("alternatives")
def get_tooltip(self, key):
"""Return tooltip string for setting @key (if any)"""
return self.setting_descriptions[key].get("tooltip")
def connect_settings_changed_cb(self, callback, *args):
self.connect("plugin-setting-changed", callback, *args)
# Section, Key, Value
gobject.signal_new("plugin-setting-changed", PluginSettings,
gobject.SIGNAL_RUN_LAST, gobject.TYPE_BOOLEAN,
(gobject.TYPE_STRING, gobject.TYPE_PYOBJECT))
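# Illustrative sketch (not part of the original module): a plugin would
# typically declare its preferences with one dict per setting, using the
# required keys documented above, e.g.
#
#     __kupfer_settings__ = PluginSettings(
#         {
#             "key": "show_hidden",
#             "label": "Show hidden files",
#             "type": bool,
#             "value": False,
#         },
#     )
#
# and later read a value with __kupfer_settings__["show_hidden"].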
# Plugin convenience functions for dependencies
_has_dbus_connection = None
def check_dbus_connection():
"""
Check if a connection to the D-Bus daemon is available,
else raise ImportError with an explanatory error message.
For plugins that can not be used without contact with D-Bus;
if this check is used, the plugin may use D-Bus and assume it
is available in the Plugin's code.
"""
global _has_dbus_connection
if _has_dbus_connection is None:
import dbus
try:
dbus.Bus()
_has_dbus_connection = True
except dbus.DBusException, err:
_has_dbus_connection = False
if not _has_dbus_connection:
raise ImportError(_("No D-Bus connection to desktop session"))
|
cjparsons74/Kupfer-cjparsons74
|
kupfer/plugin_support.py
|
Python
|
gpl-3.0
| 3,958
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# conf.py
"""
Loading a configuration
~~~~~~~~~~~~~~~~~~~~~~~
Various aspects of PyPhi's behavior can be configured.
When PyPhi is imported, it checks for a YAML file named ``pyphi_config.yml`` in
the current directory and automatically loads it if it exists; otherwise the
default configuration is used.
.. only:: never
This py.test fixture resets PyPhi config back to defaults after running
this doctest. This will not be shown in the output markup.
>>> getfixture('restore_config_afterwards')
The various settings are listed here with their defaults.
>>> import pyphi
>>> defaults = pyphi.config.defaults()
Print the ``config`` object to see the current settings:
>>> print(pyphi.config) # doctest: +SKIP
{ 'ASSUME_CUTS_CANNOT_CREATE_NEW_CONCEPTS': False,
'CACHE_SIAS': False,
'CACHE_POTENTIAL_PURVIEWS': True,
'CACHING_BACKEND': 'fs',
...
Setting can be changed on the fly by assigning them a new value:
>>> pyphi.config.PROGRESS_BARS = False
It is also possible to manually load a configuration file:
>>> pyphi.config.load_file('pyphi_config.yml')
Or load a dictionary of configuration values:
>>> pyphi.config.load_dict({'PRECISION': 1})
Approximations and theoretical options
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
These settings control the algorithms PyPhi uses.
- :attr:`~pyphi.conf.PyphiConfig.ASSUME_CUTS_CANNOT_CREATE_NEW_CONCEPTS`
- :attr:`~pyphi.conf.PyphiConfig.CUT_ONE_APPROXIMATION`
- :attr:`~pyphi.conf.PyphiConfig.MEASURE`
- :attr:`~pyphi.conf.PyphiConfig.ACTUAL_CAUSATION_MEASURE`
- :attr:`~pyphi.conf.PyphiConfig.PARTITION_TYPE`
- :attr:`~pyphi.conf.PyphiConfig.PICK_SMALLEST_PURVIEW`
- :attr:`~pyphi.conf.PyphiConfig.USE_SMALL_PHI_DIFFERENCE_FOR_CES_DISTANCE`
- :attr:`~pyphi.conf.PyphiConfig.SYSTEM_CUTS`
- :attr:`~pyphi.conf.PyphiConfig.SINGLE_MICRO_NODES_WITH_SELFLOOPS_HAVE_PHI`
- :attr:`~pyphi.conf.PyphiConfig.VALIDATE_SUBSYSTEM_STATES`
- :attr:`~pyphi.conf.PyphiConfig.VALIDATE_CONDITIONAL_INDEPENDENCE`
Parallelization and system resources
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
These settings control how much processing power and memory is available for
PyPhi to use. The default values may not be appropriate for your use-case or
machine, so **please check these settings before running anything**. Otherwise,
there is a risk that simulations might crash (potentially after running for a
long time!), resulting in data loss.
- :attr:`~pyphi.conf.PyphiConfig.PARALLEL_CONCEPT_EVALUATION`
- :attr:`~pyphi.conf.PyphiConfig.PARALLEL_CUT_EVALUATION`
- :attr:`~pyphi.conf.PyphiConfig.PARALLEL_COMPLEX_EVALUATION`
- :attr:`~pyphi.conf.PyphiConfig.NUMBER_OF_CORES`
- :attr:`~pyphi.conf.PyphiConfig.MAXIMUM_CACHE_MEMORY_PERCENTAGE`
.. important::
Only one of ``PARALLEL_CONCEPT_EVALUATION``, ``PARALLEL_CUT_EVALUATION``,
and ``PARALLEL_COMPLEX_EVALUATION`` can be set to ``True`` at a time.
**For most networks,** ``PARALLEL_CUT_EVALUATION`` **is the most
efficient.** This is because the algorithm is exponential time in the
number of nodes, so most of the time is spent on the largest subsystem.
You should only parallelize concept evaluation if you are just computing a
|CauseEffectStructure|.
Memoization and caching
~~~~~~~~~~~~~~~~~~~~~~~
PyPhi provides a number of ways to cache intermediate results.
- :attr:`~pyphi.conf.PyphiConfig.CACHE_SIAS`
- :attr:`~pyphi.conf.PyphiConfig.CACHE_REPERTOIRES`
- :attr:`~pyphi.conf.PyphiConfig.CACHE_POTENTIAL_PURVIEWS`
- :attr:`~pyphi.conf.PyphiConfig.CLEAR_SUBSYSTEM_CACHES_AFTER_COMPUTING_SIA`
- :attr:`~pyphi.conf.PyphiConfig.CACHING_BACKEND`
- :attr:`~pyphi.conf.PyphiConfig.FS_CACHE_VERBOSITY`
- :attr:`~pyphi.conf.PyphiConfig.FS_CACHE_DIRECTORY`
- :attr:`~pyphi.conf.PyphiConfig.MONGODB_CONFIG`
- :attr:`~pyphi.conf.PyphiConfig.REDIS_CACHE`
- :attr:`~pyphi.conf.PyphiConfig.REDIS_CONFIG`
Logging
~~~~~~~
These settings control how PyPhi handles messages. Logs can be written to
standard output, a file, both, or none. If these simple default controls are
not flexible enough for you, you can override the entire logging configuration.
See the `documentation on Python's logger
<https://docs.python.org/3/library/logging.html>`_ for more information.
- :attr:`~pyphi.conf.PyphiConfig.WELCOME_OFF`
- :attr:`~pyphi.conf.PyphiConfig.LOG_STDOUT_LEVEL`
- :attr:`~pyphi.conf.PyphiConfig.LOG_FILE_LEVEL`
- :attr:`~pyphi.conf.PyphiConfig.LOG_FILE`
- :attr:`~pyphi.conf.PyphiConfig.PROGRESS_BARS`
- :attr:`~pyphi.conf.PyphiConfig.REPR_VERBOSITY`
- :attr:`~pyphi.conf.PyphiConfig.PRINT_FRACTIONS`
Numerical precision
~~~~~~~~~~~~~~~~~~~
- :attr:`~pyphi.conf.PyphiConfig.PRECISION`
The ``config`` API
~~~~~~~~~~~~~~~~~~
"""
# pylint: disable=protected-access
import contextlib
import logging
import logging.config
import os
import pprint
from copy import copy
from pathlib import Path
import joblib
import yaml
from . import __about__, constants
log = logging.getLogger(__name__)
_VALID_LOG_LEVELS = [None, "CRITICAL", "ERROR", "WARNING", "INFO", "DEBUG", "NOTSET"]
class Option:
"""A descriptor implementing PyPhi configuration options.
Args:
default: The default value of this ``Option``.
Keyword Args:
values (list): Allowed values for this option. A ``ValueError`` will
be raised if ``values`` is not ``None`` and the option is set to
be a value not in the list.
on_change (function): Optional callback that is called when the value
of the option is changed. The ``Config`` instance is passed as
the only argument to the callback.
doc (str): Optional docstring for the option.
"""
def __init__(self, default, values=None, type=None, on_change=None, doc=None):
self.default = default
self.values = values
self.type = type
self.on_change = on_change
self.doc = doc
self.__doc__ = self._docstring()
def __set_name__(self, owner, name):
self.name = name
def _docstring(self):
default = "``default={}``".format(repr(self.default))
values = (
", ``values={}``".format(repr(self.values))
if self.values is not None
else ""
)
on_change = (
", ``on_change={}``".format(self.on_change.__name__)
if self.on_change is not None
else ""
)
return "{}{}{}\n{}".format(default, values, on_change, self.doc or "")
def __get__(self, obj, cls=None):
if obj is None:
return self
return obj._values[self.name]
def __set__(self, obj, value):
self._validate(value)
obj._values[self.name] = value
self._callback(obj)
def _validate(self, value):
"""Validate the new value."""
if self.type is not None and not isinstance(value, self.type):
raise ValueError(
"{} must be of type {} for {}; got {}".format(
value, self.type, self.name, type(value)
)
)
if self.values and value not in self.values:
raise ValueError(
"{} ({}) is not a valid value for {}; must be one of:\n {}".format(
value,
type(value),
self.name,
"\n ".join(["{} ({})".format(v, type(v)) for v in self.values]),
)
)
def _callback(self, obj):
"""Trigger any callbacks."""
if self.on_change is not None:
self.on_change(obj)
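# Minimal illustration (an assumption, not part of the original source): an
# Option is declared as a class attribute on a Config subclass, e.g.
#
#     class MyConfig(Config):
#         VERBOSE = Option(False, type=bool, doc="Print extra output.")
#
# Assigning a non-bool value to MyConfig().VERBOSE would then raise ValueError
# via Option._validate.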
class Config:
"""Base configuration object.
See ``PyphiConfig`` for usage.
"""
def __init__(self):
self._values = {}
self._loaded_files = []
# Set the default value of each ``Option``
for name, opt in self.options().items():
opt._validate(opt.default)
self._values[name] = opt.default
# Call hooks for each Option
# (This must happen *after* all default values are set so that
        # logging can be properly configured.)
for opt in self.options().values():
opt._callback(self)
def __str__(self):
return pprint.pformat(self._values, indent=2)
def __setattr__(self, name, value):
if name.startswith("_") or name in self.options().keys():
super().__setattr__(name, value)
else:
raise ValueError("{} is not a valid config option".format(name))
def __iter__(self):
return iter(self._values.items())
@classmethod
def options(cls):
"""Return a dictionary of the ``Option`` objects for this config."""
return {k: v for k, v in cls.__dict__.items() if isinstance(v, Option)}
def defaults(self):
"""Return the default values of this configuration."""
return {k: v.default for k, v in self.options().items()}
def load_dict(self, dct):
"""Load a dictionary of configuration values."""
for k, v in dct.items():
setattr(self, k, v)
def load_file(self, filename):
"""Load config from a YAML file."""
filename = os.path.abspath(filename)
with open(filename) as f:
self.load_dict(yaml.safe_load(f))
self._loaded_files.append(filename)
def snapshot(self):
"""Return a snapshot of the current values of this configuration."""
return copy(self._values)
def override(self, **new_values):
"""Decorator and context manager to override configuration values.
The initial configuration values are reset after the decorated function
returns or the context manager completes it block, even if the function
or block raises an exception. This is intended to be used by tests
which require specific configuration values.
Example:
>>> from pyphi import config
>>> @config.override(PRECISION=20000)
... def test_something():
... assert config.PRECISION == 20000
...
>>> test_something()
>>> with config.override(PRECISION=100):
... assert config.PRECISION == 100
...
"""
return _override(self, **new_values)
class _override(contextlib.ContextDecorator):
"""See ``Config.override`` for usage."""
def __init__(self, conf, **new_values):
self.conf = conf
self.new_values = new_values
self.initial_values = conf.snapshot()
def __enter__(self):
"""Save original config values; override with new ones."""
self.conf.load_dict(self.new_values)
def __exit__(self, *exc):
"""Reset config to initial values; reraise any exceptions."""
self.conf.load_dict(self.initial_values)
return False
def configure_logging(conf):
"""Reconfigure PyPhi logging based on the current configuration."""
logging.config.dictConfig(
{
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"standard": {
"format": "%(asctime)s [%(name)s] %(levelname)s "
"%(processName)s: %(message)s"
}
},
"handlers": {
"file": {
"level": conf.LOG_FILE_LEVEL,
"filename": conf.LOG_FILE,
"class": "logging.FileHandler",
"formatter": "standard",
},
"stdout": {
"level": conf.LOG_STDOUT_LEVEL,
"class": "pyphi.log.TqdmHandler",
"formatter": "standard",
},
},
"root": {
"level": "DEBUG",
"handlers": (["file"] if conf.LOG_FILE_LEVEL else [])
+ (["stdout"] if conf.LOG_STDOUT_LEVEL else []),
},
}
)
def configure_joblib(conf):
constants.joblib_memory = joblib.Memory(
location=conf.FS_CACHE_DIRECTORY, verbose=conf.FS_CACHE_VERBOSITY
)
def configure_precision(conf):
constants.EPSILON = 10 ** (-conf.PRECISION)
class PyphiConfig(Config):
"""``pyphi.config`` is an instance of this class."""
ASSUME_CUTS_CANNOT_CREATE_NEW_CONCEPTS = Option(
False,
type=bool,
doc="""
In certain cases, making a cut can actually cause a previously reducible
concept to become a proper, irreducible concept. Assuming this can never
    happen can increase performance significantly; however, the obtained results
are not strictly accurate.""",
)
CUT_ONE_APPROXIMATION = Option(
False,
type=bool,
doc="""
When determining the MIP for |big_phi|, this restricts the set of system
cuts that are considered to only those that cut the inputs or outputs of a
single node. This restricted set of cuts scales linearly with the size of
the system; the full set of all possible bipartitions scales
exponentially. This approximation is more likely to give theoretically
accurate results with modular, sparsely-connected, or homogeneous
networks.""",
)
MEASURE = Option(
"EMD",
doc="""
The measure to use when computing distances between repertoires and
concepts. A full list of currently installed measures is available by
calling ``print(pyphi.distance.measures.all())``. Note that some measures
cannot be used for calculating |big_phi| because they are asymmetric.
Custom measures can be added using the ``pyphi.distance.measures.register``
decorator. For example::
from pyphi.distance import measures
@measures.register('ALWAYS_ZERO')
def always_zero(a, b):
return 0
This measure can then be used by setting
``config.MEASURE = 'ALWAYS_ZERO'``.
If the measure is asymmetric you should register it using the
``asymmetric`` keyword argument. See :mod:`~pyphi.distance` for examples.
""",
)
ACTUAL_CAUSATION_MEASURE = Option(
"PMI",
doc="""
The measure to use when computing the pointwise information between state
probabilities in the actual causation module.
See documentation for ``config.MEASURE`` for more information on
configuring measures.
""",
)
PARALLEL_CONCEPT_EVALUATION = Option(
False,
type=bool,
doc="""
Controls whether concepts are evaluated in parallel when computing
cause-effect structures.""",
)
PARALLEL_CUT_EVALUATION = Option(
True,
type=bool,
doc="""
Controls whether system cuts are evaluated in parallel, which is faster but
requires more memory. If cuts are evaluated sequentially, only two
|SystemIrreducibilityAnalysis| instances need to be in memory at once.""",
)
PARALLEL_COMPLEX_EVALUATION = Option(
False,
type=bool,
doc="""
Controls whether systems are evaluated in parallel when computing
complexes.""",
)
NUMBER_OF_CORES = Option(
-1,
type=int,
doc="""
Controls the number of CPU cores used to evaluate unidirectional cuts.
Negative numbers count backwards from the total number of available cores,
with ``-1`` meaning 'use all available cores.'""",
)
MAXIMUM_CACHE_MEMORY_PERCENTAGE = Option(
50,
type=int,
doc="""
PyPhi employs several in-memory caches to speed up computation. However,
these can quickly use a lot of memory for large networks or large numbers
of them; to avoid thrashing, this setting limits the percentage of a
system's RAM that the caches can collectively use.""",
)
CACHE_SIAS = Option(
False,
type=bool,
doc="""
PyPhi is equipped with a transparent caching system for
|SystemIrreducibilityAnalysis| objects which stores them as they are
computed to avoid having to recompute them later. This makes it easy to
play around interactively with the program, or to accumulate results with
minimal effort. For larger projects, however, it is recommended that you
manage the results explicitly, rather than relying on the cache. For this
reason it is disabled by default.""",
)
CACHE_REPERTOIRES = Option(
True,
type=bool,
doc="""
PyPhi caches cause and effect repertoires. This greatly improves speed, but
can consume a significant amount of memory. If you are experiencing memory
issues, try disabling this.""",
)
CACHE_POTENTIAL_PURVIEWS = Option(
True,
type=bool,
doc="""
Controls whether the potential purviews of mechanisms of a network are
cached. Caching speeds up computations by not recomputing expensive
reducibility checks, but uses additional memory.""",
)
CLEAR_SUBSYSTEM_CACHES_AFTER_COMPUTING_SIA = Option(
False,
type=bool,
doc="""
Controls whether a |Subsystem|'s repertoire and MICE caches are cleared
with |Subsystem.clear_caches()| after computing the
|SystemIrreducibilityAnalysis|. If you don't need to do any more
computations after running |compute.sia()|, then enabling this may help
conserve memory.""",
)
CACHING_BACKEND = Option(
"fs",
values=["fs", "db"],
doc="""
Controls whether precomputed results are stored and read from a local
filesystem-based cache in the current directory or from a database. Set
this to ``'fs'`` for the filesystem, ``'db'`` for the database.""",
)
FS_CACHE_VERBOSITY = Option(
0,
type=int,
on_change=configure_joblib,
doc="""
Controls how much caching information is printed if the filesystem cache is
used. Takes a value between ``0`` and ``11``.""",
)
FS_CACHE_DIRECTORY = Option(
"__pyphi_cache__",
type=(str, Path),
on_change=configure_joblib,
doc="""
If the filesystem is used for caching, the cache will be stored in this
directory. This directory can be copied and moved around if you want to
    reuse results *e.g.* on another computer, but it must be in the same
directory from which Python is being run.""",
)
MONGODB_CONFIG = Option(
{
"host": "localhost",
"port": 27017,
"database_name": "pyphi",
"collection_name": "cache",
},
type=dict,
doc="""
Set the configuration for the MongoDB database backend (only has an
effect if ``CACHING_BACKEND`` is ``'db'``).""",
)
REDIS_CACHE = Option(
False,
type=bool,
doc="""
Specifies whether to use Redis to cache |MICE|.""",
)
REDIS_CONFIG = Option(
{"host": "localhost", "port": 6379, "db": 0, "test_db": 1,},
type=dict,
doc="""
Configure the Redis database backend. These are the defaults in the
provided ``redis.conf`` file.""",
)
WELCOME_OFF = Option(
False,
type=bool,
doc="""
Specifies whether to suppress the welcome message when PyPhi is imported.
Alternatively, you may suppress the message by setting the environment
variable ``PYPHI_WELCOME_OFF`` to any value in your shell:
.. code-block:: bash
export PYPHI_WELCOME_OFF='yes'
The message will not print if either this option is ``True`` or the
environment variable is set.""",
)
LOG_FILE = Option(
"pyphi.log",
type=(str, Path),
on_change=configure_logging,
doc="""
Controls the name of the log file.""",
)
LOG_FILE_LEVEL = Option(
"INFO",
values=_VALID_LOG_LEVELS,
on_change=configure_logging,
doc="""
Controls the level of log messages written to the log
file. This setting has the same possible values as
``LOG_STDOUT_LEVEL``.""",
)
LOG_STDOUT_LEVEL = Option(
"WARNING",
values=_VALID_LOG_LEVELS,
on_change=configure_logging,
doc="""
Controls the level of log messages written to standard
output. Can be one of ``'DEBUG'``, ``'INFO'``, ``'WARNING'``, ``'ERROR'``,
``'CRITICAL'``, or ``None``. ``'DEBUG'`` is the least restrictive level and
will show the most log messages. ``'CRITICAL'`` is the most restrictive
level and will only display information about fatal errors. If set to
``None``, logging to standard output will be disabled entirely.""",
)
PROGRESS_BARS = Option(
True,
type=bool,
doc="""
Controls whether to show progress bars on the console.
.. tip::
If you are iterating over many systems rather than doing one
long-running calculation, consider disabling this for speed.""",
)
PRECISION = Option(
6,
type=int,
on_change=configure_precision,
doc="""
If ``MEASURE`` is ``EMD``, then the Earth Mover's Distance is calculated
    with an external C++ library that uses a numerical optimizer to find a good
approximation. Consequently, systems with analytically zero |big_phi| will
sometimes be numerically found to have a small but non-zero amount. This
setting controls the number of decimal places to which PyPhi will consider
EMD calculations accurate. Values of |big_phi| lower than ``10e-PRECISION``
will be considered insignificant and treated as zero. The default value is
about as accurate as the EMD computations get.""",
)
VALIDATE_SUBSYSTEM_STATES = Option(
True,
type=bool,
doc="""
Controls whether PyPhi checks if the subsystems's state is possible
(reachable with nonzero probability from some previous state), given the
subsystem's TPM (**which is conditioned on background conditions**). If
this is turned off, then **calculated** |big_phi| **values may not be
valid**, since they may be associated with a subsystem that could never be
in the given state.""",
)
VALIDATE_CONDITIONAL_INDEPENDENCE = Option(
True,
type=bool,
doc="""
Controls whether PyPhi checks if a system's TPM is conditionally
independent.""",
)
SINGLE_MICRO_NODES_WITH_SELFLOOPS_HAVE_PHI = Option(
False,
type=bool,
doc="""
If set to ``True``, the |big_phi| value of single micro-node subsystems is
the difference between their unpartitioned |CauseEffectStructure| (a single
    concept) and the null concept. If set to ``False``, their |big_phi| is defined
to be zero. Single macro-node subsystems may always be cut, regardless of
circumstances.""",
)
REPR_VERBOSITY = Option(
2,
type=int,
values=[0, 1, 2],
doc="""
Controls the verbosity of ``__repr__`` methods on PyPhi objects. Can be set
to ``0``, ``1``, or ``2``. If set to ``1``, calling ``repr`` on PyPhi
objects will return pretty-formatted and legible strings, excluding
repertoires. If set to ``2``, ``repr`` calls also include repertoires.
Although this breaks the convention that ``__repr__`` methods should return
a representation which can reconstruct the object, readable representations
are convenient since the Python REPL calls ``repr`` to represent all
objects in the shell and PyPhi is often used interactively with the
REPL. If set to ``0``, ``repr`` returns more traditional object
representations.""",
)
PRINT_FRACTIONS = Option(
True,
type=bool,
doc="""
Controls whether numbers in a ``repr`` are printed as fractions. Numbers
are still printed as decimals if the fraction's denominator would be
large. This only has an effect if ``REPR_VERBOSITY > 0``.""",
)
PARTITION_TYPE = Option(
"BI",
doc="""
Controls the type of partition used for |small_phi| computations.
If set to ``'BI'``, partitions will have two parts.
If set to ``'TRI'``, partitions will have three parts. In addition,
computations will only consider partitions that strictly partition the
mechanism. That is, for the mechanism ``(A, B)`` and purview ``(B, C, D)``
the partition::
A,B ∅
─── ✕ ───
B C,D
is not considered, but::
A B
─── ✕ ───
B C,D
is. The following is also valid::
A,B ∅
─── ✕ ─────
∅ B,C,D
In addition, this setting introduces "wedge" tripartitions of the form::
A B ∅
─── ✕ ─── ✕ ───
B C D
where the mechanism in the third part is always empty.
Finally, if set to ``'ALL'``, all possible partitions will be tested.
You can experiment with custom partitioning strategies using the
``pyphi.partition.partition_types.register`` decorator. For example::
from pyphi.models import KPartition, Part
from pyphi.partition import partition_types
@partition_types.register('SINGLE_NODE')
def single_node_partitions(mechanism, purview, node_labels=None):
for element in mechanism:
element = tuple([element])
others = tuple(sorted(set(mechanism) - set(element)))
part1 = Part(mechanism=element, purview=())
part2 = Part(mechanism=others, purview=purview)
yield KPartition(part1, part2, node_labels=node_labels)
This generates the set of partitions that cut connections between a single
mechanism element and the entire purview. The mechanism and purview of each
|Part| remain undivided - only connections *between* parts are severed.
    You can use this new partitioning scheme by setting
``config.PARTITION_TYPE = 'SINGLE_NODE'``.
See :mod:`~pyphi.partition` for more examples.""",
)
PICK_SMALLEST_PURVIEW = Option(
False,
type=bool,
doc="""
When computing a |MIC| or |MIE|, it is possible for several MIPs to have
the same |small_phi| value. If this setting is set to ``True`` the MIP with
the smallest purview is chosen; otherwise, the one with largest purview is
chosen.""",
)
USE_SMALL_PHI_DIFFERENCE_FOR_CES_DISTANCE = Option(
False,
type=bool,
doc="""
If set to ``True``, the distance between cause-effect structures (when
computing a |SystemIrreducibilityAnalysis|) is calculated using the
difference between the sum of |small_phi| in the cause-effect structures
instead of the extended EMD.""",
)
SYSTEM_CUTS = Option(
"3.0_STYLE",
values=["3.0_STYLE", "CONCEPT_STYLE"],
doc="""
If set to ``'3.0_STYLE'``, then traditional IIT 3.0 cuts will be used when
computing |big_phi|. If set to ``'CONCEPT_STYLE'``, then experimental
concept-style system cuts will be used instead.""",
)
def log(self):
"""Log current settings."""
log.info("PyPhi v%s", __about__.__version__)
if self._loaded_files:
log.info("Loaded configuration from %s", self._loaded_files)
else:
log.info("Using default configuration (no configuration file " "provided)")
log.info("Current PyPhi configuration:\n %s", str(self))
PYPHI_CONFIG_FILENAME = "pyphi_config.yml"
config = PyphiConfig()
# Try and load the config file
if os.path.exists(PYPHI_CONFIG_FILENAME):
config.load_file(PYPHI_CONFIG_FILENAME)
# Log the PyPhi version and loaded configuration
config.log()
|
wmayner/pyphi
|
pyphi/conf.py
|
Python
|
gpl-3.0
| 27,730
|
# This file is part of the mantidqt package
#
# Copyright (C) 2017 mantidproject
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, unicode_literals)
from mantidqt.utils.qt import import_qtlib
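# import_qtlib resolves ManageUserDirectories from the compiled _widgetscore
# library in the mantidqt.widgets package and re-exports it under this name.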
ManageUserDirectories = import_qtlib('_widgetscore', 'mantidqt.widgets', 'ManageUserDirectories')
|
ScreamingUdder/mantid
|
qt/python/mantidqt/widgets/manageuserdirectories.py
|
Python
|
gpl-3.0
| 933
|
# -*- coding: utf-8 -*-
from django.conf import settings
from django.contrib.auth.models import User
from django.db import models
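# Quiz domain models: a QuestionSet groups Questions, each Question offers
# several Options (flagged correct or not), and a RespondentSubmission records
# which Option a user chose.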
class QuestionSet(models.Model):
    '''
    A test; it consists of several questions.
    '''
author = models.ForeignKey(User, on_delete=models.CASCADE)
title = models.CharField(max_length=255)
created = models.DateTimeField(auto_now_add=True)
class Meta:
ordering = ('-created', 'author')
def __str__(self):
        return 'Test author: {}'.format(self.author)
class Question(models.Model):
    '''
    The 'atom' of a test.
    '''
question = models.CharField(max_length=255)
ordering = models.PositiveSmallIntegerField(default=1, unique=True)
question_sets = models.ManyToManyField(QuestionSet)
class Meta:
ordering = ('ordering',)
def __str__(self):
return self.question
class Option(models.Model):
    '''
    An answer option for a question.
    '''
question = models.ForeignKey(Question, on_delete=models.CASCADE)
value = models.CharField(max_length=255)
ordering = models.PositiveSmallIntegerField(default=1, unique=True)
is_correct = models.BooleanField()
class Meta:
ordering = ('ordering',)
def __str__(self):
return self.value
class RespondentSubmission(models.Model):
    '''
    A respondent's answer to a test question.
    '''
respondent = models.ForeignKey(User, on_delete=models.CASCADE)
option = models.ForeignKey(Option, on_delete=models.CASCADE)
class Meta:
ordering = ('respondent', 'option')
def __str__(self):
return '{} {}'.format(self.respondent, self.option)
|
mxmaslin/Test-tasks
|
django_test_tasks/old_django_test_tasks/apps/testing/models.py
|
Python
|
gpl-3.0
| 1,778
|
from .synonym import *
|
MWisBest/PyBot
|
Commands/synonym/__init__.py
|
Python
|
gpl-3.0
| 23
|
import os
import pwd
import grp
import argparse
import shutil
from twisted.web import server, resource, static, script
from twisted.internet import reactor
from zope.interface import Interface, Attribute, implements
from twisted.python.components import registerAdapter
from twisted.web.server import Session
import globalVals
import staticFiles
import api
class IUser(Interface):
loggedIn = Attribute("Is the user logged in")
userId = Attribute("Id of the user")
userName = Attribute("Username")
permission = Attribute("User's permission mode")
_points = Attribute("Private points")
points = Attribute("Real points")
class User(object):
def __init__(self, sess):
self.loggedIn = False
startedSession = False
def startSession():
global startedSession
if not startedSession:
print "Started session adapter"
registerAdapter(User, Session, IUser)
startedSession = True
def drop_privileges(uid_name='nobody', gid_name='nogroup'):
if os.getuid() != 0:
# We're not root so, like, whatever dude
return
# Get the uid/gid from the name
running_uid = pwd.getpwnam(uid_name).pw_uid
running_gid = grp.getgrnam(gid_name).gr_gid
# Remove group privileges
os.setgroups([])
# Try setting the new uid/gid
os.setgid(running_gid)
os.setuid(running_uid)
# Ensure a very conservative umask
old_umask = os.umask(077)
def startServer():
ap = argparse.ArgumentParser(description='Server options')
ap.add_argument('--clean',action='store_true',default=False)
ap.add_argument('--port',type=int,default=1337)
ap.add_argument('--domain',default='192.168.220.130')
args = ap.parse_args()
root = staticFiles.FileNoList('root')
root.indexNames = ['index.rpy']
root.ignoreExt('.rpy')
root.processors = {'.rpy': script.ResourceScript}
root.putChild('api',api.Api())
if args.clean:
try: shutil.rmtree('root/uploads')
except: pass
try: shutil.rmtree('root/config')
except: pass
os.mkdir('root/uploads/')
os.mkdir('root/config/')
shutil.copyfile('memeOfTheDay.png','root/uploads/memeOfTheDay.png')
shutil.copyfile('admin.config','root/config/admin.config')
os.system('chown -R nobody:nogroup root/uploads')
os.system('chown -R nobody:nogroup root/config')
globalVals.init(args,root)
site = server.Site(root, logPath=b"access.log")
reactor.listenTCP(args.port, site)
drop_privileges('nobody','nogroup')
reactor.run()
if __name__ == "__main__":
startServer()
|
itsZN/vulnsite
|
mainserver.py
|
Python
|
gpl-3.0
| 2,724
|
import random
import re
from io import BytesIO
from typing import Awaitable, List
import matplotlib.pyplot as plt
import seaborn as sns
from curio.thread import async_thread
from curious.commands import Context, Plugin
from curious.commands.decorators import autoplugin, ratelimit
from yapf.yapflib.style import CreatePEP8Style
from yapf.yapflib.yapf_api import FormatCode
from jokusoramame.utils import rgbize
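# Matches a fenced code block of the form ```lang\ncode``` and captures the
# language hint (group 1) and the code body (group 2).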
code_regexp = re.compile(r"```([^\n]+)\n?(.+)\n?```", re.DOTALL)
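# Weighted vocabulary tables for the ideology generator below; the integer
# values are relative weights passed to random.choices().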
ADJECTIVES = {
"Trans-Exclusionary ": 1,
"Smithian ": 2,
"Ricardian ": 2,
"Randian ": 3,
"Hegelian ": 3,
"Synthesist ": 3,
"Woke ": 4,
"Vegan ": 4,
"Green ": 6,
"Insurrectionary ": 6,
"Anti-Imperialist ": 6,
"Jewish ": 8,
"Bolshevik ": 8,
"Post-left ": 8,
"Inclusive ": 9,
"Individualist ": 9,
"Queer ": 10,
"Atheist ": 10,
"Liberal ": 10,
"Libertarian ": 10,
"Conservative ": 10,
"Social ": 12,
"Islamic ": 12,
"Radical ": 12,
"Catholic ": 12,
"Esoteric ": 12,
"Christian ": 12,
"Progressive ": 12,
"Post-Colonial ": 12,
"Democratic ": 13,
"": 30
}
PREFIXES = {
"Alt-": 1,
"Bio-": 1,
"Taoist ": 2,
"Left ": 3,
"Post-": 3,
"Anarcha-": 3,
"Avant Garde ": 3,
"Eco-": 4,
"Communal ": 6,
"Afro-": 8,
"Ethno-": 8,
"Ultra-": 8,
"Neo-": 10,
"Pan-": 10,
"Anti-": 10,
"Paleo-": 10,
"Techno-": 10,
"Market ": 10,
"Revolutionary ": 10,
"Crypto-": 12,
"Anarcho-": 12,
"National ": 12,
"Orthodox ": 12,
"": 40
}
IDEOLOGIES = {
"Posadism": 1,
"Sexualism": 1,
"Kemalism": 2,
"Unruheism": 2,
"Distributism": 2,
"Titoism": 3,
"Putinism": 3,
"Makhnovism": 3,
"Georgism": 4,
"Keynesian": 4,
"Platformism": 4,
"Municipalism": 5,
"Confederalism": 5,
"Egoism": 6,
"Luddite": 6,
"Agorism": 6,
"Unionism": 6,
"Thatcherite": 6,
"Minarchism": 7,
"Ba'athism": 8,
"Trotskyism": 8,
"Syndicalism": 8,
"Luxemburgism": 8,
"Strasserism": 10,
"Maoism": 12,
"Fascism": 12,
"Marxism": 12,
"Zionism": 12,
"Centrism": 12,
"Pacifism": 12,
"Leninism": 12,
"Populism": 12,
"Futurism": 12,
"Feminism": 12,
"Humanism": 12,
"Mutualism": 12,
"Communism": 12,
"Stalinism": 12,
"Globalism": 12,
"Socialism": 12,
"Capitalism": 12,
"Monarchism": 12,
"Primitivism": 12,
"Nationalism": 12,
"Transhumanism": 12,
"Traditionalism": 12,
"Environmentalism": 12,
"Accelerationism": 12
}
SUFFIXES = {
" in One Country": 1,
" with Masonic elements": 1,
' with "rational debate"': 1,
" with Phlegmsky's vanguardism": 1,
" with Chinese characteristics": 1,
" with a new mode of production": 1,
"": 100
}
@autoplugin
class Misc(Plugin):
"""
Miscellaneous commands.
"""
async def command_ideology(self, ctx: Context):
"""
Creates an ideology just for you!
"""
message = ''
for d in (ADJECTIVES, PREFIXES, IDEOLOGIES, SUFFIXES):
message += random.choices(list(d.keys()), list(d.values()))[0]
await ctx.channel.messages.send(message)
@ratelimit(limit=1, time=30)
async def command_palette(self, ctx: Context, *, colours: List[int]):
"""
Shows a palette plot.
"""
pal_colours = rgbize(colours[:12])
@async_thread
def plot_palette() -> Awaitable[BytesIO]:
with ctx.bot._plot_lock:
sns.palplot(pal_colours, size=1)
plt.tight_layout() # remove useless padding
buf = BytesIO()
plt.savefig(buf, format="png")
buf.seek(0)
plt.clf()
plt.cla()
return buf
@async_thread()
def plot_dark_palette() -> Awaitable[BytesIO]:
with ctx.bot._plot_lock:
with plt.style.context("dark_background"):
sns.palplot(pal_colours, size=1)
plt.tight_layout() # remove useless padding
buf = BytesIO()
plt.savefig(buf, format="png")
buf.seek(0)
plt.clf()
plt.cla()
return buf
if ctx.bot._plot_lock.locked():
await ctx.channel.messages.send("Waiting for plot lock...")
async with ctx.channel.typing:
buf = await plot_palette()
buf2 = await plot_dark_palette()
await ctx.channel.messages.upload(fp=buf.read(), filename="plot.png")
await ctx.channel.messages.upload(fp=buf2, filename="plot_dark.png")
def _normalize_language(self, lang: str) -> str:
"""
Normalizes a language name into consistency.
"""
lang = lang.lower().rstrip("\n")
print(repr(lang))
if lang in ["py", "python", "py3k"]:
return "python"
return lang
async def command_reformat(self, ctx: Context, *, message: str):
"""
Reformats some code.
"""
code_match = code_regexp.match(message)
if code_match is None:
return await ctx.channel.messages.send(":x: Could not find a valid code block with "
"language.")
language, code = code_match.groups()
code = code.replace("\t", " ")
language = self._normalize_language(language)
if language == "python":
# yapfify
style = CreatePEP8Style()
style['COLUMN_LIMIT'] = 100
reformatted, changes = FormatCode(code, style_config=style)
return await ctx.channel.messages.send(f"```py\n{reformatted}```")
return await ctx.channel.messages.send(":x: Unknown language.")
|
SunDwarf/Jokusoramame
|
jokusoramame/plugins/misc.py
|
Python
|
gpl-3.0
| 5,938
|
from django.utils.translation import ugettext_lazy as _
from oioioi.base.menu import MenuRegistry
from oioioi.base.permissions import not_anonymous
from oioioi.contests.utils import contest_exists
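# Registry for the links shown in the top menu; its entries are only rendered
# when a contest is selected and the user is logged in.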
top_links_registry = MenuRegistry(_("Top Links Menu"), contest_exists & not_anonymous)
|
sio2project/oioioi
|
oioioi/dashboard/menu.py
|
Python
|
gpl-3.0
| 286
|
#!/usr/bin/env python
""" Copyright (C) 2012 mountainpenguin (pinguino.de.montana@googlemail.com)
<http://github.com/mountainpenguin/pyrt>
This file is part of pyRT.
pyRT is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
pyRT is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with pyRT. If not, see <http://www.gnu.org/licenses/>.
"""
import cPickle as pickle
import os
try:
import json
except ImportError:
import simplejson as json
class ConfigError(Exception):
def __init__(self, value):
self.parameter = value
def __repr__(self):
return repr(self.parameter)
def __str__(self):
return self.__repr__()
class ConfigStore(object):
def __init__(self, sockpath, serverhost, serverport, password, ssl_certificate=None, ssl_private_key=None, ca_certs=None, root_directory="/", logfile="pyrt.log", refresh=10, scgi_username=None, scgi_password=None, scgi_method="Digest"):
self.rtorrent_socket = sockpath
self.host = serverhost
self.port = serverport
self.password = password
self.ssl_certificate = ssl_certificate
self.ssl_private_key = ssl_private_key
self.ssl_ca_certs = ca_certs
self.root_directory = root_directory
self.logfile = logfile
self.refresh = refresh
self.scgi_username = scgi_username
self.scgi_password = scgi_password
self.scgi_method = scgi_method
class Config:
def __init__(self):
# look for saved config file
if os.path.exists(os.path.join("config", ".pyrtconfig")):
try:
self.CONFIG = pickle.load(open(os.path.join("config", ".pyrtconfig")))
except:
os.remove(os.path.join("config", ".pyrtconfig"))
self.loadconfig()
else:
self.loadconfig()
def set(self, key, value):
if key not in self.CONFIG.__dict__:
return False
else:
self.CONFIG.__dict__[key] = value
self._flush()
return self.CONFIG.__dict__[key]
def _flush(self):
pickle.dump(self.CONFIG, open(os.path.join("config", ".pyrtconfig"), "w"))
def loadconfig(self):
if not os.path.exists(os.path.join("config", ".pyrtrc")):
raise ConfigError("Config File doesn't exist")
config_ = open(os.path.join("config", ".pyrtrc")).read()
config_stripped = ""
for line in config_.split("\n"):
if line == "":
pass
else:
for char in line:
if char == "#":
break
else:
config_stripped += char
config_stripped += "\n"
try:
configfile = json.loads(config_stripped)
if "ssl_certificate" in configfile.keys() and "ssl_private_key" in configfile.keys():
cert = configfile["ssl_certificate"]
pkey = configfile["ssl_private_key"]
else:
cert, pkey = None, None
if "ssl_ca_certs" in configfile.keys():
ca_certs = configfile["ssl_ca_certs"]
else:
ca_certs = None
if "root_directory" in configfile:
root_dir = configfile["root_directory"]
else:
root_dir = "/"
if "logfile" in configfile:
logfile = configfile["logfile"]
else:
logfile = "pyrt.log"
try:
refresh = int(configfile["refresh"])
except:
refresh = 10
if "scgi_username" in configfile:
scgi_username = configfile["scgi_username"]
else:
scgi_username = None
if "scgi_password" in configfile:
scgi_password = configfile["scgi_password"]
else:
scgi_password = None
if "scgi_method" in configfile:
scgi_method = configfile["scgi_method"]
else:
scgi_method = "Digest"
self.CONFIG = ConfigStore(
sockpath=configfile["rtorrent_socket"],
serverhost=configfile["host"],
serverport=configfile["port"],
password=configfile["password"],
ssl_certificate=cert,
ssl_private_key=pkey,
ca_certs=ca_certs,
root_directory=root_dir,
logfile=logfile,
refresh=refresh,
scgi_username=scgi_username,
scgi_password=scgi_password,
scgi_method=scgi_method,
)
self._flush()
except KeyError:
raise ConfigError("Config File is malformed")
def get(self, conf):
if conf in self.CONFIG.__dict__.keys():
return self.CONFIG.__dict__[conf]
else:
return None
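# Illustrative sketch (an assumption, not part of the original source): a
# minimal config/.pyrtrc needs the four required keys parsed above, e.g.
#
#     {
#         "rtorrent_socket": "/tmp/rtorrent.sock",
#         "host": "0.0.0.0",
#         "port": 8080,
#         "password": "changeme"
#     }
#
# (the socket path and values are hypothetical). Anything after a '#' on a line
# is stripped before the JSON is parsed; the remaining keys (ssl_*,
# root_directory, logfile, refresh, scgi_*) are optional.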
|
mountainpenguin/pyrt
|
modules/config.py
|
Python
|
gpl-3.0
| 5,462
|
"""
Created on Feb 15, 2014
@author: alex
"""
from sqlalchemy import Column
from sqlalchemy.types import SmallInteger
from sqlalchemy.types import Unicode
from .meta import Base
class ParameterType(Base):
"""
classdocs
"""
__tablename__ = 'ParameterTypes'
_id = Column(SmallInteger, primary_key=True, autoincrement=True, nullable=False, unique=True)
name = Column(Unicode(250), nullable=False, unique=True)
unit = Column(Unicode(250), nullable=False)
def __init__(self, name, unit):
self.name = name
self.unit = unit
@property
def id(self):
return self._id
@property
def serialize(self):
"""Return data in serializeable (dictionary) format"""
ret_dict = {
'id': self.id,
'name': self.name,
'unit': self.unit
}
return ret_dict
def __repr__(self):
return str(self.serialize)
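# Convenience seeder: populates the ParameterTypes table with a default set of
# measurable quantities and their units.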
def init_parameter_types(db_session):
db_session.add(ParameterType('Temperature', '°C'))
db_session.add(ParameterType('Humidity', '%'))
db_session.add(ParameterType('Volume', 'Liter'))
db_session.add(ParameterType('pH', 'pH'))
db_session.add(ParameterType('Conductivity', 'mS'))
|
AlexanderLang/OpenAutomatedFarm
|
FarmGUI/farmgui/models/ParameterType.py
|
Python
|
gpl-3.0
| 1,237
|
# Copyright 2015 Joel Granados joel.granados@gmail.com
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import glob
from os import path
import shutil
import textwrap
from unittest import TestCase
from run_pipeline import maincli
from tests import helpers
from tests.helpers import (
FILES,
PIPELINES_DIR,
TMPDIR,
)
class PipelineRunTestcase(TestCase):
"""Tests all example pipeline YAMLs under ./pipelines with run_pipeline"""
def setUp(self):
self.tmp_in = FILES["timestream_good_images"]
self.tmp_out = path.join(TMPDIR, 'out')
def _run_pipeline_yaml(self, ymlfile):
yml_opts = {'--comp': False, '--conf': False, '--doc': False,
'--gui': False, '--help': False, '--logfile': None,
'--recalculate': False, '--set': None,
'-i': self.tmp_in,
'-o': self.tmp_out,
'-p': path.join(PIPELINES_DIR, ymlfile),
'-s': True, '-t': None, '-v': 0}
maincli(yml_opts)
def _run_yaml_str(self, ymlstr):
# NB: you have to start the 'pipeline:' bit on a new line, indented
# correctly, and start the triple-quote string with '"""\', so the
# whole string is indented in the same way.
ymlstr = textwrap.dedent(ymlstr)
ymlfile = helpers.make_tmp_file()
with open(ymlfile, 'w') as ymlfh:
ymlfh.write(ymlstr + '\n') # Extra newline, just in case
yml_opts = {'--comp': False, '--conf': False, '--doc': False,
'--gui': False, '--help': False, '--logfile': None,
'--recalculate': False, '--set': None,
'-i': self.tmp_in,
'-o': self.tmp_out,
'-p': ymlfile,
'-s': True, '-t': None, '-v': 0}
maincli(yml_opts)
def tearDown(self):
if path.isdir(self.tmp_out):
shutil.rmtree(self.tmp_out)
class TestPipelinesInPLDir(PipelineRunTestcase):
"""Ensure all demo pipelines work with test dataset"""
def test_all_demo_pipelines(self):
"""Ensure all demo pipelines work with test dataset"""
for config in glob.glob(path.join(PIPELINES_DIR, '*.yml')):
self._run_pipeline_yaml(config)
class TestResizingPipelines(PipelineRunTestcase):
"""Test the resizing in ResultingImageWriter"""
fs = """\
pipeline:
- name: imagewrite
mess: '---Write image---'
outstream: -small
size: %s
outstreams:
- { name: -small }
general:
visualise: False
"""
def _test_resize_pl(self, size):
self._run_yaml_str(self.fs % size)
def test_resize_xy(self):
"""Test the resizing in ResultingImageWriter with cols x rows"""
self._test_resize_pl('[50,30]')
self._test_resize_pl('50x30')
def test_resize_float(self):
"""Test the resizing in ResultingImageWriter with scaling factor"""
self._test_resize_pl('1.5')
self._test_resize_pl('0.5')
self._test_resize_pl('0.1')
def test_resize_fullsize(self):
"""Test the resizing in ResultingImageWriter with no resizing"""
self._test_resize_pl('1.0')
self._test_resize_pl('fullres')
|
borevitzlab/timestreamlib
|
tests/test_pipeline_yml.py
|
Python
|
gpl-3.0
| 3,863
|
#!/usr/bin/env python3
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "arsoft.web.crashupload.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
aroth-arsoft/arsoft-web-crashupload
|
app/manage.py
|
Python
|
gpl-3.0
| 266
|
#
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import copy
import re
import sys
from ansible import constants as C
from ansible.plugins.action.network import ActionModule as ActionNetworkModule
from ansible.module_utils.network.common.utils import load_provider
from ansible.module_utils.network.nxos.nxos import nxos_provider_spec
from ansible.utils.display import Display
display = Display()
class ActionModule(ActionNetworkModule):
def run(self, tmp=None, task_vars=None):
del tmp # tmp no longer has any effect
module_name = self._task.action.split('.')[-1]
self._config_module = True if module_name == 'nxos_config' else False
persistent_connection = self._play_context.connection.split('.')[-1]
warnings = []
if (self._play_context.connection in ('httpapi', 'local') or self._task.args.get('provider', {}).get('transport') == 'nxapi') \
and module_name in ('nxos_file_copy', 'nxos_nxapi'):
return {'failed': True, 'msg': "Transport type 'nxapi' is not valid for '%s' module." % (module_name)}
if module_name == 'nxos_file_copy':
self._task.args['host'] = self._play_context.remote_addr
self._task.args['password'] = self._play_context.password
if self._play_context.connection == 'network_cli':
self._task.args['username'] = self._play_context.remote_user
elif self._play_context.connection == 'local':
self._task.args['username'] = self._play_context.connection_user
if module_name == 'nxos_install_os':
connection = self._connection
if connection.transport == 'local':
persistent_command_timeout = C.PERSISTENT_COMMAND_TIMEOUT
persistent_connect_timeout = C.PERSISTENT_CONNECT_TIMEOUT
else:
persistent_command_timeout = connection.get_option('persistent_command_timeout')
persistent_connect_timeout = connection.get_option('persistent_connect_timeout')
display.vvvv('PERSISTENT_COMMAND_TIMEOUT is %s' % str(persistent_command_timeout), self._play_context.remote_addr)
display.vvvv('PERSISTENT_CONNECT_TIMEOUT is %s' % str(persistent_connect_timeout), self._play_context.remote_addr)
if persistent_command_timeout < 600 or persistent_connect_timeout < 600:
msg = 'PERSISTENT_COMMAND_TIMEOUT and PERSISTENT_CONNECT_TIMEOUT'
msg += ' must be set to 600 seconds or higher when using nxos_install_os module.'
msg += ' Current persistent_command_timeout setting:' + str(persistent_command_timeout)
msg += ' Current persistent_connect_timeout setting:' + str(persistent_connect_timeout)
return {'failed': True, 'msg': msg}
if persistent_connection in ('network_cli', 'httpapi'):
provider = self._task.args.get('provider', {})
if any(provider.values()):
display.warning('provider is unnecessary when using %s and will be ignored' % self._play_context.connection)
del self._task.args['provider']
if self._task.args.get('transport'):
display.warning('transport is unnecessary when using %s and will be ignored' % self._play_context.connection)
del self._task.args['transport']
elif self._play_context.connection == 'local':
provider = load_provider(nxos_provider_spec, self._task.args)
transport = provider['transport'] or 'cli'
display.vvvv('connection transport is %s' % transport, self._play_context.remote_addr)
if transport == 'cli':
pc = copy.deepcopy(self._play_context)
pc.connection = 'ansible.netcommon.network_cli'
pc.network_os = 'cisco.nxos.nxos'
pc.remote_addr = provider['host'] or self._play_context.remote_addr
pc.port = int(provider['port'] or self._play_context.port or 22)
pc.remote_user = provider['username'] or self._play_context.connection_user
pc.password = provider['password'] or self._play_context.password
pc.private_key_file = provider['ssh_keyfile'] or self._play_context.private_key_file
pc.become = provider['authorize'] or False
if pc.become:
pc.become_method = 'enable'
pc.become_pass = provider['auth_pass']
connection = self._shared_loader_obj.connection_loader.get('ansible.netcommon.persistent', pc, sys.stdin,
task_uuid=self._task._uuid)
# TODO: Remove below code after ansible minimal is cut out
if connection is None:
pc.connection = 'network_cli'
pc.network_os = 'nxos'
connection = self._shared_loader_obj.connection_loader.get('persistent', pc, sys.stdin, task_uuid=self._task._uuid)
display.vvv('using connection plugin %s (was local)' % pc.connection, pc.remote_addr)
command_timeout = int(provider['timeout']) if provider['timeout'] else connection.get_option('persistent_command_timeout')
connection.set_options(direct={'persistent_command_timeout': command_timeout})
socket_path = connection.run()
display.vvvv('socket_path: %s' % socket_path, pc.remote_addr)
if not socket_path:
return {'failed': True,
'msg': 'unable to open shell. Please see: ' +
'https://docs.ansible.com/ansible/network_debug_troubleshooting.html#unable-to-open-shell'}
task_vars['ansible_socket'] = socket_path
else:
self._task.args['provider'] = ActionModule.nxapi_implementation(provider, self._play_context)
warnings.append(['connection local support for this module is deprecated and will be removed in version 2.14,'
' use connection either httpapi or ansible.netcommon.httpapi (whichever is applicable)'])
else:
return {'failed': True, 'msg': 'Connection type %s is not valid for this module' % self._play_context.connection}
result = super(ActionModule, self).run(task_vars=task_vars)
if warnings:
if 'warnings' in result:
result['warnings'].extend(warnings)
else:
result['warnings'] = warnings
return result
@staticmethod
def nxapi_implementation(provider, play_context):
provider['transport'] = 'nxapi'
if provider.get('host') is None:
provider['host'] = play_context.remote_addr
if provider.get('port') is None:
if provider.get('use_ssl'):
provider['port'] = 443
else:
provider['port'] = 80
if provider.get('timeout') is None:
provider['timeout'] = C.PERSISTENT_COMMAND_TIMEOUT
if provider.get('username') is None:
provider['username'] = play_context.connection_user
if provider.get('password') is None:
provider['password'] = play_context.password
if provider.get('use_ssl') is None:
provider['use_ssl'] = False
if provider.get('validate_certs') is None:
provider['validate_certs'] = True
return provider
|
mikewiebe-ansible/ansible
|
lib/ansible/plugins/action/nxos.py
|
Python
|
gpl-3.0
| 8,307
|
import os
import unittest
from vsg.rules import ieee
from vsg import vhdlFile
from vsg.tests import utils
sTestDir = os.path.dirname(__file__)
lFile, eError = vhdlFile.utils.read_vhdlfile(os.path.join(sTestDir, 'rule_500_test_input.vhd'))
lExpected_lower = []
lExpected_lower.append('')
utils.read_file(os.path.join(sTestDir, 'rule_500_test_input.fixed_lower.vhd'), lExpected_lower)
lExpected_upper = []
lExpected_upper.append('')
utils.read_file(os.path.join(sTestDir, 'rule_500_test_input.fixed_upper.vhd'), lExpected_upper)
class test_port_rule(unittest.TestCase):
def setUp(self):
self.oFile = vhdlFile.vhdlFile(lFile)
self.assertIsNone(eError)
def test_rule_500_lower(self):
oRule = ieee.rule_500()
self.assertTrue(oRule)
self.assertEqual(oRule.name, 'ieee')
self.assertEqual(oRule.identifier, '500')
self.assertEqual(oRule.groups, ['case', 'case::keyword'])
lExpected = [66, 67, 68, 69, 70]
lExpected.extend([73, 74, 76, 77, 78,79])
lExpected.extend(range(87, 89))
lExpected.extend([91])
lExpected.extend(range(93, 95))
lExpected.extend(range(100, 105))
lExpected.extend([107, 108, 110, 111, 112, 113])
oRule.analyze(self.oFile)
self.assertEqual(utils.extract_violation_lines_from_violation_object(oRule.violations), lExpected)
def test_rule_500_upper(self):
oRule = ieee.rule_500()
oRule.case = 'upper'
self.assertTrue(oRule)
self.assertEqual(oRule.name, 'ieee')
self.assertEqual(oRule.identifier, '500')
lExpected = []
lExpected.extend(range(5, 10))
lExpected.extend([12, 13, 15, 16, 17, 18])
lExpected.extend(range(26, 28))
lExpected.extend([30])
lExpected.extend(range(32, 34))
lExpected.extend(range(39, 44))
lExpected.extend([46, 47, 49, 50, 51, 52])
oRule.analyze(self.oFile)
self.assertEqual(utils.extract_violation_lines_from_violation_object(oRule.violations), lExpected)
def test_fix_rule_500_lower(self):
oRule = ieee.rule_500()
oRule.fix(self.oFile)
lActual = self.oFile.get_lines()
self.assertEqual(lExpected_lower, lActual)
oRule.analyze(self.oFile)
self.assertEqual(oRule.violations, [])
def test_fix_rule_500_upper(self):
oRule = ieee.rule_500()
oRule.case = 'upper'
oRule.fix(self.oFile)
lActual = self.oFile.get_lines()
self.assertEqual(lExpected_upper, lActual)
oRule.analyze(self.oFile)
self.assertEqual(oRule.violations, [])
|
jeremiah-c-leary/vhdl-style-guide
|
vsg/tests/ieee/test_rule_500.py
|
Python
|
gpl-3.0
| 2,648
|
import numpy as np
from math import floor
from ..weight import RankingBasedSelection
class PBILSelection(RankingBasedSelection):
"""
    Selection scheme used by PBIL and the compact GA.
    The same scheme is also used for natural gradient updates with a
    Bernoulli distribution. See also
    [Shirakawa et al. 2018 (AAAI-2018)]<https://arxiv.org/abs/1801.07650>
"""
def __init__(self, selection_rate=0.5, is_use_negative=True, is_minimize=True, is_normalize=False):
super(PBILSelection, self).__init__(is_minimize, is_normalize)
self.selection_rate = selection_rate
self.is_use_negative = is_use_negative
def transform(self, rank_based_vals, xp=np):
weights = xp.zeros_like(rank_based_vals)
worst_rank = len(rank_based_vals)
idx_sorted_rank = xp.argsort(rank_based_vals)
if self.is_use_negative:
half_num_weight = floor(worst_rank * self.selection_rate / 2.)
# the best floor(lam * selection_rate / 2) samples get the positive weights
idx_positive = idx_sorted_rank[:half_num_weight]
weights[idx_positive] = 1
# the worst floor(lam * selection_rate / 2) samples get the negative weights
idx_negative = idx_sorted_rank[-half_num_weight:]
weights[idx_negative] = -1
else:
# the best floor(lam * selection_rate) samples get the positive weights
num_weight = floor(worst_rank * self.selection_rate)
idx_positive = idx_sorted_rank[:num_weight]
weights[idx_positive] = 1
return weights
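# Usage sketch (added for illustration; not part of the original module, and it
# assumes the evoltier package is importable so the relative import resolves).
# With selection_rate=0.5 and is_use_negative=True on a population of 4,
# floor(4 * 0.5 / 2) = 1 sample gets weight +1 and 1 sample gets weight -1:
#
#     selection = PBILSelection(selection_rate=0.5, is_use_negative=True)
#     ranks = np.array([3.0, 1.0, 4.0, 2.0])  # smaller value = better rank
#     selection.transform(ranks)              # -> array([ 0.,  1., -1.,  0.])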
|
satuma777/evoltier
|
evoltier/selection/pbil_selection.py
|
Python
|
gpl-3.0
| 1,621
|
import optunity
import numpy as np
import hpolib_generator as hpo
import cloudpickle as pickle
import sklearn.datasets
import random
positive_digit = 2
name='covtype-%d' % positive_digit
num_folds=5
budget = 150
npos = 500
nneg = 1000
search={'logC': [-8, 1], 'logGamma': [-8, 1]}
covtype = sklearn.datasets.fetch_covtype()
n = covtype.data.shape[0]
positive_idx = [i for i in range(n) if covtype.target[i] == positive_digit]
negative_idx = [i for i in range(n) if not covtype.target[i] == positive_digit]
# draw random subsamples
positive_idx = random.sample(positive_idx, npos)
negative_idx = random.sample(negative_idx, nneg)
original_data = covtype.data[positive_idx + negative_idx, ...]
data = original_data # + 10 * np.random.randn(original_data.shape[0], original_data.shape[1])
labels = [True] * len(positive_idx) + [False] * len(negative_idx)
objfun = hpo.make_svm_objfun(data, labels, num_folds)
hpo_search = hpo.svm_search_space(**search)
hpo.setup_hpolib(hpo.negate(objfun), hpo_search, budget, name)
|
claesenm/optunity-benchmark
|
benchmarks/optunity/covtype-2.py
|
Python
|
gpl-3.0
| 1,022
|
#!/usr/bin/env python2
from glob import glob
import re
import matplotlib.pyplot as plt
import numpy as np
from sys import argv
def get_a1(pattern):
a1 = {}
for fit_file in glob(pattern):
with open(fit_file) as f:
line = f.readline()
coeffs = line.split(' ')
fit_params = fit_file.split('-')
if fit_params[0] not in a1:
a1[fit_params[0]] = []
a1[fit_params[0]].append((float(fit_params[1]), float(coeffs[1])))
    # Sort and remove the sorting hints
for key in a1.keys():
a1[key] = sorted(a1[key], key=lambda x: x[0])
a1[key] = dict(y=map(lambda x: float(x[1]), a1[key]),
x=map(lambda x: float(x[0]), a1[key]))
return a1
def plot_a1():
a1 = get_a1(argv[1])
fig, ax = plt.subplots()
for domain in sorted(a1.keys(), key=lambda x: float(x)):
ax.plot(a1[domain]['x'], a1[domain]['y'],
label='%s pi' % (domain))
ax.legend(loc=0)
fig.savefig('a1.png', dpi=300)
plt.show()
if __name__ == '__main__':
plot_a1()
|
mkawalec/masters
|
contrib/plot_decay/plot2.py
|
Python
|
gpl-3.0
| 1,103
|
# -*- coding: utf-8 -*-
from odoo import models, fields, api
from .eqpt_equipment import EQPT_TYPES
class Paddler(models.Model):
_name = 'eqpt.paddler'
_description = "Paddler Cycle Equipment"
_description = "Cycle paddler equipment"
eqpt_type = fields.Selection(selection=EQPT_TYPES, string="")
eqpt_id = fields.Reference(selection='_get_eqpt_models', string="Equipment")
cycle_id = fields.Many2one(comodel_name='pac.cycle', string="Cycle")
member_id = fields.Many2one(comodel_name='adm.asso.member', string="Member")
|
RemiFr82/ck_addons
|
ck_equipment/models/eqpt_paddler.py
|
Python
|
gpl-3.0
| 551
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Author(s): Abhijeet Kasurde <akasurde@redhat.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: vmware_local_role_manager
short_description: Manage local roles on an ESXi host
description:
- Manage local roles on an ESXi host
version_added: "2.5"
author: Abhijeet Kasurde (@akasurde) <akasurde@redhat.com>
notes:
- Tested on ESXi 6.5
    - Be sure that the ESXi user used for login has the appropriate rights to create / delete / edit roles
requirements:
- "python >= 2.6"
- PyVmomi
options:
local_role_name:
description:
- The local role name to be managed.
required: True
local_privilege_ids:
description:
- The list of privileges that role needs to have.
- Please see U(https://docs.vmware.com/en/VMware-vSphere/6.0/com.vmware.vsphere.security.doc/GUID-ED56F3C4-77D0-49E3-88B6-B99B8B437B62.html)
default: []
state:
description:
- Indicate desired state of the role.
- If the role already exists when C(state=present), the role info is updated.
choices: ['present', 'absent']
default: present
force_remove:
description:
        - If set to C(False), the role is not removed if any permissions are using it.
default: False
type: bool
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = '''
# Example vmware_local_role_manager command from Ansible Playbooks
- name: Add local role to ESXi
vmware_local_role_manager:
hostname: esxi_hostname
username: root
password: vmware
local_role_name: vmware_qa
state: present
- name: Add local role with privileges to ESXi
vmware_local_role_manager:
hostname: esxi_hostname
username: root
password: vmware
local_role_name: vmware_qa
local_privilege_ids: [ 'Folder.Create', 'Folder.Delete']
state: present
- name: Remove local role from ESXi
vmware_local_role_manager:
hostname: esxi_hostname
username: root
password: vmware
local_role_name: vmware_qa
state: absent
'''
RETURN = r'''
local_role_name:
description: Name of local role
returned: always
type: string
role_id:
description: ESXi generated local role id
returned: always
type: int
old_privileges:
description: List of privileges of role before update
returned: on update
type: list
new_privileges:
description: List of privileges of role after update
returned: on update
type: list
'''
try:
from pyVmomi import vim, vmodl
except ImportError:
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vmware import PyVmomi, vmware_argument_spec
class VMwareLocalRoleManager(PyVmomi):
def __init__(self, module):
super(VMwareLocalRoleManager, self).__init__(module)
self.module = module
self.params = module.params
self.role_name = self.params['local_role_name']
self.state = self.params['state']
self.priv_ids = self.params['local_privilege_ids']
self.force = not self.params['force_remove']
self.current_role = None
if self.content.authorizationManager is None:
self.module.fail_json(msg="Failed to get local authorization manager settings.",
details="It seems that %s is a vCenter server "
"instead of an ESXi server" % self.params['hostname'])
def process_state(self):
local_role_manager_states = {
'absent': {
'present': self.state_remove_role,
'absent': self.state_exit_unchanged,
},
'present': {
'present': self.state_update_role,
'absent': self.state_create_role,
}
}
try:
local_role_manager_states[self.state][self.check_local_role_manager_state()]()
except vmodl.RuntimeFault as runtime_fault:
self.module.fail_json(msg=runtime_fault.msg)
except vmodl.MethodFault as method_fault:
self.module.fail_json(msg=method_fault.msg)
except Exception as e:
self.module.fail_json(msg=str(e))
def check_local_role_manager_state(self):
auth_role = self.find_authorization_role()
if auth_role:
self.current_role = auth_role
return 'present'
else:
return 'absent'
def find_authorization_role(self):
desired_role = None
for role in self.content.authorizationManager.roleList:
if role.name == self.role_name:
desired_role = role
return desired_role
def state_create_role(self):
try:
role_id = self.content.authorizationManager.AddAuthorizationRole(name=self.role_name,
privIds=self.priv_ids)
except vim.fault.AlreadyExists as e:
self.module.fail_json(msg="Failed to create a role %s as the user specified role name "
"already exists." % self.role_name,
details=e.msg)
except vim.fault.InvalidName as e:
self.module.fail_json(msg="Failed to create a role %s as the user specified role name "
"is empty" % self.role_name,
details=e.msg)
except vmodl.fault.InvalidArgument as e:
self.module.fail_json(msg="Failed to create a role %s as the user specified privileges "
"are unknown" % self.role_name,
details=e.msg)
result = {
'changed': True,
'role_id': role_id,
'privileges': self.priv_ids,
'local_role_name': self.role_name,
}
self.module.exit_json(**result)
def state_remove_role(self):
try:
self.content.authorizationManager.RemoveAuthorizationRole(roleId=self.current_role.roleId,
failIfUsed=self.force)
except vim.fault.NotFound as e:
self.module.fail_json(msg="Failed to remove a role %s as the user specified role name "
"does not exist." % self.role_name,
details=e.msg)
except vim.fault.RemoveFailed as e:
msg = "Failed to remove a role %s as the user specified role name." % self.role_name
if self.force:
msg += " Use force_remove as True."
self.module.fail_json(msg=msg, details=e.msg)
except vmodl.fault.InvalidArgument as e:
self.module.fail_json(msg="Failed to remove a role %s as the user specified "
"role is a system role" % self.role_name,
details=e.msg)
result = {
'changed': True,
'role_id': self.current_role.roleId,
'local_role_name': self.role_name,
}
self.module.exit_json(**result)
def state_exit_unchanged(self):
self.module.exit_json(changed=False)
def state_update_role(self):
current_privileges = set(self.current_role.privilege)
# Add system-defined privileges, "System.Anonymous", "System.View", and "System.Read".
self.params['local_privilege_ids'].extend(['System.Anonymous', 'System.Read', 'System.View'])
desired_privileges = set(self.params['local_privilege_ids'])
changed_privileges = current_privileges ^ desired_privileges
changed_privileges = list(changed_privileges)
if not changed_privileges:
self.state_exit_unchanged()
        # Keep only the privileges that still need to be applied; building a new
        # list avoids mutating changed_privileges while iterating over it.
        changed_privileges = [priv for priv in changed_privileges
                              if priv in desired_privileges]
try:
self.content.authorizationManager.UpdateAuthorizationRole(roleId=self.current_role.roleId,
newName=self.current_role.name,
privIds=changed_privileges)
except vim.fault.NotFound as e:
self.module.fail_json(msg="Failed to update Role %s. Please check privileges "
"provided for update" % self.role_name,
details=e.msg)
except vim.fault.InvalidName as e:
self.module.fail_json(msg="Failed to update Role %s as role name is empty" % self.role_name,
details=e.msg)
except vim.fault.AlreadyExists as e:
self.module.fail_json(msg="Failed to update Role %s." % self.role_name,
details=e.msg)
except vmodl.fault.InvalidArgument as e:
self.module.fail_json(msg="Failed to update Role %s as user specified "
"role is system role which can not be changed" % self.role_name,
details=e.msg)
except vim.fault.NoPermission as e:
self.module.fail_json(msg="Failed to update Role %s as current session does not"
" have any privilege to update specified role" % self.role_name,
details=e.msg)
role = self.find_authorization_role()
result = {
'changed': True,
'role_id': role.roleId,
'local_role_name': role.name,
'new_privileges': role.privilege,
'old_privileges': current_privileges,
}
self.module.exit_json(**result)
def main():
argument_spec = vmware_argument_spec()
argument_spec.update(dict(local_role_name=dict(required=True, type='str'),
local_privilege_ids=dict(default=[], type='list'),
force_remove=dict(default=False, type='bool'),
state=dict(default='present', choices=['present', 'absent'], type='str')))
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=False)
vmware_local_role_manager = VMwareLocalRoleManager(module)
vmware_local_role_manager.process_state()
if __name__ == '__main__':
main()
|
dataxu/ansible
|
lib/ansible/modules/cloud/vmware/vmware_local_role_manager.py
|
Python
|
gpl-3.0
| 10,915
|
import f311.filetypes as ft
def test_DataFile():
_ = ft.DataFile()
print(_)
def test_FileFits():
_ = ft.FileFits()
print(_)
def test_FilePy():
_ = ft.FilePy()
def test_FileSQLiteDB():
_ = ft.FileSQLiteDB()
def test_FileSpectrum():
_ = ft.FileSpectrum()
print(_)
def test_FileSpectrumFits():
_ = ft.FileSpectrumFits()
print(_)
def test_FileSpectrumXY():
_ = ft.FileSpectrumXY()
print(_)
def test_Spectrum():
_ = ft.Spectrum()
print(_)
|
trevisanj/f311
|
tests/test_filetypes/test_instantialize_classes_filetypes.py
|
Python
|
gpl-3.0
| 507
|
#!/usr/bin/python
# Example using an RGB character LCD wired directly to Raspberry Pi or BeagleBone Black.
import time
import Adafruit_CharLCD as LCD
# Raspberry Pi configuration:
lcd_rs = 27 # Change this to pin 21 on older revision Raspberry Pi's
lcd_en = 22
lcd_d4 = 25
lcd_d5 = 24
lcd_d6 = 23
lcd_d7 = 18
lcd_red = 4
lcd_green = 17
lcd_blue = 7 # Pin 7 is CE1
# Define LCD column and row size for 16x2 LCD.
lcd_columns = 16
lcd_rows = 2
# Initialize the LCD using the pins above.
lcd = LCD.Adafruit_RGBCharLCD(lcd_rs, lcd_en, lcd_d4, lcd_d5, lcd_d6, lcd_d7,
lcd_columns, lcd_rows, lcd_red, lcd_green, lcd_blue)
# Show some basic colors.
lcd.set_color(1.0, 0.0, 0.0)
lcd.clear()
lcd.message('Joyeux')
time.sleep(3.0)
lcd.set_color(0.0, 1.0, 0.0)
lcd.clear()
lcd.message('Noel')
time.sleep(3.0)
lcd.set_color(0.0, 0.0, 1.0)
lcd.clear()
lcd.message('Je vais')
time.sleep(3.0)
lcd.set_color(1.0, 1.0, 0.0)
lcd.clear()
lcd.message('te faire')
time.sleep(3.0)
lcd.set_color(0.0, 1.0, 1.0)
lcd.clear()
lcd.message('des trucs')
time.sleep(3.0)
lcd.set_color(1.0, 0.0, 1.0)
lcd.clear()
lcd.message('de fou')
time.sleep(3.0)
lcd.set_color(1.0, 1.0, 1.0)
lcd.clear()
lcd.message('MOUAHHH')
time.sleep(3.0)
|
qkzk/sncf_lcd
|
adafruit_lcd/examples/red.py
|
Python
|
gpl-3.0
| 1,310
|
# -*- coding: utf-8 -*-
#
# SRL 5 documentation build configuration file, created by
# sphinx-quickstart on Sat Oct 16 15:51:55 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
#extensions = ['sphinx.ext.pngmath', 'sphinx.ext.jsmath']
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'SRL 5'
copyright = u'2010, SRL Developers'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '5'
# The full version, including alpha/beta/rc tags.
release = '1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'nature'
#html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'SRL5doc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'SRL5.tex', u'SRL 5 Documentation',
u'SRL Developers', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
|
SRL/SRL-5
|
doc/sphinx/conf.py
|
Python
|
gpl-3.0
| 6,360
|
#!/usr/bin/env python
"""
A simple example of how to get or set environment variables from python
"""
import os
print(os.environ['USER'])
if 'HOSTNAME' in os.environ:
print(os.environ['HOSTNAME'])
else:
print(
        "you don't have a HOSTNAME in your environment, it is probably just a shell variable")
# lets delete an environment variable
del os.environ['USER']
assert 'USER' not in os.environ
for k, v in os.environ.items():
print(k, v)
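# Setting a variable (a small addition for completeness, since the docstring
# above mentions "set"; MY_EXAMPLE_VAR is a made-up name used only here).
os.environ['MY_EXAMPLE_VAR'] = 'hello'
print(os.environ['MY_EXAMPLE_VAR'])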
|
veltzer/demos-python
|
src/examples/short/environment_variables/simple.py
|
Python
|
gpl-3.0
| 458
|
#!/usr/bin/env python
from glob import glob
from distutils.core import setup
setup( name="mythutils_recfail_alarm",
version="1.0",
description="Autoamtically notify on Recorder Failed via Prowl service",
author="Wylie Swanson",
author_email="wylie@pingzero.net",
url="http://www.pingzero.net",
scripts=glob("bin/*"),
data_files=[
( '/etc/mythutils/', glob('etc/mythutils/*') ),
( '/etc/cron.d/', glob('etc/cron.d/*') ),
]
)
|
wylieswanson/mythutils
|
mythutils_recfail_alarm/setup.py
|
Python
|
gpl-3.0
| 441
|
import copy, time, StringIO
import unittest
from datetime import datetime
from datetime import date
from nive.utils.dataPool2.structure import *
from nive.tests import __local
from nive.utils.dataPool2.tests import test_Base
ftypes = {}
ftypes[u"data2"] = {u"fstr":"string",
u"ftext":"text",
u"ftime":"timestamp",
u"fmselection":"mselection",
u"fmcheckboxes":"mcheckboxes",
u"furllist":"urllist",
u"funitlist":"unitlist",
u"fbool":"bool",
u"fjson":"json"}
ftypes[u"pool_meta"] = {}
for i in test_Base.SystemFlds:
ftypes[u"pool_meta"][i["id"]] = i
class StructureTest(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_set1(self):
structure = PoolStructure(structure=test_Base.struct,
fieldtypes=ftypes,
stdMeta=[u"id",u"pool_type"])
self.assert_(structure.get(u"pool_meta"))
self.assert_(len(structure.get(u"pool_meta")) == len(test_Base.struct[u"pool_meta"]))
self.assert_(len(structure.get(u"data1"))==len(test_Base.struct[u"data1"]))
self.assert_(len(structure.get(u"data2"))==len(test_Base.struct[u"data2"]))
self.assert_(len(structure.stdMeta)==2)
self.assert_(structure.fieldtypes[u"data2"][u"fstr"]=="string")
self.assert_(structure.codepage==u"utf-8")
def test_set2(self):
structure = PoolStructure()
structure.Init(structure=test_Base.struct,
fieldtypes=ftypes,
codepage="latin-1")
self.assert_(structure.get(u"pool_meta"))
self.assert_(len(structure.get(u"pool_meta")) == len(test_Base.struct[u"pool_meta"]))
self.assert_(len(structure.get(u"data1"))==len(test_Base.struct[u"data1"]))
self.assert_(len(structure.get(u"data2"))==len(test_Base.struct[u"data2"]))
self.assert_(len(structure.stdMeta)==0)
self.assert_(structure.fieldtypes[u"data2"][u"fstr"]=="string")
self.assert_(structure.codepage==u"latin-1")
def test_set3(self):
structure = PoolStructure()
structure.Init(structure={u"pool_meta": [], u"data1": [], u"data2": []},
fieldtypes=ftypes,
codepage="latin-1")
self.assert_(structure.get(u"pool_meta"))
self.assert_(len(structure.get(u"pool_meta"))==2)
self.assert_(len(structure.get(u"data1"))==0)
self.assert_(len(structure.get(u"data2"))==0)
def test_empty(self):
structure = PoolStructure()
self.assert_(structure.IsEmpty())
def test_func(self):
structure = PoolStructure(structure=test_Base.struct,
fieldtypes=ftypes,
stdMeta=[u"id",u"pool_type"])
self.assertFalse(structure.IsEmpty())
self.assert_(structure.get("pool_meta"))
self.assert_(structure.get("none","aaa")=="aaa")
self.assert_(structure["pool_meta"])
self.assert_(structure["data1"])
self.assert_(structure["data2"])
self.assert_(structure.has_key("data2"))
self.assert_(len(structure.keys())==3)
class ConversionTest(unittest.TestCase):
def setUp(self):
self.structure = PoolStructure(structure=test_Base.struct,
fieldtypes=ftypes,
stdMeta=[u"id",u"pool_type"])
def tearDown(self):
pass
def test_serialize_notype(self):
self.assert_(self.structure.serialize(u"pool_meta", u"somevalue", 123)==123)
self.assert_(isinstance(self.structure.serialize(u"pool_meta", u"somevalue", "123"), unicode))
value = datetime.now()
self.assert_(self.structure.serialize(u"pool_meta", u"somevalue", value)==value.strftime("%Y-%m-%d %H:%M:%S"))
value = ("aaa","bbb")
self.assert_(self.structure.serialize(u"pool_meta", u"somevalue", value).startswith(u"_json_"))
value = (u"aaa",u"bbb")
self.assert_(self.structure.serialize(u"pool_meta", u"somevalue", value).startswith(u"_json_"))
value = [1,2,3]
self.assert_(self.structure.serialize(u"pool_meta", u"somevalue", value).startswith(u"_json_"))
def test_se_mselection(self):
v = {u"id":u"123", u"pool_sort":u"123.12", u"pool_wfa":["value"], u"somevalue": "test"}
values = self.structure.serialize(u"pool_meta", None, v)
self.assert_(values[u"id"]==123)
self.assert_(values[u"pool_sort"]==123.12)
self.assert_(values[u"pool_wfa"]==u"value")
def test_se_number(self):
self.assert_(self.structure.serialize(u"pool_meta", u"id", 123)==123)
self.assert_(self.structure.serialize(u"pool_meta", u"id", u"123")==123)
self.assert_(self.structure.serialize(u"pool_meta", u"id", "123")==123)
self.assert_(self.structure.serialize(u"pool_meta", u"id", 123.12)==123)
def test_se_float(self):
self.assert_(self.structure.serialize(u"pool_meta", u"pool_sort", 123)==123.0)
self.assert_(self.structure.serialize(u"pool_meta", u"pool_sort", u"123.12")==123.12)
self.assert_(self.structure.serialize(u"pool_meta", u"pool_sort", "123.0")==123.0)
self.assert_(self.structure.serialize(u"pool_meta", u"pool_sort", 123.12)==123.12)
def test_se_date(self):
value = datetime.now()
self.assert_(self.structure.serialize(u"pool_meta", u"pool_change", value)==unicode(value))
value = date.today()
self.assert_(self.structure.serialize(u"pool_meta", u"pool_create", value)==unicode(value))
value = time.time()
self.assert_(self.structure.serialize(u"data2", u"ftime", value)==unicode(value))
def test_se_list(self):
self.assert_(self.structure.serialize(u"pool_meta", u"pool_wfa", u"value")==u"value")
self.assert_(self.structure.serialize(u"pool_meta", u"pool_wfa", ["value"])=="value")
self.assert_(self.structure.serialize(u"pool_meta", u"pool_wfa", ())=="")
def test_se_mlist(self):
self.assert_(self.structure.serialize(u"data2", u"fmselection", u"value"))
self.assert_(self.structure.serialize(u"data2", u"fmselection", [u"value"]))
self.assert_(self.structure.serialize(u"data2", u"fmselection", ("value",)))
self.assertFalse(self.structure.serialize(u"data2", u"fmselection", u""))
self.assert_(self.structure.serialize(u"data2", u"mcheckboxes", u"value"))
self.assert_(self.structure.serialize(u"data2", u"furllist", u"value"))
self.assert_(self.structure.serialize(u"data2", u"funitlist", u"value"))
def test_se_bool(self):
self.assert_(self.structure.serialize(u"data2", u"fbool", u"true")==1)
self.assert_(self.structure.serialize(u"data2", u"fbool", u"false")==0)
self.assert_(self.structure.serialize(u"data2", u"fbool", True)==1)
self.assert_(self.structure.serialize(u"data2", u"fbool", False)==0)
self.assert_(self.structure.serialize(u"data2", u"fbool", u"True")==1)
self.assert_(self.structure.serialize(u"data2", u"fbool", u"False")==0)
self.assert_(self.structure.serialize(u"data2", u"fbool", ("???",))==0)
def test_se_json(self):
self.assert_(self.structure.serialize(u"data2", u"fjson", {"a":123,"b":"aaa"}))
self.assert_(json.loads(self.structure.serialize(u"data2", u"fjson", {"a":123,"b":"aaa"}))["a"]==123)
self.assert_(json.loads(self.structure.serialize(u"data2", u"fjson", {"a":123,"b":"aaa"}))["b"]==u"aaa")
def test_deserialize_notype(self):
value = u"_json_"+json.dumps(("aaa","bbb"))
self.assert_(self.structure.deserialize(u"pool_meta", u"somevalue", value)[0]==u"aaa")
self.assert_(self.structure.deserialize(u"pool_meta", u"somevalue", "somevalue")==u"somevalue")
def test_ds_mselection(self):
v = {u"fmselection": json.dumps(["aaa","bbb"]),u"furllist":json.dumps(["aaa","bbb"]), u"somevalue": "test"}
values = self.structure.deserialize(u"data2", None, v)
self.assert_(values[u"fmselection"][0]=="aaa")
self.assert_(values[u"furllist"][0]=="aaa")
def test_ds_date(self):
value = datetime.now()
x=self.structure.deserialize(u"pool_meta", u"pool_change", unicode(value))
self.assert_(x.strftime("%Y-%m-%d %H:%M:%S")==value.strftime("%Y-%m-%d %H:%M:%S"))
value = date.today()
x=self.structure.deserialize(u"pool_meta", u"pool_create", unicode(value))
self.assert_(x.strftime("%Y-%m-%d")==value.strftime("%Y-%m-%d"))
value = time.time()
self.assert_(self.structure.deserialize(u"data2", u"ftime", value))
    def test_ds_mlist(self):
self.assert_(self.structure.deserialize(u"data2", u"fmselection", json.dumps(["aaa","bbb"]))[0]=="aaa")
self.assert_(self.structure.deserialize(u"data2", u"fmcheckboxes", json.dumps(["aaa","bbb"]))[0]=="aaa")
self.assert_(self.structure.deserialize(u"data2", u"furllist", json.dumps(["aaa","bbb"]))[0]=="aaa")
self.assert_(self.structure.deserialize(u"data2", u"funitlist", json.dumps(["aaa","bbb"]))[0]=="aaa")
def test_ds_json(self):
self.assert_(self.structure.deserialize(u"data2", u"fjson", json.dumps(["aaa","bbb"]))[0]=="aaa")
def seCallback(value, field):
return value.swapcase()
def deCallback(value, field):
return value.capitalize()
class CallbackTest(unittest.TestCase):
def setUp(self):
self.structure = PoolStructure(structure=test_Base.struct,
fieldtypes=ftypes,
stdMeta=[u"id",u"pool_type"])
self.structure.serializeCallbacks = {"string": seCallback}
self.structure.deserializeCallbacks = {"string": deCallback}
def tearDown(self):
pass
def test_serialize_callback(self):
self.assert_(self.structure.serialize(u"pool_meta", u"title", u"somevalue")==u"SOMEVALUE")
self.assert_(self.structure.deserialize(u"pool_meta", u"title", u"somevalue")==u"Somevalue")
def test_se_mselection(self):
v = {u"id":u"123", u"pool_sort":u"123.12", u"pool_wfa":["value"], u"somevalue": "test"}
values = self.structure.serialize(u"pool_meta", None, v)
self.assert_(values[u"id"]==123)
self.assert_(values[u"pool_sort"]==123.12)
self.assert_(values[u"pool_wfa"]==u"value")
def test_se_number(self):
self.assert_(self.structure.serialize(u"pool_meta", u"id", 123)==123)
self.assert_(self.structure.serialize(u"pool_meta", u"id", u"123")==123)
self.assert_(self.structure.serialize(u"pool_meta", u"id", "123")==123)
self.assert_(self.structure.serialize(u"pool_meta", u"id", 123.12)==123)
def test_se_float(self):
self.assert_(self.structure.serialize(u"pool_meta", u"pool_sort", 123)==123.0)
self.assert_(self.structure.serialize(u"pool_meta", u"pool_sort", u"123.12")==123.12)
self.assert_(self.structure.serialize(u"pool_meta", u"pool_sort", "123.0")==123.0)
self.assert_(self.structure.serialize(u"pool_meta", u"pool_sort", 123.12)==123.12)
|
nive-cms/nive
|
nive/utils/dataPool2/tests/test_structure.py
|
Python
|
gpl-3.0
| 11,499
|
#!/usr/bin/env python
"""
Copyright (C) 2017, California Institute of Technology
This file is part of addm_toolbox.
addm_toolbox is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
addm_toolbox is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with addm_toolbox. If not, see <http://www.gnu.org/licenses/>.
---
Module: ddm_mla_test.py
Author: Gabriela Tavares, gtavares@caltech.edu
Performs a test to check the validity of the maximum likelihood algorithm (MLA)
for the drift-diffusion model (DDM). Artificial data is generated using
specific parameters for the model. These parameters are then recovered through
a maximum likelihood estimation procedure, using a grid search over the 2 free
parameters of the model.
"""
from __future__ import absolute_import
import numpy as np
import pkg_resources
from builtins import range, str
from multiprocessing import Pool
from .ddm_mla import DDM
from .util import load_trial_conditions_from_csv
def wrap_ddm_get_model_log_likelihood(args):
"""
Wrapper for DDM.get_model_log_likelihood(), intended for parallel
computation using a threadpool.
Args:
args: a tuple where the first item is a DDM object, and the remaining
            items are the same arguments required by
DDM.get_model_log_likelihood().
Returns:
The output of DDM.get_model_log_likelihood().
"""
model = args[0]
return model.get_model_log_likelihood(*args[1:])
def main(d, sigma, rangeD, rangeSigma, trialsFileName=None, numTrials=10,
numSimulations=10, binStep=100, maxRT=8000, numThreads=9,
verbose=False):
"""
Args:
d: float, DDM parameter for generating artificial data.
sigma: float, DDM parameter for generating artificial data.
rangeD: list of floats, search range for parameter d.
rangeSigma: list of floats, search range for parameter sigma.
trialsFileName: string, path of trial conditions file.
numTrials: int, number of artificial data trials to be generated per
trial condition.
numSimulations: int, number of simulations to be generated per trial
condition, to be used in the RT histograms.
binStep: int, size of the bin step to be used in the RT histograms.
maxRT: int, maximum RT to be used in the RT histograms.
numThreads: int, size of the thread pool.
verbose: boolean, whether or not to increase output verbosity.
"""
pool = Pool(numThreads)
histBins = list(range(0, maxRT + binStep, binStep))
# Load trial conditions.
if not trialsFileName:
trialsFileName = pkg_resources.resource_filename(
u"addm_toolbox", u"test_data/test_trial_conditions.csv")
trialConditions = load_trial_conditions_from_csv(trialsFileName)
# Generate artificial data.
dataRTLeft = dict()
dataRTRight = dict()
for trialCondition in trialConditions:
dataRTLeft[trialCondition] = list()
dataRTRight[trialCondition] = list()
model = DDM(d, sigma)
for trialCondition in trialConditions:
t = 0
while t < numTrials:
try:
trial = model.simulate_trial(
trialCondition[0], trialCondition[1])
except:
print(u"An exception occurred while generating artificial "
"trial " + str(t) + u" for condition " +
str(trialCondition[0]) + u", " + str(trialCondition[1]) +
u".")
raise
if trial.choice == -1:
dataRTLeft[trialCondition].append(trial.RT)
elif trial.choice == 1:
dataRTRight[trialCondition].append(trial.RT)
t += 1
# Generate histograms for artificial data.
dataHistLeft = dict()
dataHistRight = dict()
for trialCondition in trialConditions:
dataHistLeft[trialCondition] = np.histogram(
dataRTLeft[trialCondition], bins=histBins)[0]
dataHistRight[trialCondition] = np.histogram(
dataRTRight[trialCondition], bins=histBins)[0]
# Grid search on the parameters of the model.
if verbose:
print(u"Performing grid search over the model parameters...")
listParams = list()
models = list()
for d in rangeD:
for sigma in rangeSigma:
model = DDM(d, sigma)
models.append(model)
listParams.append((model, trialConditions, numSimulations,
histBins, dataHistLeft, dataHistRight))
logLikelihoods = pool.map(wrap_ddm_get_model_log_likelihood, listParams)
pool.close()
if verbose:
for i, model in enumerate(models):
print(u"L" + str(model.params) + u" = " + str(logLikelihoods[i]))
bestIndex = logLikelihoods.index(max(logLikelihoods))
print(u"Best fit: " + str(models[bestIndex].params))
|
goptavares/aDDM-Toolbox
|
addm_toolbox/ddm_mla_test.py
|
Python
|
gpl-3.0
| 5,299
|
from rest_framework import viewsets, mixins
from .models import Member
from .serializers import MemberSerializer, MemberDetailSerializer
# Team app views.
# All team members
class MembersViewSet (viewsets.ReadOnlyModelViewSet):
serializer_class = MemberSerializer
queryset = Member.objects
def list(self, request, **kwargs):
response = super(__class__, self).list(request, **kwargs)
response['Access-Control-Allow-Origin'] = 'http://192.168.18.107:4200' # TODO: REMOVE, TESTING ONLY
return response
class PublishedMembersViewSet (mixins.ListModelMixin,
viewsets.GenericViewSet):
serializer_class = MemberSerializer
queryset = Member.objects.filter(published=True).order_by('-published_date')
def list(self, request, **kwargs):
response = super(__class__, self).list(request, **kwargs)
response['Access-Control-Allow-Origin'] = 'http://192.168.18.107:4200' # TODO: REMOVE, TESTING ONLY
return response
class PublishedMembersDetailViewSet (mixins.RetrieveModelMixin,
viewsets.GenericViewSet):
serializer_class = MemberDetailSerializer
queryset = Member.objects.filter(published=True).order_by('-published_date')
def retrieve(self, request, *args, **kwargs):
response = super(__class__, self).retrieve(request, *args, **kwargs)
response['Access-Control-Allow-Origin'] = 'http://192.168.18.107:4200' # TODO: REMOVE, TESTING ONLY
return response
# Employees only
class EmployeesViewSet (viewsets.ReadOnlyModelViewSet):
serializer_class = MemberSerializer
queryset = Member.objects.filter(status__in=(Member.FULLTIME, Member.PARTTIME))
def list(self, request, **kwargs):
response = super(__class__, self).list(request, **kwargs)
response['Access-Control-Allow-Origin'] = 'http://192.168.18.107:4200' # TODO: REMOVE, TESTING ONLY
return response
class PublishedEmployeesViewSet (mixins.ListModelMixin,
viewsets.GenericViewSet):
serializer_class = MemberSerializer
queryset = Member.objects.filter(published=True, status__in=(Member.FULLTIME, Member.PARTTIME)).order_by('published_date')
def list(self, request, **kwargs):
response = super(__class__, self).list(request, **kwargs)
response['Access-Control-Allow-Origin'] = 'http://192.168.18.107:4200' # TODO: REMOVE, TESTING ONLY
return response
class PublishedEmployeesDetailViewSet (mixins.RetrieveModelMixin,
viewsets.GenericViewSet):
serializer_class = MemberDetailSerializer
queryset = Member.objects.filter(published=True, status__in=(Member.FULLTIME, Member.PARTTIME)).order_by('published_date')
def retrieve(self, request, *args, **kwargs):
response = super(__class__, self).retrieve(request, *args, **kwargs)
response['Access-Control-Allow-Origin'] = 'http://192.168.18.107:4200' # TODO: REMOVE, TESTING ONLY
return response
# Associates only
class AssociatesViewSet (viewsets.ReadOnlyModelViewSet):
serializer_class = MemberSerializer
queryset = Member.objects.filter(status=Member.ASSOCIATE)
def list(self, request, **kwargs):
response = super(__class__, self).list(request, **kwargs)
response['Access-Control-Allow-Origin'] = 'http://192.168.18.107:4200' # TODO: REMOVE, TESTING ONLY
return response
class PublishedAssociatesViewSet (mixins.ListModelMixin,
viewsets.GenericViewSet):
serializer_class = MemberSerializer
queryset = Member.objects.filter(published=True, status=Member.ASSOCIATE).order_by('-last_name')
def list(self, request, **kwargs):
response = super(__class__, self).list(request, **kwargs)
response['Access-Control-Allow-Origin'] = 'http://192.168.18.107:4200' # TODO: REMOVE, TESTING ONLY
return response
class PublishedAssociatesDetailViewSet (mixins.RetrieveModelMixin,
viewsets.GenericViewSet):
serializer_class = MemberDetailSerializer
queryset = Member.objects.filter(published=True, status=Member.ASSOCIATE).order_by('-last_name')
def retrieve(self, request, *args, **kwargs):
response = super(__class__, self).retrieve(request, *args, **kwargs)
response['Access-Control-Allow-Origin'] = 'http://192.168.18.107:4200' # TODO: REMOVE, TESTING ONLY
return response
|
aileron-split/aileron-web
|
server/team/views.py
|
Python
|
gpl-3.0
| 4,519
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2018-11-14 17:41
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import posgradmin.models
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('posgradmin', '0021_auto_20181109_2042'),
]
operations = [
migrations.CreateModel(
name='AnexoExpediente',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('fecha', models.DateTimeField(auto_now_add=True)),
('archivo', models.FileField(upload_to=posgradmin.models.anexo_expediente_path)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
sostenibilidad-unam/posgrado
|
posgradmin/posgradmin/migrations/0022_anexoexpediente.py
|
Python
|
gpl-3.0
| 975
|